text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
# Reference ("truth") tables used by the benchmark tests. Each matrix is
# 16 rows x 7 columns of Float64 values. Based on the repo metadata this file
# is `test/benchmarks/natraj_trues.jl` — presumably tabulated Stokes-vector
# results (I, Q, U) from a published radiative-transfer benchmark; confirm
# against the citing test before relying on the interpretation.

# I component truth values.
I_trues =
[0.44129802 0.39444956 0.30091208 0.25465866 0.30253774 0.39726529 0.44454934 ;
0.39250948 0.35223311 0.27210130 0.23339588 0.27639321 0.35966691 0.40109329 ;
0.35082997 0.31575536 0.24622966 0.21348201 0.25258702 0.32676664 0.36354469 ;
0.29863696 0.26973242 0.21277028 0.18702656 0.22140580 0.28468958 0.31590800 ;
0.26939388 0.24383765 0.19368088 0.17169133 0.20342523 0.26071536 0.28888259 ;
0.22153000 0.20137817 0.16217962 0.14615212 0.17344750 0.22089470 0.24406575 ;
0.20185562 0.18392101 0.14920591 0.13557847 0.16097330 0.20430272 0.22539038 ;
0.16889020 0.15469709 0.12752450 0.11786073 0.13989890 0.17613018 0.19363900 ;
0.13097041 0.12117362 0.10280748 0.09759154 0.11532259 0.14285044 0.15600064 ;
0.10231626 0.09595756 0.08440503 0.08239375 0.09628240 0.11652976 0.12607100 ;
0.08683737 0.08239718 0.07459712 0.07418876 0.08561228 0.10147599 0.10886768 ;
0.06783499 0.06583006 0.06269131 0.06393739 0.07157324 0.08121402 0.08559886 ;
0.05781217 0.05713933 0.05643322 0.05814731 0.06295443 0.06843439 0.07085458 ;
0.05385796 0.05372482 0.05391877 0.05550325 0.05861141 0.06185271 0.06324324 ;
0.05240320 0.05247332 0.05294178 0.05423678 0.05628819 0.05826948 0.05909602 ;
0.05300496 0.05300496 0.05300496 0.05300496 0.05300496 0.05300496 0.05300496 ]
# Q component truth values.
Q_trues =
[ -0.01753141 -0.06485313 -0.15965601 -0.20757277 -0.16128167 -0.06766886 -0.02078273;
-0.01772108 -0.05944266 -0.14330674 -0.18659926 -0.14759865 -0.06687645 -0.02630489;
-0.01524525 -0.05274909 -0.12838028 -0.16821107 -0.13473764 -0.06376036 -0.02795997;
-0.00990697 -0.04270497 -0.10914792 -0.14510675 -0.11778344 -0.05766213 -0.02717801;
-0.00608288 -0.03648858 -0.09825566 -0.13222805 -0.10800002 -0.05336629 -0.02557158;
0.00131430 -0.02554220 -0.08036031 -0.11134114 -0.09162819 -0.04505874 -0.02122145;
0.00477642 -0.02076298 -0.07299588 -0.10284244 -0.08476326 -0.04114469 -0.01875835;
0.01119511 -0.01235218 -0.06066038 -0.08873702 -0.07303478 -0.03378526 -0.01355369;
0.01968930 -0.00196538 -0.04650218 -0.07273772 -0.05901730 -0.02364220 -0.00534093;
0.02701775 0.00644566 -0.03586341 -0.06078292 -0.04774078 -0.01412655 0.00326301;
0.03129776 0.01117009 -0.03016559 -0.05432509 -0.04118074 -0.00790872 0.00926745;
0.03668441 0.01698497 -0.02328501 -0.04623546 -0.03216694 0.00160101 0.01892054;
0.03923450 0.01977043 -0.01979730 -0.04164830 -0.02631851 0.00847536 0.02619209;
0.03983604 0.02053460 -0.01852852 -0.03954758 -0.02322116 0.01240671 0.03045076;
0.03969919 0.02052770 -0.01814346 -0.03853982 -0.02148987 0.01473155 0.03300636;
0.03755859 0.01877930 -0.01877930 -0.03755859 -0.01877930 0.01877930 0.03755859]
# U component truth values (first and last columns are identically zero).
U_trues =
[ 0 0.04390364 0.07365528 0.08128298 0.06713098 0.03737934 0 ;
0 0.04428736 0.07046980 0.07153176 0.05342685 0.02724440 0 ;
0 0.04435776 0.06762732 0.06357361 0.04248541 0.01921585 0 ;
0 0.04408551 0.06384064 0.05397200 0.02964161 0.00988649 0 ;
0 0.04374666 0.06158007 0.04872177 0.02280850 0.00497510 0 ;
0 0.04291904 0.05764878 0.04024242 0.01205313 -0.00267662 0 ;
0 0.04248227 0.05594215 0.03677307 0.00775068 -0.00570920 0 ;
0 0.04161531 0.05293867 0.03093600 0.00064404 -0.01067932 0 ;
0 0.04036105 0.04917038 0.02406753 -0.00748419 -0.01629352 0 ;
0 0.03913232 0.04592516 0.01855839 -0.01378109 -0.02057393 0 ;
0 0.03828742 0.04388718 0.01529883 -0.01738884 -0.02298860 0 ;
0 0.03686504 0.04073530 0.01057373 -0.02242105 -0.02629131 0 ;
0 0.03563204 0.03822653 0.00708827 -0.02594929 -0.02854377 0 ;
0 0.03475930 0.03654849 0.00488817 -0.02808194 -0.02987113 0 ;
0 0.03413018 0.03538005 0.00341471 -0.02946560 -0.03071547 0 ;
0 0.03252669 0.03252669 0.00000000 -0.03252669 -0.03252669 0 ; ]
|
{"hexsha": "cd6b1f49a0affa440e8c679f14c99189a0de771f", "size": 3792, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/benchmarks/natraj_trues.jl", "max_stars_repo_name": "RemoteSensingTools/vSmartMOM.jl", "max_stars_repo_head_hexsha": "fe5b7d28ca99bef0d1702293749d217e8c839db6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2021-05-07T21:58:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T18:03:07.000Z", "max_issues_repo_path": "test/benchmarks/natraj_trues.jl", "max_issues_repo_name": "RupeshJey/RadiativeTransfer.jl", "max_issues_repo_head_hexsha": "dcb0cfcdbb8b462d7badddbd00110e66c88602a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2020-08-24T21:33:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-03T19:30:14.000Z", "max_forks_repo_path": "test/benchmarks/natraj_trues.jl", "max_forks_repo_name": "RadiativeTransfer/RadiativeTransfer.jl", "max_forks_repo_head_hexsha": "41c228d6058b9293338299a80baf7196b7833d23", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-22T23:35:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-22T23:35:29.000Z", "avg_line_length": 71.5471698113, "max_line_length": 87, "alphanum_fraction": 0.7354957806, "num_tokens": 2060}
|
"""End to end testing on feedforward models
"""
# pylint: disable=C0103
# pylint: disable=C0325
# pylint: disable=E1101
import numpy as np
from nltk.corpus import brown
from model_wrangler.model_wrangler import ModelWrangler
from model_wrangler.dataset_managers import DatasetManager
from model_wrangler.model.losses import accuracy
from model_wrangler.model.corral.text_classification import TextClassificationModel
from model_wrangler.model.tester import ModelTester
# Model configuration for the text-classification e2e test.
# 'pad_length' is 256, matching the 256-character truncation applied in
# make_testdata; 'out_sizes' [5] is read back by test_text_ff as the number
# of output classes. The two entries in 'hidden_params' are presumably
# 1-D convolutional layers (kernel/strides keys) — confirm against
# TextClassificationModel.
CONV_PARAMS = {
    'name': 'test_text',
    'path': './tests/test_text',
    'graph': {
        'num_inputs': 1,
        'pad_length': 256,
        # Two identical hidden layers.
        'hidden_params': [
            {
                'num_units': 4,
                'kernel': 3,
                'strides': 1,
                'bias': True,
                'activation': 'relu',
                'activity_reg': {'l1': 0.1},
                'dropout_rate': 0.0,
            },
            {
                'num_units': 4,
                'kernel': 3,
                'strides': 1,
                'bias': True,
                'activation': 'relu',
                'activity_reg': {'l1': 0.1},
                'dropout_rate': 0.0,
            }
        ],
        # Embedding layer applied in the graph; see TextClassificationModel.
        'embed_params': {
            'num_units': 5,
            'bias': True,
            'activation': 'relu'
        },
        'out_sizes': [5],
    }
}
def make_testdata(out_dim=3, num_samples=100):
    """Make a toy classification dataset from the Brown corpus.

    Args:
        out_dim: number of classes; labels cycle through the classes so
            they are roughly balanced.
        num_samples: number of (text, label) pairs to return.

    Returns:
        Tuple ``(X, y)`` where ``X`` is a list of paragraph strings
        truncated to 256 characters and ``y`` is a list of one-hot
        ``numpy`` arrays of length ``out_dim``.
    """
    X = []
    y = []
    for idx, para in enumerate(brown.paras()):
        # The original checked `idx > num_samples` *after* appending, which
        # returned num_samples + 2 items; stop before appending instead so
        # exactly num_samples pairs are produced.
        if idx >= num_samples:
            break
        X.append(' '.join(para[0])[:256])
        _tmpy = np.zeros((out_dim,))
        _tmpy[idx % out_dim] = 1.0
        y.append(_tmpy)
    return X, y
def test_text_ff():
    """End-to-end smoke test: build, score, train, and re-score the model.

    Loss and accuracy are printed before and after training rather than
    asserted, so this is a "does it run" check, not a correctness check.
    """
    ff_model = ModelWrangler(TextClassificationModel, CONV_PARAMS)

    num_classes = CONV_PARAMS['graph']['out_sizes'][0]
    X, y = make_testdata(out_dim=num_classes)

    # The same data serves as both the training and holdout split here.
    training_data = DatasetManager([X], [y])
    holdout_data = DatasetManager([X], [y])
    ff_model.add_data(training_data, holdout_data)

    # Metrics before training.
    print("Loss: {}".format(ff_model.score([X], [y])))
    print("Acc'y: {}".format(ff_model.score([X], [y], score_func=accuracy)))

    ff_model.train()

    # Metrics after training.
    print("Loss: {}".format(ff_model.score([X], [y])))
    print("Acc'y: {}".format(ff_model.score([X], [y], score_func=accuracy)))
if __name__ == "__main__":
    # NOTE: a standalone ModelTester unit-test invocation used to live here
    # but is currently disabled; only the end-to-end test runs.
    print("\n\ne2e testing text convolutional model")
    test_text_ff()
|
{"hexsha": "fbea7322ea44f98c3a214394488f77433a7f684d", "size": 2583, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_text.py", "max_stars_repo_name": "bmcmenamin/model_wrangler", "max_stars_repo_head_hexsha": "c5471cc106d475c50bf26791b913f2d556a1de0a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_text.py", "max_issues_repo_name": "bmcmenamin/model_wrangler", "max_issues_repo_head_hexsha": "c5471cc106d475c50bf26791b913f2d556a1de0a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_text.py", "max_forks_repo_name": "bmcmenamin/model_wrangler", "max_forks_repo_head_hexsha": "c5471cc106d475c50bf26791b913f2d556a1de0a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-01-23T23:26:15.000Z", "max_forks_repo_forks_event_max_datetime": "2018-01-23T23:26:15.000Z", "avg_line_length": 24.6, "max_line_length": 83, "alphanum_fraction": 0.5574912892, "include": true, "reason": "import numpy", "num_tokens": 665}
|
import pathlib
import warnings
import numpy as np
import pytest
import xarray as xr
from tests.fixtures import generate_dataset
from xcdat.dataset import (
_has_cf_compliant_time,
_keep_single_var,
_postprocess_dataset,
_preprocess_non_cf_dataset,
_split_time_units_attr,
decode_non_cf_time,
open_dataset,
open_mfdataset,
)
from xcdat.logger import setup_custom_logger
# Module-level logger for these tests; propagate=True forwards records to the
# root logger (presumably so pytest's log capture can see them — confirm
# against setup_custom_logger).
logger = setup_custom_logger("xcdat.dataset", propagate=True)
class TestOpenDataset:
    """Tests for ``open_dataset`` using single netCDF files written to a
    pytest-managed temporary directory."""

    @pytest.fixture(autouse=True)
    def setup(self, tmp_path):
        # Create temporary directory to save files.
        dir = tmp_path / "input_data"
        dir.mkdir()
        self.file_path = f"{dir}/file.nc"

    def test_non_cf_compliant_time_is_not_decoded(self):
        # With decode_times=False, the round-tripped dataset must be
        # identical to a freshly generated non-CF dataset (no decoding).
        ds = generate_dataset(cf_compliant=False, has_bounds=True)
        ds.to_netcdf(self.file_path)
        result = open_dataset(self.file_path, decode_times=False)
        expected = generate_dataset(cf_compliant=False, has_bounds=True)
        assert result.identical(expected)

    def test_non_cf_compliant_time_is_decoded(self):
        ds = generate_dataset(cf_compliant=False, has_bounds=False)
        ds.to_netcdf(self.file_path)
        result = open_dataset(self.file_path, data_var="ts")
        # Generate an expected dataset with decoded non-CF compliant time units.
        expected = generate_dataset(cf_compliant=True, has_bounds=True)
        expected_time_data = np.array(
            [
                "2000-01-01T00:00:00.000000000",
                "2000-02-01T00:00:00.000000000",
                "2000-03-01T00:00:00.000000000",
                "2000-04-01T00:00:00.000000000",
                "2000-05-01T00:00:00.000000000",
                "2000-06-01T00:00:00.000000000",
                "2000-07-01T00:00:00.000000000",
                "2000-08-01T00:00:00.000000000",
                "2000-09-01T00:00:00.000000000",
                "2000-10-01T00:00:00.000000000",
                "2000-11-01T00:00:00.000000000",
                "2000-12-01T00:00:00.000000000",
                "2001-01-01T00:00:00.000000000",
                "2001-02-01T00:00:00.000000000",
                "2001-03-01T00:00:00.000000000",
            ],
            dtype="datetime64[ns]",
        )
        expected["time"] = xr.DataArray(
            name="time",
            data=expected_time_data,
            dims="time",
            attrs={
                "units": "months since 2000-01-01",
                "calendar": "standard",
                "axis": "T",
                "long_name": "time",
                "standard_name": "time",
                "bounds": "time_bnds",
            },
        )
        expected.time_bnds.data[:] = np.array(
            [
                ["1999-12-16T12:00:00.000000000", "2000-01-16T12:00:00.000000000"],
                ["2000-01-16T12:00:00.000000000", "2000-02-15T12:00:00.000000000"],
                ["2000-02-15T12:00:00.000000000", "2000-03-16T12:00:00.000000000"],
                ["2000-03-16T12:00:00.000000000", "2000-04-16T00:00:00.000000000"],
                ["2000-04-16T00:00:00.000000000", "2000-05-16T12:00:00.000000000"],
                ["2000-05-16T12:00:00.000000000", "2000-06-16T00:00:00.000000000"],
                ["2000-06-16T00:00:00.000000000", "2000-07-16T12:00:00.000000000"],
                ["2000-07-16T12:00:00.000000000", "2000-08-16T12:00:00.000000000"],
                ["2000-08-16T12:00:00.000000000", "2000-09-16T00:00:00.000000000"],
                ["2000-09-16T00:00:00.000000000", "2000-10-16T12:00:00.000000000"],
                ["2000-10-16T12:00:00.000000000", "2000-11-16T00:00:00.000000000"],
                ["2000-11-16T00:00:00.000000000", "2000-12-16T12:00:00.000000000"],
                ["2000-12-16T12:00:00.000000000", "2001-01-16T12:00:00.000000000"],
                ["2001-01-16T12:00:00.000000000", "2001-02-15T00:00:00.000000000"],
                ["2001-02-15T00:00:00.000000000", "2001-03-15T00:00:00.000000000"],
            ],
            dtype="datetime64[ns]",
        )
        expected.time.encoding = {
            # Set source as result source because it changes every test run.
            "source": result.time.encoding["source"],
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": "months since 2000-01-01",
            "calendar": "standard",
        }
        assert result.identical(expected)
        assert result.time.encoding == expected.time.encoding

    def test_preserves_lat_and_lon_bounds_if_they_exist(self):
        ds = generate_dataset(cf_compliant=True, has_bounds=True)
        # Suppress UserWarning regarding missing time.encoding "units" because
        # it is not relevant to this test.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            ds.to_netcdf(self.file_path)
        result = open_dataset(self.file_path, data_var="ts")
        expected = ds.copy()
        assert result.identical(expected)

    def test_keeps_specified_var(self):
        ds = generate_dataset(cf_compliant=True, has_bounds=True)
        # Create a modified version of the Dataset with a new var
        ds_mod = ds.copy()
        ds_mod["tas"] = ds_mod.ts.copy()
        # Suppress UserWarning regarding missing time.encoding "units" because
        # it is not relevant to this test.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            ds_mod.to_netcdf(self.file_path)
        result = open_dataset(self.file_path, data_var="ts")
        expected = ds.copy()
        assert result.identical(expected)
class TestOpenMfDataset:
    """Tests for ``open_mfdataset`` using two netCDF files whose data
    variables ("ts" and "tas") are merged on open."""

    # NOTE(review): this fixture is named `setUp` while sibling classes use
    # `setup`; both work under pytest, but the naming is inconsistent.
    @pytest.fixture(autouse=True)
    def setUp(self, tmp_path):
        # Create temporary directory to save files.
        dir = tmp_path / "input_data"
        dir.mkdir()
        self.file_path1 = f"{dir}/file1.nc"
        self.file_path2 = f"{dir}/file2.nc"

    def test_non_cf_compliant_time_is_not_decoded(self):
        ds1 = generate_dataset(cf_compliant=False, has_bounds=True)
        ds1.to_netcdf(self.file_path1)
        ds2 = generate_dataset(cf_compliant=False, has_bounds=True)
        ds2 = ds2.rename_vars({"ts": "tas"})
        ds2.to_netcdf(self.file_path2)
        result = open_mfdataset([self.file_path1, self.file_path2], decode_times=False)
        expected = ds1.merge(ds2)
        assert result.identical(expected)

    def test_non_cf_compliant_time_is_decoded(self):
        ds1 = generate_dataset(cf_compliant=False, has_bounds=False)
        ds2 = generate_dataset(cf_compliant=False, has_bounds=False)
        ds2 = ds2.rename_vars({"ts": "tas"})
        ds1.to_netcdf(self.file_path1)
        ds2.to_netcdf(self.file_path2)
        result = open_mfdataset(
            [self.file_path1, self.file_path2],
            data_var="ts",
        )
        # Generate an expected dataset, which is a combination of both datasets
        # with decoded time units and coordinate bounds.
        expected = generate_dataset(cf_compliant=True, has_bounds=True)
        expected_time_data = np.array(
            [
                "2000-01-01T00:00:00.000000000",
                "2000-02-01T00:00:00.000000000",
                "2000-03-01T00:00:00.000000000",
                "2000-04-01T00:00:00.000000000",
                "2000-05-01T00:00:00.000000000",
                "2000-06-01T00:00:00.000000000",
                "2000-07-01T00:00:00.000000000",
                "2000-08-01T00:00:00.000000000",
                "2000-09-01T00:00:00.000000000",
                "2000-10-01T00:00:00.000000000",
                "2000-11-01T00:00:00.000000000",
                "2000-12-01T00:00:00.000000000",
                "2001-01-01T00:00:00.000000000",
                "2001-02-01T00:00:00.000000000",
                "2001-03-01T00:00:00.000000000",
            ],
            dtype="datetime64[ns]",
        )
        expected["time"] = xr.DataArray(
            name="time",
            data=expected_time_data,
            dims="time",
            attrs={
                "units": "months since 2000-01-01",
                "calendar": "standard",
                "axis": "T",
                "long_name": "time",
                "standard_name": "time",
                "bounds": "time_bnds",
            },
        )
        expected.time_bnds.data[:] = np.array(
            [
                ["1999-12-16T12:00:00.000000000", "2000-01-16T12:00:00.000000000"],
                ["2000-01-16T12:00:00.000000000", "2000-02-15T12:00:00.000000000"],
                ["2000-02-15T12:00:00.000000000", "2000-03-16T12:00:00.000000000"],
                ["2000-03-16T12:00:00.000000000", "2000-04-16T00:00:00.000000000"],
                ["2000-04-16T00:00:00.000000000", "2000-05-16T12:00:00.000000000"],
                ["2000-05-16T12:00:00.000000000", "2000-06-16T00:00:00.000000000"],
                ["2000-06-16T00:00:00.000000000", "2000-07-16T12:00:00.000000000"],
                ["2000-07-16T12:00:00.000000000", "2000-08-16T12:00:00.000000000"],
                ["2000-08-16T12:00:00.000000000", "2000-09-16T00:00:00.000000000"],
                ["2000-09-16T00:00:00.000000000", "2000-10-16T12:00:00.000000000"],
                ["2000-10-16T12:00:00.000000000", "2000-11-16T00:00:00.000000000"],
                ["2000-11-16T00:00:00.000000000", "2000-12-16T12:00:00.000000000"],
                ["2000-12-16T12:00:00.000000000", "2001-01-16T12:00:00.000000000"],
                ["2001-01-16T12:00:00.000000000", "2001-02-15T00:00:00.000000000"],
                ["2001-02-15T00:00:00.000000000", "2001-03-15T00:00:00.000000000"],
            ],
            dtype="datetime64[ns]",
        )
        expected.time.encoding = {
            # Set source as result source because it changes every test run.
            "source": result.time.encoding["source"],
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": "months since 2000-01-01",
            "calendar": "standard",
        }
        assert result.identical(expected)
        assert result.time.encoding == expected.time.encoding

    def test_keeps_specified_var(self):
        ds1 = generate_dataset(cf_compliant=True, has_bounds=True)
        ds2 = generate_dataset(cf_compliant=True, has_bounds=True)
        ds2 = ds2.rename_vars({"ts": "tas"})
        # Suppress UserWarning regarding missing time.encoding "units" because
        # it is not relevant to this test.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            ds1.to_netcdf(self.file_path1)
            ds2.to_netcdf(self.file_path2)
        result = open_mfdataset([self.file_path1, self.file_path2], data_var="ts")
        # Generate an expected dataset with decoded non-CF compliant time units.
        expected = generate_dataset(cf_compliant=True, has_bounds=True)
        assert result.identical(expected)
class TestHasCFCompliantTime:
    """Tests for ``_has_cf_compliant_time`` across every accepted path
    input form: string, glob pattern, pathlib.Path, list, and nested list.

    Expected results: True for CF-compliant time, False for non-CF time,
    and None when the dataset has no time axis at all.
    """

    @pytest.fixture(autouse=True)
    def setUp(self, tmp_path):
        # Create temporary directory to save files.
        self.dir = tmp_path / "input_data"
        self.dir.mkdir()
        # Paths to the dummy datasets.
        self.file_path = f"{self.dir}/file.nc"

    def test_non_cf_compliant_time(self):
        # Generate dummy dataset with non-CF compliant time units
        ds = generate_dataset(cf_compliant=False, has_bounds=False)
        ds.to_netcdf(self.file_path)
        result = _has_cf_compliant_time(self.file_path)
        # Check that False is returned when the dataset has non-cf_compliant time
        assert result is False

    def test_no_time_axis(self):
        # Generate dummy dataset with CF compliant time
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        # remove time axis
        ds = ds.isel(time=0)
        ds = ds.squeeze(drop=True)
        ds = ds.reset_coords()
        ds = ds.drop_vars("time")
        ds.to_netcdf(self.file_path)
        result = _has_cf_compliant_time(self.file_path)
        # Check that None is returned when there is no time axis
        assert result is None

    def test_glob_cf_compliant_time(self):
        # Generate dummy datasets with CF compliant time
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        ds.to_netcdf(self.file_path)
        result = _has_cf_compliant_time(f"{self.dir}/*.nc")
        # Check that the wildcard path input is correctly evaluated
        assert result is True

    def test_list_cf_compliant_time(self):
        # Generate dummy datasets with CF compliant time units
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        ds.to_netcdf(self.file_path)
        flist = [self.file_path, self.file_path, self.file_path]
        result = _has_cf_compliant_time(flist)
        # Check that the list input is correctly evaluated
        assert result is True

    def test_cf_compliant_time_with_string_path(self):
        # Generate dummy dataset with CF compliant time units
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        ds.to_netcdf(self.file_path)
        result = _has_cf_compliant_time(self.file_path)
        # Check that True is returned when the dataset has cf_compliant time
        assert result is True

    def test_cf_compliant_time_with_pathlib_path(self):
        # Generate dummy dataset with CF compliant time units
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        ds.to_netcdf(self.file_path)
        result = _has_cf_compliant_time(pathlib.Path(self.file_path))
        # Check that True is returned when the dataset has cf_compliant time
        assert result is True

    def test_cf_compliant_time_with_list_of_list_of_strings(self):
        # Generate dummy dataset with CF compliant time units
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        ds.to_netcdf(self.file_path)
        result = _has_cf_compliant_time([self.file_path])
        # Check that True is returned when the dataset has cf_compliant time
        assert result is True

    def test_cf_compliant_time_with_list_of_list_of_pathlib_paths(self):
        # Generate dummy dataset with CF compliant time units
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        ds.to_netcdf(self.file_path)
        result = _has_cf_compliant_time([[pathlib.Path(self.file_path)]])
        # Check that True is returned when the dataset has cf_compliant time
        assert result is True
class TestDecodeNonCFTimeUnits:
    """Tests for ``decode_non_cf_time`` with "months since ..." and
    "years since ..." units (which CF/cftime cannot decode directly).

    The fixture builds a minimal dataset with integer time offsets [1, 2, 3]
    and matching bounds; each test sets a different reference date in
    ``time.attrs["units"]`` and checks both the decoded datetimes and the
    preserved ``encoding`` dictionaries.
    """

    @pytest.fixture(autouse=True)
    def setup(self):
        time = xr.DataArray(
            name="time",
            data=[1, 2, 3],
            dims=["time"],
            attrs={
                "bounds": "time_bnds",
                "axis": "T",
                "long_name": "time",
                "standard_name": "time",
                "calendar": "noleap",
            },
        )
        time_bnds = xr.DataArray(
            name="time_bnds",
            data=[[0, 1], [1, 2], [2, 3]],
            dims=["time", "bnds"],
        )
        # Pre-set bounds encoding so tests can verify it survives decoding.
        time_bnds.encoding = {
            "zlib": False,
            "shuffle": False,
            "complevel": 0,
            "fletcher32": False,
            "contiguous": False,
            "chunksizes": (1, 2),
            "source": "None",
            "original_shape": (1980, 2),
            "dtype": np.dtype("float64"),
        }
        self.ds = xr.Dataset({"time": time, "time_bnds": time_bnds})

    def test_raises_error_if_function_is_called_on_already_decoded_cf_compliant_dataset(
        self,
    ):
        ds = generate_dataset(cf_compliant=True, has_bounds=True)
        with pytest.raises(KeyError):
            decode_non_cf_time(ds)

    def test_decodes_months_with_a_reference_date_at_the_start_of_the_month(self):
        ds = self.ds.copy()
        ds.time.attrs["units"] = "months since 2000-01-01"
        result = decode_non_cf_time(ds)
        expected = xr.Dataset(
            {
                "time": xr.DataArray(
                    name="time",
                    data=np.array(
                        ["2000-02-01", "2000-03-01", "2000-04-01"],
                        dtype="datetime64",
                    ),
                    dims=["time"],
                    attrs=ds.time.attrs,
                ),
                "time_bnds": xr.DataArray(
                    name="time_bnds",
                    data=np.array(
                        [
                            ["2000-01-01", "2000-02-01"],
                            ["2000-02-01", "2000-03-01"],
                            ["2000-03-01", "2000-04-01"],
                        ],
                        dtype="datetime64",
                    ),
                    dims=["time", "bnds"],
                    attrs=ds.time_bnds.attrs,
                ),
            }
        )
        assert result.identical(expected)
        expected.time.encoding = {
            "source": "None",
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": ds.time.attrs["units"],
            "calendar": ds.time.attrs["calendar"],
        }
        expected.time_bnds.encoding = ds.time_bnds.encoding
        assert result.time.encoding == expected.time.encoding
        assert result.time_bnds.encoding == expected.time_bnds.encoding

    def test_decodes_months_with_a_reference_date_at_the_middle_of_the_month(self):
        ds = self.ds.copy()
        ds.time.attrs["units"] = "months since 2000-01-15"
        result = decode_non_cf_time(ds)
        expected = xr.Dataset(
            {
                "time": xr.DataArray(
                    name="time",
                    data=np.array(
                        ["2000-02-15", "2000-03-15", "2000-04-15"],
                        dtype="datetime64",
                    ),
                    dims=["time"],
                    attrs=ds.time.attrs,
                ),
                "time_bnds": xr.DataArray(
                    name="time_bnds",
                    data=np.array(
                        [
                            ["2000-01-15", "2000-02-15"],
                            ["2000-02-15", "2000-03-15"],
                            ["2000-03-15", "2000-04-15"],
                        ],
                        dtype="datetime64",
                    ),
                    dims=["time", "bnds"],
                    attrs=ds.time_bnds.attrs,
                ),
            }
        )
        assert result.identical(expected)
        expected.time.encoding = {
            "source": "None",
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": ds.time.attrs["units"],
            "calendar": ds.time.attrs["calendar"],
        }
        expected.time_bnds.encoding = ds.time_bnds.encoding
        assert result.time.encoding == expected.time.encoding
        assert result.time_bnds.encoding == expected.time_bnds.encoding

    def test_decodes_months_with_a_reference_date_at_the_end_of_the_month(self):
        # Month-end reference dates must clamp to each month's last day
        # (e.g. Jan 31 -> Feb 29 in leap-year 2000).
        ds = self.ds.copy()
        ds.time.attrs["units"] = "months since 1999-12-31"
        result = decode_non_cf_time(ds)
        expected = xr.Dataset(
            {
                "time": xr.DataArray(
                    name="time",
                    data=np.array(
                        ["2000-01-31", "2000-02-29", "2000-03-31"],
                        dtype="datetime64",
                    ),
                    dims=["time"],
                    attrs=ds.time.attrs,
                ),
                "time_bnds": xr.DataArray(
                    name="time_bnds",
                    data=np.array(
                        [
                            ["1999-12-31", "2000-01-31"],
                            ["2000-01-31", "2000-02-29"],
                            ["2000-02-29", "2000-03-31"],
                        ],
                        dtype="datetime64",
                    ),
                    dims=["time", "bnds"],
                    attrs=ds.time_bnds.attrs,
                ),
            }
        )
        assert result.identical(expected)
        expected.time.encoding = {
            "source": "None",
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": ds.time.attrs["units"],
            "calendar": ds.time.attrs["calendar"],
        }
        expected.time_bnds.encoding = ds.time_bnds.encoding
        assert result.time.encoding == expected.time.encoding
        assert result.time_bnds.encoding == expected.time_bnds.encoding

    def test_decodes_months_with_a_reference_date_on_a_leap_year(self):
        ds = self.ds.copy()
        ds.time.attrs["units"] = "months since 2000-02-29"
        result = decode_non_cf_time(ds)
        expected = xr.Dataset(
            {
                "time": xr.DataArray(
                    name="time",
                    data=np.array(
                        ["2000-03-29", "2000-04-29", "2000-05-29"],
                        dtype="datetime64",
                    ),
                    dims=["time"],
                    attrs=ds.time.attrs,
                ),
                "time_bnds": xr.DataArray(
                    name="time_bnds",
                    data=np.array(
                        [
                            ["2000-02-29", "2000-03-29"],
                            ["2000-03-29", "2000-04-29"],
                            ["2000-04-29", "2000-05-29"],
                        ],
                        dtype="datetime64",
                    ),
                    dims=["time", "bnds"],
                    attrs=ds.time_bnds.attrs,
                ),
            }
        )
        assert result.identical(expected)
        expected.time.encoding = {
            "source": "None",
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": ds.time.attrs["units"],
            "calendar": ds.time.attrs["calendar"],
        }
        expected.time_bnds.encoding = ds.time_bnds.encoding
        assert result.time.encoding == expected.time.encoding
        assert result.time_bnds.encoding == expected.time_bnds.encoding

    def test_decodes_years_with_a_reference_date_at_the_middle_of_the_year(self):
        ds = self.ds.copy()
        ds.time.attrs["units"] = "years since 2000-06-01"
        result = decode_non_cf_time(ds)
        expected = xr.Dataset(
            {
                "time": xr.DataArray(
                    name="time",
                    data=np.array(
                        ["2001-06-01", "2002-06-01", "2003-06-01"],
                        dtype="datetime64",
                    ),
                    dims=["time"],
                    attrs=ds.time.attrs,
                ),
                "time_bnds": xr.DataArray(
                    name="time_bnds",
                    data=np.array(
                        [
                            ["2000-06-01", "2001-06-01"],
                            ["2001-06-01", "2002-06-01"],
                            ["2002-06-01", "2003-06-01"],
                        ],
                        dtype="datetime64",
                    ),
                    dims=["time", "bnds"],
                    attrs=ds.time_bnds.attrs,
                ),
            }
        )
        assert result.identical(expected)
        expected.time.encoding = {
            "source": "None",
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": ds.time.attrs["units"],
            "calendar": ds.time.attrs["calendar"],
        }
        expected.time_bnds.encoding = ds.time_bnds.encoding
        assert result.time.encoding == expected.time.encoding
        assert result.time_bnds.encoding == expected.time_bnds.encoding

    def test_decodes_years_with_a_reference_date_on_a_leap_year(self):
        # Feb 29 reference clamps to Feb 28 in the following non-leap years.
        ds = self.ds.copy()
        ds.time.attrs["units"] = "years since 2000-02-29"
        result = decode_non_cf_time(ds)
        expected = xr.Dataset(
            {
                "time": xr.DataArray(
                    name="time",
                    data=[
                        np.datetime64("2001-02-28"),
                        np.datetime64("2002-02-28"),
                        np.datetime64("2003-02-28"),
                    ],
                    dims=["time"],
                ),
                "time_bnds": xr.DataArray(
                    name="time_bnds",
                    data=np.array(
                        [
                            ["2000-02-29", "2001-02-28"],
                            ["2001-02-28", "2002-02-28"],
                            ["2002-02-28", "2003-02-28"],
                        ],
                        dtype="datetime64",
                    ),
                    dims=["time", "bnds"],
                    attrs=ds.time_bnds.attrs,
                ),
            }
        )
        expected.time.attrs = ds.time.attrs
        assert result.identical(expected)
        expected.time.encoding = {
            "source": "None",
            "dtype": np.dtype(np.int64),
            "original_shape": expected.time.data.shape,
            "units": ds.time.attrs["units"],
            "calendar": ds.time.attrs["calendar"],
        }
        expected.time_bnds.encoding = ds.time_bnds.encoding
        assert result.time.encoding == expected.time.encoding
        assert result.time_bnds.encoding == expected.time_bnds.encoding
class TestPostProcessDataset:
    """Tests for ``_postprocess_dataset``: single-variable subsetting, time
    centering, bounds generation, and longitude re-orientation."""

    @pytest.fixture(autouse=True)
    def setup(self):
        self.ds = generate_dataset(cf_compliant=True, has_bounds=True)

    def test_keeps_specified_var(self):
        ds = generate_dataset(cf_compliant=True, has_bounds=True)
        # Create a modified version of the Dataset with a new var
        ds_mod = ds.copy()
        ds_mod["tas"] = ds_mod.ts.copy()
        # BUG FIX: the original passed `ds` (which never contained "tas"),
        # so the assertion passed vacuously. Pass the modified dataset so
        # the test actually verifies that "tas" gets dropped.
        result = _postprocess_dataset(ds_mod, data_var="ts")
        expected = ds.copy()
        assert result.identical(expected)

    def test_centers_time(self):
        ds = generate_dataset(cf_compliant=True, has_bounds=True)
        # Start from end-of-month timestamps; centering should move each one
        # to the midpoint of its bounds interval.
        uncentered_time = np.array(
            [
                "2000-01-31T12:00:00.000000000",
                "2000-02-29T12:00:00.000000000",
                "2000-03-31T12:00:00.000000000",
                "2000-04-30T00:00:00.000000000",
                "2000-05-31T12:00:00.000000000",
                "2000-06-30T00:00:00.000000000",
                "2000-07-31T12:00:00.000000000",
                "2000-08-31T12:00:00.000000000",
                "2000-09-30T00:00:00.000000000",
                "2000-10-16T12:00:00.000000000",
                "2000-11-30T00:00:00.000000000",
                "2000-12-31T12:00:00.000000000",
                "2001-01-31T12:00:00.000000000",
                "2001-02-28T00:00:00.000000000",
                "2001-12-31T12:00:00.000000000",
            ],
            dtype="datetime64[ns]",
        )
        ds.time.data[:] = uncentered_time
        ds.time.encoding = {
            "source": None,
            "dtype": np.dtype(np.int64),
            "original_shape": ds.time.data.shape,
            "units": "days since 2000-01-01",
            "calendar": "standard",
            "_FillValue": False,
        }
        # Compare result of the method against the expected.
        result = _postprocess_dataset(ds, center_times=True)
        expected = ds.copy()
        expected_time_data = np.array(
            [
                "2000-01-16T12:00:00.000000000",
                "2000-02-15T12:00:00.000000000",
                "2000-03-16T12:00:00.000000000",
                "2000-04-16T00:00:00.000000000",
                "2000-05-16T12:00:00.000000000",
                "2000-06-16T00:00:00.000000000",
                "2000-07-16T12:00:00.000000000",
                "2000-08-16T12:00:00.000000000",
                "2000-09-16T00:00:00.000000000",
                "2000-10-16T12:00:00.000000000",
                "2000-11-16T00:00:00.000000000",
                "2000-12-16T12:00:00.000000000",
                "2001-01-16T12:00:00.000000000",
                "2001-02-15T00:00:00.000000000",
                "2001-12-16T12:00:00.000000000",
            ],
            dtype="datetime64[ns]",
        )
        expected = expected.assign_coords(
            {
                "time": xr.DataArray(
                    name="time",
                    data=expected_time_data,
                    coords={"time": expected_time_data},
                    dims="time",
                    attrs={
                        "long_name": "time",
                        "standard_name": "time",
                        "axis": "T",
                        "bounds": "time_bnds",
                    },
                )
            }
        )
        expected.time.encoding = {
            "source": None,
            "dtype": np.dtype("int64"),
            "original_shape": (15,),
            "units": "days since 2000-01-01",
            "calendar": "standard",
            "_FillValue": False,
        }
        # Update time bounds with centered time coordinates.
        time_bounds = ds.time_bnds.copy()
        time_bounds["time"] = expected.time
        expected["time_bnds"] = time_bounds
        # Compare result of the function against the expected.
        assert result.identical(expected)
        assert result.time.encoding == expected.time.encoding

    def test_raises_error_if_dataset_has_no_time_coords_but_center_times_is_true(self):
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        ds = ds.drop_dims("time")
        with pytest.raises(ValueError):
            _postprocess_dataset(ds, center_times=True)

    def test_adds_missing_lat_and_lon_bounds(self):
        # Create expected dataset without bounds.
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        data_vars = list(ds.data_vars.keys())
        assert "lat_bnds" not in data_vars
        assert "lon_bnds" not in data_vars
        result = _postprocess_dataset(ds, add_bounds=True)
        result_data_vars = list(result.data_vars.keys())
        assert "lat_bnds" in result_data_vars
        assert "lon_bnds" in result_data_vars

    def test_orients_longitude_bounds_from_180_to_360_and_sorts_with_prime_meridian_cell(
        self,
    ):
        # Chunk the dataset to test method also works with Dask.
        ds = xr.Dataset(
            coords={
                "lon": xr.DataArray(
                    name="lon",
                    data=np.array([-180, -1, 0, 1, 179]),
                    dims=["lon"],
                    attrs={"units": "degrees_east", "axis": "X", "bounds": "lon_bnds"},
                )
            },
            data_vars={
                "lon_bnds": xr.DataArray(
                    name="lon_bnds",
                    data=np.array(
                        [
                            [-180.5, -1.5],
                            [-1.5, -0.5],
                            [-0.5, 0.5],
                            [0.5, 1.5],
                            [1.5, 179.5],
                        ]
                    ),
                    dims=["lon", "bnds"],
                    attrs={"is_generated": "True"},
                ),
            },
        ).chunk({"lon": 2})
        result = _postprocess_dataset(
            ds, data_var=None, center_times=False, add_bounds=True, lon_orient=(0, 360)
        )
        # The prime-meridian cell is split, so 5 input cells become 6.
        expected = xr.Dataset(
            coords={
                "lon": xr.DataArray(
                    name="lon",
                    data=np.array([0.0, 1.0, 179.0, 180.0, 359.0, 360.0]),
                    dims=["lon"],
                    attrs={"units": "degrees_east", "axis": "X", "bounds": "lon_bnds"},
                )
            },
            data_vars={
                "lon_bnds": xr.DataArray(
                    name="lon_bnds",
                    data=np.array(
                        [
                            [0, 0.5],
                            [0.5, 1.5],
                            [1.5, 179.5],
                            [179.5, 358.5],
                            [358.5, 359.5],
                            [359.5, 360],
                        ]
                    ),
                    dims=["lon", "bnds"],
                    attrs={"is_generated": "True"},
                ),
            },
        )
        assert result.identical(expected)

    def test_raises_error_if_dataset_has_no_longitude_coords_but_lon_orient_is_specified(
        self,
    ):
        ds = generate_dataset(cf_compliant=True, has_bounds=False)
        ds = ds.drop_dims("lon")
        with pytest.raises(ValueError):
            _postprocess_dataset(ds, lon_orient=(0, 360))
class TestKeepSingleVar:
    """Tests for ``_keep_single_var``, which subsets a Dataset down to one
    data variable while retaining all bounds variables."""

    @pytest.fixture(autouse=True)
    def setup(self):
        self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
        self.ds_mod = self.ds.copy()
        self.ds_mod["tas"] = self.ds_mod.ts.copy()

    def tests_raises_error_if_only_bounds_data_variables_exist(self):
        # Dropping "ts" leaves only bounds vars, which is invalid input.
        bounds_only = self.ds.copy().drop_vars("ts")
        with pytest.raises(ValueError):
            _keep_single_var(bounds_only, key="ts")

    def test_raises_error_if_specified_data_var_does_not_exist(self):
        with pytest.raises(ValueError):
            _keep_single_var(self.ds_mod.copy(), key="nonexistent")

    def test_raises_error_if_specified_data_var_is_a_bounds_var(self):
        with pytest.raises(ValueError):
            _keep_single_var(self.ds_mod.copy(), key="lat_bnds")

    def test_returns_dataset_with_specified_data_var(self):
        result = _keep_single_var(self.ds_mod, key="ts")
        # "tas" is dropped, restoring the original single-var dataset.
        assert result.identical(self.ds.copy())
        assert not result.identical(self.ds_mod)

    def test_bounds_always_persist(self):
        subset = _keep_single_var(self.ds_mod, key="ts")
        assert subset.get("lat_bnds") is not None
        assert subset.get("lon_bnds") is not None
        assert subset.get("time_bnds") is not None
class TestPreProcessNonCFDataset:
    """Tests for ``_preprocess_non_cf_dataset``."""

    @pytest.fixture(autouse=True)
    def setup(self):
        self.ds = generate_dataset(cf_compliant=False, has_bounds=True)

    def test_user_specified_callable_results_in_subsetting_dataset_on_time_slice(self):
        # BUG FIX: the inner function was named ``callable``, shadowing the
        # builtin of the same name.
        def subset_first_time_step(ds):
            return ds.isel(time=slice(0, 1))

        ds = self.ds.copy()
        result = _preprocess_non_cf_dataset(ds, subset_first_time_step)

        # The expected dataset is the same one-step subset, with the non-CF
        # time axis decoded into datetime64 values.
        expected = ds.copy().isel(time=slice(0, 1))
        expected["time"] = xr.DataArray(
            name="time",
            data=np.array(
                ["2000-01-01"],
                dtype="datetime64",
            ),
            dims=["time"],
        )
        expected["time_bnds"] = xr.DataArray(
            name="time_bnds",
            data=np.array(
                [["1999-12-01", "2000-01-01"]],
                dtype="datetime64",
            ),
            dims=["time", "bnds"],
        )
        expected.time.attrs = ds.time.attrs
        expected.time_bnds.attrs = ds.time_bnds.attrs
        assert result.identical(expected)
class TestSplitTimeUnitsAttr:
    """Tests for ``_split_time_units_attr``."""

    def test_raises_error_if_units_attr_is_none(self):
        with pytest.raises(KeyError):
            _split_time_units_attr(None)  # type: ignore

    def test_splits_units_attr_to_unit_and_reference_date(self):
        # Reference dates of increasing precision all split the same way.
        cases = {
            "months since 1800": ("months", "1800"),
            "months since 1800-01-01": ("months", "1800-01-01"),
            "months since 1800-01-01 00:00:00": ("months", "1800-01-01 00:00:00"),
        }
        for units_attr, expected in cases.items():
            assert _split_time_units_attr(units_attr) == expected
|
{"hexsha": "4ca343867c3d63321e254619ee276e43efce15fa", "size": 35933, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_dataset.py", "max_stars_repo_name": "jasonb5/xcdat", "max_stars_repo_head_hexsha": "4a35d6a6131fe3fec22593f54a9e48b640ceac4f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_dataset.py", "max_issues_repo_name": "jasonb5/xcdat", "max_issues_repo_head_hexsha": "4a35d6a6131fe3fec22593f54a9e48b640ceac4f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_dataset.py", "max_forks_repo_name": "jasonb5/xcdat", "max_forks_repo_head_hexsha": "4a35d6a6131fe3fec22593f54a9e48b640ceac4f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3523908524, "max_line_length": 89, "alphanum_fraction": 0.5311552055, "include": true, "reason": "import numpy", "num_tokens": 8798}
|
[STATEMENT]
lemma differentiable_on_Pair:
"f differentiable_on S \<Longrightarrow> g differentiable_on S \<Longrightarrow> (\<lambda>x. (f x, g x)) differentiable_on S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>f differentiable_on S; g differentiable_on S\<rbrakk> \<Longrightarrow> (\<lambda>x. (f x, g x)) differentiable_on S
[PROOF STEP]
unfolding differentiable_on_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>x\<in>S. f differentiable at x within S; \<forall>x\<in>S. g differentiable at x within S\<rbrakk> \<Longrightarrow> \<forall>x\<in>S. (\<lambda>x. (f x, g x)) differentiable at x within S
[PROOF STEP]
using differentiable_Pair[of f _ S g]
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>f differentiable at ?x within S; g differentiable at ?x within S\<rbrakk> \<Longrightarrow> (\<lambda>x. (f x, g x)) differentiable at ?x within S
goal (1 subgoal):
1. \<lbrakk>\<forall>x\<in>S. f differentiable at x within S; \<forall>x\<in>S. g differentiable at x within S\<rbrakk> \<Longrightarrow> \<forall>x\<in>S. (\<lambda>x. (f x, g x)) differentiable at x within S
[PROOF STEP]
by auto
|
{"llama_tokens": 416, "file": "Smooth_Manifolds_Analysis_More", "length": 3}
|
"""
Created on Oct 01 16:11 2019
@author: nishit
"""
import importlib

from pyomo.environ import *
from pyomo.opt import SolverStatus, TerminationCondition
import pyutilib.subprocess.GlobalData
pyutilib.subprocess.GlobalData.DEFINE_SIGNAL_HANDLERS_DEFAULT = False
class OptUt:
    """Helper that builds and solves a Pyomo optimization instance."""

    def thread_solver(self, single_ev, data_dict, ini_ess_soc, ini_vac_soc,
                      solver_name, timestep, absolute_path):
        """Solve the optimization model, retrying until an optimal solution.

        Loads the ``Model`` class from the Python file at ``absolute_path``,
        instantiates it with ``data_dict`` and solves it with ``solver_name``.
        Retries indefinitely on exceptions and non-optimal terminations.

        Returns a ``(Decision, Value)`` pair keyed by
        ``(timestep, ini_ess_soc, ini_vac_soc[, position])`` holding the
        first-step values of the model's output variables.
        """
        # Human-readable tag identifying this solve in log messages.
        tag = str(timestep) + "_" + str(ini_ess_soc) + "_" + str(ini_vac_soc)
        while True:
            # Reset per attempt so a failed solve cannot reuse stale results
            # from the previous iteration.
            result = None
            instance = None
            try:
                optsolver = SolverFactory(solver_name)
                # BUG FIX: ``importlib`` was used here but never imported
                # (added to the module imports).
                spec = importlib.util.spec_from_file_location(absolute_path, absolute_path)
                module = spec.loader.load_module(spec.name)
                my_class = getattr(module, 'Model')
                instance = my_class.model.create_instance(data_dict)
                result = optsolver.solve(instance)
            except Exception as e:
                print("Thread: " + tag + " " + str(e))
            if single_ev:
                # Placeholder: position tracking is currently disabled.
                position = False
            if result is None:
                print("result is none for " + str(tag) + " repeat")
            elif (result.solver.status == SolverStatus.ok) and (
                    result.solver.termination_condition == TerminationCondition.optimal):
                instance.solutions.load_from(result)
                # Collect the solved values of every active model variable.
                # BUG FIX: the loop variable was named ``v``, shadowing the
                # thread tag used by the "repeat" log messages above.
                my_dict = {}
                for var in instance.component_objects(Var, active=True):
                    varobject = getattr(instance, str(var))
                    var_list = []
                    try:
                        for index in varobject:
                            var_list.append(varobject[index].value)
                        my_dict[str(var)] = var_list
                    except Exception as e:
                        print("error reading result " + str(e))
                if single_ev:
                    combined_key = (timestep, ini_ess_soc, ini_vac_soc, position)
                else:
                    combined_key = (timestep, ini_ess_soc, ini_vac_soc)
                # First-step set-points of each controllable output.
                Decision = {combined_key: {
                    'Grid': my_dict["P_GRID_OUTPUT"][0],
                    'PV': my_dict["P_PV_OUTPUT"][0],
                    'ESS': my_dict["P_ESS_OUTPUT"][0],
                    'VAC': my_dict["P_VAC_OUTPUT"][0],
                }}
                Value = {combined_key: my_dict["P_PV_OUTPUT"][0]}
                return (Decision, Value)
            elif result.solver.termination_condition == TerminationCondition.infeasible:
                print("Termination condition is infeasible " + tag + " repeat")
            else:
                print("Nothing fits " + tag + " repeat")
|
{"hexsha": "9c52a139c17cfa76cc2296b2c1265a8f95528df7", "size": 3823, "ext": "py", "lang": "Python", "max_stars_repo_path": "optimization/optut.py", "max_stars_repo_name": "storage4grid/PROFESS-PROFEV", "max_stars_repo_head_hexsha": "adf4e26488225206c249938c9eecc394a06f9677", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "optimization/optut.py", "max_issues_repo_name": "storage4grid/PROFESS-PROFEV", "max_issues_repo_head_hexsha": "adf4e26488225206c249938c9eecc394a06f9677", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "optimization/optut.py", "max_forks_repo_name": "storage4grid/PROFESS-PROFEV", "max_forks_repo_head_hexsha": "adf4e26488225206c249938c9eecc394a06f9677", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.9550561798, "max_line_length": 114, "alphanum_fraction": 0.5652628826, "include": true, "reason": "from pyomo", "num_tokens": 794}
|
import numpy as np
import tensorflow as tf
from random import choice,shuffle
#g_ps = []
#s_ps = []
#for g in glob.glob('../data/training_set/ffp10_p/*.npy'):
# g_ps.append(np.load(g))
#g_ps = np.array(g_ps)
#print 'g_ps.shape:',g_ps.shape
#for s in glob.glob('../data/training_set/string_p/*.npy'):
# s_ps.append(np.load(s))
#s_ps = np.array(s_ps)
#print 's_ps.shape:' , s_ps.shape
#def dp(n):
# l = 200
# x = []
# y = []
# for i in range(n):
# gm = np.random.uniform(10,1000)
# gm = gm* 1e-7
#
# r1 = np.random.randint(0,len(g_ps))
# ri,rj = np.random.randint(0,2048-l,2)
# r2 = np.random.randint(0,len(s_ps))
# ri,rj = np.random.randint(0,2048-l,2)
#
# gp = g_ps[r1][ri:ri+l,rj:rj+l]
# sp = s_ps[r2][ri:ri+l,rj:rj+l]
# x.append(gp+gm*sp)
# y.append(-np.log(gm))
# return np.expand_dims(np.array(x) , -1) , np.expand_dims(np.array(y) , -1)
def get_slice(data, nx, ny):
    """Pick a random ``nx``-by-``ny`` window of a 2-D array.

    Parameters
    ----------
    data : 2-D array
        Matrix to slice (only its ``.shape`` is read).
    nx, ny : int
        Window size along each axis. ``0`` — or any value greater than or
        equal to the axis length — selects the full axis.

    Returns
    -------
    (slice, slice)
        Slices for the first and the second axis; when the window is
        smaller than the axis, its start is drawn uniformly at random.
    """
    lx, ly = data.shape

    def _axis_slice(length, size):
        # Full axis when no window is requested. BUG FIX: an oversized
        # window (size > length) used to crash in np.random.randint with a
        # negative upper bound; it now falls back to the full axis.
        if size == 0 or size >= length:
            return slice(0, length)
        # BUG FIX: the original upper bound ``length - size`` (exclusive)
        # could never select the last valid window position; use
        # ``length - size + 1`` so every window start is reachable.
        start = np.random.randint(0, length - size + 1)
        return slice(start, start + size)

    return _axis_slice(lx, nx), _axis_slice(ly, ny)
class DataProvider(object):
    """Serve random (noise + gmu * signal) patches for training.

    A rotating buffer of ``n_buffer`` noise/signal maps is kept in memory
    (loaded from ``.npy`` files) and refreshed every ``reload_rate`` draws.
    Calling the provider returns one-hot labelled patches, one class per
    entry of ``gmus``.
    """

    def __init__(self, n_files, s_files, gmus,
                 nx=0, ny=0, n_buffer=10,
                 reload_rate=100, filt=None):
        # ``n_files`` / ``s_files``: paths to noise and signal ``.npy`` maps.
        # ``filt``: optional callable applied to each mixed patch; identity
        # when omitted.
        self.n_files = n_files
        self.s_files = s_files
        nmin = min(len(n_files), len(s_files))
        if n_buffer >= nmin:
            # Every file fits in the buffer, so reloading is pointless.
            n_buffer = nmin
            self.reload_rate = 0
        else:
            self.reload_rate = reload_rate
        self.nx, self.ny = nx, ny
        self.n_buffer = n_buffer
        self.gmus = gmus
        if filt is None:
            def filt(x):
                return x
        self.filt = filt
        self.counter = 0
        self.reload()

    def reload(self):
        """Fill the in-memory buffer with a fresh random file selection."""
        print('Data provider is reloading...')
        self.n_set = []
        self.s_set = []
        ninds = np.arange(len(self.n_files))
        sinds = np.arange(len(self.s_files))
        shuffle(ninds)
        shuffle(sinds)
        for i in range(self.n_buffer):
            self.n_set.append(np.load(self.n_files[ninds[i]]))
            self.s_set.append(np.load(self.s_files[sinds[i]]))

    def get_data(self):
        """Return one random (noise, signal) pair from the buffer."""
        self.counter += 1
        if self.reload_rate and self.counter % self.reload_rate == 0:
            self.reload()
        noise = choice(self.n_set)
        sind = choice(np.arange(self.n_buffer))
        return noise, self.s_set[sind]

    def pre_process(self, n, s, gmu):
        """Cut random windows, mix the signal into the noise with amplitude
        ``gmu``, apply the filter and append a trailing channel axis."""
        nslice = get_slice(n, self.nx, self.ny)
        noise_patch = n[nslice]
        sslice = get_slice(s, self.nx, self.ny)
        signal_patch = s[sslice]
        mixed = self.filt(noise_patch + gmu * signal_patch)
        return np.expand_dims(mixed, -1)

    def __call__(self, n, gmus=None):
        """Return ``n`` patches ``X`` and their one-hot class labels ``Y``."""
        if gmus is None:
            gmus = self.gmus
        n_class = len(gmus)
        X = []
        Y = []
        # BUG FIX: the original loop rebound the name ``n`` to the noise map
        # on the first iteration, silently shadowing the sample-count
        # parameter for the rest of the method body.
        for _ in range(n):
            noise, signal = self.get_data()
            inds = np.arange(n_class)
            shuffle(inds)
            gmu = gmus[inds[0]]
            X.append(self.pre_process(noise, signal, gmu))
            label = n_class * [0]
            label[inds[0]] = 1
            Y.append(label)
        return np.array(X), np.array(Y)
#fig,ax=plt.subplots(1,1,figsize=(5,5))
#ax.imshow(x[0,:,:,0],norm=LogNorm(),cmap=plt.get_cmap('jet'))
#plt.title('G + Gu*S')
#plt.savefig('x_lognorm ')
#fig,ax=plt.subplots(1,1,figsize=(5,5))
#ax.imshow(x[0,:,:,0])
#plt.title('G + Gu*S')
#plt.savefig('x')
#print(x.shape,y.shape)
#exit()
#l = 200
#nx,ny,n_channel = l,l,1
def arch_t(x_in):
    """Convolutional regression head: five conv stages interleaved with
    average pooling, then flatten + dropout, a 10-unit dense layer and a
    single ReLU output. Prints every intermediate tensor."""
    print("\033[91m ============== Begin ============== \033[0m")
    relu = tf.nn.relu
    net = tf.layers.conv2d(x_in, filters=36, kernel_size=3,
                           strides=(1, 1), padding='same', activation=relu)
    print(net)
    net = tf.layers.average_pooling2d(net, pool_size=2, strides=2)
    print(net)
    net = tf.layers.conv2d(net, filters=36, kernel_size=3,
                           strides=(2, 2), padding='same', activation=relu)
    print(net)
    net = tf.layers.average_pooling2d(net, pool_size=2, strides=2)
    print(net)
    net = tf.layers.conv2d(net, filters=36, kernel_size=3,
                           strides=(2, 2), padding='same', activation=relu)
    print(net)
    # Note the larger pool window on the third pooling stage.
    net = tf.layers.average_pooling2d(net, pool_size=3, strides=2)
    print(net)
    net = tf.layers.conv2d(net, filters=36, kernel_size=3, strides=(2, 2),
                           padding='same', activation=relu)
    print(net)
    net = tf.layers.conv2d(net, filters=36, kernel_size=3, strides=(2, 2),
                           padding='same', activation=relu)
    print(net)
    net = tf.contrib.layers.flatten(net)
    net = tf.nn.dropout(net, keep_prob=0.6)
    print(net)
    net = tf.layers.dense(net, 10, activation=relu)
    print(net)
    y_out = tf.layers.dense(net, 1, activation=relu)
    print(y_out)
    print("\033[91m =============== END =============== \033[0m")
    return y_out
def arch_maker(x, n_conv, n_class):
    """Build a small classifier: ``n_conv`` conv/batch-norm/ReLU/avg-pool
    stages, then flatten, dropout, a 20-unit dense layer and an
    ``n_class``-way softmax output. Prints every intermediate tensor."""
    print("\033[91m ============== Begin ============== \033[0m")

    def _show(tensor):
        # Echo each intermediate tensor, exactly as the original did.
        print(tensor)
        return tensor

    for _ in range(n_conv):
        x = _show(tf.layers.conv2d(x, filters=4, kernel_size=3,
                                   strides=(1, 1), padding='same'))
        x = _show(tf.layers.batch_normalization(x))
        x = _show(tf.nn.relu(x))
        x = _show(tf.layers.average_pooling2d(x, pool_size=2, strides=2))
    x = _show(tf.contrib.layers.flatten(x))
    x = _show(tf.nn.dropout(x, keep_prob=0.6))
    x = _show(tf.layers.dense(x, 20, activation=tf.nn.relu))
    y = _show(tf.layers.dense(x, n_class, activation=tf.nn.softmax))
    print("\033[91m =============== END =============== \033[0m")
    return y
|
{"hexsha": "fef652ebd8c37afc107a4fb2409e84884e7cbea8", "size": 8109, "ext": "py", "lang": "Python", "max_stars_repo_path": "cosmic_string/deep_measure/utils.py", "max_stars_repo_name": "vafaei-ar/DeePlanck", "max_stars_repo_head_hexsha": "9e9aab4dc069ed5810a6316cdcc55a1f4a58938e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cosmic_string/deep_measure/utils.py", "max_issues_repo_name": "vafaei-ar/DeePlanck", "max_issues_repo_head_hexsha": "9e9aab4dc069ed5810a6316cdcc55a1f4a58938e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cosmic_string/deep_measure/utils.py", "max_forks_repo_name": "vafaei-ar/DeePlanck", "max_forks_repo_head_hexsha": "9e9aab4dc069ed5810a6316cdcc55a1f4a58938e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1690647482, "max_line_length": 85, "alphanum_fraction": 0.5191762239, "include": true, "reason": "import numpy", "num_tokens": 2392}
|
# -*- coding: utf-8 -*-
r"""
Morphic words
This module implements morphic words (letter-to-letter coding of the fixed
point of a morphism).
AUTHORS:
- Jana Lepsova (January 2021): initial version
EXAMPLES:
Creation of the fixed point of a morphism::
sage: m = WordMorphism('a->abc,b->baba,c->ca')
sage: w = m.fixed_point('a')
sage: w
word: abcbabacababaabcbabaabccaabcbabaabcbabaa...
sage: w.length()
+Infinity
Computing the n-th letter of a fixed point is fast as it is using the
abstract numeration system associated to the morphism and the starting
letter, see chapter 3 of the book [BR2010b]_::
sage: w[10000000]
'b'
"""
from sage.combinat.words.word_infinite_datatypes import WordDatatype_callable
from sage.rings.all import Infinity
from sage.modules.free_module_element import vector
class WordDatatype_morphic(WordDatatype_callable):
    r"""
    Datatype for a morphic word defined by a morphism, a starting letter
    and a coding.
    """
    def __init__(self, parent, morphism, letter, coding=None, length=Infinity):
        r"""
        INPUT:

        - ``parent`` - a parent
        - ``morphism`` - a word morphism
        - ``letter`` - a starting letter
        - ``coding`` - dict (default: ``None``), if ``None``
          the identity map is used for the coding
        - ``length`` - integer or ``'finite'`` or ``Infinity`` or
          ``'unknown'`` (default: ``Infinity``) the length of the word

        EXAMPLES::

            sage: m = WordMorphism('a->ab,b->a')
            sage: w = m.fixed_point('a')
            sage: w
            word: abaababaabaababaababaabaababaabaababaaba...
            sage: w[555:1000]
            word: abaababaabaababaababaabaababaabaababaaba...
            sage: w.length()
            +Infinity

        ::

            sage: m = WordMorphism('a->abc,b->baba,c->ca')
            sage: m.fixed_point('a')
            word: abcbabacababaabcbabaabccaabcbabaabcbabaa...
            sage: w = m.fixed_point('a')
            sage: w[7]
            'c'
            sage: w[2:7]
            word: cbaba
            sage: w[500:503]
            word: caa

        When the morphic word is finite::

            sage: m = WordMorphism("a->ab,b->")
            sage: w = m.fixed_point("a")
            sage: w
            word: ab
            sage: w[0]
            'a'
            sage: w.length()
            2

        Using the coding argument::

            sage: m = WordMorphism('a->ab,b->a')
            sage: W = m.domain()
            sage: from sage.combinat.words.morphic import WordDatatype_morphic
            sage: coding = {'a':'x', 'b':'y'}
            sage: w = WordDatatype_morphic(W, m, 'a', coding=coding)
            sage: [w[i] for i in range(10)]
            ['x', 'y', 'x', 'x', 'y', 'x', 'y', 'x', 'x', 'y']

        TESTS::

            sage: m = WordMorphism('a->abcd,b->bbc,c->cddd,d->cba')
            sage: w = m.fixed_point('a')
            sage: it = iter(w)
            sage: for _ in range(10000): _ = next(it)
            sage: L = [next(it) for _ in range(10)]; L
            ['d', 'd', 'd', 'c', 'd', 'd', 'd', 'c', 'b', 'a']
            sage: w[10000:10010]
            word: dddcdddcba
            sage: list(w[10000:10010]) == L
            True
        """
        self._parent = parent
        # self._func = callable
        # for hashing
        self._hash = None
        # Normalize the length: ``+Infinity`` for infinite words, ``None``
        # while a finite (or unknown) length has not been computed.
        if length is Infinity:
            self._len = Infinity
        elif length is None or length == 'unknown' or length == 'finite':
            self._len = None
        else:
            self._len = length
        self._morphism = morphism
        self._letter = letter
        self._alphabet = self._morphism.domain().alphabet()
        # The default coding is the identity on the morphism's alphabet.
        if coding is None:
            self._coding = {a: a for a in self._alphabet}
        else:
            self._coding = coding

    def __reduce__(self):
        r"""
        EXAMPLES::

            sage: m = WordMorphism('a->ab,b->a')
            sage: w = m.fixed_point('a')
            sage: w.__reduce__()
            (<class 'sage.combinat.words.word.InfiniteWord_morphic'>,
             (Infinite words over {'a', 'b'},
              WordMorphism: a->ab, b->a,
              'a',
              {'a': 'a', 'b': 'b'},
              +Infinity))

        Below is the behavior for words of finite length::

            sage: m = WordMorphism("a->ab,b->")
            sage: w = m.fixed_point("a")
            sage: w.__reduce__()
            (<class 'sage.combinat.words.word.FiniteWord_morphic'>,
             (Finite words over {'a', 'b'},
              WordMorphism: a->ab, b->,
              'a',
              {'a': 'a', 'b': 'b'},
              2))
        """
        # Rebuild from the same five constructor arguments on unpickling.
        return self.__class__, (self._parent, self._morphism, self._letter,
                                self._coding, self._len)

    def representation(self, n):
        r"""
        Return the representation of the integer n in the numeration system
        associated to the morphism.

        INPUT:

        - ``n`` -- nonnegative integer

        OUTPUT:

        list

        EXAMPLES::

            sage: m = WordMorphism('a->ab,b->a')
            sage: w = m.fixed_point('a')
            sage: w.representation(5)
            [1, 0, 0, 0]

        When the morphic word is finite::

            sage: m = WordMorphism("a->ab,b->,c->cdab,d->dcab")
            sage: w = m.fixed_point("a")
            sage: w.representation(0)
            []
            sage: w.representation(1)
            [1]
            sage: w.representation(2)
            Traceback (most recent call last):
            ...
            IndexError: Index (=2) out of range, the fixed point is finite and has length 2.

        TESTS:

        Accessing this method from an instance of the current class (no using
        the inherited word classes)::

            sage: m = WordMorphism('a->ab,b->a')
            sage: W = m.domain()
            sage: from sage.combinat.words.morphic import WordDatatype_morphic
            sage: w = WordDatatype_morphic(W, m, 'a')
            sage: type(w)
            <class 'sage.combinat.words.morphic.WordDatatype_morphic'>
            sage: w.representation(5)
            [1, 0, 0, 0]
        """
        letters_to_int = {a:i for (i,a) in enumerate(self._alphabet)}
        position = letters_to_int[self._letter]
        M = self._morphism.incidence_matrix()
        # vMk[i] is the length of the k-th iterated image of the i-th
        # letter; grow k until the image of the starting letter covers n.
        vMk = vector([1]*len(self._alphabet))
        length_of_images = []
        while vMk[position] <= n:
            length_of_images.append(vMk)
            vMk_next = vMk*M
            if vMk[position] == vMk_next[position]:
                # The prefix stopped growing: the fixed point is finite and
                # shorter than n + 1.
                raise IndexError('Index (={}) out of range, the fixed point is finite and has length {}.'.format(n,vMk[position]))
            vMk = vMk_next
        k = len(length_of_images)
        # Walk back down the tree of images: at each level pick the letter
        # whose image block contains the running offset ``n_k``.
        letter_k = self._letter
        n_k = n
        path = []
        while k > 0:
            m_letter_k = self._morphism(letter_k)
            S = 0
            j = 0
            while S <= n_k:
                a = m_letter_k[j]
                i = letters_to_int[a]
                pile_length = length_of_images[k-1][i]
                S += pile_length
                j += 1
            path.append(j-1)
            n_k -= S - pile_length
            letter_k = a
            k -= 1
        return path

    def _func(self, key):
        """
        Return a letter of a fixed point of a morphism on position ``key``.

        INPUT:

        - ``self`` - a fixed point of a morphism
        - ``key`` - an integer, the position

        OUTPUT:

        - a letter

        EXAMPLES::

            sage: m = WordMorphism("a->ab,b->a")
            sage: w = m.fixed_point("a")
            sage: w[0]
            'a'
            sage: w[5]
            'a'
            sage: w[10000]
            'a'

        TESTS:

        Accessing this method from an instance of the current class
        (without using the inherited word classes)::

            sage: m = WordMorphism('a->ab,b->a')
            sage: W = m.domain()
            sage: from sage.combinat.words.morphic import WordDatatype_morphic
            sage: w = WordDatatype_morphic(W, m, 'a')
            sage: w._func(5)
            'a'
        """
        # Follow the digits of ``key`` in the morphism's numeration system
        # down the tree of images to the letter at that position.
        letter = self._letter
        for a in self.representation(key):
            letter = (self._morphism(letter))[a]
        # NOTE(review): both branches below return the same value; the
        # special case for key == 0 looks redundant.
        if key == 0:
            return self._coding[letter]
        return self._coding[letter]

    def __iter__(self):
        r"""
        Return an iterator of the letters of the fixed point of ``self``
        starting with ``letter``.

        If w is the iterated word, then this iterator: outputs the elements
        of morphism[ w[i] ], appends morphism[ w[i+1] ] to w, increments i.

        INPUT:

        - ``self`` - an endomorphism, must be prolongable on
          letter

        - ``letter`` - a letter in the domain of ``self``

        OUTPUT:

        - iterator of the fixed point

        EXAMPLES::

            sage: m = WordMorphism("a->ab,b->a")
            sage: w = m.fixed_point("a")
            sage: it = iter(w)
            sage: [next(it) for _ in range(10)]
            ['a', 'b', 'a', 'a', 'b', 'a', 'b', 'a', 'a', 'b']

        Works with erasing morphisms::

            sage: m = WordMorphism('a->abc,b->,c->')
            sage: w = m.fixed_point("a")
            sage: list(w)
            ['a', 'b', 'c']

        The morphism must be prolongable on the letter or the iterator will
        be empty::

            sage: list(m.fixed_point("b"))
            Traceback (most recent call last):
            ...
            TypeError: self must be prolongable on b

        The morphism must be an endomorphism::

            sage: m = WordMorphism('a->ac,b->aac')
            sage: w = m.fixed_point('a')
            Traceback (most recent call last):
            ...
            TypeError: self (=a->ac, b->aac) is not self-composable

        We check that :trac:`8595` is fixed::

            sage: s = WordMorphism({('a', 1):[('a', 1), ('a', 2)], ('a', 2):[('a', 1)]})
            sage: w = s.fixed_point(('a', 1))
            sage: it = iter(w)
            sage: next(it)
            ('a', 1)

        This shows that ticket :trac:`13668` has been resolved::

            sage: s = WordMorphism({1:[1,2],2:[2,3],3:[4],4:[5],5:[6],6:[7],7:[8],8:[9],9:[10],10:[1]})
            sage: (s^7).fixed_points()
            [word: 1223234234523456234567234567823456789234...,
             word: 2,3,4,5,6,7,8,9,10,1,1,2,1,2,2,3,1,2,2,3,2,3,4,1,2,2,3,2,3,4,2,3,4,5,1,2,2,3,2,3,...]
            sage: (s^7).reversal().fixed_points()
            []
        """
        from itertools import chain
        w = iter(self._morphism.image(self._letter))
        while True:
            try:
                # Yield the (coded) image of the next letter of w ...
                for a in self._morphism.image(next(w)):
                    yield self._coding[a]
                # ... then (``for``/``else``: runs when the loop above was
                # not broken) peek one more letter and queue its image at
                # the end of w, keeping the peeked letter itself in front.
                else:
                    next_w = next(w)
                    w = chain([next_w], w, self._morphism.image(next_w))
            except StopIteration:
                # Erasing morphisms can exhaust w: the fixed point is finite.
                return
|
{"hexsha": "3fbbe48f26bf8c9adc8451e738b02e8fdd523a1c", "size": 11077, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sage/combinat/words/morphic.py", "max_stars_repo_name": "sheerluck/sage", "max_stars_repo_head_hexsha": "b5e572b7d231f70c139d9978d68add80c4ef353d", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1742, "max_stars_repo_stars_event_min_datetime": "2015-01-04T07:06:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T11:32:52.000Z", "max_issues_repo_path": "src/sage/combinat/words/morphic.py", "max_issues_repo_name": "sheerluck/sage", "max_issues_repo_head_hexsha": "b5e572b7d231f70c139d9978d68add80c4ef353d", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 66, "max_issues_repo_issues_event_min_datetime": "2015-03-19T19:17:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T11:59:30.000Z", "max_forks_repo_path": "src/sage/combinat/words/morphic.py", "max_forks_repo_name": "sheerluck/sage", "max_forks_repo_head_hexsha": "b5e572b7d231f70c139d9978d68add80c4ef353d", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 495, "max_forks_repo_forks_event_min_datetime": "2015-01-10T10:23:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T22:06:11.000Z", "avg_line_length": 30.4313186813, "max_line_length": 130, "alphanum_fraction": 0.5023923445, "include": true, "reason": "from sage", "num_tokens": 2994}
|
# system utilities
from __future__ import print_function
import os, datetime, argparse
# pytorch utilities
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
# computing utilities
import numpy as np
import math
# custom utilities
from BayesNets import BayesNet, BayesConvNet, BayesVGGLike, BayesConvNet_, BayesVGGLike_
from tensorboardX import SummaryWriter
def main(args):
    """Train and evaluate a Bayesian Quantized Network (BQN) classifier.

    Sets up devices, output paths, dataloaders and the model from `args`,
    then runs the training loop; each epoch is evaluated three ways
    (analytic probability propagation, MAP point propagation, Monte-Carlo
    sampling), with tensorboardX logging, checkpointing and saved curves.
    """
    ## Devices (CPU, single GPU or multiple GPU)
    # whether to use GPU (or CPU)
    use_cuda = args.use_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print("Device: ", device)
    # whether to use multi-GPU (or single-GPU)
    multi_gpu = use_cuda and args.multi_gpu and torch.cuda.device_count() > 1
    num_gpus = (torch.cuda.device_count() if multi_gpu else 1) if use_cuda else 0
    print("# of GPUs: ", num_gpus)
    # number of workers for dataloaders
    # NOTE(review): min(num_gpus, 1) gives 0 workers on CPU and always 4 on
    # GPU regardless of the GPU count -- max(num_gpus, 1) may have been
    # intended; kept as-is to preserve behavior.
    num_workers = min(num_gpus, 1) * 4
    # fix the random seed for reproducibility (if --use-seed)
    # BUGFIX: the condition was inverted (`if not args.use_seed:`), so seeding
    # only happened with --no-seed, contradicting the --use-seed help text.
    if args.use_seed:
        torch.manual_seed(args.random_seed)
        np.random.seed(args.random_seed)
        if use_cuda:
            torch.cuda.manual_seed_all(args.random_seed)
    ## Paths (Dataset, Checkpoints, Statistics and TensorboardX)
    # path to the folder of all datasets
    data_path = args.data_path
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    # path to the folder of specified dataset
    dataset = args.dataset
    assert dataset in ["MNIST", "FMNIST",
        "KMNIST", "CIFAR10", "CIFAR100"], \
        "The specified dataset is not supported."
    print("Dataset: ", dataset)
    if args.dataset_path == "default":
        dataset_path = dataset
    else: # if args.dataset_path != "default":
        dataset_path = args.dataset_path
    data_path = os.path.join(data_path, dataset_path)
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    # path to the folder of all outputs (for the dataset)
    outputs_path = args.outputs_path
    if not os.path.exists(outputs_path):
        os.makedirs(outputs_path)
    outputs_path = os.path.join(outputs_path, dataset_path)
    if not os.path.exists(outputs_path):
        os.makedirs(outputs_path)
    # create the name of the current network architecture
    if args.network_name == "default":
        model_param = str(args.lamb)
        network_name = args.model_type + '_' + model_param
    else: # if args.model_name != "default":
        network_name = args.network_name
    outputs_path = os.path.join(outputs_path, network_name)
    if not os.path.exists(outputs_path):
        os.makedirs(outputs_path)
    # create the name (and time stamp) of the current model
    if args.model_name == "default":
        model_name = "S" + str(args.random_seed)
    else: # if args.model_name != "default":
        model_name = args.model_name
    if args.model_stamp == "default":
        model_stamp = datetime.datetime.now().strftime("%m%d")
    else: # if args.model_stamp != "default":
        model_stamp = args.model_stamp
    model_name += '_' + model_stamp
    outputs_path = os.path.join(outputs_path, model_name)
    if not os.path.exists(outputs_path):
        os.makedirs(outputs_path)
    # path to the folder of checkpoints
    model_path = os.path.join(outputs_path, args.model_path)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    # path to the folder/file of the evaluation statistics
    stats_path = os.path.join(outputs_path, args.stats_path)
    if not os.path.exists(stats_path):
        os.makedirs(stats_path)
    stats_file = os.path.join(stats_path, args.stats_file)
    # path to the folder of the tensorboardX file
    tensorboard_path = os.path.join(outputs_path, args.tensorboard_path)
    if not os.path.exists(tensorboard_path):
        os.makedirs(tensorboard_path)
    tensorboard_writer = SummaryWriter(tensorboard_path)
    ## Data formats and Dataloaders
    # data format: batch_size(0) x channels(1) x height(2) x width(3)
    Dataset = {"MNIST": datasets.MNIST,
              "KMNIST": datasets.KMNIST,
              "FMNIST": datasets.FashionMNIST,
             "CIFAR10": datasets.CIFAR10,
            "CIFAR100": datasets.CIFAR100}[dataset]
    output_classes = 100 if dataset == "CIFAR100" else 10
    # batch size and the log intervals (0)
    batch_size  = args.batch_size
    log_samples = args.log_samples
    assert log_samples % batch_size == 0, \
        "The argument log_samples should be a multiple of batch_size."
    # number of channels(1), image_height (2) and image_width (3)
    if args.default_parameters:
        if dataset in ["MNIST", "KMNIST", "FMNIST"]:
            image_height, image_width, image_channels = 28, 28, 1
            image_padding, image_flipping = 2, 0.0
        elif dataset in ["CIFAR10", "CIFAR100"]:
            image_height, image_width, image_channels = 32, 32, 3
            image_padding, image_flipping = 4, 0.5
    else:
        image_height   = args.image_height
        image_width    = args.image_width
        image_channels = args.image_channels
        image_padding  = args.image_padding
        image_flipping = args.image_flipping
    image_mean = tuple([0.5] * image_channels)
    image_var  = tuple([0.5] * image_channels)
    # preprocessing/transformation of the input images
    train_transform = transforms.Compose(
        [transforms.Resize((image_height, image_width)),
         transforms.RandomCrop((image_height, image_width), image_padding),
         transforms.RandomHorizontalFlip(image_flipping),
         transforms.ToTensor(),
         transforms.Normalize(image_mean, image_var)])
    test_transform = transforms.Compose(
        [transforms.Resize((image_height, image_width)),
         transforms.ToTensor(),
         transforms.Normalize(image_mean, image_var)])
    # dataloaders for training and test datasets
    train_loader = torch.utils.data.DataLoader(Dataset(data_path,
        train = True,  download = True, transform = train_transform),
        batch_size = batch_size, shuffle = True,  num_workers = num_workers)
    train_samples = len(train_loader.dataset)
    print("# of training samples: ", train_samples)
    test_loader  = torch.utils.data.DataLoader(Dataset(data_path,
        train = False, download = True, transform = test_transform),
        batch_size = batch_size, shuffle = False, num_workers = num_workers)
    test_samples = len(test_loader.dataset)
    print("# of test samples: ", test_samples)
    ## Models (Multi-layer Perceptron or Convolutional Neural Networks)
    # model architecture
    model_type = args.model_type
    assert model_type in ["MLP", "CNN", "CNN_", "VGGLike", "VGGLike_"], \
        "The type of model architecture is not supported."
    # quantization levels
    quantization = args.quantization
    assert quantization in ["binary", "ternary", "quanternary"], \
        "The type of weights quantization is not supported."
    # nats of information per parameter at the given quantization level
    nat_per_param = {"binary": math.log(2), "ternary": math.log(3),
        "quanternary": math.log(4)}[quantization]
    use_bias = args.use_bias
    print("Model: ", model_type,
        "(Quantization: %s, Bias: %s)" % (quantization, use_bias))
    # multi-layer perceptron
    if model_type == "MLP":
        model = BayesNet(image_height = image_height, image_width = image_width,
            image_channels = image_channels, output_classes = output_classes,
            use_bias = use_bias, quantization = quantization)
    # convolutional networks
    else:
        pooling = args.pooling
        print("Pooling: ", pooling)
        Model = {"CNN": BayesConvNet, "VGGLike": BayesVGGLike,
            "CNN_": BayesConvNet_, "VGGLike_": BayesVGGLike_}[model_type]
        assert ((model_type in ["CNN", "VGGLike"]) and (pooling in ["avg", "max"])) or \
            ((model_type in ["CNN_", "VGGLike_"]) and (pooling in ["avg_", "prob_"])), \
            "The type of pooling layer is not supported."
        model = Model(image_height = image_height, image_width = image_width,
            image_channels = image_channels, output_classes = output_classes,
            use_bias = use_bias, quantization = quantization, pooling = pooling)
    # number of parameters in the neural network
    num_params_full, num_params_conv = model.num_params()
    num_params = num_params_conv + num_params_full
    print("# of parameters: ", num_params)
    # move the model to the device (CPU, single-GPU, multi-GPU)
    model.to(device)
    if multi_gpu: model = nn.DataParallel(model)
    ## Main script for learning and evaluation
    epoch_num  = args.epoch_num
    save_epoch = args.save_epoch
    # per-epoch learning curves (np.float alias was removed in NumPy 1.24;
    # use the explicit np.float64 dtype instead)
    # analytic inference (probability propagation)
    test_acc_ppg_ = np.zeros(epoch_num, dtype = np.float64)
    test_nll_ppg_ = np.zeros(epoch_num, dtype = np.float64)
    # maximum a posterior (point propagation)
    test_acc_map_ = np.zeros(epoch_num, dtype = np.float64)
    test_nll_map_ = np.zeros(epoch_num, dtype = np.float64)
    # monte-carlo sampling (point propagation)
    mc_samples = args.mc_samples # number of samples
    test_acc_mcs_ = np.zeros(epoch_num, dtype = np.float64)
    test_nll_mcs_ = np.zeros(epoch_num, dtype = np.float64)
    # initial learning rate
    learning_rate = args.learning_rate
    # recover the model to resume training (if required)
    if args.start_begin:
        model_file = None
        start_epoch, total_samples = 0, 0
        min_epoch, min_test_nll = 0, float("inf")
    else: # if args.start_exist:
        model_file = os.path.join(model_path, 'training_last.pt'
            if args.start_last else "training_%d.pt" % args.start_epoch)
        print(model_file)
        assert os.path.exists(model_file), \
            "The specified model is not found in the folder."
    if model_file is not None:
        checkpoint = torch.load(model_file)
        # model parameters
        model.load_state_dict(checkpoint["model_state_dict"])
        # training progress
        start_epoch = checkpoint["epoch"]
        total_samples = checkpoint["total_samples"]
        # best model and its negative likelihood
        min_epoch = checkpoint["min_epoch"]
        min_test_nll = checkpoint["min_test_nll"]
        # learning rate
        learning_rate = checkpoint["learning_rate"]
    # optimizer and corresponding scheduler
    optimizer = torch.optim.Adam(model.parameters(), lr = learning_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
        step_size = args.decay_epoch, gamma = args.decay_rate)
    # training / evaluation loop over epochs
    for epoch in range(start_epoch, epoch_num):
        learning_rate = optimizer.param_groups[0]['lr']
        tensorboard_writer.add_scalar('lr', learning_rate, epoch + 1)
        print("Epoch %d, Learning rate: %2f" % (epoch + 1, learning_rate))
        ## Phase 1: Learning on training set
        model.train()
        samples, train_acc, train_nll = 0, 0, 0.
        # initialize the statistics
        LOSS, NLL, ACC = 0., 0., 0
        for (data, target) in train_loader:
            data, target = data.to(device), target.to(device)
            total_samples += batch_size
            samples += batch_size
            # predict the outputs with probabilistic propagation
            prob, loss_nll = model(data, target, mode = "analytic")
            pred = prob.max(1, keepdim = True)[1]
            loss_nll = torch.mean(loss_nll) # for multi-gpu
            correct = pred.eq(target.view_as(pred)).sum().item()
            # compute the regularizer based on the (joint) entropy
            entropy_full, entropy_conv = model.sum_entropy(device) if \
                not multi_gpu else model.module.sum_entropy(device)
            entropy = entropy_full + entropy_conv
            # negative coefficient during warm-up encourages entropy
            loss_reg = - (args.warm_lamb if epoch < args.warm_epoch
                else args.lamb) * entropy * batch_size / train_samples
            loss = loss_nll + loss_reg
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # accumulate the statistics
            LOSS += loss.item()
            NLL  += loss_nll.item()
            ACC  += correct
            if samples % args.log_samples == 0:
                entropy_full_norm = entropy_full.item() / (nat_per_param * num_params_full + 1e-6)
                entropy_conv_norm = entropy_conv.item() / (nat_per_param * num_params_conv + 1e-6)
                tensorboard_writer.add_scalar('entropy_full', entropy_full_norm, total_samples)
                tensorboard_writer.add_scalar('entropy_conv', entropy_conv_norm, total_samples)
                print("Epoch: {} [{}/{} ({:.1f}%)], Total loss: {:.4f}, Avg. NLL: {:.4f}, Acc: {}/{}, \
                    Conv. ent. (x 1e-3): {:.4f}, Full. ent. (x 1e-3): {:.4f}".format(
                    epoch + 1, samples, train_samples, 100. * samples / train_samples, # progress information
                    LOSS / args.log_samples, NLL / args.log_samples, ACC, args.log_samples,
                    1e3 * entropy_conv_norm, 1e3 * entropy_full_norm))
                train_nll += NLL
                train_acc += ACC
                tensorboard_writer.add_scalar('train_loss', LOSS / args.log_samples, total_samples)
                tensorboard_writer.add_scalar('train_nll',  NLL  / args.log_samples, total_samples)
                tensorboard_writer.add_scalar('train_acc',  ACC  / args.log_samples, total_samples)
                # re-initialize the statistics
                LOSS, NLL, ACC = 0., 0., 0
        train_nll /= train_samples
        print("Epoch: {} (Training), Avg. NLL: {:.4f}, Acc.: {}/{} ({:.2f}%)".format(
            epoch + 1, train_nll, train_acc, train_samples, 100. * train_acc / train_samples))
        train_acc = 100. * train_acc / train_samples
        # break the training process if numerical error
        if math.isnan(train_nll) or math.isinf(train_nll): break
        ## Phase 2: Evaluation on the validation set
        model.eval()
        test_nll_ppg, test_acc_ppg = 0., 0 # Probabilistic Propagation
        test_nll_map, test_acc_map = 0., 0 # Maximum A Posterior (MAP)
        test_nll_mcs, test_acc_mcs = 0., 0 # Monte-Carlo Sampling(MCS)
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                # (1) Probabilistic Propagation
                (prob, loss_nll) = model(data, target, mode = "analytic")
                pred = prob.max(1, keepdim = True)[1]
                loss_nll = torch.mean(loss_nll) # for multi-gpu
                test_nll_ppg += loss_nll.item()
                test_acc_ppg += pred.eq(target.view_as(pred)).sum().item()
                # (2) Maximum A Posterior and point propagation
                (prob, loss_nll) = model(data, target, mode = "MAP")
                pred = prob.max(1, keepdim = True)[1]
                loss_nll = torch.mean(loss_nll) # for multi-gpu
                test_nll_map += loss_nll.item()
                test_acc_map += pred.eq(target.view_as(pred)).sum().item()
                # (3) Monte Carlo Sampling and point propagation
                for s in range(mc_samples):
                    (prob, _) = model(data, target, mode = "sampling")
                    # loss_nll = torch.mean(loss_nll) # multi-gpu
                    prob_s = prob if s == 0 else prob_s + prob
                    # loss_nll_s = loss_nll if s == 0 else loss_nll_s + loss_nll
                prob = prob_s / mc_samples
                pred = prob.max(1, keepdim = True)[1]
                # loss_nll = loss_nll_s / mc_samples
                loss_nll = F.nll_loss(torch.log(prob + 1e-6), target, reduction = "sum")
                test_nll_mcs += loss_nll.item()
                test_acc_mcs += pred.eq(target.view_as(pred)).sum().item()
        ## Phase 3: Logging the learning curves and checkpoints
        if args.rate_decay: scheduler.step()
        # (1) Probabilistic propagation (Analytical inference)
        test_nll_ppg /= test_samples
        print("Epoch {} (Analytical) , Avg. NLL: {:.4f}, Acc.: {}/{} ({:.2f}%)".format(
            epoch + 1, test_nll_ppg, test_acc_ppg, test_samples, 100. * test_acc_ppg / test_samples))
        test_acc_ppg = 100. * test_acc_ppg / test_samples
        test_acc_ppg_[epoch] = test_acc_ppg
        test_nll_ppg_[epoch] = test_nll_ppg
        tensorboard_writer.add_scalar('test_acc_ppg', test_acc_ppg, epoch + 1)
        tensorboard_writer.add_scalar('test_nll_ppg', test_nll_ppg, epoch + 1)
        # (2) Maximum A Posterior and point propagation
        test_nll_map /= test_samples
        print("Epoch {} (MAP-rounded), Avg. NLL: {:.4f}, Acc.: {}/{} ({:.2f}%)".format(
            epoch + 1, test_nll_map, test_acc_map, test_samples, 100. * test_acc_map / test_samples))
        test_acc_map = 100. * test_acc_map / test_samples
        test_acc_map_[epoch] = test_acc_map
        test_nll_map_[epoch] = test_nll_map
        tensorboard_writer.add_scalar('test_acc_map', test_acc_map, epoch + 1)
        tensorboard_writer.add_scalar('test_nll_map', test_nll_map, epoch + 1)
        # (3) Monte Carlo Sampling and point propagation
        test_nll_mcs /= test_samples
        print("Epoch {} (Monte-Carlo), Avg. NLL: {:.4f}, Acc.: {}/{} ({:.2f}%)".format(
            epoch + 1, test_nll_mcs, test_acc_mcs, test_samples, 100. * test_acc_mcs / test_samples))
        test_acc_mcs = 100. * test_acc_mcs / test_samples
        test_acc_mcs_[epoch] = test_acc_mcs
        test_nll_mcs_[epoch] = test_nll_mcs
        tensorboard_writer.add_scalar('test_acc_mcs', test_acc_mcs, epoch + 1)
        tensorboard_writer.add_scalar('test_nll_mcs', test_nll_mcs, epoch + 1)
        # update the best model so far
        if test_nll_ppg < min_test_nll:
            min_epoch, min_test_nll = epoch + 1, test_nll_ppg
        # save the currrent model as a checkpoint
        checkpoint_info = {
            "epoch": epoch + 1, "total_samples": total_samples, # training progress
            "min_epoch": min_epoch, "min_test_nll": min_test_nll, # best model and loss
            "learning_rate": optimizer.param_groups[0]['lr'], # current learning rate
            "model_state_dict": model.state_dict() # model parameters
        }
        torch.save(checkpoint_info, os.path.join(model_path, 'training_last.pt'))
        if (epoch + 1) % save_epoch == 0:
            torch.save(checkpoint_info, os.path.join(model_path, 'training_%d.pt' % (epoch + 1)))
        if (epoch + 1) == min_epoch:
            torch.save(checkpoint_info, os.path.join(model_path, 'training_best.pt'))
        # save the statistics
        np.savez(stats_file,
            test_acc_ppg = test_acc_ppg_, test_nll_ppg = test_nll_ppg_,
            test_acc_map = test_acc_map_, test_nll_map = test_nll_map_,
            test_acc_mcs = test_acc_mcs_, test_nll_mcs = test_nll_mcs_)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description =
"Training Bayesian Quantized Networks (BQN).")
## 1) Data format (Pytorch format)
# batch size (0) x channels (1) x height (2) x width (3)
# dataset name
parser.add_argument("--dataset", default = "MNIST", type = str,
help = "The dataset used for training (options: MNIST/FMNIST/KMNIST/CIFAR10/CIFAR100).")
parser.add_argument('--default-parameters', dest = 'default_parameters', action = 'store_true',
help = 'Use default parameters (format/augmentation) for the given dataset.')
parser.add_argument('--specified-parameters', dest = 'default_parameters', action = 'store_false',
help = 'Use specified parameters (format/augmentation) for the given dataset.')
parser.set_defaults(default_parameters = True)
# image format: channels (1), height (2), width (3)
parser.add_argument("--image-height", default = 32, type = int,
help = "The image height of each sample.")
parser.add_argument("--image-width", default = 32, type = int,
help = "The image width of each sample.")
parser.add_argument("--image-channels", default = 3, type = int,
help = "The number of channels in each sample.")
# data augmentation (in learning phase)
parser.add_argument("--image-padding", default = 2, type = int,
help = "The number of padded pixels along height/width.")
parser.add_argument("--image-flipping", default = 0.5, type = float,
help = "The probability of horizontal filpping of the images")
## 2) Paths (Data, Checkpoints, Results and TensorboardX)
# inputs: data
parser.add_argument("--data-path", default = "../data", type = str,
help = "The path to the folder stroing the data.")
parser.add_argument("--dataset-path", default = "default", type = str,
help = "The folder for the training and test datasets.")
# outputs: checkpoints, statistics and tensorboard
parser.add_argument("--outputs-path", default = "../outputs_BQN", type = str,
help = "The path to the folder storing outputs from training.")
parser.add_argument("--network-name", default = "default", type = str,
help = "The architecture model (to create the folder).")
parser.add_argument("--model-name", default = "default", type = str,
help = "The model name (to create the folder).")
parser.add_argument("--model-stamp", default = "default", type = str,
help = "The time stamp of the model (as a suffix to the its name).")
parser.add_argument("--model-path", default = "models", type = str,
help = "The folder for all checkpoints in training.")
parser.add_argument("--stats-path", default = "stats", type = str,
help = "The folder for the evaluation statistics.")
parser.add_argument("--stats-file", default = "curve", type = str,
help = "The file name for the learning curve.")
parser.add_argument('--tensorboard-path', default = 'tensorboard', type = str,
help = 'The folder for the tensorboardX files.')
## 3) Device (CPU, single GPU or multiple GPUs)
# whether to use GPU for training
parser.add_argument('--use-cuda', dest = 'use_cuda', action = 'store_true',
help = 'Use GPU for training.')
parser.add_argument('--no-cuda', dest = 'use_cuda', action = 'store_false',
help = "Do not use GPU for training.")
parser.set_defaults(use_cuda = True)
# whether to use multi-GPU for training
parser.add_argument('--multi-gpu', dest = 'multi_gpu', action = 'store_true',
help = 'Use multiple GPUs for training.')
parser.add_argument('--single-gpu', dest = 'multi_gpu', action = 'store_false',
help = 'Do not use multiple GPU for training.')
parser.set_defaults(multi_gpu = False)
# random seed for reproducibility
parser.add_argument('--use-seed', dest = 'use_seed', action = 'store_true',
help = 'Fix the random seed to reproduce the model.')
parser.add_argument('--no-seed', dest = 'use_seed', action = 'store_false',
help = 'Randomly choose the random seed.')
parser.set_defaults(use_seed = True)
parser.add_argument('--random-seed', default = 0, type = int,
help = 'The random seed number (to reproduce the model).')
## 4) Models (MLP, CNN, VGGLike)
parser.add_argument("--quantization", default = "binary", type = str,
help = "The type of quantization (options: binary/ternary/quanternary).")
parser.add_argument("--model-type", default = "CNN", type = str,
help = "The type of the model (options: MLP/CNN/VGGLike/CNN_/VGGLike_).")
parser.add_argument("--pooling", default = "avg", type = str,
help = "The type of pooling used in convolutional networks (options: avg/max/avg_/prob_).")
parser.add_argument("--use-bias", dest = "use_bias", action = "store_true",
help = "Use bias in all layers of the model.")
parser.add_argument("--no-bias", dest = "use_bias", action = "store_false",
help = "Do not use bias in all layers of the model.")
parser.set_defaults(use_bias = True)
## 5) Hyperparameters for learning
parser.add_argument("--epoch-num", default = 300, type = int,
help = "The total number of epochs for training.")
parser.add_argument("--save-epoch", default = 20, type = int,
help = "The interval of epochs to save a checkpoint.")
parser.add_argument("--batch-size", default = 100, type = int,
help = "The batch size for training.")
parser.add_argument("--log-samples", default = 5000, type = int,
help = "Log the learning curve every log_samples.")
parser.add_argument("--lamb", default = 1e-3, type = float,
help = "The coefficient of the regularizer after warm-up.")
parser.add_argument("--warm-epoch", default = 5, type = int,
help = "The number of epochs for training in warm-up mode.")
parser.add_argument("--warm-lamb", default = -1e-4, type = float,
help = "The coefficient of the regularizer in warm-up mode.")
# whether to start training from
parser.add_argument('--start-begin', dest = 'start_begin', action = 'store_true',
help = 'Start training a new model from the beginning.')
parser.add_argument('--start-exist', dest = 'start_begin', action = 'store_false',
help = 'Resume training from an existing model.')
parser.set_defaults(start_begin = True)
# if start_begin is False (--start-exist)
parser.add_argument('--start-last', dest = 'start_last', action = 'store_true',
help = 'Resume training from the last available model.')
parser.add_argument('--start-spec', dest = 'start_last', action = 'store_false',
help = 'Resume training from the model of the specified epoch.')
parser.set_defaults(start_last = True)
# if start_last is False (--start-spec)
parser.add_argument('--start-epoch', default = 0, type = int,
help = 'The number of epoch to resume training.')
# learning rate scheduling
parser.add_argument("--learning-rate", default = 1e-2, type = float,
help = "Initial learning rate of the ADAM optimizer.")
parser.add_argument("--learning-rate-decay", dest = "rate_decay", action = 'store_true',
help = "Learning rate is decayed during training.")
parser.add_argument("--learning-rate-fixed", dest = "rate_decay", action = 'store_false',
help = "Learning rate is fixed during training.")
parser.set_defaults(rate_decay = True)
# if rate_decay is True (--learning-rate-decay)
parser.add_argument("--decay-epoch", default = 1, type = int,
help = "The learning rate is decayed by decay_rate every decay_epoch.")
parser.add_argument("--decay-rate", default = 0.98, type = float,
help = "The learning rate is decayed by decay_rate every decay_epoch.")
# evaluation
parser.add_argument("--mc-samples", default = 5, type = int,
help = "The number of Monte-Carlo samples to evaluate the model.")
main(parser.parse_args())
|
{"hexsha": "edd83681ec5aa0b44d7e1c8c4a0f105734e24f56", "size": 27207, "ext": "py", "lang": "Python", "max_stars_repo_path": "BQN/class_bqn_train.py", "max_stars_repo_name": "umd-huang-lab/Bayesian-Quantized-Networks", "max_stars_repo_head_hexsha": "eb56fa1cb142cf235dde9cec7badea86009c3fcb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BQN/class_bqn_train.py", "max_issues_repo_name": "umd-huang-lab/Bayesian-Quantized-Networks", "max_issues_repo_head_hexsha": "eb56fa1cb142cf235dde9cec7badea86009c3fcb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BQN/class_bqn_train.py", "max_forks_repo_name": "umd-huang-lab/Bayesian-Quantized-Networks", "max_forks_repo_head_hexsha": "eb56fa1cb142cf235dde9cec7badea86009c3fcb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.7111459969, "max_line_length": 109, "alphanum_fraction": 0.642739001, "include": true, "reason": "import numpy", "num_tokens": 6687}
|
import numpy as np
import hyperparameters as hp
class ActionMeta(type):
    """Metaclass that equips a class with the discrete action lookup tables.

    Builds `action_to_num` / `num_to_action` mapping between a
    (left, right, faster, slower) key-press tuple and a dense action index,
    plus `num_actions`.  Combinations pressing both left and right, pressing
    slower, or not pressing faster are excluded.
    """
    def __init__(cls, name, bases, d):
        type.__init__(cls, name, bases, d)
        cls.action_to_num = dict()
        cls.num_to_action = dict()
        flags = (True, False)
        # enumerate all key combinations in the original nesting order and
        # keep only the valid ones: must accelerate, never brake, and never
        # steer both ways at once
        valid = [
            (left, right, faster, slower)
            for left in flags
            for right in flags
            for faster in flags
            for slower in flags
            if not (left and right) and not slower and faster
        ]
        for index, combo in enumerate(valid):
            cls.action_to_num[combo] = index
            cls.num_to_action[index] = combo
        cls.num_actions = len(valid)
class Action(object):
    """A discrete game action, convertible between index, one-hot and key flags.

    NOTE(review): `__metaclass__` is Python-2-only syntax; under Python 3 the
    ActionMeta metaclass is silently NOT applied, so `action_to_num`,
    `num_to_action` and `num_actions` would be missing -- confirm the target
    interpreter (use `class Action(object, metaclass=ActionMeta)` for py3).
    """
    __metaclass__ = ActionMeta

    def __init__(self, num=None, left=False, right=False, faster=False, slower=False):
        """Create an action from its index `num`, or from individual key flags.

        Raises ValueError for an out-of-range index or an inconsistent flag
        combination (left+right, faster+slower).
        """
        if num is not None:
            # NOTE(review): the bound comes from hp.TOTAL_ACTIONS while the
            # lookup tables are built by ActionMeta (num_actions) -- verify
            # the two constants agree.
            if num > hp.TOTAL_ACTIONS-1 or num < 0:
                raise ValueError("Invalid num, must be 0-5")
            self.num = num
        else:
            if left and right:
                raise ValueError("Invalid action, cannot press both left and right")
            if faster and slower:
                raise ValueError("Invalid action, cannot press both faster and slower")
            self.num = self.action_to_num[(left, right, faster, slower)]

    @classmethod
    def random_action(cls):
        """Return a uniformly random valid Action."""
        # first argument renamed from `self` to the conventional `cls`
        return Action(np.random.randint(0, cls.num_actions))

    def to_onehot(self):
        """Return a one-hot numpy vector of length `num_actions`."""
        a = np.zeros(self.num_actions)
        a[self.num] = 1
        return a

    def to_dict(self):
        """Return the action as a dict of the four key-press flags."""
        left, right, faster, slower = self.num_to_action[self.num]
        return {
            "keyLeft": left,
            "keyRight": right,
            "keyFaster": faster,
            "keySlower": slower
        }
|
{"hexsha": "d24d423fb79c7d42cdaa52822fdaca3af524b0d6", "size": 2066, "ext": "py", "lang": "Python", "max_stars_repo_path": "action.py", "max_stars_repo_name": "tigerneil/cs231n-project", "max_stars_repo_head_hexsha": "2c520fd79fabbba09ad995360c1e21f49e10a52a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2016-03-31T01:07:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-05T03:39:31.000Z", "max_issues_repo_path": "action.py", "max_issues_repo_name": "tigerneil/cs231n-project", "max_issues_repo_head_hexsha": "2c520fd79fabbba09ad995360c1e21f49e10a52a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-05-08T08:13:14.000Z", "max_issues_repo_issues_event_max_datetime": "2017-05-08T08:13:14.000Z", "max_forks_repo_path": "action.py", "max_forks_repo_name": "RaphiePS/cs231n-project", "max_forks_repo_head_hexsha": "2c520fd79fabbba09ad995360c1e21f49e10a52a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2016-04-15T07:26:21.000Z", "max_forks_repo_forks_event_max_datetime": "2018-01-18T10:41:11.000Z", "avg_line_length": 35.0169491525, "max_line_length": 87, "alphanum_fraction": 0.5145208132, "include": true, "reason": "import numpy", "num_tokens": 428}
|
import numpy as np
import pandas as pd
from corsempy.model import Model as md
from corsempy.optimizer import Optimizer as opt
# alias renamed from `id`, which shadowed the builtin id()
from corsempy.identifier import Identifier as ident
from corsempy.stats import Statistics as stat

# Smoke test: load a dataset, build a structural equation model from a
# lavaan-style specification, and print the observed variable columns.
df1 = pd.read_csv('data_poli.csv')
mod = """xi_1~=x1+x2+x3
eta_1 ~= y1+y2+y3+y4
eta_2 ~= y5+y6+y7+y8
eta_1~ xi_1
eta_2~ eta_1 + xi_1"""
my_model = md(mod, df1)
print(my_model.load_data().columns)
|
{"hexsha": "18539b9a4a0c27834824f9e740086a795de17914", "size": 421, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "iaousse/corsempy_project", "max_stars_repo_head_hexsha": "e369016e1edd9372556e13d0038088628dc7bb40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test.py", "max_issues_repo_name": "iaousse/corsempy_project", "max_issues_repo_head_hexsha": "e369016e1edd9372556e13d0038088628dc7bb40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "iaousse/corsempy_project", "max_forks_repo_head_hexsha": "e369016e1edd9372556e13d0038088628dc7bb40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.1578947368, "max_line_length": 48, "alphanum_fraction": 0.7529691211, "include": true, "reason": "import numpy", "num_tokens": 144}
|
import numpy as np
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def collect_trajectories(envs, action_dist, ep_length, policy, rollout_length=200):
    """
    collect trajectories for a parallelized parallelEnv object

    Returns : Shape
    ======
        log_probs_old (tensor)   : (rollout_length*n,)
        states (tensor)          : (rollout_length*n, envs.observation_space.shape[0])
        actions (tensor)         : (rollout_length*n,)
        rewards (list,np.array)  : (rollout_length, n) --> for advs
        values (list,np.array)   : (rollout_length, n) --> for advs
        dones (list,np.array)    : (rollout_length, n) --> for advs
        vals_last (list,np.array): (n,) --> for advs

    Side effects: increments `action_dist` counts (first env only) and
    appends the episode length to `ep_length`.
    """
    n = len(envs.ps) # number of parallel instances
    log_probs_old, states, actions, rewards, values, dones, infos = [], [], [], [], [], [], []
    obs = envs.reset()
    timesteps = rollout_length
    for t in range(rollout_length):
        batch_input = torch.from_numpy(obs).float().to(device)
        traj_info = policy.act(batch_input)
        log_prob_old = traj_info['log_pi_a'].detach()
        action = traj_info['a'].cpu().numpy()
        # NOTE(review): only the first parallel env contributes to the
        # action histogram -- confirm this is intentional.
        action_dist[action[0]] += 1
        value = traj_info['v'].cpu().detach().numpy()
        obs, reward, is_done, _ = envs.step(action)
        if is_done.any():
            # BUGFIX: was hard-coded `t < 199`, which only matched the
            # default rollout_length of 200; generalized to any length
            if t < rollout_length - 1:
                timesteps = t
            # zero out rewards of finished environments
            idx = np.where(is_done)
            reward[idx] = 0
        log_probs_old.append(log_prob_old) # shape (rollout_length, n)
        states.append(batch_input)         # shape (rollout_length, n, envs.observation_space.shape[0])
        actions.append(action)             # shape (rollout_length, n)
        rewards.append(reward)             # shape (rollout_length, n)
        values.append(value)               # shape (rollout_length, n)
        dones.append(is_done)              # shape (rollout_length, n)
        infos.append(action_dist)
    ep_length.append(timesteps)
    # flatten the per-step tensors to (rollout_length*n, ...)
    log_probs_old = torch.stack(log_probs_old).view(-1,)
    states = torch.stack(states)
    states = states.view(-1, envs.observation_space.shape[0])
    actions = torch.tensor(actions, dtype=torch.long, device=device).view(-1,)
    # bootstrap value of the final observation (for advantage estimation)
    obs = torch.from_numpy(obs).float().to(device)
    traj_info_last = policy.act(obs)
    vals_last = traj_info_last['v'].cpu().detach().numpy()
    return log_probs_old, states, actions, rewards, values, dones, vals_last, infos, ep_length
def random_sample(inds, minibatch_size):
    """Yield the given indices, shuffled, as long tensors of size
    `minibatch_size`; a final smaller tensor carries any remainder."""
    shuffled = np.random.permutation(inds)
    total = len(shuffled)
    n_full = total // minibatch_size
    full_part = shuffled[:n_full * minibatch_size].reshape(-1, minibatch_size)
    for chunk in full_part:
        yield torch.from_numpy(chunk).long()
    leftover = total - n_full * minibatch_size
    if leftover:
        yield torch.from_numpy(shuffled[total - leftover:]).long()
|
{"hexsha": "7744abf530d38f8a8508ef475f71c02271b4836d", "size": 2890, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils.py", "max_stars_repo_name": "rish-16/gym-navmaze", "max_stars_repo_head_hexsha": "cc21d730ec6ab1e96a4a1a8f602a5bbb951d2929", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-10T13:40:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-10T13:40:09.000Z", "max_issues_repo_path": "src/utils.py", "max_issues_repo_name": "rish-16/gym-navmaze", "max_issues_repo_head_hexsha": "cc21d730ec6ab1e96a4a1a8f602a5bbb951d2929", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils.py", "max_forks_repo_name": "rish-16/gym-navmaze", "max_forks_repo_head_hexsha": "cc21d730ec6ab1e96a4a1a8f602a5bbb951d2929", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-10T13:40:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-10T13:40:11.000Z", "avg_line_length": 39.0540540541, "max_line_length": 103, "alphanum_fraction": 0.6117647059, "include": true, "reason": "import numpy", "num_tokens": 735}
|
C ALGORITHM 680, COLLECTED ALGORITHMS FROM ACM.
C THIS WORK PUBLISHED IN TRANSACTIONS ON MATHEMATICAL SOFTWARE,
C VOL. 16, NO. 1, PP. 47.
      SUBROUTINE WOFZ (XI, YI, U, V, FLAG)
C
C  GIVEN A COMPLEX NUMBER Z = (XI,YI), THIS SUBROUTINE COMPUTES
C  THE VALUE OF THE FADDEEVA-FUNCTION W(Z) = EXP(-Z**2)*ERFC(-I*Z),
C  WHERE ERFC IS THE COMPLEX COMPLEMENTARY ERROR-FUNCTION AND I
C  MEANS SQRT(-1).
C  THE ACCURACY OF THE ALGORITHM FOR Z IN THE 1ST AND 2ND QUADRANT
C  IS 14 SIGNIFICANT DIGITS; IN THE 3RD AND 4TH IT IS 13 SIGNIFICANT
C  DIGITS OUTSIDE A CIRCULAR REGION WITH RADIUS 0.126 AROUND A ZERO
C  OF THE FUNCTION.
C  ALL REAL VARIABLES IN THE PROGRAM ARE DOUBLE PRECISION.
C
C
C  THE CODE CONTAINS A FEW COMPILER-DEPENDENT PARAMETERS :
C     RMAXREAL = THE MAXIMUM VALUE OF RMAXREAL EQUALS THE ROOT OF
C                RMAX = THE LARGEST NUMBER WHICH CAN STILL BE
C                IMPLEMENTED ON THE COMPUTER IN DOUBLE PRECISION
C                FLOATING-POINT ARITHMETIC
C     RMAXEXP  = LN(RMAX) - LN(2)
C     RMAXGONI = THE LARGEST POSSIBLE ARGUMENT OF A DOUBLE PRECISION
C                GONIOMETRIC FUNCTION (DCOS, DSIN, ...)
C  THE REASON WHY THESE PARAMETERS ARE NEEDED AS THEY ARE DEFINED WILL
C  BE EXPLAINED IN THE CODE BY MEANS OF COMMENTS
C
C
C  PARAMETER LIST
C     XI     = REAL      PART OF Z
C     YI     = IMAGINARY PART OF Z
C     U      = REAL      PART OF W(Z)
C     V      = IMAGINARY PART OF W(Z)
C     FLAG   = AN ERROR FLAG INDICATING WHETHER OVERFLOW WILL
C              OCCUR OR NOT; TYPE LOGICAL;
C              THE VALUES OF THIS VARIABLE HAVE THE FOLLOWING
C              MEANING :
C              FLAG=.FALSE. : NO ERROR CONDITION
C              FLAG=.TRUE.  : OVERFLOW WILL OCCUR, THE ROUTINE
C                             BECOMES INACTIVE
C  XI, YI      ARE THE INPUT-PARAMETERS
C  U, V, FLAG  ARE THE OUTPUT-PARAMETERS
C
C  FURTHERMORE THE PARAMETER FACTOR EQUALS 2/SQRT(PI)
C
C  THE ROUTINE IS NOT UNDERFLOW-PROTECTED BUT ANY VARIABLE CAN BE
C  PUT TO 0 UPON UNDERFLOW;
C
C  REFERENCE - GPM POPPE, CMJ WIJERS; MORE EFFICIENT COMPUTATION OF
C  THE COMPLEX ERROR-FUNCTION, ACM TRANS. MATH. SOFTWARE.
C
*
*
*
*
      IMPLICIT DOUBLE PRECISION (A-H, O-Z)
c      DOUBLE PRECISION D1MACH, FACTOR, RMAX, RMAXREAL, RMAXEXP
*
      LOGICAL A, B, FLAG
c      PARAMETER (FACTOR = 1.12837916709551257389615890312154517D0,
c     *           RMAXREAL = 1.340780792994259D+154,
c     *           RMAXEXP = 709.0895657128241D0,
c     *           RMAXGONI = 0.6746518850690209D10)
      PARAMETER (FACTOR = 1.12837916709551257389615890312154517D0,
     *           RMAXREAL = 0.5D+154,
     *           RMAXEXP = 708.503061461606D0,
     *           RMAXGONI = 3.53711887601422D+15)
c      RMAX = D1MACH(2)
c      RMAXREAL = DSQRT(RMAX)
c      RMAXEXP = DLOG(RMAX)-DLOG(2D0)
*
      FLAG = .FALSE.
*
C  THE ALGORITHM WORKS ON |XI| AND |YI|; THE OTHER QUADRANTS ARE
C  RECOVERED BY SYMMETRY AT THE END OF THE ROUTINE.
      XABS = DABS(XI)
      YABS = DABS(YI)
      X = XABS/6.3
      Y = YABS/4.4
*
C
C     THE FOLLOWING IF-STATEMENT PROTECTS
C     QRHO = (X**2 + Y**2) AGAINST OVERFLOW
C
      IF ((XABS.GT.RMAXREAL).OR.(YABS.GT.RMAXREAL)) GOTO 100
*
      QRHO = X**2 + Y**2
*
      XABSQ = XABS**2
      XQUAD = XABSQ - YABS**2
      YQUAD = 2*XABS*YABS
*
      A = QRHO.LT.0.085264D0
*
      IF (A) THEN
C
C  IF (QRHO.LT.0.085264D0) THEN THE FADDEEVA-FUNCTION IS EVALUATED
C  USING A POWER-SERIES (ABRAMOWITZ/STEGUN, EQUATION (7.1.5), P.297)
C  N IS THE MINIMUM NUMBER OF TERMS NEEDED TO OBTAIN THE REQUIRED
C  ACCURACY
C
        QRHO = (1-0.85*Y)*DSQRT(QRHO)
        N = IDNINT(6 + 72*QRHO)
        J = 2*N+1
        XSUM = 1.0/J
        YSUM = 0.0D0
        DO 10 I=N, 1, -1
          J = J - 2
          XAUX = (XSUM*XQUAD - YSUM*YQUAD)/I
          YSUM = (XSUM*YQUAD + YSUM*XQUAD)/I
          XSUM = XAUX + 1.0/J
 10     CONTINUE
        U1 = -FACTOR*(XSUM*YABS + YSUM*XABS) + 1.0
        V1 = FACTOR*(XSUM*XABS - YSUM*YABS)
        DAUX = DEXP(-XQUAD)
        U2 = DAUX*DCOS(YQUAD)
        V2 = -DAUX*DSIN(YQUAD)
*
        U = U1*U2 - V1*V2
        V = U1*V2 + V1*U2
*
      ELSE
C
C  IF (QRHO.GT.1.0) THEN W(Z) IS EVALUATED USING THE LAPLACE
C  CONTINUED FRACTION
C  NU IS THE MINIMUM NUMBER OF TERMS NEEDED TO OBTAIN THE REQUIRED
C  ACCURACY
C
C  IF ((QRHO.GT.0.085264D0).AND.(QRHO.LT.1.0)) THEN W(Z) IS EVALUATED
C  BY A TRUNCATED TAYLOR EXPANSION, WHERE THE LAPLACE CONTINUED FRACTION
C  IS USED TO CALCULATE THE DERIVATIVES OF W(Z)
C  KAPN IS THE MINIMUM NUMBER OF TERMS IN THE TAYLOR EXPANSION NEEDED
C  TO OBTAIN THE REQUIRED ACCURACY
C  NU IS THE MINIMUM NUMBER OF TERMS OF THE CONTINUED FRACTION NEEDED
C  TO CALCULATE THE DERIVATIVES WITH THE REQUIRED ACCURACY
C
*
        IF (QRHO.GT.1.0) THEN
          H = 0.0D0
          KAPN = 0
          QRHO = DSQRT(QRHO)
          NU = IDINT(3 + (1442/(26*QRHO+77)))
        ELSE
          QRHO = (1-Y)*DSQRT(1-QRHO)
          H = 1.88*QRHO
          H2 = 2*H
          KAPN = IDNINT(7 + 34*QRHO)
          NU = IDNINT(16 + 26*QRHO)
        ENDIF
*
        B = (H.GT.0.0)
*
        IF (B) QLAMBDA = H2**KAPN
*
        RX = 0.0
        RY = 0.0
        SX = 0.0
        SY = 0.0
*
C
C  BACKWARD RECURRENCE: THE CONTINUED-FRACTION APPROXIMANTS (RX,RY)
C  AND, WHEN H.GT.0, THE TAYLOR SUMS (SX,SY) ARE BUILT FROM THE TAIL
C  (N = NU) DOWN TO THE HEAD (N = 0)
C
        DO 11 N=NU, 0, -1
          NP1 = N + 1
          TX = YABS + H + NP1*RX
          TY = XABS - NP1*RY
          C = 0.5/(TX**2 + TY**2)
          RX = C*TX
          RY = C*TY
          IF ((B).AND.(N.LE.KAPN)) THEN
            TX = QLAMBDA + SX
            SX = RX*TX - RY*SY
            SY = RY*TX + RX*SY
            QLAMBDA = QLAMBDA/H2
          ENDIF
 11     CONTINUE
*
        IF (H.EQ.0.0) THEN
          U = FACTOR*RX
          V = FACTOR*RY
        ELSE
          U = FACTOR*SX
          V = FACTOR*SY
        END IF
*
        IF (YABS.EQ.0.0) U = DEXP(-XABS**2)
*
      END IF
*
*
C
C  EVALUATION OF W(Z) IN THE OTHER QUADRANTS
C
*
      IF (YI.LT.0.0) THEN
*
        IF (A) THEN
          U2 = 2*U2
          V2 = 2*V2
        ELSE
          XQUAD = -XQUAD
*
C
C         THE FOLLOWING IF-STATEMENT PROTECTS 2*EXP(-Z**2)
C         AGAINST OVERFLOW
C
          IF ((YQUAD.GT.RMAXGONI).OR.
     *        (XQUAD.GT.RMAXEXP)) GOTO 100
*
          W1 = 2*DEXP(XQUAD)
          U2 = W1*DCOS(YQUAD)
          V2 = -W1*DSIN(YQUAD)
        END IF
*
        U = U2 - U
        V = V2 - V
        IF (XI.GT.0.0) V = -V
      ELSE
        IF (XI.LT.0.0) V = -V
      END IF
*
      RETURN
*
C
C  ERROR EXIT: AN INTERMEDIATE RESULT WOULD OVERFLOW; ONLY FLAG IS
C  SET AND U, V ARE LEFT UNDEFINED
C
  100 FLAG = .TRUE.
      RETURN
*
      END
|
{"hexsha": "e44ca90816029a4b60166b42f3dff3695e8d2dce", "size": 6385, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "fortran/algo-680-erf.f", "max_stars_repo_name": "lloda/slatec-bessel-c-", "max_stars_repo_head_hexsha": "1140750c3b5374573f92190753e7535ad066e0e7", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-07T03:23:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-07T03:23:57.000Z", "max_issues_repo_path": "fortran/algo-680-erf.f", "max_issues_repo_name": "lloda/slatec-bessel-cpp", "max_issues_repo_head_hexsha": "1140750c3b5374573f92190753e7535ad066e0e7", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fortran/algo-680-erf.f", "max_forks_repo_name": "lloda/slatec-bessel-cpp", "max_forks_repo_head_hexsha": "1140750c3b5374573f92190753e7535ad066e0e7", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6322869955, "max_line_length": 72, "alphanum_fraction": 0.5600626468, "num_tokens": 2436}
|
import unittest
from mixture_net.utils import nnelu, register_custom_activation
from mixture_net.model import MDN
import numpy as np
from sklearn.model_selection import train_test_split
from mixture_net.losses import gnll_loss
class TestModel(unittest.TestCase):
def setUp(self):
samples = int(100)
x_data = np.random.sample(samples)[:, np.newaxis].astype(np.float32)
y_data = np.add(5 * x_data, np.multiply((x_data) ** 2, np.random.standard_normal(x_data.shape)))
self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(x_data, y_data,
test_size=0.5, random_state=42)
def test_param_instantiation(self):
register_custom_activation('nnelu', nnelu)
tiny_net = MDN(neurons=2, components=3)
self.assertEqual(tiny_net.neurons, 2)
self.assertEqual(tiny_net.components, 3)
def test_model_build(self):
register_custom_activation('nnelu', nnelu)
net = MDN(neurons=2, components=1)
net.compile(loss=gnll_loss(3, 1), optimizer='adam')
net.fit(x=self.x_train, y=self.y_train, epochs=10, batch_size=128)
self.assertTrue(net.built)
|
{"hexsha": "4f7dfe74923fa3fbeddebac95f24afe542457c2e", "size": 1237, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_model.py", "max_stars_repo_name": "arrigonialberto86/mixture_nets", "max_stars_repo_head_hexsha": "9965ac9c7f378eb7d7e6277e609574344602152b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_model.py", "max_issues_repo_name": "arrigonialberto86/mixture_nets", "max_issues_repo_head_hexsha": "9965ac9c7f378eb7d7e6277e609574344602152b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_model.py", "max_forks_repo_name": "arrigonialberto86/mixture_nets", "max_forks_repo_head_hexsha": "9965ac9c7f378eb7d7e6277e609574344602152b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9032258065, "max_line_length": 111, "alphanum_fraction": 0.6677445432, "include": true, "reason": "import numpy", "num_tokens": 292}
|
[STATEMENT]
lemma Bag_s_mul_ext:
"(Bag xs, Bag ys) \<in> s_mul_ext {(x, y). snd (f x y)} {(x, y). fst (f x y)} \<longleftrightarrow>
fst (mul_ext f (ass_list_to_single_list (DAList.impl_of xs)) (ass_list_to_single_list (DAList.impl_of ys)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((Bag xs, Bag ys) \<in> s_mul_ext {(x, y). snd (f x y)} {(x, y). fst (f x y)}) = fst (mul_ext f (ass_list_to_single_list (alist.impl_of xs)) (ass_list_to_single_list (alist.impl_of ys)))
[PROOF STEP]
by (auto simp: mul_ext_def Let_def Alist_impl_of)
|
{"llama_tokens": 254, "file": "Weighted_Path_Order_Multiset_Extension2_Impl", "length": 1}
|
import numpy as np
import tensorflow as tf
# Per-channel RGB mean of the DIV2K dataset, scaled to pixel range [0, 255].
DIV2K_RGB_MEAN = np.array([0.4488, 0.4371, 0.4040]) * 255
def normalize(x, rgb_mean=DIV2K_RGB_MEAN):
    """Center `x` on the DIV2K channel means and scale into roughly [-1, 1]."""
    centered = x - rgb_mean
    return centered / 127.5
def denormalize(x, rgb_mean=DIV2K_RGB_MEAN):
    """Invert `normalize`: scale back to pixel range and re-add the means."""
    return x * 127.5 + rgb_mean
def pixel_shuffle(scale):
    """Return a callable that rearranges depth into spatial blocks of
    size `scale` (sub-pixel upsampling via tf.nn.depth_to_space)."""
    def _shuffle(x):
        return tf.nn.depth_to_space(x, scale)
    return _shuffle
|
{"hexsha": "803989a8b549c188f5c951f5ad75eb9f384b8a1b", "size": 336, "ext": "py", "lang": "Python", "max_stars_repo_path": "Algorithms/EDSR/common.py", "max_stars_repo_name": "TheStarkor/SuperResolution", "max_stars_repo_head_hexsha": "823aa004b15d1477f685b31bef0c3d8e181741bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Algorithms/EDSR/common.py", "max_issues_repo_name": "TheStarkor/SuperResolution", "max_issues_repo_head_hexsha": "823aa004b15d1477f685b31bef0c3d8e181741bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-03-24T18:01:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:22:39.000Z", "max_forks_repo_path": "Algorithms/EDSR/common.py", "max_forks_repo_name": "TheStarkor/SuperResolution", "max_forks_repo_head_hexsha": "823aa004b15d1477f685b31bef0c3d8e181741bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8461538462, "max_line_length": 57, "alphanum_fraction": 0.7202380952, "include": true, "reason": "import numpy", "num_tokens": 116}
|
import logging
import sys
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from neural_punctuator.base.BaseTrainer import BaseTrainer
from neural_punctuator.data.dataloader import BertDataset, collate, get_data_loaders, get_datasets
from neural_punctuator.models.BertPunctuator import BertPunctuator
from torch.optim import AdamW # TODO
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from neural_punctuator.utils.data import get_target_weights
from neural_punctuator.utils.io import save, load
from neural_punctuator.utils.loss import WeightedBinaryCrossEntropy
from neural_punctuator.utils.metrics import get_total_grad_norm, get_eval_metrics
from neural_punctuator.utils.tensorboard import print_metrics
from neural_punctuator.utils.scheduler import LinearScheduler
import numpy as np
# Fix the torch/numpy seeds and force deterministic cuDNN kernels so that
# training runs are reproducible.
torch.manual_seed(69)
np.random.seed(69)
torch.backends.cudnn.deterministic = True
# Route this module's log records to stdout with a timestamp/level prefix.
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)-9s %(message)s'))
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(handler)
class BertPunctuatorTrainer(BaseTrainer):
    """Runs the train/validation loop for the BERT-based punctuation model.

    All moving parts (device, datasets, loss, optimizer, LR schedule and the
    optional TensorBoard writer) are assembled in ``__init__`` from the
    supplied config; ``train`` then alternates training and validation
    epochs and checkpoints the model after each one.
    """
    def __init__(self, model, preprocessor, config):
        """Build the trainer from `config`.

        Exits the process (exit code 1) when the configured loss or
        optimizer name is not supported.
        """
        super().__init__(model, preprocessor, config)
        # Select the device; the config value is used directly as the device
        # spec (e.g. a CUDA device string), otherwise fall back to CPU.
        if self._config.trainer.use_gpu:
            self.device = torch.device(self._config.trainer.use_gpu)
            torch.cuda.set_device(self.device)
        else:
            self.device = torch.device('cpu')
        self.train_dataset, self.valid_dataset = get_datasets(config)
        self.train_loader, self.valid_loader = get_data_loaders(self.train_dataset, self.valid_dataset, config)
        self.model = model.to(self.device)
        self.model.train()
        # Class-weighted NLL loss; weights are clamped at 1, so frequent
        # classes are down-weighted but rare ones are never boosted above 1.
        # reduction='none' keeps per-token losses for the masking below.
        if self._config.trainer.loss == 'NLLLoss':
            target_weights = torch.Tensor(get_target_weights(self.train_dataset.targets,
                                                             self._config.model.num_classes)).clamp_max(1).to(self.device)
            self.criterion = nn.NLLLoss(weight=target_weights, reduction='none')
        else:
            log.error('Please provide a proper loss function')
            exit(1)
        # Two parameter groups: the pretrained base and the classifier head
        # train with separate learning rates.
        optimizer_args = [
            {'params': self.model.base.parameters(), 'lr': self._config.trainer.base_learning_rate},
            {'params': self.model.classifier.parameters(), 'lr': self._config.trainer.classifier_learning_rate}
        ]
        if self._config.trainer.optimizer == 'adam':
            self.optimizer = torch.optim.Adam(optimizer_args)
        elif self._config.trainer.optimizer == 'adamw':
            self.optimizer = torch.optim.AdamW(optimizer_args)
        else:
            log.error('Please provide a proper optimizer')
            exit(1)
        if self._config.trainer.load_model:
            load(self.model, self.optimizer, self._config)
        # TODO: add to config
        self.sched = LinearScheduler(self.optimizer, self._config.trainer.warmup_steps)
        # Flatten all validation targets once up front; -1 entries mark
        # positions that are not word-final (see the masks in train()) and
        # are excluded from evaluation.
        self.all_valid_target = np.concatenate([targets.numpy() for _, targets in self.valid_loader])
        self.all_valid_target = self.all_valid_target[self.all_valid_target != -1]
        if self._config.debug.summary_writer:
            self.summary_writer = SummaryWriter(comment=self._config.experiment.name)
            #TODO: self.summary_writer.add_hparams(self._config.toDict(), {})
        else:
            self.summary_writer = None
    def train(self):
        """Train for the configured number of epochs; after each epoch run
        validation, log metrics and save a checkpoint."""
        printer_counter = 0  # global step counter used for metric logging
        for epoch_num in range(self._config.trainer.num_epochs):
            log.info(f"Epoch #{epoch_num}")
            # Train loop
            self.model.train()
            pbar = tqdm(self.train_loader)
            for data in pbar:
                self.optimizer.zero_grad()
                text, targets = data
                preds, binary_preds = self.model(text.to(self.device))  # binary_preds is unused here
                # preds = preds[:, self._config.trainer.clip_seq: -self._config.trainer.clip_seq, :]
                # targets = targets[:, self._config.trainer.clip_seq:-self._config.trainer.clip_seq]
                # Mask some "empty" targets: keep every punctuated position
                # but only a random ~10% of the unpunctuated (class-0) ones.
                mask = ((targets == 0) & (np.random.rand(*targets.shape) < .1)) | (targets > 0)
                mask = mask.to(self.device)
                # Do not predict output after tokens which are not the end of a word
                not_a_word_mask = (targets == -1).to(self.device)
                word_mask = ~not_a_word_mask
                targets[not_a_word_mask] = 0
                losses = self.criterion(preds.reshape(-1, self._config.model.num_classes),
                                        targets.to(self.device).reshape(-1))
                mask = word_mask * mask
                # losses = mask.view(-1).to(self.device) * losses
                # loss = losses.sum() / mask.sum()
                # NOTE(review): the masked-loss lines above are commented out,
                # so `mask` currently has no effect on the loss at all.
                loss = losses.mean()
                loss.backward()
                nn.utils.clip_grad_norm_(self.model.parameters(), self._config.trainer.grad_clip)
                self.optimizer.step()
                self.sched.step()
                loss = loss.item()
                grads = get_total_grad_norm(self.model.parameters())
                pbar.set_postfix({"loss": loss, "grads": grads})
                print_metrics(printer_counter,
                              {"loss": loss, "grads": grads},
                              self.summary_writer, 'train',
                              model_name=self._config.model.name)
                printer_counter += 1
                if self._config.debug.break_train_loop:
                    break
            # Valid loop
            self.model.eval()
            valid_loss = 0
            all_valid_preds = []
            for data in tqdm(self.valid_loader):
                text, targets = data
                with torch.no_grad():
                    preds, _ = self.model(text.to(self.device))
                # Evaluate only word-final positions (targets != -1), matching
                # the filtering applied to all_valid_target in __init__.
                word_mask = targets != -1
                preds = preds[word_mask]
                targets = targets[word_mask]
                loss = self.criterion(preds.view(-1, self._config.model.num_classes), targets.to(self.device).view(-1))
                valid_loss += loss.mean().item()
                all_valid_preds.append(preds.detach().cpu().numpy())
            valid_loss /= len(self.valid_loader)
            all_valid_preds = np.concatenate(all_valid_preds)
            metrics = get_eval_metrics(self.all_valid_target, all_valid_preds, self._config)
            metrics["loss"] = valid_loss
            print_metrics(printer_counter, metrics, self.summary_writer, 'valid',
                          model_name=self._config.model.name)
            # Save model every epoch
            save(self.model, self.optimizer, epoch_num+1, metrics, self._config)
|
{"hexsha": "599682f51a622c2d45b19cafecb18e38b547808a", "size": 6828, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/neural_punctuator/trainers/BertPunctuatorTrainer.py", "max_stars_repo_name": "juliandarley/neural-punctuator", "max_stars_repo_head_hexsha": "2b3ff7e052380ec463b90a74c6960e1e90515c05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2021-01-15T11:31:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:56:25.000Z", "max_issues_repo_path": "src/neural_punctuator/trainers/BertPunctuatorTrainer.py", "max_issues_repo_name": "juliandarley/neural-punctuator", "max_issues_repo_head_hexsha": "2b3ff7e052380ec463b90a74c6960e1e90515c05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-02-05T13:17:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-03T03:01:05.000Z", "max_forks_repo_path": "src/neural_punctuator/trainers/BertPunctuatorTrainer.py", "max_forks_repo_name": "juliandarley/neural-punctuator", "max_forks_repo_head_hexsha": "2b3ff7e052380ec463b90a74c6960e1e90515c05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2021-01-29T18:18:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T23:13:50.000Z", "avg_line_length": 40.1647058824, "max_line_length": 122, "alphanum_fraction": 0.6187756298, "include": true, "reason": "import numpy", "num_tokens": 1375}
|
from notebooks.profiles import BaseProfile
import numpy as np
import pandas as pd
import math
from io import BytesIO
import pickle
class VotingProfile(BaseProfile):
    """Author profile that flags suspect sentences by distance voting.

    The profile stores the author's sentence vectors, their mean, and a
    distance threshold chosen so that a fraction `p` of the author's own
    sentences (measured leave-one-out) falls below it.  A suspect sentence
    "votes" suspicious when its distance to the stored mean exceeds the
    threshold.
    """
    def __init__(self, p=None, bytesIO=None):
        """Create a blank profile, or restore one from `bytesIO`.

        p       -- quantile used to pick the distance threshold (ignored
                   when restoring a non-empty serialized profile, which
                   carries its own `p`).
        bytesIO -- buffer produced by the `binary` property; when it holds
                   a trained profile, all state is restored from it.
        """
        # Initialize the blank state exactly once (the original repeated
        # these four assignments in three separate branches); a serialized
        # trained profile, if present, then overrides it.
        self._p = p
        self._mean = None
        self._threshold = None
        self._author_sentences = None
        if bytesIO is not None:
            state_dict = pickle.load(bytesIO)
            if state_dict["mean"] is not None:
                mean_bytes = BytesIO(state_dict["mean"])
                sentences_bytes = BytesIO(state_dict["sentences"])
                mean_bytes.seek(0)
                sentences_bytes.seek(0)
                self._mean = np.load(mean_bytes)
                self._author_sentences = np.load(sentences_bytes)
                self._threshold = state_dict["threshold"]
                self._p = state_dict["p"]
    def _feed(self, author_texts):
        """Append the author's sentence vectors and refit mean + threshold."""
        if self._author_sentences is not None:
            self._author_sentences = np.concatenate([self._author_sentences, author_texts.to_numpy()])
        else:
            self._author_sentences = author_texts.to_numpy()
        # Threshold = p-quantile of the author's own leave-one-out distances.
        excluded_distances = self._excluded_distances(self._author_sentences)
        sorted_distances = np.sort(excluded_distances)
        self._threshold = sorted_distances[math.floor(len(sorted_distances) * self._p)]
        self._mean = np.mean(self._author_sentences, axis=0)
    def _distances(self, suspect_texts):
        """Return, per text, the fraction of its sentences flagged suspicious."""
        flags = self.sentence_flags(suspect_texts)
        return flags.groupby(level=-2).mean()
    def sentence_flags(self, suspect_texts):
        """Flag each suspect sentence whose distance to the profile mean
        exceeds the fitted threshold.  Accepts an ndarray or a DataFrame."""
        if isinstance(suspect_texts, np.ndarray):
            suspect_texts = pd.DataFrame(suspect_texts)
        diffs = self._mean - suspect_texts
        distances = pd.DataFrame(np.linalg.norm(diffs, axis=1), index=diffs.index)
        return distances > self._threshold
    def _ready(self):
        """True once the profile has been fed at least one batch."""
        return self._author_sentences is not None
    def _reset(self):
        """Drop all fitted state, returning the profile to its blank form."""
        self._author_sentences = None
        self._threshold = None
        self._mean = None
    def _excluded_distances(self, matrix):
        """Leave-one-out distance of each row to the mean of the other rows."""
        count = len(matrix)
        result = np.zeros(shape=[count])
        for i in range(count):
            other_sentences = np.delete(matrix, i, axis=0)
            other_mean = np.mean(other_sentences, axis=0)
            result[i] = np.linalg.norm(matrix[i] - other_mean)
        return result
    @property
    def binary(self):
        """Serialize the profile into a BytesIO (inverse of the restore
        branch in ``__init__``).  An unfitted profile serializes with
        `mean`/`sentences` set to None."""
        mean_bytes = BytesIO()
        sentences_bytes = BytesIO()
        if self._mean is not None:
            np.save(mean_bytes, self._mean)
            np.save(sentences_bytes, self._author_sentences)
            mean_bytes = mean_bytes.getvalue()
            sentences_bytes = sentences_bytes.getvalue()
        else:
            mean_bytes = None
            sentences_bytes = None
        state_dict = {"mean": mean_bytes, "sentences": sentences_bytes, "threshold": self._threshold, "p": self._p}
        state_bytes = BytesIO()
        pickle.dump(state_dict, state_bytes)
        state_bytes.seek(0)
        return state_bytes
|
{"hexsha": "a23c414a336d1bb631ec775bbf1476168bcf9064", "size": 3300, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/notebooks/profiles/_voting_profile.py", "max_stars_repo_name": "grchristensen/avpd", "max_stars_repo_head_hexsha": "f7617844ae454a93825aa231e04c125cb4e58a20", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/notebooks/profiles/_voting_profile.py", "max_issues_repo_name": "grchristensen/avpd", "max_issues_repo_head_hexsha": "f7617844ae454a93825aa231e04c125cb4e58a20", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-03-04T20:29:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-31T22:03:51.000Z", "max_forks_repo_path": "notebooks/notebooks/profiles/_voting_profile.py", "max_forks_repo_name": "grchristensen/avpd", "max_forks_repo_head_hexsha": "f7617844ae454a93825aa231e04c125cb4e58a20", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-01-30T02:19:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-11T19:48:37.000Z", "avg_line_length": 30.8411214953, "max_line_length": 115, "alphanum_fraction": 0.6136363636, "include": true, "reason": "import numpy", "num_tokens": 701}
|
%%=======================================================
%% Chapter 4: JOST SOLUTIONS \& THE DIRECT SCATTERING MAP
%%=======================================================
\documentclass[../dissertation.tex]{subfiles}
\begin{document}
\chapter{\chfourtitle}\label{cptr04:DM}
%%==========================
%% Section 4.0: Introduction
%%==========================
% \subfile{Chapter03-Jost/3.00-Intro} %If you are using the 2018 version of TeXLive, uncomment this line
\subfile{4.0-Intro}
%%================================
%% Section 4.1: Integral Equations
%%================================
% \subfile{Chapter03-Jost/3.01-IE}
\subfile{4.1-IE}
%%==============================
%% Section 4.2: Functional Setup (Temporary Title)
%%==============================
\subfile{4.2-Equiv}
%%=======================================
%% Section 4.3: The Direct Scattering Map
%%=======================================
\subfile{4.3-DM}
\end{document}
|
{"hexsha": "e0d7e35c2b2d18af17921607114725f585d5ff10", "size": 956, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Chapter4-Jost/Chapter4-Jost.tex", "max_stars_repo_name": "ADGC/ilw-dsm-dissertation", "max_stars_repo_head_hexsha": "de0f27b6389ee55c24d155ff482743acbe6a35a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapter4-Jost/Chapter4-Jost.tex", "max_issues_repo_name": "ADGC/ilw-dsm-dissertation", "max_issues_repo_head_hexsha": "de0f27b6389ee55c24d155ff482743acbe6a35a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter4-Jost/Chapter4-Jost.tex", "max_forks_repo_name": "ADGC/ilw-dsm-dissertation", "max_forks_repo_head_hexsha": "de0f27b6389ee55c24d155ff482743acbe6a35a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.2444444444, "max_line_length": 100, "alphanum_fraction": 0.4215481172, "num_tokens": 224}
|
section \<open>
  Equivalence of a Diamond-Shaped Forwarding Praos Network and a Cross-Shaped Broadcasting Praos
  Network
\<close>
theory Ouroboros_Praos_Forwarding_Broadcasting_Equivalence
  imports
    "Chi_Calculus_Examples.Network_Equivalences-Forwarding_Broadcasting"
    Ouroboros_Praos_Implementation
begin
(*
  Four Praos stakeholders, each wired to its send/receive channel pair, behave
  the same whether the channels are connected in the diamond-shaped forwarding
  topology or the cross-shaped broadcasting topology.  The proof lifts the
  generic topology equivalence `diamond_cross_equivalence` to this setting via
  the `equivalence` proof method.
*)
theorem praos_diamond_cross_equivalence:
  assumes "
    (s\<^sub>1, s\<^sub>2, s\<^sub>3, s\<^sub>4) =
    (untyped_channel ts\<^sub>1, untyped_channel ts\<^sub>2, untyped_channel ts\<^sub>3, untyped_channel ts\<^sub>4)"
  and "
    (r\<^sub>1, r\<^sub>2, r\<^sub>3, r\<^sub>4) =
    (untyped_channel tr\<^sub>1, untyped_channel tr\<^sub>2, untyped_channel tr\<^sub>3, untyped_channel tr\<^sub>4)"
  and "
    args = [
      (U\<^sub>1, sk\<^sub>1\<^sub>_\<^sub>v\<^sub>r\<^sub>f, sk\<^sub>1\<^sub>_\<^sub>k\<^sub>e\<^sub>s, sk\<^sub>1\<^sub>_\<^sub>d\<^sub>s\<^sub>i\<^sub>g, ts\<^sub>1, tr\<^sub>1), (U\<^sub>2, sk\<^sub>2\<^sub>_\<^sub>v\<^sub>r\<^sub>f, sk\<^sub>2\<^sub>_\<^sub>k\<^sub>e\<^sub>s, sk\<^sub>2\<^sub>_\<^sub>d\<^sub>s\<^sub>i\<^sub>g, ts\<^sub>2, tr\<^sub>2),
      (U\<^sub>3, sk\<^sub>3\<^sub>_\<^sub>v\<^sub>r\<^sub>f, sk\<^sub>3\<^sub>_\<^sub>k\<^sub>e\<^sub>s, sk\<^sub>3\<^sub>_\<^sub>d\<^sub>s\<^sub>i\<^sub>g, ts\<^sub>3, tr\<^sub>3), (U\<^sub>4, sk\<^sub>4\<^sub>_\<^sub>v\<^sub>r\<^sub>f, sk\<^sub>4\<^sub>_\<^sub>k\<^sub>e\<^sub>s, sk\<^sub>4\<^sub>_\<^sub>d\<^sub>s\<^sub>i\<^sub>g, ts\<^sub>4, tr\<^sub>4)]"
  and "
    shs =
    \<Prod>(U\<^sub>i, sk\<^sub>i\<^sub>_\<^sub>v\<^sub>r\<^sub>f, sk\<^sub>i\<^sub>_\<^sub>k\<^sub>e\<^sub>s, sk\<^sub>i\<^sub>_\<^sub>d\<^sub>s\<^sub>i\<^sub>g, ts\<^sub>i, tr\<^sub>i)\<leftarrow>args. stakeholder U\<^sub>i G sk\<^sub>i\<^sub>_\<^sub>v\<^sub>r\<^sub>f sk\<^sub>i\<^sub>_\<^sub>k\<^sub>e\<^sub>s sk\<^sub>i\<^sub>_\<^sub>d\<^sub>s\<^sub>i\<^sub>g (ts\<^sub>i, tr\<^sub>i)"
  shows "
    shs \<parallel>
    \<currency>\<^sup>?r\<^sub>0 \<parallel> \<currency>\<^sup>?r\<^sub>1 \<parallel> \<currency>\<^sup>?r\<^sub>2 \<parallel> \<currency>\<^sup>?r\<^sub>3 \<parallel>
    diamond s\<^sub>0 s\<^sub>1 s\<^sub>2 s\<^sub>3 r\<^sub>0 r\<^sub>1 r\<^sub>2 r\<^sub>3
    \<approx>\<^sub>\<sharp>
    shs \<parallel>
    \<currency>\<^sup>?r\<^sub>0 \<parallel> \<currency>\<^sup>?r\<^sub>1 \<parallel> \<currency>\<^sup>?r\<^sub>2 \<parallel> \<currency>\<^sup>?r\<^sub>3 \<parallel>
    cross s\<^sub>0 s\<^sub>1 s\<^sub>2 s\<^sub>3 r\<^sub>0 r\<^sub>1 r\<^sub>2 r\<^sub>3"
  using diamond_cross_equivalence by equivalence
end
|
{"author": "input-output-hk", "repo": "ouroboros-high-assurance", "sha": "f1b63cb176b119183bcbe14786cd5a61e0c5bf97", "save_path": "github-repos/isabelle/input-output-hk-ouroboros-high-assurance", "path": "github-repos/isabelle/input-output-hk-ouroboros-high-assurance/ouroboros-high-assurance-f1b63cb176b119183bcbe14786cd5a61e0c5bf97/src/Ouroboros_Praos_Forwarding_Broadcasting_Equivalence.thy"}
|
using Gtk
using Test
using DrugInteractions
# Disable app persistence so the window created below does not keep the test
# process alive (presumably _apps_should_persist gates this behaviour —
# verify against the DrugInteractions package internals).
DrugInteractions._apps_should_persist[1] = false
# Launch the GUI once; the package appears to register created apps in
# DrugInteractions._apps (checked by the assertion below).
drug_interactions_app()
# The most recently registered app should be a GtkWindow.
@test DrugInteractions._apps[end] isa GtkWindow
|
{"hexsha": "a82fd3997f5d79aa1fbfbd7c3f47dd77a959368a", "size": 173, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "wherrera10/DrugInteractions.jl", "max_stars_repo_head_hexsha": "1a1ba27d64f48e0ca844b50d928c47d1d3c5044d", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-27T06:20:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-27T06:20:20.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "wherrera10/DrugInteractions.jl", "max_issues_repo_head_hexsha": "1a1ba27d64f48e0ca844b50d928c47d1d3c5044d", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "wherrera10/DrugInteractions.jl", "max_forks_repo_head_hexsha": "1a1ba27d64f48e0ca844b50d928c47d1d3c5044d", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.3, "max_line_length": 48, "alphanum_fraction": 0.8150289017, "num_tokens": 42}
|
import numpy as np
from tensorflow.compat.v1 import set_random_seed
import os
from mp3_to_wav import MP3Processor
from wav_to_spectrogram import WavProcessor
from autoencoder_network import Autoencoder
from cluster_latent_features import HClust
import matplotlib.pyplot as plt
# Paths and external tooling for the MP3 -> WAV -> spectrogram pipeline.
config = {
    "mp3_file_dir": "dsilt-ml-code/09 Generative Models/song_similarity/audio_mp3s/",
    "wav_file_dir": "dsilt-ml-code/09 Generative Models/song_similarity/audio_wavs/",
    "metadata_dir": "dsilt-ml-code/09 Generative Models/song_similarity/audio_metadata/",
    "ffmpeg_dir": "C:/Program Files (x86)/FFmpeg/bin/ffmpeg.exe"
}
# Training hyperparameters for the spectrogram autoencoder.
autoencoder_config = {
    "random_seed": 14,
    "validation_split_perc": 0.1,
    "learning_rate": 1.0,
    "epochs": 10,
    "batch_size": 6,
    "log_save_path": "dsilt-ml-code/09 Generative Models/song_similarity/logs/"
}
# Hierarchical-clustering settings used when grouping songs by latent features.
cluster_config = {
    'linkage_method': 'ward',
    'distance_metric': 'euclidean'
}
# Seed NumPy and TensorFlow so runs are reproducible.
np.random.seed(autoencoder_config['random_seed'])
set_random_seed(autoencoder_config['random_seed'])
def plot_spectrogram(spec, times, freqs, scaling_factor=10):
    """Display a spectrogram as a pseudocolor mesh.

    Args:
        spec: 2-D spectrogram array, or a 3-D array whose first axis is a
            channel axis (only the first channel is drawn).
        times: x-axis coordinates (seconds).
        freqs: y-axis coordinates (Hz).
        scaling_factor: multiplier applied to the values before plotting.
    """
    if spec.ndim > 2:
        spec = spec[0]  # pcolormesh can only draw a single channel
    plt.pcolormesh(times, freqs, spec * scaling_factor)
    plt.ylabel('Frequency [Hz]')
    plt.xlabel('Time [sec]')
    plt.title('Log Scaled Frequency Spectrogram')
    plt.show()
# --- Stage 1: mp3 -> wav conversion and spectrogram extraction ---
mp3Processor = MP3Processor(config['mp3_file_dir'], config['wav_file_dir'],
                            config['metadata_dir'], config['ffmpeg_dir'])
tags = mp3Processor.convertFiles()  # metadata returned by the conversion step
wavProcessor = WavProcessor(config['wav_file_dir'])
labels, specs, times, freqs = wavProcessor.create_spectrograms()
# Sanity-check: visualize the first spectrogram with its channel axis dropped.
plot_spectrogram(specs[0].reshape(specs[0].shape[0], specs[0].shape[1]), times[0], freqs[0])
# Split training/validation sets, using last val_size observations for validation
val_size = int(round(autoencoder_config['validation_split_perc']*len(specs), 0))
x_train = specs[:specs.shape[0]-val_size, :]
x_valid = specs[specs.shape[0]-val_size:, :]
plot_spectrogram(x_valid[0].reshape(x_valid[0].shape[0], x_valid[0].shape[1]), times[0], freqs[0])
# Build and train model, using x_train as both the input x and target y
# (standard autoencoder reconstruction objective).
autoencoder_config['input_shape'] = specs[0].shape
autoencoder = Autoencoder(autoencoder_config)
autoencoder.construct()
print(autoencoder.model.summary())
autoencoder.fit_model(x_train, x_train,
                      x_valid, x_valid)
# Get model outputs: latent codes from the encoder half, reconstructions
# from the full autoencoder.
encoded_valid = autoencoder.encoder.predict(x_valid)
reconstructed_valid = autoencoder.model.predict(x_valid)
# Examine the encoding and reconstruction of 1 image
plt.imshow(encoded_valid[0].reshape(encoded_valid[0].shape[0]*encoded_valid[0].shape[2],
                                    encoded_valid[0].shape[1]).T)
plt.title('Encoded Latent Features of Spectrogram')
plt.show()
plot_spectrogram(reconstructed_valid[0].reshape(reconstructed_valid[0].shape[0],
                                                reconstructed_valid[0].shape[1]),
                 times[0], freqs[0])
# Cluster data to view similar songs based on latent acoustic features.
# Labels exclude the validation tail so they line up with encoded_train_flat.
encoded_train_flat = autoencoder.encoder_flat.predict(x_train)
encoded_valid_flat = autoencoder.encoder_flat.predict(x_valid)
cluster_model = HClust(encoded_train_flat,
                       cluster_config['linkage_method'],
                       cluster_config['distance_metric'],
                       labels[:len(labels)-val_size])
cluster_model.cluster()
cluster_model.plot_dendrogram()
|
{"hexsha": "c82f82ed20dce644bac4d5a8337134a20f7a0876", "size": 3497, "ext": "py", "lang": "Python", "max_stars_repo_path": "09 Generative Models/song_similarity/song_similarity_pipeline.py", "max_stars_repo_name": "nlinc1905/dsilt-ml-code", "max_stars_repo_head_hexsha": "d51fffd16e83f93ea7d49f65102e731abd3ba70c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "09 Generative Models/song_similarity/song_similarity_pipeline.py", "max_issues_repo_name": "nlinc1905/dsilt-ml-code", "max_issues_repo_head_hexsha": "d51fffd16e83f93ea7d49f65102e731abd3ba70c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "09 Generative Models/song_similarity/song_similarity_pipeline.py", "max_forks_repo_name": "nlinc1905/dsilt-ml-code", "max_forks_repo_head_hexsha": "d51fffd16e83f93ea7d49f65102e731abd3ba70c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8555555556, "max_line_length": 98, "alphanum_fraction": 0.717472119, "include": true, "reason": "import numpy", "num_tokens": 852}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : 河北雪域网络科技有限公司 A.Star
# @contact: astar@snowland.ltd
# @site: www.snowland.ltd
# @file: color.py
# @time: 2018/7/26 0:24
# @Software: PyCharm
import numpy as np
npa = np.array
def rgb2ycbcr(img):
    """Convert an RGB image (H, W, 3, channels-last) to YCbCr.

    Modelled on MATLAB's ``rgb2ycbcr`` (ITU-R BT.601): the transform
    matrix / offset pair is scaled according to the image dtype.

    Args:
        img (numpy.ndarray): RGB image of dtype uint8, uint16 or a float
            dtype (floats expected in [0, 1]).

    Returns:
        numpy.ndarray: YCbCr image of the same shape and dtype.  Integer
        results are rounded and saturated rather than truncated/wrapped.

    Raises:
        ValueError: if the dtype is not supported.
    """
    origT = np.array([[65.481, 128.553, 24.966],
                      [-37.797, -74.203, 112],
                      [112, -93.786, -18.214]])
    oriOffset = np.array([16.0, 128.0, 128.0])
    # Dispatch on the array *dtype*: the original isinstance() checks could
    # never match an ndarray, so every call raised ValueError (and np.float
    # was removed in NumPy 1.24).  The uint8 pair also had t/offset swapped:
    # MATLAB uses T/255 with offset*1 for uint8.
    if img.dtype == np.uint8:
        t = 1.0 / 255
        offset = 1.0
    elif np.issubdtype(img.dtype, np.floating):
        t = 1.0 / 255
        offset = 1.0 / 255
    elif img.dtype == np.uint16:
        t = 257.0 / 65535
        offset = 257.0
    else:
        raise ValueError('image type not support ')
    T = origT * t
    Offset = oriOffset * offset
    ycbcr = np.zeros(img.shape, dtype=img.dtype)
    for p in range(3):
        plane = T[p, 0] * img[:, :, 0] + T[p, 1] * img[:, :, 1] + T[p, 2] * img[:, :, 2] + Offset[p]
        if np.issubdtype(img.dtype, np.integer):
            # Round and saturate (as MATLAB does) instead of letting the
            # unsigned-int assignment truncate or wrap.
            plane = np.clip(np.round(plane), 0, np.iinfo(img.dtype).max)
        ycbcr[:, :, p] = plane
    return ycbcr
def ycbcr2rgb(img):
    """Convert a YCbCr image (H, W, 3, channels-last) back to RGB.

    Inverse of ``rgb2ycbcr`` and modelled on MATLAB's ``ycbcr2rgb``: the
    transform is the inverse of the ITU-R BT.601 matrix, scaled per dtype.

    Args:
        img (numpy.ndarray): YCbCr image of dtype uint8, uint16 or a float
            dtype (floats expected in [0, 1]).

    Returns:
        numpy.ndarray: RGB image of the same shape and dtype.  Float
        results are clipped to [0, 1]; integer results are rounded and
        saturated rather than truncated/wrapped.

    Raises:
        ValueError: if the dtype is not supported.
    """
    origT = np.array([[65.481, 128.553, 24.966],
                      [-37.797, -74.203, 112],
                      [112, -93.786, -18.214]])
    oriOffset = np.array([16.0, 128.0, 128.0])
    tinv = np.linalg.inv(origT)
    # Dispatch on the array *dtype*: the original isinstance() checks could
    # never match an ndarray, so every call raised ValueError (and np.float
    # was removed in NumPy 1.24).
    if img.dtype == np.uint8:
        t = 255.0
        offset = 255.0
    elif np.issubdtype(img.dtype, np.floating):
        t = 255.0
        offset = 1.0
    elif img.dtype == np.uint16:
        t = 65535 / 257.0
        offset = 65535.0
    else:
        raise ValueError('image type not support ')
    T = tinv * t
    Offset = offset * tinv.dot(oriOffset)
    rgb = np.zeros(img.shape, dtype=img.dtype)
    for p in range(3):
        plane = T[p, 0] * img[:, :, 0] + T[p, 1] * img[:, :, 1] + T[p, 2] * img[:, :, 2] - Offset[p]
        if np.issubdtype(img.dtype, np.integer):
            # Round and saturate; plain assignment would truncate, turning
            # e.g. 254.9999 into 254 and small negatives into wrap-around.
            plane = np.clip(np.round(plane), 0, np.iinfo(img.dtype).max)
        rgb[:, :, p] = plane
    # The original float clamp was guarded by an isinstance() test that never
    # fired; clamp whenever the output is a float image, as intended.
    if np.issubdtype(img.dtype, np.floating):
        rgb = np.clip(rgb, 0.0, 1.0)
    return rgb
|
{"hexsha": "d160b7b2ca0f3b9d6fc2db03537338e3e370d4cc", "size": 1862, "ext": "py", "lang": "Python", "max_stars_repo_path": "snowland/image/color/color.py", "max_stars_repo_name": "astar-club/scikit-snowland", "max_stars_repo_head_hexsha": "fc2e058f61fe44b3f065bcb4dc8de47f95055bfc", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "snowland/image/color/color.py", "max_issues_repo_name": "astar-club/scikit-snowland", "max_issues_repo_head_hexsha": "fc2e058f61fe44b3f065bcb4dc8de47f95055bfc", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "snowland/image/color/color.py", "max_forks_repo_name": "astar-club/scikit-snowland", "max_forks_repo_head_hexsha": "fc2e058f61fe44b3f065bcb4dc8de47f95055bfc", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3823529412, "max_line_length": 109, "alphanum_fraction": 0.5290010741, "include": true, "reason": "import numpy", "num_tokens": 685}
|
[STATEMENT]
lemma language_equivalence_classes_preserve_observability:
assumes "transitions M' = (\<lambda> t . ({q \<in> states M . LS M q = LS M (t_source t)} , t_input t, t_output t, {q \<in> states M . LS M q = LS M (t_target t)})) ` transitions M"
and "observable M"
shows "observable M'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. observable M'
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. observable M'
[PROOF STEP]
have "\<And> t1 t2 . t1 \<in> transitions M' \<Longrightarrow>
t2 \<in> transitions M' \<Longrightarrow>
t_source t1 = t_source t2 \<Longrightarrow>
t_input t1 = t_input t2 \<Longrightarrow>
t_output t1 = t_output t2 \<Longrightarrow>
t_target t1 = t_target t2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>t1 \<in> FSM.transitions M'; t2 \<in> FSM.transitions M'; t_source t1 = t_source t2; t_input t1 = t_input t2; t_output t1 = t_output t2\<rbrakk> \<Longrightarrow> t_target t1 = t_target t2
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>t1 \<in> FSM.transitions M'; t2 \<in> FSM.transitions M'; t_source t1 = t_source t2; t_input t1 = t_input t2; t_output t1 = t_output t2\<rbrakk> \<Longrightarrow> t_target t1 = t_target t2
[PROOF STEP]
fix t1 t2
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>t1 \<in> FSM.transitions M'; t2 \<in> FSM.transitions M'; t_source t1 = t_source t2; t_input t1 = t_input t2; t_output t1 = t_output t2\<rbrakk> \<Longrightarrow> t_target t1 = t_target t2
[PROOF STEP]
assume "t1 \<in> transitions M'" and "t2 \<in> transitions M'" and "t_source t1 = t_source t2" and "t_input t1 = t_input t2" and "t_output t1 = t_output t2"
[PROOF STATE]
proof (state)
this:
t1 \<in> FSM.transitions M'
t2 \<in> FSM.transitions M'
t_source t1 = t_source t2
t_input t1 = t_input t2
t_output t1 = t_output t2
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>t1 \<in> FSM.transitions M'; t2 \<in> FSM.transitions M'; t_source t1 = t_source t2; t_input t1 = t_input t2; t_output t1 = t_output t2\<rbrakk> \<Longrightarrow> t_target t1 = t_target t2
[PROOF STEP]
obtain t1' where t1'_def: "t1 = ({q \<in> states M . LS M q = LS M (t_source t1')} , t_input t1', t_output t1', {q \<in> states M . LS M q = LS M (t_target t1')})"
and "t1' \<in> transitions M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>t1'. \<lbrakk>t1 = ({q \<in> FSM.states M. LS M q = LS M (t_source t1')}, t_input t1', t_output t1', {q \<in> FSM.states M. LS M q = LS M (t_target t1')}); t1' \<in> FSM.transitions M\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using \<open>t1 \<in> transitions M'\<close> assms(1)
[PROOF STATE]
proof (prove)
using this:
t1 \<in> FSM.transitions M'
FSM.transitions M' = (\<lambda>t. ({q \<in> FSM.states M. LS M q = LS M (t_source t)}, t_input t, t_output t, {q \<in> FSM.states M. LS M q = LS M (t_target t)})) ` FSM.transitions M
goal (1 subgoal):
1. (\<And>t1'. \<lbrakk>t1 = ({q \<in> FSM.states M. LS M q = LS M (t_source t1')}, t_input t1', t_output t1', {q \<in> FSM.states M. LS M q = LS M (t_target t1')}); t1' \<in> FSM.transitions M\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t1 = ({q \<in> FSM.states M. LS M q = LS M (t_source t1')}, t_input t1', t_output t1', {q \<in> FSM.states M. LS M q = LS M (t_target t1')})
t1' \<in> FSM.transitions M
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>t1 \<in> FSM.transitions M'; t2 \<in> FSM.transitions M'; t_source t1 = t_source t2; t_input t1 = t_input t2; t_output t1 = t_output t2\<rbrakk> \<Longrightarrow> t_target t1 = t_target t2
[PROOF STEP]
obtain t2' where t2'_def: "t2 = ({q \<in> states M . LS M q = LS M (t_source t2')} , t_input t2', t_output t2', {q \<in> states M . LS M q = LS M (t_target t2')})"
and "t2' \<in> transitions M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>t2'. \<lbrakk>t2 = ({q \<in> FSM.states M. LS M q = LS M (t_source t2')}, t_input t2', t_output t2', {q \<in> FSM.states M. LS M q = LS M (t_target t2')}); t2' \<in> FSM.transitions M\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using \<open>t2 \<in> transitions M'\<close> assms(1) \<open>t_input t1 = t_input t2\<close> \<open>t_output t1 = t_output t2\<close>
[PROOF STATE]
proof (prove)
using this:
t2 \<in> FSM.transitions M'
FSM.transitions M' = (\<lambda>t. ({q \<in> FSM.states M. LS M q = LS M (t_source t)}, t_input t, t_output t, {q \<in> FSM.states M. LS M q = LS M (t_target t)})) ` FSM.transitions M
t_input t1 = t_input t2
t_output t1 = t_output t2
goal (1 subgoal):
1. (\<And>t2'. \<lbrakk>t2 = ({q \<in> FSM.states M. LS M q = LS M (t_source t2')}, t_input t2', t_output t2', {q \<in> FSM.states M. LS M q = LS M (t_target t2')}); t2' \<in> FSM.transitions M\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t2 = ({q \<in> FSM.states M. LS M q = LS M (t_source t2')}, t_input t2', t_output t2', {q \<in> FSM.states M. LS M q = LS M (t_target t2')})
t2' \<in> FSM.transitions M
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>t1 \<in> FSM.transitions M'; t2 \<in> FSM.transitions M'; t_source t1 = t_source t2; t_input t1 = t_input t2; t_output t1 = t_output t2\<rbrakk> \<Longrightarrow> t_target t1 = t_target t2
[PROOF STEP]
have "{q \<in> FSM.states M. LS M q = LS M (t_source t1')} = {q \<in> FSM.states M. LS M q = LS M (t_source t2')}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {q \<in> FSM.states M. LS M q = LS M (t_source t1')} = {q \<in> FSM.states M. LS M q = LS M (t_source t2')}
[PROOF STEP]
using t1'_def t2'_def \<open>t_source t1 = t_source t2\<close>
[PROOF STATE]
proof (prove)
using this:
t1 = ({q \<in> FSM.states M. LS M q = LS M (t_source t1')}, t_input t1', t_output t1', {q \<in> FSM.states M. LS M q = LS M (t_target t1')})
t2 = ({q \<in> FSM.states M. LS M q = LS M (t_source t2')}, t_input t2', t_output t2', {q \<in> FSM.states M. LS M q = LS M (t_target t2')})
t_source t1 = t_source t2
goal (1 subgoal):
1. {q \<in> FSM.states M. LS M q = LS M (t_source t1')} = {q \<in> FSM.states M. LS M q = LS M (t_source t2')}
[PROOF STEP]
by (metis (no_types, lifting) fst_eqD)
[PROOF STATE]
proof (state)
this:
{q \<in> FSM.states M. LS M q = LS M (t_source t1')} = {q \<in> FSM.states M. LS M q = LS M (t_source t2')}
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>t1 \<in> FSM.transitions M'; t2 \<in> FSM.transitions M'; t_source t1 = t_source t2; t_input t1 = t_input t2; t_output t1 = t_output t2\<rbrakk> \<Longrightarrow> t_target t1 = t_target t2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
{q \<in> FSM.states M. LS M q = LS M (t_source t1')} = {q \<in> FSM.states M. LS M q = LS M (t_source t2')}
[PROOF STEP]
have "LS M (t_source t1') = LS M (t_source t2')"
[PROOF STATE]
proof (prove)
using this:
{q \<in> FSM.states M. LS M q = LS M (t_source t1')} = {q \<in> FSM.states M. LS M q = LS M (t_source t2')}
goal (1 subgoal):
1. LS M (t_source t1') = LS M (t_source t2')
[PROOF STEP]
using fsm_transition_source[OF \<open>t1' \<in> transitions M\<close>] fsm_transition_source[OF \<open>t2' \<in> transitions M\<close>]
[PROOF STATE]
proof (prove)
using this:
{q \<in> FSM.states M. LS M q = LS M (t_source t1')} = {q \<in> FSM.states M. LS M q = LS M (t_source t2')}
t_source t1' \<in> FSM.states M
t_source t2' \<in> FSM.states M
goal (1 subgoal):
1. LS M (t_source t1') = LS M (t_source t2')
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
LS M (t_source t1') = LS M (t_source t2')
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>t1 \<in> FSM.transitions M'; t2 \<in> FSM.transitions M'; t_source t1 = t_source t2; t_input t1 = t_input t2; t_output t1 = t_output t2\<rbrakk> \<Longrightarrow> t_target t1 = t_target t2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
LS M (t_source t1') = LS M (t_source t2')
[PROOF STEP]
have "LS M (t_target t1') = LS M (t_target t2')"
[PROOF STATE]
proof (prove)
using this:
LS M (t_source t1') = LS M (t_source t2')
goal (1 subgoal):
1. LS M (t_target t1') = LS M (t_target t2')
[PROOF STEP]
using observable_transition_target_language_eq[OF _ \<open>t1' \<in> transitions M\<close> \<open>t2' \<in> transitions M\<close> _ _ \<open>observable M\<close>]
[PROOF STATE]
proof (prove)
using this:
LS M (t_source t1') = LS M (t_source t2')
\<lbrakk>LS M (t_source t1') = LS M (t_source t2'); t_input t1' = t_input t2'; t_output t1' = t_output t2'\<rbrakk> \<Longrightarrow> LS M (t_target t1') = LS M (t_target t2')
goal (1 subgoal):
1. LS M (t_target t1') = LS M (t_target t2')
[PROOF STEP]
using \<open>t_input t1 = t_input t2\<close> \<open>t_output t1 = t_output t2\<close>
[PROOF STATE]
proof (prove)
using this:
LS M (t_source t1') = LS M (t_source t2')
\<lbrakk>LS M (t_source t1') = LS M (t_source t2'); t_input t1' = t_input t2'; t_output t1' = t_output t2'\<rbrakk> \<Longrightarrow> LS M (t_target t1') = LS M (t_target t2')
t_input t1 = t_input t2
t_output t1 = t_output t2
goal (1 subgoal):
1. LS M (t_target t1') = LS M (t_target t2')
[PROOF STEP]
unfolding t1'_def t2'_def fst_conv snd_conv
[PROOF STATE]
proof (prove)
using this:
LS M (t_source t1') = LS M (t_source t2')
\<lbrakk>LS M (t_source t1') = LS M (t_source t2'); t_input t1' = t_input t2'; t_output t1' = t_output t2'\<rbrakk> \<Longrightarrow> LS M (t_target t1') = LS M (t_target t2')
t_input t1' = t_input t2'
t_output t1' = t_output t2'
goal (1 subgoal):
1. LS M (t_target t1') = LS M (t_target t2')
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
LS M (t_target t1') = LS M (t_target t2')
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>t1 \<in> FSM.transitions M'; t2 \<in> FSM.transitions M'; t_source t1 = t_source t2; t_input t1 = t_input t2; t_output t1 = t_output t2\<rbrakk> \<Longrightarrow> t_target t1 = t_target t2
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
LS M (t_target t1') = LS M (t_target t2')
[PROOF STEP]
show "t_target t1 = t_target t2"
[PROOF STATE]
proof (prove)
using this:
LS M (t_target t1') = LS M (t_target t2')
goal (1 subgoal):
1. t_target t1 = t_target t2
[PROOF STEP]
unfolding t1'_def t2'_def snd_conv
[PROOF STATE]
proof (prove)
using this:
LS M (t_target t1') = LS M (t_target t2')
goal (1 subgoal):
1. {q \<in> FSM.states M. LS M q = LS M (t_target t1')} = {q \<in> FSM.states M. LS M q = LS M (t_target t2')}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
t_target t1 = t_target t2
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<lbrakk>?t1.0 \<in> FSM.transitions M'; ?t2.0 \<in> FSM.transitions M'; t_source ?t1.0 = t_source ?t2.0; t_input ?t1.0 = t_input ?t2.0; t_output ?t1.0 = t_output ?t2.0\<rbrakk> \<Longrightarrow> t_target ?t1.0 = t_target ?t2.0
goal (1 subgoal):
1. observable M'
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?t1.0 \<in> FSM.transitions M'; ?t2.0 \<in> FSM.transitions M'; t_source ?t1.0 = t_source ?t2.0; t_input ?t1.0 = t_input ?t2.0; t_output ?t1.0 = t_output ?t2.0\<rbrakk> \<Longrightarrow> t_target ?t1.0 = t_target ?t2.0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?t1.0 \<in> FSM.transitions M'; ?t2.0 \<in> FSM.transitions M'; t_source ?t1.0 = t_source ?t2.0; t_input ?t1.0 = t_input ?t2.0; t_output ?t1.0 = t_output ?t2.0\<rbrakk> \<Longrightarrow> t_target ?t1.0 = t_target ?t2.0
goal (1 subgoal):
1. observable M'
[PROOF STEP]
unfolding observable.simps
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?t1.0 \<in> FSM.transitions M'; ?t2.0 \<in> FSM.transitions M'; t_source ?t1.0 = t_source ?t2.0; t_input ?t1.0 = t_input ?t2.0; t_output ?t1.0 = t_output ?t2.0\<rbrakk> \<Longrightarrow> t_target ?t1.0 = t_target ?t2.0
goal (1 subgoal):
1. \<forall>t1\<in>FSM.transitions M'. \<forall>t2\<in>FSM.transitions M'. t_source t1 = t_source t2 \<and> t_input t1 = t_input t2 \<and> t_output t1 = t_output t2 \<longrightarrow> t_target t1 = t_target t2
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
observable M'
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 5572, "file": "FSM_Tests_Minimisation", "length": 34}
|
import numpy as np
from dsmlt import stats
def test_trimean():
    """trimean == (Q1 + 2*median + Q3) / 4 for three integer ranges."""
    cases = [
        (range(101), (25 + 50 * 2 + 75) / 4),
        (range(1, 101), (25.75 + 50.5 * 2 + 75.25) / 4),
        (range(1, 100), (25.5 + 50 * 2 + 74.5) / 4),
    ]
    for values, expected in cases:
        assert stats.trimean(np.array(values)) == expected
|
{"hexsha": "4c924421134468a02aec570da1cd13f50e416e63", "size": 449, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/stats/test_stats.py", "max_stars_repo_name": "pawlyk/dsml-tools", "max_stars_repo_head_hexsha": "6717ff6b4e58c951140e2abfad20f8d306b01d97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-08-21T09:28:22.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-21T09:28:22.000Z", "max_issues_repo_path": "tests/stats/test_stats.py", "max_issues_repo_name": "pawlyk/dsml-tools", "max_issues_repo_head_hexsha": "6717ff6b4e58c951140e2abfad20f8d306b01d97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 163, "max_issues_repo_issues_event_min_datetime": "2018-07-17T13:46:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-20T14:48:53.000Z", "max_forks_repo_path": "tests/stats/test_stats.py", "max_forks_repo_name": "pawlyk/dsml-tools", "max_forks_repo_head_hexsha": "6717ff6b4e58c951140e2abfad20f8d306b01d97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9444444444, "max_line_length": 49, "alphanum_fraction": 0.6191536748, "include": true, "reason": "import numpy", "num_tokens": 169}
|
import numpy as np
def confidence_transform(R, param_alpha, param_epsilon):
    """Map raw interaction counts to confidence weights.

    Applies the log scaling c = alpha * log(1 + epsilon * r) to the stored
    (non-zero) entries of a sparse matrix, as commonly used for
    implicit-feedback ALS.

    Args:
        R: sparse matrix exposing ``.copy()`` and ``.data``.
        param_alpha: overall confidence scale.
        param_epsilon: scale applied to the raw counts inside the log.

    Returns:
        A new sparse matrix of the same type; ``R`` is left untouched.
    """
    confidences = R.copy()
    scaled_counts = 1 + param_epsilon * confidences.data
    confidences.data = param_alpha * np.log(scaled_counts)
    return confidences
|
{"hexsha": "81235bc67a634555efdcb9d6d3f28404bcbcc597", "size": 169, "ext": "py", "lang": "Python", "max_stars_repo_path": "krotos/msd/latent/als.py", "max_stars_repo_name": "KelvinLu/krotos-convnet", "max_stars_repo_head_hexsha": "e37218aeaf10b73d77dfac911be46d8ab689e41d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2017-03-23T15:49:23.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-12T04:23:48.000Z", "max_issues_repo_path": "krotos/msd/latent/als.py", "max_issues_repo_name": "KelvinLu/krotos-convnet", "max_issues_repo_head_hexsha": "e37218aeaf10b73d77dfac911be46d8ab689e41d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "krotos/msd/latent/als.py", "max_forks_repo_name": "KelvinLu/krotos-convnet", "max_forks_repo_head_hexsha": "e37218aeaf10b73d77dfac911be46d8ab689e41d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1428571429, "max_line_length": 61, "alphanum_fraction": 0.6863905325, "include": true, "reason": "import numpy", "num_tokens": 44}
|
# importing libraries
library(plyr)
library(foreign)
library(RWeka)
library(dplyr)
library(caret)
library(xgboost)
# reading datasets
esl = read.arff("esl.arff")
era = read.arff("era.arff")
lev = read.arff("lev.arff")
swd = read.arff("swd.arff")
# Split a dataset into train (66%) and test (34%) partitions.
# The class attribute (assumed to be the last column) is renamed "class".
# Returns list(train, test) as data frames.
createPartitions = function(dataset){
  set.seed(20)
  # normalise the class column name so downstream code can rely on it
  colnames(dataset)[length(dataset)] = "class"
  # draw 66% of the row indices for training; the remainder is the test split
  trainIndex = sample(1:nrow(dataset), (66*nrow(dataset))/100)
  trainSet = as.data.frame(dataset[trainIndex,])
  testSet = as.data.frame(dataset[-trainIndex,])
  list(trainSet, testSet)
}
# function to create n-1 models to ordinal classification
# (ordinal decomposition: for k ordered classes, build k-1 binary targets
# "is the instance above threshold i?" and fit one xgboost model per target,
# each with a monotone constraint on the features)
ordinalDatasets = function(dataset)
{
  # extracting the original class attribute
  # (assuming that class attribute is in the end)
  originalClassAttribute = dataset[,dim(dataset)[2]]
  # inspecting which classes contains class attribute of the dataset
  classes = sort(unique(originalClassAttribute))
  # we select the first one as current class
  currentClasses = classes[1]
  # let's remove it from classes
  classes = classes[-1]
  # we only need to make numberOfClasses-1 models, so last "classes"
  # item won't be considered.
  # we create a list containing each class combination attribute.
  newClassAttributes = originalClassAttribute
  for(class in classes){
    # 0 when the instance's class is at/below the current threshold, 1 above
    newClassAttributes = cbind(newClassAttributes,Newclass = ifelse(originalClassAttribute%in%currentClasses, 0, 1))
    currentClasses = c(currentClasses,class)
  }
  # we remove the original class attribute
  newClassAttributes = newClassAttributes[,-1]
  # let's create the models (one binary xgboost per threshold column)
  models = apply(newClassAttributes, 2, function(class){
    # we assign the new class attribute
    dataset[,dim(dataset)[2]] = class
    # xgboost only accepts matrix or xgb.DMatrix
    dataset = xgb.DMatrix(as.matrix(dataset[,-dim(dataset)[2]]), label=dataset[,dim(dataset)[2]])
    # let's call the model (monotone_constraints=1 enforces increasing features)
    xgboost(data = dataset, monotone_constraints=1,nrounds = 2)
  })
}
# function to predict an instance (or set of instances) class
# Decodes the ordinal ensemble: each binary model votes (prob > 0.5) whether
# the instance lies above its class threshold; the number of positive votes
# plus one indexes the predicted class inside the ordered 'classes' vector.
monotonePrediction = function(models, newInstances,classes){
  # firstly we get the probabilities of each instance
  # (thresholded at 0.5 into hard 0/1 votes, one vector per model)
  probabilities = lapply(models, function(model){as.numeric(predict(model,as.matrix(newInstances[,-ncol(newInstances)])) > 0.5)})
  #probabilities = lapply(models, function(model){predict(model,as.matrix(newInstances[,-ncol(newInstances)]))})
  print(probabilities)  # NOTE(review): leftover debug output
  # bind the per-model vote vectors into a matrix (one column per model)
  probs = do.call("cbind",probabilities)
  # let's predict: count the "above threshold" votes per row, offset by one
  indexOfPredictedClasses = apply(probs, 1, function(p){
    sum(p)+1
  })
  print(indexOfPredictedClasses)  # NOTE(review): leftover debug output
  classes[indexOfPredictedClasses]
}
####################################################################################################
####################################################################################################
# firstly we partition datasets
# <datasetName>Partitions[[1]] will contain the train partition
# <datasetName>Partitions[[2]] will contain the test partition
eslPartitions = createPartitions(esl)
eraPartitions = createPartitions(era)
levPartitions = createPartitions(lev)
swdPartitions = createPartitions(swd)
# let's create the models with the train datasets
eslModels = ordinalDatasets(eslPartitions[[1]])
eraModels = ordinalDatasets(eraPartitions[[1]])
levModels = ordinalDatasets(levPartitions[[1]])
swdModels = ordinalDatasets(swdPartitions[[1]])
# we predict using test dataset without class attribute
# NOTE: the ordered class levels come from the original datasets; the class
# column is named out1 in esl/era but Out1 in lev/swd
eslPredictedResults = monotonePrediction(eslModels, eslPartitions[[2]], sort(unique(esl$out1)))
eraPredictedResults = monotonePrediction(eraModels, eraPartitions[[2]], sort(unique(era$out1)))
levPredictedResults = monotonePrediction(levModels, levPartitions[[2]], sort(unique(lev$Out1)))
swdPredictedResults = monotonePrediction(swdModels, swdPartitions[[2]], sort(unique(swd$Out1)))
# now we get the accuracy results (fraction of exact matches on the test split)
eslAccuracy <- sum(eslPredictedResults == eslPartitions[[2]]$class)/length(eslPredictedResults)
eraAccuracy <- sum(eraPredictedResults == eraPartitions[[2]]$class)/length(eraPredictedResults)
levAccuracy <- sum(levPredictedResults == levPartitions[[2]]$class)/length(levPredictedResults)
swdAccuracy <- sum(swdPredictedResults == swdPartitions[[2]]$class)/length(swdPredictedResults)
# let's see how this model fits the datasets
eslAccuracy
eraAccuracy
levAccuracy
swdAccuracy
# as we can see, accuracy values using ordinal classification method is not really good, so
# it may be because datasets are note made to apply this classification technique
|
{"hexsha": "60319db05ca2e21a2e660d8f44251b88b29ca438", "size": 4668, "ext": "r", "lang": "R", "max_stars_repo_path": "monoxgboost.r", "max_stars_repo_name": "CarlosSequi/DataMining-OrdinalMonotonicClassification", "max_stars_repo_head_hexsha": "4a4c5055b37540f5394779b89746d21964b5e727", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-17T10:49:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-17T10:49:28.000Z", "max_issues_repo_path": "monoxgboost.r", "max_issues_repo_name": "CarlosSequi/DataMining-OrdinalMonotonicClassification", "max_issues_repo_head_hexsha": "4a4c5055b37540f5394779b89746d21964b5e727", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "monoxgboost.r", "max_forks_repo_name": "CarlosSequi/DataMining-OrdinalMonotonicClassification", "max_forks_repo_head_hexsha": "4a4c5055b37540f5394779b89746d21964b5e727", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9512195122, "max_line_length": 129, "alphanum_fraction": 0.7249357326, "num_tokens": 1152}
|
import cv2
import sys
import imgaug.augmenters as iaa
sys.path.insert(1,"D:\\source\\repos\\rdt-reader\\object_detection_v2")
import core.model_new as model
from core.config import cfg
import numpy as np
from utils import data_loader
inpImg="../object_detection_mobile_v2/train_hor_ratioCropped/I4.jpg"
import ntpath
import math
import itertools
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
import tensorflow as tf
def euclidianDistance(p1, p2):
    """Compute the Euclidean distance between two points.

    Generalized over the original: inputs are converted with ``np.asarray``,
    so lists/tuples work as well as ndarrays (backward compatible -- ndarray
    inputs behave exactly as before).

    Args:
        p1 (array-like): coordinates of point 1 (e.g. X, Y).
        p2 (array-like): coordinates of point 2, same shape as p1.

    Returns:
        numpy.float64: distance between the two points.
    """
    return np.linalg.norm(np.asarray(p2) - np.asarray(p1))
def rotate_bound(image, angle):
    """Rotate an image clockwise by `angle` degrees without cropping.

    The output canvas is enlarged to the rotated image's bounding box so no
    corner pixels are lost.

    Args:
        image (numpy.ndarray): channels-last image.
        angle (float): clockwise rotation angle in degrees.

    Returns:
        tuple: (rotated image, list of six zeros -- a placeholder for the
        transformed line centers used only by older debugging code).
    """
    # Rotation pivot: the center of the original frame.
    height, width = image.shape[:2]
    pivot = (width // 2, height // 2)

    # Negative angle gives a clockwise rotation under OpenCV's convention;
    # the matrix's first row holds (cos, sin) up to sign.
    matrix = cv2.getRotationMatrix2D(pivot, -angle, 1.0)
    cos_a = abs(matrix[0, 0])
    sin_a = abs(matrix[0, 1])

    # Bounding-box dimensions of the rotated frame.
    new_width = int((height * sin_a) + (width * cos_a))
    new_height = int((height * cos_a) + (width * sin_a))

    # Translate so the rotated content stays centered on the new canvas.
    matrix[0, 2] += (new_width / 2) - pivot[0]
    matrix[1, 2] += (new_height / 2) - pivot[1]

    placeholder_centers = [0, 0, 0, 0, 0, 0]
    return cv2.warpAffine(image, matrix, (new_width, new_height)), placeholder_centers
def prepocessImageCOD(img, resize_dim):
    """Prepare a BGR frame as model input of shape (1, H/2, W/2, 1), float32.

    Steps: grayscale conversion, one pyramid downscale, [0, 1] scaling, then
    the batch and channel axes the network expects.  `resize_dim` is kept for
    interface compatibility but is not used here.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.pyrDown(gray)
    # a second cv2.pyrDown was present but disabled in the original
    scaled = gray[..., np.newaxis] / 255.0
    batched = scaled[np.newaxis, ...]
    return np.asarray(batched, dtype=np.float32)
def main():
    """Evaluate the TFLite object detector over the labelled test set.

    For every image listed in ``cfg.TEST.LABEL_FILE_YOLO`` the image is
    randomly rotated (together with its keypoint annotations), run through
    the TFLite detector, and the decoded boxes are written out two ways:
    a per-feature heatmap image under ``heatmap/<name>`` and distance /
    angle statistics for every detection combination in ``Analysis_2.csv``.

    NOTE(review): the trailing ``break`` means only the FIRST line of the
    label file is processed -- this looks like a debugging leftover;
    confirm before removing.
    """
    num_class = cfg.TRAIN.NUMBER_CLASSES
    resize_dim = cfg.TEST.INPUT_SIZE
    anchors = cfg.TRAIN.ANCHOR_ASPECTRATIO
    number_blocks = cfg.TRAIN.NUMBER_BLOCKS
    # Size (in network-input pixels) of one grid cell along each axis.
    resizefactor = [int(resize_dim[0] / number_blocks[0]),
                    int(resize_dim[1] / number_blocks[1])]
    Model = model.ObjectDetection(True, "Model_KH_EXP/model_save_rot_360x640.hdf5").model
    # Hoisted out of the image loop: the TFLite interpreter is stateless
    # between invocations, so the model only needs to be loaded once.
    interpreter = tf.lite.Interpreter(model_path="D:/source/repos/object_detection_mobile_v2/eval_model/OD.lite")
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    print(input_details)
    # Class ids encode orientation (tar % 10, an index into orie) and
    # feature type (tar // 10: 0=influenza, 1=C pattern, 2=arrow).
    orie = [0, 1, 2, 6, 7, 8, 9, 10, 14, 15]
    orie_angles = [0, 22.5, 45, 135, 157.5, 180, 202.5, 225, 315, 337.5]
    with open(cfg.TEST.LABEL_FILE_YOLO) as fin, open("Analysis_2.csv", "w") as fout:
        fout.write("ImageName,Arrow`_prob,Cpattern`_prob,Inlfuenza`_prob,A_ang-A`_ang,C_ang-C`_ang,I_ang-I`_ang-,A_C,C_I,A_I,A-A`,C-C`,I-I`\n")
        print(cfg.TEST.LABEL_FILE_YOLO)
        for line in fin:
            imgpath = line.strip().split()[0]
            print(imgpath)
            # Ground-truth centers, one pair per feature type.
            trueArrow = [0, 0]
            trueCpattern = [0, 0]
            trueInfl = [0, 0]
            for annots in line.strip().split()[1:]:
                x1y1x2y2 = [float(x) for x in annots.split(",")[:-1]]
                lab = int(annots.split(",")[-1])
                feat_type = int(lab / 10)
                cxywh = data_loader.xy2cxcy(x1y1x2y2)
                if feat_type == 0:
                    trueInfl[0] = cxywh[0]
                    trueInfl[1] = cxywh[1]
                elif feat_type == 1:
                    trueCpattern[0] = cxywh[0]
                    trueCpattern[1] = cxywh[1]
                elif feat_type == 2:
                    trueArrow[0] = cxywh[0]
                    trueArrow[1] = cxywh[1]
            imgName = ntpath.basename(imgpath)
            # File names are expected to start with "<orientation>_".
            orientation = int(imgName.split("_")[0])
            fullsizeimg = cv2.imread(imgpath, cv2.IMREAD_COLOR)
            # Randomly rotate the image and its ground-truth keypoints together.
            KPS = [
                Keypoint(x=trueArrow[0], y=trueArrow[1]),
                Keypoint(x=trueCpattern[0], y=trueCpattern[1]),
                Keypoint(x=trueInfl[0], y=trueInfl[1])
            ]
            kpsoi = KeypointsOnImage(KPS, shape=fullsizeimg.shape)
            fullsizeimg, kps_aug = iaa.Affine(rotate=(-10, 10))(image=fullsizeimg, keypoints=kpsoi)
            trueArrow = [kps_aug.keypoints[0].x, kps_aug.keypoints[0].y]
            trueCpattern = [kps_aug.keypoints[1].x, kps_aug.keypoints[1].y]
            trueInfl = [kps_aug.keypoints[2].x, kps_aug.keypoints[2].y]
            # Output canvas: original image on top, one dimmed copy per
            # feature type below (the heatmap rows), 20px gaps between.
            final_img = np.zeros((fullsizeimg.shape[0] * 4 + 80, fullsizeimg.shape[1], 3))
            final_img[0:fullsizeimg.shape[0], :fullsizeimg.shape[1], :] = fullsizeimg
            final_img[fullsizeimg.shape[0] + 20:fullsizeimg.shape[0] * 2 + 20, :fullsizeimg.shape[1], :] = fullsizeimg * 0.2
            final_img[fullsizeimg.shape[0] * 2 + 20:fullsizeimg.shape[0] * 3 + 20, :fullsizeimg.shape[1], :] = fullsizeimg * 0.2
            final_img[fullsizeimg.shape[0] * 3 + 20:fullsizeimg.shape[0] * 4 + 20, :fullsizeimg.shape[1], :] = fullsizeimg * 0.2
            Input = prepocessImageCOD(fullsizeimg, resize_dim)
            interpreter.set_tensor(input_details[0]['index'], Input)
            interpreter.invoke()
            predictions = interpreter.get_tensor(output_details[0]['index'])
            preds = predictions[0]
            all_boxes = []
            orientation = orie.index(orientation)
            orientation_angle = orie_angles[orientation]
            # Decode every anchor of every grid cell into an absolute box.
            for ax_1 in range(number_blocks[0]):
                for ax_2 in range(number_blocks[1]):
                    for anch_id in range(len(anchors[0])):
                        computedIndex = ax_1 * number_blocks[1] + ax_2
                        tar_class = np.argmax(preds[computedIndex, anch_id, 0:num_class])
                        prob = preds[computedIndex, anch_id, tar_class]
                        offsets = preds[computedIndex, anch_id, num_class:]
                        # Center offsets are relative to the input size;
                        # width/height are log-scale anchor multipliers.
                        cx = (ax_2 + 0.5) * resizefactor[1] + offsets[-4] * resize_dim[1]
                        cy = (ax_1 + 0.5) * resizefactor[0] + offsets[-3] * resize_dim[0]
                        w = anchors[0][anch_id][1] * math.exp(offsets[-2])
                        h = anchors[0][anch_id][0] * math.exp(offsets[-1])
                        x1, y1, x2, y2 = data_loader.cxcy2xy([cx, cy, w, h])
                        all_boxes.append([tar_class, x1, y1, x2, y2, prob])
            # Scale factors from network-input space to full-image space.
            widthFactor = 1.0 / resize_dim[1] * fullsizeimg.shape[1]
            heightFactor = 1.0 / resize_dim[0] * fullsizeimg.shape[0]
            fullsizeimg = fullsizeimg * 0.1
            all_boxes = sorted(all_boxes, key=lambda x: x[5], reverse=True)
            Boxes_Arrow = []
            Boxes_Cpattern = []
            Boxes_Infl = []
            for b in all_boxes:
                tar = int(b[0])
                color_ind = int(tar / 10)
                predicted_orientatation = orie_angles[tar % 10]
                x1 = int(b[1] * widthFactor)
                y1_true = int(b[2] * heightFactor)
                y1 = int(b[2] * heightFactor) + (fullsizeimg.shape[0] * (color_ind + 1) + 20)
                x2 = int(b[3] * widthFactor)
                y2 = int(b[4] * heightFactor) + (fullsizeimg.shape[0] * (color_ind + 1) + 20)
                y2_true = int(b[4] * heightFactor)
                # Map probability [0, 1] onto intensity [128, 255].
                val = int(127 * b[5] + 128)
                cxcy = data_loader.xy2cxcy([x1, y1_true, x2, y2_true])
                if predicted_orientatation == orientation_angle:
                    if color_ind != 3:
                        # Paint the detection into its feature-type row,
                        # lighting only that feature's color channel.
                        for i in range(3):
                            if i == color_ind:
                                final_img[y1:y2, x1:x2, color_ind] = val
                            else:
                                final_img[y1:y2, x1:x2, i] = 0
                        if b[5] > 0.0:
                            if color_ind == 2:
                                Boxes_Arrow.append([b[5], cxcy[0], cxcy[1], trueArrow[0], trueArrow[1], predicted_orientatation, orientation_angle])
                            elif color_ind == 1:
                                Boxes_Cpattern.append([b[5], cxcy[0], cxcy[1], trueCpattern[0], trueCpattern[1], predicted_orientatation, orientation_angle])
                            elif color_ind == 0:
                                Boxes_Infl.append([b[5], cxcy[0], cxcy[1], trueInfl[0], trueInfl[1], predicted_orientatation, orientation_angle])
            all_box_preds = [Boxes_Arrow, Boxes_Cpattern, Boxes_Infl]
            all_combinations = list(itertools.product(*all_box_preds))
            print(len(Boxes_Arrow), len(Boxes_Cpattern), len(Boxes_Infl))
            print(Boxes_Arrow)
            for cmbs in all_combinations:
                fout.write(imgName + ",")
                # Flatten the (arrow, cpattern, infl) 7-tuples into one list:
                # indices 0-6 arrow, 7-13 cpattern, 14-20 influenza.
                cmb_f = []
                for cmb in cmbs:
                    cmb_f.extend(cmb)
                    fout.write(str(cmb[0]) + ",")
                # BUG FIX: coordinates must be wrapped in a list --
                # np.array(x, y) treats y as a dtype and raises TypeError.
                Apred_A = euclidianDistance(np.array([cmb_f[1], cmb_f[2]]), np.array([cmb_f[3], cmb_f[4]]))
                Cpred_C = euclidianDistance(np.array([cmb_f[8], cmb_f[9]]), np.array([cmb_f[10], cmb_f[11]]))
                Ipred_I = euclidianDistance(np.array([cmb_f[15], cmb_f[16]]), np.array([cmb_f[17], cmb_f[18]]))
                A_C = euclidianDistance(np.array([cmb_f[1], cmb_f[2]]), np.array([cmb_f[8], cmb_f[9]]))
                C_I = euclidianDistance(np.array([cmb_f[15], cmb_f[16]]), np.array([cmb_f[8], cmb_f[9]]))
                A_I = euclidianDistance(np.array([cmb_f[1], cmb_f[2]]), np.array([cmb_f[15], cmb_f[16]]))
                Apredang_Aang = cmb_f[6] - cmb_f[5]
                Cpredang_Cang = cmb_f[13] - cmb_f[12]
                Ipredang_Iang = cmb_f[20] - cmb_f[19]
                cmb_str = ",".join(str(v) for v in [Apredang_Aang, Cpredang_Cang, Ipredang_Iang, A_C, C_I, A_I, Apred_A, Cpred_C, Ipred_I])
                fout.write(cmb_str)
                fout.write("\n")
            cv2.imwrite("heatmap/" + imgName, final_img)
            # NOTE(review): stops after the first image -- looks like a
            # debugging leftover; confirm before removing.
            break
main()
|
{"hexsha": "90a40103dff9439f3cece114fc73b1fc09591374", "size": 11439, "ext": "py", "lang": "Python", "max_stars_repo_path": "heatmap_newModel.py", "max_stars_repo_name": "DigitalHealthIntegration/rdt-reader", "max_stars_repo_head_hexsha": "242a4d813a6b58b3668f4d4ce35cea8f55bd651f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-12T13:38:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T13:45:54.000Z", "max_issues_repo_path": "heatmap_newModel.py", "max_issues_repo_name": "DigitalHealthIntegration/rdt-reader", "max_issues_repo_head_hexsha": "242a4d813a6b58b3668f4d4ce35cea8f55bd651f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "heatmap_newModel.py", "max_forks_repo_name": "DigitalHealthIntegration/rdt-reader", "max_forks_repo_head_hexsha": "242a4d813a6b58b3668f4d4ce35cea8f55bd651f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.0354330709, "max_line_length": 299, "alphanum_fraction": 0.5745257453, "include": true, "reason": "import numpy", "num_tokens": 3188}
|
import unittest
import umo
import math
class TestUmoApi(unittest.TestCase):
    """End-to-end tests of the umo Python API: model construction,
    expression typing, operator overloads, parameters and solution status."""
    def test_creation(self):
        """A model can be constructed without errors."""
        m = umo.Model()
    def test_constants(self):
        """Constants get the expression type matching their Python value."""
        m = umo.Model()
        b1 = m.constant(False)
        b2 = m.constant(True)
        self.assertEqual(b1.value, False)
        self.assertEqual(b2.value, True)
        self.assertTrue(isinstance(b1, umo.BoolExpression))
        self.assertTrue(isinstance(b2, umo.BoolExpression))
        i1 = m.constant(2)
        i2 = m.constant(-3.0)
        self.assertEqual(i1.value, 2)
        self.assertEqual(i2.value, -3)
        self.assertTrue(isinstance(i1, umo.IntExpression))
        self.assertTrue(isinstance(i2, umo.IntExpression))
        f1 = m.constant(2.4)
        f2 = m.constant(-3.9)
        self.assertEqual(f1.value, 2.4)
        self.assertEqual(f2.value, -3.9)
        self.assertTrue(isinstance(f1, umo.FloatExpression))
        self.assertTrue(isinstance(f2, umo.FloatExpression))
        m.check()
    def test_decisions(self):
        """Decision variables are typed and their values can be set/read."""
        m = umo.Model()
        x1 = m.bool_var()
        x2 = m.int_var(-4, 2)
        x3 = m.float_var(0.5, 2)
        self.assertTrue(isinstance(x1, umo.BoolExpression))
        self.assertTrue(isinstance(x2, umo.IntExpression))
        self.assertTrue(isinstance(x3, umo.FloatExpression))
        x1.value = False
        self.assertEqual(x1.value, False)
        x1.value = True
        self.assertEqual(x1.value, True)
        x2.value = 1
        self.assertEqual(x2.value, 1)
        x2.value = -2
        self.assertEqual(x2.value, -2)
        x3.value = 1.0
        self.assertEqual(x3.value, 1.0)
        x3.value = 1.83
        self.assertEqual(x3.value, 1.83)
        m.check()
    def test_unbounded_variables(self):
        """Infinite bounds via float("inf") or umo.unbounded() are accepted."""
        m = umo.Model()
        x1 = m.int_var(-float("inf"), 0)
        x2 = m.int_var(0, float("inf"))
        x3 = m.int_var(umo.unbounded(), 0)
        x4 = m.int_var(0, umo.unbounded())
        x5 = m.int_var(umo.unbounded(), umo.unbounded())
        x6 = m.int_var(-float("inf"), float("inf"))
        x7 = m.float_var(-float("inf"), 0)
        x8 = m.float_var(0, float("inf"))
        x9 = m.float_var(-float("inf"), float("inf"))
        x10 = m.float_var(umo.unbounded(), 0)
        x11 = m.float_var(0, umo.unbounded())
        x12 = m.float_var(umo.unbounded(), umo.unbounded())
        for x in (x1, x2, x3, x4, x5, x6):
            self.assertTrue(isinstance(x, umo.IntExpression))
        for x in (x7, x8, x9, x10, x11, x12):
            self.assertTrue(isinstance(x, umo.FloatExpression))
        m.check()
    def test_float_compute(self):
        """Float arithmetic expressions evaluate like Python floats."""
        m = umo.Model()
        dec1 = m.float_var(0.0, 10.0)
        dec2 = m.float_var(0.0, 10.0)
        x01 = dec1 + dec2
        x02 = dec1 - dec2
        x03 = dec1 * dec2
        x04 = dec1 / dec2
        x05 = -dec1
        for val1 in [5.2, -4.0, 2.5]:
            for val2 in [-7.97, 2.0, 1.3]:
                dec1.value = val1
                dec2.value = val2
                self.assertEqual(x01.value, val1 + val2)
                self.assertEqual(x02.value, val1 - val2)
                self.assertEqual(x03.value, val1 * val2)
                self.assertEqual(x04.value, val1 / val2)
                self.assertEqual(x05.value, -val1)
        m.check()
    def test_int_compute(self):
        """Integer arithmetic expressions evaluate like Python ints."""
        m = umo.Model()
        dec1 = m.int_var(0.0, 10.0)
        dec2 = m.int_var(0.0, 10.0)
        x01 = dec1 + dec2
        x02 = dec1 - dec2
        x03 = dec1 * dec2
        x04 = dec1 // dec2
        x05 = dec1 % dec2
        x06 = -dec1
        x07 = +dec1
        for x in (x01, x02, x03, x04, x05, x06, x07):
            self.assertTrue(isinstance(x, umo.IntExpression))
        for val1 in [5, 4, 0]:
            for val2 in [1, 2, 3]:
                dec1.value = val1
                dec2.value = val2
                self.assertEqual(x01.value, val1 + val2)
                self.assertEqual(x02.value, val1 - val2)
                self.assertEqual(x03.value, val1 * val2)
                self.assertEqual(x04.value, val1 // val2)
                self.assertEqual(x05.value, val1 % val2)
                self.assertEqual(x06.value, -val1)
                self.assertEqual(x07.value, +val1)
        m.check()
    def test_bool_compute(self):
        """Boolean operators behave like Python's bitwise bool operators."""
        m = umo.Model()
        dec1 = m.bool_var()
        dec2 = m.bool_var()
        x01 = dec1 & dec2
        x02 = dec1 | dec2
        x03 = dec1 ^ dec2
        x04 = ~dec1
        for x in (x01, x02, x03, x04):
            self.assertTrue(isinstance(x, umo.BoolExpression))
        for val1 in [False, True]:
            for val2 in [False, True]:
                dec1.value = val1
                dec2.value = val2
                self.assertEqual(x01.value, val1 & val2)
                self.assertEqual(x02.value, val1 | val2)
                self.assertEqual(x03.value, val1 ^ val2)
                self.assertEqual(x04.value, not val1)
        m.check()
    def test_operations(self):
        """Transcendental functions match the math module."""
        m = umo.Model()
        dec = m.float_var(-10.0, 10.0)
        x01 = umo.exp(dec)
        x02 = umo.cos(dec)
        x03 = umo.atan(dec)
        for val in [5.0, -4.0, 2.5]:
            dec.value = val
            self.assertEqual(x01.value, math.exp(val))
            self.assertEqual(x02.value, math.cos(val))
            self.assertEqual(x03.value, math.atan(val))
        m.check()
    def test_comparisons(self):
        """Comparison operators yield boolean expressions with correct values."""
        m = umo.Model()
        dec1 = m.float_var(0.0, 10.0)
        dec2 = m.float_var(0.0, 10.0)
        x01 = dec1 == dec2
        x02 = dec1 != dec2
        x03 = dec1 <= dec2
        x04 = dec1 >= dec2
        x05 = dec1 < dec2
        x06 = dec1 > dec2
        for x in (x01, x02, x03, x04, x05, x06):
            self.assertTrue(isinstance(x, umo.BoolExpression))
        for val1 in [5.0, -4.0, 2.5]:
            for val2 in [5.0, -4.0, 2.5]:
                dec1.value = val1
                dec2.value = val2
                self.assertEqual(x01.value, val1 == val2)
                self.assertEqual(x02.value, val1 != val2)
                self.assertEqual(x03.value, val1 <= val2)
                self.assertEqual(x04.value, val1 >= val2)
                self.assertEqual(x05.value, val1 < val2)
                self.assertEqual(x06.value, val1 > val2)
        m.check()
    def test_status(self):
        """Solution status tracks whether all constraints are satisfied."""
        m = umo.Model()
        dec = m.bool_var()
        umo.constraint(dec)
        dec.value = True
        self.assertEqual(m.status, umo.SolutionStatus.VALID)
        dec.value = False
        self.assertEqual(m.status, umo.SolutionStatus.INVALID)
        m.check()
    def test_errors(self):
        """NaN constants are rejected."""
        m = umo.Model()
        self.assertRaises(RuntimeError, m.constant, float("nan"))
    def test_float_ops(self):
        """Mixed float-expression / Python-number overloads all resolve."""
        m = umo.Model()
        fdec1 = m.float_var(-10.0, 10.0)
        fdec2 = m.float_var(5, 10.0)
        f01 = fdec1 + fdec2
        f02 = fdec1 + 1.0
        f03 = 1.0 + fdec1
        f04 = fdec1 * fdec2
        f05 = fdec2 * 2.0
        f06 = 2.0 * fdec2
        f07 = fdec1 - fdec2
        f08 = fdec2 - 1.0
        f09 = 1.0 - fdec2
        f10 = -fdec2
        f11 = fdec1 / fdec2
        f12 = fdec2 / 2.0
        f13 = 2.0 / fdec2
        # Smoke test: evaluating each arithmetic expression must not raise.
        for expr in (f01, f02, f03, f04, f05, f06, f07,
                     f08, f09, f10, f11, f12, f13):
            expr.value
        b01 = fdec1 < fdec2
        b02 = fdec1 < 100.0
        b03 = 100.0 < fdec2
        b04 = fdec2 > fdec1
        b05 = fdec1 > -100.0
        b06 = 100.0 > fdec2
        b07 = fdec1 <= fdec2
        b08 = fdec1 <= 100.0
        b09 = -100.0 <= fdec2
        b10 = fdec2 >= fdec1
        b11 = fdec1 >= -100.0
        b12 = 100.0 >= fdec2
        b13 = fdec1 == fdec2
        b14 = fdec1 == 100.0
        b15 = 100.0 == fdec2
        b16 = fdec1 != fdec2
        b17 = fdec1 != 100.0
        b18 = 100.0 != fdec2
        # Smoke test: evaluating each comparison must not raise.
        for expr in (b01, b02, b03, b04, b05, b06, b07, b08, b09,
                     b10, b11, b12, b13, b14, b15, b16, b17, b18):
            expr.value
        m.check()
    def test_int_ops(self):
        """Mixed int-expression / Python-int overloads all resolve."""
        m = umo.Model()
        idec1 = m.int_var(-10, 10)
        idec2 = m.int_var(5, 10)
        idec1.value = 1
        idec2.value = 1
        i01 = idec1 + idec2
        i02 = idec1 + 1
        i03 = 1 + idec1
        i04 = idec1 * idec2
        i05 = idec2 * 2
        i06 = 2 * idec2
        i07 = idec1 - idec2
        i08 = idec2 - 1
        i09 = 1 - idec2
        i10 = -idec2
        i11 = idec1 / idec2
        i12 = idec2 / 2
        i13 = 2 / idec2
        i14 = idec1 % idec2
        i15 = idec2 % 2
        i16 = 2 % idec2
        # Smoke test: evaluating each arithmetic expression must not raise.
        for expr in (i01, i02, i03, i04, i05, i06, i07, i08,
                     i09, i10, i11, i12, i13, i14, i15, i16):
            expr.value
        b01 = idec1 < idec2
        b02 = idec1 < 100
        b03 = 100 < idec2
        b04 = idec2 > idec1
        b05 = idec1 > -100
        b06 = 100 > idec2
        b07 = idec1 <= idec2
        b08 = idec1 <= 100
        b09 = -100 <= idec2
        b10 = idec2 >= idec1
        b11 = idec1 >= -100
        b12 = 100 >= idec2
        b13 = idec1 == idec2
        b14 = idec1 == 100
        b15 = 100 == idec2
        b16 = idec1 != idec2
        b17 = idec1 != 100
        b18 = 100 != idec2
        # Smoke test: evaluating each comparison must not raise.
        for expr in (b01, b02, b03, b04, b05, b06, b07, b08, b09,
                     b10, b11, b12, b13, b14, b15, b16, b17, b18):
            expr.value
        m.check()
    def test_bool_ops(self):
        """Mixed bool-expression / Python-bool overloads all resolve."""
        m = umo.Model()
        bdec1 = m.bool_var()
        bdec2 = m.bool_var()
        b01 = bdec1 & bdec2
        b02 = bdec1 & True
        b03 = False & bdec1
        b04 = bdec1 | bdec2
        b05 = bdec2 | False
        b06 = True | bdec2
        b07 = bdec1 ^ bdec2
        b08 = bdec2 ^ False
        b09 = True ^ bdec2
        b10 = ~bdec2
        # Smoke test: evaluating each expression must not raise.
        for expr in (b01, b02, b03, b04, b05, b06, b07, b08, b09, b10):
            expr.value
        m.check()
    def test_compound_float(self):
        """In-place operators on float expressions keep the float type."""
        m = umo.Model()
        dec1 = m.float_var(-10.0, 10.0)
        dec2 = m.float_var(5.0, 10.0)
        dec1 += 3.0
        dec1 += 3
        dec1 += dec2
        dec2 -= 3.0
        dec2 -= 3
        dec2 -= dec1
        dec1 *= 3.0
        dec1 *= 3
        dec1 *= dec2
        dec2 /= 3.0
        dec2 /= 3
        dec2 /= dec1
        self.assertTrue(isinstance(dec1, umo.FloatExpression))
        self.assertTrue(isinstance(dec2, umo.FloatExpression))
        m.check()
    def test_compound_int(self):
        """In-place operators on int expressions keep the int type."""
        m = umo.Model()
        dec1 = m.int_var(-10, 10)
        dec2 = m.int_var(5, 10)
        dec1 += 3
        dec1 += dec2
        dec2 -= 3
        dec2 -= dec1
        dec1 *= 3
        dec1 *= dec2
        dec2 //= 3
        dec2 //= dec1
        dec2 %= 3
        dec2 %= dec1
        self.assertTrue(isinstance(dec1, umo.IntExpression))
        self.assertTrue(isinstance(dec2, umo.IntExpression))
        m.check()
    def test_compound_bool(self):
        """In-place operators on bool expressions keep the bool type."""
        m = umo.Model()
        dec1 = m.bool_var()
        dec2 = m.bool_var()
        dec1 &= dec2
        dec1 &= True
        dec2 |= dec1
        dec2 |= False
        dec1 ^= dec2
        dec1 ^= True
        self.assertTrue(isinstance(dec1, umo.BoolExpression))
        self.assertTrue(isinstance(dec2, umo.BoolExpression))
        m.check()
    def test_intrinsics(self):
        """abs() and pow() on float expressions match Python's builtins."""
        m = umo.Model()
        dec1 = m.float_var(-10.0, 10.0)
        x01 = abs(dec1)
        x03 = pow(dec1, -3)
        x04 = pow(dec1, 4)
        for val1 in [5.2, -4.0, 2.51]:
            dec1.value = val1
            self.assertEqual(x01.value, abs(val1))
            self.assertEqual(x03.value, pow(val1, -3))
            self.assertEqual(x04.value, pow(val1, 4))
        m.check()
    def test_int_intrinsics(self):
        """abs() and pow() on int expressions match Python's builtins."""
        m = umo.Model()
        dec1 = m.int_var(-10, 10)
        x01 = abs(dec1)
        x02 = pow(dec1, 4)
        self.assertTrue(isinstance(x01, umo.IntExpression))
        for val1 in [5, -4, 2, 0]:
            dec1.value = val1
            self.assertEqual(x01.value, abs(val1))
            self.assertEqual(x02.value, pow(val1, 4))
        m.check()
    def test_nary(self):
        """N-ary operators infer the widest operand type; logical ops
        reject non-boolean operands and empty argument lists."""
        m = umo.Model()
        dec1 = m.float_var(-10, 10)
        dec2 = m.int_var(-10, 10)
        dec3 = m.bool_var()
        x = umo.sum(dec1, dec2, dec3)
        self.assertTrue(isinstance(x, umo.FloatExpression))
        x = umo.prod(dec1, dec2, dec3)
        self.assertTrue(isinstance(x, umo.FloatExpression))
        x = umo.min(dec1, dec2, dec3)
        self.assertTrue(isinstance(x, umo.FloatExpression))
        x = umo.max(dec1, dec2, dec3)
        self.assertTrue(isinstance(x, umo.FloatExpression))
        x = umo.logical_and(dec3, dec3, dec3)
        self.assertTrue(isinstance(x, umo.BoolExpression))
        x = umo.logical_or(dec3, dec3, dec3)
        self.assertTrue(isinstance(x, umo.BoolExpression))
        x = umo.logical_xor(dec3, dec3, dec3)
        self.assertTrue(isinstance(x, umo.BoolExpression))
        x = umo.sum(dec2, dec3)
        self.assertTrue(isinstance(x, umo.IntExpression))
        x = umo.prod(dec2, dec3)
        self.assertTrue(isinstance(x, umo.IntExpression))
        x = umo.min(dec2, dec3)
        self.assertTrue(isinstance(x, umo.IntExpression))
        x = umo.max(dec2, dec3)
        self.assertTrue(isinstance(x, umo.IntExpression))
        # Bool-only operands: sum widens to int, the rest stay bool.
        x = umo.sum(dec3, dec3)
        self.assertTrue(isinstance(x, umo.IntExpression))
        x = umo.prod(dec3, dec3)
        self.assertTrue(isinstance(x, umo.BoolExpression))
        x = umo.min(dec3, dec3)
        self.assertTrue(isinstance(x, umo.BoolExpression))
        x = umo.max(dec3, dec3)
        self.assertTrue(isinstance(x, umo.BoolExpression))
        self.assertRaises(Exception, lambda: umo.logical_and(dec3, dec3, dec1))
        self.assertRaises(Exception, lambda: umo.logical_and(dec3, dec2))
        self.assertRaises(Exception, lambda: umo.logical_and())
        self.assertRaises(Exception, lambda: umo.logical_or(dec3, dec3, dec1))
        self.assertRaises(Exception, lambda: umo.logical_or(dec3, dec2))
        self.assertRaises(Exception, lambda: umo.logical_or())
        self.assertRaises(Exception, lambda: umo.logical_xor(dec3, dec3, dec1))
        self.assertRaises(Exception, lambda: umo.logical_xor(dec3, dec2))
        self.assertRaises(Exception, lambda: umo.logical_xor())
    def test_nary_list(self):
        """N-ary operators also accept a single list argument."""
        m = umo.Model()
        dec1 = m.float_var(-10, 10)
        dec2 = m.int_var(-10, 10)
        dec3 = m.bool_var()
        x = umo.sum([dec1, dec2, dec3])
        self.assertTrue(isinstance(x, umo.FloatExpression))
        x = umo.prod([dec1, dec2, dec3])
        self.assertTrue(isinstance(x, umo.FloatExpression))
        x = umo.min([dec1, dec2, dec3])
        self.assertTrue(isinstance(x, umo.FloatExpression))
        x = umo.max([dec1, dec2, dec3])
        self.assertTrue(isinstance(x, umo.FloatExpression))
        x = umo.logical_and([dec3, dec3, dec3])
        self.assertTrue(isinstance(x, umo.BoolExpression))
        x = umo.logical_or([dec3, dec3, dec3])
        self.assertTrue(isinstance(x, umo.BoolExpression))
        x = umo.logical_xor([dec3, dec3, dec3])
        self.assertTrue(isinstance(x, umo.BoolExpression))
    def test_deletion(self):
        """Expressions keep their model alive after the Python reference dies."""
        m = umo.Model()
        dec1 = m.bool_var()
        m = None
        dec2 = dec1.model.int_var(-10, 10)
        dec1.model.check()
    def test_float_params(self):
        """Float parameters and the time_limit property round-trip."""
        m = umo.Model()
        m.set_float_param("test_param", 5.0)
        self.assertEqual(m.get_float_param("test_param"), 5.0)
        m.set_float_param("test_param", 6.4)
        self.assertEqual(m.get_float_param("test_param"), 6.4)
        self.assertEqual(m.time_limit, float("inf"))
        m.time_limit = 10.0
        self.assertEqual(m.time_limit, 10.0)
    def test_string_params(self):
        """String parameters and the solver property round-trip."""
        m = umo.Model()
        m.set_string_param("test_param", "test_value_1")
        self.assertEqual(m.get_string_param("test_param"), "test_value_1")
        m.set_string_param("test_param", "test_value_2")
        self.assertEqual(m.get_string_param("test_param"), "test_value_2")
        self.assertEqual(m.solver, "auto")
        m.solver = "cbc"
        self.assertEqual(m.solver, "cbc")
    def test_forbid_implicit_bool(self):
        """Implicit conversion of expressions to Python scalars is forbidden."""
        m = umo.Model()
        self.assertRaises(NotImplementedError, lambda: bool(m.bool_var()))
        self.assertRaises(Exception, lambda: int(m.int_var()))
        self.assertRaises(Exception, lambda: float(m.float_var()))
    def test_numpy(self):
        """NumPy scalar types map onto the matching expression types."""
        try:
            import numpy as np
        except ImportError:
            self.skipTest("numpy not installed")
        m = umo.Model()
        self.assertTrue(isinstance(m.constant(np.int32(6)), umo.IntExpression))
        self.assertTrue(isinstance(m.constant(np.int8(6)), umo.IntExpression))
        # BUG FIX: np.bool was removed in NumPy 1.24; use the scalar type
        # np.bool_ instead.
        self.assertTrue(isinstance(m.constant(np.bool_()), umo.BoolExpression))
        self.assertTrue(isinstance(m.constant(np.float64(4.5)), umo.FloatExpression))
        self.assertTrue(isinstance(m.constant(np.float32(4.5)), umo.FloatExpression))
    def test_two_models(self):
        """Expressions from different models cannot be combined."""
        m1 = umo.Model()
        m2 = umo.Model()
        f1 = m1.float_var()
        f2 = m2.float_var()
        self.assertRaises(Exception, lambda: f1 + f2)
        self.assertRaises(Exception, lambda: umo.sum(f1, f2))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "bc510e79c7cc7c14693f4b1bbe7f71090f79d168", "size": 19197, "ext": "py", "lang": "Python", "max_stars_repo_path": "apis/python/test.py", "max_stars_repo_name": "Coloquinte/umo", "max_stars_repo_head_hexsha": "1f39c316d6584bbed22913aabaa4bfb5ee02d72b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-24T20:56:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-24T20:56:25.000Z", "max_issues_repo_path": "apis/python/test.py", "max_issues_repo_name": "Coloquinte/umo", "max_issues_repo_head_hexsha": "1f39c316d6584bbed22913aabaa4bfb5ee02d72b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "apis/python/test.py", "max_forks_repo_name": "Coloquinte/umo", "max_forks_repo_head_hexsha": "1f39c316d6584bbed22913aabaa4bfb5ee02d72b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9845360825, "max_line_length": 85, "alphanum_fraction": 0.5515445122, "include": true, "reason": "import numpy", "num_tokens": 5471}
|
#include <cradle/gui/app/instance.hpp>
#include <wx/glcanvas.h>
#include <wx/msgdlg.h>
#include <cradle/external/clean.hpp>
#include <json\json.h>
#include <boost/program_options.hpp>
#include <alia/ui/utilities/styling.hpp>
#include <cradle/gui/app/internals.hpp>
#include <cradle/gui/app/top_level_ui.hpp>
#include <cradle/gui/internals.hpp>
#include <cradle/io/file.hpp>
namespace cradle {
// Clear all existing task groups (real and phantom), reinstall the
// controller's root task group, and drop any cached task-stack UI state.
void reset_task_groups(app_instance& instance)
{
    instance.task_groups.clear();
    instance.phantom_task_groups.clear();
    push_task_group(instance,
        instance.controller->get_root_task_group_controller());
    clear_data_block(instance.task_stack_ui_block);
}
// One-time application start-up: read configuration, parse the command
// line, initialize the GUI system, handle authentication and realm
// selection, and create the top-level window.
// Returns false when the process should exit immediately (e.g. --help).
bool static
initialize_app_instance(app_instance& instance,
    unsigned argc, wxCmdLineArgsArray const& argv)
{
    auto& controller = *instance.controller;
    auto app_info = controller.get_app_info();
    instance.info = app_info;
    instance.selected_page = app_level_page::APP_CONTENTS;
    instance.state_write_back_requested = false;
    // Cross-app shared config; fall back to defaults if it can't be read.
    shared_app_config shared_config;
    try
    {
        shared_config = read_shared_app_config();
    }
    catch (...)
    {
        shared_config = shared_app_config(none, 100, none);
    }
    instance.shared_config = shared_config;
    // Start crash reporting as early as possible.
    instance.crash_reporter.begin(
        shared_config.crash_dir ?
        get(shared_config.crash_dir) :
        file_path("."),
        app_info.thinknode_app_id,
        app_info.local_version_id);
    boost::filesystem::path executable_path((char const*)argv[0].ToUTF8());
    namespace po = boost::program_options;
    po::options_description desc("Supported options");
    desc.add_options()
        ("help", "show help message")
        ("version", "show version information")
        ("style-file", po::value<string>(), "set style file")
        ("realm", po::value<string>(), "specify the realm to use (by ID)")
        ("username", po::value<string>(), "set username for authentication")
        ("password", po::value<string>(), "set password for authentication")
        ("token", po::value<string>(), "set token for authentication")
    ;
    // Let the app controller contribute its own command-line options.
    auto additional_arguments = controller.get_app_command_line_arguments();
    for (auto const& argument : additional_arguments)
    {
        desc.add_options()
            (argument.first, po::value<string>(), argument.second)
        ;
    }
    po::positional_options_description p;
    po::variables_map vm;
    po::store(po::command_line_parser(argc, argv).
        options(desc).positional(p).run(), vm);
    po::notify(vm);
    if (vm.count("help"))
    {
        // TODO: This doesn't actually end up going to the Windows console.
        std::cout << desc << "\n";
        return false;
    }
    // Per-app config; fall back to a default window layout if unreadable.
    app_config config;
    try
    {
        config = read_app_config(app_info.thinknode_app_id);
    }
    catch (...)
    {
        config =
            app_config(
                none, none,
                regular_app_window_state(
                    none, make_vector<int>(850, 1000), false, true),
                430,
                350,
                make_layout_vector(0,0),
                1.0);
    }
    instance.config.set(config);
    instance.api_url = read_app_config_file("config.txt", "api_url");
    instance.gui_system.reset(new gui_system);
    // cache_size is stored in GiB; convert to bytes here.
    initialize_gui_system(&*instance.gui_system,
        shared_config.cache_dir ?
        get(shared_config.cache_dir) :
        get_default_cache_dir("Astroid2"),
        "",
        shared_config.cache_size * int64_t(0x40000000),
        file_path("ca-bundle.crt"));
    // See if the username is available from the command-line or the config.
    if (vm.count("username"))
    {
        auto username = vm["username"].as<string>();
        instance.username.set(username);
    }
    else if (config.username)
    {
        instance.username.set(get(config.username));
    }
    // If a token is provided on the command line, use it to sign in directly.
    if(vm.count("token"))
    {
        auto token = vm["token"].as<string>();
        instance.token.set(token);
        sign_in_wih_token(instance, vm["token"].as<string>());
    }
    // If a password and username are specified on the command-line, initiate the
    // sign-in process.
    else if (vm.count("password") && vm.count("username"))
    {
        start_sign_in(instance, instance.username.get(), vm["password"].as<string>());
    }
    // See if the realm ID is available from the command-line or the config.
    // And if it is, select it.
    if (vm.count("realm"))
    {
        auto realm_id = vm["realm"].as<string>();
        instance.realm_id.set(realm_id);
    }
    else if (config.realm_id)
    {
        auto realm_id = get(config.realm_id);
        instance.realm_id.set(realm_id);
    }
    // Style file: command-line override, else alia.style next to the exe.
    if (vm.count("style-file"))
    {
        instance.style_file_path = vm["style-file"].as<string>();
    }
    else
    {
        instance.style_file_path = (executable_path.branch_path() /
            "alia.style").string<std::string>();
    }
    // Process any app specific command line arguments
    controller.process_app_command_line_arguments(vm);
    reset_task_groups(instance);
    style_tree_ptr style = parse_style_file(instance.style_file_path.c_str());
    // The window shares ownership of its controller via the shared_ptr.
    auto* window_controller = new app_window_controller;
    alia__shared_ptr<alia::app_window_controller>
        window_controller_ptr(window_controller);
    window_controller->instance = &instance;
    controller.register_tasks();
    // OpenGL canvas attributes: RGBA, double-buffered, stencil, 4x MSAA.
    int gl_canvas_attribs[] = {
        WX_GL_RGBA,
        WX_GL_DOUBLEBUFFER,
        WX_GL_STENCIL_SIZE, 1,
        WX_GL_SAMPLE_BUFFERS, 1,
        WX_GL_SAMPLES, 4,
        0 };
    // The frame is owned and destroyed by wxWidgets, not by us.
    auto* frame =
        create_wx_framed_window(
            app_info.app_name,
            window_controller_ptr, style,
            to_alia(instance.config.get().window_state),
            gl_canvas_attribs);
#if defined(__WXMSW__)
    frame->SetIcon(wxICON(wxSTD_FRAME));
#endif
    return true;
}
// Cancel any outstanding background jobs and tear down the GUI system.
void static
shut_down_app_instance(app_instance& instance)
{
    clear_all_jobs(*instance.gui_system->bg);
    instance.gui_system.reset();
}
// Construct the application: take ownership of the controller and verify
// that an OpenGL visual is available. Failure is recorded in return_code
// (rather than aborting) so OnRun() can exit cleanly later.
untyped_application::untyped_application(app_controller_interface* controller)
{
    instance = new app_instance;
    instance->controller.reset(controller);
    int attribs[] = { WX_GL_DOUBLEBUFFER, 0 };
    if (!this->InitGLVisual(attribs))
    {
        wxMessageBox("OpenGL not available");
        instance->return_code = -1;
    }
    else
        instance->return_code = 0;
}
// Release the app instance. After OnExit() has run, instance is null and
// deleting it again is a harmless no-op.
untyped_application::~untyped_application()
{
    delete instance;
}
// wxWidgets entry point: initialize the app instance. Exceptions are
// reported to the user and recorded in return_code; note this still
// returns true on error so that OnRun() is entered and can exit with
// the recorded error code instead of wx aborting start-up.
bool untyped_application::OnInit()
{
    try
    {
        return initialize_app_instance(*instance, argc, argv);
    }
    catch (std::exception& e)
    {
        wxMessageBox(std::string("An error occurred during application"
            " initialization.\n\n") + e.what());
        instance->return_code = -1;
    }
    catch (...)
    {
        wxMessageBox("An unknown error occurred during application"
            " initialization.");
        instance->return_code = -1;
    }
    return true;
}
// Enter the wx event loop only if initialization succeeded; otherwise
// propagate the error code recorded during construction/OnInit().
int untyped_application::OnRun()
{
    if (instance->return_code == 0)
        return wxGLApp::OnRun();
    else
        return instance->return_code;
}
// Shut down and release the app instance, nulling the pointer so the
// destructor's delete does not double-free.
int untyped_application::OnExit()
{
    shut_down_app_instance(*instance);
    delete instance;
    instance = 0;
    return wxGLApp::OnExit();
}
}
|
{"hexsha": "f176b0a7ac68540e95ea775d07f8a4c283d65648", "size": 7445, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "cradle/src/cradle/gui/app/instance.cpp", "max_stars_repo_name": "dotdecimal/open-cradle", "max_stars_repo_head_hexsha": "f8b06f8d40b0f17ac8d2bf845a32fcd57bf5ce1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cradle/src/cradle/gui/app/instance.cpp", "max_issues_repo_name": "dotdecimal/open-cradle", "max_issues_repo_head_hexsha": "f8b06f8d40b0f17ac8d2bf845a32fcd57bf5ce1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cradle/src/cradle/gui/app/instance.cpp", "max_forks_repo_name": "dotdecimal/open-cradle", "max_forks_repo_head_hexsha": "f8b06f8d40b0f17ac8d2bf845a32fcd57bf5ce1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2018-09-28T17:12:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T14:22:29.000Z", "avg_line_length": 26.8772563177, "max_line_length": 86, "alphanum_fraction": 0.635325722, "num_tokens": 1659}
|
import logging
import numpy as np
from astropy.wcs import Sip
__author__ = 'drharbeck@gmail.com'
log = logging.getLogger(__name__)
# Per-camera second-order SIP distortion coefficients.
# Keys are LCO camera identifiers (e.g. 'ak01'); values hold the non-zero
# entries of the SIP A/B polynomial matrices consumed by getWCSForcamera().
# Cameras absent from this table get an identity SIP transformation.
akwcslookup = {
    'ak01': {'SIPA_1_1': 2.8875384573560257e-06, 'SIPA_0_2': -1.2776642259520679e-05, 'SIPA_2_0': 6.873210426347869e-06,
             'SIPB_1_1': 1.8322056773537455e-05, 'SIPB_0_2': 4.49844648740455e-06, 'SIPB_2_0': 2.076956178814459e-06},
    'ak05': {'SIPA_1_1': -6.034337103080695e-06, 'SIPA_0_2': -1.1276384587835203e-05,
             'SIPA_2_0': 1.2802411758347013e-05,
             'SIPB_1_1': 2.4116031017471104e-05, 'SIPB_0_2': -4.302508968114081e-06, 'SIPB_2_0': 5.78043080411828e-07},
    'ak06': {'SIPA_1_1': -9.220745938359626e-07, 'SIPA_0_2': -1.085016354858831e-05, 'SIPA_2_0': 1.3092279633304746e-05,
             'SIPB_1_1': 2.311880941128194e-05, 'SIPB_0_2': -1.5497196904810146e-06,
             'SIPB_2_0': -3.348831288004136e-07},
    'ak10': {'SIPA_1_1': 8.681404689671723e-07, 'SIPA_0_2': -1.0897815933209444e-05, 'SIPA_2_0': 1.2154989981759681e-05,
             'SIPB_1_1': 2.246038112054045e-05, 'SIPB_0_2': 2.698442176640065e-06, 'SIPB_2_0': 9.98348136299767e-07},
    'kb38': {'SIPA_1_1': -5.706068042156633e-05, 'SIPA_0_2': 5.239102464660156e-06, 'SIPA_2_0': -3.0336923628038486e-06,
             'SIPB_1_1': -5.785784217724779e-06, 'SIPB_0_2': -3.141027096614327e-05, 'SIPB_2_0': 2.8867430966128886e-05},
    'kb42': {'SIPA_1_1': -6.223288587920849e-05, 'SIPA_0_2': 4.694665020791541e-06, 'SIPA_2_0': -6.408923602681285e-06,
             'SIPB_1_1': -8.713441942086379e-06, 'SIPB_0_2': -3.306473868683677e-05, 'SIPB_2_0': 3.0916991418377915e-05},
    'ak14': {'SIPA_1_1': 2.579599092541112e-06, 'SIPA_0_2': -1.15833428738132e-05, 'SIPA_2_0': 1.1950471404987229e-05,
             'SIPB_1_1': 2.37986331949568e-05, 'SIPB_0_2': 3.8672274815249445e-06, 'SIPB_2_0': 5.071715952241676e-07},
}
def getWCSForcamera(cameraname, crpix1, crpix2):
    """ Return a SIP non-linear coordinate correction object initialized for a
    camera from a lookup table.

    If the camera is not in the lookup table, an identity transformation is
    returned (all polynomial coefficients stay zero).
    TODO: variable order; so far we limit ourselves to second order.
    TODO: Time-constrained lookup.
    :param cameraname: Name of camera, e.g., ak01
    :param crpix1: CRPIX1 for camera, as that may have changed over time
    :param crpix2: CRPIX2 for camera, as that may have changed over time
    :return: astropy.wcs.Sip object for the camera (identity if unknown)
    """
    order = 2
    # Zero-filled coefficient matrices yield the identity transformation.
    sip_a = np.zeros((order + 1, order + 1), np.double)
    sip_b = np.zeros((order + 1, order + 1), np.double)
    coeffs = akwcslookup.get(cameraname)
    if coeffs is not None:
        # Fill in the second-order cross/square terms measured for this camera.
        sip_a[1, 1] = coeffs['SIPA_1_1']
        sip_a[2, 0] = coeffs['SIPA_2_0']
        sip_a[0, 2] = coeffs['SIPA_0_2']
        sip_b[1, 1] = coeffs['SIPB_1_1']
        sip_b[2, 0] = coeffs['SIPB_2_0']
        sip_b[0, 2] = coeffs['SIPB_0_2']
    return Sip(sip_a, sip_b, None, None, [crpix1, crpix2])
def transformList(x, y, sip):
    """ Undistort a source catalog's pixel coordinates with a SIP correction.

    :param x: array of x pixel coordinates
    :param y: array of y pixel coordinates
    :param sip: astropy.wcs.Sip distortion object
    :return: (u,v) transformed pixels, but with CRPIX reapplied.
    """
    # Lazy %-args: the message is only formatted if DEBUG logging is enabled.
    log.debug("undistorting source catalog with sip %s", sip.crpix)
    # pix2foc removes the CRPIX offset; add it back so the result stays in
    # the original pixel frame.
    uv = sip.pix2foc(np.asarray([x, y]).T, 1)
    u = uv[:, 0] + sip.crpix[0]
    v = uv[:, 1] + sip.crpix[1]
    return u, v
|
{"hexsha": "a7b5598a1127ebe269c692d1133b1bb1f7fa968b", "size": 3412, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/LCOWCSLookupProvider.py", "max_stars_repo_name": "LCOGT/lcowcstools", "max_stars_repo_head_hexsha": "e5db536647dbb143a1f75293ec4f2a4acb68182b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/LCOWCSLookupProvider.py", "max_issues_repo_name": "LCOGT/lcowcstools", "max_issues_repo_head_hexsha": "e5db536647dbb143a1f75293ec4f2a4acb68182b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/LCOWCSLookupProvider.py", "max_forks_repo_name": "LCOGT/lcowcstools", "max_forks_repo_head_hexsha": "e5db536647dbb143a1f75293ec4f2a4acb68182b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1411764706, "max_line_length": 121, "alphanum_fraction": 0.6652989449, "include": true, "reason": "import numpy,from astropy", "num_tokens": 1478}
|
"""test_poisson.py [options]
Solves the Heterogeneous Poisson equation on a unit cube. A full
script for testing generation and tools provided by proteus.
"""
import numpy as np
import sys
from proteus import Comm, Profiling, NumericalSolution, TransportCoefficients, default_so, default_s
from proteus.FemTools import C0_AffineLinearOnSimplexWithNodalBasis
from proteus.LinearSolvers import LU
from proteus.NonlinearSolvers import Newton
from proteus.NumericalFlux import Advection_DiagonalUpwind_Diffusion_IIPG_exterior
from proteus.Quadrature import SimplexGaussQuadrature
from proteus.superluWrappers import SparseMatrix
from proteus.TimeIntegration import NoIntegration
from ignition.utils.proteus.defaults import ProteusProblem, ProteusNumerics
from ignition.utils.proteus.optparser import get_prog_opts
log = Profiling.logEvent
nd = 3
class Poisson(ProteusProblem):
    """
    Heterogeneous Poisson's equation, -div(a(x)u) = f(x), on unit domain [0,1]x[0,1]x[0,1]
    """
    ##\page Tests Test Problems
    # \ref poisson_3d_p.py "Heterogeneous Poisson's equation, -div(a(x)u) = f(x), on unit domain [0,1]x[0,1]x[0,1]"
    #
    ##\ingroup test
    #\file poisson_3d_p.py
    #
    #\brief Heterogenous Poisson's equations in 3D unit domain [0,1]x[0,1]x[0,1]
    def __init__(self):
        """Configure the steady-state problem: coefficients, exact solution, BCs."""
        self.name = "Poisson"
        #space dimension
        self.nd = 3
        #if unstructured would need variable polyfile or meshfile set
        #steady-state so no initial conditions
        self.initialConditions = None
        #use sparse diffusion representation
        self.sd=True
        #identity tensor for defining analytical heterogeneity functions
        self.Ident = np.zeros((nd,nd),'d')
        self.Ident[0,0]=1.0; self.Ident[1,1] = 1.0; self.Ident[2,2]=1.0
        #store a,f in dictionaries since coefficients class allows for one entry per component
        self.aOfX = {0:self.a5}; self.fOfX = {0:self.f5}
        #one component
        self.nc = 1
        #load analytical solution, dirichlet conditions, flux boundary conditions into the expected variables
        self.analyticalSolution = {0:self.u5Ex()}
        self.analyticalSolutionVelocity = {0:self.velEx(self.analyticalSolution[0],self.aOfX[0])}
        #
        self.dirichletConditions = {0:self.getDBC5}
        self.advectiveFluxBoundaryConditions = {0:self.getAdvFluxBC5}
        self.diffusiveFluxBoundaryConditions = {0:{0:self.getDiffFluxBC5}}
        self.fluxBoundaryConditions = {0:'setFlow'} #options are 'setFlow','noFlow','mixedFlow'
        #equation coefficient names
        self.coefficients = TransportCoefficients.PoissonEquationCoefficients(self.aOfX,
            self.fOfX, self.nc, self.nd)
        #
        self.coefficients.variableNames=['u0']
    #for computing exact 'Darcy' velocity
    class velEx:
        """Exact Darcy velocity: u(X) = -a(X) . grad(u_exact)(X)."""
        def __init__(self,duex,aex):
            # duex: object providing duOfX (gradient of the exact solution)
            # aex: callable returning the 3x3 diffusion tensor at X
            self.duex = duex
            self.aex = aex
        def uOfX(self,X):
            """Return -a(X) . grad(u_exact)(X)."""
            du = self.duex.duOfX(X)
            A = np.reshape(self.aex(X),(3,3))
            return -np.dot(A,du)
        def uOfXT(self,X,T):
            """Time-independent wrapper around uOfX (steady-state problem)."""
            return self.uOfX(X)
    ##################################################
    #define coefficients a(x)=[a_{ij}] i,j=0,2, right hand side f(x) and analytical solution u(x)
    #u = x*x + y*y + z*z, a_00 = x + 5, a_11 = y + 5.0 + a_22 = z + 10.0
    #f = -2*x -2*(5+x) -2*y-2*(5+y) -2*z-2*(10+z)
    #
    def a5(self, x):
        """Diagonal heterogeneous diffusion tensor a(x)."""
        return np.array([[x[0] + 5.0,0.0,0.0],[0.0,x[1] + 5.0,0.0],[0.0,0.0,x[2]+10.0]],'d')
    def f5(self, x):
        """Right-hand side f(x) = -div(a(x) grad u) for the exact solution u5Ex."""
        return -2.0*x[0] -2*(5.+x[0]) -2.*x[1]-2.*(5.+x[1]) -2.*x[2]-2.*(10+x[2])
    #'manufactured' analytical solution
    class u5Ex:
        """Manufactured exact solution u(x) = x^2 + y^2 + z^2."""
        def __init__(self):
            pass
        def uOfX(self,x):
            """Exact solution value at x."""
            return x[0]**2+x[1]**2+x[2]**2
        def uOfXT(self,X,T):
            """Time-independent wrapper around uOfX (steady-state problem)."""
            return self.uOfX(X)
        def duOfX(self,X):
            """Gradient of the exact solution: grad u = 2*(x, y, z)."""
            du = 2.0*np.reshape(X[0:3],(3,))
            return du
        def duOfXT(self,X,T):
            """Time-independent wrapper around duOfX."""
            return self.duOfX(X)
    #dirichlet boundary condition functions on (x=0,y,z), (x,y=0,z), (x,y=1,z), (x,y,z=0), (x,y,z=1)
    def getDBC5(self, x,flag):
        """Dirichlet BC: exact solution on all boundary faces except x=1."""
        if x[0] in [0.0] or x[1] in [0.0,1.0] or x[2] in [0.0,1.0]:
            return lambda x,t: self.u5Ex().uOfXT(x,t)
    def getAdvFluxBC5(self, x,flag):
        """No advective flux boundary conditions for this problem."""
        pass
    #specify flux on (x=1,y,z)
    def getDiffFluxBC5(self, x,flag):
        """Diffusive flux BC: exact normal flux on x=1, zero flux elsewhere off the Dirichlet faces."""
        if x[0] == 1.0:
            n = np.zeros((nd,),'d'); n[0]=1.0
            return lambda x,t: np.dot(self.velEx(self.u5Ex(),self.a5).uOfXT(x,t),n)
        if not (x[0] in [0.0] or x[1] in [0.0,1.0] or x[2] in [0.0,1.0]):
            return lambda x,t: 0.0
class C0P1_Poisson_Numerics(ProteusNumerics):
    """Numerics configuration: continuous piecewise-linear (C0 P1) elements,
    Newton solver, LU (serial) or PETSc KSP (parallel) linear solvers."""
    #steady-state so no time integration
    timeIntegration = NoIntegration
    #number of output timesteps
    nDTout = 1
    #finite element spaces
    femSpaces = {0:C0_AffineLinearOnSimplexWithNodalBasis}
    #numerical quadrature choices
    elementQuadrature = SimplexGaussQuadrature(nd,4)
    elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,4)
    #number of nodes in x,y,z
    nnx = 7
    nny = 7
    nnz = 7
    #if unstructured would need triangleOptions flag to be set
    #number of levels in mesh
    nLevels = 1
    #no stabilization or shock capturing
    subgridError = None
    shockCapturing = None
    #nonlinear solver choices
    multilevelNonlinearSolver = Newton
    levelNonlinearSolver = Newton
    #linear problem so force 1 iteration allowed
    maxNonlinearIts = 2
    maxLineSearches = 1
    fullNewtonFlag = True
    #absolute nonlinear solver residual tolerance
    nl_atol_res = 1.0e-8
    #relative nonlinear solver convergence tolerance as a function of h
    #(i.e., tighten relative convergence test as we refine)
    tolFac = 0.0
    #matrix type
    matrix = SparseMatrix
    #convenience flag
    parallel = False
    # NOTE(review): the parallel branch references KSP_petsc4py and
    # MeshParallelPartitioningTypes, which are not imported in this file;
    # flipping `parallel` to True would raise NameError at class creation.
    # Confirm the needed proteus imports before enabling parallel runs.
    if parallel:
        multilevelLinearSolver = KSP_petsc4py
        #for petsc do things lie
        #"-ksp_type cg -pc_type asm -pc_asm_type basic -ksp_atol 1.0e-10 -ksp_rtol 1.0e-10 -ksp_monitor_draw" or
        #-pc_type lu -pc_factor_mat_solver_package
        #can also set -pc_asm_overlap 2 with default asm type (restrict)
        levelLinearSolver = KSP_petsc4py#
        #for petsc do things like
        #"-ksp_type cg -pc_type asm -pc_asm_type basic -ksp_atol 1.0e-10 -ksp_rtol 1.0e-10 -ksp_monitor_draw" or
        #-pc_type lu -pc_factor_mat_solver_package
        #can also set -pc_asm_overlap 2 with default asm type (restrict)
        #levelLinearSolver = PETSc#
        #pick number of layers to use in overlap
        nLayersOfOverlapForParallel = 0
        #type of partition
        parallelPartitioningType = MeshParallelPartitioningTypes.node
        #parallelPartitioningType = MeshParallelPartitioningTypes.element
        #have to have a numerical flux in parallel
        numericalFluxType = Advection_DiagonalUpwind_Diffusion_IIPG_exterior
        #for true residual test
        linearSolverConvergenceTest = 'r-true'
        #to allow multiple models to set different ksp options
        #linear_solver_options_prefix = 'poisson_'
        linearSmoother = None
    else:
        multilevelLinearSolver = LU
        levelLinearSolver = LU
        numericalFluxType = Advection_DiagonalUpwind_Diffusion_IIPG_exterior
    #linear solver relative convergence test
    linTolFac = 0.0
    #linear solver absolute convergence test
    l_atol_res = 1.0e-10
    #conservativeFlux = {0:'pwl'}
def init_mpi_petsc(opts):
    """Initialize MPI (and PETSc) from program options.

    :param opts: parsed options providing ``petscOptions`` (space-separated
        PETSc flags as one string, or None) and ``petscOptionsFile`` (path to
        a file of PETSc flags, or None).  When both are given, the file wins.
    :return: the initialized proteus ``Comm`` communicator object.
    """
    log("Initializing MPI")
    if opts.petscOptions is not None:
        petsc_argv = sys.argv[:1] + opts.petscOptions.split()
        log("PETSc options from commandline")
        log(str(petsc_argv))
    else:
        petsc_argv = sys.argv[:1]
    if opts.petscOptionsFile is not None:
        # File-based options replace anything collected above.
        petsc_argv = [sys.argv[0]]
        petsc_argv += open(opts.petscOptionsFile).read().split()
        log("PETSc options from file")  # fixed: this branch reads a file, not the commandline
        log(str(petsc_argv))
    return Comm.init(argv=petsc_argv)
def main(*args):
    """Build the Poisson problem/numerics pair and run the proteus solver.

    :param args: command-line arguments (program name excluded).
    """
    opts, args = get_prog_opts(args, __doc__)
    comm = init_mpi_petsc(opts)
    # Single-model run: one problem, one numerics object, default simulation flags.
    problem_list = [Poisson(),]
    simulation_list = [default_s]
    numerics_list = [C0P1_Poisson_Numerics(),]
    # NOTE(review): periodicDirichletConditions and T are not set by Poisson
    # in this file -- presumably inherited from ProteusProblem; confirm.
    numerics_list[0].periodicDirichletConditions = problem_list[0].periodicDirichletConditions
    numerics_list[0].T = problem_list[0].T
    simulation_name = problem_list[0].name + "_" + numerics_list[0].__class__.__name__
    simulation_name_proc = simulation_name + "_" + repr(comm.rank())
    simFlagsList = [{ 'simulationName': simulation_name,
                      'simulationNameProc': simulation_name_proc,
                      'dataFile': simulation_name_proc + '.dat',
                      'components' : [ci for ci in range(problem_list[0].coefficients.nc)],
                      }]
    # Populate the "system" object expected by NumericalSolution.
    so = default_so
    so.name = problem_list[0].name
    so.pnList = problem_list
    so.sList = [default_s]
    # Optional numerics attributes: fall back silently when absent.
    try:
        so.systemStepControllerType = numerics_list[0].systemStepControllerType
    except AttributeError:
        pass
    try:
        so.tnList = numerics_list[0].tnList
        so.archiveFlag = numerics_list[0].archiveFlag
    except AttributeError:
        pass
    runNumber = 0
    runName = so.name + repr(runNumber)
    # Configure per-process logging before constructing the solution object.
    Profiling.procID=comm.rank()
    if simulation_list[0].logAllProcesses or opts.logAllProcesses:
        Profiling.logAllProcesses = True
    Profiling.flushBuffer=simulation_list[0].flushBuffer
    if opts.logLevel > 0:
        Profiling.openLog(runName+".log",opts.logLevel)
    ns = NumericalSolution.NS_base(default_so, problem_list, numerics_list, simulation_list,
                                   opts, simFlagsList)
    ns.calculateSolution(runName)
if __name__ == "__main__":
main(sys.argv[1:])
|
{"hexsha": "12224a77d7874221fed2cfb2da931124d5d10bba", "size": 9849, "ext": "py", "lang": "Python", "max_stars_repo_path": "ignition/utils/proteus/test/test_poisson.py", "max_stars_repo_name": "IgnitionProject/ignition", "max_stars_repo_head_hexsha": "0eeb3a7878d828bc3c06d2cb2dd781e17776a8a6", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2015-01-25T18:15:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T17:39:12.000Z", "max_issues_repo_path": "ignition/utils/proteus/test/test_poisson.py", "max_issues_repo_name": "IgnitionProject/ignition", "max_issues_repo_head_hexsha": "0eeb3a7878d828bc3c06d2cb2dd781e17776a8a6", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ignition/utils/proteus/test/test_poisson.py", "max_forks_repo_name": "IgnitionProject/ignition", "max_forks_repo_head_hexsha": "0eeb3a7878d828bc3c06d2cb2dd781e17776a8a6", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8145454545, "max_line_length": 115, "alphanum_fraction": 0.6462585034, "include": true, "reason": "import numpy", "num_tokens": 2872}
|
function cd2EditedFile()
% cd2EditedFile: change the current working directory to the folder of
% the file that is open and active in the MATLAB editor.
%
% references at:
% <<http://blogs.mathworks.com/community/2011/05/16/matlab-editor-api-examples/>>
% Copyright 2012, Clemens Ager
%%
cd(fileparts(matlab.desktop.editor.getActiveFilename));
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/38104-cd-to-edited-file/cd2EditedFile.m"}
|
# Languages.jl: language types plus word-list accessors (articles,
# prepositions, pronouns, stop words) for the supported languages.
module Languages
using Compat
# Public API: language types and per-language lookup functions.
export Language, EnglishLanguage, SpanishLanguage, GermanLanguage
export isocode, name
export articles, definite_articles, indefinite_articles
export prepositions
export pronouns
export stopwords
# Shared cache used by the included word-list helpers.
cache = Dict()
include("types.jl")
include("utils.jl")
include("word_lists.jl")
end
|
{"hexsha": "96bc3d9575d1c95cc510a34278b3aa23615427e9", "size": 322, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Languages.jl", "max_stars_repo_name": "JuliaPackageMirrors/Languages.jl", "max_stars_repo_head_hexsha": "1a333b0ccf8850f7a7fffe51ef8f4ca56f9d531b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Languages.jl", "max_issues_repo_name": "JuliaPackageMirrors/Languages.jl", "max_issues_repo_head_hexsha": "1a333b0ccf8850f7a7fffe51ef8f4ca56f9d531b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Languages.jl", "max_forks_repo_name": "JuliaPackageMirrors/Languages.jl", "max_forks_repo_head_hexsha": "1a333b0ccf8850f7a7fffe51ef8f4ca56f9d531b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.125, "max_line_length": 66, "alphanum_fraction": 0.798136646, "num_tokens": 70}
|
```python
%matplotlib inline
from sympy import *
init_printing(use_unicode=True)
```
```python
r, u, v, c, r_c, u_c, v_c, E, p, r_p, u_p, v_p, e, a, b, q, b_0, b_1, b_2, b_3, q_0, q_1, q_2, q_3, q_4, q_5, t, g, c_0, c_1, c_2, c_3, c_4, c_5 = symbols('r u v c r_c u_c v_c E p r_p u_p v_p e a b q b_0 b_1 b_2 b_3 q_0 q_1 q_2 q_3 q_4 q_5 t g c_0 c_1 c_2 c_3 c_4 c_5',positive=True)
```
```python
cprm, thetaprm, Pprm, betaprm, rtheta, gammaprm, rhoprm, Eprm, sigmaprm = symbols('cprm thetaprm Pprm betaprm r_theta gammaprm rhoprm Eprm sigmaprm',positive=True)
```
```python
gamma,r_p,u_p,e = symbols('gamma r_p u_p e',positive=True)
```
#### $f_{2}(c,p) = \dfrac{1}{2}r_{c}c^{2}+\dfrac{1}{4}u_{c}c^{4}+\dfrac{1}{6}v_{c}c^{6}+\dfrac{1}{2}r_{p}p^{2}+\dfrac{1}{4}u_{p}p^{4}-\gamma cp-\dfrac{1}{2}ec^{2}p^{2}-Ep$
```python
f2 = ((1/2)*r_c*c**2+(1/4)*u_c*c**4+(1/6)*v_c*c**6+(1/2)*r_p*p**2+(1/4)*u_p*p**4-E*p-gamma*c*p-e*c**2*p**2/2)
nsimplify(f2)
```
### Rescaling
```python
P, C, w, rho, beta, tau, Epr = symbols('P C w rho beta tau E^{\prime}')
```
```python
fP = nsimplify(f2.subs(p,w*Pprm))
fP
```
```python
fP = nsimplify(fP.subs(w,sqrt(r_p/u_p)))
fP
```
```python
fP = expand(fP/(r_p**2/u_p),r_p)
fP
```
```python
fP = fP.subs([(E,Epr/sqrt(u_p)*r_p**(3/2)),(c,cprm*sqrt(r_p/e))])
fP
```
```python
fP = fP.subs([(cprm,thetaprm),(r_c,rtheta*e*r_p/u_p),(u_c,gammaprm*e**2/u_p),(v_c,rhoprm*e**3/(u_p*r_p)),(gamma,betaprm*r_p*sqrt(e/u_p))])
fP
```
```python
fhelp = nsimplify((1/2)*rtheta*thetaprm**2+(1/4)*gammaprm*thetaprm**4+(1/6)*rhoprm*thetaprm**6+(1/2)*Pprm**2+(1/4)*Pprm**4
+(1/6)*sigmaprm*Pprm**6-betaprm*thetaprm*Pprm-(1/2)*Pprm**2*thetaprm**2-Eprm*Pprm)
fhelp
```
```python
Ep = solve(fhelp.diff(Pprm),Eprm)[0]
Ep
```
```python
fhelp.diff(thetaprm)
```
```python
P_min = solve(fhelp.diff(thetaprm),Pprm)[0]
P_min
```
```python
Ep = Ep.subs(Pprm,P_min)
Ep
```
```python
series(Ep,thetaprm,n=7)
```
```python
# Series expansion of $E$ out to $\mathcal{O(\theta^{\prime 7})}$:
# $\beta E = \theta^{\prime}\left(-\beta^{\prime 2} + r_{\theta}^{\prime}\right) + \theta^{\prime 3}\left(\gamma^{\prime} - r_{\theta}^{\prime} + \dfrac{r_{\theta}^{\prime 3}}{\beta^{\prime 2}} - \dfrac{r_{\theta}^{\prime 2}}{\beta^{\prime 2}}\right) + \theta^{\prime 5}\left(-\gamma^{\prime} + \rho^{\prime} + \dfrac{3\gamma^{\prime}r_{\theta}^{\prime 2}}{\beta^{\prime 2}} - \dfrac{2\gamma^{\prime}r_{\theta}}{\beta^{\prime 2}} + \dfrac{r_{\theta}^{\prime 2}}{\beta^{\prime 2}} - \dfrac{3r_{\theta}^{\prime 4}}{\beta^{\prime 4}} + \dfrac{2r_{\theta}^{\prime 3}}{\beta^{\prime 4}}\right)$
# The coefficients of $E$ i.t.o. $a$ where $r_{\theta}^{\prime} = \beta^{\prime 2} + a$:
# $B(a) = \dfrac{a^{3}}{\beta^{\prime 2}} + a^{2}\left(3 - \dfrac{1}{\beta^{\prime 2}}\right) + a(3\beta^{\prime 2} - 3) + \beta^{\prime 4} - 2\beta^{\prime 2} + \gamma^{\prime}$
# $C(a) = -\dfrac{3a^{4}}{\beta^{\prime 4}} + a^{3}\left(\dfrac{2}{\beta^{\prime 4}} - \dfrac{12}{\beta^{\prime 2}}\right) + a^{2}\left(\dfrac{3\gamma^{\prime}}{\beta^{\prime 2}} - 18 + \dfrac{7}{\beta^{\prime 2}}\right) + a\left(6\gamma^{\prime} - 12\beta^{\prime 2} - \dfrac{2\gamma^{\prime}}{\beta^{\prime 2}} + 8\right) - 3\beta^{\prime 4} + 3\beta^{\prime 2}\gamma^{\prime} + 3\beta^{\prime 2} - 3\gamma^{\prime} + \rho^{\prime}$
# $R(a) = B(a)^{2} - \dfrac{20aC(a)}{9}$ (not going to write this one out, it's long)
```
```python
rth = betaprm**2+a*betaprm
B = (gammaprm-rtheta+(rtheta**3-rtheta**2)/betaprm**2)/betaprm
C = (-gammaprm+rhoprm+3*gammaprm*(rtheta/betaprm)**2-2*gammaprm*rtheta/betaprm**2
+(rtheta/betaprm)**2+rtheta**5*sigmaprm/betaprm**4-3*(rtheta/betaprm)**4+2*rtheta**3/betaprm**4)/betaprm
```
```python
B = collect(expand(B.subs(rtheta,rth)),a)
B
```
```python
C = collect(expand(C.subs(rtheta,rth)),a)
C
```
```python
b0 = betaprm**3-2*betaprm+gammaprm/betaprm
b1 = 3*betaprm**2-3
b2 = 3*betaprm-1/betaprm
b3 = 1
c0 = betaprm**5*sigmaprm-3*betaprm**3+3*betaprm*gammaprm+3*betaprm-3*gammaprm/betaprm+rhoprm/betaprm
c1 = 5*betaprm**4*sigmaprm+6*gammaprm-12*betaprm**2+8-2*gammaprm/betaprm**2
c2 = 10*betaprm**3*sigmaprm+(3*gammaprm+7)/betaprm-18*betaprm
c3 = 10*betaprm**2*sigmaprm-12+2/betaprm**2
c4 = 5*betaprm*sigmaprm-3/betaprm
c5 = sigmaprm
```
```python
gammap = expand(solve(b0-b_0,gammaprm)[0])
sigmap = expand(solve(c0-c_0,sigmaprm)[0])
```
```python
replacements = [(gammaprm,gammap),(sigmaprm,sigmap)]
```
```python
sigmap = simplify(sigmap.subs(gammaprm,gammap))
```
```python
sigmap
```
```python
gammap
```
```python
b0
```
```python
b1
```
```python
b2
```
```python
b3
```
```python
B_a = b3*a**3+b2*a**2+b1*a+b_0
B_a
```
```python
# c0 = c0.subs(sigmaprm,sigmap).subs(gammaprm,gammap)
c0
```
```python
c1 = c1.subs(sigmaprm,sigmap).subs(gammaprm,gammap)
expand(c1)
```
```python
c2 = c2.subs(sigmaprm,sigmap).subs(gammaprm,gammap)
expand(c2)
```
```python
c3 = c3.subs(sigmaprm,sigmap).subs(gammaprm,gammap)
expand(c3)
```
```python
c4 = c4.subs(sigmaprm,sigmap).subs(gammaprm,gammap)
expand(c4)
```
```python
c5 = c5.subs(sigmaprm,sigmap).subs(gammaprm,gammap)
c5
```
```python
C_a = c_5*a**5+c4*a**4+c3*a**3+c2*a**2+c1*a+c_0
collect(expand(C_a),a)
```
```python
series(B_a**2-20*a*C_a/9,a,n=7)
```
```python
series(B**2-20*a*C/9,a,n=7)
```
```python
Etrun = a*thetaprm+b*thetaprm**3+c*thetaprm**5
Etrun
```
```python
collect(Etrun.subs([(b,B),(c,C)]),thetaprm)
```
```python
theta_L = solve(Etrun.diff(thetaprm),thetaprm)[1]
theta_U = solve(Etrun.diff(thetaprm),thetaprm)[3]
theta_L,theta_U
```
```python
E_L = simplify(Etrun.subs(thetaprm,theta_U))
E_U = simplify(Etrun.subs(thetaprm,theta_L))
E_L,E_U
```
```python
```
|
{"hexsha": "9cb5eecf6a868ad7bcd9949cd51870f880d47e91", "size": 194750, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Smectic/VPEqualZero.ipynb", "max_stars_repo_name": "brettavedisian/Liquid-Crystals", "max_stars_repo_head_hexsha": "c7c6eaec594e0de8966408264ca7ee06c2fdb5d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Smectic/VPEqualZero.ipynb", "max_issues_repo_name": "brettavedisian/Liquid-Crystals", "max_issues_repo_head_hexsha": "c7c6eaec594e0de8966408264ca7ee06c2fdb5d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Smectic/VPEqualZero.ipynb", "max_forks_repo_name": "brettavedisian/Liquid-Crystals", "max_forks_repo_head_hexsha": "c7c6eaec594e0de8966408264ca7ee06c2fdb5d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 140.3097982709, "max_line_length": 14540, "alphanum_fraction": 0.7910192555, "converted": true, "num_tokens": 2641}
|
'''
This script computes the acuity scores corresponding to the Sepsis patient cohort extracted with
the procedure provided at: https://github.com/microsoft/mimic_sepsis using the raw features.
============================================================================================================================
This code is provided under the MIT License and is meant to be helpful, but WITHOUT ANY WARRANTY;
November 2020 by Taylor Killian and Haoran Zhang; University of Toronto + Vector Institute
============================================================================================================================
Notes:
'''
import sys
import os
import time
import pandas as pd
import numpy as np
# Location of the extracted sepsis cohort and the output file name.
save_dir = 'data/sepsis_mimiciii/'
acuity_file = 'acuity_scores.csv'
data_file = os.path.join(save_dir, 'sepsis_final_data_RAW_withTimes.csv')
# Raw, un-normalized per-timestep patient features.
table = pd.read_csv(data_file)
# Raw observation columns available in the cohort table.
# NOTE(review): obs_cols is not referenced elsewhere in this script --
# presumably kept for documentation/reuse; confirm before removing.
obs_cols = ['o:GCS', 'o:HR', 'o:SysBP',
       'o:MeanBP', 'o:DiaBP', 'o:RR', 'o:Temp_C', 'o:FiO2_1', 'o:Potassium',
       'o:Sodium', 'o:Chloride', 'o:Glucose', 'o:Magnesium', 'o:Calcium',
       'o:Hb', 'o:WBC_count', 'o:Platelets_count', 'o:PTT', 'o:PT',
       'o:Arterial_pH', 'o:paO2', 'o:paCO2', 'o:Arterial_BE', 'o:HCO3',
       'o:Arterial_lactate', 'o:SIRS', 'o:Shock_Index',
       'o:PaO2_FiO2', 'o:cumulated_balance', 'o:SpO2', 'o:BUN', 'o:Creatinine',
       'o:SGOT', 'o:SGPT', 'o:Total_bili', 'o:INR', 'o:input_total',
       'o:input_4hourly', 'o:output_total', 'o:output_4hourly']
############################################################################
# FUNCTIONS TO CALCULATE THE ACUITY SCORES NOT EXTRACTED
############################################################################
def calc_sapsii(df):
    """ Calculate the SAPSII score provided the dataframe of raw patient features.

    :param df: DataFrame with the raw observation columns listed in ``cols``
        below (``o:age`` in days) plus ``o:mechvent`` (0/1 ventilation flag).
    :return: np.ndarray of shape (len(df), 1), one SAPS-II score per row.
    """
    # Score contribution per bin, one array per physiological variable.
    age_values = np.array([0, 7, 12, 15, 16, 18])
    hr_values = np.array([11, 2, 0, 4, 7])
    bp_values = np.array([13, 5, 0, 2])
    temp_values = np.array([0, 3])
    o2_values = np.array([11, 9, 6])
    output_values = np.array([11, 4, 0])
    bun_values = np.array([0, 6, 10])
    wbc_values = np.array([12, 0, 3])
    k_values = np.array([3, 0, 3])
    na_values = np.array([5, 0, 1])
    hco3_values = np.array([5, 3, 0])
    bili_values = np.array([0, 4, 9])
    gcs_values = np.array([26, 13, 7, 5, 0])
    sapsii = np.zeros((df.shape[0],1))
    cols = ['o:age','o:HR','o:SysBP','o:Temp_C','o:PaO2_FiO2','o:output_4hourly','o:BUN','o:WBC_count','o:Potassium','o:Sodium','o:HCO3','o:Total_bili','o:GCS']
    # Work on an explicit copy so the age conversion below cannot trigger
    # pandas' SettingWithCopyWarning or alias the caller's frame.
    tt = df[cols].copy()
    tt.loc[:,'o:age'] = tt['o:age'].values/365.24  # days -> years
    # Boolean bin masks: row i of each array marks which rows fall in bin i.
    age = np.array([ tt.iloc[:,0]<40, (tt.iloc[:,0]>=40)&(tt.iloc[:,0]<60), (tt.iloc[:,0]>=60)&(tt.iloc[:,0]<70), (tt.iloc[:,0]>=70)&(tt.iloc[:,0]<75), (tt.iloc[:,0]>=75)&(tt.iloc[:,0]<80), tt.iloc[:,0]>=80 ])
    hr = np.array([ tt.iloc[:,1]<40, (tt.iloc[:,1]>=40)&(tt.iloc[:,1]<70), (tt.iloc[:,1]>=70)&(tt.iloc[:,1]<120), (tt.iloc[:,1]>=120)&(tt.iloc[:,1]<160), tt.iloc[:,1]>=160 ])
    bp = np.array([ tt.iloc[:,2]<70, (tt.iloc[:,2]>=70)&(tt.iloc[:,2]<100), (tt.iloc[:,2]>=100)&(tt.iloc[:,2]<200), tt.iloc[:,2]>=200 ])
    temp = np.array([ tt.iloc[:,3]<39, tt.iloc[:,3]>=39 ])
    o2 = np.array([ tt.iloc[:,4]<100, (tt.iloc[:,4]>=100)&(tt.iloc[:,4]<200), tt.iloc[:,4]>=200 ])
    out = np.array([ tt.iloc[:,5]<500, (tt.iloc[:,5]>=500)&(tt.iloc[:,5]<1000), tt.iloc[:,5]>=1000 ])
    bun = np.array([ tt.iloc[:,6]<28, (tt.iloc[:,6]>=28)&(tt.iloc[:,6]<84), tt.iloc[:,6]>=84 ])
    wbc = np.array([ tt.iloc[:,7]<1, (tt.iloc[:,7]>=1)&(tt.iloc[:,7]<20), tt.iloc[:,7]>=20 ])
    k = np.array([ tt.iloc[:,8]<3, (tt.iloc[:,8]>=3)&(tt.iloc[:,8]<5), tt.iloc[:,8]>=5 ])
    na = np.array([ tt.iloc[:,9]<125, (tt.iloc[:,9]>=125)&(tt.iloc[:,9]<145), tt.iloc[:,9]>=145 ])
    hco3 = np.array([ tt.iloc[:,10]<15, (tt.iloc[:,10]>=15)&(tt.iloc[:,10]<20), tt.iloc[:,10]>=20 ])
    bili = np.array([ tt.iloc[:,11]<4, (tt.iloc[:,11]>=4)&(tt.iloc[:,11]<6), tt.iloc[:,11]>=6 ])
    gcs = np.array([ tt.iloc[:,12]<6, (tt.iloc[:,12]>=6)&(tt.iloc[:,12]<9), (tt.iloc[:,12]>=9)&(tt.iloc[:,12]<11), (tt.iloc[:,12]>=11)&(tt.iloc[:,12]<14), tt.iloc[:,12]>=14 ])
    # Sum the per-variable contributions; `default=0` covers NaN rows where
    # no bin condition holds.  The O2 term only counts when mechanically
    # ventilated (multiplied by the 0/1 o:mechvent flag).
    for ii in range(df.shape[0]):
        sapsii[ii] = (max(age_values[age[:,ii]], default=0)
                      + max(hr_values[hr[:,ii]], default=0)
                      + max(bp_values[bp[:,ii]], default=0)
                      + max(temp_values[temp[:,ii]], default=0)
                      + max(o2_values[o2[:,ii]]*df.loc[ii,'o:mechvent'], default=0)
                      + max(output_values[out[:,ii]], default=0)
                      + max(bun_values[bun[:,ii]], default=0)
                      + max(wbc_values[wbc[:,ii]], default=0)
                      + max(k_values[k[:,ii]], default=0)
                      + max(na_values[na[:,ii]], default=0)
                      + max(hco3_values[hco3[:,ii]], default=0)
                      + max(bili_values[bili[:,ii]], default=0)
                      + max(gcs_values[gcs[:,ii]], default=0))
    return sapsii
def calc_oasis(df):
    """ Calculate the OASIS score provided the dataframe of raw patient features.

    :param df: DataFrame with the raw observation columns listed in ``cols``
        below (``o:age`` in days) plus ``o:mechvent`` (0/1 ventilation flag).
    :return: np.ndarray of shape (len(df), 1), one OASIS score per row.
    """
    # Score contribution per bin, one array per physiological variable.
    age_values = np.array([0, 3, 6, 9, 7])
    bp_values = np.array([4, 3, 2, 0, 3])
    gcs_values = np.array([10, 4, 3, 0])
    hr_values = np.array([4, 0, 1, 3, 6])
    rr_values = np.array([10, 1, 0, 1, 6, 9])
    temp_values = np.array([3, 4, 2, 2, 6])
    output_values = np.array([10, 5, 1, 0, 8])
    vent_value = 9
    oasis = np.zeros((df.shape[0],1))
    cols = ['o:age','o:MeanBP','o:GCS','o:HR','o:RR','o:Temp_C','o:output_4hourly']
    # Work on an explicit copy so the age conversion below cannot trigger
    # pandas' SettingWithCopyWarning or alias the caller's frame.
    tt = df[cols].copy()
    tt.loc[:,'o:age'] = tt['o:age'].values/365.24 # Convert the age to years
    # Boolean bin masks: row i of each array marks which rows fall in bin i.
    age = np.array([ tt.iloc[:,0]<24, (tt.iloc[:,0]>=24)&(tt.iloc[:,0]<=53), (tt.iloc[:,0]>53)&(tt.iloc[:,0]<=77), (tt.iloc[:,0]>77)&(tt.iloc[:,0]<=89), tt.iloc[:,0]>89 ])
    bp = np.array([ tt.iloc[:,1]<20.65, (tt.iloc[:,1]>=20.65)&(tt.iloc[:,1]<51), (tt.iloc[:,1]>=51)&(tt.iloc[:,1]<61.33), (tt.iloc[:,1]>=61.33)&(tt.iloc[:,1]<143.44), tt.iloc[:,1]>=143.44 ])
    # BUGFIX: the last three GCS conditions previously read column 1
    # (o:MeanBP) instead of column 2 (o:GCS), mis-scoring the GCS component.
    gcs = np.array([ tt.iloc[:,2]<=7, (tt.iloc[:,2]>7)&(tt.iloc[:,2]<14), tt.iloc[:,2]==14, tt.iloc[:,2]>14 ])
    hr = np.array([ tt.iloc[:,3]<33, (tt.iloc[:,3]>=33)&(tt.iloc[:,3]<89), (tt.iloc[:,3]>=89)&(tt.iloc[:,3]<106), (tt.iloc[:,3]>=106)&(tt.iloc[:,3]<=125), tt.iloc[:,3]>125 ])
    rr = np.array([ tt.iloc[:,4]<6, (tt.iloc[:,4]>=6)&(tt.iloc[:,4]<13), (tt.iloc[:,4]>=13)&(tt.iloc[:,4]<22), (tt.iloc[:,4]>=22)&(tt.iloc[:,4]<30), (tt.iloc[:,4]>=30)&(tt.iloc[:,4]<44), tt.iloc[:,4]>=44 ])
    temp = np.array([ tt.iloc[:,5]<33.22, (tt.iloc[:,5]>=33.22)&(tt.iloc[:,5]<35.93), (tt.iloc[:,5]>=35.93)&(tt.iloc[:,5]<36.89), (tt.iloc[:,5]>=36.89)&(tt.iloc[:,5]<=39.88), tt.iloc[:,5]>39.88 ])
    out = np.array([ tt.iloc[:,6]<671.09, (tt.iloc[:,6]>=671.09)&(tt.iloc[:,6]<1427), (tt.iloc[:,6]>=1427)&(tt.iloc[:,6]<=2514), (tt.iloc[:,6]>2514)&(tt.iloc[:,6]<=6896), tt.iloc[:,6]>6896 ])
    vent = (vent_value*df['o:mechvent']).values
    # Sum the per-variable contributions; `default=0` covers NaN rows where
    # no bin condition holds.  Ventilation adds a flat 9 points when present.
    for ii in range(df.shape[0]):
        oasis[ii] = (max(age_values[age[:,ii]], default=0)
                     + max(bp_values[bp[:,ii]], default=0)
                     + max(gcs_values[gcs[:,ii]], default=0)
                     + max(hr_values[hr[:,ii]], default=0)
                     + max(rr_values[rr[:,ii]], default=0)
                     + max(temp_values[temp[:,ii]], default=0)
                     + max(output_values[out[:,ii]], default=0)
                     + vent[ii])
    return oasis
############################################################################
# ORGANIZE TABLE AND COMPUTE ACUITY SCORES
############################################################################
# Compute OASIS
table['c:oasis'] = calc_oasis(table)
# Compute SAPSII
table['c:sapsii'] = calc_sapsii(table)
# Isolate only the columns we want to keep from `table`
keeping = table[['traj', 'step', 'o:SOFA', 'c:oasis', 'c:sapsii']]
keeping = keeping.rename(columns={"o:SOFA": "c:SOFA", "c:oasis": "c:OASIS", "c:sapsii":"c:SAPSii"})
# Save off the acuity scores for use later
# NOTE(review): the DataFrame index is written too (no index=False) --
# confirm downstream readers expect that extra column.
keeping.to_csv(os.path.join(save_dir,acuity_file))
|
{"hexsha": "a05c812377b58514a9cf71839e76f551c62cfecd", "size": 7752, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/compute_acuity_scores.py", "max_stars_repo_name": "lysuk96/rl_representations", "max_stars_repo_head_hexsha": "19de69305e40c9b3a1d746a7af26d232c9fb3f6f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2020-11-23T16:03:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T20:05:43.000Z", "max_issues_repo_path": "scripts/compute_acuity_scores.py", "max_issues_repo_name": "lysuk96/rl_representations", "max_issues_repo_head_hexsha": "19de69305e40c9b3a1d746a7af26d232c9fb3f6f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-06-16T18:08:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-05T13:39:04.000Z", "max_forks_repo_path": "scripts/compute_acuity_scores.py", "max_forks_repo_name": "lysuk96/rl_representations", "max_forks_repo_head_hexsha": "19de69305e40c9b3a1d746a7af26d232c9fb3f6f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-04-19T17:44:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-23T05:55:50.000Z", "avg_line_length": 60.5625, "max_line_length": 559, "alphanum_fraction": 0.5312177503, "include": true, "reason": "import numpy", "num_tokens": 2685}
|
[STATEMENT]
lemma suicide_aux_r:
"\<lbrakk> (\<forall>w\<in>Y. 0\<le>length w); (\<forall>w\<in>X\<^bsup>Suc n\<^esup>. n \<le> length w) \<rbrakk> \<Longrightarrow> (\<forall>w\<in>Y \<cdot> X\<^bsup>Suc n\<^esup>. n \<le> length w)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>w\<in>Y. 0 \<le> |w|; \<forall>w\<in>X\<^bsup>Suc n\<^esup>. n \<le> |w|\<rbrakk> \<Longrightarrow> \<forall>w\<in>Y \<cdot> X\<^bsup>Suc n\<^esup>. n \<le> |w|
[PROOF STEP]
by (auto, metis (full_types) le0 plus_nat.add_0 prod_lb)
|
{"llama_tokens": 245, "file": "Regular_Algebras_Regular_Algebra_Models", "length": 1}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Implementation in Chainer of https://github.com/tensorflow/models/tree/master/video_prediction
# ==============================================================================================
import types
import random
import math
from math import floor, log
import numpy as np
import subprocess
try:
import cupy
except:
cupy = np
pass
import chainer
from chainer import cuda
from chainer import variable
import chainer.functions as F
import chainer.links as L
from chainer.functions.connection import convolution_2d
from chainer import initializers
from chainer import serializers
from chainer.functions.math import square
from chainer.functions.activation import lstm
import sys
import os
import time
import glob
import csv
import click
import logging
import matplotlib.pyplot as plt
# Amount to use when lower bounding Variables
RELU_SHIFT = 1e-12
# Kernel size for DNA and CDNA
DNA_KERN_SIZE = 5
# =============================================
# Helpers functions used accross scripts (hlpe)
# =============================================
def concat_examples(batch):
    """
    Stack a mini-batch of (images, actions, states) examples into time-major arrays.

    Args:
        batch: sequence of examples, each indexable as
            (images [T, H, W, C], actions [T, A], states [T, S])
    Returns:
        Tuple of numpy arrays:
            images of shape (T, B, C, H, W) — channels rolled to axis 1 for Chainer
            (Tensorflow's layout is batch x height x width x channel),
            actions of shape (T, B, A),
            states of shape (T, B, S)
    """
    # List comprehensions instead of an index loop — also removes the
    # Python-2-only `xrange` so the helper runs under Python 3.
    img_training_set = np.array([example[0] for example in batch])
    act_training_set = np.array([example[1] for example in batch])
    sta_training_set = np.array([example[2] for example in batch])
    # Split the actions, states and images into timestep
    act_training_set = np.split(ary=act_training_set, indices_or_sections=act_training_set.shape[1], axis=1)
    act_training_set = [np.squeeze(act, axis=1) for act in act_training_set]
    sta_training_set = np.split(ary=sta_training_set, indices_or_sections=sta_training_set.shape[1], axis=1)
    sta_training_set = [np.squeeze(sta, axis=1) for sta in sta_training_set]
    img_training_set = np.split(ary=img_training_set, indices_or_sections=img_training_set.shape[1], axis=1)
    # Reshape the img training set to a Chainer compatible tensor : batch x channel x height x width instead of Tensorflow's: batch x height x width x channel
    img_training_set = [np.rollaxis(np.squeeze(img, axis=1), 3, 1) for img in img_training_set]
    return np.array(img_training_set), np.array(act_training_set), np.array(sta_training_set)
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
    """
    Sample batch with specified mix of ground truth and generated data points.
    e.g: the final matrix is a mix of vectors from the ground_truth (gt) and prediction (p)
        [gt1, gt2, gt3, gt4, gt5, gt6, gt7, gt8, gt9, gt10] = ground truth
        [p1, p2, p3, p4, p5, p6] = prediction
        [p1, gt2, gt3, gt4, p5, p6, gt7, gt8, gt9, gt10] = returns
    Args:
        ground_truth_x: tensor of ground-truth data point
        generated_x: tensor of generated data point
        batch_size: batch size
        num_ground_truth: number of ground-truth examples to include in batch
    Returns:
        New batch with num_ground_truth samples from ground_truth_x and the rest from generated_x
    """
    # Remember the device of the prediction so the stitched batch goes back there
    xp = chainer.cuda.get_array_module(generated_x.data)
    ground_truth_x = chainer.cuda.to_cpu(ground_truth_x)
    generated_x = chainer.cuda.to_cpu(generated_x.data)
    # Randomly partition the batch rows: the first num_ground_truth shuffled
    # indices come from the ground truth, the rest from the prediction
    idx = np.arange(int(batch_size))
    np.random.shuffle(idx)
    ground_truth_idx = np.array(np.take(idx, np.arange(num_ground_truth)))
    generated_idx = np.array(np.take(idx, np.arange(num_ground_truth, int(batch_size))))
    reshaped_ground_truth_x = F.reshape(ground_truth_x, (int(batch_size), -1))
    reshaped_generated_x = F.reshape(generated_x, (int(batch_size), -1))
    ground_truth_examps = np.take(reshaped_ground_truth_x.data, ground_truth_idx, axis=0)
    generated_examps = np.take(reshaped_generated_x.data, generated_idx, axis=0)
    # Tag every row with its origin (0 = ground truth, 1 = generated), then
    # sort the tags back into batch order so row i can be resolved below
    index_a = np.vstack((ground_truth_idx, np.zeros_like(ground_truth_idx)))
    index_b = np.vstack((generated_idx, np.ones_like(generated_idx)))
    ground_truth_generated_stacked = np.hstack((ground_truth_idx, generated_idx))
    ground_truth_generated_stacked_sorted = np.argsort(ground_truth_generated_stacked)
    order = np.hstack((index_a, index_b))[:, ground_truth_generated_stacked_sorted]
    stitched = []
    # `range` replaces the Python-2-only `xrange` so this runs under Python 3
    for i in range(len(order[0])):
        if order[1][i] == 0:
            pos = np.where(ground_truth_idx == i)
            stitched.append(ground_truth_examps[pos])
        else:
            pos = np.where(generated_idx == i)
            stitched.append(generated_examps[pos])
    stitched = np.array(stitched, dtype=np.float32)
    stitched = np.reshape(stitched, (ground_truth_x.shape[0], ground_truth_x.shape[1], ground_truth_x.shape[2], ground_truth_x.shape[3]))
    return xp.array(stitched)
def peak_signal_to_noise_ratio(true, pred):
    """
    Image quality metric based on maximal signal power vs. power of the noise

    Args:
        true: the ground truth image
        pred: the predicted image
    Returns:
        Peak signal to noise ratio (PSNR)
    """
    mse = F.mean_squared_error(true, pred)
    # F.log is natural log; dividing by ln(10) converts to log10 for the dB scale
    return 10.0 * F.log(1.0 / mse) / log(10.0)
def broadcast_reshape(x, y, axis=0):
    """
    Reshape y to correspond to shape of x

    Args:
        x: the broadcasted
        y: the broadcastee
        axis: where the reshape will be performed
    Results:
        Output variable of same shape of x
    """
    # Pad y's shape with singleton dimensions on both sides so it lines up
    # with x starting at `axis`, then broadcast it out to x's full shape.
    leading = (1,) * axis
    trailing = (1,) * (len(x.shape) - axis - len(y.shape))
    reshaped = F.reshape(y, leading + tuple(y.shape) + trailing)
    return F.broadcast_to(reshaped, x.shape)
def broadcasted_division(x, y, axis=0):
    """
    Apply a division x/y where y is broadcasted to x to be able to complete the operation

    Args:
        x: the numerator
        y: the denominator
        axis: where the reshape will be performed
    Results:
        Output variable of same shape of x
    """
    return x / broadcast_reshape(x, y, axis)
def broadcast_scale(x, y, axis=0):
    """
    Apply a multiplication x*y where y is broadcasted to x to be able to complete the operation

    Args:
        x: left hand operation
        y: right hand operation
        axis: where the reshape will be performed
    Results:
        Output variable of same shape of x
    """
    return x * broadcast_reshape(x, y, axis)
# =============
# Chains (chns)
# =============
class LayerNormalizationConv2D(chainer.Chain):
    """Layer normalization applied to the flattened output of a convolution."""

    def __init__(self):
        super(LayerNormalizationConv2D, self).__init__()
        with self.init_scope():
            self.norm = L.LayerNormalization()

    def __call__(self, inputs):
        """
        Apply a "layer normalization" on the result of a convolution

        Args:
            inputs: input tensor, 4D, batch x channel x height x width
        Returns:
            Output variable of shape (batch x channels x height x width)
        """
        batch_size, channels, height, width = inputs.shape[:4]
        # Flatten each sample to a vector, normalize it, then restore the 4D layout
        flat = F.reshape(inputs, (batch_size, -1))
        normalized = self.norm(flat)
        return F.reshape(normalized, (batch_size, channels, height, width))
# =============
# Models (mdls)
# =============
class BasicConvLSTMCell(chainer.Chain):
    """ Stateless convolutional LSTM, as seen in lstm_op.py from video_prediction model """
    def __init__(self, out_size=None, filter_size=5):
        """
        Args:
            out_size: number of output channels of the cell
            filter_size: edge length of the square convolution kernel
        """
        super(BasicConvLSTMCell, self).__init__()
        with self.init_scope():
            # @TODO: maybe provide in channels because the concatenation
            # `filter_size // 2` keeps "same" padding an int under Python 3
            # (the old `filter_size / 2` became a float there).
            self.conv = L.Convolution2D(4*out_size, (filter_size, filter_size), pad=filter_size // 2)
        self.out_size = out_size
        self.filter_size = filter_size
        self.reset_state()
    def reset_state(self):
        # Drop the cell and hidden states; they are lazily re-created on the next call
        self.c = None
        self.h = None
    def __call__(self, inputs, forget_bias=1.0):
        """Basic LSTM recurrent network cell, with 2D convolution connections.
        We add forget_bias (default: 1) to the biases of the forget gate in order to
        reduce the scale of forgetting in the beginning of the training.
        It does not allow cell clipping, a projection layer, and does not
        use peep-hole connections: it is the basic baseline.
        Args:
            inputs: input Tensor, 4D, batch x channels x height x width
            forget_bias: the initial value of the forget biases.
        Returns:
            a tuple of tensors representing output and the new state.
        """
        # In Tensorflow: batch x height x width x channels
        # In Chainer: batch x channel x height x width
        # Create a state based on Finn's implementation
        xp = chainer.cuda.get_array_module(*inputs.data)
        if self.c is None:
            self.c = xp.zeros((inputs.shape[0], self.out_size, inputs.shape[2], inputs.shape[3]), dtype=inputs[0].data.dtype)
        if self.h is None:
            self.h = xp.zeros((inputs.shape[0], self.out_size, inputs.shape[2], inputs.shape[3]), dtype=inputs[0].data.dtype)
        inputs_h = F.concat((inputs, self.h), axis=1)
        # Parameters of gates are concatenated into one conv for efficiency
        j_i_f_o = self.conv(inputs_h)
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        j, i, f, o = F.split_axis(j_i_f_o, indices_or_sections=4, axis=1)
        self.c = self.c * F.sigmoid(f + forget_bias) + F.sigmoid(i) * F.tanh(j)
        self.h = F.tanh(self.c) * F.sigmoid(o)
        return self.h
class StatelessCDNA(chainer.Chain):
    """
    Build convolutional lstm video predictor using CDNA
    * Because the CDNA does not keep states, it should be passed as a parameter if one wants to continue learning from previous states
    """
    def __init__(self, num_masks):
        """
        Args:
            num_masks: number of transformation kernels (and masks) to predict
        """
        super(StatelessCDNA, self).__init__()
        with self.init_scope():
            self.enc7 = L.Deconvolution2D(in_channels=64, out_channels=3, ksize=(1,1), stride=1)
            # One flat kernel of DNA_KERN_SIZE x DNA_KERN_SIZE coefficients per mask
            self.cdna_kerns = L.Linear(in_size=None, out_size=DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks)
        self.num_masks = num_masks
    def __call__(self, encs, hiddens, batch_size, prev_image, num_masks, color_channels):
        """
        Learn through StatelessCDNA.
        Args:
            encs: An array of computed transformation
            hiddens: An array of hidden layers
            batch_size: Size of mini batches
            prev_image: The image to transform
            num_masks: Number of masks to apply
            color_channels: Output color channels
        Returns:
            transformed: A list of masks to apply on the previous image
        """
        logger = logging.getLogger(__name__)
        enc0, enc1, enc2, enc3, enc4, enc5, enc6 = encs
        hidden1, hidden2, hidden3, hidden4, hidden5, hidden6, hidden7 = hiddens
        img_height = prev_image.shape[2]
        img_width = prev_image.shape[3]
        # CDNA specific
        enc7 = self.enc7(enc6)
        enc7 = F.relu(enc7)
        transformed_list = list([F.sigmoid(enc7)])
        # CDNA specific
        # Predict kernels using linear function of last layer
        cdna_input = F.reshape(hidden5, (int(batch_size), -1))
        cdna_kerns = self.cdna_kerns(cdna_input)
        # Reshape and normalize
        # B x C x H x W => B x NUM_MASKS x 1 x H x W
        cdna_kerns = F.reshape(cdna_kerns, (int(batch_size), self.num_masks, 1, DNA_KERN_SIZE, DNA_KERN_SIZE))
        # Shift through ReLU then re-add RELU_SHIFT so every coefficient stays strictly positive
        cdna_kerns = F.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
        norm_factor = F.sum(cdna_kerns, (2, 3, 4), keepdims=True)
        cdna_kerns = broadcasted_division(cdna_kerns, norm_factor)
        # Treat the color channel dimension as the batch dimension since the same
        # transformation is applied to each color channel.
        # Treat the batch dimension as the channel dimension so that
        # F.depthwise_convolution_2d can apply a different transformation to each sample.
        cdna_kerns = F.reshape(cdna_kerns, (int(batch_size), self.num_masks, DNA_KERN_SIZE, DNA_KERN_SIZE))
        cdna_kerns = F.transpose(cdna_kerns, (1, 0, 2, 3))
        # Swap the batch and channel dimension.
        prev_image = F.transpose(prev_image, (1, 0, 2, 3))
        # Transform the image.
        # `DNA_KERN_SIZE // 2`: integer division so `pad` stays an int under Python 3
        # (the old `DNA_KERN_SIZE / 2` produced a float there).
        transformed = F.depthwise_convolution_2d(prev_image, cdna_kerns, stride=(1, 1), pad=DNA_KERN_SIZE // 2)
        # Transpose the dimensions where they belong.
        transformed = F.reshape(transformed, (color_channels, int(batch_size), self.num_masks, img_height, img_width))
        transformed = F.transpose(transformed, (2, 1, 0, 3, 4))
        transformed = F.split_axis(transformed, indices_or_sections=self.num_masks, axis=0)
        transformed = [F.squeeze(t, axis=0) for t in transformed]
        transformed_list += transformed
        return transformed_list, enc7
class StatelessDNA(chainer.Chain):
    """
    Build convolutional lstm video predictor using DNA
    * Because the DNA does not keep states, it should be passed as a parameter if one wants to continue learning from previous states
    """
    def __init__(self, num_masks):
        # One output channel per kernel coefficient: enc7 predicts a
        # DNA_KERN_SIZE x DNA_KERN_SIZE kernel for every pixel.
        super(StatelessDNA, self).__init__()
        with self.init_scope():
            self.enc7 = L.Deconvolution2D(DNA_KERN_SIZE**2, (1, 1), stride=1)
        self.num_masks = num_masks
    def __call__(self, encs, hiddens, batch_size, prev_image, num_masks, color_channels):
        """
        Learn through StatelessDNA.
        Args:
            encs: An array of computed transformation
            hiddens: An array of hidden layers
            batch_size: Size of mini batches
            prev_image: The image to transform
            num_masks: Number of masks to apply
            color_channels: Output color channels
        Returns:
            transformed: A list of masks to apply on the previous image
        """
        logger = logging.getLogger(__name__)
        enc0, enc1, enc2, enc3, enc4, enc5, enc6 = encs
        hidden1, hidden2, hidden3, hidden4, hidden5, hidden6, hidden7 = hiddens
        # DNA specific
        # Per-pixel kernel coefficients, one channel per (xkern, ykern) offset
        enc7 = self.enc7(enc6)
        enc7 = F.relu(enc7)
        if num_masks != 1:
            raise ValueError('Only one mask is supported for DNA model.')
        # Construct translated images
        img_height = prev_image.shape[2]
        img_width = prev_image.shape[3]
        # Pad height/width by 2 on each side (= DNA_KERN_SIZE // 2 for a 5x5 kernel)
        # so every shifted crop below stays in bounds.
        prev_image_pad = F.pad(prev_image, pad_width=[[0,0], [0,0], [2,2], [2,2]], mode='constant', constant_values=0)
        kernel_inputs = []
        # Build one shifted copy of the previous image per kernel offset;
        # the weighted sum of these copies realizes the per-pixel convolution.
        for xkern in range(DNA_KERN_SIZE):
            for ykern in range(DNA_KERN_SIZE):
                #tmp = F.get_item(prev_image_pad, list([slice(0,prev_image_pad.shape[0]), slice(0,prev_image_pad.shape[1]), slice(xkern,img_height), slice(ykern,img_width)]))
                tmp = prev_image_pad[:,:,xkern:img_height, ykern:img_width]
                # ** Added this operation to make sure the size was still the original one!
                tmp = F.pad(tmp, [[0,0], [0,0], [0, xkern], [0, ykern]], mode='constant', constant_values=0)
                tmp = F.expand_dims(tmp, axis=1) # Previously axis=3 but our channel is on axis=1 ? ok!
                # NOTE(review): `.data` detaches the crop from the graph — gradients do not
                # flow back through prev_image here; confirm this is intended.
                kernel_inputs.append(tmp.data)
        kernel_inputs = F.concat(kernel_inputs, axis=1) # Previously axis=3 but our channel us on axis=1 ? ok!
        # Normalize channels to 1
        # ReLU shift keeps coefficients strictly positive so the sum below never divides by zero
        kernel_normalized = F.relu(enc7 - RELU_SHIFT) + RELU_SHIFT
        kernel_normalized_sum = F.sum(kernel_normalized, axis=1, keepdims=True) # Previously axis=3 but our channel are on axis 1 ? ok!
        kernel_normalized = broadcasted_division(kernel_normalized, kernel_normalized_sum)
        kernel_normalized = F.expand_dims(kernel_normalized, axis=2)
        #kernel_normalized = F.scale(kernel_inputs, kernel_normalized, axis=0)
        # Weight each shifted copy by its normalized coefficient and sum over the offsets
        kernel_normalized = broadcast_scale(kernel_inputs, kernel_normalized)
        kernel_normalized = F.sum(kernel_normalized, axis=1, keepdims=False)
        transformed = [kernel_normalized]
        return transformed, enc7
class StatelessSTP(chainer.Chain):
    """
    Build convolutional lstm video predictor using STP
    * Because the STP does not keep states, it should be passed as a parameter if one wants to continue learning from previous states
    """
    def __init__(self, num_masks):
        # `num_masks` is accepted for interface parity with the other sub-models;
        # the number of transformations is taken from the call-time argument instead.
        super(StatelessSTP, self).__init__()
        with self.init_scope():
            self.enc7 = L.Deconvolution2D(3, (1, 1), stride=1)
            self.stp_input = L.Linear(100)
            # Predicts the 6 affine parameters (2x3 matrix) of a spatial transform
            self.identity_params = L.Linear(6)
    def __call__(self, encs, hiddens, batch_size, prev_image, num_masks, color_channels):
        """
        Learn through StatelessSTP.
        Args:
            encs: An array of computed transformation
            hiddens: An array of hidden layers
            batch_size: Size of mini batches
            prev_image: The image to transform
            num_masks: Number of masks to apply
            color_channels: Output color channels
        Returns:
            transformed: A list of masks to apply on the previous image
        """
        logger = logging.getLogger(__name__)
        enc0, enc1, enc2, enc3, enc4, enc5, enc6 = encs
        hidden1, hidden2, hidden3, hidden4, hidden5, hidden6, hidden7 = hiddens
        xp = chainer.cuda.get_array_module(enc6.data)
        # STP specific
        enc7 = self.enc7(enc6)
        transformed = list([F.sigmoid(enc7)])
        # Flatten hidden5 and project it to the feature used to predict affine params
        stp_input0 = F.reshape(hidden5, (int(batch_size), -1))
        stp_input1 = self.stp_input(stp_input0)
        stp_input1 = F.relu(stp_input1)
        # Affine parameters of the identity transformation, one row per sample
        identity_params = np.array([[1.0, 0.0, 0.0, 0.0, 1.0, 0.0]], dtype=np.float32)
        identity_params = np.repeat(identity_params, int(batch_size), axis=0)
        identity_params = variable.Variable(xp.array(identity_params))
        stp_transformations = []
        # NOTE(review): the same linear link is applied to the same input every iteration,
        # so all num_masks-1 transformations are identical — confirm this is intended.
        for i in range(num_masks-1):
            # Predicted parameters are a delta added on top of the identity transform
            params = self.identity_params(stp_input1)
            params = params + identity_params
            params = F.reshape(params, (int(params.shape[0]), 2, 3))
            # Sample the previous image along the predicted affine grid
            grid = F.spatial_transformer_grid(params, (prev_image.shape[2], prev_image.shape[3]))
            trans = F.spatial_transformer_sampler(prev_image, grid)
            stp_transformations.append(trans)
        transformed += stp_transformations
        return transformed, enc7
class Model(chainer.Chain):
    """
    This Model wrap other models like CDNA, STP or DNA.
    It calls their training and get the generated images and states, it then compute the losses and other various parameters
    """
    def __init__(self, num_masks, is_cdna=True, is_dna=False, is_stp=False, use_state=True, scheduled_sampling_k=-1, num_frame_before_prediction=2, prefix=None):
        """
        Initialize a CDNA, STP or DNA through this 'wrapper' Model
        Args:
            num_masks: number of masks the wrapped sub-model should produce
            is_cdna: if the model should be an extension of CDNA
            is_dna: if the model should be an extension of DNA
            is_stp: if the model should be an extension of STP
            use_state: if the state should be concatenated
            scheduled_sampling_k: schedule sampling hyperparameter k
            num_frame_before_prediction: number of frame before prediction
            prefix: appended to the results to differentiate between training and validation
        """
        super(Model, self).__init__()
        with self.init_scope():
            self.enc0 = L.Convolution2D(32, (5, 5), stride=2, pad=2)
            self.enc1 = L.Convolution2D(32, (3, 3), stride=2, pad=1)
            self.enc2 = L.Convolution2D(64, (3, 3), stride=2, pad=1)
            self.enc3 = L.Convolution2D(64, (1, 1), stride=1)
            self.enc4 = L.Deconvolution2D(128, (3, 3), stride=2, outsize=(16,16), pad=1)
            self.enc5 = L.Deconvolution2D(96, (3, 3), stride=2, outsize=(32,32), pad=1)
            self.enc6 = L.Deconvolution2D(64, (3, 3), stride=2, outsize=(64, 64), pad=1)
            self.lstm1 = BasicConvLSTMCell(32)
            self.lstm2 = BasicConvLSTMCell(32)
            self.lstm3 = BasicConvLSTMCell(64)
            self.lstm4 = BasicConvLSTMCell(64)
            self.lstm5 = BasicConvLSTMCell(128)
            self.lstm6 = BasicConvLSTMCell(64)
            self.lstm7 = BasicConvLSTMCell(32)
            self.norm_enc0 = LayerNormalizationConv2D()
            self.norm_enc6 = LayerNormalizationConv2D()
            self.hidden1 = LayerNormalizationConv2D()
            self.hidden2 = LayerNormalizationConv2D()
            self.hidden3 = LayerNormalizationConv2D()
            self.hidden4 = LayerNormalizationConv2D()
            self.hidden5 = LayerNormalizationConv2D()
            self.hidden6 = LayerNormalizationConv2D()
            self.hidden7 = LayerNormalizationConv2D()
            self.masks = L.Deconvolution2D(num_masks+1, (1, 1), stride=1)
            self.current_state = L.Linear(5)
            # Pick the wrapped architecture; registered inside init_scope so its
            # parameters are tracked by the optimizer.
            model = None
            if is_cdna:
                model = StatelessCDNA(num_masks)
            elif is_stp:
                model = StatelessSTP(num_masks)
            elif is_dna:
                model = StatelessDNA(num_masks)
            if model is None:
                raise ValueError("No network specified")
            else:
                self.model = model
            self.num_masks = num_masks
            self.use_state = use_state
            self.scheduled_sampling_k = scheduled_sampling_k
            self.num_frame_before_prediction = num_frame_before_prediction
            self.prefix = prefix
            self.loss = 0.0
            self.psnr_all = 0.0
            self.summaries = []
            self.conv_res = []
            # Condition ops callback
            def ops_smear(use_state):
                # Tile the state+action vector spatially and concatenate it as extra channels
                def ops(args):
                    x = args.get("x")
                    if use_state:
                        state_action = args.get("state_action")
                        batch_size = args.get("batch_size")
                        smear = F.reshape(state_action, (int(batch_size), int(state_action.shape[1]), 1, 1))
                        smear = F.tile(smear, (1, 1, int(x.shape[2]), int(x.shape[3])))
                        x = F.concat((x, smear), axis=1) # Previously axis=3 but our channel is on axis=1? ok
                    return x
                return ops
            def ops_skip_connection(enc_idx):
                # Concatenate an earlier encoder output onto the current activation
                def ops(args):
                    x = args.get("x")
                    enc = args.get("encs")[enc_idx]
                    # Skip connection (current input + target enc)
                    x = F.concat((x, enc), axis=1) # Previously axis=3 but our channel is on axis=1? ok!
                    return x
                return ops
            def ops_save(name):
                # Stash the current activation under `name` for later retrieval
                def ops(args):
                    x = args.get("x")
                    save_map = args.get("map")
                    save_map[name] = x
                    return x
                return ops
            def ops_get(name):
                def ops(args):
                    save_map = args.get("map")
                    return save_map[name]
                return ops
            # Create an executable array containing all the transformations
            self.ops = [
                [self.enc0, self.norm_enc0],
                [self.lstm1, self.hidden1, ops_save("hidden1"), self.lstm2, self.hidden2, ops_save("hidden2"), self.enc1],
                [self.lstm3, self.hidden3, ops_save("hidden3"), self.lstm4, self.hidden4, ops_save("hidden4"), self.enc2],
                [ops_smear(use_state), self.enc3],
                [self.lstm5, self.hidden5, ops_save("hidden5"), self.enc4],
                [self.lstm6, self.hidden6, ops_save("hidden6"), ops_skip_connection(1), self.enc5],
                [self.lstm7, self.hidden7, ops_save("hidden7"), ops_skip_connection(0), self.enc6, self.norm_enc6]
            ]
    def reset_state(self):
        """
        Reset the gradient of this model, but also the specific model
        """
        self.loss = 0.0
        self.psnr_all = 0.0
        self.summaries = []
        self.conv_res = []
        self.lstm1.reset_state()
        self.lstm2.reset_state()
        self.lstm3.reset_state()
        self.lstm4.reset_state()
        self.lstm5.reset_state()
        self.lstm6.reset_state()
        self.lstm7.reset_state()
    def __call__(self, x, iter_num=-1.0):
        """
        Calls the training process
        Args:
            x: an array containing an array of:
                images: an array of Tensor of shape batch x channels x height x width
                actions: an array of Tensor of shape batch x action
                states: an array of Tensor of shape batch x state
            iter_num: iteration (epoch) index
        Returns:
            loss, all the peak signal to noise ratio, summaries
        """
        logger = logging.getLogger(__name__)
        # Split the images, actions and states from the input
        if len(x) > 1:
            images, actions, states = x
        else:
            images, actions, states = x[0], None, None
        batch_size, color_channels, img_height, img_width = images[0].shape[0:4]
        # Generated robot states and images
        gen_states, gen_images = [], []
        current_state = states[0]
        # When validation/test, disable schedule sampling
        if not chainer.config.train or self.scheduled_sampling_k == -1:
            feedself = True
        else:
            # Scheduled sampling, inverse sigmoid decay
            # Calculate number of ground-truth frames to pass in.
            num_ground_truth = np.int32(
                np.round(np.float32(batch_size) * (self.scheduled_sampling_k / (self.scheduled_sampling_k + np.exp(iter_num / self.scheduled_sampling_k))))
            )
            feedself = False
        for image, action in zip(images[:-1], actions[:-1]):
            done_warm_start = len(gen_images) > self.num_frame_before_prediction - 1
            if feedself and done_warm_start:
                # Feed in generated image
                prev_image = gen_images[-1]
            elif done_warm_start:
                # Scheduled sampling
                prev_image = scheduled_sample(image, gen_images[-1], batch_size, num_ground_truth)
                prev_image = variable.Variable(prev_image)
            else:
                # Always feed in ground_truth
                prev_image = variable.Variable(image)
            # Predicted state is always fed back in
            state_action = F.concat((action, current_state), axis=1)
            # Execute the ops array of transformations.
            # If an ops is a plain function it is a custom callback, otherwise a link.
            # `range` replaces the Python-2-only `xrange` so this runs under Python 3.
            encs = []
            maps = {}
            x = prev_image
            for i in range(len(self.ops)):
                for j in range(len(self.ops[i])):
                    op = self.ops[i][j]
                    if isinstance(op, types.FunctionType):
                        # Only these values are use now in the ops callback
                        x = op({
                            "x": x,
                            "encs": encs,
                            "map": maps,
                            "state_action": state_action,
                            "batch_size": batch_size
                        })
                    else:
                        x = op(x)
                        # ReLU at the end of each transformation
                        x = F.relu(x)
                # At the end of j iteration = completed a enc transformation
                encs.append(x)
            # Extract the variables
            hiddens = [
                maps.get("hidden1"), maps.get("hidden2"), maps.get("hidden3"), maps.get("hidden4"),
                maps.get("hidden5"), maps.get("hidden6"), maps.get("hidden7")
            ]
            enc0, enc1, enc2, enc3, enc4, enc5, enc6 = encs
            hidden1, hidden2, hidden3, hidden4, hidden5, hidden6, hidden7 = hiddens
            # Specific model transformations
            transformed, enc7 = self.model(
                encs, hiddens,
                batch_size, prev_image, self.num_masks, int(color_channels)
            )
            encs.append(enc7)
            # Masks: softmax over num_masks+1 channels, then composite the
            # previous image and every transformation weighted by its mask
            masks = self.masks(enc6)
            masks = F.relu(masks)
            masks = F.reshape(masks, (-1, self.num_masks + 1))
            masks = F.softmax(masks)
            masks = F.reshape(masks, (int(batch_size), self.num_masks+1, int(img_height), int(img_width))) # Previously num_mask at the end, but our channels are on axis=1? ok!
            mask_list = F.split_axis(masks, indices_or_sections=self.num_masks+1, axis=1) # Previously axis=3 but our channels are on axis=1 ?
            output = broadcast_scale(prev_image, mask_list[0])
            for layer, mask in zip(transformed, mask_list[1:]):
                output += broadcast_scale(layer, mask, axis=0)
            gen_images.append(output)
            current_state = self.current_state(state_action)
            gen_states.append(current_state)
        # End of transformations
        self.conv_res = encs
        # L2 loss, PSNR for eval
        loss, psnr_all = 0.0, 0.0
        summaries = []
        for i, x, gx in zip(range(len(gen_images)), images[self.num_frame_before_prediction:], gen_images[self.num_frame_before_prediction - 1:]):
            x = variable.Variable(x)
            recon_cost = F.mean_squared_error(x, gx)
            psnr_i = peak_signal_to_noise_ratio(x, gx)
            psnr_all += psnr_i
            summaries.append(self.prefix + '_recon_cost' + str(i) + ': ' + str(recon_cost.data))
            summaries.append(self.prefix + '_psnr' + str(i) + ': ' + str(psnr_i.data))
            loss += recon_cost
        for i, state, gen_state in zip(range(len(gen_states)), states[self.num_frame_before_prediction:], gen_states[self.num_frame_before_prediction - 1:]):
            state = variable.Variable(state)
            state_cost = F.mean_squared_error(state, gen_state) * 1e-4
            summaries.append(self.prefix + '_state_cost' + str(i) + ': ' + str(state_cost.data))
            loss += state_cost
        summaries.append(self.prefix + '_psnr_all: ' + str(psnr_all.data if isinstance(psnr_all, variable.Variable) else psnr_all))
        self.psnr_all = psnr_all
        self.loss = loss = loss / np.float32(len(images) - self.num_frame_before_prediction)
        summaries.append(self.prefix + '_loss: ' + str(loss.data if isinstance(loss, variable.Variable) else loss))
        self.summaries = summaries
        self.gen_images = gen_images
        return self.loss
# =================================================
# Main entry point of the training processes (main)
# =================================================
@click.command()
@click.option('--data_dir', type=click.Path(exists=True), default='data/processed/brain-robotics-data/push/push_train', help='Directory containing data.')
@click.option('--output_dir', type=click.Path(), default='models', help='Directory for model checkpoints.')
@click.option('--event_log_dir', type=click.Path(), default='models', help='Directory for writing summary.')
@click.option('--num_iterations', type=click.INT, default=100000, help='Number of training iterations. Number of epoch is: num_iterations/batch_size.')
@click.option('--pretrained_model', type=click.Path(), default='', help='Filepath of a pretrained model to initialize from.')
@click.option('--pretrained_state', type=click.Path(), default='', help='Filepath of a pretrained state to initialize from.')
@click.option('--sequence_length', type=click.INT, default=10, help='Sequence length, including context frames.')
@click.option('--context_frames', type=click.INT, default=2, help='Number of frames before predictions.')
@click.option('--use_state', type=click.INT, default=1, help='Whether or not to give the state+action to the model.')
@click.option('--model_type', type=click.STRING, default='CDNA', help='Model architecture to use - CDNA, DNA, or STP.')
@click.option('--num_masks', type=click.INT, default=10, help='Number of masks, usually 1 for DNA, 10 for CDNA, STP.')
@click.option('--schedsamp_k', type=click.FLOAT, default=900.0, help='The k parameter for schedules sampling. -1 for no scheduled sampling.')
@click.option('--train_val_split', type=click.FLOAT, default=0.95, help='The percentage of data to use for the training set, vs. the validation set.')
@click.option('--batch_size', type=click.INT, default=32, help='Batch size for training.')
@click.option('--learning_rate', type=click.FLOAT, default=0.001, help='The base learning rate of the generator.')
@click.option('--gpu', type=click.INT, default=-1, help='ID of the gpu(s) to use')
@click.option('--validation_interval', type=click.INT, default=200, help='How often to run a batch through the validation model')
@click.option('--save_interval', type=click.INT, default=50, help='How often to save a model checkpoint')
@click.option('--debug', type=click.INT, default=0, help='Debug mode.')
def main(data_dir, output_dir, event_log_dir, num_iterations, pretrained_model, pretrained_state, sequence_length, context_frames, use_state, model_type, num_masks, schedsamp_k, train_val_split, batch_size, learning_rate, gpu, validation_interval, save_interval, debug):
if debug == 1:
chainer.set_debug(True)
""" Train the model based on the data saved in ../processed """
logger = logging.getLogger(__name__)
logger.info('Training the model')
logger.info('Model: {}'.format(model_type))
logger.info('GPU: {}'.format(gpu))
logger.info('# Minibatch-size: {}'.format(batch_size))
logger.info('# Num iterations: {}'.format(num_iterations))
logger.info('# epoch: {}'.format(round(num_iterations/batch_size)))
model_suffix_dir = "{0}-{1}-{2}".format(time.strftime("%Y%m%d-%H%M%S"), model_type, batch_size)
training_suffix = "{0}".format('training')
validation_suffix = "{0}".format('validation')
state_suffix = "{0}".format('state')
logger.info("Fetching the models and inputs")
data_map = []
with open(data_dir + '/map.csv', 'rb') as f:
reader = csv.reader(f)
for row in reader:
data_map.append(row)
if len(data_map) <= 1: # empty or only header
logger.error("No file map found")
exit()
# Load the images, actions and states
images = []
actions = []
states = []
for i in xrange(1, len(data_map)): # Exclude the header
#logger.info("Loading data {0}/{1}".format(i, len(data_map)-1))
images.append(np.float32(np.load(data_dir + '/' + data_map[i][2])))
actions.append(np.float32(np.load(data_dir + '/' + data_map[i][3])))
states.append(np.float32(np.load(data_dir + '/' + data_map[i][4])))
images = np.asarray(images, dtype=np.float32)
actions = np.asarray(actions, dtype=np.float32)
states = np.asarray(states, dtype=np.float32)
train_val_split_index = int(np.floor(train_val_split * len(images)))
images_training = np.asarray(images[:train_val_split_index])
actions_training = np.asarray(actions[:train_val_split_index])
states_training = np.asarray(states[:train_val_split_index])
images_validation = np.asarray(images[train_val_split_index:])
actions_validation = np.asarray(actions[train_val_split_index:])
states_validation = np.asarray(states[train_val_split_index:])
logger.info('Data set contain {0}, {1} will be use for training and {2} will be use for validation'.format(len(images)-1, train_val_split_index, len(images)-1-train_val_split_index))
# Create the model
training_model = Model(
num_masks=num_masks,
is_cdna=model_type == 'CDNA',
is_dna=model_type == 'DNA',
is_stp=model_type == 'STP',
use_state=use_state,
scheduled_sampling_k=schedsamp_k,
num_frame_before_prediction=context_frames,
prefix='train'
)
# Create the optimizers for the models
optimizer = chainer.optimizers.Adam(alpha=learning_rate)
optimizer.setup(training_model)
# Load a previous model
if pretrained_model:
chainer.serializers.load_npz(pretrained_model, training_model)
logger.info("Loading pretrained model {}".format(pretrained_model))
if pretrained_state:
chainer.serializers.load_npz(pretrained_state, training_model)
logger.info("Loading pretrained state {}".format(pretrained_state))
# Save the current GIT commit corresponding to the current training.
# When predicting or visualizing the model, change the working directory to the GIT snapshot
# This way, instead of copying the files into the model folder, we use GIT functionality to preserve the training files
current_version = None
try:
subprocess.check_call(['git', 'status'])
def git_exec(args):
process = subprocess.Popen(['git'] + args, stdout=subprocess.PIPE)
res = process.communicate()[0].rstrip().strip()
#process.wait()
return res
current_version = git_exec(['rev-parse', '--abbrev-ref', 'HEAD']) + '\n' + git_exec(['rev-parse', 'HEAD'])
except:
pass
# Training
# Enable GPU support if defined
if gpu > -1:
chainer.cuda.get_device_from_id(gpu).use()
training_model.to_gpu()
xp = cupy
else:
xp = np
# Create the batches for Chainer's implementation of the iterator
# Group the images, actions and states
grouped_set_training = []
grouped_set_validation = []
for idx in xrange(len(images_training)):
group = []
group.append(images_training[idx])
group.append(actions_training[idx])
group.append(states_training[idx])
grouped_set_training.append(group)
for idx in xrange(len(images_validation)):
group = []
group.append(images_validation[idx])
group.append(actions_validation[idx])
group.append(states_validation[idx])
grouped_set_validation.append(group)
#train_iter = chainer.iterators.SerialIterator(grouped_set_training, batch_size)
train_iter = chainer.iterators.SerialIterator(grouped_set_training, batch_size, repeat=True, shuffle=True)
valid_iter = chainer.iterators.SerialIterator(grouped_set_validation, batch_size, repeat=False, shuffle=True)
# Run training
# As per Finn's implementation, each epoch visits every mini-batch exactly once, in random order.
# When the iterator's queue is exhausted before the requested number of iterations is reached, it is reshuffled and regenerated.
local_losses = []
local_psnr_all = []
local_losses_valid = []
local_psnr_all_valid = []
global_losses = []
global_psnr_all = []
global_losses_valid = []
global_psnr_all_valid = []
summaries, summaries_valid = [], []
training_queue = []
validation_queue = []
#for epoch in xrange(epochs):
start_time = None
stop_time = None
itr = 0
while itr < num_iterations:
epoch = train_iter.epoch
batch = train_iter.next()
#x = concat_examples(batch)
img_training_set, act_training_set, sta_training_set = concat_examples(batch)
# Perform training
logger.info("Begining training for mini-batch {0}/{1} of epoch {2}".format(str(train_iter.current_position), str(len(images_training)), str(epoch+1)))
logger.info("Global iteration: {}".format(str(itr+1)))
#loss = training_model(img_training_set, act_training_set, sta_training_set, epoch, schedsamp_k, use_state, num_masks, context_frames)
if start_time is None:
start_time = time.time()
optimizer.update(training_model, [xp.array(img_training_set), xp.array(act_training_set), xp.array(sta_training_set)], itr)
loss = training_model.loss
psnr_all = training_model.psnr_all
summaries = training_model.summaries
loss_data_cpu = chainer.cuda.to_cpu(loss.data)
psnr_data_cpu = chainer.cuda.to_cpu(psnr_all.data)
local_losses.append(loss_data_cpu)
local_psnr_all.append(psnr_data_cpu)
training_model.reset_state()
logger.info("{0} {1}".format(str(epoch+1), str(loss.data)))
loss, psnr_all, loss_data_cpu, psnr_data_cpu = None, None, None, None
if train_iter.is_new_epoch:
stop_time = time.time()
logger.info("[TRAIN] Epoch #: {}".format(epoch+1))
logger.info("[TRAIN] Epoch elapsed time: {}".format(stop_time-start_time))
local_losses = np.array(local_losses)
local_psnr_all = np.array(local_psnr_all)
global_losses.append([local_losses.mean(), local_losses.std(), local_losses.min(), local_losses.max(), np.median(local_losses)])
global_psnr_all.append([local_psnr_all.mean(), local_psnr_all.std(), local_psnr_all.min(), local_psnr_all.max(), np.median(local_psnr_all)])
logger.info("[TRAIN] epoch loss: {}".format(local_losses.mean()))
logger.info("[TRAIN] epoch psnr: {}".format(local_psnr_all.mean()))
local_losses, local_psnr_all = [], []
start_time, stop_time = None, None
if train_iter.is_new_epoch and epoch+1 % validation_interval == 0:
start_time = time.time()
for batch in valid_iter:
logger.info("Begining validation for mini-batch {0}/{1} of epoch {2}".format(str(valid_iter.current_position), str(len(images_validation)), str(epoch+1)))
img_validation_set, act_validation_set, sta_validation_set = concat_examples(batch)
#x_validation = concat_examples(batch)
# Run through validation set
#loss_valid, psnr_all_valid, summaries_valid = validation_model(img_validation_set, act_validation_set, sta_validation_set, epoch, schedsamp_k, use_state, num_masks, context_frames)
with chainer.using_config('train', False):
loss_valid = training_model([xp.array(img_validation_set), xp.array(xp.act_validation_set), xp.array(sta_validation_set)], itr)
psnr_all_valid = training_model.psnr_all
summaries_valid = training_model.summaries
loss_valid_data_cpu = chainer.cuda.to_cpu(loss_valid.data)
psnr_all_valid_data_cpu = chainer.cuda.to_cpu(psnr_all_valid.data)
local_losses_valid.append(loss_valid_data_cpu)
local_psnr_all_valid.append(psnr_all_valid_data_cpu)
training_model.reset_state()
loss_valid, psnr_all_valid, loss_valid_data_cpu, psnr_all_valid_data_cpu = None, None, None, None
stop_time = time.time()
logger.info("[VALID] Epoch #: {}".format(epoch+1))
logger.info("[VALID] epoch elapsed time: {}".format(stop_time-start_time))
local_losses_valid = np.array(local_losses_valid)
local_psnr_all_valid = np.array(local_psnr_all_valid)
global_losses_valid.append([local_losses_valid.mean(), local_losses_valid.std(), local_losses_valid.min(), local_losses_valid.max(), np.median(local_losses_valid)])
global_psnr_all_valid.append([local_psnr_all_valid.mean(), local_psnr_all_valid.std(), local_psnr_all_valid.min(), local_psnr_all_valid.max(), np.median(local_psnr_all_valid)])
logger.info("[VALID] epoch loss: {}".format(local_losses_valid.mean()))
logger.info("[VALID] epoch psnr: {}".format(local_psnr_all_valid.mean()))
local_losses_valid, local_psnr_all_valid = [], []
start_time, stop_time = None, None
valid_iter.reset()
training_model.reset_state()
if train_iter.is_new_epoch and epoch % save_interval == 0:
#if epoch % save_interval == 0:
logger.info('Saving model')
save_dir = output_dir + '/' + model_suffix_dir
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# Save the version of the code
f = open(save_dir + '/version', 'w')
f.write(current_version + '\n')
f.close()
serializers.save_npz(save_dir + '/' + training_suffix + '-' + str(epoch), training_model)
#serializers.save_npz(save_dir + '/' + validation_suffix + '-' + str(epoch), validation_model)
serializers.save_npz(save_dir + '/' + state_suffix + '-' + str(epoch), optimizer)
np.save(save_dir + '/' + training_suffix + '-global_losses', np.array(global_losses))
np.save(save_dir + '/' + training_suffix + '-global_psnr_all', np.array(global_psnr_all))
np.save(save_dir + '/' + training_suffix + '-global_losses_valid', np.array(global_losses_valid))
np.save(save_dir + '/' + training_suffix + '-global_psnr_all', np.array(global_psnr_all_valid))
#for summ in summaries:
#logger.info(summ)
summaries = []
#for summ_valid in summaries_valid:
#logger.info(summ_valid)
summaries_valid = []
itr += 1
if __name__ == '__main__':
    # Configure root logging (stderr by default) before running the trainer.
    LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
    main()
|
{"hexsha": "f93f8f5f54c8b0335e4d92045f0fd57308c74a55", "size": 46527, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/train_model.py", "max_stars_repo_name": "kristofbc/physical-interaction-video-prediction", "max_stars_repo_head_hexsha": "845286629065c6a62580064943800b2b4ef0e04c", "max_stars_repo_licenses": ["FTL"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-01-18T11:42:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-13T08:36:49.000Z", "max_issues_repo_path": "src/models/train_model.py", "max_issues_repo_name": "kristofbc/physical-interaction-video-prediction", "max_issues_repo_head_hexsha": "845286629065c6a62580064943800b2b4ef0e04c", "max_issues_repo_licenses": ["FTL"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:09:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:29:32.000Z", "max_forks_repo_path": "src/models/train_model.py", "max_forks_repo_name": "kristofbc/physical-interaction-video-prediction", "max_forks_repo_head_hexsha": "845286629065c6a62580064943800b2b4ef0e04c", "max_forks_repo_licenses": ["FTL"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-05-19T14:31:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-30T11:10:26.000Z", "avg_line_length": 44.0179754021, "max_line_length": 270, "alphanum_fraction": 0.6296988845, "include": true, "reason": "import numpy,import cupy", "num_tokens": 10832}
|
import os
import numpy as np
from rfvision.datasets import DATASETS
from torch.utils.data import Dataset
import os
import numpy as np
import rflib
from rfvision.datasets import DATASETS
from rfvision.datasets.custom3d import Custom3DDataset
# Parent joint index for each of the 21 hand keypoints: joint i's parent is
# SNAP_PARENT[i]; joint 0 (the wrist, per the joint table in the commented-out
# IKDataset docstring below) is its own parent and roots the five finger chains.
# NOTE(review): not referenced by the visible code in this file -- presumably
# consumed by importers of this module; confirm before removing.
SNAP_PARENT = [
    0,  # 0's parent
    0,  # 1's parent
    1,
    2,
    3,
    0,  # 5's parent
    5,
    6,
    7,
    0,  # 9's parent
    9,
    10,
    11,
    0,  # 13's parent
    13,
    14,
    15,
    0,  # 17's parent
    17,
    18,
    19,
]
@DATASETS.register_module()
class IKDataset(Custom3DDataset):
    """Hand inverse-kinematics dataset.

    Loads 21-joint hand keypoints (``joints_xyz``) and the corresponding
    ``quat`` targets from one or more pickle sources, then exposes
    ``{'joints_xyz', 'quat'}`` samples with root-centred, scale-normalized
    keypoints.

    Args:
        data_root (str): Directory containing ``freihand_j_s_q.pkl`` and/or
            the ``GenData`` sub-directory of ``*.pkl`` files.
        data_source (Sequence[str]): Sources to load; any subset of
            ``('freihand_gt', 'GenData')``.
        split (str): One of ``'train'``, ``'test'``, ``'val'``, ``'all'``.
        shuffle (bool): If True, shuffle the samples before the train/test
            split.
    """

    CLASSES = None

    def __init__(self,
                 data_root='/hddisk1/data/IKdataset',
                 # Immutable default: a mutable list default would be shared
                 # across all instantiations.
                 data_source=('freihand_gt', 'GenData'),
                 split='all',
                 shuffle=False,
                 **kwargs):
        assert split in ('train', 'test', 'val', 'all')
        joints_xyz_all = ()
        quats_all = ()
        if 'freihand_gt' in data_source:
            freihand_gt_path = os.path.join(data_root, 'freihand_j_s_q.pkl')
            data = rflib.load(freihand_gt_path)
            joints_xyz_all += np.array(data['joints']),
            quats_all += np.array(data['quat']),
        if 'GenData' in data_source:
            gen_data_root = os.path.join(data_root, 'GenData')
            for i in os.listdir(gen_data_root):
                if i.endswith('.pkl'):
                    data = rflib.load(os.path.join(gen_data_root, i))
                    joints_xyz_all += data['joint_'],
                    quats_all += data['quat'],
        joints_xyz_all = np.concatenate(joints_xyz_all, axis=0)
        quats_all = np.concatenate(quats_all, axis=0)

        # Split dataset: first 99% of the (possibly shuffled) order -> train,
        # remainder -> test/val.
        split_ratio = 0.99
        if shuffle:
            # BUGFIX: np.random.randint samples *with* replacement, which
            # duplicated some samples, dropped others, and could leak the
            # same sample into both splits; a permutation uses every sample
            # exactly once.
            all_idx = np.random.permutation(len(joints_xyz_all))
        else:
            all_idx = np.arange(len(joints_xyz_all))
        train_length = int(split_ratio * len(joints_xyz_all))
        train_idx = all_idx[:train_length]
        test_idx = all_idx[train_length:]
        if split == 'train':
            self.joints_xyz = joints_xyz_all[train_idx]
            self.quats = quats_all[train_idx]
        elif split == 'test' or split == 'val':
            self.joints_xyz = joints_xyz_all[test_idx]
            self.quats = quats_all[test_idx]
        elif split == 'all':
            self.joints_xyz = joints_xyz_all
            self.quats = quats_all
        # Reference bone (wrist -> middle_finger1, per the joint table in the
        # commented-out docstring below).
        self.ref_bone_link = (0, 9)
        self._set_group_flag()

    def __len__(self):
        # Number of loaded samples in the selected split.
        return len(self.joints_xyz)

    def __getitem__(self, index):
        """Return one sample: normalized joints and the raw quat target."""
        joints_xyz = self.joints_xyz[index]
        # Keep only the normalized cloud; centroid and scale are discarded.
        joints_xyz = normalize_point_cloud(joints_xyz)[0]
        quat = self.quats[index]
        results = {'joints_xyz': joints_xyz,
                   'quat': quat}
        return results
def normalize_point_cloud(pc, root_idx=9):
    """Center a point cloud on a reference point and scale it to unit radius.

    Args:
        pc (array-like): Point array of shape (N, D).
        root_idx (int): Index of the point used as the centroid. Defaults
            to 9, which in the 21-joint hand layout used by this file is
            the first middle-finger joint.

    Returns:
        tuple: ``(pc_normalized, centroid, m)`` where ``centroid`` is the
        reference point and ``m`` is the maximum Euclidean distance from it
        (the scale that was divided out).
    """
    pc = np.asarray(pc)
    centroid = pc[root_idx]
    pc = pc - centroid
    # Largest distance from the centroid; after division every point lies
    # within the unit ball.
    m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
    pc_normalized = pc / m
    return pc_normalized, centroid, m
# @DATASETS.register_module()
# class IKDataset(Dataset):
# '''
# We use manotorch(https://github.com/lixiny/manotorch) to generate the INVKDataset
# according to following joint sequence.
# 0: 'wrist',
# 1: 'thumb1',
# 2: 'thumb2',
# 3: 'thumb3',
# 4: 'thumb4',
# 5: 'forefinger1',
# 6: 'forefinger2',
# 7: 'forefinger3',
# 8: 'forefinger4',
# 9: 'middle_finger1',
# 10: 'middle_finger2',
# 11: 'middle_finger3',
# 12: 'middle_finger4',
# 13: 'ring_finger1',
# 14: 'ring_finger2',
# 15: 'ring_finger3',
# 16: 'ring_finger4',
# 17: 'pinky_finger1',
# 18: 'pinky_finger2',
# 19: 'pinky_finger3',
# 20: 'pinky_finger4'
#
# INVKDataset includes two metas: joints_xyz (bz, 21, 3) and full_poses (bz, 48)
# Note: if you download our dataset (without any processing such as normalization and root relative), bz = 1000000. For more details: https://github.com/lixiny/manotorch
# '''
#
# def __init__(self,
# data_root='/hddisk1/data/IKdataset',
# split='train',
# shuffle=False,
# **kwargs):
#
# assert split in ('train', 'test', 'val', 'all')
# joints_xyz_path = os.path.join(data_root, 'joints_xyz.npy')
# full_poses_path = os.path.join(data_root, 'full_poses.npy')
# joints_xyz = np.load(joints_xyz_path, allow_pickle=True)
# full_poses = np.load(full_poses_path)
#
# # split dataset
# split_ratio = 0.8
# if shuffle == True:
# all_idx = np.random.randint(0, len(joints_xyz), len(joints_xyz))
# else:
# all_idx = list(range(len(joints_xyz)))
#
# train_length = int(split_ratio * len(joints_xyz))
# train_idx = all_idx[:train_length]
# test_idx = all_idx[train_length:]
#
# if split == 'train':
# self.joints_xyz = joints_xyz[train_idx]
# self.full_poses = full_poses[train_idx]
# elif split == 'test' or split =='val':
# self.joints_xyz = joints_xyz[test_idx]
# self.full_poses = full_poses[test_idx]
# elif split == 'all':
# self.joints_xyz = joints_xyz
# self.full_poses = full_poses
#
# self._processing()
#
# def _processing(self):
# self.full_poses = self.full_poses.reshape(-1, 16, 3)
#
# def __len__(self):
# return len(self.joints_xyz)
#
# def __getitem__(self, index):
# joints_xyz = self.joints_xyz[index]
# joints_xyz = normalize_point_cloud(joints_xyz)[0]
# full_poses = self.full_poses[index]
# results = {'joints_xyz': joints_xyz,
# 'full_poses': full_poses}
# return results
if __name__ == '__main__':
    # No standalone behaviour; kept as a hook for quick manual checks, e.g.:
    #   dataset = IKDataset()
    #   sample = dataset[0]
    pass
|
{"hexsha": "07d7d990228e1363282741543661628b06ce23a4", "size": 6048, "ext": "py", "lang": "Python", "max_stars_repo_path": "rfvision/datasets/ik_dataset.py", "max_stars_repo_name": "tycoer/rfvision-1", "max_stars_repo_head_hexsha": "db6e28746d8251d1f394544c32b9e0af388d9964", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rfvision/datasets/ik_dataset.py", "max_issues_repo_name": "tycoer/rfvision-1", "max_issues_repo_head_hexsha": "db6e28746d8251d1f394544c32b9e0af388d9964", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rfvision/datasets/ik_dataset.py", "max_forks_repo_name": "tycoer/rfvision-1", "max_forks_repo_head_hexsha": "db6e28746d8251d1f394544c32b9e0af388d9964", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0895522388, "max_line_length": 173, "alphanum_fraction": 0.5616732804, "include": true, "reason": "import numpy", "num_tokens": 1625}
|
/* vim: set tabstop=4 expandtab shiftwidth=4 softtabstop=4: */
/**
* \file src/fgt_coalform.cpp
*
* \brief Form stable coalition among a set of fog nodes.
*
* \author Marco Guazzone (marco.guazzone@gmail.com)
*
* <hr/>
*
* Copyright 2017 Marco Guazzone (marco.guazzone@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <boost/timer.hpp>
#include <cstddef>
#include <dcs/assert.hpp>
#include <dcs/debug.hpp>
#include <dcs/cli.hpp>
#include <dcs/debug.hpp>
#include <dcs/exception.hpp>
#include <dcs/fgt/coalition_formation.hpp>
#include <dcs/fgt/experiment.hpp>
#include <dcs/logging.hpp>
//#include <dcs/macro.hpp>
#include <exception>
#include <iostream>
#include <limits>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
namespace cli = dcs::cli;
namespace fgt = dcs::fgt;
namespace /*<unnamed>*/ { namespace detail {
class cli_options_t;
template <typename CharT, typename CharTraitsT>
std::basic_ostream<CharT,CharTraitsT>& operator<<(std::basic_ostream<CharT,CharTraitsT>& os, const cli_options_t& opts);
cli_options_t parse_cli_options(int argc, char* argv[]);
void usage(char const* progname);
/// Container for every command-line option accepted by this program,
/// initialized by the constructor to the same defaults that
/// parse_cli_options() advertises.
struct cli_options_t
{
    /// Constructs the option set with its default values.
    cli_options_t()
    : help(false),
      coalition_formation(fgt::nash_stable_coalition_formation),
      coalition_formation_interval(0),
      coalition_value_division(fgt::shapley_coalition_value_division),
      find_all_best_partitions(false),
      optim_relative_tolerance(0),
      optim_time_limit(-1),
      rng_seed(5489),
      service_delay_tolerance(1e-5),
      sim_ci_level(0.95),
      sim_ci_rel_precision(0.04),
      sim_max_num_replications(0),
      sim_max_replication_duration(0),
      verbosity(0)
    {
    }

    bool help; ///< A \c true value means that the user asked for the usage message
    fgt::coalition_formation_category coalition_formation; ///< The strategy according which form coalitions
    double coalition_formation_interval; ///< The time interval at which the coalition formation algorithm activates (in terms of simulated time)
    fgt::coalition_value_division_category coalition_value_division; ///< The payoff division category (how a coalition's value is split among members)
    bool find_all_best_partitions; ///< A \c true value means that all possible best partitions are computed
    double optim_relative_tolerance; ///< The relative tolerance option to set to the optimizer
    double optim_time_limit; ///< The time limit option to set to the optimizer
    std::string output_stats_data_file; ///< The path to the output stats data file
    std::string output_trace_data_file; ///< The path to the output trace data file
    unsigned long rng_seed; ///< The seed used for random number generation
    std::string scenario_file; ///< The path to the input scenario file
    double service_delay_tolerance; ///< The relative tolerance to set in the service performance model
    double sim_ci_level; ///< Level for confidence intervals
    double sim_ci_rel_precision; ///< Relative precision for the half-width of the confidence intervals
    std::size_t sim_max_num_replications; ///< Maximum number of replications (0 means 'unlimited')
    double sim_max_replication_duration; ///< Length of each replication (in terms of simulated time)
    int verbosity; ///< The verbosity level: 0 for 'minimum' and 9 for 'maximum' verbosity level
}; // cli_options_t
/// Reads every supported option from the command line and returns the
/// populated option set. Throws std::invalid_argument for an unknown
/// category value or when the mandatory --scenario option is missing.
cli_options_t parse_cli_options(int argc, char* argv[])
{
    cli_options_t opts;

    DCS_DEBUG_TRACE("Parse CLI options...");//XXX

    opts.help = cli::simple::get_option(argv, argv+argc, "--help");

    // Coalition formation strategy ('nash' is currently the only choice).
    std::string formation = cli::simple::get_option<std::string>(argv, argv+argc, "--formation", "nash");
    if (formation != "nash")
    {
        DCS_EXCEPTION_THROW(std::invalid_argument, "Unknown coalition formation category");
    }
    opts.coalition_formation = fgt::nash_stable_coalition_formation;

    opts.coalition_formation_interval = cli::simple::get_option<double>(argv, argv+argc, "--formation-interval", 0);

    // Payoff division criterion ('shapley' is currently the only choice).
    std::string payoff = cli::simple::get_option<std::string>(argv, argv+argc, "--payoff", "shapley");
    if (payoff != "shapley")
    {
        DCS_EXCEPTION_THROW(std::invalid_argument, "Unknown coalition value division category.");
    }
    opts.coalition_value_division = fgt::shapley_coalition_value_division;

    opts.find_all_best_partitions = cli::simple::get_option(argv, argv+argc, "--find-all-parts");
    opts.optim_relative_tolerance = cli::simple::get_option<double>(argv, argv+argc, "--optim-reltol", 0);
    opts.optim_time_limit = cli::simple::get_option<double>(argv, argv+argc, "--optim-tilim", -1);
    opts.output_stats_data_file = cli::simple::get_option<std::string>(argv, argv+argc, "--out-stats-file");
    opts.output_trace_data_file = cli::simple::get_option<std::string>(argv, argv+argc, "--out-trace-file");
    opts.rng_seed = cli::simple::get_option<unsigned long>(argv, argv+argc, "--rng-seed", 5489);
    opts.scenario_file = cli::simple::get_option<std::string>(argv, argv+argc, "--scenario");
    opts.service_delay_tolerance = cli::simple::get_option<double>(argv, argv+argc, "--service-delay-tol", 1e-5);
    opts.sim_ci_level = cli::simple::get_option<double>(argv, argv+argc, "--ci-level", 0.95);
    opts.sim_ci_rel_precision = cli::simple::get_option<double>(argv, argv+argc, "--ci-rel-precision", 0.04);
    opts.sim_max_num_replications = cli::simple::get_option<std::size_t>(argv, argv+argc, "--sim-max-num-rep", 0);
    opts.sim_max_replication_duration = cli::simple::get_option<double>(argv, argv+argc, "--sim-max-rep-len", 0);

    // Clamp the verbosity level into the supported [0,9] range.
    opts.verbosity = cli::simple::get_option<short>(argv, argv+argc, "--verbosity", 0);
    if (opts.verbosity < 0)
    {
        opts.verbosity = 0;
    }
    if (opts.verbosity > 9)
    {
        opts.verbosity = 9;
    }

    // A scenario file is mandatory.
    if (opts.scenario_file.empty())
    {
        DCS_EXCEPTION_THROW( std::invalid_argument, "Scenario file not specified" );
    }

    return opts;
}
/// Streams the options as a single comma-separated "key: value" record
/// (field order matches the declaration order of cli_options_t).
template <typename CharT, typename CharTraitsT>
std::basic_ostream<CharT,CharTraitsT>& operator<<(std::basic_ostream<CharT,CharTraitsT>& os, const cli_options_t& opts)
{
    os << "help: " << opts.help;
    os << ", coalition-formation: " << opts.coalition_formation;
    os << ", coalition-formation-interval: " << opts.coalition_formation_interval;
    os << ", coalition-value-division: " << opts.coalition_value_division;
    os << ", optim-relative-tolerance: " << opts.optim_relative_tolerance;
    os << ", optim-time-limit: " << opts.optim_time_limit;
    os << ", output-stats-data-file: " << opts.output_stats_data_file;
    os << ", output-trace-data-file: " << opts.output_trace_data_file;
    os << ", random-generator-seed: " << opts.rng_seed;
    os << ", scenario-file: " << opts.scenario_file;
    os << ", sim-ci-level: " << opts.sim_ci_level;
    os << ", sim-ci-relative-precision: " << opts.sim_ci_rel_precision;
    os << ", sim-max-num-replications: " << opts.sim_max_num_replications;
    os << ", sim-max-replication-duration: " << opts.sim_max_replication_duration;
    os << ", service-delay-tolerance: " << opts.service_delay_tolerance;
    os << ", verbosity: " << opts.verbosity;
    return os;
}
/// Prints the command-line help message for this program to standard error.
///
/// \param progname The program name to display (typically argv[0]).
///
/// The option names shown here must stay in sync with the ones read by
/// parse_cli_options().
void usage(char const* progname)
{
    std::cerr << "Usage: " << progname << " [options]" << std::endl
              << "Options:" << std::endl
              << "--help" << std::endl
              << "  Show this message." << std::endl
              << "--service-delay-tol <num>" << std::endl
              << "  Real number in [0,1] denoting the relative tolerance for the delay used in the service performance model." << std::endl
              << "--find-all-parts" << std::endl
              << "  For each time interval, find all possible stable partitions." << std::endl
              << "--formation {'nash'}" << std::endl
              << "  Coalition formation category, where:" << std::endl
              << "  * 'nash' refers to the Nash-stable coalition formation;" << std::endl
              << "--formation-interval <num>" << std::endl
              << "  Real number >= 0 denoting the activating time interval of the coalition formation algorithm." << std::endl
              << "--optim-reltol <num>" << std::endl
              << "  Real number in [0,1] denoting the relative tolerance parameter in the optimizer." << std::endl
              << "--optim-tilim <num>" << std::endl
              << "  Real positive number denoting the maximum number of seconds to wait for the termination of the optimizer." << std::endl
              // BUGFIX: parse_cli_options() reads "--out-stats-file" and
              // "--out-trace-file"; the help text previously advertised
              // "--output-stats-file"/"--output-trace-file", which the
              // parser silently ignored.
              << "--out-stats-file <file>" << std::endl
              << "  The output file where writing statistics." << std::endl
              << "--out-trace-file <file>" << std::endl
              << "  The output file where writing run-trace information." << std::endl
              << "--payoff {'shapley'}" << std::endl
              << "  Payoff division category, where:" << std::endl
              << "  * 'shapley' refers to the Shapley value." << std::endl
              << "--rng-seed <num>" << std::endl
              << "  Set the seed to use for random number generation." << std::endl
              << "--scenario <file>" << std::endl
              << "  The path to the file describing the scenario to use for the experiment." << std::endl
              << "--sim-ci-level <num>" << std::endl
              << "  Level for the confidence intervals (must be a number in [0,1])." << std::endl
              << "--sim-ci-rel-precision <num>" << std::endl
              << "  Relative precision for the half-width of the confidence intervals (must be a number in [0,1])." << std::endl
              << "--sim-max-rep-len <num>" << std::endl
              << "  Real number >= 0 denoting the maximum duration of each independent replication." << std::endl
              << "--sim-max-num-rep <num>" << std::endl
              << "  Integer number >= 0 denoting the maximum number of independent replications. Use 0 for an unlimited number of replications." << std::endl
              << "--verbosity <num>" << std::endl
              << "  An integer number in [0,9] representing the verbosity level (0 for 'minimum verbosity' and 9 for 'maximum verbosity)." << std::endl
              << std::endl;
}
/// Runs one coalition-formation experiment described by the given scenario
/// and options, using the supplied random number engine.
///
/// \tparam RealT Floating-point type used throughout the experiment.
/// \tparam RNGT  Random number engine type.
template <typename RealT, typename RNGT>
void run_experiment(const fgt::scenario_t<RealT>& scen, const fgt::options_t<RealT>& opts, RNGT& rng)
{
    // NOTE(review): this timer is started but its elapsed time is never
    // read or reported -- presumably leftover instrumentation; confirm
    // before removing.
    boost::timer timer;
    std::cout << "- Scenario: " << scen << std::endl;
    std::cout << "- Options: " << opts << std::endl;
    fgt::experiment_t<RealT> exp;
    exp.setup(scen, opts, rng);
    exp.run();
}
}} // Namespace <unnamed>::detail
int main(int argc, char* argv[])
{
typedef double real_t;
try
{
detail::cli_options_t cli_opts;
cli_opts = detail::parse_cli_options(argc, argv);
if (cli_opts.help)
{
detail::usage(argv[0]);
return 0;
}
// Prepare the experiment
DCS_DEBUG_TRACE("Preparing the experiment...");//XXX
fgt::scenario_t<real_t> scenario;
scenario = fgt::make_scenario<real_t>(cli_opts.scenario_file);
DCS_DEBUG_TRACE("Scenario: " << scenario);
fgt::options_t<real_t> options;
options.optim_relative_tolerance = cli_opts.optim_relative_tolerance;
options.optim_time_limit = cli_opts.optim_time_limit;
options.coalition_formation = cli_opts.coalition_formation;
options.coalition_formation_interval = cli_opts.coalition_formation_interval;
options.coalition_value_division = cli_opts.coalition_value_division;
options.find_all_best_partitions = cli_opts.find_all_best_partitions;
options.output_stats_data_file = cli_opts.output_stats_data_file;
options.output_trace_data_file = cli_opts.output_trace_data_file;
options.service_delay_tolerance = cli_opts.service_delay_tolerance;
options.sim_ci_level = cli_opts.sim_ci_level;
options.sim_ci_rel_precision = cli_opts.sim_ci_rel_precision;
options.sim_max_num_replications = cli_opts.sim_max_num_replications;
options.sim_max_replication_duration = cli_opts.sim_max_replication_duration;
options.verbosity = cli_opts.verbosity;
//std::default_random_engine rng(cli_opts.rng_seed);
fgt::random_number_engine_t rng(cli_opts.rng_seed);
// Run the experiment
DCS_DEBUG_TRACE("Run the experiment...");//XXX
//FIXME
//options.coalition_formation_trigger_interval = 4*60; // Coalition formation every 4h
detail::run_experiment(scenario, options, rng);
}
catch (const std::invalid_argument& ia)
{
dcs::log_error(DCS_LOGGING_AT, ia.what());
detail::usage(argv[0]);
return 1;
}
catch (const std::exception& e)
{
dcs::log_error(DCS_LOGGING_AT, e.what());
return 1;
}
}
|
{"hexsha": "f897c82d8fd0b2adb994e08f3e245efbbc425ce1", "size": 13335, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/fog_coalform.cpp", "max_stars_repo_name": "sguazt/fog-gt", "max_stars_repo_head_hexsha": "92a01de4f3d71bf89741c7e4af1bebb965c64d28", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-03-26T19:03:40.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-26T19:03:40.000Z", "max_issues_repo_path": "src/fog_coalform.cpp", "max_issues_repo_name": "sguazt/fog-gt", "max_issues_repo_head_hexsha": "92a01de4f3d71bf89741c7e4af1bebb965c64d28", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/fog_coalform.cpp", "max_forks_repo_name": "sguazt/fog-gt", "max_forks_repo_head_hexsha": "92a01de4f3d71bf89741c7e4af1bebb965c64d28", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.1556291391, "max_line_length": 157, "alphanum_fraction": 0.6577427822, "num_tokens": 3284}
|
from __future__ import division, print_function, absolute_import
from math import sqrt, exp, cos, sin
import numpy as np
# Import testing parameters
try:
from scipy.optimize._tstutils import methods, mstrings, functions, fstrings
except ImportError:
pass
from scipy.optimize import newton # newton predates benchmarks
from .common import Benchmark
class Zeros(Benchmark):
    """Benchmark the bracketing root finders on the standard test problems."""

    params = [fstrings, mstrings]
    param_names = ['test function', 'solver']

    def setup(self, func, meth):
        # Bracket [0.5, sqrt(3)] shared by all test functions.
        self.a = 0.5
        self.b = sqrt(3)
        # Resolve the parameter strings to the actual callables.
        self.func = functions[fstrings.index(func)]
        self.meth = methods[mstrings.index(meth)]

    def time_zeros(self, func, meth):
        self.meth(self.func, self.a, self.b)
class Newton(Benchmark):
    """Benchmark the scalar Newton-type iterations (newton/secant/halley)."""

    params = [
        ['f1', 'f2'],
        ['newton', 'secant', 'halley'],
    ]
    param_names = ['test function', 'solver']

    def setup(self, func, meth):
        self.x0 = 3
        # Objective and its analytic first/second derivatives.
        if func == 'f1':
            f = lambda x: x ** 2 - 2 * x - 1
            fprime = lambda x: 2 * x - 2
            fprime2 = lambda x: 2.0 + 0 * x
        else:
            f = lambda x: exp(x) - cos(x)
            fprime = lambda x: exp(x) + sin(x)
            fprime2 = lambda x: exp(x) + cos(x)
        self.f = f
        # 'newton' needs f'; 'halley' needs f' and f''; 'secant' needs neither.
        self.f_1 = fprime if meth in ('newton', 'halley') else None
        self.f_2 = fprime2 if meth == 'halley' else None

    def time_newton(self, func, meth):
        newton(self.f, self.x0, args=(), fprime=self.f_1, fprime2=self.f_2)
class NewtonArray(Benchmark):
    """Compare looping over scalar newton() calls with one vectorized call."""

    params = [['loop', 'array'], ['newton', 'secant', 'halley']]
    param_names = ['vectorization', 'solver']

    def setup(self, vec, meth):
        if vec == 'loop':
            # Solve each scalar problem independently, threading the two
            # per-element argument arrays (args[0], args[1]) element by element.
            if meth == 'newton':
                def fvec(f, x0, args, fprime, fprime2):
                    return [newton(f, x, args=(a0, a1) + args[2:], fprime=fprime)
                            for x, a0, a1 in zip(x0, args[0], args[1])]
            elif meth == 'halley':
                def fvec(f, x0, args, fprime, fprime2):
                    return [newton(f, x, args=(a0, a1) + args[2:], fprime=fprime,
                                   fprime2=fprime2)
                            for x, a0, a1 in zip(x0, args[0], args[1])]
            else:  # secant: no derivatives used
                def fvec(f, x0, args, fprime, fprime2):
                    return [newton(f, x, args=(a0, a1) + args[2:])
                            for x, a0, a1 in zip(x0, args[0], args[1])]
        else:
            # Single vectorized call over the whole input array.
            if meth == 'newton':
                def fvec(f, x0, args, fprime, fprime2):
                    return newton(f, x0, args=args, fprime=fprime)
            elif meth == 'halley':
                # newton() already accepts the full signature used below.
                fvec = newton
            else:
                def fvec(f, x0, args, fprime, fprime2):
                    return newton(f, x0, args=args)
        self.fvec = fvec

    def time_array_newton(self, vec, meth):
        # Test problem with two per-element parameter arrays (a0, a1) and
        # four shared scalar parameters.
        def f(x, *a):
            b = a[0] + x * a[3]
            return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x

        def f_1(x, *a):
            b = a[3] / a[5]
            return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1

        def f_2(x, *a):
            b = a[3] / a[5]
            return -a[2] * np.exp(a[0] / a[5] + x * b) * b ** 2

        a0 = np.array([
            5.32725221, 5.48673747, 5.49539973,
            5.36387202, 4.80237316, 1.43764452,
            5.23063958, 5.46094772, 5.50512718,
            5.42046290
        ])
        a1 = (np.sin(range(10)) + 1.0) * 7.0
        args = (a0, a1, 1e-09, 0.004, 10, 0.27456)
        x0 = [7.0] * 10
        self.fvec(f, x0, args=args, fprime=f_1, fprime2=f_2)
|
{"hexsha": "37eb95890965155c5d77a89789201381794a0065", "size": 3843, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmarks/benchmarks/optimize_zeros.py", "max_stars_repo_name": "magnusja/scipy", "max_stars_repo_head_hexsha": "c4a5a1f984e28840010f20a7e41caa21b8f41979", "max_stars_repo_licenses": ["FSFAP"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-20T14:11:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-12T07:11:36.000Z", "max_issues_repo_path": "benchmarks/benchmarks/optimize_zeros.py", "max_issues_repo_name": "magnusja/scipy", "max_issues_repo_head_hexsha": "c4a5a1f984e28840010f20a7e41caa21b8f41979", "max_issues_repo_licenses": ["FSFAP"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2015-07-05T19:58:44.000Z", "max_issues_repo_issues_event_max_datetime": "2016-01-24T17:17:02.000Z", "max_forks_repo_path": "benchmarks/benchmarks/optimize_zeros.py", "max_forks_repo_name": "magnusja/scipy", "max_forks_repo_head_hexsha": "c4a5a1f984e28840010f20a7e41caa21b8f41979", "max_forks_repo_licenses": ["FSFAP"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-11T09:27:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-08T01:00:21.000Z", "avg_line_length": 31.7603305785, "max_line_length": 79, "alphanum_fraction": 0.4717668488, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1207}
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#define COMPEARTH_PRIVATE_DET3X3 1
#define COMPEARTH_PRIVATE_CROSS3 1
#define COMPEARTH_PRIVATE_NORM3 1
#define COMPEARTH_PRIVATE_GEM3 1
#define COMPEARTH_PRIVATE_GEMT3 1
#include "compearth.h"
#ifdef COMPEARTH_USE_MKL
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreserved-id-macro"
#pragma clang diagnostic ignored "-Wstrict-prototypes"
#endif
#include <mkl_lapacke.h>
//#include <mkl_cblas.h>
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#else
#include <lapacke.h>
//#include <cblas.h>
#endif
#define LWORK 15
/*!
* @brief Converts a nearly orthogonal matrix into a numerically orthogonal
* matrix.
*
* @param[in] n Number of bases.
* @param[in] itype Orthogonalization strategy. \n
* CE_ORTH_SVD orthgonalizes with the SVD. \n
* CE_ORTH_TAPE2012 orthogalizes with Tape 2012c
* Appendix E. \n
* CE_ORTH_QUAT orthogonalizes with quaternions.
* @param[in] Uin [3 x 3 x n] set of bases where each [3 x 3] basis is
* in column major order.
*
* @param[out] Uout [3 x 3 x n] set of re-orthgonalized bases where
* each [3 x 3] bais is in column major order.
* @param[out] dtUin If NULL then this will be ignored. \n
* Otherwise, this is an array of dimension [n] holding
* the determinants of the input basis.
* @param[out] dtUout If NULL then this will be ignored. \n
* Otherwise, this is an array of dimension [n] holding
* the determinants of the output basis.
*
* @result 0 indicates success.
*
* @author Carl Tape and converted to C by Ben Baker.
*
*/
int compearth_Uorth(const int n,
                    const enum ceOrthoType_enum itype,
                    const double *__restrict__ Uin,
                    double *__restrict__ Uout,
                    double *__restrict__ dtUin,
                    double *__restrict__ dtUout)
{
    /* Scratch 3x3 matrices for the SVD; all column-major, cache-line aligned. */
    double Ut[9] __attribute__((aligned(64)));
    double U[9] __attribute__((aligned(64)));
    double Vt[9] __attribute__((aligned(64)));
    double work[LWORK], s[3], p[3], det, normb, normp;
    int i, ierr;
    bool lwantDetIn;
    /* Determinant tolerance used by the TAPE2012 branch below.
       NOTE(review): the SVD branch uses a hard-coded 1.e-12 instead of this
       tol -- confirm whether the two thresholds are meant to differ. */
    const double tol = DBL_EPSILON*100.0;
    // Check inputs
    if (n < 1 || Uin == NULL || Uout == NULL)
    {
        if (n < 1){fprintf(stderr, "%s: No matrices\n", __func__);}
        if (Uin == NULL){fprintf(stderr, "%s: Uin is NULL\n", __func__);}
        if (Uout == NULL){fprintf(stderr, "%s: Uout is NULL\n", __func__);}
        return -1;
    }
    /* Only fill dtUin when the caller provided storage for it. */
    lwantDetIn = false;
    if (dtUin != NULL){lwantDetIn = true;}
    // Orthogonalize basis with SVD
    if (itype == CE_ORTH_SVD)
    {
        for (i=0; i<n; i++)
        {
            // This will frequently be called with matrices that may already
            // be sufficiently orthonormal.
            det = det3x3ColumnMajor(&Uin[9*i]);
            if (fabs(det - 1.0) > 1.e-12)
            {
                // Compute SVD
                /* dgesvd overwrites its input, so work on a copy. */
                memcpy(Ut, &Uin[9*i], 9*sizeof(double));
                ierr = LAPACKE_dgesvd_work(LAPACK_COL_MAJOR, 'A', 'A',
                                           3, 3, Ut, 3, s, U, 3,
                                           Vt, 3, work, LWORK);
                if (ierr != 0)
                {
                    fprintf(stderr, "%s: Error computing Ut\n", __func__);
                    return -1;
                }
                // Compute U*V' - note Vt is already computed by SVD
                /* The polar-decomposition projection U*V' is the nearest
                   orthogonal matrix to the input in the Frobenius norm. */
                gemm3_colMajorNoTransNoTrans(U, Vt, &Uout[9*i]);
                // N.B. I could get the determinant here from the singular
                // values but i'll wait
            }
            else
            {
                /* Already orthonormal to tolerance: copy through unchanged. */
                memcpy(&Uout[9*i], &Uin[9*i], 9*sizeof(double));
            }
            if (lwantDetIn){dtUin[i] = det;}
        }
    }
    // Orthgonalize with suggestion in TapeTape2012c Appendix E
    else if (itype == CE_ORTH_TAPE2012)
    {
        for (i=0; i<n; i++)
        {
            // This will frequently be called with matrices that may already
            // be sufficiently orthonormal.
            det = det3x3ColumnMajor(&Uin[9*i]);
            if (fabs(det - 1.0) > tol)
            {
                /* p = col1 x col2 of the input basis. Column 1 is kept as-is,
                   column 2 is normalized, and column 3 is divided by |p|.
                   NOTE(review): column 3 uses Uin's own third column scaled
                   by |p| rather than the cross product p/|p| itself, and
                   column 1 is never normalized -- confirm this matches the
                   TapeTape2012c Appendix E reference implementation. */
                cross3(&Uin[9*i], &Uin[9*i+3], p);
                normb = norm3(&Uin[9*i+3]);
                normp = norm3(p);
                Uout[9*i]   = Uin[9*i];
                Uout[9*i+1] = Uin[9*i+1];
                Uout[9*i+2] = Uin[9*i+2]; 
                Uout[9*i+3] = Uin[9*i+3]/normb;
                Uout[9*i+4] = Uin[9*i+4]/normb;
                Uout[9*i+5] = Uin[9*i+5]/normb;
                Uout[9*i+6] = Uin[9*i+6]/normp;
                Uout[9*i+7] = Uin[9*i+7]/normp;
                Uout[9*i+8] = Uin[9*i+8]/normp;
            }
            else
            {
                memcpy(&Uout[9*i], &Uin[9*i], 9*sizeof(double));
            }
            if (lwantDetIn){dtUin[i] = det;}
        }
    }
    else if (itype == CE_ORTH_QUAT)
    {
        fprintf(stderr, "%s: Error quaternions not yet programmed\n",
                __func__);
        return -1;
    }
    else if (itype == CE_NO_ORTH)
    {
        /* Pass-through: no orthogonalization requested. */
        memcpy(Uout, Uin, 9*(size_t) n*sizeof(double));
        //cblas_dcopy(9*n, Uin, 1, Uout, 1);
    }
    else
    {
        fprintf(stderr, "%s: Only itype=1 is programmed\n", __func__);
        return -1;
    }
    //if (dtUin != NULL)
    //{
    //    for (i=0; i<n; i++)
    //    {
    //        dtUin[i] = det3x3ColumnMajor(&Uin[9*i]);
    //    }
    //}
    /* Optionally report determinants of the orthogonalized bases. */
    if (dtUout != NULL)
    {
        for (i=0; i<n; i++)
        {
            dtUout[i] = det3x3ColumnMajor(&Uout[9*i]);
        }
    }
    return 0;
}
|
{"hexsha": "184e22a232b6a8a23ab9768b171b66a56cf9acf0", "size": 5879, "ext": "c", "lang": "C", "max_stars_repo_path": "c_src/Uorth.c", "max_stars_repo_name": "OUCyf/mtbeach", "max_stars_repo_head_hexsha": "188058083602cebf1471ea88939b07999c90b655", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9.0, "max_stars_repo_stars_event_min_datetime": "2021-03-13T01:18:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T23:55:36.000Z", "max_issues_repo_path": "c_src/Uorth.c", "max_issues_repo_name": "carltape/mtbeach", "max_issues_repo_head_hexsha": "188058083602cebf1471ea88939b07999c90b655", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2017-11-02T17:30:53.000Z", "max_issues_repo_issues_event_max_datetime": "2017-11-02T17:30:53.000Z", "max_forks_repo_path": "c_src/Uorth.c", "max_forks_repo_name": "carltape/mtbeach", "max_forks_repo_head_hexsha": "188058083602cebf1471ea88939b07999c90b655", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2021-07-08T00:13:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T13:42:40.000Z", "avg_line_length": 33.5942857143, "max_line_length": 76, "alphanum_fraction": 0.5101207688, "num_tokens": 1696}
|
import asyncio
import sys
import numpy as np
import pytest
import ucp
def _skip_if_not_supported(message_type):
    """Skip the calling test when AM messaging is requested but unavailable."""
    needs_am = message_type == "am"
    if needs_am and not ucp._libs.ucx_api.is_am_supported():
        pytest.skip("AM only supported in UCX >= 1.11")
async def _shutdown_send(ep, message_type):
    """Send a 10**6-element array over *ep* via tag or AM transport."""
    payload = np.arange(10 ** 6)
    if message_type != "tag":
        await ep.am_send(payload)
    else:
        await ep.send(payload)
async def _shutdown_recv(ep, message_type):
    """Receive one 10**6-element message from *ep* via tag or AM transport."""
    if message_type != "tag":
        await ep.am_recv()
    else:
        buf = np.empty(10 ** 6)
        await ep.recv(buf)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_server_shutdown(message_type):
    """The server calls shutdown"""
    endpoint_error_handling = ucp.get_ucx_version() >= (1, 10, 0)
    _skip_if_not_supported(message_type)

    async def on_connect(ep):
        # The server closes while a receive is pending -> recv is cancelled.
        with pytest.raises(ucp.exceptions.UCXCanceled):
            await asyncio.gather(_shutdown_recv(ep, message_type), ep.close())

    async def run_client(port):
        ep = await ucp.create_endpoint(
            ucp.get_address(), port, endpoint_error_handling=endpoint_error_handling
        )
        # The peer went away, so the client's receive is cancelled too.
        with pytest.raises(ucp.exceptions.UCXCanceled):
            await _shutdown_recv(ep, message_type)

    listener = ucp.create_listener(
        on_connect, endpoint_error_handling=endpoint_error_handling
    )
    await run_client(listener.port)
@pytest.mark.skipif(
    sys.version_info < (3, 7), reason="test currently fails for python3.6"
)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_client_shutdown(message_type):
    """The client calls shutdown"""
    endpoint_error_handling = ucp.get_ucx_version() >= (1, 10, 0)
    _skip_if_not_supported(message_type)

    async def run_client(port):
        ep = await ucp.create_endpoint(
            ucp.get_address(), port, endpoint_error_handling=endpoint_error_handling
        )
        # Closing while a receive is in flight cancels the receive.
        with pytest.raises(ucp.exceptions.UCXCanceled):
            await asyncio.gather(_shutdown_recv(ep, message_type), ep.close())

    async def on_connect(ep):
        # Mirror side: the server's pending receive is cancelled as well.
        with pytest.raises(ucp.exceptions.UCXCanceled):
            await _shutdown_recv(ep, message_type)

    listener = ucp.create_listener(
        on_connect, endpoint_error_handling=endpoint_error_handling
    )
    await run_client(listener.port)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_listener_close(message_type):
    """The server close the listener"""
    endpoint_error_handling = ucp.get_ucx_version() >= (1, 10, 0)
    _skip_if_not_supported(message_type)

    async def run_client(listener):
        ep = await ucp.create_endpoint(
            ucp.get_address(),
            listener.port,
            endpoint_error_handling=endpoint_error_handling,
        )
        # Drain the two messages the server sends before closing the listener.
        for _ in range(2):
            await _shutdown_recv(ep, message_type)
        assert listener.closed() is False
        listener.close()
        assert listener.closed() is True

    async def on_connect(ep):
        for _ in range(2):
            await _shutdown_send(ep, message_type)

    listener = ucp.create_listener(
        on_connect, endpoint_error_handling=endpoint_error_handling
    )
    await run_client(listener)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_listener_del(message_type):
    """The client delete the listener"""
    endpoint_error_handling = ucp.get_ucx_version() >= (1, 10, 0)
    _skip_if_not_supported(message_type)

    async def on_connect(ep):
        for _ in range(2):
            await _shutdown_send(ep, message_type)

    listener = ucp.create_listener(
        on_connect, endpoint_error_handling=endpoint_error_handling
    )
    ep = await ucp.create_endpoint(
        ucp.get_address(),
        listener.port,
        endpoint_error_handling=endpoint_error_handling,
    )
    await _shutdown_recv(ep, message_type)
    assert listener.closed() is False
    # Dropping the listener must not break an already-established endpoint.
    del listener
    await _shutdown_recv(ep, message_type)
@pytest.mark.asyncio
@pytest.mark.parametrize("message_type", ["tag", "am"])
async def test_close_after_n_recv(message_type):
    """The Endpoint.close_after_n_recv()"""
    endpoint_error_handling = ucp.get_ucx_version() >= (1, 10, 0)
    _skip_if_not_supported(message_type)
    async def server_node(ep):
        # The server simply streams 10 messages to every connecting client.
        for _ in range(10):
            await _shutdown_send(ep, message_type)
    async def client_node(port):
        # Case 1: arm close_after_n_recv(10) up front; the endpoint must be
        # closed exactly after the 10th receive.
        ep = await ucp.create_endpoint(
            ucp.get_address(), port, endpoint_error_handling=endpoint_error_handling
        )
        ep.close_after_n_recv(10)
        for _ in range(10):
            await _shutdown_recv(ep, message_type)
        assert ep.closed()
        # Case 2: arm mid-stream with n=5; only receives after the call count.
        ep = await ucp.create_endpoint(
            ucp.get_address(), port, endpoint_error_handling=endpoint_error_handling
        )
        for _ in range(5):
            await _shutdown_recv(ep, message_type)
        ep.close_after_n_recv(5)
        for _ in range(5):
            await _shutdown_recv(ep, message_type)
        assert ep.closed()
        # Case 3: count_from_ep_creation=True counts the 5 receives that
        # already happened, so n=10 closes after 5 more.
        ep = await ucp.create_endpoint(
            ucp.get_address(), port, endpoint_error_handling=endpoint_error_handling
        )
        for _ in range(5):
            await _shutdown_recv(ep, message_type)
        ep.close_after_n_recv(10, count_from_ep_creation=True)
        for _ in range(5):
            await _shutdown_recv(ep, message_type)
        assert ep.closed()
        # Case 4: error paths -- n smaller than the current recv count, and
        # arming close_after_n_recv twice, must both raise UCXError.
        ep = await ucp.create_endpoint(
            ucp.get_address(), port, endpoint_error_handling=endpoint_error_handling
        )
        for _ in range(10):
            await _shutdown_recv(ep, message_type)
        with pytest.raises(
            ucp.exceptions.UCXError, match="`n` cannot be less than current recv_count",
        ):
            ep.close_after_n_recv(5, count_from_ep_creation=True)
        ep.close_after_n_recv(1)
        with pytest.raises(
            ucp.exceptions.UCXError, match="close_after_n_recv has already been set to",
        ):
            ep.close_after_n_recv(1)
    listener = ucp.create_listener(
        server_node, endpoint_error_handling=endpoint_error_handling
    )
    await client_node(listener.port)
|
{"hexsha": "acffff045cd59ca3aa1ac5136002a4eca0990cc3", "size": 6351, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_shutdown.py", "max_stars_repo_name": "pentschev/ucx-py", "max_stars_repo_head_hexsha": "d701a3facd85ef2deece619a4f707fdebee36e3c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 76, "max_stars_repo_stars_event_min_datetime": "2019-06-08T04:03:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-07T20:34:23.000Z", "max_issues_repo_path": "tests/test_shutdown.py", "max_issues_repo_name": "rapidsai/ucx-py", "max_issues_repo_head_hexsha": "e28d770aa0b47c0e63c2e7e61649f1b355560e8a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 644, "max_issues_repo_issues_event_min_datetime": "2019-06-04T23:06:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T11:17:45.000Z", "max_forks_repo_path": "tests/test_shutdown.py", "max_forks_repo_name": "pentschev/ucx-py", "max_forks_repo_head_hexsha": "d701a3facd85ef2deece619a4f707fdebee36e3c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 32, "max_forks_repo_forks_event_min_datetime": "2019-08-14T09:22:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T20:17:50.000Z", "avg_line_length": 31.4405940594, "max_line_length": 88, "alphanum_fraction": 0.6751692647, "include": true, "reason": "import numpy", "num_tokens": 1408}
|
module allel_module
  use atom_module, only: zn_atom
  use ggrid_module, only: GG
  use ps_local_variables, only: vqlg
  use io_tools_module
  implicit none
  private
  public :: init_allel
  public :: init_ae_local_allel
  logical,public :: flag_allel=.false.
  real(8) :: Vcell
contains

  !> Enable all-electron mode when the "ALLEL" keyword is found in the input;
  !> in that case Zps is overwritten by the atomic numbers and the cell
  !> volume is stored for later use.
  subroutine init_allel( Zps, Vcell_in )
    implicit none
    real(8),intent(inout) :: Zps(:)
    real(8),intent(in) :: Vcell_in
    call IOTools_findKeyword( "ALLEL", flag_allel, flag_bcast=.true. )
    if ( .not. flag_allel ) return
    Zps = zn_atom
    Vcell = Vcell_in
  end subroutine init_allel

  !> Fill vqlg with the bare Coulomb local potential -4*pi*Z/(Vcell*G^2)
  !> for every element and every G-shell, skipping the G=0 term.
  subroutine init_ae_local_allel
    implicit none
    integer :: nspecies, ngl, jg, je
    real(8) :: coef, g2
    ngl = size( GG )
    nspecies = size( zn_atom )
    allocate( vqlg(ngl,nspecies) )
    vqlg = 0.0d0
    do je = 1, nspecies
       coef = -4.0d0*acos(-1.0d0)/Vcell*zn_atom(je)
       do jg = 1, ngl
          g2 = GG(jg)
          if ( g2 /= 0.0d0 ) vqlg(jg,je) = vqlg(jg,je) + coef/g2
       end do
    end do
  end subroutine init_ae_local_allel

end module allel_module
|
{"hexsha": "fd60f2a5133c8895d607c28eccba6d591fc4092f", "size": 1090, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/allel_module.f90", "max_stars_repo_name": "j-iwata/RSDFT_DEVELOP", "max_stars_repo_head_hexsha": "14e79a4d78a19e5e5c6fd7b3d2f2f0986f2ff6df", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-02T05:03:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-02T05:03:05.000Z", "max_issues_repo_path": "src/allel_module.f90", "max_issues_repo_name": "j-iwata/RSDFT_DEVELOP", "max_issues_repo_head_hexsha": "14e79a4d78a19e5e5c6fd7b3d2f2f0986f2ff6df", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/allel_module.f90", "max_forks_repo_name": "j-iwata/RSDFT_DEVELOP", "max_forks_repo_head_hexsha": "14e79a4d78a19e5e5c6fd7b3d2f2f0986f2ff6df", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-22T02:44:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-22T02:44:58.000Z", "avg_line_length": 21.8, "max_line_length": 70, "alphanum_fraction": 0.6458715596, "num_tokens": 380}
|
\documentclass{article}
\usepackage{tabularx}
\usepackage{booktabs}
\title{Reflection Report on [Title of Project]}
\author{author name}
\date{}
\input{../Comments}
\begin{document}
\begin{table}[hp]
\caption{Revision History} \label{TblRevisionHistory}
\begin{tabularx}{\textwidth}{llX}
\toprule
\textbf{Date} & \textbf{Developer(s)} & \textbf{Change}\\
\midrule
Date1 & Name(s) & Description of changes\\
Date2 & Name(s) & Description of changes\\
... & ... & ...\\
\bottomrule
\end{tabularx}
\end{table}
\newpage
\maketitle
Put your introductory blurb here.
\section{Project Overview}
\plt{Summarize the original project goals and requirements}
\section{Key Accomplishments}
\plt{What went well? This can be what went well with the documentation, the
coding, the project management, etc.}
\section{Key Problem Areas}
\plt{What went wrong? This can be what went wrong with the documentation, the
technology, the coding, time management, etc.}
\section{What Would You Do Differently Next Time}
\end{document}
|
{"hexsha": "85d2ea582b8ef4394069bbee22b4b178dd9f0339", "size": 1033, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/Reflection/Reflection.tex", "max_stars_repo_name": "Ao99/MISEG", "max_stars_repo_head_hexsha": "7e07da67e34c460de33fce555e93acb8e795d80b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/Reflection/Reflection.tex", "max_issues_repo_name": "Ao99/MISEG", "max_issues_repo_head_hexsha": "7e07da67e34c460de33fce555e93acb8e795d80b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2019-11-09T20:59:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-02T14:36:25.000Z", "max_forks_repo_path": "docs/Reflection/Reflection.tex", "max_forks_repo_name": "caobo1994/FourierSeries", "max_forks_repo_head_hexsha": "e6b3cab9409aaaa8071adc82276dc22d82c0575c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.2549019608, "max_line_length": 78, "alphanum_fraction": 0.7366892546, "num_tokens": 294}
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib
from matplotlib import animation, rc
# Palette of RGB triples keyed by integer index; object painters below index
# it with colors[1 + obj_index], so entry 0 is reserved.
colors = dict(enumerate([
    [0.2, 0.2, 0.2],
    [0, 0, 1],
    [0, 1, 0],
    [1, 1, 0],
    [0, 1, 1],
    [1, 0, 1],
    [0, 0.5, 0],
    [0, 0, 0.5],
    [0.5, 0.5, 0],
    [0, 0.5, 0.5],
    [0.5, 0, 0.5],
    [0.5, 1, 0],
    [0, 0.5, 1],
    [1, 1, 0.5],
    [0.5, 1, 1],
    [1, 0.5, 1],
]))
def visualize_PP(obj_shapes, edges, traj_real, traj_pred, filename):
    """Animate ground-truth and predicted object trajectories side by side.

    Parameters
    ----------
    obj_shapes : array, shape (n_obj, ...)
        Per-object descriptors; obj[1] == 0 draws a circle of diameter
        obj[-1], any other value draws a rectangle of size obj[-2] x obj[-1].
    edges : array
        One row per ordered object pair (i, j) with i != j; columns 1-3 are
        one-hot flags selecting the edge colour (black/purple/brown); rows
        with none set draw no edge.
    traj_real, traj_pred : arrays, shape (T, n_obj, >=3)
        Per-frame object poses (x, y, angle in radians).
    filename : str
        Output path without extension; a GIF is written to filename + '.gif'.

    Returns
    -------
    matplotlib.animation.FuncAnimation
        The animation object (also saved to disk).
    """
    fig, ax = plt.subplots(1, 2, figsize=(14, 6))
    plt.close()
    titles = ['Real', 'Predicted']
    for ax_ind in range(2):
        ax[ax_ind].set_xlim((-1.5, 0.3))
        ax[ax_ind].set_ylim((-0.9, 0.9))
        ax[ax_ind].set_title(titles[ax_ind])
    # Per-axis artist lists: (kind, patch(es), label text) and edge lines.
    obj_list = {0: [], 1: []}
    edge_list = {0: [], 1: []}
    for ax_ind in range(2):
        for obj_index, obj in enumerate(obj_shapes):
            c = colors[1+obj_index]
            if obj[1] == 0:
                circle = patches.Circle((0, 0), obj[-1]/2, color=c, ec='black')
                obj_txt = ax[ax_ind].text(
                    0, 0, str(obj_index), color='black', fontsize=12, weight='bold')
                obj_list[ax_ind].append((0, circle, obj_txt))
            else:
                # A rectangle is drawn as four quadrant patches anchored at
                # the object's centre so that rotation is about the centre.
                patch_mini_list = list()
                for k in [-1, 1]:
                    for l in [-1, 1]:
                        patch = patches.Rectangle(
                            (0, 0), k*obj[-2]/2, l*obj[-1]/2, ec='black', fc=c)
                        patch_mini_list.append(patch)
                obj_txt = ax[ax_ind].text(
                    0, 0, str(obj_index), color='black', fontsize=12, weight='bold')
                obj_list[ax_ind].append((1, patch_mini_list, obj_txt))
        edg = edges
        cnt = 0
        for i in range(obj_shapes.shape[0]):
            for j in range(obj_shapes.shape[0]):
                if i != j:
                    if edg[cnt, 1] == 1:
                        c = 'black'
                    elif edg[cnt, 2] == 1:
                        c = 'purple'
                    elif edg[cnt, 3] == 1:
                        c = 'brown'
                    else:
                        # No relation flag set for this pair: draw nothing.
                        cnt = cnt+1
                        continue
                    cnt = cnt+1
                    edge, = ax[ax_ind].plot([], [], color=c, lw=3)
                    edge_list[ax_ind].append((edge, (i, j)))
    time_txt = ax[0].text(-1.4, 0.8, str(0), color='black',
                          fontsize=12, weight='bold')

    def init():
        # Patches must be attached to the axes once before animating.
        for ax_ind in range(2):
            for obj in obj_list[ax_ind]:
                if obj[0] == 0:
                    ax[ax_ind].add_patch(obj[1])
                else:
                    for patch in obj[1]:
                        ax[ax_ind].add_patch(patch)
        return []

    def animate(i):
        for ax_ind in range(2):
            # BUG FIX: axis 0 is titled 'Real' and axis 1 'Predicted', but
            # the trajectories were previously assigned the other way round.
            if ax_ind == 0:
                positions = traj_real[i]
            else:
                positions = traj_pred[i]
            for obj_index, obj in enumerate(obj_list[ax_ind]):
                if obj[0] == 0:
                    obj[1].center = (positions[obj_index, 0],
                                     positions[obj_index, 1])
                else:
                    for patch in obj[1]:
                        patch.xy = (positions[obj_index, 0],
                                    positions[obj_index, 1])
                        patch.angle = np.rad2deg(positions[obj_index, 2])
            for edge in edge_list[ax_ind]:
                edge_line = edge[0]
                edge_info = edge[1]
                obj1 = edge_info[0]
                obj2 = edge_info[1]
                xx = [positions[obj1, 0], positions[obj2, 0]]
                yy = [positions[obj1, 1], positions[obj2, 1]]
                edge_line.set_data(xx, yy)
            for obj_index, obj in enumerate(obj_list[ax_ind]):
                obj[2].set_position((positions[obj_index, 0],
                                     positions[obj_index, 1]))
        time_txt.set_text(str(i))
        return []

    anim = animation.FuncAnimation(fig, animate, init_func=init,
                                   frames=int(traj_real.shape[0]),
                                   interval=50, blit=True)
    anim.save(filename+'.gif', writer='imagemagick', fps=30)
    return anim
def visualize_BR(obj_shapes, predicted_edges, traj_real, filename):
    """Animate a real trajectory while overlaying per-frame predicted edges.

    predicted_edges[i, e] holds per-type scores for edge slot e at frame i;
    the argmax picks the colour (0 -> hidden/white, 1 black, 2 purple,
    3 brown). The animation is saved to filename + '.gif' and returned.
    """
    fig, ax = plt.subplots(1, 1, figsize=(6, 6), dpi=100)
    plt.close()
    titles = ['Belief Regulation']
    ax.set_xlim((-1.5, 0.3))
    ax.set_ylim((-0.9, 0.9))
    ax.set_title(titles)
    obj_list = []
    edge_list = []
    # Build one patch (circle or 4-quadrant rectangle) plus a label per object.
    for obj_index, obj in enumerate(obj_shapes):
        c = colors[1+obj_index]
        if obj[1] == 0:
            circle = patches.Circle((0, 0), obj[-1]/2, color=c, ec='black')
            obj_txt = ax.text(
                0, 0, str(obj_index), color='red', fontsize=12, weight='bold')
            obj_list.append((0, circle, obj_txt))
        else:
            # Four quadrant patches anchored at the centre so rotation works.
            patch_mini_list = list()
            for k in [-1, 1]:
                for l in [-1, 1]:
                    patch = patches.Rectangle(
                        (0, 0), k*obj[-2]/2, l*obj[-1]/2, ec='black', fc=c)
                    patch_mini_list.append(patch)
            obj_txt = ax.text(0, 0, str(obj_index),
                              color='red', fontsize=12, weight='bold')
            obj_list.append((1, patch_mini_list, obj_txt))
    # Initial edge colours come from the first frame's predictions.
    edg = predicted_edges[0]
    cnt = 0
    for i in range(obj_shapes.shape[0]):
        for j in range(obj_shapes.shape[0]):
            if i != j:
                max_edge = np.argmax(edg[cnt])
                alp = 1  # edg[cnt, max_edge]
                if max_edge == 0:
                    # "No edge" prediction: keep the line invisible.
                    alp = 0
                    c = 'white'
                elif max_edge == 1:
                    c = 'black'
                elif max_edge == 2:
                    c = 'purple'
                elif max_edge == 3:
                    c = 'brown'
                cnt = cnt+1
                edge, = ax.plot([], [], color=c, alpha=alp, lw=3)
                edge_list.append((edge, (i, j)))
    time_txt = ax.text(-1.4, 0.8, str(0), color='black',
                       fontsize=12, weight='bold')
    def init():
        # Attach all patches to the axes once before the animation starts.
        for obj in obj_list:
            if obj[0] == 0:
                ax.add_patch(obj[1])
            else:
                for patch in obj[1]:
                    ax.add_patch(patch)
        return []
    def animate(i):
        positions = traj_real[i]
        # Move every object patch to its frame-i pose.
        for obj_index, obj in enumerate(obj_list):
            if obj[0] == 0:
                obj[1].center = (positions[obj_index, 0],
                                 positions[obj_index, 1])
            else:
                for patch in obj[1]:
                    patch.xy = (positions[obj_index, 0],
                                positions[obj_index, 1])
                    patch.angle = np.rad2deg(positions[obj_index, 2])
        # Recolour and reposition every edge from frame-i predictions.
        for edge_ind, edge in enumerate(edge_list):
            edge_line = edge[0]
            edge_info = edge[1]
            obj1 = edge_info[0]
            obj2 = edge_info[1]
            xx = [positions[obj1, 0], positions[obj2, 0]]
            yy = [positions[obj1, 1], positions[obj2, 1]]
            edg = predicted_edges[i, edge_ind]
            max_edge = np.argmax(edg)
            alp = 1  # edg[max_edge]
            if max_edge == 0:
                alp = 0
                c = 'white'
            elif max_edge == 1:
                c = 'black'
            elif max_edge == 2:
                c = 'purple'
            elif max_edge == 3:
                c = 'brown'
            edge_line.set_color(c)
            edge_line.set_alpha(alp)
            edge_line.set_data(xx, yy)
        # Labels are drawn slightly offset from the object centre.
        for obj_index, obj in enumerate(obj_list):
            obj[2].set_position((positions[obj_index, 0]-0.025,
                                 positions[obj_index, 1]-0.025))
        time_txt.set_text(str(i))
        return []
    anim = animation.FuncAnimation(fig, animate, init_func=init,
                                   frames=int(traj_real.shape[0]),
                                   interval=50, blit=True)
    anim.save(filename+'.gif', writer='imagemagick', fps=15)
    return anim
|
{"hexsha": "36d0a3c1456022fb1883057534debde61986a243", "size": 8444, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/articulated_multi_object/visualize.py", "max_stars_repo_name": "Fzaero/Object-and-Relation-Centric-Representations-for-Push-Effect-Prediction", "max_stars_repo_head_hexsha": "8f5e120983fe2e2681ee03e0393822d3ce8706b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-20T07:53:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-20T07:53:31.000Z", "max_issues_repo_path": "src/articulated_multi_object/visualize.py", "max_issues_repo_name": "Fzaero/Object-and-Relation-Centric-Representations-for-Push-Effect-Prediction", "max_issues_repo_head_hexsha": "8f5e120983fe2e2681ee03e0393822d3ce8706b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/articulated_multi_object/visualize.py", "max_forks_repo_name": "Fzaero/Object-and-Relation-Centric-Representations-for-Push-Effect-Prediction", "max_forks_repo_head_hexsha": "8f5e120983fe2e2681ee03e0393822d3ce8706b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-20T07:53:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-20T07:53:32.000Z", "avg_line_length": 36.5541125541, "max_line_length": 84, "alphanum_fraction": 0.4580767409, "include": true, "reason": "import numpy", "num_tokens": 2234}
|
import torch
import numpy as np
import time
from sklearn.feature_extraction import image
from tqdm import tqdm
from glob import glob
from sklearn.cluster import MeanShift
from matplotlib import pyplot as plt
from IPython.display import clear_output
def normalize(img, mean, std):
    """Standardize an array of images.

    Parameters
    ----------
    img: array
        An array of images.
    mean: float
        Mean to subtract.
    std: float
        Standard deviation to divide by.
    """
    centered = img - mean
    return centered / std
def denormalize(img, mean, std):
    """Undo `normalize`: scale by std and shift by mean.

    Parameters
    ----------
    img: array
        An array of standardized images.
    mean: float
        Mean to add back.
    std: float
        Standard deviation to scale by.
    """
    rescaled = img * std
    return rescaled + mean
def convertToFloat32(train_images, val_images):
    """Cast training and validation arrays to float32.

    Parameters
    ----------
    train_images: array
        Training data.
    val_images: array
        Validation data.
    """
    return train_images.astype('float32'), val_images.astype('float32')
def getMeanStdData(train_images, val_images):
    """Return mean and standard deviation of the pooled train+val data.

    Parameters
    ----------
    train_images: array
        Training data.
    val_images: array
        Validation data.
    """
    pooled = np.concatenate(
        (train_images.astype('float32'), val_images.astype('float32')), axis=0)
    return np.mean(pooled), np.std(pooled)
def convertNumpyToTensor(numpy_array):
    """Wrap a numpy array as a PyTorch tensor (shares memory, no copy).

    Parameters
    ----------
    numpy_array: numpy array
        Array to convert.
    """
    tensor = torch.from_numpy(numpy_array)
    return tensor
def extract_patches(x, patch_size, num_patches):
    """Deterministically extract square patches from a stack of images.

    Uses random_state=i per image, so repeated calls yield the same patches.

    Parameters
    ----------
    x: numpy array
        Array of images.
    patch_size: int
        Side length of each extracted patch.
    num_patches: int
        Number of patches per image.
    """
    out = np.zeros(shape=(x.shape[0]*num_patches, patch_size, patch_size))
    for idx in tqdm(range(x.shape[0])):
        start = idx * num_patches
        out[start:start+num_patches] = image.extract_patches_2d(
            x[idx], (patch_size, patch_size), num_patches, random_state=idx)
    return out
def augment_data(X_train):
    """Return an 8-fold augmentation: three 90-degree rotations plus flips.

    Parameters
    ----------
    X_train: numpy array
        Array of (square) training images.
    """
    base = X_train.copy()
    rotations = [X_train] + [np.rot90(base, k, (1, 2)) for k in (1, 2, 3)]
    augmented = np.concatenate(rotations)
    augmented = np.concatenate((augmented, np.flip(augmented, axis=1)))
    print('Raw image size after augmentation', augmented.shape)
    return augmented
def loadImages(path):
    """Load images from a given directory.
    Parameters
    ----------
    path: String
        Path of directory from where to load images from.
    """
    # `path` is used as a glob pattern (e.g. "dir/*"); only .png and .npy
    # entries contribute images -- other matches re-append the previous im_b.
    files = sorted(glob(path))
    data=[]
    print(path)
    for f in files:
        if '.png' in f:
            # NOTE(review): `io` is never imported in this module, so this
            # branch raises NameError for .png files; presumably a missing
            # `from skimage import io` -- confirm against the upstream file.
            im_b = np.array(io.imread(f))
        if '.npy' in f:
            im_b = np.load(f)
        data.append(im_b)
    data = np.array(data).astype(np.float32)
    return data
def getSamples(vae, size=20, zSize=64, mu=None, logvar=None, samples=1, tq=False):
    """Draw synthetic samples from a DivNoising network's decoder.

    Parameters
    ----------
    vae: VAE Object
        DivNoising model.
    size: int
        Spatial size of the bottleneck latent grid.
    zSize: int
        Latent dimension per bottleneck pixel.
    mu, logvar: PyTorch tensors or None
        Latent mean / log-variance; zeros (standard normal prior) when None.
    samples: int
        Number of samples to generate.
    tq: boolean
        Whether to show a tqdm progress bar.
    """
    if mu is None:
        mu = torch.zeros(1, zSize, size, size).cuda()
    if logvar is None:
        logvar = torch.zeros(1, zSize, size, size).cuda()
    generated = []
    for _ in tqdm(range(samples), disable=not tq):
        latent = vae.reparameterize(mu, logvar)
        decoded = vae.decode(latent).cpu().detach().numpy()
        decoded.shape = (decoded.shape[-2], decoded.shape[-1])
        generated.append(decoded)
    return np.array(generated)
def interpolate(vae, z_start, z_end, steps, display, vmin=0,vmax=255,):
    """Decode a linear interpolation between two latent codes.

    Returns the list of decoded 2-D frames; optionally shows each frame.
    """
    frames = []
    for step in range(steps):
        weight = (step/(steps-1.0))
        z = z_end*weight + z_start*(1.0-weight)
        decoded = vae.decode(z).cpu().detach().numpy()
        decoded.shape = (decoded.shape[-2], decoded.shape[-1])
        if display:
            clear_output(wait=True)
            plt.imshow(decoded, vmin=vmin, vmax=vmax)
            plt.show()
            time.sleep(0.4)
        frames.append(decoded)
    return frames
def tiledMode(im, ps, overlap, display=True, vmin=0,vmax=255,
              initBW=200, minBW=100, reduce=0.9):
    """Compute a per-pixel mode image tile by tile via `findMode`.

    im is a stack of samples (samples, H, W); tiles of size ps x ps are
    processed with `overlap` pixels of overlap, and only the non-overlapping
    interior of each tile (after the first row/column) is written out.
    """
    means=np.zeros(im.shape[1:])
    # (xmin, ymin)-(xmax, ymax) is the current tile window; ovLeft/ovTop hold
    # how much of the tile overlaps the region written by previous tiles.
    xmin=0
    ymin=0
    xmax=ps
    ymax=ps
    ovLeft=0
    while (xmin<im.shape[2]):
        ovTop=0
        while (ymin<im.shape[1]):
            inputPatch=im[:,ymin:ymax,xmin:xmax]
            a = findMode(inputPatch,
                         initBW, minBW, reduce)
            # NOTE(review): this slice is a no-op (a[:a.shape[0], :a.shape[1]]
            # is all of a); presumably meant to crop boundary tiles -- confirm.
            a=a[:a.shape[0], :a.shape[1]]
            # Skip the overlapped strip so previously written pixels stand.
            means[ymin:ymax,xmin:xmax][ovTop:,ovLeft:] = a[ovTop:,ovLeft:]
            ymin=ymin-overlap+ps
            ymax=ymin+ps
            ovTop=overlap//2
        ymin=0
        ymax=ps
        xmin=xmin-overlap+ps
        xmax=xmin+ps
        ovLeft=overlap//2
        if display:
            plt.imshow(means,vmin=vmin, vmax=vmax)
            plt.show()
            clear_output(wait=True)
    return means
def findClosest(samples, q):
    """Return the sample with the smallest mean squared distance to q.

    Parameters
    ----------
    samples: array
        Stack of candidate images (first axis indexes samples).
    q: image(array)
        Query image.
    """
    sq_err = (samples - q) ** 2
    per_sample_mse = np.mean(np.mean(sq_err, -1), -1)
    return samples[np.argmin(per_sample_mse)]
def findMode(samples, initBW=200, minBW=100, reduce=0.9):
    """Estimate the mode of a distribution of images via mean-shift.

    The bandwidth starts at initBW and shrinks by `reduce` each iteration
    (at most 15 iterations), stopping once it drops below minBW.

    Parameters
    ----------
    samples: array
        Stack of images (samples, H, W).
    initBW: int
        Initial bandwidth.
    minBW: int
        Minimum bandwidth.
    reduce: float
        Bandwidth shrink factor per iteration.
    """
    flattened = samples.copy().reshape(
        samples.shape[0], samples.shape[1]*samples.shape[2])
    # Seed the search at the pixel-wise mean of all samples.
    seed = np.mean(flattened, axis=0)[np.newaxis, ...]
    bandwidth = initBW
    for _ in range(15):
        shifted = MeanShift(bandwidth=bandwidth, seeds=seed,
                            cluster_all=True).fit(flattened)
        seed = shifted.cluster_centers_.copy()
        bandwidth = bandwidth*reduce
        if bandwidth < minBW:
            break
    mode = seed[0]
    mode.shape = (samples.shape[1], samples.shape[2])
    return mode
def plotProbabilityDistribution(signalBinIndex, histogram, gaussianMixtureNoiseModel, min_signal, max_signal, n_bin, device):
    """Plots probability distribution P(x|s) for a certain ground truth signal.
    Predictions from both Histogram and GMM-based Noise models are displayed for comparison.
    Parameters
    ----------
    signalBinIndex: int
        index of signal bin. Values go from 0 to number of bins (`n_bin`).
    histogram: numpy array
        A square numpy array of size `nbin` times `n_bin`.
    gaussianMixtureNoiseModel: GaussianMixtureNoiseModel
        Object containing trained parameters.
    min_signal: float
        Lowest pixel intensity present in the actual sample which needs to be denoised.
    max_signal: float
        Highest pixel intensity present in the actual sample which needs to be denoised.
    n_bin: int
        Number of Bins.
    device: GPU device
    """
    histBinSize=(max_signal-min_signal)/n_bin
    # Query signal at the centre of the selected bin.
    querySignal_numpy= (signalBinIndex/float(n_bin)*(max_signal-min_signal)+min_signal)
    querySignal_numpy +=histBinSize/2
    querySignal_torch = torch.from_numpy(np.array(querySignal_numpy)).float().to(device)
    # One observation point per bin, also at bin centres.
    queryObservations_numpy=np.arange(min_signal, max_signal, histBinSize)
    queryObservations_numpy+=histBinSize/2
    queryObservations = torch.from_numpy(queryObservations_numpy).float().to(device)
    # GMM likelihood P(x|s) evaluated at every observation bin centre.
    pTorch=gaussianMixtureNoiseModel.likelihood(queryObservations, querySignal_torch)
    pNumpy=pTorch.cpu().detach().numpy()
    plt.figure(figsize=(12, 5))
    plt.subplot(1, 2, 1)
    plt.xlabel('Observation Bin')
    plt.ylabel('Signal Bin')
    # Fourth root for display contrast; blue line marks the selected row.
    plt.imshow(histogram**0.25, cmap='gray')
    plt.axhline(y=signalBinIndex+0.5, linewidth=5, color='blue', alpha=0.5)
    plt.subplot(1, 2, 2)
    # Histogram row is divided by the bin width to get a density.
    plt.plot(queryObservations_numpy, histogram[signalBinIndex, :]/histBinSize, label='GT Hist: bin ='+str(signalBinIndex), color='blue', linewidth=2)
    plt.plot(queryObservations_numpy, pNumpy, label='GMM : '+' signal = '+str(np.round(querySignal_numpy,2)), color='red',linewidth=2)
    plt.xlabel('Observations (x) for signal s = ' + str(querySignal_numpy))
    plt.ylabel('Probability Density')
    plt.title("Probability Distribution P(x|s) at signal =" + str(querySignal_numpy))
    plt.legend()
def predictMMSE(vae, img, samples, returnSamples=False, tq=True):
    '''
    Computes the denoised MMSE estimate of an image by averaging decoder samples.

    Parameters
    ----------
    vae: VAE object
        DivNoising model.
    img: array
        Image for which denoised MMSE estimate needs to be computed.
    samples: int
        Number of samples to average for computing MMSE estimate.
    returnSamples:
        Should the method also return the individual samples?
    tq: boolean
        If tqdm should be active or not to indicate progress.
    '''
    height, width = img.shape[0], img.shape[1]
    # Encode once; every sample is drawn from the same posterior parameters.
    batch = torch.Tensor(img).view(1, 1, height, width).cuda()
    mu, logvar = vae.encode(batch)
    running_sum = np.zeros((height, width))
    if returnSamples:
        collected = []
    for _ in tqdm(range(samples), disable=not tq):
        latent = vae.reparameterize(mu, logvar)
        decoded = vae.decode(latent).cpu().detach().numpy()
        decoded.shape = (height, width)
        running_sum += decoded
        if returnSamples:
            collected.append(decoded)
    mmse = running_sum / float(samples)
    if returnSamples:
        return mmse, np.array(collected)
    return mmse
def PSNR(gt, img):
    '''
    Compute the Peak Signal-to-Noise Ratio between two images.

    Parameters
    ----------
    gt: array
        Ground truth image.
    img: array
        Predicted image.

    Returns
    -------
    float
        PSNR in decibels, using the ground-truth dynamic range
        (max(gt) - min(gt)) as the peak signal value. Returns +inf
        (with a NumPy divide warning) when the images are identical.
    '''
    mse = np.mean(np.square(gt - img))
    return 20 * np.log10(np.max(gt) - np.min(gt)) - 10 * np.log10(mse)
|
{"hexsha": "9642a4a85a22c8fd69bb43b5e0911e87cb8671a9", "size": 11549, "ext": "py", "lang": "Python", "max_stars_repo_path": "divnoising/utils.py", "max_stars_repo_name": "mangalp/DivNoising", "max_stars_repo_head_hexsha": "19336a1dd0878526de119100dd33b26ec5dab2c4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "divnoising/utils.py", "max_issues_repo_name": "mangalp/DivNoising", "max_issues_repo_head_hexsha": "19336a1dd0878526de119100dd33b26ec5dab2c4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "divnoising/utils.py", "max_forks_repo_name": "mangalp/DivNoising", "max_forks_repo_head_hexsha": "19336a1dd0878526de119100dd33b26ec5dab2c4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.350140056, "max_line_length": 150, "alphanum_fraction": 0.6232574249, "include": true, "reason": "import numpy", "num_tokens": 2875}
|
"""Module using IndRNNCell to solve the addition problem
The addition problem is stated in https://arxiv.org/abs/1803.04831. The
hyper-parameters are taken from that paper as well. The network should converge
to a MSE around zero after 1500-20000 steps, depending on the number of time
steps.
"""
import tensorflow as tf
import numpy as np
import sys
from ind_rnn_cell import IndRNNCell
from ind_rnn_cell import MultiRNNCell
# Parameters taken from https://arxiv.org/abs/1803.04831
TIME_STEPS = 50  # Sequence length used for the sequence-loss weights.
NUM_UNITS = 2000  # Hidden units per IndRNN layer (also the embedding size).
LEARNING_RATE_INIT = 0.0002
LEARNING_RATE_DECAY_STEPS = 600000  # Decay learning rate 10x every this many steps.
NUM_LAYERS = 6  # Number of stacked IndRNN layers.
RECURRENT_MAX = pow(2, 1 / TIME_STEPS)  # Cap on recurrent weight magnitude, per the paper.
# Parameters taken from https://arxiv.org/abs/1511.06464
BATCH_SIZE = 128
NUM_EPOCHS = 10000
class IndRNNConfig(object):
  """Large config.

  NOTE(review): these values mirror the classic PTB "large" LSTM config, but
  this class is not referenced by main() below — confirm it is still needed.
  """
  init_scale = 0.04       # Scale for uniform weight initialisation.
  learning_rate = 0.0002
  max_grad_norm = 10      # Gradient clipping threshold.
  num_layers = 2
  num_steps = 35          # Unrolled sequence length.
  hidden_size = 200
  max_epoch = 14          # Epochs before learning-rate decay starts.
  max_max_epoch = 55      # Total training epochs.
  keep_prob = 0.75        # Dropout keep probability.
  lr_decay = 0.1
  batch_size = 128
  vocab_size = 10000
  #rnn_mode = BLOCK
from ptb_word_lm import *
from ptb_reader import *
# NOTE(review): hard-coded user-specific data path — parameterise before reuse.
ptb, vocab_size = load_ptb("/home/ziclin/data")
# Iterations per pass over each split, at the configured batch size.
ITERATIONS_PER_EPOCH = int(ptb.train.num_examples / BATCH_SIZE)
VAL_ITERS = int(ptb.valid.num_examples / BATCH_SIZE)
TEST_ITERS = int(ptb.test.num_examples / BATCH_SIZE)
print (">>>>")
print ("train_iter: %d, valid_iter: %d, test_iter: %d" % (ITERATIONS_PER_EPOCH, VAL_ITERS, TEST_ITERS))
def main():
  """Builds the IndRNN word-level language model and runs the training loop.

  Reads PTB batches from the module-level `ptb` reader, trains with Adam and
  an exponentially decaying learning rate, and logs per-epoch train /
  validation / test losses to stdout and to 'ptb_ind.txt'.
  """
  inputs_ph = tf.placeholder(tf.int64, shape=(None, None))
  labels_ph = tf.placeholder(tf.int64, shape=(None, None))
  # Token embedding matrix; embedding size matches the IndRNN hidden size.
  embedding = tf.get_variable("embedding", [vocab_size, NUM_UNITS], dtype=tf.float32)
  inputs = tf.nn.embedding_lookup(embedding, inputs_ph)
  in_training = True
  cell = MultiRNNCell([
      IndRNNCell(NUM_UNITS, recurrent_max_abs=RECURRENT_MAX, batch_norm=False,
                 in_training=in_training) for _ in range(NUM_LAYERS)
  ])
  # cell = tf.nn.rnn_cell.BasicLSTMCell(NUM_UNITS)  # uncomment this for LSTM runs
  output, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
  softmax_w = tf.get_variable("softmax_w", [NUM_UNITS, vocab_size], dtype=tf.float32)
  softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=tf.float32)
  output = tf.reshape(output, [-1, NUM_UNITS])
  logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
  # Reshape logits to be a 3-D tensor for sequence loss.
  logits = tf.reshape(logits, [BATCH_SIZE, -1, vocab_size])
  # Average over the batch, keep per-timestep losses, then sum over timesteps.
  # (TIME_STEPS replaces the previously hard-coded 50.)
  loss = tf.contrib.seq2seq.sequence_loss(
      logits,
      labels_ph,
      tf.ones([BATCH_SIZE, TIME_STEPS], dtype=tf.float32),
      average_across_timesteps=False,
      average_across_batch=True)
  _cost = tf.reduce_sum(loss)
  _final_state = state
  if not in_training:
    return
  global_step = tf.get_variable("global_step", shape=[], trainable=False,
                                initializer=tf.zeros_initializer)
  learning_rate = tf.train.exponential_decay(LEARNING_RATE_INIT, global_step,
                                             LEARNING_RATE_DECAY_STEPS, 0.1,
                                             staircase=True)
  optimizer = tf.train.AdamOptimizer(learning_rate)
  optimize = optimizer.minimize(_cost, global_step=global_step)
  # Train the model. `with` guarantees the log file is closed on exit
  # (the original left the handle open).
  with open('ptb_ind.txt', 'w') as fout, tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(NUM_EPOCHS):
      train_per = []
      for iteration in range(ITERATIONS_PER_EPOCH):
        x, y = ptb.train.next_batch(BATCH_SIZE)
        cost, _ = sess.run([_cost, optimize], feed_dict={inputs_ph: x, labels_ph: y})
        train_per.append(cost)
        # Log a running mean every 20 iterations. The original condition
        # `iteration % ITERATIONS_PER_EPOCH == 20` only fired once per epoch.
        if iteration % 20 == 0:
          print("%d/%d %f" % (iteration, ITERATIONS_PER_EPOCH, np.mean(train_per[-20:])))
          sys.stdout.flush()
      valid_per = []
      for _ in range(VAL_ITERS):
        # Pass BATCH_SIZE explicitly, consistent with the train loop above.
        x, y = ptb.valid.next_batch(BATCH_SIZE)
        cost = sess.run(_cost, feed_dict={inputs_ph: x, labels_ph: y})
        valid_per.append(cost)
      # Restored: `test_per` was referenced below but its computation had been
      # commented out, which raised NameError on the first epoch.
      test_per = []
      for _ in range(TEST_ITERS):
        x, y = ptb.test.next_batch(BATCH_SIZE)
        cost = sess.run(_cost, feed_dict={inputs_ph: x, labels_ph: y})
        test_per.append(cost)
      print("epoch %d, train=%f, valid=%f, test=%f" % (epoch, np.mean(train_per), np.mean(valid_per), np.mean(test_per)))
      fout.write("%d %.4f %.4f %.4f\n" % (epoch, np.mean(train_per), np.mean(valid_per), np.mean(test_per)))
      sys.stdout.flush()
      fout.flush()
if __name__ == "__main__":
main()
|
{"hexsha": "762c3c3ce0e0880c42263b8f7e57b0120d511e45", "size": 4635, "ext": "py", "lang": "Python", "max_stars_repo_path": "ptb/ptb_rnn.py", "max_stars_repo_name": "narutowang/indrnn", "max_stars_repo_head_hexsha": "434e1200b5e742a0eac92bed661c69e97b8b8711", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ptb/ptb_rnn.py", "max_issues_repo_name": "narutowang/indrnn", "max_issues_repo_head_hexsha": "434e1200b5e742a0eac92bed661c69e97b8b8711", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ptb/ptb_rnn.py", "max_forks_repo_name": "narutowang/indrnn", "max_forks_repo_head_hexsha": "434e1200b5e742a0eac92bed661c69e97b8b8711", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3333333333, "max_line_length": 122, "alphanum_fraction": 0.6841423948, "include": true, "reason": "import numpy", "num_tokens": 1288}
|
import csv
from typing import Dict, List
import numpy as np
from .features import Features
CACHED_DATA: Dict[str, List[int]] = {}
class TextualFeatures(Features):
    """Count-based textual features for a single user.

    Values are read from a per-user CSV cache of 102 numeric columns
    (presumably LIWC-style per-word rates — TODO confirm the column schema
    against the cache generator) and scaled by the user's word count.
    """
    def __init__(self):
        super().__init__()
        # Feature vectors, filled in by extract_features():
        self.emotional_words_count = np.zeros(2)   # e.g. positive/negative — confirm
        self.emoticon_count = np.zeros(3)
        self.pronoun_count = np.zeros(3)           # 1st / 2nd / 3rd person buckets
        self.punctuation_count = np.zeros(3)
        # biology, body, health, death, society, family, friends, money, work and leisure
        self.topic_related_words_count = np.zeros(10)
    def extract_features(self, user, cache_file):
        """Populates the feature vectors for `user` from the CSV at `cache_file`.

        Lazily loads the whole CSV into the module-level CACHED_DATA on first
        miss. Raises KeyError if `user.id` is absent from the CSV; does nothing
        when `cache_file` is None.
        """
        if cache_file is not None:
            try:
                val = CACHED_DATA[user.id]
            except KeyError:
                # Cache miss: parse the entire CSV (skipping the header row)
                # so subsequent users hit the in-memory cache.
                with open(cache_file, "r") as f:
                    table = csv.reader(f)
                    for idx, row in enumerate(table):
                        if idx == 0:
                            continue
                        CACHED_DATA[row[0]] = [float(row[i]) for i in range(1, 103)]
                val = CACHED_DATA[user.id]
            # val[91] holds the user's total word count; rates below are
            # multiplied by it to recover absolute counts (column meanings
            # inferred from indices — verify against the cache schema).
            num_words = val[91]
            self.emotional_words_count = np.array(val[38:40]) * num_words
            self.emoticon_count = np.array(user.emoticon_count)
            self.pronoun_count = np.array([val[3] + val[4], val[5], val[6] + val[7]]) * num_words
            self.punctuation_count = np.array(val[84:86] + [val[80] / 3]) * num_words
            self.topic_related_words_count = np.array(val[56:59] + val[71:72] + val[33:36] + val[69:70] + val[65:66] + val[67:68]) * num_words
|
{"hexsha": "64a151875a8576d9e53d4c7660ffdfef01192cc1", "size": 1559, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/features/textual_features.py", "max_stars_repo_name": "icycookies/dd_benchmark", "max_stars_repo_head_hexsha": "5551c0654d3dc30d72b817096d0877a02f28f116", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-01T13:02:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-01T14:39:44.000Z", "max_issues_repo_path": "src/features/textual_features.py", "max_issues_repo_name": "icycookies/dd_benchmark", "max_issues_repo_head_hexsha": "5551c0654d3dc30d72b817096d0877a02f28f116", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/features/textual_features.py", "max_forks_repo_name": "icycookies/dd_benchmark", "max_forks_repo_head_hexsha": "5551c0654d3dc30d72b817096d0877a02f28f116", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-01T14:39:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-01T14:39:45.000Z", "avg_line_length": 39.9743589744, "max_line_length": 142, "alphanum_fraction": 0.5721616421, "include": true, "reason": "import numpy", "num_tokens": 399}
|
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet dataset with typical pre-processing and advanced augs."""
# pylint: disable=logging-format-interpolation
import enum
import itertools as it
import logging
import re
from typing import Generator, Mapping, Optional, Sequence, Text, Tuple
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from nfnets import autoaugment
Batch = Mapping[Text, np.ndarray]  # One batch: field name -> numpy array.
# Per-channel RGB normalisation statistics, scaled to the [0, 255] pixel range.
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
AUTOTUNE = tf.data.experimental.AUTOTUNE
class Split(enum.Enum):
  """Enumeration of the ImageNet dataset splits."""
  TRAIN = 1
  TRAIN_AND_VALID = 2
  VALID = 3
  TEST = 4
  @classmethod
  def from_string(cls, name: Text) -> 'Split':
    """Parses a split from its name, case-insensitively.

    'VALIDATION' is accepted as an alias for VALID; unknown names raise
    KeyError, as before.
    """
    key = name.upper()
    if key == 'VALIDATION':
      key = 'VALID'
    return cls[key]
  @property
  def num_examples(self):
    """Number of examples contained in this split."""
    counts = {Split.TRAIN_AND_VALID: 1281167, Split.TRAIN: 1271167,
              Split.VALID: 10000, Split.TEST: 50000}
    return counts[self]
def load(
    split: Split,
    *,
    is_training: bool,
    batch_dims: Sequence[int],
    name: str = 'imagenet',
    dtype: jnp.dtype = jnp.float32,
    transpose: bool = False,
    fake_data: bool = False,
    image_size: Tuple[int, int] = (224, 224),
    augment_name: Optional[str] = None,
    eval_preproc: str = 'crop_resize',
    augment_before_mix: bool = True,
) -> Generator[Batch, None, None]:
  """Loads the given split of the dataset.
  Args:
    split: Dataset split to use.
    is_training: If true, use training preproc and augmentation.
    batch_dims: List indicating how to batch the dataset (typically expected to
      be of shape (num_devices, bs_per_device)
    name: Which dataset to use, (must be 'imagenet')
    dtype: One of float32 or bfloat16 (bf16 may not be supported fully)
    transpose: If true, employs double transpose trick.
    fake_data: Return batches of fake data for debugging purposes.
    image_size: Final image size returned by dataset pipeline. Note that the
      exact procedure to arrive at this size will depend on the chosen preproc.
    augment_name: Optional additional aug strategy (applied atop the default
      of distorted bboxes and random L/R flips). Specified with a string
      such as 'cutmix_mixup_0.4_randaugment_415'. See README for deets.
    eval_preproc: Eval preproc method, either 'crop_resize' (crop on the long
      edge then resize) or `resize_crop_{pct}`, which will resize the image to
      `image_size / pct` on each side then take a center crop.
    augment_before_mix: Apply augs like RA/AA before or after cutmix/mixup.
  Yields:
    A TFDS numpy iterator.
  """
  # Each JAX host reads only its own contiguous [start, end) shard.
  start, end = _shard(split, jax.host_id(), jax.host_count())
  if fake_data:
    print('Using fake data!')
    images = np.zeros(tuple(batch_dims) + image_size + (3,), dtype=dtype)
    labels = np.zeros(tuple(batch_dims), dtype=np.int32)
    if transpose:
      axes = tuple(range(images.ndim))
      axes = axes[:-4] + axes[-3:] + (axes[-4],)  # NHWC -> HWCN
      images = np.transpose(images, axes)
    # NOTE(review): repeats once per example index (end - start), not per
    # batch — confirm this is the intended number of fake batches.
    yield from it.repeat({'images': images, 'labels': labels}, end - start)
    return
  total_batch_size = np.prod(batch_dims)
  if name.lower() == 'imagenet':
    # Read exactly this host's shard; JPEG decoding is deferred to preprocess.
    tfds_split = tfds.core.ReadInstruction(_to_tfds_split(split),
                                           from_=start, to=end, unit='abs')
    ds = tfds.load('imagenet2012:5.*.*', split=tfds_split,
                   decoders={'image': tfds.decode.SkipDecoding()})
  else:
    raise ValueError('Only imagenet is presently supported for this dataset.')
  # tf.data performance knobs (threading and autotuned parallelism).
  options = ds.options()
  options.experimental_threading.private_threadpool_size = 48
  options.experimental_threading.max_intra_op_parallelism = 1
  options.experimental_optimization.map_parallelization = True
  options.experimental_optimization.parallel_batch = True
  options.experimental_optimization.autotune = True
  options.experimental_optimization.hoist_random_uniform = True
  if is_training:
    options.experimental_deterministic = False
  if is_training:
    if jax.host_count() > 1:
      # Only cache if we are reading a subset of the dataset.
      ds = ds.cache()
    ds = ds.repeat()
    ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=None)
  else:
    if split.num_examples % total_batch_size != 0:
      raise ValueError(f'Test/valid must be divisible by {total_batch_size}')
  def augment_normalize(batch):
    """Optionally augment, then normalize an image."""
    batch = dict(**batch)
    image = _augment_image(batch['images'], is_training, augment_name)
    batch['images'] = _normalize_image(image)
    return batch
  def preprocess(example):
    # Decode/crop/resize one example and attach the per-example cutmix mask
    # and/or mixup ratio when those augs are requested.
    image = _preprocess_image(example['image'], is_training, image_size,
                              eval_preproc)
    label = tf.cast(example['label'], tf.int32)
    out = {'images': image, 'labels': label}
    if augment_name is not None and 'cutmix' in augment_name:
      out['mask'] = cutmix_padding(*image_size)
      out['cutmix_ratio'] = tf.reduce_mean(out['mask'])
    if augment_name is not None and 'mixup' in augment_name:
      mixup_alpha = 0.2  # default to alpha=0.2
      # If float provided, get it
      if 'mixup_' in augment_name:
        alpha = augment_name.split('mixup_')[1].split('_')
        if any(alpha) and re.match(r'^-?\d+(?:\.\d+)?$', alpha[0]) is not None:
          mixup_alpha = float(alpha[0])
      beta = tfp.distributions.Beta(mixup_alpha, mixup_alpha)
      out['mixup_ratio'] = beta.sample()
    # Apply augs before mixing?
    if augment_before_mix or augment_name is None:
      out = augment_normalize(out)
    return out
  ds = ds.map(preprocess, num_parallel_calls=AUTOTUNE)
  ds = ds.prefetch(AUTOTUNE)
  def transpose_fn(batch):
    # Applies the double-transpose trick for TPU.
    batch = dict(**batch)
    batch['images'] = tf.transpose(batch['images'], (1, 2, 3, 0))
    return batch
  def cast_fn(batch):
    batch = dict(**batch)
    batch['images'] = tf.cast(batch['images'], _to_tf_dtype(dtype))
    return batch
  # Batch innermost-first; mixing ops only apply at the innermost level (i == 0).
  for i, batch_size in enumerate(reversed(batch_dims)):
    if i == 0:
      # Deal with vectorized MixUp + CutMix ops
      if augment_name is not None:
        if 'mixup' in augment_name or 'cutmix' in augment_name:
          # Double-size batches: each output batch mixes two half-batches.
          ds = ds.batch(batch_size * 2)
        else:
          ds = ds.map(augment_normalize, num_parallel_calls=AUTOTUNE)
          ds = ds.batch(batch_size)
        # Apply mixup, cutmix, or mixup + cutmix
        if 'mixup' in augment_name and 'cutmix' not in augment_name:
          logging.info('Applying MixUp!')
          ds = ds.map(my_mixup, num_parallel_calls=AUTOTUNE)
        elif 'cutmix' in augment_name and 'mixup' not in augment_name:
          logging.info('Applying CutMix!')
          ds = ds.map(my_cutmix, num_parallel_calls=AUTOTUNE)
        elif 'mixup' in augment_name and 'cutmix' in augment_name:
          logging.info('Applying MixUp and CutMix!')
          ds = ds.map(my_mixup_cutmix, num_parallel_calls=AUTOTUNE)
        # If applying augs after mixing, unbatch, map, and rebatch
        if (not augment_before_mix and
            ('mixup' in augment_name or 'cutmix' in augment_name)):
          ds = ds.unbatch().map(augment_normalize, num_parallel_calls=AUTOTUNE)
          ds = ds.batch(batch_size)
      else:
        ds = ds.batch(batch_size)
      # Transpose and cast as needbe
      if transpose:
        ds = ds.map(transpose_fn)  # NHWC -> HWCN
      # NOTE: You may be tempted to move the casting earlier on in the pipeline,
      # but for bf16 some operations will end up silently placed on the TPU and
      # this causes stalls while TF and JAX battle for the accelerator.
      ds = ds.map(cast_fn)
    else:
      ds = ds.batch(batch_size)
  ds = ds.prefetch(AUTOTUNE)
  ds = tfds.as_numpy(ds)
  if dtype == jnp.bfloat16:
    # JAX and TF disagree on the NumPy bfloat16 type so we need to reinterpret
    # tf_bfloat16->jnp.bfloat16.
    for batch in ds:
      batch['images'] = batch['images'].view(jnp.bfloat16)
      yield batch
  else:
    yield from ds
def cutmix_padding(h, w):
  """Returns a random binary CutMix mask of shape (h, w, 1).

  Taken from (https://github.com/google/edward2/blob/master/experimental
  /marginalization_mixup/data_utils.py#L367)

  Args:
    h: image height.
    w: image width.
  """
  center_x = tf.random.uniform([], 0, w, tf.int32)
  center_y = tf.random.uniform([], 0, h, tf.int32)
  # Beta dist in paper, but they used Beta(1,1) which is just uniform.
  keep_fraction = tf.random.uniform([])
  side_ratio = tf.math.sqrt(1 - keep_fraction)
  # Half-extents of the patch around the sampled centre.
  half_w = tf.cast(side_ratio * tf.cast(w, tf.float32), tf.int32) // 2
  half_h = tf.cast(side_ratio * tf.cast(h, tf.float32), tf.int32) // 2
  x0 = tf.clip_by_value(center_x - half_w, 0, w)
  y0 = tf.clip_by_value(center_y - half_h, 0, h)
  x1 = tf.clip_by_value(center_x + half_w, 0, w)
  y1 = tf.clip_by_value(center_y + half_h, 0, h)
  # Build the binary mask: a ones patch padded out to the full image.
  patch = tf.ones((y1 - y0, x1 - x0))
  mask = tf.pad(
      patch,
      paddings=[[y0, tf.maximum(h - y1, 0)], [x0, tf.maximum(w - x1, 0)]],
      mode='CONSTANT',
      constant_values=0)
  mask.set_shape((h, w))
  return mask[..., None]  # Add channel dim.
def my_cutmix(batch):
"""Cutmix."""
batch = dict(**batch)
bs = tf.shape(batch['images'])[0] // 2
mask = batch['mask'][:bs]
images = (mask * batch['images'][:bs] + (1.0 - mask) * batch['images'][bs:])
mix_labels = batch['labels'][bs:]
labels = batch['labels'][:bs]
ratio = batch['cutmix_ratio'][:bs]
return {'images': images, 'labels': labels,
'mix_labels': mix_labels, 'ratio': ratio}
def my_mixup(batch):
  """MixUp: convex combination of the two halves of the batch."""
  batch = dict(**batch)
  half = tf.shape(batch['images'])[0] // 2
  # Broadcast the per-example ratio over H, W, C.
  lam = batch['mixup_ratio'][:half, None, None, None]
  mixed = lam * batch['images'][:half] + (1.0 - lam) * batch['images'][half:]
  return {'images': mixed,
          'labels': batch['labels'][:half],
          'mix_labels': batch['labels'][half:],
          'ratio': lam[..., 0, 0, 0]}  # Squeeze back to a vector.
def mixup_or_cutmix(batch):
  """Randomly applies one of cutmix or mixup to a batch."""
  logging.info('Randomly applying cutmix or mixup with 50% chance!')
  use_mixup = tf.cast(tf.random.uniform([], maxval=2, dtype=tf.int32), tf.bool)
  return tf.cond(use_mixup,
                 lambda: my_mixup(batch),
                 lambda: my_cutmix(batch))
def my_mixup_cutmix(batch):
  """Apply mixup to half the batch, and cutmix to the other."""
  batch = dict(**batch)
  images = batch['images']
  labels = batch['labels']
  q = tf.shape(images)[0] // 4
  # First output half: MixUp of quarters 0 and 1.
  lam = batch['mixup_ratio'][:q, None, None, None]
  mixup_images = lam * images[:q] + (1.0 - lam) * images[q:2 * q]
  # Second output half: CutMix of quarters 2 and 3.
  mask = batch['mask'][2 * q:3 * q]
  cutmix_images = mask * images[2 * q:3 * q] + (1.0 - mask) * images[-q:]
  return {
      'images': tf.concat([mixup_images, cutmix_images], axis=0),
      'labels': tf.concat([labels[:q], labels[2 * q:3 * q]], axis=0),
      'mix_labels': tf.concat([labels[q:2 * q], labels[-q:]], 0),
      'ratio': tf.concat([lam[..., 0, 0, 0],
                          batch['cutmix_ratio'][2 * q:3 * q]], axis=0)}
def _to_tf_dtype(jax_dtype: jnp.dtype) -> tf.DType:
  """Maps a JAX dtype to the corresponding TF dtype."""
  # TF and JAX use distinct NumPy bfloat16 types, so map that case explicitly.
  if jax_dtype != jnp.bfloat16:
    return tf.dtypes.as_dtype(jax_dtype)
  return tf.bfloat16
def _to_tfds_split(split: Split) -> tfds.Split:
  """Returns the TFDS split appropriately sharded."""
  # TRAIN, TRAIN_AND_VALID and VALID are all carved out of the TFDS train
  # split; TEST maps onto the TFDS validation split.
  if split == Split.TEST:
    return tfds.Split.VALIDATION
  assert split in (Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID)
  return tfds.Split.TRAIN
def _shard(split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]:
  """Computes the half-open [start, end) example range owned by one shard."""
  assert shard_index < num_shards
  # Evenly partition the example indices and take this shard's chunk.
  chunk = np.array_split(np.arange(split.num_examples), num_shards)[shard_index]
  start = chunk[0]
  end = chunk[-1] + 1
  if split == Split.TRAIN:
    # TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000], so shift the
    # range past the carved-out validation examples.
    start += Split.VALID.num_examples
    end += Split.VALID.num_examples
  return start, end
def _preprocess_image(
    image_bytes: tf.Tensor,
    is_training: bool,
    image_size: Sequence[int],
    eval_preproc: str = 'crop_resize'
) -> tf.Tensor:
  """Returns processed and resized images.

  NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
  clamping overshoots, so outputs may fall outside [0.0, 255.0]
  (e.g. values in the range [-51.1, 336.6] have been observed).
  """
  if is_training:
    # Train: random distorted crop + random horizontal flip.
    image = _decode_and_random_crop(image_bytes, image_size)
    image = tf.image.random_flip_left_right(image)
    assert image.dtype == tf.uint8
    return tf.image.resize(image, image_size, tf.image.ResizeMethod.BICUBIC)
  if eval_preproc == 'crop_resize':
    # Eval: deterministic center crop, then resize.
    image = _decode_and_center_crop(image_bytes, image_size=image_size)
    assert image.dtype == tf.uint8
    return tf.image.resize(image, image_size, tf.image.ResizeMethod.BICUBIC)
  if 'resize_crop' in eval_preproc:
    # The crop percentage is encoded in the preproc name, e.g. 'resize_crop_0.875'.
    crop_pct = float(eval_preproc.split('_')[-1])
    return _decode_and_resize_then_crop(image_bytes, image_size=image_size,
                                        crop_pct=crop_pct)
  raise ValueError(f'Unknown Eval Preproc {eval_preproc} provided!')
def _augment_image(
    image: tf.Tensor,
    is_training: bool,
    augment_name: Optional[str] = None,
) -> tf.Tensor:
  """Applies AA/RA to an image.

  Args:
    image: Image tensor; values are clipped to [0, 255] before augmenting.
    is_training: Augmentation is only applied during training.
    augment_name: Aug spec string; acts only if it mentions 'autoaugment' or
      'randaugment' (e.g. 'randaugment_415' = 4 layers, magnitude 15).

  Returns:
    The augmented image (original dtype), or the input unchanged.
  """
  if is_training and augment_name:
    if 'autoaugment' in augment_name or 'randaugment' in augment_name:
      input_image_type = image.dtype
      image = tf.clip_by_value(image, 0.0, 255.0)
      # Autoaugment requires a uint8 image; we cast here and then cast back
      image = tf.cast(image, dtype=tf.uint8)
      if 'autoaugment' in augment_name:
        logging.info(f'Applying AutoAugment policy {augment_name}')
        image = autoaugment.distort_image_with_autoaugment(image, 'v0')
      elif 'randaugment' in augment_name:
        # Magnitude is the trailing underscore-separated integer in the name.
        magnitude = int(augment_name.split('_')[-1])  # pytype: disable=attribute-error
        # Allow passing in num_layers as a magnitude > 100
        if magnitude > 100:
          num_layers = magnitude // 100
          magnitude = magnitude - int(num_layers * 100)
        else:
          num_layers = 2
        logging.info(f'Applying RA {num_layers} x {magnitude}')
        image = autoaugment.distort_image_with_randaugment(
            image, num_layers=num_layers, magnitude=magnitude)
      # Restore the caller's dtype after the uint8 round-trip.
      image = tf.cast(image, dtype=input_image_type)
  return image
def _normalize_image(image: tf.Tensor) -> tf.Tensor:
  """Normalize the image to zero mean and unit variance (per RGB channel)."""
  mean = tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
  stddev = tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
  return (image - mean) / stddev
def _distorted_bounding_box_crop(
    image_bytes: tf.Tensor,
    *,
    jpeg_shape: tf.Tensor,
    bbox: tf.Tensor,
    min_object_covered: float,
    aspect_ratio_range: Tuple[float, float],
    area_range: Tuple[float, float],
    max_attempts: int,
) -> tf.Tensor:
  """Generates cropped_image using one of the bboxes randomly distorted."""
  # Sample a random crop window that sufficiently overlaps the bounding box.
  window_begin, window_size, _ = tf.image.sample_distorted_bounding_box(
      jpeg_shape,
      bounding_boxes=bbox,
      min_object_covered=min_object_covered,
      aspect_ratio_range=aspect_ratio_range,
      area_range=area_range,
      max_attempts=max_attempts,
      use_image_if_no_bounding_boxes=True)
  offset_y, offset_x, _ = tf.unstack(window_begin)
  target_height, target_width, _ = tf.unstack(window_size)
  return crop(image_bytes, [offset_y, offset_x, target_height, target_width])
def _decode_and_random_crop(image_bytes: tf.Tensor,
                            image_size: Sequence[int] = (224, 224),
                            jpeg_shape: Optional[tf.Tensor] = None
                            ) -> tf.Tensor:
  """Make a random crop of chosen size."""
  if jpeg_shape is None:
    jpeg_shape = get_shape(image_bytes)
  # No real bounding box available: use the whole image as the bbox.
  whole_image_bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
                                 dtype=tf.float32, shape=[1, 1, 4])
  image = _distorted_bounding_box_crop(
      image_bytes,
      jpeg_shape=jpeg_shape,
      bbox=whole_image_bbox,
      min_object_covered=0.1,
      aspect_ratio_range=(3 / 4, 4 / 3),
      area_range=(0.08, 1.0),
      max_attempts=10)
  # A crop spanning the full image means sampling failed: fall back to center crop.
  if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
    image = _decode_and_center_crop(image_bytes, jpeg_shape, image_size)
  return image
def _decode_and_center_crop(
    image_bytes: tf.Tensor,
    jpeg_shape: Optional[tf.Tensor] = None,
    image_size: Sequence[int] = (224, 224),
) -> tf.Tensor:
  """Crops to center of image with padding then scales."""
  if jpeg_shape is None:
    jpeg_shape = get_shape(image_bytes)
  image_height = jpeg_shape[0]
  image_width = jpeg_shape[1]
  # Choose a crop that keeps the target aspect ratio while leaving at least
  # 32px of margin on the short edge.
  scale = tf.minimum(tf.cast(image_height, tf.float32) / (image_size[0] + 32),
                     tf.cast(image_width, tf.float32) / (image_size[1] + 32))
  crop_height = tf.cast(scale * image_size[0], tf.int32)
  crop_width = tf.cast(scale * image_size[1], tf.int32)
  offset_height = ((image_height - crop_height) + 1) // 2
  offset_width = ((image_width - crop_width) + 1) // 2
  return crop(image_bytes, [offset_height, offset_width,
                            crop_height, crop_width])
def get_shape(image_bytes):
  """Gets the image shape for jpeg bytes or a uint8 decoded image."""
  if image_bytes.dtype == tf.dtypes.string:
    # Undecoded JPEG: read the shape from the header without a full decode.
    return tf.image.extract_jpeg_shape(image_bytes)
  return tf.shape(image_bytes)
def crop(image_bytes, crop_window):
  """Helper function to crop a jpeg or a decoded image."""
  if image_bytes.dtype == tf.dtypes.string:
    # Undecoded JPEG: decode only the requested window.
    return tf.image.decode_and_crop_jpeg(image_bytes,
                                         tf.stack(crop_window),
                                         channels=3)
  offset_y, offset_x, height, width = crop_window
  return tf.image.crop_to_bounding_box(image_bytes, offset_y, offset_x,
                                       height, width)
def _decode_and_resize_then_crop(
    image_bytes: tf.Tensor,
    image_size: Sequence[int] = (224, 224),
    crop_pct: float = 1.0,
) -> tf.Tensor:
  """Rescales an image to image_size / crop_pct, then center crops."""
  image = tf.image.decode_jpeg(image_bytes, channels=3)
  # crop_pct > 1 is interpreted as a fixed pixel padding (VGG style);
  # otherwise each side is scaled up by 1 / crop_pct before cropping.
  if crop_pct > 1.0:
    scale_size = tuple(int(side + crop_pct) for side in image_size)
  else:
    scale_size = tuple(int(float(side) / crop_pct) for side in image_size)
  image = tf.image.resize(image, scale_size, tf.image.ResizeMethod.BICUBIC)
  crop_height = tf.cast(image_size[0], tf.int32)
  crop_width = tf.cast(image_size[1], tf.int32)
  offset_height = ((scale_size[0] - crop_height) + 1) // 2
  offset_width = ((scale_size[1] - crop_width) + 1) // 2
  return crop(image, [offset_height, offset_width, crop_height, crop_width])
|
{"hexsha": "e6ca3e340b8f371de16ee64e61f736833db724dc", "size": 20704, "ext": "py", "lang": "Python", "max_stars_repo_path": "nfnets/dataset.py", "max_stars_repo_name": "bruinxiong/deepmind-research", "max_stars_repo_head_hexsha": "4899440e3eb2dee9335c469c7f01aadcbf21cc72", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-15T04:50:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-15T04:50:04.000Z", "max_issues_repo_path": "nfnets/dataset.py", "max_issues_repo_name": "bruinxiong/deepmind-research", "max_issues_repo_head_hexsha": "4899440e3eb2dee9335c469c7f01aadcbf21cc72", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nfnets/dataset.py", "max_forks_repo_name": "bruinxiong/deepmind-research", "max_forks_repo_head_hexsha": "4899440e3eb2dee9335c469c7f01aadcbf21cc72", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-20T15:43:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-20T15:43:47.000Z", "avg_line_length": 38.1289134438, "max_line_length": 87, "alphanum_fraction": 0.6711746522, "include": true, "reason": "import numpy,import jax", "num_tokens": 5586}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 29/07/2020
"""
__all__ = ["plot_grad_flow"]
import numpy
import torch
from matplotlib import pyplot
from matplotlib.lines import Line2D
from draugr.torch_utilities.optimisation.parameters import normal_init_weights
def plot_grad_flow(
    model: torch.nn.Module,
    lines: bool = True,
    alpha: float = 0.5,
    line_width: float = 1.0,
) -> None:
    """
    Plots the gradients flowing through different layers in the net during training.
    Can be used for checking for possible gradient vanishing / exploding problems.
    Usage: After loss.backwards(), use plot_grad_flow(model) to visualize the gradient flow of model
    :param model: model whose named parameters (with populated .grad) are plotted
    :type model:
    :param lines: plot line series if True, otherwise overlaid bar charts
    :type lines:
    :param alpha: opacity of the plotted series, in (0, 1]
    :type alpha:
    :param line_width: width of the plotted lines / bar edges
    :type line_width:"""
    assert 0.0 < alpha <= 1.0
    ave_grads = []
    max_grads = []
    layers = []
    # Collect mean/max absolute gradient per non-bias trainable parameter.
    for n, p in model.named_parameters():
        if p.requires_grad and ("bias" not in n):
            layers.append(n)
            grad_abs = p.grad.abs()
            ave_grads.append(grad_abs.mean())
            max_grads.append(grad_abs.max())
    if lines:
        pyplot.plot(max_grads, alpha=alpha, linewidth=line_width, color="r")
        pyplot.plot(ave_grads, alpha=alpha, linewidth=line_width, color="g")
    else:
        pyplot.bar(
            numpy.arange(len(max_grads)),
            max_grads,
            alpha=alpha,
            linewidth=line_width,
            color="r",
        )
        pyplot.bar(
            numpy.arange(len(max_grads)),
            ave_grads,
            alpha=alpha,
            linewidth=line_width,
            color="g",
        )
    pyplot.hlines(0, 0, len(ave_grads) + 1, linewidth=1, color="k")
    pyplot.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
    pyplot.xlim(left=0, right=len(ave_grads))
    max_g = max(max_grads)
    margin = max_g * 1.1
    pyplot.ylim(
        bottom=max_g - margin, top=margin
    )  # zoom in on the lower gradient regions
    pyplot.xlabel("Layers")
    pyplot.ylabel("Gradient Magnitude")
    pyplot.title("Gradient Flow")
    pyplot.grid(True)
    # Fixed: legend proxy colors now match the plotted series
    # (red = max, green = mean, black = zero line); they were previously
    # cyan/blue, which matched nothing on the plot.
    pyplot.legend(
        [
            Line2D([0], [0], color="r", lw=4),
            Line2D([0], [0], color="g", lw=4),
            Line2D([0], [0], color="k", lw=4),
        ],
        ["max-gradient", "mean-gradient", "zero-gradient"],
    )
if __name__ == "__main__":

    def a():
        """Build a small MLP, run one backward pass and plot its gradient flow."""
        inputs = torch.randn(10, 50, requires_grad=True)
        targets = torch.empty(10, dtype=torch.long).random_(2)
        # four hidden Linear+ReLU pairs followed by a 2-way output layer
        stack = []
        for _ in range(4):
            stack += [torch.nn.Linear(50, 50), torch.nn.ReLU()]
        stack.append(torch.nn.Linear(50, 2))
        model = torch.nn.Sequential(*stack)
        normal_init_weights(model, std=1.2)
        criterion = torch.nn.CrossEntropyLoss()
        loss = criterion(model(inputs), targets)
        loss.backward()
        plot_grad_flow(model)
        pyplot.show()

    a()
|
{"hexsha": "536bf4ff9933c7b56e3db22b9b2a5e2602140972", "size": 3286, "ext": "py", "lang": "Python", "max_stars_repo_path": "draugr/torch_utilities/optimisation/debugging/gradients/flow.py", "max_stars_repo_name": "cnHeider/draugr", "max_stars_repo_head_hexsha": "b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-09-27T08:04:59.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-02T06:14:45.000Z", "max_issues_repo_path": "draugr/torch_utilities/optimisation/debugging/gradients/flow.py", "max_issues_repo_name": "cnHeider/draugr", "max_issues_repo_head_hexsha": "b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2019-09-27T08:03:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T15:07:30.000Z", "max_forks_repo_path": "draugr/torch_utilities/optimisation/debugging/gradients/flow.py", "max_forks_repo_name": "cnHeider/draugr", "max_forks_repo_head_hexsha": "b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-01T00:18:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-01T00:18:57.000Z", "avg_line_length": 27.3833333333, "max_line_length": 100, "alphanum_fraction": 0.5730371272, "include": true, "reason": "import numpy", "num_tokens": 818}
|
import numpy as np
# Absolute path to a saved evaluation-details array
# (validation run: 3 crops x 3 clips at 224 px, I3D ResNet-18 backbone).
a ='/home/peter/workspace/projects/tradance/traditional-dance-recognition/logs/kordance600_13-rgb-i3d-resnet-18-ts-f32/val_3crops_3clips_224_details.npy'
# Load the array and inspect its shape and first record.
npy = np.load(a)
print(npy.shape)
print(npy[0])
|
{"hexsha": "44cd904a8916c456dec0d18dc26ead6847ab8ca6", "size": 228, "ext": "py", "lang": "Python", "max_stars_repo_path": "debug.py", "max_stars_repo_name": "peter-yys-yoon/traditional-dance-recognition", "max_stars_repo_head_hexsha": "be4939d53b838624a04dba0826532c65421d1325", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "debug.py", "max_issues_repo_name": "peter-yys-yoon/traditional-dance-recognition", "max_issues_repo_head_hexsha": "be4939d53b838624a04dba0826532c65421d1325", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "debug.py", "max_forks_repo_name": "peter-yys-yoon/traditional-dance-recognition", "max_forks_repo_head_hexsha": "be4939d53b838624a04dba0826532c65421d1325", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.5384615385, "max_line_length": 153, "alphanum_fraction": 0.7807017544, "include": true, "reason": "import numpy", "num_tokens": 76}
|
from spacy.en import English
from numpy import dot
from numpy.linalg import norm
# from subject_object_extraction import findSVOs
import json
"""
tokenization, sentence recognition, part of speech tagging, lemmatization,
dependency parsing, and named entity recognition
"""
def get_message_info(parsedData):
    """
    Collect lexical attributes for the first three tokens of the parse.

    :param parsedData: data after being parsed by the en parser
    :return: message info in json (token index -> attribute dict)
    """
    message_info = {}
    for i, token in enumerate(parsedData):
        # A fresh dict is created per token: the original reused a single
        # token_info dict, so every message_info entry aliased the same
        # object and ended up holding only the last token's attributes.
        token_info = {
            'original': str(token.orth) + " " + str(token.orth_),
            'lowercased': str(token.lower) + " " + str(token.lower_),
            'lemma': str(token.lemma) + " " + str(token.lemma_),
            'shape': str(token.shape) + " " + str(token.shape_),
            'prefix': str(token.prefix) + " " + str(token.prefix_),
            'suffix': str(token.suffix) + " " + str(token.suffix_),
            'log_probability': str(token.prob),
            'Brown_cluster_id': str(token.cluster),
        }
        message_info[i] = token_info
        if i > 1:  # keep only the first three tokens
            break
    json_data = json.dumps(message_info)
    return json_data
def get_message_content_sents(parsedData):
    """
    Sentence recognition: rebuild each recognised sentence from its token span.

    :param parsedData: data after being parsed by the en parser
    :return: sentence recognition (JSON: sentence index -> sentence text)
    """
    sents = {}
    # "sents" yields spans; a span carries start/end token indices into the
    # original parse, so each sentence is reassembled token by token.
    for idx, span in enumerate(parsedData.sents):
        pieces = (parsedData[pos].string for pos in range(span.start, span.end))
        sents[idx] = ''.join(pieces).strip()
    return json.dumps(sents)
def get_message_content_speech_tagging(parsedData):
    """
    speech tagging

    Tags the tokens of the first recognised sentence with their part of speech.

    :param parsedData: data after being parsed by the en parser
    :return: message_content_speech_tagging (JSON: token index -> "text POS")
    """
    speech_tagging = {}
    # only the first sentence span is inspected
    for span in parsedData.sents:
        first_sentence = [parsedData[j] for j in range(span.start, span.end)]
        break
    for idx, tok in enumerate(first_sentence):
        speech_tagging[idx] = tok.orth_ + " " + tok.pos_
    return json.dumps(speech_tagging)
def get_message_content_dependencies(parsedData):
    """
    dependency parsing

    Shown as: original token, dependency tag, head word, left dependents,
    right dependents.

    :param parsedData: data after being parsed by the en parser
    :return: message_content_dependencies (JSON: token index -> description)
    """
    dependencies = {}
    for i, token in enumerate(parsedData):
        # str() the dependent lists: the original concatenated a list onto a
        # str, which raises TypeError. Keys are token indices now, since
        # token objects are not JSON-serializable dictionary keys.
        dependencies[i] = (token.orth_ + " " + token.dep_ + " " + token.head.orth_ + " "
                           + str([t.orth_ for t in token.lefts]) + " "
                           + str([t.orth_ for t in token.rights]))
    json_data = json.dumps(dependencies)
    return json_data
def get_message_content_named_entities(parsedData):
    """
    named entity recognition

    :param parsedData: data after being parsed by the en parser
    :return: message_content_named_entities (JSON with 'all_entities' and
        'named_entities' sections)
    """
    entities = {}
    all_entities = {}
    named_entities = {}
    for i, token in enumerate(parsedData):
        # Parenthesise the conditional: without the parentheses the whole
        # concatenation was the ternary body, so non-entities lost the token
        # text entirely and became just "(not an entity)".
        all_entities[i] = token.orth_ + " " + (token.ent_type_ if token.ent_type_ != "" else "(not an entity)")
    entities['all_entities'] = all_entities
    # if you just want the entities and nothing else, you can do access the parsed examples "ents" property like this:
    ents = list(parsedData.ents)
    for i, entity in enumerate(ents):
        # entity.label may be a numeric id; str() makes the concatenation
        # safe either way (the original raised TypeError for int labels)
        named_entities[i] = str(entity.label) + " " + entity.label_ + " " + ' '.join(t.orth_ for t in entity)
    entities['named_entities'] = named_entities
    json_data = json.dumps(entities)
    return json_data
def get_message_content_messy_data(parsedData):
    """
    spaCy is trained to attempt to handle messy data, including emoticons and other web-based features

    :param parsedData: data after being parsed by the en parser
    :return: messy_data (JSON: token index -> "text POS lemma")
    """
    messy_data = {
        idx: tok.orth_ + " " + tok.pos_ + " " + tok.lemma_
        for idx, tok in enumerate(parsedData)
    }
    return json.dumps(messy_data)
def get_word_vector_representations(word):
    """
    spaCy has word vector representations built in!

    Returns the 10 vocabulary words most similar to ``word`` by cosine
    similarity of their vectors.

    :param word: a word you wanna get it's vector representations
    :return: word vector representations (JSON: rank -> word)
    """
    parser = English()
    word_vector_representations = {}
    # look up the query word in the parser's vocabulary
    query = parser.vocab[word]
    # cosine similarity
    cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))
    # gather all known lowercased words, excluding the query word itself;
    # the original hard-coded "nasa" here regardless of the argument
    allWords = list({w for w in parser.vocab
                     if w.has_repvec and w.orth_.islower() and w.lower_ != word.lower()})
    # sort by similarity to word, most similar first
    allWords.sort(key=lambda w: cosine(w.repvec, query.repvec))
    allWords.reverse()
    # Top 10 most similar words to word
    for i, similar in enumerate(allWords[:10]):
        word_vector_representations[i] = similar.orth_
    json_data = json.dumps(word_vector_representations)
    return json_data
# def message_SVOs(parsedData):
# print(findSVOs(parsedData))
def parse_message(message):
    """
    Run the full analysis pipeline over a raw message and bundle the results.

    :param message: raw text to analyse
    :return: JSON string with one entry per analysis kind
    """
    parser = English()
    parsedData = parser(message)
    parsing_results = {
        'info': get_message_info(parsedData),
        'sents': get_message_content_sents(parsedData),
        'speech_tagging': get_message_content_speech_tagging(parsedData),
        'dependencies': get_message_content_dependencies(parsedData),
        'entities': get_message_content_named_entities(parsedData),
        'messy_data': get_message_content_messy_data(parsedData),
        # NOTE(review): passes message[0] (the first character) — confirm
        # this is intentional rather than a whole-word lookup.
        'word_vector_representations': get_word_vector_representations(message[0]),
    }
    return json.dumps(parsing_results)
# dump
# Script entry point: prompt for a message on stdin and print the full
# JSON analysis produced by parse_message.
print(parse_message(input("enter a message to be parsed: \n")))
|
{"hexsha": "786324c45720315d0763b356daf12d68b4ab6676", "size": 6742, "ext": "py", "lang": "Python", "max_stars_repo_path": "chatbot/chatAPI/helper.py", "max_stars_repo_name": "iSuperMostafa/nlp-chatbot-poc", "max_stars_repo_head_hexsha": "519ce184686fd40fcc4ea86bcc200e7bfa01e37f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-02-26T17:41:40.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-09T01:04:02.000Z", "max_issues_repo_path": "chatbot/chatAPI/helper.py", "max_issues_repo_name": "iSuperMostafa/nlp-chatbot-poc", "max_issues_repo_head_hexsha": "519ce184686fd40fcc4ea86bcc200e7bfa01e37f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chatbot/chatAPI/helper.py", "max_forks_repo_name": "iSuperMostafa/nlp-chatbot-poc", "max_forks_repo_head_hexsha": "519ce184686fd40fcc4ea86bcc200e7bfa01e37f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2017-12-22T18:22:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-22T07:32:37.000Z", "avg_line_length": 34.7525773196, "max_line_length": 118, "alphanum_fraction": 0.6922278256, "include": true, "reason": "from numpy", "num_tokens": 1610}
|
#include "options.h"
#include <boost/program_options.hpp>
#include <iostream>
namespace po = boost::program_options;
// True when the user asked for the usage text via --help / -h.
bool is_help(const po::variables_map& vm) {
  return vm.find("help") != vm.end();
}
// Parse argv into a variables_map. --help short-circuits validation so the
// usage text can be printed without supplying the required options; in every
// other case po::notify enforces required() options and runs notifiers.
// (The original never called notify, so required() was never checked.)
po::variables_map process_cmd_line(const int argc, char** argv) {
  po::options_description desc("Options");
  desc.add_options()
    ("help,h", "produce help message")
    ("json_config,j", po::value<std::string>()->
      default_value("client.json"), "JSON file with config information for hosted RL loop")
    ("threads,t", po::value<size_t>()->default_value(1), "Number of threads per instance")
    ("examples,n", po::value<size_t>()->default_value(10), "Number of examples per thread")
    ("features,x", po::value<size_t>()->default_value(10), "Features count")
    ("actions,a", po::value<size_t>()->default_value(2), "Number of actions")
    ("experiment_name,e", po::value<std::string>()->required(), "(REQUIRED) experiment name")
    ("float_outcome,f", "if outcome is float (otherwise - json)")
    ("sleep,s", po::value<size_t>()->default_value(0), "Milliseconds to sleep between loop iterations")
    ("duration,d", po::value<size_t>(), "Duration of experiment (in ms). Alternative to n")
    ("instances,i", po::value<size_t>()->default_value(1), "Number of test loop instances")
    ("reward_period,r", po::value<size_t>()->default_value(0), "Ratio period (0 - no reward, otherwise - every $reward_period interaction is receiving reward)")
    ("slots,q", po::value<size_t>()->default_value(0), "Number of slots (ccb simulation is running if > 0)")
    ("episode_length,m", po::value<size_t>()->default_value(0), "Length of an episode (running multistep if > 0)")
    ;
  po::variables_map vm;
  store(parse_command_line(argc, argv, desc), vm);
  if (is_help(vm))
    std::cout << desc << std::endl;
  else
    po::notify(vm);  // throws po::required_option if experiment_name is missing
  return vm;
}
// Throws std::logic_error when both options were explicitly set by the user.
// A value that is merely the option's default does not count as "set".
void throw_if_conflicting(const po::variables_map& vm, const std::string& first, const std::string& second) {
  const bool first_set = vm.count(first) != 0 && !vm[first].defaulted();
  const bool second_set = vm.count(second) != 0 && !vm[second].defaulted();
  if (first_set && second_set) {
    throw std::logic_error(std::string("Conflicting options '") +
                           first + "' and '" + second + "'.");
  }
}
|
{"hexsha": "952e088618f6b3cc6b58f547fdc655b5d61e8e04", "size": 2162, "ext": "cc", "lang": "C++", "max_stars_repo_path": "examples/test_cpp/options.cc", "max_stars_repo_name": "orenmichaely/reinforcement_learning", "max_stars_repo_head_hexsha": "1a1570641255fdcd03a33996986aa58f3c0c58e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/test_cpp/options.cc", "max_issues_repo_name": "orenmichaely/reinforcement_learning", "max_issues_repo_head_hexsha": "1a1570641255fdcd03a33996986aa58f3c0c58e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2022-02-08T21:18:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T21:18:37.000Z", "max_forks_repo_path": "examples/test_cpp/options.cc", "max_forks_repo_name": "orenmichaely/reinforcement_learning", "max_forks_repo_head_hexsha": "1a1570641255fdcd03a33996986aa58f3c0c58e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.1224489796, "max_line_length": 160, "alphanum_fraction": 0.6669750231, "num_tokens": 550}
|
[STATEMENT]
lemma mfinalD:
fixes ln
assumes "mfinal s" "thr s t = \<lfloor>(x, ln)\<rfloor>"
shows "final x" "ln = no_wait_locks" "wset s t = None"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. final x &&& ln = no_wait_locks &&& wset s t = None
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
mfinal s
thr s t = \<lfloor>(x, ln)\<rfloor>
goal (1 subgoal):
1. final x &&& ln = no_wait_locks &&& wset s t = None
[PROOF STEP]
unfolding mfinal_def
[PROOF STATE]
proof (prove)
using this:
\<forall>t x ln. thr s t = \<lfloor>(x, ln)\<rfloor> \<longrightarrow> final x \<and> ln = no_wait_locks \<and> wset s t = None
thr s t = \<lfloor>(x, ln)\<rfloor>
goal (1 subgoal):
1. final x &&& ln = no_wait_locks &&& wset s t = None
[PROOF STEP]
by blast+
|
{"llama_tokens": 347, "file": "JinjaThreads_Framework_FWSemantics", "length": 3}
|
\chapter{\label{resources}Resources}
Examples of outputs produced during the course of this thesis, besides the research itself:
\begin{itemize}
	\item mystatsfunctions plus other packages (e.g.\ FaIR)
\item Code accompanying papers
\item CEDA archive data
\item MARS datasets
\end{itemize}
|
{"hexsha": "2fd16309e231108ea838360128d334f883e12aef", "size": 265, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Appendices/A1.tex", "max_stars_repo_name": "njleach/Thesis", "max_stars_repo_head_hexsha": "a7594eb080d439c01312d44c20b922869c69f8ae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Appendices/A1.tex", "max_issues_repo_name": "njleach/Thesis", "max_issues_repo_head_hexsha": "a7594eb080d439c01312d44c20b922869c69f8ae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Appendices/A1.tex", "max_forks_repo_name": "njleach/Thesis", "max_forks_repo_head_hexsha": "a7594eb080d439c01312d44c20b922869c69f8ae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4444444444, "max_line_length": 57, "alphanum_fraction": 0.7547169811, "num_tokens": 67}
|
[STATEMENT]
lemma map_pred_comp: "map_pred f \<circ> map_pred g = map_pred (g \<circ> f)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_pred f \<circ> map_pred g = map_pred (g \<circ> f)
[PROOF STEP]
using map_fun_comp[where g=id and g'=id]
[PROOF STATE]
proof (prove)
using this:
?f ---> id \<circ> (?f' ---> id) = (?f' \<circ> ?f) ---> id \<circ> id
goal (1 subgoal):
1. map_pred f \<circ> map_pred g = map_pred (g \<circ> f)
[PROOF STEP]
by (simp add: map_pred_def)
|
{"llama_tokens": 211, "file": "BNF_CC_Concrete_Examples", "length": 2}
|
[STATEMENT]
lemma sturm_id_PR_prio0:
"{x::real. P x} = {x::real. (PR_TAG P) x}"
"(\<forall>x::real. f x < g x) = (\<forall>x::real. PR_TAG (\<lambda>x. f x < g x) x)"
"(\<forall>x::real. P x) = (\<forall>x::real. \<not>(PR_TAG (\<lambda>x. \<not>P x)) x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {x. P x} = {x. PR_TAG P x} &&& (\<forall>x. f x < g x) = (\<forall>x. PR_TAG (\<lambda>x. f x < g x) x) &&& (\<forall>x. P x) = (\<forall>x. \<not> PR_TAG (\<lambda>x. \<not> P x) x)
[PROOF STEP]
by (simp_all add: PR_TAG_def)
|
{"llama_tokens": 255, "file": "Sturm_Sequences_Sturm_Method", "length": 1}
|
[STATEMENT]
lemma cfs_times_X:
assumes "g \<in> carrier P"
shows "(X \<otimes>\<^bsub>P\<^esub> g) (Suc n) = g n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (X_poly R \<otimes>\<^bsub>P\<^esub> g) (Suc n) = g n
[PROOF STEP]
apply(rule poly_induct3[of g])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. g \<in> carrier P
2. \<And>p q. \<lbrakk>q \<in> carrier P; p \<in> carrier P; (X_poly R \<otimes>\<^bsub>P\<^esub> p) (Suc n) = p n; (X_poly R \<otimes>\<^bsub>P\<^esub> q) (Suc n) = q n\<rbrakk> \<Longrightarrow> (X_poly R \<otimes>\<^bsub>P\<^esub> (p \<oplus>\<^bsub>P\<^esub> q)) (Suc n) = (p \<oplus>\<^bsub>P\<^esub> q) n
3. \<And>a na. a \<in> carrier R \<Longrightarrow> (X_poly R \<otimes>\<^bsub>P\<^esub> monom P a na) (Suc n) = monom P a na n
[PROOF STEP]
apply (simp add: assms)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>p q. \<lbrakk>q \<in> carrier P; p \<in> carrier P; (X_poly R \<otimes>\<^bsub>P\<^esub> p) (Suc n) = p n; (X_poly R \<otimes>\<^bsub>P\<^esub> q) (Suc n) = q n\<rbrakk> \<Longrightarrow> (X_poly R \<otimes>\<^bsub>P\<^esub> (p \<oplus>\<^bsub>P\<^esub> q)) (Suc n) = (p \<oplus>\<^bsub>P\<^esub> q) n
2. \<And>a na. a \<in> carrier R \<Longrightarrow> (X_poly R \<otimes>\<^bsub>P\<^esub> monom P a na) (Suc n) = monom P a na n
[PROOF STEP]
apply (metis (no_types, lifting) P.m_closed P.r_distr X_closed cfs_add)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a na. a \<in> carrier R \<Longrightarrow> (X_poly R \<otimes>\<^bsub>P\<^esub> monom P a na) (Suc n) = monom P a na n
[PROOF STEP]
by (metis (no_types, lifting) P_def R.l_one R.one_closed R.r_null Suc_eq_plus1 X_poly_def
cfs_monom coeff_monom_mult coeff_simp monom_closed monom_mult)
|
{"llama_tokens": 783, "file": "Padic_Ints_Cring_Poly", "length": 4}
|
#!/usr/bin/env python
"""
ONS Address Index - Test the Performance of the Probabilistic Parser
====================================================================
A simple script to test the performance of a trained probabilistic parser
using holdout data. Computes the number of tokens that were correctly identified.
In addition, computes the fraction of complete addresses correctly parsed and
the performance metric per token type.
Requirements
------------
:requires: sklearn-crfsuite (http://sklearn-crfsuite.readthedocs.io/en/latest/index.html)
:requires: seaborn
:requires: matplotlib
:requires: numpy
Running
-------
After all requirements are satisfied and a model has been trained, the script can be invoked using CPython interpreter::
python test_performance.py
Author
------
:author: Sami Niemi (sami.niemi@valtech.co.uk)
Version
-------
:version: 0.7
:date: 6-Feb-2017
"""
from collections import Counter
import ProbabilisticParser.common.metrics as metric
import ProbabilisticParser.common.tokens as tkns
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import sklearn_crfsuite
from ProbabilisticParser import parser
from sklearn_crfsuite import metrics
# set seaborn style
sns.set_style("whitegrid")
sns.set_context("poster")
sns.set(rc={"figure.figsize": (12, 12)})
sns.set(font_scale=1.5)
def predict(address):
    """
    Predict the tokens using a trained probabilistic parser model.
    The address parser must have been trained before parse can be called.

    :param address: raw address string to be parsed
    :type address: str

    :return: parsed address
    :rtype: list
    """
    # the parser expects upper-cased input
    upper_cased = address.upper()
    return parser.parse(upper_cased)
def show_values(pc, fmt="%.2f", **kw):
    """
    Annotate every cell of a pcolor mesh with its numeric value.

    :param pc: QuadMesh returned by e.g. Axes.pcolor
    :param fmt: printf-style format used for the cell labels
    :param kw: extra keyword arguments forwarded to ax.text
    :return: None
    """
    pc.update_scalarmappable()
    # Artist.get_axes() was removed from matplotlib; the .axes property is
    # the supported replacement.
    ax = pc.axes
    for p, color, value in zip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
        x, y = p.vertices[:-2, :].mean(0)
        # black text on light cells, white text on dark cells
        if np.all(color[:3] > 0.5):
            color = (0.0, 0.0, 0.0)
        else:
            color = (1.0, 1.0, 1.0)
        ax.text(x, y, fmt % value, ha="center", va="center", color=color, **kw)
def cm2inch(*tupl):
    """
    Specify figure size in centimeter in matplotlib.

    Accepts either separate values ``cm2inch(w, h)`` or a single tuple
    ``cm2inch((w, h))``.

    Source: http://stackoverflow.com/a/22787457/395857

    :param tupl: dimensions in centimeters
    :return: tuple of the same dimensions in inches
    """
    inch = 2.54
    # isinstance instead of type() == tuple; also tolerate zero arguments
    # (the original raised IndexError on an empty call)
    values = tupl[0] if tupl and isinstance(tupl[0], tuple) else tupl
    return tuple(v / inch for v in values)
def heatmap(AUC, title, xlabel, ylabel, xticklabels, yticklabels, figure_width=40,
            figure_height=20, correct_orientation=False, cmap='RdBu'):
    """
    Generate a heatmap of the classification report information.

    Inspired by:

    - http://stackoverflow.com/a/16124677/395857
    - http://stackoverflow.com/a/25074150/395857

    :param AUC: 2D array of cell values (rows: classes, columns: metrics)
    :param title: figure title
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :param xticklabels: labels for the columns
    :param yticklabels: labels for the rows
    :param figure_width: figure width in centimeters (converted via cm2inch)
    :param figure_height: figure height in centimeters (converted via cm2inch)
    :param correct_orientation: if True, put the origin at the top left
    :param cmap: name of the matplotlib colour map to use
    :return: None
    """
    fig, ax = plt.subplots()
    c = ax.pcolor(AUC, edgecolors='k', linestyle='dashed', linewidths=0.2, cmap=cmap)
    # put the major ticks at the middle of each cell
    ax.set_yticks(np.arange(AUC.shape[0]) + 0.5, minor=False)
    ax.set_xticks(np.arange(AUC.shape[1]) + 0.5, minor=False)
    # set tick labels
    ax.set_xticklabels(xticklabels, minor=False)
    ax.set_yticklabels(yticklabels, minor=False)
    # set title and x/y labels
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Remove last blank column
    plt.xlim((0, AUC.shape[1]))
    # Turn off all the ticks
    # NOTE(review): tick1On/tick2On are deprecated in newer matplotlib
    # (tick1line.set_visible is the replacement) — confirm the pinned version.
    ax = plt.gca()
    for tick in ax.xaxis.get_major_ticks():
        tick.tick1On = False
        tick.tick2On = False
    for tick in ax.yaxis.get_major_ticks():
        tick.tick1On = False
        tick.tick2On = False
    # Add color bar
    plt.colorbar(c)
    # Add text in each cell
    show_values(c)
    # Proper orientation (origin at the top left instead of bottom left)
    if correct_orientation:
        ax.invert_yaxis()
        ax.xaxis.tick_top()
    # resize figure
    fig = plt.gcf()
    fig.set_size_inches(cm2inch(figure_width, figure_height))
def plot_classification_report(classification_report, title='Classification report ', cmap='RdBu',
                               outpath='/Users/saminiemi/Projects/ONS/AddressIndex/figs/'):
    """
    Visualise a classification report. Assumes that the report is in the scikit-learn format.

    from http://stackoverflow.com/questions/28200786/how-to-plot-scikit-learn-classification-report

    :param classification_report: a classification report as returned by scikit-learn
    :type classification_report: str
    :param title: title of the figure
    :type title: str
    :param cmap: name of the matplotlib colour map to use
    :type cmap: str
    :param outpath: location to which the output figure is stored
    :type outpath: str

    :return: None
    """
    lines = classification_report.split('\n')
    classes = []
    plot_matrix = []
    support = []
    class_names = []
    # Parse the report body: skip the two header lines and the trailing
    # summary rows; each remaining row looks like
    # "<label> <precision> <recall> <f1-score> <support>".
    for line in lines[2: (len(lines) - 2)]:
        t = line.strip().split()
        if len(t) < 2:
            # blank separator line
            continue
        # the numeric metric columns between the label and the support count
        v = [float(x) for x in t[1: len(t) - 1]]
        classes.append(t[0])
        support.append(int(t[-1]))
        class_names.append(t[0])
        plot_matrix.append(v)
    xlabel = 'Metrics'
    ylabel = 'Address Tokens'
    xticklabels = ['Precision', 'Recall', 'F1-score']
    # annotate each row label with its support count, e.g. "Postcode (123)"
    yticklabels = ['{0} ({1})'.format(class_names[idx], sup) for idx, sup in enumerate(support)]
    figure_width = 25
    # grow the figure with the number of classes
    figure_height = len(class_names) + 7
    correct_orientation = False
    heatmap(np.array(plot_matrix), title, xlabel, ylabel, xticklabels, yticklabels,
            figure_width, figure_height, correct_orientation, cmap=cmap)
    plt.savefig(outpath + 'tokenParsingPerformanceReport.pdf', dpi=200, bbox_inches='tight')
    plt.close()
def plot_performance(correct_counts, all_counts, outpath='/Users/saminiemi/Projects/ONS/AddressIndex/figs/'):
    """
    Generate a simple bar chart showing the performance of the parser.

    :param correct_counts: per-token-type counts of correctly labelled tokens
    :param all_counts: per-token-type totals
        (NOTE(review): assumed to contain every key of correct_counts — verify callers)
    :param outpath: location of the output data

    :return: None
    """
    # compute the fractions
    frac = []
    labels = []
    for token in correct_counts.keys():
        frac.append(float(correct_counts[token]) / all_counts[token] * 100.)
        labels.append(token)
    # sort frac and then labels
    frac = np.asarray(frac)
    labels = np.array(labels)
    inds = frac.argsort()
    frac = frac[inds]
    labels = labels[inds]
    # make a simple visualisation
    location = np.arange(len(labels))
    width = 0.5
    fig = plt.figure(figsize=(8, 6))
    plt.title('Parsing Performance: 100k Holdout Sample')
    ax = fig.add_subplot(1, 1, 1)
    plt.barh(location, frac, width, color='g', alpha=0.6)
    # annotate each bar with its percentage value
    for p in ax.patches:
        ax.annotate("%.1f" % p.get_width(), (p.get_x() + p.get_width(), p.get_y()),
                    xytext=(-40, 4), textcoords='offset points', color='white', fontsize=14)
    plt.xlabel('Percent of the Sample Correctly Labelled')
    plt.yticks(location + width / 2., labels)
    plt.xlim(0, 100.1)
    plt.tight_layout()
    plt.savefig(outpath + 'tokenParsingPerformance.pdf')
    plt.close()
def print_transitions(transition_features):
    """
    Outputs the token transitions and the associated weight.

    :param transition_features: counter of model instance transition features
    :return: None
    """
    for transition, weight in transition_features:
        source, target = transition
        print("%-6s -> %-7s %0.6f" % (source, target, weight))
def print_state_features(state_features):
    """
    Outputs the features that help to predict a label.

    :param state_features: counter of model instance state features
    :return: None
    """
    for feature, weight in state_features:
        attr, label = feature
        print("%0.6f %-8s %s" % (weight, label, attr))
def check_performance(holdout_file='/Users/saminiemi/Projects/ONS/AddressIndex/data/training/holdout.xml'):
    """
    Checks the performance of the trained model using given holdout data.

    Computes weighted f1-score, sequence accuracy, and a classification report,
    visualises the report, and prints the most/least likely transitions and the
    strongest positive/negative state features of the trained CRF.

    :param holdout_file: location and name of the holdout XML data file
    :type holdout_file: str

    :return: None
    """
    # load the trained CRF model from the location configured in the tokens module
    crf = sklearn_crfsuite.CRF(model_filename=tkns.MODEL_PATH + tkns.MODEL_FILE, verbose=True)
    X_test, y_test = tkns.readData(holdout_file)
    # store labels
    labels = list(crf.classes_)
    sorted_labels = sorted(labels, key=lambda name: name)
    print('Predicting holdout data...')
    y_pred = crf.predict(X_test)
    print('\nPerformance:')
    # Calculate metrics for each label, and find their average,
    # weighted by support (the number of true instances for each label).
    total = metrics.flat_f1_score(y_test, y_pred, average='weighted', labels=labels)
    # full sequence accuracy
    sequence_accuracy = metric.sequence_accuracy_score(y_test, y_pred)
    print('F1-score:', total)
    print('Sequence accuracy:', sequence_accuracy)
    print("")
    report = metrics.flat_classification_report(y_test, y_pred, labels=sorted_labels, digits=3)
    print(report)
    print('\nGenerating a plot...')
    plot_classification_report(report)
    # inspect the learned label-to-label transition weights
    print("\nLikeliest transitions:")
    print_transitions(Counter(crf.transition_features_).most_common(15))
    print("\nLeast likely transitions:")
    print_transitions(Counter(crf.transition_features_).most_common()[-15:])
    # and the strongest positive / negative state features
    print("\nTop 30 positive features:")
    print_state_features(Counter(crf.state_features_).most_common(30))
    print("\nTop 30 negative features:")
    print_state_features(Counter(crf.state_features_).most_common()[-30:])
def _manual(output_file='/Users/saminiemi/Projects/ONS/AddressIndex/data/incorrectlyParsed.csv'):
    """
    Predict the tokens for the holdout data and check the performance.

    Counts full-address and per-token accuracy, plots per-token performance,
    and writes incorrectly parsed addresses to ``output_file`` for inspection.

    :param output_file: name of the output file to store incorrectly parsed addresses
    :type output_file: str

    :return: None
    """
    n_addresses = 0          # holdout addresses seen (was `all`, shadowing the builtin)
    n_addresses_correct = 0  # addresses where every token was labelled correctly
    n_tokens = 0
    n_tokens_correct = 0
    counts_correct = dict()  # per-token-type correct counts
    counts_all = dict()      # per-token-type totals
    store = []               # incorrectly parsed addresses for the CSV dump
    print('Predicting holdout data...')
    for raw_string, components in tkns.readXML('holdout.xml'):
        n_addresses += 1
        # get the true labels
        _, true_labels = list(zip(*components))
        true_labels = list(true_labels)
        # parse the raw string
        parsed = predict(raw_string)
        predicted = [x[1] for x in parsed]
        # test whether the full prediction was correct, if not store for inspection
        if true_labels == predicted:
            n_addresses_correct += 1
        else:
            store.append([raw_string, str(true_labels), str(predicted)])
        # loop over the tokens to check which are correct
        for a, b in zip(predicted, true_labels):
            n_tokens += 1
            if a == b:
                n_tokens_correct += 1
            # check for each token separately and store to a dictionary
            for token in tkns.LABELS:
                if token == b:
                    counts_all[token] = counts_all.get(token, 0) + 1
                    if a == b:
                        counts_correct[token] = counts_correct.get(token, 0) + 1
    print('Holdout Addresses:', n_addresses)
    print('All Tokens Correct:', n_addresses_correct)
    print('Percent of Correct:', float(n_addresses_correct) / n_addresses * 100.)
    print('Correct Tokens:', n_tokens_correct)
    print('Percent of Tokens Correct:', float(n_tokens_correct) / n_tokens * 100.)
    for token in counts_correct.keys():
        print(float(counts_correct[token]) / counts_all[token] * 100., 'percent of', token, 'were correct')
    print('Generating plots')
    plot_performance(counts_correct, counts_all)
    print('Outputting the incorrect ones to a file...')
    # context manager guarantees the file is closed even if a write fails
    # (the original used a bare open/close pair)
    with open(output_file, mode='w') as fh:
        fh.write('raw, true, predicted\n')
        for line in store:
            fh.write('%s,"%s","%s"\n' % (line[0], line[1], line[2]))
if __name__ == "__main__":
    # Script entry point: evaluate the trained CRF model on the holdout set.
    check_performance()
|
{"hexsha": "e3b752c4f39faaf623367894b5edb77d5f7570a1", "size": 12336, "ext": "py", "lang": "Python", "max_stars_repo_path": "DataScience/ProbabilisticParser/tests/test_performance.py", "max_stars_repo_name": "Yasir326/address-index-data", "max_stars_repo_head_hexsha": "f95da1f5ecda911d5d5a83ce396b33837b629bdd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2016-11-30T16:52:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-26T23:49:41.000Z", "max_issues_repo_path": "DataScience/ProbabilisticParser/tests/test_performance.py", "max_issues_repo_name": "Yasir326/address-index-data", "max_issues_repo_head_hexsha": "f95da1f5ecda911d5d5a83ce396b33837b629bdd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 84, "max_issues_repo_issues_event_min_datetime": "2016-11-17T10:46:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T09:09:31.000Z", "max_forks_repo_path": "DataScience/ProbabilisticParser/tests/test_performance.py", "max_forks_repo_name": "Yasir326/address-index-data", "max_forks_repo_head_hexsha": "f95da1f5ecda911d5d5a83ce396b33837b629bdd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-01-26T10:43:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T08:53:54.000Z", "avg_line_length": 29.3016627078, "max_line_length": 120, "alphanum_fraction": 0.6554798962, "include": true, "reason": "import numpy", "num_tokens": 3061}
|
"""Experiment Runner. It's great!"""
from functools import partial
import json
import os
import inspect
import random
import time
import traceback
from multiprocessing import Pool
import copy
import signal
import numpy as np
from tqdm import tqdm
try:
    import ray

    @ray.remote(max_calls=1)
    def ray_eval_fit(pre, fit, post, timeout, out_path, experiment_id, cfg):
        """Wrap eval_fit so it can be dispatched with ray's ``.remote()``.

        eval_fit unpacks a 7-tuple ``(pre, fit, post, timeout, out_path,
        experiment_id, cfg)`` and run_experiments already submits seven
        arguments; the previous signature dropped ``timeout``, which made
        the ray backend fail with a TypeError/unpacking error.
        """
        return eval_fit((pre, fit, post, timeout, out_path, experiment_id, cfg))
except ImportError as error:
    # ray is optional; without it the "ray" backend is simply unavailable.
    ray = None
try:
import malocher
except ImportError as error:
malocher = None
def stacktrace(exception):
    """Print a java-style stack trace (message plus traceback) for *exception*."""
    trace_lines = traceback.format_exception(None, exception, exception.__traceback__)
    # Flush immediately so the trace survives even if the worker process
    # is killed right after the error (see the note in eval_fit).
    print("\n".join(trace_lines),
          flush=True)
def replace_objects(d):
    """Return a JSON-serializable copy of the config dict *d*.

    Values json cannot encode are replaced by readable stand-ins:

    * nested dicts/lists are converted recursively,
    * numpy scalars become native python scalars,
    * numpy arrays are replaced by their key name (they are data, not config),
    * ``functools.partial`` objects become ``"<funcname>_<args><kwargs>"``,
    * other callables/classes become their ``__name__`` (or ``str(v)``),
    * any remaining non-builtin object becomes ``str(v)``.

    The input dict is not modified (a shallow copy is edited).
    """
    d = d.copy()
    for k, v in d.items():
        if isinstance(v, dict):
            d[k] = replace_objects(v)
        elif isinstance(v, list):
            # recurse into each element by wrapping it in a one-entry dict
            d[k] = [replace_objects({"key": vv})["key"] for vv in v]
        elif isinstance(v, np.generic):
            # numpy scalar -> plain python scalar
            d[k] = v.item()
        elif isinstance(v, np.ndarray):
            # arrays (e.g. x_test) would bloat the output; keep only the key
            d[k] = k
        elif isinstance(v, partial):
            d[k] = v.func.__name__ + "_" + "_".join([str(arg) for arg in v.args]) + str(replace_objects(v.keywords))
        elif callable(v) or inspect.isclass(v):
            try:
                d[k] = v.__name__
            except AttributeError:
                # was a bare `except:`; only a missing __name__ (e.g. a
                # callable instance) is expected here
                d[k] = str(v)
        elif isinstance(v, object) and v.__class__.__module__ != 'builtins':
            # any other non-builtin object: fall back to its string form
            d[k] = str(v)
        else:
            d[k] = v
    return d
def get_ctor_arguments(clazz):
    """
    Collect constructor argument names over the whole MRO of *clazz*.

    getfullargspec does not handle inheritance correctly.
    Taken from https://stackoverflow.com/questions/36994217/retrieving-arguments-from-a-class-with-multiple-inheritance
    """
    collected = ['self']
    for base in clazz.__mro__:
        # only classes that define their own __init__ contribute arguments
        if '__init__' not in base.__dict__:
            continue
        spec = inspect.getfullargspec(base)
        collected += spec.args[1:]      # positional args, minus `self`
        collected += spec.kwonlyargs    # keyword-only args
    return collected
class Variation:
    """A hyperparameter that may take one of several values.

    Wraps a list of candidate values; ``generate_configs`` reads
    ``choices`` directly and calls ``get`` to sample one.
    """

    def __init__(self, list_of_choices):
        # attribute name is part of the de-facto interface (read externally)
        self.choices = list_of_choices

    def get(self):
        """Draw one of the stored choices uniformly at random (via numpy)."""
        return np.random.choice(self.choices)
def generate_configs(cfg, n_configs):
    """Resolve configs which include `Variation` parameters.

    Randomly samples combinations of the `Variation` choices found in
    *cfg* (at any nesting depth) until *n_configs* distinct
    configurations were produced.  If the total number of combinations
    is smaller than *n_configs*, only that many are returned.

    Sampling is additionally bounded by a retry budget: ``n_variations``
    only counts choice-list lengths, so duplicate entries inside a
    Variation made it over-estimate the number of *distinct* configs,
    and the old rejection loop then never terminated.
    """
    configs = []

    def n_variations(d):
        # Upper bound on the number of distinct configurations in d.
        n_choices = []
        for val in d.values():
            if isinstance(val, Variation):
                n_choices.append(len(val.choices))
            elif isinstance(val, dict):
                n_choices.append(n_variations(val))
        return np.prod(n_choices)

    def vary_dict(d):
        # Resolve every Variation in d (recursively) to a concrete value.
        new_dict = {}
        for key, val in d.items():
            if isinstance(val, Variation):
                new_dict[key] = val.get()
            elif isinstance(val, dict):
                new_dict[key] = vary_dict(val)
            else:
                new_dict[key] = val
        return new_dict

    possible_variations = n_variations(cfg)
    if possible_variations < n_configs:
        n_configs = possible_variations
    # Retry budget guaranteeing termination even when possible_variations
    # over-estimates the number of distinct configurations.
    max_attempts = 100 * max(int(possible_variations), int(n_configs), 1)
    attempts = 0
    while len(configs) < n_configs and attempts < max_attempts:
        attempts += 1
        new_config = vary_dict(cfg)
        if new_config not in configs:
            configs.append(new_config)
    return configs
def raise_timeout(signum, frame):
    """SIGALRM handler used by eval_fit: unconditionally raises TimeoutError.

    A named function is required here because a signal handler must be a
    callable and a lambda body cannot contain a ``raise`` statement.
    """
    raise TimeoutError()
def eval_fit(config):
    """
    Central internal method of the runner that calls pre, fit and post,
    handles repeated execution of experiments, measures fit time and
    stores results.

    *config* is a 7-tuple ``(pre, fit, post, timeout, out_path,
    experiment_id, cfg)``.  Returns ``(experiment_id, scores,
    out_file_content)`` on success, or ``None`` if any step raised
    (including a timeout) — the stack trace is printed in that case.
    """
    pre, fit, post, timeout, out_path, experiment_id, cfg = config
    # Arm a SIGALRM-based timeout (Unix only, main thread only); expiry
    # surfaces as a TimeoutError caught by the except-clause below.
    if timeout > 0:
        signal.signal(signal.SIGALRM, raise_timeout)
        signal.alarm(timeout)
    try:
        # Make a copy of the model config for all output-related stuff
        # This does not include any fields which hurt the output (e.g. x_test,y_test)
        # but are usually part of the original modelcfg
        # if not verbose:
        #     import warnings
        #     warnings.filterwarnings('ignore')
        readable_cfg = copy.deepcopy(cfg)
        readable_cfg["experiment_id"] = experiment_id
        readable_cfg["out_path"] = out_path
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        # Persist a human-readable copy of this experiment's configuration.
        with open(out_path + "/config.json", 'w') as out:
            out.write(json.dumps(replace_objects(readable_cfg), indent=4))
        scores = {}
        repetitions = cfg.get("repetitions", 1)
        for i in range(repetitions):
            # With more than one repetition, each run writes into its own
            # numbered sub-directory of out_path.
            if repetitions > 1:
                rep_out_path = os.path.join(out_path, str(i))
                if not os.path.exists(rep_out_path):
                    os.makedirs(rep_out_path)
            else:
                rep_out_path = out_path
            # Per-run config handed to pre/fit/post (reserved keys added).
            experiment_cfg = {
                **cfg,
                'experiment_id':experiment_id,
                'out_path':rep_out_path,
                'run_id':i
            }
            # Only the fit call itself is timed; pre-processing is excluded.
            if pre is not None:
                pre_stuff = pre(experiment_cfg)
                start_time = time.time()
                fit_stuff = fit(experiment_cfg, pre_stuff)
                fit_time = time.time() - start_time
            else:
                start_time = time.time()
                fit_stuff = fit(experiment_cfg)
                fit_time = time.time() - start_time
            if post is not None:
                cur_scores = post(experiment_cfg, fit_stuff)
                cur_scores["fit_time"] = fit_time
                # Collect per-repetition metric values as lists keyed by
                # metric name (keys fixed by the first repetition).
                if i == 0:
                    for k in list(cur_scores.keys()):
                        scores[k] = [cur_scores[k]]
                else:
                    for k in list(scores.keys()):
                        scores[k].append(cur_scores[k])
        # Aggregate each metric over the repetitions (snapshot of the keys,
        # since mean_/std_ entries are added while iterating).
        for k in list(scores.keys()):
            scores["mean_" + k] = np.mean(scores[k])
            scores["std_" + k] = np.std(scores[k])
        readable_cfg["scores"] = scores
        out_file_content = json.dumps(replace_objects(readable_cfg), sort_keys=True) + "\n"
        signal.alarm(0)  # disarm the timeout before returning
        return experiment_id, scores, out_file_content
    except Exception as identifier:
        stacktrace(identifier)
        # Ray is sometimes a little bit too quick in killing our processes if something bad happens.
        # In this case we do not see the stack trace which is super annoying. Therefore, we sleep a
        # second to wait until the print has been processed / flushed
        signal.alarm(0)
        time.sleep(1.0)
        return None
def run_experiments(basecfg, cfgs, **kwargs):
    """
    The main API call of the experiment_runner.

    Pass `basecfg` to configure how the experiments are executed and a list
    of `cfgs` to specify each experiment.  See the readme for the available
    basecfg settings and reserved cfg keys.  One JSON line per finished
    experiment is appended to ``<out_path>/results.jsonl``.  Exceptions are
    caught, formatted and printed instead of being propagated.
    """
    # Resolve the backend before entering the try-block: the finally-clause
    # reads it, and resolving it inside the try meant an early failure
    # (e.g. while creating out_path) raised NameError in finally, masking
    # the real error.
    backend = basecfg.get("backend", "single")
    ray_initialized = False
    return_str = ""
    try:
        if "out_path" in basecfg:
            basecfg["out_path"] = os.path.abspath(basecfg["out_path"])
            if not os.path.exists(basecfg["out_path"]):
                os.makedirs(basecfg["out_path"])
            elif os.path.isfile(basecfg["out_path"] + "/results.jsonl"):
                # start every run with a fresh results file
                os.unlink(basecfg["out_path"] + "/results.jsonl")
        verbose = basecfg.get("verbose", True)
        print("Starting {} experiments via {} backend".format(len(cfgs), backend))
        if backend == "ray":
            ray.init(
                address=basecfg.get("address", "auto"),
                _redis_password=basecfg.get("redis_password", None)
            )
            ray_initialized = True
            # Submit all jobs immediately; each remote task carries its
            # resource limits and the same 7 arguments eval_fit expects.
            configurations = [ray_eval_fit.options(
                num_cpus=basecfg.get("num_cpus", 1),
                num_gpus=basecfg.get("num_gpus", 0),
                memory=basecfg.get("max_memory", 1000 * 1024 * 1024)  # 1 GB
            ).remote(
                basecfg.get("pre", None),
                basecfg.get("fit", None),
                basecfg.get("post", None),
                basecfg.get("timeout", 0),
                os.path.join(basecfg["out_path"], str(experiment_id)),
                experiment_id,
                cfg
            ) for experiment_id, cfg in enumerate(cfgs)
            ]
            print("SUBMITTED JOBS, NOW WAITING")
        else:
            # Every other backend receives plain argument tuples for eval_fit.
            configurations = [
                (
                    basecfg.get("pre", None),
                    basecfg.get("fit", None),
                    basecfg.get("post", None),
                    basecfg.get("timeout", 0),
                    os.path.join(basecfg["out_path"], str(experiment_id)),
                    experiment_id,
                    cfg
                ) for experiment_id, cfg in enumerate(cfgs)
            ]
        if backend == "ray":
            # Consume ray futures in completion order, see
            # https://github.com/ray-project/ray/issues/8164
            def to_iterator(configs):
                while configs:
                    result, configs = ray.wait(configs)
                    yield ray.get(result[0])
            random.shuffle(configurations)
            for result in tqdm(to_iterator(configurations), total=len(configurations)):
                if result is not None:
                    _experiment_id, _scores, out_file_content = result
                    with open(basecfg["out_path"] + "/results.jsonl", "a", 1) as out_file:
                        out_file.write(out_file_content)
        elif backend == "malocher":
            malocher_dir = basecfg.get("malocher_dir", ".malocher_dir")
            malocher_machines = basecfg["malocher_machines"]
            malocher_user = basecfg["malocher_user"]
            malocher_port = basecfg.get("malocher_port", 22)
            malocher_key = basecfg.get("malocher_key", "~/.ssh/id_rsa")
            for cfg in configurations:
                malocher.submit(eval_fit, cfg, malocher_dir=malocher_dir)
            results = malocher.process_all(
                malocher_dir=malocher_dir,
                ssh_machines=malocher_machines,
                ssh_username=malocher_user,
                ssh_port=malocher_port,
                ssh_private_key=malocher_key,
            )
            # Unpack into fresh names: the previous code rebound `results`
            # while it was being iterated.
            for _job_id, eval_return in tqdm(results, total=len(configurations), disable=not verbose):
                if eval_return is not None:
                    _experiment_id, _scores, out_file_content = eval_return
                    with open(basecfg["out_path"] + "/results.jsonl", "a", 1) as out_file:
                        out_file.write(out_file_content)
        elif backend == "multiprocessing":
            # Context manager closes the worker pool (previously leaked).
            with Pool(basecfg.get("num_cpus", 1)) as pool:
                for eval_return in tqdm(pool.imap_unordered(eval_fit, configurations), total=len(configurations), disable=not verbose):
                    if eval_return is not None:
                        _experiment_id, _scores, out_file_content = eval_return
                        with open(basecfg["out_path"] + "/results.jsonl", "a", 1) as out_file:
                            out_file.write(out_file_content)
        else:
            # "single": run every experiment sequentially in this process.
            for f in tqdm(configurations, disable=not verbose):
                eval_return = eval_fit(f)
                if eval_return is not None:
                    _experiment_id, _scores, out_file_content = eval_return
                    with open(basecfg["out_path"] + "/results.jsonl", "a", 1) as out_file:
                        out_file.write(out_file_content)
    except Exception as e:
        # Report, never propagate: callers get the formatted trace on stdout.
        return_str = str(e) + "\n"
        return_str += traceback.format_exc() + "\n"
    finally:
        if return_str:
            print(return_str)
        # Shut ray down only if this call actually initialized it.
        if ray_initialized:
            ray.shutdown()
|
{"hexsha": "5b9345594662b4c873c43351ab4076909dfadb62", "size": 12254, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiment_runner/experiment_runner.py", "max_stars_repo_name": "philippjh/experiment_runner", "max_stars_repo_head_hexsha": "e2b1424dfeb9612f92a96ccd96693a6b5556ade5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiment_runner/experiment_runner.py", "max_issues_repo_name": "philippjh/experiment_runner", "max_issues_repo_head_hexsha": "e2b1424dfeb9612f92a96ccd96693a6b5556ade5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiment_runner/experiment_runner.py", "max_forks_repo_name": "philippjh/experiment_runner", "max_forks_repo_head_hexsha": "e2b1424dfeb9612f92a96ccd96693a6b5556ade5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3597560976, "max_line_length": 131, "alphanum_fraction": 0.5679778032, "include": true, "reason": "import numpy", "num_tokens": 2666}
|
------------------------------------------------------------------------
-- Pointwise equalities can be lifted
------------------------------------------------------------------------
module Stream.Pointwise where
open import Codata.Musical.Notation hiding (∞)
open import Stream
open import Stream.Equality
import Stream.Programs as Prog
open Prog hiding (lift; ⟦_⟧)
open import Data.Nat
open import Data.Fin using (Fin; zero; suc)
open import Data.Vec as Vec using (Vec; _∷_)
open import Relation.Binary
open import Relation.Binary.PropositionalEquality
private

  -- Shorthand access to the equivalence-relation structure of the
  -- stream setoid; its `reflexive` field is used in unfold-lemma below.
  module IsEq {A : Set} =
    IsEquivalence (Setoid.isEquivalence (Stream.setoid A))
------------------------------------------------------------------------
-- Definitions
infix 8 _∞
infixr 7 _·_
infix 6 _⟨_⟩_
-- Expressions corresponding to pointwise definitions of streams.
-- Indexed on the number of variables.
-- It is possible to generalise this type, allowing variables to
-- correspond to streams containing elements of arbitrary type, and
-- letting the function arguments of _·_ and _⟨_⟩_ be more general.
-- However, this would complicate the development, so I hesitate to do
-- so without evidence that it would be genuinely useful.
data Pointwise A n : Set where
  var   : (x : Fin n) → Pointwise A n                        -- stream variable
  _∞    : (x : A) → Pointwise A n                            -- constant stream
  _·_   : (f : A → A) (xs : Pointwise A n) → Pointwise A n   -- pointwise map
  -- pointwise combination of two streams with a binary operator
  _⟨_⟩_ : (xs : Pointwise A n)
          (_∙_ : A → A → A)
          (ys : Pointwise A n) →
          Pointwise A n
-- Stream semantics.
-- Interprets an expression as a stream program, given an environment
-- assigning a stream program to every variable.
⟦_⟧ : ∀ {A n} → Pointwise A n → (Vec (Prog A) n → Prog A)
⟦ var x ⟧         ρ = Vec.lookup ρ x
⟦ x ∞ ⟧           ρ = x ∞
⟦ f · xs ⟧        ρ = f · ⟦ xs ⟧ ρ
⟦ xs ⟨ _∙_ ⟩ ys ⟧ ρ = ⟦ xs ⟧ ρ ⟨ _∙_ ⟩ ⟦ ys ⟧ ρ
-- Pointwise semantics.
-- Interprets an expression at a single point, given one element per
-- variable.
⟪_⟫ : ∀ {A n} → Pointwise A n → (Vec A n → A)
⟪ var x ⟫         ρ = Vec.lookup ρ x
⟪ x ∞ ⟫           ρ = x
⟪ f · xs ⟫        ρ = f (⟪ xs ⟫ ρ)
⟪ xs ⟨ _∙_ ⟩ ys ⟫ ρ = ⟪ xs ⟫ ρ ∙ ⟪ ys ⟫ ρ
------------------------------------------------------------------------
-- Some lemmas used below
private
-- lookup is natural.
  -- Mapping f over the environment commutes with variable lookup;
  -- proved by induction on the variable index.
  lookup-nat : ∀ {a b n} {A : Set a} {B : Set b}
               (f : A → B) (x : Fin n) ρ →
               f (Vec.lookup ρ x) ≡ Vec.lookup (Vec.map f ρ) x
  lookup-nat f zero    (x ∷ ρ) = refl
  lookup-nat f (suc i) (x ∷ ρ) = lookup-nat f i ρ
------------------------------------------------------------------------
-- The two semantics above are related via the function lift
private
-- Lifts a pointwise function to a function on stream programs.
  -- The head is f applied to all heads; the tail is produced
  -- corecursively from all tails.
  lift : ∀ {A B n} →
         (Vec A n → B) → Vec (Prog A) n → Prog B
  lift f xs = f (Vec.map headP xs) ≺ ♯ lift f (Vec.map tailP xs)
-- lift is a congruence in its first argument.
  -- Pointwise-equal functions lift to bisimilar stream programs.
  lift-cong : ∀ {A B n} {f g : Vec A n → B} →
              (∀ ρ → f ρ ≡ g ρ) →
              ∀ ρ → lift f ρ ≊ lift g ρ
  lift-cong hyp ρ = hyp (Vec.map headP ρ) ≺
                    ♯ lift-cong hyp (Vec.map tailP ρ)
-- unfold xs ρ is the one-step unfolding of ⟦ xs ⟧ ρ. Note the
-- similarity to lift.
  -- Head computed with the pointwise semantics, tail with the stream
  -- semantics.
  unfold : ∀ {A n} (xs : Pointwise A n) ρ → Prog A
  unfold xs ρ = ⟪ xs ⟫ (Vec.map headP ρ) ≺♯
                ⟦ xs ⟧ (Vec.map tailP ρ)
  -- ⟦ xs ⟧ ρ is bisimilar to its one-step unfolding; by induction on xs.
  unfold-lemma : ∀ {A n} (xs : Pointwise A n) ρ →
                 ⟦ xs ⟧ ρ ≊ unfold xs ρ
  -- Variable case: expose head/tail with ≊-η, then push the maps
  -- through the lookup using lookup-nat.
  unfold-lemma (var x) ρ =
    Vec.lookup ρ x
      ≊⟨ ≊-η (Vec.lookup ρ x) ⟩
    headP (Vec.lookup ρ x) ≺♯ tailP (Vec.lookup ρ x)
      ≊⟨ lookup-nat headP x ρ ≺
         ♯ ≈⇒≅ (IsEq.reflexive
                  (cong Prog.⟦_⟧ (lookup-nat tailP x ρ))) ⟩
    Vec.lookup (Vec.map headP ρ) x ≺♯
    Vec.lookup (Vec.map tailP ρ) x
      ≡⟨ refl ⟩
    unfold (var x) ρ
      ∎
  -- A constant stream is its own unfolding.
  unfold-lemma (x ∞) ρ = x ∞ ∎
  -- Remaining cases follow from the congruences and the induction
  -- hypothesis.
  unfold-lemma (f · xs) ρ =
    f · ⟦ xs ⟧ ρ
      ≊⟨ ·-cong f (⟦ xs ⟧ ρ) (unfold xs ρ) (unfold-lemma xs ρ) ⟩
    f · unfold xs ρ
      ∎
  unfold-lemma (xs ⟨ ∙ ⟩ ys) ρ =
    ⟦ xs ⟧ ρ ⟨ ∙ ⟩ ⟦ ys ⟧ ρ
      ≊⟨ ⟨ ∙ ⟩-cong (⟦ xs ⟧ ρ) (unfold xs ρ) (unfold-lemma xs ρ)
                    (⟦ ys ⟧ ρ) (unfold ys ρ) (unfold-lemma ys ρ) ⟩
    unfold xs ρ ⟨ ∙ ⟩ unfold ys ρ
      ∎
-- The two semantics are related.
  -- The stream semantics coincides (up to bisimilarity) with lifting
  -- the pointwise semantics: unfold one step, then proceed
  -- corecursively on the tails.
  main-lemma : ∀ {A n} (xs : Pointwise A n) →
               ∀ ρ → ⟦ xs ⟧ ρ ≊ lift ⟪ xs ⟫ ρ
  main-lemma xs ρ =
    ⟦ xs ⟧ ρ
      ≊⟨ unfold-lemma xs ρ ⟩
    unfold xs ρ
      ≡⟨ refl ⟩
    ⟪ xs ⟫ (Vec.map headP ρ) ≺♯ ⟦ xs ⟧ (Vec.map tailP ρ)
      ≊⟨ refl ≺ ♯ main-lemma xs (Vec.map tailP ρ) ⟩
    lift ⟪ xs ⟫ ρ
      ∎
------------------------------------------------------------------------
-- To prove that two streams which are defined pointwise are equal, it
-- is enough to reason about a single (arbitrary) point
-- This function is a bit awkward to use, since the user has to come
-- up with a suitable environment manually. The alternative function
-- pointwise below may be slightly easier to use.
-- If two expressions agree pointwise in every environment, their
-- stream interpretations are bisimilar: transport both sides to
-- `lift` form via main-lemma and use lift-cong in the middle.
pointwise' : ∀ {A n} (xs ys : Pointwise A n) →
             (∀ ρ → ⟪ xs ⟫ ρ ≡ ⟪ ys ⟫ ρ) →
             (∀ ρ → ⟦ xs ⟧ ρ ≊ ⟦ ys ⟧ ρ)
pointwise' xs ys hyp ρ =
  ⟦ xs ⟧ ρ
    ≊⟨ main-lemma xs ρ ⟩
  lift ⟪ xs ⟫ ρ
    ≊⟨ lift-cong hyp ρ ⟩
  lift ⟪ ys ⟫ ρ
    ≊⟨ ≅-sym (main-lemma ys ρ) ⟩
  ⟦ ys ⟧ ρ
    ∎
open import Data.Vec.N-ary
-- Applies the function to all possible variables.
app : ∀ {A} n →
      N-ary n (Pointwise A n) (Pointwise A n) → Pointwise A n
-- instantiate f at var 0 … var (n-1)
app n f = f $ⁿ Vec.map var (Vec.allFin n)
-- The type signature of this function may be a bit daunting, but once
-- n, f and g are instantiated with well-behaved concrete values the
-- remaining type evaluates nicely.
-- Curried interface to pointwise': a proof that the two n-ary pointwise
-- functions are equal yields bisimilarity of the corresponding n-ary
-- stream functions.
pointwise
  : ∀ {A} n (f g : N-ary n (Pointwise A n) (Pointwise A n)) →
    Eq n _≡_ (curryⁿ ⟪ app n f ⟫) (curryⁿ ⟪ app n g ⟫) →
    Eq n _≊_ (curryⁿ ⟦ app n f ⟧) (curryⁿ ⟦ app n g ⟧)
pointwise n f g hyp =
  curryⁿ-cong _≊_ ⟦ app n f ⟧ ⟦ app n g ⟧
    (pointwise' (app n f) (app n g)
       (curryⁿ-cong⁻¹ _≡_ ⟪ app n f ⟫ ⟪ app n g ⟫ hyp))
------------------------------------------------------------------------
-- Some examples
private

  -- 0 variables: mapping suc over the constant 0-stream gives the 1-stream.
  example₁ : suc · 0 ∞ ≊ 1 ∞
  example₁ = pointwise 0 (suc · 0 ∞) (1 ∞) refl

  -- 1 variable: adding the 1-stream is the same as mapping suc.
  example₂ : ∀ s → suc · s ≊ 1 ∞ ⟨ _+_ ⟩ s
  example₂ = pointwise 1 (λ s → suc · s)
                         (λ s → 1 ∞ ⟨ _+_ ⟩ s)
                         (λ _ → refl)

  -- 3 variables: associativity of pointwise addition, directly from
  -- +-assoc on natural numbers.
  example₃ : ∀ s t u →
             (s ⟨ _+_ ⟩ t) ⟨ _+_ ⟩ u ≊ s ⟨ _+_ ⟩ (t ⟨ _+_ ⟩ u)
  example₃ = pointwise 3 (λ s t u → (s ⟨ _+_ ⟩ t) ⟨ _+_ ⟩ u)
                         (λ s t u → s ⟨ _+_ ⟩ (t ⟨ _+_ ⟩ u))
                         +-assoc
    where
    open import Data.Nat.Properties
|
{"hexsha": "a967b51db6b7f8f8af079c1b2de4aa3612734dcf", "size": 6430, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Stream/Pointwise.agda", "max_stars_repo_name": "nad/codata", "max_stars_repo_head_hexsha": "1b90445566df0d3b4ba6e31bd0bac417b4c0eb0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-13T14:48:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-13T14:48:45.000Z", "max_issues_repo_path": "Stream/Pointwise.agda", "max_issues_repo_name": "nad/codata", "max_issues_repo_head_hexsha": "1b90445566df0d3b4ba6e31bd0bac417b4c0eb0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Stream/Pointwise.agda", "max_forks_repo_name": "nad/codata", "max_forks_repo_head_hexsha": "1b90445566df0d3b4ba6e31bd0bac417b4c0eb0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5196078431, "max_line_length": 72, "alphanum_fraction": 0.4973561431, "num_tokens": 2348}
|
function [ A, T, error, alignedShape ] = AlignShapesWithScale( alignFrom, alignTo )
%ALIGNSHAPESWITHSCALE Similarity-align one point set onto another.
%   [A, T, error, alignedShape] = ALIGNSHAPESWITHSCALE(alignFrom, alignTo)
%   computes a scaled rotation A and translation T such that
%   alignedShape = alignFrom * A' + T approximates alignTo.  Both inputs
%   are numPoints-by-dim matrices with corresponding rows.  error is the
%   RMS point-to-point distance between alignedShape and alignTo.
%   NOTE(review): the output named "error" shadows MATLAB's built-in
%   error function inside this scope; kept for interface compatibility.
    numPoints = size(alignFrom,1);
    % centre both shapes on their centroids
    meanFrom = mean(alignFrom);
    meanTo = mean(alignTo);
    alignFromMeanNormed = bsxfun(@minus, alignFrom, meanFrom);
    alignToMeanNormed = bsxfun(@minus, alignTo, meanTo);
    % scale now: RMS size of each centred shape, and the relative scale
    sFrom = sqrt(sum(alignFromMeanNormed(:).^2)/numPoints);
    sTo = sqrt(sum(alignToMeanNormed(:).^2)/numPoints);
    s = sTo / sFrom;
    alignFromMeanNormed = alignFromMeanNormed/sFrom;
    alignToMeanNormed = alignToMeanNormed/sTo;
    % rotation between the unit-scaled shapes (presumably the Kabsch
    % optimal rotation -- see AlignShapesKabsch; its t output is unused)
    [R, t] = AlignShapesKabsch(alignFromMeanNormed, alignToMeanNormed);
    A = s * R;
    % translation that maps the rotated/scaled source onto the target
    aligned = (A * alignFrom')';
    T = mean(alignTo - aligned);
    alignedShape = bsxfun(@plus, aligned, T);
    error = sqrt(mean(sum((alignedShape - alignTo).^2,2)));
end
|
{"author": "TadasBaltrusaitis", "repo": "OpenFace", "sha": "3d4b5cf8d96138be42bed229447f36cbb09a5a29", "save_path": "github-repos/MATLAB/TadasBaltrusaitis-OpenFace", "path": "github-repos/MATLAB/TadasBaltrusaitis-OpenFace/OpenFace-3d4b5cf8d96138be42bed229447f36cbb09a5a29/matlab_version/PDM_helpers/AlignShapesWithScale.m"}
|
[STATEMENT]
lemma finite_fold_rbt_fold_eq:
assumes "comp_fun_commute f"
shows "Finite_Set.fold f A (set (RBT.entries t)) = RBT.fold (curry f) t A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Finite_Set.fold f A (set (RBT.entries t)) = RBT.fold (curry f) t A
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Finite_Set.fold f A (set (RBT.entries t)) = RBT.fold (curry f) t A
[PROOF STEP]
interpret comp_fun_commute: comp_fun_commute f
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. comp_fun_commute f
[PROOF STEP]
by (fact assms)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Finite_Set.fold f A (set (RBT.entries t)) = RBT.fold (curry f) t A
[PROOF STEP]
have *: "remdups (RBT.entries t) = RBT.entries t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. remdups (RBT.entries t) = RBT.entries t
[PROOF STEP]
using distinct_entries distinct_map
[PROOF STATE]
proof (prove)
using this:
distinct (map fst (RBT.entries ?t))
distinct (map ?f ?xs) = (distinct ?xs \<and> inj_on ?f (set ?xs))
goal (1 subgoal):
1. remdups (RBT.entries t) = RBT.entries t
[PROOF STEP]
by (auto intro: distinct_remdups_id)
[PROOF STATE]
proof (state)
this:
remdups (RBT.entries t) = RBT.entries t
goal (1 subgoal):
1. Finite_Set.fold f A (set (RBT.entries t)) = RBT.fold (curry f) t A
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Finite_Set.fold f A (set (RBT.entries t)) = RBT.fold (curry f) t A
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
comp_fun_commute f
goal (1 subgoal):
1. Finite_Set.fold f A (set (RBT.entries t)) = RBT.fold (curry f) t A
[PROOF STEP]
by (auto simp: fold_def_alt comp_fun_commute.fold_set_fold_remdups *)
[PROOF STATE]
proof (state)
this:
Finite_Set.fold f A (set (RBT.entries t)) = RBT.fold (curry f) t A
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 844, "file": null, "length": 10}
|
module par-swap.confluent where
open import par-swap
open import par-swap.properties
open import Data.Nat using (_+_ ; _≤′_ ; _<′_ ; suc ; zero ; ≤′-refl)
open import Esterel.Lang.CanFunction
open import utility
open import Esterel.Lang
open import Esterel.Context
open import Data.Product
open import Data.Sum
open import Data.List using ([] ; [_] ; _∷_ ; List ; _++_)
open import Relation.Binary.PropositionalEquality
using (_≡_ ; refl ; sym ; subst ; cong ; trans ;
module ≡-Reasoning ; cong₂ ; subst₂ ; inspect)
open import sn-calculus
open import context-properties -- get view, E-views
open import Esterel.Lang.Binding
open import binding-preserve using (sn⟶-maintains-binding ; sn⟶*-maintains-binding)
open import sn-calculus-props
-- The par-swap relation is confluent (given correct binding): two
-- ∥R-steps from the same term can be joined.  The proof is a large
-- case analysis on where the two swapped redexes sit in the context:
--   * both at the hole: trivially joined;
--   * in disjoint branches: the steps commute, applied in either order;
--   * in the same sub-term: recurse with the corresponding CB sub-proof
--     and re-wrap the result with Context1-∥R.
∥R-confluent : CB-CONFLUENT _∥R_
∥R-confluent CBp
  (∥Rstep dchole)
  (∥Rstep dchole) =
  _ , ∥Rstep dchole , ∥Rstep dchole
∥R-confluent CBp
  (∥Rstep dchole)
  (∥Rstep (dcpar₁ d≐C⟦p∥q⟧c₂)) =
  _ , ∥Rstep (dcpar₂ d≐C⟦p∥q⟧c₂) , ∥Rstep dchole
∥R-confluent CBp
  (∥Rstep dchole)
  (∥Rstep (dcpar₂ d≐C⟦p∥q⟧c₂)) =
  _ , ∥Rstep (dcpar₁ d≐C⟦p∥q⟧c₂) , ∥Rstep dchole
∥R-confluent CBp
  (∥Rstep (dcpar₁ d≐C⟦p∥q⟧c₁))
  (∥Rstep dchole) =
  _ , ∥Rstep dchole , ∥Rstep (dcpar₂ d≐C⟦p∥q⟧c₁)
∥R-confluent (CBpar CBs _ _ _ _ _)
  (∥Rstep {c₁ ∷ C₁} (dcpar₁ d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcpar₁ d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent CBp
  (∥Rstep (dcpar₁ d≐C⟦p∥q⟧c₁))
  (∥Rstep (dcpar₂ d≐C⟦p∥q⟧c₂)) =
  _ , ∥Rstep (dcpar₂ d≐C⟦p∥q⟧c₂) , ∥Rstep (dcpar₁ d≐C⟦p∥q⟧c₁)
∥R-confluent CBp
  (∥Rstep (dcpar₂ d≐C⟦p∥q⟧c₁))
  (∥Rstep dchole) =
  _ , ∥Rstep dchole , ∥Rstep (dcpar₁ d≐C⟦p∥q⟧c₁)
∥R-confluent CBp
  (∥Rstep (dcpar₂ d≐C⟦p∥q⟧c₁))
  (∥Rstep (dcpar₁ d≐C⟦p∥q⟧c₂)) =
  _ , ∥Rstep (dcpar₁ d≐C⟦p∥q⟧c₂) , ∥Rstep (dcpar₂ d≐C⟦p∥q⟧c₁)
∥R-confluent (CBpar _ CBs _ _ _ _)
  (∥Rstep {c₁ ∷ C₁} (dcpar₂ d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcpar₂ d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent (CBseq CBs _ _)
  (∥Rstep {c₁ ∷ C₁} (dcseq₁ d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcseq₁ d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent CBp
  (∥Rstep (dcseq₁ d≐C⟦p∥q⟧c₁))
  (∥Rstep (dcseq₂ d≐C⟦p∥q⟧c₂)) =
  _ , ∥Rstep (dcseq₂ d≐C⟦p∥q⟧c₂) , ∥Rstep (dcseq₁ d≐C⟦p∥q⟧c₁)
∥R-confluent CBp
  (∥Rstep (dcseq₂ d≐C⟦p∥q⟧c₁))
  (∥Rstep (dcseq₁ d≐C⟦p∥q⟧c₂)) =
  _ , ∥Rstep (dcseq₁ d≐C⟦p∥q⟧c₂) , ∥Rstep (dcseq₂ d≐C⟦p∥q⟧c₁)
∥R-confluent (CBseq _ CBs _)
  (∥Rstep {c₁ ∷ C₁} (dcseq₂ d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcseq₂ d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent (CBsusp CBs _)
  (∥Rstep {c₁ ∷ C₁} (dcsuspend d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcsuspend d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent (CBtrap CBs)
  (∥Rstep {c₁ ∷ C₁} (dctrap d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dctrap d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent (CBsig CBs)
  (∥Rstep {c₁ ∷ C₁} (dcsignl d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcsignl d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent (CBpresent CBs _)
  (∥Rstep {c₁ ∷ C₁} (dcpresent₁ d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcpresent₁ d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent CBp
  (∥Rstep (dcpresent₁ d≐C⟦p∥q⟧c₁))
  (∥Rstep (dcpresent₂ d≐C⟦p∥q⟧c₂)) =
  _ , ∥Rstep (dcpresent₂ d≐C⟦p∥q⟧c₂) , ∥Rstep (dcpresent₁ d≐C⟦p∥q⟧c₁)
∥R-confluent CBp
  (∥Rstep (dcpresent₂ d≐C⟦p∥q⟧c₁))
  (∥Rstep (dcpresent₁ d≐C⟦p∥q⟧c₂)) =
  _ , ∥Rstep (dcpresent₁ d≐C⟦p∥q⟧c₂) , ∥Rstep (dcpresent₂ d≐C⟦p∥q⟧c₁)
∥R-confluent (CBpresent _ CBs)
  (∥Rstep {c₁ ∷ C₁} (dcpresent₂ d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcpresent₂ d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent (CBloop CBs _)
  (∥Rstep {c₁ ∷ C₁} (dcloop d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcloop d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent (CBloopˢ CBs _ _ _)
  (∥Rstep {c₁ ∷ C₁} (dcloopˢ₁ d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcloopˢ₁ d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent CBp
  (∥Rstep (dcloopˢ₁ d≐C⟦p∥q⟧c₁))
  (∥Rstep (dcloopˢ₂ d≐C⟦p∥q⟧c₂)) =
  _ , ∥Rstep (dcloopˢ₂ d≐C⟦p∥q⟧c₂) , ∥Rstep (dcloopˢ₁ d≐C⟦p∥q⟧c₁)
∥R-confluent CBp
  (∥Rstep (dcloopˢ₂ d≐C⟦p∥q⟧c₁))
  (∥Rstep (dcloopˢ₁ d≐C⟦p∥q⟧c₂)) =
  _ , ∥Rstep (dcloopˢ₁ d≐C⟦p∥q⟧c₂) , ∥Rstep (dcloopˢ₂ d≐C⟦p∥q⟧c₁)
∥R-confluent (CBloopˢ _ CBs _ _)
  (∥Rstep {c₁ ∷ C₁} (dcloopˢ₂ d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcloopˢ₂ d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent (CBshared CBs)
  (∥Rstep {c₁ ∷ C₁} (dcshared d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcshared d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent (CBvar CBs)
  (∥Rstep {c₁ ∷ C₁} (dcvar d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcvar d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent (CBif CBs _)
  (∥Rstep {c₁ ∷ C₁} (dcif₁ d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcif₁ d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent _
  (∥Rstep (dcif₁ d≐C⟦p∥q⟧c₁))
  (∥Rstep (dcif₂ d≐C⟦p∥q⟧c₂)) =
  _ , ∥Rstep (dcif₂ d≐C⟦p∥q⟧c₂) , ∥Rstep (dcif₁ d≐C⟦p∥q⟧c₁)
∥R-confluent CBp
  (∥Rstep (dcif₂ d≐C⟦p∥q⟧c₁))
  (∥Rstep (dcif₁ d≐C⟦p∥q⟧c₂)) =
  _ , ∥Rstep (dcif₁ d≐C⟦p∥q⟧c₂) , ∥Rstep (dcif₂ d≐C⟦p∥q⟧c₁)
∥R-confluent (CBif _ CBs)
  (∥Rstep {c₁ ∷ C₁} (dcif₂ d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcif₂ d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
∥R-confluent (CBρ CBs)
  (∥Rstep {c₁ ∷ C₁} (dcenv d≐C⟦p∥q⟧c₁))
  (∥Rstep {c₂ ∷ C₂} (dcenv d≐C⟦p∥q⟧c₂))
  with ∥R-confluent CBs (∥Rstep d≐C⟦p∥q⟧c₁) (∥Rstep d≐C⟦p∥q⟧c₂)
... | z , C₁⟦q₁∥p₁⟧c∥Rz , C₂⟦q₂∥p₂⟧c∥Rz =
  _ , Context1-∥R c₁ C₁⟦q₁∥p₁⟧c∥Rz , Context1-∥R c₂ C₂⟦q₂∥p₂⟧c∥Rz
-- A single ∥R-step preserves correct binding: extract the CB proof of
-- the swapped parallel at the context, flip it (the distinctness
-- obligations are symmetric), and substitute it back into the context.
∥R-maintains-binding :
  ∀ {p BV FV q} →
  CorrectBinding p BV FV →
  p ∥R q →
  CorrectBinding q (BVars q) (FVars q)
∥R-maintains-binding = thm where
  thm :
    ∀ {p BV FV q} →
    CorrectBinding p BV FV →
    p ∥R q →
    CorrectBinding q (BVars q) (FVars q)
  thm{p}{BVp}{FVp}{q} CBp (∥Rstep{C}{r₁}{r₂}{d} d≐C⟦p∥q⟧c)
    with binding-extractc' CBp d≐C⟦p∥q⟧c
  ... | (BVr₁∥r₂ , FVr₁∥r₂) ,
        CBpar{.r₁}{.r₂}{BVr₁}{FVr₁}{BVr₂}{FVr₂}
          CBr₁ CBr₂ BVr₁≠BVr₂ FVr₁≠BVr₂ BVr₁≠FVr₂ Xr₁≠Xr₂
    with CBpar{r₂}{r₁}{BVr₂}{FVr₂}{BVr₁}{FVr₁}
           CBr₂ CBr₁
           (distinct-sym BVr₁≠BVr₂) (distinct-sym BVr₁≠FVr₂)
           (distinct-sym FVr₁≠BVr₂) (dist'-sym Xr₁≠Xr₂)
  ... | CBr₂∥r₁
    with binding-substc' CBp d≐C⟦p∥q⟧c
           (CBpar CBr₁ CBr₂ BVr₁≠BVr₂ FVr₁≠BVr₂
              BVr₁≠FVr₂ Xr₁≠Xr₂)
           (∪-comm-⊆-right BVr₁ ⊆-refl)
           (∪-comm-⊆-right FVr₁ ⊆-refl)
           CBr₂∥r₁
  ... | (BVq , FVq) , (_ , CBq)
    with BVFVcorrect _ BVq FVq CBq
  ... | (refl , refl) = CBq
-- Lift preservation of correct binding to the reflexive-transitive
-- closure, by induction on the step sequence.
∥R*-maintains-binding :
  ∀ {p BV FV q} →
  CorrectBinding p BV FV →
  p ∥R* q →
  CorrectBinding q (BVars q) (FVars q)
∥R*-maintains-binding = thm where
  thm : ∀ {p BV FV q} →
        CorrectBinding p BV FV →
        p ∥R* q →
        CorrectBinding q (BVars q) (FVars q)
  thm CBp ∥R0 with BVFVcorrect _ _ _ CBp
  ... | refl , refl = CBp
  thm CBp (∥Rn p∥Rq₁ q₁∥R*q)
    with ∥R-maintains-binding CBp p∥Rq₁
  ... | CBq₁ = thm CBq₁ q₁∥R*q
-- Semi-confluence (a "strip lemma"): one step against many steps can
-- be joined, using single-step confluence at each stage.
∥R*-semi-confluent :
  ∀ {p q r BV FV} ->
  CorrectBinding p BV FV ->
  p ∥R q ->
  p ∥R* r ->
  ∃ λ {z → (q ∥R* z × r ∥R* z)}
∥R*-semi-confluent CBp p∥Rq ∥R0 = _ , ∥R0 , (∥Rn p∥Rq ∥R0)
∥R*-semi-confluent CBp p∥Rq (∥Rn p∥Rq₁ q₁∥R*r)
  with ∥R-confluent CBp p∥Rq p∥Rq₁
... | z , q∥Rz , q₁∥Rz
  with ∥R*-semi-confluent (∥R-maintains-binding CBp p∥Rq₁) q₁∥Rz q₁∥R*r
... | z₁ , z∥R*z₁ , r∥R*z₁ = z₁ , ∥Rn q∥Rz z∥R*z₁ , r∥R*z₁
-- Full confluence of ∥R*: peel one step off the left derivation,
-- join it with the right one via semi-confluence, then recurse and
-- concatenate the resulting sequences.
∥R*-confluent : CB-CONFLUENT _∥R*_
∥R*-confluent CBp ∥R0 p∥R*r = _ , p∥R*r , ∥R0
∥R*-confluent CBp (∥Rn p∥Rp₁ p₁∥R*q) p∥R*r
  with ∥R*-semi-confluent CBp p∥Rp₁ p∥R*r
... | z₁ , p₁∥R*z₁ , r∥R*z₁
  with ∥R*-confluent (∥R-maintains-binding CBp p∥Rp₁) p₁∥R*q p₁∥R*z₁
... | z , q∥R*z , z₁∥R*z = z , q∥R*z , ∥R*-concat r∥R*z₁ z₁∥R*z
|
{"hexsha": "32c6ca63d810cc016f5eae4322931bd8eb0bea2c", "size": 9893, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "agda/par-swap/confluent.agda", "max_stars_repo_name": "florence/esterel-calculus", "max_stars_repo_head_hexsha": "4340bef3f8df42ab8167735d35a4cf56243a45cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-04-16T10:58:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-01T03:59:31.000Z", "max_issues_repo_path": "agda/par-swap/confluent.agda", "max_issues_repo_name": "florence/esterel-calculus", "max_issues_repo_head_hexsha": "4340bef3f8df42ab8167735d35a4cf56243a45cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "agda/par-swap/confluent.agda", "max_forks_repo_name": "florence/esterel-calculus", "max_forks_repo_head_hexsha": "4340bef3f8df42ab8167735d35a4cf56243a45cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-15T20:02:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-15T20:02:49.000Z", "avg_line_length": 38.64453125, "max_line_length": 83, "alphanum_fraction": 0.5830385121, "num_tokens": 6670}
|
import numpy as np
import os
import pickle
import gzip
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from pybasicbayes.util.text import progprint_xrange
from pyhawkes.models import \
DiscreteTimeNetworkHawkesModelGammaMixture, \
DiscreteTimeStandardHawkesModel
if __name__ == "__main__":
    # Demo: fit a network Hawkes model with Gibbs sampling and compare
    # link-prediction accuracy against a BFGS-initialized standard model.
    # Fix the RNG seed so the Gibbs chain is reproducible across runs.
    seed = 11223344
    print("Setting seed to ", seed)
    np.random.seed(seed)

    ###########################################################
    # Load some example data.
    # See data/synthetic/generate.py to create more.
    ###########################################################
    data_path = os.path.join("data", "synthetic", "synthetic_K20_C4_T10000.pkl.gz")
    with gzip.open(data_path, 'r') as f:
        # Pickle is expected to contain (spike-count array S, generating model).
        S, true_model = pickle.load(f)

    # T: number of time bins; K, B, dt, dt_max: copied from the true model
    # so the test model is parameterized identically.
    T = S.shape[0]
    K = true_model.K
    B = true_model.B
    dt = true_model.dt
    dt_max = true_model.dt_max

    ###########################################################
    # Initialize with MAP estimation on a standard Hawkes model
    ###########################################################
    init_with_map = True
    if init_with_map:
        init_len = T
        print("Initializing with BFGS on first ", init_len, " time bins.")
        init_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, dt_max=dt_max, B=B,
                                                     alpha=1.0, beta=1.0)
        init_model.add_data(S[:init_len, :])
        init_model.initialize_to_background_rate()
        init_model.fit_with_bfgs()
    else:
        init_model = None

    ###########################################################
    # Create a test weak spike-and-slab model
    ###########################################################
    # Copy the network hypers.
    # Give the test model p, but not c, v, or m
    # NOTE(review): network_hypers is built and modified here but never
    # passed to the constructor below — confirm whether that is intended.
    network_hypers = true_model.network_hypers.copy()
    network_hypers['v'] = None
    test_model = DiscreteTimeNetworkHawkesModelGammaMixture(K=K, dt=dt, dt_max=dt_max, B=B,
                                                            basis_hypers=true_model.basis_hypers,
                                                            bkgd_hypers=true_model.bkgd_hypers,
                                                            impulse_hypers=true_model.impulse_hypers,
                                                            weight_hypers=true_model.weight_hypers)
    test_model.add_data(S)

    # Initialize with the standard model parameters
    if init_model is not None:
        test_model.initialize_with_standard_model(init_model)

    ###########################################################
    # Fit the test model with Gibbs sampling
    ###########################################################
    N_samples = 500
    samples = []
    lps = []
    for itr in progprint_xrange(N_samples):
        # Record log probability and a snapshot *before* each resample step.
        lps.append(test_model.log_probability())
        samples.append(test_model.copy_sample())
        test_model.resample_model()

    ###########################################################
    # Analyze the samples
    ###########################################################
    N_samples = len(samples)
    # Stack per-sample parameters: adjacency A, weights W, impulses g, bias lambda0.
    A_samples = np.array([s.weight_model.A for s in samples])
    W_samples = np.array([s.weight_model.W for s in samples])
    g_samples = np.array([s.impulse_model.g for s in samples])
    lambda0_samples = np.array([s.bias_model.lambda0 for s in samples])
    lps = np.array(lps)

    # Posterior means over the second half of the chain (first half treated
    # as burn-in).
    # NOTE(review): g_mean and lambda0_mean are computed but not used below.
    offset = N_samples // 2
    A_mean = A_samples[offset:, ...].mean(axis=0)
    W_mean = W_samples[offset:, ...].mean(axis=0)
    g_mean = g_samples[offset:, ...].mean(axis=0)
    lambda0_mean = lambda0_samples[offset:, ...].mean(axis=0)

    # Trace plot of the log probability over Gibbs iterations.
    plt.figure()
    plt.plot(np.arange(N_samples), lps, 'k')
    plt.xlabel("Iteration")
    plt.ylabel("Log probability")
    plt.show()

    # Compute the link prediction accuracy curves
    # NOTE(review): assumes init_model is not None (init_with_map is True above).
    auc_init = roc_auc_score(true_model.weight_model.A.ravel(),
                             init_model.W.ravel())
    auc_A_mean = roc_auc_score(true_model.weight_model.A.ravel(),
                               A_mean.ravel())
    auc_W_mean = roc_auc_score(true_model.weight_model.A.ravel(),
                               W_mean.ravel())

    # Per-sample AUC plus horizontal reference lines for the posterior-mean
    # and initialization AUCs.
    aucs = []
    for A in A_samples:
        aucs.append(roc_auc_score(true_model.weight_model.A.ravel(), A.ravel()))

    plt.figure()
    plt.plot(aucs, '-r')
    plt.plot(auc_A_mean * np.ones_like(aucs), '--r')
    plt.plot(auc_W_mean * np.ones_like(aucs), '--b')
    plt.plot(auc_init * np.ones_like(aucs), '--k')
    plt.xlabel("Iteration")
    plt.ylabel("Link prediction AUC")
    plt.show()

    plt.ioff()
    plt.show()
|
{"hexsha": "f1103638ea31a1190bfcd9ada8eb876eec8d886c", "size": 4725, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/inference/gibbs_demo.py", "max_stars_repo_name": "thonic/pyhawkes", "max_stars_repo_head_hexsha": "99804deb9ea22ba3e1a99584420722abdf8eb56b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/inference/gibbs_demo.py", "max_issues_repo_name": "thonic/pyhawkes", "max_issues_repo_head_hexsha": "99804deb9ea22ba3e1a99584420722abdf8eb56b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/inference/gibbs_demo.py", "max_forks_repo_name": "thonic/pyhawkes", "max_forks_repo_head_hexsha": "99804deb9ea22ba3e1a99584420722abdf8eb56b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0687022901, "max_line_length": 101, "alphanum_fraction": 0.5257142857, "include": true, "reason": "import numpy", "num_tokens": 1027}
|
# Copyright (c) 2021. Nicolai Oswald
# Copyright (c) 2021. University of Edinburgh
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import networkx as nx
from antlr3.tree import CommonTree
from networkx import DiGraph
from typing import List
from Debug.Monitor.ClassDebug import Debug
from DataObjects.ClassMultiDict import MultiDict
from Parser.NetworkxParser.TransTreeGeneration.ClassProtoTransGraphObject import ProtoTransGraphObject
from Parser.NetworkxParser.TransTreeGeneration.ClassProcessTree import ProcessProgramFlowTree
from Debug.Graphv.ParserNetworkxGraph import ParserPCCGraph
from Debug.Monitor.MakeDir import make_dir, dir_up
## ProcessTransFlowTree
#
# Takes the ProcessProgramFlowTree and clusters single operations into ssp_transitions
# Dependency: ProcessProgramFlowTree, ProtoTransGraphObject, CommonTree
class ProcessTransFlowTree(ProcessProgramFlowTree):
    """Cluster the single operations of a process program-flow tree into
    transaction-level objects (ProtoTransGraphObject) and connect them in
    ``trans_object_graph`` (a networkx DiGraph).

    Dependencies: ProcessProgramFlowTree, ProtoTransGraphObject, CommonTree.
    """

    # Prefix used for the debug-graph output directory and graph labels.
    transaction_tree_label = "Transaction_Tree_"

    def __init__(self, process_node: CommonTree, dbg_graph: bool = False):
        """Build the transaction graph for one process subtree.

        :param process_node: ANTLR parse-tree node of the process definition.
        :param dbg_graph: if True, emit debug graph renderings.
        """
        ProcessProgramFlowTree.__init__(self, process_node, dbg_graph)
        Debug.__init__(self)
        self.dbg_graph = dbg_graph
        self.trans_object_graph = DiGraph()
        # Change the output directory for the graph
        # (label minus the trailing underscore).
        make_dir(self.transaction_tree_label[0:-1])
        self.transaction_label: str = None
        self.start_state_str_id: str = None
        # Update the transition label for the parsing graph output
        self.label_transaction_tree()
        # Map a trigger guard to all existing ProtoTransGraphObjects that
        # start (resp. end) with that guard.
        self.guard_start_trans_object_map: MultiDict = MultiDict()
        self.guard_end_trans_object_map: MultiDict = MultiDict()
        # NOTE(review): return value discarded — presumably called for the
        # "single start node" sanity check inside; confirm.
        self.get_start_node_process_tree()
        self.gen_object_transitions()
        ParserPCCGraph.debug_process_graph(self.trans_object_graph,
                                           self.transaction_tree_label + self.transaction_label,
                                           self.dbg_graph)
        # Reset current graph directory
        dir_up()

    def gen_object_transitions(self):
        """Seed the transaction graph with the entry node and recurse."""
        start_node = self.get_start_node_process_tree()
        # Default final state is the first child; a third child (if present)
        # overrides it.
        final_state_str = str(start_node.getChildren()[0])
        if len(start_node.getChildren()) > 2:
            final_state_str = str(start_node.getChildren()[2])
        # Add base start node to transition graph
        graph_entry_node = ProtoTransGraphObject(None, start_node, final_state_str)
        self.trans_object_graph.add_node(graph_entry_node)
        self.guard_end_trans_object_map[start_node] = graph_entry_node
        # Find next node tree
        self.gen_next_transition(start_node, final_state_str)

    def gen_next_transition(self, start_node: CommonTree, final_state_str: str):
        """Recursively extend the transaction graph from ``start_node``.

        Recursion terminates when no further terminal nodes are reachable.
        """
        terminal_nodes = self.get_next_terminals_process_tree(start_node)
        terminal_node_final_state_dict = self.get_paths(start_node, terminal_nodes, final_state_str)
        for terminal_node in terminal_node_final_state_dict:
            # Iterate over multidict
            for final_state_entry in terminal_node_final_state_dict[terminal_node]:
                self.gen_next_transition(terminal_node, final_state_entry)

    def get_paths(self, start_node: CommonTree, terminal_nodes: List[CommonTree],
                  prev_final_state_str: str) -> MultiDict:
        """Turn each guard-free path from ``start_node`` to a terminal node
        into a ProtoTransGraphObject and register it in the graph.

        :returns: MultiDict mapping each terminal node to the final-state
            string(s) reached along the accepted paths.
        """
        final_state_dict: MultiDict = MultiDict()
        for terminal_node in terminal_nodes:
            paths: List[List[CommonTree]] = list(nx.all_simple_paths(self.process_tree, start_node, terminal_node))
            for path in paths:
                # Avoid concurrent paths that have eventually a common terminal node, but other guards exist in the path
                if self.k_guard in [str(pcc_object) for pcc_object in path[1:-1]]:
                    continue
                if self.k_event_ack in [str(pcc_object) for pcc_object in path[1:-1]]:
                    continue
                # Terminal node
                new_node_path = path
                next_guard = None
                # If non terminal node, then shorten path and set next guard to last path element
                if str(path[-1]) == self.k_guard or str(path[-1]) == self.k_event_ack:
                    new_node_path = path[0:-1]
                    next_guard = path[-1]
                next_final_state = self.check_final_state_assignment(new_node_path, prev_final_state_str)
                final_state_dict[terminal_node] = next_final_state
                # Reuse an equivalent existing transition object if one exists.
                new_object = self.find_equivalent_trans_object(ProtoTransGraphObject(new_node_path,
                                                                                     next_guard,
                                                                                     next_final_state))
                self.transition_graph_add_node(new_object)
        return final_state_dict

    def find_equivalent_trans_object(self, new_object: ProtoTransGraphObject) -> ProtoTransGraphObject:
        """Return an already-registered transition object equal to
        ``new_object`` (merging its next guards), or register and return
        ``new_object`` itself.
        """
        if new_object.start_guard in self.guard_start_trans_object_map:
            for ref_object in self.guard_start_trans_object_map[new_object.start_guard]:
                if hash(ref_object) == hash(new_object):
                    if new_object.next_guard:
                        for next_guard in new_object.next_guard:
                            # Merge the new object's outgoing guards into the
                            # existing equivalent object.
                            ref_object.update_next_guard(next_guard)
                            if (next_guard not in self.guard_end_trans_object_map or
                                    ref_object not in self.guard_end_trans_object_map[next_guard]):
                                self.guard_end_trans_object_map[next_guard] = ref_object
                    return ref_object
            # NOTE(review): if start_guard is registered but no hash-equal
            # object exists, control falls through and the method implicitly
            # returns None — confirm this case cannot occur.
        else:
            # Register new_graph_node, next guard so it can be found by possible children
            if new_object.next_guard:
                for next_guard in new_object.next_guard:
                    self.guard_end_trans_object_map[next_guard] = new_object
            # Add the node to the start graph
            self.guard_start_trans_object_map[new_object.start_guard] = new_object
            return new_object

    def check_final_state_assignment(self, path: List[CommonTree], prev_final_state_str: str):
        """Return the last state assigned along ``path``; fall back to
        ``prev_final_state_str`` when the path assigns no state.
        """
        next_final_state_assignment = None
        for pcc_object in path:
            if str(pcc_object) == self.k_assign and str(pcc_object.getChildren()[0]) == self.k_state:
                next_final_state_assignment = str(pcc_object.getChildren()[2])
        if next_final_state_assignment:
            return next_final_state_assignment
        return prev_final_state_str

    def transition_graph_add_node(self, new_graph_node: ProtoTransGraphObject):
        """Link ``new_graph_node`` to every registered predecessor ending on
        its start guard, and register its own outgoing guards.
        """
        prev_nodes = self.guard_end_trans_object_map[new_graph_node.start_guard]
        # Append new_graph_node to parent node
        for prev_node in prev_nodes:
            self.trans_object_graph.add_edge(prev_node, new_graph_node)
        # Register new_graph_node, next guard so it can be found by possible children
        if new_graph_node.next_guard:
            for next_guard in new_graph_node.next_guard:
                self.guard_end_trans_object_map[next_guard] = new_graph_node
        # Add the node to the start graph
        self.guard_start_trans_object_map[new_graph_node.start_guard] = new_graph_node

    def get_start_node_process_tree(self) -> CommonTree:
        """Return the unique root (in-degree 0) of the process tree;
        raises via perror when more than one exists.
        """
        start_nodes = list((node for node, in_degree in self.process_tree.in_degree() if in_degree == 0))
        self.perror("To many start nodes in process tree", len(start_nodes) == 1)
        return start_nodes[0]

    def get_next_terminals_process_tree(self, cur_node: CommonTree) -> List[CommonTree]:
        """Return the de-duplicated list of the next terminal nodes
        (guards, event acks, or nodes flagged terminal) below ``cur_node``.
        """
        terminal_node_list = []
        self.search_terminal_nodes_process_tree([cur_node], terminal_node_list)
        # Create set to avoid duplicates due to alternative paths
        return list(set(terminal_node_list))

    # Tree depth first search
    def search_terminal_nodes_process_tree(self,
                                           cur_nodes: List[CommonTree],
                                           terminal_node_list: List[CommonTree]):
        """Depth-first collect terminal successors of ``cur_nodes`` into
        ``terminal_node_list`` (mutated in place).
        """
        for cur_node in cur_nodes:
            # Create set to avoid duplicates due to alternative paths
            successor_nodes = list(set(self.process_tree.successors(cur_node)))
            non_terminal_nodes = []
            for successor_node in successor_nodes:
                # A node is terminal if it is a guard, an event ack, or
                # carries the terminal attribute in the process tree.
                if not (str(successor_node) == self.k_guard
                        or str(successor_node) == self.k_event_ack
                        or self.terminal in self.process_tree.nodes[successor_node]):
                    non_terminal_nodes.append(successor_node)
                else:
                    terminal_node_list.append(successor_node)
            self.search_terminal_nodes_process_tree(non_terminal_nodes, terminal_node_list)

    def label_transaction_tree(self):
        """Derive the transaction label and start-state id from the root
        node's children.
        """
        start_node = self.get_start_node_process_tree()
        self.start_state_str_id = str(start_node.getChildren()[0])
        self.transaction_label = "".join([str(pcc_object) for pcc_object in start_node.getChildren()])
|
{"hexsha": "4eb63f5cb4135c3626e9fa713e9c97ee64a04a79", "size": 10587, "ext": "py", "lang": "Python", "max_stars_repo_path": "Parser/NetworkxParser/TransTreeGeneration/ClassProtoTransObjectTree.py", "max_stars_repo_name": "Errare-humanum-est/HeteroGen", "max_stars_repo_head_hexsha": "600a7bde441cc1365a465746e15564bd8de8fc37", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-12T15:52:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T15:52:07.000Z", "max_issues_repo_path": "Parser/NetworkxParser/TransTreeGeneration/ClassProtoTransObjectTree.py", "max_issues_repo_name": "Errare-humanum-est/HeteroGen", "max_issues_repo_head_hexsha": "600a7bde441cc1365a465746e15564bd8de8fc37", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Parser/NetworkxParser/TransTreeGeneration/ClassProtoTransObjectTree.py", "max_forks_repo_name": "Errare-humanum-est/HeteroGen", "max_forks_repo_head_hexsha": "600a7bde441cc1365a465746e15564bd8de8fc37", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-14T18:03:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-14T18:03:37.000Z", "avg_line_length": 48.1227272727, "max_line_length": 120, "alphanum_fraction": 0.6860300368, "include": true, "reason": "import networkx,from networkx", "num_tokens": 2086}
|
#reading data.dat
import numpy as np
import numpy.random as rd
import random as random
import scipy
import matplotlib as mpl
import matplotlib.pyplot as plt
# Read "data2.dat" and copy the payload of every line tagged "C22"
# (tag prefix and trailing newline stripped) into "out2.txt", one per line.
#
# Fixes vs. original: file1 and out2 were opened but never closed
# (resource leak); using `with` guarantees both handles are closed even
# on error. The dead `data = np.zeros(...)` array (all its uses were
# commented out) is removed.
data_string = []
with open("data2.dat", 'r') as file1, open("out2.txt", 'w') as out2:
    # looping through the lines
    for line in file1:
        if line[0:3] == 'C22':
            print('yes')
            data_string = np.append(data_string, line[3:-1])
        else:
            print(line[0:3])
    print(data_string)
    for entry in data_string:
        out2.write(entry)
        out2.write("\n")
|
{"hexsha": "06404fc4657a0eb4435cb60a4d8ee3668f4adbbd", "size": 657, "ext": "py", "lang": "Python", "max_stars_repo_path": "Mathematics/PBC_simulations/readingDat2.py", "max_stars_repo_name": "grohalex/Final-Project", "max_stars_repo_head_hexsha": "41ac4e56e1a688a5f03f81d40d99eb2f839f9a26", "max_stars_repo_licenses": ["FTL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Mathematics/PBC_simulations/readingDat2.py", "max_issues_repo_name": "grohalex/Final-Project", "max_issues_repo_head_hexsha": "41ac4e56e1a688a5f03f81d40d99eb2f839f9a26", "max_issues_repo_licenses": ["FTL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Mathematics/PBC_simulations/readingDat2.py", "max_forks_repo_name": "grohalex/Final-Project", "max_forks_repo_head_hexsha": "41ac4e56e1a688a5f03f81d40d99eb2f839f9a26", "max_forks_repo_licenses": ["FTL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.6551724138, "max_line_length": 56, "alphanum_fraction": 0.6666666667, "include": true, "reason": "import numpy,import scipy", "num_tokens": 178}
|
\setlength{\footskip}{8mm}
\chapter{Extracting the Object from the Shadows: Maximum Likelihood
Object/Shadow Discrimination}
\label{ch:shadow}
\textit{In this chapter, we propose and experimentally evaluate a new
method for detecting shadows using a simple maximum likelihood
formulation based on color information. We first estimate, offline, a
joint probability distribution over the difference in the HSV color
space between pixels in the current frame and the corresponding pixels
in a background model, conditional on whether the pixel is an object
pixel or a shadow pixel. Given the learned distribution, at run time,
we use the maximum likelihood principle to classify each foreground
pixel as either shadow or object. In an experimental evaluation, we
find that the method outperforms standard methods on three different
real-world video surveillance data sets. We conclude that the
proposed shadow detection method would be an extremely effective
component in an intelligent video surveillance system.}
\section{Introduction}
In many video surveillance applications, detecting and tracking moving
objects is an important issue. A very common approach to detect moving
objects is to apply a background subtraction algorithm. However,
background subtraction algorithms share one major disadvantage:
shadows tend to be misclassified as part of the foreground
object. This can lead to many undesirable consequences in the
detection step while segmenting and extracting features of moving
objects. For example, any estimate of the size of the detected object
would be an overestimate due to the misclassification of shadow pixels
as foreground pixels. Additionally, during object segmentation,
shadows misclassified as moving objects could lead to merging
otherwise separate blobs representing different people walking close
to each other. This would make isolating and tracking people in a
group much more difficult than necessary.
Since shadow removal can significantly improve the performance of
computer vision tasks such as tracking, segmentation, and object
detection, shadow detection has become an active research area in
recent years.
Several well-known algorithms for shadow detection already exist. Most
of the work is based on background modeling and color difference
information. Generally, some model of the background is estimated,
then the difference between the background and current image is used
to identify changed pixels, then the changed pixels are further
classified into object and shadow. Shadow pixels tend to have similar
chromaticity but lower luminance than the corresponding background
pixel. In the RGB color space, chromaticity and luminance are not
orthogonal, but lighting differences can be controlled for in the
normalized RGB color space \shortcite{finlayson98colour}. Some
research work thus utilizes the normalized RGB space in the background
subtraction and shadow removal
algorithm \shortcite{mckenna00tracking,elgammal02background,hong03background}.
\shortciteA{mikic00shadow} observe that in the normalized RGB color
space, shadow pixels tend to be more blue and less red than
illuminated pixels. They apply a probabilistic model based on the
normalized red and blue features to classify shadow pixels in traffic
scenes. The authors assume that the background and shadow values
follow Gaussian distributions and foreground values follow a uniform
distribution. They iteratively estimate the posterior probabilities of
a given pixel belonging to each of three classes: background, shadow,
and foreground, until one of the probabilities reaches a fixed
threshold. The pixel is then classified accordingly. If none of the
three probabilities reaches the threshold, the pixel is classified as
background.
\shortciteA{salvador04shadow} propose a new method for detecting
cast shadows. They first identify the presence of shadows in the RGB
color space based on the fact that shadows darken the surface they
cast on. The detected regions are further verified based on the color
invariance and geometric properties expected of shadows.
\shortciteA{havasi06geometric} illustrate that color-based methods
work well for weak shadows but not strong shadows. Hence, they
integrate geometric information into the detection process, resulting
in an iterative Bayesian framework combining both color and geometric
information that improves detection results.
One well-known problem with the normalized RGB space is that
normalization of pixels with low intensity results in unstable
chromatic
components \shortcite{kender76color}. \shortciteA{cucchiara01shadow}
and \shortciteA{chen08shadow} propose a HSV color-based method to
distinguish shadows from moving objects that eliminates this
concern. Their approach is based on the assumption that only the
intensity of the area covered by shadows will significantly
change. Therefore, they detect shadows using the so-called
deterministic nonmodel-based (DNM) approach as follows:
\[
SP_t (x,y) = \left\{
\begin{array}{ll}
1 & {\rm if} \; \alpha \le \frac{{I_t^V (x,y)}}{{B_t^V (x,y)}} \le \beta\\
& \;\;\; \wedge \; (I_t^S (x,y) - B_t^S (x,y)) \le T_S \\
& \;\;\; \wedge \left| {I_t^H (x,y) - B_t^H (x,y)} \right| \le T_H\\ \\
0 & {\rm otherwise}, \\
\end{array} \right.
\]
where $SP_t(x,y)$ is the resulting binary mask for shadows at each
pixel $(x,y)$ at time $t$. $I_t^H$, $I_t^S$, $I_t^V$, $B_t^H$,
$B_t^S$, and $B_t^V$ are the H, S, and V components of foreground
pixel $I_t(x,y)$ and background pixel $B_t(x, y)$ at pixel $(x,y)$ at
time $t$, respectively. They prevent foreground pixels from being
classified as shadow pixels by setting two thresholds, $0 < \alpha <
\beta < 1$. The four thresholds $\alpha$, $\beta$, $T_S$, and $T_H$
are empirically determined.
Some researchers have investigated color spaces besides RGB and
HSV. \shortciteA{blau06shadow} use an ``improved'' hue, luminance, and
saturation (IHLS) color space for shadow detection to deal with the
issue of unstable hue at low saturation by modeling the relationship
between them. They then perform a simple background subtraction method
based on the IHLS color space and saturation-weighted hue statistics.
Their experimental results show that detecting shadows in this color space
is more reliable than in normalized RGB or HSV color spaces in several
video sequences.
Another alternative color space is YUV. Some applications such as
television and videoconferencing use the YUV color space natively, and
since transformation from YUV to HSV is
time-consuming, \shortciteA{schreer02shadow} operate in the YUV color
space directly, developing a fast shadow detection algorithm based on
approximated changes of hue and saturation in the YUV color space.
There has been some work using texture-based methods such as the
normalized cross-correlation (NCC)
technique \shortcite{tian05shadow,jacques05shadow}. This method
detects shadows based on the assumption that the intensity of shadows
is proportional to the incident light, so shadow pixels should simply
be darker than the corresponding background pixels. Under this
assumption, shadow patches should be scaled versions of the
corresponding background patches. This assumption is most valid in
scenes with visible background texture inside the shadows. The method
computes the NCC between the neighborhood of a pixel in the foreground
mask and the neighborhood of the corresponding pixel in the background
model. For each pixel $(i, j)$ of the foreground mask, it considers a
$(2N + 1)
\times (2N + 1)$ template $T_{ij}$ defined by $T_{ij}(n, m) = I(i+n,
j+m)$ for $-N \le n \le N$ and $-N \le m \le N$ ($N$ is empirically
determined). If $B(i, j)$ is the background model, the NCC value at
pixel $(i, j)$ is defined as follows:
\[
NCC(i,j) = \frac{{ER(i,j)}}{{E_B (i,j)E_{T_{ij} } }},
\]
where
\[
ER(i,j) = \sum\limits_{n = -N}^N {\sum\limits_{m = -N}^N {B(i + n,j
+ m)T_{ij} (n,m)}},
\]
\[
E_B (i,j) = \sqrt {\sum\limits_{n = -N}^N {\sum\limits_{m = -N}^N
{B(i + n,j + m)^2}}},
\]
\[
E_{T_{ij} } = \sqrt {\sum\limits_{n = -N}^N {\sum\limits_{m = -N}^N
{T_{ij} (n,m)^2}}}.
\]
For a pixel in a shadow region, the NCC value should be large (close
to one) and the $E_{T_{ij}}$ for the region around $(i,j)$, i.e., its
magnitude, should be smaller than $E_B (i,j)$. Consequently, a pixel
is classified as shadow if
\[
NCC(i,j) \ge L_{NCC}
\]
and
\[
E_{T_{ij}} < E_B (i,j),
\]
where $L_{NCC}$ is an empirical threshold.
However, the texture-based method tends to misclassify foreground
pixels as shadow pixels when the foreground region has a similar
texture to the corresponding background
region. \shortciteA{xu05shadow} propose a hybrid shadow removal
technique that combines color and texture-based procedures to detect
shadows. Since chromaticity in a shadow region should be the same as
the corresponding background region, and since the texture in a shadow
region should be the same as the corresponding background region, the
authors first classify pixels based on a set of thresholds for
brightness and color distortion then perform speckle removal filtering
to reconstruct the final foreground shapes.
Here we propose a new method for detecting shadows using maximum
likelihood estimation based on color information. We extend the
deterministic nonmodel-based approach to a parametric statistical
model-based approach. Our method estimates the joint distribution over
the difference in the HSV color space between pixels in the current
frame and the corresponding pixels in a background model, conditional
on whether the pixel is an object pixel or a shadow pixel. At run
time, we simply use the maximum likelihood principle to classify each
foreground pixel as either shadow or object given the estimated model.
Experimental results demonstrate that our proposed method outperforms
the standard methods (DNM and NCC) on three different real-world video
surveillance data sets. Our method is thus effective and also has the
potential to improve the object detection and motion analysis module
in intelligent video surveillance systems.
In the rest of this chapter, I provide details of the proposed method
and the overall process in Section \ref{sec:shadow-algorithm},
demonstrate the effectiveness of the shadow detection method with an
experimental evaluation in Section \ref{sec:shadow-results}, and then
conclude and point to future work in
Section \ref{sec:shadow-discussion}.
\section{Maximum Likelihood Classification of Foreground Pixels}
\label{sec:shadow-algorithm}
We divide our method into two phases. In the first, offline phase, we
acquire training video, construct a background model from the first
few frames, perform foreground extraction on the remaining frames,
then manually label the extracted pixels as either object pixels or
shadow pixels. I previously describe these steps in
Sections \ref{sec:blob-motion-detection}
and \ref{sec:blob-blob-extraction}. After that, we construct a joint
probability model over the difference in the HSV color space between
pixels in the current frame and the corresponding pixels in the
background model, conditional on whether the pixel is an object pixel
or a shadow pixel.
During the second, online phase, we perform the same background
modeling and foreground extraction procedure and further classify
foreground pixels as either shadow or object using the maximum
likelihood approach. I describe each of these steps in more detail in
the following sections.
\subsection{Offline Phase}
After foreground extraction, we manually label pixels as either shadow
or object. We then observe the distribution over the difference in hue
($H_{\text{diff}}$), saturation ($S_{\text{diff}}$), and value
($V_{\text{diff}}$) components in the HSV color space between pixels
in the current frame and the corresponding pixels in the background
model. Figure \ref{fig:foreground-distribution} shows examples of
these distributions for object pixels, and
Figure \ref{fig:shadow-distribution} shows examples of these
distributions for shadow pixels.
\begin{figure}[t]
\centering
\subfloat[]{\includegraphics[scale=0.25]{figures/foreground_diff_h.png}}
\hspace{0.05cm}
\subfloat[]{\includegraphics[scale=0.25]{figures/foreground_diff_s.png}}
\hspace{0.05cm}
\subfloat[]{\includegraphics[scale=0.25]{figures/foreground_diff_v.png}}
\caption[Example distributions over the difference in hue,
saturation, and value components for true object pixels, extracted
from our hallway dataset.]{\small Example distributions over the
difference in (a) hue, (b) saturation, and (c) value components
for true object pixels, extracted from our hallway dataset.}
\label{fig:foreground-distribution}
\end{figure}
\begin{figure}[t]
\centering
\subfloat[]{\includegraphics[scale=0.25]{figures/shadow_diff_h.png}}
\hspace{0.05in}
\subfloat[]{\includegraphics[scale=0.25]{figures/shadow_diff_s.png}}
\hspace{0.05in}
\subfloat[]{\includegraphics[scale=0.25]{figures/shadow_diff_v.png}}
\caption[Example distributions over the difference in hue,
saturation, and value components for true shadow pixels, extracted
from our hallway dataset.]{\small Example distributions over the
difference in (a) hue, (b) saturation, and (c) value components
for true shadow pixels, extracted from our hallway dataset.}
\label{fig:shadow-distribution}
\end{figure}
Clearly, in all three cases, the distributions for object pixels and
shadow pixels are very different. We thus introduce a measurement
probability distribution conditional on whether the assignment for a
pixel is object or shadow. In this work, we assume that the individual
component difference distributions are conditionally independent given
the assignment.
We define the measurement likelihood for pixel $(x, y)$ given its
assignment as follows.
\begin{equation}
\label{eq:shadow-measurement}
\begin{array}{ccl}
P(M_{\text{xy}} \mid A_{\text{xy}} = \text{sh})
& = & P(H_{\text{diff}} \mid A_{\text{xy}} = \text{sh}) \times \\
& & P(S_{\text{diff}} \mid A_{\text{xy}} = \text{sh}) \times \\
& & P(V_{\text{diff}} \mid A_{\text{xy}} = \text{sh}),
\end{array}
\end{equation}
where $M_{xy}$ is a tuple containing the HSV value for pixel $(x,y)$
in the current image as well as the HSV value for the corresponding
pixel in the background model, and $A_{xy}$ is the assignment of
pixel $(x,y)$ as object or shadow. ``sh'' stands for shadow.
To make the problem tractable, we assume that the distributions over
the components on the right hand side in \ref{eq:shadow-measurement}
follow Gaussian distributions, defined as follows.
\begin{equation*}
P(H_{\text{diff}} \mid A_{\text{xy}} = \text{sh}) =
{\cal N}(H_{\text{diff}} ;
\mu_{h_{\text{diff}}^{\text{sh}}},
\sigma^2_{h_{\text{diff}}^{\text{sh}}})
\end{equation*}
\begin{equation*}
P(S_{\text{diff}} \mid A_{\text{xy}} = \text{sh}) =
{\cal N}(S_{\text{diff}} ;
\mu_{s_{\text{diff}}^{\text{sh}}},
\sigma^2_{s_{\text{diff}}^{\text{sh}}})
\end{equation*}
\begin{equation*}
P(V_{\text{diff}} \mid A_{\text{xy}} = \text{sh}) =
{\cal N}(V_{\text{diff}} ;
\mu_{v_{\text{diff}}^{\text{sh}}},
\sigma^2_{v_{\text{diff}}^{\text{sh}}})
\end{equation*}
Similarly, the measurement likelihood for object pixels can be
computed as follows.
\begin{equation}
\label{eq:foreground-measurement}
\begin{array}{ccl}
P(M_{\text{xy}} \mid A_{\text{xy}} = \text{obj})
& = & P(H_{\text{diff}} \mid A_{\text{xy}} = \text{obj}) \times \\
& & P(S_{\text{diff}} \mid A_{\text{xy}} = \text{obj}) \times \\
& & P(V_{\text{diff}} \mid A_{\text{xy}} = \text{obj})
\end{array}
\end{equation}
Here ``obj'' stands for object. As for the shadow pixel distributions,
we assume Gaussian distributions over the components on the right hand
side in \ref{eq:foreground-measurement}, as follows.
\begin{equation*}
P(H_{\text{diff}} \mid A_{\text{xy}} = \text{obj}) =
{\cal N}(H_{\text{diff}} ;
\mu_{h_{\text{diff}}^{\text{obj}}},
\sigma^2_{h_{\text{diff}}^{\text{obj}}})
\end{equation*}
\begin{equation*}
P(S_{\text{diff}} \mid A_{\text{xy}} = \text{obj}) =
{\cal N}(S_{\text{diff}} ;
\mu_{s_{\text{diff}}^{\text{obj}}},
\sigma^2_{s_{\text{diff}}^{\text{obj}}})
\end{equation*}
\begin{equation*}
P(V_{\text{diff}} \mid A_{\text{xy}} = \text{obj}) =
{\cal N}(V_{\text{diff}} ;
\mu_{v_{\text{diff}}^{\text{obj}}},
\sigma^2_{v_{\text{diff}}^{\text{obj}}})
\end{equation*}
We estimate the parameters $\Theta = \{
\mu_{h_{\text{diff}}^{\text{sh}}},
\sigma^2_{h_{\text{diff}}^{\text{sh}}},
\mu_{s_{\text{diff}}^{\text{sh}}},
\sigma^2_{s_{\text{diff}}^{\text{sh}}},
\mu_{v_{\text{diff}}^{\text{sh}}},
\sigma^2_{v_{\text{diff}}^{\text{sh}}},
\mu_{h_{\text{diff}}^{\text{obj}}},
\sigma^2_{h_{\text{diff}}^{\text{obj}}},
\mu_{s_{\text{diff}}^{\text{obj}}},
\sigma^2_{s_{\text{diff}}^{\text{obj}}},
\mu_{v_{\text{diff}}^{\text{obj}}},
\sigma^2_{v_{\text{diff}}^{\text{obj}}} \}$ directly from training
data during the offline phase.
\subsection{Online Phase}
Given the model estimate $\Theta$, we use the maximum likelihood
approach to classify a pixel as a shadow pixel if
\begin{equation}
\label{eq:ml}
P(M_{\text{xy}} \mid A_{xy}=\text{sh} ; \Theta ) >
P(M_{\text{xy}} \mid A_{xy}=\text{obj} ; \Theta ).
\end{equation}
Otherwise, we classify the pixel as an object pixel.
We could add the prior probabilities to the shadow model and the
object model in \ref{eq:ml} to obtain a maximum a posteriori
classifier. In our experiments, we assume equal priors.
\section{Experimental Results}
\label{sec:shadow-results}
In this section, we present experimental results for our proposed
maximum likelihood (ML) classification method and compare the results
with two other methods from the literature, namely the deterministic
nonmodel-based (DNM)
method \shortcite{kender76color,cucchiara01shadow} and the normalized
cross-correlation (NCC)
method \shortcite{tian05shadow,jacques05shadow}.
\begin{figure}[t]
\centering
\subfloat[]{\includegraphics[scale=0.4]{figures/csim_hallway_benchmark.png}}
\hspace{0.05cm}
\subfloat[]{\includegraphics[scale=0.4]{figures/aton_lab_benchmark.png}}
\hspace{0.05cm}
\subfloat[]{\includegraphics[scale=0.4]{figures/aton_highway1_benchmark.png}}
\caption[Sample frames from the Hallway, Laboratory, and Highway
video sequences.]{\small Sample frames from the (a) Hallway, (b)
Laboratory, and (c) Highway video sequences}
\label{fig:benchmark}
\end{figure}
We performed the experiments on three video sequences.
Figure \ref{fig:benchmark} shows sample frames from the three video
sequences. The video sequences include both indoor and outdoor
scenes. The \textit{Hallway} sequence\footnote{Freely available for
others to experiment with
at: \url{http://www.kanouivirach.com/#downloads}.} shows a hallway
scene. For this video, we mounted a CCTV camera to record in an
academic building. The \textit{Laboratory} sequence shows a laboratory
room, and the \textit{Highway} sequence shows a traffic scene. The
last two video sequences were first introduced in Prati et al.'s
work\nocite{prati03shadow}.
To evaluate the performance of the methods, we compute the two metrics
proposed by \shortciteA{prati03shadow}, defining the shadow detection
rate $\eta$ and the shadow discrimination rate $\xi$ as follows:
\[
\eta = \frac{TP_{\text{sh}}}{TP_{\text{sh}} + FN_{\text{sh}}};\;
\xi = \frac{TP_{\text{obj}}}{TP_{\text{obj}} + FN_{\text{obj}}},
\]
where the subscripts ``sh'' and ``obj'' stand for shadow and object,
respectively. $TP$ and $FN$ are the number of true positive (i.e., the
shadow or object pixels correctly identified) and false negative
(i.e., the shadow or object pixels classified incorrectly) pixels.
$\eta$ expresses the proportion of shadow pixels correctly detected,
and $\xi$ expresses the proportion of object pixels correctly
detected. $\eta$ and $\xi$ can also be thought of as the true
positive rate (sensitivity) and true negative rate (specificity) for
detecting shadows, respectively. In the experiment, we also compare
the methods with two additional metrics: precision and $F_1$
score.
\subsection{Preparation}
Ground truth data are provided with the \textit{Laboratory} and
\textit{Highway} video sequences in Sanin et al.'s work\nocite{sanin12shadow}.
They used a standard Gaussian mixture (GMM) background
model\nocite{stauffer99background} to extract foreground pixels for
the two videos. They selected 20 frames including objects from
the \textit{Laboratory} sequence arbitrarily for labeling. For
the \textit{Highway} sequence, they labeled one out of every twenty
frames including objects for a total of 20 frames. For our
\textit{Hallway} video sequence, to prepare similar ground truth data,
we selected one out of every ten frames including objects for 20
frames and manually labeled each pixel of each frame as object,
background, or shadow. We used the previously mentioned extended
version of the GMM background model for foreground extraction, but the
results were not substantially different from those of the standard
GMM.
To find the best parameters for each model while avoiding overfitting,
for each of the three models and each of the three data sets, we
performed five-fold cross validation using 10 of the training frames,
reserving the remaining 10 frames for the final test. The 10 frames in
each case were the second of every two frames in sequential order. We
varied the parameter settings for each method on each video dataset
and selected the setting that maximized the $F_1$ score (a measure
combining both precision and recall) over the cross validation test
sets. Finally, we tested on the remaining 10-frame final test set for
each video sequence.
\subsection{Shadow Detection Performance}
Table \ref{tab:comparison-results} compares the shadow detection
results between the proposed, DNM, and NCC methods. Our method
achieves the top performance for shadow detection rate $\eta$ and
$F_1$ score in every case. We also obtain a good shadow discrimination
rate $\xi$ and precision in all three video
datasets. Figure \ref{fig:results-for-arbitrary-frame} shows the
results for an arbitrary frame in each video sequence. Green pixels
are those labeled as object pixels and red pixels are those labeled as
shadow pixels. The results in the figure confirm that our proposed
method clearly outperforms the two standard methods in all three video
datasets.
\begin{table}[t]
\caption[Comparison of shadow detection results between the
proposed, DNM, and NCC methods.]{\small Comparison of shadow
detection results between the proposed, DNM, and NCC methods.}
\begin{center}
\includegraphics[width=6.1in]{figures/tab-shadow-results}
\end{center}
\label{tab:comparison-results}
\end{table}
\begin{figure}[t]
\centering
\includegraphics[width=6.2in]{figures/fig-shadow-results}
\caption[Results for an arbitrary frame in each video
sequence.]{\small Results for an arbitrary frame in each video
sequence. The first column contains an example original frame for
each video sequence. The second column shows the ground truth for
that frame, where object pixels are labeled in white and shadow
pixels are labeled in gray. The remaining columns show shadow
    detection results for each method, where pixels labeled as object
    are shown in green and pixels labeled as shadow are shown in red.}
\label{fig:results-for-arbitrary-frame}
\end{figure}
The DNM method has stable performance for all three videos, with good
performance for all metrics. Both the DNM method and our proposed
method suffer from the problem that the object colors can be confused
with the background color. In the \textit{Highway} sequence (third
row in Figure \ref{fig:results-for-arbitrary-frame}), we clearly see
this situation. Our method detects shadows well but misclassifies some
object pixels as shadow, whereas DNM sometimes better discriminates
the shadow from the object. However, the overall performance of our
proposed method is superior.
The NCC method achieves the best shadow discrimination rate $\xi$ and
precision. However, as can be seen in
Figure \ref{fig:results-for-arbitrary-frame}, this is because it
classifies nearly every pixel as object. This gives NCC an advantage
for shadow precision and $\xi$ but on the other two metrics, shadow
detection rate $\eta$ and $F_1$ score, NCC performs extremely poorly
in all cases. This is due to unclear background texture inside the
shadows, particularly on the \textit{Highway} sequence.
\section{Discussion}
\label{sec:shadow-discussion}
We propose a new method for detecting shadows using a simple maximum
likelihood approach based on color information. We extend the
deterministic nonmodel-based approach, designing a parametric
statistical model-based approach. Our experimental results show that
our proposed method is extremely effective and superior to the
standard methods on three different real-world video surveillance data
sets.
In some cases, our method misdetects shadow pixels due to similar
color between the object and the background and unclear background
texture in shadow regions. Incorporating geometric or shadow region
shape priors would potentially improve the detection and
discrimination rates.
In future work, we plan to address these issues, further explore the
feasibility of combining our method with other useful shadow features,
and integrate our shadow detection module with a real-world open
source video surveillance system \shortcite{zoneminder}.
\FloatBarrier
%%% Old text %%%
%\section{Introduction}
%
%In video surveillance applications, moving object detection and
%tracking is an important issue. A very common approach to detect
%moving objects is to apply a background subtraction technique. The
%process is basically to compare a new frame with a background
%model. The significant differences correspond to foreground. This
%process should ideally detect the moving objects and limit the false
%positive as much as possible at the same time. More importantly, it
%should be able to avoid detection of shadows or noise. However, one
%challenging problem arising is to identify and detect shadows. And
%this has become an active research area in recent years.
%
%Shadows can cause lots of problems in the detection step while
%segmenting and extracting features of moving objects. For example, the
%size of detected object is larger than the real one due to the
%misclassification of shadow as foreground. Shadows can also merge
%different people walking close to each other of which the output
%becomes a single object in the background subtraction step. Shadows
%and objects share two important information which make the problem
%difficult. Firstly, shadows can be detected as foreground because they
%are different from the background. Secondly, shadows have the same
%motion as the objects casting them.
%
%Generally, shadows can be categorized into two classes which are self
%and cast shadows. A self shadow occurs on the part of an object which
%is not illuminated by light. A cast shadow is an area projected where
%the light is occluded by an object. Figure \ref{fig:shadow-example}
%illustrates an example of self and cast shadow. One feature of shadows
%is that shadow does not significantly change the color and texture of
%the background, but only intensity. This feature is very useful since
%it can lead to many shadow detection algorithms which will be
%described in the next section.
%
%\begin{figure}[t]
% \begin{center}
% \includegraphics[width=2.5in]{figures/shadow-example.png}
% \caption[Self and cast shadow in a real-world scene image]{Self and
% cast shadow in a real-world scene image. Self shadow is on the back
% of the person and cast shadow in on the ground.}
% \label{fig:shadow-example}
% \end{center}
%\end{figure}
%
%Removing shadows can significantly improve the performance of the
%computer vision tasks such as tracking, segmentation, and object
%detection. Since the detection and tracking is the core of video
%surveillance systems, poor detection and tracking can cause the
%problems to the next processing step such as feature extraction and
%behavior modeling. Background modeling techniques alone cannot solve
%the problems. We need an algorithm to detect and remove shadows.
%Therefore, in this dissertation, we explore and develop the shadow
%detection and removal algorithms. The experiments analyze the
%algorithms on the real-world video data.
%
%For the existing shadow detection
%methods, \shortciteA{cucchiara01shadow} and \shortciteA{chen08shadow}
%use the HSV color information to distinguish shadows from moving
%objects. Their approach is based on the assumption that only the
%intensity of the area covered by shadows will significantly change.
%Therefore, they can detect shadows using the following equation.
%\[
% SP_t (x,y) = \left\{ \begin{array}{l}
% 1\;\;\;{\rm if}\;\alpha \le \frac{{I_t^V (x,y)}}{{B_t^V (x,y)}} \le \beta\\
% \quad \quad \wedge (I_t^S (x,y) - B_t^S (x,y)) \le T_S \\
% \quad \quad \wedge \left| {I_t^H (x,y) - B_t^H (x,y)} \right| \le T_H\\ \\
% 0\;\;\;{\rm otherwise}, \\
% \end{array} \right.
%\]
%where $SP_t(x,y)$ is the binary mask of shadows at pixel $(x,y)$ at
%time $t$. $I_t^H$, $I_t^S$, $I_t^V$, $B_t^H$, $B_t^S$, and $B_t^V$
%are H, S, V components of foreground pixel $I_t(x,y)$ and background
%pixel $B_t(x, y)$ at pixel $(x,y)$ at time $t$, respectively. They
%prevent the foreground pixel being classified into shadows by setting
%two thresholds $\alpha$ and $\beta$. The parameter $\beta$ is set
%under 1 and $\alpha$ is set over 0. $T_S$ and $T_H$ are discovered by
%experiments.
%
%\shortciteA{tian05shadow}, \shortciteA{jacques05shadow},
%and \shortciteA{tan06shadow} apply the normalized cross-correlation
%(NCC) to detect shadows based on the assumption that the intensity of
%shadows is proportional to the incident light ,and shadow pixels are
%darker than background pixels, or it can be said that the shadows are
%the scaled versions of background. Therefore, using the NCC, they can
%identify the scaled versions of the same signal. They perform the NCC
%on the foreground mask from the background subtraction progress. For
%each pixel $(i, j)$ of the foreground mask, they considered a $(2N +
%1) \times (2N + 1)$ template $T_{ij}$, and defined $T_{ij}(n, m) =
%I(i+n, j+m)$ for $-N \le n \le N$, $-N \le m \le N$. $B(i,j)$ is the
%background image formed by temporal median filtering. The NCC at pixel
%$(x,y)$ is defined as.
%\[
% NCC(i,j) = \frac{{ER(i,j)}}{{E_B (i,j)E_{T_{ij} } }},
%\]
%where
%\[
% ER(i,j) = \sum\limits_{n = -N}^N {\sum\limits_{m = -N}^N {B(i + n,j
% + m)T_{ij} (n,m)} },
%\]
%\[
% E_B (i,j) = \sqrt {\sum\limits_{n = -qN}^N {\sum\limits_{m = -N}^N
% {B(i + n,j + m)^2 } } },
%\]
%\[
% E_{T_{ij} } = \sqrt {\sum\limits_{n = -N}^N {\sum\limits_{m = -N}^N
% {T_{ij} (n,m)^2 } } }.
%\]
%For a pixel in a shadow region, the NCC value should be large (close
%to one) and the $E_{T_{ij}}$ of this region should be lower than the
%$E_B (i,j)$. Consequently, a pixel is classified into shadow if
%\[
% NCC(i,j) \ge L_{NCC}
%\]
%and
%\[
% E_{T_{ij} } < E_B (i,j),
%\]
%where $L_{NCC}$ is a threshold. Figure \ref{fig:tian-shadow-result}
%shows some examples of the results from the work
%of \shortciteA{tian05shadow}.
%
%\begin{figure}
% \centering
% \begin{tabular}{c}
% \includegraphics[scale=0.7]{figures/tian-mog-results.png}\\
% \includegraphics[scale=0.7]{figures/tian-shadow-results.png}
% \end{tabular}
% \caption[Examples of the background subtraction and shadow removal
% results.]{Examples of the background subtraction and shadow removal
% results. Upper row shows the results of the MoG background modeling
% and lower row shows the results from the work of Tian et
% al. Reprinted from the work of Tian et al.\ (2005).}
% \label{fig:tian-shadow-result}
%\end{figure}
%
%Some authors found that the blue color component increases while the
%red color component decreases in a shadow
%region. \shortciteA{mikic00shadow} combine this information and
%normalized the blue and red color components as one of their
%features. After that, they apply a probabilistic model to classify
%shadow pixels in traffic scenes. They also assume that the background
%and shadow values follow a Gaussian distribution, and assume the
%foreground values follow an uniform distribution. They iteratively
%estimate the posterior probabilities of the pixel belonging to each
%of the three classes: background, shadow, and foreground until one of
%the probabilities reaches a fixed threshold. The pixel is then
%classified into one of those classes. If none of the three
%probabilities reaches the threshold, the pixel will be classified as
%background.
%
%\shortciteA{xu05shadow} assume that the chromaticity in a shadow
%region should be the same as when it is illuminated. Based on the
%information, they use a normalized chromatic color space to remove
%shadows. In this paper, they normalize the red and green color
%components. Then they define a set of thresholds for brightness and
%color distortion to classify a pixel value into foreground, highlight,
%or shadow.
%
%\shortciteA{hong03background} mention that there are both
%chromaticity and brightness in each pixel value in the RGB space. They
%remove the lightness by using the normalized RGB color space since the
%normalized RGB color space contains only the chromaticity. Thus, they
%use this information to propose their background subtraction
%approach. \shortciteA{havasi06geometric} illustrate that the color
%based method works well in case of weak shadow, but strong
%shadow. Hence, they integrate the geometric information into the
%detection process and came up with an iterative Bayesian framework
%which combines both the color and geometric information to improve the
%detection results.
%
%\section{Methodology}
%
%We use NCC and the maximum likelihood based on the HSV color
%information extracted from a set of training images to remove shadows.
%
%We compute the difference of hue $H_{\text{diff}}$, saturation
%$S_{\text{diff}}$, and value $V_{\text{diff}}$ components under the
%mask between the current and background frames. We simply calculate
%the probability as follows.
%
%\begin{equation*}
% \begin{array}{ccl}
% P(M_{\text{xy}} \mid A_{\text{xy}} = \text{sh})
% & = & P(H_{\text{diff}}^{\text{sh}} \mid A_{\text{xy}} = \text{sh})
% P(S_{\text{diff}}^{\text{sh}} \mid A_{\text{xy}} = \text{sh}) \\
% & & P(V_{\text{diff}}^{\text{sh}} \mid A_{\text{xy}} = \text{sh}),
% \end{array}
%\end{equation*}
%where $M_{xy}$ is a measurement at pixel $(x,y)$. To make the problem
%simple, we assume that the distribution of the difference of hue,
%saturation, and value components follows a normal distribution as
%follows.
%\begin{equation*}
% P(H_{\text{diff}}^{\text{sh}} \mid A_{\text{xy}} = \text{sh}) \sim {\cal
% N}(H_{\text{diff}}^{\text{sh}} ; 0, \sigma^2_{h_{\text{diff}}^{\text{sh}}}),
%\end{equation*}
%
%\begin{equation*}
% P(S_{\text{diff}}^{\text{sh}} \mid A_{\text{xy}} = \text{sh}) \sim {\cal
% N}(S_{\text{diff}}^{\text{sh}} ; 0, \sigma^2_{s_{\text{diff}}^{\text{sh}}}),
%\end{equation*}
%and
%\begin{equation*}
% P(V_{\text{diff}}^{\text{sh}} \mid A_{\text{xy}} = \text{sh}) \sim {\cal
% N}(V_{\text{diff}}^{\text{sh}} ; 0, \sigma^2_{v_{\text{diff}}^{\text{sh}}}).
%\end{equation*}
%
%\noindent Similarly, the measurement given the foreground assignment
%can be computed as follows.
%
%\begin{equation*}
% \begin{array}{ccl}
% P(M_{\text{xy}} \mid A_{\text{xy}} = \text{fg})
% & = & P(H_{\text{diff}}^{\text{fg}} \mid A_{\text{xy}} = \text{fg})
% P(S_{\text{diff}}^{\text{fg}} \mid A_{\text{xy}} = \text{fg}) \\
% & & P(V_{\text{diff}}^{\text{fg}} \mid A_{\text{xy}} = \text{fg})
% \end{array}
%\end{equation*}
%
%\noindent Each component is defined as follows.
%
%\begin{equation*}
% P(H_{\text{diff}}^{\text{fg}} \mid A_{\text{xy}} = \text{fg}) \sim {\cal
% N}(H_{\text{diff}}^{\text{fg}} ; 0, \sigma^2_{h_{\text{diff}}^{\text{fg}}}),
%\end{equation*}
%
%\begin{equation*}
% P(S_{\text{diff}}^{\text{fg}} \mid A_{\text{xy}} = \text{fg}) \sim {\cal
% N}(S_{\text{diff}}^{\text{fg}} ; 0, \sigma^2_{s_{\text{diff}}^{\text{fg}}}),
%\end{equation*}
%and
%\begin{equation*}
% P(V_{\text{diff}}^{\text{fg}} \mid A_{\text{xy}} = \text{fg}) \sim {\cal
% N}(V_{\text{diff}}^{\text{fg}} ; 0, \sigma^2_{v_{\text{diff}}^{\text{fg}}}).
%\end{equation*}
%
%Finally, we use the maximum likelihood approach to classify a pixel
%whether it is foreground or shadow.
%
%\section{Experimental Results}
%
%To collect data, we used ZoneMinder \shortcite{zoneminder} to capture
%video during two weeks. We set up a machine with a Web camera on the
%second floor in the Computer Science and Information Management (CSIM)
%building to capture activities in the scene.
%
%We have experimented with a shadow detection method using normalized
%cross correlation (NCC). We compute the grayscale correlation between
%the foreground pixels and a background image constructed as the mean
%over each mixture of Gaussian distribution. Any foreground pixels
%whose NCC with the background are above some threshold
%$L_{\text{NCC}}$ are removed. In the experiments, we set
%$L_{\text{NCC}} = 0.995$. The results are shown in
%Figure \ref{fig:shadow-outdoor-result}. NCC works well in the outdoor
%scenes with visible background texture inside shadows. However, when
%we applied it in the indoor scenes, it does not work very well in many
%cases due to the lighting effect as shown in
%Figure \ref{fig:shadow-poor-result}.
%
%\begin{figure}[t]
% \centering
% \begin{tabular}{ccc}
% \includegraphics[width=0.28\linewidth]{figures/shadow-result01.png} &
% \includegraphics[width=0.28\linewidth]{figures/shadow-result02.png} &
% \includegraphics[width=0.28\linewidth]{figures/shadow-result03.png}
% \\
% (a) & (b) & (c)
% \end{tabular}
% \caption{Sample foreground extraction and shadow removal results in
% an outdoor scene. (a) Original image. (b) Foreground pixels
% according to background model. (c) Foreground pixels after shadow
% removal.}
% \label{fig:shadow-outdoor-result}
%\end{figure}
%
%\begin{figure}[t]
% \centering
% \begin{tabular}{ccc}
% \includegraphics[width=0.28\linewidth]{figures/shadow-poor-result01.png} &
% \includegraphics[width=0.28\linewidth]{figures/shadow-poor-result02.png} &
% \includegraphics[width=0.28\linewidth]{figures/shadow-poor-result03.png}
% \\
% (a) & (b) & (c)
% \end{tabular}
% \caption{Sample poor shadow removal results in an indoor scene. (a)
% Original image. (b) Foreground pixels according to background
% model. (c) Foreground pixels after shadow removal. Red pixels
% show the positives, and green pixels show the negatives.}
% \label{fig:shadow-poor-result}
%\end{figure}
%
%We have performed another experiment using a maximum likelihood
%approach on the HSV color space. We first compute the difference of
%hue, saturation, and value under the mask of the current and
%background frames from a set of training images. The distributions of
%those values under the foreground mask are shown in
%Figure \ref{fig:foreground-distribution}, and the distributions under
%the shadow mask are shown in Figure
%\ref{fig:shadow-distribution}. For the hue and saturation values, we
%subtracted the current frame by the background frame, but for the
%intensity value, to get the positive values, we subtracted the
%background frame by the current frame.
%
%\begin{figure}[t]
% \centering
% \subfloat[]{\includegraphics[width=0.32\linewidth]{figures/foreground_diff_h.png}}
% \hspace{0.05in}
% \subfloat[]{\includegraphics[width=0.32\linewidth]{figures/foreground_diff_s.png}}
% \hspace{0.05in}
% \subfloat[]{\includegraphics[width=0.32\linewidth]{figures/foreground_diff_v.png}}
% \caption{Distribution of the difference of hue, saturation, and
% value under the foreground mask. (a) Difference of hue.(b)
% Difference of saturation. (c) Difference of value.}
% \label{fig:foreground-distribution}
%\end{figure}
%
%\begin{figure}[t]
% \centering
% \subfloat[]{\includegraphics[width=0.32\linewidth]{figures/shadow_diff_h.png}}
% \hspace{0.05in}
% \subfloat[]{\includegraphics[width=0.32\linewidth]{figures/shadow_diff_s.png}}
% \hspace{0.05in}
% \subfloat[]{\includegraphics[width=0.32\linewidth]{figures/shadow_diff_v.png}}
% \caption{Distribution of the difference of hue, saturation, and
% value under the shadow mask. (a) Difference of hue. (b) Difference
% of saturation. (c) Difference of value.}
% \label{fig:shadow-distribution}
%\end{figure}
%
%The results for the maximum likelihood approach compared to the NCC
%approach are shown in Figure \ref{fig:comparison-shadow-results}.
%
%\begin{figure}[t]
% \centering
% \begin{tabular}{cccc}
% \includegraphics[width=0.22\linewidth]{figures/shadow-original.png} &
% \includegraphics[width=0.22\linewidth]{figures/shadow-bg-results.png} &
% \includegraphics[width=0.22\linewidth]{figures/shadow-ncc-results.png} &
% \includegraphics[width=0.22\linewidth]{figures/shadow-ml-results.png}
% \\
% (a) & (b) & (c) & (d)
% \end{tabular}
% \caption{Shadow removal results in an indoor scene. (a) Original
% image. (b) Foreground pixels according to background model. (c)
% Shadow detection using NCC. (d) Shadow detection using the
% maximum likelihood approach.}
% \label{fig:comparison-shadow-results}
%\end{figure}
%
|
{"hexsha": "5333fc15b6af359b1c45613f011f901c7dc396a7", "size": 41853, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "first-revision/shadow.tex", "max_stars_repo_name": "zkan/dissertation", "max_stars_repo_head_hexsha": "458c5fce241973008bdcc3958bdf962b9197e593", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "first-revision/shadow.tex", "max_issues_repo_name": "zkan/dissertation", "max_issues_repo_head_hexsha": "458c5fce241973008bdcc3958bdf962b9197e593", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "first-revision/shadow.tex", "max_forks_repo_name": "zkan/dissertation", "max_forks_repo_head_hexsha": "458c5fce241973008bdcc3958bdf962b9197e593", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.4517203108, "max_line_length": 85, "alphanum_fraction": 0.7445105488, "num_tokens": 11295}
|
"""Provides an easy way of generating several geometric objects.
CONTAINS
--------
vtkArrowSource
vtkCylinderSource
vtkSphereSource
vtkPlaneSource
vtkLineSource
vtkCubeSource
vtkConeSource
vtkDiskSource
vtkRegularPolygonSource
vtkPyramid
vtkPlatonicSolidSource
vtkSuperquadricSource
as well as some pure-python helpers.
"""
import numpy as np
import pyvista
from pyvista import _vtk
from pyvista.utilities import check_valid_vector
# Unit normal vectors keyed by signed axis name ('x', 'y', 'z' and their
# negatives), e.g. NORMALS['-z'] == [0, 0, -1].
NORMALS = {
    'x': [1, 0, 0],
    'y': [0, 1, 0],
    'z': [0, 0, 1],
    '-x': [-1, 0, 0],
    '-y': [0, -1, 0],
    '-z': [0, 0, -1],
}
def translate(surf, center=(0.0, 0.0, 0.0), direction=(1.0, 0.0, 0.0)):
    """Translate and orient a mesh to a new center and direction.

    By default, the input mesh is considered centered at the origin
    and facing in the x direction. The mesh is modified in place.

    Parameters
    ----------
    surf : pyvista.DataSet
        Mesh to transform in place.
    center : sequence, optional
        New center of the mesh in ``[x, y, z]``.
    direction : sequence, optional
        New direction the mesh faces, in ``[x, y, z]``.
    """
    # NOTE: defaults are tuples (not lists) so the defaults are immutable,
    # matching the signatures of the geometric-source helpers in this file.
    # Build an orthonormal basis whose first axis is the requested direction.
    normx = np.array(direction) / np.linalg.norm(direction)
    # The slightly perturbed y reference vector avoids a degenerate cross
    # product when ``direction`` is (anti)parallel to the y-axis.
    normz = np.cross(normx, [0, 1.0, 0.0000001])
    normz /= np.linalg.norm(normz)
    normy = np.cross(normz, normx)

    # Assemble a 4x4 homogeneous rotation matrix from the basis vectors.
    trans = np.zeros((4, 4))
    trans[:3, 0] = normx
    trans[:3, 1] = normy
    trans[:3, 2] = normz
    trans[3, 3] = 1

    surf.transform(trans)
    if not np.allclose(center, [0.0, 0.0, 0.0]):
        surf.points += np.array(center)
def Cylinder(center=(0.0, 0.0, 0.0), direction=(1.0, 0.0, 0.0),
             radius=0.5, height=1.0, resolution=100, capping=True):
    """Create the surface of a cylinder.

    See also :func:`pyvista.CylinderStructured`.

    Parameters
    ----------
    center : sequence, optional
        Location of the centroid in ``[x, y, z]``.
    direction : sequence, optional
        Direction cylinder points to in ``[x, y, z]``.
    radius : float, optional
        Radius of the cylinder.
    height : float, optional
        Height of the cylinder.
    resolution : int, optional
        Number of points on the circular face of the cylinder.
    capping : bool, optional
        Cap cylinder ends with polygons. Default ``True``.

    Returns
    -------
    pyvista.PolyData
        Cylinder surface.

    Examples
    --------
    >>> import pyvista
    >>> import numpy as np
    >>> cylinder = pyvista.Cylinder(center=[1, 2, 3], direction=[1, 1, 1],
    ...                             radius=1, height=2)
    >>> cylinder.plot(show_edges=True, line_width=5, cpos='xy')
    """
    src = _vtk.vtkCylinderSource()
    src.SetRadius(radius)
    src.SetHeight(height)
    src.SetResolution(resolution)
    src.SetCapping(capping)
    src.Update()
    cylinder = pyvista.wrap(src.GetOutput())
    # The VTK source is aligned with the y-axis; rotate so the mesh faces
    # the x-axis before re-orienting it toward ``direction``.
    cylinder.rotate_z(-90, inplace=True)
    translate(cylinder, center, direction)
    return cylinder
def CylinderStructured(radius=0.5, height=1.0,
                       center=(0.,0.,0.), direction=(1.,0.,0.),
                       theta_resolution=32, z_resolution=10):
    """Create a cylinder mesh as a :class:`pyvista.StructuredGrid`.

    The end caps are left open. This can create a surface mesh if a single
    value for the ``radius`` is given or a 3D mesh if multiple radii are given
    as a list/array in the ``radius`` argument.

    Parameters
    ----------
    radius : float, sequence, optional
        Radius of the cylinder. If a sequence, then describes the
        radial coordinates of the cells as a range of values as
        specified by the ``radius``.
    height : float, optional
        Height of the cylinder along its Z-axis.
    center : sequence
        Location of the centroid in ``[x, y, z]``.
    direction : sequence
        Direction cylinder Z-axis in ``[x, y, z]``.
    theta_resolution : int, optional
        Number of points on the circular face of the cylinder.
        Ignored if ``radius`` is an iterable.
    z_resolution : int, optional
        Number of points along the height (Z-axis) of the cylinder.

    Returns
    -------
    pyvista.StructuredGrid
        Structured cylinder.

    Examples
    --------
    Default structured cylinder

    >>> import pyvista
    >>> mesh = pyvista.CylinderStructured()
    >>> mesh.plot(show_edges=True)

    Structured cylinder with an inner radius of 1, outer of 2, with 5
    segments.

    >>> import numpy as np
    >>> mesh = pyvista.CylinderStructured(radius=np.linspace(1, 2, 5))
    >>> mesh.plot(show_edges=True)
    """
    # Define grid in polar coordinates
    r = np.array([radius]).ravel()
    nr = len(r)
    theta = np.linspace(0, 2*np.pi, num=theta_resolution)
    radius_matrix, theta_matrix = np.meshgrid(r, theta)
    # Transform to cartesian space
    X = radius_matrix * np.cos(theta_matrix)
    Y = radius_matrix * np.sin(theta_matrix)
    # Make all the nodes in the grid: one XY ring layer per z level,
    # with z increasing in steps of ``dz``.
    xx = np.array([X] * z_resolution).ravel()
    yy = np.array([Y] * z_resolution).ravel()
    dz = height / (z_resolution - 1)
    zz = np.full((X.size, z_resolution), dz)
    zz *= np.arange(z_resolution)
    zz = zz.ravel(order='f')
    # Create the grid
    grid = pyvista.StructuredGrid()
    grid.points = np.c_[xx, yy, zz]
    grid.dimensions = [nr, theta_resolution, z_resolution]
    # Orient properly in user direction
    vx = np.array([0., 0., 1.])
    if not np.allclose(vx, direction):
        # Work on a float copy so a caller-supplied ndarray is never
        # normalized in place.
        direction = np.array(direction, dtype=float)
        direction /= np.linalg.norm(direction)
        vx -= vx.dot(direction) * direction
        vx /= np.linalg.norm(vx)
        vy = np.cross(direction, vx)
        rmtx = np.array([vx, vy, direction])
        grid.points = grid.points.dot(rmtx)
    # Translate to given center
    grid.points -= np.array(grid.center)
    grid.points += np.array(center)
    return grid
def Arrow(start=(0., 0., 0.), direction=(1., 0., 0.), tip_length=0.25,
          tip_radius=0.1, tip_resolution=20, shaft_radius=0.05,
          shaft_resolution=20, scale=None):
    """Create an arrow.

    Parameters
    ----------
    start : iterable, optional
        Start location in ``[x, y, z]``.
    direction : iterable, optional
        Direction the arrow points to in ``[x, y, z]``.
    tip_length : float, optional
        Length of the tip.
    tip_radius : float, optional
        Radius of the tip.
    tip_resolution : int, optional
        Number of faces around the tip.
    shaft_radius : float, optional
        Radius of the shaft.
    shaft_resolution : int, optional
        Number of faces around the shaft.
    scale : float or str, optional
        Scale factor of the entire object, default is ``None``
        (i.e. scale of 1). ``'auto'`` scales to length of direction
        array.

    Returns
    -------
    pyvista.PolyData
        Arrow mesh.

    Examples
    --------
    Plot a default arrow.

    >>> import pyvista
    >>> mesh = pyvista.Arrow()
    >>> mesh.plot(show_edges=True)
    """
    # Create arrow object
    arrow = _vtk.vtkArrowSource()
    arrow.SetTipLength(tip_length)
    arrow.SetTipRadius(tip_radius)
    arrow.SetTipResolution(tip_resolution)
    arrow.SetShaftRadius(shaft_radius)
    arrow.SetShaftResolution(shaft_resolution)
    arrow.Update()
    surf = pyvista.wrap(arrow.GetOutput())
    if scale == 'auto':
        scale = float(np.linalg.norm(direction))
    # Single isinstance with a tuple replaces the chained `or`.
    if isinstance(scale, (float, int)):
        surf.points *= scale
    elif scale is not None:
        raise TypeError("Scale must be either float, int or 'auto'.")
    translate(surf, start, direction)
    return surf
def Sphere(radius=0.5, center=(0, 0, 0), direction=(0, 0, 1), theta_resolution=30,
           phi_resolution=30, start_theta=0, end_theta=360, start_phi=0, end_phi=180):
    """Generate a sphere (or partial sphere) surface mesh.

    Parameters
    ----------
    radius : float, optional
        Sphere radius.
    center : np.ndarray or list, optional
        Center location in ``[x, y, z]``.
    direction : list or tuple or np.ndarray, optional
        Direction the top of the sphere points to in ``[x, y, z]``.
    theta_resolution : int, optional
        Number of points in the longitude direction (between
        ``start_theta`` and ``end_theta``).
    phi_resolution : int, optional
        Number of points in the latitude direction (between
        ``start_phi`` and ``end_phi``).
    start_theta : float, optional
        Starting longitude angle.
    end_theta : float, optional
        Ending longitude angle.
    start_phi : float, optional
        Starting latitude angle.
    end_phi : float, optional
        Ending latitude angle.

    Returns
    -------
    pyvista.PolyData
        Sphere mesh.

    Examples
    --------
    Create a sphere using default parameters.

    >>> import pyvista
    >>> sphere = pyvista.Sphere()
    >>> sphere.plot(show_edges=True)

    Create a quarter sphere by setting ``end_theta``.

    >>> sphere = pyvista.Sphere(end_theta=90)
    >>> out = sphere.plot(show_edges=True)
    """
    src = _vtk.vtkSphereSource()
    src.SetRadius(radius)
    src.SetThetaResolution(theta_resolution)
    src.SetPhiResolution(phi_resolution)
    src.SetStartTheta(start_theta)
    src.SetEndTheta(end_theta)
    src.SetStartPhi(start_phi)
    src.SetEndPhi(end_phi)
    src.Update()
    surf = pyvista.wrap(src.GetOutput())
    # Rotate so the pole lies along +x, then orient per ``direction``.
    surf.rotate_y(-90, inplace=True)
    translate(surf, center, direction)
    return surf
def Plane(center=(0, 0, 0), direction=(0, 0, 1), i_size=1, j_size=1,
          i_resolution=10, j_resolution=10):
    """Generate a planar surface mesh.

    Parameters
    ----------
    center : list or tuple or np.ndarray
        Centroid location in ``[x, y, z]``.
    direction : list or tuple or np.ndarray
        Normal of the plane in ``[x, y, z]``.
    i_size : float
        Extent of the plane in the i direction.
    j_size : float
        Extent of the plane in the j direction.
    i_resolution : int
        Number of points along the i direction.
    j_resolution : int
        Number of points along the j direction.

    Returns
    -------
    pyvista.PolyData
        Plane mesh.

    Examples
    --------
    Create a default plane.

    >>> import pyvista
    >>> mesh = pyvista.Plane()
    >>> mesh.point_data.clear()
    >>> mesh.plot(show_edges=True)
    """
    src = _vtk.vtkPlaneSource()
    src.SetXResolution(i_resolution)
    src.SetYResolution(j_resolution)
    src.Update()
    surf = pyvista.wrap(src.GetOutput())
    # Scale the unit plane to the requested size, then orient it.
    surf.points[:, 0] *= i_size
    surf.points[:, 1] *= j_size
    surf.rotate_y(-90, inplace=True)
    translate(surf, center, direction)
    return surf
def Line(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1):
    """Create a line.

    Parameters
    ----------
    pointa : np.ndarray or list, optional
        Location in ``[x, y, z]``.
    pointb : np.ndarray or list, optional
        Location in ``[x, y, z]``.
    resolution : int, optional
        Number of pieces to divide line into.

    Returns
    -------
    pyvista.PolyData
        Line mesh with a ``'Distance'`` point array giving the distance
        of each point from ``pointa``.

    Raises
    ------
    ValueError
        If ``resolution`` is not positive.
    TypeError
        If either point is not a length-three sequence.

    Examples
    --------
    Create a line between ``(0, 0, 0)`` and ``(0, 0, 1)``.

    >>> import pyvista
    >>> mesh = pyvista.Line((0, 0, 0), (0, 0, 1))
    >>> mesh.plot(color='k', line_width=10)
    """
    if resolution <= 0:
        raise ValueError('Resolution must be positive')
    if np.array(pointa).size != 3:
        raise TypeError('Point A must be a length three tuple of floats.')
    if np.array(pointb).size != 3:
        raise TypeError('Point B must be a length three tuple of floats.')
    src = _vtk.vtkLineSource()
    src.SetPoint1(*pointa)
    src.SetPoint2(*pointb)
    src.SetResolution(resolution)
    src.Update()
    line = pyvista.wrap(src.GetOutput())
    # Distance of every point along line from pointa (replaces an
    # assigned lambda, per PEP 8 E731).
    line['Distance'] = np.linalg.norm(line.points - np.array(pointa), axis=1)
    return line
def Tube(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1, radius=1.0, n_sides=15):
    """Create a tube.

    Parameters
    ----------
    pointa : np.ndarray or list, optional
        Location in ``[x, y, z]``.
    pointb : np.ndarray or list, optional
        Location in ``[x, y, z]``.
    resolution : int, optional
        Number of pieces to divide tube into.
    radius : float, optional
        Minimum tube radius (minimum because the tube radius may vary).
    n_sides : int, optional
        Number of sides for the tube.

    Returns
    -------
    pyvista.PolyData
        Tube mesh.

    Raises
    ------
    ValueError
        If ``resolution`` is not positive or ``n_sides`` is less than 3.
    TypeError
        If either point is not a length-three sequence.

    Examples
    --------
    Create a tube between ``(0, 0, 0)`` and ``(0, 0, 1)``.

    >>> import pyvista
    >>> mesh = pyvista.Tube((0, 0, 0), (0, 0, 1))
    >>> mesh.plot()
    """
    # Validate all arguments before building any VTK objects.
    if resolution <= 0:
        raise ValueError('Resolution must be positive.')
    if n_sides < 3:
        raise ValueError('Number of sides `n_sides` must be >= 3')
    if np.array(pointa).size != 3:
        raise TypeError('Point A must be a length three tuple of floats.')
    if np.array(pointb).size != 3:
        raise TypeError('Point B must be a length three tuple of floats.')
    line_src = _vtk.vtkLineSource()
    line_src.SetPoint1(*pointa)
    line_src.SetPoint2(*pointb)
    line_src.SetResolution(resolution)
    line_src.Update()
    tube_filter = _vtk.vtkTubeFilter()
    tube_filter.SetInputConnection(line_src.GetOutputPort())
    tube_filter.SetRadius(radius)
    tube_filter.SetNumberOfSides(n_sides)
    tube_filter.Update()
    return pyvista.wrap(tube_filter.GetOutput())
def Cube(center=(0.0, 0.0, 0.0), x_length=1.0, y_length=1.0,
         z_length=1.0, bounds=None, clean=True):
    """Create a cube.

    It's possible to specify either the center and side lengths or
    just the bounds of the cube. If ``bounds`` are given, all other
    arguments are ignored.

    .. versionchanged:: 0.33.0
       The cube is created using ``vtk.vtkCubeSource``. For
       compatibility with :func:`pyvista.PlatonicSolid`, face indices
       are also added as cell data. For full compatibility with
       :func:`PlatonicSolid() <pyvista.PlatonicSolid>`, one has to
       use ``x_length = y_length = z_length = 2 * radius / 3**0.5``.
       The cube points are also cleaned by default now, leaving only
       the 8 corners and a watertight (manifold) mesh.

    Parameters
    ----------
    center : sequence, optional
        Center in ``[x, y, z]``.
    x_length : float, optional
        Length of the cube in the x-direction.
    y_length : float, optional
        Length of the cube in the y-direction.
    z_length : float, optional
        Length of the cube in the z-direction.
    bounds : sequence, optional
        Specify the bounding box of the cube. If given, all other size
        arguments are ignored. ``(xMin, xMax, yMin, yMax, zMin, zMax)``.
    clean : bool, optional
        Whether to clean the raw points of the mesh, making the cube
        manifold. Note that this will degrade the texture coordinates
        that come with the mesh, so if you plan to map a texture on
        the cube, consider setting this to ``False``.

        .. versionadded:: 0.33.0

    Returns
    -------
    pyvista.PolyData
        Mesh of the cube.

    Examples
    --------
    Create a default cube.

    >>> import pyvista
    >>> mesh = pyvista.Cube()
    >>> mesh.plot(show_edges=True, line_width=5)
    """
    src = _vtk.vtkCubeSource()
    if bounds is not None:
        if np.array(bounds).size != 6:
            raise TypeError('Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)')
        src.SetBounds(bounds)
    else:
        src.SetCenter(center)
        src.SetXLength(x_length)
        src.SetYLength(y_length)
        src.SetZLength(z_length)
    src.Update()
    cube = pyvista.wrap(src.GetOutput())
    # add face index data for compatibility with PlatonicSolid
    # but make it inactive for backwards compatibility.
    # The array name must be a string, not a one-element list.
    cube.cell_data.set_array([1, 4, 0, 3, 5, 2], 'FaceIndex')
    # clean duplicate points
    if clean:
        cube.clean(inplace=True)
    return cube
def Box(bounds=(-1., 1., -1., 1., -1., 1.), level=0, quads=True):
    """Generate a box with solid faces for the given bounds.

    Parameters
    ----------
    bounds : iterable, optional
        Bounding box of the cube as
        ``(xMin, xMax, yMin, yMax, zMin, zMax)``.
    level : int, optional
        Subdivision level of the faces.
    quads : bool, optional
        If ``True`` (default), faces are quads; otherwise each set of
        four points becomes two triangles.

    Returns
    -------
    pyvista.PolyData
        Mesh of the box.

    Examples
    --------
    Create a box with subdivision ``level=2``.

    >>> import pyvista
    >>> mesh = pyvista.Box(level=2)
    >>> mesh.plot(show_edges=True)
    """
    if np.array(bounds).size != 6:
        raise TypeError('Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)')
    src = _vtk.vtkTessellatedBoxSource()
    src.SetLevel(level)
    # Toggle quad vs. triangle output on the source.
    (src.QuadsOn if quads else src.QuadsOff)()
    src.SetBounds(bounds)
    src.Update()
    return pyvista.wrap(src.GetOutput())
def Cone(center=(0., 0., 0.), direction=(1., 0., 0.), height=1.0, radius=None,
         capping=True, angle=None, resolution=6):
    """Create a cone.

    Parameters
    ----------
    center : iterable, optional
        Center in ``[x, y, z]``. Axis of the cone passes through this
        point.
    direction : iterable, optional
        Direction vector in ``[x, y, z]``. Orientation vector of the
        cone.
    height : float, optional
        Height along the cone in its specified direction.
    radius : float, optional
        Base radius of the cone.
    capping : bool, optional
        Enable or disable the capping the base of the cone with a
        polygon.
    angle : float, optional
        The angle in degrees between the axis of the cone and a
        generatrix.
    resolution : int, optional
        Number of facets used to represent the cone.

    Returns
    -------
    pyvista.PolyData
        Cone mesh.

    Raises
    ------
    ValueError
        If both ``radius`` and ``angle`` are specified.

    Examples
    --------
    Create a default Cone.

    >>> import pyvista
    >>> mesh = pyvista.Cone()
    >>> mesh.plot(show_edges=True, line_width=5)
    """
    src = _vtk.vtkConeSource()
    src.SetCapping(capping)
    src.SetDirection(direction)
    src.SetCenter(center)
    src.SetHeight(height)
    # Compare against None explicitly so legitimate zero values of
    # ``angle``/``radius`` are not silently treated as "not given".
    if angle is not None and radius is not None:
        raise ValueError("Both radius and angle specified. They are mutually exclusive.")
    elif angle is not None:
        src.SetAngle(angle)
    elif radius is not None:
        src.SetRadius(radius)
    else:
        # Neither given: fall back to the default radius.
        src.SetRadius(0.5)
    src.SetResolution(resolution)
    src.Update()
    return pyvista.wrap(src.GetOutput())
def Polygon(center=(0., 0., 0.), radius=1, normal=(0, 0, 1), n_sides=6):
    """Generate a regular polygon.

    Parameters
    ----------
    center : iterable, optional
        Center in ``[x, y, z]``; the polygon's central axis passes
        through this point.
    radius : float, optional
        Polygon radius.
    normal : iterable, optional
        Orientation (normal) vector of the polygon in ``[x, y, z]``.
    n_sides : int, optional
        Number of polygon sides.

    Returns
    -------
    pyvista.PolyData
        Mesh of the polygon.

    Examples
    --------
    Create an 8 sided polygon.

    >>> import pyvista
    >>> mesh = pyvista.Polygon(n_sides=8)
    >>> mesh.plot(show_edges=True, line_width=5)
    """
    source = _vtk.vtkRegularPolygonSource()
    source.SetNumberOfSides(n_sides)
    source.SetRadius(radius)
    source.SetCenter(center)
    source.SetNormal(normal)
    source.Update()
    return pyvista.wrap(source.GetOutput())
def Disc(center=(0., 0., 0.), inner=0.25, outer=0.5, normal=(0, 0, 1), r_res=1,
         c_res=6):
    """Generate a polygonal disk with a hole in the center.

    The disk has zero height. The inner and outer radius and the radial
    and circumferential resolution of the polygonal representation can
    all be specified.

    Parameters
    ----------
    center : iterable
        Center in ``[x, y, z]``; middle of the axis of the disc.
    inner : float, optional
        Inner radius.
    outer : float, optional
        Outer radius.
    normal : iterable
        Orientation vector of the disc in ``[x, y, z]``.
    r_res : int, optional
        Number of points in the radial direction.
    c_res : int, optional
        Number of points in the circumferential direction.

    Returns
    -------
    pyvista.PolyData
        Disk mesh.

    Examples
    --------
    Create a disc with 50 points in the circumferential direction.

    >>> import pyvista
    >>> mesh = pyvista.Disc(c_res=50)
    >>> mesh.plot(show_edges=True, line_width=5)
    """
    source = _vtk.vtkDiskSource()
    source.SetInnerRadius(inner)
    source.SetOuterRadius(outer)
    source.SetRadialResolution(r_res)
    source.SetCircumferentialResolution(c_res)
    source.Update()
    surf = pyvista.wrap(source.GetOutput())
    # Rotate the disc off the xy plane and orient it per ``normal``.
    surf.rotate_y(90, inplace=True)
    translate(surf, np.array(center), np.array(normal))
    return surf
def Text3D(string, depth=0.5):
    """Generate an extruded 3D text mesh from a string.

    Parameters
    ----------
    string : str
        Text to turn into a 3D mesh.
    depth : float, optional
        Extrusion depth of the text. Defaults to ``0.5``.

    Returns
    -------
    pyvista.PolyData
        3D text mesh.

    Examples
    --------
    >>> import pyvista
    >>> text_mesh = pyvista.Text3D('PyVista')
    >>> text_mesh.plot(cpos='xy')
    """
    # Vector text -> linear extrusion along +z -> triangulation.
    text_source = _vtk.vtkVectorText()
    text_source.SetText(string)
    extruder = _vtk.vtkLinearExtrusionFilter()
    extruder.SetInputConnection(text_source.GetOutputPort())
    extruder.SetExtrusionTypeToNormalExtrusion()
    extruder.SetVector(0, 0, 1)
    extruder.SetScaleFactor(depth)
    triangulator = _vtk.vtkTriangleFilter()
    triangulator.SetInputConnection(extruder.GetOutputPort())
    triangulator.Update()
    return pyvista.wrap(triangulator.GetOutput())
def Wavelet(extent=(-10, 10, -10, 10, -10, 10), center=(0, 0, 0), maximum=255,
            x_freq=60, y_freq=30, z_freq=40, x_mag=10, y_mag=18, z_mag=5,
            std=0.5, subsample_rate=1):
    """Create a wavelet.

    Produces images with pixel values determined by
    ``Maximum*Gaussian*x_mag*sin(x_freq*x)*sin(y_freq*y)*cos(z_freq*z)``

    Values are float scalars on point data with name ``"RTData"``.

    Parameters
    ----------
    extent : sequence, optional
        Set/Get the extent of the whole output image. Default
        ``(-10, 10, -10, 10, -10, 10)``.
    center : list, optional
        Center of the wavelet.
    maximum : float, optional
        Maximum of the wavelet function.
    x_freq : float, optional
        Natural frequency in the x direction.
    y_freq : float, optional
        Natural frequency in the y direction.
    z_freq : float, optional
        Natural frequency in the z direction.
    x_mag : float, optional
        Magnitude in the x direction.
    y_mag : float, optional
        Magnitude in the y direction.
    z_mag : float, optional
        Magnitude in the z direction.
    std : float, optional
        Standard deviation.
    subsample_rate : int, optional
        The sub-sample rate.

    Returns
    -------
    pyvista.UniformGrid
        Uniform grid (image data) with ``"RTData"`` point scalars.
        ``vtkRTAnalyticSource`` emits image data, not polydata.

    Examples
    --------
    >>> import pyvista
    >>> wavelet = pyvista.Wavelet(extent=(0, 50, 0, 50, 0, 10), x_freq=20,
    ...                           y_freq=10, z_freq=1, x_mag=100, y_mag=100,
    ...                           z_mag=1000)
    >>> wavelet.plot(show_scalar_bar=False)

    Extract lower valued cells of the wavelet and create a surface from it.

    >>> thresh = wavelet.threshold(800).extract_surface()
    >>> thresh.plot(show_scalar_bar=False)

    Smooth it to create "waves"

    >>> waves = thresh.smooth(n_iter=100, relaxation_factor=0.1)
    >>> waves.plot(color='white', smooth_shading=True, show_edges=True)
    """
    wavelet_source = _vtk.vtkRTAnalyticSource()
    wavelet_source.SetWholeExtent(*extent)
    wavelet_source.SetCenter(center)
    wavelet_source.SetMaximum(maximum)
    wavelet_source.SetXFreq(x_freq)
    wavelet_source.SetYFreq(y_freq)
    wavelet_source.SetZFreq(z_freq)
    wavelet_source.SetXMag(x_mag)
    wavelet_source.SetYMag(y_mag)
    wavelet_source.SetZMag(z_mag)
    wavelet_source.SetStandardDeviation(std)
    wavelet_source.SetSubsampleRate(subsample_rate)
    wavelet_source.Update()
    return pyvista.wrap(wavelet_source.GetOutput())
def CircularArc(pointa, pointb, center, resolution=100, negative=False):
    """Create a circular arc defined by two endpoints and a center.

    The number of segments composing the polyline is controlled by
    setting the object resolution.

    Parameters
    ----------
    pointa : sequence
        Position of the first end point.
    pointb : sequence
        Position of the other end point.
    center : sequence
        Center of the circle that defines the arc.
    resolution : int, optional
        The number of segments of the polyline that draws the arc.
        Resolution of 1 will just create a line.
    negative : bool, optional
        By default the arc spans the shortest angular sector between
        ``pointa`` and ``pointb``.

        By setting this to ``True``, the longest angular sector is
        used instead (i.e. the negative coterminal angle to the
        shortest one).

    Returns
    -------
    pyvista.PolyData
        Circular arc mesh with a ``'Distance'`` point array giving the
        arc length to each point.

    Raises
    ------
    ValueError
        If ``pointa`` and ``pointb`` are not equidistant from ``center``.

    Examples
    --------
    Create a quarter arc centered at the origin in the xy plane.

    >>> import pyvista
    >>> arc = pyvista.CircularArc([-1, 0, 0], [0, 1, 0], [0, 0, 0])
    >>> pl = pyvista.Plotter()
    >>> _ = pl.add_mesh(arc, color='k', line_width=10)
    >>> _ = pl.show_bounds(location='all', font_size=30, use_2d=True)
    >>> _ = pl.view_xy()
    >>> pl.show()
    """
    check_valid_vector(pointa, 'pointa')
    check_valid_vector(pointb, 'pointb')
    check_valid_vector(center, 'center')
    if not np.isclose(
        np.linalg.norm(np.array(pointa) - np.array(center)),
        np.linalg.norm(np.array(pointb) - np.array(center)),
    ):
        raise ValueError("pointa and pointb are not equidistant from center")
    # fix half-arc bug: if a half arc travels directly through the
    # center point, it becomes a line
    pointb = list(pointb)
    pointb[0] -= 1E-10
    pointb[1] -= 1E-10
    arc = _vtk.vtkArcSource()
    arc.SetPoint1(*pointa)
    arc.SetPoint2(*pointb)
    arc.SetCenter(*center)
    arc.SetResolution(resolution)
    arc.SetNegative(negative)
    arc.Update()
    angle = np.deg2rad(arc.GetAngle())
    arc = pyvista.wrap(arc.GetOutput())
    # Compute distance of every point along circular arc.  linspace
    # yields exactly resolution + 1 samples, matching the arc's point
    # count (np.arange with a float step could produce one extra sample
    # from rounding) and matching CircularArcFromNormal.
    center = np.array(center).ravel()
    radius = np.sqrt(np.sum((arc.points[0] - center)**2, axis=0))
    angles = np.linspace(0.0, angle, resolution + 1)
    arc['Distance'] = radius * angles
    return arc
def CircularArcFromNormal(center, resolution=100, normal=None,
                          polar=None, angle=None):
    """Create a circular arc defined by normal to the plane of the arc, and an angle.

    The number of segments composing the polyline is controlled by
    setting the object resolution.

    Parameters
    ----------
    center : sequence
        Center of the circle that defines the arc.
    resolution : int, optional
        Number of segments in the polyline drawing the arc; a
        resolution of 1 is just a line.
    normal : sequence, optional
        Normal vector of the plane of the arc. Defaults to the
        positive Z direction.
    polar : sequence, optional
        Starting point of the arc in polar coordinates. Defaults to
        the unit vector in the positive x direction.
    angle : float, optional
        Arc length in degrees, counterclockwise from the polar
        vector. Defaults to 90.

    Returns
    -------
    pyvista.PolyData
        Circular arc mesh.

    Examples
    --------
    Quarter arc centered at the origin in the xy plane.

    >>> import pyvista
    >>> normal = [0, 0, 1]
    >>> polar = [-1, 0, 0]
    >>> arc = pyvista.CircularArcFromNormal([0, 0, 0], normal=normal, polar=polar)
    >>> pl = pyvista.Plotter()
    >>> _ = pl.add_mesh(arc, color='k', line_width=10)
    >>> _ = pl.show_bounds(location='all', font_size=30, use_2d=True)
    >>> _ = pl.view_xy()
    >>> pl.show()
    """
    # Fill in defaults, then validate every vector up front.
    normal = [0, 0, 1] if normal is None else normal
    polar = [1, 0, 0] if polar is None else polar
    angle = 90.0 if angle is None else angle
    check_valid_vector(center, 'center')
    check_valid_vector(normal, 'normal')
    check_valid_vector(polar, 'polar')

    source = _vtk.vtkArcSource()
    source.SetCenter(*center)
    source.SetResolution(resolution)
    source.UseNormalAndAngleOn()
    source.SetNormal(*normal)
    source.SetPolarVector(*polar)
    source.SetAngle(angle)
    source.Update()
    angle = np.deg2rad(source.GetAngle())
    arc = pyvista.wrap(source.GetOutput())
    # Arc length from the starting point to each of the arc's points.
    center = np.array(center)
    radius = np.sqrt(np.sum((arc.points[0] - center)**2, axis=0))
    arc['Distance'] = radius * np.linspace(0.0, angle, resolution + 1)
    return arc
def Pyramid(points=None):
    """Create a pyramid defined by 5 points.

    Parameters
    ----------
    points : sequence, optional
        Points of the pyramid, ordered so that the first four are the
        counterclockwise corners of the quadrilateral face and the
        fifth is the apex. Defaults to the pyramid in the example.

    Returns
    -------
    pyvista.UnstructuredGrid
        Unstructured grid containing a single pyramid cell.

    Examples
    --------
    >>> import pyvista
    >>> pointa = [1.0, 1.0, 0.0]
    >>> pointb = [-1.0, 1.0, 0.0]
    >>> pointc = [-1.0, -1.0, 0.0]
    >>> pointd = [1.0, -1.0, 0.0]
    >>> pointe = [0.0, 0.0, 1.608]
    >>> pyramid = pyvista.Pyramid([pointa, pointb, pointc, pointd, pointe])
    >>> pyramid.plot(show_edges=True, line_width=5)
    """
    if points is None:
        points = [[1.0, 1.0, 0.0],
                  [-1.0, 1.0, 0.0],
                  [-1.0, -1.0, 0.0],
                  [1.0, -1.0, 0.0],
                  [0.0, 0.0, (4 - 2**0.5)**0.5]]
    if len(points) != 5:
        raise TypeError('Points must be given as length 5 np.ndarray or list')
    for i, point in enumerate(points):
        check_valid_vector(point, f'points[{i}]')

    # Build a single-pyramid unstructured grid; the cell simply maps
    # point ids 0..4 onto the five points in order.
    pyramid = _vtk.vtkPyramid()
    point_ids = pyramid.GetPointIds()
    for i in range(5):
        point_ids.SetId(i, i)
    ug = _vtk.vtkUnstructuredGrid()
    ug.SetPoints(pyvista.vtk_points(np.array(points), False))
    ug.InsertNextCell(pyramid.GetCellType(), pyramid.GetPointIds())
    return pyvista.wrap(ug)
def Triangle(points=None):
    """Create a triangle defined by 3 points.

    Parameters
    ----------
    points : sequence, optional
        Points of the triangle. Defaults to a right isosceles
        triangle (see example).

    Returns
    -------
    pyvista.PolyData
        Triangle mesh.

    Examples
    --------
    >>> import pyvista
    >>> pointa = [0, 0, 0]
    >>> pointb = [1, 0, 0]
    >>> pointc = [0.5, 0.707, 0]
    >>> triangle = pyvista.Triangle([pointa, pointb, pointc])
    >>> triangle.plot(show_edges=True, line_width=5)
    """
    if points is None:
        points = [[0, 0, 0], [1, 0, 0], [0.5, 0.5**0.5, 0]]
    if len(points) != 3:
        raise TypeError('Points must be given as length 3 np.ndarray or list')
    for i, point in enumerate(points):
        check_valid_vector(point, f'points[{i}]')
    # One triangular cell referencing all three points.
    cells = np.array([[3, 0, 1, 2]])
    return pyvista.wrap(pyvista.PolyData(points, cells))
def Rectangle(points=None):
    """Create a rectangle defined by 4 points.

    Parameters
    ----------
    points : sequence, optional
        Points of the rectangle. Defaults to a unit square.

    Returns
    -------
    pyvista.PolyData
        Rectangle mesh.

    Examples
    --------
    >>> import pyvista
    >>> pointa = [1.0, 0.0, 0.0]
    >>> pointb = [1.0, 1.0, 0.0]
    >>> pointc = [0.0, 1.0, 0.0]
    >>> pointd = [0.0, 0.0, 0.0]
    >>> rectangle = pyvista.Rectangle([pointa, pointb, pointc, pointd])
    >>> rectangle.plot(show_edges=True, line_width=5)
    """
    if points is None:
        points = [[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]
    if len(points) != 4:
        raise TypeError('Points must be given as length 4 np.ndarray or list')
    for i, point in enumerate(points):
        check_valid_vector(point, f'points[{i}]')
    # One quad cell referencing all four points.
    cells = np.array([[4, 0, 1, 2, 3]])
    return pyvista.wrap(pyvista.PolyData(points, cells))
def Circle(radius=0.5, resolution=100):
    """Create a single PolyData circle defined by radius in the XY plane.

    Parameters
    ----------
    radius : float, optional
        Radius of circle.
    resolution : int, optional
        Number of points on the circle.

    Returns
    -------
    pyvista.PolyData
        Circle mesh.

    Examples
    --------
    >>> import pyvista
    >>> radius = 0.5
    >>> circle = pyvista.Circle(radius)
    >>> circle.plot(show_edges=True, line_width=5)
    """
    points = np.zeros((resolution, 3))
    # Exclude the endpoint: theta = 0 and theta = 2*pi are the same
    # point, and including both created a duplicate point and uneven
    # spacing around the circle.
    theta = np.linspace(0.0, 2.0*np.pi, resolution, endpoint=False)
    points[:, 0] = radius * np.cos(theta)
    points[:, 1] = radius * np.sin(theta)
    cells = np.array([np.append(np.array([resolution]), np.arange(resolution))])
    return pyvista.wrap(pyvista.PolyData(points, cells))
def Superquadric(center=(0., 0., 0.), scale=(1., 1., 1.), size=0.5,
                 theta_roundness=1., phi_roundness=1.,
                 theta_resolution=16, phi_resolution=16,
                 toroidal=False, thickness=1/3):
    """Generate a superquadric surface.

    Parameters
    ----------
    center : iterable, optional
        Center of the superquadric in ``[x, y, z]``.
    scale : iterable, optional
        Per-axis scale factors in ``[x, y, z]``.
    size : float, optional
        Isotropic size.
    theta_roundness : float, optional
        East/west roundness: 0 is rectangular, 1 circular, higher
        values give higher-order shapes.
    phi_roundness : float, optional
        North/south roundness: 0 is rectangular, 1 circular, higher
        values give higher-order shapes.
    theta_resolution : int, optional
        Number of points in the longitude direction, rounded to the
        nearest multiple of 4.
    phi_resolution : int, optional
        Number of points in the latitude direction, rounded to the
        nearest multiple of 8.
    toroidal : bool, optional
        ``True`` for a toroidal superquadric, ``False`` (default) for
        an ellipsoidal one.
    thickness : float, optional
        Ring thickness; only used when ``toroidal`` is ``True``.

    Returns
    -------
    pyvista.PolyData
        Superquadric mesh.

    See Also
    --------
    pyvista.ParametricSuperEllipsoid :
        Parametric superquadric if toroidal is ``False``.
    pyvista.ParametricSuperToroid :
        Parametric superquadric if toroidal is ``True``.

    Examples
    --------
    >>> import pyvista
    >>> superquadric = pyvista.Superquadric(scale=(3., 1., 0.5),
    ...                                     phi_roundness=0.1,
    ...                                     theta_roundness=0.5)
    >>> superquadric.plot(show_edges=True)
    """
    source = _vtk.vtkSuperquadricSource()
    source.SetCenter(center)
    source.SetScale(scale)
    source.SetSize(size)
    source.SetThetaRoundness(theta_roundness)
    source.SetPhiRoundness(phi_roundness)
    # VTK expects resolutions in multiples of 4 (theta) and 8 (phi).
    source.SetThetaResolution(round(theta_resolution / 4) * 4)
    source.SetPhiResolution(round(phi_resolution / 8) * 8)
    source.SetToroidal(toroidal)
    source.SetThickness(thickness)
    source.Update()
    return pyvista.wrap(source.GetOutput())
def PlatonicSolid(kind='tetrahedron', radius=1.0, center=(0.0, 0.0, 0.0)):
    """Create a Platonic solid of a given size.

    Parameters
    ----------
    kind : str or int, optional
        Which Platonic solid to create, as a name or an index:

        * ``'tetrahedron'`` or ``0``
        * ``'cube'`` or ``1``
        * ``'octahedron'`` or ``2``
        * ``'icosahedron'`` or ``3``
        * ``'dodecahedron'`` or ``4``

    radius : float, optional
        Radius of the circumscribed sphere of the solid.
    center : sequence, optional
        Three-length sequence defining the center of the solid.

    Returns
    -------
    pyvista.PolyData
        One of the five Platonic solids. Cell scalars are defined that
        assign integer labels to each face (with array name
        ``"FaceIndex"``).

    Examples
    --------
    Create and plot a dodecahedron.

    >>> import pyvista
    >>> dodeca = pyvista.PlatonicSolid('dodecahedron')
    >>> dodeca.plot(categories=True)

    See :ref:`platonic_example` for more examples using this filter.
    """
    kinds = {
        'tetrahedron': 0,
        'cube': 1,
        'octahedron': 2,
        'icosahedron': 3,
        'dodecahedron': 4,
    }
    # Normalize ``kind`` to an integer solid index, validating as we go.
    if isinstance(kind, str):
        if kind not in kinds:
            raise ValueError(f'Invalid Platonic solid kind "{kind}".')
        kind = kinds[kind]
    elif isinstance(kind, int):
        if kind not in range(5):
            raise ValueError(f'Invalid Platonic solid index "{kind}".')
    else:
        raise ValueError('Invalid Platonic solid index type '
                         f'"{type(kind).__name__}".')
    check_valid_vector(center, 'center')

    source = _vtk.vtkPlatonicSolidSource()
    source.SetSolidType(kind)
    source.Update()
    solid = pyvista.wrap(source.GetOutput())
    # The VTK source is unit sized at the origin; scale and recenter.
    solid.scale(radius, inplace=True)
    solid.points += np.asanyarray(center) - solid.center
    # rename and activate cell scalars
    face_labels = solid.get_array(0)
    solid.clear_data()
    solid.cell_data['FaceIndex'] = face_labels
    return solid
def Tetrahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
    """Create a tetrahedron of a given size.

    A tetrahedron has four congruent equilateral triangle faces.

    Parameters
    ----------
    radius : float, optional
        Radius of the circumscribed sphere of the tetrahedron.
    center : sequence, optional
        Three-length sequence defining the center of the tetrahedron.

    Returns
    -------
    pyvista.PolyData
        Mesh for the tetrahedron. Cell scalars are defined that assign
        integer labels to each face (with array name ``"FaceIndex"``).

    Examples
    --------
    Create and plot a tetrahedron.

    >>> import pyvista
    >>> tetra = pyvista.Tetrahedron()
    >>> tetra.plot(categories=True)

    See :ref:`platonic_example` for more examples using this filter.
    """
    # Thin convenience wrapper around PlatonicSolid.
    return PlatonicSolid('tetrahedron', radius, center)
def Octahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
    """Create an octahedron of a given size.

    An octahedron has eight congruent equilateral triangle faces.

    Parameters
    ----------
    radius : float, optional
        Radius of the circumscribed sphere of the octahedron.
    center : sequence, optional
        Three-length sequence defining the center of the octahedron.

    Returns
    -------
    pyvista.PolyData
        Mesh for the octahedron. Cell scalars are defined that assign
        integer labels to each face (with array name ``"FaceIndex"``).

    Examples
    --------
    Create and plot an octahedron.

    >>> import pyvista
    >>> tetra = pyvista.Octahedron()
    >>> tetra.plot(categories=True)

    See :ref:`platonic_example` for more examples using this filter.
    """
    # Thin convenience wrapper around PlatonicSolid.
    return PlatonicSolid('octahedron', radius, center)
def Dodecahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
    """Create a dodecahedron of a given size.

    A dodecahedron is composed of twelve congruent regular pentagons.

    Parameters
    ----------
    radius : float, optional
        The radius of the circumscribed sphere for the dodecahedron.

    center : sequence, optional
        Three-length sequence defining the center of the dodecahedron.

    Returns
    -------
    pyvista.PolyData
        Mesh for the dodecahedron. Cell scalars are defined that assign
        integer labels to each face (with array name ``"FaceIndex"``).

    Examples
    --------
    Create and plot a dodecahedron.

    >>> import pyvista
    >>> dodeca = pyvista.Dodecahedron()
    >>> dodeca.plot(categories=True)

    See :ref:`platonic_example` for more examples using this filter.

    """
    # fix: the docstring example previously named the result `tetra`
    # (copy-paste from Tetrahedron); renamed to `dodeca`.
    return PlatonicSolid(kind='dodecahedron', radius=radius, center=center)
def Icosahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
    """Create an icosahedron of a given size.

    An icosahedron is composed of twenty congruent equilateral
    triangles.

    Parameters
    ----------
    radius : float, optional
        The radius of the circumscribed sphere for the icosahedron.

    center : sequence, optional
        Three-length sequence defining the center of the icosahedron.

    Returns
    -------
    pyvista.PolyData
        Mesh for the icosahedron. Cell scalars are defined that assign
        integer labels to each face (with array name ``"FaceIndex"``).

    Examples
    --------
    Create and plot an icosahedron.

    >>> import pyvista
    >>> icosa = pyvista.Icosahedron()
    >>> icosa.plot(categories=True)

    See :ref:`platonic_example` for more examples using this filter.

    """
    # fix: the docstring example previously named the result `tetra`
    # (copy-paste from Tetrahedron); renamed to `icosa`.
    return PlatonicSolid(kind='icosahedron', radius=radius, center=center)
|
{"hexsha": "d9f6103fa8f9bd7245bcf4a74d66e9b71bc04dcd", "size": 42631, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyvista/utilities/geometric_objects.py", "max_stars_repo_name": "basnijholt/pyvista", "max_stars_repo_head_hexsha": "b1786b99217137e2c67566f5c09374c7a810f597", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1107, "max_stars_repo_stars_event_min_datetime": "2019-05-13T06:40:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T22:16:32.000Z", "max_issues_repo_path": "pyvista/utilities/geometric_objects.py", "max_issues_repo_name": "basnijholt/pyvista", "max_issues_repo_head_hexsha": "b1786b99217137e2c67566f5c09374c7a810f597", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1709, "max_issues_repo_issues_event_min_datetime": "2019-05-13T05:52:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:16:53.000Z", "max_forks_repo_path": "pyvista/utilities/geometric_objects.py", "max_forks_repo_name": "basnijholt/pyvista", "max_forks_repo_head_hexsha": "b1786b99217137e2c67566f5c09374c7a810f597", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 225, "max_forks_repo_forks_event_min_datetime": "2019-05-16T04:24:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T18:14:02.000Z", "avg_line_length": 28.4206666667, "max_line_length": 107, "alphanum_fraction": 0.6220825221, "include": true, "reason": "import numpy", "num_tokens": 11363}
|
__author__ = 'sibirrer'
import numpy as np
import numpy.testing as npt
from lenstronomy.Util import util
from lenstronomy.ImSim.Numerics.grid import AdaptiveGrid
from lenstronomy.ImSim.Numerics.grid import RegularGrid
from lenstronomy.LightModel.light_model import LightModel
import pytest
class TestAdaptiveGrid(object):
    """Tests for :class:`AdaptiveGrid` with a single supersampled pixel."""

    def setup(self):
        # 11x11 grid with unit pixel size; only pixel (5, 5) is flagged
        # for supersampling with a factor of 4.
        deltaPix = 1.
        transform_pix2angle = np.array([[1, 0], [0, 1]]) * deltaPix
        ra_at_xy_0, dec_at_xy_0 = -5, -5
        nx, ny = 11, 11
        self._supersampling_factor = 4
        supersampling_indexes = np.zeros((nx, ny))
        supersampling_indexes = np.array(supersampling_indexes, dtype=bool)
        supersampling_indexes[5, 5] = True
        self._supersampling_indexes = supersampling_indexes
        self.nx, self.ny = nx, ny
        self._adaptive_grid = AdaptiveGrid(nx, ny, transform_pix2angle, ra_at_xy_0, dec_at_xy_0,
                                           supersampling_indexes, self._supersampling_factor)

    def test_coordinates_evaluate(self):
        # one low-res pixel is replaced by factor**2 subpixels
        x_grid, y_grid = self._adaptive_grid.coordinates_evaluate
        print(np.shape(x_grid), 'test shape')
        assert len(x_grid) == self._supersampling_factor ** 2 + self.nx * self.ny - 1

    def test_subpixel_coordinates(self):
        # subpixel offsets relative to the pixel center for factor 4
        subpixel_x, subpixel_y = self._adaptive_grid._high_res_coordinates
        assert len(subpixel_x) == 4 ** 2
        assert subpixel_x[0] == -0.375
        assert subpixel_y[0] == -0.375
        assert subpixel_y[3] == -0.375
        assert subpixel_x[3] == 0.375

    def test_average_subgrid(self):
        # averaging the 16 subpixel values yields one supersampled pixel value
        subpixel_x, subpixel_y = self._adaptive_grid._high_res_coordinates
        model = LightModel(light_model_list=['GAUSSIAN'])
        kwargs_light = [{'center_x': 0, 'center_y': 0, 'sigma': 1, 'amp': 1}]
        subgrid_values = model.surface_brightness(subpixel_x, subpixel_y, kwargs_light)
        supersampled_values = self._adaptive_grid._average_subgrid(subgrid_values)
        assert len(supersampled_values) == 1

    def test_merge_low_high_res(self):
        subpixel_x, subpixel_y = self._adaptive_grid._high_res_coordinates
        # BUGFIX: the second element previously duplicated `_x_low_res`;
        # the y coordinates must come from `_y_low_res`.
        x, y = self._adaptive_grid._x_low_res, self._adaptive_grid._y_low_res
        model = LightModel(light_model_list=['GAUSSIAN'])
        kwargs_light = [{'center_x': 0, 'center_y': 0, 'sigma': 1, 'amp': 1}]
        subgrid_values = model.surface_brightness(subpixel_x, subpixel_y, kwargs_light)
        image1d = model.surface_brightness(x, y, kwargs_light)
        image_added = self._adaptive_grid._merge_low_high_res(image1d, subgrid_values)
        added_array = util.image2array(image_added)
        supersampled_values = self._adaptive_grid._average_subgrid(subgrid_values)
        assert added_array[util.image2array(self._supersampling_indexes)] == supersampled_values
        image_high_res = self._adaptive_grid._high_res_image(subgrid_values)
        assert len(image_high_res) == self.nx * self._supersampling_factor

    def test_flux_array2image_low_high(self):
        x, y = self._adaptive_grid.coordinates_evaluate
        model = LightModel(light_model_list=['GAUSSIAN'])
        kwargs_light = [{'center_x': 0, 'center_y': 0, 'sigma': 1, 'amp': 1}]
        flux_values = model.surface_brightness(x, y, kwargs_light)
        image_low_res, image_high_res = self._adaptive_grid.flux_array2image_low_high(flux_values)
        assert len(image_high_res) == self.nx * self._supersampling_factor
class TestRegularGrid(object):
    """Tests for :class:`RegularGrid` with uniform supersampling."""

    def setup(self):
        # 11x11 grid, unit pixel size, uniformly supersampled by a factor 4
        self._deltaPix = 1.
        pix2angle = np.array([[1, 0], [0, 1]]) * self._deltaPix
        ra0, dec0 = -5, -5
        self.nx, self.ny = 11, 11
        self._supersampling_factor = 4
        self._regular_grid = RegularGrid(self.nx, self.ny, pix2angle, ra0, dec0,
                                         supersampling_factor=self._supersampling_factor)

    def test_grid_points_spacing(self):
        # supersampled spacing is the pixel scale divided by the factor
        assert self._regular_grid.grid_points_spacing == self._deltaPix / self._supersampling_factor

    def test_num_grid_points_axes(self):
        # each axis is multiplied by the supersampling factor
        num_x, num_y = self._regular_grid.num_grid_points_axes
        assert num_x == self.nx * self._supersampling_factor
        assert num_y == self.ny * self._supersampling_factor

    def test_supersampling_factor(self):
        assert self._regular_grid.supersampling_factor == self._supersampling_factor
if __name__ == '__main__':
    # allow running this test module directly (outside a pytest invocation)
    pytest.main()
|
{"hexsha": "87d0dff27f6614ab760339861fe03601ccdc02ce", "size": 4457, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_ImSim/test_Numerics/test_grid.py", "max_stars_repo_name": "heather999/lenstronomy", "max_stars_repo_head_hexsha": "8102fe026c1f3ba6e81d8a1f59cceb90e68430b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 107, "max_stars_repo_stars_event_min_datetime": "2017-08-25T20:03:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T19:52:21.000Z", "max_issues_repo_path": "test/test_ImSim/test_Numerics/test_grid.py", "max_issues_repo_name": "heather999/lenstronomy", "max_issues_repo_head_hexsha": "8102fe026c1f3ba6e81d8a1f59cceb90e68430b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 235, "max_issues_repo_issues_event_min_datetime": "2017-06-07T13:30:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T12:44:04.000Z", "max_forks_repo_path": "test/test_ImSim/test_Numerics/test_grid.py", "max_forks_repo_name": "heather999/lenstronomy", "max_forks_repo_head_hexsha": "8102fe026c1f3ba6e81d8a1f59cceb90e68430b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 68, "max_forks_repo_forks_event_min_datetime": "2018-02-01T15:47:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T12:44:32.000Z", "avg_line_length": 44.1287128713, "max_line_length": 147, "alphanum_fraction": 0.7000224366, "include": true, "reason": "import numpy", "num_tokens": 1169}
|
import pandas as pd
import numpy as np
# Column names used throughout the transaction log.
ITEM_COL = 'item_id'
USER_COL = 'user_id'

# Sentinel item id that absorbs every filtered-out item.
FAKE_ITEM_ID = 999999


# Pre-filtering
def prefilter_items(data, prevalence_range=(0.05, 0.95), price_range=(1.0, 100.0)):
    """Mask out undesirable items by replacing their ids with ``FAKE_ITEM_ID``.

    Three groups of items are masked:
    the most and the least prevalent items (by cumulative transaction counts),
    items with no sales during the last 25 weeks,
    and items whose implied price lies entirely outside ``price_range``.

    Parameters
    ----------
    data : pandas.DataFrame
        Transaction log with at least ``item_id``, ``week_no``,
        ``quantity`` and ``sales_value`` columns.
    prevalence_range : tuple of float, optional
        Lower/upper cumulative-count shares delimiting "too popular" and
        "too unpopular" items.
    price_range : tuple of float, optional
        Minimum and maximum acceptable item price.

    Returns
    -------
    pandas.DataFrame
        A copy of ``data`` with blocked item ids replaced by ``FAKE_ITEM_ID``.
    """
    # Drop the most popular and the most unpopular items.
    pop_share, unpop_share = prevalence_range
    cum_counts = data[ITEM_COL].value_counts().cumsum()
    total = cum_counts.values[-1]
    too_popular = cum_counts < total * pop_share
    too_rare = cum_counts > total * unpop_share
    blocked = cum_counts[too_popular | too_rare].index

    # Drop items with no sales during the last 25 weeks.
    recent = data[ITEM_COL][data['week_no'] > data['week_no'].max() - 25]
    stale = np.setdiff1d(data[ITEM_COL], recent)
    blocked = np.union1d(blocked, stale)

    # Drop items that are too cheap or too expensive.
    # The price is inferred from sales_value / quantity (0 when quantity is 0).
    min_price, max_price = price_range
    priced = data.assign(
        price=lambda df: np.where(df['quantity'] > 0,
                                  df['sales_value'] / df['quantity'], 0.0))
    price_stats = priced.groupby(ITEM_COL).agg(
        min_item_price=('price', 'min'), max_item_price=('price', 'max'))
    bad_price = price_stats.query(
        "min_item_price >= @max_price or max_item_price <= @min_price").index
    blocked = np.union1d(blocked, bad_price)

    result = data.copy()
    result.loc[np.isin(data[ITEM_COL], blocked), ITEM_COL] = FAKE_ITEM_ID
    return result
def postfilter_items(user_id, recommednations):
    """Post-filter a user's recommendations (placeholder, not implemented).

    NOTE(review): the parameter name ``recommednations`` looks like a typo
    of ``recommendations``; renaming it would break keyword-argument
    callers, so it is left unchanged here.
    """
    pass
|
{"hexsha": "1e264b5ee81dcbe1834c807a83632f83a6e4cefb", "size": 1729, "ext": "py", "lang": "Python", "max_stars_repo_path": "final/student_utils.py", "max_stars_repo_name": "avidbrain/recsys", "max_stars_repo_head_hexsha": "9f552602b95413ba6389735c51f2253f04d56537", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "final/student_utils.py", "max_issues_repo_name": "avidbrain/recsys", "max_issues_repo_head_hexsha": "9f552602b95413ba6389735c51f2253f04d56537", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "final/student_utils.py", "max_forks_repo_name": "avidbrain/recsys", "max_forks_repo_head_hexsha": "9f552602b95413ba6389735c51f2253f04d56537", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2093023256, "max_line_length": 101, "alphanum_fraction": 0.7125506073, "include": true, "reason": "import numpy", "num_tokens": 519}
|
"""Engine that performs decisions about whether to employ a surrogate"""
from proxima.inference import BaseInferenceEngine, ScikitLearnInferenceEngine
from proxima.data import BaseDataSource
import numpy as np
from sklearn.neighbors import NearestNeighbors
# TODO (wardlt): Provide some mechanism for checking if UQ tool needs to be updated
# Perhaps having the data store track the last time it was updated?
class BaseUQEngine:
    """Base class for tools that decide whether to invoke a surrogate model"""

    def is_supported(self, model: BaseInferenceEngine,
                     training_data: BaseDataSource, X):
        """Decide whether a prediction is supported by the data

        Args:
            model (BaseInferenceEngine): Model execution engine
            training_data (BaseDataSource): Data source
            X: Input point to assess (format defined by the subclass)
        """
        # abstract: concrete engines implement the actual decision rule
        raise NotImplementedError
class SklearnUncertainty(BaseUQEngine):
    """Decide applicability from a scikit-learn model's own uncertainty estimate."""

    def __init__(self, tolerance):
        """
        Args:
            tolerance (float): Maximum estimated standard deviation
        """
        self.tolerance = tolerance

    def is_supported(self, model: ScikitLearnInferenceEngine,
                     training_data: BaseDataSource, X):
        # Query the wrapped sklearn estimator for its standard-deviation
        # estimate at X; the point is supported when the model is confident.
        _, std_estimate = model.model.predict([X], return_std=True)
        return std_estimate < self.tolerance
class DistanceBasedUQ(BaseUQEngine):
    """Judge applicability by the distance to the nearest training points."""

    def __init__(self, threshold, metric='mahalanobis', k=1, n_jobs=None):
        """Initialize the metric

        Args:
            threshold (float): Maximum distance for a prediction to be "trustable"
            metric (string): Distance metric to use
            k (int): Number of nearest neighbors to consider
            n_jobs (int): Number of threads to use when computing distances
        """
        super().__init__()
        self.threshold = threshold
        self.metric = metric
        self.k = k
        self.n_jobs = n_jobs
        self.nn_ = None  # placeholder for a cached neighbor index (currently unused)

    def is_supported(self, model: BaseInferenceEngine,
                     training_data: BaseDataSource, X):
        # Fetch the full training inputs; the targets are not needed here.
        train_X, _ = training_data.get_all_data()

        # Fit a fresh neighbor index on every call.
        # TODO (wardlt): could reuse a cached index when the training set is unchanged
        neighbors = NearestNeighbors(n_neighbors=self.k, n_jobs=self.n_jobs,
                                     metric=self.metric).fit(train_X)

        # Supported when the mean distance to the k nearest points is small enough.
        dists, _ = neighbors.kneighbors([X])
        return np.mean(dists, axis=1) < self.threshold
|
{"hexsha": "85d6dd70b63d2ed233b072c475cb3c928c4fc0b2", "size": 2601, "ext": "py", "lang": "Python", "max_stars_repo_path": "proxima/uq.py", "max_stars_repo_name": "YulianaGomez/proxima-1", "max_stars_repo_head_hexsha": "cfb4d9a530bed1b222b27b9c74b210e61ed7919a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-12T17:04:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-16T17:29:34.000Z", "max_issues_repo_path": "proxima/uq.py", "max_issues_repo_name": "YulianaGomez/proxima-1", "max_issues_repo_head_hexsha": "cfb4d9a530bed1b222b27b9c74b210e61ed7919a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "proxima/uq.py", "max_forks_repo_name": "YulianaGomez/proxima-1", "max_forks_repo_head_hexsha": "cfb4d9a530bed1b222b27b9c74b210e61ed7919a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-30T02:14:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-11T08:38:41.000Z", "avg_line_length": 35.6301369863, "max_line_length": 99, "alphanum_fraction": 0.6578239139, "include": true, "reason": "import numpy", "num_tokens": 547}
|
# needs `AbstractObjective` (which in turn needs the Surrogate Interface)
# Treat an `AbstractMOP` as a scalar in broadcast expressions.
Broadcast.broadcastable( mop :: AbstractMOP ) = Ref( mop );
# MANDATORY methods
"Return full vector of lower variable vectors for original problem."
full_lower_bounds( :: AbstractMOP ) :: Vec = nothing
"Return full vector of upper variable vectors for original problem."
full_upper_bounds( :: AbstractMOP ) :: Vec = nothing
"Return a list of `AbstractVectorObjective`s."
list_of_objectives( :: AbstractMOP ) :: Union{AbstractVector{<:AbstractObjective}, Tuple{Vararg{<:AbstractObjective}}} = nothing
# only for user editable problems, i.e. <:AbstractMOP{true}
"Remove an objective function from MOP."
_del!(::AbstractMOP, ::AbstractObjective) :: Nothing = nothing
"Add an objective function to MOP with specified output indices."
_add!(::AbstractMOP, ::AbstractObjective, ::Union{Nothing,Vector{Int}}) :: Nothing = nothing
# MOI METHODS
# # add only required for AbstractMOP{true}
# Stubs mirroring the MathOptInterface API; concrete problem types override
# them.  The `-1` fallbacks act as "not implemented" sentinels.
MOI.add_variable( :: AbstractMOP ) :: MOI.VariableIndex = nothing
MOI.add_variables( :: AbstractMOP ) :: Vector{MOI.VariableIndex} = nothing
MOI.get( :: AbstractMOP, :: MOI.NumberOfVariables ) = -1;
MOI.get( :: AbstractMOP, :: MOI.ListOfVariableIndices) = -1;
MOI.get( :: AbstractMOP, :: MOI.ListOfConstraints )::Vector{Tuple} = nothing ;
MOI.supports_constraint( ::AbstractMOP, ::Type{MOI.SingleVariable}, ::Type{MOI.Interval}) = nothing ::Bool;
MOI.add_constraint(::AbstractMOP, func::F, set::S) where {F,S} = nothing :: MOI.ConstraintIndex{F,S}
# DERIVED methods
# Number of decision variables, via the MOI accessor.
num_vars( mop :: AbstractMOP ) = MOI.get( mop, MOI.NumberOfVariables() );

"Number of scalar-valued objectives of the problem."
function num_objectives( mop :: AbstractMOP )
    objf_list = list_of_objectives(mop)
    isempty(objf_list) && return 0
    return sum( num_outputs(objf) for objf in objf_list )
end
"In place, map each fully bounded component of `x` onto [0,1]."
function _scale!( x :: Vec, lb :: Vec, ub :: Vec )
    for (i, (l, u)) ∈ enumerate( zip( lb, ub ) )
        # components with an infinite bound are left untouched
        if !( isinf(l) || isinf(u) )
            x[i] = ( x[i] - l ) / ( u - l )
        end
    end
    nothing
end
"Out-of-place variant of `_scale!`: returns a scaled copy of `x`."
function _scale( x :: Vec, lb :: Vec, ub :: Vec )
    scaled = copy(x)
    _scale!(scaled, lb, ub)
    return scaled
end
"In place, map each fully bounded component of `x̂` from [0,1] back to `[lb,ub]`."
function _unscale!( x̂ :: Vec, lb :: Vec, ub :: Vec )
    # TODO: Make the component scaling memoized?
    for (i, (l, u)) ∈ enumerate( zip( lb, ub ) )
        # components with an infinite bound are left untouched
        if !( isinf(l) || isinf(u) )
            x̂[i] = l + x̂[i] * ( u - l )
        end
    end
    nothing
end
"Out-of-place variant of `_unscale!`: returns an unscaled copy of `x̂`."
function _unscale( x̂ :: Vec, lb :: Vec, ub :: Vec )
    unscaled = copy(x̂)
    _unscale!(unscaled, lb, ub)
    return unscaled
end
"Scale variables fully constrained to a closed interval to [0,1] internally."
function scale( x :: Vec, mop :: AbstractMOP )
x̂ = copy(x);
lb, ub = full_lower_bounds(mop), full_upper_bounds(mop);
_scale!(x̂, lb, ub);
return x̂
end
"Reverse scaling for fully constrained variables from [0,1] to their former domain."
function unscale( x̂ :: Vec, mop :: AbstractMOP )
x = copy(x̂);
lb, ub = full_lower_bounds(mop), full_upper_bounds(mop);
_unscale!(x, lb, ub);
return x
end
function scale!( x :: Vec, mop :: AbstractMOP )
lb, ub = full_lower_bounds(mop), full_upper_bounds(mop);
_scale!(x, lb, ub);
end
function unscale!( x̂ :: Vec, mop :: AbstractMOP )
lb, ub = full_lower_bounds(mop), full_upper_bounds(mop);
_unscale!( x̂, lb, ub);
end
"Position of `objf` in `list_of_objectives(mop)`."
function _objf_index( objf :: AbstractObjective, mop :: AbstractMOP)
return findfirst( list_of_objectives(mop) .== objf );
end
function output_indices( mop :: AbstractMOP )
all_outputs = Int[];
for objf ∈ list_of_objectives( mop )
push!( all_outputs, output_indices( objf, mop )...);
end
return all_outputs;
end
# TODO use memoization in MixedMOP here
function output_indices( objf ::AbstractObjective, mop :: AbstractMOP )
return let first_index = _objf_index(objf,mop);
collect( first_index : first_index + num_outputs(objf) - 1 );
end
end
"Remove `objf` from `list_of_objectives(mop)` and return its output indices."
function pop_objf!( mop :: AbstractMOP, objf :: AbstractObjective )
oi = output_indices( objf, mop );
_del!(mop, objf)
return oi
end
"Return lower variable bounds for scaled variables."
function full_lower_bounds_internal( mop :: AbstractMOP )
[ isinf(l) ? l : 0.0 for l ∈ full_lower_bounds(mop) ];
end
"Return upper variable bounds for scaled variables."
function full_upper_bounds_internal( mop :: AbstractMOP )
[ isinf(u) ? u : 1.0 for u ∈ full_upper_bounds(mop) ];
end
function full_bounds( mop :: AbstractMOP )
(full_lower_bounds(mop), full_upper_bounds(mop))
end
function full_bounds_internal( mop :: AbstractMOP )
(full_lower_bounds_internal(mop), full_upper_bounds_internal(mop))
end
"Return lower and upper bound vectors combining global and trust region constraints."
function _local_bounds( x :: Vec, Δ :: Union{Real, Vec}, lb :: Vec, ub :: Vec )
lb_eff = max.( lb, x .- Δ );
ub_eff = min.( ub, x .+ Δ );
return lb_eff, ub_eff
end
"Local bounds vectors `lb_eff` and `ub_eff` using scaled variable constraints from `mop`."
function local_bounds( mop :: AbstractMOP, x :: Vec, Δ :: Union{Real, Vec} )
lb, ub = full_lower_bounds_internal( mop ), full_upper_bounds_internal( mop );
return _local_bounds( x, Δ, lb, ub );
end
function _project_into_box( z, lb, ub)
return min.( max.( z, lb ), ub )
end
"Return smallest positive and biggest negative and `σ₊` and `σ₋` so that `x .+ σ± .* d` stays within bounds."
function _intersect_bounds( x :: Vec, d :: Vec, lb :: Vec, ub :: Vec )
d_scaled = (ub .- lb ) .* d ./ norm( d, Inf )
σ_pos = norm( _project_into_box( x .+ d_scaled, lb, ub ) - x, 2 )
σ_neg = norm( _project_into_box( x .- d_scaled, lb, ub ) - x, 2 )
return σ_pos, σ_neg
end
function intersect_bounds( mop :: AbstractMOP, x :: Vec, Δ :: Union{Real, Vec},
d :: Vec; return_vals :: Symbol = :both )
lb_eff, ub_eff = local_bounds( mop, x, Δ );
return intersect_bounds( x, d, lb_eff, ub_eff; return_vals )
end
function intersect_bounds( x :: Vec, d :: Vec, lb, ub ; return_vals :: Symbol = :both )
σ_pos, σ_neg = _intersect_bounds( x, d, lb, ub )
if return_vals == :both
return σ_pos, σ_neg
elseif return_vals == :pos
return σ_pos
elseif return_vals == :neg
return σ_neg
elseif return_vals == :absmax
if abs(σ_pos) >= abs(σ_neg)
return σ_pos
else
return σ_neg
end
end
end
# Register a new (vector-valued) objective with an editable MOP.
# `T` is the concrete objective wrapper type, `func` the user callable,
# `n_out` its number of scalar outputs.  Returns the new total number of
# scalar objectives.
function _add_objective!( mop :: AbstractMOP{true}, T :: Type{<:AbstractObjective},
    func :: Function, model_cfg :: SurrogateConfig; n_out :: Int = 0,
    can_batch :: Bool = false, out_type :: Union{Type{<:Vec},Nothing} = nothing )
    # wrap the user function so it always yields a vector; batch-capable
    # functions get the dedicated wrapper type
    fx = can_batch ? BatchObjectiveFunction(func) : ensure_vec ∘ func;
    inner_objf = _wrap_func( T, fx, model_cfg, num_vars(mop), n_out )
    # optionally coerce the outputs to a fixed vector type
    objf = isnothing(out_type) ? inner_objf : OutTypeWrapper(inner_objf, out_type)
    # assign the next `n_out` free internal output indices
    out_indices = let oi = output_indices(mop);
        max_out = isempty( oi ) ? 1 : maximum( oi ) + 1;
        collect(max_out : max_out + n_out - 1)
    end
    # merge with an existing objective when the surrogate configs allow it,
    # carrying over that objective's output indices
    for other_objf ∈ list_of_objectives(mop)
        if combinable( objf, other_objf )
            other_output_indices = pop_objf!( mop, other_objf );
            out_indices = [other_output_indices; out_indices];
            objf = combine(other_objf, objf);
            break;
        end
    end
    _add!(mop, objf, out_indices);
    return num_objectives( mop );
end
"Return index vector so that an internal objective vector is sorted according to the order the objectives where added."
function reverse_internal_sorting_indices(mop :: AbstractMOP)
internal_indices = output_indices(mop);
return sortperm( internal_indices );
end
"Sort an interal objective vector so that the objectives are in the order in which they were added."
function reverse_internal_sorting( ŷ :: Vec, mop :: AbstractMOP )
reverse_indices = reverse_internal_sorting_indices(mop)
return ŷ[ reverse_indices ];
end
function apply_internal_sorting( y :: Vec, mop :: AbstractMOP )
return y[ output_indices(mop) ]
end
function reverse_internal_sorting!( ŷ :: Vec, mop :: AbstractMOP )
reverse_indices = reverse_internal_sorting_indices(mop)
ŷ[:] = ŷ[ reverse_indices ]
nothing
end
function apply_internal_sorting!( y :: Vec, mop :: AbstractMOP )
y[:] = y[ output_indices(mop) ]
nothing
end
# custom broadcast to only retrieve sorting indices once
# (avoids recomputing `sortperm` for every element of `Ŷ`)
function Broadcast.broadcasted( :: typeof( reverse_internal_sorting ), mop :: AbstractMOP, Ŷ :: VecVec )
    reverse_indices = reverse_internal_sorting_indices(mop);
    return [ ŷ[reverse_indices] for ŷ ∈ Ŷ];
end
"(Internally) Evaluate all objectives at site `x̂::Vec`. Objective order might differ from order in which they were added."
function eval_all_objectives( mop :: AbstractMOP, x̂ :: Vec )
reduce(vcat, [ eval_objf( objf, unscale(x̂, mop) ) for objf ∈ list_of_objectives(mop) ] )
end
#=
function eval_all_objectives( mop :: AbstractMOP, x̂ :: Vec, tfn :: TransformerFn )
vcat( [ eval_objf( objf, tfn(x̂) ) for objf ∈ list_of_objectives(mop) ]... )
end
=#
function Broadcast.broadcasted(::typeof(eval_all_objectives), mop :: AbstractMOP, X :: VecVec, args... )
if isempty(X)
return Vec[]
else
X_unscaled = unscale.(X,mop)
all_vec_objfs = list_of_objectives(mop)
b_res = Vector{VecVec}(undef, length(all_vec_objfs))
for (i,objf) ∈ enumerate(all_vec_objfs)
b_res[i] = eval_objf.(objf, X_unscaled)
end
# stack the results
N = length(X);
ret_res = Vector{Vec}(undef, N)
for i = 1:N
ret_res[i] = vcat( (r[i] for r ∈ b_res )...)
end
return ret_res
end
end
"Evaluate all objectives at site `x̂::Vec` and sort the result according to the order in which objectives were added."
function eval_and_sort_objectives(mop :: AbstractMOP, x̂ :: Vec)
ŷ = eval_all_objectives(mop, x̂)
return reverse_internal_sorting( ŷ, mop )
end
function Broadcast.broadcasted( ::typeof(eval_and_sort_objectives), mop :: AbstractMOP, X :: VecVec )
Y = eval_all_objectives.(mop, X)
reverse_internal_sorting!.(Y, mop)
return Y
end
# Helper functions …
"Evaluation counts of each objective in `mop`, in list order."
function num_evals( mop :: AbstractMOP ) :: Vector{Int}
    return [ num_evals(objf) for objf ∈ list_of_objectives(mop) ]
end

@doc "Set evaluation counter to 0 for each VectorObjectiveFunction in `m.vector_of_objectives`."
function reset_evals!(mop :: AbstractMOP) :: Nothing
    foreach( objf -> num_evals!( objf, 0 ), list_of_objectives( mop ) )
    return nothing
end

# use for finite (e.g. local) bounds only
"Draw a uniform random point inside the box `[lb, ub]`."
function _rand_box_point(lb::Vec, ub::Vec, type :: Type{<:Real} = MIN_PRECISION) :: Vec
    return lb .+ (ub .- lb) .* rand(type, length(lb))
end
# wrapper to unscale x̂ from internal domain
# and safeguard against boundary violations
struct TransformerFn{F}
    lb :: Vector{F}  # full lower bounds (may contain -Inf)
    ub :: Vector{F}  # full upper bounds (may contain Inf)
    w :: Vector{F}   # box widths `ub - lb`; infinite components replaced by 1
    inf_indices :: Vector{Int}      # positions with an infinite width (left untouched)
    not_inf_indices :: Vector{Int}  # positions with finite bounds (these get scaled)
end

"Evaluate the objective at scaled site `x̂` with help of `tfn`. Used in `diff_wrappers`."
function eval_objf(objf :: AbstractObjective, tfn :: TransformerFn, x̂ :: Vec)
    # count the evaluation, then call the raw handle on the unscaled site
    inc_evals!(objf)
    eval_handle(objf)( unscale(tfn, x̂ ) )
end

"Return the `TransformerFn` defined by `mop` with a minimum precision of `T`."
function TransformerFn(mop :: AbstractMOP, T :: Type{<:AbstractFloat} = MIN_PRECISION)
    LB, UB = full_bounds( mop )
    W = UB - LB
    I = findall(isinf.(W))           # components with an infinite width
    NI = setdiff( 1 : length(W), I )
    W[ I ] .= 1                      # neutral width so scaling is a no-op there
    F = Base.promote_eltype( T, W )  # element type with at least precision T
    return TransformerFn{F}(LB,UB,W,I,NI)
end
# Treat a `TransformerFn` as a scalar in broadcast expressions.
Base.broadcastable( tfn :: TransformerFn ) = Ref(tfn)

using LinearAlgebra: diagm

# Jacobian of the unscaling map at `x̂`.
function _jacobian_unscaling( tfn :: TransformerFn, x̂ :: Vec)
    # for our simple bounds scaling the jacobian is diagonal.
    return diagm(tfn.w)
end
"Unscale the point `x̂` from internal to original domain."
function (tfn:: TransformerFn)( x̂ :: AbstractVector{<:Real} )
χ = copy(x̂)
I = tfn.not_inf_indices
χ[I] .= tfn.lb[I] .+ tfn.w[I] .* χ[I]
return χ
end
unscale( tfn :: TransformerFn, x̂ :: Vec ) = tfn(x̂)
function scale( tfn :: TransformerFn, x :: Vec )
χ = copy( x )
I = tfn.not_inf_indices;
χ[I] .= ( χ[I] .- tfn.lb[I] ) ./ tfn.w[I]
return χ
end
#=
# used in special broadcast to only retrieve bounds once
function ( tfn ::TransformerFn)( X :: AbstractVector{<:AbstractVector} )
return [ _unscale( x, tfn.lb, tfn.ub ) for x ∈ X ]
end
=#
|
{"hexsha": "ad0c38a7cd451e2eaf9e553c496eceaf750a91bc", "size": 12770, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/AbstractMOPInterface.jl", "max_stars_repo_name": "manuelbb-upb/Morbit.jl", "max_stars_repo_head_hexsha": "bfc6b1a7982d2c0003042ec9af75e64ad7ef5cf1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-07-21T14:38:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T13:14:14.000Z", "max_issues_repo_path": "src/AbstractMOPInterface.jl", "max_issues_repo_name": "manuelbb-upb/Morbit.jl", "max_issues_repo_head_hexsha": "bfc6b1a7982d2c0003042ec9af75e64ad7ef5cf1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2021-04-14T09:40:31.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-09T10:43:40.000Z", "max_forks_repo_path": "src/AbstractMOPInterface.jl", "max_forks_repo_name": "manuelbb-upb/Morbit.jl", "max_forks_repo_head_hexsha": "bfc6b1a7982d2c0003042ec9af75e64ad7ef5cf1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-01T02:51:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-01T02:51:21.000Z", "avg_line_length": 34.1443850267, "max_line_length": 129, "alphanum_fraction": 0.6656225529, "num_tokens": 3692}
|
import torch.nn.functional as F
import torch
from torch.autograd import Variable
import numpy as np
from src.data_ops.wrapping import wrap
from src.admin.utils import see_tensors_in_memory
def loss(y_pred, y, y_mask, bm):
    """Dispatch to the configured loss function (currently the weighted NLL)."""
    return nll(y_pred, y, y_mask, bm)
def kl(y_pred, y, y_mask):
    """Distance-weighted KL divergence between predicted and target maps.

    NOTE(review): assumes ``y_pred`` is (batch, n, n) — the weight tensor is
    viewed as (-1, n, n); ``wrap`` is a project helper (device placement?) —
    confirm.
    """
    n = y_pred.shape[1]
    # weight each (i, j) entry by |i - j| ** (1/2.5): entries far from the
    # diagonal contribute more
    dists = wrap(torch.Tensor(distances(n)) ** (1/2.5)).view(-1, n, n)
    logprobs = stable_log(y_pred)
    lossfn = torch.nn.KLDivLoss(reduce=False)
    l = lossfn(logprobs, y)
    l = l * dists
    # balance the contribution of positive and negative targets
    l = reweight_loss(l, y)
    # keep only entries allowed by the mask, then average
    l = l.masked_select(y_mask.byte())
    l = l.mean()
    return l
def nll(y_pred, y, y_mask, batch_mask):
    """Distance-weighted negative log-likelihood for binary target maps.

    NOTE(review): assumes ``y_pred`` is (batch, n, n) with values in [0, 1]
    (treated as the probability of class 1); ``batch_mask`` appears to mark
    valid entries per example — confirm against callers.
    """
    n = y_pred.shape[1]
    # per-example effective size, from the mask
    n_ = batch_mask.sum(1,keepdim=True)[:,:,0]
    #x = F.sigmoid(distances(n) - n / 2)
    dists = wrap(torch.Tensor(distances(n))).view(-1, n, n) * batch_mask
    # exponential weighting: larger for entries far from the diagonal
    x = torch.exp(-(n_.unsqueeze(1) - dists - 1)*0.01)
    #import ipdb; ipdb.set_trace()
    dists = (x)
    lossfn = torch.nn.NLLLoss(reduce=False)
    # stack (1 - p, p) along dim 1 to form per-class log-probabilities
    logprobs = stable_log(torch.stack([1-y_pred, y_pred], 1))
    l = (lossfn(logprobs, y.long()))
    l = l * dists
    # balance positive/negative targets
    l = reweight_loss(l, y)
    # average over unmasked entries only
    l = l.masked_select(y_mask.byte())
    l = l.mean()
    return l
def cho_loss(y_pred, y, y_mask):
    """Distance-weighted BCE-style loss over the flattened n x n map.

    NOTE(review): assumes ``y_pred`` is (batch, n, n); it is flattened to
    (-1, n**2) before the elementwise loss.
    """
    n = y_pred.shape[1]
    # per-entry weight grows with distance from the diagonal
    dists = wrap(torch.Tensor(distances(n)) ** (1./2.5))
    y_pred = y_pred.view(-1, n ** 2)
    y = y.view(-1, n ** 2)
    l = my_bce_loss(y_pred, y, reduce=False)
    # balance positive/negative targets, then apply the distance weights
    l = reweight_loss(l, y)
    l = l * dists
    # average over unmasked entries only
    l = l.masked_select(y_mask.view(-1, n**2).byte())
    l = l.mean()
    return l
def vanilla_bce_loss(y_pred, y, y_mask):
    """Masked average of ``my_bce_loss`` with no distance weighting.

    NOTE(review): assumes ``y_pred`` is (batch, n, n) — the elementwise loss
    is flattened to (-1, n**2) before masking.
    """
    n = y_pred.shape[1]
    per_element = my_bce_loss(y_pred, y, reduce=False).view(-1, n ** 2)
    masked = per_element.masked_select(y_mask.view(-1, n ** 2).byte())
    return masked.mean()
def distance_loss(y_pred, y, y_mask):
    """Masked mean-squared error between prediction and target."""
    squared_err = (y_pred - y) ** 2
    return (squared_err * y_mask).mean()
def reweight_loss(l, y):
    """Class-balance a per-element loss against binary targets ``y``.

    Positive entries are scaled by the per-row negative count and vice
    versa, so both classes contribute comparably within each row.
    """
    n_pos = y.sum(1, keepdim=True)
    n_neg = (1 - y).sum(1, keepdim=True)
    pos_part = y * l * n_neg
    neg_part = (1 - y) * l * n_pos
    return (pos_part + neg_part) / (n_pos + n_neg)
def reweight(tensor, idx, weight):
    """Scale column ``idx`` of ``tensor`` in place by ``weight``; return the tensor."""
    tensor[:, idx] *= weight
    return tensor
def distances(n):
    """Flat length-``n**2`` array of |row - col| distances on an n x n grid."""
    flat = np.arange(n * n)
    return np.abs(flat // n - flat % n)
def stable_log(x):
    """Elementwise log with inputs clamped below at 1e-20 (avoids log(0) = -inf)."""
    floor = Variable(torch.Tensor([1e-20]))
    if torch.cuda.is_available():
        floor = floor.cuda()
    return torch.log(torch.max(x, floor))
def my_bce_loss(input, target, weight=None, reduce=True):
    """Numerically stabilized BCE-style loss.

    NOTE(review): ``input`` is first passed through ``stable_log``, and the
    remainder is the classic stable BCE-with-logits rearrangement applied to
    log(input) — i.e. this is NOT the plain BCE of ``input`` itself; confirm
    this is intentional before reusing elsewhere.
    """
    input = stable_log(input)
    if not (target.size() == input.size()):
        raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
    # log-sum-exp style rearrangement to avoid overflow in exp()
    max_val = (-input).clamp(min=0)
    loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
    if weight is not None:
        loss = loss * weight
    if reduce:
        loss = loss.mean()
    return loss
|
{"hexsha": "04febf0e2bb6dd4ed7033686c91bb2890a33a4b9", "size": 3024, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/proteins/loss.py", "max_stars_repo_name": "isaachenrion/jets", "max_stars_repo_head_hexsha": "59aeba81788d0741af448192d9dfb764fb97cf8d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2017-10-09T17:01:52.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-12T18:06:05.000Z", "max_issues_repo_path": "src/proteins/loss.py", "max_issues_repo_name": "isaachenrion/jets", "max_issues_repo_head_hexsha": "59aeba81788d0741af448192d9dfb764fb97cf8d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2017-11-01T14:39:02.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-18T15:34:24.000Z", "max_forks_repo_path": "src/proteins/loss.py", "max_forks_repo_name": "isaachenrion/jets", "max_forks_repo_head_hexsha": "59aeba81788d0741af448192d9dfb764fb97cf8d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2017-10-17T19:23:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-05T04:44:45.000Z", "avg_line_length": 25.8461538462, "max_line_length": 116, "alphanum_fraction": 0.6028439153, "include": true, "reason": "import numpy", "num_tokens": 963}
|
from srtmath import *
from srtshapes import *
import numpy as np
import scipy.misc
import time
WIDTH = 1280
HEIGHT = 720
SPHERE_COLOR = [0, 255, 0]

if __name__ == "__main__":
    # Scene: a single sphere at the origin lit by one point light.
    objects = []
    objects.append(Sphere(Point(), 500))
    # objects.append(Plane(Point(0, 0, 750), Vector(0,0,-1)))
    light = Point(0, 700, 1000)
    image = np.zeros((WIDTH, HEIGHT, 3), dtype=np.uint8)
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended replacement for elapsed-time measurement.
    startT = time.perf_counter()
    print('Began rendering at {0}'.format(startT))
    for i in range(WIDTH):
        for j in range(HEIGHT):
            # Shoot an orthographic primary ray through pixel (i, j).
            r = Ray(Point(i, j, -1000), Vector(0, 0, 1))
            tHit = float("inf")
            normal = None
            # Track whether ANY object was hit; the original reused the last
            # object's intersect flag, which is wrong for multi-object scenes.
            hit = False
            for obj in objects:
                b, t0, t1, n = obj.intersect(r)
                if b and t0 < tHit:
                    hit = True
                    tHit = t0
                    normal = n
            if not hit:
                # No intersection: leave the pixel black and move to the next
                # pixel. (The original used `break`, which wrongly abandoned
                # the remainder of the whole column after the first miss.)
                image[i, j, 0] = 0
                image[i, j, 1] = 0
                image[i, j, 2] = 0
                continue
            # Lambertian shading against the point light.
            p = r(tHit)
            # NOTE(review): this is the light->surface direction; whether it
            # should be surface->light depends on the normal convention in
            # srtshapes — confirm.
            tol = normalize(p - light)
            c = max(dot(normal, tol), 0)
            image[i, j, 0] = c * SPHERE_COLOR[0]
            image[i, j, 1] = c * SPHERE_COLOR[1]
            image[i, j, 2] = c * SPHERE_COLOR[2]
    print('This render took {0} seconds'.format(time.perf_counter() - startT))
    # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2; consider
    # imageio.imwrite('./render.png', image) as a drop-in replacement.
    scipy.misc.imsave('./render.png', image)
|
{"hexsha": "4306d1a94390867eb6394d07d38537112bdd44b6", "size": 1451, "ext": "py", "lang": "Python", "max_stars_repo_path": "srt.py", "max_stars_repo_name": "Seek/SimpleRT", "max_stars_repo_head_hexsha": "fa9ba1fa1c843b417b1e19a5315a63da718eaa43", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "srt.py", "max_issues_repo_name": "Seek/SimpleRT", "max_issues_repo_head_hexsha": "fa9ba1fa1c843b417b1e19a5315a63da718eaa43", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "srt.py", "max_forks_repo_name": "Seek/SimpleRT", "max_forks_repo_head_hexsha": "fa9ba1fa1c843b417b1e19a5315a63da718eaa43", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9772727273, "max_line_length": 71, "alphanum_fraction": 0.4941419711, "include": true, "reason": "import numpy,import scipy", "num_tokens": 410}
|
# Simple mnist convutional network
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import math
from glob import glob
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# data preparation
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Two conv/pool stages followed by a dropout-regularised softmax classifier.
model = keras.Sequential(
    [
        keras.Input(shape=input_shape),
        layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dropout(0.5),
        layers.Dense(num_classes, activation="softmax"),
    ]
)
model.summary()

# Train the model
batch_size = 128
epochs = 4
#epochs = 15
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])

# list all data in history
print(history.history.keys())

# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
# The second curve is the 10% validation split (validation_split=0.1), not the
# held-out test set — label it 'val', matching the loss plot below.
plt.legend(['train', 'val'], loc='upper left')
plt.show()

# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
|
{"hexsha": "a18fb45f3b3195311ea2a1d416c715eebe341b38", "size": 2242, "ext": "py", "lang": "Python", "max_stars_repo_path": "recognizer.py", "max_stars_repo_name": "ColdBacon/Digit-recognizer", "max_stars_repo_head_hexsha": "af039cf16eff02595cd9806cbc4e0ee314970be6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "recognizer.py", "max_issues_repo_name": "ColdBacon/Digit-recognizer", "max_issues_repo_head_hexsha": "af039cf16eff02595cd9806cbc4e0ee314970be6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "recognizer.py", "max_forks_repo_name": "ColdBacon/Digit-recognizer", "max_forks_repo_head_hexsha": "af039cf16eff02595cd9806cbc4e0ee314970be6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7701149425, "max_line_length": 97, "alphanum_fraction": 0.7149866191, "include": true, "reason": "import numpy", "num_tokens": 570}
|
#xortraintest-5.jl
#trains an xornet using noisy and unreliable data. Ten data inputs, top two xor'ed to get the
#correct values, 5% of the time.
function xortrain_4()
  # Seed the RNG so the generated dataset and the initial weights are reproducible.
  srand(10)
  println("working on unreliable xor data set with backpropagation")
  # 500 samples of 10 random boolean inputs; the label is the XOR of the first
  # two inputs, flipped with 5% probability to simulate unreliable labels.
  # NOTE(review): `$` is the pre-0.6 Julia xor operator and `srand` the
  # pre-0.7 seeding call — this file targets an old Julia release.
  input_matrix = rand(Bool, 10, 500)
  training_results = Array{Bool,2}(1,500)
  training_results[:] = [input_matrix[1, col] $ input_matrix[2, col] $ (rand() < 0.05) for col in 1:size(input_matrix,2)]
  # 10-2-1 perceptron with randn-initialised weights.
  xornet = GenML.MLP.MultilayerPerceptron{Float64,(10,2,1)}(randn)
  # 25 epochs of per-sample backpropagation over the 500 training columns.
  for rounds = 1:25
    for idx = 1:500
      input_column = input_matrix[:, idx]
      training_column = training_results[:, idx]
      GenML.Optimizers.backpropagationoptimize(xornet, input_column, training_column, GenML.CF.crossentropy)
    end
  end
  #verify that the optimization has resulted in a good data set.
  # Evaluate on 50 fresh random inputs against the noise-free XOR label.
  wrongcount = 0
  for x = 1:50
    input_vector = rand(Bool, 10)
    wrongcount += (xornet(input_vector)[1] > 0.5) != (input_vector[1] $ input_vector[2])
  end
  println("incorrect responses for noisy xor: $wrongcount")
  return wrongcount
end

# Retry once before failing. NOTE(review): srand(10) inside the function makes
# both runs identical, so the second attempt cannot change the outcome.
xortrain_4() == 0 || xortrain_4() == 0 || throw(ErrorException("two executions failed."))
|
{"hexsha": "d9711dfb4c0a65e82860aab4fa1602dc0efc6e04", "size": 1173, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/xortraintest-4.jl", "max_stars_repo_name": "interplanetary-robot/GenML", "max_stars_repo_head_hexsha": "f99015ab404250861334e75445b3a701293349e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/xortraintest-4.jl", "max_issues_repo_name": "interplanetary-robot/GenML", "max_issues_repo_head_hexsha": "f99015ab404250861334e75445b3a701293349e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/xortraintest-4.jl", "max_forks_repo_name": "interplanetary-robot/GenML", "max_forks_repo_head_hexsha": "f99015ab404250861334e75445b3a701293349e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0769230769, "max_line_length": 121, "alphanum_fraction": 0.7084398977, "num_tokens": 369}
|
'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2018 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import cv2
import numpy as np
from pyglui.cygl.utils import draw_points_norm, draw_polyline, RGBA
from OpenGL.GL import GL_POLYGON
from circle_detector import CircleTracker
from . finish_calibration import finish_calibration
import audio
from pyglui import ui
from . calibration_plugin_base import Calibration_Plugin
# logging
import logging
logger = logging.getLogger(__name__)
class Manual_Marker_Calibration(Calibration_Plugin):
    """
    CircleTracker looks for proper markers
    Using at least 9 positions/points within the FOV
    Ref detector will direct one to good positions with audio cues
    Calibration only collects data at the good positions
    """
    def __init__(self, g_pool):
        super().__init__(g_pool)
        self.pos = None  # latest detected marker position (norm coords); None when no marker visible
        self.smooth_pos = 0.,0.  # exponentially smoothed marker position
        self.smooth_vel = 0.  # smoothed manhattan velocity of the marker
        self.sample_site = (-2,-2)  # position of the last sampled site (off-screen sentinel initially)
        self.counter = 0  # frames left to sample at the current site; 0 means not sampling
        self.counter_max = 30  # number of datapoints collected per steady marker site
        self.stop_marker_found = False
        self.auto_stop = 0  # consecutive frames the 'Stop' marker has been seen
        self.auto_stop_max = 30  # frames of stop marker required to auto-stop the calibration
        self.menu = None
        self.circle_tracker = CircleTracker()
        self.markers = []  # markers detected in the current frame
    def init_ui(self):
        # Build the plugin menu entry shown in the Pupil UI.
        super().init_ui()
        self.menu.label = "Manual Calibration"
        self.menu.append(ui.Info_Text("Calibrate gaze parameters using a handheld marker."))
    def start(self):
        # Begin collecting reference (marker) and pupil data.
        super().start()
        audio.say("Starting {}".format(self.mode_pretty))
        logger.info("Starting {}".format(self.mode_pretty))
        self.active = True
        self.ref_list = []
        self.pupil_list = []
    def stop(self):
        # End collection and hand the gathered data to the calibration /
        # accuracy-test routine depending on the current mode.
        # NOTE(review): screen_marker_state is not set in __init__; presumably
        # defined by the base class — confirm.
        audio.say("Stopping {}".format(self.mode_pretty))
        logger.info('Stopping {}'.format(self.mode_pretty))
        self.screen_marker_state = 0
        self.active = False
        self.smooth_pos = 0.,0.
        # self.close_window()
        self.button.status_text = ''
        if self.mode == 'calibration':
            finish_calibration(self.g_pool, self.pupil_list, self.ref_list)
        elif self.mode == 'accuracy_test':
            self.finish_accuracy_test(self.pupil_list, self.ref_list)
        super().stop()
    def on_notify(self, notification):
        '''
        Reacts to notifications:
           ``calibration.should_start``: Starts the calibration procedure
           ``calibration.should_stop``: Stops the calibration procedure
        Emits notifications:
            ``calibration.started``: Calibration procedure started
            ``calibration.stopped``: Calibration procedure stopped
            ``calibration.marker_found``: Steady marker found
            ``calibration.marker_moved_too_quickly``: Marker moved too quickly
            ``calibration.marker_sample_completed``: Enough data points sampled
        '''
        super().on_notify(notification)
    def recent_events(self, events):
        """
        gets called once every frame.
        reference positon need to be published to shared_pos
        if no reference was found, publish 0,0
        """
        frame = events.get('frame')
        if self.active and frame:
            gray_img = frame.gray
            # Update the marker
            self.markers = self.circle_tracker.update(gray_img)
            self.stop_marker_found = False
            if len(self.markers):
                # Set the pos to be the center of the first detected marker
                marker_pos = self.markers[0]['img_pos']
                self.pos = self.markers[0]['norm_pos']
                # Check if there are stop markers
                for marker in self.markers:
                    if marker['marker_type'] == 'Stop':
                        self.auto_stop += 1
                        self.stop_marker_found = True
                        break
            else:
                self.pos = None  # indicate that no reference is detected
            # Reset the auto-stop streak as soon as a frame has no stop marker.
            if self.stop_marker_found is False:
                self.auto_stop = 0
            # Check if there are more than one markers
            if len(self.markers) > 1:
                audio.tink()
                logger.warning("{} markers detected. Please remove all the other markers".format(len(self.markers)))
            # tracking logic
            if len(self.markers) and not self.stop_marker_found:
                # start counter if ref is resting in place and not at last sample site
                # calculate smoothed manhattan velocity
                smoother = 0.3  # EMA coefficient for position/velocity smoothing
                smooth_pos = np.array(self.smooth_pos)
                pos = np.array(self.pos)
                new_smooth_pos = smooth_pos + smoother*(pos-smooth_pos)
                smooth_vel_vec = new_smooth_pos - smooth_pos
                smooth_pos = new_smooth_pos
                self.smooth_pos = list(smooth_pos)
                #manhattan distance for velocity
                new_vel = abs(smooth_vel_vec[0])+abs(smooth_vel_vec[1])
                self.smooth_vel = self.smooth_vel + smoother*(new_vel-self.smooth_vel)
                #distance to last sampled site
                sample_ref_dist = smooth_pos-np.array(self.sample_site)
                sample_ref_dist = abs(sample_ref_dist[0])+abs(sample_ref_dist[1])
                # start counter if ref is resting in place and not at last sample site
                if self.counter <= 0:
                    if self.smooth_vel < 0.01 and sample_ref_dist > 0.1:
                        self.sample_site = self.smooth_pos
                        audio.beep()
                        logger.debug("Steady marker found. Starting to sample {} datapoints".format(self.counter_max))
                        self.notify_all({'subject':'calibration.marker_found','timestamp':self.g_pool.get_timestamp(),'record':True})
                        self.counter = self.counter_max
                if self.counter > 0:
                    if self.smooth_vel > 0.01:
                        # Marker moved while sampling: abort this site.
                        audio.tink()
                        logger.warning("Marker moved too quickly: Aborted sample. Sampled {} datapoints. Looking for steady marker again.".format(self.counter_max-self.counter))
                        self.notify_all({'subject':'calibration.marker_moved_too_quickly','timestamp':self.g_pool.get_timestamp(),'record':True})
                        self.counter = 0
                    else:
                        self.counter -= 1
                        ref = {}
                        ref["norm_pos"] = self.pos
                        ref["screen_pos"] = marker_pos
                        ref["timestamp"] = frame.timestamp
                        self.ref_list.append(ref)
                        # Fixation events let us finish a site faster.
                        if events.get('fixations', []):
                            self.counter -= 5
                        if self.counter <= 0:
                            #last sample before counter done and moving on
                            audio.tink()
                            logger.debug("Sampled {} datapoints. Stopping to sample. Looking for steady marker again.".format(self.counter_max))
                            self.notify_all({'subject':'calibration.marker_sample_completed','timestamp':self.g_pool.get_timestamp(),'record':True})
            # Always save pupil positions
            self.pupil_list.extend(events['pupil'])
            if self.counter:
                if len(self.markers):
                    self.button.status_text = 'Sampling Gaze Data'
                else:
                    self.button.status_text = 'Marker Lost'
            else:
                self.button.status_text = 'Looking for Marker'
            # Stop if autostop condition is satisfied:
            if self.auto_stop >=self.auto_stop_max:
                self.auto_stop = 0
                self.stop()
        else:
            pass
    def gl_display(self):
        """
        use gl calls to render
        at least:
            the published position of the reference
        better:
            show the detected position even if not published
        """
        if self.active:
            draw_points_norm([self.smooth_pos],size=15,color=RGBA(1.,1.,0.,.5))
        if self.active and len(self.markers):
            # draw the largest ellipse of all detected markers
            for marker in self.markers:
                e = marker['ellipses'][-1]
                pts = cv2.ellipse2Poly( (int(e[0][0]),int(e[0][1])),
                                    (int(e[1][0]/2),int(e[1][1]/2)),
                                    int(e[-1]),0,360,15)
                draw_polyline(pts,color=RGBA(0.,1.,0,1.))
                if len(self.markers) > 1:
                    draw_polyline(pts, 1, RGBA(1., 0., 0., .5), line_type=GL_POLYGON)
            # draw indicator on the first detected marker
            if self.counter and self.markers[0]['marker_type'] == 'Ref':
                e = self.markers[0]['ellipses'][-1]
                pts = cv2.ellipse2Poly( (int(e[0][0]),int(e[0][1])),
                                    (int(e[1][0]/2),int(e[1][1]/2)),
                                    int(e[-1]),0,360,360//self.counter_max)
                # Pie-slice style progress indicator that shrinks as sampling completes.
                indicator = [e[0]] + pts[self.counter:].tolist()[::-1] + [e[0]]
                draw_polyline(indicator, color=RGBA(0.1,.5,.7,.8),line_type=GL_POLYGON)
            # draw indicator on the stop marker(s)
            if self.auto_stop:
                for marker in self.markers:
                    if marker['marker_type'] == 'Stop':
                        e = marker['ellipses'][-1]
                        pts = cv2.ellipse2Poly( (int(e[0][0]),int(e[0][1])),
                                            (int(e[1][0]/2),int(e[1][1]/2)),
                                            int(e[-1]),0,360,360//self.auto_stop_max)
                        # NOTE(review): RGBA(8., ...) has red outside [0,1] — presumably clamped by the renderer; confirm.
                        indicator = [e[0]] + pts[self.auto_stop:].tolist() + [e[0]]
                        draw_polyline(indicator,color=RGBA(8.,0.1,0.1,.8),line_type=GL_POLYGON)
        else:
            pass
    def deinit_ui(self):
        """gets called when the plugin get terminated.
        This happens either voluntarily or forced.
        if you have an atb bar or glfw window destroy it here.
        """
        if self.active:
            self.stop()
        super().deinit_ui()
|
{"hexsha": "615d0f755db6e920713567a40463e412315bd0b3", "size": 10531, "ext": "py", "lang": "Python", "max_stars_repo_path": "pupil_src/shared_modules/calibration_routines/manual_marker_calibration.py", "max_stars_repo_name": "paulmathai01/Pupil-Interfaced", "max_stars_repo_head_hexsha": "4ec40c90876af3bdf75b1def47d21e49a79252b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-10-29T04:30:32.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-29T04:30:32.000Z", "max_issues_repo_path": "pupil_src/shared_modules/calibration_routines/manual_marker_calibration.py", "max_issues_repo_name": "paulmathai01/Pupil-Interfaced", "max_issues_repo_head_hexsha": "4ec40c90876af3bdf75b1def47d21e49a79252b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pupil_src/shared_modules/calibration_routines/manual_marker_calibration.py", "max_forks_repo_name": "paulmathai01/Pupil-Interfaced", "max_forks_repo_head_hexsha": "4ec40c90876af3bdf75b1def47d21e49a79252b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6245059289, "max_line_length": 177, "alphanum_fraction": 0.5438229988, "include": true, "reason": "import numpy", "num_tokens": 2207}
|
import pip
def install():
    """Install the runtime dependencies with pip.

    Invokes ``python -m pip`` in a subprocess, which is the only supported way
    to call pip programmatically; ``pip.main`` / ``pip._internal.main`` are
    unsupported internal APIs that break across pip releases.

    Raises:
        subprocess.CalledProcessError: if the pip invocation fails.
    """
    import subprocess
    import sys
    subprocess.check_call([sys.executable, '-m', 'pip', 'install',
                           'keras', 'tensorflow', 'opencv-python', 'numpy'])
try:
import random,keras,cv2
import os
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
from keras.models import model_from_json
import json
except:
install()
def give_prediction(filename=''):
    '''
    Call this function with a filename of an image to get predictions

    Loads the serialized Keras model (architecture from JSON, weights from
    HDF5), preprocesses the image to a 100x100 3-channel array, and returns
    the predicted class label.

    Parameters
    ----------
    filename : str
        Path to the image file to classify.

    Returns
    -------
    str
        One of 'TB', 'normal' or 'pneumonia'.
    '''
    path = os.path.abspath(filename)
    # Context manager guarantees the file handle is closed even on error
    # (the original left the file open on a read failure).
    with open('results/model_4.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights("results/model_4.h5")
    #print("Loaded model from disk")
    img = cv2.imread(path)
    img = cv2.resize(img, (100, 100))
    if len(img.shape) > 2 and img.shape[2] == 4:
        # convert the image from RGBA2RGB (drop the alpha channel)
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
    pred = model.predict(img.reshape(-1, 100, 100, 3))
    # Percent-scaled scores, printed alongside the raw softmax output.
    pred2 = [x * 100 for x in pred[0]]
    class_label_list = ['TB', 'normal', 'pneumonia']
    print(pred, pred2)
    return class_label_list[np.argmax(pred)]
if __name__=="__main__":
    # CLI entry point: prompt for an image path and print the predicted label.
    image_path = input("Enter file name: ")
    print(give_prediction(image_path))
|
{"hexsha": "f3ad2875a683a54650123f86a586c19122952455", "size": 1625, "ext": "py", "lang": "Python", "max_stars_repo_path": "detect.py", "max_stars_repo_name": "newage-virtual-world/tb-and-pneumonia-detection", "max_stars_repo_head_hexsha": "7ac7171f776b90254c86a1cfbc72577470b673b9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "detect.py", "max_issues_repo_name": "newage-virtual-world/tb-and-pneumonia-detection", "max_issues_repo_head_hexsha": "7ac7171f776b90254c86a1cfbc72577470b673b9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:12:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:35:21.000Z", "max_forks_repo_path": "detect.py", "max_forks_repo_name": "newage-virtual-world/tb-and-pneumonia-detection", "max_forks_repo_head_hexsha": "7ac7171f776b90254c86a1cfbc72577470b673b9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-08-16T06:50:43.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-02T14:50:20.000Z", "avg_line_length": 32.5, "max_line_length": 86, "alphanum_fraction": 0.5827692308, "include": true, "reason": "import numpy", "num_tokens": 371}
|
import matplotlib.pyplot as plt
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
class AggregateScalar(object):
    """
    Running weighted average of a scalar stream.

    Typical use: averaging per-batch losses or accuracies, weighting each
    update by the batch size.
    """
    def __init__(self):
        self.reset()

    def reset(self):
        """Clear the accumulator."""
        # Start the denominator at a tiny epsilon so avg() never divides by zero.
        self.count = 0.0001
        self.sum = 0

    def update(self, val, w=1):
        """
        :param val: new running value
        :param w: weight, e.g batch size
        """
        self.sum += val * w
        self.count += w

    def avg(self):
        """Return the weighted mean of everything seen since reset()."""
        return self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of scores/logits.
        target: (batch,) class-index tensor, or a 2-D one-hot/probability
            tensor (argmax over dim 1 is taken first).
        topk: iterable of k values to report.

    Returns:
        List of 0-dim tensors, one per k: the fraction of samples whose true
        class appears in the top-k predictions.
    """
    if len(target.shape) > 1:
        # Convert one-hot / soft labels to hard class indices.
        target = torch.argmax(target, dim=1)
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # .reshape instead of .view: the `correct[:k]` slice is not guaranteed
        # contiguous, and .view raises on non-contiguous tensors in modern torch.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(1 / batch_size))
    return res
def str2bool(v):
    """
    used in argparse, to pass booleans
    codes from : https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    truthy = ('yes', 'true', 't', 'y', '1')
    falsy = ('no', 'false', 'f', 'n', '0')
    lowered = v.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    # Unrecognised token: keep the original (unusual) UserWarning contract.
    raise UserWarning
def set_torch_seeds(seed):
    """Seed every RNG the project relies on (python random, numpy, torch
    CPU and CUDA) so runs are reproducible."""
    import random
    import numpy as np
    import torch
    # Apply the same seed to each library's generator.
    for apply_seed in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        apply_seed(seed)
def delete_files_from_name(folder_path, file_name, type='contains'):
    """ Delete log files based on their name

    :param folder_path: directory to scan (non-recursive)
    :param file_name: exact name ('is') or substring ('contains') to match
    :param type: matching mode, one of 'is' / 'contains'
    """
    assert type in ['is', 'contains']
    for entry in os.listdir(folder_path):
        matched = (file_name == entry) if type == 'is' else (file_name in entry)
        if matched:
            os.remove(os.path.join(folder_path, entry))
def plot_image(input):
    """Display a CHW image tensor with matplotlib and return the figure."""
    pixels = input.numpy()
    # CHW -> HWC, the layout matplotlib expects.
    pixels = np.transpose(pixels, (1, 2, 0))
    if pixels.shape[-1] != 3:
        # Single-channel image: drop the channel axis so imshow renders grayscale.
        pixels = pixels[:, :, 0]
    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(111)
    ax.axis('off')
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.imshow(pixels, cmap='gray')
    plt.show()
    return fig
|
{"hexsha": "adbe8a2320f62460528889c7a53e6d80a3e927b9", "size": 2392, "ext": "py", "lang": "Python", "max_stars_repo_path": "Authors' code/Zero_shot_learning/utils/helpers.py", "max_stars_repo_name": "onicolini/zero-shot_knowledge_transfer", "max_stars_repo_head_hexsha": "9dd6d08eadb8243881f0fb8e9ac2d5653dd25229", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Authors' code/Zero_shot_learning/utils/helpers.py", "max_issues_repo_name": "onicolini/zero-shot_knowledge_transfer", "max_issues_repo_head_hexsha": "9dd6d08eadb8243881f0fb8e9ac2d5653dd25229", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Authors' code/Zero_shot_learning/utils/helpers.py", "max_forks_repo_name": "onicolini/zero-shot_knowledge_transfer", "max_forks_repo_head_hexsha": "9dd6d08eadb8243881f0fb8e9ac2d5653dd25229", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-27T15:44:17.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-27T15:44:17.000Z", "avg_line_length": 24.9166666667, "max_line_length": 98, "alphanum_fraction": 0.6007525084, "include": true, "reason": "import numpy", "num_tokens": 646}
|
import autogp
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import sklearn.metrics.pairwise as sk
import time
import scipy
import seaborn as sns
import random
from kerpy.Kernel import Kernel
from kerpy.MaternKernel import MaternKernel
from kerpy.GaussianKernel import GaussianKernel
# This code does the following:
# generate the values for f
# generate the values for exp pf f = intensity
# generate with this intensity some observations (thus each obs will correspond to a different intensity)
# use the inputs and the outputs in the GP model
# look at the posterior distribution for the intenisty to see if the true values of lambda are close to the posterior means
## NB. When using the Matern Kernel there is still a problem of numerical stability for the inversion the Cholesky matrix! I had to increase the jitter.
## Similar problem when computing the RBF kernel
#np.random.seed(1990)
np.random.seed(1500)
# Generate synthetic data. N_all = total number of observations, N = training points.
N_all = 200
N = 50
#Set data parameters
offset_data = 1.0
lengthscale_data = 1.0/3.0 #fixed to be a third of the range of the dataset
sigma_data = 1.0
# Single shared Gaussian perturbation applied to all initial hyperparameters.
random_noise = np.random.normal(loc=0.0, scale=1.0, size=None)
#Set initial parameters
#gamma = 1. #to check
#lengthscale_initial = np.sqrt(1/(2*gamma)) + random_noise
lengthscale_initial = lengthscale_data + random_noise
offset_initial = offset_data + random_noise
sigma_initial = sigma_data + random_noise
inputs = np.linspace(0, 1, num=N_all)[:, np.newaxis]
sigma = GaussianKernel(sigma = lengthscale_data).kernel(inputs,inputs) #lengthscale in kerpy is called sigma
#np.savetxt("../../Workspace/updated_AutoGP/R_plots/inputs.csv", inputs, header="inputs", delimiter=",")
# There is a problem of numerical precision
# Jitter the diagonal so the Cholesky factorisation of the covariance succeeds.
pert = np.zeros((N_all,N_all))
np.fill_diagonal(pert, 0.001)
#print('perturbation',pert)
sigma = sigma + pert
#print('this is the covariance used to generate the data', sigma)
#print('this is the covariance shape', sigma.shape)
#print('its cholesky', np.linalg.cholesky(sigma+pert))
#sigma = MaternKernel(width = lengthscale_data, nu = 1.5, sigma = sigma_data).kernel(inputs,inputs) #Matern 3_2
#sigma = MaternKernel(width = lengthscale_data, nu = 2.5, sigma = sigma_data).kernel(inputs,inputs) #Matern 5_2
#sigma = sk.rbf_kernel(inputs, inputs)
#sigma = sk.rbf_kernel(inputs, inputs, gamma = 50)
#print('shape of sigma', sigma.shape)
n_samples = 1 # num of realisations for the GP
# Draw a GP realisation, exponentiate (plus offset) to obtain the LGCP
# intensity, then sample Poisson counts per input location.
process_values = np.random.multivariate_normal(mean=np.repeat(0,N_all), cov=sigma)
process_values = np.reshape(process_values, (N_all,n_samples))
sample_intensity = np.exp(process_values + offset_data)
outputs = np.ones((N_all,n_samples))
for i in range(N_all):
    for j in range(n_samples):
        outputs[i,j] = np.random.poisson(lam=sample_intensity[i,j])
# selects training and test
idx = np.arange(N_all)
np.random.shuffle(idx) #Maybe to be fixed?
xtrain = inputs[idx[:N]]
ytrain = outputs[idx[:N]]
data = autogp.datasets.DataSet(xtrain, ytrain)
xtest = inputs[idx[N:]]
ytest = outputs[idx[N:]]
# Initialize the Gaussian process.
likelihood = autogp.likelihoods.LGCP(offset = offset_initial)
kernel = [autogp.kernels.RadialBasis(1, lengthscale= lengthscale_initial, std_dev = sigma_initial)]
#kernel = [autogp.kernels.Matern_3_2(1, lengthscale= lengthscale_initial, std_dev = sigma_initial)]
#kernel = [autogp.kernels.Matern_5_2(1, lengthscale= lengthscale_initial, std_dev = sigma_initial)]
# One fit per sparsity level: fraction of training points used as inducing inputs.
sparsity_vector = np.array([1.,0.5,0.2,0.1])
times = np.zeros((sparsity_vector.shape[0]))
for i in range(sparsity_vector.shape[0]):
    # Need to redefine the data
    data = autogp.datasets.DataSet(xtrain, ytrain)
    sparsity_factor = sparsity_vector[i]
    print('sparsity factor is', sparsity_factor)
    inducing_number = int(sparsity_factor*N)
    print('number of inducing inputs', inducing_number)
    id_sparse = np.arange(N)
    np.random.shuffle(id_sparse)
    inducing_inputs = xtrain[id_sparse[:inducing_number]]
    # Define the model
    model = autogp.GaussianProcess(likelihood, kernel, inducing_inputs, num_components=2, diag_post=True)
    # Define the optimizer
    optimizer = tf.train.RMSPropOptimizer(0.005)
    # Define array to store the times
    # Train the model
    start = time.time()
    print("Start the training")
    print("The sparsity factor is" + " " + str(sparsity_factor))
    model.fit(data, optimizer, loo_steps=0, var_steps=50, epochs=1000, display_step=30)
    end = time.time()
    time_elapsed = end-start
    times[i] = time_elapsed
    print("Execution finished in seconds", time_elapsed)
    # Predict new inputs.
    # NOTE(review): predict() is called twice; one call returns both mean and
    # variance, so the second call is redundant work.
    ypred, _ = model.predict(xtest) #V_the command predict gives back the predicted mean and the predicted variance corresponding to the xtest
    _, post_var = model.predict(xtest) #V_the command predict gives back the predicted mean and the predicted variance corresponding to the xtest
    ypred_np = np.asarray(ypred)
    post_var_np = np.asarray(post_var)
    path = "../../Workspace/updated_AutoGP/R_plots/"
    sparse = str(sparsity_factor)
    var_distr = "2sparse"
    # Save the data to export to R
    np.savetxt(path + sparse + var_distr + "data_inputs.csv", inputs, header='inputs', delimiter=",")
    np.savetxt(path + sparse + var_distr + "data_outputs.csv", outputs, header='outputs', delimiter=",")
    np.savetxt(path + sparse + var_distr + "xtest.csv", xtest, header='xtest', delimiter=",")
    np.savetxt(path + sparse + var_distr + "ytest.csv", ytest, header='ytest', delimiter=",")
    np.savetxt(path + sparse + var_distr + "xtrain.csv", xtrain, header='xtrain', delimiter=",")
    np.savetxt(path + sparse + var_distr + "ytrain.csv", ytrain, header='ytrain', delimiter=",")
    np.savetxt(path + sparse + var_distr + "sample_intensity_test.csv", sample_intensity[idx[N:]], header='sample_intensity_test', delimiter=",")
    np.savetxt(path + sparse + var_distr + "total_results_ypred.csv", ypred_np, header='ypred', delimiter =",")
    np.savetxt(path + sparse + var_distr + "total_results_postvar.csv", post_var_np, header='post_var', delimiter =",")
    # Plot the training set, the set test and the posterior mean for the intensity which is equal to the E[y].
    first_line, = plt.plot(xtrain, ytrain, '.', mew=2, label = "a") #V_plot the points used to train the model
    second_line, = plt.plot(xtest, ytest, 'o', mew=2, label = "b") #V_plot the points used to test the model
    third_line, = plt.plot(xtest, ypred, 'x', mew=2, label = "c") #V_plot the tested x with the predicted y
    plt.ylabel('Value of the process')
    plt.xlabel('x')
    plt.legend([first_line, second_line, third_line], ['Training set', 'Test set', 'Predicted y values'])
    plt.savefig(path + sparse + 'first_plot.png')
    plt.show()
    #Plot posterior intensity together with the intensity used to generate the model
    first_line, = plt.plot(xtest, ypred, 'x', mew=2) #plotting the ypred which is the posterior mean of the intensity
    second_line, = plt.plot(xtest, sample_intensity[idx[N:]], 'o', mew=2) #plotting the intensity used to generate ytest
    plt.ylabel('Intensity')
    plt.xlabel('x')
    plt.legend([first_line, second_line], ['Posterior mean', 'True mean'])
    plt.savefig(path + sparse + 'second_plot.png')
    plt.show()
    # Plotting the histograms for the true intensity and posterior mean intensity
    #count, bins, ignored = plt.hist(sample_intensity, normed=True)
    #count, bins, ignored = plt.hist(ypred, normed=True)
    sns.distplot(sample_intensity, label='Sample intensity')
    sns.distplot(ypred, label = 'Posterior mean intensity')
    plt.ylabel('Frequency of the intensity')
    plt.xlabel('x')
    plt.legend()
    plt.savefig(path + sparse + 'third_plot.png')
    plt.show()
print('time vector', times)
|
{"hexsha": "44da5af7b25874e6816b71b2b6b88574296afafc", "size": 7652, "ext": "py", "lang": "Python", "max_stars_repo_path": "LGCP_2sparse.py", "max_stars_repo_name": "VirgiAgl/updated_AutoGP", "max_stars_repo_head_hexsha": "2ec5671a4c1554555ab70c351944b3e8649e4237", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "LGCP_2sparse.py", "max_issues_repo_name": "VirgiAgl/updated_AutoGP", "max_issues_repo_head_hexsha": "2ec5671a4c1554555ab70c351944b3e8649e4237", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LGCP_2sparse.py", "max_forks_repo_name": "VirgiAgl/updated_AutoGP", "max_forks_repo_head_hexsha": "2ec5671a4c1554555ab70c351944b3e8649e4237", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.3621621622, "max_line_length": 152, "alphanum_fraction": 0.7458180868, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2032}
|
import numpy as np
import autoarray as aa
from autogalaxy.plane.plane import Plane
class SimulatorInterferometer(aa.SimulatorInterferometer):
    def __init__(
        self,
        uv_wavelengths,
        exposure_time: float,
        transformer_class=aa.TransformerDFT,
        noise_sigma=0.1,
        noise_if_add_noise_false=0.1,
        noise_seed=-1,
    ):
        """
        Simulator of interferometer datasets.

        Thin wrapper around ``aa.SimulatorInterferometer`` that adds the
        plane / galaxies entry points below; all parameters are forwarded
        unchanged to the parent constructor.

        Parameters
        ----------
        uv_wavelengths
            The (u, v) baseline coordinates at which visibilities are
            computed (passed through to ``aa.SimulatorInterferometer``).
        exposure_time
            The exposure time of the simulated observation.
        transformer_class
            The transformer used to map the real-space image to
            visibilities (defaults to ``aa.TransformerDFT``).
        noise_sigma
            Gaussian noise level added to the simulated visibilities.
        noise_if_add_noise_false
            Noise-map value used when noise is not added.
        noise_seed
            Seed for the noise generation (-1 presumably means random --
            TODO confirm in the autoarray parent class).
        """
        super().__init__(
            uv_wavelengths=uv_wavelengths,
            exposure_time=exposure_time,
            transformer_class=transformer_class,
            noise_sigma=noise_sigma,
            noise_if_add_noise_false=noise_if_add_noise_false,
            noise_seed=noise_seed,
        )

    def via_plane_from(self, plane, grid, name=None):
        """
        Simulate an interferometer dataset from a plane.

        Computes the plane's 2D image on ``grid``, bins it, and delegates
        the actual simulation (transform to visibilities, noise) to
        ``via_image_from``.

        Parameters
        ----------
        plane
            The plane whose galaxies' light is simulated.
        grid
            The image-plane grid on which the image is evaluated.
        name
            Optional name attached to the simulated dataset.
        """
        image = plane.image_2d_from(grid=grid)
        return self.via_image_from(image=image.binned, name=name)

    def via_galaxies_from(self, galaxies, grid, name=None):
        """
        Simulate an interferometer dataset directly from a list of galaxies.

        Builds a `Plane` from the galaxies (its redshift is the mean of the
        galaxies' redshifts) and forwards to ``via_plane_from``.

        Parameters
        ----------
        galaxies
            The galaxies whose light is simulated; each must expose a
            ``redshift`` attribute.
        grid
            The image-plane grid on which the image is evaluated.
        name
            Optional name attached to the simulated dataset.
        """
        plane = Plane(
            redshift=float(np.mean([galaxy.redshift for galaxy in galaxies])),
            galaxies=galaxies,
        )
        return self.via_plane_from(plane=plane, grid=grid, name=name)
|
{"hexsha": "3d79ee8efb9e7c816ee8b3dfe5bf350a21682602", "size": 3459, "ext": "py", "lang": "Python", "max_stars_repo_path": "autogalaxy/interferometer/interferometer.py", "max_stars_repo_name": "caoxiaoyue/PyAutoGalaxy", "max_stars_repo_head_hexsha": "ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-05-29T08:46:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T14:06:20.000Z", "max_issues_repo_path": "autogalaxy/interferometer/interferometer.py", "max_issues_repo_name": "caoxiaoyue/PyAutoGalaxy", "max_issues_repo_head_hexsha": "ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-01-06T09:42:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T15:52:23.000Z", "max_forks_repo_path": "autogalaxy/interferometer/interferometer.py", "max_forks_repo_name": "caoxiaoyue/PyAutoGalaxy", "max_forks_repo_head_hexsha": "ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-10T07:45:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T17:36:40.000Z", "avg_line_length": 38.4333333333, "max_line_length": 127, "alphanum_fraction": 0.6299508528, "include": true, "reason": "import numpy", "num_tokens": 729}
|
// Boost.Geometry (aka GGL, Generic Geometry Library)
// Copyright (c) 2007-2012 Barend Gehrels, Amsterdam, the Netherlands.
// Use, modification and distribution is subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_GEOMETRY_EXT_GIS_IO_SHAPELIB_SHAPE_CREATOR_HPP
#define BOOST_GEOMETRY_EXT_GIS_IO_SHAPELIB_SHAPE_CREATOR_HPP
#include <fstream>
#include "shapefil.h"
#include <boost/noncopyable.hpp>
#include <boost/type_traits/promote.hpp>
#include <boost/geometry/io/wkt/wkt.hpp>
#include <boost/geometry/extensions/gis/io/shapelib/shp_create_object.hpp>
#include <boost/geometry/extensions/gis/io/shapelib/shp_create_object_multi.hpp>
#include <boost/geometry/extensions/gis/io/shapelib/dbf_write_attribute.hpp>
namespace boost { namespace geometry
{
// Exception thrown when the shapelib .shp/.dbf files cannot be created;
// what() reports the offending base filename.
class shapelib_file_create_exception : public geometry::exception
{
public:
    inline shapelib_file_create_exception(std::string const& filename)
        : m_filename(filename)
    {}

    // Returns the base filename that could not be created.
    virtual char const* what() const throw()
    {
        return m_filename.c_str();
    }

private :
    std::string m_filename;
};
namespace detail
{

// Maps a Boost.Geometry tag onto the corresponding shapelib SHPT_* shape
// type: points -> SHPT_POINT, linear geometries -> SHPT_ARC, areal
// geometries -> SHPT_POLYGON, multipoints -> SHPT_MULTIPOINT.  The primary
// template is intentionally empty so unsupported tags fail to compile.
template <typename Tag>
struct SHPType
{
};

template <> struct SHPType<point_tag> { static int const value = SHPT_POINT; };
template <> struct SHPType<segment_tag> { static int const value = SHPT_ARC; };
template <> struct SHPType<linestring_tag> { static int const value = SHPT_ARC; };
template <> struct SHPType<polygon_tag> { static int const value = SHPT_POLYGON; };
template <> struct SHPType<ring_tag> { static int const value = SHPT_POLYGON; };
template <> struct SHPType<box_tag> { static int const value = SHPT_POLYGON; };
template <> struct SHPType<multi_point_tag> { static int const value = SHPT_MULTIPOINT; };
template <> struct SHPType<multi_linestring_tag> { static int const value = SHPT_ARC; };
template <> struct SHPType<multi_polygon_tag> { static int const value = SHPT_POLYGON; };

} // namespace detail
// RAII writer for a shapelib .shp/.dbf file pair holding geometries of type
// Geometry.  ShapeType defaults to the SHPT_* constant derived from the
// geometry's tag via detail::SHPType.
template
<
    typename Geometry,
    int ShapeType = detail::SHPType
        <
            typename geometry::tag<Geometry>::type
        >::value
>
class shape_creator : public boost::noncopyable
{
public :
    // Creates "<name>.shp" and "<name>.dbf"; throws
    // shapelib_file_create_exception if either handle is NULL.
    shape_creator(std::string const& name)
    {
        m_shp = ::SHPCreate((name + ".shp").c_str(), ShapeType);
        m_dbf = ::DBFCreate((name + ".dbf").c_str());
        m_prj_name = name + ".prj";
        if (m_shp == NULL || m_dbf == NULL)
        {
            throw shapelib_file_create_exception(name);
        }
    }

    // Closes both files (handles are only closed if non-null).
    virtual ~shape_creator()
    {
        if (m_shp) ::SHPClose(m_shp);
        if (m_dbf) ::DBFClose(m_dbf);
    }

    // Returns: index in shapefile
    inline int AddShape(Geometry const& geometry)
    {
        // Note: we MIGHT design a small wrapper class which destroys in destructor
        ::SHPObject* obj = SHPCreateObject(geometry);
        int result = SHPWriteObject(m_shp, -1, obj );
        ::SHPDestroyObject( obj );
        return result;
    }

    // Adds a DBF attribute column; the DBF field type is derived from the
    // promoted C++ type T.  width/decimals are forwarded to DBFAddField.
    template <typename T>
    inline void AddField(std::string const& name, int width = 16, int decimals = 0)
    {
        ::DBFAddField(m_dbf, name.c_str(),
            detail::DBFFieldType
                <
                    typename boost::promote<T>::type
                >::value,
            width, decimals);
    }

    // Writes one attribute cell; row_index is the shape index returned by
    // AddShape, field_index the column order of AddField calls.
    template <typename T>
    inline void WriteField(int row_index, int field_index, T const& value)
    {
        detail::DBFWriteAttribute
            <
                typename boost::promote<T>::type
            >::apply(m_dbf, row_index, field_index, value);
    }

    // Writes a .prj sidecar for the given SRID.  Only EPSG:28992 (Dutch
    // "RD_New", double stereographic) is supported; any other SRID is
    // silently ignored.
    inline void SetSrid(int srid)
    {
        if (srid == 28992)
        {
            std::ofstream out(m_prj_name.c_str());
            out << "PROJCS[\"RD_New\""
                << ",GEOGCS[\"GCS_Amersfoort\""
                << ",DATUM[\"D_Amersfoort\""
                << ",SPHEROID[\"Bessel_1841\",6377397.155,299.1528128]]"
                << ",PRIMEM[\"Greenwich\",0]"
                << ",UNIT[\"Degree\",0.0174532925199432955]]"
                << ",PROJECTION[\"Double_Stereographic\"]"
                << ",PARAMETER[\"False_Easting\",155000]"
                << ",PARAMETER[\"False_Northing\",463000]"
                << ",PARAMETER[\"Central_Meridian\",5.38763888888889]"
                << ",PARAMETER[\"Scale_Factor\",0.9999079]"
                << ",PARAMETER[\"Latitude_Of_Origin\",52.15616055555555]"
                << ",UNIT[\"Meter\",1]]"
                << std::endl;
        }
    }

private :
    ::SHPHandle m_shp;
    ::DBFHandle m_dbf;
    std::string m_prj_name;
};
}} // namespace boost::geometry
#endif // BOOST_GEOMETRY_EXT_GIS_IO_SHAPELIB_SHAPE_CREATOR_HPP
|
{"hexsha": "eeabbc891dfa27dc6414777eed21c614fd231d01", "size": 4739, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "Siv3D/src/ThirdParty/boost/geometry/extensions/gis/io/shapelib/shape_creator.hpp", "max_stars_repo_name": "yumetodo/OpenSiv3D", "max_stars_repo_head_hexsha": "ea191438ecbc64185f5df3d9f79dffc6757e4192", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 709.0, "max_stars_repo_stars_event_min_datetime": "2016-03-19T07:55:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:02:22.000Z", "max_issues_repo_path": "Siv3D/src/ThirdParty/boost/geometry/extensions/gis/io/shapelib/shape_creator.hpp", "max_issues_repo_name": "yumetodo/OpenSiv3D", "max_issues_repo_head_hexsha": "ea191438ecbc64185f5df3d9f79dffc6757e4192", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 623.0, "max_issues_repo_issues_event_min_datetime": "2015-01-02T23:45:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T11:15:23.000Z", "max_forks_repo_path": "Siv3D/src/ThirdParty/boost/geometry/extensions/gis/io/shapelib/shape_creator.hpp", "max_forks_repo_name": "yumetodo/OpenSiv3D", "max_forks_repo_head_hexsha": "ea191438ecbc64185f5df3d9f79dffc6757e4192", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 215.0, "max_forks_repo_forks_event_min_datetime": "2015-01-14T15:50:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T03:58:36.000Z", "avg_line_length": 30.5741935484, "max_line_length": 90, "alphanum_fraction": 0.6322008863, "num_tokens": 1228}
|
# Capture this session's options so the spawned test processes inherit them.
const opts = Base.JLOptions()
# Forward --inline=no when inlining is disabled in the parent process.
const inline_flag = opts.can_inline == 1 ? `` : `--inline=no`
# Forward the parent's code-coverage mode (1 = user, 2 = all, else off).
const cov_flag = (opts.code_coverage == 1) ? `--code-coverage=user` :
                 (opts.code_coverage == 2) ? `--code-coverage=all` :
                 ``
"""
    run_test(script)

Run the test file `script` in a fresh julia process, forwarding the parent's
inline/coverage flags.  `script` is resolved relative to this file's
directory so the suite can be launched from any working directory.
`run` throws if the child exits with a non-zero status.  Returns `nothing`.
"""
function run_test(script)
    srvrscript = joinpath(dirname(@__FILE__), script)
    # Bug fix: the resolved path was computed but the command interpolated
    # the bare `script` name, so it was looked up in the cwd instead.
    srvrcmd = `$(joinpath(JULIA_HOME, "julia")) $cov_flag $inline_flag $srvrscript`
    println("Running tests from ", script, "\n", "="^60)
    run(srvrcmd)  # result was previously bound to an unused local
    println("Finished ", script, "\n", "="^60)
    nothing
end
# Pass 1: shared-memory node metadata + simple cluster metadata.
ENV["NODE_META_IMPL"] = "DagScheduler.ShmemMeta.ShmemExecutorMeta"
ENV["CLUSTER_META_IMPL"] = "DagScheduler.SimpleMeta.SimpleExecutorMeta"
println("Running with ShmemMeta and SimpleMeta")
println("===================================")
# `ipcs -a` dumps SysV IPC state before/after to spot leaked shm segments.
run(`ipcs -a`)
println("===================================")
run_test("runtests_master_only.jl")
run_test("runtests_1node.jl")
run_test("runtests_2node.jl")
println("===================================")
run(`ipcs -a`)

# Pass 2: shared-memory node metadata + FoundationDB cluster metadata.
ENV["NODE_META_IMPL"] = "DagScheduler.ShmemMeta.ShmemExecutorMeta"
ENV["CLUSTER_META_IMPL"] = "DagScheduler.FdbMeta.FdbExecutorMeta"
println("Running with ShmemMeta and FdbMeta")
println("===================================")
run(`ipcs -a`)
println("===================================")
run_test("runtests_1node.jl")
run_test("runtests_2node.jl")
println("===================================")
run(`ipcs -a`)

# Pass 3: FoundationDB metadata at both node and cluster level.
ENV["NODE_META_IMPL"] = "DagScheduler.FdbMeta.FdbExecutorMeta"
ENV["CLUSTER_META_IMPL"] = "DagScheduler.FdbMeta.FdbExecutorMeta"
println("Running with FdbMeta")
println("===================================")
run_test("runtests_fdb_queue.jl")
run_test("runtests_fdb_dict.jl")
run_test("runtests_master_only.jl")
|
{"hexsha": "ffa5baed3fc0323f1fa61ddb7b2888ed2ab5ff39", "size": 1734, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "tanmaykm/DagScheduler.jl", "max_stars_repo_head_hexsha": "00859c8f12589166443a04f71a57cacc5b4ccaf1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-02-05T21:55:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T07:33:21.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "tanmaykm/DagScheduler.jl", "max_issues_repo_head_hexsha": "00859c8f12589166443a04f71a57cacc5b4ccaf1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "tanmaykm/DagScheduler.jl", "max_forks_repo_head_hexsha": "00859c8f12589166443a04f71a57cacc5b4ccaf1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:44:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:44:42.000Z", "avg_line_length": 37.6956521739, "max_line_length": 79, "alphanum_fraction": 0.6107266436, "num_tokens": 455}
|
! Attempt one single-particle translation move: bump the attempt counter
! for this move type, propose a translation, and on acceptance bump the
! success counter and refresh the particle's cell-list entry.
SUBROUTINE try_conf(imol,itype)
  use movetype
  use ints
  use coupling_pres
  IMPLICIT NONE
  integer :: imol, itype   ! particle index / move-type index
  logical :: success
  ! write(*,*) "try_move:"
  movetype_i_try(itype) = movetype_i_try(itype) + 1
  call conf_tran(imol,success)
  if(success) then
    movetype_i_success(itype) = movetype_i_success(itype) + 1
    call cellmap_update(imol) ! xyz and cell list update
  endif
  RETURN
END SUBROUTINE try_conf
! Translation proposal for particle imol: displace each coordinate by a
! uniform random amount in [-dlr/2, +dlr/2] (dlr depends on the species)
! and accept iff the trial position overlaps no other particle.
SUBROUTINE conf_tran(imol,success)
  use pos
  use trans
  use ints
  IMPLICIT NONE
  integer :: imol
  logical :: success, overlap_q
  double precision :: dlr, rand
  external rand
  ! Step size is species dependent (dlr_a for "A", dlr_b otherwise).
  if (typs(imol) == "A") THEN
    dlr = dlr_a
  else
    dlr = dlr_b
  endif
  ! Trial position stored in the module-level coord_try buffer.
  coord_try(1) = x(imol) + dlr*(rand()-0.50d0)
  coord_try(2) = y(imol) + dlr*(rand()-0.50d0)
  coord_try(3) = z(imol) + dlr*(rand()-0.50d0)
  typs_try = typs(imol)
  call overlap(imol,coord_try,typs_try,'x',overlap_q) ! if overlaps, stop the subroutine
  if ( .not. overlap_q ) then
    success = .true.
  else
    success = .false.
  endif
  return
end subroutine conf_tran
! Attempt one isotropic volume (NPT pressure-coupling) move: the box scale
! factor is multiplied by expd.  For a compression (expd < 1) every A-B
! pair is first checked for hard-core overlap at the scaled distances; the
! move is then accepted with the standard NPT Metropolis criterion.
SUBROUTINE try_pres(itype)
  use omp_lib
  use coupling_pres
  use pos
  use ints, only: nptot
  use inp
  use cellmap
  use movetype
  use sigmas
  IMPLICIT NONE
  double precision :: oldvol, rand, expd2
  double precision :: delh, boltz, xt, yt, zt, rt
  integer :: itype, i, j
  logical :: success
  integer :: id, start_k, end_k, kmax, k ! for openmp
  logical :: overlap_q ! for openmp
  external rand
  ! write(*,*) "try_pres:"
  movetype_i_try(itype) = movetype_i_try(itype) + 1
  expd2 = expd**2
  ! check overlaps (only needed when the box shrinks; expansion cannot
  ! create new overlaps)
  if (expd < 1.0) then
    !! single node job (serial reference implementation, kept for clarity)
    ! do i=1,nptot-1
    !   do j=i+1, nptot
    !     if(typs(i) == typs(j)) cycle
    !     xt = x(i) - x(j)
    !     yt = y(i) - y(j)
    !     zt = z(i) - z(j)
    !     XT = XT - box(1)*DNINT(XT/box(1))
    !     YT = YT - box(2)*DNINT(YT/box(2))
    !     ZT = ZT - box(3)*DNINT(ZT/box(3))
    !     rt = (XT*XT+YT*YT+ZT*ZT)*expd2*pos_scaling2
    !     if( rt < sigma_ab**2 ) return
    !   enddo
    ! enddo
    ! endif
    ! openmp version: the outer i-range is cut into kmax slices so the
    ! early-exit on overlap can trigger between slices.
    overlap_q = .false.
    start_k = 0
    kmax = 16 ! might be better performance if the optimized value used.
    do k=1,kmax
      end_k = k*(nptot-1)/kmax
      ! NOTE(review): overlap_q is written by multiple threads without an
      ! atomic/critical guard; it is only ever set to .true., which is
      ! presumably benign here -- confirm against the OpenMP memory model.
      !$omp parallel private (xt, yt, zt, rt, id, i, j) shared (overlap_q)
      id = omp_get_thread_num() + 1
      do i=start_k+id,end_k,openmp_thread_num
        do j=i+1, nptot
          if(typs(i) == typs(j)) cycle   ! only unlike (A-B) pairs interact
          xt = x(i) - x(j)
          yt = y(i) - y(j)
          zt = z(i) - z(j)
          ! minimum-image convention
          XT = XT - box(1)*DNINT(XT/box(1))
          YT = YT - box(2)*DNINT(YT/box(2))
          ZT = ZT - box(3)*DNINT(ZT/box(3))
          ! squared distance after applying the trial scaling
          RT = (XT*XT+YT*YT+ZT*ZT)*expd2*pos_scaling2
          if( (rt < sigma_ab**2) .or. overlap_q) then
            overlap_q = .true.
            exit
          endif
        enddo
        if(overlap_q) exit
      enddo
      !$omp end parallel
      if(overlap_q) return   ! reject immediately on any overlap
      start_k = end_k
    enddo
  endif
  ! accepted or denied?
  success = .false.
  ! if volume change is on 3-axis
  !oldvol = box(1)*box(2)*box(3)*(pos_scaling**3)
  oldvol = box(1)*box(2)*box(3)*pos_scaling3
  ! NPT acceptance: beta*P*dV - (N+1)*ln(V'/V), with V'/V = expd**3.
  delh = tinv*press_val*oldvol*(expd**3-1.0D0) - dble(nptot+1)*DLOG(expd)*3.0D0
  if(delh <= 0.0D0) then
    !write(*,*) "negative", delh
    success = .true.
  else
    boltz = dexp(-delh)
    if(rand() < boltz) then
      success = .true.
    endif
  endif
  ! accepted: positions are stored in reduced units, so only the global
  ! scale factors need updating.
  if(success) then
    movetype_i_success(itype) = movetype_i_success(itype) + 1
    !call energy_update
    ! position and box update
    pos_scaling = pos_scaling*expd
    pos_scaling2 = pos_scaling**2
    pos_scaling3 = pos_scaling**3
  endif
  return
END SUBROUTINE try_pres
! Attempt one identity-exchange (semigrand) move: pick a species different
! from particle imol's current one, check for overlaps at the unchanged
! position, and accept with probability min(1, exp(m*log_xi)) where m is
! +1/-1 depending on the direction of the A<->B swap.  On success the
! species label and the per-species counters are updated.
subroutine try_exch(imol,itype)
  use pos
  use movetype
  use ints
  use coupling_exch
  IMPLICIT NONE
  integer :: imol, itype, try_type
  double precision :: m_val, delxi, rand, boltz
  logical :: success, overlap_q
  external rand
  ! write(*,*) "try_exch:"
  movetype_i_try(itype) = movetype_i_try(itype) + 1
  !write(*,*) "movetype_i_try exch", movetype_i_try(itype)
  ! Draw a target species until it differs from the current one.
  DO while (.true.)
    try_type = INT(exch_ncomp*RAND() ) + 1
    if(typs(imol) == exch_tcomp(try_type)) then
      cycle
    else
      exit ! continue
    endif
  enddo
  !write(*,*) "exchange =>", typs(imol), exch_tcomp(try_type)
  ! The particle keeps its position; only the identity changes.
  coord_try(1) = x(imol)
  coord_try(2) = y(imol)
  coord_try(3) = z(imol)
  CALL overlap(imol,coord_try,exch_tcomp(try_type),'x',overlap_q) ! if overlaps, stop the subroutine
  if(overlap_q) then
    !write(*,*) "return overlap"
    return
  endif
  ! determine m value (sign of the chemical-potential-difference term)
  if ( (typs(imol) == exch_tcomp(1)) .and. (exch_tcomp(try_type) == exch_tcomp(2)) ) then
    m_val = -1.0D0
  else
    m_val = 1.0D0
  endif
  !write(*,*) "m val = ", m_val, "due to", typs(imol), "=",exch_tcomp(1),"and",exch_tcomp(try_type),"=",exch_tcomp(2)
  ! accepted or denied?
  success = .false.
  delxi = m_val*log_xi
  if(delxi >= 0.0D0) then
    success = .true.
  else
    boltz = dexp(delxi)
    !write(*,*) "boltz = ", boltz
    if(rand() < boltz) then
      success = .true.
    endif
  endif
  !if(success) then
  !  write(*,*) "success", delxi, boltz
  !else
  !  write(*,*) "fail", delxi, boltz
  !endif
  ! update new particle: decrement the old species count, relabel, then
  ! increment the new species count.
  if(success) then
    !write(*,*) "exchange result: ",imol," th particle ",typs(imol), " => ",exch_tcomp(try_type)
    movetype_i_success(itype) = movetype_i_success(itype) + 1
    !write(*,*) movetype_i_success(itype)
    if (typs(imol) == 'A') then
      nmol_a = nmol_a - 1
    else if (typs(imol) == 'B') then
      nmol_b = nmol_b - 1
    else
      write(*,*) "no info component"
      stop
    endif
    typs(imol) = exch_tcomp(try_type)
    if (typs(imol) == 'A') then
      nmol_a = nmol_a + 1
    else if (typs(imol) == 'B') then
      nmol_b = nmol_b + 1
    else
      write(*,*) "no info component"
      stop
    endif
  endif
  RETURN
end subroutine try_exch
|
{"hexsha": "7da7175e85b00af92c3ab67f3b834d69c28d8df4", "size": 6085, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "MonteCarlo/mcrun_v2/try.f90", "max_stars_repo_name": "jht0664/Utility_python_gromacs", "max_stars_repo_head_hexsha": "4457b62e2f0252bcb38021d5deda0cfb932e3ed9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-02T11:27:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-02T11:27:59.000Z", "max_issues_repo_path": "MonteCarlo/mcrun_v2/try.f90", "max_issues_repo_name": "jht0664/Utility_python_gromacs", "max_issues_repo_head_hexsha": "4457b62e2f0252bcb38021d5deda0cfb932e3ed9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MonteCarlo/mcrun_v2/try.f90", "max_forks_repo_name": "jht0664/Utility_python_gromacs", "max_forks_repo_head_hexsha": "4457b62e2f0252bcb38021d5deda0cfb932e3ed9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1651785714, "max_line_length": 118, "alphanum_fraction": 0.5955628595, "num_tokens": 2173}
|
using Polyhedra
include("simplex.jl")
include("permutahedron.jl")
include("board.jl")
# Approximate-equality helpers (Julia <= 0.5 parametric-method syntax):
# exact == for non-float reals, a generous 1024*eps tolerance for floats,
# and vectors compared via the norm of their difference.
myeq(x::Real, y::Real) = myeq(promote(x, y)...)
myeq{T<:Real}(x::T, y::T) = x == y
myeq{T<:AbstractFloat}(x::T, y::T) = y < x+1024*eps(T) && x < y+1024*eps(T)
myeq{S<:Real,T<:Real}(x::Vector{S}, y::Vector{T}) = myeq(promote(x, y)...)
myeq{T<:Real}(x::Vector{T}, y::Vector{T}) = x == y
myeq{T<:AbstractFloat}(x::Vector{T}, y::Vector{T}) = myeq(norm(x - y), zero(T))
# True when x is (approximately) zero under the rules above.
myeqzero{T<:Real}(x::T) = myeq(x, zero(T))
# Normalize input to matrix form: matrices pass through unchanged,
# vectors become an n-by-1 column matrix (freshly allocated).
tomatrix(M::Matrix) = M
function tomatrix(v::Vector)
    M = Matrix{eltype(v)}(length(v), 1)
    M[:,1] = v
    M
end
# True when vector x lies in the row space of L: each row's component is
# projected out of x in turn, and whatever remains must be (numerically)
# zero.
function inlinspace(x, L)
    for i in 1:size(L, 1)
        y = vec(L[i,:])
        # remove component of x along y (scaled to avoid division)
        x = x * dot(y, y) - y * dot(y, x)
    end
    myeqzero(norm(x))
end
# Checks that p's minimal H-representation matches (A, b, linset) up to row
# permutation: after removing redundancy, every expected row must match some
# computed row (equalities with equalities only) modulo the affine hull --
# the homogeneous difference [b_i - b_j; A_i - A_j] must lie in the affine
# hull's row space.  NOTE: `$` is the xor operator in Julia <= 0.5.
function inequality_fulltest(p::Polyhedron, A, b, linset)
    A = tomatrix(A)
    detecthlinearities!(p)
    removeredundantinequalities!(p)
    ine = SimpleHRepresentation(getinequalities(p))
    @test size(ine.A) == size(A)
    @test length(ine.linset) == length(linset)
    aff = SimpleHRepresentation(getinequalities(affinehull(p)))
    affAb = [aff.b aff.A]
    inaff(x) = inlinspace(x, affAb)
    for i in 1:size(A, 1)
        found = false
        for j in 1:size(ine.A, 1)
            # vec for julia 0.4
            if !((i in linset) $ (j in ine.linset)) && inaff([b[i]-ine.b[j];vec(A[i,:]-ine.A[j,:])])
                found = true
                break
            end
        end
        @test found
    end
end
# Checks that p's minimal V-representation matches the expected vertices V
# and rays R up to permutation: every expected vertex must equal (myeq) some
# computed vertex, and every expected ray must match a computed ray modulo
# the lineality space spanned by the linearity rays.  `$` is xor (Julia <= 0.5).
function generator_fulltest(p::Polyhedron, V, R=Matrix{eltype(V)}(0, size(V, 2)), Vlinset = IntSet(), Rlinset = IntSet())
    V = tomatrix(V)
    R = tomatrix(R)
    detectvlinearities!(p)
    removeredundantgenerators!(p)
    ext = SimpleVRepresentation(getgenerators(p))
    @test size(ext.V) == size(V)
    @test size(ext.R) == size(R)
    @test length(ext.Vlinset) == length(Vlinset)
    @test length(ext.Rlinset) == length(Rlinset)
    # Vertex matching: exact set equality up to permutation.
    for i in 1:size(V, 1)
        found = false
        for j in 1:size(ext.V, 1)
            if myeq(vec(V[i, :]), vec(ext.V[j, :]))
                found = true
                break
            end
        end
        @test found
    end
    # Ray matching is done modulo the lineality space.
    linspace = ext.R[collect(ext.Rlinset),:]
    inlin(x) = inlinspace(vec(x), linspace)
    for i in 1:size(R, 1)
        found = false
        for j in 1:size(ext.R, 1)
            if !((i in Rlinset) $ (j in ext.Rlinset)) && inlin(R[i,:]-ext.R[j,:])
                #if parallel(vec(R[i, :]), vec(ext.R[j, :]), (i in Rlinset) || (j in ext.Rlinset))
                found = true
                break
            end
        end
        @test found
    end
end
#generator_fulltest(p::Polyhedron, V) = generator_fulltest(p, V, Matrix{eltype(V)}(0, size(V, 2)))
# Runs the full polyhedral test battery against the given backend library.
function alltests{Lib<:PolyhedraLibrary}(lib::Lib)
    simplextest(lib)
    permutahedrontest(lib)
    boardtest(lib)
end
|
{"hexsha": "7ae7ba711d2ceb53a085cbfca2efd852ec8a4fff", "size": 2656, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/alltests.jl", "max_stars_repo_name": "JuliaPackageMirrors/Polyhedra.jl", "max_stars_repo_head_hexsha": "a4489180581383b750b1af4e043650f66fa61e76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/alltests.jl", "max_issues_repo_name": "JuliaPackageMirrors/Polyhedra.jl", "max_issues_repo_head_hexsha": "a4489180581383b750b1af4e043650f66fa61e76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/alltests.jl", "max_forks_repo_name": "JuliaPackageMirrors/Polyhedra.jl", "max_forks_repo_head_hexsha": "a4489180581383b750b1af4e043650f66fa61e76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6666666667, "max_line_length": 121, "alphanum_fraction": 0.6054216867, "num_tokens": 966}
|
"""
Make a histogram of the masses.
"""
import matplotlib.pyplot as plt
plt.rc("text", usetex=True)
import numpy as np
h=0.7
def get_mass_array():
    """Collect the best-fit log10 masses, one per cluster.

    Reads ``data/z.txt`` only to count the clusters, then takes the first
    entry of each ``results/bestfits/bf_cluster%d.txt`` file and returns
    them as a flat numpy array.
    """
    zs = np.loadtxt("data/z.txt")
    masses = [np.loadtxt("results/bestfits/bf_cluster%d.txt" % i)[0]
              for i in range(len(zs))]
    return np.array(masses).flatten()
if __name__ == "__main__":
    # Regeneration of the cached masses (slow: reads every bestfit file).
    #lMs = get_mass_array()
    #np.savetxt("results/l10masses.txt", lMs)
    lMs = np.loadtxt("results/l10masses.txt")
    # Convert units: the cached values are log10(M [Msun/h]) with h = 0.7.
    lMs -= np.log10(h) #Msun from Msun/h
    plt.hist(lMs, 50, facecolor="gray", alpha=0.7)
    plt.xlabel(r"$\log_{10}M\ [{\rm M_\odot}]$")
    plt.ylabel(r"${\rm Number}$")
    plt.show()
|
{"hexsha": "1e941fd1611f3d04c9603abcdb041deab1a98987", "size": 670, "ext": "py", "lang": "Python", "max_stars_repo_path": "histM.py", "max_stars_repo_name": "tmcclintock/PLANCK_DES_Clusters", "max_stars_repo_head_hexsha": "c9864577d1baec753199327a5a8312576a2c33d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "histM.py", "max_issues_repo_name": "tmcclintock/PLANCK_DES_Clusters", "max_issues_repo_head_hexsha": "c9864577d1baec753199327a5a8312576a2c33d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "histM.py", "max_forks_repo_name": "tmcclintock/PLANCK_DES_Clusters", "max_forks_repo_head_hexsha": "c9864577d1baec753199327a5a8312576a2c33d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8, "max_line_length": 72, "alphanum_fraction": 0.6253731343, "include": true, "reason": "import numpy", "num_tokens": 213}
|
import json
import numpy as np
import pandas as pd
from keras.models import model_from_json
import matplotlib.pyplot as plt
from ADFA_DDQN import huber_loss
from network_classification import NetworkClassificationEnv
import itertools
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    # Annotate every cell, white text on dark cells for readability.
    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    for (row, col), cell in np.ndenumerate(cm):
        plt.text(col, row, format(cell, cell_fmt),
                 horizontalalignment="center",
                 color="white" if cell > cutoff else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
if __name__ == "__main__":
    formated_test_path = "../../datasets/formated/formated_test_ADFA.data"

    # Rebuild the trained DDQN from its serialized architecture + weights.
    with open("models/ADFA_DDQN.json", "r") as jfile:
        model = model_from_json(json.load(jfile))
    model.load_weights("models/ADFA_DDQN.h5")
    model.compile(loss=huber_loss,optimizer="sgd")

    # Identity mapping: each attack class is its own label.
    attack_map = {'Normal': 'Normal',
                  'Generic': 'Generic',
                  'Exploits': 'Exploits',
                  'Fuzzers':'Fuzzers',
                  'DoS':'DoS',
                  'Reconnaissance':'Reconnaissance',
                  'Analysis':'Analysis',
                  'Backdoor':'Backdoor',
                  'Shellcode':'Shellcode',
                  'Worms':'Worms'
                  }

    env = NetworkClassificationEnv('test',attack_map,formated_test_path = formated_test_path)

    # Per-class counters: predictions, ground truth, correct predictions.
    total_reward = 0
    true_labels = np.zeros(len(env.attack_types),dtype=int)
    estimated_labels = np.zeros(len(env.attack_types),dtype=int)
    estimated_correct_labels = np.zeros(len(env.attack_types),dtype=int)

    # Predict the whole test set in one batch; action = argmax of Q-values.
    states , labels = env.get_full()
    q = model.predict(states)
    actions = np.argmax(q,axis=1)

    labs,true_labels = np.unique(labels,return_counts=True)

    # Tally predictions; reward counts exact matches.
    for indx,a in enumerate(actions):
        estimated_labels[a] +=1
        if a == labels[indx]:
            total_reward += 1
            estimated_correct_labels[a] += 1

    # Per-class accuracy and over/under-prediction counts.
    Accuracy = estimated_correct_labels / true_labels
    Mismatch = estimated_labels - true_labels

    print('\r\nTotal reward: {} | Number of samples: {} | Accuracy = {}%'.format(total_reward,
          len(states),float(100*total_reward/len(states))))
    # NOTE(review): the DataFrame has no "Mismatch" column, yet
    # `.iloc[indx].Mismatch` is assigned below -- chained assignment on a
    # missing column likely has no effect (or warns); verify the output.
    outputs_df = pd.DataFrame(index = env.attack_types,columns = ["Estimated","Correct","Total","Acuracy"])
    for indx,att in enumerate(env.attack_types):
        outputs_df.iloc[indx].Estimated = estimated_labels[indx]
        outputs_df.iloc[indx].Correct = estimated_correct_labels[indx]
        outputs_df.iloc[indx].Total = true_labels[indx]
        outputs_df.iloc[indx].Acuracy = Accuracy[indx]*100
        outputs_df.iloc[indx].Mismatch = abs(Mismatch[indx])

    print(outputs_df)

    #%%
    # Stacked bar chart: correct hits (green), false negatives (red),
    # false positives (blue) per attack class.
    fig, ax = plt.subplots()
    width = 0.35
    pos = np.arange(len(true_labels))
    p1 = plt.bar(pos, estimated_correct_labels,width,color='g')
    p1 = plt.bar(pos+width,
                 (np.abs(estimated_correct_labels-true_labels)),width,
                 color='r')
    p2 = plt.bar(pos+width,np.abs(estimated_labels-estimated_correct_labels),width,
                 bottom=(np.abs(estimated_correct_labels-true_labels)),
                 color='b')

    ax.set_xticks(pos+width/2)
    ax.set_xticklabels(env.attack_types,rotation='vertical')
    #ax.set_yscale('log')
    #ax.set_ylim([0, 100])
    ax.set_title('Test set scores, Acc = {:.2f}'.format(100*total_reward/len(states)))
    plt.legend(('Correct estimated','False negative','False positive'))
    plt.tight_layout()
    #plt.show()
    plt.savefig('results/ADFA_DDQN.svg', format='svg', dpi=1000)

    #%% Agregated precision
    # Weighted sklearn metrics over the whole test set.
    aggregated_data_test =labels

    print('Performance measures on Test data')
    print('Accuracy = {:.4f}'.format(accuracy_score( aggregated_data_test,actions)))
    print('F1 = {:.4f}'.format(f1_score(aggregated_data_test,actions, average='weighted')))
    print('Precision_score = {:.4f}'.format(precision_score(aggregated_data_test,actions, average='weighted')))
    print('recall_score = {:.4f}'.format(recall_score(aggregated_data_test,actions, average='weighted')))

    cnf_matrix = confusion_matrix(aggregated_data_test,actions)
    np.set_printoptions(precision=2)
    # NOTE(review): plt.figure() is called twice in a row; the first figure
    # is empty and presumably redundant.
    plt.figure()
    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=env.attack_types, normalize=True,
                          title='Normalized confusion matrix')
    plt.savefig('results/confusion_matrix_ADFA_DDQN.svg', format='svg', dpi=1000)
|
{"hexsha": "a28c7f6ee5b1c868efb4b956ef476ca6c3de4a33", "size": 5482, "ext": "py", "lang": "Python", "max_stars_repo_path": "estimators/universal_env/ADFA_DDQN_test.py", "max_stars_repo_name": "boyuruan/Anomaly-ReactionRL", "max_stars_repo_head_hexsha": "a82da87e2da28ad333a7e19af5a0608390c3312c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 75, "max_stars_repo_stars_event_min_datetime": "2018-06-12T10:51:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T14:16:40.000Z", "max_issues_repo_path": "estimators/universal_env/ADFA_DDQN_test.py", "max_issues_repo_name": "draryan/Anomaly-ReactionRL", "max_issues_repo_head_hexsha": "590fbc89dfa761be324c35e0dcf5d08f6086df77", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2018-07-21T17:56:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-24T00:48:21.000Z", "max_forks_repo_path": "estimators/universal_env/ADFA_DDQN_test.py", "max_forks_repo_name": "draryan/Anomaly-ReactionRL", "max_forks_repo_head_hexsha": "590fbc89dfa761be324c35e0dcf5d08f6086df77", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2018-09-27T06:03:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T13:54:37.000Z", "avg_line_length": 35.5974025974, "max_line_length": 112, "alphanum_fraction": 0.6437431594, "include": true, "reason": "import numpy", "num_tokens": 1269}
|
#include <boost/test/unit_test.hpp>
#include "golden/include/gold.hpp"
using namespace golden;
// Demonstrates the seven Boost.Test idioms for reporting the same failed
// expectation (add(2, 2) == 4), contrasting the continue-on-error CHECK
// family with the abort-on-error REQUIRE/FAIL/throw family.
BOOST_AUTO_TEST_CASE(my_test) {
    // seven ways to detect and report the same error:
    BOOST_CHECK(add(2, 2) == 4);   // #1 continues on error

    BOOST_REQUIRE(add(2, 2) == 4); // #2 throws on error

    if (add(2, 2) != 4)
        BOOST_ERROR("Ouch...");    // #3 continues on error

    if (add(2, 2) != 4)
        BOOST_FAIL("Ouch...");     // #4 throws on error

    if (add(2, 2) != 4)
        throw "Ouch...";           // #5 throws on error

    BOOST_CHECK_MESSAGE(add(2, 2) == 4, // #6 continues on error
                        "add(..) result: " << add(2, 2));

    BOOST_CHECK_EQUAL(add(2, 2), 4); // #7 continues on error
}
|
{"hexsha": "7ab15ac3be27256a834f1ce161bad435d8a4c348", "size": 691, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/unit/test_golden.cpp", "max_stars_repo_name": "nokx5/golden-cpp", "max_stars_repo_head_hexsha": "1eb5e05d35b315aeeeabf49b0795c9859707ae0a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/unit/test_golden.cpp", "max_issues_repo_name": "nokx5/golden-cpp", "max_issues_repo_head_hexsha": "1eb5e05d35b315aeeeabf49b0795c9859707ae0a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-04-24T21:24:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T22:03:51.000Z", "max_forks_repo_path": "tests/unit/test_golden.cpp", "max_forks_repo_name": "nokx5/golden_cpp", "max_forks_repo_head_hexsha": "1eb5e05d35b315aeeeabf49b0795c9859707ae0a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5769230769, "max_line_length": 62, "alphanum_fraction": 0.5933429812, "num_tokens": 228}
|
"""
Created on Dec,27,2020
@author: junyun,Pan
Aim:反向拟合物性参数。
"""
######################################################################
#input
import sys
import os
mupif_dir=os.path.abspath(os.path.join(os.getcwd(), "../"))
sys.path.append(mupif_dir)
import mupif
import numpy as np
from scipy import stats
from bayes_opt import BayesianOptimization
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import logging
log = logging.getLogger()
#get
######################################################################
#Step 1: input 50组不同参数对应的输入文件 distributed compute
#run micress
#get 50张图片,不同的界面能对应的晶粒分布
######################################################################
#Step 2: input 50张图片
#imagepy
#get 尺寸分布 ->psd.csv
######################################################################
#Step 3: input 尺寸分布
# Load the particle-size-distribution measurements as a plain ndarray.
# NOTE(review): column meaning is defined by psd.csv's producer (Step 2,
# imagepy) -- confirm schema before relying on specific columns.
df=pd.read_csv('psd.csv').values
#get 核密度估计
######################################################################
#Step 4: input 核密度估计(实验&&模拟)
def kde_error(label,yhat):
    """Return the sum of squared differences between two KDE curves.

    Parameters
    ----------
    label : array_like
        Reference (e.g. experimental) density-curve samples.
    yhat : array_like
        Predicted (e.g. simulated) density-curve samples, same length.

    Returns
    -------
    float
        Sum over sample points of (yhat - label)**2; 0.0 for empty input.
    """
    yhat = np.array(yhat)
    label = np.array(label)
    error_sum = ((yhat - label)**2).sum()
    # BUG FIX: the original returned the undefined name 'kde_error_sum',
    # which raised NameError on every call.
    return error_sum
# Build a kernel-density curve for each of the 6 PSD columns, then score each
# simulated curve (columns 1..5) against the reference curve (column 0).
c=[]
d=[]
# NOTE(review): sns.distplot draws onto a shared Axes, so get_lines()[i]
# relies on exactly one new line being appended per call -- fragile; confirm.
for i in range(6):
    c.append(sns.distplot(df[1:,i]).get_lines()[i].get_data()[1])
# d[j-1] is the squared-error of column j's KDE against column 0's.
for j in range(1,6):
    d.append(kde_error(c[0],c[j]))
#get KL(EXPMS||PRDMS)
######################################################################
#Step 5: input KL(EXPMS||PRDMS)
#GPR+Baysian
def black_box_function(x, y):
    """Toy objective for the Bayesian optimizer.

    Stands in for an expensive, opaque process: the optimizer only ever
    sees (x, y) -> value, never these internals.  Maximum is 1 at (0, 1).
    """
    x_term = -x ** 2
    y_term = -(y - 1) ** 2
    return x_term + y_term + 1
# Search domain for the toy objective; the optimizer maximizes
# black_box_function inside these bounds.
pbounds = {'x': (2, 4), 'y': (-3, 3)}
optimizer = BayesianOptimization(
    f=black_box_function,
    pbounds=pbounds,
    random_state=1,  # fixed seed so the proposal sequence is reproducible
)
optimizer.maximize(
    init_points=2,  # random exploration evaluations before fitting the GP
    n_iter=3,       # Bayesian-optimization iterations after that
)
#get next interfacial energy
######################################################################
#Step 6: input interfacial energy
#Step 1--->Step 2--->Step 3--->Step 4--->Step 5 until KL<某个值
# Step 6 driver: fan the MICRESS jobs out over per-job working directories,
# copy the input decks, create one interface object per job and run a step.
# NOTE(review): cConf, copyfile, micress, istep and the prop* property
# objects are not defined in this file -- they must be supplied by an
# enclosing script; confirm before running stand-alone.
micressJobs = cConf.micressJobs
mic = []  # BUG FIX: appended to below but was never initialized
for i in range(micressJobs):
    # initialize working directory for MICRESS
    # i.e. make directory if necessary,
    # copy input files
    # BUG FIX: the original used the undefined name 'jobs' here; the
    # per-job index is the loop variable 'i'.
    jobsWorkdir = cConf.localWorkdir + "/" + str(i)
    print ("Creating working directory ... (or reuse existing)")
    print (jobsWorkdir)
    if ( not os.path.exists(jobsWorkdir) ):
        #raw_input('Press <ENTER> to confirm. Break with <CTRL-C>.')
        os.mkdir(jobsWorkdir)
    for f in cConf.micressInputFiles:
        filename = cConf.micressPrefix + '/' + f
        dest = jobsWorkdir + "/" + f
        try:
            copyfile(filename,dest)
        except OSError:
            # Narrowed from a bare 'except:' so KeyboardInterrupt /
            # SystemExit are not swallowed; copy failures still abort.
            print ("Error: file copy failed")
            print (f + ' -> ' + dest )
            sys.exit()
    # get an MICRESS interface object
    try:
        mic.append(micress.micress(workdir=jobsWorkdir, file='input.in'))
    except Exception as e:
        log.error('jobsWorkdir=%s' % (jobsWorkdir) )
        log.exception(e)
    ## set the properties for micro simulation
    # BUG FIX: 'interface' was undefined; index the job created above.
    mic[i].setProperty(propLocation)
    mic[i].setProperty(propT)
    mic[i].setProperty(propzG)
    ## make a MICRESS step
    mic[i].solveStep(istep)
#get
######################################################################
#Step 7: input
#get
######################################################################
|
{"hexsha": "0dbd9be98a753425e48e7eb1b925cee0390e34e0", "size": 3512, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "PjyCMS1Beike/USTB_MGE_ICME_group414", "max_stars_repo_head_hexsha": "e16819eb71bfda5580e4e0147447017a1c22ae19", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-28T12:29:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-28T12:29:42.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "PjyCMS1Beike/USTB_MGE_ICME_group414", "max_issues_repo_head_hexsha": "e16819eb71bfda5580e4e0147447017a1c22ae19", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "PjyCMS1Beike/USTB_MGE_ICME_group414", "max_forks_repo_head_hexsha": "e16819eb71bfda5580e4e0147447017a1c22ae19", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8235294118, "max_line_length": 72, "alphanum_fraction": 0.5444191344, "include": true, "reason": "import numpy,from scipy", "num_tokens": 925}
|
[STATEMENT]
lemma addO_assoc [simp]:
"addO n (addO m p) = addO (addO n m) p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. addO n (addO m p) = addO (addO n m) p
[PROOF STEP]
by (induct p) auto
|
{"llama_tokens": 95, "file": "Goodstein_Lambda_Goodstein_Lambda", "length": 1}
|
from scipy.special import gammainc
from math import log
# Bisection search for the point x where the regularized lower incomplete
# gamma function P(n, x) crosses T = 0.75, reported in log10 units.
L = 0
R = 10**9
n = 10 ** 7
T = 0.75
for _ in range(1000):
    # Halve the bracket: keep the side where P still straddles T.
    midpoint = (L + R) * 0.5
    if gammainc(n, midpoint) < T:
        L = midpoint
    else:
        R = midpoint
print(L / log(10.0))
|
{"hexsha": "3a11dcf895624760371c2c41c5e4ad1bee962528", "size": 253, "ext": "py", "lang": "Python", "max_stars_repo_path": "600-700/697.py", "max_stars_repo_name": "Thomaw/Project-Euler", "max_stars_repo_head_hexsha": "bcad5d8a1fd3ebaa06fa52d92d286607e9372a8d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "600-700/697.py", "max_issues_repo_name": "Thomaw/Project-Euler", "max_issues_repo_head_hexsha": "bcad5d8a1fd3ebaa06fa52d92d286607e9372a8d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "600-700/697.py", "max_forks_repo_name": "Thomaw/Project-Euler", "max_forks_repo_head_hexsha": "bcad5d8a1fd3ebaa06fa52d92d286607e9372a8d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.0555555556, "max_line_length": 35, "alphanum_fraction": 0.4584980237, "include": true, "reason": "from scipy", "num_tokens": 101}
|
from __future__ import division, print_function
from bm_tools import OnlineLogsumexp, sigmoid, log1pexp, logsumexp
import numpy
from scipy import linalg
# Hard-coded dataset locations -- assumes this user's checkout layout.
TRAIN = "/home/mark/Projects/succotash/succotash/datasets/train_examples.npy"
TEST = "/home/mark/Projects/succotash/succotash/datasets/test_examples.npy"
X = numpy.load(TRAIN)
n_components = 24
n_features = X.shape[1]
# Zero-mean GMM state: uniform initial weights, one full covariance per
# component, plus running second-moment statistics and un-normalized
# responsibility mass for the online (minibatch) updates below.
model_weights = numpy.ones(n_components)/n_components
model_covars = numpy.zeros((n_components,n_features, n_features))
second_moment_stats = numpy.zeros((n_components, n_features, n_features))
weights = numpy.zeros(model_weights.shape)
def score_samples(X,model_weights,model_covars,use_scipy_misc=False):
    '''Per-sample log-likelihood and responsibilities under a zero-mean
    Gaussian mixture with full covariances.

    X: (n_samples, n_dim) data.
    model_weights: (nmix,) mixture weights.
    model_covars: (nmix, n_dim, n_dim) SPD covariance matrices.
    use_scipy_misc: unused; kept for call compatibility.

    Returns (logprob, responsibilities).
    NOTE(review): `logsumexp` comes from bm_tools; the [:,newaxis]
    broadcast below implies it reduces over axis 1 (per sample) --
    confirm its signature.
    '''
    n_samples, n_dim = X.shape
    nmix = len(model_covars)
    lpr = numpy.empty((n_samples, nmix))
    for c, cv in enumerate(model_covars):
        # log N(x; 0, cv) via the Cholesky factor: solving L z = x gives
        # |z|^2 = Mahalanobis distance, and log|cv| = 2*sum(log diag L).
        cv_chol = linalg.cholesky(cv, lower=True)
        cv_log_det = 2 * numpy.sum(numpy.log(numpy.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, X.T, lower=True).T
        lpr[:, c] = - .5 * (numpy.sum(cv_sol ** 2, axis=1) +
                            n_dim * numpy.log(2 * numpy.pi) + cv_log_det)
    # Debugging hook: drop into pdb if any log-density came out NaN.
    if numpy.any(numpy.isnan(lpr)):
        import pdb; pdb.set_trace()
    lpr += numpy.log(model_weights)
    logprob = logsumexp(lpr)
    responsibilities = numpy.exp(lpr - logprob[:,numpy.newaxis])
    return logprob, responsibilities
# Initialize each component's covariance from an interleaved slice of the
# training data (every n_components-th row), then seed the running
# second-moment statistics from those covariances.
# BUG FIX: 'xrange' is Python-2-only and contradicts the __future__ imports
# above; 'range' behaves identically here.
for i in range(n_components):
    # model_means[i] = numpy.mean(X[i::n_components],0)
    model_covars[i] = numpy.cov(X[i::n_components].T)
# BUG FIX: the original assigned the undefined name 'cv' (it is only created
# later, inside the training loop), raising NameError; the freshly computed
# covariances are what should seed the statistics.
second_moment_stats[:] = model_covars
# --- Online (minibatch) EM for the zero-mean full-covariance GMM ---
minibatch_size = 300
alpha = 0.05  # exponential forgetting rate for the sufficient statistics
# BUG FIX: with `from __future__ import division`, '/' is true division, so
# n_batches was a float and the batch slice indices below became floats
# (TypeError when slicing).  Integer division is what the indexing needs.
# Assumes X.shape[0] >= minibatch_size.
n_batches = X.shape[0]//minibatch_size
current_log_likelihood=None
# BUG FIX: 'xrange' is Python-2-only; 'range' is equivalent here.
for i in range(2000):
    prev_log_likelihood = current_log_likelihood
    # Cycle through the minibatches; the last batch absorbs the remainder.
    batch_idx = i % n_batches
    if batch_idx == n_batches - 1:
        batch_end = X.shape[0]
    else:
        batch_end = (batch_idx+1)*minibatch_size
    cur_minibatch_size = batch_end - batch_idx*minibatch_size
    X_batch = X[batch_idx*minibatch_size:batch_end]
    # E-step on the minibatch.
    lls, responsibilities = score_samples(
        X_batch, model_weights,model_covars)
    current_log_likelihood = lls.mean()
    if prev_log_likelihood is not None:
        change = abs((current_log_likelihood - prev_log_likelihood)/prev_log_likelihood)
        if change < .00001:
            pass #break
    # M-step: exponentially-smoothed responsibility mass per component.
    weights_tmp = responsibilities.sum(0)
    if i == 0:
        weights[:] = weights_tmp
    else:
        weights += alpha * ( weights_tmp - weights)
    inverse_weights = 1.0/(weights_tmp[:,numpy.newaxis] + 1e-7)
    # Small epsilons keep weights strictly positive before renormalizing.
    model_weights = weights/(weights.sum() + 1e-5) + 1e-6
    model_weights /= model_weights.sum()
    # model_means[:] = first_moment_stats
    cv = numpy.empty((n_components, n_features, n_features))
    for c in range(n_components):
        post = responsibilities[:, c]
        # Underflow Errors in doing post * X.T are not important
        numpy.seterr(under='ignore')
        cv[c] = numpy.dot(post * X_batch.T, X_batch) * (inverse_weights[c])
        print(c, numpy.linalg.slogdet(cv[c]))
    # Smooth the second moments and use them directly as covariances
    # (means are fixed at zero in this model).
    second_moment_stats[:] += alpha * (cv - second_moment_stats)
    model_covars = second_moment_stats
    print(current_log_likelihood,i)
# Held-out evaluation.
X_test = numpy.load(TEST)
lls, responsibilities = score_samples(X_test,model_weights,model_covars)
print(n_components, lls.mean())
# n_c = 25; lls =
# n_c = 50; lls =
# n_c = 100; lls = -1764.16076962
# n_c = 200; lls = -1761.26919623
# n_c = 300; lls = -1761.63032148
# Baseline: scikit-learn's batch EM with means, for comparison.
from sklearn.mixture import gmm
smodel = gmm.GMM(n_components=24, covariance_type='full', n_iter=200)
smodel.fit(X)
smodel.score(X_test).mean()
# n_c = 24; lls = -1686.6809502524477
|
{"hexsha": "536b5d93b0662a5f3cdaf88d61f3c7970e94454c", "size": 3634, "ext": "py", "lang": "Python", "max_stars_repo_path": "structured_gaussian_mixtures/online_full_GMM_no_mean.py", "max_stars_repo_name": "markstoehr/structured_gaussian_mixtures", "max_stars_repo_head_hexsha": "f0c30770c8a851da7a7218b0b040b4f386f2bc5b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "structured_gaussian_mixtures/online_full_GMM_no_mean.py", "max_issues_repo_name": "markstoehr/structured_gaussian_mixtures", "max_issues_repo_head_hexsha": "f0c30770c8a851da7a7218b0b040b4f386f2bc5b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "structured_gaussian_mixtures/online_full_GMM_no_mean.py", "max_forks_repo_name": "markstoehr/structured_gaussian_mixtures", "max_forks_repo_head_hexsha": "f0c30770c8a851da7a7218b0b040b4f386f2bc5b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7070707071, "max_line_length": 88, "alphanum_fraction": 0.6951018162, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1009}
|
#=
polylagrange:
- Julia version:
- Author: ymocquar
- Date: 2019-11-25
=#
include("polyexp.jl")
# Exact Lagrange basis polynomial l_k for the j+1 equally spaced nodes
# x = 0, -1, ..., -j, built in Complex{Rational{N}} arithmetic so the
# coefficients stay exact.
function getpolylagrange(k::Int64, j::Int64, N::DataType)
    @assert k <= j "_getpolylagrange(k=$k,j=$j) k must be less or equal to j"
    @assert N <: Signed "the type $N must be an Integer"
    basis = Polynomial([one(Complex{Rational{N}})])
    for l = 0:j
        l == k && continue
        # Multiply in the factor (x + l) / (l - k) for every other node.
        basis *= Polynomial([l // 1, 1 // 1]) / (l - k)
    end
    return basis
end
# Lagrange interpolation of the history `tab` evaluated at `-value`, using
# exact rational basis polynomials over integer type `N`; the result is
# accumulated in BigFloat when N == BigInt, Float64 otherwise.
function interpolate(tab, order, value, N::DataType)
    T = N == BigInt ? BigFloat : Float64
    acc = zeros(Complex{T}, size(tab[1]))
    for k = 0:order
        coef = getpolylagrange(k, order, N)(-value)
        acc .+= coef * tab[k+1]
    end
    return acc
end
# Lagrange basis polynomial l_k over arbitrary nodes t[1..j+1]:
# the product over l != k of (x - t[l+1]) / (t[k+1] - t[l+1]).
function getpolylagrange(t::Vector{T}, k, j) where {T<:Number}
    basis = Polynomial([one(T)])
    for l = 0:j
        l == k && continue
        basis *= Polynomial([-t[l+1], one(T)]) / (t[k+1] - t[l+1])
    end
    return basis
end
# Lagrange interpolation of `tab` at `value`, with explicit node positions
# `tab_time`; the accumulator element type follows the node type T.
function interpolate(tab_time::Vector{T}, tab, order, value) where {T<:Number}
    acc = zeros(Complex{T}, size(tab[1]))
    for k = 0:order
        coef = getpolylagrange(tab_time, k, order)(value)
        acc .+= coef * tab[k+1]
    end
    return acc
end
# Convenience overload: exact (BigInt-rational) coefficients by default.
interpolate(tab, order, value) = interpolate(tab, order, value, BigInt)
# Table of exponential Adams-Bashforth coefficients computed in exact
# rational arithmetic for every Fourier mode in `list_tau`.
struct CoefExpABRational
    tab_coef::Any
    function CoefExpABRational(
        order::Int64,
        epsilon::AbstractFloat,
        list_tau,
        dt::AbstractFloat,
    )
        n_tau = size(list_tau, 1)
        T = typeof(epsilon)
        # tab_coef[mode, k+1, j+1]: coefficient of history node k at order j.
        tab_coef = zeros(Complex{T}, n_tau, order + 1, order + 1)
        N = T == BigFloat ? BigInt : Int64
        # Rationalize the inputs (to within ~10 ulps) so everything below
        # stays exact until the final evaluation.
        epsilon = rationalize(N, epsilon, tol = epsilon * 10 * Base.eps(T))
        dt = rationalize(N, dt, tol = dt * 10 * Base.eps(T))
        list_tau = rationalize.(N, list_tau)
        # x / dt : rescales the Lagrange basis from unit steps to steps of dt.
        pol_x = Polynomial([0 // 1, 1 // dt])
        for j = 0:order
            for k = 0:j
                res = view(tab_coef, :, k + 1, j + 1)
                pol = getpolylagrange(k, j, N)
                pol2 = pol(pol_x)
                for ind = 1:n_tau
                    ell = list_tau[ind]
                    pol3 = undef
                    # Integrate the basis times exp(i*ell*(t - dt)/epsilon);
                    # the coefficient is the integral over one step [0, dt].
                    pol_int = if ell == 0
                        # in this case the exponentiel value is always 1
                        Polynomials.integrate(pol2)
                    else
                        pol3 = PolyExp(pol2, im * ell / epsilon, -im * ell * dt / epsilon)
                        Polynomials.integrate(pol3)
                    end
                    res[ind] = pol_int(dt) - pol_int(0)
                end
            end
        end
        return new(tab_coef)
    end
end
# Floating-point exponential Adams-Bashforth coefficient tables, computed at
# temporarily boosted BigFloat precision and rounded back at the end.
struct CoefExpAB
    tab_coef::Any
    tab_coef_neg::Any  # coefficients for the conjugate (negated) modes
    function CoefExpAB(order::Int64, epsilon::AbstractFloat, n_tau, dt)
        T = typeof(epsilon)
        N = T == BigFloat ? BigInt : Int64
        # Raise the working precision with the order to absorb cancellation
        # in the integrals below.
        new_prec = precision(BigFloat)
        new_prec += T == BigFloat ? order * 16 : 0
        setprecision(BigFloat, new_prec) do
            # FFT-style frequency ordering: 0 .. n/2-1, then -n/2 .. -1.
            list_tau = [collect(0:n_tau/2-1); collect(-n_tau/2:-1)]
            T2 = BigFloat
            epsilon = T2(epsilon)
            dt = T2(dt)
            # NOTE(review): `tab_coef` is first assigned inside this do-block,
            # which makes it local to the closure; the conversion after the
            # block appears to reference it from the enclosing scope --
            # confirm this constructor actually runs as intended.
            tab_coef = zeros(Complex{T2}, n_tau, order + 1, order + 1)
            pol_x = Polynomial([0, 1 / dt])
            for j = 0:order
                for k = 0:j
                    res = view(tab_coef, :, k + 1, j + 1)
                    pol = getpolylagrange(k, j, N)
                    pol2 = pol(pol_x)
                    for ind = 1:n_tau
                        ell = list_tau[ind]
                        pol_int = if ell == 0
                            # in this case the exponentiel value is always 1
                            Polynomials.integrate(pol2)
                        else
                            Polynomials.integrate(
                                PolyExp(pol2, im * ell / epsilon, -im * ell * dt / epsilon),
                            )
                        end
                        res[ind] = pol_int(dt) - pol_int(0)
                    end
                end
            end # end of for j=....
        end # end of setprecision(...)
        # conversion to the new precision
        tab_coef = T.(real(tab_coef)) + im * T.(imag(tab_coef))
        # Negative-frequency coefficients are minus the conjugates.
        tab_coef_neg = -conj(tab_coef)
        return new(tab_coef, tab_coef_neg)
    end
end
|
{"hexsha": "b3966e5473d6d73ac49f6874b54286ad1b18d961", "size": 4333, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/coefexp_ab.jl", "max_stars_repo_name": "vissarion/HOODESolver.jl", "max_stars_repo_head_hexsha": "cabc5b036c94f23a05a338c6dfc86c45982a8e24", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-12-14T08:35:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T19:53:27.000Z", "max_issues_repo_path": "src/coefexp_ab.jl", "max_issues_repo_name": "vissarion/HOODESolver.jl", "max_issues_repo_head_hexsha": "cabc5b036c94f23a05a338c6dfc86c45982a8e24", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-12-20T10:57:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-03T07:36:44.000Z", "max_forks_repo_path": "src/coefexp_ab.jl", "max_forks_repo_name": "vissarion/HOODESolver.jl", "max_forks_repo_head_hexsha": "cabc5b036c94f23a05a338c6dfc86c45982a8e24", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-01-15T15:29:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-14T13:35:09.000Z", "avg_line_length": 35.2276422764, "max_line_length": 92, "alphanum_fraction": 0.4952688668, "num_tokens": 1188}
|
# Top-level module of the pure-Julia AES implementation: declares the
# abstract key/cipher/cache type hierarchy, pulls in the block primitives
# and the CBC/CTR/ECB cipher modes, and exports the public API.
module AES
using StaticArrays, Random
# Abstract roots for any symmetric cipher; AES-specific subtypes below.
abstract type AbstractSymmetricKey end
abstract type AbstractCipher end
abstract type AbstractCipherCache end
abstract type AbstractAESKey <: AbstractSymmetricKey end
abstract type AbstractAESCache <: AbstractCipherCache end
# Implementation files, in dependency order.
include("constants.jl")
include("types.jl")
include("block_encryption.jl")
include("block_decryption.jl")
include("modes/cbc.jl")
include("modes/ctr.jl")
include("modes/ecb.jl")
include("encrypt.jl")
include("decrypt.jl")
# Public API.
export AESCipher
export AES128Key, AES192Key, AES256Key
export AESCache
export encrypt, decrypt
end # module
|
{"hexsha": "1395eff31c2460f02ef2ffc23ee7d96c9e1b8fa5", "size": 619, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/AES.jl", "max_stars_repo_name": "Seelengrab/AES.jl", "max_stars_repo_head_hexsha": "7af7764bb7918b91d2c495d238003649e0cc7ca3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-05-28T09:23:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T23:20:25.000Z", "max_issues_repo_path": "src/AES.jl", "max_issues_repo_name": "Seelengrab/AES.jl", "max_issues_repo_head_hexsha": "7af7764bb7918b91d2c495d238003649e0cc7ca3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-06-24T19:00:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-18T12:30:27.000Z", "max_forks_repo_path": "src/AES.jl", "max_forks_repo_name": "kanav99/Rijndael.jl", "max_forks_repo_head_hexsha": "6c936bc6b56ed568c445e7f56ebea30d3b51cf73", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-24T18:16:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-18T09:49:14.000Z", "avg_line_length": 22.1071428571, "max_line_length": 58, "alphanum_fraction": 0.7835218094, "num_tokens": 152}
|
# Utils.jl
# [[file:~/Documents/Julia/scrap.org::*Utils.jl][Utils.jl:1]]
# Generic expression walkers: `walk` maps `inner` over a node's children,
# rebuilds the node, and applies `outer` to the result; non-composite
# values go straight to `outer`.
walk(x, inner, outer) = outer(x)
walk(x::T, inner, outer) where {T<:AbstractSymExpr} = outer(T(inner(x.op), map(inner, x.args)))
walk(x::Expr, inner, outer) = outer(Expr(x.head, map(inner, x.args)...))
# Depth-first, bottom-up traversal applying `f` to every sub-expression.
postwalk(f, x) = walk(x, x -> postwalk(f, x), f)
# Collapse a single-argument `identity(arg)` wrapper to its argument;
# anything else passes through unchanged.
removeiden(ex::AbstractSymExpr) = (ex.op == Sym(:identity)) && (length(ex.args) == 1) ? ex.args[1] : ex
removeiden(x) = x
# Recursively strip identity wrappers from a whole expression tree.
stripiden(x) = x
stripiden(x::AbstractSymExpr) = postwalk(removeiden, x)
# Substitution: replace every occurrence of `p.first` with `p.second`.
# NOTE(review): Julia type parameters are invariant, so the
# Pair{AbstractSym, Number} method only matches that exact Pair type --
# confirm it is actually reachable for concrete Sym/Number pairs.
replace_sym(x, p::Pair{AbstractSym, Number}) = x
function replace_sym(ex::Symbolic, p::Pair)
    postwalk(sym -> sym == p.first ? p.second : sym, ex)
end
# Call syntax: `ex(:x => 2)` substitutes into the expression.
(ex::SymExpr)(p::Pair) = replace_sym(ex, p)
(ex::Sym)(p::Pair) = replace_sym(ex, p)
# Promotion rules: symbolic values always promote to SymExpr, wrapping bare
# Syms and numbers in an `identity` node so both operands share one form.
Base.promote(::Type{Sym}) = SymExpr
Base.promote(::Type{SymExpr}) = SymExpr
Base.promote(x::T, y::Number) where {T<:Symbolic} = (promote(T)(:identity, [x]), promote(T)(:identity, [y]))
Base.promote(x::Number, y::T) where {T<:Symbolic} = (promote(T)(:identity, [x]), promote(T)(:identity, [y]))
Base.promote(x::Sym, y::SymExpr) = (SymExpr(:identity, [x]), y)
Base.promote(x::SymExpr, y::Sym) = (x, SymExpr(:identity, [y]))
# Result-type helpers mirroring the value-level rules above.
promote_SymForm(x::Number, y::Union{Sym,SymExpr}) = SymExpr
promote_SymForm(x::Union{Sym,SymExpr}, y::Number) = SymExpr
promote_SymForm(x::Union{Sym,SymExpr}, y::Union{Sym,SymExpr}) = SymExpr
# Utils.jl:1 ends here
|
{"hexsha": "e2a4740961efa2d12ba40b9da9d0417988837244", "size": 1402, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Utils.jl", "max_stars_repo_name": "jagot/Symbolics.jl", "max_stars_repo_head_hexsha": "b8994e3d79803daa3a57012fe9ccbe0b01c346d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 102, "max_stars_repo_stars_event_min_datetime": "2018-01-20T22:35:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T07:29:23.000Z", "max_issues_repo_path": "src/Utils.jl", "max_issues_repo_name": "jagot/Symbolics.jl", "max_issues_repo_head_hexsha": "b8994e3d79803daa3a57012fe9ccbe0b01c346d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2018-10-02T06:44:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-13T02:04:30.000Z", "max_forks_repo_path": "src/Utils.jl", "max_forks_repo_name": "jagot/Symbolics.jl", "max_forks_repo_head_hexsha": "b8994e3d79803daa3a57012fe9ccbe0b01c346d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2018-05-02T19:10:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T09:36:14.000Z", "avg_line_length": 40.0571428571, "max_line_length": 108, "alphanum_fraction": 0.6583452211, "num_tokens": 490}
|
from __future__ import print_function
import os
import numpy as np
from tractor.ellipses import EllipseESoft
from tractor.utils import _GaussianPriors
def log_info(logger, args):
    """Join *args* with spaces and emit the result at INFO level on *logger*."""
    logger.info(' '.join(str(a) for a in args))
def log_debug(logger, args):
    """Join *args* with spaces and emit at DEBUG level, skipping the join
    entirely when DEBUG logging is disabled on *logger*."""
    import logging
    if not logger.isEnabledFor(logging.DEBUG):
        return
    logger.debug(' '.join(str(a) for a in args))
class EllipseWithPriors(EllipseESoft):
    '''An ellipse (used to represent galaxy shapes) with Gaussian priors
    over softened ellipticity parameters. This class is used during
    fitting.
    We ALSO place a prior on log-radius, forcing it to be < +5 (in
    log-arcsec); though this gets dynamically adjusted in the oneblob.py code.
    To use this class, subclass it and set the 'ellipticityStd' class
    member.
    '''
    # Std. dev. of the Gaussian prior on each softened-ellipticity
    # component; subclasses are expected to override this.
    ellipticityStd = 0.
    # Shared per-class _GaussianPriors object, built lazily in __init__.
    ellipsePriors = None
    # EllipseESoft extends EllipseE extends ParamList, has
    # GaussianPriorsMixin. GaussianPriorsMixin sets a "gpriors"
    # member variable to a _GaussianPriors
    def __init__(self, *args, **kwargs):
        super(EllipseWithPriors, self).__init__(*args, **kwargs)
        # Build the class-level priors object once, on first instantiation.
        if self.ellipsePriors is None:
            ellipsePriors = _GaussianPriors(None)
            ellipsePriors.add('ee1', 0., self.ellipticityStd,
                              param=EllipseESoft(1.,0.,0.))
            ellipsePriors.add('ee2', 0., self.ellipticityStd,
                              param=EllipseESoft(1.,0.,0.))
            self.__class__.ellipsePriors = ellipsePriors
        self.gpriors = self.ellipsePriors
        # MAGIC -- 30" default max r_e!
        # SEE ALSO survey.py : class(LogRadius)!
        self.uppers[0] = np.log(30.)
    def setMaxLogRadius(self, rmax):
        # uppers[0] is the upper bound on the log-radius parameter.
        self.uppers[0] = rmax
    def getMaxLogRadius(self):
        return self.uppers[0]
    @classmethod
    def fromRAbPhi(cls, r, ba, phi):
        # Alternate constructor from radius, axis ratio b/a, position angle.
        logr, ee1, ee2 = EllipseESoft.rAbPhiToESoft(r, ba, phi)
        return cls(logr, ee1, ee2)
    def isLegal(self):
        # Legal iff log-radius respects the (dynamically adjusted) bound.
        return self.logre <= self.uppers[0]
    @classmethod
    def getName(cls):
        return "EllipseWithPriors(%g)" % cls.ellipticityStd
class RunbrickError(RuntimeError):
    """Base class for errors raised by the runbrick pipeline."""
    pass
class NothingToDoError(RunbrickError):
    """Raised when a pipeline stage finds no work to perform."""
    pass
class iterwrapper(object):
    """Wrap an iterator `y`, advertising a known length `n`, and print a
    traceback (then re-raise) if iteration fails for any reason other than
    normal exhaustion."""
    def __init__(self, y, n):
        self.y = y
        self.n = n
    def __len__(self):
        return self.n
    def __iter__(self):
        return self
    def __str__(self):
        return 'iterwrapper: n=%i; ' % self.n + str(self.y)
    def _report(self, label):
        # Shared failure path: show context, dump the traceback, re-raise.
        import traceback
        print(str(self), label)
        traceback.print_exc()
    def next(self):
        # Python 2 iteration protocol.
        try:
            return self.y.next()
        except StopIteration:
            raise
        except:
            self._report('next()')
            raise
    # py3
    def __next__(self):
        try:
            return self.y.__next__()
        except StopIteration:
            raise
        except:
            self._report('__next__()')
            raise
def _ring_unique(wcs, W, H, i, unique, ra1,ra2,dec1,dec2):
lo, hix, hiy = i, W-i-1, H-i-1
# one slice per side; we double-count the last pix of each side.
sidex = slice(lo,hix+1)
sidey = slice(lo,hiy+1)
top = (lo, sidex)
bot = (hiy, sidex)
left = (sidey, lo)
right = (sidey, hix)
xx = np.arange(W)
yy = np.arange(H)
nu,ntot = 0,0
for slc in [top, bot, left, right]:
#print('xx,yy', xx[slc], yy[slc])
(yslc,xslc) = slc
rr,dd = wcs.pixelxy2radec(xx[xslc]+1, yy[yslc]+1)
U = (rr >= ra1 ) * (rr < ra2 ) * (dd >= dec1) * (dd < dec2)
#print('Pixel', i, ':', np.sum(U), 'of', len(U), 'pixels are unique')
unique[slc] = U
nu += np.sum(U)
ntot += len(U)
#if allin:
# print('Scanned to pixel', i)
# break
return nu,ntot
def find_unique_pixels(wcs, W, H, unique, ra1,ra2,dec1,dec2):
    '''Flag the pixels of a WxH image that fall inside the RA,Dec box
    [ra1,ra2) x [dec1,dec2), scanning square rings inward from the border.
    `unique` is an (H,W) bool map (allocated here if None), updated in
    place and returned.
    NOTE(review): the early-exit assumes the in-box region is contiguous
    and roughly centered -- confirm for your WCS footprint.
    '''
    if unique is None:
        unique = np.ones((H,W), bool)
    # scan the outer annulus of pixels, and shrink in until all pixels
    # are unique.
    step = 10
    # Coarse pass: step inward by 10 until a ring contains an in-box pixel,
    # then back up one step.  NOTE: deliberately relies on `i` keeping its
    # value after the loop (including the `i -= step` before break).
    for i in range(0, W//2, step):
        nu,ntot = _ring_unique(wcs, W, H, i, unique, ra1,ra2,dec1,dec2)
        #print('Pixel', i, ': nu/ntot', nu, ntot)
        if nu > 0:
            i -= step
            break
    # Everything outside ring `i` is known to be out of the box.
    unique[:i,:] = False
    unique[H-1-i:,:] = False
    unique[:,:i] = False
    unique[:,W-1-i:] = False
    # Fine pass: ring by ring until an entire ring is inside the box.
    for j in range(max(i+1, 0), W//2):
        nu,ntot = _ring_unique(wcs, W, H, j, unique, ra1,ra2,dec1,dec2)
        #print('Pixel', j, ': nu/ntot', nu, ntot)
        if nu == ntot:
            break
    return unique
def read_primary_header(fn):
    '''
    Reads the FITS primary header (HDU 0) from the given filename.
    This is just a faster version of fitsio.read_header(fn).

    Reads the raw file in 32 kB chunks and feeds fixed 80-byte header
    cards to fitsio until the END card is seen.  Gzipped files are
    delegated to fitsio directly.  Raises RuntimeError if EOF is reached
    before the END card.
    '''
    import fitsio
    if fn.endswith('.gz'):
        # Can't usefully chunk a gzip stream; let fitsio handle it.
        return fitsio.read_header(fn)
    # Weirdly, this can be MUCH faster than letting fitsio do it...
    hdr = fitsio.FITSHDR()
    foundEnd = False
    ff = open(fn, 'rb')
    h = b''
    while True:
        hnew = ff.read(32768)
        if len(hnew) == 0:
            # EOF
            ff.close()
            raise RuntimeError('Reached end-of-file in "%s" before finding end of FITS header.' % fn)
        h = h + hnew
        while True:
            # FITS header cards are fixed-width 80-byte records.
            line = h[:80]
            h = h[80:]
            #print('Header line "%s"' % line)
            # HACK -- fitsio apparently can't handle CONTINUE.
            # It also has issues with slightly malformed cards, like
            # KEYWORD = / no value
            if line[:8] != b'CONTINUE':
                try:
                    hdr.add_record(line.decode())
                except OSError as err:
                    print('Warning: failed to parse FITS header line: ' +
                          ('"%s"; error "%s"; skipped' % (line.strip(), str(err))))
            if line == (b'END' + b' '*77):
                foundEnd = True
                break
            if len(h) < 80:
                break
        if foundEnd:
            break
    ff.close()
    return hdr
def run_ps_thread(parent_pid, parent_ppid, fn, shutdown, event_queue):
    '''Periodically snapshot `ps` output (plus /proc stats when available)
    for significant processes, appending rows to an in-memory table and
    writing it to FITS file `fn` (atomically, via tmp file + rename)
    about once a minute and on exit.

    Runs in a thread of the parent process until the `shutdown`
    threading.Event is set, polling every ~5 seconds.
    parent_pid/parent_ppid tag rows belonging to this process tree;
    event_queue is an optional deque of (unixtime, message) markers
    written to a second FITS extension, or None.
    '''
    from astrometry.util.run_command import run_command
    from astrometry.util.fits import fits_table, merge_tables
    import time
    import re
    import fitsio
    from functools import reduce
    # my pid = parent pid -- this is a thread.
    print('run_ps_thread starting: parent PID', parent_pid, ', my PID', os.getpid(), fn)
    TT = []
    step = 0
    events = []
    # Matches ps time strings of the form "[[dd-]hh:]mm:ss[.ss]".
    trex = re.compile('(((?P<days>\d*)-)?(?P<hours>\d*):)?(?P<minutes>\d*):(?P<seconds>[\d\.]*)')
    def parse_time_strings(ss):
        # Convert a list of ps time strings to seconds.  On the first parse
        # failure, return the offending string so the caller can report it.
        etime = []
        any_failed = None
        for s in ss:
            m = trex.match(s)
            if m is None:
                any_failed = s
                break
            days,hours,mins,secs = m.group('days', 'hours', 'minutes',
                                           'seconds')
            #print('Elapsed time', s, 'parsed to', days,hours,mins,secs)
            days = int(days, 10) if days is not None else 0
            hours = int(hours, 10) if hours is not None else 0
            mins = int(mins, 10)
            if secs.startswith('0'):
                secs = secs[1:]
            secs = float(secs)
            tt = days * 24 * 3600 + hours * 3600 + mins * 60 + secs
            #print('->', tt, 'seconds')
            etime.append(tt)
        return any_failed, etime
    def write_results(fn, T, events, hdr):
        # Tag rows belonging to this run, then write atomically: dump to a
        # tmp file in the same directory and rename over the target.
        T.mine = np.logical_or(T.pid == parent_pid, T.ppid == parent_pid)
        T.main = (T.pid == parent_pid)
        tmpfn = os.path.join(os.path.dirname(fn), 'tmp-' + os.path.basename(fn))
        T.writeto(tmpfn, header=hdr)
        if len(events):
            E = fits_table()
            E.unixtime = np.array([e[0] for e in events])
            E.event = np.array([e[1] for e in events])
            E.step = np.array([e[2] for e in events])
            E.writeto(tmpfn, append=True)
        os.rename(tmpfn, fn)
        print('Wrote', fn)
    fitshdr = fitsio.FITSHDR()
    fitshdr['PPID'] = parent_pid
    # Per-PID (elapsed, cpu) seconds from the previous poll, used to compute
    # the "instantaneous" CPU percentages below.
    last_time = {}
    last_proc_time = {}
    clock_ticks = os.sysconf('SC_CLK_TCK')
    #print('Clock times:', clock_ticks)
    if clock_ticks == -1:
        #print('Failed to get clock times per second; assuming 100')
        clock_ticks = 100
    while True:
        shutdown.wait(5.0)
        if shutdown.is_set():
            print('ps shutdown flag set. Quitting.')
            break
        if event_queue is not None:
            # Drain all pending event markers, stamping the current step.
            while True:
                try:
                    (t,msg) = event_queue.popleft()
                    events.append((t,msg,step))
                    #print('Popped event', t,msg)
                except IndexError:
                    # no events
                    break
        step += 1
        #cmd = ('ps ax -o "user pcpu pmem state cputime etime pgid pid ppid ' +
        #       'psr rss session vsize args"')
        # OSX-compatible
        cmd = ('ps ax -o "user pcpu pmem state cputime etime pgid pid ppid ' +
               'rss vsize wchan command"')
        #print('Command:', cmd)
        rtn,out,err = run_command(cmd)
        if rtn:
            print('FAILED to run ps:', rtn, out, err)
            time.sleep(1)
            break
        # print('Got PS output')
        # print(out)
        # print('Err')
        # print(err)
        if len(err):
            print('Error string from ps:', err)
        lines = out.split('\n')
        hdr = lines.pop(0)
        cols = hdr.split()
        cols = [c.replace('%','P') for c in cols]
        cols = [c.lower() for c in cols]
        #print('Columns:', cols)
        vals = [[] for c in cols]
        # maximum length for 'command', command-line args field
        maxlen = 128
        for line in lines:
            words = line.split()
            # "command" column can contain spaces; it is last
            if len(words) == 0:
                continue
            words = (words[:len(cols)-1] +
                     [' '.join(words[len(cols)-1:])[:maxlen]])
            assert(len(words) == len(cols))
            for v,w in zip(vals, words):
                v.append(w)
        # Numeric ps columns; everything else stays as strings.
        parsetypes = dict(pcpu = np.float32,
                          pmem = np.float32,
                          pgid = np.int32,
                          pid = np.int32,
                          ppid = np.int32,
                          rs = np.float32,
                          vsz = np.float32,
                          )
        T = fits_table()
        for c,v in zip(cols, vals):
            # print('Col', c, 'Values:', v[:3], '...')
            v = np.array(v)
            tt = parsetypes.get(c, None)
            if tt is not None:
                v = v.astype(tt)
            T.set(c, v)
        any_failed,etime = parse_time_strings(T.elapsed)
        if any_failed is not None:
            print('Failed to parse elapsed time string:', any_failed)
        else:
            T.elapsed = np.array(etime)
        any_failed,ctime = parse_time_strings(T.time)
        if any_failed is not None:
            print('Failed to parse elapsed time string:', any_failed)
        else:
            T.time = np.array(ctime)
        T.rename('time', 'cputime')
        # Compute 'instantaneous' (5-sec averaged) %cpu
        # BUT this only counts whole seconds in the 'ps' output.
        T.icpu = np.zeros(len(T), np.float32)
        icpu = T.icpu
        for i,(p,etime,ctime) in enumerate(zip(T.pid, T.elapsed, T.cputime)):
            try:
                elast,clast = last_time[p]
                # new process with an existing PID?
                if etime > elast:
                    icpu[i] = 100. * (ctime - clast) / (etime - elast)
            except:
                pass
            last_time[p] = (etime, ctime)
        # print('Processes:')
        # J = np.argsort(-T.icpu)
        # for j in J:
        #     p = T.pid[j]
        #     pp = T.ppid[j]
        #     print('  PID', p, '(main)' if p == parent_pid else '',
        #           '(worker)' if pp == parent_pid else '',
        #           'pcpu', T.pcpu[j], 'pmem', T.pmem[j], 'icpu', T.icpu[j],
        #           T.command[j][:20])
        # Apply cuts!
        # Keep rows with significant CPU/memory use, the main process, and
        # its direct children (except our own 'ps ax' invocations).
        T.cut(reduce(np.logical_or, [
            T.pcpu > 5,
            T.pmem > 5,
            T.icpu > 5,
            T.pid == parent_pid,
            (T.ppid == parent_pid) * np.array([not c.startswith('ps ax') for c in T.command])]))
        #print('Cut to', len(T), 'with significant CPU/MEM use or my PPID')
        # print('Kept:')
        # J = np.argsort(-T.icpu)
        # for j in J:
        #     p = T.pid[j]
        #     pp = T.ppid[j]
        #     print('  PID', p, '(main)' if p == parent_pid else '',
        #           '(worker)' if pp == parent_pid else '',
        #           'pcpu', T.pcpu[j], 'pmem', T.pmem[j], 'icpu', T.icpu[j],
        #           T.command[j][:20])
        if len(T) == 0:
            continue
        timenow = time.time()
        T.unixtime = np.zeros(len(T), np.float64) + timenow
        T.step = np.zeros(len(T), np.int16) + step
        if os.path.exists('/proc'):
            # Try to grab higher-precision CPU timing info from /proc/PID/stat
            T.proc_utime = np.zeros(len(T), np.float32)
            T.proc_stime = np.zeros(len(T), np.float32)
            T.processor = np.zeros(len(T), np.int16)
            T.proc_icpu = np.zeros(len(T), np.float32)
            for i,p in enumerate(T.pid):
                try:
                    # See:
                    # http://man7.org/linux/man-pages/man5/proc.5.html
                    procfn = '/proc/%i/stat' % p
                    txt = open(procfn).read()
                    #print('Read', procfn, ':', txt)
                    words = txt.split()
                    utime = int(words[13]) / float(clock_ticks)
                    stime = int(words[14]) / float(clock_ticks)
                    proc = int(words[38])
                    #print('utime', utime, 'stime', stime, 'processor', proc)
                    ctime = utime + stime
                    try:
                        tlast,clast = last_proc_time[p]
                        #print('pid', p, 'Tnow,Cnow', timenow, ctime, 'Tlast,Clast', tlast,clast)
                        if ctime >= clast:
                            T.proc_icpu[i] = 100. * (ctime - clast) / float(timenow - tlast)
                    except:
                        pass
                    last_proc_time[p] = (timenow, ctime)
                    T.proc_utime[i] = utime
                    T.proc_stime[i] = stime
                    T.processor [i] = proc
                except:
                    pass
        TT.append(T)
        #print('ps -- step', step)
        if (step % 12 == 0) and len(TT) > 0:
            # Write out results every ~ minute.
            print('ps -- writing', fn)
            T = merge_tables(TT, columns='fillzero')
            write_results(fn, T, events, fitshdr)
            TT = [T]
    # Just before returning, write out results.
    if len(TT) > 0:
        print('ps -- writing', fn)
        T = merge_tables(TT, columns='fillzero')
        write_results(fn, T, events, fitshdr)
# Memory Limits
def get_ulimit():
    """Print the soft and hard values of the common POSIX resource limits
    for the current process, one line per limit."""
    import resource
    named_limits = (
        ('RLIMIT_AS', 'VMEM'),
        ('RLIMIT_CORE', 'core file size'),
        ('RLIMIT_CPU', 'CPU time'),
        ('RLIMIT_FSIZE', 'file size'),
        ('RLIMIT_DATA', 'heap size'),
        ('RLIMIT_STACK', 'stack size'),
        ('RLIMIT_RSS', 'resident set size'),
        ('RLIMIT_NPROC', 'number of processes'),
        ('RLIMIT_NOFILE', 'number of open files'),
        ('RLIMIT_MEMLOCK', 'lockable memory address'),
    )
    for name, desc in named_limits:
        soft, hard = resource.getrlimit(getattr(resource, name))
        print('Maximum %-25s (%-15s) : %20s %20s' % (desc, name, soft, hard))
|
{"hexsha": "68d0693491cdbe8fed3a219b6a539eb8fcb623d4", "size": 15999, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/legacypipe/utils.py", "max_stars_repo_name": "michaelJwilson/legacypipe", "max_stars_repo_head_hexsha": "47d005356cbd0c9fb864c960ee7bbf800e543cad", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "py/legacypipe/utils.py", "max_issues_repo_name": "michaelJwilson/legacypipe", "max_issues_repo_head_hexsha": "47d005356cbd0c9fb864c960ee7bbf800e543cad", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "py/legacypipe/utils.py", "max_forks_repo_name": "michaelJwilson/legacypipe", "max_forks_repo_head_hexsha": "47d005356cbd0c9fb864c960ee7bbf800e543cad", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1130063966, "max_line_length": 101, "alphanum_fraction": 0.5074067129, "include": true, "reason": "import numpy", "num_tokens": 4247}
|
import sys
import qprompt
import random
from enum import Enum
from copy import deepcopy
from itertools import groupby
import numpy as np
from scipy.ndimage import rotate
from rich.progress import (
BarColumn,
TimeRemainingColumn,
Progress,
)
class Player(Enum):
    # Board cell values: 1 marks an X move, 2 marks an O move.
    # Empty cells on the board are represented by -1 (see initialize_board).
    X = 1
    O = 2
def initialize_board():
    """Return a fresh 3x3 board; -1 marks an empty cell."""
    return [[-1 for _col in range(3)] for _row in range(3)]
def standardPositionRenderer(position, space):
    """Render one board cell: cyan separator, then a coloured mark or the move index.

    X cells render red, O cells render green; empty cells show *space*
    (the caller's running move index).
    """
    marks = {
        Player.X.value: f'\033[91m{Player.X.name}\033[0m',
        Player.O.value: f'\033[92m{Player.O.name}\033[0m',
    }
    cell = marks.get(position, f'{space}')
    return '\033[96m | \033[0m' + cell
def row_separator_renderer(column_count):
    """Build the horizontal rule printed between board rows (one dash group per column)."""
    rule = '\033[96m -\033[0m'
    rule += '\033[96m----\033[0m' * column_count
    return rule + '\n'
def row_terminator_renderer():
    """Return the cell separator plus newline that closes a rendered row."""
    terminator = '\033[96m | \033[0m\n'
    return terminator
def value_is_neutral(value):
    """True when *value* belongs to neither player, i.e. the cell is empty."""
    return value not in (Player.X.value, Player.O.value)
def display_board(board, indent=0):
    """Pretty-print *board* to stdout.

    Player cells render as coloured X/O; empty cells show a running
    index (``space``) that matches the row-major order used by
    get_available_moves. *indent* prefixes each rendered line with that
    many tab characters.

    Bug fix: the original wrote the indent tabs twice before the top
    rule (two identical loops), so the first line was indented twice as
    deep as the rest of the board. The indent is now written once.
    """
    space = 0
    for _ in range(indent):
        sys.stdout.write('\t')
    sys.stdout.write(row_separator_renderer(len(board[0])))
    for row in board:
        for _ in range(indent):
            sys.stdout.write('\t')
        for cell in row:
            sys.stdout.write(standardPositionRenderer(cell, space))
            if value_is_neutral(cell):
                # Only empty cells consume a selectable move index.
                space = space + 1
        sys.stdout.write(row_terminator_renderer())
        for _ in range(indent):
            sys.stdout.write('\t')
        sys.stdout.write(row_separator_renderer(len(row)))
def get_available_moves(board):
    """Return (row, col) tuples for every empty (-1) cell, in row-major order.

    NOTE(review): both axes iterate over len(board), so a square board
    is assumed.
    """
    size = len(board)
    return [
        (row, col)
        for row in range(size)
        for col in range(size)
        if board[row][col] == -1
    ]
def create_menu(move_list):
    """Build a qprompt menu: keys are stringified list indices, descriptions are moves."""
    menu = qprompt.Menu()
    for index, move in enumerate(move_list):
        menu.add(str(index), move)
    return menu
def get_player_to_move(move_counter):
    """Map a 0-based move counter to a player value: even -> 1 (X), odd -> 2 (O)."""
    return 2 if move_counter % 2 == 1 else 1
def menu_prompt(move_list, move_counter):
    """Show the move menu and return the chosen move (the menu item's description)."""
    header = str.format("Turn {}, {} to move", move_counter, Player(get_player_to_move(move_counter)))
    menu = create_menu(move_list)
    return menu.show(returns="desc", header=header)
def set_board_position(board, position, value):
    """Write *value* into *board* at the (row, col) pair *position*, in place."""
    row, col = position
    board[row][col] = value
def detect_horizontal_win_states(board):
    """Return the winning value of the first fully-uniform non-empty row, else None.

    A row wins when its first cell is a player's mark and every other
    cell equals it.
    """
    for row in board:
        first = row[0]
        if value_is_neutral(first):
            continue
        if all(cell == first for cell in row):
            return first
    return None
def transpose_board(board):
    """Return the board's columns as rows (a lazy zip of tuples)."""
    columns = zip(*board)
    return columns
def detect_win_state(board):
    """Return the winning player's cell value (1 or 2), or None when nobody has won.

    Checks columns, rows, the main diagonal and the anti-diagonal, in
    that order.
    """
    line_sets = (
        transpose_board(board),             # columns
        board,                              # rows
        [np.diag(board)],                   # main diagonal
        [np.diag(np.flip(board, axis=1))],  # anti-diagonal
    )
    for lines in line_sets:
        winner = detect_horizontal_win_states(lines)
        if winner is not None:
            return winner
    return None
def calculate_board_fitness(board, player):
    """Score *board* from *player*'s perspective for the minimax search.

    Returns 1.0 for a win, -1.0 for a loss, 0.5 for a finished draw
    (9 moves played, no winner) and 0.25 for an undecided position.

    Bug fix: the original tested ``detect_win_state(board) == None``
    first and returned 0.25, making the draw branch
    (``get_current_move(board) == 9 -> 0.5``) unreachable; a full board
    with no winner now scores 0.5 as the original code intended. The win
    state is also computed once instead of up to three times.
    """
    opponent = Player.O if player == Player.X else Player.X
    winner = detect_win_state(board)
    if winner is not None:
        if Player(winner) == player:
            return 1.0
        if Player(winner) == opponent:
            return -1.0
    if get_current_move(board) == 9:
        # Board full, nobody won: a draw.
        return 0.5
    return 0.25
def get_current_move(board):
    """Count the cells holding a player's mark, i.e. the number of moves played."""
    return sum(
        1
        for row in board
        for cell in row
        if cell in Player._value2member_map_
    )
def get_current_player(board):
    """Return the side to move: X after an even number of moves, O after an odd number."""
    return Player.O if get_current_move(board) % 2 == 1 else Player.X
class Node():
    """A minimax search node: a board plus the move that produced it."""

    def __init__(self, board, move=None):
        self.board = board
        self.move = move
        # Heuristics are always scored from O's perspective (see
        # get_heuristic); the computer plays O.
        self.player = Player.O

    def get_player(self):
        """Player whose turn it is on this node's board."""
        return get_current_player(self.board)

    def copy(self, move):
        """Return a child Node with *move* applied for the side to move."""
        next_board = deepcopy(self.get_board())
        set_board_position(next_board, move, self.get_player().value)
        return Node(next_board, move)

    def get_board(self):
        return self.board

    def has_win_state(self):
        """True when either player has completed a line on this board."""
        return detect_win_state(self.get_board()) is not None

    def get_children(self):
        """One child node per legal move from this position."""
        return [self.copy(move) for move in get_available_moves(self.get_board())]

    def get_heuristic(self):
        """Fitness of this board for self.player (always Player.O)."""
        return calculate_board_fitness(self.board, self.player)

    def get_move(self):
        return self.move

    def get_child_count(self):
        return len(self.get_children())

    def has_children(self):
        return self.get_child_count() > 0
def minimax(node, depth, maximizingPlayer):
    """Plain minimax: best achievable heuristic value reachable from *node*.

    *maximizingPlayer* is True when the side to move wants to maximise
    the heuristic. Recursion stops at a win state, at depth 0, or when
    the node has no children.
    """
    children = node.get_children()
    if node.has_win_state() or depth == 0 or not children:
        return node.get_heuristic()
    scores = (minimax(child, depth - 1, not maximizingPlayer) for child in children)
    # children is non-empty here, so max/min over the generator replaces
    # the original +/-1000 sentinel accumulators (heuristics are bounded
    # by +/-1.0).
    return max(scores) if maximizingPlayer else min(scores)
def computer_compute_move(_board):
    """Choose O's move by scoring each legal move with minimax.

    Shows a rich progress bar while searching. Returns the (row, col)
    move of the best-scoring child, or None when the board has no legal
    moves.

    Bug fix: when the root had no children, ``best_candidate_child`` was
    never assigned and the final return raised UnboundLocalError; the
    empty case now returns None explicitly. The children are also
    computed once — every ``get_children()`` call deep-copies the board
    for each legal move.
    """
    progress = Progress(
        "[progress.description]{task.description}",
        BarColumn(),
        "[progress.percentage]{task.percentage:>3.0f}%",
        TimeRemainingColumn()
    )
    root = Node(_board)
    children = root.get_children()
    task1 = progress.add_task("[red] BEEP BOP", total=len(children))
    if not children:
        return None
    best_candidate_child = children[0]
    best_candidate_child_score = -1
    with progress:
        for child in children:
            progress.update(task1, advance=1, refresh=True)
            current_child_score = minimax(child, 200, False)
            if current_child_score > best_candidate_child_score:
                best_candidate_child = child
                best_candidate_child_score = current_child_score
    return best_candidate_child.get_move()
def computer_random_move(board, move_list, player):
    """Pick a uniformly random legal move; *board* and *player* are unused."""
    chosen = random.choice(move_list)
    return chosen
# Self-play driver: X moves randomly, O uses the minimax search.
# Tallies draws and per-player wins across _games_to_play games.
_game_counter = 0
_draws = 0
_x_wins = 0
_o_wins = 0
_games_to_play = 10
while (_game_counter < _games_to_play):
    _board = initialize_board()
    display_board(_board)
    _move_list = get_available_moves(_board)
    while (_move_list and len(_move_list) > 0):
        if Player(get_player_to_move(get_current_move(_board))) == Player.X:
            # X's turn: random move (the interactive prompt is disabled).
            # move = menu_prompt(_move_list, get_current_move(_board))
            move = computer_random_move(_board, _move_list, Player(get_player_to_move(get_current_move(_board))))
        else:
            # O's turn: minimax.
            move = computer_compute_move(_board)
        set_board_position(_board, move, get_player_to_move(get_current_move(_board)))
        display_board(_board)
        if detect_win_state(_board) != None:
            print()
            # The winner is whoever made the previous move, hence the -1.
            print(f'\tWIN STATE DETECTED FOR {Player(get_player_to_move(get_current_move(_board) - 1))}')
            if Player(get_player_to_move(get_current_move(_board) - 1)) == Player.X:
                _x_wins += 1
                # Force early exit if the AI manages to lose. It shouldn't do worse than a draw.
                # This ensures the game history is captured in the terminal.
                _game_counter = _games_to_play
            else:
                _o_wins += 1
            break
        _move_list = get_available_moves(_board)
    if not detect_win_state(_board):
        # Inner loop exhausted the move list without a winner.
        print()
        _draws += 1
        print(f'\tDRAW STATE DETECTED')
    # Per-game scoreboard.
    print()
    _gc_disp = _game_counter + 1
    print(f'\t GAME {_gc_disp:08}')
    print(f'\t DRAWS {_draws:08}')
    print(f'\tX WINS {_x_wins:08}')
    print(f'\tO WINS {_o_wins:08}')
    print()
    _game_counter += 1
|
{"hexsha": "2e049f26d8682cf332b16886bde11db4b2c16175", "size": 7522, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "spacekitcat/minimax-tic-tac-toe-python", "max_stars_repo_head_hexsha": "d50872137273cddc467485f1432ea28c86461c57", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "spacekitcat/minimax-tic-tac-toe-python", "max_issues_repo_head_hexsha": "d50872137273cddc467485f1432ea28c86461c57", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "spacekitcat/minimax-tic-tac-toe-python", "max_forks_repo_head_hexsha": "d50872137273cddc467485f1432ea28c86461c57", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8642857143, "max_line_length": 136, "alphanum_fraction": 0.6932996543, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1950}
|
import librosa
import math
import numpy as np
def wav_to_mfcc(wav_path, n_mfcc=13, n_fft=2048, hop_length=512):
    """Slice a WAV file into 3-second segments and return their MFCC tensors.

    The track is read once, then scanned DA_FACTOR times with a growing
    sample offset (shift-based data augmentation); each pass is cut into
    3-second segments. Trailing segments whose MFCC frame count falls
    short of a full segment are dropped.

    Returns a float array of shape (batch, frames, n_mfcc, 1), scaled
    into [-1, 1] by its largest absolute value.

    Bug fixes: the audio signal is now passed as the keyword ``y=``
    (librosa >= 0.10 rejects positional audio arguments), and an empty
    or all-zero result no longer crashes / divides by zero.
    """
    SAMPLE_RATE = 48000
    DA_FACTOR = 10  # data augmentation factor
    X = []
    signal, sr = librosa.load(wav_path, sr=None)
    samples_per_track = len(signal)
    num_samples_per_segment = 3 * sr
    expected_num_mfcc_per_segment = math.ceil(num_samples_per_segment / hop_length)
    num_segments = int(samples_per_track / num_samples_per_segment)
    for ii in range(DA_FACTOR):
        # NOTE(review): the offset uses the hard-coded SAMPLE_RATE, not the
        # file's actual sr -- confirm inputs are always 48 kHz.
        bias = int(ii * (SAMPLE_RATE / DA_FACTOR))
        for s in range(num_segments):
            start_sample = num_samples_per_segment * s + bias
            finish_sample = min(start_sample + num_samples_per_segment, samples_per_track)
            mfcc = librosa.feature.mfcc(y=signal[start_sample:finish_sample],
                                        sr=sr,
                                        n_fft=n_fft,
                                        n_mfcc=n_mfcc,
                                        hop_length=hop_length,
                                        )
            mfcc = mfcc.T
            if len(mfcc) == expected_num_mfcc_per_segment:
                X.append(mfcc.tolist())
    # X is the tensor to be passed into the NN
    # add new dimension, pad, cast to tf.float32
    X = np.array(X)
    X = X[..., np.newaxis]
    # Guard: np.max/np.min raise on an empty array, and an all-silent
    # input would otherwise divide by zero.
    max_value = max(np.max(X), -np.min(X)) if X.size else 0
    if max_value:
        X /= max_value
    # when sample rate is 48k
    # shape of X should be (batch_size, 282, 32, 1)
    return X
|
{"hexsha": "b827b74f7bc745ab5dd12ba4eae0263c3034c70e", "size": 1509, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/src/main/python/WAV_TO_MFCC.py", "max_stars_repo_name": "peisong0109/Detection", "max_stars_repo_head_hexsha": "16b50ca19a0f26818db897eb0f1c295f4bf42fe9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app/src/main/python/WAV_TO_MFCC.py", "max_issues_repo_name": "peisong0109/Detection", "max_issues_repo_head_hexsha": "16b50ca19a0f26818db897eb0f1c295f4bf42fe9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app/src/main/python/WAV_TO_MFCC.py", "max_forks_repo_name": "peisong0109/Detection", "max_forks_repo_head_hexsha": "16b50ca19a0f26818db897eb0f1c295f4bf42fe9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9285714286, "max_line_length": 90, "alphanum_fraction": 0.5765407555, "include": true, "reason": "import numpy", "num_tokens": 356}
|
import numpy as np
import sys, tempfile, subprocess
from functools import reduce
from calculate_axis import get_axis
from amino import get_atom_type_array
from Bio import AlignIO
from prody import parsePDB, LOGGER
LOGGER.verbosity = 'none'
def align_fasta(input_pdb_path, target_fasta_path):
    """Align the input PDB's CA sequence to *target_fasta_path* with EMBOSS needle.

    Returns (aligned_structure, input_align_indices, target_align_indices),
    or (None, None, None) when the chain has fewer than 25 CA residues.

    Bug fixes: the original's last selection read
    ``input_mol = input_mol.select(...)`` but ``input_mol`` was never
    defined, raising UnboundLocalError on every successful call -- the
    element filter is now applied to ``align_pdb`` (matching get_voxel's
    C/N/O/S selection). The fasta file handle also leaked on the
    short-sequence early return; writes now use ``with``.
    """
    pdb = parsePDB(input_pdb_path)
    sequence = pdb.select('name CA').getSequence()
    if len(sequence) < 25:
        # Chain too short to align reliably.
        return None, None, None
    # NOTE(review): tempfile.mktemp is deprecated and race-prone;
    # consider mkstemp/NamedTemporaryFile.
    input_fasta_path = tempfile.mktemp(suffix='.fasta')
    with open(input_fasta_path, 'w') as f:
        f.write('>temp\n')
        f.write(reduce(lambda a, b: a + b, sequence))
    needle_path = tempfile.mktemp(suffix='.needle')
    cmd = ['needle', '-outfile', needle_path, '-asequence', input_fasta_path, '-bsequence', target_fasta_path,
           '-gapopen', '10', '-gapextend', '0.5']
    subprocess.call(cmd)
    needle_result = list(AlignIO.parse(needle_path, 'emboss'))[0]
    input_seq, target_seq = np.array(list(str(needle_result[0].seq))), np.array(list(str(needle_result[1].seq)))
    # Drop gap columns from each sequence relative to the other.
    input_seq, target_seq = input_seq[np.where(target_seq != '-')], target_seq[np.where(input_seq != '-')]
    input_align_indices = np.where(target_seq != '-')[0]
    target_align_indices = np.where(input_seq != '-')[0]
    align_pdb = pdb.select('resindex ' + reduce(lambda a, b: str(a) + ' ' + str(b), input_align_indices))
    # Keep only the heavy-atom element types the voxelizer expects.
    align_pdb = align_pdb.select('element C or element N or element O or element S')
    return align_pdb, input_align_indices, target_align_indices
def calc_occupancy_bool(atom_coord, channel, buffer, width, axis):
    """Voxelize atoms into a (channels, n, n, n) occupancy grid.

    atom_coord : (N, 3) coordinates relative to the residue's CA.
    channel    : (N, C) per-atom channel vectors.
    buffer     : cube edge length; atoms outside [0, buffer) after
                 centering are dropped.
    width      : voxel edge length; n = int(buffer / width).
    axis       : 3x3 matrix; coordinates are multiplied by its inverse
                 (presumably a local backbone frame -- see get_axis).

    Returns occupancy of shape (C, n, n, n). A later atom landing in the
    same voxel overwrites an earlier one.

    Bug fix: ``np.int`` was removed in NumPy 1.24; the builtin ``int``
    is the documented replacement.
    """
    atom_coord = np.dot(atom_coord, np.linalg.inv(axis))
    atom_coord += np.array([buffer // 2, buffer // 2, buffer // 2])
    # Keep only atoms inside the cube on every axis.
    index = np.where(np.all(atom_coord >= 0, 1) * np.all(atom_coord < buffer, 1))
    atom_coord = (atom_coord / width).astype(int)
    atom_coord, channel = atom_coord[index], channel[index]
    length = int(buffer / width)
    occus = np.zeros([length, length, length, channel.shape[1]])
    for i in range(len(atom_coord)):
        h = channel[i]
        occus[atom_coord[i][0]][atom_coord[i][1]][atom_coord[i][2]] = h
    # Move channels to the leading axis: (C, n, n, n).
    occus = occus.transpose([3, 0, 1, 2])
    return occus
def make_voxel(input_mol, buffer, width):
    """Build one boolean occupancy grid per residue, centred on each CA.

    Returns a bool array of shape (n_residues, C, n, n, n), where each
    residue's grid is computed in its local backbone frame.
    """
    all_coords = input_mol.getCoords()
    channel = get_atom_type_array(res_name=input_mol.getResnames(), atom_name=input_mol.getNames())
    ca_coords = input_mol.select('name CA').getCoords()
    c_coords = input_mol.select('name C').getCoords()
    n_coords = input_mol.select('name N').getCoords()
    grids = []
    for ca, c, n in zip(ca_coords, c_coords, n_coords):
        # Local frame for this residue, derived from its backbone atoms.
        frame = get_axis(CA_coord=ca, N_coord=n, C_coord=c)
        centred = all_coords - ca
        grids.append(calc_occupancy_bool(atom_coord=centred, channel=channel,
                                         buffer=buffer, width=width, axis=frame))
    return np.array(grids, dtype=bool)
def get_voxel(input_path, buffer, width):
    """Voxelize a PDB file restricted to C/N/O/S atoms.

    Returns (occupancy, CA residue names, CA residue numbers).
    """
    mol = parsePDB(input_path)
    mol = mol.select('element C or element N or element O or element S')
    occus = make_voxel(input_mol=mol, buffer=buffer, width=width)
    ca_atoms = mol.select('name CA')
    return occus, ca_atoms.getResnames(), ca_atoms.getResnums()
def get_voxel_fasta(input_path, target_path, buffer, width):
    """Voxelize the portion of a PDB that aligns to the target fasta.

    Returns (occupancy, CA residue names, CA residue numbers), or
    (None, None, None) when the alignment failed (e.g. chain too short).

    Bug fix: the original fell off the end on alignment failure and
    returned a bare None, which broke 3-value unpacking at call sites;
    it now returns an explicit triple, consistent with align_fasta().
    """
    input_mol, _, _ = align_fasta(input_pdb_path=input_path, target_fasta_path=target_path)
    if input_mol is None:
        return None, None, None
    occus = make_voxel(input_mol=input_mol, buffer=buffer, width=width)
    return occus, input_mol.select('name CA').getResnames(), input_mol.select('name CA').getResnums()
|
{"hexsha": "55e28ec58869f1927f75cb1b36254849a7288063", "size": 3704, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/make_voxel.py", "max_stars_repo_name": "ishidalab-titech/3DCNN_MQA", "max_stars_repo_head_hexsha": "8f68a3719065338f03eca44da9a6eb0262da0ce9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-09T03:17:22.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-31T08:42:30.000Z", "max_issues_repo_path": "source/make_voxel.py", "max_issues_repo_name": "ishidalab-titech/3DCNN_MQA", "max_issues_repo_head_hexsha": "8f68a3719065338f03eca44da9a6eb0262da0ce9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/make_voxel.py", "max_forks_repo_name": "ishidalab-titech/3DCNN_MQA", "max_forks_repo_head_hexsha": "8f68a3719065338f03eca44da9a6eb0262da0ce9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-07-26T07:47:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-25T21:03:27.000Z", "avg_line_length": 47.4871794872, "max_line_length": 116, "alphanum_fraction": 0.6860151188, "include": true, "reason": "import numpy", "num_tokens": 993}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.