code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from sklearn.cluster import DBSCAN
import numpy as np
# Session state names observed in the log data ("$" presumably marks end-of-session — confirm).
states = ["INITIAL","login","View_Items","home","logout","View_Items_quantity","Add_to_Cart","shoppingcart",
"remove","deferorder","purchasecart","inventory","sellinventory","clearcart","cancelorder","$"]
# Data imports
PATH = "../data/raw/"  # folder containing the raw data files
sessions_file = (PATH+'sessions.dat')  # path to the sessions data file
class Clustering:
    def __init__(self, X):
        """
        Class for the DBSCAN clustering algorithm with sklearn.

        :param X: Input data for the clustering
        """
        self.X = X

    def dbscan(self):
        """Fit DBSCAN (eps=1.5, min_samples=10) on the input data and return the fitted estimator."""
        return DBSCAN(eps=1.5, min_samples=10).fit(self.X)

    def unique_labels(self):
        """Return (unique labels, per-label counts, full label vector) of the DBSCAN fit."""
        labels = self.dbscan().labels_
        unique, counts = np.unique(labels, return_counts=True)
        return unique, counts, labels

    def compare_results(self):
        """Return the cluster sizes as a dict mapping label -> number of points.

        BUG FIX: unique_labels() returns three values, but the original code
        unpacked only two names, raising ValueError on every call.
        """
        unique, counts, _ = self.unique_labels()
        # represent the cluster results as dict
        return dict(zip(unique, counts))

    # Dict_Cluster
    def cluster_dict(self, labels, X_):
        """Emit one {label: dense_point} dict per row of the (sparse) matrix X_.

        :param labels: cluster label per row of X_
        :param X_: matrix supporting boolean-mask indexing and .toarray()
        :return: list of single-entry dicts, one per point
        """
        cluster_list = []
        for label in np.unique(labels):
            points = X_[labels == label].toarray()
            for point in points:
                # note: original shadowed the method name with a local dict here
                cluster_list.append({label: point})
        return cluster_list

    def list_cluster(self, cluster_dict_, labels_next, labels_past):
        """Compute the mean point (centroid) of each cluster in labels_next.

        :param cluster_dict_: list of {label: point} dicts (see cluster_dict)
        :param labels_next: labels to average; only processed when contained in labels_past
        :param labels_past: reference label collection
        :return: list of per-cluster mean vectors (empty when labels_next not in labels_past)
        """
        cluster_list = []
        if labels_next in labels_past:
            for cluster_index, value in enumerate(np.unique(labels_next)):
                tmp = []
                for item in cluster_dict_:
                    for k, v in item.items():
                        if k == cluster_index:
                            tmp.append(v.tolist())
                cluster_list.append(np.mean(tmp, axis=0))
        return cluster_list
| [
"numpy.mean",
"sklearn.cluster.DBSCAN",
"numpy.unique"
] | [((725, 762), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (734, 762), True, 'import numpy as np\n'), ((1101, 1118), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1110, 1118), True, 'import numpy as np\n'), ((587, 618), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(1.5)', 'min_samples': '(10)'}), '(eps=1.5, min_samples=10)\n', (593, 618), False, 'from sklearn.cluster import DBSCAN\n'), ((1547, 1569), 'numpy.unique', 'np.unique', (['labels_next'], {}), '(labels_next)\n', (1556, 1569), True, 'import numpy as np\n'), ((1820, 1840), 'numpy.mean', 'np.mean', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (1827, 1840), True, 'import numpy as np\n')] |
# Author: <NAME>
# email: <EMAIL>
import numpy as np
import init_paths
from bbox_transform import bbox_TLWH2TLBR
from xinshuo_miscellaneous import CHECK_EQ_NUMPY
def test_bbox_TLWH2TLBR():
    """Smoke tests for bbox_TLWH2TLBR (top-left/width/height -> top-left/bottom-right)."""
    print('check basic')
    bbox = [1, 1, 10, 10]
    clipped = bbox_TLWH2TLBR(bbox)
    print(clipped)
    assert CHECK_EQ_NUMPY(clipped, np.array([1, 1, 11, 11]).reshape((1, 4)))

    print('check out of boundary and 0 height')
    bbox = [-1, 3, 20, 0]
    clipped = bbox_TLWH2TLBR(bbox)
    print(clipped)
    assert CHECK_EQ_NUMPY(clipped, np.array([-1, 3, 19, 3]).reshape((1, 4)))

    print('check 0 height and width')
    bbox = [10, 30, 0, 0]
    clipped = bbox_TLWH2TLBR(bbox)
    print(clipped)
    assert CHECK_EQ_NUMPY(clipped, np.array([10, 30, 10, 30]).reshape((1, 4)))

    print('check multi bboxes')
    bbox = np.array([[10, 30, 0, 0], [-1, 3, 20, 0]])
    clipped = bbox_TLWH2TLBR(bbox)
    print(clipped)
    assert CHECK_EQ_NUMPY(clipped, np.array([[10, 30, 10, 30], [-1, 3, 19, 3]]).reshape((2, 4)))

    print('check width < 0')
    bbox = [10, 30, -1, 29]
    try:
        clipped = bbox_TLWH2TLBR(bbox)
        # BUG FIX: the original called sys.exit() but `sys` is never imported in
        # this file, so reaching this line raised NameError. raise SystemExit is
        # exactly what sys.exit() does, needs no import, and is deliberately NOT
        # caught by the AssertionError handler below.
        raise SystemExit('\nwrong! never should be here\n\n')
    except AssertionError:
        print('the width should be no less than 0')

    print('\n\nDONE! SUCCESSFUL!!\n')
if __name__ == '__main__':
test_bbox_TLWH2TLBR() | [
"bbox_transform.bbox_TLWH2TLBR",
"numpy.array"
] | [((247, 267), 'bbox_transform.bbox_TLWH2TLBR', 'bbox_TLWH2TLBR', (['bbox'], {}), '(bbox)\n', (261, 267), False, 'from bbox_transform import bbox_TLWH2TLBR\n'), ((439, 459), 'bbox_transform.bbox_TLWH2TLBR', 'bbox_TLWH2TLBR', (['bbox'], {}), '(bbox)\n', (453, 459), False, 'from bbox_transform import bbox_TLWH2TLBR\n'), ((620, 640), 'bbox_transform.bbox_TLWH2TLBR', 'bbox_TLWH2TLBR', (['bbox'], {}), '(bbox)\n', (634, 640), False, 'from bbox_transform import bbox_TLWH2TLBR\n'), ((771, 813), 'numpy.array', 'np.array', (['[[10, 30, 0, 0], [-1, 3, 20, 0]]'], {}), '([[10, 30, 0, 0], [-1, 3, 20, 0]])\n', (779, 813), True, 'import numpy as np\n'), ((825, 845), 'bbox_transform.bbox_TLWH2TLBR', 'bbox_TLWH2TLBR', (['bbox'], {}), '(bbox)\n', (839, 845), False, 'from bbox_transform import bbox_TLWH2TLBR\n'), ((1026, 1046), 'bbox_transform.bbox_TLWH2TLBR', 'bbox_TLWH2TLBR', (['bbox'], {}), '(bbox)\n', (1040, 1046), False, 'from bbox_transform import bbox_TLWH2TLBR\n'), ((316, 340), 'numpy.array', 'np.array', (['[1, 1, 11, 11]'], {}), '([1, 1, 11, 11])\n', (324, 340), True, 'import numpy as np\n'), ((508, 532), 'numpy.array', 'np.array', (['[-1, 3, 19, 3]'], {}), '([-1, 3, 19, 3])\n', (516, 532), True, 'import numpy as np\n'), ((689, 715), 'numpy.array', 'np.array', (['[10, 30, 10, 30]'], {}), '([10, 30, 10, 30])\n', (697, 715), True, 'import numpy as np\n'), ((894, 938), 'numpy.array', 'np.array', (['[[10, 30, 10, 30], [-1, 3, 19, 3]]'], {}), '([[10, 30, 10, 30], [-1, 3, 19, 3]])\n', (902, 938), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import MkldnnAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig, OpConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
class TestMkldnnPreluOp(MkldnnAutoScanTest):
    # Auto-scan test for the single-op "prelu" program under oneDNN (MKL-DNN).

    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # if mode is channel, and in_shape is 1 rank
        # (channel mode needs a channel dimension, so rank-1 inputs are rejected)
        if len(program_config.inputs['input_data'].
               shape) == 1 and program_config.ops[0].attrs['mode'] == 'channel':
            return False
        return True

    def sample_program_configs(self, *args, **kwargs):
        # Build one "prelu" program with randomly generated input and alpha weight.

        def generate_input(*args, **kwargs):
            # Random float32 input tensor of the requested shape.
            return np.random.random(kwargs['in_shape']).astype(np.float32)

        def generate_alpha(*args, **kwargs):
            # Alpha weight whose shape depends on the prelu mode:
            # "all" -> scalar, "channel" -> per-channel, else -> element-wise.
            if kwargs["mode"] == "all":
                return np.random.random(size=(1)).astype(np.float32)
            elif kwargs["mode"] == "channel":
                if len(kwargs['in_shape']) <= 1:
                    # not valid case, just return 0
                    return np.zeros((1)).astype(np.float32)
                return np.random.random(kwargs['in_shape'][1]).astype(
                    np.float32)
            else:
                if len(kwargs['in_shape']) <= 1:
                    # not valid case, just return 0
                    return np.zeros((1)).astype(np.float32)
                return np.random.random(kwargs['in_shape']).astype(np.float32)

        prelu_op = OpConfig(
            type="prelu",
            inputs={"X": ["input_data"],
                    "Alpha": ["alpha_weight"]},
            outputs={"Out": ["output_data"]},
            attrs={"mode": kwargs['mode']})

        program_config = ProgramConfig(
            ops=[prelu_op],
            weights={
                "alpha_weight":
                TensorConfig(data_gen=partial(generate_alpha, *args, **kwargs))
            },
            inputs={
                "input_data":
                TensorConfig(data_gen=partial(generate_input, *args, **kwargs)),
            },
            outputs=["output_data"])

        yield program_config

    def sample_predictor_configs(self, program_config):
        # Run with oneDNN enabled; tolerate (atol, rtol) = (1e-5, 1e-5).
        config = self.create_inference_config(use_mkldnn=True)
        yield config, (1e-5, 1e-5)

    def add_skip_pass_case(self):
        pass

    @given(
        mode=st.sampled_from(['all', 'channel', 'element']),
        in_shape=st.lists(
            st.integers(
                min_value=1, max_value=32), min_size=1, max_size=4))
    def test(self, *args, **kwargs):
        # Hypothesis drives mode/in_shape combinations through the auto-scan runner.
        self.add_skip_pass_case()
        self.run_test(quant=False, *args, **kwargs)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"functools.partial",
"program_config.OpConfig",
"numpy.zeros",
"hypothesis.strategies.sampled_from",
"numpy.random.random",
"hypothesis.strategies.integers"
] | [((3485, 3500), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3498, 3500), False, 'import unittest\n'), ((2284, 2438), 'program_config.OpConfig', 'OpConfig', ([], {'type': '"""prelu"""', 'inputs': "{'X': ['input_data'], 'Alpha': ['alpha_weight']}", 'outputs': "{'Out': ['output_data']}", 'attrs': "{'mode': kwargs['mode']}"}), "(type='prelu', inputs={'X': ['input_data'], 'Alpha': [\n 'alpha_weight']}, outputs={'Out': ['output_data']}, attrs={'mode':\n kwargs['mode']})\n", (2292, 2438), False, 'from program_config import TensorConfig, ProgramConfig, OpConfig\n'), ((3160, 3206), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (["['all', 'channel', 'element']"], {}), "(['all', 'channel', 'element'])\n", (3175, 3206), True, 'import hypothesis.strategies as st\n'), ((3247, 3285), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(32)'}), '(min_value=1, max_value=32)\n', (3258, 3285), True, 'import hypothesis.strategies as st\n'), ((1485, 1521), 'numpy.random.random', 'np.random.random', (["kwargs['in_shape']"], {}), "(kwargs['in_shape'])\n", (1501, 1521), True, 'import numpy as np\n'), ((1650, 1674), 'numpy.random.random', 'np.random.random', ([], {'size': '(1)'}), '(size=1)\n', (1666, 1674), True, 'import numpy as np\n'), ((1926, 1965), 'numpy.random.random', 'np.random.random', (["kwargs['in_shape'][1]"], {}), "(kwargs['in_shape'][1])\n", (1942, 1965), True, 'import numpy as np\n'), ((2208, 2244), 'numpy.random.random', 'np.random.random', (["kwargs['in_shape']"], {}), "(kwargs['in_shape'])\n", (2224, 2244), True, 'import numpy as np\n'), ((2660, 2700), 'functools.partial', 'partial', (['generate_alpha', '*args'], {}), '(generate_alpha, *args, **kwargs)\n', (2667, 2700), False, 'from functools import partial\n'), ((2806, 2846), 'functools.partial', 'partial', (['generate_input', '*args'], {}), '(generate_input, *args, **kwargs)\n', (2813, 2846), False, 'from functools import partial\n'), ((1870, 1881), 
'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1878, 1881), True, 'import numpy as np\n'), ((2152, 2163), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (2160, 2163), True, 'import numpy as np\n')] |
import sounddevice as sd
import librosa
import numpy as np
from keras.models import load_model
from sklearn.preprocessing import LabelEncoder
import os
import tensorflow as tf
# Silence TensorFlow info/deprecation logging.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Trained CNN weights, located relative to this file.
model_file = os.path.join(os.path.dirname(__file__), '..', 'train', 'saved_models', 'weights.best.basic_cnn.hdf5')
model = load_model(model_file)
# Label encoder restored from the classes saved at training time.
le = LabelEncoder()
classes_file = os.path.join(os.path.dirname(__file__), '..', 'train', 'saved_models', 'classes.npy')
le.classes_ = np.load(classes_file)
# MFCC feature dimensions the model expects (rows x columns x channels).
num_rows = 40
num_columns = 174
num_channels = 1
max_pad_len = 174
def classify(sample):
    """Predict the class label for one audio sample.

    :param sample: dict with 'data' (waveform) and 'rate' (sample rate) keys
    :return: the predicted class label (first element of the decoded vector)
    """
    features = librosa.feature.mfcc(
        y=sample['data'], sr=sample['rate'], n_mfcc=40)
    # Right-pad the time axis so every sample has max_pad_len frames.
    padding = max_pad_len - features.shape[1]
    features = np.pad(features, pad_width=((0, 0), (0, padding)), mode='constant')
    # Shape the features into a single-item batch for the CNN.
    model_input = features.reshape(1, num_rows, num_columns, num_channels)
    class_index = model.predict_classes(model_input)
    label = le.inverse_transform(class_index)
    return label[0]
| [
"keras.models.load_model",
"numpy.pad",
"numpy.load",
"os.path.dirname",
"sklearn.preprocessing.LabelEncoder",
"tensorflow.compat.v1.logging.set_verbosity",
"librosa.feature.mfcc"
] | [((177, 239), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (211, 239), True, 'import tensorflow as tf\n'), ((364, 386), 'keras.models.load_model', 'load_model', (['model_file'], {}), '(model_file)\n', (374, 386), False, 'from keras.models import load_model\n'), ((392, 406), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (404, 406), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((522, 543), 'numpy.load', 'np.load', (['classes_file'], {}), '(classes_file)\n', (529, 543), True, 'import numpy as np\n'), ((267, 292), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (282, 292), False, 'import os\n'), ((435, 460), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (450, 460), False, 'import os\n'), ((648, 716), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': "sample['data']", 'sr': "sample['rate']", 'n_mfcc': '(40)'}), "(y=sample['data'], sr=sample['rate'], n_mfcc=40)\n", (668, 716), False, 'import librosa\n'), ((783, 849), 'numpy.pad', 'np.pad', (['mfccs'], {'pad_width': '((0, 0), (0, pad_width))', 'mode': '"""constant"""'}), "(mfccs, pad_width=((0, 0), (0, pad_width)), mode='constant')\n", (789, 849), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 00:59:55 2020
@author: nemo
"""
from copy import deepcopy
import os
import random
import numpy as np
import nibabel as nib
os.chdir('../90.template')
# exist_ok avoids crashing on re-runs when the output folder already exists
os.makedirs('rand_parc', exist_ok=True)
mni_img = nib.load('MNI152_T1_1mm_GM_resamp_2.5mm.nii.gz')
# Extract mni_data = 1, give it indexes and shuffle
mni_data = mni_img.get_data()
# BUG FIX: the original used np.prod(mni_data.shape[-1]), which reduces a
# single scalar (a no-op), so new_shape was (z, z) and the reshape below
# failed for any volume with x*y != z. Flatten the leading axes instead.
new_shape = (np.prod(mni_data.shape[:-1]), mni_data.shape[-1])
mni_res = mni_data.reshape(new_shape, order='F')
mask = mni_res == 1
mni_masked = mni_res[mask]
# All masked values equal 1, so this assigns each in-mask voxel its own index.
mni_indexed = mni_masked * range(mni_masked.size)
for m in range(5):
    for n in range(2, 121):
        # Pick n random seed voxels (sampling is with replacement, so
        # duplicate seeds are possible; later seeds overwrite earlier ones).
        rand_seed = random.choices(mni_indexed, k=n)
        rand_atlas = mni_masked * 0
        for i, k in enumerate(rand_seed):
            rand_atlas[int(k)] = i+1
        out = deepcopy(mni_res)
        # Populate the output target and reshape to 3D
        out[mask] = rand_atlas
        out_data = out.reshape(mni_data.shape, order='F')
        # Create the nifti file and export it
        out_img = nib.Nifti1Image(out_data.astype(int), mni_img.affine, mni_img.header)
        out_img.to_filename(f'rand_parc/{n}-{m}-parc.nii.gz')
| [
"copy.deepcopy",
"os.makedirs",
"nibabel.load",
"random.choices",
"os.chdir",
"numpy.prod"
] | [((199, 225), 'os.chdir', 'os.chdir', (['"""../90.template"""'], {}), "('../90.template')\n", (207, 225), False, 'import os\n'), ((226, 250), 'os.makedirs', 'os.makedirs', (['"""rand_parc"""'], {}), "('rand_parc')\n", (237, 250), False, 'import os\n'), ((261, 309), 'nibabel.load', 'nib.load', (['"""MNI152_T1_1mm_GM_resamp_2.5mm.nii.gz"""'], {}), "('MNI152_T1_1mm_GM_resamp_2.5mm.nii.gz')\n", (269, 309), True, 'import nibabel as nib\n'), ((405, 432), 'numpy.prod', 'np.prod', (['mni_data.shape[-1]'], {}), '(mni_data.shape[-1])\n', (412, 432), True, 'import numpy as np\n'), ((668, 700), 'random.choices', 'random.choices', (['mni_indexed'], {'k': 'n'}), '(mni_indexed, k=n)\n', (682, 700), False, 'import random\n'), ((831, 848), 'copy.deepcopy', 'deepcopy', (['mni_res'], {}), '(mni_res)\n', (839, 848), False, 'from copy import deepcopy\n')] |
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import pandas as pd
from ..signal import signal_period
def benchmark_ecg_preprocessing(function, ecg, rpeaks=None, sampling_rate=1000):
    """Benchmark an ECG preprocessing pipeline.

    Parameters
    ----------
    function : function
        A callable whose first argument is the ECG signal and which accepts a
        ``sampling_rate`` argument; it must return the detected R-peaks.
    ecg : pd.DataFrame or str
        Either a loaded ECG database (or the path to a folder containing an
        ``ECGs.csv`` file), or a single ECG signal vector.
    rpeaks : pd.DataFrame or str
        Ground-truth R-peaks in the same form as ``ecg`` (path to a folder with
        an ``Rpeaks.csv`` file, a loaded DataFrame, or a vector). Defaults to
        the same source as ``ecg`` when omitted.
    sampling_rate : int
        The sampling frequency (in Hz, i.e., samples/second). Only used when
        ``ecg`` and ``rpeaks`` are single vectors.

    Returns
    -------
    pd.DataFrame
        A DataFrame containing the results of the benchmarking.
    """
    if rpeaks is None:
        rpeaks = ecg

    # Strings are interpreted as folder paths holding the CSV exports.
    if isinstance(ecg, str):
        ecg = pd.read_csv(ecg + "/ECGs.csv")
    if isinstance(rpeaks, str):
        rpeaks = pd.read_csv(rpeaks + "/Rpeaks.csv")

    # A DataFrame means a multi-recording database; anything else is treated
    # as one recording vector.
    if isinstance(ecg, pd.DataFrame):
        return _benchmark_ecg_preprocessing_databases(function, ecg, rpeaks)
    return _benchmark_ecg_preprocessing(function, ecg, rpeaks, sampling_rate=sampling_rate)
# =============================================================================
# Utils
# =============================================================================
def _benchmark_ecg_preprocessing_databases(function, ecgs, rpeaks):
    """A wrapper over _benchmark_ecg_preprocessing when the input is a database."""
    all_results = []
    for participant in ecgs["Participant"].unique():
        participant_mask = ecgs["Participant"] == participant
        for database in ecgs[participant_mask]["Database"].unique():
            # Select this participant/database recording in both tables.
            ecg_slice = ecgs[participant_mask & (ecgs["Database"] == database)]
            rpeaks_slice = rpeaks[
                (rpeaks["Participant"] == participant) & (rpeaks["Database"] == database)
            ]
            sampling_rate = ecg_slice["Sampling_Rate"].unique()[0]

            # Benchmark the raw signal against the annotated R-peaks.
            result = _benchmark_ecg_preprocessing(
                function, ecg_slice["ECG"].values, rpeaks_slice["Rpeaks"].values, sampling_rate
            )

            # Record where this result came from.
            result["Participant"] = participant
            result["Database"] = database
            all_results.append(result)

    return pd.concat(all_results)
def _benchmark_ecg_preprocessing(function, ecg, rpeak, sampling_rate=1000):
    # Benchmark `function` on one recording: time it, score its detected
    # R-peaks against `rpeak`, and return a one-row result DataFrame.

    # Apply function
    t0 = datetime.datetime.now()
    try:
        found_rpeaks = function(ecg, sampling_rate=sampling_rate)
        duration = (datetime.datetime.now() - t0).total_seconds()
    # In case of failure
    except Exception as error:  # pylint: disable=broad-except
        # Broad catch is deliberate: a crashing algorithm is recorded as a
        # failed row (NaN score, error message) instead of aborting the run.
        return pd.DataFrame(
            {
                "Sampling_Rate": [sampling_rate],
                "Duration": [np.nan],
                "Score": [np.nan],
                "Recording_Length": [len(ecg) / sampling_rate / 60],
                "Error": str(error),
            }
        )

    # Compare R peaks
    score, error = benchmark_ecg_compareRpeaks(rpeak, found_rpeaks, sampling_rate=sampling_rate)

    return pd.DataFrame(
        {
            "Sampling_Rate": [sampling_rate],
            "Duration": [duration],
            "Score": [score],
            "Recording_Length": [len(ecg) / sampling_rate / 60],
            "Error": error,
        }
    )
# =============================================================================
# Comparison methods
# =============================================================================
def benchmark_ecg_compareRpeaks(true_rpeaks, found_rpeaks, sampling_rate=250):
    """Score detected R-peaks against ground truth via interpolated period error.

    Returns a (score, error_message) tuple; score is the mean absolute
    difference between the interpolated period series (lower is better).
    """
    # Failure to find sufficient R-peaks
    if len(found_rpeaks) <= 3:
        return np.nan, "R-peaks detected <= 3"

    n_samples = np.max(np.concatenate([true_rpeaks, found_rpeaks]))

    # Interpolate both period series onto a common sample grid so they can
    # be compared point-wise.
    periods_true = signal_period(
        true_rpeaks, sampling_rate=sampling_rate, desired_length=n_samples,
        interpolation_method="linear"
    )
    periods_found = signal_period(
        found_rpeaks, sampling_rate=sampling_rate, desired_length=n_samples,
        interpolation_method="linear"
    )

    return np.mean(np.abs(periods_found - periods_true)), "None"
| [
"numpy.abs",
"pandas.read_csv",
"datetime.datetime.now",
"pandas.concat",
"numpy.concatenate"
] | [((3492, 3510), 'pandas.concat', 'pd.concat', (['results'], {}), '(results)\n', (3501, 3510), True, 'import pandas as pd\n'), ((3619, 3642), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3640, 3642), False, 'import datetime\n'), ((1904, 1934), 'pandas.read_csv', 'pd.read_csv', (["(ecg + '/ECGs.csv')"], {}), "(ecg + '/ECGs.csv')\n", (1915, 1934), True, 'import pandas as pd\n'), ((1985, 2020), 'pandas.read_csv', 'pd.read_csv', (["(rpeaks + '/Rpeaks.csv')"], {}), "(rpeaks + '/Rpeaks.csv')\n", (1996, 2020), True, 'import pandas as pd\n'), ((4947, 4990), 'numpy.concatenate', 'np.concatenate', (['[true_rpeaks, found_rpeaks]'], {}), '([true_rpeaks, found_rpeaks])\n', (4961, 4990), True, 'import numpy as np\n'), ((5311, 5357), 'numpy.abs', 'np.abs', (['(found_interpolated - true_interpolated)'], {}), '(found_interpolated - true_interpolated)\n', (5317, 5357), True, 'import numpy as np\n'), ((3738, 3761), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3759, 3761), False, 'import datetime\n')] |
import torch
import numpy as np
from mean_average_precision import MetricBuilder
from .utils import masks_to_bboxes
from functools import partial
class MeanAveragePrecision:
    """Accumulates mean average precision over batches of predicted/GT masks."""

    def __init__(
        self,
        num_classes: int,
        out_img_size: int = 64,
        threshold_iou: float = 0.5,
        threshold_kpoint_prob: float = 0.4,
        max_bbox_per_img: int = 5
    ):
        # IoU threshold applied when reading out the metric value.
        self.threshold_iou = threshold_iou
        self.map_metric = MetricBuilder.build_evaluation_metric("map_2d", async_mode=True, num_classes=num_classes)
        # Pre-bind conversion parameters so update() only passes the masks.
        self.masks_to_bboxes = partial(
            masks_to_bboxes,
            num_classes=num_classes,
            out_size=out_img_size,
            threshold=threshold_kpoint_prob,
            max_bbox_per_img=max_bbox_per_img
        )

    def update(self, predict: torch.Tensor, gt: torch.Tensor):
        # Convert mask batches to per-image bbox lists and feed the metric
        # one (prediction, ground-truth) pair per image.
        bboxes_gt_batch = self.masks_to_bboxes(gt)
        bboxes_predict_batch = self.masks_to_bboxes(predict, is_predict=True)
        for bboxes_predict, bboxes_gt in zip(bboxes_predict_batch, bboxes_gt_batch):
            self.map_metric.add(np.array(bboxes_predict), np.array(bboxes_gt))

    def pascal_map_value(self, reset: bool = True):
        # Read out mAP at the configured IoU threshold; optionally clear the
        # accumulated state for the next evaluation round.
        pascal_map = self.map_metric.value(iou_thresholds=self.threshold_iou)['mAP']
        if reset:
            self.map_metric.reset()
        return pascal_map
| [
"numpy.array",
"functools.partial",
"mean_average_precision.MetricBuilder.build_evaluation_metric"
] | [((480, 573), 'mean_average_precision.MetricBuilder.build_evaluation_metric', 'MetricBuilder.build_evaluation_metric', (['"""map_2d"""'], {'async_mode': '(True)', 'num_classes': 'num_classes'}), "('map_2d', async_mode=True,\n num_classes=num_classes)\n", (517, 573), False, 'from mean_average_precision import MetricBuilder\n'), ((601, 745), 'functools.partial', 'partial', (['masks_to_bboxes'], {'num_classes': 'num_classes', 'out_size': 'out_img_size', 'threshold': 'threshold_kpoint_prob', 'max_bbox_per_img': 'max_bbox_per_img'}), '(masks_to_bboxes, num_classes=num_classes, out_size=out_img_size,\n threshold=threshold_kpoint_prob, max_bbox_per_img=max_bbox_per_img)\n', (608, 745), False, 'from functools import partial\n'), ((1124, 1148), 'numpy.array', 'np.array', (['bboxes_predict'], {}), '(bboxes_predict)\n', (1132, 1148), True, 'import numpy as np\n'), ((1150, 1169), 'numpy.array', 'np.array', (['bboxes_gt'], {}), '(bboxes_gt)\n', (1158, 1169), True, 'import numpy as np\n')] |
from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib as matplotlib
from matplotlib import patheffects
import numpy as np
import math as math
import random as rand
import os, sys, csv
import pandas as pd
#matplotlib.pyplot.xkcd(scale=.5, length=100, randomness=2)
#rcParams['path.effects'] = [patheffects.withStroke(linewidth=.5)]
# Most recent Wiener increments (written by lif_euler_stoch, read by the trial loop).
dW1, dW2, dW3 = 0, 0 ,0
np.random.seed() #42
def lif_euler(dt, v1, v2, I1, I2, coupling=None):
    """One explicit-Euler step for two diffusively coupled LIF neurons.

    :param dt: integration time step
    :param v1, v2: current membrane potentials
    :param I1, I2: input currents
    :param coupling: coupling strength; defaults to the module-level `gamma`
        (parameter added for generality/testability; default behavior unchanged)
    :return: [v1_next, v2_next]
    """
    g = gamma if coupling is None else coupling
    return [v1 + dt * (-v1 + g * (v2 - v1) + I1),
            v2 + dt * (-v2 + g * (v1 - v2) + I2)]
def lif_euler_stoch(dt, v1, v2, I1, I2, s1, s2, s3, coupling=None):
    """One Euler-Maruyama step for two coupled LIF neurons with noisy input.

    :param dt: integration time step
    :param v1, v2: current membrane potentials
    :param I1, I2: mean input currents
    :param s1, s2: amplitudes of the independent noise sources of each neuron
    :param s3: amplitude of the noise source common to both neurons
    :param coupling: coupling strength; defaults to the module-level `gamma`
        (parameter added for generality/testability; default behavior unchanged)
    :return: [v1_next, v2_next]

    Side effect: stores the sampled Wiener increments in the module globals
    dW1/dW2/dW3 so the caller can reconstruct the effective input currents.
    """
    global dW1, dW2, dW3
    g = gamma if coupling is None else coupling
    sqrt_dt = math.sqrt(dt)
    dW1 = s1 * sqrt_dt * np.random.randn()
    dW2 = s2 * sqrt_dt * np.random.randn()
    dW3 = s3 * sqrt_dt * np.random.randn()
    return [v1 + dt * (-v1 + g * (v2 - v1) + I1) + dW1 + dW3,
            v2 + dt * (-v2 + g * (v1 - v2) + I2) + dW2 + dW3]
# Model parameters: coupling strength gamma and post-spike kick size beta.
gamma, beta = 0.1, 0.1
# Spiking threshold and reset potential.
Vth, Vr = 1, 0
dt = 0.001
nb_iterations = 1
phis1, phis2 = [], []  # NOTE(review): never used below in this chunk
maxtime = 40
for k in range(nb_iterations) :
    #v1_0, v2_0 = 0.7611728117817528, 0.1654125684129333 # Used XPPAUT to find ideal initial conditions s.t. we begin in antiphase with I = 1.4
    v1_0, v2_0 = 0.3764002759711251, 0.8546679415731656
    x1, x2 = [v1_0], [v2_0]
    t = [0]
    nb_spikes = 0
    I_baseline = 1.2
    I1 = [I_baseline]
    I2 = [I_baseline]
    # Integrate the stochastic LIF pair until maxtime, recording V and I traces.
    while t[-1] < maxtime :
        t.append(t[-1]+dt)
        next_values= lif_euler_stoch(dt, x1[-1], x2[-1], I1[-1], I2[-1], 0., 0., 0.02) # example of common input
        # Reconstruct the effective input currents from the noise increments
        # (dW1/dW2/dW3 were just written by lif_euler_stoch).
        I1.append(I_baseline + (dW1+dW3)/dt)
        I2.append(I_baseline + (dW2+dW3)/dt)
        # Threshold crossing: reset the spiking neuron and kick the other by
        # gamma*beta (which may itself trigger a reset).
        if next_values[0] > 1 :
            x1.append(0)
            nb_spikes += 1
            if next_values[1] + gamma*beta > 1 :
                x2.append(0)
            else :
                x2.append(next_values[1]+gamma*beta)
        elif next_values[1] > 1 :
            x2.append(0)
            if next_values[0] + gamma*beta > 1 :
                x1.append(0)
            else :
                x1.append(next_values[0]+gamma*beta)
        else :
            x1.append(next_values[0])
            x2.append(next_values[1])
    # A spike occurs iff there was a reset
    # Insert a visual spike marker (Vth+0.5) wherever a reset (a jump larger
    # than half the threshold-to-reset distance) happened, keeping all traces
    # aligned by duplicating the corresponding entries.
    for i in range(1,len(x1)) :
        if abs(x1[i]-x1[i-1]) > (Vth-Vr)/2 and x1[i] < 1 and x1[i-1] < 1:
            x1.insert(i, Vth+0.5)
            x2.insert(i, x2[i])
            I1.insert(i, I1[i])
            I2.insert(i, I2[i])
            t.insert(i, t[i])
    for i in range(1,len(x2)) :
        if abs(x2[i]-x2[i-1]) > (Vth-Vr)/2 and x2[i] < 1 and x2[i-1] < 1:
            x2.insert(i, Vth+0.5)
            x1.insert(i, x1[i])
            I1.insert(i, I1[i])
            I2.insert(i, I2[i])
            t.insert(i, t[i])
# Plot the input currents (top) and membrane potentials (bottom) of the trial.
fig, ax = plt.subplots(2, 1, figsize=(12,5), sharey='row', sharex='col')
ax[1].plot(t, x1, label='$V_{1}$', color='#aa3863')
ax[1].plot(t, x2, label='$V_{2}$', color='#3b7d86')
if I1 == I2 :
    ax[0].plot(t, I1, label='$I$', color='#ef9f07', alpha=0.8)
else:
    ax[0].plot(t, I1, label='$I_1$', color='#aa3863')
    ax[0].plot(t, I2, label='$I_2$', color='#3b7d86')
ax[0].legend(loc='upper right', fontsize=10)
ax[1].legend(loc='upper right', fontsize=10)
#ax[0].set_title('Noisy input current trial, $\sigma=0.0025, I_{base}=1.5, \gamma=0.4, \\beta=0.1$')
ax[0].set_title('Noisy input current trial, correlated stochastic input, $I_{mean}$=1.2, $\gamma$=0.1, $\\beta$=0.1', size=14)
ax[1].set_xlabel('Time ($10^-2$ s)')
plt.savefig('trial_example_dependent.svg')
plt.show()
| [
"numpy.random.seed",
"matplotlib.pyplot.show",
"math.sqrt",
"numpy.random.randn",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((424, 440), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (438, 440), True, 'import numpy as np\n'), ((2740, 2803), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(12, 5)', 'sharey': '"""row"""', 'sharex': '"""col"""'}), "(2, 1, figsize=(12, 5), sharey='row', sharex='col')\n", (2752, 2803), True, 'import matplotlib.pyplot as plt\n'), ((3509, 3551), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""trial_example_dependent.svg"""'], {}), "('trial_example_dependent.svg')\n", (3520, 3551), True, 'import matplotlib.pyplot as plt\n'), ((3556, 3566), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3564, 3566), True, 'import matplotlib.pyplot as plt\n'), ((673, 690), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (688, 690), True, 'import numpy as np\n'), ((718, 735), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (733, 735), True, 'import numpy as np\n'), ((763, 780), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (778, 780), True, 'import numpy as np\n'), ((659, 672), 'math.sqrt', 'math.sqrt', (['dt'], {}), '(dt)\n', (668, 672), True, 'import math as math\n'), ((704, 717), 'math.sqrt', 'math.sqrt', (['dt'], {}), '(dt)\n', (713, 717), True, 'import math as math\n'), ((749, 762), 'math.sqrt', 'math.sqrt', (['dt'], {}), '(dt)\n', (758, 762), True, 'import math as math\n')] |
import argparse
import os
import matplotlib.pyplot as plt
import librosa
from tqdm import tqdm
import numpy as np
SILENCE_THRESHOLD = 60
FRAME_LENGTH = 2048
WIN_LENGTH = 1024
HOP_LENGTH = 512
HOP_LENGTH_2 = 256
N_FFT = 1024
NUM_MELS = 80
FMIN = 0
FMAX = 8000
def wav_to_mel(path, output_path, sample_rate):
    """Load a WAV, trim silence, compute a log-mel spectrogram and save it as .npy."""
    audio = librosa.load(path, sr=sample_rate)[0]

    # Normalise to [-1, 1] if the waveform clips.
    peak_amplitude = np.abs(audio).max()
    if peak_amplitude > 1.0:
        audio /= peak_amplitude

    # Trim silence
    audio, _ = librosa.effects.trim(
        audio, top_db=SILENCE_THRESHOLD, frame_length=FRAME_LENGTH, hop_length=HOP_LENGTH)

    # Convert to MEL
    stft = librosa.stft(y=audio, n_fft=N_FFT, hop_length=HOP_LENGTH_2, win_length=WIN_LENGTH)
    mel = librosa.feature.melspectrogram(
        S=np.abs(stft), sr=sample_rate, n_fft=N_FFT, n_mels=NUM_MELS, fmin=FMIN, fmax=FMAX)

    # Normalise (clip avoids log(0)) and save
    log_mel = np.log(np.clip(mel, a_min=1.0e-5, a_max=None))
    np.save(output_path, log_mel, allow_pickle=False)
if __name__ == "__main__":
    """ Script to generate MELs from wavs """
    parser = argparse.ArgumentParser(description="Convert WAVs to MEL spectograms")
    parser.add_argument("-w", "--wavs", help="Text file path", type=str, required=True)
    parser.add_argument("-o", "--output", help="Output path", type=str, required=True)
    parser.add_argument("--sample_rate", help="Audio sample rate", type=int, default=22050)
    args = parser.parse_args()

    # Ensure the output folder exists before writing .npy files into it.
    os.makedirs(args.output, exist_ok=True)

    # Convert every file in the input folder to a same-named .npy spectrogram.
    for f in tqdm(os.listdir(args.wavs)):
        wav_path = os.path.join(args.wavs, f)
        output_path = os.path.join(args.output, f.replace(".wav", ".npy"))
        wav_to_mel(wav_path, output_path, args.sample_rate)
| [
"os.listdir",
"numpy.save",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.log",
"os.makedirs",
"librosa.effects.trim",
"numpy.clip",
"librosa.load",
"os.path.join",
"librosa.stft"
] | [((590, 675), 'librosa.stft', 'librosa.stft', ([], {'y': 'wav', 'n_fft': 'N_FFT', 'hop_length': 'HOP_LENGTH_2', 'win_length': 'WIN_LENGTH'}), '(y=wav, n_fft=N_FFT, hop_length=HOP_LENGTH_2, win_length=WIN_LENGTH\n )\n', (602, 675), False, 'import librosa\n'), ((815, 850), 'numpy.clip', 'np.clip', (['S'], {'a_min': '(1e-05)', 'a_max': 'None'}), '(S, a_min=1e-05, a_max=None)\n', (822, 850), True, 'import numpy as np\n'), ((860, 869), 'numpy.log', 'np.log', (['S'], {}), '(S)\n', (866, 869), True, 'import numpy as np\n'), ((885, 928), 'numpy.save', 'np.save', (['output_path', 'S'], {'allow_pickle': '(False)'}), '(output_path, S, allow_pickle=False)\n', (892, 928), True, 'import numpy as np\n'), ((1017, 1087), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert WAVs to MEL spectograms"""'}), "(description='Convert WAVs to MEL spectograms')\n", (1040, 1087), False, 'import argparse\n'), ((1391, 1430), 'os.makedirs', 'os.makedirs', (['args.output'], {'exist_ok': '(True)'}), '(args.output, exist_ok=True)\n', (1402, 1430), False, 'import os\n'), ((321, 355), 'librosa.load', 'librosa.load', (['path'], {'sr': 'sample_rate'}), '(path, sr=sample_rate)\n', (333, 355), False, 'import librosa\n'), ((456, 562), 'librosa.effects.trim', 'librosa.effects.trim', (['wav'], {'top_db': 'SILENCE_THRESHOLD', 'frame_length': 'FRAME_LENGTH', 'hop_length': 'HOP_LENGTH'}), '(wav, top_db=SILENCE_THRESHOLD, frame_length=\n FRAME_LENGTH, hop_length=HOP_LENGTH)\n', (476, 562), False, 'import librosa\n'), ((1450, 1471), 'os.listdir', 'os.listdir', (['args.wavs'], {}), '(args.wavs)\n', (1460, 1471), False, 'import os\n'), ((1493, 1519), 'os.path.join', 'os.path.join', (['args.wavs', 'f'], {}), '(args.wavs, f)\n', (1505, 1519), False, 'import os\n'), ((370, 381), 'numpy.abs', 'np.abs', (['wav'], {}), '(wav)\n', (376, 381), True, 'import numpy as np\n'), ((712, 721), 'numpy.abs', 'np.abs', (['D'], {}), '(D)\n', (718, 721), True, 'import numpy as np\n')] |
import os
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from pytorch_pretrained_bert.modeling import WEIGHTS_NAME, CONFIG_NAME, BertConfig
from pytorch_pretrained_bert.optimization import BertAdam
from model import HLG
from train_evaluate import train, evaluate
from utils import get_device
# cuDNN runtime configuration: enable auto-tuned kernels (faster for fixed
# input sizes) and allow non-deterministic algorithms.
cudnn.benchmark = True
cudnn.deterministic = False
cudnn.enabled = True
def main(config, model_id, data_processor, load_data):
if not os.path.exists(config.output_dir + model_id):
os.makedirs(config.output_dir + model_id)
if not os.path.exists(config.cache_dir + model_id):
os.makedirs(config.cache_dir + model_id)
output_model_file = os.path.join(config.output_dir, model_id, WEIGHTS_NAME)
output_config_file = os.path.join(config.output_dir, model_id, CONFIG_NAME)
gpu_ids = [int(device_id) for device_id in config.gpu_ids.split()]
device, n_gpu = get_device(gpu_ids[0])
if n_gpu > 1:
n_gpu = len(gpu_ids)
config.train_batch_size = config.train_batch_size // config.gradient_accumulation_steps
random.seed(config.seed)
np.random.seed(config.seed)
torch.manual_seed(config.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(config.seed)
processor = data_processor()
label_list = processor.get_labels()
num_labels = len(label_list)
# Train and dev
if config.do_train:
train_dataloader, train_examples_len = load_data(
config.data_dir, processor, config.max_seq_length, config.train_batch_size, "train", config.num_workers)
dev_dataloader, _ = load_data(
config.data_dir, processor, config.max_seq_length, config.dev_batch_size, "dev", config.num_workers)
num_train_optimization_steps = int(
train_examples_len / config.train_batch_size / config.gradient_accumulation_steps) * (
config.num_train_epochs + 1)
model = HLG.from_pretrained(config.bert_model_dir, cache_dir=config.cache_dir, num_labels=num_labels)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=gpu_ids)
""" 优化器准备 """
param_optimizer = list(model.named_parameters())
bert_parameters = [(n, p) for n, p in param_optimizer if 'bert' in n]
model_parameters = [(n, p) for n, p in param_optimizer if 'bert' not in n]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in bert_parameters if not any(
nd in n for nd in no_decay)], 'weight_decay': 0.01, 'lr': config.bert_learning_rate},
{'params': [p for n, p in bert_parameters if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0, 'lr': config.bert_learning_rate},
{'params': [p for n, p in model_parameters if not any(
nd in n for nd in no_decay)], 'weight_decay': 0.01, 'lr': config.model_learning_rate},
{'params': [p for n, p in model_parameters if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0, 'lr': config.model_learning_rate}
]
optimizer = BertAdam(optimizer_grouped_parameters,
warmup=config.warmup_proportion,
t_total=num_train_optimization_steps)
""" 损失函数准备 """
criterion = nn.CrossEntropyLoss()
criterion = criterion.to(device)
train(config.num_train_epochs, n_gpu, model, train_dataloader, dev_dataloader, optimizer, criterion,
config.gradient_accumulation_steps, device, label_list, output_model_file, output_config_file,
config.early_stop)
else:
bert_config = BertConfig(output_config_file)
model = HLG(bert_config, num_labels=num_labels)
model.load_state_dict(torch.load(output_model_file))
model.to(device)
""" Test """
test_dataloader, _ = load_data(
config.data_dir, processor, config.max_seq_length, config.test_batch_size, "test", config.num_workers)
criterion = nn.CrossEntropyLoss()
criterion = criterion.to(device)
test_loss, test_acc, test_report, test_auc = evaluate(model, test_dataloader, criterion, device, label_list)
print("-------------- Test -------------")
print(f'\t Loss: {test_loss: .3f} | Acc: {test_acc * 100: .3f} % | AUC:{test_auc}')
for label in label_list:
print('\t {}: Precision: {} | recall: {} | f1 score: {}'.format(
label, test_report[label]['precision'], test_report[label]['recall'], test_report[label]['f1-score']))
print_list = ['macro avg', 'weighted avg']
for label in print_list:
print('\t {}: Precision: {} | recall: {} | f1 score: {}'.format(
label, test_report[label]['precision'], test_report[label]['recall'], test_report[label]['f1-score']))
| [
"numpy.random.seed",
"utils.get_device",
"os.makedirs",
"pytorch_pretrained_bert.optimization.BertAdam",
"model.HLG.from_pretrained",
"torch.manual_seed",
"torch.load",
"torch.nn.CrossEntropyLoss",
"os.path.exists",
"pytorch_pretrained_bert.modeling.BertConfig",
"train_evaluate.evaluate",
"tor... | [((720, 775), 'os.path.join', 'os.path.join', (['config.output_dir', 'model_id', 'WEIGHTS_NAME'], {}), '(config.output_dir, model_id, WEIGHTS_NAME)\n', (732, 775), False, 'import os\n'), ((801, 855), 'os.path.join', 'os.path.join', (['config.output_dir', 'model_id', 'CONFIG_NAME'], {}), '(config.output_dir, model_id, CONFIG_NAME)\n', (813, 855), False, 'import os\n'), ((948, 970), 'utils.get_device', 'get_device', (['gpu_ids[0]'], {}), '(gpu_ids[0])\n', (958, 970), False, 'from utils import get_device\n'), ((1116, 1140), 'random.seed', 'random.seed', (['config.seed'], {}), '(config.seed)\n', (1127, 1140), False, 'import random\n'), ((1145, 1172), 'numpy.random.seed', 'np.random.seed', (['config.seed'], {}), '(config.seed)\n', (1159, 1172), True, 'import numpy as np\n'), ((1177, 1207), 'torch.manual_seed', 'torch.manual_seed', (['config.seed'], {}), '(config.seed)\n', (1194, 1207), False, 'import torch\n'), ((4162, 4183), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4181, 4183), True, 'import torch.nn as nn\n'), ((4271, 4334), 'train_evaluate.evaluate', 'evaluate', (['model', 'test_dataloader', 'criterion', 'device', 'label_list'], {}), '(model, test_dataloader, criterion, device, label_list)\n', (4279, 4334), False, 'from train_evaluate import train, evaluate\n'), ((493, 537), 'os.path.exists', 'os.path.exists', (['(config.output_dir + model_id)'], {}), '(config.output_dir + model_id)\n', (507, 537), False, 'import os\n'), ((547, 588), 'os.makedirs', 'os.makedirs', (['(config.output_dir + model_id)'], {}), '(config.output_dir + model_id)\n', (558, 588), False, 'import os\n'), ((601, 644), 'os.path.exists', 'os.path.exists', (['(config.cache_dir + model_id)'], {}), '(config.cache_dir + model_id)\n', (615, 644), False, 'import os\n'), ((654, 694), 'os.makedirs', 'os.makedirs', (['(config.cache_dir + model_id)'], {}), '(config.cache_dir + model_id)\n', (665, 694), False, 'import os\n'), ((1234, 1273), 'torch.cuda.manual_seed_all', 
'torch.cuda.manual_seed_all', (['config.seed'], {}), '(config.seed)\n', (1260, 1273), False, 'import torch\n'), ((1990, 2087), 'model.HLG.from_pretrained', 'HLG.from_pretrained', (['config.bert_model_dir'], {'cache_dir': 'config.cache_dir', 'num_labels': 'num_labels'}), '(config.bert_model_dir, cache_dir=config.cache_dir,\n num_labels=num_labels)\n', (2009, 2087), False, 'from model import HLG\n'), ((3248, 3361), 'pytorch_pretrained_bert.optimization.BertAdam', 'BertAdam', (['optimizer_grouped_parameters'], {'warmup': 'config.warmup_proportion', 't_total': 'num_train_optimization_steps'}), '(optimizer_grouped_parameters, warmup=config.warmup_proportion,\n t_total=num_train_optimization_steps)\n', (3256, 3361), False, 'from pytorch_pretrained_bert.optimization import BertAdam\n'), ((3460, 3481), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3479, 3481), True, 'import torch.nn as nn\n'), ((3532, 3759), 'train_evaluate.train', 'train', (['config.num_train_epochs', 'n_gpu', 'model', 'train_dataloader', 'dev_dataloader', 'optimizer', 'criterion', 'config.gradient_accumulation_steps', 'device', 'label_list', 'output_model_file', 'output_config_file', 'config.early_stop'], {}), '(config.num_train_epochs, n_gpu, model, train_dataloader,\n dev_dataloader, optimizer, criterion, config.\n gradient_accumulation_steps, device, label_list, output_model_file,\n output_config_file, config.early_stop)\n', (3537, 3759), False, 'from train_evaluate import train, evaluate\n'), ((3807, 3837), 'pytorch_pretrained_bert.modeling.BertConfig', 'BertConfig', (['output_config_file'], {}), '(output_config_file)\n', (3817, 3837), False, 'from pytorch_pretrained_bert.modeling import WEIGHTS_NAME, CONFIG_NAME, BertConfig\n'), ((3854, 3893), 'model.HLG', 'HLG', (['bert_config'], {'num_labels': 'num_labels'}), '(bert_config, num_labels=num_labels)\n', (3857, 3893), False, 'from model import HLG\n'), ((2153, 2201), 'torch.nn.DataParallel', 'torch.nn.DataParallel', 
(['model'], {'device_ids': 'gpu_ids'}), '(model, device_ids=gpu_ids)\n', (2174, 2201), False, 'import torch\n'), ((3924, 3953), 'torch.load', 'torch.load', (['output_model_file'], {}), '(output_model_file)\n', (3934, 3953), False, 'import torch\n')] |
import argparse
import json
import logging
import os
import random
import shutil
import sys
from argparse import Namespace
from datetime import datetime
from itertools import chain, combinations
import torch
import numpy as np
from tools.config import Config
logger = logging.getLogger(__name__)
def set_seed(args: Config):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def parse_args() -> Namespace:
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', type=str, default=None,)
return parser.parse_args()
def initialize_logging(to_file=True, config=None):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%d-%m-%Y %H:%M:%S')
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
if to_file:
fh = logging.FileHandler(os.path.join(config.output_dir, 'output.log'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
def write_questions_and_facts_to_file(ds):
challenge = ds.qa_feats[ds.df_qas.focusnotes == 'Challenge']
with open('./challenge-qas-facts.txt', 'w') as f:
for i in range(len(challenge.index)):
id = challenge.iloc[i].id
question = challenge.iloc[i].original
gold_idxs = challenge.iloc[i].gold_facts
facts = list(ds.df_facts.iloc[gold_idxs].text)
f.write('Question {}: {}\n'.format(id, question))
f.writelines('Fact {}: '.format(i) + fact + '\n' for i, fact in enumerate(facts))
f.write('\n')
def write_questions_and_facts_to_file_2(vds_chains):
with open('./data/dataset-samples.txt', 'w') as f:
for index, row in vds_chains.qa_feats.iterrows():
gold_idxs = row.gold_facts
facts = list(vds_chains.df_facts.iloc[gold_idxs].text)
fact_roles = vds_chains.qf_roles.loc[
(vds_chains.qf_roles.q_idx == index) & (vds_chains.qf_roles.f_idx.isin(gold_idxs))]
f.write('Question {}: {}\n'.format(row.id, row.original))
f.writelines('Fact {} - Role {}: '.format(i, fact_roles.loc[fact_roles.f_idx == fact_idx, 'role'].iloc[
0]) + fact + '\n'
for i, (fact_idx, fact) in enumerate(zip(gold_idxs, facts)))
f.write('\n')
def print_predicted_facts(preds, fact_id, ds):
for i, f_id in enumerate(preds[fact_id][:15]):
gold = ds.fact_uid_2_idx[f_id] in ds.qa_feats.iloc[
ds.qa_uid_2_idx[fact_id]].gold_facts
print(fact_id)
print(
'Fact {} - {}: {}'.format(i, gold,
ds.fact_feats.loc[ds.fact_feats.id == f_id].iloc[0].original)
)
def get_output_dir(config: Config) -> str:
output_dir = os.path.join(
config.output_dir,
'{}_{}_{}_{}'.format(config.model_type, config.algo, str(datetime.now().strftime('%Y-%m-%d_%Hh%M')), os.getpid())
)
if config.debug:
output_dir += '_debug'
if config.task == '19':
output_dir += '_19'
if config.v2:
output_dir += '_v2'
return output_dir
def powerset(iterable):
"""powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def plot(sttl, i, loss_ylim=None, constr_ylim=None):
from matplotlib import pyplot as plt
plt.plot([s['loss'] for s in sttl])
if loss_ylim is not None:
plt.ylim(top=loss_ylim)
plt.savefig('data/loss_%i.png' % i)
plt.close()
plt.plot([s['other']['lambda'] for s in sttl])
plt.savefig('data/lambda_%i.png' % i)
plt.close()
plt.plot([s['other']['constraint'] for s in sttl])
if constr_ylim is not None:
plt.ylim(top=constr_ylim)
plt.savefig('data/constraint_%i.png' % i)
plt.close()
def get_maps(file, start):
with open(file, 'r') as f:
stats = json.load(f)
sttl = stats['stat_points'][start:]
return [s['map'] for s in sttl if s['map'] is not None]
def open_and_plot_stats(file, start, i, ly=None, cy=None):
with open(file, 'r') as f:
stats = json.load(f)
sttl = stats['stat_points'][start:]
plot(sttl, i, ly, cy)
def remove_debug_folders(root_dir):
for path in [f.path for f in os.scandir(root_dir) if f.is_dir() and f.path.endswith('debug')]:
shutil.rmtree(path)
def map_from_txt_lines(path):
with open(path, 'r') as f:
lines = f.readlines()
results = {}
for line in lines:
key, value = line.rstrip('\n').split('\t')
key, value = key.lower(), value.lower()
old = results.get(key, [])
results[key] = old + [value]
from rankers.utils import mean_average_precision_score
from datasets.factory import DatasetFactory
from rankers.chain_ranker import set_up_experiment
ranker, config, tokenizer = set_up_experiment('./config/chains.json')
vds = DatasetFactory.load_and_cache_dataset(config, tokenizer, config.val_qa_path, valid=True)
return mean_average_precision_score(vds.gold, results)
def h_to_s(h, m, s):
return 60 * (m + 60 * h) + s
def s_to_h(s):
h = s // 3600
mrest = s % 3600
m = mrest // 60
srest = mrest % 60
return h, m, srest
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"logging.getLogger",
"logging.Formatter",
"rankers.chain_ranker.set_up_experiment",
"shutil.rmtree",
"os.path.join",
"matplotlib.pyplot.close",
"datasets.factory.DatasetFactory.load_and_cache_dataset",
"random.seed",
"rankers.utils.mean_average_pre... | [((271, 298), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (288, 298), False, 'import logging\n'), ((333, 355), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (344, 355), False, 'import random\n'), ((360, 385), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (374, 385), True, 'import numpy as np\n'), ((390, 418), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (407, 418), False, 'import torch\n'), ((534, 559), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (557, 559), False, 'import argparse\n'), ((723, 742), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (740, 742), False, 'import logging\n'), ((794, 910), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'datefmt': '"""%d-%m-%Y %H:%M:%S"""'}), "(fmt=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt=\n '%d-%m-%Y %H:%M:%S')\n", (811, 910), False, 'import logging\n'), ((944, 984), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (965, 984), False, 'import logging\n'), ((3728, 3763), 'matplotlib.pyplot.plot', 'plt.plot', (["[s['loss'] for s in sttl]"], {}), "([s['loss'] for s in sttl])\n", (3736, 3763), True, 'from matplotlib import pyplot as plt\n'), ((3830, 3865), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('data/loss_%i.png' % i)"], {}), "('data/loss_%i.png' % i)\n", (3841, 3865), True, 'from matplotlib import pyplot as plt\n'), ((3870, 3881), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3879, 3881), True, 'from matplotlib import pyplot as plt\n'), ((3886, 3932), 'matplotlib.pyplot.plot', 'plt.plot', (["[s['other']['lambda'] for s in sttl]"], {}), "([s['other']['lambda'] for s in sttl])\n", (3894, 3932), True, 'from matplotlib import pyplot as plt\n'), ((3937, 3974), 
'matplotlib.pyplot.savefig', 'plt.savefig', (["('data/lambda_%i.png' % i)"], {}), "('data/lambda_%i.png' % i)\n", (3948, 3974), True, 'from matplotlib import pyplot as plt\n'), ((3979, 3990), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3988, 3990), True, 'from matplotlib import pyplot as plt\n'), ((3995, 4045), 'matplotlib.pyplot.plot', 'plt.plot', (["[s['other']['constraint'] for s in sttl]"], {}), "([s['other']['constraint'] for s in sttl])\n", (4003, 4045), True, 'from matplotlib import pyplot as plt\n'), ((4116, 4157), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('data/constraint_%i.png' % i)"], {}), "('data/constraint_%i.png' % i)\n", (4127, 4157), True, 'from matplotlib import pyplot as plt\n'), ((4162, 4173), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4171, 4173), True, 'from matplotlib import pyplot as plt\n'), ((5213, 5254), 'rankers.chain_ranker.set_up_experiment', 'set_up_experiment', (['"""./config/chains.json"""'], {}), "('./config/chains.json')\n", (5230, 5254), False, 'from rankers.chain_ranker import set_up_experiment\n'), ((5265, 5357), 'datasets.factory.DatasetFactory.load_and_cache_dataset', 'DatasetFactory.load_and_cache_dataset', (['config', 'tokenizer', 'config.val_qa_path'], {'valid': '(True)'}), '(config, tokenizer, config.val_qa_path,\n valid=True)\n', (5302, 5357), False, 'from datasets.factory import DatasetFactory\n'), ((5365, 5412), 'rankers.utils.mean_average_precision_score', 'mean_average_precision_score', (['vds.gold', 'results'], {}), '(vds.gold, results)\n', (5393, 5412), False, 'from rankers.utils import mean_average_precision_score\n'), ((450, 487), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (476, 487), False, 'import torch\n'), ((3802, 3825), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'top': 'loss_ylim'}), '(top=loss_ylim)\n', (3810, 3825), True, 'from matplotlib import pyplot as plt\n'), ((4086, 4111), 'matplotlib.pyplot.ylim', 
'plt.ylim', ([], {'top': 'constr_ylim'}), '(top=constr_ylim)\n', (4094, 4111), True, 'from matplotlib import pyplot as plt\n'), ((4250, 4262), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4259, 4262), False, 'import json\n'), ((4471, 4483), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4480, 4483), False, 'import json\n'), ((4695, 4714), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (4708, 4714), False, 'import shutil\n'), ((1122, 1167), 'os.path.join', 'os.path.join', (['config.output_dir', '"""output.log"""'], {}), "(config.output_dir, 'output.log')\n", (1134, 1167), False, 'import os\n'), ((3232, 3243), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3241, 3243), False, 'import os\n'), ((3583, 3601), 'itertools.combinations', 'combinations', (['s', 'r'], {}), '(s, r)\n', (3595, 3601), False, 'from itertools import chain, combinations\n'), ((4621, 4641), 'os.scandir', 'os.scandir', (['root_dir'], {}), '(root_dir)\n', (4631, 4641), False, 'import os\n'), ((3188, 3202), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3200, 3202), False, 'from datetime import datetime\n')] |
from pandas import date_range, Series, DatetimeIndex, concat
from pandas.core.generic import NDFrame
from pandas.tseries.frequencies import to_offset
from numpy import array
from numpy.random import choice, seed
from irradiance_synth.ts_bootstrap.stitch import stitch
from irradiance_synth.ts_bootstrap.pool_selector import NullPoolSelector
def ts_bootstrap(data, index, chunk_size='D', pool_selector=None, random_seed=None, stitch_boundaries=False):
"""Sample from chunks of a timeseries or timedataframe to produce a new series or dataframe with a given index.
The new data is assembled in chunks of a fixed `chunk_size` (a pandas offset string).
Parameters
----------
data : pandas.NDFrame
The timeseries data to utilise
index : pandas.DateTimeIndex
The "destination" index that we want to produce data onto.
Note that `index` must have a freq attribute defined i.e. it must be a fixed frequency index
chunk_size : str
A pandas date offset string (e.g. 'W' for week, 'A' for year, 'M' for month)
This defines the size of chunks to be sampled; the output series is assembled in chunks of this size.
pool_selector:
random_seed : Number
The random seed to pass to numpy when sampling. If left as None, resampling
will produce non-deterministic samples. Passing any other value will ensure
that the same "random" sample is always produced for the same inputs.
TODO
----
* allow a user-defined aggregation/interpolation method if the source data needs resampling
"""
if pool_selector is None:
pool_selector = NullPoolSelector()
if not isinstance(data, NDFrame):
raise Exception(f"expected series :: pandas.NDFrame, got {type(data)}")
if not isinstance(index, DatetimeIndex):
raise Exception(f"expected index :: pandas.DatetimeIndex, got {type(index)}")
if index.freq is None:
raise Exception("index must have a fixed freq attribute.")
if random_seed is not None:
seed(random_seed)
# the keys for the chunks of the new index that we need to fill
dest_keys = date_range(index[0], index[-1], freq=chunk_size)
# resample the input data so that it is in the same frequency as the target index
# TODO: aggregation function should be customisable
resampled_input = data.resample(index.freq).mean().dropna()
# use resample again to split our input data into chunks that we can sample from
resampler = resampled_input.resample(chunk_size)
def set_index(chunk, bin_id):
# given a chunk and a target key, try to assign the index
if chunk is None:
return None
chunk.index = date_range(start=bin_id, periods=len(chunk), freq=index.freq)
return chunk
# iterate over the destination keys, yielding a pool of source chunks
chunk_pools = (pool_selector.get_pool(list(resampler.groups.keys()), key) for key in dest_keys)
# iterate over the pools, and select a random chunk from each
chunks = (
resampler.get_group(choice(chunk_pool))
for chunk_pool in chunk_pools
)
# reindex each chunk using the destination keys
reindexed_chunks = (set_index(chunk, key) for chunk, key in zip(chunks, dest_keys))
# concat all the chunks into a pandas series
out = concat(reindexed_chunks)
if stitch_boundaries:
# TODO: pass in window size for stitching
return stitch(out, dest_keys[1:])
else:
return out
| [
"irradiance_synth.ts_bootstrap.pool_selector.NullPoolSelector",
"irradiance_synth.ts_bootstrap.stitch.stitch",
"numpy.random.seed",
"pandas.date_range",
"numpy.random.choice",
"pandas.concat"
] | [((2152, 2200), 'pandas.date_range', 'date_range', (['index[0]', 'index[-1]'], {'freq': 'chunk_size'}), '(index[0], index[-1], freq=chunk_size)\n', (2162, 2200), False, 'from pandas import date_range, Series, DatetimeIndex, concat\n'), ((3358, 3382), 'pandas.concat', 'concat', (['reindexed_chunks'], {}), '(reindexed_chunks)\n', (3364, 3382), False, 'from pandas import date_range, Series, DatetimeIndex, concat\n'), ((1643, 1661), 'irradiance_synth.ts_bootstrap.pool_selector.NullPoolSelector', 'NullPoolSelector', ([], {}), '()\n', (1659, 1661), False, 'from irradiance_synth.ts_bootstrap.pool_selector import NullPoolSelector\n'), ((2049, 2066), 'numpy.random.seed', 'seed', (['random_seed'], {}), '(random_seed)\n', (2053, 2066), False, 'from numpy.random import choice, seed\n'), ((3475, 3501), 'irradiance_synth.ts_bootstrap.stitch.stitch', 'stitch', (['out', 'dest_keys[1:]'], {}), '(out, dest_keys[1:])\n', (3481, 3501), False, 'from irradiance_synth.ts_bootstrap.stitch import stitch\n'), ((3088, 3106), 'numpy.random.choice', 'choice', (['chunk_pool'], {}), '(chunk_pool)\n', (3094, 3106), False, 'from numpy.random import choice, seed\n')] |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
import numpy as np
#from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.cm as cm
import argparse
from .Format import formdata
import os
def run(parser):
args = parser.parse_args()
if len(args.name)!=len(args.inputfile):
raise Exception('Equal number of sample names and input sample files are required!')
bedfile = args.bed
names = args.name
samples = args.inputfile
coverage = args.cov
if bedfile!='':
if not os.path.exists(bedfile):
raise Exception(bedfile+' does not exist!')
sample_number = len(samples)
flat_sample_name = []
ind = []
extend_label = []
index = 0
for n,label in zip(samples,names):
c = n.strip().split(',')
# replicates_number.append(len(c))
flat_sample_name.extend(c)
for i in range(len(c)):
extend_label.append(label)
ind.append(index)
index += 1
data=formdata(flat_sample_name, coverage, bedfile)
data1=np.transpose(data).astype(float)
X=data1
names = args.name
if not args.method in ['pca','TSNE']:
raise Exception("Unacceptable method")
if args.method=='pca':
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
else:
pca = PCA(n_components=50)
xx = pca.fit_transform(X)
tsne = TSNE(n_components=2)
X_r = tsne.fit_transform(xx)
colors = cm.rainbow(np.linspace(0, 1, len(flat_sample_name)))
fig=plt.figure()
plt.subplot(111)
plt.xlim(np.min(X_r[:,0])-0.2*np.abs(np.min(X_r[:,0])),np.max(X_r[:,0])+0.2*np.abs(np.max(X_r[:,0])))
plt.ylim(np.min(X_r[:,1])-0.2*np.abs(np.min(X_r[:,1])),np.max(X_r[:,1])+0.2*np.abs(np.max(X_r[:,1])))
fig.subplots_adjust(left=0.1,right=0.8)
markers = ['o', '^','v','<','>','1','2', '3','4','8','s','P','p', '*','H','h','x','X','D', 'd','|','_','+']
for i,label in enumerate(extend_label):
plt.scatter(X_r[i,0], X_r[i,1], c=colors[ind[i]], label=label, alpha=0.8, marker=markers[ind[i]],s=80)
plt.legend(loc='center left', bbox_to_anchor=(1.04, 0.5))
plt.savefig(args.output+'.pdf', bbox_inches="tight")
##python PCA.meth.py -i inputfile -n 5 -r 2 -N A B C D E -o PCA
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i','--inputfile',help="input file name", metavar="FILE", nargs='+', required=True)
parser.add_argument('-N','--name', nargs='+', help="the samples' names", required=True)
parser.add_argument('-o','--output',help="the output file")
parser.add_argument('-b','--bed',metavar="FILE",default='',help="If -b available, only cpgs in these regions will be selected in cluster.")
parser.add_argument('-c','--cov',type=int,help="minimal coverage of cpg sites for every sample,default=0",default=0)
parser.add_argument('-m','--method',help="pca or TSNE",default="pca")
run(parser)
| [
"matplotlib.pyplot.subplot",
"argparse.ArgumentParser",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"os.path.exists",
"numpy.transpose",
"matplotlib.pyplot.style.use",
"matplotlib.use",
"matplotlib.pyplot.figure",
"sklearn.decomposition.PCA",
"numpy.min",
... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((72, 102), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-white"""'], {}), "('seaborn-white')\n", (85, 102), True, 'import matplotlib.pyplot as plt\n'), ((1636, 1648), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1646, 1648), True, 'import matplotlib.pyplot as plt\n'), ((1653, 1669), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1664, 1669), True, 'import matplotlib.pyplot as plt\n'), ((2197, 2254), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center left"""', 'bbox_to_anchor': '(1.04, 0.5)'}), "(loc='center left', bbox_to_anchor=(1.04, 0.5))\n", (2207, 2254), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2313), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.output + '.pdf')"], {'bbox_inches': '"""tight"""'}), "(args.output + '.pdf', bbox_inches='tight')\n", (2270, 2313), True, 'import matplotlib.pyplot as plt\n'), ((2420, 2445), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2443, 2445), False, 'import argparse\n'), ((1351, 1370), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (1354, 1370), False, 'from sklearn.decomposition import PCA\n'), ((1433, 1453), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(50)'}), '(n_components=50)\n', (1436, 1453), False, 'from sklearn.decomposition import PCA\n'), ((1503, 1523), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (1507, 1523), False, 'from sklearn.manifold import TSNE\n'), ((2090, 2199), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_r[i, 0]', 'X_r[i, 1]'], {'c': 'colors[ind[i]]', 'label': 'label', 'alpha': '(0.8)', 'marker': 'markers[ind[i]]', 's': '(80)'}), '(X_r[i, 0], X_r[i, 1], c=colors[ind[i]], label=label, alpha=0.8,\n marker=markers[ind[i]], s=80)\n', (2101, 2199), True, 'import 
matplotlib.pyplot as plt\n'), ((625, 648), 'os.path.exists', 'os.path.exists', (['bedfile'], {}), '(bedfile)\n', (639, 648), False, 'import os\n'), ((1154, 1172), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (1166, 1172), True, 'import numpy as np\n'), ((1683, 1700), 'numpy.min', 'np.min', (['X_r[:, 0]'], {}), '(X_r[:, 0])\n', (1689, 1700), True, 'import numpy as np\n'), ((1729, 1746), 'numpy.max', 'np.max', (['X_r[:, 0]'], {}), '(X_r[:, 0])\n', (1735, 1746), True, 'import numpy as np\n'), ((1789, 1806), 'numpy.min', 'np.min', (['X_r[:, 1]'], {}), '(X_r[:, 1])\n', (1795, 1806), True, 'import numpy as np\n'), ((1835, 1852), 'numpy.max', 'np.max', (['X_r[:, 1]'], {}), '(X_r[:, 1])\n', (1841, 1852), True, 'import numpy as np\n'), ((1711, 1728), 'numpy.min', 'np.min', (['X_r[:, 0]'], {}), '(X_r[:, 0])\n', (1717, 1728), True, 'import numpy as np\n'), ((1757, 1774), 'numpy.max', 'np.max', (['X_r[:, 0]'], {}), '(X_r[:, 0])\n', (1763, 1774), True, 'import numpy as np\n'), ((1817, 1834), 'numpy.min', 'np.min', (['X_r[:, 1]'], {}), '(X_r[:, 1])\n', (1823, 1834), True, 'import numpy as np\n'), ((1863, 1880), 'numpy.max', 'np.max', (['X_r[:, 1]'], {}), '(X_r[:, 1])\n', (1869, 1880), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torchx as tx
from torchx.nn.hyper_scheduler import *
import numpy as np
from .base import Learner
from .aggregator import MultistepAggregatorWithInfo
from surreal.model.ppo_net import PPOModel, DiagGauss
from surreal.model.reward_filter import RewardFilter
from surreal.session import Config, extend_config, BASE_SESSION_CONFIG, BASE_LEARNER_CONFIG, ConfigError
class PPOLearner(Learner):
'''
PPOLearner: subclass of Learner that contains PPO algorithm logic
Attributes:
gpu_option: 'cpu' if not using GPU, 'cuda:all' otherwise
model: instance of PPOModel from surreal.model.ppo_net
ref_target_model: instance of PPOModel, kept to used as
reference policy
ppo_mode: string of either 'adapt' or 'clip' to determine
which variant of PPO is used. For details of variants
see https://arxiv.org/pdf/1707.06347.pdf
norm_adv: boolean flag -- whether to use batch advantage
normalization
use_z_filter: boolean flag -- whether to use obs Z-Filtering
actor/critic_optim: Adam Optimizer for policy and baseline network
actor/critic_lr_scheduler: Learning rate scheduler. details see
surreal.utils.pytorch.scheduler
aggregator: experience aggregator used to batch experiences.
for available aggregators, see surreal.learner.aggregator
pd: probability distribution class (Assumed as Diagonal Gaussian)
see surreal.model.ppo_net for details
important member functions:
private methods:
_clip_loss: computes the loss and various statistics
for 'clip' variant PPO
_clip_update: uses loss information to make policy update
_adapt_loss: computes loss and various statistics for
'adapt' variant of PPO
_adapt_update: uses loss info to make policy update
_value_loss: computes loss and various statistics for value function
_value_update: uses loss info to update value function
_gae_and_return: computes generalized advantage estimate and
corresponding N-step return. Details of algorithm can be found
here: https://arxiv.org/pdf/1506.02438.pdf
_advantage_and_return: basic advantage and N-step return estimate
_optimize: fucntion that makes policy and value function update
_post_publish: function that manages metrics and behavior after
parameter release
public methods:
learn: method to perform optimization and send to tensorplex for log
module_dict: returns the corresponding parameters
publish_parameter: publishes parameters in self.model to parameter server
'''
    def __init__(self, learner_config, env_config, session_config):
        """Build the PPO learner.

        Reads algorithm hyperparameters from ``learner_config``, constructs
        the actor-critic model plus a reference (target) copy used for KL
        regularization, the Adam optimizers with learning-rate schedulers,
        the experience aggregator, and the Gaussian action distribution.

        Args:
            learner_config: learner-side configuration (algo, replay,
                parameter_publish sub-configs)
            env_config: environment configuration (obs/action specs,
                pixel_input flag)
            session_config: session-wide configuration forwarded to the
                Learner base class
        """
        super().__init__(learner_config, env_config, session_config)
        # GPU setting
        self.current_iteration = 0
        self.global_step = 0
        if not torch.cuda.is_available():
            self.gpu_option = 'cpu'
        else:
            self.gpu_option = 'cuda:all'
        self.use_cuda = torch.cuda.is_available()
        if not self.use_cuda:
            self.log.info('Using CPU')
        else:
            self.log.info('Using GPU: {}'.format(self.gpu_option))
        # RL general parameters
        self.gamma = self.learner_config.algo.gamma
        self.lam = self.learner_config.algo.advantage.lam
        self.n_step = self.learner_config.algo.n_step
        self.use_z_filter = self.learner_config.algo.use_z_filter
        self.use_r_filter = self.learner_config.algo.use_r_filter
        self.norm_adv = self.learner_config.algo.advantage.norm_adv
        self.batch_size = self.learner_config.replay.batch_size
        self.action_dim = self.env_config.action_spec.dim[0]
        self.obs_spec = self.env_config.obs_spec
        self.init_log_sig = self.learner_config.algo.consts.init_log_sig
        # PPO parameters
        self.ppo_mode = self.learner_config.algo.ppo_mode
        self.if_rnn_policy = self.learner_config.algo.rnn.if_rnn_policy
        self.horizon = self.learner_config.algo.rnn.horizon
        self.lr_actor = self.learner_config.algo.network.lr_actor
        self.lr_critic = self.learner_config.algo.network.lr_critic
        self.epoch_policy = self.learner_config.algo.consts.epoch_policy
        self.epoch_baseline = self.learner_config.algo.consts.epoch_baseline
        self.kl_target = self.learner_config.algo.consts.kl_target
        self.adjust_threshold = self.learner_config.algo.consts.adjust_threshold
        self.reward_scale = self.learner_config.algo.advantage.reward_scale
        # PPO mode 'adapt'
        self.kl_cutoff_coeff = self.learner_config.algo.adapt_consts.kl_cutoff_coeff
        self.beta_init = self.learner_config.algo.adapt_consts.beta_init
        self.beta_range = self.learner_config.algo.adapt_consts.beta_range
        # PPO mode 'clip'
        self.clip_range = self.learner_config.algo.clip_consts.clip_range
        self.clip_epsilon_init = self.learner_config.algo.clip_consts.clip_epsilon_init
        if self.ppo_mode == 'adapt':
            self.beta = self.beta_init
            self.eta = self.kl_cutoff_coeff
            self.beta_upper = self.beta_range[1]
            self.beta_lower = self.beta_range[0]
            self.beta_adjust_threshold = self.adjust_threshold
        else:  # method == 'clip'
            self.clip_epsilon = self.clip_epsilon_init
            self.clip_adjust_threshold = self.adjust_threshold
            self.clip_upper = self.clip_range[1]
            self.clip_lower = self.clip_range[0]
        # learning rate setting:
        self.min_lr = self.learner_config.algo.network.anneal.min_lr
        self.lr_update_frequency = self.learner_config.algo.network.anneal.lr_update_frequency
        self.frames_to_anneal = self.learner_config.algo.network.anneal.frames_to_anneal
        num_updates = int(self.frames_to_anneal / self.learner_config.parameter_publish.exp_interval)
        # NOTE(review): eval() resolves the scheduler class from a config
        # string; assumes the config value is trusted (not user input).
        lr_scheduler = eval(self.learner_config.algo.network.anneal.lr_scheduler)
        self.exp_counter = 0
        self.kl_record = []
        with tx.device_scope(self.gpu_option):
            self.model = PPOModel(
                obs_spec=self.obs_spec,
                action_dim=self.action_dim,
                model_config=self.learner_config.model,
                use_cuda=self.use_cuda,
                init_log_sig=self.init_log_sig,
                use_z_filter=self.use_z_filter,
                if_pixel_input=self.env_config.pixel_input,
                rnn_config=self.learner_config.algo.rnn,
            )
            # reference copy of the policy, synced on parameter publish
            self.ref_target_model = PPOModel(
                obs_spec=self.obs_spec,
                action_dim=self.action_dim,
                model_config=self.learner_config.model,
                use_cuda=self.use_cuda,
                init_log_sig=self.init_log_sig,
                use_z_filter=self.use_z_filter,
                if_pixel_input=self.env_config.pixel_input,
                rnn_config=self.learner_config.algo.rnn,
            )
            self.ref_target_model.update_target_params(self.model)
            # Learning parameters and optimizer
            self.clip_actor_gradient = self.learner_config.algo.network.clip_actor_gradient
            self.actor_gradient_clip_value = self.learner_config.algo.network.actor_gradient_norm_clip
            self.clip_critic_gradient = self.learner_config.algo.network.clip_critic_gradient
            self.critic_gradient_clip_value = self.learner_config.algo.network.critic_gradient_norm_clip
            self.critic_optim = torch.optim.Adam(
                self.model.get_critic_params(),
                lr=self.lr_critic,
                weight_decay=self.learner_config.algo.network.critic_regularization
            )
            self.actor_optim = torch.optim.Adam(
                self.model.get_actor_params(),
                lr=self.lr_actor,
                weight_decay=self.learner_config.algo.network.actor_regularization
            )
            # learning rate scheduler
            self.actor_lr_scheduler = lr_scheduler(self.actor_optim,
                                                     num_updates,
                                                     update_freq=self.lr_update_frequency,
                                                     min_lr = self.min_lr)
            self.critic_lr_scheduler = lr_scheduler(self.critic_optim,
                                                      num_updates,
                                                      update_freq=self.lr_update_frequency,
                                                      min_lr = self.min_lr)
        # Experience Aggregator
        self.aggregator = MultistepAggregatorWithInfo(self.env_config.obs_spec,
                                                     self.env_config.action_spec)
        # probability distribution. Gaussian only for now
        self.pd = DiagGauss(self.action_dim)
        # placeholder for RNN hidden cells
        self.cells = None
        # Reward White-filtering
        if self.use_r_filter:
            self.reward_filter= RewardFilter()
    def _clip_loss(self, obs, actions, advantages, behave_pol):
        """
        Computes the loss with current data. also returns a dictionary of statistics
        which includes surrogate loss, clipped surrogate loss, policy entropy, clip
        constant
        return: surreal.utils.pytorch.GPUVariable, dict
        Args:
            obs: batch of observations in form of (batch_size, obs_dim)
            actions: batch of actions in form of (batch_size, act_dim)
            advantages: batch of normalized advantage, (batch_size, 1)
            behave_pol: batch of behavior policy (batch_size, 2 * act_dim)
        Returns:
            clip_loss: Variable for loss
            stats: dictionary of recorded statistics
        """
        learn_pol = self.model.forward_actor(obs, self.cells)
        learn_prob = self.pd.likelihood(actions, learn_pol)
        behave_prob = self.pd.likelihood(actions, behave_pol)
        # importance sampling ratio between current and behavior policy
        prob_ratio = learn_prob / behave_prob
        cliped_ratio = torch.clamp(prob_ratio, 1 - self.clip_epsilon,
                                    1 + self.clip_epsilon)
        surr = -prob_ratio * advantages.view(-1, 1)
        cliped_surr = -cliped_ratio * advantages.view(-1, 1)
        # pessimistic bound: elementwise max of the two negated surrogates
        # (equivalent to PPO's min over the positive surrogates)
        clip_loss = torch.cat([surr, cliped_surr], 1).max(1)[0].mean()
        stats = {
            "_surr_loss": surr.mean().item(),
            "_clip_surr_loss": clip_loss.item(),
            "_entropy": self.pd.entropy(learn_pol).mean().item(),
            '_clip_epsilon': self.clip_epsilon
        }
        return clip_loss, stats
    def _clip_update(self, obs, actions, advantages, behave_pol):
        """
        Method that makes policy updates. calls _clip_loss method
        Note: self.clip_actor_gradient determines whether gradient is clipped
        return: dictionary of statistics to be sent to tensorplex server
        Args:
            obs: batch of observations in form of (batch_size, obs_dim)
            actions: batch of actions in form of (batch_size, act_dim)
            advantages: batch of normalized advantage, (batch_size, 1)
            behave_pol: batch of behavior policy (batch_size, 2 * act_dim)
        Returns:
            stats: dictionary of recorded statistics
        """
        loss, stats = self._clip_loss(obs, actions, advantages, behave_pol)
        self.model.clear_actor_grad()
        loss.backward()
        if self.clip_actor_gradient:
            # clip_grad_norm_ returns the pre-clip norm, logged for diagnostics
            stats['grad_norm_actor'] = nn.utils.clip_grad_norm_(
                self.model.get_actor_params(),
                self.actor_gradient_clip_value)
        self.actor_optim.step()
        return stats
    def _adapt_loss(self, obs, actions, advantages, behave_pol, ref_pol):
        """
        Computes the loss with current data. also returns a dictionary of statistics
        which includes surrogate loss, clipped surrogate loss, policy entropy, adaptive
        KL penalty constant, policy KL divergence
        return: surreal.utils.pytorch.GPUVariable, dict
        Args:
            obs: batch of observations in form of (batch_size, obs_dim)
            actions: batch of actions in form of (batch_size, act_dim)
            advantages: batch of normalized advantage, (batch_size, 1)
            behave_pol: batch of behavior policy (batch_size, 2 * act_dim)
            ref_pol: batch of reference policy (batch_size, 2 * act_dim)
        Returns:
            loss: Variable for loss
            stats: dictionary of recorded statistics
        """
        learn_pol = self.model.forward_actor(obs, self.cells)
        prob_behave = self.pd.likelihood(actions, behave_pol)
        prob_learn = self.pd.likelihood(actions, learn_pol)
        kl = self.pd.kl(ref_pol, learn_pol).mean()
        # clamp denominator to avoid exploding importance weights
        surr = -(advantages.view(-1, 1) * (prob_learn/ torch.clamp(prob_behave, min=1e-2))).mean()
        loss = surr + self.beta * kl
        entropy = self.pd.entropy(learn_pol).mean()
        # quadratic "cutoff" penalty when KL exceeds twice the target
        if kl.item() - 2.0 * self.kl_target > 0:
            loss += self.eta * (kl - 2.0 * self.kl_target).pow(2)
        stats = {
            '_kl_loss_adapt': loss.item(),
            '_surr_loss': surr.item(),
            '_pol_kl': kl.item(),
            '_entropy': entropy.item(),
            '_beta': self.beta
        }
        return loss, stats
    def _adapt_update(self, obs, actions, advantages, behave_pol, ref_pol):
        """
        Method that makes policy updates. calls _adapt_loss method
        Note: self.clip_actor_gradient determines whether gradient is clipped
        return: dictionary of statistics to be sent to tensorplex server
        Args:
            obs: batch of observations in form of (batch_size, obs_dim)
            actions: batch of actions in form of (batch_size, act_dim)
            advantages: batch of normalized advantage, (batch_size, 1)
            behave_pol: batch of behavior policy (batch_size, 2 * act_dim)
            ref_pol: batch of reference policy (batch_size, 2 * act_dim)
        Returns:
            stats: dictionary of recorded statistics
        """
        loss, stats = self._adapt_loss(obs, actions, advantages, behave_pol, ref_pol)
        self.model.clear_actor_grad()
        loss.backward()
        if self.clip_actor_gradient:
            # clip_grad_norm_ returns the pre-clip norm, logged for diagnostics
            stats['grad_norm_actor'] = nn.utils.clip_grad_norm_(
                self.model.get_actor_params(),
                self.actor_gradient_clip_value)
        self.actor_optim.step()
        return stats
    def _value_loss(self, obs, returns):
        """
        Computes the loss with current data. also returns a dictionary of statistics
        which includes value loss and explained variance
        return: surreal.utils.pytorch.GPUVariable, dict
        Args:
            obs: batch of observations in form of (batch_size, obs_dim)
            returns: batch of N-step return estimate (batch_size,)
        Returns:
            loss: Variable for loss
            stats: dictionary of recorded statistics
        """
        values = self.model.forward_critic(obs, self.cells)
        # RNN critic emits (batch, time, 1); drop trailing singleton dim
        if len(values.size()) == 3: values = values.squeeze(2)
        # fraction of return variance captured by the value prediction
        explained_var = 1 - torch.var(returns - values) / torch.var(returns)
        loss = (values - returns).pow(2).mean()
        stats = {
            '_val_loss': loss.item(),
            '_val_explained_var': explained_var.item()
        }
        return loss, stats
    def _value_update(self, obs, returns):
        """
        Method that makes baseline function updates. calls _value_loss method
        Note: self.clip_critic_gradient determines whether gradient is clipped
        return: dictionary of statistics to be sent to tensorplex server
        Args:
            obs: batch of observations in form of (batch_size, obs_dim)
            returns: batch of N-step return estimate (batch_size,)
        Returns:
            stats: dictionary of recorded statistics
        """
        loss, stats = self._value_loss(obs, returns)
        self.model.clear_critic_grad()
        loss.backward()
        if self.clip_critic_gradient:
            # clip_grad_norm_ returns the pre-clip norm, logged for diagnostics
            stats['grad_norm_critic'] = nn.utils.clip_grad_norm_(
                self.model.get_critic_params(),
                self.critic_gradient_clip_value)
        self.critic_optim.step()
        return stats
    def _gae_and_return(self, obs, obs_next, rewards, dones):
        '''
        computes generalized advantage estimate and corresponding N-step return.
        Details of algorithm can be found here: https://arxiv.org/pdf/1506.02438.pdf
        Args:
            obs: batch of observations (batch_size, N-step , obs_dim)
            obs_next: batch of next observations (batch_size, 1 , obs_dim)
            rewards: batch of rewards (batch_size, N-step)
            dones: batch of termination flags (batch_size, N-step)
        Returns:
            advantages: batch of advantages; (batch_size, eff_len) in RNN
                mode, otherwise (batch_size, 1)
            returns: batch of return targets with matching shape
        '''
        with tx.device_scope(self.gpu_option):
            # per-step discount/GAE-decay factors gamma^i and lambda^i
            index_set = torch.tensor(range(self.n_step), dtype=torch.float32)
            gamma = torch.pow(self.gamma, index_set)
            lam = torch.pow(self.lam, index_set)
            obs_concat_var = {}
            for mod in obs.keys():
                obs_concat_var[mod] = {}
                for k in obs[mod].keys():
                    # append obs_next so the critic values all N+1 states at once
                    obs_concat_var[mod][k] = (torch.cat([obs[mod][k], obs_next[mod][k]], dim=1))
                    if not self.if_rnn_policy:
                        # feedforward critic expects flat (batch*time, ...) input
                        obs_shape = obs_concat_var[mod][k].size()
                        obs_concat_var[mod][k] = obs_concat_var[mod][k].view(-1, *obs_shape[2:])
            values = self.model.forward_critic(obs_concat_var, self.cells)
            values = values.view(self.batch_size, self.n_step + 1)
            # zero out bootstrap values after episode termination
            values[:, 1:] *= 1 - dones
            if self.if_rnn_policy:
                # TD residuals; GAE computed over a sliding window of length horizon
                tds = rewards + self.gamma * values[:, 1:] - values[:, :-1]
                eff_len = self.n_step - self.horizon + 1
                gamma = gamma[:self.horizon]
                lam = lam[:self.horizon]
                returns = torch.zeros(self.batch_size, eff_len)
                advs = torch.zeros(self.batch_size, eff_len)
                for step in range(eff_len):
                    returns[:, step] = torch.sum(gamma * rewards[:, step:step + self.horizon], 1) + \
                                       values[:, step + self.horizon] * (self.gamma ** self.horizon)
                    advs[:, step] = torch.sum(tds[:, step:step + self.horizon] * gamma * lam, 1)
                if self.norm_adv:
                    std = advs.std()
                    mean = advs.mean()
                    advs = (advs - mean) / max(std, 1e-4)
                return advs, returns
            else:
                # single-window N-step return bootstrapped from the last value
                returns = torch.sum(gamma * rewards, 1) + values[:, -1] * (self.gamma ** self.n_step)
                tds = rewards + self.gamma * values[:, 1:] - values[:, :-1]
                gae = torch.sum(tds * gamma * lam, 1)
                if self.norm_adv:
                    std = gae.std()
                    mean = gae.mean()
                    gae = (gae - mean) / max(std, 1e-4)
                return gae.view(-1, 1), returns.view(-1, 1)
    def _preprocess_batch_ppo(self, batch):
        '''
        Loading experiences from numpy to torch.FloatTensor type
        Args:
            batch: BeneDict of experiences containing following attributes
                'obs' - observation
                'actions' - actions
                'rewards' - rewards
                'obs_next' - next observation
                'persistent_infos' - action policy
                'onetime_infos' - RNN hidden cells or None
        Return:
            Benedict of torch.FloatTensors (converted in place and returned)
        '''
        with tx.device_scope(self.gpu_option):
            obs, actions, rewards, obs_next, done, persistent_infos, onetime_infos = (
                batch['obs'],
                batch['actions'],
                batch['rewards'],
                batch['obs_next'],
                batch['dones'],
                batch['persistent_infos'],
                batch['onetime_infos'],
            )
            # obs/obs_next are nested dicts: modality -> key -> array
            for modality in obs:
                for key in obs[modality]:
                    obs[modality][key] = (torch.tensor(obs[modality][key], dtype=torch.float32)).detach()
                    obs_next[modality][key] = (torch.tensor(obs_next[modality][key], dtype=torch.float32)).detach()
            actions = torch.tensor(actions, dtype=torch.float32)
            rewards = torch.tensor(rewards, dtype=torch.float32) * self.reward_scale
            if self.use_r_filter:
                # whiten rewards, then update the running filter statistics
                normed_reward = self.reward_filter.forward(rewards)
                self.reward_filter.update(rewards)
                rewards = normed_reward
            done = torch.tensor(done, dtype=torch.float32)
            if persistent_infos is not None:
                for i in range(len(persistent_infos)):
                    persistent_infos[i] = torch.tensor(persistent_infos[i], dtype=torch.float32).detach()
            if onetime_infos is not None:
                for i in range(len(onetime_infos)):
                    onetime_infos[i] = torch.tensor(onetime_infos[i], dtype=torch.float32).detach()
            # write converted tensors back into the batch container
            (
                batch['obs'],
                batch['actions'],
                batch['rewards'],
                batch['obs_next'],
                batch['dones'],
                batch['persistent_infos'],
                batch['onetime_infos'],
            ) = (
                obs,
                actions,
                rewards,
                obs_next,
                done,
                persistent_infos,
                onetime_infos
            )
            return batch
    def _optimize(self, obs, actions, rewards, obs_next, persistent_infos, onetime_infos, dones):
        '''
        main method for optimization that calls _adapt/clip_update and
        _value_update epoch_policy and epoch_baseline times respectively
        return: dictionary of tracked statistics
        Args:
            obs: batch of observations (batch_size, N-step , obs_dim)
            obs_next: batch of next observations (batch_size, 1 , obs_dim)
            actions: batch of actions (batch_size, N-step , act_dim)
            rewards: batch of rewards (batch_size, N-step)
            dones: batch of termination flags (batch_size, N-step)
            persistent_infos/onetime_infos: list of batched other attributes
                tracked, such as behavior policy, RNN hidden states and etc.
        Returns:
            dictionary of recorded statistics
        '''
        # convert everything to float tensor:
        with tx.device_scope(self.gpu_option):
            pds = persistent_infos[-1]
            if self.if_rnn_policy:
                # initialize RNN state from the rollout's first hidden cells
                h = (onetime_infos[0].transpose(0, 1).contiguous()).detach()
                c = (onetime_infos[1].transpose(0, 1).contiguous()).detach()
                self.cells = (h, c)
            advantages, returns = self._gae_and_return(obs,
                                                       obs_next,
                                                       rewards,
                                                       dones)
            advantages = advantages.detach()
            returns = returns.detach()
            if self.if_rnn_policy:
                h = self.cells[0].detach()
                c = self.cells[1].detach()
                self.cells = (h, c)
                # RNN mode trains on the first eff_len steps of the rollout
                eff_len = self.n_step - self.horizon + 1
                behave_pol = pds[:, :eff_len, :].contiguous().detach()
                actions_iter = actions[:, :eff_len, :].contiguous().detach()
            else:
                behave_pol = pds[:, 0, :].contiguous().detach()
                actions_iter = actions[:, 0, :].contiguous().detach()
            obs_iter = {}
            for mod in obs.keys():
                obs_iter[mod] = {}
                for k in obs[mod].keys():
                    if self.if_rnn_policy:
                        obs_iter[mod][k] = obs[mod][k][:, :self.n_step - self.horizon + 1, :].contiguous().detach()
                    else:
                        obs_iter[mod][k] = obs[mod][k][:, 0, :].contiguous().detach()
            ref_pol = self.ref_target_model.forward_actor(obs_iter, self.cells).detach()
            for ep in range(self.epoch_policy):
                if self.ppo_mode == 'clip':
                    stats = self._clip_update(obs_iter,
                                              actions_iter,
                                              advantages,
                                              behave_pol)
                else:
                    stats = self._adapt_update(obs_iter,
                                               actions_iter,
                                               advantages,
                                               behave_pol,
                                               ref_pol)
                curr_pol = self.model.forward_actor(obs_iter, self.cells).detach()
                kl = self.pd.kl(ref_pol, curr_pol).mean()
                stats['_pol_kl'] = kl.item()
                # early stopping: abort policy epochs if KL blows past target
                if kl.item() > self.kl_target * 4:
                    break
            self.kl_record.append(stats['_pol_kl'])
            for _ in range(self.epoch_baseline):
                baseline_stats = self._value_update(obs_iter, returns)
            # Collecting metrics and updating tensorplex
            for k in baseline_stats:
                stats[k] = baseline_stats[k]
            behave_likelihood = self.pd.likelihood(actions_iter, behave_pol)
            curr_likelihood = self.pd.likelihood(actions_iter, curr_pol)
            stats['_avg_return_targ'] = returns.mean().item()
            stats['_avg_log_sig'] = self.model.actor.log_var.mean().item()
            stats['_avg_behave_likelihood'] = behave_likelihood.mean().item()
            stats['_avg_is_weight'] = (curr_likelihood / (behave_likelihood + 1e-4)).mean().item()
            stats['_ref_behave_diff'] = self.pd.kl(ref_pol, behave_pol).mean().item()
            stats['_lr'] = self.actor_lr_scheduler.get_lr()[0]
            if self.use_z_filter:
                self.model.z_update(obs_iter)
                stats['obs_running_mean'] = np.mean(self.model.z_filter.running_mean())
                stats['obs_running_square'] = np.mean(self.model.z_filter.running_square())
                stats['obs_running_std'] = np.mean(self.model.z_filter.running_std())
            if self.use_r_filter:
                stats['reward_mean'] = self.reward_filter.reward_mean()
            return stats
    def learn(self, batch):
        '''
        main method for learning, calls _optimize. Also sends update stats
        to Tensorplex and takes a periodic checkpoint.
        Args:
            batch: pre-aggregated list of experiences rolled out by the agent
        '''
        self.current_iteration += 1
        # numpy -> torch conversion (in place)
        batch = self._preprocess_batch_ppo(batch)
        tensorplex_update_dict = self._optimize(
            batch.obs,
            batch.actions,
            batch.rewards,
            batch.obs_next,
            batch.persistent_infos,
            batch.onetime_infos,
            batch.dones,
        )
        self.periodic_checkpoint(
            global_steps=self.current_iteration,
            score=None,
        )
        self.tensorplex.add_scalars(tensorplex_update_dict, self.global_step)
        # exp_counter gates parameter publishing (see publish_parameter)
        self.exp_counter += self.batch_size
        self.global_step += 1
def module_dict(self):
'''
returns the corresponding parameters
'''
return {
'ppo': self.model,
}
    def publish_parameter(self, iteration, message=''):
        """
        Learner publishes latest parameters to the parameter server only when
        accumulated enough experiences specified by
        learner_config.parameter_publish.exp_interval
        Note: this overrides the base class publish_parameter method
        Args:
            iteration: the current number of learning iterations
            message: optional message, must be pickleable.
        """
        # exp_counter is accumulated in learn() and reset in _post_publish()
        if self.exp_counter >= self.learner_config.parameter_publish.exp_interval:
            self._ps_publisher.publish(iteration, message=message)
            self._post_publish()
    def _post_publish(self):
        '''
        function that manages metrics and behavior after parameter release
        Actions include:
            adjusts adaptive threshold for KL penalty for 'adapt' PPO
            adjusts adaptive prob ratio clip rate for 'clip' PPO
            clears KL-Divergence record
            clears experience counter after parameter release
            steps actor and critic learning rate scheduler
        '''
        final_kl = np.mean(self.kl_record)
        if self.ppo_mode == 'clip':  # adapts clip ratios
            # KL too large -> tighten the clip range (within [clip_lower, clip_upper])
            if final_kl > self.kl_target * self.clip_adjust_threshold[1]:
                if self.clip_lower < self.clip_epsilon:
                    self.clip_epsilon = self.clip_epsilon / self.learner_config.algo.clip_consts.scale_constant
            elif final_kl < self.kl_target * self.clip_adjust_threshold[0]:
                if self.clip_upper > self.clip_epsilon:
                    self.clip_epsilon = self.clip_epsilon * self.learner_config.algo.clip_consts.scale_constant
        else:  # adapt KL divergence penalty before returning the statistics
            # KL too large -> increase penalty beta (within [beta_lower, beta_upper])
            if final_kl > self.kl_target * self.beta_adjust_threshold[1]:
                if self.beta_upper > self.beta:
                    self.beta = self.beta * self.learner_config.algo.adapt_consts.scale_constant
            elif final_kl < self.kl_target * self.beta_adjust_threshold[0]:
                if self.beta_lower < self.beta:
                    self.beta = self.beta / self.learner_config.algo.adapt_consts.scale_constant
        # sync reference policy to the just-published model
        self.ref_target_model.update_target_params(self.model)
        self.kl_record = []
        self.exp_counter = 0
        self.actor_lr_scheduler.step()
        self.critic_lr_scheduler.step()
def checkpoint_attributes(self):
'''
outlines attributes to be checkpointed
'''
return [
'model',
'ref_target_model',
'actor_lr_scheduler',
'critic_lr_scheduler',
'current_iteration',
]
def _prefetcher_preprocess(self, batch):
batch = self.aggregator.aggregate(batch)
return batch
| [
"torchx.device_scope",
"surreal.model.ppo_net.PPOModel",
"surreal.model.reward_filter.RewardFilter",
"torch.var",
"surreal.model.ppo_net.DiagGauss",
"torch.cat",
"numpy.mean",
"torch.clamp",
"torch.cuda.is_available",
"torch.pow",
"torch.zeros",
"torch.sum",
"torch.tensor"
] | [((3142, 3167), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3165, 3167), False, 'import torch\n'), ((10327, 10396), 'torch.clamp', 'torch.clamp', (['prob_ratio', '(1 - self.clip_epsilon)', '(1 + self.clip_epsilon)'], {}), '(prob_ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon)\n', (10338, 10396), False, 'import torch\n'), ((29696, 29719), 'numpy.mean', 'np.mean', (['self.kl_record'], {}), '(self.kl_record)\n', (29703, 29719), True, 'import numpy as np\n'), ((3000, 3025), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3023, 3025), False, 'import torch\n'), ((6260, 6292), 'torchx.device_scope', 'tx.device_scope', (['self.gpu_option'], {}), '(self.gpu_option)\n', (6275, 6292), True, 'import torchx as tx\n'), ((6319, 6607), 'surreal.model.ppo_net.PPOModel', 'PPOModel', ([], {'obs_spec': 'self.obs_spec', 'action_dim': 'self.action_dim', 'model_config': 'self.learner_config.model', 'use_cuda': 'self.use_cuda', 'init_log_sig': 'self.init_log_sig', 'use_z_filter': 'self.use_z_filter', 'if_pixel_input': 'self.env_config.pixel_input', 'rnn_config': 'self.learner_config.algo.rnn'}), '(obs_spec=self.obs_spec, action_dim=self.action_dim, model_config=\n self.learner_config.model, use_cuda=self.use_cuda, init_log_sig=self.\n init_log_sig, use_z_filter=self.use_z_filter, if_pixel_input=self.\n env_config.pixel_input, rnn_config=self.learner_config.algo.rnn)\n', (6327, 6607), False, 'from surreal.model.ppo_net import PPOModel, DiagGauss\n'), ((6772, 7060), 'surreal.model.ppo_net.PPOModel', 'PPOModel', ([], {'obs_spec': 'self.obs_spec', 'action_dim': 'self.action_dim', 'model_config': 'self.learner_config.model', 'use_cuda': 'self.use_cuda', 'init_log_sig': 'self.init_log_sig', 'use_z_filter': 'self.use_z_filter', 'if_pixel_input': 'self.env_config.pixel_input', 'rnn_config': 'self.learner_config.algo.rnn'}), '(obs_spec=self.obs_spec, action_dim=self.action_dim, model_config=\n self.learner_config.model, 
use_cuda=self.use_cuda, init_log_sig=self.\n init_log_sig, use_z_filter=self.use_z_filter, if_pixel_input=self.\n env_config.pixel_input, rnn_config=self.learner_config.algo.rnn)\n', (6780, 7060), False, 'from surreal.model.ppo_net import PPOModel, DiagGauss\n'), ((9100, 9126), 'surreal.model.ppo_net.DiagGauss', 'DiagGauss', (['self.action_dim'], {}), '(self.action_dim)\n', (9109, 9126), False, 'from surreal.model.ppo_net import PPOModel, DiagGauss\n'), ((17651, 17683), 'torchx.device_scope', 'tx.device_scope', (['self.gpu_option'], {}), '(self.gpu_option)\n', (17666, 17683), True, 'import torchx as tx\n'), ((17783, 17815), 'torch.pow', 'torch.pow', (['self.gamma', 'index_set'], {}), '(self.gamma, index_set)\n', (17792, 17815), False, 'import torch\n'), ((17834, 17864), 'torch.pow', 'torch.pow', (['self.lam', 'index_set'], {}), '(self.lam, index_set)\n', (17843, 17864), False, 'import torch\n'), ((20543, 20575), 'torchx.device_scope', 'tx.device_scope', (['self.gpu_option'], {}), '(self.gpu_option)\n', (20558, 20575), True, 'import torchx as tx\n'), ((21248, 21290), 'torch.tensor', 'torch.tensor', (['actions'], {'dtype': 'torch.float32'}), '(actions, dtype=torch.float32)\n', (21260, 21290), False, 'import torch\n'), ((21589, 21628), 'torch.tensor', 'torch.tensor', (['done'], {'dtype': 'torch.float32'}), '(done, dtype=torch.float32)\n', (21601, 21628), False, 'import torch\n'), ((23516, 23548), 'torchx.device_scope', 'tx.device_scope', (['self.gpu_option'], {}), '(self.gpu_option)\n', (23531, 23548), True, 'import torchx as tx\n'), ((9314, 9328), 'surreal.model.reward_filter.RewardFilter', 'RewardFilter', ([], {}), '()\n', (9326, 9328), False, 'from surreal.model.reward_filter import RewardFilter\n'), ((15577, 15604), 'torch.var', 'torch.var', (['(returns - values)'], {}), '(returns - values)\n', (15586, 15604), False, 'import torch\n'), ((15607, 15625), 'torch.var', 'torch.var', (['returns'], {}), '(returns)\n', (15616, 15625), False, 'import torch\n'), ((18788, 
18825), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'eff_len'], {}), '(self.batch_size, eff_len)\n', (18799, 18825), False, 'import torch\n'), ((18849, 18886), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'eff_len'], {}), '(self.batch_size, eff_len)\n', (18860, 18886), False, 'import torch\n'), ((19657, 19688), 'torch.sum', 'torch.sum', (['(tds * gamma * lam)', '(1)'], {}), '(tds * gamma * lam, 1)\n', (19666, 19688), False, 'import torch\n'), ((21313, 21355), 'torch.tensor', 'torch.tensor', (['rewards'], {'dtype': 'torch.float32'}), '(rewards, dtype=torch.float32)\n', (21325, 21355), False, 'import torch\n'), ((18062, 18111), 'torch.cat', 'torch.cat', (['[obs[mod][k], obs_next[mod][k]]'], {'dim': '(1)'}), '([obs[mod][k], obs_next[mod][k]], dim=1)\n', (18071, 18111), False, 'import torch\n'), ((19170, 19230), 'torch.sum', 'torch.sum', (['(tds[:, step:step + self.horizon] * gamma * lam)', '(1)'], {}), '(tds[:, step:step + self.horizon] * gamma * lam, 1)\n', (19179, 19230), False, 'import torch\n'), ((19482, 19511), 'torch.sum', 'torch.sum', (['(gamma * rewards)', '(1)'], {}), '(gamma * rewards, 1)\n', (19491, 19511), False, 'import torch\n'), ((18970, 19028), 'torch.sum', 'torch.sum', (['(gamma * rewards[:, step:step + self.horizon])', '(1)'], {}), '(gamma * rewards[:, step:step + self.horizon], 1)\n', (18979, 19028), False, 'import torch\n'), ((10578, 10611), 'torch.cat', 'torch.cat', (['[surr, cliped_surr]', '(1)'], {}), '([surr, cliped_surr], 1)\n', (10587, 10611), False, 'import torch\n'), ((13187, 13221), 'torch.clamp', 'torch.clamp', (['prob_behave'], {'min': '(0.01)'}), '(prob_behave, min=0.01)\n', (13198, 13221), False, 'import torch\n'), ((21045, 21098), 'torch.tensor', 'torch.tensor', (['obs[modality][key]'], {'dtype': 'torch.float32'}), '(obs[modality][key], dtype=torch.float32)\n', (21057, 21098), False, 'import torch\n'), ((21156, 21214), 'torch.tensor', 'torch.tensor', (['obs_next[modality][key]'], {'dtype': 'torch.float32'}), 
'(obs_next[modality][key], dtype=torch.float32)\n', (21168, 21214), False, 'import torch\n'), ((21772, 21826), 'torch.tensor', 'torch.tensor', (['persistent_infos[i]'], {'dtype': 'torch.float32'}), '(persistent_infos[i], dtype=torch.float32)\n', (21784, 21826), False, 'import torch\n'), ((21969, 22020), 'torch.tensor', 'torch.tensor', (['onetime_infos[i]'], {'dtype': 'torch.float32'}), '(onetime_infos[i], dtype=torch.float32)\n', (21981, 22020), False, 'import torch\n')] |
"""nyquist_test.py - test Nyquist plots
RMM, 30 Jan 2021
This set of unit tests covers various Nyquist plot configurations. Because
much of the output from these tests are graphical, this file can also be run
from ipython to generate plots interactively.
"""
import pytest
import numpy as np
import matplotlib.pyplot as plt
import control as ct
pytestmark = pytest.mark.usefixtures("mplcleanup")
# Utility function for counting unstable poles of open loop (P in FBS)
def _P(sys, indent='right'):
if indent == 'right':
return (sys.pole().real > 0).sum()
elif indent == 'left':
return (sys.pole().real >= 0).sum()
elif indent == 'none':
if any(sys.pole().real == 0):
raise ValueError("indent must be left or right for imaginary pole")
else:
raise TypeError("unknown indent value")
# Utility function for counting unstable poles of closed loop (Z in FBS)
def _Z(sys):
return (sys.feedback().pole().real >= 0).sum()
# Basic tests
def test_nyquist_basic():
    """Basic Nyquist tests: encirclement counts, pole-at-origin indentation,
    and returned contours for a range of system configurations."""
    # Simple Nyquist plot
    sys = ct.rss(5, 1, 1)
    N_sys = ct.nyquist_plot(sys)
    # Nyquist criterion: Z = N + P must hold for the counted encirclements
    assert _Z(sys) == N_sys + _P(sys)
    # Unstable system
    sys = ct.tf([10], [1, 2, 2, 1])
    N_sys = ct.nyquist_plot(sys)
    assert _Z(sys) > 0
    assert _Z(sys) == N_sys + _P(sys)
    # Multiple systems - return value is final system
    sys1 = ct.rss(3, 1, 1)
    sys2 = ct.rss(4, 1, 1)
    sys3 = ct.rss(5, 1, 1)
    counts = ct.nyquist_plot([sys1, sys2, sys3])
    for N_sys, sys in zip(counts, [sys1, sys2, sys3]):
        assert _Z(sys) == N_sys + _P(sys)
    # Nyquist plot with poles at the origin, omega specified
    sys = ct.tf([1], [1, 3, 2]) * ct.tf([1], [1, 0])
    omega = np.linspace(0, 1e2, 100)
    count, contour = ct.nyquist_plot(sys, omega, return_contour=True)
    # away from the indentation, the contour follows the requested omega grid
    np.testing.assert_array_equal(
        contour[contour.real < 0], omega[contour.real < 0])
    # Make sure things match at unmodified frequencies
    np.testing.assert_almost_equal(
        contour[contour.real == 0],
        1j*np.linspace(0, 1e2, 100)[contour.real == 0])
    # Make sure that we can turn off frequency modification
    count, contour_indented = ct.nyquist_plot(
        sys, np.linspace(1e-4, 1e2, 100), return_contour=True)
    assert not all(contour_indented.real == 0)
    count, contour = ct.nyquist_plot(
        sys, np.linspace(1e-4, 1e2, 100), return_contour=True,
        indent_direction='none')
    np.testing.assert_almost_equal(contour, 1j*np.linspace(1e-4, 1e2, 100))
    # Nyquist plot with poles at the origin, omega unspecified
    sys = ct.tf([1], [1, 3, 2]) * ct.tf([1], [1, 0])
    count, contour = ct.nyquist_plot(sys, return_contour=True)
    assert _Z(sys) == count + _P(sys)
    # Nyquist plot with poles at the origin, return contour
    sys = ct.tf([1], [1, 3, 2]) * ct.tf([1], [1, 0])
    count, contour = ct.nyquist_plot(sys, return_contour=True)
    assert _Z(sys) == count + _P(sys)
    # Nyquist plot with poles on imaginary axis, omega specified
    sys = ct.tf([1], [1, 3, 2]) * ct.tf([1], [1, 0, 1])
    count = ct.nyquist_plot(sys, np.linspace(1e-3, 1e1, 1000))
    assert _Z(sys) == count + _P(sys)
    # Nyquist plot with poles on imaginary axis, omega specified, with contour
    sys = ct.tf([1], [1, 3, 2]) * ct.tf([1], [1, 0, 1])
    count, contour = ct.nyquist_plot(
        sys, np.linspace(1e-3, 1e1, 1000), return_contour=True)
    assert _Z(sys) == count + _P(sys)
    # Nyquist plot with poles on imaginary axis, return contour
    sys = ct.tf([1], [1, 3, 2]) * ct.tf([1], [1, 0, 1])
    count, contour = ct.nyquist_plot(sys, return_contour=True)
    assert _Z(sys) == count + _P(sys)
    # Nyquist plot with poles at the origin and on imaginary axis
    sys = ct.tf([1], [1, 3, 2]) * ct.tf([1], [1, 0, 1]) * ct.tf([1], [1, 0])
    count, contour = ct.nyquist_plot(sys, return_contour=True)
    assert _Z(sys) == count + _P(sys)
# Some FBS examples, for comparison
def test_nyquist_fbs_examples():
    """Run through various examples from FBS2e to compare plots"""
    # Bug fix: the docstring above was originally placed *after* the first
    # statement, where it was a no-op string literal rather than a docstring.
    s = ct.tf('s')
    plt.figure()
    plt.title("Figure 10.4: L(s) = 1.4 e^{-s}/(s+1)^2")
    # time delay approximated by a 4th-order Pade approximation
    sys = ct.tf([1.4], [1, 2, 1]) * ct.tf(*ct.pade(1, 4))
    count = ct.nyquist_plot(sys)
    assert _Z(sys) == count + _P(sys)
    plt.figure()
    plt.title("Figure 10.4: L(s) = 1/(s + a)^2 with a = 0.6")
    sys = 1/(s + 0.6)**3
    count = ct.nyquist_plot(sys)
    assert _Z(sys) == count + _P(sys)
    plt.figure()
    plt.title("Figure 10.6: L(s) = 1/(s (s+1)^2) - pole at the origin")
    sys = 1/(s * (s+1)**2)
    count = ct.nyquist_plot(sys)
    assert _Z(sys) == count + _P(sys)
    plt.figure()
    plt.title("Figure 10.10: L(s) = 3 (s+6)^2 / (s (s+1)^2)")
    sys = 3 * (s+6)**2 / (s * (s+1)**2)
    count = ct.nyquist_plot(sys)
    assert _Z(sys) == count + _P(sys)
    plt.figure()
    plt.title("Figure 10.10: L(s) = 3 (s+6)^2 / (s (s+1)^2) [zoom]")
    count = ct.nyquist_plot(sys, omega_limits=[1.5, 1e3])
    # Frequency limits for zoom give incorrect encirclement count
    # assert _Z(sys) == count + _P(sys)
    assert count == -1
@pytest.mark.parametrize("arrows", [
None, # default argument
1, 2, 3, 4, # specified number of arrows
[0.1, 0.5, 0.9], # specify arc lengths
])
def test_nyquist_arrows(arrows):
sys = ct.tf([1.4], [1, 2, 1]) * ct.tf(*ct.pade(1, 4))
plt.figure();
plt.title("L(s) = 1.4 e^{-s}/(s+1)^2 / arrows = %s" % arrows)
count = ct.nyquist_plot(sys, arrows=arrows)
assert _Z(sys) == count + _P(sys)
def test_nyquist_encirclements():
# Example 14.14: effect of friction in a cart-pendulum system
s = ct.tf('s')
sys = (0.02 * s**3 - 0.1 * s) / (s**4 + s**3 + s**2 + 0.25 * s + 0.04)
plt.figure();
count = ct.nyquist_plot(sys)
plt.title("Stable system; encirclements = %d" % count)
assert _Z(sys) == count + _P(sys)
plt.figure();
count = ct.nyquist_plot(sys * 3)
plt.title("Unstable system; encirclements = %d" % count)
assert _Z(sys * 3) == count + _P(sys * 3)
# System with pole at the origin
sys = ct.tf([3], [1, 2, 2, 1, 0])
plt.figure();
count = ct.nyquist_plot(sys)
plt.title("Pole at the origin; encirclements = %d" % count)
assert _Z(sys) == count + _P(sys)
@pytest.fixture
def indentsys():
# FBS Figure 10.10
# poles: [-1, -1, 0]
s = ct.tf('s')
return 3 * (s+6)**2 / (s * (s+1)**2)
def test_nyquist_indent_default(indentsys):
plt.figure();
count = ct.nyquist_plot(indentsys)
plt.title("Pole at origin; indent_radius=default")
assert _Z(indentsys) == count + _P(indentsys)
def test_nyquist_indent_dont(indentsys):
# first value of default omega vector was 0.1, replaced by 0. for contour
# indent_radius is larger than 0.1 -> no extra quater circle around origin
count, contour = ct.nyquist_plot(indentsys,
plot=False,
indent_radius=.1007,
return_contour=True)
np.testing.assert_allclose(contour[0], .1007+0.j)
# second value of omega_vector is larger than indent_radius: not indented
assert np.all(contour.real[2:] == 0.)
def test_nyquist_indent_do(indentsys):
plt.figure();
count, contour = ct.nyquist_plot(indentsys,
indent_radius=0.01,
return_contour=True)
plt.title("Pole at origin; indent_radius=0.01; encirclements = %d" % count)
assert _Z(indentsys) == count + _P(indentsys)
# indent radius is smaller than the start of the default omega vector
# check that a quarter circle around the pole at origin has been added.
np.testing.assert_allclose(contour[:50].real**2 + contour[:50].imag**2,
0.01**2)
def test_nyquist_indent_left(indentsys):
plt.figure();
count = ct.nyquist_plot(indentsys, indent_direction='left')
plt.title(
"Pole at origin; indent_direction='left'; encirclements = %d" % count)
assert _Z(indentsys) == count + _P(indentsys, indent='left')
def test_nyquist_indent_im():
"""Test system with poles on the imaginary axis."""
sys = ct.tf([1, 1], [1, 0, 1])
# Imaginary poles with standard indentation
plt.figure();
count = ct.nyquist_plot(sys)
plt.title("Imaginary poles; encirclements = %d" % count)
assert _Z(sys) == count + _P(sys)
# Imaginary poles with indentation to the left
plt.figure();
count = ct.nyquist_plot(sys, indent_direction='left', label_freq=300)
plt.title(
"Imaginary poles; indent_direction='left'; encirclements = %d" % count)
assert _Z(sys) == count + _P(sys, indent='left')
# Imaginary poles with no indentation
plt.figure();
count = ct.nyquist_plot(
sys, np.linspace(0, 1e3, 1000), indent_direction='none')
plt.title(
"Imaginary poles; indent_direction='none'; encirclements = %d" % count)
assert _Z(sys) == count + _P(sys)
def test_nyquist_exceptions():
# MIMO not implemented
sys = ct.rss(2, 2, 2)
with pytest.raises(
ct.exception.ControlMIMONotImplemented,
match="only supports SISO"):
ct.nyquist_plot(sys)
# Legacy keywords for arrow size
sys = ct.rss(2, 1, 1)
with pytest.warns(FutureWarning, match="use `arrow_size` instead"):
ct.nyquist_plot(sys, arrow_width=8, arrow_length=6)
# Discrete time system sampled above Nyquist frequency
sys = ct.drss(2, 1, 1)
sys.dt = 0.01
with pytest.warns(UserWarning, match="above Nyquist"):
ct.nyquist_plot(sys, np.logspace(-2, 3))
if __name__ == "__main__":
#
# Interactive mode: generate plots for manual viewing
#
# Running this script in python (or better ipython) will show a collection of
# figures that should all look OK on the screeen.
#
# In interactive mode, turn on ipython interactive graphics
plt.ion()
# Start by clearing existing figures
plt.close('all')
print("Nyquist examples from FBS")
test_nyquist_fbs_examples()
print("Arrow test")
test_nyquist_arrows(None)
test_nyquist_arrows(1)
test_nyquist_arrows(3)
test_nyquist_arrows([0.1, 0.5, 0.9])
print("Stability checks")
test_nyquist_encirclements()
print("Indentation checks")
test_nyquist_indent()
print("Unusual Nyquist plot")
sys = ct.tf([1], [1, 3, 2]) * ct.tf([1], [1, 0, 1])
plt.figure()
plt.title("Poles: %s" % np.array2string(sys.pole(), precision=2, separator=','))
count = ct.nyquist_plot(sys)
assert _Z(sys) == count + _P(sys)
| [
"matplotlib.pyplot.title",
"control.drss",
"pytest.warns",
"numpy.testing.assert_array_equal",
"matplotlib.pyplot.close",
"numpy.testing.assert_allclose",
"control.tf",
"numpy.all",
"numpy.logspace",
"control.rss",
"control.pade",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.figure",
"pytes... | [((364, 401), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""mplcleanup"""'], {}), "('mplcleanup')\n", (387, 401), False, 'import pytest\n'), ((5106, 5176), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""arrows"""', '[None, 1, 2, 3, 4, [0.1, 0.5, 0.9]]'], {}), "('arrows', [None, 1, 2, 3, 4, [0.1, 0.5, 0.9]])\n", (5129, 5176), False, 'import pytest\n'), ((1064, 1079), 'control.rss', 'ct.rss', (['(5)', '(1)', '(1)'], {}), '(5, 1, 1)\n', (1070, 1079), True, 'import control as ct\n'), ((1092, 1112), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {}), '(sys)\n', (1107, 1112), True, 'import control as ct\n'), ((1184, 1209), 'control.tf', 'ct.tf', (['[10]', '[1, 2, 2, 1]'], {}), '([10], [1, 2, 2, 1])\n', (1189, 1209), True, 'import control as ct\n'), ((1222, 1242), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {}), '(sys)\n', (1237, 1242), True, 'import control as ct\n'), ((1370, 1385), 'control.rss', 'ct.rss', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (1376, 1385), True, 'import control as ct\n'), ((1397, 1412), 'control.rss', 'ct.rss', (['(4)', '(1)', '(1)'], {}), '(4, 1, 1)\n', (1403, 1412), True, 'import control as ct\n'), ((1424, 1439), 'control.rss', 'ct.rss', (['(5)', '(1)', '(1)'], {}), '(5, 1, 1)\n', (1430, 1439), True, 'import control as ct\n'), ((1453, 1488), 'control.nyquist_plot', 'ct.nyquist_plot', (['[sys1, sys2, sys3]'], {}), '([sys1, sys2, sys3])\n', (1468, 1488), True, 'import control as ct\n'), ((1713, 1739), 'numpy.linspace', 'np.linspace', (['(0)', '(100.0)', '(100)'], {}), '(0, 100.0, 100)\n', (1724, 1739), True, 'import numpy as np\n'), ((1759, 1807), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys', 'omega'], {'return_contour': '(True)'}), '(sys, omega, return_contour=True)\n', (1774, 1807), True, 'import control as ct\n'), ((1812, 1897), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['contour[contour.real < 0]', 'omega[contour.real < 0]'], {}), '(contour[contour.real < 
0], omega[contour.real <\n 0])\n', (1841, 1897), True, 'import numpy as np\n'), ((2653, 2694), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {'return_contour': '(True)'}), '(sys, return_contour=True)\n', (2668, 2694), True, 'import control as ct\n'), ((2868, 2909), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {'return_contour': '(True)'}), '(sys, return_contour=True)\n', (2883, 2909), True, 'import control as ct\n'), ((3589, 3630), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {'return_contour': '(True)'}), '(sys, return_contour=True)\n', (3604, 3630), True, 'import control as ct\n'), ((3834, 3875), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {'return_contour': '(True)'}), '(sys, return_contour=True)\n', (3849, 3875), True, 'import control as ct\n'), ((3993, 4003), 'control.tf', 'ct.tf', (['"""s"""'], {}), "('s')\n", (3998, 4003), True, 'import control as ct\n'), ((4076, 4088), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4086, 4088), True, 'import matplotlib.pyplot as plt\n'), ((4093, 4144), 'matplotlib.pyplot.title', 'plt.title', (['"""Figure 10.4: L(s) = 1.4 e^{-s}/(s+1)^2"""'], {}), "('Figure 10.4: L(s) = 1.4 e^{-s}/(s+1)^2')\n", (4102, 4144), True, 'import matplotlib.pyplot as plt\n'), ((4215, 4235), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {}), '(sys)\n', (4230, 4235), True, 'import control as ct\n'), ((4279, 4291), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4289, 4291), True, 'import matplotlib.pyplot as plt\n'), ((4296, 4353), 'matplotlib.pyplot.title', 'plt.title', (['"""Figure 10.4: L(s) = 1/(s + a)^2 with a = 0.6"""'], {}), "('Figure 10.4: L(s) = 1/(s + a)^2 with a = 0.6')\n", (4305, 4353), True, 'import matplotlib.pyplot as plt\n'), ((4391, 4411), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {}), '(sys)\n', (4406, 4411), True, 'import control as ct\n'), ((4455, 4467), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4465, 4467), True, 'import 
matplotlib.pyplot as plt\n'), ((4472, 4539), 'matplotlib.pyplot.title', 'plt.title', (['"""Figure 10.6: L(s) = 1/(s (s+1)^2) - pole at the origin"""'], {}), "('Figure 10.6: L(s) = 1/(s (s+1)^2) - pole at the origin')\n", (4481, 4539), True, 'import matplotlib.pyplot as plt\n'), ((4579, 4599), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {}), '(sys)\n', (4594, 4599), True, 'import control as ct\n'), ((4643, 4655), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4653, 4655), True, 'import matplotlib.pyplot as plt\n'), ((4660, 4717), 'matplotlib.pyplot.title', 'plt.title', (['"""Figure 10.10: L(s) = 3 (s+6)^2 / (s (s+1)^2)"""'], {}), "('Figure 10.10: L(s) = 3 (s+6)^2 / (s (s+1)^2)')\n", (4669, 4717), True, 'import matplotlib.pyplot as plt\n'), ((4770, 4790), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {}), '(sys)\n', (4785, 4790), True, 'import control as ct\n'), ((4834, 4846), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4844, 4846), True, 'import matplotlib.pyplot as plt\n'), ((4851, 4915), 'matplotlib.pyplot.title', 'plt.title', (['"""Figure 10.10: L(s) = 3 (s+6)^2 / (s (s+1)^2) [zoom]"""'], {}), "('Figure 10.10: L(s) = 3 (s+6)^2 / (s (s+1)^2) [zoom]')\n", (4860, 4915), True, 'import matplotlib.pyplot as plt\n'), ((4928, 4976), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {'omega_limits': '[1.5, 1000.0]'}), '(sys, omega_limits=[1.5, 1000.0])\n', (4943, 4976), True, 'import control as ct\n'), ((5406, 5418), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5416, 5418), True, 'import matplotlib.pyplot as plt\n'), ((5424, 5485), 'matplotlib.pyplot.title', 'plt.title', (["('L(s) = 1.4 e^{-s}/(s+1)^2 / arrows = %s' % arrows)"], {}), "('L(s) = 1.4 e^{-s}/(s+1)^2 / arrows = %s' % arrows)\n", (5433, 5485), True, 'import matplotlib.pyplot as plt\n'), ((5498, 5533), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {'arrows': 'arrows'}), '(sys, arrows=arrows)\n', (5513, 5533), True, 'import control as 
ct\n'), ((5682, 5692), 'control.tf', 'ct.tf', (['"""s"""'], {}), "('s')\n", (5687, 5692), True, 'import control as ct\n'), ((5773, 5785), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5783, 5785), True, 'import matplotlib.pyplot as plt\n'), ((5799, 5819), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {}), '(sys)\n', (5814, 5819), True, 'import control as ct\n'), ((5824, 5878), 'matplotlib.pyplot.title', 'plt.title', (["('Stable system; encirclements = %d' % count)"], {}), "('Stable system; encirclements = %d' % count)\n", (5833, 5878), True, 'import matplotlib.pyplot as plt\n'), ((5922, 5934), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5932, 5934), True, 'import matplotlib.pyplot as plt\n'), ((5948, 5972), 'control.nyquist_plot', 'ct.nyquist_plot', (['(sys * 3)'], {}), '(sys * 3)\n', (5963, 5972), True, 'import control as ct\n'), ((5977, 6033), 'matplotlib.pyplot.title', 'plt.title', (["('Unstable system; encirclements = %d' % count)"], {}), "('Unstable system; encirclements = %d' % count)\n", (5986, 6033), True, 'import matplotlib.pyplot as plt\n'), ((6128, 6155), 'control.tf', 'ct.tf', (['[3]', '[1, 2, 2, 1, 0]'], {}), '([3], [1, 2, 2, 1, 0])\n', (6133, 6155), True, 'import control as ct\n'), ((6161, 6173), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6171, 6173), True, 'import matplotlib.pyplot as plt\n'), ((6187, 6207), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {}), '(sys)\n', (6202, 6207), True, 'import control as ct\n'), ((6212, 6271), 'matplotlib.pyplot.title', 'plt.title', (["('Pole at the origin; encirclements = %d' % count)"], {}), "('Pole at the origin; encirclements = %d' % count)\n", (6221, 6271), True, 'import matplotlib.pyplot as plt\n'), ((6401, 6411), 'control.tf', 'ct.tf', (['"""s"""'], {}), "('s')\n", (6406, 6411), True, 'import control as ct\n'), ((6503, 6515), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6513, 6515), True, 'import matplotlib.pyplot as plt\n'), 
((6529, 6555), 'control.nyquist_plot', 'ct.nyquist_plot', (['indentsys'], {}), '(indentsys)\n', (6544, 6555), True, 'import control as ct\n'), ((6560, 6610), 'matplotlib.pyplot.title', 'plt.title', (['"""Pole at origin; indent_radius=default"""'], {}), "('Pole at origin; indent_radius=default')\n", (6569, 6610), True, 'import matplotlib.pyplot as plt\n'), ((6882, 6968), 'control.nyquist_plot', 'ct.nyquist_plot', (['indentsys'], {'plot': '(False)', 'indent_radius': '(0.1007)', 'return_contour': '(True)'}), '(indentsys, plot=False, indent_radius=0.1007, return_contour\n =True)\n', (6897, 6968), True, 'import control as ct\n'), ((7078, 7131), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['contour[0]', '(0.1007 + 0.0j)'], {}), '(contour[0], 0.1007 + 0.0j)\n', (7104, 7131), True, 'import numpy as np\n'), ((7217, 7248), 'numpy.all', 'np.all', (['(contour.real[2:] == 0.0)'], {}), '(contour.real[2:] == 0.0)\n', (7223, 7248), True, 'import numpy as np\n'), ((7293, 7305), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7303, 7305), True, 'import matplotlib.pyplot as plt\n'), ((7328, 7395), 'control.nyquist_plot', 'ct.nyquist_plot', (['indentsys'], {'indent_radius': '(0.01)', 'return_contour': '(True)'}), '(indentsys, indent_radius=0.01, return_contour=True)\n', (7343, 7395), True, 'import control as ct\n'), ((7474, 7549), 'matplotlib.pyplot.title', 'plt.title', (["('Pole at origin; indent_radius=0.01; encirclements = %d' % count)"], {}), "('Pole at origin; indent_radius=0.01; encirclements = %d' % count)\n", (7483, 7549), True, 'import matplotlib.pyplot as plt\n'), ((7754, 7844), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(contour[:50].real ** 2 + contour[:50].imag ** 2)', '(0.01 ** 2)'], {}), '(contour[:50].real ** 2 + contour[:50].imag ** 2,\n 0.01 ** 2)\n', (7780, 7844), True, 'import numpy as np\n'), ((7913, 7925), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7923, 7925), True, 'import 
matplotlib.pyplot as plt\n'), ((7939, 7990), 'control.nyquist_plot', 'ct.nyquist_plot', (['indentsys'], {'indent_direction': '"""left"""'}), "(indentsys, indent_direction='left')\n", (7954, 7990), True, 'import control as ct\n'), ((7995, 8080), 'matplotlib.pyplot.title', 'plt.title', (['("Pole at origin; indent_direction=\'left\'; encirclements = %d" % count)'], {}), '("Pole at origin; indent_direction=\'left\'; encirclements = %d" % count\n )\n', (8004, 8080), True, 'import matplotlib.pyplot as plt\n'), ((8248, 8272), 'control.tf', 'ct.tf', (['[1, 1]', '[1, 0, 1]'], {}), '([1, 1], [1, 0, 1])\n', (8253, 8272), True, 'import control as ct\n'), ((8326, 8338), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8336, 8338), True, 'import matplotlib.pyplot as plt\n'), ((8352, 8372), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {}), '(sys)\n', (8367, 8372), True, 'import control as ct\n'), ((8377, 8433), 'matplotlib.pyplot.title', 'plt.title', (["('Imaginary poles; encirclements = %d' % count)"], {}), "('Imaginary poles; encirclements = %d' % count)\n", (8386, 8433), True, 'import matplotlib.pyplot as plt\n'), ((8528, 8540), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8538, 8540), True, 'import matplotlib.pyplot as plt\n'), ((8554, 8615), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {'indent_direction': '"""left"""', 'label_freq': '(300)'}), "(sys, indent_direction='left', label_freq=300)\n", (8569, 8615), True, 'import control as ct\n'), ((8620, 8705), 'matplotlib.pyplot.title', 'plt.title', (['("Imaginary poles; indent_direction=\'left\'; encirclements = %d" % count)'], {}), '("Imaginary poles; indent_direction=\'left\'; encirclements = %d" %\n count)\n', (8629, 8705), True, 'import matplotlib.pyplot as plt\n'), ((8811, 8823), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8821, 8823), True, 'import matplotlib.pyplot as plt\n'), ((8923, 9008), 'matplotlib.pyplot.title', 'plt.title', (['("Imaginary poles; 
indent_direction=\'none\'; encirclements = %d" % count)'], {}), '("Imaginary poles; indent_direction=\'none\'; encirclements = %d" %\n count)\n', (8932, 9008), True, 'import matplotlib.pyplot as plt\n'), ((9122, 9137), 'control.rss', 'ct.rss', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (9128, 9137), True, 'import control as ct\n'), ((9332, 9347), 'control.rss', 'ct.rss', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (9338, 9347), True, 'import control as ct\n'), ((9550, 9566), 'control.drss', 'ct.drss', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (9557, 9566), True, 'import control as ct\n'), ((10003, 10012), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (10010, 10012), True, 'import matplotlib.pyplot as plt\n'), ((10059, 10075), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10068, 10075), True, 'import matplotlib.pyplot as plt\n'), ((10516, 10528), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10526, 10528), True, 'import matplotlib.pyplot as plt\n'), ((10626, 10646), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {}), '(sys)\n', (10641, 10646), True, 'import control as ct\n'), ((1658, 1679), 'control.tf', 'ct.tf', (['[1]', '[1, 3, 2]'], {}), '([1], [1, 3, 2])\n', (1663, 1679), True, 'import control as ct\n'), ((1682, 1700), 'control.tf', 'ct.tf', (['[1]', '[1, 0]'], {}), '([1], [1, 0])\n', (1687, 1700), True, 'import control as ct\n'), ((2208, 2239), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(100.0)', '(100)'], {}), '(0.0001, 100.0, 100)\n', (2219, 2239), True, 'import numpy as np\n'), ((2356, 2387), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(100.0)', '(100)'], {}), '(0.0001, 100.0, 100)\n', (2367, 2387), True, 'import numpy as np\n'), ((2589, 2610), 'control.tf', 'ct.tf', (['[1]', '[1, 3, 2]'], {}), '([1], [1, 3, 2])\n', (2594, 2610), True, 'import control as ct\n'), ((2613, 2631), 'control.tf', 'ct.tf', (['[1]', '[1, 0]'], {}), '([1], [1, 0])\n', (2618, 2631), True, 'import control as ct\n'), 
((2804, 2825), 'control.tf', 'ct.tf', (['[1]', '[1, 3, 2]'], {}), '([1], [1, 3, 2])\n', (2809, 2825), True, 'import control as ct\n'), ((2828, 2846), 'control.tf', 'ct.tf', (['[1]', '[1, 0]'], {}), '([1], [1, 0])\n', (2833, 2846), True, 'import control as ct\n'), ((3024, 3045), 'control.tf', 'ct.tf', (['[1]', '[1, 3, 2]'], {}), '([1], [1, 3, 2])\n', (3029, 3045), True, 'import control as ct\n'), ((3048, 3069), 'control.tf', 'ct.tf', (['[1]', '[1, 0, 1]'], {}), '([1], [1, 0, 1])\n', (3053, 3069), True, 'import control as ct\n'), ((3103, 3133), 'numpy.linspace', 'np.linspace', (['(0.001)', '(10.0)', '(1000)'], {}), '(0.001, 10.0, 1000)\n', (3114, 3133), True, 'import numpy as np\n'), ((3261, 3282), 'control.tf', 'ct.tf', (['[1]', '[1, 3, 2]'], {}), '([1], [1, 3, 2])\n', (3266, 3282), True, 'import control as ct\n'), ((3285, 3306), 'control.tf', 'ct.tf', (['[1]', '[1, 0, 1]'], {}), '([1], [1, 0, 1])\n', (3290, 3306), True, 'import control as ct\n'), ((3358, 3388), 'numpy.linspace', 'np.linspace', (['(0.001)', '(10.0)', '(1000)'], {}), '(0.001, 10.0, 1000)\n', (3369, 3388), True, 'import numpy as np\n'), ((3522, 3543), 'control.tf', 'ct.tf', (['[1]', '[1, 3, 2]'], {}), '([1], [1, 3, 2])\n', (3527, 3543), True, 'import control as ct\n'), ((3546, 3567), 'control.tf', 'ct.tf', (['[1]', '[1, 0, 1]'], {}), '([1], [1, 0, 1])\n', (3551, 3567), True, 'import control as ct\n'), ((3794, 3812), 'control.tf', 'ct.tf', (['[1]', '[1, 0]'], {}), '([1], [1, 0])\n', (3799, 3812), True, 'import control as ct\n'), ((4155, 4178), 'control.tf', 'ct.tf', (['[1.4]', '[1, 2, 1]'], {}), '([1.4], [1, 2, 1])\n', (4160, 4178), True, 'import control as ct\n'), ((5354, 5377), 'control.tf', 'ct.tf', (['[1.4]', '[1, 2, 1]'], {}), '([1.4], [1, 2, 1])\n', (5359, 5377), True, 'import control as ct\n'), ((8867, 8895), 'numpy.linspace', 'np.linspace', (['(0)', '(1000.0)', '(1000)'], {}), '(0, 1000.0, 1000)\n', (8878, 8895), True, 'import numpy as np\n'), ((9147, 9233), 'pytest.raises', 'pytest.raises', 
(['ct.exception.ControlMIMONotImplemented'], {'match': '"""only supports SISO"""'}), "(ct.exception.ControlMIMONotImplemented, match=\n 'only supports SISO')\n", (9160, 9233), False, 'import pytest\n'), ((9263, 9283), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {}), '(sys)\n', (9278, 9283), True, 'import control as ct\n'), ((9357, 9418), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {'match': '"""use `arrow_size` instead"""'}), "(FutureWarning, match='use `arrow_size` instead')\n", (9369, 9418), False, 'import pytest\n'), ((9428, 9479), 'control.nyquist_plot', 'ct.nyquist_plot', (['sys'], {'arrow_width': '(8)', 'arrow_length': '(6)'}), '(sys, arrow_width=8, arrow_length=6)\n', (9443, 9479), True, 'import control as ct\n'), ((9594, 9642), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""above Nyquist"""'}), "(UserWarning, match='above Nyquist')\n", (9606, 9642), False, 'import pytest\n'), ((10466, 10487), 'control.tf', 'ct.tf', (['[1]', '[1, 3, 2]'], {}), '([1], [1, 3, 2])\n', (10471, 10487), True, 'import control as ct\n'), ((10490, 10511), 'control.tf', 'ct.tf', (['[1]', '[1, 0, 1]'], {}), '([1], [1, 0, 1])\n', (10495, 10511), True, 'import control as ct\n'), ((2486, 2517), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(100.0)', '(100)'], {}), '(0.0001, 100.0, 100)\n', (2497, 2517), True, 'import numpy as np\n'), ((3746, 3767), 'control.tf', 'ct.tf', (['[1]', '[1, 3, 2]'], {}), '([1], [1, 3, 2])\n', (3751, 3767), True, 'import control as ct\n'), ((3770, 3791), 'control.tf', 'ct.tf', (['[1]', '[1, 0, 1]'], {}), '([1], [1, 0, 1])\n', (3775, 3791), True, 'import control as ct\n'), ((9673, 9691), 'numpy.logspace', 'np.logspace', (['(-2)', '(3)'], {}), '(-2, 3)\n', (9684, 9691), True, 'import numpy as np\n'), ((2042, 2068), 'numpy.linspace', 'np.linspace', (['(0)', '(100.0)', '(100)'], {}), '(0, 100.0, 100)\n', (2053, 2068), True, 'import numpy as np\n'), ((4188, 4201), 'control.pade', 'ct.pade', (['(1)', '(4)'], {}), '(1, 4)\n', (4195, 
4201), True, 'import control as ct\n'), ((5387, 5400), 'control.pade', 'ct.pade', (['(1)', '(4)'], {}), '(1, 4)\n', (5394, 5400), True, 'import control as ct\n')] |
import pandas as pd
import numpy as np
import string
def create_predictive_table():
index = pd.Index(['S', 'bloco', 'declaracao', 'tipo', 'comandos', 'condicao', 'expressao',
'expressao\'', 'termo', 'relop', 'artop', 'atrop'], 'rows')
columns = pd.Index(['programa', 'inicio', 'fim', 'id', ';', 'int', 'char', 'real', 'se', '(', ')', 'enquanto', '==', '<>', '<', '<=', '>', '>=',
'+', '-', '*', '/', '=', '$', 'numero'], 'cols')
table = pd.DataFrame(columns=columns, index=index)
table = set_productions(table)
table.to_csv('tables/predictive_table.csv', encoding='utf-8')
def set_productions(table):
table['programa']['S'] = 1
table['inicio']['bloco'] = 2
table['fim']['declaracao'] = 4
table['fim']['comandos'] = 11
table['id']['declaracao'] = 4
table['id']['comandos'] = 10
table['id']['condicao'] = 12
table['id']['expressao'] = 13
table['id']['termo'] = 17
table[';']['expressao\''] = 16
table['int']['declaracao'] = 3
table['int']['tipo'] = 5
table['char']['declaracao'] = 3
table['char']['tipo'] = 6
table['real']['declaracao'] = 3
table['real']['tipo'] = 7
table['se']['declaracao'] = 4
table['se']['comandos'] = 8
table['(']['condicao'] = 12
table['(']['expressao'] = 13
table['(']['termo'] = 19
table[')']['expressao\''] = 16
table['enquanto']['declaracao'] = 4
table['enquanto']['comandos'] = 9
table['==']['relop'] = 20
table['<>']['relop'] = 21
table['<']['relop'] = 22
table['<=']['relop'] = 24
table['>']['relop'] = 23
table['>=']['relop'] = 25
table['+']['expressao\''] = 14
table['+']['artop'] = 26
table['-']['expressao\''] = 14
table['-']['artop'] = 27
table['*']['expressao\''] = 14
table['*']['artop'] = 28
table['/']['expressao\''] = 14
table['/']['artop'] = 29
table['=']['expressao\''] = 15
table['=']['atrop'] = 30
table['numero']['condicao'] = 12
table['numero']['expressao'] = 13
table['numero']['termo'] = 18
return table
def create_transition_table():
columns = ['FS']
columns.extend([str(x) for x in range(0, 10)]) # 0 - 9
columns.extend(string.ascii_uppercase) # A - Z
columns.extend(string.ascii_lowercase) # a - z
columns.extend(['(', ')', ';', '+', '-', '*', '/', '=', '<', '>', '.', '_'])
table = pd.DataFrame(columns=columns, index=np.arange(53))
table = set_final_states(table)
table = set_transitions(table)
table.to_csv('tables/transition_table.csv', encoding='utf-8', index=False)
def set_final_states(table):
table['FS'][0] = 'N'
table['FS'][1] = 'S'
table['FS'][2] = 'S'
table['FS'][3] = 'S'
table['FS'][4] = 'S'
table['FS'][5] = 'S'
table['FS'][6] = 'S'
table['FS'][7] = 'S'
table['FS'][8] = 'S'
table['FS'][9] = 'N'
table['FS'][10] = 'S'
table['FS'][11] = 'N'
table['FS'][12] = 'N'
table['FS'][13] = 'S'
table['FS'][14] = 'S'
table['FS'][15] = 'N'
table['FS'][16] = 'N'
table['FS'][17] = 'N'
table['FS'][18] = 'S'
table['FS'][19] = 'N'
table['FS'][20] = 'N'
table['FS'][21] = 'N'
table['FS'][22] = 'S'
table['FS'][23] = 'N'
table['FS'][24] = 'N'
table['FS'][25] = 'S'
table['FS'][26] = 'N'
table['FS'][27] = 'N'
table['FS'][28] = 'N'
table['FS'][29] = 'S'
table['FS'][30] = 'N'
table['FS'][31] = 'S'
table['FS'][32] = 'N'
table['FS'][33] = 'N'
table['FS'][34] = 'S'
table['FS'][35] = 'N'
table['FS'][36] = 'N'
table['FS'][37] = 'N'
table['FS'][38] = 'N'
table['FS'][39] = 'N'
table['FS'][40] = 'N'
table['FS'][41] = 'N'
table['FS'][42] = 'S'
table['FS'][43] = 'N'
table['FS'][44] = 'N'
table['FS'][45] = 'N'
table['FS'][46] = 'N'
table['FS'][47] = 'N'
table['FS'][48] = 'N'
table['FS'][49] = 'N'
table['FS'][50] = 'S'
table['FS'][51] = 'S'
table['FS'][52] = 'N'
return table
def set_transitions(table):
for x in range(0, 10):
x_str = str(x)
table[x_str][0] = 8
table[x_str][8] = 8
table[x_str][9] = 10
table[x_str][10] = 10
table[x_str][11] = 13
table[x_str][12] = 13
table[x_str][13] = 13
for j in range(14, 51):
table[x_str][j] = 14
for x_lower in string.ascii_lowercase:
x_upper = x_lower.upper()
table[x_lower][0] = 14
table[x_upper][0] = 14
for j in range(14, 51):
table[x_lower][j] = 14
table[x_upper][j] = 14
for x in range(14, 51):
table['_'][x] = 14
table['('][0] = 1
table[')'][0] = 1
table[';'][0] = 1
table['+'][0] = 51
table['-'][0] = 51
table['*'][0] = 51
table['/'][0] = 51
table['='][0] = 2
table['='][2] = 3
table['<'][0] = 4
table['>'][4] = 5
table['='][4] = 6
table['>'][0] = 7
table['='][7] = 6
table['.'][8] = 9
table['E'][8] = 11
table['E'][10] = 11
table['+'][11] = 12
table['-'][11] = 12
table['r'][0] = 15
table['e'][15] = 16
table['a'][16] = 17
table['l'][17] = 18
table['c'][0] = 19
table['h'][19] = 20
table['a'][20] = 21
table['r'][21] = 22
table['i'][0] = 23
table['n'][23] = 24
table['t'][24] = 25
table['i'][24] = 26
table['c'][26] = 27
table['i'][27] = 28
table['o'][28] = 29
table['s'][0] = 30
table['e'][30] = 31
table['f'][0] = 32
table['i'][32] = 33
table['m'][33] = 34
table['p'][0] = 35
table['r'][35] = 36
table['o'][36] = 37
table['g'][37] = 38
table['r'][38] = 39
table['a'][39] = 40
table['m'][40] = 41
table['a'][41] = 42
table['e'][0] = 43
table['n'][43] = 44
table['q'][44] = 45
table['u'][45] = 46
table['a'][46] = 47
table['n'][47] = 48
table['t'][48] = 49
table['o'][49] = 50
return table
if __name__ == '__main__':
create_transition_table()
create_predictive_table()
| [
"pandas.DataFrame",
"numpy.arange",
"pandas.Index"
] | [((97, 242), 'pandas.Index', 'pd.Index', (['[\'S\', \'bloco\', \'declaracao\', \'tipo\', \'comandos\', \'condicao\', \'expressao\',\n "expressao\'", \'termo\', \'relop\', \'artop\', \'atrop\']', '"""rows"""'], {}), '([\'S\', \'bloco\', \'declaracao\', \'tipo\', \'comandos\', \'condicao\',\n \'expressao\', "expressao\'", \'termo\', \'relop\', \'artop\', \'atrop\'], \'rows\')\n', (105, 242), True, 'import pandas as pd\n'), ((259, 450), 'pandas.Index', 'pd.Index', (["['programa', 'inicio', 'fim', 'id', ';', 'int', 'char', 'real', 'se', '(',\n ')', 'enquanto', '==', '<>', '<', '<=', '>', '>=', '+', '-', '*', '/',\n '=', '$', 'numero']", '"""cols"""'], {}), "(['programa', 'inicio', 'fim', 'id', ';', 'int', 'char', 'real',\n 'se', '(', ')', 'enquanto', '==', '<>', '<', '<=', '>', '>=', '+', '-',\n '*', '/', '=', '$', 'numero'], 'cols')\n", (267, 450), True, 'import pandas as pd\n'), ((460, 502), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns', 'index': 'index'}), '(columns=columns, index=index)\n', (472, 502), True, 'import pandas as pd\n'), ((2417, 2430), 'numpy.arange', 'np.arange', (['(53)'], {}), '(53)\n', (2426, 2430), True, 'import numpy as np\n')] |
"""
Basic data visualization (using PlenOctree's volrend)
Usage: python view_data.py <data_root>
default output: data_vis.html. You can open this in your browser. (bash sensei/mkweb)
"""
# Copyright 2021 <NAME>
import sys
import os
from os import path
DIR_PATH = path.dirname(os.path.realpath(__file__))
sys.path.append(path.join(DIR_PATH, ".."))
import warnings
import numpy as np
import math
from argparse import ArgumentParser
from nerfvis import Scene # pip install nerfvis
from scipy.spatial.transform import Rotation
# BEGIN BORROWED CODE
# Copyright (c) 2006, <NAME>
# Copyright (c) 2006-2009, The Regents of the University of California
# All rights reserved.
def unit_vector(data, axis=None, out=None):
"""Return ndarray normalized by length, i.e. eucledian norm, along axis. """
if out is None:
data = np.array(data, dtype=np.float64, copy=True)
if data.ndim == 1:
data /= math.sqrt(np.dot(data, data))
return data
else:
if out is not data:
out[:] = np.array(data, copy=False)
data = out
length = np.atleast_1d(np.sum(data*data, axis))
np.sqrt(length, length)
if axis is not None:
length = np.expand_dims(length, axis)
data /= length
if out is None:
return data
def rotation_matrix(angle, direction, point=None):
"""Return matrix to rotate about axis defined by point and direction. """
sina = math.sin(angle)
cosa = math.cos(angle)
direction = unit_vector(direction[:3])
# rotation matrix around unit vector
R = np.array(((cosa, 0.0, 0.0),
(0.0, cosa, 0.0),
(0.0, 0.0, cosa)), dtype=np.float64)
R += np.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += np.array(((0.0, -direction[2], direction[1]),
(direction[2], 0.0, -direction[0]),
(-direction[1], direction[0], 0.0)),
dtype=np.float64)
M = np.identity(4)
M[:3, :3] = R
if point is not None:
# rotation not around origin
point = np.array(point[:3], dtype=np.float64, copy=False)
M[:3, 3] = point - np.dot(R, point)
return M
def get_best_yaw(C):
'''
maximize trace(Rz(theta) * C)
'''
assert C.shape == (3, 3)
A = C[0, 1] - C[1, 0]
B = C[0, 0] + C[1, 1]
theta = np.pi / 2 - np.arctan2(B, A)
return theta
def rot_z(theta):
R = tfs.rotation_matrix(theta, [0, 0, 1])
R = R[0:3, 0:3]
return R
def align_umeyama(model, data, known_scale=False, yaw_only=False):
    """Implementation of the paper: Umeyama, Least-Squares Estimation
    of Transformation Parameters Between Two Point Patterns,
    IEEE Trans. Pattern Anal. Mach. Intell., vol. 13, no. 4, 1991.

    Solves  model = s * R * data + t  in the least-squares sense.

    Input:
    model -- first trajectory (nx3), numpy array type
    data -- second trajectory (nx3), numpy array type
    known_scale -- if True, the scale is fixed to 1
    yaw_only -- if True, restrict the rotation to yaw (about z)
    Output:
    s -- scale factor (scalar)
    R -- rotation matrix (3x3)
    t -- translation vector (3x1)

    Fix: uses the public ``np.linalg.svd`` instead of the private path
    ``np.linalg.linalg.svd`` (same function; the private module was removed
    in NumPy 2.0).
    """
    # substract mean
    mu_M = model.mean(0)
    mu_D = data.mean(0)
    model_zerocentered = model - mu_M
    data_zerocentered = data - mu_D
    n = np.shape(model)[0]

    # correlation (cross-covariance) and variance of the data cloud
    C = 1.0/n*np.dot(model_zerocentered.transpose(), data_zerocentered)
    sigma2 = 1.0/n*np.multiply(data_zerocentered, data_zerocentered).sum()
    U_svd, D_svd, V_svd = np.linalg.svd(C)

    D_svd = np.diag(D_svd)
    V_svd = np.transpose(V_svd)

    # Reflection guard: force det(R) = +1.
    S = np.eye(3)
    if(np.linalg.det(U_svd)*np.linalg.det(V_svd) < 0):
        S[2, 2] = -1

    if yaw_only:
        rot_C = np.dot(data_zerocentered.transpose(), model_zerocentered)
        theta = get_best_yaw(rot_C)
        R = rot_z(theta)
    else:
        R = np.dot(U_svd, np.dot(S, np.transpose(V_svd)))

    if known_scale:
        s = 1
    else:
        s = 1.0/sigma2*np.trace(np.dot(D_svd, S))

    t = mu_M-s*np.dot(R, mu_D)

    return s, R, t
def align_procrustes_rt(t_a : np.ndarray, q_a : np.ndarray,
                        t_ref : np.ndarray,
                        q_ref : np.ndarray = None,
                        use_first_k : int = 1000000,
                        want_transform : bool = False):
    """
    Align translation + rotation
    :param t_a: camera translations to align (N, 3)
    :param q_a: camera rotations to align (xyz axis-angle, xyzw quaternion, or rotation matrix) (N, {3, 4, 9})
    :param t_ref: reference camera translations (N, 3)
    :param q_ref: reference camera rotations; accepted for caller
                  compatibility but currently unused by the alignment
    :param use_first_k: int, if set, uses only first k number of cameras to align
    :param want_transform: bool, if set, returns transform function instead of transformed points
    :return:
        if want_transform == False:
            t (N, 3), q (N, {3, 4, 9}) similarity-transformed version of camera poses, aligned to ref
        else: function which given points, applies the aligning transform

    Bug fix: the caller in main() passes four positional arrays (reference
    rotations included) plus ``use_first_k=`` as a keyword, which raised
    ``TypeError: got multiple values for argument 'use_first_k'`` with the
    old signature.  ``q_ref`` is restored as an (unused) parameter, matching
    the interface the commented-out advanced-alignment code refers to.
    """
    assert t_ref.shape[0] == t_a.shape[0]
    # Umeyama similarity (s, R, t) from the first k translation pairs.
    s, R, t = align_umeyama(t_ref[:use_first_k], t_a[:use_first_k])

    def transform(t_b : np.ndarray, q_b : np.ndarray):
        # Similarity transform on positions; rotation-only update on poses.
        t_align = s * t_b @ R.T + t
        Ra = Rotation.from_matrix(R)
        q_align = (Ra * Rotation.from_matrix(q_b)).as_matrix()
        return t_align, q_align

    return transform if want_transform else transform(t_a, q_a)
# END BORROWED CODE
def get_image_size(path : str):
    """
    Return the (width, height) of the image at *path* without decoding the
    pixel data (PIL only parses the header on open).

    Fix: the original left the image (and its underlying file handle) open;
    the context manager closes it before returning.
    """
    from PIL import Image
    with Image.open(path) as im:
        return im.size  # W, H
def sort_key(x):
    """Sort key for frame file names.

    Names of the form ``<c>_rest`` (a single character, an underscore, and a
    non-empty remainder) are ordered by ``rest``; all other names sort by
    the full string.
    """
    has_prefix = len(x) > 2 and x[1] == "_"
    return x[2:] if has_prefix else x
def main():
    """Build an interactive nerfvis HTML visualization for a dataset directory.

    Loads estimated camera poses (and, when present, ground-truth poses and a
    point cloud), normalizes the scene orientation/position/scale from the
    cameras, and writes the viewer to ``<data_dir>/visual``.
    """
    parser = ArgumentParser()
    parser.add_argument("data_dir", type=str, help="dataset root")
    parser.add_argument(
        "--seg",
        action="store_true",
        default=False,
        help="connect camera trajectories with lines, should be used e.g. in NeRF synthetic",
    )
    parser.add_argument(
        "--n_cameras_for_procrustes", '-P',
        type=int,
        default=100000,
        help="use at most first x cameras for procrustes. Useful if trajectory starts to diverge",
    )
    args = parser.parse_args()
    dataset_name = path.basename(path.abspath(args.data_dir))
    def look_for_dir(cands, required=True):
        # Return the first existing subdirectory among the candidate names.
        for cand in cands:
            if path.isdir(path.join(args.data_dir, cand)):
                return path.join(args.data_dir, cand)
        if required:
            assert False, "None of " + str(cands) + " found in data directory"
        return ""
    pose_dir = path.join(args.data_dir, "pose_colmap")
    pose_gt_dir = look_for_dir(["poses", "pose", "c2w", "cameras"])
    # If there is no COLMAP pose folder, promote the (GT) pose folder to the
    # primary poses and skip the ground-truth comparison below.
    if not path.isdir(pose_dir):
        pose_dir, pose_gt_dir = pose_gt_dir, None
    images_dir = look_for_dir(["images", "image", "rgb", "color", "rgbs"])
    intrin_path = path.join(args.data_dir, "intrinsics.txt")
    point_cloud_path = path.join(args.data_dir, "points.npy")
    print("POSE_DIR", pose_dir)
    print("IMAGES_PATH", images_dir)
    print("INTRIN_PATH", intrin_path)
    print("POINT_CLOUD_PATH", point_cloud_path)
    pose_files = sorted([x for x in os.listdir(pose_dir) if x.lower().endswith('.txt')], key=sort_key)
    image_files = sorted([x for x in os.listdir(images_dir) if x.lower().endswith('.png') or x.lower().endswith('.jpg')], key=sort_key)
    all_poses = []
    pnum, seg_begin = None, 0
    segs = []
    # Load all 4x4 camera-to-world matrices; split the trajectory into
    # segments wherever the frame numbering has a gap > 1.
    for i, pose_file in enumerate(pose_files):
        pose = np.loadtxt(path.join(pose_dir, pose_file)).reshape(4, 4)
        splt = path.splitext(pose_file)[0].split('_')
        num = int(splt[1] if len(splt) > 1 else splt[0])
        if pnum is not None and num - pnum > 1 and seg_begin < num:
            segs.append((seg_begin, num))
            seg_begin = num
        pnum = num
        all_poses.append(pose)
    all_poses = np.stack(all_poses)
    segs.append((seg_begin, len(pose_files)))
    def get_transform(c2w):
        # Estimate a rigid normalization (plus scale) for the whole scene
        # from the camera-to-world matrices c2w of shape (N, 4, 4).
        t = c2w[:, :3, 3]
        R = c2w[:, :3, :3]
        # (1) Rotate the world so that z+ is the up axis
        # we estimate the up axis by averaging the camera up axes
        ups = np.sum(R * np.array([0, -1.0, 0]), axis=-1)
        world_up = np.mean(ups, axis=0)
        world_up /= np.linalg.norm(world_up)
        up_camspace = np.array([0.0, -1.0, 0.0])
        c = (up_camspace * world_up).sum()
        cross = np.cross(world_up, up_camspace)
        skew = np.array([[0.0, -cross[2], cross[1]],
                         [cross[2], 0.0, -cross[0]],
                         [-cross[1], cross[0], 0.0]])
        R_align = np.eye(3)
        if c > -1:
            # Rodrigues-style rotation taking world_up onto up_camspace.
            R_align = R_align + skew + (skew @ skew) * 1 / (1+c)
        else:
            # In the unlikely case the original data has y+ up axis,
            # rotate 180-deg about x axis
            R_align = np.array([[-1.0, 0.0, 0.0],
                                [0.0, 1.0, 0.0],
                                [0.0, 0.0, 1.0]])
        R = (R_align @ R)
        fwds = np.sum(R * np.array([0, 0.0, 1.0]), axis=-1)
        t = (R_align @ t[..., None])[..., 0]
        # (2) Recenter the scene using camera center rays
        # find the closest point to the origin for each camera's center ray
        dvec = t + (fwds * -t).sum(-1)[:, None] * fwds
        # Median for more robustness
        translate = -np.median(dvec, axis=0)
        transform = np.eye(4)
        transform[:3, 3] = translate
        transform[:3, :3] = R_align
        # (3) Rescale the scene using camera distances
        scale = 1.0 / np.median(np.linalg.norm(t + translate, axis=-1))
        scale *= 0.95
        return transform, scale
    T, scale = get_transform(all_poses)
    all_poses = T @ all_poses
    R = all_poses[:, :3, :3]
    t = all_poses[:, :3, 3] * scale
    # Average of fx and fy from the shared intrinsics file.
    intrins = np.loadtxt(intrin_path)
    focal = (intrins[0, 0] + intrins[1, 1]) * 0.5
    image_wh = get_image_size(path.join(images_dir, image_files[0]))
    scene = Scene("colmap dataset: " + dataset_name)
    # Try to pick a good frustum size
    avg_dist : float = np.mean(np.linalg.norm(t[1:] - t[:-1], axis=-1))
    cam_scale = avg_dist * 0.3  # NOTE(review): unused below
    # Infer world up direction from GT cams
    ups = np.sum(R * np.array([0, -1.0, 0]), axis=-1)
    world_up = np.mean(ups, axis=0)
    world_up /= np.linalg.norm(world_up)
    # Camera forward vector
    forwards = np.sum(R * np.array([0, 0, 1.0]), axis=-1)
    vforward = np.mean(forwards, axis=0)
    vforward /= np.linalg.norm(vforward)
    # Set camera center of rotation (origin) for orbit
    origin = np.mean(t, axis=0)
    # Set camera position
    center = origin - vforward * np.linalg.norm(t - origin, axis=-1).mean() * 0.7 * 3
    print(' camera center', center, 'vforward', vforward, 'world_up', world_up)
    # One red frustum trajectory per contiguous segment of frames.
    for i, seg in enumerate(segs):
        print(seg)
        print(R.shape, t.shape)
        print(seg[0], seg[1])
        scene.add_camera_frustum(name=f"traj_{i:04d}", focal_length=focal,
                                 image_width=image_wh[0],
                                 image_height=image_wh[1],
                                 z=0.1,
                                 r=R[seg[0]:seg[1]],
                                 t=t[seg[0]:seg[1]],
                                 connect=args.seg,
                                 color=[1.0, 0.0, 0.0])
    if pose_gt_dir is not None:
        print('Loading GT')
        pose_gt_files = sorted([x for x in os.listdir(pose_gt_dir) if x.endswith('.txt')], key=sort_key)
        all_gt_poses = []
        for pose_file in pose_gt_files:
            pose = np.loadtxt(path.join(pose_gt_dir, pose_file))
            all_gt_poses.append(pose)
        all_gt_poses = np.stack(all_gt_poses)
        R_gt = all_gt_poses[:, :3, :3]
        t_gt = all_gt_poses[:, :3, 3]
        # Only GT frames whose file name also appears in the estimated poses
        # were registered; align GT onto the estimated trajectory with those.
        pose_files_st = set(pose_files)
        pose_gt_inds = np.array([i for i, pose_gt_file in enumerate(pose_gt_files) if pose_gt_file in pose_files_st], dtype=np.int64)
        print(len(pose_gt_inds), 'of', len(pose_gt_files), 'registered')
        if len(pose_gt_inds) < len(pose_gt_files):
            warnings.warn("Not all frames registered")
        r = R.reshape(-1, 9)
        r_gt = R_gt.reshape(-1, 9)
        transform = align_procrustes_rt(
            t_gt[pose_gt_inds], r_gt[pose_gt_inds],
            t, r, use_first_k=args.n_cameras_for_procrustes, want_transform=True)
        t_gt, r_gt = transform(t_gt, r_gt)
        R_gt = r_gt.reshape(-1, 3, 3)
        # Blue frustums: aligned ground-truth trajectory.
        scene.add_camera_frustum(name=f"traj_gt", focal_length=focal,
                                 image_width=image_wh[0],
                                 image_height=image_wh[1],
                                 z=0.1,
                                 r=R_gt,
                                 t=t_gt,
                                 connect=args.seg,
                                 color=[0.0, 0.0, 1.0])
        # Cyan marker at the first GT camera position.
        scene.add_sphere(name=f"start", translation=t_gt[0],
                         scale=avg_dist * 0.1,
                         color=[0.0, 1.0, 1.0])
    if path.isfile(point_cloud_path):
        point_cloud = np.load(point_cloud_path)
        # Apply the same normalization (rotation/translation, then scale).
        point_cloud = (T[:3, :3] @ point_cloud[:, :, None])[:, :, 0] + T[:3, 3]
        point_cloud *= scale
        scene.add_points("point_cloud", point_cloud, color=[0.0, 0.0, 0.0], unlit=True)
    out_dir = path.join(args.data_dir, "visual")
    # Hidden reference geometry, toggleable in the viewer.
    scene.add_axes(length=1.0, visible=False)
    scene.add_sphere("Unit Sphere", visible=False)
    scene.add_cube("Unit Cube", scale=2, visible=False)
    print('WRITING', out_dir)
    scene.display(out_dir, world_up=world_up, cam_origin=origin, cam_center=center, cam_forward=vforward)
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"numpy.load",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.arctan2",
"numpy.shape",
"os.path.isfile",
"numpy.mean",
"numpy.linalg.norm",
"numpy.diag",
"os.path.join",
"os.path.abspath",
"numpy.multiply",
"numpy.linalg.linalg.svd",
"numpy.transpose",
"numpy.identity",
"math.cos",
"n... | [((277, 303), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (293, 303), False, 'import os\n'), ((321, 346), 'os.path.join', 'path.join', (['DIR_PATH', '""".."""'], {}), "(DIR_PATH, '..')\n", (330, 346), False, 'from os import path\n'), ((1138, 1161), 'numpy.sqrt', 'np.sqrt', (['length', 'length'], {}), '(length, length)\n', (1145, 1161), True, 'import numpy as np\n'), ((1434, 1449), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (1442, 1449), False, 'import math\n'), ((1461, 1476), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (1469, 1476), False, 'import math\n'), ((1569, 1656), 'numpy.array', 'np.array', (['((cosa, 0.0, 0.0), (0.0, cosa, 0.0), (0.0, 0.0, cosa))'], {'dtype': 'np.float64'}), '(((cosa, 0.0, 0.0), (0.0, cosa, 0.0), (0.0, 0.0, cosa)), dtype=np.\n float64)\n', (1577, 1656), True, 'import numpy as np\n'), ((1784, 1925), 'numpy.array', 'np.array', (['((0.0, -direction[2], direction[1]), (direction[2], 0.0, -direction[0]), (-\n direction[1], direction[0], 0.0))'], {'dtype': 'np.float64'}), '(((0.0, -direction[2], direction[1]), (direction[2], 0.0, -\n direction[0]), (-direction[1], direction[0], 0.0)), dtype=np.float64)\n', (1792, 1925), True, 'import numpy as np\n'), ((2013, 2027), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (2024, 2027), True, 'import numpy as np\n'), ((3492, 3515), 'numpy.linalg.linalg.svd', 'np.linalg.linalg.svd', (['C'], {}), '(C)\n', (3512, 3515), True, 'import numpy as np\n'), ((3529, 3543), 'numpy.diag', 'np.diag', (['D_svd'], {}), '(D_svd)\n', (3536, 3543), True, 'import numpy as np\n'), ((3556, 3575), 'numpy.transpose', 'np.transpose', (['V_svd'], {}), '(V_svd)\n', (3568, 3575), True, 'import numpy as np\n'), ((3585, 3594), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3591, 3594), True, 'import numpy as np\n'), ((5861, 5877), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (5871, 5877), False, 'from PIL import Image\n'), ((6017, 6033), 
'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (6031, 6033), False, 'from argparse import ArgumentParser\n'), ((6924, 6963), 'os.path.join', 'path.join', (['args.data_dir', '"""pose_colmap"""'], {}), "(args.data_dir, 'pose_colmap')\n", (6933, 6963), False, 'from os import path\n'), ((7208, 7250), 'os.path.join', 'path.join', (['args.data_dir', '"""intrinsics.txt"""'], {}), "(args.data_dir, 'intrinsics.txt')\n", (7217, 7250), False, 'from os import path\n'), ((7274, 7312), 'os.path.join', 'path.join', (['args.data_dir', '"""points.npy"""'], {}), "(args.data_dir, 'points.npy')\n", (7283, 7312), False, 'from os import path\n'), ((8206, 8225), 'numpy.stack', 'np.stack', (['all_poses'], {}), '(all_poses)\n', (8214, 8225), True, 'import numpy as np\n'), ((10151, 10174), 'numpy.loadtxt', 'np.loadtxt', (['intrin_path'], {}), '(intrin_path)\n', (10161, 10174), True, 'import numpy as np\n'), ((10307, 10347), 'nerfvis.Scene', 'Scene', (["('colmap dataset: ' + dataset_name)"], {}), "('colmap dataset: ' + dataset_name)\n", (10312, 10347), False, 'from nerfvis import Scene\n'), ((10605, 10625), 'numpy.mean', 'np.mean', (['ups'], {'axis': '(0)'}), '(ups, axis=0)\n', (10612, 10625), True, 'import numpy as np\n'), ((10642, 10666), 'numpy.linalg.norm', 'np.linalg.norm', (['world_up'], {}), '(world_up)\n', (10656, 10666), True, 'import numpy as np\n'), ((10769, 10794), 'numpy.mean', 'np.mean', (['forwards'], {'axis': '(0)'}), '(forwards, axis=0)\n', (10776, 10794), True, 'import numpy as np\n'), ((10811, 10835), 'numpy.linalg.norm', 'np.linalg.norm', (['vforward'], {}), '(vforward)\n', (10825, 10835), True, 'import numpy as np\n'), ((10905, 10923), 'numpy.mean', 'np.mean', (['t'], {'axis': '(0)'}), '(t, axis=0)\n', (10912, 10923), True, 'import numpy as np\n'), ((13402, 13431), 'os.path.isfile', 'path.isfile', (['point_cloud_path'], {}), '(point_cloud_path)\n', (13413, 13431), False, 'from os import path\n'), ((13694, 13728), 'os.path.join', 'path.join', 
(['args.data_dir', '"""visual"""'], {}), "(args.data_dir, 'visual')\n", (13703, 13728), False, 'from os import path\n'), ((832, 875), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float64', 'copy': '(True)'}), '(data, dtype=np.float64, copy=True)\n', (840, 875), True, 'import numpy as np\n'), ((1109, 1134), 'numpy.sum', 'np.sum', (['(data * data)', 'axis'], {}), '(data * data, axis)\n', (1115, 1134), True, 'import numpy as np\n'), ((1204, 1232), 'numpy.expand_dims', 'np.expand_dims', (['length', 'axis'], {}), '(length, axis)\n', (1218, 1232), True, 'import numpy as np\n'), ((1707, 1737), 'numpy.outer', 'np.outer', (['direction', 'direction'], {}), '(direction, direction)\n', (1715, 1737), True, 'import numpy as np\n'), ((2125, 2174), 'numpy.array', 'np.array', (['point[:3]'], {'dtype': 'np.float64', 'copy': '(False)'}), '(point[:3], dtype=np.float64, copy=False)\n', (2133, 2174), True, 'import numpy as np\n'), ((2410, 2426), 'numpy.arctan2', 'np.arctan2', (['B', 'A'], {}), '(B, A)\n', (2420, 2426), True, 'import numpy as np\n'), ((3281, 3296), 'numpy.shape', 'np.shape', (['model'], {}), '(model)\n', (3289, 3296), True, 'import numpy as np\n'), ((5535, 5558), 'scipy.spatial.transform.Rotation.from_matrix', 'Rotation.from_matrix', (['R'], {}), '(R)\n', (5555, 5558), False, 'from scipy.spatial.transform import Rotation\n'), ((6576, 6603), 'os.path.abspath', 'path.abspath', (['args.data_dir'], {}), '(args.data_dir)\n', (6588, 6603), False, 'from os import path\n'), ((7043, 7063), 'os.path.isdir', 'path.isdir', (['pose_dir'], {}), '(pose_dir)\n', (7053, 7063), False, 'from os import path\n'), ((8555, 8575), 'numpy.mean', 'np.mean', (['ups'], {'axis': '(0)'}), '(ups, axis=0)\n', (8562, 8575), True, 'import numpy as np\n'), ((8596, 8620), 'numpy.linalg.norm', 'np.linalg.norm', (['world_up'], {}), '(world_up)\n', (8610, 8620), True, 'import numpy as np\n'), ((8644, 8670), 'numpy.array', 'np.array', (['[0.0, -1.0, 0.0]'], {}), '([0.0, -1.0, 0.0])\n', (8652, 8670), 
True, 'import numpy as np\n'), ((8730, 8761), 'numpy.cross', 'np.cross', (['world_up', 'up_camspace'], {}), '(world_up, up_camspace)\n', (8738, 8761), True, 'import numpy as np\n'), ((8777, 8876), 'numpy.array', 'np.array', (['[[0.0, -cross[2], cross[1]], [cross[2], 0.0, -cross[0]], [-cross[1], cross[\n 0], 0.0]]'], {}), '([[0.0, -cross[2], cross[1]], [cross[2], 0.0, -cross[0]], [-cross[1\n ], cross[0], 0.0]])\n', (8785, 8876), True, 'import numpy as np\n'), ((8940, 8949), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (8946, 8949), True, 'import numpy as np\n'), ((9734, 9743), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (9740, 9743), True, 'import numpy as np\n'), ((10255, 10292), 'os.path.join', 'path.join', (['images_dir', 'image_files[0]'], {}), '(images_dir, image_files[0])\n', (10264, 10292), False, 'from os import path\n'), ((10419, 10458), 'numpy.linalg.norm', 'np.linalg.norm', (['(t[1:] - t[:-1])'], {'axis': '(-1)'}), '(t[1:] - t[:-1], axis=-1)\n', (10433, 10458), True, 'import numpy as np\n'), ((12038, 12060), 'numpy.stack', 'np.stack', (['all_gt_poses'], {}), '(all_gt_poses)\n', (12046, 12060), True, 'import numpy as np\n'), ((13455, 13480), 'numpy.load', 'np.load', (['point_cloud_path'], {}), '(point_cloud_path)\n', (13462, 13480), True, 'import numpy as np\n'), ((1036, 1062), 'numpy.array', 'np.array', (['data'], {'copy': '(False)'}), '(data, copy=False)\n', (1044, 1062), True, 'import numpy as np\n'), ((2202, 2218), 'numpy.dot', 'np.dot', (['R', 'point'], {}), '(R, point)\n', (2208, 2218), True, 'import numpy as np\n'), ((3602, 3622), 'numpy.linalg.det', 'np.linalg.det', (['U_svd'], {}), '(U_svd)\n', (3615, 3622), True, 'import numpy as np\n'), ((3623, 3643), 'numpy.linalg.det', 'np.linalg.det', (['V_svd'], {}), '(V_svd)\n', (3636, 3643), True, 'import numpy as np\n'), ((4003, 4018), 'numpy.dot', 'np.dot', (['R', 'mu_D'], {}), '(R, mu_D)\n', (4009, 4018), True, 'import numpy as np\n'), ((9181, 9243), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0], 
[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (9189, 9243), True, 'import numpy as np\n'), ((9689, 9712), 'numpy.median', 'np.median', (['dvec'], {'axis': '(0)'}), '(dvec, axis=0)\n', (9698, 9712), True, 'import numpy as np\n'), ((10557, 10579), 'numpy.array', 'np.array', (['[0, -1.0, 0]'], {}), '([0, -1.0, 0])\n', (10565, 10579), True, 'import numpy as np\n'), ((10722, 10743), 'numpy.array', 'np.array', (['[0, 0, 1.0]'], {}), '([0, 0, 1.0])\n', (10730, 10743), True, 'import numpy as np\n'), ((12448, 12490), 'warnings.warn', 'warnings.warn', (['"""Not all frames registered"""'], {}), "('Not all frames registered')\n", (12461, 12490), False, 'import warnings\n'), ((933, 951), 'numpy.dot', 'np.dot', (['data', 'data'], {}), '(data, data)\n', (939, 951), True, 'import numpy as np\n'), ((3410, 3459), 'numpy.multiply', 'np.multiply', (['data_zerocentered', 'data_zerocentered'], {}), '(data_zerocentered, data_zerocentered)\n', (3421, 3459), True, 'import numpy as np\n'), ((3870, 3889), 'numpy.transpose', 'np.transpose', (['V_svd'], {}), '(V_svd)\n', (3882, 3889), True, 'import numpy as np\n'), ((3969, 3985), 'numpy.dot', 'np.dot', (['D_svd', 'S'], {}), '(D_svd, S)\n', (3975, 3985), True, 'import numpy as np\n'), ((6703, 6733), 'os.path.join', 'path.join', (['args.data_dir', 'cand'], {}), '(args.data_dir, cand)\n', (6712, 6733), False, 'from os import path\n'), ((6759, 6789), 'os.path.join', 'path.join', (['args.data_dir', 'cand'], {}), '(args.data_dir, cand)\n', (6768, 6789), False, 'from os import path\n'), ((7505, 7525), 'os.listdir', 'os.listdir', (['pose_dir'], {}), '(pose_dir)\n', (7515, 7525), False, 'import os\n'), ((7609, 7631), 'os.listdir', 'os.listdir', (['images_dir'], {}), '(images_dir)\n', (7619, 7631), False, 'import os\n'), ((8503, 8525), 'numpy.array', 'np.array', (['[0, -1.0, 0]'], {}), '([0, -1.0, 0])\n', (8511, 8525), True, 'import numpy as np\n'), ((9361, 9384), 'numpy.array', 'np.array', (['[0, 
0.0, 1.0]'], {}), '([0, 0.0, 1.0])\n', (9369, 9384), True, 'import numpy as np\n'), ((9905, 9943), 'numpy.linalg.norm', 'np.linalg.norm', (['(t + translate)'], {'axis': '(-1)'}), '(t + translate, axis=-1)\n', (9919, 9943), True, 'import numpy as np\n'), ((11942, 11975), 'os.path.join', 'path.join', (['pose_gt_dir', 'pose_file'], {}), '(pose_gt_dir, pose_file)\n', (11951, 11975), False, 'from os import path\n'), ((5583, 5608), 'scipy.spatial.transform.Rotation.from_matrix', 'Rotation.from_matrix', (['q_b'], {}), '(q_b)\n', (5603, 5608), False, 'from scipy.spatial.transform import Rotation\n'), ((7845, 7875), 'os.path.join', 'path.join', (['pose_dir', 'pose_file'], {}), '(pose_dir, pose_file)\n', (7854, 7875), False, 'from os import path\n'), ((7906, 7930), 'os.path.splitext', 'path.splitext', (['pose_file'], {}), '(pose_file)\n', (7919, 7930), False, 'from os import path\n'), ((11784, 11807), 'os.listdir', 'os.listdir', (['pose_gt_dir'], {}), '(pose_gt_dir)\n', (11794, 11807), False, 'import os\n'), ((10984, 11019), 'numpy.linalg.norm', 'np.linalg.norm', (['(t - origin)'], {'axis': '(-1)'}), '(t - origin, axis=-1)\n', (10998, 11019), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from soepy.shared.shared_constants import (
HOURS,
DATA_LABLES_SIM,
DATA_FORMATS_SIM,
)
from soepy.shared.shared_auxiliary import draw_disturbances
from soepy.shared.shared_auxiliary import calculate_utility_components
from soepy.shared.shared_auxiliary import calculate_employment_consumption_resources
def pyth_simulate(
    model_params,
    model_spec,
    states,
    indexer,
    emaxs,
    covariates,
    non_employment_consumption_resources,
    deductions_spec,
    income_tax_spec,
    child_age_update_rule,
    prob_educ_level,
    prob_child_age,
    prob_partner_present,
    prob_exp_ft,
    prob_exp_pt,
    prob_child,
    prob_partner_arrival,
    prob_partner_separation,
    is_expected,
):
    """Simulate agent experiences.

    Draws per-agent initial conditions (education level, age of the youngest
    child, partner status, prior part-/full-time experience, unobserved type),
    then simulates forward period by period: flow utilities and continuation
    values of the three alternatives (non-employment, part-time, full-time)
    are combined into value functions, the argmax alternative is chosen, and
    the exogenous child-age and partner processes are advanced.

    :param model_spec: specification object; provides e.g. ``seed_sim``,
        ``num_agents_sim``, ``num_periods``, ``mu``, ``delta``.
    :param emaxs: continuation values; columns 0-2 serve as the
        choice-specific continuation values.
    :param prob_*: probabilities of the respective initial conditions and
        exogenous transitions (indexed by education level and/or period).
    :return: ``pd.DataFrame`` with one row per simulated agent-period
        (columns ``DATA_LABLES_SIM``) plus ``Wage_Observed``, which holds the
        part-/full-time wage for working agents and NaN otherwise.

    Fix: ``.astype(np.int)`` replaced with ``.astype(int)`` — ``np.int`` was
    an alias of the builtin ``int``, deprecated in NumPy 1.20 and removed in
    NumPy 1.24, so the replacement is behavior-identical.
    """
    np.random.seed(model_spec.seed_sim)

    # Draw initial condition: education level
    initial_educ_level = np.random.choice(
        model_spec.num_educ_levels, model_spec.num_agents_sim, p=prob_educ_level
    )

    # Draw initial conditions: age of youngest child, partner status,
    # experience full-time and experience part-time
    initial_child_age = np.full(model_spec.num_agents_sim, np.nan)
    initial_partner_status = np.full(model_spec.num_agents_sim, np.nan)
    initial_pt_exp = np.full(model_spec.num_agents_sim, np.nan)
    initial_ft_exp = np.full(model_spec.num_agents_sim, np.nan)

    # All initial conditions are drawn conditional on the education level.
    for educ_level in range(model_spec.num_educ_levels):
        # Child (-1 encodes "no child yet"; see range start below)
        initial_child_age[initial_educ_level == educ_level] = np.random.choice(
            list(range(-1, model_spec.child_age_init_max + 1)),
            sum(initial_educ_level == educ_level),
            p=prob_child_age[educ_level],
        )
        # Partner
        initial_partner_status[initial_educ_level == educ_level] = np.random.binomial(
            size=sum(initial_educ_level == educ_level),
            n=1,
            p=prob_partner_present[educ_level],
        )
        # Part-time experience
        initial_pt_exp[initial_educ_level == educ_level] = np.random.choice(
            list(range(0, model_spec.init_exp_max + 1)),
            sum(initial_educ_level == educ_level),
            p=prob_exp_pt[educ_level],
        )
        # Full-time experience
        initial_ft_exp[initial_educ_level == educ_level] = np.random.choice(
            list(range(0, model_spec.init_exp_max + 1)),
            sum(initial_educ_level == educ_level),
            p=prob_exp_ft[educ_level],
        )

    # Draw random (unobserved) type
    type_ = np.random.choice(
        list(np.arange(model_spec.num_types)),
        model_spec.num_agents_sim,
        p=model_params.type_shares,
    )

    # Draw shocks
    attrs_spec = ["seed_sim", "num_periods", "num_agents_sim"]
    draws_sim = draw_disturbances(
        *[getattr(model_spec, attr) for attr in attrs_spec], model_params
    )

    # Calculate utility components
    log_wage_systematic, non_consumption_utilities = calculate_utility_components(
        model_params, model_spec, states, covariates, is_expected
    )

    # Determine initial states according to initial conditions
    initial_states = pd.DataFrame(
        np.column_stack(
            (
                np.arange(model_spec.num_agents_sim),
                np.array(model_spec.educ_years)[initial_educ_level],
                initial_educ_level,
                np.zeros(model_spec.num_agents_sim),
                initial_pt_exp,
                initial_ft_exp,
                type_,
                initial_child_age,
                initial_partner_status,
            )
        ),
        columns=DATA_LABLES_SIM[:9],
    ).astype(int)

    data = []

    # Loop over all periods
    for period in range(model_spec.num_periods):

        initial_states_in_period = initial_states.loc[
            initial_states.Period.eq(period)
        ].to_numpy()

        # Get all agents in the period.
        if period == 0:
            current_states = initial_states_in_period
        else:
            current_states = np.vstack((current_states, initial_states_in_period))

        # Columns of current_states: 0 identifier, then the state variables.
        idx = indexer[
            current_states[:, 1],  # period
            current_states[:, 2],  # educ_level
            current_states[:, 3],  # lagged_choice
            current_states[:, 4],  # exp_pt
            current_states[:, 5],  # exp_ft
            current_states[:, 6],  # type
            current_states[:, 7],  # age_youngest_child
            current_states[:, 8],  # partner_indicator
        ]

        # Extract corresponding utilities
        current_log_wage_systematic = log_wage_systematic[idx]
        current_non_consumption_utilities = non_consumption_utilities[idx]

        current_non_employment_consumption_resources = (
            non_employment_consumption_resources[idx]
        )

        current_equivalence_scale = covariates[idx][:, 2]
        current_male_wages = covariates[idx][:, 1]
        current_child_benefits = covariates[idx][:, 3]

        # Realized wages: systematic part plus this period's shock draw.
        current_wages = np.exp(
            current_log_wage_systematic.reshape(-1, 1)
            + draws_sim[period, current_states[:, 0]]
        )

        # Household income for part-/full-time work (flattened Fortran-order
        # so the tax function gets one long vector).
        current_hh_income = HOURS[1:] * current_wages + current_male_wages.reshape(
            -1, 1
        )
        current_hh_income = current_hh_income.reshape(
            2 * current_wages.shape[0], order="F"
        )

        current_employment_consumption_resources = (
            calculate_employment_consumption_resources(
                deductions_spec,
                income_tax_spec,
                current_hh_income,
            )
        )

        current_employment_consumption_resources = (
            current_employment_consumption_resources.reshape(
                current_wages.shape[0], 2, order="F"
            )
            + current_child_benefits.reshape(-1, 1)
        )

        # Columns: non-employment, part-time, full-time.
        current_consumption_resources = np.hstack(
            (
                current_non_employment_consumption_resources.reshape(-1, 1),
                current_employment_consumption_resources,
            )
        )

        # Calculate total values for all choices (CRRA flow utility)
        flow_utilities = (
            ((current_consumption_resources) / current_equivalence_scale.reshape(-1, 1))
            ** model_spec.mu
            / model_spec.mu
            * current_non_consumption_utilities
        )

        # Extract continuation values for all choices
        continuation_values = emaxs[idx, :3]

        value_functions = flow_utilities + model_spec.delta * continuation_values

        # Determine choice as option with highest choice specific value function
        choice = np.argmax(value_functions, axis=1)

        child_current_age = current_states[:, 7]

        # Update child age
        # Modification for simulations with very few periods
        # where maximum childbearing age is not reached by the end of the model
        if period == model_spec.num_periods - 1:
            child_new_age = child_current_age
        # Periods where the probability to have a child is still positive
        elif period <= model_spec.last_child_bearing_period:
            # Update current states according to exogenous processes
            # Relate to child age updating
            kids_current_draw = np.random.binomial(
                size=current_states.shape[0],
                n=1,
                p=prob_child[period + 1, current_states[:, 2]],
            )
            # Convert to age of child according to age update rule
            child_new_age = np.where(
                kids_current_draw == 0, child_age_update_rule[idx], 0
            )
        # Periods where no new child can arrive
        else:
            child_new_age = child_age_update_rule[idx]

        # Update partner status according to random draw
        current_partner_status = current_states[:, 8]
        new_partner_status = np.full(current_states.shape[0], np.nan)

        # Get individuals without partner: a partner may arrive
        current_states_no_partner = current_states[current_states[:, 8] == 0]
        partner_arrival_current_draw = np.random.binomial(
            size=current_states_no_partner.shape[0],
            n=1,
            p=prob_partner_arrival[period, current_states_no_partner[:, 2]],
        )
        new_partner_status[current_states[:, 8] == 0] = partner_arrival_current_draw

        # Get individuals with partner: the partner may leave
        current_states_with_partner = current_states[current_states[:, 8] == 1]
        partner_separation_current_draw = np.random.binomial(
            size=current_states_with_partner.shape[0],
            n=1,
            p=prob_partner_separation[period, current_states_with_partner[:, 2]],
        )
        new_partner_status[current_states[:, 8] == 1] = (
            current_partner_status[current_states[:, 8] == 1]
            - partner_separation_current_draw
        )

        # Record period experiences
        rows = np.column_stack(
            (
                current_states.copy(),
                choice,
                current_log_wage_systematic,
                current_wages,
                current_non_consumption_utilities,
                flow_utilities,
                continuation_values,
                value_functions,
            )
        )
        data.append(rows)

        # Update current states according to choice
        current_states[:, 1] += 1
        current_states[:, 3] = choice
        current_states[:, 4] = np.where(
            choice == 1, current_states[:, 4] + 1, current_states[:, 4]
        )
        current_states[:, 5] = np.where(
            choice == 2, current_states[:, 5] + 1, current_states[:, 5]
        )
        current_states[:, 7] = child_new_age
        current_states[:, 8] = new_partner_status

    dataset = pd.DataFrame(np.vstack(data), columns=DATA_LABLES_SIM).astype(
        DATA_FORMATS_SIM
    )

    # Determine the period wage given choice in the period
    dataset["Wage_Observed"] = np.nan
    dataset.loc[dataset["Choice"] == 1, "Wage_Observed"] = dataset.loc[
        dataset["Choice"] == 1, "Period_Wage_P"
    ]
    dataset.loc[dataset["Choice"] == 2, "Wage_Observed"] = dataset.loc[
        dataset["Choice"] == 2, "Period_Wage_F"
    ]

    return dataset
| [
"numpy.full",
"numpy.random.seed",
"numpy.random.binomial",
"numpy.argmax",
"numpy.zeros",
"numpy.where",
"numpy.arange",
"numpy.array",
"numpy.random.choice",
"numpy.vstack",
"soepy.shared.shared_auxiliary.calculate_utility_components",
"soepy.shared.shared_auxiliary.calculate_employment_cons... | [((809, 844), 'numpy.random.seed', 'np.random.seed', (['model_spec.seed_sim'], {}), '(model_spec.seed_sim)\n', (823, 844), True, 'import numpy as np\n'), ((917, 1012), 'numpy.random.choice', 'np.random.choice', (['model_spec.num_educ_levels', 'model_spec.num_agents_sim'], {'p': 'prob_educ_level'}), '(model_spec.num_educ_levels, model_spec.num_agents_sim, p=\n prob_educ_level)\n', (933, 1012), True, 'import numpy as np\n'), ((1169, 1211), 'numpy.full', 'np.full', (['model_spec.num_agents_sim', 'np.nan'], {}), '(model_spec.num_agents_sim, np.nan)\n', (1176, 1211), True, 'import numpy as np\n'), ((1241, 1283), 'numpy.full', 'np.full', (['model_spec.num_agents_sim', 'np.nan'], {}), '(model_spec.num_agents_sim, np.nan)\n', (1248, 1283), True, 'import numpy as np\n'), ((1305, 1347), 'numpy.full', 'np.full', (['model_spec.num_agents_sim', 'np.nan'], {}), '(model_spec.num_agents_sim, np.nan)\n', (1312, 1347), True, 'import numpy as np\n'), ((1369, 1411), 'numpy.full', 'np.full', (['model_spec.num_agents_sim', 'np.nan'], {}), '(model_spec.num_agents_sim, np.nan)\n', (1376, 1411), True, 'import numpy as np\n'), ((2962, 3053), 'soepy.shared.shared_auxiliary.calculate_utility_components', 'calculate_utility_components', (['model_params', 'model_spec', 'states', 'covariates', 'is_expected'], {}), '(model_params, model_spec, states, covariates,\n is_expected)\n', (2990, 3053), False, 'from soepy.shared.shared_auxiliary import calculate_utility_components\n'), ((5437, 5536), 'soepy.shared.shared_auxiliary.calculate_employment_consumption_resources', 'calculate_employment_consumption_resources', (['deductions_spec', 'income_tax_spec', 'current_hh_income'], {}), '(deductions_spec, income_tax_spec,\n current_hh_income)\n', (5479, 5536), False, 'from soepy.shared.shared_auxiliary import calculate_employment_consumption_resources\n'), ((6639, 6673), 'numpy.argmax', 'np.argmax', (['value_functions'], {'axis': '(1)'}), 
'(value_functions, axis=1)\n', (6648, 6673), True, 'import numpy as np\n'), ((7885, 7925), 'numpy.full', 'np.full', (['current_states.shape[0]', 'np.nan'], {}), '(current_states.shape[0], np.nan)\n', (7892, 7925), True, 'import numpy as np\n'), ((8086, 8220), 'numpy.random.binomial', 'np.random.binomial', ([], {'size': 'current_states_no_partner.shape[0]', 'n': '(1)', 'p': 'prob_partner_arrival[period, current_states_no_partner[:, 2]]'}), '(size=current_states_no_partner.shape[0], n=1, p=\n prob_partner_arrival[period, current_states_no_partner[:, 2]])\n', (8104, 8220), True, 'import numpy as np\n'), ((8510, 8651), 'numpy.random.binomial', 'np.random.binomial', ([], {'size': 'current_states_with_partner.shape[0]', 'n': '(1)', 'p': 'prob_partner_separation[period, current_states_with_partner[:, 2]]'}), '(size=current_states_with_partner.shape[0], n=1, p=\n prob_partner_separation[period, current_states_with_partner[:, 2]])\n', (8528, 8651), True, 'import numpy as np\n'), ((9452, 9521), 'numpy.where', 'np.where', (['(choice == 1)', '(current_states[:, 4] + 1)', 'current_states[:, 4]'], {}), '(choice == 1, current_states[:, 4] + 1, current_states[:, 4])\n', (9460, 9521), True, 'import numpy as np\n'), ((9575, 9644), 'numpy.where', 'np.where', (['(choice == 2)', '(current_states[:, 5] + 1)', 'current_states[:, 5]'], {}), '(choice == 2, current_states[:, 5] + 1, current_states[:, 5])\n', (9583, 9644), True, 'import numpy as np\n'), ((2565, 2596), 'numpy.arange', 'np.arange', (['model_spec.num_types'], {}), '(model_spec.num_types)\n', (2574, 2596), True, 'import numpy as np\n'), ((4036, 4089), 'numpy.vstack', 'np.vstack', (['(current_states, initial_states_in_period)'], {}), '((current_states, initial_states_in_period))\n', (4045, 4089), True, 'import numpy as np\n'), ((7268, 7373), 'numpy.random.binomial', 'np.random.binomial', ([], {'size': 'current_states.shape[0]', 'n': '(1)', 'p': 'prob_child[period + 1, current_states[:, 2]]'}), '(size=current_states.shape[0], n=1, 
p=prob_child[period +\n 1, current_states[:, 2]])\n', (7286, 7373), True, 'import numpy as np\n'), ((7529, 7592), 'numpy.where', 'np.where', (['(kids_current_draw == 0)', 'child_age_update_rule[idx]', '(0)'], {}), '(kids_current_draw == 0, child_age_update_rule[idx], 0)\n', (7537, 7592), True, 'import numpy as np\n'), ((9790, 9805), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (9799, 9805), True, 'import numpy as np\n'), ((3218, 3254), 'numpy.arange', 'np.arange', (['model_spec.num_agents_sim'], {}), '(model_spec.num_agents_sim)\n', (3227, 3254), True, 'import numpy as np\n'), ((3377, 3412), 'numpy.zeros', 'np.zeros', (['model_spec.num_agents_sim'], {}), '(model_spec.num_agents_sim)\n', (3385, 3412), True, 'import numpy as np\n'), ((3272, 3303), 'numpy.array', 'np.array', (['model_spec.educ_years'], {}), '(model_spec.educ_years)\n', (3280, 3303), True, 'import numpy as np\n')] |
import os
import unittest
from typing import Optional
from unittest.mock import MagicMock, patch, call
import numpy as np
import pandas as pd
import xarray as xr
from pywatts.wrapper.keras_wrapper import KerasWrapper
# Fixture: serialized form of a KerasWrapper as produced by KerasWrapper.save,
# used by TestKerasWrapper.test_load to exercise round-trip deserialization.
stored_model = {
    "aux_models": [
        [
            "encoder",
            os.path.join("pipe1", "SimpleAE_4encoder.h5")
        ],
        [
            "decoder",
            os.path.join("pipe1", "SimpleAE_4decoder.h5")
        ]
    ],
    "class": "KerasWrapper",
    "model": os.path.join("pipe1", "SimpleAE_4.h5"),
    "module": "pywatts.wrapper.keras_wrapper",
    "name": "SimpleAE",
    'is_fitted': False,
    "params": {
        "compile_kwargs": {
            "loss": "mse",
            "metrics": [
                "mse"
            ],
            "optimizer": "Adam"
        },
        "fit_kwargs": {
            "batch_size": 512,
            "epochs": 1
        }
    }
}
class TestKerasWrapper(unittest.TestCase):
    """Unit tests for pywatts' KerasWrapper using a mocked Keras model."""
    def setUp(self) -> None:
        # Fresh mock model and wrapper before every test.
        self.keras_mock: Optional[MagicMock] = MagicMock()
        self.keras_wrapper = KerasWrapper(self.keras_mock, compile_kwargs={"test": "arg1"}, fit_kwargs={"42": 24})
    def tearDown(self) -> None:
        self.keras_wrapper: Optional[KerasWrapper] = None
        self.keras_mock = None
    def test_fit(self):
        """fit() should compile the model once and forward data/kwargs to model.fit."""
        self.keras_wrapper.set_params(fit_kwargs={"epochs": 200}, compile_kwargs={"optimizer": "adam"})
        time = pd.date_range('2000-01-01', freq='24H', periods=7)
        da = xr.DataArray([[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]],
                          dims=["time", "horizon"], coords={"time": time, "horizon": [0, 1]})
        target = xr.DataArray([[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]],
                              dims=["time", "horizon"], coords={"time": time, "horizon": [0, 1]})
        self.keras_wrapper.fit(data=da, target=target)
        self.keras_mock.compile.assert_called_once_with(optimizer="adam")
        self.keras_mock.fit.assert_called_once()
        args = self.keras_mock.fit.call_args
        np.testing.assert_equal(args[1]["x"]["data"],
                                np.array([[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]]))
        # NOTE(review): the trailing comma below makes this statement a 1-tuple
        # expression; harmless, but the comma is stray.
        np.testing.assert_equal(args[1]["y"]["target"],
                                np.array([[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]])),
        self.assertEqual(len(args[1]["x"]), 1)
        self.assertEqual(len(args[1]["y"]), 1)
        self.assertEqual(args[1]["epochs"], 200)
        self.assertEqual(len(args[1]), 3)
        self.assertTrue(self.keras_wrapper.compiled)
    def test_transform_single_output(self):
        """transform() should map a single model output to the single target name."""
        time = pd.date_range('2000-01-01', freq='24H', periods=7)
        da = xr.DataArray([[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]],
                          dims=["time", "horizon"], coords={"time": time, "horizon": [0, 1]})
        target = np.array([[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]])
        self.keras_mock.predict.return_value = target
        self.keras_mock.outputs[0].name = "first/output"
        self.keras_wrapper.targets = ["target"]
        result = self.keras_wrapper.transform(x=da)
        self.keras_mock.predict.assert_called_once()
        np.testing.assert_equal(target,
                                result["target"])
    def test_transform_multiple_output(self):
        """transform() should map multiple model outputs to their target names in order."""
        time = pd.date_range('2000-01-01', freq='24H', periods=7)
        da = xr.DataArray([[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]],
                          dims=["time", "horizon"], coords={"time": time, "horizon": [0, 1]})
        target = [np.array([[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]]),
                  np.array([[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]])]
        self.keras_mock.predict.return_value = target
        first = MagicMock()
        first.name = "first/output"
        second = MagicMock()
        second.name = "second"
        outputs = [first, second]
        self.keras_mock.outputs = outputs
        self.keras_wrapper.targets = ["target1", "target2"]
        result = self.keras_wrapper.transform(x=da)
        self.keras_mock.predict.assert_called_once()
        args = self.keras_mock.predict.call_args
        np.testing.assert_equal(args[0][0]["x"],
                                np.array([[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]]))
        np.testing.assert_equal(target[0],
                                result["target1"])
        np.testing.assert_equal(target[1],
                                result["target2"])
    def test_get_params(self):
        """get_params() should return the kwargs handed to the constructor."""
        self.assertEqual(self.keras_wrapper.get_params(),
                         {'compile_kwargs': {'test': 'arg1'},
                          'fit_kwargs': {'42': 24}})
    def test_set_params(self):
        """set_params() should replace both kwargs dicts wholesale."""
        self.assertEqual(self.keras_wrapper.get_params(),
                         {'compile_kwargs': {'test': 'arg1'},
                          'fit_kwargs': {'42': 24}})
        self.keras_wrapper.set_params(fit_kwargs={"loss": "mse"},
                                      compile_kwargs={"optimizer": "Adam"})
        self.assertEqual(self.keras_wrapper.get_params(),
                         {
                             "fit_kwargs": {
                                 "loss": "mse"
                             }, "compile_kwargs": {
                                 "optimizer": "Adam"
                             },
                         })
    def test_save(self):
        """save() should store the model via the filemanager and return its JSON description."""
        fm_mock = MagicMock()
        fm_mock.get_path.return_value = os.path.join("new_path", "to_somewhere", "KerasWrapper.h5")
        json = self.keras_wrapper.save(fm_mock)
        self.keras_mock.save.assert_called_once_with(
            filepath=os.path.join("new_path", "to_somewhere", "KerasWrapper.h5"))
        # NOTE(review): `has_calls` is not a Mock assertion method (the real one is
        # `assert_has_calls`); Mock silently auto-creates the attribute, so this
        # line asserts nothing. Verify the intended call args before fixing.
        fm_mock.get_path.has_calls(call(os.path.join("to_somewhere", "KerasWrapper.h5")),
                                   any_order=True)
        self.assertEqual(json, {'aux_models': [],
                                'class': 'KerasWrapper',
                                'is_fitted': False,
                                'model': os.path.join("new_path", "to_somewhere", "KerasWrapper.h5"),
                                'module': 'pywatts.wrapper.keras_wrapper',
                                'name': 'KerasWrapper',
                                'params': {'compile_kwargs': {"test": "arg1"}, 'fit_kwargs': {"42": 24}}
                                })
    @patch('pywatts.wrapper.keras_wrapper.tf.keras.models.load_model')
    def test_load(self, load_model_mock):
        """load() should reload the main model plus both aux models and restore params."""
        new_keras_mock = MagicMock()
        load_model_mock.return_value = new_keras_mock
        new_keras_wrapper = KerasWrapper.load(stored_model)
        calls_open = [call(filepath=os.path.join("pipe1", "SimpleAE_4decoder.h5")),
                      call(filepath=os.path.join("pipe1", "SimpleAE_4encoder.h5")),
                      call(filepath=os.path.join("pipe1", "SimpleAE_4.h5")),
                      ]
        load_model_mock.assert_has_calls(calls_open, any_order=True)
        self.assertEqual(load_model_mock.call_count, 3)
        self.assertEqual(new_keras_mock, new_keras_wrapper.model)
        self.assertEqual(new_keras_wrapper.get_params(),
                         {
                             "compile_kwargs": {
                                 "loss": "mse",
                                 "metrics": [
                                     "mse"
                                 ],
                                 "optimizer": "Adam"
                             },
                             "fit_kwargs": {
                                 "batch_size": 512,
                                 "epochs": 1
                             }
                         })
| [
"pywatts.wrapper.keras_wrapper.KerasWrapper",
"pandas.date_range",
"unittest.mock.MagicMock",
"unittest.mock.patch",
"numpy.array",
"xarray.DataArray",
"numpy.testing.assert_equal",
"pywatts.wrapper.keras_wrapper.KerasWrapper.load",
"os.path.join"
] | [((509, 547), 'os.path.join', 'os.path.join', (['"""pipe1"""', '"""SimpleAE_4.h5"""'], {}), "('pipe1', 'SimpleAE_4.h5')\n", (521, 547), False, 'import os\n'), ((6535, 6600), 'unittest.mock.patch', 'patch', (['"""pywatts.wrapper.keras_wrapper.tf.keras.models.load_model"""'], {}), "('pywatts.wrapper.keras_wrapper.tf.keras.models.load_model')\n", (6540, 6600), False, 'from unittest.mock import MagicMock, patch, call\n'), ((1038, 1049), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1047, 1049), False, 'from unittest.mock import MagicMock, patch, call\n'), ((1079, 1169), 'pywatts.wrapper.keras_wrapper.KerasWrapper', 'KerasWrapper', (['self.keras_mock'], {'compile_kwargs': "{'test': 'arg1'}", 'fit_kwargs': "{'42': 24}"}), "(self.keras_mock, compile_kwargs={'test': 'arg1'}, fit_kwargs={\n '42': 24})\n", (1091, 1169), False, 'from pywatts.wrapper.keras_wrapper import KerasWrapper\n'), ((1431, 1481), 'pandas.date_range', 'pd.date_range', (['"""2000-01-01"""'], {'freq': '"""24H"""', 'periods': '(7)'}), "('2000-01-01', freq='24H', periods=7)\n", (1444, 1481), True, 'import pandas as pd\n'), ((1496, 1639), 'xarray.DataArray', 'xr.DataArray', (['[[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]]'], {'dims': "['time', 'horizon']", 'coords': "{'time': time, 'horizon': [0, 1]}"}), "([[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]], dims\n =['time', 'horizon'], coords={'time': time, 'horizon': [0, 1]})\n", (1508, 1639), True, 'import xarray as xr\n'), ((1679, 1822), 'xarray.DataArray', 'xr.DataArray', (['[[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]]'], {'dims': "['time', 'horizon']", 'coords': "{'time': time, 'horizon': [0, 1]}"}), "([[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]], dims\n =['time', 'horizon'], coords={'time': time, 'horizon': [0, 1]})\n", (1691, 1822), True, 'import xarray as xr\n'), ((2683, 2733), 'pandas.date_range', 'pd.date_range', (['"""2000-01-01"""'], {'freq': '"""24H"""', 'periods': '(7)'}), "('2000-01-01', 
freq='24H', periods=7)\n", (2696, 2733), True, 'import pandas as pd\n'), ((2748, 2891), 'xarray.DataArray', 'xr.DataArray', (['[[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]]'], {'dims': "['time', 'horizon']", 'coords': "{'time': time, 'horizon': [0, 1]}"}), "([[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]], dims\n =['time', 'horizon'], coords={'time': time, 'horizon': [0, 1]})\n", (2760, 2891), True, 'import xarray as xr\n'), ((2931, 2997), 'numpy.array', 'np.array', (['[[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]]'], {}), '([[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]])\n', (2939, 2997), True, 'import numpy as np\n'), ((3274, 3323), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['target', "result['target']"], {}), "(target, result['target'])\n", (3297, 3323), True, 'import numpy as np\n'), ((3418, 3468), 'pandas.date_range', 'pd.date_range', (['"""2000-01-01"""'], {'freq': '"""24H"""', 'periods': '(7)'}), "('2000-01-01', freq='24H', periods=7)\n", (3431, 3468), True, 'import pandas as pd\n'), ((3483, 3626), 'xarray.DataArray', 'xr.DataArray', (['[[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]]'], {'dims': "['time', 'horizon']", 'coords': "{'time': time, 'horizon': [0, 1]}"}), "([[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]], dims\n =['time', 'horizon'], coords={'time': time, 'horizon': [0, 1]})\n", (3495, 3626), True, 'import xarray as xr\n'), ((3892, 3903), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3901, 3903), False, 'from unittest.mock import MagicMock, patch, call\n'), ((3957, 3968), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3966, 3968), False, 'from unittest.mock import MagicMock, patch, call\n'), ((4451, 4504), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['target[0]', "result['target1']"], {}), "(target[0], result['target1'])\n", (4474, 4504), True, 'import numpy as np\n'), ((4545, 4598), 'numpy.testing.assert_equal', 
'np.testing.assert_equal', (['target[1]', "result['target2']"], {}), "(target[1], result['target2'])\n", (4568, 4598), True, 'import numpy as np\n'), ((5560, 5571), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (5569, 5571), False, 'from unittest.mock import MagicMock, patch, call\n'), ((5612, 5671), 'os.path.join', 'os.path.join', (['"""new_path"""', '"""to_somewhere"""', '"""KerasWrapper.h5"""'], {}), "('new_path', 'to_somewhere', 'KerasWrapper.h5')\n", (5624, 5671), False, 'import os\n'), ((6668, 6679), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (6677, 6679), False, 'from unittest.mock import MagicMock, patch, call\n'), ((6762, 6793), 'pywatts.wrapper.keras_wrapper.KerasWrapper.load', 'KerasWrapper.load', (['stored_model'], {}), '(stored_model)\n', (6779, 6793), False, 'from pywatts.wrapper.keras_wrapper import KerasWrapper\n'), ((302, 347), 'os.path.join', 'os.path.join', (['"""pipe1"""', '"""SimpleAE_4encoder.h5"""'], {}), "('pipe1', 'SimpleAE_4encoder.h5')\n", (314, 347), False, 'import os\n'), ((404, 449), 'os.path.join', 'os.path.join', (['"""pipe1"""', '"""SimpleAE_4decoder.h5"""'], {}), "('pipe1', 'SimpleAE_4decoder.h5')\n", (416, 449), False, 'import os\n'), ((2159, 2225), 'numpy.array', 'np.array', (['[[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]]'], {}), '([[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]])\n', (2167, 2225), True, 'import numpy as np\n'), ((3667, 3733), 'numpy.array', 'np.array', (['[[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]]'], {}), '([[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]])\n', (3675, 3733), True, 'import numpy as np\n'), ((3753, 3819), 'numpy.array', 'np.array', (['[[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]]'], {}), '([[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]])\n', (3761, 3819), True, 'import numpy as np\n'), ((4374, 4440), 'numpy.array', 'np.array', (['[[2, 0], [3, 2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]]'], {}), '([[2, 0], [3, 
2], [4, 3], [5, 4], [6, 5], [7, 6], [8, 7]])\n', (4382, 4440), True, 'import numpy as np\n'), ((2315, 2381), 'numpy.array', 'np.array', (['[[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]]'], {}), '([[5, 5], [6, 6], [7, 7], [7, 7], [8, 8], [9, 9], [9, 9]])\n', (2323, 2381), True, 'import numpy as np\n'), ((5795, 5854), 'os.path.join', 'os.path.join', (['"""new_path"""', '"""to_somewhere"""', '"""KerasWrapper.h5"""'], {}), "('new_path', 'to_somewhere', 'KerasWrapper.h5')\n", (5807, 5854), False, 'import os\n'), ((5896, 5943), 'os.path.join', 'os.path.join', (['"""to_somewhere"""', '"""KerasWrapper.h5"""'], {}), "('to_somewhere', 'KerasWrapper.h5')\n", (5908, 5943), False, 'import os\n'), ((6197, 6256), 'os.path.join', 'os.path.join', (['"""new_path"""', '"""to_somewhere"""', '"""KerasWrapper.h5"""'], {}), "('new_path', 'to_somewhere', 'KerasWrapper.h5')\n", (6209, 6256), False, 'import os\n'), ((6830, 6875), 'os.path.join', 'os.path.join', (['"""pipe1"""', '"""SimpleAE_4decoder.h5"""'], {}), "('pipe1', 'SimpleAE_4decoder.h5')\n", (6842, 6875), False, 'import os\n'), ((6914, 6959), 'os.path.join', 'os.path.join', (['"""pipe1"""', '"""SimpleAE_4encoder.h5"""'], {}), "('pipe1', 'SimpleAE_4encoder.h5')\n", (6926, 6959), False, 'import os\n'), ((6998, 7036), 'os.path.join', 'os.path.join', (['"""pipe1"""', '"""SimpleAE_4.h5"""'], {}), "('pipe1', 'SimpleAE_4.h5')\n", (7010, 7036), False, 'import os\n')] |
#!/usr/bin/env python
import json
import os
import urllib
import uuid

import cv2
import imutils
import numpy as np
import PIL
import tensorflow as tf
from flask import Flask, request, send_from_directory
from flask_cors import CORS, cross_origin

import models_tf as models
import utils
from birads_prediction_tf import inference
def save_file(nparray, filename):
    """Persist a numpy image array as an image under static/img/test/.

    :param nparray: image pixels as a numpy array
    :param filename: name under which to store the image
    :return: the filename that was written
    """
    target_path = "./static/img/test/{}".format(filename)
    PIL.Image.fromarray(nparray).save(target_path)
    return filename
def classify(names):
    """Run BI-RADS inference over previously saved images and return JSON.

    :param names: filenames (relative to static/img/test/) to classify
    :return: compact, sorted, indented JSON string of the prediction array
    """
    inference_params = {
        "model_path": "saved_models/model.ckpt",
        "device_type": "cpu",
        "gpu_number": 0,
        "image_path": "static/img/test/",
        "input_size": (2600, 2000),
    }
    predictions = inference(inference_params, names).tolist()
    return json.dumps(predictions, separators=(',', ':'), sort_keys=True, indent=4)
app = Flask(__name__)
CORS(app)
@app.route('/')
def index():
    """Serve the bundled single-page front end."""
    return app.send_static_file('index.html')
@app.route('/js/<path:path>')
def send_js(path):
    """Serve a JavaScript asset from the local ``js`` directory.

    BUGFIX: ``send_from_directory`` was never imported, so this route raised
    NameError at request time; it is now imported from flask at the top of
    the file. Flask's helper also guards against path traversal.
    """
    return send_from_directory('js', path)
@app.route('/css/<path:path>')
def send_css(path):
    """Serve a CSS asset from the local ``css`` directory.

    BUGFIX: ``send_from_directory`` was never imported, so this route raised
    NameError at request time; it is now imported from flask at the top of
    the file. Flask's helper also guards against path traversal.
    """
    return send_from_directory('css', path)
def allowed_file(filename):
    """Return True when *filename* carries one of the allowed image extensions.

    Extensions are compared case-insensitively; a name without a dot is rejected.
    """
    allowed_extensions = {'png', 'jpg', 'jpeg', 'bmp'}
    _, dot, extension = filename.rpartition('.')
    return dot == '.' and extension.lower() in allowed_extensions
@app.route('/api/v1/classify', methods=['POST'])
def classifyOnPost():
    """Decode every uploaded image, store it, and return BI-RADS predictions."""
    saved_names = []
    for uploaded in request.files.getlist('file'):
        raw_bytes = np.fromstring(uploaded.read(), np.uint8)
        decoded = cv2.imdecode(raw_bytes, cv2.IMREAD_COLOR)
        grayscale = cv2.cvtColor(decoded, cv2.COLOR_RGB2GRAY)
        saved_names.append(save_file(grayscale, uploaded.filename))
    return classify(saved_names)
if __name__ == "__main__":
app.run(host='0.0.0.0') | [
"flask.request.files.getlist",
"flask_cors.CORS",
"cv2.cvtColor",
"flask.Flask",
"cv2.imdecode",
"json.dumps",
"birads_prediction_tf.inference",
"PIL.Image.fromarray",
"numpy.fromstring"
] | [((872, 887), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (877, 887), False, 'from flask import Flask, request\n'), ((888, 897), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (892, 897), False, 'from flask_cors import CORS, cross_origin\n'), ((353, 381), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['nparray'], {}), '(nparray)\n', (372, 381), False, 'import PIL\n'), ((706, 735), 'birads_prediction_tf.inference', 'inference', (['parameters_', 'names'], {}), '(parameters_, names)\n', (715, 735), False, 'from birads_prediction_tf import inference\n'), ((790, 864), 'json.dumps', 'json.dumps', (['final_results'], {'separators': "(',', ':')", 'sort_keys': '(True)', 'indent': '(4)'}), "(final_results, separators=(',', ':'), sort_keys=True, indent=4)\n", (800, 864), False, 'import json\n'), ((1452, 1481), 'flask.request.files.getlist', 'request.files.getlist', (['"""file"""'], {}), "('file')\n", (1473, 1481), False, 'from flask import Flask, request\n'), ((1527, 1559), 'numpy.fromstring', 'np.fromstring', (['filestr', 'np.uint8'], {}), '(filestr, np.uint8)\n', (1540, 1559), True, 'import numpy as np\n'), ((1570, 1607), 'cv2.imdecode', 'cv2.imdecode', (['npimg', 'cv2.IMREAD_COLOR'], {}), '(npimg, cv2.IMREAD_COLOR)\n', (1582, 1607), False, 'import cv2\n'), ((1622, 1659), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (1634, 1659), False, 'import cv2\n')] |
from sgan import SGAN
import sys
import numpy as np
from data_io import save_tensor
if len(sys.argv) <= 1:
print ("please give model filename")
raise Exception('no filename specified')
name = sys.argv[1]
print ("using stored model", name)
##sample a periodically tiling texture
def mosaic_tile(sgan, NZ1=12, NZ2=12, repeat=(2, 3)):
    """Sample a periodically tiling texture from a trained SGAN and save it.

    :param sgan: trained SGAN instance (provides config.nz, gen_depth, generate)
    :param NZ1: height of the spatial noise grid
    :param NZ2: width of the spatial noise grid
    :param repeat: (rows, cols) to tile the cropped sample in the saved mosaic,
                   or None to skip saving the repeated mosaic
    """
    ovp = 2  # how many z values should we keep for overlap, for 5 layer architecture and (5,5) kernels 2 is enough
    tot_subsample = 2 ** sgan.gen_depth
    print("NZ1 NZ2 for tilable texture: ", NZ1, NZ2)
    # make the noise wrap around in both spatial dimensions so the generated
    # texture's borders match up
    sample_zmb = np.random.uniform(-1., 1., (1, sgan.config.nz, NZ1, NZ2))
    sample_zmb[:, :, :, -ovp * 2:] = sample_zmb[:, :, :, :ovp * 2]
    sample_zmb[:, :, -ovp * 2:, :] = sample_zmb[:, :, :ovp * 2, :]
    samples = sgan.generate(sample_zmb)
    # measure the optimal offset of pixels we crop from the edge of the tile: this should have loss of 0 if the tile is perfectly periodical
    ##note: for Theano code we had a nice analytical formula for the optimal offset
    ##note: for the Lasagna code we calculate this empirically since the conv. arithmetic is not well documented and varies depending on NZ1 and NZ2
    ## calc. the pixel discrepancy btw the left and right column, and top and bottom row, when cropping crop1 and crop2 pixels
    def offsetLoss(crop1, crop2):
        return np.abs(samples[:, :, :, crop1] - samples[:, :, :, -crop2]).mean() + np.abs(
            samples[:, :, crop1] - samples[:, :, -crop2]).mean()
    best = 1e6
    crop1 = 0
    crop2 = 0
    # BUGFIX: use floor division (//). Plain '/' yields floats under Python 3,
    # making range() raise TypeError; '//' behaves identically under Python 2.
    for i in range(ovp * tot_subsample // 2, ovp * tot_subsample):
        for j in range(ovp * tot_subsample // 2, ovp * tot_subsample):
            loss = offsetLoss(i, j)
            if loss < best:
                best = loss
                crop1 = i
                crop2 = j
    print("optimal offsets", crop1, crop2, "offset edge errors", best)
    samples = samples[:, :, crop1:-crop2, crop1:-crop2]
    s = (samples.shape[2], samples.shape[3])
    print("tile sample size", samples.shape)
    save_tensor(samples[0], "samples/TILE_%s_%s.jpg" % (name.replace('/', '_'), s))
    if repeat is not None:
        # repeat the cropped tile to visually confirm seamless tiling
        sbig = np.zeros((3, repeat[0] * s[0], repeat[1] * s[1]))
        for i in range(repeat[0]):
            for j in range(repeat[1]):
                sbig[:, i * s[0]:(i + 1) * s[0], j * s[1]:(j + 1) * s[1]] = samples[0]
        save_tensor(sbig, "samples/TILE_%s_%s_%s.jpg" % (name.replace('/', '_'), s, repeat))
    return
# sample a random texture
def sample_texture(sgan, NZ1=60, NZ2=60):
    """Generate one random (non-tiling) texture sample and save it to samples/.

    :param sgan: trained SGAN instance
    :param NZ1: height of the spatial noise grid
    :param NZ2: width of the spatial noise grid
    """
    noise = np.random.uniform(-1., 1., (1, c.nz, NZ1, NZ2))
    generated = sgan.generate(noise)
    save_tensor(generated[0], 'samples/stored_%s.jpg' % (name.replace('/', '_')))
sgan = SGAN(name=name)
c = sgan.config
print ("nz", c.nz)
print ("G values", c.gen_fn, c.gen_ks)
print ("D values", c.dis_fn, c.dis_ks)
sample_texture(sgan)
mosaic_tile(sgan)
| [
"numpy.random.uniform",
"sgan.SGAN",
"numpy.zeros",
"numpy.abs"
] | [((2736, 2751), 'sgan.SGAN', 'SGAN', ([], {'name': 'name'}), '(name=name)\n', (2740, 2751), False, 'from sgan import SGAN\n'), ((572, 631), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(1, sgan.config.nz, NZ1, NZ2)'], {}), '(-1.0, 1.0, (1, sgan.config.nz, NZ1, NZ2))\n', (589, 631), True, 'import numpy as np\n'), ((2567, 2616), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)', '(1, c.nz, NZ1, NZ2)'], {}), '(-1.0, 1.0, (1, c.nz, NZ1, NZ2))\n', (2584, 2616), True, 'import numpy as np\n'), ((2167, 2216), 'numpy.zeros', 'np.zeros', (['(3, repeat[0] * s[0], repeat[1] * s[1])'], {}), '((3, repeat[0] * s[0], repeat[1] * s[1]))\n', (2175, 2216), True, 'import numpy as np\n'), ((1355, 1413), 'numpy.abs', 'np.abs', (['(samples[:, :, :, crop1] - samples[:, :, :, -crop2])'], {}), '(samples[:, :, :, crop1] - samples[:, :, :, -crop2])\n', (1361, 1413), True, 'import numpy as np\n'), ((1423, 1475), 'numpy.abs', 'np.abs', (['(samples[:, :, crop1] - samples[:, :, -crop2])'], {}), '(samples[:, :, crop1] - samples[:, :, -crop2])\n', (1429, 1475), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from starfish import ImageStack
from starfish.core.imagestack.test.factories import unique_tiles_imagestack
from starfish.core.test.factories import (
two_spot_informative_blank_coded_data_factory,
two_spot_one_hot_coded_data_factory,
two_spot_sparse_coded_data_factory,
)
from starfish.types import Axes, Features
from .._base import DetectSpotsAlgorithmBase
from ..blob import BlobDetector
from ..detect import detect_spots
from ..local_max_peak_finder import LocalMaxPeakFinder
from ..trackpy_local_max_peak_finder import TrackpyLocalMaxPeakFinder
# verify all spot finders handle different coding types
_, ONE_HOT_IMAGESTACK, ONE_HOT_MAX_INTENSITY = two_spot_one_hot_coded_data_factory()
_, SPARSE_IMAGESTACK, SPARSE_MAX_INTENSITY = two_spot_sparse_coded_data_factory()
_, BLANK_IMAGESTACK, BLANK_MAX_INTENSITY = two_spot_informative_blank_coded_data_factory()
# make sure that all spot finders handle empty arrays
EMPTY_IMAGESTACK = ImageStack.from_numpy(np.zeros((4, 2, 10, 100, 100), dtype=np.float32))
def simple_gaussian_spot_detector() -> BlobDetector:
    """Create a basic Gaussian blob detector (sigma 1-4, max-intensity measurement)."""
    return BlobDetector(min_sigma=1, max_sigma=4, num_sigma=5, threshold=0, measurement_type='max')
def simple_trackpy_local_max_spot_detector() -> TrackpyLocalMaxPeakFinder:
    """Create a basic trackpy-based local max peak finder."""
    return TrackpyLocalMaxPeakFinder(
        spot_diameter=3,
        min_mass=0.01,
        max_size=10,
        separation=2,
    )
def simple_local_max_spot_detector() -> LocalMaxPeakFinder:
    """Create a basic local max peak finder with no size/area filtering."""
    return LocalMaxPeakFinder(
        min_distance=6,
        stringency=0,
        min_obj_area=0,
        max_obj_area=np.inf,
        threshold=0
    )
# initialize spot detectors
gaussian_spot_detector = simple_gaussian_spot_detector()
trackpy_local_max_spot_detector = simple_trackpy_local_max_spot_detector()
local_max_spot_detector = simple_local_max_spot_detector()
# test parameterization
test_parameters = (
'data_stack, spot_detector, radius_is_gyration, max_intensity',
[
(ONE_HOT_IMAGESTACK, gaussian_spot_detector, False, ONE_HOT_MAX_INTENSITY),
(ONE_HOT_IMAGESTACK, trackpy_local_max_spot_detector, True, ONE_HOT_MAX_INTENSITY),
(ONE_HOT_IMAGESTACK, local_max_spot_detector, False, ONE_HOT_MAX_INTENSITY),
(SPARSE_IMAGESTACK, gaussian_spot_detector, False, SPARSE_MAX_INTENSITY),
(SPARSE_IMAGESTACK, trackpy_local_max_spot_detector, True, SPARSE_MAX_INTENSITY),
(SPARSE_IMAGESTACK, local_max_spot_detector, False, SPARSE_MAX_INTENSITY),
(BLANK_IMAGESTACK, gaussian_spot_detector, False, BLANK_MAX_INTENSITY),
(BLANK_IMAGESTACK, trackpy_local_max_spot_detector, True, BLANK_MAX_INTENSITY),
(BLANK_IMAGESTACK, local_max_spot_detector, False, BLANK_MAX_INTENSITY),
]
)
@pytest.mark.parametrize(*test_parameters)
def test_spot_detection_with_reference_image(
        data_stack: ImageStack,
        spot_detector: DetectSpotsAlgorithmBase,
        radius_is_gyration: bool,
        max_intensity: float,
):
    """This testing method uses a reference image to identify spot locations."""
    def call_detect_spots(stack):
        # run spot finding against `stack`, using the max projection of the same
        # stack over (round, ch) as the reference image
        return detect_spots(
            data_stack=stack,
            spot_finding_method=spot_detector.image_to_spots,
            reference_image=stack,
            reference_image_max_projection_axes=(Axes.ROUND, Axes.CH),
            measurement_function=np.max,
            radius_is_gyration=radius_is_gyration,
            n_processes=1,
        )
    intensity_table = call_detect_spots(data_stack)
    assert intensity_table.sizes[Features.AXIS] == 2, "wrong number of spots detected"
    # each spot appears in two (round, ch) tiles, hence 2x the max intensity
    expected = [max_intensity * 2, max_intensity * 2]
    assert np.allclose(intensity_table.sum((Axes.ROUND, Axes.CH)).values, expected), \
        "wrong spot intensities detected"
    # verify this execution strategy produces an empty intensitytable when called with a blank image
    empty_intensity_table = call_detect_spots(EMPTY_IMAGESTACK)
    assert empty_intensity_table.sizes[Features.AXIS] == 0
@pytest.mark.parametrize(*test_parameters)
def test_spot_detection_with_reference_image_from_max_projection(
        data_stack: ImageStack,
        spot_detector: DetectSpotsAlgorithmBase,
        radius_is_gyration: bool,
        max_intensity: float,
):
    """This testing method builds a reference image to identify spot locations."""
    # NOTE(review): the body is identical to
    # test_spot_detection_with_reference_image above — `reference_image=stack`
    # is passed directly rather than a separately built max projection.
    # Presumably one variant was meant to omit reference_image; confirm intent.
    def call_detect_spots(stack):
        return detect_spots(
            data_stack=stack,
            spot_finding_method=spot_detector.image_to_spots,
            reference_image=stack,
            reference_image_max_projection_axes=(Axes.ROUND, Axes.CH),
            measurement_function=np.max,
            radius_is_gyration=radius_is_gyration,
            n_processes=1,
        )
    intensity_table = call_detect_spots(data_stack)
    assert intensity_table.sizes[Features.AXIS] == 2, "wrong number of spots detected"
    expected = [max_intensity * 2, max_intensity * 2]
    assert np.allclose(intensity_table.sum((Axes.ROUND, Axes.CH)).values, expected), \
        "wrong spot intensities detected"
    empty_intensity_table = call_detect_spots(EMPTY_IMAGESTACK)
    assert empty_intensity_table.sizes[Features.AXIS] == 0
@pytest.mark.parametrize(*test_parameters)
def test_spot_finding_no_reference_image(
        data_stack: ImageStack,
        spot_detector: DetectSpotsAlgorithmBase,
        radius_is_gyration: bool,
        max_intensity: float,
):
    """
    This testing method does not provide a reference image, and should therefore check for spots
    in each (round, ch) combination in sequence. With the given input, it should detect 4 spots.
    """
    def call_detect_spots(stack):
        # no reference image: spots are found per (round, ch) tile
        return detect_spots(
            data_stack=stack,
            spot_finding_method=spot_detector.image_to_spots,
            measurement_function=np.max,
            radius_is_gyration=radius_is_gyration,
            n_processes=1,
        )
    intensity_table = call_detect_spots(data_stack)
    assert intensity_table.sizes[Features.AXIS] == 4, "wrong number of spots detected"
    expected = [max_intensity] * 4
    assert np.allclose(intensity_table.sum((Axes.ROUND, Axes.CH)).values, expected), \
        "wrong spot intensities detected"
    empty_intensity_table = call_detect_spots(EMPTY_IMAGESTACK)
    assert empty_intensity_table.sizes[Features.AXIS] == 0
def _make_labeled_image() -> ImageStack:
    """Build a small ImageStack whose axes carry non-contiguous (labeled) indices."""
    ROUND_LABELS = (1, 4, 6)
    CH_LABELS = (2, 4, 6, 8)
    ZPLANE_LABELS = (3, 4)
    HEIGHT = 2
    WIDTH = 4
    return unique_tiles_imagestack(
        ROUND_LABELS, CH_LABELS, ZPLANE_LABELS, HEIGHT, WIDTH)
def test_reference_image_spot_detection_with_image_with_labeled_axes():
    """This testing method uses a reference image to identify spot locations.

    It only verifies that detection runs without error on a stack whose axes
    carry non-contiguous labels; no assertions on the result are made.

    FIX: dropped the unused ``monkeypatch`` fixture parameter — nothing in the
    body patched anything, so requesting the fixture was dead weight.
    """
    def call_detect_spots(stack):
        return detect_spots(
            data_stack=stack,
            spot_finding_method=gaussian_spot_detector.image_to_spots,
            reference_image=stack,
            reference_image_max_projection_axes=(Axes.ROUND, Axes.CH),
            measurement_function=np.max,
            radius_is_gyration=False,
            n_processes=1,
        )
    data_stack = _make_labeled_image()
    call_detect_spots(data_stack)
def test_spot_detection_with_image_with_labeled_axes():
    """This testing method uses no reference image to identify spot locations."""
    # Only checks that detection runs without error on a labeled-axes stack.
    def call_detect_spots(stack):
        return detect_spots(
            data_stack=stack,
            spot_finding_method=gaussian_spot_detector.image_to_spots,
            measurement_function=np.max,
            radius_is_gyration=False,
            n_processes=1,
        )
    data_stack = _make_labeled_image()
    call_detect_spots(data_stack)
| [
"starfish.core.test.factories.two_spot_sparse_coded_data_factory",
"starfish.core.imagestack.test.factories.unique_tiles_imagestack",
"numpy.zeros",
"starfish.core.test.factories.two_spot_one_hot_coded_data_factory",
"starfish.core.test.factories.two_spot_informative_blank_coded_data_factory",
"pytest.mar... | [((701, 738), 'starfish.core.test.factories.two_spot_one_hot_coded_data_factory', 'two_spot_one_hot_coded_data_factory', ([], {}), '()\n', (736, 738), False, 'from starfish.core.test.factories import two_spot_informative_blank_coded_data_factory, two_spot_one_hot_coded_data_factory, two_spot_sparse_coded_data_factory\n'), ((784, 820), 'starfish.core.test.factories.two_spot_sparse_coded_data_factory', 'two_spot_sparse_coded_data_factory', ([], {}), '()\n', (818, 820), False, 'from starfish.core.test.factories import two_spot_informative_blank_coded_data_factory, two_spot_one_hot_coded_data_factory, two_spot_sparse_coded_data_factory\n'), ((864, 911), 'starfish.core.test.factories.two_spot_informative_blank_coded_data_factory', 'two_spot_informative_blank_coded_data_factory', ([], {}), '()\n', (909, 911), False, 'from starfish.core.test.factories import two_spot_informative_blank_coded_data_factory, two_spot_one_hot_coded_data_factory, two_spot_sparse_coded_data_factory\n'), ((2854, 2895), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['*test_parameters'], {}), '(*test_parameters)\n', (2877, 2895), False, 'import pytest\n'), ((4114, 4155), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['*test_parameters'], {}), '(*test_parameters)\n', (4137, 4155), False, 'import pytest\n'), ((5293, 5334), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['*test_parameters'], {}), '(*test_parameters)\n', (5316, 5334), False, 'import pytest\n'), ((1008, 1056), 'numpy.zeros', 'np.zeros', (['(4, 2, 10, 100, 100)'], {'dtype': 'np.float32'}), '((4, 2, 10, 100, 100), dtype=np.float32)\n', (1016, 1056), True, 'import numpy as np\n'), ((6617, 6695), 'starfish.core.imagestack.test.factories.unique_tiles_imagestack', 'unique_tiles_imagestack', (['ROUND_LABELS', 'CH_LABELS', 'ZPLANE_LABELS', 'HEIGHT', 'WIDTH'], {}), '(ROUND_LABELS, CH_LABELS, ZPLANE_LABELS, HEIGHT, WIDTH)\n', (6640, 6695), False, 'from starfish.core.imagestack.test.factories import 
unique_tiles_imagestack\n')] |
## for data
import numpy as np
import pandas as pd
## for geospatial
import folium
import geopy
## for machine learning
from sklearn import preprocessing
## for statistical tests
import scipy
## for plotting
import matplotlib.pyplot as plt
import seaborn as sns
## for deep learning
import minisom
'''
Fit a Self Organizing Map neural network.
:parameter
:param X: dtf
:param model: minisom instance - if None uses a map of 5*sqrt(n) x 5*sqrt(n) neurons
:param lst_2Dplot: list - 2 features to use for a 2D plot, if None it plots only if X is 2D
:return
model and dtf with clusters
'''
def fit_dl_cluster(X, model=None, epochs=100, lst_2Dplot=None, figsize=(10,5)):
    """Fit a Self-Organizing Map on X and attach cluster labels/centroid flags."""
    ## model: default map size is 5*sqrt(n) x 5*sqrt(n) neurons
    model = minisom.MiniSom(x=int(np.sqrt(5*np.sqrt(X.shape[0]))), y=int(np.sqrt(5*np.sqrt(X.shape[0]))), input_len=X.shape[1]) if model is None else model
    scaler = preprocessing.StandardScaler()
    X_preprocessed = scaler.fit_transform(X.values)
    model.train_batch(X_preprocessed, num_iteration=epochs, verbose=False)
    ## clustering: each row is assigned the flat index of its winning neuron
    map_shape = (model.get_weights().shape[0], model.get_weights().shape[1])
    print("--- map shape:", map_shape, "---")
    dtf_X = X.copy()
    dtf_X["cluster"] = np.ravel_multi_index(np.array([model.winner(x) for x in X_preprocessed]).T, dims=map_shape)
    k = dtf_X["cluster"].nunique()
    print("--- found", k, "clusters ---")
    print(dtf_X.groupby("cluster")["cluster"].count().sort_values(ascending=False))
    ## find real centroids: nearest data point to each neuron weight vector
    cluster_centers = np.array([vec for center in model.get_weights() for vec in center])
    closest, distances = scipy.cluster.vq.vq(cluster_centers, X_preprocessed)
    dtf_X["centroids"] = 0
    for i in closest:
        # NOTE(review): chained indexing (series.iloc[i] = 1) can trigger
        # pandas' SettingWithCopyWarning; confirm it writes through here.
        dtf_X["centroids"].iloc[i] = 1
    ## plot (only when 2 features are given or X itself is 2D)
    if (lst_2Dplot is not None) or (X.shape[1] == 2):
        lst_2Dplot = X.columns.tolist() if lst_2Dplot is None else lst_2Dplot
        utils_plot_cluster(dtf_X, x1=lst_2Dplot[0], x2=lst_2Dplot[1], th_centroids=scaler.inverse_transform(cluster_centers), figsize=figsize)
    return model, dtf_X
'''
Creates a map with folium.
:parameter
    :param dtf: pandas
    :param x: str - column with latitude
    :param y: str - column with longitude
    :param starting_point: list - coordinates (ex. [45.0703, 7.6869])
    :param tiles: str - "cartodbpositron", "OpenStreetMap", "Stamen Terrain", "Stamen Toner"
    :param popup: str - column with text to popup if clicked, if None there is no popup
    :param size: str - column with size variable, if None takes size=5
    :param color: str - column with color variable, if None takes default color
    :param lst_colors: list - list with multiple colors to use if color column is not None, if not given it generates randomly
    :param marker: str - column with marker variable, takes up to 7 unique values
:return
    map object to display
'''
def plot_map(dtf, x, y, start, zoom=12, tiles="cartodbpositron", popup=None, size=None, color=None, legend=False, lst_colors=None, marker=None):
    # Work on a copy so the caller's dataframe is never mutated.
    data = dtf.copy()
    ## create columns for plotting
    if color is not None:
        lst_elements = sorted(list(dtf[color].unique()))
        # One random hex color per category unless the caller supplied a palette.
        lst_colors = ['#%06X' % np.random.randint(0, 0xFFFFFF) for i in range(len(lst_elements))] if lst_colors is None else lst_colors
        data["color"] = data[color].apply(lambda x: lst_colors[lst_elements.index(x)])
    if size is not None:
        # Rescale the size column into a readable 3..15 pixel radius range.
        scaler = preprocessing.MinMaxScaler(feature_range=(3,15))
        data["size"] = scaler.fit_transform(data[size].values.reshape(-1,1)).reshape(-1)
    ## map
    map_ = folium.Map(location=start, tiles=tiles, zoom_start=zoom)
    # NOTE(review): row[popup] raises KeyError when popup is None, although the
    # docstring above promises "no popup" in that case — confirm callers always pass popup.
    if (size is not None) and (color is None):
        data.apply(lambda row: folium.CircleMarker(location=[row[x],row[y]], popup=row[popup],
                                                   color='#3186cc', fill=True, radius=row["size"]).add_to(map_), axis=1)
    elif (size is None) and (color is not None):
        data.apply(lambda row: folium.CircleMarker(location=[row[x],row[y]], popup=row[popup],
                                                   color=row["color"], fill=True, radius=5).add_to(map_), axis=1)
    elif (size is not None) and (color is not None):
        data.apply(lambda row: folium.CircleMarker(location=[row[x],row[y]], popup=row[popup],
                                                   color=row["color"], fill=True, radius=row["size"]).add_to(map_), axis=1)
    else:
        data.apply(lambda row: folium.CircleMarker(location=[row[x],row[y]], popup=row[popup],
                                                   color='#3186cc', fill=True, radius=5).add_to(map_), axis=1)
    ## legend
    if (color is not None) and (legend is True):
        # Build a small fixed-position HTML box listing each category with its color.
        legend_html = """<div style="position:fixed; bottom:10px; left:10px; border:2px solid black; z-index:9999; font-size:14px;"> <b>"""+color+""":</b><br>"""
        for i in lst_elements:
            legend_html = legend_html+""" <i class="fa fa-circle fa-1x" style="color:"""+lst_colors[lst_elements.index(i)]+""""></i> """+str(i)+"""<br>"""
        legend_html = legend_html+"""</div>"""
        map_.get_root().html.add_child(folium.Element(legend_html))
    ## add marker
    if marker is not None:
        lst_elements = sorted(list(dtf[marker].unique()))
        lst_colors = ["orange","orange","orange"] #9
        # NOTE(review): only 3 marker colors are listed although the docstring
        # above says "up to 7 unique values" — verify intent.
        ### too many values, can't mark
        if len(lst_elements) > len(lst_colors):
            raise Exception("marker has uniques > "+str(len(lst_colors)))
        ### binary case (1/0): mark only 1s
        elif len(lst_elements) == 2:
            data[data[marker]==lst_elements[1]].apply(lambda row: folium.Marker(location=[row[x],row[y]], popup=row[marker], draggable=False,
                                                      icon=folium.Icon(color=lst_colors[0])).add_to(map_), axis=1)
        ### normal case: mark all values
        else:
            for i in lst_elements:
                data[data[marker]==i].apply(lambda row: folium.Marker(location=[row[x],row[y]], popup=row[marker], draggable=False,
                                            icon=folium.Icon(color=lst_colors[lst_elements.index(i)])).add_to(map_), axis=1)
    return map_
def label_utilization(row):
    # Bucket a record's BED_UTILIZATION ratio into an ordinal label:
    # <= 0.33 -> 'low', (0.33, 0.66) -> 'medium', otherwise -> 'high'.
    # NOTE(review): ``&`` works because both comparisons yield booleans, but
    # plain ``and`` would be the conventional choice for scalar row values.
    if row['BED_UTILIZATION'] <= 0.33:
        return 'low'
    elif (row['BED_UTILIZATION'] > 0.33) & (row['BED_UTILIZATION'] < 0.66):
        return 'medium'
    else:
        return 'high' | [
"sklearn.preprocessing.StandardScaler",
"scipy.cluster.vq.vq",
"sklearn.preprocessing.MinMaxScaler",
"folium.Element",
"numpy.random.randint",
"folium.Map",
"folium.Icon",
"numpy.sqrt",
"folium.CircleMarker"
] | [((872, 902), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (900, 902), False, 'from sklearn import preprocessing\n'), ((1620, 1672), 'scipy.cluster.vq.vq', 'scipy.cluster.vq.vq', (['cluster_centers', 'X_preprocessed'], {}), '(cluster_centers, X_preprocessed)\n', (1639, 1672), False, 'import scipy\n'), ((3593, 3649), 'folium.Map', 'folium.Map', ([], {'location': 'start', 'tiles': 'tiles', 'zoom_start': 'zoom'}), '(location=start, tiles=tiles, zoom_start=zoom)\n', (3603, 3649), False, 'import folium\n'), ((3432, 3481), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {'feature_range': '(3, 15)'}), '(feature_range=(3, 15))\n', (3458, 3481), False, 'from sklearn import preprocessing\n'), ((5178, 5205), 'folium.Element', 'folium.Element', (['legend_html'], {}), '(legend_html)\n', (5192, 5205), False, 'import folium\n'), ((3198, 3228), 'numpy.random.randint', 'np.random.randint', (['(0)', '(16777215)'], {}), '(0, 16777215)\n', (3215, 3228), True, 'import numpy as np\n'), ((3730, 3847), 'folium.CircleMarker', 'folium.CircleMarker', ([], {'location': '[row[x], row[y]]', 'popup': 'row[popup]', 'color': '"""#3186cc"""', 'fill': '(True)', 'radius': "row['size']"}), "(location=[row[x], row[y]], popup=row[popup], color=\n '#3186cc', fill=True, radius=row['size'])\n", (3749, 3847), False, 'import folium\n'), ((747, 766), 'numpy.sqrt', 'np.sqrt', (['X.shape[0]'], {}), '(X.shape[0])\n', (754, 766), True, 'import numpy as np\n'), ((786, 805), 'numpy.sqrt', 'np.sqrt', (['X.shape[0]'], {}), '(X.shape[0])\n', (793, 805), True, 'import numpy as np\n'), ((3995, 4105), 'folium.CircleMarker', 'folium.CircleMarker', ([], {'location': '[row[x], row[y]]', 'popup': 'row[popup]', 'color': "row['color']", 'fill': '(True)', 'radius': '(5)'}), "(location=[row[x], row[y]], popup=row[popup], color=row[\n 'color'], fill=True, radius=5)\n", (4014, 4105), False, 'import folium\n'), ((4257, 4377), 'folium.CircleMarker', 
'folium.CircleMarker', ([], {'location': '[row[x], row[y]]', 'popup': 'row[popup]', 'color': "row['color']", 'fill': '(True)', 'radius': "row['size']"}), "(location=[row[x], row[y]], popup=row[popup], color=row[\n 'color'], fill=True, radius=row['size'])\n", (4276, 4377), False, 'import folium\n'), ((4486, 4593), 'folium.CircleMarker', 'folium.CircleMarker', ([], {'location': '[row[x], row[y]]', 'popup': 'row[popup]', 'color': '"""#3186cc"""', 'fill': '(True)', 'radius': '(5)'}), "(location=[row[x], row[y]], popup=row[popup], color=\n '#3186cc', fill=True, radius=5)\n", (4505, 4593), False, 'import folium\n'), ((5840, 5872), 'folium.Icon', 'folium.Icon', ([], {'color': 'lst_colors[0]'}), '(color=lst_colors[0])\n', (5851, 5872), False, 'import folium\n')] |
from abc import abstractmethod
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import griddata
class Model:
    """Abstract response-surface model.

    Stores sampled coordinates ``X`` and costs ``y`` and defines the
    predict/fit/cost interface plus 1D and 2D slice-plotting helpers.
    """

    def __init__(self):
        super().__init__()
        # Sampled coordinates (rows) and their observed costs.
        # NOTE(review): initialized as lists but the plotting helpers below use
        # ``self.X.shape`` — presumably fit() replaces them with arrays; confirm.
        self.X = []
        self.y = []

    @abstractmethod
    def predict(self, X):
        ''' Return a tuple of the mean and uncertainty (optional) of the response surface at the point X '''
        return

    @abstractmethod
    def fit(self, X, y):
        ''' Fit the response surface to a passed set of coordinates and costs '''
        return

    @abstractmethod
    def cost(self, X, **kwargs):
        ''' Returns an effective cost at a point X, which is generally a function of the mean and/or uncertainty '''
        return

    def plot_1d(self, axis, X0, ax=None):
        ''' Plots a slice along one axis passing through a point X0'''
        dim = self.X.shape[1]
        # Plot limits come from the extent of the sampled data per dimension.
        bounds = [[self.X[:, i].min(), self.X[:,i].max()] for i in range(dim)]
        N=100
        # Hold every coordinate fixed at X0 except the swept axis.
        X = np.zeros(shape=(N, dim))
        for i in range(dim):
            X[:, i] = np.ones(N)*X0[i]
        xi = np.linspace(*bounds[axis], N)
        X[:, axis] = xi
        mu, sigma = self.predict(X)
        if ax is None:
            plt.figure(dpi=300)
            ax = plt.gca()
        ax.plot(xi, mu, color='#1f77b4', label='Model')
        # mean +/- 1.96*sigma gives the 95% confidence band.
        ax.fill_between(xi, mu-1.96*sigma, mu+1.96*sigma, color='#1f77b4', alpha=0.25, label='95% confidence region')

    def plot_2d(self, ax0, ax1, X0, levels=20):
        ''' Plots a filled-contour slice over axes ax0 and ax1 passing through a point X0'''
        N = 100
        dim = self.X.shape[1]
        bounds = [[self.X[:, i].min(), self.X[:,i].max()] for i in range(dim)]
        xi = np.linspace(bounds[ax0][0], bounds[ax0][1], N)
        yi = np.linspace(bounds[ax1][0], bounds[ax1][1], N)
        grid = []
        grid.append(xi)
        grid.append(yi)
        # Cartesian product of the two swept axes, flattened to (N*N, 2).
        grid = np.transpose(np.meshgrid(*[grid[n] for n in range(2)])).reshape(-1, 2)
        X = np.zeros(shape=(N**2, dim))
        for i in range(dim):
            X[:, i] = np.ones(N**2)*X0[i]
        X[:, ax0] = grid[:, 0]
        X[:, ax1] = grid[:, 1]
        mu, sigma = self.predict(X)
        ordinate_mesh, abscissa_mesh = np.meshgrid(xi, yi)
        # Interpolate predictions back onto the regular mesh for contouring.
        cost_grid = griddata(X[:,[ax0, ax1]], mu, (ordinate_mesh,abscissa_mesh))
        plt.figure(dpi=300)
        plt.contourf(ordinate_mesh, abscissa_mesh, cost_grid, cmap='viridis', levels=levels)
        plt.colorbar()
        plt.xlim(bounds[ax0])
        plt.ylim(bounds[ax1])
        ## plot crosshairs through best point
        plt.plot(xi, X0[ax1] * np.ones(N), 'k')
        plt.plot(X0[ax0] * np.ones(N), yi, 'k')
        plt.scatter(X0[ax0], X0[ax1], marker='o', c='k') | [
"matplotlib.pyplot.xlim",
"numpy.meshgrid",
"scipy.interpolate.griddata",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"numpy.ones",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.contourf",
"numpy.linspace",
"matplotlib.pyplot.gca"
] | [((975, 999), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N, dim)'}), '(shape=(N, dim))\n', (983, 999), True, 'import numpy as np\n'), ((1083, 1112), 'numpy.linspace', 'np.linspace', (['*bounds[axis]', 'N'], {}), '(*bounds[axis], N)\n', (1094, 1112), True, 'import numpy as np\n'), ((1698, 1744), 'numpy.linspace', 'np.linspace', (['bounds[ax0][0]', 'bounds[ax0][1]', 'N'], {}), '(bounds[ax0][0], bounds[ax0][1], N)\n', (1709, 1744), True, 'import numpy as np\n'), ((1758, 1804), 'numpy.linspace', 'np.linspace', (['bounds[ax1][0]', 'bounds[ax1][1]', 'N'], {}), '(bounds[ax1][0], bounds[ax1][1], N)\n', (1769, 1804), True, 'import numpy as np\n'), ((1983, 2012), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N ** 2, dim)'}), '(shape=(N ** 2, dim))\n', (1991, 2012), True, 'import numpy as np\n'), ((2230, 2249), 'numpy.meshgrid', 'np.meshgrid', (['xi', 'yi'], {}), '(xi, yi)\n', (2241, 2249), True, 'import numpy as np\n'), ((2270, 2332), 'scipy.interpolate.griddata', 'griddata', (['X[:, [ax0, ax1]]', 'mu', '(ordinate_mesh, abscissa_mesh)'], {}), '(X[:, [ax0, ax1]], mu, (ordinate_mesh, abscissa_mesh))\n', (2278, 2332), False, 'from scipy.interpolate import griddata\n'), ((2348, 2367), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(300)'}), '(dpi=300)\n', (2358, 2367), True, 'import matplotlib.pyplot as plt\n'), ((2376, 2464), 'matplotlib.pyplot.contourf', 'plt.contourf', (['ordinate_mesh', 'abscissa_mesh', 'cost_grid'], {'cmap': '"""viridis"""', 'levels': 'levels'}), "(ordinate_mesh, abscissa_mesh, cost_grid, cmap='viridis',\n levels=levels)\n", (2388, 2464), True, 'import matplotlib.pyplot as plt\n'), ((2469, 2483), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2481, 2483), True, 'import matplotlib.pyplot as plt\n'), ((2492, 2513), 'matplotlib.pyplot.xlim', 'plt.xlim', (['bounds[ax0]'], {}), '(bounds[ax0])\n', (2500, 2513), True, 'import matplotlib.pyplot as plt\n'), ((2522, 2543), 'matplotlib.pyplot.ylim', 'plt.ylim', (['bounds[ax1]'], {}), 
'(bounds[ax1])\n', (2530, 2543), True, 'import matplotlib.pyplot as plt\n'), ((2695, 2743), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X0[ax0]', 'X0[ax1]'], {'marker': '"""o"""', 'c': '"""k"""'}), "(X0[ax0], X0[ax1], marker='o', c='k')\n", (2706, 2743), True, 'import matplotlib.pyplot as plt\n'), ((1210, 1229), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(300)'}), '(dpi=300)\n', (1220, 1229), True, 'import matplotlib.pyplot as plt\n'), ((1247, 1256), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1254, 1256), True, 'import matplotlib.pyplot as plt\n'), ((1051, 1061), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (1058, 1061), True, 'import numpy as np\n'), ((2062, 2077), 'numpy.ones', 'np.ones', (['(N ** 2)'], {}), '(N ** 2)\n', (2069, 2077), True, 'import numpy as np\n'), ((2622, 2632), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (2629, 2632), True, 'import numpy as np\n'), ((2666, 2676), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (2673, 2676), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Implements Calibrated Camera processor. Uses camera intrinsic parameters transforms the image.
"""
import cv2
import numpy as np
from .base import *
class PinholeCamera(namedtuple('PinholeCamera', ['size', 'matrix', 'distortion', 'rectify', 'projection'])):
    """Pinhole Camera model for calibrated camera processor.

    Contains these fields:
        size - (width, height)
        matrix - camera matrix 3x3
        distortion - camera distortion coefficients
        rectify - rectification transform matrix 3x3
        projection - projection matrix after rectification 3x3
    """

    def __new__(cls, size, matrix, distortion, rectify=None, projection=None):
        """Create a new (immutable) camera model.

        :param size: (width, height) tuple of positive integers; lists are accepted
        :param matrix: 3x3 camera matrix (array-like, coerced to np.float64)
        :param distortion: distortion coefficients (array-like, coerced to np.float64)
        :param rectify: optional 3x3 rectification transform
        :param projection: optional 3x3 projection matrix after rectification
        :raises TypeError: if ``size`` is not a 2-tuple of positive integers
        """
        if isinstance(size, list):
            size = tuple(size)
        # BUGFIX: the original check referenced the Python 2 ``long`` builtin,
        # which raises NameError on Python 3 as soon as a non-int entry is
        # validated.  ``np.integer`` additionally accepts numpy integer scalars
        # (common when sizes come from array shapes).
        if not isinstance(size, tuple) or len(size) != 2 or \
                not all(isinstance(i, (int, np.integer)) and i > 0 for i in size):
            raise TypeError('Frame size must be a tuple consisting of two positive integers')
        # Coerce array-likes to numpy arrays; leave None and ndarrays untouched.
        matrix = np.float64(matrix) if not isinstance(matrix, np.ndarray) and matrix is not None else matrix
        distortion = np.float64(distortion) if not isinstance(distortion, np.ndarray) and distortion is not None else distortion
        rectify = np.float64(rectify) if not isinstance(rectify, np.ndarray) and rectify is not None else rectify
        projection = np.float64(projection) if not isinstance(projection, np.ndarray) and projection is not None else projection
        return super(PinholeCamera, cls).__new__(cls, size, matrix, distortion, rectify, projection)

    @property
    def width(self):
        """Width of the camera sensor in pixels"""
        return self.size[0]

    @property
    def height(self):
        """Height of the camera sensor in pixels"""
        return self.size[1]

    @property
    def focal_point(self):
        """Focal point (fx, fy) tuple in pixels"""
        return (self.matrix[0, 0], self.matrix[1, 1])

    @property
    def center(self):
        """Principal point (cx, cy) in pixels"""
        return (self.matrix[0, 2], self.matrix[1, 2])

    @staticmethod
    def fromdict(as_dict):
        """Creates camera object from dict (inverse of :meth:`todict`)"""
        return PinholeCamera(**as_dict)

    def todict(self):
        """Converts camera object to a dict of JSON-serializable values"""
        d = {
            "size": self.size,
            "matrix": self.matrix.tolist(),
            "distortion": self.distortion.tolist(),
            "rectify": self.rectify.tolist() if self.rectify is not None else None,
            "projection": self.projection.tolist() if self.projection is not None else None
        }
        return d

    @staticmethod
    def from_parameters(frame_size, focal_point, center, distortion, rectify=None, projection=None):
        """Creates camera object from parameters

        :param frame_size: tuple containing (frame_width, frame_height)
        :param focal_point: tuple containing (focal_x, focal_y)
        :param center: tuple containing (center_x, center_y)
        :param distortion: distortion coefficients (length 5)
        :param rectify: rectification 3x3 matrix
        :param projection: projection 3x3 matrix
        :return: PinholeCamera object
        :raises ValueError: if any parameter has the wrong length
        """
        if len(distortion) != 5:
            raise ValueError("distortion must be vector of length 5")
        if len(frame_size) != 2:
            raise ValueError("frame size must be vector of length 2")
        if len(focal_point) != 2:
            raise ValueError("focal point must be vector of length 2")
        if len(center) != 2:
            raise ValueError("center must be vector of length 2")
        # Assemble the 3x3 intrinsic matrix [[fx, 0, cx], [0, fy, cy], [0, 0, 1]].
        matrix = np.zeros((3, 3), np.float64)
        matrix[0, 0] = focal_point[0]
        matrix[1, 1] = focal_point[1]
        matrix[0, 2] = center[0]
        matrix[1, 2] = center[1]
        matrix[2, 2] = 1
        d = np.zeros((1, 5), np.float64)
        d[0] = distortion
        return PinholeCamera(frame_size, matrix, d, rectify, projection)
class CalibratedCamera(ProcessorBase):
    """Class implementing Calibrated Camera undistort/rectify and calibration using rectangular calibration pattern.
    """

    def __init__(self, vision, camera, grid_shape=(7, 6), square_size=20, max_samples=20, frame_delay=1, *args, **kwargs):
        """CalibratedCamera instance initialization

        :param vision: source vision object
        :param camera: either PinholeCamera object or None. If None is specified - then it will enter calibration mode.
        :param grid_shape: shape of the calibration pattern.
        :param square_size: size of the calibration pattern element e.g. in mm.
        :param max_samples: number of samples to collect for calibration
        :param frame_delay: how many frames to skip. Useful online calibration using camera.
        """
        # camera=None switches the processor into calibration mode.
        calibrate = camera is None
        if not calibrate:
            if not isinstance(camera, PinholeCamera) and not (isinstance(camera, tuple) and len(camera) == 3):
                raise TypeError("Camera must be either PinholeCamera or tuple with (frame_size, camera_matrix, distortion)")
            self._camera = camera
        else:
            # Termination criteria for the sub-pixel corner refinement.
            self._criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
            self._camera = None
            self._grid_shape = grid_shape
            self._square_size = square_size
            self._max_samples = max_samples
            self._frame_delay = frame_delay
            self._last_timestamp = None
        self._cache_mapped = None
        self._calibrate = calibrate
        self.__setup_called = False
        super(CalibratedCamera, self).__init__(vision, *args, **kwargs)

    def __setup(self):
        # Name-mangled helper shared by setup() and the camera setter.
        if self._calibrate:
            # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
            self._objp = np.zeros((np.prod(self._grid_shape), 3), np.float32)
            self._objp[:, :2] = np.indices(self._grid_shape).T.reshape(-1, 2)
            # Scale the grid to physical units (e.g. mm).
            self._objp *= self._square_size
            # Arrays to store object points and image points from all the images.
            self._objpoints = []  # 3d point in real world space
            self._imgpoints = []  # 2d points in image plane.
            self._calibration_samples = 0
        else:
            # Precompute the undistort/rectify lookup maps once.
            self._mapx, self._mapy = cv2.initUndistortRectifyMap(
                self.camera.matrix,
                self.camera.distortion,
                self.camera.rectify,
                self.camera.projection,
                self.camera.size,
                cv2.CV_32FC1)

    def setup(self):
        self.__setup()
        self.__setup_called = True
        super(CalibratedCamera, self).setup()

    @property
    def description(self):
        return "Pinhole camera undistort processor"

    @property
    def camera(self):
        """Get/Set PinholeCamera object. Setting camera will disable calibration mode."""
        return self._camera

    @camera.setter
    def camera(self, value):
        if not isinstance(value, PinholeCamera):
            raise TypeError("Must be PinholeCamera")
        self._camera = value
        self._calibrate = False
        # Recompute the undistort maps if setup() already ran.
        if self.__setup_called:
            self.__setup()

    def process(self, image):
        if self._calibrate:
            img = image.image
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Find the chess board corners
            ret, corners = cv2.findChessboardCorners(gray, self._grid_shape, None)
            if ret is True:
                # Refine corner locations to sub-pixel accuracy.
                corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), self._criteria)
            # Draw and display the corners
            if self.display_results:
                img = cv2.drawChessboardCorners(img, self._grid_shape, corners, ret)
                cv2.putText(img, "Samples added: {}/{}".format(self._calibration_samples, self._max_samples),
                            (20, 11), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1, 8)
                cv2.imshow(self.name, img)
            return Image(self, gray, features=(ret, corners), feature_type='corners')
        else:
            mapped = cv2.remap(image.image, self._mapx, self._mapy, cv2.INTER_NEAREST, dst=self._cache_mapped)
            if self.display_results:
                cv2.imshow(self.name, mapped)
            # NOTE(review): the input frame is stored to serve as the next
            # call's dst buffer — confirm reusing it as cv2.remap output is intended.
            self._cache_mapped = image.image
            return image._replace(image=mapped)

    def calibrate(self):
        """Calibration method to be used instead of ``capture`` used for camera calibration."""
        if not self._calibrate:
            raise ValueError("calibrate parameter must be set")
        # Enough samples already collected: return the computed camera.
        if self._calibration_samples >= self._max_samples:
            return self._camera
        frame = self.capture()
        if self._last_timestamp is None:
            self._last_timestamp = frame.timestamp
        # Throttle collection so consecutive samples are not near-duplicates.
        if (frame.timestamp - self._last_timestamp).total_seconds() > self._frame_delay:
            ret, corners = frame.images[0].features
            if ret is True:
                self._objpoints.append(self._objp)
                self._imgpoints.append(corners)
                self._calibration_samples += 1
                self._last_timestamp = frame.timestamp
        if self._calibration_samples >= self._max_samples:
            img = frame.images[0].image
            # (height, width) -> (width, height) as expected by calibrateCamera.
            shape = img.shape[::-1]
            self._camera = self._finish_calibration(self._objpoints, self._imgpoints, shape)
        return self._camera

    def _finish_calibration(self, objpoints, imgpoints, shape):
        """Helper method that executes camera calibration algorithm. Factored out specifically for ``CalibratedStereoCamera``"""
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, shape, None, None)
        return PinholeCamera(shape, mtx, dist)
| [
"cv2.findChessboardCorners",
"cv2.cvtColor",
"numpy.zeros",
"cv2.cornerSubPix",
"cv2.remap",
"numpy.prod",
"numpy.indices",
"cv2.calibrateCamera",
"numpy.float64",
"cv2.drawChessboardCorners",
"cv2.imshow",
"cv2.initUndistortRectifyMap"
] | [((3613, 3641), 'numpy.zeros', 'np.zeros', (['(3, 3)', 'np.float64'], {}), '((3, 3), np.float64)\n', (3621, 3641), True, 'import numpy as np\n'), ((3821, 3849), 'numpy.zeros', 'np.zeros', (['(1, 5)', 'np.float64'], {}), '((1, 5), np.float64)\n', (3829, 3849), True, 'import numpy as np\n'), ((9681, 9741), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'shape', 'None', 'None'], {}), '(objpoints, imgpoints, shape, None, None)\n', (9700, 9741), False, 'import cv2\n'), ((1016, 1034), 'numpy.float64', 'np.float64', (['matrix'], {}), '(matrix)\n', (1026, 1034), True, 'import numpy as np\n'), ((1129, 1151), 'numpy.float64', 'np.float64', (['distortion'], {}), '(distortion)\n', (1139, 1151), True, 'import numpy as np\n'), ((1255, 1274), 'numpy.float64', 'np.float64', (['rectify'], {}), '(rectify)\n', (1265, 1274), True, 'import numpy as np\n'), ((1372, 1394), 'numpy.float64', 'np.float64', (['projection'], {}), '(projection)\n', (1382, 1394), True, 'import numpy as np\n'), ((6276, 6433), 'cv2.initUndistortRectifyMap', 'cv2.initUndistortRectifyMap', (['self.camera.matrix', 'self.camera.distortion', 'self.camera.rectify', 'self.camera.projection', 'self.camera.size', 'cv2.CV_32FC1'], {}), '(self.camera.matrix, self.camera.distortion,\n self.camera.rectify, self.camera.projection, self.camera.size, cv2.CV_32FC1\n )\n', (6303, 6433), False, 'import cv2\n'), ((7300, 7337), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (7312, 7337), False, 'import cv2\n'), ((7409, 7464), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', 'self._grid_shape', 'None'], {}), '(gray, self._grid_shape, None)\n', (7434, 7464), False, 'import cv2\n'), ((8112, 8206), 'cv2.remap', 'cv2.remap', (['image.image', 'self._mapx', 'self._mapy', 'cv2.INTER_NEAREST'], {'dst': 'self._cache_mapped'}), '(image.image, self._mapx, self._mapy, cv2.INTER_NEAREST, dst=self.\n _cache_mapped)\n', (8121, 8206), False, 'import 
cv2\n'), ((7519, 7586), 'cv2.cornerSubPix', 'cv2.cornerSubPix', (['gray', 'corners', '(11, 11)', '(-1, -1)', 'self._criteria'], {}), '(gray, corners, (11, 11), (-1, -1), self._criteria)\n', (7535, 7586), False, 'import cv2\n'), ((7690, 7752), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['img', 'self._grid_shape', 'corners', 'ret'], {}), '(img, self._grid_shape, corners, ret)\n', (7715, 7752), False, 'import cv2\n'), ((7963, 7989), 'cv2.imshow', 'cv2.imshow', (['self.name', 'img'], {}), '(self.name, img)\n', (7973, 7989), False, 'import cv2\n'), ((8256, 8285), 'cv2.imshow', 'cv2.imshow', (['self.name', 'mapped'], {}), '(self.name, mapped)\n', (8266, 8285), False, 'import cv2\n'), ((5808, 5833), 'numpy.prod', 'np.prod', (['self._grid_shape'], {}), '(self._grid_shape)\n', (5815, 5833), True, 'import numpy as np\n'), ((5883, 5911), 'numpy.indices', 'np.indices', (['self._grid_shape'], {}), '(self._grid_shape)\n', (5893, 5911), True, 'import numpy as np\n')] |
# The majority of the present code originally comes from
# https://github.com/liyaguang/DCRNN/blob/master/lib/utils.py
import numpy as np
import tensorflow as tf
import scipy.sparse as sp
from scipy.sparse import linalg
def calculate_normalized_laplacian(adj):
    """Build the symmetrically normalized graph Laplacian.

    L = I - D^-1/2 A D^-1/2 with D = diag(A 1).  Nodes with zero degree get
    an inverse-sqrt degree of 0 so their rows/columns stay zero in the
    normalized adjacency.

    :param adj: adjacency matrix (dense array-like or scipy sparse)
    :return: normalized Laplacian as a scipy sparse matrix
    """
    adj = sp.coo_matrix(adj)
    degree = np.array(adj.sum(1))
    inv_sqrt_degree = np.power(degree, -0.5).flatten()
    # Isolated nodes have degree 0 -> inf after the power; zero them out.
    inv_sqrt_degree[np.isinf(inv_sqrt_degree)] = 0.0
    inv_sqrt_diag = sp.diags(inv_sqrt_degree)
    normalized_adj = adj.dot(inv_sqrt_diag).T.dot(inv_sqrt_diag).tocoo()
    return sp.eye(adj.shape[0]) - normalized_adj
def calculate_random_walk_matrix(adj_mx):
    """Return the random-walk transition matrix D^-1 A as a sparse COO matrix.

    Rows with zero out-degree are left as all-zero rows rather than dividing
    by zero.
    """
    adj_mx = sp.coo_matrix(adj_mx)
    out_degree = np.array(adj_mx.sum(1))
    inv_degree = np.power(out_degree, -1).flatten()
    # Zero out-degree produces inf; replace it so those rows stay zero.
    inv_degree[np.isinf(inv_degree)] = 0.0
    inv_degree_diag = sp.diags(inv_degree)
    transition = inv_degree_diag.dot(adj_mx)
    return transition.tocoo()
def calculate_reverse_random_walk_matrix(adj_mx):
    """Random-walk matrix of the reversed graph, i.e. D^-1 A computed on A^T."""
    return calculate_random_walk_matrix(np.transpose(adj_mx))
def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True):
    """Rescale the normalized Laplacian so its spectrum lies in [-1, 1].

    Computes (2 / lambda_max) * L - I, the form expected by Chebyshev
    polynomial graph convolutions.

    :param adj_mx: adjacency matrix
    :param lambda_max: largest eigenvalue of L; computed via eigsh when None
    :param undirected: symmetrize the adjacency with elementwise max(A, A^T)
    :return: rescaled Laplacian as a float32 CSR matrix
    """
    if undirected:
        # Keep the larger of each directed edge pair to symmetrize the graph.
        adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])
    laplacian = calculate_normalized_laplacian(adj_mx)
    if lambda_max is None:
        eigenvalues, _ = linalg.eigsh(laplacian, 1, which='LM')
        lambda_max = eigenvalues[0]
    laplacian = sp.csr_matrix(laplacian)
    num_nodes, _ = laplacian.shape
    identity = sp.identity(num_nodes, format='csr', dtype=laplacian.dtype)
    rescaled = (2 / lambda_max * laplacian) - identity
    return rescaled.astype(np.float32)
def build_sparse_matrix(L):
    """Convert a scipy sparse matrix into a canonically ordered tf.SparseTensor."""
    coo = L.astype('float32').tocoo()
    indices = np.column_stack((coo.row, coo.col))
    tensor = tf.SparseTensor(indices, coo.data, coo.shape)
    # reorder puts the indices into the row-major order TF sparse ops require.
    return tf.sparse.reorder(tensor)
| [
"scipy.sparse.diags",
"numpy.power",
"numpy.transpose",
"numpy.isinf",
"scipy.sparse.linalg.eigsh",
"scipy.sparse.coo_matrix",
"scipy.sparse.csr_matrix",
"scipy.sparse.identity",
"tensorflow.sparse.reorder",
"tensorflow.SparseTensor",
"numpy.column_stack",
"numpy.maximum.reduce",
"scipy.spar... | [((407, 425), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), '(adj)\n', (420, 425), True, 'import scipy.sparse as sp\n'), ((567, 587), 'scipy.sparse.diags', 'sp.diags', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (575, 587), True, 'import scipy.sparse as sp\n'), ((797, 818), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj_mx'], {}), '(adj_mx)\n', (810, 818), True, 'import scipy.sparse as sp\n'), ((941, 956), 'scipy.sparse.diags', 'sp.diags', (['d_inv'], {}), '(d_inv)\n', (949, 956), True, 'import scipy.sparse as sp\n'), ((1483, 1499), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['L'], {}), '(L)\n', (1496, 1499), True, 'import scipy.sparse as sp\n'), ((1529, 1572), 'scipy.sparse.identity', 'sp.identity', (['M'], {'format': '"""csr"""', 'dtype': 'L.dtype'}), "(M, format='csr', dtype=L.dtype)\n", (1540, 1572), True, 'import scipy.sparse as sp\n'), ((1736, 1767), 'numpy.column_stack', 'np.column_stack', (['(L.row, L.col)'], {}), '((L.row, L.col))\n', (1751, 1767), True, 'import numpy as np\n'), ((1777, 1818), 'tensorflow.SparseTensor', 'tf.SparseTensor', (['indices', 'L.data', 'L.shape'], {}), '(indices, L.data, L.shape)\n', (1792, 1818), True, 'import tensorflow as tf\n'), ((1831, 1851), 'tensorflow.sparse.reorder', 'tf.sparse.reorder', (['L'], {}), '(L)\n', (1848, 1851), True, 'import tensorflow as tf\n'), ((518, 538), 'numpy.isinf', 'np.isinf', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (526, 538), True, 'import numpy as np\n'), ((616, 636), 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (622, 636), True, 'import scipy.sparse as sp\n'), ((902, 917), 'numpy.isinf', 'np.isinf', (['d_inv'], {}), '(d_inv)\n', (910, 917), True, 'import numpy as np\n'), ((1132, 1152), 'numpy.transpose', 'np.transpose', (['adj_mx'], {}), '(adj_mx)\n', (1144, 1152), True, 'import numpy as np\n'), ((1268, 1305), 'numpy.maximum.reduce', 'np.maximum.reduce', (['[adj_mx, adj_mx.T]'], {}), '([adj_mx, adj_mx.T])\n', (1285, 1305), True, 'import 
numpy as np\n'), ((1407, 1437), 'scipy.sparse.linalg.eigsh', 'linalg.eigsh', (['L', '(1)'], {'which': '"""LM"""'}), "(L, 1, which='LM')\n", (1419, 1437), False, 'from scipy.sparse import linalg\n'), ((474, 491), 'numpy.power', 'np.power', (['d', '(-0.5)'], {}), '(d, -0.5)\n', (482, 491), True, 'import numpy as np\n'), ((865, 880), 'numpy.power', 'np.power', (['d', '(-1)'], {}), '(d, -1)\n', (873, 880), True, 'import numpy as np\n')] |
"""
Test adding an image with a range one dimensions.
There should be no slider shown for the axis corresponding to the range
one dimension.
"""
import numpy as np
from skimage import data
import napari
with napari.gui_qt():
np.random.seed(0)
# image = 2 * np.random.random((20, 20, 3)) - 1.0
image = 20 * np.random.random((20, 20, 3)) - 10
print(image.min(), image.max())
image = np.clip(image, 0, 1)
viewer = napari.view(image)
| [
"numpy.random.seed",
"napari.gui_qt",
"numpy.clip",
"numpy.random.random",
"napari.view"
] | [((212, 227), 'napari.gui_qt', 'napari.gui_qt', ([], {}), '()\n', (225, 227), False, 'import napari\n'), ((233, 250), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (247, 250), True, 'import numpy as np\n'), ((405, 425), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (412, 425), True, 'import numpy as np\n'), ((439, 457), 'napari.view', 'napari.view', (['image'], {}), '(image)\n', (450, 457), False, 'import napari\n'), ((322, 351), 'numpy.random.random', 'np.random.random', (['(20, 20, 3)'], {}), '((20, 20, 3))\n', (338, 351), True, 'import numpy as np\n')] |
"""The implementation for a neighborhood based recommender."""
import heapq
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
from . import recommender
class KNNRecommender(recommender.PredictRecommender):
"""A neighborhood based collaborative filtering algorithm.
The class supports both user and item based collaborative filtering.
Parameters
----------
shrinkage : float
The shrinkage parameter applied to the similarity measure.
neighborhood_size : int
The number of users/items to consider when estimating a rating.
user_based : bool
If this variable is set to true the created object will use user-based collaborative
filtering, otherwise it will use item-based collaborative filtering.
use_content : bool
Whether to use the user/item features when computing the similarity measure.
use_means : bool
Whether to adjust the ratings based on the mean rating of each user/item.
"""
def __init__(self, shrinkage=0, neighborhood_size=40,
user_based=True, use_content=True, use_means=True,
**kwargs):
"""Create a new neighborhood recommender."""
super().__init__(**kwargs)
self._shrinkage = shrinkage
self._neighborhood_size = neighborhood_size
self._user_based = user_based
self._use_content = use_content
self._use_means = use_means
self._feature_matrix = scipy.sparse.csr_matrix((0, 0))
self._means = np.empty(0)
self._similarity_matrix = np.empty((0, 0))
self._ratings_matrix = np.empty((0, 0))
self._hyperparameters.update(locals())
# We only want the function arguments so remove class related objects.
del self._hyperparameters['self']
del self._hyperparameters['__class__']
@property
def name(self): # noqa: D102
return 'knn'
@property
def dense_predictions(self): # noqa: D102
if self._dense_predictions is not None:
return self._dense_predictions
# Set up whether we will loop over users or items.
if self._user_based:
loop_range = range(len(self._users))
ratings_matrix = self._ratings_matrix
else:
loop_range = range(len(self._items))
ratings_matrix = self._ratings_matrix.T
preds = []
for idx in loop_range:
relevant_idxs = nlargest_indices(
self._neighborhood_size, self._similarity_matrix[idx])
ratings = ratings_matrix[relevant_idxs]
# We only care about means and similarities with corresponding nonzero ratings.
zero = ratings == 0
# Create a matrix of means that can easily be subtracted by the ratings.
relevant_means = self._means[relevant_idxs]
relevant_means = np.tile(relevant_means, (ratings_matrix.shape[1], 1)).T
relevant_means[zero] = 0.0
# Create a matrix of relevant similarities that can easily be multiplied with ratings.
similarities = self._similarity_matrix[relevant_idxs, idx]
similarities = np.tile(similarities, (ratings_matrix.shape[1], 1)).T
similarities[zero] = 0.0
# Ensure that we aren't weighting by all 0.
zero = np.all(np.isclose(similarities, 0), axis=0)
similarities[:, zero] = 1.0
# Compute the predictions.
if self._use_means:
ratings_sum = self._means[idx] + (ratings - relevant_means)
else:
ratings_sum = ratings
preds.append((ratings_sum * similarities).sum(axis=0) / similarities.sum(axis=0))
preds = np.array(preds)
if not self._user_based:
preds = preds.T
self._dense_predictions = preds
return preds
def reset(self, users=None, items=None, ratings=None): # noqa: D102
self._feature_matrix = scipy.sparse.csr_matrix((0, 0))
self._similarity_matrix = np.empty((0, 0))
self._means = np.empty(0)
self._ratings_matrix = np.empty((0, 0))
super().reset(users, items, ratings)
def update(self, users=None, items=None, ratings=None): # noqa: D102
super().update(users, items, ratings)
if self._user_based:
self._feature_matrix = scipy.sparse.csr_matrix(self._ratings)
else:
self._feature_matrix = scipy.sparse.csr_matrix(self._ratings.T)
self._means = divide_zero(flatten(self._feature_matrix.sum(axis=1)),
self._feature_matrix.getnnz(axis=1))
if self._use_content:
if self._user_based:
self._feature_matrix = scipy.sparse.hstack([self._feature_matrix, self._users])
else:
self._feature_matrix = scipy.sparse.hstack([self._feature_matrix, self._items])
self._similarity_matrix = cosine_similarity(self._feature_matrix, self._feature_matrix,
self._shrinkage)
np.fill_diagonal(self._similarity_matrix, 0)
# TODO: this may not be the best way to store ratings, but it does speed access
self._ratings_matrix = self._ratings.A
def _predict(self, user_item):  # noqa: D102
    preds = []
    # Cache neighbor lookups: many (user, item) queries share the same
    # user (or item), and nlargest_indices is the expensive step.
    relevant_idxs_cache = {}
    for user_id, item_id, _ in user_item:
        if self._user_based:
            if user_id not in relevant_idxs_cache:
                relevant_idxs_cache[user_id] = nlargest_indices(
                    self._neighborhood_size, self._similarity_matrix[user_id])
            relevant_idxs = relevant_idxs_cache[user_id]
            similarities = self._similarity_matrix[relevant_idxs, user_id]
            ratings = self._ratings_matrix[relevant_idxs, item_id].ravel()
            mean = self._means[user_id]
        else:
            if item_id not in relevant_idxs_cache:
                relevant_idxs_cache[item_id] = nlargest_indices(
                    self._neighborhood_size, self._similarity_matrix[item_id])
            relevant_idxs = relevant_idxs_cache[item_id]
            similarities = self._similarity_matrix[relevant_idxs, item_id]
            ratings = self._ratings_matrix.T[relevant_idxs, user_id].ravel()
            mean = self._means[item_id]
        relevant_means = self._means[relevant_idxs]
        # Keep only neighbors that actually rated this item
        # (a stored 0 denotes an unobserved rating here).
        nonzero = ratings != 0
        ratings = ratings[nonzero]
        similarities = similarities[nonzero]
        # ensure that we aren't weighting by all 0
        if np.all(np.isclose(similarities, 0)):
            similarities = np.ones_like(similarities)
        if self._use_means:
            # Mean-centered prediction; fall back to the row mean when
            # no neighbor rated the item.
            if len(ratings) == 0:
                preds.append(mean)
            else:
                preds.append(mean + np.average(ratings - relevant_means[nonzero],
                                                weights=similarities))
        else:
            if len(ratings) == 0:
                preds.append(0)
            else:
                preds.append(np.average(ratings, weights=similarities))
    return np.array(preds)
def cosine_similarity(X, Y, shrinkage):
    """Compute the pairwise cosine similarity between the rows of X and Y.

    Parameters
    ----------
    X : np.matrix
        First sparse matrix of row vectors.
    Y : np.matrix
        Second sparse matrix of row vectors.
    shrinkage : float
        Shrinkage term added to the norm product in the denominator.

    Returns
    -------
    similarity : np.ndarray
        Array where similarity[i, j] is the cosine similarity between
        X[i] and Y[j]; entries with a zero (shrunk) denominator are 0.
    """
    dot_products = (X @ Y.T).A
    row_norms_x = scipy.sparse.linalg.norm(X, axis=1)[:, np.newaxis]
    row_norms_y = scipy.sparse.linalg.norm(Y, axis=1)[np.newaxis, :]
    return divide_zero(dot_products, row_norms_x * row_norms_y + shrinkage)
def nlargest_indices(n, iterable):
    """Given an iterable, compute the indices of its n largest items.

    Parameters
    ----------
    n : int
        How many indices to retrieve.
    iterable : iterable
        Values whose largest entries are sought.

    Returns
    -------
    largest : list of int
        Indices of the n largest values, in decreasing order of value
        (ties keep their original order).
    """
    # sorted(..., reverse=True)[:n] is documented as equivalent to
    # heapq.nlargest with the same key.
    ranked = sorted(enumerate(iterable), key=lambda pair: pair[1], reverse=True)
    return [index for index, _ in ranked[:n]]
def flatten(matrix):
    """Return the contents of a matrix as a flat 1-D numpy array."""
    # .A converts the matrix to a plain ndarray before flattening.
    return np.ravel(matrix.A)
def divide_zero(num, denom):
    """Element-wise num / denom, yielding 0 wherever denom is 0."""
    # TODO: is this the desired zero-division behavior?
    nonzero = denom != 0
    result = np.zeros_like(num)
    np.divide(num, denom, out=result, where=nonzero)
    return result
| [
"numpy.fill_diagonal",
"numpy.zeros_like",
"numpy.ones_like",
"numpy.average",
"numpy.empty",
"numpy.isclose",
"numpy.array",
"numpy.tile"
] | [((1525, 1536), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (1533, 1536), True, 'import numpy as np\n'), ((1571, 1587), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (1579, 1587), True, 'import numpy as np\n'), ((1619, 1635), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (1627, 1635), True, 'import numpy as np\n'), ((3752, 3767), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (3760, 3767), True, 'import numpy as np\n'), ((4062, 4078), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (4070, 4078), True, 'import numpy as np\n'), ((4101, 4112), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (4109, 4112), True, 'import numpy as np\n'), ((4144, 4160), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (4152, 4160), True, 'import numpy as np\n'), ((5114, 5158), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self._similarity_matrix', '(0)'], {}), '(self._similarity_matrix, 0)\n', (5130, 5158), True, 'import numpy as np\n'), ((7276, 7291), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (7284, 7291), True, 'import numpy as np\n'), ((8960, 8978), 'numpy.zeros_like', 'np.zeros_like', (['num'], {}), '(num)\n', (8973, 8978), True, 'import numpy as np\n'), ((2893, 2946), 'numpy.tile', 'np.tile', (['relevant_means', '(ratings_matrix.shape[1], 1)'], {}), '(relevant_means, (ratings_matrix.shape[1], 1))\n', (2900, 2946), True, 'import numpy as np\n'), ((3186, 3237), 'numpy.tile', 'np.tile', (['similarities', '(ratings_matrix.shape[1], 1)'], {}), '(similarities, (ratings_matrix.shape[1], 1))\n', (3193, 3237), True, 'import numpy as np\n'), ((3360, 3387), 'numpy.isclose', 'np.isclose', (['similarities', '(0)'], {}), '(similarities, 0)\n', (3370, 3387), True, 'import numpy as np\n'), ((6691, 6718), 'numpy.isclose', 'np.isclose', (['similarities', '(0)'], {}), '(similarities, 0)\n', (6701, 6718), True, 'import numpy as np\n'), ((6752, 6778), 'numpy.ones_like', 'np.ones_like', (['similarities'], {}), 
'(similarities)\n', (6764, 6778), True, 'import numpy as np\n'), ((7217, 7258), 'numpy.average', 'np.average', (['ratings'], {'weights': 'similarities'}), '(ratings, weights=similarities)\n', (7227, 7258), True, 'import numpy as np\n'), ((6950, 7017), 'numpy.average', 'np.average', (['(ratings - relevant_means[nonzero])'], {'weights': 'similarities'}), '(ratings - relevant_means[nonzero], weights=similarities)\n', (6960, 7017), True, 'import numpy as np\n')] |
import numpy as np
from numpy.lib.arraysetops import isin
from sympy import Matrix, flatten, Rational
from .. import (_LieAlgebraBackend)
def _annotate(M: Matrix, basis: str) -> Matrix:
    """Attach a ``basis`` attribute to a sympy.Matrix and return it."""
    M.basis = basis
    return M
def _to_rational_tuple(obj):
    """Convert ``obj`` into a rust-friendly integer representation.

    Lists/tuples are first coerced to a sympy Matrix; a Matrix is turned
    into an int64 ndarray of shape (*matrix.shape, 2) holding each
    entry's (numerator, denominator) pair.  A sympy Rational becomes a
    plain int (NOTE(review): int() truncates -- presumably only integral
    Rationals reach this branch; confirm).  ints, ndarrays and None pass
    through unchanged.
    """
    if obj is None:
        return
    elif isinstance(obj, list) or isinstance(obj, tuple):
        obj = Matrix(obj)
    elif isinstance(obj, Rational):
        return int(obj)
    elif isinstance(obj, int) or isinstance(obj, np.ndarray):
        return obj
    else:
        # Anything else (e.g. an existing Matrix) falls through to the
        # flatten/convert step below.
        pass
    x = flatten(obj)
    # i.p / i.q are the numerator and denominator of each rational entry.
    return np.array([(i.p, i.q) for i in x], dtype=np.int64).reshape(*obj.shape, 2)
def _is_scalar(x) -> bool:
"""Is a basic type"""
return isinstance(x, (int, str, float, bool)) or x is None
def _is_tuple_int(x) -> bool:
"""Tuple of ints"""
if not isinstance(x, tuple):
raise Exception("Wrapper error, tuple expected")
return isinstance(x[0], int) and isinstance(x[1], int)
def _rust_wrapper(func=None, default=None):
    """Wraps the rust methods to and from. Formats the calls
    to rust by turning either a 2d matrix to 3d matrix of (x,y) => (x,y,[numerator,denominator])
    for preserving rational numbers. Rust returns objects as a tuple of (numerator-matrix, denominator-matrix)
    """
    # Called with only `default` set, act as a decorator factory.
    if func is None and default is not None:
        return lambda f: _rust_wrapper(func=f, default=default)
    def inner(*args, **kwargs):
        cls = args[0]
        rank = cls.rank
        # Convert every positional argument into the rust representation.
        nargs = [_to_rational_tuple(x) for x in args[1:]]
        result = func(cls, *nargs, **kwargs)
        if result is None:
            return default
        if _is_scalar(result):
            return result
        if _is_tuple_int(result):
            # A single rational encoded as (numerator, denominator).
            return Rational(*result)
        # tuple of ndarrays
        numer, denom = (x.squeeze() for x in result)
        shape = numer.shape
        plain_values = [Rational(f"{x}/{y}")
                        for x, y in zip(numer.flatten(), denom.flatten())]
        # vectorlike
        if len(shape) == 1:
            # Orient 1-D results as a column for rank-1 algebras,
            # as a row otherwise.
            shape = (shape[0], 1) if rank == 1 else (1, shape[0])
        m = Matrix(*shape, plain_values)
        # Return the matrix as a list of its row vectors.
        return [m.row(i) for i in range(m.shape[0])]
    return inner
def _rust_new(func):
    """Decorator converting positional arguments into rust-acceptable types."""
    def wrapper(*args, **kwargs):
        instance, raw_args = args[0], args[1:]
        converted = [_to_rational_tuple(arg) for arg in raw_args]
        return func(instance, *converted, **kwargs)
    return wrapper
class _LieAlgebraBackendWrapped:
    """Thin wrapper around the rust ``_LieAlgebraBackend``.

    ``_rust_new`` converts constructor arguments into the rust integer
    representation; ``_rust_wrapper`` does the same for method arguments
    and converts rust results back into sympy objects.
    """

    @_rust_new
    def __init__(self, *args, **kwargs):
        # obscuring this option, used in testing
        backend = kwargs.get("backend", _LieAlgebraBackend)
        self.rank = args[0]
        self.backend = backend(*args)

    @_rust_wrapper
    def orbit(self, weight, stabilizers):
        return self.backend.orbit(weight, stabilizers)

    @_rust_wrapper
    def root_system(self):
        return self.backend.root_system()

    @_rust_wrapper
    def tensor_product_decomposition(self, irrepA, irrepB):
        return self.backend.tensor_product_decomposition(irrepA, irrepB)

    @_rust_wrapper
    def dim(self, irrep):
        return self.backend.dim(irrep)

    # default=[] makes the wrapper return an empty list when rust
    # reports no match (result is None).
    @_rust_wrapper(default=[])
    def get_irrep_by_dim(self, dim, max_dd):
        return self.backend.irrep_by_dim(dim, max_dd)

    @_rust_wrapper
    def index_irrep(self, irrep, dim):
        return self.backend.index_irrep(irrep, dim)

    @_rust_wrapper
    def conjugate(self, irrep):
        return self.backend.conjugate_irrep(irrep)
def create_backend(algebra):
    """Build a ``_LieAlgebraBackendWrapped`` from an algebra's data.

    Passes the rank, number of positive roots, simple roots, and the
    pseudo-inverses of the cartan and omega matrices to the backend.
    """
    backend_args = (
        algebra.rank,
        algebra.n_pos_roots,
        algebra.simple_roots,
        # algebra.cartan_matrix,
        algebra.cartan_matrix.pinv(),
        algebra.omega_matrix,
        algebra.omega_matrix.pinv(),
        # algebra.cocartan_matrix,
    )
    return _LieAlgebraBackendWrapped(*backend_args)
| [
"sympy.flatten",
"numpy.array",
"sympy.Matrix",
"sympy.Rational"
] | [((724, 736), 'sympy.flatten', 'flatten', (['obj'], {}), '(obj)\n', (731, 736), False, 'from sympy import Matrix, flatten, Rational\n'), ((2303, 2331), 'sympy.Matrix', 'Matrix', (['*shape', 'plain_values'], {}), '(*shape, plain_values)\n', (2309, 2331), False, 'from sympy import Matrix, flatten, Rational\n'), ((540, 551), 'sympy.Matrix', 'Matrix', (['obj'], {}), '(obj)\n', (546, 551), False, 'from sympy import Matrix, flatten, Rational\n'), ((748, 797), 'numpy.array', 'np.array', (['[(i.p, i.q) for i in x]'], {'dtype': 'np.int64'}), '([(i.p, i.q) for i in x], dtype=np.int64)\n', (756, 797), True, 'import numpy as np\n'), ((1926, 1943), 'sympy.Rational', 'Rational', (['*result'], {}), '(*result)\n', (1934, 1943), False, 'from sympy import Matrix, flatten, Rational\n'), ((2079, 2099), 'sympy.Rational', 'Rational', (['f"""{x}/{y}"""'], {}), "(f'{x}/{y}')\n", (2087, 2099), False, 'from sympy import Matrix, flatten, Rational\n')] |
#######################################################################
# Copyright (C) #
# 2016-2018 <NAME>(<EMAIL>) #
# 2016 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
# Grid dimensions of the cliff-walking world.
WORLD_HEIGHT = 4
WORLD_WIDTH = 12
# probability for exploration (epsilon-greedy)
EPSILON = 0.1
# step size
ALPHA = 0.5
# gamma for Q-Learning and Expected Sarsa
GAMMA = 1
# all possible actions
ACTION_UP = 0
ACTION_DOWN = 1
ACTION_LEFT = 2
ACTION_RIGHT = 3
ACTIONS = [ACTION_UP, ACTION_DOWN, ACTION_LEFT, ACTION_RIGHT]
# start and goal cells, as [row, col]
START = [3, 0]
GOAL = [3, 11]
def step(state, action):
    """Apply `action` in `state` and return (next_state, reward).

    Moves are clamped to the grid.  Stepping into the cliff (the cells
    between START and GOAL on the bottom row) teleports the agent back
    to START with reward -100; every other transition costs -1.
    """
    row, col = state
    moves = {
        ACTION_UP: [max(row - 1, 0), col],
        ACTION_DOWN: [min(row + 1, WORLD_HEIGHT - 1), col],
        ACTION_LEFT: [row, max(col - 1, 0)],
        ACTION_RIGHT: [row, min(col + 1, WORLD_WIDTH - 1)],
    }
    assert action in moves
    next_state = moves[action]
    reward = -1
    fell_off_cliff = (action == ACTION_DOWN and row == 2 and 1 <= col <= 10) or (
        action == ACTION_RIGHT and state == START)
    if fell_off_cliff:
        reward = -100
        next_state = START
    return next_state, reward
# reward for each action in each state
# actionRewards = np.zeros((WORLD_HEIGHT, WORLD_WIDTH, 4))
# actionRewards[:, :, :] = -1.0
# actionRewards[2, 1:11, ACTION_DOWN] = -100.0
# actionRewards[3, 0, ACTION_RIGHT] = -100.0
# set up destinations for each action in each state
# actionDestination = []
# for i in range(0, WORLD_HEIGHT):
# actionDestination.append([])
# for j in range(0, WORLD_WIDTH):
# destinaion = dict()
# destinaion[ACTION_UP] = [max(i - 1, 0), j]
# destinaion[ACTION_LEFT] = [i, max(j - 1, 0)]
# destinaion[ACTION_RIGHT] = [i, min(j + 1, WORLD_WIDTH - 1)]
# if i == 2 and 1 <= j <= 10:
# destinaion[ACTION_DOWN] = START
# else:
# destinaion[ACTION_DOWN] = [min(i + 1, WORLD_HEIGHT - 1), j]
# actionDestination[-1].append(destinaion)
# actionDestination[3][0][ACTION_RIGHT] = START
# choose an action based on epsilon greedy algorithm
def choose_action(state, q_value):
    """Epsilon-greedy action selection.

    With probability EPSILON return a uniformly random action; otherwise
    return one of the greedy (maximal-value) actions, chosen uniformly
    at random among ties.
    """
    if np.random.binomial(1, EPSILON) == 1:
        return np.random.choice(ACTIONS)
    values_ = q_value[state[0], state[1], :]
    # Hoist the max out of the comprehension: the original recomputed
    # np.max(values_) once per candidate action.
    best_value = np.max(values_)
    return np.random.choice(
        [action_ for action_, value_ in enumerate(values_) if value_ == best_value])
# an episode with Sarsa
# @q_value: values for state action pair, will be updated
# @expected: if True, will use expected Sarsa algorithm
# @step_size: step size for updating
# @return: total rewards within this episode
def sarsa(q_value, expected=False, step_size=ALPHA):
    state = START
    action = choose_action(state, q_value)
    rewards = 0.0
    while state != GOAL:
        next_state, reward = step(state, action)
        next_action = choose_action(next_state, q_value)
        rewards += reward
        if not expected:
            # Plain Sarsa: bootstrap on the action actually taken next.
            target = q_value[next_state[0], next_state[1], next_action]
        else:
            # calculate the expected value of new state
            target = 0.0
            q_next = q_value[next_state[0], next_state[1], :]
            best_actions = np.argwhere(q_next == np.max(q_next))
            # Expectation under the epsilon-greedy policy: greedy actions
            # share (1 - EPSILON) plus their exploration mass.
            for action_ in ACTIONS:
                if action_ in best_actions:
                    target += ((1.0 - EPSILON) / len(best_actions) + EPSILON / len(ACTIONS)) * q_value[next_state[0], next_state[1], action_]
                else:
                    target += EPSILON / len(ACTIONS) * q_value[next_state[0], next_state[1], action_]
        target *= GAMMA
        # TD update toward reward + discounted bootstrap target.
        q_value[state[0], state[1], action] += step_size * (
                reward + target - q_value[state[0], state[1], action])
        state = next_state
        action = next_action
    return rewards
# an episode with Q-Learning
# @q_value: values for state action pair, will be updated
# @step_size: step size for updating
# @return: total rewards within this episode
def q_learning(q_value, step_size=ALPHA):
    state = START
    rewards = 0.0
    while state != GOAL:
        action = choose_action(state, q_value)
        next_state, reward = step(state, action)
        rewards += reward
        # Q-Learning update: bootstrap on the greedy (max) next action,
        # regardless of the action the behavior policy will take.
        q_value[state[0], state[1], action] += step_size * (
                reward + GAMMA * np.max(q_value[next_state[0], next_state[1], :]) -
                q_value[state[0], state[1], action])
        state = next_state
    return rewards
# print optimal policy
def print_optimal_policy(q_value):
    """Print the greedy policy encoded in q_value, one grid row per line.

    Each cell shows the letter of its argmax action (U/D/L/R); the goal
    cell is printed as 'G'.
    """
    action_symbols = {ACTION_UP: 'U', ACTION_DOWN: 'D',
                      ACTION_LEFT: 'L', ACTION_RIGHT: 'R'}
    optimal_policy = []
    for i in range(0, WORLD_HEIGHT):
        row_symbols = []
        for j in range(0, WORLD_WIDTH):
            if [i, j] == GOAL:
                row_symbols.append('G')
            else:
                row_symbols.append(action_symbols[np.argmax(q_value[i, j, :])])
        optimal_policy.append(row_symbols)
    for row in optimal_policy:
        print(row)
# Use multiple runs instead of a single run and a sliding window
# With a single run I failed to present a smooth curve
# However the optimal policy converges well with a single run
# Sarsa converges to the safe path, while Q-Learning converges to the optimal path
def figure_6_4():
    # episodes of each run
    episodes = 500
    # number of independent runs to average over
    runs = 50
    rewards_sarsa = np.zeros(episodes)
    rewards_q_learning = np.zeros(episodes)
    for r in tqdm(range(runs)):
        # Fresh Q tables for each run.
        q_sarsa = np.zeros((WORLD_HEIGHT, WORLD_WIDTH, 4))
        q_q_learning = np.copy(q_sarsa)
        for i in range(0, episodes):
            # cut off the value by -100 to draw the figure more elegantly
            # rewards_sarsa[i] += max(sarsa(q_sarsa), -100)
            # rewards_q_learning[i] += max(q_learning(q_q_learning), -100)
            rewards_sarsa[i] += sarsa(q_sarsa)
            rewards_q_learning[i] += q_learning(q_q_learning)
    # averaging over independent runs
    rewards_sarsa /= runs
    rewards_q_learning /= runs
    # draw reward curves
    plt.plot(rewards_sarsa, label='Sarsa')
    plt.plot(rewards_q_learning, label='Q-Learning')
    plt.xlabel('Episodes')
    plt.ylabel('Sum of rewards during episode')
    plt.ylim([-100, 0])
    plt.legend()
    plt.savefig('../images/figure_6_4.png')
    plt.close()
    # display optimal policy (from the last run's Q tables)
    print('Sarsa Optimal Policy:')
    print_optimal_policy(q_sarsa)
    print('Q-Learning Optimal Policy:')
    print_optimal_policy(q_q_learning)
# Due to limited capacity of calculation of my machine, I can't complete this experiment
# with 100,000 episodes and 50,000 runs to get the fully averaged performance
# However even I only play for 1,000 episodes and 10 runs, the curves looks still good.
def figure_6_6():
    step_sizes = np.arange(0.1, 1.1, 0.1)
    episodes = 1000
    runs = 10
    # Row indices into `performace`: asymptotic (all episodes) and
    # interim (first 100 episodes) performance per method.
    ASY_SARSA = 0
    ASY_EXPECTED_SARSA = 1
    ASY_QLEARNING = 2
    INT_SARSA = 3
    INT_EXPECTED_SARSA = 4
    INT_QLEARNING = 5
    methods = range(0, 6)
    performace = np.zeros((6, len(step_sizes)))
    for run in range(runs):
        for ind, step_size in tqdm(list(zip(range(0, len(step_sizes)), step_sizes))):
            q_sarsa = np.zeros((WORLD_HEIGHT, WORLD_WIDTH, 4))
            q_expected_sarsa = np.copy(q_sarsa)
            q_q_learning = np.copy(q_sarsa)
            for ep in range(episodes):
                sarsa_reward = sarsa(q_sarsa, expected=False, step_size=step_size)
                expected_sarsa_reward = sarsa(q_expected_sarsa, expected=True, step_size=step_size)
                q_learning_reward = q_learning(q_q_learning, step_size=step_size)
                performace[ASY_SARSA, ind] += sarsa_reward
                performace[ASY_EXPECTED_SARSA, ind] += expected_sarsa_reward
                performace[ASY_QLEARNING, ind] += q_learning_reward
                if ep < 100:
                    performace[INT_SARSA, ind] += sarsa_reward
                    performace[INT_EXPECTED_SARSA, ind] += expected_sarsa_reward
                    performace[INT_QLEARNING, ind] += q_learning_reward
    # Normalize: asymptotic rows by all episodes, interim rows by 100.
    performace[:3, :] /= episodes * runs
    performace[3:, :] /= 100 * runs
    labels = ['Asymptotic Sarsa', 'Asymptotic Expected Sarsa', 'Asymptotic Q-Learning',
              'Interim Sarsa', 'Interim Expected Sarsa', 'Interim Q-Learning']
    for method, label in zip(methods, labels):
        plt.plot(step_sizes, performace[method, :], label=label)
    plt.xlabel('alpha')
    plt.ylabel('reward per episode')
    plt.legend()
    plt.savefig('../images/figure_6_6.png')
    plt.close()
if __name__ == '__main__':
    # Reproduce both figures when run as a script.
    figure_6_4()
    figure_6_6()
| [
"numpy.random.binomial",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.copy",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.argmax",
"numpy.zeros",
"numpy.max",
"matplotlib.use",
"numpy.arange",
"numpy.random.choice",
"matplotlib.pyplot.ylabel",
"matplotlib.pyp... | [((489, 510), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (503, 510), False, 'import matplotlib\n'), ((6040, 6058), 'numpy.zeros', 'np.zeros', (['episodes'], {}), '(episodes)\n', (6048, 6058), True, 'import numpy as np\n'), ((6084, 6102), 'numpy.zeros', 'np.zeros', (['episodes'], {}), '(episodes)\n', (6092, 6102), True, 'import numpy as np\n'), ((6713, 6751), 'matplotlib.pyplot.plot', 'plt.plot', (['rewards_sarsa'], {'label': '"""Sarsa"""'}), "(rewards_sarsa, label='Sarsa')\n", (6721, 6751), True, 'import matplotlib.pyplot as plt\n'), ((6756, 6804), 'matplotlib.pyplot.plot', 'plt.plot', (['rewards_q_learning'], {'label': '"""Q-Learning"""'}), "(rewards_q_learning, label='Q-Learning')\n", (6764, 6804), True, 'import matplotlib.pyplot as plt\n'), ((6809, 6831), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episodes"""'], {}), "('Episodes')\n", (6819, 6831), True, 'import matplotlib.pyplot as plt\n'), ((6836, 6879), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sum of rewards during episode"""'], {}), "('Sum of rewards during episode')\n", (6846, 6879), True, 'import matplotlib.pyplot as plt\n'), ((6884, 6903), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-100, 0]'], {}), '([-100, 0])\n', (6892, 6903), True, 'import matplotlib.pyplot as plt\n'), ((6908, 6920), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6918, 6920), True, 'import matplotlib.pyplot as plt\n'), ((6926, 6965), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../images/figure_6_4.png"""'], {}), "('../images/figure_6_4.png')\n", (6937, 6965), True, 'import matplotlib.pyplot as plt\n'), ((6970, 6981), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6979, 6981), True, 'import matplotlib.pyplot as plt\n'), ((7451, 7475), 'numpy.arange', 'np.arange', (['(0.1)', '(1.1)', '(0.1)'], {}), '(0.1, 1.1, 0.1)\n', (7460, 7475), True, 'import numpy as np\n'), ((9105, 9124), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""alpha"""'], {}), 
"('alpha')\n", (9115, 9124), True, 'import matplotlib.pyplot as plt\n'), ((9129, 9161), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""reward per episode"""'], {}), "('reward per episode')\n", (9139, 9161), True, 'import matplotlib.pyplot as plt\n'), ((9166, 9178), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9176, 9178), True, 'import matplotlib.pyplot as plt\n'), ((9184, 9223), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../images/figure_6_6.png"""'], {}), "('../images/figure_6_6.png')\n", (9195, 9223), True, 'import matplotlib.pyplot as plt\n'), ((9228, 9239), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9237, 9239), True, 'import matplotlib.pyplot as plt\n'), ((2555, 2585), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'EPSILON'], {}), '(1, EPSILON)\n', (2573, 2585), True, 'import numpy as np\n'), ((2607, 2632), 'numpy.random.choice', 'np.random.choice', (['ACTIONS'], {}), '(ACTIONS)\n', (2623, 2632), True, 'import numpy as np\n'), ((6153, 6193), 'numpy.zeros', 'np.zeros', (['(WORLD_HEIGHT, WORLD_WIDTH, 4)'], {}), '((WORLD_HEIGHT, WORLD_WIDTH, 4))\n', (6161, 6193), True, 'import numpy as np\n'), ((6217, 6233), 'numpy.copy', 'np.copy', (['q_sarsa'], {}), '(q_sarsa)\n', (6224, 6233), True, 'import numpy as np\n'), ((9044, 9100), 'matplotlib.pyplot.plot', 'plt.plot', (['step_sizes', 'performace[method, :]'], {'label': 'label'}), '(step_sizes, performace[method, :], label=label)\n', (9052, 9100), True, 'import matplotlib.pyplot as plt\n'), ((5201, 5228), 'numpy.argmax', 'np.argmax', (['q_value[i, j, :]'], {}), '(q_value[i, j, :])\n', (5210, 5228), True, 'import numpy as np\n'), ((7856, 7896), 'numpy.zeros', 'np.zeros', (['(WORLD_HEIGHT, WORLD_WIDTH, 4)'], {}), '((WORLD_HEIGHT, WORLD_WIDTH, 4))\n', (7864, 7896), True, 'import numpy as np\n'), ((7928, 7944), 'numpy.copy', 'np.copy', (['q_sarsa'], {}), '(q_sarsa)\n', (7935, 7944), True, 'import numpy as np\n'), ((7972, 7988), 'numpy.copy', 'np.copy', (['q_sarsa'], {}), 
'(q_sarsa)\n', (7979, 7988), True, 'import numpy as np\n'), ((3619, 3633), 'numpy.max', 'np.max', (['q_next'], {}), '(q_next)\n', (3625, 3633), True, 'import numpy as np\n'), ((2788, 2803), 'numpy.max', 'np.max', (['values_'], {}), '(values_)\n', (2794, 2803), True, 'import numpy as np\n'), ((4729, 4777), 'numpy.max', 'np.max', (['q_value[next_state[0], next_state[1], :]'], {}), '(q_value[next_state[0], next_state[1], :])\n', (4735, 4777), True, 'import numpy as np\n')] |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# This script is based on an early version of the tensorflow example on
# language modeling with PTB (at /models/blob/master/tutorials/rnn/ptb/ptb_word_lm.py)
#
# <NAME> & <NAME>
#
import reader3 as rdr
import tensorflow as tf
import collections
import math, sys
import time
import argparse
import numpy as np
import os
from collections import Counter
import requests
# Raw PennTreeBank splits hosted in the wojzaremba/lstm repository.
PENN_URLS = {
    'Train': "https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt",
    'Test': "https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt",
    'Valid': "https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt"
}
def none_pointer_copy(dictionary):
    """Return a shallow copy of *dictionary*.

    The copy shares no top-level container with the input, so adding or
    removing keys on the returned dict does not affect the original;
    the values themselves are not copied.
    """
    # Idiomatic shallow copy replaces the original hand-rolled
    # key-by-key loop; behavior is identical.
    return dict(dictionary)
def get_data():
    """
    Download the PennTreeBank splits (train, test, valid) listed in
    PENN_URLS into data/PennTreeBank/.
    """
    print("Downloading PennTreeBank Data from https://raw.githubusercontent.com/wojzaremba/lstm/master/data ... ", end = "")
    for url_key in PENN_URLS:
        url = PENN_URLS[url_key]
        data = requests.get(url, stream = True)
        # Name the local file after the last URL path component.
        file_name = url.split("/")[-1]
        # Stream the download to disk in 1 KiB chunks.
        with open('data/PennTreeBank/' + file_name, 'wb') as f:
            for chunk in data.iter_content(chunk_size = 1024):
                if chunk:
                    f.write(chunk)
    print("Done")
def buildData(path, dsm):
    '''
    Process the ptb dataset: keep only words present both in the corpus
    and in the embedding dict `dsm`, replace everything else with <UNK>,
    and write the processed splits to `path`/processed/.
    '''
    # Download the raw splits first if the data directory is empty.
    if len(os.listdir(path)) == 0:
        get_data()
    # Load all data from path
    with open(path + '/ptb.train.txt', 'r') as f:
        train = f.read().split('\n')
    with open(path + '/ptb.test.txt', 'r') as f:
        test = f.read().split('\n')
    with open(path +'/ptb.valid.txt', 'r') as f:
        valid = f.read().split('\n')
    # collect words that overlap between the training corpus and dsm
    words = list(dsm.keys())
    counter = Counter([x for sentence in train for x in sentence.split()])
    overlapping_vocab = list(set(list(counter.keys())).intersection(set(words)))
    identity_map = dict(zip(overlapping_vocab, overlapping_vocab))
    print('Number of Words:', len(overlapping_vocab))
    # Map known words to themselves, everything else to '<UNK>'.
    # NOTE: setdefault also *inserts* '<UNK>' entries into identity_map,
    # so unseen words hit the dict directly on later lookups.
    processed_train = [[identity_map.setdefault(x, '<UNK>') for x in sentence.split()] for sentence in train]
    processed_test = [[identity_map.setdefault(x, '<UNK>') for x in sentence.split()] for sentence in test]
    processed_valid = [[identity_map.setdefault(x, '<UNK>') for x in sentence.split()] for sentence in valid]
    # write data back
    if not os.path.exists(path + '/processed'):
        os.mkdir(path + '/processed')
    with open(path + '/processed/ptb.train.txt', 'w') as f:
        f.write(' \n '.join([' '.join(x) for x in processed_train]))
    with open(path + '/processed/ptb.test.txt', 'w') as f:
        f.write(' \n '.join([' '.join(x) for x in processed_test]))
    with open(path + '/processed/ptb.valid.txt', 'w') as f:
        f.write(' \n '.join([' '.join(x) for x in processed_valid]))
class LanguageModel(object):
    """2-Layer LSTM language model.

    Depending on config['model'] ('test', 'input', 'output', 'tied') the
    input and/or output embeddings are either trained from scratch or
    frozen to the supplied pretrained `weight_embeddings` matrix.
    NOTE(review): block nesting was reconstructed from flattened source;
    confirm scope boundaries against the original file.
    """

    def __init__(self, is_training, config, weight_embeddings, vocabulary):
        self.config = config
        self.vocabulary = vocabulary
        # One batch of token ids and their next-token targets.
        self._input_data = tf.placeholder(tf.int32, [self.config['batch_size'], self.config['num_steps']])
        self._targets = tf.placeholder(tf.int32, [self.config['batch_size'], self.config['num_steps']])
        embedding_size = weight_embeddings.shape[1]
        with tf.name_scope('Language_Model'):
            # build embedding lookup table
            with tf.variable_scope('embedding'):
                if self.config['model'] == 'output' or self.config['model'] == 'test':
                    # Trainable input embedding.
                    embedding_matrix = tf.get_variable("embedding",
                                                       [self.vocabulary, self.config['size']],
                                                       dtype=tf.float32)
                else:
                    # Frozen pretrained input embedding.
                    embedding_matrix = tf.constant(weight_embeddings, dtype = tf.float32)
            with tf.variable_scope('rnn'):
                lstms = [tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0) for size in [self.config['size'], self.config['size']]]
                drops = [tf.contrib.rnn.DropoutWrapper(lstm, input_size=self.config['size'],
                                                        output_keep_prob=self.config['dropout'],
                                                        dtype=tf.float32) for lstm in lstms]
                cell = tf.nn.rnn_cell.MultiRNNCell(drops, state_is_tuple=True)
                self._initial_state = cell.zero_state(self.config['batch_size'], tf.float32)
                inputs = tf.nn.embedding_lookup(embedding_matrix, self.input_data)
                inputs = tf.nn.dropout(inputs, self.config['dropout'])
                inputs, state = tf.nn.dynamic_rnn(cell, inputs, initial_state=self._initial_state)
            if self.config['model'] == 'test':
                # Trainable output projection (no pretrained embeddings).
                with tf.variable_scope('softmax_output'):
                    softmax_w = tf.get_variable(
                        "softmax_w", [self.config['size'], self.vocabulary], dtype=tf.float32)
                    softmax_b = tf.get_variable("softmax_b", [self.vocabulary], dtype=tf.float32)
            if self.config['model'] == 'input':
                # Set inputs as pretrained embeddings
                with tf.variable_scope('softmax_output'):
                    softmax_w = tf.get_variable(
                        "softmax_w", [embedding_size, self.vocabulary], dtype=tf.float32)
                    softmax_b = tf.get_variable("softmax_b", [self.vocabulary], dtype=tf.float32)
                #inputs = tf.transpose(inputs, [1, 0, 2])
            if self.config['model'] == 'output':
                # Set outputs as pretrained embeddings
                softmax_w = tf.constant(weight_embeddings.T, dtype = tf.float32)
                softmax_b = tf.zeros(shape=[self.vocabulary], dtype=tf.float32, name="softmax_b")
                #inputs = tf.transpose(inputs, [1, 0, 2])
            if self.config['model'] == 'tied':
                # tie input embedding weights to output embedding weights which are non trainable
                softmax_w = tf.constant(weight_embeddings.T, dtype = tf.float32)
                softmax_b = tf.zeros(shape=[self.vocabulary], dtype=tf.float32, name="softmax_b")
            self.W = softmax_w
            self.b = softmax_b
            # calculate logits
            inputs = tf.reshape(inputs, [-1, self.config['size']])
            if not self.config['model'] == 'test':
                # Project the LSTM output to the embedding dimension so it
                # matches the (pretrained) softmax weight shape.
                projection = tf.get_variable(
                    "projection", [self.config['size'], embedding_size], dtype=tf.float32)
                inputs = tf.matmul(inputs, projection)
            logits = tf.nn.xw_plus_b(inputs, softmax_w, softmax_b)
            # Reshape logits to be a 3-D tensor for sequence loss
            logits = tf.reshape(logits, [self.config['batch_size'], self.config['num_steps'], self.vocabulary])
            loss = tf.contrib.seq2seq.sequence_loss(logits,
                                                self.targets,
                                                tf.ones([self.config['batch_size'], self.config['num_steps']], dtype=tf.float32),
                                                average_across_timesteps=False,
                                                average_across_batch=True)
            # labels = tf.reshape(self._targets, [-1])
            # loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logits,labels = labels)
            self._cost = cost = tf.reduce_sum(loss)
            self._lr = tf.Variable(0.0, trainable=False)
            self._perp = tf.reduce_sum(loss)
            self._final_state = state
        # Evaluation-only graphs need no optimizer ops.
        if not is_training:
            return
        # Clip gradients by global norm, then apply plain SGD.
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                          self.config['clip_norm'])
        optimizer = tf.train.GradientDescentOptimizer(self._lr)
        self.optimizer = optimizer
        self._train_op = optimizer.apply_gradients(zip(grads, tvars))

    def assign_lr(self, session, lr_value):
        # Update the learning-rate variable inside the running session.
        session.run(tf.assign(self.lr, lr_value))

    @property
    def input_data(self):
        return self._input_data

    @property
    def targets(self):
        return self._targets

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        return self._cost

    @property
    def perp(self):
        return self._perp

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op
def run_epoch(session, m, data, eval_op):
    """Run one pass over `data` with model `m`; return the perplexity.

    `eval_op` is the extra op to run per step (m.train_op for training,
    tf.no_op() for evaluation).
    """
    # NOTE(review): epoch_size is computed here and then immediately
    # overwritten by the value returned from rdr.ptb_iterator below.
    epoch_size = ((len(data) // m.config['batch_size']) - 1) // m.config['num_steps']
    start_time = time.time()
    costs = 0.0
    perps = 0.0
    iters = 0
    state = session.run(m.initial_state)
    epoch_size, iterator = rdr.ptb_iterator(data, m.config['batch_size'], m.config['num_steps'])
    for step, (x, y) in enumerate(iterator):
        # Feed the previous final state back in to carry the LSTM state
        # across batches.
        perp, cost, state, _ = session.run([m.perp, m.cost, m.final_state, eval_op],
                                           {m.input_data: x,
                                            m.targets: y,
                                            m.initial_state: state})
        costs += cost
        perps += perp
        iters += m.config['num_steps']
        print("%d / %d exp_cost: %.3f perplexity: %.3f speed: %.0f wps" %
              (step, epoch_size, np.exp(costs / iters), np.exp(perps / iters),
               iters * m.config['batch_size'] / (time.time() - start_time)), end = '\r')
    return np.exp(perps / iters)
def run(config, dsm):
    '''
    Run language model: build train/valid/test model instances that share
    variables, train for config['epochs'] epochs with learning-rate decay,
    and evaluate on the validation and test splits.

    Parameters
    ----------
    config : dict
        Hyper-parameters (batch_size, num_steps, epochs, dropout, ...).
    dsm : dict
        Distributional semantic model mapping word -> embedding vector.

    Returns
    -------
    (list, list, list)
        Per-epoch train and valid perplexities, and the final test
        perplexity (single-element list).
    '''
    train_data, valid_data, test_data, vocabulary, word_to_id, id_to_word = rdr.ptb_raw_data(config['data_path'])
    # create LSTM layers
    embedding_size = 650
    weight_embeddings = np.zeros((1,650))
    if not config['model'] == 'test':
        # Build the embedding matrix from the DSM: one row per vocabulary
        # word, indexed by word id.
        embedding_size = np.asarray(list(dsm.values())).shape[1]
        weight_embeddings = np.zeros((len(word_to_id), embedding_size))
        for word in word_to_id.keys():
            weight_embeddings[word_to_id[word],:] = dsm[word]
    # create validation and test configerations withourt
    # (none_pointer_copy is presumably a deep-ish copy helper defined
    # elsewhere in this file -- not visible here)
    test_config = {}
    valid_config = {}
    test_config = none_pointer_copy(config)
    valid_config = none_pointer_copy(config)
    # Evaluation processes one token at a time and disables dropout.
    test_config['batch_size'] = 1
    test_config['num_steps'] = 1
    test_config['dropout'] = 1.0
    valid_config['dropout'] = 1.0
    train_perplexity = []
    valid_perplexity = []
    test_perplexity = []
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config['init_val'], config['init_val'])
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = LanguageModel(is_training=True, config=config, weight_embeddings = weight_embeddings, vocabulary = vocabulary)
        # reuse=True: the eval models share every variable with `m`.
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mvalid = LanguageModel(is_training=False, config=valid_config, weight_embeddings = weight_embeddings, vocabulary = vocabulary)
            mtest = LanguageModel(is_training=False, config=test_config, weight_embeddings = weight_embeddings, vocabulary = vocabulary)
        tf.initialize_all_variables().run()
        for i in range(config['epochs']):
            # Geometric decay once training passes max_epochs.
            lr_decay = config['decay_rate']** max(i - config['max_epochs'], 0)
            m.assign_lr(session, config['learning_rate'] * lr_decay)
            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr),))
            train_perplexity.append(run_epoch(session, m, train_data, m.train_op))
            print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity[-1]))
            valid_perplexity.append(run_epoch(session, mvalid, valid_data, tf.no_op()))
            print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity[-1]))
        # Single test evaluation after training completes.
        test_perplexity.append(run_epoch(session, mtest, test_data, tf.no_op()))
        print("Test Perplexity: %.3f" % test_perplexity[-1])
    return train_perplexity, valid_perplexity, test_perplexity
if __name__ == "__main__":
    # Set up a few command line arguments for program
    parser = argparse.ArgumentParser(description = 'Language model module with several different modes for the final softmax layer.')
    # NOTE(review): boolean-ish flags below use action='store', so any
    # non-empty string (including "False") is truthy -- action='store_true'
    # would be the conventional choice; left unchanged to preserve the CLI.
    parser.add_argument('--check_gpu', '-g', action = 'store',
                        required = False,
                        default = False,
                        help = ('Function to check if there is a gpu installed for tensorflow. If there is a GPU that is not detected then CUDDA may need to be installed'))
    parser.add_argument('--data_path', '-d', action = 'store',
                        required = False,
                        default = 'data/PennTreeBank/processed',
                        help = ('Function to get data for lm_1b model'))
    parser.add_argument('--size', '-s', action = 'store',
                        required = False,
                        default = '650',
                        help = ('Size of hidden layers of model'))
    parser.add_argument('--learning_rate', '-lr', action = 'store',
                        required = False,
                        default = '1.0',
                        help = ('Learning rate of languge model whem training.'))
    parser.add_argument('--batch_size', '-bs', action = 'store',
                        required = False,
                        default = '20',
                        help = ('Batch size for training.'))
    parser.add_argument('--num_steps', '-ns', action = 'store',
                        required = False,
                        default = '35',
                        help = ('Time step for unrolling lstms of language model.'))
    parser.add_argument('--dropout_keep', '-dk', action = 'store',
                        required = False,
                        default = '0.5',
                        help = ('Probability of keeping a value using dropout.'))
    parser.add_argument('--epochs', '-ep', action = 'store',
                        required = False,
                        default = '39',
                        help = ('Epochs used for training.'))
    parser.add_argument('--max_epochs', '-mep', action = 'store',
                        required = False,
                        default = '6',
                        help = ('Max epochs before learning decay schedule activates.'))
    parser.add_argument('--decay_rate', '-dr', action = 'store',
                        required = False,
                        default = '0.8',
                        help = ('Decay rate to decrease learning rate by after each epoch.'))
    parser.add_argument('--clip_norm', '-cn', action = 'store',
                        required = False,
                        default = '5.0',
                        help = ('Size of global clip norm of gradients.'))
    parser.add_argument('--init_value', '-iv', action = 'store',
                        required = False,
                        default = '0.05',
                        help = ('Range of values +- for the uniform random distribution initializer.'))
    parser.add_argument('--model', '-m', action = 'store',
                        required = False,
                        default = 'baseline',
                        help = ('Type of model to build, including "baseline", "tied", "tied+h" and "htied.'))
    parser.add_argument('--dsm_path', '-dsm', action = 'store',
                        required = False,
                        default = 'new',
                        help = ('Path to distributional Model.'))
    parser.add_argument('--save_path', '-sp', action = 'store',
                        required = False,
                        default = 'History',
                        help = ('Path to save history information.'))
    parser.add_argument('--build', '-bu', action = 'store',
                        required = False,
                        default = 'False',
                        help = ('Function to build data'))
    args = parser.parse_args()
    if args.check_gpu:
        print(tf.test.is_gpu_available())
    # build configuration file (numeric CLI strings converted here)
    config = {
        'batch_size': int(args.batch_size),
        'num_steps': int(args.num_steps),
        'epochs': int(args.epochs),
        'data_path': args.data_path,
        'decay_rate': float(args.decay_rate),
        'learning_rate': float(args.learning_rate),
        'init_val': float(args.init_value),
        'model': args.model,
        'clip_norm': float(args.clip_norm),
        'max_epochs': int(args.max_epochs),
        'dropout': float(args.dropout_keep),
        'size': int(args.size)
    }
    # Run name combines the model type and the DSM file stem.
    name = args.model + '_' + args.dsm_path.split('/')[-1][:-4]
    print(name)
    # NOTE(review): the DSM is loaded unconditionally, even when only
    # --build is requested -- presumably intentional since buildData
    # also takes it.
    dsm = np.load(args.dsm_path).item()
    if args.build == 'False':
        train_perplexity, valid_perplexity, test_perplexity = run(config = config, dsm = dsm)
        name = args.model + '_' + args.dsm_path.split('/')[-1][:-4]
        history = {}
        history['train'] = train_perplexity
        history['valid'] = valid_perplexity
        history['test'] = test_perplexity
        np.save(args.save_path + '/' + name + '_history.npy', history)
    else:
        buildData(path = args.data_path, dsm = dsm)
    # else:
    #     raise Exception('No commands provided. Please choose one of the options.')
| [
"os.mkdir",
"numpy.load",
"tensorflow.reduce_sum",
"argparse.ArgumentParser",
"tensorflow.trainable_variables",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.assign",
"tensorflow.Variable",
"numpy.exp",
"reader3.ptb_iterator",
"reader3.ptb_raw_data",
"tensorflow.get_variable",
"te... | [((9304, 9315), 'time.time', 'time.time', ([], {}), '()\n', (9313, 9315), False, 'import time\n'), ((9423, 9492), 'reader3.ptb_iterator', 'rdr.ptb_iterator', (['data', "m.config['batch_size']", "m.config['num_steps']"], {}), "(data, m.config['batch_size'], m.config['num_steps'])\n", (9439, 9492), True, 'import reader3 as rdr\n'), ((10125, 10146), 'numpy.exp', 'np.exp', (['(perps / iters)'], {}), '(perps / iters)\n', (10131, 10146), True, 'import numpy as np\n'), ((10292, 10329), 'reader3.ptb_raw_data', 'rdr.ptb_raw_data', (["config['data_path']"], {}), "(config['data_path'])\n", (10308, 10329), True, 'import reader3 as rdr\n'), ((10405, 10423), 'numpy.zeros', 'np.zeros', (['(1, 650)'], {}), '((1, 650))\n', (10413, 10423), True, 'import numpy as np\n'), ((12806, 12934), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Language model module with several different modes for the final softmax layer."""'}), "(description=\n 'Language model module with several different modes for the final softmax layer.'\n )\n", (12829, 12934), False, 'import argparse\n'), ((1827, 1857), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (1839, 1857), False, 'import requests\n'), ((3300, 3335), 'os.path.exists', 'os.path.exists', (["(path + '/processed')"], {}), "(path + '/processed')\n", (3314, 3335), False, 'import os\n'), ((3345, 3374), 'os.mkdir', 'os.mkdir', (["(path + '/processed')"], {}), "(path + '/processed')\n", (3353, 3374), False, 'import os\n'), ((4004, 4083), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', "[self.config['batch_size'], self.config['num_steps']]"], {}), "(tf.int32, [self.config['batch_size'], self.config['num_steps']])\n", (4018, 4083), True, 'import tensorflow as tf\n'), ((4104, 4183), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', "[self.config['batch_size'], self.config['num_steps']]"], {}), "(tf.int32, [self.config['batch_size'], 
self.config['num_steps']])\n", (4118, 4183), True, 'import tensorflow as tf\n'), ((5385, 5442), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding_matrix', 'self.input_data'], {}), '(embedding_matrix, self.input_data)\n', (5407, 5442), True, 'import tensorflow as tf\n'), ((5456, 5501), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inputs', "self.config['dropout']"], {}), "(inputs, self.config['dropout'])\n", (5469, 5501), True, 'import tensorflow as tf\n'), ((5522, 5588), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'inputs'], {'initial_state': 'self._initial_state'}), '(cell, inputs, initial_state=self._initial_state)\n', (5539, 5588), True, 'import tensorflow as tf\n'), ((7051, 7096), 'tensorflow.reshape', 'tf.reshape', (['inputs', "[-1, self.config['size']]"], {}), "(inputs, [-1, self.config['size']])\n", (7061, 7096), True, 'import tensorflow as tf\n'), ((7323, 7368), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['inputs', 'softmax_w', 'softmax_b'], {}), '(inputs, softmax_w, softmax_b)\n', (7338, 7368), True, 'import tensorflow as tf\n'), ((7440, 7534), 'tensorflow.reshape', 'tf.reshape', (['logits', "[self.config['batch_size'], self.config['num_steps'], self.vocabulary]"], {}), "(logits, [self.config['batch_size'], self.config['num_steps'],\n self.vocabulary])\n", (7450, 7534), True, 'import tensorflow as tf\n'), ((8081, 8100), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (8094, 8100), True, 'import tensorflow as tf\n'), ((8117, 8150), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (8128, 8150), True, 'import tensorflow as tf\n'), ((8168, 8187), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (8181, 8187), True, 'import tensorflow as tf\n'), ((8278, 8302), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (8300, 8302), True, 'import tensorflow as tf\n'), ((8448, 8491), 
'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['self._lr'], {}), '(self._lr)\n', (8481, 8491), True, 'import tensorflow as tf\n'), ((11148, 11160), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11158, 11160), True, 'import tensorflow as tf\n'), ((11195, 11265), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (["(-config['init_val'])", "config['init_val']"], {}), "(-config['init_val'], config['init_val'])\n", (11224, 11265), True, 'import tensorflow as tf\n'), ((17809, 17871), 'numpy.save', 'np.save', (["(args.save_path + '/' + name + '_history.npy')", 'history'], {}), "(args.save_path + '/' + name + '_history.npy', history)\n", (17816, 17871), True, 'import numpy as np\n'), ((2235, 2251), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2245, 2251), False, 'import os\n'), ((4242, 4273), 'tensorflow.name_scope', 'tf.name_scope', (['"""Language_Model"""'], {}), "('Language_Model')\n", (4255, 4273), True, 'import tensorflow as tf\n'), ((6475, 6525), 'tensorflow.constant', 'tf.constant', (['weight_embeddings.T'], {'dtype': 'tf.float32'}), '(weight_embeddings.T, dtype=tf.float32)\n', (6486, 6525), True, 'import tensorflow as tf\n'), ((6548, 6617), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.vocabulary]', 'dtype': 'tf.float32', 'name': '"""softmax_b"""'}), "(shape=[self.vocabulary], dtype=tf.float32, name='softmax_b')\n", (6556, 6617), True, 'import tensorflow as tf\n'), ((6823, 6873), 'tensorflow.constant', 'tf.constant', (['weight_embeddings.T'], {'dtype': 'tf.float32'}), '(weight_embeddings.T, dtype=tf.float32)\n', (6834, 6873), True, 'import tensorflow as tf\n'), ((6896, 6965), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.vocabulary]', 'dtype': 'tf.float32', 'name': '"""softmax_b"""'}), "(shape=[self.vocabulary], dtype=tf.float32, name='softmax_b')\n", (6904, 6965), True, 'import tensorflow as tf\n'), ((7160, 7251), 'tensorflow.get_variable', 'tf.get_variable', 
(['"""projection"""', "[self.config['size'], embedding_size]"], {'dtype': 'tf.float32'}), "('projection', [self.config['size'], embedding_size], dtype=\n tf.float32)\n", (7175, 7251), True, 'import tensorflow as tf\n'), ((7279, 7308), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'projection'], {}), '(inputs, projection)\n', (7288, 7308), True, 'import tensorflow as tf\n'), ((7686, 7771), 'tensorflow.ones', 'tf.ones', (["[self.config['batch_size'], self.config['num_steps']]"], {'dtype': 'tf.float32'}), "([self.config['batch_size'], self.config['num_steps']], dtype=tf.float32\n )\n", (7693, 7771), True, 'import tensorflow as tf\n'), ((8341, 8366), 'tensorflow.gradients', 'tf.gradients', (['cost', 'tvars'], {}), '(cost, tvars)\n', (8353, 8366), True, 'import tensorflow as tf\n'), ((8654, 8682), 'tensorflow.assign', 'tf.assign', (['self.lr', 'lr_value'], {}), '(self.lr, lr_value)\n', (8663, 8682), True, 'import tensorflow as tf\n'), ((11279, 11342), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': 'None', 'initializer': 'initializer'}), "('model', reuse=None, initializer=initializer)\n", (11296, 11342), True, 'import tensorflow as tf\n'), ((11484, 11547), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), "('model', reuse=True, initializer=initializer)\n", (11501, 11547), True, 'import tensorflow as tf\n'), ((16751, 16777), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {}), '()\n', (16775, 16777), True, 'import tensorflow as tf\n'), ((17422, 17444), 'numpy.load', 'np.load', (['args.dsm_path'], {}), '(args.dsm_path)\n', (17429, 17444), True, 'import numpy as np\n'), ((4328, 4358), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""embedding"""'], {}), "('embedding')\n", (4345, 4358), True, 'import tensorflow as tf\n'), ((4771, 4795), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rnn"""'], {}), "('rnn')\n", (4788, 4795), True, 'import 
tensorflow as tf\n'), ((5225, 5280), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['drops'], {'state_is_tuple': '(True)'}), '(drops, state_is_tuple=True)\n', (5252, 5280), True, 'import tensorflow as tf\n'), ((5643, 5678), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""softmax_output"""'], {}), "('softmax_output')\n", (5660, 5678), True, 'import tensorflow as tf\n'), ((5712, 5803), 'tensorflow.get_variable', 'tf.get_variable', (['"""softmax_w"""', "[self.config['size'], self.vocabulary]"], {'dtype': 'tf.float32'}), "('softmax_w', [self.config['size'], self.vocabulary], dtype=\n tf.float32)\n", (5727, 5803), True, 'import tensorflow as tf\n'), ((5856, 5921), 'tensorflow.get_variable', 'tf.get_variable', (['"""softmax_b"""', '[self.vocabulary]'], {'dtype': 'tf.float32'}), "('softmax_b', [self.vocabulary], dtype=tf.float32)\n", (5871, 5921), True, 'import tensorflow as tf\n'), ((6030, 6065), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""softmax_output"""'], {}), "('softmax_output')\n", (6047, 6065), True, 'import tensorflow as tf\n'), ((6099, 6185), 'tensorflow.get_variable', 'tf.get_variable', (['"""softmax_w"""', '[embedding_size, self.vocabulary]'], {'dtype': 'tf.float32'}), "('softmax_w', [embedding_size, self.vocabulary], dtype=tf.\n float32)\n", (6114, 6185), True, 'import tensorflow as tf\n'), ((6238, 6303), 'tensorflow.get_variable', 'tf.get_variable', (['"""softmax_b"""', '[self.vocabulary]'], {'dtype': 'tf.float32'}), "('softmax_b', [self.vocabulary], dtype=tf.float32)\n", (6253, 6303), True, 'import tensorflow as tf\n'), ((11123, 11133), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (11131, 11133), True, 'import tensorflow as tf\n'), ((11834, 11863), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (11861, 11863), True, 'import tensorflow as tf\n'), ((12568, 12578), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (12576, 12578), True, 'import tensorflow as tf\n'), 
((4486, 4577), 'tensorflow.get_variable', 'tf.get_variable', (['"""embedding"""', "[self.vocabulary, self.config['size']]"], {'dtype': 'tf.float32'}), "('embedding', [self.vocabulary, self.config['size']], dtype=\n tf.float32)\n", (4501, 4577), True, 'import tensorflow as tf\n'), ((4706, 4754), 'tensorflow.constant', 'tf.constant', (['weight_embeddings'], {'dtype': 'tf.float32'}), '(weight_embeddings, dtype=tf.float32)\n', (4717, 4754), True, 'import tensorflow as tf\n'), ((4818, 4869), 'tensorflow.contrib.rnn.BasicLSTMCell', 'tf.contrib.rnn.BasicLSTMCell', (['size'], {'forget_bias': '(0.0)'}), '(size, forget_bias=0.0)\n', (4846, 4869), True, 'import tensorflow as tf\n'), ((4948, 5078), 'tensorflow.contrib.rnn.DropoutWrapper', 'tf.contrib.rnn.DropoutWrapper', (['lstm'], {'input_size': "self.config['size']", 'output_keep_prob': "self.config['dropout']", 'dtype': 'tf.float32'}), "(lstm, input_size=self.config['size'],\n output_keep_prob=self.config['dropout'], dtype=tf.float32)\n", (4977, 5078), True, 'import tensorflow as tf\n'), ((9982, 10003), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (9988, 10003), True, 'import numpy as np\n'), ((10005, 10026), 'numpy.exp', 'np.exp', (['(perps / iters)'], {}), '(perps / iters)\n', (10011, 10026), True, 'import numpy as np\n'), ((12400, 12410), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (12408, 12410), True, 'import tensorflow as tf\n'), ((10075, 10086), 'time.time', 'time.time', ([], {}), '()\n', (10084, 10086), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on 2020.04.11
@author: MiniUFO
Copyright 2018. All rights reserved. Use is subject to license terms.
"""
import os
import numpy as np
import xarray as xr
import dask.array as dsa
from dask.base import tokenize
from glob import glob
from pathlib import Path
from .core import CtlDescriptor
from functools import reduce
"""
IO related functions here
"""
def open_mfdataset(paths, parallel=False, encoding='GBK'):
    """
    Open multiple ctl files as a single dataset.

    Parameters
    ----------
    paths : str or sequence
        Either a string glob in the form ``"path/to/my/files/*.ctl"`` or an
        explicit list of files to open. Paths can be given as strings or as
        pathlib Paths.
    parallel : bool, optional
        If True, the open and preprocess steps of this function will be
        performed in parallel using ``dask.delayed``. Default is False.
    encoding : str
        Encoding for the ctl file content e.g., ['GBK', 'UTF-8'].

    Returns
    -------
    xarray.Dataset
        All per-file datasets concatenated along the 'time' dimension.

    Raises
    ------
    OSError
        If `paths` matches no file.

    Notes
    -----
    ``open_mfdataset`` opens files with read-only access. When you modify values
    of a Dataset, even one linked to files on disk, only the in-memory copy you
    are manipulating in xarray is modified: the original file on disk is never
    touched.
    """
    if isinstance(paths, str):
        paths = sorted(glob(paths))
    else:
        paths = [str(p) if isinstance(p, Path) else p for p in paths]

    if not paths:
        raise OSError("no files to open")

    if parallel:
        # dask is an optional dependency; only import it when requested.
        import dask
        datasets = [dask.delayed(open_CtlDataset)(p, encoding=encoding)
                    for p in paths]
        # dask.compute returns a tuple wrapping the evaluated collection;
        # the per-variable arrays inside each dataset stay lazy.
        # (Previously the parallel branch duplicated the concat below with
        # a separate, diverging code path.)
        (datasets,) = dask.compute(datasets)
    else:
        datasets = [open_CtlDataset(p, encoding=encoding) for p in paths]

    return xr.concat(datasets, dim='time')
def open_CtlDataset(desfile, returnctl=False, encoding='GBK'):
    """
    Open a 4D dataset with a descriptor file end with .ctl and
    return a xarray.Dataset. This also uses the dask to chunk
    very large dataset, which is similar to the xarray.open_dataset.

    Parameters
    ----------
    desfile: string or CtlDescriptor
        Path to the descriptor file end with .ctl, or an already-parsed
        CtlDescriptor.
    returnctl: bool
        Return dset and ctl as a tuple.
    encoding : str
        Encoding for the ctl file content e.g., ['GBK', 'UTF-8'].

    Returns
    -------
    dset : xarray.Dataset
        Dataset object containing all coordinates and variables.
    ctl : xgrads.CtlDescriptor
        Ctl descriptor file (only if `returnctl` is True).
    """
    if isinstance(desfile, str):
        if not desfile.endswith('.ctl'):
            raise Exception('unsupported file, suffix should be .ctl')
        ctl = CtlDescriptor(encoding=encoding, file=desfile)
    elif isinstance(desfile, CtlDescriptor):
        ctl = desfile
    else:
        raise Exception('unsupported type of input ('+str(type(desfile))+'), ' +
                        '[CtlDescriptor or str] are allowed')

    if ctl.template:
        # Template dset: data spread over several files.  Work out how many
        # time records each physical file contributes.
        tcount = len(ctl.tdef.samples)  # number of total time count
        tcPerf = []                     # number of time count per file
        for file in ctl.dsetPath:
            fsize = os.path.getsize(file)
            if fsize % ctl.tRecLength != 0:
                raise Exception('incomplete file for ' + file +
                                ' (not multiple of ' + str(ctl.tRecLength) +
                                ' bytes)')
            tcPerf.append(fsize // ctl.tRecLength)
        total_size = sum(tcPerf)
        if total_size < tcount:
            raise Exception('no enough files for ' + str(tcount) +
                            ' time records')
        # Trim the per-file counts so they sum exactly to tcount: keep files
        # up to the one where the cumulative count reaches tcount, and clip
        # that last file's count by the overshoot (rem <= 0 here).
        rem = tcount
        idx = 0
        for i, num in enumerate(tcPerf):
            rem -= num
            if rem <= 0:
                idx = i
                break
        tcPerf_m = tcPerf[:idx+1]
        tcPerf_m[idx] = tcPerf[idx] + rem
        binData = __read_template_as_dask(ctl, tcPerf_m)
    else:
        # Single data file: sanity-check its size against the descriptor.
        expect = ctl.tRecLength * ctl.tdef.length()
        actual = os.path.getsize(ctl.dsetPath)
        if expect != actual:
            print('WARNING: expected binary file size: {0}, actual size: {1}'
                  .format(expect, actual))
        binData = __read_as_dask(ctl)

    variables = []
    if ctl.pdef is None:
        # Regular lat/lon grid: coordinates come straight from x/y/z/t defs.
        for m, v in enumerate(ctl.vdef):
            if v.dependZ:
                da = xr.DataArray(name=v.name, data=binData[m],
                                  dims=['time', 'lev', 'lat', 'lon'],
                                  coords={'time': ctl.tdef.samples[:],
                                          'lev' : ctl.zdef.samples[:v.zcount],
                                          'lat' : ctl.ydef.samples[:],
                                          'lon' : ctl.xdef.samples[:]},
                                  attrs={'comment': v.comment,
                                         'storage': v.storage})
            else:
                # Surface variable: squeeze out the singleton z dimension.
                t, z, y, x = binData[m].shape
                da = xr.DataArray(name=v.name,
                                  data=binData[m].reshape((t,y,x)),
                                  dims=['time', 'lat', 'lon'],
                                  coords={'time': ctl.tdef.samples[:],
                                          'lat' : ctl.ydef.samples[:],
                                          'lon' : ctl.xdef.samples[:]},
                                  attrs={'comment': v.comment,
                                         'storage': v.storage})
            variables.append(da)
    else:
        # Projected grid (pdef): build projected y/x coordinates in metres.
        PDEF = ctl.pdef
        if PDEF.proj in ['lcc', 'lccr']:
            # BUGFIX: dx is the x-increment and dy the y-increment in a
            # GrADS pdef record; they were previously swapped between the
            # two axes (harmless only when dx == dy).
            ycoord = np.linspace(0, (PDEF.jsize-1) * PDEF.dy, PDEF.jsize)
            xcoord = np.linspace(0, (PDEF.isize-1) * PDEF.dx, PDEF.isize)
        elif PDEF.proj in ['sps', 'nps']:
            # Polar stereographic: coordinates relative to the pole point.
            inc = PDEF.gridinc * 1000  # change unit from km to m
            ycoord = np.linspace(-(PDEF.jpole), (PDEF.jsize-PDEF.jpole),
                                 PDEF.jsize) * inc
            xcoord = np.linspace(-(PDEF.ipole), (PDEF.isize-PDEF.ipole),
                                 PDEF.isize) * inc
        # NOTE: other projections fall through with ycoord/xcoord undefined
        # and raise NameError below (pre-existing behavior).
        for m, v in enumerate(ctl.vdef):
            if v.dependZ:
                da = xr.DataArray(name=v.name, data=binData[m],
                                  dims=['time', 'lev', 'y', 'x'],
                                  coords={'time': ctl.tdef.samples[:],
                                          'lev' : ctl.zdef.samples[:v.zcount],
                                          'y'   : ycoord,
                                          'x'   : xcoord},
                                  attrs={'comment': v.comment,
                                         'storage': v.storage})
            else:
                t, z, y, x = binData[m].shape
                da = xr.DataArray(name=v.name,
                                  data=binData[m].reshape((t,y,x)),
                                  dims=['time', 'y', 'x'],
                                  coords={'time': ctl.tdef.samples[:],
                                          'y'   : ycoord,
                                          'x'   : xcoord},
                                  attrs={'comment': v.comment,
                                         'storage': v.storage})
            variables.append(da)

    dset = xr.merge(variables)
    dset.attrs['title'] = ctl.title
    dset.attrs['undef'] = ctl.undef
    dset.attrs['pdef' ] = 'None'
    if ctl.pdef:
        dset.attrs['pdef' ] = ctl.pdef.proj
    if returnctl:
        return dset, ctl
    else:
        return dset
"""
Helper (private) methods are defined below
"""
def __read_as_dask(dd):
    """
    Read binary data and return as a dask array.

    Parameters
    ----------
    dd : CtlDescriptor
        Parsed ctl descriptor providing grid sizes, variable list, byte
        order, per-time record length and the data-file path.

    Returns
    -------
    list of dask.array.Array
        One lazy 4D (t, z, y, x) array per variable in ``dd.vdef``;
        nothing is read from disk until computed.
    """
    # Horizontal grid sizes come from pdef when a projection is present.
    if dd.pdef is None:
        t, y, x = dd.tdef.length(), dd.ydef.length(), dd.xdef.length()
    else:
        t, y, x = dd.tdef.length(), dd.pdef.jsize, dd.pdef.isize
    # Total number of float values over all variables -- used below only
    # to pick a chunking granularity.
    totalNum = sum([reduce(lambda x, y:
                    x*y, (t,v.zcount,y,x)) for v in dd.vdef])
    # Fortran-sequential files carry two extra 4-byte record markers
    # around every horizontal slice.
    if dd.sequential:
        sequentialSize = x * y + 2
    else:
        sequentialSize = -1
    binData = []
    dtype = '<f4' if dd.byteOrder == 'little' else '>f4'
    for m, v in enumerate(dd.vdef):
        # Unique task-name prefix for this variable's dask graph.
        name = '@miniufo_' + tokenize(v, m)
        if totalNum < (100 * 100 * 100 * 10):
            # small (~< 40 MB): one chunk for the whole variable
            chunk = (t, v.zcount, y, x)
            shape = (t, v.zcount, y, x)
            dsk = {(name, 0, 0, 0, 0):
                   (__read_var, dd.dsetPath, v, dd.tRecLength,
                    None, None, dtype, sequentialSize)}
            binData.append(dsa.Array(dsk, name, chunk,
                                     dtype=dtype, shape=shape))
        elif totalNum > (200 * 100 * 100 * 100):
            # large (~> 800 MB): one chunk per 2D horizontal slice
            chunk = (1, 1, y, x)
            shape = (t, v.zcount, y, x)
            dsk = {(name, l, k, 0, 0):
                   (__read_var, dd.dsetPath, v, dd.tRecLength,
                    l, k, dtype, sequentialSize)
                   for l in range(t)
                   for k in range(v.zcount)}
            binData.append(dsa.Array(dsk, name, chunk,
                                     dtype=dtype, shape=shape))
        else:
            # in between: one chunk per 3D (z, y, x) time slice
            chunk = (1, v.zcount, y, x)
            shape = (t, v.zcount, y, x)
            dsk = {(name, l, 0, 0, 0):
                   (__read_var, dd.dsetPath, v, dd.tRecLength,
                    l, None, dtype, sequentialSize)
                   for l in range(t)}
            binData.append(dsa.Array(dsk, name, chunk,
                                     dtype=dtype, shape=shape))
    return binData
def __read_template_as_dask(dd, tcPerf):
    """
    Read template binary data and return as a dask array.

    Parameters
    ----------
    dd : CtlDescriptor
        Parsed ctl descriptor with a template dset (data split over
        several physical files).
    tcPerf : list of int
        Number of time records to take from each file, in file order.

    Returns
    -------
    list of dask.array.Array
        One lazy 4D (t, z, y, x) array per variable in ``dd.vdef``.
    """
    if dd.pdef is None:
        t, y, x = dd.tdef.length(), dd.ydef.length(), dd.xdef.length()
    else:
        t, y, x = dd.tdef.length(), dd.pdef.jsize, dd.pdef.isize
    # Size estimate based on the first file only; selects the chunking
    # granularity below.
    totalNum = sum([reduce(lambda x, y:
                    x*y, (tcPerf[0],v.zcount,y,x)) for v in dd.vdef])
    if dd.sequential:
        sequentialSize = x * y + 2
    else:
        sequentialSize = -1
    binData = []
    dtype = '<f4' if dd.byteOrder == 'little' else '>f4'
    for m, v in enumerate(dd.vdef):
        name = '@miniufo_' + tokenize(v, m)
        if totalNum > (200 * 100 * 100 * 100):
            # large (~> 800 MB): one chunk per 2D horizontal slice
            chunk = (1, 1, y, x)
            shape = (t, v.zcount, y, x)
            # NOTE: inside the comprehension `m` is the *file* index (the
            # comprehension has its own scope), so `l + sum(tcPerf[:m])`
            # offsets each time key by the records of all previous files.
            dsk = {(name, l + sum(tcPerf[:m]), k, 0, 0):
                   (__read_var, f, v, dd.tRecLength,
                    l, k, dtype, sequentialSize)
                   for m, f in enumerate(dd.dsetPath[:len(tcPerf)])
                   for l in range(tcPerf[m])
                   for k in range(v.zcount)}
            binData.append(dsa.Array(dsk, name, chunk,
                                     dtype=dtype, shape=shape))
        else:
            # otherwise: one chunk per 3D (z, y, x) time slice
            chunk = (1, v.zcount, y, x)
            shape = (t, v.zcount, y, x)
            dsk = {(name, l + sum(tcPerf[:m]), 0, 0, 0):
                   (__read_var, f, v, dd.tRecLength,
                    l, None, dtype, sequentialSize)
                   for m, f in enumerate(dd.dsetPath[:len(tcPerf)])
                   for l in range(tcPerf[m])}
            binData.append(dsa.Array(dsk, name, chunk,
                                     dtype=dtype, shape=shape))
    return binData
def __read_var(file, var, tstride, tstep, zstep, dtype, sequentialSize=-1):
    """
    Read a variable given the trange.

    Parameters
    ----------
    file : str
        A file from which data are read.
    var : CtlVar
        A variable that need to be read.
    tstride : int
        Stride (in bytes) of a single time record.
    tstep : int
        T-step to be read, started from 0. If None, read all t-steps.
    zstep : int
        Z-step to be read, started from 0. If None, read all z-steps.
    dtype : str
        Numpy dtype string including byte order (e.g. '<f4').
    sequentialSize : int
        Size of the sequential block (= y * x + 2). Default of -1 means
        non-sequential storage.

    Returns
    -------
    numpy.ndarray
        4D array (t, z, y, x) covering the requested slab.
    """
    if var.storage == '-1,20':
        # '-1,20': presumably all time steps of this variable are written
        # consecutively starting at var.strPos -- TODO confirm against the
        # GrADS descriptor semantics.
        if tstep is None and zstep is None:
            # whole variable in one read
            shape = (var.tcount, var.zcount, var.ycount, var.xcount)
            if sequentialSize != -1:
                seqShp = (var.tcount, var.zcount, sequentialSize)
            else:
                seqShp = shape
            pos = var.strPos
            return __read_continuous(file, pos, shape, dtype,
                                     sequentialShape=seqShp)
        elif zstep is None and tstep is not None:
            # one time step, all levels
            # NOTE(review): pos ignores tstep here, so every time chunk
            # reads from var.strPos -- verify this is intended for this
            # storage mode.
            shape = (1, var.zcount, var.ycount, var.xcount)
            if sequentialSize != -1:
                seqShp = (1, var.zcount, sequentialSize)
            else:
                seqShp = shape
            pos = var.strPos
            return __read_continuous(file, pos, shape, dtype,
                                     sequentialShape=seqShp)
        elif tstep is None and zstep is not None:
            raise Exception('not implemented in -1,20')
        else:
            # single 2D (y, x) slice at level zstep
            shape = (1, 1, var.ycount, var.xcount)
            zstri = var.ycount * var.xcount * 4  # bytes per level
            if sequentialSize != -1:
                seqShp = (1, 1, sequentialSize)
                zstri += 8  # two 4-byte Fortran record markers per level
            else:
                seqShp = shape
            pos = var.strPos + zstri * zstep
            return __read_continuous(file, pos, shape, dtype,
                                     sequentialShape=seqShp)
    elif var.storage in ['99', '0', '00', '000', '1', '11', '111']:
        # '99'/'0'-style storage: one record per time step; records of the
        # same variable are tstride bytes apart.
        if tstep is None and zstep is None:
            # all time steps: read one record per step and stack them
            shape = (1, var.zcount, var.ycount, var.xcount)
            if sequentialSize != -1:
                seqShp = (1, var.zcount, sequentialSize)
            else:
                seqShp = shape
            pos = var.strPos
            data = []
            for l in range(var.tcount):
                data.append(__read_continuous(file, pos, shape, dtype,
                                              sequentialShape=seqShp))
                pos += tstride
            return np.concatenate(data)
        elif zstep is None and tstep is not None:
            # one time step, all levels
            shape = (1, var.zcount, var.ycount, var.xcount)
            if sequentialSize != -1:
                seqShp = (1, var.zcount, sequentialSize)
            else:
                seqShp = shape
            pos = var.strPos + tstride * tstep
            data = __read_continuous(file, pos, shape, dtype,
                                     sequentialShape=seqShp)
            return data
        elif tstep is None and zstep is not None:
            raise Exception('not implemented in 0,99')
        else:
            # single 2D (y, x) slice at (tstep, zstep)
            shape = (1, 1, var.ycount, var.xcount)
            zstri = var.ycount * var.xcount * 4  # bytes per level
            if sequentialSize != -1:
                seqShp = (1, 1, sequentialSize)
                zstri += 8  # two 4-byte Fortran record markers per level
            else:
                seqShp = shape
            pos = var.strPos + tstride * tstep + zstri * zstep
            return __read_continuous(file, pos, shape, dtype,
                                     sequentialShape=seqShp)
    else:
        raise Exception('invalid storage ' + var.storage +
                        ', only "99" or "-1,20" are supported')
def __read_continuous(file, offset=0, shape=None, dtype='<f4',
                      use_mmap=True, sequentialShape=None):
    """
    Load one continuous binary record into memory.

    Parameters
    ----------
    file : str
        Path of the binary file to read from.
    offset : int
        Byte position at which reading starts.
    shape : tuple
        Shape of the returned array.
    dtype : str
        Numpy dtype string, including byte order (e.g. '<f4').
    use_mmap : bool
        Memory-map the file instead of copying its content into RAM.
    sequentialShape : tuple
        Raw on-disk shape. For Fortran-sequential files the innermost
        dimension carries two extra 4-byte record markers, which are
        stripped below; otherwise it equals `shape`.

    Returns
    -------
    numpy.ndarray
        Array of the requested `shape`.
    """
    with open(file, 'rb') as fh:
        if use_mmap:
            # Lazy view on the file; bytes are fetched only on access.
            buf = np.memmap(fh, dtype=dtype, mode='r', offset=offset,
                            shape=sequentialShape, order='C')
        else:
            # Eager read of exactly prod(sequentialShape) values.
            count = reduce(lambda a, b: a * b, sequentialShape)
            fh.seek(offset)
            buf = np.fromfile(fh, dtype=dtype, count=count)

        if sequentialShape != shape:
            # Strip the leading/trailing Fortran record markers of every
            # sequential block, then restore the logical layout.
            buf = buf.reshape((shape[0], shape[1], -1))[:, :, 1:-1]
            buf = buf.reshape(shape, order='C')

        buf.shape = shape
        return buf
| [
"dask.array.Array",
"dask.delayed",
"dask.base.tokenize",
"numpy.fromfile",
"os.path.getsize",
"functools.reduce",
"xarray.concat",
"xarray.merge",
"xarray.DataArray",
"numpy.linspace",
"dask.compute",
"glob.glob",
"numpy.memmap",
"numpy.concatenate"
] | [((2023, 2054), 'xarray.concat', 'xr.concat', (['datasets'], {'dim': '"""time"""'}), "(datasets, dim='time')\n", (2032, 2054), True, 'import xarray as xr\n'), ((7744, 7763), 'xarray.merge', 'xr.merge', (['variables'], {}), '(variables)\n', (7752, 7763), True, 'import xarray as xr\n'), ((1620, 1649), 'dask.delayed', 'dask.delayed', (['open_CtlDataset'], {}), '(open_CtlDataset)\n', (1632, 1649), False, 'import dask\n'), ((1933, 1955), 'dask.compute', 'dask.compute', (['datasets'], {}), '(datasets)\n', (1945, 1955), False, 'import dask\n'), ((1972, 2006), 'xarray.concat', 'xr.concat', (['datasets[0]'], {'dim': '"""time"""'}), "(datasets[0], dim='time')\n", (1981, 2006), True, 'import xarray as xr\n'), ((4345, 4374), 'os.path.getsize', 'os.path.getsize', (['ctl.dsetPath'], {}), '(ctl.dsetPath)\n', (4360, 4374), False, 'import os\n'), ((1379, 1390), 'glob.glob', 'glob', (['paths'], {}), '(paths)\n', (1383, 1390), False, 'from glob import glob\n'), ((3340, 3361), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (3355, 3361), False, 'import os\n'), ((5956, 6010), 'numpy.linspace', 'np.linspace', (['(0)', '((PDEF.jsize - 1) * PDEF.dx)', 'PDEF.jsize'], {}), '(0, (PDEF.jsize - 1) * PDEF.dx, PDEF.jsize)\n', (5967, 6010), True, 'import numpy as np\n'), ((6030, 6084), 'numpy.linspace', 'np.linspace', (['(0)', '((PDEF.isize - 1) * PDEF.dy)', 'PDEF.isize'], {}), '(0, (PDEF.isize - 1) * PDEF.dy, PDEF.isize)\n', (6041, 6084), True, 'import numpy as np\n'), ((8339, 8386), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', '(t, v.zcount, y, x)'], {}), '(lambda x, y: x * y, (t, v.zcount, y, x))\n', (8345, 8386), False, 'from functools import reduce\n'), ((8708, 8722), 'dask.base.tokenize', 'tokenize', (['v', 'm'], {}), '(v, m)\n', (8716, 8722), False, 'from dask.base import tokenize\n'), ((10515, 10570), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', '(tcPerf[0], v.zcount, y, x)'], {}), '(lambda x, y: x * y, (tcPerf[0], v.zcount, y, x))\n', (10521, 
10570), False, 'from functools import reduce\n'), ((10892, 10906), 'dask.base.tokenize', 'tokenize', (['v', 'm'], {}), '(v, m)\n', (10900, 10906), False, 'from dask.base import tokenize\n'), ((16674, 16762), 'numpy.memmap', 'np.memmap', (['f'], {'dtype': 'dtype', 'mode': '"""r"""', 'offset': 'offset', 'shape': 'sequentialShape', 'order': '"""C"""'}), "(f, dtype=dtype, mode='r', offset=offset, shape=sequentialShape,\n order='C')\n", (16683, 16762), True, 'import numpy as np\n'), ((16833, 16876), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'sequentialShape'], {}), '(lambda x, y: x * y, sequentialShape)\n', (16839, 16876), False, 'from functools import reduce\n'), ((16922, 16973), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'dtype', 'count': 'number_of_values'}), '(f, dtype=dtype, count=number_of_values)\n', (16933, 16973), True, 'import numpy as np\n'), ((4709, 4982), 'xarray.DataArray', 'xr.DataArray', ([], {'name': 'v.name', 'data': 'binData[m]', 'dims': "['time', 'lev', 'lat', 'lon']", 'coords': "{'time': ctl.tdef.samples[:], 'lev': ctl.zdef.samples[:v.zcount], 'lat':\n ctl.ydef.samples[:], 'lon': ctl.xdef.samples[:]}", 'attrs': "{'comment': v.comment, 'storage': v.storage}"}), "(name=v.name, data=binData[m], dims=['time', 'lev', 'lat',\n 'lon'], coords={'time': ctl.tdef.samples[:], 'lev': ctl.zdef.samples[:v\n .zcount], 'lat': ctl.ydef.samples[:], 'lon': ctl.xdef.samples[:]},\n attrs={'comment': v.comment, 'storage': v.storage})\n", (4721, 4982), True, 'import xarray as xr\n'), ((6532, 6771), 'xarray.DataArray', 'xr.DataArray', ([], {'name': 'v.name', 'data': 'binData[m]', 'dims': "['time', 'lev', 'y', 'x']", 'coords': "{'time': ctl.tdef.samples[:], 'lev': ctl.zdef.samples[:v.zcount], 'y':\n ycoord, 'x': xcoord}", 'attrs': "{'comment': v.comment, 'storage': v.storage}"}), "(name=v.name, data=binData[m], dims=['time', 'lev', 'y', 'x'],\n coords={'time': ctl.tdef.samples[:], 'lev': ctl.zdef.samples[:v.zcount],\n 'y': ycoord, 'x': xcoord}, 
attrs={'comment': v.comment, 'storage': v.\n storage})\n", (6544, 6771), True, 'import xarray as xr\n'), ((9091, 9144), 'dask.array.Array', 'dsa.Array', (['dsk', 'name', 'chunk'], {'dtype': 'dtype', 'shape': 'shape'}), '(dsk, name, chunk, dtype=dtype, shape=shape)\n', (9100, 9144), True, 'import dask.array as dsa\n'), ((11434, 11487), 'dask.array.Array', 'dsa.Array', (['dsk', 'name', 'chunk'], {'dtype': 'dtype', 'shape': 'shape'}), '(dsk, name, chunk, dtype=dtype, shape=shape)\n', (11443, 11487), True, 'import dask.array as dsa\n'), ((11986, 12039), 'dask.array.Array', 'dsa.Array', (['dsk', 'name', 'chunk'], {'dtype': 'dtype', 'shape': 'shape'}), '(dsk, name, chunk, dtype=dtype, shape=shape)\n', (11995, 12039), True, 'import dask.array as dsa\n'), ((14873, 14893), 'numpy.concatenate', 'np.concatenate', (['data'], {}), '(data)\n', (14887, 14893), True, 'import numpy as np\n'), ((6212, 6273), 'numpy.linspace', 'np.linspace', (['(-PDEF.jpole)', '(PDEF.jsize - PDEF.jpole)', 'PDEF.jsize'], {}), '(-PDEF.jpole, PDEF.jsize - PDEF.jpole, PDEF.jsize)\n', (6223, 6273), True, 'import numpy as np\n'), ((6338, 6399), 'numpy.linspace', 'np.linspace', (['(-PDEF.ipole)', '(PDEF.isize - PDEF.ipole)', 'PDEF.isize'], {}), '(-PDEF.ipole, PDEF.isize - PDEF.ipole, PDEF.isize)\n', (6349, 6399), True, 'import numpy as np\n'), ((9628, 9681), 'dask.array.Array', 'dsa.Array', (['dsk', 'name', 'chunk'], {'dtype': 'dtype', 'shape': 'shape'}), '(dsk, name, chunk, dtype=dtype, shape=shape)\n', (9637, 9681), True, 'import dask.array as dsa\n'), ((10096, 10149), 'dask.array.Array', 'dsa.Array', (['dsk', 'name', 'chunk'], {'dtype': 'dtype', 'shape': 'shape'}), '(dsk, name, chunk, dtype=dtype, shape=shape)\n', (10105, 10149), True, 'import dask.array as dsa\n')] |
import inspect
import os
import time
import h5py
import shutil
import logging
import subprocess
import numpy as np
from romshake.simulators.reorder_elements import run_reordering
imt = 'PGV'
mask_file = 'mask.npy'
h5_gm_cor_file = 'loh1-GME_corrected.h5'
remote_dir = '<EMAIL>:/hppfs/scratch/0B/di46bak/'
sleepy_time = 1 * 60 # (5 minutes)
class SeisSolSimulator():
def __init__(self, par_file, sim_job_file, mesh_file, material_file,
max_jobs, t_per_sim, t_max_per_job, take_log_imt,
mask_bounds, netcdf_files=[]):
self.par_file = par_file
self.sim_job_file = sim_job_file
self.mesh_file = mesh_file
self.material_file = material_file
self.max_jobs = max_jobs
self.t_per_sim = t_per_sim
self.t_max_per_job = t_max_per_job
self.take_log_imt = take_log_imt
self.mask_bounds = mask_bounds
self.netcdf_files = netcdf_files
def load_data(self, folder, indices):
logging.info('Loading data.')
all_data = []
mask_path = os.path.join(folder, mask_file)
if os.path.exists(mask_path):
elem_mask = np.load(mask_path)
else:
elem_mask = self.make_mask(folder, mask_path)
for sim_idx in indices:
h5f = h5py.File(os.path.join(
folder, 'data', str(sim_idx), h5_gm_cor_file), 'r')
data = np.array(h5f[imt]).flatten()[elem_mask]
if self.take_log_imt:
data = np.log(data)
all_data.append(data)
return np.array(all_data).T
def make_mask(self, folder, mask_path):
logging.info('Creating mask.')
ref_idx = self.get_ref_idx(folder)
h5f = h5py.File(
os.path.join(folder, 'data', str(ref_idx), h5_gm_cor_file), 'r')
connect = h5f['connect']
geom = h5f['geometry']
imts = list(h5f.keys())
imts.remove('connect')
imts.remove('geometry')
elem_mask = np.zeros(connect.shape[0], dtype=bool)
for i, element in enumerate(connect):
cx, cy, _ = geom[element].mean(axis=0)
if (cx >= self.mask_bounds[0] and cx <= self.mask_bounds[1]
and cy >= self.mask_bounds[2]
and cy <= self.mask_bounds[3]):
elem_mask[i] = 1.0
np.save(mask_path, elem_mask)
return elem_mask
def evaluate(self, params_dict, indices, folder, **kwargs):
self.prepare_common_files(folder)
source_params = {
'M': 6.0,
'lon': -117.9437,
'lat': 34.1122,
'tini': 0.0,
'slip1_cm': 100.0,
'slip2_cm': 0.0,
'slip3_cm': 0.0}
for i, sim_idx in enumerate(indices):
for param_label, param_vals in params_dict.items():
source_params[param_label] = param_vals[i]
print(source_params)
self.write_source_files(folder, source_params, sim_idx)
self.prepare_jobs(folder, indices)
self.sync_files(folder, remote_dir, False)
self.launch_jobs(folder)
self.sync_files('%s/%s/' % (remote_dir, folder), folder, True)
successful_indices = self.get_successful_indices(folder, indices)
self.reorder_elements(folder, successful_indices)
params_arr = np.array(list(params_dict.values())).T
good_params = np.array(
[param for param, idx in zip(params_arr, indices)
if idx in successful_indices])
return good_params, self.load_data(folder, successful_indices)
def write_source_files(self, folder, source_params, sim_idx):
logging.info('Writing source files for simulation index %s' % sim_idx)
sim_dir = os.path.join(folder, 'data', str(sim_idx))
odir = os.path.join(sim_dir, 'output')
for dir in [sim_dir, odir]:
if not os.path.exists(dir):
os.makedirs(dir)
srf_fname = os.path.join(sim_dir, 'source.srf')
nrf_fname = srf_fname.replace('srf', 'nrf')
self.write_standard_rupture_format(**source_params, fname=srf_fname)
subprocess.call(
['/bin/zsh', '-i', '-c', (
'rconv -i %s -m "+proj=utm +zone=11 +ellps=WGS84 +datum=WGS84'
' +units=m +no_defs" -o %s') % (srf_fname, nrf_fname)])
shutil.copyfile(
os.path.join(folder, self.par_file),
os.path.join(sim_dir, self.par_file))
def prepare_jobs(self, folder, indices):
logging.info('Preparing job files.')
job_dir = os.path.join(folder, 'jobs')
# Start with a clean job directory
if os.path.exists(job_dir):
shutil.rmtree(job_dir)
os.makedirs(job_dir)
# Calculate number of jobs we should submit
nsims = len(indices)
njobs = max(self.max_jobs, nsims * self.t_per_sim / self.t_max_per_job)
njobs = min(nsims, njobs)
sims_groups = np.array_split(indices, njobs)
# Create and populate the job files
with open(os.path.join(folder, self.sim_job_file), 'r') as myfile:
data_temp = myfile.readlines()
for i in range(njobs):
data = data_temp.copy()
job_run_time = self.t_per_sim * len(sims_groups[i])
data = [sub.replace(
'00:30:00',
time.strftime('%H:%M:%S', time.gmtime(job_run_time*3600)))
for sub in data]
for sim_idx in sims_groups[i]:
data.append('\ncd %s' % sim_idx)
data.append(
('\nmpiexec -n $SLURM_NTASKS '
'SeisSol_Release_dskx_5_elastic %s' % self.par_file))
data.append((
'\nmpiexec -n $SLURM_NTASKS python -u '
'/dss/dsshome1/0B/di46bak/'
'SeisSol/postprocessing/science/'
'GroundMotionParametersMaps/'
'ComputeGroundMotionParametersFromSurfaceOutput_Hybrid.py '
'output/loh1-surface.xdmf'))
data.append('\ncd ..')
with open(os.path.join(job_dir, 'job%s' % i), 'w') as myfile:
myfile.writelines(data)
logging.info('Created %s job files.' % njobs)
def sync_files(self, source, dest, exclude_output):
exclude_file = os.path.join(
os.path.split(inspect.getfile(self.__class__))[0], 'exclude.txt')
logging.info('Syncing files. Source: %s, Destination: %s' % (
source, dest))
cmd = ("rsync -a %s %s --delete --progress "
"--exclude-from=%s" % (source, dest, exclude_file))
if exclude_output:
cmd += ' --exclude output/'
print('command:', cmd)
while True:
res = subprocess.call(cmd.split())
if res == 0:
break
time.sleep(sleepy_time)
def get_successful_indices(self, folder, indices):
files = [
'loh1-GME-surface_cell.h5',
'loh1-GME-surface_vertex.h5',
'loh1-GME.xdmf'
]
good_indices = []
for idx in indices:
success = True
for file in files:
if not os.path.exists(
os.path.join(folder, 'data', str(idx), file)):
success = False
if success:
good_indices.append(idx)
return good_indices
def reorder_elements(self, folder, indices):
logging.info('Correcting element ordering.')
idir = os.path.join(folder, 'data')
ref_idx = self.get_ref_idx(folder)
ref_file = os.path.join(idir, str(ref_idx), 'loh1-GME.xdmf')
for idx in indices:
idx = str(idx)
file = os.path.join(idir, idx, 'loh1-GME.xdmf')
run_reordering(ref_file, file, [-1], ['all'])
fname = 'loh1-GME_corrected'
h5name = '%s.h5' % fname
xdmfname = '%s.xdmf' % fname
h5new = os.path.join(idir, idx, h5name)
xdmfnew = os.path.join(idir, idx, xdmfname)
os.rename(h5name, h5new)
os.rename(xdmfname, xdmfnew)
def launch_jobs(self, folder):
logging.info('Launching jobs.')
cmd = ('cd $SCRATCH/%s/jobs; for fname in job*; '
'do sbatch $fname; done' % folder)
res = self.issue_remote_command(
self.build_remote_command(cmd)).splitlines()
job_ids = [line.split('job ')[1] for line in res]
# Wait for the jobs to finish, check status using squeue
jobs_finished = False
logging.info('Waiting for jobs to finish.')
while not jobs_finished:
time.sleep(sleepy_time)
res = self.issue_remote_command(
self.build_remote_command('squeue -u di46bak'))
finished = [job_id not in res for job_id in job_ids]
if all(finished):
jobs_finished = True
logging.info('Jobs all finished.')
def prepare_common_files(self, folder):
with open(self.par_file, 'rt') as f:
data = f.read()
data = data.replace('material_file_name', self.material_file)
data = data.replace('mesh_file_name', self.mesh_file)
with open(os.path.join(folder, self.par_file), 'wt') as f:
f.write(data)
for file in self.netcdf_files + [
self.mesh_file, self.material_file, self.sim_job_file]:
shutil.copyfile(file, os.path.join(folder, file))
def get_ref_idx(self, folder):
idir = os.path.join(folder, 'data')
all_indices = sorted([
int(idx) for idx in os.listdir(idir) if not idx.startswith('.')])
ref_idx = self.get_successful_indices(folder, all_indices)[0]
logging.info('Using index %s as the reference.' % ref_idx)
return ref_idx
def write_standard_rupture_format(
self, lon, lat, depth, strike, dip, rake, M, tini, slip1_cm,
slip2_cm, slip3_cm, fname):
dt = 0.0002
rho = 2700.0
vs = 3464.0
mu = vs**2*rho
M0 = 10**(1.5 * M + 9.1)
area = M0/mu*1e4 # m^2 to cm^2
T = 0.1
vtime = np.arange(0, 4, dt)
sliprate_cm = slip1_cm * 1/T**2 * vtime*np.exp(-vtime/T)
nt1 = vtime.shape[0]
nt2 = 0
nt3 = 0
fout = open(fname, 'w')
fout.write('1.0\n')
fout.write('POINTS 1\n')
fout.write("%.5e %.5e %f %f %f %.10e %f %f\n" %
(lon, lat, depth, strike, dip, area, tini, dt))
fout.write("%f %f %d %f %d %f %d\n" %
(rake, slip1_cm, nt1, slip2_cm, nt2, slip3_cm, nt3))
np.savetxt(fout, sliprate_cm, fmt='%.18e')
fout.close()
def issue_remote_command(self, cmd):
done = False
while not done:
try:
res = subprocess.check_output(cmd).decode('utf-8')
done = True
except subprocess.CalledProcessError:
# try command again in a little bit when connection
# is hopefully back
print('Unable to make connection... trying again later.')
time.sleep(sleepy_time)
return res
def build_remote_command(self, cmd):
host = 'skx.supermuc.lrz.de'
return ['ssh', '%s' % host, cmd]
| [
"numpy.load",
"numpy.arange",
"numpy.exp",
"shutil.rmtree",
"os.path.join",
"numpy.savetxt",
"os.path.exists",
"numpy.save",
"os.rename",
"subprocess.check_output",
"romshake.simulators.reorder_elements.run_reordering",
"time.sleep",
"inspect.getfile",
"subprocess.call",
"os.listdir",
... | [((994, 1023), 'logging.info', 'logging.info', (['"""Loading data."""'], {}), "('Loading data.')\n", (1006, 1023), False, 'import logging\n'), ((1066, 1097), 'os.path.join', 'os.path.join', (['folder', 'mask_file'], {}), '(folder, mask_file)\n', (1078, 1097), False, 'import os\n'), ((1109, 1134), 'os.path.exists', 'os.path.exists', (['mask_path'], {}), '(mask_path)\n', (1123, 1134), False, 'import os\n'), ((1645, 1675), 'logging.info', 'logging.info', (['"""Creating mask."""'], {}), "('Creating mask.')\n", (1657, 1675), False, 'import logging\n'), ((2001, 2039), 'numpy.zeros', 'np.zeros', (['connect.shape[0]'], {'dtype': 'bool'}), '(connect.shape[0], dtype=bool)\n', (2009, 2039), True, 'import numpy as np\n'), ((2354, 2383), 'numpy.save', 'np.save', (['mask_path', 'elem_mask'], {}), '(mask_path, elem_mask)\n', (2361, 2383), True, 'import numpy as np\n'), ((3682, 3752), 'logging.info', 'logging.info', (["('Writing source files for simulation index %s' % sim_idx)"], {}), "('Writing source files for simulation index %s' % sim_idx)\n", (3694, 3752), False, 'import logging\n'), ((3829, 3860), 'os.path.join', 'os.path.join', (['sim_dir', '"""output"""'], {}), "(sim_dir, 'output')\n", (3841, 3860), False, 'import os\n'), ((3991, 4026), 'os.path.join', 'os.path.join', (['sim_dir', '"""source.srf"""'], {}), "(sim_dir, 'source.srf')\n", (4003, 4026), False, 'import os\n'), ((4165, 4330), 'subprocess.call', 'subprocess.call', (['[\'/bin/zsh\', \'-i\', \'-c\', \n \'rconv -i %s -m "+proj=utm +zone=11 +ellps=WGS84 +datum=WGS84 +units=m +no_defs" -o %s\'\n % (srf_fname, nrf_fname)]'], {}), '([\'/bin/zsh\', \'-i\', \'-c\', \n \'rconv -i %s -m "+proj=utm +zone=11 +ellps=WGS84 +datum=WGS84 +units=m +no_defs" -o %s\'\n % (srf_fname, nrf_fname)])\n', (4180, 4330), False, 'import subprocess\n'), ((4551, 4587), 'logging.info', 'logging.info', (['"""Preparing job files."""'], {}), "('Preparing job files.')\n", (4563, 4587), False, 'import logging\n'), ((4606, 4634), 'os.path.join', 
'os.path.join', (['folder', '"""jobs"""'], {}), "(folder, 'jobs')\n", (4618, 4634), False, 'import os\n'), ((4689, 4712), 'os.path.exists', 'os.path.exists', (['job_dir'], {}), '(job_dir)\n', (4703, 4712), False, 'import os\n'), ((4757, 4777), 'os.makedirs', 'os.makedirs', (['job_dir'], {}), '(job_dir)\n', (4768, 4777), False, 'import os\n'), ((4996, 5026), 'numpy.array_split', 'np.array_split', (['indices', 'njobs'], {}), '(indices, njobs)\n', (5010, 5026), True, 'import numpy as np\n'), ((6271, 6316), 'logging.info', 'logging.info', (["('Created %s job files.' % njobs)"], {}), "('Created %s job files.' % njobs)\n", (6283, 6316), False, 'import logging\n'), ((6497, 6572), 'logging.info', 'logging.info', (["('Syncing files. Source: %s, Destination: %s' % (source, dest))"], {}), "('Syncing files. Source: %s, Destination: %s' % (source, dest))\n", (6509, 6572), False, 'import logging\n'), ((7557, 7601), 'logging.info', 'logging.info', (['"""Correcting element ordering."""'], {}), "('Correcting element ordering.')\n", (7569, 7601), False, 'import logging\n'), ((7617, 7645), 'os.path.join', 'os.path.join', (['folder', '"""data"""'], {}), "(folder, 'data')\n", (7629, 7645), False, 'import os\n'), ((8282, 8313), 'logging.info', 'logging.info', (['"""Launching jobs."""'], {}), "('Launching jobs.')\n", (8294, 8313), False, 'import logging\n'), ((8682, 8725), 'logging.info', 'logging.info', (['"""Waiting for jobs to finish."""'], {}), "('Waiting for jobs to finish.')\n", (8694, 8725), False, 'import logging\n'), ((9665, 9693), 'os.path.join', 'os.path.join', (['folder', '"""data"""'], {}), "(folder, 'data')\n", (9677, 9693), False, 'import os\n'), ((9881, 9939), 'logging.info', 'logging.info', (["('Using index %s as the reference.' % ref_idx)"], {}), "('Using index %s as the reference.' 
% ref_idx)\n", (9893, 9939), False, 'import logging\n'), ((10306, 10325), 'numpy.arange', 'np.arange', (['(0)', '(4)', 'dt'], {}), '(0, 4, dt)\n', (10315, 10325), True, 'import numpy as np\n'), ((10796, 10838), 'numpy.savetxt', 'np.savetxt', (['fout', 'sliprate_cm'], {'fmt': '"""%.18e"""'}), "(fout, sliprate_cm, fmt='%.18e')\n", (10806, 10838), True, 'import numpy as np\n'), ((1160, 1178), 'numpy.load', 'np.load', (['mask_path'], {}), '(mask_path)\n', (1167, 1178), True, 'import numpy as np\n'), ((1571, 1589), 'numpy.array', 'np.array', (['all_data'], {}), '(all_data)\n', (1579, 1589), True, 'import numpy as np\n'), ((4410, 4445), 'os.path.join', 'os.path.join', (['folder', 'self.par_file'], {}), '(folder, self.par_file)\n', (4422, 4445), False, 'import os\n'), ((4459, 4495), 'os.path.join', 'os.path.join', (['sim_dir', 'self.par_file'], {}), '(sim_dir, self.par_file)\n', (4471, 4495), False, 'import os\n'), ((4726, 4748), 'shutil.rmtree', 'shutil.rmtree', (['job_dir'], {}), '(job_dir)\n', (4739, 4748), False, 'import shutil\n'), ((6930, 6953), 'time.sleep', 'time.sleep', (['sleepy_time'], {}), '(sleepy_time)\n', (6940, 6953), False, 'import time\n'), ((7834, 7874), 'os.path.join', 'os.path.join', (['idir', 'idx', '"""loh1-GME.xdmf"""'], {}), "(idir, idx, 'loh1-GME.xdmf')\n", (7846, 7874), False, 'import os\n'), ((7887, 7932), 'romshake.simulators.reorder_elements.run_reordering', 'run_reordering', (['ref_file', 'file', '[-1]', "['all']"], {}), "(ref_file, file, [-1], ['all'])\n", (7901, 7932), False, 'from romshake.simulators.reorder_elements import run_reordering\n'), ((8072, 8103), 'os.path.join', 'os.path.join', (['idir', 'idx', 'h5name'], {}), '(idir, idx, h5name)\n', (8084, 8103), False, 'import os\n'), ((8126, 8159), 'os.path.join', 'os.path.join', (['idir', 'idx', 'xdmfname'], {}), '(idir, idx, xdmfname)\n', (8138, 8159), False, 'import os\n'), ((8172, 8196), 'os.rename', 'os.rename', (['h5name', 'h5new'], {}), '(h5name, h5new)\n', (8181, 8196), False, 
'import os\n'), ((8209, 8237), 'os.rename', 'os.rename', (['xdmfname', 'xdmfnew'], {}), '(xdmfname, xdmfnew)\n', (8218, 8237), False, 'import os\n'), ((8771, 8794), 'time.sleep', 'time.sleep', (['sleepy_time'], {}), '(sleepy_time)\n', (8781, 8794), False, 'import time\n'), ((10374, 10392), 'numpy.exp', 'np.exp', (['(-vtime / T)'], {}), '(-vtime / T)\n', (10380, 10392), True, 'import numpy as np\n'), ((1509, 1521), 'numpy.log', 'np.log', (['data'], {}), '(data)\n', (1515, 1521), True, 'import numpy as np\n'), ((3916, 3935), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (3930, 3935), False, 'import os\n'), ((3953, 3969), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (3964, 3969), False, 'import os\n'), ((5090, 5129), 'os.path.join', 'os.path.join', (['folder', 'self.sim_job_file'], {}), '(folder, self.sim_job_file)\n', (5102, 5129), False, 'import os\n'), ((9052, 9086), 'logging.info', 'logging.info', (['"""Jobs all finished."""'], {}), "('Jobs all finished.')\n", (9064, 9086), False, 'import logging\n'), ((9363, 9398), 'os.path.join', 'os.path.join', (['folder', 'self.par_file'], {}), '(folder, self.par_file)\n', (9375, 9398), False, 'import os\n'), ((9586, 9612), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (9598, 9612), False, 'import os\n'), ((6171, 6205), 'os.path.join', 'os.path.join', (['job_dir', "('job%s' % i)"], {}), "(job_dir, 'job%s' % i)\n", (6183, 6205), False, 'import os\n'), ((6437, 6468), 'inspect.getfile', 'inspect.getfile', (['self.__class__'], {}), '(self.__class__)\n', (6452, 6468), False, 'import inspect\n'), ((9757, 9773), 'os.listdir', 'os.listdir', (['idir'], {}), '(idir)\n', (9767, 9773), False, 'import os\n'), ((11303, 11326), 'time.sleep', 'time.sleep', (['sleepy_time'], {}), '(sleepy_time)\n', (11313, 11326), False, 'import time\n'), ((1412, 1430), 'numpy.array', 'np.array', (['h5f[imt]'], {}), '(h5f[imt])\n', (1420, 1430), True, 'import numpy as np\n'), ((5425, 5457), 
'time.gmtime', 'time.gmtime', (['(job_run_time * 3600)'], {}), '(job_run_time * 3600)\n', (5436, 5457), False, 'import time\n'), ((10986, 11014), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (11009, 11014), False, 'import subprocess\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 11 12:43:36 2021
@author: ziyi
"""
from utils import try_cuda, read_vocab, Tokenizer, vocab_pad_idx, vocab_eos_idx
import numpy as np
import json
import networkx as nx
import sys
sys.path.append('build')
import MatterSim
import math
import torch
from follower import Seq2SeqAgent
from model import EncoderLSTM, AttnDecoderLSTM
from vocab import SUBTRAIN_VOCAB, TRAIN_VOCAB, TRAINVAL_VOCAB
MAX_INPUT_LENGTH = 80
feature_size = 2048+128
max_episode_len = 10
word_embedding_size = 300
glove_path = 'tasks/R2R/data/train_glove.npy'
action_embedding_size = 2048+128
hidden_size = 512
dropout_ratio = 0.5
vocab = read_vocab(TRAIN_VOCAB)
tok = Tokenizer(vocab=vocab)
glove = np.load(glove_path)
encoder = try_cuda(EncoderLSTM(
len(vocab), word_embedding_size, hidden_size, vocab_pad_idx,
dropout_ratio, glove=glove))
decoder = try_cuda(AttnDecoderLSTM(
action_embedding_size, hidden_size, dropout_ratio,
feature_size=feature_size))
agent = Seq2SeqAgent(
None, "", encoder, decoder, max_episode_len,
max_instruction_length=MAX_INPUT_LENGTH)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
agent.load('tasks/R2R/snapshots/release/follower_final_release', map_location = device)
def get_end_pose(agent,encoded_instructions, scanId, viewpointId, heading = 0., elevation = 0.):
pose = agent.end_pose(encoded_instructions, scanId, viewpointId, heading = heading, elevation = elevation)
return pose.point
def load_nav_graphs(scans):
''' Load connectivity graph for each scan '''
def distance(pose1, pose2):
''' Euclidean distance between two graph poses '''
return ((pose1['pose'][3]-pose2['pose'][3])**2\
+ (pose1['pose'][7]-pose2['pose'][7])**2\
+ (pose1['pose'][11]-pose2['pose'][11])**2)**0.5
graphs = {}
for scan in scans:
with open('connectivity/%s_connectivity.json' % scan) as f:
G = nx.Graph()
positions = {}
data = json.load(f)
for i,item in enumerate(data):
if item['included']:
for j,conn in enumerate(item['unobstructed']):
if conn and data[j]['included']:
positions[item['image_id']] = np.array([item['pose'][3],
item['pose'][7], item['pose'][11]]);
assert data[j]['unobstructed'][i], 'Graph should be undirected'
G.add_edge(item['image_id'],data[j]['image_id'],weight=distance(item,data[j]))
nx.set_node_attributes(G, values=positions, name='position')
graphs[scan] = G
return graphs
def _load_nav_graphs(scans):
''' Load connectivity graph for each scan, useful for reasoning about shortest paths '''
print('Loading navigation graphs for %d scans' % len(scans))
graphs = load_nav_graphs(scans)
paths = {}
for scan,G in graphs.items(): # compute all shortest paths
paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
distances = {}
for scan,G in graphs.items(): # compute all shortest paths
distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
return distances
if __name__ == '__main__':
# =============================================================================
# #change following lines to ground your own instr
# #######################################################################################
# scanId = 'vyrNrziPKCB'
# viewpointId = 'c8a5472a5ef243319ffa4f88d3ddb4bd'
# encoded_instructions, _ = tok.encode_sentence('Exit the room using the door on the left. Turn slightly left and go past the round table an chairs. Wait there. ')
# #######################################################################################
#
# encoded_instructions = torch.tensor(encoded_instructions, device = device)
# traj = agent.generate(encoded_instructions, scanId, viewpointId)
#
# print(traj)
# =============================================================================
sim = MatterSim.Simulator()
sim.setRenderingEnabled(False)
sim.setDiscretizedViewingAngles(True)
sim.setCameraResolution(640, 480)
sim.setCameraVFOV(math.radians(60))
sim.init()
with open('tasks/R2R/data/R2R_val_unseen.json') as f:
data = json.load(f)
scans = []
for traj in data:
if traj['scan'] not in scans:
scans.append(traj['scan'])
distances = _load_nav_graphs(scans)
itr = 0
success = 0
distance_all = 0
for i in range(len(data)):
scan = data[i]['scan']
path_gt = data[i]['path']
viewpoint_st = data[i]['path'][0]
viewpoint_end = data[i]['path'][-1]
heading = data[i]['heading']
#end_pose_gt = get_gt_end_pose(scan, viewpoint_end)
ins = data[i]['instructions']
for ins_i in ins:
encoded_instructions, _ = tok.encode_sentence(ins_i)
encoded_instructions = np.concatenate((np.flip(encoded_instructions,0),[vocab_eos_idx]))
encoded_instructions = torch.tensor(encoded_instructions, device = device)
traj = agent.generate(sim, encoded_instructions, scan, viewpoint_st,heading=heading)
path = [traj['trajectory'][i][0] for i in range(len(traj['trajectory']))]
end_pose_pred = traj['trajectory'][-1][0]
distance = distances[scan][viewpoint_end][end_pose_pred]
if distance < 3:
suc = 1
step_dist = [distances[scan][path[x]][path[x+1]] for x in range(len(path)-1)]
length = np.sum(step_dist)
reference_dist = [distances[scan][path_gt[x]][path_gt[x+1]] for x in range(len(path_gt)-1)]
reference_length = np.sum(reference_dist)
spl = suc * reference_length/ max(length,reference_length)
success += spl
#distance_all += distance
itr += 1
sr = success/itr
#dis_avg = distance_all/itr
print('spl = {}/{} = {}'.format(success,itr,sr))
#################################################################
# =============================================================================
# sim = MatterSim.Simulator()
# sim.setRenderingEnabled(False)
# sim.setDiscretizedViewingAngles(True)
# sim.setCameraResolution(640, 480)
# sim.setCameraVFOV(math.radians(60))
# sim.init()
# with open('tasks/R2R/data/R2R_val_seen.json') as f:
# data_gt = json.load(f)
# with open('tasks/R2R/speaker/BERT300_val_seen.json') as f:
# data_pred = json.load(f)
#
# scans = []
# for traj in data_gt:
# if traj['scan'] not in scans:
# scans.append(traj['scan'])
#
# distances = _load_nav_graphs(scans)
#
# itr = 0
# success = 0
#
# distance_all = 0
# for traj_gt in data_gt:
# path_id = traj_gt['path_id']
# scan = traj_gt['scan']
# #ins = traj_gt['instructions']
# path_gt = traj_gt['path']
# viewpoint_st = path_gt[0]
# viewpoint_end = path_gt[-1]
# heading = traj_gt['heading']
# for i in range(3):
# long_id = str(path_id) + '_' + str(i)
# ins_pred = ' '.join(data_pred[long_id]['words'])
# encoded_instructions, _ = tok.encode_sentence(ins_pred)
# encoded_instructions = np.concatenate((np.flip(encoded_instructions,0),[vocab_eos_idx]))
# encoded_instructions = torch.tensor(encoded_instructions, device = device)
# traj = agent.generate(sim, encoded_instructions, scan, viewpoint_st,heading=heading)
# end_pose_pred = traj['trajectory'][-1][0]
#
# distance = distances[scan][viewpoint_end][end_pose_pred]
#
# if distance < 3:
# success += 1
# distance_all += distance
#
# itr += 1
#
# sr = success/itr
# dis_avg = distance_all/itr
# print('sr = {}/{} = {}, avg_dis = {}'.format(success,itr,sr, dis_avg))
# =============================================================================
# =============================================================================
# sim = MatterSim.Simulator()
# sim.setRenderingEnabled(False)
# sim.setDiscretizedViewingAngles(True)
# sim.setCameraResolution(640, 480)
# sim.setCameraVFOV(math.radians(60))
# sim.init()
#
# with open('tasks/R2R/data/R2R_val_unseen.json') as f:
# data = json.load(f)
#
# results = {}
#
# for i in range(len(data)):
# path_id = str(data[i]['path_id'])
# scan = data[i]['scan']
# viewpoint_st = data[i]['path'][0]
# #viewpoint_end = data[i]['path'][-1]
# heading = data[i]['heading']
# #end_pose_gt = get_gt_end_pose(scan, viewpoint_end)
# ins = data[i]['instructions']
# for j in range(len(ins)):
# long_id = path_id + '_' + str(j)
# ins_i = ins[j]
# encoded_instructions, _ = tok.encode_sentence(ins_i)
# encoded_instructions = np.concatenate((np.flip(encoded_instructions,0),[vocab_eos_idx]))
# encoded_instructions = torch.tensor(encoded_instructions, device = device)
# traj = agent.generate(sim, encoded_instructions, scan, viewpoint_st,heading=heading)
# results[long_id] = traj
# #end_pose_pred = traj['trajectory'][-1][0]
# #end_pose_pred = get_end_pose(agent,encoded_instructions, scan, viewpoint_st)
# #distance = dist(end_pose_pred, end_pose_gt)
# #distance = distances[scan][viewpoint_end][end_pose_pred]
#
# import eval
# evaluator = eval.Evaluation(['val_unseen'])
#
# score_summary, _ = evaluator.score_results(results)
# for metric, val in sorted(score_summary.items()):
# print("{}\t{}".format(metric, val))
# =============================================================================
# =============================================================================
# sim = MatterSim.Simulator()
# sim.setRenderingEnabled(False)
# sim.setDiscretizedViewingAngles(True)
# sim.setCameraResolution(640, 480)
# sim.setCameraVFOV(math.radians(60))
# sim.init()
# with open('tasks/R2R/data/R2R_val_seen.json') as f:
# data_gt = json.load(f)
# with open('tasks/R2R/speaker/origin_val_seen.json') as f:
# data_pred = json.load(f)
#
# scans = []
# for traj in data_gt:
# if traj['scan'] not in scans:
# scans.append(traj['scan'])
#
# distances = _load_nav_graphs(scans)
#
# itr = 0
# success = 0
#
# distance_all = 0
# for traj_gt in data_gt:
# path_id = traj_gt['path_id']
# scan = traj_gt['scan']
# #ins = traj_gt['instructions']
# path_gt = traj_gt['path']
# viewpoint_st = path_gt[0]
# viewpoint_end = path_gt[-1]
# heading = traj_gt['heading']
# for i in range(3):
# long_id = str(path_id) + '_' + str(i)
# ins_pred = ' '.join(data_pred[long_id]['words'])
# encoded_instructions, _ = tok.encode_sentence(ins_pred)
# encoded_instructions = np.concatenate((np.flip(encoded_instructions,0),[vocab_eos_idx]))
# encoded_instructions = torch.tensor(encoded_instructions, device = device)
# traj = agent.generate(sim, encoded_instructions, scan, viewpoint_st,heading=heading)
# path = [traj['trajectory'][i][0] for i in range(len(traj['trajectory']))]
# #end_pose_pred = traj['trajectory'][-1][0]
#
# #distance = distances[scan][viewpoint_end][end_pose_pred]
# distance_to_goal = [distances[scan][x][viewpoint_end] for x in path]
# oracle_success = np.any([x<3 for x in distance_to_goal])
# success += oracle_success
# #distance_all += distance
#
# itr += 1
#
# sr = success/itr
# #dis_avg = distance_all/itr
# print('oracle sr = {}/{} = {}'.format(success,itr,sr))
# =============================================================================
# =============================================================================
#
# sim = MatterSim.Simulator()
# sim.setRenderingEnabled(False)
# sim.setDiscretizedViewingAngles(True)
# sim.setCameraResolution(640, 480)
# sim.setCameraVFOV(math.radians(60))
# sim.init()
# with open('tasks/R2R/data/R2R_val_unseen.json') as f:
# data_gt = json.load(f)
# with open('tasks/R2R/speaker/origin_val_unseen.json') as f:
# data_pred = json.load(f)
#
# scans = []
# for traj in data_gt:
# if traj['scan'] not in scans:
# scans.append(traj['scan'])
#
# distances = _load_nav_graphs(scans)
#
# itr = 0
# success = 0
#
# distance_all = 0
# for traj_gt in data_gt:
# path_id = traj_gt['path_id']
# scan = traj_gt['scan']
# #ins = traj_gt['instructions']
# path_gt = traj_gt['path']
# viewpoint_st = path_gt[0]
# viewpoint_end = path_gt[-1]
# heading = traj_gt['heading']
# for i in range(3):
# long_id = str(path_id) + '_' + str(i)
# ins_pred = ' '.join(data_pred[long_id]['words'])
# encoded_instructions, _ = tok.encode_sentence(ins_pred)
# encoded_instructions = np.concatenate((np.flip(encoded_instructions,0),[vocab_eos_idx]))
# encoded_instructions = torch.tensor(encoded_instructions, device = device)
# traj = agent.generate(sim, encoded_instructions, scan, viewpoint_st,heading=heading)
# path = [traj['trajectory'][i][0] for i in range(len(traj['trajectory']))]
# end_pose_pred = traj['trajectory'][-1][0]
# distance = distances[scan][viewpoint_end][end_pose_pred]
#
# if distance < 3:
# suc = 1
# step_dist = [distances[scan][path[x]][path[x+1]] for x in range(len(path)-1)]
# length = np.sum(step_dist)
#
# reference_dist = [distances[scan][path_gt[x]][path_gt[x+1]] for x in range(len(path_gt)-1)]
# reference_length = np.sum(reference_dist)
# spl = suc * reference_length/ max(length,reference_length)
#
#
# success += spl
# #distance_all += distance
#
# itr += 1
#
# sr = success/itr
# #dis_avg = distance_all/itr
# print('spl = {}/{} = {}'.format(success,itr,sr))
# =============================================================================
| [
"sys.path.append",
"numpy.load",
"json.load",
"numpy.sum",
"numpy.flip",
"networkx.set_node_attributes",
"math.radians",
"model.AttnDecoderLSTM",
"utils.Tokenizer",
"torch.cuda.is_available",
"utils.read_vocab",
"networkx.Graph",
"follower.Seq2SeqAgent",
"networkx.all_pairs_dijkstra_path_l... | [((254, 278), 'sys.path.append', 'sys.path.append', (['"""build"""'], {}), "('build')\n", (269, 278), False, 'import sys\n'), ((683, 706), 'utils.read_vocab', 'read_vocab', (['TRAIN_VOCAB'], {}), '(TRAIN_VOCAB)\n', (693, 706), False, 'from utils import try_cuda, read_vocab, Tokenizer, vocab_pad_idx, vocab_eos_idx\n'), ((713, 735), 'utils.Tokenizer', 'Tokenizer', ([], {'vocab': 'vocab'}), '(vocab=vocab)\n', (722, 735), False, 'from utils import try_cuda, read_vocab, Tokenizer, vocab_pad_idx, vocab_eos_idx\n'), ((744, 763), 'numpy.load', 'np.load', (['glove_path'], {}), '(glove_path)\n', (751, 763), True, 'import numpy as np\n'), ((1035, 1137), 'follower.Seq2SeqAgent', 'Seq2SeqAgent', (['None', '""""""', 'encoder', 'decoder', 'max_episode_len'], {'max_instruction_length': 'MAX_INPUT_LENGTH'}), "(None, '', encoder, decoder, max_episode_len,\n max_instruction_length=MAX_INPUT_LENGTH)\n", (1047, 1137), False, 'from follower import Seq2SeqAgent\n'), ((922, 1019), 'model.AttnDecoderLSTM', 'AttnDecoderLSTM', (['action_embedding_size', 'hidden_size', 'dropout_ratio'], {'feature_size': 'feature_size'}), '(action_embedding_size, hidden_size, dropout_ratio,\n feature_size=feature_size)\n', (937, 1019), False, 'from model import EncoderLSTM, AttnDecoderLSTM\n'), ((4190, 4211), 'MatterSim.Simulator', 'MatterSim.Simulator', ([], {}), '()\n', (4209, 4211), False, 'import MatterSim\n'), ((1196, 1221), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1219, 1221), False, 'import torch\n'), ((4349, 4365), 'math.radians', 'math.radians', (['(60)'], {}), '(60)\n', (4361, 4365), False, 'import math\n'), ((4455, 4467), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4464, 4467), False, 'import json\n'), ((2021, 2031), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2029, 2031), True, 'import networkx as nx\n'), ((2078, 2090), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2087, 2090), False, 'import json\n'), ((2664, 
2724), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['G'], {'values': 'positions', 'name': '"""position"""'}), "(G, values=positions, name='position')\n", (2686, 2724), True, 'import networkx as nx\n'), ((3101, 3130), 'networkx.all_pairs_dijkstra_path', 'nx.all_pairs_dijkstra_path', (['G'], {}), '(G)\n', (3127, 3130), True, 'import networkx as nx\n'), ((3245, 3281), 'networkx.all_pairs_dijkstra_path_length', 'nx.all_pairs_dijkstra_path_length', (['G'], {}), '(G)\n', (3278, 3281), True, 'import networkx as nx\n'), ((5252, 5301), 'torch.tensor', 'torch.tensor', (['encoded_instructions'], {'device': 'device'}), '(encoded_instructions, device=device)\n', (5264, 5301), False, 'import torch\n'), ((5795, 5812), 'numpy.sum', 'np.sum', (['step_dist'], {}), '(step_dist)\n', (5801, 5812), True, 'import numpy as np\n'), ((5973, 5995), 'numpy.sum', 'np.sum', (['reference_dist'], {}), '(reference_dist)\n', (5979, 5995), True, 'import numpy as np\n'), ((5167, 5199), 'numpy.flip', 'np.flip', (['encoded_instructions', '(0)'], {}), '(encoded_instructions, 0)\n', (5174, 5199), True, 'import numpy as np\n'), ((2353, 2415), 'numpy.array', 'np.array', (["[item['pose'][3], item['pose'][7], item['pose'][11]]"], {}), "([item['pose'][3], item['pose'][7], item['pose'][11]])\n", (2361, 2415), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Program for the regression of mlp about paramagnetic FCC Fe
"""
import argparse
import copy
# import time
# import tqdm
import random
import numpy as np
from mlptools.common.fileio import InputParams
from mlptools.common.structure import Structure
from mlptools.mlpgen.model import Terms
from mlptools.mlpgen.myIO import ReadFeatureParams, read_regression_params
from mlptools.mlpgen.regression import PotEstimation
def rearange_L(array, index_array):
"""Move designated columns to the head of array.
Args:
array (ndarray): input array(2D)
index_array (list): list of index by which columns are designated
Returns:
ndarray: changed array
"""
rest = np.delete(array, index_array, 1)
return np.hstack((array[:, index_array], rest))
class VirtualDataInput:
"""Generate a virtual DataInput from normal DataInput"""
def __init__(self, di):
self.vdi = copy.deepcopy(di)
self.vdi.n_type = 2
def get_data_input(self):
"""Return a newly generated DataInput.
Returns:
DataInput: virtual DataInput
"""
return self.vdi
class MagneticStructuralFeatures:
"""Data structure including magnetic structural features"""
def __init__(self, tr, spin_array, vdi):
st_set_all_train = self.get_virtual_structures(tr.train, spin_array)
n_st_dataset = [len(data.st_set) for data in tr.train]
term = Terms(st_set_all_train, vdi, n_st_dataset, vdi.train_force)
self.train_x = np.hstack((tr.train_x, term.get_x()))
st_set_all_test = self.get_virtual_structures(tr.test, spin_array)
n_st_dataset = [len(data.st_set) for data in tr.test]
force_dataset = [vdi.wforce for v in vdi.test_names]
term = Terms(st_set_all_test, vdi, n_st_dataset, force_dataset)
self.test_x = np.hstack((tr.test_x, term.get_x()))
def get_x(self):
"""Return the X matrices for regression
Returns:
ndarray: X matrices needed for training and test
"""
return self.train_x, self.test_x
def get_virtual_structures(self, dataset, spin_array):
"""Generate virtual structures from dataset based on spin_array and return the
list of them
Args:
dataset (dr_array): array of the instances, DataRegression
spin_array (ndarray): which spin each atom has
Returns:
list: list of rewrited structures
"""
index_array = np.nonzero(spin_array == 1)[0]
n_atom_1 = len(index_array)
n_atom_2 = sum(dataset[0].st_set[0].n_atoms) - len(index_array)
n_atoms = [n_atom_1, n_atom_2]
specie1 = ["A" for i in range(n_atom_1)]
specie2 = ["B" for i in range(n_atom_2)]
elements = specie1.extend(specie2)
type1 = [0 for i in range(n_atom_1)]
type2 = [1 for i in range(n_atom_2)]
types = type1.extend(type2)
st_list = [
Structure(
st.axis,
rearange_L(st.positions, index_array),
n_atoms,
elements,
types=types,
comment=st.comment,
)
for data in dataset
for st in data.st_set
]
return st_list
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
"--infile",
type=str,
required=True,
help="Input file name. Training is performed from vasprun files.",
)
parser.add_argument(
"-p",
"--pot",
type=str,
default="mlp.pkl",
help="Potential file name for mlptools",
)
args = parser.parse_args()
# prepare temporary spin array
spin_array = np.array(random.choices([-1, 1], k=32))
p = InputParams(args.infile)
di = ReadFeatureParams(p).get_params()
vdi = VirtualDataInput(di).get_data_input()
# calculate structural features
tr = PotEstimation(di=di)
# calculate magnetic structural features
tr.train_x, tr.test_x = MagneticStructuralFeatures(tr, spin_array, vdi).get_x()
tr.set_regression_data()
# start regression
if args.noreg is False:
reg_method, alpha_min, alpha_max, n_alpha = read_regression_params(p)
if reg_method == "ridge" or reg_method == "lasso":
pot = tr.regularization_reg(
method=reg_method,
alpha_min=alpha_min,
alpha_max=alpha_max,
n_alpha=n_alpha,
svd=args.svd,
)
elif reg_method == "normal":
pot = tr.normal_reg()
pot.save_pot(file_name=args.pot)
pot.save_pot_for_lammps(file_name=args.lammps)
print(" --- input parameters ----")
pot.di.model_e.print()
print(" --- best model ----")
if reg_method == "ridge" or reg_method == "lasso":
print(" alpha = ", tr.best_alpha)
(
rmse_train_e,
rmse_test_e,
rmse_train_f,
files_train,
rmse_test_f,
rmse_train_s,
rmse_test_s,
files_test,
) = tr.get_best_rmse()
print(" -- Prediction Error --")
for re, rf, rs, f in zip(rmse_train_e, rmse_train_f, rmse_train_s, files_train):
print(" structures :", f)
print(" rmse (energy, train) = ", re * 1000, " (meV/atom)")
if rf is not None:
print(" rmse (force, train) = ", rf, " (eV/ang)")
print(" rmse (stress, train) = ", rs, " (GPa)")
for re, rf, rs, f in zip(rmse_test_e, rmse_test_f, rmse_test_s, files_test):
print(" structures :", f)
print(" rmse (energy, test) = ", re * 1000, " (meV/atom)")
if rf is not None:
print(" rmse (force, test) = ", rf, " (eV/ang)")
print(" rmse (stress, test) = ", rs, " (GPa)")
| [
"copy.deepcopy",
"argparse.ArgumentParser",
"random.choices",
"numpy.hstack",
"numpy.nonzero",
"mlptools.mlpgen.model.Terms",
"mlptools.mlpgen.regression.PotEstimation",
"mlptools.common.fileio.InputParams",
"mlptools.mlpgen.myIO.ReadFeatureParams",
"mlptools.mlpgen.myIO.read_regression_params",
... | [((726, 758), 'numpy.delete', 'np.delete', (['array', 'index_array', '(1)'], {}), '(array, index_array, 1)\n', (735, 758), True, 'import numpy as np\n'), ((770, 810), 'numpy.hstack', 'np.hstack', (['(array[:, index_array], rest)'], {}), '((array[:, index_array], rest))\n', (779, 810), True, 'import numpy as np\n'), ((3371, 3396), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3394, 3396), False, 'import argparse\n'), ((3867, 3891), 'mlptools.common.fileio.InputParams', 'InputParams', (['args.infile'], {}), '(args.infile)\n', (3878, 3891), False, 'from mlptools.common.fileio import InputParams\n'), ((4029, 4049), 'mlptools.mlpgen.regression.PotEstimation', 'PotEstimation', ([], {'di': 'di'}), '(di=di)\n', (4042, 4049), False, 'from mlptools.mlpgen.regression import PotEstimation\n'), ((946, 963), 'copy.deepcopy', 'copy.deepcopy', (['di'], {}), '(di)\n', (959, 963), False, 'import copy\n'), ((1466, 1525), 'mlptools.mlpgen.model.Terms', 'Terms', (['st_set_all_train', 'vdi', 'n_st_dataset', 'vdi.train_force'], {}), '(st_set_all_train, vdi, n_st_dataset, vdi.train_force)\n', (1471, 1525), False, 'from mlptools.mlpgen.model import Terms\n'), ((1800, 1856), 'mlptools.mlpgen.model.Terms', 'Terms', (['st_set_all_test', 'vdi', 'n_st_dataset', 'force_dataset'], {}), '(st_set_all_test, vdi, n_st_dataset, force_dataset)\n', (1805, 1856), False, 'from mlptools.mlpgen.model import Terms\n'), ((3827, 3856), 'random.choices', 'random.choices', (['[-1, 1]'], {'k': '(32)'}), '([-1, 1], k=32)\n', (3841, 3856), False, 'import random\n'), ((4312, 4337), 'mlptools.mlpgen.myIO.read_regression_params', 'read_regression_params', (['p'], {}), '(p)\n', (4334, 4337), False, 'from mlptools.mlpgen.myIO import ReadFeatureParams, read_regression_params\n'), ((2532, 2559), 'numpy.nonzero', 'np.nonzero', (['(spin_array == 1)'], {}), '(spin_array == 1)\n', (2542, 2559), True, 'import numpy as np\n'), ((3901, 3921), 'mlptools.mlpgen.myIO.ReadFeatureParams', 
'ReadFeatureParams', (['p'], {}), '(p)\n', (3918, 3921), False, 'from mlptools.mlpgen.myIO import ReadFeatureParams, read_regression_params\n')] |
from pathlib import Path
import torch
import re
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import Sampler
from torch.utils.data.distributed import DistributedSampler
from dataclasses import dataclass
from typing import Dict, Optional, Union, List, Tuple
# from fairseq.data import Dictionary
import numpy as np
import json
import pickle
@dataclass
class DataWrap:
path: Union[str, Path]
train_dl: DataLoader
valid_dl: DataLoader
test_dl: Optional[Union[DataLoader, Dict]] = None
def make_data_sampler(dataset: Dataset, shuffle: bool, distributed: bool) -> Sampler:
if distributed:
# return NewDistributedSampler(dataset, shuffle=shuffle)
return DistributedSampler(dataset=dataset, shuffle=shuffle)
if shuffle:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
return sampler
def get_dataloader(cfg, dataset: Dataset, is_train: bool, collate_fn) -> DataLoader:
is_distributed = cfg.do_dist
batch_size_inp = cfg.train.bs if is_train else cfg.train.bsv
nw = cfg.train.nw if is_train else cfg.train.nwv
if is_distributed:
# DistributedDataParallel
assert batch_size_inp % cfg.num_gpus == 0
batch_size = batch_size_inp // cfg.num_gpus
num_workers = nw
elif cfg.do_dp:
# DataParallel
batch_size = batch_size_inp * cfg.num_gpus
num_workers = nw * cfg.num_gpus
else:
batch_size = batch_size_inp
num_workers = nw
if is_train:
shuffle = True and cfg.ds.trn_shuffle
else:
shuffle = False
# shuffle = False
sampler = make_data_sampler(dataset, shuffle, is_distributed)
collator = collate_fn
return DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
drop_last=is_train,
num_workers=num_workers,
collate_fn=collator,
)
def collate_dct_lst_naive(batch: List[Dict]):
all_keys = list(batch[0].keys())
out_dict = {}
for k in all_keys:
out_dict[k] = [b[k] for b in batch]
return out_dict
def simple_collate_dct_list(
batch: List[Dict], stack_or_cat: str = "stack", cat_dim: int = None
) -> Dict[str, List]:
"""
Convert List[Dict[k, tensor]] -> Dict[k, Stacked Tensor]
"""
assert stack_or_cat in ["stack", "cat"]
if stack_or_cat == "cat":
assert cat_dim is not None
out_dict = {}
# nothing needs to be done
all_keys = list(batch[0].keys())
if stack_or_cat == "stack":
batch_size = len(batch)
else:
batch_size = len(batch) * batch[0][all_keys[0]].shape[0]
for k in all_keys:
shape = batch[0][k].shape
if not all([b[k].shape == shape for b in batch]):
raise NotImplementedError
# ForkedPdb().set_trace()
if stack_or_cat == "stack":
out_dict[k] = torch.stack([b[k] for b in batch])
elif stack_or_cat == "cat":
out_dict[k] = torch.cat([b[k] for b in batch], cat_dim)
else:
raise NotImplementedError
assert all([len(v) == batch_size for k, v in out_dict.items()])
return out_dict
def coalesce_dicts(dct_list: List[Dict]) -> Dict:
"""
Convert list of dicts with different keys
to a single dict
"""
out_dict = {}
for dct in dct_list:
for k in dct:
if k in out_dict:
assert torch.all(out_dict[k] == dct[k])
out_dict.update(dct)
return out_dict
def arg_mapper(arg_inp, argm_re=None):
if argm_re is None:
argm_re = re.compile(r"ArgM (.*)")
arg_name = arg_inp.split(" ")[0]
if arg_name in set(["Arg0", "Arg1", "Arg2", "Arg3", "Arg4", "Arg5"]):
return arg_name
elif arg_inp == "Scene of the Event":
return "AScn"
else:
assert arg_name == "ArgM"
y2 = argm_re.findall(arg_inp)[0].strip()
if "direction" in y2:
return "ADir"
elif "purpose" in y2:
return "APrp"
elif "manner" in y2:
return "AMnr"
elif "location" in y2:
return "ALoc"
elif "goal" in y2:
return "AGol"
else:
raise NotImplementedError
def truncate_batch(
inp_dict: Dict[str, torch.tensor], key: str, max_len: int, dim: int
) -> Dict[str, torch.tensor]:
"""
Truncate the value for the dictionary key
with max len and wrt dim
"""
assert len(inp_dict[key].shape) > dim
if dim == 1:
inp_dict[key] = inp_dict[key][:, :max_len].contiguous()
elif dim == 2:
inp_dict[key] = inp_dict[key][:, :, :max_len].contiguous()
elif dim == 3:
inp_dict[key] = inp_dict[key][:, :, :, :max_len].contiguous()
else:
raise NotImplementedError
return
def pad_words(
word_list: List, max_len: int, pad_index, eos_index=None, append_eos=False
) -> Tuple[List, int]:
if append_eos:
assert eos_index is not None
cur_len = len(word_list)
if cur_len >= max_len:
return word_list[: max_len - 1] + [eos_index], max_len
out_word_list = word_list + [eos_index] + [pad_index] * (max_len - 1 - cur_len)
return out_word_list, cur_len + 1
else:
cur_len = len(word_list)
if cur_len > max_len:
return word_list[:max_len], max_len
out_word_list = word_list + [pad_index] * (max_len - cur_len)
return out_word_list, cur_len
def pad_tokens(
lst: List[int],
pad_index: int,
pad_side: str,
append_eos: bool,
eos_index: int,
max_len: int,
):
curr_len = len(lst)
if isinstance(lst, list):
lst = torch.tensor(lst, dtype=torch.long)
sent_out_enc = lst.new_full((max_len,), pad_index, dtype=torch.long)
if append_eos:
if curr_len >= max_len:
sent_out_enc[:max_len] = lst[:max_len]
sent_out_enc[max_len - 1] = eos_index
out_len = max_len
else:
if pad_side == "right":
sent_out_enc[:curr_len] = lst
else:
sent_out_enc[-curr_len:] = lst
sent_out_enc[curr_len] = eos_index
out_len = curr_len + 1
else:
if curr_len >= max_len:
sent_out_enc[:max_len] = lst[:max_len]
out_len = max_len
else:
if pad_side == "right":
sent_out_enc[:curr_len] = lst
else:
sent_out_enc[-curr_len:] = lst
out_len = curr_len
if pad_side == "right":
attn_mask = [1] * out_len + [0] * (max_len - out_len)
else:
attn_mask = [0] * (max_len - out_len) + [1] * out_len
assert len(attn_mask) == max_len
return sent_out_enc, attn_mask
def pad_words_new(
sent: str,
max_len: int,
wvoc,
append_eos=False,
use_hf: bool = False,
pad_side: str = "right",
prefix_lst: List[int] = None,
) -> Tuple[List, int]:
assert pad_side in ["left", "right"]
if use_hf:
sent_enc = wvoc(sent)["input_ids"]
pad_index = wvoc.pad_token_id
eos_index = wvoc.eos_token_id
else:
sent_enc = wvoc.encode_line(sent, add_if_not_exist=False, append_eos=False)
pad_index = wvoc.pad_index
eos_index = wvoc.eos_index
if prefix_lst is not None:
sent_enc = prefix_lst + sent_enc
sent_out_enc, attn_mask = pad_tokens(
sent_enc,
pad_index=pad_index,
pad_side=pad_side,
append_eos=append_eos,
eos_index=eos_index,
max_len=max_len,
)
return sent_out_enc, attn_mask
def add_prev_tokens(
inp_dict: Dict[str, torch.tensor], key: str, pad_token: int, bos_token: int
) -> Dict[str, torch.tensor]:
"""
Create prev tokens for the given dictionary key
"""
src_toks = inp_dict[key]
# prev_output_tokens = src_toks.new_full(src_toks.shape, fill_value=pad_token)
# prev_output_tokens[..., 0] = bos_token
# prev_output_tokens[..., 1:] = src_toks[..., :-1].clone()
prev_output_tokens = add_prev_tokens_tensor(
src_tensor=src_toks, pad_token=pad_token, bos_token=bos_token
)
out_key = f"prev_out_{key}"
inp_dict[out_key] = prev_output_tokens
return
def add_prev_tokens_tensor(
src_tensor: torch.tensor, pad_token: int, bos_token: int
) -> torch.tensor:
"""
Create prev tokens for the given dictionary key
"""
prev_output_tokens = src_tensor.new_full(src_tensor.shape, fill_value=pad_token)
prev_output_tokens[..., 0] = bos_token
prev_output_tokens[..., 1:] = src_tensor[..., :-1].clone()
return prev_output_tokens
def read_file_with_assertion(fpath: str, read_type: str = "r", reader: str = "json"):
fpath1 = Path(fpath)
if read_type == "r":
assert fpath1.exists(), f"{fpath1} doesn't exist"
if reader == "json":
with open(fpath1, "r") as f:
file_data = json.load(f)
return file_data
elif reader == "pickle":
with open(fpath1, "rb") as f:
file_data = pickle.load(f)
return file_data
elif reader == "numpy":
return np.load(fpath1)
elif read_type == "w":
assert fpath1.parent.exists()
else:
raise NotImplementedError
| [
"numpy.load",
"json.load",
"torch.stack",
"torch.utils.data.DataLoader",
"torch.utils.data.sampler.SequentialSampler",
"torch.cat",
"torch.utils.data.distributed.DistributedSampler",
"pathlib.Path",
"pickle.load",
"torch.all",
"torch.utils.data.sampler.RandomSampler",
"torch.tensor",
"re.com... | [((1841, 1971), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'sampler', 'drop_last': 'is_train', 'num_workers': 'num_workers', 'collate_fn': 'collator'}), '(dataset, batch_size=batch_size, sampler=sampler, drop_last=\n is_train, num_workers=num_workers, collate_fn=collator)\n', (1851, 1971), False, 'from torch.utils.data import DataLoader\n'), ((8854, 8865), 'pathlib.Path', 'Path', (['fpath'], {}), '(fpath)\n', (8858, 8865), False, 'from pathlib import Path\n'), ((750, 802), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', ([], {'dataset': 'dataset', 'shuffle': 'shuffle'}), '(dataset=dataset, shuffle=shuffle)\n', (768, 802), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((837, 884), 'torch.utils.data.sampler.RandomSampler', 'torch.utils.data.sampler.RandomSampler', (['dataset'], {}), '(dataset)\n', (875, 884), False, 'import torch\n'), ((913, 964), 'torch.utils.data.sampler.SequentialSampler', 'torch.utils.data.sampler.SequentialSampler', (['dataset'], {}), '(dataset)\n', (955, 964), False, 'import torch\n'), ((3699, 3722), 're.compile', 're.compile', (['"""ArgM (.*)"""'], {}), "('ArgM (.*)')\n", (3709, 3722), False, 'import re\n'), ((5787, 5822), 'torch.tensor', 'torch.tensor', (['lst'], {'dtype': 'torch.long'}), '(lst, dtype=torch.long)\n', (5799, 5822), False, 'import torch\n'), ((3001, 3035), 'torch.stack', 'torch.stack', (['[b[k] for b in batch]'], {}), '([b[k] for b in batch])\n', (3012, 3035), False, 'import torch\n'), ((3098, 3139), 'torch.cat', 'torch.cat', (['[b[k] for b in batch]', 'cat_dim'], {}), '([b[k] for b in batch], cat_dim)\n', (3107, 3139), False, 'import torch\n'), ((3533, 3565), 'torch.all', 'torch.all', (['(out_dict[k] == dct[k])'], {}), '(out_dict[k] == dct[k])\n', (3542, 3565), False, 'import torch\n'), ((9047, 9059), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9056, 9059), False, 'import json\n'), ((9192, 9206), 
'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9203, 9206), False, 'import pickle\n'), ((9287, 9302), 'numpy.load', 'np.load', (['fpath1'], {}), '(fpath1)\n', (9294, 9302), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np
feature_params=dict(maxCorners=300,qualityLevel=0.2,minDistance=2, blockSize=7)
lk_params=dict(winSize = (15,15),maxLevel=2,criteria=(cv.TERM_CRITERIA_EPS|cv.TermCriteria_COUNT,10,0.03))
cap=cv.VideoCapture('../Assets/bees1.mp4')
color=(0,255,0)
ret,first_frame=cap.read()
prev_gray=cv.cvtColor(first_frame,cv.COLOR_BGR2GRAY)
prev=cv.goodFeaturesToTrack(prev_gray, mask=None, **feature_params)
mask=np.zeros_like(first_frame)
while(cap.isOpened()):
ret,frame=cap.read()
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
next, status, error = cv.calcOpticalFlowPyrLK(prev_gray,gray,prev,None,**lk_params)
good_old=prev[status==1]
good_new=next[status==1]
for i, (new, old) in enumerate(zip(good_new, good_old)):
a,b=new.ravel()
c,d=old.ravel()
mask=cv.line(mask,(a,b), (c,d), color,2)
frame=cv.circle(frame,(a,b), 3, color,-1)
output=cv.add(frame,mask)
prev_gray=gray.copy()
prev=good_new.reshape(-1,1,2)
cv.imshow("sparse optical flow",output)
if cv.waitKey(10) & 0xFF == ord('q'):
break
cap.release()
cv.destroyAllWindows() | [
"cv2.line",
"numpy.zeros_like",
"cv2.circle",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.goodFeaturesToTrack",
"cv2.calcOpticalFlowPyrLK",
"cv2.destroyAllWindows",
"cv2.add"
] | [((228, 266), 'cv2.VideoCapture', 'cv.VideoCapture', (['"""../Assets/bees1.mp4"""'], {}), "('../Assets/bees1.mp4')\n", (243, 266), True, 'import cv2 as cv\n'), ((321, 364), 'cv2.cvtColor', 'cv.cvtColor', (['first_frame', 'cv.COLOR_BGR2GRAY'], {}), '(first_frame, cv.COLOR_BGR2GRAY)\n', (332, 364), True, 'import cv2 as cv\n'), ((369, 431), 'cv2.goodFeaturesToTrack', 'cv.goodFeaturesToTrack', (['prev_gray'], {'mask': 'None'}), '(prev_gray, mask=None, **feature_params)\n', (391, 431), True, 'import cv2 as cv\n'), ((437, 463), 'numpy.zeros_like', 'np.zeros_like', (['first_frame'], {}), '(first_frame)\n', (450, 463), True, 'import numpy as np\n'), ((1121, 1143), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1141, 1143), True, 'import cv2 as cv\n'), ((524, 561), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_BGR2GRAY'], {}), '(frame, cv.COLOR_BGR2GRAY)\n', (535, 561), True, 'import cv2 as cv\n'), ((588, 653), 'cv2.calcOpticalFlowPyrLK', 'cv.calcOpticalFlowPyrLK', (['prev_gray', 'gray', 'prev', 'None'], {}), '(prev_gray, gray, prev, None, **lk_params)\n', (611, 653), True, 'import cv2 as cv\n'), ((927, 946), 'cv2.add', 'cv.add', (['frame', 'mask'], {}), '(frame, mask)\n', (933, 946), True, 'import cv2 as cv\n'), ((1010, 1050), 'cv2.imshow', 'cv.imshow', (['"""sparse optical flow"""', 'output'], {}), "('sparse optical flow', output)\n", (1019, 1050), True, 'import cv2 as cv\n'), ((830, 869), 'cv2.line', 'cv.line', (['mask', '(a, b)', '(c, d)', 'color', '(2)'], {}), '(mask, (a, b), (c, d), color, 2)\n', (837, 869), True, 'import cv2 as cv\n'), ((880, 918), 'cv2.circle', 'cv.circle', (['frame', '(a, b)', '(3)', 'color', '(-1)'], {}), '(frame, (a, b), 3, color, -1)\n', (889, 918), True, 'import cv2 as cv\n'), ((1057, 1071), 'cv2.waitKey', 'cv.waitKey', (['(10)'], {}), '(10)\n', (1067, 1071), True, 'import cv2 as cv\n')] |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Assertion test for multi_dispatch function/decorator
"""
import autoray
import numpy as onp
import pytest
from autoray import numpy as anp
from pennylane import numpy as np
from pennylane import math as fn
tf = pytest.importorskip("tensorflow", minversion="2.1")
torch = pytest.importorskip("torch")
jax = pytest.importorskip("jax")
test_multi_dispatch_stack_data = [
[[1.0, 0.0], [2.0, 3.0]],
([1.0, 0.0], [2.0, 3.0]),
onp.array([[1.0, 0.0], [2.0, 3.0]]),
anp.array([[1.0, 0.0], [2.0, 3.0]]),
np.array([[1.0, 0.0], [2.0, 3.0]]),
jax.numpy.array([[1.0, 0.0], [2.0, 3.0]]),
tf.constant([[1.0, 0.0], [2.0, 3.0]]),
]
@pytest.mark.parametrize("x", test_multi_dispatch_stack_data)
def test_multi_dispatch_stack(x):
"""Test that the decorated autoray function stack can handle all inputs"""
stack = fn.multi_dispatch(argnum=0, tensor_list=0)(autoray.numpy.stack)
res = stack(x)
assert fn.allequal(res, [[1.0, 0.0], [2.0, 3.0]])
@pytest.mark.parametrize("x", test_multi_dispatch_stack_data)
def test_multi_dispatch_decorate(x):
"""Test decorating a standard numpy function for PennyLane"""
@fn.multi_dispatch(argnum=[0], tensor_list=[0])
def tensordot(x, like, axes=None):
return np.tensordot(x[0], x[1], axes=axes)
assert fn.allequal(tensordot(x, axes=(0, 0)).numpy(), 2)
test_data0 = [
(1, 2, 3),
[1, 2, 3],
onp.array([1, 2, 3]),
anp.array([1, 2, 3]),
np.array([1, 2, 3]),
torch.tensor([1, 2, 3]),
jax.numpy.array([1, 2, 3]),
tf.constant([1, 2, 3]),
]
test_data = [(x, x) for x in test_data0]
@pytest.mark.parametrize("t1,t2", test_data)
def test_multi_dispatch_decorate_argnum_none(t1, t2):
"""Test decorating a standard numpy function for PennyLane, automatically dispatching all inputs by choosing argnum=None"""
@fn.multi_dispatch(argnum=None, tensor_list=None)
def tensordot(tensor1, tensor2, like, axes=None):
return np.tensordot(tensor1, tensor2, axes=axes)
assert fn.allequal(tensordot(t1, t2, axes=(0, 0)).numpy(), 14)
test_data_values = [
[[1, 2, 3] for _ in range(5)],
[(1, 2, 3) for _ in range(5)],
[np.array([1, 2, 3]) for _ in range(5)],
[onp.array([1, 2, 3]) for _ in range(5)],
[anp.array([1, 2, 3]) for _ in range(5)],
[torch.tensor([1, 2, 3]) for _ in range(5)],
[jax.numpy.array([1, 2, 3]) for _ in range(5)],
[tf.constant([1, 2, 3]) for _ in range(5)],
]
@pytest.mark.parametrize("values", test_data_values)
def test_multi_dispatch_decorate_non_dispatch(values):
"""Test decorating a custom function for PennyLane including a non-dispatchable parameter"""
@fn.multi_dispatch(argnum=0, tensor_list=0)
def custom_function(values, like, coefficient=10):
"""
A dummy custom function that computes coeff :math:`c \\sum_i (v_i)^T v_i` where :math:`v_i` are vectors in ``values``
and :math:`c` is a fixed ``coefficient``.
values is a list of vectors
like can force the interface (optional)
"""
return coefficient * np.sum([fn.dot(v, v) for v in values])
assert fn.allequal(custom_function(values), 700)
| [
"pennylane.numpy.tensordot",
"pytest.importorskip",
"autoray.numpy.array",
"pennylane.math.dot",
"pennylane.math.allequal",
"numpy.array",
"pennylane.numpy.array",
"pennylane.math.multi_dispatch",
"pytest.mark.parametrize"
] | [((815, 866), 'pytest.importorskip', 'pytest.importorskip', (['"""tensorflow"""'], {'minversion': '"""2.1"""'}), "('tensorflow', minversion='2.1')\n", (834, 866), False, 'import pytest\n'), ((875, 903), 'pytest.importorskip', 'pytest.importorskip', (['"""torch"""'], {}), "('torch')\n", (894, 903), False, 'import pytest\n'), ((910, 936), 'pytest.importorskip', 'pytest.importorskip', (['"""jax"""'], {}), "('jax')\n", (929, 936), False, 'import pytest\n'), ((1250, 1310), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x"""', 'test_multi_dispatch_stack_data'], {}), "('x', test_multi_dispatch_stack_data)\n", (1273, 1310), False, 'import pytest\n'), ((1576, 1636), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x"""', 'test_multi_dispatch_stack_data'], {}), "('x', test_multi_dispatch_stack_data)\n", (1599, 1636), False, 'import pytest\n'), ((2205, 2248), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""t1,t2"""', 'test_data'], {}), "('t1,t2', test_data)\n", (2228, 2248), False, 'import pytest\n'), ((3049, 3100), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""values"""', 'test_data_values'], {}), "('values', test_data_values)\n", (3072, 3100), False, 'import pytest\n'), ((1037, 1072), 'numpy.array', 'onp.array', (['[[1.0, 0.0], [2.0, 3.0]]'], {}), '([[1.0, 0.0], [2.0, 3.0]])\n', (1046, 1072), True, 'import numpy as onp\n'), ((1078, 1113), 'autoray.numpy.array', 'anp.array', (['[[1.0, 0.0], [2.0, 3.0]]'], {}), '([[1.0, 0.0], [2.0, 3.0]])\n', (1087, 1113), True, 'from autoray import numpy as anp\n'), ((1119, 1153), 'pennylane.numpy.array', 'np.array', (['[[1.0, 0.0], [2.0, 3.0]]'], {}), '([[1.0, 0.0], [2.0, 3.0]])\n', (1127, 1153), True, 'from pennylane import numpy as np\n'), ((1530, 1572), 'pennylane.math.allequal', 'fn.allequal', (['res', '[[1.0, 0.0], [2.0, 3.0]]'], {}), '(res, [[1.0, 0.0], [2.0, 3.0]])\n', (1541, 1572), True, 'from pennylane import math as fn\n'), ((1746, 1792), 'pennylane.math.multi_dispatch', 
'fn.multi_dispatch', ([], {'argnum': '[0]', 'tensor_list': '[0]'}), '(argnum=[0], tensor_list=[0])\n', (1763, 1792), True, 'from pennylane import math as fn\n'), ((1996, 2016), 'numpy.array', 'onp.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2005, 2016), True, 'import numpy as onp\n'), ((2022, 2042), 'autoray.numpy.array', 'anp.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2031, 2042), True, 'from autoray import numpy as anp\n'), ((2048, 2067), 'pennylane.numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2056, 2067), True, 'from pennylane import numpy as np\n'), ((2437, 2485), 'pennylane.math.multi_dispatch', 'fn.multi_dispatch', ([], {'argnum': 'None', 'tensor_list': 'None'}), '(argnum=None, tensor_list=None)\n', (2454, 2485), True, 'from pennylane import math as fn\n'), ((3259, 3301), 'pennylane.math.multi_dispatch', 'fn.multi_dispatch', ([], {'argnum': '(0)', 'tensor_list': '(0)'}), '(argnum=0, tensor_list=0)\n', (3276, 3301), True, 'from pennylane import math as fn\n'), ((1436, 1478), 'pennylane.math.multi_dispatch', 'fn.multi_dispatch', ([], {'argnum': '(0)', 'tensor_list': '(0)'}), '(argnum=0, tensor_list=0)\n', (1453, 1478), True, 'from pennylane import math as fn\n'), ((1847, 1882), 'pennylane.numpy.tensordot', 'np.tensordot', (['x[0]', 'x[1]'], {'axes': 'axes'}), '(x[0], x[1], axes=axes)\n', (1859, 1882), True, 'from pennylane import numpy as np\n'), ((2555, 2596), 'pennylane.numpy.tensordot', 'np.tensordot', (['tensor1', 'tensor2'], {'axes': 'axes'}), '(tensor1, tensor2, axes=axes)\n', (2567, 2596), True, 'from pennylane import numpy as np\n'), ((2763, 2782), 'pennylane.numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2771, 2782), True, 'from pennylane import numpy as np\n'), ((2808, 2828), 'numpy.array', 'onp.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2817, 2828), True, 'import numpy as onp\n'), ((2854, 2874), 'autoray.numpy.array', 'anp.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2863, 2874), True, 'from autoray 
import numpy as anp\n'), ((3678, 3690), 'pennylane.math.dot', 'fn.dot', (['v', 'v'], {}), '(v, v)\n', (3684, 3690), True, 'from pennylane import math as fn\n')] |
import os, csv, six
import numpy as np
import pandas as pd
from glob import glob
from tqdm import tqdm
from minepy import MINE
from scipy import stats
from itertools import permutations, combinations
from sklearn import feature_selection
from sklearn import ensemble
from sklearn import linear_model
from sklearn.decomposition import PCA
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler
from tqdm import tqdm
from radiomics import featureextractor
import SimpleITK as sitk
class FeatureExtractor:
    """Extract radiomics features for a list of image/mask pairs and dump them to CSV.

    Supports both bitmap (bmp/jpg/png) and volumetric (e.g. nii) inputs; bitmap
    images are reduced to a single color channel before extraction.
    """

    def __init__(self, idList, imagePaths, maskPaths, paramPath, outputPath):
        """
        :param idList: identifier per image/mask pair (written to the CSV "id" column)
        :param imagePaths: paths to the input images
        :param maskPaths: paths to the segmentation masks (parallel to imagePaths)
        :param paramPath: pyradiomics parameter file passed to the extractor
        :param outputPath: destination CSV file
        """
        self.idList = idList
        self.imagePaths = imagePaths
        self.maskPaths = maskPaths
        self.paramPath = paramPath
        self.outputPath = outputPath
        assert len(self.imagePaths) == len(self.maskPaths), "#imagePaths #maskPaths is not consistent!"

    def save2csv(self, features, idx):
        """Write one CSV row per feature dict, keyed by the parallel idx list.

        The header is "id" followed by the keys of the first feature dict; each
        feature dict is augmented in place with its "id" before writing.
        """
        assert len(features) == len(idx), "#features #idx is not consistent!"
        if not features:
            print("No features!")
            return
        obs_header = ["id"]
        obs_header.extend(features[0].keys())
        # Opening with 'w' truncates any pre-existing file, so no explicit
        # os.remove is needed; the context managers guarantee the handles are
        # closed (the original version leaked both file objects).
        with open(self.outputPath, 'w') as f:
            f.write(','.join(obs_header) + '\n')
        with open(self.outputPath, 'a') as f:
            writer = csv.DictWriter(f, fieldnames=obs_header, quoting=csv.QUOTE_MINIMAL)
            for fea, row_id in zip(features, idx):
                fea["id"] = row_id
                writer.writerow(fea)
        print("features saved to ", self.outputPath)

    def singleExtract(self, imageName, maskName, params):
        """Dispatch to the bitmap or volumetric extractor based on the file extension."""
        if imageName.split(".")[-1] in ["bmp", "jpg", "png"]:
            return self.singleExtract_BMP(imageName, maskName, params)
        return self.singleExtract_NII(imageName, maskName, params)

    @staticmethod
    def _first_channel(path):
        """Read an image and keep only its first (index 0) color channel."""
        image = sitk.ReadImage(path)
        selector = sitk.VectorIndexSelectionCastImageFilter()
        selector.SetIndex(0)
        return selector.Execute(image)

    def singleExtract_BMP(self, imageName, maskName, params):
        """Extract features from a 2D bitmap image/mask pair; returns None on failure."""
        try:
            extractor = featureextractor.RadiomicsFeatureExtractor(params)
            im = self._first_channel(imageName)
            mas = self._first_channel(maskName)
            result = extractor.execute(im, mas)
            feature = dict(result)  # six.iteritems is unnecessary on Python 3
        except Exception as e:
            print(e)
            print("error when extacting ", imageName)
            feature = None
        return feature

    def singleExtract_NII(self, imageName, maskName, params):
        """Extract features from a volumetric image/mask pair; returns None on failure."""
        try:
            extractor = featureextractor.RadiomicsFeatureExtractor(params)
            result = extractor.execute(imageName, maskName)
            feature = dict(result)
        except Exception as e:
            print(e)
            print("error when extacting ", imageName)
            feature = None
        return feature

    def extract(self, force=False):
        """Run extraction over all pairs and persist the results to CSV.

        :param force: re-extract even if the output file already exists.
        """
        if os.path.exists(self.outputPath) and not force:
            print("file already exist : ", self.outputPath)
            return
        features = []
        idx = []
        lens = len(self.imagePaths)
        for i in tqdm(range(lens), total=lens):
            feature = self.singleExtract(self.imagePaths[i], self.maskPaths[i], self.paramPath)
            if feature is not None:
                features.append(feature)
                idx.append(self.idList[i])
        self.save2csv(features, idx)
class FeatureProcess:
    """Preprocessing wrapper: fit sklearn transformers on the training split
    only, then apply them to both the training and the test split."""

    def __init__(self, X_train, X_test):
        self.X_train = X_train
        self.X_test = X_test

    def _fit_apply(self, transformer):
        # Fit on the training data, then transform both splits in place.
        transformer.fit(self.X_train)
        self.X_train = transformer.transform(self.X_train)
        self.X_test = transformer.transform(self.X_test)

    def simpleImpute(self, strategy='mean'):
        """Fill NaN entries using the given imputation strategy."""
        self._fit_apply(SimpleImputer(missing_values=np.nan, strategy=strategy))

    def standardScale(self):
        """Standardize each feature (zero mean, unit variance on the train split)."""
        self._fit_apply(StandardScaler())

    def normalizer(self):
        """Rescale each sample vector to unit norm."""
        self._fit_apply(Normalizer())

    def minMaxScaler(self):
        """Rescale each feature to the [0, 1] range observed on the train split."""
        self._fit_apply(MinMaxScaler())

    def pca(self, n_components=10):
        """Project both splits onto the top principal components."""
        self._fit_apply(PCA(n_components=n_components))
class bicluster:
    """Node of a binary cluster tree built by FeatureSelector.hcluster.

    Leaves carry a feature column and a non-negative id; merged (internal)
    nodes receive negative ids, the averaged vector of their children and the
    similarity at which the merge happened.
    """

    def __init__(self, vec, left=None, right=None, distance=0.0, id=None):
        self.vec = vec            # representative vector of this (sub)cluster
        self.id = id              # >= 0 for leaves, < 0 for merged nodes
        self.distance = distance  # similarity score at which children merged
        self.left = left          # child subtree (None for leaves)
        self.right = right        # child subtree (None for leaves)
class FeatureSelector():
    """Collection of feature-selection strategies over a (n_samples, n_features) matrix.

    Every selector stores the chosen column indices in ``self.indexs`` and,
    when called with ``inplace=True``, shrinks ``self.x``, ``self.n`` and
    ``self.featureNames`` accordingly.  All selectors return ``(selected_x, y)``.
    """

    def __init__(self, features, labels, featureNames, eps=1e-3):
        """
        :param features: 2D array of shape (n_samples, n_features)
        :param labels: per-sample class labels / regression targets
        :param featureNames: one name per feature column
        :param eps: minimum |coefficient| for lrSelector to keep a feature
        """
        self.x = features
        self.y = labels
        self.featureNames = np.array(featureNames)
        self.eps = eps
        self.n = self.x.shape[1]
        self.indexs = None

    ############################
    #   univariate selection   #
    ############################
    def univarSelector(self, top_k=1, method_name="f_classif", inplace=True):
        """Keep the top_k features ranked by a univariate score.

        :method_name {"chi2", "f_classif", "mutual_info_classif"}
        """
        print("Feature selecting method: ", method_name)
        selector = {"chi2": feature_selection.chi2,
                    "f_classif": feature_selection.f_classif,
                    "mutual_info_classif": feature_selection.mutual_info_classif}
        func = selector[method_name]
        sler = feature_selection.SelectKBest(func, k=top_k)
        sler.fit(self.x, self.y)
        self.indexs = sler.get_support()
        if inplace:
            self.x = self.x[:, self.indexs]
            self.n = self.x.shape[1]
            self.featureNames = self.featureNames[self.indexs]
            return self.x, self.y
        else:
            return self.x[:, self.indexs], self.y

    #########################
    # regression selecting  #
    #########################
    def lrSelector(self, method_name="lr", inplace=True):
        """Keep features whose fitted regression coefficient exceeds self.eps in magnitude.

        :method_name {"lr", "ridge"}
        """
        print("Feature selecting method: ", method_name)
        selector = {"lr": linear_model.LinearRegression(), "ridge": linear_model.Ridge()}
        lr = selector[method_name]
        lr.fit(self.x, self.y)
        coefs = lr.coef_.tolist()
        self.indexs = [i for i in range(len(coefs)) if np.abs(coefs[i]) > self.eps]
        if inplace:
            self.x = self.x[:, self.indexs]
            self.n = self.x.shape[1]
            self.featureNames = self.featureNames[self.indexs]
            return self.x, self.y
        else:
            return self.x[:, self.indexs], self.y

    ############################
    #      model selecting     #
    ############################
    def modelSelector(self, model_name="rf", inplace=True):
        """Keep features deemed important by a fitted model (sklearn SelectFromModel).

        :model_name {"rf", "lasso"}
        """
        print("Feature selecting method: ", model_name)
        selector = {"rf": ensemble.RandomForestClassifier(n_estimators=10), "lasso": linear_model.LassoCV(cv=5, max_iter=5000)}
        model = selector[model_name]
        sler = feature_selection.SelectFromModel(model)
        sler.fit(self.x, self.y)
        self.indexs = sler.get_support()
        if inplace:
            self.x = self.x[:, self.indexs]
            self.n = self.x.shape[1]
            self.featureNames = self.featureNames[self.indexs]
            return self.x, self.y
        else:
            return self.x[:, self.indexs], self.y

    #########################
    # correlation selecting #
    #########################
    def calMic(self, x1, x2):
        """Maximal Information Coefficient between two columns (returns a bare float)."""
        mine = MINE(alpha=0.6, c=15)
        mine.compute_score(x1, x2)
        return mine.mic()

    def hcluster(self, X, calDistance):
        """Agglomerative (bottom-up) clustering of the columns of X.

        ``calDistance`` may return either a bare similarity value or a
        ``(similarity, p_value)`` tuple (as scipy.stats.pearsonr and friends
        do); both forms are accepted.  The two most similar clusters are
        repeatedly merged until a single tree remains; its root is returned.
        """
        biclusters = [bicluster(vec=X[:, i], id=i) for i in range(X.shape[1])]
        distances = {}
        flag = None
        currentclusted = -1
        print("features dim: ", len(biclusters))
        while len(biclusters) > 1:
            max_val = -1
            biclusters_len = len(biclusters)
            for i in range(biclusters_len - 1):
                for j in range(i + 1, biclusters_len):
                    key = (biclusters[i].id, biclusters[j].id)
                    if distances.get(key) is None:
                        d_val = calDistance(biclusters[i].vec, biclusters[j].vec)
                        # scipy.stats correlation functions return (stat, p-value)
                        # while calMic returns a bare float -- accept both.  (The
                        # original unconditional tuple-unpack crashed for "mic".)
                        distances[key] = d_val[0] if isinstance(d_val, tuple) else d_val
                    d = distances[key]
                    if d > max_val:
                        max_val = d
                        flag = (i, j)
            bic1, bic2 = flag
            newvec = (biclusters[bic1].vec + biclusters[bic2].vec) / 2
            newbic = bicluster(newvec, left=biclusters[bic1], right=biclusters[bic2], distance=max_val, id=currentclusted)
            currentclusted -= 1
            del biclusters[bic2]
            del biclusters[bic1]
            biclusters.append(newbic)
        return biclusters[0]

    def corrSelector(self, method_name="pearson", num=1, inplace=True):
        """Cluster features hierarchically by pairwise correlation and keep the
        first *num* leaves of the tree in breadth-first order.

        :method_name {"pearson", "kendall", "spearman", "mic"}
        """
        print("Feature selecting method: ", method_name)
        selector = {"pearson": stats.pearsonr, "kendall": stats.kendalltau,
                    "spearman": stats.spearmanr, "mic": self.calMic}
        func = selector[method_name]
        root_node = self.hcluster(self.x, func)
        node_ids = []
        nodes = [root_node]
        while len(nodes) > 0:
            tmp = nodes.pop(0)
            if tmp.id < 0:
                # internal node: descend into both children
                nodes.append(tmp.left)
                nodes.append(tmp.right)
            else:
                node_ids.append(tmp.id)
        self.indexs = np.asarray(node_ids)[:num]
        if inplace:
            self.x = self.x[:, self.indexs]
            self.n = self.x.shape[1]
            self.featureNames = self.featureNames[self.indexs]
            return self.x, self.y
        else:
            return self.x[:, self.indexs], self.y

    #########################
    #    T test selecting   #
    #########################
    def calTtest(self, x1, x2, threshold=0.05):
        """Two-sample t-test p-value; Levene's test decides pooled vs Welch variant.

        https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_ind.html
        """
        stat, p = stats.levene(x1, x2)
        if p > threshold:
            # variances look equal -> pooled (Student) t-test
            _, t = stats.ttest_ind(x1, x2)
        else:
            # unequal variances -> Welch's t-test
            _, t = stats.ttest_ind(x1, x2, equal_var=False)
        return t

    def ttestSelector(self, threshold=0.05, inplace=True):
        """Keep features whose between-class t-test p-value is below *threshold*
        for (almost) every pair of class labels."""
        print("Feature selecting method: T test")
        labels = np.unique(self.y)
        num_labels = len(labels)
        cbs = list(combinations(range(num_labels), 2))
        num_cbs = len(cbs)
        eps = 0.1  # tolerated fraction of non-significant class pairs
        self.indexs = []
        for i in range(self.n):
            count = 0
            for c in range(num_cbs):
                index1, index2 = cbs[c]
                row1, row2 = labels[index1], labels[index2]
                r1, r2 = self.y == row1, self.y == row2
                t = self.calTtest(self.x[r1, i], self.x[r2, i])
                if t > threshold:
                    count += 1
            if count / num_cbs < eps:
                self.indexs.append(i)
        if inplace:
            self.x = self.x[:, self.indexs]
            self.n = self.x.shape[1]
            self.featureNames = self.featureNames[self.indexs]
            return self.x, self.y
        else:
            return self.x[:, self.indexs], self.y

    #########################
    #  mann whitney u test  #
    #########################
    def mannSelector(self, threshold=0.05, inplace=True):
        """Like ttestSelector, but using the distribution-free Mann-Whitney U test."""
        print("Feature selecting method: mann whitney u test")
        labels = np.unique(self.y)
        num_labels = len(labels)
        cbs = list(combinations(range(num_labels), 2))
        num_cbs = len(cbs)
        eps = 0.1
        self.indexs = []
        for i in range(self.n):
            count = 0
            for c in range(num_cbs):
                index1, index2 = cbs[c]
                row1, row2 = labels[index1], labels[index2]
                r1, r2 = self.y == row1, self.y == row2
                _, t = stats.mannwhitneyu(self.x[r1, i], self.x[r2, i], alternative='two-sided')
                if t > threshold:
                    count += 1
            if count / num_cbs < eps:
                self.indexs.append(i)
        if inplace:
            self.x = self.x[:, self.indexs]
            self.n = self.x.shape[1]
            self.featureNames = self.featureNames[self.indexs]
            return self.x, self.y
        else:
            return self.x[:, self.indexs], self.y

    #########################
    #      combination      #
    #########################
    def combination(self, train_test_run, x_test, y_test, inplace=True):
        """Exhaustively search proper feature subsets, keeping the one with minimal MAE.

        :param train_test_run: callable(x_train, y_train, x_test, y_test) ->
            (error_mse, error_mae), evaluated for every subset.
        Warning: exponential in the number of features.
        """
        print("Feature selecting method: combination")
        len_f = self.x.shape[1]
        res_indexs = []
        # NOTE(review): initial best MAE of 2 assumes errors below 2 are
        # reachable -- confirm against the metric scale used by train_test_run.
        min_value = 2
        for c_f in range(1, len_f):
            cbs = list(combinations(list(range(len_f)), c_f))
            for c in cbs:
                indexs = [i for i in c]
                error_mse, error_mae = train_test_run(self.x[:, indexs], self.y, x_test[:, indexs], y_test)
                if error_mae < min_value:
                    min_value = error_mae
                    self.indexs = indexs
                res_indexs.append({"error_mse": error_mse, "error_mae": error_mae, "indexs": indexs})
                print(res_indexs[-1])
        if inplace:
            self.x = self.x[:, self.indexs]
            self.n = self.x.shape[1]
            self.featureNames = self.featureNames[self.indexs]
            return self.x, self.y
        else:
            return self.x[:, self.indexs], self.y
| [
"os.remove",
"sklearn.preprocessing.StandardScaler",
"numpy.abs",
"sklearn.preprocessing.MinMaxScaler",
"scipy.stats.levene",
"sklearn.feature_selection.SelectFromModel",
"os.path.isfile",
"radiomics.featureextractor.RadiomicsFeatureExtractor",
"six.iteritems",
"numpy.unique",
"sklearn.impute.Si... | [((4215, 4270), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'np.nan', 'strategy': 'strategy'}), '(missing_values=np.nan, strategy=strategy)\n', (4228, 4270), False, 'from sklearn.impute import SimpleImputer\n'), ((4456, 4472), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4470, 4472), False, 'from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler\n'), ((4653, 4665), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {}), '()\n', (4663, 4665), False, 'from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler\n'), ((4839, 4853), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4851, 4853), False, 'from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler\n'), ((5039, 5069), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (5042, 5069), False, 'from sklearn.decomposition import PCA\n'), ((5593, 5615), 'numpy.array', 'np.array', (['featureNames'], {}), '(featureNames)\n', (5601, 5615), True, 'import numpy as np\n'), ((6272, 6316), 'sklearn.feature_selection.SelectKBest', 'feature_selection.SelectKBest', (['func'], {'k': 'top_k'}), '(func, k=top_k)\n', (6301, 6316), False, 'from sklearn import feature_selection\n'), ((7914, 7954), 'sklearn.feature_selection.SelectFromModel', 'feature_selection.SelectFromModel', (['model'], {}), '(model)\n', (7947, 7954), False, 'from sklearn import feature_selection\n'), ((8471, 8492), 'minepy.MINE', 'MINE', ([], {'alpha': '(0.6)', 'c': '(15)'}), '(alpha=0.6, c=15)\n', (8475, 8492), False, 'from minepy import MINE\n'), ((11070, 11090), 'scipy.stats.levene', 'stats.levene', (['x1', 'x2'], {}), '(x1, x2)\n', (11082, 11090), False, 'from scipy import stats\n'), ((11378, 11395), 'numpy.unique', 'np.unique', (['self.y'], {}), '(self.y)\n', (11387, 11395), True, 'import numpy as np\n'), ((12492, 12509), 'numpy.unique', 
'np.unique', (['self.y'], {}), '(self.y)\n', (12501, 12509), True, 'import numpy as np\n'), ((2141, 2191), 'radiomics.featureextractor.RadiomicsFeatureExtractor', 'featureextractor.RadiomicsFeatureExtractor', (['params'], {}), '(params)\n', (2183, 2191), False, 'from radiomics import featureextractor\n'), ((2241, 2266), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['imageName'], {}), '(imageName)\n', (2255, 2266), True, 'import SimpleITK as sitk\n'), ((2290, 2332), 'SimpleITK.VectorIndexSelectionCastImageFilter', 'sitk.VectorIndexSelectionCastImageFilter', ([], {}), '()\n', (2330, 2332), True, 'import SimpleITK as sitk\n'), ((2466, 2490), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['maskName'], {}), '(maskName)\n', (2480, 2490), True, 'import SimpleITK as sitk\n'), ((2514, 2556), 'SimpleITK.VectorIndexSelectionCastImageFilter', 'sitk.VectorIndexSelectionCastImageFilter', ([], {}), '()\n', (2554, 2556), True, 'import SimpleITK as sitk\n'), ((3032, 3082), 'radiomics.featureextractor.RadiomicsFeatureExtractor', 'featureextractor.RadiomicsFeatureExtractor', (['params'], {}), '(params)\n', (3074, 3082), False, 'from radiomics import featureextractor\n'), ((3419, 3450), 'os.path.exists', 'os.path.exists', (['self.outputPath'], {}), '(self.outputPath)\n', (3433, 3450), False, 'import os, csv, six\n'), ((6947, 6978), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (6976, 6978), False, 'from sklearn import linear_model\n'), ((6989, 7009), 'sklearn.linear_model.Ridge', 'linear_model.Ridge', ([], {}), '()\n', (7007, 7009), False, 'from sklearn import linear_model\n'), ((7760, 7808), 'sklearn.ensemble.RandomForestClassifier', 'ensemble.RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (7791, 7808), False, 'from sklearn import ensemble\n'), ((7819, 7860), 'sklearn.linear_model.LassoCV', 'linear_model.LassoCV', ([], {'cv': '(5)', 'max_iter': '(5000)'}), '(cv=5, max_iter=5000)\n', (7839, 7860), False, 'from 
sklearn import linear_model\n'), ((10533, 10553), 'numpy.asarray', 'np.asarray', (['node_ids'], {}), '(node_ids)\n', (10543, 10553), True, 'import numpy as np\n'), ((11136, 11159), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['x1', 'x2'], {}), '(x1, x2)\n', (11151, 11159), False, 'from scipy import stats\n'), ((11193, 11233), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['x1', 'x2'], {'equal_var': '(False)'}), '(x1, x2, equal_var=False)\n', (11208, 11233), False, 'from scipy import stats\n'), ((1225, 1256), 'os.path.exists', 'os.path.exists', (['self.outputPath'], {}), '(self.outputPath)\n', (1239, 1256), False, 'import os, csv, six\n'), ((1260, 1291), 'os.path.isfile', 'os.path.isfile', (['self.outputPath'], {}), '(self.outputPath)\n', (1274, 1291), False, 'import os, csv, six\n'), ((1309, 1335), 'os.remove', 'os.remove', (['self.outputPath'], {}), '(self.outputPath)\n', (1318, 1335), False, 'import os, csv, six\n'), ((12934, 13007), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['self.x[r1, i]', 'self.x[r2, i]'], {'alternative': '"""two-sided"""'}), "(self.x[r1, i], self.x[r2, i], alternative='two-sided')\n", (12952, 13007), False, 'from scipy import stats\n'), ((2739, 2760), 'six.iteritems', 'six.iteritems', (['result'], {}), '(result)\n', (2752, 2760), False, 'import os, csv, six\n'), ((3191, 3212), 'six.iteritems', 'six.iteritems', (['result'], {}), '(result)\n', (3204, 3212), False, 'import os, csv, six\n'), ((7166, 7182), 'numpy.abs', 'np.abs', (['coefs[i]'], {}), '(coefs[i])\n', (7172, 7182), True, 'import numpy as np\n')] |
# nawiąż połączenie
import psycopg2
from DB_connection_functions import *
from DB_connection_parameters import user, password, host, port, database3
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Iterative (k-means-style) clustering of building centroids over a road graph:
# points are snapped to the nearest graph edge and repeatedly reassigned to the
# nearest centroid by pgRouting A* network distance.
try:
    # establish the database connection
    connection = psycopg2.connect(user=user, password=password, host=host, port=port, database=database3)
    cursor = connection.cursor()
    n = 20 # how many points
    cursor.execute(
        # f'SELECT id, ST_AsText(geom) FROM public.centroidy_budynki ORDER BY random() limit {n}'
        # f'SELECT id, ST_AsText(geom) FROM public.budynki_wawa_centroidy ORDER BY random() limit {n}'
        f'SELECT id, ST_AsText(geom) FROM public.budynki_wawa_centroidy ORDER BY random() limit {n}'
    ) # fetch data about the buildings from the database
    buildings_table = cursor.fetchall()
    print('\n\n', buildings_table)
    ############################ UNPACK THE BUILDING DATA ############################
    indeksy_budynki = [column[0] for column in buildings_table] # building ids
    wspolrzedna_budynki_X = [float(column[1][6:-1].split()[0]) for column in buildings_table] # X (easting) coordinate
    wspolrzedna_budynki_Y = [float(column[1][6:-1].split()[1]) for column in buildings_table] # Y (northing) coordinate
    df = pd.DataFrame(
        list(zip(indeksy_budynki, wspolrzedna_budynki_X, wspolrzedna_budynki_Y)),
        columns=['indeks', 'X', 'Y']
    ) # DataFrame for convenient data processing
    print('\n\ndf z danymi początkowymi: \n', df.to_string())
    # Snap all points to the nodes of the graph grid
    dane_snapowane = []
    # loop that finds the nearest grid line for each point
    for index, coordX, coordY in zip(df.indeks, df.X, df.Y):
        qstring = f'SELECT id, source, ST_AsText(geom), target FROM public."500m_g" ORDER BY geom <-> ST_SetSRID(ST_MakePoint({coordX},{coordY}), 4326) LIMIT 1'
        # print(qstring)
        cursor.execute(qstring)
        dane_snapowane.append(cursor.fetchall()[0])
    # print(dane_snapowane)
    ############################ UNPACK THE DATA after snapping ############################
    indeksy_punktow_siatki = [column[0] for column in dane_snapowane]
    print('\n\nindeksy po snapie: \n', indeksy_punktow_siatki)
    source_linii = [column[1] for column in dane_snapowane]
    wspolrzedna_lini_siatki = [column[2] for column in dane_snapowane]
    target_linii = [column[3] for column in dane_snapowane]
    A_to_sa_wspolrzedne_source_X = [float(wspolrzedna[11:].split(',')[0].split()[0]) for wspolrzedna in
                                    wspolrzedna_lini_siatki]
    A_to_sa_wspolrzedne_source_Y = [float(wspolrzedna[11:].split(',')[0].split()[1]) for wspolrzedna in
                                    wspolrzedna_lini_siatki]
    df_punktow_siatki = pd.DataFrame(
        list(zip(indeksy_punktow_siatki, source_linii, target_linii, wspolrzedna_lini_siatki)),
        columns=['indeksy_punktow_siatki', 'source_linii', 'target_linii', 'wspolrzedna_lini_siatki']
    )
    print('\n\n df z punktami siatki: \n', df_punktow_siatki.to_string())
    # ############################# clustering ############################
    k = 10
    indeksy_punktow_centralnych_w_grupie = np.random.choice(len(indeksy_punktow_siatki), k,
                                                            replace=False)
    ############# NOTE: we must take the centroids' source and the points' target and compute the distance between them!!!
    # randomly choose the centroid indices
    centroidsID = [[indeksy_punktow_siatki[item], source_linii[item]] for item in
                   indeksy_punktow_centralnych_w_grupie] # id and source of the objects picked for the first iteration
    print('\n Source punktow z najbliższych węzłów grafu: \n', centroidsID)
    centroidsAA = [indeksy_punktow_siatki[item] for item in indeksy_punktow_centralnych_w_grupie]
    print(centroidsAA) # <<<< centroid indices used for clustering in the first iteration
    df_filtered = df_punktow_siatki.query(f'indeksy_punktow_siatki in ({centroidsAA})')
    print('\n\n\n\nfiltered ', f'indeksy_punktow_siatki in ({centroidsAA})', '\n\n', df_filtered.to_string())
    def my_func(source, target):
        """Network distance between two graph nodes via pgRouting A*.

        Takes the last row of the pgr_astar result, column 5 -- presumably the
        aggregate route cost (agg_cost); verify against the pgRouting version.
        """
        postgreSQL_select_Query = \
            f'SELECT * FROM pgr_astar(\'SELECT id, source, target, cost, reverse_co, x1, y1, x2, y2 FROM public."500m_g"\', {source}, {target}, directed := false, heuristic := 5)'
        print(postgreSQL_select_Query)
        cursor.execute(postgreSQL_select_Query)
        response = cursor.fetchall()
        response = response[-1][5]
        return response
    distances = [[[my_func(source, target)] for target in df_filtered.target_linii] for source in
                 df_punktow_siatki.source_linii]
    klasyfikacja = np.array(
        [np.argmin(i) for i in distances]) # pick, for every point, the centroid it is closest to
    df_klasyfikacja = pd.DataFrame(klasyfikacja)
    print('\nWynik klasyfikacji: \n', klasyfikacja)
    print('\n\n\n sumaryczny ', df_klasyfikacja, )
    df_sumaryczny = pd.DataFrame(list(
        zip(indeksy_punktow_siatki, distances, klasyfikacja, A_to_sa_wspolrzedne_source_X,
            A_to_sa_wspolrzedne_source_Y)),
        columns=['indeksy_punktow_siatki', 'distances', 'klasyfikacja', 'A_to_sa_wspolrzedne_source_X',
                 'A_to_sa_wspolrzedne_source_Y'
                 ])
    print(df_sumaryczny.to_string())
    # compute the coordinates of the line's geometric centre for the new centroids
    df1 = pd.DataFrame([df_sumaryczny.groupby('klasyfikacja')['A_to_sa_wspolrzedne_source_X'].mean(),
                       df_sumaryczny.groupby('klasyfikacja')['A_to_sa_wspolrzedne_source_Y'].mean()]).T
    print('\n średnie współrzędne nowych centroidów \n\n', df1.to_string())
    plt.plot(figsize=(10, 10))
    no_of_iterations = 4
    list_of_dfs = [df_sumaryczny, ]
    for _ in range(no_of_iterations):
        centroidsID = []
        data_centroid_lines = []
        # SNAP the new geometric centres to a graph node
        for index, coordX, coordY in zip(df1.index, df1.A_to_sa_wspolrzedne_source_X, df1.A_to_sa_wspolrzedne_source_Y):
            # print (index, coordX, coordY)
            qstring = f'SELECT id, source, ST_AsText(geom), target FROM public."500m_g" ORDER BY geom <-> ST_SetSRID(ST_MakePoint({coordX},{coordY}), 4326) LIMIT 1'
            print(qstring)
            cursor.execute(qstring)
            nearese = cursor.fetchall()
            data_centroid_lines.append(nearese)
            centroidsID.append(nearese[0][0])
        # unpack after snapping
        print('surowe dane po snapie loop: <<<<<<<<<<<<<<<<<<\n', data_centroid_lines)
        indeksy_punktow_siatki_loop = [column[0][0] for column in data_centroid_lines]
        print('\n\nindeksy po snapie loop: \n', indeksy_punktow_siatki_loop)
        source_linii_loop = [column[0][1] for column in data_centroid_lines]
        print('\n\nsource po snapie loop: \n', source_linii_loop)
        wspolrzedna_lini_siatki_loop = [column[0][2] for column in data_centroid_lines]
        print('\n\nwspolrzedna_lini_siatki_loop po snapie: \n', wspolrzedna_lini_siatki_loop)
        target_linii_loop = [column[0][3] for column in data_centroid_lines]
        print('\n\ntarget_linii_loop po snapie: \n', target_linii_loop)
        A_to_sa_wspolrzedne_source_X_loop = [float(wspolrzedna[11:].split(',')[0].split()[0]) for wspolrzedna in
                                             wspolrzedna_lini_siatki_loop]
        A_to_sa_wspolrzedne_source_Y_loop = [float(wspolrzedna[11:].split(',')[0].split()[1]) for wspolrzedna in
                                             wspolrzedna_lini_siatki_loop]
        df_punktow_siatki_loop = pd.DataFrame(
            list(zip(indeksy_punktow_siatki_loop, source_linii_loop, target_linii_loop, wspolrzedna_lini_siatki_loop)),
            columns=['indeksy_punktow_siatki_loop', 'source_linii_loop', 'target_linii_loop',
                     'wspolrzedna_lini_siatki_loop']
        )
        print('\n\n df z punktami siatki: \n', df_punktow_siatki_loop.to_string())
        print('\n Indeksy obiektów wybranych jako centroidy w iteracji \n ',
              centroidsID)
        print(f'indeksy_punktow_siatki_loop in ({centroidsID})')
        df_filtered_loop = df_punktow_siatki_loop.query(f'indeksy_punktow_siatki_loop in ({centroidsID})')
        print('\n\n\n\nfiltered indeksy_punktow_siatki_loop \n>>>>>>>>>>>>>>>>>>>>>>>\n', df_filtered_loop.to_string())
        distances_loop = [[[my_func(source, target)] for source in df_filtered_loop.target_linii_loop] for target in
                          df_punktow_siatki.source_linii] # note: the source/target names got slightly mixed up in this df
        print('\n\n\n\ndistances_loop \n>>>>>>>>>>>>>>>>>>>>>>>\n', distances_loop)
        klasyfikacja_loop = np.array([np.argmin(i) for i in distances_loop])
        print('\nWynik klasyfikacji: \n', klasyfikacja_loop)
        df_sumaryczny_loop = pd.DataFrame(list(
            zip(indeksy_punktow_siatki, distances_loop, klasyfikacja_loop, A_to_sa_wspolrzedne_source_X,
                A_to_sa_wspolrzedne_source_Y)),
            columns=['indeksy_punktow_siatki', 'distances', 'klasyfikacja', 'A_to_sa_wspolrzedne_source_X',
                     'A_to_sa_wspolrzedne_source_Y'
                     ])
        print(df_sumaryczny_loop.to_string())
        # compute the coordinates of the line's geometric centre for the new centroids
        df1 = pd.DataFrame([df_sumaryczny_loop.groupby('klasyfikacja')['A_to_sa_wspolrzedne_source_X'].mean(),
                            df_sumaryczny_loop.groupby('klasyfikacja')['A_to_sa_wspolrzedne_source_Y'].mean()]).T
        print('\n średnie współrzędne nowych centroidów \n\n', df1.to_string())
        list_of_dfs.append(df_sumaryczny_loop)
    for one_df in list_of_dfs:
        print('\n df wynikowy \n', one_df.to_string())
    plt.plot(figsize=(10, 10))
    plt.subplots(3, 2, figsize=(10, 14))
    for numer in range(len(list_of_dfs)):
        print('!!!!!!!!!!!!!!!!!!!', numer + 1)
        df_tmp = list_of_dfs[numer]
        groups = df_tmp.groupby('klasyfikacja')
        plt.subplot(3, 2, numer + 1)
        for name, group in groups:
            plt.plot(group.A_to_sa_wspolrzedne_source_Y, group.A_to_sa_wspolrzedne_source_X, marker='o', linestyle='',
                     markersize=3, label=name)
        # TODO: also draw the centroids in every iteration
        plt.title(numer)
        # plt.legend()
        plt.grid()
    plt.show()
    # TODO -> show the results on top of a map
except(Exception, psycopg2.Error) as error:
    print("Próba połączenia zakończona niepowodzeniem", error)
finally:
    # close the established connection.
    if (connection):
        cursor.close()
        connection.close()
        print("Zakończono połączenie")
| [
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"psycopg2.connect",
"numpy.argmin",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.grid"
] | [((285, 377), 'psycopg2.connect', 'psycopg2.connect', ([], {'user': 'user', 'password': 'password', 'host': 'host', 'port': 'port', 'database': 'database3'}), '(user=user, password=password, host=host, port=port,\n database=database3)\n', (301, 377), False, 'import psycopg2\n'), ((4900, 4926), 'pandas.DataFrame', 'pd.DataFrame', (['klasyfikacja'], {}), '(klasyfikacja)\n', (4912, 4926), True, 'import pandas as pd\n'), ((5779, 5805), 'matplotlib.pyplot.plot', 'plt.plot', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (5787, 5805), True, 'import matplotlib.pyplot as plt\n'), ((9898, 9924), 'matplotlib.pyplot.plot', 'plt.plot', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (9906, 9924), True, 'import matplotlib.pyplot as plt\n'), ((9929, 9965), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'figsize': '(10, 14)'}), '(3, 2, figsize=(10, 14))\n', (9941, 9965), True, 'import matplotlib.pyplot as plt\n'), ((10481, 10491), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (10489, 10491), True, 'import matplotlib.pyplot as plt\n'), ((10496, 10506), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10504, 10506), True, 'import matplotlib.pyplot as plt\n'), ((10148, 10176), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(numer + 1)'], {}), '(3, 2, numer + 1)\n', (10159, 10176), True, 'import matplotlib.pyplot as plt\n'), ((10441, 10457), 'matplotlib.pyplot.title', 'plt.title', (['numer'], {}), '(numer)\n', (10450, 10457), True, 'import matplotlib.pyplot as plt\n'), ((4785, 4797), 'numpy.argmin', 'np.argmin', (['i'], {}), '(i)\n', (4794, 4797), True, 'import numpy as np\n'), ((10224, 10365), 'matplotlib.pyplot.plot', 'plt.plot', (['group.A_to_sa_wspolrzedne_source_Y', 'group.A_to_sa_wspolrzedne_source_X'], {'marker': '"""o"""', 'linestyle': '""""""', 'markersize': '(3)', 'label': 'name'}), "(group.A_to_sa_wspolrzedne_source_Y, group.\n A_to_sa_wspolrzedne_source_X, marker='o', linestyle='', markersize=3,\n 
label=name)\n", (10232, 10365), True, 'import matplotlib.pyplot as plt\n'), ((8844, 8856), 'numpy.argmin', 'np.argmin', (['i'], {}), '(i)\n', (8853, 8856), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import sys
import scipy.ndimage
# import matplotlib.pyplot as plt
def dodgeV2(image, mask):
    """Color-dodge blend via OpenCV: divide *image* by the inverted *mask*."""
    inverted_mask = 255 - mask
    return cv2.divide(image, inverted_mask, scale=256)
def burnV2(image, mask):
    """Color-burn blend: the inverse of dodging the inverted inputs."""
    dodged_inverse = cv2.divide(255 - image, 255 - mask, scale=256)
    return np.subtract(255, dodged_inverse)
def dodge(front, back):
    """NumPy color-dodge: brighten *front* by *back*, returning a uint8 array.

    The +1 in the denominator avoids division by zero where back == 255;
    those pixels, and any value overshooting 255, are forced to pure white.
    """
    scaled = front * 255 / (255 - back + 1)
    blown_out = (scaled > 255) | (back == 255)
    scaled[blown_out] = 255
    return scaled.astype('uint8')
# Whitens bright, low-saturation pixels (typical watermark residue) in place.
def killWaterMarks(image):
    """Erase faint watermarks from an HxWx3 array.

    On images that are colorful enough overall, any near-gray bright pixel is
    forced to pure white.  Mutates *image* in place and also returns it.
    """
    avg = np.sum(image, axis=2) / 3
    channel_sq_devs = [np.power(image[:, :, c] - avg, 2) for c in range(3)]
    colorfulness = np.power(channel_sq_devs[0] + channel_sq_devs[1] + channel_sq_devs[2], .5)
    if np.max(colorfulness) > 10:  # skip images that are almost entirely gray
        image[np.logical_and(colorfulness < 5, avg > 150)] = 255
    return image
# Takes in an already resized RGBA image. DO NOT RESIZE AFTERWARD. You've been warned.
def sketchify(resized):
    """Turn a 4-channel (RGBA) image into a pencil sketch.

    The alpha channel (channel 3) of *resized* is rewritten in place so that
    only the dark "inked" strokes stay opaque; the returned array stacks the
    normalized grayscale sketch with that recomputed alpha mask.

    :param resized: HxWx4 array; modified in place (RGB cleaned, alpha rewritten).
    :return: HxWx4 array (sketch replicated into RGB + new alpha).
    """
    alpha = resized[:, :, 3]
    marked_img = resized[:, :, :3]
    # killWaterMarks cleans the RGB channels in place; the original bound the
    # (identical) return value to an unused local.
    killWaterMarks(marked_img)
    # Classic dodge-blend pencil effect: grayscale, invert, blur, dodge.
    gray_img = np.dot(marked_img[..., :3], [0.299, 0.587, 0.114])
    gray_inv_img = 255 - gray_img
    # scipy.ndimage.filters was deprecated and later removed; call gaussian_filter directly.
    blur_img = scipy.ndimage.gaussian_filter(gray_inv_img, sigma=11)
    blend = dodge(blur_img, gray_img)
    sketch = cv2.normalize(blend, None, 0, 255, cv2.NORM_MINMAX)
    normalized = np.copy(sketch)
    normalized[normalized < 240] = 0  # keep only the dark "core" strokes
    # Smear the core darkness outward so interiors of dark polygons become
    # darker (more opaque) than their surroundings.
    reblur = scipy.ndimage.gaussian_filter(normalized, sigma=7)
    alpha[alpha > 0] = 0
    mask = reblur < 250
    alpha[mask] = 255
    rgb = cv2.cvtColor(normalized, cv2.COLOR_GRAY2RGB)
    return np.stack((rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2], alpha), axis=2)
def sketchifyCompare(resized):
    """Debug view: run the sketch pipeline and show its intermediate stages.

    Displays the input, the raw dodge sketch, the thresholded sketch and the
    resulting alpha mask in a 2x2 matplotlib figure.  Like sketchify(), the
    alpha channel of *resized* is rewritten in place.
    """
    # Local import: the module-level matplotlib import is commented out, so the
    # original raised NameError on `plt`; importing here fixes that without
    # paying the import cost when this debug helper is unused.
    import matplotlib.pyplot as plt
    alpha = resized[:, :, 3]
    marked_img = resized[:, :, :3]
    killWaterMarks(marked_img)  # cleans the RGB channels in place
    gray_img = np.dot(marked_img[..., :3], [0.299, 0.587, 0.114])
    gray_inv_img = 255 - gray_img
    # scipy.ndimage.filters was deprecated and later removed; call gaussian_filter directly.
    blur_img = scipy.ndimage.gaussian_filter(gray_inv_img, sigma=11)
    blend = dodge(blur_img, gray_img)
    sketch = cv2.normalize(blend, None, 0, 255, cv2.NORM_MINMAX)
    normalized = np.copy(sketch)
    normalized[normalized < 240] = 0  # keep only the dark "core" strokes
    reblur = scipy.ndimage.gaussian_filter(normalized, sigma=7)
    alpha[alpha > 0] = 0
    mask = reblur < 250
    alpha[mask] = 255
    fig = plt.figure()
    fig.add_subplot(2, 2, 1)
    plt.imshow(cv2.cvtColor(resized, cv2.COLOR_RGB2BGR))
    fig.add_subplot(2, 2, 2)
    plt.imshow(cv2.cvtColor(sketch, cv2.COLOR_GRAY2BGR))
    fig.add_subplot(2, 2, 3)
    plt.imshow(cv2.cvtColor(normalized, cv2.COLOR_GRAY2BGR))
    fig.add_subplot(2, 2, 4)
    plt.imshow(cv2.cvtColor(alpha, cv2.COLOR_GRAY2BGR))
    plt.show()
if __name__ == "__main__":
if len(sys.argv) > 1:
name = sys.argv[1]
else:
name = "images/dog1.png"
print(name)
# name = "images/bicycle.png"
pencilized = sketchify(cv2.imread(name, cv2.IMREAD_UNCHANGED))
cv2.imshow("pencil sketch", pencilized[:, :, :3])
cv2.waitKey(0)
#sketchifyCompare(cv2.imread("images/cat0.png", cv2.IMREAD_UNCHANGED))
| [
"numpy.stack",
"numpy.sum",
"numpy.copy",
"numpy.logical_and",
"cv2.cvtColor",
"cv2.waitKey",
"numpy.power",
"cv2.imread",
"numpy.max",
"cv2.divide",
"numpy.logical_or",
"cv2.normalize",
"numpy.dot",
"cv2.imshow"
] | [((134, 174), 'cv2.divide', 'cv2.divide', (['image', '(255 - mask)'], {'scale': '(256)'}), '(image, 255 - mask, scale=256)\n', (144, 174), False, 'import cv2\n'), ((1223, 1273), 'numpy.dot', 'np.dot', (['marked_img[..., :3]', '[0.299, 0.587, 0.114]'], {}), '(marked_img[..., :3], [0.299, 0.587, 0.114])\n', (1229, 1273), True, 'import numpy as np\n'), ((1431, 1482), 'cv2.normalize', 'cv2.normalize', (['blend', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(blend, None, 0, 255, cv2.NORM_MINMAX)\n', (1444, 1482), False, 'import cv2\n'), ((1541, 1556), 'numpy.copy', 'np.copy', (['sketch'], {}), '(sketch)\n', (1548, 1556), True, 'import numpy as np\n'), ((2107, 2151), 'cv2.cvtColor', 'cv2.cvtColor', (['normalized', 'cv2.COLOR_GRAY2RGB'], {}), '(normalized, cv2.COLOR_GRAY2RGB)\n', (2119, 2151), False, 'import cv2\n'), ((2163, 2230), 'numpy.stack', 'np.stack', (['(rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2], alpha)'], {'axis': '(2)'}), '((rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2], alpha), axis=2)\n', (2171, 2230), True, 'import numpy as np\n'), ((2538, 2588), 'numpy.dot', 'np.dot', (['marked_img[..., :3]', '[0.299, 0.587, 0.114]'], {}), '(marked_img[..., :3], [0.299, 0.587, 0.114])\n', (2544, 2588), True, 'import numpy as np\n'), ((2746, 2797), 'cv2.normalize', 'cv2.normalize', (['blend', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(blend, None, 0, 255, cv2.NORM_MINMAX)\n', (2759, 2797), False, 'import cv2\n'), ((2856, 2871), 'numpy.copy', 'np.copy', (['sketch'], {}), '(sketch)\n', (2863, 2871), True, 'import numpy as np\n'), ((3919, 3968), 'cv2.imshow', 'cv2.imshow', (['"""pencil sketch"""', 'pencilized[:, :, :3]'], {}), "('pencil sketch', pencilized[:, :, :3])\n", (3929, 3968), False, 'import cv2\n'), ((3973, 3987), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3984, 3987), False, 'import cv2\n'), ((226, 272), 'cv2.divide', 'cv2.divide', (['(255 - image)', '(255 - mask)'], {'scale': '(256)'}), '(255 - image, 255 - mask, scale=256)\n', (236, 272), 
False, 'import cv2\n'), ((356, 396), 'numpy.logical_or', 'np.logical_or', (['(result > 255)', '(back == 255)'], {}), '(result > 255, back == 255)\n', (369, 396), True, 'import numpy as np\n'), ((494, 515), 'numpy.sum', 'np.sum', (['image'], {'axis': '(2)'}), '(image, axis=2)\n', (500, 515), True, 'import numpy as np\n'), ((689, 709), 'numpy.max', 'np.max', (['colorfulness'], {}), '(colorfulness)\n', (695, 709), True, 'import numpy as np\n'), ((3356, 3396), 'cv2.cvtColor', 'cv2.cvtColor', (['resized', 'cv2.COLOR_RGB2BGR'], {}), '(resized, cv2.COLOR_RGB2BGR)\n', (3368, 3396), False, 'import cv2\n'), ((3442, 3482), 'cv2.cvtColor', 'cv2.cvtColor', (['sketch', 'cv2.COLOR_GRAY2BGR'], {}), '(sketch, cv2.COLOR_GRAY2BGR)\n', (3454, 3482), False, 'import cv2\n'), ((3528, 3572), 'cv2.cvtColor', 'cv2.cvtColor', (['normalized', 'cv2.COLOR_GRAY2BGR'], {}), '(normalized, cv2.COLOR_GRAY2BGR)\n', (3540, 3572), False, 'import cv2\n'), ((3618, 3657), 'cv2.cvtColor', 'cv2.cvtColor', (['alpha', 'cv2.COLOR_GRAY2BGR'], {}), '(alpha, cv2.COLOR_GRAY2BGR)\n', (3630, 3657), False, 'import cv2\n'), ((3875, 3913), 'cv2.imread', 'cv2.imread', (['name', 'cv2.IMREAD_UNCHANGED'], {}), '(name, cv2.IMREAD_UNCHANGED)\n', (3885, 3913), False, 'import cv2\n'), ((655, 675), 'numpy.power', 'np.power', (['(b - avg)', '(2)'], {}), '(b - avg, 2)\n', (663, 675), True, 'import numpy as np\n'), ((773, 816), 'numpy.logical_and', 'np.logical_and', (['(colorfulness < 5)', '(avg > 150)'], {}), '(colorfulness < 5, avg > 150)\n', (787, 816), True, 'import numpy as np\n'), ((613, 633), 'numpy.power', 'np.power', (['(r - avg)', '(2)'], {}), '(r - avg, 2)\n', (621, 633), True, 'import numpy as np\n'), ((634, 654), 'numpy.power', 'np.power', (['(g - avg)', '(2)'], {}), '(g - avg, 2)\n', (642, 654), True, 'import numpy as np\n')] |
""" Utils functions for main.py """
from datetime import timedelta
import numpy as np
def floor_30_minutes_dt(dt):
"""
Floor a datetime by 30 mins.
For example:
2021-01-01 17:01:01 --> 2021-01-01 17:00:00
2021-01-01 17:35:01 --> 2021-01-01 17:30:00
:param dt:
:return:
"""
approx = np.floor(dt.minute / 30.0) * 30
dt = dt.replace(minute=0)
dt = dt.replace(second=0)
dt = dt.replace(microsecond=0)
dt += timedelta(minutes=approx)
return dt
| [
"numpy.floor",
"datetime.timedelta"
] | [((460, 485), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'approx'}), '(minutes=approx)\n', (469, 485), False, 'from datetime import timedelta\n'), ((323, 349), 'numpy.floor', 'np.floor', (['(dt.minute / 30.0)'], {}), '(dt.minute / 30.0)\n', (331, 349), True, 'import numpy as np\n')] |
import numpy as np
import reaclib
ip = 0
ihe4 = 1
ic12 = 2
ic13 = 3
in13 = 4
in14 = 5
in15 = 6
io14 = 7
io15 = 8
nnuc = 9
A = np.zeros((nnuc), dtype=np.int32)
A[ip] = 1
A[ihe4] = 4
A[ic12] = 12
A[ic13] = 13
A[in13] = 13
A[in14] = 14
A[in15] = 15
A[io14] = 14
A[io15] = 15
def c12_pg_n13(tf):
# p + c12 --> n13
rate = 0.0
# ls09n
rate += np.exp( 17.1482 + -13.692*tf.T913i + -0.230881*tf.T913
+ 4.44362*tf.T9 + -3.15898*tf.T953 + -0.666667*tf.lnT9)
# ls09r
rate += np.exp( 17.5428 + -3.77849*tf.T9i + -5.10735*tf.T913i + -2.24111*tf.T913
+ 0.148883*tf.T9 + -1.5*tf.lnT9)
return rate
def c13_pg_n14(tf):
# p + c13 --> n14
rate = 0.0
# nacrn
rate += np.exp( 18.5155 + -13.72*tf.T913i + -0.450018*tf.T913
+ 3.70823*tf.T9 + -1.70545*tf.T953 + -0.666667*tf.lnT9)
# nacrr
rate += np.exp( 13.9637 + -5.78147*tf.T9i + -0.196703*tf.T913
+ 0.142126*tf.T9 + -0.0238912*tf.T953 + -1.5*tf.lnT9)
# nacrr
rate += np.exp( 15.1825 + -13.5543*tf.T9i
+ -1.5*tf.lnT9)
return rate
def n13_c13(tf):
# n13 --> c13
rate = 0.0
# wc12w
rate += np.exp( -6.7601)
return rate
def n13_pg_o14(tf):
# p + n13 --> o14
rate = 0.0
# lg06n
rate += np.exp( 18.1356 + -15.1676*tf.T913i + 0.0955166*tf.T913
+ 3.0659*tf.T9 + -0.507339*tf.T953 + -0.666667*tf.lnT9)
# lg06r
rate += np.exp( 10.9971 + -6.12602*tf.T9i + 1.57122*tf.T913i
+ -1.5*tf.lnT9)
return rate
def n14_pg_o15(tf):
# p + n14 --> o15
rate = 0.0
# im05n
rate += np.exp( 17.01 + -15.193*tf.T913i + -0.161954*tf.T913
+ -7.52123*tf.T9 + -0.987565*tf.T953 + -0.666667*tf.lnT9)
# im05r
rate += np.exp( 6.73578 + -4.891*tf.T9i
+ 0.0682*tf.lnT9)
# im05r
rate += np.exp( 7.65444 + -2.998*tf.T9i
+ -1.5*tf.lnT9)
# im05n
rate += np.exp( 20.1169 + -15.193*tf.T913i + -4.63975*tf.T913
+ 9.73458*tf.T9 + -9.55051*tf.T953 + 0.333333*tf.lnT9)
return rate
def n15_pa_c12(tf):
# p + n15 --> he4 + c12
rate = 0.0
# nacrn
rate += np.exp( 27.4764 + -15.253*tf.T913i + 1.59318*tf.T913
+ 2.4479*tf.T9 + -2.19708*tf.T953 + -0.666667*tf.lnT9)
# nacrr
rate += np.exp( -6.57522 + -1.1638*tf.T9i + 22.7105*tf.T913
+ -2.90707*tf.T9 + 0.205754*tf.T953 + -1.5*tf.lnT9)
# nacrr
rate += np.exp( 20.8972 + -7.406*tf.T9i
+ -1.5*tf.lnT9)
# nacrr
rate += np.exp( -4.87347 + -2.02117*tf.T9i + 30.8497*tf.T913
+ -8.50433*tf.T9 + -1.54426*tf.T953 + -1.5*tf.lnT9)
return rate
def o14_n14(tf):
# o14 --> n14
rate = 0.0
# wc12w
rate += np.exp( -4.62354)
return rate
def o15_n15(tf):
# o15 --> n15
rate = 0.0
# wc12w
rate += np.exp( -5.17053)
return rate
def rhs(t, Y, rho, T):
tf = reaclib.Tfactors(T)
lambda_c12_pg_n13 = c12_pg_n13(tf)
lambda_c13_pg_n14 = c13_pg_n14(tf)
lambda_n13_c13 = n13_c13(tf)
lambda_n13_pg_o14 = n13_pg_o14(tf)
lambda_n14_pg_o15 = n14_pg_o15(tf)
lambda_n15_pa_c12 = n15_pa_c12(tf)
lambda_o14_n14 = o14_n14(tf)
lambda_o15_n15 = o15_n15(tf)
dYdt = np.zeros((nnuc), dtype=np.float64)
dYdt[ip] = (
-rho*Y[ip]*Y[ic12]*lambda_c12_pg_n13
-rho*Y[ip]*Y[ic13]*lambda_c13_pg_n14
-rho*Y[ip]*Y[in13]*lambda_n13_pg_o14
-rho*Y[ip]*Y[in14]*lambda_n14_pg_o15
-rho*Y[ip]*Y[in15]*lambda_n15_pa_c12
)
dYdt[ihe4] = (
+rho*Y[ip]*Y[in15]*lambda_n15_pa_c12
)
dYdt[ic12] = (
-rho*Y[ip]*Y[ic12]*lambda_c12_pg_n13
+rho*Y[ip]*Y[in15]*lambda_n15_pa_c12
)
dYdt[ic13] = (
-rho*Y[ip]*Y[ic13]*lambda_c13_pg_n14
+Y[in13]*lambda_n13_c13
)
dYdt[in13] = (
-Y[in13]*lambda_n13_c13
-rho*Y[ip]*Y[in13]*lambda_n13_pg_o14
+rho*Y[ip]*Y[ic12]*lambda_c12_pg_n13
)
dYdt[in14] = (
-rho*Y[ip]*Y[in14]*lambda_n14_pg_o15
+rho*Y[ip]*Y[ic13]*lambda_c13_pg_n14
+Y[io14]*lambda_o14_n14
)
dYdt[in15] = (
-rho*Y[ip]*Y[in15]*lambda_n15_pa_c12
+Y[io15]*lambda_o15_n15
)
dYdt[io14] = (
-Y[io14]*lambda_o14_n14
+rho*Y[ip]*Y[in13]*lambda_n13_pg_o14
)
dYdt[io15] = (
-Y[io15]*lambda_o15_n15
+rho*Y[ip]*Y[in14]*lambda_n14_pg_o15
)
return dYdt
| [
"numpy.zeros",
"numpy.exp",
"reaclib.Tfactors"
] | [((128, 158), 'numpy.zeros', 'np.zeros', (['nnuc'], {'dtype': 'np.int32'}), '(nnuc, dtype=np.int32)\n', (136, 158), True, 'import numpy as np\n'), ((362, 485), 'numpy.exp', 'np.exp', (['(17.1482 + -13.692 * tf.T913i + -0.230881 * tf.T913 + 4.44362 * tf.T9 + -\n 3.15898 * tf.T953 + -0.666667 * tf.lnT9)'], {}), '(17.1482 + -13.692 * tf.T913i + -0.230881 * tf.T913 + 4.44362 * tf.T9 +\n -3.15898 * tf.T953 + -0.666667 * tf.lnT9)\n', (368, 485), True, 'import numpy as np\n'), ((516, 635), 'numpy.exp', 'np.exp', (['(17.5428 + -3.77849 * tf.T9i + -5.10735 * tf.T913i + -2.24111 * tf.T913 + \n 0.148883 * tf.T9 + -1.5 * tf.lnT9)'], {}), '(17.5428 + -3.77849 * tf.T9i + -5.10735 * tf.T913i + -2.24111 * tf.\n T913 + 0.148883 * tf.T9 + -1.5 * tf.lnT9)\n', (522, 635), True, 'import numpy as np\n'), ((749, 871), 'numpy.exp', 'np.exp', (['(18.5155 + -13.72 * tf.T913i + -0.450018 * tf.T913 + 3.70823 * tf.T9 + -\n 1.70545 * tf.T953 + -0.666667 * tf.lnT9)'], {}), '(18.5155 + -13.72 * tf.T913i + -0.450018 * tf.T913 + 3.70823 * tf.T9 +\n -1.70545 * tf.T953 + -0.666667 * tf.lnT9)\n', (755, 871), True, 'import numpy as np\n'), ((902, 1022), 'numpy.exp', 'np.exp', (['(13.9637 + -5.78147 * tf.T9i + -0.196703 * tf.T913 + 0.142126 * tf.T9 + -\n 0.0238912 * tf.T953 + -1.5 * tf.lnT9)'], {}), '(13.9637 + -5.78147 * tf.T9i + -0.196703 * tf.T913 + 0.142126 * tf.T9 +\n -0.0238912 * tf.T953 + -1.5 * tf.lnT9)\n', (908, 1022), True, 'import numpy as np\n'), ((1053, 1105), 'numpy.exp', 'np.exp', (['(15.1825 + -13.5543 * tf.T9i + -1.5 * tf.lnT9)'], {}), '(15.1825 + -13.5543 * tf.T9i + -1.5 * tf.lnT9)\n', (1059, 1105), True, 'import numpy as np\n'), ((1223, 1238), 'numpy.exp', 'np.exp', (['(-6.7601)'], {}), '(-6.7601)\n', (1229, 1238), True, 'import numpy as np\n'), ((1349, 1473), 'numpy.exp', 'np.exp', (['(18.1356 + -15.1676 * tf.T913i + 0.0955166 * tf.T913 + 3.0659 * tf.T9 + -\n 0.507339 * tf.T953 + -0.666667 * tf.lnT9)'], {}), '(18.1356 + -15.1676 * tf.T913i + 0.0955166 * tf.T913 + 3.0659 * tf.T9 
+\n -0.507339 * tf.T953 + -0.666667 * tf.lnT9)\n', (1355, 1473), True, 'import numpy as np\n'), ((1504, 1577), 'numpy.exp', 'np.exp', (['(10.9971 + -6.12602 * tf.T9i + 1.57122 * tf.T913i + -1.5 * tf.lnT9)'], {}), '(10.9971 + -6.12602 * tf.T9i + 1.57122 * tf.T913i + -1.5 * tf.lnT9)\n', (1510, 1577), True, 'import numpy as np\n'), ((1700, 1823), 'numpy.exp', 'np.exp', (['(17.01 + -15.193 * tf.T913i + -0.161954 * tf.T913 + -7.52123 * tf.T9 + -\n 0.987565 * tf.T953 + -0.666667 * tf.lnT9)'], {}), '(17.01 + -15.193 * tf.T913i + -0.161954 * tf.T913 + -7.52123 * tf.T9 +\n -0.987565 * tf.T953 + -0.666667 * tf.lnT9)\n', (1706, 1823), True, 'import numpy as np\n'), ((1854, 1906), 'numpy.exp', 'np.exp', (['(6.73578 + -4.891 * tf.T9i + 0.0682 * tf.lnT9)'], {}), '(6.73578 + -4.891 * tf.T9i + 0.0682 * tf.lnT9)\n', (1860, 1906), True, 'import numpy as np\n'), ((1947, 1997), 'numpy.exp', 'np.exp', (['(7.65444 + -2.998 * tf.T9i + -1.5 * tf.lnT9)'], {}), '(7.65444 + -2.998 * tf.T9i + -1.5 * tf.lnT9)\n', (1953, 1997), True, 'import numpy as np\n'), ((2038, 2159), 'numpy.exp', 'np.exp', (['(20.1169 + -15.193 * tf.T913i + -4.63975 * tf.T913 + 9.73458 * tf.T9 + -\n 9.55051 * tf.T953 + 0.333333 * tf.lnT9)'], {}), '(20.1169 + -15.193 * tf.T913i + -4.63975 * tf.T913 + 9.73458 * tf.T9 +\n -9.55051 * tf.T953 + 0.333333 * tf.lnT9)\n', (2044, 2159), True, 'import numpy as np\n'), ((2280, 2401), 'numpy.exp', 'np.exp', (['(27.4764 + -15.253 * tf.T913i + 1.59318 * tf.T913 + 2.4479 * tf.T9 + -\n 2.19708 * tf.T953 + -0.666667 * tf.lnT9)'], {}), '(27.4764 + -15.253 * tf.T913i + 1.59318 * tf.T913 + 2.4479 * tf.T9 + \n -2.19708 * tf.T953 + -0.666667 * tf.lnT9)\n', (2286, 2401), True, 'import numpy as np\n'), ((2431, 2547), 'numpy.exp', 'np.exp', (['(-6.57522 + -1.1638 * tf.T9i + 22.7105 * tf.T913 + -2.90707 * tf.T9 + \n 0.205754 * tf.T953 + -1.5 * tf.lnT9)'], {}), '(-6.57522 + -1.1638 * tf.T9i + 22.7105 * tf.T913 + -2.90707 * tf.T9 +\n 0.205754 * tf.T953 + -1.5 * tf.lnT9)\n', (2437, 2547), True, 
'import numpy as np\n'), ((2578, 2628), 'numpy.exp', 'np.exp', (['(20.8972 + -7.406 * tf.T9i + -1.5 * tf.lnT9)'], {}), '(20.8972 + -7.406 * tf.T9i + -1.5 * tf.lnT9)\n', (2584, 2628), True, 'import numpy as np\n'), ((2669, 2786), 'numpy.exp', 'np.exp', (['(-4.87347 + -2.02117 * tf.T9i + 30.8497 * tf.T913 + -8.50433 * tf.T9 + -\n 1.54426 * tf.T953 + -1.5 * tf.lnT9)'], {}), '(-4.87347 + -2.02117 * tf.T9i + 30.8497 * tf.T913 + -8.50433 * tf.T9 +\n -1.54426 * tf.T953 + -1.5 * tf.lnT9)\n', (2675, 2786), True, 'import numpy as np\n'), ((2894, 2910), 'numpy.exp', 'np.exp', (['(-4.62354)'], {}), '(-4.62354)\n', (2900, 2910), True, 'import numpy as np\n'), ((3014, 3030), 'numpy.exp', 'np.exp', (['(-5.17053)'], {}), '(-5.17053)\n', (3020, 3030), True, 'import numpy as np\n'), ((3088, 3107), 'reaclib.Tfactors', 'reaclib.Tfactors', (['T'], {}), '(T)\n', (3104, 3107), False, 'import reaclib\n'), ((3415, 3447), 'numpy.zeros', 'np.zeros', (['nnuc'], {'dtype': 'np.float64'}), '(nnuc, dtype=np.float64)\n', (3423, 3447), True, 'import numpy as np\n')] |
# Utilities for loading data from Stanford Dogs Dataset.
#
# <NAME>, 08/03/2021
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
import datetime
import os
from PIL import Image
def load_dataset():
"""
Load stanford_dogs tensorflow dataset.
:return:
ds_train (tf.data.DataSet) The requested training dataset.
ds_test (tf.data.DataSet) The requested test dataset.
ds_info (tfds.core.DatasetInfo) The requested dataset info.
"""
(ds_train, ds_test), ds_info = tfds.load('stanford_dogs',
split=['train', 'test'],
shuffle_files=True,
as_supervised=False,
with_info=True,
data_dir='data/tfds')
return ds_train, ds_test, ds_info
def preprocess(data, image_size, num_labels, cast=True, resize=True, normalize=True, one_hot=True):
"""
Process an image.
:param data: Tensorflow datset containing an image and label.
:param image_size: Size of the image. Images may be resized to this size. E.g. (224, 224)
:param num_labels: Number of labels for prediction.
:param cast: Flag for casting to float32. True or False.
:param resize: Flag for resizing the image. True or False.
:param normalize: Flag for normalizing the image pixel values from 0-1. True or False.
:param one_hot: Flag for one hot encoding the labels. True or False.
:return: Processed image and encoded label.
"""
# processed_image = tf.keras.applications.resnet.preprocess_input(data['image'])
processed_image = data['image']
label = data['label']
if cast:
processed_image = tf.cast(processed_image, tf.float32)
if resize:
processed_image = tf.image.resize(processed_image, image_size, method='nearest')
if normalize:
processed_image = processed_image / 255.
if one_hot:
label = tf.one_hot(label, num_labels)
return processed_image, label
def prepare(dataset, image_shape, num_classes, batch_size=None):
"""
Prepare an input pipeline for training a dataset.
:param dataset: The dataset containing training data.
:param image_shape: A common shape of the input image. Images with different sizes will be resized. E.g. (80, 80, 3)
:param num_classes: Number of prediction classes.
:param batch_size: Batch size for training.
:return: Prepared dataset.
"""
dataset = dataset.map(lambda x: preprocess(x, image_shape[0:-1], num_classes),
num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.cache()
if batch_size is not None:
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(buffer_size=tf.data.AUTOTUNE)
return dataset
def analyze():
ds_train, ds_test, ds_info = load_dataset()
img_width = []
img_height = []
for row in ds_train:
img_width.append(row['image'].shape[0])
img_height.append(row['image'].shape[1])
for row in ds_test:
img_width.append(row['image'].shape[0])
img_height.append(row['image'].shape[1])
plt.subplot(211)
plt.hist(x=img_width, bins=20, alpha=0.7, density=True)
plt.axvline(max(set(img_width), key=img_width.count), color='r')
plt.ylabel('Pixels')
plt.ylabel('Frequency')
plt.title('Image width distribution')
plt.grid(axis='y', alpha=0.75)
plt.subplot(212)
plt.hist(x=img_height, bins=20, alpha=0.7)
plt.axvline(max(set(img_height), key=img_height.count), color='r')
plt.xlabel('Pixels')
plt.ylabel('Frequency')
plt.title('Image height distribution')
plt.grid(axis='y', alpha=0.75)
plt.subplots_adjust(hspace=0.5)
plt.show()
def decode(label_index):
ds_train, _, _ = load_dataset()
for row in ds_train.take(5):
label = row['label']
# print(label)
print(label.decode_example())
def show_examples():
"""
Display a random set of examples in the stanford_dogs dataset
:return: None
"""
ds_train, _, ds_info = load_dataset()
ds_train = ds_train.map(lambda x: (preprocess(x, (80, 80), 120, cast=False, resize=True, one_hot=False)))
tfds.show_examples(ds_train, ds_info)
def tensor_to_image(tensor):
tensor = tensor * 255
tensor = np.array(tensor, dtype=np.uint8)
if np.ndim(tensor) > 3:
assert tensor.shape[0] == 1
tensor = tensor[0]
return Image.fromarray(tensor)
def load_image(url_list, output_shape=(224, 224, 3)):
cwd = os.path.dirname(os.path.abspath(__file__))
timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
img_list = []
file_list = []
for idx, url in enumerate(url_list):
file_name = f"{cwd}/data/downloads/image-{timestamp}_{idx+1}.jpg"
image_file = tf.keras.utils.get_file(file_name, url, extract=True)
img = tf.keras.preprocessing.image.load_img(image_file).resize(output_shape[:-1])
img_list.append(tf.keras.preprocessing.image.img_to_array(img) / 255.)
file_list.append(file_name)
return img_list, file_list
def load_labels():
breed_names = []
with open("data/breed_names.txt", 'r') as f:
for line in f:
line = line.rstrip()
line = line.split('-')[1:]
line = ''.join(line)
breed_names.append(line)
return breed_names
def load_labels_from_dataset():
ds_train, ds_test, ds_info = load_dataset()
unformatted_labels = ds_info.features['label'].names
breed_names = [''.join(item.split('-')[1:]) for item in unformatted_labels]
return breed_names
def main():
show_examples()
# analyze()
# print(load_labels_from_dataset())
if __name__ == "__main__":
main()
| [
"matplotlib.pyplot.title",
"tensorflow_datasets.load",
"matplotlib.pyplot.xlabel",
"os.path.abspath",
"tensorflow.one_hot",
"numpy.ndim",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.cast",
"tensorflow.keras.utils.get_file",
"datetime.datetime.now",
"matplotlib.pyplot.show",
"t... | [((553, 687), 'tensorflow_datasets.load', 'tfds.load', (['"""stanford_dogs"""'], {'split': "['train', 'test']", 'shuffle_files': '(True)', 'as_supervised': '(False)', 'with_info': '(True)', 'data_dir': '"""data/tfds"""'}), "('stanford_dogs', split=['train', 'test'], shuffle_files=True,\n as_supervised=False, with_info=True, data_dir='data/tfds')\n", (562, 687), True, 'import tensorflow_datasets as tfds\n'), ((3261, 3277), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (3272, 3277), True, 'import matplotlib.pyplot as plt\n'), ((3282, 3337), 'matplotlib.pyplot.hist', 'plt.hist', ([], {'x': 'img_width', 'bins': '(20)', 'alpha': '(0.7)', 'density': '(True)'}), '(x=img_width, bins=20, alpha=0.7, density=True)\n', (3290, 3337), True, 'import matplotlib.pyplot as plt\n'), ((3411, 3431), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pixels"""'], {}), "('Pixels')\n", (3421, 3431), True, 'import matplotlib.pyplot as plt\n'), ((3436, 3459), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (3446, 3459), True, 'import matplotlib.pyplot as plt\n'), ((3464, 3501), 'matplotlib.pyplot.title', 'plt.title', (['"""Image width distribution"""'], {}), "('Image width distribution')\n", (3473, 3501), True, 'import matplotlib.pyplot as plt\n'), ((3506, 3536), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""', 'alpha': '(0.75)'}), "(axis='y', alpha=0.75)\n", (3514, 3536), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3558), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (3553, 3558), True, 'import matplotlib.pyplot as plt\n'), ((3563, 3605), 'matplotlib.pyplot.hist', 'plt.hist', ([], {'x': 'img_height', 'bins': '(20)', 'alpha': '(0.7)'}), '(x=img_height, bins=20, alpha=0.7)\n', (3571, 3605), True, 'import matplotlib.pyplot as plt\n'), ((3681, 3701), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Pixels"""'], {}), "('Pixels')\n", (3691, 3701), True, 'import matplotlib.pyplot 
as plt\n'), ((3706, 3729), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (3716, 3729), True, 'import matplotlib.pyplot as plt\n'), ((3734, 3772), 'matplotlib.pyplot.title', 'plt.title', (['"""Image height distribution"""'], {}), "('Image height distribution')\n", (3743, 3772), True, 'import matplotlib.pyplot as plt\n'), ((3777, 3807), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""', 'alpha': '(0.75)'}), "(axis='y', alpha=0.75)\n", (3785, 3807), True, 'import matplotlib.pyplot as plt\n'), ((3813, 3844), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (3832, 3844), True, 'import matplotlib.pyplot as plt\n'), ((3849, 3859), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3857, 3859), True, 'import matplotlib.pyplot as plt\n'), ((4325, 4362), 'tensorflow_datasets.show_examples', 'tfds.show_examples', (['ds_train', 'ds_info'], {}), '(ds_train, ds_info)\n', (4343, 4362), True, 'import tensorflow_datasets as tfds\n'), ((4433, 4465), 'numpy.array', 'np.array', (['tensor'], {'dtype': 'np.uint8'}), '(tensor, dtype=np.uint8)\n', (4441, 4465), True, 'import numpy as np\n'), ((4568, 4591), 'PIL.Image.fromarray', 'Image.fromarray', (['tensor'], {}), '(tensor)\n', (4583, 4591), False, 'from PIL import Image\n'), ((1825, 1861), 'tensorflow.cast', 'tf.cast', (['processed_image', 'tf.float32'], {}), '(processed_image, tf.float32)\n', (1832, 1861), True, 'import tensorflow as tf\n'), ((1903, 1965), 'tensorflow.image.resize', 'tf.image.resize', (['processed_image', 'image_size'], {'method': '"""nearest"""'}), "(processed_image, image_size, method='nearest')\n", (1918, 1965), True, 'import tensorflow as tf\n'), ((2065, 2094), 'tensorflow.one_hot', 'tf.one_hot', (['label', 'num_labels'], {}), '(label, num_labels)\n', (2075, 2094), True, 'import tensorflow as tf\n'), ((4473, 4488), 'numpy.ndim', 'np.ndim', (['tensor'], {}), '(tensor)\n', (4480, 4488), True, 'import 
numpy as np\n'), ((4674, 4699), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4689, 4699), False, 'import os\n'), ((4940, 4993), 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', (['file_name', 'url'], {'extract': '(True)'}), '(file_name, url, extract=True)\n', (4963, 4993), True, 'import tensorflow as tf\n'), ((4717, 4740), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4738, 4740), False, 'import datetime\n'), ((5008, 5057), 'tensorflow.keras.preprocessing.image.load_img', 'tf.keras.preprocessing.image.load_img', (['image_file'], {}), '(image_file)\n', (5045, 5057), True, 'import tensorflow as tf\n'), ((5108, 5154), 'tensorflow.keras.preprocessing.image.img_to_array', 'tf.keras.preprocessing.image.img_to_array', (['img'], {}), '(img)\n', (5149, 5154), True, 'import tensorflow as tf\n')] |
"""
Created on Mar 5, 2018
@author: lubo
"""
import logging
import numpy as np
from dae.genome.genomes_db import Genome
from dae.variants.attributes import Sex, VariantDesc, VariantType
logger = logging.getLogger(__name__)
GENOTYPE_TYPE = np.int8
BEST_STATE_TYPE = np.int8
def mat2str(mat, col_sep="", row_sep="/"):
return row_sep.join(
[
col_sep.join([str(n) if n >= 0 else "?" for n in mat[i, :]])
for i in range(mat.shape[0])
]
)
def str2mat(mat, col_sep="", row_sep="/"):
if col_sep == "":
return np.array(
[[int(c) for c in r] for r in mat.split(row_sep)],
dtype=GENOTYPE_TYPE,
)
return np.array(
[[int(v) for v in r.split(col_sep)] for r in mat.split(row_sep)],
dtype=GENOTYPE_TYPE,
)
def best2gt(best_state):
rows, cols = best_state.shape
genotype = np.zeros(shape=(2, cols), dtype=GENOTYPE_TYPE)
# genotype[1, :] = -2
ploidy = np.sum(best_state, 0)
for allele_index in range(rows):
best_state_row = best_state[allele_index, :]
for col in range(cols):
if best_state_row[col] == 2:
genotype[:, col] = allele_index
elif best_state_row[col] == 1:
if genotype[0, col] == 0:
genotype[0, col] = allele_index
if ploidy[col] == 1:
genotype[1, col] = -2
else:
genotype[1, col] = allele_index
return genotype
def fgt2str(fgt, sep=";"):
result = []
for i in range(len(fgt)):
v0 = fgt[i][0]
v1 = fgt[i][1]
if v0 < 0:
v0 = "."
if v1 < 0:
v1 = "."
result.append(f"{v0}/{v1}")
return sep.join(result)
def str2fgt(fgt, split=";"):
cols = fgt.split(";")
result = np.zeros(shape=(2, len(cols)), dtype=GENOTYPE_TYPE)
for idx, col in enumerate(cols):
sp = col.split("/")
if sp[0] == ".":
v0 = -1
else:
v0 = int(sp[0])
if sp[1] == ".":
v1 = -1
else:
v1 = int(sp[1])
result[0][idx] = v0
result[1][idx] = v1
return result
def gt2str(gt):
assert gt.shape[0] == 2
result = []
for i in range(gt.shape[1]):
v0 = gt[0, i]
v1 = gt[1, i]
if v0 < 0:
v0 = "."
if v1 < 0:
v1 = "."
result.append(f"{v0}/{v1}")
return ",".join(result)
def str2gt(gts, split=","):
gts = gts.split(split)
result = np.zeros(shape=(2, len(gts)), dtype=GENOTYPE_TYPE)
for col, pgts in enumerate(gts):
vals = [
int(p) if p != "." else -1
for p in pgts.split("/")
]
result[0, col] = vals[0]
result[1, col] = vals[1]
return result
def reference_genotype(size):
return np.zeros(shape=(2, size), dtype=GENOTYPE_TYPE)
def is_reference_genotype(gt):
return np.any(gt == 0) and np.all(np.logical_or(gt == 0, gt == -1))
def is_all_reference_genotype(gt):
return not np.any(gt != 0)
def is_unknown_genotype(gt):
return np.any(gt == -1)
def is_all_unknown_genotype(gt):
return np.all(gt == -1)
def trim_str_front(pos, ref, alt):
assert alt, (pos, ref, alt)
assert ref, (pos, ref, alt)
n = 0
for n, s in enumerate(zip(ref, alt)):
if s[0] != s[1]:
break
if ref[n] == alt[n]:
ref = ref[n + 1:]
alt = alt[n + 1:]
pos += n + 1
else:
ref = ref[n:]
alt = alt[n:]
pos += n
if len(ref) == 0 or len(alt) == 0:
return pos, ref, alt
for n, s in enumerate(zip(ref[::-1], alt[::-1])):
if s[0] != s[1]:
break
# not made simple
if ref[-(n + 1)] == alt[-(n + 1)]:
r, a = ref[: -(n + 1)], alt[: -(n + 1)]
else:
if n == 0:
r, a = ref[:], alt[:]
else:
r, a = ref[:-n], alt[:-n]
if len(r) == 0 or len(a) == 0:
return pos, r, a
return pos, r, a
def trim_str_back(pos, ref, alt):
assert alt, (pos, ref, alt)
assert ref, (pos, ref, alt)
n = 0
for n, s in enumerate(zip(ref[::-1], alt[::-1])):
if s[0] != s[1]:
break
# not made simple
if ref[-(n + 1)] == alt[-(n + 1)]:
r, a = ref[: -(n + 1)], alt[: -(n + 1)]
else:
if n == 0:
r, a = ref[:], alt[:]
else:
r, a = ref[:-n], alt[:-n]
if len(r) == 0 or len(a) == 0:
return pos, r, a
for n, s in enumerate(zip(r, a)):
if s[0] != s[1]:
break
if r[n] == a[n]:
return pos + n + 1, r[n + 1:], a[n + 1:]
return pos + n, r[n:], a[n:]
def cshl_format(pos, ref, alt, trimmer=trim_str_front):
p, r, a = trimmer(pos, ref, alt)
if len(r) == len(a) and len(r) == 0:
return VariantDesc(
VariantType.comp, p, ref=r, alt=a, length=0)
if len(r) == len(a) and len(r) == 1:
return VariantDesc(
VariantType.substitution, p, ref=r, alt=a, length=1)
if len(r) > len(a) and len(a) == 0:
return VariantDesc(
VariantType.deletion, p, length=len(r)
)
# len(ref) < len(alt):
if len(r) < len(a) and len(r) == 0:
return VariantDesc(
VariantType.insertion, p, alt=a, length=len(a))
return VariantDesc(
VariantType.comp, p, ref=r, alt=a, length=max(len(r), len(a))
)
def get_locus_ploidy(
chrom: str, pos: int, sex: Sex, genome: Genome
) -> int:
if chrom in ("chrX", "X") and sex == Sex.M:
if not genome.is_pseudoautosomal(chrom, pos):
return 1
return 2
def get_interval_locus_ploidy(
chrom: str, pos_start: int, pos_end: int,
sex: Sex, genome: Genome) -> int:
start_ploidy = get_locus_ploidy(chrom, pos_start, sex, genome)
end_ploidy = get_locus_ploidy(chrom, pos_end, sex, genome)
return max(start_ploidy, end_ploidy)
DNA_COMPLEMENT_NUCLEOTIDES = {
"A": "T",
"T": "A",
"G": "C",
"C": "G",
}
def complement(nucleotides: str) -> str:
return "".join(
[
DNA_COMPLEMENT_NUCLEOTIDES.get(n.upper(), n)
for n in nucleotides
])
def reverse_complement(nucleotides: str) -> str:
return complement(nucleotides[::-1])
def liftover_variant(chrom, pos, ref, alt, lo, target_genome):
lo_coordinates = lo.convert_coordinate(chrom, pos - 1)
if not lo_coordinates:
return None
if len(lo_coordinates) > 1:
logger.info(
f"liftover_variant: liftover returns more than one target "
f"position: {lo_coordinates}")
lo_chrom, lo_pos, lo_strand, _ = lo_coordinates[0]
if lo_strand == "+" or len(ref) == len(alt):
lo_pos += 1
elif lo_strand == "-":
lo_pos -= len(ref)
lo_pos -= 1
_, tr_ref, tr_alt = trim_str_front(pos, ref, alt)
lo_ref = target_genome.get_sequence(
lo_chrom, lo_pos, lo_pos + len(ref) - 1)
if lo_ref is None:
logger.warning(
f"can't find genomic sequence for {lo_chrom}:{lo_pos}")
return None
lo_alt = alt
if lo_strand == "-":
if not tr_alt:
lo_alt = f"{lo_ref[0]}"
else:
lo_alt = reverse_complement(tr_alt)
if not tr_ref:
lo_alt = f"{lo_ref[0]}{lo_alt}"
return (lo_chrom, lo_pos, lo_ref, lo_alt)
def tandem_repeat(ref, alt, min_mono_reference=8):
for period in range(1, len(ref) // 2 + 1):
if len(ref) % period != 0:
continue
unit = ref[:period]
ref_repeats = len(ref) // period
if ref_repeats * unit != ref:
continue
if len(alt) % period != 0:
continue
alt_repeats = len(alt) // period
if alt_repeats * unit != alt:
continue
if len(unit) == 1 and len(ref) < min_mono_reference:
return None, None, None
return unit, ref_repeats, alt_repeats
return None, None, None
def vcf2cshl(pos, ref, alt, trimmer=trim_str_back):
tr_vd = None
tr_unit, tr_ref, tr_alt = tandem_repeat(ref, alt)
if tr_unit is not None:
assert tr_ref is not None
assert tr_alt is not None
tr_vd = VariantDesc(
VariantType.tandem_repeat, pos,
tr_ref=tr_ref, tr_alt=tr_alt, tr_unit=tr_unit)
vd = cshl_format(pos, ref, alt, trimmer=trimmer)
vd.variant_type |= tr_vd.variant_type
vd.tr_unit = tr_vd.tr_unit
vd.tr_ref = tr_vd.tr_ref
vd.tr_alt = tr_vd.tr_alt
return vd
return cshl_format(pos, ref, alt, trimmer=trimmer)
| [
"dae.variants.attributes.VariantDesc",
"numpy.sum",
"numpy.zeros",
"logging.getLogger",
"numpy.any",
"numpy.logical_or",
"numpy.all"
] | [((199, 226), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (216, 226), False, 'import logging\n'), ((894, 940), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, cols)', 'dtype': 'GENOTYPE_TYPE'}), '(shape=(2, cols), dtype=GENOTYPE_TYPE)\n', (902, 940), True, 'import numpy as np\n'), ((980, 1001), 'numpy.sum', 'np.sum', (['best_state', '(0)'], {}), '(best_state, 0)\n', (986, 1001), True, 'import numpy as np\n'), ((2909, 2955), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, size)', 'dtype': 'GENOTYPE_TYPE'}), '(shape=(2, size), dtype=GENOTYPE_TYPE)\n', (2917, 2955), True, 'import numpy as np\n'), ((3171, 3187), 'numpy.any', 'np.any', (['(gt == -1)'], {}), '(gt == -1)\n', (3177, 3187), True, 'import numpy as np\n'), ((3234, 3250), 'numpy.all', 'np.all', (['(gt == -1)'], {}), '(gt == -1)\n', (3240, 3250), True, 'import numpy as np\n'), ((3000, 3015), 'numpy.any', 'np.any', (['(gt == 0)'], {}), '(gt == 0)\n', (3006, 3015), True, 'import numpy as np\n'), ((3113, 3128), 'numpy.any', 'np.any', (['(gt != 0)'], {}), '(gt != 0)\n', (3119, 3128), True, 'import numpy as np\n'), ((4922, 4978), 'dae.variants.attributes.VariantDesc', 'VariantDesc', (['VariantType.comp', 'p'], {'ref': 'r', 'alt': 'a', 'length': '(0)'}), '(VariantType.comp, p, ref=r, alt=a, length=0)\n', (4933, 4978), False, 'from dae.variants.attributes import Sex, VariantDesc, VariantType\n'), ((5049, 5113), 'dae.variants.attributes.VariantDesc', 'VariantDesc', (['VariantType.substitution', 'p'], {'ref': 'r', 'alt': 'a', 'length': '(1)'}), '(VariantType.substitution, p, ref=r, alt=a, length=1)\n', (5060, 5113), False, 'from dae.variants.attributes import Sex, VariantDesc, VariantType\n'), ((8352, 8446), 'dae.variants.attributes.VariantDesc', 'VariantDesc', (['VariantType.tandem_repeat', 'pos'], {'tr_ref': 'tr_ref', 'tr_alt': 'tr_alt', 'tr_unit': 'tr_unit'}), '(VariantType.tandem_repeat, pos, tr_ref=tr_ref, tr_alt=tr_alt,\n tr_unit=tr_unit)\n', (8363, 8446), False, 'from 
dae.variants.attributes import Sex, VariantDesc, VariantType\n'), ((3027, 3059), 'numpy.logical_or', 'np.logical_or', (['(gt == 0)', '(gt == -1)'], {}), '(gt == 0, gt == -1)\n', (3040, 3059), True, 'import numpy as np\n')] |
# Example: generate a synthetic hyperfine-structure (HFS) spectrum with satlas
# and fit it with the chi-square routine. The RNG seed is fixed so the noise
# (and therefore the fit result) is reproducible run-to-run.
import matplotlib.pyplot as plt
import numpy as np
import satlas as s
# Gather all information: nuclear spin I, electronic J of the two levels,
# and the list of six hyperfine coefficients ABC plus the centroid df
# (presumably A/B/C for lower+upper level -- confirm ordering in satlas docs).
I = 1.0
J = [0.5, 0.5]
ABC = [500, 200, 0, 0, 0, 0]
df = 5000
np.random.seed(0)
# Create the basemodel
hfs = s.HFSModel(I, J, ABC, df, scale=3000, saturation=10)
hfs.background = 200
# Example of passing parameter boundaries; min/max of None means unbounded
# ('Au' is presumably the upper-level A parameter -- confirm with satlas).
constraintsDict = {'Au': {'min': None, 'max': None}}
hfs.set_boundaries(constraintsDict)
# Say which frequencies are scanned
x = np.linspace(4000, 6000, 100)
superx = np.linspace(x.min(), x.max(), 100 * len(x))  # NOTE(review): unused below -- presumably meant for a smooth overlay curve
# Generate the data, add some noise; sigma ~ sqrt(counts), Poisson-like
y = hfs(x)
y += 3 * np.random.randn(x.shape[0]) * np.sqrt(y)
s.chisquare_spectroscopic_fit(hfs, x, y, monitor=False)
# Print the fit report
hfs.display_chisquare_fit()
hfs.plot(x, y)
# The block below shows the (optional) maximum-likelihood workflow; it is
# kept commented out because the MCMC walk is slow.
# s.likelihood_fit(hfs, x, y, walking=True)
# fig, axes, cbar = s.generate_correlation_plot(hfs.mle_data)
# fig.savefig('walk')
# Plot the result
# fig, ax = plt.subplots(1, 1)
# hfs.plot(show=True, x=x, y=y, no_of_points=1000, legend=r'$\chi^2$', data_legend='Data', bayesian=True, colormap='gnuplot2_r')
# # Example of Maximum Likelihood Estimation (MLE) fitting,
# # along with error calculation using Monte Carlo walking.
# hfs.likelihood_fit(x, y,
#                    walking=False, # Perform the walk
#                    walkers=50, # Number of walkers,
#                                # see the emcee documentation for
#                                # more on this
#                    nsteps=200, # Number of steps for each walker
#                    burnin=10.0, # Defines the percentage of burnin
#                    )
# hfs.plot(ax=ax, show=False, bayesian=True, legend=r'MLE')
# ax.legend(loc=0)
# hfs.display_mle_fit()
# # # plt.tight_layout()
# utils.generate_correlation_plot(hfs.mle_data)
# plt.show()
| [
"numpy.random.seed",
"numpy.random.randn",
"satlas.HFSModel",
"numpy.linspace",
"satlas.chisquare_spectroscopic_fit",
"numpy.sqrt"
] | [((159, 176), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (173, 176), True, 'import numpy as np\n'), ((206, 258), 'satlas.HFSModel', 's.HFSModel', (['I', 'J', 'ABC', 'df'], {'scale': '(3000)', 'saturation': '(10)'}), '(I, J, ABC, df, scale=3000, saturation=10)\n', (216, 258), True, 'import satlas as s\n'), ((409, 437), 'numpy.linspace', 'np.linspace', (['(4000)', '(6000)', '(100)'], {}), '(4000, 6000, 100)\n', (420, 437), True, 'import numpy as np\n'), ((589, 644), 'satlas.chisquare_spectroscopic_fit', 's.chisquare_spectroscopic_fit', (['hfs', 'x', 'y'], {'monitor': '(False)'}), '(hfs, x, y, monitor=False)\n', (618, 644), True, 'import satlas as s\n'), ((577, 587), 'numpy.sqrt', 'np.sqrt', (['y'], {}), '(y)\n', (584, 587), True, 'import numpy as np\n'), ((547, 574), 'numpy.random.randn', 'np.random.randn', (['x.shape[0]'], {}), '(x.shape[0])\n', (562, 574), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
class PdosOut:
"""
"""
def __init__(self):
self.data = {} # contain the pdos data but not the tdos
self.tdos = None
self.energies = None
    def get_data(self, directory="tmp-qe-static", filpdos="projwfc", usefermi="scf"):
        """
        Read the projwfc.x PDOS output files from ``directory``.

        Populates:
          * ``self.tdos``   -- array loaded from ``<filpdos>.pdos_tot``
          * ``self.data``   -- dict keyed by 'atm#N(El)_wfc#M(l)' strings,
                               one array per projection file
          * ``self.magnetic_status`` -- inferred from the pdos_tot header
          * ``self.energies`` -- energy grid shifted so that E_Fermi = 0
          * ``self.efermi``   -- the Fermi energy that was subtracted

        :param directory: directory of the previous scf/nscf/projwfc run
        :param filpdos: projwfc.x output file prefix
        :param usefermi: 'scf' or 'nscf' -- which output file supplies the
            Fermi energy. NOTE(review): any other value leaves ``efermi``
            unbound and raises NameError below -- confirm intended values.
        """
        # first check whether there is a previous scf running
        if not os.path.exists(directory):
            print("===================================================\n")
            print(" Warning !!!\n")
            print("===================================================\n")
            print("pdos post:\n")
            print(" directory of previous scf calculattion not found!\n")
            sys.exit(1)
        os.chdir(directory)
        # collect the names of every <filpdos>.pdos_* file into a list file
        os.system("ls | grep '%s.pdos_' > projwfc-pdos-file.data" % filpdos)
        with open("projwfc-pdos-file.data", 'r') as fin:
            for line in fin:
                # NOTE(review): split(".")[1] assumes filpdos itself contains
                # no '.' -- confirm with the projwfc input used.
                if line.split(".")[1] == "pdos_tot\n":
                    with open(line.split()[0], 'r') as f:
                        f.readline()  # skip the column-header line
                        self.tdos = np.loadtxt(f)
                    continue
                atmorb = line.split("_")[1]+"_"+line.split("_")[2].split()[0]
                # atomorb is a string in format like this atm#1(Li)_wfc#2(s).
                with open(line.split()[0], 'r') as f:
                    f.readline()  # skip the column-header line
                    self.data[atmorb] = np.loadtxt(f)
        # check information on spin: the pdos_tot header names the columns
        with open("%s.pdos_tot" % filpdos, 'r') as fin:
            first_line = fin.readline()
            if "pdosup(E)" in first_line.split() and "pdosdw(E)" in first_line.split():
                if "dosup(E)" in first_line.split() and "dosdw(E)" in first_line.split():
                    self.magnetic_status = "collinear-spin-polarized"
                else:
                    self.magnetic_status = "non-collinear-non-spin-orbit"
            else:
                self.magnetic_status = "collinear-spin-unpolarized"
        os.chdir("../")
        self.energies = self.tdos[:, 0]
        # get fermi energy from scf or nscf output
        scfout = "static-scf.out"
        nscfout = "static-nscf.out"
        if usefermi == "scf":
            with open(os.path.join(directory, scfout), 'r') as fin:
                for line in fin:
                    if len(line.split()) == 0:
                        continue
                    # line looks like: 'the Fermi energy is  X.XXXX ev'
                    if line.split()[0] == "the" and line.split()[1] == "Fermi":
                        efermi = float(line.split()[4])
        elif usefermi == "nscf":
            with open(os.path.join(directory, nscfout), 'r') as fin:
                for line in fin:
                    if len(line.split()) == 0:
                        continue
                    if line.split()[0] == "the" and line.split()[1] == "Fermi":
                        efermi = float(line.split()[4])
        # shift fermie energy to 0
        for i in range(len(self.energies)):
            self.energies[i] = self.energies[i] - efermi
        self.efermi = efermi
        print("===============================================\n")
        print("qe.post.pdos:\n")
        print("we automatically shift the fermi energy\n")
        print("from %f to 0\n" % efermi)
        print("efermi is read from %s\n" % ("statis-scf.out" if usefermi == "scf" else "statis-nscf.out"))
        print("you can choose to read efermi from scf or nscf output by --use-fermi")
        #
        # tranfser self.data to new data structure for better usage:
        self._transfer_data_to_new_struct()
    def _transfer_data_to_new_struct(self):
        """
        Re-index the raw arrays in ``self.data``/``self.tdos`` into
        per-orbital dicts, depending on the detected spin setup:

        self.magnetic_status:
            "collinear-spin-unpolarized" -> self.data_0
            "collinear-spin-polarized" -> self.data_1
            "non-collinear-non-spin-orbit" -> self.data_2
            "non-collinear-spin-orbit" -> self.data_3
        NOTE(review): only the two collinear cases are implemented below;
        the non-collinear branches are placeholders.
        """
        if self.magnetic_status == "collinear-spin-unpolarized":
            """
            # pdos are in format like this:
            # E LDOS(E) PDOS_1(E) ... PDOS_2l+1(E)
            # self.data_0:
                {
                    atmorb: {
                        "ldos": [],
                        "pdos_l_1": [],
                        "pdos_l_2": [],
                        ...
                        "pdos_l_2l+1": []
                    }
                }
                l could be: s, p, d, f, in format like this: 1(s), 2(s), 2(p)
            self.data_0_tdos:
                {
                    dos: [],
                    pdos: [],
                }
            """
            self.data_0 = {}
            for atmorb in self.data:
                self.data_0[atmorb] = {}
                self.data_0[atmorb]["ldos"] = self.data[atmorb][:, 1]
                # columns 2.. are the 2l+1 m-resolved projections
                for i in range(self.data[atmorb].shape[1]-2):
                    self.data_0[atmorb]["pdos_%s_%d" % (self.get_orb_type(atmorb), i+1)] = self.data[atmorb][:, i+2]
            #
            self.data_0_tdos = {}
            self.data_0_tdos["dos"] = self.tdos[:, 1]
            self.data_0_tdos["pdos"] = self.tdos[:, 2]
        elif self.magnetic_status == "collinear-spin-polarized":
            """
            # pdos are in format like this:
            # E ldosup(E) ldosdw(E) pdos_1up(E) pdos_1dw(E) ... pdow_2l+1up(E) pdos_2l+1dw(E)
            # self.data_1:
                {
                    atmorb: {
                        "ldos_up": [],
                        "ldos_dw": [],
                        "pdos_l_1_up": [],
                        "pdos_l_1_dw": [],
                        "pdos_l_2_up": [],
                        "pdos_l_2_dw": [],
                        ...
                        "pdos_l_2l+1_up": [],
                        "pdos_l_2l+1_dw": [],
                    }
                }
                l could be: s, p, d, f, in format like this: 1(s), 2(s), 2(p)
            self.data_1_tdos:
                {
                    "dos_up": [],
                    "dos_dw": [],
                    "pdos_up": [],
                    "pdos_dw": []
                }
            """
            self.data_1 = {}
            for atmorb in self.data:
                self.data_1[atmorb] = {}
                self.data_1[atmorb]["ldos_up"] = self.data[atmorb][:, 1]
                self.data_1[atmorb]["ldos_dw"] = self.data[atmorb][:, 2]
                # remaining columns come in (up, dw) pairs: 3+2i and 3+2i+1
                for i in range(int((self.data[atmorb].shape[1]-3)/2)):
                    self.data_1[atmorb]["pdos_%s_%d_up" % (self.get_orb_type(atmorb), i+1)] = self.data[atmorb][:, 3+2*i]
                    self.data_1[atmorb]["pdos_%s_%d_dw" % (self.get_orb_type(atmorb), i+1)] = self.data[atmorb][:, 3+2*i+1]
            #
            self.data_1_tdos = {}
            self.data_1_tdos["dos_up"] = self.tdos[:, 1]
            self.data_1_tdos["dos_dw"] = self.tdos[:, 2]
            self.data_1_tdos["pdos_up"] = self.tdos[:, 3]
            self.data_1_tdos["pdos_dw"] = self.tdos[:, 4]
        elif self.magnetic_status == "non-collinear-non-spin-orbit":
            # not implemented yet
            pass
        elif self.magnetic_status == "non-collinear-spin-orbit":
            # not implemented yet
            pass
def export_data(self, directory):
"""
"""
if self.magnetic_status == "collinear-spin-unpolarized":
data = {}
for atmorb in self.data_0:
key = self.get_elem_type(atmorb)+"-"+self.get_orb_type(atmorb)
if key in data:
data[key] = data[key] + self.data_0[atmorb]["ldos"]
else:
data[key] = self.data_0[atmorb]["ldos"]
elif self.magnetic_status == "collinear-spin-polarized":
data = {}
for atmorb in self.data_1:
key = self.get_elem_type(atmorb)+"-"+self.get_orb_type(atmorb)
key_up = self.get_elem_type(atmorb)+"-"+self.get_orb_type(atmorb)+"-up"
key_dw = self.get_elem_type(atmorb)+"-"+self.get_orb_type(atmorb)+"-down"
if key_up in data and key_dw in data:
data[key_up] = data[key_up] + self.data_1[atmorb]["ldos_up"]
data[key_dw] = data[key_dw] + (-self.data_1[atmorb]["ldos_dw"])
else:
data[key_up] = self.data_1[atmorb]["ldos_up"]
data[key_dw] = (-self.data_1[atmorb]["ldos_dw"])
# export pdos projected to element and orbital l
with open(os.path.join(directory, "pdos-projected-to-element-and-orbital-l.data"), 'w') as fout:
fout.write("# efermi shifted to 0 already, previous efermi=%f\n" % self.efermi)
fout.write("#Energy")
for key in data:
fout.write(" %s" % key)
fout.write("\n")
for i in range(len(self.energies)):
fout.write("%.9f" % self.energies[i])
for key in data:
fout.write(" %.9f" % data[key][i])
fout.write("\n")
# export pdos projected to atom and orbital l
if self.magnetic_status == "collinear-spin-unpolarized":
with open(os.path.join(directory, "pdos-projected-to-atom-and-orbital-l.data"), 'w') as fout:
fout.write("# efermi shifted to 0 already, previous efermi=%f\n" % self.efermi)
fout.write("#Energy")
for atmorb in self.data_0:
fout.write(" Atom(%d):%s-%s" % (self.get_atom_num(atmorb), self.get_elem_type(atmorb), self.get_orb_type(atmorb)))
fout.write("\n")
for i in range(len(self.energies)):
fout.write("%.9f" % self.energies[i])
for atmorb in self.data_0:
fout.write(" %.9f" % self.data_0[atmorb]["ldos"][i])
fout.write("\n")
elif self.magnetic_status == "collinear-spin-polarized":
with open(os.path.join(directory, "pdos-projected-to-atom-and-orbital-l.data"), 'w') as fout:
fout.write("# efermi shifted to 0 already, previous efermi=%f\n" % self.efermi)
fout.write("#Energy")
for atmorb in self.data_0:
fout.write(" Atom(%d):%s-%s-up Atom(%d):%s-%s-down" % (self.get_atom_num(atmorb), self.get_elem_type(atmorb), self.get_orb_type(atmorb), self.get_atom_num(atmorb), self.get_elem_type(atmorb), self.get_orb_type(atmorb)))
fout.write("\n")
for i in range(len(self.energies)):
fout.write("%.9f" % self.energies[i])
for atmorb in self.data_1:
fout.write(" %.9f %.9f" % (self.data_1[atmorb]["ldos_up"][i], self.data_1[atmorb]["ldos_dw"][i]))
fout.write("\n")
# export total dos to data file
if self.magnetic_status == "collinear-spin-unpolarized":
with open(os.path.join(directory, "total-dos.data"), 'w') as fout:
fout.write("# efermi shifted to 0 already, previous efermi=%f\n" % self.efermi)
fout.write("#energye dos\n")
for i in range(len(self.energies)):
fout.write("%.9f %.9f\n" % (self.energies[i], self.data_0_tdos["dos"][i]))
elif self.magnetic_status == "collinear-spin-polarized":
with open(os.path.join(directory, "total-dos.data"), 'w') as fout:
fout.write("# efermi shifted to 0 already, previous efermi=%f\n" % self.efermi)
fout.write("#energye dos(up) dos(down)\n")
for i in range(len(self.energies)):
fout.write("%.9f %.9f %.9f\n" % (self.energies[i], self.data_0_tdos["dos_up"][i], self.data_0_tdos["dos_dw"][i]))
def plot_elem_orb_l_proj(self, plotrange=[0.0, 1.0], filename="pdos-projected-to-element-and-orbital-l.png", fontsize=10):
"""
plotrange:
a list of two values(between 0 and 1) defining the percentage
of data to plot.
plotrange[0]: left boundary of the data to plot
plotrange[1]: right boundary of the data to plot
default is plotrange[0] = 0, plotrange[1], in which case
all the data will be plot.
"""
if self.magnetic_status == "collinear-spin-unpolarized":
data = {}
for atmorb in self.data_0:
key = self.get_elem_type(atmorb)+"-"+self.get_orb_type(atmorb)
if key in data:
data[key] = data[key] + self.data_0[atmorb]["ldos"]
else:
data[key] = self.data_0[atmorb]["ldos"]
elif self.magnetic_status == "collinear-spin-polarized":
data = {}
for atmorb in self.data_1:
key = self.get_elem_type(atmorb)+"-"+self.get_orb_type(atmorb)
key_up = self.get_elem_type(atmorb)+"-"+self.get_orb_type(atmorb)+"-up"
key_dw = self.get_elem_type(atmorb)+"-"+self.get_orb_type(atmorb)+"-down"
if key_up in data and key_dw in data:
data[key_up] = data[key_up] + self.data_1[atmorb]["ldos_up"]
data[key_dw] = data[key_dw] + (-self.data_1[atmorb]["ldos_dw"])
else:
data[key_up] = self.data_1[atmorb]["ldos_up"]
data[key_dw] = (-self.data_1[atmorb]["ldos_dw"])
# plot the pdos in the specified percentage range
begin = int(len(self.energies)*plotrange[0])
end = int(len(self.energies)*plotrange[1])
for key in data:
plt.plot(self.energies[begin:end], data[key][begin:end], label=key)
# plot the total dos in the specified percentage range
if self.magnetic_status == "collinear-spin-unpolarized":
plt.plot(self.energies[begin:end], self.data_0_tdos["dos"][begin:end], label="Total-DOS")
elif self.magnetic_status == "collinear-spin-polarized":
plt.plot(self.energies[begin:end], self.data_1_tdos["dos_up"], label="Total-DOS-Up")
plt.plot(self.energies[begin:end], -self.data_1_tdos["dos_dw"], label="Total-DOS-Down")
# set formats
font = {'size': fontsize}
plt.tick_params(labelsize=fontsize)
plt.grid(which="major", axis="x", linewidth=0.75, linestyle="-", color="0.75")
plt.grid(which="major", axis="y", linewidth=0.75, linestyle="-", color="0.75")
plt.title("Projected Density of States")
plt.xlabel(r"$\mathit{E}-\mathit{E}_\mathrm{f} \mathrm{(eV)}$", font)
plt.ylabel("States", font)
plt.legend(prop=font)
plt.tight_layout()
plt.savefig("%s" % filename)
plt.close()
def plot_atom_orb_l_proj(self, atomtoproj=[], plotrange=[0.0, 1.0], filename="pdos-projected-to-atom-and-orbital-l.png", fontsize=10):
"""
plotrange:
a list of two values(between 0 and 1) defining the percentage
of data to plot.
plotrange[0]: left boundary of the data to plot
plotrange[1]: right boundary of the data to plot
default is plotrange[0] = 0, plotrange[1], in which case
all the data will be plot.
atomtoproj:
the list of atoms to do the projection. atom number starts with 1
"""
# plot the data in the specified percentage range
begin = int(len(self.energies)*plotrange[0])
end = int(len(self.energies)*plotrange[1])
# atom projected dos
if self.magnetic_status == "collinear-spin-unpolarized":
for atmorb in self.data_0:
if self.get_atom_num(atmorb) in atomtoproj:
plt.plit(self.energies[begin:end], self.data_0[atmorb]["ldos"][begin:end], label="Atom(%d):%s-%s" % (self.get_atom_num(atmorb), self.get_elem_type(atmorb), self.get_orb_type(atmorb)))
elif self.magnetic_status == "collinear-spin-polarized":
for atmorb in self.data_1:
if self.get_atom_num(atmorb) in atomtoproj:
plt.plot(self.energies[begin:end], self.data_1[atmorb]["ldos_up"][begin:end], label="Atom(%d):%s-%s-up" % (self.get_atom_num(atmorb), self.get_elem_type(atmorb), self.get_orb_type(atmorb)))
plt.plot(self.energies[begin:end], -self.data_1[atmorb]["ldos_dw"][begin:end], label="Atom(%d):%s-%s-down" % (self.get_atom_num(atmorb), self.get_elem_type(atmorb), self.get_orb_type(atmorb)))
# set formats
font = {'size': fontsize}
plt.tick_params(labelsize=fontsize)
plt.grid(which="major", axis="x", linewidth=0.75, linestyle="-", color="0.75")
plt.grid(which="major", axis="y", linewidth=0.75, linestyle="-", color="0.75")
plt.title("Projected(Atom) Density of States")
plt.xlabel(r"$\mathit{E}-\mathit{E}_\mathrm{f} \mathrm{(eV)}$", font)
plt.ylabel("States", font)
if len(atomtoproj) != 0:
plt.legend(prop=font)
plt.tight_layout()
plt.savefig("%s" % filename)
plt.close()
def plot_tdos(self, plotrange=[0, 1.0], filename="total-dos.png"):
"""
plotrange:
a list of two values(between 0 and 1) defining the percentage
of data to plot.
plotrange[0]: left boundary of the data to plot
plotrange[1]: right boundary of the data to plot
default is plotrange[0] = 0, plotrange[1], in which case
all the data will be plot.
"""
# plot the total dos in the specified percentage range
begin = int(len(self.energies)*plotrange[0])
end = int(len(self.energies)*plotrange[1])
if self.magnetic_status == "collinear-spin-unpolarized":
plt.plot(self.energies[begin:end], self.data_0_tdos["dos"][begin:end], label="Total-DOS")
elif self.magnetic_status == "collinear-spin-polarized":
plt.plot(self.energies[begin:end], self.data_1_tdos["dos_up"], label="Total-DOS-Up")
plt.plot(self.energies[begin:end], -self.data_1_tdos["dos_dw"], label="Total-DOS-Down")
plt.grid(which="major", axis="x", linewidth=0.75, linestyle="-", color="0.75")
plt.grid(which="major", axis="y", linewidth=0.75, linestyle="-", color="0.75")
plt.title("Total Density of States")
plt.xlabel(r"$\mathit{E}-\mathit{E}_\mathrm{f} (eV)$")
plt.ylabel("States")
plt.legend()
plt.tight_layout()
plt.savefig("%s" % filename)
plt.close()
def get_elem_type(self, atmorb):
"""
get element name from atmorb
atmorb is the key in self.data
it's like this:
atm#1(Li)_wfc#2(s)
return value of the above input
will be:
'Li'
"""
return atmorb.split("(")[1].split(")")[0]
def get_orb_type(self, atmorb):
"""
get element name and orb from atmorb
atmorb is the key in self.data
it's like this:
atm#1(Li)_wfc#2(s)
return value of the above input
will be:
'2(s)'
"""
return atmorb.split("#")[2]
def get_atom_num(self, atmorb):
"""
get atom name from atmorb
atmorb is the key in self.data
it's like this:
atm#1(Li)_wfc#2(s)
return value of the above input
will be:
1
"""
return int(atmorb.split("(")[0].split("#")[1])
def markdown_report(self, md="pdos-report.md"):
"""
when writing Chinese to a file you must specify
encoding='utf-8' when open the file for writing
"""
with open(md, 'w', encoding="utf-8") as fout:
fout.write("# 投影态密度图\n")
fout.write("**指定能量范围数据图\n")
fout.write("\n")
fout.write("\n")
fout.write("\n")
fout.write("**所有可获取能量范围数据图**\n")
fout.write("\n")
fout.write("\n")
fout.write("\n")
    def export(self, directory="tmp-qe-static", plotrange=[0, 1.0], atomtoproj=[], fontsize=10):
        """
        Run the full post-processing pipeline inside ``directory``.

        Creates a ``post-processing`` sub-directory, exports the .data files
        and renders every plot twice: once for the requested ``plotrange``
        and once over the whole available energy window.

        NOTE(review): ``plotrange``/``atomtoproj`` are mutable default
        arguments -- safe only while callers never mutate them.
        NOTE(review): the final ``os.chdir("../")`` assumes ``directory`` is
        a single path component -- confirm callers only pass such paths.
        """
        os.chdir(directory)
        os.system("mkdir -p post-processing")
        self.export_data(directory="post-processing")
        # plots restricted to the requested energy window
        self.plot_elem_orb_l_proj(plotrange=plotrange, filename="post-processing/pdos-specified-range.png", fontsize=fontsize)
        self.plot_atom_orb_l_proj(plotrange=plotrange, atomtoproj=atomtoproj, filename="post-processing/pdos-atomproj-specified-range.png", fontsize=fontsize)
        self.plot_tdos(plotrange=plotrange, filename="post-processing/tdos-specified-range.png")
        # also plot the all data
        self.plot_elem_orb_l_proj(plotrange=[0, 1.0], filename="post-processing/pdos-all-energy-available.png", fontsize=fontsize)
        self.plot_atom_orb_l_proj(plotrange=[0, 1.0], atomtoproj=atomtoproj, filename="post-processing/pdos-atomproj-all-energy-available.png", fontsize=fontsize)
        self.plot_tdos(plotrange=[0, 1.0], filename="post-processing/tdos-all-energy-available.png")
        self.markdown_report(md="post-processing/pdos-report.md")
        os.chdir("../")
#
class PdosPost:
"""
"""
def __init__(self):
self.data = {} # contain the pdos data but not the tdos
self.tdos = None
self.energies = None
    def get_data(self, directory="tmp-qe-static", filpdos="projwfc"):
        """
        Read the projwfc.x PDOS output files from ``directory``.

        Populates ``self.tdos`` (from ``<filpdos>.pdos_tot``), ``self.data``
        (dict keyed by 'atm#N(El)_wfc#M(l)' strings) and ``self.energies``
        (energy grid shifted so that E_Fermi = 0).

        The Fermi energy is read from the nscf output if present, otherwise
        from the scf output; if neither exists the program prints a warning
        and exits.

        :param directory: directory of the previous scf/nscf/projwfc run
        :param filpdos: projwfc.x output file prefix
        """
        # first check whether there is a previous scf running
        if not os.path.exists(directory):
            print("===================================================\n")
            print(" Warning !!!\n")
            print("===================================================\n")
            print("pdos post:\n")
            print(" directory of previous scf calculattion not found!\n")
            sys.exit(1)
        os.chdir(directory)
        # collect the names of every <filpdos>.pdos_* file into a list file
        os.system("ls | grep '%s.pdos_' > projwfc-pdos-file.data" % filpdos)
        with open("projwfc-pdos-file.data", 'r') as fin:
            for line in fin:
                # NOTE(review): split(".")[1] assumes filpdos itself contains
                # no '.' -- confirm with the projwfc input used.
                if line.split(".")[1] == "pdos_tot\n":
                    with open(line.split()[0], 'r') as f:
                        f.readline()  # skip the column-header line
                        self.tdos = np.loadtxt(f)
                    continue
                atmorb = line.split("_")[1]+"_"+line.split("_")[2].split()[0]
                # atomorb is a string in format like this atm#1(Li)_wfc#2(s).
                with open(line.split()[0], 'r') as f:
                    f.readline()  # skip the column-header line
                    self.data[atmorb] = np.loadtxt(f)
        os.chdir("../")
        self.energies = self.tdos[:, 0]
        # get fermi energy from nscf output
        scfout = "static-scf.out"
        nscfout = "static-nscf.out"
        if os.path.exists(os.path.join(directory, nscfout)):
            with open(os.path.join(directory, nscfout), 'r') as fin:
                for line in fin:
                    if len(line.split()) == 0:
                        continue
                    # line looks like: 'the Fermi energy is  X.XXXX ev'
                    if line.split()[0] == "the" and line.split()[1] == "Fermi":
                        efermi = float(line.split()[4])
        elif os.path.exists(os.path.join(directory, scfout)):
            with open(os.path.join(directory, scfout), 'r') as fin:
                for line in fin:
                    if len(line.split()) == 0:
                        continue
                    if line.split()[0] == "the" and line.split()[1] == "Fermi":
                        efermi = float(line.split()[4])
        else:
            print("===========================================================\n")
            print(" Warning !!!\n")
            print("===========================================================\n")
            print("PDOS postprocessing:\n")
            print("must provide nscfout or at least scfout to get Fermi energy\n")
            sys.exit(1)
        # shift fermie energy to 0
        for i in range(len(self.energies)):
            self.energies[i] = self.energies[i] - efermi
        print("===============================================\n")
        print("qe.post.pdos:\n")
        print("we automatically shift the fermi energy\n")
        print("from %f to 0\n" % efermi)
        print("efermi is read from static-nscf.out\n")
        print("or statis-scf.out, if static-nscf.out is not available\n")
        #
def plot_elem_orb_proj(self, plotrange=[0.0, 1.0], filename="pdos-projected-to-element-and-orbital.png", fontsize=10):
"""
plotrange:
a list of two values(between 0 and 1) defining the percentage
of data to plot.
plotrange[0]: left boundary of the data to plot
plotrange[1]: right boundary of the data to plot
default is plotrange[0] = 0, plotrange[1], in which case
all the data will be plot.
"""
data = {}
for atmorb in self.data:
key = self.get_elem_type(atmorb)+"-"+self.get_orb_type(atmorb)
if key in data:
data[key] = data[key] + self.data[atmorb][:, 2]
else:
data[key] = self.data[atmorb][:, 2]
# plot the pdos in the specified percentage range
begin = int(len(self.energies)*plotrange[0])
end = int(len(self.energies)*plotrange[1])
for key in data:
plt.plot(self.energies[begin:end], data[key][begin:end], label=key)
# plot the total dos in the specified percentage range
plt.plot(self.energies[begin:end], self.tdos[begin:end, 2], label="Total-DOS")
# set formats
font = {'size': fontsize}
plt.tick_params(labelsize=fontsize)
plt.grid(which="major", axis="x", linewidth=0.75, linestyle="-", color="0.75")
plt.grid(which="major", axis="y", linewidth=0.75, linestyle="-", color="0.75")
plt.title("Projected Density of States")
plt.xlabel(r"$\mathit{E}-\mathit{E}_\mathrm{f} \mathrm{(eV)}$", font)
plt.ylabel("States", font)
plt.legend(prop=font)
plt.tight_layout()
plt.savefig("%s" % filename)
plt.close()
def plot_atom_orb_proj(self, atomtoproj=[], plotrange=[0.0, 1.0], filename="pdos-projected-to-atom-and-orbital.png", fontsize=10):
"""
plotrange:
a list of two values(between 0 and 1) defining the percentage
of data to plot.
plotrange[0]: left boundary of the data to plot
plotrange[1]: right boundary of the data to plot
default is plotrange[0] = 0, plotrange[1], in which case
all the data will be plot.
atomtoproj:
the list of atoms to do the projection. atom number starts with 1
"""
# plot the data in the specified percentage range
begin = int(len(self.energies)*plotrange[0])
end = int(len(self.energies)*plotrange[1])
# atom projected dos
for atmorb in self.data:
if self.get_atom_num(atmorb) in atomtoproj:
plt.plot(self.energies[begin:end], self.data[atmorb][begin:end, 2], label="Atom(%d):%s-%s" % (self.get_atom_num(atmorb), self.get_elem_type(atmorb), self.get_orb_type(atmorb)))
# plot the total dos in the specified percentage range
plt.plot(self.energies[begin:end], self.tdos[begin:end, 2], label="Total-DOS")
#
# set formats
font = {'size': fontsize}
plt.tick_params(labelsize=fontsize)
plt.grid(which="major", axis="x", linewidth=0.75, linestyle="-", color="0.75")
plt.grid(which="major", axis="y", linewidth=0.75, linestyle="-", color="0.75")
plt.title("Projected(Atom) Density of States")
plt.xlabel(r"$\mathit{E}-\mathit{E}_\mathrm{f} \mathrm{(eV)}$", font)
plt.ylabel("States", font)
plt.legend(prop=font)
plt.tight_layout()
plt.savefig("%s" % filename)
plt.close()
def plot_tdos(self, plotrange=[0, 1.0], filename="total-dos.png"):
"""
plotrange:
a list of two values(between 0 and 1) defining the percentage
of data to plot.
plotrange[0]: left boundary of the data to plot
plotrange[1]: right boundary of the data to plot
default is plotrange[0] = 0, plotrange[1], in which case
all the data will be plot.
"""
# plot the total dos in the specified percentage range
begin = int(len(self.energies)*plotrange[0])
end = int(len(self.energies)*plotrange[1])
#plt.plot(self.energies, self.tdos[:, 2], label="total-dos")
plt.plot(self.energies[begin:end], self.tdos[begin:end, 2], label="Total-DOS")
plt.grid(which="major", axis="x", linewidth=0.75, linestyle="-", color="0.75")
plt.grid(which="major", axis="y", linewidth=0.75, linestyle="-", color="0.75")
plt.title("Total Density of States")
plt.xlabel(r"$\mathit{E}-\mathit{E}_\mathrm{f} (eV)$")
plt.ylabel("States")
plt.legend()
plt.tight_layout()
plt.savefig("%s" % filename)
plt.close()
def get_elem_type(self, atmorb):
"""
get element name from atmorb
atmorb is the key in self.data
it's like this:
atm#1(Li)_wfc#2(s)
return value of the above input
will be:
'Li'
"""
return atmorb.split("(")[1].split(")")[0]
def get_orb_type(self, atmorb):
"""
get element name and orb from atmorb
atmorb is the key in self.data
it's like this:
atm#1(Li)_wfc#2(s)
return value of the above input
will be:
'2(s)'
"""
return atmorb.split("#")[2]
def get_atom_num(self, atmorb):
"""
get atom name from atmorb
atmorb is the key in self.data
it's like this:
atm#1(Li)_wfc#2(s)
return value of the above input
will be:
1
"""
return int(atmorb.split("(")[0].split("#")[1])
def markdown_report(self, md="pdos-report.md"):
"""
when writing Chinese to a file you must specify
encoding='utf-8' when open the file for writing
"""
with open(md, 'w', encoding="utf-8") as fout:
fout.write("# 投影态密度图\n")
fout.write("**指定能量范围数据图\n")
fout.write("\n")
fout.write("\n")
fout.write("\n")
fout.write("**所有可获取能量范围数据图**\n")
fout.write("\n")
fout.write("\n")
fout.write("\n")
    def export(self, directory="tmp-qe-static", plotrange=[0, 1.0], atomtoproj=[], fontsize=10):
        """
        Run the full post-processing pipeline inside ``directory``.

        Creates a ``post-processing`` sub-directory, exports the .data files
        and renders every plot twice: once for the requested ``plotrange``
        and once over the whole available energy window.

        NOTE(review): ``plotrange``/``atomtoproj`` are mutable default
        arguments -- safe only while callers never mutate them.
        NOTE(review): the final ``os.chdir("../")`` assumes ``directory`` is
        a single path component -- confirm callers only pass such paths.
        """
        os.chdir(directory)
        os.system("mkdir -p post-processing")
        self.export_data(directory="post-processing")
        # plots restricted to the requested energy window
        self.plot_elem_orb_proj(plotrange=plotrange, filename="post-processing/pdos-specified-range.png", fontsize=fontsize)
        self.plot_atom_orb_proj(plotrange=plotrange, atomtoproj=atomtoproj, filename="post-processing/pdos-atomproj-specified-range.png", fontsize=fontsize)
        self.plot_tdos(plotrange=plotrange, filename="post-processing/tdos-specified-range.png")
        # also plot the all data
        self.plot_elem_orb_proj(plotrange=[0, 1.0], filename="post-processing/pdos-all-energy-available.png", fontsize=fontsize)
        self.plot_atom_orb_proj(plotrange=[0, 1.0], atomtoproj=atomtoproj, filename="post-processing/pdos-atomproj-all-energy-available.png", fontsize=fontsize)
        self.plot_tdos(plotrange=[0, 1.0], filename="post-processing/tdos-all-energy-available.png")
        self.markdown_report(md="post-processing/pdos-report.md")
        os.chdir("../")
#
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"os.path.exists",
"os.system",
"sys.exit",
"numpy.loadtxt",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.p... | [((1280, 1299), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (1288, 1299), False, 'import os\n'), ((1309, 1377), 'os.system', 'os.system', (['("ls | grep \'%s.pdos_\' > projwfc-pdos-file.data" % filpdos)'], {}), '("ls | grep \'%s.pdos_\' > projwfc-pdos-file.data" % filpdos)\n', (1318, 1377), False, 'import os\n'), ((2555, 2570), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (2563, 2570), False, 'import os\n'), ((14823, 14858), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': 'fontsize'}), '(labelsize=fontsize)\n', (14838, 14858), True, 'import matplotlib.pyplot as plt\n'), ((14870, 14948), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""x"""', 'linewidth': '(0.75)', 'linestyle': '"""-"""', 'color': '"""0.75"""'}), "(which='major', axis='x', linewidth=0.75, linestyle='-', color='0.75')\n", (14878, 14948), True, 'import matplotlib.pyplot as plt\n'), ((14958, 15036), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""y"""', 'linewidth': '(0.75)', 'linestyle': '"""-"""', 'color': '"""0.75"""'}), "(which='major', axis='y', linewidth=0.75, linestyle='-', color='0.75')\n", (14966, 15036), True, 'import matplotlib.pyplot as plt\n'), ((15046, 15086), 'matplotlib.pyplot.title', 'plt.title', (['"""Projected Density of States"""'], {}), "('Projected Density of States')\n", (15055, 15086), True, 'import matplotlib.pyplot as plt\n'), ((15096, 15168), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathit{E}-\\\\mathit{E}_\\\\mathrm{f} \\\\mathrm{(eV)}$"""', 'font'], {}), "('$\\\\mathit{E}-\\\\mathit{E}_\\\\mathrm{f} \\\\mathrm{(eV)}$', font)\n", (15106, 15168), True, 'import matplotlib.pyplot as plt\n'), ((15175, 15201), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""States"""', 'font'], {}), "('States', font)\n", (15185, 15201), True, 'import matplotlib.pyplot as plt\n'), ((15211, 15232), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': 
'font'}), '(prop=font)\n', (15221, 15232), True, 'import matplotlib.pyplot as plt\n'), ((15242, 15260), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15258, 15260), True, 'import matplotlib.pyplot as plt\n'), ((15270, 15298), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s' % filename)"], {}), "('%s' % filename)\n", (15281, 15298), True, 'import matplotlib.pyplot as plt\n'), ((15308, 15319), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15317, 15319), True, 'import matplotlib.pyplot as plt\n'), ((17178, 17213), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': 'fontsize'}), '(labelsize=fontsize)\n', (17193, 17213), True, 'import matplotlib.pyplot as plt\n'), ((17225, 17303), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""x"""', 'linewidth': '(0.75)', 'linestyle': '"""-"""', 'color': '"""0.75"""'}), "(which='major', axis='x', linewidth=0.75, linestyle='-', color='0.75')\n", (17233, 17303), True, 'import matplotlib.pyplot as plt\n'), ((17313, 17391), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""y"""', 'linewidth': '(0.75)', 'linestyle': '"""-"""', 'color': '"""0.75"""'}), "(which='major', axis='y', linewidth=0.75, linestyle='-', color='0.75')\n", (17321, 17391), True, 'import matplotlib.pyplot as plt\n'), ((17401, 17447), 'matplotlib.pyplot.title', 'plt.title', (['"""Projected(Atom) Density of States"""'], {}), "('Projected(Atom) Density of States')\n", (17410, 17447), True, 'import matplotlib.pyplot as plt\n'), ((17457, 17529), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathit{E}-\\\\mathit{E}_\\\\mathrm{f} \\\\mathrm{(eV)}$"""', 'font'], {}), "('$\\\\mathit{E}-\\\\mathit{E}_\\\\mathrm{f} \\\\mathrm{(eV)}$', font)\n", (17467, 17529), True, 'import matplotlib.pyplot as plt\n'), ((17536, 17562), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""States"""', 'font'], {}), "('States', font)\n", (17546, 17562), True, 'import 
matplotlib.pyplot as plt\n'), ((17641, 17659), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17657, 17659), True, 'import matplotlib.pyplot as plt\n'), ((17669, 17697), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s' % filename)"], {}), "('%s' % filename)\n", (17680, 17697), True, 'import matplotlib.pyplot as plt\n'), ((17707, 17718), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17716, 17718), True, 'import matplotlib.pyplot as plt\n'), ((18796, 18874), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""x"""', 'linewidth': '(0.75)', 'linestyle': '"""-"""', 'color': '"""0.75"""'}), "(which='major', axis='x', linewidth=0.75, linestyle='-', color='0.75')\n", (18804, 18874), True, 'import matplotlib.pyplot as plt\n'), ((18884, 18962), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""y"""', 'linewidth': '(0.75)', 'linestyle': '"""-"""', 'color': '"""0.75"""'}), "(which='major', axis='y', linewidth=0.75, linestyle='-', color='0.75')\n", (18892, 18962), True, 'import matplotlib.pyplot as plt\n'), ((18972, 19008), 'matplotlib.pyplot.title', 'plt.title', (['"""Total Density of States"""'], {}), "('Total Density of States')\n", (18981, 19008), True, 'import matplotlib.pyplot as plt\n'), ((19018, 19074), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathit{E}-\\\\mathit{E}_\\\\mathrm{f} (eV)$"""'], {}), "('$\\\\mathit{E}-\\\\mathit{E}_\\\\mathrm{f} (eV)$')\n", (19028, 19074), True, 'import matplotlib.pyplot as plt\n'), ((19082, 19102), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""States"""'], {}), "('States')\n", (19092, 19102), True, 'import matplotlib.pyplot as plt\n'), ((19112, 19124), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19122, 19124), True, 'import matplotlib.pyplot as plt\n'), ((19134, 19152), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19150, 19152), True, 'import matplotlib.pyplot as plt\n'), ((19162, 
19190), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s' % filename)"], {}), "('%s' % filename)\n", (19173, 19190), True, 'import matplotlib.pyplot as plt\n'), ((19200, 19211), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19209, 19211), True, 'import matplotlib.pyplot as plt\n'), ((21139, 21158), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (21147, 21158), False, 'import os\n'), ((21168, 21205), 'os.system', 'os.system', (['"""mkdir -p post-processing"""'], {}), "('mkdir -p post-processing')\n", (21177, 21205), False, 'import os\n'), ((22155, 22170), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (22163, 22170), False, 'import os\n'), ((23316, 23335), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (23324, 23335), False, 'import os\n'), ((23345, 23413), 'os.system', 'os.system', (['("ls | grep \'%s.pdos_\' > projwfc-pdos-file.data" % filpdos)'], {}), '("ls | grep \'%s.pdos_\' > projwfc-pdos-file.data" % filpdos)\n', (23354, 23413), False, 'import os\n'), ((24047, 24062), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (24055, 24062), False, 'import os\n'), ((27025, 27103), 'matplotlib.pyplot.plot', 'plt.plot', (['self.energies[begin:end]', 'self.tdos[begin:end, 2]'], {'label': '"""Total-DOS"""'}), "(self.energies[begin:end], self.tdos[begin:end, 2], label='Total-DOS')\n", (27033, 27103), True, 'import matplotlib.pyplot as plt\n'), ((27173, 27208), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': 'fontsize'}), '(labelsize=fontsize)\n', (27188, 27208), True, 'import matplotlib.pyplot as plt\n'), ((27220, 27298), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""x"""', 'linewidth': '(0.75)', 'linestyle': '"""-"""', 'color': '"""0.75"""'}), "(which='major', axis='x', linewidth=0.75, linestyle='-', color='0.75')\n", (27228, 27298), True, 'import matplotlib.pyplot as plt\n'), ((27308, 27386), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': 
'"""major"""', 'axis': '"""y"""', 'linewidth': '(0.75)', 'linestyle': '"""-"""', 'color': '"""0.75"""'}), "(which='major', axis='y', linewidth=0.75, linestyle='-', color='0.75')\n", (27316, 27386), True, 'import matplotlib.pyplot as plt\n'), ((27396, 27436), 'matplotlib.pyplot.title', 'plt.title', (['"""Projected Density of States"""'], {}), "('Projected Density of States')\n", (27405, 27436), True, 'import matplotlib.pyplot as plt\n'), ((27446, 27518), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathit{E}-\\\\mathit{E}_\\\\mathrm{f} \\\\mathrm{(eV)}$"""', 'font'], {}), "('$\\\\mathit{E}-\\\\mathit{E}_\\\\mathrm{f} \\\\mathrm{(eV)}$', font)\n", (27456, 27518), True, 'import matplotlib.pyplot as plt\n'), ((27525, 27551), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""States"""', 'font'], {}), "('States', font)\n", (27535, 27551), True, 'import matplotlib.pyplot as plt\n'), ((27561, 27582), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': 'font'}), '(prop=font)\n', (27571, 27582), True, 'import matplotlib.pyplot as plt\n'), ((27592, 27610), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (27608, 27610), True, 'import matplotlib.pyplot as plt\n'), ((27620, 27648), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s' % filename)"], {}), "('%s' % filename)\n", (27631, 27648), True, 'import matplotlib.pyplot as plt\n'), ((27658, 27669), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (27667, 27669), True, 'import matplotlib.pyplot as plt\n'), ((28851, 28929), 'matplotlib.pyplot.plot', 'plt.plot', (['self.energies[begin:end]', 'self.tdos[begin:end, 2]'], {'label': '"""Total-DOS"""'}), "(self.energies[begin:end], self.tdos[begin:end, 2], label='Total-DOS')\n", (28859, 28929), True, 'import matplotlib.pyplot as plt\n'), ((29010, 29045), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': 'fontsize'}), '(labelsize=fontsize)\n', (29025, 29045), True, 'import matplotlib.pyplot as plt\n'), ((29057, 29135), 
'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""x"""', 'linewidth': '(0.75)', 'linestyle': '"""-"""', 'color': '"""0.75"""'}), "(which='major', axis='x', linewidth=0.75, linestyle='-', color='0.75')\n", (29065, 29135), True, 'import matplotlib.pyplot as plt\n'), ((29145, 29223), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""y"""', 'linewidth': '(0.75)', 'linestyle': '"""-"""', 'color': '"""0.75"""'}), "(which='major', axis='y', linewidth=0.75, linestyle='-', color='0.75')\n", (29153, 29223), True, 'import matplotlib.pyplot as plt\n'), ((29233, 29279), 'matplotlib.pyplot.title', 'plt.title', (['"""Projected(Atom) Density of States"""'], {}), "('Projected(Atom) Density of States')\n", (29242, 29279), True, 'import matplotlib.pyplot as plt\n'), ((29289, 29361), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathit{E}-\\\\mathit{E}_\\\\mathrm{f} \\\\mathrm{(eV)}$"""', 'font'], {}), "('$\\\\mathit{E}-\\\\mathit{E}_\\\\mathrm{f} \\\\mathrm{(eV)}$', font)\n", (29299, 29361), True, 'import matplotlib.pyplot as plt\n'), ((29368, 29394), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""States"""', 'font'], {}), "('States', font)\n", (29378, 29394), True, 'import matplotlib.pyplot as plt\n'), ((29404, 29425), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': 'font'}), '(prop=font)\n', (29414, 29425), True, 'import matplotlib.pyplot as plt\n'), ((29435, 29453), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (29451, 29453), True, 'import matplotlib.pyplot as plt\n'), ((29463, 29491), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s' % filename)"], {}), "('%s' % filename)\n", (29474, 29491), True, 'import matplotlib.pyplot as plt\n'), ((29501, 29512), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (29510, 29512), True, 'import matplotlib.pyplot as plt\n'), ((30222, 30300), 'matplotlib.pyplot.plot', 'plt.plot', (['self.energies[begin:end]', 'self.tdos[begin:end, 
2]'], {'label': '"""Total-DOS"""'}), "(self.energies[begin:end], self.tdos[begin:end, 2], label='Total-DOS')\n", (30230, 30300), True, 'import matplotlib.pyplot as plt\n'), ((30312, 30390), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""x"""', 'linewidth': '(0.75)', 'linestyle': '"""-"""', 'color': '"""0.75"""'}), "(which='major', axis='x', linewidth=0.75, linestyle='-', color='0.75')\n", (30320, 30390), True, 'import matplotlib.pyplot as plt\n'), ((30400, 30478), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""y"""', 'linewidth': '(0.75)', 'linestyle': '"""-"""', 'color': '"""0.75"""'}), "(which='major', axis='y', linewidth=0.75, linestyle='-', color='0.75')\n", (30408, 30478), True, 'import matplotlib.pyplot as plt\n'), ((30488, 30524), 'matplotlib.pyplot.title', 'plt.title', (['"""Total Density of States"""'], {}), "('Total Density of States')\n", (30497, 30524), True, 'import matplotlib.pyplot as plt\n'), ((30534, 30590), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathit{E}-\\\\mathit{E}_\\\\mathrm{f} (eV)$"""'], {}), "('$\\\\mathit{E}-\\\\mathit{E}_\\\\mathrm{f} (eV)$')\n", (30544, 30590), True, 'import matplotlib.pyplot as plt\n'), ((30598, 30618), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""States"""'], {}), "('States')\n", (30608, 30618), True, 'import matplotlib.pyplot as plt\n'), ((30628, 30640), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (30638, 30640), True, 'import matplotlib.pyplot as plt\n'), ((30650, 30668), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (30666, 30668), True, 'import matplotlib.pyplot as plt\n'), ((30678, 30706), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s' % filename)"], {}), "('%s' % filename)\n", (30689, 30706), True, 'import matplotlib.pyplot as plt\n'), ((30716, 30727), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (30725, 30727), True, 'import matplotlib.pyplot as plt\n'), ((32655, 
32674), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (32663, 32674), False, 'import os\n'), ((32684, 32721), 'os.system', 'os.system', (['"""mkdir -p post-processing"""'], {}), "('mkdir -p post-processing')\n", (32693, 32721), False, 'import os\n'), ((33663, 33678), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (33671, 33678), False, 'import os\n'), ((901, 926), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (915, 926), False, 'import os\n'), ((1257, 1268), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1265, 1268), False, 'import sys\n'), ((14186, 14253), 'matplotlib.pyplot.plot', 'plt.plot', (['self.energies[begin:end]', 'data[key][begin:end]'], {'label': 'key'}), '(self.energies[begin:end], data[key][begin:end], label=key)\n', (14194, 14253), True, 'import matplotlib.pyplot as plt\n'), ((14399, 14492), 'matplotlib.pyplot.plot', 'plt.plot', (['self.energies[begin:end]', "self.data_0_tdos['dos'][begin:end]"], {'label': '"""Total-DOS"""'}), "(self.energies[begin:end], self.data_0_tdos['dos'][begin:end],\n label='Total-DOS')\n", (14407, 14492), True, 'import matplotlib.pyplot as plt\n'), ((17610, 17631), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': 'font'}), '(prop=font)\n', (17620, 17631), True, 'import matplotlib.pyplot as plt\n'), ((18430, 18523), 'matplotlib.pyplot.plot', 'plt.plot', (['self.energies[begin:end]', "self.data_0_tdos['dos'][begin:end]"], {'label': '"""Total-DOS"""'}), "(self.energies[begin:end], self.data_0_tdos['dos'][begin:end],\n label='Total-DOS')\n", (18438, 18523), True, 'import matplotlib.pyplot as plt\n'), ((22937, 22962), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (22951, 22962), False, 'import os\n'), ((23293, 23304), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (23301, 23304), False, 'import sys\n'), ((24252, 24284), 'os.path.join', 'os.path.join', (['directory', 'nscfout'], {}), '(directory, nscfout)\n', (24264, 24284), False, 'import 
os\n'), ((26882, 26949), 'matplotlib.pyplot.plot', 'plt.plot', (['self.energies[begin:end]', 'data[key][begin:end]'], {'label': 'key'}), '(self.energies[begin:end], data[key][begin:end], label=key)\n', (26890, 26949), True, 'import matplotlib.pyplot as plt\n'), ((8960, 9031), 'os.path.join', 'os.path.join', (['directory', '"""pdos-projected-to-element-and-orbital-l.data"""'], {}), "(directory, 'pdos-projected-to-element-and-orbital-l.data')\n", (8972, 9031), False, 'import os\n'), ((14568, 14657), 'matplotlib.pyplot.plot', 'plt.plot', (['self.energies[begin:end]', "self.data_1_tdos['dos_up']"], {'label': '"""Total-DOS-Up"""'}), "(self.energies[begin:end], self.data_1_tdos['dos_up'], label=\n 'Total-DOS-Up')\n", (14576, 14657), True, 'import matplotlib.pyplot as plt\n'), ((14666, 14758), 'matplotlib.pyplot.plot', 'plt.plot', (['self.energies[begin:end]', "(-self.data_1_tdos['dos_dw'])"], {'label': '"""Total-DOS-Down"""'}), "(self.energies[begin:end], -self.data_1_tdos['dos_dw'], label=\n 'Total-DOS-Down')\n", (14674, 14758), True, 'import matplotlib.pyplot as plt\n'), ((18599, 18688), 'matplotlib.pyplot.plot', 'plt.plot', (['self.energies[begin:end]', "self.data_1_tdos['dos_up']"], {'label': '"""Total-DOS-Up"""'}), "(self.energies[begin:end], self.data_1_tdos['dos_up'], label=\n 'Total-DOS-Up')\n", (18607, 18688), True, 'import matplotlib.pyplot as plt\n'), ((18697, 18789), 'matplotlib.pyplot.plot', 'plt.plot', (['self.energies[begin:end]', "(-self.data_1_tdos['dos_dw'])"], {'label': '"""Total-DOS-Down"""'}), "(self.energies[begin:end], -self.data_1_tdos['dos_dw'], label=\n 'Total-DOS-Down')\n", (18705, 18789), True, 'import matplotlib.pyplot as plt\n'), ((24640, 24671), 'os.path.join', 'os.path.join', (['directory', 'scfout'], {}), '(directory, scfout)\n', (24652, 24671), False, 'import os\n'), ((25374, 25385), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (25382, 25385), False, 'import sys\n'), ((1988, 2001), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', 
(1998, 2001), True, 'import numpy as np\n'), ((2794, 2825), 'os.path.join', 'os.path.join', (['directory', 'scfout'], {}), '(directory, scfout)\n', (2806, 2825), False, 'import os\n'), ((9651, 9719), 'os.path.join', 'os.path.join', (['directory', '"""pdos-projected-to-atom-and-orbital-l.data"""'], {}), "(directory, 'pdos-projected-to-atom-and-orbital-l.data')\n", (9663, 9719), False, 'import os\n'), ((11454, 11495), 'os.path.join', 'os.path.join', (['directory', '"""total-dos.data"""'], {}), "(directory, 'total-dos.data')\n", (11466, 11495), False, 'import os\n'), ((24024, 24037), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (24034, 24037), True, 'import numpy as np\n'), ((24310, 24342), 'os.path.join', 'os.path.join', (['directory', 'nscfout'], {}), '(directory, nscfout)\n', (24322, 24342), False, 'import os\n'), ((1656, 1669), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (1666, 1669), True, 'import numpy as np\n'), ((3151, 3183), 'os.path.join', 'os.path.join', (['directory', 'nscfout'], {}), '(directory, nscfout)\n', (3163, 3183), False, 'import os\n'), ((10451, 10519), 'os.path.join', 'os.path.join', (['directory', '"""pdos-projected-to-atom-and-orbital-l.data"""'], {}), "(directory, 'pdos-projected-to-atom-and-orbital-l.data')\n", (10463, 10519), False, 'import os\n'), ((11892, 11933), 'os.path.join', 'os.path.join', (['directory', '"""total-dos.data"""'], {}), "(directory, 'total-dos.data')\n", (11904, 11933), False, 'import os\n'), ((23692, 23705), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (23702, 23705), True, 'import numpy as np\n'), ((24697, 24728), 'os.path.join', 'os.path.join', (['directory', 'scfout'], {}), '(directory, scfout)\n', (24709, 24728), False, 'import os\n')] |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parameter resolvers."""
import fractions
import numpy as np
import pytest
import sympy
import cirq
@pytest.mark.parametrize(
    'val',
    [3.2, np.float32(3.2), int(1), np.int32(45), np.float64(6.3), np.int32(2),
     np.complex64(1j), np.complex128(2j), complex(1j), fractions.Fraction(3, 2)],
)
def test_value_of_pass_through_types(val):
    """Known numeric types resolve to themselves, with the same exact type."""
    _assert_consistent_resolution(val, val)
@pytest.mark.parametrize(
    'val,resolved',
    [
        (sympy.pi, np.pi),
        (sympy.S.NegativeOne, -1),
        (sympy.S.Half, 0.5),
        (sympy.S.One, 1),
    ],
)
def test_value_of_transformed_types(val, resolved):
    """Known sympy constants resolve to their plain numeric counterparts."""
    _assert_consistent_resolution(val, resolved)
@pytest.mark.parametrize('val,resolved', [(sympy.I, 1j)])
def test_value_of_substituted_types(val, resolved):
    """Values without a fast path (the imaginary unit) resolve via sympy.subs."""
    _assert_consistent_resolution(val, resolved, True)
def _assert_consistent_resolution(v, resolved, subs_called=False):
    """Asserts that parameter resolution works consistently.

    The ParamResolver.value_of method can resolve any Sympy expression -
    subclasses of sympy.Basic. In the generic case, it calls `sympy.Basic.subs`
    to substitute symbols with values specified in a dict, which is known to be
    very slow. Instead value_of defines a pass-through shortcut for known
    numeric types. For a given value `v` it is asserted that value_of resolves
    it to `resolved`, with the exact type of `resolved`. `subs_called` indicates
    whether it is expected to have `subs` called or not during the resolution.

    Args:
        v: the value to resolve
        resolved: the expected resolution result
        subs_called: if True, it is expected that the slow subs method is called

    Raises:
        AssertionError in case a resolution assertion fails.
    """

    class SubsAwareSymbol(sympy.Symbol):
        """A Symbol that registers a call to its `subs` method."""

        def __init__(self, sym: str):
            self.called = False
            self.symbol = sympy.Symbol(sym)

        # note: super().subs() doesn't resolve based on the param_dict properly
        # for some reason, that's why a delegate (self.symbol) is used instead
        def subs(self, *args, **kwargs):
            self.called = True
            return self.symbol.subs(*args, **kwargs)

    r = cirq.ParamResolver({'a': v})

    # symbol based resolution
    s = SubsAwareSymbol('a')
    assert r.value_of(s) == resolved, f"expected {resolved}, got {r.value_of(s)}"
    # Bug fix: the old failure message always read "sympy.subs shouldn't have
    # been called", which was wrong whenever subs_called=True (the call was
    # expected then and simply did not happen).
    assert subs_called == s.called, (
        f"For type {type(v)} sympy.subs should"
        f"{'' if subs_called else ' not'} have been called."
    )
    assert isinstance(
        r.value_of(s), type(resolved)
    ), f"expected {type(resolved)} got {type(r.value_of(s))}"

    # string based resolution (which in turn uses symbol based resolution)
    assert r.value_of('a') == resolved, f"expected {resolved}, got {r.value_of('a')}"
    assert isinstance(
        r.value_of('a'), type(resolved)
    ), f"expected {type(resolved)} got {type(r.value_of('a'))}"

    # value based resolution
    assert r.value_of(v) == resolved, f"expected {resolved}, got {r.value_of(v)}"
    assert isinstance(
        r.value_of(v), type(resolved)
    ), f"expected {type(resolved)} got {type(r.value_of(v))}"
def test_value_of_strings():
    """An unknown string key resolves to the corresponding sympy Symbol."""
    resolved = cirq.ParamResolver().value_of('x')
    assert resolved == sympy.Symbol('x')
def test_value_of_calculations():
    """Compound sympy expressions evaluate against the param dict values."""
    assert not bool(cirq.ParamResolver())

    a, b = sympy.Symbol('a'), sympy.Symbol('b')
    r = cirq.ParamResolver({'a': 0.5, 'b': 0.1, 'c': 1 + 1j})
    assert bool(r)

    assert r.value_of(2 * sympy.pi) == 2 * np.pi
    assert r.value_of(4**a + b * 10) == 3
    assert r.value_of(sympy.I * sympy.pi) == np.pi * 1j
    assert r.value_of(a * 3) == 1.5
    assert r.value_of(b / 0.1 - a) == 0.5
def test_param_dict():
    """Constructing from an existing resolver returns that resolver itself."""
    r1 = cirq.ParamResolver({'a': 0.5, 'b': 0.1})
    assert cirq.ParamResolver(r1) is r1
    assert r1.param_dict == {'a': 0.5, 'b': 0.1}
def test_param_dict_iter():
r = cirq.ParamResolver({'a': 0.5, 'b': 0.1})
assert [key for key in r] == ['a', 'b']
assert [r.value_of(key) for key in r] == [0.5, 0.1]
assert list(r) == ['a', 'b']
def test_formulas_in_param_dict():
    """Formulas and symbols may appear as both keys and values in param dicts.

    Param dicts are allowed to have str or sympy.Symbol as keys and
    floats or sympy.Symbol as values. This should not be a common use case,
    but this test makes sure something reasonable is returned when
    mixing these types and using formulas in ParamResolvers.

    Note that sympy orders expressions for deterministic resolution, so
    depending on the operands sent to sub(), the expression may not fully
    resolve if it needs to take several iterations of resolution.
    """
    a = sympy.Symbol('a')
    b = sympy.Symbol('b')
    c = sympy.Symbol('c')
    e = sympy.Symbol('e')
    r = cirq.ParamResolver({a: b + 1, b: 2, b + c: 101, 'd': 2 * e})
    assert sympy.Eq(r.value_of('a'), 3)
    assert sympy.Eq(r.value_of('b'), 2)
    assert sympy.Eq(r.value_of(b + c), 101)
    assert sympy.Eq(r.value_of('c'), c)
    assert sympy.Eq(r.value_of('d'), 2 * e)
def test_recursive_evaluation():
    """Resolution follows chains of symbol definitions to their final value."""
    a, b, c, d, e = sympy.symbols('a b c d e')
    r = cirq.ParamResolver({a: a, b: e + 2, c: b + d, d: a + 3, e: 0})

    # sympy.Basic.subs evaluates in alphabetical order, hence the partial result.
    assert c.subs(r.param_dict) == b + a + 3

    assert r.value_of(a) == a
    assert sympy.Eq(r.value_of(b), 2)
    assert sympy.Eq(r.value_of(c), a + 5)
    assert sympy.Eq(r.value_of(d), a + 3)
    assert sympy.Eq(r.value_of(e), 0)
def test_unbound_recursion_halted():
    """Cyclic definitions raise RecursionError; terminal self-references don't."""
    a, b, c = sympy.symbols('a b c')

    # Non-recursive resolution ignores loops
    r = cirq.ParamResolver({a: b, b: a})
    assert r.value_of(a, recursive=False) == b
    assert r.value_of(r.value_of(a, recursive=False), recursive=False) == a

    # Self-definition is OK (this is a terminal symbol)
    r = cirq.ParamResolver({a: a})
    assert r.value_of(a) == a

    # Genuine cycles cannot be resolved recursively.
    cyclic_dicts = (
        {a: a + 1},
        {a: b, b: a},
        {a: b, b: c, c: b},
        {a: b + c, b: 1, c: a},
    )
    for param_dict in cyclic_dicts:
        r = cirq.ParamResolver(param_dict)
        with pytest.raises(RecursionError):
            _ = r.value_of(a)
def test_resolve_unknown_type():
    """Values of types the resolver does not know pass through unchanged."""
    a, b = sympy.symbols('a b')
    r = cirq.ParamResolver({a: b})
    assert r.value_of(cirq.X) == cirq.X
def test_custom_resolved_value():
    """The `_resolved_value_` protocol controls how custom objects resolve."""

    class Foo:
        def _resolved_value_(self):
            return self

    class Bar:
        def _resolved_value_(self):
            return NotImplemented

    class Baz:
        def _resolved_value_(self):
            return 'Baz'

    foo, bar, baz = Foo(), Bar(), Baz()
    a, b, c = sympy.symbols('a b c')
    r = cirq.ParamResolver({a: foo, b: bar, c: baz})
    # Foo resolves to itself; Bar's NotImplemented leaves the symbol untouched;
    # Baz resolves to the value its hook returns.
    assert r.value_of(a) is foo
    assert r.value_of(b) is b
    assert r.value_of(c) == 'Baz'
def test_compose():
    """Resolving a ParamResolver with another resolver composes the two.

    Calling cirq.resolve_parameters on a ParamResolver composes that resolver
    with the provided resolver.
    """
    a = sympy.Symbol('a')
    b = sympy.Symbol('b')
    c = sympy.Symbol('c')
    d = sympy.Symbol('d')
    r1 = cirq.ParamResolver({a: b})
    r2 = cirq.ParamResolver({b: c + d})
    r3 = cirq.ParamResolver({c: 12})

    r12 = cirq.resolve_parameters(r1, r2)
    assert r12.value_of('a') == c + d

    r23 = cirq.resolve_parameters(r2, r3)
    assert sympy.Eq(r23.value_of('b'), 12 + d)

    r123 = cirq.resolve_parameters(r12, r3)
    assert sympy.Eq(r123.value_of('a'), 12 + d)

    r13 = cirq.resolve_parameters(r1, r3)
    assert r13.value_of('a') == b
@pytest.mark.parametrize(
    'p1, p2, p3',
    [
        ({'a': 1}, {}, {}),
        ({}, {'a': 1}, {}),
        ({}, {}, {'a': 1}),
        ({'a': 'b'}, {}, {'b': 'c'}),
        ({'a': 'b'}, {'c': 'd'}, {'b': 'c'}),
        ({'a': 'b'}, {'c': 'a'}, {'b': 'd'}),
        ({'a': 'b'}, {'c': 'd', 'd': 1}, {'d': 2}),
        ({'a': 'b'}, {'c': 'd', 'd': 'a'}, {'b': 2}),
    ],
)
@pytest.mark.parametrize('resolve_fn', [cirq.resolve_parameters, cirq.resolve_parameters_once])
def test_compose_associative(p1, p2, p3, resolve_fn):
    """Resolver composition is associative regardless of grouping."""

    def _to_resolver(pd):
        # String values stand for symbols; everything else is a literal value.
        sympified = {
            sympy.Symbol(k): (sympy.Symbol(v) if isinstance(v, str) else v)
            for k, v in pd.items()
        }
        return cirq.ParamResolver(sympified)

    r1, r2, r3 = (_to_resolver(pd) for pd in (p1, p2, p3))
    left = resolve_fn(r1, resolve_fn(r2, r3))
    right = resolve_fn(resolve_fn(r1, r2), r3)
    assert sympy.Eq(left.param_dict, right.param_dict)
def test_equals():
    """Resolvers compare equal exactly when their param dicts are equal."""
    et = cirq.testing.EqualsTester()
    # All "empty" constructions are equivalent to each other.
    et.add_equality_group(
        cirq.ParamResolver(),
        cirq.ParamResolver(None),
        cirq.ParamResolver({}),
        cirq.ParamResolver(cirq.ParamResolver({})),
    )
    et.make_equality_group(lambda: cirq.ParamResolver({'a': 0.0}))
    # Each distinct param dict forms its own equality group.
    distinct_dicts = (
        {'a': 0.1},
        {'a': 0.0, 'b': 0.1},
        {'a': 0.3, 'b': 0.1},
        {'b': 0.1},
        {'c': 0.1},
    )
    for param_dict in distinct_dicts:
        et.add_equality_group(cirq.ParamResolver(param_dict))
def test_repr():
    """repr(resolver) round-trips through evaluation."""
    a, b = sympy.Symbol('a'), sympy.Symbol('b')
    for resolver in (
        cirq.ParamResolver(),
        cirq.ParamResolver({'a': 2.0}),
        cirq.ParamResolver({'a': a}),
        cirq.ParamResolver({a: b + 1}),
    ):
        cirq.testing.assert_equivalent_repr(resolver)
| [
"cirq.resolve_parameters",
"sympy.Symbol",
"cirq.testing.EqualsTester",
"numpy.complex128",
"numpy.float32",
"cirq.ParamResolver",
"pytest.raises",
"numpy.int32",
"numpy.complex64",
"pytest.mark.parametrize",
"numpy.float64",
"fractions.Fraction"
] | [((1073, 1204), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val,resolved"""', '[(sympy.pi, np.pi), (sympy.S.NegativeOne, -1), (sympy.S.Half, 0.5), (sympy.\n S.One, 1)]'], {}), "('val,resolved', [(sympy.pi, np.pi), (sympy.S.\n NegativeOne, -1), (sympy.S.Half, 0.5), (sympy.S.One, 1)])\n", (1096, 1204), False, 'import pytest\n'), ((1315, 1373), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val,resolved"""', '[(sympy.I, 1.0j)]'], {}), "('val,resolved', [(sympy.I, 1.0j)])\n", (1338, 1373), False, 'import pytest\n'), ((8937, 9251), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""p1, p2, p3"""', "[({'a': 1}, {}, {}), ({}, {'a': 1}, {}), ({}, {}, {'a': 1}), ({'a': 'b'}, {\n }, {'b': 'c'}), ({'a': 'b'}, {'c': 'd'}, {'b': 'c'}), ({'a': 'b'}, {'c':\n 'a'}, {'b': 'd'}), ({'a': 'b'}, {'c': 'd', 'd': 1}, {'d': 2}), ({'a':\n 'b'}, {'c': 'd', 'd': 'a'}, {'b': 2})]"], {}), "('p1, p2, p3', [({'a': 1}, {}, {}), ({}, {'a': 1}, {\n }), ({}, {}, {'a': 1}), ({'a': 'b'}, {}, {'b': 'c'}), ({'a': 'b'}, {'c':\n 'd'}, {'b': 'c'}), ({'a': 'b'}, {'c': 'a'}, {'b': 'd'}), ({'a': 'b'}, {\n 'c': 'd', 'd': 1}, {'d': 2}), ({'a': 'b'}, {'c': 'd', 'd': 'a'}, {'b': 2})]\n )\n", (8960, 9251), False, 'import pytest\n'), ((9316, 9415), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""resolve_fn"""', '[cirq.resolve_parameters, cirq.resolve_parameters_once]'], {}), "('resolve_fn', [cirq.resolve_parameters, cirq.\n resolve_parameters_once])\n", (9339, 9415), False, 'import pytest\n'), ((2924, 2952), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{'a': v}"], {}), "({'a': v})\n", (2942, 2952), False, 'import cirq\n'), ((4053, 4108), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{'a': 0.5, 'b': 0.1, 'c': 1 + 1.0j}"], {}), "({'a': 0.5, 'b': 0.1, 'c': 1 + 1.0j})\n", (4071, 4108), False, 'import cirq\n'), ((4467, 4507), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{'a': 0.5, 'b': 0.1}"], {}), "({'a': 0.5, 'b': 0.1})\n", (4485, 4507), False, 'import 
cirq\n'), ((4517, 4538), 'cirq.ParamResolver', 'cirq.ParamResolver', (['r'], {}), '(r)\n', (4535, 4538), False, 'import cirq\n'), ((4644, 4684), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{'a': 0.5, 'b': 0.1}"], {}), "({'a': 0.5, 'b': 0.1})\n", (4662, 4684), False, 'import cirq\n'), ((5456, 5473), 'sympy.Symbol', 'sympy.Symbol', (['"""a"""'], {}), "('a')\n", (5468, 5473), False, 'import sympy\n'), ((5482, 5499), 'sympy.Symbol', 'sympy.Symbol', (['"""b"""'], {}), "('b')\n", (5494, 5499), False, 'import sympy\n'), ((5508, 5525), 'sympy.Symbol', 'sympy.Symbol', (['"""c"""'], {}), "('c')\n", (5520, 5525), False, 'import sympy\n'), ((5534, 5551), 'sympy.Symbol', 'sympy.Symbol', (['"""e"""'], {}), "('e')\n", (5546, 5551), False, 'import sympy\n'), ((5560, 5622), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{a: b + 1, b: 2, (b + c): 101, 'd': 2 * e}"], {}), "({a: b + 1, b: 2, (b + c): 101, 'd': 2 * e})\n", (5578, 5622), False, 'import cirq\n'), ((5916, 5933), 'sympy.Symbol', 'sympy.Symbol', (['"""a"""'], {}), "('a')\n", (5928, 5933), False, 'import sympy\n'), ((5942, 5959), 'sympy.Symbol', 'sympy.Symbol', (['"""b"""'], {}), "('b')\n", (5954, 5959), False, 'import sympy\n'), ((5968, 5985), 'sympy.Symbol', 'sympy.Symbol', (['"""c"""'], {}), "('c')\n", (5980, 5985), False, 'import sympy\n'), ((5994, 6011), 'sympy.Symbol', 'sympy.Symbol', (['"""d"""'], {}), "('d')\n", (6006, 6011), False, 'import sympy\n'), ((6020, 6037), 'sympy.Symbol', 'sympy.Symbol', (['"""e"""'], {}), "('e')\n", (6032, 6037), False, 'import sympy\n'), ((6046, 6108), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{a: a, b: e + 2, c: b + d, d: a + 3, e: 0}'], {}), '({a: a, b: e + 2, c: b + d, d: a + 3, e: 0})\n', (6064, 6108), False, 'import cirq\n'), ((6534, 6551), 'sympy.Symbol', 'sympy.Symbol', (['"""a"""'], {}), "('a')\n", (6546, 6551), False, 'import sympy\n'), ((6560, 6577), 'sympy.Symbol', 'sympy.Symbol', (['"""b"""'], {}), "('b')\n", (6572, 6577), False, 'import sympy\n'), ((6586, 6603), 
'sympy.Symbol', 'sympy.Symbol', (['"""c"""'], {}), "('c')\n", (6598, 6603), False, 'import sympy\n'), ((6658, 6690), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{a: b, b: a}'], {}), '({a: b, b: a})\n', (6676, 6690), False, 'import cirq\n'), ((6879, 6905), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{a: a}'], {}), '({a: a})\n', (6897, 6905), False, 'import cirq\n'), ((6945, 6975), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{a: a + 1}'], {}), '({a: a + 1})\n', (6963, 6975), False, 'import cirq\n'), ((7051, 7083), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{a: b, b: a}'], {}), '({a: b, b: a})\n', (7069, 7083), False, 'import cirq\n'), ((7159, 7197), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{a: b, b: c, c: b}'], {}), '({a: b, b: c, c: b})\n', (7177, 7197), False, 'import cirq\n'), ((7273, 7315), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{a: b + c, b: 1, c: a}'], {}), '({a: b + c, b: 1, c: a})\n', (7291, 7315), False, 'import cirq\n'), ((7425, 7442), 'sympy.Symbol', 'sympy.Symbol', (['"""a"""'], {}), "('a')\n", (7437, 7442), False, 'import sympy\n'), ((7451, 7468), 'sympy.Symbol', 'sympy.Symbol', (['"""b"""'], {}), "('b')\n", (7463, 7468), False, 'import sympy\n'), ((7477, 7503), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{a: b}'], {}), '({a: b})\n', (7495, 7503), False, 'import cirq\n'), ((7876, 7893), 'sympy.Symbol', 'sympy.Symbol', (['"""a"""'], {}), "('a')\n", (7888, 7893), False, 'import sympy\n'), ((7902, 7919), 'sympy.Symbol', 'sympy.Symbol', (['"""b"""'], {}), "('b')\n", (7914, 7919), False, 'import sympy\n'), ((7928, 7945), 'sympy.Symbol', 'sympy.Symbol', (['"""c"""'], {}), "('c')\n", (7940, 7945), False, 'import sympy\n'), ((7954, 7998), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{a: foo, b: bar, c: baz}'], {}), '({a: foo, b: bar, c: baz})\n', (7972, 7998), False, 'import cirq\n'), ((8340, 8357), 'sympy.Symbol', 'sympy.Symbol', (['"""a"""'], {}), "('a')\n", (8352, 8357), False, 'import sympy\n'), ((8366, 8383), 'sympy.Symbol', 
'sympy.Symbol', (['"""b"""'], {}), "('b')\n", (8378, 8383), False, 'import sympy\n'), ((8392, 8409), 'sympy.Symbol', 'sympy.Symbol', (['"""c"""'], {}), "('c')\n", (8404, 8409), False, 'import sympy\n'), ((8418, 8435), 'sympy.Symbol', 'sympy.Symbol', (['"""d"""'], {}), "('d')\n", (8430, 8435), False, 'import sympy\n'), ((8445, 8471), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{a: b}'], {}), '({a: b})\n', (8463, 8471), False, 'import cirq\n'), ((8481, 8511), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{b: c + d}'], {}), '({b: c + d})\n', (8499, 8511), False, 'import cirq\n'), ((8521, 8548), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{c: 12}'], {}), '({c: 12})\n', (8539, 8548), False, 'import cirq\n'), ((8560, 8591), 'cirq.resolve_parameters', 'cirq.resolve_parameters', (['r1', 'r2'], {}), '(r1, r2)\n', (8583, 8591), False, 'import cirq\n'), ((8641, 8672), 'cirq.resolve_parameters', 'cirq.resolve_parameters', (['r2', 'r3'], {}), '(r2, r3)\n', (8664, 8672), False, 'import cirq\n'), ((8732, 8764), 'cirq.resolve_parameters', 'cirq.resolve_parameters', (['r12', 'r3'], {}), '(r12, r3)\n', (8755, 8764), False, 'import cirq\n'), ((8824, 8855), 'cirq.resolve_parameters', 'cirq.resolve_parameters', (['r1', 'r3'], {}), '(r1, r3)\n', (8847, 8855), False, 'import cirq\n'), ((9818, 9845), 'cirq.testing.EqualsTester', 'cirq.testing.EqualsTester', ([], {}), '()\n', (9843, 9845), False, 'import cirq\n'), ((765, 780), 'numpy.float32', 'np.float32', (['(3.2)'], {}), '(3.2)\n', (775, 780), True, 'import numpy as np\n'), ((806, 818), 'numpy.int32', 'np.int32', (['(45)'], {}), '(45)\n', (814, 818), True, 'import numpy as np\n'), ((828, 843), 'numpy.float64', 'np.float64', (['(6.3)'], {}), '(6.3)\n', (838, 843), True, 'import numpy as np\n'), ((853, 864), 'numpy.int32', 'np.int32', (['(2)'], {}), '(2)\n', (861, 864), True, 'import numpy as np\n'), ((874, 892), 'numpy.complex64', 'np.complex64', (['(1.0j)'], {}), '(1.0j)\n', (886, 892), True, 'import numpy as np\n'), ((900, 919), 
'numpy.complex128', 'np.complex128', (['(2.0j)'], {}), '(2.0j)\n', (913, 919), True, 'import numpy as np\n'), ((948, 972), 'fractions.Fraction', 'fractions.Fraction', (['(3)', '(2)'], {}), '(3, 2)\n', (966, 972), False, 'import fractions\n'), ((3948, 3965), 'sympy.Symbol', 'sympy.Symbol', (['"""x"""'], {}), "('x')\n", (3960, 3965), False, 'import sympy\n'), ((6985, 7014), 'pytest.raises', 'pytest.raises', (['RecursionError'], {}), '(RecursionError)\n', (6998, 7014), False, 'import pytest\n'), ((7093, 7122), 'pytest.raises', 'pytest.raises', (['RecursionError'], {}), '(RecursionError)\n', (7106, 7122), False, 'import pytest\n'), ((7207, 7236), 'pytest.raises', 'pytest.raises', (['RecursionError'], {}), '(RecursionError)\n', (7220, 7236), False, 'import pytest\n'), ((7325, 7354), 'pytest.raises', 'pytest.raises', (['RecursionError'], {}), '(RecursionError)\n', (7338, 7354), False, 'import pytest\n'), ((9881, 9901), 'cirq.ParamResolver', 'cirq.ParamResolver', ([], {}), '()\n', (9899, 9901), False, 'import cirq\n'), ((9911, 9935), 'cirq.ParamResolver', 'cirq.ParamResolver', (['None'], {}), '(None)\n', (9929, 9935), False, 'import cirq\n'), ((9945, 9967), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{}'], {}), '({})\n', (9963, 9967), False, 'import cirq\n'), ((10120, 10150), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{'a': 0.1}"], {}), "({'a': 0.1})\n", (10138, 10150), False, 'import cirq\n'), ((10178, 10218), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{'a': 0.0, 'b': 0.1}"], {}), "({'a': 0.0, 'b': 0.1})\n", (10196, 10218), False, 'import cirq\n'), ((10246, 10286), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{'a': 0.3, 'b': 0.1}"], {}), "({'a': 0.3, 'b': 0.1})\n", (10264, 10286), False, 'import cirq\n'), ((10314, 10344), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{'b': 0.1}"], {}), "({'b': 0.1})\n", (10332, 10344), False, 'import cirq\n'), ((10372, 10402), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{'c': 0.1}"], {}), "({'c': 0.1})\n", (10390, 
10402), False, 'import cirq\n'), ((10463, 10483), 'cirq.ParamResolver', 'cirq.ParamResolver', ([], {}), '()\n', (10481, 10483), False, 'import cirq\n'), ((10525, 10555), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{'a': 2.0}"], {}), "({'a': 2.0})\n", (10543, 10555), False, 'import cirq\n'), ((2612, 2629), 'sympy.Symbol', 'sympy.Symbol', (['sym'], {}), '(sym)\n', (2624, 2629), False, 'import sympy\n'), ((4022, 4042), 'cirq.ParamResolver', 'cirq.ParamResolver', ([], {}), '()\n', (4040, 4042), False, 'import cirq\n'), ((9996, 10018), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{}'], {}), '({})\n', (10014, 10018), False, 'import cirq\n'), ((10062, 10092), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{'a': 0.0}"], {}), "({'a': 0.0})\n", (10080, 10092), False, 'import cirq\n'), ((3910, 3930), 'cirq.ParamResolver', 'cirq.ParamResolver', ([], {}), '()\n', (3928, 3930), False, 'import cirq\n'), ((4330, 4347), 'sympy.Symbol', 'sympy.Symbol', (['"""a"""'], {}), "('a')\n", (4342, 4347), False, 'import sympy\n'), ((4408, 4425), 'sympy.Symbol', 'sympy.Symbol', (['"""a"""'], {}), "('a')\n", (4420, 4425), False, 'import sympy\n'), ((9525, 9540), 'sympy.Symbol', 'sympy.Symbol', (['k'], {}), '(k)\n', (9537, 9540), False, 'import sympy\n'), ((10622, 10639), 'sympy.Symbol', 'sympy.Symbol', (['"""a"""'], {}), "('a')\n", (10634, 10639), False, 'import sympy\n'), ((10712, 10729), 'sympy.Symbol', 'sympy.Symbol', (['"""a"""'], {}), "('a')\n", (10724, 10729), False, 'import sympy\n'), ((4203, 4220), 'sympy.Symbol', 'sympy.Symbol', (['"""a"""'], {}), "('a')\n", (4215, 4220), False, 'import sympy\n'), ((4223, 4240), 'sympy.Symbol', 'sympy.Symbol', (['"""b"""'], {}), "('b')\n", (4235, 4240), False, 'import sympy\n'), ((4382, 4399), 'sympy.Symbol', 'sympy.Symbol', (['"""b"""'], {}), "('b')\n", (4394, 4399), False, 'import sympy\n'), ((9543, 9558), 'sympy.Symbol', 'sympy.Symbol', (['v'], {}), '(v)\n', (9555, 9558), False, 'import sympy\n'), ((10731, 10748), 'sympy.Symbol', 
'sympy.Symbol', (['"""b"""'], {}), "('b')\n", (10743, 10748), False, 'import sympy\n')] |
# %jupyter_snippet import
import pinocchio as pin
from utils.meshcat_viewer_wrapper import MeshcatVisualizer
import time
import numpy as np
from numpy.linalg import inv,norm,pinv,svd,eig
from scipy.optimize import fmin_bfgs,fmin_slsqp
from utils.load_ur5_with_obstacles import load_ur5_with_obstacles,Target
import matplotlib.pylab as plt
# %end_jupyter_snippet
plt.ion() # matplotlib with interactive setting
# %jupyter_snippet robot
robot = load_ur5_with_obstacles(reduced=True)
# %end_jupyter_snippet
# %jupyter_snippet viewer
viz = MeshcatVisualizer(robot)
viz.display(robot.q0)
# %end_jupyter_snippet
# %jupyter_snippet target
target = Target(viz,position = np.array([.5,.5]))
# %end_jupyter_snippet
################################################################################
################################################################################
################################################################################
# %jupyter_snippet endef
def endef(q):
'''Return the 2d position of the end effector.'''
pin.framesForwardKinematics(robot.model,robot.data,q)
return robot.data.oMf[-1].translation[[0,2]]
# %end_jupyter_snippet
# %jupyter_snippet dist
def dist(q):
'''Return the distance between the end effector end the target (2d).'''
return norm(endef(q)-target.position)
# %end_jupyter_snippet
# %jupyter_snippet coll
def coll(q):
'''Return true if in collision, false otherwise.'''
pin.updateGeometryPlacements(robot.model,robot.data,robot.collision_model,robot.collision_data,q)
return pin.computeCollisions(robot.collision_model,robot.collision_data,False)
# %end_jupyter_snippet
# %jupyter_snippet qrand
def qrand(check=False):
'''
Return a random configuration. If check is True, this
configuration is not is collision
'''
while True:
q = np.random.rand(2)*6.4-3.2 # sample between -3.2 and +3.2.
if not check or not coll(q): return q
# %end_jupyter_snippet
# %jupyter_snippet colldist
def collisionDistance(q):
'''Return the minimal distance between robot and environment. '''
pin.updateGeometryPlacements(robot.model,robot.data,robot.collision_model,robot.collision_data,q)
if pin.computeCollisions(robot.collision_model,robot.collision_data,False): 0
idx = pin.computeDistances(robot.collision_model,robot.collision_data)
return robot.collision_data.distanceResults[idx].min_distance
# %end_jupyter_snippet
################################################################################
################################################################################
################################################################################
# %jupyter_snippet qrand_target
# Sample a random free configuration where dist is small enough.
def qrandTarget(threshold=5e-2, display=False):
while True:
q = qrand()
if display:
viz.display(q)
time.sleep(1e-3)
if not coll(q) and dist(q)<threshold:
return q
viz.display(qrandTarget())
# %end_jupyter_snippet
################################################################################
################################################################################
################################################################################
# %jupyter_snippet random_descent
# Random descent: crawling from one free configuration to the target with random
# steps.
def randomDescent(q0 = None):
q = qrand(check=True) if q0 is None else q0
hist = [ q.copy() ]
for i in range(100):
dq = qrand()*.1 # Choose a random step ...
qtry = q+dq # ... apply
if dist(q)>dist(q+dq) and not coll(q+dq): # If distance decrease without collision ...
q = q+dq # ... keep the step
hist.append(q.copy()) # ... keep a trace of it
viz.display(q) # ... display it
time.sleep(5e-3) # ... and sleep for a short while
return hist
randomDescent();
# %end_jupyter_snippet
################################################################################
################################################################################
################################################################################
# %jupyter_snippet sample
def sampleSpace(nbSamples=500):
'''
Sample nbSamples configurations and store them in two lists depending
if the configuration is in free space (hfree) or in collision (hcol), along
with the distance to the target and the distance to the obstacles.
'''
hcol = []
hfree = []
for i in range(nbSamples):
q = qrand(False)
if not coll(q):
hfree.append( list(q.flat) + [ dist(q), collisionDistance(q) ])
else:
hcol.append( list(q.flat) + [ dist(q), 1e-2 ])
return hcol,hfree
def plotConfigurationSpace(hcol,hfree,markerSize=20):
'''
Plot 2 "scatter" plots: the first one plot the distance to the target for
each configuration, the second plots the distance to the obstacles (axis q1,q2,
distance in the color space).
'''
htotal = hcol + hfree
h=np.array(htotal)
plt.subplot(2,1,1)
plt.scatter(h[:,0],h[:,1],c=h[:,2],s=markerSize,lw=0)
plt.title("Distance to the target")
plt.colorbar()
plt.subplot(2,1,2)
plt.scatter(h[:,0],h[:,1],c=h[:,3],s=markerSize,lw=0)
plt.title("Distance to the obstacles")
plt.colorbar()
hcol,hfree = sampleSpace(100)
plotConfigurationSpace(hcol,hfree)
# %end_jupyter_snippet
################################################################################
################################################################################
################################################################################
### Plot random trajectories in the same plot
# %jupyter_snippet traj
qinit = np.array([-1.1, -3. ])
for i in range(100):
traj = randomDescent(qinit)
if dist(traj[-1])<5e-2:
print('We found a good traj!')
break
traj = np.array(traj)
### Chose trajectory end to be in [-pi,pi]
qend = (traj[-1]+np.pi) % (2*np.pi) - np.pi
### Take the entire trajectory it modulo 2 pi
traj += (qend-traj[-1])
# %end_jupyter_snippet
plt.plot(traj[:,0],traj[:,1],'r',lw=5)
################################################################################
################################################################################
################################################################################
# %jupyter_snippet optim
def cost(q):
'''
Cost function: distance to the target
'''
return dist(q)**2
def constraint(q):
'''
Constraint function: distance to the obstacle should be positive.
'''
return collisionDistance(q)
def callback(q):
'''
At each optimization step, display the robot configuration in gepetto-viewer.
'''
viz.display(q)
time.sleep(.01)
def optimize():
'''
Optimize from an initial random configuration to discover a collision-free
configuration as close as possible to the target.
'''
return fmin_slsqp(x0=qrand(check=True),
func=cost,
f_ieqcons=constraint,callback=callback,
full_output=1)
optimize()
# %end_jupyter_snippet
# %jupyter_snippet useit
while True:
res=optimize()
q=res[0]
viz.display(q)
if res[4]=='Optimization terminated successfully' and res[1]<1e-6:
print('Finally successful!')
break
print("Failed ... let's try again! ")
# %end_jupyter_snippet
| [
"matplotlib.pylab.colorbar",
"pinocchio.updateGeometryPlacements",
"pinocchio.framesForwardKinematics",
"matplotlib.pylab.scatter",
"matplotlib.pylab.subplot",
"pinocchio.computeCollisions",
"utils.meshcat_viewer_wrapper.MeshcatVisualizer",
"time.sleep",
"matplotlib.pylab.plot",
"pinocchio.compute... | [((362, 371), 'matplotlib.pylab.ion', 'plt.ion', ([], {}), '()\n', (369, 371), True, 'import matplotlib.pylab as plt\n'), ((445, 482), 'utils.load_ur5_with_obstacles.load_ur5_with_obstacles', 'load_ur5_with_obstacles', ([], {'reduced': '(True)'}), '(reduced=True)\n', (468, 482), False, 'from utils.load_ur5_with_obstacles import load_ur5_with_obstacles, Target\n'), ((539, 563), 'utils.meshcat_viewer_wrapper.MeshcatVisualizer', 'MeshcatVisualizer', (['robot'], {}), '(robot)\n', (556, 563), False, 'from utils.meshcat_viewer_wrapper import MeshcatVisualizer\n'), ((6071, 6093), 'numpy.array', 'np.array', (['[-1.1, -3.0]'], {}), '([-1.1, -3.0])\n', (6079, 6093), True, 'import numpy as np\n'), ((6241, 6255), 'numpy.array', 'np.array', (['traj'], {}), '(traj)\n', (6249, 6255), True, 'import numpy as np\n'), ((6436, 6479), 'matplotlib.pylab.plot', 'plt.plot', (['traj[:, 0]', 'traj[:, 1]', '"""r"""'], {'lw': '(5)'}), "(traj[:, 0], traj[:, 1], 'r', lw=5)\n", (6444, 6479), True, 'import matplotlib.pylab as plt\n'), ((1053, 1108), 'pinocchio.framesForwardKinematics', 'pin.framesForwardKinematics', (['robot.model', 'robot.data', 'q'], {}), '(robot.model, robot.data, q)\n', (1080, 1108), True, 'import pinocchio as pin\n'), ((1462, 1567), 'pinocchio.updateGeometryPlacements', 'pin.updateGeometryPlacements', (['robot.model', 'robot.data', 'robot.collision_model', 'robot.collision_data', 'q'], {}), '(robot.model, robot.data, robot.collision_model,\n robot.collision_data, q)\n', (1490, 1567), True, 'import pinocchio as pin\n'), ((1572, 1645), 'pinocchio.computeCollisions', 'pin.computeCollisions', (['robot.collision_model', 'robot.collision_data', '(False)'], {}), '(robot.collision_model, robot.collision_data, False)\n', (1593, 1645), True, 'import pinocchio as pin\n'), ((2116, 2221), 'pinocchio.updateGeometryPlacements', 'pin.updateGeometryPlacements', (['robot.model', 'robot.data', 'robot.collision_model', 'robot.collision_data', 'q'], {}), '(robot.model, 
robot.data, robot.collision_model,\n robot.collision_data, q)\n', (2144, 2221), True, 'import pinocchio as pin\n'), ((2222, 2295), 'pinocchio.computeCollisions', 'pin.computeCollisions', (['robot.collision_model', 'robot.collision_data', '(False)'], {}), '(robot.collision_model, robot.collision_data, False)\n', (2243, 2295), True, 'import pinocchio as pin\n'), ((2308, 2373), 'pinocchio.computeDistances', 'pin.computeDistances', (['robot.collision_model', 'robot.collision_data'], {}), '(robot.collision_model, robot.collision_data)\n', (2328, 2373), True, 'import pinocchio as pin\n'), ((5351, 5367), 'numpy.array', 'np.array', (['htotal'], {}), '(htotal)\n', (5359, 5367), True, 'import numpy as np\n'), ((5373, 5393), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (5384, 5393), True, 'import matplotlib.pylab as plt\n'), ((5397, 5457), 'matplotlib.pylab.scatter', 'plt.scatter', (['h[:, 0]', 'h[:, 1]'], {'c': 'h[:, 2]', 's': 'markerSize', 'lw': '(0)'}), '(h[:, 0], h[:, 1], c=h[:, 2], s=markerSize, lw=0)\n', (5408, 5457), True, 'import matplotlib.pylab as plt\n'), ((5456, 5491), 'matplotlib.pylab.title', 'plt.title', (['"""Distance to the target"""'], {}), "('Distance to the target')\n", (5465, 5491), True, 'import matplotlib.pylab as plt\n'), ((5497, 5511), 'matplotlib.pylab.colorbar', 'plt.colorbar', ([], {}), '()\n', (5509, 5511), True, 'import matplotlib.pylab as plt\n'), ((5517, 5537), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (5528, 5537), True, 'import matplotlib.pylab as plt\n'), ((5541, 5601), 'matplotlib.pylab.scatter', 'plt.scatter', (['h[:, 0]', 'h[:, 1]'], {'c': 'h[:, 3]', 's': 'markerSize', 'lw': '(0)'}), '(h[:, 0], h[:, 1], c=h[:, 3], s=markerSize, lw=0)\n', (5552, 5601), True, 'import matplotlib.pylab as plt\n'), ((5600, 5638), 'matplotlib.pylab.title', 'plt.title', (['"""Distance to the obstacles"""'], {}), "('Distance to the obstacles')\n", (5609, 5638), True, 'import 
matplotlib.pylab as plt\n'), ((5644, 5658), 'matplotlib.pylab.colorbar', 'plt.colorbar', ([], {}), '()\n', (5656, 5658), True, 'import matplotlib.pylab as plt\n'), ((7138, 7154), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (7148, 7154), False, 'import time\n'), ((667, 687), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (675, 687), True, 'import numpy as np\n'), ((2958, 2975), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (2968, 2975), False, 'import time\n'), ((4069, 4086), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (4079, 4086), False, 'import time\n'), ((1857, 1874), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (1871, 1874), True, 'import numpy as np\n')] |
import algos_torch
import numpy as np
import common.object_factory
import common.env_configurations as env_configurations
import algos_torch.network_builder as network_builder
import algos_torch.model_builder as model_builder
import algos_torch.a2c_continuous as a2c_continuous
import algos_torch.a2c_discrete as a2c_discrete
#import algos_torch.dqnagent as dqnagent
import common.tr_helpers as tr_helpers
import yaml
import ray
import algos_torch.players as players
import argparse
import common.experiment as experiment
import copy
import torch
from sacred import Experiment
import numpy as np
import os
import collections
from os.path import dirname, abspath
import pymongo
from sacred import Experiment, SETTINGS
from sacred.observers import FileStorageObserver
from sacred.observers import MongoObserver
from sacred.utils import apply_backspaces_and_linefeeds
from utils.logging import get_logger, Logger
SETTINGS['CAPTURE_MODE'] = "fd" # set to "no" if you want to see stdout/stderr in console
logger = get_logger()
ex = Experiment("pymarl")
ex.logger = logger
ex.captured_out_filter = apply_backspaces_and_linefeeds
results_path = os.path.join(dirname(dirname(abspath(__file__))), "results")
mongo_client = None
class Runner:
def __init__(self, logger):
self.algo_factory = common.object_factory.ObjectFactory()
self.algo_factory.register_builder('a2c_continuous', lambda **kwargs : a2c_continuous.A2CAgent(**kwargs))
self.algo_factory.register_builder('a2c_discrete', lambda **kwargs : a2c_discrete.DiscreteA2CAgent(**kwargs))
#self.algo_factory.register_builder('dqn', lambda **kwargs : dqnagent.DQNAgent(**kwargs))
self.player_factory = common.object_factory.ObjectFactory()
self.player_factory.register_builder('a2c_continuous', lambda **kwargs : players.PpoPlayerContinuous(**kwargs))
self.player_factory.register_builder('a2c_discrete', lambda **kwargs : players.PpoPlayerDiscrete(**kwargs))
#self.player_factory.register_builder('dqn', lambda **kwargs : players.DQNPlayer(**kwargs))
self.model_builder = model_builder.ModelBuilder()
self.network_builder = network_builder.NetworkBuilder()
self.logger = logger
def reset(self):
pass
def load_config(self, params):
self.seed = params.get('seed', None)
self.algo_params = params['algo']
self.algo_name = self.algo_params['name']
self.load_check_point = params['load_checkpoint']
self.exp_config = None
if self.seed:
torch.manual_seed(self.seed)
torch.cuda.manual_seed_all(self.seed)
np.random.seed(self.seed)
if self.load_check_point:
self.load_path = params['load_path']
else:
self.load_path = None
self.model = self.model_builder.load(params)
self.config = copy.deepcopy(params['config'])
self.config['reward_shaper'] = tr_helpers.DefaultRewardsShaper(**self.config['reward_shaper'])
self.config['network'] = self.model
has_rnd_net = self.config.get('rnd_config', None) != None
if has_rnd_net:
#print('Adding RND Network')
logger.console_logger.info('Adding RND Network')
network = self.model_builder.network_factory.create(params['config']['rnd_config']['network']['name'])
print(network)
network.load(params['config']['rnd_config']['network'])
self.config['rnd_config']['network'] = network
def load(self, yaml_conf):
self.default_config = yaml_conf['params']
self.load_config(copy.deepcopy(self.default_config))
if 'experiment_config' in yaml_conf:
self.exp_config = yaml_conf['experiment_config']
def get_prebuilt_config(self):
return self.config
def run_train(self):
#print('Started to train')
self.logger.console_logger.info('Started to train')
ray.init(redis_max_memory=1024*1024*1000, object_store_memory=1024*1024*1000)
obs_space, action_space = env_configurations.get_obs_and_action_spaces_from_config(self.config)
#print('obs_space:', obs_space)
#print('action_space:', action_space)
self.logger.console_logger.info('obs_space: {}'.format(obs_space))
self.logger.console_logger.info('action_space: {}'.format(action_space))
if self.exp_config:
self.experiment = experiment.Experiment(self.default_config, self.exp_config)
exp_num = 0
exp = self.experiment.get_next_config()
while exp is not None:
exp_num += 1
#print('Starting experiment number: ' + str(exp_num))
self.logger.console_logger.info('Starting experiment number: ' + str(exp_num))
self.reset()
self.load_config(exp)
agent = self.algo_factory.create(self.algo_name, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
self.experiment.set_results(*agent.train())
exp = self.experiment.get_next_config()
else:
self.reset()
self.load_config(self.default_config)
agent = self.algo_factory.create(self.algo_name, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config, logger=self.logger)
if self.load_check_point or (self.load_path is not None):
agent.restore(self.load_path)
agent.train()
def create_player(self):
return self.player_factory.create(self.algo_name, config=self.config)
def create_agent(self, obs_space, action_space):
return self.algo_factory.create(self.algo_name, base_name='run', observation_space=obs_space, action_space=action_space, config=self.config)
def run(self, args):
if 'checkpoint' in args:
self.load_path = args['checkpoint']
if args['train']:
self.run_train()
elif args['play']:
#print('Started to play')
logger.console_logger.info('Started to play')
player = self.player_factory.create(self.algo_name, config=self.config)
player.restore(self.load_path)
player.run()
ray.shutdown()
# Function to connect to a mongodb and add a Sacred MongoObserver
def setup_mongodb(db_url, db_name):
client = None
mongodb_fail = True
# Try 5 times to connect to the mongodb
for tries in range(5):
# First try to connect to the central server. If that doesn't work then just save locally
maxSevSelDelay = 10000 # Assume 10s maximum server selection delay
try:
# Check whether server is accessible
logger.info("Trying to connect to mongoDB '{}'".format(db_url))
client = pymongo.MongoClient(db_url, ssl=True, serverSelectionTimeoutMS=maxSevSelDelay)
client.server_info()
# If this hasn't raised an exception, we can add the observer
ex.observers.append(MongoObserver.create(url=db_url, db_name=db_name, ssl=True)) # db_name=db_name,
logger.info("Added MongoDB observer on {}.".format(db_url))
mongodb_fail = False
break
except pymongo.errors.ServerSelectionTimeoutError:
logger.warning("Couldn't connect to MongoDB on try {}".format(tries + 1))
if mongodb_fail:
logger.error("Couldn't connect to MongoDB after 5 tries!")
# TODO: Maybe we want to end the script here sometimes?
return client
@ex.main
def my_main(_run, _config, _log):
global mongo_client
import datetime
#arglist = parse_args()
#unique_token = "{}__{}".format(arglist.name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# run the framework
# run(_run, _config, _log, mongo_client, unique_token)
logger = Logger(_log)
# configure tensorboard logger
unique_token = "{}__{}".format(_config["label"], datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
use_tensorboard = False
if use_tensorboard:
tb_logs_direc = os.path.join(dirname(dirname(abspath(__file__))), "results", "tb_logs")
tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(unique_token)
logger.setup_tb(tb_exp_direc)
logger.setup_sacred(_run)
_log.info("Experiment Parameters:")
import pprint
experiment_params = pprint.pformat(_config,
indent=4,
width=1)
_log.info("\n\n" + experiment_params + "\n")
# START THE TRAINING PROCESS
runner = Runner(logger)
runner.load(_config)
runner.reset()
#args = vars(arglist)
runner.run(_config)
#runner.run(args)
# train(arglist, logger, _config)
# arglist = convert(_config)
#train(arglist)
# force exit
os._exit(0)
def _get_config(params, arg_name, subfolder):
config_name = None
for _i, _v in enumerate(params):
if _v.split("=")[0] == arg_name:
config_name = _v.split("=")[1]
del params[_i]
break
if config_name is not None:
with open(os.path.join(os.path.dirname(__file__), "configs", subfolder, "{}.yaml".format(config_name)), "r") as f:
try:
config_dict = yaml.load(f)
except yaml.YAMLError as exc:
assert False, "{}.yaml error: {}".format(config_name, exc)
return config_dict
def recursive_dict_update(d, u):
for k, v in u.items():
if isinstance(v, collections.Mapping):
d[k] = recursive_dict_update(d.get(k, {}), v)
else:
d[k] = v
return d
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--train", required=False, help="train network", action='store_true')
ap.add_argument("-p", "--play", required=False, help="play network", action='store_true')
ap.add_argument("-c", "--checkpoint", required=False, help="path to checkpoint")
ap.add_argument("-f", "--file", required=True, help="path to config")
ap.add_argument("-e", "--exp-name", required=True, help="experiment name")
return ap.parse_args()
if __name__ == '__main__':
import os
import sys
from copy import deepcopy
params = deepcopy(sys.argv)
# args = vars(ap.parse_args())
# config_name = args['file']
# print('Loading config: ', config_name)
# with open(config_name, 'r') as stream:
# config = yaml.safe_load(stream)
# runner = Runner()
# try:
# runner.load(config)
# except yaml.YAMLError as exc:
# print(exc)
#
# # Load algorithm and env base configs
# #file_config = _get_config(params, "--file", "envs")
#
# # Load into official sacred configs
# if config_name is not None:
# with open(os.path.join(os.path.dirname(__file__), "configs", subfolder, "{}.yaml".format(config_name)), "r") as f:
# try:
# file_config = yaml.load(f)
# except yaml.YAMLError as exc:
# assert False, "{}.yaml error: {}".format(config_name, exc)
config_dict = {"train":True,
"load_checkpoint":False,
"load_path":None}
file_config = _get_config(params, "--file", "")
config_dict = recursive_dict_update(config_dict, file_config)
# now add all the config to sacred
ex.add_config(config_dict)
#arglist = ap.parse_args()
#from copy import deepcopy
#ex.add_config({"name":arglist.exp_name})
# Check if we don't want to save to sacred mongodb
no_mongodb = False
for _i, _v in enumerate(params):
if "no-mongo" in _v:
if "--no-mongo" == _v:
del params[_i]
no_mongodb = True
break
config_dict={"train": True}
db_config_path = "./db_config.private.yaml"
with open(db_config_path, 'r') as stream:
config_dict = yaml.safe_load(stream)
# If there is no url set for the mongodb, we cannot use it
if not no_mongodb and "db_url" not in config_dict:
no_mongodb = True
logger.error("No 'db_url' to use for Sacred MongoDB")
if not no_mongodb:
db_url = config_dict["db_url"]
db_name = config_dict["db_name"]
mongo_client = setup_mongodb(db_url, db_name)
# Save to disk by default for sacred, even if we are using the mongodb
logger.info("Saving to FileStorageObserver in results/sacred.")
file_obs_path = os.path.join(results_path, "sacred")
ex.observers.append(FileStorageObserver.create(file_obs_path))
ex.run_commandline(params)
# if __name__ == '__main__':
# ap = argparse.ArgumentParser()
# ap.add_argument("-t", "--train", required=False, help="train network", action='store_true')
# ap.add_argument("-p", "--play", required=False, help="play network", action='store_true')
# ap.add_argument("-c", "--checkpoint", required=False, help="path to checkpoint")
# ap.add_argument("-f", "--file", required=True, help="path to config")
#
# args = vars(ap.parse_args())
# config_name = args['file']
# print('Loading config: ', config_name)
# with open(config_name, 'r') as stream:
# config = yaml.safe_load(stream)
# runner = Runner()
# try:
# runner.load(config)
# except yaml.YAMLError as exc:
# print(exc)
#
# main()
| [
"pymongo.MongoClient",
"yaml.load",
"pprint.pformat",
"numpy.random.seed",
"argparse.ArgumentParser",
"utils.logging.Logger",
"yaml.safe_load",
"os.path.join",
"os.path.abspath",
"os.path.dirname",
"algos_torch.a2c_continuous.A2CAgent",
"utils.logging.get_logger",
"common.experiment.Experime... | [((1013, 1025), 'utils.logging.get_logger', 'get_logger', ([], {}), '()\n', (1023, 1025), False, 'from utils.logging import get_logger, Logger\n'), ((1032, 1052), 'sacred.Experiment', 'Experiment', (['"""pymarl"""'], {}), "('pymarl')\n", (1042, 1052), False, 'from sacred import Experiment, SETTINGS\n'), ((7992, 8004), 'utils.logging.Logger', 'Logger', (['_log'], {}), '(_log)\n', (7998, 8004), False, 'from utils.logging import get_logger, Logger\n'), ((8528, 8570), 'pprint.pformat', 'pprint.pformat', (['_config'], {'indent': '(4)', 'width': '(1)'}), '(_config, indent=4, width=1)\n', (8542, 8570), False, 'import pprint\n'), ((8991, 9002), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (8999, 9002), False, 'import os\n'), ((9842, 9867), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9865, 9867), False, 'import argparse\n'), ((10424, 10442), 'copy.deepcopy', 'deepcopy', (['sys.argv'], {}), '(sys.argv)\n', (10432, 10442), False, 'from copy import deepcopy\n'), ((12671, 12707), 'os.path.join', 'os.path.join', (['results_path', '"""sacred"""'], {}), "(results_path, 'sacred')\n", (12683, 12707), False, 'import os\n'), ((2107, 2135), 'algos_torch.model_builder.ModelBuilder', 'model_builder.ModelBuilder', ([], {}), '()\n', (2133, 2135), True, 'import algos_torch.model_builder as model_builder\n'), ((2167, 2199), 'algos_torch.network_builder.NetworkBuilder', 'network_builder.NetworkBuilder', ([], {}), '()\n', (2197, 2199), True, 'import algos_torch.network_builder as network_builder\n'), ((2888, 2919), 'copy.deepcopy', 'copy.deepcopy', (["params['config']"], {}), "(params['config'])\n", (2901, 2919), False, 'import copy\n'), ((2968, 3031), 'common.tr_helpers.DefaultRewardsShaper', 'tr_helpers.DefaultRewardsShaper', ([], {}), "(**self.config['reward_shaper'])\n", (2999, 3031), True, 'import common.tr_helpers as tr_helpers\n'), ((3988, 4078), 'ray.init', 'ray.init', ([], {'redis_max_memory': '(1024 * 1024 * 1000)', 
'object_store_memory': '(1024 * 1024 * 1000)'}), '(redis_max_memory=1024 * 1024 * 1000, object_store_memory=1024 * \n 1024 * 1000)\n', (3996, 4078), False, 'import ray\n'), ((4100, 4169), 'common.env_configurations.get_obs_and_action_spaces_from_config', 'env_configurations.get_obs_and_action_spaces_from_config', (['self.config'], {}), '(self.config)\n', (4156, 4169), True, 'import common.env_configurations as env_configurations\n'), ((6364, 6378), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (6376, 6378), False, 'import ray\n'), ((12119, 12141), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (12133, 12141), False, 'import yaml\n'), ((12732, 12773), 'sacred.observers.FileStorageObserver.create', 'FileStorageObserver.create', (['file_obs_path'], {}), '(file_obs_path)\n', (12758, 12773), False, 'from sacred.observers import FileStorageObserver\n'), ((1173, 1190), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (1180, 1190), False, 'from os.path import dirname, abspath\n'), ((2563, 2591), 'torch.manual_seed', 'torch.manual_seed', (['self.seed'], {}), '(self.seed)\n', (2580, 2591), False, 'import torch\n'), ((2604, 2641), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['self.seed'], {}), '(self.seed)\n', (2630, 2641), False, 'import torch\n'), ((2654, 2679), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (2668, 2679), True, 'import numpy as np\n'), ((3653, 3687), 'copy.deepcopy', 'copy.deepcopy', (['self.default_config'], {}), '(self.default_config)\n', (3666, 3687), False, 'import copy\n'), ((4472, 4531), 'common.experiment.Experiment', 'experiment.Experiment', (['self.default_config', 'self.exp_config'], {}), '(self.default_config, self.exp_config)\n', (4493, 4531), True, 'import common.experiment as experiment\n'), ((6932, 7010), 'pymongo.MongoClient', 'pymongo.MongoClient', (['db_url'], {'ssl': '(True)', 'serverSelectionTimeoutMS': 'maxSevSelDelay'}), '(db_url, ssl=True, 
serverSelectionTimeoutMS=maxSevSelDelay)\n', (6951, 7010), False, 'import pymongo\n'), ((1419, 1452), 'algos_torch.a2c_continuous.A2CAgent', 'a2c_continuous.A2CAgent', ([], {}), '(**kwargs)\n', (1442, 1452), True, 'import algos_torch.a2c_continuous as a2c_continuous\n'), ((1531, 1570), 'algos_torch.a2c_discrete.DiscreteA2CAgent', 'a2c_discrete.DiscreteA2CAgent', ([], {}), '(**kwargs)\n', (1560, 1570), True, 'import algos_torch.a2c_discrete as a2c_discrete\n'), ((1821, 1858), 'algos_torch.players.PpoPlayerContinuous', 'players.PpoPlayerContinuous', ([], {}), '(**kwargs)\n', (1848, 1858), True, 'import algos_torch.players as players\n'), ((1939, 1974), 'algos_torch.players.PpoPlayerDiscrete', 'players.PpoPlayerDiscrete', ([], {}), '(**kwargs)\n', (1964, 1974), True, 'import algos_torch.players as players\n'), ((7150, 7209), 'sacred.observers.MongoObserver.create', 'MongoObserver.create', ([], {'url': 'db_url', 'db_name': 'db_name', 'ssl': '(True)'}), '(url=db_url, db_name=db_name, ssl=True)\n', (7170, 7209), False, 'from sacred.observers import MongoObserver\n'), ((8096, 8119), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8117, 8119), False, 'import datetime\n'), ((8322, 8355), 'os.path.join', 'os.path.join', (['tb_logs_direc', '"""{}"""'], {}), "(tb_logs_direc, '{}')\n", (8334, 8355), False, 'import os\n'), ((9442, 9454), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (9451, 9454), False, 'import yaml\n'), ((8256, 8273), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (8263, 8273), False, 'from os.path import dirname, abspath\n'), ((9303, 9328), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9318, 9328), False, 'import os\n')] |
# Simple CNN model for CIFAR-10
import numpy
from keras.datasets import mnist
from keras import backend as K
K.set_image_dim_ordering('th')
import utils as ut
import numpy as np
from keras.models import load_model
import matplotlib.pyplot as plt
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
path_to_model = 'mnist/mnist_model.h5'
iteration_steps = 10
flag1 = 1
flag2 = 1
perCsp = np.linspace(0.5,0.95,iteration_steps)
l2_avg = np.zeros((iteration_steps,1))
l2_var = np.zeros((iteration_steps,1))
model1 = load_model(path_to_model)
dense_1 = model1.get_layer('dense_1')
weights1 = dense_1.get_weights()[0]
bias1 = dense_1.get_weights()[1]
dense_2 = model1.get_layer('dense_2')
weights2 = dense_2.get_weights()[0]
bias2 = dense_2.get_weights()[1]
#dense_3 = model1.get_layer('dense_3')
#weights3 = dense_3.get_weights()[0]
#bias3 = dense_3.get_weights()[0]
latent_X_original = np.load('mnist/testing/dense_2_output_ts.npy')
for i in range(0,iteration_steps):
new_dense_1 = ut.compute_thresholding_sparsification(weights1, perCsp[i])
new_dense_2 = ut.compute_thresholding_sparsification(weights2, perCsp[i])
l2_avg[i],l2_var[i] = ut.evaluate_l2_norm_keras(path_to_model,[new_dense_1,bias1],[new_dense_2,bias2],flag1,flag2,X_test,latent_X_original)
print('Calculating Step: ', i )
#plt.plot(perCsp*100, acc*100)
np.save('mnist/results/l2_norm/no_redundant_thresh_spars.npy',perCsp*100)
np.save('mnist/results/l2_norm/no_redundant_thresh_l2_avg.npy',l2_avg)
np.save('mnist/results/l2_norm/no_redundant_thresh_l2_var.npy',l2_var)
end = 1 | [
"keras.models.load_model",
"numpy.load",
"numpy.save",
"numpy.random.seed",
"keras.datasets.mnist.load_data",
"numpy.zeros",
"keras.backend.set_image_dim_ordering",
"utils.evaluate_l2_norm_keras",
"numpy.linspace",
"utils.compute_thresholding_sparsification"
] | [((109, 139), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""th"""'], {}), "('th')\n", (133, 139), True, 'from keras import backend as K\n'), ((294, 317), 'numpy.random.seed', 'numpy.random.seed', (['seed'], {}), '(seed)\n', (311, 317), False, 'import numpy\n'), ((370, 387), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (385, 387), False, 'from keras.datasets import mnist\n'), ((480, 519), 'numpy.linspace', 'np.linspace', (['(0.5)', '(0.95)', 'iteration_steps'], {}), '(0.5, 0.95, iteration_steps)\n', (491, 519), True, 'import numpy as np\n'), ((528, 558), 'numpy.zeros', 'np.zeros', (['(iteration_steps, 1)'], {}), '((iteration_steps, 1))\n', (536, 558), True, 'import numpy as np\n'), ((567, 597), 'numpy.zeros', 'np.zeros', (['(iteration_steps, 1)'], {}), '((iteration_steps, 1))\n', (575, 597), True, 'import numpy as np\n'), ((607, 632), 'keras.models.load_model', 'load_model', (['path_to_model'], {}), '(path_to_model)\n', (617, 632), False, 'from keras.models import load_model\n'), ((984, 1030), 'numpy.load', 'np.load', (['"""mnist/testing/dense_2_output_ts.npy"""'], {}), "('mnist/testing/dense_2_output_ts.npy')\n", (991, 1030), True, 'import numpy as np\n'), ((1442, 1518), 'numpy.save', 'np.save', (['"""mnist/results/l2_norm/no_redundant_thresh_spars.npy"""', '(perCsp * 100)'], {}), "('mnist/results/l2_norm/no_redundant_thresh_spars.npy', perCsp * 100)\n", (1449, 1518), True, 'import numpy as np\n'), ((1516, 1587), 'numpy.save', 'np.save', (['"""mnist/results/l2_norm/no_redundant_thresh_l2_avg.npy"""', 'l2_avg'], {}), "('mnist/results/l2_norm/no_redundant_thresh_l2_avg.npy', l2_avg)\n", (1523, 1587), True, 'import numpy as np\n'), ((1587, 1658), 'numpy.save', 'np.save', (['"""mnist/results/l2_norm/no_redundant_thresh_l2_var.npy"""', 'l2_var'], {}), "('mnist/results/l2_norm/no_redundant_thresh_l2_var.npy', l2_var)\n", (1594, 1658), True, 'import numpy as np\n'), ((1087, 1146), 
'utils.compute_thresholding_sparsification', 'ut.compute_thresholding_sparsification', (['weights1', 'perCsp[i]'], {}), '(weights1, perCsp[i])\n', (1125, 1146), True, 'import utils as ut\n'), ((1165, 1224), 'utils.compute_thresholding_sparsification', 'ut.compute_thresholding_sparsification', (['weights2', 'perCsp[i]'], {}), '(weights2, perCsp[i])\n', (1203, 1224), True, 'import utils as ut\n'), ((1253, 1382), 'utils.evaluate_l2_norm_keras', 'ut.evaluate_l2_norm_keras', (['path_to_model', '[new_dense_1, bias1]', '[new_dense_2, bias2]', 'flag1', 'flag2', 'X_test', 'latent_X_original'], {}), '(path_to_model, [new_dense_1, bias1], [new_dense_2,\n bias2], flag1, flag2, X_test, latent_X_original)\n', (1278, 1382), True, 'import utils as ut\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
..author:: <NAME>, ETH Zürich, Switzerland.
..date:: September 2017
Code for training a LSTM model on peptide sequences followed by sampling novel sequences through the model.
Check the readme for possible flags to use with this script.
"""
import json
import os
import pickle
import random
import argparse
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.layers import Dense, LSTM, GRU
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from modlamp.analysis import GlobalAnalysis
from modlamp.core import count_aas
from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor
from modlamp.sequences import Random, Helices
from progressbar import ProgressBar
from scipy.spatial import distance
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
plt.switch_backend('agg')  # headless backend so plots can be saved on machines without a display


def _str2bool(value):
    """Parse a command-line string into a boolean.

    argparse with ``type=bool`` calls ``bool()`` on the raw string, so any
    non-empty value (including ``"False"``) becomes ``True``. This converter
    accepts the usual boolean spellings instead.

    :param value: {str or bool} value passed on the command line
    :return: {bool} parsed boolean
    :raises argparse.ArgumentTypeError: if the value is not a recognized boolean spelling
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected, got %r" % value)


flags = argparse.ArgumentParser()
flags.add_argument("-d", "--dataset", default="training_sequences_noC.csv", help="dataset file (expecting csv)", type=str)
flags.add_argument("-n", "--name", default="test", help="run name for log and checkpoint files", type=str)
flags.add_argument("-b", "--batch_size", default=128, help="batch size", type=int)
flags.add_argument("-e", "--epochs", default=50, help="epochs to train", type=int)
flags.add_argument("-l", "--layers", default=2, help="number of layers in the network", type=int)
flags.add_argument("-x", "--neurons", default=256, help="number of units per layer", type=int)
flags.add_argument("-c", "--cell", default="LSTM", help="type of neuron to use, available: LSTM, GRU", type=str)
flags.add_argument("-o", "--dropout", default=0.1, help="dropout to use in every layer; layer 1 gets 1*dropout, layer 2 2*dropout etc.", type=float)
# BUG FIX: type=bool treated any non-empty string (e.g. "False") as True; use _str2bool for all boolean flags
flags.add_argument("-t", "--train", default=True, help="whether the network should be trained or just sampled from", type=_str2bool)
flags.add_argument("-v", "--valsplit", default=0.2, help="fraction of the data to use for validation", type=float)
flags.add_argument("-s", "--sample", default=100, help="number of sequences to sample training", type=int)
flags.add_argument("-p", "--temp", default=1.25, help="temperature used for sampling", type=float)
flags.add_argument("-m", "--maxlen", default=0, help="maximum sequence length allowed when sampling new sequences", type=int)
flags.add_argument("-a", "--startchar", default="B", help="starting character to begin sampling. Default='B' for 'begin'", type=str)
flags.add_argument("-r", "--lr", default=0.01, help="learning rate to be used with the Adam optimizer", type=float)
flags.add_argument("--l2", default=None, help="l2 regularization rate. If None, no l2 regularization is used", type=float)
flags.add_argument("--modfile", default=None, help="filename of the pretrained model to used for sampling if train=False", type=str)
flags.add_argument("--finetune", default=False, help="if True, a pretrained model provided in modfile is finetuned on the dataset", type=_str2bool)
flags.add_argument("--cv", default=None, help="number of folds to use for cross-validation; if None, no CV is performed", type=int)
flags.add_argument("--window", default=0, help="window size used to process sequences. If 0, all sequences are padded to the longest sequence length in the dataset", type=int)
flags.add_argument("--step", default=1, help="step size to move window or prediction target", type=int)
flags.add_argument("--target", default="all", help="whether to learn all proceeding characters or just the last `one` in sequence", type=str)
flags.add_argument("--padlen", default=0, help="number of spaces to use for padding sequences (if window not 0); if 0, sequences are padded to the length of the longest sequence in the dataset", type=int)
flags.add_argument("--refs", default=True, help="whether reference sequence sets should be generated for the analysis", type=_str2bool)
args = flags.parse_args()
def _save_flags(filename):
    """Write the parsed command-line arguments to a log file.

    :param filename: {str} path of the text file to create
    :return: saved file on disk
    """
    header = "Used flags:\n-----------\n"
    with open(filename, 'w') as logfile:
        logfile.write(header)
        # dump the argparse namespace as pretty-printed JSON
        json.dump(args.__dict__, logfile, indent=2)
def _onehotencode(s, vocab=None):
""" Function to one-hot encode a sring.
:param s: {str} String to encode in one-hot fashion
:param vocab: vocabulary to use fore encoding, if None, default AAs are used
:return: one-hot encoded string as a np.array
"""
if not vocab:
vocab = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W',
'Y', ' ']
# generate translation dictionary for one-hot encoding
to_one_hot = dict()
for i, a in enumerate(vocab):
v = np.zeros(len(vocab))
v[i] = 1
to_one_hot[a] = v
result = []
for l in s:
result.append(to_one_hot[l])
result = np.array(result)
return np.reshape(result, (1, result.shape[0], result.shape[1])), to_one_hot, vocab
def _onehotdecode(matrix, vocab=None, filename=None):
""" Decode a given one-hot represented matrix back into sequences
:param matrix: matrix containing sequence patterns that are one-hot encoded
:param vocab: vocabulary, if None, standard AAs are used
:param filename: filename for saving sequences, if ``None``, sequences are returned in a list
:return: list of decoded sequences in the range lenmin-lenmax, if ``filename``, they are saved to a file
"""
if not vocab:
_, _, vocab = _onehotencode('A')
if len(matrix.shape) == 2: # if a matrix containing only one string is supplied
result = []
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
aa = np.where(matrix[i, j] == 1.)[0][0]
result.append(vocab[aa])
seq = ''.join(result)
if filename:
with open(filename, 'wb') as f:
f.write(seq)
else:
return seq
elif len(matrix.shape) == 3: # if a matrix containing several strings is supplied
result = []
for n in range(matrix.shape[0]):
oneresult = []
for i in range(matrix.shape[1]):
for j in range(matrix.shape[2]):
aa = np.where(matrix[n, i, j] == 1.)[0][0]
oneresult.append(vocab[aa])
seq = ''.join(oneresult)
result.append(seq)
if filename:
with open(filename, 'wb') as f:
for s in result:
f.write(s + '\n')
else:
return result
def _sample_with_temp(preds, temp=1.0):
""" Helper function to sample one letter from a probability array given a temperature.
:param preds: {np.array} predictions returned by the network
:param temp: {float} temperature value to sample at.
"""
streched = np.log(preds) / temp
stretched_probs = np.exp(streched) / np.sum(np.exp(streched))
return np.random.choice(len(streched), p=stretched_probs)
def load_model_instance(filename):
    """ Load a whole Model class instance from a given epoch file

    Reads the pickled ``Model`` wrapper (``model.p``) from the directory of ``filename``
    and re-attaches the Keras model stored next to it (``model.hdf5``).

    :param filename: epoch file, e.g. model_epoch_5.hdf5
    :return: model instance with trained weights
    """
    modfile = os.path.join(os.path.dirname(filename), 'model.p')
    # use a context manager so the pickle file handle is closed reliably
    with open(modfile, 'rb') as f:
        mod = pickle.load(f)
    # BUG FIX: ''.join(modfile.split('.')[:-1]) dropped the leading '.' of relative paths
    # (e.g. './run/checkpoint/model.p' -> '/run/checkpoint/model.hdf5');
    # os.path.splitext strips only the extension.
    hdf5_file = os.path.splitext(modfile)[0] + '.hdf5'
    mod.model = load_model(hdf5_file)
    return mod
def save_model_instance(mod):
    """ Save a whole Model instance and the corresponding model with weights to two files (model.p and model.hdf5)

    The Keras model itself is not picklable, so it is detached before pickling the
    wrapper and re-attached afterwards.

    :param mod: model instance
    :return: saved model files in the checkpoint dir
    """
    tmp = mod.model
    tmp.save(mod.checkpointdir + 'model.hdf5')
    mod.model = None  # strip the unpicklable Keras model before dumping
    try:
        # BUG FIX: use a context manager so the pickle file handle is closed reliably
        with open(mod.checkpointdir + 'model.p', 'wb') as f:
            pickle.dump(mod, f)
    finally:
        mod.model = tmp  # always re-attach the Keras model, even if pickling fails
class SequenceHandler(object):
    """ Class for handling peptide sequences, e.g. loading, one-hot encoding or decoding and saving """
    def __init__(self, window=0, step=2, refs=True):
        """
        :param window: {int} window used for chopping up sequences. If 0: no window, whole sequences are used
        :param step: {int} size of the steps to move the window forward
        :param refs {bool} whether to generate reference sequence sets for analysis
        """
        self.sequences = None  # loaded (and later padded) training sequences, list of str
        self.generated = None  # sequences sampled from the trained model
        self.ran = None  # random reference sequence set (modlamp Random), built in analyze_generated
        self.hel = None  # amphipathic helix reference set (modlamp Helices), built in analyze_generated
        self.X = list()  # one-hot encoded input patterns
        self.y = list()  # one-hot encoded prediction targets
        self.window = window
        self.step = step
        self.refs = refs
        # generate translation dictionary for one-hot encoding
        _, self.to_one_hot, self.vocab = _onehotencode('A')
    def load_sequences(self, filename):
        """ Method to load peptide sequences from a csv file
        :param filename: {str} filename of the sequence file to be read (``csv``, one sequence per line)
        :return: sequences in self.sequences
        """
        with open(filename) as f:
            self.sequences = [s.strip() for s in f]
        self.sequences = random.sample(self.sequences, len(self.sequences))  # shuffle sequences randomly
    def pad_sequences(self, pad_char=' ', padlen=0):
        """ Pad all sequences to the longest length (default, padlen=0) or a given length
        :param pad_char: {str} Character to pad sequences with
        :param padlen: {int} Custom length of padding to add to all sequences to (optional), default: 0. If
        0, sequences are padded to the length of the longest sequence in the training set. If a window is used and the
        padded sequence is shorter than the window size, it is padded to fit the window.
        """
        if padlen:
            padded_seqs = []
            for seq in self.sequences:
                if len(seq) < self.window:
                    # too short for the window: pad so that at least one full window + step fits
                    padded_seq = seq + pad_char * (self.step + self.window - len(seq))
                else:
                    padded_seq = seq + pad_char * padlen
                padded_seqs.append(padded_seq)
        else:
            # pad everything to the longest sequence and prepend the 'B' (begin) token
            length = max([len(seq) for seq in self.sequences])
            padded_seqs = []
            for seq in self.sequences:
                padded_seq = 'B' + seq + pad_char * (length - len(seq))
                padded_seqs.append(padded_seq)
        if pad_char not in self.vocab:
            self.vocab += [pad_char]
        self.sequences = padded_seqs  # overwrite sequences with padded sequences
    def one_hot_encode(self, target='all'):
        """ Chop up loaded sequences into patterns of length ``window`` by moving by stepsize ``step`` and translate
        them with a one-hot vector encoding
        :param target: {str} whether all proceeding AA should be learned or just the last one in sequence (`all`, `one`)
        :return: one-hot encoded sequence patterns in self.X and corresponding target amino acids in self.y
        """
        if self.window == 0:
            # no window: whole sequence shifted by `step` is the prediction target
            for s in self.sequences:
                self.X.append([self.to_one_hot[char] for char in s[:-self.step]])
                if target == 'all':
                    self.y.append([self.to_one_hot[char] for char in s[self.step:]])
                elif target == 'one':
                    self.y.append(s[-self.step:])
            self.X = np.reshape(self.X, (len(self.X), len(self.sequences[0]) - self.step, len(self.vocab)))
            self.y = np.reshape(self.y, (len(self.y), len(self.sequences[0]) - self.step, len(self.vocab)))
        else:
            # sliding window: every window of length `window`, advanced by `step`, becomes one pattern
            for s in self.sequences:
                for i in range(0, len(s) - self.window, self.step):
                    self.X.append([self.to_one_hot[char] for char in s[i: i + self.window]])
                    if target == 'all':
                        self.y.append([self.to_one_hot[char] for char in s[i + 1: i + self.window + 1]])
                    elif target == 'one':
                        self.y.append(s[-self.step:])
            self.X = np.reshape(self.X, (len(self.X), self.window, len(self.vocab)))
            self.y = np.reshape(self.y, (len(self.y), self.window, len(self.vocab)))
        print("\nData shape:\nX: " + str(self.X.shape) + "\ny: " + str(self.y.shape))
    def analyze_training(self):
        """ Method to analyze the distribution of the training data
        :return: prints out information about the length distribution of the sequences in ``self.sequences``
        """
        d = GlobalDescriptor(self.sequences)
        d.length()
        print("\nLENGTH DISTRIBUTION OF TRAINING DATA:\n")
        print("Number of sequences: \t%i" % len(self.sequences))
        print("Mean sequence length: \t%.1f ± %.1f" % (np.mean(d.descriptor), np.std(d.descriptor)))
        print("Median sequence length: \t%i" % np.median(d.descriptor))
        print("Minimal sequence length:\t%i" % np.min(d.descriptor))
        print("Maximal sequence length:\t%i" % np.max(d.descriptor))
    def analyze_generated(self, num, fname='analysis.txt', plot=False):
        """ Method to analyze the generated sequences located in `self.generated`.
        :param num: {int} wanted number of sequences to sample
        :param fname: {str} filename to save analysis info to
        :param plot: {bool} whether to plot an overview of descriptors
        :return: file with analysis info (distances)
        """
        with open(fname, 'w') as f:
            print("Analyzing...")
            f.write("ANALYSIS OF SAMPLED SEQUENCES\n==============================\n\n")
            f.write("Nr. of duplicates in generated sequences: %i\n" % (len(self.generated) - len(set(self.generated))))
            count = len(set(self.generated) & set(self.sequences))  # get shared entries in both lists
            f.write("%.1f percent of generated sequences are present in the training data.\n" %
                    ((count / len(self.generated)) * 100))
            d = GlobalDescriptor(self.generated)
            len1 = len(d.sequences)
            d.filter_aa('B')  # drop invalid sequences still containing the 'B' begin token
            len2 = len(d.sequences)
            d.length()
            f.write("\n\nLENGTH DISTRIBUTION OF GENERATED DATA:\n\n")
            f.write("Number of sequences too short:\t%i\n" % (num - len1))
            f.write("Number of invalid (with 'B'):\t%i\n" % (len1 - len2))
            f.write("Number of valid unique seqs:\t%i\n" % len2)
            f.write("Mean sequence length: \t\t%.1f ± %.1f\n" % (np.mean(d.descriptor), np.std(d.descriptor)))
            f.write("Median sequence length: \t\t%i\n" % np.median(d.descriptor))
            f.write("Minimal sequence length: \t\t%i\n" % np.min(d.descriptor))
            f.write("Maximal sequence length: \t\t%i\n" % np.max(d.descriptor))
            descriptor = 'pepcats'
            # training sequences: strip the leading 'B' token and trailing padding before describing
            seq_desc = PeptideDescriptor([s[1:].rstrip() for s in self.sequences], descriptor)
            seq_desc.calculate_autocorr(7)
            gen_desc = PeptideDescriptor(d.sequences, descriptor)
            gen_desc.calculate_autocorr(7)
            # random comparison set
            self.ran = Random(len(self.generated), np.min(d.descriptor), np.max(d.descriptor))  # generate rand seqs
            probas = count_aas(''.join(seq_desc.sequences)).values()  # get the aa distribution of training seqs
            self.ran.generate_sequences(proba=probas)
            ran_desc = PeptideDescriptor(self.ran.sequences, descriptor)
            ran_desc.calculate_autocorr(7)
            # amphipathic helices comparison set
            self.hel = Helices(len(self.generated), np.min(d.descriptor), np.max(d.descriptor))
            self.hel.generate_sequences()
            hel_desc = PeptideDescriptor(self.hel.sequences, descriptor)
            hel_desc.calculate_autocorr(7)
            # distance calculation
            f.write("\n\nDISTANCE CALCULATION IN '%s' DESCRIPTOR SPACE\n\n" % descriptor.upper())
            desc_dist = distance.cdist(gen_desc.descriptor, seq_desc.descriptor, metric='euclidean')
            f.write("Average euclidean distance of sampled to training data:\t%.3f +/- %.3f\n" %
                    (np.mean(desc_dist), np.std(desc_dist)))
            ran_dist = distance.cdist(ran_desc.descriptor, seq_desc.descriptor, metric='euclidean')
            f.write("Average euclidean distance if randomly sampled seqs:\t%.3f +/- %.3f\n" %
                    (np.mean(ran_dist), np.std(ran_dist)))
            hel_dist = distance.cdist(hel_desc.descriptor, seq_desc.descriptor, metric='euclidean')
            f.write("Average euclidean distance if amphipathic helical seqs:\t%.3f +/- %.3f\n" %
                    (np.mean(hel_dist), np.std(hel_dist)))
            # more simple descriptors
            g_seq = GlobalDescriptor(seq_desc.sequences)
            g_gen = GlobalDescriptor(gen_desc.sequences)
            g_ran = GlobalDescriptor(ran_desc.sequences)
            g_hel = GlobalDescriptor(hel_desc.sequences)
            g_seq.calculate_all()
            g_gen.calculate_all()
            g_ran.calculate_all()
            g_hel.calculate_all()
            # scale global descriptors on the training data before computing distances
            sclr = StandardScaler()
            sclr.fit(g_seq.descriptor)
            f.write("\n\nDISTANCE CALCULATION FOR SCALED GLOBAL DESCRIPTORS\n\n")
            desc_dist = distance.cdist(sclr.transform(g_gen.descriptor), sclr.transform(g_seq.descriptor),
                                       metric='euclidean')
            f.write("Average euclidean distance of sampled to training data:\t%.2f +/- %.2f\n" %
                    (np.mean(desc_dist), np.std(desc_dist)))
            ran_dist = distance.cdist(sclr.transform(g_ran.descriptor), sclr.transform(g_seq.descriptor),
                                      metric='euclidean')
            f.write("Average euclidean distance if randomly sampled seqs:\t%.2f +/- %.2f\n" %
                    (np.mean(ran_dist), np.std(ran_dist)))
            hel_dist = distance.cdist(sclr.transform(g_hel.descriptor), sclr.transform(g_seq.descriptor),
                                      metric='euclidean')
            f.write("Average euclidean distance if amphipathic helical seqs:\t%.2f +/- %.2f\n" %
                    (np.mean(hel_dist), np.std(hel_dist)))
            # hydrophobic moments
            uh_seq = PeptideDescriptor(seq_desc.sequences, 'eisenberg')
            uh_seq.calculate_moment()
            uh_gen = PeptideDescriptor(gen_desc.sequences, 'eisenberg')
            uh_gen.calculate_moment()
            uh_ran = PeptideDescriptor(ran_desc.sequences, 'eisenberg')
            uh_ran.calculate_moment()
            uh_hel = PeptideDescriptor(hel_desc.sequences, 'eisenberg')
            uh_hel.calculate_moment()
            f.write("\n\nHYDROPHOBIC MOMENTS\n\n")
            f.write("Hydrophobic moment of training seqs:\t%.3f +/- %.3f\n" %
                    (np.mean(uh_seq.descriptor), np.std(uh_seq.descriptor)))
            f.write("Hydrophobic moment of sampled seqs:\t\t%.3f +/- %.3f\n" %
                    (np.mean(uh_gen.descriptor), np.std(uh_gen.descriptor)))
            f.write("Hydrophobic moment of random seqs:\t\t%.3f +/- %.3f\n" %
                    (np.mean(uh_ran.descriptor), np.std(uh_ran.descriptor)))
            f.write("Hydrophobic moment of amphipathic seqs:\t%.3f +/- %.3f\n" %
                    (np.mean(uh_hel.descriptor), np.std(uh_hel.descriptor)))
        if plot:
            if self.refs:
                # compare against both reference sets if they were generated
                a = GlobalAnalysis([uh_seq.sequences, uh_gen.sequences, uh_hel.sequences, uh_ran.sequences],
                                   ['training', 'sampled', 'hel', 'ran'])
            else:
                a = GlobalAnalysis([uh_seq.sequences, uh_gen.sequences], ['training', 'sampled'])
            a.plot_summary(filename=fname[:-4] + '.png')
    def save_generated(self, logdir, filename):
        """ Save all sequences in `self.generated` to file
        :param logdir: {str} current log directory (used for comparison sequences)
        :param filename: {str} filename to save the sequences to
        :return: saved file
        """
        with open(filename, 'w') as f:
            for s in self.generated:
                f.write(s + '\n')
        # also save the reference sets built in analyze_generated (assumes it ran before)
        self.ran.save_fasta(logdir + '/random_sequences.fasta')
        self.hel.save_fasta(logdir + '/helical_sequences.fasta')
class Model(object):
    """
    Class containing the LSTM model to learn sequential data
    """
    def __init__(self, n_vocab, outshape, session_name, cell="LSTM", n_units=256, batch=64, layers=2, lr=0.001,
                 dropoutfract=0.1, loss='categorical_crossentropy', l2_reg=None, ask=True, seed=42):
        """ Initialize the model
        :param n_vocab: {int} length of vocabulary
        :param outshape: {int} output dimensionality of the model
        :param session_name: {str} custom name for the current session. Will create directory with this name to save
        results / logs to.
        :param n_units: {int} number of LSTM units per layer
        :param batch: {int} batch size
        :param layers: {int} number of layers in the network
        :param loss: {str} applied loss function, choose from available keras loss functions
        :param lr: {float} learning rate to use with Adam optimizer
        :param dropoutfract: {float} fraction of dropout to add to each layer. Layer1 gets 1 * value, Layer2 2 *
        value and so on.
        :param l2_reg: {float} l2 regularization for kernel
        :param seed {int} random seed used to initialize weights
        """
        random.seed(seed)
        self.seed = seed
        self.dropout = dropoutfract
        self.inshape = (None, n_vocab)  # variable sequence length, one-hot vector of size n_vocab per step
        self.outshape = outshape
        self.neurons = n_units
        self.layers = layers
        self.losses = list()
        self.val_losses = list()
        self.batchsize = batch
        self.lr = lr
        self.cv_loss = None
        self.cv_loss_std = None
        self.cv_val_loss = None
        self.cv_val_loss_std = None
        self.model = None
        self.cell = cell
        self.losstype = loss
        self.session_name = session_name
        self.logdir = './' + session_name
        self.l2 = l2_reg
        if ask and os.path.exists(self.logdir):
            decision = input('\nSession folder already exists!\n'
                             'Do you want to overwrite the previous session? [y/n] ')
            if decision in ['n', 'no', 'N', 'NO', 'No']:
                self.logdir = './' + input('Enter new session name: ')
                os.makedirs(self.logdir)
        self.checkpointdir = self.logdir + '/checkpoint/'
        if not os.path.exists(self.checkpointdir):
            os.makedirs(self.checkpointdir)  # also creates logdir if it does not exist yet
        _, _, self.vocab = _onehotencode('A')
        self.initialize_model(seed=self.seed)
    def initialize_model(self, seed=42):
        """ Method to initialize the model with all parameters saved in the attributes. This method is used during
        initialization of the class, as well as in cross-validation to reinitialize a fresh model for every fold.
        :param seed: {int} random seed to use for weight initialization
        :return: initialized model in ``self.model``
        """
        self.losses = list()
        self.val_losses = list()
        self.cv_loss = None
        self.cv_loss_std = None
        self.cv_val_loss = None
        self.cv_val_loss_std = None
        self.model = None
        weight_init = RandomNormal(mean=0.0, stddev=0.05, seed=seed)  # weights randomly between -0.05 and 0.05
        optimizer = Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        if self.l2:
            l2reg = l2(self.l2)  # build a Keras regularizer object from the l2 rate
        else:
            l2reg = None
        self.model = Sequential()
        for l in range(self.layers):
            if self.cell == "GRU":
                self.model.add(GRU(units=self.neurons,
                                   name='GRU%i' % (l + 1),
                                   input_shape=self.inshape,
                                   return_sequences=True,
                                   kernel_initializer=weight_init,
                                   kernel_regularizer=l2reg,
                                   dropout=self.dropout * (l + 1)))
            else:
                self.model.add(LSTM(units=self.neurons,
                                    name='LSTM%i' % (l + 1),
                                    input_shape=self.inshape,
                                    return_sequences=True,
                                    kernel_initializer=weight_init,
                                    kernel_regularizer=l2reg,
                                    dropout=self.dropout * (l + 1),
                                    recurrent_dropout=self.dropout * (l + 1)))
        self.model.add(Dense(self.outshape,
                             name='Dense',
                             activation='softmax',
                             # BUG FIX: was kernel_regularizer=self.l2 (a raw float);
                             # Keras expects a Regularizer instance, so pass l2reg like the recurrent layers do
                             kernel_regularizer=l2reg,
                             kernel_initializer=weight_init))
        self.model.compile(loss=self.losstype, optimizer=optimizer)
        with open(self.checkpointdir + "model.json", 'w') as f:
            json.dump(self.model.to_json(), f)
        self.model.summary()
    def finetuneinit(self, session_name):
        """ Method to generate a new directory for finetuning a pre-existing model on a new dataset with a new name
        :param session_name: {str} new session name for finetuning
        :return: generates all necessary session folders
        """
        self.session_name = session_name
        self.logdir = './' + session_name
        if os.path.exists(self.logdir):
            decision = input('\nSession folder already exists!\n'
                             'Do you want to overwrite the previous session? [y/n] ')
            if decision in ['n', 'no', 'N', 'NO', 'No']:
                self.logdir = './' + input('Enter new session name: ')
                os.makedirs(self.logdir)
        self.checkpointdir = self.logdir + '/checkpoint/'
        if not os.path.exists(self.checkpointdir):
            os.makedirs(self.checkpointdir)
    def train(self, x, y, epochs=100, valsplit=0.2, sample=100):
        """ Train the model on given training data.
        :param x: {array} training data
        :param y: {array} targets for training data in X
        :param epochs: {int} number of epochs to train
        :param valsplit: {float} fraction of data that should be used as validation data during training
        :param sample: {int} number of sequences to sample after every training epoch
        :return: trained model and measured losses in self.model, self.losses and self.val_losses
        """
        writer = tf.summary.create_file_writer('./logs/' + self.session_name)
        with writer.as_default():
            # train epoch by epoch so a checkpoint is written and losses logged after every epoch
            for e in range(epochs):
                print("Epoch %i" % e)
                checkpoints = [ModelCheckpoint(filepath=self.checkpointdir + 'model_epoch_%i.hdf5' % e, verbose=0)]
                train_history = self.model.fit(x, y, epochs=1, batch_size=self.batchsize, validation_split=valsplit,
                                               shuffle=False, callbacks=checkpoints)
                tf.summary.scalar('loss', train_history.history['loss'][-1], step=e)
                self.losses.append(train_history.history['loss'])
                if valsplit > 0.:
                    self.val_losses.append(train_history.history['val_loss'])
                    tf.summary.scalar('val_loss', train_history.history['val_loss'][-1], step=e)
                if sample:
                    for s in self.sample(sample):  # sample sequences after every training epoch
                        print(s)
        writer.close()
    def cross_val(self, x, y, epochs=100, cv=5, plot=True):
        """ Method to perform cross-validation with the model given data X, y
        :param x: {array} training data
        :param y: {array} targets for training data in X
        :param epochs: {int} number of epochs to train
        :param cv: {int} fold
        :param plot: {bool} whether the losses should be plotted and saved to the session folder
        :return: mean/std of train and validation losses per epoch in self.cv_* attributes
        """
        self.losses = list()  # clean losses if already present
        self.val_losses = list()
        kf = KFold(n_splits=cv)
        cntr = 0
        for train, test in kf.split(x):
            print("\nFold %i" % (cntr + 1))
            self.initialize_model(seed=cntr)  # reinitialize every fold, otherwise it will "remember" previous data
            train_history = self.model.fit(x[train], y[train], epochs=epochs, batch_size=self.batchsize,
                                           validation_data=(x[test], y[test]))
            self.losses.append(train_history.history['loss'])
            self.val_losses.append(train_history.history['val_loss'])
            cntr += 1
        self.cv_loss = np.mean(self.losses, axis=0)
        self.cv_loss_std = np.std(self.losses, axis=0)
        self.cv_val_loss = np.mean(self.val_losses, axis=0)
        self.cv_val_loss_std = np.std(self.val_losses, axis=0)
        if plot:
            self.plot_losses(cv=True)
        # get best epoch with corresponding val_loss
        minloss = np.min(self.cv_val_loss)
        e = np.where(minloss == self.cv_val_loss)[0][0]
        print("\n%i-fold cross-validation result:\n\nBest epoch:\t%i\nVal_loss:\t%.4f" % (cv, e, minloss))
        with open(self.logdir + '/' + self.session_name + '_best_epoch.txt', 'w') as f:
            f.write("%i-fold cross-validation result:\n\nBest epoch:\t%i\nVal_loss:\t%.4f" % (cv, e, minloss))
    def plot_losses(self, show=False, cv=False):
        """Plot the losses obtained in training.
        :param show: {bool} Whether the plot should be shown or saved. If ``False``, the plot is saved to the
        session folder.
        :param cv: {bool} Whether the losses from cross-validation should be plotted. The standard deviation will be
        depicted as filled areas around the mean curve.
        :return: plot (saved) or shown interactive
        """
        fig, ax = plt.subplots()
        ax.set_title('LSTM Categorical Crossentropy Loss Plot', fontweight='bold', fontsize=16)
        if cv:
            filename = self.logdir + '/' + self.session_name + '_cv_loss_plot.pdf'
            x = range(1, len(self.cv_loss) + 1)
            ax.plot(x, self.cv_loss, '-', color='#FE4365', label='Training')
            ax.plot(x, self.cv_val_loss, '-', color='k', label='Validation')
            # shaded bands: +/- one standard deviation over the CV folds
            ax.fill_between(x, self.cv_loss + self.cv_loss_std, self.cv_loss - self.cv_loss_std,
                            facecolors='#FE4365', alpha=0.5)
            ax.fill_between(x, self.cv_val_loss + self.cv_val_loss_std, self.cv_val_loss - self.cv_val_loss_std,
                            facecolors='k', alpha=0.5)
            ax.set_xlim([0.5, len(self.cv_loss) + 0.5])
            minloss = np.min(self.cv_val_loss)
            plt.text(x=0.5, y=0.5, s='best epoch: ' + str(np.where(minloss == self.cv_val_loss)[0][0]) + ', val_loss: '
                     + str(minloss.round(4)), transform=ax.transAxes)
        else:
            filename = self.logdir + '/' + self.session_name + '_loss_plot.pdf'
            x = range(1, len(self.losses) + 1)
            ax.plot(x, self.losses, '-', color='#FE4365', label='Training')
            if self.val_losses:
                ax.plot(x, self.val_losses, '-', color='k', label='Validation')
            ax.set_xlim([0.5, len(self.losses) + 0.5])
        ax.set_ylabel('Loss', fontweight='bold', fontsize=14)
        ax.set_xlabel('Epoch', fontweight='bold', fontsize=14)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
        plt.legend(loc='best')
        if show:
            plt.show()
        else:
            plt.savefig(filename)
    def sample(self, num=100, minlen=7, maxlen=50, start=None, temp=2.5, show=False):
        """Invoke generation of sequence patterns through sampling from the trained model.
        :param num: {int} number of sequences to sample
        :param minlen {int} minimal allowed sequence length
        :param maxlen: {int} maximal length of each pattern generated, if 0, a random length is chosen between 7 and 50
        :param start: {str} start AA to be used for sampling. If ``None``, the 'B' begin token is used
        :param temp: {float} temperature value to sample at.
        :param show: {bool} whether the sampled sequences should be printed out
        :return: {list} sampled sequences of length >= minlen
        """
        print("\nSampling...\n")
        sampled = []
        lcntr = 0  # counts sequences discarded for being shorter than minlen
        pbar = ProgressBar()
        for rs in pbar(range(num)):
            random.seed(rs)
            if not maxlen:  # if the length should be randomly sampled
                longest = np.random.randint(7, 50)
            else:
                longest = maxlen
            if start:
                start_aa = start
            else:  # default: start from the 'B' begin token
                start_aa = 'B'
            sequence = start_aa  # start with starting letter
            while sequence[-1] != ' ' and len(sequence) <= longest:  # sample until padding or maxlen is reached
                x, _, _ = _onehotencode(sequence)
                preds = self.model.predict(x)[0][-1]  # distribution over the next character
                next_aa = _sample_with_temp(preds, temp=temp)
                sequence += self.vocab[next_aa]
            if start_aa == 'B':
                sequence = sequence[1:].rstrip()  # strip the begin token and trailing padding
            else:  # keep starting AA if chosen for sampling
                sequence = sequence.rstrip()
            if len(sequence) < minlen:  # don't take sequences shorter than the minimal length
                lcntr += 1
                continue
            sampled.append(sequence)
            if show:
                print(sequence)
        print("\t%i sequences were shorter than %i" % (lcntr, minlen))
        return sampled
    def load_model(self, filename):
        """Method to load trained weights from a hdf5 file into the initialized model
        :param filename: {str} hdf5 file with trained weights
        :return: model weights loaded from file in ``self.model``
        """
        self.model.load_weights(filename)
def main(infile, sessname, neurons=64, layers=2, epochs=100, batchsize=128, window=0, step=1, target='all',
         valsplit=0.2, sample=100, aa='B', temperature=2.5, cell="LSTM", dropout=0.1, train=True, learningrate=0.01,
         modfile=None, samplelength=36, pad=0, l2_rate=None, cv=None, finetune=False, references=True):
    """Run the full pipeline: load and encode sequences, train (or load/finetune) the
    model, sample new sequences and analyze them.

    :param infile: {str} csv file with one training sequence per line
    :param sessname: {str} session name; a directory of this name stores all results
    :param train: {bool} if True, train a fresh model; otherwise load ``modfile``
    :param finetune: {bool} if True (and train is False), finetune the loaded model
    :param cv: {int} number of cross-validation folds; if None, a single training run is done
    (remaining parameters are forwarded to SequenceHandler / Model / sample, see those docstrings)
    """
    # loading sequence data, analyze, pad and encode it
    data = SequenceHandler(window=window, step=step, refs=references)
    print("Loading sequences...")
    data.load_sequences(infile)
    data.analyze_training()
    # pad sequences
    print("\nPadding sequences...")
    data.pad_sequences(padlen=pad)
    # one-hot encode padded sequences
    print("One-hot encoding sequences...")
    data.one_hot_encode(target=target)
    if train:
        # building the LSTM model
        print("\nBuilding model...")
        model = Model(n_vocab=len(data.vocab), outshape=len(data.vocab), session_name=sessname, n_units=neurons,
                      batch=batchsize, layers=layers, cell=cell, loss='categorical_crossentropy', lr=learningrate,
                      dropoutfract=dropout, l2_reg=l2_rate, ask=True, seed=42)
        print("Model built!")
        if cv:
            # first cross-validate, then retrain a fresh model on the full data
            print("\nPERFORMING %i-FOLD CROSS-VALIDATION...\n" % cv)
            model.cross_val(data.X, data.y, epochs=epochs, cv=cv)
            model.initialize_model(seed=42)
            model.train(data.X, data.y, epochs=epochs, valsplit=0.0, sample=0)
            model.plot_losses()
        else:
            # training model on data
            print("\nTRAINING MODEL FOR %i EPOCHS...\n" % epochs)
            model.train(data.X, data.y, epochs=epochs, valsplit=valsplit, sample=0)
            model.plot_losses()  # plot loss
            save_model_instance(model)
    elif finetune:
        # continue training a pretrained model on the new dataset under a new session name
        print("\nUSING PRETRAINED MODEL FOR FINETUNING... (%s)\n" % modfile)
        print("Loading model...")
        model = load_model_instance(modfile)
        model.load_model(modfile)
        model.finetuneinit(sessname)  # generate new session folders for finetuning run
        print("Finetuning model...")
        model.train(data.X, data.y, epochs=epochs, valsplit=valsplit, sample=0)
        model.plot_losses()  # plot loss
        save_model_instance(model)
    else:
        # sampling only: load a pretrained model, no training
        print("\nUSING PRETRAINED MODEL... (%s)\n" % modfile)
        model = load_model_instance(modfile)
        model.load_model(modfile)
    print(model.model.summary())  # print number of parameters in the model
    # generating new data through sampling
    print("\nSAMPLING %i SEQUENCES...\n" % sample)
    data.generated = model.sample(sample, start=aa, maxlen=samplelength, show=False, temp=temperature)
    data.analyze_generated(sample, fname=model.logdir + '/analysis_temp' + str(temperature) + '.txt', plot=True)
    data.save_generated(model.logdir, model.logdir + '/sampled_sequences_temp' + str(temperature) + '.csv')
if __name__ == "__main__":
    # run main code with the parsed command-line flags
    main(infile=args.dataset, sessname=args.name, batchsize=args.batch_size, epochs=args.epochs,
         layers=args.layers, valsplit=args.valsplit, neurons=args.neurons, cell=args.cell, sample=args.sample,
         temperature=args.temp, dropout=args.dropout, train=args.train, modfile=args.modfile,
         learningrate=args.lr, cv=args.cv, samplelength=args.maxlen, window=args.window,
         step=args.step, aa=args.startchar, l2_rate=args.l2, target=args.target, pad=args.padlen,
         finetune=args.finetune, references=args.refs)
    # save used flags to log file inside the session directory for reproducibility
    _save_flags("./" + args.name + "/flags.txt")
| [
"sklearn.preprocessing.StandardScaler",
"argparse.ArgumentParser",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ModelCheckpoint",
"numpy.mean",
"numpy.random.randint",
"numpy.exp",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.regularizers.l2",
"modlamp.descriptors.Peptid... | [((1103, 1128), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (1121, 1128), True, 'import matplotlib.pyplot as plt\n'), ((1137, 1162), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1160, 1162), False, 'import argparse\n'), ((5117, 5133), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (5125, 5133), True, 'import numpy as np\n'), ((7659, 7680), 'tensorflow.keras.models.load_model', 'load_model', (['hdf5_file'], {}), '(hdf5_file)\n', (7669, 7680), False, 'from tensorflow.keras.models import Sequential, load_model\n'), ((4356, 4393), 'json.dump', 'json.dump', (['args.__dict__', 'f'], {'indent': '(2)'}), '(args.__dict__, f, indent=2)\n', (4365, 4393), False, 'import json\n'), ((5145, 5202), 'numpy.reshape', 'np.reshape', (['result', '(1, result.shape[0], result.shape[1])'], {}), '(result, (1, result.shape[0], result.shape[1]))\n', (5155, 5202), True, 'import numpy as np\n'), ((7121, 7134), 'numpy.log', 'np.log', (['preds'], {}), '(preds)\n', (7127, 7134), True, 'import numpy as np\n'), ((7164, 7180), 'numpy.exp', 'np.exp', (['streched'], {}), '(streched)\n', (7170, 7180), True, 'import numpy as np\n'), ((7502, 7527), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (7517, 7527), False, 'import os\n'), ((12722, 12754), 'modlamp.descriptors.GlobalDescriptor', 'GlobalDescriptor', (['self.sequences'], {}), '(self.sequences)\n', (12738, 12754), False, 'from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor\n'), ((21837, 21854), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (21848, 21854), False, 'import random\n'), ((23757, 23803), 'tensorflow.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0.0)', 'stddev': '(0.05)', 'seed': 'seed'}), '(mean=0.0, stddev=0.05, seed=seed)\n', (23769, 23803), False, 'from tensorflow.keras.initializers import RandomNormal\n'), ((23867, 23935), 
'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.lr', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)', 'decay': '(0.0)'}), '(lr=self.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n', (23871, 23935), False, 'from tensorflow.keras.optimizers import Adam\n'), ((24066, 24078), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (24076, 24078), False, 'from tensorflow.keras.models import Sequential, load_model\n'), ((25972, 25999), 'os.path.exists', 'os.path.exists', (['self.logdir'], {}), '(self.logdir)\n', (25986, 25999), False, 'import os\n'), ((27072, 27132), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (["('./logs/' + self.session_name)"], {}), "('./logs/' + self.session_name)\n", (27101, 27132), True, 'import tensorflow as tf\n'), ((28661, 28679), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'cv'}), '(n_splits=cv)\n', (28666, 28679), False, 'from sklearn.model_selection import KFold\n'), ((29258, 29286), 'numpy.mean', 'np.mean', (['self.losses'], {'axis': '(0)'}), '(self.losses, axis=0)\n', (29265, 29286), True, 'import numpy as np\n'), ((29314, 29341), 'numpy.std', 'np.std', (['self.losses'], {'axis': '(0)'}), '(self.losses, axis=0)\n', (29320, 29341), True, 'import numpy as np\n'), ((29369, 29401), 'numpy.mean', 'np.mean', (['self.val_losses'], {'axis': '(0)'}), '(self.val_losses, axis=0)\n', (29376, 29401), True, 'import numpy as np\n'), ((29433, 29464), 'numpy.std', 'np.std', (['self.val_losses'], {'axis': '(0)'}), '(self.val_losses, axis=0)\n', (29439, 29464), True, 'import numpy as np\n'), ((29600, 29624), 'numpy.min', 'np.min', (['self.cv_val_loss'], {}), '(self.cv_val_loss)\n', (29606, 29624), True, 'import numpy as np\n'), ((30479, 30493), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (30491, 30493), True, 'import matplotlib.pyplot as plt\n'), ((32222, 32244), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), 
"(loc='best')\n", (32232, 32244), True, 'import matplotlib.pyplot as plt\n'), ((33171, 33184), 'progressbar.ProgressBar', 'ProgressBar', ([], {}), '()\n', (33182, 33184), False, 'from progressbar import ProgressBar\n'), ((7190, 7206), 'numpy.exp', 'np.exp', (['streched'], {}), '(streched)\n', (7196, 7206), True, 'import numpy as np\n'), ((14190, 14222), 'modlamp.descriptors.GlobalDescriptor', 'GlobalDescriptor', (['self.generated'], {}), '(self.generated)\n', (14206, 14222), False, 'from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor\n'), ((15202, 15244), 'modlamp.descriptors.PeptideDescriptor', 'PeptideDescriptor', (['d.sequences', 'descriptor'], {}), '(d.sequences, descriptor)\n', (15219, 15244), False, 'from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor\n'), ((15644, 15693), 'modlamp.descriptors.PeptideDescriptor', 'PeptideDescriptor', (['self.ran.sequences', 'descriptor'], {}), '(self.ran.sequences, descriptor)\n', (15661, 15693), False, 'from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor\n'), ((15960, 16009), 'modlamp.descriptors.PeptideDescriptor', 'PeptideDescriptor', (['self.hel.sequences', 'descriptor'], {}), '(self.hel.sequences, descriptor)\n', (15977, 16009), False, 'from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor\n'), ((16223, 16299), 'scipy.spatial.distance.cdist', 'distance.cdist', (['gen_desc.descriptor', 'seq_desc.descriptor'], {'metric': '"""euclidean"""'}), "(gen_desc.descriptor, seq_desc.descriptor, metric='euclidean')\n", (16237, 16299), False, 'from scipy.spatial import distance\n'), ((16481, 16557), 'scipy.spatial.distance.cdist', 'distance.cdist', (['ran_desc.descriptor', 'seq_desc.descriptor'], {'metric': '"""euclidean"""'}), "(ran_desc.descriptor, seq_desc.descriptor, metric='euclidean')\n", (16495, 16557), False, 'from scipy.spatial import distance\n'), ((16734, 16810), 'scipy.spatial.distance.cdist', 'distance.cdist', (['hel_desc.descriptor', 'seq_desc.descriptor'], 
{'metric': '"""euclidean"""'}), "(hel_desc.descriptor, seq_desc.descriptor, metric='euclidean')\n", (16748, 16810), False, 'from scipy.spatial import distance\n'), ((17038, 17074), 'modlamp.descriptors.GlobalDescriptor', 'GlobalDescriptor', (['seq_desc.sequences'], {}), '(seq_desc.sequences)\n', (17054, 17074), False, 'from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor\n'), ((17095, 17131), 'modlamp.descriptors.GlobalDescriptor', 'GlobalDescriptor', (['gen_desc.sequences'], {}), '(gen_desc.sequences)\n', (17111, 17131), False, 'from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor\n'), ((17152, 17188), 'modlamp.descriptors.GlobalDescriptor', 'GlobalDescriptor', (['ran_desc.sequences'], {}), '(ran_desc.sequences)\n', (17168, 17188), False, 'from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor\n'), ((17209, 17245), 'modlamp.descriptors.GlobalDescriptor', 'GlobalDescriptor', (['hel_desc.sequences'], {}), '(hel_desc.sequences)\n', (17225, 17245), False, 'from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor\n'), ((17401, 17417), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (17415, 17417), False, 'from sklearn.preprocessing import StandardScaler\n'), ((18568, 18618), 'modlamp.descriptors.PeptideDescriptor', 'PeptideDescriptor', (['seq_desc.sequences', '"""eisenberg"""'], {}), "(seq_desc.sequences, 'eisenberg')\n", (18585, 18618), False, 'from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor\n'), ((18678, 18728), 'modlamp.descriptors.PeptideDescriptor', 'PeptideDescriptor', (['gen_desc.sequences', '"""eisenberg"""'], {}), "(gen_desc.sequences, 'eisenberg')\n", (18695, 18728), False, 'from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor\n'), ((18788, 18838), 'modlamp.descriptors.PeptideDescriptor', 'PeptideDescriptor', (['ran_desc.sequences', '"""eisenberg"""'], {}), "(ran_desc.sequences, 'eisenberg')\n", (18805, 18838), False, 'from modlamp.descriptors 
import PeptideDescriptor, GlobalDescriptor\n'), ((18898, 18948), 'modlamp.descriptors.PeptideDescriptor', 'PeptideDescriptor', (['hel_desc.sequences', '"""eisenberg"""'], {}), "(hel_desc.sequences, 'eisenberg')\n", (18915, 18948), False, 'from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor\n'), ((22497, 22524), 'os.path.exists', 'os.path.exists', (['self.logdir'], {}), '(self.logdir)\n', (22511, 22524), False, 'import os\n'), ((22924, 22958), 'os.path.exists', 'os.path.exists', (['self.checkpointdir'], {}), '(self.checkpointdir)\n', (22938, 22958), False, 'import os\n'), ((22972, 23003), 'os.makedirs', 'os.makedirs', (['self.checkpointdir'], {}), '(self.checkpointdir)\n', (22983, 23003), False, 'import os\n'), ((23985, 23996), 'tensorflow.keras.regularizers.l2', 'l2', (['self.l2'], {}), '(self.l2)\n', (23987, 23996), False, 'from tensorflow.keras.regularizers import l2\n'), ((25136, 25257), 'tensorflow.keras.layers.Dense', 'Dense', (['self.outshape'], {'name': '"""Dense"""', 'activation': '"""softmax"""', 'kernel_regularizer': 'self.l2', 'kernel_initializer': 'weight_init'}), "(self.outshape, name='Dense', activation='softmax', kernel_regularizer\n =self.l2, kernel_initializer=weight_init)\n", (25141, 25257), False, 'from tensorflow.keras.layers import Dense, LSTM, GRU\n'), ((26399, 26433), 'os.path.exists', 'os.path.exists', (['self.checkpointdir'], {}), '(self.checkpointdir)\n', (26413, 26433), False, 'import os\n'), ((26447, 26478), 'os.makedirs', 'os.makedirs', (['self.checkpointdir'], {}), '(self.checkpointdir)\n', (26458, 26478), False, 'import os\n'), ((31294, 31318), 'numpy.min', 'np.min', (['self.cv_val_loss'], {}), '(self.cv_val_loss)\n', (31300, 31318), True, 'import numpy as np\n'), ((32274, 32284), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32282, 32284), True, 'import matplotlib.pyplot as plt\n'), ((32311, 32332), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (32322, 32332), True, 'import 
matplotlib.pyplot as plt\n'), ((33233, 33248), 'random.seed', 'random.seed', (['rs'], {}), '(rs)\n', (33244, 33248), False, 'import random\n'), ((13051, 13074), 'numpy.median', 'np.median', (['d.descriptor'], {}), '(d.descriptor)\n', (13060, 13074), True, 'import numpy as np\n'), ((13123, 13143), 'numpy.min', 'np.min', (['d.descriptor'], {}), '(d.descriptor)\n', (13129, 13143), True, 'import numpy as np\n'), ((13192, 13212), 'numpy.max', 'np.max', (['d.descriptor'], {}), '(d.descriptor)\n', (13198, 13212), True, 'import numpy as np\n'), ((15388, 15408), 'numpy.min', 'np.min', (['d.descriptor'], {}), '(d.descriptor)\n', (15394, 15408), True, 'import numpy as np\n'), ((15410, 15430), 'numpy.max', 'np.max', (['d.descriptor'], {}), '(d.descriptor)\n', (15416, 15430), True, 'import numpy as np\n'), ((15851, 15871), 'numpy.min', 'np.min', (['d.descriptor'], {}), '(d.descriptor)\n', (15857, 15871), True, 'import numpy as np\n'), ((15873, 15893), 'numpy.max', 'np.max', (['d.descriptor'], {}), '(d.descriptor)\n', (15879, 15893), True, 'import numpy as np\n'), ((19734, 19865), 'modlamp.analysis.GlobalAnalysis', 'GlobalAnalysis', (['[uh_seq.sequences, uh_gen.sequences, uh_hel.sequences, uh_ran.sequences]', "['training', 'sampled', 'hel', 'ran']"], {}), "([uh_seq.sequences, uh_gen.sequences, uh_hel.sequences,\n uh_ran.sequences], ['training', 'sampled', 'hel', 'ran'])\n", (19748, 19865), False, 'from modlamp.analysis import GlobalAnalysis\n'), ((19935, 20012), 'modlamp.analysis.GlobalAnalysis', 'GlobalAnalysis', (['[uh_seq.sequences, uh_gen.sequences]', "['training', 'sampled']"], {}), "([uh_seq.sequences, uh_gen.sequences], ['training', 'sampled'])\n", (19949, 20012), False, 'from modlamp.analysis import GlobalAnalysis\n'), ((22826, 22850), 'os.makedirs', 'os.makedirs', (['self.logdir'], {}), '(self.logdir)\n', (22837, 22850), False, 'import os\n'), ((26301, 26325), 'os.makedirs', 'os.makedirs', (['self.logdir'], {}), '(self.logdir)\n', (26312, 26325), False, 'import os\n'), 
((27575, 27643), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', "train_history.history['loss'][-1]"], {'step': 'e'}), "('loss', train_history.history['loss'][-1], step=e)\n", (27592, 27643), True, 'import tensorflow as tf\n'), ((29637, 29674), 'numpy.where', 'np.where', (['(minloss == self.cv_val_loss)'], {}), '(minloss == self.cv_val_loss)\n', (29645, 29674), True, 'import numpy as np\n'), ((33346, 33370), 'numpy.random.randint', 'np.random.randint', (['(7)', '(50)'], {}), '(7, 50)\n', (33363, 33370), True, 'import numpy as np\n'), ((12959, 12980), 'numpy.mean', 'np.mean', (['d.descriptor'], {}), '(d.descriptor)\n', (12966, 12980), True, 'import numpy as np\n'), ((12982, 13002), 'numpy.std', 'np.std', (['d.descriptor'], {}), '(d.descriptor)\n', (12988, 13002), True, 'import numpy as np\n'), ((14806, 14829), 'numpy.median', 'np.median', (['d.descriptor'], {}), '(d.descriptor)\n', (14815, 14829), True, 'import numpy as np\n'), ((14890, 14910), 'numpy.min', 'np.min', (['d.descriptor'], {}), '(d.descriptor)\n', (14896, 14910), True, 'import numpy as np\n'), ((14971, 14991), 'numpy.max', 'np.max', (['d.descriptor'], {}), '(d.descriptor)\n', (14977, 14991), True, 'import numpy as np\n'), ((24182, 24376), 'tensorflow.keras.layers.GRU', 'GRU', ([], {'units': 'self.neurons', 'name': "('GRU%i' % (l + 1))", 'input_shape': 'self.inshape', 'return_sequences': '(True)', 'kernel_initializer': 'weight_init', 'kernel_regularizer': 'l2reg', 'dropout': '(self.dropout * (l + 1))'}), "(units=self.neurons, name='GRU%i' % (l + 1), input_shape=self.inshape,\n return_sequences=True, kernel_initializer=weight_init,\n kernel_regularizer=l2reg, dropout=self.dropout * (l + 1))\n", (24185, 24376), False, 'from tensorflow.keras.layers import Dense, LSTM, GRU\n'), ((24629, 24871), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': 'self.neurons', 'name': "('LSTM%i' % (l + 1))", 'input_shape': 'self.inshape', 'return_sequences': '(True)', 'kernel_initializer': 
'weight_init', 'kernel_regularizer': 'l2reg', 'dropout': '(self.dropout * (l + 1))', 'recurrent_dropout': '(self.dropout * (l + 1))'}), "(units=self.neurons, name='LSTM%i' % (l + 1), input_shape=self.inshape,\n return_sequences=True, kernel_initializer=weight_init,\n kernel_regularizer=l2reg, dropout=self.dropout * (l + 1),\n recurrent_dropout=self.dropout * (l + 1))\n", (24633, 24871), False, 'from tensorflow.keras.layers import Dense, LSTM, GRU\n'), ((27272, 27359), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': "(self.checkpointdir + 'model_epoch_%i.hdf5' % e)", 'verbose': '(0)'}), "(filepath=self.checkpointdir + 'model_epoch_%i.hdf5' % e,\n verbose=0)\n", (27287, 27359), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((27842, 27918), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""val_loss"""', "train_history.history['val_loss'][-1]"], {'step': 'e'}), "('val_loss', train_history.history['val_loss'][-1], step=e)\n", (27859, 27918), True, 'import tensorflow as tf\n'), ((5976, 6005), 'numpy.where', 'np.where', (['(matrix[i, j] == 1.0)'], {}), '(matrix[i, j] == 1.0)\n', (5984, 6005), True, 'import numpy as np\n'), ((14702, 14723), 'numpy.mean', 'np.mean', (['d.descriptor'], {}), '(d.descriptor)\n', (14709, 14723), True, 'import numpy as np\n'), ((14725, 14745), 'numpy.std', 'np.std', (['d.descriptor'], {}), '(d.descriptor)\n', (14731, 14745), True, 'import numpy as np\n'), ((16418, 16436), 'numpy.mean', 'np.mean', (['desc_dist'], {}), '(desc_dist)\n', (16425, 16436), True, 'import numpy as np\n'), ((16438, 16455), 'numpy.std', 'np.std', (['desc_dist'], {}), '(desc_dist)\n', (16444, 16455), True, 'import numpy as np\n'), ((16673, 16690), 'numpy.mean', 'np.mean', (['ran_dist'], {}), '(ran_dist)\n', (16680, 16690), True, 'import numpy as np\n'), ((16692, 16708), 'numpy.std', 'np.std', (['ran_dist'], {}), '(ran_dist)\n', (16698, 16708), True, 'import numpy as np\n'), ((16929, 16946), 'numpy.mean', 
'np.mean', (['hel_dist'], {}), '(hel_dist)\n', (16936, 16946), True, 'import numpy as np\n'), ((16948, 16964), 'numpy.std', 'np.std', (['hel_dist'], {}), '(hel_dist)\n', (16954, 16964), True, 'import numpy as np\n'), ((17823, 17841), 'numpy.mean', 'np.mean', (['desc_dist'], {}), '(desc_dist)\n', (17830, 17841), True, 'import numpy as np\n'), ((17843, 17860), 'numpy.std', 'np.std', (['desc_dist'], {}), '(desc_dist)\n', (17849, 17860), True, 'import numpy as np\n'), ((18142, 18159), 'numpy.mean', 'np.mean', (['ran_dist'], {}), '(ran_dist)\n', (18149, 18159), True, 'import numpy as np\n'), ((18161, 18177), 'numpy.std', 'np.std', (['ran_dist'], {}), '(ran_dist)\n', (18167, 18177), True, 'import numpy as np\n'), ((18462, 18479), 'numpy.mean', 'np.mean', (['hel_dist'], {}), '(hel_dist)\n', (18469, 18479), True, 'import numpy as np\n'), ((18481, 18497), 'numpy.std', 'np.std', (['hel_dist'], {}), '(hel_dist)\n', (18487, 18497), True, 'import numpy as np\n'), ((19137, 19163), 'numpy.mean', 'np.mean', (['uh_seq.descriptor'], {}), '(uh_seq.descriptor)\n', (19144, 19163), True, 'import numpy as np\n'), ((19165, 19190), 'numpy.std', 'np.std', (['uh_seq.descriptor'], {}), '(uh_seq.descriptor)\n', (19171, 19190), True, 'import numpy as np\n'), ((19293, 19319), 'numpy.mean', 'np.mean', (['uh_gen.descriptor'], {}), '(uh_gen.descriptor)\n', (19300, 19319), True, 'import numpy as np\n'), ((19321, 19346), 'numpy.std', 'np.std', (['uh_gen.descriptor'], {}), '(uh_gen.descriptor)\n', (19327, 19346), True, 'import numpy as np\n'), ((19448, 19474), 'numpy.mean', 'np.mean', (['uh_ran.descriptor'], {}), '(uh_ran.descriptor)\n', (19455, 19474), True, 'import numpy as np\n'), ((19476, 19501), 'numpy.std', 'np.std', (['uh_ran.descriptor'], {}), '(uh_ran.descriptor)\n', (19482, 19501), True, 'import numpy as np\n'), ((19606, 19632), 'numpy.mean', 'np.mean', (['uh_hel.descriptor'], {}), '(uh_hel.descriptor)\n', (19613, 19632), True, 'import numpy as np\n'), ((19634, 19659), 'numpy.std', 'np.std', 
(['uh_hel.descriptor'], {}), '(uh_hel.descriptor)\n', (19640, 19659), True, 'import numpy as np\n'), ((6512, 6544), 'numpy.where', 'np.where', (['(matrix[n, i, j] == 1.0)'], {}), '(matrix[n, i, j] == 1.0)\n', (6520, 6544), True, 'import numpy as np\n'), ((31377, 31414), 'numpy.where', 'np.where', (['(minloss == self.cv_val_loss)'], {}), '(minloss == self.cv_val_loss)\n', (31385, 31414), True, 'import numpy as np\n')] |
from typing import Optional, Sequence, Tuple, List, Dict
import torch
import torch.nn as nn
import model.backbone as backbone
import torch.nn.functional as F
# from dalib.modules.classifier import Classifier as ClassifierBase
# from dalib.modules.kernels import optimal_kernel_combinations
from numpy import array, dot
import numpy as np
from qpsolvers import solve_qp
__all__ = ['MultipleKernelMaximumMeanDiscrepancy', 'ImageClassifier']
class DANNet(nn.Module):
def __init__(self, base_net='ResNet50', use_bottleneck=True, bottleneck_dim=256, width=256, class_num=31):
super(DANNet, self).__init__()
## set base network
self.backbone = backbone.network_dict[base_net]()
self.use_bottleneck = use_bottleneck
self.bottleneck_layer_list = [nn.Linear(self.backbone.output_num(), bottleneck_dim), nn.BatchNorm1d(bottleneck_dim), nn.ReLU(), nn.Dropout(0.5)]
self.bottleneck = nn.Sequential(*self.bottleneck_layer_list)
self.head = nn.Linear(bottleneck_dim, class_num)
self.softmax = nn.Softmax(dim=1)
self.sigmoid = nn.Sigmoid()
## initialization
self.bottleneck[0].weight.data.normal_(0, 0.005)
self.bottleneck[0].bias.data.fill_(0.1)
self.head.weight.data.normal_(0, 0.01)
self.head.bias.data.fill_(0.0)
self.parameter_list = [{"params": self.backbone.parameters(), "lr": 0.1},
{"params": self.bottleneck.parameters(), "lr": 1.},
{"params": self.head.parameters(), "lr": 1.},]
def forward(self, inputs):
features = self.backbone(inputs)
if self.use_bottleneck:
features = self.bottleneck(features)
logits = self.head(features)
softmax_outputs = self.softmax(logits)
return features, logits, softmax_outputs
class DAN(object):
def __init__(self, base_net='ResNet50', width=1024, class_num=31, use_bottleneck=True, use_gpu=True, srcweight=3):
self.c_net = DANNet(base_net, use_bottleneck, width, width, class_num)
self.mkmmd_loss = MultipleKernelMaximumMeanDiscrepancy(kernels=[GaussianKernel(alpha=2 ** k) for k in range(-3, 2)], linear= False, quadratic_program=False)
self.use_gpu = use_gpu
self.is_train = False
self.iter_num = 0
self.class_num = class_num
if self.use_gpu:
self.c_net = self.c_net.cuda()
self.srcweight = srcweight
def get_loss(self, inputs, labels_source):
class_criterion = nn.CrossEntropyLoss()
# define loss function
mkmmd_loss = MultipleKernelMaximumMeanDiscrepancy(
kernels=[GaussianKernel(alpha=2 ** k) for k in range(-3, 2)],
linear= False, quadratic_program=False)
features, logits, softmax_outputs = self.c_net(inputs)
outputs_source = logits.narrow(0, 0, labels_source.size(0))
source_features = features.narrow(0, 0, labels_source.size(0))
target_features = features.narrow(0, labels_source.size(0), inputs.size(0) - labels_source.size(0))
classifier_loss = class_criterion(outputs_source, labels_source)
transfer_loss = mkmmd_loss(source_features, target_features)
total_loss = classifier_loss + 1.0*transfer_loss
return [total_loss, classifier_loss, transfer_loss]
def predict(self, inputs):
feature, _, softmax_outputs= self.c_net(inputs)
return softmax_outputs, feature
def get_parameter_list(self):
return self.c_net.parameter_list
def set_train(self, mode):
self.c_net.train(mode)
self.mkmmd_loss.train(mode)
self.is_train = mode
class MultipleKernelMaximumMeanDiscrepancy(nn.Module):
r"""The Multiple Kernel Maximum Mean Discrepancy (MK-MMD) used in
`Learning Transferable Features with Deep Adaptation Networks <https://arxiv.org/pdf/1502.02791>`_
Given source domain :math:`\mathcal{D}_s` of :math:`n_s` labeled points and target domain :math:`\mathcal{D}_t`
of :math:`n_t` unlabeled points drawn i.i.d. from P and Q respectively, the deep networks will generate
activations as :math:`\{z_i^s\}_{i=1}^{n_s}` and :math:`\{z_i^t\}_{i=1}^{n_t}`.
The MK-MMD :math:`D_k (P, Q)` between probability distributions P and Q is defined as
.. math::
D_k(P, Q) \triangleq \| E_p [\phi(z^s)] - E_q [\phi(z^t)] \|^2_{\mathcal{H}_k},
:math:`k` is a kernel function in the function space
.. math::
\mathcal{K} \triangleq \{ k=\sum_{u=1}^{m}\beta_{u} k_{u} \}
where :math:`k_{u}` is a single kernel.
Using kernel trick, MK-MMD can be computed as
.. math::
\hat{D}_k(P, Q) &=
\dfrac{1}{n_s^2} \sum_{i=1}^{n_s}\sum_{j=1}^{n_s} k(z_i^{s}, z_j^{s}) \\
&+ \dfrac{1}{n_t^2} \sum_{i=1}^{n_t}\sum_{j=1}^{n_t} k(z_i^{t}, z_j^{t}) \\
&- \dfrac{2}{n_s n_t} \sum_{i=1}^{n_s}\sum_{j=1}^{n_t} k(z_i^{s}, z_j^{t}). \\
Parameters:
- **kernels** (tuple(`nn.Module`)): kernel functions.
- **linear** (bool): whether use the linear version of DAN. Default: False
- **quadratic_program** (bool): whether use quadratic program to solve :math:`\beta`. Default: False
Inputs: z_s, z_t
- **z_s** (tensor): activations from the source domain, :math:`z^s`
- **z_t** (tensor): activations from the target domain, :math:`z^t`
Shape:
- Inputs: :math:`(minibatch, *)` where * means any dimension
- Outputs: scalar
.. note::
Activations :math:`z^{s}` and :math:`z^{t}` must have the same shape.
.. note::
The kernel values will add up when there are multiple kernels.
Examples::
>>> from dalib.modules.kernels import GaussianKernel
>>> feature_dim = 1024
>>> batch_size = 10
>>> kernels = (GaussianKernel(alpha=0.5), GaussianKernel(alpha=1.), GaussianKernel(alpha=2.))
>>> loss = MultipleKernelMaximumMeanDiscrepancy(kernels)
>>> # features from source domain and target domain
>>> z_s, z_t = torch.randn(batch_size, feature_dim), torch.randn(batch_size, feature_dim)
>>> output = loss(z_s, z_t)
"""
def __init__(self, kernels: Sequence[nn.Module], linear: Optional[bool] = False,
quadratic_program: Optional[bool] = False):
super(MultipleKernelMaximumMeanDiscrepancy, self).__init__()
self.kernels = kernels
self.index_matrix = None
self.linear = linear
self.quadratic_program = quadratic_program
def forward(self, z_s: torch.Tensor, z_t: torch.Tensor) -> torch.Tensor:
features = torch.cat([z_s, z_t], dim=0)
batch_size = int(z_s.size(0))
self.index_matrix = _update_index_matrix(batch_size, self.index_matrix, self.linear).to(z_s.device)
if not self.quadratic_program:
kernel_matrix = sum([kernel(features) for kernel in self.kernels]) # Add up the matrix of each kernel
# Add 2 / (n-1) to make up for the value on the diagonal
# to ensure loss is positive in the non-linear version
loss = (kernel_matrix * self.index_matrix).sum() + 2. / float(batch_size - 1)
else:
kernel_values = [(kernel(features) * self.index_matrix).sum() + 2. / float(batch_size - 1) for kernel in self.kernels]
loss = optimal_kernel_combinations(kernel_values)
return loss
def _update_index_matrix(batch_size: int, index_matrix: Optional[torch.Tensor] = None,
linear: Optional[bool] = True) -> torch.Tensor:
r"""
Update the `index_matrix` which convert `kernel_matrix` to loss.
If `index_matrix` is a tensor with shape (2 x batch_size, 2 x batch_size), then return `index_matrix`.
Else return a new tensor with shape (2 x batch_size, 2 x batch_size).
"""
if index_matrix is None or index_matrix.size(0) != batch_size * 2:
index_matrix = torch.zeros(2 * batch_size, 2 * batch_size)
if linear:
for i in range(batch_size):
s1, s2 = i, (i + 1) % batch_size
t1, t2 = s1 + batch_size, s2 + batch_size
index_matrix[s1, s2] = 1. / float(batch_size)
index_matrix[t1, t2] = 1. / float(batch_size)
index_matrix[s1, t2] = -1. / float(batch_size)
index_matrix[s2, t1] = -1. / float(batch_size)
else:
for i in range(batch_size):
for j in range(batch_size):
if i != j:
index_matrix[i][j] = 1. / float(batch_size * (batch_size - 1))
index_matrix[i + batch_size][j + batch_size] = 1. / float(batch_size * (batch_size - 1))
for i in range(batch_size):
for j in range(batch_size):
index_matrix[i][j + batch_size] = -1. / float(batch_size * batch_size)
index_matrix[i + batch_size][j] = -1. / float(batch_size * batch_size)
return index_matrix
class GaussianKernel(nn.Module):
r"""Gaussian Kernel Matrix
Gaussian Kernel k is defined by
.. math::
k(x_1, x_2) = \exp \left( - \dfrac{\| x_1 - x_2 \|^2}{2\sigma^2} \right)
where :math:`x_1, x_2 \in R^d` are 1-d tensors.
Gaussian Kernel Matrix K is defined on input group :math:`X=(x_1, x_2, ..., x_m),`
.. math::
K(X)_{i,j} = k(x_i, x_j)
Also by default, during training this layer keeps running estimates of the
mean of L2 distances, which are then used to set hyperparameter :math:`\sigma`.
Mathematically, the estimation is :math:`\sigma^2 = \dfrac{\alpha}{n^2}\sum_{i,j} \| x_i - x_j \|^2`.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and use a fixed :math:`\sigma` instead.
Parameters:
- sigma (float, optional): bandwidth :math:`\sigma`. Default: None
- track_running_stats (bool, optional): If ``True``, this module tracks the running mean of :math:`\sigma^2`.
Otherwise, it won't track such statistics and always uses fix :math:`\sigma^2`. Default: ``True``
- alpha (float, optional): :math:`\alpha` which decides the magnitude of :math:`\sigma^2` when track_running_stats is set to ``True``
Inputs:
- X (tensor): input group :math:`X`
Shape:
- Inputs: :math:`(minibatch, F)` where F means the dimension of input features.
- Outputs: :math:`(minibatch, minibatch)`
"""
def __init__(self, sigma: Optional[float] = None, track_running_stats: Optional[bool] = True,
alpha: Optional[float] = 1.):
super(GaussianKernel, self).__init__()
assert track_running_stats or sigma is not None
self.sigma_square = torch.tensor(sigma * sigma) if sigma is not None else None
self.track_running_stats = track_running_stats
self.alpha = alpha
def forward(self, X: torch.Tensor) -> torch.Tensor:
l2_distance_square = ((X.unsqueeze(0) - X.unsqueeze(1)) ** 2).sum(2)
if self.track_running_stats:
self.sigma_square = self.alpha * torch.mean(l2_distance_square.detach())
return torch.exp(-l2_distance_square / (2 * self.sigma_square))
def optimal_kernel_combinations(kernel_values: List[torch.Tensor]) -> torch.Tensor:
# use quadratic program to get optimal kernel
num_kernel = len(kernel_values)
kernel_values_numpy = array([float(k.detach().cpu().data.item()) for k in kernel_values])
if np.all(kernel_values_numpy <= 0):
beta = solve_qp(
P=-np.eye(num_kernel),
q=np.zeros(num_kernel),
A=kernel_values_numpy,
b=np.array([-1.]),
G=-np.eye(num_kernel),
h=np.zeros(num_kernel),
)
else:
beta = solve_qp(
P=np.eye(num_kernel),
q=np.zeros(num_kernel),
A=kernel_values_numpy,
b=np.array([1.]),
G=-np.eye(num_kernel),
h=np.zeros(num_kernel),
)
beta = beta / beta.sum(axis=0) * num_kernel # normalize
return sum([k * b for (k, b) in zip(kernel_values, beta)]) | [
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.nn.CrossEntropyLoss",
"torch.cat",
"numpy.zeros",
"torch.exp",
"torch.nn.Softmax",
"numpy.array",
"torch.nn.Linear",
"torch.zeros",
"numpy.eye",
"torch.tensor",
"numpy.all",
"torch.nn.Sigmoid"
] | [((11514, 11546), 'numpy.all', 'np.all', (['(kernel_values_numpy <= 0)'], {}), '(kernel_values_numpy <= 0)\n', (11520, 11546), True, 'import numpy as np\n'), ((926, 968), 'torch.nn.Sequential', 'nn.Sequential', (['*self.bottleneck_layer_list'], {}), '(*self.bottleneck_layer_list)\n', (939, 968), True, 'import torch.nn as nn\n'), ((989, 1025), 'torch.nn.Linear', 'nn.Linear', (['bottleneck_dim', 'class_num'], {}), '(bottleneck_dim, class_num)\n', (998, 1025), True, 'import torch.nn as nn\n'), ((1057, 1074), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (1067, 1074), True, 'import torch.nn as nn\n'), ((1098, 1110), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1108, 1110), True, 'import torch.nn as nn\n'), ((2544, 2565), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2563, 2565), True, 'import torch.nn as nn\n'), ((6634, 6662), 'torch.cat', 'torch.cat', (['[z_s, z_t]'], {'dim': '(0)'}), '([z_s, z_t], dim=0)\n', (6643, 6662), False, 'import torch\n'), ((7940, 7983), 'torch.zeros', 'torch.zeros', (['(2 * batch_size)', '(2 * batch_size)'], {}), '(2 * batch_size, 2 * batch_size)\n', (7951, 7983), False, 'import torch\n'), ((11184, 11240), 'torch.exp', 'torch.exp', (['(-l2_distance_square / (2 * self.sigma_square))'], {}), '(-l2_distance_square / (2 * self.sigma_square))\n', (11193, 11240), False, 'import torch\n'), ((840, 870), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['bottleneck_dim'], {}), '(bottleneck_dim)\n', (854, 870), True, 'import torch.nn as nn\n'), ((872, 881), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (879, 881), True, 'import torch.nn as nn\n'), ((883, 898), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (893, 898), True, 'import torch.nn as nn\n'), ((10770, 10797), 'torch.tensor', 'torch.tensor', (['(sigma * sigma)'], {}), '(sigma * sigma)\n', (10782, 10797), False, 'import torch\n'), ((11622, 11642), 'numpy.zeros', 'np.zeros', (['num_kernel'], {}), '(num_kernel)\n', (11630, 
11642), True, 'import numpy as np\n'), ((11693, 11709), 'numpy.array', 'np.array', (['[-1.0]'], {}), '([-1.0])\n', (11701, 11709), True, 'import numpy as np\n'), ((11759, 11779), 'numpy.zeros', 'np.zeros', (['num_kernel'], {}), '(num_kernel)\n', (11767, 11779), True, 'import numpy as np\n'), ((11840, 11858), 'numpy.eye', 'np.eye', (['num_kernel'], {}), '(num_kernel)\n', (11846, 11858), True, 'import numpy as np\n'), ((11874, 11894), 'numpy.zeros', 'np.zeros', (['num_kernel'], {}), '(num_kernel)\n', (11882, 11894), True, 'import numpy as np\n'), ((11945, 11960), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (11953, 11960), True, 'import numpy as np\n'), ((12010, 12030), 'numpy.zeros', 'np.zeros', (['num_kernel'], {}), '(num_kernel)\n', (12018, 12030), True, 'import numpy as np\n'), ((11588, 11606), 'numpy.eye', 'np.eye', (['num_kernel'], {}), '(num_kernel)\n', (11594, 11606), True, 'import numpy as np\n'), ((11725, 11743), 'numpy.eye', 'np.eye', (['num_kernel'], {}), '(num_kernel)\n', (11731, 11743), True, 'import numpy as np\n'), ((11976, 11994), 'numpy.eye', 'np.eye', (['num_kernel'], {}), '(num_kernel)\n', (11982, 11994), True, 'import numpy as np\n')] |
if __name__ == "__main__":
import numpy as np
array = np.zeros((32,32))
print(array.shape)
array = array.reshape(1,32,32,1)
print(array.shape)
from readTrafficSigns import readTrafficSigns
from matplotlib import pyplot as plt
trainImages, trainLabels = readTrafficSigns(
"./traffic-signs-data/GTSRB_Final_Training_Images/GTSRB/Final_Training/Images/")
print(f"Labels {len(trainLabels)} and Images {len(trainImages)}")
plt.imshow(trainImages[42])
plt.show()
| [
"readTrafficSigns.readTrafficSigns",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"matplotlib.pyplot.show"
] | [((62, 80), 'numpy.zeros', 'np.zeros', (['(32, 32)'], {}), '((32, 32))\n', (70, 80), True, 'import numpy as np\n'), ((287, 394), 'readTrafficSigns.readTrafficSigns', 'readTrafficSigns', (['"""./traffic-signs-data/GTSRB_Final_Training_Images/GTSRB/Final_Training/Images/"""'], {}), "(\n './traffic-signs-data/GTSRB_Final_Training_Images/GTSRB/Final_Training/Images/'\n )\n", (303, 394), False, 'from readTrafficSigns import readTrafficSigns\n'), ((469, 496), 'matplotlib.pyplot.imshow', 'plt.imshow', (['trainImages[42]'], {}), '(trainImages[42])\n', (479, 496), True, 'from matplotlib import pyplot as plt\n'), ((501, 511), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (509, 511), True, 'from matplotlib import pyplot as plt\n')] |
import pytest
import mplstereonet
import numpy as np
class TestParseStrikes:
    """Tests for ``mplstereonet.parse_strike_dip``.

    Each case pairs a (strike, dip) string tuple with the numeric
    (strike, dip) the parser is expected to return; the expected values
    show quadrant/letter forms being normalized to right-hand-rule
    azimuths (e.g. ``N30E``/``45NW`` -> ``(210, 45)``).
    """

    # Parametrized (rather than looped inside one test) so every case is
    # reported independently instead of stopping at the first failure.
    @pytest.mark.parametrize('strike,dip,expected', [
        ('N30E', '45NW', (210, 45)),
        ('210', '45', (210, 45)),
        ('E10N', '20NW', (260, 20)),
        ('350', '40W', (170, 40)),
        ('280', '30SW', (100, 30)),
        ('280', '30 SW', (100, 30)),
    ])
    def test_parse_strike(self, strike, dip, expected):
        result = mplstereonet.parse_strike_dip(strike, dip)
        assert np.allclose(result, expected)
class TestParseQuadrant:
    """Tests for ``mplstereonet.parse_quadrant_measurement``.

    Valid quadrant strings (with or without embedded spaces) must parse
    to the expected azimuth in degrees; contradictory quadrant pairs
    must raise ``ValueError``.
    """

    # Parametrized so each measurement is checked and reported
    # independently instead of the loop stopping at the first failure.
    @pytest.mark.parametrize('text,azi', [
        ('N30E', 30),
        ('E30N', 60),
        ('E30S', 120),
        ('S80E', 100),
        ('S10W', 190),
        ('W10S', 260),
        ('W30N', 300),
        ('N10E', 10),
        ('N10W', 350),
        ('N 10 W', 350),
    ])
    def test_parse_quadrant(self, text, azi):
        assert azi == mplstereonet.parse_quadrant_measurement(text)

    @pytest.mark.parametrize('text', ['N10S', 'S80N', 'E10W', 'W30E'])
    def test_parse_quadrant_errors(self, text):
        # Impossible direction pairs (e.g. "N...S") must be rejected.
        with pytest.raises(ValueError):
            mplstereonet.parse_quadrant_measurement(text)
class TestParseAzimuth:
def test_parse_azimuth(self):
data = [('N30E', 30),
('E30N', 60),
('E30S', 120),
('S80E', 100),
('S10W', 190),
('W10S', 260),
('W30N', 300),
('N10E', 10),
('N10W', 350),
('N 10 W', 350),
('310', 310),
(' 310 ', 310),
('32.5', 32.5),
]
for strike, azi in data:
assert azi == mplstereonet.parse_azimuth(strike)
def test_parse_azimuth_errors(self):
data = ['30NW', '30S', 'A40N', 'N10S', 'S80N', 'E10W', 'W30E']
for quad in data:
with pytest.raises(ValueError):
mplstereonet.parse_azimuth(quad)
class TestParseRakes:
def test_parse_rake(self):
data = [
[('N30E', '45NW', '10NE'), (210, 45, 170)],
[('N30E', '45NW', '10SW'), (210, 45, 10)],
[('210', '45', '10'), (210, 45, 10)],
[('210', '45', '-10'), (210, 45, 170)],
[('210', '45', '170'), (210, 45, 170)],
[('E10N', '20NW', '80E'), (260, 20, 100)],
[('E10N', '20NW', '100'), (260, 20, 100)],
[('E10N', '20NW', '80W'), (260, 20, 80)],
[('E10N', '20NW', '-80'), (260, 20, 100)],
[('350', '40W', '45N'), (170, 40, 135)],
[('350', '40W', '45S'), (170, 40, 45)],
[('280', '30SW', '30E'), (100, 30, 30)],
[('280', '30SW', '30W'), (100, 30, 150)],
]
for test, correct in data:
result = mplstereonet.parse_rake(*test)
assert np.allclose(result, correct)
class TestParsePlungeBearing:
def test_parse_pb(self):
data = [
[('10NE', 'N30E'), (10, 30)],
[('10SW', 'N30E'), (10, 210)],
[('10', '210'), (10, 210)],
[('-10', '210'), (10, 30)],
[('170', '210'), (10, 30)],
[('-170', '210'), (10, 210)],
]
for test, correct in data:
result = mplstereonet.parse_plunge_bearing(*test)
assert np.allclose(result, correct)
| [
"mplstereonet.parse_strike_dip",
"mplstereonet.parse_azimuth",
"numpy.allclose",
"mplstereonet.parse_quadrant_measurement",
"mplstereonet.parse_rake",
"mplstereonet.parse_plunge_bearing",
"pytest.raises"
] | [((484, 520), 'mplstereonet.parse_strike_dip', 'mplstereonet.parse_strike_dip', (['*test'], {}), '(*test)\n', (513, 520), False, 'import mplstereonet\n'), ((540, 568), 'numpy.allclose', 'np.allclose', (['result', 'correct'], {}), '(result, correct)\n', (551, 568), True, 'import numpy as np\n'), ((3025, 3055), 'mplstereonet.parse_rake', 'mplstereonet.parse_rake', (['*test'], {}), '(*test)\n', (3048, 3055), False, 'import mplstereonet\n'), ((3075, 3103), 'numpy.allclose', 'np.allclose', (['result', 'correct'], {}), '(result, correct)\n', (3086, 3103), True, 'import numpy as np\n'), ((3548, 3588), 'mplstereonet.parse_plunge_bearing', 'mplstereonet.parse_plunge_bearing', (['*test'], {}), '(*test)\n', (3581, 3588), False, 'import mplstereonet\n'), ((3608, 3636), 'numpy.allclose', 'np.allclose', (['result', 'correct'], {}), '(result, correct)\n', (3619, 3636), True, 'import numpy as np\n'), ((1016, 1063), 'mplstereonet.parse_quadrant_measurement', 'mplstereonet.parse_quadrant_measurement', (['strike'], {}), '(strike)\n', (1055, 1063), False, 'import mplstereonet\n'), ((1198, 1223), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1211, 1223), False, 'import pytest\n'), ((1241, 1286), 'mplstereonet.parse_quadrant_measurement', 'mplstereonet.parse_quadrant_measurement', (['quad'], {}), '(quad)\n', (1280, 1286), False, 'import mplstereonet\n'), ((1826, 1860), 'mplstereonet.parse_azimuth', 'mplstereonet.parse_azimuth', (['strike'], {}), '(strike)\n', (1852, 1860), False, 'import mplstereonet\n'), ((2017, 2042), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2030, 2042), False, 'import pytest\n'), ((2060, 2092), 'mplstereonet.parse_azimuth', 'mplstereonet.parse_azimuth', (['quad'], {}), '(quad)\n', (2086, 2092), False, 'import mplstereonet\n')] |
# -*- coding: utf-8 -*-
#
# misc.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import os
import csv
import json
import numpy as np
import torch as th
import glob
to_device = lambda x, gpu_id: x.to(th.device('cpu')) if gpu_id == -1 else x.to(th.device('cuda:%d' % gpu_id))
none = lambda x: x
norm = lambda x, p: x.norm(p=p) ** p
get_scalar = lambda x: x.detach().item()
reshape = lambda arr, x, y: arr.view(x, y)
def get_device(args):
return th.device('cpu') if args.gpu[0] < 0 else th.device('cuda:' + str(args.gpu[0]))
def get_compatible_batch_size(batch_size, neg_sample_size):
if neg_sample_size < batch_size and batch_size % neg_sample_size != 0:
old_batch_size = batch_size
batch_size = int(math.ceil(batch_size / neg_sample_size) * neg_sample_size)
print('batch size ({}) is incompatible to the negative sample size ({}). Change the batch size to {}'.format(
old_batch_size, neg_sample_size, batch_size))
return batch_size
def save_model(args, model, emap_file=None, rmap_file=None):
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
print('Save model to {}'.format(args.save_path))
model.save_emb(args.save_path, args.dataset)
# We need to save the model configurations as well.
conf_file = os.path.join(args.save_path, 'config.json')
dict = {}
config = args
dict.update(vars(config))
dict.update({'emp_file': emap_file,
'rmap_file': rmap_file})
with open(conf_file, 'w') as outfile:
json.dump(dict, outfile, indent=4)
def load_model_config(config_f):
print(config_f)
with open(config_f, "r") as f:
config = json.loads(f.read())
#config = json.load(f)
print(config)
return config
def load_raw_triplet_data(head_f=None, rel_f=None, tail_f=None, emap_f=None, rmap_f=None):
if emap_f is not None:
eid_map = {}
id2e_map = {}
with open(emap_f, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
eid_map[row[1]] = int(row[0])
id2e_map[int(row[0])] = row[1]
if rmap_f is not None:
rid_map = {}
id2r_map = {}
with open(rmap_f, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
rid_map[row[1]] = int(row[0])
id2r_map[int(row[0])] = row[1]
if head_f is not None:
head = []
with open(head_f, 'r') as f:
id = f.readline()
while len(id) > 0:
head.append(eid_map[id[:-1]])
id = f.readline()
head = np.asarray(head)
else:
head = None
if rel_f is not None:
rel = []
with open(rel_f, 'r') as f:
id = f.readline()
while len(id) > 0:
rel.append(rid_map[id[:-1]])
id = f.readline()
rel = np.asarray(rel)
else:
rel = None
if tail_f is not None:
tail = []
with open(tail_f, 'r') as f:
id = f.readline()
while len(id) > 0:
tail.append(eid_map[id[:-1]])
id = f.readline()
tail = np.asarray(tail)
else:
tail = None
return head, rel, tail, id2e_map, id2r_map
def load_triplet_data(head_f=None, rel_f=None, tail_f=None):
if head_f is not None:
head = []
with open(head_f, 'r') as f:
id = f.readline()
while len(id) > 0:
head.append(int(id))
id = f.readline()
head = np.asarray(head)
else:
head = None
if rel_f is not None:
rel = []
with open(rel_f, 'r') as f:
id = f.readline()
while len(id) > 0:
rel.append(int(id))
id = f.readline()
rel = np.asarray(rel)
else:
rel = None
if tail_f is not None:
tail = []
with open(tail_f, 'r') as f:
id = f.readline()
while len(id) > 0:
tail.append(int(id))
id = f.readline()
tail = np.asarray(tail)
else:
tail = None
return head, rel, tail
def load_raw_emb_mapping(map_f):
assert map_f is not None
id2e_map = {}
with open(map_f, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
id2e_map[int(row[0])] = row[1]
return id2e_map
def load_raw_emb_data(file, map_f=None, e2id_map=None):
if map_f is not None:
e2id_map = {}
id2e_map = {}
with open(map_f, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
e2id_map[row[1]] = int(row[0])
id2e_map[int(row[0])] = row[1]
elif e2id_map is not None:
id2e_map = [] # dummpy return value
else:
assert False, 'There should be an ID mapping file provided'
ids = []
with open(file, 'r') as f:
line = f.readline()
while len(line) > 0:
ids.append(e2id_map[line[:-1]])
line = f.readline()
ids = np.asarray(ids)
return ids, id2e_map, e2id_map
def load_entity_data(file=None):
if file is None:
return None
entity = []
with open(file, 'r') as f:
id = f.readline()
while len(id) > 0:
entity.append(int(id))
id = f.readline()
entity = np.asarray(entity)
return entity
def prepare_save_path(args):
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
folder = '{}_{}_'.format(args.model_name, args.dataset)
n = len([x for x in os.listdir(args.save_path) if x.startswith(folder)])
folder += str(n)
args.save_path = os.path.join(args.save_path, folder)
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
def set_seed(args):
np.random.seed(args.seed)
th.manual_seed(args.seed)
def evaluate_best_result(model_name, dataset, save_path, threshold=3):
file_pattern = '{}/{}_{}_*/result.txt'.format(save_path, model_name, dataset)
files = glob.glob(file_pattern)
best_result = None
best_dir = None
for file in files:
dir = file.split('/')[-2]
with open(file, 'r') as f:
result = json.load(f)
if best_result is None:
best_result = result
best_dir = dir
continue
else:
cnt = 0
for k in result.keys():
if k == 'MR':
if result[k] <= best_result[k]:
cnt += 1
else:
if result[k] >= best_result[k]:
cnt += 1
if cnt >= threshold:
best_result = result
best_dir = dir
print(f'''{model_name} training on {dataset} best result is in folder {best_dir}\n'
best result:\n''')
for k, v in best_result.items():
print(f'{k}: {v}')
| [
"os.mkdir",
"json.dump",
"json.load",
"numpy.random.seed",
"csv.reader",
"os.makedirs",
"math.ceil",
"torch.manual_seed",
"numpy.asarray",
"os.path.exists",
"glob.glob",
"torch.device",
"os.path.join",
"os.listdir"
] | [((1887, 1930), 'os.path.join', 'os.path.join', (['args.save_path', '"""config.json"""'], {}), "(args.save_path, 'config.json')\n", (1899, 1930), False, 'import os\n'), ((6067, 6085), 'numpy.asarray', 'np.asarray', (['entity'], {}), '(entity)\n', (6077, 6085), True, 'import numpy as np\n'), ((6390, 6426), 'os.path.join', 'os.path.join', (['args.save_path', 'folder'], {}), '(args.save_path, folder)\n', (6402, 6426), False, 'import os\n'), ((6533, 6558), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (6547, 6558), True, 'import numpy as np\n'), ((6563, 6588), 'torch.manual_seed', 'th.manual_seed', (['args.seed'], {}), '(args.seed)\n', (6577, 6588), True, 'import torch as th\n'), ((6755, 6778), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (6764, 6778), False, 'import glob\n'), ((1041, 1057), 'torch.device', 'th.device', (['"""cpu"""'], {}), "('cpu')\n", (1050, 1057), True, 'import torch as th\n'), ((1647, 1677), 'os.path.exists', 'os.path.exists', (['args.save_path'], {}), '(args.save_path)\n', (1661, 1677), False, 'import os\n'), ((1687, 1711), 'os.mkdir', 'os.mkdir', (['args.save_path'], {}), '(args.save_path)\n', (1695, 1711), False, 'import os\n'), ((2126, 2160), 'json.dump', 'json.dump', (['dict', 'outfile'], {'indent': '(4)'}), '(dict, outfile, indent=4)\n', (2135, 2160), False, 'import json\n'), ((3252, 3268), 'numpy.asarray', 'np.asarray', (['head'], {}), '(head)\n', (3262, 3268), True, 'import numpy as np\n'), ((3533, 3548), 'numpy.asarray', 'np.asarray', (['rel'], {}), '(rel)\n', (3543, 3548), True, 'import numpy as np\n'), ((3817, 3833), 'numpy.asarray', 'np.asarray', (['tail'], {}), '(tail)\n', (3827, 3833), True, 'import numpy as np\n'), ((4203, 4219), 'numpy.asarray', 'np.asarray', (['head'], {}), '(head)\n', (4213, 4219), True, 'import numpy as np\n'), ((4475, 4490), 'numpy.asarray', 'np.asarray', (['rel'], {}), '(rel)\n', (4485, 4490), True, 'import numpy as np\n'), ((4750, 4766), 
'numpy.asarray', 'np.asarray', (['tail'], {}), '(tail)\n', (4760, 4766), True, 'import numpy as np\n'), ((4955, 4984), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (4965, 4984), False, 'import csv\n'), ((5761, 5776), 'numpy.asarray', 'np.asarray', (['ids'], {}), '(ids)\n', (5771, 5776), True, 'import numpy as np\n'), ((6145, 6175), 'os.path.exists', 'os.path.exists', (['args.save_path'], {}), '(args.save_path)\n', (6159, 6175), False, 'import os\n'), ((6185, 6209), 'os.mkdir', 'os.mkdir', (['args.save_path'], {}), '(args.save_path)\n', (6193, 6209), False, 'import os\n'), ((6439, 6469), 'os.path.exists', 'os.path.exists', (['args.save_path'], {}), '(args.save_path)\n', (6453, 6469), False, 'import os\n'), ((6479, 6506), 'os.makedirs', 'os.makedirs', (['args.save_path'], {}), '(args.save_path)\n', (6490, 6506), False, 'import os\n'), ((792, 808), 'torch.device', 'th.device', (['"""cpu"""'], {}), "('cpu')\n", (801, 808), True, 'import torch as th\n'), ((836, 865), 'torch.device', 'th.device', (["('cuda:%d' % gpu_id)"], {}), "('cuda:%d' % gpu_id)\n", (845, 865), True, 'import torch as th\n'), ((2576, 2605), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (2586, 2605), False, 'import csv\n'), ((2859, 2888), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (2869, 2888), False, 'import csv\n'), ((5261, 5290), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (5271, 5290), False, 'import csv\n'), ((6935, 6947), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6944, 6947), False, 'import json\n'), ((1317, 1356), 'math.ceil', 'math.ceil', (['(batch_size / neg_sample_size)'], {}), '(batch_size / neg_sample_size)\n', (1326, 1356), False, 'import math\n'), ((6295, 6321), 'os.listdir', 'os.listdir', (['args.save_path'], {}), '(args.save_path)\n', (6305, 6321), False, 'import os\n')] |
##
# @file electric_potential_unitest.py
# @author <NAME>
# @date Mar 2019
#
import time
import numpy as np
import unittest
import logging
import torch
from torch.autograd import Function, Variable
import os
import sys
import gzip
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from dreamplace.ops.dct import dct
from dreamplace.ops.dct import discrete_spectral_transform
from dreamplace.ops.electric_potential import electric_potential
sys.path.pop()
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import _pickle as pickle
import inspect
import pdb
from scipy import fftpack
import matplotlib
matplotlib.use('Agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
class ElectricPotentialOpTest(unittest.TestCase):
def test_densityOverflowRandom(self):
dtype = np.float64
xx = np.array([
1000, 11148, 11148, 11148, 11148, 11148, 11124, 11148, 11148,
11137, 11126, 11148, 11130, 11148, 11148, 11148, 11148, 11148,
11148, 0, 11148, 11148, 11150, 11134, 11148, 11148, 11148, 10550,
11148, 11148, 11144, 11148, 11148, 11148, 11148, 11140, 11120,
11154, 11148, 11133, 11148, 11148, 11134, 11125, 11148, 11148,
11148, 11155, 11127, 11148, 11148, 11148, 11148, 11131, 11148,
11148, 11148, 11148, 11136, 11148, 11146, 11148, 11135, 11148,
11125, 11150, 11148, 11139, 11148, 11148, 11130, 11148, 11128,
11148, 11138, 11148, 11148, 11148, 11130, 11148, 11132, 11148,
11148, 11090
]).astype(dtype)
yy = np.array([
1000, 11178, 11178, 11190, 11400, 11178, 11172, 11178, 11178,
11418, 11418, 11178, 11418, 11178, 11178, 11178, 11178, 11178,
11178, 11414, 11178, 11178, 11172, 11418, 11406, 11184, 11178,
10398, 11178, 11178, 11172, 11178, 11178, 11178, 11178, 11418,
11418, 11172, 11178, 11418, 11178, 11178, 11172, 11418, 11178,
11178, 11178, 11418, 11418, 11178, 11178, 11178, 11178, 11418,
11178, 11178, 11394, 11178, 11418, 11178, 11418, 11178, 11418,
11178, 11418, 11418, 11178, 11172, 11178, 11178, 11418, 11178,
11418, 11178, 11418, 11412, 11178, 11178, 11172, 11178, 11418,
11178, 11178, 11414
]).astype(dtype)
node_size_x = np.array([
6, 3, 3, 3, 3, 3, 5, 3, 3, 1, 1, 3, 1, 3, 3, 3, 3, 3, 3, 16728, 3,
3, 5, 1, 3, 3, 3, 740, 3, 3, 5, 3, 3, 3, 3, 5, 5, 5, 3, 1, 3, 3, 5,
1, 3, 3, 3, 5, 1, 3, 3, 3, 3, 1, 3, 3, 3, 3, 5, 3, 5, 3, 1, 3, 5,
5, 3, 5, 3, 3, 5, 3, 1, 3, 1, 3, 3, 3, 5, 3, 1, 3, 3, 67
]).astype(dtype)
node_size_y = np.array([
6, 240, 240, 6, 6, 240, 6, 240, 240, 6, 6, 240, 6, 240, 240, 240,
240, 240, 240, 10, 240, 6, 6, 6, 6, 6, 240, 780, 240, 240, 6, 240,
240, 240, 240, 6, 6, 6, 240, 6, 240, 240, 6, 6, 240, 240, 240, 6,
6, 240, 240, 240, 240, 6, 240, 240, 6, 240, 6, 240, 6, 240, 6, 240,
6, 6, 240, 6, 240, 240, 6, 240, 6, 240, 6, 6, 240, 240, 6, 240, 6,
240, 240, 10
]).astype(dtype)
#xx = np.array([2.0]).astype(dtype)
#yy = np.array([1.5]).astype(dtype)
#node_size_x = np.array([1.0]).astype(dtype)
#node_size_y = np.array([1.0]).astype(dtype)
num_nodes = len(xx)
num_movable_nodes = 1
num_terminals = len(xx) - num_movable_nodes
scale_factor = 1.0
xl = 0.0
yl = 6.0
xh = 16728.0
yh = 11430.0
target_density = 0.7
num_bins_x = 1024
num_bins_y = 1024
bin_size_x = (xh - xl) / num_bins_x
bin_size_y = (yh - yl) / num_bins_y
"""
return bin xl
"""
def bin_xl(id_x):
return xl + id_x * bin_size_x
"""
return bin xh
"""
def bin_xh(id_x):
return min(bin_xl(id_x) + bin_size_x, xh)
"""
return bin yl
"""
def bin_yl(id_y):
return yl + id_y * bin_size_y
"""
return bin yh
"""
def bin_yh(id_y):
return min(bin_yl(id_y) + bin_size_y, yh)
bin_center_x = np.zeros(num_bins_x, dtype=dtype)
for id_x in range(num_bins_x):
bin_center_x[id_x] = (bin_xl(id_x) +
bin_xh(id_x)) / 2 * scale_factor
bin_center_y = np.zeros(num_bins_y, dtype=dtype)
for id_y in range(num_bins_y):
bin_center_y[id_y] = (bin_yl(id_y) +
bin_yh(id_y)) / 2 * scale_factor
print("target_area = ", target_density * bin_size_x * bin_size_y)
if dtype == np.float64:
dtype = torch.float64
elif dtype == np.float32:
dtype = torch.float32
movable_size_x = node_size_x[:num_movable_nodes]
_, sorted_node_map = torch.sort(
torch.tensor(movable_size_x, requires_grad=False, dtype=dtype))
sorted_node_map = sorted_node_map.to(torch.int32).contiguous()
# test cpu
custom = electric_potential.ElectricPotential(
torch.tensor(node_size_x, requires_grad=False, dtype=dtype),
torch.tensor(node_size_y, requires_grad=False, dtype=dtype),
torch.tensor(bin_center_x, requires_grad=False, dtype=dtype),
torch.tensor(bin_center_y, requires_grad=False, dtype=dtype),
target_density=torch.tensor(target_density,
requires_grad=False,
dtype=dtype),
xl=xl,
yl=yl,
xh=xh,
yh=yh,
bin_size_x=bin_size_x,
bin_size_y=bin_size_y,
num_movable_nodes=num_movable_nodes,
num_terminals=num_terminals,
num_filler_nodes=0,
padding=0,
sorted_node_map=sorted_node_map,
movable_macro_mask=None,
deterministic_flag=True)
pos = Variable(torch.from_numpy(np.concatenate([xx, yy])),
requires_grad=True)
result = custom.forward(pos)
print("custom_result = ", result)
print(result.type())
result.backward()
grad = pos.grad.clone()
print("custom_grad = ", grad)
# test cuda
if torch.cuda.device_count():
custom_cuda = electric_potential.ElectricPotential(
torch.tensor(node_size_x, requires_grad=False,
dtype=dtype).cuda(),
torch.tensor(node_size_y, requires_grad=False,
dtype=dtype).cuda(),
torch.tensor(bin_center_x, requires_grad=False,
dtype=dtype).cuda(),
torch.tensor(bin_center_y, requires_grad=False,
dtype=dtype).cuda(),
target_density=torch.tensor(target_density,
requires_grad=False,
dtype=dtype).cuda(),
xl=xl,
yl=yl,
xh=xh,
yh=yh,
bin_size_x=bin_size_x,
bin_size_y=bin_size_y,
num_movable_nodes=num_movable_nodes,
num_terminals=num_terminals,
num_filler_nodes=0,
padding=0,
sorted_node_map=sorted_node_map.cuda(),
movable_macro_mask=None,
deterministic_flag=False)
pos = Variable(torch.from_numpy(np.concatenate([xx, yy])).cuda(),
requires_grad=True)
#pos.grad.zero_()
result_cuda = custom_cuda.forward(pos)
print("custom_result_cuda = ", result_cuda.data.cpu())
print(result_cuda.type())
result_cuda.backward()
grad_cuda = pos.grad.clone()
print("custom_grad_cuda = ", grad_cuda.data.cpu())
np.testing.assert_allclose(result.detach().numpy(),
result_cuda.data.cpu().detach().numpy())
np.testing.assert_allclose(grad.detach().numpy(),
grad_cuda.data.cpu().detach().numpy())
def plot(plot_count, density_map, padding, name):
"""
density map contour and heat map
"""
density_map = density_map[padding:density_map.shape[0] - padding,
padding:density_map.shape[1] - padding]
print("max density = %g" % (np.amax(density_map)))
print("mean density = %g" % (np.mean(density_map)))
fig = plt.figure()
ax = fig.gca(projection='3d')
x = np.arange(density_map.shape[0])
y = np.arange(density_map.shape[1])
x, y = np.meshgrid(x, y)
# looks like x and y should be swapped
ax.plot_surface(y, x, density_map, alpha=0.8)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('density')
# plt.tight_layout()
plt.savefig(name + ".3d.png")
plt.close()
# plt.clf()
#fig, ax = plt.subplots()
# ax.pcolor(density_map)
# Loop over data dimensions and create text annotations.
# for i in range(density_map.shape[0]):
# for j in range(density_map.shape[1]):
# text = ax.text(j, i, density_map[i, j],
# ha="center", va="center", color="w")
# fig.tight_layout()
#plt.savefig(name+".2d.%d.png" % (plot_count))
# plt.close()
def eval_runtime(design):
# e.g., adaptec1_density.pklz
with gzip.open(design, "rb") as f:
node_size_x, node_size_y, bin_center_x, bin_center_y, target_density, xl, yl, xh, yh, bin_size_x, bin_size_y, num_movable_nodes, num_terminals, num_filler_nodes = pickle.load(
f)
dtype = torch.float64
num_threads = 10
torch.set_num_threads(num_threads)
print("num_threads = %d" % (torch.get_num_threads()))
movable_size_x = node_size_x[:num_movable_nodes]
_, sorted_node_map = torch.sort(
torch.tensor(movable_size_x, requires_grad=False, dtype=dtype).cuda())
sorted_node_map = sorted_node_map.to(torch.int32).contiguous()
pos_var = Variable(torch.empty(len(node_size_x) * 2,
dtype=dtype).uniform_(xl, xh),
requires_grad=True)
custom = electric_potential.ElectricPotential(
torch.tensor(node_size_x, requires_grad=False, dtype=dtype).cpu(),
torch.tensor(node_size_y, requires_grad=False, dtype=dtype).cpu(),
torch.tensor(bin_center_x, requires_grad=False, dtype=dtype).cpu(),
torch.tensor(bin_center_y, requires_grad=False, dtype=dtype).cpu(),
target_density=torch.tensor(target_density,
requires_grad=False,
dtype=dtype).cpu(),
xl=xl,
yl=yl,
xh=xh,
yh=yh,
bin_size_x=bin_size_x,
bin_size_y=bin_size_y,
num_movable_nodes=num_movable_nodes,
num_terminals=num_terminals,
num_filler_nodes=num_filler_nodes,
padding=0,
sorted_node_map=sorted_node_map.cpu())
custom_cuda = electric_potential.ElectricPotential(
torch.tensor(node_size_x, requires_grad=False, dtype=dtype).cuda(),
torch.tensor(node_size_y, requires_grad=False, dtype=dtype).cuda(),
torch.tensor(bin_center_x, requires_grad=False, dtype=dtype).cuda(),
torch.tensor(bin_center_y, requires_grad=False, dtype=dtype).cuda(),
target_density=torch.tensor(target_density,
requires_grad=False,
dtype=dtype).cuda(),
xl=xl,
yl=yl,
xh=xh,
yh=yh,
bin_size_x=bin_size_x,
bin_size_y=bin_size_y,
num_movable_nodes=num_movable_nodes,
num_terminals=num_terminals,
num_filler_nodes=num_filler_nodes,
padding=0,
sorted_node_map=sorted_node_map)
torch.cuda.synchronize()
iters = 100
tbackward = 0
tt = time.time()
for i in range(iters):
result = custom.forward(pos_var)
ttb = time.time()
result.backward()
tbackward += time.time() - ttb
torch.cuda.synchronize()
print("custom takes %.3f ms, backward %.3f ms" %
((time.time() - tt) / iters * 1000, (tbackward / iters * 1000)))
pos_var = pos_var.cuda()
tt = time.time()
for i in range(iters):
result = custom_cuda.forward(pos_var)
result.backward()
torch.cuda.synchronize()
print("custom_cuda takes %.3f ms" % ((time.time() - tt) / iters * 1000))
if __name__ == '__main__':
logging.root.name = 'DREAMPlace'
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)-7s] %(name)s - %(message)s',
stream=sys.stdout)
if len(sys.argv) < 2:
unittest.main()
else:
design = sys.argv[1]
eval_runtime(design)
| [
"sys.path.pop",
"torch.cuda.synchronize",
"matplotlib.pyplot.savefig",
"torch.cuda.device_count",
"matplotlib.pyplot.figure",
"torch.set_num_threads",
"numpy.arange",
"numpy.mean",
"unittest.main",
"_pickle.load",
"os.path.abspath",
"numpy.meshgrid",
"matplotlib.pyplot.close",
"matplotlib.... | [((505, 519), 'sys.path.pop', 'sys.path.pop', ([], {}), '()\n', (517, 519), False, 'import sys\n'), ((684, 705), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (698, 705), False, 'import matplotlib\n'), ((8795, 8807), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8805, 8807), True, 'import matplotlib.pyplot as plt\n'), ((8851, 8882), 'numpy.arange', 'np.arange', (['density_map.shape[0]'], {}), '(density_map.shape[0])\n', (8860, 8882), True, 'import numpy as np\n'), ((8891, 8922), 'numpy.arange', 'np.arange', (['density_map.shape[1]'], {}), '(density_map.shape[1])\n', (8900, 8922), True, 'import numpy as np\n'), ((8935, 8952), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (8946, 8952), True, 'import numpy as np\n'), ((9152, 9181), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(name + '.3d.png')"], {}), "(name + '.3d.png')\n", (9163, 9181), True, 'import matplotlib.pyplot as plt\n'), ((9186, 9197), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9195, 9197), True, 'import matplotlib.pyplot as plt\n'), ((9961, 9995), 'torch.set_num_threads', 'torch.set_num_threads', (['num_threads'], {}), '(num_threads)\n', (9982, 9995), False, 'import torch\n'), ((12129, 12153), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (12151, 12153), False, 'import torch\n'), ((12197, 12208), 'time.time', 'time.time', ([], {}), '()\n', (12206, 12208), False, 'import time\n'), ((12372, 12396), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (12394, 12396), False, 'import torch\n'), ((12564, 12575), 'time.time', 'time.time', ([], {}), '()\n', (12573, 12575), False, 'import time\n'), ((12679, 12703), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (12701, 12703), False, 'import torch\n'), ((12851, 12966), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""[%(levelname)-7s] %(name)s - %(message)s"""', 
'stream': 'sys.stdout'}), "(level=logging.DEBUG, format=\n '[%(levelname)-7s] %(name)s - %(message)s', stream=sys.stdout)\n", (12870, 12966), False, 'import logging\n'), ((4334, 4367), 'numpy.zeros', 'np.zeros', (['num_bins_x'], {'dtype': 'dtype'}), '(num_bins_x, dtype=dtype)\n', (4342, 4367), True, 'import numpy as np\n'), ((4547, 4580), 'numpy.zeros', 'np.zeros', (['num_bins_y'], {'dtype': 'dtype'}), '(num_bins_y, dtype=dtype)\n', (4555, 4580), True, 'import numpy as np\n'), ((6487, 6512), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6510, 6512), False, 'import torch\n'), ((9680, 9703), 'gzip.open', 'gzip.open', (['design', '"""rb"""'], {}), "(design, 'rb')\n", (9689, 9703), False, 'import gzip\n'), ((9881, 9895), '_pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9892, 9895), True, 'import _pickle as pickle\n'), ((12291, 12302), 'time.time', 'time.time', ([], {}), '()\n', (12300, 12302), False, 'import time\n'), ((13044, 13059), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13057, 13059), False, 'import unittest\n'), ((5056, 5118), 'torch.tensor', 'torch.tensor', (['movable_size_x'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(movable_size_x, requires_grad=False, dtype=dtype)\n', (5068, 5118), False, 'import torch\n'), ((5277, 5336), 'torch.tensor', 'torch.tensor', (['node_size_x'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(node_size_x, requires_grad=False, dtype=dtype)\n', (5289, 5336), False, 'import torch\n'), ((5350, 5409), 'torch.tensor', 'torch.tensor', (['node_size_y'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(node_size_y, requires_grad=False, dtype=dtype)\n', (5362, 5409), False, 'import torch\n'), ((5423, 5483), 'torch.tensor', 'torch.tensor', (['bin_center_x'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(bin_center_x, requires_grad=False, dtype=dtype)\n', (5435, 5483), False, 'import torch\n'), ((5497, 5557), 'torch.tensor', 'torch.tensor', (['bin_center_y'], {'requires_grad': 
'(False)', 'dtype': 'dtype'}), '(bin_center_y, requires_grad=False, dtype=dtype)\n', (5509, 5557), False, 'import torch\n'), ((8705, 8725), 'numpy.amax', 'np.amax', (['density_map'], {}), '(density_map)\n', (8712, 8725), True, 'import numpy as np\n'), ((8761, 8781), 'numpy.mean', 'np.mean', (['density_map'], {}), '(density_map)\n', (8768, 8781), True, 'import numpy as np\n'), ((10028, 10051), 'torch.get_num_threads', 'torch.get_num_threads', ([], {}), '()\n', (10049, 10051), False, 'import torch\n'), ((12350, 12361), 'time.time', 'time.time', ([], {}), '()\n', (12359, 12361), False, 'import time\n'), ((316, 341), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (331, 341), False, 'import os\n'), ((912, 1544), 'numpy.array', 'np.array', (['[1000, 11148, 11148, 11148, 11148, 11148, 11124, 11148, 11148, 11137, 11126,\n 11148, 11130, 11148, 11148, 11148, 11148, 11148, 11148, 0, 11148, 11148,\n 11150, 11134, 11148, 11148, 11148, 10550, 11148, 11148, 11144, 11148, \n 11148, 11148, 11148, 11140, 11120, 11154, 11148, 11133, 11148, 11148, \n 11134, 11125, 11148, 11148, 11148, 11155, 11127, 11148, 11148, 11148, \n 11148, 11131, 11148, 11148, 11148, 11148, 11136, 11148, 11146, 11148, \n 11135, 11148, 11125, 11150, 11148, 11139, 11148, 11148, 11130, 11148, \n 11128, 11148, 11138, 11148, 11148, 11148, 11130, 11148, 11132, 11148, \n 11148, 11090]'], {}), '([1000, 11148, 11148, 11148, 11148, 11148, 11124, 11148, 11148, \n 11137, 11126, 11148, 11130, 11148, 11148, 11148, 11148, 11148, 11148, 0,\n 11148, 11148, 11150, 11134, 11148, 11148, 11148, 10550, 11148, 11148, \n 11144, 11148, 11148, 11148, 11148, 11140, 11120, 11154, 11148, 11133, \n 11148, 11148, 11134, 11125, 11148, 11148, 11148, 11155, 11127, 11148, \n 11148, 11148, 11148, 11131, 11148, 11148, 11148, 11148, 11136, 11148, \n 11146, 11148, 11135, 11148, 11125, 11150, 11148, 11139, 11148, 11148, \n 11130, 11148, 11128, 11148, 11138, 11148, 11148, 11148, 11130, 11148, \n 11132, 11148, 11148, 
11090])\n', (920, 1544), True, 'import numpy as np\n'), ((1663, 2300), 'numpy.array', 'np.array', (['[1000, 11178, 11178, 11190, 11400, 11178, 11172, 11178, 11178, 11418, 11418,\n 11178, 11418, 11178, 11178, 11178, 11178, 11178, 11178, 11414, 11178, \n 11178, 11172, 11418, 11406, 11184, 11178, 10398, 11178, 11178, 11172, \n 11178, 11178, 11178, 11178, 11418, 11418, 11172, 11178, 11418, 11178, \n 11178, 11172, 11418, 11178, 11178, 11178, 11418, 11418, 11178, 11178, \n 11178, 11178, 11418, 11178, 11178, 11394, 11178, 11418, 11178, 11418, \n 11178, 11418, 11178, 11418, 11418, 11178, 11172, 11178, 11178, 11418, \n 11178, 11418, 11178, 11418, 11412, 11178, 11178, 11172, 11178, 11418, \n 11178, 11178, 11414]'], {}), '([1000, 11178, 11178, 11190, 11400, 11178, 11172, 11178, 11178, \n 11418, 11418, 11178, 11418, 11178, 11178, 11178, 11178, 11178, 11178, \n 11414, 11178, 11178, 11172, 11418, 11406, 11184, 11178, 10398, 11178, \n 11178, 11172, 11178, 11178, 11178, 11178, 11418, 11418, 11172, 11178, \n 11418, 11178, 11178, 11172, 11418, 11178, 11178, 11178, 11418, 11418, \n 11178, 11178, 11178, 11178, 11418, 11178, 11178, 11394, 11178, 11418, \n 11178, 11418, 11178, 11418, 11178, 11418, 11418, 11178, 11172, 11178, \n 11178, 11418, 11178, 11418, 11178, 11418, 11412, 11178, 11178, 11172, \n 11178, 11418, 11178, 11178, 11414])\n', (1671, 2300), True, 'import numpy as np\n'), ((2427, 2709), 'numpy.array', 'np.array', (['[6, 3, 3, 3, 3, 3, 5, 3, 3, 1, 1, 3, 1, 3, 3, 3, 3, 3, 3, 16728, 3, 3, 5, 1,\n 3, 3, 3, 740, 3, 3, 5, 3, 3, 3, 3, 5, 5, 5, 3, 1, 3, 3, 5, 1, 3, 3, 3, \n 5, 1, 3, 3, 3, 3, 1, 3, 3, 3, 3, 5, 3, 5, 3, 1, 3, 5, 5, 3, 5, 3, 3, 5,\n 3, 1, 3, 1, 3, 3, 3, 5, 3, 1, 3, 3, 67]'], {}), '([6, 3, 3, 3, 3, 3, 5, 3, 3, 1, 1, 3, 1, 3, 3, 3, 3, 3, 3, 16728, 3,\n 3, 5, 1, 3, 3, 3, 740, 3, 3, 5, 3, 3, 3, 3, 5, 5, 5, 3, 1, 3, 3, 5, 1, \n 3, 3, 3, 5, 1, 3, 3, 3, 3, 1, 3, 3, 3, 3, 5, 3, 5, 3, 1, 3, 5, 5, 3, 5,\n 3, 3, 5, 3, 1, 3, 1, 3, 3, 3, 5, 3, 1, 3, 3, 67])\n', (2435, 2709), True, 
'import numpy as np\n'), ((2791, 3167), 'numpy.array', 'np.array', (['[6, 240, 240, 6, 6, 240, 6, 240, 240, 6, 6, 240, 6, 240, 240, 240, 240, 240,\n 240, 10, 240, 6, 6, 6, 6, 6, 240, 780, 240, 240, 6, 240, 240, 240, 240,\n 6, 6, 6, 240, 6, 240, 240, 6, 6, 240, 240, 240, 6, 6, 240, 240, 240, \n 240, 6, 240, 240, 6, 240, 6, 240, 6, 240, 6, 240, 6, 6, 240, 6, 240, \n 240, 6, 240, 6, 240, 6, 6, 240, 240, 6, 240, 6, 240, 240, 10]'], {}), '([6, 240, 240, 6, 6, 240, 6, 240, 240, 6, 6, 240, 6, 240, 240, 240,\n 240, 240, 240, 10, 240, 6, 6, 6, 6, 6, 240, 780, 240, 240, 6, 240, 240,\n 240, 240, 6, 6, 6, 240, 6, 240, 240, 6, 6, 240, 240, 240, 6, 6, 240, \n 240, 240, 240, 6, 240, 240, 6, 240, 6, 240, 6, 240, 6, 240, 6, 6, 240, \n 6, 240, 240, 6, 240, 6, 240, 6, 6, 240, 240, 6, 240, 6, 240, 240, 10])\n', (2799, 3167), True, 'import numpy as np\n'), ((5586, 5648), 'torch.tensor', 'torch.tensor', (['target_density'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(target_density, requires_grad=False, dtype=dtype)\n', (5598, 5648), False, 'import torch\n'), ((6181, 6205), 'numpy.concatenate', 'np.concatenate', (['[xx, yy]'], {}), '([xx, yy])\n', (6195, 6205), True, 'import numpy as np\n'), ((10152, 10214), 'torch.tensor', 'torch.tensor', (['movable_size_x'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(movable_size_x, requires_grad=False, dtype=dtype)\n', (10164, 10214), False, 'import torch\n'), ((10516, 10575), 'torch.tensor', 'torch.tensor', (['node_size_x'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(node_size_x, requires_grad=False, dtype=dtype)\n', (10528, 10575), False, 'import torch\n'), ((10591, 10650), 'torch.tensor', 'torch.tensor', (['node_size_y'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(node_size_y, requires_grad=False, dtype=dtype)\n', (10603, 10650), False, 'import torch\n'), ((10666, 10726), 'torch.tensor', 'torch.tensor', (['bin_center_x'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(bin_center_x, requires_grad=False, 
dtype=dtype)\n', (10678, 10726), False, 'import torch\n'), ((10742, 10802), 'torch.tensor', 'torch.tensor', (['bin_center_y'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(bin_center_y, requires_grad=False, dtype=dtype)\n', (10754, 10802), False, 'import torch\n'), ((11353, 11412), 'torch.tensor', 'torch.tensor', (['node_size_x'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(node_size_x, requires_grad=False, dtype=dtype)\n', (11365, 11412), False, 'import torch\n'), ((11429, 11488), 'torch.tensor', 'torch.tensor', (['node_size_y'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(node_size_y, requires_grad=False, dtype=dtype)\n', (11441, 11488), False, 'import torch\n'), ((11505, 11565), 'torch.tensor', 'torch.tensor', (['bin_center_x'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(bin_center_x, requires_grad=False, dtype=dtype)\n', (11517, 11565), False, 'import torch\n'), ((11582, 11642), 'torch.tensor', 'torch.tensor', (['bin_center_y'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(bin_center_y, requires_grad=False, dtype=dtype)\n', (11594, 11642), False, 'import torch\n'), ((10833, 10895), 'torch.tensor', 'torch.tensor', (['target_density'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(target_density, requires_grad=False, dtype=dtype)\n', (10845, 10895), False, 'import torch\n'), ((11674, 11736), 'torch.tensor', 'torch.tensor', (['target_density'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(target_density, requires_grad=False, dtype=dtype)\n', (11686, 11736), False, 'import torch\n'), ((6594, 6653), 'torch.tensor', 'torch.tensor', (['node_size_x'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(node_size_x, requires_grad=False, dtype=dtype)\n', (6606, 6653), False, 'import torch\n'), ((6707, 6766), 'torch.tensor', 'torch.tensor', (['node_size_y'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(node_size_y, requires_grad=False, dtype=dtype)\n', (6719, 6766), False, 'import torch\n'), ((6820, 6880), 'torch.tensor', 
'torch.tensor', (['bin_center_x'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(bin_center_x, requires_grad=False, dtype=dtype)\n', (6832, 6880), False, 'import torch\n'), ((6934, 6994), 'torch.tensor', 'torch.tensor', (['bin_center_y'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(bin_center_y, requires_grad=False, dtype=dtype)\n', (6946, 6994), False, 'import torch\n'), ((12746, 12757), 'time.time', 'time.time', ([], {}), '()\n', (12755, 12757), False, 'import time\n'), ((7063, 7125), 'torch.tensor', 'torch.tensor', (['target_density'], {'requires_grad': '(False)', 'dtype': 'dtype'}), '(target_density, requires_grad=False, dtype=dtype)\n', (7075, 7125), False, 'import torch\n'), ((7737, 7761), 'numpy.concatenate', 'np.concatenate', (['[xx, yy]'], {}), '([xx, yy])\n', (7751, 7761), True, 'import numpy as np\n'), ((12462, 12473), 'time.time', 'time.time', ([], {}), '()\n', (12471, 12473), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 24 17:01:16 2022
Determine optic flow given two images
@author: guido
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
def determine_optic_flow(filename_1, filename_2, method='Harris', max_points = 100, graphics = True):
    """Detect corner features in the first image and track them into the
    second image with pyramidal Lucas-Kanade optical flow.

    :param filename_1: path of the first image.
    :param filename_2: path of the second image.
    :param method: feature detector to use: 'Harris', 'FAST', or 'ShiTomasi'.
    :param max_points: maximum number of features kept (currently only applied
        in the 'FAST' branch; the other detectors keep all their detections).
    :param graphics: when True, draw the flow vectors on a blend of the two
        images and show it with cv2.imshow.
    :return: None; results are visualized via OpenCV windows.
    """
    # load both BGR color images and convert them to gray scale:
    BGR_1 = cv2.imread(filename_1)
    gray_1 = cv2.cvtColor(BGR_1, cv2.COLOR_BGR2GRAY)
    BGR_2 = cv2.imread(filename_2)
    gray_2 = cv2.cvtColor(BGR_2, cv2.COLOR_BGR2GRAY)

    # (1) Detect features:
    if method == 'Harris':
        gray_Harris = np.float32(gray_1)
        # https://docs.opencv.org/4.x/dd/d1a/group__imgproc__feature.html#gac1fc3598018010880e370e2f709b4345
        # blockSize: neighborhood size; ksize: Sobel aperture; k: Harris free parameter.
        blockSize = 2
        ksize = 3
        k = 0.04
        # threshold_factor (multiplied with max value in image)
        threshold_factor = 0.01
        # calculate the Harris value everywhere in the image:
        dst = cv2.cornerHarris(gray_Harris, blockSize, ksize, k)
        # dilate the values:
        # dst = cv2.dilate(dst, None)
        # Threshold the Harris values and set the corresponding pixels red
        BGR_1[dst > threshold_factor * dst.max()] = [0, 0, 255]
        cv2.imshow('dst', BGR_1)
        inds = np.where(dst > threshold_factor * dst.max())
        n_points = len(inds[0])
        corners = np.float32(np.zeros([n_points, 2]))
        # np.where returns (rows, cols) = (y, x); corners are stored as (x, y)
        corners[:, 0] = inds[1]
        corners[:, 1] = inds[0]
    elif method == 'FAST':
        # Initiate FAST object with default values
        # https://docs.opencv.org/3.4/df/d74/classcv_1_1FastFeatureDetector.html
        threshold = 70
        nonmaxSuppression = True
        type_detector = cv2.FAST_FEATURE_DETECTOR_TYPE_9_16
        fast = cv2.FastFeatureDetector_create(threshold, nonmaxSuppression, type_detector)
        # find and draw the keypoints
        kp = fast.detect(gray_1, None)
        img2 = cv2.drawKeypoints(BGR_1, kp, None, color=(255, 0, 0))
        cv2.imshow('dst', img2)
        print( "Total Keypoints with nonmaxSuppression: {}".format(len(kp)) )
        # BUG FIX: sample without replacement (and never more points than
        # detected) so the same keypoint is not tracked multiple times;
        # the original sampled with replacement and produced duplicates.
        kp = np.random.choice(kp, size=min(max_points, len(kp)), replace=False)
        n_points = len(kp)
        # convert the keypoints to a 2D numpy array:
        corners = np.float32(np.zeros([n_points, 2]))
        for i in range(n_points):
            corners[i, 0] = kp[i].pt[0]
            corners[i, 1] = kp[i].pt[1]
    elif method == 'ShiTomasi':
        # https://docs.opencv.org/3.4/dd/d1a/group__imgproc__feature.html#ga1d6bb77486c8f92d79c8793ad995d541
        # maxCorners: maximum number of corners returned (strongest first);
        # qualityLevel: fraction of the best corner quality accepted;
        # minDistance: minimum Euclidean distance between returned corners.
        maxCorners = 100
        qualityLevel = 0.01
        minDistance = 10
        C = cv2.goodFeaturesToTrack(gray_1, maxCorners, qualityLevel, minDistance)
        n_points = C.shape[0]
        corners = np.float32(np.zeros([n_points, 2]))
        for i in range(n_points):
            corners[i, :] = C[i][0]
        # BUG FIX: np.int0 was removed in NumPy 2.0; np.intp is the documented
        # equivalent alias.
        C = np.intp(C)
        for c in C:
            x, y = c.ravel()
            cv2.circle(BGR_1, (int(x), int(y)), 3, 255, -1)
        cv2.imshow('dst', BGR_1)

    # (2) Track the features to the next frame with pyramidal Lucas-Kanade:
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    corners_new, status, error_match = cv2.calcOpticalFlowPyrLK(gray_1, gray_2, corners, None, **lk_params)
    # NOTE(review): points with status == 0 were not tracked; the filtering
    # below is kept disabled exactly as in the original code.
    # corners = corners[status == 1, :]
    # corners_new = corners_new[status == 1, :]
    flow_vectors = corners_new - corners

    if graphics:
        # blend the two frames so both feature positions are visible
        im = (0.5 * BGR_1.copy().astype(float) + 0.5 * BGR_2.copy().astype(float)) / 255.0
        n_corners = len(corners)
        color = (0.0, 1.0, 0.0)
        for p in range(n_corners):
            # BUG FIX: OpenCV 4.x drawing functions require integer point
            # coordinates; the float32 corners must be rounded and cast,
            # otherwise cv2.arrowedLine raises "Can't parse 'pt1'".
            pt1 = (int(round(float(corners[p, 0]))), int(round(float(corners[p, 1]))))
            pt2 = (int(round(float(corners_new[p, 0]))), int(round(float(corners_new[p, 1]))))
            cv2.arrowedLine(im, pt1, pt2, color, thickness=2, tipLength=0.5)
        cv2.imshow('Flow', im)
    # plt.figure();
    # plt.imshow(im);
    # plt.title('Optical flow');
# plt.figure();
# plt.imshow(im);
# plt.title('Optical flow');
if __name__ == "__main__":
    # Determine optic flow on the images provided on the repository:
    # (uses the default Harris detector; pass method='FAST' or 'ShiTomasi' to compare)
    determine_optic_flow('bebop_flowers_1.jpg', 'bebop_flowers_2.jpg')
| [
"numpy.random.choice",
"cv2.drawKeypoints",
"numpy.int0",
"cv2.circle",
"cv2.cvtColor",
"numpy.float32",
"numpy.zeros",
"cv2.FastFeatureDetector_create",
"cv2.imread",
"cv2.goodFeaturesToTrack",
"cv2.calcOpticalFlowPyrLK",
"cv2.imshow",
"cv2.cornerHarris"
] | [((340, 362), 'cv2.imread', 'cv2.imread', (['filename_1'], {}), '(filename_1)\n', (350, 362), False, 'import cv2\n'), ((416, 455), 'cv2.cvtColor', 'cv2.cvtColor', (['BGR_1', 'cv2.COLOR_BGR2GRAY'], {}), '(BGR_1, cv2.COLOR_BGR2GRAY)\n', (428, 455), False, 'import cv2\n'), ((501, 523), 'cv2.imread', 'cv2.imread', (['filename_2'], {}), '(filename_2)\n', (511, 523), False, 'import cv2\n'), ((577, 616), 'cv2.cvtColor', 'cv2.cvtColor', (['BGR_2', 'cv2.COLOR_BGR2GRAY'], {}), '(BGR_2, cv2.COLOR_BGR2GRAY)\n', (589, 616), False, 'import cv2\n'), ((4598, 4666), 'cv2.calcOpticalFlowPyrLK', 'cv2.calcOpticalFlowPyrLK', (['gray_1', 'gray_2', 'corners', 'None'], {}), '(gray_1, gray_2, corners, None, **lk_params)\n', (4622, 4666), False, 'import cv2\n'), ((700, 718), 'numpy.float32', 'np.float32', (['gray_1'], {}), '(gray_1)\n', (710, 718), True, 'import numpy as np\n'), ((1245, 1295), 'cv2.cornerHarris', 'cv2.cornerHarris', (['gray_Harris', 'blockSize', 'ksize', 'k'], {}), '(gray_Harris, blockSize, ksize, k)\n', (1261, 1295), False, 'import cv2\n'), ((1508, 1532), 'cv2.imshow', 'cv2.imshow', (['"""dst"""', 'BGR_1'], {}), "('dst', BGR_1)\n", (1518, 1532), False, 'import cv2\n'), ((5192, 5214), 'cv2.imshow', 'cv2.imshow', (['"""Flow"""', 'im'], {}), "('Flow', im)\n", (5202, 5214), False, 'import cv2\n'), ((1663, 1686), 'numpy.zeros', 'np.zeros', (['[n_points, 2]'], {}), '([n_points, 2])\n', (1671, 1686), True, 'import numpy as np\n'), ((2102, 2177), 'cv2.FastFeatureDetector_create', 'cv2.FastFeatureDetector_create', (['threshold', 'nonmaxSuppression', 'type_detector'], {}), '(threshold, nonmaxSuppression, type_detector)\n', (2132, 2177), False, 'import cv2\n'), ((2269, 2322), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['BGR_1', 'kp', 'None'], {'color': '(255, 0, 0)'}), '(BGR_1, kp, None, color=(255, 0, 0))\n', (2286, 2322), False, 'import cv2\n'), ((2337, 2360), 'cv2.imshow', 'cv2.imshow', (['"""dst"""', 'img2'], {}), "('dst', img2)\n", (2347, 2360), False, 'import cv2\n'), 
((2485, 2522), 'numpy.random.choice', 'np.random.choice', (['kp'], {'size': 'max_points'}), '(kp, size=max_points)\n', (2501, 2522), True, 'import numpy as np\n'), ((2638, 2661), 'numpy.zeros', 'np.zeros', (['[n_points, 2]'], {}), '([n_points, 2])\n', (2646, 2661), True, 'import numpy as np\n'), ((3856, 3926), 'cv2.goodFeaturesToTrack', 'cv2.goodFeaturesToTrack', (['gray_1', 'maxCorners', 'qualityLevel', 'minDistance'], {}), '(gray_1, maxCorners, qualityLevel, minDistance)\n', (3879, 3926), False, 'import cv2\n'), ((4111, 4121), 'numpy.int0', 'np.int0', (['C'], {}), '(C)\n', (4118, 4121), True, 'import numpy as np\n'), ((4223, 4247), 'cv2.imshow', 'cv2.imshow', (['"""dst"""', 'BGR_1'], {}), "('dst', BGR_1)\n", (4233, 4247), False, 'import cv2\n'), ((3995, 4018), 'numpy.zeros', 'np.zeros', (['[n_points, 2]'], {}), '([n_points, 2])\n', (4003, 4018), True, 'import numpy as np\n'), ((4182, 4219), 'cv2.circle', 'cv2.circle', (['BGR_1', '(x, y)', '(3)', '(255)', '(-1)'], {}), '(BGR_1, (x, y), 3, 255, -1)\n', (4192, 4219), False, 'import cv2\n')] |
# imports
import sys
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from scipy.signal import medfilt
sys.path.append("./")
from data.dbase.db_tables import Recording, LocomotionBouts
from fcutils.plot.figure import clean_axes
from analysis.ephys.utils import (
get_data,
get_clean_walking_onsets,
get_walking_from_body,
cleanup_running_bouts,
)
from analysis.ephys.viz import (
time_aligned_raster,
# plot_frate_binned_by_var,
bouts_raster,
plot_tuning_curves,
)
from analysis.ephys.tuning_curves import (
get_tuning_curves,
upsample_farmes_to_ms,
)
"""
Makes a summary plot with various views of a single unit's activity.
"""
# Parameters controlling walking-bout detection and tuning-curve binning.
params = dict(
    MIN_WAKING_DURATION=1.0,  # when the mouse walks < than this we ignore it (seconds)
    MIN_PAUSE_DURATION=1.0,  # when the mouse pauses < before a walking bout than this we ignore it (seconds)
    SPEED_TH=10,  # speed threshold for walking (cm/s)
    min_delta_gcoord=0.4,  # NOTE(review): presumably min. change in global coordinate for a "complete" bout -- confirm in cleanup_running_bouts
    speed_tuning_curve_bins=np.arange(0, 80, 2),  # cm/s bins (overridden per recording below)
    avel_tuning_curve_bins=np.arange(-450, 450, 50),  # angular-velocity bins -- units presumably deg/s, confirm
    tuning_curves_repeats=25,
)
# Output folder for the per-unit summary figures.
base_folder = Path(r"D:\Dropbox (UCL)\Rotation_vte\Locomotion\analysis\ephys")
# print all available recordings
recordings = Recording().fetch("name")
for rec in recordings:
    print(f"Processing {rec}")
    # get data
    try:
        units, left_fl, right_fl, left_hl, right_hl, body = get_data(rec)
    except KeyError:
        # recording without complete data in the database -> skip it
        print(f"No data for {rec}")
        continue
    nunits = len(units)
    # get walking (onset/offset in frames)
    walking = get_walking_from_body(body, params["SPEED_TH"])
    walking_starts, walking_ends = get_clean_walking_onsets(
        walking, params["MIN_WAKING_DURATION"], params["MIN_PAUSE_DURATION"]
    )
    # smooth data for tuning curves
    speed = medfilt(body.speed, 21)
    avel = medfilt(body.thetadot, 21)
    # adapt the speed bins to this recording: cover at least 0-50 and up to
    # the 97.5th percentile of the observed (smoothed) speed, in steps of 2
    params["speed_tuning_curve_bins"] = np.arange(
        0, np.max([50, np.percentile(speed, 97.5)]), 2
    )
    # upsample to ms
    speed_ms = upsample_farmes_to_ms(speed)
    avel_ms = upsample_farmes_to_ms(avel)
    # get locomotion bouts
    bouts = LocomotionBouts.get_session_bouts(rec)
    bouts = cleanup_running_bouts(
        bouts, body, min_delta_gcoord=params["min_delta_gcoord"]
    )
    # NOTE(review): "bouts bouts" below is a typo in the log message
    print(f"Kept {len(bouts)} complete locomotion bouts bouts")
    # prepare folders
    rec_fld = base_folder / rec
    rec_fld.mkdir(exist_ok=True)
    rec_svg_fld = rec_fld / "svg"
    rec_svg_fld.mkdir(exist_ok=True)
    for i in range(nunits):
        print(f"  processing unit {i+1}/{nunits}")
        # get unit
        unit = units.iloc[i]
        # collapse sub-regions into their parent area name for the filename
        region = unit.brain_region.replace("\\", "_")
        if "RSP" in region:
            region = "RSP"
        elif "VISp" in region:
            region = "VISp"
        savepath = rec_fld / f"unit_{unit.unit_id}_{region}.png"
        if savepath.exists():
            # figure already produced by a previous run -> skip
            continue
        # create figure
        fig = plt.figure(figsize=(22, 10))
        axes = fig.subplot_mosaic(
            """
            AABBBB
            AABBBB
            CCDDEE
            CCDDEE
        """
        )
        # plot locomotion onset/offset rasters
        time_aligned_raster(
            axes["A"], unit, walking_starts, t_before=2, t_after=2, dt=0.05
        )
        time_aligned_raster(
            axes["C"], unit, walking_ends, t_before=2, t_after=2, dt=0.05
        )
        # plot tuning curves
        speed_tuning_curves = get_tuning_curves(
            unit.spikes_ms, speed_ms, params["speed_tuning_curve_bins"]
        )
        plot_tuning_curves(
            axes["D"], speed_tuning_curves, unit.color, xlabel="speed (cm/s)"
        )
        avel_tuning_curves = get_tuning_curves(
            unit.spikes_ms, avel_ms, params["avel_tuning_curve_bins"]
        )
        plot_tuning_curves(
            axes["E"], avel_tuning_curves, "black", xlabel="avel (deg/s)"
        )
        # plot locomotion bouts raster
        if len(bouts):
            bouts_raster(axes["B"], unit, bouts, body, ds=2)
        # styling
        axes["A"].set(title="Locomotion onset")
        axes["C"].set(title="Locomotion offset")
        axes["B"].set(title="Running bouts")
        axes["D"].set(title="Speed tuning", xlim=[0, 80])
        axes["E"].set(title="Angular velocity tuning")
        # save figure
        clean_axes(fig)
        fig.tight_layout()
        # fig.savefig(rec_svg_fld / f"unit_{unit.unit_id}_{region}.svg")
        fig.savefig(savepath, dpi=400)
        plt.close(fig)
        # plt.show()
        # break
    # break
| [
"analysis.ephys.viz.bouts_raster",
"pathlib.Path",
"matplotlib.pyplot.figure",
"numpy.arange",
"fcutils.plot.figure.clean_axes",
"analysis.ephys.tuning_curves.get_tuning_curves",
"sys.path.append",
"matplotlib.pyplot.close",
"scipy.signal.medfilt",
"analysis.ephys.utils.get_data",
"analysis.ephy... | [((131, 152), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (146, 152), False, 'import sys\n'), ((1145, 1213), 'pathlib.Path', 'Path', (['"""D:\\\\Dropbox (UCL)\\\\Rotation_vte\\\\Locomotion\\\\analysis\\\\ephys"""'], {}), "('D:\\\\Dropbox (UCL)\\\\Rotation_vte\\\\Locomotion\\\\analysis\\\\ephys')\n", (1149, 1213), False, 'from pathlib import Path\n'), ((1594, 1641), 'analysis.ephys.utils.get_walking_from_body', 'get_walking_from_body', (['body', "params['SPEED_TH']"], {}), "(body, params['SPEED_TH'])\n", (1615, 1641), False, 'from analysis.ephys.utils import get_data, get_clean_walking_onsets, get_walking_from_body, cleanup_running_bouts\n'), ((1677, 1776), 'analysis.ephys.utils.get_clean_walking_onsets', 'get_clean_walking_onsets', (['walking', "params['MIN_WAKING_DURATION']", "params['MIN_PAUSE_DURATION']"], {}), "(walking, params['MIN_WAKING_DURATION'], params[\n 'MIN_PAUSE_DURATION'])\n", (1701, 1776), False, 'from analysis.ephys.utils import get_data, get_clean_walking_onsets, get_walking_from_body, cleanup_running_bouts\n'), ((1835, 1858), 'scipy.signal.medfilt', 'medfilt', (['body.speed', '(21)'], {}), '(body.speed, 21)\n', (1842, 1858), False, 'from scipy.signal import medfilt\n'), ((1870, 1896), 'scipy.signal.medfilt', 'medfilt', (['body.thetadot', '(21)'], {}), '(body.thetadot, 21)\n', (1877, 1896), False, 'from scipy.signal import medfilt\n'), ((2047, 2075), 'analysis.ephys.tuning_curves.upsample_farmes_to_ms', 'upsample_farmes_to_ms', (['speed'], {}), '(speed)\n', (2068, 2075), False, 'from analysis.ephys.tuning_curves import get_tuning_curves, upsample_farmes_to_ms\n'), ((2090, 2117), 'analysis.ephys.tuning_curves.upsample_farmes_to_ms', 'upsample_farmes_to_ms', (['avel'], {}), '(avel)\n', (2111, 2117), False, 'from analysis.ephys.tuning_curves import get_tuning_curves, upsample_farmes_to_ms\n'), ((2158, 2196), 'data.dbase.db_tables.LocomotionBouts.get_session_bouts', 'LocomotionBouts.get_session_bouts', 
(['rec'], {}), '(rec)\n', (2191, 2196), False, 'from data.dbase.db_tables import Recording, LocomotionBouts\n'), ((2209, 2288), 'analysis.ephys.utils.cleanup_running_bouts', 'cleanup_running_bouts', (['bouts', 'body'], {'min_delta_gcoord': "params['min_delta_gcoord']"}), "(bouts, body, min_delta_gcoord=params['min_delta_gcoord'])\n", (2230, 2288), False, 'from analysis.ephys.utils import get_data, get_clean_walking_onsets, get_walking_from_body, cleanup_running_bouts\n'), ((1024, 1043), 'numpy.arange', 'np.arange', (['(0)', '(80)', '(2)'], {}), '(0, 80, 2)\n', (1033, 1043), True, 'import numpy as np\n'), ((1072, 1096), 'numpy.arange', 'np.arange', (['(-450)', '(450)', '(50)'], {}), '(-450, 450, 50)\n', (1081, 1096), True, 'import numpy as np\n'), ((1258, 1269), 'data.dbase.db_tables.Recording', 'Recording', ([], {}), '()\n', (1267, 1269), False, 'from data.dbase.db_tables import Recording, LocomotionBouts\n'), ((1424, 1437), 'analysis.ephys.utils.get_data', 'get_data', (['rec'], {}), '(rec)\n', (1432, 1437), False, 'from analysis.ephys.utils import get_data, get_clean_walking_onsets, get_walking_from_body, cleanup_running_bouts\n'), ((2985, 3013), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(22, 10)'}), '(figsize=(22, 10))\n', (2995, 3013), True, 'import matplotlib.pyplot as plt\n'), ((3239, 3327), 'analysis.ephys.viz.time_aligned_raster', 'time_aligned_raster', (["axes['A']", 'unit', 'walking_starts'], {'t_before': '(2)', 't_after': '(2)', 'dt': '(0.05)'}), "(axes['A'], unit, walking_starts, t_before=2, t_after=2,\n dt=0.05)\n", (3258, 3327), False, 'from analysis.ephys.viz import time_aligned_raster, bouts_raster, plot_tuning_curves\n'), ((3354, 3440), 'analysis.ephys.viz.time_aligned_raster', 'time_aligned_raster', (["axes['C']", 'unit', 'walking_ends'], {'t_before': '(2)', 't_after': '(2)', 'dt': '(0.05)'}), "(axes['C'], unit, walking_ends, t_before=2, t_after=2,\n dt=0.05)\n", (3373, 3440), False, 'from analysis.ephys.viz import 
time_aligned_raster, bouts_raster, plot_tuning_curves\n'), ((3519, 3597), 'analysis.ephys.tuning_curves.get_tuning_curves', 'get_tuning_curves', (['unit.spikes_ms', 'speed_ms', "params['speed_tuning_curve_bins']"], {}), "(unit.spikes_ms, speed_ms, params['speed_tuning_curve_bins'])\n", (3536, 3597), False, 'from analysis.ephys.tuning_curves import get_tuning_curves, upsample_farmes_to_ms\n'), ((3628, 3718), 'analysis.ephys.viz.plot_tuning_curves', 'plot_tuning_curves', (["axes['D']", 'speed_tuning_curves', 'unit.color'], {'xlabel': '"""speed (cm/s)"""'}), "(axes['D'], speed_tuning_curves, unit.color, xlabel=\n 'speed (cm/s)')\n", (3646, 3718), False, 'from analysis.ephys.viz import time_aligned_raster, bouts_raster, plot_tuning_curves\n'), ((3766, 3842), 'analysis.ephys.tuning_curves.get_tuning_curves', 'get_tuning_curves', (['unit.spikes_ms', 'avel_ms', "params['avel_tuning_curve_bins']"], {}), "(unit.spikes_ms, avel_ms, params['avel_tuning_curve_bins'])\n", (3783, 3842), False, 'from analysis.ephys.tuning_curves import get_tuning_curves, upsample_farmes_to_ms\n'), ((3873, 3959), 'analysis.ephys.viz.plot_tuning_curves', 'plot_tuning_curves', (["axes['E']", 'avel_tuning_curves', '"""black"""'], {'xlabel': '"""avel (deg/s)"""'}), "(axes['E'], avel_tuning_curves, 'black', xlabel=\n 'avel (deg/s)')\n", (3891, 3959), False, 'from analysis.ephys.viz import time_aligned_raster, bouts_raster, plot_tuning_curves\n'), ((4406, 4421), 'fcutils.plot.figure.clean_axes', 'clean_axes', (['fig'], {}), '(fig)\n', (4416, 4421), False, 'from fcutils.plot.figure import clean_axes\n'), ((4570, 4584), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4579, 4584), True, 'import matplotlib.pyplot as plt\n'), ((4052, 4100), 'analysis.ephys.viz.bouts_raster', 'bouts_raster', (["axes['B']", 'unit', 'bouts', 'body'], {'ds': '(2)'}), "(axes['B'], unit, bouts, body, ds=2)\n", (4064, 4100), False, 'from analysis.ephys.viz import time_aligned_raster, bouts_raster, 
plot_tuning_curves\n'), ((1972, 1998), 'numpy.percentile', 'np.percentile', (['speed', '(97.5)'], {}), '(speed, 97.5)\n', (1985, 1998), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Hw2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1sygHqvk0q2joBLtaOA3NQILrUbrA54yI
**Part A**
**Load Data**
"""
# import necessary libraries
import pandas as pd
# Load the raw Agaricus-lepiota (mushroom) dataset; the file has no header row.
df = pd.read_csv('/content/drive/MyDrive/tree/Agaricus-lepiota.data.txt', header=None)
df
# Specify Column Names (Features)
df.columns = ["class", "cap-shape", "cap-surface", "cap-color", "bruises",
              "odor", "gill-attachment", "gill-spacing", "gill-size",
              "gill-color", "stalk-shape", "stalk-root",
              "stalk-surface-above-ring",
              "stalk-surface-below-ring",
              "stalk-color-above-ring",
              "stalk-color-below-ring",
              "veil-type", "veil-color", "ring-number", "ring-type",
              "spore-print-color", "population", "habitat" ]
df
"""Missing Values"""
df.isnull().sum()
df.isnull().any() # This code shows that there is a column contains a null value or not, as you can see below the dataset has a columns contains "nan" value.
# So, this code returns true for stalk-root column.
# Also, we can write a simple for loop to see column(s) which contains(s) non-sense or null value(s).
for i in df.columns:
  print(i,df[i].unique())
""" as you can see 'stalk-root' contains '?' value which is non-sense. We need to fill it with reasonable value instead of '?'. There are several ways to fill missing or non-sense data.This method is very dummy, but sometimes it may be working well suprisingly. The column(s) contain(s) missing or non-sense value(s) can be wiped out. </br>If the column contains missing or non-sense value is categorical, then we can fill the missing or non-sense values with value has most frequency in that column. If the column is numerical, then we can fill the missing or non-sense values with average or median value of that column. </br>We can built decision tree or regression model for missing or non-sense value(s)."""
df['stalk-root'].value_counts()
"""For edible class"""
temp = df.loc[df['class'] == 'e' ]
temp['stalk-root'].value_counts()
"""For poisonous class"""
temp = df.loc[df['class'] == 'p' ]
temp['stalk-root'].value_counts()
# Impute: 'b' is the most frequent stalk-root value overall.
df['stalk-root'] = df['stalk-root'].replace('?','b') # '?' in stalk-root attribute filled with 'b' has most frequency.
# Let's see whether non-sense data exists, or not.
df['stalk-root'].value_counts()
# Move the 'class' column to the END of the frame: the ID3 helpers below
# assume the label is the last column (df.keys()[-1]).
temp = df.drop('class', axis=1)
temp['class']= df['class']
df = temp
df
print('Number of samples: ', df.shape[0])
print('Number of attributes: ', df.shape[1])
value_counts = df['class'].value_counts()
e = value_counts['e']
p = value_counts['p']
print('\nEdible: ', e)
print('Poisonous: ', p)
print('\nTotal: ', e + p)
"""
**ID3 Algorithm**
**In the ID3 algorithm, decision trees are** **calculated using the concept of entropy and** **information gain.**
**Entropy can be defined as:**
**H(S)=−∑i=1Npilog2pi **"""
import pandas as pd
import numpy as np
# eps for making value a bit greater than 0 later on
eps = np.finfo(float).eps
from numpy import log2 as log
def find_entropy(df):
    """Entropy H(S) = -sum(p_i * log2(p_i)) of the label column.

    The label is assumed to be the LAST column of *df*.
    """
    label_column = df.keys()[-1]
    # probability of each class value
    class_probabilities = df[label_column].value_counts() / len(df[label_column])
    return sum(-p * np.log2(p) for p in class_probabilities)
def find_entropy_attribute(df, attribute):
    """Weighted (conditional) entropy H(S | attribute).

    For each distinct value of *attribute*, the entropy of the label
    distribution within that subset is computed and weighted by the subset
    size. The label is assumed to be the LAST column of *df*.

    :param df: data frame with the label as its last column.
    :param attribute: column name whose conditional entropy is computed.
    :return: absolute value of the weighted entropy (a non-negative float).
    """
    # eps guards against log2(0) and division by zero (same fudge as before,
    # kept local instead of relying on a module-level global).
    eps = np.finfo(float).eps
    class_col = df.keys()[-1]
    target_values = df[class_col].unique()
    entropy2 = 0
    for variable in df[attribute].unique():
        # boolean mask of the subset; avoids pandas chained indexing with a
        # full-length boolean key (which triggers a reindexing warning)
        subset_mask = df[attribute] == variable
        den = int(subset_mask.sum())  # subset size, computed once per value
        entropy = 0
        for target in target_values:
            num = int((subset_mask & (df[class_col] == target)).sum())
            fraction = num / (den + eps)
            entropy += -fraction * np.log2(fraction + eps)
        # weight the subset entropy by its share of the whole data set
        entropy2 += -(den / len(df)) * entropy
    return abs(entropy2)
def find_winner(df):
    """Return the attribute (non-label column) with the highest information gain.

    Information gain of a column is H(S) - H(S | column); ties are broken in
    favor of the earliest column, as np.argmax keeps the first maximum.
    """
    candidate_columns = df.keys()[:-1]
    base_entropy = find_entropy(df)
    gains = [base_entropy - find_entropy_attribute(df, column)
             for column in candidate_columns]
    return candidate_columns[np.argmax(gains)]
def get_subtable(df, node, value):
    """Rows of *df* where column *node* equals *value*, reindexed from 0.

    :param df: the data frame to filter.
    :param node: column name to test.
    :param value: value the column must equal.
    :return: a new DataFrame with a fresh 0..n-1 index.
    """
    matching_rows = df.loc[df[node] == value]
    return matching_rows.reset_index(drop=True)
def buildTree(df, tree=None):
    """Recursively construct an ID3 decision tree.

    The label is assumed to be the LAST column of *df*.

    :param df: training data; feature columns first, label column last.
    :param tree: internal accumulator used by the recursion; leave as None.
    :return: nested dict of the form {attribute: {value: subtree_or_label}}.
    """
    Class = df.keys()[-1]
    # the attribute with maximum information gain becomes the current node
    node = find_winner(df)
    if tree is None:
        tree = {}
    tree[node] = {}
    # grow one branch per distinct value of the winning attribute
    for value in np.unique(df[node]):
        subtable = get_subtable(df, node, value)
        # BUG FIX: look the label up via the last column (Class) instead of
        # the hard-coded name 'class', consistent with find_entropy and
        # find_winner; the original broke when the label column was named
        # anything other than 'class'.
        clValue, counts = np.unique(subtable[Class], return_counts=True)
        if len(counts) == 1:
            # pure subset -> leaf holding the class label
            tree[node][value] = clValue[0]
        else:
            # impure subset -> recurse to grow a subtree
            tree[node][value] = buildTree(subtable)
    return tree
def predict(inst, tree):
    """Classify a single sample by walking the decision tree.

    :param inst: mapping (e.g. pd.Series or dict) from attribute name to value.
    :param tree: nested dict built by buildTree, or a leaf label.
    :return: the class label stored at the reached leaf.
    """
    current = tree
    # descend until we reach a leaf (anything that is not a dict);
    # each dict level holds exactly one attribute to test
    while type(current) is dict:
        attribute = next(iter(current))
        current = current[attribute][inst[attribute]]
    return current
""" Data Transformation
Our dataset is full of categorical data. We need to convert these categorical values into numerical representatives. There are two ways to do this.
Label Encoding: This method can be perform on columns with binary values. We can't apply this method on a columns has more than two different values, because this method may cause a hierarchy between values in a column. So, this may cause incorrect classification.
One-Hot Encoding: This method can also be called "Dummy Variable" by some. In this method, each distinct values in a column transform into a column. Unlike label encoding, there is no trouble in terms of hierarchy. As dataset grows dimensionally, this method may cause low running performance.
"""
from sklearn.preprocessing import LabelEncoder
# Encode every categorical column as integer codes so the ID3 helpers can
# work on a purely numeric frame.
# NOTE(review): one shared LabelEncoder instance is re-fit for every column,
# so earlier columns' fitted mappings are lost; fine here since the codes
# are never inverted back to the original labels.
labelencoder=LabelEncoder()
for col in df.columns:
    df[col] = labelencoder.fit_transform(df[col])
df.head()
"""**10 Fold Cross Validation**"""
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
# 10-fold cross-validation of the hand-rolled ID3 tree.
kf = KFold(n_splits=10)
# X keeps the label as its LAST column on purpose: buildTree reads the label
# from df.keys()[-1].
X = df
y = df['class']
accuracy_list = []
for train_index, test_index in kf.split(X):
    X_train, X_test = X.iloc[train_index], X.iloc[test_index]
    # BUG FIX: DataFrame.drop returns a NEW frame; the original discarded the
    # result, so the label column silently remained in the test features.
    X_test = X_test.drop('class', axis=1)
    y_test = y.iloc[test_index]
    # build the tree on this fold's training split (label = last column)
    tree = buildTree(X_train)
    # classify every row of the held-out split
    y_pred = []
    for i in range(len(X_test)):
        inst = pd.Series(X_test.iloc[i])
        y_pred.append(predict(inst, tree))
    accuracy = accuracy_score(y_test, y_pred)
    accuracy_list.append(accuracy)
    report = classification_report(y_test, y_pred)
    print('Fold Number:', len(accuracy_list))
    print('Accuracy Score:', accuracy)
    print('Classification Report:', '\n', report)
    print(100 * '=')
    print('\n')

print('Final Accuracy', np.mean(accuracy_list))
print('Standard Deviation', np.std(accuracy_list))
print('\n')
print(100 * '=')
print(100 * '=') | [
"numpy.argmax",
"pandas.read_csv",
"numpy.std",
"sklearn.metrics.accuracy_score",
"numpy.log2",
"sklearn.model_selection.KFold",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.classification_report",
"numpy.finfo",
"numpy.mean",
"pandas.Series",
"numpy.unique"
] | [((276, 362), 'pandas.read_csv', 'pd.read_csv', (['"""/content/drive/MyDrive/tree/Agaricus-lepiota.data.txt"""'], {'header': 'None'}), "('/content/drive/MyDrive/tree/Agaricus-lepiota.data.txt', header\n =None)\n", (287, 362), True, 'import pandas as pd\n'), ((7140, 7154), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (7152, 7154), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((7440, 7458), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (7445, 7458), False, 'from sklearn.model_selection import KFold\n'), ((3071, 3086), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3079, 3086), True, 'import numpy as np\n'), ((5057, 5076), 'numpy.unique', 'np.unique', (['df[node]'], {}), '(df[node])\n', (5066, 5076), True, 'import numpy as np\n'), ((7960, 7990), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7974, 7990), False, 'from sklearn.metrics import accuracy_score\n'), ((8037, 8074), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (8058, 8074), False, 'from sklearn.metrics import classification_report\n'), ((8269, 8291), 'numpy.mean', 'np.mean', (['accuracy_list'], {}), '(accuracy_list)\n', (8276, 8291), True, 'import numpy as np\n'), ((8321, 8342), 'numpy.std', 'np.std', (['accuracy_list'], {}), '(accuracy_list)\n', (8327, 8342), True, 'import numpy as np\n'), ((4471, 4484), 'numpy.argmax', 'np.argmax', (['IG'], {}), '(IG)\n', (4480, 4484), True, 'import numpy as np\n'), ((5475, 5523), 'numpy.unique', 'np.unique', (["subtable['class']"], {'return_counts': '(True)'}), "(subtable['class'], return_counts=True)\n", (5484, 5523), True, 'import numpy as np\n'), ((7850, 7875), 'pandas.Series', 'pd.Series', (['X_test.iloc[i]'], {}), '(X_test.iloc[i])\n', (7859, 7875), True, 'import pandas as pd\n'), ((3414, 3431), 'numpy.log2', 'np.log2', (['fraction'], {}), 
'(fraction)\n', (3421, 3431), True, 'import numpy as np\n'), ((4033, 4052), 'numpy.log2', 'log', (['(fraction + eps)'], {}), '(fraction + eps)\n', (4036, 4052), True, 'from numpy import log2 as log\n')] |
import os
import argparse
import json
import torch
import numpy as np
from datasets.oxford import get_dataloaders
from datasets.boreas import get_dataloaders_boreas
from networks.under_the_radar import UnderTheRadar
from networks.hero import HERO
from utils.utils import get_lr
from utils.losses import supervised_loss, unsupervised_loss
from utils.monitor import SVDMonitor, SteamMonitor
from datasets.transforms import augmentBatch, augmentBatch2, augmentBatch3
from ipdb import set_trace
# --- Global torch runtime configuration (applied at import time) ---
# Deterministic cuDNN setup: disable autotuning-based kernel selection and
# force deterministic algorithms so runs are repeatable.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
# Seed both torch and numpy RNGs for reproducibility.
torch.manual_seed(0)
np.random.seed(0)
torch.set_num_threads(8)
# 'file_system' sharing avoids running out of file descriptors when
# DataLoader workers pass many tensors between processes.
torch.multiprocessing.set_sharing_strategy('file_system')
print(torch.__version__)
print(torch.version.cuda)
if __name__ == '__main__':
    # ---- CLI arguments: a JSON config file and an optional pretrained checkpoint ----
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='config/steam.json', type=str, help='config file path')
    parser.add_argument('--pretrain', default=None, type=str, help='pretrain checkpoint path')
    args = parser.parse_args()
    with open(args.config) as f:
        config = json.load(f)
    # ---- Dataset and model selection are driven entirely by the config ----
    if config['dataset'] == 'oxford':
        train_loader, valid_loader, _ = get_dataloaders(config)
    elif config['dataset'] == 'boreas':
        train_loader, valid_loader, _ = get_dataloaders_boreas(config)
    if config['model'] == 'UnderTheRadar':
        model = UnderTheRadar(config).to(config['gpuid'])
    elif config['model'] == 'HERO':
        model = HERO(config).to(config['gpuid'])
    # Resume from the latest checkpoint in log_dir if one exists; otherwise fall
    # back to the --pretrain checkpoint (if given).
    ckpt_path = None
    if os.path.isfile(config['log_dir'] + 'latest.pt'):
        ckpt_path = config['log_dir'] + 'latest.pt'
    elif args.pretrain is not None:
        ckpt_path = args.pretrain
    optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])
    # Patience is expressed in validation steps: 2.5e4 optimizer steps worth.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=2.5e4 / config['val_rate'], factor=0.5)
    if config['model'] == 'UnderTheRadar':
        monitor = SVDMonitor(model, valid_loader, config)
    elif config['model'] == 'HERO':
        monitor = SteamMonitor(model, valid_loader, config)
    start_epoch = 0
    if ckpt_path is not None:
        try:
            print('Loading from checkpoint: ' + ckpt_path)
            checkpoint = torch.load(ckpt_path, map_location=torch.device(config['gpuid']))
            model.load_state_dict(checkpoint['model_state_dict'], strict=False)
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
            start_epoch = checkpoint['epoch']
            monitor.counter = checkpoint['counter']
            print('success')
        except Exception as e:
            # Older checkpoints stored only the raw state_dict.
            # NOTE(review): if torch.load itself raised above, `checkpoint`
            # is undefined here and this fallback raises NameError — confirm.
            print(e)
            print('Defaulting to legacy checkpoint style')
            model.load_state_dict(checkpoint, strict=False)
            print('success')
    # Keep a copy of the config next to the logs for reproducibility.
    if not os.path.isfile(config['log_dir'] + args.config):
        os.system('cp ' + args.config + ' ' + config['log_dir'])
    model.train()
    # ---- Training loop ----
    for epoch in range(start_epoch, config['max_epochs']):
        for batchi, batch in enumerate(train_loader):
            # Optional data augmentation, dataset/model specific.
            if config['augmentation']['rot_max'] != 0:
                if config['dataset'] == 'boreas':
                    batch = augmentBatch2(batch, config)
                elif config['dataset'] == 'oxford' and config['model'] == 'HERO':
                    batch = augmentBatch3(batch, config)
                elif config['dataset'] == 'oxford' and config['model'] == 'UnderTheRadar':
                    batch = augmentBatch(batch, config)
            optimizer.zero_grad()
            try:
                out = model(batch)
            except RuntimeError as e:
                # e.g. CUDA OOM or a solver failure on a degenerate batch.
                print(e)
                print('WARNING: exception encountered... skipping this batch.')
                continue
            if config['model'] == 'UnderTheRadar':
                loss, dict_loss = supervised_loss(out['R'], out['t'], batch, config)
            elif config['model'] == 'HERO':
                loss, dict_loss = unsupervised_loss(out, batch, config, model.solver)
                if loss == 0:
                    print("No movement predicted. Skipping mini-batch.")
                    continue
            if loss.requires_grad:
                loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 10)
            optimizer.step()
            # Periodic numbered snapshot every `save_rate` iterations.
            if (monitor.counter + 1) % config['save_rate'] == 0:
                with torch.no_grad():
                    model.eval()
                    mname = os.path.join(config['log_dir'], '{}.pt'.format(monitor.counter + 1))
                    print('saving model', mname)
                    torch.save({
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict(),
                        'counter': monitor.counter,
                        'epoch': epoch,
                    }, mname)
                    model.train()
            # Rolling 'latest.pt' backup every `backup_rate` iterations.
            if (monitor.counter + 1) % config['backup_rate'] == 0:
                with torch.no_grad():
                    model.eval()
                    mname = os.path.join(config['log_dir'], 'latest.pt')
                    print('saving model', mname)
                    torch.save({
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict(),
                        'counter': monitor.counter,
                        'epoch': epoch,
                    }, mname)
                    model.train()
            # The monitor logs training stats and periodically runs validation;
            # it returns a validation metric only on validation iterations.
            valid_metric = monitor.step(loss, dict_loss)
            if valid_metric is not None:
                scheduler.step(valid_metric)
                monitor.writer.add_scalar('val/learning_rate', get_lr(optimizer), monitor.counter)
            if monitor.counter >= config['max_iterations']:
                break
| [
"utils.losses.supervised_loss",
"numpy.random.seed",
"argparse.ArgumentParser",
"utils.monitor.SteamMonitor",
"os.path.isfile",
"torch.set_num_threads",
"datasets.boreas.get_dataloaders_boreas",
"torch.device",
"torch.no_grad",
"os.path.join",
"networks.under_the_radar.UnderTheRadar",
"torch.m... | [((611, 631), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (628, 631), False, 'import torch\n'), ((632, 649), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (646, 649), True, 'import numpy as np\n'), ((650, 674), 'torch.set_num_threads', 'torch.set_num_threads', (['(8)'], {}), '(8)\n', (671, 674), False, 'import torch\n'), ((675, 732), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (717, 732), False, 'import torch\n'), ((825, 850), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (848, 850), False, 'import argparse\n'), ((1571, 1618), 'os.path.isfile', 'os.path.isfile', (["(config['log_dir'] + 'latest.pt')"], {}), "(config['log_dir'] + 'latest.pt')\n", (1585, 1618), False, 'import os\n'), ((1830, 1951), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'patience': "(25000.0 / config['val_rate'])", 'factor': '(0.5)'}), "(optimizer, mode='min', patience=\n 25000.0 / config['val_rate'], factor=0.5)\n", (1872, 1951), False, 'import torch\n'), ((1127, 1139), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1136, 1139), False, 'import json\n'), ((1220, 1243), 'datasets.oxford.get_dataloaders', 'get_dataloaders', (['config'], {}), '(config)\n', (1235, 1243), False, 'from datasets.oxford import get_dataloaders\n'), ((2006, 2045), 'utils.monitor.SVDMonitor', 'SVDMonitor', (['model', 'valid_loader', 'config'], {}), '(model, valid_loader, config)\n', (2016, 2045), False, 'from utils.monitor import SVDMonitor, SteamMonitor\n'), ((2922, 2969), 'os.path.isfile', 'os.path.isfile', (["(config['log_dir'] + args.config)"], {}), "(config['log_dir'] + args.config)\n", (2936, 2969), False, 'import os\n'), ((2979, 3035), 'os.system', 'os.system', (["('cp ' + args.config + ' ' + config['log_dir'])"], {}), "('cp ' + args.config + ' ' + 
config['log_dir'])\n", (2988, 3035), False, 'import os\n'), ((1324, 1354), 'datasets.boreas.get_dataloaders_boreas', 'get_dataloaders_boreas', (['config'], {}), '(config)\n', (1346, 1354), False, 'from datasets.boreas import get_dataloaders_boreas\n'), ((2100, 2141), 'utils.monitor.SteamMonitor', 'SteamMonitor', (['model', 'valid_loader', 'config'], {}), '(model, valid_loader, config)\n', (2112, 2141), False, 'from utils.monitor import SVDMonitor, SteamMonitor\n'), ((1415, 1436), 'networks.under_the_radar.UnderTheRadar', 'UnderTheRadar', (['config'], {}), '(config)\n', (1428, 1436), False, 'from networks.under_the_radar import UnderTheRadar\n'), ((3956, 4006), 'utils.losses.supervised_loss', 'supervised_loss', (["out['R']", "out['t']", 'batch', 'config'], {}), "(out['R'], out['t'], batch, config)\n", (3971, 4006), False, 'from utils.losses import supervised_loss, unsupervised_loss\n'), ((1509, 1521), 'networks.hero.HERO', 'HERO', (['config'], {}), '(config)\n', (1513, 1521), False, 'from networks.hero import HERO\n'), ((2325, 2354), 'torch.device', 'torch.device', (["config['gpuid']"], {}), "(config['gpuid'])\n", (2337, 2354), False, 'import torch\n'), ((3302, 3330), 'datasets.transforms.augmentBatch2', 'augmentBatch2', (['batch', 'config'], {}), '(batch, config)\n', (3315, 3330), False, 'from datasets.transforms import augmentBatch, augmentBatch2, augmentBatch3\n'), ((4085, 4136), 'utils.losses.unsupervised_loss', 'unsupervised_loss', (['out', 'batch', 'config', 'model.solver'], {}), '(out, batch, config, model.solver)\n', (4102, 4136), False, 'from utils.losses import supervised_loss, unsupervised_loss\n'), ((4506, 4521), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4519, 4521), False, 'import torch\n'), ((5191, 5206), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5204, 5206), False, 'import torch\n'), ((5269, 5313), 'os.path.join', 'os.path.join', (["config['log_dir']", '"""latest.pt"""'], {}), "(config['log_dir'], 'latest.pt')\n", (5281, 5313), 
False, 'import os\n'), ((5971, 5988), 'utils.utils.get_lr', 'get_lr', (['optimizer'], {}), '(optimizer)\n', (5977, 5988), False, 'from utils.utils import get_lr\n'), ((3441, 3469), 'datasets.transforms.augmentBatch3', 'augmentBatch3', (['batch', 'config'], {}), '(batch, config)\n', (3454, 3469), False, 'from datasets.transforms import augmentBatch, augmentBatch2, augmentBatch3\n'), ((3589, 3616), 'datasets.transforms.augmentBatch', 'augmentBatch', (['batch', 'config'], {}), '(batch, config)\n', (3601, 3616), False, 'from datasets.transforms import augmentBatch, augmentBatch2, augmentBatch3\n')] |
import ReadData
from matplotlib import pyplot as plt
from collections import Counter
import numpy as np
def Flatten(l):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for sub in l:
        flat.extend(sub)
    return flat
#len_stat = [len(i) for i in short_data]
#ttt = sum([i > 500 for i in len_stat]) + sum([i < 80 for i in len_stat])
#print(ttt/len(len_stat))
#plt.hist(len_stat, bins=200, range=[80,500])
#plt.plot(my_short_train_data[131])
########## plot long short
#my_short_all_data = my_short_train_data + my_short_val_data
#
##ll = sorted(len_stat, reverse=True)
#for i in range(100):
# fig = plt.figure()
# plt.plot(my_short_all_data[len_stat.index(ll[i])])
# plt.savefig('img/'+str(ll[i])+'.png', bbox_inches='tight')
# plt.close(fig)
############ plot qrs
#QRS_pid, QRS_data, QRS_label = ReadData.ReadData( '../../data1/QRSinfo.csv' )
#tmp = Flatten(QRS_data)
# plt.hist(tmp, bins=100, range=[600, 2000])
#Counter
############ plot long
#long_pid, long_data, long_label = ReadData.ReadData( '../../data1/long.csv' )
# NOTE(review): `long_data` is produced by the ReadData.ReadData(...) call just
# above, which is currently commented out — re-enable it before running this
# section, otherwise this raises NameError.
# Length (in samples) of each long recording.
len_stat = [len(i) for i in long_data]
len_arr = np.array(len_stat)  # convert once instead of once per threshold
print(len(len_stat))
# Counts of recordings at least 9000 / 6000 / 3000 samples long.
print(sum(len_arr >= 9000))
print(sum(len_arr >= 6000))
print(sum(len_arr >= 3000))
| [
"numpy.array"
] | [((1075, 1093), 'numpy.array', 'np.array', (['len_stat'], {}), '(len_stat)\n', (1083, 1093), True, 'import numpy as np\n'), ((1114, 1132), 'numpy.array', 'np.array', (['len_stat'], {}), '(len_stat)\n', (1122, 1132), True, 'import numpy as np\n'), ((1153, 1171), 'numpy.array', 'np.array', (['len_stat'], {}), '(len_stat)\n', (1161, 1171), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import os
import tensorflow as tf
import time
import numpy as np
import data_io.basepy as basepy
import multiprocessing as mp
def main(_):
    """Entry point: reform motion C3D .npy feature folders.

    Flags select the source folder, the reform strategy (``maxtop`` top-1000
    rows by motion value, or ``segment`` 32 averaged temporal segments via
    var0) and the scale/region configuration (var1), then delegate all work
    to :func:`npy_reform`.
    """
    tags = tf.flags
    # Net config
    tags.DEFINE_string('npy_file_path',
                       '/absolute/datasets/anoma_motion_pyramid_120_85_c3d_npy',
                       'npy file folder to be reformed.')
    tags.DEFINE_string('testing_list',
                       '/absolute/datasets/Anomaly-Detection-Dataset/Temporal_Anomaly_Annotation_for_Testing_Videos.txt',
                       'testing txt list, temporal annotation.')
    tags.DEFINE_boolean('multiprocessing', True, 'choose multiprocessing or not.')
    tags.DEFINE_integer('var0', 0, 'choose NPY_FILE_FOLDER, SEGMENT_NUM, TEST_FILE.')
    tags.DEFINE_integer('var1', 0, 'choose MULTISCALE, MULTIREGION.')
    F = tags.FLAGS
    REFORM_TYPE, REFORM_NUM = (('maxtop', 1000), ('segment', 32))[F.var0]
    MULTISCALE, MULTIREGION = (('pyramid', 4), ('pyramid', 1), ('single', 4), ('single', 1), (None, None))[F.var1]
    # BUG FIX: the flag is declared as 'testing_list' above, but the original
    # code read the undefined attribute F.test_file (AttributeError at runtime).
    _ = npy_reform(F.npy_file_path, MULTISCALE, MULTIREGION, REFORM_TYPE, REFORM_NUM,
                   F.multiprocessing, F.testing_list)
def npy_reform(npy_file_folder_path,
               multiscale, multiregion, reform_type, reform_num,
               if_multiprocessing, test_file):
    """Reform every .npy feature file in a folder into a results folder.

    The results folder name is derived from the source folder name; files
    already present in it are skipped (resume support). Work is optionally
    fanned out over a process pool.

    Returns the results folder path.
    """
    try:
        results_folder_path = npy_file_folder_path.replace('_motion_', '_motion_reformed_') \
            .replace('_pyramid_', '_%s_' % multiscale) \
            .replace('_c3d_npy', '_%dregion_c3d_npy' % multiregion)
    except TypeError:
        # multiregion is None in the (None, None) configuration, so the
        # '%d' format above raises TypeError; fall back to the plain rename.
        # (Narrowed from a bare `except:` which also hid real errors.)
        results_folder_path = npy_file_folder_path.replace('_motion_', '_motion_reformed_')
    results_folder_path = results_folder_path.replace('_c3d_npy', '_%s_%d_c3d_npy' % (reform_type, reform_num))
    # Stringified list of testing annotations; membership is checked by
    # substring search in npy_preprocessing.
    test_str = str(basepy.read_txt_lines2list(test_file, sep=' '))
    print('Converting %s to %s :' % (npy_file_folder_path, results_folder_path))
    multiprocessing_num = int(mp.cpu_count() / 4)
    # remaining_list: files not yet converted; split_list: same files divided
    # into multiprocessing_num chunks.
    remaining_list, split_list = basepy.get_remaining_to_multi(
        basepy.get_1tier_file_path_list(npy_file_folder_path, '.npy'),
        basepy.get_1tier_file_path_list(basepy.check_or_create_path(results_folder_path), suffix='.npy'),
        divide_num=multiprocessing_num, if_print=True)
    # npy_list_preprocessing(remaining_list, EVAL_RESULT_FOLDER, MULTISCALE, MULTIREGION)
    if if_multiprocessing:
        p = mp.Pool(multiprocessing_num)
        for j, em in enumerate(split_list):
            p.apply_async(npy_list_preprocessing,
                          args=(em,
                                results_folder_path, multiscale, multiregion, reform_type, reform_num, test_str))
        p.close()
        p.join()
    else:
        npy_list_preprocessing(remaining_list,
                               results_folder_path, multiscale, multiregion, reform_type, reform_num, test_str)
    # END
    print('Converting DONE ------ Debug Symbol ------ %s ------' % time.asctime(time.localtime(time.time())))
    return results_folder_path
def npy_list_preprocessing(npy_file_list, eval_result_folder,
                           multiscale, multiregion, reform_type, number_in_one, test_str):
    """Sequentially reform each .npy file in *npy_file_list* into
    *eval_result_folder* with the given scale/region/reform settings."""
    for one_file in npy_file_list:
        npy_preprocessing(one_file, eval_result_folder, multiscale,
                          multiregion, reform_type, number_in_one, test_str)
def npy_preprocessing(npy_file, eval_result_folder, multiscale, multiregion, reform_type, reform_num, test_str):
    """Reform one .npy feature file and save the result.

    Rows appear to be grouped in 4-region blocks; columns hold two stacked
    4096-D feature vectors followed by metadata, with column 4097 a region
    id (0..3) and column -2 a motion value — assumed from the indexing
    below, confirm against the feature-extraction code.
    Testing videos (listed in test_str) are saved untouched; training
    videos are reduced by `reform_type` ('segment' averaging or 'maxtop'
    motion-ranked selection).
    """
    # print('processing %s ...' % npy_file)
    npy_data = np.load(npy_file)
    if multiregion == 1:
        # Keep only the highest-motion region out of each block of 4 rows.
        line_split = [i for i in range(npy_data.shape[0]) if i % 4 == 0]
        for j, line in enumerate(line_split):
            motion_value_split = npy_data[line:line + 4, -2]
            max_index = np.argmax(motion_value_split)
            line_split[j] = line + max_index
        npy_data = npy_data[line_split]
    # npy_data.shape[1] // 4096
    if multiscale == 'single':
        # Drop the second 4096-D feature block entirely.
        npy_data = np.concatenate((npy_data[:, :4096], npy_data[:, 8192:]), axis=1)
    elif multiscale == 'pyramid':
        # Element-wise max-pool the two 4096-D feature blocks.
        npy_data = np.concatenate((np.maximum(npy_data[:, :4096], npy_data[:, 4096:8192]), npy_data[:, 8192:]), axis=1)
    # DIFFERENT STRATEGY IN TRAINING AND TESTING
    # File names look like '<prefix>@<video_id>.npy'; testing videos pass through unchanged.
    if osp.basename(npy_file).split('@')[1].split('.')[0] in test_str:
        new_npy_data = npy_data
    else:
        if reform_type == 'segment':
            if multiregion == 4:
                # Segment each region separately, then interleave with a
                # Fortran-order reshape so regions alternate row by row.
                new_npy_data = np.array(
                    [merge_1region_2segment(np.array([line for line in npy_data if line[4097] == 0]), reform_num),
                     merge_1region_2segment(np.array([line for line in npy_data if line[4097] == 1]), reform_num),
                     merge_1region_2segment(np.array([line for line in npy_data if line[4097] == 2]), reform_num),
                     merge_1region_2segment(np.array([line for line in npy_data if line[4097] == 3]), reform_num)])
                new_npy_data = new_npy_data.reshape([-1, new_npy_data.shape[-1]], order='F')
            else:
                new_npy_data = merge_1region_2segment(npy_data, reform_num)
        elif reform_type == 'maxtop':
            if multiregion == 4:
                # Same per-region treatment, selecting the top rows by motion value.
                new_npy_data = np.array(
                    [max_1region_select(np.array([line for line in npy_data if line[4097] == 0]), reform_num),
                     max_1region_select(np.array([line for line in npy_data if line[4097] == 1]), reform_num),
                     max_1region_select(np.array([line for line in npy_data if line[4097] == 2]), reform_num),
                     max_1region_select(np.array([line for line in npy_data if line[4097] == 3]), reform_num)])
                new_npy_data = new_npy_data.reshape([-1, new_npy_data.shape[-1]], order='F')
            else:
                new_npy_data = max_1region_select(npy_data, reform_num)
        else:
            raise ValueError('Wrong reform_type: %s' % reform_type)
    npy_result_file = osp.join(eval_result_folder, osp.basename(npy_file))
    np.save(npy_result_file, new_npy_data)
def merge_1region_2segment(npy_data, segment_num):
# npy_data must in size 4096 + n
npy_data_num = npy_data.shape[0]
segment_split = [int(i * (npy_data_num) / segment_num) for i in range(segment_num)]
npy_segment_data = []
for j, segment in enumerate(segment_split):
try:
start, end = [segment, max(segment + 1, segment_split[j + 1])]
except:
start, end = [segment, npy_data_num]
npy_segment_data.append(np.average(npy_data[start:end], axis=0))
return np.array(npy_segment_data)
def max_1region_select(npy_data, num_from_max):
    """Return up to *num_from_max* rows of *npy_data* (feature rows, 4096 + n
    columns) ranked by descending value of the second-to-last column."""
    motion_scores = npy_data[:, -2]
    descending_order = np.argsort(-motion_scores)
    ranked = npy_data[descending_order]
    return ranked[:num_from_max]
def clear_npy_for_testing(npy_file_folder, test_str):
    """Delete from *npy_file_folder* every .npy file whose video id appears
    in *test_str* (the stringified testing annotation list).

    File names are assumed to look like '<prefix>@<video_id>.npy' — the id
    is taken from the segment after '@'; confirm against the writer side.
    """
    # del reform folder: npy_file_folder
    npy_list = basepy.get_1tier_file_path_list(npy_file_folder, '.npy')
    j = 0  # count of removed files, for logging only
    for npy in npy_list:
        # Substring membership test against the stringified annotation list.
        if osp.basename(npy).split('@')[1].split('.')[0] in str(test_str):
            os.remove(npy)
            print('remove %d-th file: %s' % (j, npy))
            j += 1
if __name__ == '__main__':
    # Parse tf flags and dispatch to main().
    tf.app.run()
| [
"numpy.load",
"numpy.save",
"data_io.basepy.check_or_create_path",
"numpy.average",
"os.remove",
"os.path.basename",
"numpy.argmax",
"numpy.maximum",
"time.time",
"numpy.argsort",
"data_io.basepy.get_1tier_file_path_list",
"numpy.array",
"multiprocessing.Pool",
"data_io.basepy.read_txt_lin... | [((3613, 3630), 'numpy.load', 'np.load', (['npy_file'], {}), '(npy_file)\n', (3620, 3630), True, 'import numpy as np\n'), ((6101, 6139), 'numpy.save', 'np.save', (['npy_result_file', 'new_npy_data'], {}), '(npy_result_file, new_npy_data)\n', (6108, 6139), True, 'import numpy as np\n'), ((6666, 6692), 'numpy.array', 'np.array', (['npy_segment_data'], {}), '(npy_segment_data)\n', (6674, 6692), True, 'import numpy as np\n'), ((6973, 7029), 'data_io.basepy.get_1tier_file_path_list', 'basepy.get_1tier_file_path_list', (['npy_file_folder', '""".npy"""'], {}), "(npy_file_folder, '.npy')\n", (7004, 7029), True, 'import data_io.basepy as basepy\n'), ((7273, 7285), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (7283, 7285), True, 'import tensorflow as tf\n'), ((1896, 1943), 'data_io.basepy.read_txt_lines2list', 'basepy.read_txt_lines2list', (['test_file'], {'sep': '""" """'}), "(test_file, sep=' ')\n", (1922, 1943), True, 'import data_io.basepy as basepy\n'), ((2149, 2210), 'data_io.basepy.get_1tier_file_path_list', 'basepy.get_1tier_file_path_list', (['npy_file_folder_path', '""".npy"""'], {}), "(npy_file_folder_path, '.npy')\n", (2180, 2210), True, 'import data_io.basepy as basepy\n'), ((2502, 2530), 'multiprocessing.Pool', 'mp.Pool', (['multiprocessing_num'], {}), '(multiprocessing_num)\n', (2509, 2530), True, 'import multiprocessing as mp\n'), ((4058, 4122), 'numpy.concatenate', 'np.concatenate', (['(npy_data[:, :4096], npy_data[:, 8192:])'], {'axis': '(1)'}), '((npy_data[:, :4096], npy_data[:, 8192:]), axis=1)\n', (4072, 4122), True, 'import numpy as np\n'), ((6073, 6095), 'os.path.basename', 'osp.basename', (['npy_file'], {}), '(npy_file)\n', (6085, 6095), True, 'import os.path as osp\n'), ((6800, 6828), 'numpy.argsort', 'np.argsort', (['(-npy_data[:, -2])'], {}), '(-npy_data[:, -2])\n', (6810, 6828), True, 'import numpy as np\n'), ((2057, 2071), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (2069, 2071), True, 
'import multiprocessing as mp\n'), ((2252, 2300), 'data_io.basepy.check_or_create_path', 'basepy.check_or_create_path', (['results_folder_path'], {}), '(results_folder_path)\n', (2279, 2300), True, 'import data_io.basepy as basepy\n'), ((3860, 3889), 'numpy.argmax', 'np.argmax', (['motion_value_split'], {}), '(motion_value_split)\n', (3869, 3889), True, 'import numpy as np\n'), ((6614, 6653), 'numpy.average', 'np.average', (['npy_data[start:end]'], {'axis': '(0)'}), '(npy_data[start:end], axis=0)\n', (6624, 6653), True, 'import numpy as np\n'), ((7152, 7166), 'os.remove', 'os.remove', (['npy'], {}), '(npy)\n', (7161, 7166), False, 'import os\n'), ((3084, 3095), 'time.time', 'time.time', ([], {}), '()\n', (3093, 3095), False, 'import time\n'), ((4192, 4246), 'numpy.maximum', 'np.maximum', (['npy_data[:, :4096]', 'npy_data[:, 4096:8192]'], {}), '(npy_data[:, :4096], npy_data[:, 4096:8192])\n', (4202, 4246), True, 'import numpy as np\n'), ((4595, 4651), 'numpy.array', 'np.array', (['[line for line in npy_data if line[4097] == 0]'], {}), '([line for line in npy_data if line[4097] == 0])\n', (4603, 4651), True, 'import numpy as np\n'), ((4710, 4766), 'numpy.array', 'np.array', (['[line for line in npy_data if line[4097] == 1]'], {}), '([line for line in npy_data if line[4097] == 1])\n', (4718, 4766), True, 'import numpy as np\n'), ((4825, 4881), 'numpy.array', 'np.array', (['[line for line in npy_data if line[4097] == 2]'], {}), '([line for line in npy_data if line[4097] == 2])\n', (4833, 4881), True, 'import numpy as np\n'), ((4940, 4996), 'numpy.array', 'np.array', (['[line for line in npy_data if line[4097] == 3]'], {}), '([line for line in npy_data if line[4097] == 3])\n', (4948, 4996), True, 'import numpy as np\n'), ((4334, 4356), 'os.path.basename', 'osp.basename', (['npy_file'], {}), '(npy_file)\n', (4346, 4356), True, 'import os.path as osp\n'), ((5351, 5407), 'numpy.array', 'np.array', (['[line for line in npy_data if line[4097] == 0]'], {}), '([line for line 
in npy_data if line[4097] == 0])\n', (5359, 5407), True, 'import numpy as np\n'), ((5462, 5518), 'numpy.array', 'np.array', (['[line for line in npy_data if line[4097] == 1]'], {}), '([line for line in npy_data if line[4097] == 1])\n', (5470, 5518), True, 'import numpy as np\n'), ((5573, 5629), 'numpy.array', 'np.array', (['[line for line in npy_data if line[4097] == 2]'], {}), '([line for line in npy_data if line[4097] == 2])\n', (5581, 5629), True, 'import numpy as np\n'), ((5684, 5740), 'numpy.array', 'np.array', (['[line for line in npy_data if line[4097] == 3]'], {}), '([line for line in npy_data if line[4097] == 3])\n', (5692, 5740), True, 'import numpy as np\n'), ((7076, 7093), 'os.path.basename', 'osp.basename', (['npy'], {}), '(npy)\n', (7088, 7093), True, 'import os.path as osp\n')] |
import random
import numpy as np
from collections import deque
class ReplayBuffer:
    """
    Replay buffer: stores and retrieves gameplay experiences.

    Holds up to 1,000,000 transitions in FIFO order; the oldest transitions
    are evicted automatically once the capacity is reached.
    """

    def __init__(self):
        self.gameplay_experiences = deque(maxlen=1000000)

    def store_gameplay_experience(self, current_obs, next_obs, action, reward, done):
        """
        records a single step (state transition) of gameplay experiences.

        :param current_obs: the current game state
        :param next_obs: the game state after taking action
        :param action: the action taken at the current state
        :param reward: the reward taking action at the current state brings
        :param done: a boolean indicating if the game is finished after taking the action
        :return: None
        """
        self.gameplay_experiences.append((current_obs, next_obs, action, reward, done))

    def sample_gameplay_batch(self):
        """
        samples a random batch of gameplay experiences for training.

        :return: a 5-tuple of numpy arrays (current_obs, next_obs, actions,
                 rewards, dones), each of length min(168, buffer size)
        """
        # 24*7 = 168; presumably one week of hourly steps — confirm.
        batch_size = min(24 * 7, len(self.gameplay_experiences))
        sampled = random.sample(self.gameplay_experiences, batch_size)
        if sampled:
            # Transpose the list of 5-tuples into 5 parallel sequences in
            # one pass instead of five append-loops.
            current_obs, next_obs, actions, rewards, dones = zip(*sampled)
        else:
            # Empty buffer: return five empty arrays, matching the original
            # behavior (zip(*[]) alone would fail to unpack).
            current_obs = next_obs = actions = rewards = dones = ()
        return (np.array(current_obs), np.array(next_obs), np.array(actions),
                np.array(rewards), np.array(dones))
| [
"random.sample",
"numpy.array",
"collections.deque"
] | [((232, 253), 'collections.deque', 'deque', ([], {'maxlen': '(1000000)'}), '(maxlen=1000000)\n', (237, 253), False, 'from collections import deque\n'), ((1166, 1218), 'random.sample', 'random.sample', (['self.gameplay_experiences', 'batch_size'], {}), '(self.gameplay_experiences, batch_size)\n', (1179, 1218), False, 'import random\n'), ((1725, 1752), 'numpy.array', 'np.array', (['current_obs_batch'], {}), '(current_obs_batch)\n', (1733, 1752), True, 'import numpy as np\n'), ((1754, 1778), 'numpy.array', 'np.array', (['next_obs_batch'], {}), '(next_obs_batch)\n', (1762, 1778), True, 'import numpy as np\n'), ((1780, 1802), 'numpy.array', 'np.array', (['action_batch'], {}), '(action_batch)\n', (1788, 1802), True, 'import numpy as np\n'), ((1804, 1826), 'numpy.array', 'np.array', (['reward_batch'], {}), '(reward_batch)\n', (1812, 1826), True, 'import numpy as np\n'), ((1828, 1848), 'numpy.array', 'np.array', (['done_batch'], {}), '(done_batch)\n', (1836, 1848), True, 'import numpy as np\n')] |
# coding: utf-8
# # Random Forest
#
# In this lab you will learn the most important aspects of the random forest learning method.
# Completing this lab and analyzing the code will give you a deeper understanding of these type of models.
# In our experiments we will mostly use the package sklearn from which we import RandomForestClassifier.
#
# In[ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
get_ipython().magic(u'matplotlib inline')
get_ipython().magic(u'load_ext autoreload')
get_ipython().magic(u'autoreload 2')
# In[ ]:
from sklearn.datasets import make_classification, make_regression
# ## Data Creation
#
# First of all, we create a data set containing 1000 samples with 2 features and two classes:
# In[ ]:
# 1000 samples, 2 informative features, 2 classes (one cluster per class);
# fixed random_state for reproducibility.
X, y = make_classification(n_samples = 1000,n_features=2, n_redundant=0, n_informative=2,
                             random_state=1, n_clusters_per_class=1)
# <b>Exercise 1:</b>
#
# Visualize the data set. It should look like this:
# <img src="figures/dataset.png" width="600"/>
# In[ ]:
### WRITE YOUR CODE HERE ###
# We split our data into train and test data. Then we can train our model (a random forest) on the train data and evaluate the model on the hold out test data. We split the data in a way that we train our model on 67% of the data and test our model on 33% of the data.
# In[ ]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.33, random_state=42)
# <b>Exercise 2:</b>
#
# Train a random forest on the training data and report the accuracy for this model on the train and test data using the default parameters of a random forest. What can you conclude from this? from sklearn.
# In[ ]:
clf = RandomForestClassifier()
### WRITE YOUR CODE HERE ###
# ## Decision Boundary
#
# Sometimes it is helpful to plot the decision boundary for a learned model. To do so, we create a grid of data points and calculate the probability of belonging to class 1.
# In[ ]:
# Build a grid over the feature space (step h) and evaluate the predicted
# probability of class 1 at every grid point.
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
h = .1  # grid resolution
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
# Then we can plot the boundary using the 'contourf' function of matplotlib.
# In[ ]:
# Filled contour of the class-1 probability, with the data points overlaid
# colored by their true class.
cm = plt.cm.RdBu # color map
plt.contourf(xx, yy, Z, alpha=.8, cmap=cm)
colors = ['red','blue']
for cur_class in [0,1]:
    plt.scatter(X[y==cur_class, 0], X[y == cur_class, 1], c=colors[cur_class],
                edgecolors='k', alpha=0.6, label=cur_class)
plt.legend()
plt.show()
# What can you conclude from the figure above?
# ## Parameter Selection
#
# The implementation of the random forest algorithm in sklearn has many parameter. The most important ones are the number of trees used (n_estimators) and the maximal depth of a single tree (max_depth). Investigate how the number of used trees effects the training and testing accuracy.
#
# <b>Exercise 3:</b>
#
# Plot a diagram that shows the training and testing accuracy depending on the number of trees (from 1 to 20) used. This plot should look like this:
# <img src="figures/num_trees.png" width="600"/>
# In[ ]:
### WRITE YOUR CODE HERE ###
# ## Churn Data Set
# Lets revisit the churn data set from the first tutorial.
# In[ ]:
# Load the churn data set; separate the 'Churn' target column from the features.
churn_df = pd.read_csv('telecom_churn.csv')
label = churn_df['Churn']
churn_df = churn_df.drop(columns=['Churn'])
# <b>Exercise 4:</b>
#
# Create a data set containing only the numeric values. <b>Optional:</b> Try to convert all non numeric values to numeric values using a one hot encoding or by binning them.
# In[ ]:
### WRITE YOUR CODE HERE ###
# <b>Exercise 5:</b>
#
# Train a model on this data set and visualize the most important features in a figure. This should look like this (The scaling and order of features can be different):
# <img src="figures/importance.png" width="600"/>
#
# <b>Hint</b>: The method feature_importance_ should be used.
# What can you conclude?
# In[ ]:
### WRITE YOUR CODE HERE ###
# <b>Exercise 6:</b>
#
# If we want to use a random forest to solve regression problems we can use the RandomForestRegressor from sklearn.
# * Generate an easy regression data set using make_regression with 10 features. (use function make_regression)
# * Split the data set into a train and test set.
# * Train a model and report the training and testing mean square error (can be calculated using sklearn.metrics.mean_squared_error)
# In[ ]:
### WRITE YOUR CODE HERE ###
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| [
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.show",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"sklearn.datasets.make_classification",
"matplotlib.pyplot.contourf",
"numpy.arange"
] | [((874, 999), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1000)', 'n_features': '(2)', 'n_redundant': '(0)', 'n_informative': '(2)', 'random_state': '(1)', 'n_clusters_per_class': '(1)'}), '(n_samples=1000, n_features=2, n_redundant=0,\n n_informative=2, random_state=1, n_clusters_per_class=1)\n', (893, 999), False, 'from sklearn.datasets import make_classification, make_regression\n'), ((1559, 1614), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(X, y, test_size=0.33, random_state=42)\n', (1575, 1614), False, 'from sklearn.model_selection import train_test_split\n'), ((1864, 1888), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (1886, 1888), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2532, 2575), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'Z'], {'alpha': '(0.8)', 'cmap': 'cm'}), '(xx, yy, Z, alpha=0.8, cmap=cm)\n', (2544, 2575), True, 'import matplotlib.pyplot as plt\n'), ((2769, 2781), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2779, 2781), True, 'import matplotlib.pyplot as plt\n'), ((2782, 2792), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2790, 2792), True, 'import matplotlib.pyplot as plt\n'), ((3526, 3558), 'pandas.read_csv', 'pd.read_csv', (['"""telecom_churn.csv"""'], {}), "('telecom_churn.csv')\n", (3537, 3558), True, 'import pandas as pd\n'), ((2248, 2274), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (2257, 2274), True, 'import numpy as np\n'), ((2301, 2327), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (2310, 2327), True, 'import numpy as np\n'), ((2627, 2751), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[y == cur_class, 0]', 'X[y == cur_class, 1]'], {'c': 'colors[cur_class]', 'edgecolors': '"""k"""', 'alpha': '(0.6)', 'label': 
'cur_class'}), "(X[y == cur_class, 0], X[y == cur_class, 1], c=colors[cur_class],\n edgecolors='k', alpha=0.6, label=cur_class)\n", (2638, 2751), True, 'import matplotlib.pyplot as plt\n')] |
"""
CEASIOMpy: Conceptual Aircraft Design Software
Developed for CFS ENGINEERING, 1015 Lausanne, Switzerland
The script evaluates the centre of gravity coordinates in case of:
* OEM = Operating empty mass;
* MTOM = Maximum take off mass, with Max Payload:
* ZFM = zero fuel mass;
* ZPM = zero Payload mass
* With a percentage of Fuel and Payload defined by the user.
| Works with Python 2.7
| Author: <NAME>
| Date of creation: 2018-10-12
| Last modification: 2019-02-20
"""
#=============================================================================
# IMPORTS
#=============================================================================
import numpy as np
from ceasiompy.utils.ceasiomlogger import get_logger
log = get_logger(__file__.split('.')[0])
#=============================================================================
# CLASSES
#=============================================================================
"""All classes are defined inside the InputClasses/Conventional"""
#=============================================================================
# FUNCTIONS
#=============================================================================
def center_of_gravity_evaluation(F_PERC, P_PERC, cabin_seg, ag, mw,\
WING_MOUNTED = True):
""" The function evaluates the center of gravity of airplanes given the
geometry from cpacs file (tigl_func.py) and masses from
weight_main.py.
Source: An introduction to mechanics, 2nd ed., <NAME>
and <NAME>, Cambridge University Press.
ARGUMENTS
(int) FPERC --Arg.: Percentage of the maximum amount of fuel used
(int_array) cabin_seg --Arg.: Array that will contain 1 if the
segment is a cabin segment or
0 otherwise.
(class) ag --Arg.: AircraftGeometry class look at
aircraft_geometry_class.py in the
classes folder for explanation.
(class) mw --Arg.: MassesWeights class.
##======= Class is defined in the InputClasses folder =======##
(boolean) WING_MOUNTED --Att.: True if the engines are
mounted on the front main wing.
RETURN
(float_array) center_of_gravity --Out.: x,y,z coordinates of the CoG.
(float_array) mass_seg_i --Out.: Mass of each segment of each
component of the aircraft.
(float_array) airplane_centers_segs --Out.: Point at the center of
each segment of the
aircraft.
"""
max_seg_n = np.max([np.amax(ag.fuse_seg_nb), np.amax(ag.wing_seg_nb)])
t_nb = ag.fus_nb + ag.w_nb # Number of parts not counting symmetry
tot_nb = ag.fuse_nb + ag.wing_nb # Number of parts counting symmetry
segments_nb = []
for i in range(1,ag.fus_nb+1):
segments_nb.append(ag.fuse_seg_nb[i-1])
if ag.fuse_sym[i-1] != 0:
segments_nb.append(ag.fuse_seg_nb[i-1])
htw = 0
x0 = 0
s = 0
for i in range(1,ag.w_nb+1):
segments_nb.append(ag.wing_seg_nb[i-1])
if ag.wing_sym[i-1] != 0:
segments_nb.append(ag.wing_seg_nb[i-1])
s += 1
if ag.is_horiz[i-1+s]:
if i != ag.main_wing_index:
htw = i
else:
x = np.amax(ag.wing_center_seg_point[:,i+s-1,0])
if x > x0:
tw = i
x0 = x
mass_seg_i = np.zeros((max_seg_n,tot_nb))
v_tot = (ag.wing_tot_vol-ag.wing_fuel_vol) + np.sum(ag.fuse_vol)
# Evaluating eom density, fuel density, passenger density
oem_par = mw.operating_empty_mass / v_tot
mpass_par = (mw.mass_payload*(P_PERC/100)) / np.sum(ag.fuse_cabin_vol)
mfuel_par = (mw.mass_fuel_max*(F_PERC/100)) / ag.wing_fuel_vol
mtom = mw.operating_empty_mass + (mw.mass_payload*(P_PERC/100))\
+ (mw.mass_fuel_max*(F_PERC/100))
# Definition of the mass of each segment
ex = False
fs = []
wg = []
f = 0
i = ag.fus_nb
for j in range(1,ag.fuse_seg_nb[i-1]+1):
if cabin_seg[j-1][i-1+f] == 1:
mass_seg_i[j-1][i-1+f] = (oem_par+mpass_par)\
* ag.fuse_seg_vol[j-1][i-1]
else:
mass_seg_i[j-1][i-1+f] = oem_par * ag.fuse_seg_vol[j-1][i-1]
fs.append(i)
if ag.fuse_sym[i-1-ag.fus_nb] != 0:
f += 1
mass_seg_i[:,i-1+f] = mass_seg_i[:,i-2+f]
fs.append(i)
w = 0
for i in range(ag.fus_nb+1,t_nb+1):
for j in range(1,ag.wing_seg_nb[i-1-ag.fus_nb]+1):
if i == ag.main_wing_index:
mass_seg_i[j-1][i-1+w] = oem_par\
* (ag.wing_seg_vol[j-1][i-1-ag.fus_nb]\
- ag.wing_fuel_seg_vol[j-1][i-1-ag.fus_nb])\
+ mfuel_par * (ag.wing_fuel_seg_vol[j-1][i-1-ag.fus_nb])
else:
mass_seg_i[j-1][i-1+w] = oem_par\
* ag.wing_seg_vol[j-1][i-1-ag.fus_nb]
wg.append(i-ag.fus_nb)
if ag.wing_sym[i-1-ag.fus_nb] != 0:
w += 1
mass_seg_i[:,i-1+w]=mass_seg_i[:,i-2+w]
wg.append(i-ag.fus_nb)
if i+w+f == tot_nb:
break
# Mass check
while not ex:
if abs(round(mtom,3) - round(np.sum(mass_seg_i),3)) < 0.0001:
ex = True
else:
mass = (round(mtom,3)- round(np.sum(mass_seg_i),3))/2
if not WING_MOUNTED:
if htw != 0:
a = wg.index(htw)
else:
a = wg.index(tw)
else:
a = wg.index(ag.main_wing_index)
mass_seg_i[0][ag.fuse_nb+a] = mass_seg_i[0][ag.fuse_nb+a] + mass
if ag.is_horiz[a]:
mass_seg_i[0][ag.fuse_nb+a+1]\
= mass_seg_i[0][ag.fuse_nb+a+1] + mass
else:
mass_seg_i[0][ag.fuse_nb+a]\
= mass_seg_i[0][ag.fuse_nb+a] + mass
ag.wing_center_seg_point.resize(max_seg_n,ag.wing_nb,3)
ag.fuse_center_seg_point.resize(max_seg_n,ag.fuse_nb,3)
airplane_centers_segs = np.concatenate((ag.fuse_center_seg_point,\
ag.wing_center_seg_point),1)
# CoG evalution
center_of_gravity = []
center_of_gravity.append(round(np.sum(airplane_centers_segs[:,:,0]\
*mass_seg_i) / mtom,3))
center_of_gravity.append(round(np.sum(airplane_centers_segs[:,:,1]\
*mass_seg_i) / mtom,3))
center_of_gravity.append(round(np.sum(airplane_centers_segs[:,:,2]\
*mass_seg_i) / mtom,3))
for i in range(1,4):
if abs(center_of_gravity[i-1]) < 10**(-5):
center_of_gravity[i-1] = 0.0
return(center_of_gravity, mass_seg_i, airplane_centers_segs)
#=============================================================================
# MAIN
#=============================================================================
if __name__ == '__main__':
log.warning('##########################################################')
log.warning('### ERROR NOT A STANDALONE PROGRAM, RUN balancemain.py ###')
log.warning('##########################################################')
| [
"numpy.amax",
"numpy.zeros",
"numpy.sum",
"numpy.concatenate"
] | [((3712, 3741), 'numpy.zeros', 'np.zeros', (['(max_seg_n, tot_nb)'], {}), '((max_seg_n, tot_nb))\n', (3720, 3741), True, 'import numpy as np\n'), ((6427, 6498), 'numpy.concatenate', 'np.concatenate', (['(ag.fuse_center_seg_point, ag.wing_center_seg_point)', '(1)'], {}), '((ag.fuse_center_seg_point, ag.wing_center_seg_point), 1)\n', (6441, 6498), True, 'import numpy as np\n'), ((3790, 3809), 'numpy.sum', 'np.sum', (['ag.fuse_vol'], {}), '(ag.fuse_vol)\n', (3796, 3809), True, 'import numpy as np\n'), ((3968, 3993), 'numpy.sum', 'np.sum', (['ag.fuse_cabin_vol'], {}), '(ag.fuse_cabin_vol)\n', (3974, 3993), True, 'import numpy as np\n'), ((2795, 2818), 'numpy.amax', 'np.amax', (['ag.fuse_seg_nb'], {}), '(ag.fuse_seg_nb)\n', (2802, 2818), True, 'import numpy as np\n'), ((2820, 2843), 'numpy.amax', 'np.amax', (['ag.wing_seg_nb'], {}), '(ag.wing_seg_nb)\n', (2827, 2843), True, 'import numpy as np\n'), ((3568, 3618), 'numpy.amax', 'np.amax', (['ag.wing_center_seg_point[:, i + s - 1, 0]'], {}), '(ag.wing_center_seg_point[:, i + s - 1, 0])\n', (3575, 3618), True, 'import numpy as np\n'), ((6627, 6678), 'numpy.sum', 'np.sum', (['(airplane_centers_segs[:, :, 0] * mass_seg_i)'], {}), '(airplane_centers_segs[:, :, 0] * mass_seg_i)\n', (6633, 6678), True, 'import numpy as np\n'), ((6752, 6803), 'numpy.sum', 'np.sum', (['(airplane_centers_segs[:, :, 1] * mass_seg_i)'], {}), '(airplane_centers_segs[:, :, 1] * mass_seg_i)\n', (6758, 6803), True, 'import numpy as np\n'), ((6877, 6928), 'numpy.sum', 'np.sum', (['(airplane_centers_segs[:, :, 2] * mass_seg_i)'], {}), '(airplane_centers_segs[:, :, 2] * mass_seg_i)\n', (6883, 6928), True, 'import numpy as np\n'), ((5581, 5599), 'numpy.sum', 'np.sum', (['mass_seg_i'], {}), '(mass_seg_i)\n', (5587, 5599), True, 'import numpy as np\n'), ((5692, 5710), 'numpy.sum', 'np.sum', (['mass_seg_i'], {}), '(mass_seg_i)\n', (5698, 5710), True, 'import numpy as np\n')] |
# Lint as: python3
"""Tests for epi_forecast_stat_mech.high_level."""
import collections
import functools
from absl.testing import absltest
from absl.testing import parameterized
from epi_forecast_stat_mech import high_level
from epi_forecast_stat_mech import sir_sim
from epi_forecast_stat_mech.evaluation import run_on_data
from jax.config import config
import numpy as np
import sklearn
config.parse_flags_with_absl() # Necessary for running on TPU.
def create_synthetic_dataset(
seed=0,
num_epidemics=50,
num_important_cov=1,
num_unimportant_cov=2,
train_length=100,
prediction_length=10,
):
"""Creates synthetic data."""
np.random.seed(seed) # TODO(shoyer): use np.random.RandomState
beta_fn = functools.partial(sir_sim.generate_betas_many_cov2,
num_pred=num_important_cov,
num_not_pred=num_unimportant_cov)
trajectories = sir_sim.generate_simulations(
beta_fn,
num_epidemics,
num_time_steps=train_length+prediction_length)
# This puts slightly more stress on the predict's time output.
trajectories['time'] = trajectories.time + 50
train_data, test_data = run_on_data.train_test_split_time(
trajectories, trajectories.time[-prediction_length])
return train_data, test_data
def create_synthetic_dynamic_dataset(
seed=0,
num_epidemics=25,
num_important_cov=1,
num_unimportant_cov=2,
num_time_steps=200,
):
"""Creates synthetic dynamic data."""
np.random.seed(seed)
beta_fn = functools.partial(
sir_sim.generate_betas_many_cov2,
num_pred=num_important_cov,
num_not_pred=num_unimportant_cov)
data = sir_sim.generate_social_distancing_simulations(
beta_fn, sir_sim.gen_social_distancing_weight, num_epidemics,
num_time_steps)
data = data.sel(
time=((data.new_infections.sum('location') >= 1).cumsum('time') >= 1))
data = data.sel(location=(data.new_infections.sum('time') >= 100))
train_data, test_data = run_on_data.train_test_split_time(
data, data.canonical_split_time)
return train_data, test_data
class TestHighLevelStatMech(absltest.TestCase):
"""Tests for StatMech high_level module."""
def test_StatMechEstimator(self):
"""Verify we can fit and predict from StatMechEstimator."""
num_samples = 11 # number of 'roll out' samples.
train_data, test_data = create_synthetic_dataset()
estimator = high_level.StatMechEstimator(train_steps=1000).fit(train_data)
_ = estimator.mech_params.to_netcdf()
predictions = estimator.predict(test_data, num_samples)
self.assertCountEqual(['location', 'sample', 'time'], predictions.dims)
np.testing.assert_array_equal(predictions.time, test_data.time)
np.testing.assert_array_equal(train_data.location, predictions.location)
self.assertLen(predictions.sample, num_samples)
class TestHighLevelRtLive(absltest.TestCase):
"""Tests for RtLive high_level module."""
def test_RtLiveEstimator(self):
"""Verify we can fit and predict from RtLiveEstimator."""
num_samples = 11 # number of 'roll out' samples.
train_data, test_data = create_synthetic_dataset()
estimator = high_level.RtLiveEstimator(gamma=1.0).fit(train_data)
predictions = estimator.predict(test_data, num_samples)
self.assertCountEqual(['location', 'sample', 'time'], predictions.dims)
np.testing.assert_array_equal(predictions.time, test_data.time)
np.testing.assert_array_equal(train_data.location, predictions.location)
self.assertLen(predictions.sample, num_samples)
class TestGetEstimatorDict(absltest.TestCase):
"""Tests for get_estimator_dict."""
def test_get_estimator_dict(self):
_ = high_level.get_estimator_dict()
class TestEstimatorDictEstimator(parameterized.TestCase):
"""Tests for high_level.get_estimator_dict estimators."""
@parameterized.parameters(
dict(estimator_name='iterative_randomforest__VC'),
dict(estimator_name='iterative_mean__Gaussian_PL'),
)
def test_EstimatorDictEstimator(self, estimator_name):
"""Verify we can fit and predict from the named estimator.
This test requires mech_params and mech_params_hat methods.
Args:
estimator_name: a key into high_level.get_estimator_dict().
"""
num_samples = 11 # number of 'roll out' samples.
train_data, test_data = create_synthetic_dataset()
estimator = high_level.get_estimator_dict()[estimator_name]
estimator.fit(train_data)
_ = estimator.mech_params.to_netcdf()
_ = estimator.mech_params_hat.to_netcdf()
predictions = estimator.predict(test_data, num_samples)
self.assertCountEqual(['location', 'sample', 'time'], predictions.dims)
np.testing.assert_array_equal(predictions.time, test_data.time)
np.testing.assert_array_equal(train_data.location, predictions.location)
self.assertLen(predictions.sample, num_samples)
@parameterized.parameters(
dict(estimator_name='LSML_Gaussian_PL_Linear_ObsEnc'),
dict(estimator_name='LSML_VC_PlainLinear_ObsEnc'),
dict(estimator_name='LSML_Turner_Linear_ObsEnc_6wk_plugin'),
)
def test_EstimatorDictEstimatorWithCoef(self, estimator_name):
"""Verify we can fit and predict from the named estimator.
This test requires mech_params as well as alpha and intercept.
Args:
estimator_name: a key into high_level.get_estimator_dict().
"""
num_samples = 11 # number of 'roll out' samples.
train_data, test_data = create_synthetic_dataset()
estimator = high_level.get_estimator_dict()[estimator_name]
estimator.fit(train_data)
_ = estimator.alpha.to_netcdf()
_ = estimator.intercept.to_netcdf()
_ = estimator.mech_params.to_netcdf()
predictions = estimator.predict(test_data, num_samples)
self.assertCountEqual(['location', 'sample', 'time'], predictions.dims)
np.testing.assert_array_equal(predictions.time, test_data.time)
np.testing.assert_array_equal(train_data.location, predictions.location)
self.assertLen(predictions.sample, num_samples)
@parameterized.parameters(
dict(
estimator_name='iterative_randomforest__DynamicMultiplicative',
run_it=False),
dict(
estimator_name='iterative_mean__DynamicBaselineSEIRModel',
run_it=False),
)
def test_DynamicEstimatorDictEstimator(self, estimator_name, run_it):
"""Verify we can fit and predict from the named estimator.
This test requires mech_params and mech_params_hat methods.
Args:
estimator_name: a key into high_level.get_estimator_dict().
"""
num_samples = 11
estimator = high_level.get_estimator_dict()[estimator_name]
# I'm conditionally disabling this code to reduce timeout issues.
if run_it:
train_data, test_data = create_synthetic_dynamic_dataset()
estimator.fit(train_data)
_ = estimator.mech_params.to_netcdf()
_ = estimator.mech_params_hat.to_netcdf()
predictions = estimator.predict(test_data, num_samples)
self.assertCountEqual(['location', 'sample', 'time'], predictions.dims)
np.testing.assert_array_equal(predictions.time, test_data.time)
np.testing.assert_array_equal(train_data.location, predictions.location)
self.assertLen(predictions.sample, num_samples)
if __name__ == '__main__':
absltest.main()
| [
"epi_forecast_stat_mech.sir_sim.generate_social_distancing_simulations",
"functools.partial",
"absl.testing.absltest.main",
"numpy.random.seed",
"epi_forecast_stat_mech.high_level.RtLiveEstimator",
"epi_forecast_stat_mech.high_level.get_estimator_dict",
"numpy.testing.assert_array_equal",
"epi_forecas... | [((395, 425), 'jax.config.config.parse_flags_with_absl', 'config.parse_flags_with_absl', ([], {}), '()\n', (423, 425), False, 'from jax.config import config\n'), ((662, 682), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (676, 682), True, 'import numpy as np\n'), ((738, 856), 'functools.partial', 'functools.partial', (['sir_sim.generate_betas_many_cov2'], {'num_pred': 'num_important_cov', 'num_not_pred': 'num_unimportant_cov'}), '(sir_sim.generate_betas_many_cov2, num_pred=\n num_important_cov, num_not_pred=num_unimportant_cov)\n', (755, 856), False, 'import functools\n'), ((930, 1036), 'epi_forecast_stat_mech.sir_sim.generate_simulations', 'sir_sim.generate_simulations', (['beta_fn', 'num_epidemics'], {'num_time_steps': '(train_length + prediction_length)'}), '(beta_fn, num_epidemics, num_time_steps=\n train_length + prediction_length)\n', (958, 1036), False, 'from epi_forecast_stat_mech import sir_sim\n'), ((1190, 1281), 'epi_forecast_stat_mech.evaluation.run_on_data.train_test_split_time', 'run_on_data.train_test_split_time', (['trajectories', 'trajectories.time[-prediction_length]'], {}), '(trajectories, trajectories.time[-\n prediction_length])\n', (1223, 1281), False, 'from epi_forecast_stat_mech.evaluation import run_on_data\n'), ((1511, 1531), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1525, 1531), True, 'import numpy as np\n'), ((1544, 1662), 'functools.partial', 'functools.partial', (['sir_sim.generate_betas_many_cov2'], {'num_pred': 'num_important_cov', 'num_not_pred': 'num_unimportant_cov'}), '(sir_sim.generate_betas_many_cov2, num_pred=\n num_important_cov, num_not_pred=num_unimportant_cov)\n', (1561, 1662), False, 'import functools\n'), ((1686, 1815), 'epi_forecast_stat_mech.sir_sim.generate_social_distancing_simulations', 'sir_sim.generate_social_distancing_simulations', (['beta_fn', 'sir_sim.gen_social_distancing_weight', 'num_epidemics', 'num_time_steps'], {}), '(beta_fn, sir_sim.\n 
gen_social_distancing_weight, num_epidemics, num_time_steps)\n', (1732, 1815), False, 'from epi_forecast_stat_mech import sir_sim\n'), ((2016, 2082), 'epi_forecast_stat_mech.evaluation.run_on_data.train_test_split_time', 'run_on_data.train_test_split_time', (['data', 'data.canonical_split_time'], {}), '(data, data.canonical_split_time)\n', (2049, 2082), False, 'from epi_forecast_stat_mech.evaluation import run_on_data\n'), ((7342, 7357), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (7355, 7357), False, 'from absl.testing import absltest\n'), ((2690, 2753), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['predictions.time', 'test_data.time'], {}), '(predictions.time, test_data.time)\n', (2719, 2753), True, 'import numpy as np\n'), ((2758, 2830), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['train_data.location', 'predictions.location'], {}), '(train_data.location, predictions.location)\n', (2787, 2830), True, 'import numpy as np\n'), ((3392, 3455), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['predictions.time', 'test_data.time'], {}), '(predictions.time, test_data.time)\n', (3421, 3455), True, 'import numpy as np\n'), ((3460, 3532), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['train_data.location', 'predictions.location'], {}), '(train_data.location, predictions.location)\n', (3489, 3532), True, 'import numpy as np\n'), ((3718, 3749), 'epi_forecast_stat_mech.high_level.get_estimator_dict', 'high_level.get_estimator_dict', ([], {}), '()\n', (3747, 3749), False, 'from epi_forecast_stat_mech import high_level\n'), ((4722, 4785), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['predictions.time', 'test_data.time'], {}), '(predictions.time, test_data.time)\n', (4751, 4785), True, 'import numpy as np\n'), ((4790, 4862), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['train_data.location', 
'predictions.location'], {}), '(train_data.location, predictions.location)\n', (4819, 4862), True, 'import numpy as np\n'), ((5878, 5941), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['predictions.time', 'test_data.time'], {}), '(predictions.time, test_data.time)\n', (5907, 5941), True, 'import numpy as np\n'), ((5946, 6018), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['train_data.location', 'predictions.location'], {}), '(train_data.location, predictions.location)\n', (5975, 6018), True, 'import numpy as np\n'), ((4415, 4446), 'epi_forecast_stat_mech.high_level.get_estimator_dict', 'high_level.get_estimator_dict', ([], {}), '()\n', (4444, 4446), False, 'from epi_forecast_stat_mech import high_level\n'), ((5541, 5572), 'epi_forecast_stat_mech.high_level.get_estimator_dict', 'high_level.get_estimator_dict', ([], {}), '()\n', (5570, 5572), False, 'from epi_forecast_stat_mech import high_level\n'), ((6645, 6676), 'epi_forecast_stat_mech.high_level.get_estimator_dict', 'high_level.get_estimator_dict', ([], {}), '()\n', (6674, 6676), False, 'from epi_forecast_stat_mech import high_level\n'), ((7114, 7177), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['predictions.time', 'test_data.time'], {}), '(predictions.time, test_data.time)\n', (7143, 7177), True, 'import numpy as np\n'), ((7184, 7256), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['train_data.location', 'predictions.location'], {}), '(train_data.location, predictions.location)\n', (7213, 7256), True, 'import numpy as np\n'), ((2444, 2490), 'epi_forecast_stat_mech.high_level.StatMechEstimator', 'high_level.StatMechEstimator', ([], {'train_steps': '(1000)'}), '(train_steps=1000)\n', (2472, 2490), False, 'from epi_forecast_stat_mech import high_level\n'), ((3197, 3234), 'epi_forecast_stat_mech.high_level.RtLiveEstimator', 'high_level.RtLiveEstimator', ([], {'gamma': '(1.0)'}), '(gamma=1.0)\n', (3223, 3234), False, 
'from epi_forecast_stat_mech import high_level\n')] |
# Copyright (c) 2019 Toyota Research Institute. All rights reserved.
"""
This module contains basic campaign functionality. Objects
and logic in this module should be very generic and not
constrained to a particular mode of materials discovery.
Furthermore, the "Campaign" logic should be kept as simple
as possible.
"""
import os
import pickle
import json
import numpy as np
import pandas as pd
import shutil
from monty.json import MSONable
from camd.utils.data import s3_sync
from camd import CAMD_S3_BUCKET
from camd.agent.base import RandomAgent
class Campaign(MSONable):
"""
Campaign provides a sequential, workflow-like capability where an
Agent iterates over a candidate space to choose and execute
new Experiments, given a certain objective. The abstraction
follows closely the "scientific method". Agent is the entity
that suggests new Experiments.
Supporting entities are Analyzers and Finalizers. Framework
is flexible enough to implement many sequential learning or
optimization tasks, including active-learning, bayesian optimization
or black-box optimization with local or global optima search.
"""
def __init__(
self,
candidate_data,
agent,
experiment,
analyzer,
seed_data=None,
create_seed=False,
heuristic_stopper=np.inf,
s3_prefix=None,
s3_bucket=CAMD_S3_BUCKET,
path=None,
):
"""
Invokes a campaign from candidates, seed, agent, and other supporting
entities for decision-making, experiment, and analysis.
Args:
candidate_data (pd.DataFrame): List of uids for candidate
search space for active learning
agent (HypothesisAgent): a subclass of HypothesisAgent
experiment (Experiment): a subclass of Experiment
analyzer (Analyzer): a subclass of Analyzer
seed_data (pandas.DataFrame): Seed Data for active learning,
index is to be the assumed uid
create_seed (int): an initial seed size to create from the data
heuristic_stopper (int): If int, the heuristic stopper will
kick in to check if loop should be terminated after this
many iterations, if no discoveries in past #n loops.
s3_prefix (str): prefix which to prepend all s3 synced files with,
if None is specified, s3 syncing will not occur
s3_bucket (str): bucket name for s3 sync. If not specified,
CAMD will sync to the specified environment variable.
path (str): local path in which to execute the loop, defaults
to current folder if path is not provided
"""
# Cloud parameters
self.s3_prefix = s3_prefix
self.s3_bucket = s3_bucket
# Data parameters
self.candidate_data = candidate_data
self.seed_data = seed_data if seed_data is not None else pd.DataFrame()
self.create_seed = create_seed
self.history = pd.DataFrame()
# Object parameters
self.agent = agent
self.experiment = experiment
self.analyzer = analyzer
# Other parameters
# TODO: think about how to abstract this away from the loop
self.heuristic_stopper = heuristic_stopper
self.path = path if path else os.getcwd()
os.chdir(self.path)
# Internal data
self._exp_raw_results = None
# Check if there exists earlier iterations
if os.path.exists(os.path.join(self.path, "iteration.json")):
self.load("iteration")
self.initialized = True
else:
self.iteration = 0
self.initialized = False
if self.initialized:
self.create_seed = False
self.load("job_status")
self.experiment.job_status = self.job_status
self.load("experiment", method="pickle")
self.load("seed_data", method="pickle")
self.load("consumed_candidates")
self.load("loop_state", no_exist_fail=False)
self.initialized = True
else:
self.submitted_experiment_requests = []
self.consumed_candidates = []
self.job_status = {}
self.initialized = False
self.loop_state = "UNSTARTED"
def run(self, finalize=False):
"""
This method applies a single iteration of the loop, and
keeps record of everything in place.
Each iteration consists of:
1. Get results of requested experiments
2. Load, Expand, Save seed_data
3. Augment candidate_space
4. Analyze results - Stop / Go
5. Hypothesize
6. Submit new experiments
"""
if not self.initialized:
raise ValueError("Campaign must be initialized.")
# Get new results
print("{} {} state: Getting new results".format(self.type, self.iteration))
self.experiment.monitor()
new_experimental_results = self.experiment.get_results()
os.chdir(self.path)
# Load seed_data
self.load("seed_data", method="pickle")
# Analyze new results
print("{} {} state: Analyzing results".format(self.type, self.iteration))
summary, new_seed_data = self.analyzer.analyze(
new_experimental_results, self.seed_data
)
# Augment summary and seed
self.history = self.history.append(summary)
self.history = self.history.reset_index(drop=True)
self.save("history", method="pickle")
self.seed_data = new_seed_data
self.save("seed_data", method="pickle")
# Remove candidates from candidate space
candidate_space = self.candidate_data.index.difference(
self.consumed_candidates, sort=False
).tolist()
self.candidate_data = self.candidate_data.loc[candidate_space]
if len(self.candidate_data) == 0:
print("Candidate data exhausted. Stopping loop.")
return False
# Campaign stopper if no discoveries in last few cycles.
if self.iteration > self.heuristic_stopper:
new_discoveries = self.history["new_discovery"][-3:].values.sum()
if new_discoveries == 0:
self.finalize()
print("Not enough new discoveries. Stopping the loop.")
return False
# Campaign stopper if finalization is desired but will be done
# outside of run (e.g. auto_loop)
if finalize:
return False
# Agent suggests new experiments
print(
"{} {} state: Agent {} hypothesizing".format(
self.type, self.iteration, self.agent.__class__.__name__
)
)
suggested_experiments = self.agent.get_hypotheses(
self.candidate_data, self.seed_data
)
# Campaign stopper if agent doesn't have anything to suggest.
if len(suggested_experiments) == 0:
self.finalize()
print("No agent suggestions. Stopping the loop.")
return False
# Experiments submitted
print("{} {} state: Running experiments".format(self.type, self.iteration))
self.job_status = self.experiment.submit(suggested_experiments)
self.save("job_status")
self.save("experiment", method="pickle")
self.consumed_candidates += suggested_experiments.index.values.tolist()
self.save("consumed_candidates")
self.iteration += 1
self.save("iteration")
return True
def auto_loop(self, n_iterations=10, monitor=False,
initialize=False, save_iterations=False):
"""
Runs the loop repeatedly, and locally. Contains
option for backing up the loop in enumerated
subdirectories for each iteration.
Args:
n_iterations (int): Number of iterations.
monitor (bool): Use Experiment's monitor method to
keep track of requested experiments.
initialize (bool): whether to initialize the loop
before starting
save_iterations (bool): whether or not to save
iterations in subdirectories of the working
directory
"""
if initialize:
self.initialize()
if save_iterations:
loop_backup(self.path, "-1")
while n_iterations - self.iteration >= 0:
print("Iteration: {}".format(self.iteration))
if not self.run():
break
print(" Waiting for next round ...")
if monitor:
self.experiment.monitor()
if save_iterations:
loop_backup(self.path, str(self.iteration - 1))
self.run(finalize=True)
self.finalize()
def initialize(self, random_state=42):
"""
Initializes a campaign. The primary goal of initialization is to ensure a proper seed exists. If create_seed
is set in Campaign, it creates the seed by deploying the RandomAgent before the user-provided agent is
deployed in the regular campaign iterations.
random_state (int): ensures reproducible results.
"""
if self.initialized:
raise ValueError("Initialization may overwrite existing loop data. Exit.")
if not self.seed_data.empty and not self.create_seed:
print(
"{} {} state: Agent {} hypothesizing".format(
self.type, "initialization", self.agent.__class__.__name__
)
)
suggested_experiments = self.agent.get_hypotheses(
self.candidate_data, self.seed_data
)
elif self.create_seed:
np.random.seed(seed=random_state)
_agent = RandomAgent(self.candidate_data, n_query=self.create_seed)
print(
"{} {} state: Agent {} hypothesizing".format(
self.type, "initialization", _agent.__class__.__name__
)
)
suggested_experiments = _agent.get_hypotheses(self.candidate_data)
else:
raise ValueError(
"No seed data available. Either supply or ask for creation."
)
self.analyzer._initial_seed_indices = self.seed_data.index.tolist()
print("{} {} state: Running experiments".format(self.type, self.iteration))
self.job_status = self.experiment.submit(suggested_experiments)
self.consumed_candidates = suggested_experiments.index.values.tolist()
self.create_seed = False
self.initialized = True
self.save("job_status")
self.save("seed_data", method="pickle")
self.save("experiment", method="pickle")
self.save("consumed_candidates")
self.save("iteration")
if self.s3_prefix:
self.s3_sync()
@property
def type(self):
"""
Convenience property for campaign type that
gets the class name, mostly for logging
Returns:
(str): class name
"""
return self.__class__.__name__
def finalize(self):
"""
Run finalization method for campaign
if analyzer has finalize method
Returns:
None
"""
print("Finalizing campaign.")
os.chdir(self.path)
if hasattr(self.analyzer, "finalize"):
self.analyzer.finalize(self.path)
if self.s3_prefix:
self.s3_sync()
def load(self, data_holder, method="json", no_exist_fail=True):
"""
Method to load stored object attributes
Args:
data_holder (str): attribute to be loaded
method (str): method by which to load object,
'pickle' and 'json' are currently supported
no_exist_fail (bool): whether to throw error
on non-existence of data
Returns:
None
"""
if method == "pickle":
m = pickle
mode = "rb"
elif method == "json":
m = json
mode = "r"
else:
raise ValueError("Unknown data save method")
file_name = os.path.join(self.path, data_holder + "." + method)
exists = os.path.exists(file_name)
if exists:
with open(file_name, mode) as f:
self.__setattr__(data_holder, m.load(f))
else:
if no_exist_fail:
raise IOError("No {} file exists".format(data_holder))
else:
self.__setattr__(data_holder, None)
def save(self, data_holder, custom_name=None, method="json"):
"""
Save method for storing campaign data
Args:
data_holder (str): attribute to be written to file
custom_name (str): custom filename if desired
method (str): method option for data storage,
'json' or 'pickle' are supported
Returns:
None
"""
if custom_name:
_path = os.path.join(self.path, custom_name)
else:
_path = os.path.join(self.path, data_holder + "." + method)
if method == "pickle":
m = pickle
mode = "wb"
elif method == "json":
m = json
mode = "w"
else:
raise ValueError("Unknown data save method")
with open(_path, mode) as f:
m.dump(self.__getattribute__(data_holder), f)
if self.s3_prefix:
self.s3_sync()
def s3_sync(self):
"""
Syncs current run to s3_prefix and bucket
"""
s3_sync(self.s3_bucket, self.s3_prefix, self.path)
def loop_backup(source_dir, target_dir):
"""
Helper method to backup finished loop iterations.
Args:
source_dir (str, Path): directory to be backed up
target_dir (str, Path): directory to back up to
Returns:
(None)
"""
os.mkdir(os.path.join(source_dir, target_dir))
_files = os.listdir(source_dir)
for file_name in _files:
full_file_name = os.path.join(source_dir, file_name)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, target_dir)
| [
"pandas.DataFrame",
"numpy.random.seed",
"os.getcwd",
"camd.agent.base.RandomAgent",
"os.path.exists",
"camd.utils.data.s3_sync",
"os.path.isfile",
"os.chdir",
"os.path.join",
"os.listdir",
"shutil.copy"
] | [((14269, 14291), 'os.listdir', 'os.listdir', (['source_dir'], {}), '(source_dir)\n', (14279, 14291), False, 'import os\n'), ((3069, 3083), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3081, 3083), True, 'import pandas as pd\n'), ((3415, 3434), 'os.chdir', 'os.chdir', (['self.path'], {}), '(self.path)\n', (3423, 3434), False, 'import os\n'), ((5157, 5176), 'os.chdir', 'os.chdir', (['self.path'], {}), '(self.path)\n', (5165, 5176), False, 'import os\n'), ((11547, 11566), 'os.chdir', 'os.chdir', (['self.path'], {}), '(self.path)\n', (11555, 11566), False, 'import os\n'), ((12421, 12472), 'os.path.join', 'os.path.join', (['self.path', "(data_holder + '.' + method)"], {}), "(self.path, data_holder + '.' + method)\n", (12433, 12472), False, 'import os\n'), ((12490, 12515), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (12504, 12515), False, 'import os\n'), ((13886, 13936), 'camd.utils.data.s3_sync', 's3_sync', (['self.s3_bucket', 'self.s3_prefix', 'self.path'], {}), '(self.s3_bucket, self.s3_prefix, self.path)\n', (13893, 13936), False, 'from camd.utils.data import s3_sync\n'), ((14218, 14254), 'os.path.join', 'os.path.join', (['source_dir', 'target_dir'], {}), '(source_dir, target_dir)\n', (14230, 14254), False, 'import os\n'), ((14346, 14381), 'os.path.join', 'os.path.join', (['source_dir', 'file_name'], {}), '(source_dir, file_name)\n', (14358, 14381), False, 'import os\n'), ((14393, 14423), 'os.path.isfile', 'os.path.isfile', (['full_file_name'], {}), '(full_file_name)\n', (14407, 14423), False, 'import os\n'), ((2992, 3006), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3004, 3006), True, 'import pandas as pd\n'), ((3395, 3406), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3404, 3406), False, 'import os\n'), ((3575, 3616), 'os.path.join', 'os.path.join', (['self.path', '"""iteration.json"""'], {}), "(self.path, 'iteration.json')\n", (3587, 3616), False, 'import os\n'), ((13283, 13319), 'os.path.join', 
'os.path.join', (['self.path', 'custom_name'], {}), '(self.path, custom_name)\n', (13295, 13319), False, 'import os\n'), ((13354, 13405), 'os.path.join', 'os.path.join', (['self.path', "(data_holder + '.' + method)"], {}), "(self.path, data_holder + '.' + method)\n", (13366, 13405), False, 'import os\n'), ((14437, 14476), 'shutil.copy', 'shutil.copy', (['full_file_name', 'target_dir'], {}), '(full_file_name, target_dir)\n', (14448, 14476), False, 'import shutil\n'), ((9935, 9968), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'random_state'}), '(seed=random_state)\n', (9949, 9968), True, 'import numpy as np\n'), ((9990, 10048), 'camd.agent.base.RandomAgent', 'RandomAgent', (['self.candidate_data'], {'n_query': 'self.create_seed'}), '(self.candidate_data, n_query=self.create_seed)\n', (10001, 10048), False, 'from camd.agent.base import RandomAgent\n')] |
import numpy as np
import matplotlib.pyplot as plt
import itertools
from sklearn import metrics
import pandas as pd
import statsmodels.api as sm
from corrplots import partialcorr
from functools import partial
from scipy import stats
import cycluster as cy
# Public API of this module; underscore-prefixed helpers are intentionally
# excluded from star-imports.
__all__ = ['compareClusters',
           'alignClusters',
           'crossCompartmentCorr',
           'pwdistComp',
           'pwdistCompXY',
           'pwdistCompCI',
           'moduleCorrRatio']
def compareClusters(labelsA, labelsB, method='ARI', alignFirst=True, useCommon=False):
    """Compare two cluster labelings of the same items.

    Requires that labelsA and labelsB share the same index (or set
    useCommon=True to restrict to the common index first).

    Parameters
    ----------
    labelsA, labelsB : pd.Series
        Cluster labels indexed by item; both must contain the same set of
        unique labels.
    method : str
        'ARI' (adjusted Rand index), 'AMI' (adjusted mutual information) or
        'overlap' (per-cluster mean membership agreement).
    alignFirst : bool
        If True, rename labelsB's clusters via alignClusters before scoring.
    useCommon : bool
        If True, inner-align the two Series on their index first.

    Returns
    -------
    float for 'ARI'/'AMI', or np.ndarray (one score per cluster) for 'overlap'.

    Raises
    ------
    ValueError
        If method is not one of 'ARI', 'AMI', 'overlap'.
        (Previously an unknown method fell through and raised NameError.)
    """
    if useCommon:
        labelsA, labelsB = labelsA.align(labelsB, join='inner')
    assert len(labelsA.index) == len(labelsB.index)
    assert (labelsA.index == labelsB.index).sum() == len(labelsA.index)
    uLabels = np.unique(labelsA)
    # Both labelings must use the same label set for alignment to make sense.
    assert (uLabels == np.unique(labelsB)).sum() == uLabels.shape[0]
    if alignFirst:
        alignedB = alignClusters(labelsA, labelsB)
    else:
        alignedB = labelsB
    if method == 'ARI':
        s = metrics.adjusted_rand_score(labelsA.values, alignedB.values)
    elif method == 'AMI':
        s = metrics.adjusted_mutual_info_score(labelsA.values, alignedB.values)
    elif method == 'overlap':
        s = np.zeros(uLabels.shape[0])
        for labi, lab in enumerate(uLabels):
            membersA = labelsA.index[labelsA == lab]
            membersB = alignedB.index[alignedB == lab]
            # Fraction of each cluster's members also present in the other
            # labeling's cluster; score is the mean of the two fractions.
            accA = np.sum([1 for m in membersA if m in membersB]) / len(membersA)
            accB = np.sum([1 for m in membersB if m in membersA]) / len(membersB)
            s[labi] = (accA + accB) / 2
    else:
        raise ValueError("method must be 'ARI', 'AMI' or 'overlap', got %r" % (method,))
    return s
def _alignClusterMats(matA, matB):
"""Returns a copy of matB with columns shuffled to maximize overlap with matA
matX is a representation of cluster labels using a sparse\binary np.ndarray
with labels along the columns"""
out = matB.copy()
nCols = matA.shape[1]
swaps = {}
for colA in range(nCols):
match = np.argmax([(matA[:, colA] * matB[:, colB]).sum() for colB in range(nCols)])
swaps.update({match:colA})
if len(swaps) == nCols:
"""Easy 1:1 matching"""
for colB, colA in list(swaps.items()):
out[:, colA] = matB[:, colB]
"""In case the clusters aren't clearly 1:1 then try extra swaps until the optimum is found"""
niters = 0
while True:
swaps = []
curProd = (matA * out).sum()
for ai, bi in itertools.product(list(range(nCols)), list(range(nCols))):
ind = np.arange(nCols)
ind[ai] = bi
ind[bi] = ai
newProd = (matA * out[:, ind]).sum()
if curProd < newProd:
swaps.append((ai, bi, newProd))
if len(swaps) == 0:
break
else:
ai, bi, newProd = swaps[np.argmax([x[2] for x in swaps])]
ind = np.arange(nCols)
ind[ai] = bi
ind[bi] = ai
out = out[:, ind]
return out
def _alignSparseDf(dfA, dfB):
    """Align dfB's indicator columns to dfA's via _alignClusterMats.

    Returns a DataFrame with dfB's index/columns but values permuted so
    cluster columns best overlap those of dfA.
    """
    aligned = _alignClusterMats(dfA.values, dfB.values)
    return pd.DataFrame(aligned, index=dfB.index, columns=dfB.columns)
def _labels2sparseDf(labels):
labelCols = np.unique(labels)
clusterMat = np.zeros((labels.shape[0], labelCols.shape[0]), dtype = np.int32)
for labi, lab in enumerate(labelCols):
ind = (labels==lab).values
clusterMat[ind, labi] = 1
return pd.DataFrame(clusterMat, index = labels.index, columns = labelCols)
def _sparseDf2labels(sparseDf):
labels = pd.Series(np.zeros(sparseDf.shape[0]), index = sparseDf.index)
for clusterCol in sparseDf.columns:
labels[sparseDf[clusterCol].astype(bool)] = clusterCol
return labels.astype(np.int32)
def alignClusters(labelsA, labelsB):
    """Return a copy of labelsB whose cluster ids are renamed to best overlap labelsA."""
    alignedSparse = _alignSparseDf(_labels2sparseDf(labelsA),
                                   _labels2sparseDf(labelsB))
    return _sparseDf2labels(alignedSparse)
def crossCompartmentCorr(dfA, dfB, method='pearson'):
    """Cytokine correlation for those that are common to both A and B.

    For each shared column, correlates its values in dfA against dfB on
    rows present in both (after dropping NaNs), then applies a
    Benjamini-Hochberg FDR correction across cytokines.

    Returns a DataFrame indexed by cytokine (sorted by rho ascending) with
    columns ['rho', 'pvalue', 'qvalue'].
    """
    cyList = np.array([cy for cy in dfA.columns if cy in dfB.columns])
    # Inner join on the index; suffixes distinguish the two compartments.
    joinedDf = pd.merge(dfA[cyList], dfB[cyList], suffixes=('_A', '_B'), left_index=True, right_index=True)
    tmpCorr = np.zeros((len(cyList), 3))
    for i, cy in enumerate(cyList):
        # Drop rows where either compartment is missing for this cytokine.
        tmp = joinedDf[[cy + '_A', cy + '_B']].dropna()
        tmpCorr[i, :2] = partialcorr(tmp[cy + '_A'], tmp[cy + '_B'], method=method)
    # Sort by correlation coefficient before multiple-testing correction.
    sorti = np.argsort(tmpCorr[:, 0])
    tmpCorr = tmpCorr[sorti,:]
    # Column 2 receives BH-adjusted q-values.
    _, tmpCorr[:, 2], _, _ = sm.stats.multipletests(tmpCorr[:, 1], method='fdr_bh')
    return pd.DataFrame(tmpCorr, index=cyList[sorti], columns=['rho', 'pvalue', 'qvalue'])
def pwdistCompXY(dmatA, dmatB):
    """Return unraveled upper triangles of the two distance matrices
    using only common columns.

    Parameters
    ----------
    dmatA, dmatB : pd.DataFrame [nfeatures x nfeatures]
        Symmetric pairwise distance matrices for comparison.
        Only common columns will be used for comparison (at least 3).

    Returns
    -------
    vecA, vecB : np.ndarray
        Upper-triangle (k=1) entries of each matrix restricted to the
        shared columns, in matching order."""
    common = [c for c in dmatA.columns if c in dmatB.columns.tolist()]
    iu = np.triu_indices(len(common), k=1)
    subA = dmatA[common].loc[common].values
    subB = dmatB[common].loc[common].values
    return subA[iu], subB[iu]
def pwdistComp(dmatA, dmatB, method='spearman', nperms=10000, returnPermutations=False):
    """Compare two pairwise distance matrices
    using a permutation test. Test the null-hypothesis that
    the pairwise distance matrices are uncorrelated.
    Note: comparison is based only on shared columns
    Parameters
    ----------
    dmatA, dmatB : pd.DataFrame [nfeatures x nfeatures]
        Symmetric pairwise distance matrices for comparison.
        Only common columns will be used for comparison (at least 3).
    method : str
        Method for comparison: "pearson", "spearman"
    nperms : int
        Number of permutations to compute p-value
    Returns
    -------
    stat : float
        Correlation statistic, rho, of all pairwise distances between cytokines.
    pvalue : float
        Two-sided pvalue testing the null hypothesis that
        the distance matrices of dfA and dfB are uncorrelated
    commonVars : list
        List of the common columns in A and B"""
    def corrComp(dmatA, dmatB, method):
        # Correlate only the upper triangles (k=1 excludes the diagonal).
        n = dmatB.shape[0]
        if method == 'pearson':
            rho, p = stats.pearsonr(dmatA[np.triu_indices(n, k=1)], dmatB[np.triu_indices(n, k=1)])
        elif method == 'spearman':
            rho, p = stats.spearmanr(dmatA[np.triu_indices(n, k=1)], dmatB[np.triu_indices(n, k=1)])
        else:
            raise ValueError('Must specify method as "pearson" or "spearman"')
        return rho
    cyVars = [c for c in dmatA.columns if c in dmatB.columns.tolist()]
    ncols = len(cyVars)
    compFunc = partial(corrComp, method=method)
    # Restrict both matrices to the shared feature set, same row/col order.
    dA = dmatA[cyVars].loc[cyVars].values
    dB = dmatB[cyVars].loc[cyVars].values
    stat = compFunc(dA, dB)
    # Null distribution: independently permute rows+columns of each matrix.
    # NOTE: uses the global np.random state, so results are not reproducible
    # unless the caller seeds it.
    permstats = np.zeros(nperms)
    for i in range(nperms):
        """Permutation of common columns"""
        rindA = np.random.permutation(ncols)
        rindB = np.random.permutation(ncols)
        permstats[i] = compFunc(dA[rindA,:][:, rindA], dB[rindB,:][:, rindB])
    # Add-one correction keeps the p-value strictly positive.
    pvalue = ((np.abs(permstats) > np.abs(stat)).sum() + 1)/(nperms + 1)
    out = (stat, pvalue, cyVars)
    if returnPermutations:
        return out + (permstats,)
    else:
        return out
def pwdistCompCI(dfA, dfB, dmatFunc=None, alpha=0.05, method='spearman', nstraps=10000, returnBootstraps=False):
    """Compare two pairwise distance matrices
    and compute bootstrap confidence intervals.
    Note: comparison is based only on shared columns
    Parameters
    ----------
    dfA, dfB : pd.DataFrame
        Raw data frames (participants x features); a distance matrix is
        computed from each bootstrap resample.
        Only common columns will be used for comparison (at least 3).
    dmatFunc : callable or None
        Function mapping a resampled data frame to a distance matrix.
        If None, the Pearson correlation matrix (DataFrame.corr) is used.
    method : str
        Method for comparison: "pearson", "spearman"
    nstraps : int
        Number of bootstraps used to compute confidence interval.
    Returns
    -------
    lb : float
        Lower bound of the confidence interval covering (1 - alpha)%
    stat : float
        Correlation statistic, rho, of all pairwise distances between cytokines.
    ub : float
        Upper bound of the confidence interval covering (1 - alpha)%"""
    def corrComp(dmatA, dmatB, method):
        # Correlate only the upper triangles (k=1 excludes the diagonal).
        n = dmatB.shape[0]
        if method == 'pearson':
            rho, p = stats.pearsonr(dmatA[np.triu_indices(n, k=1)], dmatB[np.triu_indices(n, k=1)])
        elif method == 'spearman':
            rho, p = stats.spearmanr(dmatA[np.triu_indices(n, k=1)], dmatB[np.triu_indices(n, k=1)])
        else:
            raise ValueError('Must specify method as "pearson" or "spearman"')
        return rho
    cyVars = [c for c in dfA.columns if c in dfB.columns.tolist()]
    ncols = len(cyVars)  # NOTE(review): currently unused
    compFunc = partial(corrComp, method=method)
    dA = dfA[cyVars]
    dB = dfB[cyVars]
    strapped = np.zeros(nstraps)
    for i in range(nstraps):
        # Resample participants (rows) with replacement, then rebuild the
        # distance matrix for each compartment and correlate them.
        if dmatFunc is None:
            tmpA = dA.sample(frac=1, replace=True, axis=0).corr()
            tmpB = dB.sample(frac=1, replace=True, axis=0).corr()
        else:
            tmpA = dmatFunc(dA.sample(frac=1, replace=True, axis=0))
            tmpB = dmatFunc(dB.sample(frac=1, replace=True, axis=0))
        strapped[i] = compFunc(tmpA.values, tmpB.values)
    # Percentiles of the bootstrap distribution give (lb, median, ub).
    out = tuple(np.percentile(strapped, [100*alpha/2, 50, 100*(1-alpha/2)]))
    if returnBootstraps:
        out += (strapped,)
    return out
def moduleCorrRatio(cyDf, labels, cyVars=None, alpha=0.05, nstraps=10000):
    """Compute all pairwise intra- and inter-module cytokine correlation
    coefficients with their IQRs.
    Additionally compute the intra : inter ratio with 95% CI, where the
    ratio is of signed-pearson correlation coefficients transformed to
    the [0,1] interval with 0 meaning perfect anti-correlation
    and 1 meaning perfect correlation
    For ratio, uses a signed Pearson correlation coefficient since this is what is used
    for clustering. The disadvantage is that it can't be described as fractional
    variance, while the upside is that it captures the potential problem with
    forming modules of anti-correlated cytokines.
    Parameters
    ----------
    cyDf : pd.DataFrame [n_participants x n_cytokines]
        Raw or normalized analyte log-concentrations.
    labels : pd.Series
        Module labels for each analyte
    Returns
    -------
    intra : np.ndarray shape (3,)
        Vector containing 25th, 50th and 75th quantiles of all cytokine pairs within the same module.
    inter : np.ndarray shape (3,)
        Vector containing 25th, 50th and 75th quantiles of all cytokine pairs from different modules.
    ratio : np.ndarray shape (3,)
        Vector containing the intra : inter correlation ratio with bootstrap 95% CI or (1 - alpha)%
        [LB, ratio, UB]"""
    def ratioFunc(cyDf, intraMask, interMask):
        """smat is on the [0, 1] interval with 0 meaning perfect anti-correlation and 1 meaning perfect correlation"""
        # Masks are 1 where the pair belongs to the group and NaN elsewhere,
        # so nanmean averages only the selected pairs.
        smat = 1 - cy.corrDmatFunc(cyDf, metric='pearson-signed').values
        return np.nanmean((smat * intraMask).ravel()) / np.nanmean((smat * interMask).ravel())
    if cyVars is None:
        cyVars = cyDf.columns.tolist()
    """corrmat is on the [-1, 1] interval with 1 meaning perfect correlation and -1 meaning perfect anti-correlation"""
    corrmat = cyDf[cyVars].corr()
    intra = []
    inter = []
    # NaN-filled masks; upper-triangle pair positions set to 1 below.
    intraMask = np.nan * np.zeros(corrmat.shape)
    interMask = np.nan * np.zeros(corrmat.shape)
    for a, b in itertools.combinations(cyVars, 2):
        if not a == b:
            s = corrmat.loc[a, b]
            i, j = cyVars.index(a), cyVars.index(b)
            # Same module -> intra pair; different modules -> inter pair.
            if labels[a] == labels[b]:
                intra.append(s)
                intraMask[i, j] = 1.
            else:
                inter.append(s)
                interMask[i, j] = 1.
    intra = np.percentile(intra, q=[25, 50, 75])
    inter = np.percentile(inter, q=[25, 50, 75])
    if nstraps is None or nstraps == 0:
        return intra, inter
    else:
        # Bootstrap over participants (rows) to get a CI for the ratio.
        rratios = np.zeros(nstraps)
        for strapi in range(nstraps):
            rratios[strapi] = ratioFunc(cyDf[cyVars].sample(frac=1, replace=True, axis=0), intraMask, interMask)
        ratio = np.percentile(rratios, [100*alpha/2, 50, 100*(1-alpha/2)])
return intra, inter, ratio | [
"numpy.sum",
"numpy.abs",
"numpy.argmax",
"sklearn.metrics.adjusted_mutual_info_score",
"numpy.argsort",
"numpy.arange",
"sklearn.metrics.adjusted_rand_score",
"corrplots.partialcorr",
"statsmodels.api.stats.multipletests",
"numpy.unique",
"pandas.DataFrame",
"pandas.merge",
"functools.parti... | [((831, 849), 'numpy.unique', 'np.unique', (['labelsA'], {}), '(labelsA)\n', (840, 849), True, 'import numpy as np\n'), ((3121, 3176), 'pandas.DataFrame', 'pd.DataFrame', (['out'], {'index': 'dfB.index', 'columns': 'dfB.columns'}), '(out, index=dfB.index, columns=dfB.columns)\n', (3133, 3176), True, 'import pandas as pd\n'), ((3228, 3245), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (3237, 3245), True, 'import numpy as np\n'), ((3263, 3326), 'numpy.zeros', 'np.zeros', (['(labels.shape[0], labelCols.shape[0])'], {'dtype': 'np.int32'}), '((labels.shape[0], labelCols.shape[0]), dtype=np.int32)\n', (3271, 3326), True, 'import numpy as np\n'), ((3452, 3515), 'pandas.DataFrame', 'pd.DataFrame', (['clusterMat'], {'index': 'labels.index', 'columns': 'labelCols'}), '(clusterMat, index=labels.index, columns=labelCols)\n', (3464, 3515), True, 'import pandas as pd\n'), ((4211, 4268), 'numpy.array', 'np.array', (['[cy for cy in dfA.columns if cy in dfB.columns]'], {}), '([cy for cy in dfA.columns if cy in dfB.columns])\n', (4219, 4268), True, 'import numpy as np\n'), ((4284, 4380), 'pandas.merge', 'pd.merge', (['dfA[cyList]', 'dfB[cyList]'], {'suffixes': "('_A', '_B')", 'left_index': '(True)', 'right_index': '(True)'}), "(dfA[cyList], dfB[cyList], suffixes=('_A', '_B'), left_index=True,\n right_index=True)\n", (4292, 4380), True, 'import pandas as pd\n'), ((4606, 4631), 'numpy.argsort', 'np.argsort', (['tmpCorr[:, 0]'], {}), '(tmpCorr[:, 0])\n', (4616, 4631), True, 'import numpy as np\n'), ((4692, 4746), 'statsmodels.api.stats.multipletests', 'sm.stats.multipletests', (['tmpCorr[:, 1]'], {'method': '"""fdr_bh"""'}), "(tmpCorr[:, 1], method='fdr_bh')\n", (4714, 4746), True, 'import statsmodels.api as sm\n'), ((4758, 4837), 'pandas.DataFrame', 'pd.DataFrame', (['tmpCorr'], {'index': 'cyList[sorti]', 'columns': "['rho', 'pvalue', 'qvalue']"}), "(tmpCorr, index=cyList[sorti], columns=['rho', 'pvalue', 'qvalue'])\n", (4770, 4837), True, 'import 
pandas as pd\n'), ((7053, 7085), 'functools.partial', 'partial', (['corrComp'], {'method': 'method'}), '(corrComp, method=method)\n', (7060, 7085), False, 'from functools import partial\n'), ((7220, 7236), 'numpy.zeros', 'np.zeros', (['nperms'], {}), '(nperms)\n', (7228, 7236), True, 'import numpy as np\n'), ((9157, 9189), 'functools.partial', 'partial', (['corrComp'], {'method': 'method'}), '(corrComp, method=method)\n', (9164, 9189), False, 'from functools import partial\n'), ((9253, 9270), 'numpy.zeros', 'np.zeros', (['nstraps'], {}), '(nstraps)\n', (9261, 9270), True, 'import numpy as np\n'), ((11907, 11940), 'itertools.combinations', 'itertools.combinations', (['cyVars', '(2)'], {}), '(cyVars, 2)\n', (11929, 11940), False, 'import itertools\n'), ((12259, 12295), 'numpy.percentile', 'np.percentile', (['intra'], {'q': '[25, 50, 75]'}), '(intra, q=[25, 50, 75])\n', (12272, 12295), True, 'import numpy as np\n'), ((12308, 12344), 'numpy.percentile', 'np.percentile', (['inter'], {'q': '[25, 50, 75]'}), '(inter, q=[25, 50, 75])\n', (12321, 12344), True, 'import numpy as np\n'), ((1064, 1124), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['labelsA.values', 'alignedB.values'], {}), '(labelsA.values, alignedB.values)\n', (1091, 1124), False, 'from sklearn import metrics\n'), ((3576, 3603), 'numpy.zeros', 'np.zeros', (['sparseDf.shape[0]'], {}), '(sparseDf.shape[0])\n', (3584, 3603), True, 'import numpy as np\n'), ((4535, 4593), 'corrplots.partialcorr', 'partialcorr', (["tmp[cy + '_A']", "tmp[cy + '_B']"], {'method': 'method'}), "(tmp[cy + '_A'], tmp[cy + '_B'], method=method)\n", (4546, 4593), False, 'from corrplots import partialcorr\n'), ((5379, 5402), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (5394, 5402), True, 'import numpy as np\n'), ((5448, 5471), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (5463, 5471), True, 'import numpy as np\n'), ((7325, 7353), 
'numpy.random.permutation', 'np.random.permutation', (['ncols'], {}), '(ncols)\n', (7346, 7353), True, 'import numpy as np\n'), ((7370, 7398), 'numpy.random.permutation', 'np.random.permutation', (['ncols'], {}), '(ncols)\n', (7391, 7398), True, 'import numpy as np\n'), ((9691, 9760), 'numpy.percentile', 'np.percentile', (['strapped', '[100 * alpha / 2, 50, 100 * (1 - alpha / 2)]'], {}), '(strapped, [100 * alpha / 2, 50, 100 * (1 - alpha / 2)])\n', (9704, 9760), True, 'import numpy as np\n'), ((11818, 11841), 'numpy.zeros', 'np.zeros', (['corrmat.shape'], {}), '(corrmat.shape)\n', (11826, 11841), True, 'import numpy as np\n'), ((11867, 11890), 'numpy.zeros', 'np.zeros', (['corrmat.shape'], {}), '(corrmat.shape)\n', (11875, 11890), True, 'import numpy as np\n'), ((12447, 12464), 'numpy.zeros', 'np.zeros', (['nstraps'], {}), '(nstraps)\n', (12455, 12464), True, 'import numpy as np\n'), ((12632, 12700), 'numpy.percentile', 'np.percentile', (['rratios', '[100 * alpha / 2, 50, 100 * (1 - alpha / 2)]'], {}), '(rratios, [100 * alpha / 2, 50, 100 * (1 - alpha / 2)])\n', (12645, 12700), True, 'import numpy as np\n'), ((1163, 1230), 'sklearn.metrics.adjusted_mutual_info_score', 'metrics.adjusted_mutual_info_score', (['labelsA.values', 'alignedB.values'], {}), '(labelsA.values, alignedB.values)\n', (1197, 1230), False, 'from sklearn import metrics\n'), ((2569, 2585), 'numpy.arange', 'np.arange', (['nCols'], {}), '(nCols)\n', (2578, 2585), True, 'import numpy as np\n'), ((2915, 2931), 'numpy.arange', 'np.arange', (['nCols'], {}), '(nCols)\n', (2924, 2931), True, 'import numpy as np\n'), ((1273, 1299), 'numpy.zeros', 'np.zeros', (['uLabels.shape[0]'], {}), '(uLabels.shape[0])\n', (1281, 1299), True, 'import numpy as np\n'), ((2863, 2895), 'numpy.argmax', 'np.argmax', (['[x[2] for x in swaps]'], {}), '([x[2] for x in swaps])\n', (2872, 2895), True, 'import numpy as np\n'), ((11395, 11441), 'cycluster.corrDmatFunc', 'cy.corrDmatFunc', (['cyDf'], {'metric': 
'"""pearson-signed"""'}), "(cyDf, metric='pearson-signed')\n", (11410, 11441), True, 'import cycluster as cy\n'), ((873, 891), 'numpy.unique', 'np.unique', (['labelsB'], {}), '(labelsB)\n', (882, 891), True, 'import numpy as np\n'), ((6631, 6654), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (6646, 6654), True, 'import numpy as np\n'), ((6663, 6686), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (6678, 6686), True, 'import numpy as np\n'), ((8743, 8766), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (8758, 8766), True, 'import numpy as np\n'), ((8775, 8798), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (8790, 8798), True, 'import numpy as np\n'), ((1472, 1522), 'numpy.sum', 'np.sum', (['[(1) for cy in membersA if cy in membersB]'], {}), '([(1) for cy in membersA if cy in membersB])\n', (1478, 1522), True, 'import numpy as np\n'), ((1556, 1606), 'numpy.sum', 'np.sum', (['[(1) for cy in membersB if cy in membersA]'], {}), '([(1) for cy in membersB if cy in membersA])\n', (1562, 1606), True, 'import numpy as np\n'), ((6767, 6790), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (6782, 6790), True, 'import numpy as np\n'), ((6799, 6822), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (6814, 6822), True, 'import numpy as np\n'), ((7492, 7509), 'numpy.abs', 'np.abs', (['permstats'], {}), '(permstats)\n', (7498, 7509), True, 'import numpy as np\n'), ((7512, 7524), 'numpy.abs', 'np.abs', (['stat'], {}), '(stat)\n', (7518, 7524), True, 'import numpy as np\n'), ((8879, 8902), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (8894, 8902), True, 'import numpy as np\n'), ((8911, 8934), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (8926, 8934), True, 'import numpy as np\n')] |
import struct
import uuid
import asyncio
import numpy as np
from bleak import BleakClient
from bleak import discover
# from bleak import _logger as logger
class TindeqProgressor(object):
    """Async Bluetooth LE client for the Tindeq Progressor force gauge.

    Commands are written as little-endian opcodes to ``write_uuid``;
    responses and streamed weight samples arrive on ``notify_uuid`` and are
    dispatched by :meth:`_notify_handler`.
    """
    # First byte of every notification identifies the payload type.
    response_codes = {
        'cmd_resp': 0, 'weight_measure': 1, 'low_pwr': 4
    }
    # Opcodes accepted on the write (control) characteristic.
    cmds = dict(
        TARE_SCALE=0x64,
        START_WEIGHT_MEAS=0x65,
        STOP_WEIGHT_MEAS=0x66,
        START_PEAK_RFD_MEAS=0x67,
        START_PEAK_RFD_MEAS_SERIES=0x68,
        ADD_CALIB_POINT=0x69,
        SAVE_CALIB=0x6a,
        GET_APP_VERSION=0x6b,
        GET_ERR_INFO=0x6c,
        CLR_ERR_INFO=0x6d,
        SLEEP=0x6e,
        GET_BATT_VLTG=0x6f
    )
    # GATT UUIDs of the Progressor's service and its two characteristics.
    service_uuid = '7e4e1701-1ea6-40c9-9dcc-13d34ffead57'
    write_uuid = '7e4e1703-1ea6-40c9-9dcc-13d34ffead57'
    notify_uuid = '7e4e1702-1ea6-40c9-9dcc-13d34ffead57'
    def __init__(self, parent):
        """
        Uses Bluetooth 4 (LE) to communicate with Tindeq Progressor
        Send bytes to write UUID to control the device. Current weight or
        rate of force is reported on the notify UUID, so use callbacks to do
        something when you receive info on this UUID.
        Use as a context manager:
        >>> async with TindeqProgressor(parent) as tindeq:
        >>>     await tindeq.get_batt()
        Parameters
        ----------
        parent: object
            An owning class that implements callbacks specifying
            what to do when receiving weight notifications
        """
        self.parent = parent
        # Notification header: (kind: int8, size: int8).
        self.info_struct = struct.Struct('<bb')
        # Weight sample payload: (weight: float32, timestamp_us: int32).
        self.data_struct = struct.Struct('<fl')
        # Offset subtracted from every reported weight; set by soft_tare().
        self._tare_value = 0.0
    async def __aenter__(self):
        await self.connect()
        return self
    async def __aexit__(self, *excinfo):
        await self.disconnect()
    def _notify_handler(self, sender, data):
        """
        Simply pass on payload to correct handler
        """
        data = bytes(data)
        kind, size = self.info_struct.unpack(data[:2])
        if kind == self.response_codes['weight_measure']:
            # decode data: the payload may batch several (weight, time) pairs.
            for weight, useconds in self.data_struct.iter_unpack(data[2:]):
                now = useconds/1.0e6
                self.parent.log_force_sample(now, weight - self._tare_value)
        elif kind == self.response_codes['cmd_resp']:
            self._cmd_response(data)
        elif kind == self.response_codes['low_pwr']:
            print('low power warning')
        else:
            raise RuntimeError(f'unknown msg kind {kind}')
    def _cmd_response(self, value):
        # Interpret the reply payload according to which command we last sent;
        # the device does not echo the command id back.
        if self.last_cmd == 'get_app':
            print(f"FW version : {value[2:].decode('utf-8')}")
        elif self.last_cmd == 'get_batt':
            vdd, = struct.unpack("<I", value[2:])
            print(f"Battery level = {vdd} [mV]")
        elif self.last_cmd == 'get_err':
            try:
                print("Crashlog : {0}".format(value[2:].decode("utf-8")))
            except UnicodeDecodeError:
                pass
        self.last_cmd = None
    async def disconnect(self):
        """Put the device to sleep and close the BLE connection."""
        await self._send_cmd('SLEEP')
        await self.client.disconnect()
        self.client = None
    async def connect(self):
        """Scan for a device whose name starts with 'Progressor' and connect.

        Raises RuntimeError if no device is found or the connection fails.
        """
        print('Searching for progressor...')
        devices = await discover()
        TARGET_NAME = 'Progressor'
        address = None
        for d in devices:
            if d.name[:len(TARGET_NAME)] == TARGET_NAME:
                address = d.address
                print(
                    "Found \"{0}\" with address {1}".format(d.name, d.address)
                )
                break
        if address is None:
            raise RuntimeError('cannot find tindeq')
        self.client = BleakClient(address)
        await self.client.connect()
        success = await self.client.is_connected()
        if success:
            # Route all notifications through our dispatcher.
            await self.client.start_notify(
                uuid.UUID(self.notify_uuid),
                self._notify_handler
            )
        else:
            raise RuntimeError('could not connect to progressor')
        return success
    def _pack(self, cmd):
        # Opcodes are sent as 2-byte little-endian values.
        return cmd.to_bytes(2, byteorder='little')
    async def _send_cmd(self, cmd_key):
        # Silently ignore commands issued before connect()/after disconnect().
        if not hasattr(self, 'client') or self.client is None:
            return
        await self.client.write_gatt_char(
            uuid.UUID(self.write_uuid),
            self._pack(self.cmds[cmd_key])
        )
    async def get_batt(self):
        self.last_cmd = 'get_batt'
        await self._send_cmd('GET_BATT_VLTG')
    async def get_fw_info(self):
        self.last_cmd = 'get_app'
        await self._send_cmd('GET_APP_VERSION')
    async def get_err(self):
        self.last_cmd = 'get_err'
        await self._send_cmd('GET_ERR_INFO')
    async def clear_err(self):
        self.last_cmd = None
        await self._send_cmd('CLR_ERR_INFO')
    async def start_logging_weight(self):
        self.last_cmd = None
        await self._send_cmd('START_WEIGHT_MEAS')
    async def stop_logging_weight(self):
        self.last_cmd = None
        await self._send_cmd('STOP_WEIGHT_MEAS')
    async def sleep(self):
        self.last_cmd = None
        await self._send_cmd('SLEEP')
    async def soft_tare(self):
        """Estimate a tare offset from ~1 s of samples without touching parent.

        Temporarily swaps in a SampleAverage as the notification sink, logs
        weight for one second, and stores the mean as the tare value.
        """
        _saved_parent = self.parent
        self.parent = SampleAverage()
        await self.start_logging_weight()
        await asyncio.sleep(1)
        await self.stop_logging_weight()
        self._tare_value = self.parent.mean
        self.parent = _saved_parent
class SampleAverage:
    """Accumulates force samples and exposes their arithmetic mean.

    Used as a temporary notification sink (see TindeqProgressor.soft_tare)
    to estimate the tare offset from a short burst of readings.
    """

    def __init__(self):
        # Raw weight readings; timestamps are deliberately discarded.
        self.weights = []

    def log_force_sample(self, time, weight):
        """Record one reading (the time argument is ignored)."""
        self.weights.append(weight)

    @property
    def mean(self):
        """Mean of all readings collected so far."""
        return np.mean(self.weights)
async def example():
    """Demo session: connect, query device info, tare, then stream 3 s of weight."""
    # Minimal sink that just prints each (time, weight) sample.
    class Wrapper:
        def log_force_sample(self, time, weight):
            print(f'{time}: {weight}')
    wrap = Wrapper()
    async with TindeqProgressor(wrap) as tindeq:
        # Short pauses between commands give the device time to respond on
        # the notify characteristic before the next command is issued.
        await tindeq.get_batt()
        await asyncio.sleep(0.5)
        await tindeq.get_fw_info()
        await asyncio.sleep(0.5)
        await tindeq.get_err()
        await asyncio.sleep(0.5)
        await tindeq.clear_err()
        await asyncio.sleep(0.5)
        await tindeq.soft_tare()
        await asyncio.sleep(1)
        # Stream tared weight samples to Wrapper for 3 seconds.
        await tindeq.start_logging_weight()
        await asyncio.sleep(3)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(example()) | [
"struct.Struct",
"asyncio.get_event_loop",
"asyncio.sleep",
"struct.unpack",
"numpy.mean",
"uuid.UUID",
"bleak.BleakClient",
"bleak.discover"
] | [((6359, 6383), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (6381, 6383), False, 'import asyncio\n'), ((1536, 1556), 'struct.Struct', 'struct.Struct', (['"""<bb"""'], {}), "('<bb')\n", (1549, 1556), False, 'import struct\n'), ((1584, 1604), 'struct.Struct', 'struct.Struct', (['"""<fl"""'], {}), "('<fl')\n", (1597, 1604), False, 'import struct\n'), ((3696, 3716), 'bleak.BleakClient', 'BleakClient', (['address'], {}), '(address)\n', (3707, 3716), False, 'from bleak import BleakClient\n'), ((5691, 5712), 'numpy.mean', 'np.mean', (['self.weights'], {}), '(self.weights)\n', (5698, 5712), True, 'import numpy as np\n'), ((3261, 3271), 'bleak.discover', 'discover', ([], {}), '()\n', (3269, 3271), False, 'from bleak import discover\n'), ((5347, 5363), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (5360, 5363), False, 'import asyncio\n'), ((5961, 5979), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (5974, 5979), False, 'import asyncio\n'), ((6029, 6047), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (6042, 6047), False, 'import asyncio\n'), ((6093, 6111), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (6106, 6111), False, 'import asyncio\n'), ((6159, 6177), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (6172, 6177), False, 'import asyncio\n'), ((6226, 6242), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (6239, 6242), False, 'import asyncio\n'), ((6302, 6318), 'asyncio.sleep', 'asyncio.sleep', (['(3)'], {}), '(3)\n', (6315, 6318), False, 'import asyncio\n'), ((2724, 2754), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'value[2:]'], {}), "('<I', value[2:])\n", (2737, 2754), False, 'import struct\n'), ((4324, 4350), 'uuid.UUID', 'uuid.UUID', (['self.write_uuid'], {}), '(self.write_uuid)\n', (4333, 4350), False, 'import uuid\n'), ((3884, 3911), 'uuid.UUID', 'uuid.UUID', (['self.notify_uuid'], {}), '(self.notify_uuid)\n', (3893, 3911), False, 'import uuid\n')] |
import numpy as np
from datetime import datetime, timedelta
from numpy import nanmean
from get_kpap import get_kpap
def get_apmsis(dn):
    """
    Function: get_apmsis(dn)
    ---------------------
    returns an array of calculated ap indices suitable for MSIS.
    MSIS requires an array of ap values, described in nrlmsise00.f:
        (1) DAILY AP
        (2) 3 HR AP INDEX FOR CURRENT TIME
        (3) 3 HR AP INDEX FOR 3 HRS BEFORE CURRENT TIME
        (4) 3 HR AP INDEX FOR 6 HRS BEFORE CURRENT TIME
        (5) 3 HR AP INDEX FOR 9 HRS BEFORE CURRENT TIME
        (6) AVERAGE OF EIGHT 3 HR AP INDICIES FROM 12 TO 33 HRS
            PRIOR TO CURRENT TIME
        (7) AVERAGE OF EIGHT 3 HR AP INDICIES FROM 36 TO 57 HRS
            PRIOR TO CURRENT TIME
    Inputs:
    --------
    dn : datetime object of the requested time
    Outputs:
    --------
    out : a 1x7 array of the calculated ap indices
    History:
    --------
    7/21/12 Created, <NAME> (<EMAIL>)
    """
    out = float('nan')*np.zeros(7)

    # (1) daily ap and (2) 3-hr ap for the current time come from a single
    # get_kpap call (index 1 is the 3-hourly ap, index 6 the daily ap).
    _, ap, _, _, _, _, daily_ap, _, _ = get_kpap(dn)
    out[0] = daily_ap
    out[1] = ap

    # (3)-(5): 3-hr ap index for 3, 6 and 9 hours before the current time.
    for i, hrs_back in enumerate((3, 6, 9), start=2):
        _, ap, _, _, _, _, _, _, _ = get_kpap(dn + timedelta(hours=-hrs_back))
        out[i] = ap

    # (6) and (7): averages of the eight 3-hr ap indices from 12-33 hrs and
    # 36-57 hrs prior to the current time. If every value is missing, report
    # NaN explicitly (nanmean of all-NaN would warn and return NaN anyway).
    for i, first_hrs in ((5, 12), (6, 36)):
        temp = np.array([get_kpap(dn + timedelta(hours=-h))[1]
                         for h in range(first_hrs, first_hrs + 24, 3)])
        out[i] = np.nan if np.isnan(temp).all() else np.nanmean(temp)
    return out
def test_get_apmsis():
    """Smoke test: print the seven MSIS ap indices for a fixed date."""
    sample_dn = datetime(2000, 3, 23, 0)
    result = get_apmsis(sample_dn)
    print("ap indices for msis are:\n{}".format(result))
if __name__ == '__main__':
    # Run the smoke test when executed as a script.
    test_get_apmsis()
| [
"get_kpap.get_kpap",
"numpy.zeros",
"numpy.isnan",
"datetime.datetime",
"datetime.timedelta",
"numpy.nanmean"
] | [((1344, 1356), 'get_kpap.get_kpap', 'get_kpap', (['dn'], {}), '(dn)\n', (1352, 1356), False, 'from get_kpap import get_kpap\n'), ((1958, 1969), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (1966, 1969), True, 'import numpy as np\n'), ((2721, 2732), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (2729, 2732), True, 'import numpy as np\n'), ((3426, 3450), 'datetime.datetime', 'datetime', (['(2000)', '(3)', '(23)', '(0)'], {}), '(2000, 3, 23, 0)\n', (3434, 3450), False, 'from datetime import datetime, timedelta\n'), ((1272, 1283), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (1280, 1283), True, 'import numpy as np\n'), ((2596, 2612), 'numpy.nanmean', 'np.nanmean', (['temp'], {}), '(temp)\n', (2606, 2612), True, 'import numpy as np\n'), ((3359, 3375), 'numpy.nanmean', 'np.nanmean', (['temp'], {}), '(temp)\n', (3369, 3375), True, 'import numpy as np\n'), ((1537, 1556), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-3)'}), '(hours=-3)\n', (1546, 1556), False, 'from datetime import datetime, timedelta\n'), ((1674, 1693), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-6)'}), '(hours=-6)\n', (1683, 1693), False, 'from datetime import datetime, timedelta\n'), ((1812, 1831), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-9)'}), '(hours=-9)\n', (1821, 1831), False, 'from datetime import datetime, timedelta\n'), ((2021, 2041), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-12)'}), '(hours=-12)\n', (2030, 2041), False, 'from datetime import datetime, timedelta\n'), ((2093, 2113), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-15)'}), '(hours=-15)\n', (2102, 2113), False, 'from datetime import datetime, timedelta\n'), ((2165, 2185), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-18)'}), '(hours=-18)\n', (2174, 2185), False, 'from datetime import datetime, timedelta\n'), ((2237, 2257), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-21)'}), '(hours=-21)\n', (2246, 2257), False, 'from datetime import datetime, 
timedelta\n'), ((2309, 2329), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-24)'}), '(hours=-24)\n', (2318, 2329), False, 'from datetime import datetime, timedelta\n'), ((2381, 2401), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-27)'}), '(hours=-27)\n', (2390, 2401), False, 'from datetime import datetime, timedelta\n'), ((2453, 2473), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-30)'}), '(hours=-30)\n', (2462, 2473), False, 'from datetime import datetime, timedelta\n'), ((2525, 2545), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-33)'}), '(hours=-33)\n', (2534, 2545), False, 'from datetime import datetime, timedelta\n'), ((2575, 2589), 'numpy.isnan', 'np.isnan', (['temp'], {}), '(temp)\n', (2583, 2589), True, 'import numpy as np\n'), ((2784, 2804), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-36)'}), '(hours=-36)\n', (2793, 2804), False, 'from datetime import datetime, timedelta\n'), ((2856, 2876), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-39)'}), '(hours=-39)\n', (2865, 2876), False, 'from datetime import datetime, timedelta\n'), ((2928, 2948), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-42)'}), '(hours=-42)\n', (2937, 2948), False, 'from datetime import datetime, timedelta\n'), ((3000, 3020), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-45)'}), '(hours=-45)\n', (3009, 3020), False, 'from datetime import datetime, timedelta\n'), ((3072, 3092), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-48)'}), '(hours=-48)\n', (3081, 3092), False, 'from datetime import datetime, timedelta\n'), ((3144, 3164), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-51)'}), '(hours=-51)\n', (3153, 3164), False, 'from datetime import datetime, timedelta\n'), ((3216, 3236), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-54)'}), '(hours=-54)\n', (3225, 3236), False, 'from datetime import datetime, timedelta\n'), ((3288, 3308), 'datetime.timedelta', 'timedelta', ([], {'hours': '(-57)'}), '(hours=-57)\n', 
(3297, 3308), False, 'from datetime import datetime, timedelta\n'), ((3338, 3352), 'numpy.isnan', 'np.isnan', (['temp'], {}), '(temp)\n', (3346, 3352), True, 'import numpy as np\n')] |
import numpy as np
import random
import tensorflow as tf
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.layers import (
Input,
Conv2D,
Dense,
Flatten,
Embedding,
Concatenate,
GlobalMaxPool1D,
Conv1D,
MaxPooling1D,
)
from tensorflow.keras.models import Model, load_model
import os
import json
import mlflow
import mlflow.tensorflow
# MLflow tracking server endpoint.
# NOTE(review): credentials ("testuser:password") are hard-coded into the URI;
# they should come from the environment or a secrets store -- confirm before shipping.
tracking_uri = (
    "http://testuser:password@ec2-18-218-100-222.us-east-2.compute.amazonaws.com"
)
# S3 bucket used as the MLflow artifact store.
s3_bucket = "s3://docuedge-mlflow-bucket"  # replace this value
def read_data(path):
    """Parse a ``####``-separated training file into paths, texts and label maps.

    Each line has the form ``<image_path>####<text>`` where ``<image_path>``
    encodes the labels in its directory structure:
    ``.../<master_doc_type>/<doc_type>/<file>``.  Integer label ids are
    assigned in order of first appearance.

    :param path: location of the training file
    :return: tuple ``(paths, doc_type_labels, rev_doc_type_labels, texts,
        master_labels, rev_master_labels)`` where the label dicts map
        label name -> integer id and each ``rev_*`` dict is the inverse map
    """
    # The file handle was previously opened and never closed; the context
    # manager closes it deterministically.
    with open(path, "r") as bow:
        data = bow.readlines()
    all_data_paths = []
    all_texts = []
    doc_type_y_labels = {}
    master_doc_type_y_labels = {}
    for line in data:
        line_data = line.split("####")
        all_data_paths.append(line_data[0])
        # Strip only a trailing newline; the previous ``[:-1]`` slice chopped
        # the last real character when the final line had no newline.
        text = line_data[-1]
        if text.endswith("\n"):
            text = text[:-1]
        all_texts.append(text)
        path_parts = line_data[0].split("/")
        doc_type_label = path_parts[-2]
        master_doc_type_label = path_parts[-3]
        if doc_type_label not in doc_type_y_labels:
            doc_type_y_labels[doc_type_label] = len(doc_type_y_labels)
        if master_doc_type_label not in master_doc_type_y_labels:
            master_doc_type_y_labels[master_doc_type_label] = len(
                master_doc_type_y_labels
            )
    # Inverse maps: integer id -> label name.
    rev_labels_doc_type = {val: key for key, val in doc_type_y_labels.items()}
    rev_labels_master_doc_type = {
        val: key for key, val in master_doc_type_y_labels.items()
    }
    return (
        all_data_paths,
        doc_type_y_labels,
        rev_labels_doc_type,
        all_texts,
        master_doc_type_y_labels,
        rev_labels_master_doc_type,
    )
def tokenize_sentence(sentence, tokenizer, maximum_word_length):
    """Map each space-separated word of ``sentence`` to its tokenizer index.

    Out-of-vocabulary words map to 0, and the token list is zero-padded up
    to ``maximum_word_length`` entries.  Sentences with more words than
    ``maximum_word_length`` are returned unpadded and untruncated.

    :param sentence: raw text, split on single spaces
    :param tokenizer: object exposing a ``word_index`` mapping word -> id
    :param maximum_word_length: target (minimum) length of the output list
    :return: list of integer token ids
    """
    vocabulary = tokenizer.word_index
    token_ids = [vocabulary.get(word, 0) for word in sentence.split(" ")]
    padding = maximum_word_length - len(token_ids)
    if padding > 0:
        token_ids.extend([0] * padding)
    return token_ids
def data_loader_text(
    bs,
    data,
    y_lab,
    tokenizer,
    text_data,
    image_input_shape,
    max_word_length,
    y_sub_labels,
):
    """Infinite generator of random training batches for the hybrid model.

    Each yielded batch is ``([images, texts], [master_labels, sub_labels])``
    with all four entries as numpy arrays.  Samples are drawn uniformly at
    random (with replacement) from ``data``; labels are decoded from the
    image path (``.../<master>/<sub>/<file>``), images are resized to
    ``image_input_shape`` and scaled to [0, 1], and texts are tokenized and
    zero-padded to ``max_word_length``.
    """
    while True:
        batch_images, batch_texts = [], []
        batch_master, batch_sub = [], []
        for _ in range(bs):
            idx = random.randint(0, len(data) - 1)
            path_parts = data[idx].split("/")
            batch_master.append(y_lab[path_parts[-3]])
            batch_sub.append(y_sub_labels[path_parts[-2]])
            raw = np.asarray(load_img(data[idx], target_size=image_input_shape))
            batch_images.append(np.divide(raw, 255.0))
            batch_texts.append(
                tokenize_sentence(
                    text_data[idx], tokenizer, maximum_word_length=max_word_length
                )
            )
        yield (
            [np.asarray(batch_images), np.asarray(batch_texts)],
            [np.asarray(batch_master), np.asarray(batch_sub)],
        )
def model_arc(y_labels, tokenizer, text_model_inp_shape, image_inp_shape, y_sub_labels):
    """Build and compile the two-headed image+text classifier.

    The text branch (embedding -> global max pool -> dense) and the image
    branch (conv -> flatten) are concatenated into a shared trunk.  The
    master-class head is a softmax over ``y_labels``; the sub-class head is
    fed by both the master head and the shared trunk and is a softmax over
    ``y_sub_labels``.

    :return: compiled Keras ``Model`` with inputs [image, text] and outputs
        [master_probabilities, sub_probabilities]
    """
    text_input = Input(shape=text_model_inp_shape)
    image_input = Input(shape=image_inp_shape)

    # Text branch: trainable embedding, pooled and projected to 64 features.
    embedded = Embedding(
        input_dim=len(tokenizer.word_index) + 1,
        output_dim=64,
        input_length=text_model_inp_shape,
        trainable=True,
    )(text_input)
    text_features = Dense(units=64, activation="relu")(GlobalMaxPool1D()(embedded))

    # Image branch: a single conv layer flattened into a feature vector.
    image_features = Flatten()(
        Conv2D(filters=64, kernel_size=(2, 2), activation="relu")(image_input)
    )

    # Shared trunk and master-class head.
    fused = Concatenate()([image_features, text_features])
    master_head = Dense(len(y_labels), activation="softmax")(fused)

    # Sub-class head: master-head activations re-projected and concatenated
    # with the shared trunk.
    sub_hidden = Dense(units=256, activation="relu")(
        Dense(units=64, activation="relu")(master_head)
    )
    sub_head = Dense(units=len(y_sub_labels), activation="softmax")(
        Concatenate()([sub_hidden, fused])
    )

    network = Model([image_input, text_input], [master_head, sub_head])
    network.compile(
        optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
    )
    return network
def train_hybrid_v2(
    text_plus_file_path: str,
    batch_size: int,
    epochs: int,
    image_shape: int,
    max_words: int,
    artifact_name: str,
    save_dir_path: str,
    trained_model_path: str,
    experiment_name: str,
):
    """Train (or resume) the hybrid image+text document classifier under MLflow.

    The training file contains ``<image_path>####<text>`` lines (see
    ``read_data``).  Label decodings are written as JSON next to the model,
    training batches come from ``data_loader_text``, the model is saved as
    ``document_classifier.h5`` and all non-model metadata files are uploaded
    as MLflow artifacts.

    :param text_plus_file_path: path to the ``####``-separated training file
    :param batch_size: number of samples per training batch
    :param epochs: number of training epochs
    :param image_shape: images are resized to (image_shape, image_shape, 3)
    :param max_words: text sequences are zero-padded to this length
    :param artifact_name: sub-directory of ``save_dir_path`` for artifacts
    :param save_dir_path: root directory for saved models / metadata
    :param trained_model_path: if this file exists, training resumes from it
    :param experiment_name: MLflow experiment to create or reuse
    """
    mlflow.set_tracking_uri(tracking_uri)
    # (The previously constructed MlflowClient was never used and was removed.)
    try:
        # Create the experiment on first use; mlflow raises if an experiment
        # with this name already exists, in which case it is simply looked up.
        mlflow.create_experiment(experiment_name, s3_bucket)
        mlflow.set_experiment(experiment_name)
        experiment = mlflow.get_experiment_by_name(experiment_name)
    except Exception:  # narrowed from a bare ``except:`` (keeps SystemExit etc. alive)
        experiment = mlflow.get_experiment_by_name(experiment_name)
    (
        all_imgs_path,
        doc_type_y_labels,
        rev_labels_doc_type,
        all_text,
        master_doc_type_label,
        rev_labels_master_doc_type,
    ) = read_data(path=text_plus_file_path)
    num_train_img = len(all_imgs_path)
    # Persist the label decodings so predictions can be mapped back to names.
    with open(
        os.path.join(save_dir_path, artifact_name, f"rev_labels_{artifact_name}.json"),
        "w+",
    ) as tar:
        json.dump(rev_labels_doc_type, tar)
    with open(
        os.path.join(
            save_dir_path, artifact_name, f"rev_labels_master_{artifact_name}.json"
        ),
        "w+",
    ) as tar:
        json.dump(rev_labels_master_doc_type, tar)
    print("target_encodings: ", master_doc_type_label)
    print("target_encodings: ", doc_type_y_labels)
    print("Number of training images: ", num_train_img)
    # Fit the tokenizer on the raw file contents; the file handle previously
    # leaked (plain open() without close), hence the context manager.
    tokenizer = Tokenizer()
    with open(text_plus_file_path, "r") as bow:
        tokenizer.fit_on_texts(bow.read().split("####"))
    train_gen = data_loader_text(
        tokenizer=tokenizer,
        y_lab=master_doc_type_label,
        data=all_imgs_path,
        text_data=all_text,
        bs=batch_size,
        image_input_shape=(image_shape, image_shape, 3),
        max_word_length=max_words,
        y_sub_labels=doc_type_y_labels,
    )
    if os.path.isfile(trained_model_path):
        # Resume training from a previously saved model.
        model = load_model(trained_model_path)
    else:
        model = model_arc(
            y_labels=master_doc_type_label,
            tokenizer=tokenizer,
            text_model_inp_shape=(max_words,),
            image_inp_shape=(image_shape, image_shape, 3),
            y_sub_labels=doc_type_y_labels,
        )
    mlflow.tensorflow.autolog(every_n_iter=1)
    with mlflow.start_run(experiment_id=experiment.experiment_id):
        # NOTE(review): these are hyper-parameters; ``mlflow.log_params`` would
        # be conventional, but ``log_metrics`` is kept for compatibility with
        # existing runs.
        mlflow.log_metrics(
            {
                "batch_size": batch_size,
                "epochs": epochs,
                "image_shape": image_shape,
                "max_words": max_words,
            }
        )
        model.fit(
            x=train_gen, steps_per_epoch=num_train_img // batch_size, epochs=epochs
        )
        model.save(
            filepath=os.path.join(
                save_dir_path, artifact_name, "document_classifier.h5"
            )
        )
        # Upload every metadata file (everything except the .h5 model and
        # macOS .DS_Store entries) as an MLflow artifact.
        meta_data_path = os.path.join(save_dir_path, artifact_name)
        for artifact in sorted(os.listdir(meta_data_path)):
            if artifact == ".DS_Store":
                continue
            artifact_path = os.path.join(meta_data_path, artifact)
            if (
                os.path.isfile(artifact_path)
                and artifact_path.split(".")[-1] != "h5"
            ):
                print(f"artifact to be uploaded is: {artifact}")
                mlflow.log_artifact(local_path=artifact_path)
        artifact_uri = mlflow.get_artifact_uri()
        print(artifact_uri)
    mlflow.end_run()
| [
"mlflow.tensorflow.autolog",
"mlflow.tracking.MlflowClient",
"tensorflow.keras.layers.Dense",
"mlflow.get_artifact_uri",
"mlflow.create_experiment",
"mlflow.log_artifact",
"os.path.isfile",
"os.path.join",
"tensorflow.keras.layers.Flatten",
"mlflow.start_run",
"tensorflow.keras.preprocessing.tex... | [((3441, 3474), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'text_model_inp_shape'}), '(shape=text_model_inp_shape)\n', (3446, 3474), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Embedding, Concatenate, GlobalMaxPool1D, Conv1D, MaxPooling1D\n'), ((3498, 3526), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'image_inp_shape'}), '(shape=image_inp_shape)\n', (3503, 3526), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Embedding, Concatenate, GlobalMaxPool1D, Conv1D, MaxPooling1D\n'), ((4462, 4532), 'tensorflow.keras.models.Model', 'Model', (['[inp_layer_images, inp_layer_texts]', '[out_layer, sub_out_layer]'], {}), '([inp_layer_images, inp_layer_texts], [out_layer, sub_out_layer])\n', (4467, 4532), False, 'from tensorflow.keras.models import Model, load_model\n'), ((4905, 4942), 'mlflow.set_tracking_uri', 'mlflow.set_tracking_uri', (['tracking_uri'], {}), '(tracking_uri)\n', (4928, 4942), False, 'import mlflow\n'), ((4956, 5011), 'mlflow.tracking.MlflowClient', 'mlflow.tracking.MlflowClient', ([], {'tracking_uri': 'tracking_uri'}), '(tracking_uri=tracking_uri)\n', (4984, 5011), False, 'import mlflow\n'), ((6207, 6218), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (6216, 6218), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((6597, 6631), 'os.path.isfile', 'os.path.isfile', (['trained_model_path'], {}), '(trained_model_path)\n', (6611, 6631), False, 'import os\n'), ((6958, 6999), 'mlflow.tensorflow.autolog', 'mlflow.tensorflow.autolog', ([], {'every_n_iter': '(1)'}), '(every_n_iter=1)\n', (6983, 6999), False, 'import mlflow\n'), ((3743, 3760), 'tensorflow.keras.layers.GlobalMaxPool1D', 'GlobalMaxPool1D', ([], {}), '()\n', (3758, 3760), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Embedding, Concatenate, GlobalMaxPool1D, Conv1D, MaxPooling1D\n'), ((3796, 3830), 
'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(64)', 'activation': '"""relu"""'}), "(units=64, activation='relu')\n", (3801, 3830), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Embedding, Concatenate, GlobalMaxPool1D, Conv1D, MaxPooling1D\n'), ((3864, 3921), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(2, 2)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(2, 2), activation='relu')\n", (3870, 3921), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Embedding, Concatenate, GlobalMaxPool1D, Conv1D, MaxPooling1D\n'), ((3974, 3983), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3981, 3983), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Embedding, Concatenate, GlobalMaxPool1D, Conv1D, MaxPooling1D\n'), ((4016, 4029), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (4027, 4029), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Embedding, Concatenate, GlobalMaxPool1D, Conv1D, MaxPooling1D\n'), ((4154, 4188), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(64)', 'activation': '"""relu"""'}), "(units=64, activation='relu')\n", (4159, 4188), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Embedding, Concatenate, GlobalMaxPool1D, Conv1D, MaxPooling1D\n'), ((4222, 4257), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(256)', 'activation': '"""relu"""'}), "(units=256, activation='relu')\n", (4227, 4257), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Embedding, Concatenate, GlobalMaxPool1D, Conv1D, MaxPooling1D\n'), ((4297, 4310), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (4308, 4310), False, 'from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Embedding, Concatenate, GlobalMaxPool1D, Conv1D, MaxPooling1D\n'), ((5109, 5155), 
'mlflow.create_experiment', 'mlflow.create_experiment', (['expr_name', 's3_bucket'], {}), '(expr_name, s3_bucket)\n', (5133, 5155), False, 'import mlflow\n'), ((5164, 5196), 'mlflow.set_experiment', 'mlflow.set_experiment', (['expr_name'], {}), '(expr_name)\n', (5185, 5196), False, 'import mlflow\n'), ((5218, 5264), 'mlflow.get_experiment_by_name', 'mlflow.get_experiment_by_name', (['experiment_name'], {}), '(experiment_name)\n', (5247, 5264), False, 'import mlflow\n'), ((5739, 5774), 'json.dump', 'json.dump', (['rev_labels_doc_type', 'tar'], {}), '(rev_labels_doc_type, tar)\n', (5748, 5774), False, 'import json\n'), ((5943, 5985), 'json.dump', 'json.dump', (['rev_labels_master_doc_type', 'tar'], {}), '(rev_labels_master_doc_type, tar)\n', (5952, 5985), False, 'import json\n'), ((6649, 6679), 'tensorflow.keras.models.load_model', 'load_model', (['trained_model_path'], {}), '(trained_model_path)\n', (6659, 6679), False, 'from tensorflow.keras.models import Model, load_model\n'), ((7009, 7065), 'mlflow.start_run', 'mlflow.start_run', ([], {'experiment_id': 'experiment.experiment_id'}), '(experiment_id=experiment.experiment_id)\n', (7025, 7065), False, 'import mlflow\n'), ((7075, 7195), 'mlflow.log_metrics', 'mlflow.log_metrics', (["{'batch_size': batch_size, 'epochs': epochs, 'image_shape': image_shape,\n 'max_words': max_words}"], {}), "({'batch_size': batch_size, 'epochs': epochs,\n 'image_shape': image_shape, 'max_words': max_words})\n", (7093, 7195), False, 'import mlflow\n'), ((7581, 7623), 'os.path.join', 'os.path.join', (['save_dir_path', 'artifact_name'], {}), '(save_dir_path, artifact_name)\n', (7593, 7623), False, 'import os\n'), ((8105, 8130), 'mlflow.get_artifact_uri', 'mlflow.get_artifact_uri', ([], {}), '()\n', (8128, 8130), False, 'import mlflow\n'), ((8167, 8183), 'mlflow.end_run', 'mlflow.end_run', ([], {}), '()\n', (8181, 8183), False, 'import mlflow\n'), ((2956, 2982), 'numpy.divide', 'np.divide', (['test_img', '(255.0)'], {}), '(test_img, 
255.0)\n', (2965, 2982), True, 'import numpy as np\n'), ((5298, 5344), 'mlflow.get_experiment_by_name', 'mlflow.get_experiment_by_name', (['experiment_name'], {}), '(experiment_name)\n', (5327, 5344), False, 'import mlflow\n'), ((5623, 5701), 'os.path.join', 'os.path.join', (['save_dir_path', 'artifact_name', 'f"""rev_labels_{artifact_name}.json"""'], {}), "(save_dir_path, artifact_name, f'rev_labels_{artifact_name}.json')\n", (5635, 5701), False, 'import os\n'), ((5798, 5887), 'os.path.join', 'os.path.join', (['save_dir_path', 'artifact_name', 'f"""rev_labels_master_{artifact_name}.json"""'], {}), "(save_dir_path, artifact_name,\n f'rev_labels_master_{artifact_name}.json')\n", (5810, 5887), False, 'import os\n'), ((7655, 7681), 'os.listdir', 'os.listdir', (['meta_data_path'], {}), '(meta_data_path)\n', (7665, 7681), False, 'import os\n'), ((2883, 2936), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['data[indice]'], {'target_size': 'image_input_shape'}), '(data[indice], target_size=image_input_shape)\n', (2891, 2936), False, 'from tensorflow.keras.preprocessing.image import load_img\n'), ((7447, 7515), 'os.path.join', 'os.path.join', (['save_dir_path', 'artifact_name', '"""document_classifier.h5"""'], {}), "(save_dir_path, artifact_name, 'document_classifier.h5')\n", (7459, 7515), False, 'import os\n'), ((7756, 7794), 'os.path.join', 'os.path.join', (['meta_data_path', 'artifact'], {}), '(meta_data_path, artifact)\n', (7768, 7794), False, 'import os\n'), ((3201, 3219), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (3211, 3219), True, 'import numpy as np\n'), ((3221, 3238), 'numpy.asarray', 'np.asarray', (['texts'], {}), '(texts)\n', (3231, 3238), True, 'import numpy as np\n'), ((3255, 3280), 'numpy.asarray', 'np.asarray', (['master_labels'], {}), '(master_labels)\n', (3265, 3280), True, 'import numpy as np\n'), ((3294, 3316), 'numpy.asarray', 'np.asarray', (['sub_labels'], {}), '(sub_labels)\n', (3304, 3316), True, 'import numpy as 
np\n'), ((7836, 7865), 'os.path.isfile', 'os.path.isfile', (['artifact_path'], {}), '(artifact_path)\n', (7850, 7865), False, 'import os\n'), ((8035, 8080), 'mlflow.log_artifact', 'mlflow.log_artifact', ([], {'local_path': 'artifact_path'}), '(local_path=artifact_path)\n', (8054, 8080), False, 'import mlflow\n')] |
"""Test module for general class metafeatures."""
import pytest
from pymfe.mfe import MFE
from tests.utils import load_xy
import numpy as np
GNAME = "general"  # name of the pymfe metafeature group exercised by this module
class TestGeneral:
    """TestClass dedicated to test general metafeatures."""
    # Each tuple is (dataset id, metafeature name, expected value, precompute).
    # Dataset 0 is mixed-type, dataset 1 purely categorical, dataset 2 purely
    # numerical.  Every case appears twice -- with and without precomputation --
    # to check that precomputed intermediates do not change the results.
    @pytest.mark.parametrize(
        "dt_id, ft_name, exp_value, precompute",
        [
            ###################
            # Mixed data
            ###################
            (0, "attr_to_inst", 0.08, False),
            (0, "cat_to_num", 1, False),
            (0, "freq_class", [0.50, 0.0], False),
            (0, "inst_to_attr", 12.50, False),
            (0, "nr_attr", 4, False),
            (0, "nr_bin", 0, False),
            (0, "nr_cat", 2, False),
            (0, "nr_class", 2, False),
            (0, "nr_inst", 50, False),
            (0, "nr_num", 2, False),
            (0, "num_to_cat", 1.0, False),
            (0, "attr_to_inst", 0.08, True),
            (0, "cat_to_num", 1, True),
            (0, "freq_class", [0.50, 0.0], True),
            (0, "inst_to_attr", 12.50, True),
            (0, "nr_attr", 4, True),
            (0, "nr_bin", 0, True),
            (0, "nr_cat", 2, True),
            (0, "nr_class", 2, True),
            (0, "nr_inst", 50, True),
            (0, "nr_num", 2, True),
            (0, "num_to_cat", 1.0, True),
            ###################
            # Categorical data
            ###################
            (1, "attr_to_inst", 36 / 3196, False),
            (1, "cat_to_num", np.nan, False),
            (1, "freq_class", [0.5, 0.03141713], False),
            (1, "inst_to_attr", 88.77778, False),
            (1, "nr_attr", 36, False),
            (1, "nr_bin", 35, False),
            (1, "nr_cat", 36, False),
            (1, "nr_class", 2, False),
            (1, "nr_inst", 3196, False),
            (1, "nr_num", 0, False),
            (1, "num_to_cat", 0, False),
            (1, "attr_to_inst", 36 / 3196, True),
            (1, "cat_to_num", np.nan, True),
            (1, "freq_class", [0.5, 0.03141713], True),
            (1, "inst_to_attr", 88.77778, True),
            (1, "nr_attr", 36, True),
            (1, "nr_bin", 35, True),
            (1, "nr_cat", 36, True),
            (1, "nr_class", 2, True),
            (1, "nr_inst", 3196, True),
            (1, "nr_num", 0, True),
            (1, "num_to_cat", 0, True),
            ###################
            # Numerical data
            ###################
            (2, "attr_to_inst", 0.02666667, False),
            (2, "cat_to_num", 0.0, False),
            (2, "freq_class", [0.33333333, 0.0], False),
            (2, "inst_to_attr", 37.50, False),
            (2, "nr_attr", 4, False),
            (2, "nr_bin", 0, False),
            (2, "nr_cat", 0, False),
            (2, "nr_class", 3, False),
            (2, "nr_inst", 150, False),
            (2, "nr_num", 4, False),
            (2, "num_to_cat", np.nan, False),
            (2, "attr_to_inst", 0.02666667, True),
            (2, "cat_to_num", 0.0, True),
            (2, "freq_class", [0.33333333, 0.0], True),
            (2, "inst_to_attr", 37.50, True),
            (2, "nr_attr", 4, True),
            (2, "nr_bin", 0, True),
            (2, "nr_cat", 0, True),
            (2, "nr_class", 3, True),
            (2, "nr_inst", 150, True),
            (2, "nr_num", 4, True),
            (2, "num_to_cat", np.nan, True),
        ])
    def test_ft_methods_general(self, dt_id, ft_name, exp_value, precompute):
        """Function to test each meta-feature belongs to general group.
        """
        # Optionally precompute the group's shared intermediates first.
        precomp_group = GNAME if precompute else None
        X, y = load_xy(dt_id)
        mfe = MFE(
            groups=[GNAME], features=[ft_name]).fit(
            X.values, y.values, precomp_groups=precomp_group)
        value = mfe.extract()[1]
        if exp_value is np.nan:
            # The parametrize table stores the np.nan object itself, so an
            # identity check distinguishes it from ordinary float values.
            assert value[0] is exp_value
        else:
            assert np.allclose(value, exp_value)
    # Expected mean-summarized values for all general metafeatures at once,
    # one row per (dataset, precompute) combination in the same order as above.
    @pytest.mark.parametrize(
        "dt_id, exp_value, precompute",
        [
            ###################
            # Mixed data
            ###################
            (0, [0.08, 1, 0.50, 12.50, 4, 0, 2, 2, 50, 2, 1.0], False),
            (0, [0.08, 1, 0.50, 12.50, 4, 0, 2, 2, 50, 2, 1.0], True),
            ###################
            # Categorical data
            ###################
            (1, [36 / 3196, np.nan, 0.5, 88.77778, 36, 35, 36, 2, 3196, 0, 0],
             False),
            (1, [36 / 3196, np.nan, 0.5, 88.77778, 36, 35, 36, 2, 3196, 0, 0],
             True),
            ###################
            # Numerical data
            ###################
            (2, [
                0.02666667, 0.0, 0.33333333, 37.50, 4, 0, 0, 3, 150, 4, np.nan
            ], False),
            (2, [
                0.02666667, 0.0, 0.33333333, 37.50, 4, 0, 0, 3, 150, 4, np.nan
            ], True),
        ])
    def test_integration_general(self, dt_id, exp_value, precompute):
        """Extract all general metafeatures together and compare the means."""
        precomp_group = GNAME if precompute else None
        X, y = load_xy(dt_id)
        mfe = MFE(
            groups=[GNAME], summary="mean").fit(
            X.values, y.values, precomp_groups=precomp_group)
        value = mfe.extract()[1]
        assert np.allclose(value, exp_value, equal_nan=True)
| [
"pytest.mark.parametrize",
"tests.utils.load_xy",
"pymfe.mfe.MFE",
"numpy.allclose"
] | [((248, 2418), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dt_id, ft_name, exp_value, precompute"""', "[(0, 'attr_to_inst', 0.08, False), (0, 'cat_to_num', 1, False), (0,\n 'freq_class', [0.5, 0.0], False), (0, 'inst_to_attr', 12.5, False), (0,\n 'nr_attr', 4, False), (0, 'nr_bin', 0, False), (0, 'nr_cat', 2, False),\n (0, 'nr_class', 2, False), (0, 'nr_inst', 50, False), (0, 'nr_num', 2, \n False), (0, 'num_to_cat', 1.0, False), (0, 'attr_to_inst', 0.08, True),\n (0, 'cat_to_num', 1, True), (0, 'freq_class', [0.5, 0.0], True), (0,\n 'inst_to_attr', 12.5, True), (0, 'nr_attr', 4, True), (0, 'nr_bin', 0, \n True), (0, 'nr_cat', 2, True), (0, 'nr_class', 2, True), (0, 'nr_inst',\n 50, True), (0, 'nr_num', 2, True), (0, 'num_to_cat', 1.0, True), (1,\n 'attr_to_inst', 36 / 3196, False), (1, 'cat_to_num', np.nan, False), (1,\n 'freq_class', [0.5, 0.03141713], False), (1, 'inst_to_attr', 88.77778, \n False), (1, 'nr_attr', 36, False), (1, 'nr_bin', 35, False), (1,\n 'nr_cat', 36, False), (1, 'nr_class', 2, False), (1, 'nr_inst', 3196, \n False), (1, 'nr_num', 0, False), (1, 'num_to_cat', 0, False), (1,\n 'attr_to_inst', 36 / 3196, True), (1, 'cat_to_num', np.nan, True), (1,\n 'freq_class', [0.5, 0.03141713], True), (1, 'inst_to_attr', 88.77778, \n True), (1, 'nr_attr', 36, True), (1, 'nr_bin', 35, True), (1, 'nr_cat',\n 36, True), (1, 'nr_class', 2, True), (1, 'nr_inst', 3196, True), (1,\n 'nr_num', 0, True), (1, 'num_to_cat', 0, True), (2, 'attr_to_inst', \n 0.02666667, False), (2, 'cat_to_num', 0.0, False), (2, 'freq_class', [\n 0.33333333, 0.0], False), (2, 'inst_to_attr', 37.5, False), (2,\n 'nr_attr', 4, False), (2, 'nr_bin', 0, False), (2, 'nr_cat', 0, False),\n (2, 'nr_class', 3, False), (2, 'nr_inst', 150, False), (2, 'nr_num', 4,\n False), (2, 'num_to_cat', np.nan, False), (2, 'attr_to_inst', \n 0.02666667, True), (2, 'cat_to_num', 0.0, True), (2, 'freq_class', [\n 0.33333333, 0.0], True), (2, 'inst_to_attr', 37.5, True), (2, 'nr_attr',\n 4, 
True), (2, 'nr_bin', 0, True), (2, 'nr_cat', 0, True), (2,\n 'nr_class', 3, True), (2, 'nr_inst', 150, True), (2, 'nr_num', 4, True),\n (2, 'num_to_cat', np.nan, True)]"], {}), "('dt_id, ft_name, exp_value, precompute', [(0,\n 'attr_to_inst', 0.08, False), (0, 'cat_to_num', 1, False), (0,\n 'freq_class', [0.5, 0.0], False), (0, 'inst_to_attr', 12.5, False), (0,\n 'nr_attr', 4, False), (0, 'nr_bin', 0, False), (0, 'nr_cat', 2, False),\n (0, 'nr_class', 2, False), (0, 'nr_inst', 50, False), (0, 'nr_num', 2, \n False), (0, 'num_to_cat', 1.0, False), (0, 'attr_to_inst', 0.08, True),\n (0, 'cat_to_num', 1, True), (0, 'freq_class', [0.5, 0.0], True), (0,\n 'inst_to_attr', 12.5, True), (0, 'nr_attr', 4, True), (0, 'nr_bin', 0, \n True), (0, 'nr_cat', 2, True), (0, 'nr_class', 2, True), (0, 'nr_inst',\n 50, True), (0, 'nr_num', 2, True), (0, 'num_to_cat', 1.0, True), (1,\n 'attr_to_inst', 36 / 3196, False), (1, 'cat_to_num', np.nan, False), (1,\n 'freq_class', [0.5, 0.03141713], False), (1, 'inst_to_attr', 88.77778, \n False), (1, 'nr_attr', 36, False), (1, 'nr_bin', 35, False), (1,\n 'nr_cat', 36, False), (1, 'nr_class', 2, False), (1, 'nr_inst', 3196, \n False), (1, 'nr_num', 0, False), (1, 'num_to_cat', 0, False), (1,\n 'attr_to_inst', 36 / 3196, True), (1, 'cat_to_num', np.nan, True), (1,\n 'freq_class', [0.5, 0.03141713], True), (1, 'inst_to_attr', 88.77778, \n True), (1, 'nr_attr', 36, True), (1, 'nr_bin', 35, True), (1, 'nr_cat',\n 36, True), (1, 'nr_class', 2, True), (1, 'nr_inst', 3196, True), (1,\n 'nr_num', 0, True), (1, 'num_to_cat', 0, True), (2, 'attr_to_inst', \n 0.02666667, False), (2, 'cat_to_num', 0.0, False), (2, 'freq_class', [\n 0.33333333, 0.0], False), (2, 'inst_to_attr', 37.5, False), (2,\n 'nr_attr', 4, False), (2, 'nr_bin', 0, False), (2, 'nr_cat', 0, False),\n (2, 'nr_class', 3, False), (2, 'nr_inst', 150, False), (2, 'nr_num', 4,\n False), (2, 'num_to_cat', np.nan, False), (2, 'attr_to_inst', \n 0.02666667, True), (2, 'cat_to_num', 0.0, True), 
(2, 'freq_class', [\n 0.33333333, 0.0], True), (2, 'inst_to_attr', 37.5, True), (2, 'nr_attr',\n 4, True), (2, 'nr_bin', 0, True), (2, 'nr_cat', 0, True), (2,\n 'nr_class', 3, True), (2, 'nr_inst', 150, True), (2, 'nr_num', 4, True),\n (2, 'num_to_cat', np.nan, True)])\n", (271, 2418), False, 'import pytest\n'), ((3958, 4460), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dt_id, exp_value, precompute"""', '[(0, [0.08, 1, 0.5, 12.5, 4, 0, 2, 2, 50, 2, 1.0], False), (0, [0.08, 1, \n 0.5, 12.5, 4, 0, 2, 2, 50, 2, 1.0], True), (1, [36 / 3196, np.nan, 0.5,\n 88.77778, 36, 35, 36, 2, 3196, 0, 0], False), (1, [36 / 3196, np.nan, \n 0.5, 88.77778, 36, 35, 36, 2, 3196, 0, 0], True), (2, [0.02666667, 0.0,\n 0.33333333, 37.5, 4, 0, 0, 3, 150, 4, np.nan], False), (2, [0.02666667,\n 0.0, 0.33333333, 37.5, 4, 0, 0, 3, 150, 4, np.nan], True)]'], {}), "('dt_id, exp_value, precompute', [(0, [0.08, 1, 0.5,\n 12.5, 4, 0, 2, 2, 50, 2, 1.0], False), (0, [0.08, 1, 0.5, 12.5, 4, 0, 2,\n 2, 50, 2, 1.0], True), (1, [36 / 3196, np.nan, 0.5, 88.77778, 36, 35, \n 36, 2, 3196, 0, 0], False), (1, [36 / 3196, np.nan, 0.5, 88.77778, 36, \n 35, 36, 2, 3196, 0, 0], True), (2, [0.02666667, 0.0, 0.33333333, 37.5, \n 4, 0, 0, 3, 150, 4, np.nan], False), (2, [0.02666667, 0.0, 0.33333333, \n 37.5, 4, 0, 0, 3, 150, 4, np.nan], True)])\n", (3981, 4460), False, 'import pytest\n'), ((3628, 3642), 'tests.utils.load_xy', 'load_xy', (['dt_id'], {}), '(dt_id)\n', (3635, 3642), False, 'from tests.utils import load_xy\n'), ((5041, 5055), 'tests.utils.load_xy', 'load_xy', (['dt_id'], {}), '(dt_id)\n', (5048, 5055), False, 'from tests.utils import load_xy\n'), ((5239, 5284), 'numpy.allclose', 'np.allclose', (['value', 'exp_value'], {'equal_nan': '(True)'}), '(value, exp_value, equal_nan=True)\n', (5250, 5284), True, 'import numpy as np\n'), ((3922, 3951), 'numpy.allclose', 'np.allclose', (['value', 'exp_value'], {}), '(value, exp_value)\n', (3933, 3951), True, 'import numpy as np\n'), ((3657, 3696), 
'pymfe.mfe.MFE', 'MFE', ([], {'groups': '[GNAME]', 'features': '[ft_name]'}), '(groups=[GNAME], features=[ft_name])\n', (3660, 3696), False, 'from pymfe.mfe import MFE\n'), ((5070, 5105), 'pymfe.mfe.MFE', 'MFE', ([], {'groups': '[GNAME]', 'summary': '"""mean"""'}), "(groups=[GNAME], summary='mean')\n", (5073, 5105), False, 'from pymfe.mfe import MFE\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from sudoku.sudoku import Sudoku
@pytest.fixture
def sudoku_board():
    """Pytest fixture: a Sudoku whose internal matrix is [[1, 2, 3], [4, 5, 6]]."""
    board = Sudoku()
    # Consecutive values 1..6 arranged in a 2x3 grid.
    board._matrix = np.arange(1, 7).reshape(2, 3)
    return board
| [
"sudoku.sudoku.Sudoku",
"numpy.arange"
] | [((138, 146), 'sudoku.sudoku.Sudoku', 'Sudoku', ([], {}), '()\n', (144, 146), False, 'from sudoku.sudoku import Sudoku\n'), ((208, 226), 'numpy.arange', 'np.arange', (['(1)', '(7)', '(1)'], {}), '(1, 7, 1)\n', (217, 226), True, 'import numpy as np\n')] |
'''
Sparse approximation using Smolyak's algorithm
Usage:
1) Set up a function instance that computes the terms of a multi-index
decomposition (and possibly auxiliary information about work and
contribution of the computed terms).
Here, it can be helpful to use MixedDifferences from the module indices
which turns regular algorithms into the form required for sparse
approximations. As a special case, function approximation using
least squares polynomial approximations is already implemented in
PolynomialApproximator.
n = 2
f = lambda x: np.sin(np.prod(x,1))
pa = PolynomialApproximator(f,domain = [[0,1]]**n)
2) Pass that function to a SparseApproximator instance, along
with information about work and runtime estimates, etc.
In the case of PolynomialApproximator instances, additional information
is optional.
sa = SparseApproximator(pa)
3) Use :code:`update_approximation` to perform the computations
sa.update_approximation(T=10)
pa.get_approximation().plot()
'''
import copy
import warnings
import math
from timeit import default_timer as timer
import itertools
import random
import collections
import numpy as np
from numpy import Inf
from swutil.logs import Log
from swutil.decorators import log_calls
from swutil.collections import DefaultDict
from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, \
Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, \
List, Equals, InInterval, Arg, Passed, In
from swutil import plots
from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, \
kronecker, MultiIndex, get_bundles, get_bundle
from smolyak import indices
from smolyak.applications.polynomials import PolynomialApproximator
class _Factor:
    """Scalar-valued function of multi-indices used to model per-term work or
    contribution in a Smolyak-type decomposition.

    Either an explicit ``func`` is supplied, or per-dimension ``multipliers``
    from which a product form ``prod_d multipliers[d] ** mi[d]`` is built.
    """
    @validate_args('multipliers>(~func,~dims) multipliers==n',warnings=False)
    def __init__(self,
            func:Function,
            multipliers:Dict(value_spec=Positive & Float),
            n:Positive & Integer,
            dims:Function(value_spec=Bool)=lambda dim: True,
            bundled:Bool=False,
        ):
        """Configure the evaluation function.

        :param func: callable evaluating a multi-index (or, if ``bundled``,
            an iterable of multi-indices)
        :param multipliers: per-dimension base factors; the ``validate_args``
            spec appears to make this mutually exclusive with
            ``func``/``dims`` -- confirm against swutil semantics
        :param n: number of dimensions (may be ``math.inf``)
        :param dims: predicate telling which dimensions are relevant
        :param bundled: if True, the function receives bundles of multi-indices
        """
        self.bundled = bundled
        if Passed(multipliers):
            self.multipliers = multipliers
            # Product form: one factor per dimension with a known multiplier.
            func = lambda mi: np.prod([
                self.multipliers[dim] ** mi[dim]
                for dim in self.multipliers
            ])
            if self.bundled:
                # A bundle's value is the sum over its member multi-indices.
                self.func= lambda mis: sum(func(mi) for mi in mis)
            else:
                self.func = func
            # NOTE(review): ``have_none`` is only ever set to False here and is
            # not assigned in the else-branch below -- confirm intended use.
            self.have_none = False
            if math.isinf(n):
                self.have_all = False
            else:
                # All finite dimensions covered by explicit multipliers?
                self.have_all = all(dim in self.multipliers for dim in range(n))
            self.dims = self.multipliers.__contains__
        else:
            self.multipliers = {}
            self.func = func
            self.dims = dims
    def __call__(self, *args):
        # Delegate to whichever evaluation function __init__ configured.
        return self.func(*args)
class WorkFunction(_Factor):
    """Factor modelling the work (cost) of computing a decomposition term."""
    pass
class ContributionFunction(_Factor):
    """Factor modelling the contribution of a decomposition term.

    Unlike :class:`WorkFunction`, contributions are always evaluated per
    multi-index, so the bundled mode is rejected.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.bundled:
            raise ValueError('Contribution function cannot be bundled')
class _Decomposition:
    '''
    Internal bundle of everything SparseApproximator needs to know about a
    multi-index decomposition: the term generator ``Delta``, dimensionality,
    work/contribution models, bundling, and dimension-discovery rules.
    '''
    @validate_args(
        '~work_multipliers|~work_function',
        '~contribution_multipliers|~contribution_function',
        'bundled_dims>bundled',
        Arg('init_dims|next_dims', Passed) > Arg('n', Equals(math.inf)),
        warnings=False,
    )
    def __init__(
        self,
        Delta,
        n: Positive & Integer,
        work_multipliers: Dict(value_spec=InInterval(l=1, lo=False), lenience=2),
        work_function: Instance(WorkFunction),
        contribution_multipliers: Dict(value_spec=InInterval(l=0, lo=True, r=1, ro=True), lenience=2),
        contribution_function: Instance(ContributionFunction),
        returns_work: Bool = False,
        returns_contributions: Bool = False,
        init_dims: List(Nonnegative & Integer, lenience=2) = NotPassed,
        next_dims: Function = NotPassed,
        bundled: Bool = False,
        bundled_dims: Function(value_spec=Bool) | List(value_spec=Integer) = NotPassed,
        kronecker_exponents: Function(value_spec=Nonnegative & Float) = NotPassed,
        stores_approximation: Bool = False,
        structure=None,
        callback=None,
    ):
        r'''
        :param Delta: Computes decomposition terms.
            In the most basic form, a single multi-index is passed to
            :code:`Delta` and a single value, which represents the corresponding
            decomposition term and supports vector space operations, is expected.
            If :code:`returns_work == True`, :code:`Delta` must return (value,work) (or only work if stores_approximation),
            where work is a single float representing some abstract type of work
            that was required for the computation of value. Otherwise, algorithms
            are guided by runtime.
            If :code:`returns_contributions == True`, return (value,contribution) (or only
            contribution if stores_approximation), where contribution is:
            *a single float if not stores_approximation and not bundled_dims or else
            *a dictionary {(mi,contribution(float) for mi in bundle}
            where bundle is the multi-index set that has been passed
            Otherwise, the return values will be normed by means of np.linalg.norm to
            assess their contribution, or they may implement norm() methods.
            If both :code:`returns_contributions == True` and :code:`returns_work`
            then :code:`Delta` must return (value,work,contribution) (or only (work,contribution) in case of external storage)
            If :code:`bundled == True`, then :code:`Delta` is passed an iterable of multi-indices
            (which agree in the dimensions for which :code:`bundled_dims` returns True) must return
            the sum of the corresponding decomposition elements.
            This may be useful for parallelization, or for problems where joint
            computation of decomposition elements is analytically more efficient.
            If :code:`stores_approximation == True`, no n_results is required and decomposition terms
            are expected to be stored by :code:`Delta` itself. Each call
            will contain the full multi-index set representing the current approximation.
        :type Delta: Function.
        :param n: Number of discretization parameters
        :type n: Integer, including math.inf (or 'inf')
        :param work_multipliers: Specify factor by which work increases if index is increased in a given dimension
        :type work_multipliers: Dict
        :param work_function: If work is more complex, use work_function instead of work_multipliers to compute
            expected work associated with a given multi-index
        :type work_function: Function MultiIndex->Positive reals
        :param contribution_multipliers: Specify factor by which contribution decreases if index is increased in a given dimension
        :type contribution_multipliers: Dict
        :param contribution_function: If contribution is more complex, use contribution_function instead of contribution_multipliers to compute
            expected contribution of a given multi-index
        :type contribution_function: Function MultiIndex->Positive reals
        :param returns_work: Are the deltas returned together with their own work specification?
        :type returns_work: Boolean
        :param returns_contributions: Are the deltas returned together with their own contribution specification?
        :type returns_contributions: Boolean
        :param init_dims: Initial dimensions used to create multi-index set. Defaults to :code:`range(n)`. However,
            for large or infinite `n`, it may make sense (or be necessary) to restrict this initially.
            Each time a multi-index with non-zero entry in one of these initial dimensions, say j, is selected,
            new dimensions are added, according to output of :code:`next_dims(j)`
        :type init_dims: Integer or list of integers
        :param next_dims: Assigns to each dimension a list of child dimensions (see above)
        :type next_dims: :math:`\mathbb{N}\to 2^{\mathbb{N}}`
        :param: bundled: To be used when the decomposition terms cannot be computed independent of
            each other. In this case, :code:`Delta` is called with subsets of :math:`\mathcal{I}` whose entries agree
            except for the dimensions specified in bundled_dims
        :type bundled: Boolean
        :param bundled_dims: Specifies dimensions used for bundling
        :type bundled_dims: :math:`\mathbb{N}\to\{\text{True},\text{False}\}` or list of dimensions
        :param kronecker_exponents: For infinite dimensional problems, the
            contribution of Kronecker multi-index e_j is estimated as exp(kronecker_exponents(j))
        :type kronecker_exponents: Function from integers to negative reals
        :param stores_approximation: Specifies whether approximation is computed externally. If True, :code:`Delta` is called multiple times
            with a SparseIndex as argument (or a list of SparseIndices if :code:`bundled_dims` is True as well) and must collect the associated
            decomposition terms itself. If False, :code:`Delta` is also called multiple times with a SparseIndex (or a list of SparseIndices)
            and must return the associated decomposition terms, which are then stored within the SparseApproximator instance
        :type stores_approximation: Boolean
        :param structure: Used to enforce symmetries in approximating set.
        :type structure: Function: multiindices->setsofmultiindices
        :param callback: If this returns a Trueish value, approximation is stopped (use for accuracy based approximations)
        :type callback: Function(): Bool
        '''
        if isinstance(Delta, PolynomialApproximator):
            # Polynomial approximators describe themselves: dimensionality,
            # work model, storage and bundling are taken from the instance.
            if Passed(n) or returns_work:
                raise ValueError('Do not specify `n` or `returns_work` to SparseApproximator for polynomial approximation')
            self.Delta = Delta.update_approximation
            self.n = Delta.n_acc + Delta.n
            _work_function = WorkFunction(func=Delta.estimated_work, dims=lambda n: (True if Passed(work_function) else n >= Delta.n_acc), bundled=True)
            self.returns_work = True
            self.returns_contributions = True
            self.stores_approximation = True
            self.kronecker_exponents = kronecker_exponents
            self._set_is_bundled(True, Delta.bundled_dims)
            self._set_work(work_multipliers, _work_function)
            self._set_contribution(contribution_multipliers, contribution_function)
            if isinstance(structure, str):
                # String shortcuts for common symmetry structures:
                # 'td' total degree simplex, 'pd' product/rectangle, 'sym' permutations.
                if structure.lower() == 'td':
                    structure = lambda mi: [mi.restrict(lambda n: n < Delta.n_acc) + mi2.shifted(Delta.n_acc)
                        for mi2 in indices.simplex(n=Delta.n, L=mi.mod(lambda n: n < Delta.n_acc).sum())]
                elif structure.lower() == 'pd':
                    structure = lambda mi: [mi.restrict(lambda n: n < Delta.n_acc) + mi2.shifted(Delta.n_acc)
                        for mi2 in indices.rectangle(n=Delta.n, L=max(mi.mod(lambda n: n < Delta.n_acc)))]
                elif structure.lower() == 'sym':
                    from sympy.utilities.iterables import multiset_permutations
                    def structure(mi):
                        mim = mi.mod(lambda n: n < Delta.n_acc)
                        if mim == MultiIndex():
                            return []
                        else:
                            ret = [mi.restrict(lambda n: n < Delta.n_acc) + mi2.shifted(Delta.n_acc)
                                for mi2 in [MultiIndex(perm) for perm in multiset_permutations(mi.mod(lambda n: n < Delta.n_acc).full_tuple())]]
                            return ret
        else:
            self.Delta = Delta
            self.n = n
            self.returns_work = returns_work
            self.returns_contributions = returns_contributions
            self.stores_approximation = stores_approximation
            self.kronecker_exponents = kronecker_exponents
            self._set_is_bundled(bundled, bundled_dims)
            self._set_work(work_multipliers, work_function)
            self._set_contribution(contribution_multipliers, contribution_function)
        self.callback = callback or (lambda: False)
        self.structure = structure or (lambda mi: set())
        if math.isinf(self.n):
            # Infinite-dimensional problems start from a finite seed of
            # dimensions and grow via next_dims.
            if not init_dims:
                init_dims = [0]
            self.init_dims = init_dims
            self._set_next_dims(next_dims)
        else:
            self.init_dims = list(range(self.n))
    def _set_work(self, work_multipliers, work_function):
        # Normalize the various ways of specifying work into a WorkFunction
        # whose bundling matches the decomposition's bundling.
        if work_multipliers:
            self.work_function = WorkFunction(multipliers=work_multipliers, n=self.n, bundled=self.bundled)
        elif work_function:
            if work_function.bundled == self.bundled:
                self.work_function = work_function
            elif work_function.bundled and not self.bundled:
                raise ValueError("Work function cannot act on bundles when computations don't")
            elif self.bundled and not work_function.bundled:
                # Lift a per-index work function to bundles by summation.
                self.work_function = WorkFunction(func=lambda mis: sum(work_function(mi) for mi in mis), dims=work_function.dims, bundled=True)
        else:
            # No information: unit work, no covered dimensions.
            self.work_function = WorkFunction(func=lambda _: 1, dims=lambda dim: False, bundled=self.bundled)
        if self.bundled != self.work_function.bundled:
            raise ValueError('If computations are bundled, work function must act on bundles as well (and conversely)')
    def _set_contribution(self, contribution_multipliers, contribution_function):
        # Contributions are always per-index (never bundled; see ContributionFunction).
        if contribution_multipliers:
            self.contribution_function = ContributionFunction(multipliers=contribution_multipliers, n=self.n, bundled=False)
        elif contribution_function:
            self.contribution_function = contribution_function
        else:
            self.contribution_function = ContributionFunction(func=lambda _: 1, dims=lambda dim: False, bundled=False)
    def _set_is_bundled(self, bundled, bundled_dims):
        self.bundled = bundled
        if hasattr(bundled_dims, '__contains__'):
            # Accept a container of dimensions as well as a predicate.
            self.bundled_dims = lambda dim: dim in bundled_dims
        else:
            self.bundled_dims = bundled_dims
    def _set_next_dims(self, next_dims):
        if (not next_dims) and self.n == Inf:
            # Default dimension discovery: each initial dimension spawns its successor.
            self.next_dims = lambda dim: [dim + 1] if dim + 1 not in self.init_dims else []
        else:
            self.next_dims = next_dims
class SparseApproximator:
    r'''
    Sparse approximation based on multi-index decomposition.

    Given a decomposition of :math:`f_{\infty}` as

    .. math::

        f_{\infty}=\sum_{\mathbf{k}\in\mathbb{N}^{n}} (\Delta f)(\mathbf{k}),

    this class computes and stores approximations of the form

    .. math::

        \mathcal{S}_{\mathcal{I}}f:=\sum_{\mathbf{k}\in\mathcal{I}} (\Delta f)(\mathbf{k}),

    for finite index sets :math:`\mathcal{I}\subset\mathbb{R}^n`.

    To compute the approximation, use `update_approximation`. This method provides
    multiple ways to choose the index set :math:`\mathcal{I}`.
    '''
    @validate_args(
        '~work_multipliers|~work_function',
        '~contribution_multipliers|~contribution_function',
        'bundled_dims>bundled',
        Arg('init_dims|next_dims', Passed) > Arg('n', Equals(math.inf)),
        warnings=False,
    )
    def __init__(
        self,
        Delta,
        n: Positive & Integer,
        work_multipliers: Dict(value_spec=InInterval(l=1, lo=False), lenience=2),
        work_function: Instance(WorkFunction),
        contribution_multipliers: Dict(value_spec=InInterval(l=0, lo=True, r=1, ro=True), lenience=2),
        contribution_function: Instance(ContributionFunction),
        returns_work: Bool = False,
        returns_contributions: Bool = False,
        init_dims: List(Nonnegative & Integer, lenience=2) = NotPassed,
        next_dims: Function = NotPassed,
        bundled: Bool = False,
        bundled_dims: Function(value_spec=Bool) | List(value_spec=Integer) = NotPassed,
        kronecker_exponents: Function(value_spec=Nonnegative & Float) = NotPassed,
        stores_approximation: Bool = False,
        structure=None,
        callback=None,
    ):
        r'''
        :param Delta: Computes decomposition terms.
            In the most basic form, a single multi-index is passed to
            :code:`Delta` and a single value, which represents the corresponding
            decomposition term and supports vector space operations, is expected.
            If :code:`returns_work == True`, :code:`Delta` must return (value,work) (or only work if stores_approximation),
            where work is a single float representing some abstract type of work
            that was required for the computation of value. Otherwise, algorithms
            are guided by runtime.
            If :code:`returns_contributions == True`, return (value,contribution) (or only
            contribution if stores_approximation), where contribution is:
            *a single float if not stores_approximation and not bundled_dims or else
            *a dictionary {(mi,contribution(float) for mi in bundle}
            where bundle is the multi-index set that has been passed
            Otherwise, the return values will be normed by means of np.linalg.norm to
            assess their contribution, or they may implement norm() methods.
            If both :code:`returns_contributions == True` and :code:`returns_work`
            then :code:`Delta` must return (value,work,contribution) (or only (work,contribution) in case of external storage)
            If :code:`bundled == True`, then :code:`Delta` is passed an iterable of multi-indices
            (which agree in the dimensions for which :code:`bundled_dims` returns True) must return
            the sum of the corresponding decomposition elements.
            This may be useful for parallelization, or for problems where joint
            computation of decomposition elements is analytically more efficient.
            If :code:`stores_approximation == True`, no n_results is required and decomposition terms
            are expected to be stored by :code:`Delta` itself. Each call
            will contain the full multi-index set representing the current approximation.
        :type Delta: Function.
        :param n: Number of discretization parameters
        :type n: Integer, including math.inf (or 'inf')
        :param work_multipliers: Specify factor by which work increases if index is increased in a given dimension
        :type work_multipliers: Dict
        :param work_function: If work is more complex, use work_function instead of work_multipliers to compute
            expected work associated with a given multi-index
        :type work_function: Function MultiIndex->Positive reals
        :param contribution_multipliers: Specify factor by which contribution decreases if index is increased in a given dimension
        :type contribution_multipliers: Dict
        :param contribution_function: If contribution is more complex, use contribution_function instead of contribution_multipliers to compute
            expected contribution of a given multi-index
        :type contribution_function: Function MultiIndex->Positive reals
        :param returns_work: Does the decomposition come with its own cost specification?
        :type returns_work: Boolean
        :param returns_contributions: Does the decomposition come with its own contribution specification?
        :type returns_contributions: Boolean
        :param init_dims: Initial dimensions used to create multi-index set. Defaults to :code:`range(n)`. However,
            for large or infinite `n`, it may make sense (or be necessary) to restrict this initially.
            Each time a multi-index with non-zero entry in one of these initial dimensions, say j, is selected,
            new dimensions are added, according to output of :code:`next_dims(j)`
        :type init_dims: Integer or list of integers
        :param next_dims: Assigns to each dimension a list of child dimensions (see above)
        :type next_dims: :math:`\mathbb{N}\to 2^{\mathbb{N}}`
        :param: bundled: To be used when the decomposition terms cannot be computed independent of
            each other. In this case, :code:`Delta` is called with subsets of :math:`\mathcal{I}` whose entries agree
            except for the dimensions specified in bundled_dims
        :type bundled: Boolean
        :param bundled_dims: Specifies dimensions used for bundling
        :type bundled_dims: :math:`\mathbb{N}\to\{\text{True},\text{False}\}` or list of dimensions
        :param kronecker_exponents: For infinite dimensional problems, the
            contribution of Kronecker multi-index e_j is estimated as exp(kronecker_exponents(j))
        :type kronecker_exponents: Function from integers to negative reals
        :param stores_approximation: Specifies whether approximation is computed externally. If True, :code:`Delta` is called multiple times
            with a SparseIndex as argument (or a list of SparseIndices if :code:`bundled_dims` is True as well) and must collect the associated
            decomposition terms itself. If False, :code:`Delta` is also called multiple times with a SparseIndex (or a list of SparseIndices)
            and must return the associated decomposition terms, which are then stored within the SparseApproximator instance
        :type stores_approximation: Boolean
        :param structure: Used to enforce symmetries in approximating set.
        :type structure: Function: multiindices->setsofmultiindices
        :param callback: If this returns a Trueish value, approximation is stopped (use for accuracy based approximations)
        :type callback: Function(): Bool
        '''
        self.decomposition = _Decomposition(
            Delta,
            n,
            work_multipliers,
            work_function,
            contribution_multipliers,
            contribution_function,
            returns_work,
            returns_contributions,
            init_dims,
            next_dims,
            bundled,
            bundled_dims,
            kronecker_exponents,
            stores_approximation,
            structure,
            callback,
        )
        self.log = Log(print_filter=False)
        self.data = _Data(self.decomposition)
    def update_approximation(self, mode=None, indices=None, T=None, L=None):
        '''
        Update the multi-index set and compute the resulting updated approximation.

        There are four ways to determine the updated multi-index set:
        :code:`indices`, which requires passing the new multi-indices with the argument `indices`
        :code:`adaptive`, which increases the multi-index set adaptively, one multi-index at a time
        :code:`apriori`, which constructs the set based on a-priori knowledge about work and contribution of the decomposition terms
        :code:`continuation`, which constructs the set by a combination of first learning the behavior of work and contribution, and then using this knowledge to create optimal sets.

        ADAPTIVE:
        To decide on the multi-index to be added at each step, estimates of contributions and work are maintained.
        These estimates are based on neighbors that are already in the set :math:`\\mathcal{I}`,
        unless they are specified in :code:`contribution_factors` and :code:`work_factors`.
        If user defines :code:`have_work_factor` and :code:`have_contribution_factor`
        that only estimates for some of the :code:`n` involved parameters are available,
        then the estimates from :code:`contribution_factor` and :code:`work_factor` for those parameters
        are combined with neighbor estimates for the remaining parameters.
        Must pass exactly one of the following additional arguments:
        :param L: Maximal number of new multi-indices.
        :type L: Integer
        :param T: Maximal time (in seconds).
        :type T: Positive

        APRIORI:
        Use apriori estimates of contributions and work provided to determine.
        Must pass exactly one of the following additional arguments:
        :param L: Threshold
        :type L: Positive
        :param T: Maximal time (in seconds).
        :type T: Positive

        CONTINUATION:
        In an iterative manner, determine optimal multi-index-simplices by fitting contribution
        and work parameters to simplex of previous iteration.
        :param T: Maximal time (in seconds).
        :type T: Positive

        INDICES:
        Use user-specified multi-index set
        :param indices: Multi-index set
        :type indices: iterable of MultiIndex instances

        :param mode: Update mode
        :type mode: One of 'indices', 'adaptive', 'apriori', 'continuation'
        '''
        tic_init = timer()
        # Local None-based check; deliberately shadows swutil's Passed within
        # this method, since T/L/indices default to None here.
        Passed = lambda x: x is not None
        if not Passed(mode):
            # Infer mode from which argument was provided (later checks win).
            if Passed(indices):
                mode = 'indices'
            if Passed(T):
                mode = 'adaptive'
            if Passed(L):
                mode = 'apriori'
        if mode == 'indices':
            if Passed(L) or Passed(T):
                raise ValueError('Cannot pass L or T in indices mode')
            it = [0]
        elif mode == 'apriori':
            if Passed(T) == Passed(L):
                raise ValueError('Must pass either L or T in apriori mode')
            if Passed(L):
                it = [L]
            else:
                it = itertools.count()
        elif mode == 'adaptive':
            if Passed(T) == Passed(L):
                raise ValueError('Must pass either L or T in adaptive mode')
            if self.decomposition.bundled and not self.decomposition.returns_contributions:
                raise ValueError('Cannot run adaptively when decomposition.bundled but not decomposition.returns_contributions')
            if Passed(L):
                it = range(L)
            else:
                it = itertools.count()
        elif mode == 'continuation':
            # BUG FIX: was `Passed('L')`, which is always True and made
            # continuation mode unconditionally raise.
            if Passed(L):
                raise ValueError('Cannot pass L in continuation mode')
            if np.isinf(self.decomposition.n):
                raise ValueError('Cannot use continuation for infinite-dimensional problems')
            # BUG FIX: the parameter `indices` shadows the module
            # `smolyak.indices`, so re-import the module under another name.
            from smolyak import indices as smolyak_indices
            it = itertools.count()
            n = self.decomposition.n
            # Per-dimension exponents; dimensions without fitted ratios get 1.
            work_exponents = lambda: [
                self._get_work_exponent(i)
                if (
                    self.data.work_model_estimator if
                    self.decomposition.returns_work
                    else self.data.runtime_estimator
                ).ratios[i]
                else 1
                for i in range(n)
            ]
            contribution_exponents = lambda: [
                self._get_contribution_exponent(i)
                if
                self.data.contribution_estimator.ratios[i]
                else 1
                for i in range(n)
            ]
        else:
            raise ValueError('No mode selected')
        max_time = 0
        for l in it:
            tic_iter = timer()
            # Stop when another iteration of the observed maximal duration
            # would exceed the time budget.
            if Passed(T) and l > 0 and max_time + (timer() - tic_init) > T:
                break
            if self.decomposition.callback():
                break
            if mode == 'apriori':
                mis_update = self.data.apriori_indices(l)
            elif mode == 'adaptive':
                mis_update = [self.data.next_best_mi()]
            elif mode == 'continuation':
                # Per-dimension weights combine work and contribution
                # exponents, normalized by their minimum.
                # BUG FIX: the original concatenated the two exponent lists
                # and attempted to divide a list by a list (TypeError).
                per_dim = [w + c for w, c in zip(work_exponents(), contribution_exponents())]
                scale = min(per_dim)
                mis_update = smolyak_indices.simplex(L=l, weights=[p / scale for p in per_dim], n=n)
            elif mode == 'indices':
                mis_update = indices
            self._extend(mis_update)  # TODO: check whether this breaks when mis_update is a subset of current mis
            max_time = max(timer() - tic_iter, max_time)
    @log_calls
    def _extend(self, mis):
        '''
        Compute and incorporate the decomposition terms for new multi-indices.

        :param mis: Multi-indices to add to approximation
        :type mis: Iterable of multi-indices
        '''
        if self.decomposition.bundled:
            # Group new multi-indices into bundles agreeing in all
            # non-bundled dimensions, and process bundles in a fixed order.
            miss = get_bundles(mis, self.decomposition.bundled_dims)
            not_bundled_dims = lambda dim: not self.decomposition.bundled_dims(dim)
            key = lambda mis: mis[0].restrict(not_bundled_dims)
            it = sorted(miss, key=key)
        else:
            it = mis
        for temp in it:
            work_model = None
            contribution = None
            object_slice = None
            if self.decomposition.bundled:
                mi_update = temp[0]  # Representative of the bundle
                mis_update = temp
            else:
                mi_update = temp
                mis_update = [temp]
            if not self.decomposition.stores_approximation and not self.decomposition.Delta:  # Dry run
                return
            if self.decomposition.bundled:
                # Work function must cover all bundled dimensions so it can be
                # asked about whole bundles at once.
                external_work_factor = self.decomposition.work_function(mis_update)
            else:
                external_work_factor = self.decomposition.work_function(mi_update)
            self.data.extend(mis_update)
            if self.decomposition.stores_approximation:
                argument = self.data.mis.mis  # provide full set, leave it to external to reuse computations or not
            else:
                argument = mis_update if self.decomposition.bundled else mi_update
            tic = timer()
            output = self.decomposition.Delta(copy.deepcopy(argument))
            runtime = timer() - tic
            # Possible outputs: decomposition term, work, contribution
            n_arg = sum(map(int, [not self.decomposition.stores_approximation, self.decomposition.returns_work, self.decomposition.returns_contributions]))
            if n_arg == 1 and not isinstance(output, tuple):  # Allow user to not return tuples if not necessary
                output = [output]
            if not self.decomposition.stores_approximation:
                # First output is the decomposition term itself.
                # BUG FIX: the original additionally re-sliced `output` a
                # second time here, discarding the work/contribution outputs.
                object_slice, output = output[0], output[1:]
            if self.decomposition.returns_work and self.decomposition.returns_contributions:  # Remaining 2 outputs
                work_model, contribution = output
            elif self.decomposition.returns_work and not self.decomposition.returns_contributions:  # Remaining 1 output
                work_model = output[0]
            elif not self.decomposition.returns_work and self.decomposition.returns_contributions:  # Remaining 1 output
                contribution = output[0]
            if self.decomposition.returns_contributions:  # User decides what contribution means
                if not self.decomposition.bundled_dims and not Dict.valid(contribution):
                    # Allow user to not return a dictionary if not bundled
                    # (user was only passed a single multi-index).
                    contribution = {mi_update: contribution}
            elif not self.decomposition.stores_approximation:
                # Approximation stored here: try to derive the contribution.
                try:
                    if self.decomposition.bundled_dims:
                        # norm() must be able to split contributions per multi-index.
                        contribution = {mi: object_slice.norm(mi) for mi in mis_update}
                    else:
                        contribution = {mi_update: object_slice.norm()}
                except AttributeError:  # User didn't implement .norm()
                    try:
                        if self.decomposition.bundled_dims:
                            contribution = {mi: np.linalg.norm(object_slice) for mi in mis_update}
                        else:
                            contribution = {mi_update: np.linalg.norm(object_slice)}
                    except AttributeError:
                        pass  # Contribution stays undetermined (None)
            if self.decomposition.returns_contributions:
                if contribution is None:
                    raise ValueError("Decomposition didn't return contributions")
                # BUG FIX: in the single-index case, `set(argument)` iterated
                # a lone MultiIndex instead of wrapping it in a set.
                if self.decomposition.stores_approximation or self.decomposition.bundled:
                    expected = set(argument)
                else:
                    expected = {mi_update}
                if set(contribution.keys()) != expected:
                    raise ValueError('Contributions did not match multi-index set')
            if self.decomposition.returns_work and work_model is None:
                raise ValueError("Decomposition didn't return work")
            self.data.update_estimates(mis_update, mi_update, object_slice, contribution, work_model, runtime, external_work_factor)
    def get_approximation(self):
        '''Return the sum of all stored decomposition terms.'''
        if self.decomposition.stores_approximation:
            raise ValueError('Decomposition is stored externally')
        else:
            return sum([self.data.object_slices[mi] for mi in self.data.object_slices])
    def get_contribution_multiplier(self, dim):
        '''Estimated factor by which contributions shrink per step in `dim`.'''
        return np.exp(-self._get_contribution_exponent(dim))
    def get_runtime_multiplier(self, dim):
        '''Estimated factor by which runtime grows per step in `dim`.'''
        if self.decomposition.returns_work:
            raise ValueError('Since decomposition provides abstract work model, no runtime estimates are kept. Try `get_work_multiplier` instead.')
        return np.exp(self._get_runtime_exponent(dim))
    def get_work_multiplier(self, dim):
        '''Estimated factor by which work grows per step in `dim`.'''
        return np.exp(self._get_work_exponent(dim))
    def get_total_work_model(self):
        '''Total abstract work reported by the decomposition so far.'''
        return sum(self.data.work_models._dict.values())
    def get_total_runtime(self):
        '''Total wall-clock time spent in calls to Delta so far.'''
        return sum(self.data.runtimes._dict.values())
    def get_indices(self):
        '''Return a deep copy of the current multi-index set.'''
        return copy.deepcopy(self.data.mis.mis)
    def _get_work_exponent(self, dim):
        # Work exponents come from the abstract work model if available,
        # otherwise from observed runtimes.
        estimator = self.data.work_model_estimator if self.decomposition.returns_work else self.data.runtime_estimator
        if not estimator.dims_ignore(dim):
            return estimator.exponents[dim]
        else:
            raise KeyError('No work fit for this dimension')
    def _get_runtime_exponent(self, dim):
        if not self.data.runtime_estimator.dims_ignore(dim):
            return self.data.runtime_estimator.exponents[dim]
        else:
            raise KeyError('No runtime fit for this dimension')
    def _get_contribution_exponent(self, dim):
        # Contribution exponents are stored negatively; flip the sign so a
        # positive return value means decay.
        if not self.data.contribution_estimator.dims_ignore(dim):
            return -self.data.contribution_estimator.exponents[dim]
        else:
            raise KeyError('No contribution fit for this dimension')
    @validate_args(warnings=False)
    def plot_indices(self, dims: Iterable, weights: In('contribution', 'work_model', 'runtime', 'contribution/work_model', 'contribution/runtime', False)=False, percentiles: Positive & Integer=4):
        '''
        :param dims: Dimensions that should be used for plotting
        :type dims: List of integers, length at most 3
        :param weights: Determines size of points
        :type weights: 'contribution' or 'work_model' or 'runtime' or 'contribution/work_model' or 'contribution/runtime'
        :param percentiles: Plot given number of weight-percentile groups in different colors
        :type percentiles: Integer
        '''
        if NotPassed(dims):
            dims = list(self.data.mis.active_dims)
        if not weights:
            percentiles = 1
            weight_dict = None
        elif weights == 'contribution':
            weight_dict = {mi: self.data.contributions[mi] for mi in self.get_indices()}
        elif weights == 'work_model':
            if not self.decomposition.returns_work:
                raise ValueError('Decomposition does not provide abstract work model')
            weight_dict = {mi: self.data.work_models[mi] for mi in self.get_indices()}
        elif weights == 'runtime':
            weight_dict = {mi: self.data.runtimes[mi] for mi in self.get_indices()}
        elif weights == 'contribution/work_model':
            assert (self.decomposition.returns_work)
            weight_dict = {mi: self.data.contributions[mi] / self.data.work_models[mi] for mi in self.get_indices()}
        elif weights == 'contribution/runtime':
            weight_dict = {mi: self.data.contributions[mi] / self.data.runtimes[mi] for mi in self.get_indices()}
        else:
            raise ValueError('Cannot use weights {}'.format(weights))
        plots.plot_indices(mis=self.get_indices(), dims=dims, weights=weight_dict, groups=percentiles)
class _Estimator:
    '''
    Online estimator of per-dimension exponents for work/runtime/contribution.

    Observed quantities are stored per multi-index (modulo ignored
    dimensions); ratios between neighboring multi-indices are collected per
    dimension and blended with a prior to yield exponent estimates.
    '''
    def __init__(self, dims_ignore, exponent_max, exponent_min, init_exponents=None, name=''):
        '''
        :param dims_ignore: Predicate selecting dimensions excluded from estimation
        :param exponent_max: Upper clip for fitted exponents
        :param exponent_min: Lower clip for fitted exponents
        :param init_exponents: Prior exponent per dimension (defaults to 0)
        :param name: Label for debugging/identification
        '''
        self.quantities = {}
        self.dims_ignore = dims_ignore
        # Sliding window of observed neighbor ratios kept per dimension.
        self.FIT_WINDOW = int(1e6)
        self.ratios = DefaultDict(lambda dim: collections.deque([], self.FIT_WINDOW))
        init_exponents = init_exponents or (lambda dim: 0)
        self.fallback_exponents = DefaultDict(init_exponents)  # USED AS PRIOR IN EXPONENT ESTIMATION AND AS INITIAL GUESS OF EXPONENT WHEN NO DATA AVAILABLE AT ALL
        self.exponents = DefaultDict(lambda dim: self.fallback_exponents[dim])
        self.exponent_max = exponent_max
        self.exponent_min = exponent_min
        self.active_dims = set()
        self.name = name
    def set_fallback_exponent(self, dim, fallback_exponent):
        # Override the prior for a single dimension.
        self.fallback_exponents[dim] = fallback_exponent
    def __contains__(self, mi):
        mi = mi.mod(self.dims_ignore)
        return mi in self.quantities
    def __setitem__(self, mi, q):
        '''Record an observed quantity for `mi` and update ratio statistics.'''
        self.active_dims.update(set(mi.active_dims()))
        mi = mi.mod(self.dims_ignore)
        q = float(q)
        have = mi in self.quantities
        self.quantities[mi] = q  # two reasons to overwrite: 1) in least squares polynomials for the contribution estimate, the estimate of every single coefficient gets better and better over time 2) in general, for dimensions that are modulod out because their work contribution factor is known, this entry is theoretically the same but practically different. for example, look at MLMC, assume the work factor of the first parameter is theoretically 2. then this stores effectively the cost per sample divided by 2**l. however, this cost is not actually indpendent of the level
        if not have:
            # Ratio of 0/0 is defined as 1; x/0 for x>0 as infinity.
            get_ratio = lambda a, b: a / b if b > 0 else (np.Inf if a > 0 else 1)
            for dim in self.active_dims:
                neighbor = mi + (-1) * kronecker(dim)
                if neighbor in self.quantities:
                    self.ratios[dim].append(get_ratio(q, self.quantities[neighbor]))
            self._update_exponents()
    def _update_exponents(self):
        # Refit all per-dimension exponents from the collected ratio windows.
        for dim in self.ratios:
            ratios = np.array(self.ratios[dim])
            # Median ratio, clipped into the admissible exponent range.
            estimate = max(min(np.median(ratios), np.exp(self.exponent_max)), np.exp(self.exponent_min))
            c = len(ratios)
            # Blend the prior (fallback) with the observed log-ratio; the
            # prior's influence fades as more observations accumulate.
            self.exponents[dim] = (self.fallback_exponents[dim] + c * np.log(estimate)) / (c + 1.)
    def __call__(self, mi):
        '''Return the stored or extrapolated quantity for `mi`.'''
        mi = mi.mod(self.dims_ignore)
        if mi in self.quantities:
            return self.quantities[mi]
        else:
            if mi.is_kronecker():
                # Extrapolate from the base quantity via the fitted exponent.
                dim = mi.active_dims()[0]
                return self.quantities[MultiIndex()] * np.exp(self.exponents[dim])
            else:
                # Otherwise use the largest known quantity among neighbors.
                estimate = 0
                for dim, sign in itertools.product(self.active_dims, (-1, 1)):
                    neighbor = mi + sign * kronecker(dim)
                    if neighbor in self.quantities:
                        estimate = max(estimate, self.quantities[neighbor])
                return estimate
class _Data:
    def __init__(self, decomposition):
        '''
        Bookkeeping container: estimators, observed runtimes/work/contributions,
        the current multi-index set, and the stored decomposition terms.

        :param decomposition: The _Decomposition this data belongs to
        '''
        # Clipping bounds for fitted work and contribution exponents.
        self.WORK_EXPONENT_MAX = 10
        self.WORK_EXPONENT_MIN = 0
        self.CONTRIBUTION_EXPONENT_MAX = 0
        self.CONTRIBUTION_EXPONENT_MIN = -10
        self.decomposition = decomposition
        # If decomposition is bundled, runtime_estimator only makes sense if runtime_function has all bundled dimensions. runtime_function may or not be bundled; if it is not it will be called for each index in bundled and the observed runtime will then be divided by the sum
        # work_model function does not have to be bundled if decomposition is. if it is bundled, it does have to include bundled dims in its dims
        # contribution function cannot be bundled
        self.runtime_estimator = _Estimator(self.decomposition.work_function.dims,
            exponent_max=self.WORK_EXPONENT_MAX,
            exponent_min=self.WORK_EXPONENT_MIN,
            name='runtime')
        self.runtimes = MultiIndexDict(lambda dim: self.decomposition.bundled and self.decomposition.bundled_dims(dim))
        self.contribution_estimator = _Estimator(self.decomposition.contribution_function.dims,
            exponent_max=self.CONTRIBUTION_EXPONENT_MAX,
            exponent_min=self.CONTRIBUTION_EXPONENT_MIN,
            init_exponents=self.decomposition.kronecker_exponents,
            name='contribution')
        if self.decomposition.returns_work:
            # Separate estimator for the user-provided abstract work model.
            self.work_model_estimator = _Estimator(self.decomposition.work_function.dims,
                exponent_max=self.WORK_EXPONENT_MAX,
                exponent_min=self.WORK_EXPONENT_MIN,
                name='work_model')
            self.work_models = MultiIndexDict(decomposition.bundled_dims)
        self.mis = MISet(dims=decomposition.init_dims)
        # Seed structure constraints with the root index and all initial
        # Kronecker indices so they are considered first.
        self.mis.structure_constraints = set([MultiIndex()])
        self.mis.structure_constraints |= set(MultiIndex(((i, 1),), sparse=True) for i in self.decomposition.init_dims)
        self.object_slices = MultiIndexDict(decomposition.bundled_dims)
        self.contributions = dict()
def next_best_mi(self):
mis = self.mis
for mi in mis.structure_constraints.copy():
if mi in mis:
mis.structure_constraints.discard(mi)
elif mis.is_admissible(mi):
mi_update = mi
break
else:
estimates = {mi:self.profit_estimate(mi) for mi in mis.candidates}
# plots.plot_indices(mis.candidates)
# plots.plot_indices(mis.mis,colors=[[0,0,0]])
# import matplotlib.pyplot as plt
# plt.show()
mi_update = max(mis.candidates, key=lambda mi: self.profit_estimate(mi))
if self.decomposition.structure:
mis.structure_constraints |= set(self.decomposition.structure(mi_update))
return mi_update
def apriori_indices(self,L):
def admissible(mi):
return self.profit_estimate(mi) ** (-1) <= np.exp(L + 1e-12)
try:
mis = get_admissible_indices(lambda mi: admissible(mi), self.decomposition.n)
except KeyError:
raise KeyError('Did you specify the work for all dimensions?')
def extend(self,mis_update):
if math.isinf(self.decomposition.n):
self._find_new_dims(mis_update)
self.mis.update(mis_update)
    def update_estimates(self, mis_update, mi_update, object_slice, contribution, work_model, runtime, external_work_factor):
        """Record the outcome of computing *mi_update* and refresh the estimators.

        Stores the computed slice and observed runtime, then updates either the
        work-model estimator (when the decomposition reports its own work) or
        the runtime estimator, and finally the per-index contribution
        estimator.  ``mis_update`` is accepted for interface symmetry but not
        used here.  ``external_work_factor`` is the a-priori work prediction
        that the observation is normalized by; it refers to ``work_model`` when
        the decomposition returns work, otherwise to ``runtime``.
        """
        self.object_slices[mi_update] = object_slice
        self.runtimes[mi_update] = runtime
        if self.decomposition.returns_work: # external_work_factor refers to work_model
            # Only update when the predicted work is positive; otherwise the
            # ratio below would be undefined.
            if external_work_factor>0:
                self.work_model_estimator[mi_update] = work_model / external_work_factor # Here lies reason why work factor must be bundled if decomposition is bundled: `work_model` does not distinguish different contributions to work (its for the entire mi_update-bundle), so external_work_factor must also make its predictions for entires bundles
                self.work_models[mi_update] = work_model
        else: # external_work_factor refers to runtime
            self.runtime_estimator[mi_update] = runtime / external_work_factor
        try:
            for mi in contribution:
                self.contribution_estimator[mi] = contribution[mi] / self.decomposition.contribution_function(mi) # Here lies reason why the reasoning above does not apply to contributions: they can always be split up among all the multi-indices in a bundle (this is implicit in the assumption that the used provided contributions are separate for each mi in mi_update)
                self.contributions[mi] = contribution[mi]
        except (KeyError, NameError):
            pass # Contribution could not be determined, contribution was never created
def profit_estimate(self, mi):
contribution = self.contribution_estimator(mi) * self.decomposition.contribution_function(mi)
if self.decomposition.bundled:
mi_to_bundle = lambda mi: get_bundle(mi, self.mis, self.decomposition.bundled_dims) + [mi]
else:
mi_to_bundle = lambda mi: mi
if self.decomposition.returns_work:
work = self.work_model_estimator(mi) * self.decomposition.work_function(mi_to_bundle(mi))
return contribution / work
else:
runtime = self.runtime_estimator(mi) * self.decomposition.work_function(mi_to_bundle(mi))
return contribution / runtime
def _find_new_dims(self, mis_update):
for mi in mis_update:
if mi.is_kronecker() and not mi in self.mis:
dim_trigger = mi.active_dims()[0]
dims_new = self.decomposition.next_dims(dim_trigger)
for dim in dims_new:
self.mis.add_dimensions([dim])
if self.decomposition.returns_work:
self.work_model_estimator.set_fallback_exponent(dim, self.work_model_estimator.exponents[dim_trigger])
else:
self.runtime_estimator.set_fallback_exponent(dim, self.runtime_estimator.exponents[dim_trigger])
if not self.decomposition.kronecker_exponents:
self.contribution_estimator.set_fallback_exponent(dim, self.contribution_estimator.exponents[dim_trigger])
| [
"math.isinf",
"swutil.validation.Function",
"numpy.linalg.norm",
"numpy.exp",
"swutil.validation.NotPassed",
"smolyak.indices.MultiIndex",
"numpy.prod",
"collections.deque",
"smolyak.indices.get_bundle",
"swutil.collections.DefaultDict",
"smolyak.indices.get_bundles",
"smolyak.indices.MultiInd... | [((1826, 1899), 'swutil.validation.validate_args', 'validate_args', (['"""multipliers>(~func,~dims) multipliers==n"""'], {'warnings': '(False)'}), "('multipliers>(~func,~dims) multipliers==n', warnings=False)\n", (1839, 1899), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((35700, 35729), 'swutil.validation.validate_args', 'validate_args', ([], {'warnings': '(False)'}), '(warnings=False)\n', (35713, 35729), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((2222, 2241), 'swutil.validation.Passed', 'Passed', (['multipliers'], {}), '(multipliers)\n', (2228, 2241), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((12841, 12859), 'math.isinf', 'math.isinf', (['self.n'], {}), '(self.n)\n', (12851, 12859), False, 'import math\n'), ((23043, 23066), 'swutil.logs.Log', 'Log', ([], {'print_filter': '(False)'}), '(print_filter=False)\n', (23046, 23066), False, 'from swutil.logs import Log\n'), ((25767, 25774), 'timeit.default_timer', 'timer', ([], {}), '()\n', (25772, 25774), True, 'from timeit import default_timer as timer\n'), ((34811, 34843), 'copy.deepcopy', 'copy.deepcopy', (['self.data.mis.mis'], {}), '(self.data.mis.mis)\n', (34824, 34843), False, 'import copy\n'), ((36374, 36389), 'swutil.validation.NotPassed', 'NotPassed', (['dims'], {}), '(dims)\n', (36383, 36389), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, 
Passed, In\n'), ((38000, 38027), 'swutil.collections.DefaultDict', 'DefaultDict', (['init_exponents'], {}), '(init_exponents)\n', (38011, 38027), False, 'from swutil.collections import DefaultDict\n'), ((38156, 38209), 'swutil.collections.DefaultDict', 'DefaultDict', (['(lambda dim: self.fallback_exponents[dim])'], {}), '(lambda dim: self.fallback_exponents[dim])\n', (38167, 38209), False, 'from swutil.collections import DefaultDict\n'), ((42747, 42782), 'smolyak.indices.MISet', 'MISet', ([], {'dims': 'decomposition.init_dims'}), '(dims=decomposition.init_dims)\n', (42752, 42782), False, 'from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, kronecker, MultiIndex, get_bundles, get_bundle\n'), ((42991, 43033), 'smolyak.indices.MultiIndexDict', 'MultiIndexDict', (['decomposition.bundled_dims'], {}), '(decomposition.bundled_dims)\n', (43005, 43033), False, 'from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, kronecker, MultiIndex, get_bundles, get_bundle\n'), ((44252, 44284), 'math.isinf', 'math.isinf', (['self.decomposition.n'], {}), '(self.decomposition.n)\n', (44262, 44284), False, 'import math\n'), ((1983, 2016), 'swutil.validation.Dict', 'Dict', ([], {'value_spec': '(Positive & Float)'}), '(value_spec=Positive & Float)\n', (1987, 2016), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((2079, 2104), 'swutil.validation.Function', 'Function', ([], {'value_spec': 'Bool'}), '(value_spec=Bool)\n', (2087, 2104), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((2639, 2652), 'math.isinf', 'math.isinf', (['n'], {}), '(n)\n', (2649, 2652), False, 'import math\n'), ((3749, 3771), 'swutil.validation.Instance', 
'Instance', (['WorkFunction'], {}), '(WorkFunction)\n', (3757, 3771), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((3919, 3949), 'swutil.validation.Instance', 'Instance', (['ContributionFunction'], {}), '(ContributionFunction)\n', (3927, 3949), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((4051, 4090), 'swutil.validation.List', 'List', (['(Nonnegative & Integer)'], {'lenience': '(2)'}), '(Nonnegative & Integer, lenience=2)\n', (4055, 4090), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((4299, 4339), 'swutil.validation.Function', 'Function', ([], {'value_spec': '(Nonnegative & Float)'}), '(value_spec=Nonnegative & Float)\n', (4307, 4339), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((3463, 3497), 'swutil.validation.Arg', 'Arg', (['"""init_dims|next_dims"""', 'Passed'], {}), "('init_dims|next_dims', Passed)\n", (3466, 3497), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((16142, 16164), 'swutil.validation.Instance', 'Instance', (['WorkFunction'], {}), '(WorkFunction)\n', (16150, 16164), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, 
Equals, InInterval, Arg, Passed, In\n'), ((16312, 16342), 'swutil.validation.Instance', 'Instance', (['ContributionFunction'], {}), '(ContributionFunction)\n', (16320, 16342), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((16444, 16483), 'swutil.validation.List', 'List', (['(Nonnegative & Integer)'], {'lenience': '(2)'}), '(Nonnegative & Integer, lenience=2)\n', (16448, 16483), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((16692, 16732), 'swutil.validation.Function', 'Function', ([], {'value_spec': '(Nonnegative & Float)'}), '(value_spec=Nonnegative & Float)\n', (16700, 16732), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((15856, 15890), 'swutil.validation.Arg', 'Arg', (['"""init_dims|next_dims"""', 'Passed'], {}), "('init_dims|next_dims', Passed)\n", (15859, 15890), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((25831, 25843), 'swutil.validation.Passed', 'Passed', (['mode'], {}), '(mode)\n', (25837, 25843), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((25860, 25875), 'swutil.validation.Passed', 'Passed', (['indices'], {}), '(indices)\n', (25866, 25875), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, 
Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((25926, 25935), 'swutil.validation.Passed', 'Passed', (['T'], {}), '(T)\n', (25932, 25935), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((25986, 25995), 'swutil.validation.Passed', 'Passed', (['L'], {}), '(L)\n', (25992, 25995), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((28032, 28039), 'timeit.default_timer', 'timer', ([], {}), '()\n', (28037, 28039), True, 'from timeit import default_timer as timer\n'), ((29121, 29170), 'smolyak.indices.get_bundles', 'get_bundles', (['mis', 'self.decomposition.bundled_dims'], {}), '(mis, self.decomposition.bundled_dims)\n', (29132, 29170), False, 'from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, kronecker, MultiIndex, get_bundles, get_bundle\n'), ((30615, 30622), 'timeit.default_timer', 'timer', ([], {}), '()\n', (30620, 30622), True, 'from timeit import default_timer as timer\n'), ((35780, 35885), 'swutil.validation.In', 'In', (['"""contribution"""', '"""work_model"""', '"""runtime"""', '"""contribution/work_model"""', '"""contribution/runtime"""', '(False)'], {}), "('contribution', 'work_model', 'runtime', 'contribution/work_model',\n 'contribution/runtime', False)\n", (35782, 35885), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((39819, 39845), 'numpy.array', 'np.array', (['self.ratios[dim]'], {}), '(self.ratios[dim])\n', (39827, 39845), True, 'import numpy as np\n'), ((42685, 42727), 
'smolyak.indices.MultiIndexDict', 'MultiIndexDict', (['decomposition.bundled_dims'], {}), '(decomposition.bundled_dims)\n', (42699, 42727), False, 'from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, kronecker, MultiIndex, get_bundles, get_bundle\n'), ((2316, 2389), 'numpy.prod', 'np.prod', (['[(self.multipliers[dim] ** mi[dim]) for dim in self.multipliers]'], {}), '([(self.multipliers[dim] ** mi[dim]) for dim in self.multipliers])\n', (2323, 2389), True, 'import numpy as np\n'), ((4200, 4225), 'swutil.validation.Function', 'Function', ([], {'value_spec': 'Bool'}), '(value_spec=Bool)\n', (4208, 4225), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((4230, 4254), 'swutil.validation.List', 'List', ([], {'value_spec': 'Integer'}), '(value_spec=Integer)\n', (4234, 4254), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((10251, 10260), 'swutil.validation.Passed', 'Passed', (['n'], {}), '(n)\n', (10257, 10260), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((3509, 3525), 'swutil.validation.Equals', 'Equals', (['math.inf'], {}), '(math.inf)\n', (3515, 3525), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((16593, 16618), 'swutil.validation.Function', 'Function', ([], {'value_spec': 'Bool'}), '(value_spec=Bool)\n', (16601, 16618), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, 
validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((16623, 16647), 'swutil.validation.List', 'List', ([], {'value_spec': 'Integer'}), '(value_spec=Integer)\n', (16627, 16647), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((15902, 15918), 'swutil.validation.Equals', 'Equals', (['math.inf'], {}), '(math.inf)\n', (15908, 15918), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((26075, 26084), 'swutil.validation.Passed', 'Passed', (['L'], {}), '(L)\n', (26081, 26084), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((26088, 26097), 'swutil.validation.Passed', 'Passed', (['T'], {}), '(T)\n', (26094, 26097), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((26353, 26362), 'swutil.validation.Passed', 'Passed', (['L'], {}), '(L)\n', (26359, 26362), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((28055, 28064), 'swutil.validation.Passed', 'Passed', (['T'], {}), '(T)\n', (28061, 28064), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), 
((30669, 30692), 'copy.deepcopy', 'copy.deepcopy', (['argument'], {}), '(argument)\n', (30682, 30692), False, 'import copy\n'), ((30716, 30723), 'timeit.default_timer', 'timer', ([], {}), '()\n', (30721, 30723), True, 'from timeit import default_timer as timer\n'), ((37869, 37907), 'collections.deque', 'collections.deque', (['[]', 'self.FIT_WINDOW'], {}), '([], self.FIT_WINDOW)\n', (37886, 37907), False, 'import collections\n'), ((39924, 39949), 'numpy.exp', 'np.exp', (['self.exponent_min'], {}), '(self.exponent_min)\n', (39930, 39949), True, 'import numpy as np\n'), ((40472, 40516), 'itertools.product', 'itertools.product', (['self.active_dims', '(-1, 1)'], {}), '(self.active_dims, (-1, 1))\n', (40489, 40516), False, 'import itertools\n'), ((42829, 42841), 'smolyak.indices.MultiIndex', 'MultiIndex', ([], {}), '()\n', (42839, 42841), False, 'from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, kronecker, MultiIndex, get_bundles, get_bundle\n'), ((42890, 42924), 'smolyak.indices.MultiIndex', 'MultiIndex', (['((i, 1),)'], {'sparse': '(True)'}), '(((i, 1),), sparse=True)\n', (42900, 42924), False, 'from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, kronecker, MultiIndex, get_bundles, get_bundle\n'), ((43982, 43999), 'numpy.exp', 'np.exp', (['(L + 1e-12)'], {}), '(L + 1e-12)\n', (43988, 43999), True, 'import numpy as np\n'), ((3680, 3705), 'swutil.validation.InInterval', 'InInterval', ([], {'l': '(1)', 'lo': '(False)'}), '(l=1, lo=False)\n', (3690, 3705), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((3825, 3863), 'swutil.validation.InInterval', 'InInterval', ([], {'l': '(0)', 'lo': '(True)', 'r': '(1)', 'ro': '(True)'}), '(l=0, lo=True, r=1, ro=True)\n', (3835, 3863), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, 
Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((16073, 16098), 'swutil.validation.InInterval', 'InInterval', ([], {'l': '(1)', 'lo': '(False)'}), '(l=1, lo=False)\n', (16083, 16098), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((16218, 16256), 'swutil.validation.InInterval', 'InInterval', ([], {'l': '(0)', 'lo': '(True)', 'r': '(1)', 'ro': '(True)'}), '(l=0, lo=True, r=1, ro=True)\n', (16228, 16256), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((26238, 26247), 'swutil.validation.Passed', 'Passed', (['T'], {}), '(T)\n', (26244, 26247), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((26251, 26260), 'swutil.validation.Passed', 'Passed', (['L'], {}), '(L)\n', (26257, 26260), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((26428, 26445), 'itertools.count', 'itertools.count', ([], {}), '()\n', (26443, 26445), False, 'import itertools\n'), ((26831, 26840), 'swutil.validation.Passed', 'Passed', (['L'], {}), '(L)\n', (26837, 26840), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((28851, 28858), 'timeit.default_timer', 'timer', ([], {}), '()\n', (28856, 28858), True, 'from timeit import 
default_timer as timer\n'), ((39877, 39894), 'numpy.median', 'np.median', (['ratios'], {}), '(ratios)\n', (39886, 39894), True, 'import numpy as np\n'), ((39896, 39921), 'numpy.exp', 'np.exp', (['self.exponent_max'], {}), '(self.exponent_max)\n', (39902, 39921), True, 'import numpy as np\n'), ((40365, 40392), 'numpy.exp', 'np.exp', (['self.exponents[dim]'], {}), '(self.exponents[dim])\n', (40371, 40392), True, 'import numpy as np\n'), ((46101, 46158), 'smolyak.indices.get_bundle', 'get_bundle', (['mi', 'self.mis', 'self.decomposition.bundled_dims'], {}), '(mi, self.mis, self.decomposition.bundled_dims)\n', (46111, 46158), False, 'from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, kronecker, MultiIndex, get_bundles, get_bundle\n'), ((26494, 26503), 'swutil.validation.Passed', 'Passed', (['T'], {}), '(T)\n', (26500, 26503), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((26507, 26516), 'swutil.validation.Passed', 'Passed', (['L'], {}), '(L)\n', (26513, 26516), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((26911, 26928), 'itertools.count', 'itertools.count', ([], {}), '()\n', (26926, 26928), False, 'import itertools\n'), ((26980, 26991), 'swutil.validation.Passed', 'Passed', (['"""L"""'], {}), "('L')\n", (26986, 26991), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((27079, 27109), 'numpy.isinf', 'np.isinf', (['self.decomposition.n'], {}), '(self.decomposition.n)\n', (27087, 27109), True, 'import numpy as np\n'), ((27222, 27239), 'itertools.count', 
'itertools.count', ([], {}), '()\n', (27237, 27239), False, 'import itertools\n'), ((31918, 31942), 'swutil.validation.Dict.valid', 'Dict.valid', (['contribution'], {}), '(contribution)\n', (31928, 31942), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((39539, 39553), 'smolyak.indices.kronecker', 'kronecker', (['dim'], {}), '(dim)\n', (39548, 39553), False, 'from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, kronecker, MultiIndex, get_bundles, get_bundle\n'), ((40049, 40065), 'numpy.log', 'np.log', (['estimate'], {}), '(estimate)\n', (40055, 40065), True, 'import numpy as np\n'), ((40351, 40363), 'smolyak.indices.MultiIndex', 'MultiIndex', ([], {}), '()\n', (40361, 40363), False, 'from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, kronecker, MultiIndex, get_bundles, get_bundle\n'), ((10594, 10615), 'swutil.validation.Passed', 'Passed', (['work_function'], {}), '(work_function)\n', (10600, 10615), False, 'from swutil.validation import NotPassed, Positive, Integer, Float, validate_args, Nonnegative, Instance, DefaultGenerator, Function, Iterable, Bool, Dict, List, Equals, InInterval, Arg, Passed, In\n'), ((28089, 28096), 'timeit.default_timer', 'timer', ([], {}), '()\n', (28094, 28096), True, 'from timeit import default_timer as timer\n'), ((40557, 40571), 'smolyak.indices.kronecker', 'kronecker', (['dim'], {}), '(dim)\n', (40566, 40571), False, 'from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, kronecker, MultiIndex, get_bundles, get_bundle\n'), ((11863, 11875), 'smolyak.indices.MultiIndex', 'MultiIndex', ([], {}), '()\n', (11873, 11875), False, 'from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, kronecker, MultiIndex, get_bundles, get_bundle\n'), ((32931, 32959), 'numpy.linalg.norm', 'np.linalg.norm', 
(['object_slice'], {}), '(object_slice)\n', (32945, 32959), True, 'import numpy as np\n'), ((33105, 33133), 'numpy.linalg.norm', 'np.linalg.norm', (['object_slice'], {}), '(object_slice)\n', (33119, 33133), True, 'import numpy as np\n'), ((12088, 12104), 'smolyak.indices.MultiIndex', 'MultiIndex', (['perm'], {}), '(perm)\n', (12098, 12104), False, 'from smolyak.indices import MultiIndexDict, get_admissible_indices, MISet, kronecker, MultiIndex, get_bundles, get_bundle\n')] |
import numpy, sys, os, shutil
# Query templates: (number of labels consumed, regex-like pattern with %s
# placeholders, template name).  Names q4/q9/q10/q11 come in several arities.
templates = [(1, '%s*', 'q1'), (2, '%s %s*', 'q2'), (3, '%s %s* %s*', 'q3'),
(2, '(%s | %s)*' , 'q4_2'), (3, '(%s | %s | %s)*', 'q4_3'), (4, '(%s | %s | %s | %s)*', 'q4_4'), (5, '(%s | %s | %s | %s | %s)*', 'q4_5'),
(3, '%s %s* %s', 'q5'), (2, '%s* %s*', 'q6'), (3, '%s %s %s*', 'q7'), (2, '%s? %s*', 'q8'),
(2, '(%s | %s)+', 'q9_2'), (3, '(%s | %s | %s)+', 'q9_3'), (4, '(%s | %s | %s | %s)+', 'q9_4'), (5, '(%s | %s | %s | %s | %s)+', 'q9_5'),
(3, '(%s | %s) %s*', 'q10_2'), (4, '(%s | %s | %s) %s*', 'q10_3'), (5, '(%s | %s | %s | %s) %s*', 'q10_4'), (6, '(%s | %s | %s | %s | %s) %s*', 'q10_5'),
(2, '%s %s', 'q11_2'), (3, '%s %s %s', 'q11_3'), (4, '%s %s %s %s', 'q11_4'), (5, '%s %s %s %s %s', 'q11_5')]
def gen(tpl, n, lst, k):
    """Generate *n* distinct queries from template *tpl*.

    Each query instantiates the template's placeholders with a random
    k-prefix of a permutation of *lst*.  Returns a set of
    (query string, label tuple) pairs.
    """
    queries = set()
    while len(queries) < n:
        shuffled = numpy.random.permutation(lst)
        labels = tuple(shuffled[0:k])
        queries.add((tpl % labels, labels))
    return queries
def gen_from_config(config, num_of_lalbels, num_of_queries):
    """Generate queries for every template using labels read from *config*.

    *config* has one "<something> <label>" pair per line; the first
    *num_of_lalbels* labels are used.  Returns a list of
    (template name, set of generated queries) pairs.
    """
    # Context manager closes the config file (the original leaked the handle).
    with open(config, 'r') as f:
        lbls = [l.split(' ')[1].rstrip() for l in f.readlines()]
    return [(tpl[2], gen(tpl[1], num_of_queries, lbls[0:num_of_lalbels], tpl[0])) for tpl in templates]
def print_qs(qs, root_dir):
    """Write each generated query set to its own directory under *root_dir*.

    Every (name, queries) pair gets a fresh directory root_dir/name (an
    existing one is removed first).  Query number i is written to a file
    named i with three lines: the start symbol, the labels, and the rule.
    """
    for name, queries in qs:
        target = os.path.join(root_dir, name)
        if os.path.exists(target):
            shutil.rmtree(target)
        os.mkdir(target)
        for index, (query, labels) in enumerate(queries):
            with open(os.path.join(target, str(index)), 'w') as out:
                out.write('S\n')
                out.write(' '.join(labels) + '\n')
                out.write('S -> ' + query + '\n')
# CLI entry point: argv = <config file> <number of labels> <queries per template> <output dir>
r = gen_from_config(sys.argv[1],int(sys.argv[2]),int(sys.argv[3]))
print_qs(r, sys.argv[4])
# Echo every generated (template name, query set) pair for inspection.
for s in r:
    for q in s:
        print(q)
"os.mkdir",
"os.path.exists",
"numpy.random.permutation",
"shutil.rmtree",
"os.path.join"
] | [((892, 921), 'numpy.random.permutation', 'numpy.random.permutation', (['lst'], {}), '(lst)\n', (916, 921), False, 'import numpy, sys, os, shutil\n'), ((1305, 1334), 'os.path.join', 'os.path.join', (['root_dir', 'qd[0]'], {}), '(root_dir, qd[0])\n', (1317, 1334), False, 'import numpy, sys, os, shutil\n'), ((1346, 1366), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1360, 1366), False, 'import numpy, sys, os, shutil\n'), ((1407, 1421), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (1415, 1421), False, 'import numpy, sys, os, shutil\n'), ((1379, 1398), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (1392, 1398), False, 'import numpy, sys, os, shutil\n')] |
import os
import numpy as np
import argparse
import torch
import time
import librosa
import pickle
import preprocess
from trainingDataset import trainingDataset
from model_GLU import Generator, Discriminator
def loadPickleFile(fileName):
    """Deserialize and return the object stored in the pickle file *fileName*.

    NOTE(review): pickle is unsafe on untrusted input; only load files this
    program (or a trusted preprocessing step) produced.
    """
    with open(fileName, 'rb') as handle:
        return pickle.load(handle)
# Command-line interface: every path below has a cache/data default so the
# script can run without arguments from the expected directory layout.
parser = argparse.ArgumentParser(
    description="Train CycleGAN using source dataset and target dataset")
logf0s_normalization_default = '../cache/logf0s_normalization.npz'
mcep_normalization_default = '../cache/mcep_normalization.npz'
coded_sps_A_norm = '../cache/coded_sps_A_norm.pickle'
coded_sps_B_norm = '../cache/coded_sps_B_norm.pickle'
model_checkpoint = '../cache/model_checkpoint/'
resume_training_at = '../cache/model_checkpoint/_CycleGAN_CheckPoint'
# NOTE(review): the line below overrides the checkpoint path just assigned,
# so by default training starts fresh unless --resume_training_at is passed.
resume_training_at = None
validation_A_dir_default = '../data/vcc2016_training/evaluation_all/SF1/'
output_A_dir_default = '../data/vcc2016_training/converted_sound/SF1'
validation_B_dir_default = '../data/vcc2016_training/evaluation_all/TF2/'
output_B_dir_default = '../data/vcc2016_training/converted_sound/TF2/'
parser.add_argument('--logf0s_normalization', type=str,
                    help="Cached location for log f0s normalized", default=logf0s_normalization_default)
parser.add_argument('--mcep_normalization', type=str,
                    help="Cached location for mcep normalization", default=mcep_normalization_default)
parser.add_argument('--coded_sps_A_norm', type=str,
                    help="mcep norm for data A", default=coded_sps_A_norm)
parser.add_argument('--coded_sps_B_norm', type=str,
                    help="mcep norm for data B", default=coded_sps_B_norm)
parser.add_argument('--model_checkpoint', type=str,
                    help="location where you want to save the model", default=model_checkpoint)
parser.add_argument('--resume_training_at', type=str,
                    help="Location of the pre-trained model to resume training",
                    default=resume_training_at)
parser.add_argument('--validation_A_dir', type=str,
                    help="validation set for sound source A", default=validation_A_dir_default)
parser.add_argument('--output_A_dir', type=str,
                    help="output for converted Sound Source A", default=output_A_dir_default)
parser.add_argument('--validation_B_dir', type=str,
                    help="Validation set for sound source B", default=validation_B_dir_default)
parser.add_argument('--output_B_dir', type=str,
                    help="Output for converted sound Source B", default=output_B_dir_default)
# Resolve every path from the parsed CLI arguments.
argv = parser.parse_args()
logf0s_normalization = argv.logf0s_normalization
mcep_normalization = argv.mcep_normalization
coded_sps_A_norm = argv.coded_sps_A_norm
coded_sps_B_norm = argv.coded_sps_B_norm
model_checkpoint = argv.model_checkpoint
resume_training_at = argv.resume_training_at
validation_A_dir = argv.validation_A_dir
output_A_dir = argv.output_A_dir
validation_B_dir = argv.validation_B_dir
output_B_dir = argv.output_B_dir
restart_training_at=resume_training_at
# Training hyperparameters.
start_epoch = 0
num_epochs = 5000
mini_batch_size = 1
# Normalized MCEP features for both speakers (pickled by preprocessing).
dataset_A = loadPickleFile(coded_sps_A_norm)
dataset_B = loadPickleFile(coded_sps_B_norm)
device = torch.device(
    'cuda' if torch.cuda.is_available() else 'cpu')
# Speech Parameters
# NOTE(review): the two names below are rebound from path strings to the
# loaded .npz objects.
logf0s_normalization = np.load(logf0s_normalization)
log_f0s_mean_A = logf0s_normalization['mean_A']
log_f0s_std_A = logf0s_normalization['std_A']
log_f0s_mean_B = logf0s_normalization['mean_B']
log_f0s_std_B = logf0s_normalization['std_B']
mcep_normalization = np.load(mcep_normalization)
coded_sps_A_mean = mcep_normalization['mean_A']
coded_sps_A_std = mcep_normalization['std_A']
coded_sps_B_mean = mcep_normalization['mean_B']
coded_sps_B_std = mcep_normalization['std_B']
# Generator and Discriminator
generator_A2B = Generator().to(device)
generator_B2A = Generator().to(device)
discriminator_A = Discriminator().to(device)
discriminator_B = Discriminator().to(device)
# Loss Functions
criterion_mse = torch.nn.MSELoss()
# Optimizer
g_params = list(generator_A2B.parameters()) + \
    list(generator_B2A.parameters())
# NOTE(review): g_params1 duplicates g_params and is unused below.
g_params1 = list(generator_A2B.parameters()) + list(generator_B2A.parameters())
d_params = list(discriminator_A.parameters()) + list(discriminator_B.parameters())
# Initial learning rates
generator_lr = 0.0002
discriminator_lr = 0.0001
# Learning rate decay
generator_lr_decay = generator_lr / 200000
discriminator_lr_decay = discriminator_lr / 200000
# Starts learning rate decay from after this many iterations have passed
start_decay = 200000
generator_optimizer = torch.optim.Adam(
    g_params, lr=generator_lr, betas=(0.5, 0.999))
discriminator_optimizer = torch.optim.Adam(
    d_params, lr=discriminator_lr, betas=(0.5, 0.999))
# To Load save previously saved models
modelCheckpoint = model_checkpoint
# Validation set Parameters
# NOTE(review): the four self-assignments below are no-ops.
validation_A_dir = validation_A_dir
output_A_dir = output_A_dir
validation_B_dir = validation_B_dir
output_B_dir = output_B_dir
# Storing Discriminatior and Generator Loss
generator_loss_store = []
discriminator_loss_store = []
file_name = 'log_store_non_sigmoid.txt'
if restart_training_at is not None:
    # Training will resume from previous checkpoint
    # NOTE(review): loadModel is not defined in this file; resuming would
    # raise NameError — confirm it lives in a helper module.
    start_epoch = loadModel(restart_training_at)
    print("Training resumed")
###################################################################
# finish initialization and start training
###################################################################
# 1 loop of training
start_time_epoch = time.time()
# Constants
cycle_loss_lambda = 10
identity_loss_lambda = 5
# Preparing Dataset
n_samples = len(dataset_A)
# Paired random 128-frame crops from both speakers.
dataset = trainingDataset(datasetA=dataset_A,
                          datasetB=dataset_B,
                          n_frames=128)
train_loader = torch.utils.data.DataLoader(dataset=dataset,
                                           batch_size=mini_batch_size,
                                           shuffle=True,
                                           drop_last=False)
# Debug pass: print each mini-batch instead of training on it.
for i, (real_A, real_B) in enumerate(train_loader):
    print (i,len(real_A),len(real_B),real_B)
| [
"torch.nn.MSELoss",
"numpy.load",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"model_GLU.Discriminator",
"trainingDataset.trainingDataset",
"time.time",
"model_GLU.Generator",
"torch.optim.Adam",
"pickle.load",
"torch.cuda.is_available"
] | [((320, 418), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train CycleGAN using source dataset and target dataset"""'}), "(description=\n 'Train CycleGAN using source dataset and target dataset')\n", (343, 418), False, 'import argparse\n'), ((3183, 3212), 'numpy.load', 'np.load', (['logf0s_normalization'], {}), '(logf0s_normalization)\n', (3190, 3212), True, 'import numpy as np\n'), ((3423, 3450), 'numpy.load', 'np.load', (['mcep_normalization'], {}), '(mcep_normalization)\n', (3430, 3450), True, 'import numpy as np\n'), ((3877, 3895), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (3893, 3895), False, 'import torch\n'), ((4475, 4538), 'torch.optim.Adam', 'torch.optim.Adam', (['g_params'], {'lr': 'generator_lr', 'betas': '(0.5, 0.999)'}), '(g_params, lr=generator_lr, betas=(0.5, 0.999))\n', (4491, 4538), False, 'import torch\n'), ((4570, 4637), 'torch.optim.Adam', 'torch.optim.Adam', (['d_params'], {'lr': 'discriminator_lr', 'betas': '(0.5, 0.999)'}), '(d_params, lr=discriminator_lr, betas=(0.5, 0.999))\n', (4586, 4637), False, 'import torch\n'), ((5483, 5494), 'time.time', 'time.time', ([], {}), '()\n', (5492, 5494), False, 'import time\n'), ((5615, 5684), 'trainingDataset.trainingDataset', 'trainingDataset', ([], {'datasetA': 'dataset_A', 'datasetB': 'dataset_B', 'n_frames': '(128)'}), '(datasetA=dataset_A, datasetB=dataset_B, n_frames=128)\n', (5630, 5684), False, 'from trainingDataset import trainingDataset\n'), ((5736, 5843), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'mini_batch_size', 'shuffle': '(True)', 'drop_last': '(False)'}), '(dataset=dataset, batch_size=mini_batch_size,\n shuffle=True, drop_last=False)\n', (5763, 5843), False, 'import torch\n'), ((293, 307), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (304, 307), False, 'import pickle\n'), ((3101, 3126), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3124, 
3126), False, 'import torch\n'), ((3691, 3702), 'model_GLU.Generator', 'Generator', ([], {}), '()\n', (3700, 3702), False, 'from model_GLU import Generator, Discriminator\n'), ((3730, 3741), 'model_GLU.Generator', 'Generator', ([], {}), '()\n', (3739, 3741), False, 'from model_GLU import Generator, Discriminator\n'), ((3771, 3786), 'model_GLU.Discriminator', 'Discriminator', ([], {}), '()\n', (3784, 3786), False, 'from model_GLU import Generator, Discriminator\n'), ((3816, 3831), 'model_GLU.Discriminator', 'Discriminator', ([], {}), '()\n', (3829, 3831), False, 'from model_GLU import Generator, Discriminator\n')] |
from cv2 import cv2
import numpy as np
import sys


def dodgeV2(image, mask):
    """Dodge-blend ``image`` with ``mask``.

    The dodge blend divides the bottom layer by the inverted top layer.
    This lightens the bottom layer depending on the value of the top layer;
    with an inverted-and-blurred mask it highlights the boldest edges and
    produces a pencil-sketch look.
    """
    # inverting color with 255 - ...
    return cv2.divide(image, 255 - mask, scale=256)


image_name = input("Enter the name of the input image: ")
# reading the image
img = cv2.imread(image_name)
# keep prompting until a readable image is supplied (or the user quits)
while img is None:
    image_name = input("Enter the name of the input image or Enter 'exit' to end program : ")
    # if end the program
    if image_name == "exit":
        sys.exit()
    # reading the image
    img = cv2.imread(image_name)
# resize the image to fit it to show in the screen
img = cv2.resize(img, (0, 0), None, .75, .75)
# sketch pipeline: grayscale -> invert -> blur -> dodge blend
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_invert = cv2.bitwise_not(img_gray)
# bluring or smoothing the inverted image with the kernel size (25,25)
img_blur = cv2.blur(img_invert, (25, 25))
final_img = dodgeV2(img_gray, img_blur)
# convert to bgr for showing the input and output in the same window
# this will convert the output from 2 channel to 3 channel
final_img = cv2.cvtColor(final_img, cv2.COLOR_GRAY2BGR)
# concatenate both the input and output
numpy_vertical_concat = np.concatenate((img, final_img), axis=1)
# displaying the sketch image
cv2.imshow('image', numpy_vertical_concat)
print("Press 'Esc' button to exit or Press 's' to save the image and exit.")
k = cv2.waitKey(0)
# if escape is pressed
if k == 27:
    cv2.destroyAllWindows()
# save the output image
elif k == ord('s'):
    cv2.imwrite('output.png', final_img)
    cv2.imwrite('combined.png', numpy_vertical_concat)
    cv2.destroyAllWindows()
| [
"cv2.cv2.destroyAllWindows",
"cv2.cv2.blur",
"cv2.cv2.waitKey",
"cv2.cv2.bilateralFilter",
"sys.exit",
"cv2.cv2.resize",
"cv2.cv2.divide",
"cv2.cv2.imwrite",
"cv2.cv2.GaussianBlur",
"cv2.cv2.imread",
"cv2.cv2.bitwise_not",
"cv2.cv2.cvtColor",
"numpy.concatenate",
"cv2.cv2.imshow"
] | [((143, 165), 'cv2.cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (153, 165), False, 'from cv2 import cv2\n'), ((486, 527), 'cv2.cv2.resize', 'cv2.resize', (['img', '(0, 0)', 'None', '(0.75)', '(0.75)'], {}), '(img, (0, 0), None, 0.75, 0.75)\n', (496, 527), False, 'from cv2 import cv2\n'), ((577, 614), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (589, 614), False, 'from cv2 import cv2\n'), ((652, 677), 'cv2.cv2.bitwise_not', 'cv2.bitwise_not', (['img_gray'], {}), '(img_gray)\n', (667, 677), False, 'from cv2 import cv2\n'), ((762, 792), 'cv2.cv2.blur', 'cv2.blur', (['img_invert', '(25, 25)'], {}), '(img_invert, (25, 25))\n', (770, 792), False, 'from cv2 import cv2\n'), ((855, 896), 'cv2.cv2.GaussianBlur', 'cv2.GaussianBlur', (['img_invert', '(23, 23)', '(0)'], {}), '(img_invert, (23, 23), 0)\n', (871, 896), False, 'from cv2 import cv2\n'), ((1395, 1438), 'cv2.cv2.bilateralFilter', 'cv2.bilateralFilter', (['final_img_2', '(9)', '(50)', '(50)'], {}), '(final_img_2, 9, 50, 50)\n', (1414, 1438), False, 'from cv2 import cv2\n'), ((1581, 1624), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['final_img', 'cv2.COLOR_GRAY2BGR'], {}), '(final_img, cv2.COLOR_GRAY2BGR)\n', (1593, 1624), False, 'from cv2 import cv2\n'), ((1692, 1732), 'numpy.concatenate', 'np.concatenate', (['(img, final_img)'], {'axis': '(1)'}), '((img, final_img), axis=1)\n', (1706, 1732), True, 'import numpy as np\n'), ((1841, 1883), 'cv2.cv2.imshow', 'cv2.imshow', (['"""image"""', 'numpy_vertical_concat'], {}), "('image', numpy_vertical_concat)\n", (1851, 1883), False, 'from cv2 import cv2\n'), ((1969, 1983), 'cv2.cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1980, 1983), False, 'from cv2 import cv2\n'), ((398, 420), 'cv2.cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (408, 420), False, 'from cv2 import cv2\n'), ((1198, 1238), 'cv2.cv2.divide', 'cv2.divide', (['image', '(255 - mask)'], {'scale': '(256)'}), 
'(image, 255 - mask, scale=256)\n', (1208, 1238), False, 'from cv2 import cv2\n'), ((2026, 2049), 'cv2.cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2047, 2049), False, 'from cv2 import cv2\n'), ((349, 359), 'sys.exit', 'sys.exit', ([], {}), '()\n', (357, 359), False, 'import sys\n'), ((2101, 2137), 'cv2.cv2.imwrite', 'cv2.imwrite', (['"""output.png"""', 'final_img'], {}), "('output.png', final_img)\n", (2112, 2137), False, 'from cv2 import cv2\n'), ((2143, 2193), 'cv2.cv2.imwrite', 'cv2.imwrite', (['"""combined.png"""', 'numpy_vertical_concat'], {}), "('combined.png', numpy_vertical_concat)\n", (2154, 2193), False, 'from cv2 import cv2\n'), ((2198, 2221), 'cv2.cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2219, 2221), False, 'from cv2 import cv2\n')] |
# encoding: utf-8
import operator
import numpy as np
import PIL
from histolab.filters import image_filters as imf
from ...unitutil import PILIMG, NpArrayMock, function_mock
class DescribeImageFilters:
    """Unit tests for the filter classes in ``histolab.filters.image_filters``.

    Every test follows the same mock-based pattern: the underlying functional
    implementation (or PIL/util helper) is replaced with a mock that returns a
    known image/array, the filter object is called once, and the test verifies
    that (a) the functional was invoked exactly once with the expected
    positional arguments and (b) calling the filter yields the expected type
    (``PIL.Image.Image`` or ``np.ndarray``).

    Note: each filter is invoked a second time inside the final ``type(...)``
    assertion; this happens after the single-call check has already passed,
    so it does not break ``assert_called_once_with``.
    """
    def it_calls_invert_filter_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_invert = function_mock(
            request, "histolab.filters.image_filters_functional.invert"
        )
        F_invert.return_value = image
        invert = imf.Invert()
        invert(image)
        F_invert.assert_called_once_with(image)
        assert type(invert(image)) == PIL.Image.Image
    def it_calls_pil_grayscale(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        # RgbToGrayscale delegates to PIL rather than to a functional helper.
        grayscale_filter = function_mock(request, "PIL.ImageOps.grayscale")
        grayscale_filter.return_value = image
        grayscale = imf.RgbToGrayscale()
        grayscale(image)
        grayscale_filter.assert_called_once_with(image)
        assert type(grayscale(image)) == PIL.Image.Image
    def it_calls_rgb_to_hed_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_rgb_to_hed = function_mock(
            request, "histolab.filters.image_filters_functional.rgb_to_hed"
        )
        F_rgb_to_hed.return_value = image
        rgb_to_hed = imf.RgbToHed()
        rgb_to_hed(image)
        F_rgb_to_hed.assert_called_once_with(image)
        assert type(rgb_to_hed(image)) == PIL.Image.Image
    def it_calls_hematoxylin_channel_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_hematoxylin_channel = function_mock(
            request, "histolab.filters.image_filters_functional.hematoxylin_channel"
        )
        F_hematoxylin_channel.return_value = image
        hematoxylin_channel = imf.HematoxylinChannel()
        hematoxylin_channel(image)
        F_hematoxylin_channel.assert_called_once_with(image)
        assert type(hematoxylin_channel(image)) == PIL.Image.Image
    def it_calls_eosin_channel_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_eosin_channel = function_mock(
            request, "histolab.filters.image_filters_functional.eosin_channel"
        )
        F_eosin_channel.return_value = image
        eosin_channel = imf.EosinChannel()
        eosin_channel(image)
        F_eosin_channel.assert_called_once_with(image)
        assert type(eosin_channel(image)) == PIL.Image.Image
    def it_calls_rgb_to_hsv_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_rgb_to_hsv = function_mock(
            request, "histolab.filters.image_filters_functional.rgb_to_hsv"
        )
        F_rgb_to_hsv.return_value = image
        rgb_to_hsv = imf.RgbToHsv()
        rgb_to_hsv(image)
        F_rgb_to_hsv.assert_called_once_with(image)
        assert type(rgb_to_hsv(image)) == PIL.Image.Image
    def it_calls_stretch_contrast_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_stretch_contrast = function_mock(
            request, "histolab.filters.image_filters_functional.stretch_contrast"
        )
        F_stretch_contrast.return_value = image
        # constructor args (low, high) must be forwarded to the functional
        stretch_contrast = imf.StretchContrast(0, 100)
        stretch_contrast(image)
        F_stretch_contrast.assert_called_once_with(image, 0, 100)
        assert type(stretch_contrast(image)) == PIL.Image.Image
    def it_calls_histogram_equalization_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_histogram_equalization = function_mock(
            request, "histolab.filters.image_filters_functional.histogram_equalization"
        )
        F_histogram_equalization.return_value = image
        histogram_equalization = imf.HistogramEqualization(200)
        histogram_equalization(image)
        F_histogram_equalization.assert_called_once_with(image, 200)
        assert type(histogram_equalization(image)) == PIL.Image.Image
    def it_calls_adaptive_equalization_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_adaptive_equalization = function_mock(
            request, "histolab.filters.image_filters_functional.adaptive_equalization"
        )
        F_adaptive_equalization.return_value = image
        adaptive_equalization = imf.AdaptiveEqualization(250, 0.2)
        adaptive_equalization(image)
        F_adaptive_equalization.assert_called_once_with(image, 250, 0.2)
        assert type(adaptive_equalization(image)) == PIL.Image.Image
    def it_calls_local_equalization_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_local_equalization = function_mock(
            request, "histolab.filters.image_filters_functional.local_equalization"
        )
        F_local_equalization.return_value = image
        local_equalization = imf.LocalEqualization(5)
        local_equalization(image)
        F_local_equalization.assert_called_once_with(image, 5)
        assert type(local_equalization(image)) == PIL.Image.Image
    def it_calls_kmeans_segmentation_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_kmeans_segmentation = function_mock(
            request, "histolab.filters.image_filters_functional.kmeans_segmentation"
        )
        F_kmeans_segmentation.return_value = image
        kmeans_segmentation = imf.KmeansSegmentation(5, 400)
        kmeans_segmentation(image)
        F_kmeans_segmentation.assert_called_once_with(image, 5, 400)
        assert type(kmeans_segmentation(image)) == PIL.Image.Image
    def it_calls_rag_threshold_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_rag_threshold = function_mock(
            request, "histolab.filters.image_filters_functional.rag_threshold"
        )
        F_rag_threshold.return_value = image
        rag_threshold = imf.RagThreshold(3, 600, 15)
        rag_threshold(image)
        F_rag_threshold.assert_called_once_with(image, 3, 600, 15)
        assert type(rag_threshold(image)) == PIL.Image.Image
    def it_applies_hysteresis_threshold(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_hysteresis_threshold = function_mock(
            request, "histolab.filters.image_filters_functional.hysteresis_threshold"
        )
        F_hysteresis_threshold.return_value = image
        hysteresis_threshold = imf.HysteresisThreshold(20, 150)
        hysteresis_threshold(image)
        F_hysteresis_threshold.assert_called_once_with(image, 20, 150)
        assert type(hysteresis_threshold(image)) == PIL.Image.Image
    def it_applies_hysteresis_threshold_mask_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_hysteresis_threshold_mask = function_mock(
            request,
            "histolab.filters.image_filters_functional.hysteresis_threshold_mask",
        )
        # the mask variant returns an ndarray, unlike the PIL-returning filters above
        F_hysteresis_threshold_mask.return_value = np.array(image)
        hysteresis_threshold_mask = imf.HysteresisThresholdMask(30, 170)
        hysteresis_threshold_mask(image)
        F_hysteresis_threshold_mask.assert_called_once_with(image, 30, 170)
        assert type(hysteresis_threshold_mask(image)) == np.ndarray
    def it_calls_otsu_threshold_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_otsu_threshold = function_mock(
            request, "histolab.filters.image_filters_functional.otsu_threshold"
        )
        F_otsu_threshold.return_value = np.array(image)
        otsu_threshold = imf.OtsuThreshold()
        otsu_threshold(image)
        F_otsu_threshold.assert_called_once_with(image)
        assert type(otsu_threshold(image)) == np.ndarray
    def it_calls_local_otsu_threshold_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_local_otsu_threshold = function_mock(
            request, "histolab.filters.image_filters_functional.local_otsu_threshold"
        )
        F_local_otsu_threshold.return_value = np.array(image)
        local_otsu_threshold = imf.LocalOtsuThreshold(5)
        local_otsu_threshold(image)
        F_local_otsu_threshold.assert_called_once_with(image, 5)
        assert type(local_otsu_threshold(image)) == np.ndarray
    def it_calls_filter_entropy_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_filter_entropy = function_mock(
            request, "histolab.filters.image_filters_functional.filter_entropy"
        )
        F_filter_entropy.return_value = np.array(image)
        filter_entropy = imf.FilterEntropy(3, 6)
        filter_entropy(image)
        F_filter_entropy.assert_called_once_with(image, 3, 6)
        assert type(filter_entropy(image)) == np.ndarray
    def it_calls_canny_edges_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_canny_edges = function_mock(
            request, "histolab.filters.image_filters_functional.canny_edges"
        )
        F_canny_edges.return_value = np.array(image)
        canny_edges = imf.CannyEdges(0.8, 0.3, 13)
        canny_edges(image)
        F_canny_edges.assert_called_once_with(image, 0.8, 0.3, 13)
        assert type(canny_edges(image)) == np.ndarray
    def it_calls_grays_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_grays = function_mock(
            request, "histolab.filters.image_filters_functional.grays"
        )
        F_grays.return_value = np.array(image)
        grays = imf.Grays(20)
        grays(image)
        F_grays.assert_called_once_with(image, 20)
        assert type(grays(image)) == np.ndarray
    def it_calls_green_channel_filter_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_green_channel_filter = function_mock(
            request, "histolab.filters.image_filters_functional.green_channel_filter"
        )
        F_green_channel_filter.return_value = np.array(image)
        green_channel_filter = imf.GreenChannelFilter(250, False, 85.0)
        green_channel_filter(image)
        F_green_channel_filter.assert_called_once_with(image, 250, False, 85.0)
        assert type(green_channel_filter(image)) == np.ndarray
    def it_calls_red_filter_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_red_filter = function_mock(
            request, "histolab.filters.image_filters_functional.red_filter"
        )
        F_red_filter.return_value = np.array(image)
        red_filter = imf.RedFilter(180, 100, 85)
        red_filter(image)
        F_red_filter.assert_called_once_with(image, 180, 100, 85)
        assert type(red_filter(image)) == np.ndarray
    def it_calls_red_pen_filter_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_red_pen_filter = function_mock(
            request, "histolab.filters.image_filters_functional.red_pen_filter"
        )
        F_red_pen_filter.return_value = np.array(image)
        red_pen_filter = imf.RedPenFilter()
        red_pen_filter(image)
        F_red_pen_filter.assert_called_once_with(image)
        assert type(red_pen_filter(image)) == np.ndarray
    def it_calls_green_filter_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_green_filter = function_mock(
            request, "histolab.filters.image_filters_functional.green_filter"
        )
        F_green_filter.return_value = np.array(image)
        green_filter = imf.GreenFilter(150, 160, 140)
        green_filter(image)
        F_green_filter.assert_called_once_with(image, 150, 160, 140)
        assert type(green_filter(image)) == np.ndarray
    def it_calls_green_pen_filter_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_green_pen_filter = function_mock(
            request, "histolab.filters.image_filters_functional.green_pen_filter"
        )
        F_green_pen_filter.return_value = np.array(image)
        green_pen_filter = imf.GreenPenFilter()
        green_pen_filter(image)
        F_green_pen_filter.assert_called_once_with(image)
        assert type(green_pen_filter(image)) == np.ndarray
    def it_calls_blue_filter_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_blue_filter = function_mock(
            request, "histolab.filters.image_filters_functional.blue_filter"
        )
        F_blue_filter.return_value = np.array(image)
        blue_filter = imf.BlueFilter(60, 120, 190)
        blue_filter(image)
        F_blue_filter.assert_called_once_with(image, 60, 120, 190)
        assert type(blue_filter(image)) == np.ndarray
    def it_calls_blue_pen_filter_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_blue_pen_filter = function_mock(
            request, "histolab.filters.image_filters_functional.blue_pen_filter"
        )
        F_blue_pen_filter.return_value = np.array(image)
        blue_pen_filter = imf.BluePenFilter()
        blue_pen_filter(image)
        F_blue_pen_filter.assert_called_once_with(image)
        assert type(blue_pen_filter(image)) == np.ndarray
    def it_calls_pen_marks_filter_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_pen_marks = function_mock(
            request, "histolab.filters.image_filters_functional.pen_marks"
        )
        F_pen_marks.return_value = np.array(image)
        pen_marks = imf.PenMarks()
        pen_marks(image)
        F_pen_marks.assert_called_once_with(image)
        assert type(pen_marks(image)) == np.ndarray
    def it_calls_np_to_pil(self, request):
        # ToPILImage wraps histolab.util.np_to_pil rather than a functional helper
        array = NpArrayMock.ONES_30X30_UINT8
        util_np_to_pil = function_mock(request, "histolab.util.np_to_pil")
        util_np_to_pil.return_value = PIL.Image.fromarray(array)
        to_pil_image = imf.ToPILImage()
        to_pil_image(array)
        util_np_to_pil.assert_called_once_with(array)
        assert type(to_pil_image(array)) == PIL.Image.Image
    def it_calls_apply_mask_image(self, request):
        # the image is bound at construction time; the mask is the call argument
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        mask = NpArrayMock.ONES_500X500X4_BOOL
        util_apply_mask_image = function_mock(request, "histolab.util.apply_mask_image")
        util_apply_mask_image.return_value = PIL.Image.fromarray(np.array(image) * mask)
        class_apply_mask_image = imf.ApplyMaskImage(image)
        class_apply_mask_image(mask)
        util_apply_mask_image.assert_called_once_with(image, mask)
        assert type(util_apply_mask_image(image, mask)) == PIL.Image.Image
    def it_calls_lambda_filter(self, request):
        # Lambda should invoke the user-supplied callable with the image
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        image_np = np.array(image)
        fun_ = function_mock(request, "numpy.array")
        fun_.return_value = image_np
        lambda_filter = imf.Lambda(fun_)
        lambda_filter(image)
        fun_.assert_called_once_with(image)
        assert type(lambda_filter(image)) == np.ndarray
    def it_calls_yen_threshold(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_yen_threshold = function_mock(
            request, "histolab.filters.image_filters_functional.yen_threshold"
        )
        F_yen_threshold.return_value = np.array(image)
        yen_threshold = imf.YenThreshold()
        yen_threshold(image)
        # the default comparison direction forwarded by YenThreshold is operator.lt
        F_yen_threshold.assert_called_once_with(image, operator.lt)
        assert type(yen_threshold(image)) == np.ndarray
    def it_calls_rgb_to_lab_functional(self, request):
        image = PILIMG.RGBA_COLOR_500X500_155_249_240
        F_rgb_to_lab = function_mock(
            request, "histolab.filters.image_filters_functional.rgb_to_lab"
        )
        F_rgb_to_lab.return_value = image
        rgb_to_lab = imf.RgbToLab()
        rgb_to_lab(image)
        F_rgb_to_lab.assert_called_once_with(image)
        assert type(rgb_to_lab(image)) == PIL.Image.Image
| [
"histolab.filters.image_filters.RgbToGrayscale",
"histolab.filters.image_filters.GreenPenFilter",
"histolab.filters.image_filters.OtsuThreshold",
"histolab.filters.image_filters.RgbToHsv",
"histolab.filters.image_filters.CannyEdges",
"histolab.filters.image_filters.AdaptiveEqualization",
"histolab.filte... | [((490, 502), 'histolab.filters.image_filters.Invert', 'imf.Invert', ([], {}), '()\n', (500, 502), True, 'from histolab.filters import image_filters as imf\n'), ((873, 893), 'histolab.filters.image_filters.RgbToGrayscale', 'imf.RgbToGrayscale', ([], {}), '()\n', (891, 893), True, 'from histolab.filters import image_filters as imf\n'), ((1331, 1345), 'histolab.filters.image_filters.RgbToHed', 'imf.RgbToHed', ([], {}), '()\n', (1343, 1345), True, 'from histolab.filters import image_filters as imf\n'), ((1826, 1850), 'histolab.filters.image_filters.HematoxylinChannel', 'imf.HematoxylinChannel', ([], {}), '()\n', (1848, 1850), True, 'from histolab.filters import image_filters as imf\n'), ((2328, 2346), 'histolab.filters.image_filters.EosinChannel', 'imf.EosinChannel', ([], {}), '()\n', (2344, 2346), True, 'from histolab.filters import image_filters as imf\n'), ((2791, 2805), 'histolab.filters.image_filters.RgbToHsv', 'imf.RgbToHsv', ([], {}), '()\n', (2803, 2805), True, 'from histolab.filters import image_filters as imf\n'), ((3271, 3298), 'histolab.filters.image_filters.StretchContrast', 'imf.StretchContrast', (['(0)', '(100)'], {}), '(0, 100)\n', (3290, 3298), True, 'from histolab.filters import image_filters as imf\n'), ((3820, 3850), 'histolab.filters.image_filters.HistogramEqualization', 'imf.HistogramEqualization', (['(200)'], {}), '(200)\n', (3845, 3850), True, 'from histolab.filters import image_filters as imf\n'), ((4382, 4416), 'histolab.filters.image_filters.AdaptiveEqualization', 'imf.AdaptiveEqualization', (['(250)', '(0.2)'], {}), '(250, 0.2)\n', (4406, 4416), True, 'from histolab.filters import image_filters as imf\n'), ((4935, 4959), 'histolab.filters.image_filters.LocalEqualization', 'imf.LocalEqualization', (['(5)'], {}), '(5)\n', (4956, 4959), True, 'from histolab.filters import image_filters as imf\n'), ((5467, 5497), 'histolab.filters.image_filters.KmeansSegmentation', 'imf.KmeansSegmentation', (['(5)', '(400)'], {}), '(5, 
400)\n', (5489, 5497), True, 'from histolab.filters import image_filters as imf\n'), ((5983, 6011), 'histolab.filters.image_filters.RagThreshold', 'imf.RagThreshold', (['(3)', '(600)', '(15)'], {}), '(3, 600, 15)\n', (5999, 6011), True, 'from histolab.filters import image_filters as imf\n'), ((6509, 6541), 'histolab.filters.image_filters.HysteresisThreshold', 'imf.HysteresisThreshold', (['(20)', '(150)'], {}), '(20, 150)\n', (6532, 6541), True, 'from histolab.filters import image_filters as imf\n'), ((7064, 7079), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (7072, 7079), True, 'import numpy as np\n'), ((7116, 7152), 'histolab.filters.image_filters.HysteresisThresholdMask', 'imf.HysteresisThresholdMask', (['(30)', '(170)'], {}), '(30, 170)\n', (7143, 7152), True, 'from histolab.filters import image_filters as imf\n'), ((7626, 7641), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (7634, 7641), True, 'import numpy as np\n'), ((7667, 7686), 'histolab.filters.image_filters.OtsuThreshold', 'imf.OtsuThreshold', ([], {}), '()\n', (7684, 7686), True, 'from histolab.filters import image_filters as imf\n'), ((8142, 8157), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (8150, 8157), True, 'import numpy as np\n'), ((8189, 8214), 'histolab.filters.image_filters.LocalOtsuThreshold', 'imf.LocalOtsuThreshold', (['(5)'], {}), '(5)\n', (8211, 8214), True, 'from histolab.filters import image_filters as imf\n'), ((8667, 8682), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (8675, 8682), True, 'import numpy as np\n'), ((8708, 8731), 'histolab.filters.image_filters.FilterEntropy', 'imf.FilterEntropy', (['(3)', '(6)'], {}), '(3, 6)\n', (8725, 8731), True, 'from histolab.filters import image_filters as imf\n'), ((9157, 9172), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (9165, 9172), True, 'import numpy as np\n'), ((9195, 9223), 'histolab.filters.image_filters.CannyEdges', 'imf.CannyEdges', (['(0.8)', '(0.3)', '(13)'], {}), '(0.8, 0.3, 
13)\n', (9209, 9223), True, 'from histolab.filters import image_filters as imf\n'), ((9624, 9639), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (9632, 9639), True, 'import numpy as np\n'), ((9656, 9669), 'histolab.filters.image_filters.Grays', 'imf.Grays', (['(20)'], {}), '(20)\n', (9665, 9669), True, 'from histolab.filters import image_filters as imf\n'), ((10102, 10117), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (10110, 10117), True, 'import numpy as np\n'), ((10149, 10189), 'histolab.filters.image_filters.GreenChannelFilter', 'imf.GreenChannelFilter', (['(250)', '(False)', '(85.0)'], {}), '(250, False, 85.0)\n', (10171, 10189), True, 'from histolab.filters import image_filters as imf\n'), ((10641, 10656), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (10649, 10656), True, 'import numpy as np\n'), ((10678, 10705), 'histolab.filters.image_filters.RedFilter', 'imf.RedFilter', (['(180)', '(100)', '(85)'], {}), '(180, 100, 85)\n', (10691, 10705), True, 'from histolab.filters import image_filters as imf\n'), ((11139, 11154), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (11147, 11154), True, 'import numpy as np\n'), ((11180, 11198), 'histolab.filters.image_filters.RedPenFilter', 'imf.RedPenFilter', ([], {}), '()\n', (11196, 11198), True, 'from histolab.filters import image_filters as imf\n'), ((11622, 11637), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (11630, 11637), True, 'import numpy as np\n'), ((11661, 11691), 'histolab.filters.image_filters.GreenFilter', 'imf.GreenFilter', (['(150)', '(160)', '(140)'], {}), '(150, 160, 140)\n', (11676, 11691), True, 'from histolab.filters import image_filters as imf\n'), ((12140, 12155), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (12148, 12155), True, 'import numpy as np\n'), ((12183, 12203), 'histolab.filters.image_filters.GreenPenFilter', 'imf.GreenPenFilter', ([], {}), '()\n', (12201, 12203), True, 'from histolab.filters import image_filters as 
imf\n'), ((12629, 12644), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (12637, 12644), True, 'import numpy as np\n'), ((12667, 12695), 'histolab.filters.image_filters.BlueFilter', 'imf.BlueFilter', (['(60)', '(120)', '(190)'], {}), '(60, 120, 190)\n', (12681, 12695), True, 'from histolab.filters import image_filters as imf\n'), ((13136, 13151), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (13144, 13151), True, 'import numpy as np\n'), ((13178, 13197), 'histolab.filters.image_filters.BluePenFilter', 'imf.BluePenFilter', ([], {}), '()\n', (13195, 13197), True, 'from histolab.filters import image_filters as imf\n'), ((13619, 13634), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (13627, 13634), True, 'import numpy as np\n'), ((13655, 13669), 'histolab.filters.image_filters.PenMarks', 'imf.PenMarks', ([], {}), '()\n', (13667, 13669), True, 'from histolab.filters import image_filters as imf\n'), ((14002, 14028), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['array'], {}), '(array)\n', (14021, 14028), False, 'import PIL\n'), ((14052, 14068), 'histolab.filters.image_filters.ToPILImage', 'imf.ToPILImage', ([], {}), '()\n', (14066, 14068), True, 'from histolab.filters import image_filters as imf\n'), ((14576, 14601), 'histolab.filters.image_filters.ApplyMaskImage', 'imf.ApplyMaskImage', (['image'], {}), '(image)\n', (14594, 14601), True, 'from histolab.filters import image_filters as imf\n'), ((14904, 14919), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (14912, 14919), True, 'import numpy as np\n'), ((15034, 15050), 'histolab.filters.image_filters.Lambda', 'imf.Lambda', (['fun_'], {}), '(fun_)\n', (15044, 15050), True, 'from histolab.filters import image_filters as imf\n'), ((15453, 15468), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (15461, 15468), True, 'import numpy as np\n'), ((15493, 15511), 'histolab.filters.image_filters.YenThreshold', 'imf.YenThreshold', ([], {}), '()\n', (15509, 15511), True, 'from 
histolab.filters import image_filters as imf\n'), ((15964, 15978), 'histolab.filters.image_filters.RgbToLab', 'imf.RgbToLab', ([], {}), '()\n', (15976, 15978), True, 'from histolab.filters import image_filters as imf\n'), ((14519, 14534), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (14527, 14534), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Tests for comparing RDMs
@author: heiko
"""
import unittest
import numpy as np
from numpy.testing import assert_array_almost_equal
import pyrsa as rsa
class TestCompareRDM(unittest.TestCase):
    """Tests for pyrsa.rdm.compare on complete (NaN-free) RDMs.

    Fixtures: one single RDM and two stacks (3 and 7 RDMs), each with
    15 random dissimilarities, i.e. 6 patterns per RDM.
    """

    def setUp(self):
        """Create the random RDM fixtures used by every test.

        The RNG is seeded so the fixtures -- and therefore the tests --
        are reproducible across runs.
        """
        np.random.seed(0)
        dissimilarities1 = np.random.rand(1, 15)
        des1 = {'session': 0, 'subj': 0}
        self.test_rdm1 = rsa.rdm.RDMs(
            dissimilarities=dissimilarities1,
            dissimilarity_measure='test',
            descriptors=des1)
        dissimilarities2 = np.random.rand(3, 15)
        des2 = {'session': 0, 'subj': 0}
        self.test_rdm2 = rsa.rdm.RDMs(
            dissimilarities=dissimilarities2,
            dissimilarity_measure='test',
            descriptors=des2
            )
        dissimilarities3 = np.random.rand(7, 15)
        self.test_rdm3 = rsa.rdm.RDMs(
            dissimilarities=dissimilarities3,
            dissimilarity_measure='test',
            descriptors=des2
            )

    def test_compare_cosine(self):
        """Cosine similarity is 1 for self-comparison and < 1 otherwise."""
        from pyrsa.rdm.compare import compare_cosine
        result = compare_cosine(self.test_rdm1, self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare_cosine(self.test_rdm1, self.test_rdm2)
        assert np.all(result < 1)

    def test_compare_cosine_cov(self):
        """Covariance-weighted cosine with identity sigma_k: 1 on self, < 1 otherwise."""
        from pyrsa.rdm.compare import compare_cosine_cov_weighted
        result = compare_cosine_cov_weighted(self.test_rdm1,
                                             self.test_rdm1,
                                             sigma_k=np.eye(6))
        assert_array_almost_equal(result, 1)
        result = compare_cosine_cov_weighted(self.test_rdm1,
                                             self.test_rdm2,
                                             sigma_k=np.eye(6))
        assert np.all(result < 1)

    def test_compare_cosine_loop(self):
        """compare_cosine matches an explicit pairwise double loop."""
        from pyrsa.rdm.compare import compare_cosine
        result = compare_cosine(self.test_rdm2, self.test_rdm3)
        assert result.shape[0] == 3
        assert result.shape[1] == 7
        result_loop = np.zeros_like(result)
        d1 = self.test_rdm2.get_vectors()
        d2 = self.test_rdm3.get_vectors()
        for i in range(result_loop.shape[0]):
            for j in range(result_loop.shape[1]):
                # cosine = <d1, d2> / (||d1|| * ||d2||)
                result_loop[i, j] = (np.sum(d1[i] * d2[j])
                                     / np.sqrt(np.sum(d1[i] * d1[i]))
                                     / np.sqrt(np.sum(d2[j] * d2[j])))
        assert_array_almost_equal(result, result_loop)

    def test_compare_correlation(self):
        """Pearson correlation is 1 for self-comparison and < 1 otherwise."""
        from pyrsa.rdm.compare import compare_correlation
        result = compare_correlation(self.test_rdm1, self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare_correlation(self.test_rdm1, self.test_rdm2)
        assert np.all(result < 1)

    def test_compare_correlation_cov(self):
        """Covariance-weighted correlation without sigma_k: 1 on self, < 1 otherwise."""
        from pyrsa.rdm.compare import compare_correlation_cov_weighted
        result = compare_correlation_cov_weighted(self.test_rdm1,
                                                  self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare_correlation_cov_weighted(self.test_rdm1,
                                                  self.test_rdm2)
        assert np.all(result < 1)

    def test_compare_correlation_cov_sk(self):
        """Covariance-weighted correlation with identity sigma_k behaves the same."""
        from pyrsa.rdm.compare import compare_correlation_cov_weighted
        result = compare_correlation_cov_weighted(self.test_rdm1,
                                                  self.test_rdm1,
                                                  sigma_k=np.eye(6))
        assert_array_almost_equal(result, 1)
        result = compare_correlation_cov_weighted(self.test_rdm1,
                                                  self.test_rdm2,
                                                  sigma_k=np.eye(6))
        assert np.all(result < 1)

    def test_compare_corr_loop(self):
        """compare_correlation matches an explicit loop on mean-centered vectors."""
        from pyrsa.rdm.compare import compare_correlation
        result = compare_correlation(self.test_rdm2, self.test_rdm3)
        assert result.shape[0] == 3
        assert result.shape[1] == 7
        result_loop = np.zeros_like(result)
        d1 = self.test_rdm2.get_vectors()
        d2 = self.test_rdm3.get_vectors()
        # Pearson r is the cosine of the mean-centered vectors.
        d1 = d1 - np.mean(d1, 1, keepdims=True)
        d2 = d2 - np.mean(d2, 1, keepdims=True)
        for i in range(result_loop.shape[0]):
            for j in range(result_loop.shape[1]):
                result_loop[i, j] = (np.sum(d1[i] * d2[j])
                                     / np.sqrt(np.sum(d1[i] * d1[i]))
                                     / np.sqrt(np.sum(d2[j] * d2[j])))
        assert_array_almost_equal(result, result_loop)

    def test_compare_spearman(self):
        """Spearman correlation is 1 for self-comparison and < 1 otherwise."""
        from pyrsa.rdm.compare import compare_spearman
        result = compare_spearman(self.test_rdm1, self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare_spearman(self.test_rdm1, self.test_rdm2)
        assert np.all(result < 1)

    def test_compare_rho_a(self):
        """rho_a is 1 for self-comparison and < 1 otherwise."""
        from pyrsa.rdm.compare import compare_rho_a
        result = compare_rho_a(self.test_rdm1, self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare_rho_a(self.test_rdm1, self.test_rdm2)
        assert np.all(result < 1)

    def test_spearman_equal_scipy(self):
        """compare_spearman agrees with scipy.stats.spearmanr pair by pair."""
        from pyrsa.rdm.compare import _parse_input_rdms
        from pyrsa.rdm.compare import _all_combinations
        import scipy.stats
        from pyrsa.rdm.compare import compare_spearman

        def _spearman_r(vector1, vector2):
            """computes the spearman rank correlation between two vectors

            Args:
                vector1 (numpy.ndarray):
                    first vector
                vector2 (numpy.ndarray):
                    second vector
            Returns:
                corr (float):
                    spearman r

            """
            corr = scipy.stats.spearmanr(vector1, vector2).correlation
            return corr
        vector1, vector2, _ = _parse_input_rdms(self.test_rdm1, self.test_rdm2)
        result = _all_combinations(vector1, vector2, _spearman_r)
        result2 = compare_spearman(self.test_rdm1, self.test_rdm2)
        assert_array_almost_equal(result, result2)

    def test_compare_kendall_tau(self):
        """Kendall tau is 1 for self-comparison and < 1 otherwise."""
        from pyrsa.rdm.compare import compare_kendall_tau
        result = compare_kendall_tau(self.test_rdm1, self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare_kendall_tau(self.test_rdm1, self.test_rdm2)
        assert np.all(result < 1)

    def test_compare_kendall_tau_a(self):
        """Kendall tau-a is 1 for self-comparison and < 1 otherwise."""
        from pyrsa.rdm.compare import compare_kendall_tau_a
        result = compare_kendall_tau_a(self.test_rdm1, self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare_kendall_tau_a(self.test_rdm1, self.test_rdm2)
        assert np.all(result < 1)

    def test_compare(self):
        """The generic compare() dispatcher runs for every supported method."""
        from pyrsa.rdm.compare import compare
        result = compare(self.test_rdm1, self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare(self.test_rdm1, self.test_rdm2, method='corr')
        result = compare(self.test_rdm1, self.test_rdm2, method='corr_cov')
        result = compare(self.test_rdm1, self.test_rdm2, method='spearman')
        result = compare(self.test_rdm1, self.test_rdm2, method='cosine')
        result = compare(self.test_rdm1, self.test_rdm2, method='cosine_cov')
        result = compare(self.test_rdm1, self.test_rdm2, method='kendall')
class TestCompareRDMNaN(unittest.TestCase):
    """Tests for pyrsa.rdm.compare on RDMs containing NaN entries.

    The fixtures are built by subsampling patterns with a repeated index
    ([0, 1, 1, 3, 4, 5]), which produces RDMs with invalid (NaN) entries
    that the comparison functions must ignore consistently.
    """

    def setUp(self):
        """Create random, pattern-subsampled RDM fixtures.

        The RNG is seeded so the fixtures -- and therefore the tests --
        are reproducible across runs.
        """
        np.random.seed(0)
        dissimilarities1 = np.random.rand(1, 15)
        des1 = {'session': 0, 'subj': 0}
        test_rdm1 = rsa.rdm.RDMs(
            dissimilarities=dissimilarities1,
            dissimilarity_measure='test',
            descriptors=des1)
        self.test_rdm1 = test_rdm1.subsample_pattern(
            'index', [0, 1, 1, 3, 4, 5])
        dissimilarities2 = np.random.rand(3, 15)
        des2 = {'session': 0, 'subj': 0}
        test_rdm2 = rsa.rdm.RDMs(
            dissimilarities=dissimilarities2,
            dissimilarity_measure='test',
            descriptors=des2
            )
        self.test_rdm2 = test_rdm2.subsample_pattern('index',
                                                     [0, 1, 1, 3, 4, 5])
        dissimilarities3 = np.random.rand(7, 15)
        test_rdm3 = rsa.rdm.RDMs(
            dissimilarities=dissimilarities3,
            dissimilarity_measure='test',
            descriptors=des2
            )
        self.test_rdm3 = test_rdm3.subsample_pattern('index',
                                                     [0, 1, 1, 3, 4, 5])

    def test_compare_cosine(self):
        """Cosine similarity is 1 for self-comparison and < 1 otherwise."""
        from pyrsa.rdm.compare import compare_cosine
        result = compare_cosine(self.test_rdm1, self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare_cosine(self.test_rdm1, self.test_rdm2)
        assert np.all(result < 1)

    def test_compare_cosine_cov(self):
        """Covariance-weighted cosine with identity sigma_k: 1 on self, < 1 otherwise."""
        from pyrsa.rdm.compare import compare_cosine_cov_weighted
        result = compare_cosine_cov_weighted(self.test_rdm1,
                                             self.test_rdm1,
                                             sigma_k=np.eye(6))
        assert_array_almost_equal(result, 1)
        result = compare_cosine_cov_weighted(self.test_rdm1,
                                             self.test_rdm2,
                                             sigma_k=np.eye(6))
        assert np.all(result < 1)

    def test_compare_cosine_cov_sk(self):
        """sigma_k=None, ones(6) and eye(6) all give the same weighted cosine."""
        from pyrsa.rdm.compare import compare_cosine_cov_weighted
        result = compare_cosine_cov_weighted(self.test_rdm1,
                                             self.test_rdm2,
                                             sigma_k=None)
        result_1D = compare_cosine_cov_weighted(self.test_rdm1,
                                                self.test_rdm2,
                                                sigma_k=np.ones(6))
        result_2D = compare_cosine_cov_weighted(self.test_rdm1,
                                                self.test_rdm2,
                                                sigma_k=np.eye(6))
        assert_array_almost_equal(result, result_1D)
        assert_array_almost_equal(result, result_2D)

    def test_cosine_cov_consistency(self):
        """The fast weighted-cosine implementation matches the slow reference."""
        from pyrsa.rdm.compare import _cosine_cov_weighted
        from pyrsa.rdm.compare import _cosine_cov_weighted_slow
        from pyrsa.rdm.compare import _parse_input_rdms
        vector1, vector2, nan_idx = _parse_input_rdms(self.test_rdm1,
                                                      self.test_rdm2)
        res_slow = _cosine_cov_weighted_slow(vector1, vector2, nan_idx=nan_idx)
        res = _cosine_cov_weighted(vector1, vector2, nan_idx=nan_idx)
        assert_array_almost_equal(res, res_slow)

    def test_compare_correlation(self):
        """Pearson correlation is 1 for self-comparison and < 1 otherwise."""
        from pyrsa.rdm.compare import compare_correlation
        result = compare_correlation(self.test_rdm1, self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare_correlation(self.test_rdm1, self.test_rdm2)
        assert np.all(result < 1)

    def test_compare_correlation_cov(self):
        """Covariance-weighted correlation without sigma_k: 1 on self, < 1 otherwise."""
        from pyrsa.rdm.compare import compare_correlation_cov_weighted
        result = compare_correlation_cov_weighted(self.test_rdm1,
                                                  self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare_correlation_cov_weighted(self.test_rdm1,
                                                  self.test_rdm2)
        assert np.all(result < 1)

    def test_compare_correlation_cov_sk(self):
        """Covariance-weighted correlation with identity sigma_k behaves the same."""
        from pyrsa.rdm.compare import compare_correlation_cov_weighted
        result = compare_correlation_cov_weighted(self.test_rdm1,
                                                  self.test_rdm1,
                                                  sigma_k=np.eye(6))
        assert_array_almost_equal(result, 1)
        result = compare_correlation_cov_weighted(self.test_rdm1,
                                                  self.test_rdm2,
                                                  sigma_k=np.eye(6))
        assert np.all(result < 1)

    def test_compare_spearman(self):
        """Spearman correlation is 1 for self-comparison and < 1 otherwise."""
        from pyrsa.rdm.compare import compare_spearman
        result = compare_spearman(self.test_rdm1, self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare_spearman(self.test_rdm1, self.test_rdm2)
        assert np.all(result < 1)

    def test_compare_rho_a(self):
        """rho_a runs on NaN RDMs; cross-comparison stays below 1."""
        from pyrsa.rdm.compare import compare_rho_a
        # NOTE(review): the self-comparison is not asserted to equal 1
        # here -- presumably because the repeated pattern creates ties in
        # the subsampled RDM; confirm against the implementation.
        result = compare_rho_a(self.test_rdm1, self.test_rdm1)
        result = compare_rho_a(self.test_rdm1, self.test_rdm2)
        assert np.all(result < 1)

    def test_spearman_equal_scipy(self):
        """compare_spearman agrees with scipy.stats.spearmanr pair by pair."""
        from pyrsa.rdm.compare import _parse_input_rdms
        from pyrsa.rdm.compare import _all_combinations
        import scipy.stats
        from pyrsa.rdm.compare import compare_spearman

        def _spearman_r(vector1, vector2):
            """computes the spearman rank correlation between two vectors

            Args:
                vector1 (numpy.ndarray):
                    first vector
                vector2 (numpy.ndarray):
                    second vector
            Returns:
                corr (float):
                    spearman r

            """
            corr = scipy.stats.spearmanr(vector1, vector2).correlation
            return corr
        vector1, vector2, _ = _parse_input_rdms(self.test_rdm1, self.test_rdm2)
        result = _all_combinations(vector1, vector2, _spearman_r)
        result2 = compare_spearman(self.test_rdm1, self.test_rdm2)
        assert_array_almost_equal(result, result2)

    def test_compare_kendall_tau(self):
        """Kendall tau is 1 for self-comparison and < 1 otherwise."""
        from pyrsa.rdm.compare import compare_kendall_tau
        result = compare_kendall_tau(self.test_rdm1, self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare_kendall_tau(self.test_rdm1, self.test_rdm2)
        assert np.all(result < 1)

    def test_compare_kendall_tau_a(self):
        """Kendall tau-a runs on NaN RDMs; cross-comparison stays below 1."""
        from pyrsa.rdm.compare import compare_kendall_tau_a
        # NOTE(review): as for rho_a above, the self-comparison is not
        # asserted to equal 1 -- likely due to ties from the repeated
        # pattern; confirm against the implementation.
        result = compare_kendall_tau_a(self.test_rdm1, self.test_rdm1)
        result = compare_kendall_tau_a(self.test_rdm1, self.test_rdm2)
        assert np.all(result < 1)

    def test_compare(self):
        """The generic compare() dispatcher runs for every supported method."""
        from pyrsa.rdm.compare import compare
        result = compare(self.test_rdm1, self.test_rdm1)
        assert_array_almost_equal(result, 1)
        result = compare(self.test_rdm1, self.test_rdm2, method='corr')
        result = compare(self.test_rdm1, self.test_rdm2, method='corr_cov')
        result = compare(self.test_rdm1, self.test_rdm2, method='spearman')
        result = compare(self.test_rdm1, self.test_rdm2, method='cosine')
        result = compare(self.test_rdm1, self.test_rdm2, method='cosine_cov')
        result = compare(self.test_rdm1, self.test_rdm2, method='kendall')
class TestCompareCov(unittest.TestCase):
    """Check that trivial sigma_k values reproduce the unweighted result.

    For both 'corr_cov' and 'cosine_cov', passing sigma_k=None,
    np.ones(6) or np.eye(6) must all yield the same similarity.
    """

    def setUp(self):
        """Create the random RDM fixtures used by every test.

        The RNG is seeded so the fixtures -- and therefore the tests --
        are reproducible across runs.
        """
        np.random.seed(0)
        dissimilarities1 = np.random.rand(1, 15)
        des1 = {'session': 0, 'subj': 0}
        self.test_rdm1 = rsa.rdm.RDMs(
            dissimilarities=dissimilarities1,
            dissimilarity_measure='test',
            descriptors=des1)
        dissimilarities2 = np.random.rand(3, 15)
        des2 = {'session': 0, 'subj': 0}
        self.test_rdm2 = rsa.rdm.RDMs(
            dissimilarities=dissimilarities2,
            dissimilarity_measure='test',
            descriptors=des2
            )
        # NOTE(review): test_rdm3 is not used by any test in this class;
        # kept for parity with the fixtures of the other test classes.
        dissimilarities3 = np.random.rand(7, 15)
        self.test_rdm3 = rsa.rdm.RDMs(
            dissimilarities=dissimilarities3,
            dissimilarity_measure='test',
            descriptors=des2
            )

    def test_corr_identity_equal(self):
        """'corr_cov' with sigma_k=None, ones(6) and eye(6) agree."""
        from pyrsa.rdm.compare import compare
        result = compare(self.test_rdm1, self.test_rdm2, method='corr_cov')
        result_1D = compare(
            self.test_rdm1, self.test_rdm2, method='corr_cov',
            sigma_k=np.ones(6))
        result_2D = compare(
            self.test_rdm1, self.test_rdm2, method='corr_cov',
            sigma_k=np.eye(6))
        assert_array_almost_equal(result, result_1D)
        assert_array_almost_equal(result, result_2D)

    def test_cos_identity_equal(self):
        """'cosine_cov' with sigma_k=None, ones(6) and eye(6) agree."""
        from pyrsa.rdm.compare import compare
        result = compare(self.test_rdm1, self.test_rdm2, method='cosine_cov')
        result_1D = compare(
            self.test_rdm1, self.test_rdm2, method='cosine_cov',
            sigma_k=np.ones(6))
        result_2D = compare(
            self.test_rdm1, self.test_rdm2, method='cosine_cov',
            sigma_k=np.eye(6))
        assert_array_almost_equal(result, result_1D)
        assert_array_almost_equal(result, result_2D)
| [
"numpy.sum",
"pyrsa.rdm.compare.compare_correlation_cov_weighted",
"numpy.ones",
"pyrsa.rdm.compare._all_combinations",
"numpy.mean",
"pyrsa.rdm.compare.compare_cosine",
"pyrsa.rdm.compare._cosine_cov_weighted",
"numpy.testing.assert_array_almost_equal",
"pyrsa.rdm.RDMs",
"pyrsa.rdm.compare._parse... | [((296, 317), 'numpy.random.rand', 'np.random.rand', (['(1)', '(15)'], {}), '(1, 15)\n', (310, 317), True, 'import numpy as np\n'), ((384, 482), 'pyrsa.rdm.RDMs', 'rsa.rdm.RDMs', ([], {'dissimilarities': 'dissimilarities1', 'dissimilarity_measure': '"""test"""', 'descriptors': 'des1'}), "(dissimilarities=dissimilarities1, dissimilarity_measure='test',\n descriptors=des1)\n", (396, 482), True, 'import pyrsa as rsa\n'), ((543, 564), 'numpy.random.rand', 'np.random.rand', (['(3)', '(15)'], {}), '(3, 15)\n', (557, 564), True, 'import numpy as np\n'), ((631, 729), 'pyrsa.rdm.RDMs', 'rsa.rdm.RDMs', ([], {'dissimilarities': 'dissimilarities2', 'dissimilarity_measure': '"""test"""', 'descriptors': 'des2'}), "(dissimilarities=dissimilarities2, dissimilarity_measure='test',\n descriptors=des2)\n", (643, 729), True, 'import pyrsa as rsa\n'), ((803, 824), 'numpy.random.rand', 'np.random.rand', (['(7)', '(15)'], {}), '(7, 15)\n', (817, 824), True, 'import numpy as np\n'), ((891, 989), 'pyrsa.rdm.RDMs', 'rsa.rdm.RDMs', ([], {'dissimilarities': 'dissimilarities3', 'dissimilarity_measure': '"""test"""', 'descriptors': 'des2'}), "(dissimilarities=dissimilarities3, dissimilarity_measure='test',\n descriptors=des2)\n", (903, 989), True, 'import pyrsa as rsa\n'), ((1142, 1188), 'pyrsa.rdm.compare.compare_cosine', 'compare_cosine', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (1156, 1188), False, 'from pyrsa.rdm.compare import compare_cosine\n'), ((1197, 1233), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (1222, 1233), False, 'from numpy.testing import assert_array_almost_equal\n'), ((1251, 1297), 'pyrsa.rdm.compare.compare_cosine', 'compare_cosine', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (1265, 1297), False, 'from pyrsa.rdm.compare import compare_cosine\n'), ((1313, 1331), 'numpy.all', 'np.all', (['(result < 
1)'], {}), '(result < 1)\n', (1319, 1331), True, 'import numpy as np\n'), ((1632, 1668), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (1657, 1668), False, 'from numpy.testing import assert_array_almost_equal\n'), ((1870, 1888), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (1876, 1888), True, 'import numpy as np\n'), ((2000, 2046), 'pyrsa.rdm.compare.compare_cosine', 'compare_cosine', (['self.test_rdm2', 'self.test_rdm3'], {}), '(self.test_rdm2, self.test_rdm3)\n', (2014, 2046), False, 'from pyrsa.rdm.compare import compare_cosine\n'), ((2141, 2162), 'numpy.zeros_like', 'np.zeros_like', (['result'], {}), '(result)\n', (2154, 2162), True, 'import numpy as np\n'), ((2551, 2597), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', 'result_loop'], {}), '(result, result_loop)\n', (2576, 2597), False, 'from numpy.testing import assert_array_almost_equal\n'), ((2714, 2765), 'pyrsa.rdm.compare.compare_correlation', 'compare_correlation', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (2733, 2765), False, 'from pyrsa.rdm.compare import compare_correlation\n'), ((2774, 2810), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (2799, 2810), False, 'from numpy.testing import assert_array_almost_equal\n'), ((2828, 2879), 'pyrsa.rdm.compare.compare_correlation', 'compare_correlation', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (2847, 2879), False, 'from pyrsa.rdm.compare import compare_correlation\n'), ((2895, 2913), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (2901, 2913), True, 'import numpy as np\n'), ((3047, 3111), 'pyrsa.rdm.compare.compare_correlation_cov_weighted', 'compare_correlation_cov_weighted', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (3079, 3111), 
False, 'from pyrsa.rdm.compare import compare_correlation_cov_weighted\n'), ((3170, 3206), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (3195, 3206), False, 'from numpy.testing import assert_array_almost_equal\n'), ((3224, 3288), 'pyrsa.rdm.compare.compare_correlation_cov_weighted', 'compare_correlation_cov_weighted', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (3256, 3288), False, 'from pyrsa.rdm.compare import compare_correlation_cov_weighted\n'), ((3354, 3372), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (3360, 3372), True, 'import numpy as np\n'), ((3701, 3737), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (3726, 3737), False, 'from numpy.testing import assert_array_almost_equal\n'), ((3954, 3972), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (3960, 3972), True, 'import numpy as np\n'), ((4087, 4138), 'pyrsa.rdm.compare.compare_correlation', 'compare_correlation', (['self.test_rdm2', 'self.test_rdm3'], {}), '(self.test_rdm2, self.test_rdm3)\n', (4106, 4138), False, 'from pyrsa.rdm.compare import compare_correlation\n'), ((4233, 4254), 'numpy.zeros_like', 'np.zeros_like', (['result'], {}), '(result)\n', (4246, 4254), True, 'import numpy as np\n'), ((4739, 4785), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', 'result_loop'], {}), '(result, result_loop)\n', (4764, 4785), False, 'from numpy.testing import assert_array_almost_equal\n'), ((4896, 4944), 'pyrsa.rdm.compare.compare_spearman', 'compare_spearman', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (4912, 4944), False, 'from pyrsa.rdm.compare import compare_spearman\n'), ((4953, 4989), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (4978, 4989), False, 'from 
numpy.testing import assert_array_almost_equal\n'), ((5007, 5055), 'pyrsa.rdm.compare.compare_spearman', 'compare_spearman', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (5023, 5055), False, 'from pyrsa.rdm.compare import compare_spearman\n'), ((5071, 5089), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (5077, 5089), True, 'import numpy as np\n'), ((5194, 5239), 'pyrsa.rdm.compare.compare_rho_a', 'compare_rho_a', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (5207, 5239), False, 'from pyrsa.rdm.compare import compare_rho_a\n'), ((5248, 5284), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (5273, 5284), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5302, 5347), 'pyrsa.rdm.compare.compare_rho_a', 'compare_rho_a', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (5315, 5347), False, 'from pyrsa.rdm.compare import compare_rho_a\n'), ((5363, 5381), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (5369, 5381), True, 'import numpy as np\n'), ((6128, 6177), 'pyrsa.rdm.compare._parse_input_rdms', '_parse_input_rdms', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (6145, 6177), False, 'from pyrsa.rdm.compare import _parse_input_rdms\n'), ((6192, 6240), 'pyrsa.rdm.compare._all_combinations', '_all_combinations', (['vector1', 'vector2', '_spearman_r'], {}), '(vector1, vector2, _spearman_r)\n', (6209, 6240), False, 'from pyrsa.rdm.compare import _all_combinations\n'), ((6280, 6328), 'pyrsa.rdm.compare.compare_spearman', 'compare_spearman', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (6296, 6328), False, 'from pyrsa.rdm.compare import compare_spearman\n'), ((6337, 6379), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', 'result2'], {}), '(result, 
result2)\n', (6362, 6379), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6496, 6547), 'pyrsa.rdm.compare.compare_kendall_tau', 'compare_kendall_tau', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (6515, 6547), False, 'from pyrsa.rdm.compare import compare_kendall_tau\n'), ((6556, 6592), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (6581, 6592), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6610, 6661), 'pyrsa.rdm.compare.compare_kendall_tau', 'compare_kendall_tau', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (6629, 6661), False, 'from pyrsa.rdm.compare import compare_kendall_tau\n'), ((6677, 6695), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (6683, 6695), True, 'import numpy as np\n'), ((6816, 6869), 'pyrsa.rdm.compare.compare_kendall_tau_a', 'compare_kendall_tau_a', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (6837, 6869), False, 'from pyrsa.rdm.compare import compare_kendall_tau_a\n'), ((6878, 6914), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (6903, 6914), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6932, 6985), 'pyrsa.rdm.compare.compare_kendall_tau_a', 'compare_kendall_tau_a', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (6953, 6985), False, 'from pyrsa.rdm.compare import compare_kendall_tau_a\n'), ((7001, 7019), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (7007, 7019), True, 'import numpy as np\n'), ((7112, 7151), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (7119, 7151), False, 'from pyrsa.rdm.compare import compare\n'), ((7160, 7196), 'numpy.testing.assert_array_almost_equal', 
'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (7185, 7196), False, 'from numpy.testing import assert_array_almost_equal\n'), ((7214, 7268), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""corr"""'}), "(self.test_rdm1, self.test_rdm2, method='corr')\n", (7221, 7268), False, 'from pyrsa.rdm.compare import compare\n'), ((7286, 7344), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""corr_cov"""'}), "(self.test_rdm1, self.test_rdm2, method='corr_cov')\n", (7293, 7344), False, 'from pyrsa.rdm.compare import compare\n'), ((7362, 7420), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""spearman"""'}), "(self.test_rdm1, self.test_rdm2, method='spearman')\n", (7369, 7420), False, 'from pyrsa.rdm.compare import compare\n'), ((7438, 7494), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""cosine"""'}), "(self.test_rdm1, self.test_rdm2, method='cosine')\n", (7445, 7494), False, 'from pyrsa.rdm.compare import compare\n'), ((7512, 7572), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""cosine_cov"""'}), "(self.test_rdm1, self.test_rdm2, method='cosine_cov')\n", (7519, 7572), False, 'from pyrsa.rdm.compare import compare\n'), ((7590, 7647), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""kendall"""'}), "(self.test_rdm1, self.test_rdm2, method='kendall')\n", (7597, 7647), False, 'from pyrsa.rdm.compare import compare\n'), ((7743, 7764), 'numpy.random.rand', 'np.random.rand', (['(1)', '(15)'], {}), '(1, 15)\n', (7757, 7764), True, 'import numpy as np\n'), ((7826, 7924), 'pyrsa.rdm.RDMs', 'rsa.rdm.RDMs', ([], {'dissimilarities': 'dissimilarities1', 'dissimilarity_measure': '"""test"""', 'descriptors': 'des1'}), "(dissimilarities=dissimilarities1, dissimilarity_measure='test',\n 
descriptors=des1)\n", (7838, 7924), True, 'import pyrsa as rsa\n'), ((8080, 8101), 'numpy.random.rand', 'np.random.rand', (['(3)', '(15)'], {}), '(3, 15)\n', (8094, 8101), True, 'import numpy as np\n'), ((8163, 8261), 'pyrsa.rdm.RDMs', 'rsa.rdm.RDMs', ([], {'dissimilarities': 'dissimilarities2', 'dissimilarity_measure': '"""test"""', 'descriptors': 'des2'}), "(dissimilarities=dissimilarities2, dissimilarity_measure='test',\n descriptors=des2)\n", (8175, 8261), True, 'import pyrsa as rsa\n'), ((8470, 8491), 'numpy.random.rand', 'np.random.rand', (['(7)', '(15)'], {}), '(7, 15)\n', (8484, 8491), True, 'import numpy as np\n'), ((8553, 8651), 'pyrsa.rdm.RDMs', 'rsa.rdm.RDMs', ([], {'dissimilarities': 'dissimilarities3', 'dissimilarity_measure': '"""test"""', 'descriptors': 'des2'}), "(dissimilarities=dissimilarities3, dissimilarity_measure='test',\n descriptors=des2)\n", (8565, 8651), True, 'import pyrsa as rsa\n'), ((8939, 8985), 'pyrsa.rdm.compare.compare_cosine', 'compare_cosine', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (8953, 8985), False, 'from pyrsa.rdm.compare import compare_cosine\n'), ((8994, 9030), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (9019, 9030), False, 'from numpy.testing import assert_array_almost_equal\n'), ((9048, 9094), 'pyrsa.rdm.compare.compare_cosine', 'compare_cosine', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (9062, 9094), False, 'from pyrsa.rdm.compare import compare_cosine\n'), ((9110, 9128), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (9116, 9128), True, 'import numpy as np\n'), ((9429, 9465), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (9454, 9465), False, 'from numpy.testing import assert_array_almost_equal\n'), ((9667, 9685), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (9673, 
9685), True, 'import numpy as np\n'), ((9812, 9885), 'pyrsa.rdm.compare.compare_cosine_cov_weighted', 'compare_cosine_cov_weighted', (['self.test_rdm1', 'self.test_rdm2'], {'sigma_k': 'None'}), '(self.test_rdm1, self.test_rdm2, sigma_k=None)\n', (9839, 9885), False, 'from pyrsa.rdm.compare import compare_cosine_cov_weighted\n'), ((10375, 10419), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', 'result_1D'], {}), '(result, result_1D)\n', (10400, 10419), False, 'from numpy.testing import assert_array_almost_equal\n'), ((10428, 10472), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', 'result_2D'], {}), '(result, result_2D)\n', (10453, 10472), False, 'from numpy.testing import assert_array_almost_equal\n'), ((10732, 10781), 'pyrsa.rdm.compare._parse_input_rdms', '_parse_input_rdms', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (10749, 10781), False, 'from pyrsa.rdm.compare import _parse_input_rdms\n'), ((10855, 10915), 'pyrsa.rdm.compare._cosine_cov_weighted_slow', '_cosine_cov_weighted_slow', (['vector1', 'vector2'], {'nan_idx': 'nan_idx'}), '(vector1, vector2, nan_idx=nan_idx)\n', (10880, 10915), False, 'from pyrsa.rdm.compare import _cosine_cov_weighted_slow\n'), ((10930, 10985), 'pyrsa.rdm.compare._cosine_cov_weighted', '_cosine_cov_weighted', (['vector1', 'vector2'], {'nan_idx': 'nan_idx'}), '(vector1, vector2, nan_idx=nan_idx)\n', (10950, 10985), False, 'from pyrsa.rdm.compare import _cosine_cov_weighted\n'), ((10994, 11034), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'res_slow'], {}), '(res, res_slow)\n', (11019, 11034), False, 'from numpy.testing import assert_array_almost_equal\n'), ((11151, 11202), 'pyrsa.rdm.compare.compare_correlation', 'compare_correlation', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (11170, 11202), False, 'from pyrsa.rdm.compare import 
compare_correlation\n'), ((11211, 11247), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (11236, 11247), False, 'from numpy.testing import assert_array_almost_equal\n'), ((11265, 11316), 'pyrsa.rdm.compare.compare_correlation', 'compare_correlation', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (11284, 11316), False, 'from pyrsa.rdm.compare import compare_correlation\n'), ((11332, 11350), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (11338, 11350), True, 'import numpy as np\n'), ((11484, 11548), 'pyrsa.rdm.compare.compare_correlation_cov_weighted', 'compare_correlation_cov_weighted', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (11516, 11548), False, 'from pyrsa.rdm.compare import compare_correlation_cov_weighted\n'), ((11607, 11643), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (11632, 11643), False, 'from numpy.testing import assert_array_almost_equal\n'), ((11661, 11725), 'pyrsa.rdm.compare.compare_correlation_cov_weighted', 'compare_correlation_cov_weighted', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (11693, 11725), False, 'from pyrsa.rdm.compare import compare_correlation_cov_weighted\n'), ((11791, 11809), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (11797, 11809), True, 'import numpy as np\n'), ((12138, 12174), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (12163, 12174), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12391, 12409), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (12397, 12409), True, 'import numpy as np\n'), ((12520, 12568), 'pyrsa.rdm.compare.compare_spearman', 'compare_spearman', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, 
self.test_rdm1)\n', (12536, 12568), False, 'from pyrsa.rdm.compare import compare_spearman\n'), ((12577, 12613), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (12602, 12613), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12631, 12679), 'pyrsa.rdm.compare.compare_spearman', 'compare_spearman', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (12647, 12679), False, 'from pyrsa.rdm.compare import compare_spearman\n'), ((12695, 12713), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (12701, 12713), True, 'import numpy as np\n'), ((12818, 12863), 'pyrsa.rdm.compare.compare_rho_a', 'compare_rho_a', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (12831, 12863), False, 'from pyrsa.rdm.compare import compare_rho_a\n'), ((12881, 12926), 'pyrsa.rdm.compare.compare_rho_a', 'compare_rho_a', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (12894, 12926), False, 'from pyrsa.rdm.compare import compare_rho_a\n'), ((12942, 12960), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (12948, 12960), True, 'import numpy as np\n'), ((13707, 13756), 'pyrsa.rdm.compare._parse_input_rdms', '_parse_input_rdms', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (13724, 13756), False, 'from pyrsa.rdm.compare import _parse_input_rdms\n'), ((13771, 13819), 'pyrsa.rdm.compare._all_combinations', '_all_combinations', (['vector1', 'vector2', '_spearman_r'], {}), '(vector1, vector2, _spearman_r)\n', (13788, 13819), False, 'from pyrsa.rdm.compare import _all_combinations\n'), ((13859, 13907), 'pyrsa.rdm.compare.compare_spearman', 'compare_spearman', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (13875, 13907), False, 'from pyrsa.rdm.compare import compare_spearman\n'), ((13916, 13958), 
'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', 'result2'], {}), '(result, result2)\n', (13941, 13958), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14075, 14126), 'pyrsa.rdm.compare.compare_kendall_tau', 'compare_kendall_tau', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (14094, 14126), False, 'from pyrsa.rdm.compare import compare_kendall_tau\n'), ((14135, 14171), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (14160, 14171), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14189, 14240), 'pyrsa.rdm.compare.compare_kendall_tau', 'compare_kendall_tau', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (14208, 14240), False, 'from pyrsa.rdm.compare import compare_kendall_tau\n'), ((14256, 14274), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (14262, 14274), True, 'import numpy as np\n'), ((14395, 14448), 'pyrsa.rdm.compare.compare_kendall_tau_a', 'compare_kendall_tau_a', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (14416, 14448), False, 'from pyrsa.rdm.compare import compare_kendall_tau_a\n'), ((14466, 14519), 'pyrsa.rdm.compare.compare_kendall_tau_a', 'compare_kendall_tau_a', (['self.test_rdm1', 'self.test_rdm2'], {}), '(self.test_rdm1, self.test_rdm2)\n', (14487, 14519), False, 'from pyrsa.rdm.compare import compare_kendall_tau_a\n'), ((14535, 14553), 'numpy.all', 'np.all', (['(result < 1)'], {}), '(result < 1)\n', (14541, 14553), True, 'import numpy as np\n'), ((14646, 14685), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm1'], {}), '(self.test_rdm1, self.test_rdm1)\n', (14653, 14685), False, 'from pyrsa.rdm.compare import compare\n'), ((14694, 14730), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', '(1)'], {}), '(result, 1)\n', (14719, 
14730), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14748, 14802), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""corr"""'}), "(self.test_rdm1, self.test_rdm2, method='corr')\n", (14755, 14802), False, 'from pyrsa.rdm.compare import compare\n'), ((14820, 14878), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""corr_cov"""'}), "(self.test_rdm1, self.test_rdm2, method='corr_cov')\n", (14827, 14878), False, 'from pyrsa.rdm.compare import compare\n'), ((14896, 14954), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""spearman"""'}), "(self.test_rdm1, self.test_rdm2, method='spearman')\n", (14903, 14954), False, 'from pyrsa.rdm.compare import compare\n'), ((14972, 15028), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""cosine"""'}), "(self.test_rdm1, self.test_rdm2, method='cosine')\n", (14979, 15028), False, 'from pyrsa.rdm.compare import compare\n'), ((15046, 15106), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""cosine_cov"""'}), "(self.test_rdm1, self.test_rdm2, method='cosine_cov')\n", (15053, 15106), False, 'from pyrsa.rdm.compare import compare\n'), ((15124, 15181), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""kendall"""'}), "(self.test_rdm1, self.test_rdm2, method='kendall')\n", (15131, 15181), False, 'from pyrsa.rdm.compare import compare\n'), ((15274, 15295), 'numpy.random.rand', 'np.random.rand', (['(1)', '(15)'], {}), '(1, 15)\n', (15288, 15295), True, 'import numpy as np\n'), ((15362, 15460), 'pyrsa.rdm.RDMs', 'rsa.rdm.RDMs', ([], {'dissimilarities': 'dissimilarities1', 'dissimilarity_measure': '"""test"""', 'descriptors': 'des1'}), "(dissimilarities=dissimilarities1, dissimilarity_measure='test',\n descriptors=des1)\n", (15374, 15460), True, 'import pyrsa as 
rsa\n'), ((15521, 15542), 'numpy.random.rand', 'np.random.rand', (['(3)', '(15)'], {}), '(3, 15)\n', (15535, 15542), True, 'import numpy as np\n'), ((15609, 15707), 'pyrsa.rdm.RDMs', 'rsa.rdm.RDMs', ([], {'dissimilarities': 'dissimilarities2', 'dissimilarity_measure': '"""test"""', 'descriptors': 'des2'}), "(dissimilarities=dissimilarities2, dissimilarity_measure='test',\n descriptors=des2)\n", (15621, 15707), True, 'import pyrsa as rsa\n'), ((15781, 15802), 'numpy.random.rand', 'np.random.rand', (['(7)', '(15)'], {}), '(7, 15)\n', (15795, 15802), True, 'import numpy as np\n'), ((15869, 15967), 'pyrsa.rdm.RDMs', 'rsa.rdm.RDMs', ([], {'dissimilarities': 'dissimilarities3', 'dissimilarity_measure': '"""test"""', 'descriptors': 'des2'}), "(dissimilarities=dissimilarities3, dissimilarity_measure='test',\n descriptors=des2)\n", (15881, 15967), True, 'import pyrsa as rsa\n'), ((16118, 16176), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""corr_cov"""'}), "(self.test_rdm1, self.test_rdm2, method='corr_cov')\n", (16125, 16176), False, 'from pyrsa.rdm.compare import compare\n'), ((16432, 16476), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', 'result_1D'], {}), '(result, result_1D)\n', (16457, 16476), False, 'from numpy.testing import assert_array_almost_equal\n'), ((16485, 16529), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', 'result_2D'], {}), '(result, result_2D)\n', (16510, 16529), False, 'from numpy.testing import assert_array_almost_equal\n'), ((16633, 16693), 'pyrsa.rdm.compare.compare', 'compare', (['self.test_rdm1', 'self.test_rdm2'], {'method': '"""cosine_cov"""'}), "(self.test_rdm1, self.test_rdm2, method='cosine_cov')\n", (16640, 16693), False, 'from pyrsa.rdm.compare import compare\n'), ((16953, 16997), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', 'result_1D'], {}), '(result, result_1D)\n', (16978, 
16997), False, 'from numpy.testing import assert_array_almost_equal\n'), ((17006, 17050), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result', 'result_2D'], {}), '(result, result_2D)\n', (17031, 17050), False, 'from numpy.testing import assert_array_almost_equal\n'), ((4357, 4386), 'numpy.mean', 'np.mean', (['d1', '(1)'], {'keepdims': '(True)'}), '(d1, 1, keepdims=True)\n', (4364, 4386), True, 'import numpy as np\n'), ((4405, 4434), 'numpy.mean', 'np.mean', (['d2', '(1)'], {'keepdims': '(True)'}), '(d2, 1, keepdims=True)\n', (4412, 4434), True, 'import numpy as np\n'), ((1613, 1622), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (1619, 1622), True, 'import numpy as np\n'), ((1844, 1853), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (1850, 1853), True, 'import numpy as np\n'), ((3682, 3691), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (3688, 3691), True, 'import numpy as np\n'), ((3928, 3937), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (3934, 3937), True, 'import numpy as np\n'), ((9410, 9419), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (9416, 9419), True, 'import numpy as np\n'), ((9641, 9650), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (9647, 9650), True, 'import numpy as np\n'), ((10160, 10170), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (10167, 10170), True, 'import numpy as np\n'), ((10356, 10365), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (10362, 10365), True, 'import numpy as np\n'), ((12119, 12128), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (12125, 12128), True, 'import numpy as np\n'), ((12365, 12374), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (12371, 12374), True, 'import numpy as np\n'), ((16289, 16299), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (16296, 16299), True, 'import numpy as np\n'), ((16413, 16422), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (16419, 16422), True, 'import numpy as np\n'), ((16808, 16818), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (16815, 
16818), True, 'import numpy as np\n'), ((16934, 16943), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (16940, 16943), True, 'import numpy as np\n'), ((2380, 2401), 'numpy.sum', 'np.sum', (['(d1[i] * d2[j])'], {}), '(d1[i] * d2[j])\n', (2386, 2401), True, 'import numpy as np\n'), ((2519, 2540), 'numpy.sum', 'np.sum', (['(d2[j] * d2[j])'], {}), '(d2[j] * d2[j])\n', (2525, 2540), True, 'import numpy as np\n'), ((4568, 4589), 'numpy.sum', 'np.sum', (['(d1[i] * d2[j])'], {}), '(d1[i] * d2[j])\n', (4574, 4589), True, 'import numpy as np\n'), ((4707, 4728), 'numpy.sum', 'np.sum', (['(d2[j] * d2[j])'], {}), '(d2[j] * d2[j])\n', (4713, 4728), True, 'import numpy as np\n'), ((2449, 2470), 'numpy.sum', 'np.sum', (['(d1[i] * d1[i])'], {}), '(d1[i] * d1[i])\n', (2455, 2470), True, 'import numpy as np\n'), ((4637, 4658), 'numpy.sum', 'np.sum', (['(d1[i] * d1[i])'], {}), '(d1[i] * d1[i])\n', (4643, 4658), True, 'import numpy as np\n')] |
#General Imports
import numpy as np
import random
import collections
import timeit
import copy
#Dice Imports
from dice_ml.explainer_interfaces.explainer_base import ExplainerBase
from dice_ml import diverse_counterfactuals as exp
from dice_ml.utils.sample_architecture.vae_model import CF_VAE
from dice_ml.utils.helpers import get_base_gen_cf_initialization
#Pytorch
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.autograd import Variable
class FeasibleBaseVAE(ExplainerBase):
def __init__(self, data_interface, model_interface, **kwargs):
"""
:param data_interface: an interface class to data related params
:param model_interface: an interface class to access trained ML model
"""
# initiating data related parameters
super().__init__(data_interface)
#Black Box ML Model to be explained
self.pred_model= model_interface.model
#Hyperparam
self.encoded_size=kwargs['encoded_size']
self.learning_rate= kwargs['lr']
self.batch_size= kwargs['batch_size']
self.validity_reg= kwargs['validity_reg']
self.margin= kwargs['margin']
self.epochs= kwargs['epochs']
self.wm1= kwargs['wm1']
self.wm2= kwargs['wm2']
self.wm3= kwargs['wm3']
#Initializing parameters for the DiceBaseGenCF
self.vae_train_dataset, self.vae_val_dataset, self.vae_test_dataset, self.normalise_weights, self.cf_vae, self.cf_vae_optimizer= get_base_gen_cf_initialization( self.data_interface, self.encoded_size, self.cont_minx, self.cont_maxx, self.margin, self.validity_reg, self.epochs, self.wm1, self.wm2, self.wm3, self.learning_rate )
#Data paths
self.base_model_dir= '../../dice_ml/utils/sample_trained_models/'
self.save_path=self.base_model_dir+ self.data_interface.data_name +'-margin-' + str(self.margin) + '-validity_reg-'+ str(self.validity_reg) + '-epoch-' + str(self.epochs) + '-' + 'base-gen' + '.pth'
def compute_loss( self, model_out, x, target_label ):
em = model_out['em']
ev = model_out['ev']
z = model_out['z']
dm = model_out['x_pred']
mc_samples = model_out['mc_samples']
#KL Divergence
kl_divergence = 0.5*torch.mean( em**2 +ev - torch.log(ev) - 1, axis=1 )
#Reconstruction Term
#Proximity: L1 Loss
x_pred = dm[0]
s= self.cf_vae.encoded_start_cat
recon_err = -torch.sum( torch.abs(x[:,s:-1] - x_pred[:,s:-1]), axis=1 )
for key in self.normalise_weights.keys():
recon_err+= -(self.normalise_weights[key][1] - self.normalise_weights[key][0])*torch.abs(x[:,key] - x_pred[:,key])
# Sum to 1 over the categorical indexes of a feature
for v in self.cf_vae.encoded_categorical_feature_indexes:
temp = -torch.abs( 1.0-torch.sum( x_pred[:, v[0]:v[-1]+1], axis=1) )
recon_err += temp
#Validity
temp_logits = self.pred_model(x_pred)
validity_loss= torch.zeros(1)
temp_1= temp_logits[target_label==1,:]
temp_0= temp_logits[target_label==0,:]
validity_loss += F.hinge_embedding_loss( F.sigmoid(temp_1[:,1]) - F.sigmoid(temp_1[:,0]), torch.tensor(-1), self.margin, reduction='mean')
validity_loss += F.hinge_embedding_loss( F.sigmoid(temp_0[:,0]) - F.sigmoid(temp_0[:,1]), torch.tensor(-1), self.margin, reduction='mean')
for i in range(1,mc_samples):
x_pred = dm[i]
recon_err += -torch.sum( torch.abs(x[:,s:-1] - x_pred[:,s:-1]), axis=1 )
for key in self.normalise_weights.keys():
recon_err+= -(self.normalise_weights[key][1] - self.normalise_weights[key][0])*torch.abs(x[:,key] - x_pred[:,key])
# Sum to 1 over the categorical indexes of a feature
for v in self.cf_vae.encoded_categorical_feature_indexes:
temp = -torch.abs( 1.0-torch.sum( x_pred[:, v[0]:v[-1]+1], axis=1) )
recon_err += temp
#Validity
temp_logits = self.pred_model(x_pred)
temp_1= temp_logits[target_label==1,:]
temp_0= temp_logits[target_label==0,:]
validity_loss += F.hinge_embedding_loss( F.sigmoid(temp_1[:,1]) - F.sigmoid(temp_1[:,0]), torch.tensor(-1), self.margin, reduction='mean')
validity_loss += F.hinge_embedding_loss( F.sigmoid(temp_0[:,0]) - F.sigmoid(temp_0[:,1]), torch.tensor(-1), self.margin, reduction='mean')
recon_err = recon_err / mc_samples
validity_loss = -1*self.validity_reg*validity_loss/mc_samples
print('recon: ',-torch.mean(recon_err), ' KL: ', torch.mean(kl_divergence), ' Validity: ', -validity_loss)
return -torch.mean(recon_err - kl_divergence) - validity_loss
def train(self, pre_trained=False):
'''
pre_trained: Bool Variable to check whether pre trained model exists to avoid training again
'''
if pre_trained:
self.cf_vae.load_state_dict(torch.load(self.save_path))
self.cf_vae.eval()
return
##TODO: Handling such dataset specific constraints in a more general way
# CF Generation for only low to high income data points
self.vae_train_dataset= self.vae_train_dataset[self.vae_train_dataset[:,-1]==0,:]
self.vae_val_dataset= self.vae_val_dataset[self.vae_val_dataset[:,-1]==0,:]
#Removing the outcome variable from the datasets
self.vae_train_feat= self.vae_train_dataset[:,:-1]
self.vae_val_feat= self.vae_val_dataset[:,:-1]
for epoch in range(self.epochs):
batch_num=0
train_loss= 0.0
train_size=0
train_dataset= torch.tensor(self.vae_train_feat).float()
train_dataset= torch.utils.data.DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True)
for train_x in enumerate(train_dataset):
self.cf_vae_optimizer.zero_grad()
train_x= train_x[1]
train_y= 1.0-torch.argmax( self.pred_model(train_x), dim=1 )
train_size+= train_x.shape[0]
out= self.cf_vae(train_x, train_y)
loss= self.compute_loss( out, train_x, train_y )
loss.backward()
train_loss += loss.item()
self.cf_vae_optimizer.step()
batch_num+=1
ret= loss/batch_num
print('Train Avg Loss: ', ret, train_size )
#Save the model after training every 10 epochs and at the last epoch
if (epoch!=0 and epoch%10==0) or epoch==self.epochs-1:
torch.save(self.cf_vae.state_dict(), self.save_path)
#The input arguments for this function same as the one defined for Diverse CF
def generate_counterfactuals(self, query_instance, total_CFs, desired_class="opposite" ):
## Loading the latest trained CFVAE model
self.cf_vae.load_state_dict(torch.load(self.save_path))
self.cf_vae.eval()
# Converting query_instance into numpy array
query_instance_org= query_instance
query_instance = self.data_interface.prepare_query_instance(query_instance=query_instance, encoding='one-hot')
query_instance = np.array([query_instance.iloc[0].values])
if query_instance.shape[0] > self.batch_size:
test_dataset= np.array_split( query_instance, query_instance.shape[0]//self.batch_size ,axis=0 )
else:
test_dataset= [ query_instance ]
final_gen_cf=[]
final_cf_pred=[]
final_test_pred=[]
for i in range(len(query_instance)):
train_x = test_dataset[i]
train_x= torch.tensor( train_x ).float()
train_y = torch.argmax( self.pred_model(train_x), dim=1 )
curr_gen_cf=[]
curr_cf_pred=[]
curr_test_pred= train_y.numpy()
for cf_count in range(total_CFs):
recon_err, kl_err, x_true, x_pred, cf_label = self.cf_vae.compute_elbo( train_x, 1.0-train_y, self.pred_model )
while( cf_label== train_y):
print(cf_label, train_y)
recon_err, kl_err, x_true, x_pred, cf_label = self.cf_vae.compute_elbo( train_x, 1.0-train_y, self.pred_model )
x_pred= x_pred.detach().numpy()
#Converting mixed scores into one hot feature representations
for v in self.cf_vae.encoded_categorical_feature_indexes:
curr_max= x_pred[:, v[0]]
curr_max_idx= v[0]
for idx in v:
if curr_max < x_pred[:, idx]:
curr_max= x_pred[:, idx]
curr_max_idx= idx
for idx in v:
if idx==curr_max_idx:
x_pred[:, idx]=1
else:
x_pred[:, idx]=0
cf_label= cf_label.detach().numpy()
cf_label= np.reshape( cf_label, (cf_label.shape[0],1) )
curr_gen_cf.append( x_pred )
curr_cf_pred.append( cf_label )
final_gen_cf.append(curr_gen_cf)
final_cf_pred.append(curr_cf_pred)
final_test_pred.append(curr_test_pred)
#CF Gen out
result={}
result['query-instance']= query_instance[0]
result['test-pred']= final_test_pred[0][0]
result['CF']= final_gen_cf[0]
result['CF-Pred']= final_cf_pred[0]
# Adding empty list for sparse cf gen and pred; adding 'NA' for the posthoc sparsity cofficient
return exp.CounterfactualExamples(self.data_interface, result['query-instance'], result['test-pred'], result['CF'], result['CF-Pred'], posthoc_sparsity_param=None) | [
"torch.mean",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.load",
"dice_ml.diverse_counterfactuals.CounterfactualExamples",
"torch.abs",
"numpy.array",
"numpy.reshape",
"torch.nn.functional.sigmoid",
"torch.zeros",
"numpy.array_split",
"torch.log",
"torch.tensor",
"dice_ml.utils.help... | [((1679, 1885), 'dice_ml.utils.helpers.get_base_gen_cf_initialization', 'get_base_gen_cf_initialization', (['self.data_interface', 'self.encoded_size', 'self.cont_minx', 'self.cont_maxx', 'self.margin', 'self.validity_reg', 'self.epochs', 'self.wm1', 'self.wm2', 'self.wm3', 'self.learning_rate'], {}), '(self.data_interface, self.encoded_size, self\n .cont_minx, self.cont_maxx, self.margin, self.validity_reg, self.epochs,\n self.wm1, self.wm2, self.wm3, self.learning_rate)\n', (1709, 1885), False, 'from dice_ml.utils.helpers import get_base_gen_cf_initialization\n'), ((3248, 3262), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (3259, 3262), False, 'import torch\n'), ((7753, 7794), 'numpy.array', 'np.array', (['[query_instance.iloc[0].values]'], {}), '([query_instance.iloc[0].values])\n', (7761, 7794), True, 'import numpy as np\n'), ((10375, 10539), 'dice_ml.diverse_counterfactuals.CounterfactualExamples', 'exp.CounterfactualExamples', (['self.data_interface', "result['query-instance']", "result['test-pred']", "result['CF']", "result['CF-Pred']"], {'posthoc_sparsity_param': 'None'}), "(self.data_interface, result['query-instance'],\n result['test-pred'], result['CF'], result['CF-Pred'],\n posthoc_sparsity_param=None)\n", (10401, 10539), True, 'from dice_ml import diverse_counterfactuals as exp\n'), ((3494, 3510), 'torch.tensor', 'torch.tensor', (['(-1)'], {}), '(-1)\n', (3506, 3510), False, 'import torch\n'), ((3641, 3657), 'torch.tensor', 'torch.tensor', (['(-1)'], {}), '(-1)\n', (3653, 3657), False, 'import torch\n'), ((4940, 4965), 'torch.mean', 'torch.mean', (['kl_divergence'], {}), '(kl_divergence)\n', (4950, 4965), False, 'import torch\n'), ((6143, 6231), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=self.batch_size,\n shuffle=True)\n', (6170, 6231), False, 'import torch\n'), ((7440, 7466), 'torch.load', 
'torch.load', (['self.save_path'], {}), '(self.save_path)\n', (7450, 7466), False, 'import torch\n'), ((7877, 7963), 'numpy.array_split', 'np.array_split', (['query_instance', '(query_instance.shape[0] // self.batch_size)'], {'axis': '(0)'}), '(query_instance, query_instance.shape[0] // self.batch_size,\n axis=0)\n', (7891, 7963), True, 'import numpy as np\n'), ((2685, 2724), 'torch.abs', 'torch.abs', (['(x[:, s:-1] - x_pred[:, s:-1])'], {}), '(x[:, s:-1] - x_pred[:, s:-1])\n', (2694, 2724), False, 'import torch\n'), ((2874, 2911), 'torch.abs', 'torch.abs', (['(x[:, key] - x_pred[:, key])'], {}), '(x[:, key] - x_pred[:, key])\n', (2883, 2911), False, 'import torch\n'), ((3445, 3468), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['temp_1[:, 1]'], {}), '(temp_1[:, 1])\n', (3454, 3468), True, 'from torch.nn import functional as F\n'), ((3470, 3493), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['temp_1[:, 0]'], {}), '(temp_1[:, 0])\n', (3479, 3493), True, 'from torch.nn import functional as F\n'), ((3592, 3615), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['temp_0[:, 0]'], {}), '(temp_0[:, 0])\n', (3601, 3615), True, 'from torch.nn import functional as F\n'), ((3617, 3640), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['temp_0[:, 1]'], {}), '(temp_0[:, 1])\n', (3626, 3640), True, 'from torch.nn import functional as F\n'), ((4568, 4584), 'torch.tensor', 'torch.tensor', (['(-1)'], {}), '(-1)\n', (4580, 4584), False, 'import torch\n'), ((4719, 4735), 'torch.tensor', 'torch.tensor', (['(-1)'], {}), '(-1)\n', (4731, 4735), False, 'import torch\n'), ((4908, 4929), 'torch.mean', 'torch.mean', (['recon_err'], {}), '(recon_err)\n', (4918, 4929), False, 'import torch\n'), ((5014, 5051), 'torch.mean', 'torch.mean', (['(recon_err - kl_divergence)'], {}), '(recon_err - kl_divergence)\n', (5024, 5051), False, 'import torch\n'), ((5328, 5354), 'torch.load', 'torch.load', (['self.save_path'], {}), '(self.save_path)\n', (5338, 5354), False, 'import torch\n'), ((9699, 9743), 
'numpy.reshape', 'np.reshape', (['cf_label', '(cf_label.shape[0], 1)'], {}), '(cf_label, (cf_label.shape[0], 1))\n', (9709, 9743), True, 'import numpy as np\n'), ((3801, 3840), 'torch.abs', 'torch.abs', (['(x[:, s:-1] - x_pred[:, s:-1])'], {}), '(x[:, s:-1] - x_pred[:, s:-1])\n', (3810, 3840), False, 'import torch\n'), ((3998, 4035), 'torch.abs', 'torch.abs', (['(x[:, key] - x_pred[:, key])'], {}), '(x[:, key] - x_pred[:, key])\n', (4007, 4035), False, 'import torch\n'), ((4519, 4542), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['temp_1[:, 1]'], {}), '(temp_1[:, 1])\n', (4528, 4542), True, 'from torch.nn import functional as F\n'), ((4544, 4567), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['temp_1[:, 0]'], {}), '(temp_1[:, 0])\n', (4553, 4567), True, 'from torch.nn import functional as F\n'), ((4670, 4693), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['temp_0[:, 0]'], {}), '(temp_0[:, 0])\n', (4679, 4693), True, 'from torch.nn import functional as F\n'), ((4695, 4718), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['temp_0[:, 1]'], {}), '(temp_0[:, 1])\n', (4704, 4718), True, 'from torch.nn import functional as F\n'), ((6074, 6107), 'torch.tensor', 'torch.tensor', (['self.vae_train_feat'], {}), '(self.vae_train_feat)\n', (6086, 6107), False, 'import torch\n'), ((8200, 8221), 'torch.tensor', 'torch.tensor', (['train_x'], {}), '(train_x)\n', (8212, 8221), False, 'import torch\n'), ((2495, 2508), 'torch.log', 'torch.log', (['ev'], {}), '(ev)\n', (2504, 2508), False, 'import torch\n'), ((3075, 3119), 'torch.sum', 'torch.sum', (['x_pred[:, v[0]:v[-1] + 1]'], {'axis': '(1)'}), '(x_pred[:, v[0]:v[-1] + 1], axis=1)\n', (3084, 3119), False, 'import torch\n'), ((4211, 4255), 'torch.sum', 'torch.sum', (['x_pred[:, v[0]:v[-1] + 1]'], {'axis': '(1)'}), '(x_pred[:, v[0]:v[-1] + 1], axis=1)\n', (4220, 4255), False, 'import torch\n')] |
from TSP_utils import TSP_solver, TSP_plotter, TSP_generator, TSP_loader
import numpy as np
import networkx as nx
import tqdm
import tsplib95
import time
import re
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
def get_cparams_from_lengths(cparam, delta= 0.005, num_nodes=50, num_samples=1000, data_root='test_sets'):
# get lengths
if cparam == 'random':
folder = f'{data_root}/synthetic_n_{num_nodes}_{num_samples}/'
else:
folder = f'{data_root}/synthetic_n_{num_nodes}_cparam_{cparam}_delta_{delta}_{num_samples}/'
with open(folder+'lengths.txt', 'r') as f:
lines = f.readlines()
file_names = [line.split(':')[0].strip() for k, line in enumerate(lines)]
test_cparams = [float(line.split(':')[-1].strip()) / np.sqrt(1*50) for k, line in enumerate(lines)]
cparam_dict = dict(zip(file_names, test_cparams))
return cparam_dict
def get_soltime_from_file(cparam, delta=0.005, num_nodes=50, num_samples=1000, data_root='test_sets'):
# get lengths
if cparam == 'random':
folder = f'{data_root}/synthetic_n_{num_nodes}_{num_samples}/'
else:
folder = f'{data_root}/synthetic_n_{num_nodes}_cparam_{cparam}_delta_{delta}_{num_samples}/'
with open(folder+'sol_times.txt', 'r') as f:
lines = f.readlines()
file_names = [line.split(':')[0].strip() for k, line in enumerate(lines)]
test_sol_times = [float(line.split(':')[-1].strip()) for k, line in enumerate(lines)]
soltime_dict = dict(zip(file_names, test_sol_times))
return soltime_dict
def get_soltimes_from_file(cparam, delta=0.005, num_nodes=50, num_samples=1000, data_root='test_sets'):
# get lengths
if cparam == 'random':
folder = f'{data_root}/synthetic_n_{num_nodes}_{num_samples}/'
else:
folder = f'{data_root}/synthetic_n_{num_nodes}_cparam_{cparam}_delta_{delta}_{num_samples}/'
with open(folder+'sol_times.txt', 'r') as f:
lines = f.readlines()
file_names = [line.split(':')[0].strip() for k, line in enumerate(lines)]
test_sol_times = [[float(time.strip()) for time in line.split(':')[-1].split(',')] for k, line in enumerate(lines)]
soltime_dict = dict(zip(file_names, test_sol_times))
return soltime_dict
def get_approx_to_opt_from_file(approx_path='s2v_dqn_results/test_cparam_random.csv'):
with open(approx_path, 'r') as f:
lines = f.readlines()
file_names = [line.split(',')[0].strip() for k, line in enumerate(lines)]
approx_to_opt = [float(line.split(',')[1].strip()) for k, line in enumerate(lines)]
approx_dict = dict(zip(file_names, approx_to_opt))
return approx_dict
def get_approx_and_soltime_lists(approx_path, **args):
approx_dict = get_approx_to_opt_from_file(approx_path)
# soltime_dict = get_soltime_from_file(cparam)
soltime_dict = get_soltimes_from_file(**args)
approx_list = []
soltime_list = []
for key in approx_dict:
approx_list.append(approx_dict[key])
soltime_list.append(soltime_dict[key])
return approx_list, soltime_list
def get_approx_and_cparam_lists(approx_path, **args):
approx_dict = get_approx_to_opt_from_file(approx_path)
cparam_dict = get_cparams_from_lengths(**args)
approx_list = []
cparam_list = []
for key in approx_dict:
approx_list.append(approx_dict[key])
cparam_list.append(cparam_dict[key])
return cparam_list, approx_list
def get_soltime_and_cparam_lists(**args):
soltime_dict = get_soltimes_from_file(**args)
cparam_dict = get_cparams_from_lengths(**args)
soltime_list = []
cparam_list = []
for key in soltime_dict:
soltime_list.append(soltime_dict[key])
cparam_list.append(cparam_dict[key])
return soltime_list, cparam_list
def get_reg_fit_data(x_list, y_list):
X = np.array(x_list)
X = sm.add_constant(X)
Y = np.array(y_list)
results = sm.OLS(Y, X).fit()
b = results.params[0]
a = results.params[1]
x = np.arange(np.min(x_list), np.max(x_list), 0.0001)
y = a * x + b
# p_value = np.round(results.pvalues[1], 10)
p_value = np.round(results.pvalues[1],30)
rsquared = np.round(results.rsquared, 4)
slope = np.round(a, 6)
return x, y, p_value, rsquared, slope | [
"statsmodels.api.OLS",
"numpy.min",
"numpy.max",
"numpy.array",
"time.strip",
"statsmodels.api.add_constant",
"numpy.round",
"numpy.sqrt"
] | [((3922, 3938), 'numpy.array', 'np.array', (['x_list'], {}), '(x_list)\n', (3930, 3938), True, 'import numpy as np\n'), ((3947, 3965), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (3962, 3965), True, 'import statsmodels.api as sm\n'), ((3974, 3990), 'numpy.array', 'np.array', (['y_list'], {}), '(y_list)\n', (3982, 3990), True, 'import numpy as np\n'), ((4215, 4247), 'numpy.round', 'np.round', (['results.pvalues[1]', '(30)'], {}), '(results.pvalues[1], 30)\n', (4223, 4247), True, 'import numpy as np\n'), ((4262, 4291), 'numpy.round', 'np.round', (['results.rsquared', '(4)'], {}), '(results.rsquared, 4)\n', (4270, 4291), True, 'import numpy as np\n'), ((4304, 4318), 'numpy.round', 'np.round', (['a', '(6)'], {}), '(a, 6)\n', (4312, 4318), True, 'import numpy as np\n'), ((4094, 4108), 'numpy.min', 'np.min', (['x_list'], {}), '(x_list)\n', (4100, 4108), True, 'import numpy as np\n'), ((4110, 4124), 'numpy.max', 'np.max', (['x_list'], {}), '(x_list)\n', (4116, 4124), True, 'import numpy as np\n'), ((4005, 4017), 'statsmodels.api.OLS', 'sm.OLS', (['Y', 'X'], {}), '(Y, X)\n', (4011, 4017), True, 'import statsmodels.api as sm\n'), ((840, 855), 'numpy.sqrt', 'np.sqrt', (['(1 * 50)'], {}), '(1 * 50)\n', (847, 855), True, 'import numpy as np\n'), ((2165, 2177), 'time.strip', 'time.strip', ([], {}), '()\n', (2175, 2177), False, 'import time\n')] |
#构建可训练的分布式词向量
import tensorflow as tf
import numpy as np
import math
class embedding:
def __init__(self,vocabulary_size,embedding_size):
'''
构建embedding层
vocabulary_size:为词库大小
embedding_size:分布式词向量大小
'''
self.vocabulary_size=vocabulary_size
self.embedding_size=embedding_size
self.embeddings=tf.Variable(tf.random.uniform([self.vocabulary_size,self.embedding_size],-1.0,1.0))
def embedding_lookup(self,oneHot_data):
'''
embedding的查找表函数
oneHot_data:词库大小的one_hot向量
'''
outputs_i=[]
#外层训练是遍历一个批次的所有语句,内层训练是遍历语句的每个one_hot数据
for i in range(oneHot_data.shape[0]):
outputs_j=[]
for j in range(oneHot_data.shape[1]):
if(tf.reduce_sum(oneHot_data[i,j,:]).numpy()==0.0):
#遇到-填充则返回全零的词向量
outputs_j.append(tf.zeros([1,self.embeddings.shape[-1]]))
else:
#以查找表方式提取对应行的词向量
row = tf.math.argmax(oneHot_data[i,j,:],output_type=tf.dtypes.int32).numpy()
outputs_j.append(tf.reshape(self.embeddings[row,:],[1,self.embeddings.shape[-1]]))
array_outputs_j=tf.concat(outputs_j,0)
array_outputs_j=tf.reshape(array_outputs_j,shape=[1]+(list(tf.concat(outputs_j,0).shape)))
outputs_i.append(tf.concat(array_outputs_j,0))
return tf.concat(outputs_i,0)
def get_params(self):
return [self.embeddings]
def positional_encoding(self,pos,d):
def w_k(pos,k, d):
wk = 1.0/(10000**(((2*(k//2))/(float(d)))))
return tf.matmul(pos , wk)
pe=tf.zeros([pos,d])
wk = w_k(tf.reshape(tf.constant(np.arange(pos),dtype=tf.float32),[-1,1]),tf.reshape(tf.constant(np.arange(d),dtype=tf.float32),[1,-1]),d)
tmp=wk.numpy()
tmp[:,0::2]=np.sin(tmp[:,0::2])
tmp[:,1::2]=np.sin(tmp[:,1::2])
return tf.reshape(tmp,[1,tmp.shape[0],tmp.shape[1]])
| [
"tensorflow.reduce_sum",
"tensorflow.random.uniform",
"tensorflow.math.argmax",
"tensorflow.reshape",
"tensorflow.concat",
"tensorflow.matmul",
"numpy.sin",
"tensorflow.zeros",
"numpy.arange"
] | [((1453, 1476), 'tensorflow.concat', 'tf.concat', (['outputs_i', '(0)'], {}), '(outputs_i, 0)\n', (1462, 1476), True, 'import tensorflow as tf\n'), ((1709, 1727), 'tensorflow.zeros', 'tf.zeros', (['[pos, d]'], {}), '([pos, d])\n', (1717, 1727), True, 'import tensorflow as tf\n'), ((1916, 1936), 'numpy.sin', 'np.sin', (['tmp[:, 0::2]'], {}), '(tmp[:, 0::2])\n', (1922, 1936), True, 'import numpy as np\n'), ((1956, 1976), 'numpy.sin', 'np.sin', (['tmp[:, 1::2]'], {}), '(tmp[:, 1::2])\n', (1962, 1976), True, 'import numpy as np\n'), ((1991, 2039), 'tensorflow.reshape', 'tf.reshape', (['tmp', '[1, tmp.shape[0], tmp.shape[1]]'], {}), '(tmp, [1, tmp.shape[0], tmp.shape[1]])\n', (2001, 2039), True, 'import tensorflow as tf\n'), ((372, 445), 'tensorflow.random.uniform', 'tf.random.uniform', (['[self.vocabulary_size, self.embedding_size]', '(-1.0)', '(1.0)'], {}), '([self.vocabulary_size, self.embedding_size], -1.0, 1.0)\n', (389, 445), True, 'import tensorflow as tf\n'), ((1240, 1263), 'tensorflow.concat', 'tf.concat', (['outputs_j', '(0)'], {}), '(outputs_j, 0)\n', (1249, 1263), True, 'import tensorflow as tf\n'), ((1678, 1696), 'tensorflow.matmul', 'tf.matmul', (['pos', 'wk'], {}), '(pos, wk)\n', (1687, 1696), True, 'import tensorflow as tf\n'), ((1408, 1437), 'tensorflow.concat', 'tf.concat', (['array_outputs_j', '(0)'], {}), '(array_outputs_j, 0)\n', (1417, 1437), True, 'import tensorflow as tf\n'), ((1767, 1781), 'numpy.arange', 'np.arange', (['pos'], {}), '(pos)\n', (1776, 1781), True, 'import numpy as np\n'), ((1831, 1843), 'numpy.arange', 'np.arange', (['d'], {}), '(d)\n', (1840, 1843), True, 'import numpy as np\n'), ((911, 951), 'tensorflow.zeros', 'tf.zeros', (['[1, self.embeddings.shape[-1]]'], {}), '([1, self.embeddings.shape[-1]])\n', (919, 951), True, 'import tensorflow as tf\n'), ((1145, 1212), 'tensorflow.reshape', 'tf.reshape', (['self.embeddings[row, :]', '[1, self.embeddings.shape[-1]]'], {}), '(self.embeddings[row, :], [1, 
self.embeddings.shape[-1]])\n', (1155, 1212), True, 'import tensorflow as tf\n'), ((789, 824), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['oneHot_data[i, j, :]'], {}), '(oneHot_data[i, j, :])\n', (802, 824), True, 'import tensorflow as tf\n'), ((1037, 1102), 'tensorflow.math.argmax', 'tf.math.argmax', (['oneHot_data[i, j, :]'], {'output_type': 'tf.dtypes.int32'}), '(oneHot_data[i, j, :], output_type=tf.dtypes.int32)\n', (1051, 1102), True, 'import tensorflow as tf\n'), ((1334, 1357), 'tensorflow.concat', 'tf.concat', (['outputs_j', '(0)'], {}), '(outputs_j, 0)\n', (1343, 1357), True, 'import tensorflow as tf\n')] |
import os
from abc import ABC, abstractmethod
from collections import namedtuple
from typing import List, Union
from datetime import datetime
import pandas as pd
from pandas import Timestamp
import numpy as np
from fxqu4nt.market.symbol import Symbol
from fxqu4nt.market.kdb import QuotesDB
from fxqu4nt.utils.common import q_dt_str
from fxqu4nt.logger import create_logger
# Lightweight record for one aggregated bar: bid/ask OHLC prices, traded
# volume, and the start/end timestamps of the interval the bar covers.
OHLC = namedtuple(
    'OHLC',
    'OpenBid HighBid LowBid CloseBid '
    'OpenAsk HighAsk LowAsk CloseAsk '
    'Volume Start End')
class BarBase(ABC):
    """Abstract base for bar builders backed by a quotes database.

    Subclasses are expected to provide ``self.kdb`` (the quote store) and
    ``self.symbol`` before the helpers below are called.
    """

    def fetch(self, sdt, edt):
        """Return bars between ``sdt`` and ``edt``; subclasses override this."""
        pass

    def fisrt_quote_date(self):
        """Return the first quote timestamp of ``self.symbol`` as a ``datetime``.

        NOTE(review): the method name carries a typo ("fisrt"); it is kept
        unchanged because external callers may rely on it.
        """
        return self.kdb.first_quote_date(self.symbol).to_pydatetime()
class TickBar(BarBase):
    """Builds constant-tick-count bars from the quotes stored in kdb+.

    Each bar aggregates ``step_size`` consecutive ticks of ``symbol``.
    """

    def __init__(self, kdb: QuotesDB, symbol: Union[str, Symbol], step_size=50):
        """
        :param kdb: quote database wrapper used to run q queries
        :param symbol: symbol object or plain symbol name the bars belong to
        :param step_size: number of consecutive ticks aggregated into one bar
        """
        self.kdb = kdb
        self.q = self.kdb.q
        self.symbol = symbol
        self.step_size = step_size
        self.logger = create_logger(self.__class__.__name__)

    def _symbol_name(self):
        """Return the plain symbol name whether ``symbol`` is a str or Symbol."""
        if isinstance(self.symbol, Symbol):
            return self.symbol.name
        return self.symbol

    def _bar_table_name(self):
        """Return the generated bar table name, e.g. ``GenTick00050BarsEURUSD``."""
        return "GenTick{tick_size:05d}Bars{symbol}".format(
            tick_size=self.step_size, symbol=self._symbol_name())

    def _bar_path(self):
        """Return the on-disk path of the generated bar table for this symbol."""
        # _get_symbol_path is a semi-private helper of QuotesDB; kept as the
        # original code used it.
        return os.path.join(self.kdb._get_symbol_path(self._symbol_name()),
                            self._bar_table_name())

    def fetch(self, sdt, edt, pandas=False) -> Union[pd.DataFrame, List['OHLC']]:
        """Fetch tick bars between ``sdt`` and ``edt``.

        :param sdt: start datetime of the requested range
        :param edt: end datetime of the requested range
        :param pandas: when True return the raw DataFrame from q,
            otherwise a list of ``OHLC`` records
        :return: DataFrame or list of ``OHLC``; an empty list on failure
        """
        tbn = self.kdb.quote_table_name(self.symbol)
        qfmt = ".tickbar.makeBars[{tbn};{step_size};{sdt};{edt}]"
        try:
            query = qfmt.format(tbn=tbn,
                               step_size=str(self.step_size),
                               sdt=q_dt_str(sdt), edt=q_dt_str(edt))
            result = self.q(query, pandas=True)
            self.logger.debug("Execute query: %s" % query)
            if pandas:
                return result
            result = [
                OHLC(OpenBid=r["OpenBid"], HighBid=r["HighBid"], LowBid=r["LowBid"], CloseBid=r["CloseBid"],
                     OpenAsk=r["OpenAsk"], HighAsk=r["HighAsk"], LowAsk=r["LowAsk"], CloseAsk=r["CloseAsk"],
                     Volume=r["Volume"], Start=r["Start"], End=r["End"])
                for idx, r in result.iterrows()
            ]
            return result
        except Exception as e:
            self.logger.error("Fetch tick bar with step size:%d error: %s" % (self.step_size, str(e)))
            return []

    def __repr__(self):
        return "Tick %d Bar" % self.step_size

    def exist(self):
        """Return True when the generated bar table already exists on disk."""
        return os.path.exists(self._bar_path())

    def generate(self):
        """Generate the tick-bar table in q and return its on-disk path."""
        bar_path = self._bar_path()
        qtb = self.kdb.quote_table_name(self.symbol)
        self.q.sendSync('.tickbar.genBars',
                        bar_path,
                        self._bar_table_name(), np.bytes_(qtb.encode('utf-8')),
                        np.int32(self.step_size))  # see q/tick_bar.q
        return bar_path
| [
"fxqu4nt.logger.create_logger",
"os.path.exists",
"fxqu4nt.utils.common.q_dt_str",
"collections.namedtuple",
"numpy.int32",
"os.path.join"
] | [((384, 522), 'collections.namedtuple', 'namedtuple', (['"""OHLC"""', "['OpenBid', 'HighBid', 'LowBid', 'CloseBid', 'OpenAsk', 'HighAsk', 'LowAsk',\n 'CloseAsk', 'Volume', 'Start', 'End']"], {}), "('OHLC', ['OpenBid', 'HighBid', 'LowBid', 'CloseBid', 'OpenAsk',\n 'HighAsk', 'LowAsk', 'CloseAsk', 'Volume', 'Start', 'End'])\n", (394, 522), False, 'from collections import namedtuple\n'), ((1016, 1054), 'fxqu4nt.logger.create_logger', 'create_logger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (1029, 1054), False, 'from fxqu4nt.logger import create_logger\n'), ((2544, 2571), 'os.path.join', 'os.path.join', (['bar_path', 'tbn'], {}), '(bar_path, tbn)\n', (2556, 2571), False, 'import os\n'), ((2584, 2608), 'os.path.exists', 'os.path.exists', (['bar_path'], {}), '(bar_path)\n', (2598, 2608), False, 'import os\n'), ((2974, 3001), 'os.path.join', 'os.path.join', (['bar_path', 'tbn'], {}), '(bar_path, tbn)\n', (2986, 3001), False, 'import os\n'), ((3222, 3246), 'numpy.int32', 'np.int32', (['self.step_size'], {}), '(self.step_size)\n', (3230, 3246), True, 'import numpy as np\n'), ((1410, 1423), 'fxqu4nt.utils.common.q_dt_str', 'q_dt_str', (['sdt'], {}), '(sdt)\n', (1418, 1423), False, 'from fxqu4nt.utils.common import q_dt_str\n'), ((1429, 1442), 'fxqu4nt.utils.common.q_dt_str', 'q_dt_str', (['edt'], {}), '(edt)\n', (1437, 1442), False, 'from fxqu4nt.utils.common import q_dt_str\n')] |
import os
import itertools
import pandas as pd
import geopandas as gpd
import numpy as np
import json
from gisele.functions import line_to_points, distance_2d, nearest
from gisele import dijkstra, lcoe_optimization
from gisele.multi_obj_factor import emission_factor, reliability_grid, line_reliability
def clusters_interconnections(geo_df_clustered, grid_resume, substations, mg, total_energy,
                              grid_om, coe, grid_ir, grid_lifetime, branch, line_bc,
                              resolution):
    """Build the candidate-link tables consumed by the MILP optimizer.

    For every cluster/substation pair it routes a possible MV connection
    along the roads with Dijkstra, actualizes its cost (investment + O&M -
    salvage value) and writes the resulting link/node tables as CSV files
    under 'Output/LCOE/'.

    :param geo_df_clustered: GeoDataFrame of clustered grid points
    :param grid_resume: per-cluster summary table (load, costs, ...)
    :param substations: GeoDataFrame of candidate substations
    :param mg: microgrid sizing results per cluster
    :param total_energy: per-cluster energy demand (currently unused here;
        kept for interface compatibility)
    :param grid_om: yearly O&M cost share of the grid investment
    :param coe: cost of electricity (unused here; interface compatibility)
    :param grid_ir: discount (interest) rate
    :param grid_lifetime: lifetime of grid components in years
    :param branch: 'yes' to work on branch shapefiles, otherwise on grids
    :param line_bc: base unit cost of a line
    :param resolution: raster/grid resolution used for routing

    Side effects: temporarily changes the working directory while routing
    (restored at the end) and writes several CSV files to 'Output/LCOE/'.
    """
    substations = substations.assign(
        nearest_id=substations.apply(nearest, df=geo_df_clustered,
                                     src_column='ID', axis=1))
    gdf_roads = gpd.read_file('Output/Datasets/Roads/gdf_roads.shp')
    roads_segments = gpd.read_file('Output/Datasets/Roads/roads_segments.shp')

    with open('gisele/michele/Inputs/data.json') as f:
        input_michele = json.load(f)

    if branch == 'yes':
        file = 'Branch_'
        os.chdir(r'Output/Branches')
    else:
        file = 'Grid_'
        os.chdir(r'Output/Grids')
    milp_clusters = grid_resume[['Cluster', 'Load [kW]']].copy()
    # NOTE(review): Series.iteritems/append were removed in pandas 2.0 --
    # this module presumably runs against an older pinned pandas; confirm.
    milp_clusters['Cluster'] = ['C' + str(i[0]) for i in
                                milp_clusters['Cluster'].iteritems()]
    # energy_mismatch = \
    #     (total_energy['Energy'] / 1000) / mg['Energy Produced [MWh]']
    milp_clusters['mg_npc'] = mg['Total Cost [kEUR]']
    milp_subs = substations[['ID', 'PowerAvailable']].copy()
    milp_subs['ID'] = ['S' + str(i[1]) for i in milp_subs['ID'].iteritems()]
    milp_subs['subs_npc'] = 10  # placeholder value, to be refined
    sets = milp_clusters['Cluster'].append(milp_subs['ID'], ignore_index=True)
    combinations = list(itertools.combinations(sets, 2))
    milp_links = pd.DataFrame(index=range(len(combinations)),
                              columns=['0', '1', 'Cost', 'Length'])
    milp_links['0'] = [i[0] for i in combinations]
    milp_links['1'] = [i[1] for i in combinations]
    milp_links['Cost'] = 999999  # sentinel: link not routable

    for row in milp_links.iterrows():
        # substation-to-substation links are never built
        if 'S' in row[1][0] and 'S' in row[1][1]:
            continue
        c_grid_points = []
        print('Connecting ' + row[1][0] + ' and ' + row[1][1])
        if 'C' in row[1][0]:
            if os.path.isfile(file + str(row[1][0].split('C')[1]) +
                              ".shp"):
                grid_1 = gpd.read_file(file + str(row[1][0].split('C')[1]) +
                                      ".shp")
                c_grid_points = list(zip(grid_1.ID1.astype(int),
                                         grid_1.ID2.astype(int)))
                grid_1 = line_to_points(grid_1, geo_df_clustered)
            else:
                grid_1 = geo_df_clustered[geo_df_clustered['Cluster'] == int(row[1][0].split('C')[1])]
                grid_1 = grid_1[grid_1['Population'] > 0]
                c_grid_points = []
        elif 'S' in row[1][0]:
            sub_in_df = substations[
                substations['ID'] ==
                int(row[1][0].split('S')[1])].nearest_id.values[0]
            grid_1 = geo_df_clustered[geo_df_clustered['ID'] == sub_in_df]
        if 'C' in row[1][1]:
            if os.path.isfile(file + str(row[1][1].split('C')[1]) +
                              ".shp"):
                grid_2 = gpd.read_file(file + str(row[1][1].split('C')[1]) +
                                      ".shp")
                c_grid_points.append(list(zip(grid_2.ID1.astype(int),
                                              grid_2.ID2.astype(int))))
                grid_2 = line_to_points(grid_2, geo_df_clustered)
            else:
                grid_2 = geo_df_clustered[
                    geo_df_clustered['Cluster'] == int(row[1][1].split('C')[1])]
                grid_2 = grid_2[grid_2['Population'] > 0]
        elif 'S' in row[1][1]:
            sub_in_df = substations[
                substations['ID'] ==
                int(row[1][1].split('S')[1])].nearest_id.values[0]
            grid_2 = geo_df_clustered[geo_df_clustered['ID'] == sub_in_df]
        dist_2d = pd.DataFrame(distance_2d(grid_1, grid_2, 'X', 'Y'),
                               index=grid_1.ID.values,
                               columns=grid_2.ID.values)
        p1 = geo_df_clustered[geo_df_clustered['ID'] == dist_2d.min().idxmin()]
        p2 = geo_df_clustered[geo_df_clustered['ID'] ==
                              dist_2d.min(axis=1).idxmin()]

        # connection, connection_cost, connection_length, _ = \
        #     dijkstra.dijkstra_connection(geo_df_clustered, p1, p2,
        #                                  c_grid_points, line_bc, resolution)
        connection, connection_cost, connection_length, _ = \
            dijkstra.dijkstra_connection_roads(geo_df_clustered, p1, p2,
                                         c_grid_points, line_bc,
                                         resolution, gdf_roads,
                                         roads_segments)

        if connection.empty and connection_cost == 999999:
            continue
        elif connection.empty:
            connection_cost = 1000
            connection_length = 1000
        # compute actualized grid cost wrt the microgrid number of years
        connection_om = [(connection_cost/1000) * grid_om] * input_michele['num_years']
        # BUG FIX: np.npv was deprecated (NEP 32) and removed in NumPy 1.20;
        # inline the same formula: NPV = sum(v_t / (1 + rate)**t), t from 0.
        connection_om = sum(v / (1 + grid_ir) ** t
                            for t, v in enumerate(connection_om))
        connection_salvage = (connection_cost/1000) * \
                             (grid_lifetime-input_michele['num_years'])/(grid_lifetime)*\
                             1/(1+grid_ir)**(input_michele['num_years'])  # check this cost
        milp_links.loc[row[0], 'Cost'] = (connection_cost / 1000) \
                                         + connection_om - connection_salvage
        milp_links.loc[row[0], 'Length'] = (connection_length / 1000)  # in km
    # drop links that could not be routed (still at the sentinel cost)
    milp_links.drop(milp_links[milp_links['Cost'] == 999999].index,
                    inplace=True)
    milp_links.reset_index(inplace=True, drop=True)
    os.chdir('../..')
    milp_links.to_csv(r'Output/LCOE/milp_links.csv', index=False)
    sets.to_csv(r'Output/LCOE/set.csv', index=False)
    milp_links.loc[:, ['0', '1']].to_csv(r'Output/LCOE/possible_links.csv',
                                         index=False)
    # duplicate connections to have the possibility of links in two directions
    # but with positive power flow
    milp_links_duplicate = milp_links.copy(deep=True)
    # switch indices
    milp_links_duplicate['2'] = milp_links_duplicate['1']
    milp_links_duplicate['1'] = milp_links_duplicate['0']
    milp_links_duplicate['0'] = milp_links_duplicate['2']
    milp_links = milp_links.append(milp_links_duplicate)
    milp_links.reset_index(inplace=True, drop=True)
    # include a fictitious value for the emissions tied to link construction:
    # TODO: update with a sensible per-fuel emission value from the literature
    em_links = milp_links.copy()
    em_links['emission'] = (em_links['Cost']-min(em_links['Cost']))/(max(em_links['Cost'])-min(em_links['Cost']))*10
    em_links.drop(['Cost', 'Length'], axis=1, inplace=True)
    milp_links.loc[:, ['0', '1', 'Cost']].to_csv(
        r'Output/LCOE/cost_links_complete.csv',
        index=False)
    milp_links.loc[:, ['0', '1', 'Length']].to_csv(
        r'Output/LCOE/len_links_complete.csv',
        index=False)
    sets.to_csv(r'Output/LCOE/set.csv', index=False)
    milp_links.loc[:, ['0', '1']].to_csv(
        r'Output/LCOE/possible_links_complete.csv',
        index=False)
    em_links.loc[:, ['0', '1', 'emission']].to_csv(r'Output/LCOE/em_links.csv', index=False)
    # add columns to set related to cluster radius (for estimating voltage drops)
    # todo-> improve this step, evaluating different possibilities for cluster radius
    d_nodes = pd.DataFrame(sets)
    d_nodes['length'] = 0
    d_nodes.set_index(0, inplace=True)
    for i, row in d_nodes.iterrows():
        if 'C' in i:
            # diagonal of the cluster bounding box as a radius proxy
            max_x = geo_df_clustered[geo_df_clustered['Cluster'] == int(i.split('C')[1])]['geometry'].x.max()
            min_x = geo_df_clustered[geo_df_clustered['Cluster'] == int(i.split('C')[1])][
                'geometry'].x.min()
            max_y = geo_df_clustered[geo_df_clustered['Cluster'] == int(i.split('C')[1])][
                'geometry'].y.max()
            min_y = geo_df_clustered[geo_df_clustered['Cluster'] == int(i.split('C')[1])][
                'geometry'].y.min()
            d_nodes.loc[i, 'length'] = ((max_x-min_x)**2+(max_y-min_y)**2)**0.5/1000  # [km]
    d_nodes.to_csv(r'Output/LCOE/len_nodes.csv')
    milp_subs.loc[:, ['ID', 'PowerAvailable']].to_csv(
        r'Output/LCOE/sub_power.csv', index=False)
    milp_subs.loc[:, ['ID', 'subs_npc']].to_csv(r'Output/LCOE/sub_npc.csv',
                                                index=False)
    milp_subs.loc[:, 'ID'].to_csv(r'Output/LCOE/subs.csv', index=False)
    milp_clusters.loc[:, ['Cluster', 'Load [kW]']].to_csv(
        r'Output/LCOE/c_power.csv', index=False)
def milp_execution(geo_df_clustered, grid_resume, substations, coe, branch, line_bc,
                   resolution, p_max_lines):
    """Run the MILP connection optimization and materialize its output.

    Executes the LCOE cost optimization, reads the optimizer's connection
    decisions back from 'Output/LCOE/*.csv', routes every selected
    cluster-to-cluster / cluster-to-substation link with Dijkstra along the
    roads, and updates ``grid_resume`` with the resulting lengths and costs.

    :param geo_df_clustered: GeoDataFrame of clustered grid points
    :param grid_resume: per-cluster summary table, updated and returned
    :param substations: GeoDataFrame of candidate substations
    :param coe: cost of electricity used by the optimizer
    :param branch: 'yes' to work on branch shapefiles, otherwise on grids
    :param line_bc: base unit cost of a line
    :param resolution: raster/grid resolution used for routing
    :param p_max_lines: maximum power a line can carry
    :return: the updated ``grid_resume`` DataFrame

    Side effects: temporarily changes the working directory (restored at the
    end), writes connection shapefiles and 'grid_resume_opt.csv'.
    """
    total_connections_opt = pd.DataFrame()
    # run the effective optimization
    nation_emis = 1000  # kgCO2 emission per kWh given country energy mix
    country = 'Mozambique'
    emission_type = 'direct'
    nation_emis = emission_factor(country, emission_type)  # kgCO2/MWh
    nation_rel = reliability_grid(country)
    line_rel = line_reliability()
    with open('gisele/michele/Inputs/data.json') as f:
        input_michele = json.load(f)
    lcoe_optimization.cost_optimization(p_max_lines, coe, nation_emis,
                                        nation_rel, line_rel, input_michele)
    gdf_roads = gpd.read_file('Output/Datasets/Roads/gdf_roads.shp')
    roads_segments = gpd.read_file('Output/Datasets/Roads/roads_segments.shp')
    con_out = pd.read_csv(r'Output/LCOE/MV_connections_output.csv')
    mg_out = pd.read_csv(r'Output/LCOE/MV_SHS_output.csv')
    substations = substations.assign(
        nearest_id=substations.apply(nearest, df=geo_df_clustered,
                                     src_column='ID', axis=1))
    # re-orient duplicated links so the cluster id sits in 'id1'
    dups = con_out.duplicated('id1', keep=False)
    dups = dups[dups].index.values  # boolean mask instead of '== True'
    for i in dups:
        if 'C' in con_out.loc[i, 'id2']:
            if con_out.loc[i, 'id2'] not in con_out.loc[:, 'id1']:
                swap = con_out.loc[i, 'id1']
                con_out.loc[i, 'id1'] = con_out.loc[i, 'id2']
                con_out.loc[i, 'id2'] = swap
    con_out = con_out.sort_values('id2', ascending=False)
    if branch == 'yes':
        file = 'Branch_'
        os.chdir(r'Output/Branches')
    else:
        file = 'Grid_'
        os.chdir(r'Output/Grids')
    # clusters chosen as stand-alone microgrids need no grid connection
    for row in mg_out.iterrows():
        index = grid_resume.loc[
            grid_resume['Cluster'] == int(row[1][0].split('C')[1])].index
        grid_resume.loc[index, 'Connection Length [km]'] = 0
        grid_resume.loc[index, 'Connection Cost [k€]'] = 0
        grid_resume.loc[index, 'Connection Type'] = 'Microgrid'
        grid_resume.loc[index, 'Substation ID'] = 'Microgrid'
    # iterate until all the possible connections are analyzed,
    # starting each pass from an extreme (leaf) cluster
    while not con_out.empty:
        # make a list of all the items of con_out
        list_items = con_out['id1'].append(con_out['id2']).values.tolist()
        # group items and count their occurrences
        my_dict = {i: list_items.count(i) for i in list_items}
        # pick a cluster id appearing exactly once (a leaf of the tree)
        # NOTE(review): if no key matches, k silently keeps the last iterated
        # key -- presumably the optimizer output always contains a leaf
        # cluster; confirm.
        for k in my_dict:
            if my_dict[k] == 1 and 'S' not in k:
                print(k)
                break
        index = grid_resume.loc[
            grid_resume['Cluster'] == int(k.split('C')[1])].index
        if os.path.isfile(file + str(k.split('C')[1]) +
                          ".shp"):
            grid_1 = gpd.read_file(file + str(k.split('C')[1]) +
                                  ".shp")
            c_grid_points = list(zip(grid_1.ID1.astype(int),
                                     grid_1.ID2.astype(int)))
            grid_1 = line_to_points(grid_1, geo_df_clustered)
        else:
            grid_1 = geo_df_clustered[
                geo_df_clustered['Cluster'] == int(
                    k.split('C')[1])]
            grid_1 = grid_1[grid_1['Population'] > 0]
            c_grid_points = []
        # find the second endpoint of the connection
        if (con_out['id1'] == k).any():
            k1 = con_out[con_out['id1'] == k]['id2'].values[0]
        else:
            k1 = con_out[con_out['id2'] == k]['id1'].values[0]
        if 'C' in k1:
            if os.path.isfile(file + str(k1.split('C')[1]) +
                              ".shp"):
                grid_2 = gpd.read_file(file + str(k1.split('C')[1]) +
                                      ".shp")
                c_grid_points.append(list(zip(grid_2.ID1.astype(int),
                                              grid_2.ID2.astype(int))))
                grid_2 = line_to_points(grid_2, geo_df_clustered)
            else:
                grid_2 = geo_df_clustered[
                    geo_df_clustered['Cluster'] == int(
                        k1.split('C')[1])]
                grid_2 = grid_2[grid_2['Population'] > 0]
            grid_resume.loc[index, 'Connection Type'] = 'Intra cluster connection'
            grid_resume.loc[index, 'Substation ID'] = k1
        elif 'S' in k1:
            sub_in_df = substations[
                substations['ID'] ==
                int(k1.split('S')[1])].nearest_id.values[0]
            grid_2 = geo_df_clustered[geo_df_clustered['ID'] == sub_in_df]
            grid_resume.loc[index, 'Connection Type'] = substations[
                substations['ID'] == int(k1.split('S')[1])].Type.values[
                0]
            grid_resume.loc[index, 'Substation ID'] = substations[
                substations['ID'] == int(k1.split('S')[1])].ID.values[0]
        dist_2d = pd.DataFrame(distance_2d(grid_1, grid_2, 'X', 'Y'),
                               index=grid_1.ID.values,
                               columns=grid_2.ID.values)
        p1 = geo_df_clustered[geo_df_clustered['ID'] == dist_2d.min().idxmin()]
        p2 = geo_df_clustered[geo_df_clustered['ID'] ==
                              dist_2d.min(axis=1).idxmin()]
        # route the connection along the roads with Dijkstra
        connection, connection_cost, connection_length, _ = \
            dijkstra.dijkstra_connection_roads(geo_df_clustered, p1, p2,
                                         c_grid_points, line_bc,
                                         resolution, gdf_roads,
                                         roads_segments)
        # remove files created during previous simulations
        if os.path.exists('Connection_' + k.split('C')[1] + '.shp'):
            os.remove('Connection_' + k.split('C')[1] + '.shp')
        # check if the connection exists and save it
        if not connection.empty:
            connection.to_file(
                'Connection_' + k.split('C')[1] + '.shp')
            grid_resume.loc[index, 'Connection Length [km]'] = \
                connection_length / 1000
            grid_resume.loc[index, 'Connection Cost [k€]'] = connection_cost / 1000
            print('Connection for Cluster ' + k + ' created')
        total_connections_opt = \
            gpd.GeoDataFrame(pd.concat([total_connections_opt,
                                       connection], sort=True))
        # drop the processed link (either orientation) from the queue
        con_out.drop(index=con_out[(((con_out['id1'] == k) & (con_out['id2'] == k1)) |
                                    ((con_out['id2'] == k) & (con_out['id1'] == k1)))].index,
                     inplace=True)
    grid_resume.to_csv('grid_resume_opt.csv', index=False)
    if not total_connections_opt.empty:
        total_connections_opt.crs = geo_df_clustered.crs
        total_connections_opt.to_file('all_connections_opt.shp')
    else:
        # BUG FIX: the unconditional os.remove crashed with FileNotFoundError
        # on a clean run; only delete a stale file if it actually exists.
        if os.path.exists('all_connections_opt.shp'):
            os.remove('all_connections_opt.shp')
    os.chdir('../..')
    return grid_resume
| [
"pandas.DataFrame",
"os.remove",
"json.load",
"gisele.lcoe_optimization.cost_optimization",
"gisele.functions.distance_2d",
"pandas.read_csv",
"gisele.multi_obj_factor.line_reliability",
"numpy.npv",
"gisele.dijkstra.dijkstra_connection_roads",
"gisele.functions.line_to_points",
"gisele.multi_ob... | [((678, 730), 'geopandas.read_file', 'gpd.read_file', (['"""Output/Datasets/Roads/gdf_roads.shp"""'], {}), "('Output/Datasets/Roads/gdf_roads.shp')\n", (691, 730), True, 'import geopandas as gpd\n'), ((751, 808), 'geopandas.read_file', 'gpd.read_file', (['"""Output/Datasets/Roads/roads_segments.shp"""'], {}), "('Output/Datasets/Roads/roads_segments.shp')\n", (764, 808), True, 'import geopandas as gpd\n'), ((5952, 5969), 'os.chdir', 'os.chdir', (['"""../.."""'], {}), "('../..')\n", (5960, 5969), False, 'import os\n'), ((7760, 7778), 'pandas.DataFrame', 'pd.DataFrame', (['sets'], {}), '(sets)\n', (7772, 7778), True, 'import pandas as pd\n'), ((9116, 9130), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9128, 9130), True, 'import pandas as pd\n'), ((9316, 9355), 'gisele.multi_obj_factor.emission_factor', 'emission_factor', (['country', 'emission_type'], {}), '(country, emission_type)\n', (9331, 9355), False, 'from gisele.multi_obj_factor import emission_factor, reliability_grid, line_reliability\n'), ((9385, 9410), 'gisele.multi_obj_factor.reliability_grid', 'reliability_grid', (['country'], {}), '(country)\n', (9401, 9410), False, 'from gisele.multi_obj_factor import emission_factor, reliability_grid, line_reliability\n'), ((9426, 9444), 'gisele.multi_obj_factor.line_reliability', 'line_reliability', ([], {}), '()\n', (9442, 9444), False, 'from gisele.multi_obj_factor import emission_factor, reliability_grid, line_reliability\n'), ((9542, 9649), 'gisele.lcoe_optimization.cost_optimization', 'lcoe_optimization.cost_optimization', (['p_max_lines', 'coe', 'nation_emis', 'nation_rel', 'line_rel', 'input_michele'], {}), '(p_max_lines, coe, nation_emis,\n nation_rel, line_rel, input_michele)\n', (9577, 9649), False, 'from gisele import dijkstra, lcoe_optimization\n'), ((9702, 9754), 'geopandas.read_file', 'gpd.read_file', (['"""Output/Datasets/Roads/gdf_roads.shp"""'], {}), "('Output/Datasets/Roads/gdf_roads.shp')\n", (9715, 9754), True, 
'import geopandas as gpd\n'), ((9776, 9833), 'geopandas.read_file', 'gpd.read_file', (['"""Output/Datasets/Roads/roads_segments.shp"""'], {}), "('Output/Datasets/Roads/roads_segments.shp')\n", (9789, 9833), True, 'import geopandas as gpd\n'), ((9848, 9900), 'pandas.read_csv', 'pd.read_csv', (['"""Output/LCOE/MV_connections_output.csv"""'], {}), "('Output/LCOE/MV_connections_output.csv')\n", (9859, 9900), True, 'import pandas as pd\n'), ((9915, 9959), 'pandas.read_csv', 'pd.read_csv', (['"""Output/LCOE/MV_SHS_output.csv"""'], {}), "('Output/LCOE/MV_SHS_output.csv')\n", (9926, 9959), True, 'import pandas as pd\n'), ((16164, 16181), 'os.chdir', 'os.chdir', (['"""../.."""'], {}), "('../..')\n", (16172, 16181), False, 'import os\n'), ((888, 900), 'json.load', 'json.load', (['f'], {}), '(f)\n', (897, 900), False, 'import json\n'), ((959, 986), 'os.chdir', 'os.chdir', (['"""Output/Branches"""'], {}), "('Output/Branches')\n", (967, 986), False, 'import os\n'), ((1029, 1053), 'os.chdir', 'os.chdir', (['"""Output/Grids"""'], {}), "('Output/Grids')\n", (1037, 1053), False, 'import os\n'), ((1685, 1716), 'itertools.combinations', 'itertools.combinations', (['sets', '(2)'], {}), '(sets, 2)\n', (1707, 1716), False, 'import itertools\n'), ((4677, 4804), 'gisele.dijkstra.dijkstra_connection_roads', 'dijkstra.dijkstra_connection_roads', (['geo_df_clustered', 'p1', 'p2', 'c_grid_points', 'line_bc', 'resolution', 'gdf_roads', 'roads_segments'], {}), '(geo_df_clustered, p1, p2, c_grid_points,\n line_bc, resolution, gdf_roads, roads_segments)\n', (4711, 4804), False, 'from gisele import dijkstra, lcoe_optimization\n'), ((5313, 5343), 'numpy.npv', 'np.npv', (['grid_ir', 'connection_om'], {}), '(grid_ir, connection_om)\n', (5319, 5343), True, 'import numpy as np\n'), ((9524, 9536), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9533, 9536), False, 'import json\n'), ((10617, 10644), 'os.chdir', 'os.chdir', (['"""Output/Branches"""'], {}), "('Output/Branches')\n", (10625, 10644), False, 
'import os\n'), ((10687, 10711), 'os.chdir', 'os.chdir', (['"""Output/Grids"""'], {}), "('Output/Grids')\n", (10695, 10711), False, 'import os\n'), ((14700, 14827), 'gisele.dijkstra.dijkstra_connection_roads', 'dijkstra.dijkstra_connection_roads', (['geo_df_clustered', 'p1', 'p2', 'c_grid_points', 'line_bc', 'resolution', 'gdf_roads', 'roads_segments'], {}), '(geo_df_clustered, p1, p2, c_grid_points,\n line_bc, resolution, gdf_roads, roads_segments)\n', (14734, 14827), False, 'from gisele import dijkstra, lcoe_optimization\n'), ((16123, 16159), 'os.remove', 'os.remove', (['"""all_connections_opt.shp"""'], {}), "('all_connections_opt.shp')\n", (16132, 16159), False, 'import os\n'), ((4039, 4076), 'gisele.functions.distance_2d', 'distance_2d', (['grid_1', 'grid_2', '"""X"""', '"""Y"""'], {}), "(grid_1, grid_2, 'X', 'Y')\n", (4050, 4076), False, 'from gisele.functions import line_to_points, distance_2d, nearest\n'), ((12096, 12136), 'gisele.functions.line_to_points', 'line_to_points', (['grid_1', 'geo_df_clustered'], {}), '(grid_1, geo_df_clustered)\n', (12110, 12136), False, 'from gisele.functions import line_to_points, distance_2d, nearest\n'), ((13959, 13996), 'gisele.functions.distance_2d', 'distance_2d', (['grid_1', 'grid_2', '"""X"""', '"""Y"""'], {}), "(grid_1, grid_2, 'X', 'Y')\n", (13970, 13996), False, 'from gisele.functions import line_to_points, distance_2d, nearest\n'), ((15630, 15687), 'pandas.concat', 'pd.concat', (['[total_connections_opt, connection]'], {'sort': '(True)'}), '([total_connections_opt, connection], sort=True)\n', (15639, 15687), True, 'import pandas as pd\n'), ((2605, 2645), 'gisele.functions.line_to_points', 'line_to_points', (['grid_1', 'geo_df_clustered'], {}), '(grid_1, geo_df_clustered)\n', (2619, 2645), False, 'from gisele.functions import line_to_points, distance_2d, nearest\n'), ((3518, 3558), 'gisele.functions.line_to_points', 'line_to_points', (['grid_2', 'geo_df_clustered'], {}), '(grid_2, geo_df_clustered)\n', (3532, 3558), 
False, 'from gisele.functions import line_to_points, distance_2d, nearest\n'), ((12990, 13030), 'gisele.functions.line_to_points', 'line_to_points', (['grid_2', 'geo_df_clustered'], {}), '(grid_2, geo_df_clustered)\n', (13004, 13030), False, 'from gisele.functions import line_to_points, distance_2d, nearest\n')] |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementing a container for the dataset info"""
import numpy as np
class DatasetInfo:
    """Normalized view over a raw dataset-description dictionary.

    Parses keypoint and skeleton metadata into convenient lookup tables
    (id<->name maps, flip pairs, body-part groupings, colors, weights).
    """

    def __init__(self, dataset_info):
        self._dataset_info = dataset_info
        self.dataset_name = self._dataset_info['dataset_name']
        self.paper_info = self._dataset_info['paper_info']
        self.keypoint_info = self._dataset_info['keypoint_info']
        self.skeleton_info = self._dataset_info['skeleton_info']
        # Column vector (num_keypoints, 1) so it broadcasts over coordinates.
        self.joint_weights = np.array(
            self._dataset_info['joint_weights'], dtype=np.float32)[:, None]
        self.sigmas = np.array(self._dataset_info['sigmas'])

        # Keypoints must be parsed first: skeleton parsing needs name->id.
        self._parse_keypoint_info()
        self._parse_skeleton_info()

    def _parse_skeleton_info(self):
        """Parse skeleton information.

        Fills:
        - link_num (int): number of links.
        - skeleton (list((2,))): list of links (id).
        - skeleton_name (list((2,))): list of links (name).
        - pose_link_color (np.ndarray): link colors for visualization.
        """
        self.link_num = len(self.skeleton_info)
        self.skeleton_name = []
        self.skeleton = []
        link_colors = []
        for link_info in self.skeleton_info.values():
            endpoints = link_info['link']
            self.skeleton_name.append(endpoints)
            self.skeleton.append([
                self.keypoint_name2id[endpoints[0]],
                self.keypoint_name2id[endpoints[1]],
            ])
            link_colors.append(link_info.get('color', [255, 128, 0]))
        self.pose_link_color = np.array(link_colors)

    def _parse_keypoint_info(self):
        """Parse keypoint information.

        Fills:
        - keypoint_num (int): number of keypoints.
        - keypoint_id2name / keypoint_name2id (dict): id<->name maps.
        - upper_body_ids / lower_body_ids (list): body-part groupings.
        - flip_index (list) / flip_index_name (list): per-keypoint mirror.
        - flip_pairs (list((2,))) / flip_pairs_name (list((2,))): swap pairs.
        - pose_kpt_color (np.ndarray): keypoint colors for visualization.
        """
        self.keypoint_num = len(self.keypoint_info)
        self.keypoint_id2name = {}
        self.keypoint_name2id = {}
        self.upper_body_ids = []
        self.lower_body_ids = []
        self.flip_index_name = []
        self.flip_pairs_name = []
        kpt_colors = []
        for kid, kpt_info in self.keypoint_info.items():
            name = kpt_info['name']
            self.keypoint_id2name[kid] = name
            self.keypoint_name2id[name] = kid
            kpt_colors.append(kpt_info.get('color', [255, 128, 0]))

            body_half = kpt_info.get('type', '')
            if body_half == 'upper':
                self.upper_body_ids.append(kid)
            elif body_half == 'lower':
                self.lower_body_ids.append(kid)

            # A keypoint that swaps with itself (or has no swap) mirrors to
            # its own name; otherwise record the partner and the pair once.
            partner = kpt_info.get('swap', '')
            if partner in (name, ''):
                self.flip_index_name.append(name)
            else:
                self.flip_index_name.append(partner)
                if [partner, name] not in self.flip_pairs_name:
                    self.flip_pairs_name.append([name, partner])

        self.flip_pairs = [[
            self.keypoint_name2id[a], self.keypoint_name2id[b]
        ] for a, b in self.flip_pairs_name]
        self.flip_index = [
            self.keypoint_name2id[n] for n in self.flip_index_name
        ]
        self.pose_kpt_color = np.array(kpt_colors)
| [
"numpy.array"
] | [((1230, 1268), 'numpy.array', 'np.array', (["self._dataset_info['sigmas']"], {}), "(self._dataset_info['sigmas'])\n", (1238, 1268), True, 'import numpy as np\n'), ((2243, 2273), 'numpy.array', 'np.array', (['self.pose_link_color'], {}), '(self.pose_link_color)\n', (2251, 2273), True, 'import numpy as np\n'), ((4692, 4721), 'numpy.array', 'np.array', (['self.pose_kpt_color'], {}), '(self.pose_kpt_color)\n', (4700, 4721), True, 'import numpy as np\n'), ((1121, 1184), 'numpy.array', 'np.array', (["self._dataset_info['joint_weights']"], {'dtype': 'np.float32'}), "(self._dataset_info['joint_weights'], dtype=np.float32)\n", (1129, 1184), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the histogram plugin summary generation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import numpy as np
import tensorflow as tf
from tensorboard.compat import tf2
from tensorboard.compat.proto import summary_pb2
from tensorboard.plugins.histogram import metadata
from tensorboard.plugins.histogram import summary
from tensorboard.util import tensor_util
# Resolve the lazily-imported TF 2.x compat shim; fall back to None when the
# installed TensorFlow does not provide it.
try:
  tf2.__version__  # Force lazy import to resolve
except ImportError:
  tf2 = None

# Run these tests eagerly under TF 1.x as well.
try:
  tf.compat.v1.enable_eager_execution()
except AttributeError:
  # TF 2.0 doesn't have this symbol because eager is the default.
  pass
class SummaryBaseTest(object):
  """Shared tests for the histogram summary APIs.

  Subclasses implement ``histogram(...)`` to produce a Summary proto via a
  specific API variant (V1/V2, op/pb) and mix in ``tf.test.TestCase``.
  """

  def setUp(self):
    super(SummaryBaseTest, self).setUp()
    # Fixed seed keeps the gaussian fixture deterministic across runs.
    np.random.seed(0)
    self.gaussian = np.random.normal(size=[100])

  def histogram(self, *args, **kwargs):
    """Produce a histogram Summary proto; implemented by subclasses."""
    raise NotImplementedError()

  def test_metadata(self):
    pb = self.histogram('h', [], description='foo')
    self.assertEqual(len(pb.value), 1)
    summary_metadata = pb.value[0].metadata
    self.assertEqual(summary_metadata.summary_description, 'foo')
    plugin_data = summary_metadata.plugin_data
    self.assertEqual(plugin_data.plugin_name, metadata.PLUGIN_NAME)
    parsed = metadata.parse_plugin_metadata(plugin_data.content)
    self.assertEqual(metadata.PROTO_VERSION, parsed.version)

  def test_empty_input(self):
    pb = self.histogram('empty', [])
    buckets = tensor_util.make_ndarray(pb.value[0].tensor)
    np.testing.assert_allclose(buckets, np.array([]).reshape((0, 3)))

  def test_empty_input_of_high_rank(self):
    pb = self.histogram('empty_but_fancy', [[[], []], [[], []]])
    buckets = tensor_util.make_ndarray(pb.value[0].tensor)
    np.testing.assert_allclose(buckets, np.array([]).reshape((0, 3)))

  def test_singleton_input(self):
    pb = self.histogram('twelve', [12])
    buckets = tensor_util.make_ndarray(pb.value[0].tensor)
    np.testing.assert_allclose(buckets, np.array([[11.5, 12.5, 1]]))

  def test_input_with_all_same_values(self):
    pb = self.histogram('twelven', [12, 12, 12])
    buckets = tensor_util.make_ndarray(pb.value[0].tensor)
    np.testing.assert_allclose(buckets, np.array([[11.5, 12.5, 3]]))

  def test_fixed_input(self):
    pass  # TODO: test a small fixed input

  def test_normal_distribution_input(self):
    bucket_count = 44
    pb = self.histogram(
        'normal', data=self.gaussian.reshape((5, -1)), buckets=bucket_count)
    buckets = tensor_util.make_ndarray(pb.value[0].tensor)
    self.assertEqual(buckets[:, 0].min(), self.gaussian.min())
    # Assert near, not equal, since TF's linspace op introduces floating point
    # error in the upper bound of the result.
    # BUG FIX: the tolerance was written as 1.0**-10, which evaluates to 1.0
    # (any power of 1.0 is 1.0) and made the assertion vacuous; 1e-10 is the
    # tolerance the comment above intends.
    self.assertNear(buckets[:, 1].max(), self.gaussian.max(), 1e-10)
    self.assertEqual(buckets[:, 2].sum(), self.gaussian.size)
    # Buckets must tile the range: each lower edge equals the previous upper.
    np.testing.assert_allclose(buckets[1:, 0], buckets[:-1, 1])

  def test_when_shape_not_statically_known(self):
    self.skipTest('TODO: figure out how to test this')
    placeholder = tf.compat.v1.placeholder(tf.float64, shape=None)
    reshaped = self.gaussian.reshape((25, -1))
    self.histogram(data=reshaped,
                   data_tensor=placeholder,
                   feed_dict={placeholder: reshaped})
    # The proto-equality check is all we need.

  def test_when_bucket_count_not_statically_known(self):
    self.skipTest('TODO: figure out how to test this')
    placeholder = tf.compat.v1.placeholder(tf.int32, shape=())
    bucket_count = 44
    pb = self.histogram(
        bucket_count=bucket_count,
        bucket_count_tensor=placeholder,
        feed_dict={placeholder: bucket_count})
    buckets = tensor_util.make_ndarray(pb.value[0].tensor)
    self.assertEqual(buckets.shape, (bucket_count, 3))
class SummaryV1PbTest(SummaryBaseTest, tf.test.TestCase):
  """Runs the shared histogram tests against the V1 `summary.pb` API."""

  def histogram(self, *args, **kwargs):
    # The V1 API calls the `buckets` argument `bucket_count`; translate.
    if 'buckets' in kwargs:
      kwargs['bucket_count'] = kwargs.pop('buckets')
    return summary.pb(*args, **kwargs)

  def test_tag(self):
    for name, expected_tag in (
        ('a', 'a/histogram_summary'),
        ('a/b', 'a/b/histogram_summary')):
      self.assertEqual(expected_tag, self.histogram(name, []).value[0].tag)
class SummaryV1OpTest(SummaryBaseTest, tf.test.TestCase):
  """Runs the shared histogram tests against the V1 `summary.op` API."""

  def histogram(self, *args, **kwargs):
    # The V1 API calls the `buckets` argument `bucket_count`; translate.
    if 'buckets' in kwargs:
      kwargs['bucket_count'] = kwargs.pop('buckets')
    serialized = summary.op(*args, **kwargs).numpy()
    return summary_pb2.Summary.FromString(serialized)

  def test_tag(self):
    for name, expected_tag in (
        ('a', 'a/histogram_summary'),
        ('a/b', 'a/b/histogram_summary')):
      self.assertEqual(expected_tag, self.histogram(name, []).value[0].tag)

  def test_scoped_tag(self):
    with tf.name_scope('scope'):
      self.assertEqual('scope/a/histogram_summary',
                       self.histogram('a', []).value[0].tag)
class SummaryV2PbTest(SummaryBaseTest, tf.test.TestCase):
  """Runs the shared histogram tests against the V2 `histogram_pb` API."""
  def histogram(self, *args, **kwargs):
    return summary.histogram_pb(*args, **kwargs)
class SummaryV2OpTest(SummaryBaseTest, tf.test.TestCase):
  """Runs the shared histogram tests against the V2 `summary.histogram` op,
  round-tripping each summary through an event file on disk.
  """
  def setUp(self):
    super(SummaryV2OpTest, self).setUp()
    if tf2 is None:
      self.skipTest('v2 summary API not available')
  def histogram(self, *args, **kwargs):
    """Writes a histogram summary and returns just its Summary proto."""
    return self.histogram_event(*args, **kwargs).summary
  def histogram_event(self, *args, **kwargs):
    """Writes one histogram summary and returns the resulting Event proto.

    Creates a fresh file writer in the temp dir, emits a single summary,
    reads the event file back, and deletes it so the directory is empty
    again for subsequent calls.
    """
    kwargs.setdefault('step', 1)
    writer = tf2.summary.create_file_writer(self.get_temp_dir())
    with writer.as_default():
      summary.histogram(*args, **kwargs)
    writer.close()
    event_files = sorted(glob.glob(os.path.join(self.get_temp_dir(), '*')))
    self.assertEqual(len(event_files), 1)
    events = list(tf.compat.v1.train.summary_iterator(event_files[0]))
    # Expect a boilerplate event for the file_version, then the summary one.
    self.assertEqual(len(events), 2)
    # Delete the event file to reset to an empty directory for later calls.
    # TODO(nickfelt): use a unique subdirectory per writer instead.
    os.remove(event_files[0])
    return events[1]
  def write_histogram_event(self, *args, **kwargs):
    """Writes a histogram summary to disk without reading it back."""
    kwargs.setdefault('step', 1)
    writer = tf2.summary.create_file_writer(self.get_temp_dir())
    with writer.as_default():
      summary.histogram(*args, **kwargs)
    writer.close()
  def test_scoped_tag(self):
    with tf.name_scope('scope'):
      self.assertEqual('scope/a', self.histogram('a', []).value[0].tag)
  def test_step(self):
    event = self.histogram_event('a', [], step=333)
    self.assertEqual(333, event.step)
  def test_default_step(self):
    try:
      tf2.summary.experimental.set_step(333)
      # TODO(nickfelt): change test logic so we can just omit `step` entirely.
      event = self.histogram_event('a', [], step=None)
      self.assertEqual(333, event.step)
    finally:
      # Reset to default state for other tests.
      tf2.summary.experimental.set_step(None)
class SummaryV2OpGraphTest(SummaryV2OpTest, tf.test.TestCase):
  """Same as SummaryV2OpTest, but emits the summary from inside a
  tf.function so the histogram op runs in graph mode.
  """
  def write_histogram_event(self, *args, **kwargs):
    kwargs.setdefault('step', 1)
    # Hack to extract current scope since there's no direct API for it.
    with tf.name_scope('_') as temp_scope:
      scope = temp_scope.rstrip('/_')
    @tf2.function
    def graph_fn():
      # Recreate the active scope inside the defun since it won't propagate.
      with tf.name_scope(scope):
        summary.histogram(*args, **kwargs)
    writer = tf2.summary.create_file_writer(self.get_temp_dir())
    with writer.as_default():
      graph_fn()
    writer.close()
if __name__ == '__main__':
  # Run all test cases in this module under the TensorFlow test runner.
  tf.test.main()
| [
"tensorflow.test.main",
"os.remove",
"numpy.random.seed",
"tensorflow.compat.v1.enable_eager_execution",
"tensorboard.plugins.histogram.summary.histogram",
"tensorflow.compat.v1.train.summary_iterator",
"tensorflow.compat.v1.placeholder",
"tensorboard.util.tensor_util.make_ndarray",
"tensorflow.name... | [((1282, 1319), 'tensorflow.compat.v1.enable_eager_execution', 'tf.compat.v1.enable_eager_execution', ([], {}), '()\n', (1317, 1319), True, 'import tensorflow as tf\n'), ((8448, 8462), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (8460, 8462), True, 'import tensorflow as tf\n'), ((1514, 1531), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1528, 1531), True, 'import numpy as np\n'), ((1552, 1580), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[100]'}), '(size=[100])\n', (1568, 1580), True, 'import numpy as np\n'), ((2011, 2062), 'tensorboard.plugins.histogram.metadata.parse_plugin_metadata', 'metadata.parse_plugin_metadata', (['plugin_data.content'], {}), '(plugin_data.content)\n', (2041, 2062), False, 'from tensorboard.plugins.histogram import metadata\n'), ((2206, 2250), 'tensorboard.util.tensor_util.make_ndarray', 'tensor_util.make_ndarray', (['pb.value[0].tensor'], {}), '(pb.value[0].tensor)\n', (2230, 2250), False, 'from tensorboard.util import tensor_util\n'), ((2444, 2488), 'tensorboard.util.tensor_util.make_ndarray', 'tensor_util.make_ndarray', (['pb.value[0].tensor'], {}), '(pb.value[0].tensor)\n', (2468, 2488), False, 'from tensorboard.util import tensor_util\n'), ((2648, 2692), 'tensorboard.util.tensor_util.make_ndarray', 'tensor_util.make_ndarray', (['pb.value[0].tensor'], {}), '(pb.value[0].tensor)\n', (2672, 2692), False, 'from tensorboard.util import tensor_util\n'), ((2871, 2915), 'tensorboard.util.tensor_util.make_ndarray', 'tensor_util.make_ndarray', (['pb.value[0].tensor'], {}), '(pb.value[0].tensor)\n', (2895, 2915), False, 'from tensorboard.util import tensor_util\n'), ((3241, 3285), 'tensorboard.util.tensor_util.make_ndarray', 'tensor_util.make_ndarray', (['pb.value[0].tensor'], {}), '(pb.value[0].tensor)\n', (3265, 3285), False, 'from tensorboard.util import tensor_util\n'), ((3612, 3671), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['buckets[1:, 0]', 
'buckets[:-1, 1]'], {}), '(buckets[1:, 0], buckets[:-1, 1])\n', (3638, 3671), True, 'import numpy as np\n'), ((3796, 3844), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float64'], {'shape': 'None'}), '(tf.float64, shape=None)\n', (3820, 3844), True, 'import tensorflow as tf\n'), ((4240, 4284), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.int32'], {'shape': '()'}), '(tf.int32, shape=())\n', (4264, 4284), True, 'import tensorflow as tf\n'), ((4469, 4513), 'tensorboard.util.tensor_util.make_ndarray', 'tensor_util.make_ndarray', (['pb.value[0].tensor'], {}), '(pb.value[0].tensor)\n', (4493, 4513), False, 'from tensorboard.util import tensor_util\n'), ((4797, 4824), 'tensorboard.plugins.histogram.summary.pb', 'summary.pb', (['*args'], {}), '(*args, **kwargs)\n', (4807, 4824), False, 'from tensorboard.plugins.histogram import summary\n'), ((5874, 5911), 'tensorboard.plugins.histogram.summary.histogram_pb', 'summary.histogram_pb', (['*args'], {}), '(*args, **kwargs)\n', (5894, 5911), False, 'from tensorboard.plugins.histogram import summary\n'), ((6888, 6913), 'os.remove', 'os.remove', (['event_files[0]'], {}), '(event_files[0])\n', (6897, 6913), False, 'import os\n'), ((2733, 2760), 'numpy.array', 'np.array', (['[[11.5, 12.5, 1]]'], {}), '([[11.5, 12.5, 1]])\n', (2741, 2760), True, 'import numpy as np\n'), ((2956, 2983), 'numpy.array', 'np.array', (['[[11.5, 12.5, 3]]'], {}), '([[11.5, 12.5, 3]])\n', (2964, 2983), True, 'import numpy as np\n'), ((5626, 5648), 'tensorflow.name_scope', 'tf.name_scope', (['"""scope"""'], {}), "('scope')\n", (5639, 5648), True, 'import tensorflow as tf\n'), ((6383, 6417), 'tensorboard.plugins.histogram.summary.histogram', 'summary.histogram', (['*args'], {}), '(*args, **kwargs)\n', (6400, 6417), False, 'from tensorboard.plugins.histogram import summary\n'), ((6573, 6624), 'tensorflow.compat.v1.train.summary_iterator', 'tf.compat.v1.train.summary_iterator', (['event_files[0]'], {}), 
'(event_files[0])\n', (6608, 6624), True, 'import tensorflow as tf\n'), ((7122, 7156), 'tensorboard.plugins.histogram.summary.histogram', 'summary.histogram', (['*args'], {}), '(*args, **kwargs)\n', (7139, 7156), False, 'from tensorboard.plugins.histogram import summary\n'), ((7215, 7237), 'tensorflow.name_scope', 'tf.name_scope', (['"""scope"""'], {}), "('scope')\n", (7228, 7237), True, 'import tensorflow as tf\n'), ((7472, 7510), 'tensorboard.compat.tf2.summary.experimental.set_step', 'tf2.summary.experimental.set_step', (['(333)'], {}), '(333)\n', (7505, 7510), False, 'from tensorboard.compat import tf2\n'), ((7752, 7791), 'tensorboard.compat.tf2.summary.experimental.set_step', 'tf2.summary.experimental.set_step', (['None'], {}), '(None)\n', (7785, 7791), False, 'from tensorboard.compat import tf2\n'), ((8023, 8041), 'tensorflow.name_scope', 'tf.name_scope', (['"""_"""'], {}), "('_')\n", (8036, 8041), True, 'import tensorflow as tf\n'), ((8221, 8241), 'tensorflow.name_scope', 'tf.name_scope', (['scope'], {}), '(scope)\n', (8234, 8241), True, 'import tensorflow as tf\n'), ((8251, 8285), 'tensorboard.plugins.histogram.summary.histogram', 'summary.histogram', (['*args'], {}), '(*args, **kwargs)\n', (8268, 8285), False, 'from tensorboard.plugins.histogram import summary\n'), ((2291, 2303), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2299, 2303), True, 'import numpy as np\n'), ((2529, 2541), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2537, 2541), True, 'import numpy as np\n'), ((5317, 5344), 'tensorboard.plugins.histogram.summary.op', 'summary.op', (['*args'], {}), '(*args, **kwargs)\n', (5327, 5344), False, 'from tensorboard.plugins.histogram import summary\n')] |
#!/user/bin/env python
# -*- coding:utf-8 -*-
import cv2
import numpy as np
def edgeDetection(img, sobel):
    """Convolve kernel `sobel` over `img`, summing the responses of all channels.

    Returns a single-channel uint8 image of shape (height, width); each
    response is clipped to [0, 255], and a border of half the kernel size
    is left untouched at zero.
    """
    rows, cols, n_channels = img.shape
    half = (len(sobel) - 1) // 2
    result = np.zeros((rows, cols), np.uint8)
    for r in range(half, rows - half):
        for c in range(half, cols - half):
            window = img[r - half:r + half + 1, c - half:c + half + 1]
            # Accumulate the kernel response over every channel.
            response = sum(
                np.sum(sobel * window[:, :, ch]) for ch in range(n_channels))
            result[r, c] = np.clip(response, 0, 255)
    return result
def edgeDetection1(img, sobel):
    """Convolve kernel `sobel` over `img` per channel (no channel summation).

    Returns a uint8 image with the same shape as `img`; each channel's
    response is clipped to [0, 255], and a border of half the kernel size
    is left untouched at zero.
    """
    rows, cols, n_channels = img.shape
    half = (len(sobel) - 1) // 2
    result = np.zeros((rows, cols, n_channels), np.uint8)
    for r in range(half, rows - half):
        for c in range(half, cols - half):
            window = img[r - half:r + half + 1, c - half:c + half + 1]
            for ch in range(n_channels):
                result[r, c, ch] = np.clip(
                    np.sum(sobel * window[:, :, ch]), 0, 255)
    return result
# Demo: load an image, run the summed-channel edge detector, and display both.
lapla90 = [[0, 1, 0], [1, -4, 1], [0, 1, 0]] # Laplacian kernel, 90-degree increments
sobel = [[1, 1, 1], [1, -8, 1], [1, 1, 1]] # Laplacian kernel, 45-degree increments
sobel1 = [[-2, -2, 0],
          [-2, 0, 2],
          [0, 2, 2]]  # diagonal gradient kernel (currently unused)
img = cv2.imread("../images/lena.jpg")
cv2.imshow("image", img)
imgedge = edgeDetection(img, sobel)
cv2.imshow("sobel", imgedge)
cv2.waitKey(0) | [
"numpy.sum",
"cv2.waitKey",
"numpy.zeros",
"numpy.clip",
"cv2.imread",
"cv2.imshow"
] | [((1243, 1275), 'cv2.imread', 'cv2.imread', (['"""../images/lena.jpg"""'], {}), "('../images/lena.jpg')\n", (1253, 1275), False, 'import cv2\n'), ((1277, 1301), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (1287, 1301), False, 'import cv2\n'), ((1342, 1370), 'cv2.imshow', 'cv2.imshow', (['"""sobel"""', 'imgedge'], {}), "('sobel', imgedge)\n", (1352, 1370), False, 'import cv2\n'), ((1374, 1388), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1385, 1388), False, 'import cv2\n'), ((240, 275), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.uint8'], {}), '((height, width), np.uint8)\n', (248, 275), True, 'import numpy as np\n'), ((724, 769), 'numpy.zeros', 'np.zeros', (['(height, width, channels)', 'np.uint8'], {}), '((height, width, channels), np.uint8)\n', (732, 769), True, 'import numpy as np\n'), ((521, 542), 'numpy.clip', 'np.clip', (['temp', '(0)', '(255)'], {}), '(temp, 0, 255)\n', (528, 542), True, 'import numpy as np\n'), ((436, 492), 'numpy.sum', 'np.sum', (['(sobel * img[i - n:i + n + 1, j - n:j + n + 1, k])'], {}), '(sobel * img[i - n:i + n + 1, j - n:j + n + 1, k])\n', (442, 492), True, 'import numpy as np\n'), ((908, 964), 'numpy.sum', 'np.sum', (['(sobel * img[i - n:i + n + 1, j - n:j + n + 1, k])'], {}), '(sobel * img[i - n:i + n + 1, j - n:j + n + 1, k])\n', (914, 964), True, 'import numpy as np\n'), ((1000, 1021), 'numpy.clip', 'np.clip', (['temp', '(0)', '(255)'], {}), '(temp, 0, 255)\n', (1007, 1021), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import rospy as rp
from sys import maxsize as infinity
from geometry_msgs.msg import Transform
from agv_as18.srv import Path, PathResponse, PathRequest
robot = [0.0, 0.0]  # latest robot (x, y); kept current by pos_cb

# Named map locations as [name, x, y]: the assembly station (AS), component
# stations C1-C6, and the three mid-waypoints (MWP1-MWP3) between them.
AS = ['AS', 125.0, 66.0]
C1 = ['C1', 200.0, 210.0]
C2 = ['C2', 170.0, 210.0]
C3 = ['C3', 140.0, 210.0]
C4 = ['C4', 110.0, 210.0]
C5 = ['C5', 80.0, 210.0]
C6 = ['C6', 50.0, 210.0]
MWP1 = ['MWP1', 222.5, 166.0]
MWP2 = ['MWP2', 125.0, 166.0]
MWP3 = ['MWP3', 27.7, 166.0]

def _dist(p, q):
    """Euclidean distance between two [name, x, y] location records."""
    return np.sqrt((p[1] - q[1]) ** 2 + (p[2] - q[2]) ** 2)

# Pairwise edge lengths for the routing graph built in server_cb.
# (Each of these was previously a hand-expanded sqrt expression; the values
# are identical.)
AS_MWP1 = _dist(AS, MWP1)
AS_MWP2 = _dist(AS, MWP2)
AS_MWP3 = _dist(AS, MWP3)
C1_MWP1 = _dist(C1, MWP1)
C1_MWP2 = _dist(C1, MWP2)
C1_MWP3 = _dist(C1, MWP3)
C2_MWP1 = _dist(C2, MWP1)
C2_MWP2 = _dist(C2, MWP2)
C2_MWP3 = _dist(C2, MWP3)
C3_MWP1 = _dist(C3, MWP1)
C3_MWP2 = _dist(C3, MWP2)
C3_MWP3 = _dist(C3, MWP3)
C4_MWP1 = _dist(C4, MWP1)
C4_MWP2 = _dist(C4, MWP2)
C4_MWP3 = _dist(C4, MWP3)
C5_MWP1 = _dist(C5, MWP1)
C5_MWP2 = _dist(C5, MWP2)
C5_MWP3 = _dist(C5, MWP3)
C6_MWP1 = _dist(C6, MWP1)
C6_MWP2 = _dist(C6, MWP2)
C6_MWP3 = _dist(C6, MWP3)
C1_C2 = _dist(C1, C2)
C1_C3 = _dist(C1, C3)
C1_C4 = _dist(C1, C4)
C1_C5 = _dist(C1, C5)
C1_C6 = _dist(C1, C6)
C2_C3 = _dist(C2, C3)
C2_C4 = _dist(C2, C4)
C2_C5 = _dist(C2, C5)
C2_C6 = _dist(C2, C6)
C3_C4 = _dist(C3, C4)
C3_C5 = _dist(C3, C5)
C3_C6 = _dist(C3, C6)
C4_C5 = _dist(C4, C5)
C4_C6 = _dist(C4, C6)
C5_C6 = _dist(C5, C6)
def find_component(component):
    """Return the [name, x, y] record for the named component or station.

    Unknown names yield None, matching the original if/elif chain's
    implicit fall-through.
    """
    lookup = {
        'C1': C1, 'C2': C2, 'C3': C3,
        'C4': C4, 'C5': C5, 'C6': C6,
        'AS': AS,
    }
    return lookup.get(component)
# dijkstra algorithm
def dijkstra(graph, start, goal):
    """Shortest path on a weighted graph using Dijkstra's algorithm.

    Args:
        graph: dict mapping node -> {neighbor: edge_weight}. Unlike the
            original implementation, the caller's graph is no longer
            emptied as a side effect.
        start: key of the starting node.
        goal: key of the target node.

    Returns:
        The list of nodes from `start` to `goal` (inclusive), or None if
        `goal` is unreachable (a message is printed in that case).
    """
    # Work on a shallow copy so popping visited nodes does not destroy the
    # caller's graph (the original aliased it and emptied it).
    unvisited = dict(graph)
    # float('inf') is a true infinity, unlike the previous sys.maxsize.
    shortest_distance = {node: float('inf') for node in unvisited}
    shortest_distance[start] = 0
    predecessor = {}
    while unvisited:
        # Visit the unvisited node with the smallest tentative distance.
        current = min(unvisited, key=lambda node: shortest_distance[node])
        for neighbor, weight in graph[current].items():
            candidate = shortest_distance[current] + weight
            if candidate < shortest_distance[neighbor]:
                shortest_distance[neighbor] = candidate
                predecessor[neighbor] = current
        del unvisited[current]
    # Walk the predecessor chain backwards from the goal.
    path = []
    current_node = goal
    while current_node != start:
        try:
            path.insert(0, current_node)
            current_node = predecessor[current_node]
        except KeyError:
            print('Path not reachable')
            break
    path.insert(0, start)
    if shortest_distance[goal] != float('inf'):
        return path
def pos_cb(data):
    """ROS subscriber callback: cache the robot's current (x, y) position.

    Updates the module-level `robot` list in place from the incoming
    geometry_msgs Transform message.
    """
    global robot
    robot[0], robot[1] = data.translation.x, data.translation.y
def server_cb(req):
    """Service handler: plan an ordered list of waypoints for a task sequence.

    For each task in req.task_seq, a routing graph is rebuilt from the
    robot's current position (BEAST) and Dijkstra's algorithm appends the
    resulting waypoints to the overall path. Rebuilding the graph every
    iteration is required because dijkstra() consumes it and because the
    BEAST node's edges depend on the robot's position.
    """
    BEAST = ['BEAST', robot[0], robot[1]]
    path=[] # list that will contain all the waypoints
    # apply dijkstras algorithm for every task in the task sequence list
    for task in req.task_seq:
        # Below the waypoint line (y < 166) the robot can only reach the
        # mid-waypoints and the assembly station directly; above it, the
        # component stations and mid-waypoints are directly reachable.
        if BEAST[2] < 166:
            graph = {AS[0]:{MWP1[0]:AS_MWP1,MWP2[0]:AS_MWP2,MWP3[0]:AS_MWP3},MWP1[0]:{AS[0]:AS_MWP1,C1[0]:C1_MWP1,C2[0]:C2_MWP1,C3[0]:C3_MWP1,C4[0]:C4_MWP1,C5[0]:C5_MWP1,C6[0]:C6_MWP1},MWP2[0]:{AS[0]:AS_MWP2,C1[0]:C1_MWP2,C6[0]:C6_MWP2,C2[0]:C2_MWP2,C5[0]:C5_MWP2,C3[0]:C3_MWP2,C4[0]:C4_MWP2},
            MWP3[0]:{AS[0]:AS_MWP3,C6[0]:C6_MWP3,C5[0]:C5_MWP3,C4[0]:C4_MWP3,C3[0]:C3_MWP3,C2[0]:C2_MWP3,C1[0]:C1_MWP3},C1[0]:{MWP1[0]:C1_MWP1,MWP2[0]:C1_MWP2,MWP3[0]:C1_MWP3,C2[0]:C1_C2,C3[0]:C1_C3,C4[0]:C1_C4,C5[0]:C1_C5,C6[0]:C1_C6},C2[0]:{MWP1[0]:C2_MWP1,MWP2[0]:C2_MWP2,MWP3[0]:C2_MWP3,C1[0]:C1_C2,C3[0]:C2_C3,C4[0]:C2_C4,C5[0]:C2_C5,C6[0]:C2_C6},
            C3[0]:{MWP1[0]:C3_MWP1,MWP2[0]:C3_MWP2,MWP3[0]:C3_MWP3,C1[0]:C1_C3,C2[0]:C2_C3,C4[0]:C3_C4,C5[0]:C3_C5,C6[0]:C3_C6},C4[0]:{MWP1[0]:C4_MWP1,MWP2[0]:C4_MWP2,MWP3[0]:C4_MWP3,C1[0]:C1_C4,C2[0]:C2_C4,C3[0]:C3_C4,C5[0]:C4_C5,C6[0]:C4_C6},
            C5[0]:{MWP1[0]:C5_MWP1,MWP2[0]:C5_MWP2,MWP3[0]:C5_MWP3,C1[0]:C1_C5,C2[0]:C2_C5,C3[0]:C3_C5,C4[0]:C4_C5,C6[0]:C5_C6},C6[0]:{MWP1[0]:C6_MWP1,MWP2[0]:C6_MWP2,MWP3[0]:C6_MWP3,C1[0]:C1_C6,C2[0]:C2_C6,C3[0]:C3_C6,C4[0]:C4_C6,C5[0]:C5_C6},
            BEAST[0]:{AS[0]:np.sqrt((BEAST[1]-AS[1])**2+(BEAST[2]-AS[2])**2),MWP1[0]:np.sqrt((BEAST[1]-MWP1[1])**2+(BEAST[2]-MWP1[2])**2),MWP2[0]:np.sqrt((BEAST[1]-MWP2[1])**2+(BEAST[2]-MWP2[2])**2),
            MWP3[0]:np.sqrt((BEAST[1]-MWP3[1])**2+(BEAST[2]-MWP3[2])**2)}}
        else:
            graph = {AS[0]:{MWP1[0]:AS_MWP1,MWP2[0]:AS_MWP2,MWP3[0]:AS_MWP3},MWP1[0]:{AS[0]:AS_MWP1,C1[0]:C1_MWP1,C2[0]:C2_MWP1,C3[0]:C3_MWP1,C4[0]:C4_MWP1,C5[0]:C5_MWP1,C6[0]:C6_MWP1},MWP2[0]:{AS[0]:AS_MWP2,C1[0]:C1_MWP2,C6[0]:C6_MWP2,C2[0]:C2_MWP2,C5[0]:C5_MWP2,C3[0]:C3_MWP2,C4[0]:C4_MWP2},
            MWP3[0]:{AS[0]:AS_MWP3,C6[0]:C6_MWP3,C5[0]:C5_MWP3,C4[0]:C4_MWP3,C3[0]:C3_MWP3,C2[0]:C2_MWP3,C1[0]:C1_MWP3},C1[0]:{MWP1[0]:C1_MWP1,MWP2[0]:C1_MWP2,MWP3[0]:C1_MWP3,C2[0]:C1_C2,C3[0]:C1_C3,C4[0]:C1_C4,C5[0]:C1_C5,C6[0]:C1_C6},C2[0]:{MWP1[0]:C2_MWP1,MWP2[0]:C2_MWP2,MWP3[0]:C2_MWP3,C1[0]:C1_C2,C3[0]:C2_C3,C4[0]:C2_C4,C5[0]:C2_C5,C6[0]:C2_C6},
            C3[0]:{MWP1[0]:C3_MWP1,MWP2[0]:C3_MWP2,MWP3[0]:C3_MWP3,C1[0]:C1_C3,C2[0]:C2_C3,C4[0]:C3_C4,C5[0]:C3_C5,C6[0]:C3_C6},C4[0]:{MWP1[0]:C4_MWP1,MWP2[0]:C4_MWP2,MWP3[0]:C4_MWP3,C1[0]:C1_C4,C2[0]:C2_C4,C3[0]:C3_C4,C5[0]:C4_C5,C6[0]:C4_C6},
            C5[0]:{MWP1[0]:C5_MWP1,MWP2[0]:C5_MWP2,MWP3[0]:C5_MWP3,C1[0]:C1_C5,C2[0]:C2_C5,C3[0]:C3_C5,C4[0]:C4_C5,C6[0]:C5_C6},C6[0]:{MWP1[0]:C6_MWP1,MWP2[0]:C6_MWP2,MWP3[0]:C6_MWP3,C1[0]:C1_C6,C2[0]:C2_C6,C3[0]:C3_C6,C4[0]:C4_C6,C5[0]:C5_C6},
            BEAST[0]:{C1[0]:np.sqrt((BEAST[1]-C1[1])**2+(BEAST[2]-C1[2])**2),C2[0]:np.sqrt((BEAST[1]-C2[1])**2+(BEAST[2]-C2[2])**2),C3[0]:np.sqrt((BEAST[1]-C3[1])**2+(BEAST[2]-C3[2])**2),
            C4[0]:np.sqrt((BEAST[1]-C4[1])**2+(BEAST[2]-C4[2])**2),C5[0]:np.sqrt((BEAST[1]-C5[1])**2+(BEAST[2]-C5[2])**2),C6[0]:np.sqrt((BEAST[1]-C6[1])**2+(BEAST[2]-C6[2])**2),MWP1[0]:np.sqrt((BEAST[1]-MWP1[1])**2+(BEAST[2]-MWP1[2])**2),MWP2[0]:np.sqrt((BEAST[1]-MWP2[1])**2+(BEAST[2]-MWP2[2])**2),
            MWP3[0]:np.sqrt((BEAST[1]-MWP3[1])**2+(BEAST[2]-MWP3[2])**2)}}
        c = dijkstra(graph,BEAST[0],task) # stores a list of the path the robot needs to take in order to complete a task
        # appends the path waypoints in a list
        for i in range(1,len(c)):
            path.append(c[i])
        # update the position of the robot for the next iteration (needed for the new graph)
        BEAST[1] = find_component(task)[1]
        BEAST[2] = find_component(task)[2]
    return PathResponse(path)
# Node setup: subscribe to the local position feed, wait for the first
# message so `robot` holds a valid pose, then expose the planning service.
rp.init_node('waypoint_optimization')
rp.Subscriber('local_pos_ref', Transform, pos_cb)
rp.wait_for_message('local_pos_ref', Transform) # blocks until a message is received (here to make sure we have pose feedback)
s = rp.Service('path_service', Path, server_cb)
rp.spin() | [
"rospy.Subscriber",
"rospy.wait_for_message",
"agv_as18.srv.PathResponse",
"rospy.init_node",
"rospy.spin",
"rospy.Service",
"numpy.sqrt"
] | [((496, 552), 'numpy.sqrt', 'np.sqrt', (['((AS[1] - MWP1[1]) ** 2 + (AS[2] - MWP1[2]) ** 2)'], {}), '((AS[1] - MWP1[1]) ** 2 + (AS[2] - MWP1[2]) ** 2)\n', (503, 552), True, 'import numpy as np\n'), ((555, 611), 'numpy.sqrt', 'np.sqrt', (['((AS[1] - MWP2[1]) ** 2 + (AS[2] - MWP2[2]) ** 2)'], {}), '((AS[1] - MWP2[1]) ** 2 + (AS[2] - MWP2[2]) ** 2)\n', (562, 611), True, 'import numpy as np\n'), ((614, 670), 'numpy.sqrt', 'np.sqrt', (['((AS[1] - MWP3[1]) ** 2 + (AS[2] - MWP3[2]) ** 2)'], {}), '((AS[1] - MWP3[1]) ** 2 + (AS[2] - MWP3[2]) ** 2)\n', (621, 670), True, 'import numpy as np\n'), ((673, 729), 'numpy.sqrt', 'np.sqrt', (['((C1[1] - MWP1[1]) ** 2 + (C1[2] - MWP1[2]) ** 2)'], {}), '((C1[1] - MWP1[1]) ** 2 + (C1[2] - MWP1[2]) ** 2)\n', (680, 729), True, 'import numpy as np\n'), ((732, 788), 'numpy.sqrt', 'np.sqrt', (['((C1[1] - MWP2[1]) ** 2 + (C1[2] - MWP2[2]) ** 2)'], {}), '((C1[1] - MWP2[1]) ** 2 + (C1[2] - MWP2[2]) ** 2)\n', (739, 788), True, 'import numpy as np\n'), ((791, 847), 'numpy.sqrt', 'np.sqrt', (['((C1[1] - MWP3[1]) ** 2 + (C1[2] - MWP3[2]) ** 2)'], {}), '((C1[1] - MWP3[1]) ** 2 + (C1[2] - MWP3[2]) ** 2)\n', (798, 847), True, 'import numpy as np\n'), ((850, 906), 'numpy.sqrt', 'np.sqrt', (['((C2[1] - MWP1[1]) ** 2 + (C2[2] - MWP1[2]) ** 2)'], {}), '((C2[1] - MWP1[1]) ** 2 + (C2[2] - MWP1[2]) ** 2)\n', (857, 906), True, 'import numpy as np\n'), ((909, 965), 'numpy.sqrt', 'np.sqrt', (['((C2[1] - MWP2[1]) ** 2 + (C2[2] - MWP2[2]) ** 2)'], {}), '((C2[1] - MWP2[1]) ** 2 + (C2[2] - MWP2[2]) ** 2)\n', (916, 965), True, 'import numpy as np\n'), ((968, 1024), 'numpy.sqrt', 'np.sqrt', (['((C2[1] - MWP3[1]) ** 2 + (C2[2] - MWP3[2]) ** 2)'], {}), '((C2[1] - MWP3[1]) ** 2 + (C2[2] - MWP3[2]) ** 2)\n', (975, 1024), True, 'import numpy as np\n'), ((1027, 1083), 'numpy.sqrt', 'np.sqrt', (['((C3[1] - MWP1[1]) ** 2 + (C3[2] - MWP1[2]) ** 2)'], {}), '((C3[1] - MWP1[1]) ** 2 + (C3[2] - MWP1[2]) ** 2)\n', (1034, 1083), True, 'import numpy as np\n'), ((1086, 1142), 
'numpy.sqrt', 'np.sqrt', (['((C3[1] - MWP2[1]) ** 2 + (C3[2] - MWP2[2]) ** 2)'], {}), '((C3[1] - MWP2[1]) ** 2 + (C3[2] - MWP2[2]) ** 2)\n', (1093, 1142), True, 'import numpy as np\n'), ((1145, 1201), 'numpy.sqrt', 'np.sqrt', (['((C3[1] - MWP3[1]) ** 2 + (C3[2] - MWP3[2]) ** 2)'], {}), '((C3[1] - MWP3[1]) ** 2 + (C3[2] - MWP3[2]) ** 2)\n', (1152, 1201), True, 'import numpy as np\n'), ((1204, 1260), 'numpy.sqrt', 'np.sqrt', (['((C4[1] - MWP1[1]) ** 2 + (C4[2] - MWP1[2]) ** 2)'], {}), '((C4[1] - MWP1[1]) ** 2 + (C4[2] - MWP1[2]) ** 2)\n', (1211, 1260), True, 'import numpy as np\n'), ((1263, 1319), 'numpy.sqrt', 'np.sqrt', (['((C4[1] - MWP2[1]) ** 2 + (C4[2] - MWP2[2]) ** 2)'], {}), '((C4[1] - MWP2[1]) ** 2 + (C4[2] - MWP2[2]) ** 2)\n', (1270, 1319), True, 'import numpy as np\n'), ((1322, 1378), 'numpy.sqrt', 'np.sqrt', (['((C4[1] - MWP3[1]) ** 2 + (C4[2] - MWP3[2]) ** 2)'], {}), '((C4[1] - MWP3[1]) ** 2 + (C4[2] - MWP3[2]) ** 2)\n', (1329, 1378), True, 'import numpy as np\n'), ((1381, 1437), 'numpy.sqrt', 'np.sqrt', (['((C5[1] - MWP1[1]) ** 2 + (C5[2] - MWP1[2]) ** 2)'], {}), '((C5[1] - MWP1[1]) ** 2 + (C5[2] - MWP1[2]) ** 2)\n', (1388, 1437), True, 'import numpy as np\n'), ((1440, 1496), 'numpy.sqrt', 'np.sqrt', (['((C5[1] - MWP2[1]) ** 2 + (C5[2] - MWP2[2]) ** 2)'], {}), '((C5[1] - MWP2[1]) ** 2 + (C5[2] - MWP2[2]) ** 2)\n', (1447, 1496), True, 'import numpy as np\n'), ((1499, 1555), 'numpy.sqrt', 'np.sqrt', (['((C5[1] - MWP3[1]) ** 2 + (C5[2] - MWP3[2]) ** 2)'], {}), '((C5[1] - MWP3[1]) ** 2 + (C5[2] - MWP3[2]) ** 2)\n', (1506, 1555), True, 'import numpy as np\n'), ((1558, 1614), 'numpy.sqrt', 'np.sqrt', (['((C6[1] - MWP1[1]) ** 2 + (C6[2] - MWP1[2]) ** 2)'], {}), '((C6[1] - MWP1[1]) ** 2 + (C6[2] - MWP1[2]) ** 2)\n', (1565, 1614), True, 'import numpy as np\n'), ((1617, 1673), 'numpy.sqrt', 'np.sqrt', (['((C6[1] - MWP2[1]) ** 2 + (C6[2] - MWP2[2]) ** 2)'], {}), '((C6[1] - MWP2[1]) ** 2 + (C6[2] - MWP2[2]) ** 2)\n', (1624, 1673), True, 'import numpy as np\n'), 
((1676, 1732), 'numpy.sqrt', 'np.sqrt', (['((C6[1] - MWP3[1]) ** 2 + (C6[2] - MWP3[2]) ** 2)'], {}), '((C6[1] - MWP3[1]) ** 2 + (C6[2] - MWP3[2]) ** 2)\n', (1683, 1732), True, 'import numpy as np\n'), ((1733, 1785), 'numpy.sqrt', 'np.sqrt', (['((C1[1] - C2[1]) ** 2 + (C1[2] - C2[2]) ** 2)'], {}), '((C1[1] - C2[1]) ** 2 + (C1[2] - C2[2]) ** 2)\n', (1740, 1785), True, 'import numpy as np\n'), ((1786, 1838), 'numpy.sqrt', 'np.sqrt', (['((C1[1] - C3[1]) ** 2 + (C1[2] - C3[2]) ** 2)'], {}), '((C1[1] - C3[1]) ** 2 + (C1[2] - C3[2]) ** 2)\n', (1793, 1838), True, 'import numpy as np\n'), ((1839, 1891), 'numpy.sqrt', 'np.sqrt', (['((C1[1] - C4[1]) ** 2 + (C1[2] - C4[2]) ** 2)'], {}), '((C1[1] - C4[1]) ** 2 + (C1[2] - C4[2]) ** 2)\n', (1846, 1891), True, 'import numpy as np\n'), ((1892, 1944), 'numpy.sqrt', 'np.sqrt', (['((C1[1] - C5[1]) ** 2 + (C1[2] - C5[2]) ** 2)'], {}), '((C1[1] - C5[1]) ** 2 + (C1[2] - C5[2]) ** 2)\n', (1899, 1944), True, 'import numpy as np\n'), ((1945, 1997), 'numpy.sqrt', 'np.sqrt', (['((C1[1] - C6[1]) ** 2 + (C1[2] - C6[2]) ** 2)'], {}), '((C1[1] - C6[1]) ** 2 + (C1[2] - C6[2]) ** 2)\n', (1952, 1997), True, 'import numpy as np\n'), ((1998, 2050), 'numpy.sqrt', 'np.sqrt', (['((C2[1] - C3[1]) ** 2 + (C2[2] - C3[2]) ** 2)'], {}), '((C2[1] - C3[1]) ** 2 + (C2[2] - C3[2]) ** 2)\n', (2005, 2050), True, 'import numpy as np\n'), ((2051, 2103), 'numpy.sqrt', 'np.sqrt', (['((C2[1] - C4[1]) ** 2 + (C2[2] - C4[2]) ** 2)'], {}), '((C2[1] - C4[1]) ** 2 + (C2[2] - C4[2]) ** 2)\n', (2058, 2103), True, 'import numpy as np\n'), ((2104, 2156), 'numpy.sqrt', 'np.sqrt', (['((C2[1] - C5[1]) ** 2 + (C2[2] - C5[2]) ** 2)'], {}), '((C2[1] - C5[1]) ** 2 + (C2[2] - C5[2]) ** 2)\n', (2111, 2156), True, 'import numpy as np\n'), ((2157, 2209), 'numpy.sqrt', 'np.sqrt', (['((C2[1] - C6[1]) ** 2 + (C2[2] - C6[2]) ** 2)'], {}), '((C2[1] - C6[1]) ** 2 + (C2[2] - C6[2]) ** 2)\n', (2164, 2209), True, 'import numpy as np\n'), ((2210, 2262), 'numpy.sqrt', 'np.sqrt', (['((C3[1] - C4[1]) 
** 2 + (C3[2] - C4[2]) ** 2)'], {}), '((C3[1] - C4[1]) ** 2 + (C3[2] - C4[2]) ** 2)\n', (2217, 2262), True, 'import numpy as np\n'), ((2263, 2315), 'numpy.sqrt', 'np.sqrt', (['((C3[1] - C5[1]) ** 2 + (C3[2] - C5[2]) ** 2)'], {}), '((C3[1] - C5[1]) ** 2 + (C3[2] - C5[2]) ** 2)\n', (2270, 2315), True, 'import numpy as np\n'), ((2316, 2368), 'numpy.sqrt', 'np.sqrt', (['((C3[1] - C6[1]) ** 2 + (C3[2] - C6[2]) ** 2)'], {}), '((C3[1] - C6[1]) ** 2 + (C3[2] - C6[2]) ** 2)\n', (2323, 2368), True, 'import numpy as np\n'), ((2369, 2421), 'numpy.sqrt', 'np.sqrt', (['((C4[1] - C5[1]) ** 2 + (C4[2] - C5[2]) ** 2)'], {}), '((C4[1] - C5[1]) ** 2 + (C4[2] - C5[2]) ** 2)\n', (2376, 2421), True, 'import numpy as np\n'), ((2422, 2474), 'numpy.sqrt', 'np.sqrt', (['((C4[1] - C6[1]) ** 2 + (C4[2] - C6[2]) ** 2)'], {}), '((C4[1] - C6[1]) ** 2 + (C4[2] - C6[2]) ** 2)\n', (2429, 2474), True, 'import numpy as np\n'), ((2475, 2527), 'numpy.sqrt', 'np.sqrt', (['((C5[1] - C6[1]) ** 2 + (C5[2] - C6[2]) ** 2)'], {}), '((C5[1] - C6[1]) ** 2 + (C5[2] - C6[2]) ** 2)\n', (2482, 2527), True, 'import numpy as np\n'), ((8217, 8254), 'rospy.init_node', 'rp.init_node', (['"""waypoint_optimization"""'], {}), "('waypoint_optimization')\n", (8229, 8254), True, 'import rospy as rp\n'), ((8255, 8304), 'rospy.Subscriber', 'rp.Subscriber', (['"""local_pos_ref"""', 'Transform', 'pos_cb'], {}), "('local_pos_ref', Transform, pos_cb)\n", (8268, 8304), True, 'import rospy as rp\n'), ((8305, 8352), 'rospy.wait_for_message', 'rp.wait_for_message', (['"""local_pos_ref"""', 'Transform'], {}), "('local_pos_ref', Transform)\n", (8324, 8352), True, 'import rospy as rp\n'), ((8436, 8479), 'rospy.Service', 'rp.Service', (['"""path_service"""', 'Path', 'server_cb'], {}), "('path_service', Path, server_cb)\n", (8446, 8479), True, 'import rospy as rp\n'), ((8480, 8489), 'rospy.spin', 'rp.spin', ([], {}), '()\n', (8487, 8489), True, 'import rospy as rp\n'), ((8197, 8215), 'agv_as18.srv.PathResponse', 'PathResponse', (['path'], 
{}), '(path)\n', (8209, 8215), False, 'from agv_as18.srv import Path, PathResponse, PathRequest\n'), ((5745, 5803), 'numpy.sqrt', 'np.sqrt', (['((BEAST[1] - AS[1]) ** 2 + (BEAST[2] - AS[2]) ** 2)'], {}), '((BEAST[1] - AS[1]) ** 2 + (BEAST[2] - AS[2]) ** 2)\n', (5752, 5803), True, 'import numpy as np\n'), ((5802, 5864), 'numpy.sqrt', 'np.sqrt', (['((BEAST[1] - MWP1[1]) ** 2 + (BEAST[2] - MWP1[2]) ** 2)'], {}), '((BEAST[1] - MWP1[1]) ** 2 + (BEAST[2] - MWP1[2]) ** 2)\n', (5809, 5864), True, 'import numpy as np\n'), ((5863, 5925), 'numpy.sqrt', 'np.sqrt', (['((BEAST[1] - MWP2[1]) ** 2 + (BEAST[2] - MWP2[2]) ** 2)'], {}), '((BEAST[1] - MWP2[1]) ** 2 + (BEAST[2] - MWP2[2]) ** 2)\n', (5870, 5925), True, 'import numpy as np\n'), ((5945, 6007), 'numpy.sqrt', 'np.sqrt', (['((BEAST[1] - MWP3[1]) ** 2 + (BEAST[2] - MWP3[2]) ** 2)'], {}), '((BEAST[1] - MWP3[1]) ** 2 + (BEAST[2] - MWP3[2]) ** 2)\n', (5952, 6007), True, 'import numpy as np\n'), ((7211, 7269), 'numpy.sqrt', 'np.sqrt', (['((BEAST[1] - C1[1]) ** 2 + (BEAST[2] - C1[2]) ** 2)'], {}), '((BEAST[1] - C1[1]) ** 2 + (BEAST[2] - C1[2]) ** 2)\n', (7218, 7269), True, 'import numpy as np\n'), ((7266, 7324), 'numpy.sqrt', 'np.sqrt', (['((BEAST[1] - C2[1]) ** 2 + (BEAST[2] - C2[2]) ** 2)'], {}), '((BEAST[1] - C2[1]) ** 2 + (BEAST[2] - C2[2]) ** 2)\n', (7273, 7324), True, 'import numpy as np\n'), ((7321, 7379), 'numpy.sqrt', 'np.sqrt', (['((BEAST[1] - C3[1]) ** 2 + (BEAST[2] - C3[2]) ** 2)'], {}), '((BEAST[1] - C3[1]) ** 2 + (BEAST[2] - C3[2]) ** 2)\n', (7328, 7379), True, 'import numpy as np\n'), ((7397, 7455), 'numpy.sqrt', 'np.sqrt', (['((BEAST[1] - C4[1]) ** 2 + (BEAST[2] - C4[2]) ** 2)'], {}), '((BEAST[1] - C4[1]) ** 2 + (BEAST[2] - C4[2]) ** 2)\n', (7404, 7455), True, 'import numpy as np\n'), ((7452, 7510), 'numpy.sqrt', 'np.sqrt', (['((BEAST[1] - C5[1]) ** 2 + (BEAST[2] - C5[2]) ** 2)'], {}), '((BEAST[1] - C5[1]) ** 2 + (BEAST[2] - C5[2]) ** 2)\n', (7459, 7510), True, 'import numpy as np\n'), ((7507, 7565), 'numpy.sqrt', 
'np.sqrt', (['((BEAST[1] - C6[1]) ** 2 + (BEAST[2] - C6[2]) ** 2)'], {}), '((BEAST[1] - C6[1]) ** 2 + (BEAST[2] - C6[2]) ** 2)\n', (7514, 7565), True, 'import numpy as np\n'), ((7564, 7626), 'numpy.sqrt', 'np.sqrt', (['((BEAST[1] - MWP1[1]) ** 2 + (BEAST[2] - MWP1[2]) ** 2)'], {}), '((BEAST[1] - MWP1[1]) ** 2 + (BEAST[2] - MWP1[2]) ** 2)\n', (7571, 7626), True, 'import numpy as np\n'), ((7625, 7687), 'numpy.sqrt', 'np.sqrt', (['((BEAST[1] - MWP2[1]) ** 2 + (BEAST[2] - MWP2[2]) ** 2)'], {}), '((BEAST[1] - MWP2[1]) ** 2 + (BEAST[2] - MWP2[2]) ** 2)\n', (7632, 7687), True, 'import numpy as np\n'), ((7707, 7769), 'numpy.sqrt', 'np.sqrt', (['((BEAST[1] - MWP3[1]) ** 2 + (BEAST[2] - MWP3[2]) ** 2)'], {}), '((BEAST[1] - MWP3[1]) ** 2 + (BEAST[2] - MWP3[2]) ** 2)\n', (7714, 7769), True, 'import numpy as np\n')] |
import cv2
import time
import numpy as np
import pandas as pd
import mediapipe as mp
import plotly.express as px
import plotly.graph_objects as go
class poseDetector:
    def __init__(
        self,
        mode=False,
        complex=1,
        smooth_landmarks=True,
        segmentation=True,
        smooth_segmentation=True,
        detectionCon=0.5,
        trackCon=0.5,
    ):
        """Configure a MediaPipe Pose wrapper.

        The arguments are stored and then passed *positionally* to
        mediapipe's Pose constructor, so their order must match its
        signature (presumably static_image_mode, model_complexity,
        smooth_landmarks, enable_segmentation, smooth_segmentation,
        min_detection/tracking confidence — verify against the installed
        mediapipe version).

        Note: the `complex` parameter shadows the builtin of the same name.
        """
        self.mode = mode
        self.complex = complex
        self.smooth_landmarks = smooth_landmarks
        self.segmentation = segmentation
        self.smooth_segmentation = smooth_segmentation
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        # Drawing helpers and the pose solution module.
        self.mpDraw = mp.solutions.drawing_utils
        self.mpDrawStyle = mp.solutions.drawing_styles
        self.mpPose = mp.solutions.pose
        self.pose = self.mpPose.Pose(
            self.mode,
            self.complex,
            self.smooth_landmarks,
            self.segmentation,
            self.smooth_segmentation,
            self.detectionCon,
            self.trackCon,
        )
        # Duplicate alias for the drawing utils (same module as mpDraw).
        self.mp_drawing = mp.solutions.drawing_utils
def findPose(self, img):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.pose.process(imgRGB)
# self.plotly_fig(self.results.pose_landmarks)
print(self.results.pose_landmarks)
print('-----------------------------------------------------------------------------------------------------------')
# if self.results.pose_landmarks:
# if draw:
# self.mp_drawing.draw_landmarks(
# img,
# self.results.pose_landmarks,
# self.mpPose.POSE_CONNECTIONS,
# # self.mpDrawStyle.get_default_pose_landmarks_style())
# self.mpDraw.DrawingSpec(
# color=(0, 0, 255), thickness=2, circle_radius=2
# ),
# self.mpDraw.DrawingSpec(
# color=(0, 255, 0), thickness=2, circle_radius=2
# ),
# )
return img
def findPosition(self, img, draw=True):
self.lmList = []
if self.results.pose_landmarks:
for id, lm in enumerate(self.results.pose_landmarks.landmark):
h, w, c = img.shape
# print(id, lm)
cx, cy = int(lm.x * w), int(lm.y * h)
x, y, z = lm.x, lm.y, lm.z
self.lmList.append([id, cx, cy])
if draw:
cv2.circle(img, (cx, cy), 5, (0, 255, 0), cv2.FILLED)
return self.lmList
def findAngle(self, img, p1, p2, p3, draw=True):
# Get the landmarks
x1, y1 = self.lmList[p1][1:]
x2, y2 = self.lmList[p2][1:]
x3, y3 = self.lmList[p3][1:]
# Calculate the Angle
radians = np.arctan2(y3 - y2, x3 - x2) - np.arctan2(y1 - y2, x1 - x2)
angle = np.abs(radians * 180.0 / np.pi)
if angle > 180.0:
angle = 360 - angle
print(int(angle))
# Draw
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 3)
cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 3)
cv2.circle(img, (x1, y1), 5, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x1, y1), 10, (0, 0, 255), 2)
cv2.circle(img, (x2, y2), 5, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), 10, (0, 0, 255), 2)
cv2.circle(img, (x3, y3), 5, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x3, y3), 10, (0, 0, 255), 2)
cv2.putText(
img,
str(int(angle)) + "",
(x2 - 50, y2 + 50),
cv2.FONT_HERSHEY_PLAIN,
2,
(255, 0, 0),
2,
)
return angle
def plotly_fig(self, results):
if not results:
return
plotted_landmarks = {}
_PRESENCE_THRESHOLD = 0.5
_VISIBILITY_THRESHOLD = 0.5
for idx, landmark in enumerate(self.results.pose_landmarks.landmark):
if (
landmark.HasField("visibility")
and landmark.visibility < _VISIBILITY_THRESHOLD
) or (
landmark.HasField("presence") and landmark.presence < _PRESENCE_THRESHOLD
):
continue
plotted_landmarks[idx] = (-landmark.z, landmark.x, -landmark.y)
if self.results.pose_landmarks.landmark:
out_cn = []
num_landmarks = len(self.results.pose_landmarks.landmark)
# Draws the connections if the start and end landmarks are both visible.
for connection in self.mpPose.POSE_CONNECTIONS:
start_idx = connection[0]
end_idx = connection[1]
if not (0 <= start_idx < num_landmarks and 0 <= end_idx < num_landmarks):
raise ValueError(
f"Landmark index is out of range. Invalid connection "
f"from landmark #{start_idx} to landmark #{end_idx}."
)
if start_idx in plotted_landmarks and end_idx in plotted_landmarks:
landmark_pair = [
plotted_landmarks[start_idx],
plotted_landmarks[end_idx],
]
out_cn.append(
dict(
xs=[landmark_pair[0][0], landmark_pair[1][0]],
ys=[landmark_pair[0][1], landmark_pair[1][1]],
zs=[landmark_pair[0][2], landmark_pair[1][2]],
)
)
cn2 = {"xs": [], "ys": [], "zs": []}
for pair in out_cn:
for k in pair.keys():
cn2[k].append(pair[k][0])
cn2[k].append(pair[k][1])
cn2[k].append(None)
df = pd.DataFrame(plotted_landmarks).T.rename(columns={0: "z", 1: "x", 2: "y"})
df["lm"] = df.index.map(lambda s: self.mpPose.PoseLandmark(s).name).values
fig = (
px.scatter_3d(df, x="z", y="x", z="y", hover_name="lm")
.update_traces(marker={"color": "red"})
.update_layout(
margin={"l": 0, "r": 0, "t": 0, "b": 0},
scene={"camera": {"eye": {"x": 2.1, "y": 0, "z": 0}}},
)
)
fig.add_traces(
[
go.Scatter3d(
x=cn2["xs"],
y=cn2["ys"],
z=cn2["zs"],
mode="lines",
line={"color": "black", "width": 5},
name="connections",
)
]
)
return fig
def main():
cap = cv2.VideoCapture('./Hackathon_1st_Hitter.mp4')
milliseconds = 1000
start_time = int(input("Enter Start time: "))
end_time = int(input("Enter Length: "))
end_time = start_time + end_time
cap.set(cv2.CAP_PROP_POS_MSEC, start_time * milliseconds)
pTime = 0
detector = poseDetector()
while True and cap.get(cv2.CAP_PROP_POS_MSEC) <= end_time * milliseconds:
success, img = cap.read()
img = detector.findPose(img)
lmList = detector.findPosition(img, draw=False)
if len(lmList) != 0:
detector.findAngle(img, 11, 13, 15)
detector.findAngle(img, 24, 12, 14)
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
# show fps count
cv2.putText(
img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3
)
cv2.imshow("Image", img)
cv2.waitKey(1)
if __name__ == "__main__":
main() | [
"cv2.line",
"pandas.DataFrame",
"cv2.circle",
"numpy.abs",
"numpy.arctan2",
"cv2.cvtColor",
"cv2.waitKey",
"plotly.graph_objects.Scatter3d",
"plotly.express.scatter_3d",
"time.time",
"cv2.VideoCapture",
"cv2.imshow"
] | [((6951, 6997), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""./Hackathon_1st_Hitter.mp4"""'], {}), "('./Hackathon_1st_Hitter.mp4')\n", (6967, 6997), False, 'import cv2\n'), ((1166, 1202), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1178, 1202), False, 'import cv2\n'), ((2994, 3025), 'numpy.abs', 'np.abs', (['(radians * 180.0 / np.pi)'], {}), '(radians * 180.0 / np.pi)\n', (3000, 3025), True, 'import numpy as np\n'), ((7613, 7624), 'time.time', 'time.time', ([], {}), '()\n', (7622, 7624), False, 'import time\n'), ((7831, 7855), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'img'], {}), "('Image', img)\n", (7841, 7855), False, 'import cv2\n'), ((7864, 7878), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7875, 7878), False, 'import cv2\n'), ((2918, 2946), 'numpy.arctan2', 'np.arctan2', (['(y3 - y2)', '(x3 - x2)'], {}), '(y3 - y2, x3 - x2)\n', (2928, 2946), True, 'import numpy as np\n'), ((2949, 2977), 'numpy.arctan2', 'np.arctan2', (['(y1 - y2)', '(x1 - x2)'], {}), '(y1 - y2, x1 - x2)\n', (2959, 2977), True, 'import numpy as np\n'), ((3157, 3210), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x2, y2)', '(255, 255, 255)', '(3)'], {}), '(img, (x1, y1), (x2, y2), (255, 255, 255), 3)\n', (3165, 3210), False, 'import cv2\n'), ((3223, 3276), 'cv2.line', 'cv2.line', (['img', '(x3, y3)', '(x2, y2)', '(255, 255, 255)', '(3)'], {}), '(img, (x3, y3), (x2, y2), (255, 255, 255), 3)\n', (3231, 3276), False, 'import cv2\n'), ((3289, 3342), 'cv2.circle', 'cv2.circle', (['img', '(x1, y1)', '(5)', '(0, 0, 255)', 'cv2.FILLED'], {}), '(img, (x1, y1), 5, (0, 0, 255), cv2.FILLED)\n', (3299, 3342), False, 'import cv2\n'), ((3355, 3400), 'cv2.circle', 'cv2.circle', (['img', '(x1, y1)', '(10)', '(0, 0, 255)', '(2)'], {}), '(img, (x1, y1), 10, (0, 0, 255), 2)\n', (3365, 3400), False, 'import cv2\n'), ((3413, 3466), 'cv2.circle', 'cv2.circle', (['img', '(x2, y2)', '(5)', '(0, 0, 255)', 'cv2.FILLED'], {}), '(img, (x2, y2), 5, 
(0, 0, 255), cv2.FILLED)\n', (3423, 3466), False, 'import cv2\n'), ((3479, 3524), 'cv2.circle', 'cv2.circle', (['img', '(x2, y2)', '(10)', '(0, 0, 255)', '(2)'], {}), '(img, (x2, y2), 10, (0, 0, 255), 2)\n', (3489, 3524), False, 'import cv2\n'), ((3537, 3590), 'cv2.circle', 'cv2.circle', (['img', '(x3, y3)', '(5)', '(0, 0, 255)', 'cv2.FILLED'], {}), '(img, (x3, y3), 5, (0, 0, 255), cv2.FILLED)\n', (3547, 3590), False, 'import cv2\n'), ((3603, 3648), 'cv2.circle', 'cv2.circle', (['img', '(x3, y3)', '(10)', '(0, 0, 255)', '(2)'], {}), '(img, (x3, y3), 10, (0, 0, 255), 2)\n', (3613, 3648), False, 'import cv2\n'), ((6614, 6741), 'plotly.graph_objects.Scatter3d', 'go.Scatter3d', ([], {'x': "cn2['xs']", 'y': "cn2['ys']", 'z': "cn2['zs']", 'mode': '"""lines"""', 'line': "{'color': 'black', 'width': 5}", 'name': '"""connections"""'}), "(x=cn2['xs'], y=cn2['ys'], z=cn2['zs'], mode='lines', line={\n 'color': 'black', 'width': 5}, name='connections')\n", (6626, 6741), True, 'import plotly.graph_objects as go\n'), ((2595, 2648), 'cv2.circle', 'cv2.circle', (['img', '(cx, cy)', '(5)', '(0, 255, 0)', 'cv2.FILLED'], {}), '(img, (cx, cy), 5, (0, 255, 0), cv2.FILLED)\n', (2605, 2648), False, 'import cv2\n'), ((6078, 6109), 'pandas.DataFrame', 'pd.DataFrame', (['plotted_landmarks'], {}), '(plotted_landmarks)\n', (6090, 6109), True, 'import pandas as pd\n'), ((6264, 6319), 'plotly.express.scatter_3d', 'px.scatter_3d', (['df'], {'x': '"""z"""', 'y': '"""x"""', 'z': '"""y"""', 'hover_name': '"""lm"""'}), "(df, x='z', y='x', z='y', hover_name='lm')\n", (6277, 6319), True, 'import plotly.express as px\n')] |
import os
from torch.utils.data import Dataset
import cv2
import torch
import numpy as np
class ImageDataset(Dataset):
def __init__(self, file_path):
super(Dataset, self).__init__()
self.images = []
self.labels = []
self.file_name = []
for root, sub_dir, files in os.walk(file_path):
for file in files:
img = cv2.imread(os.path.join(root, file))
img = np.float32(cv2.resize(img, (224, 224))) / 255
self.file_name.append(os.path.join(root, file))
self.images.append(torch.from_numpy(img))
file = file.split('].')[0].split('[')[1].split(',')
label = [float(i) for i in file]
label = np.array(label, dtype=np.float32)
self.labels.append(torch.from_numpy(label))
def __len__(self):
return len(self.images)
def __getitem__(self, item):
return self.images[item], self.labels[item]
| [
"os.walk",
"numpy.array",
"os.path.join",
"cv2.resize",
"torch.from_numpy"
] | [((310, 328), 'os.walk', 'os.walk', (['file_path'], {}), '(file_path)\n', (317, 328), False, 'import os\n'), ((751, 784), 'numpy.array', 'np.array', (['label'], {'dtype': 'np.float32'}), '(label, dtype=np.float32)\n', (759, 784), True, 'import numpy as np\n'), ((394, 418), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (406, 418), False, 'import os\n'), ((526, 550), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (538, 550), False, 'import os\n'), ((587, 608), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (603, 608), False, 'import torch\n'), ((820, 843), 'torch.from_numpy', 'torch.from_numpy', (['label'], {}), '(label)\n', (836, 843), False, 'import torch\n'), ((453, 480), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (463, 480), False, 'import cv2\n')] |
import pickle
import numpy as np, pandas as pd, matplotlib as mpl
from matplotlib import dates as mdates
# # Use environment rws_dev
# from sys import path
# for extra in ["C:/Users/mphum/GitHub/koolstof", "C:/Users/mphum/GitHub/calkulate"]:
# if extra not in path:
# path.append(extra)
import koolstof as ks
mpl.rcParams["date.epoch"] = "1970-01-01T00:00:00"
#%% Import station positions
allstations = pd.read_excel("data/Coordinaten_verzuring_20190429.xlsx")
allstations["latitude"] = (
allstations.lat_deg + allstations.lat_min / 60 + allstations.lat_sec / 3600
)
allstations["longitude"] = (
allstations.lon_deg + allstations.lon_min / 60 + allstations.lon_sec / 3600
)
# Import RWS bottle file
data = pd.read_csv(
"data/bottle_files/Bottlefile_NIOZ_20210618_MPH.csv",
header=3,
na_values=["#N/B", -999],
)
#%% Assign station locations and bottle IDs
data["latitude"] = np.nan
data["longitude"] = np.nan
for S in range(len(data.index)):
SL = allstations["U meetpunt"] == data.station[S]
if np.any(SL):
data.loc[S, "latitude"] = allstations.latitude[SL].values[0]
data.loc[S, "longitude"] = allstations.longitude[SL].values[0]
else:
print("Warning: station {} has no co-ordinates!".format(data.station[S]))
data["station_bottleid"] = [
row.station + "_" + str(row.bottleid) for _, row in data.iterrows()
]
#%% Convert units from mg/l to μmol/l
data["doc_vol"] = 1e3 * data.doc_gvol / ks.molar.mass["C"]
data["poc_vol"] = 1e3 * data.poc_gvol / ks.molar.mass["C"]
data["nitrate_vol"] = 1e3 * data.nitrate_gvol / ks.molar.mass["N"]
data["nitrite_vol"] = 1e3 * data.nitrite_gvol / ks.molar.mass["N"]
data["dn_vol"] = 1e3 * data.dn_gvol / ks.molar.mass["N"]
data["ammonia_vol"] = 1e3 * data.ammonia_gvol / ks.molar.mass["N"]
data["din_vol"] = data.nitrate_vol + data.nitrite_vol + data.ammonia_vol
data["silicate_vol"] = 1e3 * data.silicate_gvol / ks.molar.mass["Si"]
data["phosphate_vol"] = (
data.phosphate_gvol / ks.molar.mass["P"]
) # this one provided in μg/l, not mg/l
data["dp_vol"] = 1e3 * data.dp_gvol / ks.molar.mass["P"]
data["sulfate_vol"] = 1e3 * data.sulfate_gvol / ks.molar.mass["SO4"]
data["chloride_vol"] = 1e3 * data.chloride_gvol / ks.molar.mass["Cl"]
# Set negative nutrients to zero
nutrients = [
"doc",
"poc",
"nitrate",
"nitrite",
"ammonia",
"din",
"silicate",
"phosphate",
"sulfate",
"chloride",
"dn",
"dp",
]
for nutrient in nutrients:
nvol = nutrient + "_vol"
data.loc[data[nvol] < 0, nvol] = 0
# Assign nutrient uncertainties from RWS
data["silicate_vol_unc"] = data.silicate_vol * 0.15
data["phosphate_vol_unc"] = data.phosphate_vol * 0.25
data["nitrate_vol_unc"] = data.nitrate_vol * 0.3
data["nitrite_vol_unc"] = data.nitrite_vol * 0.3
data["ammonia_vol_unc"] = data.ammonia_vol * 0.15
data["doc_vol_unc"] = data.doc_vol * 0.2
#%% Import lab sheet and copy across salinities (for up to v6 only)
# lab = pd.read_excel(
# "data/co2data-20200406.xlsx",
# sheet_name="2018_2019 All Final Data",
# header=2,
# na_values=["#N/B", -999],
# )
# data["salinity_nioz"] = np.nan
# data["datfile_stem"] = ""
# for i in data.index:
# il = (lab.station == data.loc[i].station) & (lab.bottleid == data.loc[i].bottleid)
# if sum(il) == 1:
# data.loc[i, "salinity_nioz"] = lab.salinity_nioz[il].values
# data.loc[i, "datfile_stem"] = lab.station_bottleid[il].values
data["salinity"] = data.salinity_rws
# for i in data.index:
# if np.isnan(data.loc[i, "salinity"]):
# data.loc[i, "salinity"] = data.loc[i].salinity_nioz
#%% Get unique stations table and assign properties
stations = pd.DataFrame(index=np.unique(data.station))
stations["latitude"] = np.nan
stations["longitude"] = np.nan
for station in stations.index:
SL = data.station == station
stations.loc[station, "latitude"] = np.unique(data.latitude[SL])
stations.loc[station, "longitude"] = np.unique(data.longitude[SL])
stations["r"] = np.nan
stations["g"] = np.nan
stations["b"] = np.nan
# Goeree in red:
stations.loc["GOERE2", ["r", "g", "b"]] = np.array([0.9, 0.1, 0.1]) * 1.11
stations.loc["GOERE6", ["r", "g", "b"]] = np.array([0.9, 0.1, 0.1]) * 0.8
# Noordwijk in purple:
stations.loc["NOORDWK2", ["r", "g", "b"]] = np.array([0.6, 0.3, 0.6]) * 1.4
stations.loc["NOORDWK10", ["r", "g", "b"]] = np.array([0.6, 0.3, 0.6]) * 1.1
stations.loc["NOORDWK20", ["r", "g", "b"]] = np.array([0.6, 0.3, 0.6]) * 0.9
stations.loc["NOORDWK70", ["r", "g", "b"]] = np.array([0.6, 0.3, 0.6]) * 0.6
# Rottumerplaat in green:
stations.loc["ROTTMPT50", ["r", "g", "b"]] = np.array([0.3, 0.7, 0.3]) * 1.1
stations.loc["ROTTMPT70", ["r", "g", "b"]] = np.array([0.3, 0.7, 0.3]) * 0.7
# Schouwen in orange:
stations.loc["SCHOUWN10", ["r", "g", "b"]] = np.array([1, 0.5, 0])
# Terschelling in blue:
stations.loc["TERSLG10", ["r", "g", "b"]] = np.array([0.2, 0.5, 0.7]) * 1.42
stations.loc["TERSLG50", ["r", "g", "b"]] = np.array([0.2, 0.5, 0.7]) * 1.2
stations.loc["TERSLG100", ["r", "g", "b"]] = np.array([0.2, 0.5, 0.7]) * 1.0
stations.loc["TERSLG135", ["r", "g", "b"]] = np.array([0.2, 0.5, 0.7]) * 0.8
stations.loc["TERSLG175", ["r", "g", "b"]] = np.array([0.2, 0.5, 0.7]) * 0.6
stations.loc["TERSLG235", ["r", "g", "b"]] = np.array([0.2, 0.5, 0.7]) * 0.3
# Walcheren in brown:
stations.loc["WALCRN2", ["r", "g", "b"]] = np.array([0.6, 0.3, 0.1]) * 1.2
stations.loc["WALCRN20", ["r", "g", "b"]] = np.array([0.6, 0.3, 0.1]) * 0.9
stations.loc["WALCRN70", ["r", "g", "b"]] = np.array([0.6, 0.3, 0.1]) * 0.6
# Merge into rgb
stations_rgb = stations[["r", "g", "b"]].values.tolist()
stations_rgb = [np.array([v]) for v in stations_rgb]
stations["rgb"] = stations_rgb
# Assign groups
groups = pd.DataFrame(
index=[
"Walcheren & Schouwen",
"Goeree",
"Noordwijk",
"Terschelling & Rottumerplaat",
]
)
groups["gid"] = [1, 2, 3, 4]
stations["gid"] = 0
for station in stations.index:
if station in ["WALCRN2", "WALCRN20", "WALCRN70", "SCHOUWN10"]:
stations.loc[station, "gid"] = 1
elif station in ["GOERE2", "GOERE6"]:
stations.loc[station, "gid"] = 2
elif station in ["NOORDWK2", "NOORDWK10", "NOORDWK20", "NOORDWK70"]:
stations.loc[station, "gid"] = 3
elif station in [
"TERSLG10",
"TERSLG50",
"TERSLG100",
"TERSLG135",
"TERSLG175",
"TERSLG235",
"ROTTMPT50",
"ROTTMPT70",
]:
stations.loc[station, "gid"] = 4
data["gid"] = stations.gid[data.station].values
data["rgb"] = stations.rgb[data.station].values
# Get station descriptions
stations["description"] = [
allstations.loc[allstations["U meetpunt"] == station, "U omschr meetpunt"].values[0]
for station in stations.index
]
#%% Add more sampling date variants
data["datetime"] = pd.to_datetime(data.datetime, format="%d-%m-%Y %H:%M")
data["datenum"] = mdates.date2num(data.datetime)
data["day_of_year"] = data.datetime.dt.dayofyear
# Add depth info
data["depth"] = -data.height_cm / 100
# Save for next step of Python analysis
with open("pickles/data0_stations_groups_v10.pkl", "wb") as f:
pickle.dump((data, stations, groups), f)
| [
"pandas.DataFrame",
"pickle.dump",
"pandas.read_csv",
"pandas.read_excel",
"numpy.any",
"pandas.to_datetime",
"numpy.array",
"matplotlib.dates.date2num",
"numpy.unique"
] | [((420, 477), 'pandas.read_excel', 'pd.read_excel', (['"""data/Coordinaten_verzuring_20190429.xlsx"""'], {}), "('data/Coordinaten_verzuring_20190429.xlsx')\n", (433, 477), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((732, 837), 'pandas.read_csv', 'pd.read_csv', (['"""data/bottle_files/Bottlefile_NIOZ_20210618_MPH.csv"""'], {'header': '(3)', 'na_values': "['#N/B', -999]"}), "('data/bottle_files/Bottlefile_NIOZ_20210618_MPH.csv', header=3,\n na_values=['#N/B', -999])\n", (743, 837), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((4812, 4833), 'numpy.array', 'np.array', (['[1, 0.5, 0]'], {}), '([1, 0.5, 0])\n', (4820, 4833), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((5752, 5855), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "['Walcheren & Schouwen', 'Goeree', 'Noordwijk', 'Terschelling & Rottumerplaat']"}), "(index=['Walcheren & Schouwen', 'Goeree', 'Noordwijk',\n 'Terschelling & Rottumerplaat'])\n", (5764, 5855), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((6852, 6906), 'pandas.to_datetime', 'pd.to_datetime', (['data.datetime'], {'format': '"""%d-%m-%Y %H:%M"""'}), "(data.datetime, format='%d-%m-%Y %H:%M')\n", (6866, 6906), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((6925, 6955), 'matplotlib.dates.date2num', 'mdates.date2num', (['data.datetime'], {}), '(data.datetime)\n', (6940, 6955), True, 'from matplotlib import dates as mdates\n'), ((1041, 1051), 'numpy.any', 'np.any', (['SL'], {}), '(SL)\n', (1047, 1051), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((3900, 3928), 'numpy.unique', 'np.unique', (['data.latitude[SL]'], {}), '(data.latitude[SL])\n', (3909, 3928), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((3970, 3999), 'numpy.unique', 'np.unique', (['data.longitude[SL]'], {}), '(data.longitude[SL])\n', (3979, 3999), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((4128, 4153), 'numpy.array', 
'np.array', (['[0.9, 0.1, 0.1]'], {}), '([0.9, 0.1, 0.1])\n', (4136, 4153), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((4203, 4228), 'numpy.array', 'np.array', (['[0.9, 0.1, 0.1]'], {}), '([0.9, 0.1, 0.1])\n', (4211, 4228), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((4302, 4327), 'numpy.array', 'np.array', (['[0.6, 0.3, 0.6]'], {}), '([0.6, 0.3, 0.6])\n', (4310, 4327), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((4379, 4404), 'numpy.array', 'np.array', (['[0.6, 0.3, 0.6]'], {}), '([0.6, 0.3, 0.6])\n', (4387, 4404), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((4456, 4481), 'numpy.array', 'np.array', (['[0.6, 0.3, 0.6]'], {}), '([0.6, 0.3, 0.6])\n', (4464, 4481), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((4533, 4558), 'numpy.array', 'np.array', (['[0.6, 0.3, 0.6]'], {}), '([0.6, 0.3, 0.6])\n', (4541, 4558), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((4636, 4661), 'numpy.array', 'np.array', (['[0.3, 0.7, 0.3]'], {}), '([0.3, 0.7, 0.3])\n', (4644, 4661), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((4713, 4738), 'numpy.array', 'np.array', (['[0.3, 0.7, 0.3]'], {}), '([0.3, 0.7, 0.3])\n', (4721, 4738), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((4902, 4927), 'numpy.array', 'np.array', (['[0.2, 0.5, 0.7]'], {}), '([0.2, 0.5, 0.7])\n', (4910, 4927), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((4979, 5004), 'numpy.array', 'np.array', (['[0.2, 0.5, 0.7]'], {}), '([0.2, 0.5, 0.7])\n', (4987, 5004), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((5056, 5081), 'numpy.array', 'np.array', (['[0.2, 0.5, 0.7]'], {}), '([0.2, 0.5, 0.7])\n', (5064, 5081), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((5133, 5158), 'numpy.array', 'np.array', (['[0.2, 0.5, 0.7]'], {}), '([0.2, 0.5, 0.7])\n', (5141, 5158), True, 'import numpy as np, pandas as pd, matplotlib 
as mpl\n'), ((5210, 5235), 'numpy.array', 'np.array', (['[0.2, 0.5, 0.7]'], {}), '([0.2, 0.5, 0.7])\n', (5218, 5235), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((5287, 5312), 'numpy.array', 'np.array', (['[0.2, 0.5, 0.7]'], {}), '([0.2, 0.5, 0.7])\n', (5295, 5312), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((5384, 5409), 'numpy.array', 'np.array', (['[0.6, 0.3, 0.1]'], {}), '([0.6, 0.3, 0.1])\n', (5392, 5409), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((5460, 5485), 'numpy.array', 'np.array', (['[0.6, 0.3, 0.1]'], {}), '([0.6, 0.3, 0.1])\n', (5468, 5485), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((5536, 5561), 'numpy.array', 'np.array', (['[0.6, 0.3, 0.1]'], {}), '([0.6, 0.3, 0.1])\n', (5544, 5561), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((5658, 5671), 'numpy.array', 'np.array', (['[v]'], {}), '([v])\n', (5666, 5671), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n'), ((7169, 7209), 'pickle.dump', 'pickle.dump', (['(data, stations, groups)', 'f'], {}), '((data, stations, groups), f)\n', (7180, 7209), False, 'import pickle\n'), ((3710, 3733), 'numpy.unique', 'np.unique', (['data.station'], {}), '(data.station)\n', (3719, 3733), True, 'import numpy as np, pandas as pd, matplotlib as mpl\n')] |
#!/usr/bin/env python
"""
Python implementation of common model fitting operations to
analyse protein folding data. Simply automates some fitting
and value calculation. Will be extended to include phi-value
analysis and other common calculations.
Allows for quick model evaluation and plotting.
Also tried to make this somewhat abstract and modular to
enable more interesting calculations, such as Ising models
and such.
Requirements (recommended python 2.7+):
- numpy
- scipy
- matplotlib
Lowe, A.R. 2015
"""
import os
import csv
import inspect
from collections import OrderedDict
import numpy as np
from scipy import optimize
from scipy.stats import t as t_distrb
# pyfolding imports
from . import utils
from . import constants
from .plotting import *
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = constants.VERSION
# by default turn off autoscrolling if it exists
utils.disable_autoscroll()
# set up a global temperature object
temperature = utils.__Temperature()
"""
===========================================================
FILE I/O OPERATIONS
===========================================================
"""
def read_kinetic_data(directory=None, filename=None):
    """ Load kinetic data from a .csv worksheet.

    Each file corresponds to a single protein, with columns arranged as:

    [den] k1 k2 ...

    Args:
        directory: folder containing the worksheet.
        filename: name of the worksheet.

    Returns:
        a Chevron object containing the data.
    """
    filepath = os.path.join(directory, filename)
    importer = utils.DataImporter(datatype='Chevron')
    return importer.load(filepath)
def read_equilibrium_data(directory=None, filename=None):
    """ Load an equilibrium denaturation curve from a .csv worksheet.

    Each file corresponds to a single protein, with columns arranged as:

    [den] unfolding

    Args:
        directory: folder containing the worksheet.
        filename: name of the worksheet.

    Returns:
        an EquilibriumDenaturationCurve object containing the data.
    """
    filepath = os.path.join(directory, filename)
    importer = utils.DataImporter(datatype='EquilibriumDenaturationCurve')
    return importer.load(filepath)
def read_generic_data(directory=None, filename=None):
    """ Load a generic dataset from a .csv worksheet.

    Each file corresponds to a single protein, with columns arranged as:

    x y_0 y_1 ....

    Args:
        directory: folder containing the worksheet.
        filename: name of the worksheet.

    Returns:
        a generic data object containing the data.
    """
    filepath = os.path.join(directory, filename)
    importer = utils.DataImporter()
    return importer.load(filepath)
"""
===========================================================
SETTING CALCULATION TEMPERATURE
===========================================================
"""
def set_temperature(value=constants.TEMPERATURE_CELSIUS):
    """ Set the global calculation temperature.

    Args:
        value: the temperature in degrees Celsius.

    Returns:
        None

    Usage:
        >> pyfolding.set_temperature( 10.2 )

    Notes:
        This updates the module-level temperature object, so it affects
        all subsequent calculations.
    """
    temperature.temperature = value
    confirmation = "Set temperature to {0:2.2f}\u00B0C".format(value)
    print(confirmation)
    print("(NOTE: Careful, this sets the temperature for all subsequent calculations)")
"""
===========================================================
BASE CLASSES
===========================================================
"""
class DataTemplate(object):
    """ DataTemplate

    Base class for chevrons, equilibrium denaturation curves and generic
    data. Takes care of common functions such as fitting of models.

    Data is stored internally as a dictionary and associated list, for example:

    labels = ['denaturant', 'k1', 'k2']
    data = {'x': {'k1': [], 'k2': []}, 'y': {'k1':[], 'k2':[]}

    Subclassed objects may use these data in different ways, for example as
    Chevron plots or Equilibrium denaturation curves.

    Usage:
        >> data['k1']
        returns a tuple of x['k1'] and y['k1']

    Properties:
        datasets - return a list of datasets in the model
        fit_func - return/set the fit function for the dataset
        fit_func_args - return the fit function arguments
        fit_params - return the final parameters following the fit
        results - a FitResult object following fitting
    """

    def __init__(self):
        # store the raw data in a dictionary, keyed 'x'/'y' then by dataset label
        self.labels = []
        self.data = {}

        # the associated fit function and the result of the most recent fit
        self.__fit_func = None
        self.__fit = None
        self.__fit_residuals = None

        # optional per-component simulated curves from the last fit
        self.components = None

    def initialise(self):
        """ Subclasses may override this to perform post-load setup. """
        raise NotImplementedError

    def __getitem__(self, dataset):
        """ Return an (x, y) pair of numpy float arrays for the named dataset.

        Raises:
            TypeError: if the dataset is not specified as a string.
            ValueError: if the dataset label is unknown.
        """
        if not isinstance(dataset, str):
            # BUGFIX: corrected the 'as as string' typo in the error message
            raise TypeError('Dataset must be specified as a string')

        if dataset not in self.datasets:
            raise ValueError('Dataset {0:s} not found'.format(dataset))

        return ( np.array(self.data['x'][dataset], dtype='float'),
                np.array(self.data['y'][dataset], dtype='float') )

    @property
    def datasets(self):
        """ The dataset labels, i.e. all labels except the x-axis label. """
        return self.labels[1:]

    @property
    def fit_func(self):
        """ The name of the current fit function. """
        return self.__fit_func.name

    @fit_func.setter
    def fit_func(self, fit_func=None):
        # the fit function is supplied as a class/factory and instantiated here
        if callable(fit_func):
            self.__fit_func = fit_func()
        else:
            raise AttributeError("Fit function must be callable")

    @property
    def fit_func_args(self):
        """ Argument names of the fit function, or None if no function is set. """
        if self.__fit_func:
            return self.__fit_func.fit_func_args

    @property
    def fit_params(self):
        """ The fitted parameter values following the fit. """
        return [p.value for p in self.__fit.fit_params]

    @property
    def results(self):
        """ The FitResult object from the most recent fit (None before fitting). """
        return self.__fit

    @results.setter
    def results(self, result):
        if not isinstance(result, FitResult):
            raise TypeError("Results must be of type FitResult")
        # BUGFIX: '{0:s}'.format(self) raised a TypeError because plain
        # objects only accept an empty format specification; use '{0}'
        print("Warning: overwriting fit result for {0}".format(self))
        self.__fit = result

    def fit(self, p0=None, const=None):
        """ Fit the data to the defined model. Use p0 to introduce the estimated
        start values.

        Args:
            p0: initial parameter estimates (defaults to the fit
                function's own default parameters).
            const: optional constant(s) to hold fixed during the fit.

        Raises:
            AttributeError: if no fit function has been set.
        """
        if self.__fit_func:
            # reset components from any previous fit
            self.components = None

            # set the default fitting parameters
            if not p0: p0 = self.__fit_func.default_params

            # set up a single-dataset global fit
            f = GlobalFit()
            f.fit_funcs = [self.__fit_func]
            if const: f.constants = [const]
            f.shared = [] # no shared parameters by default
            f.x = [self.x]
            f.y = [self.y]
            f.ID = [self.ID]

            out, covar = f.fit( p0=p0 )

            self.__fit = f.results[0]

            # store per-component simulated curves if the model provides them
            if hasattr(self.__fit_func, "components"):
                self.components = self.__fit_func.components(constants.XSIM, *out.tolist())
        else:
            raise AttributeError("Fit function must be defined first.")

        self.__fit.display()

    @property
    def fitted_x(self):
        raise DeprecationWarning("This feature will be deprecated soon.")

    @property
    def fitted(self):
        raise DeprecationWarning("This feature will be deprecated soon.")

    def print_fit_params(self):
        # BUGFIX: removed the unreachable code that followed this raise
        raise DeprecationWarning("This feature will be deprecated soon.")

    def plot(self, **kwargs):
        """ Plot a simple figure of the data; the plotting routine is
        selected by the concrete data type (chevron, equilibrium or generic).

        kwargs are passed through, e.g. title='', marker='wo', display_fit=True
        """
        if isinstance(self, Chevron):
            plot_chevron(self, **kwargs)
        elif isinstance(self, EquilibriumDenaturationCurve):
            plot_equilibrium(self, **kwargs)
        else:
            plot_generic(self, **kwargs)

    def save_fit(self, filename):
        """ Export the fit results to a file. """
        exporter = utils.FitExporter()
        exporter.export(filename, self.results)
class Protein(object):
    """ Container object for the datasets belonging to a single protein.

    A Protein can hold equilibrium, kinetic and other types of data, and
    can be passed to higher-order functions (such as 'phi') that combine
    several datasets in their calculations.

    Properties:
        deltaG - the equilibrium deltaG value from equilibrium data
        kf_H20 - the observed folding rate in water

    Notes:
        None
    """
    def __init__(self, ID=None):
        self.ID = ID
        self.chevron = None
        self.equilibrium = None
        self.other = None

    @property
    def deltaG(self):
        """ Equilibrium free energy, delegated to the equilibrium dataset. """
        return self.equilibrium.deltaG

    @property
    def kf_H20(self):
        """ Observed folding rate in water, taken from the first fitted
        point of the chevron (results.y_fit[0]). """
        return self.chevron.results.y_fit[0]
class GenericData(DataTemplate):
    """ A catch-all data model for datasets that are neither chevrons nor
    equilibrium denaturation curves.

    Exposes the first dataset as the (x, y) pair used for fitting.
    """
    def __init__(self, ID=None):
        DataTemplate.__init__(self)
        self.ID = ID

    @property
    def x(self):
        """ The abscissa of the first dataset. """
        first = self.datasets[0]
        return self[first][0]

    @property
    def y(self):
        """ The ordinate of the first dataset. """
        first = self.datasets[0]
        return self[first][1]

    @property
    def y_raw(self):
        """ Alias for y: generic data requires no transformation. """
        return self.y

    def initialise(self):
        """ No post-load setup is required for generic data. """
        pass
class Chevron(DataTemplate):
    """ Chevron plot for protein folding kinetics.

    Stores the observed rate constants of one or more kinetic phases
    (k1, k2, ...) as a function of denaturant concentration. Rates are
    stored raw; fitting and the y property operate on ln(k).

    Args:
        ID: an optional identifier for the protein/dataset.

    Methods:
        unfolding_limb, refolding_limb, chevron, rate

    Notes:
        Unless a midpoint has been set explicitly (e.g. from equilibrium
        data), it defaults to the denaturant concentration at the minimum
        of the 'k1' phase.
    """
    def __init__(self, ID=None):
        DataTemplate.__init__(self)
        self.ID = ID
        self.__midpoint = None

    @property
    def denaturant_label(self):
        """ Name of the denaturant, i.e. the first column label. """
        return self.labels[0]

    @property
    def phases(self):
        """ The kinetic phases present in this dataset. """
        return self.datasets

    @property
    def rates(self):
        """ Raw observed rates, keyed by phase. """
        return {k: self[k][1] for k in self.phases}

    @property
    def denaturant(self):
        """ Denaturant concentrations, keyed by phase. """
        return {k: self[k][0] for k in self.phases}

    @property
    def x(self):
        """ Denaturant concentrations of the first phase. """
        return np.array(self.denaturant[self.phases[0]])

    @property
    def y(self):
        """ Natural log of the rates of the first phase (used for fitting). """
        return np.array(np.log(self.rates[self.phases[0]]))

    @property
    def y_raw(self):
        """ Raw (untransformed) rates of the first phase. """
        return np.array(self.rates[self.phases[0]])

    @property
    def midpoint(self):
        """ Return a calculated midpoint for the chevron. Unless
        we have set one using equilibrium data.
        """
        if not self.__midpoint and self.denaturant:
            # fall back to the denaturant concentration at the 'k1' rate minimum
            return self.denaturant['k1'][ np.argmin(self.rates['k1']) ]
        else:
            return self.__midpoint

    @midpoint.setter
    def midpoint(self, midpoint=0.0):
        # BUGFIX: an out-of-range float was previously ignored silently;
        # now any invalid value raises, as the message always promised.
        if isinstance(midpoint, float) and 0. < midpoint < 10.:
            self.__midpoint = midpoint
        else:
            raise Exception("Midpoint must be a float and 0<x<10")

    def unfolding_limb(self, phase=None):
        """ Return only the unfolding limb data, i.e. the (denaturant,
        ln(rate)) pairs above the midpoint. Returns None for an unknown
        phase.
        """
        if not phase:
            phase = self.phases[0]
        elif phase not in self.phases:
            return None
        denaturant, rates = [], []
        for d, r in zip(self.denaturant[phase], self.rate(phase)):
            if d > self.midpoint:
                denaturant.append(d)
                rates.append(r)
        return denaturant, rates

    def refolding_limb(self, phase=None):
        """ Return only the refolding limb data, i.e. the (denaturant,
        ln(rate)) pairs at or below the midpoint. Returns None for an
        unknown phase.
        """
        if not phase:
            phase = self.phases[0]
        elif phase not in self.phases:
            return None
        denaturant, rates = [], []
        for d, r in zip(self.denaturant[phase], self.rate(phase)):
            if d <= self.midpoint:
                denaturant.append(d)
                rates.append(r)
        return denaturant, rates

    def chevron(self, phase=None):
        """ Return the entire phase of a chevron as (denaturant, ln(rates)).
        Returns None for an unknown phase.
        """
        if not phase:
            phase = self.phases[0]
        elif phase not in self.phases:
            return None
        return self.denaturant[phase], self.rate(phase)

    def rate(self, phase=None):
        """ Natural log of the observed rates for the given phase. """
        return np.log(self.rates[phase])
class EquilibriumDenaturationCurve(DataTemplate):
    """ Equilibrium Denaturation curve

    Stores one or more equilibrium curves (datasets), each a pair of
    (denaturant concentration, signal) arrays, and exposes fit-derived
    thermodynamic quantities (m-value, midpoint, deltaG).

    Args:
        ID: optional identifier for the protein/experiment.
    Methods:
        point: denaturant concentration at a given fraction folded.
    Notes:
        m_value/midpoint read positions out of self.fit_params using the
        fit function's argument names ('m', 'd50'), so they return None
        until a fit has been performed.
    """
    def __init__(self, ID=None):
        DataTemplate.__init__(self)
        self.ID = ID
    @property
    def denaturant_label(self): return self.labels[0]
    @property
    def curves(self): return self.datasets
    # mapping curve name -> signal values
    @property
    def signal(self): return {k:self[k][1] for k in self.curves}
    # mapping curve name -> denaturant concentrations
    @property
    def denaturant(self): return {k:self[k][0] for k in self.curves}
    @property
    def x(self): return np.array(self.denaturant[self.curves[0]])
    @property
    def y(self): return np.array(self.signal[self.curves[0]])
    @property
    def y_raw(self): return self.y
    @property
    def normalised(self):
        """ TODO(arl): Return a normalised equilibrium curve.
        """
        raise NotImplementedError
    @property
    def m_value(self):
        # fitted m-value, located by argument name in the fit function
        if isinstance(self.fit_params, list):
            return self.fit_params[ self.fit_func_args.index('m') ]
        return None
    @property
    def midpoint(self):
        # fitted midpoint (d50), or None before fitting
        if isinstance(self.fit_params, list):
            return self.fit_params[ self.fit_func_args.index('d50') ]
        else:
            return None
    @property
    def two_state(self):
        """ Return whether this is a two state model or not """
        return 'd50' in self.fit_func_args
    def point(self, fraction_folded=0.5):
        """ Return the denaturant concentration for a particular
        fraction folded. Assumes a two-state transition since I
        had to derive this equation by hand.
        """
        if self.m_value and self.midpoint:
            if fraction_folded<0. or fraction_folded>1.:
                raise ValueError("Fraction folded must be in the range 0.<x<1.")
            return (np.log((1.-fraction_folded)/fraction_folded) / self.m_value) + self.midpoint
        else:
            return None
    @property
    def deltaG(self):
        """ Return the deltaG value based on the fit of the data """
        # deltaG = m * d50 for a two-state fit
        if self.m_value and self.midpoint:
            return self.m_value * self.midpoint
        else:
            return None
"""
===========================================================
MODEL FITTING FUNCTIONS
===========================================================
"""
def FIT_ERROR(x):
    """Return a generic fit-error signal for the optimiser.

    For ndarray input, returns an array of the same shape whose every
    element is constants.FITTING_PENALTY; any other input yields None.
    """
    if not isinstance(x, np.ndarray):
        return None
    penalty = np.ones(x.shape) * constants.FITTING_PENALTY
    return penalty
class FitParameter(object):
    """Container for a single fit parameter and its error statistics.

    Stores the fitted value alongside the standard error, confidence
    interval, covariance entry and degrees of freedom filled in after a
    fit has completed.
    """
    def __init__(self, name, value, param_type='free'):
        self.name = name
        self.value = value
        self.type = param_type
        self.DoF = None                  # degrees of freedom of the parent fit
        self.SE = 0                      # standard error of the estimate
        self.CI = [-np.inf, np.inf]      # confidence interval bounds
        self.covar = None                # diagonal covariance entry
        self.r_squared = None

    @property
    def name(self):
        return self.__name

    @name.setter
    def name(self, arg_name):
        # only plain strings are accepted as parameter names
        if not isinstance(arg_name, str):
            raise TypeError('Arg name must be of type string')
        self.__name = arg_name

    @property
    def type(self):
        return self.__type

    @type.setter
    def type(self, arg_type):
        # the parameter role must be one of a closed set of strings
        if not isinstance(arg_type, str):
            raise TypeError('Arg type must be of type string')
        if arg_type not in ['free', 'shared', 'constant']:
            raise ValueError('Arg type must be either free, shared or constant')
        self.__type = arg_type

    @property
    def CI_low(self):
        return self.CI[0]

    @property
    def CI_high(self):
        return self.CI[1]
class GlobalFit(object):
    """ GlobalFit
    Wrapper function to perform global fitting. This acts as a wrapper for
    multiple FitModels, enabling the user to pair datasets and models and share
    data or arguments.
    For each fit function, a list of arguments is compiled. Those belonging to
    the shared or constant type are set respectively.
    Note that a single or individual fit is just a special case of a global fit
    where there are no shared values and only one dataset. This wrapper can be
    used for that purpose too...
    Now added weighting to fits, specified using the weights property. These are
    inputs to the sigma function for curve_fit, and specified as the number
    of standard deviations of error (assuming Gaussian distrb.)
    Args:
        x: concatenated x data
        y: concatenated y data
        weights: (optional) per-dataset sigma values for curve_fit
    Properties:
        fit_funcs: the fit functions
        constants: constants for the fitting
    Members:
        __call__: evaluates the fit functions
    Notes:
        BUGFIX(review): setting weights to None previously fell through to
        the type check and raised TypeError; it now clears the weights.
    """
    def __init__(self):
        self.ID = []
        self.x = []
        self.y = []
        self.__fit_funcs = []
        self.__shared = []
        self.__initialised = False
        self.__params = None
        self.__results = None
        self.__weights = None
        self.covar = None
    @property
    def fit_funcs(self): return self.__fit_funcs
    @fit_funcs.setter
    def fit_funcs(self, fit_funcs):
        """ Accept FitModel instances or classes; classes are instantiated. """
        for fit_func in fit_funcs:
            # silently skip anything that is not callable
            if not hasattr(fit_func, "__call__"): continue
            # append it and instantiate it
            if isinstance(fit_func, FitModel):
                self.__fit_funcs.append(fit_func)
            else:
                self.__fit_funcs.append( fit_func() )
    @property
    def constants(self):
        return [f.constants for f in self.__fit_funcs]
    @constants.setter
    def constants(self, const=None):
        # one constants entry per fit function, paired positionally
        if len(const) != len(self.__fit_funcs):
            raise ValueError("Number of constants should be the same as number"
                            " of fit functions")
        for constant, fit_func in zip(const, self.__fit_funcs):
            fit_func.constants = constant
    @property
    def shared(self):
        return self.__shared
    @shared.setter
    def shared(self, shared_args=[]):
        """ Set the shared arguments for the global fit """
        if not isinstance(shared_args, (list, tuple)):
            raise TypeError('Shared args must be of type list or tuple')
        if not all([isinstance(a, str) for a in shared_args]):
            raise TypeError('Shared args must be a list of strings.')
        # TODO(arl): check that these shared params exist in the fit functions
        # and report an error if incorrect...
        self.__shared = list(set(shared_args))
    @property
    def weights(self):
        return self.__weights
    @weights.setter
    def weights(self, weights):
        """ Set weights for the global fit. These should be defined as
        standard deviations of errors in ydata. """
        if weights is None:
            # BUGFIX: clear the weights and return; previously execution
            # continued into the isinstance check and raised TypeError.
            self.__weights = None
            return
        if not isinstance(weights, (list,tuple)):
            raise TypeError('Weights must be of type list or tuple')
        if not all(isinstance(w, (np.ndarray, list)) for w in weights):
            raise TypeError('Weights must be a list of numpy arrays or lists')
        self.__weights = weights
    @property
    def fit_weights(self):
        """ Check and return the weights for fitting """
        # check weights
        if self.weights is not None:
            assert(len(self.weights) == len(self.x) == len(self.y))
            # curve_fit expects one flat sigma array over all datasets
            return np.concatenate([w for w in self.weights])
        return None
    @property
    def params(self): return self.__params
    def __call__(self, *args):
        """ Evaluate all fit functions with the current parameter values.

        args[0] is the concatenated x data (unused directly: each fit
        function is evaluated on its own x slice); the remainder are the
        free/shared parameter values in self.params order.
        """
        if not self.__initialised: self.initialise()
        x = args[0]
        fit_args = args[1:]
        # now set the values of the objects
        for p, p_val in zip(self.params, fit_args):
            self.__params[p].value = p_val
        ret = np.array(())
        for i, fit_func in enumerate(self.fit_funcs):
            ret = np.append( ret, self.eval_func(i) )
        return ret
    def initialise(self):
        """ Set up all of the shared, constant and free parameters """
        # generate default IDs if none were provided
        if len(self.ID) != len(self.x):
            self.ID = ['protein_{0:d}'.format(i) for i in range(len(self.x))]
        shared = {s:FitParameter(s, 0.0, param_type='shared') for s in self.shared}
        # set up an ordered dictionary of the parameter objects
        all_params = OrderedDict(shared)
        for f in self.fit_funcs:
            fit_func_params = []
            const = [c[0] for c in f.constants]
            # one FitParameter per fit-function argument, typed by origin
            for arg in f.fit_func_args:
                if arg in shared:
                    fit_func_params.append(shared[arg])
                elif arg in const:
                    c_val = f.constants[const.index(arg)][1]
                    fit_func_params.append(FitParameter(arg, c_val, param_type='constant'))
                else:
                    fit_func_params.append(FitParameter(arg, 0.0, param_type='free'))
            f.rick_and_morty = fit_func_params
        # now make the master list of params; free params are suffixed
        # with the dataset ID so that names stay unique across datasets
        for i, f in enumerate(self.fit_funcs):
            for p in f.rick_and_morty:
                if p.type=='shared' and p.name not in all_params:
                    all_params[p.name] = p
                elif p.type not in ('shared','constant'):
                    all_params[p.name+'_{'+self.ID[i]+'}'] = p
        # save this ordered dict for later
        self.__params = all_params
        # set the flag so that we don't do this again
        self.__initialised = True
    def eval_func(self, i):
        """ Evaluate fit function i on its own x data with the current
        parameter values. """
        if i<0 or i>len(self.fit_funcs):
            raise ValueError('Cannot evaluate fit function {0:d}'.format(i))
        fit_func = self.fit_funcs[i]
        x_this = np.array( self.x[i] )
        args_this = [a.value for a in fit_func.rick_and_morty]
        return fit_func(x_this, *args_this)
    def fit(self, p0=[], bounds=None):
        """ Run the fit.

        Args:
            p0: initial parameter values, one per entry in self.params.
            bounds: optional (lower, upper) bounds passed to curve_fit.
        Returns:
            (out, covar): optimal parameter values and covariance matrix.
        """
        # check a few things for consistency
        assert(len(self.x) == len(self.y))
        # concatenate the xy data
        x = np.concatenate([x for x in self.x])
        y = np.concatenate([y for y in self.y])
        # fit the data
        if bounds:
            out, covar = optimize.curve_fit(self, x, y, p0=p0, bounds=bounds,
                            max_nfev=20000, absolute_sigma=True,
                            sigma=self.fit_weights)
        else:
            out, covar = optimize.curve_fit(self, x, y, p0=p0, maxfev=20000,
                            absolute_sigma=True,
                            sigma=self.fit_weights)
        # now finalise and set up the results
        self.all_residuals = residuals(y_data=y, y_fit=self(x, *out))
        self.finalise(out, covar)
        return out, covar
    def finalise(self, out, covar):
        """ Take the results of the fitting, set the parameter values and
        calculate errors.
        """
        # put the parameter values in
        for i, p in enumerate(self.params):
            self.params[p].value = out[i]
            self.params[p].covar = covar[i,i]
        self.covar = covar
        self.__results = []
        # set up one FitResult per dataset/fit function pair
        for i,f in enumerate(self.fit_funcs):
            result = FitResult(fit_name=f.name, fit_params=f.rick_and_morty)
            result.ID = self.ID[i]
            result.method = "pyfolding.GlobalFit and scipy.optimize.curve_fit"
            result.y = self.eval_func(i)
            # smooth curve for plotting, extended down to x=0
            result.x_fit = np.linspace(np.min([0.]+self.x[i].tolist()), np.max(self.x[i]),100)
            result.y_fit = f(result.x_fit, *[a.value for a in f.rick_and_morty])
            result.covar = covar
            result.residuals = residuals(y_data=self.y[i], y_fit=result.y)
            result.r_squared = r_squared(y_data=self.y[i], y_fit=result.y)
            result.all_residuals = self.all_residuals
            self.__results.append(result)
    @property
    def results(self):
        return self.__results
class FitResult(object):
    """ Fitting result object.
    This is an internal class that collates fit results and enables calculation
    of errors, residuals and other fun things.
    Args:
        name: a name for the fit result (e.g. TwoStateChevron)
        fit_args: the fit function arguments
        ID: The identifier of the protein
    Properties:
        method: the name of the optimisation algorithm used
        errors: the calculated errors (SEM) for the fit arguments
        details: return a zipped list of argument, value, error tuples
        standard_error: the standard_error of the overall fit
        covar: covariance matrix following optimisation
        residuals: residuals of fit to data
        all_residuals: all residuals for a global fit (same as residuals if an
            individual fit)
        r_squared: r^2 value for the fit
    Members:
        display: return a formatted output of the fitting statistics
    Notes:
        TODO(arl): implement an export function
    """
    def __init__(self, fit_name=None, fit_params=None):
        self.ID = None
        self.fit_params = fit_params
        self.name = fit_name
        self.covar = None
        self.residuals = None
        self.all_residuals = None
        self.r_squared = None
        self.x_fit = None
        self.y_fit = None
        self.__method = "scipy.optimize.curve_fit"
    @property
    def method(self): return self.__method
    @method.setter
    def method(self, method=None):
        if not isinstance(method, str):
            raise TypeError("FitResult: Method must be a string")
        self.__method = method
    def display(self):
        """ Print the errors and fit values """
        table_width = max([len("Model: "+self.name), len(" Fitting results "), 80])
        # find the longest parameter name so the columns line up
        nl = 0
        for p in self.details:
            nl = max(nl, len(p.name))
        print("="*table_width)
        print(" Fitting results")
        print("="*table_width)
        if self.ID: print(" ID: {0:s}".format(self.ID))
        print(" Model: {0:s}".format(self.name))
        print(" Optimiser: {0:s}".format(self.__method))
        print(" Temperature: {0:2.2f}\u00B0C\n".format(temperature.temperature))
        for p in self.details:
            self.display_row(p, nl)
        print("-"*table_width)
        print(" R^2: \t{0:2.5f}".format(self.r_squared))
        print(" DOF: \t{0:d}".format(self.DoF))
        print("|SS|: \t{0:2.2e}".format(self.SS))
        print("="*table_width)
        print("\n")
    def display_row(self, p, max_name_len):
        """ Take a parameter and display a row of the table """
        p_name = p.name.ljust(max_name_len)
        # constants have no error statistics, so print value only
        if p.type == 'constant':
            print(" ({0:s}) {1:s} {2:>2.5e}".format(p.type[0], p_name, p.value))
            return
        print(" ({0:s}) {1:s} {2:>2.5e} \u00B1 {3:<2.5e}" \
            " \t {6:d}\u0025 CI[{4:>2.5e}, {5:>2.5e}]".format(p.type[0],
            p_name, p.value, p.SE, p.CI_low, p.CI_high,
            int(constants.CONFIDENCE_INTERVAL)))
    def confidence(self, i):
        """ Return the 95 per cent confidence interval for a fitted parameter
        https://stats.stackexchange.com/questions/72047/when-fitting-a-curve-how-do-i-calculate-the-95-confidence-interval-for-my-fitt
        [BestFit(Pi) +/- t(95%,DF)*SE(Pi)
        NOTES:
            TODO(arl): make this a user defined interval
        """
        # NOTE(review): this uses the t-distribution pdf at the CI level;
        # a two-tailed critical value (ppf) is the more usual statistic —
        # confirm the intended calculation.
        ci = constants.CONFIDENCE_INTERVAL / 100.0
        conf = t_distrb.pdf(ci, self.DoF) * self.SE(i)
        return (self.fit_params[i].value-conf, self.fit_params[i].value+conf)
    def SE(self, i):
        """ Return the SE for parameter i
        SE(Pi) = sqrt[ (SS/DF) * Cov(i,i) ]
        """
        SE = np.sqrt( (self.SS / self.DoF) * self.fit_params[i].covar )
        return SE
    @property
    def DoF(self):
        """ Return the number of degrees of freedom, essentially the difference
        between the number of data points and the number of fit parameters
        """
        return len(self.all_residuals) - len(self.fit_params)
    @property
    def SS(self):
        """ Sum of squared residuals """
        SS = np.sum(self.all_residuals**2)
        return SS
    @property
    def details(self):
        """ Return a zipped list of the fit arguments, values and errors """
        details = []
        for i, f in enumerate(self.fit_params):
            # constants carry no error statistics
            if f.type == 'constant':
                details.append(f)
                continue
            # fill in the derived statistics on each parameter object
            f.DoF = self.DoF
            f.SE = self.SE(i)
            f.CI = self.confidence(i)
            details.append(f)
        return details
    @property
    def standard_error(self):
        """ Return the standard error of the fit """
        return np.std(self.residuals) / np.sqrt(1.*len(self.residuals))
    def export(self, filename):
        raise NotImplementedError
class FitModel(object):
    """ FitModel class
    A generic fit model to enable a common core set of functions
    but specific new member functions to be enabled in derived
    classes.
    Can define parameters in this manner:
    ('kf',0), ('mf',1), ... in order to enable parameter sharing
    in global fitting. By default the model just gets the params
    in the order they are defined in the function definition
    Note: this must be subclassed to work.
    Args:
        constants: constants for fitting
    Properties:
        params: the parameters of the fit model
        name: the name of the fit model
        default_params: the default starting parameters for the fit
        fit_func_args: the names of the fit function arguments
        equation: a LaTeX formatted string of the model
    Methods:
        __call__: evaluates the fit function with the given args
        print_equation: (static) prints the equation to the stdout
        fit_func: (not defined) the actual fit function
        error_func: (not defined) the error function
    Notes:
    """
    def __init__(self):
        # name-mangled parameter storage (indices and names)
        self.__params = None
        self.__param_names = None
        self.__default_params = None
        self.fit_params = None
        self.fit_covar = None
        self.constants = []
        # has this model been verified
        self.verified = False
    @property
    def params(self): return self.__params
    @params.setter
    def params(self, params=None):
        if isinstance(params, tuple):
            self.__params, self.__param_names = [], []
            for key,value in params:
                self.__param_names.append(key)
                self.__params.append(value)
        else:
            # NOTE(review): raising the Warning *class* rather than calling
            # warnings.warn is unusual — confirm this is intended to abort.
            raise Warning("Fit parameters must be a tuple")
    @property
    def name(self): return self.__class__.__name__
    def __call__(self, x, *args):
        """ Parse the fit arguments and pass onto the
        fitting function
        """
        # fit_args = self.get_fit_params(x, *args)
        # return self.error_func( self.fit_func(x, *fit_args) )
        return self.error_func( self.fit_func(x, *args) )
    def fit_func(self, x, *args):
        """ The fit function should be defined here """
        raise Exception("Fit function must be defined")
    def error_func(self, y):
        """ The error function should be defined here """
        return y
    def get_fit_params(self, x, *args):
        # map positional args through the declared parameter indices
        fit_args = [args[v] for v in self.__params]
        # if we have constants replace the arguments
        # with the constants
        if self.constants:
            for arg, constant in self.constants:
                if arg in self.__param_names:
                    idx = self.__params[ self.__param_names.index(arg) ]
                    fit_args[idx] = constant
        return fit_args
    @property
    def default_params(self):
        """ Give back either the set starting parameters,
        or set all to 1.0
        """
        if isinstance(self.__default_params, np.ndarray):
            return self.__default_params
        else:
            return np.ones((len(self.params),1))
    @default_params.setter
    def default_params(self, params):
        # silently ignores non-ndarray input
        if isinstance(params, np.ndarray):
            self.__default_params = params
    @property
    def fit_func_args(self):
        # return inspect.getargspec(self.fit_func).args[2:]
        # skip 'self' and 'x'; only the model parameters remain
        return inspect.getfullargspec(self.fit_func).args[2:]
    @property
    def equation(self):
        raise NotImplementedError
    # @staticmethod
    def print_equation(self):
        # FIXED(arl): no longer requires IPython
        if not 'ipykernel' in sys.modules:
            print(self.equation)
            return
        # if we are in an IPython shell or Jupyter notebook, use the LaTeX
        # display for the equation
        from IPython.display import display, Math, Latex
        display(Math(self.equation))
        return None
    def info(self):
        # print the LaTeX equation followed by the subclass docstring
        self.print_equation()
        print(self.__doc__)
def r_squared(y_data=None, y_fit=None):
    """Coefficient of determination (R^2) of a fit against the data."""
    ss_res = np.sum((y_data - y_fit)**2)
    ss_tot = np.sum((y_data - np.mean(y_data))**2)
    return 1. - ss_res / ss_tot
def residuals(y_data=None, y_fit=None):
    """Element-wise difference between observed data and fitted values."""
    diff = y_data - y_fit
    return diff
def phi(ref_protein, mut_protein):
    """Convenience wrapper around pyfolding.phi.phi.

    Args:
        ref_protein: the reference (wild-type) protein.
        mut_protein: the mutant protein to compare against the reference.

    Returns:
        The result of the phi-value analysis from the phi module.
    """
    # local import avoids a circular import at module load time
    from .phi import phi
    # BUGFIX: previously passed the undefined name 'cmp_protein', which
    # raised a NameError on every call; the parameter is mut_protein.
    return phi(ref_protein, mut_protein)
"""
===========================================================
TEST FUNCTION
===========================================================
"""
def test(protein_ID='Simulated protein'):
    """
    Test function to make sure that PyFolding is installed correctly
    and functioning as it should. Generates a simulated data set
    using known parameters and noise, and then fits and plots the
    data comparing these to the ground truth.

    Args:
        protein_ID: label used for the simulated Chevron and
            EquilibriumDenaturationCurve objects.
    Raises:
        ValueError: if any fitted parameter deviates from the ground
            truth by more than the acceptable squared error.
    """
    from . import models
    # initialise the data structures
    chevron = Chevron(ID=protein_ID)
    equilibrium = EquilibriumDenaturationCurve(ID=protein_ID)
    # maximum allowed squared error between truth and fitted value
    acceptible_error = 1e-2
    truth = {'eq':[1.5, 5.], 'kin': [100., 1., 0.005, 1.]}
    # denaturant concentrations
    den = np.linspace(0.,10.,100)
    # generate a two-state equilibrium curve, with Gaussian noise
    # alpha_f, beta_f, alpha_u, beta_u, m, d50
    eq_model = models.TwoStateEquilibrium()
    eq_raw = eq_model.fit_func(den, *truth['eq'])
    eq_sim = eq_raw + np.random.randn(100,)*0.01
    equilibrium.labels = ['[Denaturant] (M)', 'e1']
    equilibrium.data = {'x':{'e1':den}, 'y':{'e1':eq_sim}}
    equilibrium.fit_func = models.TwoStateEquilibrium
    # generate a two-state chevron curve, with Gaussian noise
    # kf, mf, ku, mu
    kin_model = models.TwoStateChevron()
    kin_raw = kin_model.fit_func(den, *truth['kin'])
    # noise is applied in log space so rates stay positive
    kin_sim = np.exp( np.log(kin_raw) + np.random.randn(100,)*0.001 )
    chevron.labels = ['[Denaturant] (M)', 'k1']
    chevron.data = {'x':{'k1':den}, 'y':{'k1':kin_sim}}
    chevron.fit_func = models.TwoStateChevron
    # fit the equilibrium data to a two-state model
    equilibrium.fit()
    # use the midpoint (D_50) of the equilibrium curve as the kinetic midpoint
    chevron.midpoint = equilibrium.midpoint
    # now fit the chevron to a two-state model
    chevron.fit()
    # get the parameters and check that they are the same as the
    # ground truth set
    for p_truth, p_fit in zip(truth['eq'], equilibrium.fit_params):
        if (p_truth - p_fit)**2 > acceptible_error:
            raise ValueError("PyFolding self-test failed. Fitting error ({0:f}) exceeds \
bounds ({1:f}) \n".format((p_truth - p_fit)**2, acceptible_error))
    for p_truth, p_fit in zip(truth['kin'], chevron.fit_params):
        if (p_truth - p_fit)**2 > acceptible_error:
            raise ValueError("PyFolding self-test failed. Fitting error ({0:f}) exceeds \
bounds ({1:f}) \n".format((p_truth - p_fit)**2, acceptible_error))
    print('SUCCESS - Test completed!')
    # # plot the output
    # if plot_output:
    #     plot_figure(equilibrium, chevron, display=True)
# Run the PyFolding self-test when executed as a script.
if __name__ == "__main__":
    test()
| [
"numpy.sum",
"numpy.log",
"inspect.getfullargspec",
"numpy.random.randn",
"numpy.std",
"numpy.ones",
"numpy.argmin",
"scipy.optimize.curve_fit",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.linspace",
"IPython.display.Math",
"collections.OrderedDict",
"scipy.stats.t.pdf",
"os.path.... | [((33927, 33954), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', '(100)'], {}), '(0.0, 10.0, 100)\n', (33938, 33954), True, 'import numpy as np\n'), ((1552, 1585), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (1564, 1585), False, 'import os\n'), ((2015, 2048), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (2027, 2048), False, 'import os\n'), ((2413, 2446), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (2425, 2446), False, 'import os\n'), ((9677, 9718), 'numpy.array', 'np.array', (['self.denaturant[self.phases[0]]'], {}), '(self.denaturant[self.phases[0]])\n', (9685, 9718), True, 'import numpy as np\n'), ((9845, 9881), 'numpy.array', 'np.array', (['self.rates[self.phases[0]]'], {}), '(self.rates[self.phases[0]])\n', (9853, 9881), True, 'import numpy as np\n'), ((11717, 11742), 'numpy.log', 'np.log', (['self.rates[phase]'], {}), '(self.rates[phase])\n', (11723, 11742), True, 'import numpy as np\n'), ((12302, 12343), 'numpy.array', 'np.array', (['self.denaturant[self.curves[0]]'], {}), '(self.denaturant[self.curves[0]])\n', (12310, 12343), True, 'import numpy as np\n'), ((12382, 12419), 'numpy.array', 'np.array', (['self.signal[self.curves[0]]'], {}), '(self.signal[self.curves[0]])\n', (12390, 12419), True, 'import numpy as np\n'), ((19467, 19479), 'numpy.array', 'np.array', (['()'], {}), '(())\n', (19475, 19479), True, 'import numpy as np\n'), ((19995, 20014), 'collections.OrderedDict', 'OrderedDict', (['shared'], {}), '(shared)\n', (20006, 20014), False, 'from collections import OrderedDict\n'), ((21507, 21526), 'numpy.array', 'np.array', (['self.x[i]'], {}), '(self.x[i])\n', (21515, 21526), True, 'import numpy as np\n'), ((21841, 21876), 'numpy.concatenate', 'np.concatenate', (['[x for x in self.x]'], {}), '([x for x in self.x])\n', (21855, 21876), True, 'import numpy as np\n'), ((21889, 21924), 
'numpy.concatenate', 'np.concatenate', (['[y for y in self.y]'], {}), '([y for y in self.y])\n', (21903, 21924), True, 'import numpy as np\n'), ((27527, 27581), 'numpy.sqrt', 'np.sqrt', (['(self.SS / self.DoF * self.fit_params[i].covar)'], {}), '(self.SS / self.DoF * self.fit_params[i].covar)\n', (27534, 27581), True, 'import numpy as np\n'), ((28033, 28064), 'numpy.sum', 'np.sum', (['(self.all_residuals ** 2)'], {}), '(self.all_residuals ** 2)\n', (28039, 28064), True, 'import numpy as np\n'), ((4826, 4874), 'numpy.array', 'np.array', (["self.data['x'][dataset]"], {'dtype': '"""float"""'}), "(self.data['x'][dataset], dtype='float')\n", (4834, 4874), True, 'import numpy as np\n'), ((4892, 4940), 'numpy.array', 'np.array', (["self.data['y'][dataset]"], {'dtype': '"""float"""'}), "(self.data['y'][dataset], dtype='float')\n", (4900, 4940), True, 'import numpy as np\n'), ((9766, 9800), 'numpy.log', 'np.log', (['self.rates[self.phases[0]]'], {}), '(self.rates[self.phases[0]])\n', (9772, 9800), True, 'import numpy as np\n'), ((14178, 14194), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (14185, 14194), True, 'import numpy as np\n'), ((19006, 19047), 'numpy.concatenate', 'np.concatenate', (['[w for w in self.weights]'], {}), '([w for w in self.weights])\n', (19020, 19047), True, 'import numpy as np\n'), ((21993, 22110), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['self', 'x', 'y'], {'p0': 'p0', 'bounds': 'bounds', 'max_nfev': '(20000)', 'absolute_sigma': '(True)', 'sigma': 'self.fit_weights'}), '(self, x, y, p0=p0, bounds=bounds, max_nfev=20000,\n absolute_sigma=True, sigma=self.fit_weights)\n', (22011, 22110), False, 'from scipy import optimize\n'), ((22202, 22302), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['self', 'x', 'y'], {'p0': 'p0', 'maxfev': '(20000)', 'absolute_sigma': '(True)', 'sigma': 'self.fit_weights'}), '(self, x, y, p0=p0, maxfev=20000, absolute_sigma=True,\n sigma=self.fit_weights)\n', (22220, 22302), False, 'from scipy 
import optimize\n'), ((27274, 27300), 'scipy.stats.t.pdf', 't_distrb.pdf', (['ci', 'self.DoF'], {}), '(ci, self.DoF)\n', (27286, 27300), True, 'from scipy.stats import t as t_distrb\n'), ((28664, 28686), 'numpy.std', 'np.std', (['self.residuals'], {}), '(self.residuals)\n', (28670, 28686), True, 'import numpy as np\n'), ((32719, 32738), 'IPython.display.Math', 'Math', (['self.equation'], {}), '(self.equation)\n', (32723, 32738), False, 'from IPython.display import display, Math, Latex\n'), ((32906, 32935), 'numpy.sum', 'np.sum', (['((y_data - y_fit) ** 2)'], {}), '((y_data - y_fit) ** 2)\n', (32912, 32935), True, 'import numpy as np\n'), ((34181, 34201), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (34196, 34201), True, 'import numpy as np\n'), ((34575, 34590), 'numpy.log', 'np.log', (['kin_raw'], {}), '(kin_raw)\n', (34581, 34590), True, 'import numpy as np\n'), ((10141, 10168), 'numpy.argmin', 'np.argmin', (["self.rates['k1']"], {}), "(self.rates['k1'])\n", (10150, 10168), True, 'import numpy as np\n'), ((23301, 23318), 'numpy.max', 'np.max', (['self.x[i]'], {}), '(self.x[i])\n', (23307, 23318), True, 'import numpy as np\n'), ((32220, 32257), 'inspect.getfullargspec', 'inspect.getfullargspec', (['self.fit_func'], {}), '(self.fit_func)\n', (32242, 32257), False, 'import inspect\n'), ((34593, 34613), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (34608, 34613), True, 'import numpy as np\n'), ((13562, 13611), 'numpy.log', 'np.log', (['((1.0 - fraction_folded) / fraction_folded)'], {}), '((1.0 - fraction_folded) / fraction_folded)\n', (13568, 13611), True, 'import numpy as np\n'), ((32953, 32968), 'numpy.mean', 'np.mean', (['y_data'], {}), '(y_data)\n', (32960, 32968), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import binascii
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from numpy.ma.testutils import assert_equal
'''
spv.py
Act as an SPV client to test SPV server functionality
This is not yet a complete suite. It was added to test inclusion of ancestors
in connection filters.
'''
class BaseNode(NodeConnCB):
    """Minimal P2P peer: tracks its connection and answers/records pings."""
    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        # last pong message received; nonce 0 until one arrives
        self.last_pong = msg_pong(0)
        self.sleep_time = 0.1
    def add_connection(self, conn):
        self.connection = conn
    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)
    def on_pong(self, conn, message):
        self.last_pong = message
    # Syncing helpers
    def sync(self, test_function, timeout=30):
        """Poll test_function under mininode_lock until it is truthy or
        timeout (seconds) elapses; raises AssertionError on timeout."""
        while timeout > 0:
            with mininode_lock:
                if test_function():
                    return
            time.sleep(self.sleep_time)
            timeout -= self.sleep_time
        raise AssertionError("Sync failed to complete")
    def sync_with_ping(self):
        """Send a ping and block until the matching pong is received."""
        self.send_message(msg_ping(nonce=self.ping_counter))
        test_function = lambda: self.last_pong.nonce == self.ping_counter
        self.sync(test_function)
        self.ping_counter += 1
        return
class TestNode(BaseNode):
    """P2P peer that records incoming transaction inv announcements and
    exposes them as a blocking stream of txids."""
    def __init__(self):
        BaseNode.__init__(self)
        self.txids = []
        self.txidstream_isopen = False
        self.txidstream_pos = 0
    def on_inv(self, conn, message):
        # collect only transaction invs (type 1), as zero-padded hex txids
        if self.txidstream_isopen:
            for i in message.inv:
                if i.type == 1:
                    self.txids.append(('%x' % i.hash).zfill(64))
    def open_txidstream(self):
        self.txidstream_isopen = True
    def read_txidstream(self):
        """Block until the next txid is available and return it."""
        if not self.txidstream_isopen:
            raise AssertionError("TXID stream not opened for reading")
        # wait (via BaseNode.sync) until at least one unread txid exists
        self.sync(lambda: len(self.txids) >= self.txidstream_pos + 1)
        self.txidstream_pos += 1
        return self.txids[self.txidstream_pos - 1]
class SPVTest(BitcoinTestFramework):
    """Exercise SPV-server behaviour: bloom-filter matching, ancestor
    relay of matched transactions, and filtered mempool responses."""
    def __init__(self):
        # NOTE(review): the base-class __init__ is not invoked here —
        # confirm BitcoinTestFramework does not require it in this fork.
        self.num_nodes = 2
    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, self.num_nodes)
    def setup_network(self):
        self.nodes = []
        # NOTE(review): each extra_args element is normally one argv token;
        # "-allowfreetx=0 -debug=mempool" is a single string containing a
        # space — verify it is split (or split it into two list items).
        self.nodes.append(start_node(0, self.options.tmpdir, ["-allowfreetx=0 -debug=mempool"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-allowfreetx=0 -debug=mempool"]))
        connect_nodes(self.nodes[0], 1)
    def run_test(self):
        # Setup the p2p connections
        spv_node = TestNode()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], spv_node, services=0)
        spv_node.add_connection(connection)
        # Add a bunch of extra connections to our nodes[0] peer, so spv_node is
        # unlikely to get inv's due to trickling rather than filter matches
        other_nodes = []
        for i in range(0,25):
            other_nodes.append(BaseNode())
            other_connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], other_nodes[i])
            other_nodes[i].add_connection(other_connection)
        NetworkThread().start() # Start up network handling in another thread
        spv_node.wait_for_verack()
        # Generate some coins
        self.nodes[1].generate(110)
        sync_blocks(self.nodes[0:2])
        # Start collecting txid inv's
        spv_node.open_txidstream()
        # Generate an address and extract pubkeyhash
        address0 = self.nodes[0].getnewaddress()
        dummyoutputs = {}
        dummyoutputs[address0] = 1
        dummytxhex = self.nodes[0].createrawtransaction([], dummyoutputs)
        dummytx = self.nodes[0].decoderawtransaction(dummytxhex)
        dummyasm = dummytx["vout"][0]["scriptPubKey"]["asm"]
        # the 20-byte pubkey hash follows "OP_HASH160 " in the asm string
        pkhstart = dummyasm.index("OP_HASH160") + len("OP_HASH160") + 1
        pkhhex = dummyasm[pkhstart:pkhstart+20*2]
        pubkeyhash0 = bytearray.fromhex(pkhhex)
        # Load bloom filter to peer
        spvFilter = CBloomFilter(nFlags=CBloomFilter.ANCESTOR_UPDATE_BIT)
        spvFilter.insert(pubkeyhash0)
        spv_node.send_message(msg_filterload(spvFilter))
        # Test 1. Bloom filter positive match
        tx1_id = self.nodes[1].sendtoaddress(address0, 1)
        got_txid = spv_node.read_txidstream()
        assert_equal(got_txid, tx1_id) #tx1 pays us
        # Test 2. Ancestor relay and mempool response
        # Send a control tx that neither pays us, nor is an ancestor of a tx that pays us
        txextra_id = self.nodes[1].sendtoaddress(self.nodes[1].getnewaddress(), 15)
        # Build an ancestor chain where the grandchild pays us
        tx2grandparent_id = self.nodes[1].sendtoaddress(self.nodes[1].getnewaddress(), 25)
        tx2parent_input = {}
        tx2parent_input["txid"] = tx2grandparent_id
        tx2parent_input["vout"] = 0
        tx2parent_output = {}
        tx2parent_output[self.nodes[1].getnewaddress()] = 12.5
        tx2parent_output[self.nodes[1].getnewaddress()] = 12.48
        tx2parent = self.nodes[1].createrawtransaction([tx2parent_input], tx2parent_output)
        tx2parentsignresult = self.nodes[1].signrawtransaction(tx2parent)
        assert_equal(tx2parentsignresult["complete"], True)
        tx2parent_id = self.nodes[1].sendrawtransaction(tx2parentsignresult["hex"])
        # Match tx2 by its consumption of a specific UTXO
        spvFilter.insert(COutPoint(int(tx2parent_id,16),0))
        spv_node.send_message(msg_filterload(spvFilter))
        tx2_input = {}
        tx2_input["txid"] = tx2parent_id
        tx2_input["vout"] = 0
        tx2_output = {}
        tx2_output[self.nodes[0].getnewaddress()] = 2
        tx2_output[self.nodes[1].getnewaddress()] = 10.48
        tx2 = self.nodes[1].createrawtransaction([tx2_input], tx2_output)
        tx2signresult = self.nodes[1].signrawtransaction(tx2)
        assert_equal(tx2signresult["complete"], True)
        tx2_id = self.nodes[1].sendrawtransaction(tx2signresult["hex"])
        # Check that tx2 as well as all its ancestors are pushed to our SPV node
        relayed = [spv_node.read_txidstream(), spv_node.read_txidstream(), spv_node.read_txidstream()]
        expectedRelay = [tx2grandparent_id, tx2parent_id, tx2_id]
        assert_equal(sorted(relayed), sorted(expectedRelay))
        sync_mempools(self.nodes[0:2])
        spv_node.send_message(msg_mempool())
        # Check the complete filtered mempool returned by our peer
        pool = [spv_node.read_txidstream(), spv_node.read_txidstream(), spv_node.read_txidstream(), spv_node.read_txidstream()]
        expectedPool = [tx1_id, tx2grandparent_id, tx2parent_id, tx2_id]
        assert_equal(sorted(pool), sorted(expectedPool))
# Run the SPV functional test when executed as a script.
if __name__ == '__main__':
    SPVTest().main()
| [
"numpy.ma.testutils.assert_equal"
] | [((4725, 4755), 'numpy.ma.testutils.assert_equal', 'assert_equal', (['got_txid', 'tx1_id'], {}), '(got_txid, tx1_id)\n', (4737, 4755), False, 'from numpy.ma.testutils import assert_equal\n'), ((5603, 5654), 'numpy.ma.testutils.assert_equal', 'assert_equal', (["tx2parentsignresult['complete']", '(True)'], {}), "(tx2parentsignresult['complete'], True)\n", (5615, 5654), False, 'from numpy.ma.testutils import assert_equal\n'), ((6290, 6335), 'numpy.ma.testutils.assert_equal', 'assert_equal', (["tx2signresult['complete']", '(True)'], {}), "(tx2signresult['complete'], True)\n", (6302, 6335), False, 'from numpy.ma.testutils import assert_equal\n')] |
# RUN: %PYTHON %s
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import iree.jax
import iree.runtime
import jax
import jax.numpy as jnp
import numpy as np
# pytype thinks iree.jax is jax.
# pytype: disable=module-attr
TOLERANCE = {"rtol": 1e-6, "atol": 1e-6}
def normal(shape):
    """Draw standard-normal samples of the given shape, cast to float32."""
    samples = np.random.normal(0, 1, shape)
    return samples.astype(np.float32)
class SqrtNode:
  """Custom pytree node whose apply() computes x * sqrt(y * z)."""

  def __init__(self, x, y):
    self.x, self.y = x, y

  def apply(self, z):
    scaled = self.y * z
    return self.x * jnp.sqrt(scaled)

  def tree_flatten(self):
    # Children are the two leaves; there is no static metadata.
    children = (self.x, self.y)
    return children, None

  @classmethod
  def tree_unflatten(cls, aux_data, children):
    del aux_data  # unused: no static metadata was stored
    return cls(*children)
class SquareNode:
  """Custom pytree node whose apply() computes x * (y * z)**2."""

  def __init__(self, x, y):
    self.x, self.y = x, y

  def apply(self, z):
    scaled = self.y * z
    return self.x * scaled ** 2

  def tree_flatten(self):
    # Children are the two leaves; there is no static metadata.
    children = (self.x, self.y)
    return children, None

  @classmethod
  def tree_unflatten(cls, aux_data, children):
    del aux_data  # unused: no static metadata was stored
    return cls(*children)
class JAXFrontendTest(absltest.TestCase):
  """Tests for the IREE JAX frontend (aot compilation and jit interop)."""

  def test_aot_pytree(self):
    """AOT-compiles a function whose first argument is a pytree of params."""

    def pytree_func(params, x):
      # NOTE(review): jnp.max(..., 0) reduces over axis 0; if an elementwise
      # ReLU was intended here, jnp.maximum would be it -- confirm.
      return jnp.max(jnp.matmul(x, params["w"]) + params["b"], 0)

    trace_args = [
        {
            "w": jnp.zeros((32, 8)),
            "b": jnp.zeros((8,))
        },
        jnp.zeros((1, 32)),
    ]
    # Successful compilation (no exception raised) is the assertion here.
    binary = iree.jax.aot(pytree_func, *trace_args, target_backends=["cpu"])

  def test_jit_pytree_return(self):
    """A jitted function should preserve the pytree structure of its result."""

    @iree.jax.jit
    def apply_sqrt(pytree):
      return jax.tree_map(jnp.sqrt, pytree)

    np.random.seed(0)
    # Deliberately irregular nesting of lists, dicts, and tuples.
    input_tree = {
        "a": [
            normal((2, 3)),
            {
                "b": normal(3)
            },
        ],
        "c": (
            {
                "d": [normal(2), normal(3)]
            },
            (normal(1), normal(4)),
        )
    }

    expected = jax.tree_map(jnp.sqrt, input_tree)
    expected_arrays, expected_tree = jax.tree_flatten(expected)
    result = apply_sqrt(input_tree)
    result_arrays, result_tree = jax.tree_flatten(result)
    # Same tree structure ...
    self.assertEqual(expected_tree, result_tree)
    # ... and numerically matching leaves.
    for expected_array, result_array in zip(expected_arrays, result_arrays):
      np.testing.assert_allclose(expected_array, result_array, **TOLERANCE)

  def test_iree_jit_of_iree_jit(self):
    """An iree.jax.jit function may call another iree.jax.jit function."""

    @iree.jax.jit
    def add(a, b):
      return a + b

    @iree.jax.jit
    def mul_two(a):
      return add(a, a)

    self.assertEqual(mul_two(3), 6)

  def test_jax_jit_of_iree_jit(self):
    """A jax.jit function may call an iree.jax.jit function."""

    @iree.jax.jit
    def add(a, b):
      return a + b

    @jax.jit
    def mul_two(a):
      return add(a, a)

    self.assertEqual(mul_two(3), 6)

  def test_iree_jit_of_jax_jit(self):
    """An iree.jax.jit function may call a jax.jit function."""

    @jax.jit
    def add(a, b):
      return a + b

    @iree.jax.jit
    def mul_two(a):
      return add(a, a)

    self.assertEqual(mul_two(3), 6)

  def test_jit_pytree_method(self):
    """jit works when a custom pytree node is passed and a method is invoked."""

    @iree.jax.jit
    def apply_node(node, z):
      return node.apply(z)

    # NOTE(review): assumes ._function exposes the original (uncompiled)
    # Python callable, used here as the reference -- confirm against iree.jax.
    expected_sqrt = apply_node._function(SqrtNode(2, 3), 4)
    compiled_sqrt = apply_node(SqrtNode(2, 3), 4)
    np.testing.assert_allclose(compiled_sqrt, expected_sqrt, **TOLERANCE)

    expected_square = apply_node._function(SquareNode(2, 3), 4)
    compiled_square = apply_node(SquareNode(2, 3), 4)
    np.testing.assert_allclose(compiled_square, expected_square, **TOLERANCE)
if __name__ == "__main__":
  # Register the custom nodes so JAX can flatten/unflatten them in the tests.
  jax.tree_util.register_pytree_node_class(SqrtNode)
  jax.tree_util.register_pytree_node_class(SquareNode)
  absltest.main()
| [
"jax.tree_flatten",
"absl.testing.absltest.main",
"numpy.random.seed",
"jax.numpy.zeros",
"jax.tree_util.register_pytree_node_class",
"jax.numpy.matmul",
"numpy.random.normal",
"numpy.testing.assert_allclose",
"jax.numpy.sqrt",
"jax.tree_map"
] | [((3819, 3869), 'jax.tree_util.register_pytree_node_class', 'jax.tree_util.register_pytree_node_class', (['SqrtNode'], {}), '(SqrtNode)\n', (3859, 3869), False, 'import jax\n'), ((3872, 3924), 'jax.tree_util.register_pytree_node_class', 'jax.tree_util.register_pytree_node_class', (['SquareNode'], {}), '(SquareNode)\n', (3912, 3924), False, 'import jax\n'), ((3927, 3942), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3940, 3942), False, 'from absl.testing import absltest\n'), ((2015, 2032), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2029, 2032), True, 'import numpy as np\n'), ((2322, 2356), 'jax.tree_map', 'jax.tree_map', (['jnp.sqrt', 'input_tree'], {}), '(jnp.sqrt, input_tree)\n', (2334, 2356), False, 'import jax\n'), ((2394, 2420), 'jax.tree_flatten', 'jax.tree_flatten', (['expected'], {}), '(expected)\n', (2410, 2420), False, 'import jax\n'), ((2490, 2514), 'jax.tree_flatten', 'jax.tree_flatten', (['result'], {}), '(result)\n', (2506, 2514), False, 'import jax\n'), ((3521, 3590), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['compiled_sqrt', 'expected_sqrt'], {}), '(compiled_sqrt, expected_sqrt, **TOLERANCE)\n', (3547, 3590), True, 'import numpy as np\n'), ((3714, 3787), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['compiled_square', 'expected_square'], {}), '(compiled_square, expected_square, **TOLERANCE)\n', (3740, 3787), True, 'import numpy as np\n'), ((855, 884), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'shape'], {}), '(0, 1, shape)\n', (871, 884), True, 'import numpy as np\n'), ((1024, 1044), 'jax.numpy.sqrt', 'jnp.sqrt', (['(self.y * z)'], {}), '(self.y * z)\n', (1032, 1044), True, 'import jax.numpy as jnp\n'), ((1779, 1797), 'jax.numpy.zeros', 'jnp.zeros', (['(1, 32)'], {}), '((1, 32))\n', (1788, 1797), True, 'import jax.numpy as jnp\n'), ((1979, 2009), 'jax.tree_map', 'jax.tree_map', (['jnp.sqrt', 'pytree'], {}), '(jnp.sqrt, pytree)\n', (1991, 2009), 
False, 'import jax\n'), ((2648, 2717), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['expected_array', 'result_array'], {}), '(expected_array, result_array, **TOLERANCE)\n', (2674, 2717), True, 'import numpy as np\n'), ((1707, 1725), 'jax.numpy.zeros', 'jnp.zeros', (['(32, 8)'], {}), '((32, 8))\n', (1716, 1725), True, 'import jax.numpy as jnp\n'), ((1744, 1759), 'jax.numpy.zeros', 'jnp.zeros', (['(8,)'], {}), '((8,))\n', (1753, 1759), True, 'import jax.numpy as jnp\n'), ((1615, 1641), 'jax.numpy.matmul', 'jnp.matmul', (['x', "params['w']"], {}), "(x, params['w'])\n", (1625, 1641), True, 'import jax.numpy as jnp\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" evaluation scripts for KBC and pathQuery tasks """
import json
import logging
import collections
import numpy as np
# Emit timestamped, module-tagged log lines for the whole evaluation run.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
    datefmt='%m/%d/%Y %H:%M:%S',
    level=logging.INFO)
logger = logging.getLogger(__name__)
def kbc_batch_evaluation(eval_i, all_examples, batch_results, tt):
    """Rank the gold heads/tails for one batch of KBC predictions.

    Args:
        eval_i: global index of the first example covered by this batch.
        all_examples: sequence of examples, each carrying ``token_ids`` of
            exactly (h, r, t) and a ``mask_type`` of "MASK_HEAD"/"MASK_TAIL".
        batch_results: per-example score vectors over the entity vocabulary.
        tt: per-relation true-triple index: ``tt[r]['ts'][h]`` lists all true
            tails for (h, r) and ``tt[r]['hs'][t]`` all true heads for (r, t).

    Returns:
        (ranks, f_ranks): numpy arrays of raw and filtered 1-based ranks,
        head ranks first, then tail ranks.
    """
    r_hts_idx = collections.defaultdict(list)
    scores_head = collections.defaultdict(list)
    scores_tail = collections.defaultdict(list)
    batch_r_hts_cnt = 0
    b_size = len(batch_results)
    for j in range(b_size):
        result = batch_results[j]
        i = eval_i + j
        example = all_examples[i]
        assert len(example.token_ids
                   ) == 3, "For kbc task each example consists of 3 tokens"
        h, r, t = example.token_ids
        _mask_type = example.mask_type
        # Examples are assumed to arrive as (head-masked, tail-masked) pairs,
        # so each (h, t) pair is recorded only once, at the even index.
        if i % 2 == 0:
            r_hts_idx[r].append((h, t))
            batch_r_hts_cnt += 1
        if _mask_type == "MASK_HEAD":
            scores_head[(r, t)] = result
        elif _mask_type == "MASK_TAIL":
            scores_tail[(r, h)] = result
        else:
            raise ValueError("Unknown mask type in prediction example:%d" % i)

    def _rank_of(scores, target):
        # 1-based rank of `target` when scores are sorted in descending order.
        sortidx = np.argsort(scores)[::-1]
        return np.where(sortidx == target)[0][0] + 1

    rank = {}
    f_rank = {}
    for r, hts in r_hts_idx.items():
        r_rank = {'head': [], 'tail': []}
        r_f_rank = {'head': [], 'tail': []}
        for h, t in hts:
            # Raw tail rank, then the filtered rank with every *other* known
            # true tail masked out before re-ranking.
            scores_t = scores_tail[(r, h)][:]
            r_rank['tail'].append(_rank_of(scores_t, t))
            for idx in (x for x in tt[r]['ts'][h] if x != t):
                # np.inf: the np.Inf alias was removed in NumPy 2.0.
                scores_t[idx] = -np.inf
            r_f_rank['tail'].append(_rank_of(scores_t, t))

            scores_h = scores_head[(r, t)][:]
            r_rank['head'].append(_rank_of(scores_h, h))
            for idx in (x for x in tt[r]['hs'][t] if x != h):
                scores_h[idx] = -np.inf
            r_f_rank['head'].append(_rank_of(scores_h, h))
        rank[r] = r_rank
        f_rank[r] = r_f_rank

    h_pos = [p for k in rank.keys() for p in rank[k]['head']]
    t_pos = [p for k in rank.keys() for p in rank[k]['tail']]
    f_h_pos = [p for k in f_rank.keys() for p in f_rank[k]['head']]
    f_t_pos = [p for k in f_rank.keys() for p in f_rank[k]['tail']]

    ranks = np.asarray(h_pos + t_pos)
    f_ranks = np.asarray(f_h_pos + f_t_pos)
    return ranks, f_ranks
def pathquery_batch_evaluation(eval_i, all_examples, batch_results,
                               sen_negli_dict, trivial_sen_set):
    """Score one batch of pathquery predictions.

    For every prediction the gold label is ranked inside its candidate set and
    the mean quantile is derived from that rank; trivial sentences get -1.

    Returns:
        (mqs, ranks): parallel lists with one entry per example in the batch.
    """
    mqs = []
    ranks = []
    for j, result in enumerate(batch_results):
        example = all_examples[eval_i + j]
        token_ids, mask_type = example
        assert mask_type in ["MASK_TAIL", "MASK_HEAD"
                             ], " Unknown mask type in pathquery evaluation"
        # The gold label sits at whichever end of the path was masked.
        label = token_ids[-1] if mask_type == "MASK_TAIL" else token_ids[0]
        sen = " ".join(str(x) for x in token_ids)
        if sen in trivial_sen_set:
            # Trivial sentences carry no signal; flag them for later filtering.
            mq = rank = -1
        else:
            cand_set = sen_negli_dict[sen]  # negatives plus the gold label
            assert label in set(
                cand_set), "predict label must be in the candidate set"
            cand_idx = np.sort(np.array(cand_set))
            # Candidate vocabulary ordered best-score-first.
            ordered = cand_idx[np.argsort(result[cand_idx])[::-1]]
            rank = np.argwhere(ordered == label).ravel().tolist()[0] + 1
            mq = (len(cand_set) - rank) / (len(cand_set) - 1.0)
        mqs.append(mq)
        ranks.append(rank)
    return mqs, ranks
def compute_kbc_metrics(rank_li, frank_li, output_evaluation_result_file):
    """Fold per-batch KBC ranks into MRR / Hits@K metrics and dump them as JSON.

    Args:
        rank_li: list of raw-rank arrays collected over all batches.
        frank_li: list of filtered-rank arrays collected over all batches.
        output_evaluation_result_file: path the JSON summary is written to.

    Returns:
        dict with raw ('mrr', 'hits*') and filtered ('fmrr', 'fhits*') metrics.
    """
    raw = np.array(rank_li).ravel()
    filtered = np.array(frank_li).ravel()
    eval_result = {
        'mrr': np.mean(1.0 / raw),
        'hits1': np.mean(raw <= 1.0),
        'hits3': np.mean(raw <= 3.0),
        'hits10': np.mean(raw <= 10.0),
        'fmrr': np.mean(1.0 / filtered),
        'fhits1': np.mean(filtered <= 1.0),
        'fhits3': np.mean(filtered <= 3.0),
        'fhits10': np.mean(filtered <= 10.0),
    }
    with open(output_evaluation_result_file, "w") as fw:
        fw.write(json.dumps(eval_result, indent=4) + "\n")
    return eval_result
def compute_pathquery_metrics(mq_li, rank_li, output_evaluation_result_file):
    """Fold per-batch pathquery MQ/rank results into final metrics.

    Entries marked -1 (trivial sentences) are excluded before aggregation.

    Args:
        mq_li: list of per-batch mean-quantile lists.
        rank_li: list of per-batch rank lists (-1 marks a trivial sentence).
        output_evaluation_result_file: path the JSON summary is written to.

    Returns:
        dict with 'fcnt' (non-trivial count), 'mq', 'mr' and 'fhits10'.
    """
    all_ranks = np.array(rank_li).ravel()
    keep = np.where(all_ranks != -1)
    kept_ranks = all_ranks[keep]
    kept_mq = np.array(mq_li).ravel()[keep]
    eval_result = {
        'fcnt': kept_ranks.size,
        'mq': np.mean(kept_mq),
        'mr': np.mean(kept_ranks),
        'fhits10': np.mean(kept_ranks <= 10.0)
    }
    with open(output_evaluation_result_file, "w") as fw:
        fw.write(json.dumps(eval_result, indent=4) + "\n")
    return eval_result
| [
"logging.basicConfig",
"numpy.asarray",
"json.dumps",
"collections.defaultdict",
"numpy.argsort",
"numpy.mean",
"numpy.where",
"numpy.array",
"numpy.argwhere",
"logging.getLogger"
] | [((733, 874), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (752, 874), False, 'import logging\n'), ((887, 914), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (904, 914), False, 'import logging\n'), ((1000, 1029), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1023, 1029), False, 'import collections\n'), ((1048, 1077), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1071, 1077), False, 'import collections\n'), ((1096, 1125), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1119, 1125), False, 'import collections\n'), ((3237, 3262), 'numpy.asarray', 'np.asarray', (['(h_pos + t_pos)'], {}), '(h_pos + t_pos)\n', (3247, 3262), True, 'import numpy as np\n'), ((3277, 3306), 'numpy.asarray', 'np.asarray', (['(f_h_pos + f_t_pos)'], {}), '(f_h_pos + f_t_pos)\n', (3287, 3306), True, 'import numpy as np\n'), ((4940, 4964), 'numpy.mean', 'np.mean', (['(1.0 / rank_rets)'], {}), '(1.0 / rank_rets)\n', (4947, 4964), True, 'import numpy as np\n'), ((4976, 5001), 'numpy.mean', 'np.mean', (['(1.0 / frank_rets)'], {}), '(1.0 / frank_rets)\n', (4983, 5001), True, 'import numpy as np\n'), ((5015, 5040), 'numpy.mean', 'np.mean', (['(rank_rets <= 1.0)'], {}), '(rank_rets <= 1.0)\n', (5022, 5040), True, 'import numpy as np\n'), ((5053, 5078), 'numpy.mean', 'np.mean', (['(rank_rets <= 3.0)'], {}), '(rank_rets <= 3.0)\n', (5060, 5078), True, 'import numpy as np\n'), ((5092, 5118), 'numpy.mean', 'np.mean', (['(rank_rets <= 10.0)'], {}), '(rank_rets <= 10.0)\n', (5099, 5118), True, 'import numpy as np\n'), ((5155, 5181), 'numpy.mean', 'np.mean', (['(frank_rets <= 1.0)'], {}), '(frank_rets <= 
1.0)\n', (5162, 5181), True, 'import numpy as np\n'), ((5195, 5221), 'numpy.mean', 'np.mean', (['(frank_rets <= 3.0)'], {}), '(frank_rets <= 3.0)\n', (5202, 5221), True, 'import numpy as np\n'), ((5236, 5263), 'numpy.mean', 'np.mean', (['(frank_rets <= 10.0)'], {}), '(frank_rets <= 10.0)\n', (5243, 5263), True, 'import numpy as np\n'), ((5845, 5870), 'numpy.where', 'np.where', (['(rank_rets != -1)'], {}), '(rank_rets != -1)\n', (5853, 5870), True, 'import numpy as np\n'), ((6026, 6049), 'numpy.mean', 'np.mean', (['non_trivial_mq'], {}), '(non_trivial_mq)\n', (6033, 6049), True, 'import numpy as np\n'), ((6059, 6089), 'numpy.mean', 'np.mean', (['non_trivial_eval_rets'], {}), '(non_trivial_eval_rets)\n', (6066, 6089), True, 'import numpy as np\n'), ((6100, 6136), 'numpy.mean', 'np.mean', (['(1.0 / non_trivial_eval_rets)'], {}), '(1.0 / non_trivial_eval_rets)\n', (6107, 6136), True, 'import numpy as np\n'), ((6151, 6189), 'numpy.mean', 'np.mean', (['(non_trivial_eval_rets <= 10.0)'], {}), '(non_trivial_eval_rets <= 10.0)\n', (6158, 6189), True, 'import numpy as np\n'), ((4860, 4877), 'numpy.array', 'np.array', (['rank_li'], {}), '(rank_li)\n', (4868, 4877), True, 'import numpy as np\n'), ((4903, 4921), 'numpy.array', 'np.array', (['frank_li'], {}), '(frank_li)\n', (4911, 4921), True, 'import numpy as np\n'), ((5808, 5825), 'numpy.array', 'np.array', (['rank_li'], {}), '(rank_li)\n', (5816, 5825), True, 'import numpy as np\n'), ((2088, 2108), 'numpy.argsort', 'np.argsort', (['scores_t'], {}), '(scores_t)\n', (2098, 2108), True, 'import numpy as np\n'), ((2364, 2384), 'numpy.argsort', 'np.argsort', (['scores_t'], {}), '(scores_t)\n', (2374, 2384), True, 'import numpy as np\n'), ((2534, 2554), 'numpy.argsort', 'np.argsort', (['scores_h'], {}), '(scores_h)\n', (2544, 2554), True, 'import numpy as np\n'), ((2810, 2830), 'numpy.argsort', 'np.argsort', (['scores_h'], {}), '(scores_h)\n', (2820, 2830), True, 'import numpy as np\n'), ((4261, 4279), 'numpy.array', 'np.array', 
(['cand_set'], {}), '(cand_set)\n', (4269, 4279), True, 'import numpy as np\n'), ((4410, 4430), 'numpy.argsort', 'np.argsort', (['cand_ret'], {}), '(cand_ret)\n', (4420, 4430), True, 'import numpy as np\n'), ((5560, 5593), 'json.dumps', 'json.dumps', (['eval_result'], {'indent': '(4)'}), '(eval_result, indent=4)\n', (5570, 5593), False, 'import json\n'), ((5937, 5952), 'numpy.array', 'np.array', (['mq_li'], {}), '(mq_li)\n', (5945, 5952), True, 'import numpy as np\n'), ((6388, 6421), 'json.dumps', 'json.dumps', (['eval_result'], {'indent': '(4)'}), '(eval_result, indent=4)\n', (6398, 6421), False, 'import json\n'), ((2149, 2173), 'numpy.where', 'np.where', (['(sortidx_t == t)'], {}), '(sortidx_t == t)\n', (2157, 2173), True, 'import numpy as np\n'), ((2427, 2451), 'numpy.where', 'np.where', (['(sortidx_t == t)'], {}), '(sortidx_t == t)\n', (2435, 2451), True, 'import numpy as np\n'), ((2595, 2619), 'numpy.where', 'np.where', (['(sortidx_h == h)'], {}), '(sortidx_h == h)\n', (2603, 2619), True, 'import numpy as np\n'), ((2873, 2897), 'numpy.where', 'np.where', (['(sortidx_h == h)'], {}), '(sortidx_h == h)\n', (2881, 2897), True, 'import numpy as np\n'), ((4500, 4528), 'numpy.argwhere', 'np.argwhere', (['(pred_y == label)'], {}), '(pred_y == label)\n', (4511, 4528), True, 'import numpy as np\n')] |
import scipy.spatial
import numpy
import PIL.Image
import PIL.ImageDraw
# Change to desired values
width_px = 1920
height_px = 1080
sample_file = "sample.png"
result_file = "result.png"
triangle_frequency = 800  # number of random seed points for the triangulation
resample_filter = PIL.Image.BILINEAR
# Generate random 2D coordinates with range 0 to 1 and scale/translate them
# to span 4x the target width/height, offset by one screen so triangles
# overhang every edge of the canvas.
points = numpy.random.rand(triangle_frequency, 2)
points[:,0] = points[:,0] * width_px * 4 - width_px
points[:,1] = points[:,1] * height_px * 4 - height_px
# Run the Delaunay algorithm on the points to create triangles
delaunay = scipy.spatial.Delaunay(points)
triangles = delaunay.points[delaunay.simplices]
# Create image object for resulting image, at 2x size for later downsampling
result = PIL.Image.new("RGB", (width_px * 2, height_px * 2))
draw = PIL.ImageDraw.Draw(result)
# Open sample file to sample colors from it
sample = PIL.Image.open(sample_file)
sample_pixels = sample.load()
# Calculate scale between the sample and the result
x_scale = sample.size[0] / result.size[0]
y_scale = sample.size[1] / result.size[1]
# Append all triangles to image object, each filled with the sample color
# found at its centroid
for triangle in triangles:
    # Flatten the triangle's vertices into [x0, y0, x1, y1, x2, y2]
    points = []
    for point in triangle:
        for number in point:
            points.append(number)
    # Centroid of the triangle, mapped into sample-image coordinates
    center_x = sum(points[0::2]) / (len(points) / 2) * x_scale
    center_y = sum(points[1::2]) / (len(points) / 2) * y_scale
    # Clamp the centroid into the sample image bounds before sampling
    if (center_x < 0):
        center_x = 0
    if (center_y < 0):
        center_y = 0
    if (center_x >= sample.size[0]):
        center_x = sample.size[0] - 1
    if (center_y >= sample.size[1]):
        center_y = sample.size[1] - 1
    color = sample_pixels[center_x, center_y]
    draw.polygon(points, fill=color)
# Downsize the image to the desired size and apply sample filter for anti-aliasing
result = result.resize((width_px, height_px), resample=resample_filter)
# Save file to the desired path
result.save(result_file)
"numpy.random.rand"
] | [((335, 375), 'numpy.random.rand', 'numpy.random.rand', (['triangle_frequency', '(2)'], {}), '(triangle_frequency, 2)\n', (352, 375), False, 'import numpy\n')] |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the keras_common module."""
from __future__ import absolute_import
from __future__ import print_function
from mock import Mock
import numpy as np
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.resnet.keras import keras_common
# Keep test output quiet: surface only TF errors.
tf.logging.set_verbosity(tf.logging.ERROR)
class KerasCommonTests(tf.test.TestCase):
  """Tests for keras_common."""

  @classmethod
  def setUpClass(cls):  # pylint: disable=invalid-name
    super(KerasCommonTests, cls).setUpClass()

  def test_build_stats(self):
    """build_stats extracts metrics from a dense categorical-accuracy history."""
    history = self._build_history(1.145, cat_accuracy=.99988)
    eval_output = self._build_eval_output(.56432111, 5.990)
    stats = keras_common.build_stats(history, eval_output)
    self.assertEqual(1.145, stats['loss'])
    self.assertEqual(.99988, stats['training_accuracy_top_1'])
    self.assertEqual(.56432111, stats['accuracy_top_1'])
    self.assertEqual(5.990, stats['eval_loss'])

  def test_build_stats_sparse(self):
    """Same extraction, but with sparse categorical accuracy in the history."""
    history = self._build_history(1.145, cat_accuracy_sparse=.99988)
    eval_output = self._build_eval_output(.928, 1.9844)
    stats = keras_common.build_stats(history, eval_output)
    self.assertEqual(1.145, stats['loss'])
    self.assertEqual(.99988, stats['training_accuracy_top_1'])
    self.assertEqual(.928, stats['accuracy_top_1'])
    self.assertEqual(1.9844, stats['eval_loss'])

  def _build_history(self, loss, cat_accuracy=None,
                     cat_accuracy_sparse=None):
    """Build a mock training-history object holding the given metric values."""
    history_p = Mock()
    history = {}
    history_p.history = history
    history['loss'] = [np.float64(loss)]
    if cat_accuracy:
      history['categorical_accuracy'] = [np.float64(cat_accuracy)]
    if cat_accuracy_sparse:
      history['sparse_categorical_accuracy'] = [np.float64(cat_accuracy_sparse)]
    return history_p

  def _build_eval_output(self, top_1, eval_loss):
    """Return a two-element eval output list: [eval_loss, top_1]."""
    eval_output = [np.float64(eval_loss), np.float64(top_1)]
    return eval_output
| [
"numpy.float64",
"mock.Mock",
"official.resnet.keras.keras_common.build_stats",
"tensorflow.logging.set_verbosity"
] | [((960, 1002), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (984, 1002), True, 'import tensorflow as tf\n'), ((1362, 1408), 'official.resnet.keras.keras_common.build_stats', 'keras_common.build_stats', (['history', 'eval_output'], {}), '(history, eval_output)\n', (1386, 1408), False, 'from official.resnet.keras import keras_common\n'), ((1798, 1844), 'official.resnet.keras.keras_common.build_stats', 'keras_common.build_stats', (['history', 'eval_output'], {}), '(history, eval_output)\n', (1822, 1844), False, 'from official.resnet.keras import keras_common\n'), ((2171, 2177), 'mock.Mock', 'Mock', ([], {}), '()\n', (2175, 2177), False, 'from mock import Mock\n'), ((2250, 2266), 'numpy.float64', 'np.float64', (['loss'], {}), '(loss)\n', (2260, 2266), True, 'import numpy as np\n'), ((2557, 2578), 'numpy.float64', 'np.float64', (['eval_loss'], {}), '(eval_loss)\n', (2567, 2578), True, 'import numpy as np\n'), ((2580, 2597), 'numpy.float64', 'np.float64', (['top_1'], {}), '(top_1)\n', (2590, 2597), True, 'import numpy as np\n'), ((2330, 2354), 'numpy.float64', 'np.float64', (['cat_accuracy'], {}), '(cat_accuracy)\n', (2340, 2354), True, 'import numpy as np\n'), ((2432, 2463), 'numpy.float64', 'np.float64', (['cat_accuracy_sparse'], {}), '(cat_accuracy_sparse)\n', (2442, 2463), True, 'import numpy as np\n')] |
#hps_test.py
import numpy as np
import scipy.signal as sigpy
import matplotlib.pyplot as plt
from pyhelpertool.HelpersSignal import PitchDetechtion
# Exercise harmonic-product-spectrum pitch detection on a synthetic signal
# built from weighted odd harmonics of a known fundamental.
fs = 2e6       # sample rate [Hz]
fn = 19e3      # fundamental frequency of the test signal [Hz]
N = 4096       # number of samples
dt = 1 / fs    # sample period [s]
df = fs / N    # frequency-bin width [Hz]
t = np.linspace(0, N * dt, N)

# Amplitude weights (fundamental deliberately weak at 0.1)
w = np.array([.1, 1, 1, 1, 1, 1, 1])
# Odd-harmonic frequency multipliers
fr = np.array([1, 3, 5, 7, 9, 11, 13])
f = np.arange(0, fs, df)

# Creating test signal by summing up different frequencies
y = np.zeros(N)
for weight, freq in zip(w, fr):
    y += weight * np.sin(2 * np.pi * freq * fn * t)

# Detect fundamental frequency using harmonic product spectrum
f2 = PitchDetechtion(y, fs)

# Get nearest frequency-axis value by minimal distance
fc = f[np.argmin(np.abs(f - fn))]

# BUG FIX: fn is in Hz, so scale by 1e-3 before labelling it kHz
# (previously this printed "19000.000 kHz").
print('Fundamental frequency : %3.3f kHz' % (fn * 1e-3))
print('Frequency domain resolution %2.3f Hz' % (df))
print('Nearest value %2.3f Hz' % (fc))
print('Fundamental frequency detected : %3.3f kHz' % (f2 * 1e-3))
| [
"pyhelpertool.HelpersSignal.PitchDetechtion",
"numpy.abs",
"numpy.zeros",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.linspace"
] | [((203, 228), 'numpy.linspace', 'np.linspace', (['(0)', '(N * dt)', 'N'], {}), '(0, N * dt, N)\n', (214, 228), True, 'import numpy as np\n'), ((254, 287), 'numpy.array', 'np.array', (['[0.1, 1, 1, 1, 1, 1, 1]'], {}), '([0.1, 1, 1, 1, 1, 1, 1])\n', (262, 287), True, 'import numpy as np\n'), ((320, 353), 'numpy.array', 'np.array', (['[1, 3, 5, 7, 9, 11, 13]'], {}), '([1, 3, 5, 7, 9, 11, 13])\n', (328, 353), True, 'import numpy as np\n'), ((362, 382), 'numpy.arange', 'np.arange', (['(0)', 'fs', 'df'], {}), '(0, fs, df)\n', (371, 382), True, 'import numpy as np\n'), ((450, 461), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (458, 461), True, 'import numpy as np\n'), ((622, 644), 'pyhelpertool.HelpersSignal.PitchDetechtion', 'PitchDetechtion', (['y', 'fs'], {}), '(y, fs)\n', (637, 644), False, 'from pyhelpertool.HelpersSignal import PitchDetechtion\n'), ((516, 549), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq * fn * t)'], {}), '(2 * np.pi * freq * fn * t)\n', (522, 549), True, 'import numpy as np\n'), ((706, 720), 'numpy.abs', 'np.abs', (['(f - fn)'], {}), '(f - fn)\n', (712, 720), True, 'import numpy as np\n')] |
import Image
import numpy as np
import os
import scipy
from scipy import misc
from scipy.misc import imsave
def load_image(infilename):
    """Open *infilename* with PIL and return its pixels as an int32 array."""
    image = Image.open(infilename)
    image.load()  # force the lazy PIL loader to actually read the pixel data
    return np.asarray(image, dtype="int32")
# Shrink every Type_3 image under test-100/ and save the copies under
# test-100-reduced/.
train_data = []
base = "/home/ubuntu/work/github/rajdeepd/neuralnetwork-programming/ch02/data/"
base_path = base + "/test-100/Type_3/"
base_red_path = base + "/test-100-reduced/Type_3/"
list_files = os.listdir(base_path)

for l in list_files:
    p = base_path + l
    try:
        arr = load_image(p)
        # NOTE(review): scipy.misc.imresize/imsave were removed in SciPy >= 1.3,
        # so this only runs on old SciPy; imresize(arr, 10) presumably scales to
        # 10% of the original size -- confirm against the installed version.
        arr_resized = misc.imresize(arr, 10)
        print(l)
        imsave(base_red_path + l, arr_resized)
    except IOError as err:
        print("IO error: {0}".format(err))
| [
"numpy.asarray",
"scipy.misc.imresize",
"scipy.misc.imsave",
"Image.open",
"os.listdir"
] | [((451, 472), 'os.listdir', 'os.listdir', (['base_path'], {}), '(base_path)\n', (461, 472), False, 'import os\n'), ((149, 171), 'Image.open', 'Image.open', (['infilename'], {}), '(infilename)\n', (159, 171), False, 'import Image\n'), ((200, 230), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': '"""int32"""'}), "(img, dtype='int32')\n", (210, 230), True, 'import numpy as np\n'), ((575, 597), 'scipy.misc.imresize', 'misc.imresize', (['arr', '(10)'], {}), '(arr, 10)\n', (588, 597), False, 'from scipy import misc\n'), ((623, 661), 'scipy.misc.imsave', 'imsave', (['(base_red_path + l)', 'arr_resized'], {}), '(base_red_path + l, arr_resized)\n', (629, 661), False, 'from scipy.misc import imsave\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.