code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 10 11:44:58 2021
@author: zongsing.huang
"""
import numpy as np
def t7():
COST = np.array([6, 6, 4, 4, 4, 3, 3])
M = 3 # 機台數
N = 7 # 工單數
F_ideal = 10
return COST, M, N, F_ideal
def t9():
COST = np.array([9, 7, 12, 15, 8, 10, 11, 13, 7])
M = 4 # 機台數
N = 9 # 工單數
F_ideal = 10
return COST, M, N, F_ideal
def t10():
COST = np.array([3, 2, 6, 4, 5, 7, 8, 6, 2, 6])
M = 2 # 機台數
N = 10 # 工單數
F_ideal = 25
return COST, M, N, F_ideal
def t30():
COST = np.array([ 3, 2, 6, 4, 5, 7, 9, 13, 4, 12,
10, 8, 22, 11, 8, 26, 14, 6, 17, 27,
11, 17, 26, 16, 7, 23, 15, 18, 15, 13])
M = 10 # 機台數
N = 30 # 工單數
F_ideal = 41
return COST, M, N, F_ideal
def OBJ(X, COST, M, N):
if X.ndim==1:
X = X.reshape(1, -1)
P = X.shape[0]
F = np.zeros([P, M])
for i in range(M):
subset = X==i
for j in range(P):
F[j, i] = F[j, i] + COST[subset[j]].sum()
F = F.max(axis=1)
return F | [
"numpy.zeros",
"numpy.array"
] | [((135, 166), 'numpy.array', 'np.array', (['[6, 6, 4, 4, 4, 3, 3]'], {}), '([6, 6, 4, 4, 4, 3, 3])\n', (143, 166), True, 'import numpy as np\n'), ((274, 316), 'numpy.array', 'np.array', (['[9, 7, 12, 15, 8, 10, 11, 13, 7]'], {}), '([9, 7, 12, 15, 8, 10, 11, 13, 7])\n', (282, 316), True, 'import numpy as np\n'), ((425, 465), 'numpy.array', 'np.array', (['[3, 2, 6, 4, 5, 7, 8, 6, 2, 6]'], {}), '([3, 2, 6, 4, 5, 7, 8, 6, 2, 6])\n', (433, 465), True, 'import numpy as np\n'), ((575, 698), 'numpy.array', 'np.array', (['[3, 2, 6, 4, 5, 7, 9, 13, 4, 12, 10, 8, 22, 11, 8, 26, 14, 6, 17, 27, 11, \n 17, 26, 16, 7, 23, 15, 18, 15, 13]'], {}), '([3, 2, 6, 4, 5, 7, 9, 13, 4, 12, 10, 8, 22, 11, 8, 26, 14, 6, 17, \n 27, 11, 17, 26, 16, 7, 23, 15, 18, 15, 13])\n', (583, 698), True, 'import numpy as np\n'), ((940, 956), 'numpy.zeros', 'np.zeros', (['[P, M]'], {}), '([P, M])\n', (948, 956), True, 'import numpy as np\n')] |
import os
import torch
import numpy as np
import pandas as pd
from stric.datasets.base_dataset import BaseDataset
class YahooDataset(BaseDataset):
# https://yahooresearch.tumblr.com/post/114590420346/a-benchmark-dataset-for-time-series-anomaly
# Need license from Yahoo to be downloaded
name = 'yahoo'
dataset_dir = 'ydata-labeled-time-series-anomalies-v1_0'
dataset_subnames = ['A1Benchmark', 'A2Benchmark', 'A3Benchmark', 'A4Benchmark']
def load_dataset(self):
path = os.path.join(self.dataset_path(), self.dataset_subset)
self.dataset_index = self.dataset_index + 1 if isinstance(self.dataset_index, int) else self.dataset_index
if self.dataset_subset == 'A1Benchmark':
file_name, len_subnames = lambda x: f"real_{x}.csv", 67
elif self.dataset_subset == 'A2Benchmark':
file_name, len_subnames = lambda x: f"synthetic_{x}.csv", 100
elif self.dataset_subset in 'A3Benchmark':
file_name, len_subnames = lambda x: f"A3Benchmark-TS{x}.csv", 100
elif self.dataset_subset in 'A4Benchmark':
file_name, len_subnames = lambda x: f"A4Benchmark-TS{x}.csv", 100
else:
raise ValueError(f'Dataset {self.dataset_dir} does not contain subname {self.dataset_subset}')
files_name = [file_name(self.dataset_index)] if not self.dataset_index == 'all' else [file_name(i + 1)
for i in range(len_subnames)]
dataset = []
for file_name in files_name:
dataset.append(pd.read_csv(os.path.join(path, file_name), index_col=None, header=0))
return dataset
def preprocessing(self, dataset: list) -> list:
if self.dataset_subset is None:
raise ValueError(f'You need to choose the Benchmark on Yahoo dataset!!!')
if self.dataset_subset == 'A1Benchmark':
for i in range(len(dataset)):
dataset[i] = dataset[i].rename(columns={"timestamps": "timestamp"})
dataset[i]['timestamp'] = (dataset[i]['timestamp'] - dataset[i]['timestamp'].iloc[0]) / 3600
elif self.dataset_subset == 'A2Benchmark':
for i in range(len(dataset)):
dataset[i]['timestamp'] = (dataset[i]['timestamp'] - dataset[i]['timestamp'].iloc[0]) / 3600
elif self.dataset_subset in ['A3Benchmark', 'A4Benchmark']:
for i in range(len(dataset)):
dataset[i] = dataset[i].rename(columns={"anomaly": "is_anomaly", "timestamps": "timestamp"})
dataset[i]['timestamp'] = (dataset[i]['timestamp'] - dataset[i]['timestamp'].iloc[0]) / 3600
else:
raise ValueError(f'Dataset {self.name} does not contain subname {self.dataset_subset}')
dataset = self.data_standardization(dataset)
return dataset
def form_dataset(self, dataset: list) -> tuple:
if self.dataset_subset in 'A1Benchmark':
lengths = [len(d['timestamp'].values) for d in dataset]
median = int(np.median(lengths))
X, Y, Z, data_statistics = [], [], [], []
for i, d in enumerate(dataset):
if len(d['timestamp'].values.reshape(-1, 1)) >= median:
X.append(d['value'].values.reshape(-1, 1)[:median])
Y.append(d['timestamp'].values.reshape(-1, 1)[:median])
Z.append(d['is_anomaly'].values.reshape(-1, 1)[:median])
data_statistics.append(self.dataset_statistics[i])
self.dataset_statistics = data_statistics
X = np.concatenate(X, 1)
Y = np.concatenate(Y, 1)
# Y = dataset[0]['timestamp'].values.reshape(-1, )[:median]
Z = np.concatenate(Z, 1)
else:
X = np.concatenate([d['value'].values.reshape(-1, 1) for d in dataset], 1)
Y = np.concatenate([d['timestamp'].values.reshape(-1, 1) for d in dataset], 1)
# Assuming uniform sampling
# Y = dataset[0]['timestamp'].values.reshape(-1,)
Z = np.concatenate([d['is_anomaly'].values.reshape(-1, 1) for d in dataset], 1)
return torch.tensor(X), torch.tensor(Y), torch.tensor(Z) | [
"numpy.median",
"torch.tensor",
"os.path.join",
"numpy.concatenate"
] | [((3644, 3664), 'numpy.concatenate', 'np.concatenate', (['X', '(1)'], {}), '(X, 1)\n', (3658, 3664), True, 'import numpy as np\n'), ((3681, 3701), 'numpy.concatenate', 'np.concatenate', (['Y', '(1)'], {}), '(Y, 1)\n', (3695, 3701), True, 'import numpy as np\n'), ((3790, 3810), 'numpy.concatenate', 'np.concatenate', (['Z', '(1)'], {}), '(Z, 1)\n', (3804, 3810), True, 'import numpy as np\n'), ((4212, 4227), 'torch.tensor', 'torch.tensor', (['X'], {}), '(X)\n', (4224, 4227), False, 'import torch\n'), ((4229, 4244), 'torch.tensor', 'torch.tensor', (['Y'], {}), '(Y)\n', (4241, 4244), False, 'import torch\n'), ((4246, 4261), 'torch.tensor', 'torch.tensor', (['Z'], {}), '(Z)\n', (4258, 4261), False, 'import torch\n'), ((3087, 3105), 'numpy.median', 'np.median', (['lengths'], {}), '(lengths)\n', (3096, 3105), True, 'import numpy as np\n'), ((1626, 1655), 'os.path.join', 'os.path.join', (['path', 'file_name'], {}), '(path, file_name)\n', (1638, 1655), False, 'import os\n')] |
import ffmpeg
import numpy as np
def extract_frames(video_path, fps, size=None, crop=None, start=None, duration=None):
if start is not None:
cmd = ffmpeg.input(video_path, ss=start, t=duration)
else:
cmd = ffmpeg.input(video_path)
if size is None:
info = [s for s in ffmpeg.probe(video_path)["streams"] if s["codec_type"] == "video"][0]
size = (info["width"], info["height"])
elif isinstance(size, int):
size = (size, size)
if fps is not None:
cmd = cmd.filter('fps', fps=fps)
cmd = cmd.filter('scale', size[0], size[1])
if crop is not None:
cmd = cmd.filter('crop', f'in_w-{crop[0]}', f'in_h-{crop[1]}')
size = (size[0] - crop[0], size[1] - crop[1])
out, _ = (
cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24')
.run(capture_stdout=True, quiet=True)
)
video = np.frombuffer(out, np.uint8).reshape([-1, size[1], size[0], 3])
return video
| [
"numpy.frombuffer",
"ffmpeg.probe",
"ffmpeg.input"
] | [((161, 207), 'ffmpeg.input', 'ffmpeg.input', (['video_path'], {'ss': 'start', 't': 'duration'}), '(video_path, ss=start, t=duration)\n', (173, 207), False, 'import ffmpeg\n'), ((232, 256), 'ffmpeg.input', 'ffmpeg.input', (['video_path'], {}), '(video_path)\n', (244, 256), False, 'import ffmpeg\n'), ((895, 923), 'numpy.frombuffer', 'np.frombuffer', (['out', 'np.uint8'], {}), '(out, np.uint8)\n', (908, 923), True, 'import numpy as np\n'), ((306, 330), 'ffmpeg.probe', 'ffmpeg.probe', (['video_path'], {}), '(video_path)\n', (318, 330), False, 'import ffmpeg\n')] |
import tensorflow as tf
# import PIL
from PIL import Image
import numpy as np
import cv2
from PIL import Image
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def read_image(image_name, feature_row, feature_col):
image_bytes = tf.read_file(image_name)
image_tensor = tf.image.decode_jpeg(image_bytes, channels = 3)
image_tensor = tf.image.convert_image_dtype(image_tensor, tf.float32)
image_tensor = tf.image.resize_images(image_tensor, feature_row, feature_col)
return image_tensor
def display_image(image_v):
"""accept 3D or 4D numpy array. if the input is 4D, it will use the first one"""
display_image_v = image_v
if image_v.ndim == 4:
display_image_v = image_v[0]
display_image_v[:,:,[2,0]] = display_image_v[:,:,[0,2]]
cv2.imshow("image", display_image_v)
cv2.waitKey(100)
def save_image(image_v, loss):
"""accept 3D or 4D numpy array. if the input is 4D, it will use the first one"""
save_image_v = image_v
if save_image_v.ndim == 4:
save_image_v = save_image_v[0]
save_image_v[:,:,[2,0]] = save_image_v[:,:,[0,2]]
save_image_v *= 255
# filename = "loss_%f.jpg" % (loss)
filename = "loss_%f.npy" % (loss)
np.save(filename, save_image_v)
return
# cv2.imwrite(filename, save_image_v)
# cv2.imwrite("aaa.jpg", I)
def define_graph_config(fraction):
"""Define the GPU usage"""
config_proto = tf.ConfigProto()
config_proto.gpu_options.per_process_gpu_memory_fraction = fraction
config_proto.allow_soft_placement=False
config_proto.log_device_placement=False
return config_proto
| [
"tensorflow.image.resize_images",
"numpy.save",
"cv2.waitKey",
"tensorflow.ConfigProto",
"tensorflow.image.decode_jpeg",
"tensorflow.read_file",
"cv2.imshow",
"tensorflow.image.convert_image_dtype"
] | [((262, 286), 'tensorflow.read_file', 'tf.read_file', (['image_name'], {}), '(image_name)\n', (274, 286), True, 'import tensorflow as tf\n'), ((306, 351), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_bytes'], {'channels': '(3)'}), '(image_bytes, channels=3)\n', (326, 351), True, 'import tensorflow as tf\n'), ((373, 427), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['image_tensor', 'tf.float32'], {}), '(image_tensor, tf.float32)\n', (401, 427), True, 'import tensorflow as tf\n'), ((447, 509), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['image_tensor', 'feature_row', 'feature_col'], {}), '(image_tensor, feature_row, feature_col)\n', (469, 509), True, 'import tensorflow as tf\n'), ((805, 841), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'display_image_v'], {}), "('image', display_image_v)\n", (815, 841), False, 'import cv2\n'), ((846, 862), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (857, 862), False, 'import cv2\n'), ((1207, 1238), 'numpy.save', 'np.save', (['filename', 'save_image_v'], {}), '(filename, save_image_v)\n', (1214, 1238), True, 'import numpy as np\n'), ((1396, 1412), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1410, 1412), True, 'import tensorflow as tf\n')] |
from typing import Callable, Dict, Optional, Sequence, Union
import numpy as np
import pytorch_lightning as pl
import torch
from pandas import DataFrame
from torch import LongTensor, Tensor
from torch.utils.data import DataLoader, Dataset
class TabularDataset(Dataset):
def __init__(
self,
data: DataFrame,
task: str = "binary",
continuous_columns: Optional[Sequence[str]] = None,
categorical_columns: Optional[Sequence[str]] = None,
target: Optional[Union[str, Sequence[str]]] = None,
transform: Optional[Callable] = None,
) -> None:
"""
Args:
data (pandas.DataFrame): DataFrame.
task (str): One of "binary", "multiclass", "regression".
Defaults to "binary".
continuous_cols (sequence of str, optional): Sequence of names of
continuous features (columns). Defaults to None.
categorical_cols (sequence of str, optional): Sequence of names of
categorical features (columns). Defaults to None.
target (str, optional): If None, `np.zeros` is set as target.
Defaults to None.
transform (callable): Method of converting Tensor data.
Defaults to None.
"""
super().__init__()
self.task = task
self.num = data.shape[0]
self.categorical_columns = categorical_columns if categorical_columns else []
self.continuous_columns = continuous_columns if continuous_columns else []
if self.continuous_columns:
self.continuous = data[self.continuous_columns].values
if self.categorical_columns:
self.categorical = data[categorical_columns].values
if target:
self.target = data[target].values
if isinstance(target, str):
self.target = self.target.reshape(-1, 1)
else:
self.target = np.zeros((self.num, 1))
self.transform = transform
def __len__(self) -> int:
return self.num
def __getitem__(self, idx: int) -> Dict[str, Tensor]:
"""
Args:
idx (int): The index of the sample in the dataset.
Returns:
dict[str, Tensor]:
The returned dict has the keys {"target", "continuous", "categorical"}
and its values. If no continuous/categorical features, the returned value is `[]`.
"""
if self.task == "multiclass":
x = {
"target": torch.LongTensor(self.target[idx]),
"continuous": Tensor(self.continuous[idx])
if self.continuous_columns
else [],
"categorical": LongTensor(self.categorical[idx])
if self.categorical_columns
else [],
}
elif self.task in {"binary", "regression"}:
x = {
"target": torch.Tensor(self.target[idx]),
"continuous": Tensor(self.continuous[idx])
if self.continuous_columns
else [],
"categorical": LongTensor(self.categorical[idx])
if self.categorical_columns
else [],
}
else:
raise ValueError(
f"task: {self.task} must be 'multiclass' or 'binary' or 'regression'"
)
if self.transform is not None:
x = self.transform(x)
return x
class TabularDatamodule(pl.LightningDataModule):
def __init__(
self,
train: DataFrame,
num_categories: int = 0,
categorical_columns: Optional[Sequence[str]] = None,
continuous_columns: Optional[Sequence[str]] = None,
target: Optional[Sequence[str]] = None,
val: Optional[DataFrame] = None,
test: Optional[DataFrame] = None,
transform: Optional[Callable] = None,
train_sampler: Optional[torch.utils.data.Sampler] = None,
task: str = "binary",
dim_out: int = 1,
batch_size: int = 128,
num_workers: int = 3,
) -> None:
"""
Args:
train (`DataFrame`): DataFrame of train data.
num_categories (int): All categories the dataset has. Defaults to 0.
continuous_cols (sequence of str, optional): Sequence of names of
continuous features (columns). Defaults to None.
categorical_cols (sequence of str, optional): Sequence of names of
categorical features (columns). Defaults to None.
target (sequence of str, optional): Target features (columns) in training.
If None, `np.zeros` is set as target. Defaults to None.
validation (`DataFrame`, optional): DataFrame of validation data.
If None, The returned value of `self.dataloader(split="val")` is None.
Defaults to None.
test: (`DataFrame`, optional): DataFrame of test data.
If None, The returned value of `self.dataloader(split="test")` is None.
Defaults to None.
transform (callable, optional): Transformation applied to the Tensor.
Defaults to None.
train_sampler (`torch.utils.data.Sampler`, optional): Strategy of drawing
samples. Defaults to None.
task: (str): One of "binary", "multiclass", "regression".
Defaults to "binary".
dim_out (int): Dimension of outputs of models. For "binary" or "regression",
`dim_out` should be 1. For "multiclass", `dim_out` should be the number
of the classfication categories. Defaults to 1.
batch_size (int): The number of samples for each batch. Defaults to 128.
num_workers (int): The number of subprocess for loading data. Defaults to 3.
"""
super().__init__()
self.train = train
self._num_categories = num_categories
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
self.target = target
self.dim_out = dim_out
self.val = val
self.test = test
self.transform = transform
self.train_sampler = train_sampler
self.task = task
self.batch_size = batch_size
self.num_workers = num_workers
@property
def num_categories(self) -> int:
return self._num_categories
@property
def num_continuous_features(self) -> int:
return len(self.continuous_columns)
@property
def num_categorical_features(self) -> int:
return len(self.categorical_columns)
def dataloader(
self,
split: str,
batch_size: Optional[int] = None,
transform: Optional[Callable] = None,
) -> Optional[DataLoader]:
"""
Args:
split (str): One of "train", "val", "test".
The returned value is a dataloader of `split`.
batch_size (int): The number of samples for each batch.
If the argument is set, `self.batch_size` will be overrided.
Defaults to None.
transform (callable): Transformation applied to the Tensor.
If `transform` is not None, `self.transform` will be overrided.
Defaults to None.
Return:
DataLoader
"""
assert split in {"train", "val", "test"}
if not hasattr(self, split):
return None
data = getattr(self, split)
if split == "test":
transform = None
if transform is None:
transform = self.transform
dataset = TabularDataset(
data=data,
task=self.task,
categorical_columns=self.categorical_columns,
continuous_columns=self.continuous_columns,
target=self.target,
transform=transform,
)
return DataLoader(
dataset,
batch_size if batch_size is not None else self.batch_size,
shuffle=True if split == "train" else False,
num_workers=self.num_workers,
sampler=self.train_sampler if split == "train" else None,
)
| [
"torch.Tensor",
"numpy.zeros",
"torch.utils.data.DataLoader",
"torch.LongTensor"
] | [((8011, 8232), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', '(batch_size if batch_size is not None else self.batch_size)'], {'shuffle': "(True if split == 'train' else False)", 'num_workers': 'self.num_workers', 'sampler': "(self.train_sampler if split == 'train' else None)"}), "(dataset, batch_size if batch_size is not None else self.\n batch_size, shuffle=True if split == 'train' else False, num_workers=\n self.num_workers, sampler=self.train_sampler if split == 'train' else None)\n", (8021, 8232), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((1956, 1979), 'numpy.zeros', 'np.zeros', (['(self.num, 1)'], {}), '((self.num, 1))\n', (1964, 1979), True, 'import numpy as np\n'), ((2548, 2582), 'torch.LongTensor', 'torch.LongTensor', (['self.target[idx]'], {}), '(self.target[idx])\n', (2564, 2582), False, 'import torch\n'), ((2614, 2642), 'torch.Tensor', 'Tensor', (['self.continuous[idx]'], {}), '(self.continuous[idx])\n', (2620, 2642), False, 'from torch import LongTensor, Tensor\n'), ((2742, 2775), 'torch.LongTensor', 'LongTensor', (['self.categorical[idx]'], {}), '(self.categorical[idx])\n', (2752, 2775), False, 'from torch import LongTensor, Tensor\n'), ((2955, 2985), 'torch.Tensor', 'torch.Tensor', (['self.target[idx]'], {}), '(self.target[idx])\n', (2967, 2985), False, 'import torch\n'), ((3017, 3045), 'torch.Tensor', 'Tensor', (['self.continuous[idx]'], {}), '(self.continuous[idx])\n', (3023, 3045), False, 'from torch import LongTensor, Tensor\n'), ((3145, 3178), 'torch.LongTensor', 'LongTensor', (['self.categorical[idx]'], {}), '(self.categorical[idx])\n', (3155, 3178), False, 'from torch import LongTensor, Tensor\n')] |
import numpy as np
from datetime import datetime
import calendar
import re
import time
from moderatebot import PriceTable
from moderatebot import PRecordFactory
import matplotlib.pyplot as plt
instrument = 'EUR_USD'
granularity = 'M1'
pt = PriceTable(instrument, granularity)
cf = PRecordFactory(pt.granularity)
ask = []
bid = []
t = []
index = 1
while index < 2825:
# tick = np.load('../tick_data/tick_' + str(index) + '.npy', allow_pickle=True)[()]
tick = np.load('/media/office/0D82-9628/data/tick_data/July3_1.5hrs/tick_' + str(index) + '.npy', allow_pickle=True)[()]
print(tick)
exit()
rec = []
if 'PRICE' in tick['type']:
# print(tick)
# exit()
rec = cf.parseTick(tick)
# print('rec ', rec)
# print(type(rec))
t.append(rec[0])
ask.append(rec[1])
bid.append(rec[6])
# print(len(rec))
# exit()
pt.addItem(*rec)
index += 1
# fig, ax = plt.subplots()
plt.plot(t, np.array(ask), label='ask')
plt.plot(t, bid, label='bid')
# plt.plot(t, (np.array(ask) - np.array(bid))+1.124, label='spread')
ax = plt.gca()
ax.get_figure().autofmt_xdate()
ax.set_xticks(ax.get_xticks()[::5])
plt.grid(True)
plt.legend()
plt.show()
| [
"matplotlib.pyplot.show",
"moderatebot.PriceTable",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"moderatebot.PRecordFactory",
"numpy.array",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.grid"
] | [((242, 277), 'moderatebot.PriceTable', 'PriceTable', (['instrument', 'granularity'], {}), '(instrument, granularity)\n', (252, 277), False, 'from moderatebot import PriceTable\n'), ((283, 313), 'moderatebot.PRecordFactory', 'PRecordFactory', (['pt.granularity'], {}), '(pt.granularity)\n', (297, 313), False, 'from moderatebot import PRecordFactory\n'), ((1016, 1045), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'bid'], {'label': '"""bid"""'}), "(t, bid, label='bid')\n", (1024, 1045), True, 'import matplotlib.pyplot as plt\n'), ((1120, 1129), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1127, 1129), True, 'import matplotlib.pyplot as plt\n'), ((1198, 1212), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1206, 1212), True, 'import matplotlib.pyplot as plt\n'), ((1213, 1225), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1223, 1225), True, 'import matplotlib.pyplot as plt\n'), ((1226, 1236), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1234, 1236), True, 'import matplotlib.pyplot as plt\n'), ((988, 1001), 'numpy.array', 'np.array', (['ask'], {}), '(ask)\n', (996, 1001), True, 'import numpy as np\n')] |
import sys
sys.path.extend(['../'])
from config import Config_pku
import os
import glob
import numpy as np
from PIL import Image, ImageChops
import pickle
def read_txt_file(file_path):
lines = []
with open(file_path) as f:
lines.append(f.read().splitlines() )
f.close()
lines = np.hstack(lines)
return lines
def re_label(lables):
pid2label = {pid:label for label, pid in enumerate(np.sort(np.unique(lables)))}
new_labels = [pid2label[label] for label in lables]
return new_labels
def process_train_data_list(data_dir):
ind_list = np.load('./pku/train_id.npy')
ind_list = np.sort(ind_list)
color_path_list = glob.glob(data_dir + 'photo/*.jpg')
sketch_path_list = glob.glob(data_dir + 'sketch/*.jpg')
color_imgs = []
color_labels = []
for idx in ind_list:
imgs = []
for img_path in color_path_list:
idx_path = int(img_path.split('/')[-1].split('_')[0])
if idx_path == idx:
img = Image.open(img_path)
img = img.resize((128, 384), Image.ANTIALIAS)
imgs.append(np.array(img))
color_imgs.append(imgs)
color_labels.append(idx)
color_labels = re_label(color_labels)
sketch_imgs = []
sketch_labels = []
for idx in ind_list:
for img_path in sketch_path_list:
idx_path = int(img_path.split('/')[-1].split('.')[0])
if idx_path == idx:
img = Image.open(img_path)
img = img.resize((128, 384), Image.ANTIALIAS)
img = np.array(img)
sketch_imgs.append(img)
sketch_labels.append(idx)
sketch_labels = re_label(sketch_labels)
train_data = {'color_imgs':color_imgs,
'color_labels':color_labels,
'sketch_imgs':sketch_imgs,
'sketch_labels':sketch_labels}
#np.save(data_dir+'train_data.npy', train_data)
f = open('./pku/train_data.pkl', 'wb')
pickle.dump(train_data,f)
f.close()
def process_test_data_list(data_dir):
ind_list = np.load('./pku/test_id.npy')
ind_list = np.sort(ind_list)
color_path_list = glob.glob(data_dir + 'photo/*.jpg')
sketch_path_list = glob.glob(data_dir + 'sketch/*.jpg')
color_imgs = []
color_labels = []
for idx in ind_list:
for img_path in color_path_list:
idx_path = int(img_path.split('/')[-1].split('_')[0])
if idx_path == idx:
img = Image.open(img_path)
img = img.resize((128, 384), Image.ANTIALIAS)
img = np.array(img)
color_imgs.append(img)
color_labels.append(idx)
color_labels = re_label(color_labels)
np.save('./pku/test_color_imgs.npy', color_imgs)
np.save('./pku/test_color_labels.npy', color_labels)
sketch_imgs = []
sketch_labels = []
for idx in ind_list:
for img_path in sketch_path_list:
idx_path = int(img_path.split('/')[-1].split('.')[0])
if idx_path == idx:
img = Image.open(img_path)
img = img.resize((128, 384), Image.ANTIALIAS)
img = np.array(img)
sketch_imgs.append(img)
sketch_labels.append(idx)
sketch_labels = re_label(sketch_labels)
np.save('./pku/test_sketch_imgs.npy', sketch_imgs)
np.save('./pku/test_sketch_labels.npy', sketch_labels)
def main():
args = Config_pku()
style_dir = args.data_dir + 'styleAnnotation/'
style_list = glob.glob(style_dir + '*.txt')
sample_ind = {}
for style_path in style_list:
style_clc = style_path.split('/')[-1].split('_')[0]
lines = read_txt_file(style_path)
index = [int(line) for line in lines]
sample_ind[style_clc] = index
train_id = []
test_id = []
split_pos = [34, 15, 60, 25, 16] # given by 'Cross-Domain Adversarial Feature Learning for Sketch Re-identification'
for style_clc, split in zip(sample_ind,split_pos):
all_ind = np.random.permutation(sample_ind[style_clc])
train_id += list(all_ind[:split])
test_id += list(all_ind[split:])
np.save('./pku/train_id.npy', train_id)
np.save('./pku/test_id.npy', test_id)
print('Train list and test list is generated!')
process_train_data_list(args.data_dir)
process_test_data_list(args.data_dir)
print('Train data and test data is generated!')
if __name__== "__main__":
main() | [
"numpy.load",
"pickle.dump",
"numpy.save",
"sys.path.extend",
"numpy.hstack",
"PIL.Image.open",
"numpy.sort",
"numpy.array",
"glob.glob",
"numpy.random.permutation",
"config.Config_pku",
"numpy.unique"
] | [((11, 35), 'sys.path.extend', 'sys.path.extend', (["['../']"], {}), "(['../'])\n", (26, 35), False, 'import sys\n'), ((305, 321), 'numpy.hstack', 'np.hstack', (['lines'], {}), '(lines)\n', (314, 321), True, 'import numpy as np\n'), ((579, 608), 'numpy.load', 'np.load', (['"""./pku/train_id.npy"""'], {}), "('./pku/train_id.npy')\n", (586, 608), True, 'import numpy as np\n'), ((624, 641), 'numpy.sort', 'np.sort', (['ind_list'], {}), '(ind_list)\n', (631, 641), True, 'import numpy as np\n'), ((673, 708), 'glob.glob', 'glob.glob', (["(data_dir + 'photo/*.jpg')"], {}), "(data_dir + 'photo/*.jpg')\n", (682, 708), False, 'import glob\n'), ((732, 768), 'glob.glob', 'glob.glob', (["(data_dir + 'sketch/*.jpg')"], {}), "(data_dir + 'sketch/*.jpg')\n", (741, 768), False, 'import glob\n'), ((2059, 2085), 'pickle.dump', 'pickle.dump', (['train_data', 'f'], {}), '(train_data, f)\n', (2070, 2085), False, 'import pickle\n'), ((2157, 2185), 'numpy.load', 'np.load', (['"""./pku/test_id.npy"""'], {}), "('./pku/test_id.npy')\n", (2164, 2185), True, 'import numpy as np\n'), ((2206, 2223), 'numpy.sort', 'np.sort', (['ind_list'], {}), '(ind_list)\n', (2213, 2223), True, 'import numpy as np\n'), ((2255, 2290), 'glob.glob', 'glob.glob', (["(data_dir + 'photo/*.jpg')"], {}), "(data_dir + 'photo/*.jpg')\n", (2264, 2290), False, 'import glob\n'), ((2314, 2350), 'glob.glob', 'glob.glob', (["(data_dir + 'sketch/*.jpg')"], {}), "(data_dir + 'sketch/*.jpg')\n", (2323, 2350), False, 'import glob\n'), ((2839, 2887), 'numpy.save', 'np.save', (['"""./pku/test_color_imgs.npy"""', 'color_imgs'], {}), "('./pku/test_color_imgs.npy', color_imgs)\n", (2846, 2887), True, 'import numpy as np\n'), ((2892, 2944), 'numpy.save', 'np.save', (['"""./pku/test_color_labels.npy"""', 'color_labels'], {}), "('./pku/test_color_labels.npy', color_labels)\n", (2899, 2944), True, 'import numpy as np\n'), ((3451, 3501), 'numpy.save', 'np.save', (['"""./pku/test_sketch_imgs.npy"""', 'sketch_imgs'], {}), 
"('./pku/test_sketch_imgs.npy', sketch_imgs)\n", (3458, 3501), True, 'import numpy as np\n'), ((3506, 3560), 'numpy.save', 'np.save', (['"""./pku/test_sketch_labels.npy"""', 'sketch_labels'], {}), "('./pku/test_sketch_labels.npy', sketch_labels)\n", (3513, 3560), True, 'import numpy as np\n'), ((3587, 3599), 'config.Config_pku', 'Config_pku', ([], {}), '()\n', (3597, 3599), False, 'from config import Config_pku\n'), ((3673, 3703), 'glob.glob', 'glob.glob', (["(style_dir + '*.txt')"], {}), "(style_dir + '*.txt')\n", (3682, 3703), False, 'import glob\n'), ((4315, 4354), 'numpy.save', 'np.save', (['"""./pku/train_id.npy"""', 'train_id'], {}), "('./pku/train_id.npy', train_id)\n", (4322, 4354), True, 'import numpy as np\n'), ((4359, 4396), 'numpy.save', 'np.save', (['"""./pku/test_id.npy"""', 'test_id'], {}), "('./pku/test_id.npy', test_id)\n", (4366, 4396), True, 'import numpy as np\n'), ((4182, 4226), 'numpy.random.permutation', 'np.random.permutation', (['sample_ind[style_clc]'], {}), '(sample_ind[style_clc])\n', (4203, 4226), True, 'import numpy as np\n'), ((1026, 1046), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1036, 1046), False, 'from PIL import Image, ImageChops\n'), ((1506, 1526), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1516, 1526), False, 'from PIL import Image, ImageChops\n'), ((1611, 1624), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1619, 1624), True, 'import numpy as np\n'), ((2581, 2601), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2591, 2601), False, 'from PIL import Image, ImageChops\n'), ((2686, 2699), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2694, 2699), True, 'import numpy as np\n'), ((3182, 3202), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (3192, 3202), False, 'from PIL import Image, ImageChops\n'), ((3287, 3300), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3295, 3300), True, 'import numpy as np\n'), ((425, 
442), 'numpy.unique', 'np.unique', (['lables'], {}), '(lables)\n', (434, 442), True, 'import numpy as np\n'), ((1137, 1150), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1145, 1150), True, 'import numpy as np\n')] |
import streamlit as st
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
def app():
dataset = ['Iris', 'Breast Cancer', 'Wine']
dataset_name = st.selectbox('Select Dataset', dataset)
algorithm = "SVM"
st.write(f"## {algorithm} Algorithm on {dataset_name} Dataset")
class SVM:
def __init__(self, learning_rate=0.001, lambda_param=0.01, n_iters=1000):
self.lr = learning_rate
self.lambda_param = lambda_param
self.n_iters = n_iters
self.w = None
self.b = None
def fit(self, X, y):
n_samples, n_features = X.shape
y_ = np.where(y <= 0, -1, 1)
self.w = np.zeros(n_features)
self.b = 0
for _ in range(self.n_iters):
for idx, x_i in enumerate(X):
condition = y_[idx] * (np.dot(x_i, self.w) - self.b) >= 1
if condition:
self.w -= self.lr * (2 * self.lambda_param * self.w)
else:
self.w -= self.lr * (2 * self.lambda_param * self.w - np.dot(x_i, y_[idx]))
self.b -= self.lr * y_[idx]
def predict(self, X):
approx = np.dot(X, self.w) - self.b
return np.sign(approx)
if(dataset_name == "Iris"):
dataset = datasets.load_iris()
elif(dataset_name == "Wine"):
dataset = datasets.load_wine()
else:
dataset = datasets.load_breast_cancer()
X = dataset.data
y = dataset.target
x = pd.DataFrame(dataset.data, columns=dataset.feature_names)
Y = pd.Series(dataset.target, name='response')
df = pd.concat( [x,Y], axis=1 )
st.write(f"A look at the {dataset_name} dataset:")
st.write(df.head(5))
st.write('Shape of dataset:', X.shape)
st.write('Number of classes:', len(np.unique(y)))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .75)
classifier = SVM()
classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
acc = accuracy_score(y_test, predictions)
st.write(f'Accuracy :', acc)
pca = PCA(2)
X_projected = pca.fit_transform(X)
x1 = X_projected[:, 0]
x2 = X_projected[:, 1]
fig = plt.figure()
plt.scatter(x1, x2, c=y, alpha=0.8, cmap='viridis')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.colorbar()
st.pyplot(fig) | [
"sklearn.datasets.load_iris",
"streamlit.selectbox",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.figure",
"numpy.unique",
"pandas.DataFrame",
"sklearn.datasets.load_wine",
"matplotlib.pyplot.colorbar",
"pandas.concat",
"sklearn.datasets.load_b... | [((351, 390), 'streamlit.selectbox', 'st.selectbox', (['"""Select Dataset"""', 'dataset'], {}), "('Select Dataset', dataset)\n", (363, 390), True, 'import streamlit as st\n'), ((423, 486), 'streamlit.write', 'st.write', (['f"""## {algorithm} Algorithm on {dataset_name} Dataset"""'], {}), "(f'## {algorithm} Algorithm on {dataset_name} Dataset')\n", (431, 486), True, 'import streamlit as st\n'), ((1833, 1890), 'pandas.DataFrame', 'pd.DataFrame', (['dataset.data'], {'columns': 'dataset.feature_names'}), '(dataset.data, columns=dataset.feature_names)\n', (1845, 1890), True, 'import pandas as pd\n'), ((1900, 1942), 'pandas.Series', 'pd.Series', (['dataset.target'], {'name': '"""response"""'}), "(dataset.target, name='response')\n", (1909, 1942), True, 'import pandas as pd\n'), ((1953, 1978), 'pandas.concat', 'pd.concat', (['[x, Y]'], {'axis': '(1)'}), '([x, Y], axis=1)\n', (1962, 1978), True, 'import pandas as pd\n'), ((1987, 2037), 'streamlit.write', 'st.write', (['f"""A look at the {dataset_name} dataset:"""'], {}), "(f'A look at the {dataset_name} dataset:')\n", (1995, 2037), True, 'import streamlit as st\n'), ((2069, 2107), 'streamlit.write', 'st.write', (['"""Shape of dataset:"""', 'X.shape'], {}), "('Shape of dataset:', X.shape)\n", (2077, 2107), True, 'import streamlit as st\n'), ((2205, 2243), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.75)'}), '(X, y, test_size=0.75)\n', (2221, 2243), False, 'from sklearn.model_selection import train_test_split\n'), ((2380, 2415), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (2394, 2415), False, 'from sklearn.metrics import accuracy_score\n'), ((2423, 2451), 'streamlit.write', 'st.write', (['f"""Accuracy :"""', 'acc'], {}), "(f'Accuracy :', acc)\n", (2431, 2451), True, 'import streamlit as st\n'), ((2465, 2471), 'sklearn.decomposition.PCA', 'PCA', (['(2)'], {}), '(2)\n', (2468, 
2471), False, 'from sklearn.decomposition import PCA\n'), ((2583, 2595), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2593, 2595), True, 'import matplotlib.pyplot as plt\n'), ((2601, 2652), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x1', 'x2'], {'c': 'y', 'alpha': '(0.8)', 'cmap': '"""viridis"""'}), "(x1, x2, c=y, alpha=0.8, cmap='viridis')\n", (2612, 2652), True, 'import matplotlib.pyplot as plt\n'), ((2660, 2695), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Principal Component 1"""'], {}), "('Principal Component 1')\n", (2670, 2695), True, 'import matplotlib.pyplot as plt\n'), ((2701, 2736), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Principal Component 2"""'], {}), "('Principal Component 2')\n", (2711, 2736), True, 'import matplotlib.pyplot as plt\n'), ((2742, 2756), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2754, 2756), True, 'import matplotlib.pyplot as plt\n'), ((2764, 2778), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (2773, 2778), True, 'import streamlit as st\n'), ((1614, 1634), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (1632, 1634), False, 'from sklearn import datasets\n'), ((871, 894), 'numpy.where', 'np.where', (['(y <= 0)', '(-1)', '(1)'], {}), '(y <= 0, -1, 1)\n', (879, 894), True, 'import numpy as np\n'), ((931, 951), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (939, 951), True, 'import numpy as np\n'), ((1543, 1558), 'numpy.sign', 'np.sign', (['approx'], {}), '(approx)\n', (1550, 1558), True, 'import numpy as np\n'), ((1689, 1709), 'sklearn.datasets.load_wine', 'datasets.load_wine', ([], {}), '()\n', (1707, 1709), False, 'from sklearn import datasets\n'), ((1740, 1769), 'sklearn.datasets.load_breast_cancer', 'datasets.load_breast_cancer', ([], {}), '()\n', (1767, 1769), False, 'from sklearn import datasets\n'), ((2148, 2160), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2157, 2160), True, 'import numpy as np\n'), ((1496, 
1513), 'numpy.dot', 'np.dot', (['X', 'self.w'], {}), '(X, self.w)\n', (1502, 1513), True, 'import numpy as np\n'), ((1112, 1131), 'numpy.dot', 'np.dot', (['x_i', 'self.w'], {}), '(x_i, self.w)\n', (1118, 1131), True, 'import numpy as np\n'), ((1366, 1386), 'numpy.dot', 'np.dot', (['x_i', 'y_[idx]'], {}), '(x_i, y_[idx])\n', (1372, 1386), True, 'import numpy as np\n')] |
import numpy as np
def compute_air_density(temperature=20, pressure=101325, relative_humidity=0, dew_temperature=None):
    """Compute the density of moist air in kg/m^3 using the CIPM-2007 formula.

    Reference: Picard, Davis, Glaeser, Fujii, "Revised formula for the density
    of moist air (CIPM-2007)", Metrologia 45 (2008).

    @param temperature: air temperature in degrees Celsius
    @param pressure: absolute pressure in Pa
    @param relative_humidity: relative humidity as a fraction in [0, 1];
        ignored when `dew_temperature` is given
    @param dew_temperature: optional dew-point temperature in degrees Celsius;
        when provided, the vapour mole fraction is derived from it instead of
        from `relative_humidity`
    @return: air density in kg/m^3
    """
    if relative_humidity is None and dew_temperature is None:
        relative_humidity = 0
    t = temperature
    T = 273.15 + t  # absolute temperature [K]
    p = pressure
    # Saturation vapour pressure: psv = exp(A*T^2 + B*T + C + D/T) [Pa].
    A = 1.2378847e-5
    B = -1.9121316e-2
    C = 33.93711047
    D = -6.3431645e3
    # Enhancement factor f = a_ + b_*p + y_*t^2 (t in Celsius).
    a_ = 1.00062
    b_ = 3.14e-8
    y_ = 5.6e-7
    if dew_temperature is not None:
        Td = dew_temperature + 273.15
        psv = np.exp(A * np.power(Td, 2) + B * Td + C + D / Td)
        f = a_ + b_ * p + y_ * np.power(dew_temperature, 2)
        xv = f * psv / p
    else:
        psv = np.exp(A * np.power(T, 2) + B * T + C + D / T)
        f = a_ + b_ * p + y_ * np.power(t, 2)
        xv = relative_humidity * f * psv / p
    # Compressibility-factor constants (CIPM-2007, Table A1).
    a0 = 1.58123e-6
    a1 = -2.9331e-8
    a2 = 1.1043e-10
    b0 = 5.707e-6
    b1 = -2.051e-8
    c0 = 1.9898e-4
    c1 = -2.376e-6
    d = 1.83e-11
    e = -0.765e-8
    # Bug fix: the linear term is a0 + a1*t (a1 is already negative); the
    # original code subtracted a1*t, which flipped the sign of that term.
    Z = 1 - (p / T) * (a0 + a1 * t + a2 * np.power(t, 2) + (b0 + b1 * t) * xv + (c0 + c1 * t) * np.power(xv, 2)) + np.power(p / T, 2) * (d + e * np.power(xv, 2))
    Ma = 28.96546e-3  # molar mass of dry air [kg/mol]
    Mv = 18.01528e-3  # molar mass of water vapour [kg/mol]
    R = 8.314472      # molar gas constant [J/(mol K)]
    airden = p * Ma / (Z * R * T) * (1 - xv * (1 - (Mv / Ma)))
    return airden
| [
"numpy.power"
] | [((1234, 1252), 'numpy.power', 'np.power', (['(p / T)', '(2)'], {}), '(p / T, 2)\n', (1242, 1252), True, 'import numpy as np\n'), ((731, 759), 'numpy.power', 'np.power', (['dew_temperature', '(2)'], {}), '(dew_temperature, 2)\n', (739, 759), True, 'import numpy as np\n'), ((887, 901), 'numpy.power', 'np.power', (['t', '(2)'], {}), '(t, 2)\n', (895, 901), True, 'import numpy as np\n'), ((1264, 1279), 'numpy.power', 'np.power', (['xv', '(2)'], {}), '(xv, 2)\n', (1272, 1279), True, 'import numpy as np\n'), ((1215, 1230), 'numpy.power', 'np.power', (['xv', '(2)'], {}), '(xv, 2)\n', (1223, 1230), True, 'import numpy as np\n'), ((661, 676), 'numpy.power', 'np.power', (['Td', '(2)'], {}), '(Td, 2)\n', (669, 676), True, 'import numpy as np\n'), ((820, 834), 'numpy.power', 'np.power', (['T', '(2)'], {}), '(T, 2)\n', (828, 834), True, 'import numpy as np\n'), ((1161, 1175), 'numpy.power', 'np.power', (['t', '(2)'], {}), '(t, 2)\n', (1169, 1175), True, 'import numpy as np\n')] |
#
# Copyright 2020- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
#
from typing import Tuple, Optional, Dict, Union
import numpy as np
from collections import defaultdict
from abc import ABC, abstractmethod
class BaseRetailer(ABC):
    """Abstract retailer agent that owns a seeded NumPy random generator.

    Concrete subclasses must implement act() and learn().
    """

    def __init__(self, seed: Optional[int] = None):
        # Default seed: sum of the character codes of the word "retailer".
        self.seed = sum(map(ord, "retailer")) if seed is None else seed
        self.reset_rng()

    def reset_rng(self, seed: Optional[int] = None):
        """Re-create the RNG from `seed` if given, otherwise from self.seed."""
        self.rng = np.random.RandomState(self.seed if seed is None else seed)

    @abstractmethod
    def act(self, wholesale_price: float) -> Tuple[float, float]:
        raise NotImplementedError

    @abstractmethod
    def learn(self, demand: float):
        raise NotImplementedError
class RandomRetailer(BaseRetailer):
    """Retailer that quotes a uniformly random price and quantity in [0, 1)."""

    def act(self, wholesale_price: float) -> Tuple[float, float]:
        """Ignore the wholesale price; draw a random (price, quantity) pair."""
        draws = self.rng.random(2)
        return draws[0], draws[1]

    def learn(self, demand: float):
        """No-op: the random policy does not adapt to observed demand."""
        pass
class ConstantRetailer(BaseRetailer):
    """Retailer that always quotes the same fixed retail price and quantity."""

    def __init__(self,
                 retail_price: float,
                 quantity: float,
                 seed: Optional[int] = None):
        # Bug fix: the parent initializer was never called, so `self.seed` and
        # `self.rng` were missing (reset_rng() would raise AttributeError) and
        # the `seed` argument was silently ignored.
        super().__init__(seed)
        self.retail_price = retail_price
        self.quantity = quantity

    def act(self, wholesale_price: float) -> Tuple[float, float]:
        """Return the fixed (retail_price, quantity); wholesale price is ignored."""
        return self.retail_price, self.quantity

    def learn(self, demand: float):
        """No-op: a constant policy does not adapt to observed demand."""
        pass
| [
"numpy.random.RandomState"
] | [((539, 571), 'numpy.random.RandomState', 'np.random.RandomState', (['self.seed'], {}), '(self.seed)\n', (560, 571), True, 'import numpy as np\n'), ((609, 636), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (630, 636), True, 'import numpy as np\n')] |
from __future__ import annotations
import math
import random
import statistics
import numpy as np
import tensorflow as tf
from .codeepneat_module_base import CoDeepNEATModuleBase
from tfne.helper_functions import round_with_step
class CoDeepNEATModuleDenseDropout(CoDeepNEATModuleBase):
    """
    TFNE CoDeepNEAT module encapsulating a Dense layer followed by an optional Dropout layer. No Downsampling layer
    defined.
    """

    def __init__(self,
                 config_params,
                 module_id,
                 parent_mutation,
                 dtype,
                 merge_method=None,
                 units=None,
                 activation=None,
                 kernel_init=None,
                 bias_init=None,
                 dropout_flag=None,
                 dropout_rate=None,
                 self_initialization_flag=False):
        """
        Create module by storing supplied parameters. If self initialization flag is supplied, randomly initialize the
        module parameters based on the range of parameters allowed by config_params
        @param config_params: dict of the module parameter range supplied via config
        @param module_id: int of unique module ID
        @param parent_mutation: dict summarizing the mutation of the parent module
        @param dtype: string of deserializable TF dtype
        @param merge_method: dict representing a TF deserializable merge layer
        @param units: see TF documentation
        @param activation: see TF documentation
        @param kernel_init: see TF documentation
        @param bias_init: see TF documentation
        @param dropout_flag: bool flag indicating whether a Dropout layer follows the Dense layer
        @param dropout_rate: see TF documentation
        @param self_initialization_flag: bool flag indicating if all module parameters should be randomly initialized
        """
        # Register the implementation specifics by calling parent class
        super().__init__(config_params, module_id, parent_mutation, dtype)

        # Register the module parameters
        self.merge_method = merge_method
        self.units = units
        self.activation = activation
        self.kernel_init = kernel_init
        self.bias_init = bias_init
        self.dropout_flag = dropout_flag
        self.dropout_rate = dropout_rate

        # If self initialization flag is provided, initialize the module parameters as they are currently set to None
        if self_initialization_flag:
            self._initialize()

    def __str__(self) -> str:
        """
        @return: string representation of the module
        """
        return "CoDeepNEAT DENSE Module | ID: {:>6} | Fitness: {:>6} | Units: {:>4} | Activ: {:>6} | Dropout: {:>4}" \
            .format('#' + str(self.module_id),
                    self.fitness,
                    self.units,
                    self.activation,
                    "None" if self.dropout_flag is False else self.dropout_rate)

    def _initialize(self):
        """
        Randomly initialize all parameters of the module based on the range of parameters allowed by the config_params
        variable.
        """
        # Uniform randomly set module parameters
        self.merge_method = random.choice(self.config_params['merge_method'])
        self.merge_method['config']['dtype'] = self.dtype
        random_units = random.randint(self.config_params['units']['min'],
                                      self.config_params['units']['max'])
        # Snap the random value to the config-defined step grid
        self.units = round_with_step(random_units,
                                     self.config_params['units']['min'],
                                     self.config_params['units']['max'],
                                     self.config_params['units']['step'])
        self.activation = random.choice(self.config_params['activation'])
        self.kernel_init = random.choice(self.config_params['kernel_init'])
        self.bias_init = random.choice(self.config_params['bias_init'])
        self.dropout_flag = random.random() < self.config_params['dropout_flag']
        random_dropout_rate = random.uniform(self.config_params['dropout_rate']['min'],
                                             self.config_params['dropout_rate']['max'])
        self.dropout_rate = round_with_step(random_dropout_rate,
                                            self.config_params['dropout_rate']['min'],
                                            self.config_params['dropout_rate']['max'],
                                            self.config_params['dropout_rate']['step'])

    def create_module_layers(self) -> (tf.keras.layers.Layer, ...):
        """
        Instantiate TF layers with their respective configuration that are represented by the current module
        configuration. Return the instantiated module layers in their respective order as a tuple.
        @return: tuple of instantiated TF layers represented by the module configuration.
        """
        # Create the basic keras Dense layer, needed in all variants of the module
        dense_layer = tf.keras.layers.Dense(units=self.units,
                                            activation=self.activation,
                                            kernel_initializer=self.kernel_init,
                                            bias_initializer=self.bias_init,
                                            dtype=self.dtype)

        # If no dropout flag present, return solely the created dense layer as iterable. If dropout flag present,
        # return the dense layer together with the dropout layer
        if not self.dropout_flag:
            return (dense_layer,)
        else:
            dropout_layer = tf.keras.layers.Dropout(rate=self.dropout_rate,
                                                    dtype=self.dtype)
            return dense_layer, dropout_layer

    def create_downsampling_layer(self, in_shape, out_shape) -> tf.keras.layers.Layer:
        """
        Downsampling is not supported for this module type.
        @raise NotImplementedError: always
        """
        raise NotImplementedError("Downsampling has not yet been implemented for DenseDropout Modules")

    def create_mutation(self,
                        offspring_id,
                        max_degree_of_mutation) -> CoDeepNEATModuleDenseDropout:
        """
        Create mutated DenseDropout module and return it. Categorical parameters are chosen randomly from all available
        values. Sortable parameters are perturbed through a random normal distribution with the current value as mean
        and the config specified stddev
        @param offspring_id: int of unique module ID of the offspring
        @param max_degree_of_mutation: float between 0 and 1 specifying the maximum degree of mutation
        @return: instantiated DenseDropout module with mutated parameters
        """
        # Copy the parameters of this parent module for the parameters of the offspring
        offspring_params = {'merge_method': self.merge_method,
                            'units': self.units,
                            'activation': self.activation,
                            'kernel_init': self.kernel_init,
                            'bias_init': self.bias_init,
                            'dropout_flag': self.dropout_flag,
                            'dropout_rate': self.dropout_rate}

        # Create the dict that keeps track of the mutations occuring for the offspring
        parent_mutation = {'parent_id': self.module_id,
                           'mutation': 'mutation',
                           'mutated_params': dict()}

        # Determine exact integer amount of the 7 parameters to be mutated (ceil of the requested fraction)
        param_mutation_count = math.ceil(max_degree_of_mutation * 7)

        # Uniform randomly choose the parameters to be mutated
        parameters_to_mutate = random.sample(range(7), k=param_mutation_count)

        # Mutate offspring parameters. Categorical parameters are chosen randomly from all available values. Sortable
        # parameters are perturbed through a random normal distribution with the current value as mean and the config
        # specified stddev
        for param_to_mutate in parameters_to_mutate:
            if param_to_mutate == 0:
                offspring_params['merge_method'] = random.choice(self.config_params['merge_method'])
                parent_mutation['mutated_params']['merge_method'] = self.merge_method
            elif param_to_mutate == 1:
                perturbed_units = int(np.random.normal(loc=self.units,
                                                       scale=self.config_params['units']['stddev']))
                offspring_params['units'] = round_with_step(perturbed_units,
                                                            self.config_params['units']['min'],
                                                            self.config_params['units']['max'],
                                                            self.config_params['units']['step'])
                parent_mutation['mutated_params']['units'] = self.units
            elif param_to_mutate == 2:
                offspring_params['activation'] = random.choice(self.config_params['activation'])
                parent_mutation['mutated_params']['activation'] = self.activation
            elif param_to_mutate == 3:
                offspring_params['kernel_init'] = random.choice(self.config_params['kernel_init'])
                parent_mutation['mutated_params']['kernel_init'] = self.kernel_init
            elif param_to_mutate == 4:
                offspring_params['bias_init'] = random.choice(self.config_params['bias_init'])
                parent_mutation['mutated_params']['bias_init'] = self.bias_init
            elif param_to_mutate == 5:
                offspring_params['dropout_flag'] = not self.dropout_flag
                parent_mutation['mutated_params']['dropout_flag'] = self.dropout_flag
            else:  # param_to_mutate == 6:
                perturbed_dropout_rate = np.random.normal(loc=self.dropout_rate,
                                                          scale=self.config_params['dropout_rate']['stddev'])
                offspring_params['dropout_rate'] = round_with_step(perturbed_dropout_rate,
                                                                   self.config_params['dropout_rate']['min'],
                                                                   self.config_params['dropout_rate']['max'],
                                                                   self.config_params['dropout_rate']['step'])
                parent_mutation['mutated_params']['dropout_rate'] = self.dropout_rate

        return CoDeepNEATModuleDenseDropout(config_params=self.config_params,
                                            module_id=offspring_id,
                                            parent_mutation=parent_mutation,
                                            dtype=self.dtype,
                                            **offspring_params)

    def create_crossover(self,
                         offspring_id,
                         less_fit_module,
                         max_degree_of_mutation) -> CoDeepNEATModuleDenseDropout:
        """
        Create crossed over DenseDropout module and return it. Carry over parameters of fitter parent for categorical
        parameters and calculate parameter average between both modules for sortable parameters
        @param offspring_id: int of unique module ID of the offspring
        @param less_fit_module: second DenseDropout module with lower fitness
        @param max_degree_of_mutation: float between 0 and 1 specifying the maximum degree of mutation
        @return: instantiated DenseDropout module with crossed over parameters
        """
        # Create offspring parameters by carrying over parameters of fitter parent for categorical parameters and
        # calculating parameter average between both modules for sortable parameters
        offspring_params = dict()

        # Create the dict that keeps track of the mutations occuring for the offspring
        parent_mutation = {'parent_id': (self.module_id, less_fit_module.get_id()),
                           'mutation': 'crossover'}

        offspring_params['merge_method'] = self.merge_method
        offspring_params['units'] = round_with_step(int((self.units + less_fit_module.units) / 2),
                                                    self.config_params['units']['min'],
                                                    self.config_params['units']['max'],
                                                    self.config_params['units']['step'])
        offspring_params['activation'] = self.activation
        offspring_params['kernel_init'] = self.kernel_init
        offspring_params['bias_init'] = self.bias_init
        offspring_params['dropout_flag'] = self.dropout_flag
        offspring_params['dropout_rate'] = round_with_step((self.dropout_rate + less_fit_module.dropout_rate) / 2,
                                                           self.config_params['dropout_rate']['min'],
                                                           self.config_params['dropout_rate']['max'],
                                                           self.config_params['dropout_rate']['step'])

        return CoDeepNEATModuleDenseDropout(config_params=self.config_params,
                                            module_id=offspring_id,
                                            parent_mutation=parent_mutation,
                                            dtype=self.dtype,
                                            **offspring_params)

    def serialize(self) -> dict:
        """
        @return: serialized constructor variables of the module as json compatible dict
        """
        return {
            'module_type': self.get_module_type(),
            'module_id': self.module_id,
            'parent_mutation': self.parent_mutation,
            'merge_method': self.merge_method,
            'units': self.units,
            'activation': self.activation,
            'kernel_init': self.kernel_init,
            'bias_init': self.bias_init,
            'dropout_flag': self.dropout_flag,
            'dropout_rate': self.dropout_rate
        }

    def get_distance(self, other_module) -> float:
        """
        Calculate distance between 2 DenseDropout modules by inspecting each parameter, calculating the congruence
        between each and eventually averaging the out the congruence. The distance is returned as the average
        congruences distance to 1.0. The congruence of continuous parameters is calculated by their relative distance.
        The congruence of categorical parameters is either 1.0 in case they are the same or it's 1 divided to the amount
        of possible values for that specific parameter. Return the calculated distance.
        @param other_module: second DenseDropout module to which the distance has to be calculated
        @return: float between 0 and 1. High values indicating difference, low values indicating similarity
        """
        congruence_list = list()
        if self.merge_method == other_module.merge_method:
            congruence_list.append(1.0)
        else:
            congruence_list.append(1 / len(self.config_params['merge_method']))
        if self.units >= other_module.units:
            congruence_list.append(other_module.units / self.units)
        else:
            congruence_list.append(self.units / other_module.units)
        if self.activation == other_module.activation:
            congruence_list.append(1.0)
        else:
            congruence_list.append(1 / len(self.config_params['activation']))
        if self.kernel_init == other_module.kernel_init:
            congruence_list.append(1.0)
        else:
            congruence_list.append(1 / len(self.config_params['kernel_init']))
        if self.bias_init == other_module.bias_init:
            congruence_list.append(1.0)
        else:
            congruence_list.append(1 / len(self.config_params['bias_init']))
        # Bug fix: the boolean dropout flag must contribute a congruence of 1.0
        # when both modules agree and 0.0 when they differ. The original code
        # appended abs(diff), which inverted the scale relative to every other
        # parameter and made two identical modules register a non-zero distance.
        congruence_list.append(1 - abs(self.dropout_flag - other_module.dropout_flag))
        # NOTE(review): a dropout_rate of exactly 0 in both modules would divide
        # by zero here — presumably the configured minimum is positive; confirm.
        if self.dropout_rate >= other_module.dropout_rate:
            congruence_list.append(other_module.dropout_rate / self.dropout_rate)
        else:
            congruence_list.append(self.dropout_rate / other_module.dropout_rate)

        # Return the distance as the distance of the average congruence to the perfect congruence of 1.0
        return round(1.0 - statistics.mean(congruence_list), 4)

    def get_module_type(self) -> str:
        """
        @return: string identifier of this module type
        """
        return 'DenseDropout'
| [
"random.randint",
"tfne.helper_functions.round_with_step",
"tensorflow.keras.layers.Dense",
"random.uniform",
"math.ceil",
"tensorflow.keras.layers.Dropout",
"random.choice",
"random.random",
"statistics.mean",
"numpy.random.normal"
] | [((3214, 3263), 'random.choice', 'random.choice', (["self.config_params['merge_method']"], {}), "(self.config_params['merge_method'])\n", (3227, 3263), False, 'import random\n'), ((3345, 3436), 'random.randint', 'random.randint', (["self.config_params['units']['min']", "self.config_params['units']['max']"], {}), "(self.config_params['units']['min'], self.config_params[\n 'units']['max'])\n", (3359, 3436), False, 'import random\n'), ((3491, 3634), 'tfne.helper_functions.round_with_step', 'round_with_step', (['random_units', "self.config_params['units']['min']", "self.config_params['units']['max']", "self.config_params['units']['step']"], {}), "(random_units, self.config_params['units']['min'], self.\n config_params['units']['max'], self.config_params['units']['step'])\n", (3506, 3634), False, 'from tfne.helper_functions import round_with_step\n'), ((3767, 3814), 'random.choice', 'random.choice', (["self.config_params['activation']"], {}), "(self.config_params['activation'])\n", (3780, 3814), False, 'import random\n'), ((3842, 3890), 'random.choice', 'random.choice', (["self.config_params['kernel_init']"], {}), "(self.config_params['kernel_init'])\n", (3855, 3890), False, 'import random\n'), ((3916, 3962), 'random.choice', 'random.choice', (["self.config_params['bias_init']"], {}), "(self.config_params['bias_init'])\n", (3929, 3962), False, 'import random\n'), ((4074, 4179), 'random.uniform', 'random.uniform', (["self.config_params['dropout_rate']['min']", "self.config_params['dropout_rate']['max']"], {}), "(self.config_params['dropout_rate']['min'], self.\n config_params['dropout_rate']['max'])\n", (4088, 4179), False, 'import random\n'), ((4248, 4424), 'tfne.helper_functions.round_with_step', 'round_with_step', (['random_dropout_rate', "self.config_params['dropout_rate']['min']", "self.config_params['dropout_rate']['max']", "self.config_params['dropout_rate']['step']"], {}), "(random_dropout_rate, self.config_params['dropout_rate'][\n 'min'], 
self.config_params['dropout_rate']['max'], self.config_params[\n 'dropout_rate']['step'])\n", (4263, 4424), False, 'from tfne.helper_functions import round_with_step\n'), ((5043, 5206), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.units', 'activation': 'self.activation', 'kernel_initializer': 'self.kernel_init', 'bias_initializer': 'self.bias_init', 'dtype': 'self.dtype'}), '(units=self.units, activation=self.activation,\n kernel_initializer=self.kernel_init, bias_initializer=self.bias_init,\n dtype=self.dtype)\n', (5064, 5206), True, 'import tensorflow as tf\n'), ((7612, 7649), 'math.ceil', 'math.ceil', (['(max_degree_of_mutation * 7)'], {}), '(max_degree_of_mutation * 7)\n', (7621, 7649), False, 'import math\n'), ((12848, 13058), 'tfne.helper_functions.round_with_step', 'round_with_step', (['((self.dropout_rate + less_fit_module.dropout_rate) / 2)', "self.config_params['dropout_rate']['min']", "self.config_params['dropout_rate']['max']", "self.config_params['dropout_rate']['step']"], {}), "((self.dropout_rate + less_fit_module.dropout_rate) / 2,\n self.config_params['dropout_rate']['min'], self.config_params[\n 'dropout_rate']['max'], self.config_params['dropout_rate']['step'])\n", (12863, 13058), False, 'from tfne.helper_functions import round_with_step\n'), ((3991, 4006), 'random.random', 'random.random', ([], {}), '()\n', (4004, 4006), False, 'import random\n'), ((5669, 5734), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'self.dropout_rate', 'dtype': 'self.dtype'}), '(rate=self.dropout_rate, dtype=self.dtype)\n', (5692, 5734), True, 'import tensorflow as tf\n'), ((8198, 8247), 'random.choice', 'random.choice', (["self.config_params['merge_method']"], {}), "(self.config_params['merge_method'])\n", (8211, 8247), False, 'import random\n'), ((16468, 16500), 'statistics.mean', 'statistics.mean', (['congruence_list'], {}), '(congruence_list)\n', (16483, 16500), False, 'import statistics\n'), ((8589, 
8735), 'tfne.helper_functions.round_with_step', 'round_with_step', (['perturbed_units', "self.config_params['units']['min']", "self.config_params['units']['max']", "self.config_params['units']['step']"], {}), "(perturbed_units, self.config_params['units']['min'], self.\n config_params['units']['max'], self.config_params['units']['step'])\n", (8604, 8735), False, 'from tfne.helper_functions import round_with_step\n'), ((8411, 8488), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'self.units', 'scale': "self.config_params['units']['stddev']"}), "(loc=self.units, scale=self.config_params['units']['stddev'])\n", (8427, 8488), True, 'import numpy as np\n'), ((9071, 9118), 'random.choice', 'random.choice', (["self.config_params['activation']"], {}), "(self.config_params['activation'])\n", (9084, 9118), False, 'import random\n'), ((9290, 9338), 'random.choice', 'random.choice', (["self.config_params['kernel_init']"], {}), "(self.config_params['kernel_init'])\n", (9303, 9338), False, 'import random\n'), ((9510, 9556), 'random.choice', 'random.choice', (["self.config_params['bias_init']"], {}), "(self.config_params['bias_init'])\n", (9523, 9556), False, 'import random\n'), ((9919, 10015), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'self.dropout_rate', 'scale': "self.config_params['dropout_rate']['stddev']"}), "(loc=self.dropout_rate, scale=self.config_params[\n 'dropout_rate']['stddev'])\n", (9935, 10015), True, 'import numpy as np\n'), ((10120, 10299), 'tfne.helper_functions.round_with_step', 'round_with_step', (['perturbed_dropout_rate', "self.config_params['dropout_rate']['min']", "self.config_params['dropout_rate']['max']", "self.config_params['dropout_rate']['step']"], {}), "(perturbed_dropout_rate, self.config_params['dropout_rate'][\n 'min'], self.config_params['dropout_rate']['max'], self.config_params[\n 'dropout_rate']['step'])\n", (10135, 10299), False, 'from tfne.helper_functions import round_with_step\n')] |
from numpy import array, pi, sqrt, exp, zeros
# Model parameters for the variance-tree demo below.
T = 1.0  # time horizon (years)
v0 = 0.01  # initial variance level — presumably CIR/Heston-style; see compute_v
kappa = 2.0  # mean rev
theta = 0.01  # long term
omega = 0.2  # vol
N = 3  # number of time steps in the tree
def compute_f(r, omega):
    """Transform a variance value r into tree coordinate f = 2*sqrt(r)/omega."""
    root = sqrt(r)
    return 2 * root / omega
def compute_v(R, omega):
    """Inverse of compute_f: map coordinate R back to a variance, clipping
    non-positive coordinates to zero."""
    if R <= 0:
        return 0.0
    return R ** 2 * omega ** 2 / 4.0
def build_volatility_tree(T, v0, kappa, theta, omega, N):
    """Build a recombining binomial tree for a mean-reverting variance process.

    The variance is evolved in the transformed coordinate f = 2*sqrt(v)/omega
    (a Nelson-Ramaswamy-style transform — presumably for a CIR/Heston variance;
    confirm against the source model), where up/down moves are +/- sqrt(dt).
    Branch probabilities are then chosen so the tree matches the drift
    kappa*(theta - v)*dt, allowing multi-node jumps when the drift is large.

    @param T: time horizon
    @param v0: initial variance
    @param kappa: mean-reversion speed
    @param theta: long-term variance level
    @param omega: volatility-of-variance parameter
    @param N: number of time steps
    @return: list [V, pu_f, pd_f, f_up, f_down] of (N+1)x(N+1) arrays:
        node variances, up/down probabilities, and up/down node offsets
    """
    div_by_zero_counter = 0
    f = zeros((N+1, N+1))
    f[0, 0] = compute_f(v0, omega)
    dt = float(T/N)
    sqrt_dt = sqrt(dt)
    V = zeros((N+1, N+1))
    V[0, 0] = compute_v(f[0, 0], omega)
    # First step laid out explicitly; subsequent steps fill the grid below.
    f[1, 0] = f[0, 0]-sqrt_dt
    f[1, 1] = f[0, 0]+sqrt_dt
    V[1, 0] = compute_v(f[1, 0], omega)
    V[1, 1] = compute_v(f[1, 1], omega)
    for i in range(1, N):
        for j in range(i+1):
            # Recombining lattice in f-space: each node spawns +/- sqrt(dt).
            f[i+1, j] = f[i, j] - sqrt_dt
            f[i+1, j+1] = f[i, j] + sqrt_dt
            V[i+1, j] = compute_v(f[i+1, j], omega)
            V[i+1, j+1] = compute_v(f[i+1, j+1], omega)
    f_down = zeros((N+1, N+1))
    f_up = zeros((N+1, N+1))
    pu_f = zeros((N+1, N+1))
    pd_f = zeros((N+1, N+1))
    for i in range(0, N):
        for j in range(i+1):
            # /*Compute mu_f*/  (drift of the variance at this node)
            v_curr = V[i][j]
            mu_r = kappa*(theta-v_curr)
            # Walk down until the next-step node lies below the drifted value.
            # NOTE(review): V[i+1, j-z] is evaluated before the j-z >= 0 bound
            # check, so a wrapped negative index is compared once; the `and`
            # still terminates the loop, but reordering the conditions would be
            # safer — confirm intent.
            z = 0
            while V[i, j] + mu_r*dt < V[i+1, j-z] and j-z >= 0:
                z += 1
            f_down[i, j] = -z
            Rd = V[i+1, j-z]  # the next low vertice we can reach
            z = 0
            while V[i, j] + mu_r*dt > V[i+1, j+z] and j+z <= i:
                z += 1
            Ru = V[i+1, j+z]  # the next high vertice we can reach
            f_up[i, j] = z
            if Ru == Rd:
                # NOTE(review): the division below still executes when Ru == Rd
                # (numpy emits inf/nan rather than raising); the counter only
                # records the event — confirm this is acceptable.
                div_by_zero_counter += 1
            # Probability of the up-branch from drift-matching interpolation.
            pu_f[i, j] = (V[i, j]+mu_r*dt-Rd)/(Ru-Rd)
            # Clamp to the tree edges: force a pure up- or down-move when the
            # interpolation targets lie outside the reachable node range.
            if Ru-1.e-9 > V[i+1, i+1] or j+f_up[i][j] > i+1:
                pu_f[i][j] = 1
                f_up[i][j] = i+1-j
                f_down[i][j] = i-j
            if Rd+1.e-9 < V[i+1, 0] or j+f_down[i, j] < 0:
                pu_f[i, j] = 0
                f_up[i, j] = 1 - j
                f_down[i, j] = 0 - j
            pd_f[i, j] = 1 - pu_f[i][j]
    return [V, pu_f, pd_f, f_up, f_down]
def calculate_bond_price(T, v0, kappa, theta, omega, N):
    """Backward-induct a price over the variance tree.

    The terminal payoff at step N equals the node variance V[N, k]; interior
    node values are the probability-weighted average of the reachable up- and
    down-successors (no discounting is applied in the visible code).

    @param T: time horizon
    @param v0: initial variance
    @param kappa: mean-reversion speed
    @param theta: long-term variance level
    @param omega: volatility-of-variance parameter
    @param N: number of time steps
    @return: value at the root node (step 0)
    """
    all_trees = build_volatility_tree(T, v0, kappa, theta, omega, N)
    v = all_trees[0]
    pu_f = all_trees[1]
    pd_f = all_trees[2]
    f_up = all_trees[3]
    f_down = all_trees[4]
    bond_price_matrix = zeros((N+1, N+1))
    # Terminal condition: payoff equals the node variance.
    for k in range(N+1):
        bond_price_matrix[N, k] = v[N, k]
    for n in range(N-1, -1, -1):
        for k in range(n+1):
            # Bug fix: f_up/f_down are float64 arrays, so k + f_up[n, k] is a
            # float, and modern NumPy rejects non-integer indices. Cast to int
            # (the stored offsets are exact integers).
            k_u = int(k + f_up[n, k])
            k_d = int(k + f_down[n, k])
            bond_price_matrix[n, k] = pu_f[n, k] * bond_price_matrix[n+1, k_u] + pd_f[n, k] * bond_price_matrix[n+1, k_d]
    return bond_price_matrix[0, 0]
# Demo: price using the module-level parameters defined above.
print(calculate_bond_price(T, v0, kappa, theta, omega, N))
| [
"numpy.zeros",
"numpy.sqrt"
] | [((394, 415), 'numpy.zeros', 'zeros', (['(N + 1, N + 1)'], {}), '((N + 1, N + 1))\n', (399, 415), False, 'from numpy import array, pi, sqrt, exp, zeros\n'), ((481, 489), 'numpy.sqrt', 'sqrt', (['dt'], {}), '(dt)\n', (485, 489), False, 'from numpy import array, pi, sqrt, exp, zeros\n'), ((498, 519), 'numpy.zeros', 'zeros', (['(N + 1, N + 1)'], {}), '((N + 1, N + 1))\n', (503, 519), False, 'from numpy import array, pi, sqrt, exp, zeros\n'), ((960, 981), 'numpy.zeros', 'zeros', (['(N + 1, N + 1)'], {}), '((N + 1, N + 1))\n', (965, 981), False, 'from numpy import array, pi, sqrt, exp, zeros\n'), ((989, 1010), 'numpy.zeros', 'zeros', (['(N + 1, N + 1)'], {}), '((N + 1, N + 1))\n', (994, 1010), False, 'from numpy import array, pi, sqrt, exp, zeros\n'), ((1018, 1039), 'numpy.zeros', 'zeros', (['(N + 1, N + 1)'], {}), '((N + 1, N + 1))\n', (1023, 1039), False, 'from numpy import array, pi, sqrt, exp, zeros\n'), ((1047, 1068), 'numpy.zeros', 'zeros', (['(N + 1, N + 1)'], {}), '((N + 1, N + 1))\n', (1052, 1068), False, 'from numpy import array, pi, sqrt, exp, zeros\n'), ((2417, 2438), 'numpy.zeros', 'zeros', (['(N + 1, N + 1)'], {}), '((N + 1, N + 1))\n', (2422, 2438), False, 'from numpy import array, pi, sqrt, exp, zeros\n'), ((177, 184), 'numpy.sqrt', 'sqrt', (['r'], {}), '(r)\n', (181, 184), False, 'from numpy import array, pi, sqrt, exp, zeros\n')] |
from IPython.display import FileLinks
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def setup_individual_obs_df(obs_df):
    """
    Standardizes adults and pediatrics observation files for clean processing
    in GrowthViz notebooks.

    Renames clean_res -> clean_value, converts agedays to fractional years,
    adds a categorical copy of the cleaning result and a boolean `include`
    column, and returns only the standard column set. The input frame is not
    modified.
    """
    out = obs_df.copy()
    out = out.rename(columns={"clean_res": "clean_value"})
    # Age in years; 365.25 accounts for leap years.
    out["age"] = out["agedays"] / 365.25
    out = out.drop(columns=["agedays"])
    out["clean_cat"] = out["clean_value"].astype("category")
    out["include"] = out["clean_value"] == "Include"
    keep = [
        "id",
        "subjid",
        "param",
        "measurement",
        "age",
        "sex",
        "clean_value",
        "clean_cat",
        "include",
    ]
    return out[keep]
def setup_percentiles_adults(percentiles):
    """
    Processes adults percentiles from CDC.

    Expands the CDC decade-level rows into one row per year of age, derives
    the standard deviation from the standard error of the mean, recodes sex
    to the growthcleanr convention (0=male, 1=female), and smooths the
    percentile columns at decade boundaries.

    Parameters:
        percentiles: DataFrame of CDC adult percentile rows, with decade
            bounds in "Age_low"/"Age_high" plus "Sex", "Measure",
            "Standard error of the mean", "Number of examined persons",
            "Mean" and P5..P95 columns

    Returns:
        DataFrame with one row per (param, Sex, age) and the Mean, sd and
        P5..P95 columns
    """
    # expand decade rows into one row per year
    pct = percentiles[
        percentiles["Age (All race and Hispanic-origin groups)"] != "20 and over"
    ].copy()
    # adults start at 18, so stretch the first decade band down from 20
    pct.loc[pct["Age_low"] == 20, "Age_low"] = 18
    range_col = pct.apply(lambda row: row.Age_high - row.Age_low + 1, axis=1)
    pct = pct.assign(range=range_col.values)
    # repeat each decade row once per year of age it covers
    dta = pd.DataFrame(
        (np.repeat(pct.values, pct["range"], axis=0)), columns=pct.columns
    )
    # cumcount gives the year offset within each repeated decade block
    dta["count"] = dta.groupby(["Sex", "Measure", "Age_low", "Age_high"]).cumcount()
    dta["age"] = dta["Age_low"] + dta["count"]
    # add standard deviation and other values: sd = SEM * sqrt(sample size)
    dta["sqrt"] = np.sqrt(pd.to_numeric(dta["Number of examined persons"]))
    dta["sd"] = dta["Standard error of the mean"] * dta["sqrt"]
    # growthcleanr codes sex as 0=male, 1=female
    dta["Sex"] = dta.Sex.replace("Male", 0).replace("Female", 1)
    dta.rename(columns={"Measure": "param"}, inplace=True)
    dta.drop(
        columns=[
            "Age (All race and Hispanic-origin groups)",
            "Age_low",
            "sqrt",
            "Standard error of the mean",
            "Age_high",
            "range",
            "count",
            "Number of examined persons",
        ],
        inplace=True,
    )
    # smooth percentiles between X9-(X+1)1 (i.e., 29-31): rows whose age is
    # an exact decade get the average of that row and the preceding year's
    dta["decade"] = np.where(dta["age"] == (round(dta["age"].astype(float), -1)), 1, 0)
    mcol_list = [
        "Mean",
        "sd",
        "P5",
        "P10",
        "P15",
        "P25",
        "P50",
        "P75",
        "P85",
        "P90",
        "P95",
    ]
    for col in mcol_list:
        dta[col] = np.where(
            (dta["decade"] == 1) & (dta["age"] < 110),
            (dta[col] + dta[col].shift(1)) / 2,
            dta[col],
        )
    dta.drop(columns={"decade"}, inplace=True)
    col_list = ["param", "Sex", "age"] + mcol_list
    dta = dta.reindex(columns=col_list)
    return dta
def setup_percentiles_pediatrics(percentiles_file):
    """
    Loads and processes pediatric growth percentiles published by the CDC.

    Parameters:
        percentiles_file: file name under growthviz-data/ext/ to read

    Returns:
        DataFrame with an added fractional-year "age" column and sex recoded
        to the growthcleanr convention.
    """
    column_types = {
        "Agemos": float,
        "P5": float,
        "P50": float,
        "P95": float,
        "L": float,
        "M": float,
        "S": float,
        "Sex": int,
    }
    pct = pd.read_csv(f"growthviz-data/ext/{percentiles_file}", dtype=column_types)
    # convert age in months to fractional years
    pct["age"] = pct["Agemos"] / 12
    # Values by CDC (1=male; 2=female) differ from growthcleanr, which uses
    # 0 (male) or 1 (female); shift to align with growthcleanr
    pct["Sex"] = pct["Sex"] - 1
    return pct
def keep_age_range(df, mode):
    """
    Restricts patient data to the acceptable age range for the notebooks,
    plotting a bar chart of observation counts by age bucket along the way.

    Parameters:
        df: DataFrame with an "age" column (years) and a "subjid" column
        mode: "adults" or "pediatrics" -- selects the bucket labels and the
            age range that is kept

    Returns:
        The rows of df whose age is in [18, 80] (adults) or [2, 25]
        (pediatrics), inclusive.
    """
    obs_grp = df

    # create age buckets for chart
    def label_excl_grp(row):
        if mode == "adults":
            if row["age"] < 18:
                return " Below 18 (Exclude)"
            if (row["age"] >= 18) & (row["age"] < 30):
                return " 18 to < 30"
            if (row["age"] >= 30) & (row["age"] < 40):
                return " 30 to < 40"
            if (row["age"] >= 40) & (row["age"] < 50):
                return " 40 to < 50"
            if (row["age"] >= 50) & (row["age"] < 60):
                return " 50 to < 60"
            if (row["age"] >= 60) & (row["age"] <= 65):
                return " 60 to 65"
            if (row["age"] > 65) & (row["age"] <= 80):
                return " > 65 to 80 (Not Recommended)"
            if row["age"] > 80:
                return "Above 80 (Exclude)"
        elif mode == "pediatrics":
            if row["age"] < 2:
                return " Below 2 (Exclude)"
            if (row["age"] >= 2) & (row["age"] < 5):
                return " 02 to < 05"
            if (row["age"] >= 5) & (row["age"] < 8):
                return " 05 to < 08"
            if (row["age"] >= 8) & (row["age"] < 11):
                return " 08 to < 11"
            if (row["age"] >= 11) & (row["age"] < 14):
                return " 11 to < 14"
            if (row["age"] >= 14) & (row["age"] < 17):
                return " 14 to < 17"
            if (row["age"] >= 17) & (row["age"] <= 20):
                return " 17 to 20"
            if (row["age"] > 20) & (row["age"] <= 25):
                return " > 20 to 25 (Not Recommended)"
            if row["age"] > 25:
                return "Above 25 (Exclude)"

    label_excl_col = obs_grp.apply(lambda row: label_excl_grp(row), axis=1)
    obs_grp = obs_grp.assign(cat=label_excl_col.values)
    obs_grp = (
        obs_grp.groupby("cat")["subjid"]
        .count()
        .reset_index()
        .sort_values("cat", ascending=True)
    )
    # assign bar colors: red/hatched for excluded buckets, orange/striped
    # for not-recommended, plain blue for the kept ranges
    cat_list = obs_grp["cat"].values.tolist()
    color_list = []
    patterns = []
    for n in cat_list:
        if ("Below" in n) | ("Above" in n):
            color_list = color_list + ["C3"]
            patterns = patterns + ["x"]
        if ("to" in n) & ("Not" not in n):
            color_list = color_list + ["C0"]
            patterns = patterns + [""]
        if "Not" in n:
            color_list = color_list + ["orange"]
            patterns = patterns + ["/"]
    # create chart
    fig, ax1 = plt.subplots()
    obs_grp_plot = plt.bar(
        obs_grp["cat"],
        obs_grp["subjid"],
        color=color_list,
    )
    for bar, pattern in zip(obs_grp_plot, patterns):
        bar.set_hatch(pattern)
    plt.xticks(rotation=45, ha="right")
    ax1.get_yaxis().set_major_formatter(
        mpl.ticker.FuncFormatter(lambda x, p: format(int(x), ","))
    )
    # return specified age range
    # BUGFIX: inclusive="both" replaces the boolean form inclusive=True,
    # which was deprecated in pandas 1.3 and removed in pandas 2.0
    if mode == "adults":
        return df[df["age"].between(18, 80, inclusive="both")]
    elif mode == "pediatrics":
        return df[df["age"].between(2, 25, inclusive="both")]
def setup_merged_df(obs_df):
    """
    Merges together weight and height data for calculating BMI.

    Parameters:
        obs_df: DataFrame in the format output by setup_individual_obs_df

    Returns:
        One row per (subjid, age, sex) with height/weight values, their
        category and include flags, the computed bmi, the rounded age, and
        an include_both flag that is True only when both measurements are
        includable.
    """
    obs_df = obs_df.assign(height=obs_df["measurement"], weight=obs_df["measurement"])
    # blank out the value that does not apply to each row
    # BUGFIX: np.nan -- the np.NaN alias was removed in NumPy 2.0
    obs_df.loc[obs_df.param == "WEIGHTKG", "height"] = np.nan
    obs_df.loc[obs_df.param == "HEIGHTCM", "weight"] = np.nan
    heights = obs_df[obs_df.param == "HEIGHTCM"]
    weights = obs_df[obs_df.param == "WEIGHTKG"]
    # outer merge keeps subjects where only one of the two measurements exists
    merged = heights.merge(weights, on=["subjid", "age", "sex"], how="outer")
    only_needed_columns = merged.drop(
        columns=[
            "param_x",
            "measurement_x",
            "clean_value_x",
            "weight_x",
            "id_y",
            "param_y",
            "measurement_y",
            "clean_value_y",
            "height_y",
        ]
    )
    clean_column_names = only_needed_columns.rename(
        columns={
            "clean_cat_x": "height_cat",
            "include_x": "include_height",
            "height_x": "height",
            "clean_cat_y": "weight_cat",
            "include_y": "include_weight",
            "weight_y": "weight",
            "reason_y": "reason",
            "id_x": "id",
        }
    )
    # BMI = kg / m^2 (height is stored in cm)
    clean_column_names["bmi"] = clean_column_names["weight"] / (
        (clean_column_names["height"] / 100) ** 2
    )
    clean_column_names["rounded_age"] = np.around(clean_column_names.age)
    clean_column_names["include_both"] = (
        clean_column_names["include_height"] & clean_column_names["include_weight"]
    )
    return clean_column_names
def exclusion_information(obs):
    """
    Provides a count and percentage of growthcleanr categories by measurement type (param).

    Parameters:
        obs: a DataFrame, in the format output by setup_individual_obs_df

    Returns:
        A styled DataFrame with the counts and percentages
    """
    # pivot to rows = clean_cat category, columns = param, values = counts
    exc = (
        obs.groupby(["param", "clean_cat"])
        .agg({"id": "count"})
        .reset_index()
        .pivot(index="clean_cat", columns="param", values="id")
    )
    exc["height percent"] = exc["HEIGHTCM"] / exc["HEIGHTCM"].sum() * 100
    exc["weight percent"] = exc["WEIGHTKG"] / exc["WEIGHTKG"].sum() * 100
    # categories absent for a param produce NaN counts; treat them as zero
    exc = exc.fillna(0)
    exc["total"] = exc["HEIGHTCM"] + exc["WEIGHTKG"]
    exc = exc[["HEIGHTCM", "height percent", "WEIGHTKG", "weight percent", "total"]]
    exc = exc.sort_values("total", ascending=False)
    # counts use a callable formatter; percentages use format strings
    return exc.style.format(
        {
            "HEIGHTCM": "{:.0f}".format,
            "height percent": "{:.2f}%",
            "WEIGHTKG": "{:.0f}".format,
            "weight percent": "{:.2f}%",
        }
    )
def label_incl(row):
    """
    Categorizes a BMI row as "Include", "Implausible", or "Only Wt or Ht"
    (the latter when only one of the two measurements was available).
    """
    # equality (not truthiness) is deliberate: include_both may hold
    # non-boolean values after an outer merge
    if row["include_both"] == True:  # noqa: E712
        return "Include"
    implausible = (row["weight_cat"] == "Implausible") | (
        row["height_cat"] == "Implausible"
    )
    return "Implausible" if implausible else "Only Wt or Ht"
def setup_bmi_adults(merged_df, obs):
    """
    Appends derived BMI rows onto the adults weight and height observations.

    Parameters:
        merged_df: DataFrame output by setup_merged_df (one row per subject/age)
        obs: observation DataFrame to append the BMI rows to

    Returns:
        obs with one additional row per merged_df row, param set to "BMI"
        and measurement set to the computed bmi.
    """
    keep_cols = [
        "id",
        "subjid",
        "sex",
        "age",
        "rounded_age",
        "bmi",
        "weight_cat",
        "height_cat",
        "include_both",
    ]
    bmi_rows = merged_df[keep_cols]
    # classify each BMI row via label_incl (Include / Implausible / Only Wt or Ht)
    bmi_rows = bmi_rows.assign(clean_cat=bmi_rows.apply(label_incl, axis=1).values)
    bmi_rows["param"] = "BMI"
    bmi_rows["clean_value"] = bmi_rows["clean_cat"]
    bmi_rows = bmi_rows.rename(columns={"bmi": "measurement"})
    return pd.concat([obs, bmi_rows])
def data_frame_names(da_locals):
    """
    Returns the names of all DataFrame variables in the given namespace,
    skipping private (underscore-prefixed) names.
    """
    return [
        name
        for name, value in da_locals.items()
        if isinstance(value, pd.DataFrame) and not name.startswith("_")
    ]
def export_to_csv(da_locals, selection_widget, out):
    """
    Saves out csv file of dataframe.

    Parameters:
        da_locals: namespace dict (e.g. locals()) holding the DataFrames
        selection_widget: widget whose .value names the DataFrame to export
        out: IPython output widget used to display the download link
    """
    df_name = selection_widget.value
    da_locals[df_name].to_csv("output/{}.csv".format(df_name), index=False)
    # replace any previous link with one for the fresh export
    out.clear_output()
    out.append_display_data(FileLinks("output"))
def clean_swapped_values(merged_df):
    """
    This function will look in a DataFrame for rows where the height_cat and weight_cat are set to
    "Swapped-Measurements" (or the adult equivalent). It will then swap the height and weight values
    for those rows, and recalculate BMIs based on these changes. It will also create two new columns:
    postprocess_height_cat and postprocess_weight_cat. The values for these columns are copied from
    the original categories except in the case where swaps are fixed when it is set to
    "Include-Fixed-Swap".

    Parameters:
    merged_df: (DataFrame) with subjid, height, weight, include_height and include_weight columns

    Returns:
    The cleaned DataFrame
    """
    # copy the original categories; the new label must be registered with the
    # categorical dtype before it can be assigned below
    merged_df["postprocess_height_cat"] = merged_df["height_cat"]
    merged_df["postprocess_height_cat"] = merged_df[
        "postprocess_height_cat"
    ].cat.add_categories(["Include-Fixed-Swap"])
    merged_df["postprocess_weight_cat"] = merged_df["weight_cat"]
    merged_df["postprocess_weight_cat"] = merged_df[
        "postprocess_weight_cat"
    ].cat.add_categories(["Include-Fixed-Swap"])
    # Allow for both pediatric and adult exclusion forms
    exclusions = ["Swapped-Measurements", "Exclude-Adult-Swapped-Measurements"]
    # Condition: both must be flagged as swaps
    cond = merged_df["height_cat"].isin(exclusions) & merged_df["weight_cat"].isin(
        exclusions
    )
    # Swap height and weight (`.values` defeats column alignment so the
    # crosswise assignment actually exchanges the two columns)
    merged_df.loc[cond, ["height", "weight"]] = merged_df.loc[
        cond, ["weight", "height"]
    ].values
    # Record that they were swapped
    merged_df.loc[cond, "postprocess_height_cat"] = "Include-Fixed-Swap"
    merged_df.loc[cond, "postprocess_weight_cat"] = "Include-Fixed-Swap"
    # recompute BMI from the corrected values (height is in cm)
    merged_df["bmi"] = merged_df["weight"] / ((merged_df["height"] / 100) ** 2)
    return merged_df
def clean_unit_errors(merged_df):
    """
    Fixes pediatric unit errors in a DataFrame of merged observations.

    Rows whose height_cat / weight_cat are "Unit-Error-High" or
    "Unit-Error-Low" have their values converted (inches<->cm for height,
    pounds<->kg for weight) and BMI is recomputed. Two new columns,
    postprocess_height_cat and postprocess_weight_cat, copy the original
    categories except for fixed rows, which become "Include-UH" or
    "Include-UL".

    Note: the adult algorithm only flags "Exclude-Adult-Unit-Errors"
    without a high/low direction, so this function applies to pediatrics
    data only.

    Parameters:
        merged_df: (DataFrame) with subjid, height, weight, height_cat and
            weight_cat (categorical) columns

    Returns:
        The cleaned DataFrame
    """
    # copy categories and register the new labels with the categorical dtype
    for cat_col, post_col in (
        ("height_cat", "postprocess_height_cat"),
        ("weight_cat", "postprocess_weight_cat"),
    ):
        merged_df[post_col] = merged_df[cat_col]
        merged_df[post_col] = merged_df[post_col].cat.add_categories(
            ["Include-UH", "Include-UL"]
        )
    # conversion factors: cm per inch for height, lb per kg for weight
    for measure, factor in (("height", 2.54), ("weight", 2.2046)):
        low = merged_df[f"{measure}_cat"] == "Unit-Error-Low"
        high = merged_df[f"{measure}_cat"] == "Unit-Error-High"
        # low values were recorded in the smaller unit; multiply up.
        # high values were recorded in the larger unit; divide down.
        merged_df.loc[low, measure] = merged_df.loc[low, measure] * factor
        merged_df.loc[high, measure] = merged_df.loc[high, measure] / factor
        merged_df.loc[low, f"postprocess_{measure}_cat"] = "Include-UL"
        merged_df.loc[high, f"postprocess_{measure}_cat"] = "Include-UH"
    # recompute BMI from the corrected values (height is in cm)
    merged_df["bmi"] = merged_df["weight"] / ((merged_df["height"] / 100) ** 2)
    return merged_df
| [
"pandas.read_csv",
"matplotlib.pyplot.bar",
"numpy.around",
"IPython.display.FileLinks",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"pandas.concat",
"pandas.to_numeric",
"numpy.repeat"
] | [((2892, 3067), 'pandas.read_csv', 'pd.read_csv', (['f"""growthviz-data/ext/{percentiles_file}"""'], {'dtype': "{'Agemos': float, 'P5': float, 'P50': float, 'P95': float, 'L': float, 'M':\n float, 'S': float, 'Sex': int}"}), "(f'growthviz-data/ext/{percentiles_file}', dtype={'Agemos':\n float, 'P5': float, 'P50': float, 'P95': float, 'L': float, 'M': float,\n 'S': float, 'Sex': int})\n", (2903, 3067), True, 'import pandas as pd\n'), ((6091, 6105), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6103, 6105), True, 'import matplotlib.pyplot as plt\n'), ((6125, 6185), 'matplotlib.pyplot.bar', 'plt.bar', (["obs_grp['cat']", "obs_grp['subjid']"], {'color': 'color_list'}), "(obs_grp['cat'], obs_grp['subjid'], color=color_list)\n", (6132, 6185), True, 'import matplotlib.pyplot as plt\n'), ((6305, 6340), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)', 'ha': '"""right"""'}), "(rotation=45, ha='right')\n", (6315, 6340), True, 'import matplotlib.pyplot as plt\n'), ((8009, 8042), 'numpy.around', 'np.around', (['clean_column_names.age'], {}), '(clean_column_names.age)\n', (8018, 8042), True, 'import numpy as np\n'), ((10238, 10260), 'pandas.concat', 'pd.concat', (['[obs, data]'], {}), '([obs, data])\n', (10247, 10260), True, 'import pandas as pd\n'), ((1247, 1290), 'numpy.repeat', 'np.repeat', (['pct.values', "pct['range']"], {'axis': '(0)'}), "(pct.values, pct['range'], axis=0)\n", (1256, 1290), True, 'import numpy as np\n'), ((1523, 1571), 'pandas.to_numeric', 'pd.to_numeric', (["dta['Number of examined persons']"], {}), "(dta['Number of examined persons'])\n", (1536, 1571), True, 'import pandas as pd\n'), ((10820, 10839), 'IPython.display.FileLinks', 'FileLinks', (['"""output"""'], {}), "('output')\n", (10829, 10839), False, 'from IPython.display import FileLinks\n')] |
import numpy as np
import torch
import torch.distributions as distributions
from scipy.stats import truncnorm
class AdaptiveBaseNet:
    """
    Feed-forward network whose latent-to-base projection is Bayesian: that
    one layer's weights have a Gaussian posterior (mean + softplus(rho) std)
    sampled via the reparameterization trick, while the earlier layers and
    the final linear head are deterministic.

    `layers` lists the widths [input, ..., latent, base, output]; every
    layer output is rescaled by 1/sqrt(fan_in + 1).
    """

    def __init__(self, layers, activation, device, torch_type):
        self.device = device
        self.torch_type = torch_type
        self.layers = layers
        self.num_layers = len(self.layers)
        self.activation = {'tanh': torch.nn.Tanh(), 'relu': torch.nn.ReLU(), 'sigmoid': torch.nn.Sigmoid()}[activation]
        self.input_dim = self.layers[0]
        self.output_dim = self.layers[-1]
        self.latent_dim = self.layers[-3]
        self.base_dim = self.layers[-2]
        # deterministic layers up to the latent representation
        self.latent_weights = []
        self.latent_biases = []
        for l in range(self.num_layers-3):
            W = self._xavier_init(self.layers[l], self.layers[l+1])
            b = torch.zeros([1,self.layers[l+1]], dtype=self.torch_type, device=self.device, requires_grad=True)
            self.latent_weights.append(W)
            self.latent_biases.append(b)
        # variational posterior over the latent->base projection;
        # std = softplus(rho) keeps the standard deviation positive
        # NOTE(review): W_std/b_std are computed once here from the initial
        # rho values and never refreshed when W_rho/b_rho change -- confirm
        # callers recompute them (and the KLD) during training
        self.W_mu = self._xavier_init(self.latent_dim, self.base_dim)
        self.b_mu = torch.zeros([1,self.base_dim], dtype=self.torch_type, device=self.device, requires_grad=True)
        self.W_rho = torch.zeros([self.latent_dim, self.base_dim], dtype=self.torch_type, device=self.device, requires_grad=True)
        self.W_std = torch.log(1 + torch.exp(self.W_rho))
        self.b_rho = torch.zeros([1, self.base_dim], dtype=self.torch_type, device=self.device, requires_grad=True)
        self.b_std = torch.log(1 + torch.exp(self.b_rho))
        # deterministic linear head: base -> output
        self.A = self._xavier_init(self.base_dim, self.output_dim)
        self.A_b = torch.zeros([1,self.output_dim], dtype=self.torch_type, device=self.device, requires_grad=True)
        # standard normal used for reparameterized sampling
        self.normal = distributions.normal.Normal(
            torch.tensor([0.0], dtype=self.torch_type, device=self.device),
            torch.tensor([1.0], dtype=self.torch_type, device=self.device)
        )
        self.kld = self._eval_kld()
        self.reg = self._eval_reg()

    def _sample_from_posterior(self,):
        # reparameterization trick: sample = mu + std * eps, eps ~ N(0, 1)
        epsi_W = torch.squeeze(self.normal.sample(self.W_mu.shape))
        W_sample = self.W_mu + self.W_std*epsi_W
        # the bias sample carries a trailing singleton dim; squeeze it away
        epsi_b = torch.squeeze(self.normal.sample(self.b_mu.shape), dim=2)
        b_sample = self.b_mu + self.b_std*epsi_b
        return W_sample, b_sample

    def forward(self, X, sample=False):
        """Forward pass; returns (output, base representation)."""
        H = X
        for l in range(self.num_layers-3):
            W = self.latent_weights[l]
            b = self.latent_biases[l]
            H = torch.add(torch.matmul(H, W), b)
            # scale before the nonlinear-op
            in_d = self.layers[l]
            H = H/np.sqrt(in_d+1)
            H = self.activation(H)
        # project the latent base to base
        if sample:
            W_sample, b_sample = self._sample_from_posterior()
            H = torch.add(torch.matmul(H, W_sample), b_sample)
        else:
            # use the posterior mean when not sampling
            H = torch.add(torch.matmul(H, self.W_mu), self.b_mu)
        #
        base = H/np.sqrt(self.latent_dim+1)
        Y = torch.add(torch.matmul(base, self.A), self.A_b)
        Y = Y/np.sqrt(self.base_dim+1)
        return Y, base

    def forward_base_by_sample(self, X, W_sample, b_sample):
        """Forward pass to the base layer using externally supplied posterior samples."""
        H = X
        for l in range(self.num_layers-3):
            W = self.latent_weights[l]
            b = self.latent_biases[l]
            H = torch.add(torch.matmul(H, W), b)
            # scale before the nonlinear-op
            in_d = self.layers[l]
            H = H/np.sqrt(in_d+1)
            H = self.activation(H)
        #
        H = torch.add(torch.matmul(H, W_sample), b_sample)
        base = H/np.sqrt(self.latent_dim+1)
        return base

    def _eval_reg(self,):
        # L2 regularizer over all deterministic parameters
        L2_norm_list = []
        for w in self.latent_weights:
            L2_norm_list.append(torch.sum(torch.square(w)))
        #
        for b in self.latent_biases:
            L2_norm_list.append(torch.sum(torch.square(b)))
        #
        L2_norm_list.append(torch.sum(torch.square(self.A)))
        L2_norm_list.append(torch.sum(torch.square(self.A_b)))
        return sum(L2_norm_list)

    def _eval_kld(self,):
        # closed-form KL( N(mu, std) || N(0, 1) ) summed over all entries
        kld_W = torch.sum(-torch.log(self.W_std) + 0.5*(torch.square(self.W_std) + torch.square(self.W_mu)) - 0.5)
        kld_b = torch.sum(-torch.log(self.b_std) + 0.5*(torch.square(self.b_std) + torch.square(self.b_mu)) - 0.5)
        return kld_W+kld_b

    def _xavier_init(self, in_dim, out_dim):
        # Xavier/Glorot initialization
        xavier_stddev = np.sqrt(2.0/(in_dim + out_dim))
        W = torch.normal(size=(in_dim, out_dim), mean=0.0, std=xavier_stddev, requires_grad=True, device=self.device, dtype=self.torch_type)
        return W

    def _msra_init(self, in_dim, out_dim):
        # MSRA/He initialization (not used by __init__; kept as an option)
        xavier_stddev = np.sqrt(2.0/(in_dim))
        W = torch.normal(size=(in_dim, out_dim), mean=0.0, std=xavier_stddev, requires_grad=True, device=self.device, dtype=self.torch_type)
        return W

    def parameters(self,):
        """Return the trainable tensors keyed by name."""
        params = {}
        params['latent_weights'] = self.latent_weights
        params['latent_biases'] = self.latent_biases
        params['W_mu'] = self.W_mu
        params['W_rho'] = self.W_rho
        params['b_mu'] = self.b_mu
        params['b_rho'] = self.b_rho
        params['A'] = self.A
        params['A_b'] = self.A_b
        return params
class DropoutBaseNet:
    """
    Deterministic feed-forward network with dropout applied to the input
    and to every hidden activation. Mirrors AdaptiveBaseNet's interface
    (forward returns (output, base representation)) but uses dropout in
    place of the Bayesian latent->base projection.
    """

    def __init__(self, layers, activation, dropout=0.2, device='cpu', torch_type=torch.float32):
        """
        Parameters:
            layers: list of layer widths [input, ..., latent, base, output]
            activation: 'tanh', 'relu', or 'sigmoid'
            dropout: dropout probability used when forward(..., sample=True)
            device: torch device for all parameters (default 'cpu')
            torch_type: dtype for all parameters (default torch.float32)
        """
        # BUGFIX: self.device / self.torch_type were referenced below but
        # never assigned, so construction raised AttributeError; they are
        # now accepted as backward-compatible keyword arguments.
        self.device = device
        self.torch_type = torch_type
        self.layers = layers
        self.num_layers = len(self.layers)
        self.activation = {'tanh': torch.nn.Tanh(), 'relu': torch.nn.ReLU(), 'sigmoid': torch.nn.Sigmoid()}[activation]
        self.dropout = dropout
        self.input_dim = self.layers[0]
        self.output_dim = self.layers[-1]
        self.latent_dim = self.layers[-3]
        self.base_dim = self.layers[-2]
        self.base_weights = []
        self.base_biases = []
        for l in range(self.num_layers-2):
            W = self._xavier_init(self.layers[l], self.layers[l+1])
            b = torch.zeros([1, self.layers[l+1]], dtype=self.torch_type, device=self.device, requires_grad=True)
            self.base_weights.append(W)
            self.base_biases.append(b)
        # deterministic linear head: base -> output
        self.A = self._xavier_init(self.base_dim, self.output_dim)
        self.A_b = torch.zeros([1, self.output_dim], dtype=self.torch_type, device=self.device, requires_grad=True)
        self.reg = self._eval_reg()

    def forward(self, X, sample=False):
        """Forward pass; dropout is active only when sample=True."""
        H = torch.nn.functional.dropout(X, p=self.dropout, training=sample)
        for l in range(self.num_layers-2):
            W = self.base_weights[l]
            b = self.base_biases[l]
            H = torch.add(torch.matmul(H, W), b)
            # scale before the nonlinearity to keep activations bounded
            in_d = self.layers[l]
            H = H/np.sqrt(in_d+1)
            H = self.activation(H)
            H = torch.nn.functional.dropout(H, p=self.dropout, training=sample)
        base = H/np.sqrt(self.latent_dim+1)
        Y = torch.add(torch.matmul(base, self.A), self.A_b)
        Y = Y/np.sqrt(self.base_dim+1)
        return Y, base

    def _eval_reg(self,):
        # L2 regularizer over all weight matrices (biases excluded)
        L2_norm_list = []
        for w in self.base_weights:
            L2_norm_list.append(torch.sum(torch.square(w)))
        L2_norm_list.append(torch.sum(torch.square(self.A)))
        return sum(L2_norm_list)

    def _xavier_init(self, in_dim, out_dim):
        # Xavier/Glorot initialization
        xavier_stddev = np.sqrt(2.0/(in_dim + out_dim))
        W = torch.normal(size=(in_dim, out_dim), mean=0.0, std=xavier_stddev, requires_grad=True, device=self.device, dtype=self.torch_type)
        return W

    def _msra_init(self, in_dim, out_dim):
        # MSRA/He initialization (not used by __init__; kept for parity)
        msra_stddev = np.sqrt(2.0/(in_dim))
        W = torch.normal(size=(in_dim, out_dim), mean=0.0, std=msra_stddev, requires_grad=True, device=self.device, dtype=self.torch_type)
        return W

    def parameters(self,):
        """Return the trainable tensors keyed by name."""
        params = {}
        params['base_weights'] = self.base_weights
        params['base_biases'] = self.base_biases
        params['A'] = self.A
        params['A_b'] = self.A_b
        return params
| [
"torch.nn.ReLU",
"torch.nn.Tanh",
"torch.square",
"torch.nn.functional.dropout",
"torch.nn.Sigmoid",
"torch.normal",
"torch.exp",
"torch.zeros",
"torch.matmul",
"torch.log",
"torch.tensor",
"numpy.sqrt"
] | [((1142, 1240), 'torch.zeros', 'torch.zeros', (['[1, self.base_dim]'], {'dtype': 'self.torch_type', 'device': 'self.device', 'requires_grad': '(True)'}), '([1, self.base_dim], dtype=self.torch_type, device=self.device,\n requires_grad=True)\n', (1153, 1240), False, 'import torch\n'), ((1266, 1379), 'torch.zeros', 'torch.zeros', (['[self.latent_dim, self.base_dim]'], {'dtype': 'self.torch_type', 'device': 'self.device', 'requires_grad': '(True)'}), '([self.latent_dim, self.base_dim], dtype=self.torch_type, device\n =self.device, requires_grad=True)\n', (1277, 1379), False, 'import torch\n'), ((1463, 1561), 'torch.zeros', 'torch.zeros', (['[1, self.base_dim]'], {'dtype': 'self.torch_type', 'device': 'self.device', 'requires_grad': '(True)'}), '([1, self.base_dim], dtype=self.torch_type, device=self.device,\n requires_grad=True)\n', (1474, 1561), False, 'import torch\n'), ((1711, 1811), 'torch.zeros', 'torch.zeros', (['[1, self.output_dim]'], {'dtype': 'self.torch_type', 'device': 'self.device', 'requires_grad': '(True)'}), '([1, self.output_dim], dtype=self.torch_type, device=self.device,\n requires_grad=True)\n', (1722, 1811), False, 'import torch\n'), ((4719, 4752), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (in_dim + out_dim))'], {}), '(2.0 / (in_dim + out_dim))\n', (4726, 4752), True, 'import numpy as np\n'), ((4763, 4895), 'torch.normal', 'torch.normal', ([], {'size': '(in_dim, out_dim)', 'mean': '(0.0)', 'std': 'xavier_stddev', 'requires_grad': '(True)', 'device': 'self.device', 'dtype': 'self.torch_type'}), '(size=(in_dim, out_dim), mean=0.0, std=xavier_stddev,\n requires_grad=True, device=self.device, dtype=self.torch_type)\n', (4775, 4895), False, 'import torch\n'), ((4981, 5002), 'numpy.sqrt', 'np.sqrt', (['(2.0 / in_dim)'], {}), '(2.0 / in_dim)\n', (4988, 5002), True, 'import numpy as np\n'), ((5015, 5147), 'torch.normal', 'torch.normal', ([], {'size': '(in_dim, out_dim)', 'mean': '(0.0)', 'std': 'xavier_stddev', 'requires_grad': '(True)', 'device': 
'self.device', 'dtype': 'self.torch_type'}), '(size=(in_dim, out_dim), mean=0.0, std=xavier_stddev,\n requires_grad=True, device=self.device, dtype=self.torch_type)\n', (5027, 5147), False, 'import torch\n'), ((6543, 6643), 'torch.zeros', 'torch.zeros', (['[1, self.output_dim]'], {'dtype': 'self.torch_type', 'device': 'self.device', 'requires_grad': '(True)'}), '([1, self.output_dim], dtype=self.torch_type, device=self.device,\n requires_grad=True)\n', (6554, 6643), False, 'import torch\n'), ((6751, 6814), 'torch.nn.functional.dropout', 'torch.nn.functional.dropout', (['X'], {'p': 'self.dropout', 'training': 'sample'}), '(X, p=self.dropout, training=sample)\n', (6778, 6814), False, 'import torch\n'), ((7784, 7817), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (in_dim + out_dim))'], {}), '(2.0 / (in_dim + out_dim))\n', (7791, 7817), True, 'import numpy as np\n'), ((7828, 7960), 'torch.normal', 'torch.normal', ([], {'size': '(in_dim, out_dim)', 'mean': '(0.0)', 'std': 'xavier_stddev', 'requires_grad': '(True)', 'device': 'self.device', 'dtype': 'self.torch_type'}), '(size=(in_dim, out_dim), mean=0.0, std=xavier_stddev,\n requires_grad=True, device=self.device, dtype=self.torch_type)\n', (7840, 7960), False, 'import torch\n'), ((8046, 8067), 'numpy.sqrt', 'np.sqrt', (['(2.0 / in_dim)'], {}), '(2.0 / in_dim)\n', (8053, 8067), True, 'import numpy as np\n'), ((8080, 8212), 'torch.normal', 'torch.normal', ([], {'size': '(in_dim, out_dim)', 'mean': '(0.0)', 'std': 'xavier_stddev', 'requires_grad': '(True)', 'device': 'self.device', 'dtype': 'self.torch_type'}), '(size=(in_dim, out_dim), mean=0.0, std=xavier_stddev,\n requires_grad=True, device=self.device, dtype=self.torch_type)\n', (8092, 8212), False, 'import torch\n'), ((863, 967), 'torch.zeros', 'torch.zeros', (['[1, self.layers[l + 1]]'], {'dtype': 'self.torch_type', 'device': 'self.device', 'requires_grad': '(True)'}), '([1, self.layers[l + 1]], dtype=self.torch_type, device=self.\n device, requires_grad=True)\n', (874, 967), 
False, 'import torch\n'), ((1879, 1941), 'torch.tensor', 'torch.tensor', (['[0.0]'], {'dtype': 'self.torch_type', 'device': 'self.device'}), '([0.0], dtype=self.torch_type, device=self.device)\n', (1891, 1941), False, 'import torch\n'), ((1956, 2018), 'torch.tensor', 'torch.tensor', (['[1.0]'], {'dtype': 'self.torch_type', 'device': 'self.device'}), '([1.0], dtype=self.torch_type, device=self.device)\n', (1968, 2018), False, 'import torch\n'), ((3158, 3186), 'numpy.sqrt', 'np.sqrt', (['(self.latent_dim + 1)'], {}), '(self.latent_dim + 1)\n', (3165, 3186), True, 'import numpy as np\n'), ((3216, 3242), 'torch.matmul', 'torch.matmul', (['base', 'self.A'], {}), '(base, self.A)\n', (3228, 3242), False, 'import torch\n'), ((3268, 3294), 'numpy.sqrt', 'np.sqrt', (['(self.base_dim + 1)'], {}), '(self.base_dim + 1)\n', (3275, 3294), True, 'import numpy as np\n'), ((3777, 3802), 'torch.matmul', 'torch.matmul', (['H', 'W_sample'], {}), '(H, W_sample)\n', (3789, 3802), False, 'import torch\n'), ((3840, 3868), 'numpy.sqrt', 'np.sqrt', (['(self.latent_dim + 1)'], {}), '(self.latent_dim + 1)\n', (3847, 3868), True, 'import numpy as np\n'), ((6263, 6367), 'torch.zeros', 'torch.zeros', (['[1, self.layers[l + 1]]'], {'dtype': 'self.torch_type', 'device': 'self.device', 'requires_grad': '(True)'}), '([1, self.layers[l + 1]], dtype=self.torch_type, device=self.\n device, requires_grad=True)\n', (6274, 6367), False, 'import torch\n'), ((7156, 7219), 'torch.nn.functional.dropout', 'torch.nn.functional.dropout', (['H'], {'p': 'self.dropout', 'training': 'sample'}), '(H, p=self.dropout, training=sample)\n', (7183, 7219), False, 'import torch\n'), ((7246, 7274), 'numpy.sqrt', 'np.sqrt', (['(self.latent_dim + 1)'], {}), '(self.latent_dim + 1)\n', (7253, 7274), True, 'import numpy as np\n'), ((7304, 7330), 'torch.matmul', 'torch.matmul', (['base', 'self.A'], {}), '(base, self.A)\n', (7316, 7330), False, 'import torch\n'), ((7356, 7382), 'numpy.sqrt', 'np.sqrt', (['(self.base_dim + 1)'], {}), 
'(self.base_dim + 1)\n', (7363, 7382), True, 'import numpy as np\n'), ((395, 410), 'torch.nn.Tanh', 'torch.nn.Tanh', ([], {}), '()\n', (408, 410), False, 'import torch\n'), ((420, 435), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (433, 435), False, 'import torch\n'), ((448, 466), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (464, 466), False, 'import torch\n'), ((1410, 1431), 'torch.exp', 'torch.exp', (['self.W_rho'], {}), '(self.W_rho)\n', (1419, 1431), False, 'import torch\n'), ((1593, 1614), 'torch.exp', 'torch.exp', (['self.b_rho'], {}), '(self.b_rho)\n', (1602, 1614), False, 'import torch\n'), ((2663, 2681), 'torch.matmul', 'torch.matmul', (['H', 'W'], {}), '(H, W)\n', (2675, 2681), False, 'import torch\n'), ((2795, 2812), 'numpy.sqrt', 'np.sqrt', (['(in_d + 1)'], {}), '(in_d + 1)\n', (2802, 2812), True, 'import numpy as np\n'), ((3006, 3031), 'torch.matmul', 'torch.matmul', (['H', 'W_sample'], {}), '(H, W_sample)\n', (3018, 3031), False, 'import torch\n'), ((3083, 3109), 'torch.matmul', 'torch.matmul', (['H', 'self.W_mu'], {}), '(H, self.W_mu)\n', (3095, 3109), False, 'import torch\n'), ((3552, 3570), 'torch.matmul', 'torch.matmul', (['H', 'W'], {}), '(H, W)\n', (3564, 3570), False, 'import torch\n'), ((3684, 3701), 'numpy.sqrt', 'np.sqrt', (['(in_d + 1)'], {}), '(in_d + 1)\n', (3691, 3701), True, 'import numpy as np\n'), ((4211, 4231), 'torch.square', 'torch.square', (['self.A'], {}), '(self.A)\n', (4223, 4231), False, 'import torch\n'), ((4272, 4294), 'torch.square', 'torch.square', (['self.A_b'], {}), '(self.A_b)\n', (4284, 4294), False, 'import torch\n'), ((5768, 5783), 'torch.nn.Tanh', 'torch.nn.Tanh', ([], {}), '()\n', (5781, 5783), False, 'import torch\n'), ((5793, 5808), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (5806, 5808), False, 'import torch\n'), ((5821, 5839), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (5837, 5839), False, 'import torch\n'), ((6957, 6975), 'torch.matmul', 'torch.matmul', (['H', 'W'], 
{}), '(H, W)\n', (6969, 6975), False, 'import torch\n'), ((7089, 7106), 'numpy.sqrt', 'np.sqrt', (['(in_d + 1)'], {}), '(in_d + 1)\n', (7096, 7106), True, 'import numpy as np\n'), ((7649, 7669), 'torch.square', 'torch.square', (['self.A'], {}), '(self.A)\n', (7661, 7669), False, 'import torch\n'), ((4038, 4053), 'torch.square', 'torch.square', (['w'], {}), '(w)\n', (4050, 4053), False, 'import torch\n'), ((4145, 4160), 'torch.square', 'torch.square', (['b'], {}), '(b)\n', (4157, 4160), False, 'import torch\n'), ((7582, 7597), 'torch.square', 'torch.square', (['w'], {}), '(w)\n', (7594, 7597), False, 'import torch\n'), ((4406, 4427), 'torch.log', 'torch.log', (['self.W_std'], {}), '(self.W_std)\n', (4415, 4427), False, 'import torch\n'), ((4521, 4542), 'torch.log', 'torch.log', (['self.b_std'], {}), '(self.b_std)\n', (4530, 4542), False, 'import torch\n'), ((4435, 4459), 'torch.square', 'torch.square', (['self.W_std'], {}), '(self.W_std)\n', (4447, 4459), False, 'import torch\n'), ((4462, 4485), 'torch.square', 'torch.square', (['self.W_mu'], {}), '(self.W_mu)\n', (4474, 4485), False, 'import torch\n'), ((4550, 4574), 'torch.square', 'torch.square', (['self.b_std'], {}), '(self.b_std)\n', (4562, 4574), False, 'import torch\n'), ((4577, 4600), 'torch.square', 'torch.square', (['self.b_mu'], {}), '(self.b_mu)\n', (4589, 4600), False, 'import torch\n')] |
'''
Python functions for creating and analyzing EEG waveforms
How to use:
Each time a new epoch is needed, call generate_data(args) to create an epoch. Then call
generate_spectra() to create fft and power spectrum. All this data is stored internally
in this script.
Call get_data(), get_fft(), or get_power_spec() to retrieve the raw data, the FFT
spectrum, or the power spectrum, respectively. Each is returned as a list of [x, y] pairs.
Call get_TBR_power(), get_TBR_amp(), or get_TBR_diff() to get the desired TBR from the
most recently created epoch
'''
import numpy as np
import js
# Global data variable to hold the most recent epoch
curr_data = [] # raw data
curr_data_sr = 0 # sampling rate
curr_data_ed = 0 # epoch duration
curr_fft = [] # fft spectrum
curr_powerspec = [] # power spectrum
# Creates a synthetic brain wave and stores it in global variable. Adapted from Python IV workshop
# params:
# args.epochDuration: length of epoch in seconds
# args.samplingRate: number of samples per second
# args.tFreq: frequency of theta wave
# args.tAmp: amplitude of theta wave
# args.tNoise: amount of noise in theta wave
# args.bFreq: frequency of beta wave
# args.bAmp: amplitude of beta wave
# args.bNoise: amount of noise in beta wave
def generate_data():
    """
    Creates a synthetic theta+beta brain wave epoch from the arguments in
    js.window.pyArgs (supplied by the browser) and stores it, with its
    sampling rate and epoch duration, in the module-level globals.
    """
    global curr_data, curr_data_sr, curr_data_ed
    args = js.window.pyArgs
    # NOTE(review): linspace yields args.samplingRate points over the whole
    # epoch, not samplingRate*epochDuration -- confirm this is intended
    times = np.linspace(0,args.epochDuration,args.samplingRate) # One second at 250Hz
    theta = generateNoisyWave(times, args.tFreq, args.tAmp, args.tNoise) # (time, Freq, Amp, Noise)
    beta = generateNoisyWave(times, args.bFreq, args.bAmp, args.bNoise)
    # sum the component waves into [time, voltage] pairs
    xy_list = []
    for x in range(0, len(times)):
        xy_list.append([times[x], theta[x] + beta[x]])
    curr_data = xy_list
    curr_data_sr = args.samplingRate
    curr_data_ed = args.epochDuration
# Creates an fft and power spectra from the most recently generated epoch and stores them in global variables
def generate_spectra():
global curr_data, curr_fft, curr_powerspec, curr_data_sr, curr_data_ed
curr_fft = fft(curr_data, curr_data_sr, curr_data_ed)
curr_powerspec = power_spec(curr_fft)
# Returns most recent raw data to JS
# returns list of [time, voltage] pairs
def get_data():
global curr_data
print(curr_data)
return curr_data
# fft function for returning result to Javascript
# returns list of [frequency, amplitude] pairs corresponding to fourier spectrum
def get_fft():
global curr_fft
return curr_fft
# Power spectrum function for returning result to Javascript
# returns list of [frequency, amplitude] pairs corresponding to fourier spectrum
def get_power_spec():
global curr_powerspec
return curr_powerspec
# Calculates the theta-beta ratio (TBR) for the most recently stored epoch and returns to Javascript
# Calculation is mean theta power divided by mean beta power
# returns the TBR (power)
def get_TBR_power():
global curr_powerspec
theta_sum = 0
theta_count = 0
beta_sum = 0
beta_count = 0
for x in curr_powerspec:
if x[0] <=7 and x[0] > 3.5:
theta_sum += x[1]
theta_count += 1
elif x[0] <= 20 and x[0] > 12:
beta_sum += x[1]
beta_count += 1
return ( (theta_sum / theta_count) / (beta_sum / beta_count) )
# Calculates the theta-beta ratio (TBR) for the most recently stored epoch and returns to Javascript
# Calculation is mean theta amplitude divided by mean beta amplitude
# returns the TBR (amplitude)
def get_TBR_amp():
global curr_fft
theta_sum = 0
theta_count = 0
beta_sum = 0
beta_count = 0
for x in curr_fft:
if x[0] <=7 and x[0] > 3.5:
theta_sum += x[1]
theta_count += 1
elif x[0] <= 20 and x[0] > 12:
beta_sum += x[1]
beta_count += 1
return ( (theta_sum / theta_count) / (beta_sum / beta_count) )
# Calculates the theta-beta ratio (TBR) for the most recently stored epoch and returns to Javascript
# Calculation is normalized difference between theta power and beta powr
# (mean theta power - mean beta power) / (mean theta power + mean beta power)
# returns the TBR (difference)
def get_TBR_diff():
global curr_powerspec
theta_sum = 0
theta_count = 0
beta_sum = 0
beta_count = 0
for x in curr_powerspec:
if x[0] <=7 and x[0] > 3.5:
theta_sum += x[1]
theta_count += 1
elif x[0] <= 20 and x[0] > 12:
beta_sum += x[1]
beta_count += 1
mean_theta = theta_sum / theta_count
mean_beta = beta_sum / beta_count
return (mean_theta - mean_beta) / (mean_theta + mean_beta)
# Creates a sine wave with added noise. Adapted from Python III workshop
# params:
# times: array of x-values for creating wave
# freq: frquency of wave
# amp: amplitude of wave
# noise: amount of noise introduced; higher means more noise
# returns: array of y-values corresponding to the specified sine wave
def generateNoisyWave(times, freq, amp, noise):
# This simplifies code later, this basically just creates our noise for us
if(not isinstance(times, float)):
noiseArray = noise * np.random.randn(len(times))
else:
noiseArray = noise * np.random.randn(1)
sineWave = amp * np.sin(freq * 2 * np.pi * times)
return sineWave + noiseArray
# Creates a power spectrum from an epoch of EEG data.
# params:
# fft: list of [time, voltage] pairs corresponding to brain wave
# returns list of [frequency, power] pairs corresponding to fourier spectrum
def power_spec(fft):
powerspec = []
for x in fft:
powerspec.append(x[0], x[1]**2)
return powerspec
# Performs an FFT on an epoch of EEG data and stores it in the global variable
# params:
# data: list of [time, voltage] pairs corresponding to brain wave
# samp_rate: samples per second
# epoch_length: length of epoch in seconds
def fft(data, samp_rate, epoch_length):
global curr_fft
volts = [x[1] for x in data]
n = len(volts)
Y = np.fft.fft(volts)/n # fft computing and normalization
Y = Y[:100 * epoch_length]
k = np.arange(n)
T = n*(epoch_length/samp_rate)
frq = k/T # two sides frequency range
frq = frq[:100 * epoch_length] # one side frequency range
amp = np.abs(Y)
xy_list = []
for x in range(0, len(frq)):
xy_list.append([frq[x], amp[x]])
return xy_list
| [
"numpy.abs",
"numpy.random.randn",
"numpy.fft.fft",
"numpy.sin",
"numpy.arange",
"numpy.linspace"
] | [((1407, 1460), 'numpy.linspace', 'np.linspace', (['(0)', 'args.epochDuration', 'args.samplingRate'], {}), '(0, args.epochDuration, args.samplingRate)\n', (1418, 1460), True, 'import numpy as np\n'), ((6224, 6236), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (6233, 6236), True, 'import numpy as np\n'), ((6386, 6395), 'numpy.abs', 'np.abs', (['Y'], {}), '(Y)\n', (6392, 6395), True, 'import numpy as np\n'), ((5359, 5391), 'numpy.sin', 'np.sin', (['(freq * 2 * np.pi * times)'], {}), '(freq * 2 * np.pi * times)\n', (5365, 5391), True, 'import numpy as np\n'), ((6131, 6148), 'numpy.fft.fft', 'np.fft.fft', (['volts'], {}), '(volts)\n', (6141, 6148), True, 'import numpy as np\n'), ((5314, 5332), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (5329, 5332), True, 'import numpy as np\n')] |
"""Filtering routines."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import numpy as np
from scipy import signal
from kwiklib.utils.six.moves import range
# -----------------------------------------------------------------------------
# Signal processing functions
# -----------------------------------------------------------------------------
def bandpass_filter(**prm):
"""Bandpass filtering."""
rate = prm['sample_rate']
order = prm['filter_butter_order']
low = prm['filter_low']
high = prm['filter_high']
return signal.butter(order,
(low/(rate/2.), high/(rate/2.)),
'pass')
def apply_filter(x, filter=None):
if x.shape[0] == 0:
return x
b, a = filter
try:
out_arr = signal.filtfilt(b, a, x, axis=0)
except TypeError:
out_arr = np.zeros_like(x)
for i_ch in range(x.shape[1]):
out_arr[:, i_ch] = signal.filtfilt(b, a, x[:, i_ch])
return out_arr
def decimate(x):
q = 16
n = 50
axis = 0
b = signal.firwin(n + 1, 1. / q, window='hamming')
a = 1.
y = signal.lfilter(b, a, x, axis=axis)
sl = [slice(None)] * y.ndim
sl[axis] = slice(n // 2, None, q)
return y[sl]
# -----------------------------------------------------------------------------
# Whitening
# -----------------------------------------------------------------------------
"""
* Get the first chunk of data
* Detect spikes the usual way
* Compute mean on each channel on non-spike data
* For every pair of channels:
* estimate the covariance on non-spike data
* Get the covariance matrix
* Get its square root C' (sqrtm)
* Get u*C' + (1-u)*s*Id, where u is a parameter, s the std of non-spike data
across all channels
* Option to save or not whitened data in FIL
* All spike detection is done on whitened data
"""
def get_whitening_matrix(x):
C = np.cov(x, rowvar=0)
# TODO
def whiten(x, matrix=None):
if matrix is None:
matrix = get_whitening_matrix(x)
# TODO
| [
"numpy.zeros_like",
"scipy.signal.filtfilt",
"kwiklib.utils.six.moves.range",
"scipy.signal.lfilter",
"scipy.signal.firwin",
"numpy.cov",
"scipy.signal.butter"
] | [((671, 742), 'scipy.signal.butter', 'signal.butter', (['order', '(low / (rate / 2.0), high / (rate / 2.0))', '"""pass"""'], {}), "(order, (low / (rate / 2.0), high / (rate / 2.0)), 'pass')\n", (684, 742), False, 'from scipy import signal\n'), ((1179, 1226), 'scipy.signal.firwin', 'signal.firwin', (['(n + 1)', '(1.0 / q)'], {'window': '"""hamming"""'}), "(n + 1, 1.0 / q, window='hamming')\n", (1192, 1226), False, 'from scipy import signal\n'), ((1246, 1280), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'x'], {'axis': 'axis'}), '(b, a, x, axis=axis)\n', (1260, 1280), False, 'from scipy import signal\n'), ((2051, 2070), 'numpy.cov', 'np.cov', (['x'], {'rowvar': '(0)'}), '(x, rowvar=0)\n', (2057, 2070), True, 'import numpy as np\n'), ((904, 936), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'x'], {'axis': '(0)'}), '(b, a, x, axis=0)\n', (919, 936), False, 'from scipy import signal\n'), ((977, 993), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (990, 993), True, 'import numpy as np\n'), ((1014, 1031), 'kwiklib.utils.six.moves.range', 'range', (['x.shape[1]'], {}), '(x.shape[1])\n', (1019, 1031), False, 'from kwiklib.utils.six.moves import range\n'), ((1064, 1097), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'x[:, i_ch]'], {}), '(b, a, x[:, i_ch])\n', (1079, 1097), False, 'from scipy import signal\n')] |
import pandas as pd
from pathlib import Path
import pickle
import numpy as np
#root_path_setup0 = Path(r"Z:\arminbahl\Behavior Setup 0\free_swimming_4fish_data\dot_motion_coherence8_2")
#root_path_setup1 = Path(r"Z:\arminbahl\Behavior Setup 1\free_swimming_4fish_data\dot_motion_coherence8_2")
root_path_setup0 = Path("/n/home10/abahl/engert_lab_storage/arminbahl/Behavior Setup 0/free_swimming_4fish_data/dot_motion_coherence8_2")
root_path_setup1 = Path("/n/home10/abahl/engert_lab_storage/arminbahl/Behavior Setup 1/free_swimming_4fish_data/dot_motion_coherence8_2")
fishes_setup0 = [
"2017_12_25_fish001",
"2017_12_25_fish002",
"2017_12_25_fish003",
"2017_12_25_fish004",
"2017_12_25_fish005",
"2017_12_25_fish006",
"2017_12_25_fish007",
"2017_12_25_fish008",
"2017_12_29_fish013",
"2017_12_29_fish014",
"2017_12_29_fish015",
"2017_12_29_fish016",
"2017_12_29_fish017",
"2017_12_29_fish018",
"2017_12_29_fish019",
"2017_12_29_fish020",
"2017_12_29_fish021",
"2017_12_29_fish022",
"2017_12_29_fish023",
"2017_12_29_fish024",
"2017_12_30_fish025",
"2017_12_30_fish026",
"2017_12_30_fish027",
"2017_12_30_fish028",
"2017_12_30_fish029",
"2017_12_30_fish030",
"2017_12_30_fish031",
"2017_12_30_fish032",
]
fishes_setup1 = [
"2017_12_25_fish001",
"2017_12_25_fish002",
"2017_12_25_fish003",
"2017_12_25_fish004",
"2017_12_25_fish005",
"2017_12_25_fish006",
"2017_12_25_fish007",
"2017_12_25_fish008",
"2017_12_29_fish009",
"2017_12_29_fish010",
"2017_12_29_fish011",
"2017_12_29_fish012",
"2017_12_29_fish013",
"2017_12_29_fish014",
"2017_12_29_fish015",
"2017_12_29_fish016",
"2017_12_29_fish017",
"2017_12_29_fish018",
"2017_12_29_fish019",
"2017_12_29_fish020",
"2017_12_30_fish021",
"2017_12_30_fish022",
"2017_12_30_fish023",
"2017_12_30_fish024",
"2017_12_30_fish025",
"2017_12_30_fish026",
"2017_12_30_fish027",
"2017_12_30_fish028",
"2017_12_30_fish029",
"2017_12_30_fish030",
"2017_12_30_fish031",
"2017_12_30_fish032",
]
all_data = []
numtrials = 45
for setupID in [0, 1]:
if setupID == 0:
fishes = fishes_setup0
root_path = root_path_setup0
else:
fishes = fishes_setup1
root_path = root_path_setup1
for i in range(len(fishes)):
fish_ID = fishes[i]
genotype = "wt"
for trial in range(0, numtrials):
print(fish_ID, genotype, trial)
try:
f = open(root_path / fish_ID / "raw_data" / f"trial{trial:03d}.dat", 'rb')
data = pickle.load(f)
f.close()
except:
break
for stim in range(8):
bout_times = data[f"bouts_start_stimulus_{stim:03d}"]["timestamp"]
bout_xs = data[f"bouts_start_stimulus_{stim:03d}"]["fish_position_x"]
bout_ys = data[f"bouts_start_stimulus_{stim:03d}"]["fish_position_y"]
bout_start_fish_accumulated_orientation = data[f"bouts_start_stimulus_{stim:03d}"]["fish_accumulated_orientation"]
bout_end_fish_accumulated_orientation = data[f"bouts_end_stimulus_{stim:03d}"]["fish_accumulated_orientation"]
heading_angle_changes = bout_end_fish_accumulated_orientation - bout_start_fish_accumulated_orientation
# Turn responses to left-ward motion the after way around
if stim in [0, 1, 2, 3]:
heading_angle_changes = -heading_angle_changes
for i in range(1, len(bout_times)):
all_data.append([fish_ID,
genotype,
trial,
stim % 4,
bout_times[i],
bout_xs[i],
bout_ys[i],
bout_times[i] - bout_times[i - 1],
heading_angle_changes[i],
np.sign(heading_angle_changes[i]) == np.sign(heading_angle_changes[i - 1])])
df = pd.DataFrame(all_data, columns=["fish_ID",
"genotype",
"trial",
"stim",
"bout_time",
"bout_x",
"bout_y",
"inter_bout_interval",
"heading_angle_change",
"same_as_previous"]).astype(dtype={"trial": "int64",
"stim": "int64",
"same_as_previous": "bool"}, copy=False)
df.set_index(['fish_ID', "genotype", 'trial', 'stim'], inplace=True)
df.sort_index(inplace=True)
df.to_hdf(root_path_setup0 / "all_data.h5", key="all_bouts", complevel=9)
| [
"pandas.DataFrame",
"pathlib.Path",
"pickle.load",
"numpy.sign"
] | [((315, 443), 'pathlib.Path', 'Path', (['"""/n/home10/abahl/engert_lab_storage/arminbahl/Behavior Setup 0/free_swimming_4fish_data/dot_motion_coherence8_2"""'], {}), "(\n '/n/home10/abahl/engert_lab_storage/arminbahl/Behavior Setup 0/free_swimming_4fish_data/dot_motion_coherence8_2'\n )\n", (319, 443), False, 'from pathlib import Path\n'), ((453, 581), 'pathlib.Path', 'Path', (['"""/n/home10/abahl/engert_lab_storage/arminbahl/Behavior Setup 1/free_swimming_4fish_data/dot_motion_coherence8_2"""'], {}), "(\n '/n/home10/abahl/engert_lab_storage/arminbahl/Behavior Setup 1/free_swimming_4fish_data/dot_motion_coherence8_2'\n )\n", (457, 581), False, 'from pathlib import Path\n'), ((4292, 4472), 'pandas.DataFrame', 'pd.DataFrame', (['all_data'], {'columns': "['fish_ID', 'genotype', 'trial', 'stim', 'bout_time', 'bout_x', 'bout_y',\n 'inter_bout_interval', 'heading_angle_change', 'same_as_previous']"}), "(all_data, columns=['fish_ID', 'genotype', 'trial', 'stim',\n 'bout_time', 'bout_x', 'bout_y', 'inter_bout_interval',\n 'heading_angle_change', 'same_as_previous'])\n", (4304, 4472), True, 'import pandas as pd\n'), ((2714, 2728), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2725, 2728), False, 'import pickle\n'), ((4209, 4242), 'numpy.sign', 'np.sign', (['heading_angle_changes[i]'], {}), '(heading_angle_changes[i])\n', (4216, 4242), True, 'import numpy as np\n'), ((4246, 4283), 'numpy.sign', 'np.sign', (['heading_angle_changes[i - 1]'], {}), '(heading_angle_changes[i - 1])\n', (4253, 4283), True, 'import numpy as np\n')] |
import pyclesperanto_prototype as cle
import numpy as np
def test_subtract_image_from_scalar():
test1 = cle.push(np.asarray([
[0, 0],
[1, 1],
[2, 2]
]))
reference = cle.push(np.asarray([
[5, 5],
[4, 4],
[3, 3]
]))
result = cle.create(test1)
cle.subtract_image_from_scalar(test1, result, 5)
a = cle.pull(result)
b = cle.pull(reference)
print(a)
assert (np.array_equal(a, b))
| [
"numpy.array_equal",
"numpy.asarray",
"pyclesperanto_prototype.pull",
"pyclesperanto_prototype.subtract_image_from_scalar",
"pyclesperanto_prototype.create"
] | [((295, 312), 'pyclesperanto_prototype.create', 'cle.create', (['test1'], {}), '(test1)\n', (305, 312), True, 'import pyclesperanto_prototype as cle\n'), ((317, 365), 'pyclesperanto_prototype.subtract_image_from_scalar', 'cle.subtract_image_from_scalar', (['test1', 'result', '(5)'], {}), '(test1, result, 5)\n', (347, 365), True, 'import pyclesperanto_prototype as cle\n'), ((375, 391), 'pyclesperanto_prototype.pull', 'cle.pull', (['result'], {}), '(result)\n', (383, 391), True, 'import pyclesperanto_prototype as cle\n'), ((400, 419), 'pyclesperanto_prototype.pull', 'cle.pull', (['reference'], {}), '(reference)\n', (408, 419), True, 'import pyclesperanto_prototype as cle\n'), ((447, 467), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (461, 467), True, 'import numpy as np\n'), ((119, 155), 'numpy.asarray', 'np.asarray', (['[[0, 0], [1, 1], [2, 2]]'], {}), '([[0, 0], [1, 1], [2, 2]])\n', (129, 155), True, 'import numpy as np\n'), ((213, 249), 'numpy.asarray', 'np.asarray', (['[[5, 5], [4, 4], [3, 3]]'], {}), '([[5, 5], [4, 4], [3, 3]])\n', (223, 249), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Creates a Torch Dataset and Dataloader objects with custom tokenizer.
Create a simple data set and loader to work with PyTorch Dataset and Dataloader class. Wanted to use these instead of
the torch.text alternatives to keep things simple. In addition, using Dataloader in combination with DDP will hopefully
be more efficient with parallel processing and GPUs (yet to test)
"""
import collections
from itertools import product
import logging
import numpy as np
from torch.utils.data import DataLoader
import torch
# Helper Functions
class SeqDataset(torch.utils.data.Dataset):
"""
A class to represent a dataset to store sequence data.
...
Attributes
----------
file_name: str
Name of the file that contains one sequence per line
data: [str]
List of seqeunces read as lines from the file
transform: str
Name of the function(s) used to transform the data
context_size: int
The number of kmers or words to consider on either side of the target kmer
Methods
-------
__len__():
Prints the number of lines or seqeunces in the data
__getitem__():
Gets the next seqeunce in the dataset and applies transformtaions on the data
"""
def __init__(self, file_name, kmer_size = 4, context_size=3, num_noise_words=5, transform=None):
"""Constructs the dataset of sequences
that should be passed to the negative sampling loss.
Parameters
----------
file_name: str
Name of the file that contains one sequence per line
kmer_size: int
The size of the kmers to extract from the sequence
context_size: int
The number of kmers or words to consider on either side of the target kmer
num_noise_words: int
The number of noise words to use for negative sampling
transform: [torchvision.Transform]
List of mames of the transformer function(s) used to transform the data
"""
self.file_name = file_name
self.data = open(self.file_name, 'r').readlines()
self.kmer_size = kmer_size
self.transform = transform
self.context_size = context_size
self.num_noise_words = num_noise_words
self.word_probs = None # Initialize this to None to make sure _build_vocab() does not error out
self.doc_to_ix, self.word_to_ix, self.ix_to_word, self.word_probs = self._build_vocab()
self.sample_noise = lambda: np.random.choice(
self.word_probs.shape[0], self.num_noise_words, p=self.word_probs).tolist()
def __len__(self):
"""Gets the length or number of sequences in the dataset.
Returns
----------
len(int): The number of lines in the input file
"""
return len(self.data)
def __getitem__(self, idx):
"""Gets the next sequence in the file and extracts kmers by applying the specified transformations
In addition, also outputs the context for each target kmer.
Parameters
----------
idx: int
The index of the next sequence in the file
Returns
----------
[[str], [str], [[str]]]: List of lists of sequence (document) ids, kmers, and kmer contexts
"""
sample = self.data[idx]
seq = sample.split(',')[0]
doc_id = sample.split(',')[1].rstrip()
# print(self.word_probs)
if self.transform:
# Add padding to the sequence so there is enough for all kmers at the beginning and end to have enough context kmers
seq = 'X' * self.kmer_size * self.context_size + seq + 'Y' * self.kmer_size * self.context_size
# Kmerize
seqs = self.transform(seq) # Kmer tokenize the sequence and get a list of sequence(s) back
batch = []
for seq in seqs: # For each sequence
if self.context_size == 0:
batch.append([doc_id, seq, []])
# Add context kmers
full_context = [] # The context kmers for each target kmer in this sequence
for in_doc_pos in range(self.context_size, (len(seq)-self.context_size)): # For each kmer find the context
context_indices = (in_doc_pos + diff for diff in
range(-self.context_size, self.context_size + 1)
if diff != 0)
current_context = []
for i in context_indices:
context_kmer = seq[i]
current_context.append(context_kmer)
full_context.append(current_context)
# Add noise targets
if self.word_probs is not None:
target_noise_ids = []
for in_doc_pos in range(self.context_size, (len(seq) - self.context_size)):
current_noise = [self.ix_to_word[no] for no in self.sample_noise()]
current_noise.insert(0, seq[in_doc_pos])
target_noise_ids.append(current_noise)
else:
target_noise_ids = []
seq = seq[self.context_size:(len(seq) - self.context_size)]
batch.append([[doc_id]*len(seq), seq, full_context, target_noise_ids])
if self.word_probs is None:
return batch
else:
if len(batch) > 1:
batch_flat = [item for sublist in batch for item in sublist]
doc_ids = torch.tensor([self.doc_to_ix[item] for i,sublist in enumerate(batch_flat) for item in sublist if i % 4 == 0])
target_ids = torch.tensor([self.word_to_ix[item] for i,sublist in enumerate(batch_flat) for item in sublist if i % 4 == 1])
context_ids = torch.tensor([[self.word_to_ix[nested_item] for nested_item in item] for i,sublist in enumerate(batch_flat) for item in sublist if i % 4 == 2])
target_noise_ids = torch.tensor([[self.word_to_ix[nested_item] for nested_item in item] for i,sublist in enumerate(batch_flat) for item in sublist if i % 4 == 3])
return (doc_ids, target_ids, context_ids, target_noise_ids)
def _build_vocab(self):
"""Gets the next sequence in the file and extracts kmers by applying the specified transformations
In addition, also outputs the context for each target kmer.
Parameters
----------
Returns
----------
([str], [str], [[str]]]: List of lists of sequence (document) ids, kmers, and kmer contexts
"""
loader = DataLoader(self, batch_size=4, shuffle=False, collate_fn=my_collate, num_workers=8)
# Create vocabulary
count = 0
docs = set()
vocab = set()
char_set = set()
vocab_freq = collections.Counter(vocab)
print('Building Voabulary')
for i, batch in enumerate(loader):
batch_flat = [item for sublist in batch for item in sublist]
batch_kmers = [item for sublist in batch_flat for item in sublist[1]]
docs.update(set([item for sublist in batch_flat for item in sublist[0]]))
vocab.update(set(batch_kmers))
char_set.update(set([char for kmer in batch_kmers for char in kmer]))
vocab_freq.update(collections.Counter(batch_kmers))
count += 1
# Add begin and end kmers
vocab.add('XXXX')
vocab.add('YYYY')
vocab_freq['XXXX'] = 0
vocab_freq['YYYY'] = 0
# Add all combinations of X and Y as well
print(''.join(char_set))
for nx in range(1,self.kmer_size):
all_comb = [''.join(i) for i in list(product(''.join(char_set), repeat=self.kmer_size-nx))]
for comb in all_comb:
vocab.add('X'*nx + comb)
vocab_freq['X'*nx + comb] = 0
vocab.add(comb + 'Y'*nx)
vocab_freq[comb + 'Y'*nx] = 0
# Create final word to ix dictionary
doc_to_ix = {doc: i for i, doc in enumerate(docs)}
word_to_ix = {word: i for i, word in enumerate(vocab)}
ix_to_word = {i : word for i, word in enumerate(vocab)}
probs = noise_distribution(vocab_freq, word_to_ix)
return doc_to_ix, word_to_ix, ix_to_word, probs
class KmerTokenize(object):
"""A custom tokenizer class to parse the sequences into kmers
Inspired From: https://github.com/fhalab/embeddings_reproduction/
...
Attributes
----------
k: int
The size of kmers
overlap: boolean
Should kmers be calculated with or without overlap between them
merge: boolean
Should the different kmer stretches from the same sequence be merged.
Methods
-------
__call__():
Function to kmerize the sequence and return them
"""
def __init__(self, kmer_hypers):
"""Constructs the kmer tokenizer to break a sequence into batch of kmers
Parameters
----------
kmer_hypers: (dict of {str: int, str: bool, str: bool})
Name of the file that contains one sequence per line
"""
self.k = kmer_hypers['k']
self.overlap = kmer_hypers['overlap']
self.merge = kmer_hypers['merge']
def __call__(self, seq):
"""Generates kmers from a sequence and returns a list of kmers
Parameters
----------
seq: str
The seqeunce to be broken down into kmers
Returns
-------
[[str]]: List of lists of kmers
"""
N = len(seq)
if self.overlap:
seq = seq.rstrip()
kmers = [[seq[i:i + self.k] for i in range(N - self.k + 1)]]
else:
kmers = [[seq[i:i + self.k] for i in range(j, N - self.k + 1, self.k)]
for j in range(self.k)]
if self.merge:
return kmers
else: # Not tested
kms = []
for km in kmers:
kms.append(km)
return kms
# a simple custom collate function
def my_collate(batch):
"""A custom collation function to process batch items
The default collate function uses zip to process each sequence at the kmer level.
The current implementation just separates each list
Parameters
----------
batch: [[[[str], [str], [[str]]]]]
Nested list of strings
"""
data = [item for item in batch]
return data
def noise_distribution(vocab_freq, word_to_ix):
""" We use a unigram distribution raised to the 3/4rd power,
as proposed by <NAME> al. in Distributed Representations
of Words and Phrases and their Compositionality
Inspired From: https://github.com/fhalab/embeddings_reproduction/
"""
probs = np.zeros(len(vocab_freq))
for word, freq in vocab_freq.items():
probs[word_to_ix[word]] = freq
probs = np.power(probs, 0.75)
probs /= np.sum(probs)
return probs
| [
"numpy.sum",
"torch.utils.data.DataLoader",
"numpy.power",
"numpy.random.choice",
"collections.Counter"
] | [((11006, 11027), 'numpy.power', 'np.power', (['probs', '(0.75)'], {}), '(probs, 0.75)\n', (11014, 11027), True, 'import numpy as np\n'), ((11041, 11054), 'numpy.sum', 'np.sum', (['probs'], {}), '(probs)\n', (11047, 11054), True, 'import numpy as np\n'), ((6693, 6780), 'torch.utils.data.DataLoader', 'DataLoader', (['self'], {'batch_size': '(4)', 'shuffle': '(False)', 'collate_fn': 'my_collate', 'num_workers': '(8)'}), '(self, batch_size=4, shuffle=False, collate_fn=my_collate,\n num_workers=8)\n', (6703, 6780), False, 'from torch.utils.data import DataLoader\n'), ((6913, 6939), 'collections.Counter', 'collections.Counter', (['vocab'], {}), '(vocab)\n', (6932, 6939), False, 'import collections\n'), ((7416, 7448), 'collections.Counter', 'collections.Counter', (['batch_kmers'], {}), '(batch_kmers)\n', (7435, 7448), False, 'import collections\n'), ((2524, 2612), 'numpy.random.choice', 'np.random.choice', (['self.word_probs.shape[0]', 'self.num_noise_words'], {'p': 'self.word_probs'}), '(self.word_probs.shape[0], self.num_noise_words, p=self.\n word_probs)\n', (2540, 2612), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: zmq_pull.py
import tensorflow as tf
import struct
import numpy as np
import os
from tensorflow.core.framework.tensor_pb2 import TensorProto
from tensorflow.core.framework import types_pb2 as DT
# have to import like this: https://github.com/tensorflow/tensorflow/commit/955f038afbeb81302cea43058078e68574000bce
from .common import compile, get_ext_suffix
__all__ = ['dumps_zmq_op', 'ZMQPullSocket']
_zmq_mod = None
def try_build():
file_dir = os.path.dirname(os.path.abspath(__file__))
basename = 'zmq_ops' + get_ext_suffix()
so_file = os.path.join(file_dir, basename)
if not os.path.isfile(so_file):
ret = compile()
if ret != 0:
raise RuntimeError("tensorpack user_ops compilation failed!")
global _zmq_mod
_zmq_mod = tf.load_op_library(so_file)
try_build()
class ZMQPullSocket(object):
def __init__(self, end_point, types, hwm=None, bind=True, name=None):
self._types = types
assert isinstance(bind, bool), bind
if name is None:
self._name = (tf.get_default_graph()
.unique_name(self.__class__.__name__))
else:
self._name = name
self._zmq_handle = _zmq_mod.zmq_connection(
end_point, hwm, bind=bind, shared_name=self._name)
@property
def name(self):
return self._name
def pull(self):
return _zmq_mod.zmq_pull(
self._zmq_handle, self._types)
# copied from tensorflow/python/framework/dtypes.py
_DTYPE_DICT = {
np.float16: DT.DT_HALF,
np.float32: DT.DT_FLOAT,
np.float64: DT.DT_DOUBLE,
np.uint8: DT.DT_UINT8,
np.uint16: DT.DT_UINT16,
np.uint32: DT.DT_UINT32,
np.uint64: DT.DT_UINT64,
np.int64: DT.DT_INT64,
np.int32: DT.DT_INT32,
np.int16: DT.DT_INT16,
np.int8: DT.DT_INT8,
np.complex64: DT.DT_COMPLEX64,
np.complex128: DT.DT_COMPLEX128,
np.bool: DT.DT_BOOL,
}
_DTYPE_DICT = {np.dtype(k): v for k, v in _DTYPE_DICT.items()}
def to_tensor_proto(arr):
"""
Convert a numpy array to TensorProto
Args:
arr: numpy.ndarray. only supports common numerical types
"""
if isinstance(arr, float):
arr = np.asarray(arr).astype('float32')
elif isinstance(arr, int):
arr = np.asarray(arr).astype('int32')
assert isinstance(arr, np.ndarray), type(arr)
try:
dtype = _DTYPE_DICT[arr.dtype]
except KeyError:
raise KeyError("Dtype {} is unsupported by current ZMQ Op!".format(arr.dtype))
ret = TensorProto()
shape = ret.tensor_shape
for s in arr.shape:
d = shape.dim.add()
d.size = s
ret.dtype = dtype
buf = arr.tobytes()
ret.tensor_content = buf
return ret
def dump_tensor_protos(protos):
"""
Serialize a list of :class:`TensorProto`, for communication between custom TensorFlow ops.
Args:
protos (list): list of :class:`TensorProto` instance
Notes:
The format is:
[#tensors(int32)]
[tensor1][tensor2]...
Where each tensor is:
[dtype(int32)][ndims(int32)][shape[0](int32)]...[shape[n](int32)]
[len(buffer)(int64)][buffer]
"""
s = struct.pack('=i', len(protos))
for p in protos:
tensor_content = p.tensor_content
s += struct.pack('=i', int(p.dtype))
dims = p.tensor_shape.dim
s += struct.pack('=i', len(dims))
for k in dims:
s += struct.pack('=i', k.size)
s += struct.pack('=q', len(tensor_content))
s += tensor_content
return s
def dumps_zmq_op(dp):
"""
Dump a datapoint (list of nparray) into a format that the ZMQPull op in tensorpack would accept.
Args:
dp: list of nparray
Returns:
a binary string
"""
assert isinstance(dp, (list, tuple))
protos = [to_tensor_proto(arr) for arr in dp]
return dump_tensor_protos(protos)
| [
"tensorflow.load_op_library",
"os.path.abspath",
"numpy.asarray",
"numpy.dtype",
"tensorflow.core.framework.tensor_pb2.TensorProto",
"struct.pack",
"os.path.isfile",
"tensorflow.get_default_graph",
"os.path.join"
] | [((610, 642), 'os.path.join', 'os.path.join', (['file_dir', 'basename'], {}), '(file_dir, basename)\n', (622, 642), False, 'import os\n'), ((834, 861), 'tensorflow.load_op_library', 'tf.load_op_library', (['so_file'], {}), '(so_file)\n', (852, 861), True, 'import tensorflow as tf\n'), ((2007, 2018), 'numpy.dtype', 'np.dtype', (['k'], {}), '(k)\n', (2015, 2018), True, 'import numpy as np\n'), ((2589, 2602), 'tensorflow.core.framework.tensor_pb2.TensorProto', 'TensorProto', ([], {}), '()\n', (2600, 2602), False, 'from tensorflow.core.framework.tensor_pb2 import TensorProto\n'), ((525, 550), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (540, 550), False, 'import os\n'), ((654, 677), 'os.path.isfile', 'os.path.isfile', (['so_file'], {}), '(so_file)\n', (668, 677), False, 'import os\n'), ((3512, 3537), 'struct.pack', 'struct.pack', (['"""=i"""', 'k.size'], {}), "('=i', k.size)\n", (3523, 3537), False, 'import struct\n'), ((2261, 2276), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (2271, 2276), True, 'import numpy as np\n'), ((1105, 1127), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1125, 1127), True, 'import tensorflow as tf\n'), ((2340, 2355), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (2350, 2355), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import mlopt
from mlopt.sampling import uniform_sphere_sample
import pandas as pd
import cvxpy as cp
from mlopt.settings import logger
class TestFilter(unittest.TestCase):

    def setUp(self):
        """Build a small portfolio problem and sample training parameters."""
        np.random.seed(1)
        # Problem dimensions: factor model with p factors over n assets.
        n_factors = 50
        n_assets = 200
        factor_loadings = np.random.randn(n_assets, n_factors)
        idiosyncratic = np.diag(np.random.rand(n_assets) * np.sqrt(n_factors))
        covariance = factor_loadings.dot(factor_loadings.T) + idiosyncratic
        risk_aversion = 1.0

        # Maximize expected return with a quadratic risk term and an L1
        # penalty, constrained to the probability simplex.
        mu = cp.Parameter(n_assets, name='mu')
        x = cp.Variable(n_assets)
        objective = (- mu @ x
                     + risk_aversion * cp.quad_form(x, covariance)
                     + .5 * cp.norm(x, 1))
        constraints = [cp.sum(x) == 1, x >= 0]
        problem = cp.Problem(cp.Minimize(objective), constraints)

        # Optimizer under test.
        optimizer = mlopt.Optimizer(problem)

        # Sample parameter vectors from a ball centered at theta_bar.
        theta_bar = 10 * np.random.rand(n_assets)
        radius = 0.8
        n_train = 100
        n_test = 10
        train_points = uniform_sphere_sample(theta_bar, radius, n=n_train)
        self.df_train = pd.DataFrame({'mu': list(train_points)})
        test_points = uniform_sphere_sample(theta_bar, radius, n=n_test)

        self.m = optimizer
        self.df_test = pd.DataFrame({'mu': list(test_points)})

    def test_filter_simple(self):
        """Serial and parallel strategy filtering must agree on the count."""
        self.m.get_samples(self.df_train, parallel=True,
                           filter_strategies=False)
        logger.info("Number of original strategies %d" %
                    len(self.m.encoding))
        self.m.filter_strategies(parallel=True)
        logger.info("Number of condensed strategies (parallel): %d" %
                    len(self.m.encoding))
        n_filter_parallel = len(self.m.encoding)

        logger.info("Recompute samples to cleanup filtered ones")
        self.m.get_samples(self.df_train, parallel=False,
                           filter_strategies=False)
        self.m.filter_strategies(parallel=False)
        logger.info("Number of condensed strategies (serial): %d" %
                    len(self.m.encoding))
        n_filter_serial = len(self.m.encoding)

        # Filtering can only condense, never invent, strategies; and the
        # result must not depend on the execution mode.
        assert len(self.m._filter.encoding_full) >= n_filter_parallel
        assert n_filter_serial == n_filter_parallel
| [
"mlopt.sampling.uniform_sphere_sample",
"numpy.random.seed",
"mlopt.Optimizer",
"cvxpy.Parameter",
"numpy.random.randn",
"numpy.random.rand",
"cvxpy.sum",
"cvxpy.norm",
"cvxpy.Variable",
"mlopt.settings.logger.info",
"numpy.sqrt",
"cvxpy.quad_form",
"cvxpy.Minimize"
] | [((274, 291), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (288, 291), True, 'import numpy as np\n'), ((378, 399), 'numpy.random.randn', 'np.random.randn', (['n', 'p'], {}), '(n, p)\n', (393, 399), True, 'import numpy as np\n'), ((514, 540), 'cvxpy.Parameter', 'cp.Parameter', (['n'], {'name': '"""mu"""'}), "(n, name='mu')\n", (526, 540), True, 'import cvxpy as cp\n'), ((553, 567), 'cvxpy.Variable', 'cp.Variable', (['n'], {}), '(n)\n', (564, 567), True, 'import cvxpy as cp\n'), ((838, 862), 'mlopt.Optimizer', 'mlopt.Optimizer', (['problem'], {}), '(problem)\n', (853, 862), False, 'import mlopt\n'), ((1250, 1301), 'mlopt.sampling.uniform_sphere_sample', 'uniform_sphere_sample', (['theta_bar', 'radius'], {'n': 'n_train'}), '(theta_bar, radius, n=n_train)\n', (1271, 1301), False, 'from mlopt.sampling import uniform_sphere_sample\n'), ((1675, 1725), 'mlopt.sampling.uniform_sphere_sample', 'uniform_sphere_sample', (['theta_bar', 'radius'], {'n': 'n_test'}), '(theta_bar, radius, n=n_test)\n', (1696, 1725), False, 'from mlopt.sampling import uniform_sphere_sample\n'), ((2316, 2373), 'mlopt.settings.logger.info', 'logger.info', (['"""Recompute samples to cleanup filtered ones"""'], {}), "('Recompute samples to cleanup filtered ones')\n", (2327, 2373), False, 'from mlopt.settings import logger\n'), ((722, 739), 'cvxpy.Minimize', 'cp.Minimize', (['cost'], {}), '(cost)\n', (733, 739), True, 'import cvxpy as cp\n'), ((1021, 1038), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (1035, 1038), True, 'import numpy as np\n'), ((420, 437), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (434, 437), True, 'import numpy as np\n'), ((438, 448), 'numpy.sqrt', 'np.sqrt', (['p'], {}), '(p)\n', (445, 448), True, 'import numpy as np\n'), ((632, 645), 'cvxpy.norm', 'cp.norm', (['x', '(1)'], {}), '(x, 1)\n', (639, 645), True, 'import cvxpy as cp\n'), ((669, 678), 'cvxpy.sum', 'cp.sum', (['x'], {}), '(x)\n', (675, 678), True, 'import cvxpy as 
cp\n'), ((602, 624), 'cvxpy.quad_form', 'cp.quad_form', (['x', 'Sigma'], {}), '(x, Sigma)\n', (614, 624), True, 'import cvxpy as cp\n')] |
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiple GANs that together model the data distribution, including BG."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.src import params
from compare_gan.src.gans import consts
from compare_gan.src.multi_gan.multi_gan import MultiGAN
import numpy as np
import tensorflow as tf
class MultiGANBackground(MultiGAN):
  """A GAN consisting of a background generator and multiple copies of
  object generators.

  The latent tensor has shape (batch_size, k + 1, z_dim): k object latents
  plus one dedicated background latent.
  """

  def __init__(self, dataset_content, parameters, runtime_info):
    """Creates the model.

    Args:
      dataset_content: dataset tensors, passed through to MultiGAN.
      parameters: hyperparameter dict; must contain
        "background_interaction" (see generate_images).
      runtime_info: runtime information, passed through to MultiGAN.
    """
    super(MultiGANBackground, self).__init__(
        dataset_content=dataset_content,
        parameters=parameters,
        runtime_info=runtime_info,
        model_name="MultiGANBackground")

    # If True, the background latent also participates in the relational
    # stage (update_z); otherwise only the object latents interact.
    self.background_interaction = parameters["background_interaction"]

  def build_model(self, is_training=True):
    """Builds the TF1 graph: placeholders, NS-GAN losses, penalties,
    optimizers and summaries."""
    image_dims = [self.input_height, self.input_width, self.c_dim]
    batch_size = self.batch_size

    # Input images.
    self.inputs = tf.placeholder(
        tf.float32, [batch_size] + image_dims, name="real_images")

    # Noise vector: k object latents plus one background latent.
    self.z = tf.placeholder(
        tf.float32, [batch_size, self.k + 1, self.z_dim], name="z")

    # Discriminator output for real images.
    d_real, d_real_logits, _ = self.discriminator(
        self.inputs, is_training=is_training, reuse=False)

    # Discriminator output for fake images.
    generated = self.generator(self.z, is_training=is_training, reuse=False)
    d_fake, d_fake_logits, _ = self.discriminator(
        generated, is_training=is_training, reuse=True)

    self.discriminator_output = self.discriminator(
        self.inputs, is_training=is_training, reuse=True)[0]

    # Define the loss functions (non-saturating GAN).
    d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=d_real_logits, labels=tf.ones_like(d_real)))
    d_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=d_fake_logits, labels=tf.zeros_like(d_fake)))
    self.d_loss = d_loss_real + d_loss_fake
    self.g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=d_fake_logits, labels=tf.ones_like(d_fake)))

    # Define the penalty added to the discriminator loss (if any).
    if self.penalty_type == consts.NO_PENALTY:
      self.penalty_loss = 0.0
    elif self.penalty_type == consts.DRAGAN_PENALTY:
      self.penalty_loss = self.dragan_penalty(self.inputs, self.discriminator,
                                              is_training)
      self.d_loss += self.lambd * self.penalty_loss
    elif self.penalty_type == consts.WGANGP_PENALTY:
      self.penalty_loss = self.wgangp_penalty(self.inputs, generated,
                                              self.discriminator, is_training)
      self.d_loss += self.lambd * self.penalty_loss
    elif self.penalty_type == consts.L2_PENALTY:
      self.penalty_loss = self.l2_penalty()
      self.d_loss += self.lambd * self.penalty_loss
    else:
      raise NotImplementedError(
          "The penalty %s was not implemented." % self.penalty_type)

    # Divide trainable variables into a group for D and group for G,
    # selected by substring match on the variable names.
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if "discriminator" in var.name]
    g_vars = [var for var in t_vars if "generator" in var.name]
    self.check_variables(t_vars, d_vars, g_vars)

    # Define optimization ops; UPDATE_OPS dependency keeps batch-norm
    # statistics updated before each minimization step.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
      self.d_optim = self.get_optimizer("d_").minimize(
          self.d_loss, var_list=d_vars)
      self.g_optim = self.get_optimizer("g_").minimize(
          self.g_loss, var_list=g_vars)

    # Store testing images (generator in inference mode, reused weights).
    self.fake_images = self.generator(self.z, is_training=False, reuse=True)

    # Setup summaries.
    d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real)
    d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake)
    d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
    g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)

    self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum])
    self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum])

  def generate_images(self, z, is_training, reuse):
    """Returns generated object / background images and their latents.

    The LAST slot of z (axis 1) is always treated as the background latent.

    Returns:
      A tuple (generated_o, generated_b, z_o, z_b).
    """
    # Let z_b participate in relational part.
    if self.background_interaction:
      z = self.update_z(z, reuse)

      # Isolate z used for background (ALWAYS the last slot).
      z_s = tf.unstack(z, axis=1)
      z_o = tf.stack(z_s[:-1], axis=1)
      z_b = z_s[-1]

      # Pass each object latent through the shared object generator
      # (weights reused after the first copy).
      out = []
      for i in range(self.k):
        use_copy = reuse or (i > 0)
        out_k = super(MultiGAN, self).generator(  # pylint: disable=bad-super-call
            z_o[:, i], is_training, use_copy)
        out.append(tf.expand_dims(out_k, 1))
      generated_o = tf.concat(out, axis=1, name="generator_predictions")

    # Only let z_o participate in relational part.
    else:
      # Isolate z used for background (ALWAYS the last slot).
      z_s = tf.unstack(z, axis=1)
      z_o = tf.stack(z_s[:-1], axis=1)
      z_b = z_s[-1]

      # Generated object images (relational update happens inside).
      generated_o = super(MultiGANBackground, self).generate_images(
          z_o, is_training, reuse)

    # The background uses its own generator weights, in a separate scope.
    with tf.variable_scope("background_generator", reuse=reuse):
      generated_b = super(MultiGAN, self).generator(z_b, is_training, reuse)  # pylint: disable=bad-super-call

    return generated_o, generated_b, z_o, z_b

  def generator(self, z, is_training, reuse=False):
    """Generates a composed image from object + background latents."""
    # Hack: add an alpha channel to generator output if aggregate == alpha.
    if self.aggregate == "alpha":
      self.c_dim += 1

    # Generated object images and background image.
    generated_o, generated_b, z_o, z_b = self.generate_images(
        z, is_training, reuse)

    # Hack: reset the alpha channel after use.
    # Give the background an opaque alpha channel for alpha compositing.
    if self.aggregate == "alpha":
      self.c_dim -= 1
      generated_b = tf.concat(
          (generated_b[..., :-1], tf.ones_like(generated_b[..., -1:])), axis=-1)

    # Aggregate the generated outputs (order matters for alpha / fixed_perm:
    # the background is appended last).
    z = tf.concat([z_o, tf.expand_dims(z_b, axis=1)], axis=1)
    generated = tf.concat(
        [generated_o, tf.expand_dims(generated_b, axis=1)], axis=1)
    aggregated = self.aggregate_images(generated)

    return aggregated

  def z_generator(self, batch_size, z_dim):
    """Samples numpy latents of shape (batch_size, k + 1, z_dim).

    NOTE(review): the background latent is concatenated FIRST here, while
    generate_images treats the LAST slot as the background. Since every slot
    is i.i.d. uniform(-1, 1) the distribution is unaffected, but confirm the
    ordering if the slots ever become non-exchangeable.
    """
    z_o = super(MultiGANBackground, self).z_generator(batch_size, z_dim)
    z_b = np.random.uniform(-1, 1, size=(batch_size, 1, z_dim))
    return np.concatenate([z_b, z_o], axis=1)  # (batch_size, k + 1, z_dim)

  def z_tf_generator(self, batch_size, z_dim, name=None):
    """TF op sampling latents of shape (batch_size, k + 1, z_dim).

    NOTE(review): same slot-ordering caveat as z_generator above.
    """
    z_o = super(MultiGANBackground, self).z_tf_generator(
        batch_size, z_dim, name)
    z_b = tf.random_uniform(
        (batch_size, 1, z_dim), minval=-1.0, maxval=1.0, name=name)
    return tf.concat([z_b, z_o], axis=1)  # (batch_size, k + 1, z_dim)
def MultiGANBackgroundHyperParams(range_type, gan_type, penalty_type):
  """Return a default set of hyperparameters for MultiGANBackground.

  Args:
    range_type: key selecting the base ranges (params.GetDefaultRange).
    gan_type: unused; kept for interface compatibility with the other
      *HyperParams factories.
    penalty_type: when equal to consts.L2_PENALTY, a smaller lambda
      range is used.

  Returns:
    The populated parameter set (param_ranges.GetParams()).
  """
  del gan_type
  param_ranges = params.GetDefaultRange(range_type)

  if penalty_type and penalty_type == consts.L2_PENALTY:
    param_ranges.AddRange("lambda", 0.01, [-4.0, 1.0],
                          is_log_scale=True, is_discrete=False)
  else:
    param_ranges.AddRange("lambda", 10.0, [-1.0, 2.0],
                          is_log_scale=True, is_discrete=False)
  param_ranges.AddRange("beta2", 0.999, [0, 1],
                        is_log_scale=False, is_discrete=False)

  # MultiGAN
  param_ranges.UpdateDefaults(
      {"beta1": 0.0, "learning_rate": 0.00005, "disc_iters": 1,
       "discriminator_normalization": consts.BATCH_NORM})
  # NOTE(review): "penalty_type" was previously registered twice with
  # identical arguments; the redundant first call has been removed.
  param_ranges.AddRange("penalty_type", consts.NO_PENALTY,
                        [consts.NO_PENALTY, consts.WGANGP_PENALTY],
                        is_log_scale=False, is_discrete=True)
  param_ranges.AddRange("discriminator_normalization", consts.NO_NORMALIZATION,
                        [consts.NO_NORMALIZATION, consts.BATCH_NORM,
                         consts.SPECTRAL_NORM],
                        is_log_scale=False, is_discrete=True)
  param_ranges.AddRange("z_dim", 64, [64],
                        is_log_scale=False, is_discrete=True)
  param_ranges.AddRange("k", 3, [1, 2, 3, 4, 5],
                        is_log_scale=False, is_discrete=True)
  param_ranges.AddRange("aggregate", "sum_clip", ["sum", "sum_clip", "mean"],
                        is_log_scale=False, is_discrete=True)
  param_ranges.AddRange("n_heads", 1, [1, 2, 3],
                        is_log_scale=False, is_discrete=True)
  param_ranges.AddRange("n_blocks", 1, [1, 2, 3],
                        is_log_scale=False, is_discrete=True)
  param_ranges.AddRange("share_block_weights", False, [True, False],
                        is_log_scale=False, is_discrete=True)
  param_ranges.AddRange("embedding_dim", 32, [32, 64, 128],
                        is_log_scale=False, is_discrete=True)

  # MultiGANBackground
  param_ranges.AddRange("background_interaction", False, [True, False],
                        is_log_scale=False, is_discrete=True)

  return param_ranges.GetParams()
| [
"numpy.random.uniform",
"tensorflow.random_uniform",
"tensorflow.summary.scalar",
"tensorflow.trainable_variables",
"tensorflow.get_collection",
"compare_gan.src.params.GetDefaultRange",
"tensorflow.concat",
"tensorflow.variable_scope",
"tensorflow.ones_like",
"tensorflow.stack",
"tensorflow.pla... | [((7853, 7887), 'compare_gan.src.params.GetDefaultRange', 'params.GetDefaultRange', (['range_type'], {}), '(range_type)\n', (7875, 7887), False, 'from compare_gan.src import params\n'), ((1615, 1688), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([batch_size] + image_dims)'], {'name': '"""real_images"""'}), "(tf.float32, [batch_size] + image_dims, name='real_images')\n", (1629, 1688), True, 'import tensorflow as tf\n'), ((1732, 1806), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, self.k + 1, self.z_dim]'], {'name': '"""z"""'}), "(tf.float32, [batch_size, self.k + 1, self.z_dim], name='z')\n", (1746, 1806), True, 'import tensorflow as tf\n'), ((3783, 3807), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3805, 3807), True, 'import tensorflow as tf\n'), ((4443, 4488), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""d_loss_real"""', 'd_loss_real'], {}), "('d_loss_real', d_loss_real)\n", (4460, 4488), True, 'import tensorflow as tf\n'), ((4511, 4556), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""d_loss_fake"""', 'd_loss_fake'], {}), "('d_loss_fake', d_loss_fake)\n", (4528, 4556), True, 'import tensorflow as tf\n'), ((4574, 4614), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""d_loss"""', 'self.d_loss'], {}), "('d_loss', self.d_loss)\n", (4591, 4614), True, 'import tensorflow as tf\n'), ((4632, 4672), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""g_loss"""', 'self.g_loss'], {}), "('g_loss', self.g_loss)\n", (4649, 4672), True, 'import tensorflow as tf\n'), ((4691, 4738), 'tensorflow.summary.merge', 'tf.summary.merge', (['[d_loss_fake_sum, g_loss_sum]'], {}), '([d_loss_fake_sum, g_loss_sum])\n', (4707, 4738), True, 'import tensorflow as tf\n'), ((4756, 4803), 'tensorflow.summary.merge', 'tf.summary.merge', (['[d_loss_real_sum, d_loss_sum]'], {}), '([d_loss_real_sum, d_loss_sum])\n', (4772, 4803), True, 'import tensorflow as tf\n'), ((7225, 
7278), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(batch_size, 1, z_dim)'}), '(-1, 1, size=(batch_size, 1, z_dim))\n', (7242, 7278), True, 'import numpy as np\n'), ((7291, 7325), 'numpy.concatenate', 'np.concatenate', (['[z_b, z_o]'], {'axis': '(1)'}), '([z_b, z_o], axis=1)\n', (7305, 7325), True, 'import numpy as np\n'), ((7516, 7593), 'tensorflow.random_uniform', 'tf.random_uniform', (['(batch_size, 1, z_dim)'], {'minval': '(-1.0)', 'maxval': '(1.0)', 'name': 'name'}), '((batch_size, 1, z_dim), minval=-1.0, maxval=1.0, name=name)\n', (7533, 7593), True, 'import tensorflow as tf\n'), ((7615, 7644), 'tensorflow.concat', 'tf.concat', (['[z_b, z_o]'], {'axis': '(1)'}), '([z_b, z_o], axis=1)\n', (7624, 7644), True, 'import tensorflow as tf\n'), ((5103, 5124), 'tensorflow.unstack', 'tf.unstack', (['z'], {'axis': '(1)'}), '(z, axis=1)\n', (5113, 5124), True, 'import tensorflow as tf\n'), ((5137, 5163), 'tensorflow.stack', 'tf.stack', (['z_s[:-1]'], {'axis': '(1)'}), '(z_s[:-1], axis=1)\n', (5145, 5163), True, 'import tensorflow as tf\n'), ((5530, 5582), 'tensorflow.concat', 'tf.concat', (['out'], {'axis': '(1)', 'name': '"""generator_predictions"""'}), "(out, axis=1, name='generator_predictions')\n", (5539, 5582), True, 'import tensorflow as tf\n'), ((5708, 5729), 'tensorflow.unstack', 'tf.unstack', (['z'], {'axis': '(1)'}), '(z, axis=1)\n', (5718, 5729), True, 'import tensorflow as tf\n'), ((5742, 5768), 'tensorflow.stack', 'tf.stack', (['z_s[:-1]'], {'axis': '(1)'}), '(z_s[:-1], axis=1)\n', (5750, 5768), True, 'import tensorflow as tf\n'), ((5957, 6011), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""background_generator"""'], {'reuse': 'reuse'}), "('background_generator', reuse=reuse)\n", (5974, 6011), True, 'import tensorflow as tf\n'), ((4054, 4096), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (4071, 4096), True, 'import tensorflow as tf\n'), ((6891, 
6918), 'tensorflow.expand_dims', 'tf.expand_dims', (['z_b'], {'axis': '(1)'}), '(z_b, axis=1)\n', (6905, 6918), True, 'import tensorflow as tf\n'), ((6978, 7013), 'tensorflow.expand_dims', 'tf.expand_dims', (['generated_b'], {'axis': '(1)'}), '(generated_b, axis=1)\n', (6992, 7013), True, 'import tensorflow as tf\n'), ((2480, 2500), 'tensorflow.ones_like', 'tf.ones_like', (['d_real'], {}), '(d_real)\n', (2492, 2500), True, 'import tensorflow as tf\n'), ((2627, 2648), 'tensorflow.zeros_like', 'tf.zeros_like', (['d_fake'], {}), '(d_fake)\n', (2640, 2648), True, 'import tensorflow as tf\n'), ((2819, 2839), 'tensorflow.ones_like', 'tf.ones_like', (['d_fake'], {}), '(d_fake)\n', (2831, 2839), True, 'import tensorflow as tf\n'), ((5483, 5507), 'tensorflow.expand_dims', 'tf.expand_dims', (['out_k', '(1)'], {}), '(out_k, 1)\n', (5497, 5507), True, 'import tensorflow as tf\n'), ((6742, 6777), 'tensorflow.ones_like', 'tf.ones_like', (['generated_b[..., -1:]'], {}), '(generated_b[..., -1:])\n', (6754, 6777), True, 'import tensorflow as tf\n')] |
import pandas as pd
import numpy as np
import os
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
# VADER sentiment analyzer, used on the free-text comment columns.
sia = SIA()
# Column index groups in each expert sheet (0-based positional indices):
NAME_COL = [1,2]            # expert name / company
NUMER_ORD = [3,4,5,6,7,8,9,10,11,12,13,15,16,17,18,19]  # numeric/ordinal scores
CAT_ORD = [14,27,29]        # categorical columns mapped to fixed scores
BIN_PRES = [20,21,22,23,24] # presence/absence columns
COMM = [25,26]              # free-text comment columns
# All columns that carry scores, in ascending positional order.
REL_COLS = []
REL_COLS.extend(NUMER_ORD)
REL_COLS.extend(CAT_ORD)
REL_COLS.extend(BIN_PRES)
REL_COLS.extend(COMM)
REL_COLS = sorted(REL_COLS)
# The workbook repeats a 4-sheet pattern; every 3rd sheet of each group is a
# "main" data sheet and every 4th a "results" sheet.
sheet_names = pd.ExcelFile("BaseData.xlsx").sheet_names
sheet_names_main = [sheet_name for (i,sheet_name) in enumerate(sheet_names) if i%4==2]
sheet_names_results = [sheet_name for (i,sheet_name) in enumerate(sheet_names) if i%4==3]
def main():
    """Score each expert per assessment column and write 's234578r2c.csv'.

    Pipeline per main sheet:
      1. min-max scale numeric/ordinal columns to [0, 1],
      2. map categorical columns to fixed scores in {0, 0.5, 1},
      3. binarize presence columns,
      4. score free-text comments with VADER sentiment (rescaled to [0, 1]),
      5. collect all scores per (expert, column),
      6. expert score per column = population mean / expert mean.
    """
    data4_list = []
    for sheet_name in sheet_names_main:
        data = pd.read_excel("BaseData.xlsx", sheet_name=sheet_name, header=0)
        # The first data row holds the real column names.
        new_header = data.iloc[0]
        data = data[1:]
        data.columns = new_header
        data2 = data.iloc[:, REL_COLS]
        data3 = data.iloc[:, NAME_COL]

        # 1. Min-max scale numeric/ordinal columns to [0, 1].
        for i in NUMER_ORD:
            # BUG FIX: MIN/MAX were previously swapped (MAX was bound to the
            # column minimum), which silently inverted the scaling.
            MIN, MAX = data.iloc[:, i].min(), data.iloc[:, i].max()
            data.iloc[:, i] = data.iloc[:, i].apply(lambda x: (x - MIN) / (MAX - MIN))

        # 2. Categorical columns -> fixed scores.
        for i in CAT_ORD:
            if i == 14:
                def helper_14(x):
                    if "somewhat" in str(x).lower():
                        return 0.5
                    elif "highly" in str(x).lower():
                        return 1.0
                    else:
                        return 0.0
                data.iloc[:, i] = data.iloc[:, i].apply(helper_14)
            if i == 27:
                def helper_27(x):
                    if "can be there" in str(x).lower():
                        return 1.0
                    elif "no chance" in str(x).lower():
                        return 0.0
                    else:
                        return 0.5
                # Column 27 is missing on some sheets; skip those sheets.
                try:
                    data.iloc[:, i] = data.iloc[:, i].apply(helper_27)
                except Exception:
                    continue
            if i == 29:
                def helper_29(x):
                    if "low" in str(x).lower():
                        return 0.0
                    elif "high" in str(x).lower():
                        return 1.0
                    else:
                        return 0.5
                data.iloc[:, i] = data.iloc[:, i].apply(helper_29)

        # 3. Presence columns: 1 when the cell is EMPTY, 0 when filled.
        # NOTE(review): this looks inverted for a "presence" flag — confirm.
        for i in BIN_PRES:
            data.iloc[:, i] = data.iloc[:, i].apply(lambda x: int(pd.isna(x)))

        # 4. Comment columns -> VADER compound score rescaled to [0, 1];
        #    missing comments count as neutral (0.5).
        for i in COMM:
            def helper_comm(x):
                if pd.isna(x):
                    return 0.5
                return (float(sia.polarity_scores(x)['compound']) + 1) / 2
            try:
                data.iloc[:, i] = data.iloc[:, i].apply(helper_comm)
            except Exception:
                continue

        # Refresh data2 with the transformed values (it was sliced before
        # the transformations above ran).
        for ind, j in enumerate(REL_COLS):
            data2.iloc[:, ind] = data.iloc[:, j]
        data4 = pd.concat([data3, data2], axis=1)
        data4_list.append(data4)

    # Assessment column labels (skip the two name/company columns).
    # NOTE(review): taken from the second sheet — confirm all sheets share
    # the same column labels.
    data_cols = data4_list[1].columns
    data_cols = data_cols[2:]

    # 5. Collect every score per (expert, column).
    expert_count = dict()  # rows seen per expert (currently informational)
    expertIndexColScore = dict()
    for sheet_name, data in zip(sheet_names_main, data4_list):
        for index, row in data.iterrows():
            # Sheet "S5" has name and company in swapped positions.
            if "S5" in sheet_name:
                name, company = row[1], row[0]
            else:
                name, company = row[0], row[1]
            if name not in expert_count:
                expert_count[name] = 1
            else:
                expert_count[name] += 1
            rowNew = row[2:].tolist()
            for index, item in enumerate(rowNew):
                if name not in expertIndexColScore:
                    expertIndexColScore[name] = dict()
                # BUG FIX: the first item of a brand-new expert used to be
                # dropped because the append sat in an 'else' branch of the
                # dict-initialization check.
                if data_cols[index] not in expertIndexColScore[name]:
                    expertIndexColScore[name][data_cols[index]] = [item]
                else:
                    expertIndexColScore[name][data_cols[index]].append(item)

    # Pool all experts' scores per column.
    expertScoreIndexCol = dict()
    for expert in expertIndexColScore:
        for column in expertIndexColScore[expert]:
            if column not in expertScoreIndexCol:
                expertScoreIndexCol[column] = []
            expertScoreIndexCol[column].extend(expertIndexColScore[expert][column])

    # 6. expert score = population mean / expert mean; 1.0 when the expert
    #    mean is ~0 or the expert never rated that column.
    allColumnKeys = expertScoreIndexCol.keys()
    for expert in expertIndexColScore:
        currentKeys = list()
        for column in expertIndexColScore[expert]:
            # 'not np.isnan(...)' replaces the original '~np.isnan(...)'
            # (equivalent for numpy bools, but clearer).
            relScoreIndexCol = expertScoreIndexCol[column]
            relScoreIndexCol = [item for item in relScoreIndexCol if not np.isnan(item)]
            numerator = np.mean(relScoreIndexCol)
            relIndexColScore = expertIndexColScore[expert][column]
            relIndexColScore = [item for item in relIndexColScore if not np.isnan(item)]
            denominator = np.mean(relIndexColScore)
            if abs(denominator - 0.0) >= 1e-5:
                expertIndexColScore[expert][column] = round(float(numerator / denominator), 4)
            else:
                expertIndexColScore[expert][column] = 1.0
            currentKeys.append(column)
        for remainColumn in (set(allColumnKeys) - set(currentKeys)):
            expertIndexColScore[expert][remainColumn] = 1.0

    # Drop the NaN "expert" picked up from empty name cells.
    newExpertIndexColScore = dict()
    for item in expertIndexColScore:
        if pd.isna(item):
            continue
        newExpertIndexColScore[item] = expertIndexColScore[item]

    pd_exp_ind_col_score = pd.DataFrame.from_dict(newExpertIndexColScore).transpose()
    # to_csv would overwrite anyway; the explicit remove is kept for parity
    # with the original behavior.
    if "s234578r2c.csv" in os.listdir():
        os.remove("s234578r2c.csv")
    pd_exp_ind_col_score.to_csv("s234578r2c.csv")


main()
"os.listdir",
"os.remove",
"pandas.DataFrame.from_dict",
"nltk.sentiment.vader.SentimentIntensityAnalyzer",
"pandas.ExcelFile",
"numpy.isnan",
"pandas.read_excel",
"numpy.mean",
"pandas.isna",
"pandas.concat"
] | [((127, 132), 'nltk.sentiment.vader.SentimentIntensityAnalyzer', 'SIA', ([], {}), '()\n', (130, 132), True, 'from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA\n'), ((427, 456), 'pandas.ExcelFile', 'pd.ExcelFile', (['"""BaseData.xlsx"""'], {}), "('BaseData.xlsx')\n", (439, 456), True, 'import pandas as pd\n'), ((735, 798), 'pandas.read_excel', 'pd.read_excel', (['"""BaseData.xlsx"""'], {'sheet_name': 'sheet_name', 'header': '(0)'}), "('BaseData.xlsx', sheet_name=sheet_name, header=0)\n", (748, 798), True, 'import pandas as pd\n'), ((2615, 2648), 'pandas.concat', 'pd.concat', (['[data3, data2]'], {'axis': '(1)'}), '([data3, data2], axis=1)\n', (2624, 2648), True, 'import pandas as pd\n'), ((4933, 4946), 'pandas.isna', 'pd.isna', (['item'], {}), '(item)\n', (4940, 4946), True, 'import pandas as pd\n'), ((5144, 5156), 'os.listdir', 'os.listdir', ([], {}), '()\n', (5154, 5156), False, 'import os\n'), ((5159, 5186), 'os.remove', 'os.remove', (['"""s234578r2c.csv"""'], {}), "('s234578r2c.csv')\n", (5168, 5186), False, 'import os\n'), ((4252, 4277), 'numpy.mean', 'np.mean', (['relScoreIndexCol'], {}), '(relScoreIndexCol)\n', (4259, 4277), True, 'import numpy as np\n'), ((4458, 4483), 'numpy.mean', 'np.mean', (['relIndexColScore'], {}), '(relIndexColScore)\n', (4465, 4483), True, 'import numpy as np\n'), ((5057, 5103), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['newExpertIndexColScore'], {}), '(newExpertIndexColScore)\n', (5079, 5103), True, 'import pandas as pd\n'), ((2296, 2306), 'pandas.isna', 'pd.isna', (['x'], {}), '(x)\n', (2303, 2306), True, 'import pandas as pd\n'), ((2209, 2219), 'pandas.isna', 'pd.isna', (['x'], {}), '(x)\n', (2216, 2219), True, 'import pandas as pd\n'), ((4212, 4226), 'numpy.isnan', 'np.isnan', (['item'], {}), '(item)\n', (4220, 4226), True, 'import numpy as np\n'), ((4416, 4430), 'numpy.isnan', 'np.isnan', (['item'], {}), '(item)\n', (4424, 4430), True, 'import numpy as np\n')] |
from SharedProcessors.const import SUB_NAMES, LINE_WIDTH, FONT_DICT_SMALL, EPOCH_NUM
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from Drawer import save_fig
from matplotlib import rc
def draw_f7(training_mean, training_std, test_mean, test_std):
    """Plot per-epoch RMSE (mean +/- std) for training and test sets.

    Args:
        training_mean, training_std: 1-D arrays of per-epoch training RMSE
            mean and standard deviation.
        test_mean, test_std: same for the test set.

    Saves the figure as 'f7' at 600 dpi and shows it.
    """
    def format_axis(line_width=LINE_WIDTH):
        # Hide top/right spines and thicken the remaining axis lines/ticks.
        ax = plt.gca()
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.xaxis.set_tick_params(width=line_width)
        ax.yaxis.set_tick_params(width=line_width)
        ax.spines['left'].set_linewidth(line_width)
        ax.spines['bottom'].set_linewidth(line_width)

    def draw_lines(training_mean, training_std, test_mean, test_std):
        ax = plt.gca()
        # Removed unused color_0; only the test-set color was ever used.
        test_color = np.array([53, 128, 57]) / 255
        axis_x = range(training_mean.shape[0])
        plt.plot(axis_x, training_mean, label='Training Set', linewidth=LINE_WIDTH)
        plt.fill_between(axis_x, training_mean - training_std, training_mean + training_std, alpha=0.4)
        plt.plot(axis_x, test_mean, label='Test Set', linewidth=LINE_WIDTH, color=test_color)
        plt.fill_between(axis_x, test_mean - test_std, test_mean + test_std, alpha=0.4, facecolor=test_color)
        ax.tick_params(labelsize=FONT_DICT_SMALL['fontsize'])
        ax.set_xticks(range(0, training_mean.shape[0] + 1, 25))
        ax.set_xticklabels(range(0, training_mean.shape[0] + 1, 25), fontdict=FONT_DICT_SMALL)
        ax.set_xlabel('Epoch', fontdict=FONT_DICT_SMALL)
        ax.set_xlim(-1, training_mean.shape[0] - 1)
        # (removed a redundant second plt.gca(); it returned the same axes)
        ax.set_ylabel('RMSE', fontdict=FONT_DICT_SMALL)
        ax.set_ylim(0, 0.4)
        ticks = [0, 0.1, 0.2, 0.3, 0.4]
        ax.set_yticks(ticks)
        ax.set_yticklabels(ticks, fontdict=FONT_DICT_SMALL)

    rc('font', family='Arial')
    plt.figure(figsize=(3.54, 2.8))
    draw_lines(training_mean, training_std, test_mean, test_std)
    format_axis()
    plt.tight_layout(rect=[-0.02, -0.02, 1.02, 1.02])
    plt.legend(handlelength=3, bbox_to_anchor=(0.98, 0.95), ncol=1, fontsize=FONT_DICT_SMALL['fontsize'],
               frameon=False)
    save_fig('f7', 600)
    plt.show()
if __name__ == '__main__':
    result_date = '220325'  # results folder to read, named by date
    training_rmse, test_rmse = [], []
    # Collect per-epoch RMSE curves (sqrt of logged MSE) for every subject.
    for sub in SUB_NAMES:  # SUB_NAMES
        training_log = pd.read_csv('./result_conclusion/{}/training_log/{}.csv'.format(result_date, sub), index_col=False)
        training_rmse.append(np.sqrt(training_log['mean_squared_error'].values))
        test_rmse.append(np.sqrt(training_log['val_mean_squared_error'].values))
    # Mean and standard deviation across subjects, per epoch.
    training_rmse_mean, training_rmse_std = np.mean(training_rmse, axis=0), np.std(training_rmse, axis=0)
    test_rmse_mean, test_rmse_std = np.mean(test_rmse, axis=0), np.std(test_rmse, axis=0)
    draw_f7(training_rmse_mean, training_rmse_std, test_rmse_mean, test_rmse_std)
| [
"matplotlib.rc",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.std",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"Drawer.save_fig",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.tight_layout",
"numpy.sqrt"
... | [((1822, 1848), 'matplotlib.rc', 'rc', (['"""font"""'], {'family': '"""Arial"""'}), "('font', family='Arial')\n", (1824, 1848), False, 'from matplotlib import rc\n'), ((1853, 1884), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3.54, 2.8)'}), '(figsize=(3.54, 2.8))\n', (1863, 1884), True, 'import matplotlib.pyplot as plt\n'), ((1972, 2021), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[-0.02, -0.02, 1.02, 1.02]'}), '(rect=[-0.02, -0.02, 1.02, 1.02])\n', (1988, 2021), True, 'import matplotlib.pyplot as plt\n'), ((2026, 2147), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handlelength': '(3)', 'bbox_to_anchor': '(0.98, 0.95)', 'ncol': '(1)', 'fontsize': "FONT_DICT_SMALL['fontsize']", 'frameon': '(False)'}), "(handlelength=3, bbox_to_anchor=(0.98, 0.95), ncol=1, fontsize=\n FONT_DICT_SMALL['fontsize'], frameon=False)\n", (2036, 2147), True, 'import matplotlib.pyplot as plt\n'), ((2162, 2181), 'Drawer.save_fig', 'save_fig', (['"""f7"""', '(600)'], {}), "('f7', 600)\n", (2170, 2181), False, 'from Drawer import save_fig\n'), ((2186, 2196), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2194, 2196), True, 'import matplotlib.pyplot as plt\n'), ((332, 341), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (339, 341), True, 'import matplotlib.pyplot as plt\n'), ((724, 733), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (731, 733), True, 'import matplotlib.pyplot as plt\n'), ((877, 952), 'matplotlib.pyplot.plot', 'plt.plot', (['axis_x', 'training_mean'], {'label': '"""Training Set"""', 'linewidth': 'LINE_WIDTH'}), "(axis_x, training_mean, label='Training Set', linewidth=LINE_WIDTH)\n", (885, 952), True, 'import matplotlib.pyplot as plt\n'), ((961, 1060), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['axis_x', '(training_mean - training_std)', '(training_mean + training_std)'], {'alpha': '(0.4)'}), '(axis_x, training_mean - training_std, training_mean +\n training_std, alpha=0.4)\n', (977, 1060), 
True, 'import matplotlib.pyplot as plt\n'), ((1065, 1152), 'matplotlib.pyplot.plot', 'plt.plot', (['axis_x', 'test_mean'], {'label': '"""Test Set"""', 'linewidth': 'LINE_WIDTH', 'color': 'color_1'}), "(axis_x, test_mean, label='Test Set', linewidth=LINE_WIDTH, color=\n color_1)\n", (1073, 1152), True, 'import matplotlib.pyplot as plt\n'), ((1156, 1259), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['axis_x', '(test_mean - test_std)', '(test_mean + test_std)'], {'alpha': '(0.4)', 'facecolor': 'color_1'}), '(axis_x, test_mean - test_std, test_mean + test_std, alpha=\n 0.4, facecolor=color_1)\n', (1172, 1259), True, 'import matplotlib.pyplot as plt\n'), ((1594, 1603), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1601, 1603), True, 'import matplotlib.pyplot as plt\n'), ((2660, 2690), 'numpy.mean', 'np.mean', (['training_rmse'], {'axis': '(0)'}), '(training_rmse, axis=0)\n', (2667, 2690), True, 'import numpy as np\n'), ((2692, 2721), 'numpy.std', 'np.std', (['training_rmse'], {'axis': '(0)'}), '(training_rmse, axis=0)\n', (2698, 2721), True, 'import numpy as np\n'), ((2758, 2784), 'numpy.mean', 'np.mean', (['test_rmse'], {'axis': '(0)'}), '(test_rmse, axis=0)\n', (2765, 2784), True, 'import numpy as np\n'), ((2786, 2811), 'numpy.std', 'np.std', (['test_rmse'], {'axis': '(0)'}), '(test_rmse, axis=0)\n', (2792, 2811), True, 'import numpy as np\n'), ((2482, 2532), 'numpy.sqrt', 'np.sqrt', (["training_log['mean_squared_error'].values"], {}), "(training_log['mean_squared_error'].values)\n", (2489, 2532), True, 'import numpy as np\n'), ((2559, 2613), 'numpy.sqrt', 'np.sqrt', (["training_log['val_mean_squared_error'].values"], {}), "(training_log['val_mean_squared_error'].values)\n", (2566, 2613), True, 'import numpy as np\n'), ((761, 784), 'numpy.array', 'np.array', (['[37, 128, 92]'], {}), '([37, 128, 92])\n', (769, 784), True, 'import numpy as np\n'), ((792, 815), 'numpy.array', 'np.array', (['[53, 128, 57]'], {}), '([53, 128, 57])\n', (800, 815), True, 
'import numpy as np\n')] |
#!/usr/bin/env python
'''
CUDA-accelerated Computer Vision functions
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class cuda_test(NewOpenCVTests):
    """Smoke tests for a few CUDA-accelerated OpenCV operations."""

    def setUp(self):
        # Skip the whole suite when no CUDA-capable device is available.
        if not cv.cuda.getCudaEnabledDeviceCount():
            self.skipTest("No CUDA-capable device is detected")

    def test_cuda_upload_download(self):
        """A host -> GPU -> host round trip must be lossless."""
        host = np.random.random((200, 200, 3))
        host = (host * 255).astype(np.uint8)
        device = cv.cuda_GpuMat()
        device.upload(host)

        self.assertTrue(np.allclose(device.download(), host))

    def test_cuda_imgproc_cvtColor(self):
        """GPU BGR->HSV conversion must match the CPU result."""
        host = np.random.random((200, 200, 3))
        host = (host * 255).astype(np.uint8)
        device = cv.cuda_GpuMat()
        device.upload(host)

        converted = cv.cuda.cvtColor(device, cv.COLOR_BGR2HSV)
        reference = cv.cvtColor(host, cv.COLOR_BGR2HSV)
        self.assertTrue(np.allclose(converted.download(), reference))

    def test_cuda_filter_laplacian(self):
        """GPU Laplacian filter must match the CPU implementation."""
        host = np.random.random((200, 200))
        host = (host * 255).astype(np.uint16)
        device = cv.cuda_GpuMat()
        device.upload(host)

        laplacian = cv.cuda.createLaplacianFilter(cv.CV_16UC1, -1, ksize=3)
        filtered = laplacian.apply(device)
        reference = cv.Laplacian(host, cv.CV_16UC1, ksize=3)
        self.assertTrue(np.allclose(filtered.download(), reference))
if __name__ == '__main__':
    # Standard OpenCV Python test-suite entry point.
    NewOpenCVTests.bootstrap()
| [
"tests_common.NewOpenCVTests.bootstrap",
"cv2.cuda_GpuMat",
"cv2.cvtColor",
"cv2.cuda.createLaplacianFilter",
"numpy.random.random",
"cv2.cuda.cvtColor",
"cv2.Laplacian",
"cv2.cuda.getCudaEnabledDeviceCount"
] | [((1364, 1390), 'tests_common.NewOpenCVTests.bootstrap', 'NewOpenCVTests.bootstrap', ([], {}), '()\n', (1388, 1390), False, 'from tests_common import NewOpenCVTests\n'), ((521, 537), 'cv2.cuda_GpuMat', 'cv.cuda_GpuMat', ([], {}), '()\n', (535, 537), True, 'import cv2 as cv\n'), ((764, 780), 'cv2.cuda_GpuMat', 'cv.cuda_GpuMat', ([], {}), '()\n', (778, 780), True, 'import cv2 as cv\n'), ((828, 870), 'cv2.cuda.cvtColor', 'cv.cuda.cvtColor', (['gpuMat', 'cv.COLOR_BGR2HSV'], {}), '(gpuMat, cv.COLOR_BGR2HSV)\n', (844, 870), True, 'import cv2 as cv\n'), ((1098, 1114), 'cv2.cuda_GpuMat', 'cv.cuda_GpuMat', ([], {}), '()\n', (1112, 1114), True, 'import cv2 as cv\n'), ((288, 323), 'cv2.cuda.getCudaEnabledDeviceCount', 'cv.cuda.getCudaEnabledDeviceCount', ([], {}), '()\n', (321, 323), True, 'import cv2 as cv\n'), ((928, 964), 'cv2.cvtColor', 'cv.cvtColor', (['npMat', 'cv.COLOR_BGR2HSV'], {}), '(npMat, cv.COLOR_BGR2HSV)\n', (939, 964), True, 'import cv2 as cv\n'), ((1161, 1216), 'cv2.cuda.createLaplacianFilter', 'cv.cuda.createLaplacianFilter', (['cv.CV_16UC1', '(-1)'], {'ksize': '(3)'}), '(cv.CV_16UC1, -1, ksize=3)\n', (1190, 1216), True, 'import cv2 as cv\n'), ((1287, 1328), 'cv2.Laplacian', 'cv.Laplacian', (['npMat', 'cv.CV_16UC1'], {'ksize': '(3)'}), '(npMat, cv.CV_16UC1, ksize=3)\n', (1299, 1328), True, 'import cv2 as cv\n'), ((448, 479), 'numpy.random.random', 'np.random.random', (['(200, 200, 3)'], {}), '((200, 200, 3))\n', (464, 479), True, 'import numpy as np\n'), ((691, 722), 'numpy.random.random', 'np.random.random', (['(200, 200, 3)'], {}), '((200, 200, 3))\n', (707, 722), True, 'import numpy as np\n'), ((1027, 1055), 'numpy.random.random', 'np.random.random', (['(200, 200)'], {}), '((200, 200))\n', (1043, 1055), True, 'import numpy as np\n')] |
import numpy as np
class RandomActor:
    """Policy that ignores the observation and samples actions uniformly."""

    def __init__(self, n_actions):
        # Number of discrete actions available to sample from.
        self.n_actions = n_actions

    def get(self, state):
        """Return a uniformly random action index in [0, n_actions); *state* is ignored."""
        action = np.random.randint(self.n_actions)
        return action
| [
"numpy.random.randint"
] | [((150, 183), 'numpy.random.randint', 'np.random.randint', (['self.n_actions'], {}), '(self.n_actions)\n', (167, 183), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 30 17:02:54 2020
@author: rpear
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.neural_network import MLPClassifier
# Fix the NumPy seed so the per-epoch shuffles below are reproducible.
np.random.seed(1)
""" Example based on sklearn's docs """
# Downloads MNIST (70k 28x28 digit images) from OpenML on first run.
mnist = fetch_openml('mnist_784')
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
# Only the first 1000 samples are used for training; the rest are the test set.
X_train, X_test = X[:1000], X[1000:]
y_train, y_test = y[:1000], y[1000:]
# Tiny 3-unit hidden layer; max_iter is irrelevant here because training is
# driven manually via partial_fit below.
mlp = MLPClassifier(hidden_layer_sizes=(3,), max_iter=5, alpha=1e-4,
                    solver='sgd', verbose=0, tol=1e-8, random_state=1,
                    learning_rate_init=.01)
N_TRAIN_SAMPLES = X_train.shape[0]
N_EPOCHS = 25
N_BATCH = 64
# Array of the distinct class labels; partial_fit needs the full label set
# up front because any single mini-batch may not contain every class.
N_CLASSES = np.unique(y_train)
scores_train = []   # per-epoch training error (1 - accuracy)
scores_test = []    # per-epoch test error (1 - accuracy)
mlploss = []        # per-epoch training loss reported by the model
# EPOCH
epoch = 0
while epoch < N_EPOCHS:
    print('epoch: ', epoch)
    # SHUFFLING
    random_perm = np.random.permutation(X_train.shape[0])
    mini_batch_index = 0
    while True:
        # MINI-BATCH
        indices = random_perm[mini_batch_index:mini_batch_index + N_BATCH]
        mlp.partial_fit(X_train[indices], y_train[indices], classes=N_CLASSES)
        mini_batch_index += N_BATCH
        if mini_batch_index >= N_TRAIN_SAMPLES:
            break
    # SCORE TRAIN
    scores_train.append(1 - mlp.score(X_train, y_train))
    # SCORE TEST
    scores_test.append(1 - mlp.score(X_test, y_test))
    # compute loss
    mlploss.append(mlp.loss_)
    epoch += 1
""" Plot """
# Three stacked panels sharing the epoch axis: train error, train loss, test error.
fig, ax = plt.subplots(3, sharex=True)
ax[0].plot(scores_train)
ax[0].set_title('Train Error')
ax[1].plot(mlploss)
ax[1].set_title('Train Loss')
ax[2].plot(scores_test)
ax[2].set_title('Test Error')
fig.suptitle("Error vs Loss over epochs", fontsize=14)
# NOTE(review): hard-coded absolute Windows path — breaks on other machines.
fig.savefig('C:/Users/rpear/OneDrive/Apps/Documents/LossCurve.png')
plt.show()
| [
"numpy.random.seed",
"matplotlib.pyplot.show",
"sklearn.neural_network.MLPClassifier",
"numpy.random.permutation",
"sklearn.datasets.fetch_openml",
"matplotlib.pyplot.subplots",
"numpy.unique"
] | [((228, 245), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (242, 245), True, 'import numpy as np\n'), ((295, 320), 'sklearn.datasets.fetch_openml', 'fetch_openml', (['"""mnist_784"""'], {}), "('mnist_784')\n", (307, 320), False, 'from sklearn.datasets import fetch_openml\n'), ((498, 644), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '(3,)', 'max_iter': '(5)', 'alpha': '(0.0001)', 'solver': '"""sgd"""', 'verbose': '(0)', 'tol': '(1e-08)', 'random_state': '(1)', 'learning_rate_init': '(0.01)'}), "(hidden_layer_sizes=(3,), max_iter=5, alpha=0.0001, solver=\n 'sgd', verbose=0, tol=1e-08, random_state=1, learning_rate_init=0.01)\n", (511, 644), False, 'from sklearn.neural_network import MLPClassifier\n'), ((751, 769), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (760, 769), True, 'import numpy as np\n'), ((1521, 1549), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {'sharex': '(True)'}), '(3, sharex=True)\n', (1533, 1549), True, 'import matplotlib.pyplot as plt\n'), ((1833, 1843), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1841, 1843), True, 'import matplotlib.pyplot as plt\n'), ((924, 963), 'numpy.random.permutation', 'np.random.permutation', (['X_train.shape[0]'], {}), '(X_train.shape[0])\n', (945, 963), True, 'import numpy as np\n')] |
##############################################################################################################################
# Importing necessary libraries
##############################################################################################################################
import argparse
import torch
import json
from torch import nn, optim
from torchvision import datasets, transforms, models
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
##############################################################################################################################
# Creating parser for arguments in command line
##############################################################################################################################
# Command-line interface: two positional arguments (image path, checkpoint
# file) plus optional prediction settings.
parser = argparse.ArgumentParser()
parser.add_argument('image_path',
                    type = str,
                    default = 'flowers/test/1/image_06743.jpg',
                    help = 'path to image file for model classification of top k, flowers/test/class/image')
parser.add_argument('checkpoint',
                    action = 'store',
                    type = str,
                    default = 'checkpoint.pth',
                    help = 'Name of file for trained model')
parser.add_argument('--top_k',
                    action = 'store',
                    type = int,
                    default = 3,
                    help = 'Select number of classes you wish to see in descending order.')
parser.add_argument('--category_names',
                    action = 'store',
                    type = str,
                    default = 'cat_to_name.json',
                    help = 'Name of json file for class to flower names')
parser.add_argument('--arch',
                    action = 'store',
                    type = str,
                    default = 'vgg11',
                    help = 'vgg11, vgg13, vgg19')
# NOTE(review): with action='store_true' this flag yields the boolean True
# when passed, but its *default* is a torch.device object — so args.gpu has
# two different types depending on usage. The resulting `device` variable is
# never used later in this visible code; confirm intent.
parser.add_argument('--gpu',
                    action = 'store_true',
                    default = torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
                    help = 'GPU if available')
args = parser.parse_args()
# Copy parsed values into module-level globals used by the functions below.
if args.image_path:
    image_path = args.image_path
if args.checkpoint:
    checkpoint = args.checkpoint
if args.top_k:
    top_k = args.top_k
if args.category_names:
    category_names = args.category_names
if args.arch:
    arch = args.arch
if args.gpu:
    device = args.gpu
##############################################################################################################################
# Loading saved checkpoint of model
##############################################################################################################################
def load_checkpoint(checkpoint):
    """Rebuild a trained classifier model from a saved checkpoint file.

    Args:
        checkpoint (str): path to a file written by torch.save containing the
            keys 'arch', 'class_to_idx', 'hidden_units', 'output' and
            'state_dict'.

    Returns:
        The reconstructed torchvision model with the trained classifier
        weights loaded and the feature-extractor parameters frozen.
    """
    checkpoint = torch.load(checkpoint)
    arch = checkpoint['arch']
    # Re-create the same torchvision backbone the model was trained on.
    model = getattr(models, arch)(pretrained=True)
    # Freeze feature-extractor weights; only the classifier head was trained.
    for param in model.parameters():
        param.requires_grad = False
    model.class_to_idx = checkpoint['class_to_idx']
    # Renamed from `input`, which shadowed the builtin of the same name.
    in_features = model.classifier[0].in_features
    hidden_units = checkpoint['hidden_units']
    output = checkpoint['output']
    # Classifier head must match the architecture used at training time so
    # that state_dict keys/shapes line up.
    classifier = nn.Sequential(
        nn.Linear(in_features, hidden_units),
        nn.ReLU(),
        nn.Dropout(0.2),
        nn.Linear(hidden_units, 512),
        nn.ReLU(),
        nn.Dropout(0.2),
        nn.Linear(512, output),
        nn.LogSoftmax(dim=1)
    )
    model.classifier = classifier
    model.load_state_dict(checkpoint['state_dict'])
    return model
# Rebuild the trained model from the checkpoint named on the command line.
model = load_checkpoint(checkpoint)
print('Checkpoint has been loaded...')
print(model)
##############################################################################################################################
# processing image for image_path
##############################################################################################################################
def process_image(image_path):
    """Load an image file and convert it to a normalized CHW numpy array.

    Resizes the image to fit inside a 256x256 box, centre-crops 224x224,
    scales pixels to [0, 1], normalizes with the ImageNet mean/std, and
    transposes to channel-first order as expected by torch models.

    Args:
        image_path (str): path of the image file to process.

    Returns:
        numpy.ndarray of shape (3, 224, 224).
    """
    im = Image.open(image_path)
    im.thumbnail((256, 256))
    # BUG FIX: Image.thumbnail preserves the aspect ratio, so the actual
    # size may be smaller than 256x256 in one dimension. The original
    # computed the crop box from the *requested* (256, 256) size, producing
    # an off-centre or out-of-bounds crop for non-square images.
    width, height = im.size
    crop_size = 224
    left = (width - crop_size) / 2
    upper = (height - crop_size) / 2
    right = left + crop_size
    lower = upper + crop_size
    im_crop = im.crop(box=(left, upper, right, lower))
    np_image = np.array(im_crop) / 255
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    np_image = (np_image - mean) / std
    processed_image = np_image.transpose(2, 0, 1)
    return processed_image
# NOTE(review): return value is discarded; this only exercises
# process_image once at import time and could likely be removed.
process_image(image_path)
##############################################################################################################################
# predicting top_k from processed image
##############################################################################################################################
def predict(image_path, model, top_k):
    """Run the model on one image and return its top-k class probabilities.

    Returns:
        (probs, classes): a pair of 1 x top_k tensors holding the top-k
        probabilities and their class indices.
    """
    # Inference is done on CPU in eval mode with gradients disabled.
    model.cpu()
    model.eval()
    processed = process_image(image_path)
    batch = torch.tensor(processed).float().unsqueeze_(0)
    with torch.no_grad():
        log_probabilities = model.forward(batch)
        probabilities = torch.exp(log_probabilities)
        probs, classes = probabilities.topk(top_k, dim=1)
    return probs, classes
# Compute the top-k prediction once at module level for the CLI image.
probs, classes = predict(image_path, model, top_k)
##############################################################################################################################
# Predicting top_k from processed image
##############################################################################################################################
def image_prediction(image_path):
    """Print and return the flower-name predictions for one image.

    Maps the model's top-k class indices back to category labels via
    model.class_to_idx, then to human-readable names via the
    `category_names` JSON file.

    Returns:
        (flowers, probs): list of flower names and the matching
        probabilities array.
    """
    with open(category_names, 'r') as f:
        cat_to_name = json.load(f)
    probs, classes = predict(image_path, model, top_k)
    # BUG FIX: atleast_1d keeps these iterable even when top_k == 1;
    # squeeze() alone would yield 0-d arrays that cannot be iterated.
    probs = np.atleast_1d(probs.data.numpy().squeeze())
    classes = np.atleast_1d(classes.data.numpy().squeeze())
    np.set_printoptions(suppress=True)
    # model.class_to_idx maps category label -> class index; invert it.
    idx_to_class = {val: key for key, val in model.class_to_idx.items()}
    labels = [idx_to_class[c] for c in classes]
    flowers = [cat_to_name[label] for label in labels]
    print('The predictions for ', image_path, 'are...')
    print(flowers, probs)
    return flowers, probs
# Entry-point-style invocation: classify the image given on the CLI.
image_prediction(image_path)
##############################################################################################################################
# Running file instructions
##############################################################################################################################
#To run enter on command line:
#cd ImageClassifier
#python predict.py flowers/test/1/image_06743.jpg checkpoint.pth
| [
"torch.nn.Dropout",
"numpy.set_printoptions",
"torch.nn.ReLU",
"argparse.ArgumentParser",
"json.load",
"torch.nn.LogSoftmax",
"torch.load",
"PIL.Image.open",
"torch.exp",
"numpy.array",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.no_grad",
"torch.tensor"
] | [((795, 820), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (818, 820), False, 'import argparse\n'), ((2788, 2810), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (2798, 2810), False, 'import torch\n'), ((4157, 4179), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4167, 4179), False, 'from PIL import Image\n'), ((5219, 5236), 'torch.exp', 'torch.exp', (['log_ps'], {}), '(log_ps)\n', (5228, 5236), False, 'import torch\n'), ((5952, 5986), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (5971, 5986), True, 'import numpy as np\n'), ((3216, 3246), 'torch.nn.Linear', 'nn.Linear', (['input', 'hidden_units'], {}), '(input, hidden_units)\n', (3225, 3246), False, 'from torch import nn, optim\n'), ((3279, 3288), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3286, 3288), False, 'from torch import nn, optim\n'), ((3321, 3336), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (3331, 3336), False, 'from torch import nn, optim\n'), ((3369, 3397), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(512)'], {}), '(hidden_units, 512)\n', (3378, 3397), False, 'from torch import nn, optim\n'), ((3430, 3439), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3437, 3439), False, 'from torch import nn, optim\n'), ((3472, 3487), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (3482, 3487), False, 'from torch import nn, optim\n'), ((3520, 3542), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'output'], {}), '(512, output)\n', (3529, 3542), False, 'from torch import nn, optim\n'), ((3575, 3595), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (3588, 3595), False, 'from torch import nn, optim\n'), ((4449, 4466), 'numpy.array', 'np.array', (['im_crop'], {}), '(im_crop)\n', (4457, 4466), True, 'import numpy as np\n'), ((5153, 5168), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5166, 5168), False, 'import 
torch\n'), ((5754, 5766), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5763, 5766), False, 'import json\n'), ((2050, 2075), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2073, 2075), False, 'import torch\n'), ((5101, 5120), 'torch.tensor', 'torch.tensor', (['image'], {}), '(image)\n', (5113, 5120), False, 'import torch\n')] |
from __future__ import print_function
from __future__ import division
from skimage.measure import block_reduce
import matplotlib.pyplot as pl
import scipy as sp
from tqdm import trange
import numpy as np
import argparse
import cv2
import os
# argv[1] -> source directory containing cuts
# argv[2] -> destination directory to store augmented data
# argv[3] -> number of random crops to generate from each image
def generate():
    """Cut `crops` random 128x128 (image, label) patch pairs from every image.

    Reads images from `source` and matching label maps from `annotations`,
    writing the crops to `im_dest_path` / `an_dest_path`. Relies on the
    module-level globals configured in __main__.
    """
    all_image_files = os.listdir(source)
    crop_size = 128
    for k in trange(len(all_image_files)):
        name, _ = os.path.splitext(all_image_files[k])
        image_path = os.path.join(source, all_image_files[k])
        label_path = os.path.join(annotations, all_image_files[k])
        actual_image = pl.imread(image_path)
        label = pl.imread(label_path)
        # BUG FIX: guard before touching .shape — the original unpacked the
        # shape first, so the None check below it could never fire.
        if actual_image is None:
            continue
        w, h, c = actual_image.shape
        coords = []
        for i in range(crops):
            # Pick a fresh random top-left corner, avoiding exact repeats.
            x = np.random.randint(low=0, high=w - crop_size)
            y = np.random.randint(low=0, high=h - crop_size)
            while (x, y) in coords:
                x = np.random.randint(low=0, high=w - crop_size)
                y = np.random.randint(low=0, high=h - crop_size)
            coords.append((x, y))
            croped_image = actual_image[x:x + crop_size, y:y + crop_size, :]
            croped_label = label[x:x + crop_size, y:y + crop_size, :]
            # BUG FIX: format the file NAME, not the joined path — the
            # original called .format(i) on the os.path.join result for the
            # label, which misbehaves if the path itself contains braces.
            out_name = '{}_{}.png'.format(name, i)
            cv2.imwrite(os.path.join(im_dest_path, out_name), croped_image)
            cv2.imwrite(os.path.join(an_dest_path, out_name), croped_label)
def convert_labels(label_im):
    """Map an RGB label image to a grayscale image of class ids 0..4."""
    # Gray values produced by the label palette -> dense class indices.
    gray_to_class = {76: 0, 150: 1, 179: 2, 226: 3, 255: 4}
    gray = cv2.cvtColor(label_im, cv2.COLOR_RGB2GRAY)
    for gray_value, class_id in gray_to_class.items():
        gray[gray == gray_value] = class_id
    return gray
def get_subimages():
    """Random-crop every image in each class sub-folder of `source`.

    Mirrors generate(), but walks one level of sub-folders and recreates the
    same folder layout under `im_dest_path` / `an_dest_path`. Uses the
    module-level globals configured in __main__ (source, annotations, crops,
    crop_size, im_dest_path, an_dest_path).
    """
    sub_folders = os.listdir(source)
    for folder in sub_folders:
        source_image_folder = os.path.join(source, folder)
        source_label_folder = os.path.join(annotations, folder)
        dest_image_folder = os.path.join(im_dest_path, folder)
        dest_label_folder = os.path.join(an_dest_path, folder)
        os.mkdir(dest_image_folder)
        os.mkdir(dest_label_folder)
        all_image_files = os.listdir(source_image_folder)
        for k in trange(len(all_image_files)):
            name, _ = os.path.splitext(all_image_files[k])
            image_path = os.path.join(source_image_folder, all_image_files[k])
            label_path = os.path.join(source_label_folder, all_image_files[k])
            # cv2.imread returns an (H, W, C) array, or None on failure.
            actual_image = cv2.imread(image_path)
            label = cv2.imread(label_path)
            if actual_image is None:
                continue
            w, h, c = actual_image.shape
            coords = []
            for i in range(crops):
                # Pick a fresh random top-left corner, avoiding repeats.
                x = np.random.randint(low=0, high=w - crop_size)
                y = np.random.randint(low=0, high=h - crop_size)
                while (x, y) in coords:
                    x = np.random.randint(low=0, high=w - crop_size)
                    y = np.random.randint(low=0, high=h - crop_size)
                coords.append((x, y))
                croped_image = actual_image[x:x + crop_size, y:y + crop_size, :]
                croped_label = label[x:x + crop_size, y:y + crop_size, :]
                # BUG FIX: scipy.misc.imsave was removed in SciPy >= 1.3,
                # and the label call formatted the joined path instead of
                # the file name. Write with cv2.imwrite, consistent with
                # generate().
                out_name = '{}_{}.png'.format(name, i)
                cv2.imwrite(os.path.join(dest_image_folder, out_name), croped_image)
                cv2.imwrite(os.path.join(dest_label_folder, out_name), croped_label)
if __name__ == '__main__':
    # Paths and crop settings come from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('--images', dest='images_path')
    parser.add_argument('--annotations', dest='labels_path')
    parser.add_argument('--crops', dest='crops')
    parser.add_argument('--cropsize', dest='crop_size')
    parser.add_argument('--im_dest', dest='im_dest_path')
    parser.add_argument('--an_dest', dest='an_dest_path')
    args = parser.parse_args()
    # NOTE(review): `global` at module level is a no-op; the assignments
    # below already create module globals used by generate()/get_subimages().
    global source, annotations, im_dest_path, an_dest_path, crops, crop_size
    source = args.images_path
    annotations = args.labels_path
    crops = int(args.crops)
    crop_size = int(args.crop_size)
    im_dest_path = args.im_dest_path
    an_dest_path = args.an_dest_path
    # print('paths => ', im_dest_path, an_dest_path)
    import shutil
    # Start from clean destination folders: remove any previous output.
    if os.path.exists(im_dest_path):
        shutil.rmtree(im_dest_path)
        print('removed {}'.format(im_dest_path))
    if os.path.exists(an_dest_path):
        shutil.rmtree(an_dest_path)
        print('removed {}'.format(an_dest_path))
    os.mkdir(im_dest_path)
    os.mkdir(an_dest_path)
    # important for reproducibility
    np.random.seed(17)
    get_subimages()
| [
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"cv2.cvtColor",
"os.path.exists",
"cv2.imread",
"numpy.random.randint",
"os.path.splitext",
"shutil.rmtree",
"matplotlib.pyplot.imread",
"os.path.join",
"os.listdir"
] | [((453, 471), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (463, 471), False, 'import os\n'), ((2061, 2103), 'cv2.cvtColor', 'cv2.cvtColor', (['label_im', 'cv2.COLOR_RGB2GRAY'], {}), '(label_im, cv2.COLOR_RGB2GRAY)\n', (2073, 2103), False, 'import cv2\n'), ((2264, 2282), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (2274, 2282), False, 'import os\n'), ((4195, 4220), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4218, 4220), False, 'import argparse\n'), ((4950, 4978), 'os.path.exists', 'os.path.exists', (['im_dest_path'], {}), '(im_dest_path)\n', (4964, 4978), False, 'import os\n'), ((5072, 5100), 'os.path.exists', 'os.path.exists', (['an_dest_path'], {}), '(an_dest_path)\n', (5086, 5100), False, 'import os\n'), ((5192, 5214), 'os.mkdir', 'os.mkdir', (['im_dest_path'], {}), '(im_dest_path)\n', (5200, 5214), False, 'import os\n'), ((5219, 5241), 'os.mkdir', 'os.mkdir', (['an_dest_path'], {}), '(an_dest_path)\n', (5227, 5241), False, 'import os\n'), ((5282, 5300), 'numpy.random.seed', 'np.random.seed', (['(17)'], {}), '(17)\n', (5296, 5300), True, 'import numpy as np\n'), ((553, 589), 'os.path.splitext', 'os.path.splitext', (['all_image_files[k]'], {}), '(all_image_files[k])\n', (569, 589), False, 'import os\n'), ((602, 642), 'os.path.join', 'os.path.join', (['source', 'all_image_files[k]'], {}), '(source, all_image_files[k])\n', (614, 642), False, 'import os\n'), ((655, 700), 'os.path.join', 'os.path.join', (['annotations', 'all_image_files[k]'], {}), '(annotations, all_image_files[k])\n', (667, 700), False, 'import os\n'), ((724, 736), 'matplotlib.pyplot.imread', 'pl.imread', (['x'], {}), '(x)\n', (733, 736), True, 'import matplotlib.pyplot as pl\n'), ((753, 765), 'matplotlib.pyplot.imread', 'pl.imread', (['y'], {}), '(y)\n', (762, 765), True, 'import matplotlib.pyplot as pl\n'), ((2344, 2372), 'os.path.join', 'os.path.join', (['source', 'folder'], {}), '(source, folder)\n', (2356, 2372), False, 'import 
os\n'), ((2403, 2436), 'os.path.join', 'os.path.join', (['annotations', 'folder'], {}), '(annotations, folder)\n', (2415, 2436), False, 'import os\n'), ((2465, 2499), 'os.path.join', 'os.path.join', (['im_dest_path', 'folder'], {}), '(im_dest_path, folder)\n', (2477, 2499), False, 'import os\n'), ((2528, 2562), 'os.path.join', 'os.path.join', (['an_dest_path', 'folder'], {}), '(an_dest_path, folder)\n', (2540, 2562), False, 'import os\n'), ((2571, 2598), 'os.mkdir', 'os.mkdir', (['dest_image_folder'], {}), '(dest_image_folder)\n', (2579, 2598), False, 'import os\n'), ((2607, 2634), 'os.mkdir', 'os.mkdir', (['dest_label_folder'], {}), '(dest_label_folder)\n', (2615, 2634), False, 'import os\n'), ((2661, 2692), 'os.listdir', 'os.listdir', (['source_image_folder'], {}), '(source_image_folder)\n', (2671, 2692), False, 'import os\n'), ((4988, 5015), 'shutil.rmtree', 'shutil.rmtree', (['im_dest_path'], {}), '(im_dest_path)\n', (5001, 5015), False, 'import shutil\n'), ((5110, 5137), 'shutil.rmtree', 'shutil.rmtree', (['an_dest_path'], {}), '(an_dest_path)\n', (5123, 5137), False, 'import shutil\n'), ((2762, 2798), 'os.path.splitext', 'os.path.splitext', (['all_image_files[k]'], {}), '(all_image_files[k])\n', (2778, 2798), False, 'import os\n'), ((2815, 2868), 'os.path.join', 'os.path.join', (['source_image_folder', 'all_image_files[k]'], {}), '(source_image_folder, all_image_files[k])\n', (2827, 2868), False, 'import os\n'), ((2885, 2938), 'os.path.join', 'os.path.join', (['source_label_folder', 'all_image_files[k]'], {}), '(source_label_folder, all_image_files[k])\n', (2897, 2938), False, 'import os\n'), ((2999, 3012), 'cv2.imread', 'cv2.imread', (['x'], {}), '(x)\n', (3009, 3012), False, 'import cv2\n'), ((3033, 3046), 'cv2.imread', 'cv2.imread', (['y'], {}), '(y)\n', (3043, 3046), False, 'import cv2\n'), ((1026, 1070), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(w - crop_size)'}), '(low=0, high=w - crop_size)\n', (1043, 1070), True, 
'import numpy as np\n'), ((1089, 1133), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(h - crop_size)'}), '(low=0, high=h - crop_size)\n', (1106, 1133), True, 'import numpy as np\n'), ((1195, 1239), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(w - crop_size)'}), '(low=0, high=w - crop_size)\n', (1212, 1239), True, 'import numpy as np\n'), ((1262, 1306), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(h - crop_size)'}), '(low=0, high=h - crop_size)\n', (1279, 1306), True, 'import numpy as np\n'), ((3343, 3387), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(w - crop_size)'}), '(low=0, high=w - crop_size)\n', (3360, 3387), True, 'import numpy as np\n'), ((3412, 3456), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(h - crop_size)'}), '(low=0, high=h - crop_size)\n', (3429, 3456), True, 'import numpy as np\n'), ((3529, 3573), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(w - crop_size)'}), '(low=0, high=w - crop_size)\n', (3546, 3573), True, 'import numpy as np\n'), ((3602, 3646), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(h - crop_size)'}), '(low=0, high=h - crop_size)\n', (3619, 3646), True, 'import numpy as np\n'), ((1888, 1932), 'os.path.join', 'os.path.join', (['an_dest_path', "(name + '_{}.png')"], {}), "(an_dest_path, name + '_{}.png')\n", (1900, 1932), False, 'import os\n'), ((4065, 4114), 'os.path.join', 'os.path.join', (['dest_label_folder', "(name + '_{}.png')"], {}), "(dest_label_folder, name + '_{}.png')\n", (4077, 4114), False, 'import os\n')] |
from cuteSV_Description import Generation_VCF_header
from math import log10
import numpy as np
from Bio import SeqIO
# Assumed per-read sequencing error rate used in the genotype likelihoods.
err = 0.1
# Uniform prior over the three genotypes (each gets 1/3).
prior = float(1/3)
# Genotype strings indexed as [hom-ref, het, hom-alt].
Genotype = ["0/0", "0/1", "1/1"]
def log10sumexp(log10_probs):
    """Numerically stable log10(sum(10**p for p in log10_probs))."""
    # Factor out the largest term so the powers never overflow.
    peak = max(log10_probs)
    total = sum(10.0 ** (p - peak) for p in log10_probs)
    return peak + log10(total)
def normalize_log10_probs(log10_probs):
    """Normalize log10 likelihoods so the underlying probabilities sum to 1."""
    log10_probs = np.array(log10_probs)
    # Stable log10-sum-exp of the inputs (inlined).
    peak = max(log10_probs)
    lse = peak + log10(sum(pow(10.0, p - peak) for p in log10_probs))
    # Clamp at 0.0: a normalized log10 probability can never be positive.
    return np.minimum(log10_probs - lse, 0.0)
def rescale_read_counts(c0, c1, max_allowed_reads=100):
    """Ensures that n_total <= max_allowed_reads, rescaling if necessary."""
    total = c0 + c1
    if total <= max_allowed_reads:
        return c0, c1
    # Scale c0 proportionally (rounded down); c1 takes the remainder.
    scaled_c0 = int(max_allowed_reads * float(c0 / total))
    return scaled_c0, max_allowed_reads - scaled_c0
def cal_GL(c0, c1):
    """Compute genotype, PL string, GQ and QUAL from ref/alt read counts.

    c0: reads supporting the reference allele; c1: reads supporting the
    alternative allele. Returns (genotype string, "PL0,PL1,PL2", max GQ,
    QUAL as a phred-scaled float).
    """
    # Approximate adjustment of events with larger read depth
    c0, c1 = rescale_read_counts(c0, c1)
    # original genotype likelihood
    # ori_GL00 = np.float64(pow((1-err), c0)*pow(err, c1)*comb(c0+c1,c0)*(1-prior)/2)
    # ori_GL11 = np.float64(pow(err, c0)*pow((1-err), c1)*comb(c0+c1,c0)*(1-prior)/2)
    # ori_GL01 = np.float64(pow(0.5, c0+c1)*comb(c0+c1,c0)*prior)
    # Unnormalized likelihoods for 0/0, 1/1 and 0/1 under the module-level
    # error rate `err` and genotype prior `prior` (binomial coefficient
    # dropped; it cancels in the normalization).
    ori_GL00 = np.float64(pow((1-err), c0)*pow(err, c1)*(1-prior)/2)
    ori_GL11 = np.float64(pow(err, c0)*pow((1-err), c1)*(1-prior)/2)
    ori_GL01 = np.float64(pow(0.5, c0+c1)*prior)
    # normalized genotype likelihood
    prob = list(normalize_log10_probs(
        [log10(ori_GL00), log10(ori_GL01), log10(ori_GL11)]))
    # Phred-scaled likelihoods (PL) and genotype qualities (GQ).
    GL_P = [pow(10, i) for i in prob]
    PL = [int(np.around(-10*log10(i))) for i in GL_P]
    GQ = [int(-10*log10(GL_P[1] + GL_P[2])), int(-10 *
                                                 log10(GL_P[0] + GL_P[2])), int(-10*log10(GL_P[0] + GL_P[1]))]
    QUAL = abs(np.around(-10*log10(GL_P[0]), 1))
    # The reported genotype is the one with the highest posterior.
    return Genotype[prob.index(max(prob))], "%d,%d,%d" % (PL[0], PL[1], PL[2]), max(GQ), QUAL
def cal_CIPOS(std, num):
    """Return a symmetric "-x,x" confidence-interval string for a position.

    1.96 is the z-score of a 95% confidence interval; std / sqrt(num) is the
    standard error of the mean over `num` observations.
    """
    half_width = int(1.96 * std / num ** 0.5)
    return "-{0},{0}".format(half_width)
def threshold_ref_count(num):
    """Scale `num` by a multiplier that shrinks as the support count grows."""
    if num <= 2:
        return 10 * num
    if 3 <= num <= 5:
        return 5 * num
    if 6 <= num <= 15:
        return 4 * num
    return 3 * num
def binarySearch(query, L, s, e):
    """Binary-search `query[0:L]` for an interval overlapping [s, e].

    Each entry's first two fields are its (start, end), sorted by start.
    Returns the index of an overlapping interval, or -1 if none overlaps.
    """
    lo, hi = 0, L - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        start, end = query[mid][0], query[mid][1]
        if e < start:
            # Interval starts after the query window: search the left half.
            hi = mid - 1
        elif s > end:
            # Interval ends before the query window: search the right half.
            lo = mid + 1
        else:
            return mid
    return -1
def count_coverage(chr, s, e, f, read_count, up_bound, itround):
    """Add to `read_count` the ids of reads in `f` that span [s, e].

    f is a start-sorted list of (start, end, read_id, ...) records; only a
    window of up to `up_bound` records around a binary-search hit is
    scanned. Returns 1 when a hit was found, 0 otherwise.
    NOTE(review): `chr` and `itround` are unused in this visible code —
    confirm they are kept for interface compatibility.
    """
    status = 1
    L = len(f)
    mid = binarySearch(f, L, s, e)
    if mid == -1:
        return 0
    half = int(up_bound / 2)
    # Scan at most `up_bound` records centred on the hit, clamped to [0, L).
    A = 0 if mid < half else mid - half
    B = L - 1 if mid + half >= L else mid + half
    # NOTE(review): range(A, B) excludes index B, so the last candidate
    # record is never examined — confirm whether the bound should be B + 1.
    for t in range(A, B):
        i = f[t]
        if i[0] > e:
            break
        # Only reads strictly containing [s, e] are counted.
        if i[0] < s and i[1] > e:
            read_count.add(i[2])
    return status
def generate_output(args, semi_result, contigINFO, argv, print_allele_seq):
    '''
    Generation of VCF format file.
    VCF version: 4.2

    semi_result holds one list per call, whose field layout depends on the
    SV type (DEL/INS vs DUP vs INV vs BND). When print_allele_seq is true
    the reference FASTA (args.ref) is loaded to emit real REF/ALT bases for
    DEL/INS records; otherwise placeholder alleles are written.
    '''
    # genotype_trigger = TriggerGT[args.genotype]
    if print_allele_seq:
        # chromosome name -> SeqRecord, for REF/ALT base lookup below.
        ref_g = SeqIO.to_dict(SeqIO.parse(args.ref, "fasta"))
    # Per-type running counters used to build unique record IDs.
    svid = dict()
    svid["INS"] = 0
    svid["DEL"] = 0
    svid["BND"] = 0
    svid["DUP"] = 0
    svid["INV"] = 0
    # NOTE(review): `file` shadows a builtin name and is not closed in this
    # visible code — confirm the process lifetime makes that acceptable.
    file = open(args.output, 'w')
    Generation_VCF_header(file, contigINFO, args.sample, argv)
    file.write(
        "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n" % (args.sample))
    for i in semi_result:
        if i[1] in ["DEL", "INS"]:
            # DEL/INS layout: [chr, type, pos, svlen, RE, CIPOS, CILEN,
            # DR, GT, PL, GQ, QUAL, rnames, (INS seq)].
            if i[1] == "INS":
                cal_end = int(i[2]) + 1
            else:
                cal_end = int(i[2]) + 1 + abs(int(float(i[3])))
            info_list = "{PRECISION};SVTYPE={SVTYPE};SVLEN={SVLEN};END={END};CIPOS={CIPOS};CILEN={CILEN};RE={RE};RNAMES={RNAMES}".format(
                PRECISION="IMPRECISE" if i[8] == "0/0" else "PRECISE",
                SVTYPE=i[1],
                SVLEN=i[3],
                END=str(cal_end),
                CIPOS=i[5],
                CILEN=i[6],
                RE=i[4],
                RNAMES=i[12] if args.report_readid else "NULL")
            if i[1] == "DEL":
                info_list += ";STRAND=+-"
            # Records without a QUAL value pass; otherwise filter at Q5.
            if i[11] == "." or i[11] == None:
                filter_lable = "PASS"
            else:
                filter_lable = "PASS" if float(i[11]) >= 5.0 else "q5"
            if print_allele_seq:
                # Emit real REF/ALT bases pulled from the reference genome.
                file.write("{CHR}\t{POS}\t{ID}\t{REF}\t{ALT}\t{QUAL}\t{PASS}\t{INFO}\t{FORMAT}\t{GT}:{DR}:{RE}:{PL}:{GQ}\n".format(
                    CHR=i[0],
                    POS=str(int(i[2]) + 1),
                    ID="SKSV.%s.%d" % (i[1], svid[i[1]]),
                    REF=str(ref_g[i[0]].seq[max(int(i[2])-1, 0)]) if i[1] == 'INS' else str(
                        ref_g[i[0]].seq[max(int(i[2])-1, 0):int(i[2])-int(i[3])]),
                    ALT="%s" % (str(ref_g[i[0]].seq[max(int(i[2])-1, 0)])+i[13] if i[1]
                                == 'INS' else str(ref_g[i[0]].seq[max(int(i[2])-1, 0)])),
                    INFO=info_list,
                    FORMAT="GT:DR:DV:PL:GQ",
                    GT=i[8],
                    DR=i[7],
                    RE=i[4],
                    PL=i[9],
                    GQ=i[10],
                    QUAL=i[11],
                    PASS=filter_lable))
            else:
                # No reference available: symbolic ALT and placeholder REF.
                file.write("{CHR}\t{POS}\t{ID}\tN\t{ALT}\t{QUAL}\t{PASS}\t{INFO}\t{FORMAT}\t{GT}:{DR}:{RE}:{PL}:{GQ}\n".format(
                    CHR=i[0],
                    POS=str(int(i[2]) + 1),
                    ID="SKSV.%s.%d" % (i[1], svid[i[1]]),
                    ALT="<%s>" % (i[1]),
                    INFO=info_list,
                    FORMAT="GT:DR:DV:PL:GQ",
                    GT=i[8],
                    DR=i[7],
                    RE=i[4],
                    PL=i[9],
                    GQ=i[10],
                    QUAL=i[11],
                    PASS=filter_lable))
            svid[i[1]] += 1
        elif i[1] == "DUP":
            # DUP layout: [chr, type, pos, svlen, RE, DR, GT, PL, GQ,
            # QUAL, rnames].
            cal_end = int(i[2]) + 1 + abs(int(float(i[3])))
            info_list = "{PRECISION};SVTYPE={SVTYPE};SVLEN={SVLEN};END={END};RE={RE};STRAND=-+;RNAMES={RNAMES}".format(
                PRECISION="IMPRECISE" if i[6] == "0/0" else "PRECISE",
                SVTYPE=i[1],
                SVLEN=i[3],
                END=str(cal_end),
                RE=i[4],
                RNAMES=i[10] if args.report_readid else "NULL")
            if i[9] == ".":
                filter_lable = "PASS"
            else:
                filter_lable = "PASS" if float(i[9]) >= 5.0 else "q5"
            file.write("{CHR}\t{POS}\t{ID}\t{REF}\t{ALT}\t{QUAL}\t{PASS}\t{INFO}\t{FORMAT}\t{GT}:{DR}:{RE}:{PL}:{GQ}\n".format(
                CHR=i[0],
                POS=str(int(i[2]) + 1),
                ID="SKSV.%s.%d" % (i[1], svid[i[1]]),
                REF=str(ref_g[i[0]].seq[int(i[2])]) if print_allele_seq else 'N',
                ALT="<%s>" % (i[1]),
                INFO=info_list,
                FORMAT="GT:DR:DV:PL:GQ",
                GT=i[6],
                DR=i[5],
                RE=i[4],
                PL=i[7],
                GQ=i[8],
                QUAL=i[9],
                PASS=filter_lable))
            svid[i[1]] += 1
        elif i[1] == "INV":
            # INV layout: [chr, type, pos, svlen, RE, DR, GT, STRAND, PL,
            # GQ, QUAL, rnames].
            cal_end = int(i[2]) + 1 + abs(int(float(i[3])))
            info_list = "{PRECISION};SVTYPE={SVTYPE};SVLEN={SVLEN};END={END};RE={RE};STRAND={STRAND};RNAMES={RNAMES}".format(
                PRECISION="IMPRECISE" if i[6] == "0/0" else "PRECISE",
                SVTYPE=i[1],
                SVLEN=i[3],
                END=str(cal_end),
                RE=i[4],
                STRAND=i[7],
                RNAMES=i[11] if args.report_readid else "NULL")
            if i[10] == ".":
                filter_lable = "PASS"
            else:
                filter_lable = "PASS" if float(i[10]) >= 5.0 else "q5"
            file.write("{CHR}\t{POS}\t{ID}\t{REF}\t{ALT}\t{QUAL}\t{PASS}\t{INFO}\t{FORMAT}\t{GT}:{DR}:{RE}:{PL}:{GQ}\n".format(
                CHR=i[0],
                POS=str(int(i[2]) + 1),
                ID="SKSV.%s.%d" % (i[1], svid[i[1]]),
                #REF=str(ref_g[i[0]].seq[int(i[2])]),
                REF=str(ref_g[i[0]].seq[int(i[2])]) if print_allele_seq else 'N',
                ALT="<%s>" % (i[1]),
                INFO=info_list,
                FORMAT="GT:DR:DV:PL:GQ",
                GT=i[6],
                DR=i[5],
                RE=i[4],
                PL=i[8],
                GQ=i[9],
                QUAL=i[10],
                PASS=filter_lable))
            svid[i[1]] += 1
        else:
            # BND
            # BND layout: [chr, alt, pos, ?, ?, RE, DR, GT, PL, GQ, QUAL,
            # rnames]; i[1] holds the breakend ALT string itself.
            # info_list = "{PRECISION};SVTYPE={SVTYPE};CHR2={CHR2};END={END};RE={RE};RNAMES={RNAMES}".format(
            info_list = "{PRECISION};SVTYPE={SVTYPE};RE={RE};RNAMES={RNAMES}".format(
                PRECISION="IMPRECISE" if i[7] == "0/0" else "PRECISE",
                SVTYPE="BND",
                # CHR2 = i[3],
                # END = str(int(i[4]) + 1),
                RE=i[5],
                RNAMES=i[11] if args.report_readid else "NULL")
            if i[10] == ".":
                filter_lable = "PASS"
            else:
                filter_lable = "PASS" if float(i[10]) >= 5.0 else "q5"
            file.write("{CHR}\t{POS}\t{ID}\t{REF}\t{ALT}\t{QUAL}\t{PASS}\t{INFO}\t{FORMAT}\t{GT}:{DR}:{RE}:{PL}:{GQ}\n".format(
                CHR=i[0],
                POS=str(int(i[2]) + 1),
                ID="SKSV.%s.%d" % ("BND", svid["BND"]),
                REF='N',
                ALT=i[1],
                INFO=info_list,
                FORMAT="GT:DR:DV:PL:GQ",
                GT=i[7],
                DR=i[6],
                RE=i[5],
                PL=i[8],
                GQ=i[9],
                QUAL=i[10],
                PASS=filter_lable))
            svid["BND"] += 1
def load_valuable_chr(path):
    """Collect the chromosomes that have signature records under `path`.

    Reads <path><SVTYPE>.sigs for DEL/DUP/INS/INV (chromosome name in
    column 1) and <path>TRA.sigs (both chromosomes, columns 1 and 4).

    Args:
        path (str): prefix (typically a directory path ending in a
            separator) of the .sigs files.

    Returns:
        dict mapping each SV type to a sorted list of chromosome names;
        "TRA" maps to {chr1: sorted list of partner chr2 names}.
    """
    valuable_chr = dict()
    valuable_chr["DEL"] = list()
    valuable_chr["DUP"] = list()
    valuable_chr["INS"] = list()
    valuable_chr["INV"] = list()
    valuable_chr["TRA"] = dict()
    for svtype in ["DEL", "DUP", "INS", "INV"]:
        # `with` guarantees the file is closed even if parsing raises
        # (the original left files open on an exception).
        with open("%s%s.sigs" % (path, svtype), 'r') as file:
            for line in file:
                chr = line.strip('\n').split('\t')[1]
                if chr not in valuable_chr[svtype]:
                    valuable_chr[svtype].append(chr)
        valuable_chr[svtype].sort()
    with open("%s%s.sigs" % (path, "TRA"), 'r') as file:
        for line in file:
            # Split once instead of twice per line.
            fields = line.strip('\n').split('\t')
            chr1 = fields[1]
            chr2 = fields[4]
            if chr1 not in valuable_chr["TRA"]:
                valuable_chr["TRA"][chr1] = list()
            if chr2 not in valuable_chr["TRA"][chr1]:
                valuable_chr["TRA"][chr1].append(chr2)
    for chr1 in valuable_chr["TRA"]:
        valuable_chr["TRA"][chr1].sort()
    return valuable_chr
#def generate_output(args, semi_result, contigINFO, argv):
# '''
# Generation of VCF format file.
# VCF version: 4.2
# '''
#
# # genotype_trigger = TriggerGT[args.genotype]
#
# svid = dict()
# svid["INS"] = 0
# svid["DEL"] = 0
# svid["BND"] = 0
# svid["DUP"] = 0
# svid["INV"] = 0
#
# file = open(args.output, 'w')
# Generation_VCF_header(file, contigINFO, args.sample, argv)
#
# for i in semi_result:
# if i[1] in ["DEL", "INS"]:
# if i[1] == "INS":
# cal_end = int(i[2]) + 1
# else:
# cal_end = int(i[2]) + abs(int(float(i[3])))
# info_list = "{PRECISION};SVTYPE={SVTYPE};SVLEN={SVLEN};END={END};CIPOS={CIPOS};CILEN={CILEN};RE={RE}".format(
# PRECISION="IMPRECISE" if i[8] == "0/0" else "PRECISE",
# SVTYPE=i[1],
# SVLEN=i[3],
# END=str(cal_end),
# CIPOS=i[5],
# CILEN=i[6],
# RE=i[4])
# if i[1] == "DEL":
# info_list += ";STRAND=+-"
# if i[11] == ".":
# filter_lable = "PASS"
# else:
# filter_lable = "PASS" if float(i[11]) >= 5.0 else "q5"
# file.write("{CHR}\t{POS}\t{ID}\tN\t{ALT}\t{QUAL}\t{PASS}\t{INFO}\t{FORMAT}\t{GT}:{DR}:{RE}:{PL}:{GQ}\n".format(
# CHR=i[0],
# POS=i[2],
# ID="SKSV.%s.%d" % (i[1], svid[i[1]]),
# ALT="<%s>" % (i[1]),
# INFO=info_list,
# FORMAT="GT:DR:DV:PL:GQ",
# GT=i[8],
# DR=i[7],
# RE=i[4],
# PL=i[9],
# GQ=i[10],
# QUAL=i[11],
# PASS=filter_lable))
# svid[i[1]] += 1
# elif i[1] == "DUP":
# cal_end = int(i[2]) + abs(int(float(i[3])))
# info_list = "{PRECISION};SVTYPE={SVTYPE};SVLEN={SVLEN};END={END};RE={RE};STRAND=-+".format(
# PRECISION="IMPRECISE" if i[6] == "0/0" else "PRECISE",
# SVTYPE=i[1],
# SVLEN=i[3],
# END=str(cal_end),
# RE=i[4])
# if i[9] == ".":
# filter_lable = "PASS"
# else:
# filter_lable = "PASS" if float(i[9]) >= 5.0 else "q5"
# file.write("{CHR}\t{POS}\t{ID}\tN\t{ALT}\t{QUAL}\t{PASS}\t{INFO}\t{FORMAT}\t{GT}:{DR}:{RE}:{PL}:{GQ}\n".format(
# CHR=i[0],
# POS=i[2],
# ID="SKSV.%s.%d" % (i[1], svid[i[1]]),
# ALT="<%s>" % (i[1]),
# INFO=info_list,
# FORMAT="GT:DR:DV:PL:GQ",
# GT=i[6],
# DR=i[5],
# RE=i[4],
# PL=i[7],
# GQ=i[8],
# QUAL=i[9],
# PASS=filter_lable))
# svid[i[1]] += 1
# elif i[1] == "INV":
# cal_end = int(i[2]) + abs(int(float(i[3])))
# info_list = "{PRECISION};SVTYPE={SVTYPE};SVLEN={SVLEN};END={END};RE={RE};STRAND={STRAND}".format(
# PRECISION="IMPRECISE" if i[6] == "0/0" else "PRECISE",
# SVTYPE=i[1],
# SVLEN=i[3],
# END=str(cal_end),
# RE=i[4],
# STRAND=i[7])
# if i[10] == ".":
# filter_lable = "PASS"
# else:
# filter_lable = "PASS" if float(i[10]) >= 5.0 else "q5"
# file.write("{CHR}\t{POS}\t{ID}\tN\t{ALT}\t{QUAL}\t{PASS}\t{INFO}\t{FORMAT}\t{GT}:{DR}:{RE}:{PL}:{GQ}\n".format(
# CHR=i[0],
# POS=i[2],
# ID="SKSV.%s.%d" % (i[1], svid[i[1]]),
# ALT="<%s>" % (i[1]),
# INFO=info_list,
# FORMAT="GT:DR:DV:PL:GQ",
# GT=i[6],
# DR=i[5],
# RE=i[4],
# PL=i[8],
# GQ=i[9],
# QUAL=i[10],
# PASS=filter_lable))
# svid[i[1]] += 1
# else:
# # BND
# info_list = "{PRECISION};SVTYPE={SVTYPE};CHR2={CHR2};END={END};RE={RE}".format(
# PRECISION="IMPRECISE" if i[7] == "0/0" else "PRECISE",
# SVTYPE="BND",
# CHR2=i[3],
# END=i[4],
# RE=i[5])
# if i[10] == ".":
# filter_lable = "PASS"
# else:
# filter_lable = "PASS" if float(i[10]) >= 5.0 else "q5"
# file.write("{CHR}\t{POS}\t{ID}\tN\t{ALT}\t{QUAL}\t{PASS}\t{INFO}\t{FORMAT}\t{GT}:{DR}:{RE}:{PL}:{GQ}\n".format(
# CHR=i[0],
# POS=i[2],
# ID="SKSV.%s.%d" % ("BND", svid["BND"]),
# ALT=i[1],
# INFO=info_list,
# FORMAT="GT:DR:DV:PL:GQ",
# GT=i[7],
# DR=i[6],
# RE=i[5],
# PL=i[8],
# GQ=i[9],
# QUAL=i[10],
# PASS=filter_lable))
# svid["BND"] += 1
| [
"numpy.minimum",
"Bio.SeqIO.parse",
"math.log10",
"numpy.array",
"cuteSV_Description.Generation_VCF_header"
] | [((443, 464), 'numpy.array', 'np.array', (['log10_probs'], {}), '(log10_probs)\n', (451, 464), True, 'import numpy as np\n'), ((511, 545), 'numpy.minimum', 'np.minimum', (['(log10_probs - lse)', '(0.0)'], {}), '(log10_probs - lse, 0.0)\n', (521, 545), True, 'import numpy as np\n'), ((3473, 3531), 'cuteSV_Description.Generation_VCF_header', 'Generation_VCF_header', (['file', 'contigINFO', 'args.sample', 'argv'], {}), '(file, contigINFO, args.sample, argv)\n', (3494, 3531), False, 'from cuteSV_Description import Generation_VCF_header\n'), ((3283, 3313), 'Bio.SeqIO.parse', 'SeqIO.parse', (['args.ref', '"""fasta"""'], {}), "(args.ref, 'fasta')\n", (3294, 3313), False, 'from Bio import SeqIO\n'), ((1515, 1530), 'math.log10', 'log10', (['ori_GL00'], {}), '(ori_GL00)\n', (1520, 1530), False, 'from math import log10\n'), ((1532, 1547), 'math.log10', 'log10', (['ori_GL01'], {}), '(ori_GL01)\n', (1537, 1547), False, 'from math import log10\n'), ((1549, 1564), 'math.log10', 'log10', (['ori_GL11'], {}), '(ori_GL11)\n', (1554, 1564), False, 'from math import log10\n'), ((1678, 1702), 'math.log10', 'log10', (['(GL_P[1] + GL_P[2])'], {}), '(GL_P[1] + GL_P[2])\n', (1683, 1702), False, 'from math import log10\n'), ((1764, 1788), 'math.log10', 'log10', (['(GL_P[0] + GL_P[2])'], {}), '(GL_P[0] + GL_P[2])\n', (1769, 1788), False, 'from math import log10\n'), ((1799, 1823), 'math.log10', 'log10', (['(GL_P[0] + GL_P[1])'], {}), '(GL_P[0] + GL_P[1])\n', (1804, 1823), False, 'from math import log10\n'), ((1855, 1869), 'math.log10', 'log10', (['GL_P[0]'], {}), '(GL_P[0])\n', (1860, 1869), False, 'from math import log10\n'), ((1634, 1642), 'math.log10', 'log10', (['i'], {}), '(i)\n', (1639, 1642), False, 'from math import log10\n')] |
from adaline_gd import *
from plot_decision_regions import *
import matplotlib.pyplot as plt
import numpy as np
# ### Reading-in the Iris data
import pandas as pd
df = pd.read_csv('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/iris/iris.data', header=None)
df.tail()
# select setosa and versicolor
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
# extract sepal length and petal length
X = df.iloc[0:100, [0, 2]].values
### 正規化してから勾配法つかう
# standardize features
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
# In[20]:
#ada = AdalineGD(n_iter=15, eta=0.01)
ada = AdalineGD(n_iter=100, eta=0.01)
ada.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./adaline_2.png', dpi=300)
plt.show()
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-squared-error')
plt.tight_layout()
# plt.savefig('./adaline_3.png', dpi=300)
plt.show()
### 解析的に最小二乗解求められるので計算してみる
# データを正規化しているからバイアス項は最初から無視して計算
#print(X_std.shape) # 100 x 2
R = np.dot(X_std.T, X_std)
p = np.dot(X_std.T, y)
w_opt = np.linalg.solve(R, p)
#print(w_opt)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.copy",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.where",
"numpy.linalg.solve",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout"
] | [((170, 279), 'pandas.read_csv', 'pd.read_csv', (['"""https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"""'], {'header': 'None'}), "(\n 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',\n header=None)\n", (181, 279), True, 'import pandas as pd\n'), ((357, 392), 'numpy.where', 'np.where', (["(y == 'Iris-setosa')", '(-1)', '(1)'], {}), "(y == 'Iris-setosa', -1, 1)\n", (365, 392), True, 'import numpy as np\n'), ((519, 529), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (526, 529), True, 'import numpy as np\n'), ((801, 840), 'matplotlib.pyplot.title', 'plt.title', (['"""Adaline - Gradient Descent"""'], {}), "('Adaline - Gradient Descent')\n", (810, 840), True, 'import matplotlib.pyplot as plt\n'), ((841, 882), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""sepal length [standardized]"""'], {}), "('sepal length [standardized]')\n", (851, 882), True, 'import matplotlib.pyplot as plt\n'), ((883, 924), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""petal length [standardized]"""'], {}), "('petal length [standardized]')\n", (893, 924), True, 'import matplotlib.pyplot as plt\n'), ((925, 953), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (935, 953), True, 'import matplotlib.pyplot as plt\n'), ((954, 972), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (970, 972), True, 'import matplotlib.pyplot as plt\n'), ((1015, 1025), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1023, 1025), True, 'import matplotlib.pyplot as plt\n'), ((1089, 1109), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (1099, 1109), True, 'import matplotlib.pyplot as plt\n'), ((1110, 1141), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sum-squared-error"""'], {}), "('Sum-squared-error')\n", (1120, 1141), True, 'import matplotlib.pyplot as plt\n'), ((1143, 1161), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), 
'()\n', (1159, 1161), True, 'import matplotlib.pyplot as plt\n'), ((1204, 1214), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1212, 1214), True, 'import matplotlib.pyplot as plt\n'), ((1311, 1333), 'numpy.dot', 'np.dot', (['X_std.T', 'X_std'], {}), '(X_std.T, X_std)\n', (1317, 1333), True, 'import numpy as np\n'), ((1338, 1356), 'numpy.dot', 'np.dot', (['X_std.T', 'y'], {}), '(X_std.T, y)\n', (1344, 1356), True, 'import numpy as np\n'), ((1365, 1386), 'numpy.linalg.solve', 'np.linalg.solve', (['R', 'p'], {}), '(R, p)\n', (1380, 1386), True, 'import numpy as np\n')] |
'''
Take the .mol2 files and generate 48 images for each of them.
'''
import argparse
from bionoi import Bionoi
import os
import skimage
from skimage.io import imshow
from skimage.transform import rotate
import numpy as np
import shutil
from os import listdir
from os.path import isfile, join
from shutil import copyfile
def getArgs():
    """Parse the command-line options that control image generation."""
    ap = argparse.ArgumentParser('python')
    # Which dataset/operation to run.
    ap.add_argument('-opMode', default='control_vs_heme', required=False,
                    help='control_vs_heme, control_vs_nucleotide, control_vs_heme_cv, control_vs_nucleotide_cv')
    # Geometric augmentation: projection direction, 2-D rotation, flip.
    ap.add_argument('-proDirect', type=int, default=0,
                    choices=[0, 1, 2, 3, 4, 5, 6], required=False,
                    help='the direction of projection(0 from all, xy+,xy-,yz+,yz-,zx+,zx-)')
    ap.add_argument('-rotAngle2D', type=int, default=0,
                    choices=[0, 1, 2, 3, 4], required=False,
                    help='the angle of rotation(0:(0,90,180,270), 1:0 2:90 3:180 4:270)')
    ap.add_argument('-flip', type=int, default=0,
                    choices=[0, 1, 2], required=False,
                    help='the type of flipping(0:(original and up-down), 1:original, 2:up-down')
    # Rendering quality and appearance.
    ap.add_argument('-dpi', default=256, required=False,
                    help='image quality in dpi')
    ap.add_argument('-size', default=256, required=False,
                    help='image size in pixels, eg: 128')
    ap.add_argument('-alpha', default=0.5, required=False,
                    help='alpha for color of cells')
    ap.add_argument('-colorby', default="residue_type",
                    choices=["atom_type", "residue_type", "residue_num"], required=False,
                    help='color the voronoi cells according to {atom_type, residue_type, residue_num}')
    ap.add_argument('-imageType', default=".jpg",
                    choices=[".jpg", ".png"], required=False,
                    help='the type of image {.jpg, .png}')
    ap.add_argument('-save_fig', default=False,
                    choices=[True, False], required=False,
                    help='whether the original image needs save (True, False)')
    return ap.parse_args()
def gen_output_filename_list(dirct, rotAngle, flip):
    """Translate the numeric projection/rotation/flip codes into the
    filename fragments used when saving augmented images.

    A code of 0 means "all variants"; any non-zero code selects a single
    variant (1-based index into the variant table).

    Returns:
        (projection names, rotation suffixes, flip suffixes) as three lists.
    """
    all_projections = ['XOY+', 'XOY-', 'YOZ+', 'YOZ-', 'ZOX+', 'ZOX-']
    all_rotations = ['_r0', '_r90', '_r180', '_r270']
    all_flips = ['_OO', '_ud']
    if dirct == 0:
        f_p_list = all_projections
    else:
        # An out-of-range code degrades to an empty fragment, as before.
        f_p_list = [all_projections[dirct - 1] if 1 <= dirct <= 6 else '']
    if rotAngle == 0:
        f_r_list = all_rotations
    else:
        f_r_list = [all_rotations[rotAngle - 1] if 1 <= rotAngle <= 4 else '']
    if flip == 0:
        f_f_list = all_flips
    else:
        f_f_list = [all_flips[flip - 1] if 1 <= flip <= 2 else '']
    return f_p_list, f_r_list, f_f_list
# generate 48 images for one .mol2 file
def one_gen_48(mol, out_folder, args):
    """Render up to 48 augmented Voronoi images for a single .mol2 file.

    The augmentation grid is (6 projections) x (4 rotations) x (2 flips);
    the options in ``args`` (from getArgs) can restrict each axis to a
    single variant.  One image per combination is written to ``out_folder``
    as "<mol basename>_<projection><rotation><flip><imageType>".
    """
    basepath = os.path.basename(mol)
    basename = os.path.splitext(basepath)[0]
    # Unpack rendering options parsed by getArgs().
    size = args.size
    dpi = args.dpi
    alpha = args.alpha
    imgtype = args.imageType
    colorby = args.colorby
    proDirect = args.proDirect
    rotAngle = args.rotAngle2D
    flip = args.flip
    # Filename fragments for the requested variants; the nested loop at the
    # bottom enumerates them in the same projection->rotation->flip order in
    # which the image lists below are built.
    f_p_list, f_r_list, f_f_list = gen_output_filename_list(proDirect, rotAngle, flip)
    len_list = len(f_p_list)
    proj_img_list = []
    rotate_img_list = []
    flip_img_list = []
    # ===================================== Projection ===============================================
    # Bionoi renders the pocket; presumably returns (atoms, Voronoi diagram,
    # image array) -- TODO confirm against the bionoi module.
    if proDirect != 0:
        atoms, vor, img = Bionoi(mol=mol,
                               bs_out='',
                               size=size,
                               dpi=dpi,
                               alpha=alpha,
                               colorby=colorby,
                               proj_direction=proDirect)
        # imshow(img)
        proj_img_list.append(img)
    else:
        # proDirect == 0: render all six projection directions (1..6).
        for i in range(len_list):
            atoms, vor, img = Bionoi(mol=mol,
                                   bs_out='',
                                   size=size,
                                   dpi=dpi,
                                   alpha=alpha,
                                   colorby=colorby,
                                   proj_direction=i+1)
            proj_img_list.append(img)
    # ---------------------------------- rotate -----------------------------------------
    # NOTE(review): skimage.transform.rotate returns a float image, while the
    # un-rotated original keeps its dtype -- confirm imsave handles both.
    col = proj_img_list
    m = len(col)
    for i in range(m):
        img = col[i]
        if rotAngle == 0:
            rotate_img_list.append(img)
            rotate_img_list.append(rotate(img, angle=90))
            rotate_img_list.append(rotate(img, angle=180))
            rotate_img_list.append(rotate(img, angle=270))
        elif rotAngle == 1:
            rotate_img_list.append(rotate(img, angle=0))
        elif rotAngle == 2:
            rotate_img_list.append(rotate(img, angle=90))
        elif rotAngle == 3:
            rotate_img_list.append(rotate(img, angle=180))
        elif rotAngle == 4:
            rotate_img_list.append(rotate(img, angle=270))
    # ---------------------------------- flip -----------------------------------------
    # flip == 0 keeps both orientations; 1 keeps the original; 2 keeps the
    # up-down mirrored version only.
    for i in range(len(rotate_img_list)):
        img = rotate_img_list[i]
        if flip == 0:
            flip_img_list.append(img)
            flip_img_list.append(np.flipud(img))
        if flip == 1:
            flip_img_list.append(img)
        if flip == 2:
            img = np.flipud(img)
            flip_img_list.append(img)
    # Build the output filenames in the same order as flip_img_list.
    filename_list = []
    for i in range(len(f_p_list)):
        for j in range(len(f_r_list)):
            for k in range(len(f_f_list)):
                saveFile = f_p_list[i] + f_r_list[j] + f_f_list[k] + imgtype
                filename_list.append(saveFile)
    # assert len(filename_list) == len(flip_img_list)
    for i in range(len(filename_list)):
        path_base = os.path.join(out_folder, basename + '_')
        skimage.io.imsave(path_base + filename_list[i], flip_img_list[i])
    # Free the (potentially large) image lists before returning.
    del filename_list
    del flip_img_list
    del rotate_img_list
    del proj_img_list
    del f_p_list, f_r_list, f_f_list
# generate 48 images based on .mol2 files in the sourceFolder
def gen_48(sourceFolder, targetFolder):
    """Generate the augmented image set for every file in ``sourceFolder``.

    The target folder is wiped and recreated, so each run starts clean.
    NOTE(review): relies on the module-level ``args`` namespace parsed in
    ``__main__`` -- confirm before reusing as a library function.
    """
    # Start from an empty output directory.
    if os.path.exists(targetFolder):
        shutil.rmtree(targetFolder, ignore_errors=True)
    os.makedirs(targetFolder)
    # Only regular files count (sub-directories are skipped).
    fileList = [f for f in listdir(sourceFolder) if isfile(join(sourceFolder, f))]
    for mol in fileList:
        # os.path.join is robust whether or not sourceFolder ends with a
        # separator; the old ``sourceFolder + mol`` required a trailing '/'.
        one_gen_48(os.path.join(sourceFolder, mol), targetFolder, args)
# generate images for control_vs_nucleotide
def gen_48_control_vs_nucleotide():
    """Render images for every split of the control-vs-nucleotide dataset."""
    for split in ('train', 'val', 'test'):
        for label in ('control', 'nucleotide'):
            gen_48(sourceFolder='../control_vs_nucleotide_mols/%s/%s/' % (split, label),
                   targetFolder='../control_vs_nucleotide/%s/%s/' % (split, label))
# generate images for control_vs_heme
def gen_48_control_vs_heme():
    """Render images for every split of the control-vs-heme dataset."""
    for split in ('train', 'val', 'test'):
        for label in ('control', 'heme'):
            gen_48(sourceFolder='../control_vs_heme_mols/%s/%s/' % (split, label),
                   targetFolder='../control_vs_heme/%s/%s/' % (split, label))
# generate images for control_vs_nucleotide for 10-fold cross-validation
def gen_48_control_vs_nucleotide_cv(k):
    """Render images for all k cross-validation folds of control-vs-nucleotide."""
    for fold in range(1, k + 1):
        # Folder names are cv1 .. cvk.
        for split in ('train', 'val'):
            for label in ('control', 'nucleotide'):
                src = '../control_vs_nucleotide_mols_cv/cv%d/%s/%s/' % (fold, split, label)
                dst = '../control_vs_nucleotide_cv/cv%d/%s/%s/' % (fold, split, label)
                gen_48(sourceFolder=src, targetFolder=dst)
# generate images for control_vs_nucleotide for 10-fold cross-validation
def gen_48_control_vs_heme_cv(k):
    """Render images for all k cross-validation folds of control-vs-heme."""
    for fold in range(1, k + 1):
        # Folder names are cv1 .. cvk.
        for split in ('train', 'val'):
            for label in ('control', 'heme'):
                src = '../control_vs_heme_mols_cv/cv%d/%s/%s/' % (fold, split, label)
                dst = '../control_vs_heme_cv/cv%d/%s/%s/' % (fold, split, label)
                gen_48(sourceFolder=src, targetFolder=dst)
# generate images for heme_vs_nucleotide
def gen_48_heme_vs_nucleotide():
    """Render images for every split of the heme-vs-nucleotide dataset."""
    for split in ('train', 'val', 'test'):
        for label in ('heme', 'nucleotide'):
            gen_48(sourceFolder='../heme_vs_nucleotide_mols/%s/%s/' % (split, label),
                   targetFolder='../heme_vs_nucleotide/%s/%s/' % (split, label))
# generate images for heme_vs_nucleotide for 10-fold cross-validation
def gen_48_heme_vs_nucleotide_cv(k):
    """Render images for all k cross-validation folds of heme-vs-nucleotide."""
    for fold in range(1, k + 1):
        # Folder names are cv1 .. cvk; class folders carry an ordering prefix.
        for split in ('train', 'val'):
            for label in ('0-heme', '1-nucleotide'):
                src = '../heme_vs_nucleotide_mols_cv/cv%d/%s/%s/' % (fold, split, label)
                dst = '../heme_vs_nucleotide_cv/cv%d/%s/%s/' % (fold, split, label)
                gen_48(sourceFolder=src, targetFolder=dst)
# generate images for bionoi autoencoder. All the images will be in one single folder
def gen_48_bionoi_autoencoder():
    """Render images for the Bionoi autoencoder; all output goes to one folder."""
    gen_48(sourceFolder='../bae-data-mol2/',targetFolder='../bae-data-images/')
#-----------------------------------------------------------------------------------
if __name__ == "__main__":
args = getArgs()
opMode = args.opMode
if opMode == 'control_vs_nucleotide':
gen_48_control_vs_nucleotide()
elif opMode == 'control_vs_heme':
gen_48_control_vs_heme()
elif opMode == 'control_vs_nucleotide_cv':
gen_48_control_vs_nucleotide_cv(10)
elif opMode == 'control_vs_heme_cv':
gen_48_control_vs_heme_cv(10)
elif opMode == 'heme_vs_nucleotide':
gen_48_heme_vs_nucleotide()
elif opMode == 'heme_vs_nucleotide_cv':
gen_48_heme_vs_nucleotide_cv(10)
elif opMode == 'bionoi_autoencoder':
gen_48_bionoi_autoencoder()
else:
print('error: invalid value of opMode.') | [
"os.makedirs",
"argparse.ArgumentParser",
"os.path.basename",
"skimage.io.imsave",
"os.path.exists",
"numpy.flipud",
"os.path.splitext",
"bionoi.Bionoi",
"skimage.transform.rotate",
"shutil.rmtree",
"os.path.join",
"os.listdir"
] | [((368, 401), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""python"""'], {}), "('python')\n", (391, 401), False, 'import argparse\n'), ((4187, 4208), 'os.path.basename', 'os.path.basename', (['mol'], {}), '(mol)\n', (4203, 4208), False, 'import os\n'), ((7604, 7632), 'os.path.exists', 'os.path.exists', (['targetFolder'], {}), '(targetFolder)\n', (7618, 7632), False, 'import os\n'), ((7696, 7721), 'os.makedirs', 'os.makedirs', (['targetFolder'], {}), '(targetFolder)\n', (7707, 7721), False, 'import os\n'), ((4225, 4251), 'os.path.splitext', 'os.path.splitext', (['basepath'], {}), '(basepath)\n', (4241, 4251), False, 'import os\n'), ((4816, 4922), 'bionoi.Bionoi', 'Bionoi', ([], {'mol': 'mol', 'bs_out': '""""""', 'size': 'size', 'dpi': 'dpi', 'alpha': 'alpha', 'colorby': 'colorby', 'proj_direction': 'proDirect'}), "(mol=mol, bs_out='', size=size, dpi=dpi, alpha=alpha, colorby=colorby,\n proj_direction=proDirect)\n", (4822, 4922), False, 'from bionoi import Bionoi\n'), ((7240, 7280), 'os.path.join', 'os.path.join', (['out_folder', "(basename + '_')"], {}), "(out_folder, basename + '_')\n", (7252, 7280), False, 'import os\n'), ((7290, 7355), 'skimage.io.imsave', 'skimage.io.imsave', (['(path_base + filename_list[i])', 'flip_img_list[i]'], {}), '(path_base + filename_list[i], flip_img_list[i])\n', (7307, 7355), False, 'import skimage\n'), ((7643, 7690), 'shutil.rmtree', 'shutil.rmtree', (['targetFolder'], {'ignore_errors': '(True)'}), '(targetFolder, ignore_errors=True)\n', (7656, 7690), False, 'import shutil\n'), ((5258, 5360), 'bionoi.Bionoi', 'Bionoi', ([], {'mol': 'mol', 'bs_out': '""""""', 'size': 'size', 'dpi': 'dpi', 'alpha': 'alpha', 'colorby': 'colorby', 'proj_direction': '(i + 1)'}), "(mol=mol, bs_out='', size=size, dpi=dpi, alpha=alpha, colorby=colorby,\n proj_direction=i + 1)\n", (5264, 5360), False, 'from bionoi import Bionoi\n'), ((6791, 6805), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (6800, 6805), True, 'import numpy as 
np\n'), ((7792, 7813), 'os.listdir', 'listdir', (['sourceFolder'], {}), '(sourceFolder)\n', (7799, 7813), False, 'from os import listdir\n'), ((5910, 5931), 'skimage.transform.rotate', 'rotate', (['img'], {'angle': '(90)'}), '(img, angle=90)\n', (5916, 5931), False, 'from skimage.transform import rotate\n'), ((5969, 5991), 'skimage.transform.rotate', 'rotate', (['img'], {'angle': '(180)'}), '(img, angle=180)\n', (5975, 5991), False, 'from skimage.transform import rotate\n'), ((6029, 6051), 'skimage.transform.rotate', 'rotate', (['img'], {'angle': '(270)'}), '(img, angle=270)\n', (6035, 6051), False, 'from skimage.transform import rotate\n'), ((6671, 6685), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (6680, 6685), True, 'import numpy as np\n'), ((7824, 7845), 'os.path.join', 'join', (['sourceFolder', 'f'], {}), '(sourceFolder, f)\n', (7828, 7845), False, 'from os.path import isfile, join\n'), ((6118, 6138), 'skimage.transform.rotate', 'rotate', (['img'], {'angle': '(0)'}), '(img, angle=0)\n', (6124, 6138), False, 'from skimage.transform import rotate\n'), ((6205, 6226), 'skimage.transform.rotate', 'rotate', (['img'], {'angle': '(90)'}), '(img, angle=90)\n', (6211, 6226), False, 'from skimage.transform import rotate\n'), ((6293, 6315), 'skimage.transform.rotate', 'rotate', (['img'], {'angle': '(180)'}), '(img, angle=180)\n', (6299, 6315), False, 'from skimage.transform import rotate\n'), ((6382, 6404), 'skimage.transform.rotate', 'rotate', (['img'], {'angle': '(270)'}), '(img, angle=270)\n', (6388, 6404), False, 'from skimage.transform import rotate\n')] |
import argparse, glob, os, cv2, sys, pickle, random
import numpy as np
import tensorflow as tf
import config as cfg
from models.stgru import STGRU
from models.lrr import LRR
from models.dilation import dilation10network
from models.flownet2 import Flownet2
from models.flownet1 import Flownet1
from tensorflow.python.framework import ops
# Pre-compiled custom TensorFlow op for differentiable bilinear warping,
# shared by the flow networks and the ST-GRU.
bilinear_warping_module = tf.load_op_library('./misc/bilinear_warping.so')

@ops.RegisterGradient("BilinearWarping")
def _BilinearWarping(op, grad):
    # Gradient of the custom op: delegate to its companion grad kernel,
    # forwarding the upstream gradient and the op's original two inputs.
    return bilinear_warping_module.bilinear_warping_grad(grad, op.inputs[0], op.inputs[1])
class DataLoader():
    """Serves randomly-cropped training sequences from Cityscapes videos.

    Iterates (in shuffled order) over the fine-annotation label images and,
    for each, returns the preceding video frames plus the ground-truth crop.
    """
    def __init__(self, im_size, nbr_frames):
        # (h, w) of the random crop extracted from each frame.
        self.im_size = im_size
        # Full Cityscapes resolution (H, W).
        self.dataset_size = [1024, 2048]
        self.nbr_frames = nbr_frames
        # All *labelTrainIds.png ground-truth files of the training split.
        self.L = glob.glob(os.path.join(cfg.cityscapes_dir, 'gtFine', 'train', "*", "*labelTrainIds.png"))
        random.shuffle(self.L)
        self.idx = 0
    def get_next_sequence(self):
        """Return (images, gt) for the next annotated frame.

        images: list of nbr_frames float32 arrays of shape (1, h, w, 3)
            ending at the annotated frame (BGR channel order from cv2).
        gt: (h, w) crop of the trainId label image (cv2 grayscale read).
        """
        H, W = self.dataset_size
        h, w = self.im_size
        # One random crop offset, shared by every frame of the sequence.
        offset = [np.random.randint(H - h),
            np.random.randint(W - w)]
        i0, j0 = offset
        i1, j1 = i0 + h, j0 + w
        # Cycle through the shuffled list of annotated frames.
        im_path = self.L[self.idx % len(self.L)]
        self.idx += 1
        # Cityscapes file names look like "<city>_<seq>_<frame>_...".
        parts = im_path.split('/')[-1].split('_')
        city, seq, frame = parts[0], parts[1], parts[2]
        images = []
        gt = cv2.imread(im_path, 0)[i0:i1, j0:j1]
        # Collect nbr_frames consecutive video frames ending at the
        # annotated one (dt = -(nbr_frames-1) .. 0).
        for dt in range(-self.nbr_frames + 1, 1):
            t = int(frame) + dt
            frame_path = os.path.join(cfg.cityscapes_video_dir, 'leftImg8bit_sequence', 'train',
                city, ("%s_%s_%06d_leftImg8bit.png" % (city, seq, t)))
            images.append(cv2.imread(frame_path, 1).astype(np.float32)[i0:i1,j0:j1][np.newaxis,...])
        return images, gt
def train(args):
    """Train the GRFP spatio-temporal GRU on Cityscapes video sequences.

    args must provide:
        static -- 'lrr' or 'dilation': the per-frame segmentation network.
        flow   -- 'flownet1', 'flownet2' or 'farneback': optical-flow method.
        frames -- number of consecutive frames per training sequence.

    Builds the TF1 graph (flow net + static net + ST-GRU), restores the
    pretrained weights, then runs a two-stage loop: first only the GRU is
    trained; after t0_dilation_net iterations the static network is also
    refined. Checkpoints are saved under ./checkpoints/.
    """
    nbr_classes = 19
    # learning rates for the GRU and the static segmentation networks, respectively
    learning_rate = 2e-5
    static_learning_rate = 2e-12
    # The total number of iterations and when the static network should start being refined
    nbr_iterations = 10000
    t0_dilation_net = 5000
    im_size = [512, 512]
    image_mean = [72.39,82.91,73.16] # the mean is automatically subtracted in some modules e.g. flownet2, so be careful
    # NOTE(review): pickle file opened in text mode (Python-2 style); under
    # Python 3 this would need 'rb' -- confirm the intended interpreter.
    f = open('misc/cityscapes_labels.pckl')
    cs_id2trainid, cs_id2name = pickle.load(f)
    f.close()
    assert args.static in ['dilation', 'lrr'], "Only dilation and LRR are supported for now."
    # --- Optical-flow sub-graph (skipped entirely for 'farneback') ---
    if args.flow == 'flownet2':
        with tf.variable_scope('flow'):
            flow_network = Flownet2(bilinear_warping_module)
            flow_img0 = tf.placeholder(tf.float32)
            flow_img1 = tf.placeholder(tf.float32)
            flow_tensor = flow_network(flow_img0, flow_img1, flip=True)
    elif args.flow == 'flownet1':
        with tf.variable_scope('flow'):
            flow_network = Flownet1()
            flow_img0 = tf.placeholder(tf.float32)
            flow_img1 = tf.placeholder(tf.float32)
            flow_tensor = flow_network.get_output_tensor(flow_img0, flow_img1, im_size)
    # --- Spatio-temporal GRU that fuses warped past predictions ---
    RNN = STGRU([nbr_classes, im_size[0], im_size[1]], [7, 7], bilinear_warping_module)
    gru_opt, gru_loss, gru_prediction, gru_learning_rate, \
      gru_input_images_tensor, gru_input_flow_tensor, \
      gru_input_segmentation_tensor, gru_targets = RNN.get_optimizer(args.frames)
    # Gradient of the GRU loss w.r.t. the static unaries; fed back into the
    # static network during the refinement stage.
    unary_grad_op = tf.gradients(gru_loss, gru_input_segmentation_tensor)
    # --- Static (per-frame) segmentation sub-graph ---
    if args.static == 'lrr':
        static_input = tf.placeholder(tf.float32)
        static_network = LRR()
        static_output = static_network(static_input)
        unary_opt, unary_dLdy = static_network.get_optimizer(static_input, static_output, static_learning_rate)
    elif args.static == 'dilation':
        static_input = tf.placeholder(tf.float32)
        static_network = dilation10network()
        static_output = static_network.get_output_tensor(static_input, im_size)
    data_loader = DataLoader(im_size, args.frames)
    loss_history = np.zeros(nbr_iterations)
    loss_history_smoothed = np.zeros(nbr_iterations)
    # Flow-network weights are restored separately and never trained here.
    vars_trainable = [k for k in tf.trainable_variables() if not k.name.startswith('flow/')]
    vars_static = [k for k in vars_trainable if not k in RNN.weights.values()]
    loader_static = tf.train.Saver(vars_static)
    saver = tf.train.Saver(vars_trainable)
    if args.flow in ['flownet1', 'flownet2']:
        saver_fn = tf.train.Saver([k for k in tf.trainable_variables() if k.name.startswith('flow/')])
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # Restore pretrained static and flow weights.
        if args.static == 'lrr':
            loader_static.restore(sess, './checkpoints/lrr_pretrained')
        elif args.static == 'dilation':
            assert False, "Pretrained dilation model will soon be released."
            saver.restore(sess, './checkpoints/dilation_grfp')
        if args.flow == 'flownet1':
            saver_fn.restore(sess, './checkpoints/flownet1')
        elif args.flow == 'flownet2':
            saver_fn.restore(sess, './checkpoints/flownet2')
        for training_it in range(nbr_iterations):
            images, ground_truth = data_loader.get_next_sequence()
            # Optical flow
            optflow = []
            for frame in range(1, args.frames):
                im, last_im = images[frame], images[frame-1]
                if args.flow == 'flownet2':
                    flow = sess.run(flow_tensor, feed_dict={flow_img0: im, flow_img1: last_im})
                elif args.flow == 'flownet1':
                    flow = sess.run(flow_tensor, feed_dict={flow_img0: im, flow_img1: last_im})
                    flow = flow[...,(1, 0)]
                elif args.flow == 'farneback':
                    im_gray = cv2.cvtColor(im[0], cv2.COLOR_BGR2GRAY)
                    last_im_gray = cv2.cvtColor(last_im[0], cv2.COLOR_BGR2GRAY)
                    flow = cv2.calcOpticalFlowFarneback(im_gray, last_im_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
                    flow = flow[...,(1, 0)]
                    flow = flow[np.newaxis,...]
                optflow.append(flow)
            # Static segmentation
            static_segm = []
            for frame in range(args.frames):
                im = images[frame]
                if args.static == 'dilation':
                    # augment a 186x186 border around the image and subtract the mean
                    im_aug = cv2.copyMakeBorder(im[0], 186, 186, 186, 186, cv2.BORDER_REFLECT_101)
                    im_aug = im_aug - image_mean
                    im_aug = im_aug[np.newaxis,...]
                    x = sess.run(static_output, feed_dict={static_input: im_aug})
                elif args.static == 'lrr':
                    x = sess.run(static_output, feed_dict={static_input: im})
                static_segm.append(x)
            # GRFP
            rnn_input = {
                gru_learning_rate: learning_rate,
                gru_input_images_tensor: np.stack(images),
                gru_input_flow_tensor: np.stack(optflow),
                gru_input_segmentation_tensor: np.stack(static_segm),
                gru_targets: ground_truth,
            }
            _, loss, pred, unary_grads = sess.run([gru_opt, gru_loss,
                gru_prediction, unary_grad_op], feed_dict=rnn_input)
            loss_history[training_it] = loss
            # Exponential moving average of the loss (plain mean warm-up).
            if training_it < 300:
                loss_history_smoothed[training_it] = np.mean(loss_history[0:training_it+1])
            else:
                loss_history_smoothed[training_it] = 0.997*loss_history_smoothed[training_it-1] + 0.003*loss
            # Refine the static network?
            # The reason that a two-stage training routine is used
            # is because there is not enough GPU memory (with a 12 GB Titan X)
            # to do it in one pass.
            if training_it+1 > t0_dilation_net:
                # Back-propagate the GRU's unary gradient through the static
                # network for the last three frames only.
                for k in range(len(images)-3, len(images)):
                    g = unary_grads[0][k]
                    im = images[k]
                    _ = sess.run([unary_opt], feed_dict={
                        static_input: im,
                        unary_dLdy: g
                    })
            if training_it > 0 and (training_it+1) % 1000 == 0:
                saver.save(sess, './checkpoints/%s_%s_it%d' % (args.static, args.flow, training_it+1))
            if (training_it+1) % 200 == 0:
                print("Iteration %d/%d: Loss %.3f" % (training_it+1, nbr_iterations, loss_history_smoothed[training_it]))
if __name__ == '__main__':
    # Command-line interface: choose the static segmentation network, the
    # optical-flow method, and the temporal window length.
    # Fix: description typo "Tran" -> "Train".
    parser = argparse.ArgumentParser(description='Train GRFP on the CityScapes training set.')
    parser.add_argument('--static', help='Which static network to use.', required=True)
    parser.add_argument('--flow', help='Which optical flow method to use.', required=True)
    parser.add_argument('--frames', type=int, help='Number of frames to use.', default=5, required=False)
    args = parser.parse_args()
    # Validate the combination before building any TF graph.  NOTE: assert
    # is stripped under `python -O`; kept here for backward compatibility.
    assert args.flow in ['flownet1', 'flownet2', 'farneback'], "Unknown flow method %s." % args.flow
    assert args.static in ['dilation', 'dilation_grfp', 'lrr', 'lrr_grfp'], "Unknown static method %s." % args.static
    assert args.frames >= 1 and args.frames <= 20, "The number of frames must be between 1 and 20."
    train(args)
"argparse.ArgumentParser",
"tensorflow.trainable_variables",
"random.shuffle",
"models.lrr.LRR",
"pickle.load",
"numpy.random.randint",
"numpy.mean",
"cv2.calcOpticalFlowFarneback",
"os.path.join",
"models.flownet1.Flownet1",
"models.stgru.STGRU",
"cv2.cvtColor",
"cv2.copyMakeBorder",
"ten... | [((365, 413), 'tensorflow.load_op_library', 'tf.load_op_library', (['"""./misc/bilinear_warping.so"""'], {}), "('./misc/bilinear_warping.so')\n", (383, 413), True, 'import tensorflow as tf\n'), ((415, 454), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""BilinearWarping"""'], {}), "('BilinearWarping')\n", (435, 454), False, 'from tensorflow.python.framework import ops\n'), ((2368, 2382), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2379, 2382), False, 'import argparse, glob, os, cv2, sys, pickle, random\n'), ((3113, 3190), 'models.stgru.STGRU', 'STGRU', (['[nbr_classes, im_size[0], im_size[1]]', '[7, 7]', 'bilinear_warping_module'], {}), '([nbr_classes, im_size[0], im_size[1]], [7, 7], bilinear_warping_module)\n', (3118, 3190), False, 'from models.stgru import STGRU\n'), ((3418, 3471), 'tensorflow.gradients', 'tf.gradients', (['gru_loss', 'gru_input_segmentation_tensor'], {}), '(gru_loss, gru_input_segmentation_tensor)\n', (3430, 3471), True, 'import tensorflow as tf\n'), ((4032, 4056), 'numpy.zeros', 'np.zeros', (['nbr_iterations'], {}), '(nbr_iterations)\n', (4040, 4056), True, 'import numpy as np\n'), ((4085, 4109), 'numpy.zeros', 'np.zeros', (['nbr_iterations'], {}), '(nbr_iterations)\n', (4093, 4109), True, 'import numpy as np\n'), ((4303, 4330), 'tensorflow.train.Saver', 'tf.train.Saver', (['vars_static'], {}), '(vars_static)\n', (4317, 4330), True, 'import tensorflow as tf\n'), ((4343, 4373), 'tensorflow.train.Saver', 'tf.train.Saver', (['vars_trainable'], {}), '(vars_trainable)\n', (4357, 4373), True, 'import tensorflow as tf\n'), ((4540, 4573), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4571, 4573), True, 'import tensorflow as tf\n'), ((8606, 8691), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tran GRFP on the CityScapes training set."""'}), "(description='Tran GRFP on the CityScapes training set.'\n )\n", (8629, 
8691), False, 'import argparse, glob, os, cv2, sys, pickle, random\n'), ((866, 888), 'random.shuffle', 'random.shuffle', (['self.L'], {}), '(self.L)\n', (880, 888), False, 'import argparse, glob, os, cv2, sys, pickle, random\n'), ((3525, 3551), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (3539, 3551), True, 'import tensorflow as tf\n'), ((3577, 3582), 'models.lrr.LRR', 'LRR', ([], {}), '()\n', (3580, 3582), False, 'from models.lrr import LRR\n'), ((4584, 4596), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4594, 4596), True, 'import tensorflow as tf\n'), ((778, 856), 'os.path.join', 'os.path.join', (['cfg.cityscapes_dir', '"""gtFine"""', '"""train"""', '"""*"""', '"""*labelTrainIds.png"""'], {}), "(cfg.cityscapes_dir, 'gtFine', 'train', '*', '*labelTrainIds.png')\n", (790, 856), False, 'import argparse, glob, os, cv2, sys, pickle, random\n'), ((1024, 1048), 'numpy.random.randint', 'np.random.randint', (['(H - h)'], {}), '(H - h)\n', (1041, 1048), True, 'import numpy as np\n'), ((1062, 1086), 'numpy.random.randint', 'np.random.randint', (['(W - w)'], {}), '(W - w)\n', (1079, 1086), True, 'import numpy as np\n'), ((1357, 1379), 'cv2.imread', 'cv2.imread', (['im_path', '(0)'], {}), '(im_path, 0)\n', (1367, 1379), False, 'import argparse, glob, os, cv2, sys, pickle, random\n'), ((1523, 1651), 'os.path.join', 'os.path.join', (['cfg.cityscapes_video_dir', '"""leftImg8bit_sequence"""', '"""train"""', 'city', "('%s_%s_%06d_leftImg8bit.png' % (city, seq, t))"], {}), "(cfg.cityscapes_video_dir, 'leftImg8bit_sequence', 'train',\n city, '%s_%s_%06d_leftImg8bit.png' % (city, seq, t))\n", (1535, 1651), False, 'import argparse, glob, os, cv2, sys, pickle, random\n'), ((2538, 2563), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""flow"""'], {}), "('flow')\n", (2555, 2563), True, 'import tensorflow as tf\n'), ((2592, 2625), 'models.flownet2.Flownet2', 'Flownet2', (['bilinear_warping_module'], {}), 
'(bilinear_warping_module)\n', (2600, 2625), False, 'from models.flownet2 import Flownet2\n'), ((2650, 2676), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2664, 2676), True, 'import tensorflow as tf\n'), ((2701, 2727), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2715, 2727), True, 'import tensorflow as tf\n'), ((3808, 3834), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (3822, 3834), True, 'import tensorflow as tf\n'), ((3860, 3879), 'models.dilation.dilation10network', 'dilation10network', ([], {}), '()\n', (3877, 3879), False, 'from models.dilation import dilation10network\n'), ((4144, 4168), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4166, 4168), True, 'import tensorflow as tf\n'), ((2847, 2872), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""flow"""'], {}), "('flow')\n", (2864, 2872), True, 'import tensorflow as tf\n'), ((2901, 2911), 'models.flownet1.Flownet1', 'Flownet1', ([], {}), '()\n', (2909, 2911), False, 'from models.flownet1 import Flownet1\n'), ((2936, 2962), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2950, 2962), True, 'import tensorflow as tf\n'), ((2987, 3013), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (3001, 3013), True, 'import tensorflow as tf\n'), ((7011, 7027), 'numpy.stack', 'np.stack', (['images'], {}), '(images)\n', (7019, 7027), True, 'import numpy as np\n'), ((7068, 7085), 'numpy.stack', 'np.stack', (['optflow'], {}), '(optflow)\n', (7076, 7085), True, 'import numpy as np\n'), ((7134, 7155), 'numpy.stack', 'np.stack', (['static_segm'], {}), '(static_segm)\n', (7142, 7155), True, 'import numpy as np\n'), ((7499, 7539), 'numpy.mean', 'np.mean', (['loss_history[0:training_it + 1]'], {}), '(loss_history[0:training_it + 1])\n', (7506, 7539), True, 'import numpy as np\n'), ((4471, 4495), 
'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4493, 4495), True, 'import tensorflow as tf\n'), ((6461, 6530), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['im[0]', '(186)', '(186)', '(186)', '(186)', 'cv2.BORDER_REFLECT_101'], {}), '(im[0], 186, 186, 186, 186, cv2.BORDER_REFLECT_101)\n', (6479, 6530), False, 'import argparse, glob, os, cv2, sys, pickle, random\n'), ((5795, 5834), 'cv2.cvtColor', 'cv2.cvtColor', (['im[0]', 'cv2.COLOR_BGR2GRAY'], {}), '(im[0], cv2.COLOR_BGR2GRAY)\n', (5807, 5834), False, 'import argparse, glob, os, cv2, sys, pickle, random\n'), ((5870, 5914), 'cv2.cvtColor', 'cv2.cvtColor', (['last_im[0]', 'cv2.COLOR_BGR2GRAY'], {}), '(last_im[0], cv2.COLOR_BGR2GRAY)\n', (5882, 5914), False, 'import argparse, glob, os, cv2, sys, pickle, random\n'), ((5943, 6030), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['im_gray', 'last_im_gray', 'None', '(0.5)', '(3)', '(15)', '(3)', '(5)', '(1.2)', '(0)'], {}), '(im_gray, last_im_gray, None, 0.5, 3, 15, 3, 5,\n 1.2, 0)\n', (5971, 6030), False, 'import argparse, glob, os, cv2, sys, pickle, random\n'), ((1697, 1722), 'cv2.imread', 'cv2.imread', (['frame_path', '(1)'], {}), '(frame_path, 1)\n', (1707, 1722), False, 'import argparse, glob, os, cv2, sys, pickle, random\n')] |
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
import math
import matplotlib as mpl
#-------------------------------------------------------------------------------
def stress_divergence_hist():
    """Compare stress-divergence operators against the analytical solution.

    Reads the 40962-vertex grid and initial-condition files, computes the
    per-vertex error of five operator variants (Wachspress, PWL, their "alt"
    versions, and Weak), prints each error range, and saves histograms of the
    absolute U-component error north of 20 degrees latitude to
    stress_divergence_hist.png.
    """
    # grid: vertex latitudes, used to restrict the histograms to > 20N
    fileGrid = Dataset("grid.40962.nc", "r")
    latVertex = fileGrid.variables["latVertex"][:]
    fileGrid.close()

    # analytical stress divergence from the initial-condition file
    fileIC = Dataset("ic_40962.nc", "r")
    stressDivergenceUAnalytical = fileIC.variables["stressDivergenceUAnalytical"][:]
    stressDivergenceVAnalytical = fileIC.variables["stressDivergenceVAnalytical"][:]
    print("Stress divergence: ",
          np.amin(stressDivergenceUAnalytical), np.amax(stressDivergenceUAnalytical),
          np.amin(stressDivergenceVAnalytical), np.amax(stressDivergenceVAnalytical))
    fileIC.close()

    def load_error(filename, label):
        """Return (U, V) error vs the analytical solution and print its range."""
        data = Dataset(filename, "r")
        diffU = data.variables["stressDivergenceU"][0, :] - stressDivergenceUAnalytical
        diffV = data.variables["stressDivergenceV"][0, :] - stressDivergenceVAnalytical
        data.close()
        print(label,
              np.amin(diffU), np.amax(diffU),
              np.amin(diffV), np.amax(diffV))
        return diffU, diffV

    # One entry per operator variant:
    # (output file, print label, line colour, histogram label).
    variants = [
        ("./output_wachspress_40962/output.2000.nc", "Wachs: ", "red", "Wachspress"),
        ("./output_pwl_40962/output.2000.nc", "PWL: ", "blue", "PWL"),
        ("./output_wachspress_alt_40962/output.2000.nc", "Wachs Alt: ", "darkturquoise", "Wachs. Alt"),
        ("./output_pwl_alt_40962/output.2000.nc", "PWL Alt: ", "darkorange", "PWL Alt"),
        ("./output_weak_40962/output.2000.nc", "Weak: ", "green", "Weak"),
    ]

    # Boolean mask selecting vertices north of 20 degrees latitude.
    northMask = latVertex > math.radians(20.0)

    mpl.rc('text', usetex=True)
    mpl.rc('font', family='Times New Roman', size=8)
    mpl.rcParams['axes.linewidth'] = 0.5

    plt.figure(figsize=(3.74016, 3))
    for filename, printLabel, color, histLabel in variants:
        diffU, _ = load_error(filename, printLabel)
        # Vectorized |error| restricted to the northern mask (replaces the
        # original per-vertex Python loop).
        plt.hist(np.fabs(diffU)[northMask], 50, range=[0.0, 1.0],
                 histtype='step', lw=1, color=color, label=histLabel)
    plt.yscale('log', nonpositive='clip')
    plt.xlabel("Error")
    plt.ylabel("Frequency")
    plt.legend(["Wachs.", "PWL", "Wachs. Alt", "PWL Alt", "Weak"],
               frameon=False, fontsize=8)
    plt.xlim([0, 1.0])
    plt.tight_layout(pad=0.5, w_pad=0.5, h_pad=0.5)
    plt.savefig("stress_divergence_hist.png", dpi=400)
#-------------------------------------------------------------------------------
# Allow the comparison to be run directly as a script.
if __name__ == "__main__":
    stress_divergence_hist()
| [
"netCDF4.Dataset",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.yscale",
"matplotlib.rc",
"numpy.amin",
"matplotlib.pyplot.hist",
"math.fabs",
"math.radians",
"matplotlib.pyplot.legend",
"numpy.amax",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matp... | [((256, 285), 'netCDF4.Dataset', 'Dataset', (['"""grid.40962.nc"""', '"""r"""'], {}), "('grid.40962.nc', 'r')\n", (263, 285), False, 'from netCDF4 import Dataset\n'), ((438, 465), 'netCDF4.Dataset', 'Dataset', (['"""ic_40962.nc"""', '"""r"""'], {}), "('ic_40962.nc', 'r')\n", (445, 465), False, 'from netCDF4 import Dataset\n'), ((994, 1050), 'netCDF4.Dataset', 'Dataset', (['"""./output_wachspress_40962/output.2000.nc"""', '"""r"""'], {}), "('./output_wachspress_40962/output.2000.nc', 'r')\n", (1001, 1050), False, 'from netCDF4 import Dataset\n'), ((1607, 1656), 'netCDF4.Dataset', 'Dataset', (['"""./output_pwl_40962/output.2000.nc"""', '"""r"""'], {}), "('./output_pwl_40962/output.2000.nc', 'r')\n", (1614, 1656), False, 'from netCDF4 import Dataset\n'), ((2215, 2275), 'netCDF4.Dataset', 'Dataset', (['"""./output_wachspress_alt_40962/output.2000.nc"""', '"""r"""'], {}), "('./output_wachspress_alt_40962/output.2000.nc', 'r')\n", (2222, 2275), False, 'from netCDF4 import Dataset\n'), ((2878, 2931), 'netCDF4.Dataset', 'Dataset', (['"""./output_pwl_alt_40962/output.2000.nc"""', '"""r"""'], {}), "('./output_pwl_alt_40962/output.2000.nc', 'r')\n", (2885, 2931), False, 'from netCDF4 import Dataset\n'), ((3516, 3566), 'netCDF4.Dataset', 'Dataset', (['"""./output_weak_40962/output.2000.nc"""', '"""r"""'], {}), "('./output_weak_40962/output.2000.nc', 'r')\n", (3523, 3566), False, 'from netCDF4 import Dataset\n'), ((4917, 4944), 'matplotlib.rc', 'mpl.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (4923, 4944), True, 'import matplotlib as mpl\n'), ((4949, 4997), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {'family': '"""Times New Roman"""', 'size': '(8)'}), "('font', family='Times New Roman', size=8)\n", (4955, 4997), True, 'import matplotlib as mpl\n'), ((5044, 5076), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3.74016, 3)'}), '(figsize=(3.74016, 3))\n', (5054, 5076), True, 'import matplotlib.pyplot as plt\n'), ((5082, 5204), 
'matplotlib.pyplot.hist', 'plt.hist', (['stressDivergenceUWachDiffHist', '(50)'], {'range': '[0.0, 1.0]', 'histtype': '"""step"""', 'lw': '(1)', 'color': '"""red"""', 'label': '"""Wachspress"""'}), "(stressDivergenceUWachDiffHist, 50, range=[0.0, 1.0], histtype=\n 'step', lw=1, color='red', label='Wachspress')\n", (5090, 5204), True, 'import matplotlib.pyplot as plt\n'), ((5206, 5321), 'matplotlib.pyplot.hist', 'plt.hist', (['stressDivergenceUPWLDiffHist', '(50)'], {'range': '[0.0, 1.0]', 'histtype': '"""step"""', 'lw': '(1)', 'color': '"""blue"""', 'label': '"""PWL"""'}), "(stressDivergenceUPWLDiffHist, 50, range=[0.0, 1.0], histtype=\n 'step', lw=1, color='blue', label='PWL')\n", (5214, 5321), True, 'import matplotlib.pyplot as plt\n'), ((5325, 5460), 'matplotlib.pyplot.hist', 'plt.hist', (['stressDivergenceUWachAltDiffHist', '(50)'], {'range': '[0.0, 1.0]', 'histtype': '"""step"""', 'lw': '(1)', 'color': '"""darkturquoise"""', 'label': '"""Wachs. Alt"""'}), "(stressDivergenceUWachAltDiffHist, 50, range=[0.0, 1.0], histtype=\n 'step', lw=1, color='darkturquoise', label='Wachs. 
Alt')\n", (5333, 5460), True, 'import matplotlib.pyplot as plt\n'), ((5459, 5587), 'matplotlib.pyplot.hist', 'plt.hist', (['stressDivergenceUPWLAltDiffHist', '(50)'], {'range': '[0.0, 1.0]', 'histtype': '"""step"""', 'lw': '(1)', 'color': '"""darkorange"""', 'label': '"""PWL Alt"""'}), "(stressDivergenceUPWLAltDiffHist, 50, range=[0.0, 1.0], histtype=\n 'step', lw=1, color='darkorange', label='PWL Alt')\n", (5467, 5587), True, 'import matplotlib.pyplot as plt\n'), ((5588, 5706), 'matplotlib.pyplot.hist', 'plt.hist', (['stressDivergenceUWeakDiffHist', '(50)'], {'range': '[0.0, 1.0]', 'histtype': '"""step"""', 'lw': '(1)', 'color': '"""green"""', 'label': '"""Weak"""'}), "(stressDivergenceUWeakDiffHist, 50, range=[0.0, 1.0], histtype=\n 'step', lw=1, color='green', label='Weak')\n", (5596, 5706), True, 'import matplotlib.pyplot as plt\n'), ((5711, 5748), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {'nonpositive': '"""clip"""'}), "('log', nonpositive='clip')\n", (5721, 5748), True, 'import matplotlib.pyplot as plt\n'), ((5754, 5773), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Error"""'], {}), "('Error')\n", (5764, 5773), True, 'import matplotlib.pyplot as plt\n'), ((5778, 5801), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (5788, 5801), True, 'import matplotlib.pyplot as plt\n'), ((5807, 5901), 'matplotlib.pyplot.legend', 'plt.legend', (["['Wachs.', 'PWL', 'Wachs. Alt', 'PWL Alt', 'Weak']"], {'frameon': '(False)', 'fontsize': '(8)'}), "(['Wachs.', 'PWL', 'Wachs. 
Alt', 'PWL Alt', 'Weak'], frameon=\n False, fontsize=8)\n", (5817, 5901), True, 'import matplotlib.pyplot as plt\n'), ((5898, 5916), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1.0]'], {}), '([0, 1.0])\n', (5906, 5916), True, 'import matplotlib.pyplot as plt\n'), ((5921, 5968), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.5)', 'w_pad': '(0.5)', 'h_pad': '(0.5)'}), '(pad=0.5, w_pad=0.5, h_pad=0.5)\n', (5937, 5968), True, 'import matplotlib.pyplot as plt\n'), ((5974, 6024), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""stress_divergence_hist.png"""'], {'dpi': '(400)'}), "('stress_divergence_hist.png', dpi=400)\n", (5985, 6024), True, 'import matplotlib.pyplot as plt\n'), ((779, 815), 'numpy.amin', 'np.amin', (['stressDivergenceUAnalytical'], {}), '(stressDivergenceUAnalytical)\n', (786, 815), True, 'import numpy as np\n'), ((817, 853), 'numpy.amax', 'np.amax', (['stressDivergenceUAnalytical'], {}), '(stressDivergenceUAnalytical)\n', (824, 853), True, 'import numpy as np\n'), ((865, 901), 'numpy.amin', 'np.amin', (['stressDivergenceVAnalytical'], {}), '(stressDivergenceVAnalytical)\n', (872, 901), True, 'import numpy as np\n'), ((903, 939), 'numpy.amax', 'np.amax', (['stressDivergenceVAnalytical'], {}), '(stressDivergenceVAnalytical)\n', (910, 939), True, 'import numpy as np\n'), ((1406, 1440), 'numpy.amin', 'np.amin', (['stressDivergenceUWachDiff'], {}), '(stressDivergenceUWachDiff)\n', (1413, 1440), True, 'import numpy as np\n'), ((1442, 1476), 'numpy.amax', 'np.amax', (['stressDivergenceUWachDiff'], {}), '(stressDivergenceUWachDiff)\n', (1449, 1476), True, 'import numpy as np\n'), ((1488, 1522), 'numpy.amin', 'np.amin', (['stressDivergenceVWachDiff'], {}), '(stressDivergenceVWachDiff)\n', (1495, 1522), True, 'import numpy as np\n'), ((1524, 1558), 'numpy.amax', 'np.amax', (['stressDivergenceVWachDiff'], {}), '(stressDivergenceVWachDiff)\n', (1531, 1558), True, 'import numpy as np\n'), ((2004, 2037), 'numpy.amin', 'np.amin', 
(['stressDivergenceUPWLDiff'], {}), '(stressDivergenceUPWLDiff)\n', (2011, 2037), True, 'import numpy as np\n'), ((2039, 2072), 'numpy.amax', 'np.amax', (['stressDivergenceUPWLDiff'], {}), '(stressDivergenceUPWLDiff)\n', (2046, 2072), True, 'import numpy as np\n'), ((2084, 2117), 'numpy.amin', 'np.amin', (['stressDivergenceVPWLDiff'], {}), '(stressDivergenceVPWLDiff)\n', (2091, 2117), True, 'import numpy as np\n'), ((2119, 2152), 'numpy.amax', 'np.amax', (['stressDivergenceVPWLDiff'], {}), '(stressDivergenceVPWLDiff)\n', (2126, 2152), True, 'import numpy as np\n'), ((2655, 2692), 'numpy.amin', 'np.amin', (['stressDivergenceUWachAltDiff'], {}), '(stressDivergenceUWachAltDiff)\n', (2662, 2692), True, 'import numpy as np\n'), ((2694, 2731), 'numpy.amax', 'np.amax', (['stressDivergenceUWachAltDiff'], {}), '(stressDivergenceUWachAltDiff)\n', (2701, 2731), True, 'import numpy as np\n'), ((2743, 2780), 'numpy.amin', 'np.amin', (['stressDivergenceVWachAltDiff'], {}), '(stressDivergenceVWachAltDiff)\n', (2750, 2780), True, 'import numpy as np\n'), ((2782, 2819), 'numpy.amax', 'np.amax', (['stressDivergenceVWachAltDiff'], {}), '(stressDivergenceVWachAltDiff)\n', (2789, 2819), True, 'import numpy as np\n'), ((3303, 3339), 'numpy.amin', 'np.amin', (['stressDivergenceUPWLAltDiff'], {}), '(stressDivergenceUPWLAltDiff)\n', (3310, 3339), True, 'import numpy as np\n'), ((3341, 3377), 'numpy.amax', 'np.amax', (['stressDivergenceUPWLAltDiff'], {}), '(stressDivergenceUPWLAltDiff)\n', (3348, 3377), True, 'import numpy as np\n'), ((3389, 3425), 'numpy.amin', 'np.amin', (['stressDivergenceVPWLAltDiff'], {}), '(stressDivergenceVPWLAltDiff)\n', (3396, 3425), True, 'import numpy as np\n'), ((3427, 3463), 'numpy.amax', 'np.amax', (['stressDivergenceVPWLAltDiff'], {}), '(stressDivergenceVPWLAltDiff)\n', (3434, 3463), True, 'import numpy as np\n'), ((3922, 3956), 'numpy.amin', 'np.amin', (['stressDivergenceUWeakDiff'], {}), '(stressDivergenceUWeakDiff)\n', (3929, 3956), True, 'import numpy as 
np\n'), ((3958, 3992), 'numpy.amax', 'np.amax', (['stressDivergenceUWeakDiff'], {}), '(stressDivergenceUWeakDiff)\n', (3965, 3992), True, 'import numpy as np\n'), ((4004, 4038), 'numpy.amin', 'np.amin', (['stressDivergenceVWeakDiff'], {}), '(stressDivergenceVWeakDiff)\n', (4011, 4038), True, 'import numpy as np\n'), ((4040, 4074), 'numpy.amax', 'np.amax', (['stressDivergenceVWeakDiff'], {}), '(stressDivergenceVWeakDiff)\n', (4047, 4074), True, 'import numpy as np\n'), ((4401, 4419), 'math.radians', 'math.radians', (['(20.0)'], {}), '(20.0)\n', (4413, 4419), False, 'import math\n'), ((4472, 4517), 'math.fabs', 'math.fabs', (['stressDivergenceUWachDiff[iVertex]'], {}), '(stressDivergenceUWachDiff[iVertex])\n', (4481, 4517), False, 'import math\n'), ((4567, 4611), 'math.fabs', 'math.fabs', (['stressDivergenceUPWLDiff[iVertex]'], {}), '(stressDivergenceUPWLDiff[iVertex])\n', (4576, 4611), False, 'import math\n'), ((4665, 4713), 'math.fabs', 'math.fabs', (['stressDivergenceUWachAltDiff[iVertex]'], {}), '(stressDivergenceUWachAltDiff[iVertex])\n', (4674, 4713), False, 'import math\n'), ((4766, 4813), 'math.fabs', 'math.fabs', (['stressDivergenceUPWLAltDiff[iVertex]'], {}), '(stressDivergenceUPWLAltDiff[iVertex])\n', (4775, 4813), False, 'import math\n'), ((4864, 4909), 'math.fabs', 'math.fabs', (['stressDivergenceUWeakDiff[iVertex]'], {}), '(stressDivergenceUWeakDiff[iVertex])\n', (4873, 4909), False, 'import math\n')] |
# <NAME> <<EMAIL>>
import tensorflow as tf
import numpy as np
class trainload:
    """Builds a TF1 graph that loads, augments, and double-buffers training batches.

    Feed `getfeed(paths)` while running `fetchOp` to stage the next batch,
    then run `swapOp` to publish it into `self.batch`.
    """

    def __init__(self, bsz, scales=(256, 512)):
        """Construct the loading graph.

        Args:
            bsz: batch size (number of filename placeholders).
            scales: candidate short-side sizes; one is picked at random per
                image. Immutable tuple default avoids the shared mutable
                default-argument pitfall.
        """
        self.names = []
        # One filename placeholder per image in the batch.
        for i in range(bsz):
            self.names.append(tf.placeholder(tf.string))
        batch = []
        sizes = tf.constant(np.float32(scales))
        for i in range(bsz):
            # Load the raw image bytes.
            img = tf.read_file(self.names[i])
            # Dispatch on the first byte: 137 (0x89) marks a PNG header,
            # anything else is decoded as JPEG.
            code = tf.decode_raw(img, tf.uint8)[0]
            img = tf.cond(tf.equal(code, 137),
                          lambda: tf.image.decode_png(img, channels=3),
                          lambda: tf.image.decode_jpeg(img, channels=3))
            # Resize so the shorter side equals a randomly chosen scale.
            in_s = tf.to_float(tf.shape(img)[:2])
            min_s = tf.minimum(in_s[0], in_s[1])
            size = tf.random_shuffle(sizes)[0]
            new_s = tf.to_int32((size / min_s) * in_s)
            img = tf.image.resize_images(img, new_s[0], new_s[1])
            # Augmentation: random horizontal flip, then random 224x224 crop.
            img = tf.image.random_flip_left_right(img)
            img = tf.random_crop(img, [224, 224, 3])
            batch.append(tf.reshape(img, [1, 224, 224, 3]))
        batch = tf.to_float(tf.concat(0, batch))
        # Double buffering: `fetchOp` loads the next batch into a staging
        # variable, `swapOp` publishes it into `self.batch`.
        nBuf = tf.Variable(tf.zeros([bsz, 224, 224, 3], dtype=tf.float32), trainable=False)
        self.batch = tf.Variable(tf.zeros([bsz, 224, 224, 3], dtype=tf.float32), trainable=False)
        self.fetchOp = tf.assign(nBuf, batch).op
        self.swapOp = tf.assign(self.batch, nBuf)

    def getfeed(self, imgs):
        """Map each filename placeholder to the corresponding path in `imgs`."""
        # Dict comprehension; avoids shadowing the builtin `dict` as the
        # original did.
        return {name: img for name, img in zip(self.names, imgs)}
def testload(name, size):
    """Load a JPEG image and resize it so its shorter side equals `size`.

    Returns a float tensor with a leading batch dimension of 1 (NHWC).
    """
    raw = tf.read_file(name)
    img = tf.image.decode_jpeg(raw, channels=3)
    # Compute the scale factor that maps the shorter side to `size`.
    dims = tf.to_float(tf.shape(img)[:2])
    shorter = tf.minimum(dims[0], dims[1])
    target = tf.to_int32((size / shorter) * dims)
    resized = tf.image.resize_images(img, target[0], target[1])
    return tf.expand_dims(tf.to_float(resized), 0)
| [
"tensorflow.reshape",
"tensorflow.image.decode_png",
"tensorflow.decode_raw",
"tensorflow.assign",
"tensorflow.concat",
"tensorflow.minimum",
"tensorflow.placeholder",
"tensorflow.to_int32",
"tensorflow.to_float",
"tensorflow.random_crop",
"tensorflow.equal",
"tensorflow.image.resize_images",
... | [((1955, 1983), 'tensorflow.minimum', 'tf.minimum', (['in_s[0]', 'in_s[1]'], {}), '(in_s[0], in_s[1])\n', (1965, 1983), True, 'import tensorflow as tf\n'), ((1999, 2031), 'tensorflow.to_int32', 'tf.to_int32', (['(size / min_s * in_s)'], {}), '(size / min_s * in_s)\n', (2010, 2031), True, 'import tensorflow as tf\n'), ((2044, 2091), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['img', 'new_s[0]', 'new_s[1]'], {}), '(img, new_s[0], new_s[1])\n', (2066, 2091), True, 'import tensorflow as tf\n'), ((1547, 1574), 'tensorflow.assign', 'tf.assign', (['self.batch', 'nBuf'], {}), '(self.batch, nBuf)\n', (1556, 1574), True, 'import tensorflow as tf\n'), ((1819, 1837), 'tensorflow.read_file', 'tf.read_file', (['name'], {}), '(name)\n', (1831, 1837), True, 'import tensorflow as tf\n'), ((2120, 2136), 'tensorflow.to_float', 'tf.to_float', (['img'], {}), '(img)\n', (2131, 2136), True, 'import tensorflow as tf\n'), ((314, 332), 'numpy.float32', 'np.float32', (['scales'], {}), '(scales)\n', (324, 332), True, 'import numpy as np\n'), ((406, 433), 'tensorflow.read_file', 'tf.read_file', (['self.names[i]'], {}), '(self.names[i])\n', (418, 433), True, 'import tensorflow as tf\n'), ((799, 827), 'tensorflow.minimum', 'tf.minimum', (['in_s[0]', 'in_s[1]'], {}), '(in_s[0], in_s[1])\n', (809, 827), True, 'import tensorflow as tf\n'), ((894, 926), 'tensorflow.to_int32', 'tf.to_int32', (['(size / min_s * in_s)'], {}), '(size / min_s * in_s)\n', (905, 926), True, 'import tensorflow as tf\n'), ((943, 990), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['img', 'new_s[0]', 'new_s[1]'], {}), '(img, new_s[0], new_s[1])\n', (965, 990), True, 'import tensorflow as tf\n'), ((1042, 1078), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['img'], {}), '(img)\n', (1073, 1078), True, 'import tensorflow as tf\n'), ((1132, 1166), 'tensorflow.random_crop', 'tf.random_crop', (['img', '[224, 224, 3]'], {}), '(img, [224, 224, 3])\n', (1146, 
1166), True, 'import tensorflow as tf\n'), ((1250, 1269), 'tensorflow.concat', 'tf.concat', (['(0)', 'batch'], {}), '(0, batch)\n', (1259, 1269), True, 'import tensorflow as tf\n'), ((1323, 1369), 'tensorflow.zeros', 'tf.zeros', (['[bsz, 224, 224, 3]'], {'dtype': 'tf.float32'}), '([bsz, 224, 224, 3], dtype=tf.float32)\n', (1331, 1369), True, 'import tensorflow as tf\n'), ((1416, 1462), 'tensorflow.zeros', 'tf.zeros', (['[bsz, 224, 224, 3]'], {'dtype': 'tf.float32'}), '([bsz, 224, 224, 3], dtype=tf.float32)\n', (1424, 1462), True, 'import tensorflow as tf\n'), ((1500, 1522), 'tensorflow.assign', 'tf.assign', (['nBuf', 'batch'], {}), '(nBuf, batch)\n', (1509, 1522), True, 'import tensorflow as tf\n'), ((1920, 1933), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (1928, 1933), True, 'import tensorflow as tf\n'), ((239, 264), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {}), '(tf.string)\n', (253, 264), True, 'import tensorflow as tf\n'), ((453, 481), 'tensorflow.decode_raw', 'tf.decode_raw', (['img', 'tf.uint8'], {}), '(img, tf.uint8)\n', (466, 481), True, 'import tensorflow as tf\n'), ((510, 529), 'tensorflow.equal', 'tf.equal', (['code', '(137)'], {}), '(code, 137)\n', (518, 529), True, 'import tensorflow as tf\n'), ((846, 870), 'tensorflow.random_shuffle', 'tf.random_shuffle', (['sizes'], {}), '(sizes)\n', (863, 870), True, 'import tensorflow as tf\n'), ((1190, 1223), 'tensorflow.reshape', 'tf.reshape', (['img', '[1, 224, 224, 3]'], {}), '(img, [1, 224, 224, 3])\n', (1200, 1223), True, 'import tensorflow as tf\n'), ((564, 600), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['img'], {'channels': '(3)'}), '(img, channels=3)\n', (583, 600), True, 'import tensorflow as tf\n'), ((635, 672), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['img'], {'channels': '(3)'}), '(img, channels=3)\n', (655, 672), True, 'import tensorflow as tf\n'), ((760, 773), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (768, 773), True, 
'import tensorflow as tf\n')] |
import numpy as np
import tensorflow as tf
from .base import SaliencyMap
class VanillaGradients(SaliencyMap):
    """Saliency maps built from the raw input gradients dy/dx."""

    def get_mask(self, image):
        """Return the vanilla gradient map dy/dx.

        Args:
            image: input image in NHWC format.
        """
        return self.get_gradients(image)

    def get_smooth_mask(self, image, stdev_spread=0.1, n=30, magnitude=False):
        """Return a SmoothGrad map: gradients averaged over noisy copies.

        Args:
            image: input image in NHWC format.
            stdev_spread: noise std-dev as a fraction of the image's value range.
            n: number of noisy samples to average.
            magnitude: if True, average squared gradients instead.
        """
        sigma = stdev_spread * (np.max(image) - np.min(image))
        accumulated = np.zeros_like(image)
        for _ in range(n):
            noisy = image + np.random.normal(0, sigma, image.shape)
            grads = self.get_gradients(noisy)
            if magnitude:
                grads *= grads
            accumulated += grads
        return accumulated / n
| [
"numpy.max",
"numpy.zeros_like",
"numpy.min",
"numpy.random.normal"
] | [((656, 676), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (669, 676), True, 'import numpy as np\n'), ((725, 764), 'numpy.random.normal', 'np.random.normal', (['(0)', 'stdev', 'image.shape'], {}), '(0, stdev, image.shape)\n', (741, 764), True, 'import numpy as np\n'), ((599, 612), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (605, 612), True, 'import numpy as np\n'), ((615, 628), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (621, 628), True, 'import numpy as np\n')] |
import numpy as np
import pyarrow as pa
import pytest
from meerkat.block.abstract import BlockView
from meerkat.block.arrow_block import ArrowBlock
from meerkat.block.ref import BlockRef
from meerkat.columns.arrow_column import ArrowArrayColumn
from meerkat.errors import ConsolidationError
def test_signature_hash():
    """Signature hashes agree iff the underlying tables have matching shape."""

    def make_block(data):
        return ArrowBlock(pa.Table.from_pydict(data))

    # Same column types and lengths (names differ) -> equal signature hashes.
    left = make_block({"a": [1, 2, 3], "b": ["4", "5", "6"]})
    right = make_block({"c": [1, 2, 3], "d": ["4", "5", "6"]})
    assert hash(left.signature) == hash(right.signature)

    # Different lengths -> different signature hashes.
    left = make_block({"a": [1, 2, 3], "b": ["4", "5", "6"]})
    right = make_block({"c": [1, 2], "d": ["5", "6"]})
    assert hash(left.signature) != hash(right.signature)
@pytest.mark.parametrize("num_blocks", [1, 2, 3])
def test_consolidate_1(num_blocks):
    """Consolidating refs to several Arrow blocks preserves every column."""
    blocks = []
    column_maps = []
    for idx in range(num_blocks):
        table = pa.Table.from_pydict(
            {f"a_{idx}": np.arange(10), f"b_{idx}": np.arange(10) * 2},
        )
        block = ArrowBlock(table)
        columns = {}
        for slc in (f"a_{idx}", f"b_{idx}"):
            view = BlockView(block=block, block_index=slc)
            columns[str(slc)] = ArrowArrayColumn(data=view)
        blocks.append(block)
        column_maps.append(columns)

    block_refs = [
        BlockRef(block=blk, columns=cols_map)
        for blk, cols_map in zip(blocks, column_maps)
    ]
    consolidated = ArrowBlock.consolidate(block_refs=block_refs)

    # Every original column must equal its counterpart in the consolidated block.
    for ref in block_refs:
        source_block = ref.block
        for name, col in ref.items():
            original = source_block.data[col._block_index]
            merged = consolidated.block.data[consolidated[name]._block_index]
            assert original.equals(merged)
def test_consolidate_empty():
    """Consolidating with no block refs at all must raise ConsolidationError."""
    no_refs = []
    with pytest.raises(ConsolidationError):
        ArrowBlock.consolidate(no_refs)
def test_consolidate_mismatched_signature():
    """Blocks whose tables differ in shape cannot be consolidated together."""
    tables = [
        pa.Table.from_pydict({"a": [1, 2, 3], "b": ["4", "5", "6"]}),
        pa.Table.from_pydict({"c": [1, 2], "d": ["5", "6"]}),
    ]
    blocks = [ArrowBlock(table) for table in tables]
    column_names = [["a", "b"], ["c", "d"]]
    block_refs = []
    for block, names in zip(blocks, column_names):
        columns = {
            name: ArrowArrayColumn(
                data=BlockView(block=block, block_index=name)
            )
            for name in names
        }
        block_refs.append(BlockRef(block=block, columns=columns))
    with pytest.raises(ConsolidationError):
        ArrowBlock.consolidate(block_refs)
def test_io(tmpdir):
    """Writing a block to disk and reading it back preserves its data."""
    block = ArrowBlock(pa.Table.from_pydict({"a": [1, 2, 3], "b": ["4", "5", "6"]}))
    block.write(tmpdir)
    new_block = block.read(tmpdir)
    # Fix: assert on the round-tripped object, not the original (which is
    # trivially an ArrowBlock regardless of what read() returned).
    assert isinstance(new_block, ArrowBlock)
    assert block.data.equals(new_block.data)
| [
"pyarrow.Table.from_pydict",
"meerkat.block.arrow_block.ArrowBlock.consolidate",
"meerkat.block.abstract.BlockView",
"pytest.raises",
"numpy.arange",
"pytest.mark.parametrize",
"meerkat.block.ref.BlockRef"
] | [((821, 869), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_blocks"""', '[1, 2, 3]'], {}), "('num_blocks', [1, 2, 3])\n", (844, 869), False, 'import pytest\n'), ((1563, 1608), 'meerkat.block.arrow_block.ArrowBlock.consolidate', 'ArrowBlock.consolidate', ([], {'block_refs': 'block_refs'}), '(block_refs=block_refs)\n', (1585, 1608), False, 'from meerkat.block.arrow_block import ArrowBlock\n'), ((363, 423), 'pyarrow.Table.from_pydict', 'pa.Table.from_pydict', (["{'a': [1, 2, 3], 'b': ['4', '5', '6']}"], {}), "({'a': [1, 2, 3], 'b': ['4', '5', '6']})\n", (383, 423), True, 'import pyarrow as pa\n'), ((449, 509), 'pyarrow.Table.from_pydict', 'pa.Table.from_pydict', (["{'c': [1, 2, 3], 'd': ['4', '5', '6']}"], {}), "({'c': [1, 2, 3], 'd': ['4', '5', '6']})\n", (469, 509), True, 'import pyarrow as pa\n'), ((618, 678), 'pyarrow.Table.from_pydict', 'pa.Table.from_pydict', (["{'a': [1, 2, 3], 'b': ['4', '5', '6']}"], {}), "({'a': [1, 2, 3], 'b': ['4', '5', '6']})\n", (638, 678), True, 'import pyarrow as pa\n'), ((704, 756), 'pyarrow.Table.from_pydict', 'pa.Table.from_pydict', (["{'c': [1, 2], 'd': ['5', '6']}"], {}), "({'c': [1, 2], 'd': ['5', '6']})\n", (724, 756), True, 'import pyarrow as pa\n'), ((1468, 1503), 'meerkat.block.ref.BlockRef', 'BlockRef', ([], {'block': 'block', 'columns': 'cols'}), '(block=block, columns=cols)\n', (1476, 1503), False, 'from meerkat.block.ref import BlockRef\n'), ((1878, 1911), 'pytest.raises', 'pytest.raises', (['ConsolidationError'], {}), '(ConsolidationError)\n', (1891, 1911), False, 'import pytest\n'), ((1921, 1947), 'meerkat.block.arrow_block.ArrowBlock.consolidate', 'ArrowBlock.consolidate', (['[]'], {}), '([])\n', (1943, 1947), False, 'from meerkat.block.arrow_block import ArrowBlock\n'), ((2019, 2079), 'pyarrow.Table.from_pydict', 'pa.Table.from_pydict', (["{'a': [1, 2, 3], 'b': ['4', '5', '6']}"], {}), "({'a': [1, 2, 3], 'b': ['4', '5', '6']})\n", (2039, 2079), True, 'import pyarrow as pa\n'), ((2105, 2157), 
'pyarrow.Table.from_pydict', 'pa.Table.from_pydict', (["{'c': [1, 2], 'd': ['5', '6']}"], {}), "({'c': [1, 2], 'd': ['5', '6']})\n", (2125, 2157), True, 'import pyarrow as pa\n'), ((2578, 2613), 'meerkat.block.ref.BlockRef', 'BlockRef', ([], {'block': 'block', 'columns': 'cols'}), '(block=block, columns=cols)\n', (2586, 2613), False, 'from meerkat.block.ref import BlockRef\n'), ((2666, 2699), 'pytest.raises', 'pytest.raises', (['ConsolidationError'], {}), '(ConsolidationError)\n', (2679, 2699), False, 'import pytest\n'), ((2709, 2743), 'meerkat.block.arrow_block.ArrowBlock.consolidate', 'ArrowBlock.consolidate', (['block_refs'], {}), '(block_refs)\n', (2731, 2743), False, 'from meerkat.block.arrow_block import ArrowBlock\n'), ((2790, 2850), 'pyarrow.Table.from_pydict', 'pa.Table.from_pydict', (["{'a': [1, 2, 3], 'b': ['4', '5', '6']}"], {}), "({'a': [1, 2, 3], 'b': ['4', '5', '6']})\n", (2810, 2850), True, 'import pyarrow as pa\n'), ((1022, 1035), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1031, 1035), True, 'import numpy as np\n'), ((1221, 1266), 'meerkat.block.abstract.BlockView', 'BlockView', ([], {'block': 'blocks[idx]', 'block_index': 'slc'}), '(block=blocks[idx], block_index=slc)\n', (1230, 1266), False, 'from meerkat.block.abstract import BlockView\n'), ((2335, 2386), 'meerkat.block.abstract.BlockView', 'BlockView', ([], {'block': 'blocks[block_idx]', 'block_index': 'slc'}), '(block=blocks[block_idx], block_index=slc)\n', (2344, 2386), False, 'from meerkat.block.abstract import BlockView\n'), ((1049, 1062), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1058, 1062), True, 'import numpy as np\n')] |
from gailtf.baselines.common.mpi_running_mean_std import RunningMeanStd
import tensorflow as tf
import tensorflow.contrib.layers as layers
from gailtf.baselines.common import tf_util as U
from gailtf.common.tf_util import *
import numpy as np
import ipdb
L2_REG = 1e-4
class TrajectoryClassifier(object):
    """GAIL-style discriminator over whole (state, action) trajectories.

    Two copies of the same network (weights shared through TF variable
    reuse) score generator and expert trajectories: an RNN processes each
    padded sequence, an attention layer pools the outputs into a single
    vector, and a shared fully-connected layer maps that vector to a
    real/fake logit.  The attention-weighted per-timestep features are
    passed through the same fully-connected layer to produce per-timestep
    rewards for the policy (``self.rewards_op``).
    """
    def __init__(self, env, hidden_size, sequence_size, attention_size, cell_type, entcoeff=0.001, lr_rate = 0.0, scope = "adversary"):
        # NOTE(review): lr_rate is accepted but never used inside this class.
        self.scope = scope
        self.observation_shape = env.observation_space.shape
        self.action_shape = env.action_space.shape
        self.num_observations = self.observation_shape[0]
        self.num_actions = self.action_shape[0]
        # Each timestep embeds the concatenated observation and action.
        self.embedding_size = self.num_observations + self.num_actions
        self.hidden_size = hidden_size
        self.sequence_size = sequence_size
        self.attention_size = attention_size
        self.cell_type = cell_type
        self.build_ph()
        #Build graph: one copy for generator input, one (reused) for expert input
        generator_logits, self.rewards_op = self.build_graph(self.generator_traj_ph, self.generator_traj_seq_len, reuse = False)
        expert_logits, _ = self.build_graph(self.expert_traj_ph, self.expert_traj_seq_len, reuse = True)
        # Build accuracy: generator trajectories should score < 0.5, expert > 0.5
        generator_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(generator_logits) < 0.5))
        expert_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(expert_logits) > 0.5))
        # Build regression loss
        # let x = logits, z = targets.
        # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
        generator_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=generator_logits, labels=tf.zeros_like(generator_logits))
        generator_loss = tf.reduce_mean(generator_loss)
        expert_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=expert_logits, labels=tf.ones_like(expert_logits))
        expert_loss = tf.reduce_mean(expert_loss)
        # Build entropy loss (bonus weighted by entcoeff over both streams)
        logits = tf.concat([generator_logits, expert_logits], 0)
        entropy = tf.reduce_mean(logit_bernoulli_entropy(logits))
        entropy_loss = -entcoeff*entropy
        # Loss + Accuracy terms
        self.losses = [generator_loss, expert_loss, entropy, entropy_loss, generator_acc, expert_acc]
        self.loss_name = ["generator_loss", "expert_loss", "entropy", "entropy_loss", "generator_acc", "expert_acc"]
        self.total_loss = generator_loss + expert_loss + entropy_loss
        var_list = self.get_trainable_variables()
        # lossandgrad(gen_trajs, gen_lens, exp_trajs, exp_lens, keep_prob)
        # returns every individual loss followed by the flattened gradient.
        self.lossandgrad = U.function([self.generator_traj_ph, self.generator_traj_seq_len, self.expert_traj_ph, self.expert_traj_seq_len, self.dropout_keep_prob],
                                        self.losses + [U.flatgrad(self.total_loss, var_list)])
        # for test
        #self.check_values = U.function([self.generator_traj_ph, self.generator_traj_seq_len, self.expert_traj_ph, self.expert_traj_seq_len, self.dropout_keep_prob],[self.cvs, self.exp_cvs])
    def get_trainable_variables(self):
        """Return all trainable variables created under this classifier's scope."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
    def build_ph(self):
        """Create input placeholders for both trajectory streams.

        Trajectories are padded to ``sequence_size`` timesteps of
        ``embedding_size`` features; the ``*_seq_len`` placeholders carry
        the true (unpadded) length of each trajectory.
        """
        self.generator_traj_ph = tf.placeholder(tf.float32, (None, self.sequence_size, self.embedding_size), name = "observation_traj")
        self.generator_traj_seq_len = tf.placeholder(tf.float32, (None,), name = "observation_seq_length")
        self.expert_traj_ph = tf.placeholder(tf.float32, (None, self.sequence_size, self.embedding_size), name = "expert_traj")
        self.expert_traj_seq_len = tf.placeholder(tf.float32, (None,), name = "expert_seq_length")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name = 'dropout_keep_prob')
    def build_graph(self, trajs,trajs_len, reuse=False):
        """Build the discriminator network for one trajectory stream.

        :param trajs: tensor of shape (batch, sequence_size, embedding_size)
        :param trajs_len: tensor with the true length of each trajectory
        :param reuse: share variables with a previously built copy
        :returns: (per-trajectory logits, per-timestep reward tensor)
        """
        with tf.variable_scope(self.scope):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            #input normalize: observations standardised by a running mean/std
            with tf.variable_scope("obfilter"):
                self.obs_rms = RunningMeanStd(shape = self.observation_shape)
            obs = (trajs[:,:,:self.num_observations] - self.obs_rms.mean) / self.obs_rms.std
            # actions (the remaining features) are passed through unnormalised
            feats = tf.concat((obs, trajs[:,:,self.num_observations:]), 2)
            #feats = trajs
            with tf.variable_scope("rnn"):
                cell = self._get_cell(self.hidden_size,self.cell_type, reuse)
                cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob = self.dropout_keep_prob)
                outputs, _ = tf.nn.dynamic_rnn(cell = cell, inputs = feats, sequence_length = trajs_len, dtype = tf.float32)
            with tf.variable_scope('attention') as scope:
                attn_outputs, weighted_eb = self.attention(outputs, self.attention_size, scope)
            # The same linear layer scores the pooled vector (logits) and
            # each attention-weighted timestep (rewards).
            logits = self.shared_fc_layer(attn_outputs, reuse = False)
            rewards = self.shared_fc_layer(weighted_eb, reuse = True)
        #check_values = (outputs, attn_outputs, weighted_eb)
        return logits, rewards#, check_values
    def shared_fc_layer(self, inputs, scope = 'fully_connected', reuse = False):
        """Single linear (identity-activation) output unit, shared via variable reuse."""
        with tf.variable_scope(scope):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            outputs = tf.contrib.layers.fully_connected(inputs, 1, activation_fn = tf.identity)
        return outputs
    def attention(self, inputs, size, scope):
        """Context-vector attention over RNN outputs.

        :param inputs: RNN outputs of shape (batch, time, hidden)
        :param size: dimensionality of the attention projection
        :returns: (pooled output summed over time, per-timestep weighted inputs)
        """
        with tf.variable_scope(scope or "attention") as scope:
            attention_context_vector = tf.get_variable(name = "attention_context_vector", shape = [size], regularizer = layers.l2_regularizer(scale = L2_REG), dtype = tf.float32)
            input_projection = layers.fully_connected(inputs, size, activation_fn = tf.tanh, weights_regularizer=layers.l2_regularizer(scale=L2_REG))
            # softmax over the time axis (axis=1) gives one weight per timestep
            vector_attn = tf.reduce_sum(tf.multiply(input_projection, attention_context_vector), axis=2, keep_dims=True)
            attention_weights = tf.nn.softmax(vector_attn, dim = 1)
            weighted_projection = tf.multiply(inputs, attention_weights)
            outputs = tf.reduce_sum(weighted_projection, axis = 1)
            return outputs, weighted_projection
    @staticmethod
    def _get_cell(hidden_size, cell_type = 'lstm', reuse = False):
        """Return an RNN cell of the requested type, or None for an unknown type."""
        if cell_type == "vanilla":
            return tf.contrib.rnn.BasicRNNCell(hidden_size, reuse = reuse)
        elif cell_type == "lstm":
            return tf.contrib.rnn.BasicLSTMCell(hidden_size, reuse = reuse)
        elif cell_type == "gru":
            return tf.contrib.rnn.GRUCell(hidden_size, reuse = reuse)
        else:
            print("ERROR: '" + cell_type + "' is a wrong cell type !!!")
            return None
    def get_reward(self, trajs, trajs_len, dropout_keep_prob = 1.0):
        """Run the reward head on (a batch of) trajectories.

        A single trajectory (2-D array) and a scalar length are promoted
        to batch form before being fed to the session.
        """
        sess = U.get_session()
        if len(trajs.shape) == 2:
            trajs = np.expand_dims(trajs, 0)
        if len(np.shape(trajs_len)) == 0:
            trajs_len = np.expand_dims(trajs_len, 0)
        feed_dict = {self.generator_traj_ph:trajs, self.generator_traj_seq_len:trajs_len, self.dropout_keep_prob:dropout_keep_prob}
        rewards = sess.run(self.rewards_op, feed_dict)
        return rewards
def test(expert_path,sequence_size = 1000,attention_size = 30, hidden_size = 30, env_id = 'Hopper-v1', cell_type = 'lstm'):
    """Smoke test: build the classifier on one env, run one loss/grad pass
    and one reward pass against an expert trajectory dataset.

    :param expert_path: path to a pickled expert trajectory dataset
    """
    from gailtf.dataset.mujoco_traj import Mujoco_Traj_Dset
    import gym
    U.make_session(num_cpu = 2).__enter__()
    dset = Mujoco_Traj_Dset(expert_path)
    env = gym.make(env_id)
    t1, tl1 = dset.get_next_traj_batch(10)
    t2, tl2 = dset.get_next_traj_batch(10)
    discriminator = TrajectoryClassifier(env, hidden_size, sequence_size, attention_size, cell_type)
    U.initialize()
    *losses, g = discriminator.lossandgrad(t1, tl1, t2, tl2, 0.5)
    # Bug fix: the classifier defines get_reward (singular); calling
    # get_rewards raised AttributeError.
    rs1 = discriminator.get_reward(t1, tl1)
    #cv1,cv2 = discriminator.check_values(t1,tl1,t2,tl2,0.5)
    print(rs1.shape)
if __name__ == "__main__":
    # CLI entry point: run the smoke test against a pickled expert dataset.
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("--expert_path", type=str, default="../baselines/ppo1/ppo.Hopper.0.00.pkl")
    parsed = cli.parse_args()
    test(parsed.expert_path)
| [
"tensorflow.contrib.rnn.GRUCell",
"tensorflow.reduce_sum",
"tensorflow.contrib.rnn.BasicRNNCell",
"argparse.ArgumentParser",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.get_collection",
"tensorflow.get_variable_scope",
"tensorflow.zeros_like",
"numpy.shape",
"tensorflow.multiply",
"g... | [((7347, 7376), 'gailtf.dataset.mujoco_traj.Mujoco_Traj_Dset', 'Mujoco_Traj_Dset', (['expert_path'], {}), '(expert_path)\n', (7363, 7376), False, 'from gailtf.dataset.mujoco_traj import Mujoco_Traj_Dset\n'), ((7387, 7403), 'gym.make', 'gym.make', (['env_id'], {}), '(env_id)\n', (7395, 7403), False, 'import gym\n'), ((7595, 7609), 'gailtf.baselines.common.tf_util.initialize', 'U.initialize', ([], {}), '()\n', (7607, 7609), True, 'from gailtf.baselines.common import tf_util as U\n'), ((7864, 7889), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7887, 7889), False, 'import argparse\n'), ((1691, 1721), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['generator_loss'], {}), '(generator_loss)\n', (1705, 1721), True, 'import tensorflow as tf\n'), ((1864, 1891), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['expert_loss'], {}), '(expert_loss)\n', (1878, 1891), True, 'import tensorflow as tf\n'), ((1938, 1985), 'tensorflow.concat', 'tf.concat', (['[generator_logits, expert_logits]', '(0)'], {}), '([generator_logits, expert_logits], 0)\n', (1947, 1985), True, 'import tensorflow as tf\n'), ((2990, 3053), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', 'self.scope'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)\n', (3007, 3053), True, 'import tensorflow as tf\n'), ((3113, 3217), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, self.sequence_size, self.embedding_size)'], {'name': '"""observation_traj"""'}), "(tf.float32, (None, self.sequence_size, self.embedding_size),\n name='observation_traj')\n", (3127, 3217), True, 'import tensorflow as tf\n'), ((3254, 3320), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None,)'], {'name': '"""observation_seq_length"""'}), "(tf.float32, (None,), name='observation_seq_length')\n", (3268, 3320), True, 'import tensorflow as tf\n'), ((3353, 3452), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 
self.sequence_size, self.embedding_size)'], {'name': '"""expert_traj"""'}), "(tf.float32, (None, self.sequence_size, self.embedding_size),\n name='expert_traj')\n", (3367, 3452), True, 'import tensorflow as tf\n'), ((3486, 3547), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None,)'], {'name': '"""expert_seq_length"""'}), "(tf.float32, (None,), name='expert_seq_length')\n", (3500, 3547), True, 'import tensorflow as tf\n'), ((3583, 3635), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""dropout_keep_prob"""'}), "(tf.float32, name='dropout_keep_prob')\n", (3597, 3635), True, 'import tensorflow as tf\n'), ((6692, 6707), 'gailtf.baselines.common.tf_util.get_session', 'U.get_session', ([], {}), '()\n', (6705, 6707), True, 'from gailtf.baselines.common import tf_util as U\n'), ((3709, 3738), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.scope'], {}), '(self.scope)\n', (3726, 3738), True, 'import tensorflow as tf\n'), ((4089, 4145), 'tensorflow.concat', 'tf.concat', (['(obs, trajs[:, :, self.num_observations:])', '(2)'], {}), '((obs, trajs[:, :, self.num_observations:]), 2)\n', (4098, 4145), True, 'import tensorflow as tf\n'), ((5041, 5065), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (5058, 5065), True, 'import tensorflow as tf\n'), ((5169, 5240), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['inputs', '(1)'], {'activation_fn': 'tf.identity'}), '(inputs, 1, activation_fn=tf.identity)\n', (5202, 5240), True, 'import tensorflow as tf\n'), ((5326, 5365), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'attention')"], {}), "(scope or 'attention')\n", (5343, 5365), True, 'import tensorflow as tf\n'), ((5858, 5891), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['vector_attn'], {'dim': '(1)'}), '(vector_attn, dim=1)\n', (5871, 5891), True, 'import tensorflow as tf\n'), ((5928, 5966), 'tensorflow.multiply', 'tf.multiply', (['inputs', 
'attention_weights'], {}), '(inputs, attention_weights)\n', (5939, 5966), True, 'import tensorflow as tf\n'), ((5989, 6031), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weighted_projection'], {'axis': '(1)'}), '(weighted_projection, axis=1)\n', (6002, 6031), True, 'import tensorflow as tf\n'), ((6227, 6280), 'tensorflow.contrib.rnn.BasicRNNCell', 'tf.contrib.rnn.BasicRNNCell', (['hidden_size'], {'reuse': 'reuse'}), '(hidden_size, reuse=reuse)\n', (6254, 6280), True, 'import tensorflow as tf\n'), ((6762, 6786), 'numpy.expand_dims', 'np.expand_dims', (['trajs', '(0)'], {}), '(trajs, 0)\n', (6776, 6786), True, 'import numpy as np\n'), ((6853, 6881), 'numpy.expand_dims', 'np.expand_dims', (['trajs_len', '(0)'], {}), '(trajs_len, 0)\n', (6867, 6881), True, 'import numpy as np\n'), ((7296, 7321), 'gailtf.baselines.common.tf_util.make_session', 'U.make_session', ([], {'num_cpu': '(2)'}), '(num_cpu=2)\n', (7310, 7321), True, 'from gailtf.baselines.common import tf_util as U\n'), ((1633, 1664), 'tensorflow.zeros_like', 'tf.zeros_like', (['generator_logits'], {}), '(generator_logits)\n', (1646, 1664), True, 'import tensorflow as tf\n'), ((1813, 1840), 'tensorflow.ones_like', 'tf.ones_like', (['expert_logits'], {}), '(expert_logits)\n', (1825, 1840), True, 'import tensorflow as tf\n'), ((3867, 3896), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""obfilter"""'], {}), "('obfilter')\n", (3884, 3896), True, 'import tensorflow as tf\n'), ((3929, 3973), 'gailtf.baselines.common.mpi_running_mean_std.RunningMeanStd', 'RunningMeanStd', ([], {'shape': 'self.observation_shape'}), '(shape=self.observation_shape)\n', (3943, 3973), False, 'from gailtf.baselines.common.mpi_running_mean_std import RunningMeanStd\n'), ((4189, 4213), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rnn"""'], {}), "('rnn')\n", (4206, 4213), True, 'import tensorflow as tf\n'), ((4316, 4392), 'tensorflow.contrib.rnn.DropoutWrapper', 'tf.contrib.rnn.DropoutWrapper', (['cell'], {'output_keep_prob': 
'self.dropout_keep_prob'}), '(cell, output_keep_prob=self.dropout_keep_prob)\n', (4345, 4392), True, 'import tensorflow as tf\n'), ((4424, 4516), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'cell', 'inputs': 'feats', 'sequence_length': 'trajs_len', 'dtype': 'tf.float32'}), '(cell=cell, inputs=feats, sequence_length=trajs_len, dtype\n =tf.float32)\n', (4441, 4516), True, 'import tensorflow as tf\n'), ((5745, 5800), 'tensorflow.multiply', 'tf.multiply', (['input_projection', 'attention_context_vector'], {}), '(input_projection, attention_context_vector)\n', (5756, 5800), True, 'import tensorflow as tf\n'), ((6336, 6390), 'tensorflow.contrib.rnn.BasicLSTMCell', 'tf.contrib.rnn.BasicLSTMCell', (['hidden_size'], {'reuse': 'reuse'}), '(hidden_size, reuse=reuse)\n', (6364, 6390), True, 'import tensorflow as tf\n'), ((6802, 6821), 'numpy.shape', 'np.shape', (['trajs_len'], {}), '(trajs_len)\n', (6810, 6821), True, 'import numpy as np\n'), ((1276, 1307), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['generator_logits'], {}), '(generator_logits)\n', (1289, 1307), True, 'import tensorflow as tf\n'), ((1364, 1392), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['expert_logits'], {}), '(expert_logits)\n', (1377, 1392), True, 'import tensorflow as tf\n'), ((2677, 2714), 'gailtf.baselines.common.tf_util.flatgrad', 'U.flatgrad', (['self.total_loss', 'var_list'], {}), '(self.total_loss, var_list)\n', (2687, 2714), True, 'from gailtf.baselines.common import tf_util as U\n'), ((4541, 4571), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention"""'], {}), "('attention')\n", (4558, 4571), True, 'import tensorflow as tf\n'), ((5496, 5531), 'tensorflow.contrib.layers.l2_regularizer', 'layers.l2_regularizer', ([], {'scale': 'L2_REG'}), '(scale=L2_REG)\n', (5517, 5531), True, 'import tensorflow.contrib.layers as layers\n'), ((5668, 5703), 'tensorflow.contrib.layers.l2_regularizer', 'layers.l2_regularizer', ([], {'scale': 'L2_REG'}), '(scale=L2_REG)\n', (5689, 
5703), True, 'import tensorflow.contrib.layers as layers\n'), ((6445, 6493), 'tensorflow.contrib.rnn.GRUCell', 'tf.contrib.rnn.GRUCell', (['hidden_size'], {'reuse': 'reuse'}), '(hidden_size, reuse=reuse)\n', (6467, 6493), True, 'import tensorflow as tf\n'), ((3778, 3801), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (3799, 3801), True, 'import tensorflow as tf\n'), ((5105, 5128), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (5126, 5128), True, 'import tensorflow as tf\n')] |
import matplotlib.pyplot as plt
import random
import numpy as np
# Draw random integers in [0, 100] one at a time; after each draw record the
# running mean and the percentage of samples within two standard deviations
# of it, then plot both series on twin y-axes and save the figure.
fig, ax = plt.subplots(1, 1)

sizes = []
running_means = []
within_two_sigma = []
samples = []
for step in range(250):
    sizes.append(step)
    samples.append(random.randint(0, 100))
    sigma = np.std(samples)
    mean = np.average(samples)
    running_means.append(mean)
    outliers = np.sum(np.where(samples > (mean + sigma * 2), 1, 0))
    outliers += np.sum(np.where(samples < (mean - sigma * 2), 1, 0))
    within_two_sigma.append((len(samples) - outliers) / float(len(samples)) * 100.0)

ax.plot(sizes, running_means)

twin = ax.twinx()
twin.plot(sizes, within_two_sigma, color="r")
twin.set_ylabel("Percentage within 2 sigma [%]", color="r")
twin.set_ylim(0, 102)

ax.set_xlabel("Random Sample Size Increase")
ax.set_ylabel("Average", color="b")
ax.set_ylim(0, 102)
ax.set_title("Random Sampling between 0 and 100")
ax.grid(True)
ax.set_yticks([0, 25, 50, 75, 100])
fig.savefig("test.png")
| [
"numpy.average",
"random.randint",
"numpy.std",
"numpy.where",
"matplotlib.pyplot.subplots"
] | [((78, 96), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (90, 96), True, 'import matplotlib.pyplot as plt\n'), ((219, 231), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (225, 231), True, 'import numpy as np\n'), ((242, 258), 'numpy.average', 'np.average', (['data'], {}), '(data)\n', (252, 258), True, 'import numpy as np\n'), ((185, 207), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (199, 207), False, 'import random\n'), ((294, 330), 'numpy.where', 'np.where', (['(data > avg + std * 2)', '(1)', '(0)'], {}), '(data > avg + std * 2, 1, 0)\n', (302, 330), True, 'import numpy as np\n'), ((352, 388), 'numpy.where', 'np.where', (['(data < avg - std * 2)', '(1)', '(0)'], {}), '(data < avg - std * 2, 1, 0)\n', (360, 388), True, 'import numpy as np\n')] |
import numpy as np
def read_obj_file(path: str):
    """Parse a Wavefront OBJ file and return its vertices and faces.

    Both returned arrays keep a dummy all-zero first row so that the
    1-based vertex indices stored in the face records index ``vertexArray``
    directly (OBJ files number vertices from 1); ``cal_volume`` relies on
    this layout, and the dummy face row contributes zero volume.

    :param path: path to the ``.obj`` file
    :returns: tuple ``(vertexArray, meshArray)`` — an (V+1, 3) float array
        of vertex coordinates and an (F+1, 3) float array of 1-based
        vertex indices per triangular face.
    """
    # Accumulate in Python lists (O(n)) instead of np.append per line (O(n^2)).
    vertices = [[0.0, 0.0, 0.0]]   # dummy row 0 -> 1-based indexing works
    faces = [[0, 0, 0]]            # dummy row kept for backward compatibility
    with open(path) as f:
        for line in f:
            data = line.split()  # split on whitespace
            if not data:         # skip blank lines instead of crashing
                continue
            if data[0] == "v":
                vertices.append([float(data[1]), float(data[2]), float(data[3])])
            elif data[0] == "f":
                # face entries look like "v//vn"; keep only the vertex index
                faces.append([int(tok.split("//")[0]) for tok in data[1:4]])
    vertexArray = np.array(vertices, dtype=float)
    meshArray = np.array(faces, dtype=float)
    print("total vertex num : {}".format(np.shape(vertexArray)))
    print("total mesh num : {}".format(np.shape(meshArray)))
    return vertexArray, meshArray
def cal_volume(vertexs: np.ndarray, meshes: np.ndarray):
    """Compute the signed volume enclosed by a triangle mesh.

    Uses the divergence theorem: the total volume is the sum of the signed
    volumes of the tetrahedra formed by the origin and each triangle,
    det([v1 v2 v3]) / 6.  Degenerate faces (e.g. the all-zero dummy row
    emitted by ``read_obj_file``) have a zero determinant and are harmless.

    :param vertexs: (N, 3) array of vertex coordinates, indexed 1-based via
        a dummy row 0 (as produced by ``read_obj_file``)
    :param meshes: (M, 3) array of vertex indices per triangular face
    :returns: the signed total volume (also printed, as before)
    """
    total_volume = 0.0
    for mesh in meshes:
        dV = np.zeros((3, 3))
        dV[:, 0] = vertexs[int(mesh[0])]
        dV[:, 1] = vertexs[int(mesh[1])]
        dV[:, 2] = vertexs[int(mesh[2])]
        total_volume = total_volume + np.linalg.det(dV) / 6
    print("result : {}".format(total_volume))
    # Fix: the result was computed and printed but never returned.
    return total_volume
if __name__ == '__main__':
    # Load the sample mesh and report its enclosed volume.
    verts, faces = read_obj_file("./sample.obj")
    cal_volume(verts, faces)
"numpy.shape",
"numpy.linalg.det",
"numpy.zeros"
] | [((110, 126), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (118, 126), True, 'import numpy as np\n'), ((147, 163), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (155, 163), True, 'import numpy as np\n'), ((1018, 1034), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1026, 1034), True, 'import numpy as np\n'), ((769, 790), 'numpy.shape', 'np.shape', (['vertexArray'], {}), '(vertexArray)\n', (777, 790), True, 'import numpy as np\n'), ((833, 852), 'numpy.shape', 'np.shape', (['meshArray'], {}), '(meshArray)\n', (841, 852), True, 'import numpy as np\n'), ((1200, 1217), 'numpy.linalg.det', 'np.linalg.det', (['dV'], {}), '(dV)\n', (1213, 1217), True, 'import numpy as np\n')] |
#-------------------------------------------------------------------------------
# Calculate urban areas from gridded population data
# <NAME>, April 2019
# Purpose is to create high density urban clusters and urban cluster above minimum
# density and total population thresholds
#-------------------------------------------------------------------------------
import os, sys, logging, geojson, json, time
import rasterio
import geopandas as gpd
import pandas as pd
import numpy as np
from scipy import stats
from scipy.ndimage import generic_filter
from scipy.sparse.csgraph import connected_components
from rasterio import features
from rasterio.features import rasterize
from shapely.geometry import shape, Polygon
'''prints the time along with the message'''
def tPrint(s):
    """Print *s* prefixed with the current HH:MM:SS timestamp and a tab."""
    stamp = time.strftime("%H:%M:%S")
    print("{}\t{}".format(stamp, s))
class urbanGriddedPop(object):
    def __init__(self, inRaster):
        """
        Create urban definitions using gridded population data.

        :param inRaster: string or rasterio object representing gridded population data
        """
        if type(inRaster) == str:
            self.inR = rasterio.open(inRaster)
        elif isinstance(inRaster, rasterio.DatasetReader):
            self.inR = inRaster
        else:
            raise(ValueError("Input raster dataset must be a file path or a rasterio object"))

    def calculateUrban(self, densVal=300, totalPopThresh=5000, smooth=False, verbose=False, queen=False,
                        raster='', raster_pop='', print_message=''):
        """
        Generate urban extents from gridded population data through the application of a minimum
        density threshold and a minimum total population threshold

        :param densVal: integer of the minimum density value to be counted as urban
        :param totalPopThresh: integer minimum total settlement population to be considered urban
        :param smooth: boolean to run a single modal smoothing function (this should be run when running
                on WorldPop as the increased resolution often leads to small holes and funny shapes
        :param verbose: boolean on what messages to receive
        :param queen: boolean to determine whether to dissolve final shape to connect queen's contiguity
        :param raster: string path to create a boolean raster of urban and not.
                    Empty string is the default and will create no raster
        :param raster_pop: string path to create a raster of the population layer only in the urban areas
                    Empty string is the default and will create no raster
        :returns: GeoPandasDataFrame of the urban extents
        """
        popRaster = self.inR
        data = popRaster.read()
        # 1 where cell density exceeds the threshold, 0 elsewhere
        urbanData = (data > densVal) * 1
        urbanData = urbanData.astype('int16')
        if verbose:
            tPrint("%s: Read in urban data" % print_message)

        # Modal filter
        def modal(P):
            mode = stats.mode(P)
            return mode.mode[0]
        '''
        if smooth:
            # Run modal filter
            urbanData[0,:,:] = generic_filter(urbanData[0,:,:], modal, (3, 3))
            tPrint("Smoothed urban data")
        '''
        allFeatures = []
        badFeatures = []
        idx = 0
        # create output array to store urban raster
        urban_raster = urbanData * 0
        # Vectorize contiguous patches of urban cells
        for cShape, value in features.shapes(urbanData, transform=popRaster.transform):
            if idx % 1000 == 0 and verbose:
                tPrint("%s: Creating Shape %s" % (print_message, idx))
            if value == 1:
                if smooth:
                    # Drop interior holes by keeping only the exterior ring
                    xx = shape(cShape)
                    xx = Polygon(xx.exterior)
                    cShape = xx.__geo_interface__
                #If the shape is urban, calculate total pop
                mask = rasterize([(cShape, 0)], out_shape=data[0,:,:].shape,fill=1,transform=popRaster.transform)
                inData = np.ma.array(data=data, mask=mask.astype(bool))
                curPop = np.nansum(inData)
                if curPop < 0: # when smoothed, sometimes the pop will be < 0 because of no data
                    inData = np.ma.array(data=inData, mask=(inData < 0).astype(bool))
                    curPop = np.nansum(inData)
                if curPop > totalPopThresh:
                    allFeatures.append([idx, curPop, shape(geojson.loads(json.dumps(cShape)))])
                    urban_raster += (mask^1)
                else:
                    badFeatures.append([idx, curPop, shape(geojson.loads(json.dumps(cShape)))])
            idx = idx + 1
        if len(raster):
            out_metadata = popRaster.meta.copy()
            out_metadata['dtype'] = urban_raster.dtype
            out_metadata['nodata'] = 0
            with rasterio.open(raster, 'w', **out_metadata) as rOut:
                rOut.write(urban_raster)
        if len(raster_pop):
            out_metadata = popRaster.meta.copy()
            urban_pop = data * urban_raster
            with rasterio.open(raster_pop, 'w', **out_metadata) as rOut:
                rOut.write(urban_pop)
        xx = pd.DataFrame(allFeatures, columns=['ID', 'Pop','geometry'])
        xxGeom = gpd.GeoDataFrame(xx, geometry='geometry')
        xxGeom.crs = popRaster.crs
        if queen:
            # Buffer by half a pixel so diagonal (queen-contiguity) neighbours
            # overlap, then merge overlapping shapes into single settlements.
            # Fix: the key previously had a stray trailing space ('geometry '),
            # which wrote the buffer to a new, unused column.
            xxGeom['geometry'] = xxGeom.buffer((popRaster.res[0] / 2))
            s = xxGeom['geometry']
            overlap_matrix = s.apply(lambda x: s.intersects(x)).values.astype(int)
            n, ids = connected_components(overlap_matrix)
            xxGeom['group'] = ids
            xxGeom = xxGeom.dissolve(by="group", aggfunc="sum")
        return(xxGeom)
| [
"pandas.DataFrame",
"rasterio.open",
"numpy.nansum",
"shapely.geometry.Polygon",
"scipy.stats.mode",
"time.strftime",
"json.dumps",
"geopandas.GeoDataFrame",
"scipy.sparse.csgraph.connected_components",
"rasterio.features.rasterize",
"rasterio.features.shapes",
"shapely.geometry.shape"
] | [((3461, 3518), 'rasterio.features.shapes', 'features.shapes', (['urbanData'], {'transform': 'popRaster.transform'}), '(urbanData, transform=popRaster.transform)\n', (3476, 3518), False, 'from rasterio import features\n'), ((5256, 5316), 'pandas.DataFrame', 'pd.DataFrame', (['allFeatures'], {'columns': "['ID', 'Pop', 'geometry']"}), "(allFeatures, columns=['ID', 'Pop', 'geometry'])\n", (5268, 5316), True, 'import pandas as pd\n'), ((5333, 5374), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['xx'], {'geometry': '"""geometry"""'}), "(xx, geometry='geometry')\n", (5349, 5374), True, 'import geopandas as gpd\n'), ((1155, 1178), 'rasterio.open', 'rasterio.open', (['inRaster'], {}), '(inRaster)\n', (1168, 1178), False, 'import rasterio\n'), ((3031, 3044), 'scipy.stats.mode', 'stats.mode', (['P'], {}), '(P)\n', (3041, 3044), False, 'from scipy import stats\n'), ((5648, 5684), 'scipy.sparse.csgraph.connected_components', 'connected_components', (['overlap_matrix'], {}), '(overlap_matrix)\n', (5668, 5684), False, 'from scipy.sparse.csgraph import connected_components\n'), ((808, 833), 'time.strftime', 'time.strftime', (['"""%H:%M:%S"""'], {}), "('%H:%M:%S')\n", (821, 833), False, 'import os, sys, logging, geojson, json, time\n'), ((3927, 4026), 'rasterio.features.rasterize', 'rasterize', (['[(cShape, 0)]'], {'out_shape': 'data[0, :, :].shape', 'fill': '(1)', 'transform': 'popRaster.transform'}), '([(cShape, 0)], out_shape=data[0, :, :].shape, fill=1, transform=\n popRaster.transform)\n', (3936, 4026), False, 'from rasterio.features import rasterize\n'), ((4115, 4132), 'numpy.nansum', 'np.nansum', (['inData'], {}), '(inData)\n', (4124, 4132), True, 'import numpy as np\n'), ((4900, 4942), 'rasterio.open', 'rasterio.open', (['raster', '"""w"""'], {}), "(raster, 'w', **out_metadata)\n", (4913, 4942), False, 'import rasterio\n'), ((5140, 5186), 'rasterio.open', 'rasterio.open', (['raster_pop', '"""w"""'], {}), "(raster_pop, 'w', **out_metadata)\n", (5153, 5186), False, 
'import rasterio\n'), ((3726, 3739), 'shapely.geometry.shape', 'shape', (['cShape'], {}), '(cShape)\n', (3731, 3739), False, 'from shapely.geometry import shape, Polygon\n'), ((3765, 3785), 'shapely.geometry.Polygon', 'Polygon', (['xx.exterior'], {}), '(xx.exterior)\n', (3772, 3785), False, 'from shapely.geometry import shape, Polygon\n'), ((4347, 4364), 'numpy.nansum', 'np.nansum', (['inData'], {}), '(inData)\n', (4356, 4364), True, 'import numpy as np\n'), ((4495, 4513), 'json.dumps', 'json.dumps', (['cShape'], {}), '(cShape)\n', (4505, 4513), False, 'import os, sys, logging, geojson, json, time\n'), ((4658, 4676), 'json.dumps', 'json.dumps', (['cShape'], {}), '(cShape)\n', (4668, 4676), False, 'import os, sys, logging, geojson, json, time\n')] |
from typing import Union, Optional, List, Tuple, Text, BinaryIO
import numpy as np
import math
irange = range
def make_grid(
    tensor: Union[np.ndarray, List[np.ndarray]],
    nrow: int = 8,
    padding: int = 2,
    normalize: bool = False,
    range: Optional[Tuple[int, int]] = None,
    scale_each: bool = False,
    pad_value: int = 0,
) -> np.ndarray:
    """Make a grid of images.

    This is essentially a numpy port of ``torchvision.utils.make_grid``.

    Args:
        tensor (ndarray or list): 4D mini-batch of shape (B x C x H x W),
            or a list of images all of the same size.
        nrow (int, optional): Number of images displayed in each row of the grid.
            The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
        padding (int, optional): amount of padding between tiles. Default: ``2``.
        normalize (bool, optional): If True, shift the image to the range (0, 1),
            by the min and max values specified by :attr:`range`. Default: ``False``.
        range (tuple, optional): tuple (min, max) where min and max are numbers,
            then these numbers are used to normalize the image. By default, min and max
            are computed from the tensor.
        scale_each (bool, optional): If ``True``, scale each image in the batch of
            images separately rather than the (min, max) over all images. Default: ``False``.
        pad_value (float, optional): Value for the padded pixels. Default: ``0``.

    Returns:
        ndarray: C x H' x W' grid of tiles, or the squeezed single image when
        the batch contains exactly one element.
    """
    if not (isinstance(tensor, np.ndarray) or
            (isinstance(tensor, list) and all(isinstance(t, np.ndarray) for t in tensor))):
        raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))

    # if list of tensors, convert to a 4D mini-batch Tensor
    if isinstance(tensor, list):
        tensor = np.stack(tensor, axis=0)

    if tensor.ndim == 2:  # single image H x W -> 1 x H x W
        tensor = np.expand_dims(tensor, axis=0)
    if tensor.ndim == 3:  # single image
        if tensor.shape[0] == 1:  # if single-channel, convert to 3-channel
            tensor = np.concatenate((tensor, tensor, tensor), axis=0)
        tensor = np.expand_dims(tensor, axis=0)

    # BUGFIX: in a 4D batch the channel axis is axis 1, not axis 0.  The
    # original tested ``tensor.shape[0] == 1``, which (a) never expanded a
    # batch of single-channel images and (b) wrongly tripled the channels of
    # a batch holding exactly one RGB image (1 x 3 x H x W -> 1 x 9 x H x W).
    if tensor.ndim == 4 and tensor.shape[1] == 1:  # batch of single-channel images
        tensor = np.concatenate((tensor, tensor, tensor), axis=1)

    if normalize is True:
        tensor = tensor.copy()  # avoid modifying tensor in-place
        if range is not None:
            assert isinstance(range, tuple), \
                "range has to be a tuple (min, max) if specified. min and max are numbers"

        def norm_ip(img, min, max):
            # Mutates through the view so the caller's array is updated.
            img[:] = np.clip(img, min, max)
            img[:] = (img - min) / (max - min + 1e-8)

        def norm_range(t, range):
            if range is not None:
                return norm_ip(t, range[0], range[1])
            else:
                return norm_ip(t, float(t.min()), float(t.max()))

        if scale_each is True:
            for t in tensor:  # loop over mini-batch dimension
                norm_range(t, range)
        else:
            norm_range(tensor, range)

    if tensor.shape[0] == 1:
        return tensor.squeeze(0)

    # make the mini-batch of images into a grid
    nmaps = tensor.shape[0]
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.shape[2] + padding), int(tensor.shape[3] + padding)
    num_channels = tensor.shape[1]
    grid = np.ones([num_channels, height * ymaps + padding, width * xmaps + padding]) * pad_value
    # NOTE: the builtin ``range`` is shadowed by the keyword argument, so the
    # tiles are placed with an explicit counter instead of range() loops.
    k = 0
    total_tiles = xmaps * ymaps
    while k < nmaps and k < total_tiles:
        y, x = divmod(k, xmaps)
        s = [slice(None)] * grid.ndim
        s[1] = slice(y * height + padding, (y + 1) * height)
        s[2] = slice(x * width + padding, (x + 1) * width)
        grid[tuple(s)] = tensor[k].copy()
        k = k + 1
    return grid
"numpy.stack",
"numpy.ones",
"numpy.expand_dims",
"numpy.clip",
"numpy.concatenate"
] | [((1977, 2001), 'numpy.stack', 'np.stack', (['tensor'], {'axis': '(0)'}), '(tensor, axis=0)\n', (1985, 2001), True, 'import numpy as np\n'), ((2067, 2097), 'numpy.expand_dims', 'np.expand_dims', (['tensor'], {'axis': '(0)'}), '(tensor, axis=0)\n', (2081, 2097), True, 'import numpy as np\n'), ((2302, 2332), 'numpy.expand_dims', 'np.expand_dims', (['tensor'], {'axis': '(0)'}), '(tensor, axis=0)\n', (2316, 2332), True, 'import numpy as np\n'), ((2426, 2474), 'numpy.concatenate', 'np.concatenate', (['(tensor, tensor, tensor)'], {'axis': '(1)'}), '((tensor, tensor, tensor), axis=1)\n', (2440, 2474), True, 'import numpy as np\n'), ((3640, 3714), 'numpy.ones', 'np.ones', (['[num_channels, height * ymaps + padding, width * xmaps + padding]'], {}), '([num_channels, height * ymaps + padding, width * xmaps + padding])\n', (3647, 3714), True, 'import numpy as np\n'), ((2236, 2284), 'numpy.concatenate', 'np.concatenate', (['(tensor, tensor, tensor)'], {'axis': '(0)'}), '((tensor, tensor, tensor), axis=0)\n', (2250, 2284), True, 'import numpy as np\n'), ((2826, 2848), 'numpy.clip', 'np.clip', (['img', 'min', 'max'], {}), '(img, min, max)\n', (2833, 2848), True, 'import numpy as np\n')] |
from pytest import fixture, mark
import os
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import Pipeline
from agape.ml.classifier import Classifier, SVClassifier, RFClassifier
random_state = 0  # fixed seed so fixture predictions/probabilities stay reproducible

# Small synthetic binary-classification problem shared by every fixture/test below.
X, y = make_classification(n_samples=10, n_features=5,
                           random_state=random_state)
@fixture(scope='module')
def Clf():
    """Module-scoped `Classifier` fitted through the plain `fit` API."""
    base_estimator = LogisticRegression(random_state=random_state)
    one_vs_rest = OneVsRestClassifier(base_estimator, n_jobs=-1)
    classifier = Classifier(one_vs_rest, scale=True)
    classifier.fit(X, y)
    return classifier
@fixture(scope='module')
def ClfGS():
    """Module-scoped `Classifier` fitted through `grid_search`."""
    base_estimator = LogisticRegression(random_state=random_state)
    one_vs_rest = OneVsRestClassifier(base_estimator, n_jobs=-1)
    classifier = Classifier(one_vs_rest, scale=True)
    param_grid = {'C': np.logspace(-1, 1, 3)}
    classifier.grid_search(X, y, param_grid)
    return classifier
class TestClassifier:
    """Regression tests for the generic `Classifier` wrapper (fixtures `Clf`/`ClfGS`)."""

    def test_predict(self, Clf):
        # Deterministic because the data generation and estimator share a fixed seed.
        assert np.array_equal(Clf.predict(X), [0, 0, 1, 0, 1, 1, 0, 1, 1, 0])

    def test_predict_proba(self, Clf):
        # Probabilities pinned from a known-good run (pure regression check).
        expected = np.array(
            [[0.86175027, 0.13824973],
             [0.92458054, 0.07541946],
             [0.02817212, 0.97182788],
             [0.83849173, 0.16150827],
             [0.2650148, 0.7349852],
             [0.25549562, 0.74450438],
             [0.75834918, 0.24165082],
             [0.0713748, 0.9286252],
             [0.40150536, 0.59849464],
             [0.67087362, 0.32912638]])
        assert np.allclose(Clf.predict_proba(X), expected)

    def test_accuracy(self, Clf):
        Clf.predict(X)
        assert Clf.accuracy(y) == 1.0
        # Overwrite the stored predictions to exercise the non-perfect-accuracy path.
        Clf.predictions = np.array([0, 0, 1, 0, 0, 0, 0, 1, 1, 1])
        assert np.allclose(Clf.accuracy(y), 0.699999)

    def test_get_clf_Clf(self, Clf):
        # The `fit` path must not create the grid-search attribute.
        assert hasattr(Clf, 'clf')
        assert not hasattr(Clf, 'clf_grid_search')
        clf = Clf.get_clf()
        assert isinstance(clf, Pipeline)

    def test_get_clf_ClfGS(self, ClfGS):
        # The `grid_search` path stores the fitted search object instead.
        assert hasattr(ClfGS, 'clf_grid_search')
        clf = ClfGS.get_clf()
        assert isinstance(clf, Pipeline)
@fixture(scope='module')
def SVClf():
    """Module-scoped support-vector classifier, already fitted on (X, y)."""
    svc = SVClassifier(random_state=random_state)
    svc.fit(X, y)
    return svc
class TestSVClassifier:
    """Regression tests for `SVClassifier`."""

    def test_predict(self, SVClf):
        assert np.array_equal(SVClf.predict(X), [0, 0, 1, 0, 1, 1, 0, 1, 1, 0])

    @mark.skipif('TRAVIS' in os.environ, reason='Test fails on Travis')
    def test_predict_proba(self, SVClf):
        # Probabilities pinned from a known-good local run; skipped on Travis
        # where the values differ (see skip marker above).
        expected = np.array(
            [[0.96709295, 0.03290705],
             [0.96708461, 0.03291539],
             [0.00886844, 0.99113156],
             [0.95934577, 0.04065423],
             [0.00885798, 0.99114202],
             [0.00885607, 0.99114393],
             [0.92929739, 0.07070261],
             [0.00885798, 0.99114202],
             [0.01053052, 0.98946948],
             [0.76418338, 0.23581662]])
        print(SVClf.predict_proba(X))
        assert np.allclose(SVClf.predict_proba(X), expected)
@fixture(scope='module')
def RFClf():
    """Module-scoped random-forest classifier, already fitted on (X, y)."""
    forest = RFClassifier(random_state=random_state)
    forest.fit(X, y)
    return forest
class TestRFClassifier:
    """Regression tests for `RFClassifier`."""

    def test_predict(self, RFClf):
        assert np.array_equal(RFClf.predict(X), [0, 0, 1, 0, 1, 1, 0, 1, 1, 0])

    def test_predict_proba(self, RFClf):
        # Tree-vote fractions pinned from a seeded run.
        expected = np.array(
            [[1.0, 0.0],
             [0.9, 0.1],
             [0.1, 0.9],
             [0.8, 0.2],
             [0.1, 0.9],
             [0.2, 0.8],
             [0.8, 0.2],
             [0.2, 0.8],
             [0.2, 0.8],
             [0.8, 0.2]])
        print(RFClf.predict_proba(X))
        assert np.allclose(RFClf.predict_proba(X), expected)
| [
"numpy.logspace",
"agape.ml.classifier.SVClassifier",
"pytest.fixture",
"sklearn.datasets.make_classification",
"sklearn.linear_model.LogisticRegression",
"pytest.mark.skipif",
"numpy.array",
"agape.ml.classifier.RFClassifier"
] | [((349, 423), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(10)', 'n_features': '(5)', 'random_state': 'random_state'}), '(n_samples=10, n_features=5, random_state=random_state)\n', (368, 423), False, 'from sklearn.datasets import make_classification\n'), ((454, 477), 'pytest.fixture', 'fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (461, 477), False, 'from pytest import fixture, mark\n'), ((722, 745), 'pytest.fixture', 'fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (729, 745), False, 'from pytest import fixture, mark\n'), ((2289, 2312), 'pytest.fixture', 'fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2296, 2312), False, 'from pytest import fixture, mark\n'), ((3224, 3247), 'pytest.fixture', 'fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (3231, 3247), False, 'from pytest import fixture, mark\n'), ((2374, 2413), 'agape.ml.classifier.SVClassifier', 'SVClassifier', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (2386, 2413), False, 'from agape.ml.classifier import Classifier, SVClassifier, RFClassifier\n'), ((2594, 2660), 'pytest.mark.skipif', 'mark.skipif', (["('TRAVIS' in os.environ)"], {'reason': '"""Test fails on Travis"""'}), "('TRAVIS' in os.environ, reason='Test fails on Travis')\n", (2605, 2660), False, 'from pytest import fixture, mark\n'), ((3309, 3348), 'agape.ml.classifier.RFClassifier', 'RFClassifier', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (3321, 3348), False, 'from agape.ml.classifier import Classifier, SVClassifier, RFClassifier\n'), ((987, 1008), 'numpy.logspace', 'np.logspace', (['(-1)', '(1)', '(3)'], {}), '(-1, 1, 3)\n', (998, 1008), True, 'import numpy as np\n'), ((1257, 1538), 'numpy.array', 'np.array', (['[[0.86175027, 0.13824973], [0.92458054, 0.07541946], [0.02817212, \n 0.97182788], [0.83849173, 0.16150827], [0.2650148, 0.7349852], [\n 0.25549562, 0.74450438], 
[0.75834918, 0.24165082], [0.0713748, \n 0.9286252], [0.40150536, 0.59849464], [0.67087362, 0.32912638]]'], {}), '([[0.86175027, 0.13824973], [0.92458054, 0.07541946], [0.02817212, \n 0.97182788], [0.83849173, 0.16150827], [0.2650148, 0.7349852], [\n 0.25549562, 0.74450438], [0.75834918, 0.24165082], [0.0713748, \n 0.9286252], [0.40150536, 0.59849464], [0.67087362, 0.32912638]])\n', (1265, 1538), True, 'import numpy as np\n'), ((1836, 1876), 'numpy.array', 'np.array', (['[0, 0, 1, 0, 0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 1, 0, 0, 0, 0, 1, 1, 1])\n', (1844, 1876), True, 'import numpy as np\n'), ((2721, 3006), 'numpy.array', 'np.array', (['[[0.96709295, 0.03290705], [0.96708461, 0.03291539], [0.00886844, \n 0.99113156], [0.95934577, 0.04065423], [0.00885798, 0.99114202], [\n 0.00885607, 0.99114393], [0.92929739, 0.07070261], [0.00885798, \n 0.99114202], [0.01053052, 0.98946948], [0.76418338, 0.23581662]]'], {}), '([[0.96709295, 0.03290705], [0.96708461, 0.03291539], [0.00886844, \n 0.99113156], [0.95934577, 0.04065423], [0.00885798, 0.99114202], [\n 0.00885607, 0.99114393], [0.92929739, 0.07070261], [0.00885798, \n 0.99114202], [0.01053052, 0.98946948], [0.76418338, 0.23581662]])\n', (2729, 3006), True, 'import numpy as np\n'), ((3584, 3718), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.9, 0.1], [0.1, 0.9], [0.8, 0.2], [0.1, 0.9], [0.2, 0.8], [\n 0.8, 0.2], [0.2, 0.8], [0.2, 0.8], [0.8, 0.2]]'], {}), '([[1.0, 0.0], [0.9, 0.1], [0.1, 0.9], [0.8, 0.2], [0.1, 0.9], [0.2,\n 0.8], [0.8, 0.2], [0.2, 0.8], [0.2, 0.8], [0.8, 0.2]])\n', (3592, 3718), True, 'import numpy as np\n'), ((595, 640), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (613, 640), False, 'from sklearn.linear_model import LogisticRegression\n'), ((873, 918), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (891, 918), False, 'from 
sklearn.linear_model import LogisticRegression\n')] |
""" Unit Tests for Py-ART's retrieve/vad_michelson.py module. """
from __future__ import print_function
from numpy.testing import assert_almost_equal
import pyart
def test_velocity_azimuth_display():
    """Check the VAD retrieval against stored reference wind profiles."""
    radar = pyart.io.read(pyart.testing.NEXRAD_ARCHIVE_MSG1_FILE)
    height = [50.0, 162.5, 275.0, 387.5, 500.0]
    vad = pyart.retrieve.velocity_azimuth_display(
        radar, 'velocity', height, 16, None, 2, 'equal')

    # Reference values pinned from a known-good run.
    expected = {
        'height': height,
        'speed': [1.921647204618432, 1.3827367541946036, 1.164585840293324,
                  1.682006738330196, 2.1305502721695446],
        'direction': [356.78032862063486, 0.5040511489577615, 5.449921149639364,
                      352.1683403733644, 337.85762401398773],
        'u_wind': [0.10792796375637152, -0.012164265247251506,
                   -0.11060735435152175, 0.22919529090527493, 0.803024469916098],
        'v_wind': [-1.9186139616028124, -1.382683247187013, -1.1593214362613435,
                   -1.666318152819272, -1.9734224491876267],
    }
    for attribute, values in expected.items():
        assert_almost_equal(getattr(vad, attribute), values, 3)
| [
"numpy.testing.assert_almost_equal",
"pyart.retrieve.velocity_azimuth_display",
"pyart.io.read"
] | [((222, 275), 'pyart.io.read', 'pyart.io.read', (['pyart.testing.NEXRAD_ARCHIVE_MSG1_FILE'], {}), '(pyart.testing.NEXRAD_ARCHIVE_MSG1_FILE)\n', (235, 275), False, 'import pyart\n'), ((977, 1093), 'pyart.retrieve.velocity_azimuth_display', 'pyart.retrieve.velocity_azimuth_display', (['test_radar', 'velocity', 'height', 'valid_ray_min', 'gatefilter', 'window', 'weight'], {}), '(test_radar, velocity, height,\n valid_ray_min, gatefilter, window, weight)\n', (1016, 1093), False, 'import pyart\n'), ((1195, 1237), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['vad.height', 'height', '(3)'], {}), '(vad.height, height, 3)\n', (1214, 1237), False, 'from numpy.testing import assert_almost_equal\n'), ((1242, 1282), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['vad.speed', 'speed', '(3)'], {}), '(vad.speed, speed, 3)\n', (1261, 1282), False, 'from numpy.testing import assert_almost_equal\n'), ((1287, 1335), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['vad.direction', 'direction', '(3)'], {}), '(vad.direction, direction, 3)\n', (1306, 1335), False, 'from numpy.testing import assert_almost_equal\n'), ((1340, 1382), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['vad.u_wind', 'u_wind', '(3)'], {}), '(vad.u_wind, u_wind, 3)\n', (1359, 1382), False, 'from numpy.testing import assert_almost_equal\n'), ((1387, 1429), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['vad.v_wind', 'v_wind', '(3)'], {}), '(vad.v_wind, v_wind, 3)\n', (1406, 1429), False, 'from numpy.testing import assert_almost_equal\n')] |
import math
import os
from typing import Tuple, List
import albumentations as A
import cv2
import numpy as np
import pandas as pd
from pytorch_toolbelt.utils import fs
from pytorch_toolbelt.utils.fs import id_from_fname
from pytorch_toolbelt.utils.torch_utils import tensor_from_rgb_image
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn.utils import compute_sample_weight
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
from retinopathy.augmentations import get_train_transform, get_test_transform
def get_class_names(coarse_grading=False):
    """Return the human-readable diagnosis grade labels.

    :param coarse_grading: when True, the three intermediate grades are
        collapsed into a single 'DR (Mild/Moderate/Severe)' class.
    :return: list of class-name strings, index == grade
    """
    if coarse_grading:
        return [
            'No DR',
            'DR (Mild/Moderate/Severe)',
            'Proliferative DR'
        ]
    return [
        'No DR',
        'Mild',
        'Moderate',
        'Severe',
        'Proliferative DR'
    ]
UNLABELED_CLASS = -100
class RetinopathyDataset(Dataset):
    """Retina-image dataset with optional integer diagnosis targets.

    Each item is a dict with 'image' (tensor), 'image_id' (str), 'targets',
    and optionally 'meta_features' (log size / aspect ratio / channel means).
    """

    def __init__(self, images, targets,
                 transform: A.Compose,
                 target_as_array=False,
                 dtype=int,
                 meta_features=False):
        # BUGFIX: the original used ``if targets:`` / ``if self.targets:``.
        # Once the list is converted to a numpy array, truth-testing it raises
        # "truth value of an array is ambiguous" in __getitem__ whenever more
        # than one target is supplied.  Use explicit None checks instead, the
        # same convention RetinopathyDatasetV2 already follows.
        if targets is not None:
            targets = np.array(targets)
            unique_targets = set(targets)
            if len(unique_targets.difference({0, 1, 2, 3, 4, UNLABELED_CLASS})):
                raise ValueError('Unexpected targets in Y ' + str(unique_targets))

        self.meta_features = meta_features
        self.images = np.array(images)
        self.targets = targets
        self.transform = transform  # image preprocessing (e.g. cropping the black region)
        self.target_as_array = target_as_array
        self.dtype = dtype

    def __len__(self):
        return len(self.images)

    def __getitem__(self, item):
        image = cv2.imread(self.images[item])  # Read with OpenCV instead PIL. It's faster
        if image is None:
            raise FileNotFoundError(self.images[item])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        height, width = image.shape[:2]

        diagnosis = UNLABELED_CLASS
        if self.targets is not None:
            diagnosis = self.targets[item]

        # The albumentations pipeline may also remap the diagnosis (e.g. for
        # label-aware augmentations), so read it back from the result.
        data = self.transform(image=image, diagnosis=diagnosis)
        diagnosis = data['diagnosis']
        data = {'image': tensor_from_rgb_image(data['image']),
                'image_id': id_from_fname(self.images[item])}

        if self.meta_features:
            log_height = math.log(height)
            log_width = math.log(width)
            aspect_ratio = log_height / log_width
            mean = np.mean(image, axis=(0, 1))
            meta_features = np.array([
                log_height,
                log_width,
                aspect_ratio,
                mean[0],
                mean[1],
                mean[2]
            ])
            data['meta_features'] = meta_features

        diagnosis = self.dtype(diagnosis)
        if self.target_as_array:
            data['targets'] = np.array([diagnosis])
        else:
            data['targets'] = diagnosis

        return data
class RetinopathyDatasetV2(Dataset):
    """
    Implementation of dataset for use with unsupervised learning.

    Unlike RetinopathyDataset, each item carries both the augmented image
    ('image') and a normalized, un-augmented copy ('original').
    """

    def __init__(self, images, targets,
                 transform: A.Compose,
                 normalize: A.Compose,
                 target_as_array=False,
                 dtype=int,
                 meta_features=False):
        if targets is not None:
            targets = np.array(targets)
            unique_targets = set(targets)
            # NOTE: -100 is the module's UNLABELED_CLASS sentinel.
            if len(unique_targets.difference({0, 1, 2, 3, 4, -100})):
                raise ValueError('Unexpected targets in Y ' + str(unique_targets))

        self.meta_features = meta_features
        self.images = np.array(images)
        self.targets = targets
        self.transform = transform  # augmentation pipeline
        self.normalize = normalize  # normalization-only pipeline for 'original'
        self.target_as_array = target_as_array
        self.dtype = dtype

    def __len__(self):
        return len(self.images)

    def __getitem__(self, item):
        image = cv2.imread(self.images[item])  # Read with OpenCV instead PIL. It's faster
        # NOTE(review): unlike RetinopathyDataset, a missing file is not
        # detected here — cv2.imread returning None would fail in cvtColor.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        height, width = image.shape[:2]
        original = self.normalize(image=image)['image']
        transformed = self.transform(image=image)['image']

        data = {'image': tensor_from_rgb_image(transformed),
                'original': tensor_from_rgb_image(original),
                'image_id': id_from_fname(self.images[item])}

        if self.meta_features:
            # Simple per-image statistics: log dimensions, aspect ratio and
            # per-channel means of the raw (pre-augmentation) image.
            log_height = math.log(height)
            log_width = math.log(width)
            aspect_ratio = log_height / log_width
            mean = np.mean(image, axis=(0, 1))
            meta_features = np.array([
                log_height,
                log_width,
                aspect_ratio,
                mean[0],
                mean[1],
                mean[2]
            ])
            data['meta_features'] = meta_features

        if self.targets is not None:
            target = self.dtype(self.targets[item])
            if self.target_as_array:
                data['targets'] = np.array([target])
            else:
                data['targets'] = target

        return data
def count_targets(targets):
    """Return per-class sample counts, indexed by diagnosis grade."""
    labels = np.array(targets)
    num_classes = len(get_class_names())
    return [np.sum(labels == grade) for grade in range(num_classes)]
def split_train_valid(x, y, fold=None, folds=4, random_state=42):
    """
    Common train/test split function.

    When ``fold`` is given, returns the ``fold``-th split of a ``folds``-way
    StratifiedKFold; otherwise a single stratified shuffle split with a
    ``1/folds`` validation fraction.

    :param x: samples (indexable by numpy integer arrays)
    :param y: labels
    :param fold: fold index in [0, folds) or None
    :param folds: number of folds
    :param random_state: seed for the splitter
    :return: (train_x, valid_x, train_y, valid_y)
    """
    if fold is None:
        train_x, valid_x, train_y, valid_y = train_test_split(
            x, y,
            random_state=random_state,
            test_size=1.0 / folds,
            shuffle=True,
            stratify=y)
    else:
        assert 0 <= fold < folds
        train_x, train_y = [], []
        valid_x, valid_y = [], []
        splitter = StratifiedKFold(n_splits=folds, random_state=random_state, shuffle=True)
        for current_fold, (train_index, test_index) in enumerate(splitter.split(x, y)):
            if current_fold == fold:
                train_x, train_y = x[train_index], y[train_index]
                valid_x, valid_y = x[test_index], y[test_index]
                break

    assert len(train_x) and len(train_y) and len(valid_x) and len(valid_y)
    assert len(train_x) == len(train_y)
    assert len(valid_x) == len(valid_y)
    return train_x, valid_x, train_y, valid_y
import boto3
import botocore
from botocore.exceptions import ClientError
bucket = "dataset-retinopathy"  # S3 bucket holding the dataset CSVs/images
region_name="us-east-1"  # AWS region of the bucket above
def download_from_s3(s3_filename, local_path="test"):
    """Download ``s3_filename`` from the dataset bucket to ``local_path``.

    A missing object (HTTP 404) is reported and swallowed; any other S3
    error is re-raised.
    """
    s3_client = boto3.client('s3', region_name=region_name)
    print("Downloading file {} to {}".format(s3_filename, local_path))
    try:
        s3_client.download_file(bucket, Key=s3_filename, Filename=local_path)
    # FIX: use the directly-imported ClientError.  The original caught
    # ``botocore.exceptions.ClientError``, which only resolves because the
    # earlier from-import happened to load the submodule as a side effect.
    except ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            raise
def get_aptos2019_train(data_dir):
    """Load APTOS-2019 train image paths and labels, dropping known bad ids."""
    # downloading from s3
    # download_from_s3(s3_filename="aptos-2019/train.csv", local_path=os.path.join(data_dir, 'train.csv'))
    df = pd.read_csv(os.path.join(data_dir, 'train.csv'))

    # Remove duplicates and wrong annotations
    ids_to_skip = set(APTOS2019_MISLABELED_DUPLICATES2 + APTOS2019_DUPLICATES)
    size_before = len(df)
    df = df[~df['id_code'].isin(ids_to_skip)]
    print('Dropped', size_before - len(df), 'bad samples')

    image_paths = df['id_code'].apply(
        lambda code: os.path.join(data_dir, 'train_images_768', f'{code}.png'))
    x = np.array(image_paths)
    y = np.array(df['diagnosis'], dtype=int)
    return x, y
def get_aptos2019_test(data_dir):
    """Load APTOS-2019 test image paths; every label is UNLABELED_CLASS."""
    # download_from_s3(s3_filename="aptos-2019/test.csv", local_path=os.path.join(data_dir, 'test.csv'))
    df = pd.read_csv(os.path.join(data_dir, 'test.csv'))
    image_paths = df['id_code'].apply(
        lambda code: os.path.join(data_dir, 'test_images_768', f'{code}.png'))
    x = np.array(image_paths)
    y = np.full(len(x), UNLABELED_CLASS, dtype=int)
    return x, y
def get_aptos2015_train(dataset_dir, healthy_eye_fraction=1):
    """Load the APTOS-2015 train split, optionally subsampling healthy eyes.

    :param dataset_dir: directory with train_labels.csv and train_images_768/
    :param healthy_eye_fraction: fraction of grade-0 (healthy) rows to keep;
        1 keeps everything in the original order.
    :return: (image paths array, int labels array)
    """
    df = pd.read_csv(os.path.join(dataset_dir, 'train_labels.csv'))
    df['image_path'] = df['id_code'].apply(
        lambda code: os.path.join(dataset_dir, 'train_images_768', f'{code}.png'))
    x = np.array(df['image_path'])
    y = np.array(df['diagnosis'], dtype=int)
    if healthy_eye_fraction != 1:
        healthy_mask = y == 0
        keep = int(np.sum(healthy_mask) * healthy_eye_fraction)
        # Non-healthy rows first, then the retained healthy prefix.
        x = np.concatenate((x[~healthy_mask], x[healthy_mask][:keep]))
        y = np.concatenate((y[~healthy_mask], y[healthy_mask][:keep]))
    return x, y
def get_aptos2015_test_public(dataset_dir, healthy_eye_fraction=1):
    """Load the 'Public' usage rows of the APTOS-2015 test split.

    :param dataset_dir: directory with test_labels.csv and test_images_768/
    :param healthy_eye_fraction: fraction of grade-0 (healthy) rows to keep
    :return: (image paths array, int labels array)
    """
    df = pd.read_csv(os.path.join(dataset_dir, 'test_labels.csv'))
    df = df[df['Usage'] == 'Public']
    df['image_path'] = df['id_code'].apply(
        lambda code: os.path.join(dataset_dir, 'test_images_768', f'{code}.png'))
    x = np.array(df['image_path'])
    y = np.array(df['diagnosis'], dtype=int)
    if healthy_eye_fraction != 1:
        healthy_mask = y == 0
        keep = int(np.sum(healthy_mask) * healthy_eye_fraction)
        x = np.concatenate((x[~healthy_mask], x[healthy_mask][:keep]))
        y = np.concatenate((y[~healthy_mask], y[healthy_mask][:keep]))
    return x, y
def get_aptos2015_test_private(dataset_dir, healthy_eye_fraction=1):
    """Load the 'Private' usage rows of the APTOS-2015 test split.

    :param dataset_dir: directory with test_labels.csv and test_images_768/
    :param healthy_eye_fraction: fraction of grade-0 (healthy) rows to keep
    :return: (image paths array, int labels array)
    """
    df = pd.read_csv(os.path.join(dataset_dir, 'test_labels.csv'))
    df = df[df['Usage'] == 'Private']
    df['image_path'] = df['id_code'].apply(
        lambda code: os.path.join(dataset_dir, 'test_images_768', f'{code}.png'))
    x = np.array(df['image_path'])
    y = np.array(df['diagnosis'], dtype=int)
    if healthy_eye_fraction != 1:
        healthy_mask = y == 0
        keep = int(np.sum(healthy_mask) * healthy_eye_fraction)
        x = np.concatenate((x[~healthy_mask], x[healthy_mask][:keep]))
        y = np.concatenate((y[~healthy_mask], y[healthy_mask][:keep]))
    return x, y
def get_idrid_train(dataset_dir):
    """Load IDRiD train image paths and integer diagnosis labels."""
    df = pd.read_csv(os.path.join(dataset_dir, 'train_labels.csv'))
    df['image_path'] = df['id_code'].apply(
        lambda code: os.path.join(dataset_dir, 'train_images_768', f'{code}.png'))
    return np.array(df['image_path']), np.array(df['diagnosis'], dtype=int)
def get_idrid_test(dataset_dir):
    """Load IDRiD test image paths and integer diagnosis labels."""
    df = pd.read_csv(os.path.join(dataset_dir, 'test_labels.csv'))
    df['image_path'] = df['id_code'].apply(
        lambda code: os.path.join(dataset_dir, 'test_images_768', f'{code}.png'))
    return np.array(df['image_path']), np.array(df['diagnosis'], dtype=int)
def get_messidor(dataset_dir, include_grade_3=False):
    """Load Messidor image paths and labels.

    Grade 3 of the Messidor grading protocol includes neovascularization,
    which is stage 4 on the APTOS scale; for consistency those rows are
    dropped unless ``include_grade_3`` is set.
    """
    df = pd.read_csv(os.path.join(dataset_dir, 'train_labels.csv'))
    df['image_path'] = df['id_code'].apply(
        lambda code: os.path.join(dataset_dir, 'train_images_768', f'{code}.png'))
    x = np.array(df['image_path'])
    y = np.array(df['diagnosis'], dtype=int)
    if not include_grade_3:
        keep_mask = y != 3
        x = x[keep_mask]
        y = y[keep_mask]
    return x, y
APTOS2015_NOISE = {
# https://www.kaggle.com/c/diabetic-retinopathy-detection/discussion/1440280229
'25867_right_0': UNLABELED_CLASS,
'25867_left_0': UNLABELED_CLASS,
# https://www.kaggle.com/c/diabetic-retinopathy-detection/discussion/14402#80264
'26064_left': UNLABELED_CLASS,
'25360_left': UNLABELED_CLASS,
'22026_left': 0,
'21118_left': 0,
# https://www.kaggle.com/c/diabetic-retinopathy-detection/discussion/1440280264
'31202_right': 0,
'31160_left': 0,
'27481_right': 0,
'26064_right': 0,
'37244_right': 0,
'34689_left': 0,
'32541_left': 0,
'32253_right': 0,
'43457_left': 0,
'42130_left': 0,
'41693_right': 0,
'41176_left': 0,
'766_left': UNLABELED_CLASS,
'2881_left': 0,
'2516_left': 0,
'1986_left': 0,
'1557_left': UNLABELED_CLASS,
'7470_right': 0,
'7445_left': 0,
'6511_right': 0,
'3829_left': UNLABELED_CLASS,
'20271_left': 0,
'18972_left': UNLABELED_CLASS,
'18085_right': 0,
'15222_left': 0
}
APTOS2019_CORRECTIONS = {
'06b71823f9cd': 4,
'c1e6fa1ad314': 4,
'f0098e9d4aee': 4, # ??
'29f44aea93a4': 1, # ?
# # Blurry
# '6f923b60934b': UNLABELED_CLASS,
# # '041f09eec1e8': UNLABELED_CLASS,
# '22221cf5c7935': UNLABELED_CLASS,
}
APTOS2019_DUPLICATES = [
'1632c4311fc9',
'36041171f441',
'6e92b1c5ac8e',
'89ee1fa16f90',
'111898ab463d',
'7877be80901c',
'f9e1c439d4c8',
'b91ef82e723a',
'4ce74e5eb51d',
'aeed1f251ceb',
'5a36cea278ae',
'91b6ebaa3678',
'd994203deb64',
'7d261f986bef',
'3044022c6969',
'14515b8f19b6',
'f9ecf1795804',
'7550966ef777',
'f920ccd926db',
'2b48daf24be0',
'bd5013540a13',
'dc3c0d8ee20b',
'e135d7ba9a0e',
'ea05c22d92e9',
'cd3fd04d72f5',
'0161338f53cc',
'3a1d3ce00f0c',
'9b32e8ef0ca0',
'98104c8c67eb',
'1f07dae3cadb',
'530d78467615',
'f6f3ea0d2693',
'd567a1a22d33',
'bb5083fae98f',
'4d7d6928534a',
'14e3f84445f7',
'00cb6555d108',
'8a759f94613a',
'05a5183c92d0',
'b376def52ccc',
'9e3510963315',
'be161517d3ac',
'81b0a2651c45',
'bb7e0a2544cd',
'5e7db41b3bee',
'26fc2358a38d',
'76cfe8967f7d'
]
# Here we list what should be ignored
APTOS2019_MISLABELED_DUPLICATES2 = [
# Need resolve
'6b00cb764237', # 4
'64678182d8a8', # 2
# Need resolve
'8273fdb4405e', # 2
'f0098e9d4aee', # 1
# Need resolve
'd801c0a66738', # 2
'68332fdcaa70', # 4
# Need resolve
'ba735b286d62', # 0
'ed3a0fc5b546', # 4
# Need resolve
'36677b70b1ef', # 2
'7bf981d9c7fe', # 0
# Need resolve
'19e350c7c83c', # 3
'19722bff5a09', # 2
# Need resolve
'435d900fa7b2', # 3
'1006345f70b7', # 0
# Need resolve
'278aa860dffd', # 2
'f066db7a2efe', # 0
# Need resolve
'f4d3777f2710', # 0
'5dc23e440de3', # 1
# Need resolve
'a4012932e18d', # 1
'906d02fb822d', # 0
# Need resolve
'8fc09fecd22f', # 4
'd1cad012a254', # 0
# Need resolve
'7a0cff4c24b2', # 2
'86baef833ae0', # 0
# Need resolve
'8ef2eb8c51c4', # 0
'8446826853d0', # 2
# Need resolve
'ca6842bfcbc9', # 0
'c027e5482e8c', # 3
'7a3ea1779b13', # 2
'a8582e346df0', # 2
# Need resolve
'9a3c03a5ad0f', # 1
'f03d3c4ce7fb', # 0
# Need resolve
'1a1b4b2450ca', # 0
'92b0d27fc0ec', # 3
# Need resolve
'3c53198519f7', # 0
'1c5e6cdc7ee1', # 1
# Need resolve
'8cb6b0efaaac', # 0
'42a850acd2ac', # 0
'51131b48f9d4', # 4
# Need resolve
'4a44cc840ebe', # 2
'0cb14014117d', # 0
# Need resolve
'29f44aea93a4', # 0
'7e6e90a93aa5', # 2
# Need resolve
'7b691d9ced34', # 2
'd51c2153d151', # 4
# Need resolve
'9bf060db8376', # 2
'4fecf87184e6', # 0
'f7edc074f06b', # 0
# Need resolve
'aca88f566228', # 0
'c05b7b4c22fe', # 2
# Need resolve
'878a3a097436', # 2
'80feb1f7ca5e', # 0
# Need resolve
'46cdc8b685bd', # 1
'e4151feb8443', # 0
# Need resolve
'ea9e0fb6fb0b', # 2
'23d7ca170bdb', # 0
# Need resolve
'3dbfbc11e105', # 4
'd0079cc188e9', # 0
# Need resolve
'4d9fc85a8259', # 4
'16ce555748d8', # 0
# Need resolve
'79ce83c07588', # 1
'71c1a3cdbe47', # 0
# Need resolve
'c8d2d32f7f29', # 1
'034cb07a550f', # 0
# Need resolve
'38fe9f854046', # 4
'1dfbede13143', # 2
# Need resolve
'98e8adcf085c', # 0
'026dcd9af143', # 1
# Need resolve
'e12d41e7b221', # 0
'bacfb1029f6b', # 4
# Need resolve
'b13d72ceea26', # 0
'da0a1043abf7', # 2
# Need resolve
'0c7e82daf5a0', # 1
'3e86335bc2fd', # 2
# Need resolve
'6165081b9021', # 0
'42985aa2e32f', # 4
# Need resolve
'9c5dd3612f0c', # 0
'c9f0dc2c8b43', # 2
# Need resolve
'521d3e264d71', # 0
'fe0fc67c7980', # 4
# Need resolve
'e8d1c6c07cf2', # 1
'f23902998c21', # 0
# Need resolve
'155e2df6bfcf', # 0
'415f2d2bd2a1', # 4
# Need resolve
'9b7b6e4db1d5', # 0
'9f4132bd6ed6', # 2
# Need resolve
'65e51e18242b', # 0
'cc12453ea915', # 1
# Need resolve
'76095c338728', # 4
'bd34a0639575', # 0
'de55ed25e0e8', # 2
'84b79243e430', # 2
# Need resolve
'ff0740cb484a', # 2
'b8ac328009e0', # 0
# Need resolve
'b9127e38d9b9', # 3
'e39b627cf648', # 0
# Need resolve
'f1a761c68559', # 3
'ff52392372d3', # 0
# Need resolve
'36ec36c301c1', # 2
'26e231747848', # 0
# Need resolve
'0dce95217626', # 1
'94372043d55b', # 4
# Need resolve
'badb5ff8d3c7', # 1
'2923971566fe', # 0
# Need resolve
'33778d136069', # 2
'4ccfa0b4e96c', # 3
# Need resolve
'86b3a7929bec', # 2
'1b4625877527', # 0
# Need resolve
'43fb6eda9b97', # 1
'e4e343eaae2a', # 0
# Need resolve
'135575dc57c9', # 2
'2c2aa057afc5', # 1
# Need resolve
'40e9b5630438', # 3
'77a9538b8362', # 1
# Need resolve
'a8b637abd96b', # 0
'e2c3b037413b', # 2
# Need resolve
'1b862fb6f65d', # 0
'0a4e1a29ffff', # 2
# Need resolve
'bf7b4eae7ad0', # 0
'496155f71d0a', # 4
# Need resolve
'81914ceb4e74', # 4
'd6b109c82067', # 0
'1b398c0494d1', # 0
# Need resolve
'11242a67122d', # 2
'65c958379680', # 0
# Need resolve
'ea15a290eb96', # 1
'1c9c583c10bf', # 0
# Need resolve
'668a319c2d23', # 0
'4d167ca69ea8', # 2
# Need resolve
'7525ebb3434d', # 0
'3cd801ffdbf0', # 2
# Need resolve
'1ee1eb7943db', # 2
'c2d2b4f536da', # 0
# Need resolve
'857002ed4e49', # 0
'840527bc6628', # 2
# Need resolve
'a3b2e93d058b', # 1
'3fd7df6099e3', # 0
# Need resolve
'c546670d9684', # 0
'30cab14951ac', # 2
# Need resolve
'60f15dd68d30', # 0
'772af553b8b7', # 1
'fcc6aa6755e6', # 0
# Need resolve
'3b018e8b7303', # 2
'0243404e8a00', # 1
'3ddb86eb530e', # 0
# Need resolve
'1e8a1fdee5b9', # 0
'a47432cd41e7', # 3
'b8ebedd382de', # 0
# Need resolve
'7005be54cab1', # 3
'3ee4841936ef', # 2
# Need resolve
'a7b0d0c51731', # 0
'1cb814ed6332', # 2
# Need resolve
'fea14b3d44b0', # 2
'80d24897669f', # 0
# Need resolve
'35aa7f5c2ec0', # 4
'1c4d87baaffc', # 0
# Need resolve
'7e980424868e', # 0
'b10fca20c885', # 2
# Need resolve
'98f7136d2e7a', # 2
'e740af6ac6ea', # 0
# Need resolve
'df4913ca3712', # 0
'd51b3fe0fa1b', # 2
# Need resolve
'3ca637fddd56', # 0
'3b4a5fcbe5e0', # 3
# Need resolve
'e037643244b7', # 0
'5b76117c4bcb', # 2
# Need resolve
'2f7789c1e046', # 2
'a8e88d4891c4', # 1
# Need resolve
'48c49f662f7d', # 0
'6cb98da77e3e', # 1
# Need resolve
'a56230242a95', # 2
'1c6d119c3d70', # 0
# Need resolve
'9f1efb799b7b', # 0
'cd4e7f9fa1a9', # 2
# Need resolve
'5eb311bcb5f9', # 3
'a9e984b57556', # 2
# Need resolve
'ce887b196c23', # 3
'e7a7187066ad', # 2
# Need resolve
'1e143fa3de57', # 0
'144b01e7b993', # 2
# Need resolve
'8acffaf1f4b9', # 2
'1411c8ab7161', # 0
# Need resolve
'1638404f385c', # 4
'576e189d23d4', # 2
# Need resolve
'9f1b14dfa14c', # 0
'435414ccccf7', # 2
# Need resolve
'6c3745a222da', # 4
'eadc57064154', # 2
# Need resolve
'2b21d293fdf2', # 4
'2a3a1ed1c285', # 3
# Need resolve
'd144144a2f3f', # 1
'b06dabab4f09', # 2
# Need resolve
'80964d8e0863', # 3
'ab50123abadb', # 1
# Need resolve
'fda39982a810', # 2
'0ac436400db4', # 0
# Need resolve
'd85ea1220a03', # 0
'bfefa7344e7d', # 0
'8688f3d0fcaf', # 2
# Need resolve
'e1fb532f55df', # 1
'b019a49787c1', # 0
# Need resolve
'cd93a472e5cd', # 1
'd035c2bd9104', # 0
]
def append_train_test(existing, to_add):
    """Merge one dataset split into an accumulated split, in place.

    ``existing`` is ``(train_x, train_y, valid_x, valid_y)``; ``to_add`` is
    ``(train_x, valid_x, train_y, valid_y)`` — note the different ordering,
    matching the ``train_test_split``-style tuples produced elsewhere.
    The lists inside ``existing`` are extended in place and also returned.
    """
    acc_train_x, acc_train_y, acc_valid_x, acc_valid_y = existing
    add_train_x, add_valid_x, add_train_y, add_valid_y = to_add
    for accumulator, addition in (
        (acc_train_x, add_train_x),
        (acc_train_y, add_train_y),
        (acc_valid_x, add_valid_x),
        (acc_valid_y, add_valid_y),
    ):
        accumulator.extend(addition)
    return acc_train_x, acc_train_y, acc_valid_x, acc_valid_y
def get_dataset(datasets: List[str], folds=4, data_dir='data', random_state=42):
    """
    Collects image paths and labels from several retinopathy datasets.

    :param datasets: List of dataset names, e.g. "aptos-2015-train/fold0", "messidor".
                     An optional "/<fold>" suffix (either a bare number or
                     "fold<N>") restricts the dataset to the validation split
                     of that cross-validation fold.
    :param folds: Total number of cross-validation folds.
    :param data_dir: Root directory that contains the dataset subdirectories.
    :param random_state: Seed for the train/validation split.
    :return: Tuple (all_x, all_y, sizes) of image paths, labels, and the
             number of samples contributed by each requested dataset.
    """
    all_x = []
    all_y = []
    sizes = []
    for ds in datasets:
        dataset_name = ds
        suffix = None
        if '/' in ds:
            dataset_name, suffix = ds.split('/')
        if dataset_name == 'aptos-2019-train':
            x, y = get_aptos2019_train(os.path.join(data_dir, 'aptos-2019'))
        elif dataset_name == 'aptos-2015-train':
            x, y = get_aptos2015_train(os.path.join(data_dir, 'aptos-2015'))
        elif dataset_name == 'aptos-2015-test-private':
            x, y = get_aptos2015_test_private(os.path.join(data_dir, 'aptos-2015'))
        elif dataset_name == 'aptos-2015-test-public':
            x, y = get_aptos2015_test_public(os.path.join(data_dir, 'aptos-2015'))
        elif dataset_name == 'idrid-train':
            x, y = get_idrid_train(os.path.join(data_dir, 'idrid'))
        elif dataset_name == 'idrid-test':
            x, y = get_idrid_test(os.path.join(data_dir, 'idrid'))
        elif dataset_name == 'messidor':
            x, y = get_messidor(os.path.join(data_dir, 'messidor'), include_grade_3=False)
        else:
            raise ValueError(dataset_name)
        if suffix is not None:
            # Accept both "0" and "fold0" spellings of the fold suffix.
            if suffix.startswith('fold'):
                suffix = suffix[len('fold'):]
            fold = int(suffix)
            _, valid_x, _, valid_y = split_train_valid(x, y, fold=fold, folds=folds, random_state=random_state)
            # Bug fix: images must come from valid_x; previously both x and y
            # were assigned from valid_y, pairing labels with labels.
            x = valid_x
            y = valid_y
        all_x.extend(x)
        all_y.extend(y)
        sizes.append(len(x))
    return all_x, all_y, sizes
def get_datasets_universal(
        train_on: List[str],
        valid_on: List[str],
        data_dir='data',
        image_size=(512, 512),
        augmentation='medium',
        preprocessing=None,
        target_dtype=int,
        random_state=42,
        coarse_grading=False,
        folds=4) -> Tuple[RetinopathyDataset, RetinopathyDataset, List]:
    """Build train/validation RetinopathyDataset pairs from arbitrary dataset lists.

    ``train_on`` and ``valid_on`` are dataset specifiers understood by
    :func:`get_dataset`. Returns (train_ds, valid_ds, sizes), where ``sizes``
    holds the per-dataset sample counts of the training side.
    """
    train_x, train_y, sizes = get_dataset(train_on, folds=folds, data_dir=data_dir, random_state=random_state)
    valid_x, valid_y, _ = get_dataset(valid_on, folds=folds, data_dir=data_dir, random_state=random_state)
    train_ds = RetinopathyDataset(
        train_x, train_y,
        transform=get_train_transform(image_size,
                                      augmentation=augmentation,
                                      preprocessing=preprocessing,
                                      crop_black=False),
        dtype=target_dtype)
    valid_ds = RetinopathyDataset(
        valid_x, valid_y,
        transform=get_test_transform(image_size,
                                     preprocessing=preprocessing,
                                     crop_black=False),
        dtype=target_dtype)
    return train_ds, valid_ds, sizes
def get_datasets(
        data_dir='data',
        image_size=(512, 512),
        augmentation='medium',
        preprocessing=None,
        use_aptos2019=True,
        use_aptos2019_test_pl1=False,
        use_aptos2015_pl1=False,
        use_aptos2015=False,
        use_aptos2015_test_private=False,
        use_idrid=False,
        use_messidor=False,
        use_messidor2_pl1=False,
        use_unsupervised=False,
        target_dtype=int,
        random_state=42,
        coarse_grading=False,
        fold=None,
        folds=4) -> Tuple[RetinopathyDataset, RetinopathyDataset, List]:
    """
    Assembles training and validation datasets from the selected sources.

    Each ``use_*`` flag toggles a data source; pseudolabel (``*_pl1``) sources
    read CSVs produced by a previous prediction round and keep only rows whose
    diagnosis is not the -100 "unconfident" marker. When ``use_unsupervised``
    is set, unlabeled images (marked with UNLABELED_CLASS) are appended to the
    training set and a RetinopathyDatasetV2 is returned for the train side.

    NOTE(review): ``use_aptos2019_test_pl1`` is currently unused in the body;
    it is kept for interface compatibility.

    :return: Tuple (train_ds, valid_ds, trainset_sizes). ``trainset_sizes``
             contains the number of training samples contributed by each
             enabled source (consumed by ``get_dataloaders`` for balancing).
    """
    assert use_aptos2019 or use_aptos2015 or use_aptos2015_test_private or use_idrid or use_messidor
    assert not (use_aptos2015 and use_aptos2015_pl1)
    trainset_sizes = []
    data_split = [], [], [], []
    aptos2019_dir = os.path.join(data_dir, 'aptos-2019')
    aptos2015_dir = os.path.join(data_dir, 'aptos-2015')
    if use_aptos2019:
        x, y = get_aptos2019_train(aptos2019_dir)
        split = split_train_valid(x, y, fold=fold, folds=folds, random_state=random_state)
        data_split = append_train_test(data_split, split)
        # Bug fix: record the number of training samples, not the sample list
        # itself (get_dataloaders sums/averages these values).
        trainset_sizes.append(len(split[0]))
    if use_aptos2015_pl1:
        # Add training data
        aptos2015_train_pseudolabel_round_1 = pd.read_csv(
            os.path.join(aptos2015_dir, 'aptos2015_train_pseudolabel_round_1.csv'))
        # Drop rows the pseudolabeling round marked as unconfident (-100).
        aptos2015_train_pseudolabel_round_1 = aptos2015_train_pseudolabel_round_1[
            aptos2015_train_pseudolabel_round_1['diagnosis'] != -100]
        x = np.array(aptos2015_train_pseudolabel_round_1['id_code'].apply(
            lambda image_id: os.path.join(aptos2015_dir, 'train_images_768', f'{image_id}.png')))
        y = np.array(aptos2015_train_pseudolabel_round_1['diagnosis'], dtype=int)
        # For training part of aptos2015 - add it conventionaly
        split = split_train_valid(x, y, fold=fold, folds=folds, random_state=random_state)
        data_split = append_train_test(data_split, split)
        trainset_sizes.append(len(split[0]))
        # For public test validation data add only unhealthy samples to train set
        aptos2015_test_public_pl1 = pd.read_csv(
            os.path.join(aptos2015_dir, 'aptos2015_test_public_pseudolabel_round_1.csv'))
        aptos2015_test_public_pl1 = aptos2015_test_public_pl1[aptos2015_test_public_pl1['diagnosis'] != -100]
        x = np.array(aptos2015_test_public_pl1['id_code'].apply(
            lambda image_id: os.path.join(aptos2015_dir, 'test_images_768', f'{image_id}.png')))
        y = np.array(aptos2015_test_public_pl1['diagnosis'], dtype=int)
        # For pseudolabeled data, we add only one fold of it to clear training data
        # From test set add only unhealthy
        train_x, valid_x, train_y, valid_y = split_train_valid(x, y, fold=fold, folds=folds, random_state=random_state)
        train_x = train_x[train_y > 0]
        train_y = train_y[train_y > 0]
        split = train_x, valid_x, train_y, valid_y
        data_split = append_train_test(data_split, split)
        # Bug fix: was train_x[0] (a single image path); record the count.
        trainset_sizes.append(len(train_x))
        # Add Aptos2015 private test to validation set entirely
        aptos2015_test_private_pl1 = pd.read_csv(os.path.join(aptos2015_dir, 'aptos2015_test_private_pseudolabel_round_1.csv'))
        aptos2015_test_private_pl1 = aptos2015_test_private_pl1[aptos2015_test_private_pl1['diagnosis'] != -100]
        x = np.array(aptos2015_test_private_pl1['id_code'].apply(
            lambda image_id: os.path.join(aptos2015_dir, 'test_images_768', f'{image_id}.png')))
        y = np.array(aptos2015_test_private_pl1['diagnosis'], dtype=int)
        # From test set add only unhealthy
        x = x[y > 0]
        y = y[y > 0]
        data_split = append_train_test(data_split, ([], x, [], y))
    if use_messidor2_pl1:
        messidor2_dir = os.path.join(data_dir, 'messidor_2')
        messidor2_pseudolabel_round_1 = pd.read_csv(os.path.join(messidor2_dir, 'train_labels_pseudolabel_round_1.csv'))
        confident_labels_mask = messidor2_pseudolabel_round_1['diagnosis'] != -100
        messidor2_pseudolabel_round_1 = messidor2_pseudolabel_round_1[confident_labels_mask]
        x = np.array(messidor2_pseudolabel_round_1['id_code'].apply(
            lambda image_id: os.path.join(messidor2_dir, 'train_images_768', f'{image_id}.png')))
        y = np.array(messidor2_pseudolabel_round_1['diagnosis'], dtype=int)
        split = split_train_valid(x, y, fold=fold, folds=folds, random_state=random_state)
        data_split = append_train_test(data_split, split)
        trainset_sizes.append(len(split[0]))
    if use_aptos2015:
        x, y = get_aptos2015_train(aptos2015_dir, healthy_eye_fraction=0.2)
        split = split_train_valid(x, y, fold=fold, folds=folds, random_state=random_state)
        data_split = append_train_test(data_split, split)
        trainset_sizes.append(len(split[0]))
    if use_aptos2015_test_private:
        x, y = get_aptos2015_test_private(aptos2015_dir, healthy_eye_fraction=0.2)
        split = split_train_valid(x, y, fold=fold, folds=folds, random_state=random_state)
        data_split = append_train_test(data_split, split)
        trainset_sizes.append(len(split[0]))
    if use_idrid:
        x, y = get_idrid_train(os.path.join(data_dir, 'idrid'))
        split = split_train_valid(x, y, fold=fold, folds=folds, random_state=random_state)
        data_split = append_train_test(data_split, split)
        trainset_sizes.append(len(split[0]))
    if use_messidor:
        x, y = get_messidor(os.path.join(data_dir, 'messidor'), include_grade_3=False)
        split = split_train_valid(x, y, fold=fold, folds=folds, random_state=random_state)
        data_split = append_train_test(data_split, split)
        trainset_sizes.append(len(split[0]))
    train_x, train_y, valid_x, valid_y = data_split
    if use_idrid:
        # Regardless of used datasets let's use some data from validation (holdout)
        data_idrid_test = get_idrid_test(os.path.join(data_dir, 'idrid'))
        valid_x.extend(data_idrid_test[0])
        valid_y.extend(data_idrid_test[1])
    if use_aptos2015:
        data_aptos15_public = get_aptos2015_test_public(aptos2015_dir, healthy_eye_fraction=0.1)
        valid_x.extend(data_aptos15_public[0])
        valid_y.extend(data_aptos15_public[1])
    train_transform = get_train_transform(image_size,
                                          augmentation=augmentation,
                                          preprocessing=preprocessing,
                                          crop_black=False)
    valid_transform = get_test_transform(image_size,
                                         preprocessing=preprocessing,
                                         crop_black=False)
    if coarse_grading:
        assert not use_unsupervised
        # Collapse grades {1,2,3} into a single class: 0 / mild-to-severe / proliferative.
        coarse_grading_map = np.array([0, 1, 1, 1, 2])
        train_y = coarse_grading_map[np.array(train_y)]
        valid_y = coarse_grading_map[np.array(valid_y)]
    print('Train', count_targets(train_y), "Valid", count_targets(valid_y))
    if use_unsupervised:
        aptos2019, _ = get_aptos2019_test(aptos2019_dir)
        print('Adding', len(aptos2019), 'unlabeled samples from aptos2019 (test)')
        diaretdb0_v_1_1 = fs.find_images_in_dir(os.path.join(data_dir, 'diaretdb0_v_1_1', 'train_images_768'))
        print('Adding', len(diaretdb0_v_1_1), 'unlabeled samples from diaretdb0_v_1_1')
        diaretdb1_v_1_1 = fs.find_images_in_dir(os.path.join(data_dir, 'diaretdb1_v_1_1', 'train_images_768'))
        print('Adding', len(diaretdb1_v_1_1), 'unlabeled samples from diaretdb1_v_1_1')
        origa1 = fs.find_images_in_dir(os.path.join(data_dir, 'origa', 'glaucoma_768'))
        print('Adding', len(origa1), 'unlabeled samples from origa1')
        origa2 = fs.find_images_in_dir(os.path.join(data_dir, 'origa', 'sanas_768'))
        print('Adding', len(origa2), 'unlabeled samples from origa2')
        stare = fs.find_images_in_dir(os.path.join(data_dir, 'stare', 'train_images_768'))
        print('Adding', len(stare), 'unlabeled samples from stare')
        unlabeled_samples = diaretdb0_v_1_1 + diaretdb1_v_1_1 + stare + origa1 + origa2 + aptos2019.tolist()
        if not use_messidor:
            messidor = fs.find_images_in_dir(os.path.join(data_dir, 'messidor', 'train_images_768'))
            unlabeled_samples += messidor
            print('Adding', len(messidor), 'unlabeled samples from Messidor')
        if not use_aptos2015:
            dataset_dir = os.path.join(data_dir, 'aptos-2015')
            x, y = get_aptos2015_train(dataset_dir, healthy_eye_fraction=0.1)
            unlabeled_samples += x.tolist()
            print('Adding', len(x), 'unlabeled samples from Aptos 2015')
        if not use_aptos2015_test_private:
            dataset_dir = os.path.join(data_dir, 'aptos-2015')
            x, y = get_aptos2015_test_private(dataset_dir, healthy_eye_fraction=0.1)
            unlabeled_samples += x.tolist()
            print('Adding', len(x), 'unlabeled samples from Aptos 2015 Test (Private)')
        unlabeled_targets = [UNLABELED_CLASS] * len(unlabeled_samples)
        print('Using', len(unlabeled_samples), 'unlabeled samples')
        train_x.extend(unlabeled_samples)
        train_y.extend(unlabeled_targets)
        train_ds = RetinopathyDatasetV2(train_x, train_y,
                                        transform=train_transform,
                                        normalize=valid_transform,
                                        dtype=target_dtype)
        trainset_sizes.append(len(unlabeled_samples))
    else:
        train_ds = RetinopathyDataset(train_x, train_y,
                                      transform=train_transform,
                                      dtype=target_dtype)
    valid_ds = RetinopathyDataset(valid_x, valid_y,
                                  transform=valid_transform,
                                  dtype=target_dtype)
    return train_ds, valid_ds, trainset_sizes
def get_dataloaders(train_ds, valid_ds,
                    batch_size,
                    num_workers,
                    fast=False,
                    train_sizes=None,
                    balance=False,
                    balance_datasets=False,
                    balance_unlabeled=False):
    """Wrap train/validation datasets into DataLoader instances.

    Optionally attaches a WeightedRandomSampler to the training loader to
    balance labeled vs. unlabeled samples, class frequencies, and/or the
    contribution of the individual source datasets (sized via ``train_sizes``).
    ``fast`` shrinks each epoch to 16 random samples for quick smoke runs.
    """
    sample_weights = None
    epoch_size = 0
    if balance_unlabeled:
        # Up-weight the rarer of the {labeled, unlabeled} groups.
        is_labeled = (train_ds.targets != UNLABELED_CLASS).astype(np.uint8)
        sample_weights = compute_sample_weight('balanced', is_labeled)
        epoch_size = int(np.mean(train_sizes))
    if balance:
        # Class-frequency balancing; replaces any weights computed above.
        sample_weights = compute_sample_weight('balanced', train_ds.targets)
        class_counts = np.bincount(train_ds.targets)
        n_classes = len(np.unique(train_ds.targets))
        epoch_size = int(min(class_counts)) * n_classes
    if balance_datasets:
        assert train_sizes is not None
        # Scale each sample inversely to the size of the dataset it came from.
        total_size = float(sum(train_sizes))
        per_dataset_terms = []
        for subset_size in train_sizes:
            per_dataset_terms.extend([total_size / subset_size] * subset_size)
        per_dataset_terms = np.array(per_dataset_terms)
        if sample_weights is None:
            sample_weights = np.ones(len(train_ds.targets))
        sample_weights = sample_weights * per_dataset_terms
        epoch_size = int(np.mean(train_sizes))
    # If we do balancing, let's go for fixed number of batches (half of dataset)
    sampler = None
    if sample_weights is not None:
        sampler = WeightedRandomSampler(sample_weights, epoch_size)
    if fast:
        # Tiny fixed-size epoch for debugging runs.
        sampler = WeightedRandomSampler(np.ones(len(train_ds)), 16)
    train_dl = DataLoader(train_ds, batch_size=batch_size,
                          shuffle=sampler is None, sampler=sampler,
                          pin_memory=True, drop_last=True,
                          num_workers=num_workers)
    valid_dl = DataLoader(valid_ds, batch_size=batch_size, shuffle=False,
                          pin_memory=True, drop_last=False,
                          num_workers=num_workers)
    return train_dl, valid_dl
| [
"numpy.sum",
"boto3.client",
"sklearn.model_selection.train_test_split",
"torch.utils.data.WeightedRandomSampler",
"numpy.mean",
"os.path.join",
"numpy.unique",
"torch.utils.data.DataLoader",
"retinopathy.augmentations.get_test_transform",
"cv2.cvtColor",
"retinopathy.augmentations.get_train_tra... | [((5331, 5348), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (5339, 5348), True, 'import numpy as np\n'), ((7007, 7050), 'boto3.client', 'boto3.client', (['"""s3"""'], {'region_name': 'region_name'}), "('s3', region_name=region_name)\n", (7019, 7050), False, 'import boto3\n'), ((8103, 8152), 'numpy.array', 'np.array', (["aptos2019_train['diagnosis']"], {'dtype': 'int'}), "(aptos2019_train['diagnosis'], dtype=int)\n", (8111, 8152), True, 'import numpy as np\n'), ((8866, 8905), 'numpy.array', 'np.array', (["aptos2015_train['image_path']"], {}), "(aptos2015_train['image_path'])\n", (8874, 8905), True, 'import numpy as np\n'), ((8914, 8963), 'numpy.array', 'np.array', (["aptos2015_train['diagnosis']"], {'dtype': 'int'}), "(aptos2015_train['diagnosis'], dtype=int)\n", (8922, 8963), True, 'import numpy as np\n'), ((9616, 9654), 'numpy.array', 'np.array', (["aptos2015_test['image_path']"], {}), "(aptos2015_test['image_path'])\n", (9624, 9654), True, 'import numpy as np\n'), ((9663, 9711), 'numpy.array', 'np.array', (["aptos2015_test['diagnosis']"], {'dtype': 'int'}), "(aptos2015_test['diagnosis'], dtype=int)\n", (9671, 9711), True, 'import numpy as np\n'), ((10366, 10404), 'numpy.array', 'np.array', (["aptos2015_test['image_path']"], {}), "(aptos2015_test['image_path'])\n", (10374, 10404), True, 'import numpy as np\n'), ((10413, 10461), 'numpy.array', 'np.array', (["aptos2015_test['diagnosis']"], {'dtype': 'int'}), "(aptos2015_test['diagnosis'], dtype=int)\n", (10421, 10461), True, 'import numpy as np\n'), ((11001, 11036), 'numpy.array', 'np.array', (["idrid_train['image_path']"], {}), "(idrid_train['image_path'])\n", (11009, 11036), True, 'import numpy as np\n'), ((11045, 11090), 'numpy.array', 'np.array', (["idrid_train['diagnosis']"], {'dtype': 'int'}), "(idrid_train['diagnosis'], dtype=int)\n", (11053, 11090), True, 'import numpy as np\n'), ((11361, 11395), 'numpy.array', 'np.array', 
(["idrid_test['image_path']"], {}), "(idrid_test['image_path'])\n", (11369, 11395), True, 'import numpy as np\n'), ((11404, 11448), 'numpy.array', 'np.array', (["idrid_test['diagnosis']"], {'dtype': 'int'}), "(idrid_test['diagnosis'], dtype=int)\n", (11412, 11448), True, 'import numpy as np\n'), ((11755, 11793), 'numpy.array', 'np.array', (["messidor_train['image_path']"], {}), "(messidor_train['image_path'])\n", (11763, 11793), True, 'import numpy as np\n'), ((11802, 11850), 'numpy.array', 'np.array', (["messidor_train['diagnosis']"], {'dtype': 'int'}), "(messidor_train['diagnosis'], dtype=int)\n", (11810, 11850), True, 'import numpy as np\n'), ((23974, 24084), 'retinopathy.augmentations.get_train_transform', 'get_train_transform', (['image_size'], {'augmentation': 'augmentation', 'preprocessing': 'preprocessing', 'crop_black': '(False)'}), '(image_size, augmentation=augmentation, preprocessing=\n preprocessing, crop_black=False)\n', (23993, 24084), False, 'from retinopathy.augmentations import get_train_transform, get_test_transform\n'), ((24228, 24305), 'retinopathy.augmentations.get_test_transform', 'get_test_transform', (['image_size'], {'preprocessing': 'preprocessing', 'crop_black': '(False)'}), '(image_size, preprocessing=preprocessing, crop_black=False)\n', (24246, 24305), False, 'from retinopathy.augmentations import get_train_transform, get_test_transform\n'), ((25590, 25626), 'os.path.join', 'os.path.join', (['data_dir', '"""aptos-2019"""'], {}), "(data_dir, 'aptos-2019')\n", (25602, 25626), False, 'import os\n'), ((25647, 25683), 'os.path.join', 'os.path.join', (['data_dir', '"""aptos-2015"""'], {}), "(data_dir, 'aptos-2015')\n", (25659, 25683), False, 'import os\n'), ((31015, 31125), 'retinopathy.augmentations.get_train_transform', 'get_train_transform', (['image_size'], {'augmentation': 'augmentation', 'preprocessing': 'preprocessing', 'crop_black': '(False)'}), '(image_size, augmentation=augmentation, preprocessing=\n preprocessing, 
crop_black=False)\n', (31034, 31125), False, 'from retinopathy.augmentations import get_train_transform, get_test_transform\n'), ((31269, 31346), 'retinopathy.augmentations.get_test_transform', 'get_test_transform', (['image_size'], {'preprocessing': 'preprocessing', 'crop_black': '(False)'}), '(image_size, preprocessing=preprocessing, crop_black=False)\n', (31287, 31346), False, 'from retinopathy.augmentations import get_train_transform, get_test_transform\n'), ((36380, 36527), 'torch.utils.data.DataLoader', 'DataLoader', (['train_ds'], {'batch_size': 'batch_size', 'shuffle': '(sampler is None)', 'sampler': 'sampler', 'pin_memory': '(True)', 'drop_last': '(True)', 'num_workers': 'num_workers'}), '(train_ds, batch_size=batch_size, shuffle=sampler is None,\n sampler=sampler, pin_memory=True, drop_last=True, num_workers=num_workers)\n', (36390, 36527), False, 'from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler\n'), ((36617, 36738), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_ds'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'drop_last': '(False)', 'num_workers': 'num_workers'}), '(valid_ds, batch_size=batch_size, shuffle=False, pin_memory=True,\n drop_last=False, num_workers=num_workers)\n', (36627, 36738), False, 'from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler\n'), ((1493, 1509), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1501, 1509), True, 'import numpy as np\n'), ((1820, 1849), 'cv2.imread', 'cv2.imread', (['self.images[item]'], {}), '(self.images[item])\n', (1830, 1849), False, 'import cv2\n'), ((1992, 2030), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2004, 2030), False, 'import cv2\n'), ((3781, 3797), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (3789, 3797), True, 'import numpy as np\n'), ((4079, 4108), 'cv2.imread', 'cv2.imread', (['self.images[item]'], {}), '(self.images[item])\n', 
(4089, 4108), False, 'import cv2\n'), ((4170, 4208), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (4182, 4208), False, 'import cv2\n'), ((5827, 5899), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'folds', 'random_state': 'random_state', 'shuffle': '(True)'}), '(n_splits=folds, random_state=random_state, shuffle=True)\n', (5842, 5899), False, 'from sklearn.model_selection import StratifiedKFold, train_test_split\n'), ((6256, 6358), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'random_state': 'random_state', 'test_size': '(1.0 / folds)', 'shuffle': '(True)', 'stratify': 'y'}), '(x, y, random_state=random_state, test_size=1.0 / folds,\n shuffle=True, stratify=y)\n', (6272, 6358), False, 'from sklearn.model_selection import StratifiedKFold, train_test_split\n'), ((7591, 7626), 'os.path.join', 'os.path.join', (['data_dir', '"""train.csv"""'], {}), "(data_dir, 'train.csv')\n", (7603, 7626), False, 'import os\n'), ((8343, 8377), 'os.path.join', 'os.path.join', (['data_dir', '"""test.csv"""'], {}), "(data_dir, 'test.csv')\n", (8355, 8377), False, 'import os\n'), ((8664, 8709), 'os.path.join', 'os.path.join', (['dataset_dir', '"""train_labels.csv"""'], {}), "(dataset_dir, 'train_labels.csv')\n", (8676, 8709), False, 'import os\n'), ((9102, 9157), 'numpy.concatenate', 'np.concatenate', (['(x[~healthy], x[healthy][:num_healthy])'], {}), '((x[~healthy], x[healthy][:num_healthy]))\n', (9116, 9157), True, 'import numpy as np\n'), ((9170, 9225), 'numpy.concatenate', 'np.concatenate', (['(y[~healthy], y[healthy][:num_healthy])'], {}), '((y[~healthy], y[healthy][:num_healthy]))\n', (9184, 9225), True, 'import numpy as np\n'), ((9345, 9389), 'os.path.join', 'os.path.join', (['dataset_dir', '"""test_labels.csv"""'], {}), "(dataset_dir, 'test_labels.csv')\n", (9357, 9389), False, 'import os\n'), ((9850, 9905), 'numpy.concatenate', 'np.concatenate', 
(['(x[~healthy], x[healthy][:num_healthy])'], {}), '((x[~healthy], x[healthy][:num_healthy]))\n', (9864, 9905), True, 'import numpy as np\n'), ((9918, 9973), 'numpy.concatenate', 'np.concatenate', (['(y[~healthy], y[healthy][:num_healthy])'], {}), '((y[~healthy], y[healthy][:num_healthy]))\n', (9932, 9973), True, 'import numpy as np\n'), ((10094, 10138), 'os.path.join', 'os.path.join', (['dataset_dir', '"""test_labels.csv"""'], {}), "(dataset_dir, 'test_labels.csv')\n", (10106, 10138), False, 'import os\n'), ((10600, 10655), 'numpy.concatenate', 'np.concatenate', (['(x[~healthy], x[healthy][:num_healthy])'], {}), '((x[~healthy], x[healthy][:num_healthy]))\n', (10614, 10655), True, 'import numpy as np\n'), ((10668, 10723), 'numpy.concatenate', 'np.concatenate', (['(y[~healthy], y[healthy][:num_healthy])'], {}), '((y[~healthy], y[healthy][:num_healthy]))\n', (10682, 10723), True, 'import numpy as np\n'), ((10806, 10851), 'os.path.join', 'os.path.join', (['dataset_dir', '"""train_labels.csv"""'], {}), "(dataset_dir, 'train_labels.csv')\n", (10818, 10851), False, 'import os\n'), ((11171, 11215), 'os.path.join', 'os.path.join', (['dataset_dir', '"""test_labels.csv"""'], {}), "(dataset_dir, 'test_labels.csv')\n", (11183, 11215), False, 'import os\n'), ((11554, 11599), 'os.path.join', 'os.path.join', (['dataset_dir', '"""train_labels.csv"""'], {}), "(dataset_dir, 'train_labels.csv')\n", (11566, 11599), False, 'import os\n'), ((26468, 26537), 'numpy.array', 'np.array', (["aptos2015_train_pseudolabel_round_1['diagnosis']"], {'dtype': 'int'}), "(aptos2015_train_pseudolabel_round_1['diagnosis'], dtype=int)\n", (26476, 26537), True, 'import numpy as np\n'), ((27284, 27343), 'numpy.array', 'np.array', (["aptos2015_test_public_pl1['diagnosis']"], {'dtype': 'int'}), "(aptos2015_test_public_pl1['diagnosis'], dtype=int)\n", (27292, 27343), True, 'import numpy as np\n'), ((28277, 28337), 'numpy.array', 'np.array', (["aptos2015_test_private_pl1['diagnosis']"], {'dtype': 'int'}), 
"(aptos2015_test_private_pl1['diagnosis'], dtype=int)\n", (28285, 28337), True, 'import numpy as np\n'), ((28542, 28578), 'os.path.join', 'os.path.join', (['data_dir', '"""messidor_2"""'], {}), "(data_dir, 'messidor_2')\n", (28554, 28578), False, 'import os\n'), ((29042, 29105), 'numpy.array', 'np.array', (["messidor2_pseudolabel_round_1['diagnosis']"], {'dtype': 'int'}), "(messidor2_pseudolabel_round_1['diagnosis'], dtype=int)\n", (29050, 29105), True, 'import numpy as np\n'), ((31519, 31544), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 2]'], {}), '([0, 1, 1, 1, 2])\n', (31527, 31544), True, 'import numpy as np\n'), ((35169, 35216), 'sklearn.utils.compute_sample_weight', 'compute_sample_weight', (['"""balanced"""', 'labeled_mask'], {}), "('balanced', labeled_mask)\n", (35190, 35216), False, 'from sklearn.utils import compute_sample_weight\n'), ((35300, 35351), 'sklearn.utils.compute_sample_weight', 'compute_sample_weight', (['"""balanced"""', 'train_ds.targets'], {}), "('balanced', train_ds.targets)\n", (35321, 35351), False, 'from sklearn.utils import compute_sample_weight\n'), ((35367, 35396), 'numpy.bincount', 'np.bincount', (['train_ds.targets'], {}), '(train_ds.targets)\n', (35378, 35396), True, 'import numpy as np\n'), ((35870, 35902), 'numpy.array', 'np.array', (['dataset_balancing_term'], {}), '(dataset_balancing_term)\n', (35878, 35902), True, 'import numpy as np\n'), ((36212, 36255), 'torch.utils.data.WeightedRandomSampler', 'WeightedRandomSampler', (['weights', 'num_samples'], {}), '(weights, num_samples)\n', (36233, 36255), False, 'from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler\n'), ((36329, 36363), 'torch.utils.data.WeightedRandomSampler', 'WeightedRandomSampler', (['weights', '(16)'], {}), '(weights, 16)\n', (36350, 36363), False, 'from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler\n'), ((1194, 1211), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (1202, 1211), True, 'import numpy as np\n'), 
((2314, 2350), 'pytorch_toolbelt.utils.torch_utils.tensor_from_rgb_image', 'tensor_from_rgb_image', (["data['image']"], {}), "(data['image'])\n", (2335, 2350), False, 'from pytorch_toolbelt.utils.torch_utils import tensor_from_rgb_image\n'), ((2380, 2412), 'pytorch_toolbelt.utils.fs.id_from_fname', 'id_from_fname', (['self.images[item]'], {}), '(self.images[item])\n', (2393, 2412), False, 'from pytorch_toolbelt.utils.fs import id_from_fname\n'), ((2481, 2497), 'math.log', 'math.log', (['height'], {}), '(height)\n', (2489, 2497), False, 'import math\n'), ((2522, 2537), 'math.log', 'math.log', (['width'], {}), '(width)\n', (2530, 2537), False, 'import math\n'), ((2607, 2634), 'numpy.mean', 'np.mean', (['image'], {'axis': '(0, 1)'}), '(image, axis=(0, 1))\n', (2614, 2634), True, 'import numpy as np\n'), ((2664, 2738), 'numpy.array', 'np.array', (['[log_height, log_width, aspect_ratio, mean[0], mean[1], mean[2]]'], {}), '([log_height, log_width, aspect_ratio, mean[0], mean[1], mean[2]])\n', (2672, 2738), True, 'import numpy as np\n'), ((3005, 3026), 'numpy.array', 'np.array', (['[diagnosis]'], {}), '([diagnosis])\n', (3013, 3026), True, 'import numpy as np\n'), ((3502, 3519), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (3510, 3519), True, 'import numpy as np\n'), ((4392, 4426), 'pytorch_toolbelt.utils.torch_utils.tensor_from_rgb_image', 'tensor_from_rgb_image', (['transformed'], {}), '(transformed)\n', (4413, 4426), False, 'from pytorch_toolbelt.utils.torch_utils import tensor_from_rgb_image\n'), ((4456, 4487), 'pytorch_toolbelt.utils.torch_utils.tensor_from_rgb_image', 'tensor_from_rgb_image', (['original'], {}), '(original)\n', (4477, 4487), False, 'from pytorch_toolbelt.utils.torch_utils import tensor_from_rgb_image\n'), ((4517, 4549), 'pytorch_toolbelt.utils.fs.id_from_fname', 'id_from_fname', (['self.images[item]'], {}), '(self.images[item])\n', (4530, 4549), False, 'from pytorch_toolbelt.utils.fs import id_from_fname\n'), ((4608, 4624), 
'math.log', 'math.log', (['height'], {}), '(height)\n', (4616, 4624), False, 'import math\n'), ((4649, 4664), 'math.log', 'math.log', (['width'], {}), '(width)\n', (4657, 4664), False, 'import math\n'), ((4734, 4761), 'numpy.mean', 'np.mean', (['image'], {'axis': '(0, 1)'}), '(image, axis=(0, 1))\n', (4741, 4761), True, 'import numpy as np\n'), ((4791, 4865), 'numpy.array', 'np.array', (['[log_height, log_width, aspect_ratio, mean[0], mean[1], mean[2]]'], {}), '([log_height, log_width, aspect_ratio, mean[0], mean[1], mean[2]])\n', (4799, 4865), True, 'import numpy as np\n'), ((5431, 5451), 'numpy.sum', 'np.sum', (['(targets == i)'], {}), '(targets == i)\n', (5437, 5451), True, 'import numpy as np\n'), ((8799, 8856), 'os.path.join', 'os.path.join', (['dataset_dir', '"""train_images_768"""', 'f"""{x}.png"""'], {}), "(dataset_dir, 'train_images_768', f'{x}.png')\n", (8811, 8856), False, 'import os\n'), ((9550, 9606), 'os.path.join', 'os.path.join', (['dataset_dir', '"""test_images_768"""', 'f"""{x}.png"""'], {}), "(dataset_dir, 'test_images_768', f'{x}.png')\n", (9562, 9606), False, 'import os\n'), ((10300, 10356), 'os.path.join', 'os.path.join', (['dataset_dir', '"""test_images_768"""', 'f"""{x}.png"""'], {}), "(dataset_dir, 'test_images_768', f'{x}.png')\n", (10312, 10356), False, 'import os\n'), ((10933, 10990), 'os.path.join', 'os.path.join', (['dataset_dir', '"""train_images_768"""', 'f"""{x}.png"""'], {}), "(dataset_dir, 'train_images_768', f'{x}.png')\n", (10945, 10990), False, 'import os\n'), ((11295, 11351), 'os.path.join', 'os.path.join', (['dataset_dir', '"""test_images_768"""', 'f"""{x}.png"""'], {}), "(dataset_dir, 'test_images_768', f'{x}.png')\n", (11307, 11351), False, 'import os\n'), ((11687, 11744), 'os.path.join', 'os.path.join', (['dataset_dir', '"""train_images_768"""', 'f"""{x}.png"""'], {}), "(dataset_dir, 'train_images_768', f'{x}.png')\n", (11699, 11744), False, 'import os\n'), ((26072, 26142), 'os.path.join', 'os.path.join', 
(['aptos2015_dir', '"""aptos2015_train_pseudolabel_round_1.csv"""'], {}), "(aptos2015_dir, 'aptos2015_train_pseudolabel_round_1.csv')\n", (26084, 26142), False, 'import os\n'), ((26936, 27012), 'os.path.join', 'os.path.join', (['aptos2015_dir', '"""aptos2015_test_public_pseudolabel_round_1.csv"""'], {}), "(aptos2015_dir, 'aptos2015_test_public_pseudolabel_round_1.csv')\n", (26948, 27012), False, 'import os\n'), ((27937, 28014), 'os.path.join', 'os.path.join', (['aptos2015_dir', '"""aptos2015_test_private_pseudolabel_round_1.csv"""'], {}), "(aptos2015_dir, 'aptos2015_test_private_pseudolabel_round_1.csv')\n", (27949, 28014), False, 'import os\n'), ((28631, 28698), 'os.path.join', 'os.path.join', (['messidor2_dir', '"""train_labels_pseudolabel_round_1.csv"""'], {}), "(messidor2_dir, 'train_labels_pseudolabel_round_1.csv')\n", (28643, 28698), False, 'import os\n'), ((29942, 29973), 'os.path.join', 'os.path.join', (['data_dir', '"""idrid"""'], {}), "(data_dir, 'idrid')\n", (29954, 29973), False, 'import os\n'), ((30214, 30248), 'os.path.join', 'os.path.join', (['data_dir', '"""messidor"""'], {}), "(data_dir, 'messidor')\n", (30226, 30248), False, 'import os\n'), ((30659, 30690), 'os.path.join', 'os.path.join', (['data_dir', '"""idrid"""'], {}), "(data_dir, 'idrid')\n", (30671, 30690), False, 'import os\n'), ((31583, 31600), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (31591, 31600), True, 'import numpy as np\n'), ((31639, 31656), 'numpy.array', 'np.array', (['valid_y'], {}), '(valid_y)\n', (31647, 31656), True, 'import numpy as np\n'), ((31950, 32011), 'os.path.join', 'os.path.join', (['data_dir', '"""diaretdb0_v_1_1"""', '"""train_images_768"""'], {}), "(data_dir, 'diaretdb0_v_1_1', 'train_images_768')\n", (31962, 32011), False, 'import os\n'), ((32150, 32211), 'os.path.join', 'os.path.join', (['data_dir', '"""diaretdb1_v_1_1"""', '"""train_images_768"""'], {}), "(data_dir, 'diaretdb1_v_1_1', 'train_images_768')\n", (32162, 32211), False, 'import 
os\n'), ((32341, 32388), 'os.path.join', 'os.path.join', (['data_dir', '"""origa"""', '"""glaucoma_768"""'], {}), "(data_dir, 'origa', 'glaucoma_768')\n", (32353, 32388), False, 'import os\n'), ((32500, 32544), 'os.path.join', 'os.path.join', (['data_dir', '"""origa"""', '"""sanas_768"""'], {}), "(data_dir, 'origa', 'sanas_768')\n", (32512, 32544), False, 'import os\n'), ((32655, 32706), 'os.path.join', 'os.path.join', (['data_dir', '"""stare"""', '"""train_images_768"""'], {}), "(data_dir, 'stare', 'train_images_768')\n", (32667, 32706), False, 'import os\n'), ((33194, 33230), 'os.path.join', 'os.path.join', (['data_dir', '"""aptos-2015"""'], {}), "(data_dir, 'aptos-2015')\n", (33206, 33230), False, 'import os\n'), ((33496, 33532), 'os.path.join', 'os.path.join', (['data_dir', '"""aptos-2015"""'], {}), "(data_dir, 'aptos-2015')\n", (33508, 33532), False, 'import os\n'), ((35243, 35263), 'numpy.mean', 'np.mean', (['train_sizes'], {}), '(train_sizes)\n', (35250, 35263), True, 'import numpy as np\n'), ((35465, 35492), 'numpy.unique', 'np.unique', (['train_ds.targets'], {}), '(train_ds.targets)\n', (35474, 35492), True, 'import numpy as np\n'), ((36062, 36082), 'numpy.mean', 'np.mean', (['train_sizes'], {}), '(train_sizes)\n', (36069, 36082), True, 'import numpy as np\n'), ((5188, 5206), 'numpy.array', 'np.array', (['[target]'], {}), '([target])\n', (5196, 5206), True, 'import numpy as np\n'), ((8038, 8092), 'os.path.join', 'os.path.join', (['data_dir', '"""train_images_768"""', 'f"""{x}.png"""'], {}), "(data_dir, 'train_images_768', f'{x}.png')\n", (8050, 8092), False, 'import os\n'), ((8438, 8491), 'os.path.join', 'os.path.join', (['data_dir', '"""test_images_768"""', 'f"""{x}.png"""'], {}), "(data_dir, 'test_images_768', f'{x}.png')\n", (8450, 8491), False, 'import os\n'), ((9050, 9065), 'numpy.sum', 'np.sum', (['healthy'], {}), '(healthy)\n', (9056, 9065), True, 'import numpy as np\n'), ((9798, 9813), 'numpy.sum', 'np.sum', (['healthy'], {}), '(healthy)\n', (9804, 
9813), True, 'import numpy as np\n'), ((10548, 10563), 'numpy.sum', 'np.sum', (['healthy'], {}), '(healthy)\n', (10554, 10563), True, 'import numpy as np\n'), ((22190, 22226), 'os.path.join', 'os.path.join', (['data_dir', '"""aptos-2019"""'], {}), "(data_dir, 'aptos-2019')\n", (22202, 22226), False, 'import os\n'), ((32961, 33015), 'os.path.join', 'os.path.join', (['data_dir', '"""messidor"""', '"""train_images_768"""'], {}), "(data_dir, 'messidor', 'train_images_768')\n", (32973, 33015), False, 'import os\n'), ((22316, 22352), 'os.path.join', 'os.path.join', (['data_dir', '"""aptos-2015"""'], {}), "(data_dir, 'aptos-2015')\n", (22328, 22352), False, 'import os\n'), ((26394, 26453), 'os.path.join', 'os.path.join', (['aptos2015_dir', '"""train_images_768"""', 'f"""{x}.png"""'], {}), "(aptos2015_dir, 'train_images_768', f'{x}.png')\n", (26406, 26453), False, 'import os\n'), ((27211, 27269), 'os.path.join', 'os.path.join', (['aptos2015_dir', '"""test_images_768"""', 'f"""{x}.png"""'], {}), "(aptos2015_dir, 'test_images_768', f'{x}.png')\n", (27223, 27269), False, 'import os\n'), ((28204, 28262), 'os.path.join', 'os.path.join', (['aptos2015_dir', '"""test_images_768"""', 'f"""{x}.png"""'], {}), "(aptos2015_dir, 'test_images_768', f'{x}.png')\n", (28216, 28262), False, 'import os\n'), ((28968, 29027), 'os.path.join', 'os.path.join', (['messidor2_dir', '"""train_images_768"""', 'f"""{x}.png"""'], {}), "(messidor2_dir, 'train_images_768', f'{x}.png')\n", (28980, 29027), False, 'import os\n'), ((22456, 22492), 'os.path.join', 'os.path.join', (['data_dir', '"""aptos-2015"""'], {}), "(data_dir, 'aptos-2015')\n", (22468, 22492), False, 'import os\n'), ((22594, 22630), 'os.path.join', 'os.path.join', (['data_dir', '"""aptos-2015"""'], {}), "(data_dir, 'aptos-2015')\n", (22606, 22630), False, 'import os\n'), ((22711, 22742), 'os.path.join', 'os.path.join', (['data_dir', '"""idrid"""'], {}), "(data_dir, 'idrid')\n", (22723, 22742), False, 'import os\n'), ((22821, 22852), 
'os.path.join', 'os.path.join', (['data_dir', '"""idrid"""'], {}), "(data_dir, 'idrid')\n", (22833, 22852), False, 'import os\n'), ((22927, 22961), 'os.path.join', 'os.path.join', (['data_dir', '"""messidor"""'], {}), "(data_dir, 'messidor')\n", (22939, 22961), False, 'import os\n')] |
import sys
from multiprocessing import cpu_count
from typing import Callable, Tuple
import numpy as np
import sharedmem
from fastdist import fastdist
from ripser import ripser
from sklearn.metrics import euclidean_distances
from tqdm.auto import tqdm
sys.path.append("..")
from approx_nn import ApproxNN # noqa: E402
from topological_data_analysis.ripser_utils import run_ripser_plus_plus # noqa: E402
from utils import batch_list_gen # noqa: E402
# Multiprocessing variable dict: filled by the parent process (see compute_gad)
# with the "distance_func" and optional "knn_func" callables before the worker
# pool starts; workers read it in compute_gad_point_indices_mp.
mp_var_dict = {}
# Type aliases
# DistanceFunc(i, j) -> distance between data points with indices i and j.
DistanceFunc = Callable[[int, int], float]
# KnnFunc(point_idx, k) -> (neighbour indices, neighbour distances) arrays.
KnnFunc = Callable[[int, int], Tuple[np.ndarray, np.ndarray]]
# def compute_gad_mp_init(
# data_points: Array,
# data_points_shape: tuple,
# distance_func: DistanceFunc,
# knn_func: KnnFunc = None,
# ) -> None:
# """
# Initializes multiprocessing variable dict for GAD.
# Parameters
# ----------
# data_points: Array
# Multiprocessing array representing the data points.
# data_points_shape : tuple
# Shape of the data points.
# distance_func : DistanceFunc
# Distance function.
# knn_func : KnnFunc
# K-nearest neighbour function.
# """
# mp_var_dict["data_points"] = data_points
# mp_var_dict["data_points_shape"] = data_points_shape
# mp_var_dict["distance_func"] = distance_func
# if knn_func is not None:
# mp_var_dict["knn_func"] = knn_func
def get_point_distance_func(
    data_points: np.ndarray,
    pairwise_distances: np.ndarray = None,
    metric_callable: Callable = None,
) -> "DistanceFunc":
    """
    Gets a function computing the distance between two points given their indices.

    If `pairwise_distances` is given, distances are looked up in it; otherwise
    they are computed on demand from `data_points` using `metric_callable`.

    Parameters
    ----------
    data_points : np.ndarray
        Data points.
    pairwise_distances : np.ndarray, optional
        Precomputed pairwise distances between data points (defaults to None).
    metric_callable : Callable, optional
        Metric taking two points and returning their distance; defaults to
        `fastdist.euclidean`. Only used when `pairwise_distances` is None.

    Returns
    -------
    distance_func : DistanceFunc
        Distance function, taking in two point indices i and j and returning
        the distance between the corresponding points.
    """
    if pairwise_distances is not None:
        return lambda point_idx_i, point_idx_j: pairwise_distances[
            point_idx_i, point_idx_j
        ]
    # Resolve the fastdist default lazily so it is only required when used.
    if metric_callable is None:
        metric_callable = fastdist.euclidean
    return lambda point_idx_i, point_idx_j: metric_callable(
        data_points[point_idx_i], data_points[point_idx_j]
    )
def get_nearest_neighbours(
    distances: np.ndarray,
    k_neighbours: int,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Gets the nearest K neighbours from an array of distances.

    The closest entry in the sorted order is skipped: when `distances` is a row
    of a pairwise-distance matrix, the closest entry is the query point itself
    (distance 0).

    Parameters
    ----------
    distances : np.ndarray
        Array of distances from the query point to all points.
    k_neighbours : int
        Number of neighbours to find.

    Returns
    -------
    sorted_k_distances_indices : np.ndarray
        Indices of the K nearest points, sorted from closest to farthest.
    sorted_k_distances : np.ndarray
        The corresponding K distances, sorted from smallest to largest.
    """
    # Fix: the return annotation previously claimed Tuple[float, float],
    # but two index/distance arrays are returned (as documented above).
    sorted_k_distances_indices = np.argsort(distances)[1 : k_neighbours + 1]
    sorted_k_distances = distances[sorted_k_distances_indices]
    return sorted_k_distances_indices, sorted_k_distances
def get_knn_func_data_points(
    data_points: np.ndarray,
    pairwise_distances: np.ndarray = None,
    approx_nn: ApproxNN = None,
    metric: Callable = fastdist.euclidean,
    metric_name: str = "euclidean",
) -> KnnFunc:
    """
    Builds a K-nearest-neighbour lookup for data points (used by `compute_gad`).

    The lookup strategy is picked by what is available, in order of preference:
    an ApproxNN index, a precomputed pairwise-distance matrix, or direct
    distance computation with the given fastdist metric.

    Parameters
    ----------
    data_points : np.ndarray
        Data points.
    pairwise_distances : np.ndarray, optional
        Pairwise distances of data points (defaults to None).
    approx_nn : ApproxNN, optional
        ApproxNN instance.
    metric : Callable, optional
        fastdist metric; only used if `pairwise_distances` and `approx_nn` are
        None (defaults to fastdist.euclidean).
    metric_name : str, optional
        String name of the `metric` callable (defaults to "euclidean").

    Returns
    -------
    knn_func : KnnFunc
        Callable mapping (point index, K) to (neighbour indices, distances).
    """
    if approx_nn is not None:
        def approx_knn(point_idx: int, k_neighbours: int):
            # Exclude the query point itself from the search results.
            return approx_nn.search(
                query_vector=data_points[point_idx],
                k_neighbours=k_neighbours,
                excluded_neighbour_indices=[point_idx],
                return_distances=True,
            )
        return approx_knn
    if pairwise_distances is not None:
        def precomputed_knn(point_idx: int, k_neighbours: int):
            return get_nearest_neighbours(
                distances=pairwise_distances[point_idx],
                k_neighbours=k_neighbours,
            )
        return precomputed_knn
    def exact_knn(point_idx: int, k_neighbours: int):
        # Compute distances from the query point to all points on demand.
        return get_nearest_neighbours(
            distances=fastdist.vector_to_matrix_distance(
                u=data_points[point_idx],
                m=data_points,
                metric=metric,
                metric_name=metric_name,
            ),
            k_neighbours=k_neighbours,
        )
    return exact_knn
def compute_gad_point_indices(
    data_point_indices: list,
    data_points: np.ndarray,
    annulus_inner_radius: float,
    annulus_outer_radius: float,
    distance_func: DistanceFunc,
    use_knn_annulus: bool,
    knn_func: KnnFunc,
    knn_annulus_inner: int,
    knn_annulus_outer: int,
    target_homology_dim: int,
    use_ripser_plus_plus: bool,
    ripser_plus_plus_threshold: int,
    return_annlus_persistence_diagrams: bool,
    progressbar_enabled: bool,
) -> dict:
    """
    Computes geometric anomaly detection (GAD) Procedure 1 from [1], for data point
    indices.

    Parameters
    ----------
    data_point_indices : list
        List consisting of indices of data points to compute GAD for.
    data_points : np.ndarray
        All data points.
    annulus_inner_radius : float
        Inner annulus radius (ignored when `use_knn_annulus` is True).
    annulus_outer_radius : float
        Outer annulus radius (ignored when `use_knn_annulus` is True).
    distance_func : DistanceFunc
        Distance function to measure distances between any two data points.
    use_knn_annulus : bool
        Whether or not to use the KNN version of GAD, in which the annulus
        radii are derived per point from its nearest-neighbour distances.
    knn_func : KnnFunc
        K-nearest neighbour function to find K nearest neighbours of any data point.
    knn_annulus_inner : int
        Number of neighbours to determine inner annulus radius.
    knn_annulus_outer : int
        Number of neighbours to determine outer annulus radius.
    target_homology_dim : int
        Target homology dimension (k parameter in [1]).
    use_ripser_plus_plus : bool
        Whether or not to use Ripser++ (GPU acceleration).
    ripser_plus_plus_threshold : int
        The least number of data points in order to use Ripser++, only has an effect
        if `use_ripser_plus_plus` is set to True.
    return_annlus_persistence_diagrams : bool
        Whether or not to return annulus persistence diagrams.
    progressbar_enabled : bool
        Whether or not the tqdm progressbar is enabled.

    Returns
    -------
    result : dict
        Result dictionary consisting of:
        "P_man" : list
            List of point indices of k-manifold points.
        "P_bnd" : list
            List of point indices of boundary points.
        "P_int" : list
            List of point indices of intersection points.
        "annulus_pds" : dict
            Maps each point index to its annulus persistence diagram, present
            only if `return_annlus_persistence_diagrams` is set to True.

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>, & <NAME>.
    (2019). Geometric anomaly detection in data.
    """
    # Initialize result
    result = {
        "P_bnd": [],
        "P_man": [],
        "P_int": [],
    }
    if return_annlus_persistence_diagrams:
        result["annulus_pds"] = {}
    for data_point_index in tqdm(data_point_indices, disable=not progressbar_enabled):
        # Find A_y ⊂ data_points containing all points in data_points
        # which satisfy r ≤ ||x − y|| ≤ s (*).
        if use_knn_annulus:
            annulus_outer_indices, annulus_outer_distances = knn_func(
                data_point_index, knn_annulus_outer
            )
            # Set annulus inner and outer radii and A_y_indices
            # (radii parameters are shadowed per point in KNN mode).
            annulus_inner_radius = annulus_outer_distances[knn_annulus_inner]
            annulus_outer_radius = annulus_outer_distances[-1]
            A_y_indices = annulus_outer_indices[knn_annulus_inner:]
        else:
            # Fixed radii: scan every point and keep those inside the annulus.
            A_y_indices = np.array(
                [
                    j
                    for j in np.arange(len(data_points))
                    if annulus_inner_radius
                    <= distance_func(j, data_point_index)
                    <= annulus_outer_radius
                ],
                dtype=int,
            )
        # Return already if there are no points satisfying condition in (*).
        N_y = 0
        if len(A_y_indices) == 0:
            # No annulus points: classify as boundary point.
            result["P_bnd"].append(data_point_index)
            if return_annlus_persistence_diagrams:
                result["annulus_pds"][data_point_index] = []
            continue
        # Compute (k-1) Vietoris-Rips barcode of A_y
        A_y = data_points[A_y_indices]
        # Use GPU-accelerated Ripser++ only above the point-count threshold.
        if use_ripser_plus_plus and len(A_y) > ripser_plus_plus_threshold:
            diagrams_dict = run_ripser_plus_plus(
                point_cloud=A_y, max_dim=target_homology_dim
            )
            diagrams = list(diagrams_dict.values())
        else:
            rips_complex = ripser(
                X=euclidean_distances(A_y),
                maxdim=target_homology_dim,
                distance_matrix=True,
            )
            diagrams = rips_complex["dgms"]
        target_homology_dim_diagram = diagrams[target_homology_dim]
        # print(target_homology_dim_diagram.shape)
        # Calculate number of intervals in A_y_barcodes of length
        # (death - birth) > abs(annulus_outer_radius - annulus_inner_radius).
        N_y = 0
        for birth, death in target_homology_dim_diagram:
            if (death - birth) > abs(annulus_outer_radius - annulus_inner_radius):
                N_y += 1
        # Add result: 0 long-lived intervals -> boundary point, exactly 1 ->
        # manifold point, more than 1 -> intersection point.
        if N_y == 0:
            result["P_bnd"].append(data_point_index)
        elif N_y == 1:
            result["P_man"].append(data_point_index)
        else:
            result["P_int"].append(data_point_index)
        if return_annlus_persistence_diagrams:
            result["annulus_pds"][data_point_index] = target_homology_dim_diagram
    return result
def compute_gad_point_indices_mp(args: tuple) -> dict:
    """
    Worker entry point: runs GAD Procedure 1 [1] on one chunk of point indices.

    Thin multiprocessing wrapper around `compute_gad_point_indices`. The
    distance and (optional) KNN callables are read from the process-global
    `mp_var_dict`, which the parent process fills before starting the workers.

    Parameters
    ----------
    args : tuple
        (data_points, data_point_indices, annulus_inner_radius,
        annulus_outer_radius, use_knn_annulus, knn_annulus_inner,
        knn_annulus_outer, target_homology_dim, use_ripser_plus_plus,
        ripser_plus_plus_threshold, return_annlus_persistence_diagrams);
        see `compute_gad_point_indices` for the meaning of each entry.

    Returns
    -------
    result : dict
        Output of `compute_gad_point_indices` for this chunk: lists "P_man",
        "P_bnd" and "P_int" and, when requested, the "annulus_pds" dict.

    References
    ----------
    .. [1] (2019). Geometric anomaly detection in data.
    """
    (
        data_points,
        point_indices,
        inner_radius,
        outer_radius,
        use_knn_annulus,
        knn_inner,
        knn_outer,
        homology_dim,
        use_ripser_pp,
        ripser_pp_threshold,
        return_annulus_pds,
    ) = args
    # The KNN callable is only present in mp_var_dict when KNN mode is on.
    knn_lookup = mp_var_dict["knn_func"] if use_knn_annulus else None
    return compute_gad_point_indices(
        data_point_indices=point_indices,
        data_points=data_points,
        annulus_inner_radius=inner_radius,
        annulus_outer_radius=outer_radius,
        distance_func=mp_var_dict["distance_func"],
        use_knn_annulus=use_knn_annulus,
        knn_func=knn_lookup,
        knn_annulus_inner=knn_inner,
        knn_annulus_outer=knn_outer,
        target_homology_dim=homology_dim,
        use_ripser_plus_plus=use_ripser_pp,
        ripser_plus_plus_threshold=ripser_pp_threshold,
        return_annlus_persistence_diagrams=return_annulus_pds,
        progressbar_enabled=True,
    )
def compute_gad(
    data_points: np.ndarray,
    manifold_dimension: int,
    annulus_inner_radius: float = None,
    annulus_outer_radius: float = None,
    data_point_ints: list = None,
    data_points_pairwise_distances: np.ndarray = None,
    data_points_approx_nn: ApproxNN = None,
    data_points_distance_metric: Callable = fastdist.euclidean,
    use_ripser_plus_plus: bool = False,
    ripser_plus_plus_threshold: int = 200,
    use_knn_annulus: bool = False,
    knn_annulus_inner: int = None,
    knn_annulus_outer: int = None,
    knn_annulus_metric: Callable = fastdist.euclidean,
    knn_annulus_metric_name: str = "euclidean",
    return_annlus_persistence_diagrams: bool = False,
    progressbar_enabled: bool = False,
    n_jobs: int = 1,
    verbose: int = 1,
) -> dict:
    """
    Computes geometric anomaly detection (GAD) Procedure 1 from [1].

    Parameters
    ----------
    data_points : np.ndarray
        All data points.
    manifold_dimension : int
        Manifold homology dimension (k parameter in [1]).
    annulus_inner_radius : float
        Inner annulus radius (ignored when `use_knn_annulus` is True).
    annulus_outer_radius : float
        Outer annulus radius (ignored when `use_knn_annulus` is True).
    data_point_ints : np.ndarray
        Array specifying which data point indices are used from all the data
        points (defaults to all points).
    data_points_pairwise_distances : np.ndarray, optional
        Pairwise distances of data points (defaults to None).
    data_points_approx_nn : ApproxNN, optional
        ApproxNN instance (defaults to None).
    data_points_distance_metric : Callable, optional
        Distance metric callable to compute exact distance between any two data
        points (defaults to euclidean distance, `fastdist.euclidean`).
    use_ripser_plus_plus : bool
        Whether or not to use Ripser++ (GPU acceleration).
    ripser_plus_plus_threshold : int
        The least number of data points in order to use Ripser++, only has an effect
        if `use_ripser_plus_plus` is set to True.
    use_knn_annulus : bool
        Whether or not to use the KNN version of GAD.
    knn_annulus_inner : int
        Number of neighbours to determine inner annulus radius.
    knn_annulus_outer : int
        Number of neighbours to determine outer annulus radius.
    knn_annulus_metric : Callable
        fastdist metric; only required if `data_points_pairwise_distances` and
        `data_points_approx_nn` are None (defaults to fastdist.euclidean).
    knn_annulus_metric_name : str
        String name of the `knn_annulus_metric` callable (defaults to "euclidean").
    return_annlus_persistence_diagrams : bool
        Whether or not to return annulus persistence diagrams.
    progressbar_enabled : bool
        Whether or not the tqdm progressbar is enabled.
    n_jobs : int, optional
        Number of processes to use (defaults 1, -1 denotes all processes).
    verbose : int, optional
        Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose). Defaults to 1 (verbose).

    Returns
    -------
    result : dict
        Result dictionary consisting of:
        "P_man" : list
            List of point indices of k-manifold points.
        "P_bnd" : list
            List of point indices of boundary points.
        "P_int" : list
            List of point indices of intersection points.
        "annulus_pds" : dict
            Maps each point index to its annulus persistence diagram, present
            only if `return_annlus_persistence_diagrams` is set to True.

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>, & <NAME>.
    (2019). Geometric anomaly detection in data.
    """
    if data_point_ints is None:
        data_point_ints = np.arange(len(data_points))
    # Get distance function
    distance_func = get_point_distance_func(
        data_points=data_points,
        pairwise_distances=data_points_pairwise_distances,
        metric_callable=data_points_distance_metric,
    )
    # Get KNN annulus function, if use_knn_annulus is True
    knn_func = None
    if use_knn_annulus:
        knn_func = get_knn_func_data_points(
            data_points=data_points,
            pairwise_distances=data_points_pairwise_distances,
            approx_nn=data_points_approx_nn,
            metric=knn_annulus_metric,
            metric_name=knn_annulus_metric_name,
        )
    # Procedure 1 inspects (k-1)-dimensional persistence for a k-manifold.
    target_homology_dim = manifold_dimension - 1
    if n_jobs == -1:
        n_jobs = cpu_count()
    if n_jobs > 1:
        # Initialize MP results
        results = {
            "P_bnd": [],
            "P_man": [],
            "P_int": [],
        }
        if return_annlus_persistence_diagrams:
            results["annulus_pds"] = {}
        # Prepare data for multiprocessing
        if verbose == 1:
            print("Preparing data for multiprocessing...")
        # Shared-memory copy so worker processes can read the same data points.
        data_points_shared = sharedmem.copy(data_points)
        # data_points_raw = Array(
        #     "d", data_points.shape[0] * data_points.shape[1], lock=False
        # )
        # data_points_raw_np = np.frombuffer(data_points_raw).reshape(data_points.shape)
        # np.copyto(data_points_raw_np, data_points)
        if verbose == 1:
            print("Done!")
        # Prepare arguments: one chunk of point indices per worker process.
        num_data_points_per_process = int(len(data_point_ints) // n_jobs)
        mp_args = [
            (
                data_points_shared,
                data_point_ints_chunk,
                annulus_inner_radius,
                annulus_outer_radius,
                use_knn_annulus,
                knn_annulus_inner,
                knn_annulus_outer,
                target_homology_dim,
                use_ripser_plus_plus,
                ripser_plus_plus_threshold,
                return_annlus_persistence_diagrams,
            )
            for data_point_ints_chunk in batch_list_gen(
                data_point_ints, num_data_points_per_process
            )
        ]
        # Hand the callables to workers via the module-level mp_var_dict
        # (read back in compute_gad_point_indices_mp).
        mp_var_dict["distance_func"] = distance_func
        if knn_func is not None:
            mp_var_dict["knn_func"] = knn_func
        # Run MP
        if verbose == 1:
            print(f"Computing GAD using {n_jobs} processes...")
        with sharedmem.MapReduce(np=n_jobs) as pool:
            mp_results = pool.map(compute_gad_point_indices_mp, mp_args)
            # Merge the per-chunk worker results into a single result dict.
            for result in mp_results:
                results["P_man"].extend(result["P_man"])
                results["P_bnd"].extend(result["P_bnd"])
                results["P_int"].extend(result["P_int"])
                if return_annlus_persistence_diagrams:
                    results["annulus_pds"].update(result["annulus_pds"])
        # with Pool(
        #     processes=n_jobs,
        #     initializer=compute_gad_mp_init,
        #     initargs=(data_points_raw_np, data_points.shape, distance_func, knn_func),
        # ) as pool:
        #     for result in tqdm(
        #         pool.imap_unordered(compute_gad_point_indices_mp, grid_search_args),
        #         total=n_jobs,
        #         disable=not progressbar_enabled,
        #     ):
        #         results["P_man"].extend(result["P_man"])
        #         results["P_bnd"].extend(result["P_bnd"])
        #         results["P_int"].extend(result["P_int"])
        #         if return_annlus_persistence_diagrams:
        #             results["annulus_pds"].update(result["annulus_pds"])
    else:
        # Compute GAD using only one processor
        if verbose == 1:
            print("Computing GAD...")
        results = compute_gad_point_indices(
            data_point_indices=data_point_ints,
            data_points=data_points,
            annulus_inner_radius=annulus_inner_radius,
            annulus_outer_radius=annulus_outer_radius,
            distance_func=distance_func,
            use_knn_annulus=use_knn_annulus,
            knn_func=knn_func,
            knn_annulus_inner=knn_annulus_inner,
            knn_annulus_outer=knn_annulus_outer,
            target_homology_dim=target_homology_dim,
            use_ripser_plus_plus=use_ripser_plus_plus,
            ripser_plus_plus_threshold=ripser_plus_plus_threshold,
            return_annlus_persistence_diagrams=return_annlus_persistence_diagrams,
            progressbar_enabled=progressbar_enabled,
        )
    return results
def grid_search_gad_annulus_radii(
    data_points: np.ndarray,
    manifold_dimension: int,
    search_size: int,
    use_knn_annulus: bool,
    search_params_max_diff: float = np.inf,
    min_annulus_parameter: float = 0,
    max_annulus_parameter: float = -1,
    data_point_ints: list = None,
    data_points_pairwise_distances: np.ndarray = None,
    data_points_approx_nn: ApproxNN = None,
    data_points_distance_metric: Callable = fastdist.euclidean,
    use_ripser_plus_plus: bool = False,
    ripser_plus_plus_threshold: int = 200,
    return_annlus_persistence_diagrams: bool = False,
    progressbar_enabled: bool = False,
    n_jobs: int = 1,
    verbose: int = 1,
) -> tuple:
    """
    Performs hyperparameter search to find the best set of inner and outer
    annulus radii for the geometric anomaly detection (GAD) Procedure 1 from [1].

    The "best" pair is the one maximizing the number of k-manifold (P_man)
    points.

    Parameters
    ----------
    data_points : np.ndarray
        All data points.
    manifold_dimension : int
        Manifold homology dimension (k parameter in [1]).
    search_size : int
        Number of radii parameters to use at most (all for outer radius and (all - 1)
        for inner radius).
    use_knn_annulus : bool
        Whether or not to use the KNN version of GAD; radii parameters are then
        neighbour counts instead of distances.
    search_params_max_diff : float
        Maximal difference between outer and inner radii for annulus.
    min_annulus_parameter : float
        Minimal annulus radius to search over.
    max_annulus_parameter : float
        Maximal annulus radius to search over; -1 means "derive automatically"
        (point count in KNN mode, maximal pairwise distance otherwise).
    data_point_ints : np.ndarray
        Array specifying which data point indices are used from all the data points.
    data_points_pairwise_distances : np.ndarray, optional
        Pairwise distances of data points (defaults to None).
    data_points_approx_nn : ApproxNN, optional
        ApproxNN instance (defaults to None).
    data_points_distance_metric : Callable, optional
        Distance metric callable to compute exact distance between any two data
        points (defaults to euclidean distance, `fastdist.euclidean`).
    use_ripser_plus_plus : bool
        Whether or not to use Ripser++ (GPU acceleration).
    ripser_plus_plus_threshold : int
        The least number of data points in order to use Ripser++, only has an effect
        if `use_ripser_plus_plus` is set to True.
    return_annlus_persistence_diagrams : bool
        Whether or not to return annulus persistence diagrams.
    progressbar_enabled : bool
        Whether or not the tqdm progressbar is enabled.
    n_jobs : int, optional
        Number of processes to use (defaults 1, -1 denotes all processes).
    verbose : int, optional
        Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose). Defaults to 1 (verbose).

    Returns
    -------
    result : tuple
        Tuple containing best result index, P_man counts in a list, results
        from geometric anomaly detection and annulus radii grid.

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>, & <NAME>.
    (2019). Geometric anomaly detection in data.
    """
    if max_annulus_parameter == -1:
        if use_knn_annulus:
            max_annulus_parameter = len(data_points) - 1
        else:
            if data_points_pairwise_distances is not None:
                max_annulus_parameter = np.max(data_points_pairwise_distances)
            else:
                raise ValueError("Maximum pairwise distance must be specified.")
    # Find values for radii to use during search
    # (the [1:] drops the lower bound itself, keeping `search_size` values;
    # integer dtype in KNN mode since parameters are neighbour counts).
    radii_space = np.linspace(
        start=min_annulus_parameter,
        stop=max_annulus_parameter,
        num=search_size + 1,
        dtype=int if use_knn_annulus else None,
    )[1:]
    # Grid-search best set of annulus radii to optimize number of P_man data points
    # by building all (inner, outer) pairs with inner < outer and a gap of at
    # most `search_params_max_diff`.
    annulus_radii_grid = []
    for inner_idx in range(search_size):
        for outer_idx in range(inner_idx + 1, search_size):
            inner_param = radii_space[inner_idx]
            outer_param = radii_space[outer_idx]
            if outer_param - inner_param <= search_params_max_diff:
                annulus_radii_grid.append((inner_param, outer_param))
    if verbose == 1:
        print("Grid searching...")
    gad_results = []
    P_man_counts = []
    for inner_param, outer_param in tqdm(
        annulus_radii_grid, disable=not progressbar_enabled
    ):
        if use_knn_annulus:
            if verbose == 1:
                print(
                    f"Inner radius neighbours: {inner_param}, outer radius neighbours: {outer_param}"
                )
            gad_params = {
                "knn_annulus_inner": inner_param,
                "knn_annulus_outer": outer_param,
            }
        else:
            if verbose == 1:
                print(
                    f"Inner radius: {inner_param:.3f}, outer radius: {outer_param:.3f}"
                )
            gad_params = {
                "annulus_inner_radius": inner_param,
                "annulus_outer_radius": outer_param,
            }
        gad_result = compute_gad(
            data_points=data_points,
            manifold_dimension=manifold_dimension,
            data_point_ints=data_point_ints,
            data_points_pairwise_distances=data_points_pairwise_distances,
            data_points_approx_nn=data_points_approx_nn,
            data_points_distance_metric=data_points_distance_metric,
            use_ripser_plus_plus=use_ripser_plus_plus,
            ripser_plus_plus_threshold=ripser_plus_plus_threshold,
            use_knn_annulus=use_knn_annulus,
            return_annlus_persistence_diagrams=return_annlus_persistence_diagrams,
            progressbar_enabled=progressbar_enabled,
            n_jobs=n_jobs,
            verbose=verbose,
            **gad_params,
        )
        # NOTE(review): this summary prints even when verbose == 0.
        print(
            "P_man:",
            len(gad_result["P_man"]),
            "P_int:",
            len(gad_result["P_int"]),
            "P_bnd:",
            len(gad_result["P_bnd"]),
        )
        P_man_counts.append(len(gad_result["P_man"]))
        gad_results.append(gad_result)
    # Find best result: the radii pair yielding the most k-manifold points.
    best_gad_result_idx = np.argmax(P_man_counts)
    return best_gad_result_idx, P_man_counts, gad_results, annulus_radii_grid
| [
"sys.path.append",
"fastdist.fastdist.vector_to_matrix_distance",
"numpy.argmax",
"topological_data_analysis.ripser_utils.run_ripser_plus_plus",
"numpy.argsort",
"tqdm.auto.tqdm",
"numpy.max",
"sklearn.metrics.euclidean_distances",
"numpy.linspace",
"utils.batch_list_gen",
"sharedmem.MapReduce",... | [((253, 274), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (268, 274), False, 'import sys\n'), ((7973, 8030), 'tqdm.auto.tqdm', 'tqdm', (['data_point_indices'], {'disable': '(not progressbar_enabled)'}), '(data_point_indices, disable=not progressbar_enabled)\n', (7977, 8030), False, 'from tqdm.auto import tqdm\n'), ((26562, 26619), 'tqdm.auto.tqdm', 'tqdm', (['annulus_radii_grid'], {'disable': '(not progressbar_enabled)'}), '(annulus_radii_grid, disable=not progressbar_enabled)\n', (26566, 26619), False, 'from tqdm.auto import tqdm\n'), ((28406, 28429), 'numpy.argmax', 'np.argmax', (['P_man_counts'], {}), '(P_man_counts)\n', (28415, 28429), True, 'import numpy as np\n'), ((3216, 3237), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (3226, 3237), True, 'import numpy as np\n'), ((18476, 18487), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (18485, 18487), False, 'from multiprocessing import cpu_count\n'), ((18889, 18916), 'sharedmem.copy', 'sharedmem.copy', (['data_points'], {}), '(data_points)\n', (18903, 18916), False, 'import sharedmem\n'), ((25802, 25936), 'numpy.linspace', 'np.linspace', ([], {'start': 'min_annulus_parameter', 'stop': 'max_annulus_parameter', 'num': '(search_size + 1)', 'dtype': '(int if use_knn_annulus else None)'}), '(start=min_annulus_parameter, stop=max_annulus_parameter, num=\n search_size + 1, dtype=int if use_knn_annulus else None)\n', (25813, 25936), True, 'import numpy as np\n'), ((9452, 9518), 'topological_data_analysis.ripser_utils.run_ripser_plus_plus', 'run_ripser_plus_plus', ([], {'point_cloud': 'A_y', 'max_dim': 'target_homology_dim'}), '(point_cloud=A_y, max_dim=target_homology_dim)\n', (9472, 9518), False, 'from topological_data_analysis.ripser_utils import run_ripser_plus_plus\n'), ((20204, 20234), 'sharedmem.MapReduce', 'sharedmem.MapReduce', ([], {'np': 'n_jobs'}), '(np=n_jobs)\n', (20223, 20234), False, 'import sharedmem\n'), ((19850, 
19910), 'utils.batch_list_gen', 'batch_list_gen', (['data_point_ints', 'num_data_points_per_process'], {}), '(data_point_ints, num_data_points_per_process)\n', (19864, 19910), False, 'from utils import batch_list_gen\n'), ((25596, 25634), 'numpy.max', 'np.max', (['data_points_pairwise_distances'], {}), '(data_points_pairwise_distances)\n', (25602, 25634), True, 'import numpy as np\n'), ((9668, 9692), 'sklearn.metrics.euclidean_distances', 'euclidean_distances', (['A_y'], {}), '(A_y)\n', (9687, 9692), False, 'from sklearn.metrics import euclidean_distances\n'), ((4922, 5041), 'fastdist.fastdist.vector_to_matrix_distance', 'fastdist.vector_to_matrix_distance', ([], {'u': 'data_points[point_idx]', 'm': 'data_points', 'metric': 'metric', 'metric_name': 'metric_name'}), '(u=data_points[point_idx], m=data_points,\n metric=metric, metric_name=metric_name)\n', (4956, 5041), False, 'from fastdist import fastdist\n')] |
#!/usr/bin/env python
import numpy as np
import pandas as pd
import config as cfg
import tensorflow as tf
from embedder import network_config, network_maker, network_utility, network_visualization
from embedder import preprocessor
SAVE_MODEL = True  # when True, main() calls model.save_model() after training
def main():
    """End-to-end training of a Titanic survival model with entity embeddings.

    Downloads the dataset, cleans it, fits the embedding network with class
    weighting for the imbalanced binary target, then persists the model,
    embedding weights, labels, scaler and visualizations.
    """
    # Fetch and clean the dataset
    titanic_csv = tf.keras.utils.get_file('titanic.csv', cfg.TITANIC_URL)
    titanic = prepare_titanic(pd.read_csv(titanic_csv))
    target_name = 'Survived'
    # Class counts for the binary target; used for the initial output bias
    # and for class weighting during training.
    neg, pos = np.bincount(titanic[target_name])
    output_bias = np.log([pos / neg])
    network = network_config.NeuralEmbedder(
        df=titanic,
        target_name=target_name,
        target_type=network_config.TargetType.BINARY_CLASSIFICATION,
        train_ratio=0.8,
        embedded_categories=network_utility.get_categorial_cols(titanic, target_name),
        numerical_categories=network_utility.get_numerical_cols(titanic, target_name),
        network_layers=[32],
        dropout_rate=0.1,
        output_bias=output_bias,
        epochs=20,
        batch_size=128,
        verbose=True,
        artifacts_path=cfg.RESULTS_DIR,
    )
    X_train, X_val, y_train, y_val, labels, scaler = preprocessor.prepare_network_data(
        network.df,
        network.target_name,
        len(network.numerical_categories),
        network.train_ratio,
    )
    class_weight = preprocessor.get_class_weights(neg, pos)
    model = network_maker.EmbeddingNetwork(network)
    model.fit(X_train, y_train, X_val, y_val, class_weight=class_weight)
    if SAVE_MODEL:
        model.save_model()
    # Save artifacts
    network.save_weights(model.get_embedded_weights())
    network.save_labels(labels)
    network.save_scaler(scaler)
    # Make visualization
    network_visualization.make_visualizations_from_config(network, extension='png')
def prepare_titanic(df):
    """Clean the raw Titanic frame: impute missing values and order columns.

    Missing ``Embarked`` entries are filled with 'S' (the most common port),
    missing ``Age`` with the median age of passengers of the same sex and
    class, and missing ``Fare`` with the median fare of the same class and
    port of embarkation.

    :param pandas.DataFrame df: raw Titanic data (not mutated).
    :return: cleaned DataFrame with columns ordered
        [Pclass, Age, SibSp, Parch, Fare, Sex, Embarked, Survived].
    """
    df = df.copy()  # work on a copy: do not mutate the caller's frame
    # Use the modal port for missing Embarked. .loc avoids the original
    # chained assignment (df.Embarked[mask] = ...), which is unreliable
    # and silently drops the update under pandas copy-on-write.
    df.loc[df.Embarked.isnull(), 'Embarked'] = 'S'
    # Fill missing values for age with median per (Sex, Pclass) group
    df['Age'] = df.groupby(['Sex', 'Pclass']).Age.transform(
        lambda x: x.fillna(x.median()))
    # Fill missing values for fare with median per (Pclass, Embarked) group
    df['Fare'] = df.groupby(['Pclass', 'Embarked']).Fare.transform(
        lambda x: x.fillna(x.median()))
    # Rearrange columns
    return df[['Pclass', 'Age', 'SibSp', 'Parch',
               'Fare', 'Sex', 'Embarked', 'Survived']]
if __name__ == '__main__':
main()
| [
"embedder.network_maker.EmbeddingNetwork",
"embedder.network_visualization.make_visualizations_from_config",
"numpy.log",
"embedder.preprocessor.get_class_weights",
"pandas.read_csv",
"embedder.network_utility.get_numerical_cols",
"embedder.network_utility.get_categorial_cols",
"embedder.preprocessor.... | [((277, 332), 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', (['"""titanic.csv"""', 'cfg.TITANIC_URL'], {}), "('titanic.csv', cfg.TITANIC_URL)\n", (300, 332), True, 'import tensorflow as tf\n'), ((342, 363), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (353, 363), True, 'import pandas as pd\n'), ((442, 470), 'numpy.bincount', 'np.bincount', (['df[target_name]'], {}), '(df[target_name])\n', (453, 470), True, 'import numpy as np\n'), ((489, 508), 'numpy.log', 'np.log', (['[pos / neg]'], {}), '([pos / neg])\n', (495, 508), True, 'import numpy as np\n'), ((538, 590), 'embedder.network_utility.get_categorial_cols', 'network_utility.get_categorial_cols', (['df', 'target_name'], {}), '(df, target_name)\n', (573, 590), False, 'from embedder import network_config, network_maker, network_utility, network_visualization\n'), ((618, 669), 'embedder.network_utility.get_numerical_cols', 'network_utility.get_numerical_cols', (['df', 'target_name'], {}), '(df, target_name)\n', (652, 669), False, 'from embedder import network_config, network_maker, network_utility, network_visualization\n'), ((1243, 1282), 'embedder.network_config.NeuralEmbedder', 'network_config.NeuralEmbedder', ([], {}), '(**params)\n', (1272, 1282), False, 'from embedder import network_config, network_maker, network_utility, network_visualization\n'), ((1394, 1503), 'embedder.preprocessor.prepare_network_data', 'preprocessor.prepare_network_data', (['network.df', 'network.target_name', 'n_numerical_cols', 'network.train_ratio'], {}), '(network.df, network.target_name,\n n_numerical_cols, network.train_ratio)\n', (1427, 1503), False, 'from embedder import preprocessor\n'), ((1761, 1801), 'embedder.preprocessor.get_class_weights', 'preprocessor.get_class_weights', (['neg', 'pos'], {}), '(neg, pos)\n', (1791, 1801), False, 'from embedder import preprocessor\n'), ((1814, 1853), 'embedder.network_maker.EmbeddingNetwork', 
'network_maker.EmbeddingNetwork', (['network'], {}), '(network)\n', (1844, 1853), False, 'from embedder import network_config, network_maker, network_utility, network_visualization\n'), ((2193, 2272), 'embedder.network_visualization.make_visualizations_from_config', 'network_visualization.make_visualizations_from_config', (['network'], {'extension': '"""png"""'}), "(network, extension='png')\n", (2246, 2272), False, 'from embedder import network_config, network_maker, network_utility, network_visualization\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
def random_noise_dithering(image, palette, order=128):
    """Render the image by perturbing every channel with Gaussian noise
    before quantizing it to the palette.

    Reference: http://caca.zoy.org/wiki/libcaca/study/1
    https://surma.dev/things/ditherpunk/

    :param :class:`PIL.Image` image: The image to apply the
        random noise dithering to.
    :param :class:`~hitherdither.colour.Palette` palette: The palette to use.
    :param int order: Noise intensity (standard deviation of the noise).
    :return: The dithered PIL image of type "P" using the input palette.
    """
    pixels = np.array(image)
    perturbed = pixels + np.random.normal(0, order, pixels.shape)
    return palette.create_PIL_png_from_rgb_array(perturbed)
def interleaved_gradient_noise(image, palette, order=128):
    """Render the image using the interleaved gradient noise pattern.

    Reference:
    http://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare

    :param :class:`PIL.Image` image: The image to apply the
        ordered dithering to.
    :param :class:`~hitherdither.colour.Palette` palette: The palette to use.
    :param int order: Noise intensity
    :return: The dithered PIL image of type "P" using the input palette.
    """
    ni = np.array(image)
    # Vectorised evaluation of Jimenez's interleaved gradient noise:
    #   noise(x, y) = frac(52.9829189 * frac(0.06711056*x + 0.00583715*y))
    # The previous per-pixel loop dropped the inner frac(): `a * b % 1`
    # parses as `(a * b) % 1`, deviating from the reference formula; it was
    # also an O(w*h) Python loop.  Both are fixed here.
    xs, ys = np.indices(ni.shape[:2])
    inner = (0.06711056 * xs + 0.00583715 * ys) % 1.0
    noise = ((52.9829189 * inner) % 1.0 - 0.5) * order
    # Broadcasting over the channel axis generalises the old
    # np.repeat(..., 3, 2), which assumed exactly three channels.
    new_image = ni + (noise[..., np.newaxis] if ni.ndim == 3 else noise)
    return palette.create_PIL_png_from_rgb_array(new_image)
| [
"numpy.ndindex",
"numpy.zeros",
"numpy.expand_dims",
"numpy.array",
"numpy.random.normal"
] | [((747, 762), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (755, 762), True, 'import numpy as np\n'), ((775, 811), 'numpy.random.normal', 'np.random.normal', (['(0)', 'order', 'ni.shape'], {}), '(0, order, ni.shape)\n', (791, 811), True, 'import numpy as np\n'), ((1439, 1454), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1447, 1454), True, 'import numpy as np\n'), ((1467, 1498), 'numpy.zeros', 'np.zeros', (['ni.shape[:2]', '"""float"""'], {}), "(ni.shape[:2], 'float')\n", (1475, 1498), True, 'import numpy as np\n'), ((1515, 1538), 'numpy.ndindex', 'np.ndindex', (['noise.shape'], {}), '(noise.shape)\n', (1525, 1538), True, 'import numpy as np\n'), ((1668, 1692), 'numpy.expand_dims', 'np.expand_dims', (['noise', '(2)'], {}), '(noise, 2)\n', (1682, 1692), True, 'import numpy as np\n')] |
"""Return the indices of the elements that are non-zero."""
from __future__ import annotations
from typing import Any, Tuple
import numpy
import numpoly
from ..baseclass import PolyLike
from ..dispatch import implements
@implements(numpy.nonzero)
def nonzero(x: PolyLike, **kwargs: Any) -> Tuple[numpy.ndarray, ...]:
    """
    Return the indices of the elements that are non-zero.

    Args:
        x:
            Input array.

    Returns:
        Indices of elements that are non-zero.

    Examples:
        >>> q0, q1 = numpoly.variable(2)
        >>> poly = numpoly.polynomial([[3*q0, 0, 0],
        ...                            [0, 4*q1, 0],
        ...                            [5*q0+q1, 6*q0, 0]])
        >>> poly
        polynomial([[3*q0, 0, 0],
                    [0, 4*q1, 0],
                    [q1+5*q0, 6*q0, 0]])
        >>> numpoly.nonzero(poly)
        (array([0, 1, 2, 2]), array([0, 1, 0, 1]))
        >>> poly[numpoly.nonzero(poly)]
        polynomial([3*q0, 4*q1, q1+5*q0, 6*q0])

    """
    poly = numpoly.aspolynomial(x)
    # An entry of the polynomial array is non-zero exactly when any of its
    # coefficient arrays is non-zero at that position.
    coefficients = numpy.asarray(poly.coefficients)
    return numpy.nonzero(coefficients.any(axis=0))
| [
"numpy.asarray",
"numpoly.aspolynomial"
] | [((1033, 1056), 'numpoly.aspolynomial', 'numpoly.aspolynomial', (['x'], {}), '(x)\n', (1053, 1056), False, 'import numpoly\n'), ((1092, 1121), 'numpy.asarray', 'numpy.asarray', (['x.coefficients'], {}), '(x.coefficients)\n', (1105, 1121), False, 'import numpy\n')] |
'''
Returns the two nodes with the highest in-degree and out-degree, respectively,
that are also connected. The highest out-degree should be able to reach the highest in-degree.
'''
from collections import deque
import numpy as np
import sys
def int_tuple(list):
    """Convert every element of *list* to ``int`` and return them as a tuple."""
    return tuple(map(int, list))
if __name__ == "__main__":
filename, lines = sys.argv[1], []
with open(filename) as file:
lines = file.readlines()
V, E = int_tuple(lines[7].strip().split(" ")[2:])
lines, adj_list = lines[8:], [{} for i in range(V)]
for line in lines:
u, v, w = int_tuple(line.strip().split(" ")[1:])
adj_list[u - 1][v - 1] = w
in_max, in_argmax, in_degrees = 0, 0, [0] * V
out_max, out_argmax, out_degrees = 0, 0, [0] * V
src_considered, sink_considered = set(), set()
for i, u in enumerate(adj_list):
out_degrees[i] += len(u)
for v in u:
in_degrees[v] += 1
for i in range(V):
if in_degrees[i] > in_max:
in_max = in_degrees[i]
in_argmax = i
if out_degrees[i] > out_max:
out_max = out_degrees[i]
out_argmax = i
in_argrees, out_argrees = np.flip(np.argsort(in_degrees)), np.flip(np.argsort(out_degrees))
for i in in_argrees:
for j in out_argrees:
if j == i:
break
queue, visited = deque(), set()
queue.append(out_degrees[j])
visited.add(out_degrees[j])
while queue:
node = queue.popleft()
for dest in adj_list[node]:
if dest not in visited:
queue.append(dest)
visited.add(dest)
if dest == in_argmax:
print("Highest in-degree is node " + str(i) + " with in-degree " + str(in_degrees[i]))
print("Highest out-degree is node " + str(j) + " with out-degree " + str(out_degrees[j]))
sys.exit(0)
| [
"numpy.argsort",
"sys.exit",
"collections.deque"
] | [((1202, 1224), 'numpy.argsort', 'np.argsort', (['in_degrees'], {}), '(in_degrees)\n', (1212, 1224), True, 'import numpy as np\n'), ((1235, 1258), 'numpy.argsort', 'np.argsort', (['out_degrees'], {}), '(out_degrees)\n', (1245, 1258), True, 'import numpy as np\n'), ((1389, 1396), 'collections.deque', 'deque', ([], {}), '()\n', (1394, 1396), False, 'from collections import deque\n'), ((2013, 2024), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2021, 2024), False, 'import sys\n')] |
import torchvision.models as models
import torch
import torch.nn as nn
import utils
import torch.optim as optim
from torchvision import transforms
import dataset
import numpy as np
import cv2
from utils import Extract
import time
from argparse import ArgumentParser, ArgumentTypeError
class Model(nn.Module):
    """Triplet-embedding network: a frozen DenseNet-121 branch plus two
    shallow conv/pool branches over the raw input, fused into a
    ``num_features``-dimensional L2-normalised embedding.

    NOTE(review): the class hard-codes 'cuda:0' throughout and will fail on
    CPU-only machines.
    """
    def __init__(self, eval=True, batch_size=128, num_features=128, threshold=.5, weights='weights'):
        # eval=True loads the saved checkpoint and switches to inference
        # mode; eval=False leaves the network in training mode.
        super(Model, self).__init__()
        # Pretrained backbone with all original parameters frozen.
        self.conv_net = models.densenet121(pretrained=True).cuda()
        for param in self.conv_net.parameters():
            param.requires_grad = False
        # The replacement classifier is a *new* Linear layer, so its
        # parameters are trainable (requires_grad defaults to True).
        self.conv_net.classifier = nn.Linear(1024, 4096).cuda()
        self.relu = nn.LeakyReLU().cuda()
        # Two shallow conv + max-pool branches at different scales.
        self.first_conv1 = nn.Conv2d(3, 96, kernel_size=8, padding=1, stride=16).cuda()
        self.first_conv2 = nn.MaxPool2d(3, 4, 1).cuda()
        self.second_conv1 = nn.Conv2d(3, 96, kernel_size=7, padding=4, stride=32).cuda()
        self.second_conv2 = nn.MaxPool2d(7, 2, 3).cuda()
        # Fusion layer: 7168 = 4096 (backbone) + 3072 (flattened conv
        # branches) -- the branch sizes depend on input resolution,
        # presumably sized for 224x224 inputs; confirm against the dataset.
        self.linear = nn.Linear(7168, num_features).cuda()
        self.threshold = threshold
        self.num_features = num_features
        self.weights = weights  # checkpoint file path
        if eval == True:
            self.load_state_dict(torch.load(self.weights))
            self.eval()
            # NOTE(review): this assignment *shadows* nn.Module.eval() --
            # after it, self.eval is a bool and self.eval() raises TypeError.
            self.eval = True
        else:
            self.train()
            self.eval = False  # NOTE(review): same shadowing issue as above.
        self.batch_size = batch_size
    def forward(self, input):
        # Every intermediate descriptor is L2-normalised before fusion.
        norm = nn.functional.normalize
        tensor1 = self.conv_net(input)
        tensor1 = norm(self.relu(tensor1))
        tensor2 = self.first_conv1(input)
        tensor2 = self.first_conv2(tensor2)
        tensor2 = norm(torch.flatten(tensor2, start_dim=1))
        tensor3 = self.second_conv1(input)
        tensor3 = self.second_conv2(tensor3)
        tensor3 = norm(torch.flatten(tensor3, start_dim=1))
        # Concatenate and re-normalise the two shallow descriptors ...
        tensor4 = norm(torch.cat((tensor2, tensor3), 1))
        # ... then fuse with the backbone features into the final embedding.
        return norm(self.relu(self.linear(torch.cat((tensor1, tensor4), 1))))
    def train_epochs(self, dir, epochs):
        """Train with triplet margin loss on DRDataset triplets under *dir*,
        saving a checkpoint to self.weights after every epoch."""
        lr = 0.01
        optimizer = torch.optim.Adam(self.parameters(), lr=lr)
        data = dataset.DRDataset(root=dir)
        loader = torch.utils.data.DataLoader(data, batch_size=self.batch_size,
                                             shuffle=True, num_workers=12,
                                             pin_memory=True)
        loss_function = torch.nn.TripletMarginLoss()
        # NOTE(review): these pre-allocated buffers are dead -- the loop
        # below rebinds the names to freshly transferred tensors each batch.
        image0_gpu = torch.zeros((self.batch_size, 3, 224, 224), device='cuda:0')
        image1_gpu = torch.zeros((self.batch_size, 3, 224, 224), device='cuda:0')
        image2_gpu = torch.zeros((self.batch_size, 3, 224, 224), device='cuda:0')
        loss_list = []
        try:
            for epoch in range(epochs):
                start_time = time.time()
                for i, (image0, image1, image2) in enumerate(loader):
                    # (anchor, positive, negative) triplet to the GPU.
                    image0_gpu = image0.to(device='cuda:0')
                    image1_gpu = image1.to(device='cuda:0')
                    image2_gpu = image2.to(device='cuda:0')
                    out0 = self.forward(image0_gpu)
                    out1 = self.forward(image1_gpu)
                    out2 = self.forward(image2_gpu)
                    loss = loss_function(out0, out1, out2)
                    optimizer.zero_grad(set_to_none=True)
                    loss.backward()
                    optimizer.step()
                    loss_list.append(loss.item())
                    # NOTE(review): loss_list is cleared every batch, so this
                    # mean is just the current batch loss.
                    print("epoch {}, batch {}, loss = {}".format(epoch, i,
                                                                 np.mean(loss_list)))
                    loss_list.clear()
                print("time for epoch {}".format(time.time()- start_time))
                # NOTE(review): `(epoch + 1) % 4` is truthy for 3 of every 4
                # epochs, so the learning rate halves on most epochs; a
                # comparison `== 0` (decay every 4th epoch) looks intended.
                if (epoch + 1) % 4:
                    lr /= 2
                    for param in optimizer.param_groups:
                        param['lr'] = lr
                torch.save(self.state_dict(), self.weights)
        except KeyboardInterrupt:
            print("Interrupted")
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
'--num_features',
type=int,
help='Size of the last linear layer',
default=32
)
parser.add_argument(
'--weights',
help='File containing the weights of the model'
)
parser.add_argument(
'--path',
help='Training images'
)
args = parser.parse_args()
m = Model(False, num_features=args.num_features)
m.train_epochs(args.path, 5)
| [
"torch.flatten",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.nn.MaxPool2d",
"torchvision.models.densenet121",
"torch.nn.Conv2d",
"torch.load",
"torch.cat",
"time.time",
"numpy.mean",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.LeakyReLU",
"torch.nn.TripletMarginLoss",
... | [((4062, 4078), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (4076, 4078), False, 'from argparse import ArgumentParser, ArgumentTypeError\n'), ((2151, 2178), 'dataset.DRDataset', 'dataset.DRDataset', ([], {'root': 'dir'}), '(root=dir)\n', (2168, 2178), False, 'import dataset\n'), ((2196, 2308), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['data'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': '(12)', 'pin_memory': '(True)'}), '(data, batch_size=self.batch_size, shuffle=True,\n num_workers=12, pin_memory=True)\n', (2223, 2308), False, 'import torch\n'), ((2420, 2448), 'torch.nn.TripletMarginLoss', 'torch.nn.TripletMarginLoss', ([], {}), '()\n', (2446, 2448), False, 'import torch\n'), ((2471, 2531), 'torch.zeros', 'torch.zeros', (['(self.batch_size, 3, 224, 224)'], {'device': '"""cuda:0"""'}), "((self.batch_size, 3, 224, 224), device='cuda:0')\n", (2482, 2531), False, 'import torch\n'), ((2553, 2613), 'torch.zeros', 'torch.zeros', (['(self.batch_size, 3, 224, 224)'], {'device': '"""cuda:0"""'}), "((self.batch_size, 3, 224, 224), device='cuda:0')\n", (2564, 2613), False, 'import torch\n'), ((2635, 2695), 'torch.zeros', 'torch.zeros', (['(self.batch_size, 3, 224, 224)'], {'device': '"""cuda:0"""'}), "((self.batch_size, 3, 224, 224), device='cuda:0')\n", (2646, 2695), False, 'import torch\n'), ((1688, 1723), 'torch.flatten', 'torch.flatten', (['tensor2'], {'start_dim': '(1)'}), '(tensor2, start_dim=1)\n', (1701, 1723), False, 'import torch\n'), ((1837, 1872), 'torch.flatten', 'torch.flatten', (['tensor3'], {'start_dim': '(1)'}), '(tensor3, start_dim=1)\n', (1850, 1872), False, 'import torch\n'), ((1898, 1930), 'torch.cat', 'torch.cat', (['(tensor2, tensor3)', '(1)'], {}), '((tensor2, tensor3), 1)\n', (1907, 1930), False, 'import torch\n'), ((476, 511), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (494, 511), True, 'import 
torchvision.models as models\n'), ((645, 666), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(4096)'], {}), '(1024, 4096)\n', (654, 666), True, 'import torch.nn as nn\n'), ((694, 708), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (706, 708), True, 'import torch.nn as nn\n'), ((744, 797), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(96)'], {'kernel_size': '(8)', 'padding': '(1)', 'stride': '(16)'}), '(3, 96, kernel_size=8, padding=1, stride=16)\n', (753, 797), True, 'import torch.nn as nn\n'), ((832, 853), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3)', '(4)', '(1)'], {}), '(3, 4, 1)\n', (844, 853), True, 'import torch.nn as nn\n'), ((890, 943), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(96)'], {'kernel_size': '(7)', 'padding': '(4)', 'stride': '(32)'}), '(3, 96, kernel_size=7, padding=4, stride=32)\n', (899, 943), True, 'import torch.nn as nn\n'), ((979, 1000), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(7)', '(2)', '(3)'], {}), '(7, 2, 3)\n', (991, 1000), True, 'import torch.nn as nn\n'), ((1031, 1060), 'torch.nn.Linear', 'nn.Linear', (['(7168)', 'num_features'], {}), '(7168, num_features)\n', (1040, 1060), True, 'import torch.nn as nn\n'), ((1237, 1261), 'torch.load', 'torch.load', (['self.weights'], {}), '(self.weights)\n', (1247, 1261), False, 'import torch\n'), ((2802, 2813), 'time.time', 'time.time', ([], {}), '()\n', (2811, 2813), False, 'import time\n'), ((1975, 2007), 'torch.cat', 'torch.cat', (['(tensor1, tensor4)', '(1)'], {}), '((tensor1, tensor4), 1)\n', (1984, 2007), False, 'import torch\n'), ((3599, 3617), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (3606, 3617), True, 'import numpy as np\n'), ((3703, 3714), 'time.time', 'time.time', ([], {}), '()\n', (3712, 3714), False, 'import time\n')] |
import os
import numpy as np
import torch
import torch.nn as nn
from sklearn.feature_extraction.text import TfidfVectorizer
from torch.optim import Adam
from torch.utils.data import DataLoader
# Global compute device for the whole module: first CUDA GPU when
# available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def read_dataset(file_path):
    """
    Read the IMDB-style review dataset rooted at *file_path*.

    *file_path* must contain ``neg`` and ``pos`` sub-directories whose files
    hold one review each.

    Returns
    -------
    data : list of str
        Review texts, all negative reviews first, then all positive ones.
    labels : ndarray of int
        0 for each negative review, 1 for each positive one, aligned with
        ``data``.
    """
    def _read_reviews(sub_dir):
        # Recursively read every file under file_path/sub_dir.
        # The original version leaked file handles; `with` closes them.
        texts = []
        for root, _dirs, files in os.walk(file_path + sub_dir):
            for file_name in files:
                with open(os.path.join(root, file_name), encoding="utf-8", errors="ignore") as fp:
                    texts.append(fp.read())
        return texts

    neg_data = _read_reviews("/neg")
    pos_data = _read_reviews("/pos")
    labels = np.concatenate([np.repeat(0, len(neg_data)), np.repeat(1, len(pos_data))])
    return neg_data + pos_data, labels
class LinRegModel(nn.Module):
    """Single-layer logistic regression: linear map to one logit + sigmoid."""

    def __init__(self, in_dim):
        super(LinRegModel, self).__init__()
        linear = nn.Linear(in_features=in_dim, out_features=1)
        # Xavier-initialise the weight matrix before wrapping the layers.
        torch.nn.init.xavier_normal_(linear.weight.data)
        self.layers = nn.Sequential(linear, nn.Sigmoid())

    def forward(self, samples):
        # (batch, in_dim) -> (batch, 1) -> (batch,) probability per sample.
        probabilities = self.layers(samples)
        return probabilities.squeeze(1)
class ReviewDataset(torch.utils.data.Dataset):
    """Minimal map-style dataset pairing feature rows with labels.

    The previous version subclassed ``nn.Module``, which only works with
    ``DataLoader`` incidentally; ``torch.utils.data.Dataset`` is the intended
    base class for map-style datasets and requires the same
    ``__len__``/``__getitem__`` protocol implemented here.
    """

    def __init__(self, data, labels):
        super(ReviewDataset, self).__init__()
        self.data = data      # indexable feature rows
        self.labels = labels  # indexable labels, aligned with data

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, item):
        return self.data[item], self.labels[item]
# Load the IMDB train/test splits once at module import.
# NOTE(review): reading the whole corpus at import time is slow; consider
# moving this behind an `if __name__ == "__main__":` guard.
train_data, train_labels = read_dataset("aclImdb/train")
test_data, test_labels = read_dataset("aclImdb/test")
def get_dataloaders(tf_idf_thresh, batch_size):
    """Vectorise the module-level train/test reviews with TF-IDF (1-3 grams,
    at most *tf_idf_thresh* features) and wrap them in DataLoaders.

    The training loader shuffles; the test loader does not.
    """
    tfidf = TfidfVectorizer(ngram_range=(1, 3), max_features=tf_idf_thresh)
    # Fit on the training corpus only; the test corpus is merely transformed.
    X_train = torch.from_numpy(tfidf.fit_transform(train_data).toarray()).float()
    X_test = torch.from_numpy(tfidf.transform(test_data).toarray()).float()
    Y_train = torch.from_numpy(train_labels).float()
    Y_test = torch.from_numpy(test_labels).float()
    train_loader = DataLoader(dataset=ReviewDataset(X_train, Y_train), shuffle=True, batch_size=batch_size)
    test_loader = DataLoader(dataset=ReviewDataset(X_test, Y_test), shuffle=False, batch_size=batch_size)
    return train_loader, test_loader
# Hyper-parameters / run configuration consumed by train_torch_model.
train_options = {
    "epochs": 1000,  # maximum number of training epochs
    "load_model": True,  # NOTE(review): not consumed by train_torch_model
    "save_model": True,  # NOTE(review): not consumed by train_torch_model
    "batch_size": 256,  # mini-batch size for both loaders
    "lr_rate": 0.001,  # Adam learning rate
    "train_model": True,  # NOTE(review): not consumed by train_torch_model
    "tf_threshold": 1000,  # TfidfVectorizer max_features (= model input dim)
    "model_path": "saved_models/lin_reg_emb"  # NOTE(review): not consumed by train_torch_model
}
def check_val_accuracy(model, loader):
    """Evaluate *model* on *loader*, print precision/recall/F1, return accuracy.

    Predictions use a fixed 0.5 decision threshold.  The returned accuracy
    (correct predictions / dataset size) and the printed metrics are torch
    tensors.  Note: precision/recall are NaN when no positives are predicted
    (0/0 in tensor arithmetic), matching the original behaviour.
    """
    model.eval()
    total_correct = 0
    tp = 0
    fp = 0
    fn = 0
    with torch.no_grad():
        for X, Y in loader:
            Y_pred = model(X.to(device)).cpu()
            # Threshold once and reuse for accuracy and the confusion counts.
            Y_pred_label = Y_pred > 0.5
            total_correct += (Y_pred_label == Y).sum()
            tp += (Y_pred_label[Y == 1] == 1).sum()
            fp += (Y_pred_label[Y == 0] == 1).sum()
            fn += (Y_pred_label[Y == 1] == 0).sum()
    precision = tp / (fp + tp)
    recall = tp / (tp + fn)
    print("Precision: " + str(precision))
    print("Recall: " + str(recall))
    print("F1 Score: " + str(2 * precision * recall / (precision + recall)))
    # len(dataset) is the idiomatic spelling of dataset.__len__().
    return total_correct / len(loader.dataset)
def train_torch_model(train_options):
    """Train a LinRegModel on TF-IDF review vectors per *train_options*.

    Uses the keys "tf_threshold", "batch_size", "lr_rate" and "epochs";
    evaluates on the test loader after every epoch and stops early after
    30 consecutive epochs without a test-accuracy improvement.
    """
    train_loader, test_loader = get_dataloaders(train_options["tf_threshold"], train_options["batch_size"])
    model = LinRegModel(train_options["tf_threshold"])
    model.to(device)
    # Baseline accuracy of the untrained model (also prints P/R/F1).
    max_acc = check_val_accuracy(model, test_loader)
    optimizer = Adam(params= model.parameters(), lr=train_options["lr_rate"])
    loss_fn = nn.BCELoss()
    no_max_count = 0  # epochs since the last test-accuracy improvement
    for epoch_num in range(train_options["epochs"]):
        print("Epoch:" + str(epoch_num+1))
        total_loss = 0
        for X,Y in train_loader:
            X = X.to(device)
            Y_pred = model(X).cpu()
            loss = loss_fn(Y_pred, Y)
            total_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print("Loss:" + str(total_loss))
        # NOTE(review): early stopping decides when to halt, but the final
        # weights are the last epoch's -- no best-model checkpoint is kept.
        acc = check_val_accuracy(model, test_loader)
        print("Acc:" + str(acc))
        print("------------------------")
        if max_acc < acc:
            no_max_count = 0
            max_acc = acc
        else:
            no_max_count += 1
            if no_max_count > 30:
                break
    print("Test set scores")
    acc = check_val_accuracy(model, test_loader)
    print("Test Accuracy: " + str(acc))
    print("------------------------")
    print("Train set scores")
    acc = check_val_accuracy(model, train_loader)
    print("Train accuracy: " + str(acc))

# Kick off training immediately on import/run.
# NOTE(review): consider guarding with `if __name__ == "__main__":`.
train_torch_model(train_options)
| [
"torch.from_numpy",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"sklearn.feature_extraction.text.TfidfVectorizer",
"os.walk",
"torch.nn.init.xavier_normal_",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.no_grad",
"os.path.join",
"numpy.concatenate",
"torch.nn.Sigmoid"
] | [((541, 568), 'os.walk', 'os.walk', (["(file_path + '/neg')"], {}), "(file_path + '/neg')\n", (548, 568), False, 'import os\n'), ((759, 786), 'os.walk', 'os.walk', (["(file_path + '/pos')"], {}), "(file_path + '/pos')\n", (766, 786), False, 'import os\n'), ((1051, 1091), 'numpy.concatenate', 'np.concatenate', (['[neg_labels, pos_labels]'], {}), '([neg_labels, pos_labels])\n', (1065, 1091), True, 'import numpy as np\n'), ((2017, 2080), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 3)', 'max_features': 'tf_idf_thresh'}), '(ngram_range=(1, 3), max_features=tf_idf_thresh)\n', (2032, 2080), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2452, 2522), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'shuffle': '(True)', 'batch_size': 'batch_size'}), '(dataset=train_dataset, shuffle=True, batch_size=batch_size)\n', (2462, 2522), False, 'from torch.utils.data import DataLoader\n'), ((2547, 2617), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'shuffle': '(False)', 'batch_size': 'batch_size'}), '(dataset=test_dataset, shuffle=False, batch_size=batch_size)\n', (2557, 2617), False, 'from torch.utils.data import DataLoader\n'), ((4011, 4023), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (4021, 4023), True, 'import torch.nn as nn\n'), ((228, 253), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (251, 253), False, 'import torch\n'), ((1393, 1449), 'torch.nn.init.xavier_normal_', 'torch.nn.init.xavier_normal_', (['self.layers[0].weight.data'], {}), '(self.layers[0].weight.data)\n', (1421, 1449), False, 'import torch\n'), ((3007, 3022), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3020, 3022), False, 'import torch\n'), ((1303, 1348), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'in_dim', 'out_features': '(1)'}), '(in_features=in_dim, out_features=1)\n', (1312, 1348), True, 'import torch.nn as 
nn\n'), ((1362, 1374), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1372, 1374), True, 'import torch.nn as nn\n'), ((624, 653), 'os.path.join', 'os.path.join', (['root', 'file_name'], {}), '(root, file_name)\n', (636, 653), False, 'import os\n'), ((842, 871), 'os.path.join', 'os.path.join', (['root', 'file_name'], {}), '(root, file_name)\n', (854, 871), False, 'import os\n'), ((2307, 2337), 'torch.from_numpy', 'torch.from_numpy', (['train_labels'], {}), '(train_labels)\n', (2323, 2337), False, 'import torch\n'), ((2393, 2422), 'torch.from_numpy', 'torch.from_numpy', (['test_labels'], {}), '(test_labels)\n', (2409, 2422), False, 'import torch\n')] |
import numpy as np
import pandas as pd
from os import path
def g(inpu):
    """Logistic sigmoid of *inpu*; works element-wise on ndarrays."""
    return 1.0 / (1.0 + np.exp(-inpu))
def cost(inpu, actual):
    """Per-sample log-likelihood term: actual*log(p) + (1-actual)*log(1-p)."""
    return actual * np.log(inpu) + (1 - actual) * np.log(1 - inpu)
class LogisticRegressor():
    """Binary logistic regression trained with batch gradient descent.

    Relies on the module-level ``g`` (sigmoid) and ``cost`` (log-likelihood
    term) helpers.  Training stops when the change in cost between
    consecutive iterations drops below 1e-7.
    """

    def __init__(self, lr=0.05):
        # store learning rate and weights
        self.lr = lr
        self.weights = None

    @staticmethod
    def _with_bias(features):
        """Return *features* with a leading column of ones (the bias term)."""
        m = features.shape[0]
        return np.hstack([np.ones((m, 1)), features])

    def fit(self, train_data_feature, train_data_label):
        """Fit the weight vector (bias first) by gradient descent.

        Parameters
        ----------
        train_data_feature : ndarray of shape (m, n)
        train_data_label : ndarray of shape (m,) with values in {0, 1}
        """
        m, n = train_data_feature.shape
        # Design matrix with intercept column.  The original built this with
        # a quadratic Python loop (duplicated in predict); np.hstack is
        # equivalent and vectorized.
        X = self._with_bias(train_data_feature)
        # setting initial value of the parameters as 1
        coeff_vals = np.ones(n + 1)
        prev_jtheta = None
        h_theta = g(np.dot(X, coeff_vals))
        loss = h_theta - train_data_label
        # current cost (negative mean log-likelihood)
        cur_jtheta = np.sum(cost(h_theta, train_data_label)) * (-1 / m)
        # converge when consecutive costs differ by less than 1e-7
        while prev_jtheta is None or abs(prev_jtheta - cur_jtheta) > 0.0000001:
            # gradient descent step; all coefficients updated simultaneously
            descent_vals = (np.dot(X.transpose(), loss) * self.lr) / m
            coeff_vals -= descent_vals
            prev_jtheta = cur_jtheta
            h_theta = g(np.dot(X, coeff_vals))
            loss = h_theta - train_data_label
            cur_jtheta = np.sum(cost(h_theta, train_data_label)) * (-1 / m)
            print(f"Difference between consecutive costs: {abs(prev_jtheta - cur_jtheta)}\t", end="\r", flush=True)
        self.weights = coeff_vals

    def predict(self, data_feature):
        """Return hard 0/1 predictions for *data_feature*."""
        h_theta = g(np.dot(self._with_bias(data_feature), self.weights))
        # threshold is chosen as 0.5
        h_theta[h_theta >= 0.5] = 1
        h_theta[h_theta < 0.5] = 0
        return h_theta

    def get_params(self, deep=False):
        """Return the estimator's hyper-parameters (sklearn-style)."""
        return {'lr': self.lr}
| [
"numpy.log",
"numpy.zeros",
"numpy.ones",
"numpy.exp",
"numpy.dot"
] | [((549, 575), 'numpy.zeros', 'np.zeros', ([], {'shape': '(m, n + 1)'}), '(shape=(m, n + 1))\n', (557, 575), True, 'import numpy as np\n'), ((845, 859), 'numpy.ones', 'np.ones', (['(n + 1)'], {}), '(n + 1)\n', (852, 859), True, 'import numpy as np\n'), ((2215, 2241), 'numpy.zeros', 'np.zeros', ([], {'shape': '(m, n + 1)'}), '(shape=(m, n + 1))\n', (2223, 2241), True, 'import numpy as np\n'), ((90, 103), 'numpy.exp', 'np.exp', (['(-inpu)'], {}), '(-inpu)\n', (96, 103), True, 'import numpy as np\n'), ((150, 162), 'numpy.log', 'np.log', (['inpu'], {}), '(inpu)\n', (156, 162), True, 'import numpy as np\n'), ((179, 195), 'numpy.log', 'np.log', (['(1 - inpu)'], {}), '(1 - inpu)\n', (185, 195), True, 'import numpy as np\n'), ((961, 982), 'numpy.dot', 'np.dot', (['X', 'coeff_vals'], {}), '(X, coeff_vals)\n', (967, 982), True, 'import numpy as np\n'), ((2449, 2472), 'numpy.dot', 'np.dot', (['X', 'self.weights'], {}), '(X, self.weights)\n', (2455, 2472), True, 'import numpy as np\n'), ((1635, 1656), 'numpy.dot', 'np.dot', (['X', 'coeff_vals'], {}), '(X, coeff_vals)\n', (1641, 1656), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Author: <NAME> <<EMAIL>>
License: MIT
"""
import numpy as np
__all__ = [ "QuantityError" ]
class QuantityError:
    """
    Uncertainty information attached to a physical quantity.

    Parameters
    ----------
    uncertainty : scalar or None, default None
        Absolute value of a symmetric deviation from the main value.
    lower_uncertainty : scalar or None, default None
        Absolute value of the deviation from the main value towards
        smaller values.
    upper_uncertainty : scalar or None, default None
        Absolute value of the deviation from the main value towards
        larger values.
    confidence_level : scalar or None, default None
        Confidence level of the uncertainty, given in percent (0-100).
    """

    _ATTRIBUTES = ["uncertainty", "lower_uncertainty", "upper_uncertainty", "confidence_level"]

    def __init__(self, uncertainty = None, lower_uncertainty = None,
                 upper_uncertainty = None, confidence_level = None):
        # Guard-clause validation: reject bad values up front, then assign.
        if uncertainty is not None and (not isinstance(uncertainty, (int, float)) or uncertainty < 0.0):
            raise ValueError("uncertainty must be a positive scalar")
        if lower_uncertainty is not None and not isinstance(lower_uncertainty, (int, float)):
            raise ValueError("lower_uncertainty must be a scalar")
        if upper_uncertainty is not None and not isinstance(upper_uncertainty, (int, float)):
            raise ValueError("upper_uncertainty must be a scalar")
        if confidence_level is not None and (not isinstance(confidence_level, (int, float))
                                             or confidence_level < 0.0 or confidence_level > 100.0):
            raise ValueError("confidence_level must be a scalar in [ 0., 100. ]")
        self._uncertainty = uncertainty
        self._lower_uncertainty = lower_uncertainty
        self._upper_uncertainty = upper_uncertainty
        self._confidence_level = confidence_level

    def __repr__(self):
        body = "%s: %s" % ("uncertainty", self._print_attr("uncertainty"))
        # The asymmetric bounds are shown only when both are present.
        if self._lower_uncertainty is not None and self._upper_uncertainty is not None:
            body += ", lower: %s, upper: %s" % (
                self._print_attr("lower_uncertainty"),
                self._print_attr("upper_uncertainty"),
            )
        return "QuantityError(%s)" % body

    def _print_attr(self, attr):
        # Resolve a validated public attribute name to its backing value.
        if attr not in self._ATTRIBUTES:
            raise ValueError("error_type should be either 'uncertainty', 'lower_uncertainty', 'upper_uncertainty' or 'confidence_level'")
        return getattr(self, "_" + attr)

    def toarray(self):
        """
        Save attributes to array.

        Returns
        -------
        arr : ndarray
            Array of [uncertainty, lower_uncertainty, upper_uncertainty,
            confidence_level].
        """
        return np.array([self._uncertainty, self._lower_uncertainty,
                         self._upper_uncertainty, self._confidence_level])

    @property
    def uncertainty(self):
        """scalar or None: absolute symmetric deviation from the main value."""
        return self._uncertainty

    @uncertainty.setter
    def uncertainty(self, value):
        self._uncertainty = value

    @property
    def lower_uncertainty(self):
        """scalar or None: absolute deviation towards smaller values."""
        return self._lower_uncertainty

    @lower_uncertainty.setter
    def lower_uncertainty(self, value):
        self._lower_uncertainty = value

    @property
    def upper_uncertainty(self):
        """scalar or None: absolute deviation towards larger values."""
        return self._upper_uncertainty

    @upper_uncertainty.setter
    def upper_uncertainty(self, value):
        self._upper_uncertainty = value

    @property
    def confidence_level(self):
        """scalar or None: confidence level of the uncertainty, in percent (0-100)."""
        return self._confidence_level

    @confidence_level.setter
    def confidence_level(self, value):
        self._confidence_level = value
"numpy.array"
] | [((3256, 3364), 'numpy.array', 'np.array', (['[self._uncertainty, self._lower_uncertainty, self._upper_uncertainty, self.\n _confidence_level]'], {}), '([self._uncertainty, self._lower_uncertainty, self.\n _upper_uncertainty, self._confidence_level])\n', (3264, 3364), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from keras import Sequential, optimizers
from keras.layers import Dense, Conv1D, MaxPool1D, Flatten, Dropout, LSTM, Bidirectional
from keras.models import load_model, save_model
import matplotlib.pyplot as plt
''' Load and put data into time series format
'''
# Pre-standardised 6-hour market data with Bollinger-band features.
raw_data = pd.read_csv('./data/preprocessed_6hr_data_lastYear_standardized_BB.csv')
data = raw_data.values # convert to numpy arrays
# NOTE(review): Y_raw is assigned but never used below.
Y_raw = raw_data['Close'].values # The Close values will be used as labels
print(data.shape)
# Data size. Building the training data tensor X_construct will be shape (m, n_f, t)
timesteps = 1 # Number of 6 hour time chunks to treat as input feature
m = data.shape[0] - timesteps
n_f = data.shape[1]
''' The data is currently in dimension (n_examples, n_features).
We want to add a third dimension, for timesteps. Each new timestep entry will be the same data, shifted by
a timestep. For example, if there are 3 timesteps, the the third dimension will be size 3 where the first matrix
is the original data, the second matrix is the original data shifted up by one row, and the third matrix is the original
data shifted up by two rows. This would repeat t timesteps. '''
# The data will end in final shape of (m, n_f, t)
# Instantiate an array for constructing the 3 dimensional tensor.
X_construct = data[:-timesteps] # t timesteps must be shaved off the bottom of the data
X_construct = X_construct.reshape(m, n_f, 1) # reshape with a 3rd dimension for t
# With timesteps == 1 this loop body never runs; it only matters for t > 1.
for t in range(1, timesteps):
    X_ti = np.copy(data[t:m+t]).reshape(m, n_f, 1) # original data shifted by t rows
    X_construct = np.concatenate((X_construct, X_ti), axis=2) # add this timesteps matrix to the tensor
print(X_construct.shape)
''' For Y, the labels, we want 1s and 0s to represent trend up vs trend down.
Y_change calculates the change in price, which is converted to 1s and 0s using numpy'''
Y_diff = raw_data['Close'].diff().iloc[timesteps:].values # Get the difference in price
Y = np.asarray((Y_diff > 0)).astype(int) # Convert to 1 (trend up), or 0 (trend down)
print(Y.shape)
X = X_construct.swapaxes(1,2) # shape (m, n_f, t) -> (m, t, n_f) for keras LSTM
Y = Y.reshape(len(Y), 1) # Column vector shape
print(X.shape)
print(Y.shape)
# Split training and testing data
# Chronological 90/10 split (no shuffling, as befits time-series data).
test_split = int(0.9*len(Y))
X_train, X_test = X[:test_split], X[test_split:]
Y_train, Y_test = Y[:test_split], Y[test_split:]
# Toggle between a 3-model averaging ensemble and a single saved LSTM.
ensemble = False
if ensemble:
    model1 = load_model('./models/model1.h5')
    model2 = load_model('./models/model2.h5')
    model3 = load_model('./models/model3.h5')
    # Take average of 3 model's predictions
    predictions = (model1.predict(X_test) + model2.predict(X_test) + model3.predict(X_test)) / 3
else:
    model = load_model('./models/LSTM-6hr-standardized.h5')
    print(model.summary())
    predictions = model.predict(X_test)
# Convert to 0s and 1s
predictions = np.array([round(i) for i in np.squeeze(predictions)]).astype(int)
Y_test = np.squeeze(Y_test)
print(predictions.shape)
print(Y_test.shape)
# Check accuracy
correct_counter = int(np.sum(predictions == Y_test))
print('{0} / {1} correct trend predictions ({2} %)'.format(correct_counter, len(predictions), round(correct_counter/len(predictions)*100, 2)))
## PLOT RESULTS ##
# Get the indicies for where the model predicted trend up (buy) vs trend down (sell)
index = np.arange(len(predictions))
buy_indicies = np.where(predictions > 0.5)[0]
sell_indicies = np.where(predictions < 0.5)[0]
# Close prices aligned with the prediction window.
# NOTE(review): this slice assumes the predictions correspond to the *last*
# len(predictions) rows before the trimmed tail -- verify the alignment
# against how X_test was built above.
trend = np.squeeze(raw_data['Close'][-(len(predictions)+timesteps):-timesteps].values) # The close values using to make predictions
buys = trend[buy_indicies]
sells = trend[sell_indicies]
plt.plot(index, trend, label='Actual')
plt.plot(buy_indicies, buys, 'go', label='Buys')
plt.plot(sell_indicies, sells, 'ro', label='Sells')
plt.legend()
plt.title('{0} / {1} correct trend predictions ({2} %)'.format(correct_counter, len(predictions), round(correct_counter/len(predictions)*100, 2)))
plt.show()
| [
"keras.models.load_model",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.plot",
"numpy.copy",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.asarray",
"numpy.where",
"numpy.squeeze",
"numpy.concatenate"
] | [((320, 392), 'pandas.read_csv', 'pd.read_csv', (['"""./data/preprocessed_6hr_data_lastYear_standardized_BB.csv"""'], {}), "('./data/preprocessed_6hr_data_lastYear_standardized_BB.csv')\n", (331, 392), True, 'import pandas as pd\n'), ((3039, 3057), 'numpy.squeeze', 'np.squeeze', (['Y_test'], {}), '(Y_test)\n', (3049, 3057), True, 'import numpy as np\n'), ((3763, 3801), 'matplotlib.pyplot.plot', 'plt.plot', (['index', 'trend'], {'label': '"""Actual"""'}), "(index, trend, label='Actual')\n", (3771, 3801), True, 'import matplotlib.pyplot as plt\n'), ((3803, 3851), 'matplotlib.pyplot.plot', 'plt.plot', (['buy_indicies', 'buys', '"""go"""'], {'label': '"""Buys"""'}), "(buy_indicies, buys, 'go', label='Buys')\n", (3811, 3851), True, 'import matplotlib.pyplot as plt\n'), ((3853, 3904), 'matplotlib.pyplot.plot', 'plt.plot', (['sell_indicies', 'sells', '"""ro"""'], {'label': '"""Sells"""'}), "(sell_indicies, sells, 'ro', label='Sells')\n", (3861, 3904), True, 'import matplotlib.pyplot as plt\n'), ((3906, 3918), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3916, 3918), True, 'import matplotlib.pyplot as plt\n'), ((4068, 4078), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4076, 4078), True, 'import matplotlib.pyplot as plt\n'), ((1657, 1700), 'numpy.concatenate', 'np.concatenate', (['(X_construct, X_ti)'], {'axis': '(2)'}), '((X_construct, X_ti), axis=2)\n', (1671, 1700), True, 'import numpy as np\n'), ((2511, 2543), 'keras.models.load_model', 'load_model', (['"""./models/model1.h5"""'], {}), "('./models/model1.h5')\n", (2521, 2543), False, 'from keras.models import load_model, save_model\n'), ((2558, 2590), 'keras.models.load_model', 'load_model', (['"""./models/model2.h5"""'], {}), "('./models/model2.h5')\n", (2568, 2590), False, 'from keras.models import load_model, save_model\n'), ((2605, 2637), 'keras.models.load_model', 'load_model', (['"""./models/model3.h5"""'], {}), "('./models/model3.h5')\n", (2615, 2637), False, 'from keras.models 
import load_model, save_model\n'), ((2805, 2852), 'keras.models.load_model', 'load_model', (['"""./models/LSTM-6hr-standardized.h5"""'], {}), "('./models/LSTM-6hr-standardized.h5')\n", (2815, 2852), False, 'from keras.models import load_model, save_model\n'), ((3148, 3177), 'numpy.sum', 'np.sum', (['(predictions == Y_test)'], {}), '(predictions == Y_test)\n', (3154, 3177), True, 'import numpy as np\n'), ((3488, 3515), 'numpy.where', 'np.where', (['(predictions > 0.5)'], {}), '(predictions > 0.5)\n', (3496, 3515), True, 'import numpy as np\n'), ((3536, 3563), 'numpy.where', 'np.where', (['(predictions < 0.5)'], {}), '(predictions < 0.5)\n', (3544, 3563), True, 'import numpy as np\n'), ((2035, 2057), 'numpy.asarray', 'np.asarray', (['(Y_diff > 0)'], {}), '(Y_diff > 0)\n', (2045, 2057), True, 'import numpy as np\n'), ((1564, 1586), 'numpy.copy', 'np.copy', (['data[t:m + t]'], {}), '(data[t:m + t])\n', (1571, 1586), True, 'import numpy as np\n'), ((2991, 3014), 'numpy.squeeze', 'np.squeeze', (['predictions'], {}), '(predictions)\n', (3001, 3014), True, 'import numpy as np\n')] |
import bezier
import numpy as np
from PIL import Image, ImageDraw, ImageFilter
from color_gradient import ColorGradient
class Vine:
    """ Vine class which uses bezier curves to draw itself on an image

    Each vine is a single bezier curve built from a fixed 5-point control
    template (`vine_shape`) that is rotated, optionally mirrored, scaled by
    `length` and translated to `start_pos`.  A vine may recursively spawn a
    mirrored child vine at curve parameter `grow_child_at`, up to
    `max_child_vines` generations (tracked via `depth`).
    """
    def __init__(
        self,
        start_pos: tuple[float, float],
        length: float,
        thickness: float,
        *,
        color: ColorGradient,
        build_phase: float = 1e9,
        rotate_degrees: float = 0.0,
        flip: bool = False,
        depth: int = 1,
        max_child_vines: int = 16,
        grow_child_at: float = 0.1,
        axis_to_invert: int = 1,
        add_degrees_to_angle: float = 0,
    ):
        """
        :param start_pos: (x, y) image position of the vine's first control point
        :param length: scale factor applied to the unit control-point template
        :param thickness: stroke half-width scale at the vine's base
        :param color: gradient used to color the strokes along the curve
        :param build_phase: growth progress; values < 1 draw only part of the
            curve, and children inherit ``build_phase - .2`` so they lag behind
        :param rotate_degrees: rotation of the control template around start_pos
        :param flip: mirror the template along `axis_to_invert`
        :param depth: recursion depth of this vine (1 = root)
        :param max_child_vines: recursion limit for spawning child vines
        :param grow_child_at: curve parameter (0..1) where the child sprouts
        :param axis_to_invert: index of the coordinate to mirror when flipped
        :param add_degrees_to_angle: constant offset added to tangent angles
        """
        # Control-point template: row 0 = x, row 1 = y coordinates (unit scale).
        self.vine_shape = np.array([
            [0.0, 1.5, 1.5, 0.6, 0.5],
            [0.0, 0.0, 0.9, 1.0, 0.6],
        ])
        self.axis_to_invert = axis_to_invert
        self.add_degrees_to_angle = add_degrees_to_angle
        self.grow_child_at = grow_child_at
        self.color = color
        self.flipped = flip
        self.depth = depth
        self.max_child_vines = max_child_vines
        self.build_phase = build_phase
        self.thickness = thickness
        self.length = length
        nodes = self._get_nodes_for_curve(start_pos, rotate_degrees)
        # Degree = number of control points - 1 (quartic for this template).
        self.curve = bezier.Curve(nodes, degree=len(nodes[0]) - 1)
        # Recurse until the generation limit is hit.
        self.child_vine = self._create_child_vine(grow_child_at) if depth < max_child_vines else None
    def _get_nodes_for_curve(self, start_pos, rotate_degrees):
        """Return the 2xN control-point array: the template rotated by
        `rotate_degrees`, mirrored if `self.flipped`, scaled by `self.length`
        and translated so the first point sits at `start_pos`."""
        rotate_radians = rotate_degrees / 180 * np.pi
        # Standard 2D rotation matrix.
        rotation_matrix = np.array([
            [np.cos(rotate_radians), -np.sin(rotate_radians)],
            [np.sin(rotate_radians), np.cos(rotate_radians)],
        ])
        shape = np.copy(self.vine_shape.T)
        if self.flipped:
            # Mirror one coordinate axis so successive generations alternate sides.
            shape[:, self.axis_to_invert] *= -1
        rotated = shape @ rotation_matrix
        return rotated.T * self.length + np.array(start_pos)[:, None]
    def _create_child_vine(self, at: float):
        """Spawn the next-generation vine rooted at curve parameter `at`.

        The child starts on this curve, is rotated to follow the local
        tangent, mirrored relative to its parent, and shrunk by the
        thinning factor evaluated at `at`.
        """
        start_pos = tuple(self.curve.evaluate(at).flatten())
        angle = -self.get_angle_at(at) * 180 / np.pi  # + np.random.uniform(-30, 30)
        thinned = self._thinning(np.array([at]))
        length = self.length * thinned[0]
        # NOTE(review): `thinned` is a length-1 ndarray, so the child's
        # `thickness` becomes an array rather than a float; broadcasting makes
        # the arithmetic work downstream, but `thinned[0] * self.thickness`
        # was probably intended -- confirm.
        thickness = thinned * self.thickness
        return Vine(
            start_pos, length, thickness, color=self.color, build_phase=self.build_phase - .2,
            rotate_degrees=angle, flip=not self.flipped,
            depth=self.depth + 1, max_child_vines=self.max_child_vines,
            grow_child_at=self.grow_child_at, axis_to_invert=self.axis_to_invert,
            add_degrees_to_angle=self.add_degrees_to_angle
        )
    @staticmethod
    def _sigmoid(x: float):
        """ Modified sigmoid function which goes from y=1 to 0 within x=0...1
        See here: https://www.desmos.com/calculator/s5lr4vi48n
        """
        return 1 - (1 / (1 + np.e ** (2 - 6 * x)))
    def _thinning(self, arr: np.ndarray):
        """ Apply inverse sigmoid function to every element in arr """
        # NOTE(review): np.clip's signature is (a, a_min, a_max); as written
        # this evaluates np.clip(0, 1, arr), i.e. an elementwise min(1, arr).
        # Harmless here because _sigmoid stays within (0, 1) for inputs in
        # [0, 1], but np.clip(<sigmoid result>, 0, 1) was likely intended.
        return np.clip(0, 1, np.apply_along_axis(self._sigmoid, axis=0, arr=arr))
    def get_tangent_at(self, at: float) -> np.ndarray:
        """Return the unit tangent vector of the curve at parameter `at`."""
        hodograph = self.curve.evaluate_hodograph(at)
        return hodograph / np.linalg.norm(hodograph)
    def get_normal_at(self, at: float) -> np.ndarray:
        """Return the tangent rotated by 90 degrees (a unit normal)."""
        return np.array([[0, -1], [1, 0]]) @ self.get_tangent_at(at)
    def get_angle_at(self, at: float) -> float:
        """ Returns angle in radians of the tangent for 0.0 <= at <= 1.0 """
        x, y = self.get_tangent_at(at).T[0]
        return np.arctan2(y, x) + self.add_degrees_to_angle / 180.0 * np.pi
    def draw_on_image(self, image: Image, debug_draw_tangent=False):
        """Draw this vine (children first) onto `image`.

        The curve is sampled roughly once per pixel of arc length; at each
        sample a short line perpendicular to the curve is drawn, whose
        half-width tapers via `_thinning` and whose color follows the
        gradient as a function of growth progress.
        """
        if self.child_vine:
            self.child_vine.draw_on_image(image)
        draw = ImageDraw.Draw(image)
        # Only draw up to the current growth progress.
        phase = min(1.0, self.build_phase)
        dots = np.arange(0, phase, 1 / self.curve.length)
        thickness = self._thinning(dots)
        evaluated = self.curve.evaluate_multi(dots)
        for t, (d, (x, y)) in zip(thickness, zip(dots, zip(*evaluated))):
            # Stroke vector perpendicular to the curve, scaled by the taper
            # and growth (np.clip here has the same swapped-argument pattern
            # as in _thinning -- see the NOTE there).
            tangent_line = self.get_normal_at(d) * self.thickness * t * np.clip(0, 1, self.build_phase)
            delta_x, delta_y = tangent_line.T[0]
            color = self.color.interpolate(self.build_phase - d + 1)
            draw.line((x - delta_x, y - delta_y, x + delta_x, y + delta_y), fill=color, width=5)
        if debug_draw_tangent:
            nx, ny = self.get_tangent_at(self.grow_child_at) * 40
            px, py = self.curve.evaluate(self.grow_child_at).T[0]
            draw.line((px, py, px + nx, py + ny), fill=(255, 255, 255), width=5)
def main():
    """Render a ring of five evenly rotated vines on a black canvas.

    Every vine uses the same purple-to-yellow gradient; the finished
    drawing is blurred slightly and written to ``vines.png``.
    """
    canvas_size = (1000, 1000)
    canvas_center = (canvas_size[0] // 2, canvas_size[1] // 2)
    img = Image.new("RGB", canvas_size, (0, 0, 0))
    vine_count = 5
    angle_step = 360 / vine_count
    vines = []
    for index in range(vine_count):
        vines.append(
            Vine(
                canvas_center,
                190,
                15,
                rotate_degrees=index * angle_step,
                color=ColorGradient(("#51007D", 1), ("#FFD600", 1.2)),
                add_degrees_to_angle=np.pi / 7,
                axis_to_invert=1,
                build_phase=1,
            )
        )
    for vine in vines:
        vine.draw_on_image(img)
    img = img.filter(ImageFilter.GaussianBlur(5))
    img.save("vines.png")
if __name__ == "__main__":
    main()
| [
"PIL.ImageFilter.GaussianBlur",
"PIL.Image.new",
"numpy.arctan2",
"numpy.copy",
"numpy.clip",
"color_gradient.ColorGradient",
"numpy.apply_along_axis",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.linalg.norm",
"numpy.cos",
"PIL.ImageDraw.Draw"
] | [((4779, 4812), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'size', '(0, 0, 0)'], {}), "('RGB', size, (0, 0, 0))\n", (4788, 4812), False, 'from PIL import Image, ImageDraw, ImageFilter\n'), ((735, 799), 'numpy.array', 'np.array', (['[[0.0, 1.5, 1.5, 0.6, 0.5], [0.0, 0.0, 0.9, 1.0, 0.6]]'], {}), '([[0.0, 1.5, 1.5, 0.6, 0.5], [0.0, 0.0, 0.9, 1.0, 0.6]])\n', (743, 799), True, 'import numpy as np\n'), ((1757, 1783), 'numpy.copy', 'np.copy', (['self.vine_shape.T'], {}), '(self.vine_shape.T)\n', (1764, 1783), True, 'import numpy as np\n'), ((3835, 3856), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (3849, 3856), False, 'from PIL import Image, ImageDraw, ImageFilter\n'), ((3915, 3957), 'numpy.arange', 'np.arange', (['(0)', 'phase', '(1 / self.curve.length)'], {}), '(0, phase, 1 / self.curve.length)\n', (3924, 3957), True, 'import numpy as np\n'), ((5178, 5205), 'PIL.ImageFilter.GaussianBlur', 'ImageFilter.GaussianBlur', (['(5)'], {}), '(5)\n', (5202, 5205), False, 'from PIL import Image, ImageDraw, ImageFilter\n'), ((2194, 2208), 'numpy.array', 'np.array', (['[at]'], {}), '([at])\n', (2202, 2208), True, 'import numpy as np\n'), ((3087, 3138), 'numpy.apply_along_axis', 'np.apply_along_axis', (['self._sigmoid'], {'axis': '(0)', 'arr': 'arr'}), '(self._sigmoid, axis=0, arr=arr)\n', (3106, 3138), True, 'import numpy as np\n'), ((3277, 3302), 'numpy.linalg.norm', 'np.linalg.norm', (['hodograph'], {}), '(hodograph)\n', (3291, 3302), True, 'import numpy as np\n'), ((3373, 3400), 'numpy.array', 'np.array', (['[[0, -1], [1, 0]]'], {}), '([[0, -1], [1, 0]])\n', (3381, 3400), True, 'import numpy as np\n'), ((3612, 3628), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (3622, 3628), True, 'import numpy as np\n'), ((1940, 1959), 'numpy.array', 'np.array', (['start_pos'], {}), '(start_pos)\n', (1948, 1959), True, 'import numpy as np\n'), ((4197, 4228), 'numpy.clip', 'np.clip', (['(0)', '(1)', 'self.build_phase'], {}), '(0, 1, 
self.build_phase)\n', (4204, 4228), True, 'import numpy as np\n'), ((4934, 4981), 'color_gradient.ColorGradient', 'ColorGradient', (["('#51007D', 1)", "('#FFD600', 1.2)"], {}), "(('#51007D', 1), ('#FFD600', 1.2))\n", (4947, 4981), False, 'from color_gradient import ColorGradient\n'), ((1618, 1640), 'numpy.cos', 'np.cos', (['rotate_radians'], {}), '(rotate_radians)\n', (1624, 1640), True, 'import numpy as np\n'), ((1681, 1703), 'numpy.sin', 'np.sin', (['rotate_radians'], {}), '(rotate_radians)\n', (1687, 1703), True, 'import numpy as np\n'), ((1705, 1727), 'numpy.cos', 'np.cos', (['rotate_radians'], {}), '(rotate_radians)\n', (1711, 1727), True, 'import numpy as np\n'), ((1643, 1665), 'numpy.sin', 'np.sin', (['rotate_radians'], {}), '(rotate_radians)\n', (1649, 1665), True, 'import numpy as np\n')] |
import os
import sys
import bokeh.plotting as bkp
import numpy as np
from bokeh.io import export_svgs
import cairosvg
# make it so we can import models/etc from parent folder
sys.path.insert(1, os.path.join(sys.path[0], '../common'))
from plotting import *
# Convergence-comparison plot: objective value ||y - Phi w||_2 per iteration
# for several coreset-construction methods, exported as SVG, PDF and shown.
plot_reverse_kl = True
trials = [1]
# (result-file prefix, legend label) for every method to plot.
nms = [('GIGAOE', 'GIGA'), ('SVI', 'SparseVI'), ('RAND', 'Uniform'), ('IHT', 'A-IHT'), ('IHT-2', 'A-IHT II')]
# plot the KL figure
fig = bkp.figure(y_axis_type='log', plot_width=1000, plot_height=1000, x_axis_label='iteration',
                 y_axis_label=('f' if plot_reverse_kl else 'Forward KL'))
# preprocess_plot(fig, '32pt', False, True)
plot_every = 10
M = 200  # row of `w` (coreset size) evaluated for the non-IHT methods
max_iter = 300 + 1
marker_plot_every = 30
marker_size = 25
obj_parameter = np.load('results/obj.npz')
Phi = obj_parameter['Phi']
y = obj_parameter['y'].squeeze()
starting_obj = np.linalg.norm(y, ord=2)  # initial objective value for IHT and IHT-2
for i, nm in enumerate(nms):
    for t in trials:
        if nm[0] == 'IHT':
            # IHT methods store their whole convergence trace; prepend the
            # objective of the all-zero solution so iteration 0 lines up.
            obj_list = np.load('results/iht-convergence.npy')
            obj_list = np.concatenate([[starting_obj], obj_list])
        elif nm[0] == 'IHT-2':
            obj_list = np.load('results/iht-2-convergence.npy')
            obj_list = np.concatenate([[starting_obj], obj_list])
        else:
            # Other methods only store final weights: plot their final
            # objective as a horizontal line across all iterations.
            res = np.load('results/results_' + nm[0] + '_' + str(t) + '.npz')
            w = res['w'][M, :]
            print('w sparsity: {}'.format(sum(w > 0)))
            obj_baseline = np.linalg.norm(y - Phi.dot(w), ord=2)
            obj_list = np.ones(max_iter) * obj_baseline
        # Renamed from `iter` so the builtin is not shadowed.
        iterations = list(range(max_iter))
        fig.line(iterations[::plot_every], obj_list[::plot_every], color=pal[i], line_width=5, legend=nm[1])
        # Distinct marker styles per method so the lines stay distinguishable.
        if nm[0] == 'IHT':
            fig.circle(iterations[::marker_plot_every], obj_list[::marker_plot_every], fill_color=pal[i], size=marker_size,
                       legend=nm[1])
        elif nm[0] == 'IHT-2':
            fig.circle(iterations[::marker_plot_every], obj_list[::marker_plot_every], fill_color="white",
                       size=marker_size, legend=nm[1])
        elif nm[0] == 'SVI':
            fig.square(iterations[::marker_plot_every], obj_list[::marker_plot_every], fill_color="white",
                       size=marker_size, legend=nm[1])
        elif nm[0] == 'GIGAOE':
            fig.square(iterations[::marker_plot_every], obj_list[::marker_plot_every], fill_color=pal[i],
                       size=marker_size, legend=nm[1])
# Move the last two legend entries (the IHT methods) to the top of the legend.
legend_len = len(fig.legend.items)
fig.legend.items = fig.legend.items[legend_len - 2:legend_len] + fig.legend.items[0:legend_len - 2]
axis_font_size = '25pt'
axis_label_size = '32pt'
fig.xaxis.axis_label_text_font_size = axis_label_size
fig.xaxis.major_label_text_font_size = axis_font_size
fig.yaxis.axis_label_text_font_size = axis_label_size
fig.yaxis.major_label_text_font_size = axis_font_size
postprocess_plot(fig, '22pt', location='bottom_left', glyph_width=40)
fig.legend.background_fill_alpha = 0.
fig.legend.border_line_alpha = 0.
fig.output_backend = 'svg'
fig_name = 'exp_1_conver'
# figure output
export_svgs(fig, filename=fig_name + '.svg')
# BUG FIX: the original passed `open(...)` directly into svg2pdf, leaking
# the file handle; the context manager guarantees the SVG file is closed.
with open(fig_name + '.svg', "rb") as svg_file:
    cairosvg.svg2pdf(file_obj=svg_file, write_to=fig_name + '.pdf')
bkp.show(fig)
| [
"numpy.load",
"bokeh.plotting.figure",
"numpy.concatenate",
"numpy.ones",
"bokeh.plotting.show",
"numpy.linalg.norm",
"os.path.join",
"bokeh.io.export_svgs"
] | [((434, 587), 'bokeh.plotting.figure', 'bkp.figure', ([], {'y_axis_type': '"""log"""', 'plot_width': '(1000)', 'plot_height': '(1000)', 'x_axis_label': '"""iteration"""', 'y_axis_label': "('f' if plot_reverse_kl else 'Forward KL')"}), "(y_axis_type='log', plot_width=1000, plot_height=1000,\n x_axis_label='iteration', y_axis_label='f' if plot_reverse_kl else\n 'Forward KL')\n", (444, 587), True, 'import bokeh.plotting as bkp\n'), ((744, 770), 'numpy.load', 'np.load', (['"""results/obj.npz"""'], {}), "('results/obj.npz')\n", (751, 770), True, 'import numpy as np\n'), ((846, 870), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {'ord': '(2)'}), '(y, ord=2)\n', (860, 870), True, 'import numpy as np\n'), ((3048, 3092), 'bokeh.io.export_svgs', 'export_svgs', (['fig'], {'filename': "(fig_name + '.svg')"}), "(fig, filename=fig_name + '.svg')\n", (3059, 3092), False, 'from bokeh.io import export_svgs\n'), ((3184, 3197), 'bokeh.plotting.show', 'bkp.show', (['fig'], {}), '(fig)\n', (3192, 3197), True, 'import bokeh.plotting as bkp\n'), ((196, 234), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../common"""'], {}), "(sys.path[0], '../common')\n", (208, 234), False, 'import os\n'), ((1041, 1079), 'numpy.load', 'np.load', (['"""results/iht-convergence.npy"""'], {}), "('results/iht-convergence.npy')\n", (1048, 1079), True, 'import numpy as np\n'), ((1103, 1145), 'numpy.concatenate', 'np.concatenate', (['[[starting_obj], obj_list]'], {}), '([[starting_obj], obj_list])\n', (1117, 1145), True, 'import numpy as np\n'), ((1200, 1240), 'numpy.load', 'np.load', (['"""results/iht-2-convergence.npy"""'], {}), "('results/iht-2-convergence.npy')\n", (1207, 1240), True, 'import numpy as np\n'), ((1264, 1306), 'numpy.concatenate', 'np.concatenate', (['[[starting_obj], obj_list]'], {}), '([[starting_obj], obj_list])\n', (1278, 1306), True, 'import numpy as np\n'), ((1573, 1590), 'numpy.ones', 'np.ones', (['max_iter'], {}), '(max_iter)\n', (1580, 1590), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
# Summary-statistics script for a provincial panel (2003-2012): computes
# mean / sample std / min / max per variable over 30 provinces, prints
# regional (east / central / west) means, and exports the tables to CSV.
lt = 'f:/lt/'  # data directory
region = pd.read_csv(lt + 'region.csv', sep='\t', index_col=0)
# Provinces used (Tibet excluded; Inner Mongolia kept at the end of the list).
prvs = ['北京', '天津', '河北', '山东', '辽宁', '江苏', '上海', '浙江', '福建', '广东', '广西', '海南',
        '吉林', '黑龙江', '山西', '河南', '安徽', '江西', '湖北', '湖南', '重庆', '四川', '贵州', '云南',
        '陕西', '甘肃', '青海', '宁夏', '新疆', '内蒙古']
years = ['2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012']
# Each panel variable, joined with the region labels.
worker = pd.read_csv(lt + 'worker.csv', sep='\t', index_col=0).join(region)
capital = pd.read_csv(lt + 'capital.csv', sep='\t', index_col=0).join(region)
energy = pd.read_csv(lt + 'energy.csv', sep='\t', index_col=0).join(region)
gdp = pd.read_csv(lt + 'gdp.csv', sep='\t', index_col=0).join(region)
co2 = pd.read_csv(lt + 'co2.csv', sep='\t', index_col=0).join(region)
table = {'劳动力': worker, '资本': capital, '能源': energy, 'GDP': gdp, 'CO2': co2}
ll = []
ll_indexs = ['劳动力', '资本', '能源', 'GDP', 'CO2']
ll_columns = ['均值', '标准差', '最小值', '最大值']
for k, v in table.items():
    print(k)
    df = v.loc[prvs, :]
    # Whole sample: all provinces x years flattened into one vector.
    val = df.loc[:, years].values.ravel()
    avg = val.mean()
    std = np.std(val, ddof=1)  # sample (ddof=1) standard deviation
    mini = val.min()
    maxi = val.max()
    # Eastern region (rgn == 1)
    val1 = df[df.rgn == 1].loc[:, years].values.ravel()
    avg1 = val1.mean()
    std1 = np.std(val1, ddof=1)
    # Central region (rgn == 2)
    val2 = df[df.rgn == 2].loc[:, years].values.ravel()
    avg2 = val2.mean()
    std2 = np.std(val2, ddof=1)
    # Western region (rgn == 3)
    val3 = df[df.rgn == 3].loc[:, years].values.ravel()
    avg3 = val3.mean()
    std3 = np.std(val3, ddof=1)
    print(f'整体\n平均数{avg:.2f}\n标准差{std:.2f}')
    print(f'东部\n平均数{avg1:.2f}\n标准差{std1:.2f}')
    print(f'中部\n平均数{avg2:.2f}\n标准差{std2:.2f}')
    print(f'西部\n平均数{avg3:.2f}\n标准差{std3:.2f}')
    ll.append([avg, std, mini, maxi])
arr = np.array(ll)
df = pd.DataFrame(arr, ll_indexs, ll_columns)
df.to_csv(lt + 'table2_300.csv')
df.to_csv(lt + 'table6_290.csv')
df.to_csv(lt + 'table6_300.csv')
# Descriptive statistics for the EViews regression sample.
eviews = pd.read_csv(lt + 'eviews.csv', sep='\t')
# Exclude Inner Mongolia (prv_id == 5).
eviews = eviews[eviews.prv_id != 5]
# Whole sample (the no-op `eviews.shape` REPL leftovers were removed).
des = eviews.describe()
des.to_csv(lt + 'des.csv')
# Eastern region only (overwrites des.csv, as in the original workflow).
eviews = eviews[eviews.rgn == '东部']
des = eviews.describe()
des.to_csv(lt + 'des.csv')
# BUG FIX: removed the stray trailing `pd.Series.rank()` call -- it invoked
# the unbound method with no Series and raised
# "TypeError: rank() missing 1 required positional argument: 'self'".
"pandas.DataFrame",
"pandas.read_csv",
"numpy.std",
"pandas.Series.rank",
"numpy.array"
] | [((64, 117), 'pandas.read_csv', 'pd.read_csv', (["(lt + 'region.csv')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(lt + 'region.csv', sep='\\t', index_col=0)\n", (75, 117), True, 'import pandas as pd\n'), ((2114, 2126), 'numpy.array', 'np.array', (['ll'], {}), '(ll)\n', (2122, 2126), True, 'import numpy as np\n'), ((2132, 2172), 'pandas.DataFrame', 'pd.DataFrame', (['arr', 'll_indexs', 'll_columns'], {}), '(arr, ll_indexs, ll_columns)\n', (2144, 2172), True, 'import pandas as pd\n'), ((2294, 2334), 'pandas.read_csv', 'pd.read_csv', (["(lt + 'eviews.csv')"], {'sep': '"""\t"""'}), "(lt + 'eviews.csv', sep='\\t')\n", (2305, 2334), True, 'import pandas as pd\n'), ((2550, 2566), 'pandas.Series.rank', 'pd.Series.rank', ([], {}), '()\n', (2564, 2566), True, 'import pandas as pd\n'), ((1401, 1420), 'numpy.std', 'np.std', (['val'], {'ddof': '(1)'}), '(val, ddof=1)\n', (1407, 1420), True, 'import numpy as np\n'), ((1560, 1580), 'numpy.std', 'np.std', (['val1'], {'ddof': '(1)'}), '(val1, ddof=1)\n', (1566, 1580), True, 'import numpy as np\n'), ((1678, 1698), 'numpy.std', 'np.std', (['val2'], {'ddof': '(1)'}), '(val2, ddof=1)\n', (1684, 1698), True, 'import numpy as np\n'), ((1796, 1816), 'numpy.std', 'np.std', (['val3'], {'ddof': '(1)'}), '(val3, ddof=1)\n', (1802, 1816), True, 'import numpy as np\n'), ((634, 687), 'pandas.read_csv', 'pd.read_csv', (["(lt + 'worker.csv')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(lt + 'worker.csv', sep='\\t', index_col=0)\n", (645, 687), True, 'import pandas as pd\n'), ((711, 765), 'pandas.read_csv', 'pd.read_csv', (["(lt + 'capital.csv')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(lt + 'capital.csv', sep='\\t', index_col=0)\n", (722, 765), True, 'import pandas as pd\n'), ((788, 841), 'pandas.read_csv', 'pd.read_csv', (["(lt + 'energy.csv')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(lt + 'energy.csv', sep='\\t', index_col=0)\n", (799, 841), True, 'import pandas as pd\n'), ((861, 911), 'pandas.read_csv', 'pd.read_csv', (["(lt 
+ 'gdp.csv')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(lt + 'gdp.csv', sep='\\t', index_col=0)\n", (872, 911), True, 'import pandas as pd\n'), ((931, 981), 'pandas.read_csv', 'pd.read_csv', (["(lt + 'co2.csv')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(lt + 'co2.csv', sep='\\t', index_col=0)\n", (942, 981), True, 'import pandas as pd\n')] |
import sys
from typing import Dict, List, Literal, Type, Union
from torch._C import TensorType
sys.path.append(".")
sys.path.append("../../")
import numpy as np
import torch
from torch.utils.data.sampler import Sampler
from yacs.config import CfgNode
from lib.datasets.dataset_catalog import DatasetCatalog
from lib.datasets.samplers import ImageSizeBatchSampler, IterationBasedBatchSampler
from lib.datasets.tasks.classify import ClassifyDataset
from lib.datasets.tasks.semantic_segm import SegmentationDataset
from lib.datasets.transforms import make_transforms
_dataset_factory = {"classify": ClassifyDataset, "semantic_segm": SegmentationDataset}
def make_dataset(
    # cfg: CfgNode,
    dataset_name: str,
    cls_names: Union[List[str], None] = None,
    task: Literal["classify", "semantic_segm"] = "classify",
    img_shape: Union[Dict[str, int], None] = None,
    mask_type: Literal["binary", "normal"] = "normal",
    transforms: Type[Union[None, TensorType]] = None,
):
    """Build a dataset from the information registered in ``DatasetCatalog``.

    Looks up ``dataset_name`` in ``DatasetCatalog`` and instantiates the
    dataset class registered for ``task`` with that information.

    Args:
        dataset_name (str): Name of the dataset to load.
        cls_names (Union[List[str], None], optional): Class names forwarded
            to the dataset. Defaults to None.
        task (Literal["classify", "semantic_segm"], optional):
            Task the dataset is adapted to. Defaults to "classify".
        img_shape (Union[Dict[str, int], None], optional):
            Output image size; ``None`` means
            ``{"width": 224, "height": 224}``.
        mask_type (Literal["binary", "normal"], optional):
            "binary": mask all objects as a single class.
            "normal": mask objects per class.
            Defaults to "normal".
        transforms (Type[Union[None, TensorType]], optional):
            torchvision-style transform used for data augmentation.
            Defaults to None.

    Raises:
        ValueError: If ``task`` is not a supported task name.
    """
    if task not in _dataset_factory:
        raise ValueError("Invalid input for task.")
    # BUG FIX: the old signature used a mutable dict as the default value,
    # which is shared across calls; build the default per call instead.
    if img_shape is None:
        img_shape = {"width": 224, "height": 224}
    args = DatasetCatalog.get(dataset_name)
    dataset_cls = _dataset_factory[task]
    args["cls_names"] = cls_names
    args["img_shape"] = img_shape
    args["mask_type"] = mask_type
    # args["cfg"] = cfg
    del args["id"]
    # args["data_root"] = os.path.join(pth.DATA_DIR, args["data_root"])
    if transforms is not None:
        args["transforms"] = transforms
    return dataset_cls(**args)
def _make_data_sampler(dataset, shuffle: bool):
if shuffle:
sampler = torch.utils.data.sampler.RandomSampler(dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
return sampler
def _make_batch_data_sampler(
    sampler: Sampler,
    batch_size: int,
    drop_last: bool,
    max_iter: int,
    strategy: Literal[
        "image_size",
    ] = "image_size",
):
    """
    Decide how batches are sampled from the dataset on every iteration.

    Args:
        sampler (Sampler): Index sampler over the dataset.
        batch_size (int): Batch size.
        drop_last (bool): Whether to drop the final, incomplete batch.
        max_iter (int): Maximum number of iterations; ``-1`` disables the
            iteration-based wrapper.
        strategy (Literal["image_size"], optional): Selects a special
            ``batch_sampler``; "image_size" uses ImageSizeBatchSampler with
            the hard-coded size parameters 256/480/640.
            Defaults to "image_size".

    Returns:
        A batch sampler, optionally wrapped so that sampling stops after
        ``max_iter`` iterations.
    """
    if strategy == "image_size":
        batch_sampler = ImageSizeBatchSampler(
            sampler, batch_size, drop_last, 256, 480, 640
        )
    else:
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, batch_size, drop_last
        )
    # -1 means "no iteration limit": return the plain batch sampler as-is.
    if max_iter != -1:
        batch_sampler = IterationBasedBatchSampler(batch_sampler, max_iter)
    return batch_sampler
def _worker_init_fn(worker_id):
"""
workerの初期化時に乱数のシードを個別に設定する関数
これにより、workerの分だけforkするときに同一のnumpyのRandom Stateの状態がコピーされるため生じる入力データの重複を避けることができる。
REF: https://qiita.com/kosuke1701/items/14cd376e024f86e57ff6
"""
# np.random.seed(worker_id + (int(round(time.time() * 1000) % (2 ** 16))))
np.random.seed(np.random.get_state()[1][0] + worker_id)
def make_data_loader(
    # cfg: CfgNode,
    dataset_name: str,
    batch_size: int = 16,
    batch_sampler: Union[None, Literal["image_size"]] = None,
    ds_category: Literal["train", "val", "test"] = "train",
    img_shape: Union[Dict[str, int], None] = None,
    is_distributed: bool = False,
    max_iter: int = -1,
    normalization: bool = False,
    num_workers: int = 2,
    task: Literal["classify", "semantic_segm"] = "classify",
    toTensor: bool = True,
) -> torch.utils.data.DataLoader:
    """
    Create a data loader for the named dataset.

    Args:
        dataset_name (str): Name of the dataset to load.
        batch_size (int, optional): Number of samples per batch.
        batch_sampler (Union[None, Literal["image_size"]], optional):
            Batch-sampling strategy forwarded to ``_make_batch_data_sampler``.
            Defaults to None.
        ds_category (Literal["train", "val", "test"], optional):
            Which dataset split to configure for. Defaults to "train".
        img_shape (Union[Dict[str, int], None], optional): Output image
            size; ``None`` means ``{"width": 224, "height": 224}``.
        is_distributed (bool, optional): Shuffle the val/test splits as
            well (for distributed evaluation). Defaults to False.
        max_iter (int, optional): Maximum number of iterations; ``-1``
            disables the limit. Defaults to -1.
        normalization (bool, optional): Normalize the data. Defaults to False.
        num_workers (int, optional): Number of CPU worker processes.
            Defaults to 2.
        task (Literal["classify", "semantic_segm"], optional): Task the
            dataset is adapted to. Defaults to "classify".
        toTensor (bool, optional): Output ``torch.Tensor``; if ``False``,
            output ``ndarray``. Defaults to True.

    Raises:
        ValueError: If ``ds_category`` is not "train", "val" or "test".

    Returns:
        torch.utils.data.DataLoader: Configured data loader.
    """
    # BUG FIX: the old signature used a mutable dict default for img_shape.
    if img_shape is None:
        img_shape = {"width": 224, "height": 224}
    # --------------------------------------- #
    #        Training split configuration      #
    # --------------------------------------- #
    if ds_category == "train":
        drop_last = False
        shuffle = True
    # --------------------------------------- #
    #       Validation split configuration     #
    # --------------------------------------- #
    elif ds_category == "val":
        drop_last = False
        shuffle = True if is_distributed else False
    # ----------------------------------------- #
    #         Test split configuration           #
    # ----------------------------------------- #
    elif ds_category == "test":
        drop_last = False
        shuffle = True if is_distributed else False
    else:
        # BUG FIX: the original did `raise ("...")`, which raises
        # "TypeError: exceptions must derive from BaseException" instead of
        # carrying the message; raise a proper exception type.
        raise ValueError(
            "The required parameter for `make_data_loader` has not been set."
        )
    transforms = make_transforms(
        ds_category, toTensor=toTensor, normalization=normalization
    )
    dataset = make_dataset(
        dataset_name=dataset_name, task=task, img_shape=img_shape, transforms=transforms
    )
    sampler = _make_data_sampler(dataset, shuffle)
    batch_sampler = _make_batch_data_sampler(
        sampler, batch_size, drop_last, max_iter, batch_sampler
    )
    # On pin_memory (faster host->GPU transfer):
    # REF: https://qiita.com/sugulu_Ogawa_ISID/items/62f5f7adee083d96a587
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_sampler=batch_sampler,
        num_workers=num_workers,
        worker_init_fn=_worker_init_fn,
        pin_memory=True,
    )
    return data_loader
if __name__ == "__main__":
    # Smoke test: build a data loader from a hand-assembled config node.
    import sys
    sys.path.append("../../")
    # from lib.config.config import cfg
    # from datasets.tasks.classify import Dataset
    cfg = CfgNode()
    cfg.train = CfgNode()
    cfg.task = "classify"
    cfg.train.dataset = "SampleTrain"
    cfg.train.batch_size = 4
    cfg.train.batch_sampler = ""
    cfg.train.num_workers = 2
    # NOTE(review): `make_data_loader` takes `dataset_name: str` as its first
    # argument (the `cfg` parameter is commented out in its signature), so
    # passing the whole CfgNode here looks stale -- confirm before running.
    dataloader = make_data_loader(cfg)
    print(dataloader)
    # import torchvision
    # args = DatasetCatalog.get(cfg.train.dataset)
    # data_root = os.path.join(pth.DATA_DIR, args["data_root"])
    # dataset = torchvision.datasets.ImageFolder(data_root)
    # print(dataset)
    """
    class_to_idx: {'NG': 0, 'OK': 1}
    classes: ['NG', 'OK']
    imgs: [
        (img_path, 0),
        (img_path, 0), ...,
        (img_path, 1),
        (img_path, 1), ...
    ]
    targets: [
        000: 0,
        001: 0, ...
        050: 1,
        051: 1, ....
    ]
    """
| [
"sys.path.append",
"lib.datasets.samplers.IterationBasedBatchSampler",
"torch.utils.data.sampler.BatchSampler",
"torch.utils.data.DataLoader",
"numpy.random.get_state",
"torch.utils.data.sampler.SequentialSampler",
"lib.datasets.samplers.ImageSizeBatchSampler",
"yacs.config.CfgNode",
"lib.datasets.t... | [((96, 116), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (111, 116), False, 'import sys\n'), ((117, 142), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (132, 142), False, 'import sys\n'), ((1825, 1857), 'lib.datasets.dataset_catalog.DatasetCatalog.get', 'DatasetCatalog.get', (['dataset_name'], {}), '(dataset_name)\n', (1843, 1857), False, 'from lib.datasets.dataset_catalog import DatasetCatalog\n'), ((6303, 6379), 'lib.datasets.transforms.make_transforms', 'make_transforms', (['ds_category'], {'toTensor': 'toTensor', 'normalization': 'normalization'}), '(ds_category, toTensor=toTensor, normalization=normalization)\n', (6318, 6379), False, 'from lib.datasets.transforms import make_transforms\n'), ((6887, 7030), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_sampler': 'batch_sampler', 'num_workers': 'num_workers', 'worker_init_fn': '_worker_init_fn', 'pin_memory': '(True)'}), '(dataset, batch_sampler=batch_sampler,\n num_workers=num_workers, worker_init_fn=_worker_init_fn, pin_memory=True)\n', (6914, 7030), False, 'import torch\n'), ((7147, 7172), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (7162, 7172), False, 'import sys\n'), ((7275, 7284), 'yacs.config.CfgNode', 'CfgNode', ([], {}), '()\n', (7282, 7284), False, 'from yacs.config import CfgNode\n'), ((7301, 7310), 'yacs.config.CfgNode', 'CfgNode', ([], {}), '()\n', (7308, 7310), False, 'from yacs.config import CfgNode\n'), ((2319, 2366), 'torch.utils.data.sampler.RandomSampler', 'torch.utils.data.sampler.RandomSampler', (['dataset'], {}), '(dataset)\n', (2357, 2366), False, 'import torch\n'), ((2395, 2446), 'torch.utils.data.sampler.SequentialSampler', 'torch.utils.data.sampler.SequentialSampler', (['dataset'], {}), '(dataset)\n', (2437, 2446), False, 'import torch\n'), ((3164, 3232), 'lib.datasets.samplers.ImageSizeBatchSampler', 'ImageSizeBatchSampler', (['sampler', 
'batch_size', 'drop_last', '(256)', '(480)', '(640)'], {}), '(sampler, batch_size, drop_last, 256, 480, 640)\n', (3185, 3232), False, 'from lib.datasets.samplers import ImageSizeBatchSampler, IterationBasedBatchSampler\n'), ((3289, 3358), 'torch.utils.data.sampler.BatchSampler', 'torch.utils.data.sampler.BatchSampler', (['sampler', 'batch_size', 'drop_last'], {}), '(sampler, batch_size, drop_last)\n', (3326, 3358), False, 'import torch\n'), ((3436, 3487), 'lib.datasets.samplers.IterationBasedBatchSampler', 'IterationBasedBatchSampler', (['batch_sampler', 'max_iter'], {}), '(batch_sampler, max_iter)\n', (3462, 3487), False, 'from lib.datasets.samplers import ImageSizeBatchSampler, IterationBasedBatchSampler\n'), ((3845, 3866), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (3864, 3866), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from pyoneer.rl.wrappers.process_impl import Process
class Batch(object):
    """
    Wraps multiple `gym.Env` into a batch of environments to support
    vectorization. Uses an external process for each environment
    when `blocking=False`.

    For simple environments, use `blocking=True` as the overhead of
    external processes can limit very large batches.
    For more complex environments, use `blocking=False` so the CPU
    computation can be offloaded.

    Note: When rendering with `mode="human"` only the first
    environment is rendered.

    Example:
        ```
        env = Batch(
            constructor=lambda: gym.make('Pendulum-v0'),
            batch_size=32,
            blocking=True)
        ```

    Args:
        constructor: Constructor which returns a `gym.Env`.
        batch_size: Number of parallel environments.
        blocking: Boolean indicating whether each call to an environment
            is blocking (default: True).
    """

    def __init__(self, constructor, batch_size, blocking=True):
        if blocking:
            self.envs = [constructor() for _ in range(batch_size)]
        else:
            self.envs = [Process(constructor) for _ in range(batch_size)]
        # Per-environment "episode finished" flags.
        # BUG FIX: `np.bool` was a deprecated alias of the builtin `bool`
        # (removed in NumPy 1.24); use the builtin dtype instead.
        self.done = np.zeros(len(self.envs), dtype=bool)
        self.blocking = blocking
        observation_space = self.observation_space
        if not all(env.observation_space == observation_space for env in self.envs):
            raise ValueError("All environments must use the same observation space.")
        action_space = self.action_space
        if not all(env.action_space == action_space for env in self.envs):
            raise ValueError("All environments must use the same action space.")

    def __len__(self):
        return len(self.envs)

    def __getitem__(self, index):
        return self.envs[index]

    def __getattr__(self, name):
        # Delegate unknown attributes (e.g. observation_space) to env 0.
        return getattr(self.envs[0], name)

    def seed(self, seed):
        """Seed each environment with `seed + index`."""
        if self.blocking:
            for i, env in enumerate(self.envs):
                env.seed(seed + i)
        else:
            promises = [env.seed(seed + i) for i, env in enumerate(self.envs)]
            for promise in promises:
                promise()

    def reset(self):
        """Reset every environment and return the stacked initial states."""
        self.done[:] = False
        if self.blocking:
            states = [env.reset() for env in self.envs]
        else:
            promises = [env.reset() for env in self.envs]
            states = [promise() for promise in promises]
        state = np.stack(states, axis=0)
        return state

    def _dummy_transition(self):
        """Zero transition substituted for environments that are done."""
        next_state = np.zeros(
            shape=self.observation_space.shape, dtype=self.observation_space.dtype
        )
        reward = 0.0
        done = True
        info = {}
        transition = (next_state, reward, done, info)
        return transition

    def step(self, actions):
        """Step all non-finished environments; finished ones get dummies.

        Returns stacked `(next_state, reward, done)` arrays and a tuple of
        per-environment info dicts.
        """
        if self.blocking:
            transitions = []
            for i, env in enumerate(self.envs):
                if self.done[i]:
                    transition = self._dummy_transition()
                else:
                    transition = env.step(actions[i])
                transitions.append(transition)
        else:
            promises = []
            for i, env in enumerate(self.envs):
                if self.done[i]:
                    promise = self._dummy_transition
                else:
                    promise = env.step(actions[i])
                promises.append(promise)
            transitions = [promise() for promise in promises]
        next_states, rewards, dones, infos = zip(*transitions)
        next_state = np.stack(next_states, axis=0)
        reward = np.stack(rewards, axis=0)
        done = np.stack(dones, axis=0)
        info = tuple(infos)
        # Once done, an environment stays done until the next reset().
        self.done = self.done | done
        return next_state, reward, done, info

    def render(self, mode="human"):
        assert (
            self.blocking or mode == "rgb_array"
        ), 'only the "rgb_array" mode is supported when `blocking=False`'
        if mode == "rgb_array":
            return np.stack([env.render(mode=mode) for env in self.envs], axis=0)
        else:
            return self.envs[0].render(mode=mode)

    def close(self):
        for env in self.envs:
            if hasattr(env, "close"):
                env.close()
| [
"numpy.stack",
"pyoneer.rl.wrappers.process_impl.Process",
"numpy.zeros"
] | [((2614, 2638), 'numpy.stack', 'np.stack', (['states'], {'axis': '(0)'}), '(states, axis=0)\n', (2622, 2638), True, 'import numpy as np\n'), ((2715, 2800), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.observation_space.shape', 'dtype': 'self.observation_space.dtype'}), '(shape=self.observation_space.shape, dtype=self.observation_space.dtype\n )\n', (2723, 2800), True, 'import numpy as np\n'), ((3739, 3768), 'numpy.stack', 'np.stack', (['next_states'], {'axis': '(0)'}), '(next_states, axis=0)\n', (3747, 3768), True, 'import numpy as np\n'), ((3786, 3811), 'numpy.stack', 'np.stack', (['rewards'], {'axis': '(0)'}), '(rewards, axis=0)\n', (3794, 3811), True, 'import numpy as np\n'), ((3827, 3850), 'numpy.stack', 'np.stack', (['dones'], {'axis': '(0)'}), '(dones, axis=0)\n', (3835, 3850), True, 'import numpy as np\n'), ((1280, 1300), 'pyoneer.rl.wrappers.process_impl.Process', 'Process', (['constructor'], {}), '(constructor)\n', (1287, 1300), False, 'from pyoneer.rl.wrappers.process_impl import Process\n')] |
from __future__ import annotations
from typeguard import typechecked
from typing import Union
import numpy as np
@typechecked
class ImagePoint:
    """A point in image coordinates (a.k.a. pixel coordinates).

    Coordinates are rounded to the nearest whole pixel on construction.
    """

    def __init__(self, x: Union[int, float], y: Union[int, float]) -> None:
        """
        :param x: X coordinate
        :param y: Y coordinate
        """
        self._point: np.ndarray
        self._point = np.rint(np.array([x, y])).astype(int)

    def __eq__(self, other: ImagePoint) -> bool:
        return self.x == other.x and self.y == other.y

    def __str__(self) -> str:
        return f"x={self.x} y={self.y}"

    @property
    def point(self) -> np.ndarray:
        return self._point

    @property
    def x(self) -> int:
        return self._point[0].item()

    @property
    def y(self) -> int:
        return self._point[1].item()

    def midpoint(self, other: ImagePoint) -> ImagePoint:
        """Return the midpoint between this point and ``other``.

        :param other: Other point
        :return: Midpoint, rounded to whole pixels
        """
        centre = np.mean((self._point, other._point), axis=0)
        # Pixels are discrete values, so round before building the point.
        coords = np.rint(centre).astype(int).tolist()
        return self.__class__(*coords)
class WorldPoint(ImagePoint):
    """A point in world coordinates (three integer axes)."""

    def __init__(
        self,
        x: Union[int, float],
        y: Union[int, float],
        z: Union[int, float],
    ) -> None:
        """
        :param x: X coordinate
        :param y: Y coordinate
        :param z: Z coordinate
        """
        super().__init__(x, y)
        # Append the rounded z coordinate to the (x, y) vector of the base class.
        self._point = np.append(self._point, np.rint(z).astype(int).item())

    def __eq__(self, other: WorldPoint) -> bool:
        return self.x == other.x and self.y == other.y and self.z == other.z

    def __str__(self) -> str:
        return f"x={self.x} y={self.y} z={self.z}"

    @property
    def z(self) -> int:
        return self._point[2].item()
| [
"numpy.append",
"numpy.rint",
"numpy.mean",
"numpy.array"
] | [((1205, 1249), 'numpy.mean', 'np.mean', (['(self._point, other._point)'], {'axis': '(0)'}), '((self._point, other._point), axis=0)\n', (1212, 1249), True, 'import numpy as np\n'), ((1872, 1897), 'numpy.append', 'np.append', (['self._point', 'z'], {}), '(self._point, z)\n', (1881, 1897), True, 'import numpy as np\n'), ((455, 471), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (463, 471), True, 'import numpy as np\n'), ((1313, 1326), 'numpy.rint', 'np.rint', (['mean'], {}), '(mean)\n', (1320, 1326), True, 'import numpy as np\n'), ((1820, 1830), 'numpy.rint', 'np.rint', (['z'], {}), '(z)\n', (1827, 1830), True, 'import numpy as np\n')] |
import numpy as np
from copy import deepcopy
from scipy.stats import norm
from scipy.optimize import fminbound
# EVOLUTIONARY OPERATORS
def sbx_crossover(p1, p2, sbxdi):
    """Simulated binary crossover (SBX) of two parent vectors.

    :param p1: first parent, 1-D array with genes in [0, 1]
    :param p2: second parent, same shape as ``p1``
    :param sbxdi: SBX distribution index (larger -> children closer to parents)
    :return: two children, each clipped to [0, 1]
    """
    dim = p1.shape[0]
    u = np.random.rand(dim)
    low = u <= 0.5
    # Spread factor beta, drawn from the SBX polynomial distribution.
    beta = np.empty(dim)
    beta[low] = (2.0 * u[low]) ** (1.0 / (sbxdi + 1))
    beta[~low] = (2.0 * (1.0 - u[~low])) ** (-1.0 / (sbxdi + 1))
    child1 = 0.5 * ((1 + beta) * p1 + (1 - beta) * p2)
    child2 = 0.5 * ((1 + beta) * p2 + (1 - beta) * p1)
    return np.clip(child1, 0, 1), np.clip(child2, 0, 1)
def mutate(p, pmdi):
    """Polynomial mutation of a chromosome.

    Each gene mutates with probability 1/len(p); the perturbation size is
    governed by the distribution index ``pmdi``.

    :param p: chromosome, 1-D array with genes in [0, 1]
    :param pmdi: polynomial-mutation distribution index
    :return: mutated copy of ``p``, clipped to [0, 1]
    """
    size = p.shape[0]
    rate = 1.0 / size
    u = np.random.uniform(size=[size])
    r = np.random.uniform(size=[size])
    child = np.copy(p)
    for idx in range(size):
        if r[idx] >= rate:
            continue
        if u[idx] < 0.5:
            # Perturb towards the lower bound.
            delta = (2 * u[idx]) ** (1 / (1 + pmdi)) - 1
            child[idx] = p[idx] + delta * p[idx]
        else:
            # Perturb towards the upper bound.
            delta = 1 - (2 * (1 - u[idx])) ** (1 / (1 + pmdi))
            child[idx] = p[idx] + delta * (1 - p[idx])
    return np.clip(child, 0, 1)
def variable_swap(p1, p2, probswap):
    """Uniform crossover: each gene position is swapped between the two
    parents with probability ``probswap``.

    :return: two children (copies; the parents are untouched)
    """
    mask = np.random.rand(p1.shape[0]) <= probswap
    child1 = p1.copy()
    child2 = p2.copy()
    child1[mask] = p2[mask]
    child2[mask] = p1[mask]
    return child1, child2
# MULTIFACTORIAL EVOLUTIONARY HELPER FUNCTIONS
def find_relative(population, skill_factor, sf, N):
    """Pick a random individual among the first ``N`` whose skill factor is ``sf``."""
    candidates = np.where(skill_factor[:N] == sf)[0]
    return population[np.random.choice(candidates)]
def calculate_scalar_fitness(factorial_cost):
    """Scalar fitness of each individual: 1 / (best 1-based rank over all tasks).

    The double argsort converts each cost column into ranks; the minimum
    rank across tasks is the individual's best rank.
    """
    ranks = np.argsort(np.argsort(factorial_cost, axis=0), axis=0) + 1
    return 1 / np.min(ranks, axis=1)
# MULTIFACTORIAL EVOLUTIONARY WITH TRANSFER PARAMETER ESTIMATION HELPER FUNCTIONS
# def get_subpops(population, skill_factor, N):
# K = len(set(skill_factor))
# subpops = []
# for k in range(K):
# idx = np.where(skill_factor == k)[0][:N//K]
# subpops.append(population[idx, :])
# return subpops
def get_subpops(population, skill_factor, N):
    """Split the first ``N`` rows of ``population`` into K per-task
    subpopulations (K = number of distinct skill factors), keeping at most
    N//K individuals per task.
    """
    K = len(set(skill_factor))
    per_task = N // K
    subpops = []
    for task in range(K):
        rows = np.where(skill_factor[:N] == task)[0][:per_task]
        subpops.append(population[rows, :])
    return subpops
class Model:
    """Axis-aligned Gaussian model of a subpopulation: independent normal
    distributions per dimension, parameterised by per-dimension mean/std."""

    def __init__(self, mean, std, num_sample):
        self.mean = mean
        self.std = std
        self.num_sample = num_sample

    def density(self, subpop, D):
        """Joint probability density of each row of ``subpop`` over the
        first ``D`` dimensions (product of per-dimension normal pdfs)."""
        densities = np.ones([subpop.shape[0]])
        for dim in range(D):
            densities = densities * norm.pdf(
                subpop[:, dim], loc=self.mean[dim], scale=self.std[dim]
            )
        return densities
def log_likelihood(rmp, prob_matrix, K):
    """Negative log-likelihood of a candidate random-mating probability.

    ``prob_matrix[k][:, j]`` is the density of subpopulation k's
    individuals under task j's model; the entries are reweighted by the
    MFEA-II mixture coefficients before summing.

    NOTE(review): the loops are hard-coded to 2 tasks even though ``K`` is
    a parameter — confirm before using with more than two tasks.
    """
    posterior = deepcopy(prob_matrix)
    total = 0
    for k in range(2):
        for j in range(2):
            if k == j:
                # Within-task mating probability.
                scale = 1 - 0.5 * (K - 1) * rmp / float(K)
            else:
                # Cross-task mating probability.
                scale = 0.5 * (K - 1) * rmp / float(K)
            posterior[k][:, j] = posterior[k][:, j] * scale
        total = total + np.sum(-np.log(np.sum(posterior[k], axis=1)))
    return total
def learn_models(subpops, D):
    """Fit one Gaussian ``Model`` per subpopulation.

    Each subpopulation is padded with 10% uniformly random rows before the
    mean/std are estimated, which regularises degenerate populations.
    """
    models = []
    for subpop in subpops:
        n = len(subpop)
        noise_rows = np.random.rand(int(np.floor(0.1 * n)), D)
        augmented = np.concatenate([subpop, noise_rows])
        models.append(Model(np.mean(augmented, axis=0), np.std(augmented, axis=0), n))
    return models
def learn_rmp(subpops, dims):
    """Estimate the pairwise random-mating-probability (RMP) matrix.

    For every pair of tasks (k, j) a symmetric RMP is found by maximum
    likelihood (``fminbound`` minimises ``log_likelihood`` over [0, 1]),
    floored at 0.1, perturbed with small Gaussian noise and clipped back
    to [0, 1].  The diagonal stays 1 (from ``np.eye``).

    :param subpops: list of K per-task subpopulations
    :param dims: per-task problem dimensionalities
    :return: K x K symmetric RMP matrix
    """
    K = len(subpops)
    rmp_matrix = np.eye(K)
    D_max = max(dims)
    models = learn_models(subpops, D_max)
    for k in range(K-1):
        for j in range(k + 1, K):
            # Densities are compared only over the shared dimensions.
            D_min = min([dims[k], dims[j]])
            probmatrix = [np.ones([models[k].num_sample, 2]),
                          np.ones([models[j].num_sample, 2])]
            # Density of subpop k under task k's and task j's models.
            probmatrix[0][:, 0] = models[k].density(subpops[k], D_min)
            probmatrix[0][:, 1] = models[j].density(subpops[k], D_min)
            # Density of subpop j under task k's and task j's models.
            probmatrix[1][:, 0] = models[k].density(subpops[j], D_min)
            probmatrix[1][:, 1] = models[j].density(subpops[j], D_min)
            rmp = fminbound(lambda rmp: log_likelihood(rmp, probmatrix, K), 0, 1)
            # rmp += np.random.randn() * 0.01
            if(rmp < 0.1): rmp = 0.1
            # Small exploration noise on the estimated value.
            rmp += np.random.randn() * 0.01
            rmp = np.clip(rmp, 0, 1)
            rmp_matrix[k, j] = rmp
            rmp_matrix[j, k] = rmp
    return rmp_matrix
# OPTIMIZATION RESULT HELPERS
def get_best_individual(population, factorial_cost, scalar_fitness, skill_factor, sf):
    """Return the best individual assigned to task ``sf``.

    :return: (individual, its factorial cost on task ``sf``)
    """
    # Restrict to individuals whose skill factor is sf, then take the one
    # with the highest scalar fitness.
    members = np.where(skill_factor == sf)[0]
    best = members[np.argmax(scalar_fitness[members])]
    return population[best], factorial_cost[best, sf]
def get_result(results):
    """Collect the objective value (``fun`` attribute) from each result object."""
    return [res.fun for res in results]
| [
"numpy.random.uniform",
"copy.deepcopy",
"numpy.sum",
"numpy.copy",
"numpy.argmax",
"numpy.random.randn",
"numpy.empty",
"numpy.power",
"numpy.floor",
"numpy.ones",
"numpy.clip",
"scipy.stats.norm.pdf",
"numpy.argsort",
"numpy.where",
"numpy.random.rand",
"numpy.eye",
"numpy.concaten... | [((196, 209), 'numpy.empty', 'np.empty', (['[D]'], {}), '([D])\n', (204, 209), True, 'import numpy as np\n'), ((216, 233), 'numpy.random.rand', 'np.random.rand', (['D'], {}), '(D)\n', (230, 233), True, 'import numpy as np\n'), ((260, 302), 'numpy.power', 'np.power', (['(2 * u[u <= 0.5])', '(1 / (sbxdi + 1))'], {}), '(2 * u[u <= 0.5], 1 / (sbxdi + 1))\n', (268, 302), True, 'import numpy as np\n'), ((323, 371), 'numpy.power', 'np.power', (['(2 * (1 - u[u > 0.5]))', '(-1 / (sbxdi + 1))'], {}), '(2 * (1 - u[u > 0.5]), -1 / (sbxdi + 1))\n', (331, 371), True, 'import numpy as np\n'), ((475, 492), 'numpy.clip', 'np.clip', (['c1', '(0)', '(1)'], {}), '(c1, 0, 1)\n', (482, 492), True, 'import numpy as np\n'), ((500, 517), 'numpy.clip', 'np.clip', (['c2', '(0)', '(1)'], {}), '(c2, 0, 1)\n', (507, 517), True, 'import numpy as np\n'), ((593, 629), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[p.shape[0]]'}), '(size=[p.shape[0]])\n', (610, 629), True, 'import numpy as np\n'), ((636, 672), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[p.shape[0]]'}), '(size=[p.shape[0]])\n', (653, 672), True, 'import numpy as np\n'), ((681, 691), 'numpy.copy', 'np.copy', (['p'], {}), '(p)\n', (688, 691), True, 'import numpy as np\n'), ((959, 977), 'numpy.clip', 'np.clip', (['tmp', '(0)', '(1)'], {}), '(tmp, 0, 1)\n', (966, 977), True, 'import numpy as np\n'), ((2695, 2716), 'copy.deepcopy', 'deepcopy', (['prob_matrix'], {}), '(prob_matrix)\n', (2703, 2716), False, 'from copy import deepcopy\n'), ((3648, 3657), 'numpy.eye', 'np.eye', (['K'], {}), '(K)\n', (3654, 3657), True, 'import numpy as np\n'), ((4985, 5014), 'numpy.argmax', 'np.argmax', (['sub_scalar_fitness'], {}), '(sub_scalar_fitness)\n', (4994, 5014), True, 'import numpy as np\n'), ((1066, 1083), 'numpy.random.rand', 'np.random.rand', (['D'], {}), '(D)\n', (1080, 1083), True, 'import numpy as np\n'), ((1133, 1157), 'numpy.where', 'np.where', (['swap_indicator'], {}), 
'(swap_indicator)\n', (1141, 1157), True, 'import numpy as np\n'), ((1164, 1188), 'numpy.where', 'np.where', (['swap_indicator'], {}), '(swap_indicator)\n', (1172, 1188), True, 'import numpy as np\n'), ((1195, 1219), 'numpy.where', 'np.where', (['swap_indicator'], {}), '(swap_indicator)\n', (1203, 1219), True, 'import numpy as np\n'), ((1226, 1250), 'numpy.where', 'np.where', (['swap_indicator'], {}), '(swap_indicator)\n', (1234, 1250), True, 'import numpy as np\n'), ((2426, 2438), 'numpy.ones', 'np.ones', (['[N]'], {}), '([N])\n', (2433, 2438), True, 'import numpy as np\n'), ((3322, 3358), 'numpy.random.rand', 'np.random.rand', (['num_random_sample', 'D'], {}), '(num_random_sample, D)\n', (3336, 3358), True, 'import numpy as np\n'), ((4792, 4820), 'numpy.where', 'np.where', (['(skill_factor == sf)'], {}), '(skill_factor == sf)\n', (4800, 4820), True, 'import numpy as np\n'), ((2476, 2535), 'scipy.stats.norm.pdf', 'norm.pdf', (['subpop[:, d]'], {'loc': 'self.mean[d]', 'scale': 'self.std[d]'}), '(subpop[:, d], loc=self.mean[d], scale=self.std[d])\n', (2484, 2535), False, 'from scipy.stats import norm\n'), ((3270, 3296), 'numpy.floor', 'np.floor', (['(0.1 * num_sample)'], {}), '(0.1 * num_sample)\n', (3278, 3296), True, 'import numpy as np\n'), ((3391, 3425), 'numpy.concatenate', 'np.concatenate', (['[subpop, rand_pop]'], {}), '([subpop, rand_pop])\n', (3405, 3425), True, 'import numpy as np\n'), ((3466, 3500), 'numpy.concatenate', 'np.concatenate', (['[subpop, rand_pop]'], {}), '([subpop, rand_pop])\n', (3480, 3500), True, 'import numpy as np\n'), ((4516, 4534), 'numpy.clip', 'np.clip', (['rmp', '(0)', '(1)'], {}), '(rmp, 0, 1)\n', (4523, 4534), True, 'import numpy as np\n'), ((1405, 1437), 'numpy.where', 'np.where', (['(skill_factor[:N] == sf)'], {}), '(skill_factor[:N] == sf)\n', (1413, 1437), True, 'import numpy as np\n'), ((2012, 2043), 'numpy.where', 'np.where', (['(skill_factor[:N] == k)'], {}), '(skill_factor[:N] == k)\n', (2020, 2043), True, 'import numpy as 
np\n'), ((3833, 3867), 'numpy.ones', 'np.ones', (['[models[k].num_sample, 2]'], {}), '([models[k].num_sample, 2])\n', (3840, 3867), True, 'import numpy as np\n'), ((3890, 3924), 'numpy.ones', 'np.ones', (['[models[j].num_sample, 2]'], {}), '([models[j].num_sample, 2])\n', (3897, 3924), True, 'import numpy as np\n'), ((4479, 4496), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4494, 4496), True, 'import numpy as np\n'), ((1521, 1555), 'numpy.argsort', 'np.argsort', (['factorial_cost'], {'axis': '(0)'}), '(factorial_cost, axis=0)\n', (1531, 1555), True, 'import numpy as np\n'), ((3033, 3068), 'numpy.sum', 'np.sum', (['posterior_matrix[k]'], {'axis': '(1)'}), '(posterior_matrix[k], axis=1)\n', (3039, 3068), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Utilities for Fling
"""
# common modules
import numpy as np
import matplotlib.pyplot as plt
# extra modules needed
from pynga.utils import *
def FaultGeom(IDs, Dims, Mech, HypoLoc, ProjDict, VisualDict=None):
    """
    Build the fault-plane geometry for a Fling scenario.

    :param IDs: (ScenarioID, VID) scenario and fault-version identifiers
    :param Dims: (Fl, dfl, Fw, dfw, ztor) fault length/width and grid
        spacings (km), plus depth to top of rupture (ztor is unpacked but
        unused in this function)
    :param Mech: (strike, dip, rake) fault mechanism angles in degrees
        (strike and rake are unpacked but unused here)
    :param HypoLoc: (hypoAS, hypoDD) hypocenter location along strike and
        down dip (km)
    :param ProjDict: projection keywords passed through to ``projection``
        (from pynga.utils); must contain 'origin' = (lon0, lat0)
    :param VisualDict: optional; when given, diagnostic plots are saved
        using its 'SiteLoc', 'savetype' and 'plotpth' entries
    :return: dict with keys 'FaultPlane', 'FaultSurface', 'HypoLoc',
        'EpicLoc'
    """
    ScenarioID, VID = IDs   # e.g. 142, 00
    Fl,dfl,Fw,dfw,ztor = Dims   # fault length along strike, fault width down dip
    strike, dip, rake = Mech   # fault mechanism
    hypoAS, hypoDD = HypoLoc   # relative hypocenter location (along strike, down dip)
    # UTM projection property
    lon0, lat0 = ProjDict['origin']   # projection origin
    kwds = ProjDict
    # Create fault mesh:
    # along strike direction: y-axis
    # along dip direction: x-axis
    fx = np.arange( 0, Fw+dfw, dfw )   # along dip (x direction)
    fy = np.arange( 0, Fl+dfl, dfl ) - Fl/2   # along strike (y direction), centered on the origin
    fx, fy = fx*1000, fy*1000   # km -> m for the projection routine
    fxx, fyy = np.meshgrid( fx, fy )
    sdep2d = fxx * np.sin( dip*np.pi/180. ) / 1000   # node depth in km (down-dip distance x sin(dip))
    slon2d, slat2d = projection( fxx, fyy, **kwds )
    # surface projection (change in dip direction)
    fxS = fx * np.cos( dip*np.pi/180. )
    fxxS, fyy = np.meshgrid( fxS, fy )
    slon2dS, slat2dS = projection( fxxS, fyy, **kwds )
    # get the lon/lat location of hypocenter (slon, slat, hdep)
    hy = hypoAS*1000
    hx = hypoDD*1000
    slon, slat = projection( hx, hy, **kwds )
    hdep = hx*np.sin( dip*np.pi/180. ) / 1000.   # hypocenter depth (in km)
    # epicenter location (surface projection of the hypocenter)
    hxS = hx*np.cos( dip*np.pi/180. )
    slonS, slatS = projection( hxS, hy, **kwds )
    # visual test:
    if VisualDict != None:
        print('test plt')
        from mpl_toolkits.mplot3d import Axes3D
        rlon, rlat = VisualDict['SiteLoc']   # site locations for visual analysis
        savetype = VisualDict['savetype']
        plotpth = VisualDict['plotpth']
        # 1. plot surface projection
        fig = plt.figure(1)
        ax = fig.add_subplot( 111 )
        ax.plot( rlon, rlat, 'k^' )
        ax.plot( slonS, slatS, 'r*', ms=10 )
        ax.plot( lon0, lat0, 'yo', ms=12 )
        ax.plot( [slon2dS[0,0], slon2dS[0,-1],slon2dS[0,-1], slon2dS[0,0], slon2dS[0,0]], \
            [slat2dS[0,0], slat2dS[0,0],slat2dS[-1,0], slat2dS[-1,0], slat2dS[0,0]],'b' )
        ax.set_title( 'surface projection of fault surface (blue box)\nstation distribution (black triangles), yellew circle: origin' )
        ax.set_xlabel( 'longitude' )
        ax.set_ylabel( 'latitude' )
        plotn = 'SurfaceProjection.Scenario%s.FaultV%s.Station.Hypo.%s'%(ScenarioID, VID, savetype)
        fig.savefig( plotpth + plotn, format=savetype )
        # 2. plot in 3D
        fig = plt.figure(2)
        ax = Axes3D(fig)
        ax.plot( rlon, rlat, np.zeros(len(rlon)), 'k^', ms=8 )
        ax.plot( [slon], [slat], [-hdep], 'r*', ms=12 )
        ax.plot( [lon0], [lat0], [0], 'yo', ms=12 )
        linec = ax.plot_wireframe( slon2d, slat2d, -sdep2d )
        linec.set_color('b')
        ax.set_zlim3d(-50, 0)
        ax.set_xlabel( 'lon' )
        ax.set_ylabel( 'lat' )
        ax.set_zlabel( 'depth (km)' )
        plotn = 'ThreeDimension.Scenario%s.FaultV%s.Station.Hypo.%s'%(ScenarioID, VID, savetype)
        fig.savefig( plotpth + plotn, format=savetype )
    OutputDict = {}
    OutputDict['FaultPlane'] = slon2d.tolist(), slat2d.tolist(), sdep2d.tolist()
    OutputDict['FaultSurface'] = slon2dS.tolist(), slat2dS.tolist()
    OutputDict['HypoLoc'] = slon,slat,hdep
    OutputDict['EpicLoc'] = slonS, slatS
    return OutputDict
| [
"numpy.meshgrid",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"numpy.cos"
] | [((791, 818), 'numpy.arange', 'np.arange', (['(0)', '(Fw + dfw)', 'dfw'], {}), '(0, Fw + dfw, dfw)\n', (800, 818), True, 'import numpy as np\n'), ((966, 985), 'numpy.meshgrid', 'np.meshgrid', (['fx', 'fy'], {}), '(fx, fy)\n', (977, 985), True, 'import numpy as np\n'), ((1208, 1228), 'numpy.meshgrid', 'np.meshgrid', (['fxS', 'fy'], {}), '(fxS, fy)\n', (1219, 1228), True, 'import numpy as np\n'), ((855, 882), 'numpy.arange', 'np.arange', (['(0)', '(Fl + dfl)', 'dfl'], {}), '(0, Fl + dfl, dfl)\n', (864, 882), True, 'import numpy as np\n'), ((1167, 1194), 'numpy.cos', 'np.cos', (['(dip * np.pi / 180.0)'], {}), '(dip * np.pi / 180.0)\n', (1173, 1194), True, 'import numpy as np\n'), ((1535, 1562), 'numpy.cos', 'np.cos', (['(dip * np.pi / 180.0)'], {}), '(dip * np.pi / 180.0)\n', (1541, 1562), True, 'import numpy as np\n'), ((1948, 1961), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1958, 1961), True, 'import matplotlib.pyplot as plt\n'), ((2714, 2727), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (2724, 2727), True, 'import matplotlib.pyplot as plt\n'), ((2741, 2752), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (2747, 2752), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((1007, 1034), 'numpy.sin', 'np.sin', (['(dip * np.pi / 180.0)'], {}), '(dip * np.pi / 180.0)\n', (1013, 1034), True, 'import numpy as np\n'), ((1453, 1480), 'numpy.sin', 'np.sin', (['(dip * np.pi / 180.0)'], {}), '(dip * np.pi / 180.0)\n', (1459, 1480), True, 'import numpy as np\n')] |
__author__ = 'Orthocenter'
import cv2
import numpy as np
import Utility.constants as constants
import methods
from layer_plot_method import Layer_Plot_Method
from Utility.color import Color
class Opaque(Layer_Plot_Method):
    """Layer plot method that draws sub-layers opaquely onto the bottom
    layer, optionally outlining drawn regions with contours.

    NOTE(review): this module targets Python 2 (``xrange``) and a legacy
    OpenCV API (two-value ``cv2.findContours``, ``cv2.CV_AA``) — confirm
    the runtime before porting.
    """

    def __init__(self, params, layer_config):
        Layer_Plot_Method.__init__(self, params, layer_config)

    def draw_contours(self, bottom_layer, black_bg):
        """Edge-detect ``black_bg`` and trace the contours onto ``bottom_layer``.

        Contour colour and thickness come from the layer config
        ("ccolor", default white; "ctn", default 0).
        """
        canny = cv2.Canny(black_bg, 0, 100)
        contours, hierarchy = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        color = Color()
        color.setHex(self.config.get("ccolor", "#FFFFFF"))
        color = color.getBGR()
        thickness = int(self.config.get("ctn", 0))
        cv2.drawContours(bottom_layer, contours, -1, color, thickness, cv2.CV_AA)
        return bottom_layer

    def blend(self, bottom_layer, black_bg):
        """Composite pass: currently only the optional contour drawing is
        active; the alpha-blending variants below are kept commented out
        for reference."""
        if self.config.get("contours", None) == "yes":
            bottom_layer = self.draw_contours(bottom_layer, black_bg)
            pass
        # another version, has some problem
        # if self.config.get("contours", None) == "yes":
        #     canvas = self.draw_contours(canvas)
        #
        # canvas_gray = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
        #
        # canvas_gray = np.float32(canvas_gray)
        # max = canvas_gray.max()
        # if max == 0:
        #     return bottom_layer
        # mask = canvas_gray / max
        # mask **= 10
        # mask_inv = 1. - mask
        #
        # bg = np.empty_like(bottom_layer)
        # fg = np.empty_like(canvas)
        # for i in xrange(3):
        #     bg[:, :, i] = np.multiply(bottom_layer[:, :, i], mask_inv)
        #     fg[:, :, i] = np.multiply(canvas[:, :, i], mask)
        # ret = bg + fg
        # edge feather version
        # canvas_gray = np.float32(canvas_gray)
        # canvas_gray *= 1. / 255
        #
        # retval, mask = cv2.threshold(canvas_gray, 0.5, 1.0, cv2.THRESH_BINARY)
        # #mask = cv2.GaussianBlur(mask, (5, 5), 1.0)
        # mask_inv = 1. - mask
        #
        # bottom_layer_bg = np.empty_like(bottom_layer)
        # canvas_fg = np.empty_like(canvas)
        # for i in xrange(3):
        #     bottom_layer_bg[:, :, i] = np.multiply(bottom_layer[:, :, i], mask_inv)
        #     canvas_fg[:, :, i] = np.multiply(canvas[:, :, i], mask)
        #
        # ret = cv2.add(bottom_layer_bg, canvas_fg)
        return bottom_layer

    def plot(self, data, bottom_layer):
        """Read sub-layer configs from ``data``, plot each one onto the
        bottom layer (accumulating marks in ``black_bg``), then blend.

        Each config line is a flat "key value key value ..." list that is
        folded into a dict and dispatched via ``methods.get_plot_class``.
        """
        black_bg = np.zeros(constants.u_tile_shape, constants.tile_dtype)
        num = int(data.readline())
        for elem_id in xrange(0, num):
            line = data.readline()
            layer_conf = line.rstrip(" \n").split(" ")
            layer_conf = dict([(layer_conf[i], layer_conf[i + 1]) for i in xrange(0, len(layer_conf), 2)])
            plotter = methods.get_plot_class(self.level, layer_conf)
            bottom_layer, black_bg = plotter.plot(data, bottom_layer, black_bg)
        bottom_layer = self.blend(bottom_layer, black_bg)
        return bottom_layer
| [
"cv2.Canny",
"layer_plot_method.Layer_Plot_Method.__init__",
"Utility.color.Color",
"numpy.zeros",
"methods.get_plot_class",
"cv2.drawContours",
"cv2.findContours"
] | [((279, 333), 'layer_plot_method.Layer_Plot_Method.__init__', 'Layer_Plot_Method.__init__', (['self', 'params', 'layer_config'], {}), '(self, params, layer_config)\n', (305, 333), False, 'from layer_plot_method import Layer_Plot_Method\n'), ((404, 431), 'cv2.Canny', 'cv2.Canny', (['black_bg', '(0)', '(100)'], {}), '(black_bg, 0, 100)\n', (413, 431), False, 'import cv2\n'), ((462, 525), 'cv2.findContours', 'cv2.findContours', (['canny', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (478, 525), False, 'import cv2\n'), ((543, 550), 'Utility.color.Color', 'Color', ([], {}), '()\n', (548, 550), False, 'from Utility.color import Color\n'), ((701, 774), 'cv2.drawContours', 'cv2.drawContours', (['bottom_layer', 'contours', '(-1)', 'color', 'thickness', 'cv2.CV_AA'], {}), '(bottom_layer, contours, -1, color, thickness, cv2.CV_AA)\n', (717, 774), False, 'import cv2\n'), ((2473, 2527), 'numpy.zeros', 'np.zeros', (['constants.u_tile_shape', 'constants.tile_dtype'], {}), '(constants.u_tile_shape, constants.tile_dtype)\n', (2481, 2527), True, 'import numpy as np\n'), ((2824, 2870), 'methods.get_plot_class', 'methods.get_plot_class', (['self.level', 'layer_conf'], {}), '(self.level, layer_conf)\n', (2846, 2870), False, 'import methods\n')] |
import numpy as np
import tensorflow as tf
import os
import scipy.io as sio
import matplotlib.pyplot as plt
print("--> Loading parameters...")
global par
par = {
# Setup parameters
'save_dir' : './savedir/',
'analyze_model' : True,
# Network shape
'n_recurrent' : 10,
'n_hidden' : [300],
# Timings and rates
'dt' : 10,
'learning_rate' : 5e-3,
# Areas to include
'areas' : [2,3],
# Variance values
'clip_max_grad_val' : 1,
'input_mean' : 0.0,
'noise_in_sd' : 0.05,
'noise_rnn_sd' : 0.1,
# Tuning function data
'num_motion_dirs' : 6,
# Cost parameters
'spike_cost' : 0.,
'weight_cost' : 1e-6,
# Training specs
'batch_train_size' : 1024,
'num_iterations' : 8000,
'iters_between_outputs' : 100,
'iters_per_group' : 4000,
# Task specs
'dead_time' : 320,
'fix_time' : 100,
'sample_time' : 660,
'delay_time' : 1020,
'test_time' : 400,
# Save paths
'save_fn' : 'model_results.pkl',
}
def update_parameters(updates):
    """
    Update entries of the global parameter dictionary ``par``.

    :param updates: dict mapping parameter names to their new values,
        e.g. ``{'learning_rate': 1e-3}``.

    NOTE(review): the original docstring described a list of (key, val)
    pairs, but the body iterates ``updates.items()``, so a dict is what
    callers must pass.
    """
    print('Updating parameters...')
    for key, val in updates.items():
        par[key] = val
        print('Updating ', key)
    # Recompute everything derived from the raw parameters.
    # NOTE(review): ``update_trial_params`` is not defined in this file —
    # confirm it is provided elsewhere, otherwise this raises NameError.
    update_trial_params()
    update_dependencies()
def update_dependencies():
    """
    Recompute all derived entries of the global ``par`` dictionary.

    Loads recorded spike trains from a hardcoded path, selects neurons in
    the requested brain areas, randomly splits them into two equal groups
    (training/testing targets for the RNN), and rebuilds every derived
    quantity: noise levels, trial length, initial states and the initial
    weight/bias matrices.
    """
    # NOTE(review): hardcoded, machine-specific data path.
    data_dir = '/home/masse/'
    data = sio.loadmat(data_dir + 'spike_trains.mat')
    # Mean spike train over trials/neurons; computed only for inspection
    # (the plotting lines below are disabled) — ``s`` is otherwise unused.
    s = np.nanmean(np.nanmean(np.nanmean(data['spike_train'],axis=3),axis=1),axis=0)
    #plt.plot(s[200:230])
    #plt.show()
    # Keep neurons belonging to the requested areas; the "< 99" mean-rate
    # check filters out sentinel/invalid recordings — TODO confirm.
    ind = np.array([int(i) for i in range(len(data['area'])) \
        if data['area'][i][0] in par['areas'] and np.mean(data['spike_train'][:,:,:,i]) < 99])
    # neural data will be split into two equal groups for training and testing the RNN
    # each will have size N
    N = len(ind)//2
    q = np.int16(np.random.permutation(N*2))
    par['neuron_ind'] = [ind[q[:N]], ind[q[N:]]]
    par['n_output'] = N
    par['noise_rnn'] = 1.*par['noise_rnn_sd']
    par['noise_in'] = 1.*par['noise_in_sd']   # since term will be multiplied by par['alpha_neuron']
    par['trial_length'] = par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time']+par['test_time']
    # Length of each trial in time steps
    par['num_time_steps'] = par['trial_length']//par['dt']
    ####################################################################
    ### Setting up assorted intial weights, biases, and other values ###
    ####################################################################
    par['h_init'] = 0.1*np.ones((par['n_recurrent'], par['batch_train_size']), dtype=np.float32)
    par['input_to_rnn_dims'] = [par['n_recurrent'], par['num_motion_dirs']]
    par['hidden_to_hidden_dims'] = [par['n_recurrent'], par['n_recurrent']]
    # Initialize input weights
    par['w_in0'] = initialize([par['n_recurrent'], par['num_motion_dirs']])
    par['w_rnn0'] = initialize([par['n_recurrent'], par['n_recurrent']])
    #par['w_rnn0'] = 0.3*np.eye(par['n_recurrent'], dtype = np.float32)
    par['b_rnn0'] = np.zeros((par['n_recurrent'], 1), dtype=np.float32)
    par['w_out0_0'] = initialize([par['n_hidden'][0], par['n_recurrent']])
    par['b_out0_0'] = np.zeros((par['n_hidden'][0], 1), dtype=np.float32)
    # Disabled two-hidden-layer readout variant (kept for reference):
    """
    par['w_out1_0'] = initialize([par['n_hidden'][1], par['n_hidden'][0]])
    par['w_out2_0'] = initialize([par['n_output'], par['n_hidden'][1]])
    par['b_out1_0'] = np.zeros((par['n_hidden'][1], 1), dtype=np.float32)
    par['b_out2_0'] = np.zeros((par['n_output'], 1), dtype=np.float32)
    """
    par['w_out1_0'] = initialize([par['n_output'], par['n_hidden'][0]])
    par['b_out1_0'] = np.zeros((par['n_output'], 1), dtype=np.float32)
def initialize(dims):
    """Draw an initial weight matrix of shape ``dims``, uniform in
    [-0.05, 0.05], returned as float32."""
    weights = np.random.uniform(-0.05, 0.05, size=dims)
    return np.float32(weights)
update_dependencies()
print("--> Parameters successfully loaded.\n")
| [
"numpy.random.uniform",
"scipy.io.loadmat",
"numpy.float32",
"numpy.zeros",
"numpy.ones",
"numpy.mean",
"numpy.random.permutation",
"numpy.nanmean"
] | [((1762, 1804), 'scipy.io.loadmat', 'sio.loadmat', (["(data_dir + 'spike_trains.mat')"], {}), "(data_dir + 'spike_trains.mat')\n", (1773, 1804), True, 'import scipy.io as sio\n'), ((3449, 3500), 'numpy.zeros', 'np.zeros', (["(par['n_recurrent'], 1)"], {'dtype': 'np.float32'}), "((par['n_recurrent'], 1), dtype=np.float32)\n", (3457, 3500), True, 'import numpy as np\n'), ((3599, 3650), 'numpy.zeros', 'np.zeros', (["(par['n_hidden'][0], 1)"], {'dtype': 'np.float32'}), "((par['n_hidden'][0], 1), dtype=np.float32)\n", (3607, 3650), True, 'import numpy as np\n'), ((4054, 4102), 'numpy.zeros', 'np.zeros', (["(par['n_output'], 1)"], {'dtype': 'np.float32'}), "((par['n_output'], 1), dtype=np.float32)\n", (4062, 4102), True, 'import numpy as np\n'), ((4194, 4235), 'numpy.random.uniform', 'np.random.uniform', (['(-0.05)', '(0.05)'], {'size': 'dims'}), '(-0.05, 0.05, size=dims)\n', (4211, 4235), True, 'import numpy as np\n'), ((4246, 4259), 'numpy.float32', 'np.float32', (['w'], {}), '(w)\n', (4256, 4259), True, 'import numpy as np\n'), ((2242, 2270), 'numpy.random.permutation', 'np.random.permutation', (['(N * 2)'], {}), '(N * 2)\n', (2263, 2270), True, 'import numpy as np\n'), ((2949, 3021), 'numpy.ones', 'np.ones', (["(par['n_recurrent'], par['batch_train_size'])"], {'dtype': 'np.float32'}), "((par['n_recurrent'], par['batch_train_size']), dtype=np.float32)\n", (2956, 3021), True, 'import numpy as np\n'), ((1835, 1874), 'numpy.nanmean', 'np.nanmean', (["data['spike_train']"], {'axis': '(3)'}), "(data['spike_train'], axis=3)\n", (1845, 1874), True, 'import numpy as np\n'), ((2045, 2085), 'numpy.mean', 'np.mean', (["data['spike_train'][:, :, :, i]"], {}), "(data['spike_train'][:, :, :, i])\n", (2052, 2085), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding:utf-8
import numpy as np
import rospy
from common import functions
class RobotKeeper(object):
def __init__(self, kick):
    # type: (robot_kick.RobotKick) -> None
    """Goalkeeper role: caches references to the controlled robot, the
    friend/enemy observations and the ball state from the kick controller.
    """
    self.robot_id = kick.pid.robot_id
    self.ctrld_robot = kick.pid.ctrld_robot
    self.friend = kick.pid.friend
    self.enemy = kick.pid.enemy
    self.ball_params = kick.ball_params
    self.kick = kick
    self.team_side = "left"  # always "left": CONSAI converts coordinates for us
    # Goal post positions (x, y) — presumably in metres; TODO confirm units.
    self.goal_right = [6, -0.620]
    self.goal_left = [6, 0.620]
    # Robot radius, used as the stand-off distance from the defense point.
    self.r_offset = self.ctrld_robot.size_r
    self.objects = kick.pid.objects
    self._ball_in_friend_penalty_start_time = rospy.Time.now()
def calc_keeper_position(self, defense_point_x, defense_point_y):
    """Compute the keeper's target pose.

    Intersects the line through the ball and the defense point with the
    line through the near goal post, then backs off from the intersection
    towards the ball by the robot radius (``self.r_offset``).  Returns
    (x, y, theta) where theta faces the ball; returns (0, 0, 0) when the
    ball coincides with the defense point (degenerate geometry).

    NOTE(review): ``team_side`` is fixed to "left" in ``__init__``, so the
    "right" branch appears to be dead code — confirm before removing.
    """
    if self.team_side == "right":
        # Line (a1, b1, c1) through ball and defense point (y mirrored).
        a1 = -self.ball_params.get_current_position()[1] + defense_point_y
        b1 = defense_point_x - self.ball_params.get_current_position()[0]
        c1 = self.ball_params.get_current_position()[0] * -a1 -self.ball_params.get_current_position()[1] * -b1
        # Perpendicular direction, anchored at the nearer goal post.
        a2 = b1
        b2 = -a1
        if self.ball_params.get_current_position()[1] < 0.:
            c2 =self.goal_right[0] * -a2 - self.goal_right[1] * -b2
        else:
            c2 =self.goal_left[0] * -a2 - self.goal_left[1] * -b2
        # Normalised offset along the ball->defense-point direction.
        if np.sqrt((self.ball_params.get_current_position()[0] - defense_point_x)**2 + (-self.ball_params.get_current_position()[1] + defense_point_y)**2) != 0:
            t = self.r_offset / np.sqrt((self.ball_params.get_current_position()[0] - defense_point_x)**2 + (-self.ball_params.get_current_position()[1] + defense_point_y)**2)
        else:
            return 0, 0, 0
        keeper_position_x = (b1 * c2 - b2 * c1) / (a1 * b2 - a2 * b1) + (self.ball_params.get_current_position()[0] - defense_point_x) * t
        keeper_position_y = -((a1 * c2 - a2 * c1) / (a2 * b1 - a1 * b2) + (-self.ball_params.get_current_position()[1] + defense_point_y) * t)
    else:
        # Same construction for the left side (mirrored signs).
        a1 = self.ball_params.get_current_position()[1] - defense_point_y
        b1 = -defense_point_x + self.ball_params.get_current_position()[0]
        c1 = -self.ball_params.get_current_position()[0] * -a1 + self.ball_params.get_current_position()[1] * -b1
        a2 = b1
        b2 = -a1
        if self.ball_params.get_current_position()[1] > 0.:
            c2 =self.goal_right[0] * -a2 - self.goal_right[1] * -b2
        else:
            c2 =self.goal_left[0] * -a2 - self.goal_left[1] * -b2
        if np.sqrt((-self.ball_params.get_current_position()[0] + defense_point_x)**2 + (self.ball_params.get_current_position()[1] - defense_point_y)**2) != 0:
            t = self.r_offset / np.sqrt((-self.ball_params.get_current_position()[0] + defense_point_x)**2 + (self.ball_params.get_current_position()[1] - defense_point_y)**2)
        else:
            return 0, 0, 0
        keeper_position_x = -((b1 * c2 - b2 * c1) / (a1 * b2 - a2 * b1) + (-self.ball_params.get_current_position()[0] + defense_point_x) * t)
        keeper_position_y = (a1 * c2 - a2 * c1) / (a2 * b1 - a1 * b2) + (self.ball_params.get_current_position()[1] - defense_point_y) * t
    # Face the ball.
    keeper_position_theta = np.arctan2((self.ball_params.get_current_position()[1] - self.ctrld_robot.get_current_position()[1]) , (self.ball_params.get_current_position()[0] - self.ctrld_robot.get_current_position()[0]))
    return keeper_position_x, keeper_position_y, keeper_position_theta
def calc_line(self, team_side, y):
    """For a given y coordinate, return the x positions of the two lines
    joining the ball to the right and left goal posts.

    :param team_side: "right" or "left"; selects the goal-side sign convention
    :param y: y coordinate at which the two lines are evaluated
    :return: (right_line, left_line) x coordinates

    NOTE(review): divides by the y-distance between ball and post, so this
    raises ZeroDivisionError when the ball is exactly level with a post.
    """
    if team_side == "right":
        left_line = ((self.ball_params.get_current_position()[0] - self.goal_left[0]) / (-self.ball_params.get_current_position()[1] + self.goal_left[1]) * (-y + self.goal_left[1]) + self.goal_left[0])
        right_line = ((self.ball_params.get_current_position()[0] - self.goal_right[0]) / (-self.ball_params.get_current_position()[1] + self.goal_right[1]) * (-y + self.goal_right[1]) + self.goal_right[0])
    if team_side == "left":
        left_line = ((self.ball_params.get_current_position()[0] + self.goal_left[0]) / (-self.ball_params.get_current_position()[1] - self.goal_left[1]) * (-y - self.goal_left[1]) - self.goal_left[0])
        right_line = ((self.ball_params.get_current_position()[0] + self.goal_right[0]) / (-self.ball_params.get_current_position()[1] - self.goal_right[1]) * (-y - self.goal_right[1]) - self.goal_right[0])
    return right_line, left_line
def detect_obstacle(self):
friend_obstacle = []
enemy_obstacle = []
if self.team_side == "right":
if self.goal_right[1] < self.ball_params.get_current_position()[1] < self.goal_left[1]:
for i in self.objects.get_active_robot_ids():
if i != self.robot_id:
right_line, left_line = self.calc_line(self.team_side, self.objects.get_robot_by_id(i).get_current_position()[1])
if self.objects.get_robot_by_id(i).get_current_position()[0] > right_line \
and self.objects.get_robot_by_id(i).get_current_position()[0] > left_line:
friend_obstacle.append(i)
for i in self.objects.get_active_enemy_ids():
right_line, left_line = self.calc_line(self.team_side, self.objects.get_enemy_by_id(i).get_current_position()[1])
if self.objects.get_enemy_by_id(i).get_current_position()[0] > right_line \
and self.objects.get_enemy_by_id(i).get_current_position()[0] > left_line:
enemy_obstacle.append(i)
if self.ball_params.get_current_position()[1] < self.goal_right[1]:
for i in self.objects.get_active_robot_ids():
if i != self.robot_id:
right_line, left_line = self.calc_line(self.team_side, self.objects.get_robot_by_id(i).get_current_position()[1])
if self.objects.get_robot_by_id(i).get_current_position()[0] < right_line \
and self.objects.get_robot_by_id(i).get_current_position()[0] > left_line:
friend_obstacle.append(i)
for i in self.objects.get_active_enemy_ids():
right_line, left_line = self.calc_line(self.team_side, self.objects.get_enemy_by_id(i).get_current_position()[1])
if self.objects.get_enemy_by_id(i).get_current_position()[0] < right_line \
and self.objects.get_enemy_by_id(i).get_current_position()[0] > left_line:
enemy_obstacle.append(i)
if self.goal_left[1] < self.ball_params.get_current_position()[1]:
for i in self.objects.get_active_robot_ids():
if i != self.robot_id:
right_line, left_line = self.calc_line(self.team_side, self.objects.get_robot_by_id(i).get_current_position()[1])
if self.objects.get_robot_by_id(i).get_current_position()[0] > right_line \
and self.objects.get_robot_by_id(i).get_current_position()[0] < left_line:
friend_obstacle.append(i)
for i in self.objects.get_active_enemy_ids():
right_line, left_line = self.calc_line(self.team_side, self.objects.get_enemy_by_id(i).get_current_position()[1])
if self.objects.get_enemy_by_id(i).get_current_position()[0] > right_line \
and self.objects.get_enemy_by_id(i).get_current_position()[0] < left_line:
enemy_obstacle.append(i)
else:
if self.goal_right[1] < self.ball_params.get_current_position()[1] < self.goal_left[1]:
for i in self.objects.get_active_robot_ids():
if i != self.robot_id:
right_line, left_line = self.calc_line(self.team_side, self.objects.get_robot_by_id(i).get_current_position()[1])
if self.objects.get_robot_by_id(i).get_current_position()[0] < right_line \
and self.objects.get_robot_by_id(i).get_current_position()[0] < left_line:
friend_obstacle.append(i)
for i in self.objects.get_active_enemy_ids():
right_line, left_line = self.calc_line(self.team_side, self.objects.get_enemy_by_id(i).get_current_position()[1])
if self.objects.get_enemy_by_id(i).get_current_position()[0] < right_line \
and self.objects.get_enemy_by_id(i).get_current_position()[0] < left_line:
enemy_obstacle.append(i)
if self.ball_params.get_current_position()[1] < self.goal_right[1]:
for i in self.objects.get_active_robot_ids():
if i != self.robot_id:
right_line, left_line = self.calc_line(self.team_side, self.objects.get_robot_by_id(i).get_current_position()[1])
if self.objects.get_robot_by_id(i).get_current_position()[0] < right_line \
and self.objects.get_robot_by_id(i).get_current_position()[0] > left_line:
friend_obstacle.append(i)
for i in self.objects.get_active_enemy_ids():
right_line, left_line = self.calc_line(self.team_side, self.objects.get_enemy_by_id(i).get_current_position()[1])
if self.objects.get_enemy_by_id(i).get_current_position()[0] < right_line \
and self.objects.get_enemy_by_id(i).get_current_position()[0] > left_line:
enemy_obstacle.append(i)
if self.goal_left[1] < self.ball_params.get_current_position()[1]:
for i in self.objects.get_active_robot_ids():
if i != self.robot_id:
right_line, left_line = self.calc_line(self.team_side, self.objects.get_robot_by_id(i).get_current_position()[1])
if self.objects.get_robot_by_id(i).get_current_position()[0] > right_line \
and self.objects.get_robot_by_id(i).get_current_position()[0] < left_line:
friend_obstacle.append(i)
for i in self.objects.get_active_enemy_ids():
right_line, left_line = self.calc_line(self.team_side, self.objects.get_enemy_by_id(i).get_current_position()[1])
if self.objects.get_enemy_by_id(i).get_current_position()[0] > right_line \
and self.objects.get_enemy_by_id(i).get_current_position()[0] < left_line:
enemy_obstacle.append(i)
return friend_obstacle, enemy_obstacle
    def culc_sheltered_area(self, obstacle):
        """Compute the goal-line interval shadowed by one obstacle robot.

        Works in a coordinate frame relative to the obstacle: solves a
        quadratic for two contact points on the obstacle (radius
        ``self.ctrld_robot.size_r``) as seen from the ball, then projects
        both resulting lines onto the goal line x = goal_center_x.
        (Geometric interpretation inferred from the formulas -- confirm.)

        Args:
            obstacle: position sequence (x, y) of the blocking robot.

        Returns:
            tuple: (shel_r, shel_l) y-bounds of the shadowed interval on the
                goal line, ordered per team side.  Returns (0, 0) when the
                quadratic degenerates (non-positive discriminant or zero
                leading coefficient).

        NOTE(review): ``ball_y_obst_coord`` equal to zero divides by zero
        before the degenerate-case guard runs -- confirm callers exclude
        obstacles level with the ball.
        """
        if self.team_side == "right":
            # Obstacle-relative ball offset, right-side sign convention.
            ball_x_obst_coord = self.ball_params.get_current_position()[0] - obstacle[0]
            ball_y_obst_coord = -self.ball_params.get_current_position()[1] + obstacle[1]
            # Quadratic a*x^2 + b*x + c = 0 for the contact x-coordinates.
            a = 1. + (ball_x_obst_coord**2. / ball_y_obst_coord**2.)
            b = -(2. * self.ctrld_robot.size_r**2. * ball_x_obst_coord / ball_y_obst_coord**2)
            c = (self.ctrld_robot.size_r**4. / ball_y_obst_coord**2.) - self.ctrld_robot.size_r**2
            if (b**2. - 4. * a * c <= 0.) or (2. * a == 0.):
                # Degenerate geometry: no real contact points.
                return 0, 0
            obst_contact_x1 = (-b - np.sqrt(b**2. - 4. * a * c)) / (2. * a)
            obst_contact_y1 = 1. / ball_y_obst_coord * (-ball_x_obst_coord * obst_contact_x1 + self.ctrld_robot.size_r**2.)
            obst_contact_x2 = (-b + np.sqrt(b**2. - 4. * a * c)) / (2. * a)
            obst_contact_y2 = 1. / ball_y_obst_coord * (-ball_x_obst_coord * obst_contact_x2 + self.ctrld_robot.size_r**2.)
            goal_center_x = self.goal_left[0]
            # Project each contact line onto the goal line x = goal_center_x.
            y_goal_l1 = 1. / obst_contact_y1 * (-obst_contact_x1 * goal_center_x + obst_contact_x1 * obstacle[0] + obst_contact_y1 * -obstacle[1] + self.ctrld_robot.size_r**2.)
            y_goal_l2 = 1. / obst_contact_y2 * (-obst_contact_x2 * goal_center_x + obst_contact_x2 * obstacle[0] + obst_contact_y2 * -obstacle[1] + self.ctrld_robot.size_r**2.)
            # Order the projected bounds.
            if -y_goal_l1 <= -y_goal_l2:
                shel_r = -y_goal_l1
                shel_l = -y_goal_l2
            else:
                shel_r = -y_goal_l2
                shel_l = -y_goal_l1
        else:
            # Mirrored sign convention for the left team side.
            ball_x_obst_coord = -self.ball_params.get_current_position()[0] + obstacle[0]
            ball_y_obst_coord = self.ball_params.get_current_position()[1] - obstacle[1]
            a = 1. + (ball_x_obst_coord**2. / ball_y_obst_coord**2.)
            b = -(2. * self.ctrld_robot.size_r**2. * ball_x_obst_coord / ball_y_obst_coord**2)
            c = (self.ctrld_robot.size_r**4. / ball_y_obst_coord**2.) - self.ctrld_robot.size_r**2
            if (b**2. - 4. * a * c <= 0.) or (2. * a == 0.):
                return 0, 0
            obst_contact_x1 = (-b - np.sqrt(b**2. - 4. * a * c)) / (2. * a)
            obst_contact_y1 = 1. / ball_y_obst_coord * (-ball_x_obst_coord * obst_contact_x1 + self.ctrld_robot.size_r**2.)
            obst_contact_x2 = (-b + np.sqrt(b**2. - 4. * a * c)) / (2. * a)
            obst_contact_y2 = 1. / ball_y_obst_coord * (-ball_x_obst_coord * obst_contact_x2 + self.ctrld_robot.size_r**2.)
            goal_center_x = self.goal_left[0]
            y_goal_l1 = 1. / obst_contact_y1 * (-obst_contact_x1 * goal_center_x + obst_contact_x1 * -obstacle[0] + obst_contact_y1 * obstacle[1] + self.ctrld_robot.size_r**2.)
            y_goal_l2 = 1. / obst_contact_y2 * (-obst_contact_x2 * goal_center_x + obst_contact_x2 * -obstacle[0] + obst_contact_y2 * obstacle[1] + self.ctrld_robot.size_r**2.)
            if y_goal_l1 <= y_goal_l2:
                shel_r = y_goal_l1
                shel_l = y_goal_l2
            else:
                shel_r = y_goal_l2
                shel_l = y_goal_l1
        return shel_r, shel_l
def sort_sheltered_area(self, shel_r, shel_l):
if self.team_side == "right":
shel_area = sorted(dict(zip(shel_l, shel_r)).items(), key=lambda x:-x[0])
else:
shel_area = sorted(dict(zip(shel_l, shel_r)).items(), key=lambda x:x[0])
sort_area = []
while(len(shel_area) != 0):
comb_shel, shel_area = self.combine_sheltered_area(shel_area, shel_area[0])
sort_area.append(comb_shel)
return sort_area
    def combine_sheltered_area(self, shel_area, shel):
        """Merge interval ``shel`` with the overlapping intervals in ``shel_area``.

        Intervals are (left, right) pairs ordered per team side (see
        ``sort_sheltered_area``).  Intervals whose right bound already lies
        on the covered side of ``shel`` are dropped; partial overlaps extend
        the merged interval, and the merge recurses until no further overlap
        is found.  ``shel_area`` is mutated in place.

        Args:
            shel_area: remaining (left, right) interval pairs.
            shel: interval currently being merged.

        Returns:
            tuple: (comb_shel, shel_area) -- the merged [left, right]
                interval and the intervals still left to process.
        """
        if self.team_side == "right":
            comb_shel = [shel[0], shel[1]]
            # Drop intervals already covered by shel (includes shel itself).
            for i in reversed(range(len(shel_area))):
                if shel[1] <= shel_area[i][1]:
                    del shel_area[i]
            shel_r_min = shel[1]
            # Extend over intervals whose left bound falls inside shel.
            for i in range(len(shel_area)):
                if shel[0] > shel_area[i][0] > shel[1]:
                    comb_shel[0] = shel[0]
                    if shel_area[i][1] < shel_r_min:
                        shel_r_min = shel_area[i][1]
            if shel_r_min != shel[1]:
                # The interval grew: re-merge against what is left.
                comb_shel[1] = shel_r_min
                comb_shel, shel_area = self.combine_sheltered_area(shel_area, comb_shel)
        else:
            # Mirrored comparisons for the left team side.
            comb_shel = [shel[0], shel[1]]
            for i in reversed(range(len(shel_area))):
                if shel[1] >= shel_area[i][1]:
                    del shel_area[i]
            shel_r_min = shel[1]
            for i in range(len(shel_area)):
                if shel[0] < shel_area[i][0] < shel[1]:
                    comb_shel[0] = shel[0]
                    if shel_area[i][1] > shel_r_min:
                        shel_r_min = shel_area[i][1]
            if shel_r_min != shel[1]:
                comb_shel[1] = shel_r_min
                comb_shel, shel_area = self.combine_sheltered_area(shel_area, comb_shel)
        return comb_shel, shel_area
def culc_defense_point(self, shel):
defense_area_r = []
defense_area_l = []
defense_point_x = 0
defense_point_y = 0
if self.team_side == "right":
if shel == [[]]:
return self.goal_left[0], 0
defense_area_array = np.append(np.ravel(shel), [self.goal_left[1], self.goal_right[1]])
defense_area_array.sort()
defense_area_array = defense_area_array[::-1]
defense_area = []
for i in range(len(defense_area_array) - 1):
for j in range(len(shel)):
if (not(shel[j][0] >= defense_area_array[i] >= shel[j][1]) \
or not(shel[j][0] >= defense_area_array[i + 1] >= shel[j][1])) \
and (self.goal_left[1] >= defense_area_array[i] >= self.goal_right[1]) \
and (self.goal_left[1] >= defense_area_array[i + 1] >= self.goal_right[1]):
defense_area.append([defense_area_array[i], defense_area_array[i+1]])
defense_area_max = 0
for i in range(len(defense_area)):
if defense_area[i][0] - defense_area[i][1] > defense_area_max:
defense_area_max = defense_area[i][0] - defense_area[i][1]
defense_point_y = (defense_area[i][0] + defense_area[i][1]) / 2.
defense_point_x = self.goal_left[0]
else:
if shel == [[]]:
return -self.goal_left[0], 0
defense_area_array = np.append(np.ravel(shel), [-self.goal_left[1], -self.goal_right[1]])
defense_area_array.sort()
defense_area_array = defense_area_array[::-1]
defense_area = []
for i in range(len(defense_area_array) - 1):
for j in range(len(shel)):
if (not(shel[j][0] >= defense_area_array[i] >= shel[j][1]) \
or not(shel[j][0] >= defense_area_array[i + 1] >= shel[j][1])) \
and (self.goal_left[1] >= defense_area_array[i] >= self.goal_right[1]) \
and (self.goal_left[1] >= defense_area_array[i + 1] >= self.goal_right[1]):
defense_area.append([defense_area_array[i], defense_area_array[i+1]])
defense_area_max = 0
for i in range(len(defense_area)):
if defense_area[i][0] - defense_area[i][1] > defense_area_max:
defense_area_max = defense_area[i][0] - defense_area[i][1]
defense_point_y = (defense_area[i][0] + defense_area[i][1]) / 2.
defense_point_x = -self.goal_left[0]
return defense_point_x, defense_point_y
    def keeper(self):
        """Top-level goalkeeper behaviour for one control cycle.

        If the ball has stayed inside our penalty area for more than 3
        seconds, clears it with a tip-kick pass.  Otherwise detects robots
        shadowing the goal, computes the widest unshadowed goal segment and
        moves to the matching keeper position to receive the ball.
        """
        if functions.in_penalty_area(self.ball_params.get_current_position()) == "friend":
            if (rospy.Time.now() - self._ball_in_friend_penalty_start_time).to_sec() > 3.0:
                # Ball has been stuck in our penalty area too long: clear it.
                self.kick.pass_ball(self.ctrld_robot.get_pass_target_position()[0],
                                    self.ctrld_robot.get_pass_target_position()[1],
                                    is_tip_kick=True,
                                    ignore_penalty_area=True)
                return
        else:
            # Ball is outside the penalty area: restart the in-area timer.
            self._ball_in_friend_penalty_start_time = rospy.Time.now()
        shel_r = []
        shel_l = []
        if self.team_side == "right":
            defense_point_x = self.goal_left[0]
        else:
            defense_point_x = -self.goal_left[0]
        defense_point_y = 0
        # Wall (obstacle) detection.
        friend_obstacle, enemy_obstacle = self.detect_obstacle()
        # Collect the goal intervals shadowed by the detected walls.
        if friend_obstacle != [] or enemy_obstacle != []:
            for i in friend_obstacle:
                shel_r.append(self.culc_sheltered_area(self.objects.get_robot_by_id(i).get_current_position())[0])
                shel_l.append(self.culc_sheltered_area(self.objects.get_robot_by_id(i).get_current_position())[1])
            """
            for i in enemy_obstacle:
                shel_r.append(self.culc_sheltered_area(self.enemy[i].get_current_position())[0])
                shel_l.append(self.culc_sheltered_area(self.enemy[i].get_current_position())[1])
            """
            shel = self.sort_sheltered_area(shel_r, shel_l)
            # Keeper defense position from the widest open goal segment.
            defense_point_x, defense_point_y = self.culc_defense_point(shel)
        x, y, theta = self.calc_keeper_position(defense_point_x, defense_point_y)
        self.kick.receive_ball_keeper((x, y))
| [
"numpy.sqrt",
"rospy.Time.now",
"numpy.ravel"
] | [((715, 731), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (729, 731), False, 'import rospy\n'), ((19366, 19382), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (19380, 19382), False, 'import rospy\n'), ((16366, 16380), 'numpy.ravel', 'np.ravel', (['shel'], {}), '(shel)\n', (16374, 16380), True, 'import numpy as np\n'), ((17614, 17628), 'numpy.ravel', 'np.ravel', (['shel'], {}), '(shel)\n', (17622, 17628), True, 'import numpy as np\n'), ((11607, 11638), 'numpy.sqrt', 'np.sqrt', (['(b ** 2.0 - 4.0 * a * c)'], {}), '(b ** 2.0 - 4.0 * a * c)\n', (11614, 11638), True, 'import numpy as np\n'), ((11807, 11838), 'numpy.sqrt', 'np.sqrt', (['(b ** 2.0 - 4.0 * a * c)'], {}), '(b ** 2.0 - 4.0 * a * c)\n', (11814, 11838), True, 'import numpy as np\n'), ((13161, 13192), 'numpy.sqrt', 'np.sqrt', (['(b ** 2.0 - 4.0 * a * c)'], {}), '(b ** 2.0 - 4.0 * a * c)\n', (13168, 13192), True, 'import numpy as np\n'), ((13361, 13392), 'numpy.sqrt', 'np.sqrt', (['(b ** 2.0 - 4.0 * a * c)'], {}), '(b ** 2.0 - 4.0 * a * c)\n', (13368, 13392), True, 'import numpy as np\n'), ((18915, 18931), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (18929, 18931), False, 'import rospy\n')] |
import networkx as nx
import argparse
import numpy as np
from measures import F1
import sys
sys.setrecursionlimit(5500000)
def dfs_visit_recursively(g, node, nodes_color, edges_to_be_removed):
    """DFS-visit *node*, collecting back edges into *edges_to_be_removed*.

    Colors: 0 = white (unvisited), 1 = grey (on the current DFS path),
    2 = black (finished).  An edge into a grey node is a back edge and is
    recorded for removal.  Children are visited in a random permutation,
    as before.

    Implemented with an explicit stack (despite the historical name kept
    for callers) so the traversal no longer depends on the module's huge
    ``sys.setrecursionlimit`` and cannot overflow the interpreter stack on
    deep graphs.

    Args:
        g: directed graph exposing ``successors_iter(node)``
            (networkx 1.x DiGraph API).
        node: root node for this visit; expected to be white on entry.
        nodes_color (dict): node -> color, updated in place.
        edges_to_be_removed (list): (parent, child) back edges, appended
            in discovery order.
    """
    nodes_color[node] = 1
    # Each stack frame: (node, iterator over its randomly permuted children).
    stack = [(node, iter(np.random.permutation(list(g.successors_iter(node)))))]
    while stack:
        parent, children = stack[-1]
        descended = False
        for child in children:
            if nodes_color[child] == 0:
                # Tree edge: descend into the white child.
                nodes_color[child] = 1
                stack.append(
                    (child, iter(np.random.permutation(list(g.successors_iter(child)))))
                )
                descended = True
                break
            elif nodes_color[child] == 1:
                # Grey child is on the current path: back edge.
                edges_to_be_removed.append((parent, child))
        if not descended:
            # All children handled: finish this node.
            nodes_color[parent] = 2
            stack.pop()
def dfs_remove_back_edges(graph_file, nodetype=int):
    """Run randomized DFS over the whole graph and extract its back edges.

    Reads a directed graph from *graph_file* (edge-list format), performs a
    depth-first traversal from every still-white node in random order, and
    writes the discovered back edges to
    ``<graph_file minus its last 6 characters>_removed_by_dfs.edges``.

    Node colors: 0 white (unvisited), 1 grey (being visited),
    2 black (already visited).

    Args:
        graph_file (str): path to the input edge-list file.
        nodetype: callable used to parse node labels (default ``int``).

    Returns:
        list: (parent, child) back-edge tuples.
    """
    graph = nx.read_edgelist(graph_file, create_using=nx.DiGraph(), nodetype=nodetype)
    nodes_color = {n: 0 for n in graph.nodes_iter()}
    edges_to_be_removed = []
    start_order = np.random.permutation(list(graph.nodes_iter()))
    num_dfs = 0
    for n in start_order:
        if nodes_color[n] == 0:
            num_dfs += 1
            dfs_visit_recursively(graph, n, nodes_color, edges_to_be_removed)
    #print("number of nodes to start dfs: %d" % num_dfs)
    #print("number of back edges: %d" % len(edges_to_be_removed))
    edges_to_be_removed_file = graph_file[:len(graph_file) - 6] + "_removed_by_dfs.edges"
    print("edges to be removed, saved in file: %s" % edges_to_be_removed_file)
    from file_io import write_pairs_to_file
    write_pairs_to_file(edges_to_be_removed, edges_to_be_removed_file)
    return edges_to_be_removed
def dfs_performance(graph_file, gt_edges_file):
    """Remove back edges via DFS and report accuracy against ground truth.

    Args:
        graph_file (str): path to the input edge-list file.
        gt_edges_file (str): path to the ground-truth edges file.
    """
    removed_edges = dfs_remove_back_edges(graph_file)
    from measures import report_performance
    report_performance(gt_edges_file, removed_edges, "DFS")
if __name__ == "__main__":
    # Command-line entry point: DFS-based back-edge removal.
    parser = argparse.ArgumentParser()
    parser.add_argument("-g", "--graph_file", default=" ",
                        help="input graph file name (edges list)")
    parser.add_argument("-t", "--gt_edges_file", default=None,
                        help="ground truth edges file")
    cli_args = parser.parse_args()
    print("graph_file %s " % cli_args.graph_file)
    dfs_performance(cli_args.graph_file, cli_args.gt_edges_file)
| [
"argparse.ArgumentParser",
"file_io.write_pairs_to_file",
"measures.report_performance",
"numpy.random.permutation",
"networkx.DiGraph",
"sys.setrecursionlimit"
] | [((94, 124), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(5500000)'], {}), '(5500000)\n', (115, 124), False, 'import sys\n'), ((277, 311), 'numpy.random.permutation', 'np.random.permutation', (['nodes_order'], {}), '(nodes_order)\n', (298, 311), True, 'import numpy as np\n'), ((908, 942), 'numpy.random.permutation', 'np.random.permutation', (['nodes_order'], {}), '(nodes_order)\n', (929, 942), True, 'import numpy as np\n'), ((1414, 1480), 'file_io.write_pairs_to_file', 'write_pairs_to_file', (['edges_to_be_removed', 'edges_to_be_removed_file'], {}), '(edges_to_be_removed, edges_to_be_removed_file)\n', (1433, 1480), False, 'from file_io import write_pairs_to_file\n'), ((1655, 1716), 'measures.report_performance', 'report_performance', (['gt_edges_file', 'edges_to_be_removed', '"""DFS"""'], {}), "(gt_edges_file, edges_to_be_removed, 'DFS')\n", (1673, 1716), False, 'from measures import report_performance\n'), ((1789, 1814), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1812, 1814), False, 'import argparse\n'), ((725, 737), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (735, 737), True, 'import networkx as nx\n')] |
import random as rnd
import matplotlib.pyplot as plt
import numpy as np
import math as mth
class Cards:
    """A shuffled deck of numbered cards with a cycling draw index.

    Attributes:
        number_of_cards (int): deck size.
        deck_order (list[int]): card ids 1..n in shuffled order.
        card_index (int): index of the next card to draw; wraps to 0.
    """

    def __init__(self, number_of_cards):
        self.number_of_cards = number_of_cards
        self.deck_order = self.generate_cards()
        self.shuffle_deck()
        self.card_index = 0

    def generate_cards(self):
        """Return the ordered card ids 1..number_of_cards."""
        return list(range(1, self.number_of_cards + 1))

    def shuffle_deck(self):
        """Shuffle the deck in place."""
        rnd.shuffle(self.deck_order)

    def increment_card_index(self):
        """Advance the draw index, wrapping back to the first card."""
        self.card_index = (self.card_index + 1) % self.number_of_cards

    def reset_card_index(self):
        """Point the draw index back at the top of the deck."""
        self.card_index = 0
class CommunityChestCards(Cards):
    """The Community Chest deck: 17 cards, two of which move the player.

    ``card_definitions`` maps a card id to its movement instruction
    (card 1 moves to square 1, card 6 to square 11); cards that do not
    move the player are omitted.
    """

    def __init__(self):
        super().__init__(number_of_cards=17)
        # Board squares (1-based) that trigger drawing from this deck.
        self.cards_pick_up_locations = [3, 18, 34]
        self.card_definitions = self.define_cards()

    @staticmethod
    def define_cards():
        """Return the movement-relevant card definitions."""
        return {1: {'set': 1},
                6: {'set': 11}}
class ChanceCards(Cards):
    """The Chance deck: 16 cards, nine of which move the player.

    ``card_definitions`` maps a card id to a movement instruction:
    ``{'set': n}`` jumps to square n, ``{'nearest': [...]}`` advances to
    the closest listed square, ``{'move': d}`` shifts by d squares.
    """

    def __init__(self):
        super().__init__(number_of_cards=16)
        # Board squares (1-based) that trigger drawing from this deck.
        self.cards_pick_up_locations = [8, 23, 37]
        self.card_definitions = self.define_cards()

    @staticmethod
    def define_cards():
        """Return the movement-relevant card definitions."""
        return {1: {'set': 1},
                2: {'set': 25},
                3: {'set': 12},
                4: {'nearest': [13, 29]},
                5: {'nearest': [6, 16, 26, 36]},
                8: {'move': -3},
                9: {'set': 11},
                12: {'set': 6},
                13: {'set': 40}}
class MonopolySimulation:
    """Monte-Carlo simulation of piece movement in Monopoly.

    Plays many games of a fixed number of turns, recording which square
    each turn ends on, and can render the resulting landing-percentage
    heat map around the board edge.

    Attributes:
        end_turn_fields_occurrences_array (list[int]): per-square (1..40)
            count of turn endings.
        end_turn_fields_percentages_array (list[float]): the same counts as
            percentages (computed after the simulation runs).
        percentage_matrix (ndarray): 11x11 board-edge layout of the landing
            percentages; interior cells stay zero.
    """

    def __init__(self, turns=100, games=1000000):
        """Run the simulation immediately on construction.

        Args:
            turns (int): number of turns played per game.
            games (int): number of games simulated.
        """
        self.turns = turns
        self.games = games
        self.community_chest_cards = CommunityChestCards()
        self.chance_cards = ChanceCards()
        self.end_turn_fields_occurrences_array = [0] * 40
        self.percentage_matrix = np.zeros((11, 11))
        self.run_simulation()
        self.end_turn_fields_percentages_array = self.convert_occurrences_into_percentages()
        self.build_matrix_from_array()

    @staticmethod
    def throw_dice(sides=6):
        """Roll two dice; return (total, whether the roll was a double)."""
        dice1 = rnd.randint(1, sides)
        dice2 = rnd.randint(1, sides)
        return dice1 + dice2, dice1 == dice2

    def register_end_turn_field(self, field):
        """Count square *field* (1-based) as an end-of-turn landing."""
        self.end_turn_fields_occurrences_array[field - 1] += 1

    @staticmethod
    def bring_position_value_back_to_range(position):
        """Wrap a board position into the 1..40 range."""
        if position < 1:
            position += 40
        elif position > 40:
            position -= 40
        return position

    def change_current_position_based_on_card(self, movement_instruction, current_position):
        """Apply a drawn card's movement instruction.

        Args:
            movement_instruction (dict): single-entry dict, one of
                ``{'set': n}``, ``{'nearest': [..]}`` or ``{'move': d}``.
            current_position (int): current board square (1..40).

        Returns:
            int: the new board square.
        """
        movement_type, movement_value = list(movement_instruction.items())[0]
        if movement_type == 'set':
            return movement_value
        elif movement_type == 'nearest':
            # Forward distance (wrapped) to each candidate square.
            distance_array = [self.bring_position_value_back_to_range(value - current_position)
                              for value in movement_value]
            return movement_value[distance_array.index(min(distance_array))]
        elif movement_type == 'move':
            return self.bring_position_value_back_to_range(current_position + movement_value)

    def run_simulation(self):
        """Play ``self.games`` games of ``self.turns`` turns each.

        Bug fix vs the original implementation: a double used to increment
        the consecutive-doubles counter twice per throw
        (``doubles += int(is_double)`` followed by ``doubles += 1``), so the
        counter jumped from 2 to 4 and the "third consecutive double sends
        you to jail" rule could never trigger.  Each double now increments
        the counter exactly once.
        """
        for game in range(self.games):
            current_position = 1
            doubles = 0
            for turn in range(self.turns):
                going_to_jail = False
                dice_value, is_double = self.throw_dice()
                if is_double:
                    doubles += 1
                    if doubles == 3:
                        # Third consecutive double: straight to jail (square 11).
                        doubles = 0
                        current_position = 11
                        going_to_jail = True
                else:
                    doubles = 0
                if not going_to_jail:
                    current_position = self.bring_position_value_back_to_range(
                        current_position + dice_value)
                    if current_position == 31:
                        # "Go to jail" square.
                        current_position = 11
                    else:
                        if current_position in self.chance_cards.cards_pick_up_locations:
                            instruction = self.chance_cards.card_definitions.get(
                                self.chance_cards.deck_order[self.chance_cards.card_index], None)
                            self.chance_cards.increment_card_index()
                            if instruction is not None:
                                current_position = self.change_current_position_based_on_card(
                                    instruction, current_position)
                        if current_position in self.community_chest_cards.cards_pick_up_locations:
                            instruction = self.community_chest_cards.card_definitions.get(
                                self.community_chest_cards.deck_order[self.community_chest_cards.card_index], None)
                            self.community_chest_cards.increment_card_index()
                            if instruction is not None:
                                current_position = self.change_current_position_based_on_card(
                                    instruction, current_position)
                self.register_end_turn_field(current_position)
            # Re-shuffle both decks between games.
            self.community_chest_cards.shuffle_deck()
            self.community_chest_cards.reset_card_index()
            self.chance_cards.shuffle_deck()
            self.chance_cards.reset_card_index()

    def convert_occurrences_into_percentages(self):
        """Return per-square landing percentages (sums to 100)."""
        total = sum(self.end_turn_fields_occurrences_array)
        return [100 * count / total for count in self.end_turn_fields_occurrences_array]

    def build_matrix_from_array(self):
        """Lay the 40 per-square percentages out around an 11x11 board edge."""
        self.percentage_matrix[0, 0:-1] = self.end_turn_fields_percentages_array[0:10]
        self.percentage_matrix[0:-1, -1] = self.end_turn_fields_percentages_array[10:20]
        self.percentage_matrix[-1, 1:] = list(reversed(self.end_turn_fields_percentages_array[20:30]))
        self.percentage_matrix[1:, 0] = list(reversed(self.end_turn_fields_percentages_array[30:40]))

    def transform_percentage_matrix(self):
        """Return log10(p + 1) of the percentage matrix, for display contrast."""
        return np.array([[mth.log(self.percentage_matrix[i, j] + 1, 10)
                          for j in range(len(self.percentage_matrix[i]))]
                         for i in range(len(self.percentage_matrix))])

    def view_results(self):
        """Render the landing-percentage heat map with matplotlib."""
        fig, ax = plt.subplots()
        ax.imshow(self.transform_percentage_matrix(), cmap='jet')
        for i in range(len(self.percentage_matrix)):
            for j in range(len(self.percentage_matrix)):
                # Only the board-edge cells carry data worth labelling.
                if i == 0 or i == len(self.percentage_matrix) - 1 or j == 0 or j == len(self.percentage_matrix) - 1:
                    if i != 0 or j != 0:
                        ax.text(j, i, f'{round(self.percentage_matrix[i, j], 2)}%', ha="center", va="center", color="k")
                    else:
                        ax.text(j, i, f'START -> \n{round(self.percentage_matrix[i, j], 2)}%', ha="center", va="center", color="k")
        ax.set_title("A percentage chance for ending a turn at the particular square in the Monopoly game \n")
        fig.tight_layout()
        plt.axis('off')
        plt.show()
| [
"matplotlib.pyplot.show",
"random.randint",
"random.shuffle",
"numpy.zeros",
"matplotlib.pyplot.axis",
"math.log",
"matplotlib.pyplot.subplots"
] | [((426, 454), 'random.shuffle', 'rnd.shuffle', (['self.deck_order'], {}), '(self.deck_order)\n', (437, 454), True, 'import random as rnd\n'), ((1868, 1886), 'numpy.zeros', 'np.zeros', (['(11, 11)'], {}), '((11, 11))\n', (1876, 1886), True, 'import numpy as np\n'), ((2113, 2134), 'random.randint', 'rnd.randint', (['(1)', 'sides'], {}), '(1, sides)\n', (2124, 2134), True, 'import random as rnd\n'), ((2151, 2172), 'random.randint', 'rnd.randint', (['(1)', 'sides'], {}), '(1, sides)\n', (2162, 2172), True, 'import random as rnd\n'), ((6389, 6403), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6401, 6403), True, 'import matplotlib.pyplot as plt\n'), ((7165, 7180), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7173, 7180), True, 'import matplotlib.pyplot as plt\n'), ((7189, 7199), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7197, 7199), True, 'import matplotlib.pyplot as plt\n'), ((6202, 6247), 'math.log', 'mth.log', (['(self.percentage_matrix[i, j] + 1)', '(10)'], {}), '(self.percentage_matrix[i, j] + 1, 10)\n', (6209, 6247), True, 'import math as mth\n')] |
"""Numerical utilities
These numerical utilities include the Clock
time-keeping class and the base solution vector
classes for both hydro and mhd.
"""
import time
from tqdm import tqdm
import numpy as np
class Clock:
    """Simulation time keeper with progress reporting.

    Tracks current simulation time against an end time, schedules evenly
    spaced output dumps, drives a tqdm progress bar, and measures the
    process-time duration of the run.

    Attributes
    ----------
    current_time : float
        the current simulation time
    end_time : float
        the time the simulation will end
    next_output_time : float
        the next time point at which output will be dumped
    output_spacing : float
        the interval between successive output dumps
    """

    def __init__(self, Parameters):
        """
        Parameters
        ----------
        Parameters : a Parameters object
            an object of the Parameters class holding simulation configuration
        """
        self.current_time = 0.0
        self.end_time = Parameters.t_max
        self.next_output_time = 0.0
        self.output_spacing = self.end_time / Parameters.n_outputs
        self.bar = tqdm(total=self.end_time + 0.01)
        self.wallclock_start = time.process_time()

    def is_end(self):
        """Return True (closing the progress bar) once the end time is reached."""
        if self.current_time >= self.end_time:
            self.bar.close()
            return True
        return False

    def tick(self, dt):
        """Advance the simulation time by ``dt`` and update the progress bar."""
        self.bar.update(dt)
        self.current_time += dt

    def is_output(self):
        """Return True when output is due, advancing the output schedule."""
        if self.current_time < self.next_output_time:
            return False
        self.next_output_time += self.output_spacing
        return True

    def duration(self):
        """Return elapsed process time in seconds since construction."""
        return time.process_time() - self.wallclock_start
class SolutionVector:
    """Hydrodynamic solution vector field.

    Holds the conserved-variable data for every cell of the mesh along
    with grid spacing, boundary handling and timestep bookkeeping.

    Attributes
    ----------
    data : ndarray
        the raw solution data, of shape (n_variables, Nx, Ny, Nz)
    boundary_type : List[str]
        the boundary condition type in the x, y, z axes
    boundary_value : List[float]
        the value of the boundary if using fixed boundary condition
    dx, dy, dz : float
        the cell widths in x, y, z directions
    cfl : float
        the value of the CFL condition parameter
    adi_idx : float
        the adiabatic index
    timestep : float
        the current timestep size
    variable_names : List[str]
        names of the stored variables, in vector order
    """

    def __init__(self):
        self.data = None
        self.boundary_type = None
        self.boundary_value = None
        self.boundsetter = None
        self.dx, self.dy, self.dz = None, None, None
        self.adi_idx = 1.4
        self.timestep = 0.0001
        self.cfl = 0.1
        self.variable_names = [
            "density",
            "xmomentum",
            "ymomentum",
            "zmomentum",
            "energy",
        ]

    def set_state(self, Parameters):
        """Initialise the solution vector from a simulation configuration.

        Parameters
        ----------
        Parameters : a Parameters object
            the simulation configuration object
        """
        self.boundary_type = Parameters.boundary_type
        self.boundary_value = Parameters.boundary_value
        self.dx, self.dy, self.dz = Parameters.cell_sizes
        self.adi_idx = Parameters.adi_idx
        self.set_centroid(Parameters.initial_condition)
        self.cfl = Parameters.cfl
        self.boundsetter = BoundarySetter(
            Parameters.boundary_type, Parameters.initial_condition
        )

    def copy(self):
        """Return a shallow copy sharing this vector's data array."""
        twin = SolutionVector()
        twin.data = self.data
        twin.boundary_type = self.boundary_type
        twin.boundary_value = self.boundary_value
        twin.dx, twin.dy, twin.dz = self.dx, self.dy, self.dz
        twin.adi_idx = self.adi_idx
        twin.timestep = self.timestep
        twin.boundsetter = self.boundsetter
        return twin

    def calculate_min_max_wave_speeds_X(self):
        """Return the (min, max) characteristic wave speeds along x per cell.

        Returns
        -------
        Tuple[ndarray, ndarray]
            the minimum and maximum wave speeds in the x direction
        """
        sound = self.sound_speed()
        low = self.velX() - sound
        high = self.velX() + sound
        return np.minimum(low, high), np.maximum(low, high)

    def calculate_min_max_wave_speeds_Y(self):
        """Return the (min, max) characteristic wave speeds along y per cell.

        Returns
        -------
        Tuple[ndarray, ndarray]
            the minimum and maximum wave speeds in the y direction
        """
        sound = self.sound_speed()
        low = self.velY() - sound
        high = self.velY() + sound
        return np.minimum(low, high), np.maximum(low, high)

    def calculate_timestep(self):
        """Set and return the CFL-limited timestep.

        Returns
        -------
        timestep : float
            the new timestep size dt
        """
        min_x, max_x = self.calculate_min_max_wave_speeds_X()
        min_y, max_y = self.calculate_min_max_wave_speeds_Y()
        fastest_x = max(np.abs(min_x).max(), np.abs(max_x).max())
        fastest_y = max(np.abs(min_y).max(), np.abs(max_y).max())
        self.timestep = min(self.cfl * self.dx / fastest_x,
                            self.cfl * self.dy / fastest_y)
        return self.timestep

    def set_centroid(self, array):
        """Store ``array`` as the cell-centred solution data."""
        self.data = array

    def centroid(self):
        """Return the cell-centred solution data for all variables."""
        return self.data

    def get_variable(self, variable_name):
        """Return the data slice for the named variable.

        Parameters
        ----------
        variable_name : str
            the name of the variable data to return
        """
        return self.data[self.variable_names.index(variable_name)]

    def shift(self, axis, direction):
        """Return a copy shifted one cell along ``axis``.

        Applies the configured boundary conditions through the boundary
        setter, giving vectorised access to neighbouring stencil positions.

        Parameters
        ----------
        axis : int
            the axis to shift (x:0, y:1, z:2)
        direction : int
            the direction to shift in (+1 or -1)

        Returns
        -------
        SolutionVector
            the shifted solution vector for every cell in the mesh
        """
        shifted = self.copy()
        shifted.set_centroid(self.boundsetter.set_stencil(self.data, axis, direction))
        return shifted

    def plusX(self):
        """Solution data shifted one cell in the positive x direction."""
        return self.shift(0, 1)

    def minusX(self):
        """Solution data shifted one cell in the negative x direction."""
        return self.shift(0, -1)

    def plusY(self):
        """Solution data shifted one cell in the positive y direction."""
        return self.shift(1, 1)

    def minusY(self):
        """Solution data shifted one cell in the negative y direction."""
        return self.shift(1, -1)

    def plusZ(self):
        """Solution data shifted one cell in the positive z direction."""
        return self.shift(2, 1)

    def minusZ(self):
        """Solution data shifted one cell in the negative z direction."""
        return self.shift(2, -1)

    def update(self, array):
        """Apply an explicit update: data += timestep * array.

        Parameters
        ----------
        array : ndarray
            the array to update the solution vector with
        """
        self.data += self.timestep * array

    def dens(self):
        """Density field."""
        return self.data[0]

    def momX(self):
        """x-momentum field."""
        return self.data[1]

    def momY(self):
        """y-momentum field."""
        return self.data[2]

    def momZ(self):
        """z-momentum field."""
        return self.data[3]

    def velX(self):
        """x-velocity field (momentum / density)."""
        return self.momX() / self.dens()

    def velY(self):
        """y-velocity field (momentum / density)."""
        return self.momY() / self.dens()

    def velZ(self):
        """z-velocity field (momentum / density)."""
        return self.momZ() / self.dens()

    def momTotalSqr(self):
        """Squared magnitude of the momentum vector, |M|**2."""
        return self.momX() ** 2 + self.momY() ** 2 + self.momZ() ** 2

    def energy(self):
        """Total energy field."""
        return self.data[4]

    def pressure(self):
        """Thermal pressure field, (gamma - 1) * internal energy."""
        thermal = self.energy() - 0.5 * self.momTotalSqr() / self.dens()
        return (self.adi_idx - 1.0) * thermal

    def sound_speed(self):
        """Adiabatic sound speed per cell, sqrt(gamma * p / rho)."""
        return np.sqrt(self.adi_idx * self.pressure() / self.dens())
class MHDSolutionVector(SolutionVector):
    """Magnetohydrodynamic solution vector field
    This solution vector contains all vector field data for
    all variables in the magnetohydrodynamic problem.
    Indices 0-4 are the hydrodynamic variables (density, momentum,
    energy); indices 5-7 append the three magnetic field components.
    """
    def __init__(self):
        super(MHDSolutionVector, self).__init__()
        self.variable_names = [
            "density",
            "xmomentum",
            "ymomentum",
            "zmomentum",
            "energy",
            "xmag",
            "ymag",
            "zmag",
        ]
    def copy(self):
        """Return a copy of the solution vector
        NOTE(review): `data` is assigned by reference, so the copy shares
        the underlying array with the original; only metadata attributes
        are duplicated. Confirm this aliasing is intentional before
        relying on independent copies.
        """
        new_vector = MHDSolutionVector()
        new_vector.data = self.data
        new_vector.boundary_type = self.boundary_type
        new_vector.boundary_value = self.boundary_value
        new_vector.dx, new_vector.dy, new_vector.dz = self.dx, self.dy, self.dz
        new_vector.adi_idx = self.adi_idx
        new_vector.timestep = self.timestep
        new_vector.boundsetter = self.boundsetter
        return new_vector
    def magX(self):
        """Return the x-direction magnetic field data"""
        return self.data[5]
    def magY(self):
        """Return the y-direction magnetic field data"""
        return self.data[6]
    def magZ(self):
        """Return the z-direction magnetic field data"""
        return self.data[7]
    def magTotalSqr(self):
        """Return the total magnetic field squared data |B|**2"""
        return self.data[5] ** 2 + self.data[6] ** 2 + self.data[7] ** 2
    def magnetic_pressure(self):
        """Return the magnetic pressure field data, |B|**2 / 2"""
        return self.magTotalSqr() * 0.5
    def pressure(self):
        """Return the thermal pressure field data
        Thermal energy is the total energy minus kinetic energy
        |M|**2 / (2 rho) and magnetic energy |B|**2 / 2; the ideal-gas
        relation p = (gamma - 1) * e_thermal converts it to pressure.
        """
        thermal_en = (
            self.energy()
            - 0.5 * self.momTotalSqr() / self.dens()
            - self.magnetic_pressure()
        )
        pressure = (self.adi_idx - 1.0) * thermal_en
        return pressure
    def total_pressure(self):
        """Return the total pressure, magnetic plus thermal"""
        return self.pressure() + self.magnetic_pressure()
    def alfven_speed(self):
        """Return the Alfven speed sqrt(|B|**2 / rho) for each mesh cell"""
        return np.sqrt(self.magTotalSqr() / self.dens())
    def fast_magnetosonic_speed_X(self):
        """Return the fast magnetosonic speed in the x direction for each mesh cell
        Larger root of the magnetosonic dispersion relation:
        cf**2 = (va**2 + vs**2 + sqrt((va**2 + vs**2)**2 - 4 vax**2 vs**2)) / 2
        """
        va2 = self.alfven_speed() ** 2
        vs2 = self.sound_speed() ** 2
        vax2 = self.magX() ** 2 / self.dens()
        quad = va2 + vs2 + np.sqrt((va2 + vs2) ** 2 - 4 * vax2 * vs2)
        return np.sqrt(0.5 * quad)
    def fast_magnetosonic_speed_Y(self):
        """Return the fast magnetosonic speed in the y direction for each mesh cell"""
        va2 = self.alfven_speed() ** 2
        vs2 = self.sound_speed() ** 2
        vay2 = self.magY() ** 2 / self.dens()
        quad = va2 + vs2 + np.sqrt((va2 + vs2) ** 2 - 4 * vay2 * vs2)
        return np.sqrt(0.5 * quad)
    def calculate_min_max_wave_speeds_X(self):
        """Return the minimum and maximum wave speeds in the X direction
        Returns
        -------
        Tuple[ndarray, ndarray]
            the minimum and maximum wave speeds in the x direction for each cell
        """
        xvel = self.velX()
        cf = self.fast_magnetosonic_speed_X()
        lambda1 = xvel - cf
        lambda2 = xvel + cf
        # cf >= 0 so lambda1 <= lambda2 already; min/max keeps intent explicit
        return np.minimum(lambda1, lambda2), np.maximum(lambda1, lambda2)
    def calculate_min_max_wave_speeds_Y(self):
        """Return the minimum and maximum wave speeds in the Y direction
        Returns
        -------
        Tuple[ndarray, ndarray]
            the minimum and maximum wave speeds in the y direction for each cell
        """
        yvel = self.velY()
        cf = self.fast_magnetosonic_speed_Y()
        lambda1 = yvel - cf
        lambda2 = yvel + cf
        return np.minimum(lambda1, lambda2), np.maximum(lambda1, lambda2)
class BoundarySetter:
    """Boundary value setting object

    Takes an ndarray of solution values and fills the boundary layer
    exposed by a one-cell shift according to the configured per-axis
    boundary condition.

    Attributes
    ----------
    boundary_types : List[str]
        boundary condition per axis: 'periodic', 'outflow', 'fixed'
        or 'reflective'
    initial_values : ndarray
        the values used at the boundaries when the type is 'fixed'
    """

    def __init__(self, boundary_types, initial_boundary_values):
        self.boundary_types = boundary_types
        self.initial_values = initial_boundary_values

    def set_stencil(self, array, axis, direction):
        """Return `array` shifted one cell along `axis` with boundaries filled.

        Parameters
        ----------
        array : ndarray
            solution data of shape (nvariables, nx, ny, nz)
        axis : int
            spatial axis to shift (0=x, 1=y, 2=z); the array axis is axis+1
        direction : int
            +1 or -1, the shift direction

        Returns
        -------
        ndarray
            the shifted array with its exposed boundary layer set
        """
        stencil_arm = np.roll(array, direction, axis=axis + 1)
        boundary_type = self.boundary_types[axis]
        shape = array.shape
        if boundary_type == "periodic":
            # np.roll already wraps values around, nothing more to do
            pass
        elif boundary_type == "outflow":
            # copy the edge cells back over the wrapped-around values
            boundary_index_set = self.get_boundary_indices(axis, direction, shape)
            stencil_arm[boundary_index_set] = array[boundary_index_set]
        elif boundary_type == "fixed":
            boundary_index_set = self.get_boundary_indices(axis, direction, shape)
            stencil_arm[boundary_index_set] = self.initial_values[boundary_index_set]
        elif boundary_type == "reflective":
            boundary_index_set = self.get_boundary_indices(axis, direction, shape)
            # BUG FIX: the required `shape` argument was previously omitted,
            # so every reflective boundary raised a TypeError.
            velocity_boundary_indices = self.velocity_boundary_indices(
                axis, direction, shape)
            stencil_arm[boundary_index_set] = array[boundary_index_set]
            # flip the sign of the normal momentum component at the wall
            stencil_arm[velocity_boundary_indices] = -array[velocity_boundary_indices]
        return stencil_arm

    def get_boundary_indices(self, axis, direction, shape):
        """Build an np.ix_ mesh selecting the boundary layer (all variables)
        exposed by a shift of `direction` along `axis`."""
        variables_index_set = np.arange(shape[0])
        x_index_set = np.arange(shape[1])
        y_index_set = np.arange(shape[2])
        z_index_set = np.arange(shape[3])
        if axis == 0:
            edge_value = 0 if direction == 1 else shape[1] - 1
            x_index_set = np.array([edge_value])
        elif axis == 1:
            edge_value = 0 if direction == 1 else shape[2] - 1
            y_index_set = np.array([edge_value])
        elif axis == 2:
            edge_value = 0 if direction == 1 else shape[3] - 1
            z_index_set = np.array([edge_value])
        return np.ix_(variables_index_set, x_index_set, y_index_set, z_index_set)

    def velocity_boundary_indices(self, axis, direction, shape):
        """Build an np.ix_ mesh selecting only the momentum component normal
        to the boundary (variable 1/2/3 for axis 0/1/2) at the boundary layer."""
        variables_index_set = np.arange(shape[0])
        x_index_set = np.arange(shape[1])
        y_index_set = np.arange(shape[2])
        z_index_set = np.arange(shape[3])
        if axis == 0:
            variables_index_set = np.array([1])
            edge_value = 0 if direction == 1 else shape[1] - 1
            x_index_set = np.array([edge_value])
        elif axis == 1:
            variables_index_set = np.array([2])
            edge_value = 0 if direction == 1 else shape[2] - 1
            y_index_set = np.array([edge_value])
        elif axis == 2:
            variables_index_set = np.array([3])
            edge_value = 0 if direction == 1 else shape[3] - 1
            z_index_set = np.array([edge_value])
        return np.ix_(variables_index_set, x_index_set, y_index_set, z_index_set)
class GravitySource:
    """Gravitational field source terms.

    Attributes
    ----------
    field : ndarray
        the gravitational acceleration components (gx, gy, gz) on the mesh
    """

    def __init__(self, gravity_field):
        self.field = gravity_field

    def calculate_gravity_source(self, solvec):
        """Calculates the gravity field source terms

        Parameters
        ----------
        solvec : SolutionVector
            the solution vector of the simulation

        Returns
        -------
        ndarray
            source contribution: zero for density, rho * g for each
            momentum component and M . g for the energy equation
        """
        gx, gy, gz = self.field[0], self.field[1], self.field[2]
        source = np.zeros(solvec.data.shape)
        density = solvec.dens()
        source[1] = density * gx
        source[2] = density * gy
        source[3] = density * gz
        source[4] = solvec.momX() * gx + solvec.momY() * gy + solvec.momZ() * gz
        return source
| [
"tqdm.tqdm",
"numpy.minimum",
"numpy.maximum",
"numpy.abs",
"numpy.roll",
"time.process_time",
"numpy.ix_",
"numpy.zeros",
"numpy.arange",
"numpy.array",
"numpy.sqrt"
] | [((985, 1017), 'tqdm.tqdm', 'tqdm', ([], {'total': '(self.end_time + 0.01)'}), '(total=self.end_time + 0.01)\n', (989, 1017), False, 'from tqdm import tqdm\n'), ((1049, 1068), 'time.process_time', 'time.process_time', ([], {}), '()\n', (1066, 1068), False, 'import time\n'), ((1799, 1818), 'time.process_time', 'time.process_time', ([], {}), '()\n', (1816, 1818), False, 'import time\n'), ((12642, 12661), 'numpy.sqrt', 'np.sqrt', (['(0.5 * quad)'], {}), '(0.5 * quad)\n', (12649, 12661), True, 'import numpy as np\n'), ((12999, 13018), 'numpy.sqrt', 'np.sqrt', (['(0.5 * quad)'], {}), '(0.5 * quad)\n', (13006, 13018), True, 'import numpy as np\n'), ((14606, 14646), 'numpy.roll', 'np.roll', (['array', 'direction'], {'axis': '(axis + 1)'}), '(array, direction, axis=axis + 1)\n', (14613, 14646), True, 'import numpy as np\n'), ((15680, 15699), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (15689, 15699), True, 'import numpy as np\n'), ((15722, 15741), 'numpy.arange', 'np.arange', (['shape[1]'], {}), '(shape[1])\n', (15731, 15741), True, 'import numpy as np\n'), ((15764, 15783), 'numpy.arange', 'np.arange', (['shape[2]'], {}), '(shape[2])\n', (15773, 15783), True, 'import numpy as np\n'), ((15806, 15825), 'numpy.arange', 'np.arange', (['shape[3]'], {}), '(shape[3])\n', (15815, 15825), True, 'import numpy as np\n'), ((16249, 16315), 'numpy.ix_', 'np.ix_', (['variables_index_set', 'x_index_set', 'y_index_set', 'z_index_set'], {}), '(variables_index_set, x_index_set, y_index_set, z_index_set)\n', (16255, 16315), True, 'import numpy as np\n'), ((16412, 16431), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (16421, 16431), True, 'import numpy as np\n'), ((16454, 16473), 'numpy.arange', 'np.arange', (['shape[1]'], {}), '(shape[1])\n', (16463, 16473), True, 'import numpy as np\n'), ((16496, 16515), 'numpy.arange', 'np.arange', (['shape[2]'], {}), '(shape[2])\n', (16505, 16515), True, 'import numpy as np\n'), ((16538, 16557), 'numpy.arange', 
'np.arange', (['shape[3]'], {}), '(shape[3])\n', (16547, 16557), True, 'import numpy as np\n'), ((17125, 17191), 'numpy.ix_', 'np.ix_', (['variables_index_set', 'x_index_set', 'y_index_set', 'z_index_set'], {}), '(variables_index_set, x_index_set, y_index_set, z_index_set)\n', (17131, 17191), True, 'import numpy as np\n'), ((17901, 17928), 'numpy.zeros', 'np.zeros', (['solvec.data.shape'], {}), '(solvec.data.shape)\n', (17909, 17928), True, 'import numpy as np\n'), ((4740, 4768), 'numpy.minimum', 'np.minimum', (['lambda1', 'lambda2'], {}), '(lambda1, lambda2)\n', (4750, 4768), True, 'import numpy as np\n'), ((4770, 4798), 'numpy.maximum', 'np.maximum', (['lambda1', 'lambda2'], {}), '(lambda1, lambda2)\n', (4780, 4798), True, 'import numpy as np\n'), ((5212, 5240), 'numpy.minimum', 'np.minimum', (['lambda1', 'lambda2'], {}), '(lambda1, lambda2)\n', (5222, 5240), True, 'import numpy as np\n'), ((5242, 5270), 'numpy.maximum', 'np.maximum', (['lambda1', 'lambda2'], {}), '(lambda1, lambda2)\n', (5252, 5270), True, 'import numpy as np\n'), ((12584, 12626), 'numpy.sqrt', 'np.sqrt', (['((va2 + vs2) ** 2 - 4 * vax2 * vs2)'], {}), '((va2 + vs2) ** 2 - 4 * vax2 * vs2)\n', (12591, 12626), True, 'import numpy as np\n'), ((12941, 12983), 'numpy.sqrt', 'np.sqrt', (['((va2 + vs2) ** 2 - 4 * vay2 * vs2)'], {}), '((va2 + vs2) ** 2 - 4 * vay2 * vs2)\n', (12948, 12983), True, 'import numpy as np\n'), ((13447, 13475), 'numpy.minimum', 'np.minimum', (['lambda1', 'lambda2'], {}), '(lambda1, lambda2)\n', (13457, 13475), True, 'import numpy as np\n'), ((13477, 13505), 'numpy.maximum', 'np.maximum', (['lambda1', 'lambda2'], {}), '(lambda1, lambda2)\n', (13487, 13505), True, 'import numpy as np\n'), ((13934, 13962), 'numpy.minimum', 'np.minimum', (['lambda1', 'lambda2'], {}), '(lambda1, lambda2)\n', (13944, 13962), True, 'import numpy as np\n'), ((13964, 13992), 'numpy.maximum', 'np.maximum', (['lambda1', 'lambda2'], {}), '(lambda1, lambda2)\n', (13974, 13992), True, 'import numpy as np\n'), 
((15938, 15960), 'numpy.array', 'np.array', (['[edge_value]'], {}), '([edge_value])\n', (15946, 15960), True, 'import numpy as np\n'), ((16615, 16628), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (16623, 16628), True, 'import numpy as np\n'), ((16718, 16740), 'numpy.array', 'np.array', (['[edge_value]'], {}), '([edge_value])\n', (16726, 16740), True, 'import numpy as np\n'), ((16074, 16096), 'numpy.array', 'np.array', (['[edge_value]'], {}), '([edge_value])\n', (16082, 16096), True, 'import numpy as np\n'), ((16799, 16812), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (16807, 16812), True, 'import numpy as np\n'), ((16902, 16924), 'numpy.array', 'np.array', (['[edge_value]'], {}), '([edge_value])\n', (16910, 16924), True, 'import numpy as np\n'), ((5680, 5704), 'numpy.abs', 'np.abs', (['min_wave_speed_x'], {}), '(min_wave_speed_x)\n', (5686, 5704), True, 'import numpy as np\n'), ((5712, 5736), 'numpy.abs', 'np.abs', (['max_wave_speed_x'], {}), '(max_wave_speed_x)\n', (5718, 5736), True, 'import numpy as np\n'), ((5767, 5791), 'numpy.abs', 'np.abs', (['min_wave_speed_y'], {}), '(min_wave_speed_y)\n', (5773, 5791), True, 'import numpy as np\n'), ((5799, 5823), 'numpy.abs', 'np.abs', (['max_wave_speed_y'], {}), '(max_wave_speed_y)\n', (5805, 5823), True, 'import numpy as np\n'), ((16210, 16232), 'numpy.array', 'np.array', (['[edge_value]'], {}), '([edge_value])\n', (16218, 16232), True, 'import numpy as np\n'), ((16983, 16996), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (16991, 16996), True, 'import numpy as np\n'), ((17086, 17108), 'numpy.array', 'np.array', (['[edge_value]'], {}), '([edge_value])\n', (17094, 17108), True, 'import numpy as np\n')] |
from __future__ import division
import wx
import os
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
from matplotlib import ticker as mticker
from matplotlib import transforms as mtransforms
import numpy as n
from time import sleep, time
import threading
from collections import deque
from .util import get_next_filename
class ScrollingLocator(mticker.MaxNLocator):
    """Tick locator that never expands the view, keeping panning smooth."""

    def view_limits(self, vmin, vmax):
        """Return the limits unchanged (guarding against degenerate ranges)."""
        return mtransforms.nonsingular(vmin, vmax)
def silly_gen(inc=0.1):
    """
    a silly generator function producing 'data'
    (really just a sine wave plus noise)

    Yields (t, value) pairs roughly every 0.1 s, with t advancing by
    `inc` per item.
    """
    t = 0.
    while True:
        sleep(0.1)
        signal = n.sin(2 * n.pi * t) + n.sin(2 * n.pi * t / 50.)
        yield t, signal + 0.1 * n.random.randn()
        t += inc
# threading code mostly stolen from http://wiki.wxpython.org/LongRunningTasks
# Define notification events used to marshal results from the worker
# thread back onto the GUI thread.
import wx.lib.newevent
# for new data available; the event instance carries a `data` attribute
ResultEvent, EVT_RESULT = wx.lib.newevent.NewEvent()
# for scan being finished; the event instance carries an `elapsed` attribute
FinishedEvent, EVT_FINISHED = wx.lib.newevent.NewEvent()
# Thread class that executes processing
class WorkerThread(threading.Thread):
    """Background thread that drains a data generator.

    Each item produced by the generator is posted to the notify window as
    a ResultEvent; a FinishedEvent with the elapsed wall time is posted
    when the generator is exhausted or the thread is aborted.
    """

    def __init__(self, notify_window, gen):
        """Store the generator and the window to notify."""
        threading.Thread.__init__(self)
        self.gen = gen
        self._notify_window = notify_window
        self._want_abort = 0

    def run(self):
        """Consume the generator on this thread, posting events as we go."""
        start = time()
        for data in self.gen:
            # peek at the abort flag once per item so aborts take effect
            if self._want_abort:
                break
            wx.PostEvent(self._notify_window, ResultEvent(data=data))
        # signal completion along with the total elapsed time
        wx.PostEvent(self._notify_window, FinishedEvent(elapsed=time() - start))

    def abort(self):
        """Ask the thread (from the main thread) to stop after the current item."""
        self._want_abort = 1
class MonitorWindow(wx.Frame):
    """Top-level frame hosting a MonitorPanel plus menu bar and status bar.

    Parameters
    ----------
    parent : wx.Window or None
        parent window
    title : str
        window title
    datagen : generator, optional
        (t, value) data source; defaults to the fake silly_gen()
    """
    def __init__(self, parent, title, datagen=None):
        wx.Frame.__init__(self, parent, title=title, size=(500,400))
        if datagen is None:
            datagen = silly_gen()
        self.statusbar = self.CreateStatusBar()
        self.panel = MonitorPanel(self, datagen)
        # remember the last save directory; start in the home directory
        self.save_to_dir = os.path.expanduser('~')
        filemenu = wx.Menu()
        menuExit = filemenu.Append(wx.ID_EXIT, "E&xit", " Stop program")
        menuSave = filemenu.Append(wx.ID_SAVE, "&Save", " Save a file")
        menuBar = wx.MenuBar()
        menuBar.Append(filemenu, "&File")
        self.SetMenuBar(menuBar)
        self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
        self.Bind(wx.EVT_MENU, self.OnSave, menuSave)
        # shortcuts (Ctrl+S triggers the Save menu item)
        shortcuts = wx.AcceleratorTable([
            (wx.ACCEL_CTRL, ord('S'), wx.ID_SAVE),
        ])
        self.SetAcceleratorTable(shortcuts)
    def OnExit(self, e):
        # Runs when the 'Exit' menu option is selected,
        # but not when the x is hit
        self.Close(True)
    def OnSave(self, e):
        """Prompt for a path and save the current plot data as a .npy file."""
        xy = self.panel.get_data()
        dlg = wx.FileDialog(self,
            message="Save NPY file",
            defaultDir=self.save_to_dir,
            defaultFile=get_next_filename(self.save_to_dir,
                                          fmt="monitor%03d.npy"),
            wildcard="NPY files (*.npy)|*.npy",
            style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        if dlg.ShowModal() == wx.ID_OK:
            # remember the chosen directory for the next save
            self.save_to_dir = dlg.GetDirectory()
            path = dlg.GetPath()
            n.save(path, xy)
            self.statusbar.SetStatusText('Saved %s' % path)
class MonitorPanel(wx.Panel):
    """
    Panel encompassing a line monitor graph.
    Pulls x,y data from the provided generator `datagen`
    and plots a live 2D graph.
    """
    def __init__(self, parent, datagen):
        wx.Panel.__init__(self, parent)
        self.datagen = datagen
        # matplotlib figure embedded in the wx panel
        fig = Figure()
        self.ax = fig.add_subplot(111)
        self.ax.xaxis.set_major_locator(ScrollingLocator())
        # maintain x and y lists (we'll append to these as we go)
        self.setup_deques([], [])
        self.line, = self.ax.plot(self.x, self.y)
        self.canvas = FigureCanvasWxAgg(self, -1, fig)
        self.canvas.draw()
        # controls: optional cap on the number of retained points
        self.checkbox = wx.CheckBox(self, label="Limit to")
        self.Bind(wx.EVT_CHECKBOX, self.impose_limit)
        self.spinbox = wx.SpinCtrlDouble(self,
                value='100', min=10, max=10000, inc=10)
        self.Bind(wx.EVT_SPINCTRLDOUBLE, self.impose_limit)
        self.clear_button = wx.Button(self, label="Clear")
        self.clear_button.Bind(wx.EVT_BUTTON, self.on_clear_button)
        self.start_button = wx.Button(self, label="Start")
        self.start_button.Bind(wx.EVT_BUTTON, self.on_start_button)
        # layout: controls row under the canvas, all inside a static box
        self.subsizer = wx.BoxSizer(wx.HORIZONTAL)
        self.subsizer.Add(self.checkbox, 0, wx.EXPAND)
        self.subsizer.Add(self.spinbox, 1, wx.EXPAND)
        self.subsizer.Add(wx.StaticText(self, label='data points'),
                0, wx.ALIGN_CENTER)
        self.subsizer.Add(self.clear_button, 0, wx.EXPAND)
        self.subsizer.Add(self.start_button, 0, wx.EXPAND)
        self.box = wx.StaticBox(self, label="Monitor")
        self.sizer = wx.StaticBoxSizer(self.box, orient=wx.VERTICAL)
        self.sizer.Add(self.canvas, 1, wx.EXPAND)
        self.sizer.AddSpacer(5)
        self.sizer.Add(self.subsizer, 0, wx.EXPAND)
        self.SetSizer(self.sizer)
        self.SetAutoLayout(1)
        self.sizer.Fit(self)
        # worker thread state; new data arrives via EVT_RESULT events
        self.running = False
        self.Bind(EVT_RESULT, self.on_result)
    def on_start_button(self, event):
        """Toggle acquisition; the button label tracks the current state."""
        if not self.running:
            self.start_worker()
            self.running = True
            self.start_button.SetLabel('Stop')
        else:
            self.abort_worker()
            self.running = False
            self.start_button.SetLabel('Resume')
    def on_clear_button(self, event):
        """Drop all accumulated data (the deques are kept, just emptied)."""
        self.x.clear()
        self.y.clear()
    def __del__(self):
        # this doesn't seem to work
        self.abort_worker()
    def abort_worker(self):
        """Tell the worker to finish and wait for it to exit.

        Safe to call before any worker has been started (e.g. from
        __del__) -- previously this raised AttributeError in that case.
        """
        worker = getattr(self, 'worker', None)
        if worker is not None:
            worker.abort()
            worker.join()
    def setup_deques(self, x, y, maxlen=None):
        """(Re)build the data buffers; maxlen=None means unbounded."""
        self.x = deque(x, maxlen)
        self.y = deque(y, maxlen)
    def start_worker(self):
        """Spawn a WorkerThread draining self.datagen into this panel."""
        self.worker = WorkerThread(self, self.datagen)
        self.worker.start()
    def on_result(self, event):
        """Handle a ResultEvent on the GUI thread and refresh the plot."""
        if event.data is None:
            pass
        else:
            x,y = event.data
            self.x.append(x)
            self.y.append(y)
        self.line.set_data(self.x, self.y)
        self.ax.relim()
        self.ax.autoscale_view()
        self.canvas.draw()
    def impose_limit(self, event):
        """Apply (or remove) the point-count cap chosen in the UI."""
        if self.checkbox.IsChecked():
            # SpinCtrlDouble.GetValue() returns a float, but deque(maxlen=...)
            # requires an int -- convert explicitly (was a TypeError before)
            maxlen = int(self.spinbox.GetValue())
        else:
            maxlen = None
        self.setup_deques(self.x, self.y, maxlen=maxlen)
    def get_data(self):
        """Return the (x, y) arrays currently shown on the plot line."""
        return self.line.get_data()
if __name__ == "__main__":
    title = 'Monitor'
    from .config import load_config
    cfg = load_config()
    # DAQ channel names for the hardware-backed generators
    pulsechan = cfg.get('counting', 'pulsechan')
    countchan = cfg.get('counting', 'countchan')
    import sys
    fake = ('--fake' in sys.argv)
    try:
        import PyDAQmx as daq
    except NotImplementedError:
        # NOTE(review): PyDAQmx apparently raises NotImplementedError when
        # the NI-DAQ driver is unavailable -- fall back to fake data
        fake = True
    # choose the data source: fake sine wave, gated counts, or count rate
    if fake:
        gen = silly_gen()
        title += ' (fake)'
    elif '--gated' in sys.argv:
        from .expt import gen_gated_counts
        gen = gen_gated_counts(t=0.1)
        title += ' (gated)'
    else:
        from .expt import gen_count_rate
        gen = gen_count_rate(t=0.1, pulsechan=pulsechan, countchan=countchan)
    app = wx.App(False)
    frame = MonitorWindow(None, title, datagen=gen)
    frame.Show(True)
    app.MainLoop()
| [
"wx.Menu",
"wx.CheckBox",
"numpy.sin",
"matplotlib.backends.backend_wxagg.FigureCanvasWxAgg",
"collections.deque",
"threading.Thread.__init__",
"numpy.random.randn",
"matplotlib.figure.Figure",
"wx.MenuBar",
"wx.Panel.__init__",
"numpy.save",
"wx.BoxSizer",
"wx.SpinCtrlDouble",
"wx.StaticB... | [((1066, 1092), 'wx.lib.newevent.NewEvent', 'wx.lib.newevent.NewEvent', ([], {}), '()\n', (1090, 1092), False, 'import wx\n'), ((1150, 1176), 'wx.lib.newevent.NewEvent', 'wx.lib.newevent.NewEvent', ([], {}), '()\n', (1174, 1176), False, 'import wx\n'), ((8049, 8062), 'wx.App', 'wx.App', (['(False)'], {}), '(False)\n', (8055, 8062), False, 'import wx\n'), ((530, 565), 'matplotlib.transforms.nonsingular', 'mtransforms.nonsingular', (['vmin', 'vmax'], {}), '(vmin, vmax)\n', (553, 565), True, 'from matplotlib import transforms as mtransforms\n'), ((733, 743), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (738, 743), False, 'from time import sleep, time\n'), ((1379, 1410), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (1404, 1410), False, 'import threading\n'), ((1632, 1638), 'time.time', 'time', ([], {}), '()\n', (1636, 1638), False, 'from time import sleep, time\n'), ((2302, 2363), 'wx.Frame.__init__', 'wx.Frame.__init__', (['self', 'parent'], {'title': 'title', 'size': '(500, 400)'}), '(self, parent, title=title, size=(500, 400))\n', (2319, 2363), False, 'import wx\n'), ((2551, 2574), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2569, 2574), False, 'import os\n'), ((2595, 2604), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (2602, 2604), False, 'import wx\n'), ((2769, 2781), 'wx.MenuBar', 'wx.MenuBar', ([], {}), '()\n', (2779, 2781), False, 'import wx\n'), ((4154, 4185), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self', 'parent'], {}), '(self, parent)\n', (4171, 4185), False, 'import wx\n'), ((4233, 4241), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (4239, 4241), False, 'from matplotlib.figure import Figure\n'), ((4517, 4549), 'matplotlib.backends.backend_wxagg.FigureCanvasWxAgg', 'FigureCanvasWxAgg', (['self', '(-1)', 'fig'], {}), '(self, -1, fig)\n', (4534, 4549), False, 'from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg\n'), ((4602, 4637), 
'wx.CheckBox', 'wx.CheckBox', (['self'], {'label': '"""Limit to"""'}), "(self, label='Limit to')\n", (4613, 4637), False, 'import wx\n'), ((4715, 4778), 'wx.SpinCtrlDouble', 'wx.SpinCtrlDouble', (['self'], {'value': '"""100"""', 'min': '(10)', 'max': '(10000)', 'inc': '(10)'}), "(self, value='100', min=10, max=10000, inc=10)\n", (4732, 4778), False, 'import wx\n'), ((4883, 4913), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Clear"""'}), "(self, label='Clear')\n", (4892, 4913), False, 'import wx\n'), ((5010, 5040), 'wx.Button', 'wx.Button', (['self'], {'label': '"""Start"""'}), "(self, label='Start')\n", (5019, 5040), False, 'import wx\n'), ((5134, 5160), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (5145, 5160), False, 'import wx\n'), ((5512, 5547), 'wx.StaticBox', 'wx.StaticBox', (['self'], {'label': '"""Monitor"""'}), "(self, label='Monitor')\n", (5524, 5547), False, 'import wx\n'), ((5569, 5616), 'wx.StaticBoxSizer', 'wx.StaticBoxSizer', (['self.box'], {'orient': 'wx.VERTICAL'}), '(self.box, orient=wx.VERTICAL)\n', (5586, 5616), False, 'import wx\n'), ((6582, 6598), 'collections.deque', 'deque', (['x', 'maxlen'], {}), '(x, maxlen)\n', (6587, 6598), False, 'from collections import deque\n'), ((6616, 6632), 'collections.deque', 'deque', (['y', 'maxlen'], {}), '(y, maxlen)\n', (6621, 6632), False, 'from collections import deque\n'), ((757, 777), 'numpy.sin', 'n.sin', (['(2 * n.pi * pt)'], {}), '(2 * n.pi * pt)\n', (762, 777), True, 'import numpy as n\n'), ((780, 807), 'numpy.sin', 'n.sin', (['(2 * n.pi * pt / 50.0)'], {}), '(2 * n.pi * pt / 50.0)\n', (785, 807), True, 'import numpy as n\n'), ((826, 842), 'numpy.random.randn', 'n.random.randn', ([], {}), '()\n', (840, 842), True, 'import numpy as n\n'), ((1973, 1979), 'time.time', 'time', ([], {}), '()\n', (1977, 1979), False, 'from time import sleep, time\n'), ((3843, 3859), 'numpy.save', 'n.save', (['path', 'xy'], {}), '(path, xy)\n', (3849, 3859), True, 'import numpy as n\n'), 
((5296, 5336), 'wx.StaticText', 'wx.StaticText', (['self'], {'label': '"""data points"""'}), "(self, label='data points')\n", (5309, 5336), False, 'import wx\n')] |
import numpy as np
import matplotlib as mpl
# For headless environments
mpl.use('Agg') # NOQA
import matplotlib.pyplot as plt
from rastervision.common.utils import (
expand_dims, compute_ndvi, plot_img_row, download_dataset)
from rastervision.common.data.generators import Batch, FileGenerator
ISPRS = 'isprs'
class IsprsBatch(Batch):
    """Batch that additionally carries a ground-truth evaluation mask."""
    def __init__(self):
        # boolean mask of pixels to include when scoring (see
        # IsprsDataset.rgb_to_mask_batch); None until one is attached
        self.y_mask = None
        super().__init__()
class IsprsDataset():
    """Metadata and utilities for dealing with ISPRS data.
    The ISPRS semantic labeling datasets can be found at
    http://www2.isprs.org/commissions/comm3/wg4/semantic-labeling.html
    The ground truth label images can be represented in several ways: 1) the
    contest organizers provide the ground truth as RGB images, where each RGB
    value represents a different label. 2) When evaluating the output of the
    model, it is more convenient to represent each label as an integer. 3) The
    neural network generates output that is one-hot coded. It is useful to be
    able to translate between the representations, so this contains methods to
    do so. Each method can take a batch with shape [batch_size, nb_rows,
    nb_cols, nb_channels], or a single image with shape [nb_rows, nb_cols,
    nb_channels], and the returned array will have the same shape as the
    input.
    """
    def __init__(self):
        # RGB vectors corresponding to different labels
        # Impervious surfaces (RGB: 255, 255, 255)
        # Building (RGB: 0, 0, 255)
        # Low vegetation (RGB: 0, 255, 255)
        # Tree (RGB: 0, 255, 0)
        # Car (RGB: 255, 255, 0)
        # Clutter/background (RGB: 255, 0, 0)
        self.label_keys = [
            [255, 255, 255],
            [0, 0, 255],
            [0, 255, 255],
            [0, 255, 0],
            [255, 255, 0],
            [255, 0, 0],
        ]
        self.nb_labels = len(self.label_keys)
        # human-readable names, index-aligned with label_keys
        self.label_names = [
            'Impervious',
            'Building',
            'Low vegetation',
            'Tree',
            'Car',
            'Clutter'
        ]
    @expand_dims
    def rgb_to_mask_batch(self, batch):
        """Convert a label image with black boundary pixels into a mask.
        Since there is uncertainty associated with the boundary of
        objects/regions in the ground truth segmentation, it makes sense
        to ignore these boundaries during evaluation. To help, the contest
        organizers have provided special ground truth images where the boundary
        pixels (in a 3 pixel radius) are black.
        # Returns
        A boolean array where an element is True if it should be used in
        the evaluation, and ignored otherwise.
        """
        # black pixels (0, 0, 0) mark the ignored boundary region
        mask = (batch[:, :, :, 0] == 0) & \
            (batch[:, :, :, 1] == 0) & \
            (batch[:, :, :, 2] == 0)
        mask = np.bitwise_not(mask)
        mask = np.expand_dims(mask, axis=3)
        return mask
    @expand_dims
    def rgb_to_label_batch(self, batch):
        """Convert RGB-coded ground truth into integer labels.
        # Returns
        An array of shape [..., 1] with the label index of each pixel;
        pixels matching no key stay 0 (the 'Impervious' label).
        """
        label_batch = np.zeros(batch.shape[:-1])
        for label, key in enumerate(self.label_keys):
            mask = (batch[:, :, :, 0] == key[0]) & \
                (batch[:, :, :, 1] == key[1]) & \
                (batch[:, :, :, 2] == key[2])
            label_batch[mask] = label
        return np.expand_dims(label_batch, axis=3)
    @expand_dims
    def label_to_one_hot_batch(self, label_batch):
        """Convert integer labels into one-hot coded output."""
        # drop a trailing singleton channel axis if present
        if label_batch.ndim == 4:
            label_batch = np.squeeze(label_batch, axis=3)
        nb_labels = len(self.label_keys)
        shape = np.concatenate([label_batch.shape, [nb_labels]])
        one_hot_batch = np.zeros(shape)
        for label in range(nb_labels):
            one_hot_batch[:, :, :, label][label_batch == label] = 1.
        return one_hot_batch
    @expand_dims
    def rgb_to_one_hot_batch(self, rgb_batch):
        """Convert RGB-coded ground truth straight into one-hot coding."""
        label_batch = self.rgb_to_label_batch(rgb_batch)
        return self.label_to_one_hot_batch(label_batch)
    @expand_dims
    def label_to_rgb_batch(self, label_batch):
        """Convert integer labels back into RGB-coded images."""
        if label_batch.ndim == 4:
            label_batch = np.squeeze(label_batch, axis=3)
        rgb_batch = np.zeros(np.concatenate([label_batch.shape, [3]]),
                             dtype=np.uint8)
        for label, key in enumerate(self.label_keys):
            mask = label_batch == label
            rgb_batch[mask, :] = key
        return rgb_batch
    @expand_dims
    def one_hot_to_label_batch(self, one_hot_batch):
        """Convert one-hot coded output into integer labels (argmax)."""
        one_hot_batch = np.argmax(one_hot_batch, axis=3)
        return np.expand_dims(one_hot_batch, axis=3)
    @expand_dims
    def one_hot_to_rgb_batch(self, one_hot_batch):
        """Convert one-hot coded output into RGB-coded images."""
        label_batch = self.one_hot_to_label_batch(one_hot_batch)
        return self.label_to_rgb_batch(label_batch)
    def augment_channels(self, batch_x):
        """Append an NDVI channel computed from the red and IR channels.
        NOTE(review): relies on self.red_ind / self.ir_ind, which are not
        set in __init__ -- presumably set by a subclass; confirm.
        """
        red = batch_x[:, :, :, [self.red_ind]]
        ir = batch_x[:, :, :, [self.ir_ind]]
        ndvi = compute_ndvi(red, ir)
        return np.concatenate([batch_x, ndvi], axis=3)
class IsprsFileGenerator(FileGenerator):
    """File generator specialized for the ISPRS semantic labeling datasets."""
    def __init__(self, options):
        super().__init__(options)
    def plot_sample(self, file_path, x, y):
        """Save a PDF showing one sample: input channels on the top row,
        ground-truth label channels on the bottom row."""
        fig = plt.figure()
        # enough columns for whichever row (inputs or labels) is wider
        nb_cols = max(self.dataset.nb_channels + 1, self.dataset.nb_labels + 1)
        grid_spec = mpl.gridspec.GridSpec(2, nb_cols)
        # Plot x channels: RGB composite first, then each raw channel
        x = self.calibrate_image(x)
        rgb_x = x[:, :, self.dataset.rgb_inds]
        imgs = [rgb_x]
        nb_channels = x.shape[2]
        for channel_ind in range(nb_channels):
            img = x[:, :, channel_ind]
            imgs.append(img)
        row_ind = 0
        plot_img_row(fig, grid_spec, row_ind, imgs)
        # Plot y channels: RGB-coded labels first, then each one-hot plane
        rgb_y = self.dataset.one_hot_to_rgb_batch(y)
        imgs = [rgb_y]
        for channel_ind in range(y.shape[2]):
            img = y[:, :, channel_ind]
            imgs.append(img)
        row_ind = 1
        plot_img_row(fig, grid_spec, row_ind, imgs)
        plt.savefig(file_path, bbox_inches='tight', format='pdf', dpi=600)
        plt.close(fig)
    def download_dataset(self, file_names):
        # the method shadows the imported helper of the same name; the
        # call below resolves to the module-level download_dataset
        download_dataset(ISPRS, file_names)
| [
"matplotlib.pyplot.savefig",
"numpy.argmax",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.bitwise_not",
"numpy.expand_dims",
"matplotlib.pyplot.figure",
"matplotlib.use",
"rastervision.common.utils.download_dataset",
"rastervision.common.utils.plot_img_row",
"numpy.squeeze",
"matplotlib.gr... | [((72, 86), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (79, 86), True, 'import matplotlib as mpl\n'), ((2866, 2886), 'numpy.bitwise_not', 'np.bitwise_not', (['mask'], {}), '(mask)\n', (2880, 2886), True, 'import numpy as np\n'), ((2902, 2930), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(3)'}), '(mask, axis=3)\n', (2916, 2930), True, 'import numpy as np\n'), ((3033, 3059), 'numpy.zeros', 'np.zeros', (['batch.shape[:-1]'], {}), '(batch.shape[:-1])\n', (3041, 3059), True, 'import numpy as np\n'), ((3323, 3358), 'numpy.expand_dims', 'np.expand_dims', (['label_batch'], {'axis': '(3)'}), '(label_batch, axis=3)\n', (3337, 3358), True, 'import numpy as np\n'), ((3578, 3626), 'numpy.concatenate', 'np.concatenate', (['[label_batch.shape, [nb_labels]]'], {}), '([label_batch.shape, [nb_labels]])\n', (3592, 3626), True, 'import numpy as np\n'), ((3651, 3666), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (3659, 3666), True, 'import numpy as np\n'), ((4509, 4541), 'numpy.argmax', 'np.argmax', (['one_hot_batch'], {'axis': '(3)'}), '(one_hot_batch, axis=3)\n', (4518, 4541), True, 'import numpy as np\n'), ((4557, 4594), 'numpy.expand_dims', 'np.expand_dims', (['one_hot_batch'], {'axis': '(3)'}), '(one_hot_batch, axis=3)\n', (4571, 4594), True, 'import numpy as np\n'), ((4930, 4951), 'rastervision.common.utils.compute_ndvi', 'compute_ndvi', (['red', 'ir'], {}), '(red, ir)\n', (4942, 4951), False, 'from rastervision.common.utils import expand_dims, compute_ndvi, plot_img_row, download_dataset\n'), ((4967, 5006), 'numpy.concatenate', 'np.concatenate', (['[batch_x, ndvi]'], {'axis': '(3)'}), '([batch_x, ndvi], axis=3)\n', (4981, 5006), True, 'import numpy as np\n'), ((5176, 5188), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5186, 5188), True, 'import matplotlib.pyplot as plt\n'), ((5289, 5322), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', (['(2)', 'nb_cols'], {}), '(2, nb_cols)\n', (5310, 
5322), True, 'import matplotlib as mpl\n'), ((5632, 5675), 'rastervision.common.utils.plot_img_row', 'plot_img_row', (['fig', 'grid_spec', 'row_ind', 'imgs'], {}), '(fig, grid_spec, row_ind, imgs)\n', (5644, 5675), False, 'from rastervision.common.utils import expand_dims, compute_ndvi, plot_img_row, download_dataset\n'), ((5921, 5964), 'rastervision.common.utils.plot_img_row', 'plot_img_row', (['fig', 'grid_spec', 'row_ind', 'imgs'], {}), '(fig, grid_spec, row_ind, imgs)\n', (5933, 5964), False, 'from rastervision.common.utils import expand_dims, compute_ndvi, plot_img_row, download_dataset\n'), ((5974, 6040), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_path'], {'bbox_inches': '"""tight"""', 'format': '"""pdf"""', 'dpi': '(600)'}), "(file_path, bbox_inches='tight', format='pdf', dpi=600)\n", (5985, 6040), True, 'import matplotlib.pyplot as plt\n'), ((6049, 6063), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6058, 6063), True, 'import matplotlib.pyplot as plt\n'), ((6117, 6152), 'rastervision.common.utils.download_dataset', 'download_dataset', (['ISPRS', 'file_names'], {}), '(ISPRS, file_names)\n', (6133, 6152), False, 'from rastervision.common.utils import expand_dims, compute_ndvi, plot_img_row, download_dataset\n'), ((3488, 3519), 'numpy.squeeze', 'np.squeeze', (['label_batch'], {'axis': '(3)'}), '(label_batch, axis=3)\n', (3498, 3519), True, 'import numpy as np\n'), ((4108, 4139), 'numpy.squeeze', 'np.squeeze', (['label_batch'], {'axis': '(3)'}), '(label_batch, axis=3)\n', (4118, 4139), True, 'import numpy as np\n'), ((4170, 4210), 'numpy.concatenate', 'np.concatenate', (['[label_batch.shape, [3]]'], {}), '([label_batch.shape, [3]])\n', (4184, 4210), True, 'import numpy as np\n')] |
import numpy as np
import rowan as rn
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
from mpl_toolkits import mplot3d
import matplotlib.animation as animation
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.gridspec import SubplotSpec, GridSpec
from uavDy import uav
def create_subtitle(fig: plt.Figure, grid: SubplotSpec, title: str):
    """Place a section title above a row of subplots.

    An invisible, frameless axes spanning *grid* is added to *fig* whose
    only job is to carry the title; the leading newlines push the text
    clear of the row below it.
    """
    header_ax = fig.add_subplot(grid)
    header_ax.set_title('\n\n\n' + title, fontweight='medium', fontsize='medium')
    header_ax.set_frame_on(False)
    header_ax.axis('off')
def setlimits(ax, full_state):
    """Apply symmetric, cubic axis limits to a 3D axes from trajectory extents.

    The largest value among the x/y/z columns of *full_state* (columns 0-2)
    is found and the same symmetric limit, padded by *edge*, is applied to
    all three axes so the plot keeps equal scaling in every direction.

    Parameters
    ----------
    ax : 3D axes (mpl_toolkits.mplot3d)
        Axes whose limits and labels are set in place.
    full_state : np.ndarray
        State history; only columns 0:3 (x, y, z) are read.

    Returns
    -------
    The same *ax*, for chaining.
    """
    # edge: extra margin added around the trajectory extents
    edge = 0.9
    # max() keeps the original tie-breaking order (x, then y, then z);
    # the three original branches all set identical limits, so a single
    # max over the per-axis maxima is equivalent.
    max_ = max(max(full_state[:, 0]), max(full_state[:, 1]), max(full_state[:, 2]))
    ax.set_xlim3d([-max_ - edge, max_ + edge])
    ax.set_ylim3d([-max_ - edge, max_ + edge])
    ax.set_zlim3d([-max_ - edge, max_ + edge])
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    return ax
def plotPayloadStates(full_state, posq, tf_sim):
    """Plot the payload states and the quadrotor/payload separation.

    Five figures are produced: payload positions, linear velocities,
    angular velocities, the cable directional unit vector p, and the norm
    of the difference between the quadrotor and payload positions.

    Parameters
    ----------
    full_state : np.ndarray
        Payload history; columns are [xl(0:3), vl(3:6), p(6:9), wl(9:12)].
    posq : np.ndarray
        Quadrotor positions (same row count as *full_state*), columns x/y/z.
    tf_sim : float
        Final simulation time in milliseconds (scaled by 1e-3 below).

    Returns
    -------
    fig8, fig9, fig10, fig11, fig12 : matplotlib figures
        Positions, linear velocities, angular velocities, cable vector,
        separation norm.
    """
    fig8, ax11 = plt.subplots(3, 1, sharex=True, sharey=True)
    fig8.tight_layout()
    fig9, ax12 = plt.subplots(3, 1, sharex=True, sharey=True)
    fig9.tight_layout()
    fig10, ax13 = plt.subplots(3, 1, sharex=True, sharey=True)
    fig10.tight_layout()
    fig11, ax14 = plt.subplots(3, 1, sharex=True, sharey=True)
    fig11.tight_layout()
    fig12, ax15 = plt.subplots(1, 1, sharex=True, sharey=True)
    fig12.tight_layout()
    # tf_sim is in milliseconds; the time axis is plotted in seconds.
    time = np.linspace(0, tf_sim*1e-3, num=len(full_state))
    pos = full_state[:,0:3]
    linVel = full_state[:,3:6]
    angVel = full_state[:,9:12]
    p = full_state[:,6:9]
    ts = 'time [s]'
    ###############################################################################################
    ax11[0].plot(time, pos[:,0], c='k', lw=0.75, label='Actual'), ax11[1].plot(time, pos[:,1], lw=0.75, c='k'), ax11[2].plot(time, pos[:,2], lw=0.75, c='k')
    ax11[0].set_ylabel('x [m]',), ax11[1].set_ylabel('y [m]'), ax11[2].set_ylabel('z [m]')
    ax11[0].legend()
    fig8.supxlabel(ts,fontsize='small')
    grid = plt.GridSpec(3,1)
    create_subtitle(fig8, grid[0, ::], 'Actual Payload Positions')
    ###############################################################################################
    ax12[0].plot(time, linVel[:,0],lw=0.75, c='k', label='Actual'), ax12[1].plot(time, linVel[:,1],lw=0.75, c='k'), ax12[2].plot(time, linVel[:,2],lw=0.75, c='k')
    ax12[0].set_ylabel('vx [m/s]'), ax12[1].set_ylabel('vy [m/s]'), ax12[2].set_ylabel('vz [m/s]')
    ax12[0].legend()
    fig9.supxlabel(ts,fontsize='small')
    grid = plt.GridSpec(3,1)
    create_subtitle(fig9, grid[0, ::], 'Actual Payload Linear Velocities')
    ###############################################################################################
    ax13[0].plot(time, angVel[:,0],c='k',lw=1, label='Actual'), ax13[1].plot(time, angVel[:,1],c='k',lw=1), ax13[2].plot(time, angVel[:,2],c='k',lw=1)
    ax13[0].set_ylabel('wx [deg/s]',labelpad=-5), ax13[1].set_ylabel('wy [deg/s]',labelpad=-5), ax13[2].set_ylabel('wz [deg/s]',labelpad=-5)
    fig10.supxlabel(ts,fontsize='small')
    grid = plt.GridSpec(3,1)
    create_subtitle(fig10, grid[0, ::], ' Actual Payload Angular Velocities')
    ###############################################################################################
    ax14[0].plot(time, p[:,0],c='k',lw=1, label='Actual'), ax14[1].plot(time, p[:,1],c='k',lw=1), ax14[2].plot(time, p[:,2],c='k',lw=1)
    ax14[0].set_ylabel('px',labelpad=-5), ax14[1].set_ylabel('py',labelpad=-5), ax14[2].set_ylabel('pz',labelpad=-5)
    fig11.supxlabel(ts,fontsize='small')
    grid = plt.GridSpec(3,1)
    create_subtitle(fig11, grid[0, ::], 'Cable Directional Unit Vector')
    ###############################################################################################
    # Vectorized row-wise distance replaces the original per-row Python
    # loop over np.linalg.norm (same values, one NumPy call).
    norm_x = np.linalg.norm(pos - posq, axis=1)
    ax15.plot(time, norm_x,c='k',lw=1, label='Norm')
    ax15.set_ylabel('||xq - xp||',labelpad=-2)
    fig12.supxlabel(ts,fontsize='small')
    grid = plt.GridSpec(3,1)
    create_subtitle(fig12, grid[0, ::], 'Diff between Quadrotor and Payload Positions (Norm)')
    return fig8, fig9, fig10, fig11, fig12
###############################################################################################
def outputPlots(uavs, payloads, savePlot, tf_sim, pdfName):
    """Draw the full diagnostic plot set for every UAV, optionally into one PDF.

    Per UAV: a text title page, actual-vs-reference positions and linear
    velocities, angular velocities, position/velocity tracking errors,
    force and torque control inputs, per-motor forces and the 3D
    trajectory; when the UAV carries a payload, the payload figures from
    plotPayloadStates() are appended as well.

    Parameters
    ----------
    uavs : dict
        UAV id -> model exposing fullState, ctrlInps, refState and pload.
    payloads : dict
        UAV id -> payload model; only read when uav_.pload is truthy.
    savePlot : bool
        When True, all figures are written to the PDF file *pdfName*.
    tf_sim : float
        Final simulation time in milliseconds (scaled by 1e-3 below).
    pdfName : str
        Path of the output PDF.
    """
    print('Plotting...')
    f = PdfPages(pdfName)
    # perform file operations
    for id, uav_ in uavs.items():
        # Title page: an empty figure carrying only the UAV id as text.
        txt = id
        textfig, textax = plt.subplots(figsize=(6, 6))
        textax.grid(False)
        textax.axis(False)
        textax.text(0.45, 0.45, txt, size=15, color='black')
        full_state = uav_.fullState
        cont_stack = uav_.ctrlInps
        ref_state = uav_.refState
        if uav_.pload:
            payload = payloads[id]
        plt.rcParams['axes.grid'] = True
        plt.rcParams['figure.max_open_warning'] = 100
        # One figure per plot family (3x1 grids for x/y/z quantities).
        fig1, ax1 = plt.subplots(3, 1, sharex=True ,sharey=True)
        fig1.tight_layout()
        fig2, ax2 = plt.subplots(3, 1, sharex=True, sharey=True)
        fig2.tight_layout()
        fig3, ax3 = plt.subplots(3, 1, sharex=True ,sharey=True)
        fig3.tight_layout()
        fig4, ax4 = plt.subplots(2, 3, sharex=True ,sharey=True)
        fig4.tight_layout()
        # fig5: force input (tall left column) next to the three torques.
        fig5 = plt.figure(constrained_layout=True)
        gs = GridSpec(3, 2, figure=fig5)
        ax5 = fig5.add_subplot(gs[:, 0])
        ax6 = fig5.add_subplot(gs[0,1])
        ax7 = fig5.add_subplot(gs[1,1],sharey=ax6)
        ax8 = fig5.add_subplot(gs[2,1],sharey=ax6)
        fig6, ax9 = plt.subplots(4, 1, sharex=True ,sharey=True,figsize=(9,4.8))
        fig6.tight_layout()
        # tf_sim is in milliseconds; the time axis is plotted in seconds.
        time = np.linspace(0, tf_sim*1e-3, num=len(full_state))
        # Slices of the state/reference histories (one row per time step).
        pos = full_state[:,0:3]
        linVel = full_state[:,3:6]
        angVel = full_state[:,10::]
        posdes = ref_state[:,0:3]
        linVeldes = ref_state[:,3::]
        ts = 'time [s]'
        # Tracking errors: reference minus actual.
        poserr = (posdes[:,:] - pos[:,:]).reshape(len(full_state),3)
        linVerr = (linVeldes[:,:] - linVel[:,:]).reshape(len(full_state),3)
        ###################################
        # Actual vs reference positions.
        ax1[0].plot(time, pos[:,0], c='k', lw=0.75,label='Actual'), ax1[1].plot(time, pos[:,1], lw=0.75, c='k'), ax1[2].plot(time, pos[:,2], lw=0.75, c='k')
        ax1[0].plot(time, posdes[:,0], lw=0.75, c='darkgreen',label='Reference'), ax1[1].plot(time, posdes[:,1], lw=0.75, c='darkgreen'), ax1[2].plot(time, posdes[:,2], lw=0.75, c='darkgreen')
        ax1[0].set_ylabel('x [m]',), ax1[1].set_ylabel('y [m]'), ax1[2].set_ylabel('z [m]')
        ax1[0].legend()
        fig1.supxlabel(ts,fontsize='small')
        grid = plt.GridSpec(3,1)
        create_subtitle(fig1, grid[0, ::], 'Actual vs Reference Positions')
        ###################################
        # Actual vs reference linear velocities.
        ax2[0].plot(time, linVel[:,0],lw=0.75, c='k' ,label='Actual'), ax2[1].plot(time, linVel[:,1],lw=0.75, c='k'), ax2[2].plot(time, linVel[:,2],lw=0.75, c='k')
        ax2[0].plot(time, linVeldes[:,0],lw=0.75, c='darkgreen',label='Reference'), ax2[1].plot(time, linVeldes[:,1],lw=0.75, c='darkgreen'), ax2[2].plot(time, linVeldes[:,2],lw=0.75, c='darkgreen')
        ax2[0].set_ylabel('vx [m/s]'), ax2[1].set_ylabel('vy [m/s]'), ax2[2].set_ylabel('vz [m/s]')
        ax2[0].legend()
        fig2.supxlabel(ts,fontsize='small')
        grid = plt.GridSpec(3,1)
        create_subtitle(fig2, grid[0, ::], 'Actual vs Reference Linear Velocities')
        ###################################
        # Angular velocities (actual only).
        ax3[0].plot(time, angVel[:,0],c='k',lw=1)
        ax3[1].plot(time, angVel[:,1],c='k',lw=1)
        ax3[2].plot(time, angVel[:,2],c='k',lw=1)
        ax3[0].set_ylabel('wx [deg/s]',labelpad=-5), ax3[1].set_ylabel('wy [deg/s]',labelpad=-5), ax3[2].set_ylabel('wz [deg/s]',labelpad=-5)
        fig3.supxlabel(ts,fontsize='small')
        grid = plt.GridSpec(3,1)
        create_subtitle(fig3, grid[0, ::], 'Actual Angular Velocities')
        ###################################
        # Position (row 0) and linear velocity (row 1) tracking errors.
        ax4[0,0].plot(time, poserr[:,0],c='r',lw=0.7), ax4[0,1].plot(time, poserr[:,1],c='r',lw=0.7), ax4[0,2].plot(time, poserr[:,2],c='r',lw=0.7)
        ax4[0,0].set_ylabel('ex [m/s]'), ax4[0,1].set_ylabel('ey [m/s]'), ax4[0,2].set_ylabel('ez [m/s]')
        ax4[1,0].plot(time, linVerr[:,0],c='r',lw=0.7), ax4[1,1].plot(time, linVerr[:,1],c='r',lw=0.7), ax4[1,2].plot(time, linVerr[:,2],c='r',lw=0.7)
        ax4[1,0].set_ylabel('vex des [m/s]'), ax4[1,1].set_ylabel('vey des [m/s]'), ax4[1,2].set_ylabel('vez des [m/s]')
        fig4.supxlabel(ts,fontsize='small')
        grid = plt.GridSpec(2,3)
        create_subtitle(fig4, grid[0, ::], 'Positional errors')
        create_subtitle(fig4, grid[1, ::], 'Linear Velocities errors')
        ###################################
        # Control inputs: thrust (column 0) and body torques (column 4-7 next).
        ax5.plot(time, cont_stack[:,0],lw=0.8, c='darkblue')
        ax5.set_ylabel('fz [N]')
        ax6.plot(time, cont_stack[:,1], lw=0.8, c='darkblue'), ax7.plot(time, cont_stack[:,2], lw=0.8, c='darkblue'), ax8.plot(time, cont_stack[:,3],lw=0.8, c='darkblue')
        ax6.set_ylabel('taux [N.m]',fontsize='small'), ax7.set_ylabel('tauy [N.m]',fontsize='small'), ax8.set_ylabel('tauz [N.m]',fontsize='small')
        fig5.supxlabel(ts,fontsize='small')
        create_subtitle(fig5, gs[::, 0], 'Force Control Input')
        create_subtitle(fig5, gs[::, 1], 'Torque Control Input')
        ###################################
        # Per-motor forces (columns 4-7 of the control input stack).
        ax9[0].plot(time, cont_stack[:,4], c='darkred',lw=0.7)
        ax9[1].plot(time, cont_stack[:,5], c='darkred',lw=0.7)
        ax9[2].plot(time, cont_stack[:,6], c='darkred',lw=0.7)
        ax9[3].plot(time, cont_stack[:,7], c='darkred',lw=0.7)
        ax9[0].set_ylabel('f1 [N]'), ax9[1].set_ylabel('f2 [N]'), ax9[2].set_ylabel('f3 [N]'), ax9[3].set_ylabel('f4 [N]')
        fig6.supxlabel(ts,fontsize='small')
        grid = plt.GridSpec(4,1)
        create_subtitle(fig6, grid[0,::], 'Motor Forces')
        ###################################
        # 3D trajectory: actual vs reference.
        fig7 = plt.figure(figsize=(10,10))
        ax10 = fig7.add_subplot(autoscale_on=True,projection="3d")
        ax10.plot3D(pos[:,0], pos[:,1], pos[:,2], 'k-.',lw=1.5, label="Actual Trajectory")
        ax10.plot3D(posdes[:,0], posdes[:,1] , posdes[:,2],'darkgreen',ls='--',lw=1.5,label="Reference Trajectory")
        ax10.legend()
        ax10 = setlimits(ax10, pos)
        if uav_.pload:
            fig8, fig9, fig10, fig11, fig12 = plotPayloadStates(payload.plFullState, pos, tf_sim)
        if savePlot:
            # Append every figure for this UAV to the shared PDF.
            textfig.savefig(f, format='pdf', bbox_inches='tight')
            fig1.savefig(f, format='pdf', bbox_inches='tight')
            fig2.savefig(f, format='pdf', bbox_inches='tight')
            fig3.savefig(f, format='pdf', bbox_inches='tight')
            fig4.savefig(f, format='pdf', bbox_inches='tight')
            fig5.savefig(f, format='pdf', bbox_inches='tight')
            fig6.savefig(f, format='pdf', bbox_inches='tight')
            fig7.savefig(f, format='pdf', bbox_inches='tight')
            if uav_.pload:
                fig8.savefig(f, format='pdf', bbox_inches='tight')
                fig9.savefig(f, format='pdf', bbox_inches='tight')
                fig10.savefig(f, format='pdf', bbox_inches='tight')
                fig11.savefig(f, format='pdf', bbox_inches='tight')
                fig12.savefig(f, format='pdf', bbox_inches='tight')
    f.close()
def RotatedCylinder(center_x, center_y, radius, height_z, q):
    """Return the surface grid of a cylinder rotated by quaternion *q*.

    A cylinder of the given radius and height is sampled on a 50x50
    (theta, z) grid centred at (center_x, center_y), then every sample
    point is rotated by the rotation matrix of *q* (via rowan).

    Parameters
    ----------
    center_x, center_y : float
        Cylinder axis position in the x-y plane (before rotation).
    radius, height_z : float
        Cylinder radius and height.
    q : array-like
        Quaternion accepted by rowan.to_matrix.

    Returns
    -------
    Xb, Yb, Zb : np.ndarray
        50x50 coordinate grids of the rotated surface (for plot_surface).
    """
    R_i = rn.to_matrix(q)
    z = np.linspace(0, height_z, 50)
    theta = np.linspace(0, 2*np.pi, 50)
    theta_grid, Zc = np.meshgrid(theta, z)
    Xc = radius*np.cos(theta_grid) + center_x
    Yc = radius*np.sin(theta_grid) + center_y
    # Rotate all grid points in one batched matmul: (3,3) @ (3, 2500)
    # replaces the original Python double loop over every sample point.
    pts = np.stack((Xc, Yc, Zc)).reshape(3, -1)
    Xb, Yb, Zb = (R_i @ pts).reshape(3, *Xc.shape)
    return Xb, Yb, Zb
def Sphere(Cx, Cy, Cz, r):
    """Return a (30, 20) surface grid of a sphere of radius *r* at the origin.

    NOTE(review): the centre arguments Cx/Cy/Cz are currently unused — the
    caller (PlotandAnimate.animate) offsets the returned grid itself;
    confirm before relying on them here.
    """
    u = np.linspace(0.0, 2.0 * np.pi, 30)
    v = np.linspace(0.0, np.pi, 20)
    uu, vv = np.meshgrid(u, v, indexing='ij')
    x = r * np.cos(uu) * np.sin(vv)
    y = r * np.sin(uu) * np.sin(vv)
    z = r * np.cos(vv)
    return x, y, z
class PlotandAnimate:
    """Animate one or more UAVs (and optional payloads) on a shared 3D axes.

    Every frame clears the axes and redraws, for each UAV: body/reference
    coordinate frames, the quadrotor arms and propellers, the actual and
    reference trajectories and, when a payload is attached, the cable and
    a payload sphere.  Histories are subsampled by *sample*.
    """
    def __init__(self, fig, ax, uavModels, payloads, sample):
        # Initialize the Actual and Reference states
        self.payloads = payloads
        self.uavModels = uavModels
        self.sample = sample
        # Frame count taken from the (subsampled) history of the first UAV.
        self.frames = len(list(self.uavModels.values())[0].fullState[::self.sample, :])
        # Initialize a 3d figure
        self.fig = fig
        self.ax = ax
        self.ax.view_init(25,35)
    def initializeQuad(self):
        """Create the plot artists and body-frame arm vectors for the current UAV.

        NOTE(review): relies on self.uavModel / self.full_state being set
        beforehand (done per-UAV inside animate()).
        """
        # Create the lines and vectors to draw body and desired frames
        self.line, = self.ax.plot(self.full_state[0,0:1], self.full_state[1,0:1], self.full_state[2,0:1], 'b--', lw=1)
        self.vec1 = self.ax.quiver([],[],[],[],[],[])
        self.vec2 = self.ax.quiver([],[],[],[],[],[])
        self.vec3 = self.ax.quiver([],[],[],[],[],[])
        self.vec1d = self.ax.quiver([],[],[],[],[],[])
        self.vec2d = self.ax.quiver([],[],[],[],[],[])
        self.vec3d = self.ax.quiver([],[],[],[],[],[])
        #Create the arms of the quadrotor in the body frame
        # Arm 1 lies along the body x axis (scaled by d*100); arm 2 is
        # arm 1 rotated 90 degrees about the body z axis.
        self.armb1 = np.array([[self.uavModel.d*10**(2)*np.cos(0)], [self.uavModel.d*10**(2)*np.sin(0)] ,[0]])
        self._armb1 = np.array([[-self.uavModel.d*10**(2)*np.cos(0)], [-self.uavModel.d*10**(2)*np.sin(0)] ,[0]])
        q90z = rn.from_euler(0, 0, np.radians(90),convention='xyz')
        rot90z = rn.to_matrix(q90z)
        self.armb2 = rot90z @ (self.armb1.reshape(3,))
        self._armb2 = rot90z @ (self._armb1.reshape(3,))
    def startAnimation(self,videoname,dt):
        """Run the animation over all frames and save it under Videos/."""
        self.ani = animation.FuncAnimation(self.fig, self.animate, frames=self.frames, interval=dt*1000,blit=True)
        self.ani.save('Videos/'+videoname)
    def setlimits(self):
        # This method finds the maximum value in the x-y-z actual states for the UAV(s) and sets the limits of the figure accordingly
        # edge: adds extra space for the figure
        edge = 0.5
        maxs_ = []
        for uav in self.uavModels.values():
            max_x = max(uav.fullState[:,0])
            max_y = max(uav.fullState[:,1])
            max_z = max(uav.fullState[:,2])
            if (max_x >= max_y) and (max_x >= max_z):
                max_ = max_x
            elif (max_y >= max_x) and (max_y >= max_z):
                max_ = max_y
            else:
                max_ = max_z
            maxs_.append(max_)
        # Use the largest extent over all UAVs so every vehicle stays in view.
        max_ = max(maxs_)
        self.ax.set_xlim3d([-max_-edge, max_+edge])
        self.ax.set_ylim3d([-max_-edge, max_+edge])
        self.ax.set_zlim3d([-max_-edge, max_+edge])
        self.ax.set_xlim3d([-max_-edge, max_+edge])
        self.ax.set_ylim3d([-max_-edge, max_+edge])
        self.ax.set_zlim3d([-max_-edge, max_+edge])
        self.ax.set_xlabel('X')
        self.ax.set_ylabel('Y')
        self.ax.set_zlabel('Z')
    def drawQuivers(self, x, y, z, q, xref, yref, zref):
        """Draw the body frame at (x,y,z) and the world frame at the reference point."""
        R_i = rn.to_matrix(q)
        # Body frame axes are the columns of the rotation matrix.
        u = R_i[:,0]
        v = R_i[:,1]
        w = R_i[:,2]
        # Reference frame is the identity (world axes).
        ud = np.array([1,0,0])
        vd = np.array([0,1,0])
        wd = np.array([0,0,1])
        self.vec1 = self.ax.quiver(x,y,z, u[0], u[1] ,u[2],color = 'r', length = 0.2)
        self.vec2 = self.ax.quiver(x,y,z, v[0], v[1] ,v[2],color = 'g', length = 0.2)
        self.vec3 = self.ax.quiver(x,y,z, w[0], w[1] ,w[2],color = 'b', length = 0.2)
        self.vec1r = self.ax.quiver(xref,yref,zref, ud[0], ud[1] ,ud[2],color = 'r', length = 0.5)
        self.vec2r = self.ax.quiver(xref,yref,zref, vd[0], vd[1] ,vd[2],color = 'g', length = 0.5)
        self.vec3r = self.ax.quiver(xref,yref,zref, wd[0], wd[1] ,wd[2],color = 'b', length = 0.5)
    def getCurrState(self, i):
        """Return the actual trajectory up to frame i and the attitude quaternion at i."""
        x = self.full_state[:i+1,0]
        y = self.full_state[:i+1,1]
        z = self.full_state[:i+1,2]
        q = self.full_state[i,6:10].reshape(4,)
        return x, y, z, q
    def getRefState(self, i):
        """Return the reference trajectory up to frame i."""
        xref = self.reference_state[:i+1,0]
        yref = self.reference_state[:i+1,1]
        zref = self.reference_state[:i+1,2]
        return xref, yref, zref
    def getArmpos(self, x, y, z, q):
        """Return the four arm tip positions in the inertial frame."""
        R_i = rn.to_matrix(q)
        position = np.array([x, y, z])
        armI1 = position + R_i @ (self.armb1.reshape(3,))
        _armI1 = position + R_i @ (self._armb1.reshape(3,))
        armI2 = position + R_i @(self.armb2.reshape(3,))
        _armI2 = position + R_i @ (self._armb2.reshape(3,))
        return armI1, armI2, _armI1, _armI2
    def drawActvsRefTraj(self, x, y, z, xref, yref, zref):
        """Draw the actual and reference trajectory traces."""
        self.ax.plot3D(x, y, z, 'k-.',lw=1.5,label="Actual Trajectory")
        self.ax.plot3D(xref, yref ,zref,c='darkgreen',ls='--',lw=1.5,label="Reference Trajectory")
        # self.ax.legend()
    def drawQuadrotorArms(self, x, y, z, armI1, armI2, _armI1, _armI2):
        """Draw the four arms as segments from the centre to each tip."""
        self.ax.plot3D(np.linspace(x, armI1[0]), np.linspace(y, armI1[1]), np.linspace(z, armI1[2]),'k',lw=2)
        self.ax.plot3D(np.linspace(x, _armI1[0]), np.linspace(y, _armI1[1]), np.linspace(z, _armI1[2]),'k',lw=2)
        self.ax.plot3D(np.linspace(x, armI2[0]), np.linspace(y, armI2[1]), np.linspace(z, armI2[2]),'k',lw=2)
        self.ax.plot3D(np.linspace(x, _armI2[0]), np.linspace(y, _armI2[1]), np.linspace(z, _armI2[2]),'k',lw=2)
    def drawPropellers(self, Xb, Yb, Zb,armI1, armI2, _armI1, _armI2):
        """Draw one cylinder surface (propeller) at each arm tip."""
        self.ax.plot_surface(Xb+armI1[0], Yb+armI1[1], Zb+armI1[2], alpha=1)
        self.ax.plot_surface(Xb+_armI1[0], Yb+_armI1[1], Zb+_armI1[2], alpha=1)
        self.ax.plot_surface(Xb+armI2[0], Yb+armI2[1], Zb+armI2[2], alpha=1)
        self.ax.plot_surface(Xb+_armI2[0], Yb+_armI2[1], Zb+_armI2[2], alpha=1)
    def getPayloadStates(self,i):
        """Return the payload trajectory up to frame i."""
        xl = self.plFullstate[:i+1,0]
        yl = self.plFullstate[:i+1,1]
        zl = self.plFullstate[:i+1,2]
        return xl, yl, zl
    def drawPlTraj(self, xl,yl,zl):
        """Draw the payload trajectory trace."""
        self.ax.plot3D(xl, yl, zl, 'darkblue',linestyle='-.',lw=1.5,label="Payload Trajectory")
        # self.ax.legend()
    def drawPayload(self,x,y,z,xl,yl,zl):
        """Draw the cable as a segment from the quadrotor to the payload."""
        c_st = np.array([x,y,z])
        c_en = np.array([xl,yl,zl])
        self.ax.plot3D(np.linspace(c_st[0], c_en[0]), np.linspace(c_st[1], c_en[1]), np.linspace(c_st[2], c_en[2]), 'darkblue',lw=2)
    def animate(self,i):
        """Redraw frame *i*: clear the axes and draw every UAV (and payload)."""
        self.ax.cla()
        self.setlimits()
        for id in self.uavModels.keys():
            # Cache the current UAV's (subsampled) histories on self so the
            # helper methods can read them.
            self.uavModel = self.uavModels[id]
            self.full_state = self.uavModel.fullState[::self.sample, :]
            self.reference_state = self.uavModel.refState[::self.sample, :]
            if self.uavModel.pload:
                self.payload = self.payloads[id]
                self.plFullstate = self.payload.plFullState[::self.sample, :]
            self.initializeQuad()
            x, y, z, q = self.getCurrState(i)
            xref,yref,zref = self.getRefState(i)
            armI1, armI2, _armI1, _armI2 = self.getArmpos(x[i],y[i],z[i],q)
            if self.uavModel.pload:
                xl, yl, zl = self.getPayloadStates(i)
                self.drawPayload(x[i], y[i], z[i], xl[i], yl[i], zl[i])
                # self.drawPlTraj(xl, yl, zl)
                # Payload drawn as a small sphere of radius r at its position.
                r = 1e-1
                xsp, ysp, zsp = Sphere(xl, yl, zl, r)
                self.ax.plot_surface(xl[i]+xsp, yl[i]+ysp, zl[i]+zsp, cmap=plt.cm.YlGnBu_r)
            self.drawQuivers(x[i],y[i],z[i], q, xref[i], yref[i], zref[i])
            self.drawActvsRefTraj(x, y, z, xref, yref, zref)
            self.drawQuadrotorArms(x[i], y[i], z[i], armI1, armI2, _armI1, _armI2)
            Xb,Yb,Zb = RotatedCylinder(0,0,0.1,0.1,q)
            self.drawPropellers(Xb, Yb, Zb,armI1, armI2, _armI1, _armI2)
        return self.line,
| [
"matplotlib.backends.backend_pdf.PdfPages",
"numpy.radians",
"numpy.meshgrid",
"numpy.zeros_like",
"matplotlib.animation.FuncAnimation",
"rowan.to_matrix",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"numpy.sin",
"numpy.linspace",
"matplotlib.pyplot.GridSpec",
"numpy.cos",
"numpy.array"... | [((1674, 1718), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(3, 1, sharex=True, sharey=True)\n', (1686, 1718), True, 'import matplotlib.pyplot as plt\n'), ((1765, 1809), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(3, 1, sharex=True, sharey=True)\n', (1777, 1809), True, 'import matplotlib.pyplot as plt\n'), ((1853, 1897), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(3, 1, sharex=True, sharey=True)\n', (1865, 1897), True, 'import matplotlib.pyplot as plt\n'), ((1942, 1986), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(3, 1, sharex=True, sharey=True)\n', (1954, 1986), True, 'import matplotlib.pyplot as plt\n'), ((2035, 2079), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(1, 1, sharex=True, sharey=True)\n', (2047, 2079), True, 'import matplotlib.pyplot as plt\n'), ((2740, 2758), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(3)', '(1)'], {}), '(3, 1)\n', (2752, 2758), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3279), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(3)', '(1)'], {}), '(3, 1)\n', (3273, 3279), True, 'import matplotlib.pyplot as plt\n'), ((3798, 3816), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(3)', '(1)'], {}), '(3, 1)\n', (3810, 3816), True, 'import matplotlib.pyplot as plt\n'), ((4302, 4320), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(3)', '(1)'], {}), '(3, 1)\n', (4314, 4320), True, 'import matplotlib.pyplot as plt\n'), ((4784, 4802), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(3)', '(1)'], {}), '(3, 1)\n', (4796, 4802), True, 'import matplotlib.pyplot as plt\n'), ((5133, 5150), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['pdfName'], {}), '(pdfName)\n', (5141, 5150), False, 
'from matplotlib.backends.backend_pdf import PdfPages\n'), ((12299, 12314), 'rowan.to_matrix', 'rn.to_matrix', (['q'], {}), '(q)\n', (12311, 12314), True, 'import rowan as rn\n'), ((12337, 12365), 'numpy.linspace', 'np.linspace', (['(0)', 'height_z', '(50)'], {}), '(0, height_z, 50)\n', (12348, 12365), True, 'import numpy as np\n'), ((12387, 12416), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(50)'], {}), '(0, 2 * np.pi, 50)\n', (12398, 12416), True, 'import numpy as np\n'), ((12436, 12457), 'numpy.meshgrid', 'np.meshgrid', (['theta', 'z'], {}), '(theta, z)\n', (12447, 12457), True, 'import numpy as np\n'), ((12559, 12576), 'numpy.zeros_like', 'np.zeros_like', (['Xc'], {}), '(Xc)\n', (12572, 12576), True, 'import numpy as np\n'), ((12586, 12603), 'numpy.zeros_like', 'np.zeros_like', (['Xc'], {}), '(Xc)\n', (12599, 12603), True, 'import numpy as np\n'), ((12613, 12630), 'numpy.zeros_like', 'np.zeros_like', (['Xc'], {}), '(Xc)\n', (12626, 12630), True, 'import numpy as np\n'), ((4589, 4627), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos[i, :] - posq[i, :])'], {}), '(pos[i, :] - posq[i, :])\n', (4603, 4627), True, 'import numpy as np\n'), ((5262, 5290), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (5274, 5290), True, 'import matplotlib.pyplot as plt\n'), ((5723, 5767), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(3, 1, sharex=True, sharey=True)\n', (5735, 5767), True, 'import matplotlib.pyplot as plt\n'), ((5825, 5869), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(3, 1, sharex=True, sharey=True)\n', (5837, 5869), True, 'import matplotlib.pyplot as plt\n'), ((5919, 5963), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(3, 1, sharex=True, sharey=True)\n', (5931, 5963), True, 'import matplotlib.pyplot as plt\n'), ((6013, 
6057), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'sharex': '(True)', 'sharey': '(True)'}), '(2, 3, sharex=True, sharey=True)\n', (6025, 6057), True, 'import matplotlib.pyplot as plt\n'), ((6102, 6137), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (6112, 6137), True, 'import matplotlib.pyplot as plt\n'), ((6151, 6178), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(3)', '(2)'], {'figure': 'fig5'}), '(3, 2, figure=fig5)\n', (6159, 6178), False, 'from matplotlib.gridspec import SubplotSpec, GridSpec\n'), ((6384, 6446), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'sharex': '(True)', 'sharey': '(True)', 'figsize': '(9, 4.8)'}), '(4, 1, sharex=True, sharey=True, figsize=(9, 4.8))\n', (6396, 6446), True, 'import matplotlib.pyplot as plt\n'), ((7475, 7493), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(3)', '(1)'], {}), '(3, 1)\n', (7487, 7493), True, 'import matplotlib.pyplot as plt\n'), ((8170, 8188), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(3)', '(1)'], {}), '(3, 1)\n', (8182, 8188), True, 'import matplotlib.pyplot as plt\n'), ((8670, 8688), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(3)', '(1)'], {}), '(3, 1)\n', (8682, 8688), True, 'import matplotlib.pyplot as plt\n'), ((9401, 9419), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(2)', '(3)'], {}), '(2, 3)\n', (9413, 9419), True, 'import matplotlib.pyplot as plt\n'), ((10684, 10702), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(4)', '(1)'], {}), '(4, 1)\n', (10696, 10702), True, 'import matplotlib.pyplot as plt\n'), ((10820, 10848), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (10830, 10848), True, 'import matplotlib.pyplot as plt\n'), ((12989, 12998), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (12995, 12998), True, 'import numpy as np\n'), ((13021, 13030), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (13027, 13030), True, 'import 
numpy as np\n'), ((13041, 13050), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (13047, 13050), True, 'import numpy as np\n'), ((14455, 14473), 'rowan.to_matrix', 'rn.to_matrix', (['q90z'], {}), '(q90z)\n', (14467, 14473), True, 'import rowan as rn\n'), ((14658, 14760), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['self.fig', 'self.animate'], {'frames': 'self.frames', 'interval': '(dt * 1000)', 'blit': '(True)'}), '(self.fig, self.animate, frames=self.frames,\n interval=dt * 1000, blit=True)\n', (14681, 14760), True, 'import matplotlib.animation as animation\n'), ((15978, 15993), 'rowan.to_matrix', 'rn.to_matrix', (['q'], {}), '(q)\n', (15990, 15993), True, 'import rowan as rn\n'), ((16070, 16089), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (16078, 16089), True, 'import numpy as np\n'), ((16101, 16120), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (16109, 16120), True, 'import numpy as np\n'), ((16132, 16151), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (16140, 16151), True, 'import numpy as np\n'), ((17183, 17198), 'rowan.to_matrix', 'rn.to_matrix', (['q'], {}), '(q)\n', (17195, 17198), True, 'import rowan as rn\n'), ((17218, 17237), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (17226, 17237), True, 'import numpy as np\n'), ((19105, 19124), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (19113, 19124), True, 'import numpy as np\n'), ((19138, 19160), 'numpy.array', 'np.array', (['[xl, yl, zl]'], {}), '([xl, yl, zl])\n', (19146, 19160), True, 'import numpy as np\n'), ((12474, 12492), 'numpy.cos', 'np.cos', (['theta_grid'], {}), '(theta_grid)\n', (12480, 12492), True, 'import numpy as np\n'), ((12520, 12538), 'numpy.sin', 'np.sin', (['theta_grid'], {}), '(theta_grid)\n', (12526, 12538), True, 'import numpy as np\n'), ((12718, 12758), 'numpy.array', 'np.array', (['[Xc[i, j], Yc[i, j], Zc[i, j]]'], {}), '([Xc[i, j], Yc[i, j], Zc[i, j]])\n', (12726, 
12758), True, 'import numpy as np\n'), ((12977, 12986), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (12983, 12986), True, 'import numpy as np\n'), ((13009, 13018), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (13015, 13018), True, 'import numpy as np\n'), ((14400, 14414), 'numpy.radians', 'np.radians', (['(90)'], {}), '(90)\n', (14410, 14414), True, 'import numpy as np\n'), ((17894, 17918), 'numpy.linspace', 'np.linspace', (['x', 'armI1[0]'], {}), '(x, armI1[0])\n', (17905, 17918), True, 'import numpy as np\n'), ((17920, 17944), 'numpy.linspace', 'np.linspace', (['y', 'armI1[1]'], {}), '(y, armI1[1])\n', (17931, 17944), True, 'import numpy as np\n'), ((17946, 17970), 'numpy.linspace', 'np.linspace', (['z', 'armI1[2]'], {}), '(z, armI1[2])\n', (17957, 17970), True, 'import numpy as np\n'), ((18004, 18029), 'numpy.linspace', 'np.linspace', (['x', '_armI1[0]'], {}), '(x, _armI1[0])\n', (18015, 18029), True, 'import numpy as np\n'), ((18031, 18056), 'numpy.linspace', 'np.linspace', (['y', '_armI1[1]'], {}), '(y, _armI1[1])\n', (18042, 18056), True, 'import numpy as np\n'), ((18058, 18083), 'numpy.linspace', 'np.linspace', (['z', '_armI1[2]'], {}), '(z, _armI1[2])\n', (18069, 18083), True, 'import numpy as np\n'), ((18126, 18150), 'numpy.linspace', 'np.linspace', (['x', 'armI2[0]'], {}), '(x, armI2[0])\n', (18137, 18150), True, 'import numpy as np\n'), ((18152, 18176), 'numpy.linspace', 'np.linspace', (['y', 'armI2[1]'], {}), '(y, armI2[1])\n', (18163, 18176), True, 'import numpy as np\n'), ((18178, 18202), 'numpy.linspace', 'np.linspace', (['z', 'armI2[2]'], {}), '(z, armI2[2])\n', (18189, 18202), True, 'import numpy as np\n'), ((18236, 18261), 'numpy.linspace', 'np.linspace', (['x', '_armI2[0]'], {}), '(x, _armI2[0])\n', (18247, 18261), True, 'import numpy as np\n'), ((18263, 18288), 'numpy.linspace', 'np.linspace', (['y', '_armI2[1]'], {}), '(y, _armI2[1])\n', (18274, 18288), True, 'import numpy as np\n'), ((18290, 18315), 'numpy.linspace', 'np.linspace', (['z', 
'_armI2[2]'], {}), '(z, _armI2[2])\n', (18301, 18315), True, 'import numpy as np\n'), ((19182, 19211), 'numpy.linspace', 'np.linspace', (['c_st[0]', 'c_en[0]'], {}), '(c_st[0], c_en[0])\n', (19193, 19211), True, 'import numpy as np\n'), ((19213, 19242), 'numpy.linspace', 'np.linspace', (['c_st[1]', 'c_en[1]'], {}), '(c_st[1], c_en[1])\n', (19224, 19242), True, 'import numpy as np\n'), ((19244, 19273), 'numpy.linspace', 'np.linspace', (['c_st[2]', 'c_en[2]'], {}), '(c_st[2], c_en[2])\n', (19255, 19273), True, 'import numpy as np\n'), ((14189, 14198), 'numpy.cos', 'np.cos', (['(0)'], {}), '(0)\n', (14195, 14198), True, 'import numpy as np\n'), ((14226, 14235), 'numpy.sin', 'np.sin', (['(0)'], {}), '(0)\n', (14232, 14235), True, 'import numpy as np\n'), ((14302, 14311), 'numpy.cos', 'np.cos', (['(0)'], {}), '(0)\n', (14308, 14311), True, 'import numpy as np\n'), ((14340, 14349), 'numpy.sin', 'np.sin', (['(0)'], {}), '(0)\n', (14346, 14349), True, 'import numpy as np\n')] |
import math
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
    """Build an nn.Embedding with N(0, embedding_dim**-0.5) weights.

    When *padding_idx* is given, that row is zeroed so padding tokens
    contribute nothing to the model.
    """
    layer = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    layer.weight.data.normal_(mean=0.0, std=embedding_dim ** -0.5)
    if padding_idx is not None:
        layer.weight.data[padding_idx].zero_()
    return layer
def Linear(in_features, out_features, bias=True):
    """Return a plain nn.Linear layer with PyTorch's default initialisation.

    Thin factory kept for symmetry with Embedding(); custom inits
    (normal / xavier / zero bias) appear commented out in the original
    and are intentionally not applied.
    """
    return nn.Linear(in_features, out_features, bias)
def create_sinusoidal_embeddings(n_pos, dim, out):
    """Fill *out* (n_pos x dim) in place with fixed sinusoidal encodings.

    Even columns receive sin, odd columns cos, at angular frequencies
    1 / 10000**(2*(j//2)/dim) ("Attention Is All You Need").  The tensor
    is detached and marked non-trainable.
    """
    # Outer "product" pos / 10000**(2*(j//2)/dim) computed vectorized.
    angles = np.arange(n_pos)[:, None] / np.power(10000, 2 * (np.arange(dim) // 2) / dim)
    out[:, 0::2] = torch.FloatTensor(np.sin(angles[:, 0::2]))
    out[:, 1::2] = torch.FloatTensor(np.cos(angles[:, 1::2]))
    out.detach_()
    out.requires_grad = False
def gelu(x):
    """Gaussian Error Linear Unit, exact (erf-based) form.

    GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
    https://arxiv.org/abs/1606.08415
    https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/model_pytorch.py#L14
    https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/modeling.py
    The tanh approximation used by some implementations is intentionally
    not used here.
    """
    root_two = math.sqrt(2.0)
    return 0.5 * x * (1.0 + torch.erf(x / root_two))
def get_masks(slen, lengths, causal):
    """Build the padding mask and, optionally, a causal attention mask.

    Parameters
    ----------
    slen : int
        Padded sequence length; must be >= lengths.max().
    lengths : LongTensor (bs,)
        True length of each sequence in the batch.
    causal : bool
        When True, the attention mask also hides future positions.

    Returns
    -------
    mask : boolean tensor (bs, slen)
        True where the position lies inside the sequence.
    attn_mask : boolean tensor, (bs, slen) or (bs, slen, slen)
        Equal to *mask*, or the triangular causal mask (attn[b, i, j]
        is True iff j <= i).
    """
    assert lengths.max().item() <= slen
    bs = lengths.size(0)
    positions = torch.arange(slen, dtype=torch.long, device=lengths.device)
    mask = positions < lengths[:, None]
    if causal:
        attn_mask = positions[None, None, :].repeat(bs, slen, 1) <= positions[None, :, None]
    else:
        attn_mask = mask
    # sanity check
    assert mask.size() == (bs, slen)
    assert causal is False or attn_mask.size() == (bs, slen, slen)
    return mask, attn_mask
class PredLayer(nn.Module):
    """
    Prediction layer (cross_entropy or adaptive_softmax).

    Projects hidden states of size ``params.emb_dim`` onto the vocabulary
    (``params.n_words``) and computes the token prediction loss with
    either a full softmax + cross-entropy (optionally label-smoothed) or
    an adaptive softmax (``params.asm``).
    """
    def __init__(self, params):
        super().__init__()
        self.asm = params.asm
        self.n_words = params.n_words
        self.pad_index = params.pad_index
        # Label smoothing is only used on the full-softmax path; 0 disables it.
        self.label_smoothing = params.label_smoothing if hasattr(params, "label_smoothing") else 0.0
        dim = params.emb_dim
        if params.asm is False:
            self.proj = Linear(dim, params.n_words, bias=True)
        else:
            self.proj = nn.AdaptiveLogSoftmaxWithLoss(
                in_features=dim,
                n_classes=params.n_words,
                cutoffs=params.asm_cutoffs,
                div_value=params.asm_div_value,
                head_bias=True,  # default is False
            )

    def forward(self, x, y, get_scores=False):
        """
        Compute the loss, and optionally the scores.

        x: (n, dim) hidden states; y: (n,) gold token ids, which must not
        contain the padding index.  Returns (scores, loss); on the
        adaptive-softmax path scores is only computed when get_scores.
        """
        assert (y == self.pad_index).sum().item() == 0
        if self.asm is False:
            scores = self.proj(x).view(-1, self.n_words)
            if self.label_smoothing == 0.0:
                # 'mean' is the modern spelling of the long-deprecated
                # 'elementwise_mean' reduction alias (identical behavior).
                loss = F.cross_entropy(scores, y, reduction='mean')
            else:
                # Label-smoothed NLL: mix the gold-token loss with the
                # mean negative log-probability over the whole vocabulary.
                lprobs = torch.log_softmax(scores, dim=1)
                nll_loss = -lprobs.gather(dim=-1, index=y.unsqueeze(1))
                smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
                nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
                eps_i = self.label_smoothing / lprobs.size(-1)
                loss = (1. - self.label_smoothing) * nll_loss + eps_i * smooth_loss
                # Normalize by the batch size to match the 'mean' reduction.
                loss = loss / x.shape[0]
        else:
            _, loss = self.proj(x, y)
            scores = self.proj.log_prob(x) if get_scores else None
        return scores, loss

    def get_scores(self, x):
        """
        Compute scores.

        Returns log-probabilities on the adaptive-softmax path, raw
        logits otherwise.
        """
        assert x.dim() == 2
        return self.proj.log_prob(x) if self.asm else self.proj(x)
class MultiHeadAttention(nn.Module):
    # Class-level counter that hands every instance a unique layer_id,
    # used as the key into the incremental-decoding cache.
    NEW_ID = itertools.count()
    def __init__(self, n_heads, dim, dropout):
        super().__init__()
        self.layer_id = next(MultiHeadAttention.NEW_ID)
        self.dim = dim
        self.n_heads = n_heads
        self.dropout = dropout
        # The model width must split evenly across heads.
        assert self.dim % self.n_heads == 0
        self.q_lin = Linear(dim, dim)
        self.k_lin = Linear(dim, dim)
        self.v_lin = Linear(dim, dim)
        self.out_lin = Linear(dim, dim)
    def forward(self, input, mask, kv=None, cache=None):
        """
        Self-attention (if kv is None) or attention over source sentence (provided by kv).

        cache, when given, stores past (k, v) tensors per layer_id for
        incremental decoding; cache['slen'] is the already-cached length.
        """
        # Input is (bs, qlen, dim)
        # Mask is (bs, klen) (non-causal) or (bs, klen, klen)
        bs, qlen, dim = input.size()
        if kv is None:
            klen = qlen if cache is None else cache['slen'] + qlen
        else:
            klen = kv.size(1)
        assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
        n_heads = self.n_heads
        dim_per_head = dim // n_heads
        # Shape that lifts the 2-D/3-D mask to broadcast over heads.
        mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)
        def shape(x):
            """ projection """
            return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
        def unshape(x):
            """ compute context """
            return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
        q = shape(self.q_lin(input))  # (bs, n_heads, qlen, dim_per_head)
        if kv is None:
            k = shape(self.k_lin(input))  # (bs, n_heads, qlen, dim_per_head)
            v = shape(self.v_lin(input))  # (bs, n_heads, qlen, dim_per_head)
        elif cache is None or self.layer_id not in cache:
            k = v = kv
            k = shape(self.k_lin(k))  # (bs, n_heads, qlen, dim_per_head)
            v = shape(self.v_lin(v))  # (bs, n_heads, qlen, dim_per_head)
        if cache is not None:
            if self.layer_id in cache:
                if kv is None:
                    # Self-attention: append the new keys/values to the cache.
                    k_, v_ = cache[self.layer_id]
                    k = torch.cat([k_, k], dim=2)  # (bs, n_heads, klen, dim_per_head)
                    v = torch.cat([v_, v], dim=2)  # (bs, n_heads, klen, dim_per_head)
                else:
                    # Cross-attention: source keys/values never change — reuse.
                    k, v = cache[self.layer_id]
            cache[self.layer_id] = (k, v)
        q = q / math.sqrt(dim_per_head)  # (bs, n_heads, qlen, dim_per_head)
        scores = torch.matmul(q, k.transpose(2, 3))  # (bs, n_heads, qlen, klen)
        mask = (mask == 0).view(mask_reshape).expand_as(scores)  # (bs, n_heads, qlen, klen)
        scores.masked_fill_(mask, -float('inf'))  # (bs, n_heads, qlen, klen)
        # Softmax computed in float32, then cast back to the scores dtype.
        weights = F.softmax(scores.float(), dim=-1).type_as(scores)  # (bs, n_heads, qlen, klen)
        weights = F.dropout(weights, p=self.dropout, training=self.training)  # (bs, n_heads, qlen, klen)
        context = torch.matmul(weights, v)  # (bs, n_heads, qlen, dim_per_head)
        context = unshape(context)  # (bs, qlen, dim)
        return self.out_lin(context)
class TransformerFFN(nn.Module):
    """Position-wise feed-forward sublayer: linear -> activation -> linear,
    followed by dropout. Activation is gelu or ReLU per configuration."""
    def __init__(self, in_dim, dim_hidden, out_dim, dropout, gelu_activation):
        super().__init__()
        self.dropout = dropout
        self.lin1 = Linear(in_dim, dim_hidden)
        self.lin2 = Linear(dim_hidden, out_dim)
        self.act = gelu if gelu_activation else F.relu
    def forward(self, input):
        hidden = self.act(self.lin1(input))
        projected = self.lin2(hidden)
        return F.dropout(projected, p=self.dropout, training=self.training)
| [
"torch.log_softmax",
"torch.nn.AdaptiveLogSoftmaxWithLoss",
"math.sqrt",
"numpy.power",
"torch.nn.Embedding",
"torch.nn.functional.dropout",
"torch.nn.functional.cross_entropy",
"itertools.count",
"torch.cat",
"torch.nn.init.normal_",
"torch.nn.init.constant_",
"torch.arange",
"numpy.sin",
... | [((189, 257), 'torch.nn.Embedding', 'nn.Embedding', (['num_embeddings', 'embedding_dim'], {'padding_idx': 'padding_idx'}), '(num_embeddings, embedding_dim, padding_idx=padding_idx)\n', (201, 257), True, 'import torch.nn as nn\n'), ((262, 322), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight'], {'mean': '(0)', 'std': '(embedding_dim ** -0.5)'}), '(m.weight, mean=0, std=embedding_dim ** -0.5)\n', (277, 322), True, 'import torch.nn as nn\n'), ((480, 522), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features', 'bias'], {}), '(in_features, out_features, bias)\n', (489, 522), True, 'import torch.nn as nn\n'), ((1663, 1722), 'torch.arange', 'torch.arange', (['slen'], {'dtype': 'torch.long', 'device': 'lengths.device'}), '(slen, dtype=torch.long, device=lengths.device)\n', (1675, 1722), False, 'import torch\n'), ((4169, 4186), 'itertools.count', 'itertools.count', ([], {}), '()\n', (4184, 4186), False, 'import itertools\n'), ((363, 406), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight[padding_idx]', '(0)'], {}), '(m.weight[padding_idx], 0)\n', (380, 406), True, 'import torch.nn as nn\n'), ((890, 919), 'numpy.sin', 'np.sin', (['position_enc[:, 0::2]'], {}), '(position_enc[:, 0::2])\n', (896, 919), True, 'import numpy as np\n'), ((958, 987), 'numpy.cos', 'np.cos', (['position_enc[:, 1::2]'], {}), '(position_enc[:, 1::2])\n', (964, 987), True, 'import numpy as np\n'), ((7310, 7368), 'torch.nn.functional.dropout', 'F.dropout', (['weights'], {'p': 'self.dropout', 'training': 'self.training'}), '(weights, p=self.dropout, training=self.training)\n', (7319, 7368), True, 'import torch.nn.functional as F\n'), ((7416, 7440), 'torch.matmul', 'torch.matmul', (['weights', 'v'], {}), '(weights, v)\n', (7428, 7440), False, 'import torch\n'), ((8090, 8142), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.dropout', 'training': 'self.training'}), '(x, p=self.dropout, training=self.training)\n', (8099, 8142), True, 'import 
torch.nn.functional as F\n'), ((2665, 2817), 'torch.nn.AdaptiveLogSoftmaxWithLoss', 'nn.AdaptiveLogSoftmaxWithLoss', ([], {'in_features': 'dim', 'n_classes': 'params.n_words', 'cutoffs': 'params.asm_cutoffs', 'div_value': 'params.asm_div_value', 'head_bias': '(True)'}), '(in_features=dim, n_classes=params.n_words,\n cutoffs=params.asm_cutoffs, div_value=params.asm_div_value, head_bias=True)\n', (2694, 2817), True, 'import torch.nn as nn\n'), ((6769, 6792), 'math.sqrt', 'math.sqrt', (['dim_per_head'], {}), '(dim_per_head)\n', (6778, 6792), False, 'import math\n'), ((3264, 3320), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['scores', 'y'], {'reduction': '"""elementwise_mean"""'}), "(scores, y, reduction='elementwise_mean')\n", (3279, 3320), True, 'import torch.nn.functional as F\n'), ((3364, 3396), 'torch.log_softmax', 'torch.log_softmax', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (3381, 3396), False, 'import torch\n'), ((757, 792), 'numpy.power', 'np.power', (['(10000)', '(2 * (j // 2) / dim)'], {}), '(10000, 2 * (j // 2) / dim)\n', (765, 792), True, 'import numpy as np\n'), ((1447, 1461), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (1456, 1461), False, 'import math\n'), ((6436, 6461), 'torch.cat', 'torch.cat', (['[k_, k]'], {'dim': '(2)'}), '([k_, k], dim=2)\n', (6445, 6461), False, 'import torch\n'), ((6550, 6575), 'torch.cat', 'torch.cat', (['[v_, v]'], {'dim': '(2)'}), '([v_, v], dim=2)\n', (6559, 6575), False, 'import torch\n')] |
from statistics import mean
import numpy as np
from tqdm import tqdm
#credits to http://www2.stat.duke.edu/~ar182/rr/examples-gallery/PermutationTest.html
def compute_permutation_stat(z, y):
    """Two-sample permutation test on the difference of means.

    Estimates the achieved significance level of mean(z) - mean(y) by
    comparing the observed difference against 5000 random relabelings of
    the pooled sample (after http://www2.stat.duke.edu/~ar182/rr/
    examples-gallery/PermutationTest.html).

    Parameters
    ----------
    z, y : np.ndarray
        The two samples to compare (never mutated: np.hstack copies).

    Returns
    -------
    float
        1 - P(permuted difference <= observed difference); small values
        indicate mean(z) is significantly larger than mean(y).
    """
    pooled = np.hstack([z, y])
    delta = z.mean() - y.mean()
    num_samples = 5000
    estimates = np.empty(num_samples)
    for i in range(num_samples):
        # Shuffling the private pooled copy in place is equivalent to
        # drawing a fresh random relabeling each round.
        np.random.shuffle(pooled)
        estimates[i] = pooled[:z.size].mean() - pooled[-y.size:].mean()
    # count_nonzero avoids materialising the index array np.where built.
    diff_count = np.count_nonzero(estimates <= delta)
    return 1.0 - diff_count / num_samples
def read_file(file_path):
    """Read one float per line from *file_path* into a 1-D numpy array.

    Fixes two defects of the original: the file handle was never closed,
    and ``float(x[:-1])`` chopped the last character of each line, which
    silently drops a digit when the final line has no trailing newline.
    float() tolerates surrounding whitespace, so no manual stripping is
    needed; blank lines are skipped instead of raising ValueError.
    """
    with open(file_path, 'r') as fh:
        return np.array([float(line) for line in fh if line.strip()])
if __name__ == "__main__":
    # Compare per-example test losses of two training runs via a
    # permutation test on the mean difference.
    loss_list_1_file = "jason-lm-test-logs-w40-tl/default_allvar004_testloss.csv" #this one should be lower (better)
    loss_list_2_file = "jason-lm-test-logs-w40-tl/default_allvar00_testloss.csv"
    loss_list_1 = read_file(loss_list_1_file)
    loss_list_2 = read_file(loss_list_2_file)
    # p-value of the one-sided hypothesis that list 1's mean loss is lower.
    hat_asl_perm = compute_permutation_stat(loss_list_2, loss_list_1)
    print(loss_list_1_file, loss_list_2_file)
print(f"list_1_mean:{mean(loss_list_1):.5f}, \t list_2_mean:{mean(loss_list_2):.5f}, \t p value of (l1 < l2)={hat_asl_perm:.4f}") | [
"numpy.hstack",
"numpy.where",
"numpy.array",
"statistics.mean",
"numpy.random.shuffle"
] | [((403, 420), 'numpy.hstack', 'np.hstack', (['[z, y]'], {}), '([z, y])\n', (412, 420), True, 'import numpy as np\n'), ((623, 642), 'numpy.array', 'np.array', (['estimates'], {}), '(estimates)\n', (631, 642), True, 'import numpy as np\n'), ((912, 931), 'numpy.array', 'np.array', (['loss_list'], {}), '(loss_list)\n', (920, 931), True, 'import numpy as np\n'), ((257, 282), 'numpy.random.shuffle', 'np.random.shuffle', (['pooled'], {}), '(pooled)\n', (274, 282), True, 'import numpy as np\n'), ((663, 691), 'numpy.where', 'np.where', (['(estimates <= delta)'], {}), '(estimates <= delta)\n', (671, 691), True, 'import numpy as np\n'), ((1394, 1411), 'statistics.mean', 'mean', (['loss_list_1'], {}), '(loss_list_1)\n', (1398, 1411), False, 'from statistics import mean\n'), ((1434, 1451), 'statistics.mean', 'mean', (['loss_list_2'], {}), '(loss_list_2)\n', (1438, 1451), False, 'from statistics import mean\n')] |
import numpy as np
from TaichiGAME.dynamics.body import Body
from TaichiGAME.dynamics.joint.joint import JointType
from TaichiGAME.dynamics.joint.rotation import OrientationJointPrimitive, RotationJoint
from TaichiGAME.dynamics.joint.rotation import RotationJointPrimitive
from TaichiGAME.math.matrix import Matrix
class TestRotationJointPrimitive():
    def test__init__(self):
        """A fresh primitive holds two default bodies and zeroed scalars."""
        prim: RotationJointPrimitive = RotationJointPrimitive()
        assert isinstance(prim._bodya, Body)
        assert isinstance(prim._bodyb, Body)
        for scalar in (prim._ref_rot, prim._eff_mass, prim._bias):
            assert np.isclose(scalar, 0)
assert np.isclose(dut._bias, 0)
class TestOrientationJointPrimitive():
    def test__init__(self):
        """A fresh primitive holds a default body, an origin target point,
        and zeroed scalars."""
        prim: OrientationJointPrimitive = OrientationJointPrimitive()
        assert isinstance(prim._bodya, Body)
        assert prim._target_point == Matrix([0.0, 0.0], 'vec')
        for scalar in (prim._ref_rot, prim._eff_mass, prim._bias):
            assert np.isclose(scalar, 0)
assert np.isclose(dut._bias, 0)
class TestRotationJoint():
    def test__init__(self):
        """Construction sets the joint type, a primitive, and factor 0.2."""
        joint = RotationJoint()
        assert joint._type == JointType.Rotation
        assert isinstance(joint._prim, RotationJointPrimitive)
        assert np.isclose(joint._factor, 0.2)

    def test_set_value(self):
        """set_value installs the supplied primitive."""
        joint = RotationJoint()
        prim = RotationJointPrimitive()
        prim._bias = 0.66
        joint.set_value(prim)
        assert isinstance(joint._prim, RotationJointPrimitive)

    def test_prepare(self):
        assert 1  # placeholder

    def test_solve_velocity(self):
        assert 1  # placeholder

    def test_solve_position(self):
        assert 1  # placeholder

    def test_prim(self):
        """prim() exposes the internal primitive."""
        joint = RotationJoint()
        assert isinstance(joint.prim(), RotationJointPrimitive)
| [
"TaichiGAME.math.matrix.Matrix",
"numpy.isclose",
"TaichiGAME.dynamics.joint.rotation.RotationJoint",
"TaichiGAME.dynamics.joint.rotation.RotationJointPrimitive",
"TaichiGAME.dynamics.joint.rotation.OrientationJointPrimitive"
] | [((420, 444), 'TaichiGAME.dynamics.joint.rotation.RotationJointPrimitive', 'RotationJointPrimitive', ([], {}), '()\n', (442, 444), False, 'from TaichiGAME.dynamics.joint.rotation import RotationJointPrimitive\n'), ((549, 576), 'numpy.isclose', 'np.isclose', (['dut._ref_rot', '(0)'], {}), '(dut._ref_rot, 0)\n', (559, 576), True, 'import numpy as np\n'), ((592, 620), 'numpy.isclose', 'np.isclose', (['dut._eff_mass', '(0)'], {}), '(dut._eff_mass, 0)\n', (602, 620), True, 'import numpy as np\n'), ((636, 660), 'numpy.isclose', 'np.isclose', (['dut._bias', '(0)'], {}), '(dut._bias, 0)\n', (646, 660), True, 'import numpy as np\n'), ((771, 798), 'TaichiGAME.dynamics.joint.rotation.OrientationJointPrimitive', 'OrientationJointPrimitive', ([], {}), '()\n', (796, 798), False, 'from TaichiGAME.dynamics.joint.rotation import OrientationJointPrimitive, RotationJoint\n'), ((921, 948), 'numpy.isclose', 'np.isclose', (['dut._ref_rot', '(0)'], {}), '(dut._ref_rot, 0)\n', (931, 948), True, 'import numpy as np\n'), ((964, 992), 'numpy.isclose', 'np.isclose', (['dut._eff_mass', '(0)'], {}), '(dut._eff_mass, 0)\n', (974, 992), True, 'import numpy as np\n'), ((1008, 1032), 'numpy.isclose', 'np.isclose', (['dut._bias', '(0)'], {}), '(dut._bias, 0)\n', (1018, 1032), True, 'import numpy as np\n'), ((1119, 1134), 'TaichiGAME.dynamics.joint.rotation.RotationJoint', 'RotationJoint', ([], {}), '()\n', (1132, 1134), False, 'from TaichiGAME.dynamics.joint.rotation import OrientationJointPrimitive, RotationJoint\n'), ((1259, 1287), 'numpy.isclose', 'np.isclose', (['dut._factor', '(0.2)'], {}), '(dut._factor, 0.2)\n', (1269, 1287), True, 'import numpy as np\n'), ((1348, 1363), 'TaichiGAME.dynamics.joint.rotation.RotationJoint', 'RotationJoint', ([], {}), '()\n', (1361, 1363), False, 'from TaichiGAME.dynamics.joint.rotation import OrientationJointPrimitive, RotationJoint\n'), ((1402, 1426), 'TaichiGAME.dynamics.joint.rotation.RotationJointPrimitive', 'RotationJointPrimitive', ([], {}), '()\n', 
(1424, 1426), False, 'from TaichiGAME.dynamics.joint.rotation import RotationJointPrimitive\n'), ((1748, 1763), 'TaichiGAME.dynamics.joint.rotation.RotationJoint', 'RotationJoint', ([], {}), '()\n', (1761, 1763), False, 'from TaichiGAME.dynamics.joint.rotation import OrientationJointPrimitive, RotationJoint\n'), ((880, 905), 'TaichiGAME.math.matrix.Matrix', 'Matrix', (['[0.0, 0.0]', '"""vec"""'], {}), "([0.0, 0.0], 'vec')\n", (886, 905), False, 'from TaichiGAME.math.matrix import Matrix\n')] |
from typing import Union
import numpy as np
import scipy
import scipy.stats
# Every function takes and return arrays of same dimensions or scalars
# Fallback value for degenerate inputs. np.float was removed in NumPy 1.24;
# it was only ever an alias of the builtin float, so use that directly.
DEFAULT_RETURN = float(0)
def add(
    x: Union[int, float, np.ndarray], y: Union[int, float, np.ndarray]
) -> Union[int, float, np.ndarray]:
    """|x + y| / 2, with NaN/inf squashed to finite values."""
    return np.nan_to_num(abs_x(x + y)) / 2
def abs_minus(
    x: Union[int, float, np.ndarray], y: Union[int, float, np.ndarray]
) -> Union[int, float, np.ndarray]:
    """|x - y| / 2, with NaN/inf squashed to finite values."""
    return np.nan_to_num(abs_x(x - y)) / 2
def multiply(
    x: Union[int, float, np.ndarray], y: Union[int, float, np.ndarray]
) -> Union[int, float, np.ndarray]:
    """x * y / 2, with NaN/inf squashed to finite values."""
    return np.nan_to_num(x * y) / 2
def divide(
    x: Union[int, float, np.ndarray], y: Union[int, float, np.ndarray]
) -> Union[int, float, np.ndarray]:
    """x / y with divide-by-zero protection.

    Array y: elementwise division when no entry is zero; falls back to
    dividing by mean(y) when only some entries are zero; DEFAULT_RETURN
    when all are. Scalar y: DEFAULT_RETURN when y == 0.
    """
    if isinstance(y, np.ndarray) and y.size > 1:
        if (y[:] != 0).all():
            res = x / y
        elif (y[:] != 0).any():
            res = x / np.mean(y)
        else:
            res = DEFAULT_RETURN
    elif y != 0:
        res = x / y
    else:
        res = DEFAULT_RETURN
    return np.nan_to_num(res)
def inv(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """1 / x with the same zero-protection scheme as divide()."""
    if isinstance(x, np.ndarray) and x.size > 1:
        if (x[:] != 0).all():
            res = 1 / x
        elif (x[:] != 0).any():
            res = 1 / np.mean(x)
        else:
            res = DEFAULT_RETURN
    elif x != 0:
        res = 1 / x
    else:
        res = DEFAULT_RETURN
    return np.nan_to_num(res)
def abs_x(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Elementwise absolute value."""
    return np.abs(x)
def sqrt(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """sqrt(|x|), so negative inputs are valid."""
    return np.sqrt(np.abs(x))
def x_pow_y(
    x: Union[int, float, np.ndarray], y: Union[int, float, np.ndarray]
) -> Union[int, float, np.ndarray]:
    """x ** y restricted to non-negative exponents.

    Array y: elementwise power when all entries are positive; falls back
    to x ** max(y) when only some are; DEFAULT_RETURN when none are, and
    likewise for a negative scalar y.
    """
    if isinstance(y, np.ndarray) and y.size > 1:
        if (y[:] > 0).all():
            return np.power(x, y)
        elif (y[:] > 0).any():
            return np.power(x, np.max(y))
        else:
            return DEFAULT_RETURN
    elif y < 0:
        return DEFAULT_RETURN
    else:
        return np.power(x, y)
def exp_x(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """(e**x - 1) / (e - 1): exponential rescaled so 0 -> 0 and 1 -> 1."""
    numerator = np.exp(x) - 1
    denominator = np.exp(1) - 1
    return numerator / denominator
def sin_x(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Elementwise sine."""
    return np.sin(x)
def cos_x(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Elementwise cosine."""
    return np.cos(x)
def sqrt_xy(
    x: Union[int, float, np.ndarray], y: Union[int, float, np.ndarray]
) -> Union[int, float, np.ndarray]:
    """Euclidean norm of (x, y) scaled by 1/sqrt(2), NaN/inf squashed."""
    norm = np.sqrt(np.power(x, 2) + np.power(y, 2))
    return np.nan_to_num(norm / np.sqrt(2))
def stddev(x: Union[int, float, np.ndarray]) -> float:
    """Population standard deviation as a builtin float.

    np.float was removed in NumPy 1.24 (it was always just an alias of
    the builtin float), so the annotation and cast use the builtin.
    """
    return float(np.nan_to_num(np.std(x)))
def skew(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Sample skewness (third standardised moment), via scipy."""
    return scipy.stats.skew(x)
def kurtosis(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Excess kurtosis (scipy's default Fisher definition), via scipy."""
    return scipy.stats.kurtosis(x)
def mean(x: Union[int, float, np.ndarray]) -> float:
    """Arithmetic mean as a builtin float (np.float was removed in NumPy 1.24)."""
    return float(np.mean(x))
def range_x(x: Union[int, float, np.ndarray]) -> float:
    """Peak-to-peak range max(x) - min(x) as a builtin float;
    DEFAULT_RETURN for an empty array.

    np.float was removed in NumPy 1.24, so the annotation and cast use
    the builtin float.
    """
    if isinstance(x, np.ndarray) and x.size == 0:
        return DEFAULT_RETURN
    return float(np.max(x) - np.min(x))
def round_x(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Round half-to-even, with NaN/inf squashed to finite values."""
    rounded = np.round(x)
    return np.nan_to_num(rounded)
def ceil(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Elementwise ceiling."""
    return np.ceil(x)
def floor(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Elementwise floor."""
    return np.floor(x)
def max1(x: Union[int, float, np.ndarray]) -> float:
    """Largest element as a builtin float; DEFAULT_RETURN for an empty
    array (np.float was removed in NumPy 1.24)."""
    if isinstance(x, np.ndarray) and x.size == 0:
        return DEFAULT_RETURN
    return float(np.max(x))
def min1(x: Union[int, float, np.ndarray]) -> float:
    """Smallest element as a builtin float; DEFAULT_RETURN for an empty
    array (np.float was removed in NumPy 1.24)."""
    if isinstance(x, np.ndarray) and x.size == 0:
        return DEFAULT_RETURN
    return float(np.min(x))
def max2(
    x: Union[int, float, np.ndarray], y: Union[int, float, np.ndarray]
) -> Union[int, float, np.ndarray]:
    """Builtin max(x, y); DEFAULT_RETURN when the comparison is ambiguous."""
    try:
        return max(x, y)
    except ValueError:
        # comparing multi-element arrays raises "truth value is ambiguous"
        return DEFAULT_RETURN
def min2(
    x: Union[int, float, np.ndarray], y: Union[int, float, np.ndarray]
) -> Union[int, float, np.ndarray]:
    """Builtin min(x, y); DEFAULT_RETURN when the comparison is ambiguous."""
    try:
        return min(x, y)
    except ValueError:
        return DEFAULT_RETURN
def split_before(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Keep the first half of x, zero-padding back to the original length."""
    if isinstance(x, np.ndarray) and x.size > 1:
        first_half = np.array_split(x, 2)[0]
        zeros = np.zeros(x.size - first_half.size)
        return np.append(first_half, zeros)
    else:
        return DEFAULT_RETURN
def split_after(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Keep the second half of x, zero-padding back to the original length."""
    if isinstance(x, np.ndarray) and x.size > 1:
        second_half = np.array_split(x, 2)[1]
        zeros = np.zeros(x.size - second_half.size)
        return np.append(second_half, zeros)
    else:
        return DEFAULT_RETURN
def index_y(
    x: Union[int, float, np.ndarray], y: Union[int, float, np.ndarray]
) -> Union[int, float, np.ndarray]:
    """x[y], or DEFAULT_RETURN when x is not indexable / y is out of range."""
    try:
        return x[y]
    except (TypeError, IndexError):
        return DEFAULT_RETURN
def first(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """First element of a multi-element array, else DEFAULT_RETURN."""
    return x[0] if (isinstance(x, np.ndarray) and x.size > 1) else DEFAULT_RETURN
def last(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Last element of a multi-element array, else DEFAULT_RETURN."""
    return x[-1] if (isinstance(x, np.ndarray) and x.size > 1) else DEFAULT_RETURN
def rotate(
    x: Union[int, float, np.ndarray], y: Union[int, float, np.ndarray]
) -> Union[int, float, np.ndarray]:
    """Roll x by y/2 positions (by mean(y)/2 when y is an array).

    np.int was removed in NumPy 1.24; it was always just the builtin int,
    which truncates identically.
    """
    shift = int(np.mean(y / 2)) if isinstance(y, np.ndarray) else int(y / 2)
    return np.roll(x, shift)
def sum_x(x: Union[int, float, np.ndarray]) -> float:
    """Sum of all elements as a builtin float, NaN/inf squashed
    (np.float was removed in NumPy 1.24)."""
    return float(np.nan_to_num(np.sum(x)))
def const_1(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """An all-ones array matching x's size, or DEFAULT_RETURN for scalars."""
    return np.array([1] * x.size) if isinstance(x, np.ndarray) else DEFAULT_RETURN
def const_0(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """An all-zeros array matching x's size, or DEFAULT_RETURN for scalars."""
    return np.zeros(x.size) if isinstance(x, np.ndarray) else DEFAULT_RETURN
# Function registries grouped by arity. The *_REDUCERS subsets list only
# operators whose result is a scalar float.
BINARY_FUNCTIONS = [add, abs_minus, multiply, divide, sqrt_xy, max2, min2, index_y, rotate]
UNARY_FUNCTIONS = [
    inv,
    abs_x,
    sqrt,
    sin_x,
    cos_x,
    stddev,
    mean,
    range_x,
    round_x,
    ceil,
    floor,
    max1,
    min1,
    first,
    last,
    sum_x,
    const_0,
    const_1,
    split_before,
    split_after,
]
BINARY_REDUCERS = []
UNARY_REDUCERS = [max1, min1, mean, stddev, range_x, sum_x]
| [
"numpy.abs",
"numpy.nan_to_num",
"numpy.sum",
"numpy.floor",
"numpy.sin",
"numpy.mean",
"numpy.exp",
"numpy.round",
"numpy.std",
"numpy.power",
"numpy.append",
"numpy.max",
"numpy.int",
"numpy.ceil",
"numpy.float",
"numpy.min",
"numpy.cos",
"numpy.zeros",
"scipy.stats.skew",
"n... | [((167, 178), 'numpy.float', 'np.float', (['(0)'], {}), '(0)\n', (175, 178), True, 'import numpy as np\n'), ((1089, 1107), 'numpy.nan_to_num', 'np.nan_to_num', (['res'], {}), '(res)\n', (1102, 1107), True, 'import numpy as np\n'), ((1488, 1506), 'numpy.nan_to_num', 'np.nan_to_num', (['res'], {}), '(res)\n', (1501, 1506), True, 'import numpy as np\n'), ((1598, 1607), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1604, 1607), True, 'import numpy as np\n'), ((2374, 2383), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (2380, 2383), True, 'import numpy as np\n'), ((2475, 2484), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (2481, 2484), True, 'import numpy as np\n'), ((2883, 2902), 'scipy.stats.skew', 'scipy.stats.skew', (['x'], {}), '(x)\n', (2899, 2902), False, 'import scipy\n'), ((2997, 3020), 'scipy.stats.kurtosis', 'scipy.stats.kurtosis', (['x'], {}), '(x)\n', (3017, 3020), False, 'import scipy\n'), ((3528, 3538), 'numpy.ceil', 'np.ceil', (['x'], {}), '(x)\n', (3535, 3538), True, 'import numpy as np\n'), ((3630, 3641), 'numpy.floor', 'np.floor', (['x'], {}), '(x)\n', (3638, 3641), True, 'import numpy as np\n'), ((641, 661), 'numpy.nan_to_num', 'np.nan_to_num', (['(x * y)'], {}), '(x * y)\n', (654, 661), True, 'import numpy as np\n'), ((1706, 1715), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1712, 1715), True, 'import numpy as np\n'), ((3099, 3109), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3106, 3109), True, 'import numpy as np\n'), ((3425, 3436), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (3433, 3436), True, 'import numpy as np\n'), ((4603, 4637), 'numpy.zeros', 'np.zeros', (['(x.size - first_half.size)'], {}), '(x.size - first_half.size)\n', (4611, 4637), True, 'import numpy as np\n'), ((4653, 4681), 'numpy.append', 'np.append', (['first_half', 'zeros'], {}), '(first_half, zeros)\n', (4662, 4681), True, 'import numpy as np\n'), ((4919, 4954), 'numpy.zeros', 'np.zeros', (['(x.size - second_half.size)'], {}), '(x.size - second_half.size)\n', 
(4927, 4954), True, 'import numpy as np\n'), ((4970, 4999), 'numpy.append', 'np.append', (['second_half', 'zeros'], {}), '(second_half, zeros)\n', (4979, 4999), True, 'import numpy as np\n'), ((6038, 6060), 'numpy.array', 'np.array', (['([1] * x.size)'], {}), '([1] * x.size)\n', (6046, 6060), True, 'import numpy as np\n'), ((6203, 6219), 'numpy.zeros', 'np.zeros', (['x.size'], {}), '(x.size)\n', (6211, 6219), True, 'import numpy as np\n'), ((1936, 1950), 'numpy.power', 'np.power', (['x', 'y'], {}), '(x, y)\n', (1944, 1950), True, 'import numpy as np\n'), ((2143, 2157), 'numpy.power', 'np.power', (['x', 'y'], {}), '(x, y)\n', (2151, 2157), True, 'import numpy as np\n'), ((2250, 2259), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2256, 2259), True, 'import numpy as np\n'), ((2268, 2277), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (2274, 2277), True, 'import numpy as np\n'), ((2675, 2685), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2682, 2685), True, 'import numpy as np\n'), ((2781, 2790), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (2787, 2790), True, 'import numpy as np\n'), ((3730, 3739), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (3736, 3739), True, 'import numpy as np\n'), ((3906, 3915), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (3912, 3915), True, 'import numpy as np\n'), ((4563, 4583), 'numpy.array_split', 'np.array_split', (['x', '(2)'], {}), '(x, 2)\n', (4577, 4583), True, 'import numpy as np\n'), ((4879, 4899), 'numpy.array_split', 'np.array_split', (['x', '(2)'], {}), '(x, 2)\n', (4893, 4899), True, 'import numpy as np\n'), ((5819, 5832), 'numpy.int', 'np.int', (['(y / 2)'], {}), '(y / 2)\n', (5825, 5832), True, 'import numpy as np\n'), ((5933, 5942), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (5939, 5942), True, 'import numpy as np\n'), ((3202, 3211), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (3208, 3211), True, 'import numpy as np\n'), ((3214, 3223), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (3220, 3223), True, 'import numpy as 
np\n'), ((5741, 5755), 'numpy.mean', 'np.mean', (['(y / 2)'], {}), '(y / 2)\n', (5748, 5755), True, 'import numpy as np\n'), ((944, 954), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (951, 954), True, 'import numpy as np\n'), ((1343, 1353), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1350, 1353), True, 'import numpy as np\n'), ((2013, 2022), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (2019, 2022), True, 'import numpy as np\n'), ((2640, 2654), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (2648, 2654), True, 'import numpy as np\n'), ((2657, 2671), 'numpy.power', 'np.power', (['y', '(2)'], {}), '(y, 2)\n', (2665, 2671), True, 'import numpy as np\n')] |
# jbase
from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at
import numpy as np
import os
import subprocess
# Handle to the J interpreter instance; 0 means init() has not run yet.
jt = 0
def init(basedir, uname):
    """Load the platform-specific J engine library and start an interpreter.

    basedir: directory containing a ``j/`` subdir with jconsole and libj.
    uname: platform tag ("darwin", "win32", anything else means linux).
    Stores the library handle in ``libj`` and the interpreter handle in
    the module-global ``jt``. Raises AssertionError if init already ran
    or the engine fails to start.
    """
    global libj, jt
    os.chdir(basedir)
    bindir = basedir + "/j"
    darwin = uname == "darwin"
    win32 = uname == "win32"
    jc = "/jconsole.exe" if win32 else "/jconsole"
    jl = "/j" if win32 else "/libj"
    ext = ".dll" if win32 else ".dylib" if darwin else ".so"
    # Ask jconsole which SIMD level the CPU supports so the matching
    # libj build (plain / avx / avx2) can be loaded.
    avx = subprocess.check_output([bindir + jc,"-jprofile","avx.ijs"],
        cwd=bindir, encoding="utf8", text=True)
    if "avx2" in avx:
        jl += "avx2"
    elif "avx" in avx:
        jl += "avx"
    dll = bindir + jl + ext
    if jt != 0:
        raise AssertionError('init already run')
    libj = CDLL(dll)
    libj.JInit.restype = c_void_p
    libj.JGetR.restype = c_char_p
    jt = libj.JInit()
    if jt == 0:
        raise AssertionError('init library failed')
def call(cmd):
    """Run *cmd* through the J-side exec_server_ wrapper and return the result.

    Single quotes in cmd are doubled — J's string-literal escaping.
    """
    req="res=: exec_server_ '" + cmd.replace("'","''") + "'"
    do(req)
    return get("res")
def close():
    """Send the server-close command and mark the engine uninitialised."""
    global jt
    call('["maisvr",["close",""]]')
    jt = 0
def do(a):
    """Execute the J sentence *a*; returns the engine's numeric status code."""
    return libj.JDo(c_void_p(jt), tob(a))
def dor(a):
    """Execute the J sentence *a* and print any captured display output."""
    libj.JDo(c_void_p(jt),tob(a))
    s= getr()[:-1]  # drop the trailing newline
    if 0!=len(s):
        print(s)
def get(n):
    """Fetch the named J variable *n* from the engine.

    Returns bytes for J character data (type 2), an int64 array (type 4)
    or a float64 array (type 8), reshaped per the J value's shape.
    Raises AssertionError when the name is undefined or the J type is
    unsupported.
    """
    dt = c_longlong(0)
    dr = c_longlong(0)
    ds = c_longlong(0)
    dd = c_longlong(0)
    libj.JGetM(c_void_p(jt), tob(n), byref(dt), byref(dr), byref(ds), byref(dd))
    t = dt.value
    if t == 0:
        raise AssertionError('get arg not a name')
    # np.fromstring on binary data is deprecated; frombuffer is its
    # supported replacement. .copy() keeps the returned arrays writeable,
    # as fromstring's implicit copy did.
    shape = np.frombuffer(string_at(ds.value, dr.value*8), dtype=np.int64)
    count = np.prod(shape)
    if t == 2:
        r = (string_at(dd.value, count))
    elif t == 4:
        r = np.frombuffer(string_at(dd.value, count*8), dtype=np.int64).copy()
        r.shape = shape
    elif t == 8:
        r = np.frombuffer(string_at(dd.value, count*8), dtype=np.float64).copy()
        r.shape = shape
    else:
        raise AssertionError('get type not supported')
    return r
def getr():
    """Return the J session's captured display output as a str."""
    return string_at(libj.JGetR(c_void_p(jt))).decode('utf-8')
def tob(s):
    """Coerce *s* to bytes: encode str as UTF-8, pass bytes through.

    Uses isinstance instead of ``type(s) is str`` so that str subclasses
    are encoded too instead of being handed to ctypes unencoded.
    """
    if isinstance(s, str):
        s = s.encode('utf-8')
    return s
| [
"ctypes.byref",
"ctypes.string_at",
"subprocess.check_output",
"ctypes.c_longlong",
"ctypes.c_void_p",
"ctypes.CDLL",
"os.chdir",
"numpy.prod"
] | [((184, 201), 'os.chdir', 'os.chdir', (['basedir'], {}), '(basedir)\n', (192, 201), False, 'import os\n'), ((427, 533), 'subprocess.check_output', 'subprocess.check_output', (["[bindir + jc, '-jprofile', 'avx.ijs']"], {'cwd': 'bindir', 'encoding': '"""utf8"""', 'text': '(True)'}), "([bindir + jc, '-jprofile', 'avx.ijs'], cwd=bindir,\n encoding='utf8', text=True)\n", (450, 533), False, 'import subprocess\n'), ((687, 696), 'ctypes.CDLL', 'CDLL', (['dll'], {}), '(dll)\n', (691, 696), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1166, 1179), 'ctypes.c_longlong', 'c_longlong', (['(0)'], {}), '(0)\n', (1176, 1179), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1186, 1199), 'ctypes.c_longlong', 'c_longlong', (['(0)'], {}), '(0)\n', (1196, 1199), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1206, 1219), 'ctypes.c_longlong', 'c_longlong', (['(0)'], {}), '(0)\n', (1216, 1219), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1226, 1239), 'ctypes.c_longlong', 'c_longlong', (['(0)'], {}), '(0)\n', (1236, 1239), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1470, 1484), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1477, 1484), True, 'import numpy as np\n'), ((1037, 1049), 'ctypes.c_void_p', 'c_void_p', (['jt'], {}), '(jt)\n', (1045, 1049), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1083, 1095), 'ctypes.c_void_p', 'c_void_p', (['jt'], {}), '(jt)\n', (1091, 1095), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1252, 1264), 'ctypes.c_void_p', 'c_void_p', (['jt'], {}), '(jt)\n', (1260, 1264), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1274, 1283), 'ctypes.byref', 'byref', (['dt'], {}), '(dt)\n', (1279, 1283), False, 'from 
ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1285, 1294), 'ctypes.byref', 'byref', (['dr'], {}), '(dr)\n', (1290, 1294), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1296, 1305), 'ctypes.byref', 'byref', (['ds'], {}), '(ds)\n', (1301, 1305), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1307, 1316), 'ctypes.byref', 'byref', (['dd'], {}), '(dd)\n', (1312, 1316), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1412, 1445), 'ctypes.string_at', 'string_at', (['ds.value', '(dr.value * 8)'], {}), '(ds.value, dr.value * 8)\n', (1421, 1445), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1504, 1530), 'ctypes.string_at', 'string_at', (['dd.value', 'count'], {}), '(dd.value, count)\n', (1513, 1530), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1566, 1596), 'ctypes.string_at', 'string_at', (['dd.value', '(count * 8)'], {}), '(dd.value, count * 8)\n', (1575, 1596), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1664, 1694), 'ctypes.string_at', 'string_at', (['dd.value', '(count * 8)'], {}), '(dd.value, count * 8)\n', (1673, 1694), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n'), ((1839, 1851), 'ctypes.c_void_p', 'c_void_p', (['jt'], {}), '(jt)\n', (1847, 1851), False, 'from ctypes import CDLL, byref, c_char_p, c_longlong, c_void_p, string_at\n')] |
import h5py
import numpy as np
import cv2
import os
def _export_frames(h5_file, keys, series_indices, out_dir):
    """Write every frame of the listed series into out_dir as normalised,
    rotated JPEGs named <series>-<frame>.jpg."""
    for series_num in series_indices:
        series = np.array(h5_file[keys[series_num]])
        for frame_num in range(len(series)):
            frame = series[frame_num]
            span = np.ptp(frame)
            # Min-max normalise to [0, 255]; a constant frame (span == 0)
            # would otherwise divide by zero and emit a NaN image.
            if span:
                im = 255 * ((frame - np.min(frame)) / span)
            else:
                im = np.zeros_like(frame, dtype=float)
            im = np.rot90(im, 1)
            cv2.imwrite(
                out_dir
                + str(series_num).zfill(4)
                + "-"
                + str(frame_num).zfill(4)
                + ".jpg",
                im
            )
def get_data(filename, dataset_ID):
    """Split the series in an HDF5 file into train/test JPEG folders.

    The first half of the datasets goes to ./datasets/us2mr/train<ID>/,
    the second half to ./datasets/us2mr/test<ID>/.
    """
    data_path_main = "./datasets/us2mr/"
    data_path_train = data_path_main + "train" + dataset_ID + "/"
    data_path_test = data_path_main + "test" + dataset_ID + "/"
    # makedirs is race-free, creates missing parents (./datasets included,
    # where os.mkdir raised), and replaces three exists()/mkdir() pairs.
    os.makedirs(data_path_train, exist_ok=True)
    os.makedirs(data_path_test, exist_ok=True)
    h5_file = h5py.File(filename, mode="r")
    keys = list(h5_file.keys())
    half = int(len(keys) / 2)
    # TRAIN SPLIT: first half of the series; TEST SPLIT: second half.
    _export_frames(h5_file, keys, range(half), data_path_train)
    _export_frames(h5_file, keys, range(half, len(keys)), data_path_test)
# Export the ultrasound series into the "A" folders and the MR series
# into the "B" folders, using the same train/test split logic for both.
get_data("../mrus/us_images_resampled800.h5", "A")
get_data("../mrus/mr_images_resampled800.h5", "B")
| [
"os.mkdir",
"h5py.File",
"numpy.ptp",
"os.path.exists",
"numpy.min",
"numpy.rot90",
"numpy.array"
] | [((508, 537), 'h5py.File', 'h5py.File', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (517, 537), False, 'import h5py\n'), ((274, 304), 'os.path.exists', 'os.path.exists', (['data_path_main'], {}), '(data_path_main)\n', (288, 304), False, 'import os\n'), ((314, 338), 'os.mkdir', 'os.mkdir', (['data_path_main'], {}), '(data_path_main)\n', (322, 338), False, 'import os\n'), ((350, 381), 'os.path.exists', 'os.path.exists', (['data_path_train'], {}), '(data_path_train)\n', (364, 381), False, 'import os\n'), ((391, 416), 'os.mkdir', 'os.mkdir', (['data_path_train'], {}), '(data_path_train)\n', (399, 416), False, 'import os\n'), ((428, 458), 'os.path.exists', 'os.path.exists', (['data_path_test'], {}), '(data_path_test)\n', (442, 458), False, 'import os\n'), ((468, 492), 'os.mkdir', 'os.mkdir', (['data_path_test'], {}), '(data_path_test)\n', (476, 492), False, 'import os\n'), ((655, 690), 'numpy.array', 'np.array', (['h5_file[keys[series_num]]'], {}), '(h5_file[keys[series_num]])\n', (663, 690), True, 'import numpy as np\n'), ((1183, 1218), 'numpy.array', 'np.array', (['h5_file[keys[series_num]]'], {}), '(h5_file[keys[series_num]])\n', (1191, 1218), True, 'import numpy as np\n'), ((844, 859), 'numpy.rot90', 'np.rot90', (['im', '(1)'], {}), '(im, 1)\n', (852, 859), True, 'import numpy as np\n'), ((1372, 1387), 'numpy.rot90', 'np.rot90', (['im', '(1)'], {}), '(im, 1)\n', (1380, 1387), True, 'import numpy as np\n'), ((815, 825), 'numpy.ptp', 'np.ptp', (['im'], {}), '(im)\n', (821, 825), True, 'import numpy as np\n'), ((1343, 1353), 'numpy.ptp', 'np.ptp', (['im'], {}), '(im)\n', (1349, 1353), True, 'import numpy as np\n'), ((801, 811), 'numpy.min', 'np.min', (['im'], {}), '(im)\n', (807, 811), True, 'import numpy as np\n'), ((1329, 1339), 'numpy.min', 'np.min', (['im'], {}), '(im)\n', (1335, 1339), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 <NAME> <<EMAIL>>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import collections
import collections.abc
import numpy as np

from wavestate.bunch import Bunch

# from ..matrix import SRE_copy
from ...optics import alm
from . import algo_mm_linkages
from . import mm_optimize
from .. import algo_tups
class ModeMatchingAlgorithm(algo_mm_linkages.ModeMatchingLinkageAlgorithm):
    """
    Mode-matching layer of the algorithm stack.

    Registers beam-parameter targets against the linkage graph — either
    "specified" targets (an explicit complex beam parameter bound to a port,
    see ``target_add``) or cavity eigenmodes (a closed loop of ports, see
    ``cavity_add``) — and builds overlap optimizers between pairs/groups of
    targets (see ``overlap``).
    """
    def __init__(self, pa, previous=None):
        super(ModeMatchingAlgorithm, self).__init__(pa)
        # from target names to ol-loops
        # target names are parameter refs
        self.cavity_targets = dict()
        self.cavity_map = dict()
        self._targets = dict()
        # a dictionary of target names to be constructed upon access
        self._targets_deferred = dict()
        self._populate_targets()
        # This is a bit of a hack to forward the explicit calls into this object.
        # Targets/cavities registered on a previous algorithm instance are
        # inherited, but never override ones registered here (setdefault).
        self.previous = previous
        if self.previous is not None:
            for k, v in self.previous._targets.items():
                self._targets.setdefault(k, v)
            for k, v in self.previous.cavity_targets.items():
                self.cavity_targets.setdefault(k, v)
            for k, v in self.previous.cavity_map.items():
                self.cavity_map.setdefault(k, v)
        return
    def _populate_targets(self):
        """
        Visits objects in the graph looking for ModeMatchingTargetBase objects
        such as Cavity, Target and TargetMeasurement. Those objects then populate
        the target list
        """
        for obj in self.pbg.object_iter():
            try:
                visit_algo = obj.visit_mode_matching_targets
            except AttributeError:
                # don't actually run the algorithm within the try because it can
                # eat exceptions that occur within the visit method
                continue
            else:
                pass
            manip = MMAlgorithmManipulator(
                obj=obj,
                mm_algo=self,
            )
            visit_algo(manip)
        return
    def target_add(
        self,
        target_name,
        waypoints,
        q=None,
        qX=None,
        qY=None,
        z_target=0,
        z_start=0,
        wavelength=None,
        obj=None,
    ):
        """
        Register a "specified" beam-parameter target bound to the port defined
        by ``waypoints``.

        ``q`` (or per-axis ``qX``/``qY``) is the complex beam parameter; plain
        values are wrapped into ``alm.ComplexBeamParam``. The wavelength key is
        taken from ``wavelength`` or inferred from any ComplexBeamParam given.
        If ``z_target != z_start``, the q values are back-propagated through
        the inverse ABCD matrix for the distance ``z_target - z_start``.
        """
        if qX is None:
            qX = q
        if qY is None:
            qY = q
        assert qX is not None
        assert qY is not None
        if isinstance(waypoints, str):
            waypoints = [waypoints]
        if wavelength is not None:
            Wk = self.fs.parameter_to_wk(wavelength)
        else:
            Wk = None
        # Infer (or cross-check) the wavelength key from any beam parameter
        # objects that carry one
        if isinstance(q, alm.ComplexBeamParam):
            if Wk is None:
                Wk = self.fs.parameter_to_wk(q.wavelength_m)
            else:
                assert self.fs.parameter_to_wk(q.wavelength_m) == Wk
        if isinstance(qX, alm.ComplexBeamParam):
            if Wk is None:
                Wk = self.fs.parameter_to_wk(qX.wavelength_m)
            else:
                assert self.fs.parameter_to_wk(qX.wavelength_m) == Wk
        # print(qY, Wk)
        if isinstance(qY, alm.ComplexBeamParam):
            if Wk is None:
                Wk = self.fs.parameter_to_wk(qY.wavelength_m)
            else:
                assert self.fs.parameter_to_wk(qY.wavelength_m) == Wk
        if Wk is None:
            raise RuntimeError("Must specify wavelength for transporting beam targets")
        # use substrates
        oLp_set_seq = []
        for ref in waypoints:
            oLp_set_seq.append(self.bg.rAp2oLp_set(ref, obj=obj))
        if len(oLp_set_seq) == 0:
            raise RuntimeError(
                ("Must specify at least one waypoint port" " to bind the target")
            )
        elif len(oLp_set_seq) == 1:
            if len(oLp_set_seq[0]) > 1:
                with self.pbg.preferred():
                    raise RuntimeError(
                        (
                            "If only one waypoint is specified, it must uniquely define a port. "
                            "waypoint '{}' defines ports {}. Be more specific or add more waypoints"
                        ).format(waypoints[0], oLp_set_seq[0])
                    )
            oLp_path = [next(iter(oLp_set_seq[0]))]
        else:
            oLp_path = self._safe_oLp_path(oLp_set_seq)
        if not isinstance(qX, alm.ComplexBeamParam):
            qX = alm.ComplexBeamParam(qX, wavelength_m=self.fs.wavelength_from_Wk(Wk))
        if not isinstance(qY, alm.ComplexBeamParam):
            qY = alm.ComplexBeamParam(qY, wavelength_m=self.fs.wavelength_from_Wk(Wk))
        trans = self._path_transporters(oLp_path, Wk)
        if z_target != z_start:
            # Refer the given q back to the path start via the inverse ABCD
            Mx = trans.X.z2mat(z_target - z_start)
            My = trans.Y.z2mat(z_target - z_start)
            qX = qX.propagate_matrix(np.linalg.inv(Mx))
            qY = qY.propagate_matrix(np.linalg.inv(My))
        target_oP = self.pbg.referred_vtup(target_name)
        self._targets[target_oP] = Bunch(
            ol=oLp_path[0],
            oLp_path=oLp_path,
            oltrans=trans,
            qX=qX,
            qY=qY,
            Wk=Wk,
            type="specified",
        )
        return
    def cavity_add(self, target_name, waypoints, obj=None):
        """
        Register a cavity target: a closed loop of ports passing through all
        ``waypoints``. Returns the object path of the resolved loop.
        """
        # TODO, what about travelling-wave cavities
        # not sure that I love how this works yet..
        oLp_set_seq = []
        for ref in waypoints:
            oLp_set_seq.append(self.bg.rAp2oLp_set(ref, obj=obj))
        link_seq = self._safe_oLp_path(oLp_set_seq, loop=True)
        target_oP = self.pbg.referred_vtup(target_name)
        # print(oP)
        self.cavity_targets[target_oP] = link_seq
        self.cavity_map[target_name] = target_oP
        # self.cavity_params[target_name] = link_seq
        objpath = self.bg.oLp_path2objpath(link_seq, as_refs=True)
        return objpath
    def _cavity_params(self, target_name, ol, Wk, shifts_use=False):
        """
        Compute the eigenmode of cavity ``target_name`` with the round trip
        started at port ``ol``: per-axis q parameters, round-trip ABCD
        matrices, and (optionally) the shift responses referred into the
        cavity. Raises RuntimeError if the round trip is not stable.
        """
        target_oP = self.pbg.referred_vtup(target_name)
        # print(oP)
        cav_path = self.cavity_targets[target_oP]
        idx = cav_path.index(ol)
        # rotate the path to the given target start, and add the final node back
        # to form the full loop
        path = cav_path[idx:] + cav_path[:idx] + [cav_path[idx]]
        trans = self._path_transporters(path, Wk, shifts_use=shifts_use)
        matX = trans.X.full_trip_mat
        matY = trans.Y.full_trip_mat
        qX = alm.eigen_q(matX)
        qY = alm.eigen_q(matY)
        eye = np.eye(2)
        # Steady-state beam shift per shift source: -(M - I)^-1 @ shift
        cav_shiftX = {}
        for shift_key, shift in trans.X.shifts_out_referred.items():
            shiftX = -np.linalg.inv(matX - eye) @ shift
            cav_shiftX[shift_key] = shiftX
        cav_shiftY = {}
        for shift_key, shift in trans.Y.shifts_out_referred.items():
            shiftY = -np.linalg.inv(matY - eye) @ shift
            cav_shiftY[shift_key] = shiftY
        if not np.isfinite(abs(qX)) or not np.isfinite(abs(qY)):
            # Unstable cavity: dump the per-segment matrices for debugging
            # before raising
            # TODO, include matrices in error message?
            mat_full_tot = np.eye(2)
            for ol1, ol2 in algo_mm_linkages.path_pairs(trans.Y.oLp_path):
                # print(ol1, ol2)
                idx1, idx2 = trans.Y.inc_ol2idx.get(ol1, None), trans.Y.inc_ol2idx.get(
                    ol2, None
                )
                if idx1 is not None and idx2 is not None:
                    mat_full = np.eye(2)
                    for l, pfunc, mat in trans.Y.inc[idx1:idx2]:
                        mat_full = mat @ mat_full
                        mat_full_tot = mat @ mat_full_tot
                    if np.any(mat_full != np.eye(2)):
                        print(mat_full)
            print("Total RT Matrix")
            print(mat_full_tot)
            raise RuntimeError("Cavity {} is not stable".format(target_name))
        qX = alm.ComplexBeamParam(qX, wavelength_m=self.fs.wavelength_map[Wk])
        qY = alm.ComplexBeamParam(qY, wavelength_m=self.fs.wavelength_map[Wk])
        # TODO, annotate target type to relate it to innate targets
        return Bunch(
            qX=qX,
            qY=qY,
            Wk=Wk,
            ol=ol,
            matX=matX,
            matY=matY,
            type="cavity",
            cavity_path=path,
            cavity_trans=trans,
            cav_shiftX=cav_shiftX,
            cav_shiftY=cav_shiftY,
        )
    def _target_get(self, target, Wk=None):
        """
        Get the basic form of target, but don't complete the target computations if it is a cavity
        """
        target_oP = self.pbg.referred_vtup(target)
        cavB = self.cavity_targets.get(target_oP, None)
        if cavB is not None:
            return Bunch(
                oLp_set=cavB,
                target=target,
                Wk=Wk,
                type="cavity",
            )
        else:
            targB = self._targets[target_oP]
            return Bunch(
                oLp_set=[targB.ol],
                target=target,
                Wk=Wk,
                targB=targB,
                type="specified",
            )
    def _target_complete(
        self,
        targB,
        ol : algo_tups.ObjectLinkageTup,
        shifts_use=False
    ):
        """
        Take the cavity object and complete the remaining computations

        ol is an ObjectLinkage tuple
        """
        if targB.type == "cavity":
            return self._cavity_params(targB.target, ol=ol, Wk=targB.Wk, shifts_use=shifts_use)
        elif targB.type == "specified":
            target_oP = self.pbg.referred_vtup(targB.target)
            return self._targets[target_oP]
    def cavity_parameters(self, cavity_name, waypoint, Wk, obj=None, shifts_use=True):
        """
        Return the cavity eigenmode parameters at ``waypoint``, including
        the one-round-trip Gouy phases (degrees) for each axis.
        """
        Wk = self.fs.parameter_to_wk(Wk)
        if isinstance(waypoint, tuple):
            wp = waypoint
        else:
            wp = self.bg.rAp2oLp_set(waypoint, obj=obj, dir="out").pop()
        params = self._cavity_params(cavity_name, wp, Wk=Wk, shifts_use=shifts_use)
        return Bunch(
            cavB=params,
            qX=params.qX,
            qY=params.qY,
            gouyX_deg=np.angle(
                params.qX.propagate_matrix(params.matX).gouy_phasor
                / params.qX.gouy_phasor,
                deg=True,
            ),
            gouyY_deg=np.angle(
                params.qY.propagate_matrix(params.matY).gouy_phasor
                / params.qY.gouy_phasor,
                deg=True,
            ),
        )
    def cavity_digest(self, Wk, waypoint=None, print=print):
        """
        Print a human-readable summary of every registered cavity.

        ``print`` deliberately shadows the builtin so output can be
        redirected by passing an alternate callable.
        """
        with self.pbg.preferred():
            for target, target_op in sorted(self.cavity_map.items()):
                cavity_seq = self.cavity_targets[target_op]
                def print_wp(cB, wp):
                    print("-------------")
                    print("-----", target, " at ", wp)
                    print(
                        " Gouy X,Y [deg] {:.2f}, {:.2f}".format(
                            cB.gouyX_deg, cB.gouyY_deg
                        )
                    )
                    print(
                        " Gouy Frac {:.4f}, {:.4f}".format(
                            (cB.gouyX_deg / 360 + 0.5) % 1 - 0.5,
                            (cB.gouyY_deg / 360 + 0.5) % 1 - 0.5,
                        )
                    )
                    if waypoint:
                        print(" qX ", cB.qX)
                        print(" qY ", cB.qY)
                        print(
                            " diameter[m] X, Y ",
                            alm.str_m(cB.qX.W * 2),
                            alm.str_m(2 * cB.qY.W),
                        )
                if waypoint is None:
                    wp = cavity_seq[0]
                    cB = self.cavity_parameters(target, wp, Wk)
                    print_wp(cB, wp)
                elif isinstance(waypoint, list):
                    for wp in waypoint:
                        cB = self.cavity_parameters(target, wp, Wk)
                        print_wp(cB, wp)
                else:
                    wp = waypoint
                    cB = self.cavity_parameters(target, wp, Wk)
                    print_wp(cB, wp)
        return
    def overlap(
        self,
        target_fr,
        target_to,
        targets_fr=None,
        targets_to=None,
        waypoints=None,
        Wk=None,
        obj=None,
        shifts_use=False,
        _just_center=False,
    ):
        """
        Create an overlap object (a ModeMatchingOverlapperOptimizing) between
        "from" and "to" targets, optionally constrained through ``waypoints``.

        ``target_fr``/``target_to`` name the primary targets; ``targets_fr``/
        ``targets_to`` may supply additional targets (iterable, or mapping of
        target -> extra waypoint refs).
        """
        Wk = self.fs.parameter_to_wk(Wk)
        if isinstance(waypoints, str):
            waypoints = [waypoints]
        oLp_set_seq = []
        if waypoints is not None:
            for ref in waypoints:
                ref_oP = self.pbg.referred_vtup(ref)
                if ref_oP in self.cavity_targets:
                    oLp_set_seq.append(self.cavity_targets[ref_oP])
                else:
                    # print("WP: ", self.bg.rAp2oLp_set(ref, obj = obj))
                    oLp_set_seq.append(self.bg.rAp2oLp_set(ref, obj=obj))
            if len(oLp_set_seq) > 1:
                oLp_path_center = self._safe_oLp_path(oLp_set_seq)
                ol_imed = oLp_path_center
            else:
                oLp_path_center = []
                ol_imed = oLp_set_seq[0]
        else:
            oLp_path_center = []
            ol_imed = []
        def targets_normalize(target, targets):
            # Normalize (target, targets) into a single primary target plus a
            # dict mapping every target to its extra waypoint list
            targets_d = {}
            if target is None:
                if targets is not None:
                    target = next(iter(targets))
            elif isinstance(target, str):
                pass
            else:
                for t in target:
                    targets_d[t] = []
                if not target:
                    target = None
                else:
                    target = next(iter(target))
            if targets is not None:
                # BUGFIX: was ``collections.Mapping``, which was removed in
                # Python 3.10; the ABC lives in ``collections.abc`` (alias on
                # older Pythons, so behavior is unchanged there)
                if not isinstance(targets, collections.abc.Mapping):
                    for t in targets:
                        targets_d[t] = []
                else:
                    targets_d.update(targets)
            if target not in targets_d:
                if target is not None:
                    targets_d[target] = []
            return target, targets_d
        target_fr, targets_fr = targets_normalize(target_fr, targets_fr)
        target_to, targets_to = targets_normalize(target_to, targets_to)
        targetsB_fr = dict()
        targetsB_to = dict()
        # target only approach
        if not oLp_path_center:
            if _just_center:
                assert(len(ol_imed) == 1)
                oLp_path_center = list(ol_imed)
            else:
                if target_fr is None and target_to is None:
                    raise RuntimeError(
                        "Must specify from and to targets if no waypoint path is provided"
                    )
                oLp_set_seq = []
                # these should be included in the code below, rather than special-cased here
                if target_fr is not None:
                    tspecB_fr = self._target_get(target_fr, Wk=Wk)
                    frB = Bunch()
                    targetsB_fr[target_fr] = frB
                    frB.tspecB = tspecB_fr
                    oLp_set_seq.append(tspecB_fr.oLp_set)
                    for ref in targets_fr[target_fr]:
                        oLp_set_seq.append(self.bg.rAp2oLp_set(ref, obj=obj))
                    targets_fr.pop(target_fr)
                if ol_imed:
                    oLp_set_seq.append(ol_imed)
                # these should be included in the code below, rather than special-cased here
                if target_to is not None:
                    tspecB_to = self._target_get(target_to, Wk=Wk)
                    toB = Bunch()
                    targetsB_to[target_to] = toB
                    toB.tspecB = tspecB_to
                    for ref in targets_to[target_to]:
                        oLp_set_seq.append(self.bg.rAp2oLp_set(ref, obj=obj))
                    targets_to.pop(target_to)
                    oLp_set_seq.append(tspecB_to.oLp_set)
                # print("SEQ", oLp_set_seq)
                oLp_path_center = self._safe_oLp_path(oLp_set_seq)
                # these should be included in the code below, rather than special-cased here
                if target_fr is not None:
                    frB.oLp_path = [oLp_path_center[0]]
                    frB.include_center = False
                    frB.inv_start = False
                if target_to is not None:
                    toB.oLp_path = [oLp_path_center[-1]]
                    toB.include_center = True
                    toB.inv_start = True
        for t_fr, wp_fr in targets_fr.items():
            # TODO, currently from targets can only aim to the start of the path
            # not in the middle of it. ol_imed is supposed to find the nearest
            # point of intersection to the waypoint path. Must add additional handling
            # for the mid-path-intersections.
            tB_fr = self._target_get(t_fr, Wk=Wk)
            oLp_set_seq = [tB_fr.oLp_set]
            for ref in wp_fr:
                oLp_set_seq.append(self.bg.rAp2oLp_set(ref, obj=obj))
            # oLp_set_seq.append(ol_imed)
            # Currently, just aim at the start of the waypoint path
            oLp_set_seq.append(oLp_path_center)
            # TODO, not sure if this loop variable is correct
            oLp_path_fr = self._safe_oLp_path(
                oLp_set_seq, loop=False, allow_non_unique=True
            )
            if oLp_path_fr[-1] != oLp_path_center[0]:
                if len(oLp_path_fr) == 1:
                    mid = oLp_path_fr[0]
                    idx = oLp_path_center.index(mid)
                    oLp_path_fr = oLp_path_center[: idx + 1]
                    inv_start = True
                else:
                    raise NotImplementedError(
                        "MM Doesn't support middle-injection beams (target {})".format(
                            t_fr
                        )
                    )
            else:
                inv_start = False
            tspecB_fr = self._target_get(t_fr, Wk=Wk)
            frB = Bunch()
            targetsB_fr[t_fr] = frB
            frB.tspecB = tspecB_fr
            frB.oLp_path = oLp_path_fr
            frB.include_center = False
            frB.inv_start = inv_start
        for t_to, wp_to in targets_to.items():
            # also can only handle targets after the waypoint path
            tB_to = self._target_get(t_to, Wk=Wk)
            # oLp_set_seq = [ol_imed]
            # Currently, just aim at the end of the waypoint path
            oLp_set_seq = [oLp_path_center]
            for ref in wp_to:
                oLp_set_seq.append(self.bg.rAp2oLp_set(ref, obj=obj))
            oLp_set_seq.append(tB_to.oLp_set)
            oLp_path_to = self._safe_oLp_path(
                oLp_set_seq, loop=False, allow_non_unique=True
            )
            if oLp_path_to[0] != oLp_path_center[-1]:
                if len(oLp_path_to) == 1:
                    mid = oLp_path_to[-1]
                    idx = oLp_path_center.index(mid)
                    oLp_path_to = oLp_path_center[idx:]
                    inv_start = False
                else:
                    raise NotImplementedError(
                        "MM Doesn't support middle-injection beams (target {})".format(
                            t_to
                        )
                    )
            else:
                inv_start = True
            tspecB_to = self._target_get(t_to, Wk=Wk)
            toB = Bunch()
            targetsB_to[t_to] = toB
            toB.tspecB = tspecB_to
            toB.oLp_path = oLp_path_to
            toB.inv_start = inv_start
            toB.include_center = True
        # now check if paths are branching
        branching = False
        len_longest = -1
        t_longest_fr = None
        for t_fr, frB in targetsB_fr.items():
            if len(frB.oLp_path) > len_longest:
                t_longest_fr = t_fr
                len_longest = len(frB.oLp_path)
        if t_longest_fr:
            oLp_path_longest_fr = targetsB_fr[t_longest_fr].oLp_path
            # now check that the shorter path must be equal to the longer, or it
            # must be a branching path
            for t_fr, frB in targetsB_fr.items():
                if oLp_path_longest_fr[-len(frB.oLp_path) :] != frB.oLp_path:
                    branching = True
        # now check if paths are branching
        len_longest = -1
        t_longest_to = None
        for t_to, toB in targetsB_to.items():
            if len(toB.oLp_path) > len_longest:
                t_longest_to = t_to
                len_longest = len(toB.oLp_path)
        if t_longest_to:
            oLp_path_longest_to = targetsB_to[t_longest_to].oLp_path
            # now check that the shorter path must be equal to the longer, or it
            # must be a branching path
            for t_to, toB in targetsB_to.items():
                if oLp_path_longest_to[: len(toB.oLp_path)] != toB.oLp_path:
                    branching = True
        overlapper = mm_optimize.ModeMatchingOverlapperOptimizing(
            algo_pa=self.pa,
            algo_mm=self,
            targetsB_to=targetsB_to,
            targetsB_fr=targetsB_fr,
            oLp_path_center=oLp_path_center,
            Wk=Wk,
            branching=branching,
            shifts_use=shifts_use,
        )
        overlapper.set_targets(
            target1=target_fr,
            target2=target_to,
        )
        return overlapper
class MMAlgorithmView(object):
    """
    Read-only pairing of a graph object with the mode-matching algorithm and
    a parameter view of that object (base class for manipulators).
    """
    # class-level defaults so attributes exist even before __init__ assigns them
    _mm_algo = None
    _obj = None
    _p = None
    _pbg = None
    def __init__(self, obj, mm_algo, p=None, **kw):
        self._obj = obj
        self._mm_algo = mm_algo
        self._pbg = mm_algo.pbg
        # build a default parameter view for obj when one is not supplied
        if p is None:
            p = self._pbg.view(obj)
        self._p = p
        self.p = p
class MMAlgorithmManipulator(MMAlgorithmView):
    """
    View handed to graph objects during ``_populate_targets``; forwards
    cavity/target registration calls into the ModeMatchingAlgorithm.
    """
    def path(self):
        """Return the parameter-graph path string of the wrapped object."""
        return self._pbg.path_str(self._obj)
    def parent(self):
        """Return the first registered parent of the wrapped object."""
        parent, refP = next(iter(self._pbg.object_paths[self._obj]))
        return parent
    def cavity_add(self, name, waypoints):
        """Register a cavity target on behalf of the wrapped object's parent."""
        return self._mm_algo.cavity_add(name, waypoints, self.parent())
    def target_add(self, name, waypoints, qX, qY, wavelength):
        """Register a specified beam target on behalf of the wrapped object's parent."""
        return self._mm_algo.target_add(
            name, waypoints, qX=qX, qY=qY, obj=self.parent(), wavelength=wavelength
        )
    def parameter_to_wk(self, wparam):
        """Translate a wavelength parameter into a wavelength key."""
        # BUGFIX: this previously read ``self._pa_algo``, an attribute never
        # assigned anywhere in this hierarchy (MMAlgorithmView only defines
        # ``_mm_algo``/``_obj``/``_p``/``_pbg``), so the call always raised
        # AttributeError.  The frequency-space helper is exposed as ``fs`` on
        # the mode-matching algorithm (cf. ModeMatchingAlgorithm.overlap).
        return self._mm_algo.fs.parameter_to_wk(wparam)
| [
"wavestate.bunch.Bunch",
"numpy.eye",
"numpy.linalg.inv"
] | [((5288, 5386), 'wavestate.bunch.Bunch', 'Bunch', ([], {'ol': 'oLp_path[0]', 'oLp_path': 'oLp_path', 'oltrans': 'trans', 'qX': 'qX', 'qY': 'qY', 'Wk': 'Wk', 'type': '"""specified"""'}), "(ol=oLp_path[0], oLp_path=oLp_path, oltrans=trans, qX=qX, qY=qY, Wk=Wk,\n type='specified')\n", (5293, 5386), False, 'from wavestate.bunch import Bunch\n'), ((6798, 6807), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6804, 6807), True, 'import numpy as np\n'), ((8350, 8513), 'wavestate.bunch.Bunch', 'Bunch', ([], {'qX': 'qX', 'qY': 'qY', 'Wk': 'Wk', 'ol': 'ol', 'matX': 'matX', 'matY': 'matY', 'type': '"""cavity"""', 'cavity_path': 'path', 'cavity_trans': 'trans', 'cav_shiftX': 'cav_shiftX', 'cav_shiftY': 'cav_shiftY'}), "(qX=qX, qY=qY, Wk=Wk, ol=ol, matX=matX, matY=matY, type='cavity',\n cavity_path=path, cavity_trans=trans, cav_shiftX=cav_shiftX, cav_shiftY\n =cav_shiftY)\n", (8355, 8513), False, 'from wavestate.bunch import Bunch\n'), ((7340, 7349), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (7346, 7349), True, 'import numpy as np\n'), ((8975, 9031), 'wavestate.bunch.Bunch', 'Bunch', ([], {'oLp_set': 'cavB', 'target': 'target', 'Wk': 'Wk', 'type': '"""cavity"""'}), "(oLp_set=cavB, target=target, Wk=Wk, type='cavity')\n", (8980, 9031), False, 'from wavestate.bunch import Bunch\n'), ((9189, 9267), 'wavestate.bunch.Bunch', 'Bunch', ([], {'oLp_set': '[targB.ol]', 'target': 'target', 'Wk': 'Wk', 'targB': 'targB', 'type': '"""specified"""'}), "(oLp_set=[targB.ol], target=target, Wk=Wk, targB=targB, type='specified')\n", (9194, 9267), False, 'from wavestate.bunch import Bunch\n'), ((18535, 18542), 'wavestate.bunch.Bunch', 'Bunch', ([], {}), '()\n', (18540, 18542), False, 'from wavestate.bunch import Bunch\n'), ((19962, 19969), 'wavestate.bunch.Bunch', 'Bunch', ([], {}), '()\n', (19967, 19969), False, 'from wavestate.bunch import Bunch\n'), ((5122, 5139), 'numpy.linalg.inv', 'np.linalg.inv', (['Mx'], {}), '(Mx)\n', (5135, 5139), True, 'import numpy as np\n'), ((5178, 5195), 
'numpy.linalg.inv', 'np.linalg.inv', (['My'], {}), '(My)\n', (5191, 5195), True, 'import numpy as np\n'), ((6923, 6948), 'numpy.linalg.inv', 'np.linalg.inv', (['(matX - eye)'], {}), '(matX - eye)\n', (6936, 6948), True, 'import numpy as np\n'), ((7115, 7140), 'numpy.linalg.inv', 'np.linalg.inv', (['(matY - eye)'], {}), '(matY - eye)\n', (7128, 7140), True, 'import numpy as np\n'), ((7684, 7693), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (7690, 7693), True, 'import numpy as np\n'), ((15448, 15455), 'wavestate.bunch.Bunch', 'Bunch', ([], {}), '()\n', (15453, 15455), False, 'from wavestate.bunch import Bunch\n'), ((16090, 16097), 'wavestate.bunch.Bunch', 'Bunch', ([], {}), '()\n', (16095, 16097), False, 'from wavestate.bunch import Bunch\n'), ((7909, 7918), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (7915, 7918), True, 'import numpy as np\n')] |
import numpy as np
from math import log2, sqrt
def entropy(class_y):
    """
    Input:
        - class_y: list of binary class labels (0's and 1's)
    Output:
        - a scalar, the Shannon entropy of the label distribution.

    Example: entropy([0,0,0,1,1,1,1,1]) = 0.9544
    """
    # Guard the empty list: np.mean([]) would emit a RuntimeWarning and
    # return NaN; an empty node carries no uncertainty.
    if len(class_y) == 0:
        return 0
    p = np.mean(class_y)
    # A pure node (all 0's or all 1's) has zero entropy; this also avoids
    # evaluating log2(0) below.
    if p == 0 or p == 1:
        return 0
    return - p * np.log2(p) - (1 - p) * np.log2(1 - p)
def information_gain(previous_y, current_y):
    """
    Inputs:
        - previous_y : the distribution of original labels (0's and 1's)
        - current_y  : the label distributions after splitting, as a pair
                       [left_labels, right_labels]

    Output:
        - the information gain obtained by partitioning ``previous_y``
          into the two lists in ``current_y``.

    Example: previous_y = [0,0,0,1,1,1], current_y = [[0,0], [1,1,1,0]],
    info_gain = 0.4591
    """
    left_labels, right_labels = current_y
    frac_left = len(left_labels) / len(previous_y)
    # Degenerate split: everything landed on one side, so nothing was gained
    # (and the empty side would otherwise feed NaN into entropy()).
    if frac_left == 0 or frac_left == 1:
        return 0
    weighted_child_entropy = (
        frac_left * entropy(left_labels)
        + (1 - frac_left) * entropy(right_labels)
    )
    return entropy(previous_y) - weighted_child_entropy
def partition_classes(X, y, split_attribute, split_val):
    """
    Inputs:
        - X : (N,D) list containing all data attributes (numeric and/or
              categorical columns)
        - y : a list of labels
        - split_attribute : column index of the attribute to split on
        - split_val : either a numerical or categorical value to divide the
              split_attribute

    Outputs:
        - X_left, X_right, y_left, y_right : binary partition of (X, y).
          For a categorical (string) split value, the left partition holds
          the rows whose attribute equals split_val; for a numerical one,
          the rows whose attribute is <= split_val. Row order is preserved.

    Example (numeric):  split_attribute=0, split_val=3 puts rows with
    column 0 <= 3 on the left.  Example (categorical): split_val='bb' puts
    rows with column 1 == 'bb' on the left.
    """
    # dtype=object keeps mixed numeric/string columns intact; numpy arrays
    # also keep the downstream tree/forest code fast.
    X = np.array(X, dtype=object)
    y = np.array(y)
    column = X[:, split_attribute]
    if isinstance(split_val, str):
        # Categorical attribute: exact match goes left.
        left_mask = column == split_val
    else:
        # Numerical attribute: <= goes left.
        left_mask = column <= split_val
    right_mask = ~left_mask
    # Boolean masks replace the duplicated np.where/np.copy branches of the
    # original: one pass, no full-array copies, identical results.
    return X[left_mask], X[right_mask], y[left_mask], y[right_mask]
def find_best_split(X, y, split_attribute):
    """
    Inputs:
        - X : (N,D) list containing all data attributes
        - y : a list of labels
        - split_attribute : column of X on which to split

    Outputs:
        - best_split_val, info_gain : the split value of the given column
          that maximizes information gain, and that gain. Candidate values
          are the unique values of the column, tried in order of first
          appearance; ties keep the first candidate seen.

    Example: for the docstring dataset with split_attribute = 0,
    best_split_val = 4 with info_gain = 0.32.
    """
    X = np.array(X, dtype=object)
    info_gain = -1
    best_split_val = None
    # Set membership replaces the original O(n) np.isin scan over a growing
    # list, so each duplicate value is skipped in O(1).
    tried = set()
    for row in X:
        split_val = row[split_attribute]
        if split_val in tried:
            continue
        tried.add(split_val)
        _, _, y_left, y_right = partition_classes(X, y, split_attribute, split_val)
        gain = information_gain(y, [y_left, y_right])
        # Strict '>' preserves the first-seen winner on ties, matching the
        # original scan order.
        if not np.isnan(gain) and gain > info_gain:
            info_gain = gain
            best_split_val = split_val
    return best_split_val, info_gain
def find_best_feature(X, y):
    """
    Inputs:
        - X: (N,D) list containing all data attributes
        - y : a list of labels

    Outputs (4-tuple, kept for backward compatibility):
        - best_split_feature : human-readable name of the chosen attribute
          (falls back to the column index for columns beyond the hard-coded
          name list)
        - index : column index of the chosen attribute
        - infoGain : the information gain achieved by that split
        - best_split_val : the optimal split value for that attribute

    Picks the attribute whose best split maximizes information gain; strict
    '>' keeps the lowest-index feature on ties.
    """
    X = np.array(X, dtype=object)
    # Names for the (dataset-specific) first seven columns; columns past the
    # end of this list are reported by index instead of raising IndexError.
    feature_names = ["Lab-Confirmed Case", "Male", "Age", "Race",
                     "Hospitalized", "ICU Patient", "Pre-existing"]
    # Initialize all outputs so an empty/degenerate X no longer raises
    # UnboundLocalError at the return statement.
    best_gain = -1
    best_split_feature = None
    index = None
    infoGain = -1
    best_split_val = None
    for i in range(len(X[0])):
        split_val, gain = find_best_split(X, y, i)
        if not np.isnan(gain) and gain > best_gain:
            best_gain = gain
            index = i
            infoGain = gain
            best_split_feature = feature_names[i] if i < len(feature_names) else i
            best_split_val = split_val
    return best_split_feature, index, infoGain, best_split_val
| [
"numpy.isin",
"numpy.copy",
"numpy.log2",
"numpy.isnan",
"numpy.mean",
"numpy.array",
"numpy.where"
] | [((344, 360), 'numpy.mean', 'np.mean', (['class_y'], {}), '(class_y)\n', (351, 360), True, 'import numpy as np\n'), ((3363, 3388), 'numpy.array', 'np.array', (['X'], {'dtype': 'object'}), '(X, dtype=object)\n', (3371, 3388), True, 'import numpy as np\n'), ((3397, 3408), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3405, 3408), True, 'import numpy as np\n'), ((4072, 4082), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (4079, 4082), True, 'import numpy as np\n'), ((4097, 4107), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (4104, 4107), True, 'import numpy as np\n'), ((6209, 6234), 'numpy.array', 'np.array', (['X'], {'dtype': 'object'}), '(X, dtype=object)\n', (6217, 6234), True, 'import numpy as np\n'), ((7740, 7765), 'numpy.array', 'np.array', (['X'], {'dtype': 'object'}), '(X, dtype=object)\n', (7748, 7765), True, 'import numpy as np\n'), ((4236, 4267), 'numpy.where', 'np.where', (['(rightArr != split_val)'], {}), '(rightArr != split_val)\n', (4244, 4267), True, 'import numpy as np\n'), ((4283, 4313), 'numpy.where', 'np.where', (['(leftArr == split_val)'], {}), '(leftArr == split_val)\n', (4291, 4313), True, 'import numpy as np\n'), ((4531, 4561), 'numpy.where', 'np.where', (['(rightArr > split_val)'], {}), '(rightArr > split_val)\n', (4539, 4561), True, 'import numpy as np\n'), ((4577, 4607), 'numpy.where', 'np.where', (['(leftArr <= split_val)'], {}), '(leftArr <= split_val)\n', (4585, 4607), True, 'import numpy as np\n'), ((6384, 6408), 'numpy.isin', 'np.isin', (['SplitVal', 'tried'], {}), '(SplitVal, tried)\n', (6391, 6408), True, 'import numpy as np\n'), ((434, 444), 'numpy.log2', 'np.log2', (['x'], {}), '(x)\n', (441, 444), True, 'import numpy as np\n'), ((457, 471), 'numpy.log2', 'np.log2', (['(1 - x)'], {}), '(1 - x)\n', (464, 471), True, 'import numpy as np\n'), ((6627, 6639), 'numpy.isnan', 'np.isnan', (['IG'], {}), '(IG)\n', (6635, 6639), True, 'import numpy as np\n'), ((7977, 7989), 'numpy.isnan', 'np.isnan', (['IG'], {}), '(IG)\n', (7985, 
7989), True, 'import numpy as np\n')] |
import itertools
from collections import OrderedDict, namedtuple
import numpy as np
from sympy import Indexed
from devito.ir.support import Stencil
from devito.exceptions import DSEException
from devito.symbolics import retrieve_indexed, q_indirect
__all__ = ['collect']
def collect(exprs):
    """
    Determine groups of aliasing expressions in ``exprs``.
    An expression A aliases an expression B if both A and B apply the same
    operations to the same input operands, with the possibility for indexed objects
    to index into locations at a fixed constant offset in each dimension.
    For example: ::
        exprs = (a[i+1] + b[i+1], a[i+1] + b[j+1], a[i] + c[i],
                 a[i+2] - b[i+2], a[i+2] + b[i], a[i-1] + b[i-1])
    The following expressions in ``exprs`` alias to ``a[i] + b[i]``: ::
        a[i+1] + b[i+1] : same operands and operations, distance along i = 1
        a[i-1] + b[i-1] : same operands and operations, distance along i = -1
    Whereas the following do not: ::
        a[i+1] + b[j+1] : because at least one index differs
        a[i] + c[i] : because at least one of the operands differs
        a[i+2] - b[i+2] : because at least one operation differs
        a[i+2] + b[i] : because distance along ``i`` differ (+2 and +0)

    Returns ``(mapper, aliases)``: ``mapper`` maps each candidate expression
    to its alias group; ``aliases`` maps each alias expression to an Alias
    object tracking the group and its distances from the alias.
    """
    ExprData = namedtuple('ExprData', 'dimensions offsets')
    # Discard expressions:
    # - that surely won't alias to anything
    # - that are non-scalar
    candidates = OrderedDict()
    for expr in exprs:
        if expr.lhs.is_Indexed:
            continue
        indexeds = retrieve_indexed(expr.rhs, mode='all')
        # q_indirect detects indirect (A[B[i]]-style) accesses, which cannot
        # be described by constant offsets and are therefore skipped
        if indexeds and not any(q_indirect(i) for i in indexeds):
            handle = calculate_offsets(indexeds)
            if handle:
                candidates[expr.rhs] = ExprData(*handle)
    aliases = OrderedDict()
    mapper = OrderedDict()
    # Worklist: pop a candidate, gather every other candidate that is a pure
    # translation of it, then build a common basis (COM) for the group
    unseen = list(candidates)
    while unseen:
        # Find aliasing expressions
        handle = unseen.pop(0)
        group = [handle]
        for e in list(unseen):
            if compare(handle, e) and\
                    is_translated(candidates[handle].offsets, candidates[e].offsets):
                group.append(e)
                unseen.remove(e)
        # Try creating a basis for the aliasing expressions' offsets
        offsets = [tuple(candidates[e].offsets) for e in group]
        try:
            COM, distances = calculate_COM(offsets)
        except DSEException:
            # Ignore these potential aliases and move on
            continue
        alias = create_alias(handle, COM)
        # An alias has been created, so I can now update the expression mapper
        mapper.update([(i, group) for i in group])
        # In circumstances in which an expression has repeated coefficients, e.g.
        # ... + 0.025*a[...] + 0.025*b[...],
        # We may have found a common basis (i.e., same COM, same alias) at this point
        v = aliases.setdefault(alias, Alias(alias, candidates[handle].dimensions))
        v.extend(group, distances)
    # Heuristically attempt to relax the aliases offsets
    # to maximize the likelyhood of loop fusion
    groups = OrderedDict()
    for i in aliases.values():
        groups.setdefault(i.dimensions, []).append(i)
    for group in groups.values():
        ideal_anti_stencil = Stencil.union(*[i.anti_stencil for i in group])
        for i in group:
            if i.anti_stencil.subtract(ideal_anti_stencil).empty:
                aliases[i.alias] = i.relax(ideal_anti_stencil)
    return mapper, aliases
# Helpers
def create_alias(expr, offsets):
    """
    Build an aliasing expression of ``expr`` by substituting the offsets of
    each indexed object with the values in ``offsets``, an ordered sequence
    providing one offset tuple per indexed object in ``expr`` (one entry per
    dimension).
    """
    indexeds = retrieve_indexed(expr, mode='all')
    assert len(indexeds) == len(offsets)

    subs = {}
    for indexed, ofs in zip(indexeds, offsets):
        base = indexed.base
        dims = base.function.dimensions
        assert len(dims) == len(ofs)
        shifted = [d + o for d, o in zip(dims, ofs)]
        subs[indexed] = indexed.func(base, *shifted)

    return expr.xreplace(subs)
def calculate_COM(offsets):
    """
    Determine the centre of mass (COM) of a collection of offsets — a basis
    spanning the vectors in ``offsets`` — together with the distance of each
    element of ``offsets`` from that COM.

    Raises DSEException when an element cannot be expressed as the COM plus
    a single constant translation.
    """
    COM = []
    for ofs in zip(*offsets):
        entry = []
        for values in zip(*ofs):
            strides = sorted(set(values))
            # Heuristic: with an odd number of distinct strides take the
            # middle one, otherwise take their (truncated) integer average
            middle = (len(strides) - 1) // 2
            if (len(strides) - 1) % 2 == 0:
                entry.append(strides[middle])
            else:
                entry.append(int(np.mean(strides, dtype=int)))
        COM.append(tuple(entry))

    distances = []
    for ofs in offsets:
        candidate = distance(COM, ofs)
        if len(candidate) != 1:
            raise DSEException("%s cannot be represented by the COM %s" %
                               (str(ofs), str(COM)))
        distances.append(candidate.pop())

    return COM, distances
def calculate_offsets(indexeds):
    """
    Return ``(dimensions, offsets)`` where ``offsets`` holds one tuple per
    indexed object in ``indexeds``, giving its constant offset from the
    origin (0, 0, ..., 0) along each dimension.

    All objects must use the same indices in the same order, and every
    offset must be a constant; otherwise ``None`` is returned.

    Example: ``[A[i,j,k], B[i,j+2,k+3]]`` yields ``[(0, 0, 0), (0, 2, 3)]``.
    """
    reference = indexeds[0].base.function.indices
    processed = []
    for indexed in indexeds:
        dims = indexed.base.function.indices
        if dims != reference:
            return None
        entry = []
        for d, i in zip(dims, indexed.indices):
            shift = i - d
            if not shift.is_Number:
                return None
            entry.append(int(shift))
        processed.append(tuple(entry))
    return tuple(reference), processed
def distance(ofs1, ofs2):
    """
    Determine the distance of ``ofs2`` from ``ofs1``, as the set of distinct
    component-wise differences between corresponding offset tuples.
    """
    assert len(ofs1) == len(ofs2)
    deltas = set()
    for o1, o2 in zip(ofs1, ofs2):
        assert len(o1) == len(o2)
        deltas.add(tuple(b - a for a, b in zip(o1, o2)))
    return deltas
def is_translated(ofs1, ofs2):
    """
    Return True if ``ofs2`` is translated w.r.t. to ``ofs1``, False otherwise.

    For example: ::

        e1 = A[i,j] + A[i,j+1]
        e2 = A[i+1,j] + A[i+1,j+1]

    ``ofs1`` would be [(0, 0), (0, 1)], while ``ofs2`` would be [(1, 0), (1,1)],
    so ``e2`` is translated w.r.t. ``e1`` by ``(1, 0)``, and True is returned.
    """
    # A pure translation leaves exactly one distinct pairwise distance
    shifts = distance(ofs1, ofs2)
    return len(shifts) == 1
def compare(e1, e2):
    """
    Return True if the two expressions e1 and e2 alias each other, False otherwise.
    """
    # Guard clause: different node types or arities can never alias
    if type(e1) != type(e2) or len(e1.args) != len(e2.args):
        return False
    if e1.is_Atom:
        # Atoms alias iff they are equal
        return bool(e1 == e2)
    elif isinstance(e1, Indexed):
        # Indexed accesses alias iff they share the same base; only the
        # bases are compared here, not the index expressions
        return bool(e1.base == e2.base)
    else:
        # Recurse: every pair of corresponding arguments must alias
        return all(compare(a1, a2) for a1, a2 in zip(e1.args, e2.args))
class Alias(object):

    """
    Map an expression (the so called "alias") to a set of aliasing expressions.

    For each aliasing expression, the distance from the alias along each
    dimension is tracked.
    """

    def __init__(self, alias, dimensions, aliased=None, distances=None,
                 ghost_offsets=None):
        self.alias = alias
        # Derived dimensions are represented by their parent dimension
        self.dimensions = tuple(d.parent if d.is_Derived else d
                                for d in dimensions)
        self.aliased = aliased or []
        self.distances = distances or []
        self._ghost_offsets = ghost_offsets or []
        # One distance vector per aliased expression, one entry per dimension
        assert len(self.aliased) == len(self.distances)
        assert all(len(i) == len(dimensions) for i in self.distances)

    @property
    def anti_stencil(self):
        # Collect, per dimension, all offsets coming from both the tracked
        # distances and the ghost offsets
        result = Stencil()
        for source in (self.distances, self._ghost_offsets):
            for d, offs in zip(self.dimensions, zip(*source)):
                result[d].update(set(offs))
        return result

    @property
    def distance_map(self):
        # Pair each distance vector with the corresponding dimensions
        mapping = []
        for vector in self.distances:
            mapping.append(tuple(zip(self.dimensions, vector)))
        return mapping

    @property
    def diameter(self):
        """Return a map telling the min/max offsets in each dimension for this alias."""
        extremes = OrderedDict()
        for d, offs in zip(self.dimensions, zip(*self.distances)):
            extremes[d] = (min(offs), max(offs))
        return extremes

    @property
    def relaxed_diameter(self):
        """Return a map telling the min/max offsets in each dimension for this alias.
        The extremes are potentially larger than those provided by ``self.diameter``,
        as here we're also taking into account any ghost offsets provided at Alias
        construction time.."""
        return OrderedDict((d, (min(offs), max(offs)))
                           for d, offs in self.anti_stencil.items())

    @property
    def with_distance(self):
        """Return a tuple associating each aliased expression with its distance from
        ``self.alias``."""
        pairs = zip(self.aliased, self.distance_map)
        return tuple(pairs)

    def extend(self, aliased, distances):
        # The new entries must be consistent with the existing ones
        assert len(aliased) == len(distances)
        assert all(len(i) == len(self.dimensions) for i in distances)
        self.aliased.extend(aliased)
        self.distances.extend(distances)

    def relax(self, distances):
        # The cartesian product of the per-dimension offsets becomes additional
        # ghost offsets of a new, otherwise identical, Alias
        ghosts = self._ghost_offsets + list(itertools.product(*distances.values()))
        return Alias(self.alias, self.dimensions, self.aliased, self.distances,
                     ghosts)
| [
"devito.ir.support.Stencil",
"devito.ir.support.Stencil.union",
"devito.symbolics.retrieve_indexed",
"numpy.mean",
"collections.namedtuple",
"collections.OrderedDict",
"devito.symbolics.q_indirect"
] | [((1305, 1349), 'collections.namedtuple', 'namedtuple', (['"""ExprData"""', '"""dimensions offsets"""'], {}), "('ExprData', 'dimensions offsets')\n", (1315, 1349), False, 'from collections import OrderedDict, namedtuple\n'), ((1467, 1480), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1478, 1480), False, 'from collections import OrderedDict, namedtuple\n'), ((1825, 1838), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1836, 1838), False, 'from collections import OrderedDict, namedtuple\n'), ((1852, 1865), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1863, 1865), False, 'from collections import OrderedDict, namedtuple\n'), ((3158, 3171), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3169, 3171), False, 'from collections import OrderedDict, namedtuple\n'), ((3893, 3927), 'devito.symbolics.retrieve_indexed', 'retrieve_indexed', (['expr'], {'mode': '"""all"""'}), "(expr, mode='all')\n", (3909, 3927), False, 'from devito.symbolics import retrieve_indexed, q_indirect\n'), ((1576, 1614), 'devito.symbolics.retrieve_indexed', 'retrieve_indexed', (['expr.rhs'], {'mode': '"""all"""'}), "(expr.rhs, mode='all')\n", (1592, 1614), False, 'from devito.symbolics import retrieve_indexed, q_indirect\n'), ((3320, 3367), 'devito.ir.support.Stencil.union', 'Stencil.union', (['*[i.anti_stencil for i in group]'], {}), '(*[i.anti_stencil for i in group])\n', (3333, 3367), False, 'from devito.ir.support import Stencil\n'), ((8415, 8424), 'devito.ir.support.Stencil', 'Stencil', ([], {}), '()\n', (8422, 8424), False, 'from devito.ir.support import Stencil\n'), ((1647, 1660), 'devito.symbolics.q_indirect', 'q_indirect', (['i'], {}), '(i)\n', (1657, 1660), False, 'from devito.symbolics import retrieve_indexed, q_indirect\n'), ((5039, 5066), 'numpy.mean', 'np.mean', (['strides'], {'dtype': 'int'}), '(strides, dtype=int)\n', (5046, 5066), True, 'import numpy as np\n')] |
import torch
import os
import numpy as np
from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder
from torchvision.transforms import transforms as T, InterpolationMode
from torch.utils.data import random_split, DataLoader
from kme.data.cub import Cub2011
from kme.data.risk_datasets import RiskDataset
from kme.data.benchmark_datasets import CreditCardFraudDataset, HeartDataset
from kme.data.text_classification import DATASETS as TEXT_DATASETS, get_text_dataset, CollateProcessor
from kme.data.compas import CompasDataset
from kme.data.tcr_datasets import get_tcr_dataset
from kme.data.single_cell_datasets import get_single_cell_dataset
from torch.utils.data import Subset
from kme.tools.config import load_config
from kme.data.MedMnist import get_medmnist_dataset
def get_loader_dataset(loader):
    """Return the underlying dataset of ``loader``, unwrapping any nesting of
    ``torch.utils.data.Subset`` wrappers."""
    current = loader.dataset
    while isinstance(current, Subset):
        current = current.dataset
    return current
# Per-dataset torchvision transform pipelines.
# Each entry maps 'train'/'test' to a T.Compose; 'train' pipelines add data
# augmentation, 'test' pipelines only convert to tensor and normalize with the
# dataset's channel statistics.
TRANSFORMS = {
    'cifar10': {
        'train': T.Compose([
            T.RandomHorizontalFlip(),
            T.RandomCrop(32, padding=4, padding_mode='reflect'),
            T.RandomAffine(15),
            T.ColorJitter(brightness=0.2, contrast=0.2),
            T.RandomGrayscale(p=0.1),
            T.ToTensor(),
            T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
        ]),
        'test': T.Compose([
            T.ToTensor(),
            T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
        ]),
    },
    'svhn': {
        'train': T.Compose([
            T.RandomCrop(32, padding=4, padding_mode='reflect'),
            T.ColorJitter(brightness=0.2, contrast=0.2),
            T.RandomGrayscale(p=0.5),
            T.ToTensor(),
            T.Normalize((0.4376821, 0.4437697, 0.47280442),
                        (0.19803012, 0.20101562, 0.19703614))
        ]),
        'test': T.Compose([
            T.ToTensor(),
            T.Normalize((0.4376821, 0.4437697, 0.47280442),
                        (0.19803012, 0.20101562, 0.19703614))
        ]),
    },
    # Same normalization as 'svhn' but with no train-time augmentation.
    'svhn_no_augment': {
        'train': T.Compose([
            T.ToTensor(),
            T.Normalize((0.4376821, 0.4437697, 0.47280442),
                        (0.19803012, 0.20101562, 0.19703614))
        ]),
        'test': T.Compose([
            T.ToTensor(),
            T.Normalize((0.4376821, 0.4437697, 0.47280442),
                        (0.19803012, 0.20101562, 0.19703614))
        ]),
    },
    'mnist': {
        'train': T.Compose([
            T.ToTensor(),
            T.Normalize((0.1307,), (0.3081,))
        ]),
        'test': T.Compose([
            T.ToTensor(),
            T.Normalize((0.1307,), (0.3081,))
        ]),
    },
    'fashionmnist': {
        'train': T.Compose([
            T.ToTensor(),
            T.Normalize((0.5,), (0.5,))
        ]),
        'test': T.Compose([
            T.ToTensor(),
            T.Normalize((0.5,), (0.5,))
        ]),
    },
    'imagenet': {
        'train': T.Compose([
            T.RandomResizedCrop(224),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ]),
        # NOTE(review): RandomResizedCrop at test time makes evaluation
        # non-deterministic; Resize + CenterCrop is the usual choice —
        # confirm this is intentional.
        'test': T.Compose([
            T.RandomResizedCrop(224),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ]),
    },
    'cub': {
        # NOTE(review): RandomGrayscale is applied after Normalize, i.e. on
        # normalized tensors — confirm this ordering is intentional.
        'train': T.Compose([
            T.Resize((224, 224)),
            T.ColorJitter(brightness=0.2, contrast=0.2),
            T.RandomHorizontalFlip(p=0.5),
            T.RandomCrop(224, padding=14),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            T.RandomGrayscale(p=0.2),
        ]),
        'test': T.Compose([
            T.Resize((224, 224)),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ]),
    },
    # Same resizing/normalization as 'cub' but with no train-time augmentation.
    'cub_no_augment': {
        'train': T.Compose([
            T.Resize((224, 224)),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]),
        'test': T.Compose([
            T.Resize((224, 224)),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ]),
    },
}
# CIFAR-100 reuses the CIFAR-10 augmentation/normalization pipelines.
TRANSFORMS['cifar100'] = TRANSFORMS['cifar10']
# Inverses of the per-dataset Normalize transforms above: Normalize with
# mean=-m/s and std=1/s computes (x + m/s) * s = x*s + m, mapping a normalized
# tensor back to the original value range (useful for visualization).
# Only defined for the datasets listed here.
REVERSE_TRANSFORM = {
    'cub': T.Normalize(mean=-np.array([0.485, 0.456, 0.406])/np.array([0.229, 0.224, 0.225]), std=1./np.array([0.229, 0.224, 0.225])),
    'svhn': T.Normalize(mean=-np.array([0.4376821, 0.4437697, 0.47280442])/np.array([0.19803012, 0.20101562, 0.19703614]), std=1./np.array([0.19803012, 0.20101562, 0.19703614])),
    'mnist': T.Normalize(mean=-np.array([0.1307])/np.array([0.3081]), std=1./np.array([0.3081])),
    'dermamnist': T.Normalize(mean=-np.array([0.5])/np.array([0.5]), std=1./np.array([0.5]))
}
# Registry of dataset constructors. Each entry maps 'train' (and optionally
# 'test') to a factory ``lambda root, args -> Dataset``; datasets without a
# 'test' entry are split in get_loaders.
DATASETS = {
    'cifar10': {
        'train': lambda root, args: CIFAR10(root=root, train=True, download=True, transform=TRANSFORMS['cifar10']['train']),
        'test': lambda root, args: CIFAR10(root=root, train=False, download=True, transform=TRANSFORMS['cifar10']['test'])
    },
    'cifar100': {
        'train': lambda root, args: CIFAR100(root=root, train=True, download=True, transform=TRANSFORMS['cifar100']['train']),
        'test': lambda root, args: CIFAR100(root=root, train=False, download=True, transform=TRANSFORMS['cifar100']['test'])
    },
    'svhn': {
        'train': lambda root, args: SVHN(root=root, split='train', download=True, transform=TRANSFORMS['svhn']['train']),
        'test': lambda root, args: SVHN(root=root, split='test', download=True, transform=TRANSFORMS['svhn']['test'])
    },
    'svhn_no_augment': {
        'train': lambda root, args: SVHN(root=root, split='train', download=True, transform=TRANSFORMS['svhn_no_augment']['train']),
        'test': lambda root, args: SVHN(root=root, split='test', download=True, transform=TRANSFORMS['svhn_no_augment']['test'])
    },
    'mnist': {
        'train': lambda root, args: MNIST(root=root, train=True, download=True, transform=TRANSFORMS['mnist']['train']),
        'test': lambda root, args: MNIST(root=root, train=False, download=True, transform=TRANSFORMS['mnist']['test'])
    },
    'fashionmnist': {
        'train': lambda root, args: FashionMNIST(root=root, train=True, download=True, transform=TRANSFORMS['fashionmnist']['train']),
        'test': lambda root, args: FashionMNIST(root=root, train=False, download=True, transform=TRANSFORMS['fashionmnist']['test'])
    },
    'adult': {
        'train': lambda root, args: RiskDataset(root=root, dataset='adult', transform=None)
    },
    'mushroom': {
        'train': lambda root, args: RiskDataset(root=root, dataset='mushroom', transform=None)
    },
    'mammo': {
        'train': lambda root, args: RiskDataset(root=root, dataset='mammo', transform=None)
    },
    'spambase': {
        # BUG FIX: previously loaded dataset='mushroom' (copy-paste error);
        # confirm RiskDataset supports the 'spambase' key.
        'train': lambda root, args: RiskDataset(root=root, dataset='spambase', transform=None)
    },
    'bank': {
        # BUG FIX: previously loaded dataset='mushroom' (copy-paste error);
        # confirm RiskDataset supports the 'bank' key.
        'train': lambda root, args: RiskDataset(root=root, dataset='bank', transform=None)
    },
    'compas': {
        'train': lambda root, args: CompasDataset(root=root)
    },
    'credit': {
        'train': lambda root, args: CreditCardFraudDataset(root=root)
    },
    'heart': {
        'train': lambda root, args: HeartDataset(root=root)
    },
    'imagenet': {
        'train': lambda root, args: ImageFolder(root=os.path.join(root, 'train'), transform=TRANSFORMS['imagenet']['train']),
        'test': lambda root, args: ImageFolder(root=os.path.join(root, 'val'), transform=TRANSFORMS['imagenet']['test'])
    },
    'cub': {
        'train': lambda root, args: Cub2011(root=root, transform=TRANSFORMS['cub']['train'], train=True),
        'test': lambda root, args: Cub2011(root=root, transform=TRANSFORMS['cub']['test'], train=False)
    },
    'cub_no_augment': {
        'train': lambda root, args: Cub2011(root=root, transform=TRANSFORMS['cub_no_augment']['train'], train=True),
        'test': lambda root, args: Cub2011(root=root, transform=TRANSFORMS['cub_no_augment']['test'], train=False)
    },
}
def get_loaders(dataset, dataroot, valid_split=0.2, batch_size=100, random_seed=None, test_split=0.2,
                dataset_args=None, shuffle=True, device="cpu"):
    """
    Build train/validation/test DataLoaders for ``dataset``.

    Parameters
    ----------
    dataset : str
        Dataset name: a key of DATASETS, a text dataset name, 'tcr',
        'single_cell' or 'medmnist'.
    dataroot : str
        Root directory where the data lives (or is downloaded to).
    valid_split : float
        Fraction of the training set held out for validation.
    batch_size : int
        Batch size used by all three loaders.
    random_seed : int or None
        If given, seeds torch before each random_split for reproducibility.
    test_split : float
        Fraction held out for testing when the dataset has no predefined
        test split.
    dataset_args : dict or None
        Extra keyword arguments forwarded to the dataset constructor.
    shuffle : bool
        Whether to shuffle the training loader.
    device : str
        Device forwarded to datasets that build tensors directly
        (tcr, single_cell).

    Returns
    -------
    tuple
        (train_loader, valid_loader, test_loader); all three use
        ``drop_last=True``.
    """
    # BUG FIX: the default used to be a mutable ``{}`` shared across calls.
    if dataset_args is None:
        dataset_args = {}
    if dataset in TEXT_DATASETS:
        train_set, test_set = get_text_dataset(
            dataset, root=dataroot, **dataset_args)
        # Text batches need padding/numericalization at collate time
        collate_fn = CollateProcessor(dataset, train_set.get_vocab())
    elif dataset == 'tcr':
        train_set, test_set = get_tcr_dataset(device, **dataset_args)
        collate_fn = None
    elif dataset == 'single_cell':
        train_set, test_set = get_single_cell_dataset(device, **dataset_args)
        collate_fn = None
    elif dataset == 'medmnist':
        train_set, test_set = get_medmnist_dataset(device, dataroot)
        collate_fn = None
    else:
        collate_fn = None
        train_set = DATASETS[dataset]['train'](dataroot, dataset_args)
        if 'test' in DATASETS[dataset]:
            test_set = DATASETS[dataset]['test'](dataroot, dataset_args)
        else:
            # No predefined test split: carve one out of the training data
            train_size = int(len(train_set)*(1.-test_split))
            test_size = len(train_set) - train_size
            if random_seed is not None:
                torch.manual_seed(random_seed)
            train_set, test_set = random_split(
                train_set, [train_size, test_size])
    # Hold out a validation split from what remains of the training data
    train_size = int(len(train_set)*(1.-valid_split))
    valid_size = len(train_set) - train_size
    if random_seed is not None:
        torch.manual_seed(random_seed)
    train_set, valid_set = random_split(train_set, [train_size, valid_size])
    train_loader = DataLoader(train_set, batch_size=batch_size,
                              shuffle=shuffle, collate_fn=collate_fn, drop_last=True)
    valid_loader = DataLoader(valid_set, batch_size=batch_size,
                              shuffle=False, collate_fn=collate_fn, drop_last=True)
    test_loader = DataLoader(test_set, batch_size=batch_size,
                             shuffle=False, collate_fn=collate_fn, drop_last=True)
    return train_loader, valid_loader, test_loader
| [
"torchvision.transforms.transforms.ColorJitter",
"kme.data.text_classification.get_text_dataset",
"torchvision.datasets.CIFAR10",
"kme.data.risk_datasets.RiskDataset",
"kme.data.benchmark_datasets.HeartDataset",
"torchvision.datasets.SVHN",
"os.path.join",
"torch.utils.data.DataLoader",
"torchvision... | [((9738, 9787), 'torch.utils.data.random_split', 'random_split', (['train_set', '[train_size, valid_size]'], {}), '(train_set, [train_size, valid_size])\n', (9750, 9787), False, 'from torch.utils.data import random_split, DataLoader\n'), ((9808, 9913), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'collate_fn': 'collate_fn', 'drop_last': '(True)'}), '(train_set, batch_size=batch_size, shuffle=shuffle, collate_fn=\n collate_fn, drop_last=True)\n', (9818, 9913), False, 'from torch.utils.data import random_split, DataLoader\n'), ((9958, 10061), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_set'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'collate_fn': 'collate_fn', 'drop_last': '(True)'}), '(valid_set, batch_size=batch_size, shuffle=False, collate_fn=\n collate_fn, drop_last=True)\n', (9968, 10061), False, 'from torch.utils.data import random_split, DataLoader\n'), ((10105, 10207), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'collate_fn': 'collate_fn', 'drop_last': '(True)'}), '(test_set, batch_size=batch_size, shuffle=False, collate_fn=\n collate_fn, drop_last=True)\n', (10115, 10207), False, 'from torch.utils.data import random_split, DataLoader\n'), ((8406, 8426), 'kme.data.text_classification.DATASETS.keys', 'TEXT_DATASETS.keys', ([], {}), '()\n', (8424, 8426), True, 'from kme.data.text_classification import DATASETS as TEXT_DATASETS, get_text_dataset, CollateProcessor\n'), ((8458, 8514), 'kme.data.text_classification.get_text_dataset', 'get_text_dataset', (['dataset'], {'root': 'dataroot'}), '(dataset, root=dataroot, **dataset_args)\n', (8474, 8514), False, 'from kme.data.text_classification import DATASETS as TEXT_DATASETS, get_text_dataset, CollateProcessor\n'), ((9680, 9710), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (9697, 9710), False, 'import 
torch\n'), ((4998, 5090), 'torchvision.datasets.CIFAR10', 'CIFAR10', ([], {'root': 'root', 'train': '(True)', 'download': '(True)', 'transform': "TRANSFORMS['cifar10']['train']"}), "(root=root, train=True, download=True, transform=TRANSFORMS[\n 'cifar10']['train'])\n", (5005, 5090), False, 'from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder\n'), ((5122, 5214), 'torchvision.datasets.CIFAR10', 'CIFAR10', ([], {'root': 'root', 'train': '(False)', 'download': '(True)', 'transform': "TRANSFORMS['cifar10']['test']"}), "(root=root, train=False, download=True, transform=TRANSFORMS[\n 'cifar10']['test'])\n", (5129, 5214), False, 'from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder\n'), ((5271, 5365), 'torchvision.datasets.CIFAR100', 'CIFAR100', ([], {'root': 'root', 'train': '(True)', 'download': '(True)', 'transform': "TRANSFORMS['cifar100']['train']"}), "(root=root, train=True, download=True, transform=TRANSFORMS[\n 'cifar100']['train'])\n", (5279, 5365), False, 'from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder\n'), ((5397, 5491), 'torchvision.datasets.CIFAR100', 'CIFAR100', ([], {'root': 'root', 'train': '(False)', 'download': '(True)', 'transform': "TRANSFORMS['cifar100']['test']"}), "(root=root, train=False, download=True, transform=TRANSFORMS[\n 'cifar100']['test'])\n", (5405, 5491), False, 'from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder\n'), ((5544, 5633), 'torchvision.datasets.SVHN', 'SVHN', ([], {'root': 'root', 'split': '"""train"""', 'download': '(True)', 'transform': "TRANSFORMS['svhn']['train']"}), "(root=root, split='train', download=True, transform=TRANSFORMS['svhn'][\n 'train'])\n", (5548, 5633), False, 'from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder\n'), ((5665, 5752), 'torchvision.datasets.SVHN', 'SVHN', ([], {'root': 'root', 'split': '"""test"""', 
'download': '(True)', 'transform': "TRANSFORMS['svhn']['test']"}), "(root=root, split='test', download=True, transform=TRANSFORMS['svhn'][\n 'test'])\n", (5669, 5752), False, 'from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder\n'), ((5816, 5916), 'torchvision.datasets.SVHN', 'SVHN', ([], {'root': 'root', 'split': '"""train"""', 'download': '(True)', 'transform': "TRANSFORMS['svhn_no_augment']['train']"}), "(root=root, split='train', download=True, transform=TRANSFORMS[\n 'svhn_no_augment']['train'])\n", (5820, 5916), False, 'from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder\n'), ((5948, 6046), 'torchvision.datasets.SVHN', 'SVHN', ([], {'root': 'root', 'split': '"""test"""', 'download': '(True)', 'transform': "TRANSFORMS['svhn_no_augment']['test']"}), "(root=root, split='test', download=True, transform=TRANSFORMS[\n 'svhn_no_augment']['test'])\n", (5952, 6046), False, 'from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder\n'), ((6100, 6188), 'torchvision.datasets.MNIST', 'MNIST', ([], {'root': 'root', 'train': '(True)', 'download': '(True)', 'transform': "TRANSFORMS['mnist']['train']"}), "(root=root, train=True, download=True, transform=TRANSFORMS['mnist'][\n 'train'])\n", (6105, 6188), False, 'from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder\n'), ((6220, 6308), 'torchvision.datasets.MNIST', 'MNIST', ([], {'root': 'root', 'train': '(False)', 'download': '(True)', 'transform': "TRANSFORMS['mnist']['test']"}), "(root=root, train=False, download=True, transform=TRANSFORMS['mnist'][\n 'test'])\n", (6225, 6308), False, 'from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder\n'), ((6369, 6471), 'torchvision.datasets.FashionMNIST', 'FashionMNIST', ([], {'root': 'root', 'train': '(True)', 'download': '(True)', 'transform': "TRANSFORMS['fashionmnist']['train']"}), "(root=root, 
train=True, download=True, transform=TRANSFORMS[\n 'fashionmnist']['train'])\n", (6381, 6471), False, 'from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder\n'), ((6503, 6605), 'torchvision.datasets.FashionMNIST', 'FashionMNIST', ([], {'root': 'root', 'train': '(False)', 'download': '(True)', 'transform': "TRANSFORMS['fashionmnist']['test']"}), "(root=root, train=False, download=True, transform=TRANSFORMS[\n 'fashionmnist']['test'])\n", (6515, 6605), False, 'from torchvision.datasets import MNIST, FashionMNIST, CIFAR10, CIFAR100, SVHN, ImageFolder\n'), ((6659, 6714), 'kme.data.risk_datasets.RiskDataset', 'RiskDataset', ([], {'root': 'root', 'dataset': '"""adult"""', 'transform': 'None'}), "(root=root, dataset='adult', transform=None)\n", (6670, 6714), False, 'from kme.data.risk_datasets import RiskDataset\n'), ((6776, 6834), 'kme.data.risk_datasets.RiskDataset', 'RiskDataset', ([], {'root': 'root', 'dataset': '"""mushroom"""', 'transform': 'None'}), "(root=root, dataset='mushroom', transform=None)\n", (6787, 6834), False, 'from kme.data.risk_datasets import RiskDataset\n'), ((6893, 6948), 'kme.data.risk_datasets.RiskDataset', 'RiskDataset', ([], {'root': 'root', 'dataset': '"""mammo"""', 'transform': 'None'}), "(root=root, dataset='mammo', transform=None)\n", (6904, 6948), False, 'from kme.data.risk_datasets import RiskDataset\n'), ((7010, 7068), 'kme.data.risk_datasets.RiskDataset', 'RiskDataset', ([], {'root': 'root', 'dataset': '"""mushroom"""', 'transform': 'None'}), "(root=root, dataset='mushroom', transform=None)\n", (7021, 7068), False, 'from kme.data.risk_datasets import RiskDataset\n'), ((7126, 7184), 'kme.data.risk_datasets.RiskDataset', 'RiskDataset', ([], {'root': 'root', 'dataset': '"""mushroom"""', 'transform': 'None'}), "(root=root, dataset='mushroom', transform=None)\n", (7137, 7184), False, 'from kme.data.risk_datasets import RiskDataset\n'), ((7245, 7269), 'kme.data.compas.CompasDataset', 'CompasDataset', ([], 
{'root': 'root'}), '(root=root)\n', (7258, 7269), False, 'from kme.data.compas import CompasDataset\n'), ((7330, 7363), 'kme.data.benchmark_datasets.CreditCardFraudDataset', 'CreditCardFraudDataset', ([], {'root': 'root'}), '(root=root)\n', (7352, 7363), False, 'from kme.data.benchmark_datasets import CreditCardFraudDataset, HeartDataset\n'), ((7423, 7446), 'kme.data.benchmark_datasets.HeartDataset', 'HeartDataset', ([], {'root': 'root'}), '(root=root)\n', (7435, 7446), False, 'from kme.data.benchmark_datasets import CreditCardFraudDataset, HeartDataset\n'), ((7775, 7843), 'kme.data.cub.Cub2011', 'Cub2011', ([], {'root': 'root', 'transform': "TRANSFORMS['cub']['train']", 'train': '(True)'}), "(root=root, transform=TRANSFORMS['cub']['train'], train=True)\n", (7782, 7843), False, 'from kme.data.cub import Cub2011\n'), ((7880, 7948), 'kme.data.cub.Cub2011', 'Cub2011', ([], {'root': 'root', 'transform': "TRANSFORMS['cub']['test']", 'train': '(False)'}), "(root=root, transform=TRANSFORMS['cub']['test'], train=False)\n", (7887, 7948), False, 'from kme.data.cub import Cub2011\n'), ((8016, 8095), 'kme.data.cub.Cub2011', 'Cub2011', ([], {'root': 'root', 'transform': "TRANSFORMS['cub_no_augment']['train']", 'train': '(True)'}), "(root=root, transform=TRANSFORMS['cub_no_augment']['train'], train=True)\n", (8023, 8095), False, 'from kme.data.cub import Cub2011\n'), ((8132, 8211), 'kme.data.cub.Cub2011', 'Cub2011', ([], {'root': 'root', 'transform': "TRANSFORMS['cub_no_augment']['test']", 'train': '(False)'}), "(root=root, transform=TRANSFORMS['cub_no_augment']['test'], train=False)\n", (8139, 8211), False, 'from kme.data.cub import Cub2011\n'), ((8655, 8694), 'kme.data.tcr_datasets.get_tcr_dataset', 'get_tcr_dataset', (['device'], {}), '(device, **dataset_args)\n', (8670, 8694), False, 'from kme.data.tcr_datasets import get_tcr_dataset\n'), ((1033, 1057), 'torchvision.transforms.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (1055, 1057), True, 
'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1071, 1122), 'torchvision.transforms.transforms.RandomCrop', 'T.RandomCrop', (['(32)'], {'padding': '(4)', 'padding_mode': '"""reflect"""'}), "(32, padding=4, padding_mode='reflect')\n", (1083, 1122), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1136, 1154), 'torchvision.transforms.transforms.RandomAffine', 'T.RandomAffine', (['(15)'], {}), '(15)\n', (1150, 1154), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1168, 1211), 'torchvision.transforms.transforms.ColorJitter', 'T.ColorJitter', ([], {'brightness': '(0.2)', 'contrast': '(0.2)'}), '(brightness=0.2, contrast=0.2)\n', (1181, 1211), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1225, 1249), 'torchvision.transforms.transforms.RandomGrayscale', 'T.RandomGrayscale', ([], {'p': '(0.1)'}), '(p=0.1)\n', (1242, 1249), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1263, 1275), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1273, 1275), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1289, 1351), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (1300, 1351), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1405, 1417), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1415, 1417), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1431, 1493), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (1442, 1493), True, 'from torchvision.transforms import transforms as T, 
InterpolationMode\n'), ((1569, 1620), 'torchvision.transforms.transforms.RandomCrop', 'T.RandomCrop', (['(32)'], {'padding': '(4)', 'padding_mode': '"""reflect"""'}), "(32, padding=4, padding_mode='reflect')\n", (1581, 1620), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1634, 1677), 'torchvision.transforms.transforms.ColorJitter', 'T.ColorJitter', ([], {'brightness': '(0.2)', 'contrast': '(0.2)'}), '(brightness=0.2, contrast=0.2)\n', (1647, 1677), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1691, 1715), 'torchvision.transforms.transforms.RandomGrayscale', 'T.RandomGrayscale', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1708, 1715), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1729, 1741), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1739, 1741), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1755, 1845), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', (['(0.4376821, 0.4437697, 0.47280442)', '(0.19803012, 0.20101562, 0.19703614)'], {}), '((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, \n 0.19703614))\n', (1766, 1845), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1917, 1929), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1927, 1929), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((1943, 2033), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', (['(0.4376821, 0.4437697, 0.47280442)', '(0.19803012, 0.20101562, 0.19703614)'], {}), '((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, \n 0.19703614))\n', (1954, 2033), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((2138, 2150), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2148, 2150), True, 'from torchvision.transforms 
import transforms as T, InterpolationMode\n'), ((2164, 2254), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', (['(0.4376821, 0.4437697, 0.47280442)', '(0.19803012, 0.20101562, 0.19703614)'], {}), '((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, \n 0.19703614))\n', (2175, 2254), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((2326, 2338), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2336, 2338), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((2352, 2442), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', (['(0.4376821, 0.4437697, 0.47280442)', '(0.19803012, 0.20101562, 0.19703614)'], {}), '((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, \n 0.19703614))\n', (2363, 2442), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((2537, 2549), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2547, 2549), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((2563, 2596), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (2574, 2596), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((2649, 2661), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2659, 2661), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((2675, 2708), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (2686, 2708), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((2791, 2803), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2801, 2803), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((2817, 2844), 
'torchvision.transforms.transforms.Normalize', 'T.Normalize', (['(0.5,)', '(0.5,)'], {}), '((0.5,), (0.5,))\n', (2828, 2844), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((2897, 2909), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2907, 2909), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((2923, 2950), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', (['(0.5,)', '(0.5,)'], {}), '((0.5,), (0.5,))\n', (2934, 2950), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3029, 3053), 'torchvision.transforms.transforms.RandomResizedCrop', 'T.RandomResizedCrop', (['(224)'], {}), '(224)\n', (3048, 3053), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3067, 3091), 'torchvision.transforms.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (3089, 3091), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3105, 3117), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (3115, 3117), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3131, 3197), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3142, 3197), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3250, 3274), 'torchvision.transforms.transforms.RandomResizedCrop', 'T.RandomResizedCrop', (['(224)'], {}), '(224)\n', (3269, 3274), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3288, 3300), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (3298, 3300), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3314, 3380), 
'torchvision.transforms.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3325, 3380), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3454, 3474), 'torchvision.transforms.transforms.Resize', 'T.Resize', (['(224, 224)'], {}), '((224, 224))\n', (3462, 3474), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3488, 3531), 'torchvision.transforms.transforms.ColorJitter', 'T.ColorJitter', ([], {'brightness': '(0.2)', 'contrast': '(0.2)'}), '(brightness=0.2, contrast=0.2)\n', (3501, 3531), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3545, 3574), 'torchvision.transforms.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (3567, 3574), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3588, 3617), 'torchvision.transforms.transforms.RandomCrop', 'T.RandomCrop', (['(224)'], {'padding': '(14)'}), '(224, padding=14)\n', (3600, 3617), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3631, 3643), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (3641, 3643), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3657, 3723), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3668, 3723), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3737, 3761), 'torchvision.transforms.transforms.RandomGrayscale', 'T.RandomGrayscale', ([], {'p': '(0.2)'}), '(p=0.2)\n', (3754, 3761), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3815, 3835), 
'torchvision.transforms.transforms.Resize', 'T.Resize', (['(224, 224)'], {}), '((224, 224))\n', (3823, 3835), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3849, 3861), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (3859, 3861), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((3875, 3941), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3886, 3941), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((4026, 4046), 'torchvision.transforms.transforms.Resize', 'T.Resize', (['(224, 224)'], {}), '((224, 224))\n', (4034, 4046), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((4060, 4072), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (4070, 4072), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((4086, 4152), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (4097, 4152), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((4206, 4226), 'torchvision.transforms.transforms.Resize', 'T.Resize', (['(224, 224)'], {}), '((224, 224))\n', (4214, 4226), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((4240, 4252), 'torchvision.transforms.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (4250, 4252), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((4266, 4332), 'torchvision.transforms.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', 
(4277, 4332), True, 'from torchvision.transforms import transforms as T, InterpolationMode\n'), ((4484, 4515), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (4492, 4515), True, 'import numpy as np\n'), ((4524, 4555), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (4532, 4555), True, 'import numpy as np\n'), ((4633, 4679), 'numpy.array', 'np.array', (['[0.19803012, 0.20101562, 0.19703614]'], {}), '([0.19803012, 0.20101562, 0.19703614])\n', (4641, 4679), True, 'import numpy as np\n'), ((4688, 4734), 'numpy.array', 'np.array', (['[0.19803012, 0.20101562, 0.19703614]'], {}), '([0.19803012, 0.20101562, 0.19703614])\n', (4696, 4734), True, 'import numpy as np\n'), ((4787, 4805), 'numpy.array', 'np.array', (['[0.3081]'], {}), '([0.3081])\n', (4795, 4805), True, 'import numpy as np\n'), ((4814, 4832), 'numpy.array', 'np.array', (['[0.3081]'], {}), '([0.3081])\n', (4822, 4832), True, 'import numpy as np\n'), ((4887, 4902), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (4895, 4902), True, 'import numpy as np\n'), ((4911, 4926), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (4919, 4926), True, 'import numpy as np\n'), ((8786, 8833), 'kme.data.single_cell_datasets.get_single_cell_dataset', 'get_single_cell_dataset', (['device'], {}), '(device, **dataset_args)\n', (8809, 8833), False, 'from kme.data.single_cell_datasets import get_single_cell_dataset\n'), ((4452, 4483), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (4460, 4483), True, 'import numpy as np\n'), ((4588, 4632), 'numpy.array', 'np.array', (['[0.4376821, 0.4437697, 0.47280442]'], {}), '([0.4376821, 0.4437697, 0.47280442])\n', (4596, 4632), True, 'import numpy as np\n'), ((4768, 4786), 'numpy.array', 'np.array', (['[0.1307]'], {}), '([0.1307])\n', (4776, 4786), True, 'import numpy as np\n'), ((4871, 4886), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (4879, 
4886), True, 'import numpy as np\n'), ((7525, 7552), 'os.path.join', 'os.path.join', (['root', '"""train"""'], {}), "(root, 'train')\n", (7537, 7552), False, 'import os\n'), ((7650, 7675), 'os.path.join', 'os.path.join', (['root', '"""val"""'], {}), "(root, 'val')\n", (7662, 7675), False, 'import os\n'), ((8922, 8960), 'kme.data.MedMnist.get_medmnist_dataset', 'get_medmnist_dataset', (['device', 'dataroot'], {}), '(device, dataroot)\n', (8942, 8960), False, 'from kme.data.MedMnist import get_medmnist_dataset\n'), ((9468, 9516), 'torch.utils.data.random_split', 'random_split', (['train_set', '[train_size, test_size]'], {}), '(train_set, [train_size, test_size])\n', (9480, 9516), False, 'from torch.utils.data import random_split, DataLoader\n'), ((9403, 9433), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (9420, 9433), False, 'import torch\n')] |
"""
Preparing data: Vectorize data
Pad your lists so that they all have the same length, turn them into an integer tensor of shape (samples, word_indices), and then use as the first layer in your network a layer capable of handling such integer tensors (the Embedding layer, which we’ll cover in detail later in the book).
OR
One-hot encode your lists to turn them into vectors of 0s and 1s. This would mean, for instance, turning the sequence [3, 5] into a 10,000-dimensional vec- tor that would be all 0s except for indices 3 and 5, which would be 1s. Then you could use as the first layer in your network a Dense layer, capable of handling floating-point vector data.
"""
import numpy as np
from keras.datasets import imdb
def vectorize_sequences(sequences, dimension=10_000):
# Creates an all-zero matrix of shape (len(sequences), dimension)
results = np.zeros((len(sequences), dimension))
for idx, sequence in enumerate(sequences):
# Sets specific indices of results[i] to 1s
results[idx, sequence] = 1.
return results
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
# Vectorize the data
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
# Vectorize the labels
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32') | [
"numpy.asarray",
"keras.datasets.imdb.load_data"
] | [((1121, 1152), 'keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'num_words': '(10000)'}), '(num_words=10000)\n', (1135, 1152), False, 'from keras.datasets import imdb\n'), ((1289, 1313), 'numpy.asarray', 'np.asarray', (['train_labels'], {}), '(train_labels)\n', (1299, 1313), True, 'import numpy as np\n'), ((1341, 1364), 'numpy.asarray', 'np.asarray', (['test_labels'], {}), '(test_labels)\n', (1351, 1364), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# vim :set ft=py:
from __future__ import print_function
import blosc
import pytest
from unittest.mock import patch
import numpy as np
from bloscpack.args import (BloscArgs,
BloscpackArgs,
MetadataArgs,
calculate_nchunks,
)
from bloscpack.compat_util import StringIO
from bloscpack.constants import (MAX_FORMAT_VERSION,
BLOSCPACK_HEADER_LENGTH,
BLOSC_HEADER_LENGTH,
)
from bloscpack.defaults import (DEFAULT_CHUNK_SIZE,
)
from bloscpack.exceptions import (FormatVersionMismatch,
ChecksumMismatch,
)
from bloscpack.file_io import (PlainFPSource,
PlainFPSink,
CompressedFPSource,
CompressedFPSink,
pack_file_to_file,
unpack_file_from_file,
pack_bytes_to_file,
unpack_bytes_from_file,
pack_bytes_to_bytes,
unpack_bytes_from_bytes,
_read_bloscpack_header,
_read_offsets,
_read_beginning,
_read_metadata,
_write_metadata,
)
from bloscpack.headers import (decode_blosc_header,
)
from bloscpack.pretty import reverse_pretty
from bloscpack.abstract_io import (pack, unpack)
from bloscpack.testutil import (create_array,
create_array_fp,
create_tmp_files,
cmp_fp,
cmp_file,
simple_progress,
)
def test_offsets():
with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):
create_array(1, in_file)
pack_file_to_file(in_file, out_file, chunk_size='2M')
with open(out_file, 'r+b') as input_fp:
bloscpack_header = _read_bloscpack_header(input_fp)
total_entries = bloscpack_header.total_prospective_chunks
offsets = _read_offsets(input_fp, bloscpack_header)
# First chunks should start after header and offsets
first = BLOSCPACK_HEADER_LENGTH + 8 * total_entries
# We assume that the others are correct
assert offsets[0] == first
assert 736 == offsets[0]
# try to read the second header
input_fp.seek(offsets[1], 0)
blosc_header_raw = input_fp.read(BLOSC_HEADER_LENGTH)
expected = {'versionlz': 1,
'version': 2,
'flags': 1,
'nbytes': 2097152,
'typesize': 8}
blosc_header = decode_blosc_header(blosc_header_raw)
blosc_header_slice = dict((k, blosc_header[k]) for k in expected.keys())
assert expected == blosc_header_slice
# now check the same thing again, but w/o any max_app_chunks
input_fp, output_fp = StringIO(), StringIO()
create_array_fp(1, input_fp)
nchunks, chunk_size, last_chunk_size = \
calculate_nchunks(input_fp.tell(), chunk_size='2M')
input_fp.seek(0, 0)
bloscpack_args = BloscpackArgs(max_app_chunks=0)
source = PlainFPSource(input_fp)
sink = CompressedFPSink(output_fp)
pack(source, sink,
nchunks, chunk_size, last_chunk_size,
bloscpack_args=bloscpack_args
)
output_fp.seek(0, 0)
bloscpack_header = _read_bloscpack_header(output_fp)
assert 0 == bloscpack_header.max_app_chunks
offsets = _read_offsets(output_fp, bloscpack_header)
assert 96 == offsets[0]
def test_metadata():
test_metadata = {'dtype': 'float64',
'shape': [1024],
'others': [],
}
received_metadata = pack_unpack_fp(1, metadata=test_metadata)
assert test_metadata == received_metadata
def test_metadata_opportunisitic_compression():
# make up some metadata that can be compressed with benefit
test_metadata = ("{'dtype': 'float64', 'shape': [1024], 'others': [],"
"'original_container': 'carray'}")
target_fp = StringIO()
_write_metadata(target_fp, test_metadata, MetadataArgs())
target_fp.seek(0, 0)
metadata, header = _read_metadata(target_fp)
assert 'zlib' == header['meta_codec']
# now do the same thing, but use badly compressible metadata
test_metadata = "abc"
target_fp = StringIO()
# default args say: do compression...
_write_metadata(target_fp, test_metadata, MetadataArgs())
target_fp.seek(0, 0)
metadata, header = _read_metadata(target_fp)
# but it wasn't of any use
assert 'None' == header['meta_codec']
def test_disable_offsets():
in_fp, out_fp, dcmp_fp = StringIO(), StringIO(), StringIO()
create_array_fp(1, in_fp)
in_fp_size = in_fp.tell()
in_fp.seek(0)
bloscpack_args = BloscpackArgs(offsets=False)
source = PlainFPSource(in_fp)
sink = CompressedFPSink(out_fp)
pack(source, sink,
*calculate_nchunks(in_fp_size),
bloscpack_args=bloscpack_args)
out_fp.seek(0)
bloscpack_header, metadata, metadata_header, offsets = \
_read_beginning(out_fp)
assert len(offsets) == 0
# this will cause a bug if we ever reach 255 format versions
@patch('bloscpack.file_io.FORMAT_VERSION', MAX_FORMAT_VERSION)
def test_invalid_format():
blosc_args = BloscArgs()
with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):
create_array(1, in_file)
pack_file_to_file(in_file, out_file, blosc_args=blosc_args)
with pytest.raises(FormatVersionMismatch):
unpack_file_from_file(out_file, dcmp_file)
def test_file_corruption():
with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):
create_array(1, in_file)
pack_file_to_file(in_file, out_file)
# now go in and modify a byte in the file
with open(out_file, 'r+b') as input_fp:
# read offsets and header
_read_offsets(input_fp,
_read_bloscpack_header(input_fp))
# read the blosc header of the first chunk
input_fp.read(BLOSC_HEADER_LENGTH)
# read four bytes
input_fp.read(4)
# read the fifth byte
fifth = input_fp.read(1)
# figure out what to replace it by
replace = b'\x00' if fifth == b'\xff' else b'\xff'
# seek one byte back relative to current position
input_fp.seek(-1, 1)
# write the flipped byte
input_fp.write(replace)
# now attempt to unpack it
with pytest.raises(ChecksumMismatch):
unpack_file_from_file(out_file, dcmp_file)
def pack_unpack(repeats, chunk_size=None, progress=False):
with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):
if progress:
print("Creating test array")
create_array(repeats, in_file, progress=progress)
if progress:
print("Compressing")
pack_file_to_file(in_file, out_file, chunk_size=chunk_size)
if progress:
print("Decompressing")
unpack_file_from_file(out_file, dcmp_file)
if progress:
print("Verifying")
cmp_file(in_file, dcmp_file)
def pack_unpack_fp(repeats, chunk_size=DEFAULT_CHUNK_SIZE,
progress=False, metadata=None):
in_fp, out_fp, dcmp_fp = StringIO(), StringIO(), StringIO()
if progress:
print("Creating test array")
create_array_fp(repeats, in_fp, progress=progress)
in_fp_size = in_fp.tell()
if progress:
print("Compressing")
in_fp.seek(0)
nchunks, chunk_size, last_chunk_size = \
calculate_nchunks(in_fp_size, chunk_size)
source = PlainFPSource(in_fp)
sink = CompressedFPSink(out_fp)
pack(source, sink,
nchunks, chunk_size, last_chunk_size,
metadata=metadata)
out_fp.seek(0)
if progress:
print("Decompressing")
source = CompressedFPSource(out_fp)
sink = PlainFPSink(dcmp_fp)
unpack(source, sink)
if progress:
print("Verifying")
cmp_fp(in_fp, dcmp_fp)
return source.metadata
def test_pack_unpack():
pack_unpack(1, chunk_size=reverse_pretty('1M'))
pack_unpack(1, chunk_size=reverse_pretty('2M'))
pack_unpack(1, chunk_size=reverse_pretty('4M'))
pack_unpack(1, chunk_size=reverse_pretty('8M'))
def test_pack_unpack_fp():
pack_unpack_fp(1, chunk_size=reverse_pretty('1M'))
pack_unpack_fp(1, chunk_size=reverse_pretty('2M'))
pack_unpack_fp(1, chunk_size=reverse_pretty('4M'))
pack_unpack_fp(1, chunk_size=reverse_pretty('8M'))
def test_pack_unpack_bytes_to_from_file():
array_ = np.linspace(0, 1e5)
input_bytes = array_.tostring()
with create_tmp_files() as (tdir, in_file, out_file, dcmp_file):
pack_bytes_to_file(input_bytes, out_file)
output_bytes, _ = unpack_bytes_from_file(out_file)
assert input_bytes == output_bytes
def test_pack_unpack_bytes_bytes():
a = np.linspace(0, 1e5)
b = a.tostring()
c = pack_bytes_to_bytes(b)
d, _ = unpack_bytes_from_bytes(c)
assert b == d
def pack_unpack_hard():
""" Test on somewhat larger arrays, but be nice to memory. """
# Array is apprx. 1.5 GB large
# should make apprx 1536 chunks
pack_unpack(100, chunk_size=reverse_pretty('1M'), progress=simple_progress)
def pack_unpack_extreme():
""" Test on somewhat larer arrays, uses loads of memory. """
# this will create a huge array, and then use the
# blosc.BLOSC_MAX_BUFFERSIZE as chunk-szie
pack_unpack(300, chunk_size=blosc.BLOSC_MAX_BUFFERSIZE,
progress=simple_progress)
| [
"bloscpack.file_io.CompressedFPSource",
"bloscpack.testutil.create_tmp_files",
"bloscpack.testutil.cmp_fp",
"bloscpack.file_io.PlainFPSource",
"bloscpack.file_io.CompressedFPSink",
"bloscpack.compat_util.StringIO",
"bloscpack.file_io.pack_bytes_to_file",
"bloscpack.file_io._read_beginning",
"bloscpa... | [((5795, 5856), 'unittest.mock.patch', 'patch', (['"""bloscpack.file_io.FORMAT_VERSION"""', 'MAX_FORMAT_VERSION'], {}), "('bloscpack.file_io.FORMAT_VERSION', MAX_FORMAT_VERSION)\n", (5800, 5856), False, 'from unittest.mock import patch\n'), ((3476, 3504), 'bloscpack.testutil.create_array_fp', 'create_array_fp', (['(1)', 'input_fp'], {}), '(1, input_fp)\n', (3491, 3504), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((3659, 3690), 'bloscpack.args.BloscpackArgs', 'BloscpackArgs', ([], {'max_app_chunks': '(0)'}), '(max_app_chunks=0)\n', (3672, 3690), False, 'from bloscpack.args import BloscArgs, BloscpackArgs, MetadataArgs, calculate_nchunks\n'), ((3704, 3727), 'bloscpack.file_io.PlainFPSource', 'PlainFPSource', (['input_fp'], {}), '(input_fp)\n', (3717, 3727), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((3739, 3766), 'bloscpack.file_io.CompressedFPSink', 'CompressedFPSink', (['output_fp'], {}), '(output_fp)\n', (3755, 3766), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((3771, 3863), 'bloscpack.abstract_io.pack', 'pack', (['source', 'sink', 'nchunks', 'chunk_size', 'last_chunk_size'], {'bloscpack_args': 'bloscpack_args'}), '(source, sink, nchunks, chunk_size, last_chunk_size, bloscpack_args=\n bloscpack_args)\n', (3775, 3863), False, 'from bloscpack.abstract_io import pack, unpack\n'), ((3935, 3968), 
'bloscpack.file_io._read_bloscpack_header', '_read_bloscpack_header', (['output_fp'], {}), '(output_fp)\n', (3957, 3968), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((4031, 4073), 'bloscpack.file_io._read_offsets', '_read_offsets', (['output_fp', 'bloscpack_header'], {}), '(output_fp, bloscpack_header)\n', (4044, 4073), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((4635, 4645), 'bloscpack.compat_util.StringIO', 'StringIO', ([], {}), '()\n', (4643, 4645), False, 'from bloscpack.compat_util import StringIO\n'), ((4756, 4781), 'bloscpack.file_io._read_metadata', '_read_metadata', (['target_fp'], {}), '(target_fp)\n', (4770, 4781), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((4932, 4942), 'bloscpack.compat_util.StringIO', 'StringIO', ([], {}), '()\n', (4940, 4942), False, 'from bloscpack.compat_util import StringIO\n'), ((5095, 5120), 'bloscpack.file_io._read_metadata', '_read_metadata', (['target_fp'], {}), '(target_fp)\n', (5109, 5120), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, 
pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((5292, 5317), 'bloscpack.testutil.create_array_fp', 'create_array_fp', (['(1)', 'in_fp'], {}), '(1, in_fp)\n', (5307, 5317), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((5387, 5415), 'bloscpack.args.BloscpackArgs', 'BloscpackArgs', ([], {'offsets': '(False)'}), '(offsets=False)\n', (5400, 5415), False, 'from bloscpack.args import BloscArgs, BloscpackArgs, MetadataArgs, calculate_nchunks\n'), ((5429, 5449), 'bloscpack.file_io.PlainFPSource', 'PlainFPSource', (['in_fp'], {}), '(in_fp)\n', (5442, 5449), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((5461, 5485), 'bloscpack.file_io.CompressedFPSink', 'CompressedFPSink', (['out_fp'], {}), '(out_fp)\n', (5477, 5485), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((5678, 5701), 'bloscpack.file_io._read_beginning', '_read_beginning', (['out_fp'], {}), '(out_fp)\n', (5693, 5701), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((5901, 5912), 'bloscpack.args.BloscArgs', 
'BloscArgs', ([], {}), '()\n', (5910, 5912), False, 'from bloscpack.args import BloscArgs, BloscpackArgs, MetadataArgs, calculate_nchunks\n'), ((8046, 8096), 'bloscpack.testutil.create_array_fp', 'create_array_fp', (['repeats', 'in_fp'], {'progress': 'progress'}), '(repeats, in_fp, progress=progress)\n', (8061, 8096), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((8248, 8289), 'bloscpack.args.calculate_nchunks', 'calculate_nchunks', (['in_fp_size', 'chunk_size'], {}), '(in_fp_size, chunk_size)\n', (8265, 8289), False, 'from bloscpack.args import BloscArgs, BloscpackArgs, MetadataArgs, calculate_nchunks\n'), ((8303, 8323), 'bloscpack.file_io.PlainFPSource', 'PlainFPSource', (['in_fp'], {}), '(in_fp)\n', (8316, 8323), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((8335, 8359), 'bloscpack.file_io.CompressedFPSink', 'CompressedFPSink', (['out_fp'], {}), '(out_fp)\n', (8351, 8359), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((8364, 8439), 'bloscpack.abstract_io.pack', 'pack', (['source', 'sink', 'nchunks', 'chunk_size', 'last_chunk_size'], {'metadata': 'metadata'}), '(source, sink, nchunks, chunk_size, last_chunk_size, metadata=metadata)\n', (8368, 8439), False, 'from bloscpack.abstract_io import pack, unpack\n'), ((8538, 8564), 'bloscpack.file_io.CompressedFPSource', 'CompressedFPSource', (['out_fp'], {}), '(out_fp)\n', (8556, 
8564), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((8576, 8596), 'bloscpack.file_io.PlainFPSink', 'PlainFPSink', (['dcmp_fp'], {}), '(dcmp_fp)\n', (8587, 8596), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((8601, 8621), 'bloscpack.abstract_io.unpack', 'unpack', (['source', 'sink'], {}), '(source, sink)\n', (8607, 8621), False, 'from bloscpack.abstract_io import pack, unpack\n'), ((8670, 8692), 'bloscpack.testutil.cmp_fp', 'cmp_fp', (['in_fp', 'dcmp_fp'], {}), '(in_fp, dcmp_fp)\n', (8676, 8692), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((9261, 9285), 'numpy.linspace', 'np.linspace', (['(0)', '(100000.0)'], {}), '(0, 100000.0)\n', (9272, 9285), True, 'import numpy as np\n'), ((9580, 9604), 'numpy.linspace', 'np.linspace', (['(0)', '(100000.0)'], {}), '(0, 100000.0)\n', (9591, 9604), True, 'import numpy as np\n'), ((9629, 9651), 'bloscpack.file_io.pack_bytes_to_bytes', 'pack_bytes_to_bytes', (['b'], {}), '(b)\n', (9648, 9651), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((9663, 9689), 'bloscpack.file_io.unpack_bytes_from_bytes', 
'unpack_bytes_from_bytes', (['c'], {}), '(c)\n', (9686, 9689), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((2142, 2160), 'bloscpack.testutil.create_tmp_files', 'create_tmp_files', ([], {}), '()\n', (2158, 2160), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((2210, 2234), 'bloscpack.testutil.create_array', 'create_array', (['(1)', 'in_file'], {}), '(1, in_file)\n', (2222, 2234), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((2243, 2296), 'bloscpack.file_io.pack_file_to_file', 'pack_file_to_file', (['in_file', 'out_file'], {'chunk_size': '"""2M"""'}), "(in_file, out_file, chunk_size='2M')\n", (2260, 2296), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((3449, 3459), 'bloscpack.compat_util.StringIO', 'StringIO', ([], {}), '()\n', (3457, 3459), False, 'from bloscpack.compat_util import StringIO\n'), ((3461, 3471), 'bloscpack.compat_util.StringIO', 'StringIO', ([], {}), '()\n', (3469, 3471), False, 'from bloscpack.compat_util import StringIO\n'), ((4692, 4706), 'bloscpack.args.MetadataArgs', 'MetadataArgs', ([], {}), '()\n', (4704, 4706), False, 'from bloscpack.args import BloscArgs, BloscpackArgs, MetadataArgs, calculate_nchunks\n'), ((5031, 5045), 'bloscpack.args.MetadataArgs', 'MetadataArgs', ([], {}), '()\n', (5043, 5045), False, 'from 
bloscpack.args import BloscArgs, BloscpackArgs, MetadataArgs, calculate_nchunks\n'), ((5253, 5263), 'bloscpack.compat_util.StringIO', 'StringIO', ([], {}), '()\n', (5261, 5263), False, 'from bloscpack.compat_util import StringIO\n'), ((5265, 5275), 'bloscpack.compat_util.StringIO', 'StringIO', ([], {}), '()\n', (5273, 5275), False, 'from bloscpack.compat_util import StringIO\n'), ((5277, 5287), 'bloscpack.compat_util.StringIO', 'StringIO', ([], {}), '()\n', (5285, 5287), False, 'from bloscpack.compat_util import StringIO\n'), ((5922, 5940), 'bloscpack.testutil.create_tmp_files', 'create_tmp_files', ([], {}), '()\n', (5938, 5940), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((5990, 6014), 'bloscpack.testutil.create_array', 'create_array', (['(1)', 'in_file'], {}), '(1, in_file)\n', (6002, 6014), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((6023, 6082), 'bloscpack.file_io.pack_file_to_file', 'pack_file_to_file', (['in_file', 'out_file'], {'blosc_args': 'blosc_args'}), '(in_file, out_file, blosc_args=blosc_args)\n', (6040, 6082), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((6228, 6246), 'bloscpack.testutil.create_tmp_files', 'create_tmp_files', ([], {}), '()\n', (6244, 6246), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((6296, 6320), 'bloscpack.testutil.create_array', 'create_array', (['(1)', 'in_file'], {}), '(1, in_file)\n', (6308, 6320), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, 
cmp_file, simple_progress\n'), ((6329, 6365), 'bloscpack.file_io.pack_file_to_file', 'pack_file_to_file', (['in_file', 'out_file'], {}), '(in_file, out_file)\n', (6346, 6365), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((7314, 7332), 'bloscpack.testutil.create_tmp_files', 'create_tmp_files', ([], {}), '()\n', (7330, 7332), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((7444, 7493), 'bloscpack.testutil.create_array', 'create_array', (['repeats', 'in_file'], {'progress': 'progress'}), '(repeats, in_file, progress=progress)\n', (7456, 7493), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((7556, 7615), 'bloscpack.file_io.pack_file_to_file', 'pack_file_to_file', (['in_file', 'out_file'], {'chunk_size': 'chunk_size'}), '(in_file, out_file, chunk_size=chunk_size)\n', (7573, 7615), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((7680, 7722), 'bloscpack.file_io.unpack_file_from_file', 'unpack_file_from_file', (['out_file', 'dcmp_file'], {}), '(out_file, dcmp_file)\n', (7701, 7722), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, 
_read_beginning, _read_metadata, _write_metadata\n'), ((7783, 7811), 'bloscpack.testutil.cmp_file', 'cmp_file', (['in_file', 'dcmp_file'], {}), '(in_file, dcmp_file)\n', (7791, 7811), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((7953, 7963), 'bloscpack.compat_util.StringIO', 'StringIO', ([], {}), '()\n', (7961, 7963), False, 'from bloscpack.compat_util import StringIO\n'), ((7965, 7975), 'bloscpack.compat_util.StringIO', 'StringIO', ([], {}), '()\n', (7973, 7975), False, 'from bloscpack.compat_util import StringIO\n'), ((7977, 7987), 'bloscpack.compat_util.StringIO', 'StringIO', ([], {}), '()\n', (7985, 7987), False, 'from bloscpack.compat_util import StringIO\n'), ((9326, 9344), 'bloscpack.testutil.create_tmp_files', 'create_tmp_files', ([], {}), '()\n', (9342, 9344), False, 'from bloscpack.testutil import create_array, create_array_fp, create_tmp_files, cmp_fp, cmp_file, simple_progress\n'), ((9394, 9435), 'bloscpack.file_io.pack_bytes_to_file', 'pack_bytes_to_file', (['input_bytes', 'out_file'], {}), '(input_bytes, out_file)\n', (9412, 9435), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((9462, 9494), 'bloscpack.file_io.unpack_bytes_from_file', 'unpack_bytes_from_file', (['out_file'], {}), '(out_file)\n', (9484, 9494), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((2376, 2408), 'bloscpack.file_io._read_bloscpack_header', 
'_read_bloscpack_header', (['input_fp'], {}), '(input_fp)\n', (2398, 2408), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((2501, 2542), 'bloscpack.file_io._read_offsets', '_read_offsets', (['input_fp', 'bloscpack_header'], {}), '(input_fp, bloscpack_header)\n', (2514, 2542), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((3184, 3221), 'bloscpack.headers.decode_blosc_header', 'decode_blosc_header', (['blosc_header_raw'], {}), '(blosc_header_raw)\n', (3203, 3221), False, 'from bloscpack.headers import decode_blosc_header\n'), ((5519, 5548), 'bloscpack.args.calculate_nchunks', 'calculate_nchunks', (['in_fp_size'], {}), '(in_fp_size)\n', (5536, 5548), False, 'from bloscpack.args import BloscArgs, BloscpackArgs, MetadataArgs, calculate_nchunks\n'), ((6096, 6132), 'pytest.raises', 'pytest.raises', (['FormatVersionMismatch'], {}), '(FormatVersionMismatch)\n', (6109, 6132), False, 'import pytest\n'), ((6146, 6188), 'bloscpack.file_io.unpack_file_from_file', 'unpack_file_from_file', (['out_file', 'dcmp_file'], {}), '(out_file, dcmp_file)\n', (6167, 6188), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((7156, 7187), 'pytest.raises', 
'pytest.raises', (['ChecksumMismatch'], {}), '(ChecksumMismatch)\n', (7169, 7187), False, 'import pytest\n'), ((7201, 7243), 'bloscpack.file_io.unpack_file_from_file', 'unpack_file_from_file', (['out_file', 'dcmp_file'], {}), '(out_file, dcmp_file)\n', (7222, 7243), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n'), ((8776, 8796), 'bloscpack.pretty.reverse_pretty', 'reverse_pretty', (['"""1M"""'], {}), "('1M')\n", (8790, 8796), False, 'from bloscpack.pretty import reverse_pretty\n'), ((8828, 8848), 'bloscpack.pretty.reverse_pretty', 'reverse_pretty', (['"""2M"""'], {}), "('2M')\n", (8842, 8848), False, 'from bloscpack.pretty import reverse_pretty\n'), ((8880, 8900), 'bloscpack.pretty.reverse_pretty', 'reverse_pretty', (['"""4M"""'], {}), "('4M')\n", (8894, 8900), False, 'from bloscpack.pretty import reverse_pretty\n'), ((8932, 8952), 'bloscpack.pretty.reverse_pretty', 'reverse_pretty', (['"""8M"""'], {}), "('8M')\n", (8946, 8952), False, 'from bloscpack.pretty import reverse_pretty\n'), ((9016, 9036), 'bloscpack.pretty.reverse_pretty', 'reverse_pretty', (['"""1M"""'], {}), "('1M')\n", (9030, 9036), False, 'from bloscpack.pretty import reverse_pretty\n'), ((9071, 9091), 'bloscpack.pretty.reverse_pretty', 'reverse_pretty', (['"""2M"""'], {}), "('2M')\n", (9085, 9091), False, 'from bloscpack.pretty import reverse_pretty\n'), ((9126, 9146), 'bloscpack.pretty.reverse_pretty', 'reverse_pretty', (['"""4M"""'], {}), "('4M')\n", (9140, 9146), False, 'from bloscpack.pretty import reverse_pretty\n'), ((9181, 9201), 'bloscpack.pretty.reverse_pretty', 'reverse_pretty', (['"""8M"""'], {}), "('8M')\n", (9195, 9201), False, 'from bloscpack.pretty import reverse_pretty\n'), ((9904, 9924), 
'bloscpack.pretty.reverse_pretty', 'reverse_pretty', (['"""1M"""'], {}), "('1M')\n", (9918, 9924), False, 'from bloscpack.pretty import reverse_pretty\n'), ((6564, 6596), 'bloscpack.file_io._read_bloscpack_header', '_read_bloscpack_header', (['input_fp'], {}), '(input_fp)\n', (6586, 6596), False, 'from bloscpack.file_io import PlainFPSource, PlainFPSink, CompressedFPSource, CompressedFPSink, pack_file_to_file, unpack_file_from_file, pack_bytes_to_file, unpack_bytes_from_file, pack_bytes_to_bytes, unpack_bytes_from_bytes, _read_bloscpack_header, _read_offsets, _read_beginning, _read_metadata, _write_metadata\n')] |
import numpy as np
from argparse import ArgumentParser
from scipy.misc import imsave
import os.path as osp
from utils import *
def _load(cuhk03_dir):
    """Load the 'cuhk-03.mat' archive located in `cuhk03_dir`.

    scipy's loadmat cannot read MAT v7.3 (HDF5-based) files and raises
    NotImplementedError for them, so fall back to hdf5storage in that case
    (or when the scipy import itself is unavailable).

    Returns the dict-like structure produced by the loadmat implementation.
    """
    try:
        from scipy.io import loadmat
        matdata = loadmat(osp.join(cuhk03_dir, 'cuhk-03.mat'))
    except (ImportError, NotImplementedError, ValueError):
        # A bare `except:` here would also swallow KeyboardInterrupt/SystemExit
        # and genuine I/O errors; only the expected failure modes are caught.
        from hdf5storage import loadmat
        matdata = loadmat(osp.join(cuhk03_dir, 'cuhk-03.mat'))
    return matdata
def main(args):
    """Convert the CUHK-03 .mat archive into per-identity JPEG files plus
    meta.json / split.json description files in `args.output_dir`.

    NOTE(review): uses `xrange`, so this script targets Python 2.
    """
    matdata = _load(args.cuhk03_dir)
    output_dir = args.output_dir
    # Although there are 5 pairs of camera views, we tile them up as one pair.
    mkdir_if_missing(osp.join(args.output_dir, 'cam_0'))
    mkdir_if_missing(osp.join(args.output_dir, 'cam_1'))
    identities = []
    # 'labeled'/'detected' appear to hold one cell per camera pair, each a
    # (num_identities, 10) array of images (columns 0-4: view 0, 5-9: view 1)
    # -- presumably; verify against the CUHK-03 release notes.
    for imgs_labeled, imgs_detected in zip(
            matdata['labeled'].squeeze(), matdata['detected'].squeeze()):
        # We merge the manually labeled and automatically detected images of
        # the same view.
        for i in xrange(imgs_labeled.shape[0]):
            pid = len(identities)  # global person id across all camera pairs
            p_images = []
            # view-0
            v_images = []
            for j in xrange(5):
                if imgs_labeled[i, j].size == 0:
                    break  # an empty cell ends this person's image list
                file_name = 'cam_0/{:05d}_{:05d}.jpg'.format(pid, len(v_images))
                imsave(osp.join(output_dir, file_name), imgs_labeled[i, j])
                v_images.append(file_name)
            for j in xrange(5):
                if imgs_detected[i, j].size == 0:
                    break
                file_name = 'cam_0/{:05d}_{:05d}.jpg'.format(pid, len(v_images))
                imsave(osp.join(output_dir, file_name), imgs_detected[i, j])
                v_images.append(file_name)
            p_images.append(v_images)
            # view-1
            v_images = []
            for j in xrange(5, 10):
                if imgs_labeled[i, j].size == 0:
                    break
                file_name = 'cam_1/{:05d}_{:05d}.jpg'.format(pid, len(v_images))
                imsave(osp.join(output_dir, file_name), imgs_labeled[i, j])
                v_images.append(file_name)
            for j in xrange(5, 10):
                if imgs_detected[i, j].size == 0:
                    break
                file_name = 'cam_1/{:05d}_{:05d}.jpg'.format(pid, len(v_images))
                imsave(osp.join(output_dir, file_name), imgs_detected[i, j])
                v_images.append(file_name)
            p_images.append(v_images)
            identities.append(p_images)
    # Save meta information into a json file
    meta = {'name': 'cuhk03', 'shot': 'multiple', 'num_cameras': 2}
    meta['identities'] = identities
    write_json(meta, osp.join(output_dir, 'meta.json'))
    # Save training and test splits into a json file
    # Person ids were numbered sequentially across camera pairs, so the
    # per-pair (i, j) indices of the .mat test sets are offset by the
    # cumulative identity counts.
    view_counts = [a.shape[0] for a in matdata['labeled'].squeeze()]
    vid_offsets = np.r_[0, np.cumsum(view_counts)]
    # Randomly pick one of the predefined test splits shipped in the .mat.
    test_info = np.random.choice(matdata['testsets'].squeeze())
    test_pids = []
    for i, j in test_info:
        pid = vid_offsets[i - 1] + j - 1  # .mat indices are 1-based
        test_pids.append(pid)
    test_pids.sort()
    trainval_pids = list(set(xrange(vid_offsets[-1])) - set(test_pids))
    split = {'trainval': trainval_pids,
             'test_probe': test_pids,
             'test_gallery': test_pids}
    write_json(split, osp.join(output_dir, 'split.json'))
if __name__ == '__main__':
    # Command-line entry point: source .mat directory and destination directory.
    parser = ArgumentParser(description="Convert the CUHK-03 dataset into the uniform format")
    parser.add_argument('cuhk03_dir', help="Root directory of the CUHK-03 dataset containing cuhk-03.mat")
    parser.add_argument('output_dir', help="Output directory for the formatted CUHK-03 dataset")
    main(parser.parse_args())
| [
"numpy.cumsum",
"os.path.join",
"argparse.ArgumentParser"
] | [((3345, 3431), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Convert the CUHK-03 dataset into the uniform format"""'}), "(description=\n 'Convert the CUHK-03 dataset into the uniform format')\n", (3359, 3431), False, 'from argparse import ArgumentParser\n'), ((584, 618), 'os.path.join', 'osp.join', (['args.output_dir', '"""cam_0"""'], {}), "(args.output_dir, 'cam_0')\n", (592, 618), True, 'import os.path as osp\n'), ((641, 675), 'os.path.join', 'osp.join', (['args.output_dir', '"""cam_1"""'], {}), "(args.output_dir, 'cam_1')\n", (649, 675), True, 'import os.path as osp\n'), ((2645, 2678), 'os.path.join', 'osp.join', (['output_dir', '"""meta.json"""'], {}), "(output_dir, 'meta.json')\n", (2653, 2678), True, 'import os.path as osp\n'), ((3267, 3301), 'os.path.join', 'osp.join', (['output_dir', '"""split.json"""'], {}), "(output_dir, 'split.json')\n", (3275, 3301), True, 'import os.path as osp\n'), ((225, 260), 'os.path.join', 'osp.join', (['cuhk03_dir', '"""cuhk-03.mat"""'], {}), "(cuhk03_dir, 'cuhk-03.mat')\n", (233, 260), True, 'import os.path as osp\n'), ((2829, 2851), 'numpy.cumsum', 'np.cumsum', (['view_counts'], {}), '(view_counts)\n', (2838, 2851), True, 'import numpy as np\n'), ((340, 375), 'os.path.join', 'osp.join', (['cuhk03_dir', '"""cuhk-03.mat"""'], {}), "(cuhk03_dir, 'cuhk-03.mat')\n", (348, 375), True, 'import os.path as osp\n'), ((1283, 1314), 'os.path.join', 'osp.join', (['output_dir', 'file_name'], {}), '(output_dir, file_name)\n', (1291, 1314), True, 'import os.path as osp\n'), ((1591, 1622), 'os.path.join', 'osp.join', (['output_dir', 'file_name'], {}), '(output_dir, file_name)\n', (1599, 1622), True, 'import os.path as osp\n'), ((1988, 2019), 'os.path.join', 'osp.join', (['output_dir', 'file_name'], {}), '(output_dir, file_name)\n', (1996, 2019), True, 'import os.path as osp\n'), ((2300, 2331), 'os.path.join', 'osp.join', (['output_dir', 'file_name'], {}), '(output_dir, file_name)\n', (2308, 2331), True, 'import 
os.path as osp\n')] |
import json
import argparse
import numpy as np
from terminaltables import AsciiTable
from core.config import config, update_config
def iou(pred, gt):
    """Temporal IoU between prediction interval(s) and ground-truth interval(s).

    Both `pred` and `gt` may be a single [start, end] pair or a list of such
    pairs; the result is squeezed accordingly (scalar, 1-D, or 2-D array).
    Note: the denominator is the spanning hull (earliest start to latest end),
    which is this codebase's convention.
    """
    assert isinstance(pred, list) and isinstance(gt, list)
    single_pred = not isinstance(pred[0], list)
    single_gt = not isinstance(gt[0], list)
    pred_arr = np.array([pred] if single_pred else pred)
    gt_arr = np.array([gt] if single_gt else gt)
    # Pairwise intersection lengths, clipped at zero for disjoint pairs.
    inter = np.maximum(
        0.0,
        np.minimum(pred_arr[:, 1, None], gt_arr[None, :, 1])
        - np.maximum(pred_arr[:, 0, None], gt_arr[None, :, 0]))
    # Pairwise hull lengths: min start to max end.
    hull = np.maximum(
        0.0,
        np.maximum(pred_arr[:, 1, None], gt_arr[None, :, 1])
        - np.minimum(pred_arr[:, 0, None], gt_arr[None, :, 0]))
    overlap = 1.0 * inter / hull
    if single_gt:
        overlap = overlap[:, 0]
    if single_pred:
        overlap = overlap[0]
    return overlap
def rank(pred, gt):
    """Return the 1-based position of `gt` within the ranked list `pred`."""
    return 1 + pred.index(gt)
def nms(dets, thresh=0.4, top_k=-1):
    """Greedy non-maximum suppression over 1-D temporal segments.

    `dets` is assumed ordered by descending confidence; a segment is dropped
    when its IoU with an already kept segment exceeds `thresh`.  At most
    `top_k` segments are kept when top_k > 0.  Returns the kept rows as a
    numpy array, or [] for empty input.
    """
    if len(dets) == 0:
        return []
    dets = np.array(dets)
    starts, ends = dets[:, 0], dets[:, 1]
    spans = ends - starts
    candidates = np.arange(len(dets))
    keep = []
    while candidates.size > 0:
        best = candidates[0]
        keep.append(best)
        if len(keep) == top_k:
            break
        rest = candidates[1:]
        # IoU of the newly kept segment against every remaining candidate.
        inter = np.maximum(0.0, np.minimum(ends[best], ends[rest])
                           - np.maximum(starts[best], starts[rest]))
        ratio = inter / (spans[best] + spans[rest] - inter)
        candidates = rest[ratio <= thresh]
    return dets[keep]
def eval(segments, data):
    """Compute the Rank@K / tIoU recall grid and mean IoU for predictions.

    Args:
        segments: per-sample lists of predicted [start, end] pairs, assumed
            ordered by descending confidence (NMS keeps earlier entries).
        data: per-sample annotation dicts; only the 'times' entry is read.

    Returns:
        (eval_result, miou) where eval_result[t][r] is the fraction of
        samples whose top recalls[r] predictions (after NMS) exceed tIoU
        tious[t], and miou is the mean per-sample overlap score.

    NOTE(review): shadows the `eval` builtin.  Thresholds come from the
    global `config.TEST` (comma-separated strings or single numbers).
    """
    tious = [float(i) for i in config.TEST.TIOU.split(',')] if isinstance(config.TEST.TIOU,str) else [config.TEST.TIOU]
    recalls = [int(i) for i in config.TEST.RECALL.split(',')] if isinstance(config.TEST.RECALL,str) else [config.TEST.RECALL]
    eval_result = [[[] for _ in recalls] for _ in tious]
    max_recall = max(recalls)
    average_iou = []
    for seg, dat in zip(segments, data):
        # Deduplicate predictions, keeping at most max_recall of them.
        seg = nms(seg, thresh=config.TEST.NMS_THRESH, top_k=max_recall).tolist()
        overlap = iou(seg, [dat['times']])
        # Mean of the (up to three) best overlaps of the first prediction;
        # with a single ground-truth pair this is just its overlap.
        average_iou.append(np.mean(np.sort(overlap[0])[-3:]))
        for i,t in enumerate(tious):
            for j,r in enumerate(recalls):
                eval_result[i][j].append((overlap > t)[:r].any())
    eval_result = np.array(eval_result).mean(axis=-1)
    miou = np.mean(average_iou)
    return eval_result, miou
def eval_predictions(segments, data, verbose=True):
    """Score predicted segments against `data`, optionally printing a table.

    Returns the (eval_result, miou) pair produced by `eval` unchanged.
    """
    scores, mean_overlap = eval(segments, data)
    if verbose:
        print(display_results(scores, mean_overlap, ''))
    return scores, mean_overlap
def display_results(eval_result, miou, title=None):
    """Format the Rank@K / tIoU grid and mIoU as an ASCII table string."""
    tious = [float(i) for i in config.TEST.TIOU.split(',')] if isinstance(config.TEST.TIOU, str) else [config.TEST.TIOU]
    recalls = [int(i) for i in config.TEST.RECALL.split(',')] if isinstance(config.TEST.RECALL, str) else [config.TEST.RECALL]
    # Header: one column per (recall, tiou) pair, plus a trailing mIoU column.
    header = ['Rank@{},mIoU@{}'.format(r, t) for r in recalls for t in tious] + ['mIoU']
    percentages = eval_result * 100
    row = ['{:.02f}'.format(percentages[t][r]) for r in range(len(recalls)) for t in range(len(tious))]
    row.append('{:.02f}'.format(miou * 100))
    table = AsciiTable([header, row], title)
    for column in range(len(tious) * len(recalls)):
        table.justify_columns[column] = 'center'
    return table.table
def parse_args():
    """Build the CLI parser, load the experiment config, then parse all options."""
    parser = argparse.ArgumentParser(description='Train localization network')
    # The config file must be known before the remaining options are parsed,
    # hence the two-phase parse_known_args / parse_args sequence.
    parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
    known, _ = parser.parse_known_args()
    update_config(known.cfg)
    parser.add_argument('--verbose', default=False, action="store_true", help='print progress bar')
    return parser.parse_args()
def reset_config(config, args):
    """Copy CLI overrides onto the global configuration object.

    Only truthy values are propagated, so the defaults already stored in
    `config` survive when the corresponding flag was not given.
    """
    verbose = args.verbose
    if verbose:
        config.VERBOSE = verbose
if __name__ == '__main__':
    # Moment-frequency-prior baseline on DiDeMo: rank candidate moments by how
    # often they occur as ground truth in the training set, then predict that
    # same fixed ranking for every validation sample.
    args = parse_args()
    reset_config(config, args)
    train_data = json.load(open('/data/home2/hacker01/Data/DiDeMo/train_data.json', 'r'))
    val_data = json.load(open('/data/home2/hacker01/Data/DiDeMo/val_data.json', 'r'))
    # Count how often each ground-truth moment occurs in the training data.
    moment_frequency_dict = {}
    for d in train_data:
        times = [t for t in d['times']]
        for time in times:
            time = tuple(time)  # lists are unhashable; use tuples as dict keys
            if time not in moment_frequency_dict.keys():
                moment_frequency_dict[time] = 0
            moment_frequency_dict[time] += 1
    # Most frequent moments first.
    prior = sorted(moment_frequency_dict, key=moment_frequency_dict.get, reverse=True)
    prior = [list(item) for item in prior]
    prediction = [prior for d in val_data]
eval_predictions(prediction, val_data) | [
"numpy.minimum",
"numpy.maximum",
"argparse.ArgumentParser",
"core.config.update_config",
"terminaltables.AsciiTable",
"core.config.config.TEST.TIOU.split",
"core.config.config.TEST.RECALL.split",
"numpy.sort",
"numpy.mean",
"numpy.array",
"numpy.where"
] | [((458, 502), 'numpy.maximum', 'np.maximum', (['pred[:, 0, None]', 'gt[None, :, 0]'], {}), '(pred[:, 0, None], gt[None, :, 0])\n', (468, 502), True, 'import numpy as np\n'), ((517, 561), 'numpy.minimum', 'np.minimum', (['pred[:, 1, None]', 'gt[None, :, 1]'], {}), '(pred[:, 1, None], gt[None, :, 1])\n', (527, 561), True, 'import numpy as np\n'), ((570, 611), 'numpy.maximum', 'np.maximum', (['(0.0)', '(inter_right - inter_left)'], {}), '(0.0, inter_right - inter_left)\n', (580, 611), True, 'import numpy as np\n'), ((629, 673), 'numpy.minimum', 'np.minimum', (['pred[:, 0, None]', 'gt[None, :, 0]'], {}), '(pred[:, 0, None], gt[None, :, 0])\n', (639, 673), True, 'import numpy as np\n'), ((688, 732), 'numpy.maximum', 'np.maximum', (['pred[:, 1, None]', 'gt[None, :, 1]'], {}), '(pred[:, 1, None], gt[None, :, 1])\n', (698, 732), True, 'import numpy as np\n'), ((741, 782), 'numpy.maximum', 'np.maximum', (['(0.0)', '(union_right - union_left)'], {}), '(0.0, union_right - union_left)\n', (751, 782), True, 'import numpy as np\n'), ((1150, 1164), 'numpy.array', 'np.array', (['dets'], {}), '(dets)\n', (1158, 1164), True, 'import numpy as np\n'), ((2479, 2499), 'numpy.mean', 'np.mean', (['average_iou'], {}), '(average_iou)\n', (2486, 2499), True, 'import numpy as np\n'), ((3358, 3389), 'terminaltables.AsciiTable', 'AsciiTable', (['display_data', 'title'], {}), '(display_data, title)\n', (3368, 3389), False, 'from terminaltables import AsciiTable\n'), ((3535, 3600), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train localization network"""'}), "(description='Train localization network')\n", (3558, 3600), False, 'import argparse\n'), ((3781, 3804), 'core.config.update_config', 'update_config', (['args.cfg'], {}), '(args.cfg)\n', (3794, 3804), False, 'from core.config import config, update_config\n'), ((412, 426), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (420, 426), True, 'import numpy as np\n'), ((428, 440), 'numpy.array', 
'np.array', (['gt'], {}), '(gt)\n', (436, 440), True, 'import numpy as np\n'), ((1374, 1406), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (1384, 1406), True, 'import numpy as np\n'), ((1421, 1453), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (1431, 1453), True, 'import numpy as np\n'), ((1470, 1496), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1)'], {}), '(0.0, xx2 - xx1)\n', (1480, 1496), True, 'import numpy as np\n'), ((1576, 1599), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (1584, 1599), True, 'import numpy as np\n'), ((2432, 2453), 'numpy.array', 'np.array', (['eval_result'], {}), '(eval_result)\n', (2440, 2453), True, 'import numpy as np\n'), ((1716, 1743), 'core.config.config.TEST.TIOU.split', 'config.TEST.TIOU.split', (['""","""'], {}), "(',')\n", (1738, 1743), False, 'from core.config import config, update_config\n'), ((1836, 1865), 'core.config.config.TEST.RECALL.split', 'config.TEST.RECALL.split', (['""","""'], {}), "(',')\n", (1860, 1865), False, 'from core.config import config, update_config\n'), ((2813, 2840), 'core.config.config.TEST.TIOU.split', 'config.TEST.TIOU.split', (['""","""'], {}), "(',')\n", (2835, 2840), False, 'from core.config import config, update_config\n'), ((2933, 2962), 'core.config.config.TEST.RECALL.split', 'config.TEST.RECALL.split', (['""","""'], {}), "(',')\n", (2957, 2962), False, 'from core.config import config, update_config\n'), ((2240, 2259), 'numpy.sort', 'np.sort', (['overlap[0]'], {}), '(overlap[0])\n', (2247, 2259), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
@description: Workflow for the semantic segmentation example
@author: <NAME>
@contact: <EMAIL>
@version: 2022.03.15
'''
#%% HEADER
# Modules
import itertools
import numpy as np
import tensorflow
from arthisto1960_utilities import *
from numpy import random
from os import path
from tensorflow.keras import callbacks, layers, models, preprocessing
# TensorFlow
# Report the runtime environment (version and GPU visibility) at import time.
print('TensorFlow version:', tensorflow.__version__)
print('GPU Available:', len(tensorflow.config.experimental.list_physical_devices('GPU')))
# Paths
# Input/output locations, relative to the working directory.
paths = dict(
    images='../data_1960/images',
    labels='../data_1960/labels',
    predictions='../data_1960/predictions',
    models='../data_1960/models'
)
#%% FUNCTIONS
# Converts images to blocks of a given size
def images_to_blocks(images:np.ndarray, imagesize:tuple, blocksize:tuple=(512, 512), shift:bool=False, mode:str='symmetric') -> np.ndarray:
    """Tile a batch of images into fixed-size blocks.

    The images are padded (with `mode`) so an integer grid of blocks covers
    them; `shift=True` adds one extra row/column of blocks, which offsets the
    grid by half a block relative to `shift=False`.

    Args:
        images: array of shape (nimages, width, height, nbands).
        imagesize: the images' shape tuple.
        blocksize: (blockwidth, blockheight) of the tiles.
        shift: whether to use the half-block-shifted grid.
        mode: padding mode forwarded to np.pad.

    Returns:
        Array of shape (nimages * nblocks, blockwidth, blockheight, nbands).
    """
    nimages, width, height, nbands = imagesize
    blockwidth, blockheight = blocksize
    ncols = width // blockwidth + 1 + shift
    nrows = height // blockheight + 1 + shift
    # Symmetric padding centres the block grid on the original image.
    padwidth = (ncols * blockwidth - width) // 2
    padheight = (nrows * blockheight - height) // 2
    padded = np.pad(images, ((0, 0), (padwidth, padwidth), (padheight, padheight), (0, 0)), mode=mode)
    tiled = padded.reshape(nimages, ncols, blockwidth, nrows, blockheight, nbands).swapaxes(2, 3)
    return tiled.reshape(-1, blockwidth, blockheight, nbands)
# Converts blocks to images of a given size
def blocks_to_images(blocks:np.ndarray, imagesize:tuple, blocksize:tuple=(512, 512), shift:bool=False) -> np.ndarray:
    """Inverse of images_to_blocks: reassemble blocks and crop the padding.

    Args:
        blocks: array of shape (nimages * nblocks, blockwidth, blockheight, nbands).
        imagesize: (nimages, width, height, nbands) of the target images.
        blocksize: (blockwidth, blockheight) used when the blocks were cut.
        shift: must match the value used by images_to_blocks.

    Returns:
        Array of shape imagesize.
    """
    nimages, width, height, nbands = imagesize
    blockwidth, blockheight = blocksize
    ncols = width // blockwidth + 1 + shift
    nrows = height // blockheight + 1 + shift
    # Same padding arithmetic as images_to_blocks, so the crop is exact.
    padwidth = (ncols * blockwidth - width) // 2
    padheight = (nrows * blockheight - height) // 2
    mosaic = blocks.reshape(-1, ncols, nrows, blockwidth, blockheight, nbands).swapaxes(2, 3)
    mosaic = mosaic.reshape(-1, width + 2 * padwidth, height + 2 * padheight, nbands)
    # Crop away the padding added when the blocks were produced.
    return mosaic[:, padwidth:width + padwidth, padheight:height + padheight, :]
# Splits the data multiple samples
def sample_split(images:np.ndarray, sizes:dict, seed:int=1) -> list:
    """Randomly partition the first axis of `images` into named samples.

    Args:
        images: array whose first axis indexes observations.
        sizes: mapping from sample name to sampling probability.
        seed: seed for numpy's global random state (reproducible splits).

    Returns:
        List of sub-arrays, one per key of `sizes`, in key order.
    """
    random.seed(seed)
    names = list(sizes.keys())
    # Draw a sample name for every observation, then group by name.
    assignment = random.choice(names, images.shape[0], p=list(sizes.values()))
    return [images[assignment == name, ...] for name in names]
# Displays prediction statistics
def display_statistics(image_test:np.ndarray, label_test:np.ndarray, proba_predict:np.ndarray, label_predict:np.ndarray) -> None:
    """Plot a 2x4 panel comparing a test image/label with the prediction and
    highlighting TP/TN/FP/FN pixels in yellow.

    NOTE(review): `pyplot` is not imported in this file's visible header;
    presumably it comes from `arthisto1960_utilities` via the star import --
    confirm.
    """
    # Format
    image_test = (image_test * 255).astype(int)  # assumes image in [0, 1] -- TODO confirm
    label_test = label_test.astype(bool)
    label_predict = label_predict.astype(bool)
    # Statistics
    mask_tp = np.logical_and(label_test, label_predict)
    mask_tn = np.logical_and(np.invert(label_test), np.invert(label_predict))
    mask_fp = np.logical_and(np.invert(label_test), label_predict)
    mask_fn = np.logical_and(label_test, np.invert(label_predict))
    # Augmented images
    colour = (255, 255, 0)  # yellow overlay for the highlighted pixels
    images = [np.where(np.tile(mask, (1, 1, 3)), colour, image_test) for mask in [mask_tp, mask_tn, mask_fp, mask_fn]]
    # Figure
    images = [image_test, label_test, proba_predict, label_predict] + images
    titles = ['Test image', 'Test label', 'Predicted probability', 'Predicted label', 'True positive', 'True negative', 'False positive', 'False negative']
    fig, axs = pyplot.subplots(2, 4, figsize=(20, 10))
    for image, title, ax in zip(images, titles, axs.ravel()):
        ax.imshow(image)
        ax.set_title(title, fontsize=20)
        ax.axis('off')
    pyplot.tight_layout(pad=2.0)
    pyplot.show()
#%% PREPARES DATA
# Training tiles
# `search_files` and `read_raster` come from arthisto1960_utilities (star
# import); image rasters are matched to label rasters by file-name convention.
labels = search_files(paths['labels'], 'tif$')
images = [path.join(paths['images'], path.basename(label).replace('label', 'image')) for label in labels]
# Loads images as blocks (including shifted)
# Both a regular and a half-block-shifted tiling are stacked, so every pixel
# appears at two different positions within a block.
images = np.array([read_raster(file) for file in images])
images = np.concatenate((
    images_to_blocks(images=images, imagesize=images.shape, blocksize=(256, 256), shift=True),
    images_to_blocks(images=images, imagesize=images.shape, blocksize=(256, 256), shift=False)
))
# Loads labels as blocks (including shifted)
labels = np.array([read_raster(file) for file in labels])
labels = np.concatenate((
    images_to_blocks(images=labels, imagesize=labels.shape, blocksize=(256, 256), shift=True),
    images_to_blocks(images=labels, imagesize=labels.shape, blocksize=(256, 256), shift=False)
))
# Drops empty blocks
def is_empty(image:np.ndarray, value:int=255) -> bool:
    # True when every pixel equals `value` (255 -- presumably the all-white
    # no-data fill; confirm against the raster source).
    test = np.equal(image, np.full(image.shape, value)).all()
    return test
keep = np.invert([is_empty(image) for image in list(images)])
images = images[keep]
labels = labels[keep]
del is_empty, keep
# Checks data
# for i in random.choice(range(len(images)), 3):
#     compare(images=[images[i], labels[i]], titles=['Image', 'Label'])
#%% COMPUTES SAMPLES
# Splits the blocks into training, validation and test samples (80/10/10).
samples_size = dict(train=0.8, valid=0.1, test=0.1)
# Fix: `sample_split` has no `array` parameter (its first parameter is named
# `images`), so the previous keyword calls raised TypeError; pass the data
# positionally, which matches the signature.
images_train, images_valid, images_test = sample_split(images, sizes=samples_size, seed=1)
labels_train, labels_valid, labels_test = sample_split(labels, sizes=samples_size, seed=1)
# Reuse the name to record the realised sample sizes.
samples_size = dict(train=len(images_train), valid=len(images_valid), test=len(images_test))
del images, labels
#%% MODEL
def conv_block(tensor, nfilters, size=3, padding='same', initializer="he_normal"):
    """Two consecutive (Conv2D -> BatchNorm -> ReLU) stages with `nfilters` filters."""
    out = tensor
    for _ in range(2):
        out = layers.Conv2D(filters=nfilters, kernel_size=(size, size), padding=padding, kernel_initializer=initializer)(out)
        out = layers.BatchNormalization()(out)
        out = layers.Activation("relu")(out)
    return out
def deconv_block(tensor, residual, nfilters, size=3, padding='same', strides=(2, 2)):
    """Upsample `tensor` with a transposed convolution, concatenate the encoder
    `residual` (skip connection) along channels, then apply a conv_block."""
    upsampled = layers.Conv2DTranspose(nfilters, kernel_size=(size, size), strides=strides, padding=padding)(tensor)
    merged = layers.concatenate([upsampled, residual], axis=3)
    return conv_block(merged, nfilters)
def init_unet(n_classes:int, input_size:tuple, nfilters:int):
    """Build a U-Net: four contracting levels, a bottleneck, and a symmetric
    expanding path with skip connections, ending in a per-pixel sigmoid head."""
    inputs = layers.Input(shape=input_size, name='image_input')
    # Encoder: filter counts double at each level, 2x2 max-pooling halves the grid.
    enc1 = conv_block(inputs, nfilters=nfilters)
    down1 = layers.MaxPooling2D(pool_size=(2, 2))(enc1)
    enc2 = conv_block(down1, nfilters=nfilters*2)
    down2 = layers.MaxPooling2D(pool_size=(2, 2))(enc2)
    enc3 = conv_block(down2, nfilters=nfilters*4)
    down3 = layers.MaxPooling2D(pool_size=(2, 2))(enc3)
    enc4 = conv_block(down3, nfilters=nfilters*8)
    down4 = layers.MaxPooling2D(pool_size=(2, 2))(enc4)
    down4 = layers.Dropout(0.5)(down4)
    # Bottleneck
    bottleneck = conv_block(down4, nfilters=nfilters*16)
    bottleneck = layers.Dropout(0.5)(bottleneck)
    # Decoder: transposed convolutions with skip connections to the encoder.
    dec1 = deconv_block(bottleneck, residual=enc4, nfilters=nfilters*8)
    dec1 = layers.Dropout(0.5)(dec1)
    dec2 = deconv_block(dec1, residual=enc3, nfilters=nfilters*4)
    dec2 = layers.Dropout(0.5)(dec2)
    dec3 = deconv_block(dec2, residual=enc2, nfilters=nfilters*2)
    dec4 = deconv_block(dec3, residual=enc1, nfilters=nfilters)
    # Per-pixel sigmoid output for binary (or multi-label) segmentation.
    outputs = layers.Conv2D(n_classes, kernel_size=(1, 1), activation='sigmoid')(dec4)
    return models.Model(inputs=inputs, outputs=outputs, name='Unet')
# Build and compile the model: single-channel sigmoid output over 256x256 RGB
# blocks, trained with binary focal cross-entropy.
unet = init_unet(n_classes=1, input_size=(256, 256, 3), nfilters=16)
unet.summary()
unet.compile(optimizer='adam', loss='binary_focal_crossentropy', metrics=['accuracy', 'Recall', 'Precision'])
del conv_block, deconv_block  # builder helpers are no longer needed
#%% PRE-PROCESSING
'''
Notes:
- The data generator must be fit to the training data to estimate the featurewise_center and featurewise_std_normalization
- The estimated parameters can be extracted using ImageDataGenerator.mean and ImageDataGenerator.std
- The batch size is only defined in the generator, the steps_per_epoch must be defined during training
- Fit a validation data generator only with the standardisation parameters
'''
# Build image generator
def generator(images:np.ndarray, labels:np.ndarray, args:dict, batch_size:int=32, shuffle:bool=True, seed:int=1) -> zip:
    """Yield synchronised (image batch, label batch) pairs.

    Two ImageDataGenerator instances are built from the same keyword
    arguments and driven with the same seed, so any random augmentation
    is applied identically to the images and their label masks.
    """
    def flow(arrays):
        # One independent generator per array family, sharing args/seed.
        datagen = preprocessing.image.ImageDataGenerator(**args)
        return datagen.flow(arrays, batch_size=batch_size, shuffle=shuffle, seed=seed)
    return zip(flow(images), flow(labels))
# Standardisation parameters
standardisation = dict(
rescale=1./255
)
# Augmentation parameters
augmentation = dict(
horizontal_flip=True,
vertical_flip=True,
rotation_range=45,
width_shift_range=0.25,
height_shift_range=0.25,
brightness_range=[0.75,1.25],
zoom_range=[0.75, 1.25],
fill_mode='reflect'
)
train_generator = generator(images_train, labels_train, dict(**standardisation, **augmentation))
valid_generator = generator(images_valid, labels_valid, standardisation)
test_generator = generator(images_test, labels_test, standardisation, shuffle=False)
del generator, standardisation, augmentation, images_train, labels_train, images_valid, labels_valid, images_test, labels_test
# Check
# images, labels = next(train_generator)
# for i in random.choice(range(len(images)), 5):
# compare(images=[images[i], labels[i]], titles=['Image', 'Label'])
# del(images, labels)
#%% ESTIMATES PARAMETERS
'''
Notes:
- Fitting the generator requires steps_per_epoch for the training samples and validation_steps for the validation sample
'''
# Callbacks
train_callbacks = [
callbacks.EarlyStopping(monitor='val_accuracy', patience=3, restore_best_weights=True),
callbacks.ModelCheckpoint(filepath='model/unet_{epoch:02d}_{val_accuracy:.4f}.h5', monitor='val_accuracy', save_best_only=True),
callbacks.BackupAndRestore(backup_dir='model')
]
training = unet.fit(
train_generator,
steps_per_epoch=samples_size['train'] // 32,
#validation_data=valid_generator,
#validation_steps=samples_size['valid'] // 32,
epochs=2,
verbose=1
# callbacks=train_callbacks
)
# Saves model
models.save_model(unet, '../data_1960/models/unet_baseline.h5')
#%% EVALUATES MODEL
# Compute statistics
performance = unet.evaluate(test_generator, steps=samples_size['test'] // 32) # 81%
print('Test loss: {:.4f}\nTest accuracy: {:.4f}'.format(*performance))
# Displays statistics
images_test, labels_test = next(test_generator)
probas_predict = unet.predict(images_test, verbose=1)
labels_predict = probas_predict >= 0.5
for i in random.choice(range(len(images)), 5):
display_statistics(image_test=images_test[i], label_test=labels_test[i], proba_predict=probas_predict[i], label_predict=labels_predict[i])
del images_test, labels_test, probas_predict, labels_predict
#%% PREDICTS NEW TILES
# Loads model
unet = models.load_model('../data_1960/models/unet_baseline.h5')
batch_size = 3
files_pred = search_files(paths['images_pred'], pattern='tif$')[:20]
batches = [files_pred[i:i + batch_size] for i in range(0, len(files_pred), batch_size)]
for batch in batches:
images = np.array([read_raster(file) for file in batch])
images = images_to_blocks(images=images, imagesize=images.shape) / 255
probas = unet.predict(images)
labels = probas >= 0.5
labels = blocks_to_images(labels, imagesize=(len(files_pred), 5000, 5000, 1))
output = [path.join(paths['labels_pred'], path.basename(file).replace('sc50', 'label')) for file in batch]
list(map(write_raster, labels_predict, batch, output))
files = search_files(paths['labels_pred'], pattern='tif$')
for file in files:
command = 'gdal_polygonize.py {} {}'.format(file, file.replace('tif', 'gpkg'))
os.system(command)
for file in files_pred:
os.system('open {}'.format(file)) | [
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.random.seed",
"tensorflow.keras.layers.MaxPooling2D",
"numpy.invert",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.concatenate",
"numpy.tile",
"tensorflow.keras.callbacks.EarlyStopping",
"numpy.pad",
"numpy... | [((10916, 10979), 'tensorflow.keras.models.save_model', 'models.save_model', (['unet', '"""../data_1960/models/unet_baseline.h5"""'], {}), "(unet, '../data_1960/models/unet_baseline.h5')\n", (10933, 10979), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((11639, 11696), 'tensorflow.keras.models.load_model', 'models.load_model', (['"""../data_1960/models/unet_baseline.h5"""'], {}), "('../data_1960/models/unet_baseline.h5')\n", (11656, 11696), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((1377, 1471), 'numpy.pad', 'np.pad', (['images', '((0, 0), (padwidth, padwidth), (padheight, padheight), (0, 0))'], {'mode': 'mode'}), '(images, ((0, 0), (padwidth, padwidth), (padheight, padheight), (0, 0\n )), mode=mode)\n', (1383, 1471), True, 'import numpy as np\n'), ((2699, 2716), 'numpy.random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2710, 2716), False, 'from numpy import random\n'), ((3266, 3307), 'numpy.logical_and', 'np.logical_and', (['label_test', 'label_predict'], {}), '(label_test, label_predict)\n', (3280, 3307), True, 'import numpy as np\n'), ((6536, 6577), 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['[y, residual]'], {'axis': '(3)'}), '([y, residual], axis=3)\n', (6554, 6577), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((6712, 6762), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'input_size', 'name': '"""image_input"""'}), "(shape=input_size, name='image_input')\n", (6724, 6762), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((7883, 7940), 'tensorflow.keras.models.Model', 'models.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': '"""Unet"""'}), "(inputs=inputs, outputs=outputs, name='Unet')\n", (7895, 7940), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((8812, 8858), 
'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'preprocessing.image.ImageDataGenerator', ([], {}), '(**args)\n', (8850, 8858), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((9007, 9053), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'preprocessing.image.ImageDataGenerator', ([], {}), '(**args)\n', (9045, 9053), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((10384, 10474), 'tensorflow.keras.callbacks.EarlyStopping', 'callbacks.EarlyStopping', ([], {'monitor': '"""val_accuracy"""', 'patience': '(3)', 'restore_best_weights': '(True)'}), "(monitor='val_accuracy', patience=3,\n restore_best_weights=True)\n", (10407, 10474), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((10476, 10612), 'tensorflow.keras.callbacks.ModelCheckpoint', 'callbacks.ModelCheckpoint', ([], {'filepath': '"""model/unet_{epoch:02d}_{val_accuracy:.4f}.h5"""', 'monitor': '"""val_accuracy"""', 'save_best_only': '(True)'}), "(filepath=\n 'model/unet_{epoch:02d}_{val_accuracy:.4f}.h5', monitor='val_accuracy',\n save_best_only=True)\n", (10501, 10612), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((10609, 10655), 'tensorflow.keras.callbacks.BackupAndRestore', 'callbacks.BackupAndRestore', ([], {'backup_dir': '"""model"""'}), "(backup_dir='model')\n", (10635, 10655), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((496, 555), 'tensorflow.config.experimental.list_physical_devices', 'tensorflow.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (548, 555), False, 'import tensorflow\n'), ((3337, 3358), 'numpy.invert', 'np.invert', (['label_test'], {}), '(label_test)\n', (3346, 3358), True, 'import numpy as np\n'), ((3360, 3384), 'numpy.invert', 'np.invert', (['label_predict'], {}), '(label_predict)\n', (3369, 3384), True, 'import numpy as np\n'), ((3415, 3436), 
'numpy.invert', 'np.invert', (['label_test'], {}), '(label_test)\n', (3424, 3436), True, 'import numpy as np\n'), ((3494, 3518), 'numpy.invert', 'np.invert', (['label_predict'], {}), '(label_predict)\n', (3503, 3518), True, 'import numpy as np\n'), ((5934, 6044), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': 'nfilters', 'kernel_size': '(size, size)', 'padding': 'padding', 'kernel_initializer': 'initializer'}), '(filters=nfilters, kernel_size=(size, size), padding=padding,\n kernel_initializer=initializer)\n', (5947, 6044), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((6057, 6084), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6082, 6084), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((6096, 6121), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (6113, 6121), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((6133, 6243), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': 'nfilters', 'kernel_size': '(size, size)', 'padding': 'padding', 'kernel_initializer': 'initializer'}), '(filters=nfilters, kernel_size=(size, size), padding=padding,\n kernel_initializer=initializer)\n', (6146, 6243), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((6251, 6278), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (6276, 6278), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((6290, 6315), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (6307, 6315), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((6427, 6523), 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['nfilters'], {'kernel_size': '(size, size)', 'strides': 'strides', 
'padding': 'padding'}), '(nfilters, kernel_size=(size, size), strides=strides,\n padding=padding)\n', (6449, 6523), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((6847, 6884), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6866, 6884), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((6959, 6996), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6978, 6996), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((7071, 7108), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (7090, 7108), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((7183, 7220), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (7202, 7220), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((7242, 7261), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (7256, 7261), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((7337, 7356), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (7351, 7356), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((7465, 7484), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (7479, 7484), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((7581, 7600), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (7595, 7600), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((7781, 7847), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['n_classes'], {'kernel_size': '(1, 1)', 
'activation': '"""sigmoid"""'}), "(n_classes, kernel_size=(1, 1), activation='sigmoid')\n", (7794, 7847), False, 'from tensorflow.keras import callbacks, layers, models, preprocessing\n'), ((3595, 3619), 'numpy.tile', 'np.tile', (['mask', '(1, 1, 3)'], {}), '(mask, (1, 1, 3))\n', (3602, 3619), True, 'import numpy as np\n'), ((4315, 4335), 'os.path.basename', 'path.basename', (['label'], {}), '(label)\n', (4328, 4335), False, 'from os import path\n'), ((5134, 5161), 'numpy.full', 'np.full', (['image.shape', 'value'], {}), '(image.shape, value)\n', (5141, 5161), True, 'import numpy as np\n'), ((12221, 12240), 'os.path.basename', 'path.basename', (['file'], {}), '(file)\n', (12234, 12240), False, 'from os import path\n')] |
import numpy as np
import pytest
from .coordinates_for_tests import COORDINATES
from podpac.datalib.terraintiles import TerrainTiles, get_tile_urls
from podpac import Coordinates, clinspace
@pytest.mark.integration
class TestTerrainTiles(object):
    """Integration checks for the TerrainTiles datalib node."""

    def test_common_coordinates(self):
        # Every shared test coordinate system should yield at least one finite value.
        node = TerrainTiles()
        for name, coords in COORDINATES.items():
            print("Evaluating: ", name)
            result = node.eval(coords)
            assert np.any(np.isfinite(result.data))

    def test_terrain_tiles(self):
        spatial = Coordinates([clinspace(40, 43, 1000), clinspace(-76, -72, 1000)], dims=["lat", "lon"])
        spatiotemporal = Coordinates(
            [clinspace(40, 43, 1000), clinspace(-76, -72, 1000), ["2018-01-01", "2018-01-02"]],
            dims=["lat", "lon", "time"],
        )
        node = TerrainTiles(tile_format="geotiff", zoom=8)
        for coords in (spatial, spatiotemporal):
            assert np.any(np.isfinite(node.eval(coords)))
        # Same evaluation through a cached node.
        cached = TerrainTiles(tile_format="geotiff", zoom=8, cache_ctrl=["ram", "disk"])
        assert np.any(np.isfinite(cached.eval(spatial)))
        # tile urls
        print(np.array(get_tile_urls("geotiff", 1)))
        print(np.array(get_tile_urls("geotiff", 9, coordinates=spatial)))
| [
"podpac.datalib.terraintiles.TerrainTiles",
"podpac.datalib.terraintiles.get_tile_urls",
"numpy.isfinite",
"podpac.clinspace"
] | [((304, 318), 'podpac.datalib.terraintiles.TerrainTiles', 'TerrainTiles', ([], {}), '()\n', (316, 318), False, 'from podpac.datalib.terraintiles import TerrainTiles, get_tile_urls\n'), ((798, 841), 'podpac.datalib.terraintiles.TerrainTiles', 'TerrainTiles', ([], {'tile_format': '"""geotiff"""', 'zoom': '(8)'}), "(tile_format='geotiff', zoom=8)\n", (810, 841), False, 'from podpac.datalib.terraintiles import TerrainTiles, get_tile_urls\n'), ((1006, 1077), 'podpac.datalib.terraintiles.TerrainTiles', 'TerrainTiles', ([], {'tile_format': '"""geotiff"""', 'zoom': '(8)', 'cache_ctrl': "['ram', 'disk']"}), "(tile_format='geotiff', zoom=8, cache_ctrl=['ram', 'disk'])\n", (1018, 1077), False, 'from podpac.datalib.terraintiles import TerrainTiles, get_tile_urls\n'), ((894, 913), 'numpy.isfinite', 'np.isfinite', (['output'], {}), '(output)\n', (905, 913), True, 'import numpy as np\n'), ((969, 988), 'numpy.isfinite', 'np.isfinite', (['output'], {}), '(output)\n', (980, 988), True, 'import numpy as np\n'), ((1130, 1149), 'numpy.isfinite', 'np.isfinite', (['output'], {}), '(output)\n', (1141, 1149), True, 'import numpy as np\n'), ((454, 473), 'numpy.isfinite', 'np.isfinite', (['o.data'], {}), '(o.data)\n', (465, 473), True, 'import numpy as np\n'), ((535, 558), 'podpac.clinspace', 'clinspace', (['(40)', '(43)', '(1000)'], {}), '(40, 43, 1000)\n', (544, 558), False, 'from podpac import Coordinates, clinspace\n'), ((560, 585), 'podpac.clinspace', 'clinspace', (['(-76)', '(-72)', '(1000)'], {}), '(-76, -72, 1000)\n', (569, 585), False, 'from podpac import Coordinates, clinspace\n'), ((648, 671), 'podpac.clinspace', 'clinspace', (['(40)', '(43)', '(1000)'], {}), '(40, 43, 1000)\n', (657, 671), False, 'from podpac import Coordinates, clinspace\n'), ((673, 698), 'podpac.clinspace', 'clinspace', (['(-76)', '(-72)', '(1000)'], {}), '(-76, -72, 1000)\n', (682, 698), False, 'from podpac import Coordinates, clinspace\n'), ((1195, 1222), 'podpac.datalib.terraintiles.get_tile_urls', 
'get_tile_urls', (['"""geotiff"""', '(1)'], {}), "('geotiff', 1)\n", (1208, 1222), False, 'from podpac.datalib.terraintiles import TerrainTiles, get_tile_urls\n'), ((1248, 1290), 'podpac.datalib.terraintiles.get_tile_urls', 'get_tile_urls', (['"""geotiff"""', '(9)'], {'coordinates': 'c'}), "('geotiff', 9, coordinates=c)\n", (1261, 1290), False, 'from podpac.datalib.terraintiles import TerrainTiles, get_tile_urls\n')] |
import itertools
import numpy as np
import pytest
from dnnv.nn.converters.tensorflow import *
from dnnv.nn.operations import *
def test_Transpose():
    """Default Transpose (no permutation) must reverse the axis order."""
    shape = (2, 3, 4)
    data = np.random.random_sample(shape).astype(np.float32)
    expected = np.transpose(data)

    # Constant operand: the converted op is called with no arguments.
    const_op = Transpose(data)
    assert np.allclose(TensorflowConverter().visit(const_op)().numpy(), expected)

    # Graph-input operand: the data is supplied at call time.
    input_op = Transpose(Input(shape, np.dtype(np.float32)))
    assert np.allclose(TensorflowConverter().visit(input_op)(data).numpy(), expected)
def test_Transpose_all_permutations():
    """Transpose must honour every explicit axis permutation."""
    shape = (2, 3, 4)
    data = np.random.random_sample(shape).astype(np.float32)
    for perm in itertools.permutations(range(len(shape))):
        expected = np.transpose(data, perm)
        op = Transpose(data, permutation=np.asarray(perm))
        result = TensorflowConverter().visit(op)().numpy()
        assert np.allclose(result, expected)
| [
"numpy.random.random_sample",
"numpy.asarray",
"numpy.allclose",
"numpy.dtype",
"numpy.transpose"
] | [((243, 261), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (255, 261), True, 'import numpy as np\n'), ((372, 394), 'numpy.allclose', 'np.allclose', (['result', 'z'], {}), '(result, z)\n', (383, 394), True, 'import numpy as np\n'), ((554, 576), 'numpy.allclose', 'np.allclose', (['result', 'z'], {}), '(result, z)\n', (565, 576), True, 'import numpy as np\n'), ((822, 853), 'numpy.transpose', 'np.transpose', (['data', 'permutation'], {}), '(data, permutation)\n', (834, 853), True, 'import numpy as np\n'), ((1016, 1038), 'numpy.allclose', 'np.allclose', (['result', 'z'], {}), '(result, z)\n', (1027, 1038), True, 'import numpy as np\n'), ((185, 215), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (208, 215), True, 'import numpy as np\n'), ((437, 457), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (445, 457), True, 'import numpy as np\n'), ((651, 681), 'numpy.random.random_sample', 'np.random.random_sample', (['shape'], {}), '(shape)\n', (674, 681), True, 'import numpy as np\n'), ((895, 918), 'numpy.asarray', 'np.asarray', (['permutation'], {}), '(permutation)\n', (905, 918), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function
import bcolz
from bcolz import carray, ctable
import numpy as np
from pandas import DataFrame
from collections import Iterator
from toolz import partition_all, keyfilter
import os
from datashape import to_numpy_dtype
from toolz import keyfilter
from toolz.curried import pipe, partial, map, concat
from .resource import resource
from .dispatch import dispatch
from .compute.bcolz import *
from .utils import keywords
__all__ = ['into', 'bcolz', 'chunks']
@dispatch(type, (ctable, carray))
def into(a, b, **kwargs):
    """Re-dispatch when the target is given as a *type* object: resolve the
    conversion registered for (a, type(b)) and apply it."""
    f = into.dispatch(a, type(b))
    return f(a, b, **kwargs)
@dispatch((tuple, set, list), (ctable, carray))
def into(o, b, **kwargs):
    # Materialise the bcolz container as a numpy array first, then convert
    # that array into o's container type.
    return into(o, into(np.ndarray(0), b))
@dispatch(Iterator, (ctable, carray))
def into(_, b, **kwargs):
    # Stream the container chunk-by-chunk: each chunk -> ndarray -> list of
    # rows, then flatten the chunks into one iterator.  `map` here is the
    # curried toolz version, so map(f) acts as a sequence transformer.
    return pipe(b, chunks, map(partial(into, np.ndarray(0))),
                           map(partial(into, list)),
                           concat)
@dispatch(np.ndarray, (ctable, carray))
def into(a, b, **kwargs):
    # Full read of the bcolz container into an in-memory numpy array.
    return b[:]
@dispatch(ctable, np.ndarray)
def into(a, b, **kwargs):
    """Wrap a (record) ndarray in a bcolz ctable."""
    return ctable(b, **kwargs)
@dispatch(carray, np.ndarray)
def into(a, b, **kwargs):
    # Drop kwargs the constructor does not accept.
    # NOTE(review): this filters against keywords(ctable), not
    # keywords(carray) — looks like a copy/paste from the ctable case;
    # confirm it is intended.
    kwargs = keyfilter(keywords(ctable).__contains__, kwargs)
    return carray(b, **kwargs)
@dispatch(carray, (tuple, list))
def into(a, b, dtype=None, **kwargs):
    # Coerce the Python sequence through numpy first, then build the carray
    # from the resulting array via the (carray, ndarray) overload.
    x = into(np.ndarray(0), b, dtype=dtype)
    kwargs = keyfilter(keywords(ctable).__contains__, kwargs)
    return into(a, x, **kwargs)
@dispatch(carray, carray)
def into(a, b, **kwargs):
    # `a` may be the carray *type* (pass b through unchanged) or an
    # instance (extend it in place with b's rows).
    if isinstance(a, type):
        return b
    else:
        a.append(iter(b))
        return a
@dispatch(ctable, (tuple, list))
def into(a, b, names=None, types=None, **kwargs):
    """Build a ctable from a sequence of rows or a sequence of columns.

    If the first element is itself a tuple/list, ``b`` is treated as rows
    and transposed into columns; otherwise ``b`` is taken to be a single
    column sequence.

    NOTE(review): the column branch builds a structured dtype from
    zip(names, types) — with the default ``types=None`` that zip raises;
    callers presumably always pass ``types`` on that path.  Verify.
    """
    if isinstance(b[0], (tuple, list)):
        if not types:
            types=[None] * len(b[0])
        return ctable([into(np.ndarray(0), c2, dtype=dt)
                        for (c2, dt) in zip(zip(*b), types)], names,
                        **kwargs)
    else:
        if not names:
            names =[None] * len(b)
        arr = into(np.ndarray(0), b, dtype=np.dtype(list(zip(names, types))))
        return ctable(arr, names, **kwargs)
@dispatch((carray, ctable), Iterator)
def into(a, b, **kwargs):
    """Consume an iterator in chunks of 1024 rows, appending to the container.

    NOTE(review): ``next(chunks)`` raises StopIteration when the iterator
    is empty; confirm callers never pass an exhausted iterator.
    """
    kwargs = keyfilter(keywords(ctable).__contains__, kwargs)
    chunks = partition_all(1024, b)
    chunk = next(chunks)
    # Seed the container with the first chunk, then append row-transposed
    # chunks directly.
    a = into(a, chunk, **kwargs)
    for chunk in chunks:
        a.append(list(zip(*chunk)))
    a.flush()
    return a
@dispatch(DataFrame, ctable)
def into(a, b, columns=None, schema=None, **kwargs):
    # Column order may be given explicitly or derived from a datashape schema.
    # NOTE(review): `dshape` is not imported by name in this module — it is
    # presumably brought in by one of the star imports above; verify.
    if not columns and schema:
        columns = dshape(schema)[0].names
    return DataFrame.from_items(((column, b[column][:]) for column in
                                sorted(b.names)),
                            orient='columns',
                            columns=columns)
from .compute.chunks import ChunkIterator, chunks
@dispatch((carray, ctable), ChunkIterator)
def into(a, b, **kwargs):
    # Seed the container from the first chunk, then append the remaining
    # chunks (each coerced through an ndarray first).
    b = iter(b)
    a = into(a, next(b), **kwargs)
    for chunk in b:
        a.append(into(np.ndarray(0), chunk))
    a.flush()
    return a
from blaze.data.core import DataDescriptor
@dispatch(DataDescriptor, (ctable, carray))
def into(a, b, **kwargs):
    # Feed the container's chunks straight into the data descriptor.
    a.extend_chunks(chunks(b))
    return a
@resource.register(r'.+\.bcolz/?')
def resource_bcolz(rootdir, **kwargs):
    """Open (or create) a bcolz ctable rooted at ``rootdir``.

    An existing directory is opened as-is.  A missing one requires a
    ``dshape=`` keyword, from which an empty table with the derived numpy
    dtype is created.

    Raises:
        ValueError: if ``rootdir`` does not exist and no dshape was given.
    """
    # Raw string for the registration pattern: '\.' in a plain string is an
    # invalid escape (DeprecationWarning); the regex itself is unchanged.
    if os.path.exists(rootdir):
        kwargs = keyfilter(keywords(ctable).__contains__, kwargs)
        return ctable(rootdir=rootdir, **kwargs)
    else:
        if 'dshape' in kwargs:
            dtype = to_numpy_dtype(kwargs['dshape'])
            # Filter kwargs down to those the ctable constructor accepts.
            kwargs = keyfilter(keywords(ctable).__contains__, kwargs)
            return ctable(np.empty(0, dtype), rootdir=rootdir, **kwargs)
        else:
            raise ValueError("File does not exist and no `dshape=` given")
| [
"bcolz.ctable",
"bcolz.carray",
"datashape.to_numpy_dtype",
"numpy.empty",
"os.path.exists",
"toolz.curried.partial",
"toolz.partition_all",
"numpy.ndarray"
] | [((1137, 1156), 'bcolz.ctable', 'ctable', (['b'], {}), '(b, **kwargs)\n', (1143, 1156), False, 'from bcolz import carray, ctable\n'), ((1288, 1307), 'bcolz.carray', 'carray', (['b'], {}), '(b, **kwargs)\n', (1294, 1307), False, 'from bcolz import carray, ctable\n'), ((2343, 2365), 'toolz.partition_all', 'partition_all', (['(1024)', 'b'], {}), '(1024, b)\n', (2356, 2365), False, 'from toolz import partition_all, keyfilter\n'), ((3398, 3421), 'os.path.exists', 'os.path.exists', (['rootdir'], {}), '(rootdir)\n', (3412, 3421), False, 'import os\n'), ((1394, 1407), 'numpy.ndarray', 'np.ndarray', (['(0)'], {}), '(0)\n', (1404, 1407), True, 'import numpy as np\n'), ((2173, 2201), 'bcolz.ctable', 'ctable', (['arr', 'names'], {}), '(arr, names, **kwargs)\n', (2179, 2201), False, 'from bcolz import carray, ctable\n'), ((3504, 3537), 'bcolz.ctable', 'ctable', ([], {'rootdir': 'rootdir'}), '(rootdir=rootdir, **kwargs)\n', (3510, 3537), False, 'from bcolz import carray, ctable\n'), ((749, 762), 'numpy.ndarray', 'np.ndarray', (['(0)'], {}), '(0)\n', (759, 762), True, 'import numpy as np\n'), ((927, 946), 'toolz.curried.partial', 'partial', (['into', 'list'], {}), '(into, list)\n', (934, 946), False, 'from toolz.curried import pipe, partial, map, concat\n'), ((2099, 2112), 'numpy.ndarray', 'np.ndarray', (['(0)'], {}), '(0)\n', (2109, 2112), True, 'import numpy as np\n'), ((3599, 3631), 'datashape.to_numpy_dtype', 'to_numpy_dtype', (["kwargs['dshape']"], {}), "(kwargs['dshape'])\n", (3613, 3631), False, 'from datashape import to_numpy_dtype\n'), ((879, 892), 'numpy.ndarray', 'np.ndarray', (['(0)'], {}), '(0)\n', (889, 892), True, 'import numpy as np\n'), ((3107, 3120), 'numpy.ndarray', 'np.ndarray', (['(0)'], {}), '(0)\n', (3117, 3120), True, 'import numpy as np\n'), ((3728, 3746), 'numpy.empty', 'np.empty', (['(0)', 'dtype'], {}), '(0, dtype)\n', (3736, 3746), True, 'import numpy as np\n'), ((1883, 1896), 'numpy.ndarray', 'np.ndarray', (['(0)'], {}), '(0)\n', (1893, 1896), 
True, 'import numpy as np\n')] |
import re
import numpy as np
import collections
def getText(end=10000):
    """Read up to `end` lines of coopertext.txt as a character-code vector.

    Runs of spaces are collapsed and newlines become single spaces, then
    every character is mapped to its ordinal value.

    Parameters:
        end: maximum number of lines to read.

    Returns:
        1-D float32 numpy array of character ordinals.
    """
    text = ''
    # `with` guarantees the handle is closed (the original leaked it).
    with open('coopertext.txt', 'r') as f:
        i = 0
        while i < end:
            line = f.readline()
            if not line:
                break
            line = re.sub(' +', ' ', line)
            line = re.sub('\n', ' ', line)
            text += line
            i += 1
    return np.asarray([ord(ch) for ch in text]).astype(np.float32, copy=False)
def get_words():
    """Read coopertext.txt and return its lower-cased, punctuation-free words.

    Whitespace is normalised (runs of spaces collapsed, newlines turned into
    spaces) and anything that is not a word character or whitespace is
    stripped before splitting on whitespace.

    Returns:
        list of str tokens.
    """
    text = ''
    # `with` guarantees the handle is closed (the original leaked it).
    with open('coopertext.txt', 'r') as f:
        for line in f:
            line = re.sub(' +', ' ', line)
            line = re.sub('\n', ' ', line)
            line = re.sub(r"[^\w\s]+", "", line)
            text += line.lower()
    return text.split()
# Step 2: Build the dictionary and replace rare words with UNK token.
def build_dataset(words, vocabulary_size):
    """Encode `words` as integer ids over the most common tokens.

    Id 0 is reserved for 'UNK'; every word outside the kept vocabulary of
    size `vocabulary_size` maps to it.

    Returns:
        (data, count, dictionary, reverse_dictionary) where data is the
        id-encoded word list and count[0][1] holds the number of
        out-of-vocabulary occurrences.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    # Ids are assigned in frequency order, starting with UNK at 0.
    dictionary = {word: index for index, (word, _) in enumerate(count)}
    data = [dictionary.get(word, 0) for word in words]
    count[0][1] = sum(1 for word in words if word not in dictionary)
    reverse_dictionary = {index: word for word, index in dictionary.items()}
    return data, count, dictionary, reverse_dictionary
def get_data_vectors(vec_len=10, end=1):
    """Build (input, target) pairs for next-character prediction.

    Each input is a window of `vec_len` character codes taken from
    coopertext.txt; each target is a one-hot vector (length 128, ASCII)
    for the character that follows the window.

    Parameters:
        vec_len: length of each input window.
        end: maximum number of lines of coopertext.txt to use.

    Returns:
        [x, y]: x is a list of float32 arrays of shape (vec_len,);
                y is a list of one-hot numpy arrays of shape (128,).
    """
    # Bug fix: `end` was previously hard-coded to getText(end=1),
    # silently ignoring the caller's argument.
    text = getText(end=end)
    x = []
    y = []
    for i in range((len(text) - 1) // vec_len):
        x.append(text[i:vec_len + i])
        target = text[i + vec_len]
        one_hot = np.zeros(128)
        one_hot[int(target)] = 1
        y.append(one_hot)
    # The original also called np.asarray(x).astype(...) and discarded the
    # results (no-ops); those dead statements are removed.
    return [x, y]
| [
"numpy.zeros",
"collections.Counter",
"numpy.asarray",
"re.sub"
] | [((193, 216), 're.sub', 're.sub', (['""" +"""', '""" """', 'line'], {}), "(' +', ' ', line)\n", (199, 216), False, 'import re\n'), ((224, 247), 're.sub', 're.sub', (['"""\n"""', '""" """', 'line'], {}), "('\\n', ' ', line)\n", (230, 247), False, 'import re\n'), ((530, 553), 're.sub', 're.sub', (['""" +"""', '""" """', 'line'], {}), "(' +', ' ', line)\n", (536, 553), False, 'import re\n'), ((561, 584), 're.sub', 're.sub', (['"""\n"""', '""" """', 'line'], {}), "('\\n', ' ', line)\n", (567, 584), False, 'import re\n'), ((592, 622), 're.sub', 're.sub', (['"""[^\\\\w\\\\s]+"""', '""""""', 'line'], {}), "('[^\\\\w\\\\s]+', '', line)\n", (598, 622), False, 'import re\n'), ((1535, 1548), 'numpy.zeros', 'np.zeros', (['(128)'], {}), '(128)\n', (1543, 1548), True, 'import numpy as np\n'), ((354, 370), 'numpy.asarray', 'np.asarray', (['text'], {}), '(text)\n', (364, 370), True, 'import numpy as np\n'), ((1588, 1601), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1598, 1601), True, 'import numpy as np\n'), ((1633, 1646), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1643, 1646), True, 'import numpy as np\n'), ((857, 883), 'collections.Counter', 'collections.Counter', (['words'], {}), '(words)\n', (876, 883), False, 'import collections\n')] |
""" This module extracts lexical features. """
import collections
import logging
import re
import sys
import math
import subprocess
import numpy as np
import scipy.stats
import scipy.spatial.distance
import nltk.probability
from nltk.corpus import wordnet as wn
from nltk.tokenize import sent_tokenize
import requests
import json
from utils.logger import get_logger
from utils.lexicosyntactic import functions
def _accumulate_wn_stats(token, pos, ambigs, max_depths, min_depths):
    """Append the WordNet synset count and mean max/min synset depths for
    `token` (looked up with POS `pos`, or across all POS when `pos` is None)."""
    syns = wn.synsets(token, pos) if pos is not None else wn.synsets(token)
    if syns:
        ambigs.append(len(syns))
        max_depths.append(sum(s.max_depth() for s in syns) / len(syns))
        min_depths.append(sum(s.min_depth() for s in syns) / len(syns))

def get_wordnet_features(pos_utterances, nan_value):
    ''' This function extracts WordNet features.
    Parameters:
        pos_utterances: list of tuples of strings, (token, POS tag).
        nan_value: int, value for nan
    Returns:
        wordnet_keys: list of strings, names of extracted features.
        wordnet_features: dictionary, mapping feature name to feature value.
    '''
    wordnet_keys = ['avg_max_wn_depth_nn', 'sd_max_wn_depth_nn', 'avg_min_wn_depth_nn', 'sd_min_wn_depth_nn',
                    'avg_wn_ambig_nn', 'sd_wn_ambig_nn', 'kurt_wn_ambig_nn', 'skew_wn_ambig_nn',
                    'avg_max_wn_depth_vb', 'sd_max_wn_depth_vb', 'avg_min_wn_depth_vb', 'sd_min_wn_depth_vb',
                    'avg_wn_ambig_vb', 'sd_wn_ambig_vb', 'kurt_wn_ambig_vb', 'skew_wn_ambig_vb',
                    'avg_max_wn_depth', 'sd_max_wn_depth', 'avg_min_wn_depth', 'sd_min_wn_depth',
                    'avg_wn_ambig', 'sd_wn_ambig', 'kurt_wn_ambig', 'skew_wn_ambig']
    wordnet_features = {}

    # One (ambiguity, mean-max-depth, mean-min-depth) accumulator triple per
    # group: '' = all POS (ignoring the tagger), '_nn' = nouns, '_vb' = verbs.
    stats = {suffix: ([], [], []) for suffix in ('', '_nn', '_vb')}

    for utt in pos_utterances:
        for token, tag in utt:
            if re.match(r"^NN.*$", tag):
                _accumulate_wn_stats(token, wn.NOUN, *stats['_nn'])
            elif re.match(r"^VB.*$", tag):
                _accumulate_wn_stats(token, wn.VERB, *stats['_vb'])
            # The all-POS lookup counts ambiguous POS, ignoring the tagger.
            _accumulate_wn_stats(token, None, *stats[''])

    # Aggregate each group; empty accumulators fall back to nan_value.
    for suffix, (ambigs, max_depths, min_depths) in stats.items():
        wordnet_features['avg_wn_ambig' + suffix] = np.mean(ambigs) if ambigs else nan_value
        wordnet_features['sd_wn_ambig' + suffix] = np.std(ambigs) if ambigs else nan_value
        wordnet_features['kurt_wn_ambig' + suffix] = scipy.stats.kurtosis(ambigs) if ambigs else nan_value
        wordnet_features['skew_wn_ambig' + suffix] = scipy.stats.skew(ambigs) if ambigs else nan_value
        wordnet_features['avg_max_wn_depth' + suffix] = np.mean(max_depths) if max_depths else nan_value
        wordnet_features['sd_max_wn_depth' + suffix] = np.std(max_depths) if max_depths else nan_value
        wordnet_features['avg_min_wn_depth' + suffix] = np.mean(min_depths) if min_depths else nan_value
        wordnet_features['sd_min_wn_depth' + suffix] = np.std(min_depths) if min_depths else nan_value
    return wordnet_keys, wordnet_features
def get_cosine_distance(transcript_utterances, stopwords, inf_value):
    ''' This function extracts cosine distance features.
    Parameters:
        transcript_utterances: list of lists of strings (words), each row is a plaintext utterance in the transcript.
        stopwords: list of string, words to be removed.
        inf_value: int, value for infinity.
    Returns:
        cosine_keys: list of strings, names of extracted features.
        cosine_features_dict: dictionary, mapping feature name to feature value.
    '''
    cosine_keys = ["ave_cos_dist", "min_cos_dist", "cos_cutoff_00", "cos_cutoff_03", "cos_cutoff_05"]
    cosine_features_dict = {}
    # REPETITION
    # Build a vocab for the transcript. collections.Counter preserves
    # first-seen key order exactly like nltk's FreqDist, so the vocab
    # ordering (and hence the word vectors) is unchanged; this drops the
    # nltk dependency for what is just a word count.
    fdist_vocab = collections.Counter(word for utt in transcript_utterances for word in utt)
    stopword_set = set(stopwords)  # O(1) membership instead of list scans
    vocab_words = [w for w in fdist_vocab if w not in stopword_set]
    num_utterances = len(transcript_utterances)
    # Create a word vector for each utterance, N x V
    # where N is the num of utterances and V is the vocab size.
    # The vector is 1 if the vocab word is present in the utterance,
    # 0 otherwise (i.e., one hot encoded).
    word_vectors = [[1 if w in utt else 0 for w in vocab_words]
                    for utt in transcript_utterances]
    # Hoist each vector's norm out of the O(N^2) pair loop below; the
    # previous version recomputed every norm O(N) times.
    norms = [np.linalg.norm(v) for v in word_vectors]
    # Calculate cosine DISTANCE between each pair of utterances in
    # this transcript (many entries with small distances means the
    # subject is repeating a lot of words).
    average_dist = 0.0
    min_dist = 1.0
    num_similar_00 = 0.0
    num_similar_03 = 0.0
    num_similar_05 = 0.0
    num_pairs = 0
    for i in range(num_utterances):
        for j in range(i):
            # The norms might be zero if the utterance contained only
            # stopwords which were removed above. Only compute cosine
            # distance if both norms are non-zero; ignore the rest.
            if norms[i] > 0 and norms[j] > 0:
                cosine_dist = scipy.spatial.distance.cosine(word_vectors[i], word_vectors[j])
                if math.isnan(cosine_dist):
                    continue
                average_dist += cosine_dist
                num_pairs += 1
                if cosine_dist < min_dist:
                    min_dist = cosine_dist
                # Try different cutoffs for similarity
                if cosine_dist < 0.001: #similarity threshold
                    num_similar_00 += 1
                if cosine_dist <= 0.3: #similarity threshold
                    num_similar_03 += 1
                if cosine_dist <= 0.5: #similarity threshold
                    num_similar_05 += 1
    # The total number of unique utterance pairwise comparisons is <= N*(N-1)/2
    # (could be less if some utterances contain only stopwords and end up empty after
    # stopword removal).
    denom = num_pairs
    if denom >= 1:
        cosine_features = [average_dist * 1.0 / denom,
                           min_dist,
                           num_similar_00 * 1.0 / denom,
                           num_similar_03 * 1.0 / denom,
                           num_similar_05 * 1.0 / denom]
    else:
        # There are either no utterances or a single utterance -- no repetition occurs
        cosine_features = [inf_value, inf_value, 0, 0, 0]
    for ind_feat, feat_name in enumerate(cosine_keys):
        cosine_features_dict[feat_name] = cosine_features[ind_feat]
    return cosine_keys, cosine_features_dict
def get_filler_counts(transcript_utterances_fillers):
    ''' This function extracts filler counts.
    Parameters:
        transcript_utterances_fillers: list of list of strings, transcript utterances with fillers included.
    Returns:
        filler_keys: list of strings, names of extracted features.
        filler_counts: dictionary, mapping feature name to feature value.
    '''
    # One anchored pattern per filler category. A single token may match
    # several categories (e.g. 'uh' counts under both 'fillers' and 'uh').
    category_patterns = {
        'fillers': re.compile(r'^(?:(?:ah)|(?:eh)|(?:er)|(?:ew)|(?:hm)|(?:mm)|(?:uh)|(?:uhm)|(?:um))$'),
        'um': re.compile(r'^(?:(?:uhm)|(?:um))$'),
        'uh': re.compile(r'^(?:(?:ah)|(?:uh))$'),
    }
    filler_keys = category_patterns.keys()
    filler_counts = collections.defaultdict(int)
    if transcript_utterances_fillers is not None:
        for utterance in transcript_utterances_fillers:
            for token in utterance:
                for category, pattern in category_patterns.items():
                    if pattern.findall(token):
                        filler_counts[category] += 1
    return filler_keys, filler_counts
def get_vocab_richness_measures(pos_tokens, lemmatized_tokens, total_words, inf_value):
    ''' This function extracts vocabulary richness measures:
    Honore statistic, Brunet index, type-token ratio (TTR), moving average type-token ratio (MATTR)
    Parameters:
        pos_tokens: list of tuples of strings, (token, POS_tag) of non-punctuation tokens.
        lemmatized_tokens: list of strings, lemmatized non-punctuation tokens.
        total_words: int, total number of words.
        inf_value: int, infinity value.
    Returns:
        vocab_keys: list of strings, names of extracted features.
        vocab_features: dictionary, mapping feature name to feature value.
    '''
    vocab_keys = ['TTR', 'brunet', 'honore']
    vocab_features = {}
    # MATTR - shift a window over the transcript and compute
    # moving average TTR over each window, then average over all windows.
    # Stays 0 when the transcript is shorter than the window.
    for window_size in [10, 20, 30, 40, 50]:
        start = 0
        end = window_size
        MATTR = 0
        vocab_features['MATTR_%d' % (window_size)] = 0
        vocab_keys += ['MATTR_%d' % (window_size)]
        while end < len(lemmatized_tokens):
            lem_types = len(set(lemmatized_tokens[start:end]))
            MATTR += 1.0 * lem_types / window_size
            start += 1  # shift window one word at a time
            end += 1
        if start > 0:
            vocab_features['MATTR_%d' % (window_size)] = 1.0 * MATTR / start
    word_types = len(set(pos_tokens))  # same word with different POS = different tokens
    # collections.Counter replaces nltk.FreqDist -- only the counts are used.
    fd_tokens = collections.Counter(pos_tokens)
    # Number of (token, POS) types that occur exactly once (hapax legomena).
    once_words = sum(1 for num in fd_tokens.values() if num == 1)
    try:
        vocab_features["TTR"] = 1.0 * word_types / total_words
        vocab_features["brunet"] = 1.0 * total_words**(word_types**(-0.165))  # Brunet's index - Vlado
    except ZeroDivisionError:
        # total_words == 0 (TTR) or word_types == 0 (brunet exponent).
        vocab_features["TTR"] = 0
        vocab_features["brunet"] = 0
    try:
        vocab_features["honore"] = 100.0 * math.log(total_words)/(1.0-1.0*once_words/word_types)  # Honore's statistic-Vlado
    except (ValueError, ZeroDivisionError):
        # log(0), no word types, or every type used exactly once.
        vocab_features["honore"] = inf_value
    return vocab_keys, vocab_features
def get_mpqa_norm_features(lemmatized_tokens, mpqa_words, mpqa_types, mpqa_polarities, nan_value):
    ''' This function extracts objectivity polarity measures based on the MPQA lexicon norms:
    strong positive, strong negative, weak positive, weak negative.
    Parameters:
        lemmatized_tokens: list of strings, lemmatized non-punctuation tokens.
        mpqa_words: list of strings, list of words found in the MPQA lexicon.
        mpqa_types: list of strings, type (strong vs weak) of words in the MPQA lexicon.
        mpqa_polarities: list of strings, polarity (positive vs negative) of words in the MPQA lexicon.
        nan_value: int, value of nan.
    Returns:
        mpqa_keys: list of strings, names of extracted features.
        mpqa_features: dictionary, mapping feature name to feature value.
    '''
    # Fixed: the last entry was a corrupted placeholder ('<KEY>'); 'mpqa_num'
    # (the raw count of MPQA words, the only other feature this function
    # populates) is restored as the missing key.
    mpqa_keys = ['mpqa_strong_positive', 'mpqa_strong_negative', 'mpqa_weak_positive', 'mpqa_weak_negative',
                 'mpqa_num']
    mpqa_features = collections.defaultdict(int)
    # Map (type, polarity) from the lexicon to the feature bucket it increments.
    buckets = {('strong', 'positive'): 'mpqa_strong_positive',
               ('strong', 'negative'): 'mpqa_strong_negative',
               ('weak', 'positive'): 'mpqa_weak_positive',
               ('weak', 'negative'): 'mpqa_weak_negative'}
    for lemmatized_token in lemmatized_tokens:
        if lemmatized_token in mpqa_words:
            mpqa_features["mpqa_num"] += 1
            mpqa_idx = mpqa_words.index(lemmatized_token)
            bucket = buckets.get((mpqa_types[mpqa_idx], mpqa_polarities[mpqa_idx]))
            if bucket is not None:
                mpqa_features[bucket] += 1
    # Normalize MPQA subjectivity norms by the number of MPQA words found.
    if mpqa_features["mpqa_num"] > 0:
        for bucket in ('mpqa_strong_positive', 'mpqa_strong_negative', 'mpqa_weak_positive', 'mpqa_weak_negative'):
            mpqa_features[bucket] /= 1.0 * mpqa_features["mpqa_num"]
    else:
        for bucket in ('mpqa_strong_positive', 'mpqa_strong_negative', 'mpqa_weak_positive', 'mpqa_weak_negative'):
            mpqa_features[bucket] = nan_value
    return mpqa_keys, mpqa_features
def get_readability_measures(transcript_utterances, prondict, total_words, nan_value):
    ''' This functions extracts readability measures based on the number of syllables:
    Flesch, Flesch-Kincaid, number of syllables per word (average, standard deviation,
    kurtosis, skewness)
    https://datawarrior.wordpress.com/2016/03/29/flesch-kincaid-readability-measure/
    Parameters:
        transcript_utterances : list of lists of strings (words); each row is a plaintext utterance in the transcript.
        prondict: dictionary of valid words for the language, CMU dictionary is used.
        total_words: int, total number of words.
        nan_value: int, value for NaN
    Returns:
        readability_keys: list of strings, names of extracted features.
        readability_features: dictionary, mapping feature name to feature value.
    '''
    readability_keys = ['flesch', 'flesch_kinkaid', 'avg_syll_per_word', 'std_syll_per_word', 'kurt_syll_per_word', 'skew_syll_per_word']
    readability_features = {}
    sentence_count = 0
    syllables = []
    for utterance in transcript_utterances:
        text = ' '.join(utterance)
        sentence_count += len(sent_tokenize(text))
        for token in utterance:
            syllables += functions.numsyllables(token, prondict)
    # Zero syllable counts are treated as unknown pronunciations and are
    # excluded from the word total used by the readability formulas.
    num_unknown_prons = syllables.count(0)
    known_words = total_words - num_unknown_prons
    total_syllables = sum(syllables)
    try:
        readability_features["flesch"] = 206.835 - 1.015 * known_words / sentence_count - 84.6 * total_syllables / known_words
        readability_features["flesch_kinkaid"] = 0.39 * known_words / sentence_count + 11.8 * total_syllables / known_words - 15.59
    except Exception:
        # Division by zero: no sentences, or no pronounceable words.
        readability_features["flesch"] = 0
        readability_features["flesch_kinkaid"] = 0
    readability_features['avg_syll_per_word'] = np.mean(syllables) if syllables else nan_value
    readability_features['std_syll_per_word'] = np.std(syllables) if syllables else nan_value
    readability_features['kurt_syll_per_word'] = scipy.stats.kurtosis(syllables) if syllables else nan_value
    readability_features['skew_syll_per_word'] = scipy.stats.skew(syllables) if syllables else nan_value
    return readability_keys, readability_features
def get_liwc_features(transcript_utterances, nan_value):
    ''' This function extracts LIWC / Receptiviti lexical-category features by
    posting the full transcript text to the Receptiviti web API.
    Parameters:
        transcript_utterances : list of lists of strings (words); each row is a plaintext utterance in the transcript.
        nan_value: int, value for NaN
    Returns:
        liwc_keys: list of strings, names of extracted features ('receptiviti_*' and 'liwc_*'),
            or ['liwc_fail', 'receptiviti_fail'] when credentials are missing or the request fails.
        liwc_features: dictionary, mapping feature name to feature value.
    '''
    # Fallback result returned on any failure below.
    error_liwc_keys = ['liwc_fail', 'receptiviti_fail']
    error_liwc_features = {};
    error_liwc_features['liwc_fail'] = nan_value
    error_liwc_features['receptiviti_fail'] = nan_value
    liwc_keys = [];
    liwc_features = {};
    try:
        # NOTE(review): this is meant to import a project-local secrets.py that
        # defines the Receptiviti credentials; the name also shadows the stdlib
        # 'secrets' module (Python 3.6+). If the stdlib module wins the import,
        # the attribute accesses below raise and we fall into the except branch.
        import secrets
        api_key = secrets.receptiviti_api_key
        api_secret = secrets.receptiviti_api_secret
        iPerson = secrets.receptiviti_iPerson
    except:
        get_logger().log(logging.ERROR, "Unable to import secrets for receptiviti")
        return error_liwc_keys, error_liwc_features
    sServer = 'https://app.receptiviti.com'
    url = '%s/v2/api/person/%s/contents' % (sServer, iPerson)
    # Concatenate all utterances into one content string.
    # NOTE(review): utterances are joined with no separator, so the last word
    # of one utterance fuses with the first word of the next -- confirm intended.
    allUtts = ''
    for utt in transcript_utterances:
        sutt = ' '.join(utt)
        allUtts += sutt
    headers = {
        "X-API-KEY": api_key,
        "X-API-SECRET-KEY": api_secret,
        'Content-Type': 'application/json'
    }
    payload = {
        'content_handle' : '0',
        'language_content' : allUtts,
        'content_source' : 0
    }
    response = requests.post(url, headers=headers, data=payload)
    if response.status_code != 200:
        get_logger().log(logging.ERROR, "Response code %s from Receptiviti" % response.status_code)
        return error_liwc_keys, error_liwc_features
    else:
        # Flatten both score groups into prefixed feature names.
        # NOTE(review): response.json() re-parses the response body on every
        # call here; caching it in a local variable would avoid that.
        for key in response.json()['receptiviti_scores']['raw_scores']:
            skey = 'receptiviti_%s' % key
            liwc_keys += [skey]
            liwc_features[skey] = response.json()['receptiviti_scores']['raw_scores'][key]
        for key in response.json()['liwc_scores']['categories']:
            skey = 'liwc_%s' % key
            liwc_keys += [skey]
            liwc_features[skey] = response.json()['liwc_scores']['categories'][key]
    return liwc_keys, liwc_features
def get_stanford_sentiment_features(transcript_utterances, path_to_stanford_cp, nan_value):
    ''' This functions extracts sentiment analysis features using the Stanford sentiment analysis tool.
    Parameters:
        transcript_utterances : list of lists of strings (words); each row is a plaintext utterance in the transcript.
        path_to_stanford_cp: string, path to Stanford corenlp
        nan_value: int, value for NaN
    Returns:
        stanford_keys: list of strings, names of extracted features.
        stanford_features: dictionary, mapping feature name to feature value.
    '''
    stanford_keys = ['mean_stanford_sentiment_veryneg', 'mean_stanford_sentiment_neg',
                     'mean_stanford_sentiment_neutral', 'mean_stanford_sentiment_pos', 'mean_stanford_sentiment_verypos',
                     'std_stanford_sentiment_veryneg', 'std_stanford_sentiment_neg',
                     'std_stanford_sentiment_neutral', 'std_stanford_sentiment_pos', 'std_stanford_sentiment_verypos']
    stanford_features = {}
    sentiments = []
    try:
        for utt in transcript_utterances:
            sentence = ' '.join(utt)
            # One JVM invocation per utterance; PROBABILITIES output prints the
            # class probability distribution on the second line.
            cmd = 'java -cp "%s" -mx5g edu.stanford.nlp.sentiment.SentimentPipeline -output PROBABILITIES -stdin' % path_to_stanford_cp
            proc = subprocess.Popen(cmd,
                                    stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
            # communicate() writes stdin, closes it and drains stdout; the
            # previous manual write/close/read could deadlock on a full pipe.
            stdout_data, _ = proc.communicate(sentence.encode())
            lines = stdout_data.decode().splitlines()
            line = re.split(r'\s+', lines[1].rstrip('\t'))
            sentiments = np.vstack((sentiments, np.array(line[2:]))) if len(sentiments) > 0 else np.array(line[2:])
        sentiments = np.reshape(sentiments, (-1, 5))
        # np.float was removed in NumPy 1.20+ (alias of builtin float).
        sentiments = sentiments.astype(float)
        if sentiments.shape[0] > 1:
            msentiments = np.mean(sentiments, axis=0)
            ssentiments = np.std(sentiments, axis=0)
        else:
            # Zero or one utterance: no variance to report.
            msentiments = sentiments.flatten()
            ssentiments = np.array([0]*5)
        stanford_features['mean_stanford_sentiment_veryneg'] = msentiments[0] if msentiments.size >0 else nan_value
        stanford_features['mean_stanford_sentiment_neg'] = msentiments[1] if msentiments.size >1 else nan_value
        stanford_features['mean_stanford_sentiment_neutral'] = msentiments[2] if msentiments.size>2 else nan_value
        stanford_features['mean_stanford_sentiment_pos'] = msentiments[3] if msentiments.size>3 else nan_value
        stanford_features['mean_stanford_sentiment_verypos'] = msentiments[4] if msentiments.size>4 else nan_value
        stanford_features['std_stanford_sentiment_veryneg'] = ssentiments[0] if ssentiments.size>0 else nan_value
        stanford_features['std_stanford_sentiment_neg'] = ssentiments[1] if ssentiments.size>1 else nan_value
        stanford_features['std_stanford_sentiment_neutral'] = ssentiments[2] if ssentiments.size>2 else nan_value
        stanford_features['std_stanford_sentiment_pos'] = ssentiments[3] if ssentiments.size>3 else nan_value
        stanford_features['std_stanford_sentiment_verypos'] = ssentiments[4] if ssentiments.size>4 else nan_value
    except NameError as e:
        # The previous handler itself crashed: re.findall was called without a
        # target string, and a str was concatenated with sys.exc_info()[0]
        # (a type object). Log the exception text directly instead.
        msg = 'Cannot run Stanford sentiment analysis, using %s' % path_to_stanford_cp
        msg = msg + "\n" + str(e)
        get_logger().log(logging.ERROR, msg)
        for stanford_key in stanford_keys:
            stanford_features[stanford_key] = nan_value
    except IndexError as e:
        get_logger().log(logging.ERROR, 'Problem with Stanford sentiment analysis output file' + str(e))
        for stanford_key in stanford_keys:
            stanford_features[stanford_key] = nan_value
    return stanford_keys, stanford_features
def get_pos_features(pos_utterances, total_words, lemmatizer,
norms_freq, norms_image, norms_anew, norms_warringer,
function_tags, inflected_verb_tags, light_verbs, subordinate,
demonstratives, dictionary_words, word_exceptions, inf_value, nan_value,
get_pos_counts, get_pos_ratios, get_frequency_norms, get_image_norms, get_anew_norms,
get_warringer_norms, get_density):
''' This general purpose functions extracts POS features.
Parameters:
pos_utterances : list of lists of tuples of (token, POStag); each row is an utterance. No filled pauses.
total_words: int, total number of words.
lemmatizer: WordNet Lemmatizer.
norms_freq: lexical frequency norms.
norms_image: norms for age-of-acquisition, imageability, familiarity.
norms_anew: norms for Anew (valence, arousal, dominance).
norms_warringer: norms for Warringer (valence, arousal, dominance).
function_tags: list of strings, POS tags for function words.
inflected_verb_tags: list of strings, POS tags for inflected verbs.
light_verbs: list of strings, light verbs.
subordinate:
demonstratives:
dictionary_words:
word_exceptions:
inf_value:
nan_value:
get_pos_counts: boolean, return POS counts if True.
get_pos_ratios: boolean, return POS ratios if True.
get_frequency_norms: boolean, return frequency values if True.
get_image_norms: boolean, return image values (age-of-acquisition, imageability, familiarity) if True.
get_anew_norms: boolean, return Anew values (valence, arousal, dominance) if True.
get_warringer_norms: boolean, return Warringer values (valence, arousal, dominance) if True.
get_density: boolean, return density values if True.
Returns:
'''
pos_keys = ['word_length', 'NID']
if get_pos_counts:
pos_keys += ['nouns', 'verbs', 'inflected_verbs', 'light', 'function', 'pronouns', 'determiners', 'adverbs', 'adjectives', 'prepositions',
'coordinate', 'subordinate', 'demonstratives']
if get_pos_ratios:
pos_keys += ['nvratio', 'prp_ratio', 'noun_ratio', 'sub_coord_ratio']
if get_frequency_norms:
pos_keys += ['frequency', 'noun_frequency', 'verb_frequency']
if get_image_norms:
pos_keys += ['aoa', 'imageability', 'familiarity',
'noun_aoa', 'noun_imageability', 'noun_familiarity',
'verb_aoa', 'verb_imageability', 'verb_familiarity']
if get_anew_norms:
pos_keys += ['noun_anew_val_mean', 'noun_anew_val_std', 'noun_anew_aro_mean', 'noun_anew_aro_std', 'noun_anew_dom_mean', 'noun_anew_dom_std',
'verb_anew_val_mean', 'verb_anew_val_std', 'verb_anew_aro_mean', 'verb_anew_aro_std', 'verb_anew_dom_mean', 'verb_anew_dom_std',
'anew_val_mean', 'anew_val_std', 'anew_aro_mean', 'anew_aro_std', 'anew_dom_mean', 'anew_dom_std']
if get_warringer_norms:
pos_keys += ['warr_val_mean', 'warr_val_std', 'warr_val_rat', 'warr_aro_mean', 'warr_aro_std',
'warr_aro_rat', 'warr_dom_mean', 'warr_dom_std', 'warr_dom_rat',
'warr_val_mean_nn', 'warr_val_std_nn', 'warr_val_rat_nn', 'warr_aro_mean_nn', 'warr_aro_std_nn',
'warr_aro_rat_nn', 'warr_dom_mean_nn', 'warr_dom_std_nn', 'warr_dom_rat_nn',
'warr_val_mean_vb', 'warr_val_std_vb', 'warr_val_rat_vb', 'warr_aro_mean_vb', 'warr_aro_std_vb',
'warr_aro_rat_vb', 'warr_dom_mean_vb', 'warr_dom_std_vb', 'warr_dom_rat_vb']
if get_density:
pos_keys += ['prop_density', 'content_density']
pos_features = collections.defaultdict(int)
# Filter out any punctuation tags
regex_pos_content = re.compile(r'^[a-zA-Z$]+$')
pos_tokens = [] # list of tuples of (token, POStag) of non-punctuation tokens
lemmatized_tokens = [] # list of lemmatized non-punctuation tokens
if get_density:
pos_features['prop_density'] = 0
pos_features['content_density'] = 0
for utt in pos_utterances:
for token_tuple in utt:
# If the POS tag is not that of punctuation, add to tokens
if regex_pos_content.findall(token_tuple[1]):
pos_tokens += [token_tuple]
pos_features['word_length'] += len(token_tuple[0])
if token_tuple[0] not in dictionary_words and token_tuple[0] not in word_exceptions:
pos_features['NID'] += 1
# Lemmatize according to the type of the word
lemmatized_token = lemmatizer.lemmatize(token_tuple[0], functions.pos_treebank2wordnet(token_tuple[1]))
lemmatized_tokens += [lemmatized_token]
if get_density:
if re.match(r"^(NN|VB|JJ|RB|SYM).*$", token_tuple[1]):
pos_features['content_density'] += 1
if re.match(r"^(VB|JJ|RB|IN|CC).*$", token_tuple[1]):
pos_features['prop_density'] += 1
# Count POS tags
if re.match(r"^NN.*$", token_tuple[1]):
pos_features['nouns'] += 1
if get_frequency_norms and token_tuple[0] in norms_freq:
pos_features["noun_frequency"] += float(norms_freq[token_tuple[0]][5]) # use log10WF
pos_features["noun_freq_num"] += 1
if get_image_norms and lemmatized_token in norms_image:
pos_features["noun_aoa"] += float(norms_image[lemmatized_token][0])
pos_features["noun_imageability"] += float(norms_image[lemmatized_token][1])
pos_features["noun_familiarity"] += float(norms_image[lemmatized_token][2])
pos_features["noun_img_num"] += 1
if get_anew_norms and lemmatized_token in norms_anew:
pos_features["noun_anew_val_mean"] += float(norms_anew[lemmatized_token][0])
pos_features["noun_anew_val_std"] += float(norms_anew[lemmatized_token][1])
pos_features["noun_anew_aro_mean"] += float(norms_anew[lemmatized_token][2])
pos_features["noun_anew_aro_std"] += float(norms_anew[lemmatized_token][3])
pos_features["noun_anew_dom_mean"] += float(norms_anew[lemmatized_token][4])
pos_features["noun_anew_dom_std"] += float(norms_anew[lemmatized_token][5])
pos_features["noun_anew_num"] += 1
if get_warringer_norms and lemmatized_token in norms_warringer:
pos_features["warr_val_mean_nn"] += float(norms_warringer[lemmatized_token][0])
pos_features["warr_val_std_nn"] += float(norms_warringer[lemmatized_token][1])
pos_features["warr_val_rat_nn"] += float(norms_warringer[lemmatized_token][2])
pos_features["warr_aro_mean_nn"] += float(norms_warringer[lemmatized_token][3])
pos_features["warr_aro_std_nn"] += float(norms_warringer[lemmatized_token][4])
pos_features["warr_aro_rat_nn"] += float(norms_warringer[lemmatized_token][5])
pos_features["warr_dom_mean_nn"] += float(norms_warringer[lemmatized_token][6])
pos_features["warr_dom_std_nn"] += float(norms_warringer[lemmatized_token][7])
pos_features["warr_dom_rat_nn"] += float(norms_warringer[lemmatized_token][8])
pos_features["noun_warr_num"] += 1
elif re.match(r'^V.*$', token_tuple[1]):
pos_features['verbs'] += 1
if token_tuple[1] in inflected_verb_tags:
pos_features['inflected_verbs'] += 1
if lemmatized_token in light_verbs:
pos_features['light'] += 1
if get_frequency_norms and token_tuple[0] in norms_freq:
pos_features["verb_frequency"] += float(norms_freq[token_tuple[0]][5]) # use log10WF
pos_features["verb_freq_num"] += 1
if get_image_norms and lemmatized_token in norms_image:
pos_features["verb_aoa"] += float(norms_image[lemmatized_token][0])
pos_features["verb_imageability"] += float(norms_image[lemmatized_token][1])
pos_features["verb_familiarity"] += float(norms_image[lemmatized_token][2])
pos_features["verb_img_num"] += 1
if get_anew_norms and lemmatized_token in norms_anew:
pos_features["verb_anew_val_mean"] += float(norms_anew[lemmatized_token][0])
pos_features["verb_anew_val_std"] += float(norms_anew[lemmatized_token][1])
pos_features["verb_anew_aro_mean"] += float(norms_anew[lemmatized_token][2])
pos_features["verb_anew_aro_std"] += float(norms_anew[lemmatized_token][3])
pos_features["verb_anew_dom_mean"] += float(norms_anew[lemmatized_token][4])
pos_features["verb_anew_dom_std"] += float(norms_anew[lemmatized_token][5])
pos_features["verb_anew_num"] += 1
if get_warringer_norms and lemmatized_token in norms_warringer:
pos_features["warr_val_mean_vb"] += float(norms_warringer[lemmatized_token][0])
pos_features["warr_val_std_vb"] += float(norms_warringer[lemmatized_token][1])
pos_features["warr_val_rat_vb"] += float(norms_warringer[lemmatized_token][2])
pos_features["warr_aro_mean_vb"] += float(norms_warringer[lemmatized_token][3])
pos_features["warr_aro_std_vb"] += float(norms_warringer[lemmatized_token][4])
pos_features["warr_aro_rat_vb"] += float(norms_warringer[lemmatized_token][5])
pos_features["warr_dom_mean_vb"] += float(norms_warringer[lemmatized_token][6])
pos_features["warr_dom_std_vb"] += float(norms_warringer[lemmatized_token][7])
pos_features["warr_dom_rat_vb"] += float(norms_warringer[lemmatized_token][8])
pos_features["verb_warr_num"] += 1
else:
if token_tuple[1] in function_tags:
pos_features['function'] += 1
if re.match(r'^PRP.*$', token_tuple[1]):
pos_features['pronouns'] += 1
elif re.match(r"^DT$", token_tuple[1]):
pos_features["determiners"] += 1
elif re.match(r"^RB.*$", token_tuple[1]): #adverb
pos_features["adverbs"] += 1
elif re.match(r"^JJ.*$", token_tuple[1]): #adjective
pos_features["adjectives"] += 1
elif re.match(r"^IN$", token_tuple[1]):
pos_features["prepositions"] += 1
elif re.match(r"^CC$", token_tuple[1]):
pos_features["coordinate"] += 1
if token_tuple[0] in subordinate:
if token_tuple[1] in ["IN", "WRB", "WP"]:
pos_features["subordinate"] += 1
if token_tuple[0] in demonstratives:
pos_features["demonstratives"] += 1
if get_frequency_norms and token_tuple[0] in norms_freq: #note: frequencies are not lemmatized
pos_features["frequency"] += float(norms_freq[token_tuple[0]][5]) # use log10WF
pos_features["freq_num"] += 1
if get_image_norms and lemmatized_token in norms_image:
pos_features["aoa"] += float(norms_image[lemmatized_token][0])
pos_features["imageability"] += float(norms_image[lemmatized_token][1])
pos_features["familiarity"] += float(norms_image[lemmatized_token][2])
pos_features["img_num"] += 1
if get_anew_norms and lemmatized_token in norms_anew:
pos_features["anew_val_mean"] += float(norms_anew[lemmatized_token][0])
pos_features["anew_val_std"] += float(norms_anew[lemmatized_token][1])
pos_features["anew_aro_mean"] += float(norms_anew[lemmatized_token][2])
pos_features["anew_aro_std"] += float(norms_anew[lemmatized_token][3])
pos_features["anew_dom_mean"] += float(norms_anew[lemmatized_token][4])
pos_features["anew_dom_std"] += float(norms_anew[lemmatized_token][5])
pos_features["anew_num"] += 1
if get_warringer_norms and lemmatized_token in norms_warringer:
pos_features["warr_val_mean"] += float(norms_warringer[lemmatized_token][0])
pos_features["warr_val_std"] += float(norms_warringer[lemmatized_token][1])
pos_features["warr_val_rat"] += float(norms_warringer[lemmatized_token][2])
pos_features["warr_aro_mean"] += float(norms_warringer[lemmatized_token][3])
pos_features["warr_aro_std"] += float(norms_warringer[lemmatized_token][4])
pos_features["warr_aro_rat"] += float(norms_warringer[lemmatized_token][5])
pos_features["warr_dom_mean"] += float(norms_warringer[lemmatized_token][6])
pos_features["warr_dom_std"] += float(norms_warringer[lemmatized_token][7])
pos_features["warr_dom_rat"] += float(norms_warringer[lemmatized_token][8])
pos_features["warr_num"] += 1
# Compute verb ratios, and noun to verb ratio
if pos_features["verbs"] > 0:
pos_features["nvratio"] = 1.0 * pos_features["nouns"] / pos_features["verbs"]
pos_features["light"] = 1.0 * pos_features["light"] / pos_features["verbs"]
pos_features["inflected_verbs"] = 1.0 * pos_features["inflected_verbs"] / pos_features["verbs"]
else:
if pos_features["nouns"] > 0:
pos_features["nvratio"] = inf_value
else:
pos_features["nvratio"] = nan_value
pos_features["light"] = 0
pos_features["inflected_verbs"] = 0
# Compute noun ratios (pronouns to pronoun+nouns, and nouns to noun+verb)
if pos_features["nouns"] > 0:
pos_features["prp_ratio"] = 1.0 * pos_features["pronouns"] / (pos_features["pronouns"] + pos_features["nouns"])
pos_features["noun_ratio"] = 1.0 * pos_features["nouns"] / (pos_features["verbs"] + pos_features["nouns"])
else:
if pos_features["pronouns"] > 0:
pos_features["prp_ratio"] = 1.0 * pos_features["pronouns"] / (pos_features["pronouns"] + pos_features["nouns"])
else:
pos_features["prp_ratio"] = nan_value # NaN? 0/0 - no nouns and no pronouns
if pos_features["verbs"] > 0:
pos_features["noun_ratio"] = 1.0 * pos_features["nouns"]/(pos_features["verbs"] + pos_features["nouns"])
else:
pos_features["noun_ratio"] = nan_value # NaN? 0/0 - no nouns and no verbs
# Compute conjunction ratios
if pos_features["coordinate"] > 0:
pos_features["sub_coord_ratio"] = 1.0 * pos_features["subordinate"] / pos_features["coordinate"]
else:
if pos_features['subordinate'] > 0:
pos_features["sub_coord_ratio"] = inf_value
else:
pos_features['sub_coord_ratio'] = nan_value # NaN? 0/0 - no subord and no coord conjunctions
if pos_features['prop_density'] > 0:
pos_features['prop_density'] /= total_words
else:
pos_features['prop_density'] = nan_value
if pos_features['content_density'] > 0:
pos_features['content_density'] /= total_words
else:
pos_features['content_density'] = nan_value
# Normalize all age of acquisition, imageability, familiarity norms
if pos_features["img_num"] > 0:
pos_features["aoa"] = 1.0 * pos_features["aoa"] / pos_features["img_num"]
pos_features["imageability"] = 1.0 * pos_features["imageability"] / pos_features["img_num"]
pos_features["familiarity"] = 1.0 * pos_features["familiarity"] / pos_features["img_num"]
else: # no words with imageability norms
pos_features["aoa"] = nan_value
pos_features["imageability"] = nan_value
pos_features["familiarity"] = nan_value
# Normalize all age of acquisition, imageability, familiarity norms for nouns
if pos_features["noun_img_num"] > 0:
pos_features["noun_aoa"] = 1.0 * pos_features["noun_aoa"] / pos_features["noun_img_num"]
pos_features["noun_imageability"] = 1.0 * pos_features["noun_imageability"] / pos_features["noun_img_num"]
pos_features["noun_familiarity"] = 1.0 * pos_features["noun_familiarity"] / pos_features["noun_img_num"]
else:
pos_features["noun_aoa"] = nan_value
pos_features["noun_imageability"] = nan_value
pos_features["noun_familiarity"] = nan_value
# Normalize all age of acquisition, imageability, familiarity norms for verbs
if pos_features["verb_img_num"] > 0:
pos_features["verb_aoa"] = 1.0 * pos_features["verb_aoa"] / pos_features["verb_img_num"]
pos_features["verb_imageability"] = 1.0 * pos_features["verb_imageability"] / pos_features["verb_img_num"]
pos_features["verb_familiarity"] = 1.0 * pos_features["verb_familiarity"] / pos_features["verb_img_num"]
else:
pos_features["verb_aoa"] = nan_value
pos_features["verb_imageability"] = nan_value
pos_features["verb_familiarity"] = nan_value
# Normalize all anew norms
if pos_features["anew_num"] > 0:
pos_features["anew_val_mean"] = 1.0 * pos_features["anew_val_mean"] / pos_features["anew_num"]
pos_features["anew_val_std"] = 1.0 * pos_features["anew_val_std"] / pos_features["anew_num"]
pos_features["anew_aro_mean"] = 1.0 * pos_features["anew_aro_mean"] / pos_features["anew_num"]
pos_features["anew_aro_std"] = 1.0 * pos_features["anew_aro_std"] / pos_features["anew_num"]
pos_features["anew_dom_mean"] = 1.0 * pos_features["anew_dom_mean"] / pos_features["anew_num"]
pos_features["anew_dom_std"] = 1.0 * pos_features["anew_dom_std"] / pos_features["anew_num"]
else: # no words with imageability norms
pos_features["anew_val_mean"] = nan_value
pos_features["anew_val_std"] = nan_value
pos_features["anew_aro_mean"] = nan_value
pos_features["anew_aro_std"] = nan_value
pos_features["anew_dom_mean"] = nan_value
pos_features["anew_dom_std"] = nan_value
# Normalize all anew norms for nouns
if pos_features["noun_anew_num"] > 0:
pos_features["noun_anew_val_mean"] = 1.0 * pos_features["noun_anew_val_mean"] / pos_features["noun_anew_num"]
pos_features["noun_anew_val_std"] = 1.0 * pos_features["noun_anew_val_std"] / pos_features["noun_anew_num"]
pos_features["noun_anew_aro_mean"] = 1.0 * pos_features["noun_anew_aro_mean"] / pos_features["noun_anew_num"]
pos_features["noun_anew_aro_std"] = 1.0 * pos_features["noun_anew_aro_std"] / pos_features["noun_anew_num"]
pos_features["noun_anew_dom_mean"] = 1.0 * pos_features["noun_anew_dom_mean"] / pos_features["noun_anew_num"]
pos_features["noun_anew_dom_std"] = 1.0 * pos_features["noun_anew_dom_std"] / pos_features["noun_anew_num"]
else: # no nouns with anew norms
pos_features["noun_anew_val_mean"] = nan_value
pos_features["noun_anew_val_std"] = nan_value
pos_features["noun_anew_aro_mean"] = nan_value
pos_features["noun_anew_aro_std"] = nan_value
pos_features["noun_anew_dom_mean"] = nan_value
pos_features["noun_anew_dom_std"] = nan_value
# Normalize all anew norms for verbs
if pos_features["verb_anew_num"] > 0:
pos_features["verb_anew_val_mean"] = 1.0 * pos_features["verb_anew_val_mean"] / pos_features["verb_anew_num"]
pos_features["verb_anew_val_std"] = 1.0 * pos_features["verb_anew_val_std"] / pos_features["verb_anew_num"]
pos_features["verb_anew_aro_mean"] = 1.0 * pos_features["verb_anew_aro_mean"] / pos_features["verb_anew_num"]
pos_features["verb_anew_aro_std"] = 1.0 * pos_features["verb_anew_aro_std"] / pos_features["verb_anew_num"]
pos_features["verb_anew_dom_mean"] = 1.0 * pos_features["verb_anew_dom_mean"] / pos_features["verb_anew_num"]
pos_features["verb_anew_dom_std"] = 1.0 * pos_features["verb_anew_dom_std"] / pos_features["verb_anew_num"]
else: # no verbs with anew norms
pos_features["verb_anew_val_mean"] = nan_value
pos_features["verb_anew_val_std"] = nan_value
pos_features["verb_anew_aro_mean"] = nan_value
pos_features["verb_anew_aro_std"] = nan_value
pos_features["verb_anew_dom_mean"] = nan_value
pos_features["verb_anew_dom_std"] = nan_value
# Normalize all warr norms
if pos_features["warr_num"] > 0:
pos_features["warr_val_mean"] /= 1.0*pos_features["warr_num"]
pos_features["warr_val_std"] /= 1.0*pos_features["warr_num"]
pos_features["warr_val_rat"] /= 1.0*pos_features["warr_num"]
pos_features["warr_aro_mean"] /= 1.0*pos_features["warr_num"]
pos_features["warr_aro_std"] /= 1.0*pos_features["warr_num"]
pos_features["warr_aro_rat"] /= 1.0*pos_features["warr_num"]
pos_features["warr_dom_mean"] /= 1.0*pos_features["warr_num"]
pos_features["warr_dom_std"] /= 1.0*pos_features["warr_num"]
pos_features["warr_dom_rat"] /= 1.0*pos_features["warr_num"]
else: # no words with warringer norms
pos_features["warr_val_mean"] = nan_value
pos_features["warr_val_std"] = nan_value
pos_features["warr_val_rat"] = nan_value
pos_features["warr_aro_mean"] = nan_value
pos_features["warr_aro_std"] = nan_value
pos_features["warr_aro_rat"] = nan_value
pos_features["warr_dom_mean"] = nan_value
pos_features["warr_dom_std"] = nan_value
pos_features["warr_dom_rat"] = nan_value
# Normalize all warr norms for nouns
if pos_features["noun_warr_num"] > 0:
pos_features["warr_val_mean_nn"] /= 1.0*pos_features["noun_warr_num"]
pos_features["warr_val_std_nn"] /= 1.0*pos_features["noun_warr_num"]
pos_features["warr_val_rat_nn"] /= 1.0*pos_features["noun_warr_num"]
pos_features["warr_aro_mean_nn"] /= 1.0*pos_features["noun_warr_num"]
pos_features["warr_aro_std_nn"] /= 1.0*pos_features["noun_warr_num"]
pos_features["warr_aro_rat_nn"] /= 1.0*pos_features["noun_warr_num"]
pos_features["warr_dom_mean_nn"] /= 1.0*pos_features["noun_warr_num"]
pos_features["warr_dom_std_nn"] /= 1.0*pos_features["noun_warr_num"]
pos_features["warr_dom_rat_nn"] /= 1.0*pos_features["noun_warr_num"]
else: # no nouns with warringer norms
pos_features["warr_val_mean_nn"] = nan_value
pos_features["warr_val_std_nn"] = nan_value
pos_features["warr_val_rat_nn"] = nan_value
pos_features["warr_aro_mean_nn"] = nan_value
pos_features["warr_aro_std_nn"] = nan_value
pos_features["warr_aro_rat_nn"] = nan_value
pos_features["warr_dom_mean_nn"] = nan_value
pos_features["warr_dom_std_nn"] = nan_value
pos_features["warr_dom_rat_nn"] = nan_value
# Normalize all warr norms for verbs
if pos_features["verb_warr_num"] > 0:
pos_features["warr_val_mean_vb"] /= 1.0*pos_features["verb_warr_num"]
pos_features["warr_val_std_vb"] /= 1.0*pos_features["verb_warr_num"]
pos_features["warr_val_rat_vb"] /= 1.0*pos_features["verb_warr_num"]
pos_features["warr_aro_mean_vb"] /= 1.0*pos_features["verb_warr_num"]
pos_features["warr_aro_std_vb"] /= 1.0*pos_features["verb_warr_num"]
pos_features["warr_aro_rat_vb"] /= 1.0*pos_features["verb_warr_num"]
pos_features["warr_dom_mean_vb"] /= 1.0*pos_features["verb_warr_num"]
pos_features["warr_dom_std_vb"] /= 1.0*pos_features["verb_warr_num"]
pos_features["warr_dom_rat_vb"] /= 1.0*pos_features["verb_warr_num"]
else: # no verbs with warringer norms
pos_features["warr_val_mean_vb"] = nan_value
pos_features["warr_val_std_vb"] = nan_value
pos_features["warr_val_rat_vb"] = nan_value
pos_features["warr_aro_mean_vb"] = nan_value
pos_features["warr_aro_std_vb"] = nan_value
pos_features["warr_aro_rat_vb"] = nan_value
pos_features["warr_dom_mean_vb"] = nan_value
pos_features["warr_dom_std_vb"] = nan_value
pos_features["warr_dom_rat_vb"] = nan_value
# Normalize frequency norms
if pos_features["freq_num"] > 0:
pos_features["frequency"] = 1.0 * pos_features["frequency"] / pos_features["freq_num"]
else:
pos_features["frequency"] = nan_value
# Normalize frequency norms for nouns
if pos_features["noun_freq_num"] > 0:
pos_features["noun_frequency"] = 1.0 * pos_features["noun_frequency"] / pos_features["noun_freq_num"]
else:
pos_features["noun_frequency"] = nan_value
# Normalize frequency norms for verbs
if pos_features["verb_freq_num"] > 0:
pos_features["verb_frequency"] = 1.0 * pos_features["verb_frequency"] / pos_features["verb_freq_num"]
else:
pos_features["verb_frequency"] = nan_value
return pos_keys, pos_features
| [
"subprocess.Popen",
"math.isnan",
"numpy.std",
"utils.logger.get_logger",
"nltk.corpus.wordnet.synsets",
"re.match",
"collections.defaultdict",
"nltk.tokenize.sent_tokenize",
"numpy.mean",
"numpy.array",
"numpy.reshape",
"numpy.linalg.norm",
"sys.exc_info",
"utils.lexicosyntactic.functions... | [((10065, 10093), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (10088, 10093), False, 'import collections\n'), ((13682, 13710), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (13705, 13710), False, 'import collections\n'), ((19121, 19170), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'data': 'payload'}), '(url, headers=headers, data=payload)\n', (19134, 19170), False, 'import requests\n'), ((27527, 27555), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (27550, 27555), False, 'import collections\n'), ((27619, 27645), 're.compile', 're.compile', (['"""^[a-zA-Z$]+$"""'], {}), "('^[a-zA-Z$]+$')\n", (27629, 27645), False, 'import re\n'), ((3210, 3228), 'numpy.mean', 'np.mean', (['wn_ambigs'], {}), '(wn_ambigs)\n', (3217, 3228), True, 'import numpy as np\n'), ((3295, 3312), 'numpy.std', 'np.std', (['wn_ambigs'], {}), '(wn_ambigs)\n', (3301, 3312), True, 'import numpy as np\n'), ((3580, 3601), 'numpy.mean', 'np.mean', (['wn_ambigs_nn'], {}), '(wn_ambigs_nn)\n', (3587, 3601), True, 'import numpy as np\n'), ((3674, 3694), 'numpy.std', 'np.std', (['wn_ambigs_nn'], {}), '(wn_ambigs_nn)\n', (3680, 3694), True, 'import numpy as np\n'), ((3983, 4004), 'numpy.mean', 'np.mean', (['wn_ambigs_vb'], {}), '(wn_ambigs_vb)\n', (3990, 4004), True, 'import numpy as np\n'), ((4077, 4097), 'numpy.std', 'np.std', (['wn_ambigs_vb'], {}), '(wn_ambigs_vb)\n', (4083, 4097), True, 'import numpy as np\n'), ((4387, 4409), 'numpy.mean', 'np.mean', (['wn_max_depths'], {}), '(wn_max_depths)\n', (4394, 4409), True, 'import numpy as np\n'), ((4484, 4505), 'numpy.std', 'np.std', (['wn_max_depths'], {}), '(wn_max_depths)\n', (4490, 4505), True, 'import numpy as np\n'), ((4581, 4603), 'numpy.mean', 'np.mean', (['wn_min_depths'], {}), '(wn_min_depths)\n', (4588, 4603), True, 'import numpy as np\n'), ((4678, 4699), 'numpy.std', 'np.std', 
(['wn_min_depths'], {}), '(wn_min_depths)\n', (4684, 4699), True, 'import numpy as np\n'), ((4779, 4804), 'numpy.mean', 'np.mean', (['wn_max_depths_nn'], {}), '(wn_max_depths_nn)\n', (4786, 4804), True, 'import numpy as np\n'), ((4885, 4909), 'numpy.std', 'np.std', (['wn_max_depths_nn'], {}), '(wn_max_depths_nn)\n', (4891, 4909), True, 'import numpy as np\n'), ((4991, 5016), 'numpy.mean', 'np.mean', (['wn_min_depths_nn'], {}), '(wn_min_depths_nn)\n', (4998, 5016), True, 'import numpy as np\n'), ((5097, 5121), 'numpy.std', 'np.std', (['wn_min_depths_nn'], {}), '(wn_min_depths_nn)\n', (5103, 5121), True, 'import numpy as np\n'), ((5204, 5229), 'numpy.mean', 'np.mean', (['wn_max_depths_vb'], {}), '(wn_max_depths_vb)\n', (5211, 5229), True, 'import numpy as np\n'), ((5310, 5334), 'numpy.std', 'np.std', (['wn_max_depths_vb'], {}), '(wn_max_depths_vb)\n', (5316, 5334), True, 'import numpy as np\n'), ((5416, 5441), 'numpy.mean', 'np.mean', (['wn_min_depths_vb'], {}), '(wn_min_depths_vb)\n', (5423, 5441), True, 'import numpy as np\n'), ((5522, 5546), 'numpy.std', 'np.std', (['wn_min_depths_vb'], {}), '(wn_min_depths_vb)\n', (5528, 5546), True, 'import numpy as np\n'), ((9793, 9881), 're.compile', 're.compile', (['"""^(?:(?:ah)|(?:eh)|(?:er)|(?:ew)|(?:hm)|(?:mm)|(?:uh)|(?:uhm)|(?:um))$"""'], {}), "(\n '^(?:(?:ah)|(?:eh)|(?:er)|(?:ew)|(?:hm)|(?:mm)|(?:uh)|(?:uhm)|(?:um))$')\n", (9803, 9881), False, 'import re\n'), ((9906, 9940), 're.compile', 're.compile', (['"""^(?:(?:uhm)|(?:um))$"""'], {}), "('^(?:(?:uhm)|(?:um))$')\n", (9916, 9940), False, 'import re\n'), ((9970, 10003), 're.compile', 're.compile', (['"""^(?:(?:ah)|(?:uh))$"""'], {}), "('^(?:(?:ah)|(?:uh))$')\n", (9980, 10003), False, 'import re\n'), ((16980, 16994), 'numpy.mean', 'np.mean', (['sylls'], {}), '(sylls)\n', (16987, 16994), True, 'import numpy as np\n'), ((17067, 17080), 'numpy.std', 'np.std', (['sylls'], {}), '(sylls)\n', (17073, 17080), True, 'import numpy as np\n'), ((21652, 21683), 'numpy.reshape', 
'np.reshape', (['sentiments', '(-1, 5)'], {}), '(sentiments, (-1, 5))\n', (21662, 21683), True, 'import numpy as np\n'), ((1709, 1743), 're.match', 're.match', (['"""^NN.*$"""', 'token_tuple[1]'], {}), "('^NN.*$', token_tuple[1])\n", (1717, 1743), False, 'import re\n'), ((2738, 2764), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['token_tuple[0]'], {}), '(token_tuple[0])\n', (2748, 2764), True, 'from nltk.corpus import wordnet as wn\n'), ((16321, 16340), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['sutt'], {}), '(sutt)\n', (16334, 16340), False, 'from nltk.tokenize import sent_tokenize\n'), ((16385, 16420), 'utils.lexicosyntactic.functions.numsyllables', 'functions.numsyllables', (['w', 'prondict'], {}), '(w, prondict)\n', (16407, 16420), False, 'from utils.lexicosyntactic import functions\n'), ((21171, 21256), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE', 'shell': '(True)'}), '(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True\n )\n', (21187, 21256), False, 'import subprocess\n'), ((21795, 21822), 'numpy.mean', 'np.mean', (['sentiments'], {'axis': '(0)'}), '(sentiments, axis=0)\n', (21802, 21822), True, 'import numpy as np\n'), ((21849, 21875), 'numpy.std', 'np.std', (['sentiments'], {'axis': '(0)'}), '(sentiments, axis=0)\n', (21855, 21875), True, 'import numpy as np\n'), ((21963, 21980), 'numpy.array', 'np.array', (['([0] * 5)'], {}), '([0] * 5)\n', (21971, 21980), True, 'import numpy as np\n'), ((1769, 1804), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['token_tuple[0]', 'wn.NOUN'], {}), '(token_tuple[0], wn.NOUN)\n', (1779, 1804), True, 'from nltk.corpus import wordnet as wn\n'), ((2222, 2256), 're.match', 're.match', (['"""^VB.*$"""', 'token_tuple[1]'], {}), "('^VB.*$', token_tuple[1])\n", (2230, 2256), False, 'import re\n'), ((7717, 7748), 'numpy.linalg.norm', 'np.linalg.norm', (['word_vectors[i]'], {}), '(word_vectors[i])\n', (7731, 7748), True, 'import numpy as np\n'), 
((7750, 7781), 'numpy.linalg.norm', 'np.linalg.norm', (['word_vectors[j]'], {}), '(word_vectors[j])\n', (7764, 7781), True, 'import numpy as np\n'), ((7937, 7960), 'math.isnan', 'math.isnan', (['cosine_dist'], {}), '(cosine_dist)\n', (7947, 7960), False, 'import math\n'), ((12493, 12514), 'math.log', 'math.log', (['total_words'], {}), '(total_words)\n', (12501, 12514), False, 'import math\n'), ((19216, 19228), 'utils.logger.get_logger', 'get_logger', ([], {}), '()\n', (19226, 19228), False, 'from utils.logger import get_logger\n'), ((21612, 21630), 'numpy.array', 'np.array', (['line[2:]'], {}), '(line[2:])\n', (21620, 21630), True, 'import numpy as np\n'), ((28951, 28985), 're.match', 're.match', (['"""^NN.*$"""', 'token_tuple[1]'], {}), "('^NN.*$', token_tuple[1])\n", (28959, 28985), False, 'import re\n'), ((2282, 2317), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['token_tuple[0]', 'wn.VERB'], {}), '(token_tuple[0], wn.VERB)\n', (2292, 2317), True, 'from nltk.corpus import wordnet as wn\n'), ((18501, 18513), 'utils.logger.get_logger', 'get_logger', ([], {}), '()\n', (18511, 18513), False, 'from utils.logger import get_logger\n'), ((23351, 23365), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (23363, 23365), False, 'import sys\n'), ((23378, 23390), 'utils.logger.get_logger', 'get_logger', ([], {}), '()\n', (23388, 23390), False, 'from utils.logger import get_logger\n'), ((23553, 23565), 'utils.logger.get_logger', 'get_logger', ([], {}), '()\n', (23563, 23565), False, 'from utils.logger import get_logger\n'), ((28493, 28539), 'utils.lexicosyntactic.functions.pos_treebank2wordnet', 'functions.pos_treebank2wordnet', (['token_tuple[1]'], {}), '(token_tuple[1])\n', (28523, 28539), False, 'from utils.lexicosyntactic import functions\n'), ((28653, 28702), 're.match', 're.match', (['"""^(NN|VB|JJ|RB|SYM).*$"""', 'token_tuple[1]'], {}), "('^(NN|VB|JJ|RB|SYM).*$', token_tuple[1])\n", (28661, 28702), False, 'import re\n'), ((28789, 28837), 're.match', 're.match', 
(['"""^(VB|JJ|RB|IN|CC).*$"""', 'token_tuple[1]'], {}), "('^(VB|JJ|RB|IN|CC).*$', token_tuple[1])\n", (28797, 28837), False, 'import re\n'), ((31542, 31575), 're.match', 're.match', (['"""^V.*$"""', 'token_tuple[1]'], {}), "('^V.*$', token_tuple[1])\n", (31550, 31575), False, 'import re\n'), ((21563, 21581), 'numpy.array', 'np.array', (['line[2:]'], {}), '(line[2:])\n', (21571, 21581), True, 'import numpy as np\n'), ((34499, 34534), 're.match', 're.match', (['"""^PRP.*$"""', 'token_tuple[1]'], {}), "('^PRP.*$', token_tuple[1])\n", (34507, 34534), False, 'import re\n'), ((34616, 34648), 're.match', 're.match', (['"""^DT$"""', 'token_tuple[1]'], {}), "('^DT$', token_tuple[1])\n", (34624, 34648), False, 'import re\n'), ((34733, 34767), 're.match', 're.match', (['"""^RB.*$"""', 'token_tuple[1]'], {}), "('^RB.*$', token_tuple[1])\n", (34741, 34767), False, 'import re\n'), ((34856, 34890), 're.match', 're.match', (['"""^JJ.*$"""', 'token_tuple[1]'], {}), "('^JJ.*$', token_tuple[1])\n", (34864, 34890), False, 'import re\n'), ((34985, 35017), 're.match', 're.match', (['"""^IN$"""', 'token_tuple[1]'], {}), "('^IN$', token_tuple[1])\n", (34993, 35017), False, 'import re\n'), ((35103, 35135), 're.match', 're.match', (['"""^CC$"""', 'token_tuple[1]'], {}), "('^CC$', token_tuple[1])\n", (35111, 35135), False, 'import re\n')] |
import tensorflow as tf
import numpy as np
import time
import cv2
import csv
import os
from utils import label_map_util
from utils import visualization_utils as vis_util
# ---- Experiment configuration --------------------------------------------
NUM_REC = 4  # index of the recording to process
PATH_TO_RECORDING = 'C:/Users/Dario/rec_'+str(NUM_REC)+'.mp4'
PATH_TO_GROUNDTRUTH = 'C:/Users/Dario/object_tracking/groundtruth/GT_rec_'+str(NUM_REC)+'.csv'
RECORDING_NAME = PATH_TO_RECORDING.split('/')[-1][:-4]  # file stem, e.g. 'rec_4'
NUM_CLASSES = 7  # number of classes in the detector's label map
PATH_TO_FROZEN_GRAPH = 'C:/Users/Dario/models/research/object_detection/frozen_inference_graph.pb'
PATH_TO_LABEL_MAP = 'C:/Users/Dario/models/research/object_detection/label_map.pbtxt'
SAVE_DATA = 1  # if truthy, write matched bounding boxes to CSV at end of run
BB_tracker = []  # accumulated rows [timestamp, xmin, ymin, xmax, ymax]
count_frames = 0  # frames processed so far
count_matches = 0  # ground-truth timestamps matched so far
fps = 0  # displayed frames-per-second estimate (updated every 5 frames)
TIME_THRESHOLD = 0.03  # NOTE(review): not referenced below; the loop hardcodes 0.034 -- confirm
NUM_TRACKER = 2  # index into tracker_types below (2 -> 'KCF')
tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
tracker_type = tracker_types[NUM_TRACKER]
def create_tracker(index):
    """Instantiate a fresh OpenCV tracker.

    Parameters
    ----------
    index : int
        Position in the module-level ``tracker_types`` list selecting the
        tracker algorithm to build.

    Returns
    -------
    cv2.Tracker
        A newly created (uninitialised) tracker object.

    Raises
    ------
    ValueError
        If ``tracker_types[index]`` is not a recognised tracker name.
        (The original implementation fell through and returned ``None``
        implicitly, deferring the failure to the first ``tracker.init``.)
    """
    tracker_type = tracker_types[index]
    print(tracker_type)
    # Map each supported name to the name of its cv2 factory function.
    # The attribute is looked up lazily via getattr so that trackers that
    # are missing from a given OpenCV build only fail when selected.
    factory_names = {
        'BOOSTING': 'TrackerBoosting_create',
        'MIL': 'TrackerMIL_create',
        'KCF': 'TrackerKCF_create',
        'TLD': 'TrackerTLD_create',
        'MEDIANFLOW': 'TrackerMedianFlow_create',
        'GOTURN': 'TrackerGOTURN_create',
        'MOSSE': 'TrackerMOSSE_create',
        'CSRT': 'TrackerCSRT_create',
    }
    try:
        factory_name = factory_names[tracker_type]
    except KeyError:
        raise ValueError('Unknown tracker type: %r' % tracker_type) from None
    return getattr(cv2, factory_name)()
# Build the initial tracker and open the recording for frame-by-frame reads.
tracker = create_tracker(NUM_TRACKER)
cap = cv2.VideoCapture(PATH_TO_RECORDING)
# Extract timestamps from groundtruth file.  Each data row is expected to
# carry a 'key=value' string in its first column; the value is the timestamp.
time_stamps = []
with open(PATH_TO_GROUNDTRUTH, 'r') as csvfile:
    csv_data = csv.reader(csvfile)
    for i, row in enumerate(csv_data):
        if i > 0:  # skip the header row
            time_stamps.append(row[0].split('=')[1])
# Sort numerically; the values stay strings, so float is used as sort key.
time_stamps = sorted(time_stamps, key = float)
def writeToCsv(data, file_name=None):
    """Write tracked bounding boxes to a CSV file.

    Parameters
    ----------
    data : iterable of sequences
        Rows of the form ``[timestamp, xmin, ymin, xmax, ymax]``.
    file_name : str, optional
        Destination path.  When omitted (the backward-compatible default),
        the historical path built from the module-level ``RECORDING_NAME``
        and ``tracker_type`` is used.
    """
    if file_name is None:
        file_name = 'C:/Users/Dario/'+RECORDING_NAME+'_BB_fused_'+tracker_type+'.csv'
    # newline='' lets the csv module control line endings itself (avoids
    # blank lines on Windows).
    with open(file_name, mode='w', newline='') as results_file:
        csv_writer = csv.writer(results_file, delimiter=',')
        csv_writer.writerow(['timestamp','xmin','ymin','xmax','ymax'])
        for row in data:
            csv_writer.writerow(row)
# reads the frozen graph (TF1-style serialized GraphDef) into a new Graph
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# Build the category index used to map detector class ids to display names.
label_map = label_map_util.load_labelmap(PATH_TO_LABEL_MAP)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Detection
# Main loop of the "fusion" tracker: run the TF object detector every 10th
# frame and bridge the frames in between with the OpenCV tracker.  Boxes
# whose frame time matches a ground-truth timestamp are collected in
# BB_tracker and optionally written to CSV at the end of the video.
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        while True:
            if count_frames == 0:
                start = time.time()
            # Refresh the displayed FPS estimate over a 5-frame window.
            if count_frames%5 == 0 and count_frames != 0:
                fps = 5/(time.time()-start_10)
                print(fps)
            if count_frames%5 == 0 or count_frames == 0:
                start_10 = time.time()
            # Read frame from camera
            ok, image_np = cap.read()
            if image_np is None:
                # End of video: report timing and optionally dump results.
                end = time.time()
                duration = (end-start)
                print("Program ran for {} sec.".format(duration))
                print("FPS: {}".format(count_frames/(end-start)))
                # Check number of matches
                if count_matches!=len(time_stamps):
                    print('WARNING: Not all timestamps were matched! Num: {}\n'.format(count_matches))
                elif SAVE_DATA:
                    print('\nINFO: End of video reached, writing to .csv file.\n')
                    writeToCsv(BB_tracker)
                cv2.destroyAllWindows()
                break
            (H, W) = image_np.shape[:2]
            # rec_5 was flipped in the annotation tool, so mirror it back
            if NUM_REC == 5:
                image_np = cv2.flip(image_np, 1)
                image_np = cv2.flip(image_np, 0)
            # run detector every 10th frame
            if count_frames%10 == 0:
                frame = np.copy(image_np)
                # The TF model expects RGB input; OpenCV delivers BGR.
                frame = cv2.cvtColor(np.copy(frame), cv2.COLOR_BGR2RGB)
                image_np_expanded = np.expand_dims(frame, axis=0)
                image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                scores = detection_graph.get_tensor_by_name('detection_scores:0')
                classes = detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = detection_graph.get_tensor_by_name(
                    'num_detections:0')
                # Actual detection.
                (boxes, scores, classes, num_detections) = sess.run(
                    [boxes, scores, classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                min_score_tresh = .5
                score = np.squeeze(scores)
                box = np.squeeze(boxes)
                # Calculate frame time (the recording runs at 30 fps)
                frame_time = count_frames/30
                # Find the last detection above threshold; convert its
                # normalized [ymin, xmin, ymax, xmax] box into the pixel
                # (x, y, w, h) tuple the OpenCV trackers expect.
                bbox = ()
                for i in range(box.shape[0]):
                    if score[i] > min_score_tresh:
                        box_det = box[i]
                        box_det[0] *= H
                        box_det[1] *= W
                        box_det[2] *= H
                        box_det[3] *= W
                        bbox = (int(box_det[1]), int(box_det[0]),
                                int(box_det[3]-box_det[1]),
                                int(box_det[2]-box_det[0]))
                if not len(bbox):
                    # No detection on this frame: fall back to the tracker.
                    ok, bbox = tracker.update(image_np)
                else:
                    # initialize a new tracker with the detected BB
                    del tracker
                    tracker = create_tracker(NUM_TRACKER)
                    ok = tracker.init(image_np, bbox)
                    ok, bbox = tracker.update(image_np)
            else:
                ok, bbox = tracker.update(image_np)
            # Clamp any negative coordinates to the image boundary.
            for i, val in enumerate(bbox):
                if val < 0:
                    lst = list(bbox)
                    lst[i] = 0
                    bbox = tuple(lst)
            if ok:
                # Tracking success: draw the current bounding box.
                p1 = (int(bbox[0]), int(bbox[1]))
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                cv2.rectangle(image_np, p1, p2, (255,0,0), 2, 1)
            else :
                # Tracking failure
                cv2.putText(image_np, "Tracking failure detected", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
            frame_time = count_frames/30
            try:
                stamp_time = float(time_stamps[count_matches])
            except:
                # All ground-truth timestamps already consumed.
                continue
            # Record the box when the frame time lies within roughly one
            # frame (~1/30 s) of the next ground-truth timestamp.
            difference = abs(frame_time-stamp_time)
            if difference<0.034:
                data_row = [time_stamps[count_matches],
                            bbox[0], bbox[1],
                            bbox[0]+bbox[2], bbox[1]+bbox[3]]
                BB_tracker.append(data_row)
                count_matches+=1
            # Display tracker type on frame
            cv2.putText(image_np, "Fusion Tracker", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2);
            # Display FPS on frame
            cv2.putText(image_np, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);
            # Display result
            cv2.imshow("Tracking", image_np)
            count_frames+=1
            # 'q' quits; 'p' pauses until any key is pressed.
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
            if cv2.waitKey(25) & 0xFF == ord('p'):
                cv2.waitKey(0)
| [
"cv2.TrackerBoosting_create",
"csv.reader",
"cv2.rectangle",
"cv2.TrackerMOSSE_create",
"cv2.imshow",
"cv2.TrackerGOTURN_create",
"numpy.copy",
"cv2.TrackerKCF_create",
"utils.label_map_util.convert_label_map_to_categories",
"cv2.TrackerMIL_create",
"tensorflow.GraphDef",
"cv2.destroyAllWindow... | [((1571, 1606), 'cv2.VideoCapture', 'cv2.VideoCapture', (['PATH_TO_RECORDING'], {}), '(PATH_TO_RECORDING)\n', (1587, 1606), False, 'import cv2\n'), ((2322, 2332), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2330, 2332), True, 'import tensorflow as tf\n'), ((2618, 2665), 'utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['PATH_TO_LABEL_MAP'], {}), '(PATH_TO_LABEL_MAP)\n', (2646, 2665), False, 'from utils import label_map_util\n'), ((2679, 2793), 'utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'NUM_CLASSES', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n NUM_CLASSES, use_display_name=True)\n', (2725, 2793), False, 'from utils import label_map_util\n'), ((2806, 2854), 'utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (2842, 2854), False, 'from utils import label_map_util\n'), ((1731, 1750), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (1741, 1750), False, 'import csv\n'), ((2387, 2400), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2398, 2400), True, 'import tensorflow as tf\n'), ((988, 1016), 'cv2.TrackerBoosting_create', 'cv2.TrackerBoosting_create', ([], {}), '()\n', (1014, 1016), False, 'import cv2\n'), ((1062, 1085), 'cv2.TrackerMIL_create', 'cv2.TrackerMIL_create', ([], {}), '()\n', (1083, 1085), False, 'import cv2\n'), ((1131, 1154), 'cv2.TrackerKCF_create', 'cv2.TrackerKCF_create', ([], {}), '()\n', (1152, 1154), False, 'import cv2\n'), ((1200, 1223), 'cv2.TrackerTLD_create', 'cv2.TrackerTLD_create', ([], {}), '()\n', (1221, 1223), False, 'import cv2\n'), ((1276, 1306), 'cv2.TrackerMedianFlow_create', 'cv2.TrackerMedianFlow_create', ([], {}), '()\n', (1304, 1306), False, 'import cv2\n'), ((1355, 1381), 'cv2.TrackerGOTURN_create', 'cv2.TrackerGOTURN_create', ([], {}), '()\n', (1379, 1381), False, 
'import cv2\n'), ((1429, 1454), 'cv2.TrackerMOSSE_create', 'cv2.TrackerMOSSE_create', ([], {}), '()\n', (1452, 1454), False, 'import cv2\n'), ((1501, 1525), 'cv2.TrackerCSRT_create', 'cv2.TrackerCSRT_create', ([], {}), '()\n', (1523, 1525), False, 'import cv2\n'), ((2105, 2144), 'csv.writer', 'csv.writer', (['results_file'], {'delimiter': '""","""'}), "(results_file, delimiter=',')\n", (2115, 2144), False, 'import csv\n'), ((2410, 2452), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['PATH_TO_FROZEN_GRAPH', '"""rb"""'], {}), "(PATH_TO_FROZEN_GRAPH, 'rb')\n", (2424, 2452), True, 'import tensorflow as tf\n'), ((2562, 2604), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (2581, 2604), True, 'import tensorflow as tf\n'), ((2912, 2945), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'detection_graph'}), '(graph=detection_graph)\n', (2922, 2945), True, 'import tensorflow as tf\n'), ((7612, 7716), 'cv2.putText', 'cv2.putText', (['image_np', '"""Fusion Tracker"""', '(100, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.75)', '(50, 170, 50)', '(2)'], {}), "(image_np, 'Fusion Tracker', (100, 20), cv2.FONT_HERSHEY_SIMPLEX,\n 0.75, (50, 170, 50), 2)\n", (7623, 7716), False, 'import cv2\n'), ((7907, 7939), 'cv2.imshow', 'cv2.imshow', (['"""Tracking"""', 'image_np'], {}), "('Tracking', image_np)\n", (7917, 7939), False, 'import cv2\n'), ((3040, 3051), 'time.time', 'time.time', ([], {}), '()\n', (3049, 3051), False, 'import time\n'), ((3287, 3298), 'time.time', 'time.time', ([], {}), '()\n', (3296, 3298), False, 'import time\n'), ((3431, 3442), 'time.time', 'time.time', ([], {}), '()\n', (3440, 3442), False, 'import time\n'), ((4003, 4026), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4024, 4026), False, 'import cv2\n'), ((4200, 4221), 'cv2.flip', 'cv2.flip', (['image_np', '(1)'], {}), '(image_np, 1)\n', (4208, 4221), False, 'import cv2\n'), ((4249, 4270), 'cv2.flip', 'cv2.flip', 
(['image_np', '(0)'], {}), '(image_np, 0)\n', (4257, 4270), False, 'import cv2\n'), ((4377, 4394), 'numpy.copy', 'np.copy', (['image_np'], {}), '(image_np)\n', (4384, 4394), True, 'import numpy as np\n'), ((4503, 4532), 'numpy.expand_dims', 'np.expand_dims', (['frame'], {'axis': '(0)'}), '(frame, axis=0)\n', (4517, 4532), True, 'import numpy as np\n'), ((5282, 5300), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (5292, 5300), True, 'import numpy as np\n'), ((5323, 5340), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (5333, 5340), True, 'import numpy as np\n'), ((6797, 6847), 'cv2.rectangle', 'cv2.rectangle', (['image_np', 'p1', 'p2', '(255, 0, 0)', '(2)', '(1)'], {}), '(image_np, p1, p2, (255, 0, 0), 2, 1)\n', (6810, 6847), False, 'import cv2\n'), ((6933, 7047), 'cv2.putText', 'cv2.putText', (['image_np', '"""Tracking failure detected"""', '(100, 80)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.75)', '(0, 0, 255)', '(2)'], {}), "(image_np, 'Tracking failure detected', (100, 80), cv2.\n FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)\n", (6944, 7047), False, 'import cv2\n'), ((8049, 8072), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8070, 8072), False, 'import cv2\n'), ((8162, 8176), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8173, 8176), False, 'import cv2\n'), ((4432, 4446), 'numpy.copy', 'np.copy', (['frame'], {}), '(frame)\n', (4439, 4446), True, 'import numpy as np\n'), ((7997, 8012), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (8008, 8012), False, 'import cv2\n'), ((8110, 8125), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (8121, 8125), False, 'import cv2\n'), ((3154, 3165), 'time.time', 'time.time', ([], {}), '()\n', (3163, 3165), False, 'import time\n')] |
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as plt
import argparse
import time
#https://matplotlib.org/examples/color/colormaps_reference.html
def getProbDisplay(contextlist, probabilitylist, file):
    """Render a per-token probability chart and save it to an image file.

    Convenience wrapper around :func:`getProbDisplay1` that converts both
    inputs to NumPy arrays first.

    Parameters
    ----------
    contextlist : sequence of str
        Tick labels (e.g. the tokens of a sentence).
    probabilitylist : sequence of float
        One probability per entry in ``contextlist``.
    file : str
        Base name for the saved figure.
    """
    contextlist = np.array(contextlist)
    probabilitylist = np.array(probabilitylist)
    # Bug fix: the original dropped ``file`` and always saved under the
    # default name "test"; forward it so the caller's file name is honoured.
    return getProbDisplay1(contextlist, probabilitylist, file)
def getProbDisplay1(contextlist, probabilitylist, file_name="test"):
    """Plot per-token probabilities as a colour-coded bar chart and save it.

    Parameters
    ----------
    contextlist : np.ndarray of str
        Tick labels, one per bar.
    probabilitylist : np.ndarray of float
        Probabilities used for bar colouring and for the colorbar scale.
    file_name : str, optional
        Base name of the output image; the current UNIX timestamp is
        appended so repeated calls do not overwrite each other.
    """
    y_pos = np.arange(len(contextlist))
    # Colour each bar by its max-normalised probability.
    colours = cm.Greens(probabilitylist / max(probabilitylist))
    # Bug fix: the scatter previously used ``c=probability``, a global name
    # defined only inside the __main__ demo (NameError when imported); it
    # must colour by the ``probabilitylist`` argument.
    p = plt.scatter(y_pos, probabilitylist, alpha=0.5, c=probabilitylist, cmap='Greens')
    plt.clf()
    # The (cleared) scatter serves only as the mappable for the colorbar.
    plt.colorbar(p)
    plt.bar(range(len(probabilitylist)), [1]*len(contextlist), color = colours)
    plt.xticks(y_pos, contextlist)
    #plt.ylabel('probability')
    #plt.title('Category probabilities')
    plt.savefig(file_name + str(int(time.time()))) # save the figure to file
if __name__ == '__main__':
    # Demo entry point: plot a fixed example sentence with hand-picked
    # per-token probabilities and save the resulting figure.
    parser = argparse.ArgumentParser(description='probability graph for SQuAD ')
    # Positional but optional: defaults to "test" when omitted.
    parser.add_argument('final_file', default="test",const="test", nargs='?',help='Image file , ex abc.png')
    args = parser.parse_args()
    sample=['Where','is','Paris','city','in','here']
    probability=[0.1, 0.05, 0.05, 0.2, 0.4, 0.2]
    getProbDisplay(sample, probability, args.final_file)
| [
"argparse.ArgumentParser",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.colorbar",
"time.time",
"numpy.array",
"matplotlib.pyplot.xticks"
] | [((243, 264), 'numpy.array', 'np.array', (['contextlist'], {}), '(contextlist)\n', (251, 264), True, 'import numpy as np\n'), ((285, 310), 'numpy.array', 'np.array', (['probabilitylist'], {}), '(probabilitylist)\n', (293, 310), True, 'import numpy as np\n'), ((550, 626), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_pos', 'probabilitylist'], {'alpha': '(0.5)', 'c': 'probability', 'cmap': '"""Greens"""'}), "(y_pos, probabilitylist, alpha=0.5, c=probability, cmap='Greens')\n", (561, 626), True, 'import matplotlib.pyplot as plt\n'), ((631, 640), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (638, 640), True, 'import matplotlib.pyplot as plt\n'), ((645, 660), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['p'], {}), '(p)\n', (657, 660), True, 'import matplotlib.pyplot as plt\n'), ((746, 776), 'matplotlib.pyplot.xticks', 'plt.xticks', (['y_pos', 'contextlist'], {}), '(y_pos, contextlist)\n', (756, 776), True, 'import matplotlib.pyplot as plt\n'), ((971, 1038), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""probability graph for SQuAD """'}), "(description='probability graph for SQuAD ')\n", (994, 1038), False, 'import argparse\n'), ((885, 896), 'time.time', 'time.time', ([], {}), '()\n', (894, 896), False, 'import time\n')] |
import streamlit as st
import pickle
import numpy as np
# import the model
# Load the trained prediction pipeline and the reference dataframe that
# supplies the selectable categorical values.  NOTE(review): pickle.load
# executes arbitrary code from the file -- only load trusted artifacts.
pipe = pickle.load(open('pipe.pkl', 'rb'))
mobile_df = pickle.load(open('mobile_df.pkl', 'rb'))
st.title("Mobile Price Predictor")
# One input widget per model feature; categorical choices come from the
# unique values observed in the reference dataframe.
brand = st.selectbox('Brand', mobile_df['brand'].unique())
ram = st.selectbox('RAM(in GB)', [1, 2, 3, 4, 6, 8, 12])
color = st.selectbox('Color', mobile_df['base_color'].unique())
processor = st.selectbox('Processor', mobile_df['processor'].unique())
rom = st.selectbox('ROM(in GB)', [8, 16, 32, 64, 128, 256, 512])
display_size = st.number_input('Display Size')
screen = st.selectbox('Screen Size', mobile_df['screen_size'].unique())
num_rear_camera = st.selectbox('rear camera', [1, 2, 3, 4])
front_camera = st.selectbox('front camera', [1, 2, 3])
ratings = st.number_input('ratings')
device = st.selectbox('Device', mobile_df["Device_type"].unique())
if st.button('Predict Price'):
    # Assemble the widget values into a single-row feature matrix in the
    # column order the pipeline expects, then show the predicted price.
    query = np.array(
        [brand, color, processor, screen, rom, ram, display_size, num_rear_camera, front_camera, ratings, device])
    query = query.reshape(1, 11)
    st.title("The predicted price of this configuration is " + str(int(pipe.predict(query)[0])))
| [
"streamlit.title",
"streamlit.button",
"numpy.array",
"streamlit.selectbox",
"streamlit.number_input"
] | [((181, 215), 'streamlit.title', 'st.title', (['"""Mobile Price Predictor"""'], {}), "('Mobile Price Predictor')\n", (189, 215), True, 'import streamlit as st\n'), ((285, 335), 'streamlit.selectbox', 'st.selectbox', (['"""RAM(in GB)"""', '[1, 2, 3, 4, 6, 8, 12]'], {}), "('RAM(in GB)', [1, 2, 3, 4, 6, 8, 12])\n", (297, 335), True, 'import streamlit as st\n'), ((480, 538), 'streamlit.selectbox', 'st.selectbox', (['"""ROM(in GB)"""', '[8, 16, 32, 64, 128, 256, 512]'], {}), "('ROM(in GB)', [8, 16, 32, 64, 128, 256, 512])\n", (492, 538), True, 'import streamlit as st\n'), ((555, 586), 'streamlit.number_input', 'st.number_input', (['"""Display Size"""'], {}), "('Display Size')\n", (570, 586), True, 'import streamlit as st\n'), ((679, 720), 'streamlit.selectbox', 'st.selectbox', (['"""rear camera"""', '[1, 2, 3, 4]'], {}), "('rear camera', [1, 2, 3, 4])\n", (691, 720), True, 'import streamlit as st\n'), ((737, 776), 'streamlit.selectbox', 'st.selectbox', (['"""front camera"""', '[1, 2, 3]'], {}), "('front camera', [1, 2, 3])\n", (749, 776), True, 'import streamlit as st\n'), ((788, 814), 'streamlit.number_input', 'st.number_input', (['"""ratings"""'], {}), "('ratings')\n", (803, 814), True, 'import streamlit as st\n'), ((889, 915), 'streamlit.button', 'st.button', (['"""Predict Price"""'], {}), "('Predict Price')\n", (898, 915), True, 'import streamlit as st\n'), ((930, 1049), 'numpy.array', 'np.array', (['[brand, color, processor, screen, rom, ram, display_size, num_rear_camera,\n front_camera, ratings, device]'], {}), '([brand, color, processor, screen, rom, ram, display_size,\n num_rear_camera, front_camera, ratings, device])\n', (938, 1049), True, 'import numpy as np\n')] |
import os
import argparse
import json
import pickle
import numpy as np
import pandas as pd
from tensorflow import keras
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
import bert # https://github.com/kpe/bert-for-tf2/
from langdetect import detect
from conversion import convert_examples_to_features, convert_text_to_examples
from pathlib import Path
import nltk
from nltk import tokenize
nltk.download('punkt')
parser = argparse.ArgumentParser()
parser.add_argument("--experiment_name", type=str, required=True, help="Experiment to get trained weights from")
parser.add_argument("--data_dir", type=str, default="data", help="Data directory.")
parser.add_argument("--log_dir", type=str, default="D:\\logs", help="Log directory.")
parser.add_argument("--num_examples", type=int, default=150, help="How many examples to classify.")
# read variables
ARGS = parser.parse_args()
experiment_name = ARGS.experiment_name
log_dir = ARGS.log_dir
with open(os.path.join(log_dir, experiment_name, 'config.json'), "r") as read_file:
config = json.load(read_file)
polarized = False # doesn't exist for all experiments. may be overwritten by next line
locals().update(config)
experiment_name = ARGS.experiment_name
num_examples = ARGS.num_examples
log_dir = ARGS.log_dir
data_dir = ARGS.data_dir
task = 'democrasci'
log_dir = os.path.join(log_dir, experiment_name)
data_dir = os.path.join(data_dir, task)
def my_detect(s):
try:
lang = detect(s)
except:
lang = "na"
return lang
def get_speeches(data_dir):
fn = os.path.join(data_dir, "speeches.csv")
if os.path.isfile(fn):
speeches = pd.read_csv(fn, index_col=0)
print("Reloaded speeches dataframe.")
else:
print("Creating speeches dataframe.")
speeches = pd.DataFrame(columns=['year', 'session_id', 'speech_id', 'speech'])
for year in np.arange(1900, 1981):
print('Processing year: ', year)
all_speeches_year = pickle.load(open(os.path.join(data_dir, "AB", year, "06_collectedinformation.pickle"), "rb") )
for session_id in all_speeches_year.keys():
for speech_id in all_speeches_year[session_id]['dict_speeches'].keys():
speech = all_speeches_year[session_id]['dict_speeches'][speech_id][1]
if detect(speech) == 'de':
speeches = speeches.append({'year': year, 'session_id': session_id, 'speech_id': speech_id, 'speech': speech}, ignore_index=True)
speeches.to_csv(os.path.join(data_dir, 'speeches.csv'))
return speeches
def get_sentences(data_dir):
fn = os.path.join(data_dir, "sentences.csv")
if os.path.isfile(fn):
sentences = pd.read_csv(fn, index_col=0)
print("Reloaded sentences dataframe.")
else:
print("Creating sentences dataframe.")
speeches = get_speeches(data_dir)
for index, row in speeches.iterrows():
sents = tokenize.sent_tokenize(row['speech'])
speeches.at[index, 'speech'] = sents
sentences = speeches.explode('speech')
sentences = sentences.sample(frac=1, random_state=0)
sentences.reset_index(drop=True, inplace=True)
sentences.rename(columns={'speech':'sentence'}, inplace=True)
sentences.drop(sentences.columns[sentences.columns.str.contains('unnamed',case = False)],axis = 1, inplace = True)
sentences['label'] = -1
sentences.to_csv(os.path.join(data_dir, 'sentences.csv'))
return sentences
def get_data(data_dir):
fn = os.path.join(data_dir, "test.csv")
if os.path.isfile(fn):
test = pd.read_csv(fn, index_col=0)
print("Reloaded test dataframe.")
else:
print('Creating test dataframe.')
sentences = get_sentences(data_dir)
num_pos = sum(sentences['label'] == 1)
num_neg = sum(sentences['label'] == 0)
num_neu = sum(sentences['label'] == 2)
if min(num_pos, num_neg, num_neu) < num_examples // 3:
print("Test set incomplete. Let's label some more.")
for index, row in sentences.iterrows():
if min(num_pos, num_neg, num_neu) >= num_examples // 3:
print('Test set complete.')
break
if row['label'] == -1:
print(row['sentence'])
l = input('Enter +/-/0/stop:')
if l == '+':
num_pos += 1
sentences.at[index, 'label'] = 1
elif l == '-':
num_neg += 1
sentences.at[index, 'label'] = 0
elif l == '0':
num_neu += 1
sentences.at[index, 'label'] = 2
elif l == 'stop':
break
else:
sentences.at[index, 'label'] = -2 # label so it won't be shown again
sentences.to_csv(os.path.join(data_dir, 'sentences.csv')) # store the labels in sentences, too, so we don't need to label things again
test = sentences[sentences.label >= 0]
test = test.groupby('label')
test = test.apply(lambda group: group.sample(num_examples // 3, random_state=0))
test.reset_index(drop=True, inplace=True)
test = test.drop(['year', 'session_id', 'speech_id'], axis=1)
if min(num_pos, num_neg, num_neu) >= num_examples // 3:
print('Writing test set to disk.')
test.to_csv(os.path.join(data_dir, 'test.csv'))
else:
print('Test set incomplete. Dataframe not written.')
return test
if __name__ == "__main__":
test = get_data(data_dir)
if num_categories == 2:
test = test[test.label != 2] # drop neutrals
bert_path = os.path.join(bert_base_path, model_name)
model_ckpt = os.path.join(bert_path, ckpt_name)
do_lower_case = model_name.find("uncased") != -1
bert.bert_tokenization.validate_case_matches_checkpoint(do_lower_case, model_ckpt)
vocab_file = os.path.join(bert_path, "vocab.txt")
tokenizer = bert.bert_tokenization.FullTokenizer(vocab_file, do_lower_case)
bert_params = bert.params_from_pretrained_ckpt(bert_path)
l_bert = bert.BertModelLayer.from_params(bert_params, name="bert")
in_id = keras.layers.Input(shape=(max_seq_length,), name="input_ids")
bert_output = l_bert(in_id)[:, 0, :]
dropout = keras.layers.Dropout(0.5)(bert_output)
dense = keras.layers.Dense(768, activation="relu")(dropout)
dropout = keras.layers.Dropout(0.5)(dense)
pred = keras.layers.Dense(num_categories, activation=None)(dropout)
model = keras.models.Model(inputs=in_id, outputs=pred)
opt = keras.optimizers.Nadam()
model.compile(loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=opt, metrics=['SparseCategoricalAccuracy'])
# bert.load_bert_weights(l_bert, model_ckpt)
model.load_weights(os.path.join(log_dir, 'best_model.h5'))
print("Reloaded best parameters.")
model.summary()
print('Converting sentences to examples.')
X = test['sentence'].to_list()
X = [" ".join(x.split()[0:max_seq_length]) for x in X]
X = np.array(X, dtype=object)[:, np.newaxis]
y = np.array(test.label)
test_examples = convert_text_to_examples(X, np.zeros(len(X)))
(
test_input_ids,
test_input_masks,
test_segment_ids,
test_labels,
) = convert_examples_to_features(
tokenizer, test_examples, max_seq_length=max_seq_length
)
print("Predicting.")
y_pred = model.predict(test_input_ids)
y_pred = np.argmax(y_pred, axis=1)
test['prediction'] = y_pred
BMAC = balanced_accuracy_score(y, y_pred)
matrix = confusion_matrix(y, y_pred)
print(matrix.diagonal()/matrix.sum(axis=1))
print(BMAC)
test.to_csv(os.path.join(log_dir, "predictions.csv"))
| [
"bert.BertModelLayer.from_params",
"bert.bert_tokenization.validate_case_matches_checkpoint",
"argparse.ArgumentParser",
"numpy.argmax",
"pandas.read_csv",
"tensorflow.keras.layers.Dense",
"conversion.convert_examples_to_features",
"os.path.isfile",
"numpy.arange",
"bert.bert_tokenization.FullToke... | [((439, 461), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (452, 461), False, 'import nltk\n'), ((471, 496), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (494, 496), False, 'import argparse\n'), ((1381, 1419), 'os.path.join', 'os.path.join', (['log_dir', 'experiment_name'], {}), '(log_dir, experiment_name)\n', (1393, 1419), False, 'import os\n'), ((1431, 1459), 'os.path.join', 'os.path.join', (['data_dir', 'task'], {}), '(data_dir, task)\n', (1443, 1459), False, 'import os\n'), ((1096, 1116), 'json.load', 'json.load', (['read_file'], {}), '(read_file)\n', (1105, 1116), False, 'import json\n'), ((1604, 1642), 'os.path.join', 'os.path.join', (['data_dir', '"""speeches.csv"""'], {}), "(data_dir, 'speeches.csv')\n", (1616, 1642), False, 'import os\n'), ((1654, 1672), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (1668, 1672), False, 'import os\n'), ((2698, 2737), 'os.path.join', 'os.path.join', (['data_dir', '"""sentences.csv"""'], {}), "(data_dir, 'sentences.csv')\n", (2710, 2737), False, 'import os\n'), ((2745, 2763), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (2759, 2763), False, 'import os\n'), ((3635, 3669), 'os.path.join', 'os.path.join', (['data_dir', '"""test.csv"""'], {}), "(data_dir, 'test.csv')\n", (3647, 3669), False, 'import os\n'), ((3677, 3695), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (3691, 3695), False, 'import os\n'), ((5935, 5975), 'os.path.join', 'os.path.join', (['bert_base_path', 'model_name'], {}), '(bert_base_path, model_name)\n', (5947, 5975), False, 'import os\n'), ((5993, 6027), 'os.path.join', 'os.path.join', (['bert_path', 'ckpt_name'], {}), '(bert_path, ckpt_name)\n', (6005, 6027), False, 'import os\n'), ((6085, 6171), 'bert.bert_tokenization.validate_case_matches_checkpoint', 'bert.bert_tokenization.validate_case_matches_checkpoint', (['do_lower_case', 'model_ckpt'], {}), '(do_lower_case,\n 
model_ckpt)\n', (6140, 6171), False, 'import bert\n'), ((6185, 6221), 'os.path.join', 'os.path.join', (['bert_path', '"""vocab.txt"""'], {}), "(bert_path, 'vocab.txt')\n", (6197, 6221), False, 'import os\n'), ((6238, 6301), 'bert.bert_tokenization.FullTokenizer', 'bert.bert_tokenization.FullTokenizer', (['vocab_file', 'do_lower_case'], {}), '(vocab_file, do_lower_case)\n', (6274, 6301), False, 'import bert\n'), ((6326, 6369), 'bert.params_from_pretrained_ckpt', 'bert.params_from_pretrained_ckpt', (['bert_path'], {}), '(bert_path)\n', (6358, 6369), False, 'import bert\n'), ((6383, 6440), 'bert.BertModelLayer.from_params', 'bert.BertModelLayer.from_params', (['bert_params'], {'name': '"""bert"""'}), "(bert_params, name='bert')\n", (6414, 6440), False, 'import bert\n'), ((6453, 6514), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(max_seq_length,)', 'name': '"""input_ids"""'}), "(shape=(max_seq_length,), name='input_ids')\n", (6471, 6514), False, 'from tensorflow import keras\n'), ((6804, 6850), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'in_id', 'outputs': 'pred'}), '(inputs=in_id, outputs=pred)\n', (6822, 6850), False, 'from tensorflow import keras\n'), ((6866, 6890), 'tensorflow.keras.optimizers.Nadam', 'keras.optimizers.Nadam', ([], {}), '()\n', (6888, 6890), False, 'from tensorflow import keras\n'), ((7422, 7442), 'numpy.array', 'np.array', (['test.label'], {}), '(test.label)\n', (7430, 7442), True, 'import numpy as np\n'), ((7620, 7710), 'conversion.convert_examples_to_features', 'convert_examples_to_features', (['tokenizer', 'test_examples'], {'max_seq_length': 'max_seq_length'}), '(tokenizer, test_examples, max_seq_length=\n max_seq_length)\n', (7648, 7710), False, 'from conversion import convert_examples_to_features, convert_text_to_examples\n'), ((7821, 7846), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (7830, 7846), True, 'import numpy as np\n'), ((7890, 7924), 
'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (7913, 7924), False, 'from sklearn.metrics import balanced_accuracy_score\n'), ((7938, 7965), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'y_pred'], {}), '(y, y_pred)\n', (7954, 7965), False, 'from sklearn.metrics import confusion_matrix\n'), ((1009, 1062), 'os.path.join', 'os.path.join', (['log_dir', 'experiment_name', '"""config.json"""'], {}), "(log_dir, experiment_name, 'config.json')\n", (1021, 1062), False, 'import os\n'), ((1508, 1517), 'langdetect.detect', 'detect', (['s'], {}), '(s)\n', (1514, 1517), False, 'from langdetect import detect\n'), ((1693, 1721), 'pandas.read_csv', 'pd.read_csv', (['fn'], {'index_col': '(0)'}), '(fn, index_col=0)\n', (1704, 1721), True, 'import pandas as pd\n'), ((1843, 1910), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['year', 'session_id', 'speech_id', 'speech']"}), "(columns=['year', 'session_id', 'speech_id', 'speech'])\n", (1855, 1910), True, 'import pandas as pd\n'), ((1933, 1954), 'numpy.arange', 'np.arange', (['(1900)', '(1981)'], {}), '(1900, 1981)\n', (1942, 1954), True, 'import numpy as np\n'), ((2785, 2813), 'pandas.read_csv', 'pd.read_csv', (['fn'], {'index_col': '(0)'}), '(fn, index_col=0)\n', (2796, 2813), True, 'import pandas as pd\n'), ((3712, 3740), 'pandas.read_csv', 'pd.read_csv', (['fn'], {'index_col': '(0)'}), '(fn, index_col=0)\n', (3723, 3740), True, 'import pandas as pd\n'), ((6570, 6595), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (6590, 6595), False, 'from tensorflow import keras\n'), ((6621, 6663), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(768)'], {'activation': '"""relu"""'}), "(768, activation='relu')\n", (6639, 6663), False, 'from tensorflow import keras\n'), ((6687, 6712), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (6707, 6712), False, 'from tensorflow 
import keras\n'), ((6731, 6782), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['num_categories'], {'activation': 'None'}), '(num_categories, activation=None)\n', (6749, 6782), False, 'from tensorflow import keras\n'), ((7103, 7141), 'os.path.join', 'os.path.join', (['log_dir', '"""best_model.h5"""'], {}), "(log_dir, 'best_model.h5')\n", (7115, 7141), False, 'import os\n'), ((7373, 7398), 'numpy.array', 'np.array', (['X'], {'dtype': 'object'}), '(X, dtype=object)\n', (7381, 7398), True, 'import numpy as np\n'), ((8051, 8091), 'os.path.join', 'os.path.join', (['log_dir', '"""predictions.csv"""'], {}), "(log_dir, 'predictions.csv')\n", (8063, 8091), False, 'import os\n'), ((3027, 3064), 'nltk.tokenize.sent_tokenize', 'tokenize.sent_tokenize', (["row['speech']"], {}), "(row['speech'])\n", (3049, 3064), False, 'from nltk import tokenize\n'), ((3531, 3570), 'os.path.join', 'os.path.join', (['data_dir', '"""sentences.csv"""'], {}), "(data_dir, 'sentences.csv')\n", (3543, 3570), False, 'import os\n'), ((6914, 6974), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (6956, 6974), False, 'from tensorflow import keras\n'), ((2591, 2629), 'os.path.join', 'os.path.join', (['data_dir', '"""speeches.csv"""'], {}), "(data_dir, 'speeches.csv')\n", (2603, 2629), False, 'import os\n'), ((5095, 5134), 'os.path.join', 'os.path.join', (['data_dir', '"""sentences.csv"""'], {}), "(data_dir, 'sentences.csv')\n", (5107, 5134), False, 'import os\n'), ((5642, 5676), 'os.path.join', 'os.path.join', (['data_dir', '"""test.csv"""'], {}), "(data_dir, 'test.csv')\n", (5654, 5676), False, 'import os\n'), ((2050, 2118), 'os.path.join', 'os.path.join', (['data_dir', '"""AB"""', 'year', '"""06_collectedinformation.pickle"""'], {}), "(data_dir, 'AB', year, '06_collectedinformation.pickle')\n", (2062, 2118), False, 'import os\n'), ((2385, 2399), 'langdetect.detect', 'detect', 
(['speech'], {}), '(speech)\n', (2391, 2399), False, 'from langdetect import detect\n')] |
use_cuda = True
from scvi.dataset.dataset import GeneExpressionDataset
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from scvi.harmonization.utils_chenling import trainSCANVI,trainVAE
from scvi.models import SCANVI
from scvi.inference.annotation import AlternateSemiSupervisedTrainer,SemiSupervisedTrainer
import numpy as np
from scvi.harmonization.utils_chenling import SubsetGenes
from scvi.harmonization.classification.scmap import SCMAP
def SCANVI_acc(gene_dataset:GeneExpressionDataset, plotname: str,pred1,pred2,coral1,coral2, rep='0'):
fname = '../%s/scanvi_acc.txt'%(plotname)
methods = ['scanvi','scanvi1','scanvi2']
f = open(fname, "w+")
f.write('method\t' + "%s\t" * len(gene_dataset.cell_types) % tuple(gene_dataset.cell_types) + "\n")
for i,method in enumerate(methods):
vae_posterior = trainVAE(gene_dataset,plotname,rep)
scanvi = SCANVI(gene_dataset.nb_genes, gene_dataset.n_batches, gene_dataset.n_labels, n_layers=2)
scanvi.load_state_dict(vae_posterior.model.state_dict(), strict=False)
if method=='scanvi1':
trainer_scanvi = AlternateSemiSupervisedTrainer(scanvi, gene_dataset, classification_ratio=10,
n_epochs_classifier=50, lr_classification=5 * 1e-3)
trainer_scanvi.labelled_set = trainer_scanvi.create_posterior(indices=(gene_dataset.batch_indices == 0))
trainer_scanvi.unlabelled_set = trainer_scanvi.create_posterior(indices=(gene_dataset.batch_indices == 1))
elif method=='scanvi2':
trainer_scanvi = AlternateSemiSupervisedTrainer(scanvi, gene_dataset, classification_ratio=10,
n_epochs_classifier=50, lr_classification=5 * 1e-3)
trainer_scanvi.labelled_set = trainer_scanvi.create_posterior(indices=(gene_dataset.batch_indices == 1))
trainer_scanvi.unlabelled_set = trainer_scanvi.create_posterior(indices=(gene_dataset.batch_indices == 0))
else:
trainer_scanvi = SemiSupervisedTrainer(scanvi, gene_dataset, classification_ratio=50,
n_epochs_classifier=1, lr_classification=5 * 1e-3)
trainer_scanvi.train(n_epochs=5)
labelled_idx = trainer_scanvi.labelled_set.indices
unlabelled_idx = trainer_scanvi.unlabelled_set.indices
full = trainer_scanvi.create_posterior(trainer_scanvi.model, gene_dataset, indices=np.arange(len(gene_dataset)))
labels, labels_pred = full.sequential().compute_predictions()
shared = set(labels[labelled_idx]).intersection(set(labels[unlabelled_idx]))
acc = [np.mean(labels_pred[unlabelled_idx][labels[unlabelled_idx] == i] == i) for i in np.unique(labels)]
for x in np.unique(labels):
if x not in [*shared] and method!='scanvi':
acc[x]=-1
f.write(method + "\t" + "%.4f\t" * len(acc) % tuple(acc) + "\n")
labels = gene_dataset.labels.ravel()
batch = gene_dataset.batch_indices.ravel()
acc = [np.mean(pred1[labels[batch == 1] == i] == i) for i in np.unique(labels)]
f.write('scmap1' + "\t" + "%.4f\t" * len(acc) % tuple(acc) + "\n")
acc = [np.mean(pred2[labels[batch == 0] == i] == i) for i in np.unique(labels)]
f.write('scmap2' + "\t" + "%.4f\t" * len(acc) % tuple(acc) + "\n")
acc = [np.mean(coral1[labels[batch == 1] == i] == i) for i in np.unique(labels)]
f.write('coral1' + "\t" + "%.4f\t" * len(acc) % tuple(acc) + "\n")
acc = [np.mean(coral2[labels[batch == 0] == i] == i) for i in np.unique(labels)]
f.write('coral2' + "\t" + "%.4f\t" * len(acc) % tuple(acc) + "\n")
f.close()
def labelpred(gene_dataset, dataset1, dataset2, plotname):
print("Starting scmap")
n_features = 500
scmap = SCMAP()
scmap.set_parameters(n_features=n_features)
scmap.fit_scmap_cluster(gene_dataset, plotname, batch=0)
pred1 = scmap.predict_scmap_cluster(gene_dataset, plotname, batch=1)
scmap = SCMAP()
scmap.set_parameters(n_features=n_features)
scmap.fit_scmap_cluster(gene_dataset, plotname, batch=1)
pred2 = scmap.predict_scmap_cluster(gene_dataset, plotname, batch=0)
dataset1, dataset2, gene_dataset = SubsetGenes(dataset1, dataset2, gene_dataset, plotname)
batch = gene_dataset.batch_indices.ravel()
labels = gene_dataset.labels.ravel()
scaling_factor = gene_dataset.X.mean(axis=1)
norm_X = gene_dataset.X / scaling_factor.reshape(len(scaling_factor), 1)
index_0 = np.where(batch == 0)[0]
index_1 = np.where(batch == 1)[0]
X1 = np.log(1 + norm_X[index_0])
X2 = np.log(1 + norm_X[index_1])
from scvi.harmonization.classification.CORAL import CORAL
coral = CORAL()
coral1 = coral.fit_predict(X1, labels[index_0], X2)
coral = CORAL()
coral2 = coral.fit_predict(X2, labels[index_1], X1)
return pred1,pred2, coral1, coral2
from scvi.dataset.muris_tabula import TabulaMuris
plotname = 'MarrowTM'
dataset1 = TabulaMuris('facs', save_path='/data/yosef2/scratch/chenling/scanvi_data/')
dataset2 = TabulaMuris('droplet', save_path='/data/yosef2/scratch/chenling/scanvi_data/')
dataset1.subsample_genes(dataset1.nb_genes)
dataset2.subsample_genes(dataset2.nb_genes)
gene_dataset = GeneExpressionDataset.concat_datasets(dataset1, dataset2)
#
pred1, pred2, coral1, coral2 = labelpred(gene_dataset, dataset1, dataset2, plotname)
# SCANVI_acc(gene_dataset, plotname, pred1,pred2,coral1,coral2)
#
#
# plotname = 'PBMC8KCITE'
# from scvi.harmonization.utils_chenling import get_matrix_from_dir,assign_label
# from scvi.dataset.pbmc import PbmcDataset
# from scvi.dataset.dataset import GeneExpressionDataset
# dataset1 = PbmcDataset(filter_out_de_genes=False)
# dataset1.update_cells(dataset1.batch_indices.ravel()==0)
# dataset1.subsample_genes(dataset1.nb_genes)
# save_path='/data/yosef2/scratch/chenling/scanvi_data/'
# count, geneid, cellid = get_matrix_from_dir(save_path + 'cite')
# count = count.T.tocsr()
# seurat = np.genfromtxt(save_path + 'cite/cite.seurat.labels', dtype='str', delimiter=',')
# cellid = np.asarray([x.split('-')[0] for x in cellid])
# labels_map = [0, 0, 1, 2, 3, 4, 5, 6]
# labels = seurat[1:, 4]
# cell_type = ['CD4 T cells', 'NK cells', 'CD14+ Monocytes', 'B cells','CD8 T cells', 'FCGR3A+ Monocytes', 'Other']
# dataset2 = assign_label(cellid, geneid, labels_map, count, cell_type, seurat)
# set(dataset2.cell_types).intersection(set(dataset2.cell_types))
# dataset1.subsample_genes(dataset1.nb_genes)
# dataset2.subsample_genes(dataset2.nb_genes)
# gene_dataset = GeneExpressionDataset.concat_datasets(dataset1, dataset2)
# pred1, pred2, coral1, coral2 = labelpred(gene_dataset, dataset1, dataset2, plotname)
# SCANVI_acc(gene_dataset, plotname, pred1,pred2,coral1,coral2)
# plotname='Pancreas'
# import pickle as pkl
# f = open('../%s/gene_dataset.pkl'%plotname, 'rb')
# gene_dataset, dataset1, dataset2 = pkl.load(f)
# f.close()
# dataset1, dataset2, gene_dataset = SubsetGenes(dataset1, dataset2, gene_dataset, plotname)
# pred1, pred2, coral1, coral2 = labelpred(gene_dataset, dataset1, dataset2, plotname)
# SCANVI_acc(gene_dataset, plotname, pred1,pred2,coral1,coral2)
#
#
# plotname = 'DentateGyrus'
# from scvi.dataset.dataset import GeneExpressionDataset
# from scvi.dataset.MouseBrain import DentateGyrus10X, DentateGyrusC1
# dataset1= DentateGyrus10X()
# dataset1.subsample_genes(dataset1.nb_genes)
# dataset2 = DentateGyrusC1()
# dataset2.subsample_genes(dataset2.nb_genes)
# gene_dataset = GeneExpressionDataset.concat_datasets(dataset1,dataset2)
# dataset1, dataset2, gene_dataset = SubsetGenes(dataset1, dataset2, gene_dataset, plotname)
# pred1, pred2, coral1, coral2 = labelpred(gene_dataset, dataset1, dataset2, plotname)
# SCANVI_acc(gene_dataset, plotname, pred1,pred2,coral1,coral2)
#
| [
"scvi.inference.annotation.SemiSupervisedTrainer",
"scvi.harmonization.classification.CORAL.CORAL",
"numpy.log",
"scvi.models.SCANVI",
"scvi.inference.annotation.AlternateSemiSupervisedTrainer",
"scvi.harmonization.classification.scmap.SCMAP",
"numpy.unique",
"scvi.dataset.muris_tabula.TabulaMuris",
... | [((5051, 5126), 'scvi.dataset.muris_tabula.TabulaMuris', 'TabulaMuris', (['"""facs"""'], {'save_path': '"""/data/yosef2/scratch/chenling/scanvi_data/"""'}), "('facs', save_path='/data/yosef2/scratch/chenling/scanvi_data/')\n", (5062, 5126), False, 'from scvi.dataset.muris_tabula import TabulaMuris\n'), ((5138, 5216), 'scvi.dataset.muris_tabula.TabulaMuris', 'TabulaMuris', (['"""droplet"""'], {'save_path': '"""/data/yosef2/scratch/chenling/scanvi_data/"""'}), "('droplet', save_path='/data/yosef2/scratch/chenling/scanvi_data/')\n", (5149, 5216), False, 'from scvi.dataset.muris_tabula import TabulaMuris\n'), ((5320, 5377), 'scvi.dataset.dataset.GeneExpressionDataset.concat_datasets', 'GeneExpressionDataset.concat_datasets', (['dataset1', 'dataset2'], {}), '(dataset1, dataset2)\n', (5357, 5377), False, 'from scvi.dataset.dataset import GeneExpressionDataset\n'), ((3862, 3869), 'scvi.harmonization.classification.scmap.SCMAP', 'SCMAP', ([], {}), '()\n', (3867, 3869), False, 'from scvi.harmonization.classification.scmap import SCMAP\n'), ((4064, 4071), 'scvi.harmonization.classification.scmap.SCMAP', 'SCMAP', ([], {}), '()\n', (4069, 4071), False, 'from scvi.harmonization.classification.scmap import SCMAP\n'), ((4293, 4348), 'scvi.harmonization.utils_chenling.SubsetGenes', 'SubsetGenes', (['dataset1', 'dataset2', 'gene_dataset', 'plotname'], {}), '(dataset1, dataset2, gene_dataset, plotname)\n', (4304, 4348), False, 'from scvi.harmonization.utils_chenling import SubsetGenes\n'), ((4648, 4675), 'numpy.log', 'np.log', (['(1 + norm_X[index_0])'], {}), '(1 + norm_X[index_0])\n', (4654, 4675), True, 'import numpy as np\n'), ((4685, 4712), 'numpy.log', 'np.log', (['(1 + norm_X[index_1])'], {}), '(1 + norm_X[index_1])\n', (4691, 4712), True, 'import numpy as np\n'), ((4787, 4794), 'scvi.harmonization.classification.CORAL.CORAL', 'CORAL', ([], {}), '()\n', (4792, 4794), False, 'from scvi.harmonization.classification.CORAL import CORAL\n'), ((4863, 4870), 
'scvi.harmonization.classification.CORAL.CORAL', 'CORAL', ([], {}), '()\n', (4868, 4870), False, 'from scvi.harmonization.classification.CORAL import CORAL\n'), ((883, 920), 'scvi.harmonization.utils_chenling.trainVAE', 'trainVAE', (['gene_dataset', 'plotname', 'rep'], {}), '(gene_dataset, plotname, rep)\n', (891, 920), False, 'from scvi.harmonization.utils_chenling import trainSCANVI, trainVAE\n'), ((936, 1028), 'scvi.models.SCANVI', 'SCANVI', (['gene_dataset.nb_genes', 'gene_dataset.n_batches', 'gene_dataset.n_labels'], {'n_layers': '(2)'}), '(gene_dataset.nb_genes, gene_dataset.n_batches, gene_dataset.n_labels,\n n_layers=2)\n', (942, 1028), False, 'from scvi.models import SCANVI\n'), ((2842, 2859), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (2851, 2859), True, 'import numpy as np\n'), ((3116, 3160), 'numpy.mean', 'np.mean', (['(pred1[labels[batch == 1] == i] == i)'], {}), '(pred1[labels[batch == 1] == i] == i)\n', (3123, 3160), True, 'import numpy as np\n'), ((3271, 3315), 'numpy.mean', 'np.mean', (['(pred2[labels[batch == 0] == i] == i)'], {}), '(pred2[labels[batch == 0] == i] == i)\n', (3278, 3315), True, 'import numpy as np\n'), ((3426, 3471), 'numpy.mean', 'np.mean', (['(coral1[labels[batch == 1] == i] == i)'], {}), '(coral1[labels[batch == 1] == i] == i)\n', (3433, 3471), True, 'import numpy as np\n'), ((3582, 3627), 'numpy.mean', 'np.mean', (['(coral2[labels[batch == 0] == i] == i)'], {}), '(coral2[labels[batch == 0] == i] == i)\n', (3589, 3627), True, 'import numpy as np\n'), ((4577, 4597), 'numpy.where', 'np.where', (['(batch == 0)'], {}), '(batch == 0)\n', (4585, 4597), True, 'import numpy as np\n'), ((4615, 4635), 'numpy.where', 'np.where', (['(batch == 1)'], {}), '(batch == 1)\n', (4623, 4635), True, 'import numpy as np\n'), ((1163, 1298), 'scvi.inference.annotation.AlternateSemiSupervisedTrainer', 'AlternateSemiSupervisedTrainer', (['scanvi', 'gene_dataset'], {'classification_ratio': '(10)', 'n_epochs_classifier': '(50)', 
'lr_classification': '(5 * 0.001)'}), '(scanvi, gene_dataset, classification_ratio=\n 10, n_epochs_classifier=50, lr_classification=5 * 0.001)\n', (1193, 1298), False, 'from scvi.inference.annotation import AlternateSemiSupervisedTrainer, SemiSupervisedTrainer\n'), ((2726, 2796), 'numpy.mean', 'np.mean', (['(labels_pred[unlabelled_idx][labels[unlabelled_idx] == i] == i)'], {}), '(labels_pred[unlabelled_idx][labels[unlabelled_idx] == i] == i)\n', (2733, 2796), True, 'import numpy as np\n'), ((3170, 3187), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (3179, 3187), True, 'import numpy as np\n'), ((3325, 3342), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (3334, 3342), True, 'import numpy as np\n'), ((3481, 3498), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (3490, 3498), True, 'import numpy as np\n'), ((3637, 3654), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (3646, 3654), True, 'import numpy as np\n'), ((1641, 1776), 'scvi.inference.annotation.AlternateSemiSupervisedTrainer', 'AlternateSemiSupervisedTrainer', (['scanvi', 'gene_dataset'], {'classification_ratio': '(10)', 'n_epochs_classifier': '(50)', 'lr_classification': '(5 * 0.001)'}), '(scanvi, gene_dataset, classification_ratio=\n 10, n_epochs_classifier=50, lr_classification=5 * 0.001)\n', (1671, 1776), False, 'from scvi.inference.annotation import AlternateSemiSupervisedTrainer, SemiSupervisedTrainer\n'), ((2101, 2225), 'scvi.inference.annotation.SemiSupervisedTrainer', 'SemiSupervisedTrainer', (['scanvi', 'gene_dataset'], {'classification_ratio': '(50)', 'n_epochs_classifier': '(1)', 'lr_classification': '(5 * 0.001)'}), '(scanvi, gene_dataset, classification_ratio=50,\n n_epochs_classifier=1, lr_classification=5 * 0.001)\n', (2122, 2225), False, 'from scvi.inference.annotation import AlternateSemiSupervisedTrainer, SemiSupervisedTrainer\n'), ((2806, 2823), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (2815, 2823), True, 'import 
numpy as np\n')] |
"""
Python versions of functions for testing purposes etc.
"""
import numpy as np
def python_mvnpdf(data, means, covs):
from pymc import mv_normal_cov_like as pdf_func
results = []
for i, datum in enumerate(data):
for j, cov in enumerate(covs):
mean = means[j]
results.append(pdf_func(datum, mean, cov))
return np.array(results).reshape((len(data), len(covs))).squeeze()
def python_sample_discrete(pmfs, draws=None):
T, K = pmfs.shape
output = np.empty(T, dtype=np.int32)
if draws is None:
draws = np.random.rand(T)
# rescale
pmfs = (pmfs.T / pmfs.sum(1)).T
for i in xrange(T):
the_sum = 0
draw = draws[i]
for j in xrange(K):
the_sum += pmfs[i, j]
if the_sum >= draw:
output[i] = j
break
return output
if __name__ == '__main__':
pmfs = np.random.randn(20, 5)
pmfs = (pmfs.T - pmfs.min(1)).T
| [
"numpy.random.randn",
"numpy.empty",
"numpy.array",
"numpy.random.rand",
"pymc.mv_normal_cov_like"
] | [((504, 531), 'numpy.empty', 'np.empty', (['T'], {'dtype': 'np.int32'}), '(T, dtype=np.int32)\n', (512, 531), True, 'import numpy as np\n'), ((913, 935), 'numpy.random.randn', 'np.random.randn', (['(20)', '(5)'], {}), '(20, 5)\n', (928, 935), True, 'import numpy as np\n'), ((570, 587), 'numpy.random.rand', 'np.random.rand', (['T'], {}), '(T)\n', (584, 587), True, 'import numpy as np\n'), ((322, 348), 'pymc.mv_normal_cov_like', 'pdf_func', (['datum', 'mean', 'cov'], {}), '(datum, mean, cov)\n', (330, 348), True, 'from pymc import mv_normal_cov_like as pdf_func\n'), ((362, 379), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (370, 379), True, 'import numpy as np\n')] |
"""Beam search decoder for Seq2Seq model
Python version decoder which treat Seq2Seq models(attention, transformer, CTC)
as acoustic model. Record inner states of Seq2Seq models in token and execute beam
search or argmax decoding algorithm.
"""
import numpy as np
from absl import logging
class Token:
"""Token used in token passing decode algorithm.
The token can be linked by another token or None.
The token record the cost and inner packed states
used in model
"""
def __init__(self, acoustic_cost, prev_tok=None,
cur_label=None, inner_packed_states=None):
self.prev_tok = prev_tok
self.cur_label = cur_label
self.inner_packed_states = inner_packed_states
if prev_tok is not None:
self.cost = prev_tok.cost + acoustic_cost
else:
self.cost = acoustic_cost
self.rescaled_cost = -1.0
class BeamSearchDecoder:
    """Beam search decoder for seq2seq models.

    Runs token-passing beam search over output labels, using a
    caller-supplied one-step inference callback as the scoring model.
    """

    def __init__(self, max_active=4, min_active=0, beam=30.0,
                 sos=3650, eos=3650, max_seq_len=100, max_active_local=4):
        """Init decoder
        Args:
            max_active: max number of tokens kept alive after one step
            min_active: min number of tokens kept alive after one step
            beam: beam width; tokens costing more than best + beam are pruned
            sos: id of start symbol of sentence
            eos: id of end symbol of sentence
            max_seq_len: max length of sentence
            max_active_local: max number of expansions generated from one token
        """
        self.cur_toks = []
        self.prev_toks = []
        self.completed_token_pool = []
        self.num_steps_decoded = -1
        self.max_active = max_active
        self.min_active = min_active
        assert(self.max_active >= self.min_active)
        self.beam = beam
        self.sos = sos
        self.eos = eos
        self.max_seq_len = max_seq_len
        self.max_active_local = max_active_local

    def init_decoding(self, initial_packed_states):
        """Init decoding states for every input utterance
        Args:
            initial_packed_states: initial packed states for callback function
        """
        self.cur_toks = []
        self.prev_toks = []
        self.completed_token_pool = []
        self.num_steps_decoded = 0
        # Seed the search with a single token holding only the sos label.
        tok = Token(0.0, None, [self.sos], initial_packed_states)
        self.cur_toks.append(tok)

    def decode(self, encoder_outputs, initial_packed_states, inference_one_step_fn):
        """using seq2seq model and WFST graph to decode input utterance
        Args:
            encoder_outputs: outputs of encoder for encoder-decoder structure model.
                or just CTC outputs for ctc model.
            inference_one_step_fn: callback function of seq2seq model. the function
                take encoder_outputs, input label and inner states,
                then produce logscores and inner states.
                the function's signature is: logscores, packed_inner_states =
                fn(encoder_outputs, label_input, packed_inner_states)
            initial_packed_states: initial packed states for inference_one_step_fn callback function
        """
        self.init_decoding(initial_packed_states)
        # Expand one label position per iteration until no token survives
        # or the maximum sequence length is reached.
        while not self.end_detect():
            self.prev_toks = self.cur_toks
            self.cur_toks = []
            self.process_emitting(encoder_outputs, inference_one_step_fn)

    def end_detect(self):
        """Return True when decoding should stop: either no token is still
        active or the maximum sequence length has been reached."""
        if self.cur_toks and self.num_steps_decoded < self.max_seq_len:
            return False
        else:
            return True

    def process_emitting(self, encoder_outputs, inference_one_step_fn):
        """Process one step emitting states using callback function
        Args:
            encoder_outputs: encoder outputs
            inference_one_step_fn: callback function
        """
        # Gather every live hypothesis and its model state for one batched call.
        cand_seqs = []
        inner_packed_states_array = []
        for tok in self.prev_toks:
            cand_seqs.append(tok.cur_label)
            inner_packed_states_array.append(tok.inner_packed_states)
        weight_cutoff = self.get_cutoff()
        all_log_scores, inner_packed_states_array = inference_one_step_fn(encoder_outputs,
                cand_seqs, inner_packed_states_array)
        for idx, tok in enumerate(self.prev_toks):
            # Prune tokens that fall outside the beam cutoff.
            if tok.cost <= weight_cutoff:
                # If eos is the single best label, finish this hypothesis.
                if self.eos == np.argmax(all_log_scores[idx]):
                    self.deal_completed_token(tok,all_log_scores[idx][self.eos])
                    continue
                # Negate log-scores so that a smaller value means better.
                log_costs = np.array([-score for score in all_log_scores[idx]])
                if self.max_active_local is None:
                    # No local limit: expand every label.
                    reserved_idx = [idx for idx in range(len(log_costs))]
                else:
                    k = self.max_active_local
                    if k > len(log_costs):
                        k = len(log_costs)
                    # NOTE(review): [:k-1] keeps only k-1 candidates; [:k]
                    # would keep max_active_local of them — confirm whether
                    # dropping one here is intended.
                    reserved_idx = np.argpartition(log_costs, k-1)[:k-1]
                for next_idx in reserved_idx:
                    label = next_idx
                    if label == self.eos:
                        self.deal_completed_token(tok, all_log_scores[idx][self.eos])
                    else:
                        # Extend the hypothesis with this label; note the new
                        # token reuses this step's packed inner states.
                        new_tok = Token(log_costs[next_idx], tok, tok.cur_label+[label],
                                inner_packed_states_array[idx])
                        self.cur_toks.append(new_tok)
        self.num_steps_decoded += 1

    def get_cutoff(self):
        """get cutoff used in this step
        Returns:
            beam_cutoff: beam cutoff
        """
        costs = np.array([tok.cost for tok in self.prev_toks])
        best_cost = np.min(costs)
        beam_cutoff = best_cost + self.beam
        min_active_cutoff = float('inf')
        max_active_cutoff = float('inf')
        # Tighten the cutoff when more than max_active tokens are alive.
        if len(costs) > self.max_active:
            k = self.max_active
            max_active_cutoff = costs[np.argpartition(costs, k-1)[k-1]]
        if max_active_cutoff < beam_cutoff:
            return max_active_cutoff
        # Loosen the cutoff so that at least min_active tokens survive.
        if len(costs) > self.min_active:
            k = self.min_active
            if k == 0:
                min_active_cutoff = best_cost
            else:
                min_active_cutoff = costs[np.argpartition(costs, k-1)[k-1]]
        if min_active_cutoff > beam_cutoff:
            return min_active_cutoff
        else:
            return beam_cutoff

    def deal_completed_token(self, tok, eos_score):
        """deal completed token and rescale scores
        Args:
            tok: the completed token
            eos_score: acoustic log-score of eos for this token
        """
        # Length-normalise so short and long hypotheses compare fairly.
        tok.rescaled_cost = (tok.cost + (-eos_score))/self.num_steps_decoded
        self.completed_token_pool.append(tok)

    def get_best_path(self):
        """get decoding result in best completed path
        Returns:
            ans: id array of decoding results
        """
        if not self.completed_token_pool:
            # No hypothesis ever emitted eos: fall back to the cheapest
            # still-active token.
            logging.warning('do not encounter eos during decoding, return best uncompleted token ')
            best_uncompleted_tok = self.cur_toks[0]
            for tok in self.cur_toks:
                if best_uncompleted_tok.cost > tok.cost:
                    best_uncompleted_tok = tok
            # Drop the leading sos label before returning.
            best_uncompleted_tok.cur_label.pop(0)
            return best_uncompleted_tok.cur_label
        else:
            # Pick the completed hypothesis with the best length-normalised cost.
            best_completed_tok = self.completed_token_pool[0]
            for tok in self.completed_token_pool:
                if best_completed_tok.rescaled_cost > tok.rescaled_cost:
                    best_completed_tok = tok
            # Drop the leading sos label before returning.
            best_completed_tok.cur_label.pop(0)
            return best_completed_tok.cur_label
| [
"numpy.argmax",
"absl.logging.warning",
"numpy.argpartition",
"numpy.min",
"numpy.array"
] | [((5703, 5749), 'numpy.array', 'np.array', (['[tok.cost for tok in self.prev_toks]'], {}), '([tok.cost for tok in self.prev_toks])\n', (5711, 5749), True, 'import numpy as np\n'), ((5770, 5783), 'numpy.min', 'np.min', (['costs'], {}), '(costs)\n', (5776, 5783), True, 'import numpy as np\n'), ((7050, 7142), 'absl.logging.warning', 'logging.warning', (['"""do not encounter eos during decoding, return best uncompleted token """'], {}), "(\n 'do not encounter eos during decoding, return best uncompleted token ')\n", (7065, 7142), False, 'from absl import logging\n'), ((4637, 4690), 'numpy.array', 'np.array', (['[(-score) for score in all_log_scores[idx]]'], {}), '([(-score) for score in all_log_scores[idx]])\n', (4645, 4690), True, 'import numpy as np\n'), ((4467, 4497), 'numpy.argmax', 'np.argmax', (['all_log_scores[idx]'], {}), '(all_log_scores[idx])\n', (4476, 4497), True, 'import numpy as np\n'), ((6021, 6050), 'numpy.argpartition', 'np.argpartition', (['costs', '(k - 1)'], {}), '(costs, k - 1)\n', (6036, 6050), True, 'import numpy as np\n'), ((5019, 5052), 'numpy.argpartition', 'np.argpartition', (['log_costs', '(k - 1)'], {}), '(log_costs, k - 1)\n', (5034, 5052), True, 'import numpy as np\n'), ((6338, 6367), 'numpy.argpartition', 'np.argpartition', (['costs', '(k - 1)'], {}), '(costs, k - 1)\n', (6353, 6367), True, 'import numpy as np\n')] |
# Authors: <NAME> <<EMAIL>>
# License: MIT
import pytest
import numpy as np
from ..rls import RLS
def test_rls_start():
    """Verify the initial parameters of a freshly constructed RLS filter."""
    filt = RLS(filter_order=5, forgetting_factor=0.999, wscm_factor=1e3)
    state = filt.get_params()
    # The scalar hyper-parameters come back unchanged.
    assert state[:3] == (5, 0.999, 1e3)
    # The covariance matrix starts as wscm_factor * I ...
    assert (1e3 * np.eye(5) == state[3]).all()
    # ... and the weights as a zero column vector.
    assert (np.zeros((5, 1)) == state[4]).all()
def test_rls_start_with_param_errors():
    """Each invalid constructor argument must raise with the right message."""
    # (constructor kwargs, expected exception type, expected message)
    bad_inits = [
        (dict(filter_order=5.0, forgetting_factor=0.999, wscm_factor=1e3),
         TypeError, 'The filter order must be an integer.'),
        (dict(filter_order=5, forgetting_factor='0.99', wscm_factor=1e3),
         TypeError, 'The forgetting factor must be a number.'),
        (dict(filter_order=5, forgetting_factor=0.99, wscm_factor='1e3'),
         TypeError, 'The weighted sample covariance matrix factor'
                    ' must be a number.'),
        (dict(filter_order=0, forgetting_factor=0.99, wscm_factor=1e3),
         ValueError, 'The filter order must be positive.'),
        (dict(filter_order=1, forgetting_factor=0, wscm_factor=1e3),
         ValueError, 'The forgetting error must be in (0,1].'),
        (dict(filter_order=1, forgetting_factor=1, wscm_factor=0.9),
         ValueError, 'The weighted sample covariance matrix factor'
                     ' must be greater or equal to 1.'),
    ]
    for kwargs, exc_type, message in bad_inits:
        with pytest.raises(exc_type) as err:
            RLS(**kwargs)
        assert str(err.value) == message
def test_rls_fit_online():
    """fit() must validate its inputs and update the weights on success."""
    estimator = RLS(filter_order=5, forgetting_factor=0.999, wscm_factor=1e3)
    shape_msg = ('The input vector must be a column numpy array'
                 ' with dimensions 5x1.')
    # A scalar input is rejected by type.
    with pytest.raises(TypeError) as err:
        estimator.fit(10, 1)
    assert str(err.value) == shape_msg
    # A 1-D array of the wrong length is rejected by value.
    with pytest.raises(ValueError) as err:
        estimator.fit(np.array([1, 2, 3]), 1)
    assert str(err.value) == shape_msg
    # The target output must be a number.
    with pytest.raises(TypeError) as err:
        estimator.fit(np.array([1, 2, 3, 4, 5]).reshape((5, 1)), '1')
    assert str(err.value) == 'The output must be numeric.'
    # A valid sample moves every weight away from the all-zero start.
    estimator.fit(np.array([1, 2, 3, 4, 5]).reshape((5, 1)), 1)
    assert (estimator.weights_ != np.zeros((5, 1))).all()
| [
"numpy.zeros",
"numpy.eye",
"pytest.raises",
"numpy.array"
] | [((488, 512), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (501, 512), False, 'import pytest\n'), ((675, 699), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (688, 699), False, 'import pytest\n'), ((864, 888), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (877, 888), False, 'import pytest\n'), ((1109, 1134), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1122, 1134), False, 'import pytest\n'), ((1292, 1317), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1305, 1317), False, 'import pytest\n'), ((1476, 1501), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1489, 1501), False, 'import pytest\n'), ((1831, 1855), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1844, 1855), False, 'import pytest\n'), ((2028, 2053), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2041, 2053), False, 'import pytest\n'), ((2243, 2267), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2256, 2267), False, 'import pytest\n'), ((2078, 2097), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2086, 2097), True, 'import numpy as np\n'), ((369, 385), 'numpy.zeros', 'np.zeros', (['(5, 1)'], {}), '((5, 1))\n', (377, 385), True, 'import numpy as np\n'), ((2412, 2437), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2420, 2437), True, 'import numpy as np\n'), ((2486, 2502), 'numpy.zeros', 'np.zeros', (['(5, 1)'], {}), '((5, 1))\n', (2494, 2502), True, 'import numpy as np\n'), ((327, 336), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (333, 336), True, 'import numpy as np\n'), ((2292, 2317), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2300, 2317), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.