code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""Utility functions."""
import h5py
import numpy as np
from torch.utils import data
def save_dict_h5py(data, fname):
    """Write every entry of ``data`` into an HDF5 file.

    Args:
        data: dict mapping dataset names to numpy arrays.
        fname: destination path; an existing file is overwritten ('w' mode).
    """
    with h5py.File(fname, 'w') as hf:
        for key, value in data.items():
            hf.create_dataset(key, data=value)
def load_dict_h5py(fname):
    """Read an HDF5 file back into a dict of fully-loaded numpy arrays.

    Args:
        fname: path of the HDF5 file to read.

    Returns:
        dict mapping each dataset name to its array (``[:]`` forces a copy
        into memory so the file handle can be closed).
    """
    with h5py.File(fname, 'r') as hf:
        return {key: hf[key][:] for key in hf.keys()}
def to_float(np_array):
    """Return ``np_array`` as a new float32 numpy array."""
    return np.asarray(np_array).astype(np.float32)
class TrajectoryDataset(data.Dataset):
    """Create dataset of (o_t, a_t) trajectories from replay buffer."""

    def __init__(self, hdf5_file):
        """
        Args:
            hdf5_file (string): Path to the hdf5 file that contains experience
                buffer
        """
        self.experience_buffer = load_dict_h5py(hdf5_file)

    def __len__(self):
        return len(self.experience_buffer['actions'])

    def __getitem__(self, idx):
        # One sample = observation cast to float32 plus its raw action.
        buffer = self.experience_buffer
        return {
            'obs': to_float(buffer['observations'][idx]),
            'action': buffer['actions'][idx],
        }
| [
"h5py.File",
"numpy.array",
"torch.utils.data.keys"
] | [((621, 657), 'numpy.array', 'np.array', (['np_array'], {'dtype': 'np.float32'}), '(np_array, dtype=np.float32)\n', (629, 657), True, 'import numpy as np\n'), ((195, 216), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (204, 216), False, 'import h5py\n'), ((243, 254), 'torch.utils.data.keys', 'data.keys', ([], {}), '()\n', (252, 254), False, 'from torch.utils import data\n'), ((432, 453), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (441, 453), False, 'import h5py\n')] |
# --------------------------------------------------------
# Face Datasets
# Licensed under The MIT License [see LICENSE for details]
# Copyright 2019 smarsu. All Rights Reserved.
# --------------------------------------------------------
import os.path as osp
import numpy as np
class Dataset(object):
    """The base class of dataset.

    Subclasses must provide `size` and populate:
        self._train_datas: numpy array, [n], e.g. image paths.
        self._train_labels: numpy array, [n], per-sample labels (boxes).
    """
    def __init__(self):
        pass

    @property
    def size(self):
        """Return the number of train datas."""
        raise NotImplementedError

    def train_datas_debug(self, batch_size):
        """Return a single, deterministic batch for debugging.

        Unlike `train_datas`, nothing is shuffled: the first `batch_size`
        samples are returned as one [datas, labels] pair.

        Args:
            batch_size: int, > 0

        Returns:
            list containing one [datas, labels] pair.
        """
        if not isinstance(batch_size, int):
            raise ValueError('In Dataset, batch_size should be int, get '
                             '{}'.format(type(batch_size)))
        if batch_size <= 0:
            raise ValueError('In Dataset, batch_size should larger equal to '
                             '1, get {}'.format(batch_size))
        # NOTE: the unused `indices` local from the original version was
        # removed; this method never shuffles.
        datas = []
        # for label, we have box and landmark which is 0.
        datas.append([self._train_datas[:batch_size],
                      self._train_labels[:batch_size]])
        return datas

    def train_datas(self, batch_size):
        """Return the shuffled train data split into full batches.

        Samples are shuffled once per call; the trailing remainder that
        does not fill a whole batch is dropped (and also truncated from
        self._train_datas / self._train_labels).

        Args:
            batch_size: int, > 0

        Returns:
            list of [datas, labels] pairs, one per batch.
        """
        if not isinstance(batch_size, int):
            raise ValueError('In Dataset, batch_size should be int, get '
                             '{}'.format(type(batch_size)))
        if batch_size <= 0:
            raise ValueError('In Dataset, batch_size should larger equal to '
                             '1, get {}'.format(batch_size))
        indices = list(range(self.size))
        np.random.shuffle(indices)
        epoch_size = self.size // batch_size * batch_size
        self._train_datas = self._train_datas[indices][:epoch_size]  # [epoch_size, ...]
        self._train_labels = self._train_labels[indices][:epoch_size]  # [epoch_size, ...]
        datas = []
        for i in range(self.size // batch_size):
            # for label, we have box and landmark which is 0.
            datas.append([self._train_datas[i*batch_size:(i+1)*batch_size],
                          self._train_labels[i*batch_size:(i+1)*batch_size]])
        return datas

    def merge(self, other):
        """Merge the other datas to self.

        Args:
            other: Dataset
        """
        self._train_datas = np.concatenate(
            [self._train_datas, other._train_datas], 0)
        self._train_labels = np.concatenate(
            [self._train_labels, other._train_labels], 0)
class WiderFace(Dataset):
    """Wider Face detection dataset.
    Parses a ground-truth index file into image paths (self._train_datas)
    and per-image box arrays (self._train_labels); shuffling/batching is
    inherited from `Dataset`.
    """
    def __init__(self, train_image_path, label_path, value_image_path=None,
                 test_image_path=None):
        """
        TODO(smarsu): Add way to read `value_image_path` and `test_image_path`.
        Add way to read `value_label_path` and `test_label_path`.
        Args:
            train_image_path: str, the path of train images.
            label_path: str, path of the ground-truth label file.
        """
        # Maps full image path -> np.array of its boxes; filled while parsing.
        self._data_map = {}
        self.train_image_path = train_image_path
        self.label_path = label_path
        self.train_label_path = self.label_path
        self._train_datas, self._train_labels = self._read_train_datas()
    @property
    def size(self):
        """Return the number of train datas.
        Assert the size of self._train_datas and self._train_labels is equal.
        """
        return len(self._train_datas)
    def data_map(self, key):
        """Return the parsed box array for `key` (a full image path)."""
        if key not in self._data_map:
            raise KeyError('{} not in the data map.'.format(key))
        return self._data_map[key]
    def _real_image_path(self, path):
        """Get real path of image.
        self.train_image_path + '/' + path
        Args:
            path: str, the image name(id) of labels.
        """
        return osp.join(self.train_image_path, path)
    def _read_train_datas(self):
        """The special way to read wider face labels.
        Args:
            label_path: str, (read from self.train_label_path)
        """
        with open(self.train_label_path, 'r') as fb:
            lines = fb.readlines()
        return self._parse_raw_labels(lines)
    def _parse_raw_labels(self, lines):
        """Parse raw str lines to python object.
        Args:
            lines: list of str, with the structure of
                File name
                Number of bounding box
                x1, y1, w, h, blur, expression, illumination, invalid, occlusion, pose
        Returns:
            images: numpy array, [n], image paths
            labels: numpy array, [n, 4], [x1, y1, x2, y2]
        """
        images = []
        labels = []
        idx = 0
        while idx < len(lines):
            # Block layout: one image-path line, one count line, then
            # `num` box lines.
            image_path = lines[idx].strip()
            images.append(self._real_image_path(image_path))
            idx += 1
            num = int(lines[idx])
            idx += 1
            labels_ = []
            for _ in range(num):
                # Only the box coords are kept; the attribute flags
                # (blur, pose, ...) are parsed and discarded.
                x1, y1, w, h, blur, expression, illumination, invalid, \
                    occlusion, pose = [int(v)
                                       for v in lines[idx].strip().split()]
                x2, y2 = x1 + w - 1, y1 + h - 1  # -1 to get the read x2, y2
                labels_.append([x1, y1, x2, y2])
                idx += 1
            labels.append(np.array(labels_))
            self._data_map[self._real_image_path(image_path)] = np.array(labels_)
        # NOTE(review): box counts differ per image, so np.array(labels) is
        # likely a ragged object array rather than [n, 4] — confirm.
        return np.array(images), np.array(labels)
if __name__ == '__main__':
    import time
    # Test wider face dataset
    # Smoke test: load Wider Face, iterate batches of 32 and time it.
    # NOTE(review): dataset paths are machine-specific; adjust before running.
    wider = WiderFace('/datasets/wider/images',
                      '/datasets/wider/wider_face_split/wider_face_train_bbx_gt.txt')
    t1 = time.time()
    for data, label in wider.train_datas(32):
        print(data, label)
    t2 = time.time()
    print('Time for read wider dataset:', t2 - t1)  # 2.467153787612915s with `print`
    print(type(wider._train_datas))
    print(type(wider._train_labels))
| [
"numpy.random.shuffle",
"time.time",
"numpy.array",
"os.path.join",
"numpy.concatenate"
] | [((6278, 6289), 'time.time', 'time.time', ([], {}), '()\n', (6287, 6289), False, 'import time\n'), ((6375, 6386), 'time.time', 'time.time', ([], {}), '()\n', (6384, 6386), False, 'import time\n'), ((2074, 2100), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2091, 2100), True, 'import numpy as np\n'), ((2836, 2894), 'numpy.concatenate', 'np.concatenate', (['[self._train_datas, other._train_datas]', '(0)'], {}), '([self._train_datas, other._train_datas], 0)\n', (2850, 2894), True, 'import numpy as np\n'), ((2939, 2999), 'numpy.concatenate', 'np.concatenate', (['[self._train_labels, other._train_labels]', '(0)'], {}), '([self._train_labels, other._train_labels], 0)\n', (2953, 2999), True, 'import numpy as np\n'), ((4356, 4393), 'os.path.join', 'osp.join', (['self.train_image_path', 'path'], {}), '(self.train_image_path, path)\n', (4364, 4393), True, 'import os.path as osp\n'), ((5982, 5999), 'numpy.array', 'np.array', (['labels_'], {}), '(labels_)\n', (5990, 5999), True, 'import numpy as np\n'), ((6016, 6032), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (6024, 6032), True, 'import numpy as np\n'), ((6034, 6050), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (6042, 6050), True, 'import numpy as np\n'), ((5896, 5913), 'numpy.array', 'np.array', (['labels_'], {}), '(labels_)\n', (5904, 5913), True, 'import numpy as np\n')] |
# Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numpy.lib.format import open_memmap
from scipy.spatial import Delaunay
import argparse
def find_graph_edges(x):
    """Build facial-graph edges by Delaunay-triangulating the landmarks.

    Args:
        x: numpy array shaped (N, C, T, V, M); only the first sample's
            first frame and first person (x[0, :, 0, :, 0]) is used.

    Returns:
        list of (node, node) tuples: the three sides of every Delaunay
        triangle, plus 51 edges linking each node i+1 to master node 17.
    """
    points = np.transpose(x[0, :, 0, :, 0])
    print(points.shape)
    triangulation = Delaunay(points)
    triangles = triangulation.simplices
    print(triangles.shape)
    edges = []
    for a, b, c in triangles:
        edges.extend([(a, b), (a, c), (b, c)])
    # connect the master node (nose) to all other nodes
    edges.extend((i + 1, 17) for i in range(51))
    return edges
def gen_muscle_data(data, muscle_path):
    """Generate facial muscle data from facial landmarks.

    Args:
        data: numpy array shaped (N, C, T, V, M) of landmark coordinates.
        muscle_path: output path for the memory-mapped .npy file.

    Returns:
        float32 numpy memmap shaped (N, C, T, len(edges), M) where each
        slot holds the coordinate difference between an edge's endpoints.
    """
    num_samples, channels, frames, num_landmarks, num_persons = data.shape
    edges = find_graph_edges(data)
    muscle_count = len(edges)
    out = open_memmap(
        muscle_path, dtype='float32', mode='w+',
        shape=(num_samples, channels, frames, muscle_count, num_persons))
    # Seed the tensor with the raw landmarks (these slots are later
    # overwritten by the per-edge loop below, which covers every index).
    out[:, :, :, :num_landmarks, :] = data
    for idx, (src, dst) in enumerate(edges):
        # Edge vector between the two 1-based landmark endpoints.
        out[:, :, :, idx, :] = data[:, :, :, src - 1, :] - data[:, :, :, dst - 1, :]
    return out
if __name__ == '__main__':
    # CLI driver: converts landmark .npy files into muscle .npy files.
    parser = argparse.ArgumentParser(description='Facial muscle data generator.')
    parser.add_argument('--landmark_data_folder', default='./data/CASIA_10fold/')
    parser.add_argument('--muscle_data_folder', default='./data/muscle_data/')
    parser.add_argument('--dataset_name', default='CASIA')
    arg = parser.parse_args()
    part = ['Train', 'Val']
    for p in part:
        # CASIA / CK+ are stored as 10 cross-validation folds; AFEW is one file.
        if arg.dataset_name == 'CASIA' or arg.dataset_name == 'CK+':
            for i in range(10):
                landmark_path = arg.landmark_data_folder + '/{}/{}_{}.npy'.format(arg.dataset_name, p, i)
                landmark_data = np.load(landmark_path)
                muscle_path = arg.muscle_data_folder + '/{}/{}_muscle_{}.npy'.format(arg.dataset_name, p, i)
                muscle_data = gen_muscle_data(landmark_data, muscle_path)
        elif arg.dataset_name == 'AFEW':
            landmark_path = arg.landmark_data_folder + '/{}/{}.npy'.format(arg.dataset_name, p)
            landmark_data = np.load(landmark_path)
            muscle_path = arg.muscle_data_folder + '/{}/{}_muscle.npy'.format(arg.dataset_name, p)
            muscle_data = gen_muscle_data(landmark_data, muscle_path)
| [
"numpy.load",
"argparse.ArgumentParser",
"numpy.transpose",
"numpy.lib.format.open_memmap",
"scipy.spatial.Delaunay"
] | [((744, 774), 'numpy.transpose', 'np.transpose', (['x[0, :, 0, :, 0]'], {}), '(x[0, :, 0, :, 0])\n', (756, 774), True, 'import numpy as np\n'), ((809, 825), 'scipy.spatial.Delaunay', 'Delaunay', (['points'], {}), '(points)\n', (817, 825), False, 'from scipy.spatial import Delaunay\n'), ((1412, 1498), 'numpy.lib.format.open_memmap', 'open_memmap', (['muscle_path'], {'dtype': '"""float32"""', 'mode': '"""w+"""', 'shape': '(N, C, T, V_muscle, M)'}), "(muscle_path, dtype='float32', mode='w+', shape=(N, C, T,\n V_muscle, M))\n", (1423, 1498), False, 'from numpy.lib.format import open_memmap\n'), ((1813, 1881), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Facial muscle data generator."""'}), "(description='Facial muscle data generator.')\n", (1836, 1881), False, 'import argparse\n'), ((2418, 2440), 'numpy.load', 'np.load', (['landmark_path'], {}), '(landmark_path)\n', (2425, 2440), True, 'import numpy as np\n'), ((2789, 2811), 'numpy.load', 'np.load', (['landmark_path'], {}), '(landmark_path)\n', (2796, 2811), True, 'import numpy as np\n')] |
import itertools
import numpy
import six
from chainer.backends import cuda
from chainer.utils.conv import get_conv_outsize
from chainer.utils import conv_nd_kernel
def as_tuple(x, n):
    """Return ``x`` as an ``n``-tuple.

    A scalar is repeated ``n`` times; an indexable sequence is converted
    to a tuple (its length must already equal ``n``).
    """
    if not hasattr(x, '__getitem__'):
        return (x,) * n
    assert len(x) == n
    return tuple(x)
def im2col_nd_cpu(img, ksize, stride, pad, pval=0, cover_all=False):
    """Extract N-dimensional convolution patches on CPU (im2col).
    Args:
        img: numpy array of shape (n, c, d_1, ..., d_N).
        ksize, stride, pad: length-N tuples of per-dimension kernel size,
            stride and padding.
        pval: constant value used to pad around the image.
        cover_all: forwarded to ``get_conv_outsize``.
    Returns:
        numpy array of shape (n, c, k_1, ..., k_N, out_1, ..., out_N).
    """
    n, c = img.shape[0:2]  # (n, c, d_1, d_2, ..., d_N)
    dims = img.shape[2:]
    ndim = len(dims)
    assert ndim == len(ksize) == len(stride) == len(pad)
    outs = tuple(get_conv_outsize(d, k, s, p, cover_all)
                 for (d, k, s, p) in zip(dims, ksize, stride, pad))
    assert all(out > 0 for out in outs), 'Output sizes should be positive.'
    # Pad around image.  The extra ``s - 1`` on the trailing edge keeps the
    # strided slices below in-bounds for every kernel offset.
    pad_width = ((0, 0), (0, 0)) + tuple(
        (p, p + s - 1) for (s, p) in zip(stride, pad))
    img = numpy.pad(img, pad_width, mode='constant', constant_values=(pval,))
    # Make patch array with which we will compute correlation with filter.
    # shape: (n, c, k_1, k_2, ..., k_N, out_1, out_2, ..., out_N)
    shape = (n, c) + ksize + outs
    col = numpy.ndarray(shape, dtype=img.dtype)
    # Fill the patch array: one strided copy per kernel offset combination.
    colon = slice(None)
    for kxs in itertools.product(*[six.moves.range(k) for k in ksize]):
        # col[:, :, kx_1, kx_2, ..., kx_N, :, :, ..., :]
        col_index = (colon, colon) + kxs + (colon,) * ndim
        # img[:, :, kx_1:kx_lim_1:s_1, ..., kx_N:kx_lim_N:s_N]
        kx_lims = tuple(kx + s * out
                        for (kx, s, out) in zip(kxs, stride, outs))
        img_index = (colon, colon) + tuple(
            slice(kx, kx_lim, s)
            for (kx, kx_lim, s) in zip(kxs, kx_lims, stride))
        col[col_index] = img[img_index]
    return col
def im2col_nd_gpu(img, ksize, stride, pad, cover_all=False):
    """Extract N-dimensional convolution patches on GPU (im2col).
    Same contract as ``im2col_nd_cpu`` but runs a generated elementwise
    CUDA kernel over a cupy array instead of strided numpy copies.
    """
    n, c = img.shape[0:2]  # (n, c, d_1, d_2, ..., d_N)
    dims = img.shape[2:]
    ndim = len(dims)
    assert ndim == len(ksize) == len(stride) == len(pad)
    outs = tuple(get_conv_outsize(d, k, s, p, cover_all)
                 for (d, k, s, p) in zip(dims, ksize, stride, pad))
    assert all(out > 0 for out in outs), 'Output sizes should be positive.'
    # col_shape: (n, c, k_1, k_2, ..., k_N, out_1, out_2, ..., out_N)
    shape = (n, c) + ksize + outs
    col = cuda.cupy.empty(shape, dtype=img.dtype)
    # The kernel source is generated per ``ndim`` and filled elementwise.
    in_params, out_params, operation, name = \
        conv_nd_kernel.Im2colNDKernel.generate(ndim)
    cuda.elementwise(in_params, out_params, operation, name)(
        img.reduced_view(), *(dims + outs + ksize + stride + pad + (col,)))
    return col
def col2im_nd_cpu(col, stride, pad, dims):
    """Inverse of ``im2col_nd_cpu``: accumulate patches back into an image.
    Args:
        col: numpy array of shape (n, c, k_1, ..., k_N, out_1, ..., out_N).
        stride, pad: length-N tuples of per-dimension stride and padding.
        dims: target spatial dimensions (d_1, ..., d_N).
    Returns:
        numpy array of shape (n, c, d_1, ..., d_N); overlapping patch
        contributions are summed.
    """
    n, c = col.shape[:2]  # (n, c, kx_1, ..., kx_N, out_1, ..., out_N)
    mid = (len(col.shape) - 2) // 2 + 2
    ksize = col.shape[2:mid]
    outs = col.shape[mid:]
    colon = slice(None)
    assert len(outs) == len(ksize) == len(stride) == len(pad) == len(dims)
    # Image with padded size.
    img_shape = (n, c) + tuple(d + 2 * p + s - 1
                               for (d, p, s) in zip(dims, pad, stride))
    img = numpy.zeros(img_shape, dtype=col.dtype)
    # Scatter-add each kernel-offset slab back into the padded image.
    for kxs in itertools.product(*[six.moves.range(k) for k in ksize]):
        # (:, :, kx_1:kx_lim_1:s_1, ..., kx_N:kx_lim_N:s_N)
        kx_lims = tuple(kx + s * out
                        for (kx, s, out) in zip(kxs, stride, outs))
        img_index = (colon, colon) + tuple(
            slice(kx, kx_lim, s)
            for (kx, kx_lim, s) in zip(kxs, kx_lims, stride))
        # (:, :, kx_1, kx_2, ..., kx_N, :, :, ..., :)
        col_index = (colon, colon) + kxs + (colon,) * len(outs)
        img[img_index] += col[col_index]
    # Crop the padding off: (:, :, p_1:d_1 + p_1, ..., p_N:d_N + p_N)
    img_index = (colon, colon) + tuple(
        slice(p, d + p) for (p, d) in zip(pad, dims))
    return img[img_index]
def col2im_nd_gpu(col, stride, pad, dims):
    """Inverse of ``im2col_nd_gpu``: accumulate patches back on the GPU.
    Same contract as ``col2im_nd_cpu`` but uses a generated elementwise
    CUDA kernel over cupy arrays.
    """
    n, c = col.shape[:2]  # (n, c, k_1, ..., k_N, out_1, ..., out_N)
    mid = (len(col.shape) - 2) // 2 + 2
    ksize = col.shape[2:mid]
    outs = col.shape[mid:]
    ndim = len(dims)
    assert len(outs) == len(ksize) == len(stride) == len(pad) == ndim
    img_shape = (n, c) + dims  # (n, c, d_1, d_2, ..., d_N)
    img = cuda.cupy.empty(img_shape, dtype=col.dtype)
    # The kernel source is generated per ``ndim`` and filled elementwise.
    in_params, out_params, operation, name = \
        conv_nd_kernel.Col2imNDKernel.generate(ndim)
    cuda.elementwise(in_params, out_params, operation, name)(
        col.reduced_view(), *(dims + outs + ksize + stride + pad + (img,)))
    return img
| [
"numpy.pad",
"chainer.utils.conv_nd_kernel.Col2imNDKernel.generate",
"six.moves.range",
"chainer.utils.conv.get_conv_outsize",
"numpy.zeros",
"chainer.utils.conv_nd_kernel.Im2colNDKernel.generate",
"numpy.ndarray",
"chainer.backends.cuda.elementwise",
"chainer.backends.cuda.cupy.empty"
] | [((860, 927), 'numpy.pad', 'numpy.pad', (['img', 'pad_width'], {'mode': '"""constant"""', 'constant_values': '(pval,)'}), "(img, pad_width, mode='constant', constant_values=(pval,))\n", (869, 927), False, 'import numpy\n'), ((1114, 1151), 'numpy.ndarray', 'numpy.ndarray', (['shape'], {'dtype': 'img.dtype'}), '(shape, dtype=img.dtype)\n', (1127, 1151), False, 'import numpy\n'), ((2299, 2338), 'chainer.backends.cuda.cupy.empty', 'cuda.cupy.empty', (['shape'], {'dtype': 'img.dtype'}), '(shape, dtype=img.dtype)\n', (2314, 2338), False, 'from chainer.backends import cuda\n'), ((2395, 2439), 'chainer.utils.conv_nd_kernel.Im2colNDKernel.generate', 'conv_nd_kernel.Im2colNDKernel.generate', (['ndim'], {}), '(ndim)\n', (2433, 2439), False, 'from chainer.utils import conv_nd_kernel\n'), ((3068, 3107), 'numpy.zeros', 'numpy.zeros', (['img_shape'], {'dtype': 'col.dtype'}), '(img_shape, dtype=col.dtype)\n', (3079, 3107), False, 'import numpy\n'), ((4206, 4249), 'chainer.backends.cuda.cupy.empty', 'cuda.cupy.empty', (['img_shape'], {'dtype': 'col.dtype'}), '(img_shape, dtype=col.dtype)\n', (4221, 4249), False, 'from chainer.backends import cuda\n'), ((4306, 4350), 'chainer.utils.conv_nd_kernel.Col2imNDKernel.generate', 'conv_nd_kernel.Col2imNDKernel.generate', (['ndim'], {}), '(ndim)\n', (4344, 4350), False, 'from chainer.utils import conv_nd_kernel\n'), ((2445, 2501), 'chainer.backends.cuda.elementwise', 'cuda.elementwise', (['in_params', 'out_params', 'operation', 'name'], {}), '(in_params, out_params, operation, name)\n', (2461, 2501), False, 'from chainer.backends import cuda\n'), ((4356, 4412), 'chainer.backends.cuda.elementwise', 'cuda.elementwise', (['in_params', 'out_params', 'operation', 'name'], {}), '(in_params, out_params, operation, name)\n', (4372, 4412), False, 'from chainer.backends import cuda\n'), ((544, 583), 'chainer.utils.conv.get_conv_outsize', 'get_conv_outsize', (['d', 'k', 's', 'p', 'cover_all'], {}), '(d, k, s, p, cover_all)\n', (560, 583), False, 
'from chainer.utils.conv import get_conv_outsize\n'), ((2000, 2039), 'chainer.utils.conv.get_conv_outsize', 'get_conv_outsize', (['d', 'k', 's', 'p', 'cover_all'], {}), '(d, k, s, p, cover_all)\n', (2016, 2039), False, 'from chainer.utils.conv import get_conv_outsize\n'), ((1240, 1258), 'six.moves.range', 'six.moves.range', (['k'], {}), '(k)\n', (1255, 1258), False, 'import six\n'), ((3143, 3161), 'six.moves.range', 'six.moves.range', (['k'], {}), '(k)\n', (3158, 3161), False, 'import six\n')] |
"""
PDBBind dataset loader.
"""
import logging
import multiprocessing
import os
import re
import time
import deepchem
import numpy as np
import pandas as pd
import tarfile
from deepchem.feat import RdkitGridFeaturizer
from deepchem.feat import ComplexNeighborListFragmentAtomicCoordinates
from deepchem.feat.graph_features import AtomicConvFeaturizer
# Module-level logger for the PDBBind loaders below.
logger = logging.getLogger(__name__)
# Default data directory as configured in DeepChem.
DEFAULT_DATA_DIR = deepchem.utils.data_utils.get_data_dir()
def featurize_pdbbind(data_dir=None, feat="grid", subset="core"):
  """Featurizes pdbbind according to provided featurization.

  Args:
    data_dir: Str, optional. Directory holding (or to receive) the
      featurized data. Defaults to the DeepChem data directory.
      Bug fix: previously this argument was silently ignored and always
      overwritten with the default; it is now honored, and downloads are
      sent to the same directory so download and untar locations agree.
    feat: Str. Featurization name; used to locate "<subset>_<feat>" on disk.
    subset: Str. PDBBind subset, e.g. "core", "full" or "refined".

  Returns:
    Tuple of (deepchem.data.DiskDataset, list of task names).
  """
  tasks = ["-logKd/Ki"]
  if data_dir is None:
    data_dir = deepchem.utils.data_utils.get_data_dir()
  pdbbind_dir = os.path.join(data_dir, "pdbbind")
  dataset_dir = os.path.join(pdbbind_dir, "%s_%s" % (subset, feat))
  if not os.path.exists(dataset_dir):
    deepchem.utils.data_utils.download_url(
        "https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/core_grid.tar.gz",
        dest_dir=data_dir)
    deepchem.utils.data_utils.download_url(
        "https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/full_grid.tar.gz",
        dest_dir=data_dir)
    deepchem.utils.data_utils.download_url(
        "https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/refined_grid.tar.gz",
        dest_dir=data_dir)
    if not os.path.exists(pdbbind_dir):
      # os.makedirs is portable, unlike shelling out to `mkdir`.
      os.makedirs(pdbbind_dir)
    deepchem.utils.data_utils.untargz_file(
        os.path.join(data_dir, 'core_grid.tar.gz'), pdbbind_dir)
    deepchem.utils.data_utils.untargz_file(
        os.path.join(data_dir, 'full_grid.tar.gz'), pdbbind_dir)
    deepchem.utils.data_utils.untargz_file(
        os.path.join(data_dir, 'refined_grid.tar.gz'), pdbbind_dir)
  return deepchem.data.DiskDataset(dataset_dir), tasks
def load_pdbbind_grid(split="random",
                      featurizer="grid",
                      subset="core",
                      reload=True):
  """Load PDBBind datasets. Does not do train/test split.

  Args:
    split: Str or None. Splitter name: 'index', 'random' or 'time' for the
      'grid' path; 'index', 'random' or 'scaffold' otherwise. None skips
      splitting (non-grid path only).
    featurizer: Str. 'grid' loads the pre-featurized grid dataset; 'ECFP',
      'GraphConv', 'Weave' or 'Raw' featurize from the SMILES CSV.
    subset: Str. PDBBind subset name, e.g. 'core'.
    reload: Bool. Reuse a previously saved featurized/split dataset
      (non-grid path only).

  Returns:
    Tuple of (tasks, (train, valid, test), transformers).
  """
  if featurizer == 'grid':
    dataset, tasks = featurize_pdbbind(feat=featurizer, subset=subset)
    splitters = {
        'index': deepchem.splits.IndexSplitter(),
        'random': deepchem.splits.RandomSplitter(),
        'time': deepchem.splits.TimeSplitterPDBbind(dataset.ids)
    }
    splitter = splitters[split]
    train, valid, test = splitter.train_valid_test_split(dataset)
    # No transformations are applied to the pre-featurized grid dataset.
    # (The original no-op loops over this empty list were removed.)
    transformers = []
    all_dataset = (train, valid, test)
    return tasks, all_dataset, transformers
  else:
    data_dir = deepchem.utils.data_utils.get_data_dir()
    if reload:
      save_dir = os.path.join(
          data_dir, "pdbbind_" + subset + "/" + featurizer + "/" + str(split))
    dataset_file = os.path.join(data_dir, subset + "_smiles_labels.csv")
    if not os.path.exists(dataset_file):
      deepchem.utils.data_utils.download_url(
          "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/" + subset +
          "_smiles_labels.csv")
    tasks = ["-logKd/Ki"]
    if reload:
      loaded, all_dataset, transformers = deepchem.utils.data_utils.load_dataset_from_disk(
          save_dir)
      if loaded:
        return tasks, all_dataset, transformers
    if featurizer == 'ECFP':
      featurizer = deepchem.feat.CircularFingerprint(size=1024)
    elif featurizer == 'GraphConv':
      featurizer = deepchem.feat.ConvMolFeaturizer()
    elif featurizer == 'Weave':
      featurizer = deepchem.feat.WeaveFeaturizer()
    elif featurizer == 'Raw':
      featurizer = deepchem.feat.RawFeaturizer()
    loader = deepchem.data.CSVLoader(
        tasks=tasks, smiles_field="smiles", featurizer=featurizer)
    dataset = loader.featurize(dataset_file, shard_size=8192)
    # (An unused `df = pd.read_csv(dataset_file)` was removed here.)
    if split == None:
      transformers = [
          deepchem.trans.NormalizationTransformer(
              transform_y=True, dataset=dataset)
      ]
      logger.info("Split is None, about to transform data.")
      for transformer in transformers:
        dataset = transformer.transform(dataset)
      return tasks, (dataset, None, None), transformers
    splitters = {
        'index': deepchem.splits.IndexSplitter(),
        'random': deepchem.splits.RandomSplitter(),
        'scaffold': deepchem.splits.ScaffoldSplitter(),
    }
    splitter = splitters[split]
    logger.info("About to split dataset with {} splitter.".format(split))
    train, valid, test = splitter.train_valid_test_split(dataset)
    # Normalization statistics are fit on train only, then applied to all.
    transformers = [
        deepchem.trans.NormalizationTransformer(
            transform_y=True, dataset=train)
    ]
    logger.info("About to transform dataset.")
    for transformer in transformers:
      train = transformer.transform(train)
      valid = transformer.transform(valid)
      test = transformer.transform(test)
    if reload:
      deepchem.utils.data_utils.save_dataset_to_disk(save_dir, train, valid,
                                                     test, transformers)
    return tasks, (train, valid, test), transformers
def load_pdbbind(reload=True,
                 data_dir=None,
                 subset="core",
                 load_binding_pocket=False,
                 featurizer="grid",
                 split="random",
                 split_seed=None,
                 save_dir=None,
                 save_timestamp=False):
  """Load raw PDBBind dataset by featurization and split.
  Parameters
  ----------
  reload: Bool, optional
    Reload saved featurized and splitted dataset or not.
  data_dir: Str, optional
    Specifies the directory storing the raw dataset.
  load_binding_pocket: Bool, optional
    Load binding pocket or full protein.
  subset: Str
    Specifies which subset of PDBBind, only "core" or "refined" for now.
  featurizer: Str
    Either "grid" or "atomic" for grid and atomic featurizations.
  split: Str
    Either "random" or "index".
  split_seed: Int, optional
    Specifies the random seed for splitter.
  save_dir: Str, optional
    Specifies the directory to store the featurized and splitted dataset when
    reload is False. If reload is True, it will load saved dataset inside save_dir.
  save_timestamp: Bool, optional
    Save featurized and splitted dataset with timestamp or not. Set it as True
    when running similar or same jobs simultaneously on multiple compute nodes.
  Returns
  -------
  Tuple of (pdbbind_tasks, (train, valid, test), transformers).
  """
  pdbbind_tasks = ["-logKd/Ki"]
  # NOTE(review): `deepchem_dir` is assigned but never used below.
  deepchem_dir = deepchem.utils.data_utils.get_data_dir()
  if data_dir == None:
    data_dir = DEFAULT_DATA_DIR
  data_folder = os.path.join(data_dir, "pdbbind", "v2015")
  if save_dir == None:
    save_dir = os.path.join(DEFAULT_DATA_DIR, "from-pdbbind")
  if load_binding_pocket:
    save_folder = os.path.join(
        save_dir, "protein_pocket-%s-%s-%s" % (subset, featurizer, split))
  else:
    save_folder = os.path.join(
        save_dir, "full_protein-%s-%s-%s" % (subset, featurizer, split))
  if save_timestamp:
    # Suffix: date plus the fractional part of time.time(), to keep
    # concurrent jobs from clobbering each other's save folders.
    # NOTE(review): the regex should be a raw string (r"\.(.*)").
    save_folder = "%s-%s-%s" % (save_folder,
                                time.strftime("%Y%m%d", time.localtime()),
                                re.search("\.(.*)", str(time.time())).group(1))
  if reload:
    if not os.path.exists(save_folder):
      print(
          "Dataset does not exist at {}. Reconstructing...".format(save_folder))
    else:
      print(
          "\nLoading featurized and splitted dataset from:\n%s\n" % save_folder)
    loaded, all_dataset, transformers = deepchem.utils.data_utils.load_dataset_from_disk(
        save_folder)
    if loaded:
      return pdbbind_tasks, all_dataset, transformers
  dataset_file = os.path.join(data_dir, "pdbbind_v2015.tar.gz")
  if not os.path.exists(dataset_file):
    logger.warning("About to download PDBBind full dataset. Large file, 2GB")
    deepchem.utils.data_utils.download_url(
        "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/pdbbind_v2015.tar.gz",
        dest_dir=data_dir)
  if os.path.exists(data_folder):
    logger.info("PDBBind full dataset already exists.")
  else:
    print("Untarring full dataset...")
    deepchem.utils.data_utils.untargz_file(
        dataset_file, dest_dir=os.path.join(data_dir, "pdbbind"))
  print("\nRaw dataset:\n%s" % data_folder)
  print("\nFeaturized and splitted dataset:\n%s" % save_folder)
  if subset == "core":
    index_labels_file = os.path.join(data_folder, "INDEX_core_data.2013")
  elif subset == "refined":
    index_labels_file = os.path.join(data_folder, "INDEX_refined_data.2015")
  else:
    raise ValueError("Other subsets not supported")
  # Extract locations of data
  with open(index_labels_file, "r") as g:
    pdbs = [line[:4] for line in g.readlines() if line[0] != "#"]
  if load_binding_pocket:
    protein_files = [
        os.path.join(data_folder, pdb, "%s_pocket.pdb" % pdb) for pdb in pdbs
    ]
  else:
    protein_files = [
        os.path.join(data_folder, pdb, "%s_protein.pdb" % pdb) for pdb in pdbs
    ]
  ligand_files = [
      os.path.join(data_folder, pdb, "%s_ligand.sdf" % pdb) for pdb in pdbs
  ]
  # Extract labels
  with open(index_labels_file, "r") as g:
    labels = np.array([
        # Lines have format
        # PDB code, resolution, release year, -logKd/Ki, Kd/Ki, reference, ligand name
        # The base-10 logarithm, -log kd/pk
        float(line.split()[3]) for line in g.readlines() if line[0] != "#"
    ])
  # Featurize Data
  if featurizer == "grid":
    featurizer = RdkitGridFeaturizer(
        voxel_width=2.0,
        feature_types=[
            'ecfp', 'splif', 'hbond', 'salt_bridge', 'pi_stack', 'cation_pi',
            'charge'
        ],
        flatten=True)
  elif featurizer == "atomic" or featurizer == "atomic_conv":
    # Pulled from PDB files. For larger datasets with more PDBs, would use
    # max num atoms instead of exact.
    frag1_num_atoms = 70  # for ligand atoms
    if load_binding_pocket:
      frag2_num_atoms = 1000
      complex_num_atoms = 1070
    else:
      frag2_num_atoms = 24000  # for protein atoms
      complex_num_atoms = 24070  # in total
    max_num_neighbors = 4
    # Cutoff in angstroms
    neighbor_cutoff = 4
    if featurizer == "atomic":
      featurizer = ComplexNeighborListFragmentAtomicCoordinates(
          frag1_num_atoms=frag1_num_atoms,
          frag2_num_atoms=frag2_num_atoms,
          complex_num_atoms=complex_num_atoms,
          max_num_neighbors=max_num_neighbors,
          neighbor_cutoff=neighbor_cutoff)
    if featurizer == "atomic_conv":
      featurizer = AtomicConvFeaturizer(
          labels=labels,
          frag1_num_atoms=frag1_num_atoms,
          frag2_num_atoms=frag2_num_atoms,
          complex_num_atoms=complex_num_atoms,
          neighbor_cutoff=neighbor_cutoff,
          max_num_neighbors=max_num_neighbors,
          batch_size=64)
  else:
    raise ValueError("Featurizer not supported")
  print("\nFeaturizing Complexes for \"%s\" ...\n" % data_folder)
  feat_t1 = time.time()
  features, failures = featurizer.featurize(ligand_files, protein_files)
  feat_t2 = time.time()
  print("\nFeaturization finished, took %0.3f s." % (feat_t2 - feat_t1))
  # Delete labels and ids for failing elements
  labels = np.delete(labels, failures)
  labels = labels.reshape((len(labels), 1))
  ids = np.delete(pdbs, failures)
  print("\nConstruct dataset excluding failing featurization elements...")
  dataset = deepchem.data.DiskDataset.from_numpy(features, y=labels, ids=ids)
  # No transformations of data
  transformers = []
  # Split dataset
  print("\nSplit dataset...\n")
  if split == None:
    return pdbbind_tasks, (dataset, None, None), transformers
  # TODO(rbharath): This should be modified to contain a cluster split so
  # structures of the same protein aren't in both train/test
  splitters = {
      'index': deepchem.splits.IndexSplitter(),
      'random': deepchem.splits.RandomSplitter(),
  }
  splitter = splitters[split]
  train, valid, test = splitter.train_valid_test_split(dataset, seed=split_seed)
  all_dataset = (train, valid, test)
  print("\nSaving dataset to \"%s\" ..." % save_folder)
  deepchem.utils.data_utils.save_dataset_to_disk(save_folder, train, valid,
                                                 test, transformers)
  return pdbbind_tasks, all_dataset, transformers
def load_pdbbind_from_dir(data_folder,
                          index_files,
                          featurizer="grid",
                          split="random",
                          ex_ids=None,
                          save_dir=None):
  """Load and featurize raw PDBBind dataset from a local directory with the option to avoid certain IDs.

  Parameters
  ----------
  data_folder: String,
    Specifies the data directory holding the raw complexes and index files.
  index_files: List
    List of [data index, labels index] file paths relative to `data_folder`.
  featurizer: Str
    Either "grid" or "atomic" for grid and atomic featurizations.
  split: Str or None
    Either "random" or "index"; None skips splitting.
  ex_ids: List, optional
    List of PDB IDs to avoid loading if present.
    (Bug fix: the default was a mutable list literal; it is now None.)
  save_dir: String, optional
    Path to store featurized datasets.

  Returns
  -------
  Tuple of (pdbbind_tasks, (train, valid, test), transformers).
  """
  if ex_ids is None:
    ex_ids = []
  pdbbind_tasks = ["-logKd/Ki"]
  index_file = os.path.join(data_folder, index_files[0])
  labels_file = os.path.join(data_folder, index_files[1])
  # Extract locations of data: any whitespace-split token of length 4 at
  # the start of a line is treated as a PDB code.
  pdbs = []
  with open(index_file, "r") as g:
    lines = g.readlines()
    for line in lines:
      line = line.split(" ")
      pdb = line[0]
      if len(pdb) == 4:
        pdbs.append(pdb)
  protein_files = [
      os.path.join(data_folder, pdb, "%s_protein.pdb" % pdb)
      for pdb in pdbs
      if pdb not in ex_ids
  ]
  ligand_files = [
      os.path.join(data_folder, pdb, "%s_ligand.sdf" % pdb)
      for pdb in pdbs
      if pdb not in ex_ids
  ]
  # Extract labels
  labels_tmp = {}
  with open(labels_file, "r") as f:
    lines = f.readlines()
    for line in lines:
      # Skip comment lines
      if line[0] == "#":
        continue
      # Lines have format
      # PDB code, resolution, release year, -logKd/Ki, Kd/Ki, reference, ligand name
      line = line.split()
      # The base-10 logarithm, -log kd/pk
      log_label = line[3]
      labels_tmp[line[0]] = log_label
  # NOTE(review): labels stay strings here (no float()), unlike
  # load_pdbbind — confirm downstream consumers expect that.
  labels = np.array([labels_tmp[pdb] for pdb in pdbs])
  print(labels)
  # Featurize Data
  if featurizer == "grid":
    featurizer = RdkitGridFeaturizer(
        voxel_width=2.0,
        feature_types=[
            'ecfp', 'splif', 'hbond', 'salt_bridge', 'pi_stack', 'cation_pi',
            'charge'
        ],
        flatten=True)
  elif featurizer == "atomic":
    # Pulled from PDB files. For larger datasets with more PDBs, would use
    # max num atoms instead of exact.
    frag1_num_atoms = 70  # for ligand atoms
    frag2_num_atoms = 24000  # for protein atoms
    complex_num_atoms = 24070  # in total
    max_num_neighbors = 4
    # Cutoff in angstroms
    neighbor_cutoff = 4
    featurizer = ComplexNeighborListFragmentAtomicCoordinates(
        frag1_num_atoms, frag2_num_atoms, complex_num_atoms, max_num_neighbors,
        neighbor_cutoff)
  else:
    raise ValueError("Featurizer not supported")
  print("Featurizing Complexes")
  features, failures = featurizer.featurize(ligand_files, protein_files)
  # Delete labels for failing elements
  labels = np.delete(labels, failures)
  dataset = deepchem.data.DiskDataset.from_numpy(features, labels)
  # No transformations of data
  transformers = []
  if split == None:
    return pdbbind_tasks, (dataset, None, None), transformers
  # TODO(rbharath): This should be modified to contain a cluster split so
  # structures of the same protein aren't in both train/test
  splitters = {
      'index': deepchem.splits.IndexSplitter(),
      'random': deepchem.splits.RandomSplitter(),
  }
  splitter = splitters[split]
  train, valid, test = splitter.train_valid_test_split(dataset)
  all_dataset = (train, valid, test)
  if save_dir:
    deepchem.utils.data_utils.save_dataset_to_disk(save_dir, train, valid, test,
                                                   transformers)
  return pdbbind_tasks, all_dataset, transformers
| [
"deepchem.splits.ScaffoldSplitter",
"pandas.read_csv",
"deepchem.feat.WeaveFeaturizer",
"deepchem.splits.RandomSplitter",
"os.path.join",
"deepchem.feat.RdkitGridFeaturizer",
"deepchem.feat.RawFeaturizer",
"deepchem.feat.CircularFingerprint",
"os.path.exists",
"deepchem.splits.TimeSplitterPDBbind"... | [((362, 389), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (379, 389), False, 'import logging\n'), ((409, 449), 'deepchem.utils.data_utils.get_data_dir', 'deepchem.utils.data_utils.get_data_dir', ([], {}), '()\n', (447, 449), False, 'import deepchem\n'), ((618, 658), 'deepchem.utils.data_utils.get_data_dir', 'deepchem.utils.data_utils.get_data_dir', ([], {}), '()\n', (656, 658), False, 'import deepchem\n'), ((675, 708), 'os.path.join', 'os.path.join', (['data_dir', '"""pdbbind"""'], {}), "(data_dir, 'pdbbind')\n", (687, 708), False, 'import os\n'), ((725, 776), 'os.path.join', 'os.path.join', (['pdbbind_dir', "('%s_%s' % (subset, feat))"], {}), "(pdbbind_dir, '%s_%s' % (subset, feat))\n", (737, 776), False, 'import os\n'), ((6533, 6573), 'deepchem.utils.data_utils.get_data_dir', 'deepchem.utils.data_utils.get_data_dir', ([], {}), '()\n', (6571, 6573), False, 'import deepchem\n'), ((6646, 6688), 'os.path.join', 'os.path.join', (['data_dir', '"""pdbbind"""', '"""v2015"""'], {}), "(data_dir, 'pdbbind', 'v2015')\n", (6658, 6688), False, 'import os\n'), ((7693, 7739), 'os.path.join', 'os.path.join', (['data_dir', '"""pdbbind_v2015.tar.gz"""'], {}), "(data_dir, 'pdbbind_v2015.tar.gz')\n", (7705, 7739), False, 'import os\n'), ((8022, 8049), 'os.path.exists', 'os.path.exists', (['data_folder'], {}), '(data_folder)\n', (8036, 8049), False, 'import os\n'), ((11008, 11019), 'time.time', 'time.time', ([], {}), '()\n', (11017, 11019), False, 'import time\n'), ((11105, 11116), 'time.time', 'time.time', ([], {}), '()\n', (11114, 11116), False, 'import time\n'), ((11249, 11276), 'numpy.delete', 'np.delete', (['labels', 'failures'], {}), '(labels, failures)\n', (11258, 11276), True, 'import numpy as np\n'), ((11329, 11354), 'numpy.delete', 'np.delete', (['pdbs', 'failures'], {}), '(pdbs, failures)\n', (11338, 11354), True, 'import numpy as np\n'), ((11443, 11508), 'deepchem.data.DiskDataset.from_numpy', 
'deepchem.data.DiskDataset.from_numpy', (['features'], {'y': 'labels', 'ids': 'ids'}), '(features, y=labels, ids=ids)\n', (11479, 11508), False, 'import deepchem\n'), ((12155, 12252), 'deepchem.utils.data_utils.save_dataset_to_disk', 'deepchem.utils.data_utils.save_dataset_to_disk', (['save_folder', 'train', 'valid', 'test', 'transformers'], {}), '(save_folder, train, valid,\n test, transformers)\n', (12201, 12252), False, 'import deepchem\n'), ((13292, 13333), 'os.path.join', 'os.path.join', (['data_folder', 'index_files[0]'], {}), '(data_folder, index_files[0])\n', (13304, 13333), False, 'import os\n'), ((13350, 13391), 'os.path.join', 'os.path.join', (['data_folder', 'index_files[1]'], {}), '(data_folder, index_files[1])\n', (13362, 13391), False, 'import os\n'), ((14330, 14373), 'numpy.array', 'np.array', (['[labels_tmp[pdb] for pdb in pdbs]'], {}), '([labels_tmp[pdb] for pdb in pdbs])\n', (14338, 14373), True, 'import numpy as np\n'), ((15393, 15420), 'numpy.delete', 'np.delete', (['labels', 'failures'], {}), '(labels, failures)\n', (15402, 15420), True, 'import numpy as np\n'), ((15433, 15487), 'deepchem.data.DiskDataset.from_numpy', 'deepchem.data.DiskDataset.from_numpy', (['features', 'labels'], {}), '(features, labels)\n', (15469, 15487), False, 'import deepchem\n'), ((787, 814), 'os.path.exists', 'os.path.exists', (['dataset_dir'], {}), '(dataset_dir)\n', (801, 814), False, 'import os\n'), ((820, 956), 'deepchem.utils.data_utils.download_url', 'deepchem.utils.data_utils.download_url', (['"""https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/core_grid.tar.gz"""'], {}), "(\n 'https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/core_grid.tar.gz'\n )\n", (858, 956), False, 'import deepchem\n'), ((965, 1101), 'deepchem.utils.data_utils.download_url', 'deepchem.utils.data_utils.download_url', (['"""https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/full_grid.tar.gz"""'], {}), "(\n 
'https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/full_grid.tar.gz'\n )\n", (1003, 1101), False, 'import deepchem\n'), ((1110, 1249), 'deepchem.utils.data_utils.download_url', 'deepchem.utils.data_utils.download_url', (['"""https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/refined_grid.tar.gz"""'], {}), "(\n 'https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/refined_grid.tar.gz'\n )\n", (1148, 1249), False, 'import deepchem\n'), ((1674, 1712), 'deepchem.data.DiskDataset', 'deepchem.data.DiskDataset', (['dataset_dir'], {}), '(dataset_dir)\n', (1699, 1712), False, 'import deepchem\n'), ((2691, 2731), 'deepchem.utils.data_utils.get_data_dir', 'deepchem.utils.data_utils.get_data_dir', ([], {}), '()\n', (2729, 2731), False, 'import deepchem\n'), ((2877, 2930), 'os.path.join', 'os.path.join', (['data_dir', "(subset + '_smiles_labels.csv')"], {}), "(data_dir, subset + '_smiles_labels.csv')\n", (2889, 2930), False, 'import os\n'), ((3710, 3797), 'deepchem.data.CSVLoader', 'deepchem.data.CSVLoader', ([], {'tasks': 'tasks', 'smiles_field': '"""smiles"""', 'featurizer': 'featurizer'}), "(tasks=tasks, smiles_field='smiles', featurizer=\n featurizer)\n", (3733, 3797), False, 'import deepchem\n'), ((3873, 3898), 'pandas.read_csv', 'pd.read_csv', (['dataset_file'], {}), '(dataset_file)\n', (3884, 3898), True, 'import pandas as pd\n'), ((6728, 6774), 'os.path.join', 'os.path.join', (['DEFAULT_DATA_DIR', '"""from-pdbbind"""'], {}), "(DEFAULT_DATA_DIR, 'from-pdbbind')\n", (6740, 6774), False, 'import os\n'), ((6819, 6898), 'os.path.join', 'os.path.join', (['save_dir', "('protein_pocket-%s-%s-%s' % (subset, featurizer, split))"], {}), "(save_dir, 'protein_pocket-%s-%s-%s' % (subset, featurizer, split))\n", (6831, 6898), False, 'import os\n'), ((6934, 7011), 'os.path.join', 'os.path.join', (['save_dir', "('full_protein-%s-%s-%s' % (subset, featurizer, split))"], {}), "(save_dir, 'full_protein-%s-%s-%s' % (subset, featurizer, 
split))\n", (6946, 7011), False, 'import os\n'), ((7535, 7596), 'deepchem.utils.data_utils.load_dataset_from_disk', 'deepchem.utils.data_utils.load_dataset_from_disk', (['save_folder'], {}), '(save_folder)\n', (7583, 7596), False, 'import deepchem\n'), ((7749, 7777), 'os.path.exists', 'os.path.exists', (['dataset_file'], {}), '(dataset_file)\n', (7763, 7777), False, 'import os\n'), ((7861, 8009), 'deepchem.utils.data_utils.download_url', 'deepchem.utils.data_utils.download_url', (['"""https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/pdbbind_v2015.tar.gz"""'], {'dest_dir': 'data_dir'}), "(\n 'https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/pdbbind_v2015.tar.gz'\n , dest_dir=data_dir)\n", (7899, 8009), False, 'import deepchem\n'), ((8421, 8470), 'os.path.join', 'os.path.join', (['data_folder', '"""INDEX_core_data.2013"""'], {}), "(data_folder, 'INDEX_core_data.2013')\n", (8433, 8470), False, 'import os\n'), ((9047, 9100), 'os.path.join', 'os.path.join', (['data_folder', 'pdb', "('%s_ligand.sdf' % pdb)"], {}), "(data_folder, pdb, '%s_ligand.sdf' % pdb)\n", (9059, 9100), False, 'import os\n'), ((9512, 9658), 'deepchem.feat.RdkitGridFeaturizer', 'RdkitGridFeaturizer', ([], {'voxel_width': '(2.0)', 'feature_types': "['ecfp', 'splif', 'hbond', 'salt_bridge', 'pi_stack', 'cation_pi', 'charge']", 'flatten': '(True)'}), "(voxel_width=2.0, feature_types=['ecfp', 'splif',\n 'hbond', 'salt_bridge', 'pi_stack', 'cation_pi', 'charge'], flatten=True)\n", (9531, 9658), False, 'from deepchem.feat import RdkitGridFeaturizer\n'), ((11861, 11892), 'deepchem.splits.IndexSplitter', 'deepchem.splits.IndexSplitter', ([], {}), '()\n', (11890, 11892), False, 'import deepchem\n'), ((11910, 11942), 'deepchem.splits.RandomSplitter', 'deepchem.splits.RandomSplitter', ([], {}), '()\n', (11940, 11942), False, 'import deepchem\n'), ((13644, 13698), 'os.path.join', 'os.path.join', (['data_folder', 'pdb', "('%s_protein.pdb' % pdb)"], {}), "(data_folder, pdb, '%s_protein.pdb' % 
pdb)\n", (13656, 13698), False, 'import os\n'), ((13777, 13830), 'os.path.join', 'os.path.join', (['data_folder', 'pdb', "('%s_ligand.sdf' % pdb)"], {}), "(data_folder, pdb, '%s_ligand.sdf' % pdb)\n", (13789, 13830), False, 'import os\n'), ((14453, 14599), 'deepchem.feat.RdkitGridFeaturizer', 'RdkitGridFeaturizer', ([], {'voxel_width': '(2.0)', 'feature_types': "['ecfp', 'splif', 'hbond', 'salt_bridge', 'pi_stack', 'cation_pi', 'charge']", 'flatten': '(True)'}), "(voxel_width=2.0, feature_types=['ecfp', 'splif',\n 'hbond', 'salt_bridge', 'pi_stack', 'cation_pi', 'charge'], flatten=True)\n", (14472, 14599), False, 'from deepchem.feat import RdkitGridFeaturizer\n'), ((15788, 15819), 'deepchem.splits.IndexSplitter', 'deepchem.splits.IndexSplitter', ([], {}), '()\n', (15817, 15819), False, 'import deepchem\n'), ((15837, 15869), 'deepchem.splits.RandomSplitter', 'deepchem.splits.RandomSplitter', ([], {}), '()\n', (15867, 15869), False, 'import deepchem\n'), ((16025, 16119), 'deepchem.utils.data_utils.save_dataset_to_disk', 'deepchem.utils.data_utils.save_dataset_to_disk', (['save_dir', 'train', 'valid', 'test', 'transformers'], {}), '(save_dir, train, valid, test,\n transformers)\n', (16071, 16119), False, 'import deepchem\n'), ((1265, 1292), 'os.path.exists', 'os.path.exists', (['pdbbind_dir'], {}), '(pdbbind_dir)\n', (1279, 1292), False, 'import os\n'), ((1300, 1333), 'os.system', 'os.system', (["('mkdir ' + pdbbind_dir)"], {}), "('mkdir ' + pdbbind_dir)\n", (1309, 1333), False, 'import os\n'), ((1386, 1428), 'os.path.join', 'os.path.join', (['data_dir', '"""core_grid.tar.gz"""'], {}), "(data_dir, 'core_grid.tar.gz')\n", (1398, 1428), False, 'import os\n'), ((1495, 1537), 'os.path.join', 'os.path.join', (['data_dir', '"""full_grid.tar.gz"""'], {}), "(data_dir, 'full_grid.tar.gz')\n", (1507, 1537), False, 'import os\n'), ((1604, 1649), 'os.path.join', 'os.path.join', (['data_dir', '"""refined_grid.tar.gz"""'], {}), "(data_dir, 'refined_grid.tar.gz')\n", (1616, 1649), 
False, 'import os\n'), ((2068, 2099), 'deepchem.splits.IndexSplitter', 'deepchem.splits.IndexSplitter', ([], {}), '()\n', (2097, 2099), False, 'import deepchem\n'), ((2119, 2151), 'deepchem.splits.RandomSplitter', 'deepchem.splits.RandomSplitter', ([], {}), '()\n', (2149, 2151), False, 'import deepchem\n'), ((2169, 2217), 'deepchem.splits.TimeSplitterPDBbind', 'deepchem.splits.TimeSplitterPDBbind', (['dataset.ids'], {}), '(dataset.ids)\n', (2204, 2217), False, 'import deepchem\n'), ((2943, 2971), 'os.path.exists', 'os.path.exists', (['dataset_file'], {}), '(dataset_file)\n', (2957, 2971), False, 'import os\n'), ((2979, 3119), 'deepchem.utils.data_utils.download_url', 'deepchem.utils.data_utils.download_url', (["('https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/' + subset +\n '_smiles_labels.csv')"], {}), "(\n 'https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/' + subset +\n '_smiles_labels.csv')\n", (3017, 3119), False, 'import deepchem\n'), ((3216, 3274), 'deepchem.utils.data_utils.load_dataset_from_disk', 'deepchem.utils.data_utils.load_dataset_from_disk', (['save_dir'], {}), '(save_dir)\n', (3264, 3274), False, 'import deepchem\n'), ((3400, 3444), 'deepchem.feat.CircularFingerprint', 'deepchem.feat.CircularFingerprint', ([], {'size': '(1024)'}), '(size=1024)\n', (3433, 3444), False, 'import deepchem\n'), ((4295, 4326), 'deepchem.splits.IndexSplitter', 'deepchem.splits.IndexSplitter', ([], {}), '()\n', (4324, 4326), False, 'import deepchem\n'), ((4346, 4378), 'deepchem.splits.RandomSplitter', 'deepchem.splits.RandomSplitter', ([], {}), '()\n', (4376, 4378), False, 'import deepchem\n'), ((4400, 4434), 'deepchem.splits.ScaffoldSplitter', 'deepchem.splits.ScaffoldSplitter', ([], {}), '()\n', (4432, 4434), False, 'import deepchem\n'), ((4644, 4716), 'deepchem.trans.NormalizationTransformer', 'deepchem.trans.NormalizationTransformer', ([], {'transform_y': '(True)', 'dataset': 'train'}), '(transform_y=True, dataset=train)\n', (4683, 4716), False, 
'import deepchem\n'), ((4970, 5064), 'deepchem.utils.data_utils.save_dataset_to_disk', 'deepchem.utils.data_utils.save_dataset_to_disk', (['save_dir', 'train', 'valid', 'test', 'transformers'], {}), '(save_dir, train, valid, test,\n transformers)\n', (5016, 5064), False, 'import deepchem\n'), ((7268, 7295), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (7282, 7295), False, 'import os\n'), ((8523, 8575), 'os.path.join', 'os.path.join', (['data_folder', '"""INDEX_refined_data.2015"""'], {}), "(data_folder, 'INDEX_refined_data.2015')\n", (8535, 8575), False, 'import os\n'), ((8831, 8884), 'os.path.join', 'os.path.join', (['data_folder', 'pdb', "('%s_pocket.pdb' % pdb)"], {}), "(data_folder, pdb, '%s_pocket.pdb' % pdb)\n", (8843, 8884), False, 'import os\n'), ((8945, 8999), 'os.path.join', 'os.path.join', (['data_folder', 'pdb', "('%s_protein.pdb' % pdb)"], {}), "(data_folder, pdb, '%s_protein.pdb' % pdb)\n", (8957, 8999), False, 'import os\n'), ((15028, 15165), 'deepchem.feat.ComplexNeighborListFragmentAtomicCoordinates', 'ComplexNeighborListFragmentAtomicCoordinates', (['frag1_num_atoms', 'frag2_num_atoms', 'complex_num_atoms', 'max_num_neighbors', 'neighbor_cutoff'], {}), '(frag1_num_atoms,\n frag2_num_atoms, complex_num_atoms, max_num_neighbors, neighbor_cutoff)\n', (15072, 15165), False, 'from deepchem.feat import ComplexNeighborListFragmentAtomicCoordinates\n'), ((3500, 3533), 'deepchem.feat.ConvMolFeaturizer', 'deepchem.feat.ConvMolFeaturizer', ([], {}), '()\n', (3531, 3533), False, 'import deepchem\n'), ((3955, 4029), 'deepchem.trans.NormalizationTransformer', 'deepchem.trans.NormalizationTransformer', ([], {'transform_y': '(True)', 'dataset': 'dataset'}), '(transform_y=True, dataset=dataset)\n', (3994, 4029), False, 'import deepchem\n'), ((8229, 8262), 'os.path.join', 'os.path.join', (['data_dir', '"""pdbbind"""'], {}), "(data_dir, 'pdbbind')\n", (8241, 8262), False, 'import os\n'), ((10253, 10485), 
'deepchem.feat.ComplexNeighborListFragmentAtomicCoordinates', 'ComplexNeighborListFragmentAtomicCoordinates', ([], {'frag1_num_atoms': 'frag1_num_atoms', 'frag2_num_atoms': 'frag2_num_atoms', 'complex_num_atoms': 'complex_num_atoms', 'max_num_neighbors': 'max_num_neighbors', 'neighbor_cutoff': 'neighbor_cutoff'}), '(frag1_num_atoms=\n frag1_num_atoms, frag2_num_atoms=frag2_num_atoms, complex_num_atoms=\n complex_num_atoms, max_num_neighbors=max_num_neighbors, neighbor_cutoff\n =neighbor_cutoff)\n', (10297, 10485), False, 'from deepchem.feat import ComplexNeighborListFragmentAtomicCoordinates\n'), ((10577, 10812), 'deepchem.feat.graph_features.AtomicConvFeaturizer', 'AtomicConvFeaturizer', ([], {'labels': 'labels', 'frag1_num_atoms': 'frag1_num_atoms', 'frag2_num_atoms': 'frag2_num_atoms', 'complex_num_atoms': 'complex_num_atoms', 'neighbor_cutoff': 'neighbor_cutoff', 'max_num_neighbors': 'max_num_neighbors', 'batch_size': '(64)'}), '(labels=labels, frag1_num_atoms=frag1_num_atoms,\n frag2_num_atoms=frag2_num_atoms, complex_num_atoms=complex_num_atoms,\n neighbor_cutoff=neighbor_cutoff, max_num_neighbors=max_num_neighbors,\n batch_size=64)\n', (10597, 10812), False, 'from deepchem.feat.graph_features import AtomicConvFeaturizer\n'), ((3585, 3616), 'deepchem.feat.WeaveFeaturizer', 'deepchem.feat.WeaveFeaturizer', ([], {}), '()\n', (3614, 3616), False, 'import deepchem\n'), ((7144, 7160), 'time.localtime', 'time.localtime', ([], {}), '()\n', (7158, 7160), False, 'import time\n'), ((3666, 3695), 'deepchem.feat.RawFeaturizer', 'deepchem.feat.RawFeaturizer', ([], {}), '()\n', (3693, 3695), False, 'import deepchem\n'), ((7219, 7230), 'time.time', 'time.time', ([], {}), '()\n', (7228, 7230), False, 'import time\n')] |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Construct minibatches for Fast R-CNN training. Handles the minibatch blobs
that are specific to Fast R-CNN. Other blobs that are generic to RPN, etc.
are handled by their respecitive roi_data modules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import numpy.random as npr
import logging
from core.config import cfg
import utils.boxes as box_utils
import utils.blob as blob_utils
import utils.fpn as fpn_utils
logger = logging.getLogger(__name__)
def add_rel_blobs(blobs, im_scales, roidb):
"""Add blobs needed for training Fast R-CNN style models."""
# Sample training RoIs from each image and append them to the blob lists
for im_i, entry in enumerate(roidb):
frcn_blobs = _sample_pairs(entry, im_scales[im_i], im_i)
for k, v in frcn_blobs.items():
blobs[k].append(v)
# Concat the training blob lists into tensors
for k, v in blobs.items():
if isinstance(v, list) and len(v) > 0:
blobs[k] = np.concatenate(v)
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
_add_rel_multilevel_rois(blobs)
return True
def _sample_pairs(roidb, im_scale, batch_idx):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
fg_pairs_per_image = cfg.TRAIN.FG_REL_SIZE_PER_IM
pairs_per_image = int(cfg.TRAIN.FG_REL_SIZE_PER_IM / cfg.TRAIN.FG_REL_FRACTION) # need much more pairs since it's quadratic
max_pair_overlaps = roidb['max_pair_overlaps']
gt_pair_inds = np.where(max_pair_overlaps > 1.0 - 1e-4)[0]
fg_pair_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps <= 1.0 - 1e-4))[0]
fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, gt_pair_inds.size + fg_pair_inds.size)
# Sample foreground regions without replacement
# if rel_pos_inds.size > 0 and rel_pos_inds.size > fg_rois_per_image - rel_gt_inds.size:
if fg_pair_inds.size > 0 and fg_pair_inds.size > (fg_pairs_per_this_image - gt_pair_inds.size) \
and fg_pairs_per_this_image > gt_pair_inds.size:
fg_pair_inds = npr.choice(
fg_pair_inds, size=(fg_pairs_per_this_image - gt_pair_inds.size), replace=False)
fg_pair_inds = np.append(fg_pair_inds, gt_pair_inds)
# Label is the class each RoI has max overlap with
fg_prd_labels = roidb['max_prd_classes'][fg_pair_inds]
blob_dict = dict(
fg_prd_labels_int32=fg_prd_labels.astype(np.int32, copy=False))
bg_pair_inds = np.where((max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image
bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, bg_pair_inds.size)
# Sample foreground regions without replacement
if bg_pair_inds.size > 0:
bg_pair_inds = npr.choice(
bg_pair_inds, size=bg_pairs_per_this_image, replace=False)
keep_pair_inds = np.append(fg_pair_inds, bg_pair_inds)
all_prd_labels = np.zeros(keep_pair_inds.size, dtype=np.int32)
all_prd_labels[:fg_pair_inds.size] = fg_prd_labels + 1 # class should start from 1 # size 311
blob_dict['all_prd_labels_int32'] = all_prd_labels.astype(np.int32, copy=False)
blob_dict['fg_size'] = np.array([fg_pair_inds.size], dtype=np.int32) # this is used to check if there is at least one fg to learn
sampled_sbj_boxes = roidb['sbj_boxes'][keep_pair_inds]
sampled_obj_boxes = roidb['obj_boxes'][keep_pair_inds]
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_sbj_rois = sampled_sbj_boxes * im_scale
sampled_obj_rois = sampled_obj_boxes * im_scale
repeated_batch_idx = batch_idx * blob_utils.ones((keep_pair_inds.shape[0], 1))
sampled_sbj_rois = np.hstack((repeated_batch_idx, sampled_sbj_rois))
sampled_obj_rois = np.hstack((repeated_batch_idx, sampled_obj_rois))
blob_dict['sbj_rois'] = sampled_sbj_rois
blob_dict['obj_rois'] = sampled_obj_rois
sampled_rel_rois = box_utils.rois_union(sampled_sbj_rois, sampled_obj_rois)
blob_dict['rel_rois'] = sampled_rel_rois
if cfg.MODEL.USE_FREQ_BIAS or cfg.MODEL.USE_SEPARATE_SO_SCORES:
sbj_labels = roidb['max_sbj_classes'][keep_pair_inds]
obj_labels = roidb['max_obj_classes'][keep_pair_inds]
blob_dict['all_sbj_labels_int32'] = sbj_labels.astype(np.int32, copy=False) # 1703
blob_dict['all_obj_labels_int32'] = obj_labels.astype(np.int32, copy=False) # 1703
return blob_dict
def _add_rel_multilevel_rois(blobs):
"""By default training RoIs are added for a single feature map level only.
When using FPN, the RoIs must be distributed over different FPN levels
according the level assignment heuristic (see: modeling.FPN.
map_rois_to_fpn_levels).
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
def _distribute_rois_over_fpn_levels(rois_blob_names):
"""Distribute rois over the different FPN levels."""
# Get target level for each roi
# Recall blob rois are in (batch_idx, x1, y1, x2, y2) format, hence take
# the box coordinates from columns 1:5
lowest_target_lvls = None
for rois_blob_name in rois_blob_names:
target_lvls = fpn_utils.map_rois_to_fpn_levels(
blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max)
if lowest_target_lvls is None:
lowest_target_lvls = target_lvls
else:
lowest_target_lvls = np.minimum(lowest_target_lvls, target_lvls)
for rois_blob_name in rois_blob_names:
# Add per FPN level roi blobs named like: <rois_blob_name>_fpn<lvl>
fpn_utils.add_multilevel_roi_blobs(
blobs, rois_blob_name, blobs[rois_blob_name], lowest_target_lvls, lvl_min,
lvl_max)
_distribute_rois_over_fpn_levels(['sbj_rois'])
_distribute_rois_over_fpn_levels(['obj_rois'])
_distribute_rois_over_fpn_levels(['rel_rois']) | [
"numpy.minimum",
"numpy.concatenate",
"utils.fpn.map_rois_to_fpn_levels",
"numpy.zeros",
"numpy.hstack",
"numpy.append",
"utils.fpn.add_multilevel_roi_blobs",
"numpy.where",
"numpy.array",
"utils.blob.ones",
"utils.boxes.rois_union",
"numpy.random.choice",
"logging.getLogger"
] | [((1224, 1251), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1241, 1251), False, 'import logging\n'), ((2527, 2596), 'numpy.minimum', 'np.minimum', (['fg_pairs_per_image', '(gt_pair_inds.size + fg_pair_inds.size)'], {}), '(fg_pairs_per_image, gt_pair_inds.size + fg_pair_inds.size)\n', (2537, 2596), True, 'import numpy as np\n'), ((3052, 3089), 'numpy.append', 'np.append', (['fg_pair_inds', 'gt_pair_inds'], {}), '(fg_pair_inds, gt_pair_inds)\n', (3061, 3089), True, 'import numpy as np\n'), ((3612, 3666), 'numpy.minimum', 'np.minimum', (['bg_pairs_per_this_image', 'bg_pair_inds.size'], {}), '(bg_pairs_per_this_image, bg_pair_inds.size)\n', (3622, 3666), True, 'import numpy as np\n'), ((3876, 3913), 'numpy.append', 'np.append', (['fg_pair_inds', 'bg_pair_inds'], {}), '(fg_pair_inds, bg_pair_inds)\n', (3885, 3913), True, 'import numpy as np\n'), ((3935, 3980), 'numpy.zeros', 'np.zeros', (['keep_pair_inds.size'], {'dtype': 'np.int32'}), '(keep_pair_inds.size, dtype=np.int32)\n', (3943, 3980), True, 'import numpy as np\n'), ((4192, 4237), 'numpy.array', 'np.array', (['[fg_pair_inds.size]'], {'dtype': 'np.int32'}), '([fg_pair_inds.size], dtype=np.int32)\n', (4200, 4237), True, 'import numpy as np\n'), ((4688, 4737), 'numpy.hstack', 'np.hstack', (['(repeated_batch_idx, sampled_sbj_rois)'], {}), '((repeated_batch_idx, sampled_sbj_rois))\n', (4697, 4737), True, 'import numpy as np\n'), ((4761, 4810), 'numpy.hstack', 'np.hstack', (['(repeated_batch_idx, sampled_obj_rois)'], {}), '((repeated_batch_idx, sampled_obj_rois))\n', (4770, 4810), True, 'import numpy as np\n'), ((4924, 4980), 'utils.boxes.rois_union', 'box_utils.rois_union', (['sampled_sbj_rois', 'sampled_obj_rois'], {}), '(sampled_sbj_rois, sampled_obj_rois)\n', (4944, 4980), True, 'import utils.boxes as box_utils\n'), ((2309, 2351), 'numpy.where', 'np.where', (['(max_pair_overlaps > 1.0 - 0.0001)'], {}), '(max_pair_overlaps > 1.0 - 0.0001)\n', (2317, 2351), True, 'import numpy as 
np\n'), ((2372, 2466), 'numpy.where', 'np.where', (['((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) & (max_pair_overlaps <= 1.0 - \n 0.0001))'], {}), '((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) & (max_pair_overlaps <=\n 1.0 - 0.0001))\n', (2380, 2466), True, 'import numpy as np\n'), ((2928, 3021), 'numpy.random.choice', 'npr.choice', (['fg_pair_inds'], {'size': '(fg_pairs_per_this_image - gt_pair_inds.size)', 'replace': '(False)'}), '(fg_pair_inds, size=fg_pairs_per_this_image - gt_pair_inds.size,\n replace=False)\n', (2938, 3021), True, 'import numpy.random as npr\n'), ((3323, 3375), 'numpy.where', 'np.where', (['(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI)'], {}), '(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI)\n', (3331, 3375), True, 'import numpy as np\n'), ((3772, 3841), 'numpy.random.choice', 'npr.choice', (['bg_pair_inds'], {'size': 'bg_pairs_per_this_image', 'replace': '(False)'}), '(bg_pair_inds, size=bg_pairs_per_this_image, replace=False)\n', (3782, 3841), True, 'import numpy.random as npr\n'), ((4619, 4664), 'utils.blob.ones', 'blob_utils.ones', (['(keep_pair_inds.shape[0], 1)'], {}), '((keep_pair_inds.shape[0], 1))\n', (4634, 4664), True, 'import utils.blob as blob_utils\n'), ((1768, 1785), 'numpy.concatenate', 'np.concatenate', (['v'], {}), '(v)\n', (1782, 1785), True, 'import numpy as np\n'), ((6185, 6270), 'utils.fpn.map_rois_to_fpn_levels', 'fpn_utils.map_rois_to_fpn_levels', (['blobs[rois_blob_name][:, 1:5]', 'lvl_min', 'lvl_max'], {}), '(blobs[rois_blob_name][:, 1:5], lvl_min,\n lvl_max)\n', (6217, 6270), True, 'import utils.fpn as fpn_utils\n'), ((6614, 6737), 'utils.fpn.add_multilevel_roi_blobs', 'fpn_utils.add_multilevel_roi_blobs', (['blobs', 'rois_blob_name', 'blobs[rois_blob_name]', 'lowest_target_lvls', 'lvl_min', 'lvl_max'], {}), '(blobs, rois_blob_name, blobs[\n rois_blob_name], lowest_target_lvls, lvl_min, lvl_max)\n', (6648, 6737), True, 'import utils.fpn as fpn_utils\n'), ((6431, 6474), 'numpy.minimum', 'np.minimum', 
(['lowest_target_lvls', 'target_lvls'], {}), '(lowest_target_lvls, target_lvls)\n', (6441, 6474), True, 'import numpy as np\n')] |
from numpy import empty as npempty
from numpy import int8 as npint8
from numpy import stack as npstack
from numpy import squeeze as npsqueeze
from numpy import where as npwhere
from numpy import absolute as npabsolute
from numpy import shape as npshape
from astropy.io import fits
import re
import sys
from gethdrinfo import gethdrinfo
from combflagstack import combflagstack
from imgpoly import imgpoly
from uspexampcor import uspexampcor
from idlrotate import idlrotate
#
#=============================================================================
#
def readuspexfits(files,lininfo,keywords=None,pair=False,rotate=0,\
lincor=None,ampcor=False,clupdate=False):
"""
To read an (upgraded) SpeX FITS image file.
Parameters
----------------
files : list of str
A list of fullpaths to FITS files.
lininfo : dict {'bias':str,'max':int,'bit':int}
information to identify pixels beyond range of linearity correction
'bias' is the fullpath to the bias frame
'max' maximum value in DN
'bit' the bit to set for pixels beyond `max`
keywords : list of str, optional
A list of FITS keyword to retain
pair : {False, True}, optional
Set to pair subtract the images.
rotate : {0,1,2,3,4,5,6,7}, optional
Direction Transpose? Rotation Counterclockwise
-------------------------------------------------
0 No None
1 No 90 deg
2 No 180 deg
3 No 270 deg
4 Yes None
5 Yes 90 deg
6 Yes 180 deg
7 Yes 270 deg
The directions follow the IDL rotate function convention.
lincor : str, optional
the fullpath to the FITS file of linearity correction coefficients
ampor : {False, True}, optional
Set to correct for amplifying drift (see uspexampcor.py)
Returns
--------
tuple
The results are returned as (data,var,hdrinfo,bitmask) where
data = the image(s) in DN/s
var = the variance image(s) in (DN/s)**2
hdrinfo = a list where element is a dict. The key is the FITS
keyword and the value is a list consiting of the FITS value and FITS
comment.
Procedure
---------
?
Example
--------
?
Modification History
--------------------
2022-05-25 - Written by <NAME>, University of Toledo.
Based on the Spextool mc_readuspexfits.pro IDL program.
"""
#
# Get setup information
#
NAXIS1=2048
NAXIS2=2048
nfiles = len(files)
dolincor = [0,1][lincor is not None]
# Correct for non-linearity?
if dolincor:
lc_coeffs = fits.getdata(lincor)
else:
lc_coeffs = None
# Get set up for lineary check
hdul = fits.open(lininfo['bias'])
DIVISOR = hdul[0].header['DIVISOR']
bias = (hdul[0].data)/DIVISOR
hdul.close()
if pair:
# Check to make sure the right number of files
if (nfiles % 2) != 0:
print('mc_readuspexfits: Not an even number of images.')
sys.exit(1)
else:
nimages = int(nfiles/2)
else:
nimages = nfiles
# Make empty arrays
data = npempty((nimages,NAXIS2,NAXIS1))
var = npempty((nimages,NAXIS2,NAXIS1))
hdrinfo = []
bitmask = npempty((nimages,NAXIS2,NAXIS1),dtype=npint8)
#
# Load the data
#
if pair is True:
# pair subtraction
for i in range(0,nimages):
A = loaddata(files[i*2],lininfo,bias,\
keywords=keywords,ampcor=ampcor,lccoeffs=lc_coeffs)
B = loaddata(files[i*2+1],lininfo,bias,\
keywords=keywords,ampcor=ampcor,lccoeffs=lc_coeffs)
combmask=combflagstack(npstack((A[3],B[3])),nbits=lininfo['bit']+1)
data[i,:,:] = idlrotate(A[0]-B[0],rotate)
var[i,:,:] = idlrotate(A[1]+B[1],rotate)
bitmask[i,:,:] = idlrotate(combmask,rotate)
hdrinfo.append(A[2])
hdrinfo.append(B[2])
if not pair:
for i in range(0,nimages):
im,va,hd,bm = loaddata(files[i],lininfo,bias,keywords=keywords,\
ampcor=ampcor,lccoeffs=lc_coeffs)
data[i,:,:] = idlrotate(im,rotate)
var[i,:,:] = idlrotate(va,rotate)
bitmask[i,:,:] = idlrotate(bm,rotate)
hdrinfo.append(hd)
return(npsqueeze(data),npsqueeze(var),hdrinfo,npsqueeze(bitmask))
#
#=============================================================================
#
def loaddata(file,lininfo,bias,keywords=None,ampcor=None,lccoeffs=None):
readnoise = 12.0 # per single read
gain = 1.5 # electrons per DN
hdul = fits.open(file)
hdul[0].verify('silentfix') # this was needed for to correct hdr problems
ITIME = hdul[0].header['ITIME']
COADDS = hdul[0].header['CO_ADDS']
NDRS = hdul[0].header['NDR']
READTIME = hdul[0].header['TABLE_SE']
DIVISOR = hdul[0].header['DIVISOR']
# Get set up for error propagation and store total exposure time
rdvar = (2.*readnoise**2)/NDRS/COADDS/ITIME**2/gain**2
crtn = (1.0 - READTIME*(NDRS**2 -1.0)/3./ITIME/NDRS)
# Read images, get into units of DN.
img_P = (hdul[1].data)/DIVISOR
img_S = (hdul[2].data)/DIVISOR
# Check for linearity maximum
mskP = (img_P < (bias-lininfo['max']))*2**lininfo['bit']
mskS = (img_S < (bias-lininfo['max']))*2**lininfo['bit']
# Combine the masks
bitmask=combflagstack(npstack((mskP,mskS)),nbits=lininfo['bit']+1)
# Create the image
img = img_P-img_S
# Correct for amplifier offsets
if ampcor:
img = uspexampcor(img)
# Determine the linearity correction for the image
if lccoeffs is not None:
cor = imgpoly(img,lccoeffs)
cor = npwhere(cor == 0,1,cor)
# Now set the corrections to unity for pixels > lincormax
cor = npwhere(bitmask == 2**lininfo['bit'],1,cor)
# Set black pixel corrections to unity as well.
cor[:,0:3+1] = 1.0
cor[:,2044:2047+1] = 1.0
cor[0:3+1,:] = 1.0
cor[2044:2047+1,:] = 1.0
# Apply the corrections
img/=cor
# Delete unecessary files
del cor,img_P,img_S
# Create the actual image.
# Convert image back to total DN for error propagation
img = img*DIVISOR
# Compute the variance and the final image
var=npabsolute(img)*crtn/NDRS/(COADDS**2)/(ITIME**2)/gain + rdvar
img = img/DIVISOR/ITIME
# Collect header information
hdr = gethdr(hdul[0].header)
hdul.close()
return[img,var,hdr,bitmask]
#
#=============================================================================
#
def gethdr(hdr, keywords=None):
    """Collect information from a SpeX FITS header.

    Returns a dict of ``[value, comment]`` pairs containing any requested
    keywords, plus a set of standard Spextool keywords derived from the
    raw header values.

    :arg hdr:      FITS header object (dict-like access).
    :arg keywords: Optional sequence of keywords to extract; all keywords
                   are extracted if not provided.
    """
    # Grab keywords if requested
    if keywords: hdrinfo = gethdrinfo(hdr, keywords=keywords)
    else:        hdrinfo = gethdrinfo(hdr)

    def signed(val):
        # Prepend a '+' to sexagesimal values
        # which do not already carry a '-'.
        if re.search('[-]', '[' + val + ']'):
            return val
        return '+' + val.strip()

    # Grab required keywords and convert
    # to standard Spextool keywords
    hdrinfo['AM']  = [hdr['TCS_AM'], ' Airmass']
    hdrinfo['HA']  = [signed(hdr['TCS_HA']), ' Hour angle (hours)']
    hdrinfo['PA']  = [hdr['POSANGLE'], ' Position Angle E of N (deg)']
    hdrinfo['DEC'] = [signed(hdr['TCS_DEC']), ' Declination, FK5 J2000']
    hdrinfo['RA']  = [hdr['TCS_RA'].strip(), ' Right Ascension, FK5 J2000']

    # Exposure-related keywords
    coadds = hdr['CO_ADDS']
    itime  = hdr['ITIME']
    hdrinfo['ITIME']    = [itime, ' Integration time (sec)']
    hdrinfo['NCOADDS']  = [coadds, ' Number of COADDS']
    hdrinfo['IMGITIME'] = [coadds*itime,
                           ' Image integration time, NCOADDSxITIME (sec)']

    # Time/date/identification keywords
    hdrinfo['TIME']     = [hdr['TIME_OBS'].strip(), ' Observation time in UTC']
    hdrinfo['DATE']     = [hdr['DATE_OBS'].strip(), ' Observation date in UTC']
    hdrinfo['MJD']      = [hdr['MJD_OBS'],
                           ' Modified Julian date OBSDATE+TIME_OBS']
    hdrinfo['FILENAME'] = [hdr['IRAFNAME'].strip(), ' Filename']
    hdrinfo['MODE']     = [hdr['GRAT'].strip(), ' Instrument Mode']
    hdrinfo['INSTR']    = ['SpeX', ' Instrument']

    return hdrinfo
| [
"numpy.stack",
"numpy.absolute",
"uspexampcor.uspexampcor",
"numpy.empty",
"astropy.io.fits.getdata",
"numpy.where",
"astropy.io.fits.open",
"gethdrinfo.gethdrinfo",
"numpy.squeeze",
"sys.exit",
"idlrotate.idlrotate",
"re.search",
"imgpoly.imgpoly"
] | [((2965, 2991), 'astropy.io.fits.open', 'fits.open', (["lininfo['bias']"], {}), "(lininfo['bias'])\n", (2974, 2991), False, 'from astropy.io import fits\n'), ((3415, 3449), 'numpy.empty', 'npempty', (['(nimages, NAXIS2, NAXIS1)'], {}), '((nimages, NAXIS2, NAXIS1))\n', (3422, 3449), True, 'from numpy import empty as npempty\n'), ((3462, 3496), 'numpy.empty', 'npempty', (['(nimages, NAXIS2, NAXIS1)'], {}), '((nimages, NAXIS2, NAXIS1))\n', (3469, 3496), True, 'from numpy import empty as npempty\n'), ((3526, 3574), 'numpy.empty', 'npempty', (['(nimages, NAXIS2, NAXIS1)'], {'dtype': 'npint8'}), '((nimages, NAXIS2, NAXIS1), dtype=npint8)\n', (3533, 3574), True, 'from numpy import empty as npempty\n'), ((4994, 5009), 'astropy.io.fits.open', 'fits.open', (['file'], {}), '(file)\n', (5003, 5009), False, 'from astropy.io import fits\n'), ((7492, 7525), 're.search', 're.search', (['"""[-]"""', "('[' + val + ']')"], {}), "('[-]', '[' + val + ']')\n", (7501, 7525), False, 'import re\n'), ((7743, 7776), 're.search', 're.search', (['"""[-]"""', "('[' + val + ']')"], {}), "('[-]', '[' + val + ']')\n", (7752, 7776), False, 'import re\n'), ((2839, 2859), 'astropy.io.fits.getdata', 'fits.getdata', (['lincor'], {}), '(lincor)\n', (2851, 2859), False, 'from astropy.io import fits\n'), ((4677, 4692), 'numpy.squeeze', 'npsqueeze', (['data'], {}), '(data)\n', (4686, 4692), True, 'from numpy import squeeze as npsqueeze\n'), ((4693, 4707), 'numpy.squeeze', 'npsqueeze', (['var'], {}), '(var)\n', (4702, 4707), True, 'from numpy import squeeze as npsqueeze\n'), ((4716, 4734), 'numpy.squeeze', 'npsqueeze', (['bitmask'], {}), '(bitmask)\n', (4725, 4734), True, 'from numpy import squeeze as npsqueeze\n'), ((5848, 5869), 'numpy.stack', 'npstack', (['(mskP, mskS)'], {}), '((mskP, mskS))\n', (5855, 5869), True, 'from numpy import stack as npstack\n'), ((6018, 6034), 'uspexampcor.uspexampcor', 'uspexampcor', (['img'], {}), '(img)\n', (6029, 6034), False, 'from uspexampcor import uspexampcor\n'), 
((6133, 6155), 'imgpoly.imgpoly', 'imgpoly', (['img', 'lccoeffs'], {}), '(img, lccoeffs)\n', (6140, 6155), False, 'from imgpoly import imgpoly\n'), ((6169, 6194), 'numpy.where', 'npwhere', (['(cor == 0)', '(1)', 'cor'], {}), '(cor == 0, 1, cor)\n', (6176, 6194), True, 'from numpy import where as npwhere\n'), ((6292, 6339), 'numpy.where', 'npwhere', (["(bitmask == 2 ** lininfo['bit'])", '(1)', 'cor'], {}), "(bitmask == 2 ** lininfo['bit'], 1, cor)\n", (6299, 6339), True, 'from numpy import where as npwhere\n'), ((7213, 7247), 'gethdrinfo.gethdrinfo', 'gethdrinfo', (['hdr'], {'keywords': 'keywords'}), '(hdr, keywords=keywords)\n', (7223, 7247), False, 'from gethdrinfo import gethdrinfo\n'), ((7281, 7296), 'gethdrinfo.gethdrinfo', 'gethdrinfo', (['hdr'], {}), '(hdr)\n', (7291, 7296), False, 'from gethdrinfo import gethdrinfo\n'), ((3269, 3280), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3277, 3280), False, 'import sys\n'), ((4061, 4091), 'idlrotate.idlrotate', 'idlrotate', (['(A[0] - B[0])', 'rotate'], {}), '(A[0] - B[0], rotate)\n', (4070, 4091), False, 'from idlrotate import idlrotate\n'), ((4118, 4148), 'idlrotate.idlrotate', 'idlrotate', (['(A[1] + B[1])', 'rotate'], {}), '(A[1] + B[1], rotate)\n', (4127, 4148), False, 'from idlrotate import idlrotate\n'), ((4175, 4202), 'idlrotate.idlrotate', 'idlrotate', (['combmask', 'rotate'], {}), '(combmask, rotate)\n', (4184, 4202), False, 'from idlrotate import idlrotate\n'), ((4512, 4533), 'idlrotate.idlrotate', 'idlrotate', (['im', 'rotate'], {}), '(im, rotate)\n', (4521, 4533), False, 'from idlrotate import idlrotate\n'), ((4562, 4583), 'idlrotate.idlrotate', 'idlrotate', (['va', 'rotate'], {}), '(va, rotate)\n', (4571, 4583), False, 'from idlrotate import idlrotate\n'), ((4612, 4633), 'idlrotate.idlrotate', 'idlrotate', (['bm', 'rotate'], {}), '(bm, rotate)\n', (4621, 4633), False, 'from idlrotate import idlrotate\n'), ((3974, 3995), 'numpy.stack', 'npstack', (['(A[3], B[3])'], {}), '((A[3], B[3]))\n', (3981, 
3995), True, 'from numpy import stack as npstack\n'), ((6797, 6812), 'numpy.absolute', 'npabsolute', (['img'], {}), '(img)\n', (6807, 6812), True, 'from numpy import absolute as npabsolute\n')] |
#!/usr/bin/env python
#
# texture.py - The Texture and Texture2D classes.
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides the :class:`Texture` class, which is the base classes
for all other FSLeyes texture types. See also the :class:`.Texture2D` and
:class:`.Texture3D` classes.
"""
import logging
import contextlib
import functools as ft
import numpy as np
import OpenGL.GL as gl
import fsl.utils.idle as idle
import fsl.utils.notifier as notifier
import fsl.transform.affine as affine
import fsleyes_widgets.utils.status as status
import fsleyes.strings as strings
from . import data as texdata
log = logging.getLogger(__name__)
class TextureBase(object):
    """Base mixin class used by the :class:`Texture` class.

    Provides texture lifecycle management (creation/destruction) and
    binding logic.

    .. autosummary::
       :nosignatures:

       name
       handle
       target
       ndim
       nvals
       isBound
       bound
       bindTexture
       unbindTexture

    The :meth:`bound` context manager (which wraps :meth:`bindTexture` and
    :meth:`unbindTexture`) binds a texture object to a GL texture unit for
    the duration of a ``with`` block::

        import OpenGL.GL as gl

        # When we want to use the texture in a
        # scene render, we need to bind it to
        # a texture unit.
        with tex.bound(gl.GL_TEXTURE0):

            # use linear interpolation
            tex.interp = gl.GL_LINEAR
            # ...
            # Do the render
            # ...
    """

    def __init__(self, name, ndims, nvals):
        """Create a ``TextureBase``.

        :arg name:  The name of this texture - should be unique.
        :arg ndims: Number of dimensions - must be 1, 2 or 3.
        :arg nvals: Number of values stored in each texture element.
        """
        targets = {1 : gl.GL_TEXTURE_1D,
                   2 : gl.GL_TEXTURE_2D,
                   3 : gl.GL_TEXTURE_3D}

        if ndims not in targets:
            raise ValueError('Invalid number of dimensions')

        self.__texture     = int(gl.glGenTextures(1))
        self.__ttype       = targets[ndims]
        self.__name        = name
        self.__ndims       = ndims
        self.__nvals       = nvals
        # Number of nested bindTexture calls,
        # and the unit currently bound to
        self.__bound       = 0
        self.__textureUnit = None

    def __del__(self):
        """Prints a log message."""
        # log might get deleted before us
        try:
            log.debug('%s.del (%s)', type(self).__name__, id(self))
        except Exception:
            pass

    def destroy(self):
        """Deletes the texture handle. Must be called when this
        ``TextureBase`` is no longer needed.
        """
        log.debug('Deleting %s (%s) for %s: %s',
                  type(self).__name__, id(self),
                  self.__name, self.__texture)

        gl.glDeleteTextures(self.__texture)
        self.__texture = None

    @property
    def name(self):
        """The unique name passed into :meth:`__init__` (not the GL
        texture name).
        """
        return self.__name

    @property
    def handle(self):
        """The GL texture handle for this texture. """
        return self.__texture

    @property
    def target(self):
        """The GL target of this texture - ``GL_TEXTURE_1D``,
        ``GL_TEXTURE_2D`` or ``GL_TEXTURE_3D``.
        """
        return self.__ttype

    @property
    def ndim(self):
        """Number of dimensions of this texture - 1, 2, or 3. """
        return self.__ndims

    @property
    def nvals(self):
        """Number of values stored at each point in this texture. """
        return self.__nvals

    def isBound(self):
        """Returns ``True`` if this texture is currently bound, ``False``
        otherwise.

        .. note:: This method assumes that the :meth:`bindTexture` and
                  :meth:`unbindTexture` methods are called in pairs.
        """
        return self.__bound > 0

    @contextlib.contextmanager
    def bound(self, textureUnit=None):
        """Context manager which binds this texture on entry and unbinds
        it on exit, instead of manually calling :meth:`bindTexture` and
        :meth:`unbindTexture`.

        :arg textureUnit: The texture unit to bind this texture to, e.g.
                          ``GL_TEXTURE0``.
        """
        try:
            self.bindTexture(textureUnit)
            yield
        finally:
            self.unbindTexture()

    def bindTexture(self, textureUnit=None):
        """Activates and binds this texture. Nested calls are counted, and
        the GL bind only occurs on the outermost call.

        :arg textureUnit: The texture unit to bind this texture to, e.g.
                          ``GL_TEXTURE0``.
        """
        firstBind = self.__bound == 0

        if firstBind:
            if textureUnit is not None:
                gl.glActiveTexture(textureUnit)

            gl.glBindTexture(self.__ttype, self.__texture)
            self.__textureUnit = textureUnit

        self.__bound += 1

    def unbindTexture(self):
        """Unbinds this texture. The GL unbind only occurs when the
        outermost :meth:`bindTexture` call is matched.
        """
        lastUnbind = self.__bound == 1

        if lastUnbind:
            if self.__textureUnit is not None:
                gl.glActiveTexture(self.__textureUnit)

            gl.glBindTexture(self.__ttype, 0)
            self.__textureUnit = None

        self.__bound = max(0, self.__bound - 1)
class TextureSettingsMixin(object):
    """Mixin class used by the :class:`Texture` class.

    This class provides methods to get/set various settings which can
    be used to manipulate the texture. All of the logic which uses
    these settings is in the ``Texture`` class.

    The following settings can be changed:

    .. autosummary::
       :nosignatures:

       interp
       prefilter
       prefilterRange
       normalise
       normaliseRange
       border
       scales
       resolution

    Additional settings can be added via the ``settings`` argument to
    :meth:`__init__`. All settings can be changed via the :meth:`update`
    method.
    """

    def __init__(self, settings=None):
        """Create a ``TextureSettingsMixin``.

        :arg settings: Sequence of additional setting names to make
                       available. All settings are initialised to ``None``.
        """
        defaults = ['interp',
                    'prefilter', 'prefilterRange',
                    'normalise', 'normaliseRange',
                    'border', 'resolution', 'scales']

        if settings is None: settings = defaults
        else:                settings = defaults + list(settings)

        # All settings are stored in a single
        # dict, keyed by setting name.
        self.__settings = {s : None for s in settings}

    @property
    def interp(self):
        """Return the current texture interpolation setting - either
        ``GL_NEAREST`` or ``GL_LINEAR``.
        """
        return self.__settings['interp']

    @interp.setter
    def interp(self, interp):
        """Sets the texture interpolation. """
        self.update(interp=interp)

    @property
    def prefilter(self):
        """Return the current prefilter function - texture data is passed
        through this function before being uploaded to the GPU.

        If this function changes the range of the data, you must also
        provide a ``prefilterRange`` function - see :meth:`prefilterRange`.
        """
        return self.__settings['prefilter']

    @prefilter.setter
    def prefilter(self, prefilter):
        """Set the prefilter function """
        self.update(prefilter=prefilter)

    @property
    def prefilterRange(self):
        """Return the current prefilter range function - if the ``prefilter``
        function changes the data range, this function must be provided. It
        is passed two parameters - the known data minimum and maximum, and
        must adjust these values so that they reflect the adjusted range of
        the data that was passed to the ``prefilter`` function.
        """
        return self.__settings['prefilterRange']

    @prefilterRange.setter
    def prefilterRange(self, prefilterRange):
        """Set the prefilter range function. """
        # Bug fix: this previously called
        # self.update(prefilter=prefilterRange),
        # which clobbered the prefilter setting
        # and left prefilterRange unchanged.
        self.update(prefilterRange=prefilterRange)

    @property
    def normalise(self):
        """Return the current normalisation state.

        If ``normalise=True``, the data is normalised to lie in the range
        ``[0, 1]`` (or normalised to the full range, if being stored as
        integers) before being stored. The data is normalised according to
        the minimum/maximum of the data, or to a normalise range set via
        the :meth:`normaliseRange`.

        Set this to ``False`` to disable normalisation.

        .. note:: If the data is not of a type that can be stored natively
                  as a texture, the data is automatically normalised,
                  regardless of the value specified here.
        """
        return self.__settings['normalise']

    @normalise.setter
    def normalise(self, normalise):
        """Enable/disable normalisation. """
        self.update(normalise=normalise)

    @property
    def normaliseRange(self):
        """Return the current normalise range.

        If normalisation is enabled (see :meth:`normalise`), or necessary,
        the data is normalised according to either its minimum/maximum, or
        to the range specified via this method.

        This parameter must be a sequence of two values, containing the
        ``(min, max)`` normalisation range. The data is then normalised to
        lie in the range ``[0, 1]`` (or normalised to the full range, if being
        stored as integers) before being stored.

        If ``None``, the data minimum/maximum are calculated and used.
        """
        return self.__settings['normaliseRange']

    @normaliseRange.setter
    def normaliseRange(self, normaliseRange):
        """Set the normalise range. """
        self.update(normaliseRange=normaliseRange)

    @property
    def border(self):
        """Return the texture border colour. Set this to a tuple of four values
        in the range 0 to 1, or ``None`` for no border (in which case the
        texture coordinates will be clamped to edges).
        """
        return self.__settings['border']

    @border.setter
    def border(self, border):
        """Set the texture border colour."""
        self.update(border=border)

    @property
    def scales(self):
        """Return the scaling factors for each axis of the texture data.

        These values are solely used to calculate the sub-sampling rate if the
        resolution (as set by :meth:`resolution`) is in terms of something
        other than data indices (e.g. :class:`.Image` pixdims).
        """
        return self.__settings['scales']

    @scales.setter
    def scales(self, scales):
        """Set the texture data axis scaling factors. """
        self.update(scales=scales)

    @property
    def resolution(self):
        """Return the current texture data resolution - this value is passed
        to the :func:`.routines.subsample` function, in the
        :func:`.prepareData` function.
        """
        return self.__settings['resolution']

    @resolution.setter
    def resolution(self, resolution):
        """Set the texture data resolution. """
        self.update(resolution=resolution)

    def update(self, **kwargs):
        """Set any parameters on this ``TextureSettingsMixin``. Valid keyword
        arguments are:

        ================== ==========================
        ``interp``         See :meth:`interp`.
        ``prefilter``      See :meth:`prefilter`.
        ``prefilterRange`` See :meth:`prefilterRange`
        ``normalise``      See :meth:`normalise.`
        ``normaliseRange`` See :meth:`normaliseRange`
        ``border``         See :meth:`border`
        ``scales``         See :meth:`scales`.
        ``resolution``     See :meth:`resolution`
        ================== ==========================

        :returns: A ``dict`` of ``{attr : changed}`` mappings, indicating
                  which properties have changed value.
        """
        changed = {}

        for s in self.__settings.keys():
            oldval          = self.__settings[s]
            newval          = kwargs.get(s, self.__settings[s])
            changed[s]      = oldval != newval
            self.__settings[s] = newval

        return changed
class Texture(notifier.Notifier, TextureBase, TextureSettingsMixin):
"""The ``Texture`` class is the base class for all other texture types in
*FSLeyes*. This class is not intended to be used directly - use one of the
sub-classes instead.
A texture can be bound and unbound via the methods of the
:class:`TextureBase` class. Various texture settings can be changed
via the methods of the :class:`TextureSettingsMixin` class. In the majority
of cases, in order to draw or configure a texture, it needs to be bound
(although this depends on the sub-class).
In order to use a texture, at the very least you need to provide some
data, or specify a type and shape. This can be done either via the
:meth:`data`/:meth:`shape`/:meth:`dtype` methods, or by the :meth:`set`
method. If you specify a shape and data type, any previously specified
data will be lost, and vice versa.
Calling :meth:`set` will usually cause the texture to be reconfigured and
refreshed, although you can also force a refresh by calling the
:meth:`refresh` method directly.
The following properties can be queried to retrieve information about the
tetxure; some will return ``None`` until you have provided some data (or
a shape and type):
.. autosummary::
:nosignatures:
voxValXform
invVoxValXform
shape
dtype
textureType
baseFormat
internalFormat
data
preparedData
When a ``Texture`` is created, and when its settings are changed, it may
need to prepare the data to be passed to OpenGL - for large textures, this
can be a time consuming process, so this may be performed on a separate
thread using the :mod:`.idle` module (unless the ``threaded`` parameter to
:meth:`__init__` is set to ``False``). The :meth:`ready` method returns
``True`` or ``False`` to indicate whether the ``Texture`` is ready to be
used.
Furthermore, the ``Texture`` class derives from :class:`.Notifier`, so
listeners can register to be notified when an ``Texture`` is ready to
be used.
For textures with multiple values per voxel, it is assumed that these
values are indexed with the first dimension of the texture data (as passed
to :meth:`data` or :meth:`set`).
``Texture`` sub-classes (e.g. :class:`.Texture2D`, :class:`.Texture3D`,
:class:`.ColourMapTexture`) must override the :meth:`doRefresh` method
such that it performs the GL calls required to configure the textureb.
See the :mod:`.resources` module for a method of sharing texture resources.
"""
    def __init__(self,
                 name,
                 ndims,
                 nvals,
                 threaded=False,
                 settings=None,
                 textureFormat=None,
                 internalFormat=None,
                 **kwargs):
        """Create a ``Texture``.

        :arg name:           The name of this texture - should be unique.
        :arg ndims:          Number of dimensions - must be 1, 2 or 3.
        :arg nvals:          Number of values stored in each texture element.
        :arg threaded:       If ``True``, the texture data will be prepared on
                             a separate thread (on calls to
                             :meth:`refresh`). If ``False``, the texture data
                             is prepared on the calling thread, and the
                             :meth:`refresh` call will block until it has been
                             prepared.
        :arg settings:       Additional settings to make available through the
                             :class:`TextureSettingsMixin`.
        :arg textureFormat:  Texture format to use - if not specified, this is
                             automatically determined. If specified, an
                             ``internalFormat`` must also be specified.
        :arg internalFormat: Internal texture format to use - if not specified,
                             this is automatically determined.

        All other arguments are passed through to the initial call to
        :meth:`set`.

        .. note:: All subclasses must accept a ``name`` as the first parameter
                  to their ``__init__`` method, and must pass said ``name``
                  through to the :meth:`__init__` method.

        .. note:: In normal cases, the ``textureFormat`` and ``internalFormat``
                  do not need to be specified - they will be automatically
                  determined using the :func:`.data.getTextureType` function.
                  However, there can be instances where a specific texture type
                  needs to be used. In these instances, it is up to the calling
                  code to ensure that the texture data can be coerced into
                  the correct GL data type.
        """

        TextureBase         .__init__(self, name, ndims, nvals)
        TextureSettingsMixin.__init__(self, settings)

        # Either both formats are given, or
        # neither - a lone format is an error.
        if ((textureFormat is not None) and (internalFormat is None)) or \
           ((textureFormat is None) and (internalFormat is not None)):
            raise ValueError('Both textureFormat and internalFormat '
                             'must be specified')

        self.__ready    = False
        self.__threaded = threaded

        # The data, type and shape are
        # refreshed on every call to
        # set or refresh (the former
        # calls the latter)
        self.__data         = None
        self.__dtype        = None
        self.__shape        = None
        self.__preparedData = None

        # The data is refreshed on
        # every call to set or refresh
        # These attributes are set by
        # the __determineTextureType
        # and __prepareTextureData
        # methods (which are called
        # by refresh)
        self.__voxValXform    = None
        self.__invVoxValXform = None
        # autoTexFmt records whether the texture
        # format is determined automatically on
        # each refresh, or was fixed by the caller
        self.__autoTexFmt     = textureFormat is None
        self.__texFmt         = textureFormat
        self.__texIntFmt      = internalFormat
        self.__texDtype       = None

        # If threading is enabled, texture
        # refreshes are performed with an
        # idle.TaskThread.
        if threaded:
            self.__taskThread = idle.TaskThread()
            self.__taskName   = '{}_{}_refresh'.format(type(self).__name__,
                                                       id(self))

            self.__taskThread.daemon = True
            self.__taskThread.start()
        else:
            self.__taskThread = None
            self.__taskName   = None

        # Pass remaining arguments (e.g. data/
        # shape/dtype) through to set()
        self.set(**kwargs)
def destroy(self):
"""Must be called when this ``Texture`` is no longer needed.
"""
TextureBase.destroy(self)
if self.__taskThread is not None:
self.__taskThread.stop()
self.__taskThread = None
self.__data = None
self.__preparedData = None
def ready(self):
"""Returns ``True`` if this ``Texture`` is ready to be used,
``False`` otherwise.
"""
return self.__ready
@property
def voxValXform(self):
"""Return a transformation matrix that can be used to transform
values read from the texture back to the original data range.
"""
return self.__voxValXform
@property
def invVoxValXform(self):
"""Return a transformation matrix that can be used to transform
values in the original data range to values as read from the texture.
"""
return self.__invVoxValXform
def texCoordXform(self, origShape):
"""Returns a transformation matrix which can be used to adjust a set of
3D texture coordinates so they can index the underlying texture, which
may be 2D.
This implementation returns an identity matrix, but it is overridden
by the .Texture2D sub-class, which is sometimes used to store 3D image
data.
:arg origShape: Original data shape.
"""
return np.eye(4)
def invTexCoordXform(self, origShape):
"""Returns the inverse of :meth:`texCoordXform`. """
return affine.invert(self.texCoordXform(origShape))
@property
def shape(self):
"""Return a tuple containing the texture data shape. """
return self.__shape
@shape.setter
def shape(self, shape):
"""Set the texture data shape. """
return self.set(shape=shape)
@property
def dtype(self):
"""Return the ``numpy`` data type of the texture data."""
return self.__dtype
@dtype.setter
def dtype(self, dtype):
"""Set the ``numpy`` data type for the texture data."""
self.set(dtype=dtype)
@property
def textureType(self):
"""Return the texture data type, e.g. ``gl.GL_UNSIGNED_BYTE``. """
return self.__texDtype
@property
def baseFormat(self):
"""Return the base texture format, e.g. ``gl.GL_ALPHA``. """
return self.__texFmt
@property
def internalFormat(self):
"""Return the sized/internal texture format, e.g. ``gl.GL_ALPHA8``. """
return self.__texIntFmt
@property
def data(self):
"""Returns the data that has been passed to the :meth:`set` method. """
return self.__data
@data.setter
def data(self, data):
"""Set the texture data - this get passed through to :meth:`set`. """
self.set(data=data)
@property
def preparedData(self):
"""Returns the prepared data, i.e. the data as it has been copied
to the GPU.
"""
return self.__preparedData
def shapeData(self, data, oldShape=None):
"""Shape the data so that it is ready for use as texture data.
This implementation returns the data unchanged, but it is overridden
by the ``Texture2D`` class, which is sometimes used to store 3D image
data.
:arg data: ``numpy`` array containing the data to be shaped
:arg oldShape: Original data shape, if this is a sub-array. If not
provided, taken from ``data``.
"""
return data
    def set(self, **kwargs):
        """Set any parameters on this ``Texture``. Valid keyword arguments are:

        ================== ==============================================
        ``interp``         See :meth:`.interp`.
        ``data``           See :meth:`.data`.
        ``shape``          See :meth:`.shape`.
        ``dtype``          See :meth:`.dtype`.
        ``prefilter``      See :meth:`.prefilter`.
        ``prefilterRange`` See :meth:`.prefilterRange`
        ``normalise``      See :meth:`.normalise.`
        ``normaliseRange`` See :meth:`.normaliseRange`.
        ``scales``         See :meth:`.scales`.
        ``resolution``     See :meth:`.resolution`.
        ``refresh``        If ``True`` (the default), the :meth:`refresh`
                           function is called (but only if a setting has
                           changed).
        ``callback``       Optional function which will be called (via
                           :func:`.idle.idle`) when the texture has been
                           refreshed. Only called if ``refresh`` is
                           ``True``, and a setting has changed.
        ``notify``         Passed through to the :meth:`refresh` method.
        ================== ==============================================

        :returns: ``True`` if any settings have changed and the
                  ``Texture`` is being/needs to be refreshed, ``False``
                  otherwise.
        """

        # Let the settings mixin process its own
        # settings first - it returns a dict of
        # {setting : changed} flags.
        changed  = TextureSettingsMixin.update(self, **kwargs)
        data     = kwargs.get('data',     None)
        shape    = kwargs.get('shape',    self.shape)
        dtype    = kwargs.get('dtype',    self.dtype)
        refresh  = kwargs.get('refresh',  True)
        notify   = kwargs.get('notify',   True)
        callback = kwargs.get('callback', None)

        changed['data']  = data is not None
        changed['shape'] = shape != self.shape
        changed['dtype'] = dtype != self.dtype

        # Nothing changed - no refresh needed
        if not any(changed.values()):
            return False

        if data is not None:
            # The dtype attribute is set
            # later in __prepareTextureData,
            # as it may be different from
            # the dtype of the passed-in
            # data
            self.__data  = data
            self.__dtype = None
            dtype        = data.dtype

            # The first dimension is assumed to contain the
            # values, for multi-valued (e.g. RGB) textures
            if self.nvals > 1: self.__shape = data.shape[1:]
            else:              self.__shape = data.shape

            # If the data is of a type which cannot
            # be stored natively as an OpenGL texture,
            # and we don't have support for floating
            # point textures, the data must be
            # normalised. See determineType and
            # prepareData in the data module
            self.normalise = self.normalise or \
                             (not texdata.canUseFloatTextures()[0] and
                              (dtype not in (np.uint8, np.int8,
                                             np.uint16, np.int16)))

            # If the caller has not provided
            # a normalisation range, we have
            # to calculate it.
            if (data is not None) and \
               self.normalise      and \
               (self.normaliseRange is None):
                self.normaliseRange = np.nanmin(data), np.nanmax(data)
                log.debug('Calculated %s data range for normalisation: '
                          '[%s - %s]', self.name, *self.normaliseRange)

        # No data was given, but the declared
        # shape/dtype changed - clear any old
        # data, and record the new shape/dtype.
        elif changed['shape'] or changed['dtype']:
            self.__data  = None
            self.__dtype = dtype
            self.__shape = shape

        # The texture data itself only needs to
        # be re-prepared if one of these settings
        # changed (as opposed to e.g. interp).
        refreshData = any((changed['data'],
                           changed['prefilter'],
                           changed['prefilterRange'],
                           changed['normaliseRange'] and self.normalise,
                           changed['resolution'],
                           changed['scales'],
                           changed['normalise']))

        if refresh:
            self.refresh(refreshData=refreshData,
                         notify=notify,
                         callback=callback)

        return True
    def refresh(self, refreshData=True, notify=True, callback=None):
        """(Re-)configures the OpenGL texture.

        :arg refreshData: If ``True`` (the default), the texture data is
                          refreshed.

        :arg notify:      If ``True`` (the default), a notification is
                          triggered via the :class:`.Notifier` base-class,
                          when this ``Texture3D`` has been refreshed, and
                          is ready to use. Otherwise, the notification is
                          suppressed.

        :arg callback:    Optional function which will be called (via
                          :func:`.idle.idle`) when the texture has been
                          refreshed. Only called if ``refresh`` is
                          ``True``, and a setting has changed.

        .. note:: The texture data may be generated on a separate thread, using
                  the :func:`.idle.run` function. This is controlled by the
                  ``threaded`` parameter,  passed to :meth:`__init__`.
        """

        # Don't bother if data
        # or shape/type hasn't
        # been set
        data  = self.__data
        shape = self.__shape
        dtype = self.__dtype

        # We either need some data, or
        # we need a shape and data type.
        if data is None and (shape is None or dtype is None):
            return

        refreshData  = refreshData and (data is not None)
        self.__ready = False

        # This can take a long time for big
        # data, so we do it in a separate
        # thread using the idle module.
        def genData():

            # Another genData function is
            # already queued - don't run.
            # The TaskThreadVeto error
            # will stop the TaskThread from
            # calling configTexture as well.
            if self.__taskThread is not None and \
               self.__taskThread.isQueued(self.__taskName):
                raise idle.TaskThreadVeto()

            self.__determineTextureType()

            if refreshData:
                self.__prepareTextureData()

        # Once genData is finished, we pass the
        # result (see __prepareTextureData) to
        # the sub-class doRefresh method.
        def doRefresh():

            self.doRefresh()

            self.__ready = True

            if notify:
                self.notify()
            if callback is not None:
                callback()

        # Wrap the above functions in a report
        # decorator in case an error occurs
        #
        # NOTE(review): title and msg are both taken from
        # strings.messages - confirm whether title should
        # come from a separate titles table.
        title = strings.messages[self, 'dataError']
        msg   = strings.messages[self, 'dataError']

        # the genData function is called on a separate thread,
        # but doRefresh is called on the idle/mainloop. So we
        # can use the reportErrorDecorator for the latter, but
        # not the former.
        doRefresh    = status.reportErrorDecorator(title, msg)(doRefresh)
        genDataError = ft.partial(status.reportError, title, msg)

        # Run asynchronously if we are
        # threaded, and we have data to
        # prepare - if we don't have
        # data, we run genData on the
        # current thread, because it
        # shouldn't do anything
        if self.__threaded and (data is not None):

            # TODO the task is already queued,
            #      but a callback function has been
            #      specified, should you queue the
            #      callback function?

            # Don't queue the texture
            # refresh task twice
            if not self.__taskThread.isQueued(self.__taskName):
                self.__taskThread.enqueue(genData,
                                          taskName=self.__taskName,
                                          onFinish=doRefresh,
                                          onError=genDataError)

        else:
            genData()
            doRefresh()
    def patchData(self, data, offset):
        """This is a shortcut method which can be used to replace part
        of the image texture data without having to regenerate the entire
        texture.

        The :meth:`set` and :meth:`refresh` methods are quite heavyweight, and
        are written in such a way that partial texture updates are not
        possible. This method allows small parts of the image texture to be
        quickly updated.

        :arg data:   Data to splice into the texture.
        :arg offset: Location at which to place the data - passed through
                     to the sub-class :meth:`doPatch` implementation.
        """
        data = np.asarray(data)

        # Pad with trailing singleton dimensions
        # so the patch has the same dimensionality
        # as the texture.
        if len(data.shape) < self.ndim:
            newshape = list(data.shape) + [1] * (self.ndim - len(data.shape))
            data     = data.reshape(newshape)

        # Apply the same preparation (prefilter/
        # normalisation/subsampling) as a full
        # refresh would - prepareData returns a
        # tuple; the first element is the data.
        data = texdata.prepareData(
            data,
            prefilter=self.prefilter,
            prefilterRange=self.prefilterRange,
            resolution=self.resolution,
            scales=self.scales,
            normalise=self.normalise,
            normaliseRange=self.normaliseRange)[0]

        self.doPatch(data, offset)

        self.notify()
def doRefresh(self):
"""Must be overridden by sub-classes to configure the texture.
This method is not intended to be called externally - call
:meth:`refresh` instead.
This method should use the :meth:`preparedData`, or the :meth:`shape`,
to configure the texture. Sub-classes can assume that at least one
of these will not be ``None``.
If ``preparedData`` is not ``None``, the ``shape`` should be ignored,
and inferred from ``preparedData``.
"""
raise NotImplementedError('Must be implemented by subclasses')
def doPatch(self, data, offset):
"""Must be overridden by sub-classes to quickly update part of
the texture data.
This method is not intended to be called externally - call
:meth:`patchData` instead.
"""
raise NotImplementedError('Must be implemented by subclasses')
    def __determineTextureType(self):
        """Figures out how the texture data should be stored as an OpenGL
        texture. See the :func:`.data.getTextureType` function.

        This method sets the following attributes on this ``Texture`` instance:

        ==================== ==============================================
        ``__texFmt``         The texture format (e.g. ``GL_RGB``,
                             ``GL_LUMINANCE``, etc).

        ``__texIntFmt``      The internal texture format used by OpenGL for
                             storage (e.g. ``GL_RGB16``, ``GL_LUMINANCE8``,
                             etc).

        ``__texDtype``       The raw type of the texture data (e.g.
                             ``GL_UNSIGNED_SHORT``)
        ==================== ==============================================
        """

        # Only single-, three- and four-channel data can be
        # represented as an OpenGL texture.
        if self.nvals not in (1, 3, 4):
            raise ValueError('Cannot create texture representation for {} '
                             '(nvals: {})'.format(self.dtype, self.nvals))

        # Prefer the dtype of the actual data if it is set;
        # otherwise fall back to the configured dtype.
        if self.__data is None: dtype = self.__dtype
        else:                   dtype = self.__data.dtype

        normalise = self.normalise
        nvals     = self.nvals

        texDtype, texFmt, intFmt = texdata.getTextureType(
            normalise, dtype, nvals)

        # If automatic format selection is disabled, keep
        # the formats that were configured externally.
        if not self.__autoTexFmt:
            texFmt = self.__texFmt
            intFmt = self.__texIntFmt

        log.debug('Texture (%s) is to be stored as %s/%s/%s '
                  '(normalised: %s)',
                  self.name,
                  texdata.GL_TYPE_NAMES[texDtype],
                  texdata.GL_TYPE_NAMES[texFmt],
                  texdata.GL_TYPE_NAMES[intFmt],
                  normalise)

        self.__texFmt    = texFmt
        self.__texIntFmt = intFmt
        self.__texDtype  = texDtype
def __prepareTextureData(self):
"""Prepare the texture data.
This method passes the stored data to the :func:`.data.prepareData`
function and then stores references to its return valuesa as
attributes on this ``Texture`` instance:
==================== =============================================
``__preparedata`` A ``numpy`` array containing the image data,
ready to be copied to the GPU.
``__voxValXform`` An affine transformation matrix which encodes
an offset and a scale, which may be used to
transform the texture data from the range
``[0.0, 1.0]`` to its raw data range.
``__invVoxValXform`` Inverse of ``voxValXform``.
==================== =============================================
"""
data, voxValXform, invVoxValXform = texdata.prepareData(
self.__data,
prefilter=self.prefilter,
prefilterRange=self.prefilterRange,
resolution=self.resolution,
scales=self.scales,
normalise=self.normalise,
normaliseRange=self.normaliseRange)
self.__preparedData = data
self.__dtype = data.dtype
self.__voxValXform = voxValXform
self.__invVoxValXform = invVoxValXform
| [
"fsleyes_widgets.utils.status.reportErrorDecorator",
"functools.partial",
"fsl.utils.idle.TaskThread",
"OpenGL.GL.glGenTextures",
"OpenGL.GL.glBindTexture",
"OpenGL.GL.glDeleteTextures",
"numpy.asarray",
"numpy.nanmax",
"numpy.nanmin",
"OpenGL.GL.glActiveTexture",
"numpy.eye",
"fsl.utils.idle.... | [((747, 774), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (764, 774), False, 'import logging\n'), ((3061, 3096), 'OpenGL.GL.glDeleteTextures', 'gl.glDeleteTextures', (['self.__texture'], {}), '(self.__texture)\n', (3080, 3096), True, 'import OpenGL.GL as gl\n'), ((20613, 20622), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (20619, 20622), True, 'import numpy as np\n'), ((29929, 29971), 'functools.partial', 'ft.partial', (['status.reportError', 'title', 'msg'], {}), '(status.reportError, title, msg)\n', (29939, 29971), True, 'import functools as ft\n'), ((31360, 31376), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (31370, 31376), True, 'import numpy as np\n'), ((2307, 2326), 'OpenGL.GL.glGenTextures', 'gl.glGenTextures', (['(1)'], {}), '(1)\n', (2323, 2326), True, 'import OpenGL.GL as gl\n'), ((5112, 5158), 'OpenGL.GL.glBindTexture', 'gl.glBindTexture', (['self.__ttype', 'self.__texture'], {}), '(self.__ttype, self.__texture)\n', (5128, 5158), True, 'import OpenGL.GL as gl\n'), ((5446, 5479), 'OpenGL.GL.glBindTexture', 'gl.glBindTexture', (['self.__ttype', '(0)'], {}), '(self.__ttype, 0)\n', (5462, 5479), True, 'import OpenGL.GL as gl\n'), ((18827, 18844), 'fsl.utils.idle.TaskThread', 'idle.TaskThread', ([], {}), '()\n', (18842, 18844), True, 'import fsl.utils.idle as idle\n'), ((29855, 29894), 'fsleyes_widgets.utils.status.reportErrorDecorator', 'status.reportErrorDecorator', (['title', 'msg'], {}), '(title, msg)\n', (29882, 29894), True, 'import fsleyes_widgets.utils.status as status\n'), ((5067, 5098), 'OpenGL.GL.glActiveTexture', 'gl.glActiveTexture', (['textureUnit'], {}), '(textureUnit)\n', (5085, 5098), True, 'import OpenGL.GL as gl\n'), ((5394, 5432), 'OpenGL.GL.glActiveTexture', 'gl.glActiveTexture', (['self.__textureUnit'], {}), '(self.__textureUnit)\n', (5412, 5432), True, 'import OpenGL.GL as gl\n'), ((26068, 26083), 'numpy.nanmin', 'np.nanmin', (['data'], {}), '(data)\n', (26077, 
26083), True, 'import numpy as np\n'), ((26085, 26100), 'numpy.nanmax', 'np.nanmax', (['data'], {}), '(data)\n', (26094, 26100), True, 'import numpy as np\n'), ((28931, 28952), 'fsl.utils.idle.TaskThreadVeto', 'idle.TaskThreadVeto', ([], {}), '()\n', (28950, 28952), True, 'import fsl.utils.idle as idle\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import os
import pandas as pd
import time
import scanpy as sc
import scipy.sparse as ssp
from .. import help_functions as hf
from .. import plotting as CSpl
from .optimal_transport import *
from .. import settings
from .. import logging as logg
####################
# Constructing the similarity matrix (similarity matrix)
####################
def generate_similarity_matrix(adata,file_name,round_of_smooth=10,neighbor_N=20,beta=0.1,truncation_threshold=0.001,save_subset=True,compute_new_Smatrix=False):
    """
    Generate similarity matrix (Smatrix) through graph diffusion

    It generates the similarity matrix via iterative graph diffusion.
    Similarity matrix from each round of diffusion will be saved, after truncation
    to promote sparsity and save space. If save_subset is activated, only save
    Smatrix for smooth round [5,10,15,...]. If a Smatrix is pre-computed,
    it will be loaded directly if compute_new_Smatrix=False.

    Parameters
    ----------
    adata: :class:`~anndata.AnnData` object
    file_name: str
        file name to load pre-computed similarity matrix or save the newly
        computed similarity matrix
    round_of_smooth: `int`, optional (default: 10)
        The rounds of graph diffusion.
    neighbor_N: `int`, optional (default: 20)
        Neighbor number for constructing the KNN graph, using the UMAP method.
    beta: `float`, option (default: 0.1)
        Probability to stay at origin in a unit diffusion step, in the range [0,1]
    truncation_threshold: `float`, optional (default: 0.001)
        At each iteration, truncate the similarity matrix (the similarity) using
        truncation_threshold. This promotes the sparsity of the matrix,
        thus the speed of computation. We set the truncation threshold to be small,
        to guarantee accuracy.
    save_subset: `bool`, optional (default: True)
        If true, save only Smatrix at smooth round [5,10,15,...]
        Else, save Smatrix at each round.
    compute_new_Smatrix: `bool`, optional (default: False)
        If true, compute new Smatrix, even if there is pre-computed Smatrix with the
        same parameterization.

    Returns
    -------
    similarity_matrix: `sp.spmatrix`
    """
    # Re-use a cached Smatrix on disk unless a fresh computation is requested.
    if os.path.exists(file_name+f'_SM{round_of_smooth}.npz') and (not compute_new_Smatrix):
        logg.info("Compute similarity matrix: load existing data")
        similarity_matrix=ssp.load_npz(file_name+f'_SM{round_of_smooth}.npz')
    else: # compute now
        logg.info(f"Compute similarity matrix: computing new; beta={beta}")
        # add a step to compute PCA in case this is not computed
        # here, we assume that adata already has pre-computed PCA
        sc.pp.neighbors(adata, n_neighbors=neighbor_N)
        ## compute the similarity matrix (smooth matrix)
        #nrow = adata.shape[0]
        #initial_clones = ssp.lil_matrix((nrow, nrow))
        #initial_clones.setdiag(np.ones(nrow))
        #similarity_matrix=hf.get_smooth_values_SW(initial_clones, adata_sp.uns['neighbors']['connectivities'], beta=0, n_rounds=round_of_smooth)
        #similarity_matrix=get_smooth_values_sparseMatrixForm(initial_clones, adata.uns['neighbors']['connectivities'], beta=0, n_rounds=round_of_smooth)
        # this similarity_matrix is column-normalized, our B here
        #adjacency_matrix=adata.uns['neighbors']['connectivities'];
        adjacency_matrix=adata.obsp['connectivities'];
        ############## The new method
        # symmetrize the KNN graph before row-normalization
        adjacency_matrix=(adjacency_matrix+adjacency_matrix.T)/2
        ##############
        # row-normalize so each row sums to 1 (a random-walk transition matrix)
        adjacency_matrix = hf.sparse_rowwise_multiply(adjacency_matrix, 1 / adjacency_matrix.sum(1).A.squeeze())
        nrow = adata.shape[0]
        # start the diffusion from the identity (each cell similar only to itself)
        similarity_matrix = ssp.lil_matrix((nrow, nrow))
        similarity_matrix.setdiag(np.ones(nrow))
        transpose_A=adjacency_matrix.T
        for iRound in range(round_of_smooth):
            SM=iRound+1
            logg.info("Smooth round:",SM)
            t=time.time()
            # lazy random walk: stay with probability beta, diffuse with 1-beta
            similarity_matrix =beta*similarity_matrix+(1-beta)*transpose_A*similarity_matrix
            #similarity_matrix =beta*similarity_matrix+(1-beta)*similarity_matrix*adjacency_matrix
            #similarity_matrix_array.append(similarity_matrix)
            logg.hint("Time elapsed:",time.time()-t)
            t=time.time()
            sparsity_frac=(similarity_matrix>0).sum()/(similarity_matrix.shape[0]*similarity_matrix.shape[1])
            # truncate small entries only once the matrix becomes dense enough
            if sparsity_frac>=0.1:
                #similarity_matrix_truncate=similarity_matrix
                #similarity_matrix_truncate_array.append(similarity_matrix_truncate)
                logg.hint(f"Orignal sparsity={sparsity_frac}, Thresholding")
                similarity_matrix=hf.matrix_row_or_column_thresholding(similarity_matrix,truncation_threshold)
                sparsity_frac_2=(similarity_matrix>0).sum()/(similarity_matrix.shape[0]*similarity_matrix.shape[1])
                #similarity_matrix_truncate_array.append(similarity_matrix_truncate)
                logg.hint(f"Final sparsity={sparsity_frac_2}")
            logg.info(f"similarity matrix truncated (Smooth round={SM}): ", time.time()-t)
            #logg.info("Save the matrix")
            #file_name=f'data/20200221_truncated_similarity_matrix_SM{round_of_smooth}_kNN{neighbor_N}_Truncate{str(truncation_threshold)[2:]}.npz'
            similarity_matrix=ssp.csr_matrix(similarity_matrix)
            ############## The new method
            #similarity_matrix=similarity_matrix.T.copy()
            ##############
            if save_subset:
                if SM%5==0: # save when SM=5,10,15,20,...
                    logg.info("Save the matrix~~~")
                    ssp.save_npz(file_name+f'_SM{SM}.npz',similarity_matrix)
            else: # save all
                logg.info("Save the matrix")
                ssp.save_npz(file_name+f'_SM{SM}.npz',similarity_matrix)
    # NOTE(review): the Smatrix from the *last* smooth round is returned, whether
    # loaded from disk or freshly computed.
    return similarity_matrix
def generate_initial_similarity(similarity_matrix,initial_index_0,initial_index_1):
    """
    Extract Smatrix at t1 from the full Smatrix

    Parameters
    ----------
    similarity_matrix: `np.array` or `sp.spmatrix`
        full Smatrix
    initial_index_0: `list`
        list of selected t1-cell id among all cells (t1+t2)
    initial_index_1: `list`
        list of selected t1-cell id among all cells (t1+t2)
        It can be the same as initial_index_0. In the case that they are
        different, initial_index_1 is a subset of cells that correspond to
        multi-time clones, while initial_index_0 may be all cells at t1.

    Returns
    -------
    initial Smatrix: `np.array`
    """
    start_time = time.time()
    # Row-select the t1 cells, then column-select the (possibly smaller)
    # t1 subset.
    sub_matrix = similarity_matrix[initial_index_0][:, initial_index_1]
    # Densify so that downstream matrix products operate on a plain ndarray.
    if ssp.issparse(sub_matrix):
        sub_matrix = sub_matrix.A
    logg.hint("Time elapsed: ", time.time() - start_time)
    return sub_matrix
def generate_final_similarity(similarity_matrix,final_index_0,final_index_1):
    """
    Extract Smatrix at t2 from the full Smatrix

    Parameters
    ----------
    similarity_matrix: `np.array` or `sp.spmatrix`
        full Smatrix
    final_index_0: `list`
        list of selected t2-cell id among all cells (t1+t2)
    final_index_1: `list`
        list of selected t2-cell id among all cells (t1+t2)
        It can be the same as final_index_0. In the case that they are
        different, initial_index_0 is a subset of cells that correspond to
        multi-time clones, while initial_index_1 may be all cells at t2.

    Returns
    -------
    initial Smatrix: `np.array`
    """
    start_time = time.time()
    # Note the transpose: the t2 block is taken from the transposed Smatrix.
    sub_matrix = similarity_matrix.T[final_index_0][:, final_index_1]
    # Densify so that downstream matrix products operate on a plain ndarray.
    if ssp.issparse(sub_matrix):
        sub_matrix = sub_matrix.A
    logg.hint("Time elapsed: ", time.time() - start_time)
    return sub_matrix
def select_time_points(adata_orig,time_point=['day_1','day_2'],use_all_cells=False):
    """
    Select barcoded cells at given time points for Tmap inference

    Select cells at given time points, and prepare the right data structure
    for running core cospar function to infer the Tmap.

    Parameters
    ----------
    adata_orig: original :class:`~anndata.AnnData` object
    time_point: `list` optional (default: ['day_1','day_2'])
        Require at least two time points, arranged in ascending order.
        NOTE(review): mutable default argument; safe here because the list is
        only rebound, never mutated in place.
    use_all_cells: `bool` optional (default: `False`)
        If true, all cells at selected time points will be used for computing Tmap
        If false, only cells belonging to multi-time clones will be used for computing Tmap.
        The latter case usually speed up the computation, which is recommended.

    Returns
    -------
    Subsampled :class:`~anndata.AnnData` object
        NOTE(review): implicitly returns ``None`` when fewer than two time
        points are selected (only an error is logged) — TODO confirm intended.
    """
    #x_emb_orig=adata_orig.obsm['X_emb'][:,0]
    #y_emb_orig=adata_orig.obsm['X_emb'][:,1]
    time_info_orig=np.array(adata_orig.obs['time_info'])
    clone_annot_orig=adata_orig.obsm['X_clone']
    if len(time_point)==0: # use all clonally labelled cell states
        time_point=np.sort(list(set(time_info_orig)))
    if (len(time_point)<2):
        logg.error("Must select more than 1 time point!")
    else:
        # At[j]: clone matrix restricted to cells at time_point[j]
        At=[]
        for j, time_0 in enumerate(time_point):
            At.append(ssp.csr_matrix(clone_annot_orig[time_info_orig==time_0]))
        ### Day t - t+1
        # Cells at time t that share a clone with at least one cell at time t+1.
        Clonal_cell_ID_FOR_t=[]
        for j in range(len(time_point)-1):
            idx_t=np.array((At[j]*At[j+1].T).sum(1)>0).flatten()
            time_index_t=time_info_orig==time_point[j]
            temp=np.nonzero(time_index_t)[0][idx_t]
            Clonal_cell_ID_FOR_t.append(temp) # this index is in the original space, without sampling etc
            logg.hint(f"Clonal cell fraction (day {time_point[j]}-{time_point[j+1]}):",len(temp)/np.sum(time_index_t))
        ### Day t+1 - t
        # Cells at time t+1 that share a clone with at least one cell at time t.
        Clonal_cell_ID_BACK_t=[]
        for j in range(len(time_point)-1):
            idx_t=np.array((At[j+1]*At[j].T).sum(1)>0).flatten()
            time_index_t=time_info_orig==time_point[j+1]
            temp=np.nonzero(time_index_t)[0][idx_t]
            Clonal_cell_ID_BACK_t.append(temp) # this index is in the original space, without sampling etc
            logg.hint(f"Clonal cell fraction (day {time_point[j+1]}-{time_point[j]}):",len(temp)/np.sum(time_index_t))
        for j in range(len(time_point)-1):
            logg.hint(f"Numer of cells that are clonally related -- day {time_point[j]}: {len(Clonal_cell_ID_FOR_t[j])} and day {time_point[j+1]}: {len(Clonal_cell_ID_BACK_t[j])}")
        proportion=np.ones(len(time_point))
        # flatten the list
        flatten_clonal_cell_ID_FOR=np.array([sub_item for item in Clonal_cell_ID_FOR_t for sub_item in item])
        flatten_clonal_cell_ID_BACK=np.array([sub_item for item in Clonal_cell_ID_BACK_t for sub_item in item])
        valid_clone_N_FOR=np.sum(clone_annot_orig[flatten_clonal_cell_ID_FOR].A.sum(0)>0)
        valid_clone_N_BACK=np.sum(clone_annot_orig[flatten_clonal_cell_ID_BACK].A.sum(0)>0)
        logg.info("Valid clone number 'FOR' post selection",valid_clone_N_FOR)
        #logg.info("Valid clone number 'BACK' post selection",valid_clone_N_BACK)
        ###################### select initial and later cell states
        if use_all_cells:
            # every cell at the earlier/later time points, clonal or not
            old_Tmap_cell_id_t1=[]
            for t_temp in time_point[:-1]:
                old_Tmap_cell_id_t1=old_Tmap_cell_id_t1+list(np.nonzero(time_info_orig==t_temp)[0])
            old_Tmap_cell_id_t1=np.array(old_Tmap_cell_id_t1)
            ########
            old_Tmap_cell_id_t2=[]
            for t_temp in time_point[1:]:
                old_Tmap_cell_id_t2=old_Tmap_cell_id_t2+list(np.nonzero(time_info_orig==t_temp)[0])
            old_Tmap_cell_id_t2=np.array(old_Tmap_cell_id_t2)
        else:
            # only cells belonging to multi-time clones
            old_Tmap_cell_id_t1=flatten_clonal_cell_ID_FOR
            old_Tmap_cell_id_t2=flatten_clonal_cell_ID_BACK
        old_clonal_cell_id_t1=flatten_clonal_cell_ID_FOR
        old_clonal_cell_id_t2=flatten_clonal_cell_ID_BACK
        ########################
        # sp_idx: boolean mask over the original cells selecting the subsample
        sp_id=np.sort(list(set(list(old_Tmap_cell_id_t1)+list(old_Tmap_cell_id_t2))))
        sp_idx=np.zeros(clone_annot_orig.shape[0],dtype=bool)
        sp_idx[sp_id]=True
        # convert all id lists from the full-cell space to the subsampled space
        Tmap_cell_id_t1=hf.converting_id_from_fullSpace_to_subSpace(old_Tmap_cell_id_t1,sp_id)[0]
        clonal_cell_id_t1=hf.converting_id_from_fullSpace_to_subSpace(old_clonal_cell_id_t1,sp_id)[0]
        clonal_cell_id_t2=hf.converting_id_from_fullSpace_to_subSpace(old_clonal_cell_id_t2,sp_id)[0]
        Tmap_cell_id_t2=hf.converting_id_from_fullSpace_to_subSpace(old_Tmap_cell_id_t2,sp_id)[0]
        Clonal_cell_ID_FOR_t_new=[]
        for temp_id_list in Clonal_cell_ID_FOR_t:
            convert_list=hf.converting_id_from_fullSpace_to_subSpace(temp_id_list,sp_id)[0]
            Clonal_cell_ID_FOR_t_new.append(convert_list)
        Clonal_cell_ID_BACK_t_new=[]
        for temp_id_list in Clonal_cell_ID_BACK_t:
            convert_list=hf.converting_id_from_fullSpace_to_subSpace(temp_id_list,sp_id)[0]
            Clonal_cell_ID_BACK_t_new.append(convert_list)
        # keep only clones observed among the clonally-related cells
        sp_id_0=np.sort(list(old_clonal_cell_id_t1)+list(old_clonal_cell_id_t2))
        sp_idx_0=np.zeros(clone_annot_orig.shape[0],dtype=bool)
        sp_idx_0[sp_id_0]=True
        barcode_id=np.nonzero(clone_annot_orig[sp_idx_0].A.sum(0).flatten()>0)[0]
        #sp_id=np.nonzero(sp_idx)[0]
        clone_annot=clone_annot_orig[sp_idx][:,barcode_id]
        # assemble the subsampled AnnData object with all metadata carried over
        adata=sc.AnnData(adata_orig.X[sp_idx]);
        adata.var_names=adata_orig.var_names
        adata.obsm['X_pca']=adata_orig.obsm['X_pca'][sp_idx]
        adata.obsm['X_emb']=adata_orig.obsm['X_emb'][sp_idx]
        adata.obs['state_info']=pd.Categorical(adata_orig.obs['state_info'][sp_idx])
        adata.obs['time_info']=pd.Categorical(adata_orig.obs['time_info'][sp_idx])
        adata.obsm['X_clone']=clone_annot
        adata.uns['clonal_cell_id_t1']=clonal_cell_id_t1
        adata.uns['clonal_cell_id_t2']=clonal_cell_id_t2
        adata.uns['Tmap_cell_id_t1']=Tmap_cell_id_t1
        adata.uns['Tmap_cell_id_t2']=Tmap_cell_id_t2
        adata.uns['multiTime_cell_id_t1']=Clonal_cell_ID_FOR_t_new
        adata.uns['multiTime_cell_id_t2']=Clonal_cell_ID_BACK_t_new
        adata.uns['proportion']=np.ones(len(time_point)-1)
        adata.uns['sp_idx']=sp_idx
        # build a descriptive tag encoding the selected time points
        data_des_orig=adata_orig.uns['data_des'][0]
        data_des_0=adata_orig.uns['data_des'][-1]
        time_label='t'
        for x in time_point:
            time_label=time_label+f'*{x}'
        data_des=data_des_0+f'_TwoTimeClone_{time_label}'
        adata.uns['data_des']=[data_des_orig,data_des]
        if logg._settings_verbosity_greater_or_equal_than(2):
            N_cell,N_clone=clone_annot.shape;
            logg.info(f"Cell number={N_cell}, Clone number={N_clone}")
            x_emb=adata.obsm['X_emb'][:,0]
            y_emb=adata.obsm['X_emb'][:,1]
            CSpl.customized_embedding(x_emb,y_emb,-x_emb)
        return adata
####################
# CoSpar: two-time points
####################
def refine_Tmap_through_cospar(MultiTime_cell_id_array_t1,MultiTime_cell_id_array_t2,
    proportion,transition_map,X_clone,initial_similarity,final_similarity,
    noise_threshold=0.1,normalization_mode=1):
    """
    This performs one iteration of coherent sparsity optimization

    This is our core algorithm for coherent sparsity optimization for multi-time
    clones. It updates a map by considering clones spanning multiple time points.

    Parameters
    ----------
    MultiTime_cell_id_array_t1: `np.array`
        an array of cell id sub_array, where each sub_array consists of
        clonally-related cell id's at different time points
    MultiTime_cell_id_array_t2: `np.array`
        an corresponding array of sub_array, where each sub_array are id's of
        cells that are clonally related to the corresponding sub_array at
        MultiTime_cell_id_array_t1.
    proportion: `list`
        A weight factor for each time point.
    transition_map: `np.array` or `sp.spmatrix`
        initialized transition map, or map from a previous iteration.
    X_clone: `sp.spmatrix`
        clonal matrix
    initial_similarity: `np.array`
        similarity matrix for all cells belonging
        to MultiTime_cell_id_array_t1
    final_similarity: `np.array`
        similarity matrix for all cells belonging
        to MultiTime_cell_id_array_t2
    noise_threshold: `float`, optional (default: 0.1)
        noise threshold to remove noises in the updated transition map,
        in the range [0,1]
    normalization_mode: `int`, optional (default: 1)
        Method for normalization. Choice: [0,1]
        0, single-cell normalization
        1, Clone normalization

    Returns
    -------
    smoothed_new_transition_map: `np.array`
    un_SM_transition_map: `np.array`
    """
    # resol guards against division by zero during normalization
    resol=10**(-10)
    transition_map=hf.matrix_row_or_column_thresholding(transition_map,noise_threshold,row_threshold=True)
    if normalization_mode==0: logg.info("Single-cell normalization")
    if normalization_mode==1: logg.info("Clone normalization")
    if ssp.issparse(X_clone):
        X_clone=ssp.csr_matrix(X_clone)
    cell_N,clone_N=X_clone.shape
    N1,N2=transition_map.shape
    new_coupling_matrix=ssp.lil_matrix((N1,N2))
    # cell id order in the similarity matrix is obtained by concatenating the cell id
    # list in MultiTime_cell_id_array_t1. So, we need to offset the id if we move to the next list
    offset_N1=0;
    offset_N2=0;
    for j in range(len(MultiTime_cell_id_array_t1)):
        logg.hint("Relative time point pair index:",j)
        cell_id_array_t1=MultiTime_cell_id_array_t1[j]
        cell_id_array_t2=MultiTime_cell_id_array_t2[j]
        for clone_id in range(clone_N):
            #pdb.set_trace()
            if clone_id%1000==0: logg.hint("Clone id:",clone_id)
            # clone membership of this clone among the t1 and t2 cells
            idx1=X_clone[cell_id_array_t1,clone_id].A.flatten()
            idx2=X_clone[cell_id_array_t2,clone_id].A.flatten()
            if idx1.sum()>0 and idx2.sum()>0:
                ## update the new_coupling matrix
                id_1=offset_N1+np.nonzero(idx1)[0]
                id_2=offset_N2+np.nonzero(idx2)[0]
                prob=transition_map[id_1][:,id_2]
                ## try row normalization
                if normalization_mode==0:
                    prob=hf.sparse_rowwise_multiply(prob,1/(resol+np.sum(prob,1))) # cell-level normalization
                else:
                    prob=prob/(resol+np.sum(prob)) # clone level normalization, account for proliferation
                weight_factor=np.sqrt(np.mean(idx1[idx1>0])*np.mean(idx2[idx2>0])) # the contribution of a particular clone can be tuned by its average entries
                if (weight_factor>1):
                    logg.hint("marker gene weight",weight_factor)
                #Use the add mode, add up contributions from each clone
                new_coupling_matrix[id_1[:,np.newaxis],id_2]=new_coupling_matrix[id_1[:,np.newaxis],id_2]+proportion[j]*prob*weight_factor
        ## update offset
        offset_N1=offset_N1+len(cell_id_array_t1)
        offset_N2=offset_N2+len(cell_id_array_t2)
    ## rescale
    new_coupling_matrix=new_coupling_matrix/(new_coupling_matrix.A.max())
    ## convert to sparse matrix form
    new_coupling_matrix=new_coupling_matrix.tocsr()
    logg.info("Start to smooth the refined clonal map")
    t=time.time()
    # smooth the coupling on both ends: S_t1 . (coupling . S_t2)
    temp=new_coupling_matrix*final_similarity
    logg.info("Phase I: time elapsed -- ", time.time()-t)
    smoothed_new_transition_map=initial_similarity.dot(temp)
    logg.info("Phase II: time elapsed -- ", time.time()-t)
    # both return are numpy array
    un_SM_transition_map=new_coupling_matrix.A
    return smoothed_new_transition_map, un_SM_transition_map
def refine_Tmap_through_cospar_noSmooth(MultiTime_cell_id_array_t1,
    MultiTime_cell_id_array_t2,proportion,transition_map,
    X_clone,noise_threshold=0.1,normalization_mode=1):
    """
    This performs one iteration of coherent sparsity optimization

    This is the same as 'refine_Tmap_through_cospar', except that
    there is no smoothing afterwards for demultiplexing.

    Parameters
    ----------
    MultiTime_cell_id_array_t1: `np.array`
        an array of cell id sub_array, where each sub_array consists of
        clonally-related cell id's at different time points
    MultiTime_cell_id_array_t2: `np.array`
        an corresponding array of sub_array, where each sub_array are id's of
        cells that are clonally related to the corresponding sub_array at
        MultiTime_cell_id_array_t1.
    proportion: `list`
        A weight factor for each time point.
    transition_map: `np.array` or `sp.spmatrix`
        initialized transition map, or map from a previous iteration.
    X_clone: `sp.spmatrix`
        clonal matrix
    noise_threshold: `float`, optional (default: 0.1)
        noise threshold to remove noises in the updated transition map,
        in the range [0,1]
    normalization_mode: `int`, optional (default: 1)
        Method for normalization. Choice: [0,1]
        0, single-cell normalization
        1, Clone normalization

    Returns
    -------
    un_SM_transition_map: `np.array`
    """
    # NOTE(review): for sparse X_clone, X_clone[0,0] is a numpy scalar, not a
    # Python bool, so this conversion likely always runs — harmless but
    # worth confirming.
    if not isinstance(X_clone[0,0], bool):
        X_clone=X_clone.astype(bool)
    # resol guards against division by zero during normalization
    resol=10**(-10)
    if normalization_mode==0: logg.info("Single-cell normalization")
    if normalization_mode==1: logg.info("Clone normalization")
    transition_map=hf.matrix_row_or_column_thresholding(transition_map,noise_threshold,row_threshold=True)
    if not ssp.issparse(transition_map): transition_map=ssp.csr_matrix(transition_map)
    if not ssp.issparse(X_clone): X_clone=ssp.csr_matrix(X_clone)
    cell_N,clone_N=X_clone.shape
    N1,N2=transition_map.shape
    new_coupling_matrix=ssp.lil_matrix((N1,N2))
    # cell id order in the similarity matrix is obtained by concatenating the cell id
    # list in MultiTime_cell_id_array_t1. So, we need to offset the id if we move to the next list
    offset_N1=0;
    offset_N2=0;
    for j in range(len(MultiTime_cell_id_array_t1)):
        logg.hint("Relative time point pair index:",j)
        cell_id_array_t1=MultiTime_cell_id_array_t1[j]
        cell_id_array_t2=MultiTime_cell_id_array_t2[j]
        for clone_id in range(clone_N):
            if clone_id%1000==0: logg.hint("Clone id:",clone_id)
            # clone membership of this clone among the t1 and t2 cells
            idx1=X_clone[cell_id_array_t1,clone_id].A.flatten()
            idx2=X_clone[cell_id_array_t2,clone_id].A.flatten()
            if idx1.sum()>0 and idx2.sum()>0:
                ## update the new_coupling matrix
                id_1=offset_N1+np.nonzero(idx1)[0]
                id_2=offset_N2+np.nonzero(idx2)[0]
                prob=transition_map[id_1][:,id_2].A
                ## try row normalization
                if normalization_mode==0:
                    prob=hf.sparse_rowwise_multiply(prob,1/(resol+np.sum(prob,1))) # cell-level normalization
                else:
                    prob=prob/(resol+np.sum(prob)) # clone level normalization, account for proliferation
                weight_factor=np.sqrt(np.mean(idx1[idx1>0])*np.mean(idx2[idx2>0])) # the contribution of a particular clone can be tuned by its average entries
                if (weight_factor>1):
                    logg.hint("marker gene weight",weight_factor)
                #Use the add mode, add up contributions from each clone
                new_coupling_matrix[id_1[:,np.newaxis],id_2]=new_coupling_matrix[id_1[:,np.newaxis],id_2]+proportion[j]*prob*weight_factor
        ## update offset
        offset_N1=offset_N1+len(cell_id_array_t1)
        offset_N2=offset_N2+len(cell_id_array_t2)
    ## convert to sparse matrix form
    new_coupling_matrix=new_coupling_matrix.tocsr()
    #
    un_SM_transition_map=new_coupling_matrix
    return un_SM_transition_map
###############
def infer_Tmap_from_multitime_clones(adata_orig,selected_clonal_time_points,
    smooth_array=[15,10,5],CoSpar_KNN=20,noise_threshold=0.1,demulti_threshold=0.05,
    normalization_mode=1,use_all_cells=False,save_subset=True,use_full_Smatrix=False,
    trunca_threshold=0.001,compute_new=False):
    """
    Infer Tmap for clonal data with multiple time points.

    It prepares adata object for cells of targeted time points by
    :func:`.select_time_points`, generate the similarity matrix
    via :func:`.generate_similarity_matrix`, and iteratively calls
    the core function :func:`.refine_Tmap_through_cospar` to update
    the transition map.

    The inferred map allows transitions between neighboring time points.
    For example, if selected_clonal_time_points=['day1','day2','day3'],
    then it computes transitions for pairs (day1, day2) and (day2, day3),
    but not (day1, day3).

    Parameters
    ----------
    adata_orig: :class:`~anndata.AnnData` object
        Should be prepared from our anadata initialization.
    selected_clonal_time_points: `list` of `str`
        List of time points to be included for analysis.
        We assume that each selected time point has clonal measurement.
        It should be in ascending order: 'day_1','day_2'....
    smooth_array: `list`, optional (default: [15,10,5])
        List of smooth rounds at each iteration.
        The n-th entry determines the smooth round for the Smatrix
        at the n-th iteration. Its length determines the number of
        iteration. It is better to use a number at the multiple of
        5, i.e., 5, 10, 15, 20,...
    CoSpar_KNN: `int`, optional (default: 20)
        the number of neighbors for KNN graph used for computing the
        similarity matrix.
    trunca_threshold: `float`, optional (default: 0.001)
        We set entries to zero in the computed similarity matrix that
        are smaller than this threshold. This is to promote the Smatrix
        sparsity, which leads to faster computation, and smaller file size.
        This threshold should be small, but not too small.
    noise_threshold: `float`, optional (default: 0.1)
        noise threshold to remove noises in the updated transition map,
        in the range [0,1]
    demulti_threshold: `float`, optional (default: 0.05)
        noise threshold to remove noises in demultiplexed (un-smoothed) map,
        in the range [0,1]
    normalization_mode: `int`, optional (default: 1)
        Method for normalization. Choice: [0,1]
        0, single-cell normalization
        1, Clone normalization
    use_all_cells: `bool` optional (default: `False`)
        If true, all cells at selected time points will be used for computing
        Tmap. If false, only cells belonging to multi-time clones will be used
        for computing Tmap. The latter case usually speed up the computation,
        which is recommended.
    save_subset: `bool`, optional (default: True)
        If true, save only Smatrix at smooth round [5,10,15,...].
        Else, save Smatrix at each round.
    use_full_Smatrix: `bool`, optional (default: False)
        use the Smatrix as defined by all cells, whether they are clonally
        barcoded or not. We sub-sample cell states relevant for downstream
        analysis from this full Smatrix. This may refine the Smatrix.
        But will also increase the computation time significantly.
    compute_new: `bool`, optional (default: False)
        If True, compute Smatrix from scratch, whether it was
        computed and saved before or not. This is activated only when
        `use_full_Smatrix=False`.

    Returns
    -------
    adata: :class:`~anndata.AnnData` object
        Store results at adata.uns['transition_map']
        and adata.uns['intraclone_transition_map']. This adata is different
        from the input adata_orig due to subsampling cells.
    """
    t0=time.time()
    hf.check_available_clonal_info(adata_orig)
    # every requested time point must carry clonal information
    for xx in selected_clonal_time_points:
        if xx not in adata_orig.uns['clonal_time_points']:
            logg.error(f"'selected_clonal_time_points' contain time points without clonal information. Please set clonal_time_point to be at least two of {adata_orig.uns['clonal_time_points']}. If there is only one clonal time point, plesae run ----cospar.tmap.infer_Tmap_from_one_time_clones----")
            return adata_orig
    logg.info("-------Step 1: Select time points---------")
    data_path=settings.data_path
    adata=select_time_points(adata_orig,time_point=selected_clonal_time_points,use_all_cells=use_all_cells)
    logg.info("-------Step 2: Compute the full Similarity matrix if necessary---------")
    if use_full_Smatrix: # prepare the similarity matrix with all state info, all subsequent similarity will be down-sampled from this one.
        # e.g. trunca_threshold=0.001 -> temp_str='0001' (used in the cache file name)
        temp_str='0'+str(trunca_threshold)[2:]
        round_of_smooth=np.max(smooth_array)
        data_des=adata.uns['data_des'][0]
        similarity_file_name=f'{data_path}/{data_des}_Similarity_matrix_with_all_cell_states_kNN{CoSpar_KNN}_Truncate{temp_str}'
        # only (re)compute the full Smatrix when no cached file exists or
        # recomputation is requested
        if not (os.path.exists(similarity_file_name+f'_SM{round_of_smooth}.npz') and (not compute_new)):
            similarity_matrix_full=generate_similarity_matrix(adata_orig,similarity_file_name,round_of_smooth=round_of_smooth,
                        neighbor_N=CoSpar_KNN,truncation_threshold=trunca_threshold,save_subset=True,compute_new_Smatrix=compute_new)
    logg.info("-------Step 3: Optimize the transition map recursively---------")
    # updates adata.uns in place with the inferred transition maps
    infer_Tmap_from_multitime_clones_private(adata,smooth_array=smooth_array,neighbor_N=CoSpar_KNN,noise_threshold=noise_threshold,demulti_threshold=demulti_threshold,normalization_mode=normalization_mode,
            save_subset=save_subset,use_full_Smatrix=use_full_Smatrix,trunca_threshold=trunca_threshold,compute_new_Smatrix=compute_new)
    logg.info(f"-----------Total used time: {time.time()-t0} s ------------")
    return adata
def infer_Tmap_from_multitime_clones_private(adata,smooth_array=[15,10,5],neighbor_N=20,noise_threshold=0.1,demulti_threshold=0.05,normalization_mode=1,save_subset=True,use_full_Smatrix=False,trunca_threshold=0.001,compute_new_Smatrix=False):
    """
    Internal function for Tmap inference from multiTime clonal data.

    Same as :func:`.infer_Tmap_from_multitime_clones` except that it
    assumes that the adata object has been prepared for targeted
    time points. It generates the similarity matrix
    via :func:`.generate_similarity_matrix`, and iteratively calls
    the core function :func:`.refine_Tmap_through_cospar` to update
    the transition map.

    Parameters
    ----------
    adata: :class:`~anndata.AnnData` object
        Should be prepared by :func:`.select_time_points`
    smooth_array: `list`, optional (default: [15,10,5])
        List of smooth rounds at each iteration.
        The n-th entry determines the smooth round for the Smatrix
        at the n-th iteration. Its length determines the number of
        iterations. It is better to use a number at the multiple of
        5, i.e., 5, 10, 15, 20,...
    neighbor_N: `int`, optional (default: 20)
        the number of neighbors for KNN graph used for computing the similarity matrix.
    trunca_threshold: `float`, optional (default: 0.001)
        We set entries to zero in the computed similarity matrix that
        are smaller than this threshold. This is to promote the Smatrix sparsity, which
        leads to faster computation, and smaller file size.
        This threshold should be small, but not too small.
    noise_threshold: `float`, optional (default: 0.1)
        noise threshold to remove noises in the updated transition map,
        in the range [0,1]
    demulti_threshold: `float`, optional (default: 0.05)
        noise threshold to remove noises in demultiplexed (un-smoothed) map,
        in the range [0,1]
    normalization_mode: `int`, optional (default: 1)
        Method for normalization. Choice: [0,1]
        0, single-cell normalization
        1, Clone normalization
    save_subset: `bool`, optional (default: True)
        If true, save only Smatrix at smooth round [5,10,15,...].
        Else, save Smatrix at each round.
    use_full_Smatrix: `bool`, optional (default: False)
        use the Smatrix as defined by all cells, whether they are clonally
        barcoded or not. We sub-sample cell states relevant for downstream
        analysis from this full Smatrix. This may refine the Smatrix.
        But will also increase the computation time significantly.
    compute_new_Smatrix: `bool`, optional (default: False)
        If True, compute Smatrix from scratch, whether it was
        computed and saved before or not. This is activated only when
        `use_full_Smatrix=False`.

    Returns
    -------
    None. Inferred transition map updated at adata.uns['transition_map']
    and adata.uns['intraclone_transition_map']
    """
    # NOTE(review): `smooth_array` has a mutable default; it is only read
    # (indexed / len()), never mutated, so this is safe as written.

    ########## extract data (all precomputed by select_time_points)
    clone_annot=adata.obsm['X_clone']
    clonal_cell_id_t1=adata.uns['clonal_cell_id_t1']
    clonal_cell_id_t2=adata.uns['clonal_cell_id_t2']
    Tmap_cell_id_t1=adata.uns['Tmap_cell_id_t1']
    Tmap_cell_id_t2=adata.uns['Tmap_cell_id_t2']
    sp_idx=adata.uns['sp_idx']
    data_des=adata.uns['data_des'][0] # original label
    data_des_1=adata.uns['data_des'][-1] # current label, sensitive to selected time points
    multiTime_cell_id_t1=adata.uns['multiTime_cell_id_t1']
    multiTime_cell_id_t2=adata.uns['multiTime_cell_id_t2']
    proportion=adata.uns['proportion']
    data_path=settings.data_path
    #########

    ########################### Compute the transition map
    logg.info("---------Compute the transition map-----------")

    #trunca_threshold=0.001 # this value is only for reducing the computed matrix size for saving
    temp_str='0'+str(trunca_threshold)[2:]

    if use_full_Smatrix:
        # Full Smatrix must have been precomputed for every requested smooth
        # round; bail out early if any round is missing.
        similarity_file_name=f'{data_path}/{data_des}_Similarity_matrix_with_all_cell_states_kNN{neighbor_N}_Truncate{temp_str}'
        for round_of_smooth in smooth_array:
            if not os.path.exists(similarity_file_name+f'_SM{round_of_smooth}.npz'):
                logg.error(f"Similarity matrix at given parameters have not been computed before! Name: {similarity_file_name}")
                return

    else:
        similarity_file_name=f'{data_path}/{data_des_1}_Similarity_matrix_with_selected_states_kNN{neighbor_N}_Truncate{temp_str}'

    # Per-iteration similarity matrices: "ext" versions map between the full
    # Tmap state space and the clonal subspace; the plain versions stay
    # within the clonal subspace only.
    initial_similarity_array=[]
    final_similarity_array=[]
    initial_similarity_array_ext=[]
    final_similarity_array_ext=[]

    for round_of_smooth in smooth_array:
        # we cannot force it to compute new at this time. Otherwise, if we
        # use_full_Smatrix, the resulting similarity is actually from adata,
        # thus not full similarity.
        re_compute=(not use_full_Smatrix) and (compute_new_Smatrix) # re-compute only when not using full similarity
        similarity_matrix_full=generate_similarity_matrix(adata,similarity_file_name,round_of_smooth=round_of_smooth,
            neighbor_N=neighbor_N,truncation_threshold=trunca_threshold,save_subset=save_subset,compute_new_Smatrix=re_compute)

        if use_full_Smatrix:
            #pdb.set_trace()
            # Sub-sample the full Smatrix down to the cell states of this adata.
            similarity_matrix_full_sp=similarity_matrix_full[sp_idx][:,sp_idx]

            #pdb.set_trace()
            ### extended similarity matrix
            initial_similarity_ext=generate_initial_similarity(similarity_matrix_full_sp,Tmap_cell_id_t1,clonal_cell_id_t1)
            final_similarity_ext=generate_final_similarity(similarity_matrix_full_sp,clonal_cell_id_t2,Tmap_cell_id_t2)

            ### minimum similarity matrix that only involves the multi-time clones
            initial_similarity=generate_initial_similarity(similarity_matrix_full_sp,clonal_cell_id_t1,clonal_cell_id_t1)
            final_similarity=generate_final_similarity(similarity_matrix_full_sp,clonal_cell_id_t2,clonal_cell_id_t2)
        else:
            initial_similarity_ext=generate_initial_similarity(similarity_matrix_full,Tmap_cell_id_t1,clonal_cell_id_t1)
            final_similarity_ext=generate_final_similarity(similarity_matrix_full,clonal_cell_id_t2,Tmap_cell_id_t2)
            initial_similarity=generate_initial_similarity(similarity_matrix_full,clonal_cell_id_t1,clonal_cell_id_t1)
            final_similarity=generate_final_similarity(similarity_matrix_full,clonal_cell_id_t2,clonal_cell_id_t2)

        initial_similarity_array.append(initial_similarity)
        final_similarity_array.append(final_similarity)
        initial_similarity_array_ext.append(initial_similarity_ext)
        final_similarity_array_ext.append(final_similarity_ext)

    #### Compute the core of the transition map that involve multi-time clones, then extend to other cell states
    # Start from a uniform (all-ones) coupling between clonal cells at t1 and t2.
    clonal_coupling_v1=np.ones((len(clonal_cell_id_t1),len(clonal_cell_id_t2)))
    transition_map_array=[clonal_coupling_v1]

    X_clone=clone_annot.copy()
    if not ssp.issparse(X_clone):
        X_clone=ssp.csr_matrix(X_clone)

    CoSpar_iter_N=len(smooth_array)
    for j in range(CoSpar_iter_N):
        logg.info("Current iteration:",j)
        transition_map=transition_map_array[j]
        # Select the smoothing level for this iteration (the else branch is a
        # defensive fallback; with CoSpar_iter_N == len(smooth_array) it is
        # not reached).
        if j<len(smooth_array):
            logg.info(f"Use smooth_round={smooth_array[j]}")
            used_initial_similarity=initial_similarity_array[j]
            used_final_similarity=final_similarity_array[j]
        else:
            logg.info(f"Use smooth_round={smooth_array[-1]}")
            used_initial_similarity=initial_similarity_array[-1]
            used_final_similarity=final_similarity_array[-1]

        # clonal_coupling, unSM_sc_coupling=refine_transition_map_by_integrating_clonal_info(clonal_cell_id_t1,clonal_cell_id_t2,
        #    transition_map,X_clone,used_initial_similarity,used_final_similarity,noise_threshold,row_normalize=True,normalization_mode=normalization_mode)

        clonal_coupling, unSM_sc_coupling=refine_Tmap_through_cospar(multiTime_cell_id_t1,multiTime_cell_id_t2,
            proportion,transition_map,X_clone,used_initial_similarity,used_final_similarity,noise_threshold=noise_threshold,normalization_mode=normalization_mode)

        transition_map_array.append(clonal_coupling)

    ### expand the map to other cell states
    ratio_t1=np.sum(np.in1d(Tmap_cell_id_t1,clonal_cell_id_t1))/len(Tmap_cell_id_t1)
    ratio_t2=np.sum(np.in1d(Tmap_cell_id_t2,clonal_cell_id_t2))/len(Tmap_cell_id_t2)
    if (ratio_t1==1) and (ratio_t2==1): # no need to SM the map
        logg.info("No need for Final Smooth (i.e., clonally states are the final state space for Tmap)")

        adata.uns['transition_map']=ssp.csr_matrix(clonal_coupling)
    else:
        logg.info("Final round of Smooth (to expand the state space of Tmap to include non-clonal states)")

        # Use the extended similarity of the last executed iteration (loop
        # variable j is still bound here) to smooth into the full state space.
        if j<len(smooth_array):
            used_initial_similarity_ext=initial_similarity_array_ext[j]
            used_final_similarity_ext=final_similarity_array_ext[j]
        else:
            used_initial_similarity_ext=initial_similarity_array_ext[-1]
            used_final_similarity_ext=final_similarity_array_ext[-1]

        unSM_sc_coupling=ssp.csr_matrix(unSM_sc_coupling)
        t=time.time()
        temp=unSM_sc_coupling*used_final_similarity_ext
        logg.info("Phase I: time elapsed -- ", time.time()-t)
        transition_map_1=used_initial_similarity_ext.dot(temp)
        logg.info("Phase II: time elapsed -- ", time.time()-t)

        adata.uns['transition_map']=ssp.csr_matrix(transition_map_1)
        #adata.uns['transition_map_unExtended']=ssp.csr_matrix(clonal_coupling)

    logg.info("----Demultiplexed transition map----")

    #pdb.set_trace()
    # Demultiplex: recompute the coupling without smoothing so that only
    # direct clonal relations remain, then embed it in the full Tmap space.
    demultiplexed_map_0=refine_Tmap_through_cospar_noSmooth(multiTime_cell_id_t1,multiTime_cell_id_t2,proportion,clonal_coupling,
        X_clone,noise_threshold=demulti_threshold,normalization_mode=normalization_mode)

    idx_t1=hf.converting_id_from_fullSpace_to_subSpace(clonal_cell_id_t1,Tmap_cell_id_t1)[0]
    idx_t2=hf.converting_id_from_fullSpace_to_subSpace(clonal_cell_id_t2,Tmap_cell_id_t2)[0]
    demultiplexed_map=np.zeros((len(Tmap_cell_id_t1),len(Tmap_cell_id_t2)))
    demultiplexed_map[idx_t1[:,np.newaxis],idx_t2]=demultiplexed_map_0.A
    adata.uns['intraclone_transition_map']=ssp.csr_matrix(demultiplexed_map)
def infer_intraclone_Tmap(adata,demulti_threshold=0.05,normalization_mode=1):
    """
    Demultiplex a pre-computed transition map into an intra-clone map.

    Requires adata.uns['transition_map'] to exist already (i.e., run
    ``CS.tmap.infer_Tmap_from_multitime_clones`` first). The demultiplexed
    result is stored at adata.uns['intraclone_transition_map'].

    Parameters
    ----------
    adata: :class:`~anndata.AnnData` object
        Must carry 'transition_map' in `.uns`, plus the Tmap cell-id and
        proportion entries produced by the upstream pipeline.
    demulti_threshold: `float`, optional (default: 0.05)
        Noise threshold, in the range [0,1], used to remove weak entries
        from the demultiplexed map.
    normalization_mode: `int`, optional (default: 1)
        Method for normalization. Choice: [0,1]
        0, single-cell normalization
        1, Clone normalization

    Returns
    -------
    None. Update/generate adata.uns['intraclone_transition_map']
    """
    # Guard clause: demultiplexing needs an existing transition map.
    if 'transition_map' not in adata.uns.keys():
        logg.error("Please run ---- CS.tmap.infer_Tmap_from_multitime_clones ---- first")
        return

    # All Tmap cells are treated as a single "multi-time" group here.
    group_cell_id_t1=[adata.uns['Tmap_cell_id_t1']]
    group_cell_id_t2=[adata.uns['Tmap_cell_id_t2']]
    clone_proportion=adata.uns['proportion']
    full_map=adata.uns['transition_map']

    # Work on a sparse copy of the clonal matrix; leave adata untouched.
    clone_matrix=adata.obsm['X_clone'].copy()
    if not ssp.issparse(clone_matrix):
        clone_matrix=ssp.csr_matrix(clone_matrix)

    intraclone_map=refine_Tmap_through_cospar_noSmooth(
        group_cell_id_t1,group_cell_id_t2,clone_proportion,full_map,
        clone_matrix,noise_threshold=demulti_threshold,
        normalization_mode=normalization_mode)
    adata.uns['intraclone_transition_map']=ssp.csr_matrix(intraclone_map)
# v0: avoid cells that are already selected. We tested, this is better than not avoiding...
def Tmap_from_highly_variable_genes(adata,min_counts=3,min_cells=3,
    min_gene_vscore_pctl=85,smooth_array=[15,10,5],neighbor_N=20,
    noise_threshold=0.2,normalization_mode=1,use_full_Smatrix=False,
    trunca_threshold=0.001,compute_new_Smatrix=True,
    save_subset=True):
    """
    Generate Tmap based on state info using HighVar.

    We convert differentially expressed genes into `pseudo-clones`,
    and run cospar to infer the transition map. Each clone occupies
    a different set of cells.

    Parameters
    ----------
    adata: :class:`~anndata.AnnData` object
        assumed to be preprocessed, only has two time points.
    min_counts: int, optional (default: 3)
        Minimum number of UMIs per cell to be considered for selecting highly variable genes.
    min_cells: int, optional (default: 3)
        Minimum number of cells per gene to be considered for selecting highly variable genes.
    min_gene_vscore_pctl: int, optional (default: 85)
        Genes with a variability percentile higher than this threshold are marked as
        highly variable genes for dimension reduction. Range: [0,100]
    smooth_array: `list`, optional (default: [15,10,5])
        List of smooth rounds at each iteration.
        The n-th entry determines the smooth round for the Smatrix
        at the n-th iteration. Its length determines the number of
        iterations.
    neighbor_N: `int`, optional (default: 20)
        the number of neighbors for KNN graph used for computing the similarity matrix.
    trunca_threshold: `float`, optional (default: 0.001)
        We set entries to zero in the computed similarity matrix that
        are smaller than this threshold. This is to promote the Smatrix sparsity, which
        leads to faster computation, and smaller file size.
        This threshold should be small, but not too small.
    noise_threshold: `float`, optional (default: 0.1)
        noise threshold to remove noises in the updated transition map,
        in the range [0,1]
    normalization_mode: `int`, optional (default: 2)
        Method for normalization. Choice: [0,1,2]
        0, single-cell normalization
        1, Clone normalization: N2/N1 (this one does not make sense)
        2, Clone normalization
    save_subset: `bool`, optional (default: True)
        If true, save only Smatrix at smooth round [5,10,15,...].
        Else, save Smatrix at each round.
    use_full_Smatrix: `bool`, optional (default: False)
        use the Smatrix as defined by all cells, whether they are clonally
        barcoded or not. We sub-sample cell states relevant for downstream
        analysis from this full Smatrix. This may refine the Smatrix.
        But will also increase the computation time significantly.
    compute_new_Smatrix: `bool`, optional (default: False)
        If True, compute Smatrix from scratch, whether it was
        computed and saved before or not.

    Returns
    -------
    None. Results are stored at adata.uns['HighVar_transition_map'].
    """
    logg.info("HighVar-v0: avoid cells that have been selected")
    weight=1 # weight assigned per gene in the pseudo-clone matrix
    cell_id_array_t1=adata.uns['Tmap_cell_id_t1']
    cell_id_array_t2=adata.uns['Tmap_cell_id_t2']
    real_clone_annot=adata.obsm['X_clone'] # kept so the true clonal matrix can be restored at the end
    time_info=np.array(adata.obs['time_info'])
    selected_time_points=[time_info[cell_id_array_t1][0],time_info[cell_id_array_t2][0]]

    logg.info("----------------")
    logg.info('Step a: find the commonly shared highly variable genes')
    # Build per-time-point views of the expression matrix so variability is
    # scored within each time point separately.
    adata_t1=sc.AnnData(adata.X[cell_id_array_t1]);
    adata_t2=sc.AnnData(adata.X[cell_id_array_t2]);

    ## use marker genes
    gene_list=adata.var_names

    verbose=logg._settings_verbosity_greater_or_equal_than(2)
    highvar_genes_t1 = gene_list[hf.filter_genes(
        adata_t1.X,
        min_counts=min_counts,
        min_cells=min_cells,
        min_vscore_pctl=min_gene_vscore_pctl,
        show_vscore_plot=verbose)]
    highvar_genes_t2 = gene_list[hf.filter_genes(
        adata_t2.X,
        min_counts=min_counts,
        min_cells=min_cells,
        min_vscore_pctl=min_gene_vscore_pctl,
        show_vscore_plot=verbose)]

    # Only genes highly variable at BOTH time points become pseudo-clones.
    common_gene=list(set(highvar_genes_t1).intersection(highvar_genes_t2))

    logg.info(f"Highly varable gene number at t1 is {len(highvar_genes_t1)}, Highly varable gene number at t2 is {len(highvar_genes_t2)}")
    logg.info(f"Common gene set is {len(common_gene)}")

    logg.info("----------------")
    logg.info('Step b: convert the shared highly variable genes into clonal info')

    sel_marker_gene_list=common_gene.copy()
    clone_annot_gene=np.zeros((adata.shape[0],len(sel_marker_gene_list)))
    N_t1=len(cell_id_array_t1)
    N_t2=len(cell_id_array_t2)
    # Track which cells have already been claimed by an earlier gene so each
    # cell belongs to at most one pseudo-clone per time point.
    cumu_sel_idx_t1=np.zeros(N_t1,dtype=bool)
    cumu_sel_idx_t2=np.zeros(N_t2,dtype=bool)
    cell_fraction_per_gene=1/len(sel_marker_gene_list) # fraction of cells as clonally related by this gene
    for j,gene_id in enumerate(sel_marker_gene_list):
        temp_t1=adata.obs_vector(gene_id)[cell_id_array_t1]
        temp_t1[cumu_sel_idx_t1]=0 # set selected cell id to have zero expression
        cutoff_t1=int(np.ceil(len(cell_id_array_t1)*cell_fraction_per_gene))
        # Take the top-expressing, not-yet-claimed cells for this gene.
        sel_id_t1=np.argsort(temp_t1,kind='stable')[::-1][:cutoff_t1]
        clone_annot_gene[cell_id_array_t1[sel_id_t1],j]=weight
        cumu_sel_idx_t1[sel_id_t1]=True
        #logg.info(f"Gene id {gene_id}, cell number at t1 is {sel_id_t1.shape[0]}, fraction at t1: {sel_id_t1.shape[0]/len(cell_id_array_t1)}")

        temp_t2=adata.obs_vector(gene_id)[cell_id_array_t2]
        temp_t2[cumu_sel_idx_t2]=0 # set selected cell id to have zero expression
        cutoff_t2=int(np.ceil(len(cell_id_array_t2)*cell_fraction_per_gene))
        sel_id_t2=np.argsort(temp_t2,kind='stable')[::-1][:cutoff_t2]
        clone_annot_gene[cell_id_array_t2[sel_id_t2],j]=weight
        cumu_sel_idx_t2[sel_id_t2]=True
        #logg.info(f"Gene id {gene_id}, cell number at t2 is {sel_id_t2.shape[0]}, fraction at t2: {sel_id_t2.shape[0]/len(cell_id_array_t2)}")

        if (np.sum(~cumu_sel_idx_t1)==0) or (np.sum(~cumu_sel_idx_t2)==0):
            logg.info(f'No cells left for assignment, total used genes={j}')
            break

    #logg.info(f"Selected cell fraction: t1 -- {np.sum(cumu_sel_idx_t1)/len(cell_id_array_t1)}; t2 -- {np.sum(cumu_sel_idx_t2)/len(cell_id_array_t2)}")

    logg.info("----------------")
    logg.info("Step c: compute the transition map based on clonal info from highly variable genes")

    # Temporarily swap in the pseudo-clone matrix and run the standard
    # multi-time CoSpar pipeline on it.
    adata.obsm['X_clone']=ssp.csr_matrix(clone_annot_gene)
    adata.uns['multiTime_cell_id_t1']=[cell_id_array_t1]
    adata.uns['multiTime_cell_id_t2']=[cell_id_array_t2]
    adata.uns['proportion']=[1]
    data_des_0=adata.uns['data_des'][-1]
    data_des_orig=adata.uns['data_des'][0]
    data_des_1=data_des_0+'_HighVar0' # to distinguish Similarity matrix for this step and the next step of CoSpar (use _HighVar0, instead of _HighVar1)
    adata.uns['data_des']=[data_des_orig,data_des_1]

    infer_Tmap_from_multitime_clones_private(adata,smooth_array=smooth_array,neighbor_N=neighbor_N,noise_threshold=noise_threshold,
        normalization_mode=normalization_mode,save_subset=save_subset,use_full_Smatrix=use_full_Smatrix,
        trunca_threshold=trunca_threshold,compute_new_Smatrix=compute_new_Smatrix)

    adata.uns['HighVar_transition_map']=adata.uns['transition_map']
    adata.obsm['X_clone']=real_clone_annot # This entry has been changed previously. Note correct the clonal matrix
    data_des_1=data_des_0+'_HighVar1' # to record which initialization is used
    adata.uns['data_des']=[data_des_orig,data_des_1]
# this is the new version: v1
def compute_custom_OT_transition_map(adata,OT_epsilon=0.02,OT_dis_KNN=5,
    OT_solver='duality_gap',OT_cost='SPD',compute_new=True):
    """
    Compute Tmap from state info using optimal transport (OT).

    We provide the options for the OT solver, and also the cost function.
    The OT solver does not seem to matter, although 'duality_gap' is faster.
    The cost function could affect the OT map results. Using shortest path
    distance ('SPD') is slower but more accurate, while using gene expression
    distance ('GED') is faster but less accurate. The performance of cospar
    is robust to the initialized map (this is especially so in terms of fate
    bias, not so much for the fate map alone)

    Parameters
    ----------
    adata: :class:`~anndata.AnnData` object
        Assumed to be preprocessed, only has two time points.
    OT_epsilon: `float`, optional (default: 0.02)
        The entropic regularization, >0, a larger one increases
        uncertainty of the transition
    OT_dis_KNN: `int`, optional (default: 5)
        Number of nearest neighbors to construct the KNN graph for
        computing the shortest path distance.
    OT_solver: `str`, optional (default: `duality_gap`)
        The method used to compute the optimal transport map. Available choice:
        {'duality_gap','fixed_iters'}. Our test shows that they produce the same
        results, while 'duality_gap' is almost twice faster.
    OT_cost: `str`, optional (default: `SPD`), options {'GED','SPD'}
        The cost metric. We provide gene expression distance (GED), and also
        shortest path distance (SPD). GED is much faster, but SPD is more accurate.
        However, cospar is robust to the initialization.
    compute_new: `bool`, optional (default: False)
        If True, compute OT_map and also the shortest path distance from scratch,
        whether it was computed and saved before or not.

    Raises
    ------
    ValueError
        If `OT_solver` is not one of {'duality_gap','fixed_iters'}.

    Returns
    -------
    None. Results are stored at adata.uns['OT_transition_map'].
    """
    cell_id_array_t1=adata.uns['Tmap_cell_id_t1']
    cell_id_array_t2=adata.uns['Tmap_cell_id_t2']
    data_des=adata.uns['data_des'][-1]
    data_path=settings.data_path

    ############ Compute shorted-path distance
    # use sklearn KNN graph construction method and select the connectivity option, not related to UMAP
    # use the mode 'distance' to obtain the shortest-path *distance*, rather than 'connectivity'
    if OT_cost=='SPD':
        SPD_file_name=f'{data_path}/{data_des}_ShortestPathDistanceMatrix_t0t1_KNN{OT_dis_KNN}.npy'
        if os.path.exists(SPD_file_name) and (not compute_new):
            logg.info("Load pre-computed shortest path distance matrix")
            OT_cost_matrix=np.load(SPD_file_name)
        else:
            logg.info("Compute new shortest path distance matrix")
            t=time.time()
            #data_matrix=adata.obsm['X_pca']
            #ShortPath_dis=hf.compute_shortest_path_distance_from_raw_matrix(data_matrix,num_neighbors_target=OT_dis_KNN,mode='distance')
            ShortPath_dis=hf.compute_shortest_path_distance(adata,num_neighbors_target=OT_dis_KNN,mode='distances',method='umap')

            idx0=cell_id_array_t1
            idx1=cell_id_array_t2
            ShortPath_dis_t0t1=ShortPath_dis[idx0[:,np.newaxis],idx1]
            # Normalize to [0,1] so OT_epsilon has a consistent scale.
            OT_cost_matrix=ShortPath_dis_t0t1/ShortPath_dis_t0t1.max()

            np.save(SPD_file_name,OT_cost_matrix)

            logg.info(f"Finishing computing shortest-path distance, used time {time.time()-t}")
    else:
        t=time.time()
        pc_n=adata.obsm['X_pca'].shape[1]
        OT_cost_matrix=hf.compute_gene_exp_distance(adata,cell_id_array_t1,cell_id_array_t2,pc_n=pc_n)
        logg.info(f"Finishing computing gene expression distance, used time {time.time()-t}")

    ######## apply optimal transport
    CustomOT_file_name=f'{data_path}/{data_des}_CustomOT_map_epsilon{OT_epsilon}_KNN{OT_dis_KNN}.npy'
    if os.path.exists(CustomOT_file_name) and (not compute_new):
        logg.info("Load pre-computed custon OT matrix")
        OT_transition_map=np.load(CustomOT_file_name)
    else:
        logg.info("Compute new custon OT matrix")
        t=time.time()
        # Uniform marginals over the two time points.
        mu1=np.ones(len(cell_id_array_t1))
        nu1=np.ones(len(cell_id_array_t2))
        input_mu=mu1 # initial distribution (kept for the alternative solver below)
        input_nu=nu1 # final distribution

        ######### We have tested that it is at least 3 times slower than WOT's builtin method,
        #### although the results are the same
        # # This taks 170s for the subsampled hematopoietic data
        # logg.info("Use sinkhorn solver solver")
        # OT_transition_map=otb.sinkhorn_stabilized(input_mu,input_nu,ShortPath_dis_t0t1,OT_epsilon,numItermax=OT_max_iter,stopThr=OT_stopThr)

        #############
        # BUGFIX: the original code re-assigned OT_solver='duality_gap' here,
        # which silently overrode the caller's choice and made the
        # 'fixed_iters' branch (and the ValueError) unreachable. The
        # parameter value is now respected.
        logg.info(f"OT solver: {OT_solver}")
        if OT_solver == 'fixed_iters': # This takes 50s for the subsampled hematopoietic data. The result is the same.
            ot_config = {'C':OT_cost_matrix,'G':mu1, 'epsilon': OT_epsilon, 'lambda1': 1, 'lambda2': 50,
                'epsilon0': 1, 'scaling_iter': 3000,'tau': 10000, 'inner_iter_max': 50, 'extra_iter': 1000}

            OT_transition_map=transport_stablev2(**ot_config)
        elif OT_solver == 'duality_gap': # This takes 30s for the subsampled hematopoietic data. The result is the same.
            ot_config = {'C':OT_cost_matrix,'G':mu1, 'epsilon': OT_epsilon, 'lambda1': 1, 'lambda2': 50,
                'epsilon0': 1, 'tau': 10000, 'tolerance': 1e-08,
                'max_iter': 1e7, 'batch_size': 5}

            OT_transition_map=optimal_transport_duality_gap(**ot_config)
        else:
            raise ValueError('Unknown solver')

        np.save(CustomOT_file_name,OT_transition_map)

        logg.info(f"Finishing computing optial transport map, used time {time.time()-t}")

    adata.uns['OT_transition_map']=ssp.csr_matrix(OT_transition_map)
    data_des_0=adata.uns['data_des'][-1]
    data_des_orig=adata.uns['data_des'][0]
    data_des_1=data_des_0+'_OT' # to record which initialization is used
    adata.uns['data_des']=[data_des_orig,data_des_1]
# We tested that, for clones of all different sizes, where np.argsort gives unique results,
# this method reproduces the v01, v1 results, when use_fixed_clonesize_t1=True, and when change
# sort_clone=0,1,-1.
def infer_Tmap_from_one_time_clones_private(adata,initialized_map,Clone_update_iter_N=1,
    smooth_array=[15,10,5],CoSpar_KNN=20,normalization_mode=1,noise_threshold=0.2,
    use_full_Smatrix=False,trunca_threshold=0.001,compute_new=True,
    use_fixed_clonesize_t1=False,sort_clone=1):
    """
    Infer Tmap from clones with a single time point

    Starting from an initialized transition map from state information,
    we jointly infer the initial clonal states and the transition map.

    This method has been optimized to be very fast. Besides, it is
    deterministic.

    Parameters
    ----------
    adata: :class:`~anndata.AnnData` object
        Should have only two time points.
    initialized_map: `sp.spmatrix`
        Initialized transition map based on state information alone.
    Clone_update_iter_N: `int`, optional (default: 1)
        Number of iterations for the joint optimization.
    normalization_mode: `int`, optional (default: 1)
        Method for normalization. Choice: [0,1]
        0, single-cell normalization
        1, Clone normalization
    smooth_array: `list`, optional (default: [15,10,5])
        List of smooth rounds at each iteration.
        The n-th entry determines the smooth round for the Smatrix
        at the n-th iteration. Its length determines the number of
        iterations. It is better to use a number at the multiple of
        5, i.e., 5, 10, 15, 20,...
    CoSpar_KNN: `int`, optional (default: 20)
        the number of neighbors for KNN graph used for computing the similarity matrix.
    trunca_threshold: `float`, optional (default: 0.001)
        We set entries to zero in the computed similarity matrix that
        are smaller than this threshold. This is to promote the Smatrix sparsity, which
        leads to faster computation, and smaller file size.
        This threshold should be small, but not too small.
    noise_threshold: `float`, optional (default: 0.1)
        threshold to remove noises in the updated transition map,
        in the range [0,1]
    save_subset: `bool`, optional (default: True)
        If true, save only Smatrix at smooth round [5,10,15,...].
        Else, save Smatrix at each round.
    use_full_Smatrix: `bool`, optional (default: False)
        use the Smatrix as defined by all cells, whether they are clonally
        barcoded or not. We sub-sample cell states relevant for downstream
        analysis from this full Smatrix. This may refine the Smatrix.
        But will also increase the computation time significantly.
    use_fixed_clonesize_t1: `bool`, optional (default: False)
        If true, fix the number of initial states as the same for all clones
    sort_clone: `int`, optional (default: 1)
        The order to infer initial states for each clone: {1,-1,others}
        1, sort clones by size from small to large
        -1,sort clones by size from large to small
        others, do not sort.
    compute_new: `bool`, optional (default: False)
        If True, compute everthing (ShortestPathDis,OT_map etc.) from scratch,
        whether it was computed and saved before or not.

    Returns
    ------
    None. Update adata.obsm['X_clone'] and adata.uns['transition_map'],
    as well as adata.uns['OT_transition_map'] or
    adata.uns['intraclone_transition_map'], depending on the initialization.
    """
    # I found the error: 1) we should use clonally related cell number at t2 as a factor to determine the clonally cell number at t1
    # 2) update the whole t2 clonal info at once

    logg.info("Joint optimization that consider possibility of clonal overlap: v2")

    cell_id_array_t1=adata.uns['Tmap_cell_id_t1']
    cell_id_array_t2=adata.uns['Tmap_cell_id_t2']
    data_des=adata.uns['data_des'][-1]
    data_path=settings.data_path
    X_clone=adata.obsm['X_clone']
    if not ssp.issparse(X_clone): X_clone=ssp.csr_matrix(X_clone)

    time_info=np.array(adata.obs['time_info'])
    # Boolean masks over all cells for the two time points (derived from the
    # time label of the first cell in each Tmap id array).
    time_index_t1=time_info==(time_info[cell_id_array_t1[0]])
    time_index_t2=time_info==(time_info[cell_id_array_t2[0]])

    if not ssp.issparse(initialized_map):
        map_temp=ssp.csr_matrix(initialized_map)
    else:
        map_temp=initialized_map

    # a clone must has at least 2 cells, to be updated later.
    valid_clone_id=np.nonzero(X_clone[cell_id_array_t2].sum(0).A.flatten()>0)[0]
    X_clone_temp=X_clone[:,valid_clone_id]
    clonal_cells_t2=np.sum(X_clone_temp[cell_id_array_t2].sum(1).flatten())

    logg.hint(f"original clone shape: {X_clone.shape}")
    logg.hint(f"After excluding zero-sized clones at t2: {X_clone_temp.shape}")

    flag=True # to check whether overlapping clones are found or not
    if use_fixed_clonesize_t1:
        logg.info("Use fixed clone size at t1")

    ##### Partition cells into non-overlapping, combinatorial BC_id.
    # ---------------------------------
    # find the combinatorial barcodes
    clone_idx=np.nonzero(X_clone_temp.A)
    dic=[[] for j in range(X_clone_temp.shape[0])] # a list of list
    for j in range(clone_idx[0].shape[0]):
        dic[clone_idx[0][j]].append(clone_idx[1][j])

    BC_id=[tuple(x) for x in dic] # a BC_id is a unique barcode combination, does not change the ordering of cells

    # --------------------
    # construct the new X_clone_temp matrix, and the clone_mapping
    unique_BC_id=list(set(BC_id))
    if () in unique_BC_id: # () is resulted from cells without any barcodes
        unique_BC_id.remove(())

    # construct a X_clone_newBC for the new BC_id
    # also record how the new BC_id is related to the old barcode
    X_clone_newBC=np.zeros((X_clone_temp.shape[0],len(unique_BC_id)))
    for i, BC_0 in enumerate(BC_id):
        for j, BC_1 in enumerate(unique_BC_id):
            if BC_1==BC_0:
                X_clone_newBC[i,j]=1 # does not change the ordering of cells

    # Binary map from original clone ids to the combinatorial BC_id columns.
    clone_mapping=np.zeros((X_clone_temp.shape[1],X_clone_newBC.shape[1]))
    for j, BC_1 in enumerate(unique_BC_id):
        for k in BC_1:
            clone_mapping[k,j]=1

    X_clone_newBC=ssp.csr_matrix(X_clone_newBC)
    clone_mapping=ssp.csr_matrix(clone_mapping)
    # To recover the original X_clone_temp, use 'X_clone_newBC*(clone_mapping.T)'
    # howver, clone_mapping is not invertible. We cannot get from X_clone_temp to
    # X_clone_newBC using matrix multiplification.

    ### select the early states using the grouped distribution of a clone
    ### clones are not overlapping, and all early states should be attached to clones at the end

    # we sort clones according to their sizes. The order of cells are not affected. So, it should not affect downstream analysis
    # small clones tend to be the ones that are barcoded/mutated later, while large clones tend to be early mutations...
    clone_size_t2_temp=X_clone_newBC[cell_id_array_t2].sum(0).A.flatten()

    if sort_clone==1:
        logg.info("Sort clones by size (small to large)")

        sort_clone_id=np.argsort(clone_size_t2_temp,kind='stable')
        clone_size_t2=clone_size_t2_temp[sort_clone_id]
        X_clone_sort=X_clone_newBC[:,sort_clone_id]
        clone_mapping_sort=clone_mapping[:,sort_clone_id]

    elif sort_clone==-1:
        logg.info("Sort clones by size (large to small)")

        sort_clone_id=np.argsort(clone_size_t2_temp,kind='stable')[::-1]
        clone_size_t2=clone_size_t2_temp[sort_clone_id]
        X_clone_sort=X_clone_newBC[:,sort_clone_id]
        clone_mapping_sort=clone_mapping[:,sort_clone_id]

    else:
        logg.info("Do not order clones by size ")
        clone_size_t2=clone_size_t2_temp
        X_clone_sort=X_clone_newBC
        clone_mapping_sort=clone_mapping

    logg.info("Infer the number of initial cells to extract for each clone in advance")
    clone_N1=X_clone_sort.shape[1]
    ave_clone_size_t1=int(np.ceil(len(cell_id_array_t1)/clone_N1));
    # Budget of t1 cells per clone, proportional to the clone's t2 size
    # (differences of the cumulative count guarantee the budgets sum to ~N_t1).
    cum_cell_N=np.ceil(np.cumsum(clone_size_t2)*len(cell_id_array_t1)/clonal_cells_t2)
    cell_N_to_extract=np.zeros(len(cum_cell_N),dtype=int)
    if use_fixed_clonesize_t1:
        cell_N_to_extract += ave_clone_size_t1
    else:
        cell_N_to_extract[0]=cum_cell_N[0]
        cell_N_to_extract[1:]=np.diff(cum_cell_N)

    for x0 in range(Clone_update_iter_N):

        # update initial state probability matrix based on the current map
        initial_prob_matrix=(map_temp*X_clone_sort[cell_id_array_t2]).A  # a initial probability matrix for t1 cells, shape (n_t1_cell,n_clone)

        ########## begin: update clones
        remaining_ids_t1=list(np.arange(len(cell_id_array_t1),dtype=int))

        X_clone_new=np.zeros(X_clone_sort.shape,dtype=bool)
        X_clone_new[cell_id_array_t2]=X_clone_sort[cell_id_array_t2].A.astype(bool) # update the whole t2 clones at once

        for j in range(clone_N1):
            if (j%100==0):
                #pdb.set_trace()
                logg.hint(f"Inferring early clonal states: current clone id {j}")

            # infer the earlier clonal states for each clone
            ### select the early states using the grouped distribution of a clone
            # greedily pick the top-probability, not-yet-assigned t1 cells for this clone
            sorted_id_array=np.argsort(initial_prob_matrix[remaining_ids_t1,j],kind='stable')[::-1]

            sel_id_t1=sorted_id_array[:cell_N_to_extract[j]]
            temp_t1_idx=np.zeros(len(cell_id_array_t1),dtype=bool)
            temp_t1_idx[np.array(remaining_ids_t1)[sel_id_t1]]=True
            X_clone_new[cell_id_array_t1,j]=temp_t1_idx
            for kk in np.array(remaining_ids_t1)[sel_id_t1]:
                remaining_ids_t1.remove(kk)

            if (len(remaining_ids_t1)==0) and ((j+1)<clone_N1):
                logg.hint(f'Early break; current clone id: {j+1}')
                break

        ########### end: update clones
        cell_id_array_t1_new=np.nonzero((X_clone_new.sum(1)>0) & (time_index_t1))[0]
        cell_id_array_t2_new=np.nonzero((X_clone_new.sum(1)>0) & (time_index_t2))[0]

        adata.obsm['X_clone']=ssp.csr_matrix(X_clone_new)*(clone_mapping_sort.T) # convert back to the original clone structure
        adata.uns['multiTime_cell_id_t1']=[cell_id_array_t1_new] # For CoSpar, clonally-related states
        adata.uns['multiTime_cell_id_t2']=[cell_id_array_t2_new]
        adata.uns['clonal_cell_id_t1']=cell_id_array_t1_new # for prepare the similarity matrix with same cell states
        adata.uns['clonal_cell_id_t2']=cell_id_array_t2_new
        adata.uns['proportion']=[1]

        infer_Tmap_from_multitime_clones_private(adata,smooth_array=smooth_array,neighbor_N=CoSpar_KNN,noise_threshold=noise_threshold,
            normalization_mode=normalization_mode,save_subset=True,use_full_Smatrix=use_full_Smatrix,
            trunca_threshold=trunca_threshold,compute_new_Smatrix=compute_new)

        # update, for the next iteration
        map_temp=adata.uns['transition_map']
def infer_Tmap_from_one_time_clones(adata_orig,initial_time_points,clonal_time_point,
    initialize_method='OT',OT_epsilon=0.02,OT_dis_KNN=5,OT_cost='SPD',
    HighVar_gene_pctl=85,Clone_update_iter_N=1,normalization_mode=1,
    noise_threshold=0.2,CoSpar_KNN=20,use_full_Smatrix=False,smooth_array=[15,10,5],
    trunca_threshold=0.001,compute_new=False,
    use_fixed_clonesize_t1=False,sort_clone=1,save_subset=True):
    """
    Infer transition map from clones with a single time point
    We iteratively infer transition map between each of the initial
    time points ['day_1','day_2',...,] and the time point with clonal
    observation. Given the two time points, after initializing the map
    by either OT method or HighVar method, we jointly infer the likely
    initial clonal cells and the transition map between cell states
    in these two time points.
    **Summary**
    * Parameters relevant for cell state selection: initial_time_points,
      clonal_time_point, use_full_Smatrix.
    * Choose the initialization method, and set the corresponding parameters.
        * 'OT': tend to be more accurate, but not reliable
          under batch effect. Key parameters: `OT_epsilon, OT_dis_KNN`.
        * 'HighVar': is robust to batch effect, but not as accurate.
          Key parameter: `HighVar_gene_pctl`.
    * Key parameters relevant for CoSpar itself: `smooth_array, normalization_mode,
      CoSpar_KNN, noise_threshold, Clone_update_iter_N`.
    Parameters
    ----------
    adata_orig: :class:`~anndata.AnnData` object
        assumed to be preprocessed, can have multiple time points.
    initial_time_points: `list`
        List of initial time points to be included for the transition map.
        Like ['day_1','day_2']. Entries consistent with adata.obs['time_info'].
    clonal_time_point: `str`
        The time point with clonal observation. Its value should be
        consistent with adata.obs['time_info'].
    initialize_method: `str`, optional (default 'OT')
        Method to initialize the transition map from state information.
        Choice: {'OT', 'HighVar'}.
    OT_epsilon: `float`, optional (default: 0.02)
        The entropic regularization, >0, a larger one increases
        uncertainty of the transition. Relevant when `initialize_method='OT'`.
    OT_dis_KNN: `int`, optional (default: 5)
        Number of nearest neighbors to construct the KNN graph for
        computing the shortest path distance. Relevant when `initialize_method='OT'`.
    OT_cost: `str`, optional (default: `SPD`), options {'GED','SPD'}
        The cost metric. We provide gene expression distance (GED), and also
        shortest path distance (SPD). GED is much faster, but SPD is more accurate.
        However, cospar is robust to the initialization.
    HighVar_gene_pctl: `int`, optional (default: 85)
        percentile threshold to select highly variable genes. Range: [0,100].
        A higher value selects more variable genes.
        Relevant when `initialize_method='HighVar'`.
    Clone_update_iter_N: `int`, optional (default: 1)
        Number of iteration for the joint optimization
    normalization_mode: `int`, optional (default: 1)
        Method for normalization. Choice: [0,1]
        0, single-cell normalization
        1, Clone normalization
    smooth_array: `list`, optional (default: [15,10,5])
        List of smooth rounds at each iteration.
        The n-th entry determines the smooth round for the Smatrix
        at the n-th iteration. Its length determins the number of
        iteration. It is better to use a number at the multiple of
        5, i.e., 5, 10, 15, 20,...
    CoSpar_KNN: `int`, optional (default: 20)
        the number of neighbors for KNN graph used for computing the similarity matrix.
    trunca_threshold: `float`, optional (default: 0.001)
        We set entries to zero in the computed similarity matrix that
        are smaller than this threshold. This is to promote the Smatrix sparsity, which
        leads to faster computation, and smaller file size.
        This threshld should be small, but not too small.
    noise_threshold: `float`, optional (default: 0.1)
        noise threshold to remove noises in the updated transition map,
        in the range [0,1]
    save_subset: `bool`, optional (default: True)
        If true, save only Smatrix at smooth round [5,10,15,...].
        Else, save Smatrix at each round.
    use_full_Smatrix: `bool`, optional (default: False)
        use the Smatrix as defined by all cells, whether they are clonally
        barcoded or not. We sub-sample cell states relevant for downstream
        analysis from this full Smatrix. This may refine the Smatrix.
        But will also increase the computation time significantly.
    use_fixed_clonesize_t1: `bool`, optional (default: False)
        If true, fix the number of initial states as the same for all clones
    sort_clone: `int`, optional (default: 1)
        The order to infer initial states for each clone: {1,-1,others}
        1, sort clones by size from small to large
        -1,sort clones by size from large to small
        others, do not sort.
    compute_new: `bool`, optional (default: False)
        If True, compute everthing (ShortestPathDis,OT_map etc.) from scratch,
        whether it was computed and saved before or not. Regarding the Smatrix, it is
        recomputed only when `use_full_Smatrix=False`.
    Returns
    -------
    adata: :class:`~anndata.AnnData` object
        Update adata.obsm['X_clone'] and adata.uns['transition_map'],
        as well as adata.uns['OT_transition_map'] or
        adata.uns['intraclone_transition_map'], depending on the initialization.
    """
    t0=time.time()
    # Validate every requested initial time point against the actual labels.
    for xx in initial_time_points:
        if xx not in list(set(adata_orig.obs['time_info'])):
            logg.error(f"the 'initial_time_points' are not valid. Please select from {list(set(adata_orig.obs['time_info']))}")
            return adata_orig
    hf.check_available_clonal_info(adata_orig)
    # Whether the chosen clonal_time_point actually carries clonal barcodes;
    # if not, we still proceed but skip the clone-informed transition map below.
    with_clonal_info=(clonal_time_point in adata_orig.uns['clonal_time_points'])
    if not with_clonal_info:
        logg.warn(f"'clonal_time_point' do not contain clonal information. Please set clonal_time_point to be one of {adata_orig.uns['clonal_time_points']}")
        #logg.info("Consider run ----cs.tmap.CoSpar_NoClonalInfo------")
        logg.warn("Keep running but without clonal information")
        #return adata_orig
    # Boolean mask selecting the cells belonging to the chosen time points.
    sp_idx=np.zeros(adata_orig.shape[0],dtype=bool)
    time_info_orig=np.array(adata_orig.obs['time_info'])
    all_time_points=initial_time_points+[clonal_time_point]
    label='t'
    for xx in all_time_points:
        id_array=np.nonzero(time_info_orig==xx)[0]
        sp_idx[id_array]=True
        label=label+'*'+str(xx)  # e.g. 't*day_1*day_2'; used to tag data_des below
    # Build a fresh AnnData containing only the selected cells, carrying over
    # embeddings, annotations, and the clone matrix restricted to those cells.
    adata=sc.AnnData(adata_orig.X[sp_idx]);
    adata.var_names=adata_orig.var_names
    adata.obsm['X_pca']=adata_orig.obsm['X_pca'][sp_idx]
    adata.obsm['X_emb']=adata_orig.obsm['X_emb'][sp_idx]
    adata.obs['state_info']=pd.Categorical(adata_orig.obs['state_info'][sp_idx])
    adata.obs['time_info']=pd.Categorical(adata_orig.obs['time_info'][sp_idx])
    data_des_orig=adata_orig.uns['data_des'][0]
    data_des_0=adata_orig.uns['data_des'][-1]
    data_des=data_des_0+f'_OneTimeClone_{label}'
    adata.uns['data_des']=[data_des_orig,data_des]
    clone_annot_orig=adata_orig.obsm['X_clone']
    clone_annot=clone_annot_orig[sp_idx]
    adata.obsm['X_clone']=clone_annot
    time_info=np.array(adata.obs['time_info'])
    # t2 = the clonal time point; t1 = all remaining (initial) time points.
    time_index_t2=time_info==clonal_time_point
    time_index_t1=~time_index_t2
    #### used for similarity matrix generation
    Tmap_cell_id_t1=np.nonzero(time_index_t1)[0]
    Tmap_cell_id_t2=np.nonzero(time_index_t2)[0]
    adata.uns['Tmap_cell_id_t1']=Tmap_cell_id_t1
    adata.uns['Tmap_cell_id_t2']=Tmap_cell_id_t2
    adata.uns['clonal_cell_id_t1']=Tmap_cell_id_t1
    adata.uns['clonal_cell_id_t2']=Tmap_cell_id_t2
    adata.uns['sp_idx']=sp_idx
    data_path=settings.data_path
    # Dense maps assembled row-block by row-block, one block per initial time point.
    transition_map=np.zeros((len(Tmap_cell_id_t1),len(Tmap_cell_id_t2)))
    ini_transition_map=np.zeros((len(Tmap_cell_id_t1),len(Tmap_cell_id_t2)))
    for yy in initial_time_points:
        logg.info("-------------------------------New Start--------------------------------------------------")
        logg.info(f"Current time point: {yy}")
        # Infer the two-time map between this initial time point and the clonal one.
        adata_temp=infer_Tmap_from_one_time_clones_twoTime(adata_orig,selected_two_time_points=[yy,clonal_time_point],
            initialize_method=initialize_method,OT_epsilon=OT_epsilon,OT_dis_KNN=OT_dis_KNN,
            OT_cost=OT_cost,HighVar_gene_pctl=HighVar_gene_pctl,
            Clone_update_iter_N=Clone_update_iter_N,normalization_mode=normalization_mode,
            noise_threshold=noise_threshold,CoSpar_KNN=CoSpar_KNN,use_full_Smatrix=use_full_Smatrix,smooth_array=smooth_array,
            trunca_threshold=trunca_threshold,compute_new=compute_new,
            use_fixed_clonesize_t1=use_fixed_clonesize_t1,sort_clone=sort_clone,save_subset=save_subset)
        # Map this time point's cells into row indices of the combined t1 space.
        temp_id_t1=np.nonzero(time_info==yy)[0]
        sp_id_t1=hf.converting_id_from_fullSpace_to_subSpace(temp_id_t1,Tmap_cell_id_t1)[0]
        if with_clonal_info:
            transition_map_temp=adata_temp.uns['transition_map'].A
            transition_map[sp_id_t1,:]=transition_map_temp
        if initialize_method=='OT':
            transition_map_ini_temp=adata_temp.uns['OT_transition_map']
        else:
            transition_map_ini_temp=adata_temp.uns['HighVar_transition_map']
        ini_transition_map[sp_id_t1,:]=transition_map_ini_temp.A
    # Store the assembled maps as sparse matrices on the subsetted object.
    if with_clonal_info:
        adata.uns['transition_map']=ssp.csr_matrix(transition_map)
    if initialize_method=='OT':
        adata.uns['OT_transition_map']=ssp.csr_matrix(ini_transition_map)
    else:
        adata.uns['HighVar_transition_map']=ssp.csr_matrix(ini_transition_map)
    logg.info(f"-----------Total used time: {time.time()-t0} s ------------")
    return adata
def infer_Tmap_from_state_info_alone(adata_orig,initial_time_points,target_time_point,
    method='OT',OT_epsilon=0.02,OT_dis_KNN=5,OT_cost='SPD',
    HighVar_gene_pctl=85,normalization_mode=1,noise_threshold=0.2,
    CoSpar_KNN=20,use_full_Smatrix=False,smooth_array=[15,10,5],
    trunca_threshold=0.001,compute_new=False,save_subset=True):
    """
    Infer transition map from state information alone.
    We iteratively infer transition map between each of the initial
    time points ['day_1','day_2',...,] and the targeted time point.
    Given each two-time pair, we infer the map by either OT method
    or HighVar method:
    * 'OT': tend to be more accurate, but not reliable
      under batch effect. Key parameters: `OT_epsilon, OT_dis_KNN`.
    * 'HighVar': is robust to batch effect, but not as accurate.
      Key parameter: `HighVar_gene_pctl`.
    Parameters
    ----------
    adata_orig: :class:`~anndata.AnnData` object
        assumed to be preprocessed, can have multiple time points.
    initial_time_points: `list`
        List of initial time points to be included for the transition map.
        Like ['day_1','day_2']. Entries consistent with adata.obs['time_info'].
    target_time_point: `str`
        The target time point of the transition map. Its value should be
        consistent with adata.obs['time_info'].
    method: `str`, optional (default 'OT')
        Method to initialize the transition map from state information.
        Choice: {'OT', 'HighVar'}.
    OT_epsilon: `float`, optional (default: 0.02)
        The entropic regularization, >0, a larger one increases
        uncertainty of the transition. Relevant when `method='OT'`.
    OT_dis_KNN: `int`, optional (default: 5)
        Number of nearest neighbors to construct the KNN graph for
        computing the shortest path distance. Relevant when `method='OT'`.
    OT_cost: `str`, optional (default: `SPD`), options {'GED','SPD'}
        The cost metric. We provide gene expression distance (GED), and also
        shortest path distance (SPD). GED is much faster, but SPD is more accurate.
        However, cospar is robust to the initialization.
    HighVar_gene_pctl: `int`, optional (default: 85)
        Genes wht a variability percentile higher than this threshold are marked as
        highly variable genes for dimension reduction. Range: [0,100].
        Relevant when `method='HighVar'`.
    normalization_mode: `int`, optional (default: 1)
        Method for normalization. Choice: [0,1]
        0, single-cell normalization
        1, Clone normalization
    smooth_array: `list`, optional (default: [15,10,5])
        List of smooth rounds at each iteration.
        The n-th entry determines the smooth round for the Smatrix
        at the n-th iteration. Its length determins the number of
        iteration. It is better to use a number at the multiple of
        5, i.e., 5, 10, 15, 20,...
    CoSpar_KNN: `int`, optional (default: 20)
        the number of neighbors for KNN graph used for computing the similarity matrix.
    trunca_threshold: `float`, optional (default: 0.001)
        We set entries to zero in the computed similarity matrix that
        are smaller than this threshold. This is to promote the Smatrix sparsity, which
        leads to faster computation, and smaller file size.
        This threshld should be small, but not too small.
    noise_threshold: `float`, optional (default: 0.1)
        noise threshold to remove noises in the updated transition map,
        in the range [0,1]
    save_subset: `bool`, optional (default: True)
        If true, save only Smatrix at smooth round [5,10,15,...].
        Else, save Smatrix at each round.
    use_full_Smatrix: `bool`, optional (default: False)
        use the Smatrix as defined by all cells, whether they are clonally
        barcoded or not. We sub-sample cell states relevant for downstream
        analysis from this full Smatrix. This may refine the Smatrix.
        But will also increase the computation time significantly.
    compute_new: `bool`, optional (default: False)
        If True, compute everthing (ShortestPathDis,OT_map etc.) from scratch,
        whether it was computed and saved before or not. Regarding the Smatrix, it is
        recomputed only when `use_full_Smatrix=False`.
    Returns
    -------
    adata: :class:`~anndata.AnnData` object
        Update adata.uns['OT_transition_map'] or adata.uns['intraclone_transition_map'],
        depending on the initialization.
    """
    t0=time.time()
    # Validate every requested initial time point against the actual labels.
    # NOTE: switched from bare print() to logg.* for consistency with
    # infer_Tmap_from_one_time_clones.
    for xx in initial_time_points:
        if xx not in list(set(adata_orig.obs['time_info'])):
            logg.error(f"the 'initial_time_points' are not valid. Please select from {list(set(adata_orig.obs['time_info']))}")
            return adata_orig
    # Boolean mask selecting the cells belonging to the chosen time points.
    sp_idx=np.zeros(adata_orig.shape[0],dtype=bool)
    time_info_orig=np.array(adata_orig.obs['time_info'])
    all_time_points=initial_time_points+[target_time_point]
    label='t'
    for xx in all_time_points:
        id_array=np.nonzero(time_info_orig==xx)[0]
        sp_idx[id_array]=True
        label=label+'*'+str(xx)  # e.g. 't*day_1*day_2'; used to tag data_des below
    # Build a fresh AnnData containing only the selected cells.
    adata=sc.AnnData(adata_orig.X[sp_idx]);
    adata.var_names=adata_orig.var_names
    adata.obsm['X_pca']=adata_orig.obsm['X_pca'][sp_idx]
    adata.obsm['X_emb']=adata_orig.obsm['X_emb'][sp_idx]
    adata.obs['state_info']=pd.Categorical(adata_orig.obs['state_info'][sp_idx])
    adata.obs['time_info']=pd.Categorical(adata_orig.obs['time_info'][sp_idx])
    data_des_orig=adata_orig.uns['data_des'][0]
    data_des_0=adata_orig.uns['data_des'][-1]
    data_des=data_des_0+f'_stateInfo_{label}'
    adata.uns['data_des']=[data_des_orig,data_des]
    clone_annot_orig=adata_orig.obsm['X_clone']
    clone_annot=clone_annot_orig[sp_idx]
    adata.obsm['X_clone']=clone_annot
    time_info=np.array(adata.obs['time_info'])
    # t2 = the target time point; t1 = all remaining (initial) time points.
    time_index_t2=time_info==target_time_point
    time_index_t1=~time_index_t2
    #### used for similarity matrix generation
    Tmap_cell_id_t1=np.nonzero(time_index_t1)[0]
    Tmap_cell_id_t2=np.nonzero(time_index_t2)[0]
    adata.uns['Tmap_cell_id_t1']=Tmap_cell_id_t1
    adata.uns['Tmap_cell_id_t2']=Tmap_cell_id_t2
    adata.uns['clonal_cell_id_t1']=Tmap_cell_id_t1
    adata.uns['clonal_cell_id_t2']=Tmap_cell_id_t2
    adata.uns['sp_idx']=sp_idx
    #data_path=settings.data_path
    # Dense map assembled row-block by row-block, one block per initial time point.
    ini_transition_map=np.zeros((len(Tmap_cell_id_t1),len(Tmap_cell_id_t2)))
    for yy in initial_time_points:
        logg.info("-------------------------------New Start--------------------------------------------------")
        logg.info(f"Current time point: {yy}")
        # inactive the joint optimization by setting joint_optimization=False
        adata_temp=infer_Tmap_from_one_time_clones_twoTime(adata_orig,selected_two_time_points=[yy,target_time_point],
            initialize_method=method,OT_epsilon=OT_epsilon,OT_dis_KNN=OT_dis_KNN,
            OT_cost=OT_cost,HighVar_gene_pctl=HighVar_gene_pctl,normalization_mode=normalization_mode,
            noise_threshold=noise_threshold,CoSpar_KNN=CoSpar_KNN,use_full_Smatrix=use_full_Smatrix,smooth_array=smooth_array,
            trunca_threshold=trunca_threshold,compute_new=compute_new,joint_optimization=False,save_subset=save_subset)
        # Map this time point's cells into row indices of the combined t1 space.
        temp_id_t1=np.nonzero(time_info==yy)[0]
        sp_id_t1=hf.converting_id_from_fullSpace_to_subSpace(temp_id_t1,Tmap_cell_id_t1)[0]
        if method=='OT':
            transition_map_ini_temp=adata_temp.uns['OT_transition_map']
        else:
            transition_map_ini_temp=adata_temp.uns['HighVar_transition_map']
        ini_transition_map[sp_id_t1,:]=transition_map_ini_temp.A
    # Store the assembled map as a sparse matrix on the subsetted object.
    if method=='OT':
        adata.uns['OT_transition_map']=ssp.csr_matrix(ini_transition_map)
    else:
        adata.uns['HighVar_transition_map']=ssp.csr_matrix(ini_transition_map)
    logg.info(f"-----------Total used time: {time.time()-t0} s ------------")
    return adata
def infer_Tmap_from_one_time_clones_twoTime(adata_orig,selected_two_time_points=['1','2'],
    initialize_method='OT',OT_epsilon=0.02,OT_dis_KNN=5,OT_cost='SPD',HighVar_gene_pctl=80,
    Clone_update_iter_N=1,normalization_mode=1,noise_threshold=0.2,CoSpar_KNN=20,
    use_full_Smatrix=False,smooth_array=[15,10,5],
    trunca_threshold=0.001,compute_new=True,use_fixed_clonesize_t1=False,
    sort_clone=1,save_subset=True,joint_optimization=True):
    """
    Infer transition map from clones with a single time point
    It is the same as :func:`.infer_Tmap_from_one_time_clones`, except that
    it assumes that the input adata_orig has only two time points.
    The second entry of `selected_two_time_points` is assumed to carry the
    clonal observation (if any).
    joint_optimization: `bool`, optional (default: True).
        If False, only Step 1 (OT or HighVar initialization) is run; the
        joint clone/map optimization is skipped.
    """
    time_info_orig=np.array(adata_orig.obs['time_info'])
    sort_time_point=np.sort(list(set(time_info_orig)))
    # Both requested time points must exist among the data's time labels.
    N_valid_time=np.sum(np.in1d(sort_time_point,selected_two_time_points))
    if (N_valid_time!=2):
        logg.error(f"Must select only two time points among the list {sort_time_point}")
        #The second time point in this list (not necessarily later time point) is assumed to have clonal data.")
    else:
        ####################################
        logg.info("-----------Pre-processing and sub-sampling cells------------")
        # select cells from the two time points, and sub-sampling, create the new adata object with these cell states
        sp_idx=(time_info_orig==selected_two_time_points[0]) | (time_info_orig==selected_two_time_points[1])
        adata=sc.AnnData(adata_orig.X[sp_idx]);
        adata.var_names=adata_orig.var_names
        adata.obsm['X_pca']=adata_orig.obsm['X_pca'][sp_idx]
        adata.obsm['X_emb']=adata_orig.obsm['X_emb'][sp_idx]
        adata.obs['state_info']=pd.Categorical(adata_orig.obs['state_info'][sp_idx])
        adata.obs['time_info']=pd.Categorical(adata_orig.obs['time_info'][sp_idx])
        data_des_0=adata_orig.uns['data_des'][-1]
        data_des_orig=adata_orig.uns['data_des'][0]
        data_des=data_des_0+f'_OneTimeClone_t*{selected_two_time_points[0]}*{selected_two_time_points[1]}'
        adata.uns['data_des']=[data_des_orig,data_des]
        clone_annot_orig=adata_orig.obsm['X_clone']
        # Keep only the clones that are actually observed among the selected cells.
        barcode_id=np.nonzero(clone_annot_orig[sp_idx].A.sum(0).flatten()>0)[0]
        clone_annot=clone_annot_orig[sp_idx][:,barcode_id]
        adata.obsm['X_clone']=clone_annot
        time_info=np.array(adata.obs['time_info'])
        # t1 = first selected time point; t2 = second (clonal) time point.
        time_index_t1=time_info==selected_two_time_points[0]
        time_index_t2=time_info==selected_two_time_points[1]
        #### used for similarity matrix generation
        Tmap_cell_id_t1=np.nonzero(time_index_t1)[0]
        Tmap_cell_id_t2=np.nonzero(time_index_t2)[0]
        adata.uns['Tmap_cell_id_t1']=Tmap_cell_id_t1
        adata.uns['Tmap_cell_id_t2']=Tmap_cell_id_t2
        adata.uns['clonal_cell_id_t1']=Tmap_cell_id_t1
        adata.uns['clonal_cell_id_t2']=Tmap_cell_id_t2
        adata.uns['sp_idx']=sp_idx
        data_path=settings.data_path
        cell_id_array_t1=Tmap_cell_id_t1
        cell_id_array_t2=Tmap_cell_id_t2
        ###############################
        # prepare the similarity matrix with all state info, all subsequent similarity will be down-sampled from this one.
        if use_full_Smatrix:
            # Encode the truncation threshold into the cache file name, e.g. 0.001 -> '0001'.
            temp_str='0'+str(trunca_threshold)[2:]
            round_of_smooth=np.max(smooth_array)
            data_des=adata_orig.uns['data_des'][0]
            similarity_file_name=f'{data_path}/{data_des}_Similarity_matrix_with_all_cell_states_kNN{CoSpar_KNN}_Truncate{temp_str}'
            # Recompute only if the cached .npz is missing or compute_new is requested.
            if not (os.path.exists(similarity_file_name+f'_SM{round_of_smooth}.npz') and (not compute_new)):
                similarity_matrix_full=generate_similarity_matrix(adata_orig,similarity_file_name,round_of_smooth=round_of_smooth,
                    neighbor_N=CoSpar_KNN,truncation_threshold=trunca_threshold,save_subset=save_subset,compute_new_Smatrix=compute_new)
        # Step 1: initialize the transition map from state information only.
        if initialize_method=='OT':
            logg.info("----------------")
            logg.info("Step 1: Use OT method for initialization")
            compute_custom_OT_transition_map(adata,OT_epsilon=OT_epsilon,OT_cost=OT_cost,OT_dis_KNN=OT_dis_KNN,compute_new=compute_new)
            OT_transition_map=adata.uns['OT_transition_map']
            initialized_map=OT_transition_map
        else:
            logg.info("----------------")
            logg.info("Step 1: Use highly variable genes to construct pseudo-clones, and apply CoSpar to generate initialized map!")
            t=time.time()
            Tmap_from_highly_variable_genes(adata,min_counts=3,min_cells=3,min_gene_vscore_pctl=HighVar_gene_pctl,noise_threshold=noise_threshold,neighbor_N=CoSpar_KNN,
                normalization_mode=normalization_mode,use_full_Smatrix=use_full_Smatrix,smooth_array=smooth_array,trunca_threshold=trunca_threshold,
                compute_new_Smatrix=compute_new)
            HighVar_transition_map=adata.uns['HighVar_transition_map']
            initialized_map=HighVar_transition_map
            logg.info(f"Finishing computing transport map from highly variable genes, used time {time.time()-t}")
        if joint_optimization:
            ########### Jointly optimize the transition map and the initial clonal states
            # Joint optimization requires clonal data at the second time point.
            if selected_two_time_points[1] in adata_orig.uns['clonal_time_points']:
                logg.info("----------------")
                logg.info("Step 2: Jointly optimize the transition map and the initial clonal states!")
                t=time.time()
                infer_Tmap_from_one_time_clones_private(adata,initialized_map,Clone_update_iter_N=Clone_update_iter_N,normalization_mode=normalization_mode,noise_threshold=noise_threshold,
                    CoSpar_KNN=CoSpar_KNN,use_full_Smatrix=use_full_Smatrix,smooth_array=smooth_array,trunca_threshold=trunca_threshold,
                    compute_new=compute_new,use_fixed_clonesize_t1=use_fixed_clonesize_t1,sort_clone=sort_clone)
                logg.info(f"Finishing computing transport map from CoSpar using inferred clonal data, used time {time.time()-t}")
            else:
                logg.warn("No clonal information available. Skip the joint optimization of clone and scRNAseq data")
        return adata
def infer_Tmap_from_clonal_info_alone(adata,method='naive'):
    """
    Compute transition map using only the lineage information
    We simply average transitions across all clones, assuming that
    the intra-clone transition is uniform within the same clone.
    Parameters
    ----------
    adata: :class:`~anndata.AnnData` object
        It should have been preprocessed by :func:`.select_time_points`
    method: `str`, optional (default: 'naive')
        Method used to compute the transition map. Choice: {'naive',
        'weinreb'}. For the naive method, we simply average transitions
        across all clones, assuming that the intra-clone transition is
        uniform within the same clone. For the 'weinreb' method, we first
        find uni-potent clones, then compute the transition map by simply
        averaging across all clonal transitions as the naive method.
        Note: any value other than 'naive' falls through to the
        'weinreb' branch.
    Returns
    -------
    Update `adata` with the attributes adata.uns['clonal_transition_map']
    """
    cell_id_t2=adata.uns['Tmap_cell_id_t2']
    cell_id_t1=adata.uns['Tmap_cell_id_t1']
    clone_annot=adata.obsm['X_clone']
    if method=='naive':
        # Clone co-membership: entry (i,j) counts clones shared by
        # t1 cell i and t2 cell j.
        T_map=clone_annot[cell_id_t1]*clone_annot[cell_id_t2].T
    else:
        # 'weinreb' method: restrict to uni-potent clones (fate entropy == 1)
        # before averaging clonal transitions.
        state_annote=np.array(adata.obs['state_info'])
        fate_array=list(set(state_annote))
        potential_vector_clone, fate_entropy_clone=hf.compute_state_potential(clone_annot[cell_id_t2].T,state_annote[cell_id_t2],fate_array,fate_count=True)
        sel_unipotent_clone_id=np.array(list(set(np.nonzero(fate_entropy_clone==1)[0])))
        clone_annot_unipotent=clone_annot[:,sel_unipotent_clone_id]
        T_map=clone_annot_unipotent[cell_id_t1]*clone_annot_unipotent[cell_id_t2].T
        logg.info(f"Used uni-potent clone fraction {len(sel_unipotent_clone_id)/clone_annot.shape[1]}")
    T_map=T_map.astype(int)
    adata.uns['clonal_transition_map']=ssp.csr_matrix(T_map)
# def infer_weinreb_Tmap(adata):
# """
# Compute transition map using only the lineage information
# Find uni-potent clones, then compute the transition map by simply
# averaging across all clonal transitions as in :func:`.infer_naive_Tmap`.
# The intra-clone transition is uniform within the same clone.
# Parameters
# ----------
# adata: :class:`~anndata.AnnData` object
# It should have been preprocessed by :func:`.select_time_points`
# Returns
# -------
# Update `adata` with the attributes adata.uns['weinreb_transition_map']
# """
# logg.info("This method works when there are only time points and all datasets")
# cell_id_t2=adata.uns['Tmap_cell_id_t2']
# cell_id_t1=adata.uns['Tmap_cell_id_t1']
# clone_annot=adata.obsm['X_clone']
# state_annote=np.array(adata.obs['state_info'])
# fate_array=list(set(state_annote))
# potential_vector_clone, fate_entropy_clone=hf.compute_state_potential(clone_annot[cell_id_t2].T,state_annote[cell_id_t2],fate_array,fate_count=True)
# sel_unipotent_clone_id=np.array(list(set(np.nonzero(fate_entropy_clone==1)[0])))
# clone_annot_unipotent=clone_annot[:,sel_unipotent_clone_id]
# weinreb_map=clone_annot_unipotent[cell_id_t1]*clone_annot_unipotent[cell_id_t2].T
# weinreb_map=weinreb_map.astype(int)
# logg.info(f"Used clone fraction {len(sel_unipotent_clone_id)/clone_annot.shape[1]}")
# adata.uns['weinreb_transition_map']=ssp.csr_matrix(weinreb_map)
| [
"scanpy.AnnData",
"numpy.load",
"numpy.sum",
"scipy.sparse.issparse",
"scanpy.pp.neighbors",
"numpy.ones",
"numpy.argsort",
"scipy.sparse.lil_matrix",
"numpy.mean",
"scipy.sparse.save_npz",
"os.path.exists",
"numpy.cumsum",
"numpy.max",
"numpy.save",
"scipy.sparse.load_npz",
"scipy.spa... | [((6890, 6901), 'time.time', 'time.time', ([], {}), '()\n', (6899, 6901), False, 'import time\n'), ((7093, 7125), 'scipy.sparse.issparse', 'ssp.issparse', (['initial_similarity'], {}), '(initial_similarity)\n', (7105, 7125), True, 'import scipy.sparse as ssp\n'), ((7958, 7969), 'time.time', 'time.time', ([], {}), '()\n', (7967, 7969), False, 'import time\n'), ((8051, 8081), 'scipy.sparse.issparse', 'ssp.issparse', (['final_similarity'], {}), '(final_similarity)\n', (8063, 8081), True, 'import scipy.sparse as ssp\n'), ((9330, 9367), 'numpy.array', 'np.array', (["adata_orig.obs['time_info']"], {}), "(adata_orig.obs['time_info'])\n", (9338, 9367), True, 'import numpy as np\n'), ((17692, 17713), 'scipy.sparse.issparse', 'ssp.issparse', (['X_clone'], {}), '(X_clone)\n', (17704, 17713), True, 'import scipy.sparse as ssp\n'), ((17844, 17868), 'scipy.sparse.lil_matrix', 'ssp.lil_matrix', (['(N1, N2)'], {}), '((N1, N2))\n', (17858, 17868), True, 'import scipy.sparse as ssp\n'), ((20046, 20057), 'time.time', 'time.time', ([], {}), '()\n', (20055, 20057), False, 'import time\n'), ((22726, 22750), 'scipy.sparse.lil_matrix', 'ssp.lil_matrix', (['(N1, N2)'], {}), '((N1, N2))\n', (22740, 22750), True, 'import scipy.sparse as ssp\n'), ((28764, 28775), 'time.time', 'time.time', ([], {}), '()\n', (28773, 28775), False, 'import time\n'), ((41305, 41338), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['demultiplexed_map'], {}), '(demultiplexed_map)\n', (41319, 41338), True, 'import scipy.sparse as ssp\n'), ((46268, 46300), 'numpy.array', 'np.array', (["adata.obs['time_info']"], {}), "(adata.obs['time_info'])\n", (46276, 46300), True, 'import numpy as np\n'), ((46516, 46553), 'scanpy.AnnData', 'sc.AnnData', (['adata.X[cell_id_array_t1]'], {}), '(adata.X[cell_id_array_t1])\n', (46526, 46553), True, 'import scanpy as sc\n'), ((46568, 46605), 'scanpy.AnnData', 'sc.AnnData', (['adata.X[cell_id_array_t2]'], {}), '(adata.X[cell_id_array_t2])\n', (46578, 46605), True, 'import 
scanpy as sc\n'), ((47752, 47778), 'numpy.zeros', 'np.zeros', (['N_t1'], {'dtype': 'bool'}), '(N_t1, dtype=bool)\n', (47760, 47778), True, 'import numpy as np\n'), ((47798, 47824), 'numpy.zeros', 'np.zeros', (['N_t2'], {'dtype': 'bool'}), '(N_t2, dtype=bool)\n', (47806, 47824), True, 'import numpy as np\n'), ((49566, 49598), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['clone_annot_gene'], {}), '(clone_annot_gene)\n', (49580, 49598), True, 'import scipy.sparse as ssp\n'), ((56767, 56800), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['OT_transition_map'], {}), '(OT_transition_map)\n', (56781, 56800), True, 'import scipy.sparse as ssp\n'), ((61155, 61187), 'numpy.array', 'np.array', (["adata.obs['time_info']"], {}), "(adata.obs['time_info'])\n", (61163, 61187), True, 'import numpy as np\n'), ((62163, 62189), 'numpy.nonzero', 'np.nonzero', (['X_clone_temp.A'], {}), '(X_clone_temp.A)\n', (62173, 62189), True, 'import numpy as np\n'), ((63104, 63161), 'numpy.zeros', 'np.zeros', (['(X_clone_temp.shape[1], X_clone_newBC.shape[1])'], {}), '((X_clone_temp.shape[1], X_clone_newBC.shape[1]))\n', (63112, 63161), True, 'import numpy as np\n'), ((63280, 63309), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['X_clone_newBC'], {}), '(X_clone_newBC)\n', (63294, 63309), True, 'import scipy.sparse as ssp\n'), ((63328, 63357), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['clone_mapping'], {}), '(clone_mapping)\n', (63342, 63357), True, 'import scipy.sparse as ssp\n'), ((73816, 73827), 'time.time', 'time.time', ([], {}), '()\n', (73825, 73827), False, 'import time\n'), ((74576, 74617), 'numpy.zeros', 'np.zeros', (['adata_orig.shape[0]'], {'dtype': 'bool'}), '(adata_orig.shape[0], dtype=bool)\n', (74584, 74617), True, 'import numpy as np\n'), ((74636, 74673), 'numpy.array', 'np.array', (["adata_orig.obs['time_info']"], {}), "(adata_orig.obs['time_info'])\n", (74644, 74673), True, 'import numpy as np\n'), ((74903, 74935), 'scanpy.AnnData', 'sc.AnnData', (['adata_orig.X[sp_idx]'], 
{}), '(adata_orig.X[sp_idx])\n', (74913, 74935), True, 'import scanpy as sc\n'), ((75120, 75172), 'pandas.Categorical', 'pd.Categorical', (["adata_orig.obs['state_info'][sp_idx]"], {}), "(adata_orig.obs['state_info'][sp_idx])\n", (75134, 75172), True, 'import pandas as pd\n'), ((75200, 75251), 'pandas.Categorical', 'pd.Categorical', (["adata_orig.obs['time_info'][sp_idx]"], {}), "(adata_orig.obs['time_info'][sp_idx])\n", (75214, 75251), True, 'import pandas as pd\n'), ((75609, 75641), 'numpy.array', 'np.array', (["adata.obs['time_info']"], {}), "(adata.obs['time_info'])\n", (75617, 75641), True, 'import numpy as np\n'), ((82674, 82685), 'time.time', 'time.time', ([], {}), '()\n', (82683, 82685), False, 'import time\n'), ((82948, 82989), 'numpy.zeros', 'np.zeros', (['adata_orig.shape[0]'], {'dtype': 'bool'}), '(adata_orig.shape[0], dtype=bool)\n', (82956, 82989), True, 'import numpy as np\n'), ((83008, 83045), 'numpy.array', 'np.array', (["adata_orig.obs['time_info']"], {}), "(adata_orig.obs['time_info'])\n", (83016, 83045), True, 'import numpy as np\n'), ((83275, 83307), 'scanpy.AnnData', 'sc.AnnData', (['adata_orig.X[sp_idx]'], {}), '(adata_orig.X[sp_idx])\n', (83285, 83307), True, 'import scanpy as sc\n'), ((83492, 83544), 'pandas.Categorical', 'pd.Categorical', (["adata_orig.obs['state_info'][sp_idx]"], {}), "(adata_orig.obs['state_info'][sp_idx])\n", (83506, 83544), True, 'import pandas as pd\n'), ((83572, 83623), 'pandas.Categorical', 'pd.Categorical', (["adata_orig.obs['time_info'][sp_idx]"], {}), "(adata_orig.obs['time_info'][sp_idx])\n", (83586, 83623), True, 'import pandas as pd\n'), ((83977, 84009), 'numpy.array', 'np.array', (["adata.obs['time_info']"], {}), "(adata.obs['time_info'])\n", (83985, 84009), True, 'import numpy as np\n'), ((86853, 86890), 'numpy.array', 'np.array', (["adata_orig.obs['time_info']"], {}), "(adata_orig.obs['time_info'])\n", (86861, 86890), True, 'import numpy as np\n'), ((94425, 94446), 'scipy.sparse.csr_matrix', 
'ssp.csr_matrix', (['T_map'], {}), '(T_map)\n', (94439, 94446), True, 'import scipy.sparse as ssp\n'), ((2303, 2358), 'os.path.exists', 'os.path.exists', (["(file_name + f'_SM{round_of_smooth}.npz')"], {}), "(file_name + f'_SM{round_of_smooth}.npz')\n", (2317, 2358), False, 'import os\n'), ((2490, 2543), 'scipy.sparse.load_npz', 'ssp.load_npz', (["(file_name + f'_SM{round_of_smooth}.npz')"], {}), "(file_name + f'_SM{round_of_smooth}.npz')\n", (2502, 2543), True, 'import scipy.sparse as ssp\n'), ((2793, 2839), 'scanpy.pp.neighbors', 'sc.pp.neighbors', (['adata'], {'n_neighbors': 'neighbor_N'}), '(adata, n_neighbors=neighbor_N)\n', (2808, 2839), True, 'import scanpy as sc\n'), ((3839, 3867), 'scipy.sparse.lil_matrix', 'ssp.lil_matrix', (['(nrow, nrow)'], {}), '((nrow, nrow))\n', (3853, 3867), True, 'import scipy.sparse as ssp\n'), ((11148, 11222), 'numpy.array', 'np.array', (['[sub_item for item in Clonal_cell_ID_FOR_t for sub_item in item]'], {}), '([sub_item for item in Clonal_cell_ID_FOR_t for sub_item in item])\n', (11156, 11222), True, 'import numpy as np\n'), ((11259, 11334), 'numpy.array', 'np.array', (['[sub_item for item in Clonal_cell_ID_BACK_t for sub_item in item]'], {}), '([sub_item for item in Clonal_cell_ID_BACK_t for sub_item in item])\n', (11267, 11334), True, 'import numpy as np\n'), ((12672, 12719), 'numpy.zeros', 'np.zeros', (['clone_annot_orig.shape[0]'], {'dtype': 'bool'}), '(clone_annot_orig.shape[0], dtype=bool)\n', (12680, 12719), True, 'import numpy as np\n'), ((13724, 13771), 'numpy.zeros', 'np.zeros', (['clone_annot_orig.shape[0]'], {'dtype': 'bool'}), '(clone_annot_orig.shape[0], dtype=bool)\n', (13732, 13771), True, 'import numpy as np\n'), ((13996, 14028), 'scanpy.AnnData', 'sc.AnnData', (['adata_orig.X[sp_idx]'], {}), '(adata_orig.X[sp_idx])\n', (14006, 14028), True, 'import scanpy as sc\n'), ((14229, 14281), 'pandas.Categorical', 'pd.Categorical', (["adata_orig.obs['state_info'][sp_idx]"], {}), 
"(adata_orig.obs['state_info'][sp_idx])\n", (14243, 14281), True, 'import pandas as pd\n'), ((14313, 14364), 'pandas.Categorical', 'pd.Categorical', (["adata_orig.obs['time_info'][sp_idx]"], {}), "(adata_orig.obs['time_info'][sp_idx])\n", (14327, 14364), True, 'import pandas as pd\n'), ((17731, 17754), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['X_clone'], {}), '(X_clone)\n', (17745, 17754), True, 'import scipy.sparse as ssp\n'), ((22495, 22523), 'scipy.sparse.issparse', 'ssp.issparse', (['transition_map'], {}), '(transition_map)\n', (22507, 22523), True, 'import scipy.sparse as ssp\n'), ((22540, 22570), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['transition_map'], {}), '(transition_map)\n', (22554, 22570), True, 'import scipy.sparse as ssp\n'), ((22582, 22603), 'scipy.sparse.issparse', 'ssp.issparse', (['X_clone'], {}), '(X_clone)\n', (22594, 22603), True, 'import scipy.sparse as ssp\n'), ((22613, 22636), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['X_clone'], {}), '(X_clone)\n', (22627, 22636), True, 'import scipy.sparse as ssp\n'), ((29770, 29790), 'numpy.max', 'np.max', (['smooth_array'], {}), '(smooth_array)\n', (29776, 29790), True, 'import numpy as np\n'), ((37893, 37914), 'scipy.sparse.issparse', 'ssp.issparse', (['X_clone'], {}), '(X_clone)\n', (37905, 37914), True, 'import scipy.sparse as ssp\n'), ((37932, 37955), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['X_clone'], {}), '(X_clone)\n', (37946, 37955), True, 'import scipy.sparse as ssp\n'), ((39642, 39673), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['clonal_coupling'], {}), '(clonal_coupling)\n', (39656, 39673), True, 'import scipy.sparse as ssp\n'), ((40156, 40188), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['unSM_sc_coupling'], {}), '(unSM_sc_coupling)\n', (40170, 40188), True, 'import scipy.sparse as ssp\n'), ((40199, 40210), 'time.time', 'time.time', ([], {}), '()\n', (40208, 40210), False, 'import time\n'), ((40511, 40543), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', 
(['transition_map_1'], {}), '(transition_map_1)\n', (40525, 40543), True, 'import scipy.sparse as ssp\n'), ((42840, 42873), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['demultiplexed_map'], {}), '(demultiplexed_map)\n', (42854, 42873), True, 'import scipy.sparse as ssp\n'), ((54282, 54293), 'time.time', 'time.time', ([], {}), '()\n', (54291, 54293), False, 'import time\n'), ((54695, 54729), 'os.path.exists', 'os.path.exists', (['CustomOT_file_name'], {}), '(CustomOT_file_name)\n', (54709, 54729), False, 'import os\n'), ((54836, 54863), 'numpy.load', 'np.load', (['CustomOT_file_name'], {}), '(CustomOT_file_name)\n', (54843, 54863), True, 'import numpy as np\n'), ((54936, 54947), 'time.time', 'time.time', ([], {}), '()\n', (54945, 54947), False, 'import time\n'), ((56593, 56639), 'numpy.save', 'np.save', (['CustomOT_file_name', 'OT_transition_map'], {}), '(CustomOT_file_name, OT_transition_map)\n', (56600, 56639), True, 'import numpy as np\n'), ((61084, 61105), 'scipy.sparse.issparse', 'ssp.issparse', (['X_clone'], {}), '(X_clone)\n', (61096, 61105), True, 'import scipy.sparse as ssp\n'), ((61115, 61138), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['X_clone'], {}), '(X_clone)\n', (61129, 61138), True, 'import scipy.sparse as ssp\n'), ((61324, 61353), 'scipy.sparse.issparse', 'ssp.issparse', (['initialized_map'], {}), '(initialized_map)\n', (61336, 61353), True, 'import scipy.sparse as ssp\n'), ((61372, 61403), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['initialized_map'], {}), '(initialized_map)\n', (61386, 61403), True, 'import scipy.sparse as ssp\n'), ((64178, 64223), 'numpy.argsort', 'np.argsort', (['clone_size_t2_temp'], {'kind': '"""stable"""'}), "(clone_size_t2_temp, kind='stable')\n", (64188, 64223), True, 'import numpy as np\n'), ((65390, 65409), 'numpy.diff', 'np.diff', (['cum_cell_N'], {}), '(cum_cell_N)\n', (65397, 65409), True, 'import numpy as np\n'), ((65820, 65860), 'numpy.zeros', 'np.zeros', (['X_clone_sort.shape'], {'dtype': 'bool'}), 
'(X_clone_sort.shape, dtype=bool)\n', (65828, 65860), True, 'import numpy as np\n'), ((75790, 75815), 'numpy.nonzero', 'np.nonzero', (['time_index_t1'], {}), '(time_index_t1)\n', (75800, 75815), True, 'import numpy as np\n'), ((75839, 75864), 'numpy.nonzero', 'np.nonzero', (['time_index_t2'], {}), '(time_index_t2)\n', (75849, 75864), True, 'import numpy as np\n'), ((77793, 77823), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['transition_map'], {}), '(transition_map)\n', (77807, 77823), True, 'import scipy.sparse as ssp\n'), ((77900, 77934), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['ini_transition_map'], {}), '(ini_transition_map)\n', (77914, 77934), True, 'import scipy.sparse as ssp\n'), ((77989, 78023), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['ini_transition_map'], {}), '(ini_transition_map)\n', (78003, 78023), True, 'import scipy.sparse as ssp\n'), ((84158, 84183), 'numpy.nonzero', 'np.nonzero', (['time_index_t1'], {}), '(time_index_t1)\n', (84168, 84183), True, 'import numpy as np\n'), ((84207, 84232), 'numpy.nonzero', 'np.nonzero', (['time_index_t2'], {}), '(time_index_t2)\n', (84217, 84232), True, 'import numpy as np\n'), ((85877, 85911), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['ini_transition_map'], {}), '(ini_transition_map)\n', (85891, 85911), True, 'import scipy.sparse as ssp\n'), ((85966, 86000), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['ini_transition_map'], {}), '(ini_transition_map)\n', (85980, 86000), True, 'import scipy.sparse as ssp\n'), ((86970, 87020), 'numpy.in1d', 'np.in1d', (['sort_time_point', 'selected_two_time_points'], {}), '(sort_time_point, selected_two_time_points)\n', (86977, 87020), True, 'import numpy as np\n'), ((87640, 87672), 'scanpy.AnnData', 'sc.AnnData', (['adata_orig.X[sp_idx]'], {}), '(adata_orig.X[sp_idx])\n', (87650, 87672), True, 'import scanpy as sc\n'), ((87873, 87925), 'pandas.Categorical', 'pd.Categorical', (["adata_orig.obs['state_info'][sp_idx]"], {}), 
"(adata_orig.obs['state_info'][sp_idx])\n", (87887, 87925), True, 'import pandas as pd\n'), ((87957, 88008), 'pandas.Categorical', 'pd.Categorical', (["adata_orig.obs['time_info'][sp_idx]"], {}), "(adata_orig.obs['time_info'][sp_idx])\n", (87971, 88008), True, 'import pandas as pd\n'), ((88553, 88585), 'numpy.array', 'np.array', (["adata.obs['time_info']"], {}), "(adata.obs['time_info'])\n", (88561, 88585), True, 'import numpy as np\n'), ((93773, 93806), 'numpy.array', 'np.array', (["adata.obs['state_info']"], {}), "(adata.obs['state_info'])\n", (93781, 93806), True, 'import numpy as np\n'), ((3902, 3915), 'numpy.ones', 'np.ones', (['nrow'], {}), '(nrow)\n', (3909, 3915), True, 'import numpy as np\n'), ((4095, 4106), 'time.time', 'time.time', ([], {}), '()\n', (4104, 4106), False, 'import time\n'), ((4443, 4454), 'time.time', 'time.time', ([], {}), '()\n', (4452, 4454), False, 'import time\n'), ((5562, 5595), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['similarity_matrix'], {}), '(similarity_matrix)\n', (5576, 5595), True, 'import scipy.sparse as ssp\n'), ((7204, 7215), 'time.time', 'time.time', ([], {}), '()\n', (7213, 7215), False, 'import time\n'), ((8256, 8267), 'time.time', 'time.time', ([], {}), '()\n', (8265, 8267), False, 'import time\n'), ((11995, 12024), 'numpy.array', 'np.array', (['old_Tmap_cell_id_t1'], {}), '(old_Tmap_cell_id_t1)\n', (12003, 12024), True, 'import numpy as np\n'), ((12256, 12285), 'numpy.array', 'np.array', (['old_Tmap_cell_id_t2'], {}), '(old_Tmap_cell_id_t2)\n', (12264, 12285), True, 'import numpy as np\n'), ((20152, 20163), 'time.time', 'time.time', ([], {}), '()\n', (20161, 20163), False, 'import time\n'), ((20277, 20288), 'time.time', 'time.time', ([], {}), '()\n', (20286, 20288), False, 'import time\n'), ((39269, 39312), 'numpy.in1d', 'np.in1d', (['Tmap_cell_id_t1', 'clonal_cell_id_t1'], {}), '(Tmap_cell_id_t1, clonal_cell_id_t1)\n', (39276, 39312), True, 'import numpy as np\n'), ((39354, 39397), 'numpy.in1d', 'np.in1d', 
(['Tmap_cell_id_t2', 'clonal_cell_id_t2'], {}), '(Tmap_cell_id_t2, clonal_cell_id_t2)\n', (39361, 39397), True, 'import numpy as np\n'), ((42500, 42521), 'scipy.sparse.issparse', 'ssp.issparse', (['X_clone'], {}), '(X_clone)\n', (42512, 42521), True, 'import scipy.sparse as ssp\n'), ((42543, 42566), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['X_clone'], {}), '(X_clone)\n', (42557, 42566), True, 'import scipy.sparse as ssp\n'), ((53282, 53311), 'os.path.exists', 'os.path.exists', (['SPD_file_name'], {}), '(SPD_file_name)\n', (53296, 53311), False, 'import os\n'), ((53436, 53458), 'numpy.load', 'np.load', (['SPD_file_name'], {}), '(SPD_file_name)\n', (53443, 53458), True, 'import numpy as np\n'), ((53556, 53567), 'time.time', 'time.time', ([], {}), '()\n', (53565, 53567), False, 'import time\n'), ((54126, 54164), 'numpy.save', 'np.save', (['SPD_file_name', 'OT_cost_matrix'], {}), '(SPD_file_name, OT_cost_matrix)\n', (54133, 54164), True, 'import numpy as np\n'), ((67158, 67185), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['X_clone_new'], {}), '(X_clone_new)\n', (67172, 67185), True, 'import scipy.sparse as ssp\n'), ((74796, 74828), 'numpy.nonzero', 'np.nonzero', (['(time_info_orig == xx)'], {}), '(time_info_orig == xx)\n', (74806, 74828), True, 'import numpy as np\n'), ((77180, 77207), 'numpy.nonzero', 'np.nonzero', (['(time_info == yy)'], {}), '(time_info == yy)\n', (77190, 77207), True, 'import numpy as np\n'), ((83168, 83200), 'numpy.nonzero', 'np.nonzero', (['(time_info_orig == xx)'], {}), '(time_info_orig == xx)\n', (83178, 83200), True, 'import numpy as np\n'), ((85426, 85453), 'numpy.nonzero', 'np.nonzero', (['(time_info == yy)'], {}), '(time_info == yy)\n', (85436, 85453), True, 'import numpy as np\n'), ((88784, 88809), 'numpy.nonzero', 'np.nonzero', (['time_index_t1'], {}), '(time_index_t1)\n', (88794, 88809), True, 'import numpy as np\n'), ((88837, 88862), 'numpy.nonzero', 'np.nonzero', (['time_index_t2'], {}), '(time_index_t2)\n', (88847, 88862), 
True, 'import numpy as np\n'), ((89512, 89532), 'numpy.max', 'np.max', (['smooth_array'], {}), '(smooth_array)\n', (89518, 89532), True, 'import numpy as np\n'), ((90745, 90756), 'time.time', 'time.time', ([], {}), '()\n', (90754, 90756), False, 'import time\n'), ((6072, 6131), 'scipy.sparse.save_npz', 'ssp.save_npz', (["(file_name + f'_SM{SM}.npz')", 'similarity_matrix'], {}), "(file_name + f'_SM{SM}.npz', similarity_matrix)\n", (6084, 6131), True, 'import scipy.sparse as ssp\n'), ((9720, 9778), 'scipy.sparse.csr_matrix', 'ssp.csr_matrix', (['clone_annot_orig[time_info_orig == time_0]'], {}), '(clone_annot_orig[time_info_orig == time_0])\n', (9734, 9778), True, 'import scipy.sparse as ssp\n'), ((29978, 30044), 'os.path.exists', 'os.path.exists', (["(similarity_file_name + f'_SM{round_of_smooth}.npz')"], {}), "(similarity_file_name + f'_SM{round_of_smooth}.npz')\n", (29992, 30044), False, 'import os\n'), ((34995, 35061), 'os.path.exists', 'os.path.exists', (["(similarity_file_name + f'_SM{round_of_smooth}.npz')"], {}), "(similarity_file_name + f'_SM{round_of_smooth}.npz')\n", (35009, 35061), False, 'import os\n'), ((40323, 40334), 'time.time', 'time.time', ([], {}), '()\n', (40332, 40334), False, 'import time\n'), ((40458, 40469), 'time.time', 'time.time', ([], {}), '()\n', (40467, 40469), False, 'import time\n'), ((48224, 48258), 'numpy.argsort', 'np.argsort', (['temp_t1'], {'kind': '"""stable"""'}), "(temp_t1, kind='stable')\n", (48234, 48258), True, 'import numpy as np\n'), ((48762, 48796), 'numpy.argsort', 'np.argsort', (['temp_t2'], {'kind': '"""stable"""'}), "(temp_t2, kind='stable')\n", (48772, 48796), True, 'import numpy as np\n'), ((49083, 49107), 'numpy.sum', 'np.sum', (['(~cumu_sel_idx_t1)'], {}), '(~cumu_sel_idx_t1)\n', (49089, 49107), True, 'import numpy as np\n'), ((49116, 49140), 'numpy.sum', 'np.sum', (['(~cumu_sel_idx_t2)'], {}), '(~cumu_sel_idx_t2)\n', (49122, 49140), True, 'import numpy as np\n'), ((64496, 64541), 'numpy.argsort', 'np.argsort', 
(['clone_size_t2_temp'], {'kind': '"""stable"""'}), "(clone_size_t2_temp, kind='stable')\n", (64506, 64541), True, 'import numpy as np\n'), ((65107, 65131), 'numpy.cumsum', 'np.cumsum', (['clone_size_t2'], {}), '(clone_size_t2)\n', (65116, 65131), True, 'import numpy as np\n'), ((66332, 66399), 'numpy.argsort', 'np.argsort', (['initial_prob_matrix[remaining_ids_t1, j]'], {'kind': '"""stable"""'}), "(initial_prob_matrix[remaining_ids_t1, j], kind='stable')\n", (66342, 66399), True, 'import numpy as np\n'), ((66679, 66705), 'numpy.array', 'np.array', (['remaining_ids_t1'], {}), '(remaining_ids_t1)\n', (66687, 66705), True, 'import numpy as np\n'), ((91764, 91775), 'time.time', 'time.time', ([], {}), '()\n', (91773, 91775), False, 'import time\n'), ((4413, 4424), 'time.time', 'time.time', ([], {}), '()\n', (4422, 4424), False, 'import time\n'), ((5908, 5967), 'scipy.sparse.save_npz', 'ssp.save_npz', (["(file_name + f'_SM{SM}.npz')", 'similarity_matrix'], {}), "(file_name + f'_SM{SM}.npz', similarity_matrix)\n", (5920, 5967), True, 'import scipy.sparse as ssp\n'), ((10015, 10039), 'numpy.nonzero', 'np.nonzero', (['time_index_t'], {}), '(time_index_t)\n', (10025, 10039), True, 'import numpy as np\n'), ((10266, 10286), 'numpy.sum', 'np.sum', (['time_index_t'], {}), '(time_index_t)\n', (10272, 10286), True, 'import numpy as np\n'), ((10528, 10552), 'numpy.nonzero', 'np.nonzero', (['time_index_t'], {}), '(time_index_t)\n', (10538, 10552), True, 'import numpy as np\n'), ((10780, 10800), 'numpy.sum', 'np.sum', (['time_index_t'], {}), '(time_index_t)\n', (10786, 10800), True, 'import numpy as np\n'), ((30804, 30815), 'time.time', 'time.time', ([], {}), '()\n', (30813, 30815), False, 'import time\n'), ((66557, 66583), 'numpy.array', 'np.array', (['remaining_ids_t1'], {}), '(remaining_ids_t1)\n', (66565, 66583), True, 'import numpy as np\n'), ((78071, 78082), 'time.time', 'time.time', ([], {}), '()\n', (78080, 78082), False, 'import time\n'), ((86047, 86058), 'time.time', 
'time.time', ([], {}), '()\n', (86056, 86058), False, 'import time\n'), ((89737, 89803), 'os.path.exists', 'os.path.exists', (["(similarity_file_name + f'_SM{round_of_smooth}.npz')"], {}), "(similarity_file_name + f'_SM{round_of_smooth}.npz')\n", (89751, 89803), False, 'import os\n'), ((5326, 5337), 'time.time', 'time.time', ([], {}), '()\n', (5335, 5337), False, 'import time\n'), ((18721, 18737), 'numpy.nonzero', 'np.nonzero', (['idx1'], {}), '(idx1)\n', (18731, 18737), True, 'import numpy as np\n'), ((18772, 18788), 'numpy.nonzero', 'np.nonzero', (['idx2'], {}), '(idx2)\n', (18782, 18788), True, 'import numpy as np\n'), ((19220, 19243), 'numpy.mean', 'np.mean', (['idx1[idx1 > 0]'], {}), '(idx1[idx1 > 0])\n', (19227, 19243), True, 'import numpy as np\n'), ((19242, 19265), 'numpy.mean', 'np.mean', (['idx2[idx2 > 0]'], {}), '(idx2[idx2 > 0])\n', (19249, 19265), True, 'import numpy as np\n'), ((23574, 23590), 'numpy.nonzero', 'np.nonzero', (['idx1'], {}), '(idx1)\n', (23584, 23590), True, 'import numpy as np\n'), ((23625, 23641), 'numpy.nonzero', 'np.nonzero', (['idx2'], {}), '(idx2)\n', (23635, 23641), True, 'import numpy as np\n'), ((24077, 24100), 'numpy.mean', 'np.mean', (['idx1[idx1 > 0]'], {}), '(idx1[idx1 > 0])\n', (24084, 24100), True, 'import numpy as np\n'), ((24099, 24122), 'numpy.mean', 'np.mean', (['idx2[idx2 > 0]'], {}), '(idx2[idx2 > 0])\n', (24106, 24122), True, 'import numpy as np\n'), ((54516, 54527), 'time.time', 'time.time', ([], {}), '()\n', (54525, 54527), False, 'import time\n'), ((56713, 56724), 'time.time', 'time.time', ([], {}), '()\n', (56722, 56724), False, 'import time\n'), ((94057, 94092), 'numpy.nonzero', 'np.nonzero', (['(fate_entropy_clone == 1)'], {}), '(fate_entropy_clone == 1)\n', (94067, 94092), True, 'import numpy as np\n'), ((11924, 11960), 'numpy.nonzero', 'np.nonzero', (['(time_info_orig == t_temp)'], {}), '(time_info_orig == t_temp)\n', (11934, 11960), True, 'import numpy as np\n'), ((12185, 12221), 'numpy.nonzero', 
'np.nonzero', (['(time_info_orig == t_temp)'], {}), '(time_info_orig == t_temp)\n', (12195, 12221), True, 'import numpy as np\n'), ((19112, 19124), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (19118, 19124), True, 'import numpy as np\n'), ((23968, 23980), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (23974, 23980), True, 'import numpy as np\n'), ((54245, 54256), 'time.time', 'time.time', ([], {}), '()\n', (54254, 54256), False, 'import time\n'), ((91358, 91369), 'time.time', 'time.time', ([], {}), '()\n', (91367, 91369), False, 'import time\n'), ((19009, 19024), 'numpy.sum', 'np.sum', (['prob', '(1)'], {}), '(prob, 1)\n', (19015, 19024), True, 'import numpy as np\n'), ((23865, 23880), 'numpy.sum', 'np.sum', (['prob', '(1)'], {}), '(prob, 1)\n', (23871, 23880), True, 'import numpy as np\n'), ((92348, 92359), 'time.time', 'time.time', ([], {}), '()\n', (92357, 92359), False, 'import time\n')] |
import pygame
import numpy as np
class Obstacle:
    """Axis-aligned rectangular obstacle anchored at its top-left corner."""

    def __init__(self, origin, w, y):
        # origin: top-left corner (array-like); w / y: extents along x and y.
        self.origin = origin
        self.x = w
        self.y = y

    def render(self, screen, ppm=1):
        """Draw the obstacle as a filled dark-red rectangle (ppm = pixels per meter)."""
        rect = pygame.Rect(ppm * self.origin[0], ppm * self.origin[1],
                           ppm * self.x, ppm * self.y)
        pygame.draw.rect(screen, (50, 0, 0, 1), rect, 0)

    def in_collision(self, pt):
        """Return True iff pt lies strictly inside the rectangle."""
        far_corner = self.origin + np.array((self.x, self.y))
        return bool((pt > self.origin).all() and (pt < far_corner).all())
class Quicksand:
    """Circular hazard region defined by a center point and a radius."""

    def __init__(self, origin, r):
        # origin: circle center (array-like); r: radius.
        self.origin = origin
        self.r = r

    def render(self, screen, ppm=1):
        """Draw the quicksand as a filled sand-colored circle (ppm = pixels per meter)."""
        center = (int(ppm * self.origin[0]), int(ppm * self.origin[1]))
        pygame.draw.circle(screen, (242, 229, 194, 1), center, int(ppm * self.r), 0)

    def in_collision(self, pt):
        """Return True iff pt lies strictly inside the circle.

        Fix: removed the leftover debug print that spammed stdout on every
        positive contact check.
        """
        return bool(np.linalg.norm(np.subtract(pt, self.origin)) < self.r)
def two_openings_obstacles(ppm=100.0):
    """Build a horizontal wall made of three segments with two gaps.

    Fixes/changes:
    * removed the leftover debug print of ``dist_down``;
    * generalized the hard-coded pixels-per-meter scale into the ``ppm``
      parameter (default keeps the previous behavior).

    Args:
        ppm: pixels-per-meter scale used to convert pixel sizes to world units.

    Returns:
        list of three Obstacle segments; consecutive segments are separated
        by a gap of width 35/ppm.
    """
    gap = 35 / ppm
    length = 80 / ppm
    dist_down = 150 / ppm
    thickness = 20 / ppm
    # x-origins of the three wall segments, left to right
    x_origins = (0.001, length + gap, 2 * length + 2 * gap)
    return [Obstacle(np.array((x, dist_down)), length, thickness)
            for x in x_origins]
"""
line centered a few cm below the goal in the y direction, centered in the x direction
"""
def line_world_obstacles(goal):
    """Return a single thin horizontal Obstacle centered just below `goal`."""
    length = 0.15
    thickness = 0.03
    offset = 0.02
    # top-left corner: centered on the goal in x, shifted down by `offset` in y
    top_left = np.array((goal[0] - length / 2.0, goal[1] + offset))
    return [Obstacle(top_left, length, thickness)]
| [
"pygame.draw.rect",
"pygame.Rect",
"numpy.array",
"numpy.subtract"
] | [((211, 298), 'pygame.Rect', 'pygame.Rect', (['(ppm * self.origin[0])', '(ppm * self.origin[1])', '(ppm * self.x)', '(ppm * self.y)'], {}), '(ppm * self.origin[0], ppm * self.origin[1], ppm * self.x, ppm *\n self.y)\n', (222, 298), False, 'import pygame\n'), ((294, 345), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(50, 0, 0, 1)', 'py_rect', '(0)'], {}), '(screen, (50, 0, 0, 1), py_rect, 0)\n', (310, 345), False, 'import pygame\n'), ((1143, 1171), 'numpy.array', 'np.array', (['(0.001, dist_down)'], {}), '((0.001, dist_down))\n', (1151, 1171), True, 'import numpy as np\n'), ((1221, 1256), 'numpy.array', 'np.array', (['(length + gap, dist_down)'], {}), '((length + gap, dist_down))\n', (1229, 1256), True, 'import numpy as np\n'), ((1305, 1348), 'numpy.array', 'np.array', (['(2 * length + 2 * gap, dist_down)'], {}), '((2 * length + 2 * gap, dist_down))\n', (1313, 1348), True, 'import numpy as np\n'), ((1637, 1653), 'numpy.array', 'np.array', (['(x, y)'], {}), '((x, y))\n', (1645, 1653), True, 'import numpy as np\n'), ((836, 864), 'numpy.subtract', 'np.subtract', (['pt', 'self.origin'], {}), '(pt, self.origin)\n', (847, 864), True, 'import numpy as np\n'), ((433, 459), 'numpy.array', 'np.array', (['(self.x, self.y)'], {}), '((self.x, self.y))\n', (441, 459), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Convert NetCDF4 files to ASCII (plain text)
"""
import sys
import argparse
import numpy as np
import netCDF4
args = []
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='convert file from netCDF to ASCII format',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-i', '--input', dest='infile', type=str,
                        default='../data/example_dataHIT.nc',
                        help='input NetCDF file', metavar='FILE')
    parser.add_argument('-o', '--output', dest='outfile', type=str,
                        default='../data/test_dataHIT_ascii.dat',
                        help='output ASCII file', metavar='FILE')
    args = parser.parse_args()

    # Read the velocity components; exit cleanly if the input is unreadable.
    try:
        datafile_read = netCDF4.Dataset(args.infile, 'r')
    except IOError:
        print('There was an error opening the file!')
        sys.exit()
    u = np.array(datafile_read.variables['velocity_x'][:, :, :])
    v = np.array(datafile_read.variables['velocity_y'][:, :, :])
    # velocity_z is loaded but never written below; kept so a missing
    # variable is still detected early.
    w = np.array(datafile_read.variables['velocity_z'][:, :, :])
    datafile_read.close()  # fix: the dataset handle was previously never closed

    print("Converting {:s} file to {:s} file".format(args.infile, args.outfile))

    try:
        outfile = open(args.outfile, 'w')
    except IOError:
        print('There was an error writing the file!')
        sys.exit()
    # fix: `with` guarantees the output file is closed even if a write fails
    with outfile:
        for k in range(len(u)):
            outfile.write('x y u v \n')
            # NOTE(review): i ranges over the last axis of u and j over the
            # last axis of v, yet samples are indexed u[k, j, i] -- kept
            # verbatim so the output is unchanged, but worth double-checking.
            for i in range(u[0, 0].size):
                for j in range(v[0, 0].size):
                    outfile.write(str(i) + ' ' + str(j) + ' ' + str(u[k, j, i]) + ' ' + str(v[k, j, i]) + '\n')
# This routine reads the ASCII file in place of the netCDF file;
# it is used to check that the exported ASCII data match the original file.
# Put this in vortexfitting.py after "a = VelocityField(args.infilename, args.timestep)"
# a.uu = []
# a.vv = []
# infile = open('ascii/DNS_zPlane0.dat', 'r')
# lines = infile.readlines()[1:]
# for x in lines:
# a.uu.append(float(x.split(' ')[2]))
# a.vv.append(float(x.split(' ')[3]))
# a.uu = np.array(a.uu)
# a.vv = np.array(a.vv)
# a.uu = a.uu.reshape(a.u[:, 0].size, a.u[0, :].size)
# a.vv = a.vv.reshape(a.v[:, 0].size, a.v[0, :].size)
# a.u = a.uu
# a.v = a.vv
| [
"netCDF4.Dataset",
"numpy.array",
"argparse.ArgumentParser",
"sys.exit"
] | [((951, 1007), 'numpy.array', 'np.array', (["datafile_read.variables['velocity_x'][:, :, :]"], {}), "(datafile_read.variables['velocity_x'][:, :, :])\n", (959, 1007), True, 'import numpy as np\n'), ((1012, 1068), 'numpy.array', 'np.array', (["datafile_read.variables['velocity_y'][:, :, :]"], {}), "(datafile_read.variables['velocity_y'][:, :, :])\n", (1020, 1068), True, 'import numpy as np\n'), ((1073, 1129), 'numpy.array', 'np.array', (["datafile_read.variables['velocity_z'][:, :, :]"], {}), "(datafile_read.variables['velocity_z'][:, :, :])\n", (1081, 1129), True, 'import numpy as np\n'), ((190, 326), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""convert file from netCDF to ASCII format"""', 'formatter_class': 'argparse.RawTextHelpFormatter'}), "(description=\n 'convert file from netCDF to ASCII format', formatter_class=argparse.\n RawTextHelpFormatter)\n", (213, 326), False, 'import argparse\n'), ((831, 864), 'netCDF4.Dataset', 'netCDF4.Dataset', (['args.infile', '"""r"""'], {}), "(args.infile, 'r')\n", (846, 864), False, 'import netCDF4\n'), ((935, 945), 'sys.exit', 'sys.exit', ([], {}), '()\n', (943, 945), False, 'import sys\n'), ((1345, 1355), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1353, 1355), False, 'import sys\n')] |
import torch
from .disc_loss import DiscriminativeLoss
from .unet import UNet
from pytorch_lightning.core.lightning import LightningModule
from defects_dlmbl.segment_affinities import mutex_watershed
import numpy as np
from skimage.io import imsave
import io
import PIL
from sklearn.decomposition import PCA
from inferno.extensions.criteria import set_similarity_measures as sim
from cremi_tools.metrics import cremi_metrics
import matplotlib.pyplot as plt
import torch.nn.functional as F
class UNetModule(LightningModule):
    """Predict pixel affinities with a valid-padded 2D U-Net and segment
    them with a mutex watershed.

    Fixes over the previous revision:
    * ``offsets`` no longer uses a mutable default argument; the default
      list is resolved inside ``__init__``.
    * the falsy-``offsets`` fallback is now applied *before* ``final_conv``
      is built (previously ``len(self.offsets)`` was evaluated first, so
      ``offsets=[]`` produced a 0-channel conv and ``offsets=None`` raised
      a ``TypeError``).
    * duplicate ``self.offsets`` / ``self.separating_channel`` assignments
      and a commented-out BCE loss line were removed.
    """

    def __init__(self, num_fmaps=18, inc_factors=3, depth=4, offsets=None,
                 separating_channel=2, image_dir="images"):
        """Args:
            num_fmaps: feature maps in the first U-Net level.
            inc_factors: feature-map growth factor per level.
            depth: number of U-Net levels (depth - 1 downsamplings).
            offsets: affinity neighbourhood offsets. ``None`` (the default)
                selects ``[[-1,0],[0,-1],[-9,0],[0,-9]]``; an explicitly
                falsy value (e.g. ``[]``) selects nearest-neighbour only,
                matching the previous fallback behavior.
            separating_channel: channel index splitting attractive from
                repulsive offsets for the mutex watershed.
            image_dir: directory where periodic snapshots are written.
        """
        super().__init__()
        # Resolve offsets before anything that depends on their count.
        if offsets is None:
            offsets = [[-1, 0], [0, -1], [-9, 0], [0, -9]]
        elif not offsets:
            offsets = [[-1, 0], [0, -1]]
        self.offsets = offsets
        self.num_fmaps = num_fmaps
        self.separating_channel = separating_channel
        self.unet = UNet(in_channels=1,
                         num_fmaps=num_fmaps,
                         fmap_inc_factors=inc_factors,
                         downsample_factors=[[2, 2] for _ in range(depth - 1)],
                         padding='valid')
        # one logit channel per affinity offset
        self.final_conv = torch.nn.Conv2d(num_fmaps, len(self.offsets), 1)
        self.DiceLoss = sim.SorensenDiceLoss()
        self.image_dir = image_dir

    def forward(self, x):
        """U-Net features followed by a 1x1 conv to per-offset logits."""
        x = self.unet(x)
        x = self.final_conv(x)
        return x

    def _center_crop(self, tensor, crop_val):
        """Center-crop the last two spatial axes by crop_val on each side.

        NOTE(review): assumes crop_val > 0 (always true for this
        valid-padded U-Net); a zero crop would slice to an empty tensor.
        """
        return tensor[:, :, crop_val:-crop_val, crop_val:-crop_val]

    def training_step(self, batch, batch_idx):
        """One optimization step: Dice loss on sigmoid affinities, with
        periodic image logging (every 100 steps) and disk snapshots
        (every 1000 steps)."""
        x, y, gt_seg = batch
        logits = self(x)
        # Valid padding shrinks the output; crop targets (and the input,
        # for logging) to the same spatial size.
        crop_val = (y.shape[-1] - logits.shape[-1]) / 2
        assert crop_val == int(crop_val), "Can't crop by an odd total pixel count"
        crop_val = int(crop_val)
        y = self._center_crop(y, crop_val)
        gt_seg = self._center_crop(gt_seg, crop_val)
        x = self._center_crop(x, crop_val)
        y = y.float()
        logits *= (y != -1).float()  # ignore label -1
        # SDL input shape expects [b, c, ...]
        py = torch.sigmoid(logits)
        loss = self.DiceLoss(py, y)
        # shifted by the channel count (presumably so the logged loss is
        # non-negative -- TODO confirm the range of SorensenDiceLoss)
        loss = loss + len(self.offsets)
        logger = self.logger.experiment
        self.log('train_loss', loss)
        if self.global_step % 100 == 0:
            logger.add_image('image', x[0], self.global_step)
            affinity_image = torch.sigmoid(logits)
            logger.add_image('affinity', affinity_image[0], self.global_step)
            affinity_image = affinity_image.cpu().detach().numpy()
            segmentation = mutex_watershed(affinity_image, self.offsets, self.separating_channel, strides=None)
            logger.add_image('segmentation', segmentation[0], self.global_step, dataformats='CHW')
            logger.add_image('GT', y[0], self.global_step)
            if self.global_step % 1000 == 0:
                imsave(f'{self.image_dir}/{self.global_step}_segmentation.tif', segmentation.astype(np.uint16))
                imsave(f'{self.image_dir}/{self.global_step}_affinity.tif', affinity_image)
                imsave(f'{self.image_dir}/{self.global_step}_gt.tif', gt_seg[0].cpu().detach().numpy())
                imsave(f'{self.image_dir}/{self.global_step}_image.tif', x[0].cpu().detach().numpy())
            scores = cremi_metrics.cremi_scores(segmentation, gt_seg.cpu().detach().numpy())
            self.log("performance", scores)
        return loss

    def validation_step(self, batch, batch_idx):
        """Validation: same loss as training plus CREMI scores on the
        mutex-watershed segmentation."""
        x, y, gt_seg = batch
        logits = self(x)
        crop_val = (y.shape[-1] - logits.shape[-1]) / 2
        assert crop_val == int(crop_val), "Can't crop by an odd total pixel count"
        crop_val = int(crop_val)
        y = self._center_crop(y, crop_val)
        gt_seg = self._center_crop(gt_seg, crop_val)
        y = y.float()
        logits *= (y != -1).float()  # ignore label -1
        # SDL input shape expects [b, c, ...]
        py = torch.sigmoid(logits)
        val_loss = self.DiceLoss(py, y)
        val_loss = val_loss + len(self.offsets)
        affinity_image = torch.sigmoid(logits).cpu().detach().numpy()
        segmentation = mutex_watershed(affinity_image, self.offsets, self.separating_channel, strides=None)
        val_scores = cremi_metrics.cremi_scores(segmentation, gt_seg.cpu().numpy())
        self.log("val_loss", val_loss, prog_bar=True, on_epoch=True)
        self.log("val_performance", val_scores)
        return val_loss

    def configure_optimizers(self):
        """Adam with a fixed 1e-4 learning rate."""
        return torch.optim.Adam(self.parameters(), lr=1e-4)
class UNetModuleWithMetricAuxiliary(UNetModule):
    """UNetModule with an auxiliary metric-learning head: the final conv
    emits ``len(offsets)`` affinity channels followed by
    ``metric_dimensions`` embedding channels, trained jointly with a
    discriminative loss and visualized via PCA.

    Fix over the previous revision: the every-100-steps snapshot block in
    ``training_step`` referenced ``segmentation`` / ``affinity_image``
    that were only defined inside the sibling every-1000-steps logging
    branch, raising a ``NameError`` at steps 100, 200, ...  The snapshot
    block now computes its own arrays.
    """

    def __init__(self, metric_dimensions=16, loss_alpha=0.5, **kwargs):
        """Args:
            metric_dimensions: number of embedding channels appended after
                the affinity channels.
            loss_alpha: weight of the affinity loss; the metric loss is
                weighted by ``1 - loss_alpha``.
            **kwargs: forwarded to ``UNetModule.__init__``.
        """
        super().__init__(**kwargs)
        self.metric_dimensions = metric_dimensions
        self.loss_alpha = loss_alpha
        # total output channels: affinities first, then the embedding
        self.output_dims = len(self.offsets) + self.metric_dimensions
        # rebuild the head so it also emits the embedding channels
        self.final_conv = torch.nn.Conv2d(self.num_fmaps, self.output_dims, 1)
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.DiscriminativeLoss = DiscriminativeLoss(device)
        # PCA instance reused for all embedding visualizations
        self.pca = PCA()

    def calculate_loss(self, logits_aff, logits_metric, gt_aff, gt_seg):
        """Weighted sum of the per-channel-normalized affinity Dice loss
        and the discriminative embedding loss."""
        # affinity loss
        py = torch.sigmoid(logits_aff)
        loss_aff = self.DiceLoss(py, gt_aff)
        # shift and scale by the channel count to normalize per offset
        loss_aff += len(self.offsets)
        loss_aff /= len(self.offsets)
        # metric loss
        loss_metric = self.DiscriminativeLoss(logits_metric, gt_seg)
        return self.loss_alpha * loss_aff + (1 - self.loss_alpha) * loss_metric

    def scatter_metric_pca(self, logits_metric, sample=3):
        """Subsample the embedding, PCA it, and return a scatter plot of
        the first two components rendered to an image tensor."""
        sampled = logits_metric[..., ::sample, ::sample].reshape(self.metric_dimensions, -1)
        sampled_pca = self.pca.fit_transform(sampled.T)
        fig, ax = plt.subplots(1, 1, figsize=(10, 10))
        ax.scatter(*sampled_pca.T[:2])
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        plt.close(fig)
        p = PIL.Image.open(buf)
        return torch.tensor(np.array(p))

    def scatter_metric_pca_from_image(self, image_metric_pca, sample=3):
        """Like ``scatter_metric_pca`` but reuses an already-PCA'd image:
        subsample pixels and scatter the first two PCA channels."""
        sampled_pca = image_metric_pca[:2, ::sample, ::sample]
        fig, ax = plt.subplots(1, 1, figsize=(10, 10))
        ax.scatter(*sampled_pca[:2])
        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        plt.close(fig)
        p = PIL.Image.open(buf)
        return torch.tensor(np.array(p))

    def image_metric_pca(self, logits_metric, return_dimensions=3):
        """PCA the (C, H, W) embedding pixel-wise and return the first
        ``return_dimensions`` component maps rescaled to [0, 1]."""
        full_pca = self.pca.fit_transform(logits_metric.reshape(self.metric_dimensions, -1).T)
        full_pca = full_pca.T.reshape(logits_metric.shape)[:return_dimensions]
        full_pca -= full_pca.min(axis=(-2, -1), keepdims=True)
        full_pca /= full_pca.max(axis=(-2, -1), keepdims=True)
        return full_pca

    def training_step(self, batch, batch_idx):
        """Joint affinity + embedding step, with TensorBoard logging every
        1000 steps and disk snapshots every 100 steps."""
        x, gt_aff, gt_seg = batch
        logits = self(x)
        # crop targets/input to the valid-padded output size
        crop_val = (gt_aff.shape[-1] - logits.shape[-1]) / 2
        assert crop_val == int(crop_val), "Can't crop by an odd total pixel count"
        crop_val = int(crop_val)
        gt_aff = gt_aff[:, :, crop_val:-crop_val, crop_val:-crop_val]
        gt_seg = gt_seg[:, :, crop_val:-crop_val, crop_val:-crop_val]
        x = x[:, :, crop_val:-crop_val, crop_val:-crop_val]
        gt_aff = gt_aff.float()
        # split the head output into affinity and embedding channels
        logits_aff = logits[:, :len(self.offsets)]
        logits_metric = logits[:, len(self.offsets):]
        assert self.metric_dimensions == logits_metric.shape[1], "logits_metric channels do not match metric_dimensions"
        logits_aff *= (gt_aff != -1).float()  # ignore label -1
        loss = self.calculate_loss(logits_aff, logits_metric, gt_aff, gt_seg)
        logger = self.logger.experiment
        self.log('train_loss', loss)
        if self.global_step % 1000 == 0:
            logger.add_image('image', x[0], self.global_step)
            affinity_image = torch.sigmoid(logits_aff)
            logger.add_image('affinity', affinity_image[0], self.global_step)
            affinity_image = affinity_image.cpu().detach().numpy()
            segmentation = mutex_watershed(affinity_image, self.offsets, self.separating_channel, strides=None)
            logger.add_image('segmentation', segmentation[0], self.global_step, dataformats='CHW')
            # rescale the embedding to [0, 1] before the PCA visualizations
            logits_metric_viz = logits_metric[0].detach().cpu().numpy()
            logits_metric_viz -= logits_metric_viz.min(axis=(-2, -1), keepdims=True)
            logits_metric_viz /= logits_metric_viz.max(axis=(-2, -1), keepdims=True)
            image_metric_pca = self.image_metric_pca(logits_metric_viz)
            logger.add_image('metric_scatter', self.scatter_metric_pca_from_image(image_metric_pca), self.global_step, dataformats='HWC')
            logger.add_image('metric_image', image_metric_pca, self.global_step)
            logger.add_image('GT', gt_aff[0], self.global_step)
        if self.global_step % 100 == 0:
            # Fix: compute the arrays locally -- they were previously only
            # defined inside the %1000 branch above, so this branch crashed
            # with a NameError at every step that was a multiple of 100 but
            # not of 1000.
            affinity_image = torch.sigmoid(logits_aff).cpu().detach().numpy()
            segmentation = mutex_watershed(affinity_image, self.offsets, self.separating_channel, strides=None)
            imsave(f'{self.image_dir}/{self.global_step}_segmentation.tif', segmentation.astype(np.uint16))
            imsave(f'{self.image_dir}/{self.global_step}_affinity.tif', affinity_image)
            imsave(f'{self.image_dir}/{self.global_step}_gt.tif', gt_seg[0].cpu().detach().numpy())
            imsave(f'{self.image_dir}/{self.global_step}_image.tif', x[0].cpu().detach().numpy())
            scores = cremi_metrics.cremi_scores(segmentation, gt_seg.cpu().detach().numpy())
            self.log("performance", scores)
        return loss

    def validation_step(self, batch, batch_idx):
        """Validation: joint loss plus CREMI scores on the mutex-watershed
        segmentation of the affinity head."""
        x, gt_aff, gt_seg = batch
        logits = self(x)
        crop_val = (gt_aff.shape[-1] - logits.shape[-1]) / 2
        assert crop_val == int(crop_val), "Can't crop by an odd total pixel count"
        crop_val = int(crop_val)
        gt_aff = gt_aff[:, :, crop_val:-crop_val, crop_val:-crop_val]
        gt_seg = gt_seg[:, :, crop_val:-crop_val, crop_val:-crop_val]
        gt_aff = gt_aff.float()
        logits_aff = logits[:, :len(self.offsets)]
        logits_metric = logits[:, len(self.offsets):]
        logits_aff *= (gt_aff != -1).float()  # ignore label -1
        val_loss = self.calculate_loss(logits_aff, logits_metric, gt_aff, gt_seg)
        affinity_image = torch.sigmoid(logits_aff).cpu().detach().numpy()
        segmentation = mutex_watershed(affinity_image, self.offsets, self.separating_channel, strides=None)
        val_scores = cremi_metrics.cremi_scores(segmentation, gt_seg.cpu().numpy())
        self.log("val_loss", val_loss, prog_bar=True, on_epoch=True)
        self.log("val_performance", val_scores)
        return val_loss
class UNetModuleSemanticWithDistance(UNetModule):
    """Same UNet, but predict 2 layers instead of a million.

    Output channel 0 is a semantic (foreground) logit; output channel 1 is a
    signed-distance prediction. The sign of the distance (negative) is used as
    an alternative segmentation.
    """

    def __init__(self, num_fmaps=32, inc_factors=2, depth=4, image_dir="images"):
        """
        Args:
            num_fmaps (int): feature maps at the first U-Net level.
            inc_factors (int): per-level feature-map multiplication factor.
            depth (int): number of U-Net levels (depth-1 downsampling steps).
            image_dir (str): directory where debug TIFFs are written.
        """
        super().__init__()
        self.num_fmaps = num_fmaps
        self.unet = UNet(in_channels=1,
            num_fmaps=num_fmaps,
            fmap_inc_factors=inc_factors,
            downsample_factors=[[2, 2] for _ in range(depth - 1)],
            padding='valid')
        # Two output channels: semantic logit + signed distance.
        self.final_conv = torch.nn.Conv2d(num_fmaps, 2, 1)
        self.L1loss = torch.nn.L1Loss()
        self.DiceLoss = sim.SorensenDiceLoss()
        # Weight between the segmentation and distance terms of the loss.
        self.loss_alpha = 0.5
        self.tanh = torch.nn.Tanh()
        self.image_dir = image_dir

    def calculate_loss(self, seg, dist, gt_seg, gt_dist):
        """Weighted sum of Sorensen-Dice loss (segmentation) and L1 loss (distance)."""
        loss_seg = self.DiceLoss(seg, gt_seg)
        loss_dist = self.L1loss(dist, gt_dist)
        return self.loss_alpha * loss_seg + (1 - self.loss_alpha) * loss_dist

    def training_step(self, batch, batch_idx):
        x, y = batch  # batch size > 1
        logits = self(x)
        crop_val = (y.shape[-1] - logits.shape[-1]) / 2  # only works if square
        assert crop_val == int(crop_val), "Can't crop by an odd total pixel count"
        crop_val = int(crop_val)
        y = y[:, :, crop_val:-crop_val, crop_val:-crop_val]
        x = x[:, :, crop_val:-crop_val, crop_val:-crop_val]
        logits_seg = logits[:, :1]
        seg = torch.sigmoid(logits_seg)  # torch.sigmoid replaces deprecated F.sigmoid
        logits_dist = logits[:, 1:]
        # dist = self.tanh(logits_dist)
        # Clamp raw distance logits to a bounded range instead of tanh-squashing.
        dist = torch.clamp(logits_dist, -30, 30)
        # Segmentation implied by the signed distance (negative side).
        dist_seg = dist < 0
        gt_seg = y[:, :1]
        gt_dist = y[:, 1:]
        loss = self.calculate_loss(logits_seg, logits_dist, gt_seg, gt_dist)
        if self.global_step % 100 == 0:
            logger = self.logger.experiment
            logger.add_image('image', x[0], self.global_step)
            logger.add_image('segmentation', seg[0], self.global_step)
            logger.add_image('Distance based segmentation', dist_seg[0], self.global_step)
            logger.add_image('GT seg', gt_seg[0], self.global_step)
            logger.add_image('distance', dist[0], self.global_step)
            logger.add_image('GT dist', gt_dist[0], self.global_step)
            imsave(f'{self.image_dir}/{self.global_step}_learnedsegmentation.tif', seg.cpu().detach().numpy())
            imsave(f'{self.image_dir}/{self.global_step}_distance.tif', dist.cpu().detach().numpy())
            imsave(f'{self.image_dir}/{self.global_step}_distance_seg.tif', dist_seg.cpu().detach().numpy())
            imsave(f'{self.image_dir}/{self.global_step}_gt.tif', gt_seg.cpu().detach().numpy())
            imsave(f'{self.image_dir}/{self.global_step}_image.tif', x.cpu().detach().numpy())
        self.log('train_loss', loss)
        try:
            scores = cremi_metrics.cremi_scores(dist_seg.cpu().detach().numpy(), gt_seg.cpu().detach().numpy())
            self.log("performance", scores)
        except Exception:
            # cremi scores can fail on degenerate segmentations; don't kill training.
            pass
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        crop_val = (y.shape[-1] - logits.shape[-1]) / 2  # only works if square
        assert crop_val == int(crop_val), "Can't crop by an odd total pixel count"
        crop_val = int(crop_val)
        y = y[:, :, crop_val:-crop_val, crop_val:-crop_val]
        x = x[:, :, crop_val:-crop_val, crop_val:-crop_val]
        logits_seg = logits[:, :1]
        seg = torch.sigmoid(logits_seg)
        logits_dist = logits[:, 1:]
        dist = torch.clamp(logits_dist, -30, 30)
        # dist = self.tanh(logits_dist)
        gt_seg = y[:, :1]
        gt_dist = y[:, 1:]
        # NOTE(review): training_step feeds raw logits into calculate_loss while
        # this feeds sigmoid/clamped values, so train and val losses are not on
        # the same scale -- confirm which is intended.
        val_loss = self.calculate_loss(seg, dist, gt_seg, gt_dist)
        # BUG FIX: `segmentation` was undefined here; the resulting NameError was
        # silently swallowed by a bare except, so val_performance was never
        # logged. Score the distance-based segmentation instead.
        dist_seg = (dist < 0).cpu().detach().numpy()
        try:
            val_scores = cremi_metrics.cremi_scores(dist_seg, gt_seg.cpu().numpy())
            self.log("val_performance", val_scores)
        except Exception:
            # cremi scores can fail on degenerate segmentations; keep validating.
            pass
        self.log("val_loss", val_loss, prog_bar=True, on_epoch=True)
        return val_loss
| [
"io.BytesIO",
"skimage.io.imsave",
"torch.nn.L1Loss",
"torch.nn.Tanh",
"matplotlib.pyplot.close",
"torch.nn.Conv2d",
"inferno.extensions.criteria.set_similarity_measures.SorensenDiceLoss",
"PIL.Image.open",
"defects_dlmbl.segment_affinities.mutex_watershed",
"torch.sigmoid",
"sklearn.decompositi... | [((1203, 1225), 'inferno.extensions.criteria.set_similarity_measures.SorensenDiceLoss', 'sim.SorensenDiceLoss', ([], {}), '()\n', (1223, 1225), True, 'from inferno.extensions.criteria import set_similarity_measures as sim\n'), ((1834, 1855), 'torch.sigmoid', 'torch.sigmoid', (['logits'], {}), '(logits)\n', (1847, 1855), False, 'import torch\n'), ((3537, 3558), 'torch.sigmoid', 'torch.sigmoid', (['logits'], {}), '(logits)\n', (3550, 3558), False, 'import torch\n'), ((3715, 3803), 'defects_dlmbl.segment_affinities.mutex_watershed', 'mutex_watershed', (['affinity_image', 'self.offsets', 'self.separating_channel'], {'strides': 'None'}), '(affinity_image, self.offsets, self.separating_channel,\n strides=None)\n', (3730, 3803), False, 'from defects_dlmbl.segment_affinities import mutex_watershed\n'), ((4386, 4438), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['self.num_fmaps', 'self.output_dims', '(1)'], {}), '(self.num_fmaps, self.output_dims, 1)\n', (4401, 4438), False, 'import torch\n'), ((4580, 4585), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (4583, 4585), False, 'from sklearn.decomposition import PCA\n'), ((4678, 4703), 'torch.sigmoid', 'torch.sigmoid', (['logits_aff'], {}), '(logits_aff)\n', (4691, 4703), False, 'import torch\n'), ((5221, 5257), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 10)'}), '(1, 1, figsize=(10, 10))\n', (5233, 5257), True, 'import matplotlib.pyplot as plt\n'), ((5296, 5308), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5306, 5308), False, 'import io\n'), ((5311, 5341), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buf'], {'format': '"""png"""'}), "(buf, format='png')\n", (5322, 5341), True, 'import matplotlib.pyplot as plt\n'), ((5344, 5358), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5353, 5358), True, 'import matplotlib.pyplot as plt\n'), ((5365, 5384), 'PIL.Image.open', 'PIL.Image.open', (['buf'], {}), '(buf)\n', (5379, 5384), False, 'import 
PIL\n'), ((5641, 5677), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 10)'}), '(1, 1, figsize=(10, 10))\n', (5653, 5677), True, 'import matplotlib.pyplot as plt\n'), ((5714, 5726), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5724, 5726), False, 'import io\n'), ((5729, 5759), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buf'], {'format': '"""png"""'}), "(buf, format='png')\n", (5740, 5759), True, 'import matplotlib.pyplot as plt\n'), ((5762, 5776), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5771, 5776), True, 'import matplotlib.pyplot as plt\n'), ((5783, 5802), 'PIL.Image.open', 'PIL.Image.open', (['buf'], {}), '(buf)\n', (5797, 5802), False, 'import PIL\n'), ((9242, 9330), 'defects_dlmbl.segment_affinities.mutex_watershed', 'mutex_watershed', (['affinity_image', 'self.offsets', 'self.separating_channel'], {'strides': 'None'}), '(affinity_image, self.offsets, self.separating_channel,\n strides=None)\n', (9257, 9330), False, 'from defects_dlmbl.segment_affinities import mutex_watershed\n'), ((9981, 10013), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['num_fmaps', '(2)', '(1)'], {}), '(num_fmaps, 2, 1)\n', (9996, 10013), False, 'import torch\n'), ((10030, 10047), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (10045, 10047), False, 'import torch\n'), ((10066, 10088), 'inferno.extensions.criteria.set_similarity_measures.SorensenDiceLoss', 'sim.SorensenDiceLoss', ([], {}), '()\n', (10086, 10088), True, 'from inferno.extensions.criteria import set_similarity_measures as sim\n'), ((10127, 10142), 'torch.nn.Tanh', 'torch.nn.Tanh', ([], {}), '()\n', (10140, 10142), False, 'import torch\n'), ((10820, 10841), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['logits_seg'], {}), '(logits_seg)\n', (10829, 10841), True, 'import torch.nn.functional as F\n'), ((10917, 10950), 'torch.clamp', 'torch.clamp', (['logits_dist', '(-30)', '(30)'], {}), '(logits_dist, -30, 30)\n', (10928, 10950), False, 'import torch\n'), 
((12589, 12610), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['logits_seg'], {}), '(logits_seg)\n', (12598, 12610), True, 'import torch.nn.functional as F\n'), ((12651, 12684), 'torch.clamp', 'torch.clamp', (['logits_dist', '(-30)', '(30)'], {}), '(logits_dist, -30, 30)\n', (12662, 12684), False, 'import torch\n'), ((2157, 2178), 'torch.sigmoid', 'torch.sigmoid', (['logits'], {}), '(logits)\n', (2170, 2178), False, 'import torch\n'), ((2324, 2412), 'defects_dlmbl.segment_affinities.mutex_watershed', 'mutex_watershed', (['affinity_image', 'self.offsets', 'self.separating_channel'], {'strides': 'None'}), '(affinity_image, self.offsets, self.separating_channel,\n strides=None)\n', (2339, 2412), False, 'from defects_dlmbl.segment_affinities import mutex_watershed\n'), ((5407, 5418), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (5415, 5418), True, 'import numpy as np\n'), ((5825, 5836), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (5833, 5836), True, 'import numpy as np\n'), ((7143, 7168), 'torch.sigmoid', 'torch.sigmoid', (['logits_aff'], {}), '(logits_aff)\n', (7156, 7168), False, 'import torch\n'), ((7314, 7402), 'defects_dlmbl.segment_affinities.mutex_watershed', 'mutex_watershed', (['affinity_image', 'self.offsets', 'self.separating_channel'], {'strides': 'None'}), '(affinity_image, self.offsets, self.separating_channel,\n strides=None)\n', (7329, 7402), False, 'from defects_dlmbl.segment_affinities import mutex_watershed\n'), ((2690, 2765), 'skimage.io.imsave', 'imsave', (['f"""{self.image_dir}/{self.global_step}_affinity.tif"""', 'affinity_image'], {}), "(f'{self.image_dir}/{self.global_step}_affinity.tif', affinity_image)\n", (2696, 2765), False, 'from skimage.io import imsave\n'), ((4474, 4499), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4497, 4499), False, 'import torch\n'), ((8151, 8226), 'skimage.io.imsave', 'imsave', (['f"""{self.image_dir}/{self.global_step}_affinity.tif"""', 'affinity_image'], {}), 
"(f'{self.image_dir}/{self.global_step}_affinity.tif', affinity_image)\n", (8157, 8226), False, 'from skimage.io import imsave\n'), ((3652, 3673), 'torch.sigmoid', 'torch.sigmoid', (['logits'], {}), '(logits)\n', (3665, 3673), False, 'import torch\n'), ((9176, 9201), 'torch.sigmoid', 'torch.sigmoid', (['logits_aff'], {}), '(logits_aff)\n', (9189, 9201), False, 'import torch\n')] |
#!python3
#-*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
import random
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
#import torchvision.models as models
from utils.state import State
from utils.transition import Transition
from utils.random_replaymemory import RandomReplayMemory
from utils.permemory import PERMemory
from networks.maskNet import MaskNet
import pickle
#------------------------------------------------
class Brain:
    """Double-DQN agent brain.

    Wraps the policy/target networks, the replay memory (prioritized or
    uniform) and the optimizer, and exposes action selection, replay learning
    and (de)serialization helpers.
    """
    # Interval (managed by the caller) between target-network synchronizations.
    TARGET_UPDATE = 10

    def __init__(self, num_actions, batch_size=32, capacity=10000, gamma=0.99, prioritized=True, lr=0.0005):
        """
        Args:
            num_actions (int): size of the discrete action space.
            batch_size (int): minibatch size used by replay().
            capacity (int): replay memory capacity.
            gamma (float): reward discount factor.
            prioritized (bool): use prioritized experience replay when True.
            lr (float): Adam learning rate.
        """
        self.batch_size = batch_size
        self.gamma = gamma
        self.num_actions = num_actions
        self.prioritized = prioritized

        # Instantiate memory object
        if self.prioritized:
            print('* Prioritized Experience Replay Mode')
            self.memory = PERMemory(capacity)
        else:
            print('* Random Experience Replay Mode')
            self.memory = RandomReplayMemory(capacity)

        # Build policy and (frozen, eval-mode) target networks.
        self.policy_net = MaskNet(self.num_actions, duel=False)
        self.target_net = MaskNet(self.num_actions, duel=False)
        self.target_net.eval()

        # Set device type; GPU or CPU (Use GPU if available)
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        #self.device = torch.device('cpu')
        self.policy_net = self.policy_net.to(self.device)
        self.target_net = self.target_net.to(self.device)
        print('using device:', self.device)

        # Configure optimizer
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=lr)

    def replay(self):
        """Learn the policy-network weights from one minibatch of experience
        (double-DQN update with Huber loss)."""
        # Do nothing while size of memory is lower than batch size
        if len(self.memory) < self.batch_size:
            return

        # Extract datasets and their corresponding indices from memory
        transitions, indexes = self.memory.sample(self.batch_size)

        # Build the minibatch: turn batch_size transitions of
        # (state, action, next_state, reward) into batched fields
        # (states, actions, next_states, rewards).
        batch = Transition(*zip(*transitions))
        batch_state = State(*zip(*batch.state))
        batch_next_state = State(*zip(*batch.next_state))

        # Boolean mask of transitions that are non-terminal (next_state exists).
        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), dtype=torch.bool).to(self.device)

        # Concatenate each state component into device tensors of size
        # batch_size x feature-dims.
        pose_batch = Variable(torch.cat(batch_state.pose)).to(self.device)
        lidar_batch = Variable(torch.cat(batch_state.lidar)).to(self.device)
        image_batch = Variable(torch.cat(batch_state.image)).to(self.device)
        mask_batch = Variable(torch.cat(batch_state.mask)).to(self.device)
        action_batch = Variable(torch.cat(batch.action)).to(self.device)
        reward_batch = Variable(torch.cat(batch.reward)).to(self.device)
        non_final_next_poses = Variable(torch.cat([s for s in batch_next_state.pose if s is not None])).to(self.device)
        non_final_next_lidars = Variable(torch.cat([s for s in batch_next_state.lidar if s is not None])).to(self.device)
        non_final_next_images = Variable(torch.cat([s for s in batch_next_state.image if s is not None])).to(self.device)
        non_final_next_masks = Variable(torch.cat([s for s in batch_next_state.mask if s is not None])).to(self.device)

        # Switch the network to inference mode for Q-value estimation.
        self.policy_net.eval()

        # Q(s_t, a_t): gather picks, per row, the Q-value of the action that
        # was actually taken (batch_size x 1).
        state_action_values = self.policy_net(pose_batch, lidar_batch, image_batch, mask_batch).gather(1, action_batch)

        # max_a Q(s_{t+1}, a); stays zero for terminal transitions.
        next_state_values = Variable(torch.zeros(self.batch_size).type(torch.FloatTensor)).to(self.device)

        # Double DQN: select the argmax action with the policy net ...
        a_m = Variable(torch.zeros(self.batch_size).type(torch.LongTensor)).to(self.device)
        a_m[non_final_mask] = self.policy_net(non_final_next_poses,
                                              non_final_next_lidars,
                                              non_final_next_images,
                                              non_final_next_masks).max(1)[1].detach()
        a_m_non_final_next_states = a_m[non_final_mask].view(-1, 1)

        # ... and evaluate that action with the target net.
        next_state_values[non_final_mask] = self.target_net(
            non_final_next_poses,
            non_final_next_lidars,
            non_final_next_images,
            non_final_next_masks
        ).gather(1, a_m_non_final_next_states).detach().squeeze()

        # TD target: r + gamma * Q_target(s', argmax_a Q_policy(s', a)).
        expected_state_action_values = reward_batch + self.gamma * next_state_values
        expected_state_action_values = expected_state_action_values.unsqueeze(1)

        # Switch back to training mode before the update.
        self.policy_net.train()  # TODO: No need?

        # Huber loss between predicted and target Q-values.
        loss = F.smooth_l1_loss(state_action_values,
                                expected_state_action_values)

        # Optimize the policy network.
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Update priorities with the fresh TD errors (PER only).
        if self.prioritized and indexes is not None:
            for i, val in enumerate(state_action_values):
                td_err = abs(expected_state_action_values[i].item() - val.item())
                self.memory.update(indexes[i], td_err)

    def decide_action(self, state, episode, policy_mode="epsilon", debug=True):
        """Select an action for the given state.

        Args:
            state (State): state object
            episode (int): current episode
            policy_mode (str): exploration method
                - epsilon: deterministic policy with eps-greedy
                - boltzmann: stochastic policy by softmax
            debug (bool): True during training (explore); False for pure
                greedy inference.
        Returns:
            LongTensor of size 1x1 holding the chosen action index.
        """
        if not debug:
            # Inference only: always take the greedy action.
            self.policy_net.eval()
            # Set device type; GPU or CPU
            input_pose = Variable(state.pose).to(self.device)
            input_lidar = Variable(state.lidar).to(self.device)
            input_image = Variable(state.image).to(self.device)
            input_mask = Variable(state.mask).to(self.device)
            # Infer
            output = self.policy_net(input_pose, input_lidar, input_image, input_mask)
            action = output.data.max(1)[1].view(1, 1)
            return action

        if policy_mode == "epsilon":
            # Epsilon-greedy: gradually favor the greedy action as training
            # progresses.
            # epsilon = 0.5 * (1 / (episode + 1))
            if episode < 50:
                epsilon = 0.25
            elif episode < 100:
                epsilon = 0.15
            else:
                epsilon = 0.05

            if epsilon <= np.random.uniform(0, 1):
                self.policy_net.eval()  # switch the network to inference mode
                # Set device type; GPU or CPU
                input_pose = Variable(state.pose).to(self.device)
                input_lidar = Variable(state.lidar).to(self.device)
                input_image = Variable(state.image).to(self.device)
                input_mask = Variable(state.mask).to(self.device)
                # Infer
                output = self.policy_net(input_pose, input_lidar, input_image, input_mask)
                action = output.data.max(1)[1].view(1, 1)
                print("Q-values: {}, Action: {}".format(output[0], action.item()))
            else:
                # Explore: pick a uniformly random action.
                action = torch.LongTensor([[random.randrange(self.num_actions)]])
                action = action.to(self.device)
                print("Random action: {}".format(action.item()))
        elif policy_mode == "boltzmann":
            self.policy_net.eval()  # switch the network to inference mode
            # Set device type; GPU or CPU
            input_pose = Variable(state.pose).to(self.device)
            input_lidar = Variable(state.lidar).to(self.device)
            input_image = Variable(state.image).to(self.device)
            input_mask = Variable(state.mask).to(self.device)
            # Infer
            output = self.policy_net(input_pose, input_lidar, input_image, input_mask)
            # Sample an action from the softmax distribution over Q-values.
            prob = F.softmax(output, dim=1)
            action = torch.multinomial(prob, 1)
            print("Prob: {}, Action: {}".format(prob[0], action.item()))
        else:
            print("Error: policy_mode is 'epsilon' or 'boltzmann'")
            action = torch.LongTensor([[random.randrange(self.num_actions)]])
            action = action.to(self.device)

        return action  # LongTensor of size 1x1

    def save_model(self, path):
        """Save a policy-network checkpoint to `path`."""
        print('Saving model...: {}'.format(path))
        torch.save(self.policy_net.state_dict(), path)

    def load_model(self, path):
        """Load policy-network weights from `path` and sync the target net."""
        print('Loading model...: {}'.format(path))
        model = torch.load(path)
        self.policy_net.load_state_dict(model)
        self.update_target_network()

    def save_memory(self, path):
        """Pickle the replay memory to `path`."""
        print('Saving memory (size={})...: {}'.format(len(self.memory), path))
        with open(path, 'wb') as f:
            pickle.dump(self.memory, f)

    def load_memory(self, path):
        """Restore the replay memory from a pickle at `path`."""
        print('Loading memory...: {}'.format(path))
        with open(path, 'rb') as f:
            self.memory = pickle.load(f)
        print('Loaded memory (size={})'.format(len(self.memory)))

    def update_target_network(self):
        """Copy the policy-network weights into the target network."""
        self.target_net.load_state_dict(self.policy_net.state_dict())
| [
"utils.random_replaymemory.RandomReplayMemory",
"os.path.abspath",
"pickle.dump",
"numpy.random.uniform",
"torch.multinomial",
"torch.autograd.Variable",
"torch.load",
"torch.cat",
"torch.nn.functional.softmax",
"utils.permemory.PERMemory",
"pickle.load",
"torch.cuda.is_available",
"networks... | [((1220, 1257), 'networks.maskNet.MaskNet', 'MaskNet', (['self.num_actions'], {'duel': '(False)'}), '(self.num_actions, duel=False)\n', (1227, 1257), False, 'from networks.maskNet import MaskNet\n'), ((1284, 1321), 'networks.maskNet.MaskNet', 'MaskNet', (['self.num_actions'], {'duel': '(False)'}), '(self.num_actions, duel=False)\n', (1291, 1321), False, 'from networks.maskNet import MaskNet\n'), ((5988, 6055), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['state_action_values', 'expected_state_action_values'], {}), '(state_action_values, expected_state_action_values)\n', (6004, 6055), True, 'import torch.nn.functional as F\n'), ((9966, 9982), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (9976, 9982), False, 'import torch\n'), ((87, 112), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (102, 112), False, 'import os\n'), ((1027, 1046), 'utils.permemory.PERMemory', 'PERMemory', (['capacity'], {}), '(capacity)\n', (1036, 1046), False, 'from utils.permemory import PERMemory\n'), ((1140, 1168), 'utils.random_replaymemory.RandomReplayMemory', 'RandomReplayMemory', (['capacity'], {}), '(capacity)\n', (1158, 1168), False, 'from utils.random_replaymemory import RandomReplayMemory\n'), ((10228, 10255), 'pickle.dump', 'pickle.dump', (['self.memory', 'f'], {}), '(self.memory, f)\n', (10239, 10255), False, 'import pickle\n'), ((10405, 10419), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10416, 10419), False, 'import pickle\n'), ((1462, 1487), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1485, 1487), False, 'import torch\n'), ((7850, 7873), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (7867, 7873), True, 'import numpy as np\n'), ((9294, 9318), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (9303, 9318), True, 'import torch.nn.functional as F\n'), ((9341, 9367), 'torch.multinomial', 
'torch.multinomial', (['prob', '(1)'], {}), '(prob, 1)\n', (9358, 9367), False, 'import torch\n'), ((3096, 3123), 'torch.cat', 'torch.cat', (['batch_state.pose'], {}), '(batch_state.pose)\n', (3105, 3123), False, 'import torch\n'), ((3172, 3200), 'torch.cat', 'torch.cat', (['batch_state.lidar'], {}), '(batch_state.lidar)\n', (3181, 3200), False, 'import torch\n'), ((3249, 3277), 'torch.cat', 'torch.cat', (['batch_state.image'], {}), '(batch_state.image)\n', (3258, 3277), False, 'import torch\n'), ((3325, 3352), 'torch.cat', 'torch.cat', (['batch_state.mask'], {}), '(batch_state.mask)\n', (3334, 3352), False, 'import torch\n'), ((3403, 3426), 'torch.cat', 'torch.cat', (['batch.action'], {}), '(batch.action)\n', (3412, 3426), False, 'import torch\n'), ((3476, 3499), 'torch.cat', 'torch.cat', (['batch.reward'], {}), '(batch.reward)\n', (3485, 3499), False, 'import torch\n'), ((3558, 3620), 'torch.cat', 'torch.cat', (['[s for s in batch_next_state.pose if s is not None]'], {}), '([s for s in batch_next_state.pose if s is not None])\n', (3567, 3620), False, 'import torch\n'), ((3679, 3742), 'torch.cat', 'torch.cat', (['[s for s in batch_next_state.lidar if s is not None]'], {}), '([s for s in batch_next_state.lidar if s is not None])\n', (3688, 3742), False, 'import torch\n'), ((3801, 3864), 'torch.cat', 'torch.cat', (['[s for s in batch_next_state.image if s is not None]'], {}), '([s for s in batch_next_state.image if s is not None])\n', (3810, 3864), False, 'import torch\n'), ((3922, 3984), 'torch.cat', 'torch.cat', (['[s for s in batch_next_state.mask if s is not None]'], {}), '([s for s in batch_next_state.mask if s is not None])\n', (3931, 3984), False, 'import torch\n'), ((7108, 7128), 'torch.autograd.Variable', 'Variable', (['state.pose'], {}), '(state.pose)\n', (7116, 7128), False, 'from torch.autograd import Variable\n'), ((7171, 7192), 'torch.autograd.Variable', 'Variable', (['state.lidar'], {}), '(state.lidar)\n', (7179, 7192), False, 'from torch.autograd 
import Variable\n'), ((7235, 7256), 'torch.autograd.Variable', 'Variable', (['state.image'], {}), '(state.image)\n', (7243, 7256), False, 'from torch.autograd import Variable\n'), ((7298, 7318), 'torch.autograd.Variable', 'Variable', (['state.mask'], {}), '(state.mask)\n', (7306, 7318), False, 'from torch.autograd import Variable\n'), ((8012, 8032), 'torch.autograd.Variable', 'Variable', (['state.pose'], {}), '(state.pose)\n', (8020, 8032), False, 'from torch.autograd import Variable\n'), ((8079, 8100), 'torch.autograd.Variable', 'Variable', (['state.lidar'], {}), '(state.lidar)\n', (8087, 8100), False, 'from torch.autograd import Variable\n'), ((8147, 8168), 'torch.autograd.Variable', 'Variable', (['state.image'], {}), '(state.image)\n', (8155, 8168), False, 'from torch.autograd import Variable\n'), ((8214, 8234), 'torch.autograd.Variable', 'Variable', (['state.mask'], {}), '(state.mask)\n', (8222, 8234), False, 'from torch.autograd import Variable\n'), ((8940, 8960), 'torch.autograd.Variable', 'Variable', (['state.pose'], {}), '(state.pose)\n', (8948, 8960), False, 'from torch.autograd import Variable\n'), ((9003, 9024), 'torch.autograd.Variable', 'Variable', (['state.lidar'], {}), '(state.lidar)\n', (9011, 9024), False, 'from torch.autograd import Variable\n'), ((9067, 9088), 'torch.autograd.Variable', 'Variable', (['state.image'], {}), '(state.image)\n', (9075, 9088), False, 'from torch.autograd import Variable\n'), ((9130, 9150), 'torch.autograd.Variable', 'Variable', (['state.mask'], {}), '(state.mask)\n', (9138, 9150), False, 'from torch.autograd import Variable\n'), ((4542, 4570), 'torch.zeros', 'torch.zeros', (['self.batch_size'], {}), '(self.batch_size)\n', (4553, 4570), False, 'import torch\n'), ((4662, 4690), 'torch.zeros', 'torch.zeros', (['self.batch_size'], {}), '(self.batch_size)\n', (4673, 4690), False, 'import torch\n'), ((8622, 8656), 'random.randrange', 'random.randrange', (['self.num_actions'], {}), '(self.num_actions)\n', (8638, 8656), False, 
'import random\n'), ((9564, 9598), 'random.randrange', 'random.randrange', (['self.num_actions'], {}), '(self.num_actions)\n', (9580, 9598), False, 'import random\n')] |
import random
import gym
import numpy as np
# noinspection PyUnresolvedReferences
import rps3env
import tensorflow as tf
# Network input shape: (batch, 28 board cells, 1, 3 feature channels)
# matching the array produced by get_observation().
OBS_SHAPE = [None, 28, 1, 3]
# One Q-value per flattened (from_cell, to_cell) pair; actions are decoded
# with np.unravel_index(a, (28, 28)) in the training loop.
NUM_ACTIONS = 28 * 28
# Gradient-descent learning rate.
LEARN_RATE = 0.001
# Q-learning discount factor applied to the next state's max Q-value.
DECAY_RATE = 0.95
# Number of training episodes to run in main().
NUM_EPISODES = 100
def deep_conv_net(input_layer):
    """Build the Q-network graph: two conv/pool stages, then two dense layers.

    Args:
        input_layer: float32 placeholder shaped like OBS_SHAPE.
    Returns:
        A tensor of NUM_ACTIONS ReLU-activated Q-value outputs.
    """
    with tf.name_scope('conv'):
        # Stage 1: 9 filters over a 5-cell window, then 3x downsampling.
        net = tf.layers.conv2d(
            inputs=input_layer, filters=9, kernel_size=[5, 1], padding='same',
            activation=tf.nn.relu, name='conv1')
        net = tf.layers.max_pooling2d(
            inputs=net, pool_size=[3, 1], strides=3, name='pool1')
        # Stage 2: 18 filters over a 3-cell window, then a stride-1 pool.
        net = tf.layers.conv2d(
            inputs=net, filters=18, kernel_size=[3, 1], padding='same',
            activation=tf.nn.relu, name='conv2')
        net = tf.layers.max_pooling2d(
            inputs=net, pool_size=[2, 1], strides=1, name='pool2')
        net = tf.layers.flatten(net, name='conv_flat')
    with tf.name_scope('dense') as s:
        net = tf.contrib.layers.fully_connected(
            inputs=net, num_outputs=84, activation_fn=tf.nn.relu, scope=s)
    with tf.name_scope('output') as s:
        net = tf.contrib.layers.fully_connected(
            inputs=net, num_outputs=NUM_ACTIONS, activation_fn=tf.nn.relu, scope=s)
    return net
def get_observation(obs):
    """Convert the env's observation dict into a (28, 1, 3) float32 array.

    Channels along the last axis, in order: occupied, player_owned, piece_type.
    """
    keys = ('occupied', 'player_owned', 'piece_type')
    planes = [np.array(obs[key], dtype=np.float32).reshape([28, 1, 1]) for key in keys]
    return np.concatenate(planes, axis=2)
def get_available_actions(available_actions):
    """Encode a list of (from, to) moves as a flat 0/1 action mask.

    Args:
        available_actions: iterable of (from_index, to_index) pairs, each
            index in [0, 28).
    Returns:
        float64 array of length 28*28 with 1.0 at every legal flattened
        (from, to) action and 0.0 elsewhere.
    """
    board_shape = (28, 28)
    action_filter = np.zeros(board_shape[0] * board_shape[1])
    available_actions = list(available_actions)
    # np.ravel_multi_index rejects an empty coordinate list, so guard it;
    # an empty move list simply yields an all-zero mask.
    if available_actions:
        action_indices = np.ravel_multi_index(tuple(zip(*available_actions)), board_shape)
        # Direct index assignment instead of summing rows of a 784x784
        # identity matrix: O(k) work instead of O(k*784), and duplicate
        # moves still produce a 1.0 mask entry rather than a count.
        action_filter[action_indices] = 1.0
    return action_filter
def main():
    """Build the TF1 Q-learning graph and train on the RPS3 environment.

    Restores from the latest checkpoint in ./models when one exists, runs
    epsilon-greedy one-step Q-learning episodes, and logs summaries to
    ./graph for TensorBoard.
    """
    tf.reset_default_graph()
    # Placeholders: observation batch, TD-target batch, and a 0/1 mask of
    # currently legal actions (multiplied into the Q outputs below).
    state_input = tf.placeholder(tf.float32, OBS_SHAPE, name='state_input')
    expected_output = tf.placeholder(tf.float32, [None, NUM_ACTIONS], name='expected_output')
    available_actions = tf.placeholder(tf.float32, [None, NUM_ACTIONS], name='available_actions')
    train_summary_ops = []
    reward_summary_ops = []
    with tf.name_scope("deep_conv_net"):
        nn_output = deep_conv_net(state_input)
    train_summary_ops.append(tf.summary.histogram('nn_output', nn_output))
    # Mask out illegal actions so argmax only picks legal moves.
    filtered_output = tf.multiply(nn_output, available_actions, name='filtered_output')
    best_action = tf.argmax(filtered_output, axis=1, name='best_action')
    # MSE between the (unmasked) Q outputs and the TD targets.
    loss_fn = tf.losses.mean_squared_error(expected_output, nn_output)
    train_summary_ops.append(tf.summary.histogram('loss_fn', loss_fn))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=LEARN_RATE)
    train_op = optimizer.minimize(loss=loss_fn, global_step=tf.train.get_global_step())
    # Per-episode reward history, fed back in for summary plots.
    episode_rewards = tf.placeholder(tf.float32, (None, 1), name='episode_rewards')
    reward_summary_ops.append(tf.summary.scalar('mean_episode_reward', tf.reduce_mean(episode_rewards[-100:])))
    reward_summary_ops.append(tf.summary.scalar('last_episode_reward', episode_rewards[-1, 0]))
    reward_summary_ops.append(tf.summary.histogram('episode_reward_values', episode_rewards))
    # output graph for tensorboard
    summary_writer = tf.summary.FileWriter('graph')
    summary_writer.add_graph(tf.get_default_graph())
    # merge summary operators
    train_summaries = tf.summary.merge(train_summary_ops)
    reward_summaries = tf.summary.merge(reward_summary_ops)
    # create model saver
    saver = tf.train.Saver()
    # create environment
    env = gym.make('RPS3Game-v0')
    with tf.Session() as sess:
        # Resume from the latest checkpoint if its name carries an episode
        # suffix (saver.save(..., global_step=i) produces "deepconv-<i>").
        checkpoint = tf.train.latest_checkpoint("models")
        if checkpoint is not None and len(checkpoint.split('-')) > 1:
            print("Loading weights from checkpoint: {}".format(checkpoint))
            saver.restore(sess, checkpoint)
            start_episode = int(checkpoint.split('-')[-1])
        else:
            sess.run(tf.global_variables_initializer())
            start_episode = 0
        # Exploration probability decays with the episode count (updated again
        # at the end of every episode below).
        exploration_prob = 1. / ((start_episode / 50) + 10)
        episode_reward_values = []
        for i in range(start_episode, NUM_EPISODES):
            # initialize the environment consistently every time
            env.seed(0)
            env.reset()
            # Initial board setup move; see rps3env for the step() protocol.
            obs, reward, done, info = env.step([1, 2, 3] * 3)
            total_episode_reward = 0
            while not done:
                # env.render()
                obs_extracted = get_observation(obs)
                # Greedy action over legal moves, plus the raw masked Q-values.
                a_best, q_values = sess.run([best_action, filtered_output], feed_dict={
                    state_input: np.array([obs_extracted]),
                    available_actions: np.array([get_available_actions(env.available_actions)]),
                })
                # Explore randomly, or fall back to a random legal move when
                # every masked Q-value is zero (argmax would be meaningless).
                if np.random.rand(1) < exploration_prob or not q_values.max():
                    a_best[0] = np.ravel_multi_index(random.choice(env.available_actions), (28, 28))
                action = np.unravel_index(a_best[0], (28, 28))
                assert action in env.available_actions
                obs, reward, done, info = env.step(action)
                move_reward = sum(reward)
                total_episode_reward += move_reward
                # One-step Q-learning target: r + gamma * max_a Q(s', a).
                new_q_values = sess.run(nn_output, feed_dict={
                    state_input: np.array([get_observation(obs)])
                })
                target_q_values = q_values
                target_q_values[0, a_best[0]] = move_reward + DECAY_RATE * np.max(new_q_values)
                _, summary = sess.run([train_op, train_summaries], feed_dict={
                    state_input: np.array([obs_extracted]),
                    expected_output: target_q_values,
                })
                summary_writer.add_summary(summary, i)
            # env.render()
            print('Finished episode {} with total reward {}'.format(i, total_episode_reward))
            episode_reward_values.append(total_episode_reward)
            # reduce exploration probability gradually
            exploration_prob = 1. / ((i / 50) + 10)
            summary = sess.run(reward_summaries, feed_dict={
                episode_rewards: np.array(episode_reward_values).reshape((-1, 1))
            })
            summary_writer.add_summary(summary, i)
            if i % 10 == 0:
                # save the variables to disk
                saver.save(sess, "models/deepconv", global_step=i)
        saver.save(sess, "models/deepconv")
    env.close()
# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"tensorflow.reset_default_graph",
"tensorflow.multiply",
"tensorflow.train.latest_checkpoint",
"tensorflow.layers.max_pooling2d",
"tensorflow.get_default_graph",
"tensorflow.summary.merge",
"tensorflow.placeholder",
"numpy.max",
"tensorflow.summary.FileWriter",
"tensorflow.summary.histogram",
"t... | [((1838, 1862), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1860, 1862), True, 'import tensorflow as tf\n'), ((1882, 1939), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'OBS_SHAPE'], {'name': '"""state_input"""'}), "(tf.float32, OBS_SHAPE, name='state_input')\n", (1896, 1939), True, 'import tensorflow as tf\n'), ((1962, 2033), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, NUM_ACTIONS]'], {'name': '"""expected_output"""'}), "(tf.float32, [None, NUM_ACTIONS], name='expected_output')\n", (1976, 2033), True, 'import tensorflow as tf\n'), ((2058, 2131), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, NUM_ACTIONS]'], {'name': '"""available_actions"""'}), "(tf.float32, [None, NUM_ACTIONS], name='available_actions')\n", (2072, 2131), True, 'import tensorflow as tf\n'), ((2876, 2937), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 1)'], {'name': '"""episode_rewards"""'}), "(tf.float32, (None, 1), name='episode_rewards')\n", (2890, 2937), True, 'import tensorflow as tf\n'), ((3297, 3327), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""graph"""'], {}), "('graph')\n", (3318, 3327), True, 'import tensorflow as tf\n'), ((3434, 3469), 'tensorflow.summary.merge', 'tf.summary.merge', (['train_summary_ops'], {}), '(train_summary_ops)\n', (3450, 3469), True, 'import tensorflow as tf\n'), ((3493, 3529), 'tensorflow.summary.merge', 'tf.summary.merge', (['reward_summary_ops'], {}), '(reward_summary_ops)\n', (3509, 3529), True, 'import tensorflow as tf\n'), ((3568, 3584), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3582, 3584), True, 'import tensorflow as tf\n'), ((3621, 3644), 'gym.make', 'gym.make', (['"""RPS3Game-v0"""'], {}), "('RPS3Game-v0')\n", (3629, 3644), False, 'import gym\n'), ((275, 296), 'tensorflow.name_scope', 'tf.name_scope', (['"""conv"""'], {}), "('conv')\n", (288, 296), True, 'import tensorflow as tf\n'), ((314, 
439), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'input_layer', 'filters': '(9)', 'kernel_size': '[5, 1]', 'padding': '"""same"""', 'activation': 'tf.nn.relu', 'name': '"""conv1"""'}), "(inputs=input_layer, filters=9, kernel_size=[5, 1], padding\n ='same', activation=tf.nn.relu, name='conv1')\n", (330, 439), True, 'import tensorflow as tf\n'), ((473, 558), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv1', 'pool_size': '[3, 1]', 'strides': '(3)', 'name': '"""pool1"""'}), "(inputs=conv1, pool_size=[3, 1], strides=3, name='pool1'\n )\n", (496, 558), True, 'import tensorflow as tf\n'), ((592, 712), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'pool1', 'filters': '(18)', 'kernel_size': '[3, 1]', 'padding': '"""same"""', 'activation': 'tf.nn.relu', 'name': '"""conv2"""'}), "(inputs=pool1, filters=18, kernel_size=[3, 1], padding=\n 'same', activation=tf.nn.relu, name='conv2')\n", (608, 712), True, 'import tensorflow as tf\n'), ((746, 831), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv2', 'pool_size': '[2, 1]', 'strides': '(1)', 'name': '"""pool2"""'}), "(inputs=conv2, pool_size=[2, 1], strides=1, name='pool2'\n )\n", (769, 831), True, 'import tensorflow as tf\n'), ((870, 912), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['pool2'], {'name': '"""conv_flat"""'}), "(pool2, name='conv_flat')\n", (887, 912), True, 'import tensorflow as tf\n'), ((922, 944), 'tensorflow.name_scope', 'tf.name_scope', (['"""dense"""'], {}), "('dense')\n", (935, 944), True, 'import tensorflow as tf\n'), ((968, 1075), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', ([], {'inputs': 'pool2_flat', 'num_outputs': '(84)', 'activation_fn': 'tf.nn.relu', 'scope': 's'}), '(inputs=pool2_flat, num_outputs=84,\n activation_fn=tf.nn.relu, scope=s)\n', (1001, 1075), True, 'import tensorflow as tf\n'), ((1103, 1126), 'tensorflow.name_scope', 'tf.name_scope', 
(['"""output"""'], {}), "('output')\n", (1116, 1126), True, 'import tensorflow as tf\n'), ((1150, 1262), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', ([], {'inputs': 'dense1', 'num_outputs': 'NUM_ACTIONS', 'activation_fn': 'tf.nn.relu', 'scope': 's'}), '(inputs=dense1, num_outputs=NUM_ACTIONS,\n activation_fn=tf.nn.relu, scope=s)\n', (1183, 1262), True, 'import tensorflow as tf\n'), ((2198, 2228), 'tensorflow.name_scope', 'tf.name_scope', (['"""deep_conv_net"""'], {}), "('deep_conv_net')\n", (2211, 2228), True, 'import tensorflow as tf\n'), ((2384, 2449), 'tensorflow.multiply', 'tf.multiply', (['nn_output', 'available_actions'], {'name': '"""filtered_output"""'}), "(nn_output, available_actions, name='filtered_output')\n", (2395, 2449), True, 'import tensorflow as tf\n'), ((2473, 2527), 'tensorflow.argmax', 'tf.argmax', (['filtered_output'], {'axis': '(1)', 'name': '"""best_action"""'}), "(filtered_output, axis=1, name='best_action')\n", (2482, 2527), True, 'import tensorflow as tf\n'), ((2547, 2603), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['expected_output', 'nn_output'], {}), '(expected_output, nn_output)\n', (2575, 2603), True, 'import tensorflow as tf\n'), ((2701, 2760), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'LEARN_RATE'}), '(learning_rate=LEARN_RATE)\n', (2734, 2760), True, 'import tensorflow as tf\n'), ((3080, 3144), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""last_episode_reward"""', 'episode_rewards[-1, 0]'], {}), "('last_episode_reward', episode_rewards[-1, 0])\n", (3097, 3144), True, 'import tensorflow as tf\n'), ((3176, 3238), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""episode_reward_values"""', 'episode_rewards'], {}), "('episode_reward_values', episode_rewards)\n", (3196, 3238), True, 'import tensorflow as tf\n'), ((3357, 3379), 'tensorflow.get_default_graph', 'tf.get_default_graph', 
([], {}), '()\n', (3377, 3379), True, 'import tensorflow as tf\n'), ((3655, 3667), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3665, 3667), True, 'import tensorflow as tf\n'), ((3698, 3734), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['"""models"""'], {}), "('models')\n", (3724, 3734), True, 'import tensorflow as tf\n'), ((2311, 2355), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""nn_output"""', 'nn_output'], {}), "('nn_output', nn_output)\n", (2331, 2355), True, 'import tensorflow as tf\n'), ((2638, 2678), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""loss_fn"""', 'loss_fn'], {}), "('loss_fn', loss_fn)\n", (2658, 2678), True, 'import tensorflow as tf\n'), ((3009, 3047), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['episode_rewards[-100:]'], {}), '(episode_rewards[-100:])\n', (3023, 3047), True, 'import tensorflow as tf\n'), ((1747, 1766), 'numpy.eye', 'np.eye', (['NUM_ACTIONS'], {}), '(NUM_ACTIONS)\n', (1753, 1766), True, 'import numpy as np\n'), ((2825, 2851), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (2849, 2851), True, 'import tensorflow as tf\n'), ((4020, 4053), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4051, 4053), True, 'import tensorflow as tf\n'), ((5032, 5069), 'numpy.unravel_index', 'np.unravel_index', (['a_best[0]', '(28, 28)'], {}), '(a_best[0], (28, 28))\n', (5048, 5069), True, 'import numpy as np\n'), ((1363, 1406), 'numpy.array', 'np.array', (["obs['occupied']"], {'dtype': 'np.float32'}), "(obs['occupied'], dtype=np.float32)\n", (1371, 1406), True, 'import numpy as np\n'), ((1436, 1483), 'numpy.array', 'np.array', (["obs['player_owned']"], {'dtype': 'np.float32'}), "(obs['player_owned'], dtype=np.float32)\n", (1444, 1483), True, 'import numpy as np\n'), ((1513, 1558), 'numpy.array', 'np.array', (["obs['piece_type']"], {'dtype': 'np.float32'}), "(obs['piece_type'], dtype=np.float32)\n", 
(1521, 1558), True, 'import numpy as np\n'), ((4845, 4862), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (4859, 4862), True, 'import numpy as np\n'), ((4958, 4994), 'random.choice', 'random.choice', (['env.available_actions'], {}), '(env.available_actions)\n', (4971, 4994), False, 'import random\n'), ((5547, 5567), 'numpy.max', 'np.max', (['new_q_values'], {}), '(new_q_values)\n', (5553, 5567), True, 'import numpy as np\n'), ((4683, 4708), 'numpy.array', 'np.array', (['[obs_extracted]'], {}), '([obs_extracted])\n', (4691, 4708), True, 'import numpy as np\n'), ((5681, 5706), 'numpy.array', 'np.array', (['[obs_extracted]'], {}), '([obs_extracted])\n', (5689, 5706), True, 'import numpy as np\n'), ((6225, 6256), 'numpy.array', 'np.array', (['episode_reward_values'], {}), '(episode_reward_values)\n', (6233, 6256), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Unit tests for KinwaveImplicitOverlandFlowModel.
Created on Sat Apr 1 10:49:33 2017
@author: gtucker
"""
import numpy as np
from landlab import RasterModelGrid
from landlab.components import KinwaveImplicitOverlandFlow
def test_initialization():
"""Test initialization with various parameters."""
rg = RasterModelGrid((3, 4), xy_spacing=2.0)
rg.add_zeros("topographic__elevation", at="node")
kw = KinwaveImplicitOverlandFlow(rg)
# Make sure fields have been created
for field_name in kw._info:
if kw._info[field_name]["mapping"] == "node":
assert field_name in kw.grid.at_node
elif kw._info[field_name]["mapping"] == "link":
assert field_name in kw.grid.at_link
# Re-initialize, this time with fields already existing in the grid
# (this triggers the "if" instead of "else" in the field setup in init)
kw = KinwaveImplicitOverlandFlow(rg)
def test_first_iteration():
"""Test stuff that happens only on first iteration"""
# Create a basic ramp
rg = RasterModelGrid((10, 10), xy_spacing=(2, 2))
rg.add_field("topographic__elevation", 0.1 * rg.node_y, at="node")
# Create component and run it
kw = KinwaveImplicitOverlandFlow(rg)
kw.run_one_step(1.0)
# Max gradient should be 0.1, and min should be zero
assert round(np.amax(kw.grid.at_link["topographic__gradient"]), 2) == 0.1
assert round(np.amin(kw.grid.at_link["topographic__gradient"]), 2) == 0.0
assert round(np.amax(kw._sqrt_slope), 3) == 0.316
assert round(np.amax(kw._grad_width_sum), 3) == 0.632
assert round(np.amax(kw._alpha), 3) == 15.811
def test_steady_basic_ramp():
"""Run to steady state with basic ramp"""
# Create a basic ramp
rg = RasterModelGrid((10, 10), xy_spacing=(2, 2))
rg.add_field("topographic__elevation", 0.1 * rg.node_y, at="node")
# Create component and run it
kw = KinwaveImplicitOverlandFlow(rg, runoff_rate=0.001 * 3600000.0)
for i in range(12):
kw.run_one_step(1.0)
# Look at a column of nodes down the middle. The inflow from uphill should
# be, from top to bottom: 0, 0.004, 0.008, 0.012, 0.016, 0.02, 0.024, 0.028
assert kw._disch_in[85] == 0.0
assert round(kw._disch_in[75], 3) == 0.004
assert round(kw._disch_in[65], 3) == 0.008
assert round(kw._disch_in[55], 3) == 0.012
assert round(kw._disch_in[45], 3) == 0.016
assert round(kw._disch_in[35], 3) == 0.020
assert round(kw._disch_in[25], 3) == 0.024
assert round(kw._disch_in[15], 3) == 0.028
# Try with passing in runoff
kw = KinwaveImplicitOverlandFlow(rg, runoff_rate=360.0)
kw.depth[:] = 0.0
for i in range(22):
kw.run_one_step(1.0)
# Again, look at a column of nodes down the middle. The inflow from uphill
# should now be 1/10 of the prior example.
assert round(kw._disch_in[75], 4) == 0.0004
assert round(kw._disch_in[65], 4) == 0.0008
assert round(kw._disch_in[55], 4) == 0.0012
assert round(kw._disch_in[45], 4) == 0.0016
assert round(kw._disch_in[35], 4) == 0.0020
assert round(kw._disch_in[25], 4) == 0.0024
assert round(kw._disch_in[15], 4) == 0.0028
# Try with default runoff rate of 1 mm/hr = 2.78e-7 m/s
kw = KinwaveImplicitOverlandFlow(rg)
assert round(kw.runoff_rate * 1.0e7, 2) == 2.78
kw.depth[:] = 0.0
for i in range(18):
kw.run_one_step(10.0)
# Look at a column of nodes down the middle. The inflow from uphill should
# be, from top to bottom: 0, 0.004, 0.008, 0.012, 0.016, 0.02, 0.024, 0.028
assert kw._disch_in[85] == 0.0
assert round(kw._disch_in[75], 7) == 0.0000011
assert round(kw._disch_in[65], 7) == 0.0000022
assert round(kw._disch_in[55], 7) == 0.0000033
assert round(kw._disch_in[45], 7) == 0.0000044
assert round(kw._disch_in[35], 7) == 0.0000055
assert round(kw._disch_in[25], 7) == 0.0000066
assert round(kw._disch_in[15], 7) == 0.0000077
def test_curved_surface():
"""Test flow across a curved surface."""
# Create a grid
rg = RasterModelGrid((10, 10), xy_spacing=(2, 2))
rg.add_field(
"topographic__elevation", 3.0 * rg.node_x ** 2 + rg.node_y ** 2, at="node"
)
# Create component and run it
kw = KinwaveImplicitOverlandFlow(rg, runoff_rate=0.001 * 3600000.0)
for i in range(8):
kw.run_one_step(1.0)
# The inflow discharge to each cell at steady state should equal the
# runoff rate times the "inflow" drainage area, which is the total drainage
# area minus the area of the cell itself. Here we'll test a column of core
# nodes across the middle of the domain.
area = rg.at_node["drainage_area"]
runoff_rate = 0.001
unit_area = 4.0
for i in range(15, 95, 10):
assert round(kw._disch_in[i], 6) == round(
runoff_rate * (area[i] - unit_area), 6
)
if __name__ == "__main__":
test_initialization()
test_first_iteration()
test_steady_basic_ramp()
test_curved_surface()
| [
"numpy.amax",
"landlab.components.KinwaveImplicitOverlandFlow",
"landlab.RasterModelGrid",
"numpy.amin"
] | [((368, 407), 'landlab.RasterModelGrid', 'RasterModelGrid', (['(3, 4)'], {'xy_spacing': '(2.0)'}), '((3, 4), xy_spacing=2.0)\n', (383, 407), False, 'from landlab import RasterModelGrid\n'), ((471, 502), 'landlab.components.KinwaveImplicitOverlandFlow', 'KinwaveImplicitOverlandFlow', (['rg'], {}), '(rg)\n', (498, 502), False, 'from landlab.components import KinwaveImplicitOverlandFlow\n'), ((943, 974), 'landlab.components.KinwaveImplicitOverlandFlow', 'KinwaveImplicitOverlandFlow', (['rg'], {}), '(rg)\n', (970, 974), False, 'from landlab.components import KinwaveImplicitOverlandFlow\n'), ((1099, 1143), 'landlab.RasterModelGrid', 'RasterModelGrid', (['(10, 10)'], {'xy_spacing': '(2, 2)'}), '((10, 10), xy_spacing=(2, 2))\n', (1114, 1143), False, 'from landlab import RasterModelGrid\n'), ((1259, 1290), 'landlab.components.KinwaveImplicitOverlandFlow', 'KinwaveImplicitOverlandFlow', (['rg'], {}), '(rg)\n', (1286, 1290), False, 'from landlab.components import KinwaveImplicitOverlandFlow\n'), ((1806, 1850), 'landlab.RasterModelGrid', 'RasterModelGrid', (['(10, 10)'], {'xy_spacing': '(2, 2)'}), '((10, 10), xy_spacing=(2, 2))\n', (1821, 1850), False, 'from landlab import RasterModelGrid\n'), ((1966, 2028), 'landlab.components.KinwaveImplicitOverlandFlow', 'KinwaveImplicitOverlandFlow', (['rg'], {'runoff_rate': '(0.001 * 3600000.0)'}), '(rg, runoff_rate=0.001 * 3600000.0)\n', (1993, 2028), False, 'from landlab.components import KinwaveImplicitOverlandFlow\n'), ((2649, 2699), 'landlab.components.KinwaveImplicitOverlandFlow', 'KinwaveImplicitOverlandFlow', (['rg'], {'runoff_rate': '(360.0)'}), '(rg, runoff_rate=360.0)\n', (2676, 2699), False, 'from landlab.components import KinwaveImplicitOverlandFlow\n'), ((3308, 3339), 'landlab.components.KinwaveImplicitOverlandFlow', 'KinwaveImplicitOverlandFlow', (['rg'], {}), '(rg)\n', (3335, 3339), False, 'from landlab.components import KinwaveImplicitOverlandFlow\n'), ((4124, 4168), 'landlab.RasterModelGrid', 'RasterModelGrid', 
(['(10, 10)'], {'xy_spacing': '(2, 2)'}), '((10, 10), xy_spacing=(2, 2))\n', (4139, 4168), False, 'from landlab import RasterModelGrid\n'), ((4320, 4382), 'landlab.components.KinwaveImplicitOverlandFlow', 'KinwaveImplicitOverlandFlow', (['rg'], {'runoff_rate': '(0.001 * 3600000.0)'}), '(rg, runoff_rate=0.001 * 3600000.0)\n', (4347, 4382), False, 'from landlab.components import KinwaveImplicitOverlandFlow\n'), ((1391, 1440), 'numpy.amax', 'np.amax', (["kw.grid.at_link['topographic__gradient']"], {}), "(kw.grid.at_link['topographic__gradient'])\n", (1398, 1440), True, 'import numpy as np\n'), ((1469, 1518), 'numpy.amin', 'np.amin', (["kw.grid.at_link['topographic__gradient']"], {}), "(kw.grid.at_link['topographic__gradient'])\n", (1476, 1518), True, 'import numpy as np\n'), ((1547, 1570), 'numpy.amax', 'np.amax', (['kw._sqrt_slope'], {}), '(kw._sqrt_slope)\n', (1554, 1570), True, 'import numpy as np\n'), ((1601, 1628), 'numpy.amax', 'np.amax', (['kw._grad_width_sum'], {}), '(kw._grad_width_sum)\n', (1608, 1628), True, 'import numpy as np\n'), ((1659, 1677), 'numpy.amax', 'np.amax', (['kw._alpha'], {}), '(kw._alpha)\n', (1666, 1677), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import torch
import numpy as np
from PIL import Image
import random
import imageio
from data import BaseDataset
from data.randaugment import RandAugmentMC
class Synthia_loader(BaseDataset):
"""
Synthia synthetic dataset
for domain adaptation to Cityscapes
"""
def __init__(self, opt, logger, augmentations=None):
self.opt = opt
self.root = opt.src_rootpath
self.augmentations = augmentations
self.randaug = RandAugmentMC(2, 10)
self.n_classes = opt.n_class
self.img_size = (1280, 760)
self.mean = [0.0, 0.0, 0.0] #TODO: calculating the mean value of rgb channels on GTA5
self.image_base_path = os.path.join(self.root, 'RGB')
self.label_base_path = os.path.join(self.root, 'GT/LABELS')
self.distribute = np.zeros(self.n_classes, dtype=float)
ids = os.listdir(self.image_base_path)
self.ids = []
for i in range(len(ids)):
self.ids.append(os.path.join(self.label_base_path, ids[i]))
if self.n_classes == 19:
self.valid_classes = [3,4,2,21,5,7,15,9,6,16,1,10,17,8,18,19,20,12,11,]
self.class_names = ["unlabelled","Road","Sidewalk","Building","Wall",
"Fence","Pole","Traffic_light","Traffic_sign","Vegetation",
"Terrain","sky","Pedestrian","Rider","Car",
"Truck","Bus","Train","Motorcycle","Bicycle",
]
elif self.n_classes == 16:
self.valid_classes = [3,4,2,21,5,7,15,9,6,1,10,17,8,19,12,11,]
self.class_names = ["unlabelled","Road","Sidewalk","Building","Wall",
"Fence","Pole","Traffic_light","Traffic_sign","Vegetation",
"sky","Pedestrian","Rider","Car","Bus",
"Motorcycle","Bicycle",
]
elif self.n_classes == 13:
self.valid_classes = [3,4,2,15,9,6,1,10,17,8,19,12,11,]
self.class_names = ["unlabelled","Road","Sidewalk","Building","Traffic_light",
"Traffic_sign","Vegetation","sky","Pedestrian","Rider",
"Car","Bus","Motorcycle","Bicycle",
]
self.ignore_index = 250
self.class_map = dict(zip(self.valid_classes, range(self.n_classes)))
imageio.plugins.freeimage.download()
if len(self.ids) == 0:
raise Exception(
"No files found in %s" % (self.image_base_path)
)
print("Found {} images".format(len(self.ids)))
def __len__(self):
return len(self.ids)
def __getitem__(self, index):
"""__getitem__
param: index
"""
id = self.ids[index]
img_path = os.path.join(self.image_base_path, id.split('/')[-1])
lbl_path = id
img = Image.open(img_path)
lbl = np.asarray(imageio.imread(lbl_path, format='PNG-FI'))[:,:,0]
lbl = Image.fromarray(lbl)
img = img.resize(self.img_size, Image.BILINEAR)
lbl = lbl.resize(self.img_size, Image.NEAREST)
img = np.asarray(img, dtype=np.uint8)
# lbl = lbl.convert('L')
lbl = np.asarray(lbl, dtype=np.uint8)
lbl = self.encode_segmap(np.array(lbl, dtype=np.uint8))
input_dict = {}
if self.augmentations!=None:
img, lbl, _, _, _ = self.augmentations(img, lbl)
img_strong, params = self.randaug(Image.fromarray(img))
img_strong, _ = self.transform(img_strong, lbl)
input_dict['img_strong'] = img_strong
input_dict['params'] = params
img, lbl = self.transform(img, lbl)
input_dict['img'] = img
input_dict['label'] = lbl
input_dict['img_path'] = self.ids[index]
return input_dict
def encode_segmap(self, lbl):
label_copy = 250 * np.ones(lbl.shape, dtype=np.uint8)
for k, v in list(self.class_map.items()):
label_copy[lbl == k] = v
return label_copy
# def decode_segmap(self, temp):
# r = temp.copy()
# g = temp.copy()
# b = temp.copy()
# for l in range(0, self.n_classes):
# r[temp == l] = self.label_colours[l][0]
# g[temp == l] = self.label_colours[l][1]
# b[temp == l] = self.label_colours[l][2]
# rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
# rgb[:, :, 0] = r / 255.0
# rgb[:, :, 1] = g / 255.0
# rgb[:, :, 2] = b / 255.0
# return rgb
def transform(self, img, lbl):
"""transform
img, lbl
"""
# img = m.imresize(
# img, self.img_size,
# )
img = np.array(img)
# img = img[:, :, ::-1] # RGB -> BGR
img = img.astype(np.float64)
img -= self.mean
img = img.astype(float) / 255.0
img = img.transpose(2, 0, 1)
classes = np.unique(lbl)
lbl = np.array(lbl)
lbl = lbl.astype(float)
# lbl = m.imresize(lbl, self.img_size, "nearest", mode='F')
lbl = lbl.astype(int)
if not np.all(classes == np.unique(lbl)):
print("WARN: resizing labels yielded fewer classes") #TODO: compare the original and processed ones
if not np.all(np.unique(lbl[lbl != self.ignore_index]) < self.n_classes):
print("after det", classes, np.unique(lbl))
raise ValueError("Segmentation map contained invalid class values")
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).long()
return img, lbl
def get_cls_num_list(self):
return None
| [
"data.randaugment.RandAugmentMC",
"numpy.asarray",
"imageio.imread",
"numpy.zeros",
"numpy.ones",
"PIL.Image.open",
"imageio.plugins.freeimage.download",
"numpy.array",
"PIL.Image.fromarray",
"os.path.join",
"os.listdir",
"numpy.unique",
"torch.from_numpy"
] | [((560, 580), 'data.randaugment.RandAugmentMC', 'RandAugmentMC', (['(2)', '(10)'], {}), '(2, 10)\n', (573, 580), False, 'from data.randaugment import RandAugmentMC\n'), ((781, 811), 'os.path.join', 'os.path.join', (['self.root', '"""RGB"""'], {}), "(self.root, 'RGB')\n", (793, 811), False, 'import os\n'), ((843, 879), 'os.path.join', 'os.path.join', (['self.root', '"""GT/LABELS"""'], {}), "(self.root, 'GT/LABELS')\n", (855, 879), False, 'import os\n'), ((906, 943), 'numpy.zeros', 'np.zeros', (['self.n_classes'], {'dtype': 'float'}), '(self.n_classes, dtype=float)\n', (914, 943), True, 'import numpy as np\n'), ((958, 990), 'os.listdir', 'os.listdir', (['self.image_base_path'], {}), '(self.image_base_path)\n', (968, 990), False, 'import os\n'), ((2360, 2396), 'imageio.plugins.freeimage.download', 'imageio.plugins.freeimage.download', ([], {}), '()\n', (2394, 2396), False, 'import imageio\n'), ((2901, 2921), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2911, 2921), False, 'from PIL import Image\n'), ((3011, 3031), 'PIL.Image.fromarray', 'Image.fromarray', (['lbl'], {}), '(lbl)\n', (3026, 3031), False, 'from PIL import Image\n'), ((3158, 3189), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (3168, 3189), True, 'import numpy as np\n'), ((3237, 3268), 'numpy.asarray', 'np.asarray', (['lbl'], {'dtype': 'np.uint8'}), '(lbl, dtype=np.uint8)\n', (3247, 3268), True, 'import numpy as np\n'), ((4768, 4781), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4776, 4781), True, 'import numpy as np\n'), ((4985, 4999), 'numpy.unique', 'np.unique', (['lbl'], {}), '(lbl)\n', (4994, 4999), True, 'import numpy as np\n'), ((5014, 5027), 'numpy.array', 'np.array', (['lbl'], {}), '(lbl)\n', (5022, 5027), True, 'import numpy as np\n'), ((3303, 3332), 'numpy.array', 'np.array', (['lbl'], {'dtype': 'np.uint8'}), '(lbl, dtype=np.uint8)\n', (3311, 3332), True, 'import numpy as np\n'), ((3935, 3969), 'numpy.ones', 
'np.ones', (['lbl.shape'], {'dtype': 'np.uint8'}), '(lbl.shape, dtype=np.uint8)\n', (3942, 3969), True, 'import numpy as np\n'), ((1075, 1117), 'os.path.join', 'os.path.join', (['self.label_base_path', 'ids[i]'], {}), '(self.label_base_path, ids[i])\n', (1087, 1117), False, 'import os\n'), ((2947, 2988), 'imageio.imread', 'imageio.imread', (['lbl_path'], {'format': '"""PNG-FI"""'}), "(lbl_path, format='PNG-FI')\n", (2961, 2988), False, 'import imageio\n'), ((3511, 3531), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (3526, 3531), False, 'from PIL import Image\n'), ((5456, 5470), 'numpy.unique', 'np.unique', (['lbl'], {}), '(lbl)\n', (5465, 5470), True, 'import numpy as np\n'), ((5575, 5596), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (5591, 5596), False, 'import torch\n'), ((5619, 5640), 'torch.from_numpy', 'torch.from_numpy', (['lbl'], {}), '(lbl)\n', (5635, 5640), False, 'import torch\n'), ((5200, 5214), 'numpy.unique', 'np.unique', (['lbl'], {}), '(lbl)\n', (5209, 5214), True, 'import numpy as np\n'), ((5355, 5395), 'numpy.unique', 'np.unique', (['lbl[lbl != self.ignore_index]'], {}), '(lbl[lbl != self.ignore_index])\n', (5364, 5395), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import astropy.units as u
from astropy.table import Table
from astropy.time import Time
from astropy.visualization import quantity_support
from gammapy.data import GTI
from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis
from gammapy.utils.scripts import make_path
from gammapy.utils.testing import assert_time_allclose, requires_data, requires_dependency, mpl_plot_check
from gammapy.utils.time import time_ref_to_dict
# Shared parametrization data for the MapAxis tests below.

# (nodes, interp) cases: the same node values under each interpolation scheme.
MAP_AXIS_INTERP = [
    (np.array([0.25, 0.75, 1.0, 2.0]), "lin"),
    (np.array([0.25, 0.75, 1.0, 2.0]), "log"),
    (np.array([0.25, 0.75, 1.0, 2.0]), "sqrt"),
]

# (nodes, interp, node_type) cases: each interpolation scheme with the nodes
# interpreted both as bin edges and as bin centers.
MAP_AXIS_NODE_TYPES = [
    ([0.25, 0.75, 1.0, 2.0], "lin", "edges"),
    ([0.25, 0.75, 1.0, 2.0], "log", "edges"),
    ([0.25, 0.75, 1.0, 2.0], "sqrt", "edges"),
    ([0.25, 0.75, 1.0, 2.0], "lin", "center"),
    ([0.25, 0.75, 1.0, 2.0], "log", "center"),
    ([0.25, 0.75, 1.0, 2.0], "sqrt", "center"),
]

nodes_array = np.array([0.25, 0.75, 1.0, 2.0])

# (nodes, interp, node_type, unit, name, expected-equality) cases for
# test_mapaxis_equal. The last entry of each tuple is the expected result of
# comparing against the reference axis (lin/edges/"s"/"test").
# NOTE(review): the True results for "TEST"/1e-9/"hr" suggest name comparison
# is case-insensitive and node/unit comparison tolerates tiny numeric and
# unit-conversion differences -- confirm against MapAxis.__eq__.
MAP_AXIS_NODE_TYPE_UNIT = [
    (nodes_array, "lin", "edges", "s", "TEST", True),
    (nodes_array, "log", "edges", "s", "test", False),
    (nodes_array, "lin", "edges", "TeV", "TEST", False),
    (nodes_array, "sqrt", "edges", "s", "test", False),
    (nodes_array, "lin", "center", "s", "test", False),
    (nodes_array + 1e-9, "lin", "edges", "s", "test", True),
    (nodes_array + 1e-3, "lin", "edges", "s", "test", False),
    (nodes_array / 3600.0, "lin", "edges", "hr", "TEST", True),
]
@pytest.fixture
def time_intervals():
    """Return 20 one-hour intervals spread over ten days from 2020-03-19."""
    reference = Time("2020-03-19")
    starts = np.linspace(0, 10, 20) * u.d
    stops = starts + 1 * u.h
    return {"t_min": starts, "t_max": stops, "t_ref": reference}
@pytest.fixture
def time_interval():
    """Return a single ten-day interval starting one day after 2020-03-19."""
    reference = Time("2020-03-19")
    return {"t_min": 1 * u.d, "t_max": 11 * u.d, "t_ref": reference}
@pytest.fixture(scope="session")
def energy_axis_ref():
    """Reference energy axis with nine 1 TeV wide bins from 1 to 10 TeV."""
    return MapAxis.from_edges(np.arange(1, 11) * u.TeV, name="energy")
def test_mapaxis_repr():
    """The repr of an axis mentions the class name."""
    assert "MapAxis" in repr(MapAxis([1, 2, 3], name="test"))
@pytest.mark.parametrize(
    ("nodes", "interp", "node_type", "unit", "name", "result"),
    MAP_AXIS_NODE_TYPE_UNIT,
)
def test_mapaxis_equal(nodes, interp, node_type, unit, name, result):
    """Equality and inequality agree with the expectation for each case."""
    reference = MapAxis(
        nodes=[0.25, 0.75, 1.0, 2.0],
        name="test",
        unit="s",
        interp="lin",
        node_type="edges",
    )
    other = MapAxis(nodes, name=name, unit=unit, interp=interp, node_type=node_type)
    assert (reference == other) is result
    assert (reference != other) is not result
def test_squash():
    """Squashing collapses an axis into one bin spanning the full range."""
    axis = MapAxis(
        nodes=[0, 1, 2, 3], unit="TeV", name="energy", node_type="edges", interp="lin"
    )
    squashed = axis.squash()
    assert_allclose(squashed.nbin, 1)
    assert_allclose(squashed.edges[0], axis.edges[0])
    assert_allclose(squashed.edges[1], axis.edges[-1])
    assert_allclose(squashed.center, 1.5 * u.TeV)
def test_upsample():
    """Upsampling multiplies the bin count and keeps the outer edges."""
    axis = MapAxis(
        nodes=[0, 1, 2, 3], unit="TeV", name="energy", node_type="edges", interp="lin"
    )
    fine = axis.upsample(10)
    assert_allclose(fine.nbin, 10 * axis.nbin)
    assert_allclose(fine.edges[0], axis.edges[0])
    assert_allclose(fine.edges[-1], axis.edges[-1])
    assert fine.node_type == axis.node_type
def test_downsample():
    """Downsampling merges bins by the given factor and keeps the outer edges."""
    axis = MapAxis(
        nodes=[0, 1, 2, 3, 4, 5, 6, 7, 8],
        unit="TeV",
        name="energy",
        node_type="edges",
        interp="lin",
    )
    coarse = axis.downsample(2)
    assert_allclose(coarse.nbin, 0.5 * axis.nbin)
    assert_allclose(coarse.edges[0], axis.edges[0])
    assert_allclose(coarse.edges[-1], axis.edges[-1])
    assert coarse.node_type == axis.node_type
def test_upsample_non_regular():
    """Upsampling also works on non-equidistant edge axes."""
    axis = MapAxis.from_edges([0, 1, 3, 7], name="test", interp="lin")
    fine = axis.upsample(2)
    assert_allclose(fine.nbin, 2 * axis.nbin)
    assert_allclose(fine.edges[0], axis.edges[0])
    assert_allclose(fine.edges[-1], axis.edges[-1])
    assert fine.node_type == axis.node_type
def test_upsample_non_regular_nodes():
    """Upsampling a node axis gives 2 * nbin - 1 nodes with fixed end points."""
    axis = MapAxis.from_nodes([0, 1, 3, 7], name="test", interp="lin")
    fine = axis.upsample(2)
    assert_allclose(fine.nbin, 2 * axis.nbin - 1)
    assert_allclose(fine.center[0], axis.center[0])
    assert_allclose(fine.center[-1], axis.center[-1])
    assert fine.node_type == axis.node_type
def test_downsample_non_regular():
    """Downsampling also works on non-equidistant edge axes."""
    axis = MapAxis.from_edges([0, 1, 3, 7, 13], name="test", interp="lin")
    coarse = axis.downsample(2)
    assert_allclose(coarse.nbin, 0.5 * axis.nbin)
    assert_allclose(coarse.edges[0], axis.edges[0])
    assert_allclose(coarse.edges[-1], axis.edges[-1])
    assert coarse.node_type == axis.node_type
def test_downsample_non_regular_nodes():
    """Downsampling a non-equidistant axis built from edges."""
    axis = MapAxis.from_edges([0, 1, 3, 7, 9], name="test", interp="lin")
    coarse = axis.downsample(2)
    assert_allclose(coarse.nbin, 0.5 * axis.nbin)
    assert_allclose(coarse.edges[0], axis.edges[0])
    assert_allclose(coarse.edges[-1], axis.edges[-1])
    assert coarse.node_type == axis.node_type
@pytest.mark.parametrize("factor", [1, 3, 5, 7, 11])
def test_up_downsample_consistency(factor):
    """Upsampling then downsampling by the same factor is a round trip."""
    axis = MapAxis.from_edges([0, 1, 3, 7, 13], name="test", interp="lin")
    round_tripped = axis.upsample(factor).downsample(factor)
    assert_allclose(round_tripped.edges, axis.edges)
def test_group_table_basic(energy_axis_ref):
    """Grouping with edges that line up with axis bins gives 'normal' groups."""
    groups = energy_axis_ref.group_table([1, 2, 10] * u.TeV)
    assert_allclose(groups["group_idx"], [0, 1])
    assert_allclose(groups["idx_min"], [0, 1])
    assert_allclose(groups["idx_max"], [0, 8])
    assert_allclose(groups["energy_min"], [1, 2])
    assert_allclose(groups["energy_max"], [2, 10])
    assert_equal([entry.strip() for entry in groups["bin_type"]], ["normal", "normal"])
@pytest.mark.parametrize(
    "energy_edges",
    [[1.8, 4.8, 7.2] * u.TeV, [2, 5, 7] * u.TeV, [2000, 5000, 7000] * u.GeV],
)
def test_group_tablenergy_edges(energy_axis_ref, energy_edges):
    """Requested edges snap to axis bins, with under- and overflow groups."""
    # NOTE(review): the function name looks like it is missing an underscore
    # ("test_group_table_energy_edges"); kept unchanged here.
    groups = energy_axis_ref.group_table(energy_edges)
    assert_allclose(groups["group_idx"], [0, 1, 2, 3])
    assert_allclose(groups["idx_min"], [0, 1, 4, 6])
    assert_allclose(groups["idx_max"], [0, 3, 5, 8])
    assert_allclose(groups["energy_min"].quantity.to_value("TeV"), [1, 2, 5, 7])
    assert_allclose(groups["energy_max"].quantity.to_value("TeV"), [2, 5, 7, 10])
    assert_equal(
        [entry.strip() for entry in groups["bin_type"]],
        ["underflow", "normal", "normal", "overflow"],
    )
def test_group_table_below_range(energy_axis_ref):
    """Edges below the axis range are dropped; the tail becomes an overflow."""
    groups = energy_axis_ref.group_table([0.7, 0.8, 1, 4] * u.TeV)
    assert_allclose(groups["group_idx"], [0, 1])
    assert_allclose(groups["idx_min"], [0, 3])
    assert_allclose(groups["idx_max"], [2, 8])
    assert_allclose(groups["energy_min"], [1, 4])
    assert_allclose(groups["energy_max"], [4, 10])
    assert_equal([entry.strip() for entry in groups["bin_type"]], ["normal", "overflow"])
def test_group_table_above_range(energy_axis_ref):
    """Edges starting above the axis minimum produce a leading underflow group."""
    groups = energy_axis_ref.group_table([5, 7, 11, 13] * u.TeV)
    assert_allclose(groups["group_idx"], [0, 1, 2])
    assert_allclose(groups["idx_min"], [0, 4, 6])
    assert_allclose(groups["idx_max"], [3, 5, 8])
    assert_allclose(groups["energy_min"], [1, 5, 7])
    assert_allclose(groups["energy_max"], [5, 7, 10])
    assert_equal(
        [entry.strip() for entry in groups["bin_type"]],
        ["underflow", "normal", "normal"],
    )
def test_group_table_outside_range(energy_axis_ref):
    """Edges completely outside the axis range raise a ValueError."""
    with pytest.raises(ValueError):
        energy_axis_ref.group_table([20, 30, 40] * u.TeV)
def test_map_axis_single_bin():
    """A single node is not enough to define an axis."""
    with pytest.raises(ValueError):
        MapAxis.from_nodes([1])
def test_map_axis_aligned():
    """Axes with different node types and interpolation are not aligned."""
    edges_axis = MapAxis([1, 2, 3], interp="lin", node_type="edges")
    center_axis = MapAxis([1.5, 2.5], interp="log", node_type="center")
    assert not edges_axis.is_aligned(center_axis)
def test_map_axis_pad():
    """Padding extends the axis by whole bins on either or both sides."""
    axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=1)
    # One bin appended at the upper end only.
    assert_allclose(axis.pad(pad_width=(0, 1)).edges, [1, 10, 100] * u.TeV)
    # One bin prepended at the lower end only.
    assert_allclose(axis.pad(pad_width=(1, 0)).edges, [0.1, 1, 10] * u.TeV)
    # A scalar pad width applies to both ends.
    assert_allclose(axis.pad(pad_width=1).edges, [0.1, 1, 10, 100] * u.TeV)
def test_map_axes_pad():
    """MapAxes.pad pads only the axis named by ``axis_name``."""
    energy = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=1)
    rad = MapAxis.from_bounds(0, 1, nbin=2, unit="deg", name="rad")
    padded = MapAxes([energy, rad]).pad(axis_name="energy", pad_width=1)
    assert_allclose(padded["energy"].edges, [0.1, 1, 10, 100] * u.TeV)
@pytest.mark.parametrize(("edges", "interp"), MAP_AXIS_INTERP)
def test_mapaxis_init_from_edges(edges, interp):
    """Valid edges are stored as given; invalid edge arrays are rejected.

    Fix: the invalid cases used to share a single ``pytest.raises`` block, so
    only the first call was ever executed -- statements after the first raise
    never run. Each case now gets its own ``raises`` context.
    """
    axis = MapAxis(edges, interp=interp)
    assert_allclose(axis.edges, edges)
    assert_allclose(axis.nbin, len(edges) - 1)
    # Too few edges, repeated edges and unsorted edges are all invalid.
    for bad_edges in ([1], [0, 1, 1, 2], [0, 1, 3, 2]):
        with pytest.raises(ValueError):
            MapAxis.from_edges(bad_edges)
@pytest.mark.parametrize(("nodes", "interp"), MAP_AXIS_INTERP)
def test_mapaxis_from_nodes(nodes, interp):
    """Valid nodes become bin centers; invalid node arrays are rejected.

    Fix: the invalid cases used to share a single ``pytest.raises`` block, so
    only the first call was ever executed -- statements after the first raise
    never run. Each case now gets its own ``raises`` context.
    """
    axis = MapAxis.from_nodes(nodes, interp=interp)
    assert_allclose(axis.center, nodes)
    assert_allclose(axis.nbin, len(nodes))
    # Empty, repeated and unsorted node arrays are all invalid.
    for bad_nodes in ([], [0, 1, 1, 2], [0, 1, 3, 2]):
        with pytest.raises(ValueError):
            MapAxis.from_nodes(bad_nodes)
@pytest.mark.parametrize(("nodes", "interp"), MAP_AXIS_INTERP)
def test_mapaxis_from_bounds(nodes, interp):
    """from_bounds builds the requested number of bins between the bounds."""
    axis = MapAxis.from_bounds(nodes[0], nodes[-1], 3, interp=interp)
    assert_allclose(axis.edges[0], nodes[0])
    assert_allclose(axis.edges[-1], nodes[-1])
    assert_allclose(axis.nbin, 3)
    # Zero-width bounds cannot define an axis.
    with pytest.raises(ValueError):
        MapAxis.from_bounds(1, 1, 1)
def test_map_axis_from_energy_units():
with pytest.raises(ValueError):
_ = MapAxis.from_energy_bounds(0.1, 10, 2, unit="deg")
with pytest.raises(ValueError):
_ = MapAxis.from_energy_edges([0.1, 1, 10] * u.deg)
@pytest.mark.parametrize(("nodes", "interp", "node_type"), MAP_AXIS_NODE_TYPES)
def test_mapaxis_pix_to_coord(nodes, interp, node_type):
axis = MapAxis(nodes, interp=interp, node_type=node_type)
assert_allclose(axis.center, axis.pix_to_coord(np.arange(axis.nbin, dtype=float)))
assert_allclose(
np.arange(axis.nbin + 1, dtype=float) - 0.5, axis.coord_to_pix(axis.edges)
)
@pytest.mark.parametrize(("nodes", "interp", "node_type"), MAP_AXIS_NODE_TYPES)
def test_mapaxis_coord_to_idx(nodes, interp, node_type):
axis = MapAxis(nodes, interp=interp, node_type=node_type)
assert_allclose(np.arange(axis.nbin, dtype=int), axis.coord_to_idx(axis.center))
@pytest.mark.parametrize(("nodes", "interp", "node_type"), MAP_AXIS_NODE_TYPES)
def test_mapaxis_slice(nodes, interp, node_type):
axis = MapAxis(nodes, interp=interp, node_type=node_type)
saxis = axis.slice(slice(1, 3))
assert_allclose(saxis.nbin, 2)
assert_allclose(saxis.center, axis.center[slice(1, 3)])
axis = MapAxis(nodes, interp=interp, node_type=node_type)
saxis = axis.slice(slice(1, None))
assert_allclose(saxis.nbin, axis.nbin - 1)
assert_allclose(saxis.center, axis.center[slice(1, None)])
axis = MapAxis(nodes, interp=interp, node_type=node_type)
saxis = axis.slice(slice(None, 2))
assert_allclose(saxis.nbin, 2)
assert_allclose(saxis.center, axis.center[slice(None, 2)])
axis = MapAxis(nodes, interp=interp, node_type=node_type)
saxis = axis.slice(slice(None, -1))
assert_allclose(saxis.nbin, axis.nbin - 1)
assert_allclose(saxis.center, axis.center[slice(None, -1)])
def test_map_axis_plot_helpers():
axis = MapAxis.from_nodes([0, 1, 2], unit="deg", name="offset")
labels = axis.as_plot_labels
assert labels[0] == "0.00e+00 deg"
assert_allclose(axis.center, axis.as_plot_center)
assert_allclose(axis.edges, axis.as_plot_edges)
def test_time_axis(time_intervals):
axis = TimeMapAxis(
time_intervals["t_min"], time_intervals["t_max"], time_intervals["t_ref"]
)
axis_copy = axis.copy()
assert axis.nbin == 20
assert axis.name == "time"
assert axis.node_type == "intervals"
assert_allclose(axis.time_delta.to_value("min"), 60)
assert_allclose(axis.time_mid[0].mjd, 58927.020833333336)
assert "time" in axis.__str__()
assert "20" in axis.__str__()
with pytest.raises(ValueError):
axis.assert_name("bad")
assert axis_copy == axis
assert not axis.is_contiguous
ax_cont = axis.to_contiguous()
assert_allclose(ax_cont.nbin, 39)
def test_single_interval_time_axis(time_interval):
axis = TimeMapAxis(
edges_min=time_interval["t_min"],
edges_max=time_interval["t_max"],
reference_time=time_interval["t_ref"],
)
coord = Time(58933, format="mjd") + u.Quantity([1.5, 3.5, 10], unit="d")
pix = axis.coord_to_pix(coord)
assert axis.nbin == 1
assert_allclose(axis.time_delta.to_value("d"), 10)
assert_allclose(axis.time_mid[0].mjd, 58933)
pix_min = axis.coord_to_pix(time_interval["t_min"] + 0.001 * u.s)
assert_allclose(pix_min, -0.5)
pix_max = axis.coord_to_pix(time_interval["t_max"] - 0.001 * u.s)
assert_allclose(pix_max, 0.5)
assert_allclose(pix, [0.15, 0.35, np.nan])
def test_slice_squash_time_axis(time_intervals):
axis = TimeMapAxis(
time_intervals["t_min"], time_intervals["t_max"], time_intervals["t_ref"]
)
axis_squash = axis.squash()
axis_slice = axis.slice(slice(1, 5))
assert axis_squash.nbin == 1
assert_allclose(axis_squash.time_min[0].mjd, 58927)
assert_allclose(axis_squash.time_delta.to_value("d"), 10.04166666)
assert axis_slice.nbin == 4
assert_allclose(axis_slice.time_delta.to_value("d")[0], 0.04166666666)
assert axis_squash != axis_slice
def test_from_time_edges_time_axis():
t0 = Time("2020-03-19")
t_min = t0 + np.linspace(0, 10, 20) * u.d
t_max = t_min + 1 * u.h
axis = TimeMapAxis.from_time_edges(t_min, t_max)
axis_h = TimeMapAxis.from_time_edges(t_min, t_max, unit="h")
assert axis.nbin == 20
assert axis.name == "time"
assert_time_allclose(axis.reference_time, t0)
assert_allclose(axis.time_delta.to_value("min"), 60)
assert_allclose(axis.time_mid[0].mjd, 58927.020833333336)
assert_allclose(axis_h.time_delta.to_value("h"), 1)
assert_allclose(axis_h.time_mid[0].mjd, 58927.020833333336)
assert axis == axis_h
def test_incorrect_time_axis():
tmin = np.linspace(0, 10, 20) * u.h
tmax = np.linspace(1, 11, 20) * u.h
# incorrect reference time
with pytest.raises(ValueError):
TimeMapAxis(tmin, tmax, reference_time=51000 * u.d, name="time")
# overlapping time intervals
with pytest.raises(ValueError):
TimeMapAxis(tmin, tmax, reference_time=Time.now(), name="time")
def test_bad_length_sort_time_axis(time_intervals):
tref = time_intervals["t_ref"]
tmin = time_intervals["t_min"]
tmax_reverse = time_intervals["t_max"][::-1]
tmax_short = time_intervals["t_max"][:-1]
with pytest.raises(ValueError):
TimeMapAxis(tmin, tmax_reverse, tref, name="time")
with pytest.raises(ValueError):
TimeMapAxis(tmin, tmax_short, tref, name="time")
def test_coord_to_idx_time_axis(time_intervals):
tmin = time_intervals["t_min"]
tmax = time_intervals["t_max"]
tref = time_intervals["t_ref"]
axis = TimeMapAxis(tmin, tmax, tref, name="time")
time = Time(58927.020833333336, format="mjd")
times = axis.time_mid
times[::2] += 1 * u.h
times = times.insert(0, tref - [1, 2] * u.yr)
idx = axis.coord_to_idx(time)
indices = axis.coord_to_idx(times)
pix = axis.coord_to_pix(time)
pixels = axis.coord_to_pix(times)
assert idx == 0
assert_allclose(indices[1::2], [-1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19])
assert_allclose(indices[::2], -1)
assert_allclose(pix, 0, atol=1e-10)
assert_allclose(pixels[1::2], [np.nan, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19])
def test_slice_time_axis(time_intervals):
axis = TimeMapAxis(
time_intervals["t_min"], time_intervals["t_max"], time_intervals["t_ref"]
)
new_axis = axis.slice([2, 6, 9])
squashed = axis.squash()
assert new_axis.nbin == 3
assert_allclose(squashed.time_max[0].mjd, 58937.041667)
assert squashed.nbin == 1
assert_allclose(squashed.time_max[0].mjd, 58937.041667)
def test_time_map_axis_from_time_bounds():
t_min = Time("2006-02-12", scale="utc")
t_max = t_min + 12 * u.h
axis = TimeMapAxis.from_time_bounds(time_min=t_min, time_max=t_max, nbin=3)
assert_allclose(axis.center, [0.083333, 0.25, 0.416667] * u.d, rtol=1e-5)
def test_from_table_time_axis():
t0 = Time("2006-02-12", scale="utc")
t_min = np.linspace(0, 10, 10) * u.d
t_max = t_min + 12 * u.h
table = Table()
table["TIME_MIN"] = t_min
table["TIME_MAX"] = t_max
table.meta.update(time_ref_to_dict(t0))
table.meta["AXCOLS1"] = "TIME_MIN,TIME_MAX"
axis = TimeMapAxis.from_table(table, format="gadf")
assert axis.nbin == 10
assert_allclose(axis.time_mid[0].mjd, 53778.25)
@requires_data()
def test_from_gti_time_axis():
filename = "$GAMMAPY_DATA/hess-dl3-dr1/data/hess_dl3_dr1_obs_id_020136.fits.gz"
filename = make_path(filename)
gti = GTI.read(filename)
axis = TimeMapAxis.from_gti(gti)
expected = Time(53090.123451203704, format="mjd", scale="tt")
assert_time_allclose(axis.time_min[0], expected)
assert axis.nbin == 1
def test_map_with_time_axis(time_intervals):
time_axis = TimeMapAxis(
time_intervals["t_min"], time_intervals["t_max"], time_intervals["t_ref"]
)
energy_axis = MapAxis.from_energy_bounds(0.1, 10, 2, unit="TeV")
region_map = RegionNDMap.create(
region="fk5; circle(0,0,0.1)", axes=[energy_axis, time_axis]
)
assert region_map.geom.data_shape == (20, 2, 1, 1)
def test_time_axis_plot_helpers():
time_ref = Time("1999-01-01T00:00:00.123456789")
time_axis = TimeMapAxis(
edges_min=[0, 1, 3] * u.d,
edges_max=[0.8, 1.9, 5.4] * u.d,
reference_time=time_ref,
)
labels = time_axis.as_plot_labels
assert labels[0] == "1999-01-01 00:00:00.123 - 1999-01-01 19:12:00.123"
center = time_axis.as_plot_center
assert center[0].year == 1999
edges = time_axis.to_contiguous().as_plot_edges
assert edges[0].year == 1999
def test_axes_basics():
energy_axis = MapAxis.from_energy_edges([1, 3] * u.TeV)
time_ref = Time("1999-01-01T00:00:00.123456789")
time_axis = TimeMapAxis(
edges_min=[0, 1, 3] * u.d,
edges_max=[0.8, 1.9, 5.4] * u.d,
reference_time=time_ref,
)
axes = MapAxes([energy_axis, time_axis])
assert axes.shape == (1, 3)
assert axes.is_unidimensional
assert not axes.is_flat
assert axes.primary_axis.name == "time"
def test_axes_getitem():
axis1 = MapAxis.from_bounds(1, 4, 3, name="a1")
axis2 = axis1.copy(name="a2")
axis3 = axis1.copy(name="a3")
axes = MapAxes([axis1, axis2, axis3])
assert isinstance(axes[0], MapAxis)
assert axes[-1].name == "a3"
assert isinstance(axes[1:], MapAxes)
assert len(axes[1:]) == 2
assert isinstance(axes[0:1], MapAxes)
assert len(axes[0:1]) == 1
assert isinstance(axes[["a3", "a1"]], MapAxes)
assert axes[["a3", "a1"]][0].name == "a3"
def test_label_map_axis_basics():
axis = LabelMapAxis(labels=["label-1", "label-2"], name="label-axis")
axis_str = str(axis)
assert "node type" in axis_str
assert "labels" in axis_str
assert "label-2" in axis_str
with pytest.raises(ValueError):
axis.assert_name("time")
assert axis.nbin == 2
assert axis.node_type == "label"
assert_allclose(axis.bin_width, 1)
assert axis.name == "label-axis"
with pytest.raises(ValueError):
axis.edges
axis_copy = axis.copy()
assert axis_copy.name == "label-axis"
def test_label_map_axis_coord_to_idx():
axis = LabelMapAxis(labels=["label-1", "label-2", "label-3"], name="label-axis")
labels = "label-1"
idx = axis.coord_to_idx(coord=labels)
assert_allclose(idx, 0)
labels = ["label-1", "label-3"]
idx = axis.coord_to_idx(coord=labels)
assert_allclose(idx, [0, 2])
labels = [["label-1"], ["label-2"]]
idx = axis.coord_to_idx(coord=labels)
assert_allclose(idx, [[0], [1]])
with pytest.raises(ValueError):
labels = [["bad-label"], ["label-2"]]
_ = axis.coord_to_idx(coord=labels)
def test_mixed_axes():
label_axis = LabelMapAxis(labels=["label-1", "label-2", "label-3"], name="label")
time_axis = TimeMapAxis(
edges_min=[1, 10] * u.day,
edges_max=[2, 13] * u.day,
reference_time=Time("2020-03-19"),
)
energy_axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=4)
axes = MapAxes(axes=[energy_axis, time_axis, label_axis])
coords = axes.get_coord()
assert coords["label"].shape == (1, 1, 3)
assert coords["energy"].shape == (4, 1, 1)
assert coords["time"].shape == (1, 2, 1)
idx = axes.coord_to_idx(coords)
assert_allclose(idx[0], np.arange(4).reshape((4, 1, 1)))
assert_allclose(idx[1], np.arange(2).reshape((1, 2, 1)))
assert_allclose(idx[2], np.arange(3).reshape((1, 1, 3)))
hdu = axes.to_table_hdu(format="gadf")
table = Table.read(hdu)
assert table["LABEL"].dtype == np.dtype("<U7")
assert len(table) == 24
@requires_dependency("matplotlib")
def test_MapAxis_format_plot_xaxis():
import matplotlib.pyplot as plt
axis = MapAxis.from_energy_bounds(
"0.03 TeV", "300 TeV", nbin=20,
per_decade=True, name="energy_true")
with mpl_plot_check():
ax = plt.gca()
with quantity_support():
ax.plot(axis.center, np.ones_like(axis.center))
ax1 = axis.format_plot_xaxis(ax=ax)
assert ax1.xaxis.label.properties()["text"] == "True Energy [TeV]"
@requires_dependency("matplotlib")
def test_TimeMapAxis_format_plot_xaxis(time_intervals):
import matplotlib.pyplot as plt
axis = TimeMapAxis(
time_intervals["t_min"], time_intervals["t_max"], time_intervals["t_ref"], name="time"
)
with mpl_plot_check():
ax = plt.gca()
with quantity_support():
ax.plot(axis.center, np.ones_like(axis.center))
ax1 = axis.format_plot_xaxis(ax=ax)
assert ax1.xaxis.label.properties()["text"] == "Time [iso]"
| [
"gammapy.maps.MapAxis.from_energy_bounds",
"gammapy.data.GTI.read",
"astropy.visualization.quantity_support",
"gammapy.maps.MapAxis.from_nodes",
"numpy.arange",
"matplotlib.pyplot.gca",
"pytest.mark.parametrize",
"gammapy.maps.TimeMapAxis.from_table",
"gammapy.utils.testing.requires_dependency",
"... | [((1091, 1123), 'numpy.array', 'np.array', (['[0.25, 0.75, 1.0, 2.0]'], {}), '([0.25, 0.75, 1.0, 2.0])\n', (1099, 1123), True, 'import numpy as np\n'), ((1982, 2013), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1996, 2013), False, 'import pytest\n'), ((2234, 2346), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nodes', 'interp', 'node_type', 'unit', 'name', 'result')", 'MAP_AXIS_NODE_TYPE_UNIT'], {}), "(('nodes', 'interp', 'node_type', 'unit', 'name',\n 'result'), MAP_AXIS_NODE_TYPE_UNIT)\n", (2257, 2346), False, 'import pytest\n'), ((5330, 5381), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""factor"""', '[1, 3, 5, 7, 11]'], {}), "('factor', [1, 3, 5, 7, 11])\n", (5353, 5381), False, 'import pytest\n'), ((6099, 6216), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""energy_edges"""', '[[1.8, 4.8, 7.2] * u.TeV, [2, 5, 7] * u.TeV, [2000, 5000, 7000] * u.GeV]'], {}), "('energy_edges', [[1.8, 4.8, 7.2] * u.TeV, [2, 5, 7] *\n u.TeV, [2000, 5000, 7000] * u.GeV])\n", (6122, 6216), False, 'import pytest\n'), ((9023, 9084), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('edges', 'interp')", 'MAP_AXIS_INTERP'], {}), "(('edges', 'interp'), MAP_AXIS_INTERP)\n", (9046, 9084), False, 'import pytest\n'), ((9414, 9475), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nodes', 'interp')", 'MAP_AXIS_INTERP'], {}), "(('nodes', 'interp'), MAP_AXIS_INTERP)\n", (9437, 9475), False, 'import pytest\n'), ((9807, 9868), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nodes', 'interp')", 'MAP_AXIS_INTERP'], {}), "(('nodes', 'interp'), MAP_AXIS_INTERP)\n", (9830, 9868), False, 'import pytest\n'), ((10423, 10501), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nodes', 'interp', 'node_type')", 'MAP_AXIS_NODE_TYPES'], {}), "(('nodes', 'interp', 'node_type'), MAP_AXIS_NODE_TYPES)\n", (10446, 10501), False, 'import pytest\n'), ((10821, 10899), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nodes', 'interp', 'node_type')", 'MAP_AXIS_NODE_TYPES'], {}), "(('nodes', 'interp', 'node_type'), MAP_AXIS_NODE_TYPES)\n", (10844, 10899), False, 'import pytest\n'), ((11107, 11185), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('nodes', 'interp', 'node_type')", 'MAP_AXIS_NODE_TYPES'], {}), "(('nodes', 'interp', 'node_type'), MAP_AXIS_NODE_TYPES)\n", (11130, 11185), False, 'import pytest\n'), ((17621, 17636), 'gammapy.utils.testing.requires_data', 'requires_data', ([], {}), '()\n', (17634, 17636), False, 'from gammapy.utils.testing import assert_time_allclose, requires_data, requires_dependency, mpl_plot_check\n'), ((21980, 22013), 'gammapy.utils.testing.requires_dependency', 'requires_dependency', (['"""matplotlib"""'], {}), "('matplotlib')\n", (21999, 22013), False, 'from gammapy.utils.testing import assert_time_allclose, requires_data, requires_dependency, mpl_plot_check\n'), ((22479, 22512), 'gammapy.utils.testing.requires_dependency', 'requires_dependency', (['"""matplotlib"""'], {}), "('matplotlib')\n", (22498, 22512), False, 'from gammapy.utils.testing import assert_time_allclose, requires_data, requires_dependency, mpl_plot_check\n'), ((1669, 1687), 'astropy.time.Time', 'Time', (['"""2020-03-19"""'], {}), "('2020-03-19')\n", (1673, 1687), False, 'from astropy.time import Time\n'), ((1862, 1880), 'astropy.time.Time', 'Time', (['"""2020-03-19"""'], {}), "('2020-03-19')\n", (1866, 1880), False, 'from astropy.time import Time\n'), ((2085, 2125), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['edges'], {'name': '"""energy"""'}), "(edges, name='energy')\n", (2103, 2125), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((2164, 2195), 'gammapy.maps.MapAxis', 'MapAxis', (['[1, 2, 3]'], {'name': '"""test"""'}), "([1, 2, 3], name='test')\n", (2171, 2195), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, 
TimeMapAxis\n'), ((2436, 2533), 'gammapy.maps.MapAxis', 'MapAxis', ([], {'nodes': '[0.25, 0.75, 1.0, 2.0]', 'name': '"""test"""', 'unit': '"""s"""', 'interp': '"""lin"""', 'node_type': '"""edges"""'}), "(nodes=[0.25, 0.75, 1.0, 2.0], name='test', unit='s', interp='lin',\n node_type='edges')\n", (2443, 2533), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((2590, 2662), 'gammapy.maps.MapAxis', 'MapAxis', (['nodes'], {'name': 'name', 'unit': 'unit', 'interp': 'interp', 'node_type': 'node_type'}), '(nodes, name=name, unit=unit, interp=interp, node_type=node_type)\n', (2597, 2662), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((2776, 2867), 'gammapy.maps.MapAxis', 'MapAxis', ([], {'nodes': '[0, 1, 2, 3]', 'unit': '"""TeV"""', 'name': '"""energy"""', 'node_type': '"""edges"""', 'interp': '"""lin"""'}), "(nodes=[0, 1, 2, 3], unit='TeV', name='energy', node_type='edges',\n interp='lin')\n", (2783, 2867), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((2909, 2939), 'numpy.testing.assert_allclose', 'assert_allclose', (['ax_sq.nbin', '(1)'], {}), '(ax_sq.nbin, 1)\n', (2924, 2939), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2944, 2990), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.edges[0]', 'ax_sq.edges[0]'], {}), '(axis.edges[0], ax_sq.edges[0])\n', (2959, 2990), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2995, 3042), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.edges[-1]', 'ax_sq.edges[1]'], {}), '(axis.edges[-1], ax_sq.edges[1])\n', (3010, 3042), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3047, 3089), 'numpy.testing.assert_allclose', 'assert_allclose', (['ax_sq.center', '(1.5 * u.TeV)'], {}), '(ax_sq.center, 1.5 * u.TeV)\n', (3062, 3089), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3124, 
3215), 'gammapy.maps.MapAxis', 'MapAxis', ([], {'nodes': '[0, 1, 2, 3]', 'unit': '"""TeV"""', 'name': '"""energy"""', 'node_type': '"""edges"""', 'interp': '"""lin"""'}), "(nodes=[0, 1, 2, 3], unit='TeV', name='energy', node_type='edges',\n interp='lin')\n", (3131, 3215), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((3263, 3308), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_up.nbin', '(10 * axis.nbin)'], {}), '(axis_up.nbin, 10 * axis.nbin)\n', (3278, 3308), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3313, 3361), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_up.edges[0]', 'axis.edges[0]'], {}), '(axis_up.edges[0], axis.edges[0])\n', (3328, 3361), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3366, 3416), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_up.edges[-1]', 'axis.edges[-1]'], {}), '(axis_up.edges[-1], axis.edges[-1])\n', (3381, 3416), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3500, 3606), 'gammapy.maps.MapAxis', 'MapAxis', ([], {'nodes': '[0, 1, 2, 3, 4, 5, 6, 7, 8]', 'unit': '"""TeV"""', 'name': '"""energy"""', 'node_type': '"""edges"""', 'interp': '"""lin"""'}), "(nodes=[0, 1, 2, 3, 4, 5, 6, 7, 8], unit='TeV', name='energy',\n node_type='edges', interp='lin')\n", (3507, 3606), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((3690, 3738), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_down.nbin', '(0.5 * axis.nbin)'], {}), '(axis_down.nbin, 0.5 * axis.nbin)\n', (3705, 3738), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3743, 3793), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_down.edges[0]', 'axis.edges[0]'], {}), '(axis_down.edges[0], axis.edges[0])\n', (3758, 3793), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3798, 3850), 'numpy.testing.assert_allclose', 
'assert_allclose', (['axis_down.edges[-1]', 'axis.edges[-1]'], {}), '(axis_down.edges[-1], axis.edges[-1])\n', (3813, 3850), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3946, 4005), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[0, 1, 3, 7]'], {'name': '"""test"""', 'interp': '"""lin"""'}), "([0, 1, 3, 7], name='test', interp='lin')\n", (3964, 4005), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((4042, 4086), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_up.nbin', '(2 * axis.nbin)'], {}), '(axis_up.nbin, 2 * axis.nbin)\n', (4057, 4086), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4091, 4139), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_up.edges[0]', 'axis.edges[0]'], {}), '(axis_up.edges[0], axis.edges[0])\n', (4106, 4139), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4144, 4194), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_up.edges[-1]', 'axis.edges[-1]'], {}), '(axis_up.edges[-1], axis.edges[-1])\n', (4159, 4194), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4294, 4353), 'gammapy.maps.MapAxis.from_nodes', 'MapAxis.from_nodes', (['[0, 1, 3, 7]'], {'name': '"""test"""', 'interp': '"""lin"""'}), "([0, 1, 3, 7], name='test', interp='lin')\n", (4312, 4353), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((4390, 4438), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_up.nbin', '(2 * axis.nbin - 1)'], {}), '(axis_up.nbin, 2 * axis.nbin - 1)\n', (4405, 4438), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4443, 4493), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_up.center[0]', 'axis.center[0]'], {}), '(axis_up.center[0], axis.center[0])\n', (4458, 4493), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4498, 4550), 
'numpy.testing.assert_allclose', 'assert_allclose', (['axis_up.center[-1]', 'axis.center[-1]'], {}), '(axis_up.center[-1], axis.center[-1])\n', (4513, 4550), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4646, 4709), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[0, 1, 3, 7, 13]'], {'name': '"""test"""', 'interp': '"""lin"""'}), "([0, 1, 3, 7, 13], name='test', interp='lin')\n", (4664, 4709), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((4750, 4798), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_down.nbin', '(0.5 * axis.nbin)'], {}), '(axis_down.nbin, 0.5 * axis.nbin)\n', (4765, 4798), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4803, 4853), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_down.edges[0]', 'axis.edges[0]'], {}), '(axis_down.edges[0], axis.edges[0])\n', (4818, 4853), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4858, 4910), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_down.edges[-1]', 'axis.edges[-1]'], {}), '(axis_down.edges[-1], axis.edges[-1])\n', (4873, 4910), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5014, 5076), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[0, 1, 3, 7, 9]'], {'name': '"""test"""', 'interp': '"""lin"""'}), "([0, 1, 3, 7, 9], name='test', interp='lin')\n", (5032, 5076), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((5117, 5165), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_down.nbin', '(0.5 * axis.nbin)'], {}), '(axis_down.nbin, 0.5 * axis.nbin)\n', (5132, 5165), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5170, 5220), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_down.edges[0]', 'axis.edges[0]'], {}), '(axis_down.edges[0], axis.edges[0])\n', (5185, 5220), False, 'from numpy.testing import 
assert_allclose, assert_equal\n'), ((5225, 5277), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_down.edges[-1]', 'axis.edges[-1]'], {}), '(axis_down.edges[-1], axis.edges[-1])\n', (5240, 5277), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5437, 5500), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[0, 1, 3, 7, 13]'], {'name': '"""test"""', 'interp': '"""lin"""'}), "([0, 1, 3, 7, 13], name='test', interp='lin')\n", (5455, 5500), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((5561, 5604), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.edges', 'axis_new.edges'], {}), '(axis.edges, axis_new.edges)\n', (5576, 5604), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5751, 5795), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['group_idx']", '[0, 1]'], {}), "(groups['group_idx'], [0, 1])\n", (5766, 5795), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5800, 5842), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['idx_min']", '[0, 1]'], {}), "(groups['idx_min'], [0, 1])\n", (5815, 5842), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5847, 5889), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['idx_max']", '[0, 8]'], {}), "(groups['idx_max'], [0, 8])\n", (5862, 5889), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5894, 5939), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['energy_min']", '[1, 2]'], {}), "(groups['energy_min'], [1, 2])\n", (5909, 5939), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5944, 5990), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['energy_max']", '[2, 10]'], {}), "(groups['energy_max'], [2, 10])\n", (5959, 5990), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6051, 6095), 'numpy.testing.assert_equal', 'assert_equal', 
(['bin_type', "['normal', 'normal']"], {}), "(bin_type, ['normal', 'normal'])\n", (6063, 6095), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6348, 6398), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['group_idx']", '[0, 1, 2, 3]'], {}), "(groups['group_idx'], [0, 1, 2, 3])\n", (6363, 6398), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6403, 6451), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['idx_min']", '[0, 1, 4, 6]'], {}), "(groups['idx_min'], [0, 1, 4, 6])\n", (6418, 6451), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6456, 6504), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['idx_max']", '[0, 3, 5, 8]'], {}), "(groups['idx_max'], [0, 3, 5, 8])\n", (6471, 6504), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6728, 6797), 'numpy.testing.assert_equal', 'assert_equal', (['bin_type', "['underflow', 'normal', 'normal', 'overflow']"], {}), "(bin_type, ['underflow', 'normal', 'normal', 'overflow'])\n", (6740, 6797), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6955, 6999), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['group_idx']", '[0, 1]'], {}), "(groups['group_idx'], [0, 1])\n", (6970, 6999), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7004, 7046), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['idx_min']", '[0, 3]'], {}), "(groups['idx_min'], [0, 3])\n", (7019, 7046), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7051, 7093), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['idx_max']", '[2, 8]'], {}), "(groups['idx_max'], [2, 8])\n", (7066, 7093), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7098, 7143), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['energy_min']", '[1, 4]'], {}), "(groups['energy_min'], [1, 4])\n", (7113, 7143), False, 'from 
numpy.testing import assert_allclose, assert_equal\n'), ((7148, 7194), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['energy_max']", '[4, 10]'], {}), "(groups['energy_max'], [4, 10])\n", (7163, 7194), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7255, 7301), 'numpy.testing.assert_equal', 'assert_equal', (['bin_type', "['normal', 'overflow']"], {}), "(bin_type, ['normal', 'overflow'])\n", (7267, 7301), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7457, 7504), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['group_idx']", '[0, 1, 2]'], {}), "(groups['group_idx'], [0, 1, 2])\n", (7472, 7504), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7509, 7554), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['idx_min']", '[0, 4, 6]'], {}), "(groups['idx_min'], [0, 4, 6])\n", (7524, 7554), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7559, 7604), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['idx_max']", '[3, 5, 8]'], {}), "(groups['idx_max'], [3, 5, 8])\n", (7574, 7604), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7609, 7657), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['energy_min']", '[1, 5, 7]'], {}), "(groups['energy_min'], [1, 5, 7])\n", (7624, 7657), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7662, 7711), 'numpy.testing.assert_allclose', 'assert_allclose', (["groups['energy_max']", '[5, 7, 10]'], {}), "(groups['energy_max'], [5, 7, 10])\n", (7677, 7711), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7772, 7829), 'numpy.testing.assert_equal', 'assert_equal', (['bin_type', "['underflow', 'normal', 'normal']"], {}), "(bin_type, ['underflow', 'normal', 'normal'])\n", (7784, 7829), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8159, 8210), 'gammapy.maps.MapAxis', 'MapAxis', (['[1, 2, 3]'], 
{'interp': '"""lin"""', 'node_type': '"""edges"""'}), "([1, 2, 3], interp='lin', node_type='edges')\n", (8166, 8210), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((8221, 8274), 'gammapy.maps.MapAxis', 'MapAxis', (['[1.5, 2.5]'], {'interp': '"""log"""', 'node_type': '"""center"""'}), "([1.5, 2.5], interp='log', node_type='center')\n", (8228, 8274), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((8348, 8401), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['"""1 TeV"""', '"""10 TeV"""'], {'nbin': '(1)'}), "('1 TeV', '10 TeV', nbin=1)\n", (8374, 8401), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((8447, 8498), 'numpy.testing.assert_allclose', 'assert_allclose', (['padded.edges', '([1, 10, 100] * u.TeV)'], {}), '(padded.edges, [1, 10, 100] * u.TeV)\n', (8462, 8498), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8544, 8595), 'numpy.testing.assert_allclose', 'assert_allclose', (['padded.edges', '([0.1, 1, 10] * u.TeV)'], {}), '(padded.edges, [0.1, 1, 10] * u.TeV)\n', (8559, 8595), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8636, 8692), 'numpy.testing.assert_allclose', 'assert_allclose', (['padded.edges', '([0.1, 1, 10, 100] * u.TeV)'], {}), '(padded.edges, [0.1, 1, 10, 100] * u.TeV)\n', (8651, 8692), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8733, 8786), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['"""1 TeV"""', '"""10 TeV"""'], {'nbin': '(1)'}), "('1 TeV', '10 TeV', nbin=1)\n", (8759, 8786), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((8800, 8857), 'gammapy.maps.MapAxis.from_bounds', 'MapAxis.from_bounds', (['(0)', '(1)'], {'nbin': '(2)', 'unit': '"""deg"""', 'name': '"""rad"""'}), "(0, 1, nbin=2, unit='deg', name='rad')\n", (8819, 
8857), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((8870, 8895), 'gammapy.maps.MapAxes', 'MapAxes', (['[axis_1, axis_2]'], {}), '([axis_1, axis_2])\n', (8877, 8895), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((8955, 9019), 'numpy.testing.assert_allclose', 'assert_allclose', (["axes['energy'].edges", '([0.1, 1, 10, 100] * u.TeV)'], {}), "(axes['energy'].edges, [0.1, 1, 10, 100] * u.TeV)\n", (8970, 9019), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9145, 9174), 'gammapy.maps.MapAxis', 'MapAxis', (['edges'], {'interp': 'interp'}), '(edges, interp=interp)\n', (9152, 9174), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((9179, 9213), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.edges', 'edges'], {}), '(axis.edges, edges)\n', (9194, 9213), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9531, 9571), 'gammapy.maps.MapAxis.from_nodes', 'MapAxis.from_nodes', (['nodes'], {'interp': 'interp'}), '(nodes, interp=interp)\n', (9549, 9571), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((9576, 9611), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.center', 'nodes'], {}), '(axis.center, nodes)\n', (9591, 9611), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9925, 9983), 'gammapy.maps.MapAxis.from_bounds', 'MapAxis.from_bounds', (['nodes[0]', 'nodes[-1]', '(3)'], {'interp': 'interp'}), '(nodes[0], nodes[-1], 3, interp=interp)\n', (9944, 9983), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((9988, 10028), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.edges[0]', 'nodes[0]'], {}), '(axis.edges[0], nodes[0])\n', (10003, 10028), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10033, 10075), 
'numpy.testing.assert_allclose', 'assert_allclose', (['axis.edges[-1]', 'nodes[-1]'], {}), '(axis.edges[-1], nodes[-1])\n', (10048, 10075), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10080, 10109), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.nbin', '(3)'], {}), '(axis.nbin, 3)\n', (10095, 10109), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10570, 10620), 'gammapy.maps.MapAxis', 'MapAxis', (['nodes'], {'interp': 'interp', 'node_type': 'node_type'}), '(nodes, interp=interp, node_type=node_type)\n', (10577, 10620), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((10968, 11018), 'gammapy.maps.MapAxis', 'MapAxis', (['nodes'], {'interp': 'interp', 'node_type': 'node_type'}), '(nodes, interp=interp, node_type=node_type)\n', (10975, 11018), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((11247, 11297), 'gammapy.maps.MapAxis', 'MapAxis', (['nodes'], {'interp': 'interp', 'node_type': 'node_type'}), '(nodes, interp=interp, node_type=node_type)\n', (11254, 11297), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((11338, 11368), 'numpy.testing.assert_allclose', 'assert_allclose', (['saxis.nbin', '(2)'], {}), '(saxis.nbin, 2)\n', (11353, 11368), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11441, 11491), 'gammapy.maps.MapAxis', 'MapAxis', (['nodes'], {'interp': 'interp', 'node_type': 'node_type'}), '(nodes, interp=interp, node_type=node_type)\n', (11448, 11491), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((11535, 11577), 'numpy.testing.assert_allclose', 'assert_allclose', (['saxis.nbin', '(axis.nbin - 1)'], {}), '(saxis.nbin, axis.nbin - 1)\n', (11550, 11577), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11653, 11703), 'gammapy.maps.MapAxis', 'MapAxis', (['nodes'], 
{'interp': 'interp', 'node_type': 'node_type'}), '(nodes, interp=interp, node_type=node_type)\n', (11660, 11703), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((11747, 11777), 'numpy.testing.assert_allclose', 'assert_allclose', (['saxis.nbin', '(2)'], {}), '(saxis.nbin, 2)\n', (11762, 11777), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11853, 11903), 'gammapy.maps.MapAxis', 'MapAxis', (['nodes'], {'interp': 'interp', 'node_type': 'node_type'}), '(nodes, interp=interp, node_type=node_type)\n', (11860, 11903), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((11948, 11990), 'numpy.testing.assert_allclose', 'assert_allclose', (['saxis.nbin', '(axis.nbin - 1)'], {}), '(saxis.nbin, axis.nbin - 1)\n', (11963, 11990), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12102, 12158), 'gammapy.maps.MapAxis.from_nodes', 'MapAxis.from_nodes', (['[0, 1, 2]'], {'unit': '"""deg"""', 'name': '"""offset"""'}), "([0, 1, 2], unit='deg', name='offset')\n", (12120, 12158), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((12237, 12286), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.center', 'axis.as_plot_center'], {}), '(axis.center, axis.as_plot_center)\n', (12252, 12286), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12291, 12338), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.edges', 'axis.as_plot_edges'], {}), '(axis.edges, axis.as_plot_edges)\n', (12306, 12338), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12388, 12478), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', (["time_intervals['t_min']", "time_intervals['t_max']", "time_intervals['t_ref']"], {}), "(time_intervals['t_min'], time_intervals['t_max'],\n time_intervals['t_ref'])\n", (12399, 12478), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, 
RegionNDMap, TimeMapAxis\n'), ((12680, 12737), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.time_mid[0].mjd', '(58927.020833333336)'], {}), '(axis.time_mid[0].mjd, 58927.020833333336)\n', (12695, 12737), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12983, 13016), 'numpy.testing.assert_allclose', 'assert_allclose', (['ax_cont.nbin', '(39)'], {}), '(ax_cont.nbin, 39)\n', (12998, 13016), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((13081, 13204), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', ([], {'edges_min': "time_interval['t_min']", 'edges_max': "time_interval['t_max']", 'reference_time': "time_interval['t_ref']"}), "(edges_min=time_interval['t_min'], edges_max=time_interval[\n 't_max'], reference_time=time_interval['t_ref'])\n", (13092, 13204), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((13430, 13474), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.time_mid[0].mjd', '(58933)'], {}), '(axis.time_mid[0].mjd, 58933)\n', (13445, 13474), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((13550, 13580), 'numpy.testing.assert_allclose', 'assert_allclose', (['pix_min', '(-0.5)'], {}), '(pix_min, -0.5)\n', (13565, 13580), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((13656, 13685), 'numpy.testing.assert_allclose', 'assert_allclose', (['pix_max', '(0.5)'], {}), '(pix_max, 0.5)\n', (13671, 13685), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((13691, 13733), 'numpy.testing.assert_allclose', 'assert_allclose', (['pix', '[0.15, 0.35, np.nan]'], {}), '(pix, [0.15, 0.35, np.nan])\n', (13706, 13733), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((13796, 13886), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', (["time_intervals['t_min']", "time_intervals['t_max']", "time_intervals['t_ref']"], {}), "(time_intervals['t_min'], time_intervals['t_max'],\n 
time_intervals['t_ref'])\n", (13807, 13886), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((14008, 14059), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_squash.time_min[0].mjd', '(58927)'], {}), '(axis_squash.time_min[0].mjd, 58927)\n', (14023, 14059), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((14324, 14342), 'astropy.time.Time', 'Time', (['"""2020-03-19"""'], {}), "('2020-03-19')\n", (14328, 14342), False, 'from astropy.time import Time\n'), ((14429, 14470), 'gammapy.maps.TimeMapAxis.from_time_edges', 'TimeMapAxis.from_time_edges', (['t_min', 't_max'], {}), '(t_min, t_max)\n', (14456, 14470), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((14484, 14535), 'gammapy.maps.TimeMapAxis.from_time_edges', 'TimeMapAxis.from_time_edges', (['t_min', 't_max'], {'unit': '"""h"""'}), "(t_min, t_max, unit='h')\n", (14511, 14535), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((14599, 14644), 'gammapy.utils.testing.assert_time_allclose', 'assert_time_allclose', (['axis.reference_time', 't0'], {}), '(axis.reference_time, t0)\n', (14619, 14644), False, 'from gammapy.utils.testing import assert_time_allclose, requires_data, requires_dependency, mpl_plot_check\n'), ((14706, 14763), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.time_mid[0].mjd', '(58927.020833333336)'], {}), '(axis.time_mid[0].mjd, 58927.020833333336)\n', (14721, 14763), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((14824, 14883), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis_h.time_mid[0].mjd', '(58927.020833333336)'], {}), '(axis_h.time_mid[0].mjd, 58927.020833333336)\n', (14839, 14883), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((15883, 15925), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', (['tmin', 'tmax', 'tref'], {'name': '"""time"""'}), "(tmin, 
tmax, tref, name='time')\n", (15894, 15925), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((15938, 15976), 'astropy.time.Time', 'Time', (['(58927.020833333336)'], {'format': '"""mjd"""'}), "(58927.020833333336, format='mjd')\n", (15942, 15976), False, 'from astropy.time import Time\n'), ((16252, 16323), 'numpy.testing.assert_allclose', 'assert_allclose', (['indices[1::2]', '[-1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19]'], {}), '(indices[1::2], [-1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19])\n', (16267, 16323), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((16328, 16361), 'numpy.testing.assert_allclose', 'assert_allclose', (['indices[::2]', '(-1)'], {}), '(indices[::2], -1)\n', (16343, 16361), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((16366, 16401), 'numpy.testing.assert_allclose', 'assert_allclose', (['pix', '(0)'], {'atol': '(1e-10)'}), '(pix, 0, atol=1e-10)\n', (16381, 16401), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((16406, 16480), 'numpy.testing.assert_allclose', 'assert_allclose', (['pixels[1::2]', '[np.nan, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19]'], {}), '(pixels[1::2], [np.nan, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19])\n', (16421, 16480), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((16536, 16626), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', (["time_intervals['t_min']", "time_intervals['t_max']", "time_intervals['t_ref']"], {}), "(time_intervals['t_min'], time_intervals['t_max'],\n time_intervals['t_ref'])\n", (16547, 16626), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((16739, 16794), 'numpy.testing.assert_allclose', 'assert_allclose', (['squashed.time_max[0].mjd', '(58937.041667)'], {}), '(squashed.time_max[0].mjd, 58937.041667)\n', (16754, 16794), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((16829, 16884), 'numpy.testing.assert_allclose', 
'assert_allclose', (['squashed.time_max[0].mjd', '(58937.041667)'], {}), '(squashed.time_max[0].mjd, 58937.041667)\n', (16844, 16884), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((16942, 16973), 'astropy.time.Time', 'Time', (['"""2006-02-12"""'], {'scale': '"""utc"""'}), "('2006-02-12', scale='utc')\n", (16946, 16973), False, 'from astropy.time import Time\n'), ((17015, 17083), 'gammapy.maps.TimeMapAxis.from_time_bounds', 'TimeMapAxis.from_time_bounds', ([], {'time_min': 't_min', 'time_max': 't_max', 'nbin': '(3)'}), '(time_min=t_min, time_max=t_max, nbin=3)\n', (17043, 17083), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((17088, 17162), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.center', '([0.083333, 0.25, 0.416667] * u.d)'], {'rtol': '(1e-05)'}), '(axis.center, [0.083333, 0.25, 0.416667] * u.d, rtol=1e-05)\n', (17103, 17162), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17206, 17237), 'astropy.time.Time', 'Time', (['"""2006-02-12"""'], {'scale': '"""utc"""'}), "('2006-02-12', scale='utc')\n", (17210, 17237), False, 'from astropy.time import Time\n'), ((17321, 17328), 'astropy.table.Table', 'Table', ([], {}), '()\n', (17326, 17328), False, 'from astropy.table import Table\n'), ((17493, 17537), 'gammapy.maps.TimeMapAxis.from_table', 'TimeMapAxis.from_table', (['table'], {'format': '"""gadf"""'}), "(table, format='gadf')\n", (17515, 17537), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((17570, 17617), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.time_mid[0].mjd', '(53778.25)'], {}), '(axis.time_mid[0].mjd, 53778.25)\n', (17585, 17617), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17767, 17786), 'gammapy.utils.scripts.make_path', 'make_path', (['filename'], {}), '(filename)\n', (17776, 17786), False, 'from gammapy.utils.scripts import make_path\n'), ((17797, 
17815), 'gammapy.data.GTI.read', 'GTI.read', (['filename'], {}), '(filename)\n', (17805, 17815), False, 'from gammapy.data import GTI\n'), ((17828, 17853), 'gammapy.maps.TimeMapAxis.from_gti', 'TimeMapAxis.from_gti', (['gti'], {}), '(gti)\n', (17848, 17853), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((17869, 17919), 'astropy.time.Time', 'Time', (['(53090.123451203704)'], {'format': '"""mjd"""', 'scale': '"""tt"""'}), "(53090.123451203704, format='mjd', scale='tt')\n", (17873, 17919), False, 'from astropy.time import Time\n'), ((17924, 17972), 'gammapy.utils.testing.assert_time_allclose', 'assert_time_allclose', (['axis.time_min[0]', 'expected'], {}), '(axis.time_min[0], expected)\n', (17944, 17972), False, 'from gammapy.utils.testing import assert_time_allclose, requires_data, requires_dependency, mpl_plot_check\n'), ((18062, 18152), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', (["time_intervals['t_min']", "time_intervals['t_max']", "time_intervals['t_ref']"], {}), "(time_intervals['t_min'], time_intervals['t_max'],\n time_intervals['t_ref'])\n", (18073, 18152), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((18181, 18231), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['(0.1)', '(10)', '(2)'], {'unit': '"""TeV"""'}), "(0.1, 10, 2, unit='TeV')\n", (18207, 18231), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((18249, 18334), 'gammapy.maps.RegionNDMap.create', 'RegionNDMap.create', ([], {'region': '"""fk5; circle(0,0,0.1)"""', 'axes': '[energy_axis, time_axis]'}), "(region='fk5; circle(0,0,0.1)', axes=[energy_axis, time_axis]\n )\n", (18267, 18334), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((18452, 18489), 'astropy.time.Time', 'Time', (['"""1999-01-01T00:00:00.123456789"""'], {}), "('1999-01-01T00:00:00.123456789')\n", (18456, 18489), 
False, 'from astropy.time import Time\n'), ((18507, 18607), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', ([], {'edges_min': '([0, 1, 3] * u.d)', 'edges_max': '([0.8, 1.9, 5.4] * u.d)', 'reference_time': 'time_ref'}), '(edges_min=[0, 1, 3] * u.d, edges_max=[0.8, 1.9, 5.4] * u.d,\n reference_time=time_ref)\n', (18518, 18607), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((18953, 18994), 'gammapy.maps.MapAxis.from_energy_edges', 'MapAxis.from_energy_edges', (['([1, 3] * u.TeV)'], {}), '([1, 3] * u.TeV)\n', (18978, 18994), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((19011, 19048), 'astropy.time.Time', 'Time', (['"""1999-01-01T00:00:00.123456789"""'], {}), "('1999-01-01T00:00:00.123456789')\n", (19015, 19048), False, 'from astropy.time import Time\n'), ((19066, 19166), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', ([], {'edges_min': '([0, 1, 3] * u.d)', 'edges_max': '([0.8, 1.9, 5.4] * u.d)', 'reference_time': 'time_ref'}), '(edges_min=[0, 1, 3] * u.d, edges_max=[0.8, 1.9, 5.4] * u.d,\n reference_time=time_ref)\n', (19077, 19166), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((19206, 19239), 'gammapy.maps.MapAxes', 'MapAxes', (['[energy_axis, time_axis]'], {}), '([energy_axis, time_axis])\n', (19213, 19239), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((19418, 19457), 'gammapy.maps.MapAxis.from_bounds', 'MapAxis.from_bounds', (['(1)', '(4)', '(3)'], {'name': '"""a1"""'}), "(1, 4, 3, name='a1')\n", (19437, 19457), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((19537, 19567), 'gammapy.maps.MapAxes', 'MapAxes', (['[axis1, axis2, axis3]'], {}), '([axis1, axis2, axis3])\n', (19544, 19567), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((19929, 19991), 'gammapy.maps.LabelMapAxis', 
'LabelMapAxis', ([], {'labels': "['label-1', 'label-2']", 'name': '"""label-axis"""'}), "(labels=['label-1', 'label-2'], name='label-axis')\n", (19941, 19991), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((20257, 20291), 'numpy.testing.assert_allclose', 'assert_allclose', (['axis.bin_width', '(1)'], {}), '(axis.bin_width, 1)\n', (20272, 20291), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((20510, 20583), 'gammapy.maps.LabelMapAxis', 'LabelMapAxis', ([], {'labels': "['label-1', 'label-2', 'label-3']", 'name': '"""label-axis"""'}), "(labels=['label-1', 'label-2', 'label-3'], name='label-axis')\n", (20522, 20583), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((20654, 20677), 'numpy.testing.assert_allclose', 'assert_allclose', (['idx', '(0)'], {}), '(idx, 0)\n', (20669, 20677), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((20761, 20789), 'numpy.testing.assert_allclose', 'assert_allclose', (['idx', '[0, 2]'], {}), '(idx, [0, 2])\n', (20776, 20789), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((20877, 20909), 'numpy.testing.assert_allclose', 'assert_allclose', (['idx', '[[0], [1]]'], {}), '(idx, [[0], [1]])\n', (20892, 20909), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((21079, 21147), 'gammapy.maps.LabelMapAxis', 'LabelMapAxis', ([], {'labels': "['label-1', 'label-2', 'label-3']", 'name': '"""label"""'}), "(labels=['label-1', 'label-2', 'label-3'], name='label')\n", (21091, 21147), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((21316, 21369), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['"""1 TeV"""', '"""10 TeV"""'], {'nbin': '(4)'}), "('1 TeV', '10 TeV', nbin=4)\n", (21342, 21369), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((21382, 21432), 
'gammapy.maps.MapAxes', 'MapAxes', ([], {'axes': '[energy_axis, time_axis, label_axis]'}), '(axes=[energy_axis, time_axis, label_axis])\n', (21389, 21432), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((21881, 21896), 'astropy.table.Table.read', 'Table.read', (['hdu'], {}), '(hdu)\n', (21891, 21896), False, 'from astropy.table import Table\n'), ((22099, 22198), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['"""0.03 TeV"""', '"""300 TeV"""'], {'nbin': '(20)', 'per_decade': '(True)', 'name': '"""energy_true"""'}), "('0.03 TeV', '300 TeV', nbin=20, per_decade=True,\n name='energy_true')\n", (22125, 22198), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((22616, 22719), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', (["time_intervals['t_min']", "time_intervals['t_max']", "time_intervals['t_ref']"], {'name': '"""time"""'}), "(time_intervals['t_min'], time_intervals['t_max'],\n time_intervals['t_ref'], name='time')\n", (22627, 22719), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((628, 660), 'numpy.array', 'np.array', (['[0.25, 0.75, 1.0, 2.0]'], {}), '([0.25, 0.75, 1.0, 2.0])\n', (636, 660), True, 'import numpy as np\n'), ((675, 707), 'numpy.array', 'np.array', (['[0.25, 0.75, 1.0, 2.0]'], {}), '([0.25, 0.75, 1.0, 2.0])\n', (683, 707), True, 'import numpy as np\n'), ((722, 754), 'numpy.array', 'np.array', (['[0.25, 0.75, 1.0, 2.0]'], {}), '([0.25, 0.75, 1.0, 2.0])\n', (730, 754), True, 'import numpy as np\n'), ((1700, 1722), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(20)'], {}), '(0, 10, 20)\n', (1711, 1722), True, 'import numpy as np\n'), ((2049, 2065), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (2058, 2065), True, 'import numpy as np\n'), ((7935, 7960), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7948, 7960), False, 'import pytest\n'), 
((8055, 8080), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8068, 8080), False, 'import pytest\n'), ((8094, 8117), 'gammapy.maps.MapAxis.from_nodes', 'MapAxis.from_nodes', (['[1]'], {}), '([1])\n', (8112, 8117), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((9270, 9295), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9283, 9295), False, 'import pytest\n'), ((9305, 9328), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[1]'], {}), '([1])\n', (9323, 9328), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((9337, 9369), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[0, 1, 1, 2]'], {}), '([0, 1, 1, 2])\n', (9355, 9369), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((9378, 9410), 'gammapy.maps.MapAxis.from_edges', 'MapAxis.from_edges', (['[0, 1, 3, 2]'], {}), '([0, 1, 3, 2])\n', (9396, 9410), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((9664, 9689), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9677, 9689), False, 'import pytest\n'), ((9699, 9721), 'gammapy.maps.MapAxis.from_nodes', 'MapAxis.from_nodes', (['[]'], {}), '([])\n', (9717, 9721), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((9730, 9762), 'gammapy.maps.MapAxis.from_nodes', 'MapAxis.from_nodes', (['[0, 1, 1, 2]'], {}), '([0, 1, 1, 2])\n', (9748, 9762), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((9771, 9803), 'gammapy.maps.MapAxis.from_nodes', 'MapAxis.from_nodes', (['[0, 1, 3, 2]'], {}), '([0, 1, 3, 2])\n', (9789, 9803), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((10119, 10144), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', 
(10132, 10144), False, 'import pytest\n'), ((10154, 10182), 'gammapy.maps.MapAxis.from_bounds', 'MapAxis.from_bounds', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (10173, 10182), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((10233, 10258), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10246, 10258), False, 'import pytest\n'), ((10272, 10322), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['(0.1)', '(10)', '(2)'], {'unit': '"""deg"""'}), "(0.1, 10, 2, unit='deg')\n", (10298, 10322), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((10333, 10358), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10346, 10358), False, 'import pytest\n'), ((10372, 10419), 'gammapy.maps.MapAxis.from_energy_edges', 'MapAxis.from_energy_edges', (['([0.1, 1, 10] * u.deg)'], {}), '([0.1, 1, 10] * u.deg)\n', (10397, 10419), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((11039, 11070), 'numpy.arange', 'np.arange', (['axis.nbin'], {'dtype': 'int'}), '(axis.nbin, dtype=int)\n', (11048, 11070), True, 'import numpy as np\n'), ((12819, 12844), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12832, 12844), False, 'import pytest\n'), ((13244, 13269), 'astropy.time.Time', 'Time', (['(58933)'], {'format': '"""mjd"""'}), "(58933, format='mjd')\n", (13248, 13269), False, 'from astropy.time import Time\n'), ((13272, 13308), 'astropy.units.Quantity', 'u.Quantity', (['[1.5, 3.5, 10]'], {'unit': '"""d"""'}), "([1.5, 3.5, 10], unit='d')\n", (13282, 13308), True, 'import astropy.units as u\n'), ((14955, 14977), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(20)'], {}), '(0, 10, 20)\n', (14966, 14977), True, 'import numpy as np\n'), ((14995, 15017), 'numpy.linspace', 'np.linspace', (['(1)', '(11)', '(20)'], {}), '(1, 11, 20)\n', (15006, 15017), True, 
'import numpy as np\n'), ((15065, 15090), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15078, 15090), False, 'import pytest\n'), ((15100, 15164), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', (['tmin', 'tmax'], {'reference_time': '(51000 * u.d)', 'name': '"""time"""'}), "(tmin, tmax, reference_time=51000 * u.d, name='time')\n", (15111, 15164), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((15208, 15233), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15221, 15233), False, 'import pytest\n'), ((15536, 15561), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15549, 15561), False, 'import pytest\n'), ((15571, 15621), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', (['tmin', 'tmax_reverse', 'tref'], {'name': '"""time"""'}), "(tmin, tmax_reverse, tref, name='time')\n", (15582, 15621), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((15632, 15657), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15645, 15657), False, 'import pytest\n'), ((15667, 15715), 'gammapy.maps.TimeMapAxis', 'TimeMapAxis', (['tmin', 'tmax_short', 'tref'], {'name': '"""time"""'}), "(tmin, tmax_short, tref, name='time')\n", (15678, 15715), False, 'from gammapy.maps import LabelMapAxis, MapAxes, MapAxis, RegionNDMap, TimeMapAxis\n'), ((17250, 17272), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(10)'], {}), '(0, 10, 10)\n', (17261, 17272), True, 'import numpy as np\n'), ((17411, 17431), 'gammapy.utils.time.time_ref_to_dict', 'time_ref_to_dict', (['t0'], {}), '(t0)\n', (17427, 17431), False, 'from gammapy.utils.time import time_ref_to_dict\n'), ((20128, 20153), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (20141, 20153), False, 'import pytest\n'), ((20340, 20365), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (20353, 20365), False, 'import 
pytest\n'), ((20920, 20945), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (20933, 20945), False, 'import pytest\n'), ((21933, 21948), 'numpy.dtype', 'np.dtype', (['"""<U7"""'], {}), "('<U7')\n", (21941, 21948), True, 'import numpy as np\n'), ((22230, 22246), 'gammapy.utils.testing.mpl_plot_check', 'mpl_plot_check', ([], {}), '()\n', (22244, 22246), False, 'from gammapy.utils.testing import assert_time_allclose, requires_data, requires_dependency, mpl_plot_check\n'), ((22261, 22270), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22268, 22270), True, 'import matplotlib.pyplot as plt\n'), ((22740, 22756), 'gammapy.utils.testing.mpl_plot_check', 'mpl_plot_check', ([], {}), '()\n', (22754, 22756), False, 'from gammapy.utils.testing import assert_time_allclose, requires_data, requires_dependency, mpl_plot_check\n'), ((22771, 22780), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22778, 22780), True, 'import matplotlib.pyplot as plt\n'), ((10672, 10705), 'numpy.arange', 'np.arange', (['axis.nbin'], {'dtype': 'float'}), '(axis.nbin, dtype=float)\n', (10681, 10705), True, 'import numpy as np\n'), ((10737, 10774), 'numpy.arange', 'np.arange', (['(axis.nbin + 1)'], {'dtype': 'float'}), '(axis.nbin + 1, dtype=float)\n', (10746, 10774), True, 'import numpy as np\n'), ((14360, 14382), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(20)'], {}), '(0, 10, 20)\n', (14371, 14382), True, 'import numpy as np\n'), ((21271, 21289), 'astropy.time.Time', 'Time', (['"""2020-03-19"""'], {}), "('2020-03-19')\n", (21275, 21289), False, 'from astropy.time import Time\n'), ((22284, 22302), 'astropy.visualization.quantity_support', 'quantity_support', ([], {}), '()\n', (22300, 22302), False, 'from astropy.visualization import quantity_support\n'), ((22794, 22812), 'astropy.visualization.quantity_support', 'quantity_support', ([], {}), '()\n', (22810, 22812), False, 'from astropy.visualization import quantity_support\n'), ((15282, 15292), 
'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (15290, 15292), False, 'from astropy.time import Time\n'), ((21669, 21681), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (21678, 21681), True, 'import numpy as np\n'), ((21730, 21742), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (21739, 21742), True, 'import numpy as np\n'), ((21791, 21803), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (21800, 21803), True, 'import numpy as np\n'), ((22337, 22362), 'numpy.ones_like', 'np.ones_like', (['axis.center'], {}), '(axis.center)\n', (22349, 22362), True, 'import numpy as np\n'), ((22847, 22872), 'numpy.ones_like', 'np.ones_like', (['axis.center'], {}), '(axis.center)\n', (22859, 22872), True, 'import numpy as np\n')] |
import artm
import gc
import json
import logging
import numpy as np
import os
import pandas as pd
import sys
import tempfile
import tqdm
import warnings
from collections import defaultdict
from distutils.util import strtobool
from topicnet.cooking_machine.dataset import Dataset
from topicnet.cooking_machine.models import TopicModel
from typing import (
Callable,
Dict,
List,
Tuple,
Union
)
from topnum.data.vowpal_wabbit_text_collection import VowpalWabbitTextCollection
from topnum.scores._base_coherence_score import (
SpecificityEstimationMethod,
TextType,
WordTopicRelatednessType
)
from topnum.scores.base_score import BaseScore
from topnum.scores.base_topic_score import BaseTopicScore
from topnum.scores import (
IntratextCoherenceScore,
PerplexityScore,
SophisticatedTopTokensCoherenceScore,
SparsityPhiScore,
SparsityThetaScore
)
from topnum.scores.intratext_coherence_score import ComputationMethod
from topnum.search_methods.base_search_method import BaseSearchMethod
from topnum.search_methods.constants import (
DEFAULT_MAX_NUM_TOPICS,
DEFAULT_MIN_NUM_TOPICS,
DEFAULT_NUM_FIT_ITERATIONS
)
from topnum.search_methods.base_search_method import (
_KEY_OPTIMUM,
_STD_KEY_SUFFIX
)
from topnum.search_methods.topic_bank.bank_update_method import BankUpdateMethod
from topnum.search_methods.topic_bank.topic_bank import (
TopicBank,
TokenType
)
from topnum.search_methods.topic_bank.one_model_train_funcs import (
default_train_func,
_get_topic_model
)
from topnum.search_methods.topic_bank.phi_initialization.utils import _safe_copy_phi
# Keys used when serializing bank-level and model-level score results
# (e.g. to the JSON save file / result dictionaries).
_KEY_BANK_SCORES = 'bank_scores'
_KEY_BANK_TOPIC_SCORES = 'bank_topic_scores'
_KEY_MODEL_SCORES = 'model_scores'
_KEY_MODEL_TOPIC_SCORES = 'model_topic_scores'
_KEY_NUM_BANK_TOPICS = 'num_bank_topics'
_KEY_NUM_MODEL_TOPICS = 'num_model_topics'

# Keys for per-topic score entries.
_KEY_TOPIC_SCORE_DISTANCE_TO_NEAREST = 'distance_to_nearest'
_KEY_TOPIC_SCORE_KERNEL_SIZE = 'kernel_size'

# Default window size used by score computations.
# NOTE(review): exact consumer not visible in this chunk — confirm against callers.
_DEFAULT_WINDOW = 20

# Module-level logger. Use the module's dotted name rather than the bare
# root logger (the previous `logging.getLogger()` form) so that handlers
# and levels can be configured per-module by applications embedding this
# library; all `_logger.info(...)` call sites keep working unchanged.
_logger = logging.getLogger(__name__)
class TopicBankMethod(BaseSearchMethod):
_MINIMUM_TOPIC_DISTANCE = 0.0
_MAXIMUM_TOPIC_DISTANCE = 1.0
def __init__(
self,
data: Union[Dataset, VowpalWabbitTextCollection],
main_modality: str = None,
min_df_rate: float = 0.01,
max_df_rate: float = 0.9,
main_topic_score: BaseTopicScore = None,
other_topic_scores: List[BaseTopicScore] = None,
stop_bank_score: BaseScore = None,
other_scores: List[BaseScore] = None,
documents: List[str] = None,
documents_fraction_for_topic_scores: float = 0.2,
max_num_documents_for_topic_scores: int = 100,
start_model_number: int = 0,
max_num_models: int = 100,
one_model_num_topics: Union[int, List[int]] = 100,
num_fit_iterations: int = DEFAULT_NUM_FIT_ITERATIONS,
train_funcs: Union[
Callable[[Dataset, int, int, int], TopicModel],
List[Callable[[Dataset, int, int, int], TopicModel]],
None] = None,
topic_score_threshold_percentile: int = 95,
distance_threshold: float = 0.5,
bank_update: BankUpdateMethod = BankUpdateMethod.PROVIDE_NON_LINEARITY,
child_parent_relationship_threshold: float = None,
save_file_path: str = None,
save_bank: bool = False,
save_model_topics: bool = False,
bank_folder_path: str = None,
seed: int = None,
verbose: bool = False):
super().__init__(
min_num_topics=DEFAULT_MIN_NUM_TOPICS, # not needed
max_num_topics=DEFAULT_MAX_NUM_TOPICS, # not needed
num_fit_iterations=num_fit_iterations
)
if isinstance(data, Dataset):
self._dataset = data
elif isinstance(data, VowpalWabbitTextCollection):
self._dataset = data._to_dataset()
else:
raise TypeError(f'data: "{data}", its type: "{type(data)}"')
_logger.info(
f'Filtering dictionary with params:'
f' min_df_rate={min_df_rate} and max_df_rate={max_df_rate}'
)
self._dictionary = self._dataset.get_dictionary()
self._dictionary.filter(min_df_rate=min_df_rate, max_df_rate=max_df_rate)
self._dataset._cached_dict = self._dictionary
self._main_modality = main_modality
if main_topic_score is not None:
self._main_topic_score = main_topic_score
else:
self._main_topic_score = IntratextCoherenceScore(
name='intratext_coherence_score',
data=self._dataset,
text_type=TextType.VW_TEXT,
computation_method=ComputationMethod.SEGMENT_WEIGHT,
word_topic_relatedness=WordTopicRelatednessType.PWT,
specificity_estimation=SpecificityEstimationMethod.NONE,
max_num_out_of_topic_words=5,
window=_DEFAULT_WINDOW
)
if other_topic_scores is not None:
self._other_topic_scores = other_topic_scores
else:
self._other_topic_scores = [
SophisticatedTopTokensCoherenceScore(
name='top_tokens_coherence_score',
data=self._dataset,
text_type=TextType.VW_TEXT,
word_topic_relatedness=WordTopicRelatednessType.PWT,
specificity_estimation=SpecificityEstimationMethod.NONE,
num_top_words=10,
window=_DEFAULT_WINDOW
)
]
self._all_topic_scores = [self._main_topic_score] + self._other_topic_scores
if stop_bank_score is not None:
self._stop_bank_score = stop_bank_score
else:
self._stop_bank_score = PerplexityScore(name='perplexity_score')
if other_scores is not None:
self._other_scores = other_scores
else:
self._other_scores = [
SparsityPhiScore(name='sparsity_phi_score'),
SparsityThetaScore(name='sparsity_theta_score')
]
self._all_model_scores = [self._stop_bank_score] + self._other_scores
self._documents = documents
self._documents_fraction_for_topic_scores = documents_fraction_for_topic_scores
self._max_num_documents_for_topic_scores = max_num_documents_for_topic_scores
self._start_model_number = start_model_number
self._max_num_models = max_num_models
if not isinstance(one_model_num_topics, list):
one_model_num_topics = [
one_model_num_topics for _ in range(self._max_num_models)
]
if train_funcs is None:
train_funcs = default_train_func
if not isinstance(train_funcs, list):
train_funcs = [
train_funcs for _ in range(self._max_num_models)
]
self._one_model_num_topics: List[int] = one_model_num_topics
self._train_func: List[Callable[[Dataset, int, int, int], TopicModel]] = train_funcs
if topic_score_threshold_percentile < 1:
warnings.warn(
f'topic_score_threshold_percentile {topic_score_threshold_percentile}'
f' is less than one! It is expected to be in [0, 100].'
f' Are you sure you want to proceed (yes/no)?'
)
answer = input()
if strtobool(answer) is False:
warnings.warn('Exiting')
exit(0)
self._topic_score_threshold_percentile = topic_score_threshold_percentile
if distance_threshold > 1 or distance_threshold < 0:
raise ValueError(f'distance_threshold should be in [0, 1], not {distance_threshold}')
self._distance_threshold = distance_threshold
self._bank_update = bank_update
self._child_parent_relationship_threshold = child_parent_relationship_threshold
need_to_load_results = False
if save_file_path is None:
file_descriptor, save_file_path = tempfile.mkstemp(prefix='topic_bank_result__')
os.close(file_descriptor)
elif not os.path.isdir(os.path.dirname(save_file_path)):
raise NotADirectoryError(f'Directory not found "{save_file_path}"')
elif os.path.isfile(save_file_path):
need_to_load_results = True
else:
pass
self._save_file_path = save_file_path
self._save_bank = save_bank
self._save_model_topics = save_model_topics
self._bank_folder_path = bank_folder_path
self._verbose = verbose
self._random = np.random.RandomState(seed=seed)
self._result = dict()
if need_to_load_results:
warnings.warn(f'File "{save_file_path}" already exists. Loading')
self._load()
else:
self._result[_KEY_OPTIMUM] = None
self._result[_KEY_OPTIMUM + _STD_KEY_SUFFIX] = None
self._result[_KEY_BANK_SCORES] = list()
self._result[_KEY_BANK_TOPIC_SCORES] = list()
self._result[_KEY_MODEL_SCORES] = list()
self._result[_KEY_MODEL_TOPIC_SCORES] = list()
self._result[_KEY_NUM_BANK_TOPICS] = list()
self._result[_KEY_NUM_MODEL_TOPICS] = list()
self._topic_bank = TopicBank(
save=self._save_bank,
save_folder_path=self._bank_folder_path
)
@property
def save_path(self) -> str:
return self._save_file_path
def save(self) -> None:
with open(self._save_file_path, 'w') as f:
f.write(json.dumps(self._result))
def _load(self) -> None:
with open(self._save_file_path, 'rb') as f:
self._result = json.loads(f.read())
def clear(self) -> None:
if os.path.isfile(self._save_file_path):
os.remove(self._save_file_path)
# Seems the Topic Bank itself should stay untouched
def search_for_optimum(self, text_collection: VowpalWabbitTextCollection = None) -> None:
"""
Parameters
----------
text_collection:
Not needed, kept only for compatibility with the base search method
"""
# TODO: simplify
word2index = None
documents_for_coherence = self._select_documents_for_topic_scores()
if not self._verbose:
model_number_range = range(self._start_model_number, self._max_num_models)
else:
model_number_range = tqdm.tqdm(
range(self._start_model_number, self._max_num_models),
total=max(0, self._max_num_models - self._start_model_number),
file=sys.stdout,
)
for model_number in model_number_range:
# TODO: stop when perplexity stabilizes
_logger.info(f'Building topic model number {model_number}...')
topic_model = self._train_func[model_number](
dataset=self._dataset,
model_number=model_number,
num_topics=self._one_model_num_topics[model_number],
num_fit_iterations=self._num_fit_iterations,
scores=self._all_model_scores
)
scores = dict()
_logger.info('Computing scores for one topic model...')
scores.update(self._get_default_scores(topic_model))
raw_topic_scores = self._compute_raw_topic_scores(
topic_model,
documents_for_coherence
)
for score_name, score_values in raw_topic_scores.items():
scores[score_name] = self._aggregate_scores_for_models(
raw_topic_scores[score_name], 50
)
self._result[_KEY_MODEL_SCORES].append(scores)
self._result[_KEY_NUM_MODEL_TOPICS].append(topic_model.get_phi().shape[1])
self.save()
threshold = self._aggregate_scores_for_models(
raw_topic_scores[self._main_topic_score.name],
self._topic_score_threshold_percentile
)
_logger.info('Finding new topics...')
phi = topic_model.get_phi()
if self._main_modality is None:
phi = phi
else:
phi = phi.iloc[phi.index.get_level_values(0).isin([self._main_modality])]
if word2index is None:
word2index = {
word: index for index, word in enumerate(phi.index)
}
_logger.info('Finding topics for append and update...')
if self._bank_update == BankUpdateMethod.JUST_ADD_GOOD_TOPICS:
topics_for_append = list(range(len(phi.columns)))
topics_for_update = dict()
elif self._bank_update == BankUpdateMethod.PROVIDE_NON_LINEARITY:
topics_for_append, topics_for_update = self._extract_hierarchical_relationship(
bank_phi=self._get_phi(self._topic_bank.topics, word2index),
new_model_phi=phi,
psi_threshold=self._child_parent_relationship_threshold
)
else:
raise NotImplementedError(f'BankUpdateMethod: "{self._bank_update}"')
_logger.info('Finding good new topics, updating topics for append and update')
good_new_topics = [
topic_index for topic_index, topic_name in enumerate(phi.columns)
if raw_topic_scores[self._main_topic_score.name][topic_name] is not None and
raw_topic_scores[self._main_topic_score.name][topic_name] >= threshold
]
topics_for_append, topics_for_update, topics_for_update_reverse = (
self._keep_good_new_topics_only(
topics_for_append, topics_for_update, good_new_topics
)
)
model_topic_current_scores = list()
_logger.info('Calculating model topic scores...')
for topic_index, topic_name in enumerate(topic_model.get_phi().columns):
topic_scores = dict()
topic_word_prob_values = topic_model.get_phi()[topic_name].values
num_words = topic_model.get_phi().shape[0]
topic_scores[_KEY_TOPIC_SCORE_KERNEL_SIZE] = len(
topic_word_prob_values[topic_word_prob_values > 1.0 / num_words]
)
for score_name in raw_topic_scores:
topic_scores[score_name] = raw_topic_scores[score_name][topic_name]
model_topic_current_scores.append(topic_scores)
if (topic_index not in topics_for_append and
topic_index not in topics_for_update_reverse):
continue
if topic_index in topics_for_update_reverse:
old_topic_index = topics_for_update_reverse[topic_index]
new_topic_candidates = topics_for_update[old_topic_index]
current_topic_score = topic_scores[self._main_topic_score.name]
current_old_topic_score = self._topic_bank.topic_scores[old_topic_index][self._main_topic_score.name]
if (len(new_topic_candidates) == 1 and
current_topic_score <= current_old_topic_score):
continue
if len(self._topic_bank.topics) == 0:
distance_to_nearest = self._MINIMUM_TOPIC_DISTANCE
else:
distance_to_nearest = (
min(self._jaccard_distance(phi.loc[:, topic_name].to_dict(), bt)
for bt in self._topic_bank.topics)
)
if distance_to_nearest < self._distance_threshold:
continue
topic_scores[_KEY_TOPIC_SCORE_DISTANCE_TO_NEAREST] = distance_to_nearest
self._topic_bank.add_topic(phi.loc[:, topic_name].to_dict(), topic_scores)
if topic_index in topics_for_update_reverse:
# TODO: check this
self._topic_bank.delete_topic(topics_for_update_reverse[topic_index])
self._result[_KEY_MODEL_TOPIC_SCORES].append(model_topic_current_scores)
self._result[_KEY_BANK_TOPIC_SCORES] = self._topic_bank.topic_scores # TODO: append
self.save()
if self._save_model_topics:
self._topic_bank.save_model_topics(
name=f'model_{model_number:0{int(np.log10(self._max_num_models + 1))}}',
model=topic_model,
topic_scores=model_topic_current_scores,
phi=phi,
dataset=self._dataset,
)
_logger.info('Scoring bank model...')
scores = dict()
if len(self._topic_bank.topics) == 0:
_logger.info('No topics in bank — returning empty default scores for bank model')
else:
bank_phi = self._get_phi(self._topic_bank.topics, word2index)
bank_model = _get_topic_model(
self._dataset,
phi=bank_phi,
scores=self._all_model_scores,
num_safe_fit_iterations=1
)
bank_model._fit(self._dataset.get_batch_vectorizer(), 1)
_logger.info('Computing default scores for bank model...')
scores.update(self._get_default_scores(bank_model))
# Topic scores already calculated
self._result[_KEY_BANK_SCORES].append(scores)
self._result[_KEY_NUM_BANK_TOPICS].append(len(self._topic_bank.topics))
_logger.info(f'Num topics in bank: {len(self._topic_bank.topics)}')
self.save()
self._result[_KEY_OPTIMUM] = self._result[_KEY_NUM_BANK_TOPICS][-1]
# TODO: refine computing when do early stop
if len(self._result[_KEY_NUM_BANK_TOPICS]) <= 1: # TODO: can be zero?
self._result[_KEY_OPTIMUM + _STD_KEY_SUFFIX] = self._result[_KEY_OPTIMUM]
else:
differences = list()
max_num_last_values = 5
model_number = len(self._result[_KEY_NUM_BANK_TOPICS]) - 1
while model_number > 0 and len(differences) < max_num_last_values:
differences.append(abs(
self._result[_KEY_NUM_BANK_TOPICS][-model_number] -
self._result[_KEY_NUM_BANK_TOPICS][-model_number - 1]
))
self._result[_KEY_OPTIMUM + _STD_KEY_SUFFIX] = float(np.sum(differences))
self.save()
def _select_documents_for_topic_scores(self) -> List[str]:
if self._documents is not None:
return self._documents
document_ids = list(self._dataset._data.index)
num_documents = len(document_ids)
selected_documents = self._random.choice(
document_ids,
size=min(
self._max_num_documents_for_topic_scores,
int(self._documents_fraction_for_topic_scores * num_documents)
),
replace=False
)
self._documents = list(selected_documents)
return self._documents
def _extract_hierarchical_relationship(
self,
bank_phi: pd.DataFrame,
new_model_phi: pd.DataFrame,
psi_threshold: float = None
) -> Tuple[List[int], Dict[int, List[int]]]:
if bank_phi.shape[1] == 0:
return list(range(new_model_phi.shape[1])), dict()
assert bank_phi.shape[0] == new_model_phi.shape[0]
# TODO: think about bank_phi.shape[1] == 1: alright to proceed?
_logger.debug('Creating hARTM')
hierarchy = artm.hARTM(num_processors=1)
_logger.debug(f'Creating first level with {bank_phi.shape[1]} topics')
level0 = hierarchy.add_level(
num_topics=bank_phi.shape[1]
)
level0.initialize(dictionary=self._dictionary)
_logger.debug(
f'Copying phi for the first level.'
f' Phi shape: {bank_phi.shape}.'
f' First words: {bank_phi.index[:10]}'
)
phi_ref0 = _safe_copy_phi(
level0, bank_phi, self._dataset,
small_num_fit_iterations=1
)
_logger.debug(f'Creating second level with {new_model_phi.shape[1]} topics')
level1 = hierarchy.add_level(
num_topics=new_model_phi.shape[1],
parent_level_weight=1
)
level1.initialize(dictionary=self._dictionary)
# Regularizer may help to refine new topics a bit
# in search of parent-child relationship
# However, the regularizer won't affect the topics themselves,
# only the ARTM hierarchy defined here.
_logger.debug('Adding HierarchySparsingThetaRegularizer to second level')
# TODO: or smaller tau? or without regularizer at all? or change the real topics?
level1.regularizers.add(
artm.HierarchySparsingThetaRegularizer(
name='sparse_hierarchy',
tau=1.0
)
)
_logger.debug(
f'Copying phi for the second level.'
f' Phi shape: {new_model_phi.shape}.'
f' First words: {new_model_phi.index[:10]}'
)
phi_ref1 = _safe_copy_phi(
level1, new_model_phi, self._dataset,
small_num_fit_iterations=3
)
psi = level1.get_psi()
assert psi.shape[0] == new_model_phi.shape[1]
assert psi.shape[1] == bank_phi.shape[1]
if psi_threshold is None:
psi_threshold = 1.0 / psi.shape[0]
topics_for_append: List[int] = list()
topics_for_update: Dict[int, List[int]] = defaultdict(list)
_logger.debug('Analyzing Psi for parent-child relationship')
for new_topic in range(level1.get_phi().shape[1]):
psi_row = psi.iloc[new_topic, :]
parents = np.where(psi_row > psi_threshold)[0]
if len(parents) > 1:
pass # linearly dependent -> skip
elif len(parents) == 0:
topics_for_append.append(new_topic)
elif len(parents) == 1:
topics_for_update[parents[0]].append(new_topic)
else:
assert False
_logger.debug('Deleting hARTM')
hierarchy.del_level(1)
hierarchy.del_level(0)
del phi_ref1
del phi_ref0
del hierarchy
gc.collect()
return topics_for_append, topics_for_update
@staticmethod
def _keep_good_new_topics_only(
topics_for_append: List[int],
topics_for_update: Dict[int, List[int]],
good_new_topics: List[int]) -> Tuple[List[int], Dict[int, List[int]], Dict[int, int]]:
topics_for_append = [t for t in topics_for_append if t in good_new_topics]
topics_for_update_new = dict()
for old_topic, new_topic_candidates in topics_for_update.items():
if all([t in good_new_topics for t in new_topic_candidates]):
topics_for_update_new[old_topic] = new_topic_candidates
topics_for_update = topics_for_update_new
topics_for_update_reverse = dict()
for old_topic, new_topics in topics_for_update.items():
for new_topic in new_topics:
assert new_topic not in topics_for_update_reverse # only one parent
topics_for_update_reverse[new_topic] = old_topic
return (
topics_for_append,
topics_for_update,
topics_for_update_reverse
)
@staticmethod
def _jaccard_distance(
p: Dict[str, float],
q: Dict[str, float],
kernel_only: bool = True) -> float:
numerator = 0
denominator = 0
if not kernel_only:
vocabulary_a = set([w for w in p.keys()])
vocabulary_b = set([w for w in q.keys()])
else:
vocabulary_a = set([w for w in p.keys() if p[w] > 1.0 / len(p)])
vocabulary_b = set([w for w in q.keys() if q[w] > 1.0 / len(q)])
common_vocabulary = vocabulary_a.intersection(vocabulary_b)
only_a_vocabulary = vocabulary_a.difference(vocabulary_b)
only_b_vocabulary = vocabulary_b.difference(vocabulary_a)
numerator = numerator + sum(min(p[w], q[w])
for w in common_vocabulary)
denominator = denominator + (
sum(p[w] for w in only_a_vocabulary) +
sum(q[w] for w in only_b_vocabulary) +
sum(max(p[w], q[w])
for w in common_vocabulary)
)
if denominator == 0: # both zero topics
return TopicBankMethod._MINIMUM_TOPIC_DISTANCE
distance = TopicBankMethod._MAXIMUM_TOPIC_DISTANCE - numerator / denominator
distance = max(TopicBankMethod._MINIMUM_TOPIC_DISTANCE, distance)
distance = min(TopicBankMethod._MAXIMUM_TOPIC_DISTANCE, distance)
return distance
@staticmethod
def _get_phi(
topics: List[Dict[TokenType, float]],
word2index: Dict[str, int]) -> pd.DataFrame:
phi = pd.DataFrame.from_dict({
f'topic_{i}': words for i, words in enumerate(topics)
})
phi = phi.reindex(list(word2index.keys()), fill_value=0.0)
phi.fillna(0.0, inplace=True)
return phi
def _get_default_scores(self, topic_model: TopicModel) -> Dict[str, float]:
score_values = dict()
for score in self._all_model_scores:
# TODO: check here
score_values[score.name] = (
topic_model.scores[score.name][-1]
)
return score_values
def _compute_raw_topic_scores(
self,
topic_model: TopicModel,
documents: List[str] = None) -> Dict[str, Dict[str, float]]:
score_values = dict()
if not self._verbose:
all_topic_scores_range = self._all_topic_scores
else:
all_topic_scores_range = tqdm.tqdm(
self._all_topic_scores, total=len(self._all_topic_scores), file=sys.stdout
)
for score in all_topic_scores_range:
score_name = score.name
score_values[score_name] = score.compute(topic_model, documents=documents)
return score_values
def _compute_topic_scores(
self,
topic_model: TopicModel,
documents: List[str]) -> Dict[str, float]:
score_values = dict()
raw_score_values = self._compute_raw_topic_scores(
topic_model, documents=documents
)
for score_name, raw_values in raw_score_values.items():
score_values[score_name] = TopicBankMethod._aggregate_scores_for_models(
raw_values
)
return score_values
@staticmethod
def _aggregate_scores_for_models(topic_scores: Dict[str, float], p: int = 50) -> float:
values = list(v for k, v in topic_scores.items() if v is not None)
if len(values) == 0:
return 0 # TODO: 0 -- so as not to think about it much
return np.percentile(values, p)
@staticmethod
def _average_scores_over_measurements(scores: List[Dict[str, float]]) -> Dict[str, float]:
result = dict()
if len(scores) == 0:
return result
for s in scores[0]:
result[s] = float(np.mean(list(v[s] for v in scores)))
return result
| [
"os.remove",
"numpy.sum",
"topnum.search_methods.topic_bank.one_model_train_funcs._get_topic_model",
"json.dumps",
"collections.defaultdict",
"gc.collect",
"os.path.isfile",
"os.close",
"os.path.dirname",
"numpy.random.RandomState",
"topnum.search_methods.topic_bank.phi_initialization.utils._saf... | [((2022, 2041), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2039, 2041), False, 'import logging\n'), ((8825, 8857), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (8846, 8857), True, 'import numpy as np\n'), ((9514, 9586), 'topnum.search_methods.topic_bank.topic_bank.TopicBank', 'TopicBank', ([], {'save': 'self._save_bank', 'save_folder_path': 'self._bank_folder_path'}), '(save=self._save_bank, save_folder_path=self._bank_folder_path)\n', (9523, 9586), False, 'from topnum.search_methods.topic_bank.topic_bank import TopicBank, TokenType\n'), ((10001, 10037), 'os.path.isfile', 'os.path.isfile', (['self._save_file_path'], {}), '(self._save_file_path)\n', (10015, 10037), False, 'import os\n'), ((20097, 20125), 'artm.hARTM', 'artm.hARTM', ([], {'num_processors': '(1)'}), '(num_processors=1)\n', (20107, 20125), False, 'import artm\n'), ((20549, 20624), 'topnum.search_methods.topic_bank.phi_initialization.utils._safe_copy_phi', '_safe_copy_phi', (['level0', 'bank_phi', 'self._dataset'], {'small_num_fit_iterations': '(1)'}), '(level0, bank_phi, self._dataset, small_num_fit_iterations=1)\n', (20563, 20624), False, 'from topnum.search_methods.topic_bank.phi_initialization.utils import _safe_copy_phi\n'), ((21714, 21799), 'topnum.search_methods.topic_bank.phi_initialization.utils._safe_copy_phi', '_safe_copy_phi', (['level1', 'new_model_phi', 'self._dataset'], {'small_num_fit_iterations': '(3)'}), '(level1, new_model_phi, self._dataset, small_num_fit_iterations=3\n )\n', (21728, 21799), False, 'from topnum.search_methods.topic_bank.phi_initialization.utils import _safe_copy_phi\n'), ((22144, 22161), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (22155, 22161), False, 'from collections import defaultdict\n'), ((22894, 22906), 'gc.collect', 'gc.collect', ([], {}), '()\n', (22904, 22906), False, 'import gc\n'), ((27645, 
27669), 'numpy.percentile', 'np.percentile', (['values', 'p'], {}), '(values, p)\n', (27658, 27669), True, 'import numpy as np\n'), ((4647, 4994), 'topnum.scores.IntratextCoherenceScore', 'IntratextCoherenceScore', ([], {'name': '"""intratext_coherence_score"""', 'data': 'self._dataset', 'text_type': 'TextType.VW_TEXT', 'computation_method': 'ComputationMethod.SEGMENT_WEIGHT', 'word_topic_relatedness': 'WordTopicRelatednessType.PWT', 'specificity_estimation': 'SpecificityEstimationMethod.NONE', 'max_num_out_of_topic_words': '(5)', 'window': '_DEFAULT_WINDOW'}), "(name='intratext_coherence_score', data=self.\n _dataset, text_type=TextType.VW_TEXT, computation_method=\n ComputationMethod.SEGMENT_WEIGHT, word_topic_relatedness=\n WordTopicRelatednessType.PWT, specificity_estimation=\n SpecificityEstimationMethod.NONE, max_num_out_of_topic_words=5, window=\n _DEFAULT_WINDOW)\n", (4670, 4994), False, 'from topnum.scores import IntratextCoherenceScore, PerplexityScore, SophisticatedTopTokensCoherenceScore, SparsityPhiScore, SparsityThetaScore\n'), ((5958, 5998), 'topnum.scores.PerplexityScore', 'PerplexityScore', ([], {'name': '"""perplexity_score"""'}), "(name='perplexity_score')\n", (5973, 5998), False, 'from topnum.scores import IntratextCoherenceScore, PerplexityScore, SophisticatedTopTokensCoherenceScore, SparsityPhiScore, SparsityThetaScore\n'), ((7299, 7489), 'warnings.warn', 'warnings.warn', (['f"""topic_score_threshold_percentile {topic_score_threshold_percentile} is less than one! It is expected to be in [0, 100]. Are you sure you want to proceed (yes/no)?"""'], {}), "(\n f'topic_score_threshold_percentile {topic_score_threshold_percentile} is less than one! It is expected to be in [0, 100]. 
Are you sure you want to proceed (yes/no)?'\n )\n", (7312, 7489), False, 'import warnings\n'), ((8236, 8282), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""topic_bank_result__"""'}), "(prefix='topic_bank_result__')\n", (8252, 8282), False, 'import tempfile\n'), ((8295, 8320), 'os.close', 'os.close', (['file_descriptor'], {}), '(file_descriptor)\n', (8303, 8320), False, 'import os\n'), ((8935, 9000), 'warnings.warn', 'warnings.warn', (['f"""File "{save_file_path}" already exists. Loading"""'], {}), '(f\'File "{save_file_path}" already exists. Loading\')\n', (8948, 9000), False, 'import warnings\n'), ((10051, 10082), 'os.remove', 'os.remove', (['self._save_file_path'], {}), '(self._save_file_path)\n', (10060, 10082), False, 'import os\n'), ((21376, 21448), 'artm.HierarchySparsingThetaRegularizer', 'artm.HierarchySparsingThetaRegularizer', ([], {'name': '"""sparse_hierarchy"""', 'tau': '(1.0)'}), "(name='sparse_hierarchy', tau=1.0)\n", (21414, 21448), False, 'import artm\n'), ((5285, 5570), 'topnum.scores.SophisticatedTopTokensCoherenceScore', 'SophisticatedTopTokensCoherenceScore', ([], {'name': '"""top_tokens_coherence_score"""', 'data': 'self._dataset', 'text_type': 'TextType.VW_TEXT', 'word_topic_relatedness': 'WordTopicRelatednessType.PWT', 'specificity_estimation': 'SpecificityEstimationMethod.NONE', 'num_top_words': '(10)', 'window': '_DEFAULT_WINDOW'}), "(name='top_tokens_coherence_score',\n data=self._dataset, text_type=TextType.VW_TEXT, word_topic_relatedness=\n WordTopicRelatednessType.PWT, specificity_estimation=\n SpecificityEstimationMethod.NONE, num_top_words=10, window=_DEFAULT_WINDOW)\n", (5321, 5570), False, 'from topnum.scores import IntratextCoherenceScore, PerplexityScore, SophisticatedTopTokensCoherenceScore, SparsityPhiScore, SparsityThetaScore\n'), ((6148, 6191), 'topnum.scores.SparsityPhiScore', 'SparsityPhiScore', ([], {'name': '"""sparsity_phi_score"""'}), "(name='sparsity_phi_score')\n", (6164, 6191), False, 'from 
topnum.scores import IntratextCoherenceScore, PerplexityScore, SophisticatedTopTokensCoherenceScore, SparsityPhiScore, SparsityThetaScore\n'), ((6209, 6256), 'topnum.scores.SparsityThetaScore', 'SparsityThetaScore', ([], {'name': '"""sparsity_theta_score"""'}), "(name='sparsity_theta_score')\n", (6227, 6256), False, 'from topnum.scores import IntratextCoherenceScore, PerplexityScore, SophisticatedTopTokensCoherenceScore, SparsityPhiScore, SparsityThetaScore\n'), ((7596, 7613), 'distutils.util.strtobool', 'strtobool', (['answer'], {}), '(answer)\n', (7605, 7613), False, 'from distutils.util import strtobool\n'), ((7640, 7664), 'warnings.warn', 'warnings.warn', (['"""Exiting"""'], {}), "('Exiting')\n", (7653, 7664), False, 'import warnings\n'), ((8479, 8509), 'os.path.isfile', 'os.path.isfile', (['save_file_path'], {}), '(save_file_path)\n', (8493, 8509), False, 'import os\n'), ((9804, 9828), 'json.dumps', 'json.dumps', (['self._result'], {}), '(self._result)\n', (9814, 9828), False, 'import json\n'), ((17408, 17515), 'topnum.search_methods.topic_bank.one_model_train_funcs._get_topic_model', '_get_topic_model', (['self._dataset'], {'phi': 'bank_phi', 'scores': 'self._all_model_scores', 'num_safe_fit_iterations': '(1)'}), '(self._dataset, phi=bank_phi, scores=self._all_model_scores,\n num_safe_fit_iterations=1)\n', (17424, 17515), False, 'from topnum.search_methods.topic_bank.one_model_train_funcs import default_train_func, _get_topic_model\n'), ((18925, 18944), 'numpy.sum', 'np.sum', (['differences'], {}), '(differences)\n', (18931, 18944), True, 'import numpy as np\n'), ((22359, 22392), 'numpy.where', 'np.where', (['(psi_row > psi_threshold)'], {}), '(psi_row > psi_threshold)\n', (22367, 22392), True, 'import numpy as np\n'), ((8352, 8383), 'os.path.dirname', 'os.path.dirname', (['save_file_path'], {}), '(save_file_path)\n', (8367, 8383), False, 'import os\n'), ((16823, 16857), 'numpy.log10', 'np.log10', (['(self._max_num_models + 1)'], {}), '(self._max_num_models + 
1)\n', (16831, 16857), True, 'import numpy as np\n')] |
"""Classes that represent the state of the entire system and entities within.
These classes wrap protobufs, which are basically a fancy NamedTuple that is
generated by the `build` Makefile target. You can read more about protobufs
online, but mainly they're helpful for serializing data over the network."""
import logging
from enum import Enum
from typing import List, Dict, Optional, Union
import numpy as np
import vpython
from orbitx import common
from orbitx import orbitx_pb2 as protos
log = logging.getLogger()
# These entity fields do not change during simulation. Thus, we don't have to
# store them in a big 1D numpy array for use in scipy.solve_ivp.
_PER_ENTITY_UNCHANGING_FIELDS: List[str] = [
    'name', 'mass', 'r', 'artificial', 'atmosphere_thickness',
    'atmosphere_scaling'
]
# Every other proto field is mutable during simulation and is what gets packed
# into the flat array representation (see the module-level loop further down).
_PER_ENTITY_MUTABLE_FIELDS = [field.name for
                              field in protos.Entity.DESCRIPTOR.fields if
                              field.name not in _PER_ENTITY_UNCHANGING_FIELDS]
# Maps each mutable field name to its position in _PER_ENTITY_MUTABLE_FIELDS;
# used to compute offsets into the flat array representation.
_FIELD_ORDERING = {name: index for index, name in
                   enumerate(_PER_ENTITY_MUTABLE_FIELDS)}
# A special field, we reference it a couple times so turn it into a symbol
# to guard against string literal typos.
_LANDED_ON: str = "landed_on"
# Sanity-check that the proto definition actually has such a field.
assert _LANDED_ON in [field.name for field in protos.Entity.DESCRIPTOR.fields]
# Make sure this is in sync with the corresponding enum in orbitx.proto!
# Built with the Enum functional API: display names zipped with proto values.
Navmode = Enum('Navmode', zip([  # type: ignore
    'Manual', 'CCW Prograde', 'CW Retrograde', 'Depart Reference',
    'Approach Target', 'Pro Targ Velocity', 'Anti Targ Velocity'
], protos.Navmode.values()))
class Entity:
    """Thin convenience wrapper around a protos.Entity message.

    Example usage:
    assert Entity(protos.Entity(x=5)).x == 5
    assert Entity(protos.Entity(x=1, y=2)).pos == [1, 2]

    The available fields are declared in orbitx.proto, under the
    "message Entity" declaration.
    """

    def __init__(self, entity: protos.Entity):
        self.proto = entity

    def __repr__(self):
        return repr(self.proto)

    def __str__(self):
        return str(self.proto)

    # Stub annotations for static type analysis with mypy. At runtime these
    # names are backed by properties installed just below this class
    # definition, which proxy through to self.proto.
    name: str
    x: float
    y: float
    vx: float
    vy: float
    r: float
    mass: float
    heading: float
    spin: float
    fuel: float
    throttle: float
    landed_on: str
    broken: bool
    artificial: bool
    atmosphere_thickness: float
    atmosphere_scaling: float

    def screen_pos(self, origin: 'Entity') -> vpython.vector:
        """The on-screen position of this entity, relative to the origin."""
        dx = self.x - origin.x
        dy = self.y - origin.y
        return vpython.vector(dx, dy, 0)

    @property
    def pos(self):
        # (x, y) as a fresh numpy array in the simulation's dtype.
        return np.array((self.x, self.y), dtype=PhysicsState.DTYPE, copy=True)

    @pos.setter
    def pos(self, coord):
        self.x, self.y = coord[0], coord[1]

    @property
    def v(self):
        # (vx, vy) velocity vector.
        return np.asarray([self.vx, self.vy])

    @v.setter
    def v(self, coord):
        self.vx, self.vy = coord[0], coord[1]

    @property
    def dockable(self):
        # Only the AYSE entity can be docked with.
        return self.name == common.AYSE

    def landed(self) -> bool:
        """Convenient and more elegant check to see if the entity is landed."""
        return self.landed_on != ''
class _EntityView(Entity):
    """A cheap-to-construct view of one entity inside a PhysicsState.

    Assigning to a field of this view writes through to the parent
    PhysicsState."""

    def __init__(self, creator: 'PhysicsState', index: int):
        # Deliberately no super().__init__(): a view has no proto of its own.
        # Its field accessors (installed by the module-level loop below this
        # class) reach into the creator's data instead.
        self._creator = creator
        self._index = index

    def __repr__(self):
        # Hacky: this relies on the orbitx_pb2 generated code not being able
        # to tell an _EntityView apart from an orbitx_pb2.Entity. Hopefully
        # that assumption keeps holding.
        # NOTE(review): Entity.__repr__ delegates to self.proto.__repr__,
        # and here self.proto *is* this view — that chain looks like it could
        # recurse back into this method; confirm against actual usage.
        return Entity(self).__repr__()

    def __str__(self):
        return Entity(self).__str__()
# I feel like I should apologize before things get too crazy. Once you read
# the following module-level loop, you will ask "why is _EntityView a janky
# subclass of Entity, implemented using janky array indexing into data owned
# by a PhysicsState?".
# My excuse is that I wanted a way to index into PhysicsState and get an Entity
# for ease of use and code. I found this to be a useful API that made physics
# code cleaner, but it was _too_ useful! The PhysicsState.__getitem__ method
# that implemented this indexing was so expensive and called so often that it
# was _half_ the runtime of OrbitX at high time accelerations! My solution to
# this performance issue was to optimize PhysicsState.__getitem__ to return
# an Entity (specifically, an _EntityView) that is very fast to instantiate
# and very fast to access.
# Hence: janky array-indexing accessors is my super-optimization! 2x speedup!
for field in protos.Entity.DESCRIPTOR.fields:
    # For every field in the underlying protobuf entity, make a
    # convenient equivalent property to allow code like the following:
    # Entity(entity).heading = 5
    # The `name=field.name` default argument binds the *current* field name at
    # definition time, sidestepping Python's late-binding closure pitfall
    # (otherwise every property would proxy the last field in the loop).
    def entity_fget(self, name=field.name):
        return getattr(self.proto, name)

    def entity_fset(self, val, name=field.name):
        return setattr(self.proto, name, val)

    def entity_fdel(self, name=field.name):
        return delattr(self.proto, name)

    # Entity simply proxies every field access through to its wrapped proto.
    setattr(Entity, field.name, property(
        fget=entity_fget, fset=entity_fset, fdel=entity_fdel,
        doc=f"Entity proxy of the underlying field, self.proto.{field.name}"))

    # _EntityView accessors for fields that do not change during simulation:
    # these read/write the proto stored in the parent PhysicsState directly.
    def entity_view_unchanging_fget(self, name=field.name):
        return getattr(self._creator._proto_state.entities[self._index], name)

    def entity_view_unchanging_fset(self, val, name=field.name):
        return setattr(
            self._creator._proto_state.entities[self._index], name, val)

    # For a mutable field, precompute its row number in the parent
    # PhysicsState's flat per-entity array; None for unchanging fields.
    field_n: Optional[int]
    if field.name in _PER_ENTITY_MUTABLE_FIELDS:
        field_n = _FIELD_ORDERING[field.name]
    else:
        field_n = None

    # _EntityView accessors for mutable fields: fast direct indexing into the
    # flat array owned by the parent PhysicsState. The element for this field
    # and entity lives at offset `_n * field_n + index` (where `_n` is
    # presumably the number of entities — confirm against PhysicsState).
    if field.cpp_type in [field.CPPTYPE_FLOAT, field.CPPTYPE_DOUBLE]:
        def entity_view_mutable_fget(self, field_n=field_n):
            return self._creator._array_rep[
                self._creator._n * field_n + self._index]

        def entity_view_mutable_fset(self, val, field_n=field_n):
            self._creator._array_rep[
                self._creator._n * field_n + self._index] = val
    elif field.cpp_type == field.CPPTYPE_BOOL:
        # Same as if it's a float, but we have to convert float -> bool.
        def entity_view_mutable_fget(self, field_n=field_n):
            return bool(
                self._creator._array_rep[
                    self._creator._n * field_n + self._index])

        def entity_view_mutable_fset(self, val, field_n=field_n):
            self._creator._array_rep[
                self._creator._n * field_n + self._index] = val
    elif field.name == _LANDED_ON:
        # Special case, we store the index of the entity we're landed on as a
        # float, but we have to convert this to an int then the name of the
        # entity.
        def entity_view_mutable_fget(self, field_n=field_n):
            entity_index = int(
                self._creator._array_rep[
                    self._creator._n * field_n + self._index])
            # NO_INDEX is the sentinel for "not landed on anything"; it maps
            # back to the empty string, matching Entity.landed()'s check.
            if entity_index == PhysicsState.NO_INDEX:
                return ''
            return self._creator._entity_names[entity_index]

        def entity_view_mutable_fset(self, val, field_n=field_n):
            assert isinstance(val, str)
            self._creator._array_rep[
                self._creator._n * field_n + self._index] = \
                self._creator._name_to_index(val)
elif field.cpp_type == field.CPPTYPE_STRING:
assert field.name in _PER_ENTITY_UNCHANGING_FIELDS
else:
raise NotImplementedError(
"Encountered a field in the protobuf definition of Entity that "
"is of a type we haven't handled.")
if field.name in _PER_ENTITY_UNCHANGING_FIELDS:
# Note there is no fdel defined. The data is owned by the PhysicalState
# so the PhysicalState should delete data on its own time.
setattr(_EntityView, field.name, property(
fget=entity_view_unchanging_fget,
fset=entity_view_unchanging_fset,
doc=f"_EntityView proxy of unchanging field {field.name}"
))
else:
assert field.name in _PER_ENTITY_MUTABLE_FIELDS
setattr(_EntityView, field.name, property(
fget=entity_view_mutable_fget,
fset=entity_view_mutable_fset,
doc=f"_EntityView proxy of mutable field {field.name}"
))
class PhysicsState:
    """The physical state of the system for use in solve_ivp and elsewhere.
    The following operations are supported:
    # Construction without a y-vector, taking all data from a PhysicalState
    PhysicsState(None, protos.PhysicalState)
    # Faster Construction from a y-vector and protos.PhysicalState
    PhysicsState(ivp_solution.y, protos.PhysicalState)
    # Access of a single Entity in the PhysicsState, by index or Entity name
    my_entity: Entity = PhysicsState[0]
    my_entity: Entity = PhysicsState['Earth']
    # Iteration over all Entitys in the PhysicsState
    for entity in my_physics_state:
        print(entity.name, entity.pos)
    # Convert back to a protos.PhysicalState (this almost never happens)
    my_physics_state.as_proto()
    Example usage:
    y = PhysicsState(y_1d, physical_state)
    entity = y[0]
    y[common.HABITAT] = habitat
    scipy.solve_ivp(y.y0())
    See help(PhysicsState.__init__) for how to initialize. Basically, the `y`
    param should be None at the very start of the program, but for the program
    to have good performance, PhysicsState.__init__ should have both parameters
    filled if it's being called more than once a second while OrbitX is running
    normally.
    """
    class NoEntityError(ValueError):
        """Raised when an entity is not found."""
        pass
    # For if an entity is not landed to anything
    NO_INDEX = -1
    # The number of single-element values at the end of the y-vector.
    # Currently just SRB_TIME and TIME_ACC are appended to the end. If there
    # are more values appended to the end, increment this and follow the same
    # code for .srb_time and .time_acc
    N_SINGULAR_ELEMENTS = 2
    # Constant indices for single-element values of the y-vector.
    SRB_TIME_INDEX = -2
    TIME_ACC_INDEX = -1
    # Datatype of internal y-vector
    DTYPE = np.float64
    def __init__(self,
                 y: Optional[np.ndarray],
                 proto_state: protos.PhysicalState):
        """Collects data from proto_state and y, when y is not None.
        There are two kinds of values we care about:
        1) values that change during simulation (like position, velocity, etc)
        2) values that do not change (like mass, radius, name, etc)
        If both proto_state and y are given, 1) is taken from y and
        2) is taken from proto_state. This is a very quick operation.
        If y is None, both 1) and 2) are taken from proto_state, and a new
        y vector is generated. This is a somewhat expensive operation."""
        assert isinstance(proto_state, protos.PhysicalState)
        assert isinstance(y, np.ndarray) or y is None
        # self._proto_state will have positions, velocities, etc for all
        # entities. DO NOT USE THESE they will be stale. Use the accessors of
        # this class instead!
        self._proto_state = protos.PhysicalState()
        self._proto_state.CopyFrom(proto_state)
        self._n = len(proto_state.entities)
        self._entity_names = \
            [entity.name for entity in self._proto_state.entities]
        self._array_rep: np.ndarray
        if y is None:
            # We rely on having an internal array representation we can refer
            # to, so we have to build up this array representation.
            # Layout is field-major: for each mutable field f (at row
            # _FIELD_ORDERING[f]) the n per-entity values are contiguous,
            # followed by the two singular elements srb_time and time_acc.
            y = np.empty(
                len(proto_state.entities) * len(_PER_ENTITY_MUTABLE_FIELDS)
                + self.N_SINGULAR_ELEMENTS, dtype=self.DTYPE)
            for field_name, field_n in _FIELD_ORDERING.items():
                for entity_index, entity in enumerate(proto_state.entities):
                    proto_value = getattr(entity, field_name)
                    # Internally translate string names to indices, otherwise
                    # our entire y vector will turn into a string vector oh no.
                    # Note this will convert to floats, not integer indices.
                    if field_name == _LANDED_ON:
                        proto_value = self._name_to_index(proto_value)
                    y[self._n * field_n + entity_index] = proto_value
            y[-2] = proto_state.srb_time
            y[-1] = proto_state.time_acc
            self._array_rep = y
        else:
            # Take everything except the SRB time, the last element.
            self._array_rep = y.astype(self.DTYPE)
            self._proto_state.srb_time = y[self.SRB_TIME_INDEX]
            self._proto_state.time_acc = y[self.TIME_ACC_INDEX]
        assert len(self._array_rep.shape) == 1, \
            f'y is not 1D: {self._array_rep.shape}'
        assert (self._array_rep.size - self.N_SINGULAR_ELEMENTS) % \
            len(_PER_ENTITY_MUTABLE_FIELDS) == 0, self._array_rep.size
        assert (self._array_rep.size - self.N_SINGULAR_ELEMENTS) // \
            len(_PER_ENTITY_MUTABLE_FIELDS) == len(proto_state.entities), \
            f'{self._array_rep.size} mismatches: {len(proto_state.entities)}'
        # Normalize headings in place into [0, 2*pi); self.Heading is a numpy
        # view into _array_rep, so out= writes straight through to it.
        np.mod(self.Heading, 2 * np.pi, out=self.Heading)
        # Lazily-computed cache, filled on first access of .Atmospheres.
        self._entities_with_atmospheres: Optional[List[int]] = None
    def _y_component(self, field_name: str) -> np.ndarray:
        """Returns an n-array with the value of a component for each entity."""
        # This is a numpy slice, i.e. a view: writes to it mutate _array_rep.
        return self._array_rep[
            _FIELD_ORDERING[field_name] * self._n:
            (_FIELD_ORDERING[field_name] + 1) * self._n
        ]
    def _index_to_name(self, index: int) -> str:
        """Translates an index into the entity list to the right name."""
        i = int(index)
        return self._entity_names[i] if i != self.NO_INDEX else ''
    def _name_to_index(self, name: Optional[str]) -> int:
        """Finds the index of the entity with the given name."""
        try:
            assert name is not None
            return self._entity_names.index(name) if name != '' \
                else self.NO_INDEX
        except ValueError:
            raise self.NoEntityError(f'{name} not in entity list')
    def y0(self):
        """Returns a y-vector suitable as input for scipy.solve_ivp."""
        return self._array_rep
    def as_proto(self) -> protos.PhysicalState:
        """Creates a protos.PhysicalState view into all internal data.
        Expensive. Consider one of the other accessors, which are faster.
        For example, if you want to iterate over all elements, use __iter__
        by doing:
        for entity in my_physics_state: print(entity.name)"""
        constructed_protobuf = protos.PhysicalState()
        constructed_protobuf.CopyFrom(self._proto_state)
        for entity_data, entity in zip(self, constructed_protobuf.entities):
            (
                entity.x, entity.y, entity.vx, entity.vy,
                entity.heading, entity.spin, entity.fuel,
                entity.throttle, entity.landed_on,
                entity.broken
            ) = (
                entity_data.x, entity_data.y, entity_data.vx, entity_data.vy,
                entity_data.heading, entity_data.spin, entity_data.fuel,
                entity_data.throttle, entity_data.landed_on,
                entity_data.broken
            )
        return constructed_protobuf
    def __len__(self):
        """Implements `len(physics_state)`."""
        return self._n
    def __iter__(self):
        """Implements `for entity in physics_state:` loops."""
        for i in range(0, self._n):
            yield self.__getitem__(i)
    def __getitem__(self, index: Union[str, int]) -> Entity:
        """Returns a Entity view at a given name or index.
        Allows the following:
        entity = physics_state[2]
        entity = physics_state[common.HABITAT]
        entity.x = 5 # Propagates to physics_state.
        """
        if isinstance(index, str):
            # Turn a name-based index into an integer
            index = self._entity_names.index(index)
        i = int(index)
        return _EntityView(self, i)
    def __setitem__(self, index: Union[str, int], val: Entity):
        """Puts a Entity at a given name or index in the state.
        Allows the following:
        PhysicsState[2] = physics_entity
        PhysicsState[common.HABITAT] = physics_entity
        """
        # NOTE: PhysicsState defines no __eq__, so this == is effectively an
        # identity check: a view into our own data means nothing to copy.
        if isinstance(val, _EntityView) and val._creator == self:
            # The _EntityView is a view into our own data, so we already have
            # the data.
            return
        if isinstance(index, str):
            # Turn a name-based index into an integer
            index = self._entity_names.index(index)
        i = int(index)
        entity = self[i]
        (
            entity.x, entity.y, entity.vx, entity.vy, entity.heading,
            entity.spin, entity.fuel, entity.throttle, entity.landed_on,
            entity.broken
        ) = (
            val.x, val.y, val.vx, val.vy, val.heading,
            val.spin, val.fuel, val.throttle, val.landed_on,
            val.broken
        )
    def __repr__(self):
        return self.as_proto().__repr__()
    def __str__(self):
        return self.as_proto().__str__()
    @property
    def timestamp(self) -> float:
        return self._proto_state.timestamp
    @timestamp.setter
    def timestamp(self, t: float):
        self._proto_state.timestamp = t
    @property
    def srb_time(self) -> float:
        return self._proto_state.srb_time
    @srb_time.setter
    def srb_time(self, val: float):
        # Keep the proto and the y-vector copies of srb_time in sync.
        self._proto_state.srb_time = val
        self._array_rep[self.SRB_TIME_INDEX] = val
    @property
    def parachute_deployed(self) -> bool:
        return self._proto_state.parachute_deployed
    @parachute_deployed.setter
    def parachute_deployed(self, val: bool):
        self._proto_state.parachute_deployed = val
    @property
    def X(self):
        return self._y_component('x')
    @property
    def Y(self):
        return self._y_component('y')
    @property
    def VX(self):
        return self._y_component('vx')
    @property
    def VY(self):
        return self._y_component('vy')
    @property
    def Heading(self):
        return self._y_component('heading')
    @property
    def Spin(self):
        return self._y_component('spin')
    @property
    def Fuel(self):
        return self._y_component('fuel')
    @property
    def Throttle(self):
        return self._y_component('throttle')
    @property
    def LandedOn(self) -> Dict[int, int]:
        """Returns a mapping from index to index of entity landings.
        If the 0th entity is landed on the 2nd entity, 0 -> 2 will be mapped.
        """
        landed_map = {}
        for landed, landee in enumerate(
                self._y_component('landed_on')):
            if int(landee) != self.NO_INDEX:
                landed_map[landed] = int(landee)
        return landed_map
    @property
    def Broken(self):
        return self._y_component('broken')
    @property
    def Atmospheres(self) -> List[int]:
        """Returns a list of indexes of entities that have an atmosphere."""
        if self._entities_with_atmospheres is None:
            self._entities_with_atmospheres = []
            for index, entity in enumerate(self._proto_state.entities):
                if entity.atmosphere_scaling != 0 and \
                        entity.atmosphere_thickness != 0:
                    self._entities_with_atmospheres.append(index)
        return self._entities_with_atmospheres
    @property
    def time_acc(self) -> float:
        """Returns the time acceleration, e.g. 1x or 50x."""
        return self._proto_state.time_acc
    @time_acc.setter
    def time_acc(self, new_acc: float):
        # Keep the proto and the y-vector copies of time_acc in sync.
        self._proto_state.time_acc = new_acc
        self._array_rep[self.TIME_ACC_INDEX] = new_acc
    def craft_entity(self):
        """Convenience function, a full Entity representing the craft."""
        return self[self.craft]
    @property
    def craft(self) -> Optional[str]:
        """Returns the currently-controlled craft.
        Not actually backed by any stored field, just a calculation."""
        if common.HABITAT not in self._entity_names and \
                common.AYSE not in self._entity_names:
            return None
        if common.AYSE not in self._entity_names:
            return common.HABITAT
        hab_index = self._name_to_index(common.HABITAT)
        ayse_index = self._name_to_index(common.AYSE)
        if self._y_component('landed_on')[hab_index] == ayse_index:
            # Habitat is docked with AYSE, AYSE is active craft
            return common.AYSE
        else:
            return common.HABITAT
    def reference_entity(self):
        """Convenience function, a full Entity representing the reference."""
        return self[self._proto_state.reference]
    @property
    def reference(self) -> str:
        """Returns current reference of the physics system, shown in GUI."""
        return self._proto_state.reference
    @reference.setter
    def reference(self, name: str):
        self._proto_state.reference = name
    def target_entity(self):
        """Convenience function, a full Entity representing the target."""
        return self[self._proto_state.target]
    @property
    def target(self) -> str:
        """Returns landing/docking target, shown in GUI."""
        return self._proto_state.target
    @target.setter
    def target(self, name: str):
        self._proto_state.target = name
    @property
    def navmode(self) -> Navmode:
        return Navmode(self._proto_state.navmode)
    @navmode.setter
    def navmode(self, navmode: Navmode):
        self._proto_state.navmode = navmode.value
| [
"vpython.vector",
"numpy.asarray",
"orbitx.orbitx_pb2.Navmode.values",
"orbitx.orbitx_pb2.PhysicalState",
"numpy.mod",
"numpy.array",
"logging.getLogger"
] | [((503, 522), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (520, 522), False, 'import logging\n'), ((1577, 1600), 'orbitx.orbitx_pb2.Navmode.values', 'protos.Navmode.values', ([], {}), '()\n', (1598, 1600), True, 'from orbitx import orbitx_pb2 as protos\n'), ((2670, 2725), 'vpython.vector', 'vpython.vector', (['(self.x - origin.x)', '(self.y - origin.y)', '(0)'], {}), '(self.x - origin.x, self.y - origin.y, 0)\n', (2684, 2725), False, 'import vpython\n'), ((2775, 2838), 'numpy.array', 'np.array', (['(self.x, self.y)'], {'dtype': 'PhysicsState.DTYPE', 'copy': '(True)'}), '((self.x, self.y), dtype=PhysicsState.DTYPE, copy=True)\n', (2783, 2838), True, 'import numpy as np\n'), ((2981, 3011), 'numpy.asarray', 'np.asarray', (['[self.vx, self.vy]'], {}), '([self.vx, self.vy])\n', (2991, 3011), True, 'import numpy as np\n'), ((11623, 11645), 'orbitx.orbitx_pb2.PhysicalState', 'protos.PhysicalState', ([], {}), '()\n', (11643, 11645), True, 'from orbitx import orbitx_pb2 as protos\n'), ((13695, 13744), 'numpy.mod', 'np.mod', (['self.Heading', '(2 * np.pi)'], {'out': 'self.Heading'}), '(self.Heading, 2 * np.pi, out=self.Heading)\n', (13701, 13744), True, 'import numpy as np\n'), ((15202, 15224), 'orbitx.orbitx_pb2.PhysicalState', 'protos.PhysicalState', ([], {}), '()\n', (15222, 15224), True, 'from orbitx import orbitx_pb2 as protos\n')] |
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import netCDF4 as nc
import iris
from subprocess import call
# write the google earth (KML) file header info
def WriteGEHeader( outp, collection_name ):
    """Write the KML document header and every pushpin <Style> definition.

    Parameters
    ----------
    outp : writable text stream (the open .kml file)
    collection_name : document name shown in Google Earth

    The style ids written here are the ones WriteGEPlace selects between when
    colouring a cell marker by its depth.  Output is byte-identical to the
    previous hand-unrolled version.
    """
    # (style id, aabbggrr colour) pairs, in the exact order they were
    # originally emitted.  PolyStyleBC additionally hides its children in the
    # places sidebar (see the loop below).
    styles = [
        ('PolyStyle12', 'ffff9933'),
        ('PolyStyle23', 'ffffff99'),
        ('PolyStyle34', 'ff669933'),
        ('PolyStyle45', 'ff00ff00'),
        ('PolyStyle56', 'ff99ffcc'),
        ('PolyStyle67', 'ff66ffff'),
        ('PolyStyle78', 'ff3399ff'),
        ('PolyStyle89', 'ff6666cc'),
        ('PolyStyle910', 'ff000099'),
        ('PolyStyle10p', 'ff0000cc'),
        ('PolyStyleMinVl', 'ff5c1237'),
        ('PolyStyleBC', 'ff000000'),
    ]
    outp.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    outp.write('<kml xmlns="http://www.opengis.net/kml/2.2">\n')
    outp.write('<Document>\n')
    outp.write(' <name>%s' %collection_name +'</name>\n')
    outp.write(' <open>0</open>\n')
    for style_id, colour in styles:
        outp.write(' <Style id="%s">\n' % style_id)
        outp.write(' <IconStyle>\n')
        outp.write(' <color>%s</color>\n' % colour)
        outp.write(' <colorMode>normal</colorMode>\n')
        outp.write(' <scale>1.0</scale>\n')
        outp.write(' <icon>\n')
        outp.write(' <href>http://maps.google.com/mapfiles/kml/shapes/shaded_dot.png</href>\n')
        outp.write(' </icon>\n')
        outp.write(' </IconStyle>\n')
        if style_id == 'PolyStyleBC':
            # Only the boundary-cell style collapses its children in the list.
            outp.write(' <ListStyle>\n')
            outp.write(' <listItemType>checkHideChildren</listItemType>\n')
            outp.write(' </ListStyle>\n')
        outp.write(' </Style>\n')
    return
# write the closing tags of the google earth file
def WriteGEEnd( outp ):
    """Close the KML document opened by WriteGEHeader."""
    # Adjacent string literals concatenate; output is identical to two writes.
    outp.write('</Document>\n'
               '</kml>\n')
    return
# write the google earth placemark statement
# NOTE(review): this function writes to the module-global `outp` (the KML file
# opened in the main script's `with open(...) as outp:` block) — `outp` is NOT
# a parameter.  Calling it outside that block raises NameError.  Left as-is to
# keep the call signature backwards compatible.
def WriteGEPlace( modelname,lppt,x,y,lat,lon,depth,ylevel,obstrpc=None,minval=10.0 ):
    # Altitude (metres) at which the pushpin is drawn above the surface.
    markerheight = 30.0
    outp.write(' <Placemark>\n')
    # select style for pushpin: depth bands map onto the PolyStyle* ids
    # defined by WriteGEHeader (deepest first).
    if depth >= 200.0:
        outp.write(' <styleUrl>#PolyStyle12</styleUrl>\n')
    elif depth >= 150.0:
        outp.write(' <styleUrl>#PolyStyle23</styleUrl>\n')
    elif depth >= 120.0:
        outp.write(' <styleUrl>#PolyStyle34</styleUrl>\n')
    elif depth >= 100.0:
        outp.write(' <styleUrl>#PolyStyle45</styleUrl>\n')
    elif depth >= 80.0:
        outp.write(' <styleUrl>#PolyStyle56</styleUrl>\n')
    elif depth >= 60.0:
        outp.write(' <styleUrl>#PolyStyle67</styleUrl>\n')
    elif depth >= 40.0:
        outp.write(' <styleUrl>#PolyStyle78</styleUrl>\n')
    elif depth >= 20.0:
        outp.write(' <styleUrl>#PolyStyle89</styleUrl>\n')
    elif depth >= 10.0:
        outp.write(' <styleUrl>#PolyStyle910</styleUrl>\n')
    # Below 10 m: with a non-default minval, cells shallower than minval get
    # their own MinVl style; with the default minval (10.0) everything left
    # over falls through to PolyStyle10p.
    elif minval != 10.0:
        if depth >= minval:
            outp.write(' <styleUrl>#PolyStyle10p</styleUrl>\n')
        else:
            outp.write(' <styleUrl>#PolyStyleMinVl</styleUrl>\n')
    else:
        outp.write(' <styleUrl>#PolyStyle10p</styleUrl>\n')
    outp.write(' <description>\n')
    outp.write(' %s' %modelname + '\n')
    outp.write(' Cell index number %d' %lppt + ' (first index=1)\n')
    outp.write(' X-Y cell number: %d' %x + ', %d' %y + '\n')
    outp.write(' Ylevel: %d' %ylevel + ' m\n')
    outp.write(' Position (lat,lon): %8.3f' %lat + ', %8.3f' %lon + ' deg.dec\n')
    # Depths shallower than minval are reported clamped to minval.
    if depth < minval:
        outp.write(' Depth: %8.2f' %minval + ' m\n')
    else:
        outp.write(' Depth: %8.2f' %depth + ' m\n')
    if obstrpc is not None:
        outp.write(' Obstr: %4.1f' %obstrpc + ' percent\n')
    outp.write(' </description>\n')
    outp.write(' <Point>\n')
    outp.write(' <coordinates>\n')
    # KML coordinate order is lon,lat,altitude.
    outp.write(' %8.3f' %lon + ', %8.3f' %lat + ',%4.1f' %markerheight +'\n')
    outp.write(' </coordinates>\n')
    outp.write(' </Point>\n')
    outp.write(' </Placemark>\n')
    return
def ReadSMCTxt(smccellsfile,nrlv,jshift,dlon,dlat,slon,slat,rotated=False,rlon=0.0,rlat=0.0,obstrfile=None):
    """Read an SMC cells text file (and optional obstruction file).

    Parameters
    ----------
    smccellsfile : path to the cells .dat file; the first line is a header,
        each following line holds "x y xlevel ylevel depth" for one cell
    nrlv : number of SMC refinement levels; the finest cell size is
        dlon/dlat divided by 2**(nrlv-1)
    jshift : unused here; kept for call-signature compatibility
    dlon, dlat : grid spacing of the *coarsest* cells (degrees)
    slon, slat : longitude/latitude associated with cell index (0, 0)
    rotated : when True, un-rotate coordinates from the (rlon, rlat) pole
        using iris (iris must be importable in that case)
    obstrfile : optional obstruction file; line lp holds the obstruction
        percentage for cell lp

    Returns
    -------
    (xy, latlon, depth, ylevel, obstrpc) — obstrpc is None when no
    obstruction file was given.
    """
    levscl = 2.0 ** (nrlv-1.0)
    print('Reading '+smccellsfile)
    # "with" closes the file on exit; no explicit close() needed.
    with open(smccellsfile,'r') as inp:
        txtdata = inp.readlines()
    obstr = obstrfile is not None
    if obstr:
        print('Reading '+obstrfile)
        with open(obstrfile,'r') as inp:
            obsdata = inp.readlines()
    ncells = len(txtdata) - 1  # first line of the file is a header
    xy = np.empty([ncells, 2])
    latlon = np.empty([ncells, 2])
    depth = np.empty(ncells)
    xlevel = np.empty(ncells)
    ylevel = np.empty(ncells)
    if obstr:
        obstrpc = np.empty(ncells)
    for lp in range(1, len(txtdata)):
        # np.float was removed in NumPy 1.24; the builtin float is identical.
        cols = txtdata[lp].split()
        xy[lp-1,0] = float(cols[0])
        xy[lp-1,1] = float(cols[1])
        xlevel[lp-1] = float(cols[2])
        ylevel[lp-1] = float(cols[3])
        depth[lp-1] = float(cols[4])
        if obstr:
            obstrpc[lp-1] = float(obsdata[lp])
        # lat lon calculations are based on the 0,0 cell being at slat, slon
        # dlon and dlat are assumed to be prescribed at the coarsest cell resolution
        latlon[lp-1,0] = (slon - 0.5 * dlon) + xy[lp-1,0] * dlon / levscl  # cell lower left corner
        latlon[lp-1,0] = latlon[lp-1,0] + (xlevel[lp-1]/2.0) * dlon / levscl  # cell centre
        if latlon[lp-1,0] > 180.0:
            latlon[lp-1,0] = latlon[lp-1,0] - 360.0  # put on -180 to 180 grid
        latlon[lp-1,1] = (slat - 0.5 * dlat) + xy[lp-1,1] * dlat / levscl  # cell lower left corner
        latlon[lp-1,1] = latlon[lp-1,1] + (ylevel[lp-1]/2.0) * dlat / levscl  # cell centre
    if rotated:
        lontmp = latlon[:,0]
        lattmp = latlon[:,1]
        rlonstmp, rlatstmp = iris.analysis.cartography.unrotate_pole(lontmp,lattmp,rlon,rlat)
        latlon[:,0] = rlonstmp
        latlon[:,1] = rlatstmp
    print(np.min(latlon[:,0]))
    print(np.max(latlon[:,0]))
    print(np.min(latlon[:,1]))
    print(np.max(latlon[:,1]))
    print('Read of '+smccellsfile+' completed')
    if obstr:
        return xy, latlon, depth, ylevel, obstrpc
    else:
        return xy, latlon, depth, ylevel, None
#-- main program
# Select the model configuration, the cell levels to plot and the geographic
# region, then read the SMC cells file and write a colour-coded Google Earth
# placemark file (kml, then zipped to kmz).
# set model
#modelname = 'S36125'
#modelname = 'A36125'
#modelname = 'AMM15SMC'
#modelname = 'GS512L4EUK'
modelname = 'AS512L4EUK'
# set levels constraints for points to output
#levslist = [1,2]
levslist = None
# set region
#UK - standard grid domain
#latlims = [46.0, 62.75]
#lonlims = [-16.0, 13.0]
#regname = 'UK'
# set region
#Euro - standard grid domain
#latlims = [30.2, 65.9]
#lonlims = [-19.8, 41.8]
#regname = 'Euro'
## set region
#latlims = [62, 68]
#lonlims = [-42, -20]
#regname = 'GreenIceChk'
# set region
latlims = [40, 50]
lonlims = [-30, -20]
regname = 'MidAtlChk'
#UK coastal domain
#latlims = [48.5, 62.00]
#lonlims = [-8.0, 3.0]
#regname = 'UKcoastal'
#Norway
#latlims = [57.0, 63.0]
#lonlims = [0.0, 9.0]
#regname = 'Norway'
#Baltic settings
#latlims = [53.0, 66.0]
#lonlims = [9.0, 30.0]
#regname = 'Baltic'
# Carib settings
#latlims = [7.0, 26.0]
#lonlims = [-89.0, -58.0]
#regname = 'Carib'
# KoJap settings
#latlims = [30.0, 41.0]
#lonlims = [121.0, 137.0]
#regname = 'KoJap'
# Med settings
#latlims = [30.0, 47.0]
#lonlims = [-10.0, 42.0]
#regname = 'Med'
# Caspian settings
#latlims = [36.5, 47.0]
#lonlims = [46.0, 54.5]
#regname = 'Caspian'
# Arabia
#latlims = [5.0, 30.0]
#lonlims = [31.0, 78.0]
#regname = 'Arabia'
# Brazil
#latlims = [-30.0, -20.0]
#lonlims = [-55.0, -35.0]
#regname = 'Brazil'
# Ascension
#latlims = [-9.0, -7.0]
#lonlims = [-15.0, -13.0]
#regname = 'Ascension'
#UK SW settings
#latlims = [49.6, 51.8]
#lonlims = [-6.6, 0.5]
#regname = 'UK-SW'
#UK NE settings
#latlims = [54.3, 55.4]
#lonlims = [-1.65, -0.25]
#regname = 'UK-NE'
# set the input file data
# Default the obstruction file: only the GS512L4EUK/AS512L4EUK branches set
# one, and ReadSMCTxt accepts obstrfile=None.  Without this default the
# S36125, A36125 and AMM15SMC branches would hit a NameError at the read below.
smcobstrfile = None
if modelname == 'S36125':
    smccellsfile = '/hpc/home/d01/frxs/FCM_WW3CONFIG/trunk/GblSMC_361225/grid/S36125MCels.dat'
    nrlv = 4
    jshift = 2816
    dlon = 0.35156250
    dlat = 0.23437500
    slon = 0.0
    slat = 0.0
    rotated = False
    rlon = 0.0
    rlat = 0.0
    depthmin = 15.0
    if levslist is None:
        levslist = [1,2,4,8]
elif modelname == 'A36125':
    smccellsfile = '/hpc/home/d01/frxs/FCM_WW3CONFIG/trunk/AtlSMC_361225/grid/Atlan36125.dat'
    nrlv = 4
    jshift = 2816
    dlon = 0.35156250
    dlat = 0.23437500
    slon = 0.0
    slat = 0.0
    rotated = False
    rlon = 0.0
    rlat = 0.0
    depthmin = 15.0
    if levslist is None:
        levslist = [1,2,4,8]
elif modelname == 'AMM15SMC':
    smccellsfile = '/hpc/home/d01/frxs/FCM_WW3CONFIG/trunk/amm15_smc/grid/amm15s.ww3Cels.dat'
    nrlv = 2
    jshift = 2816
    dlon = 0.0270 # value for coarsest cell size
    dlat = 0.0270 # value for coarsest cell size
    slon = -10.8895 # cell centre
    slat = -7.2942 # cell centre
    rotated = True
    rlon = 177.5
    rlat = 37.5
    depthmin = 10.0
    if levslist is None:
        levslist = [1,2]
elif modelname == 'GS512L4EUK':
    smccellsfile = '/hpc/home/d01/frxs/FCM_WW3CONFIG/r2214_PS43/GW1.0/configs/'+modelname+'/ww3Cels.dat'
    smcobstrfile = '/hpc/home/d01/frxs/FCM_WW3CONFIG/r2214_PS43/GW1.0/configs/'+modelname+'/ww3Obstr.dat'
    nrlv = 4
    jshift = 0
    dlon = 0.35156250
    dlat = 0.23437500
    slon = 0.17578125
    slat = -80.03906250
    rotated = False
    rlon = 0.0
    rlat = 0.0
    depthmin = 15.0
    if levslist is None:
        levslist = [1,2,4,8]
elif modelname == 'AS512L4EUK':
    smccellsfile = '/hpc/home/d01/frxs/FCM_WW3CONFIG/r2214_PS43/GW1.0/configs/'+modelname+'/ww3Cels.dat'
    smcobstrfile = '/hpc/home/d01/frxs/FCM_WW3CONFIG/r2214_PS43/GW1.0/configs/'+modelname+'/ww3Obstr.dat'
    nrlv = 4
    jshift = 0
    dlon = 0.35156250
    dlat = 0.23437500
    slon = -98.26171875
    slat = -24.25781250
    rotated = False
    rlon = 0.0
    rlat = 0.0
    depthmin = 15.0
    if levslist is None:
        levslist = [1,2,4,8]
# set working file names and variables
workdir = '/data/cr1/frxs/WW3Grids'
GEBathyFile = workdir + '/' + modelname+'_'+regname+'.kml'
# read in the SMC text file data
xy, latlon, depth, ylevel, obstrpc = ReadSMCTxt(smccellsfile,nrlv,jshift,dlon,dlat,
                                               slon,slat,rotated,rlon,rlat,
                                               obstrfile=smcobstrfile)
# write out to ge file; the with block closes the file, so no explicit
# close() call is needed afterwards
with open(GEBathyFile,'w') as outp:
    WriteGEHeader( outp, modelname+': '+regname )
    for lppt in range(np.shape(xy)[0]):
        # level constraint
        if ylevel[lppt] in levslist:
            # lat constraint
            if latlon[lppt,1] >= latlims[0] and latlon[lppt,1] <= latlims[1]:
                # lon constraint
                if latlon[lppt,0] >= lonlims[0] and latlon[lppt,0] <= lonlims[1]:
                    if obstrpc is not None:
                        WriteGEPlace(modelname,lppt+1,xy[lppt,0],xy[lppt,1],latlon[lppt,1],latlon[lppt,0],
                                     depth[lppt],ylevel[lppt],obstrpc=obstrpc[lppt],minval=depthmin)
                    else:
                        WriteGEPlace(modelname,lppt+1,xy[lppt,0],xy[lppt,1],latlon[lppt,1],latlon[lppt,0],
                                     depth[lppt],ylevel[lppt],minval=depthmin)
        if np.mod(lppt,100000) == 0:
            print('Searched %d' %lppt + ' points out of %d' %np.shape(xy)[0])
    WriteGEEnd( outp )
# zip and copy to kmz file
call(["zip","-r",workdir+'/'+modelname+"_"+regname+".kmz",workdir+'/'+modelname+"_"+regname+".kml"])
#call(["mv",workdir+modelname+".zip",workdir+modelname+".kmz"])
call(["cp",workdir+'/'+modelname+"_"+regname+".kmz","./"])
| [
"iris.analysis.cartography.unrotate_pole",
"numpy.float",
"numpy.mod",
"numpy.shape",
"numpy.max",
"numpy.min",
"subprocess.call"
] | [((16446, 16574), 'subprocess.call', 'call', (["['zip', '-r', workdir + '/' + modelname + '_' + regname + '.kmz', workdir +\n '/' + modelname + '_' + regname + '.kml']"], {}), "(['zip', '-r', workdir + '/' + modelname + '_' + regname + '.kmz', \n workdir + '/' + modelname + '_' + regname + '.kml'])\n", (16450, 16574), False, 'from subprocess import call\n'), ((16611, 16681), 'subprocess.call', 'call', (["['cp', workdir + '/' + modelname + '_' + regname + '.kmz', './']"], {}), "(['cp', workdir + '/' + modelname + '_' + regname + '.kmz', './'])\n", (16615, 16681), False, 'from subprocess import call\n'), ((10770, 10837), 'iris.analysis.cartography.unrotate_pole', 'iris.analysis.cartography.unrotate_pole', (['lontmp', 'lattmp', 'rlon', 'rlat'], {}), '(lontmp, lattmp, rlon, rlat)\n', (10809, 10837), False, 'import iris\n'), ((10908, 10928), 'numpy.min', 'np.min', (['latlon[:, 0]'], {}), '(latlon[:, 0])\n', (10914, 10928), True, 'import numpy as np\n'), ((10939, 10959), 'numpy.max', 'np.max', (['latlon[:, 0]'], {}), '(latlon[:, 0])\n', (10945, 10959), True, 'import numpy as np\n'), ((10970, 10990), 'numpy.min', 'np.min', (['latlon[:, 1]'], {}), '(latlon[:, 1])\n', (10976, 10990), True, 'import numpy as np\n'), ((11001, 11021), 'numpy.max', 'np.max', (['latlon[:, 1]'], {}), '(latlon[:, 1])\n', (11007, 11021), True, 'import numpy as np\n'), ((9770, 9791), 'numpy.float', 'np.float', (['obsdata[lp]'], {}), '(obsdata[lp])\n', (9778, 9791), True, 'import numpy as np\n'), ((15491, 15503), 'numpy.shape', 'np.shape', (['xy'], {}), '(xy)\n', (15499, 15503), True, 'import numpy as np\n'), ((16270, 16290), 'numpy.mod', 'np.mod', (['lppt', '(100000)'], {}), '(lppt, 100000)\n', (16276, 16290), True, 'import numpy as np\n'), ((16357, 16369), 'numpy.shape', 'np.shape', (['xy'], {}), '(xy)\n', (16365, 16369), True, 'import numpy as np\n')] |
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np # pylint: disable=unused-import
import typing # pylint: disable=unused-import
from nomad.metainfo import ( # pylint: disable=unused-import
MSection, MCategory, Category, Package, Quantity, Section, SubSection, SectionProxy,
Reference
)
from nomad.metainfo.legacy import LegacyDefinition
from nomad.datamodel.metainfo import public
m_package = Package(
name='dmol3_nomadmetainfo_json',
description='None',
a_legacy=LegacyDefinition(name='dmol3.nomadmetainfo.json'))
class dmol3_section_hirshfeld_population(MSection):
'''
Hirshfeld Population Analysis Section
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='dmol3_section_hirshfeld_population'))
dmol3_hirshfeld_population = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Hirshfeld Population Analysis
''',
a_legacy=LegacyDefinition(name='dmol3_hirshfeld_population'))
class dmol3_section_mulliken_population(MSection):
'''
Mulliken Population Analysis Section
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='dmol3_section_mulliken_population'))
dmol3_mulliken_population = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Mulliken Population Analysis
''',
a_legacy=LegacyDefinition(name='dmol3_mulliken_population'))
class section_method(public.section_method):
m_def = Section(validate=False, extends_base_section=True, a_legacy=LegacyDefinition(name='section_method'))
dmol3_aux_density = Quantity(
type=str,
shape=[],
description='''
dmol3 aux density
''',
a_legacy=LegacyDefinition(name='dmol3_aux_density'))
dmol3_aux_partition = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
dmol3 aux parition
''',
a_legacy=LegacyDefinition(name='dmol3_aux_partition'))
dmol3_basis_name = Quantity(
type=str,
shape=[],
description='''
dmol3 basis name
''',
a_legacy=LegacyDefinition(name='dmol3_basis_name'))
dmol3_calculation_type = Quantity(
type=str,
shape=[],
description='''
dmol3 calculation type
''',
a_legacy=LegacyDefinition(name='dmol3_calculation_type'))
dmol3_charge = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 system charge
''',
a_legacy=LegacyDefinition(name='dmol3_charge'))
dmol3_electrostatic_moments = Quantity(
type=str,
shape=[],
description='''
dmol3 Electrostatic_Moments
''',
a_legacy=LegacyDefinition(name='dmol3_electrostatic_moments'))
dmol3_functional_name = Quantity(
type=str,
shape=[],
description='''
dmol3 functional name
''',
a_legacy=LegacyDefinition(name='dmol3_functional_name'))
dmol3_hirshfeld_analysis = Quantity(
type=str,
shape=[],
description='''
dmol3 Hirshfeld_Analysis
''',
a_legacy=LegacyDefinition(name='dmol3_hirshfeld_analysis'))
dmol3_integration_grid = Quantity(
type=str,
shape=[],
description='''
dmol3 integration grid
''',
a_legacy=LegacyDefinition(name='dmol3_integration_grid'))
dmol3_kpoints = Quantity(
type=str,
shape=[],
description='''
dmol3 Kpoints
''',
a_legacy=LegacyDefinition(name='dmol3_kpoints'))
dmol3_mulliken_analysis = Quantity(
type=str,
shape=[],
description='''
dmol3 Mulliken_Analysis
''',
a_legacy=LegacyDefinition(name='dmol3_mulliken_analysis'))
dmol3_nuclear_efg = Quantity(
type=str,
shape=[],
description='''
dmol3 Nuclear_EFG
''',
a_legacy=LegacyDefinition(name='dmol3_nuclear_efg'))
dmol3_occupation_name = Quantity(
type=str,
shape=[],
description='''
dmol3 Occupation name
''',
a_legacy=LegacyDefinition(name='dmol3_occupation_name'))
dmol3_occupation_width = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 Occupation width
''',
a_legacy=LegacyDefinition(name='dmol3_occupation_width'))
dmol3_opt_coordinate_system = Quantity(
type=str,
shape=[],
description='''
dmol3 OPT_Coordinate_System
''',
a_legacy=LegacyDefinition(name='dmol3_opt_coordinate_system'))
dmol3_opt_displacement_convergence = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 OPT_Displacement_Convergence
''',
a_legacy=LegacyDefinition(name='dmol3_opt_displacement_convergence'))
dmol3_opt_energy_convergence = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 OPT_Energy_Convergence
''',
a_legacy=LegacyDefinition(name='dmol3_opt_energy_convergence'))
dmol3_opt_gdiis = Quantity(
type=str,
shape=[],
description='''
dmol3 OPT_Gdiis
''',
a_legacy=LegacyDefinition(name='dmol3_opt_gdiis'))
dmol3_opt_gradient_convergence = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 OPT_Gradient_Convergence
''',
a_legacy=LegacyDefinition(name='dmol3_opt_gradient_convergence'))
dmol3_opt_hessian_project = Quantity(
type=str,
shape=[],
description='''
dmol3 OPT_Hessian_Project
''',
a_legacy=LegacyDefinition(name='dmol3_opt_hessian_project'))
dmol3_opt_iterations = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
dmol3 OPT_Iterations
''',
a_legacy=LegacyDefinition(name='dmol3_opt_iterations'))
dmol3_opt_max_displacement = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 OPT_Max_Displacement
''',
a_legacy=LegacyDefinition(name='dmol3_opt_max_displacement'))
dmol3_opt_steep_tol = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 OPT_Steep_Tol
''',
a_legacy=LegacyDefinition(name='dmol3_opt_steep_tol'))
dmol3_optical_absorption = Quantity(
type=str,
shape=[],
description='''
dmol3 Optical_Absorption
''',
a_legacy=LegacyDefinition(name='dmol3_optical_absorption'))
dmol3_partial_dos = Quantity(
type=str,
shape=[],
description='''
dmol3 Partial_Dos
''',
a_legacy=LegacyDefinition(name='dmol3_partial_dos'))
dmol3_pseudopotential_name = Quantity(
type=str,
shape=[],
description='''
dmol3 pseudopotential name
''',
a_legacy=LegacyDefinition(name='dmol3_pseudopotential_name'))
dmol3_rcut = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 atom R_cut
''',
a_legacy=LegacyDefinition(name='dmol3_rcut'))
dmol3_scf_charge_mixing = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 SCF_Charge_Mixing
''',
a_legacy=LegacyDefinition(name='dmol3_scf_charge_mixing'))
dmol3_scf_density_convergence = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 SCF_Density_Convergence
''',
a_legacy=LegacyDefinition(name='dmol3_scf_density_convergence'))
dmol3_scf_diis_name = Quantity(
type=str,
shape=[],
description='''
dmol3 SCF_DIIS name
''',
a_legacy=LegacyDefinition(name='dmol3_scf_diis_name'))
dmol3_scf_diis_number = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 SCF_DIIS number
''',
a_legacy=LegacyDefinition(name='dmol3_scf_diis_number'))
dmol3_scf_direct = Quantity(
type=str,
shape=[],
description='''
dmol3 SCF_Direct
''',
a_legacy=LegacyDefinition(name='dmol3_scf_direct'))
dmol3_scf_iterations = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
dmol3 SCF_Iterations
''',
a_legacy=LegacyDefinition(name='dmol3_scf_iterations'))
dmol3_scf_number_bad_steps = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
dmol3 SCF_Number_Bad_Steps
''',
a_legacy=LegacyDefinition(name='dmol3_scf_number_bad_steps'))
dmol3_scf_restart = Quantity(
type=str,
shape=[],
description='''
dmol3 SCF_Restart
''',
a_legacy=LegacyDefinition(name='dmol3_scf_restart'))
dmol3_scf_spin_mixing = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 SCF_Spin_Mixing
''',
a_legacy=LegacyDefinition(name='dmol3_scf_spin_mixing'))
dmol3_spin_polarization = Quantity(
type=str,
shape=[],
description='''
dmol3 spin polarization
''',
a_legacy=LegacyDefinition(name='dmol3_spin_polarization'))
dmol3_spin = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
dmol3 number of unpaired electrons
''',
a_legacy=LegacyDefinition(name='dmol3_spin'))
dmol3_symmetry = Quantity(
type=str,
shape=[],
description='''
dmol3 sysmmetry
''',
a_legacy=LegacyDefinition(name='dmol3_symmetry'))
class section_scf_iteration(public.section_scf_iteration):
m_def = Section(validate=False, extends_base_section=True, a_legacy=LegacyDefinition(name='section_scf_iteration'))
dmol3_binding_energy_scf_iteration = Quantity(
type=np.dtype(np.float64),
shape=[],
unit='joule',
description='''
dmol3 binding energy at every SCF
''',
a_legacy=LegacyDefinition(name='dmol3_binding_energy_scf_iteration'))
dmol3_convergence_scf_iteration = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 convergence at every SCF
''',
a_legacy=LegacyDefinition(name='dmol3_convergence_scf_iteration'))
dmol3_number_scf_iteration = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
dmol3 iteration number at every SCF
''',
a_legacy=LegacyDefinition(name='dmol3_number_scf_iteration'))
dmol3_time_scf_iteration = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
dmol3 time at every SCF
''',
a_legacy=LegacyDefinition(name='dmol3_time_scf_iteration'))
class section_eigenvalues(public.section_eigenvalues):
m_def = Section(validate=False, extends_base_section=True, a_legacy=LegacyDefinition(name='section_eigenvalues'))
dmol3_eigenvalue_eigenvalue = Quantity(
type=np.dtype(np.float64),
shape=[],
unit='joule',
description='''
Single eigenvalue
''',
a_legacy=LegacyDefinition(name='dmol3_eigenvalue_eigenvalue'))
dmol3_eigenvalue_occupation = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Occupation of single eigenfunction
''',
a_legacy=LegacyDefinition(name='dmol3_eigenvalue_occupation'))
class section_system(public.section_system):
m_def = Section(validate=False, extends_base_section=True, a_legacy=LegacyDefinition(name='section_system'))
dmol3_geometry_atom_labels = Quantity(
type=str,
shape=[],
description='''
labels of atom
''',
a_legacy=LegacyDefinition(name='dmol3_geometry_atom_labels'))
dmol3_geometry_atom_positions_x = Quantity(
type=np.dtype(np.float64),
shape=[],
unit='meter',
description='''
x component of atomic position
''',
a_legacy=LegacyDefinition(name='dmol3_geometry_atom_positions_x'))
dmol3_geometry_atom_positions_y = Quantity(
type=np.dtype(np.float64),
shape=[],
unit='meter',
description='''
y component of atomic position
''',
a_legacy=LegacyDefinition(name='dmol3_geometry_atom_positions_y'))
dmol3_geometry_atom_positions_z = Quantity(
type=np.dtype(np.float64),
shape=[],
unit='meter',
description='''
z component of atomic position
''',
a_legacy=LegacyDefinition(name='dmol3_geometry_atom_positions_z'))
class section_run(public.section_run):
m_def = Section(validate=False, extends_base_section=True, a_legacy=LegacyDefinition(name='section_run'))
dmol3_program_compilation_date = Quantity(
type=str,
shape=[],
description='''
dmol3 compilation date
''',
a_legacy=LegacyDefinition(name='dmol3_program_compilation_date'))
dmol3_program_compilation_time = Quantity(
type=str,
shape=[],
description='''
dmol compilation date
''',
a_legacy=LegacyDefinition(name='dmol3_program_compilation_time'))
class section_single_configuration_calculation(public.section_single_configuration_calculation):
m_def = Section(validate=False, extends_base_section=True, a_legacy=LegacyDefinition(name='section_single_configuration_calculation'))
dmol3_section_hirshfeld_population = SubSection(
sub_section=SectionProxy('dmol3_section_hirshfeld_population'),
repeats=True,
a_legacy=LegacyDefinition(name='dmol3_section_hirshfeld_population'))
dmol3_section_mulliken_population = SubSection(
sub_section=SectionProxy('dmol3_section_mulliken_population'),
repeats=True,
a_legacy=LegacyDefinition(name='dmol3_section_mulliken_population'))
m_package.__init_metainfo__()
| [
"nomad.metainfo.SectionProxy",
"nomad.metainfo.legacy.LegacyDefinition",
"numpy.dtype"
] | [((1141, 1190), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3.nomadmetainfo.json"""'}), "(name='dmol3.nomadmetainfo.json')\n", (1157, 1190), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((1350, 1409), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_section_hirshfeld_population"""'}), "(name='dmol3_section_hirshfeld_population')\n", (1366, 1409), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((1468, 1488), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (1476, 1488), True, 'import numpy as np\n'), ((1600, 1651), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_hirshfeld_population"""'}), "(name='dmol3_hirshfeld_population')\n", (1616, 1651), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((1809, 1867), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_section_mulliken_population"""'}), "(name='dmol3_section_mulliken_population')\n", (1825, 1867), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((1925, 1945), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (1933, 1945), True, 'import numpy as np\n'), ((2056, 2106), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_mulliken_population"""'}), "(name='dmol3_mulliken_population')\n", (2072, 2106), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((2228, 2267), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""section_method"""'}), "(name='section_method')\n", (2244, 2267), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((2420, 2462), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_aux_density"""'}), "(name='dmol3_aux_density')\n", (2436, 2462), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((2514, 2532), 
'numpy.dtype', 'np.dtype', (['np.int32'], {}), '(np.int32)\n', (2522, 2532), True, 'import numpy as np\n'), ((2633, 2677), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_aux_partition"""'}), "(name='dmol3_aux_partition')\n", (2649, 2677), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((2828, 2869), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_basis_name"""'}), "(name='dmol3_basis_name')\n", (2844, 2869), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((3032, 3079), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_calculation_type"""'}), "(name='dmol3_calculation_type')\n", (3048, 3079), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((3124, 3144), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (3132, 3144), True, 'import numpy as np\n'), ((3246, 3283), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_charge"""'}), "(name='dmol3_charge')\n", (3262, 3283), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((3456, 3508), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_electrostatic_moments"""'}), "(name='dmol3_electrostatic_moments')\n", (3472, 3508), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((3669, 3715), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_functional_name"""'}), "(name='dmol3_functional_name')\n", (3685, 3715), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((3882, 3931), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_hirshfeld_analysis"""'}), "(name='dmol3_hirshfeld_analysis')\n", (3898, 3931), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((4094, 4141), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': 
'"""dmol3_integration_grid"""'}), "(name='dmol3_integration_grid')\n", (4110, 4141), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((4286, 4324), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_kpoints"""'}), "(name='dmol3_kpoints')\n", (4302, 4324), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((4489, 4537), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_mulliken_analysis"""'}), "(name='dmol3_mulliken_analysis')\n", (4505, 4537), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((4690, 4732), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_nuclear_efg"""'}), "(name='dmol3_nuclear_efg')\n", (4706, 4732), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((4893, 4939), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_occupation_name"""'}), "(name='dmol3_occupation_name')\n", (4909, 4939), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((4994, 5014), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (5002, 5014), True, 'import numpy as np\n'), ((5119, 5166), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_occupation_width"""'}), "(name='dmol3_occupation_width')\n", (5135, 5166), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((5339, 5391), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_opt_coordinate_system"""'}), "(name='dmol3_opt_coordinate_system')\n", (5355, 5391), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((5458, 5478), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (5466, 5478), True, 'import numpy as np\n'), ((5595, 5654), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_opt_displacement_convergence"""'}), 
"(name='dmol3_opt_displacement_convergence')\n", (5611, 5654), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((5715, 5735), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (5723, 5735), True, 'import numpy as np\n'), ((5846, 5899), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_opt_energy_convergence"""'}), "(name='dmol3_opt_energy_convergence')\n", (5862, 5899), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((6048, 6088), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_opt_gdiis"""'}), "(name='dmol3_opt_gdiis')\n", (6064, 6088), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((6151, 6171), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (6159, 6171), True, 'import numpy as np\n'), ((6284, 6339), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_opt_gradient_convergence"""'}), "(name='dmol3_opt_gradient_convergence')\n", (6300, 6339), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((6508, 6558), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_opt_hessian_project"""'}), "(name='dmol3_opt_hessian_project')\n", (6524, 6558), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((6611, 6629), 'numpy.dtype', 'np.dtype', (['np.int32'], {}), '(np.int32)\n', (6619, 6629), True, 'import numpy as np\n'), ((6732, 6777), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_opt_iterations"""'}), "(name='dmol3_opt_iterations')\n", (6748, 6777), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((6836, 6856), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (6844, 6856), True, 'import numpy as np\n'), ((6965, 7016), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_opt_max_displacement"""'}), 
"(name='dmol3_opt_max_displacement')\n", (6981, 7016), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((7068, 7088), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (7076, 7088), True, 'import numpy as np\n'), ((7190, 7234), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_opt_steep_tol"""'}), "(name='dmol3_opt_steep_tol')\n", (7206, 7234), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((7401, 7450), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_optical_absorption"""'}), "(name='dmol3_optical_absorption')\n", (7417, 7450), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((7603, 7645), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_partial_dos"""'}), "(name='dmol3_partial_dos')\n", (7619, 7645), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((7816, 7867), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_pseudopotential_name"""'}), "(name='dmol3_pseudopotential_name')\n", (7832, 7867), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((7910, 7930), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (7918, 7930), True, 'import numpy as np\n'), ((8029, 8064), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_rcut"""'}), "(name='dmol3_rcut')\n", (8045, 8064), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((8120, 8140), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (8128, 8140), True, 'import numpy as np\n'), ((8246, 8294), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_scf_charge_mixing"""'}), "(name='dmol3_scf_charge_mixing')\n", (8262, 8294), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((8356, 8376), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', 
(8364, 8376), True, 'import numpy as np\n'), ((8488, 8542), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_scf_density_convergence"""'}), "(name='dmol3_scf_density_convergence')\n", (8504, 8542), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((8699, 8743), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_scf_diis_name"""'}), "(name='dmol3_scf_diis_name')\n", (8715, 8743), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((8797, 8817), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (8805, 8817), True, 'import numpy as np\n'), ((8921, 8967), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_scf_diis_number"""'}), "(name='dmol3_scf_diis_number')\n", (8937, 8967), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((9118, 9159), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_scf_direct"""'}), "(name='dmol3_scf_direct')\n", (9134, 9159), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((9212, 9230), 'numpy.dtype', 'np.dtype', (['np.int32'], {}), '(np.int32)\n', (9220, 9230), True, 'import numpy as np\n'), ((9333, 9378), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_scf_iterations"""'}), "(name='dmol3_scf_iterations')\n", (9349, 9378), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((9437, 9455), 'numpy.dtype', 'np.dtype', (['np.int32'], {}), '(np.int32)\n', (9445, 9455), True, 'import numpy as np\n'), ((9564, 9615), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_scf_number_bad_steps"""'}), "(name='dmol3_scf_number_bad_steps')\n", (9580, 9615), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((9768, 9810), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_scf_restart"""'}), 
"(name='dmol3_scf_restart')\n", (9784, 9810), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((9864, 9884), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (9872, 9884), True, 'import numpy as np\n'), ((9988, 10034), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_scf_spin_mixing"""'}), "(name='dmol3_scf_spin_mixing')\n", (10004, 10034), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((10199, 10247), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_spin_polarization"""'}), "(name='dmol3_spin_polarization')\n", (10215, 10247), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((10290, 10308), 'numpy.dtype', 'np.dtype', (['np.int32'], {}), '(np.int32)\n', (10298, 10308), True, 'import numpy as np\n'), ((10425, 10460), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_spin"""'}), "(name='dmol3_spin')\n", (10441, 10460), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((10608, 10647), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_symmetry"""'}), "(name='dmol3_symmetry')\n", (10624, 10647), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((10783, 10829), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""section_scf_iteration"""'}), "(name='section_scf_iteration')\n", (10799, 10829), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((10896, 10916), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (10904, 10916), True, 'import numpy as np\n'), ((11054, 11113), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_binding_energy_scf_iteration"""'}), "(name='dmol3_binding_energy_scf_iteration')\n", (11070, 11113), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((11177, 11197), 'numpy.dtype', 'np.dtype', 
(['np.float64'], {}), '(np.float64)\n', (11185, 11197), True, 'import numpy as np\n'), ((11310, 11366), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_convergence_scf_iteration"""'}), "(name='dmol3_convergence_scf_iteration')\n", (11326, 11366), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((11425, 11443), 'numpy.dtype', 'np.dtype', (['np.int32'], {}), '(np.int32)\n', (11433, 11443), True, 'import numpy as np\n'), ((11561, 11612), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_number_scf_iteration"""'}), "(name='dmol3_number_scf_iteration')\n", (11577, 11612), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((11669, 11689), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (11677, 11689), True, 'import numpy as np\n'), ((11795, 11844), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_time_scf_iteration"""'}), "(name='dmol3_time_scf_iteration')\n", (11811, 11844), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((11976, 12020), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""section_eigenvalues"""'}), "(name='section_eigenvalues')\n", (11992, 12020), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((12080, 12100), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (12088, 12100), True, 'import numpy as np\n'), ((12222, 12274), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_eigenvalue_eigenvalue"""'}), "(name='dmol3_eigenvalue_eigenvalue')\n", (12238, 12274), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((12334, 12354), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (12342, 12354), True, 'import numpy as np\n'), ((12471, 12523), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_eigenvalue_occupation"""'}), 
"(name='dmol3_eigenvalue_occupation')\n", (12487, 12523), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((12645, 12684), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""section_system"""'}), "(name='section_system')\n", (12661, 12684), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((12843, 12894), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_geometry_atom_labels"""'}), "(name='dmol3_geometry_atom_labels')\n", (12859, 12894), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((12958, 12978), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (12966, 12978), True, 'import numpy as np\n'), ((13113, 13169), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_geometry_atom_positions_x"""'}), "(name='dmol3_geometry_atom_positions_x')\n", (13129, 13169), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((13233, 13253), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (13241, 13253), True, 'import numpy as np\n'), ((13388, 13444), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_geometry_atom_positions_y"""'}), "(name='dmol3_geometry_atom_positions_y')\n", (13404, 13444), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((13508, 13528), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (13516, 13528), True, 'import numpy as np\n'), ((13663, 13719), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_geometry_atom_positions_z"""'}), "(name='dmol3_geometry_atom_positions_z')\n", (13679, 13719), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((13835, 13871), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""section_run"""'}), "(name='section_run')\n", (13851, 13871), False, 'from nomad.metainfo.legacy import 
LegacyDefinition\n'), ((14042, 14097), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_program_compilation_date"""'}), "(name='dmol3_program_compilation_date')\n", (14058, 14097), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((14267, 14322), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_program_compilation_time"""'}), "(name='dmol3_program_compilation_time')\n", (14283, 14322), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((14496, 14561), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""section_single_configuration_calculation"""'}), "(name='section_single_configuration_calculation')\n", (14512, 14561), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((14637, 14687), 'nomad.metainfo.SectionProxy', 'SectionProxy', (['"""dmol3_section_hirshfeld_population"""'], {}), "('dmol3_section_hirshfeld_population')\n", (14649, 14687), False, 'from nomad.metainfo import MSection, MCategory, Category, Package, Quantity, Section, SubSection, SectionProxy, Reference\n'), ((14728, 14787), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_section_hirshfeld_population"""'}), "(name='dmol3_section_hirshfeld_population')\n", (14744, 14787), False, 'from nomad.metainfo.legacy import LegacyDefinition\n'), ((14862, 14911), 'nomad.metainfo.SectionProxy', 'SectionProxy', (['"""dmol3_section_mulliken_population"""'], {}), "('dmol3_section_mulliken_population')\n", (14874, 14911), False, 'from nomad.metainfo import MSection, MCategory, Category, Package, Quantity, Section, SubSection, SectionProxy, Reference\n'), ((14952, 15010), 'nomad.metainfo.legacy.LegacyDefinition', 'LegacyDefinition', ([], {'name': '"""dmol3_section_mulliken_population"""'}), "(name='dmol3_section_mulliken_population')\n", (14968, 15010), False, 'from nomad.metainfo.legacy import LegacyDefinition\n')] |
#!/usr/bin/env python
"""
This class runs the regression YAMLs in the ASV format.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import numpy as np
import os
import yaml
import ray
from ray import tune
CONFIG_DIR = os.path.dirname(os.path.abspath(__file__))
def _evaulate_config(filename):
with open(os.path.join(CONFIG_DIR, filename)) as f:
experiments = yaml.load(f)
for _, config in experiments.items():
config["repeat"] = 3
ray.init()
trials = tune.run_experiments(experiments)
results = defaultdict(list)
for t in trials:
results["time_total_s"] += [t.last_result.time_total_s]
results["episode_reward_mean"] += [t.last_result.episode_reward_mean]
results["training_iteration"] += [t.last_result.training_iteration]
return {k: np.median(v) for k, v in results.items()}
class Regression():
    """ASV benchmark base class exposing tracked metrics of a regression run.

    Subclasses are expected to implement ``setup_cache`` so that it returns
    the ``result`` dict consumed by the ``track_*`` methods.
    """
    def setup_cache(self):
        # We need to implement this in separate classes
        # below so that ASV will register the setup/class
        # as a separate test.
        raise NotImplementedError
    def teardown(self, *args):
        """Shut down Ray after each benchmark so workers do not leak."""
        ray.shutdown()
    def track_time(self, result):
        """Median total wall-clock time per trial (seconds)."""
        return result["time_total_s"]
    def track_reward(self, result):
        """Median mean episode reward over trials."""
        return result["episode_reward_mean"]
    def track_iterations(self, result):
        """Median number of training iterations over trials."""
        return result["training_iteration"]
| [
"ray.init",
"os.path.abspath",
"yaml.load",
"numpy.median",
"collections.defaultdict",
"ray.shutdown",
"ray.tune.run_experiments",
"os.path.join"
] | [((338, 363), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (353, 363), False, 'import os\n'), ((573, 583), 'ray.init', 'ray.init', ([], {}), '()\n', (581, 583), False, 'import ray\n'), ((597, 630), 'ray.tune.run_experiments', 'tune.run_experiments', (['experiments'], {}), '(experiments)\n', (617, 630), False, 'from ray import tune\n'), ((645, 662), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (656, 662), False, 'from collections import defaultdict\n'), ((477, 489), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (486, 489), False, 'import yaml\n'), ((918, 930), 'numpy.median', 'np.median', (['v'], {}), '(v)\n', (927, 930), True, 'import numpy as np\n'), ((1227, 1241), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (1239, 1241), False, 'import ray\n'), ((413, 447), 'os.path.join', 'os.path.join', (['CONFIG_DIR', 'filename'], {}), '(CONFIG_DIR, filename)\n', (425, 447), False, 'import os\n')] |
"""
<NAME>
University of Manitoba
February 13th, 2020
"""
import numpy as np
###############################################################################
def aug_hor_ref(data, metadata):
    """Augment a dataset with horizontally-reflected copies of every sample.

    Parameters
    ----------
    data : array_like
        Sample features, shaped (n_samples, height, width).
    metadata : array_like
        The metadata for every sample in the dataset.

    Returns
    -------
    reflected_data : array_like
        The original samples followed by their horizontal reflections;
        sample ``i`` has its mirror stored at position ``-(i + 1)``.
    reflected_metadata : array_like
        Metadata for the originals followed by the reflections.
    """
    metadata = np.array(metadata)
    # The reflected half is reversed along the sample axis and flipped along
    # the last (horizontal) axis, which reproduces the per-sample placement
    # reflected_data[-(i + 1)] = flip(data[i]).
    mirrored = data[::-1, :, ::-1]
    reflected_data = np.concatenate((data, mirrored), axis=0)
    reflected_metadata = np.concatenate((metadata, metadata[::-1]), axis=0)
    return reflected_data, reflected_metadata
def aug_hor_translate(data, metadata, step_size=10):
    """Augment a dataset with horizontally-translated (cyclic) copies.

    Each sample is cyclically shifted along its last axis by
    ``0, step_size, 2 * step_size, ...`` for ``width // step_size - 1`` steps
    (step 0 keeps the original sample unchanged).

    Parameters
    ----------
    data : array_like
        Sample features, shaped (n_samples, height, width).
    metadata : array_like
        The metadata for every sample in the dataset.
    step_size : int
        The step-size to be used for horizontal translations.

    Returns
    -------
    translated_data : array_like
        All translated copies; sample ``s`` occupies positions
        ``s * num_steps`` through ``(s + 1) * num_steps - 1``.
    translated_metadata : array_like
        The metadata entry of each sample, repeated once per translated copy.
    """
    # Number of distinct cyclic shifts that fit in the sample width
    num_steps = (np.size(data, axis=2) // step_size - 1)
    translated_data = np.zeros([np.size(data, axis=0) * num_steps,
                                np.size(data, axis=1),
                                np.size(data, axis=2)],
                               dtype=data.dtype)
    translated_metadata = []
    for sample in range(np.size(data, axis=0)):
        for step in range(num_steps):
            # np.roll performs exactly the cyclic shift that the original
            # code implemented with an O(width) per-column Python loop.
            translated_data[sample * num_steps + step, :, :] = \
                np.roll(data[sample], step_size * step, axis=1)
            translated_metadata.append(metadata[sample])
    translated_metadata = np.array(translated_metadata)
    return translated_data, translated_metadata
def full_aug(data, metadata, step_size=10):
    """Full augmentation: horizontal translation followed by reflection.

    Parameters
    ----------
    data : array_like
        The dataset features for every sample.
    metadata : array_like
        The ID tags for every sample in the dataset.
    step_size : int
        The step-size to be used for horizontal translations.

    Returns
    -------
    data : array_like
        The augmented dataset (features for every sample).
    metadata : array_like
        The augmented labels.
    """
    # Translate first, then mirror every translated copy.
    translated = aug_hor_translate(data, metadata, step_size=step_size)
    return aug_hor_ref(*translated)
| [
"numpy.size",
"numpy.zeros_like",
"numpy.array",
"numpy.flip"
] | [((1733, 1761), 'numpy.array', 'np.array', (['reflected_metadata'], {}), '(reflected_metadata)\n', (1741, 1761), True, 'import numpy as np\n'), ((4092, 4121), 'numpy.array', 'np.array', (['translated_metadata'], {}), '(translated_metadata)\n', (4100, 4121), True, 'import numpy as np\n'), ((1208, 1229), 'numpy.size', 'np.size', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1215, 1229), True, 'import numpy as np\n'), ((1476, 1509), 'numpy.flip', 'np.flip', (['this_sample_data'], {'axis': '(1)'}), '(this_sample_data, axis=1)\n', (1483, 1509), True, 'import numpy as np\n'), ((3229, 3250), 'numpy.size', 'np.size', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (3236, 3250), True, 'import numpy as np\n'), ((983, 1004), 'numpy.size', 'np.size', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (990, 1004), True, 'import numpy as np\n'), ((1038, 1059), 'numpy.size', 'np.size', (['data'], {'axis': '(2)'}), '(data, axis=2)\n', (1045, 1059), True, 'import numpy as np\n'), ((1866, 1901), 'numpy.flip', 'np.flip', (['reflected_metadata'], {'axis': '(0)'}), '(reflected_metadata, axis=0)\n', (1873, 1901), True, 'import numpy as np\n'), ((2789, 2810), 'numpy.size', 'np.size', (['data'], {'axis': '(2)'}), '(data, axis=2)\n', (2796, 2810), True, 'import numpy as np\n'), ((3005, 3026), 'numpy.size', 'np.size', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (3012, 3026), True, 'import numpy as np\n'), ((3061, 3082), 'numpy.size', 'np.size', (['data'], {'axis': '(2)'}), '(data, axis=2)\n', (3068, 3082), True, 'import numpy as np\n'), ((3439, 3465), 'numpy.zeros_like', 'np.zeros_like', (['this_sample'], {}), '(this_sample)\n', (3452, 3465), True, 'import numpy as np\n'), ((924, 945), 'numpy.size', 'np.size', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (931, 945), True, 'import numpy as np\n'), ((2937, 2958), 'numpy.size', 'np.size', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (2944, 2958), True, 'import numpy as np\n'), ((3610, 3638), 'numpy.size', 'np.size', 
(['this_sample'], {'axis': '(1)'}), '(this_sample, axis=1)\n', (3617, 3638), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Taken from https://github.com/riannevdberg/sylvester-flows/blob/32dde9b7d696fee94f946a338182e542779eecfe/models/layers.py"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class Identity(nn.Module):
    """No-op module: the forward pass returns its input unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # The identity mapping -- nothing to compute.
        return x
class GatedConv2d(nn.Module):
    """2-D convolution with multiplicative gating.

    Computes ``activation(conv_h(x)) * sigmoid(conv_g(x))``; when no
    activation is given the ``h`` branch is used as-is.
    """

    def __init__(self, input_channels, output_channels, kernel_size, stride, padding, dilation=1, activation=None):
        super(GatedConv2d, self).__init__()
        self.activation = activation
        self.sigmoid = nn.Sigmoid()
        self.h = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation)
        self.g = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation)

    def forward(self, x):
        features = self.h(x)
        if self.activation is not None:
            features = self.activation(features)
        gate = self.sigmoid(self.g(x))
        return features * gate
class GatedConvTranspose2d(nn.Module):
    """Transposed 2-D convolution with multiplicative gating.

    Computes ``activation(deconv_h(x)) * sigmoid(deconv_g(x))``; when no
    activation is given the ``h`` branch is used as-is.
    """

    def __init__(self, input_channels, output_channels, kernel_size, stride, padding, output_padding=0, dilation=1,
                 activation=None):
        super(GatedConvTranspose2d, self).__init__()
        self.activation = activation
        self.sigmoid = nn.Sigmoid()
        self.h = nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride, padding, output_padding,
                                    dilation=dilation)
        self.g = nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride, padding, output_padding,
                                    dilation=dilation)

    def forward(self, x):
        features = self.h(x)
        if self.activation is not None:
            features = self.activation(features)
        gate = self.sigmoid(self.g(x))
        return features * gate
class MaskedLinear(nn.Module):
    """
    Creates masked linear layer for MLP MADE.
    For input (x) to hidden (h) or hidden to hidden layers choose diagonal_zeros = False.
    For hidden to output (y) layers:
    If output depends on input through y_i = f(x_{<i}) set diagonal_zeros = True.
    Else if output depends on input through y_i = f(x_{<=i}) set diagonal_zeros = False.
    """

    def __init__(self, in_features, out_features, diagonal_zeros=False, bias=True):
        super(MaskedLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.diagonal_zeros = diagonal_zeros
        # Weight is stored transposed, (in_features, out_features), because
        # forward computes x.mm(mask * weight).
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        mask = torch.from_numpy(self.build_mask())
        if torch.cuda.is_available():
            mask = mask.cuda()
        # The mask is a constant, non-trainable tensor. torch.autograd.Variable
        # is deprecated and is a no-op alias for Tensor in modern PyTorch, so a
        # plain tensor (requires_grad=False by default) is equivalent.
        self.mask = mask
        self.reset_parameters()

    def reset_parameters(self):
        # nn.init.kaiming_normal was deprecated in favor of the in-place
        # kaiming_normal_ variant.
        nn.init.kaiming_normal_(self.weight)
        if self.bias is not None:
            self.bias.data.zero_()

    def build_mask(self):
        """Build the binary autoregressive mask of shape (in_features, out_features)."""
        n_in, n_out = self.in_features, self.out_features
        assert n_in % n_out == 0 or n_out % n_in == 0
        mask = np.ones((n_in, n_out), dtype=np.float32)
        if n_out >= n_in:
            k = n_out // n_in
            # Output block i may only see inputs <= i (strictly < i when
            # diagonal_zeros is set).
            for i in range(n_in):
                mask[i + 1:, i * k:(i + 1) * k] = 0
                if self.diagonal_zeros:
                    mask[i:i + 1, i * k:(i + 1) * k] = 0
        else:
            k = n_in // n_out
            for i in range(n_out):
                mask[(i + 1) * k:, i:i + 1] = 0
                if self.diagonal_zeros:
                    mask[i * k:(i + 1) * k:, i:i + 1] = 0
        return mask

    def forward(self, x):
        output = x.mm(self.mask * self.weight)
        if self.bias is not None:
            return output.add(self.bias.expand_as(output))
        else:
            return output

    def __repr__(self):
        bias = self.bias is not None
        return self.__class__.__name__ + ' (' \
            + str(self.in_features) + ' -> ' \
            + str(self.out_features) + ', diagonal_zeros=' \
            + str(self.diagonal_zeros) + ', bias=' \
            + str(bias) + ')'
class MaskedConv2d(nn.Module):
    """
    Creates masked convolutional autoregressive layer for pixelCNN.
    For input (x) to hidden (h) or hidden to hidden layers choose diagonal_zeros = False.
    For hidden to output (y) layers:
    If output depends on input through y_i = f(x_{<i}) set diagonal_zeros = True.
    Else if output depends on input through y_i = f(x_{<=i}) set diagonal_zeros = False.
    """

    def __init__(self, in_features, out_features, size_kernel=(3, 3), diagonal_zeros=False, bias=True):
        super(MaskedConv2d, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.size_kernel = size_kernel
        self.diagonal_zeros = diagonal_zeros
        self.weight = Parameter(torch.FloatTensor(out_features, in_features, *self.size_kernel))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        mask = torch.from_numpy(self.build_mask())
        if torch.cuda.is_available():
            mask = mask.cuda()
        # Constant, non-trainable mask. torch.autograd.Variable is deprecated
        # (a no-op alias for Tensor in modern PyTorch), so a plain tensor is
        # equivalent.
        self.mask = mask
        self.reset_parameters()

    def reset_parameters(self):
        # nn.init.kaiming_normal was deprecated in favor of the in-place
        # kaiming_normal_ variant.
        nn.init.kaiming_normal_(self.weight)
        if self.bias is not None:
            self.bias.data.zero_()

    def build_mask(self):
        """Build the (out, in, kH, kW) pixelCNN-style autoregressive mask."""
        n_in, n_out = self.in_features, self.out_features
        assert n_out % n_in == 0 or n_in % n_out == 0, "%d - %d" % (n_in, n_out)
        # Build autoregressive mask
        l = (self.size_kernel[0] - 1) // 2
        m = (self.size_kernel[1] - 1) // 2
        mask = np.ones((n_out, n_in, self.size_kernel[0], self.size_kernel[1]), dtype=np.float32)
        # Zero out rows above the center pixel and columns to its left.
        mask[:, :, :l, :] = 0
        mask[:, :, l, :m] = 0
        if n_out >= n_in:
            k = n_out // n_in
            for i in range(n_in):
                mask[i * k:(i + 1) * k, i + 1:, l, m] = 0
                if self.diagonal_zeros:
                    mask[i * k:(i + 1) * k, i:i + 1, l, m] = 0
        else:
            k = n_in // n_out
            for i in range(n_out):
                mask[i:i + 1, (i + 1) * k:, l, m] = 0
                if self.diagonal_zeros:
                    mask[i:i + 1, i * k:(i + 1) * k:, l, m] = 0
        return mask

    def forward(self, x):
        return F.conv2d(x, self.mask * self.weight, bias=self.bias, padding=(1, 1))

    def __repr__(self):
        bias = self.bias is not None
        return self.__class__.__name__ + ' (' \
            + str(self.in_features) + ' -> ' \
            + str(self.out_features) + ', diagonal_zeros=' \
            + str(self.diagonal_zeros) + ', bias=' \
            + str(bias) + ', size_kernel=' \
            + str(self.size_kernel) + ')'
class Flatten(torch.nn.Module):
    """Flatten every dimension after the batch dimension."""

    def forward(self, x):
        batch_size = x.size(0)
        # Collapse all non-batch dimensions into a single one.
        return x.view(batch_size, -1)
class Unflatten(torch.nn.Module):
    """Reshape a flat (batch, prod(ndims)) tensor back to (batch, *ndims)."""
    def __init__(self, ndims):
        super(Unflatten, self).__init__()
        self.ndims = ndims  # target trailing dimensions (tuple/list)
    def forward(self, x):
        # Keep the batch dimension, reshape the remainder to self.ndims.
        return x.view(x.size(0), *self.ndims) | [
"torch.nn.ConvTranspose2d",
"torch.autograd.Variable",
"torch.nn.Conv2d",
"torch.nn.functional.conv2d",
"numpy.ones",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.nn.init.kaiming_normal",
"torch.nn.Sigmoid"
] | [((670, 682), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (680, 682), True, 'import torch.nn as nn\n'), ((701, 787), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', 'output_channels', 'kernel_size', 'stride', 'padding', 'dilation'], {}), '(input_channels, output_channels, kernel_size, stride, padding,\n dilation)\n', (710, 787), True, 'import torch.nn as nn\n'), ((801, 887), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', 'output_channels', 'kernel_size', 'stride', 'padding', 'dilation'], {}), '(input_channels, output_channels, kernel_size, stride, padding,\n dilation)\n', (810, 887), True, 'import torch.nn as nn\n'), ((1356, 1368), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1366, 1368), True, 'import torch.nn as nn\n'), ((1387, 1507), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['input_channels', 'output_channels', 'kernel_size', 'stride', 'padding', 'output_padding'], {'dilation': 'dilation'}), '(input_channels, output_channels, kernel_size, stride,\n padding, output_padding, dilation=dilation)\n', (1405, 1507), True, 'import torch.nn as nn\n'), ((1557, 1677), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['input_channels', 'output_channels', 'kernel_size', 'stride', 'padding', 'output_padding'], {'dilation': 'dilation'}), '(input_channels, output_channels, kernel_size, stride,\n padding, output_padding, dilation=dilation)\n', (1575, 1677), True, 'import torch.nn as nn\n'), ((2812, 2837), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2835, 2837), False, 'import torch\n'), ((2890, 2940), 'torch.autograd.Variable', 'torch.autograd.Variable', (['mask'], {'requires_grad': '(False)'}), '(mask, requires_grad=False)\n', (2913, 2940), False, 'import torch\n'), ((3014, 3049), 'torch.nn.init.kaiming_normal', 'nn.init.kaiming_normal', (['self.weight'], {}), '(self.weight)\n', (3036, 3049), True, 'import torch.nn as nn\n'), ((3274, 3314), 'numpy.ones', 'np.ones', (['(n_in, n_out)'], {'dtype': 
'np.float32'}), '((n_in, n_out), dtype=np.float32)\n', (3281, 3314), True, 'import numpy as np\n'), ((5356, 5381), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5379, 5381), False, 'import torch\n'), ((5434, 5484), 'torch.autograd.Variable', 'torch.autograd.Variable', (['mask'], {'requires_grad': '(False)'}), '(mask, requires_grad=False)\n', (5457, 5484), False, 'import torch\n'), ((5558, 5593), 'torch.nn.init.kaiming_normal', 'nn.init.kaiming_normal', (['self.weight'], {}), '(self.weight)\n', (5580, 5593), True, 'import torch.nn as nn\n'), ((5968, 6055), 'numpy.ones', 'np.ones', (['(n_out, n_in, self.size_kernel[0], self.size_kernel[1])'], {'dtype': 'np.float32'}), '((n_out, n_in, self.size_kernel[0], self.size_kernel[1]), dtype=np.\n float32)\n', (5975, 6055), True, 'import numpy as np\n'), ((6663, 6731), 'torch.nn.functional.conv2d', 'F.conv2d', (['x', '(self.mask * self.weight)'], {'bias': 'self.bias', 'padding': '(1, 1)'}), '(x, self.mask * self.weight, bias=self.bias, padding=(1, 1))\n', (6671, 6731), True, 'import torch.nn.functional as F\n'), ((2556, 2600), 'torch.FloatTensor', 'torch.FloatTensor', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (2573, 2600), False, 'import torch\n'), ((5081, 5144), 'torch.FloatTensor', 'torch.FloatTensor', (['out_features', 'in_features', '*self.size_kernel'], {}), '(out_features, in_features, *self.size_kernel)\n', (5098, 5144), False, 'import torch\n'), ((2653, 2684), 'torch.FloatTensor', 'torch.FloatTensor', (['out_features'], {}), '(out_features)\n', (2670, 2684), False, 'import torch\n'), ((5197, 5228), 'torch.FloatTensor', 'torch.FloatTensor', (['out_features'], {}), '(out_features)\n', (5214, 5228), False, 'import torch\n')] |
import argparse
import json
import pdb
import re
import string
import numpy as np
from nltk.tokenize import WordPunctTokenizer
from simmc_dataset import SIMMCDatasetForResponseGeneration
# for single embedding
FIELDS_TO_EMBED = ['type', 'color', 'embellishments', 'pattern', 'brand']
FIELD2STR = SIMMCDatasetForResponseGeneration._ATTR2STR
def load_embeddings_from_file(embeddings_path):
    """Load GloVe-style word embeddings from a whitespace-separated text file.

    Each line must have the form ``<word> <v1> <v2> ... <vd>``.

    Args:
        embeddings_path: path to the embeddings text file.

    Returns:
        Tuple ``(glove, embedding_size)`` where ``glove`` maps each word to a
        float32 numpy vector and ``embedding_size`` is the vector dimension
        (0 for an empty file).

    Raises:
        Exception: if a word appears more than once in the file.
    """
    glove = {}
    # Initialize so an empty file returns (.., 0) instead of raising
    # UnboundLocalError on the final return.
    embedding_size = 0
    with open(embeddings_path) as fp:
        for l in fp:
            line_tokens = l.split()
            word = line_tokens[0]
            if word in glove:
                raise Exception('Repeated words in {} embeddings file'.format(embeddings_path))
            vector = np.asarray(line_tokens[1:], "float32")
            glove[word] = vector
            embedding_size = vector.size
    return glove, embedding_size
def clean_value(value, tokenizer):
    """Lower-case and tokenize ``value``, splitting each token on '_' and '-'.

    Returns a flat list of sub-tokens in their original order.
    """
    tokens = tokenizer.tokenize(value.lower())
    return [piece for token in tokens for piece in re.split('_|-', token)]
def extract_single_metadata_embeddings(metadata_path, embeddings_path, save_path):
    """Compute one fixed-size embedding per catalogue item and save them.

    For each item, every field in FIELDS_TO_EMBED is tokenized and embedded
    with the loaded GloVe vectors; token vectors are averaged per field, and
    the per-field averages are concatenated into a single item vector.

    Args:
        metadata_path: path to the metadata JSON file (item_id -> item dict).
        embeddings_path: path to the GloVe-style embeddings text file.
        save_path: destination passed to np.save; the saved object is a dict
            with keys 'embedding_size' and 'embeddings' (item_id -> vector).
    """
    with open(metadata_path) as fp:
        metadata_dict = json.load(fp)
    glove, embedding_size = load_embeddings_from_file(embeddings_path=embeddings_path)
    item_embeddings = {}
    tokenizer = WordPunctTokenizer()
    for item_id, item in metadata_dict.items():
        fields_embeddings = []
        for field in FIELDS_TO_EMBED:
            assert field in item['metadata'], '{} field not in item {}'.format(field, item_id)
            cleaned_values = []
            if isinstance(item['metadata'][field], list,):
                for value in item['metadata'][field]:
                    cleaned_values.extend(clean_value(value, tokenizer))
            else:
                cleaned_values = clean_value(item['metadata'][field], tokenizer)
            emb = []
            for v in cleaned_values:
                if v in glove:
                    emb.append(np.array(glove[v]))
                else:
                    # Out-of-vocabulary fallback must match the actual
                    # embedding dimension; the previous hard-coded 300 made
                    # np.stack fail for any non-300-d embedding file.
                    emb.append(np.random.rand(embedding_size))
                    print('Unknown word \'{}\' initiated with a random embedding'.format(v))
            emb = np.stack(emb)
            fields_embeddings.append(emb.mean(0))
        assert fields_embeddings[-1].size == embedding_size, 'Wrong embedding dimension'
        assert len(fields_embeddings) == len(FIELDS_TO_EMBED), 'Wrong number of embeddings'
        item_embeddings[item_id] = np.concatenate(fields_embeddings)
    np.save(
        save_path,
        {
            'embedding_size': embedding_size*len(FIELDS_TO_EMBED),
            'embeddings': item_embeddings
        }
    )
"""
def extract_list_metadata_embeddings(metadata_path, embeddings_path, save_path):
with open(metadata_path) as fp:
metadata_dict = json.load(fp)
glove, embedding_size = load_embeddings_from_file(embeddings_path=embeddings_path)
unknown_words = set()
item_ids = []
item_embeddings = []
tokenizer = WordPunctTokenizer()
for item_id, item in metadata_dict.items():
for key in item['metadata']:
# availability field is always an empty list
if key == 'availability':
continue
field_name = FIELD2STR[key.lower()] if key.lower() in FIELD2STR else key.lower()
field_tokens = clean_value(field_name, tokenizer)
cleaned_values = []
if isinstance(item['metadata'][key], list,):
if not len(item['metadata'][key]):
cleaned_values.extend('none') #for empty lists
for value in item['metadata'][key]:
cleaned_values.extend(clean_value(value, tokenizer))
else:
cleaned_values = clean_value(item['metadata'][key], tokenizer)
fields_emb = []
for t in field_tokens:
if t in glove:
fields_emb.append(np.array(glove[t]))
else:
if t in string.punctuation:
continue
fields_emb.append(np.random.rand(300,))
unknown_words.add(t)
values_emb = []
for v in cleaned_values:
if v in glove:
values_emb.append(np.array(glove[v]))
else:
if v in string.punctuation:
continue
values_emb.append(np.random.rand(300,))
unknown_words.add(v)
item_ids.append(item_id)
pdb.set_trace()
item_embeddings.append([np.stack(fields_emb).mean(0), np.stack(values_emb).mean(0)])
print('UNKNOWN WORDS: {}'.format(unknown_words))
np.save(
save_path,
{
'embedding_size': embedding_size,
'item_ids': item_ids,
'embeddings': item_embeddings
}
)
print('embeddings saved in {}'.format(save_path))
"""
# Command-line entry point: builds item embeddings from metadata + GloVe file.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--metadata",
        type=str,
        required=True,
        help="Path to metadata JSON file")
    parser.add_argument(
        "--embeddings",
        type=str,
        required=True,
        help="Path to embeddings file"
    )
    parser.add_argument(
        "--save_path",
        type=str,
        required=True,
        help="Path where to save the embeddings"
    )
    parser.add_argument(
        "--type",
        type=str,
        choices=['single', 'list'],
        required=True,
        help="Type of embedding for each item (options: 'single', 'list')"
    )
    args = parser.parse_args()
    if args.type == 'single':
        extract_single_metadata_embeddings(args.metadata, args.embeddings, args.save_path)
    else:
        # 'list' mode is intentionally a no-op: its implementation is commented out above.
        pass#extract_list_metadata_embeddings(args.metadata, args.embeddings, args.save_path)
| [
"numpy.stack",
"json.load",
"re.split",
"argparse.ArgumentParser",
"numpy.asarray",
"numpy.array",
"numpy.random.rand",
"nltk.tokenize.WordPunctTokenizer",
"numpy.concatenate"
] | [((1359, 1379), 'nltk.tokenize.WordPunctTokenizer', 'WordPunctTokenizer', ([], {}), '()\n', (1377, 1379), False, 'from nltk.tokenize import WordPunctTokenizer\n'), ((5078, 5103), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5101, 5103), False, 'import argparse\n'), ((1215, 1228), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1224, 1228), False, 'import json\n'), ((2521, 2554), 'numpy.concatenate', 'np.concatenate', (['fields_embeddings'], {}), '(fields_embeddings)\n', (2535, 2554), True, 'import numpy as np\n'), ((717, 755), 'numpy.asarray', 'np.asarray', (['line_tokens[1:]', '"""float32"""'], {}), "(line_tokens[1:], 'float32')\n", (727, 755), True, 'import numpy as np\n'), ((1026, 1044), 're.split', 're.split', (['"""_|-"""', 'v'], {}), "('_|-', v)\n", (1034, 1044), False, 'import re\n'), ((2236, 2249), 'numpy.stack', 'np.stack', (['emb'], {}), '(emb)\n', (2244, 2249), True, 'import numpy as np\n'), ((2030, 2048), 'numpy.array', 'np.array', (['glove[v]'], {}), '(glove[v])\n', (2038, 2048), True, 'import numpy as np\n'), ((2103, 2122), 'numpy.random.rand', 'np.random.rand', (['(300)'], {}), '(300)\n', (2117, 2122), True, 'import numpy as np\n')] |
# Tree ensembles on the wine dataset (alcohol/sugar/pH -> class).
# Random forest
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
wine = pd.read_csv('https://bit.ly/wine-date')
data = wine[['alcohol', 'sugar', 'pH']].to_numpy()
target = wine['class'].to_numpy()
train_input, test_input, train_target, test_target = train_test_split(data, target, test_size=0.2, random_state=42)
from sklearn.model_selection import cross_validate
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_jobs=-1, random_state=42)
scores = cross_validate(rf, train_input, train_target, return_train_score=True, n_jobs=-1)
print(np.mean(scores['train_score']), np.mean(scores['test_score']))
# 0.9973541965122431 0.8905151032797809
rf.fit(train_input, train_target)
print(rf.feature_importances_)
# [0.23167441 0.50039841 0.26792718]
# Out-of-bag score: evaluate on the samples each tree did not see.
rf = RandomForestClassifier(oob_score=True, n_jobs=-1, random_state=42)
rf.fit(train_input, train_target)
print(rf.oob_score_)
# 0.8934000384837406
## Extra trees (fully random split thresholds)
from sklearn.ensemble import ExtraTreesClassifier
et = ExtraTreesClassifier(n_jobs=-1, random_state=42)
scores = cross_validate(et, train_input, train_target, return_train_score=True, n_jobs=-1)
print(np.mean(scores['train_score']), np.mean(scores['test_score']))
# 0.9974503966084433 0.8887848893166506
et.fit(train_input, train_target)
print(et.feature_importances_)
# [0.20183568 0.52242907 0.27573525]
## Gradient boosting
from sklearn.ensemble import GradientBoostingClassifier
gb = GradientBoostingClassifier(random_state=42)
scores = cross_validate(gb, train_input, train_target, return_train_score=True, n_jobs=-1)
print(np.mean(scores['train_score']), np.mean(scores['test_score']))
# 0.8881086892152563 0.8720430147331015
# More trees + larger learning rate.
gb = GradientBoostingClassifier(n_estimators=500, learning_rate=0.2, random_state=42)
scores = cross_validate(gb, train_input, train_target, return_train_score=True, n_jobs=-1)
print(np.mean(scores['train_score']), np.mean(scores['test_score']))
# 0.9464595437171814 0.8780082549788999
gb.fit(train_input, train_target)
print(gb.feature_importances_)
# [0.15872278 0.68010884 0.16116839]
## Histogram-based gradient boosting
# NOTE(review): the experimental import is only needed on scikit-learn < 1.0 -- confirm.
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
hgb = HistGradientBoostingClassifier(random_state=42)
scores = cross_validate(hgb, train_input, train_target, return_train_score=True, n_jobs=-1)
print(np.mean(scores['train_score']), np.mean(scores['test_score']))
# 0.9321723946453317 0.8801241948619236
hgb.fit(train_input, train_target)
# NOTE(review): this prints the *random forest's* importances again, not hgb's
# (HistGradientBoostingClassifier exposes no feature_importances_) -- likely a
# copy-paste leftover.
print(rf.feature_importances_)
# [0.23167441 0.50039841 0.26792718]
| [
"sklearn.ensemble.RandomForestClassifier",
"sklearn.ensemble.HistGradientBoostingClassifier",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_validate",
"sklearn.ensemble.ExtraTreesClassifier",
"sklearn.ensemble.GradientBoostingClassifier",
"numpy.mean"
] | [((119, 158), 'pandas.read_csv', 'pd.read_csv', (['"""https://bit.ly/wine-date"""'], {}), "('https://bit.ly/wine-date')\n", (130, 158), True, 'import pandas as pd\n'), ((299, 361), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'target'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(data, target, test_size=0.2, random_state=42)\n', (315, 361), False, 'from sklearn.model_selection import train_test_split\n'), ((472, 522), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': '(-1)', 'random_state': '(42)'}), '(n_jobs=-1, random_state=42)\n', (494, 522), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((532, 617), 'sklearn.model_selection.cross_validate', 'cross_validate', (['rf', 'train_input', 'train_target'], {'return_train_score': '(True)', 'n_jobs': '(-1)'}), '(rf, train_input, train_target, return_train_score=True,\n n_jobs=-1)\n', (546, 617), False, 'from sklearn.model_selection import cross_validate\n'), ((833, 899), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'oob_score': '(True)', 'n_jobs': '(-1)', 'random_state': '(42)'}), '(oob_score=True, n_jobs=-1, random_state=42)\n', (855, 899), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1045, 1093), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'n_jobs': '(-1)', 'random_state': '(42)'}), '(n_jobs=-1, random_state=42)\n', (1065, 1093), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((1103, 1188), 'sklearn.model_selection.cross_validate', 'cross_validate', (['et', 'train_input', 'train_target'], {'return_train_score': '(True)', 'n_jobs': '(-1)'}), '(et, train_input, train_target, return_train_score=True,\n n_jobs=-1)\n', (1117, 1188), False, 'from sklearn.model_selection import cross_validate\n'), ((1476, 1519), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', 
(1502, 1519), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((1529, 1614), 'sklearn.model_selection.cross_validate', 'cross_validate', (['gb', 'train_input', 'train_target'], {'return_train_score': '(True)', 'n_jobs': '(-1)'}), '(gb, train_input, train_target, return_train_score=True,\n n_jobs=-1)\n', (1543, 1614), False, 'from sklearn.model_selection import cross_validate\n'), ((1727, 1812), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'n_estimators': '(500)', 'learning_rate': '(0.2)', 'random_state': '(42)'}), '(n_estimators=500, learning_rate=0.2, random_state=42\n )\n', (1753, 1812), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((1817, 1902), 'sklearn.model_selection.cross_validate', 'cross_validate', (['gb', 'train_input', 'train_target'], {'return_train_score': '(True)', 'n_jobs': '(-1)'}), '(gb, train_input, train_target, return_train_score=True,\n n_jobs=-1)\n', (1831, 1902), False, 'from sklearn.model_selection import cross_validate\n'), ((2260, 2307), 'sklearn.ensemble.HistGradientBoostingClassifier', 'HistGradientBoostingClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (2290, 2307), False, 'from sklearn.ensemble import HistGradientBoostingClassifier\n'), ((2317, 2403), 'sklearn.model_selection.cross_validate', 'cross_validate', (['hgb', 'train_input', 'train_target'], {'return_train_score': '(True)', 'n_jobs': '(-1)'}), '(hgb, train_input, train_target, return_train_score=True,\n n_jobs=-1)\n', (2331, 2403), False, 'from sklearn.model_selection import cross_validate\n'), ((621, 651), 'numpy.mean', 'np.mean', (["scores['train_score']"], {}), "(scores['train_score'])\n", (628, 651), True, 'import numpy as np\n'), ((653, 682), 'numpy.mean', 'np.mean', (["scores['test_score']"], {}), "(scores['test_score'])\n", (660, 682), True, 'import numpy as np\n'), ((1192, 1222), 'numpy.mean', 'np.mean', (["scores['train_score']"], {}), "(scores['train_score'])\n", 
(1199, 1222), True, 'import numpy as np\n'), ((1224, 1253), 'numpy.mean', 'np.mean', (["scores['test_score']"], {}), "(scores['test_score'])\n", (1231, 1253), True, 'import numpy as np\n'), ((1618, 1648), 'numpy.mean', 'np.mean', (["scores['train_score']"], {}), "(scores['train_score'])\n", (1625, 1648), True, 'import numpy as np\n'), ((1650, 1679), 'numpy.mean', 'np.mean', (["scores['test_score']"], {}), "(scores['test_score'])\n", (1657, 1679), True, 'import numpy as np\n'), ((1906, 1936), 'numpy.mean', 'np.mean', (["scores['train_score']"], {}), "(scores['train_score'])\n", (1913, 1936), True, 'import numpy as np\n'), ((1938, 1967), 'numpy.mean', 'np.mean', (["scores['test_score']"], {}), "(scores['test_score'])\n", (1945, 1967), True, 'import numpy as np\n'), ((2407, 2437), 'numpy.mean', 'np.mean', (["scores['train_score']"], {}), "(scores['train_score'])\n", (2414, 2437), True, 'import numpy as np\n'), ((2439, 2468), 'numpy.mean', 'np.mean', (["scores['test_score']"], {}), "(scores['test_score'])\n", (2446, 2468), True, 'import numpy as np\n')] |
import torch
import numpy as np
from lagom.core.transform import ExpFactorCumSum
from .base_history import BaseHistory
class Trajectory(BaseHistory):
    r"""Define a trajectory of successive transitions from a single episode.
    .. note::
        It is not necessarily a complete episode (final state is terminal state). However, all transitions
        must come from a single episode. For the history containing transitions from multiple episodes
        (i.e. ``done=True`` in the middle), it is recommended to use :class:`Segment` instead.
    Example::
        >>> from lagom.runner import Transition
        >>> transition1 = Transition(s=1, a=0.1, r=0.5, s_next=2, done=False)
        >>> transition1.add_info(name='V_s', value=10.0)
        >>> transition2 = Transition(s=2, a=0.2, r=0.5, s_next=3, done=False)
        >>> transition2.add_info(name='V_s', value=20.0)
        >>> transition3 = Transition(s=3, a=0.3, r=1.0, s_next=4, done=True)
        >>> transition3.add_info(name='V_s', value=30.0)
        >>> transition3.add_info(name='V_s_next', value=40.0)
        >>> trajectory = Trajectory(gamma=0.1)
        >>> trajectory.add_transition(transition1)
        >>> trajectory.add_transition(transition2)
        >>> trajectory.add_transition(transition3)
        >>> trajectory
        Trajectory:
            Transition: (s=1, a=0.1, r=0.5, s_next=2, done=False)
            Transition: (s=2, a=0.2, r=0.5, s_next=3, done=False)
            Transition: (s=3, a=0.3, r=1.0, s_next=4, done=True)
        >>> trajectory.all_s
        ([1, 2, 3], 4)
        >>> trajectory.all_r
        [0.5, 0.5, 1.0]
        >>> trajectory.all_done
        [False, False, True]
        >>> trajectory.all_V
        ([10.0, 20.0, 30.0], [40.0, True])
        >>> trajectory.all_bootstrapped_returns
        [2.0, 1.5, 1.0]
        >>> trajectory.all_discounted_returns
        [0.56, 0.6, 1.0]
        >>> trajectory.all_TD
        [-7.5, -16.5, -29.0]
    """
    def add_transition(self, transition):
        """Append *transition*, refusing additions once a terminal one is stored."""
        # Sanity check for trajectory
        # Not allowed to add more transition if it already contains done=True
        if len(self.transitions) > 0:  # non-empty
            assert not self.transitions[-1].done, 'not allowed to add transition, because already contains done=True'
        super().add_transition(transition)
    @property
    def all_s(self):
        """Return ``(list of states s_0..s_{T-1}, final next state s_T)``."""
        return [transition.s for transition in self.transitions], self.transitions[-1].s_next
    @property
    def all_returns(self):
        """Return undiscounted returns G_t = sum of rewards from t onwards."""
        return ExpFactorCumSum(1.0)(self.all_r)
    @property
    def all_discounted_returns(self):
        """Return discounted returns G_t computed with discount factor ``self.gamma``."""
        return ExpFactorCumSum(self.gamma)(self.all_r)
    def _rewards_with_bootstrapping(self):
        """Return rewards with the last state value appended as a bootstrap term.

        The bootstrap value is taken from the final transition's ``V_s_next``
        (converted to a Python float if it is a torch tensor) and forced to
        zero when the trajectory ends in a terminal state.
        """
        # Get last state value and last done
        last_V = self.transitions[-1].V_s_next
        last_done = self.transitions[-1].done
        # Get raw value if Tensor dtype
        if torch.is_tensor(last_V):
            last_V = last_V.item()
        assert isinstance(last_V, float), f'expected float dtype, got {type(last_V)}'
        # Set zero value if terminal state
        if last_done:
            last_V = 0.0
        return self.all_r + [last_V]
    @property
    def all_bootstrapped_returns(self):
        """Return undiscounted returns bootstrapped with the final state value."""
        bootstrapped_rewards = self._rewards_with_bootstrapping()
        out = ExpFactorCumSum(1.0)(bootstrapped_rewards)
        # Take out last one, because it is just last state value itself
        out = out[:-1]
        return out
    @property
    def all_bootstrapped_discounted_returns(self):
        """Return discounted returns bootstrapped with the final state value."""
        bootstrapped_rewards = self._rewards_with_bootstrapping()
        out = ExpFactorCumSum(self.gamma)(bootstrapped_rewards)
        # Take out last one, because it is just last state value itself
        out = out[:-1]
        return out
    @property
    def all_V(self):
        """Return ``([V(s_0)..V(s_{T-1})], [V(s_T), done_T])``.

        The trailing ``done`` flag lets callers know whether the final value
        should be treated as zero (terminal state).
        """
        final = [self.transitions[-1].V_s_next, self.transitions[-1].done]
        return [transition.V_s for transition in self.transitions], final
    @property
    def all_TD(self):
        """Return one-step TD errors ``r_t + gamma*V(s_{t+1}) - V(s_t)`` as floats."""
        # Get all rewards
        all_r = np.array(self.all_r)
        # Get all state values with raw values if Tensor dtype
        all_V = self.all_V
        all_V = all_V[0] + [all_V[1][0]]  # unpack to state values from first to last
        all_V = np.array([v.item() if torch.is_tensor(v) else v for v in all_V])
        # Set last state value as zero if terminal state
        if self.all_done[-1]:
            all_V[-1] = 0.0
        # Unpack state values into current and next time step
        all_V_s = all_V[:-1]
        all_V_s_next = all_V[1:]
        # Calculate TD error
        all_TD = all_r + self.gamma*all_V_s_next - all_V_s
        return all_TD.astype(np.float32).tolist()
    def all_GAE(self, gae_lambda):
        """Generalized Advantage Estimation -- not implemented yet."""
        # TODO: implement it + add to test_runner
        raise NotImplementedError
| [
"lagom.core.transform.ExpFactorCumSum",
"torch.is_tensor",
"numpy.array"
] | [((3002, 3025), 'torch.is_tensor', 'torch.is_tensor', (['last_V'], {}), '(last_V)\n', (3017, 3025), False, 'import torch\n'), ((4219, 4239), 'numpy.array', 'np.array', (['self.all_r'], {}), '(self.all_r)\n', (4227, 4239), True, 'import numpy as np\n'), ((2620, 2640), 'lagom.core.transform.ExpFactorCumSum', 'ExpFactorCumSum', (['(1.0)'], {}), '(1.0)\n', (2635, 2640), False, 'from lagom.core.transform import ExpFactorCumSum\n'), ((2725, 2752), 'lagom.core.transform.ExpFactorCumSum', 'ExpFactorCumSum', (['self.gamma'], {}), '(self.gamma)\n', (2740, 2752), False, 'from lagom.core.transform import ExpFactorCumSum\n'), ((3445, 3465), 'lagom.core.transform.ExpFactorCumSum', 'ExpFactorCumSum', (['(1.0)'], {}), '(1.0)\n', (3460, 3465), False, 'from lagom.core.transform import ExpFactorCumSum\n'), ((3774, 3801), 'lagom.core.transform.ExpFactorCumSum', 'ExpFactorCumSum', (['self.gamma'], {}), '(self.gamma)\n', (3789, 3801), False, 'from lagom.core.transform import ExpFactorCumSum\n'), ((4463, 4481), 'torch.is_tensor', 'torch.is_tensor', (['v'], {}), '(v)\n', (4478, 4481), False, 'import torch\n')] |
#versie 219/12/2018
import math
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from peepo.pp.v3.sensory_input import SensoryInput
from pgmpy.estimators import BayesianEstimator
from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianModel
from peepo.playground.simple_color_recognition_old_with_pgmpy.CeePeeDees import CPD
from peepo.playground.simple_color_recognition_old_with_pgmpy.utilities.lattices import Lattices
from peepo.playground.simple_color_recognition_old_with_pgmpy.utilities.utilities import Utilities
from peepo.pp.generative_model import GenerativeModel
class SensoryInputVirtualPeepo(SensoryInput):
    """Sensory-input adapter exposing a peepo object's CPD values to a GenerativeModel.

    Wraps an object that carries ``nodes`` (iterable of ``(name, attrs)``
    pairs) and a ``pgmpy_test`` Bayesian model with fitted CPDs.
    """
    def __init__(self, obj):
        super().__init__()
        self.peepo = obj  # owner exposing .nodes and .pgmpy_test

    def action(self, node, prediction):
        """No-op: this virtual peepo takes no motor actions (returns None)."""
        # Intentionally does nothing; the original body was an unused `a = 0`.

    def value(self, name):
        """Return the CPD values of node *name*, or None if no node matches."""
        for node in self.peepo.nodes:  # node is a (name, attr_dict) pair
            if name == node[0]:
                return self.peepo.pgmpy_test.get_cpds(node[0]).values
        return None
class MyClass(object):
    """Topology search for the simple color-recognition Bayesian network.

    Generates candidate BENS->WORLD topologies from a lattice, fits each
    candidate's CPDs against synthetic color data with pgmpy, and tests the
    candidate with a GenerativeModel.
    """
    def __init__(self, case):
        self.case = case
        self.results = []
        self.networx_test = nx.DiGraph()
        self.pgmpy_test = BayesianModel()
        self.networx = nx.DiGraph()
        self.pgmpy = BayesianModel()
        self.best_error = math.inf
        self.best_topology = [0,0,nx.DiGraph]
        self.dictionary = []
        self.header = {}
        self.nodes_0 = []
        self.edges_0 = {}
        self.nodes = []
        self.edges = {}
        self.cpds = {}
        self.colors_dictionary ={}
        self.colors_table =[]
        self.colors_cpd = []
        self.learning_data = {}
        # BUGFIX: `number_of_colors` is the attribute actually read later
        # (get_my_colors / test_topology); the original only initialized the
        # misspelled `nummber_of_colors`. The old name is kept for any
        # external reader that may rely on it.
        self.number_of_colors = 0
        self.nummber_of_colors = 0
        self._util = Utilities(case)
        self._lat = Lattices(self._util)

    def get_my_colors(self):
        """Build and cache the WORLD color CPD from all BEN/MEM nodes."""
        evidence = []
        cardinality = []
        for i, node in enumerate(self.nodes):
            if 'BEN' in node[0] or 'MEM' in node[0]:
                evidence.append(node[0])
                cardinality.append(node[1]['cardinality'])
        self.colors_dictionary, self.colors_table, self.colors_cpd = self.color_cpd('WORLD',3,evidence,cardinality)
        self.number_of_colors = self.colors_table.shape[1]
        print('Number of colors : ', self.number_of_colors)
        print(self.colors_cpd)

    def color_cpd(self,var,card_var,evidence,cardinality):
        """Return (evidence-state dict, state table, TabularCPD) for *var*.

        NOTE(review): the three CPD rows are hard-coded with 16 entries, so
        this only works when np.prod(cardinality) == 16 -- confirm upstream.
        """
        table = CPD.get_index_matrix(cardinality)
        colors ={}
        hi = 1
        lo = 0
        C = np.prod(cardinality)
        matrix = np.full((3, C), 1. / 3.)
        matrix[0] = [hi, lo, lo, hi, lo, lo, hi, lo, hi, lo, lo, hi, lo, lo, hi, lo]
        matrix[1] = [lo, hi, lo, lo, hi, lo, lo, hi, lo, hi, lo, lo, hi, lo, lo, hi]
        matrix[2] = [lo, lo, hi, lo, lo, hi, lo, lo, lo, lo, hi, lo, lo, hi, lo, lo]
        cpd =TabularCPD(variable=var, variable_card=card_var, values=matrix,
                        evidence=evidence,
                        evidence_card=cardinality)
        for i, node in enumerate(evidence):
            colors.update({node:table[i]})
        return colors,table, cpd

    def add_edges(self, topology):
        """Rebuild self.networx edges from a 0/1 adjacency matrix *topology*.

        topology[row][column] == 1 adds the edge BENS_<column> -> WORLD_<row>.
        """
        self.networx.remove_edges_from(self.edges)
        self.edges = []
        shape = np.asarray(topology).shape
        self.nodes = self.networx.nodes(data = True)
        for column in range(0,shape[1]):
            for row in range(0,shape[0]):
                if topology[row][column] == 1:
                    parent = 'BENS_' + str(column)
                    child = 'WORLD_'+ str(row)
                    self.networx.add_edge(parent, child)
        self.edges = self.networx.edges()

    def add_dummy_cpds(self):
        """Attach placeholder CPDs: uniform for BEN/MEM parents, random for children.

        Must be re-run whenever edges change, because the shape of a child's
        CPD depends on the cardinalities of its incoming nodes.
        """
        for i, node in enumerate(self.nodes):
            cardinality = node[1]['cardinality']
            if ('BEN' in node[0]) or ('MEM' in node[0]):
                self.nodes[i][1]['cpd'] = CPD.create_fixed_parent(cardinality, modus = 'uniform')
            else:
                incoming_nodes = self.networx.in_edges(node[0])
                if len(incoming_nodes) == 0:
                    # Child without parents gets an orphan CPD.
                    self.nodes[i][1]['cpd'] = CPD.create_random_child(cardinality, modus = 'orphan')
                    continue
                card_parent = []
                for m, n in enumerate(incoming_nodes):
                    par = self.networx.node[n[0]]['cardinality']
                    card_parent.append(par)
                self.nodes[i][1]['cpd'] = CPD.create_random_child(cardinality, card_parent)

    def create_learning_data(self):
        """Fill self.learning_data with per-node state sequences for CPD estimation."""
        self.get_my_colors()
        self.learning_data = {}
        for i, node in enumerate(self.nodes):
            print('node in create learnin data : ', node[0])
            if "BEN" in node[0]:
                self.learning_data.update({node[0]:self.colors_table[i].tolist()})
            if "WORLD" in node[0]:
                # Flatten the WORLD CPD to (3, n_colors) and take the row
                # matching the hue digit embedded in the node name.
                shape = self.colors_cpd.values.shape
                reshaped_cpd = self.colors_cpd.values.reshape(shape[0], int(np.prod(shape)/shape[0]))
                for hue in range(0,3):
                    if str(hue) in node[0]:
                        self.learning_data.update({node[0]:reshaped_cpd[hue,:].tolist()})
        print('Learning data')
        print(self.learning_data)

    def do_inference(self, models, expected_result):
        """Run each model's process() step.

        NOTE(review): the process() result and *expected_result* are not yet
        used -- error bookkeeping still has to be implemented.
        """
        for key in models:
            models[key].process()

    def test_topology(self):
        """Evaluate the current candidate network over every possible color."""
        self.networx_test = self.networx.copy()
        self.pgmpy_test = self.pgmpy.copy()
        model = {'main': GenerativeModel(SensoryInputVirtualPeepo(self), self.pgmpy_test)}
        expected_result = [0,0,0]
        # ------ going through all possible colors
        for color in range(0, self.number_of_colors):
            states = self.colors_table[:,color]
            shape = self.colors_cpd.values.shape
            reshaped_cpd = self.colors_cpd.values.reshape(shape[0], int(np.prod(shape) / shape[0]))
            expected_result = reshaped_cpd[:,int(color)]
            # Clamp each BENS node to the pixel state of this color.
            for i, pixel in enumerate(states):
                cardinality = self.pgmpy_test.get_cardinality('BENS_'+str(i))
                self.pgmpy_test.get_cpds('BENS_' + str(i)).values = CPD.create_fixed_parent(cardinality, state = int(pixel))
            self.do_inference(model ,expected_result)

    def estimate_parameters(self):
        """Fit non-root CPDs from self.learning_data with a Bayesian estimator.

        NOTE(review): `estimate_cpd('WORLD_0', ...)` is hard-coded, so every
        child node receives the CPD estimated for WORLD_0 -- confirm whether
        this should be `estimate_cpd(node[0], ...)` instead.
        """
        data = pd.DataFrame(data=self.learning_data)
        estimator = BayesianEstimator(self.pgmpy, data)
        for i, node in enumerate(self.nodes):
            if 'LAN' in node[0] or 'MOTOR' in node[0] or 'WORLD' in node[0]:
                self.pgmpy.get_cpds(node[0]).values = estimator.estimate_cpd('WORLD_0', prior_type='dirichlet', pseudo_counts=[2, 3]).values

    def do_it(self):
        """Run the whole experiment: enumerate topologies, fit, test, draw.

        Returns the (currently never-populated) self.results list.
        """
        self.networx_test, self.dictionary, self.header = self._util.get_network()
        self.networx = self.networx_test.copy()
        self.nodes = self.networx.nodes(data=True)
        self.create_learning_data()
        print('incoming panda data')
        print(self.learning_data)
        print('Dictionary : ', self.dictionary)
        # Construct all possible topologies; the treshold restrains the count:
        # 0 -> all possible topologies, 100 -> only the fully connected one.
        # treshold=50 keeps only topologies with an entropy >= 0.5.
        possible_topologies = self._lat.get_possible_topologies(treshold = 50)
        print("Possible topologies : ", len(possible_topologies))
        entropy = 0
        count = 0  # TEMPORARY: loop guard while the scoring is unfinished
        # Walk through all topologies.
        for topology in possible_topologies:
            entropy = topology[1]
            if entropy == 0:
                continue  # safeguard against degenerate topologies
            topo = topology[0]
            # For each topology construct the edges and refresh the dummy
            # CPDs (the shape of a child's CPD depends on incoming edges).
            self.add_edges(topo)
            self.add_dummy_cpds()
            # Convert DiGraph to pgmpy and check.
            self.pgmpy = self._util.translate_digraph_to_pgmpy(self.networx)
            self.pgmpy.check_model()
            # Let pgmpy estimate the CPDs of the children from learning data.
            self.estimate_parameters()
            # Test the constructed topology.
            self.test_topology()
            count += 1
            if count > 10:  # TEMPORARY cap while developing
                break
        print('Check -> number of processed topologies in loop : ', count)
        # TODO:
        #  a) score each topology (error + entropy tuple) and keep the best,
        #  b) persist the best model via self._util.save_network() /
        #     save_pgmpy_network(),
        #  c) create output (graph?).
        self.draw()
        return self.results

    def draw(self):
        """Plot the current network layout (debug aid; to remove later)."""
        plt.figure(figsize=(10, 5))
        pos = nx.circular_layout(self.networx, scale=2)
        nx.draw(self.networx, pos, node_size=1200, node_color='lightblue',
                linewidths=0.25, font_size=10, font_weight='bold', with_labels=True)
        plt.show()
def main():
    """Run the color-recognition topology search and print its results."""
    experiment = MyClass('old_color_recognition')
    print(experiment.do_it())
####################################################################################
############################### BEGIN HERE #########################################
####################################################################################
# Script entry point: run the experiment only when executed directly.
if __name__ == "__main__":
    # logging.basicConfig()
    # logging.getLogger().setLevel(logging.INFO)
    main()
"numpy.full",
"pandas.DataFrame",
"pgmpy.models.BayesianModel",
"peepo.playground.simple_color_recognition_old_with_pgmpy.utilities.utilities.Utilities",
"matplotlib.pyplot.show",
"peepo.playground.simple_color_recognition_old_with_pgmpy.CeePeeDees.CPD.create_random_child",
"numpy.asarray",
"matplotli... | [((1143, 1155), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1153, 1155), True, 'import networkx as nx\n'), ((1183, 1198), 'pgmpy.models.BayesianModel', 'BayesianModel', ([], {}), '()\n', (1196, 1198), False, 'from pgmpy.models import BayesianModel\n'), ((1222, 1234), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1232, 1234), True, 'import networkx as nx\n'), ((1256, 1271), 'pgmpy.models.BayesianModel', 'BayesianModel', ([], {}), '()\n', (1269, 1271), False, 'from pgmpy.models import BayesianModel\n'), ((1712, 1727), 'peepo.playground.simple_color_recognition_old_with_pgmpy.utilities.utilities.Utilities', 'Utilities', (['case'], {}), '(case)\n', (1721, 1727), False, 'from peepo.playground.simple_color_recognition_old_with_pgmpy.utilities.utilities import Utilities\n'), ((1748, 1768), 'peepo.playground.simple_color_recognition_old_with_pgmpy.utilities.lattices.Lattices', 'Lattices', (['self._util'], {}), '(self._util)\n', (1756, 1768), False, 'from peepo.playground.simple_color_recognition_old_with_pgmpy.utilities.lattices import Lattices\n'), ((2426, 2459), 'peepo.playground.simple_color_recognition_old_with_pgmpy.CeePeeDees.CPD.get_index_matrix', 'CPD.get_index_matrix', (['cardinality'], {}), '(cardinality)\n', (2446, 2459), False, 'from peepo.playground.simple_color_recognition_old_with_pgmpy.CeePeeDees import CPD\n'), ((2521, 2541), 'numpy.prod', 'np.prod', (['cardinality'], {}), '(cardinality)\n', (2528, 2541), True, 'import numpy as np\n'), ((2559, 2585), 'numpy.full', 'np.full', (['(3, C)', '(1.0 / 3.0)'], {}), '((3, C), 1.0 / 3.0)\n', (2566, 2585), True, 'import numpy as np\n'), ((2852, 2966), 'pgmpy.factors.discrete.TabularCPD', 'TabularCPD', ([], {'variable': 'var', 'variable_card': 'card_var', 'values': 'matrix', 'evidence': 'evidence', 'evidence_card': 'cardinality'}), '(variable=var, variable_card=card_var, values=matrix, evidence=\n evidence, evidence_card=cardinality)\n', (2862, 2966), False, 'from pgmpy.factors.discrete 
import TabularCPD\n'), ((7187, 7224), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'self.learning_data'}), '(data=self.learning_data)\n', (7199, 7224), True, 'import pandas as pd\n'), ((7245, 7280), 'pgmpy.estimators.BayesianEstimator', 'BayesianEstimator', (['self.pgmpy', 'data'], {}), '(self.pgmpy, data)\n', (7262, 7280), False, 'from pgmpy.estimators import BayesianEstimator\n'), ((11940, 11967), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (11950, 11967), True, 'import matplotlib.pyplot as plt\n'), ((11982, 12023), 'networkx.circular_layout', 'nx.circular_layout', (['self.networx'], {'scale': '(2)'}), '(self.networx, scale=2)\n', (12000, 12023), True, 'import networkx as nx\n'), ((12099, 12238), 'networkx.draw', 'nx.draw', (['self.networx', 'pos'], {'node_size': '(1200)', 'node_color': '"""lightblue"""', 'linewidths': '(0.25)', 'font_size': '(10)', 'font_weight': '"""bold"""', 'with_labels': '(True)'}), "(self.networx, pos, node_size=1200, node_color='lightblue',\n linewidths=0.25, font_size=10, font_weight='bold', with_labels=True)\n", (12106, 12238), True, 'import networkx as nx\n'), ((12260, 12270), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12268, 12270), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3562), 'numpy.asarray', 'np.asarray', (['topology'], {}), '(topology)\n', (3552, 3562), True, 'import numpy as np\n'), ((4740, 4793), 'peepo.playground.simple_color_recognition_old_with_pgmpy.CeePeeDees.CPD.create_fixed_parent', 'CPD.create_fixed_parent', (['cardinality'], {'modus': '"""uniform"""'}), "(cardinality, modus='uniform')\n", (4763, 4793), False, 'from peepo.playground.simple_color_recognition_old_with_pgmpy.CeePeeDees import CPD\n'), ((5293, 5342), 'peepo.playground.simple_color_recognition_old_with_pgmpy.CeePeeDees.CPD.create_random_child', 'CPD.create_random_child', (['cardinality', 'card_parent'], {}), '(cardinality, card_parent)\n', (5316, 5342), False, 'from 
peepo.playground.simple_color_recognition_old_with_pgmpy.CeePeeDees import CPD\n'), ((4969, 5021), 'peepo.playground.simple_color_recognition_old_with_pgmpy.CeePeeDees.CPD.create_random_child', 'CPD.create_random_child', (['cardinality'], {'modus': '"""orphan"""'}), "(cardinality, modus='orphan')\n", (4992, 5021), False, 'from peepo.playground.simple_color_recognition_old_with_pgmpy.CeePeeDees import CPD\n'), ((6746, 6760), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (6753, 6760), True, 'import numpy as np\n'), ((5829, 5843), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (5836, 5843), True, 'import numpy as np\n')] |
import csv
import cv2
import numpy as np
import platform
import math
import random
import os
import os.path
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from matplotlib import image as image
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten
from keras.layers import Conv2D, Cropping2D, Input, Conv2D
from keras.optimizers import Adam
from keras.models import load_model
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from tensorflow.python.client import device_lib
from keras.models import load_model
import h5py
from keras import __version__ as keras_version
def get_available_gpus():
    """Return the names of all GPU devices TensorFlow can see on this machine."""
    gpu_names = []
    for device in device_lib.list_local_devices():
        if device.device_type == 'GPU':
            gpu_names.append(device.name)
    return gpu_names
def load_dataset(path):
    """Load camera frames and steering angles from a simulator recording.

    Reads ``driving_log.csv`` under *path*; for every row with a non-zero
    steering angle the center/left/right frames are loaded (converted
    BGR -> RGB) and recorded with a +0.1 / -0.1 steering correction for the
    left / right cameras. Rows whose angle equals zero are skipped.

    Returns a tuple ``(images, angles)`` of numpy arrays.
    """
    with open(path + 'driving_log.csv') as csvfile:
        rows = [row for row in csv.reader(csvfile)]
    row_iter = iter(rows)
    next(row_iter)  # discard the CSV header row
    images = []
    angles = []
    for row in row_iter:
        angle = float(row[3])
        if math.isclose(angle, 0.0):
            continue  # drop straight-driving samples to balance the set
        # (column index, steering correction) for center/left/right cameras.
        for col, correction in ((0, 0.0), (1, 0.1), (2, -0.1)):
            file_name = row[col].split('/')[-1]
            bgr_frame = cv2.imread(file_name)
            images.append(np.array(cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)))
            angles.append(angle + correction)
    return np.array(images), np.array(angles)
def augment_data(features, labels):
    """Mirror every retained sample to double the training set.

    Each (image, angle) pair passing the filter is kept and additionally
    appended as a horizontally flipped image with the negated angle.

    NOTE(review): the |angle| < 1000 filter effectively disables filtering
    for real steering angles -- it looks like a leftover debug threshold.
    """
    out_images = []
    out_angles = []
    skipped = 0
    for img, angle in zip(features, labels):
        if abs(angle) >= 1000:
            skipped += 1
            continue
        out_images.append(img)
        out_angles.append(angle)
        # Horizontal flip with mirrored steering direction.
        out_images.append(cv2.flip(img, flipCode=1))
        out_angles.append(-angle)
    print("close to zero : {}".format(skipped))
    return np.array(out_images), np.array(out_angles)
def nVidia_model():
    """Build the NVIDIA end-to-end steering CNN (variant without dropout).

    Input: raw 160x320x3 simulator frames (normalized/cropped in-model).
    Output: a single steering-angle regression value.
    """
    steering_net = Sequential()
    # Scale pixels to [-0.5, 0.5] so raw frames can be fed directly.
    steering_net.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
    # Trim sky/hood (top/bottom) and side margins.
    steering_net.add(Cropping2D(cropping=((70, 24), (60, 60))))
    # Five convolutional feature extractors, as in the NVIDIA architecture.
    conv_stack = (
        Conv2D(24, (5, 5), padding="same", strides=(2, 2), activation="relu"),
        Conv2D(36, (5, 5), padding="same", strides=(2, 2), activation="relu"),
        Conv2D(48, (5, 5), padding="valid", strides=(2, 2), activation="relu"),
        Conv2D(64, (3, 3), padding="valid", activation="relu"),
        Conv2D(64, (3, 3), padding="valid", activation="relu"),
    )
    for layer in conv_stack:
        steering_net.add(layer)
    # Fully connected regression head.
    steering_net.add(Flatten())
    for units in (100, 50, 10, 1):
        steering_net.add(Dense(units))
    return steering_net
def nVidia_model_v2():
    """Build the NVIDIA steering CNN with SpatialDropout2D/Dropout regularization.

    Same topology as nVidia_model() but every conv stage is followed by a
    SpatialDropout2D(0.2) and the two large dense layers by Dropout(0.5).
    """
    net = Sequential()
    net.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))  # normalize
    net.add(Cropping2D(cropping=((70, 24), (60, 60))))  # drop sky/hood and side margins
    # (filters, kernel, padding, strides) per conv stage; strides (1, 1) is
    # the Conv2D default, written explicitly for uniformity.
    for filters, kernel, pad, strides in (
        (24, (5, 5), "same", (2, 2)),
        (36, (5, 5), "same", (2, 2)),
        (48, (5, 5), "valid", (2, 2)),
        (64, (3, 3), "valid", (1, 1)),
        (64, (3, 3), "valid", (1, 1)),
    ):
        net.add(Conv2D(filters, kernel, padding=pad, strides=strides, activation="relu"))
        net.add(SpatialDropout2D(0.2))  # drops whole feature maps
    net.add(Flatten())
    for units in (100, 50):
        net.add(Dense(units))
        net.add(Dropout(0.5))
    net.add(Dense(10))
    net.add(Dense(1))
    return net
# ----------------------------------------------------------------------
# Training script: configure the GPU, load and augment the recorded
# driving data, build (or resume) the model, train it and persist both
# the trained model and a loss-trend plot.
# ----------------------------------------------------------------------
#GPU Configuration
print("=========================================")
print(">>GPU Configuration")
print("====================")
config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.7
config.gpu_options.allow_growth = True  # grab GPU memory on demand, not all at once
set_session(tf.Session(config=config))
get_available_gpus()
#Loading Training Data
print("=========================================")
print(">>Loading Training Data")
print("====================")
X_train = []
y_train = []
X_train_fwd = []
y_train_fwd = []
X_train_fwd,y_train_fwd = load_dataset('C:/simout/forward-3-laps/')
print("X_train_fwd ="+str(len(X_train_fwd)))
X_train = X_train_fwd
y_train = y_train_fwd
X_train_rev = []
y_train_rev = []
X_train_rev,y_train_rev = load_dataset('C:/simout/reverse-3-laps/')
print("X_train_rev ="+str(len(X_train_rev)))
X_train = np.concatenate( [X_train , X_train_rev])
y_train = np.concatenate ( [y_train , y_train_rev])
X_train_track2_fwd = []
y_train_track2_fwd = []
X_train_track2_fwd,y_train_track2_fwd = load_dataset('C:/simout/track2-fwd/')
print("X_train_track2_fwd ="+str(len(X_train_track2_fwd)))
X_train = np.concatenate( [X_train , X_train_track2_fwd])
y_train = np.concatenate ( [y_train , y_train_track2_fwd])
X_train_corrections = []
y_train_corrections = []
X_train_corrections,y_train_corrections = load_dataset('C:/simout/corrective-actions/')
print("X_train_corrections ="+str(len(X_train_corrections)))
X_train = np.concatenate( [X_train , X_train_corrections])
y_train = np.concatenate ( [y_train , y_train_corrections])
X_train_curves = []
y_train_curves= []
X_train_curves,y_train_curves = load_dataset('C:/simout/curves/')
print("y_train_curves ="+str(len(y_train_curves)))
X_train = np.concatenate( [X_train , X_train_curves])
y_train = np.concatenate ( [y_train , y_train_curves])
#Data Exploration
print("=========================================")
print(">>Data Exploration")
print("====================")
# Number of training samples (frames).
n_train = len(X_train)
# Shape of a single camera frame.
image_shape = X_train[0].shape
# Number of distinct steering angles seen in the data.
n_classes = len(np.unique(y_train))
print()
print("Number of training examples =", n_train)
print("Image data shape =", image_shape)
print("Number of unique classes =", n_classes)
#Data Augmentation
print("=========================================")
print(">>Data Augmentation")
print("====================")
X_train_aug1=[]
y_train_aug1=[]
#X_train_aug1,y_train_aug1=balance_dataset(X_train,y_train)
X_train_aug1=X_train
y_train_aug1=y_train
X_train_aug=[]
y_train_aug=[]
X_train_aug,y_train_aug= augment_data(X_train_aug1,y_train_aug1)
#Building Model
print("=========================================")
print(">>Building Model")
print("====================")
current_model='model.attempt-test.h5'
# Resume from a previously saved model when one exists, else start fresh.
if os.path.isfile('./'+current_model):
    model = load_model(current_model)
    print("using saved model")
else:
    model = nVidia_model_v2()
    print("creating new model")
print(model.summary())
#Training Model
print("=========================================")
print(">>Training Model")
print("====================")
BATCH_SIZE = 128
EPOCHS = 150
LEARNING_RATE = 0.0001  # only used when the Adam optimizer line below is enabled
#model.compile(optimizer=Adam(lr=LEARNING_RATE), loss='mse')
#let adams optimizer decide
model.compile(optimizer='adam', loss='mse')
# BUGFIX: use the BATCH_SIZE constant instead of a second hard-coded 128.
history_object=model.fit(X_train_aug, y_train_aug, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_split=0.20, shuffle=True, verbose=1)
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
# BUGFIX: save the figure BEFORE show() -- with non-interactive backends
# show() closes the figure, so saving afterwards produced an empty image.
plt.savefig('./images/model_training_loss_trend.png')
plt.show()
model.save(current_model)
| [
"matplotlib.pyplot.title",
"keras.models.load_model",
"csv.reader",
"keras.layers.Cropping2D",
"tensorflow.ConfigProto",
"os.path.isfile",
"numpy.unique",
"cv2.cvtColor",
"keras.layers.Flatten",
"keras.layers.SpatialDropout2D",
"matplotlib.pyplot.show",
"keras.layers.Dropout",
"matplotlib.py... | [((5067, 5083), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (5081, 5083), True, 'import tensorflow as tf\n'), ((5759, 5797), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_train_rev]'], {}), '([X_train, X_train_rev])\n', (5773, 5797), True, 'import numpy as np\n'), ((5810, 5848), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_train_rev]'], {}), '([y_train, y_train_rev])\n', (5824, 5848), True, 'import numpy as np\n'), ((6049, 6094), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_train_track2_fwd]'], {}), '([X_train, X_train_track2_fwd])\n', (6063, 6094), True, 'import numpy as np\n'), ((6107, 6152), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_train_track2_fwd]'], {}), '([y_train, y_train_track2_fwd])\n', (6121, 6152), True, 'import numpy as np\n'), ((6366, 6412), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_train_corrections]'], {}), '([X_train, X_train_corrections])\n', (6380, 6412), True, 'import numpy as np\n'), ((6425, 6471), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_train_corrections]'], {}), '([y_train, y_train_corrections])\n', (6439, 6471), True, 'import numpy as np\n'), ((6643, 6684), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_train_curves]'], {}), '([X_train, X_train_curves])\n', (6657, 6684), True, 'import numpy as np\n'), ((6697, 6738), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_train_curves]'], {}), '([y_train, y_train_curves])\n', (6711, 6738), True, 'import numpy as np\n'), ((7800, 7836), 'os.path.isfile', 'os.path.isfile', (["('./' + current_model)"], {}), "('./' + current_model)\n", (7814, 7836), False, 'import os\n'), ((8595, 8635), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['loss']"], {}), "(history_object.history['loss'])\n", (8603, 8635), True, 'import matplotlib.pyplot as plt\n'), ((8636, 8680), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['val_loss']"], {}), 
"(history_object.history['val_loss'])\n", (8644, 8680), True, 'import matplotlib.pyplot as plt\n'), ((8681, 8723), 'matplotlib.pyplot.title', 'plt.title', (['"""model mean squared error loss"""'], {}), "('model mean squared error loss')\n", (8690, 8723), True, 'import matplotlib.pyplot as plt\n'), ((8724, 8761), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean squared error loss"""'], {}), "('mean squared error loss')\n", (8734, 8761), True, 'import matplotlib.pyplot as plt\n'), ((8762, 8781), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (8772, 8781), True, 'import matplotlib.pyplot as plt\n'), ((8782, 8847), 'matplotlib.pyplot.legend', 'plt.legend', (["['training set', 'validation set']"], {'loc': '"""upper right"""'}), "(['training set', 'validation set'], loc='upper right')\n", (8792, 8847), True, 'import matplotlib.pyplot as plt\n'), ((8848, 8858), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8856, 8858), True, 'import matplotlib.pyplot as plt\n'), ((8859, 8912), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./images/model_training_loss_trend.png"""'], {}), "('./images/model_training_loss_trend.png')\n", (8870, 8912), True, 'import matplotlib.pyplot as plt\n'), ((744, 775), 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), '()\n', (773, 775), False, 'from tensorflow.python.client import device_lib\n'), ((2967, 2979), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2977, 2979), False, 'from keras.models import Sequential, Model\n'), ((3856, 3868), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3866, 3868), False, 'from keras.models import Sequential, Model\n'), ((5193, 5218), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (5203, 5218), True, 'import tensorflow as tf\n'), ((7098, 7116), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (7107, 7116), True, 'import numpy as np\n'), 
((7848, 7873), 'keras.models.load_model', 'load_model', (['current_model'], {}), '(current_model)\n', (7858, 7873), False, 'from keras.models import load_model\n'), ((956, 975), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (966, 975), False, 'import csv\n'), ((2154, 2170), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2162, 2170), True, 'import numpy as np\n'), ((2171, 2193), 'numpy.array', 'np.array', (['measurements'], {}), '(measurements)\n', (2179, 2193), True, 'import numpy as np\n'), ((2869, 2897), 'numpy.array', 'np.array', (['augmented_features'], {}), '(augmented_features)\n', (2877, 2897), True, 'import numpy as np\n'), ((2899, 2925), 'numpy.array', 'np.array', (['augmented_labels'], {}), '(augmented_labels)\n', (2907, 2925), True, 'import numpy as np\n'), ((3029, 3089), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (3035, 3089), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((3148, 3189), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 24), (60, 60))'}), '(cropping=((70, 24), (60, 60)))\n', (3158, 3189), False, 'from keras.layers import Conv2D, Cropping2D, Input, Conv2D\n'), ((3210, 3279), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5, 5)'], {'padding': '"""same"""', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(24, (5, 5), padding='same', strides=(2, 2), activation='relu')\n", (3216, 3279), False, 'from keras.layers import Conv2D, Cropping2D, Input, Conv2D\n'), ((3296, 3365), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5, 5)'], {'padding': '"""same"""', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(36, (5, 5), padding='same', strides=(2, 2), activation='relu')\n", (3302, 3365), False, 'from keras.layers import Conv2D, Cropping2D, Input, Conv2D\n'), ((3386, 3456), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5, 5)'], {'padding': 
'"""valid"""', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(48, (5, 5), padding='valid', strides=(2, 2), activation='relu')\n", (3392, 3456), False, 'from keras.layers import Conv2D, Cropping2D, Input, Conv2D\n'), ((3473, 3527), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""valid"""', 'activation': '"""relu"""'}), "(64, (3, 3), padding='valid', activation='relu')\n", (3479, 3527), False, 'from keras.layers import Conv2D, Cropping2D, Input, Conv2D\n'), ((3544, 3598), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""valid"""', 'activation': '"""relu"""'}), "(64, (3, 3), padding='valid', activation='relu')\n", (3550, 3598), False, 'from keras.layers import Conv2D, Cropping2D, Input, Conv2D\n'), ((3616, 3625), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3623, 3625), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((3642, 3652), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (3647, 3652), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((3699, 3708), 'keras.layers.Dense', 'Dense', (['(50)'], {}), '(50)\n', (3704, 3708), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((3755, 3764), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (3760, 3764), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((3781, 3789), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3786, 3789), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((3918, 3978), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (3924, 3978), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((4087, 4128), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 24), (60, 60))'}), 
'(cropping=((70, 24), (60, 60)))\n', (4097, 4128), False, 'from keras.layers import Conv2D, Cropping2D, Input, Conv2D\n'), ((4149, 4218), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5, 5)'], {'padding': '"""same"""', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(24, (5, 5), padding='same', strides=(2, 2), activation='relu')\n", (4155, 4218), False, 'from keras.layers import Conv2D, Cropping2D, Input, Conv2D\n'), ((4234, 4255), 'keras.layers.SpatialDropout2D', 'SpatialDropout2D', (['(0.2)'], {}), '(0.2)\n', (4250, 4255), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((4271, 4340), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5, 5)'], {'padding': '"""same"""', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(36, (5, 5), padding='same', strides=(2, 2), activation='relu')\n", (4277, 4340), False, 'from keras.layers import Conv2D, Cropping2D, Input, Conv2D\n'), ((4356, 4377), 'keras.layers.SpatialDropout2D', 'SpatialDropout2D', (['(0.2)'], {}), '(0.2)\n', (4372, 4377), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((4393, 4463), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5, 5)'], {'padding': '"""valid"""', 'strides': '(2, 2)', 'activation': '"""relu"""'}), "(48, (5, 5), padding='valid', strides=(2, 2), activation='relu')\n", (4399, 4463), False, 'from keras.layers import Conv2D, Cropping2D, Input, Conv2D\n'), ((4479, 4500), 'keras.layers.SpatialDropout2D', 'SpatialDropout2D', (['(0.2)'], {}), '(0.2)\n', (4495, 4500), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((4516, 4570), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""valid"""', 'activation': '"""relu"""'}), "(64, (3, 3), padding='valid', activation='relu')\n", (4522, 4570), False, 'from keras.layers import Conv2D, Cropping2D, Input, Conv2D\n'), ((4586, 4607), 'keras.layers.SpatialDropout2D', 'SpatialDropout2D', (['(0.2)'], {}), '(0.2)\n', (4602, 4607), 
False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((4623, 4677), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""valid"""', 'activation': '"""relu"""'}), "(64, (3, 3), padding='valid', activation='relu')\n", (4629, 4677), False, 'from keras.layers import Conv2D, Cropping2D, Input, Conv2D\n'), ((4693, 4714), 'keras.layers.SpatialDropout2D', 'SpatialDropout2D', (['(0.2)'], {}), '(0.2)\n', (4709, 4714), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((4731, 4740), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4738, 4740), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((4757, 4767), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (4762, 4767), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((4784, 4796), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4791, 4796), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((4813, 4822), 'keras.layers.Dense', 'Dense', (['(50)'], {}), '(50)\n', (4818, 4822), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((4839, 4851), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4846, 4851), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((4868, 4877), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (4873, 4877), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((4894, 4902), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (4899, 4902), False, 'from keras.layers import Dense, Dropout, Lambda, SpatialDropout2D, Flatten\n'), ((1559, 1588), 'cv2.imread', 'cv2.imread', (['center_image_path'], {}), '(center_image_path)\n', (1569, 1588), False, 'import cv2\n'), ((1780, 1807), 'cv2.imread', 'cv2.imread', (['left_image_path'], {}), 
'(left_image_path)\n', (1790, 1807), False, 'import cv2\n'), ((2010, 2038), 'cv2.imread', 'cv2.imread', (['right_image_path'], {}), '(right_image_path)\n', (2020, 2038), False, 'import cv2\n'), ((2642, 2669), 'cv2.flip', 'cv2.flip', (['image'], {'flipCode': '(1)'}), '(image, flipCode=1)\n', (2650, 2669), False, 'import cv2\n'), ((1624, 1669), 'cv2.cvtColor', 'cv2.cvtColor', (['center_image', 'cv2.COLOR_BGR2RGB'], {}), '(center_image, cv2.COLOR_BGR2RGB)\n', (1636, 1669), False, 'import cv2\n'), ((1843, 1886), 'cv2.cvtColor', 'cv2.cvtColor', (['left_image', 'cv2.COLOR_BGR2RGB'], {}), '(left_image, cv2.COLOR_BGR2RGB)\n', (1855, 1886), False, 'import cv2\n'), ((2074, 2118), 'cv2.cvtColor', 'cv2.cvtColor', (['right_image', 'cv2.COLOR_BGR2RGB'], {}), '(right_image, cv2.COLOR_BGR2RGB)\n', (2086, 2118), False, 'import cv2\n')] |
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import pandas as pd
import numpy as np
import arviz as az
def plot_ltv(empirical_ltv, inference_data=None, hdi_prob=.95, extra_label_text='', ax=None):
    """Plot empirical LTV points, optionally overlaid with the posterior LTV.

    Args:
        empirical_ltv: 1-D array-like of cumulative LTV values, one per period.
        inference_data: Optional arviz InferenceData whose posterior contains an
            'ltv' variable (and optionally 'true_ltv').
        hdi_prob: Probability mass of the highest-density-interval band.
        extra_label_text: Prefix prepended to the empirical-curve legend label.
        ax: Optional matplotlib axes to draw on; a new 20x10 figure is created
            when omitted.

    Returns:
        The matplotlib axes the curves were drawn on.
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=(20, 10))
    if inference_data:
        posterior_ltv = inference_data['posterior']['ltv']
        # Derive the x-axis length from the data instead of hard-coding 52,
        # mirroring plot_conversion_rate, so any posterior horizon works.
        az.plot_hdi(x=np.arange(posterior_ltv.shape[-1]), hdi_prob=hdi_prob,
                    smooth=False, y=posterior_ltv, ax=ax)
        # Median over chains, then over draws.
        curve_m = np.median(np.median(posterior_ltv, axis=0), axis=0)
        ax.plot(curve_m, 'k', linestyle='dashed', alpha=0.5,
                label=f'Median ltv: {curve_m[len(curve_m) - 1].round(2)}')
        if 'true_ltv' in inference_data['posterior'].keys():
            curve_m = np.median(np.median(inference_data['posterior']['true_ltv'], axis=0), axis=0)
            ax.plot(curve_m, 'k', alpha=0.5,
                    label=f'True ltv: {curve_m[len(curve_m) - 1].round(2)}')
    ax.plot(empirical_ltv, 'o',
            label=f'{extra_label_text}Empirical @{len(empirical_ltv)} periods: {empirical_ltv[len(empirical_ltv) - 1].round(2)}')
    return ax
def plot_conversion_rate(inference_data, hdi_prob=.95, extra_label_text='', ax=None):
    """Plot the posterior per-cohort conversion rate with its HDI band.

    Draws the HDI envelope first, then the median curve twice: a faint solid
    line underneath and solid markers on top, both sharing one legend label.
    """
    if ax is None:
        _, ax = plt.subplots(figsize=(20, 10))
    posterior = inference_data['posterior']
    rates = posterior['conversion_rate_by_cohort']
    az.plot_hdi(x=np.arange(rates.shape[-1]), hdi_prob=hdi_prob, smooth=False,
                y=rates, ax=ax)
    # Median over chains, then over draws.
    median_rate = np.median(np.median(posterior['conversion_rate_by_cohort'], axis=0), axis=0)
    label = f'{extra_label_text}Median Conversion Rate: {np.median(median_rate).round(2)}'
    ax.plot(median_rate, 'k-', alpha=0.3, label=label)
    ax.plot(median_rate, 'ko', alpha=0.8, label=label)
def plot_cohort_matrix_retention(cohort_matrix, title=''):
    """Draw a cohort retention heatmap next to a column of cohort sizes.

    The left panel lists each cohort's absolute size; the right panel shows
    retention as a percentage of that size, one row per cohort, with the last
    column dropped.
    """
    cohort_size = cohort_matrix.max(axis=1)
    retention_matrix = cohort_matrix.divide(cohort_size, axis=0)
    with sns.axes_style("white"):
        fig, (size_ax, retention_ax) = plt.subplots(
            1, 2, figsize=(20, 10), sharey='all',
            gridspec_kw={'width_ratios': [1, 11]})
        retention = retention_matrix.iloc[:, :-1]
        sns.heatmap(retention,
                    mask=retention.isnull(),
                    annot=True,
                    fmt='.0%',
                    cmap='RdYlGn',
                    ax=retention_ax)
        retention_ax.set_title(title, fontsize=12)
        retention_ax.set(xlabel='# of periods', ylabel='')
        # Single-column heatmap of cohort sizes on a plain white background.
        cohort_size_df = pd.DataFrame(cohort_size).rename(columns={0: 'cohort_size'})
        sns.heatmap(cohort_size_df,
                    annot=True,
                    cbar=False,
                    fmt='g',
                    cmap=mcolors.ListedColormap(['white']),
                    ax=size_ax)
        fig.tight_layout()
| [
"pandas.DataFrame",
"seaborn.axes_style",
"seaborn.heatmap",
"numpy.median",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.colors.ListedColormap"
] | [((281, 311), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (293, 311), True, 'import matplotlib.pyplot as plt\n'), ((1247, 1277), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (1259, 1277), True, 'import matplotlib.pyplot as plt\n'), ((1543, 1618), 'numpy.median', 'np.median', (["inference_data['posterior']['conversion_rate_by_cohort']"], {'axis': '(0)'}), "(inference_data['posterior']['conversion_rate_by_cohort'], axis=0)\n", (1552, 1618), True, 'import numpy as np\n'), ((2072, 2095), 'seaborn.axes_style', 'sns.axes_style', (['"""white"""'], {}), "('white')\n", (2086, 2095), True, 'import seaborn as sns\n'), ((2115, 2209), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(20, 10)', 'sharey': '"""all"""', 'gridspec_kw': "{'width_ratios': [1, 11]}"}), "(1, 2, figsize=(20, 10), sharey='all', gridspec_kw={\n 'width_ratios': [1, 11]})\n", (2127, 2209), True, 'import matplotlib.pyplot as plt\n'), ((2693, 2726), 'matplotlib.colors.ListedColormap', 'mcolors.ListedColormap', (["['white']"], {}), "(['white'])\n", (2715, 2726), True, 'import matplotlib.colors as mcolors\n'), ((2735, 2827), 'seaborn.heatmap', 'sns.heatmap', (['cohort_size_df'], {'annot': '(True)', 'cbar': '(False)', 'fmt': '"""g"""', 'cmap': 'white_cmap', 'ax': 'ax[0]'}), "(cohort_size_df, annot=True, cbar=False, fmt='g', cmap=\n white_cmap, ax=ax[0])\n", (2746, 2827), True, 'import seaborn as sns\n'), ((478, 531), 'numpy.median', 'np.median', (["inference_data['posterior']['ltv']"], {'axis': '(0)'}), "(inference_data['posterior']['ltv'], axis=0)\n", (487, 531), True, 'import numpy as np\n'), ((763, 821), 'numpy.median', 'np.median', (["inference_data['posterior']['true_ltv']"], {'axis': '(0)'}), "(inference_data['posterior']['true_ltv'], axis=0)\n", (772, 821), True, 'import numpy as np\n'), ((1386, 1432), 'numpy.arange', 'np.arange', 
(['conversion_rate_by_cohort.shape[-1]'], {}), '(conversion_rate_by_cohort.shape[-1])\n', (1395, 1432), True, 'import numpy as np\n'), ((357, 370), 'numpy.arange', 'np.arange', (['(52)'], {}), '(52)\n', (366, 370), True, 'import numpy as np\n'), ((2611, 2636), 'pandas.DataFrame', 'pd.DataFrame', (['cohort_size'], {}), '(cohort_size)\n', (2623, 2636), True, 'import pandas as pd\n'), ((1729, 1747), 'numpy.median', 'np.median', (['curve_m'], {}), '(curve_m)\n', (1738, 1747), True, 'import numpy as np\n'), ((1861, 1879), 'numpy.median', 'np.median', (['curve_m'], {}), '(curve_m)\n', (1870, 1879), True, 'import numpy as np\n')] |
"""Render a 2-D analytic wind field as a quiver plot and save it to PDF."""
import numpy
from matplotlib import pyplot

# Grid: 80 samples across x in [0, 10], 20 samples across y in [0, 1].
x = numpy.linspace(0, 10, 80)
y = numpy.linspace(0, 1, 20)
X, Y = numpy.meshgrid(x, y)

# Analytic velocity components of the field.
U = 4 * numpy.sin(2 * numpy.pi * Y) * numpy.cos(numpy.pi * X / 2)
V = -numpy.cos(2 * numpy.pi * Y) * numpy.sin(numpy.pi * X / 2)

fig = pyplot.figure(figsize=(10, 2), frameon=True)
ax = fig.add_subplot(1, 1, 1)
# Color each arrow by the local speed; x-components are scaled down 4x.
speed = numpy.sqrt(U ** 2 + V ** 2)
ax.quiver(X, Y, 0.25 * U, V, speed)
ax.set_ylabel("$y$", fontsize=14)
bbox_artists = [ax.set_xlabel("$x$", fontsize=14)]
fig.savefig(
    "wind-field.pdf",
    orientation="landscape",
    format="pdf",
    transparent=True,
    bbox_inches="tight",
    bbox_extra_artists=bbox_artists,
)
| [
"numpy.meshgrid",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.cos",
"numpy.linspace",
"numpy.sqrt"
] | [((48, 73), 'numpy.linspace', 'numpy.linspace', (['(0)', '(10)', '(80)'], {}), '(0, 10, 80)\n', (62, 73), False, 'import numpy\n'), ((78, 102), 'numpy.linspace', 'numpy.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (92, 102), False, 'import numpy\n'), ((111, 131), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (125, 131), False, 'import numpy\n'), ((247, 291), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(10, 2)', 'frameon': '(True)'}), '(figsize=(10, 2), frameon=True)\n', (260, 291), False, 'from matplotlib import pyplot\n'), ((163, 190), 'numpy.cos', 'numpy.cos', (['(numpy.pi * X / 2)'], {}), '(numpy.pi * X / 2)\n', (172, 190), False, 'import numpy\n'), ((216, 243), 'numpy.sin', 'numpy.sin', (['(numpy.pi * X / 2)'], {}), '(numpy.pi * X / 2)\n', (225, 243), False, 'import numpy\n'), ((349, 376), 'numpy.sqrt', 'numpy.sqrt', (['(U ** 2 + V ** 2)'], {}), '(U ** 2 + V ** 2)\n', (359, 376), False, 'import numpy\n'), ((139, 166), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * Y)'], {}), '(2 * numpy.pi * Y)\n', (148, 166), False, 'import numpy\n'), ((192, 219), 'numpy.cos', 'numpy.cos', (['(2 * numpy.pi * Y)'], {}), '(2 * numpy.pi * Y)\n', (201, 219), False, 'import numpy\n')] |
# Distributed under the MIT License.
# See LICENSE.txt for details.
import itertools as it
import numpy as np
import PointwiseFunctions.GeneralRelativity.Christoffel as ch
import PointwiseFunctions.GeneralRelativity.ComputeGhQuantities as gh
import PointwiseFunctions.GeneralRelativity.ProjectionOperators as proj
import PointwiseFunctions.GeneralRelativity.WeylPropagating as wp
def constraint_preserving_bjorhus_corrections_dt_v_psi(
        unit_interface_normal_vector, three_index_constraint, char_speeds):
    """Bjorhus correction to dt(v^psi): the three-index constraint contracted
    with the interface normal, scaled by the characteristic speed of v^psi."""
    # Contract n^i into the first slot of C_iab (axes=1 sums over that index).
    normal_dot_constraint = np.tensordot(unit_interface_normal_vector,
                                         three_index_constraint, axes=1)
    return char_speeds[0] * normal_dot_constraint
def constraint_preserving_bjorhus_corrections_dt_v_zero(
        unit_interface_normal_vector, four_index_constraint, char_speeds):
    """Bjorhus correction to dt(v^0) from the four-index constraint.

    In 3 spatial dimensions this is the Levi-Civita contraction
    eps_ijk n^k C_{j ab}, scaled by the v^0 characteristic speed; the 2-D
    case is written out explicitly. Other dimensions return zeros.
    """
    dim = len(unit_interface_normal_vector)
    correction = np.zeros([dim, 1 + dim, 1 + dim])
    if dim == 2:
        correction[0] = (char_speeds[1] * unit_interface_normal_vector[1] *
                         four_index_constraint[1])
        correction[1] = (char_speeds[1] * unit_interface_normal_vector[0] *
                         four_index_constraint[0])
    elif dim == 3:
        # Build the rank-3 alternating (Levi-Civita) tensor explicitly
        # rather than iterating permutations and computing parity by hand.
        levi_civita = np.zeros((3, 3, 3))
        for even in ((0, 1, 2), (1, 2, 0), (2, 0, 1)):
            levi_civita[even] = 1.0
            levi_civita[even[::-1]] = -1.0
        correction += char_speeds[1] * np.einsum(
            'ijk,k,jab->iab', levi_civita, unit_interface_normal_vector,
            four_index_constraint)
    return correction
def add_gauge_sommerfeld_terms_to_dt_v_minus(
        gamma2, inertial_coords, incoming_null_one_form, outgoing_null_one_form,
        incoming_null_vector, outgoing_null_vector, projection_Ab,
        char_projected_rhs_dt_v_psi):
    """Gauge Sommerfeld-condition terms added to the Bjorhus correction of v^-.

    All five tensor terms of the original expression share the contraction of
    the outgoing null vector with the second slot of the v^psi RHS; that
    contraction is computed once and reused. The second and fourth terms are
    the transposes of the first and third, so only three tensors are formed.
    """
    # Freely specifiable coefficient of the gauge boundary condition.
    gauge_bc_coeff = 1.
    inertial_radius = np.sum(inertial_coords**2)**0.5
    prefac = gamma2 - gauge_bc_coeff / inertial_radius
    # rhs_dot_u[c] = u^d (dt v^psi)_{cd}
    rhs_dot_u = np.einsum('d,cd->c', outgoing_null_vector,
                          char_projected_rhs_dt_v_psi)
    # l_a P^c_b rhs_dot_u_c, symmetrized over (a, b).
    projected = np.einsum('cb,c->b', projection_Ab, rhs_dot_u)
    sym_proj = np.outer(incoming_null_one_form, projected)
    sym_proj = sym_proj + sym_proj.T
    # (l_a n_b + n_a l_b) k^c rhs_dot_u_c
    ln_pair = np.outer(incoming_null_one_form, outgoing_null_one_form)
    ln_pair = (ln_pair + ln_pair.T) * np.dot(incoming_null_vector, rhs_dot_u)
    # l_a l_b u^c rhs_dot_u_c
    ll_pair = (np.outer(incoming_null_one_form, incoming_null_one_form) *
               np.dot(outgoing_null_vector, rhs_dot_u))
    return prefac * (sym_proj - ln_pair - ll_pair)
def add_constraint_dependent_terms_to_dt_v_minus(
incoming_null_one_form, outgoing_null_one_form, incoming_null_vector,
outgoing_null_vector, projection_ab, projection_Ab, projection_AB,
constraint_char_zero_plus, constraint_char_zero_minus,
char_projected_rhs_dt_v_minus, char_speeds):
mu = 0.0 # hard-coded value from SpEC Bbh input file Mu = 0
t1_ = np.einsum('c,d,a,b,cd->ab', incoming_null_vector,
incoming_null_vector, outgoing_null_one_form,
outgoing_null_one_form, char_projected_rhs_dt_v_minus)
t2_ = np.einsum('c,da,b,cd->ab', incoming_null_vector, projection_Ab,
outgoing_null_one_form, char_projected_rhs_dt_v_minus)
t3_ = np.einsum('c,db,a,cd->ab', incoming_null_vector, projection_Ab,
outgoing_null_one_form, char_projected_rhs_dt_v_minus)
t4_ = np.einsum('d,ca,b,cd->ab', incoming_null_vector, projection_Ab,
outgoing_null_one_form, char_projected_rhs_dt_v_minus)
t5_ = np.einsum('d,cb,a,cd->ab', incoming_null_vector, projection_Ab,
outgoing_null_one_form, char_projected_rhs_dt_v_minus)
t6_ = np.einsum('cd,ab,cd->ab', projection_AB, projection_ab,
char_projected_rhs_dt_v_minus)
common_term = np.sqrt(0.5) * char_speeds[3] * (
constraint_char_zero_minus - mu * constraint_char_zero_plus)
t7_ = np.einsum('a,b,c,c->ab', outgoing_null_one_form,
outgoing_null_one_form, incoming_null_vector, common_term)
t8_ = np.einsum('ab,c,c->ab', projection_ab, outgoing_null_vector,
common_term)
t9_ = np.einsum('cb,a,c->ab', projection_Ab, outgoing_null_one_form,
common_term)
t10_ = np.einsum('ca,b,c->ab', projection_Ab, outgoing_null_one_form,
common_term)
return (0.5 * (2.0 * t1_ - t2_ - t3_ - t4_ - t5_ + t6_) +
(t7_ + t8_ - t9_ - t10_))
def add_physical_dof_terms_to_dt_v_minus(
        gamma2, unit_interface_normal_one_form, unit_interface_normal_vector,
        spacetime_unit_normal_vector, projection_ab, projection_Ab, projection_AB,
        inverse_spatial_metric, extrinsic_curvature, spacetime_metric,
        inverse_spacetime_metric, three_index_constraint,
        char_projected_rhs_dt_v_minus, phi, d_phi, d_pi, char_speeds):
    """Physical-degree-of-freedom terms of the Bjorhus correction for v^-.

    Builds the inward/outward Weyl propagating modes (U8+/U8-) from the
    spatial Ricci tensor, the extrinsic curvature and its covariant
    derivative, projects them transverse to the interface normal, and
    returns the projected combination added to the characteristic RHS.

    The three booleans below are hard-coded to mirror a SpEC configuration
    (Mu_phys = 0, constraint adjustment of the Ricci tensor on, gamma2
    included in the physical terms) — NOTE(review): confirm against the
    SpEC Bbh input file if these ever need to vary.
    """
    mu_phys = 0
    adjust_phys_using_c4 = True
    gamma2_in_phys = True
    # Pipeline: Christoffels -> covariant D_k K_ij -> spatial Ricci
    # (optionally constraint-adjusted) -> projection operators -> Weyl
    # propagating modes -> projected U3 terms -> final correction.
    # Spatial Christoffel symbols from the spatial part of Phi_iab.
    spatial_christoffel_1st_kind = ch.christoffel_first_kind(phi[:, 1:, 1:])
    spatial_christoffel_second_kind = np.einsum('ij,jkl->ikl',
                                               inverse_spatial_metric,
                                               spatial_christoffel_1st_kind)
    # Covariant derivative of the extrinsic curvature K_ij.
    cov_d_Kij = gh.covariant_deriv_extrinsic_curvture(
        extrinsic_curvature, spacetime_unit_normal_vector,
        spatial_christoffel_second_kind, inverse_spacetime_metric, phi, d_pi,
        d_phi)
    ricci3 = gh.gh_spatial_ricci_tensor(phi, d_phi, inverse_spatial_metric)
    if adjust_phys_using_c4:
        # Adjust the spatial Ricci tensor using derivatives of the
        # four-index constraint (antisymmetrized d_phi combinations).
        ricci3 = ricci3 + 0.25 * (
            np.einsum('kl,iklj->ij', inverse_spatial_metric, d_phi[:, :, 1:,
                                                                   1:]) -
            np.einsum('kl,kilj->ij', inverse_spatial_metric, d_phi[:, :, 1:,
                                                                   1:]) +
            np.einsum('kl,jkli->ij', inverse_spatial_metric, d_phi[:, :, 1:,
                                                                   1:]) - np.
            einsum('kl,kjli->ij', inverse_spatial_metric, d_phi[:, :, 1:, 1:]))
        ricci3 = ricci3 + 0.5 * (
            np.einsum('k,a,ikja->ij', unit_interface_normal_vector,
                      spacetime_unit_normal_vector, d_phi[:, :, 1:, :]) -
            np.einsum('k,a,kija->ij', unit_interface_normal_vector,
                      spacetime_unit_normal_vector, d_phi[:, :, 1:, :]) +
            np.einsum('k,a,jkia->ij', unit_interface_normal_vector,
                      spacetime_unit_normal_vector, d_phi[:, :, 1:, :]) -
            np.einsum('k,a,kjia->ij', unit_interface_normal_vector,
                      spacetime_unit_normal_vector, d_phi[:, :, 1:, :]))
    # Projection operators transverse to the interface normal (raised,
    # lowered, and mixed index versions).
    spatial_proj_IJ = proj.transverse_projection_operator(
        inverse_spatial_metric, unit_interface_normal_vector)
    spatial_proj_ij = proj.transverse_projection_operator(
        spacetime_metric[1:, 1:], unit_interface_normal_one_form)
    spatial_proj_Ij =\
        proj.transverse_projection_operator_mixed_from_spatial_input(
            unit_interface_normal_vector, unit_interface_normal_one_form)
    # Outgoing/ingoing Weyl propagating modes built from Ricci, K, and D K.
    weyl_prop_plus = wp.weyl_propagating_mode_plus(
        ricci3, extrinsic_curvature, inverse_spatial_metric, cov_d_Kij,
        unit_interface_normal_vector, spatial_proj_IJ, spatial_proj_ij,
        spatial_proj_Ij)
    weyl_prop_minus = wp.weyl_propagating_mode_minus(
        ricci3, extrinsic_curvature, inverse_spatial_metric, cov_d_Kij,
        unit_interface_normal_vector, spatial_proj_IJ, spatial_proj_ij,
        spatial_proj_Ij)
    # Project the Weyl modes with the spatial rows of the projection
    # operator P^i_a to form the U3 terms.
    U3_plus = 2 * np.einsum('ia,jb,ij->ab', projection_Ab[1:, :],
                            projection_Ab[1:, :], weyl_prop_plus)
    U3_minus = 2 * np.einsum('ia,jb,ij->ab', projection_Ab[1:, :],
                            projection_Ab[1:, :], weyl_prop_minus)
    # Combine at the v^- characteristic speed; optionally remove the
    # gamma2-weighted three-index-constraint contribution.
    tmp_ = char_speeds[3] * (U3_minus - mu_phys * U3_plus)
    if gamma2_in_phys:
        tmp_ = tmp_ - char_speeds[3] * gamma2 * np.einsum(
            'i,iab->ab', unit_interface_normal_vector, three_index_constraint)
    # Transverse-traceless projection of (RHS + physical terms).
    t1_ = np.einsum('ac,bd,ab->cd', projection_Ab, projection_Ab,
                    char_projected_rhs_dt_v_minus + tmp_)
    t2_ = -0.5 * np.einsum('ab,cd,ab->cd', projection_AB, projection_ab,
                           char_projected_rhs_dt_v_minus + tmp_)
    return t1_ + t2_
def constraint_preserving_bjorhus_corrections_dt_v_minus(
        gamma2, inertial_coords, incoming_null_one_form, outgoing_null_one_form,
        incoming_null_vector, outgoing_null_vector, projection_ab, projection_Ab,
        projection_AB, char_projected_rhs_dt_v_psi, char_projected_rhs_dt_v_minus,
        constraint_char_zero_plus, constraint_char_zero_minus, char_speeds):
    """Full constraint-preserving Bjorhus correction for the field v^-.

    Sum of the constraint-dependent and gauge Sommerfeld terms, with the
    characteristic RHS subtracted.
    """
    constraint_terms = add_constraint_dependent_terms_to_dt_v_minus(
        incoming_null_one_form, outgoing_null_one_form, incoming_null_vector,
        outgoing_null_vector, projection_ab, projection_Ab, projection_AB,
        constraint_char_zero_plus, constraint_char_zero_minus,
        char_projected_rhs_dt_v_minus, char_speeds)
    gauge_terms = add_gauge_sommerfeld_terms_to_dt_v_minus(
        gamma2, inertial_coords, incoming_null_one_form,
        outgoing_null_one_form, incoming_null_vector, outgoing_null_vector,
        projection_Ab, char_projected_rhs_dt_v_psi)
    return constraint_terms + gauge_terms - char_projected_rhs_dt_v_minus
def constraint_preserving_physical_bjorhus_corrections_dt_v_minus(
        gamma2, inertial_coords, unit_interface_normal_one_form,
        unit_interface_normal_vector, spacetime_unit_normal_vector,
        incoming_null_one_form, outgoing_null_one_form, incoming_null_vector,
        outgoing_null_vector, projection_ab, projection_Ab, projection_AB,
        inverse_spatial_metric, extrinsic_curvature, spacetime_metric,
        inverse_spacetime_metric, three_index_constraint,
        char_projected_rhs_dt_v_psi, char_projected_rhs_dt_v_minus,
        constraint_char_zero_plus, constraint_char_zero_minus, phi, d_phi, d_pi,
        char_speeds):
    """Constraint-preserving + physical Bjorhus correction for the field v^-.

    Sum of the constraint-dependent, physical-degree-of-freedom, and gauge
    Sommerfeld terms, with the characteristic RHS subtracted.
    """
    constraint_terms = add_constraint_dependent_terms_to_dt_v_minus(
        incoming_null_one_form, outgoing_null_one_form, incoming_null_vector,
        outgoing_null_vector, projection_ab, projection_Ab, projection_AB,
        constraint_char_zero_plus, constraint_char_zero_minus,
        char_projected_rhs_dt_v_minus, char_speeds)
    physical_terms = add_physical_dof_terms_to_dt_v_minus(
        gamma2, unit_interface_normal_one_form, unit_interface_normal_vector,
        spacetime_unit_normal_vector, projection_ab, projection_Ab,
        projection_AB, inverse_spatial_metric, extrinsic_curvature,
        spacetime_metric, inverse_spacetime_metric, three_index_constraint,
        char_projected_rhs_dt_v_minus, phi, d_phi, d_pi, char_speeds)
    gauge_terms = add_gauge_sommerfeld_terms_to_dt_v_minus(
        gamma2, inertial_coords, incoming_null_one_form,
        outgoing_null_one_form, incoming_null_vector, outgoing_null_vector,
        projection_Ab, char_projected_rhs_dt_v_psi)
    return (constraint_terms + physical_terms + gauge_terms -
            char_projected_rhs_dt_v_minus)
| [
"PointwiseFunctions.GeneralRelativity.WeylPropagating.weyl_propagating_mode_minus",
"PointwiseFunctions.GeneralRelativity.WeylPropagating.weyl_propagating_mode_plus",
"numpy.sum",
"PointwiseFunctions.GeneralRelativity.Christoffel.christoffel_first_kind",
"numpy.zeros",
"numpy.einsum",
"PointwiseFunction... | [((823, 880), 'numpy.zeros', 'np.zeros', (['[spatial_dim, 1 + spatial_dim, 1 + spatial_dim]'], {}), '([spatial_dim, 1 + spatial_dim, 1 + spatial_dim])\n', (831, 880), True, 'import numpy as np\n'), ((2090, 2210), 'numpy.einsum', 'np.einsum', (['"""a,cb,d,cd->ab"""', 'incoming_null_one_form', 'projection_Ab', 'outgoing_null_vector', 'char_projected_rhs_dt_v_psi'], {}), "('a,cb,d,cd->ab', incoming_null_one_form, projection_Ab,\n outgoing_null_vector, char_projected_rhs_dt_v_psi)\n", (2099, 2210), True, 'import numpy as np\n'), ((2237, 2357), 'numpy.einsum', 'np.einsum', (['"""b,ca,d,cd->ab"""', 'incoming_null_one_form', 'projection_Ab', 'outgoing_null_vector', 'char_projected_rhs_dt_v_psi'], {}), "('b,ca,d,cd->ab', incoming_null_one_form, projection_Ab,\n outgoing_null_vector, char_projected_rhs_dt_v_psi)\n", (2246, 2357), True, 'import numpy as np\n'), ((2384, 2536), 'numpy.einsum', 'np.einsum', (['"""a,b,c,d,cd->ab"""', 'incoming_null_one_form', 'outgoing_null_one_form', 'incoming_null_vector', 'outgoing_null_vector', 'char_projected_rhs_dt_v_psi'], {}), "('a,b,c,d,cd->ab', incoming_null_one_form, outgoing_null_one_form,\n incoming_null_vector, outgoing_null_vector, char_projected_rhs_dt_v_psi)\n", (2393, 2536), True, 'import numpy as np\n'), ((2583, 2735), 'numpy.einsum', 'np.einsum', (['"""b,a,c,d,cd->ab"""', 'incoming_null_one_form', 'outgoing_null_one_form', 'incoming_null_vector', 'outgoing_null_vector', 'char_projected_rhs_dt_v_psi'], {}), "('b,a,c,d,cd->ab', incoming_null_one_form, outgoing_null_one_form,\n incoming_null_vector, outgoing_null_vector, char_projected_rhs_dt_v_psi)\n", (2592, 2735), True, 'import numpy as np\n'), ((2782, 2934), 'numpy.einsum', 'np.einsum', (['"""a,b,c,d,cd->ab"""', 'incoming_null_one_form', 'incoming_null_one_form', 'outgoing_null_vector', 'outgoing_null_vector', 'char_projected_rhs_dt_v_psi'], {}), "('a,b,c,d,cd->ab', incoming_null_one_form, incoming_null_one_form,\n outgoing_null_vector, 
outgoing_null_vector, char_projected_rhs_dt_v_psi)\n", (2791, 2934), True, 'import numpy as np\n'), ((3402, 3560), 'numpy.einsum', 'np.einsum', (['"""c,d,a,b,cd->ab"""', 'incoming_null_vector', 'incoming_null_vector', 'outgoing_null_one_form', 'outgoing_null_one_form', 'char_projected_rhs_dt_v_minus'], {}), "('c,d,a,b,cd->ab', incoming_null_vector, incoming_null_vector,\n outgoing_null_one_form, outgoing_null_one_form,\n char_projected_rhs_dt_v_minus)\n", (3411, 3560), True, 'import numpy as np\n'), ((3603, 3725), 'numpy.einsum', 'np.einsum', (['"""c,da,b,cd->ab"""', 'incoming_null_vector', 'projection_Ab', 'outgoing_null_one_form', 'char_projected_rhs_dt_v_minus'], {}), "('c,da,b,cd->ab', incoming_null_vector, projection_Ab,\n outgoing_null_one_form, char_projected_rhs_dt_v_minus)\n", (3612, 3725), True, 'import numpy as np\n'), ((3752, 3874), 'numpy.einsum', 'np.einsum', (['"""c,db,a,cd->ab"""', 'incoming_null_vector', 'projection_Ab', 'outgoing_null_one_form', 'char_projected_rhs_dt_v_minus'], {}), "('c,db,a,cd->ab', incoming_null_vector, projection_Ab,\n outgoing_null_one_form, char_projected_rhs_dt_v_minus)\n", (3761, 3874), True, 'import numpy as np\n'), ((3901, 4023), 'numpy.einsum', 'np.einsum', (['"""d,ca,b,cd->ab"""', 'incoming_null_vector', 'projection_Ab', 'outgoing_null_one_form', 'char_projected_rhs_dt_v_minus'], {}), "('d,ca,b,cd->ab', incoming_null_vector, projection_Ab,\n outgoing_null_one_form, char_projected_rhs_dt_v_minus)\n", (3910, 4023), True, 'import numpy as np\n'), ((4050, 4172), 'numpy.einsum', 'np.einsum', (['"""d,cb,a,cd->ab"""', 'incoming_null_vector', 'projection_Ab', 'outgoing_null_one_form', 'char_projected_rhs_dt_v_minus'], {}), "('d,cb,a,cd->ab', incoming_null_vector, projection_Ab,\n outgoing_null_one_form, char_projected_rhs_dt_v_minus)\n", (4059, 4172), True, 'import numpy as np\n'), ((4199, 4289), 'numpy.einsum', 'np.einsum', (['"""cd,ab,cd->ab"""', 'projection_AB', 'projection_ab', 'char_projected_rhs_dt_v_minus'], {}), 
"('cd,ab,cd->ab', projection_AB, projection_ab,\n char_projected_rhs_dt_v_minus)\n", (4208, 4289), True, 'import numpy as np\n'), ((4438, 4549), 'numpy.einsum', 'np.einsum', (['"""a,b,c,c->ab"""', 'outgoing_null_one_form', 'outgoing_null_one_form', 'incoming_null_vector', 'common_term'], {}), "('a,b,c,c->ab', outgoing_null_one_form, outgoing_null_one_form,\n incoming_null_vector, common_term)\n", (4447, 4549), True, 'import numpy as np\n'), ((4576, 4649), 'numpy.einsum', 'np.einsum', (['"""ab,c,c->ab"""', 'projection_ab', 'outgoing_null_vector', 'common_term'], {}), "('ab,c,c->ab', projection_ab, outgoing_null_vector, common_term)\n", (4585, 4649), True, 'import numpy as np\n'), ((4680, 4755), 'numpy.einsum', 'np.einsum', (['"""cb,a,c->ab"""', 'projection_Ab', 'outgoing_null_one_form', 'common_term'], {}), "('cb,a,c->ab', projection_Ab, outgoing_null_one_form, common_term)\n", (4689, 4755), True, 'import numpy as np\n'), ((4787, 4862), 'numpy.einsum', 'np.einsum', (['"""ca,b,c->ab"""', 'projection_Ab', 'outgoing_null_one_form', 'common_term'], {}), "('ca,b,c->ab', projection_Ab, outgoing_null_one_form, common_term)\n", (4796, 4862), True, 'import numpy as np\n'), ((5651, 5692), 'PointwiseFunctions.GeneralRelativity.Christoffel.christoffel_first_kind', 'ch.christoffel_first_kind', (['phi[:, 1:, 1:]'], {}), '(phi[:, 1:, 1:])\n', (5676, 5692), True, 'import PointwiseFunctions.GeneralRelativity.Christoffel as ch\n'), ((5731, 5809), 'numpy.einsum', 'np.einsum', (['"""ij,jkl->ikl"""', 'inverse_spatial_metric', 'spatial_christoffel_1st_kind'], {}), "('ij,jkl->ikl', inverse_spatial_metric, spatial_christoffel_1st_kind)\n", (5740, 5809), True, 'import numpy as np\n'), ((5922, 6095), 'PointwiseFunctions.GeneralRelativity.ComputeGhQuantities.covariant_deriv_extrinsic_curvture', 'gh.covariant_deriv_extrinsic_curvture', (['extrinsic_curvature', 'spacetime_unit_normal_vector', 'spatial_christoffel_second_kind', 'inverse_spacetime_metric', 'phi', 'd_pi', 'd_phi'], {}), 
'(extrinsic_curvature,\n spacetime_unit_normal_vector, spatial_christoffel_second_kind,\n inverse_spacetime_metric, phi, d_pi, d_phi)\n', (5959, 6095), True, 'import PointwiseFunctions.GeneralRelativity.ComputeGhQuantities as gh\n'), ((6126, 6188), 'PointwiseFunctions.GeneralRelativity.ComputeGhQuantities.gh_spatial_ricci_tensor', 'gh.gh_spatial_ricci_tensor', (['phi', 'd_phi', 'inverse_spatial_metric'], {}), '(phi, d_phi, inverse_spatial_metric)\n', (6152, 6188), True, 'import PointwiseFunctions.GeneralRelativity.ComputeGhQuantities as gh\n'), ((7413, 7506), 'PointwiseFunctions.GeneralRelativity.ProjectionOperators.transverse_projection_operator', 'proj.transverse_projection_operator', (['inverse_spatial_metric', 'unit_interface_normal_vector'], {}), '(inverse_spatial_metric,\n unit_interface_normal_vector)\n', (7448, 7506), True, 'import PointwiseFunctions.GeneralRelativity.ProjectionOperators as proj\n'), ((7534, 7631), 'PointwiseFunctions.GeneralRelativity.ProjectionOperators.transverse_projection_operator', 'proj.transverse_projection_operator', (['spacetime_metric[1:, 1:]', 'unit_interface_normal_one_form'], {}), '(spacetime_metric[1:, 1:],\n unit_interface_normal_one_form)\n', (7569, 7631), True, 'import PointwiseFunctions.GeneralRelativity.ProjectionOperators as proj\n'), ((7668, 7795), 'PointwiseFunctions.GeneralRelativity.ProjectionOperators.transverse_projection_operator_mixed_from_spatial_input', 'proj.transverse_projection_operator_mixed_from_spatial_input', (['unit_interface_normal_vector', 'unit_interface_normal_one_form'], {}), '(\n unit_interface_normal_vector, unit_interface_normal_one_form)\n', (7728, 7795), True, 'import PointwiseFunctions.GeneralRelativity.ProjectionOperators as proj\n'), ((7825, 8007), 'PointwiseFunctions.GeneralRelativity.WeylPropagating.weyl_propagating_mode_plus', 'wp.weyl_propagating_mode_plus', (['ricci3', 'extrinsic_curvature', 'inverse_spatial_metric', 'cov_d_Kij', 'unit_interface_normal_vector', 'spatial_proj_IJ', 
'spatial_proj_ij', 'spatial_proj_Ij'], {}), '(ricci3, extrinsic_curvature,\n inverse_spatial_metric, cov_d_Kij, unit_interface_normal_vector,\n spatial_proj_IJ, spatial_proj_ij, spatial_proj_Ij)\n', (7854, 8007), True, 'import PointwiseFunctions.GeneralRelativity.WeylPropagating as wp\n'), ((8047, 8230), 'PointwiseFunctions.GeneralRelativity.WeylPropagating.weyl_propagating_mode_minus', 'wp.weyl_propagating_mode_minus', (['ricci3', 'extrinsic_curvature', 'inverse_spatial_metric', 'cov_d_Kij', 'unit_interface_normal_vector', 'spatial_proj_IJ', 'spatial_proj_ij', 'spatial_proj_Ij'], {}), '(ricci3, extrinsic_curvature,\n inverse_spatial_metric, cov_d_Kij, unit_interface_normal_vector,\n spatial_proj_IJ, spatial_proj_ij, spatial_proj_Ij)\n', (8077, 8230), True, 'import PointwiseFunctions.GeneralRelativity.WeylPropagating as wp\n'), ((8798, 8896), 'numpy.einsum', 'np.einsum', (['"""ac,bd,ab->cd"""', 'projection_Ab', 'projection_Ab', '(char_projected_rhs_dt_v_minus + tmp_)'], {}), "('ac,bd,ab->cd', projection_Ab, projection_Ab, \n char_projected_rhs_dt_v_minus + tmp_)\n", (8807, 8896), True, 'import numpy as np\n'), ((541, 617), 'numpy.einsum', 'np.einsum', (['"""i,iab->ab"""', 'unit_interface_normal_vector', 'three_index_constraint'], {}), "('i,iab->ab', unit_interface_normal_vector, three_index_constraint)\n", (550, 617), True, 'import numpy as np\n'), ((1990, 2018), 'numpy.sum', 'np.sum', (['(inertial_coords ** 2)'], {}), '(inertial_coords ** 2)\n', (1996, 2018), True, 'import numpy as np\n'), ((8290, 8379), 'numpy.einsum', 'np.einsum', (['"""ia,jb,ij->ab"""', 'projection_Ab[1:, :]', 'projection_Ab[1:, :]', 'weyl_prop_plus'], {}), "('ia,jb,ij->ab', projection_Ab[1:, :], projection_Ab[1:, :],\n weyl_prop_plus)\n", (8299, 8379), True, 'import numpy as np\n'), ((8423, 8513), 'numpy.einsum', 'np.einsum', (['"""ia,jb,ij->ab"""', 'projection_Ab[1:, :]', 'projection_Ab[1:, :]', 'weyl_prop_minus'], {}), "('ia,jb,ij->ab', projection_Ab[1:, :], projection_Ab[1:, :],\n 
weyl_prop_minus)\n", (8432, 8513), True, 'import numpy as np\n'), ((8929, 9027), 'numpy.einsum', 'np.einsum', (['"""ab,cd,ab->cd"""', 'projection_AB', 'projection_ab', '(char_projected_rhs_dt_v_minus + tmp_)'], {}), "('ab,cd,ab->cd', projection_AB, projection_ab, \n char_projected_rhs_dt_v_minus + tmp_)\n", (8938, 9027), True, 'import numpy as np\n'), ((4325, 4337), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (4332, 4337), True, 'import numpy as np\n'), ((8697, 8773), 'numpy.einsum', 'np.einsum', (['"""i,iab->ab"""', 'unit_interface_normal_vector', 'three_index_constraint'], {}), "('i,iab->ab', unit_interface_normal_vector, three_index_constraint)\n", (8706, 8773), True, 'import numpy as np\n'), ((6706, 6775), 'numpy.einsum', 'np.einsum', (['"""kl,kjli->ij"""', 'inverse_spatial_metric', 'd_phi[:, :, 1:, 1:]'], {}), "('kl,kjli->ij', inverse_spatial_metric, d_phi[:, :, 1:, 1:])\n", (6715, 6775), True, 'import numpy as np\n'), ((7262, 7371), 'numpy.einsum', 'np.einsum', (['"""k,a,kjia->ij"""', 'unit_interface_normal_vector', 'spacetime_unit_normal_vector', 'd_phi[:, :, 1:, :]'], {}), "('k,a,kjia->ij', unit_interface_normal_vector,\n spacetime_unit_normal_vector, d_phi[:, :, 1:, :])\n", (7271, 7371), True, 'import numpy as np\n'), ((6567, 6636), 'numpy.einsum', 'np.einsum', (['"""kl,jkli->ij"""', 'inverse_spatial_metric', 'd_phi[:, :, 1:, 1:]'], {}), "('kl,jkli->ij', inverse_spatial_metric, d_phi[:, :, 1:, 1:])\n", (6576, 6636), True, 'import numpy as np\n'), ((7120, 7229), 'numpy.einsum', 'np.einsum', (['"""k,a,jkia->ij"""', 'unit_interface_normal_vector', 'spacetime_unit_normal_vector', 'd_phi[:, :, 1:, :]'], {}), "('k,a,jkia->ij', unit_interface_normal_vector,\n spacetime_unit_normal_vector, d_phi[:, :, 1:, :])\n", (7129, 7229), True, 'import numpy as np\n'), ((6265, 6334), 'numpy.einsum', 'np.einsum', (['"""kl,iklj->ij"""', 'inverse_spatial_metric', 'd_phi[:, :, 1:, 1:]'], {}), "('kl,iklj->ij', inverse_spatial_metric, d_phi[:, :, 1:, 1:])\n", (6274, 6334), 
True, 'import numpy as np\n'), ((6416, 6485), 'numpy.einsum', 'np.einsum', (['"""kl,kilj->ij"""', 'inverse_spatial_metric', 'd_phi[:, :, 1:, 1:]'], {}), "('kl,kilj->ij', inverse_spatial_metric, d_phi[:, :, 1:, 1:])\n", (6425, 6485), True, 'import numpy as np\n'), ((6836, 6945), 'numpy.einsum', 'np.einsum', (['"""k,a,ikja->ij"""', 'unit_interface_normal_vector', 'spacetime_unit_normal_vector', 'd_phi[:, :, 1:, :]'], {}), "('k,a,ikja->ij', unit_interface_normal_vector,\n spacetime_unit_normal_vector, d_phi[:, :, 1:, :])\n", (6845, 6945), True, 'import numpy as np\n'), ((6978, 7087), 'numpy.einsum', 'np.einsum', (['"""k,a,kija->ij"""', 'unit_interface_normal_vector', 'spacetime_unit_normal_vector', 'd_phi[:, :, 1:, :]'], {}), "('k,a,kija->ij', unit_interface_normal_vector,\n spacetime_unit_normal_vector, d_phi[:, :, 1:, :])\n", (6987, 7087), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import csv
from datetime import date
import itertools
from operator import itemgetter
import subprocess
import gzip
import logging
from allel.compat import zip_longest, PY2, text_type, range, unquote_plus
import numpy as np
import allel
# Module-level logger; `debug` is a convenience alias used by iter_gff3.
logger = logging.getLogger(__name__)
debug = logger.debug
# The eight mandatory VCF columns, in the order required by the VCF spec.
VCF_FIXED_FIELDS = 'CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO'
def write_vcf(path, variants, rename=None, number=None, description=None,
              fill=None, write_header=True):
    """Write a sites-only VCF file from a structured array of variants.

    Parameters
    ----------
    path : string
        Output file path (opened in text mode, overwritten).
    variants : numpy structured array
        Variants table; field names determine the VCF columns written.
    rename : dict, optional
        Mapping from column name to VCF ID.
    number, description : dict, optional
        Explicit VCF ``Number``/``Description`` header values per column.
    fill : dict, optional
        Per-column fill values to omit from the output.
    write_header : bool, optional
        If False, skip the header lines and write only data rows.
    """
    with open(path, 'w') as out:
        if write_header:
            write_vcf_header(out, variants=variants, rename=rename,
                             number=number, description=description)
        write_vcf_data(out, variants=variants, rename=rename, fill=fill)
def write_vcf_header(vcf_file, variants, rename, number, description):
    """Write VCF meta-information lines (``##...``) and the column header
    line to ``vcf_file``.

    Parameters
    ----------
    vcf_file : file-like
        Open text file handle to write to.
    variants : numpy structured array
        Variants table; its field names determine INFO/FILTER headers.
    rename : dict or None
        Mapping from column name to VCF ID.
    number : dict or None
        Explicit VCF ``Number`` values, keyed by column name.
    description : dict or None
        Explicit VCF ``Description`` values, keyed by column name.
    """
    # normalise optional mappings to empty dicts
    if rename is None:
        rename = dict()
    if number is None:
        number = dict()
    if description is None:
        description = dict()
    # write file format version
    print('##fileformat=VCFv4.1', file=vcf_file)
    # write today's date
    today = date.today().strftime('%Y%m%d')
    print('##fileDate=%s' % today, file=vcf_file)
    # write source
    print('##source=scikit-allel-%s' % allel.__version__, file=vcf_file)
    names = variants.dtype.names
    # INFO columns are all fields that are neither fixed VCF columns nor
    # FILTER_* columns (matched case-insensitively)
    info_names = [n for n in names
                  if not n.upper().startswith('FILTER_') and
                  not n.upper() in VCF_FIXED_FIELDS]
    info_ids = [rename[n] if n in rename else n
                for n in info_names]
    # write INFO headers, sorted by ID
    for name, vcf_id in sorted(zip(info_names, info_ids), key=itemgetter(1)):
        col = variants[name]
        # determine VCF Number (explicit override wins; otherwise inferred
        # from the column's dimensionality and dtype)
        if name in number:
            vcf_number = number[name]
        else:
            if col.ndim == 1 and col.dtype.kind == 'b':
                # Flag
                vcf_number = 0
            elif col.ndim == 1:
                vcf_number = 1
            elif col.ndim == 2:
                vcf_number = col.shape[1]
            else:
                raise NotImplementedError('only columns with 1 or two '
                                          'dimensions are supported')
        # determine VCF Type from the numpy dtype kind
        kind = col.dtype.kind
        if kind == 'b':
            vcf_type = 'Flag'
        elif kind in 'ui':
            vcf_type = 'Integer'
        elif kind == 'f':
            vcf_type = 'Float'
        else:
            vcf_type = 'String'
        # determine VCF Description (defaults to empty string)
        if name in description:
            vcf_description = description[name]
        else:
            vcf_description = ''
        # construct INFO header line
        header_line = '##INFO=<ID=%s,Number=%s,Type=%s,Description="%s">'\
                      % (vcf_id, vcf_number, vcf_type, vcf_description)
        print(header_line, file=vcf_file)
    # FILTER columns carry the 'FILTER_' prefix; the VCF ID drops it
    filter_names = [n for n in names if n.upper().startswith('FILTER_')]
    filter_ids = [rename[n] if n in rename else n[7:]
                  for n in filter_names]
    # write FILTER headers, sorted by ID
    for name, vcf_id in sorted(zip(filter_names, filter_ids),
                               key=itemgetter(1)):
        # determine VCF Description
        if name in description:
            vcf_description = description[name]
        else:
            vcf_description = ''
        # construct FILTER header line
        header_line = '##FILTER=<ID=%s,Description="%s">'\
                      % (vcf_id, vcf_description)
        print(header_line, file=vcf_file)
    # write column names (sites-only VCF: no FORMAT/sample columns)
    line = '#' + '\t'.join(VCF_FIXED_FIELDS)
    print(line, file=vcf_file)
def write_vcf_data(vcf_file, variants, rename, fill):
    """Write VCF data lines (one per variant) to ``vcf_file``.

    Parameters
    ----------
    vcf_file : file-like
        Open text file handle to write to.
    variants : numpy structured array
        Variants table; must contain at least CHROM and POS columns
        (matched case-insensitively).
    rename : dict or None
        Mapping from column name to VCF ID.
    fill : dict or None
        Per-column fill values to omit from the output (keyed by VCF
        column/field name, e.g. 'ALT').
    """
    if rename is None:
        rename = dict()
    if fill is None:
        fill = dict()
    # find the fixed columns, allowing for case insensitive naming in the
    # input array
    col_chrom = None
    col_pos = None
    col_id = None
    col_ref = None
    col_alt = None
    col_qual = None
    for n in variants.dtype.names:
        if n.upper() == 'CHROM':
            col_chrom = variants[n]
        elif n.upper() == 'POS':
            col_pos = variants[n]
        elif n.upper() == 'ID':
            col_id = variants[n]
        elif n.upper() == 'REF':
            col_ref = variants[n]
        elif n.upper() == 'ALT':
            col_alt = variants[n]
        elif n.upper() == 'QUAL':
            col_qual = variants[n]
    # check for required columns
    if col_chrom is None:
        raise ValueError('CHROM column not found')
    if col_pos is None:
        raise ValueError('POS column not found')
    # pad optional columns with the VCF missing-value marker '.'
    dot = itertools.repeat('.')
    if col_id is None:
        col_id = dot
    if col_ref is None:
        col_ref = dot
    if col_alt is None:
        col_alt = dot
    if col_qual is None:
        col_qual = dot
    # find FILTER columns (prefixed 'FILTER_'; the prefix is dropped for
    # the VCF ID unless an explicit rename is given)
    filter_names = [n for n in variants.dtype.names
                    if n.upper().startswith('FILTER_')]
    filter_ids = [rename[n] if n in rename else n[7:]
                  for n in filter_names]
    filter_cols = [variants[n] for n in filter_names]
    # sort by ID, keeping names/ids/columns aligned
    if filter_names:
        filters = sorted(zip(filter_names, filter_ids, filter_cols),
                         key=itemgetter(1))
        filter_names, filter_ids, filter_cols = zip(*filters)
    # find INFO columns (everything that is neither fixed nor FILTER_)
    info_names = [n for n in variants.dtype.names
                  if not n.upper().startswith('FILTER_') and
                  not n.upper() in VCF_FIXED_FIELDS]
    info_ids = [rename[n] if n in rename else n
                for n in info_names]
    info_cols = [variants[n] for n in info_names]
    # sort by ID
    if info_names:
        infos = sorted(zip(info_names, info_ids, info_cols),
                       key=itemgetter(1))
        info_names, info_ids, info_cols = zip(*infos)
    # setup writer (tab-delimited, as required by the VCF spec)
    writer = csv.writer(vcf_file, delimiter='\t', lineterminator='\n')
    # zip up data as rows; zip_longest pads filter/info rows with None
    # when there are no FILTER/INFO columns at all
    rows = zip(col_chrom, col_pos, col_id, col_ref, col_alt, col_qual)
    filter_rows = zip(*filter_cols)
    info_rows = zip(*info_cols)
    for row, filter_row, info_row in zip_longest(rows, filter_rows, info_rows):
        # unpack main row
        # NOTE(review): 'id' shadows the builtin here; it is local to this
        # loop body only
        chrom, pos, id, ref, alt, qual = row
        chrom = _vcf_value_str(chrom)
        pos = _vcf_value_str(pos)
        id = _vcf_value_str(id)
        ref = _vcf_value_str(ref)
        alt = _vcf_value_str(alt, fill=fill.get('ALT', None))
        qual = _vcf_value_str(qual)
        # construct FILTER value: semicolon-joined IDs of set filters,
        # 'PASS' if none are set, '.' if there are no FILTER columns
        if filter_row is not None:
            flt = [i for i, v in zip(filter_ids, filter_row) if v]
            if flt:
                flt = ';'.join(flt)
            else:
                flt = 'PASS'
        else:
            flt = '.'
        # construct INFO value: semicolon-joined 'ID=value' (or bare ID
        # for flags); '.' if there are no INFO columns
        if info_row is not None:
            info_vals = [_vcf_info_str(n, i, v, fill)
                         for n, i, v in zip(info_names, info_ids, info_row)]
            info_vals = [x for x in info_vals if x is not None]
            info = ';'.join(info_vals)
        else:
            info = '.'
        # repack and emit the data line
        row = chrom, pos, id, ref, alt, qual, flt, info
        writer.writerow(row)
def _vcf_value_str(o, fill=None):
    """Render a single value (or sequence of values) as a VCF field string.

    Sequences are rendered element-wise and comma-joined; elements equal
    to ``fill`` (when given) are omitted.
    """
    # decode bytes under Python 3 so str() does not produce "b'...'"
    if isinstance(o, bytes) and not PY2:
        return str(o, encoding='ascii')
    if not isinstance(o, (tuple, list, np.ndarray)):
        return str(o)
    # sequence: render elements recursively, optionally dropping fills
    if fill is None:
        parts = [_vcf_value_str(x) for x in o]
    else:
        parts = [_vcf_value_str(x) for x in o if x != fill]
    return ','.join(parts)
def _vcf_info_str(name, id, value, fill):
    """Render one INFO field as 'ID=value', the bare ID for a set flag,
    or None when a flag is unset (the caller drops Nones).
    """
    if isinstance(value, (bool, np.bool_)):
        # Flag-type field: emit the bare ID when set, nothing otherwise
        return id if bool(value) else None
    return '%s=%s' % (id, _vcf_value_str(value, fill=fill.get(name, None)))
def write_fasta(path, sequences, names, mode='w', width=80):
    """Write nucleotide sequences stored as numpy arrays to a FASTA file.

    Parameters
    ----------
    path : string
        File path.
    sequences : sequence of arrays
        One or more ndarrays of dtype 'S1' containing the sequences.
    names : sequence of strings
        Names of the sequences.
    mode : string, optional
        Use 'a' to append to an existing file.
    width : int, optional
        Maximum line width.

    Raises
    ------
    ValueError
        If the numbers of sequences and names differ, or any sequence is
        not of dtype 'S1'.
    """
    # check inputs
    if isinstance(sequences, np.ndarray):
        # single sequence
        sequences = [sequences]
        names = [names]
    if len(sequences) != len(names):
        raise ValueError('must provide the same number of sequences and names')
    for sequence in sequences:
        if sequence.dtype != np.dtype('S1'):
            raise ValueError('expected S1 dtype, found %r' % sequence.dtype)
    # force binary mode
    mode = 'ab' if 'a' in mode else 'wb'
    # write to file
    with open(path, mode=mode) as fasta:
        for name, sequence in zip(names, sequences):
            # force bytes
            if isinstance(name, text_type):
                name = name.encode('ascii')
            header = b'>' + name + b'\n'
            fasta.write(header)
            for i in range(0, sequence.size, width):
                # tobytes() replaces the tostring() alias, which was
                # deprecated in numpy 1.19 and removed in numpy 2.0
                line = sequence[i:i+width].tobytes() + b'\n'
                fasta.write(line)
def gff3_parse_attributes(attributes_string):
    """Parse a string of GFF3 attributes ('key=value' pairs delimited by ';')
    and return a dictionary.
    """
    result = dict()
    if not PY2:
        # work on a native str so percent-escapes can be decoded
        attributes_string = str(attributes_string, encoding='ascii')
    for field in attributes_string.split(';'):
        if '=' in field:
            key, value = field.split('=')
            key = unquote_plus(key).strip()
            value = unquote_plus(value.strip())
            if not PY2:
                # hand values back as bytes, matching the input type
                value = value.encode('ascii')
            result[key] = value
        elif len(field) > 0:
            # bare token without '=' (not strictly valid GFF3); record as flag
            result[unquote_plus(field).strip()] = True
    return result
def iter_gff3(path, attributes=None, region=None, score_fill=-1,
              phase_fill=-1, attributes_fill=b'.'):
    """Iterate over records in a GFF3 file, yielding one tuple per record:
    (seqid, source, type, start, end, score, strand, phase) plus the values
    of any requested ``attributes``.

    Parameters
    ----------
    path : string
        Path to the GFF3 file; '.gz'/'.bgz' files are decompressed.
    attributes : sequence of strings, optional
        Attribute keys to extract from column 9.
    region : string, optional
        Tabix region string; requires the file to be tabix-indexed and the
        ``tabix`` executable to be on the PATH.
    score_fill : int, optional
        Value substituted when the score field is missing ('.').
    phase_fill : int, optional
        Value substituted when the phase field is missing ('.').
    attributes_fill : object or sequence, optional
        Fill value(s) for missing attributes; a sequence must match
        ``attributes`` in length.
    """
    # prepare fill values for attributes
    if attributes is not None:
        if isinstance(attributes_fill, (list, tuple)):
            # N.B., fixed: len() was previously applied to the comparison
            # result (`len(attributes != len(attributes_fill))`), which
            # raised TypeError instead of validating the lengths
            if len(attributes) != len(attributes_fill):
                raise ValueError('number of fills does not match attributes')
        else:
            attributes_fill = [attributes_fill] * len(attributes)
    # open input stream
    needs_closing = False
    if region is not None:
        cmd = ['tabix', path, region]
        buffer = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
    elif path.endswith('.gz') or path.endswith('.bgz'):
        buffer = gzip.open(path, mode='rb')
        needs_closing = True
    else:
        buffer = open(path, mode='rb')
        needs_closing = True
    debug(buffer)
    try:
        for line in buffer:
            # N.B., fixed: `line[0] == b'>'` is always False on Python 3
            # because indexing bytes yields an int; startswith works on
            # both Python 2 and 3
            if line.startswith(b'>'):
                # assume begin embedded FASTA; stop iterating.
                # N.B., fixed: raising StopIteration inside a generator is
                # a RuntimeError under PEP 479 (Python 3.7+); `return` is
                # the correct way to end a generator
                return
            if line.startswith(b'#'):
                # skip comment lines
                continue
            vals = line.split(b'\t')
            if len(vals) == 9:
                # unpack for processing
                fseqid, fsource, ftype, fstart, fend, fscore, fstrand, \
                    fphase, fattrs = vals
                # convert numerics
                fstart = int(fstart)
                fend = int(fend)
                if fscore == b'.':
                    fscore = score_fill
                else:
                    fscore = float(fscore)
                if fphase == b'.':
                    fphase = phase_fill
                else:
                    fphase = int(fphase)
                rec = (fseqid, fsource, ftype, fstart, fend, fscore,
                       fstrand, fphase)
                if attributes is not None:
                    dattrs = gff3_parse_attributes(fattrs)
                    vattrs = tuple(
                        dattrs.get(k, f)
                        for k, f in zip(attributes, attributes_fill)
                    )
                    rec += vattrs
                yield rec
    finally:
        if needs_closing:
            buffer.close()
| [
"allel.compat.zip_longest",
"subprocess.Popen",
"gzip.open",
"csv.writer",
"numpy.dtype",
"datetime.date.today",
"logging.getLogger",
"allel.compat.range",
"allel.compat.unquote_plus",
"operator.itemgetter",
"itertools.repeat"
] | [((343, 370), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (360, 370), False, 'import logging\n'), ((4775, 4796), 'itertools.repeat', 'itertools.repeat', (['"""."""'], {}), "('.')\n", (4791, 4796), False, 'import itertools\n'), ((6028, 6085), 'csv.writer', 'csv.writer', (['vcf_file'], {'delimiter': '"""\t"""', 'lineterminator': '"""\n"""'}), "(vcf_file, delimiter='\\t', lineterminator='\\n')\n", (6038, 6085), False, 'import csv\n'), ((6290, 6331), 'allel.compat.zip_longest', 'zip_longest', (['rows', 'filter_rows', 'info_rows'], {}), '(rows, filter_rows, info_rows)\n', (6301, 6331), False, 'from allel.compat import zip_longest, PY2, text_type, range, unquote_plus\n'), ((1222, 1234), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1232, 1234), False, 'from datetime import date\n'), ((1767, 1780), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (1777, 1780), False, 'from operator import itemgetter\n'), ((3296, 3309), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (3306, 3309), False, 'from operator import itemgetter\n'), ((8823, 8837), 'numpy.dtype', 'np.dtype', (['"""S1"""'], {}), "('S1')\n", (8831, 8837), True, 'import numpy as np\n'), ((9305, 9335), 'allel.compat.range', 'range', (['(0)', 'sequence.size', 'width'], {}), '(0, sequence.size, width)\n', (9310, 9335), False, 'from allel.compat import zip_longest, PY2, text_type, range, unquote_plus\n'), ((10874, 10919), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE)\n', (10890, 10919), False, 'import subprocess\n'), ((11000, 11026), 'gzip.open', 'gzip.open', (['path'], {'mode': '"""rb"""'}), "(path, mode='rb')\n", (11009, 11026), False, 'import gzip\n'), ((5401, 5414), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (5411, 5414), False, 'from operator import itemgetter\n'), ((5926, 5939), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (5936, 5939), False, 
'from operator import itemgetter\n'), ((9924, 9941), 'allel.compat.unquote_plus', 'unquote_plus', (['key'], {}), '(key)\n', (9936, 9941), False, 'from allel.compat import zip_longest, PY2, text_type, range, unquote_plus\n'), ((10226, 10241), 'allel.compat.unquote_plus', 'unquote_plus', (['f'], {}), '(f)\n', (10238, 10241), False, 'from allel.compat import zip_longest, PY2, text_type, range, unquote_plus\n')] |
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
def draw_ellipse(position, covariance, ax=None, **kwargs):
    """Draw the 1-, 2- and 3-sigma ellipses of a 2D Gaussian with the
    given mean ``position`` and ``covariance`` on ``ax`` (current axes by
    default). Extra keyword arguments are forwarded to ``Ellipse``."""
    axes = ax if ax else plt.gca()
    # derive the principal axes (width/height/orientation) from covariance
    if covariance.shape == (2, 2):
        u_mat, svals, _ = np.linalg.svd(covariance)
        angle = np.degrees(np.arctan2(u_mat[1, 0], u_mat[0, 0]))
        width, height = 2 * np.sqrt(svals)
    else:
        # assumes covariance is a length-2 vector of variances
        angle = 0
        width, height = 2 * np.sqrt(covariance)
    # draw nested ellipses at 1, 2 and 3 sigma
    for nsig in range(1, 4):
        axes.add_patch(
            mpl.patches.Ellipse(position, nsig * width, nsig * height,
                                angle, **kwargs))
# Configure matplotlib to render text with LaTeX and a serif font.
mpl.rc("text", usetex=True)
mpl.rc("font", family="serif")
fig = plt.figure(figsize=(8, 8/3), constrained_layout=True)
# NOTE(review): gs is created but never used below
gs = mpl.gridspec.GridSpec(2, 6, figure=fig)
# One panel per covariance family: general, diagonal, diagonal-isotropic.
for i in range(3):
    mean_1 = np.array([-0.5, -0.5])
    mean_2 = np.array([0.5, 0.5])
    if i == 0:
        cov_1 = np.array([[0.3, 0.2], [0.2, 0.1]], dtype=np.float32)
        cov_2 = np.array([[0.05, -0.1], [-0.1, 0.6]], dtype=np.float32)
    elif i == 1:
        cov_1 = np.array([[1, 0], [0, 0.1]], dtype=np.float32)
        cov_2 = np.array([[0.2, 0], [0, 0.4]], dtype=np.float32)
    else:
        cov_1 = np.array([[0.2, 0], [0, 0.2]], dtype=np.float32)
        cov_2 = np.array([[0.5, 0], [0, 0.5]], dtype=np.float32)
    #cov_1 /= cov_1.sum()
    #cov_2 /= cov_2.sum()
    ax = fig.add_subplot(1, 3, int(i+1))
    # Each Gaussian is drawn twice with different alphas to layer the
    # sigma ellipses of the two components over each other.
    draw_ellipse(
        mean_1,
        cov_1,
        edgecolor="none",
        facecolor="magenta",
        alpha=0.2,
        ax=ax
    )
    draw_ellipse(
        mean_2,
        cov_2,
        edgecolor="none",
        facecolor="turquoise",
        alpha=0.2,
        ax=ax
    )
    draw_ellipse(
        mean_2,
        cov_2,
        edgecolor="none",
        facecolor="turquoise",
        alpha=0.1,
        ax=ax
    )
    draw_ellipse(
        mean_1,
        cov_1,
        edgecolor="none",
        facecolor="magenta",
        alpha=0.1,
        ax=ax
    )
    # Hide the tick marks but keep the tick labels.
    ax.tick_params(
        axis="both",
        which="both",
        bottom=False,
        top=False,
        left=False,
        right=False,
        # labelbottom=False,
        # labelleft=False
    )
    xticks = range(-3, 4)
    ax.set_xlim(-3.6, 3.6)
    ax.set_xticks(xticks)
    yticks = range(-3, 4)
    ax.set_ylim(-3.6, 3.6)
    ax.set_yticks(yticks)
    # Grids
    ax.set_axisbelow(True)
    # NOTE(review): tick_params expects booleans here; the "off" strings
    # are presumably treated as truthy — confirm intended behaviour
    ax.tick_params(
        which="both",
        top="off",
        left="off",
        right="off",
        bottom="off",
        length=0
    )
    ax.grid(linestyle=":", linewidth=0.5)
    ax.set_aspect(1/ax.get_data_ratio(), adjustable="box")
    if i == 0:
        ax.set_title("Different covariance matrices", fontsize=12)
    if i == 1:
        ax.set_title("Diagonal covariance matrices", fontsize=12)
    if i == 2:
        ax.set_title("Diagonal with equal variance", fontsize=12)
    # watermark link in the lower-right corner of each panel
    ax.text(
        0.68,
        0.04,
        'cookieblues.github.io',
        fontsize=11,
        horizontalalignment='center',
        verticalalignment='center',
        transform=ax.transAxes,
        color='dimgrey',
        zorder=5
    )
plt.tight_layout()
plt.savefig("gaussians.png", bbox_inches="tight")
plt.show()
| [
"matplotlib.rc",
"matplotlib.pyplot.show",
"numpy.arctan2",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.figure",
"numpy.linalg.svd",
"numpy.array",
"matplotlib.pyplot.gca",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] | [((674, 701), 'matplotlib.rc', 'mpl.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (680, 701), True, 'import matplotlib as mpl\n'), ((702, 732), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (708, 732), True, 'import matplotlib as mpl\n'), ((741, 796), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8 / 3)', 'constrained_layout': '(True)'}), '(figsize=(8, 8 / 3), constrained_layout=True)\n', (751, 796), True, 'import matplotlib.pyplot as plt\n'), ((800, 839), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', (['(2)', '(6)'], {'figure': 'fig'}), '(2, 6, figure=fig)\n', (821, 839), True, 'import matplotlib as mpl\n'), ((3253, 3271), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3269, 3271), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3321), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gaussians.png"""'], {'bbox_inches': '"""tight"""'}), "('gaussians.png', bbox_inches='tight')\n", (3283, 3321), True, 'import matplotlib.pyplot as plt\n'), ((3322, 3332), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3330, 3332), True, 'import matplotlib.pyplot as plt\n'), ((873, 895), 'numpy.array', 'np.array', (['[-0.5, -0.5]'], {}), '([-0.5, -0.5])\n', (881, 895), True, 'import numpy as np\n'), ((909, 929), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (917, 929), True, 'import numpy as np\n'), ((215, 224), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (222, 224), True, 'import matplotlib.pyplot as plt\n'), ((323, 348), 'numpy.linalg.svd', 'np.linalg.svd', (['covariance'], {}), '(covariance)\n', (336, 348), True, 'import numpy as np\n'), ((961, 1013), 'numpy.array', 'np.array', (['[[0.3, 0.2], [0.2, 0.1]]'], {'dtype': 'np.float32'}), '([[0.3, 0.2], [0.2, 0.1]], dtype=np.float32)\n', (969, 1013), True, 'import numpy as np\n'), ((1030, 1085), 'numpy.array', 'np.array', (['[[0.05, -0.1], [-0.1, 0.6]]'], 
{'dtype': 'np.float32'}), '([[0.05, -0.1], [-0.1, 0.6]], dtype=np.float32)\n', (1038, 1085), True, 'import numpy as np\n'), ((376, 404), 'numpy.arctan2', 'np.arctan2', (['U[1, 0]', 'U[0, 0]'], {}), '(U[1, 0], U[0, 0])\n', (386, 404), True, 'import numpy as np\n'), ((434, 444), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (441, 444), True, 'import numpy as np\n'), ((501, 520), 'numpy.sqrt', 'np.sqrt', (['covariance'], {}), '(covariance)\n', (508, 520), True, 'import numpy as np\n'), ((595, 670), 'matplotlib.patches.Ellipse', 'mpl.patches.Ellipse', (['position', '(nsig * width)', '(nsig * height)', 'angle'], {}), '(position, nsig * width, nsig * height, angle, **kwargs)\n', (614, 670), True, 'import matplotlib as mpl\n'), ((1119, 1165), 'numpy.array', 'np.array', (['[[1, 0], [0, 0.1]]'], {'dtype': 'np.float32'}), '([[1, 0], [0, 0.1]], dtype=np.float32)\n', (1127, 1165), True, 'import numpy as np\n'), ((1182, 1230), 'numpy.array', 'np.array', (['[[0.2, 0], [0, 0.4]]'], {'dtype': 'np.float32'}), '([[0.2, 0], [0, 0.4]], dtype=np.float32)\n', (1190, 1230), True, 'import numpy as np\n'), ((1257, 1305), 'numpy.array', 'np.array', (['[[0.2, 0], [0, 0.2]]'], {'dtype': 'np.float32'}), '([[0.2, 0], [0, 0.2]], dtype=np.float32)\n', (1265, 1305), True, 'import numpy as np\n'), ((1322, 1370), 'numpy.array', 'np.array', (['[[0.5, 0], [0, 0.5]]'], {'dtype': 'np.float32'}), '([[0.5, 0], [0, 0.5]], dtype=np.float32)\n', (1330, 1370), True, 'import numpy as np\n')] |
"""
Python in Astronomy 2016 is the second iteration of the Python in Astronomy
conference series.
This is the docstring for the pyastro module, this gets included as the
description for the module.
"""
import numpy as np
def times(a, b):
    """
    Multiply a by b.

    Parameters
    ----------
    a : `numpy.ndarray`
        Array one.
    b : `numpy.ndarray`
        Array two

    Returns
    -------
    result : `numpy.ndarray`
        ``a`` multiplied by ``b``
    """
    # N.B., fixed: ``np.multipy`` was a typo that raised AttributeError at
    # call time; the correct ufunc is ``np.multiply``.
    return np.multiply(a, b)
class PyAstro(object):
    """
    This is a class docstring, here you must describe the parameters for the
    creation of the class, which is normally the signature of the ``__init__``
    method.

    Parameters
    ----------
    awesomeness_level : `int`
        How awesome is pyastro16??!
    day : `int`
        Day of the conference. Defaults to 1.

    Attributes
    ----------
    awesomeness_level: `int`
        How awesome are these class attributes?! Attributes that are not
        properties can be documented here.
    """

    def __init__(self, awesomeness_level, day=1):
        # Docstrings on hidden methods are not rendered, so the body is
        # commented inline instead.
        self.awesomeness_level = awesomeness_level
        self._day = day

    @property
    def day(self):
        """
        Day of the conference.

        Properties are automatically documented as attributes
        """
        return self._day
class PyAstro16(PyAstro):
    """
    The 2016 edition of the python in astronomy conference.
    """
    # Append the base class docstring so the parameter/attribute
    # documentation is carried over into this subclass's rendered docs.
    __doc__ += PyAstro.__doc__
| [
"numpy.multipy"
] | [((499, 515), 'numpy.multipy', 'np.multipy', (['a', 'b'], {}), '(a, b)\n', (509, 515), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import mirheo as mir
import numpy as np
import trimesh, argparse
# Build a membrane mesh either directly from an OFF file or via trimesh,
# run a short (3-step, dt=0) Mirheo simulation that dumps the mesh, and
# write the resulting vertices/faces to text files for comparison.
parser = argparse.ArgumentParser()
parser.add_argument("--readFrom", choices=["off", 'trimesh'])
args = parser.parse_args()

path   = "ply/"
pvname = "rbc"
off    = "rbc_mesh.off"

ranks  = (1, 1, 1)
domain = (12, 8, 10)

u = mir.Mirheo(ranks, domain, dt=0, debug_level=3, log_filename='log', no_splash=True)

# Two equivalent ways of constructing the same membrane mesh.
if args.readFrom == "off":
    mesh = mir.ParticleVectors.MembraneMesh(off)
elif args.readFrom == "trimesh":
    m = trimesh.load(off);
    mesh = mir.ParticleVectors.MembraneMesh(m.vertices.tolist(), m.faces.tolist())

# Single membrane placed at (6, 4, 5) with identity orientation quaternion.
pv_rbc = mir.ParticleVectors.MembraneVector(pvname, mass=1.0, mesh=mesh)
ic_rbc = mir.InitialConditions.Membrane([[6.0, 4.0, 5.0, 1.0, 0.0, 0.0, 0.0]])
u.registerParticleVector(pv_rbc, ic_rbc)

# Dump the mesh every step so the first output file exists after u.run(3).
u.registerPlugins(mir.Plugins.createDumpMesh("mesh_dump", pv_rbc, 1, path))

u.run(3)

# Only the master rank reads back the dumped ply and writes the text files.
if u.isMasterTask():
    mesh = trimesh.load(path + pvname + "_00000.ply")

    np.savetxt("vertices.txt", mesh.vertices, fmt="%g")
    np.savetxt("faces.txt",    mesh.faces,    fmt="%d")
# TEST: dump.mesh
# cd dump
# rm -rf ply/ vertices.txt faces.txt mesh.out.txt
# cp ../../data/rbc_mesh.off .
# mir.run --runargs "-n 2" ./mesh.py --readFrom off
# cat vertices.txt faces.txt > mesh.out.txt
# TEST: dump.mesh.fromTrimesh
# cd dump
# rm -rf ply/ vertices.txt faces.txt mesh.out.txt
# cp ../../data/rbc_mesh.off .
# mir.run --runargs "-n 2" ./mesh.py --readFrom trimesh
# cat vertices.txt faces.txt > mesh.out.txt
| [
"mirheo.InitialConditions.Membrane",
"trimesh.load",
"argparse.ArgumentParser",
"mirheo.Mirheo",
"numpy.savetxt",
"mirheo.ParticleVectors.MembraneVector",
"mirheo.Plugins.createDumpMesh",
"mirheo.ParticleVectors.MembraneMesh"
] | [((98, 123), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (121, 123), False, 'import trimesh, argparse\n'), ((316, 402), 'mirheo.Mirheo', 'mir.Mirheo', (['ranks', 'domain'], {'dt': '(0)', 'debug_level': '(3)', 'log_filename': '"""log"""', 'no_splash': '(True)'}), "(ranks, domain, dt=0, debug_level=3, log_filename='log',\n no_splash=True)\n", (326, 402), True, 'import mirheo as mir\n'), ((629, 692), 'mirheo.ParticleVectors.MembraneVector', 'mir.ParticleVectors.MembraneVector', (['pvname'], {'mass': '(1.0)', 'mesh': 'mesh'}), '(pvname, mass=1.0, mesh=mesh)\n', (663, 692), True, 'import mirheo as mir\n'), ((702, 771), 'mirheo.InitialConditions.Membrane', 'mir.InitialConditions.Membrane', (['[[6.0, 4.0, 5.0, 1.0, 0.0, 0.0, 0.0]]'], {}), '([[6.0, 4.0, 5.0, 1.0, 0.0, 0.0, 0.0]])\n', (732, 771), True, 'import mirheo as mir\n'), ((438, 475), 'mirheo.ParticleVectors.MembraneMesh', 'mir.ParticleVectors.MembraneMesh', (['off'], {}), '(off)\n', (470, 475), True, 'import mirheo as mir\n'), ((834, 890), 'mirheo.Plugins.createDumpMesh', 'mir.Plugins.createDumpMesh', (['"""mesh_dump"""', 'pv_rbc', '(1)', 'path'], {}), "('mesh_dump', pv_rbc, 1, path)\n", (860, 890), True, 'import mirheo as mir\n'), ((935, 977), 'trimesh.load', 'trimesh.load', (["(path + pvname + '_00000.ply')"], {}), "(path + pvname + '_00000.ply')\n", (947, 977), False, 'import trimesh, argparse\n'), ((982, 1033), 'numpy.savetxt', 'np.savetxt', (['"""vertices.txt"""', 'mesh.vertices'], {'fmt': '"""%g"""'}), "('vertices.txt', mesh.vertices, fmt='%g')\n", (992, 1033), True, 'import numpy as np\n'), ((1038, 1083), 'numpy.savetxt', 'np.savetxt', (['"""faces.txt"""', 'mesh.faces'], {'fmt': '"""%d"""'}), "('faces.txt', mesh.faces, fmt='%d')\n", (1048, 1083), True, 'import numpy as np\n'), ((517, 534), 'trimesh.load', 'trimesh.load', (['off'], {}), '(off)\n', (529, 534), False, 'import trimesh, argparse\n')] |
import pytest
import unittest
import numpy as np
import numpy.testing as npt
from lenstronomy.Util import sampling_util
def test_unit2gaussian():
    """unit2gaussian maps {0, 0.5, 1} onto {-inf, mu, +inf}."""
    mu = 5
    sigma = 1
    mapped = sampling_util.unit2gaussian(np.linspace(0, 1, 3), mu, sigma)
    npt.assert_equal(mapped, [-np.inf, mu, np.inf])
def test_unit2uniform():
    """unit2uniform maps {0, 0.5, 1} onto {lower, midpoint, upper}."""
    lo, hi = -5, 15
    mapped = sampling_util.unit2uniform(np.linspace(0, 1, 3), lo, hi)
    npt.assert_equal(mapped, [lo, (lo + hi) / 2., hi])
def test_uniform2unit():
    """uniform2unit is the inverse mapping back onto the unit interval."""
    lo, hi = -5, 15
    mapped = sampling_util.uniform2unit(np.linspace(lo, hi, 3), lo, hi)
    npt.assert_equal(mapped, [0, 0.5, 1])
def test_cube2args_uniform():
    """In-place versus copy semantics of cube2args_uniform."""
    n_dims = 3
    low, high = -5., 15.
    lowers = low * np.ones(n_dims)
    uppers = high * np.ones(n_dims)
    expected = [low, (low + high) / 2., high]
    # copy=False transforms the cube in place
    cube = [0, 0.5, 1]
    sampling_util.cube2args_uniform(cube, lowers, uppers, n_dims, copy=False)
    npt.assert_equal(cube, expected)
    # copy=True leaves the input cube untouched ...
    cube = [0, 0.5, 1]
    sampling_util.cube2args_uniform(cube, lowers, uppers, n_dims, copy=True)
    npt.assert_equal(np.any(np.not_equal(cube, expected)), True)
    # ... while the returned cube carries the transformed values
    cube = sampling_util.cube2args_uniform(cube, lowers, uppers, n_dims, copy=True)
    npt.assert_equal(cube, expected)
def test_cube2args_gaussian():
    """In-place versus copy semantics of cube2args_gaussian."""
    n_dims = 3
    low, high = -5., 15.
    mean, sigma = 5., 1.
    lowers = [low] * n_dims
    uppers = [high] * n_dims
    means = [mean] * n_dims
    sigmas = [sigma] * n_dims
    expected = [low, mean, high]
    # copy=False transforms the cube in place
    cube = [0, 0.5, 1]
    sampling_util.cube2args_gaussian(cube, lowers, uppers,
                                     means, sigmas, n_dims, copy=False)
    npt.assert_equal(cube, expected)
    # copy=True leaves the input cube untouched ...
    cube = [0, 0.5, 1]
    sampling_util.cube2args_gaussian(cube, lowers, uppers,
                                     means, sigmas, n_dims, copy=True)
    npt.assert_equal(np.any(np.not_equal(cube, expected)), True)
    # ... while the returned cube carries the transformed values
    cube = sampling_util.cube2args_gaussian(cube, lowers, uppers,
                                            means, sigmas, n_dims, copy=True)
    npt.assert_equal(cube, expected)
def test_scale_limits():
    """scale_limits shrinks each interval about its centre."""
    lowers_list = [0, -1, 5]
    uppers_list = [10, 9, 15]
    scale_factor = 0.5
    lowers_s, uppers_s = sampling_util.scale_limits(lowers_list, uppers_list,
                                                    scale_factor)
    npt.assert_equal(lowers_s, np.array([2.5, 1.5, 7.5]))
    npt.assert_equal(uppers_s, np.array([7.5, 6.5, 12.5]))
    # scaled widths equal the original widths times the scale factor
    widths = np.array(uppers_list) - np.array(lowers_list)
    npt.assert_equal(widths * scale_factor, uppers_s - lowers_s)
def test_sample_ball():
    """sample_ball draws have the requested mean and spread for both
    supported distributions (uniform spread shrinks by ~0.607)."""
    center = np.ones(10)
    spread = np.ones(10)
    for dist, sigma_factor in (('normal', 1.0), ('uniform', 0.607)):
        draws = sampling_util.sample_ball(center, spread, size=10000,
                                          dist=dist)
        npt.assert_almost_equal(np.mean(draws, axis=0), center, decimal=1)
        npt.assert_almost_equal(np.std(draws, axis=0),
                                spread * sigma_factor, decimal=1)
class TestRaise(unittest.TestCase):
    """Check that invalid arguments are rejected."""

    def test_raise(self):
        # an unknown distribution name must raise ValueError
        with self.assertRaises(ValueError):
            sampling_util.sample_ball(
                p0=np.ones(10), std=np.ones(10), size=1000, dist='BAD')
if __name__ == '__main__':
    # allow running this test module directly via the pytest runner
    pytest.main()
| [
"lenstronomy.Util.sampling_util.unit2uniform",
"lenstronomy.Util.sampling_util.uniform2unit",
"lenstronomy.Util.sampling_util.scale_limits",
"lenstronomy.Util.sampling_util.sample_ball",
"numpy.testing.assert_almost_equal",
"numpy.std",
"numpy.ones",
"pytest.main",
"lenstronomy.Util.sampling_util.cu... | [((180, 200), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (191, 200), True, 'import numpy as np\n'), ((212, 256), 'lenstronomy.Util.sampling_util.unit2gaussian', 'sampling_util.unit2gaussian', (['cube', 'mu', 'sigma'], {}), '(cube, mu, sigma)\n', (239, 256), False, 'from lenstronomy.Util import sampling_util\n'), ((261, 306), 'numpy.testing.assert_equal', 'npt.assert_equal', (['cube', '[-np.inf, mu, np.inf]'], {}), '(cube, [-np.inf, mu, np.inf])\n', (277, 306), True, 'import numpy.testing as npt\n'), ((371, 391), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (382, 391), True, 'import numpy as np\n'), ((403, 449), 'lenstronomy.Util.sampling_util.unit2uniform', 'sampling_util.unit2uniform', (['cube', 'lower', 'upper'], {}), '(cube, lower, upper)\n', (429, 449), False, 'from lenstronomy.Util import sampling_util\n'), ((454, 515), 'numpy.testing.assert_equal', 'npt.assert_equal', (['cube', '[lower, (lower + upper) / 2.0, upper]'], {}), '(cube, [lower, (lower + upper) / 2.0, upper])\n', (470, 515), True, 'import numpy.testing as npt\n'), ((575, 603), 'numpy.linspace', 'np.linspace', (['lower', 'upper', '(3)'], {}), '(lower, upper, 3)\n', (586, 603), True, 'import numpy as np\n'), ((615, 661), 'lenstronomy.Util.sampling_util.uniform2unit', 'sampling_util.uniform2unit', (['cube', 'lower', 'upper'], {}), '(cube, lower, upper)\n', (641, 661), False, 'from lenstronomy.Util import sampling_util\n'), ((666, 701), 'numpy.testing.assert_equal', 'npt.assert_equal', (['cube', '[0, 0.5, 1]'], {}), '(cube, [0, 0.5, 1])\n', (682, 701), True, 'import numpy.testing as npt\n'), ((888, 961), 'lenstronomy.Util.sampling_util.cube2args_uniform', 'sampling_util.cube2args_uniform', (['cube', 'lowers', 'uppers', 'n_dims'], {'copy': '(False)'}), '(cube, lowers, uppers, n_dims, copy=False)\n', (919, 961), False, 'from lenstronomy.Util import sampling_util\n'), ((966, 995), 
'numpy.testing.assert_equal', 'npt.assert_equal', (['cube', 'truth'], {}), '(cube, truth)\n', (982, 995), True, 'import numpy.testing as npt\n'), ((1024, 1096), 'lenstronomy.Util.sampling_util.cube2args_uniform', 'sampling_util.cube2args_uniform', (['cube', 'lowers', 'uppers', 'n_dims'], {'copy': '(True)'}), '(cube, lowers, uppers, n_dims, copy=True)\n', (1055, 1096), False, 'from lenstronomy.Util import sampling_util\n'), ((1241, 1313), 'lenstronomy.Util.sampling_util.cube2args_uniform', 'sampling_util.cube2args_uniform', (['cube', 'lowers', 'uppers', 'n_dims'], {'copy': '(True)'}), '(cube, lowers, uppers, n_dims, copy=True)\n', (1272, 1313), False, 'from lenstronomy.Util import sampling_util\n'), ((1341, 1370), 'numpy.testing.assert_equal', 'npt.assert_equal', (['cube', 'truth'], {}), '(cube, truth)\n', (1357, 1370), True, 'import numpy.testing as npt\n'), ((1603, 1696), 'lenstronomy.Util.sampling_util.cube2args_gaussian', 'sampling_util.cube2args_gaussian', (['cube', 'lowers', 'uppers', 'means', 'sigmas', 'n_dims'], {'copy': '(False)'}), '(cube, lowers, uppers, means, sigmas,\n n_dims, copy=False)\n', (1635, 1696), False, 'from lenstronomy.Util import sampling_util\n'), ((1735, 1764), 'numpy.testing.assert_equal', 'npt.assert_equal', (['cube', 'truth'], {}), '(cube, truth)\n', (1751, 1764), True, 'import numpy.testing as npt\n'), ((1793, 1885), 'lenstronomy.Util.sampling_util.cube2args_gaussian', 'sampling_util.cube2args_gaussian', (['cube', 'lowers', 'uppers', 'means', 'sigmas', 'n_dims'], {'copy': '(True)'}), '(cube, lowers, uppers, means, sigmas,\n n_dims, copy=True)\n', (1825, 1885), False, 'from lenstronomy.Util import sampling_util\n'), ((2064, 2156), 'lenstronomy.Util.sampling_util.cube2args_gaussian', 'sampling_util.cube2args_gaussian', (['cube', 'lowers', 'uppers', 'means', 'sigmas', 'n_dims'], {'copy': '(True)'}), '(cube, lowers, uppers, means, sigmas,\n n_dims, copy=True)\n', (2096, 2156), False, 'from lenstronomy.Util import sampling_util\n'), 
((2225, 2254), 'numpy.testing.assert_equal', 'npt.assert_equal', (['cube', 'truth'], {}), '(cube, truth)\n', (2241, 2254), True, 'import numpy.testing as npt\n'), ((2480, 2546), 'lenstronomy.Util.sampling_util.scale_limits', 'sampling_util.scale_limits', (['lowers_list', 'uppers_list', 'scale_factor'], {}), '(lowers_list, uppers_list, scale_factor)\n', (2506, 2546), False, 'from lenstronomy.Util import sampling_util\n'), ((2668, 2728), 'numpy.testing.assert_equal', 'npt.assert_equal', (['(widths * scale_factor)', '(uppers_s - lowers_s)'], {}), '(widths * scale_factor, uppers_s - lowers_s)\n', (2684, 2728), True, 'import numpy.testing as npt\n'), ((2764, 2775), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (2771, 2775), True, 'import numpy as np\n'), ((2786, 2797), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (2793, 2797), True, 'import numpy as np\n'), ((2811, 2872), 'lenstronomy.Util.sampling_util.sample_ball', 'sampling_util.sample_ball', (['p0', 'std'], {'size': '(10000)', 'dist': '"""normal"""'}), "(p0, std, size=10000, dist='normal')\n", (2836, 2872), False, 'from lenstronomy.Util import sampling_util\n'), ((2884, 2907), 'numpy.mean', 'np.mean', (['sample'], {'axis': '(0)'}), '(sample, axis=0)\n', (2891, 2907), True, 'import numpy as np\n'), ((2912, 2956), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['mean', 'p0'], {'decimal': '(1)'}), '(mean, p0, decimal=1)\n', (2935, 2956), True, 'import numpy.testing as npt\n'), ((2969, 2991), 'numpy.std', 'np.std', (['sample'], {'axis': '(0)'}), '(sample, axis=0)\n', (2975, 2991), True, 'import numpy as np\n'), ((2996, 3042), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['sigma', 'std'], {'decimal': '(1)'}), '(sigma, std, decimal=1)\n', (3019, 3042), True, 'import numpy.testing as npt\n'), ((3057, 3119), 'lenstronomy.Util.sampling_util.sample_ball', 'sampling_util.sample_ball', (['p0', 'std'], {'size': '(10000)', 'dist': '"""uniform"""'}), "(p0, std, size=10000, 
dist='uniform')\n", (3082, 3119), False, 'from lenstronomy.Util import sampling_util\n'), ((3131, 3154), 'numpy.mean', 'np.mean', (['sample'], {'axis': '(0)'}), '(sample, axis=0)\n', (3138, 3154), True, 'import numpy as np\n'), ((3159, 3203), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['mean', 'p0'], {'decimal': '(1)'}), '(mean, p0, decimal=1)\n', (3182, 3203), True, 'import numpy.testing as npt\n'), ((3216, 3238), 'numpy.std', 'np.std', (['sample'], {'axis': '(0)'}), '(sample, axis=0)\n', (3222, 3238), True, 'import numpy as np\n'), ((3243, 3297), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['sigma', '(std * 0.607)'], {'decimal': '(1)'}), '(sigma, std * 0.607, decimal=1)\n', (3266, 3297), True, 'import numpy.testing as npt\n'), ((3533, 3546), 'pytest.main', 'pytest.main', ([], {}), '()\n', (3544, 3546), False, 'import pytest\n'), ((2358, 2379), 'numpy.array', 'np.array', (['lowers_list'], {}), '(lowers_list)\n', (2366, 2379), True, 'import numpy as np\n'), ((2381, 2402), 'numpy.array', 'np.array', (['uppers_list'], {}), '(uppers_list)\n', (2389, 2402), True, 'import numpy as np\n'), ((2578, 2603), 'numpy.array', 'np.array', (['[2.5, 1.5, 7.5]'], {}), '([2.5, 1.5, 7.5])\n', (2586, 2603), True, 'import numpy as np\n'), ((2636, 2662), 'numpy.array', 'np.array', (['[7.5, 6.5, 12.5]'], {}), '([7.5, 6.5, 12.5])\n', (2644, 2662), True, 'import numpy as np\n'), ((794, 809), 'numpy.ones', 'np.ones', (['n_dims'], {}), '(n_dims)\n', (801, 809), True, 'import numpy as np\n'), ((815, 830), 'numpy.ones', 'np.ones', (['n_dims'], {}), '(n_dims)\n', (822, 830), True, 'import numpy as np\n'), ((1195, 1220), 'numpy.not_equal', 'np.not_equal', (['cube', 'truth'], {}), '(cube, truth)\n', (1207, 1220), True, 'import numpy as np\n'), ((2018, 2043), 'numpy.not_equal', 'np.not_equal', (['cube', 'truth'], {}), '(cube, truth)\n', (2030, 2043), True, 'import numpy as np\n'), ((3447, 3458), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (3454, 
3458), True, 'import numpy as np\n'), ((3464, 3475), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (3471, 3475), True, 'import numpy as np\n')] |
# 2019-11-20 21:35:12(JST)
"""Count the subsets of ``a`` whose sum has parity ``p``.

Input (stdin): a line ``n p`` followed by a line of ``n`` integers.
Only each element's parity matters: with ``count0`` even and ``count1``
odd elements, the answer is the sum of ``C(count0, i) * C(count1, j)``
over all ``i`` and over every ``j`` whose parity matches ``p``.
"""
import sys

import numpy as np
# BUGFIX: scipy.misc.comb was deprecated and then removed from SciPy;
# scipy.special.comb is the documented replacement and still supports
# exact (arbitrary-precision integer) results via exact=True.
from scipy.special import comb


def main():
    """Read the problem from stdin, print the count, and also return it.

    Returns:
        int: the number of subsets whose sum is congruent to ``p`` mod 2.
        (The return value is new and purely additive: callers that only
        relied on the printed output are unaffected.)
    """
    n, p = [int(x) for x in sys.stdin.readline().split()]
    a = np.array(sys.stdin.readline().split(), np.int64)
    a %= 2
    a.sort()
    # After the mod, evens are 0 and odds are 1; sorting groups the evens
    # first, so the insertion point of 1 is exactly the number of evens.
    count0 = np.searchsorted(a, 1)
    count1 = n - count0
    ans = 0
    if p == 1:
        # Any number of evens, an odd number of odds.
        for i in range(count0 + 1):
            for j in range(1, count1 + 1, 2):
                ans += comb(count0, i, exact=True) * comb(count1, j, exact=True)
    else:
        # Any number of evens, an even number of odds (the empty set counts).
        for i in range(count0 + 1):
            for j in range(0, count1 + 1, 2):
                ans += comb(count0, i, exact=True) * comb(count1, j, exact=True)
    print(ans)
    return ans


if __name__ == "__main__":
    main()
| [
"scipy.misc.comb",
"sys.stdin.readline",
"numpy.searchsorted"
] | [((578, 599), 'numpy.searchsorted', 'np.searchsorted', (['a', '(1)'], {}), '(a, 1)\n', (593, 599), True, 'import numpy as np\n'), ((498, 518), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (516, 518), False, 'import sys\n'), ((450, 470), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (468, 470), False, 'import sys\n'), ((760, 787), 'scipy.misc.comb', 'comb', (['count0', 'i'], {'exact': '(True)'}), '(count0, i, exact=True)\n', (764, 787), False, 'from scipy.misc import comb\n'), ((790, 817), 'scipy.misc.comb', 'comb', (['count1', 'j'], {'exact': '(True)'}), '(count1, j, exact=True)\n', (794, 817), False, 'from scipy.misc import comb\n'), ((933, 960), 'scipy.misc.comb', 'comb', (['count0', 'i'], {'exact': '(True)'}), '(count0, i, exact=True)\n', (937, 960), False, 'from scipy.misc import comb\n'), ((963, 990), 'scipy.misc.comb', 'comb', (['count1', 'j'], {'exact': '(True)'}), '(count1, j, exact=True)\n', (967, 990), False, 'from scipy.misc import comb\n')] |
"""
Everything in this file was written by us.
"""
import argparse
import time
import numpy as np
import torch.nn as nn
import torch.optim as optim
from environments.FourRooms import *
from models.NEC import *
from models.DND import *
from models.MFEC import *
from utils.atari_wrappers import make_atari, wrap_deepmind
from utils.utils import inverse_distance
# Command-line configuration for the episodic-control training script.
# Flags are grouped by subsystem (CUDA, environment, training, model,
# optimization, output); the parsed namespace is consumed by main() below.
parser = argparse.ArgumentParser()
# CUDA
parser.add_argument('--use_cuda', action='store_true',
                    help='Use GPU, if available')
parser.add_argument('--seed', type=int, default=1,
                    help='Random seed')
# Environment
parser.add_argument('--environment_type', default='atari',
                    choices=['atari','fourrooms'],
                    help='Type of environment to use.')
parser.add_argument('--room_size', type=int, default=13,
                    help='Size of one side of each room in fourrooms')
parser.add_argument('--fourrooms_state_type', default='tabular',
                    choices=['tabular','mnist'],
                    help='Type of state to return in fourrooms env')
parser.add_argument('--env_id', default='PongNoFrameskip-v0',
                    choices=['PongNoFrameskip-v0','BreakoutNoFrameskip-v0'],
                    help='OpenAI gym name for Atari env to use for training')
parser.add_argument('--frames_to_stack', type=int, default=4,
                    help='Number of prev. frames to fold into current state')
# Training
parser.add_argument('--n_episodes', type=int, default=10000,
                    help='Number of episodes for training')
parser.add_argument('--initial_epsilon', type=float, default=1.0,
                    help='Initial probability of selecting random action')
parser.add_argument('--final_epsilon', type=float, default=0.01,
                    help='Final probability of selecting random action')
parser.add_argument('--epsilon_decay', type=float, default=0.99,
                    help='Decay for probability of selecting random action')
parser.add_argument('--gamma', type=float, default=0.99,
                    help='Temporal discounting parameter')
parser.add_argument('--SR_gamma', type=float, default=0.99,
                    help='Temporal discounting parameter for learning SR')
parser.add_argument('--N', type=int, default=100,
                    help='Horizon for N-step Q-estimates')
parser.add_argument('--replay_buffer_size', type=int, default=100000,
                    help='Number of states to store in the replay buffer')
parser.add_argument('--replay_every', type=int, default=16,
                    help='Number of observed frames before replaying')
parser.add_argument('--batch_size', type=int, default=32,
                    help='Minibatch size for replay update')
parser.add_argument('--vae_batch_size', type=int, default=32,
                    help='Minibatch size for vae training')
parser.add_argument('--vae_train_frames', type=int, default=1000000,
                    help='Number of frames to train VAE')
parser.add_argument('--vae_epochs', type=int, default=10,
                    help='Number of epochs for training VAE on frames')
parser.add_argument('--SR_batch_size', type=int, default=32,
                    help='Minibatch size for SR updating')
parser.add_argument('--SR_train_frames', type=int, default=1000000,
                    help='Number of frames for training SR')
parser.add_argument('--SR_epochs', type=int, default=10,
                    help='Number of epochs for training SR')
parser.add_argument('--SR_train_algo', choices=['TD', 'MC', 'DP'],
                    default='TD',
                    help='Training algorithm for successor representation')
parser.add_argument('--Q_train_algo', choices=['MC', 'TD'],
                    default='MC',
                    help='Training algorithm for updating Q in MFEC')
parser.add_argument('--use_Q_max', action='store_true',
                    help='Use weird max in Q update equation from paper')
parser.add_argument('--force_knn', action='store_true',
                    help='Always estimate values using k nearest neighbors')
parser.add_argument('--weight_neighbors', action='store_true',
                    help='weight neighbors according to similarity')
parser.add_argument('--delta',type=float, default=0.1,
                    help='Small constant to add in similarity calculation')
# Model
# NOTE(review): --agent has no default, so args.agent is None when the flag
# is omitted; main() then binds no agent and fails with a NameError.
parser.add_argument('--agent', choices=['NEC','MFEC'],
                    help='Type of agent to use')
parser.add_argument('--num_neighbors', type=int, default=50,
                    help='Number of nearest neighbors used for lookup')
parser.add_argument('--embedding_type', choices=['VAE','random','SR'], default='VAE',
                    help='Type of embedding model for MFEC')
parser.add_argument('--SR_embedding_type', choices=['random','VAE','pixels'], default='random',
                    help='Type of embedding model for SR')
parser.add_argument('--embedding_size', type=int, default=64,
                    help='Dimension of state embeddings (default from mjacar)')
parser.add_argument('--in_height', type=int, default=84,
                    help='The height of the input')
parser.add_argument('--in_width', type=int, default=84,
                    help='The width of the input')
parser.add_argument('--max_memory', type=int, default=500000,
                    help='Maximum number of memories in DND')
parser.add_argument('--load_vae_from',default=None,
                    help='Path to file to load vae weights from')
parser.add_argument('--n_hidden', type=int, default=100,
                    help='Number of hidden nodes in MLP')
# Optimization
parser.add_argument('--optimizer', choices=['Adam','RMSprop'],
                    default='RMSprop',
                    help='Optimizer to use for training')
parser.add_argument('--lr', type=float, default=1e-6,
                    help='Learning rate of optimizer (default from mjacar)')
parser.add_argument('--q_lr', type=float, default=0.01,
                    help='Learning rate for Q-values (default from mjacar)')
# Output options
parser.add_argument('--print_every', type=int, default=1000,
                    help='Number of episodes before printing some score data')
parser.add_argument('--vae_print_every', type=int, default=1000,
                    help='Number of batches before printing vae data')
parser.add_argument('--vae_weights_file', default=None,
                    help='Path to file to save vae weights')
parser.add_argument('--SR_filename', default='../results/MFEC_SR/random_TD',
                    help='Filename for saving SR representation')
parser.add_argument('--out_data_file', default='../results/NEC/results.npy',
                    help='Path to output data file with score history')
def main(args):
    """Train an episodic-control agent (MFEC or NEC) and save score history.

    Args:
        args: parsed argparse namespace produced by the parser defined above.

    Side effects:
        Prints per-episode statistics and saves a numpy array to
        args.out_data_file with one row per episode:
        (score, frames[, extra_steps]) -- the third column only for fourrooms.
    """
    # `import torch.nn as nn` at the top of the file binds only `nn`, not
    # `torch`; bind it explicitly instead of relying on a star import.
    import torch
    # CUDA
    if args.use_cuda:
        use_cuda = torch.cuda.is_available()
        device = torch.device("cuda:0" if use_cuda else "cpu")
    else:
        use_cuda = False
        device = "cpu"
    print("Using cuda: ", use_cuda)
    # Environment
    if args.environment_type == 'atari':
        env = make_atari(args.env_id)
        env = wrap_deepmind(env, args.frames_to_stack, scale=True)
    elif args.environment_type == 'fourrooms':
        env = FourRooms(args.room_size, args.fourrooms_state_type)
    # Random seed
    env.seed(args.seed)
    torch.manual_seed(args.seed)
    # Agent
    if args.agent == 'MFEC':
        agent = MFEC(env, args, device)
    elif args.agent == 'NEC':
        agent = NEC(env, args, device)
    # Pretraining: autoencoder in MFEC or DND warmup in NEC
    agent.warmup()
    # Training loop
    time_history = []  # records time (in sec) of each episode
    num_frames_history = []  # records the number of frames of each episode
    score_history = []  # records total score of each episode
    n_extra_steps_history = []  # records number of extra steps in fourrooms
    for episode in range(args.n_episodes):
        start_time = time.time()
        if args.environment_type == 'fourrooms':
            n_extra_steps, num_frames, score = agent.run_episode()
            # BUGFIX: n_extra_steps is only defined in the fourrooms branch;
            # appending it unconditionally raised NameError on atari runs.
            n_extra_steps_history.append(n_extra_steps)
        else:
            num_frames, score = agent.run_episode()
        time_history.append(time.time() - start_time)
        num_frames_history.append(num_frames)
        score_history.append(score)
        if episode % args.print_every == 0:
            if args.environment_type == 'fourrooms':
                print("Episode:", episode,
                      "Score:", score_history[-1],
                      "Average score:", np.mean(score_history),
                      "Extra steps:", n_extra_steps,
                      "Time:", time_history[-1])
            else:
                print("Episode:", episode,
                      "Score:", score_history[-1],
                      "Average score:", np.mean(score_history),
                      "Frames:", num_frames,
                      "Time:", time_history[-1])
    print("Average time per episode:", np.mean(time_history))
    print("Total number of frames:", np.sum(num_frames_history))
    # Testing loop
    # TODO: test with smaller epsilon, no random starting actions, etc.?
    # TODO: can also record rendered frames of a few episodes?
    # Save score history to file
    scores_arr = np.array(score_history)
    frames_arr = np.array(num_frames_history)
    if args.environment_type == 'fourrooms':
        n_extra_steps_arr = np.array(n_extra_steps_history)
        data_arr = np.stack([scores_arr, frames_arr, n_extra_steps_arr], 1)
    else:
        data_arr = np.stack([scores_arr, frames_arr], 1)
    np.save(args.out_data_file, data_arr)
if __name__ == '__main__':
    # Entry point: parse CLI flags, echo the full configuration (handy for
    # reproducing a run from its log), then launch training.
    args = parser.parse_args()
    print(args)
    main(args)
| [
"numpy.stack",
"numpy.save",
"numpy.sum",
"argparse.ArgumentParser",
"utils.atari_wrappers.make_atari",
"time.time",
"numpy.mean",
"numpy.array",
"utils.atari_wrappers.wrap_deepmind"
] | [((372, 397), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (395, 397), False, 'import argparse\n'), ((9301, 9324), 'numpy.array', 'np.array', (['score_history'], {}), '(score_history)\n', (9309, 9324), True, 'import numpy as np\n'), ((9342, 9370), 'numpy.array', 'np.array', (['num_frames_history'], {}), '(num_frames_history)\n', (9350, 9370), True, 'import numpy as np\n'), ((9618, 9655), 'numpy.save', 'np.save', (['args.out_data_file', 'data_arr'], {}), '(args.out_data_file, data_arr)\n', (9625, 9655), True, 'import numpy as np\n'), ((7103, 7126), 'utils.atari_wrappers.make_atari', 'make_atari', (['args.env_id'], {}), '(args.env_id)\n', (7113, 7126), False, 'from utils.atari_wrappers import make_atari, wrap_deepmind\n'), ((7141, 7193), 'utils.atari_wrappers.wrap_deepmind', 'wrap_deepmind', (['env', 'args.frames_to_stack'], {'scale': '(True)'}), '(env, args.frames_to_stack, scale=True)\n', (7154, 7193), False, 'from utils.atari_wrappers import make_atari, wrap_deepmind\n'), ((7967, 7978), 'time.time', 'time.time', ([], {}), '()\n', (7976, 7978), False, 'import time\n'), ((9006, 9027), 'numpy.mean', 'np.mean', (['time_history'], {}), '(time_history)\n', (9013, 9027), True, 'import numpy as np\n'), ((9066, 9092), 'numpy.sum', 'np.sum', (['num_frames_history'], {}), '(num_frames_history)\n', (9072, 9092), True, 'import numpy as np\n'), ((9444, 9475), 'numpy.array', 'np.array', (['n_extra_steps_history'], {}), '(n_extra_steps_history)\n', (9452, 9475), True, 'import numpy as np\n'), ((9495, 9551), 'numpy.stack', 'np.stack', (['[scores_arr, frames_arr, n_extra_steps_arr]', '(1)'], {}), '([scores_arr, frames_arr, n_extra_steps_arr], 1)\n', (9503, 9551), True, 'import numpy as np\n'), ((9578, 9615), 'numpy.stack', 'np.stack', (['[scores_arr, frames_arr]', '(1)'], {}), '([scores_arr, frames_arr], 1)\n', (9586, 9615), True, 'import numpy as np\n'), ((8186, 8197), 'time.time', 'time.time', ([], {}), '()\n', (8195, 8197), False, 'import time\n'), 
((8576, 8598), 'numpy.mean', 'np.mean', (['score_history'], {}), '(score_history)\n', (8583, 8598), True, 'import numpy as np\n'), ((8851, 8873), 'numpy.mean', 'np.mean', (['score_history'], {}), '(score_history)\n', (8858, 8873), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Pheromone-based ("ant colony"-style) optimization loop — presumably for a
# knapsack-like problem (items with weights `w`, utilities `u`, capacity `W`);
# confirm against generate_solution / calculate_feroments ("pheromones").
import numpy as np
from generate_solution import generate_solution
from calculate_feroments import calculate_feroments
np.set_printoptions(precision=3)
np.random.seed(0)  # fixed seed: every run draws the same random problem instance
T = 100  # number of item types
# Random problem instance; drawing order matters under the fixed seed above.
q = np.random.choice(np.arange(1, 5), size=T)  # per-type count — assumed; TODO confirm
w = np.random.choice(np.arange(10, 20), size=T)  # per-type weight
u = np.random.choice(np.arange(100, 200), size=T)  # per-type utility
W = 1_000  # capacity passed to generate_solution
N = 100  # candidate solutions sampled per iteration
M = 100  # number of pheromone-update iterations
alpha = .1  # blend rate: weight of this iteration's deposits vs. the old trail
F = np.array(np.repeat(100, T), dtype='float64')  # pheromone levels, uniform start
best_u = 0  # best utility seen so far across all sampled solutions
for i in range(M):
    # Pheromone deposits accumulated from this iteration's N solutions.
    f = np.array(np.repeat(0, T), dtype='float64')
    for j in range(N):
        c, _w, _u = generate_solution(T, q, w, u, W, F)
        best_u = max(_u, best_u)
        f += calculate_feroments(c, _w, _u, w, u)
    print(f'it #{i}: best_u = {best_u}')
    # Exponential moving average: decay the old trail, mix in new deposits.
    F = alpha * f + (1 - alpha) * F
print(generate_solution(T, q, w, u, W, F))
print(u/w)  # utility-to-weight ratios, for eyeballing the greedy ordering
print(F)
"numpy.set_printoptions",
"numpy.random.seed",
"numpy.arange",
"calculate_feroments.calculate_feroments",
"generate_solution.generate_solution",
"numpy.repeat"
] | [((142, 174), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (161, 174), True, 'import numpy as np\n'), ((176, 193), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (190, 193), True, 'import numpy as np\n'), ((224, 239), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (233, 239), True, 'import numpy as np\n'), ((270, 287), 'numpy.arange', 'np.arange', (['(10)', '(20)'], {}), '(10, 20)\n', (279, 287), True, 'import numpy as np\n'), ((318, 337), 'numpy.arange', 'np.arange', (['(100)', '(200)'], {}), '(100, 200)\n', (327, 337), True, 'import numpy as np\n'), ((397, 414), 'numpy.repeat', 'np.repeat', (['(100)', 'T'], {}), '(100, T)\n', (406, 414), True, 'import numpy as np\n'), ((731, 766), 'generate_solution.generate_solution', 'generate_solution', (['T', 'q', 'w', 'u', 'W', 'F'], {}), '(T, q, w, u, W, F)\n', (748, 766), False, 'from generate_solution import generate_solution\n'), ((478, 493), 'numpy.repeat', 'np.repeat', (['(0)', 'T'], {}), '(0, T)\n', (487, 493), True, 'import numpy as np\n'), ((546, 581), 'generate_solution.generate_solution', 'generate_solution', (['T', 'q', 'w', 'u', 'W', 'F'], {}), '(T, q, w, u, W, F)\n', (563, 581), False, 'from generate_solution import generate_solution\n'), ((616, 652), 'calculate_feroments.calculate_feroments', 'calculate_feroments', (['c', '_w', '_u', 'w', 'u'], {}), '(c, _w, _u, w, u)\n', (635, 652), False, 'from calculate_feroments import calculate_feroments\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.patches as mpatches
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.lines import Line2D
import copy
from Utility import Utility
from FactorAnalyzer import FactorAnalyzer
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import fcluster
from sklearn import mixture
import itertools
def fancy_dendrogram(*args, **kwargs):
    """Draw a dendrogram annotated with merge distances.

    Accepts the same arguments as scipy's ``dendrogram`` plus two extras:
    ``max_d`` draws a horizontal cut line at that distance (and doubles as
    the color threshold when none is supplied) and ``annotate_above`` only
    labels merges whose distance exceeds it.  Returns the dict produced by
    ``dendrogram``.
    """
    cut_distance = kwargs.pop('max_d', None)
    if cut_distance and 'color_threshold' not in kwargs:
        kwargs['color_threshold'] = cut_distance
    label_threshold = kwargs.pop('annotate_above', 0)
    ddata = dendrogram(*args, **kwargs)
    if kwargs.get('no_plot', False):
        return ddata
    plt.title('Hierarchical Clustering Dendrogram (truncated)')
    plt.xlabel('sample index or (cluster size)')
    plt.ylabel('distance')
    for icoords, dcoords, link_color in zip(ddata['icoord'], ddata['dcoord'],
                                            ddata['color_list']):
        # Midpoint of the top of the U-link and its merge height.
        merge_x = 0.5 * sum(icoords[1:3])
        merge_y = dcoords[1]
        if merge_y > label_threshold:
            plt.plot(merge_x, merge_y, 'o', c=link_color)
            plt.annotate("%.3g" % merge_y, (merge_x, merge_y), xytext=(0, -5),
                         textcoords='offset points',
                         va='top', ha='center')
    if cut_distance:
        plt.axhline(y=cut_distance, c='k')
    return ddata
class Clusterer:
    """Clustering helpers (K-Means, hierarchical, Gaussian mixtures) with
    matplotlib/seaborn diagnostics.

    Methods either plot a model-selection aid (silhouette, elbow, dendrogram,
    AIC/BIC) or fit a model and return one integer cluster label per row of
    the input.  Plotting methods call plt.show() and assume an interactive
    backend; k_means/hierarchical_clusters also call display(), which
    presumably is the IPython/Jupyter builtin — confirm.
    """
    def silhouette_clusters_K_Means(self, X, range_n_clusters=[2,3,4,5,6]):
        '''Plots the silhouette coefficients values of the different clusters along with the average silhouette score
        Parameters:
        - X: data to be clustered of size (n_instances, n_features)
        - range_n_clusters: sizes of the clusters considered, for instance: range_n_clusters = [2, 3, 4, 5, 6]
        '''
        # NOTE: the mutable default list is only iterated, never mutated,
        # so sharing it across calls is harmless here.
        for n_clusters in range_n_clusters:
            # Create a subplot with 1 row and 2 columns
            fig, ax1 = plt.subplots(1, 1)
            #fig.set_size_inches(18, 7)
            # The 1st subplot is the silhouette plot
            # The silhouette coefficient can range from -1, 1 but in this example all
            # lie within [-0.1, 1]
            ax1.set_xlim([-1, 1])
            # The (n_clusters+1)*10 is for inserting blank space between silhouette
            # plots of individual clusters, to demarcate them clearly.
            ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
            # Initialize the clusterer with n_clusters value and a random generator
            # seed of 10 for reproducibility.
            clusterer = KMeans(n_clusters=n_clusters, random_state=10)
            cluster_labels = clusterer.fit_predict(X)
            # The silhouette_score gives the average value for all the samples.
            # This gives a perspective into the density and separation of the formed
            # clusters
            silhouette_avg = silhouette_score(X, cluster_labels)
            print("For n_clusters =", n_clusters,
                  "The average silhouette_score is :", silhouette_avg)
            # Compute the silhouette scores for each sample
            sample_silhouette_values = silhouette_samples(X, cluster_labels)
            y_lower = 10
            for i in range(n_clusters):
                # Aggregate the silhouette scores for samples belonging to
                # cluster i, and sort them
                ith_cluster_silhouette_values = \
                    sample_silhouette_values[cluster_labels == i]
                ith_cluster_silhouette_values.sort()
                size_cluster_i = ith_cluster_silhouette_values.shape[0]
                y_upper = y_lower + size_cluster_i
                color = cm.nipy_spectral(float(i) / n_clusters)
                ax1.fill_betweenx(np.arange(y_lower, y_upper),
                                  0, ith_cluster_silhouette_values,
                                  facecolor=color, edgecolor=color, alpha=0.7)
                # Label the silhouette plots with their cluster numbers at the middle
                ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
                # Compute the new y_lower for next plot
                y_lower = y_upper + 10  # 10 for the 0 samples
            ax1.set_title("The silhouette plot for " + str(n_clusters) + " clusters.")
            ax1.set_xlabel("The silhouette coefficient values")
            ax1.set_ylabel("Cluster label")
            # The vertical line for average silhouette score of all the values
            ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
        plt.show()
    def elbow_k_means(self, X, range_n_clusters = np.arange(2,11)):
        '''Plots the Elbow Method for K-Means
        Parameters:
        - X: data to be clustered of size (n_instances, n_features)
        - range_n_clusters: sizes of the clusters considered, for instance: range_n_clusters = [2, 3, 4, 5, 6]
        '''
        # NOTE: the np.arange default is evaluated once at definition time;
        # it is only read here, so the shared default is safe.
        km = [KMeans(n_clusters=i, random_state=10) for i in range_n_clusters]
        inertias = [km[i].fit(X).inertia_ for i in range(len(km))]
        plt.plot(range_n_clusters, inertias, '.-', linewidth = 2)
        plt.xlabel("k: number of clusters")
        plt.ylabel("intertia")
        plt.title("Elbow method")
        # Returns one fitted inertia per candidate k (lower is tighter).
        return inertias
    def plot_cluster_centers(self, centers):
        """
        Heatmap visualization of cluster centers: one column per cluster,
        one row per feature (centers is transposed before plotting).
        """
        fig, ax = plt.subplots(figsize = (10,8))
        sns.heatmap(centers.T, annot = True, center = 0, fmt = '.2f', linewidth = 0.5, cmap = 'viridis')
        ax.set_title("Cluster centers", fontsize = 20)
        plt.xticks(rotation = 0)
        plt.yticks(rotation = 0)
        # The 1e-9 offsets presumably work around the matplotlib 3.1 bug that
        # clips the first/last rows of seaborn heatmaps — confirm.
        ax.set_ylim(centers.T.shape[0]-1e-9, -1e-9)
        ax.set_xlabel("Cluster")
        plt.show()
    def k_means(self, df, n_clusters):
        '''
        Returns the label encoded clusters for each example in the df.
        '''
        # Normalize features first so no single column dominates the distance.
        df = Utility().normalize(df)
        kmeans = KMeans(n_clusters = n_clusters)
        kmeans.fit(df)
        print("Reduced inertia:", kmeans.inertia_)
        print("Clusters centers:")
        # NOTE(review): display() is not imported here — presumably the
        # IPython/Jupyter builtin; this method fails outside a notebook.
        display(pd.DataFrame(kmeans.cluster_centers_, columns = df.columns,
                             index = ["cluster %i" %i for i in np.arange(n_clusters)]))
        centers = pd.DataFrame(kmeans.cluster_centers_, columns = df.columns, index =["cluster %i" %i for i in np.arange(n_clusters)])
        self.plot_cluster_centers(centers)
        # return Utility().label_encode(kmeans.labels_)
        return kmeans.labels_
    def hierarchical_clusters(self, X, method='ward', p=30, k=4):
        '''Returns the clusters predicted for each example:
        - X: data to cluster
        - method: linkage method, by default 'ward'
        - p: last p splits considered in the dendogram
        - k: number of clusters used in the predicted clusters (does not influence the dendogram)
        '''
        # NOTE(review): the `method` parameter is accepted but ignored —
        # linkage is hard-coded to 'ward' below.
        Z = linkage(X, 'ward')
        plt.figure(figsize=(25, 10))
        _ = fancy_dendrogram(Z,
                    leaf_rotation=90., # rotates the x axis labels
                    leaf_font_size=8., # font size for the x axis labels
                    truncate_mode='lastp',
                    p=p
                    )
        # fcluster labels start at 1; shift to 0-based to match the other methods.
        clusters = np.array(fcluster(Z, k, criterion='maxclust')) -1
        # Deep-copy so the caller's dataframe is not mutated by the new column.
        X_copy = copy.deepcopy(X)
        X_copy['clusters'] = clusters
        centers = X_copy.groupby(['clusters']).mean()
        display(centers)
        self.plot_cluster_centers(centers)
        return clusters
    def best_GMM_clusters(self, X, criterion='bic'):
        '''
        Returns the best GMM clusters according to AIC or BIC and plots the criterion AIC and BIC
        found for each number of components used.
        Parameters:
        - X: data to be cluster of size (n_examples, n_features)
        - criterion: 'bic' or 'aic', criterion selected for the best gmm
        '''
        # Grid-search over covariance structure x number of components,
        # tracking the best model under each criterion separately.
        lowest_aic = np.infty
        lowest_bic = np.infty
        aic = []
        bic = []
        n_components_range = range(1, 7)
        cv_types = ['spherical', 'tied', 'diag', 'full']
        for cv_type in cv_types:
            for n_components in n_components_range:
                # Fit a Gaussian mixture with EM
                gmm = mixture.GaussianMixture(n_components=n_components,
                                              covariance_type=cv_type)
                gmm.fit(X)
                aic.append(gmm.aic(X))
                bic.append(gmm.bic(X))
                if aic[-1] < lowest_aic:
                    lowest_aic = aic[-1]
                    best_gmm_aic = gmm
                    best_type_aic = cv_type
                    best_component_aic = n_components
                if bic[-1] < lowest_bic:
                    lowest_bic = bic[-1]
                    best_gmm_bic = gmm
                    best_type_bic = cv_type
                    best_component_bic = n_components
        aic = np.array(aic)
        bic = np.array(bic)
        color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
                                      'darkorange'])
        # NOTE(review): `bars` keeps growing across both subplots, so the
        # second legend call zips 8 bar groups against 4 cv_types (zip
        # truncates silently, so only the first 4 entries are used).
        bars = []
        plt.figure(figsize=(20, 8))
        # Plot the AIC scores
        spl = plt.subplot(1, 2, 1)
        for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
            xpos = np.array(n_components_range) + .2 * (i - 2)
            bars.append(plt.bar(xpos, aic[i * len(n_components_range):
                                          (i + 1) * len(n_components_range)],
                                width=.2, color=color))
        plt.xticks(n_components_range)
        plt.ylim([aic.min() * 1.01 - .01 * aic.max(), aic.max()])
        plt.title('AIC score per model')
        # Star marks the winning (cv_type, n_components) cell on the chart.
        xpos = np.mod(aic.argmin(), len(n_components_range)) + .65 +\
            .2 * np.floor(aic.argmin() / len(n_components_range))
        plt.text(xpos, aic.min() * 0.97 + .03 * aic.max(), '*', fontsize=14)
        spl.set_xlabel('Number of components')
        spl.legend([b[0] for b in bars], cv_types)
        # Plot the BIC scores
        spl = plt.subplot(1, 2, 2)
        for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
            xpos = np.array(n_components_range) + .2 * (i - 2)
            bars.append(plt.bar(xpos, bic[i * len(n_components_range):
                                          (i + 1) * len(n_components_range)],
                                width=.2, color=color))
        plt.xticks(n_components_range)
        plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
        plt.title('BIC score per model')
        xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
            .2 * np.floor(bic.argmin() / len(n_components_range))
        plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
        spl.set_xlabel('Number of components')
        spl.legend([b[0] for b in bars], cv_types)
        if criterion == 'aic':
            # return Utility().label_encode(best_gmm_aic.predict(X))
            print('Criterion selected for clusters: AIC')
            print('Selected CV type:', best_type_aic)
            print('Selected number of components:', best_component_aic)
            return best_gmm_aic.predict(X)
        else:
            # return Utility().label_encode(best_gmm_bic.predict(X))
            print('Criterion selected for clusters: BIC')
            print('Selected CV type:', best_type_bic)
            print('Selected number of components:', best_component_bic)
            return best_gmm_bic.predict(X)
    def plot_cluster_2D(self, df, clusters, pca_plot = False):
        """
        Give a 2D view of clusters using 2-dimensional PCA
        df: dataframe
        clusters: list of labels
        """
        f = FactorAnalyzer()
        pca = f.pca(2, df, plot = pca_plot)
        # One palette entry per distinct label (plus one spare).
        palette = sns.color_palette('hls', len(np.unique(clusters))+1)
        plt.scatter(pca['results'][0],pca['results'][1], c = [palette[i] for i in clusters])
        handles = [mpatches.Patch(color=palette[i], label="cluster %i" % i) for i in np.unique(clusters)]
        plt.legend(handles = handles)
        plt.xlabel("PC1 ({:.2%} explained variance)".format(pca['explained_variance'][0]))
        plt.ylabel("PC2 ({:.2%} explained variance)".format(pca['explained_variance'][1]))
        plt.show()
    def plot_cluster_3D(self, df, clusters, pca_plot = False):
        """
        Give a 3D view of clusters using 3-dimensional PCA
        """
        f = FactorAnalyzer()
        pca = f.pca(3, df, plot = pca_plot)
        palette = sns.color_palette('hls', len(np.unique(clusters))+1)
        fig = plt.figure(figsize = (10,10))
        ax = fig.add_subplot(111, projection = '3d')
        ax.scatter(pca['results'][0], pca['results'][1], pca['results'][2],
                   c = [palette[i] for i in clusters], marker = 'o', s = 50)
        ax.set_xlabel("PC1 ({:.2%} explained variance)".format(pca['explained_variance'][0]))
        ax.set_ylabel("PC2 ({:.2%} explained variance)".format(pca['explained_variance'][1]))
        ax.set_zlabel("PC3 ({:.2%} explained variance)".format(pca['explained_variance'][2]))
        # 3D scatter points cannot feed a legend directly, so build proxy
        # Line2D artists, one per cluster label.
        scatter_proxy = [Line2D([0],[0], linestyle = 'none', c = palette[i], marker = 'o') for i in np.unique(clusters)]
        label = ["cluster %i" % i for i in np.unique(clusters)]
        ax.legend(scatter_proxy, label, loc = (0.7,0.6))
        fig.show()
| [
"matplotlib.pyplot.title",
"seaborn.heatmap",
"scipy.cluster.hierarchy.fcluster",
"scipy.cluster.hierarchy.linkage",
"sklearn.mixture.GaussianMixture",
"matplotlib.pyplot.figure",
"numpy.arange",
"FactorAnalyzer.FactorAnalyzer",
"itertools.cycle",
"sklearn.metrics.silhouette_samples",
"matplotli... | [((803, 830), 'scipy.cluster.hierarchy.dendrogram', 'dendrogram', (['*args'], {}), '(*args, **kwargs)\n', (813, 830), False, 'from scipy.cluster.hierarchy import dendrogram, linkage\n'), ((881, 940), 'matplotlib.pyplot.title', 'plt.title', (['"""Hierarchical Clustering Dendrogram (truncated)"""'], {}), "('Hierarchical Clustering Dendrogram (truncated)')\n", (890, 940), True, 'import matplotlib.pyplot as plt\n'), ((949, 993), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""sample index or (cluster size)"""'], {}), "('sample index or (cluster size)')\n", (959, 993), True, 'import matplotlib.pyplot as plt\n'), ((1002, 1024), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""distance"""'], {}), "('distance')\n", (1012, 1024), True, 'import matplotlib.pyplot as plt\n'), ((4742, 4758), 'numpy.arange', 'np.arange', (['(2)', '(11)'], {}), '(2, 11)\n', (4751, 4758), True, 'import numpy as np\n'), ((5172, 5227), 'matplotlib.pyplot.plot', 'plt.plot', (['range_n_clusters', 'inertias', '""".-"""'], {'linewidth': '(2)'}), "(range_n_clusters, inertias, '.-', linewidth=2)\n", (5180, 5227), True, 'import matplotlib.pyplot as plt\n'), ((5238, 5273), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""k: number of clusters"""'], {}), "('k: number of clusters')\n", (5248, 5273), True, 'import matplotlib.pyplot as plt\n'), ((5282, 5304), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""intertia"""'], {}), "('intertia')\n", (5292, 5304), True, 'import matplotlib.pyplot as plt\n'), ((5313, 5338), 'matplotlib.pyplot.title', 'plt.title', (['"""Elbow method"""'], {}), "('Elbow method')\n", (5322, 5338), True, 'import matplotlib.pyplot as plt\n'), ((5486, 5515), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (5498, 5515), True, 'import matplotlib.pyplot as plt\n'), ((5525, 5616), 'seaborn.heatmap', 'sns.heatmap', (['centers.T'], {'annot': '(True)', 'center': '(0)', 'fmt': '""".2f"""', 'linewidth': '(0.5)', 'cmap': 
'"""viridis"""'}), "(centers.T, annot=True, center=0, fmt='.2f', linewidth=0.5, cmap\n ='viridis')\n", (5536, 5616), True, 'import seaborn as sns\n'), ((5685, 5707), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(0)'}), '(rotation=0)\n', (5695, 5707), True, 'import matplotlib.pyplot as plt\n'), ((5718, 5740), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'rotation': '(0)'}), '(rotation=0)\n', (5728, 5740), True, 'import matplotlib.pyplot as plt\n'), ((5836, 5846), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5844, 5846), True, 'import matplotlib.pyplot as plt\n'), ((6045, 6074), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters'}), '(n_clusters=n_clusters)\n', (6051, 6074), False, 'from sklearn.cluster import KMeans\n'), ((7009, 7027), 'scipy.cluster.hierarchy.linkage', 'linkage', (['X', '"""ward"""'], {}), "(X, 'ward')\n", (7016, 7027), False, 'from scipy.cluster.hierarchy import dendrogram, linkage\n'), ((7037, 7065), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(25, 10)'}), '(figsize=(25, 10))\n', (7047, 7065), True, 'import matplotlib.pyplot as plt\n'), ((7470, 7486), 'copy.deepcopy', 'copy.deepcopy', (['X'], {}), '(X)\n', (7483, 7486), False, 'import copy\n'), ((9085, 9098), 'numpy.array', 'np.array', (['aic'], {}), '(aic)\n', (9093, 9098), True, 'import numpy as np\n'), ((9113, 9126), 'numpy.array', 'np.array', (['bic'], {}), '(bic)\n', (9121, 9126), True, 'import numpy as np\n'), ((9148, 9218), 'itertools.cycle', 'itertools.cycle', (["['navy', 'turquoise', 'cornflowerblue', 'darkorange']"], {}), "(['navy', 'turquoise', 'cornflowerblue', 'darkorange'])\n", (9163, 9218), False, 'import itertools\n'), ((9283, 9310), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 8)'}), '(figsize=(20, 8))\n', (9293, 9310), True, 'import matplotlib.pyplot as plt\n'), ((9355, 9375), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (9366, 9375), True, 'import 
matplotlib.pyplot as plt\n'), ((9725, 9755), 'matplotlib.pyplot.xticks', 'plt.xticks', (['n_components_range'], {}), '(n_components_range)\n', (9735, 9755), True, 'import matplotlib.pyplot as plt\n'), ((9830, 9862), 'matplotlib.pyplot.title', 'plt.title', (['"""AIC score per model"""'], {}), "('AIC score per model')\n", (9839, 9862), True, 'import matplotlib.pyplot as plt\n'), ((10218, 10238), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (10229, 10238), True, 'import matplotlib.pyplot as plt\n'), ((10588, 10618), 'matplotlib.pyplot.xticks', 'plt.xticks', (['n_components_range'], {}), '(n_components_range)\n', (10598, 10618), True, 'import matplotlib.pyplot as plt\n'), ((10693, 10725), 'matplotlib.pyplot.title', 'plt.title', (['"""BIC score per model"""'], {}), "('BIC score per model')\n", (10702, 10725), True, 'import matplotlib.pyplot as plt\n'), ((11890, 11906), 'FactorAnalyzer.FactorAnalyzer', 'FactorAnalyzer', ([], {}), '()\n', (11904, 11906), False, 'from FactorAnalyzer import FactorAnalyzer\n'), ((12031, 12118), 'matplotlib.pyplot.scatter', 'plt.scatter', (["pca['results'][0]", "pca['results'][1]"], {'c': '[palette[i] for i in clusters]'}), "(pca['results'][0], pca['results'][1], c=[palette[i] for i in\n clusters])\n", (12042, 12118), True, 'import matplotlib.pyplot as plt\n'), ((12231, 12258), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'handles'}), '(handles=handles)\n', (12241, 12258), True, 'import matplotlib.pyplot as plt\n'), ((12458, 12468), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12466, 12468), True, 'import matplotlib.pyplot as plt\n'), ((12628, 12644), 'FactorAnalyzer.FactorAnalyzer', 'FactorAnalyzer', ([], {}), '()\n', (12642, 12644), False, 'from FactorAnalyzer import FactorAnalyzer\n'), ((12775, 12803), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (12785, 12803), True, 'import matplotlib.pyplot as plt\n'), ((1443, 1470), 
'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'max_d', 'c': '"""k"""'}), "(y=max_d, c='k')\n", (1454, 1470), True, 'import matplotlib.pyplot as plt\n'), ((2034, 2052), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2046, 2052), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2720), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(10)'}), '(n_clusters=n_clusters, random_state=10)\n', (2680, 2720), False, 'from sklearn.cluster import KMeans\n'), ((2993, 3028), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['X', 'cluster_labels'], {}), '(X, cluster_labels)\n', (3009, 3028), False, 'from sklearn.metrics import silhouette_samples, silhouette_score\n'), ((3250, 3287), 'sklearn.metrics.silhouette_samples', 'silhouette_samples', (['X', 'cluster_labels'], {}), '(X, cluster_labels)\n', (3268, 3287), False, 'from sklearn.metrics import silhouette_samples, silhouette_score\n'), ((4680, 4690), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4688, 4690), True, 'import matplotlib.pyplot as plt\n'), ((5031, 5068), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'i', 'random_state': '(10)'}), '(n_clusters=i, random_state=10)\n', (5037, 5068), False, 'from sklearn.cluster import KMeans\n'), ((12136, 12192), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'palette[i]', 'label': "('cluster %i' % i)"}), "(color=palette[i], label='cluster %i' % i)\n", (12150, 12192), True, 'import matplotlib.patches as mpatches\n'), ((13330, 13390), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'linestyle': '"""none"""', 'c': 'palette[i]', 'marker': '"""o"""'}), "([0], [0], linestyle='none', c=palette[i], marker='o')\n", (13336, 13390), False, 'from matplotlib.lines import Line2D\n'), ((1214, 1238), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {'c': 'c'}), "(x, y, 'o', c=c)\n", (1222, 1238), True, 'import matplotlib.pyplot as plt\n'), ((1255, 1358), 
'matplotlib.pyplot.annotate', 'plt.annotate', (["('%.3g' % y)", '(x, y)'], {'xytext': '(0, -5)', 'textcoords': '"""offset points"""', 'va': '"""top"""', 'ha': '"""center"""'}), "('%.3g' % y, (x, y), xytext=(0, -5), textcoords='offset points',\n va='top', ha='center')\n", (1267, 1358), True, 'import matplotlib.pyplot as plt\n'), ((6004, 6013), 'Utility.Utility', 'Utility', ([], {}), '()\n', (6011, 6013), False, 'from Utility import Utility\n'), ((7412, 7448), 'scipy.cluster.hierarchy.fcluster', 'fcluster', (['Z', 'k'], {'criterion': '"""maxclust"""'}), "(Z, k, criterion='maxclust')\n", (7420, 7448), False, 'from scipy.cluster.hierarchy import fcluster\n'), ((8404, 8479), 'sklearn.mixture.GaussianMixture', 'mixture.GaussianMixture', ([], {'n_components': 'n_components', 'covariance_type': 'cv_type'}), '(n_components=n_components, covariance_type=cv_type)\n', (8427, 8479), False, 'from sklearn import mixture\n'), ((9468, 9496), 'numpy.array', 'np.array', (['n_components_range'], {}), '(n_components_range)\n', (9476, 9496), True, 'import numpy as np\n'), ((10331, 10359), 'numpy.array', 'np.array', (['n_components_range'], {}), '(n_components_range)\n', (10339, 10359), True, 'import numpy as np\n'), ((12202, 12221), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (12211, 12221), True, 'import numpy as np\n'), ((13405, 13424), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (13414, 13424), True, 'import numpy as np\n'), ((13469, 13488), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (13478, 13488), True, 'import numpy as np\n'), ((3865, 3892), 'numpy.arange', 'np.arange', (['y_lower', 'y_upper'], {}), '(y_lower, y_upper)\n', (3874, 3892), True, 'import numpy as np\n'), ((11998, 12017), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (12007, 12017), True, 'import numpy as np\n'), ((12736, 12755), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (12745, 12755), True, 'import numpy as 
np\n'), ((6470, 6491), 'numpy.arange', 'np.arange', (['n_clusters'], {}), '(n_clusters)\n', (6479, 6491), True, 'import numpy as np\n'), ((6325, 6346), 'numpy.arange', 'np.arange', (['n_clusters'], {}), '(n_clusters)\n', (6334, 6346), True, 'import numpy as np\n')] |
import os
import setuptools
from torch.utils.data import DataLoader
import numpy as np
from pytorch_lightning import LightningModule, Trainer
import torch
import torch.nn as nn
import torchmetrics
import pytorch_lightning.loggers as pl_loggers
import pytorch_lightning.callbacks as pl_callbacks
from lightning_nets.hooks import *
from lightning_nets.hooks.plotters import *
from lightning_nets.data import *
from lightning_nets.modules import *
class EpochInferenceCallback(pl_callbacks.Callback):
    """Lightning callback that runs the model on one fixed batch at the end of
    every epoch and forwards (input, target, prediction) to a DataPlotter."""
    def __init__(self, dataloader:DataLoader, data_plotter:DataPlotter = None, num_samples:int = 5, shuffle:bool = True) -> None:
        """
        Args:
            dataloader: source loader; only its underlying dataset is reused.
            data_plotter: receives (x, y, y_hat) after each epoch; may be None.
            num_samples: batch size of the fixed inference batch drawn up front.
            shuffle: whether that batch is drawn randomly or from the start.
        """
        super().__init__()
        self.data_plotter = data_plotter
        # Draw one batch now and keep it; the same samples are re-evaluated
        # every epoch so the plots are comparable over time.
        data_loader = DataLoader(dataset=dataloader.dataset, batch_size=num_samples, shuffle=shuffle)
        _, sample = enumerate(data_loader).__next__()
        self.x, self.y = sample
        # NOTE(review): self.data_loader is created but never read elsewhere in
        # this class -- confirm whether it is still needed.
        self.data_loader = DataLoader(PredictTorchDataset(self.x))
        self._current_epoch = 0
        self._global_step = 0
    def on_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
        # Act only once per new epoch; the hook can fire more than once.
        # Since _current_epoch starts at 0, epoch 0 is skipped.
        if self._current_epoch == trainer.current_epoch:
            return
        self._current_epoch = trainer.current_epoch
        self._global_step = trainer.global_step
        # Move the cached batch onto the module's device, preserving dtypes.
        x = self.to_tensor(self.x, pl_module, self.x.dtype)
        y = self.to_tensor(self.y, pl_module, self.y.dtype)
        # Run inference in eval mode, then restore training mode.
        pl_module.eval()
        y_hat = pl_module(x)
        pl_module.train()
        x = self.to_numpy(x, pl_module)
        y = self.to_numpy(y, pl_module)
        y_hat = self.to_numpy(y_hat, pl_module)
        if self.data_plotter is not None:
            self.data_plotter.plot_data(x, y, y_hat, current_epoch=self._current_epoch, global_step=self._global_step)
        return
    def to_tensor(self, data: Any, pl_module: LightningModule, dtype: torch.dtype=torch.float32):
        """
        A utility method for converting data to tensor objects on the module's device.
        Args:
            data (Any): The data to convert to a tensor
            dtype (torch.dtype, optional): Defaults to torch.float32.
        Raises:
            TypeError: If data is not a list, ndarray, or Tensor.
        Returns:
            torch.Tensor: A tensor on pl_module.device
        """
        if isinstance(data, list):
            result = torch.tensor(np.asarray(data), dtype=dtype).to(device=pl_module.device)
        elif isinstance(data, np.ndarray):
            result = torch.tensor(data, dtype=dtype).to(device=pl_module.device)
        elif isinstance(data, torch.Tensor):
            # Clone so the caller's tensor (and its grad flag) is untouched.
            result = data.clone().detach().requires_grad_(data.requires_grad).to(device=pl_module.device, dtype=dtype)
        else:
            raise TypeError("type {} of data is not acceptable".format(type(data)))
        return result
    def to_numpy(self, data: torch.Tensor, pl_module: LightningModule):
        """
        Converts a torch.Tensor to a numpy.ndarray
        Args:
            data (torch.Tensor): The array to convert
        Returns:
            numpy.ndarray: The converted array
        """
        # detach/cpu first: .numpy() requires a grad-free CPU tensor.
        if data.requires_grad:
            data = data.detach()
        if pl_module.device != torch.device("cpu"):
            data = data.cpu()
        return data.numpy()
| [
"numpy.asarray",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.device"
] | [((735, 814), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dataloader.dataset', 'batch_size': 'num_samples', 'shuffle': 'shuffle'}), '(dataset=dataloader.dataset, batch_size=num_samples, shuffle=shuffle)\n', (745, 814), False, 'from torch.utils.data import DataLoader\n'), ((3204, 3223), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3216, 3223), False, 'import torch\n'), ((2360, 2376), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2370, 2376), True, 'import numpy as np\n'), ((2483, 2514), 'torch.tensor', 'torch.tensor', (['data'], {'dtype': 'dtype'}), '(data, dtype=dtype)\n', (2495, 2514), False, 'import torch\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 1 14:08:37 2018
@author: nadine
"""
import numpy as np
def coralGrowthRate(depth):
    """Return the vertical coral growth rate for each depth.

    Growth follows a light-saturation (tanh) model: surface light I0 decays
    exponentially with depth (extinction coefficient k), is scaled by the
    saturating intensity Ik, and the result is capped at the maximum upward
    growth rate Gm.  Points at or above sea level (depth <= 0) do not grow.

    Args:
        depth (np.ndarray): 1-D array of depths (positive = below sea level).

    Returns:
        np.ndarray: growth rate per entry; zero where depth <= 0.

    Examples:
        >>> G = coralGrowthRate(np.array([-1., 0., 1., 2.]))
        >>> bool(G[0] == 0.0 and G[1] == 0.0 and G[2] > 0.0)
        True
    """
    Gm = .012  # max upward growth rate, 10-15 mm/yr
    I0 = 2200. * 365.25*24*60*60  # surface light intensity, 2000-2500 muE/m^2*s
    Ik = 200. * 365.25*24*60*60   # saturating light intensity, 50-450 muE/m^2*s
    k = 0.08  # extinction coefficient, 0.04-0.16 per m
    G = np.zeros(len(depth))
    # Only submerged points (strictly below sea level) grow.
    below = np.where(depth > 0.)[0]
    G[below] = Gm * np.tanh((I0 * np.exp(-k * depth[below])) / Ik)
    return G
| [
"numpy.where",
"numpy.exp"
] | [((604, 625), 'numpy.where', 'np.where', (['(depth > 0.0)'], {}), '(depth > 0.0)\n', (612, 625), True, 'import numpy as np\n'), ((658, 683), 'numpy.exp', 'np.exp', (['(-k * depth[below])'], {}), '(-k * depth[below])\n', (664, 683), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
# Draw test lines with OpenCV, extract the red channel, and plot one image row.
imPath = "target/myim.png"
imPath2 = "target/myim2.png"
# Black canvas (height=100, width=200, BGR) with a red diagonal line.
image = np.zeros((100, 200, 3), np.uint8)
cv2.line(image, (5, 5), (31, 31), (0, 0, 255), 3)
cv2.imwrite(imPath, image)
# Re-read the saved image, add a green line, and save the result.
image = cv2.imread(imPath)
cv2.line(image, (50, 50), (31, 31), (0, 255, 0), 3)
cv2.imwrite(imPath2, image)
# FIX: extract the red channel (BGR index 2) with a NumPy slice instead of the
# original quadratic per-pixel Python loop; result is the same uint8 array.
imRed = image[:, :, 2].copy()
cv2.imwrite("target/imRed.png", imRed)
img = cv2.imread("hsv.PNG")
# NOTE(review): imread returns BGR, so pixel[1] is the green channel here --
# it is only "saturation" if hsv.PNG already stores HSV planes; confirm intent.
row_values = [pixel[1] for pixel in img[500]]
plt.plot(row_values)
plt.savefig('green_plot.png')
plt.show()
"cv2.line",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"cv2.imwrite",
"numpy.zeros",
"cv2.imread",
"matplotlib.pyplot.savefig"
] | [((128, 161), 'numpy.zeros', 'np.zeros', (['(100, 200, 3)', 'np.uint8'], {}), '((100, 200, 3), np.uint8)\n', (136, 161), True, 'import numpy as np\n'), ((161, 210), 'cv2.line', 'cv2.line', (['image', '(5, 5)', '(31, 31)', '(0, 0, 255)', '(3)'], {}), '(image, (5, 5), (31, 31), (0, 0, 255), 3)\n', (169, 210), False, 'import cv2\n'), ((206, 232), 'cv2.imwrite', 'cv2.imwrite', (['imPath', 'image'], {}), '(imPath, image)\n', (217, 232), False, 'import cv2\n'), ((242, 260), 'cv2.imread', 'cv2.imread', (['imPath'], {}), '(imPath)\n', (252, 260), False, 'import cv2\n'), ((261, 312), 'cv2.line', 'cv2.line', (['image', '(50, 50)', '(31, 31)', '(0, 255, 0)', '(3)'], {}), '(image, (50, 50), (31, 31), (0, 255, 0), 3)\n', (269, 312), False, 'import cv2\n'), ((308, 335), 'cv2.imwrite', 'cv2.imwrite', (['imPath2', 'image'], {}), '(imPath2, image)\n', (319, 335), False, 'import cv2\n'), ((384, 420), 'numpy.zeros', 'np.zeros', (['(dim[0], dim[1])', 'np.uint8'], {}), '((dim[0], dim[1]), np.uint8)\n', (392, 420), True, 'import numpy as np\n'), ((515, 553), 'cv2.imwrite', 'cv2.imwrite', (['"""target/imRed.png"""', 'imRed'], {}), "('target/imRed.png', imRed)\n", (526, 553), False, 'import cv2\n'), ((562, 583), 'cv2.imread', 'cv2.imread', (['"""hsv.PNG"""'], {}), "('hsv.PNG')\n", (572, 583), False, 'import cv2\n'), ((648, 664), 'matplotlib.pyplot.plot', 'plt.plot', (['mylist'], {}), '(mylist)\n', (656, 664), True, 'import matplotlib.pyplot as plt\n'), ((665, 694), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""green_plot.png"""'], {}), "('green_plot.png')\n", (676, 694), True, 'import matplotlib.pyplot as plt\n'), ((695, 705), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (703, 705), True, 'import matplotlib.pyplot as plt\n')] |
import time
import autopy
import cv2
import numpy as np
import level_3.module.hand_tracking_module as htm
###################
wCam, hCam = 520, 370
frameR_h = 160 # Frame Reduction
frameR_w = 90
smoothening = 8
#########################
def main_avm(queue_shared):
    """Track one hand via the webcam and move the mouse with the index finger.

    Moving mode is active when the index finger is up and the middle finger is
    down; the fingertip position inside the reduced frame is mapped to screen
    coordinates (mirrored horizontally) and smoothed.

    Args:
        queue_shared: queue-like object; ``0`` is put on it for every frame in
            which no hand landmarks are detected, so a consumer can react to
            idle frames.

    Runs until 'q' is pressed, then releases the capture device.
    """
    pTime = 0
    detector = htm.HandDetector(maxHands=1)
    wScr, hScr = autopy.screen.size()
    plocX, plocY = 0, 0
    clocX, clocY = 0, 0

    cap = cv2.VideoCapture(0)
    print('capturin video')
    # FIX: property ids 9/11 are CAP_PROP_MODE/CAP_PROP_CONTRAST, so the old
    # cap.set(9, wCam)/cap.set(11, hCam) never set the resolution (and could
    # corrupt the contrast).  Frame width/height are properties 3 and 4.
    cap.set(3, wCam)  # cv2.CAP_PROP_FRAME_WIDTH
    cap.set(4, hCam)  # cv2.CAP_PROP_FRAME_HEIGHT
    idle_frames = 0
    while True:
        # Grab a frame and locate hand landmarks.
        success, img_org = cap.read()
        img = cv2.resize(img_org, (0, 0), fx=0.7, fy=0.7)
        img = detector.findHands(img)
        lmlist, bbox = detector.findPosition(img)

        if len(lmlist) != 0:
            x1, y1 = lmlist[8][1:]  # index fingertip (landmark 8)
            fingers = detector.fingersUp()
            # Visualize the active control zone.
            cv2.rectangle(img, (frameR_w, frameR_h), (wCam - frameR_w, hCam - frameR_h), (255, 0, 255), 2)
            # Only index finger up: moving mode.
            if fingers[1] == 1 and fingers[2] == 0:
                # Map fingertip coordinates in the control zone to screen coords.
                x3 = np.interp(x1, (frameR_w, wCam - frameR_w), (0, wScr))
                y3 = np.interp(y1, (frameR_h, hCam - frameR_h), (0, hScr))
                # Low-pass filter to reduce jitter.
                clocX = plocX + (x3 - plocX) / smoothening
                clocY = plocY + (y3 - plocY) / smoothening
                try:
                    # Mirror horizontally so on-screen motion matches the hand.
                    autopy.mouse.move(wScr - clocX, clocY)
                except Exception:
                    pass  # move can fail at screen edges; ignore and continue
                cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
                plocX, plocY = clocX, clocY
        else:
            idle_frames += 1
            queue_shared.put(0)

        # FPS overlay.
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
        # cv2.imshow("Image", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # FIX: main_avm requires a queue argument; calling it bare raised
    # TypeError at startup.  Supply a plain Queue for standalone runs.
    from queue import Queue
    main_avm(Queue())
| [
"level_3.module.hand_tracking_module.HandDetector",
"cv2.circle",
"cv2.waitKey",
"autopy.screen.size",
"autopy.mouse.move",
"time.time",
"cv2.VideoCapture",
"cv2.rectangle",
"numpy.interp",
"cv2.destroyAllWindows",
"cv2.resize"
] | [((305, 333), 'level_3.module.hand_tracking_module.HandDetector', 'htm.HandDetector', ([], {'maxHands': '(1)'}), '(maxHands=1)\n', (321, 333), True, 'import level_3.module.hand_tracking_module as htm\n'), ((351, 371), 'autopy.screen.size', 'autopy.screen.size', ([], {}), '()\n', (369, 371), False, 'import autopy\n'), ((436, 455), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (452, 455), False, 'import cv2\n'), ((2438, 2461), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2459, 2461), False, 'import cv2\n'), ((712, 755), 'cv2.resize', 'cv2.resize', (['img_org', '(0, 0)'], {'fx': '(0.7)', 'fy': '(0.7)'}), '(img_org, (0, 0), fx=0.7, fy=0.7)\n', (722, 755), False, 'import cv2\n'), ((2114, 2125), 'time.time', 'time.time', ([], {}), '()\n', (2123, 2125), False, 'import time\n'), ((1073, 1171), 'cv2.rectangle', 'cv2.rectangle', (['img', '(frameR_w, frameR_h)', '(wCam - frameR_w, hCam - frameR_h)', '(255, 0, 255)', '(2)'], {}), '(img, (frameR_w, frameR_h), (wCam - frameR_w, hCam - frameR_h),\n (255, 0, 255), 2)\n', (1086, 1171), False, 'import cv2\n'), ((1340, 1393), 'numpy.interp', 'np.interp', (['x1', '(frameR_w, wCam - frameR_w)', '(0, wScr)'], {}), '(x1, (frameR_w, wCam - frameR_w), (0, wScr))\n', (1349, 1393), True, 'import numpy as np\n'), ((1415, 1468), 'numpy.interp', 'np.interp', (['y1', '(frameR_h, hCam - frameR_h)', '(0, hScr)'], {}), '(y1, (frameR_h, hCam - frameR_h), (0, hScr))\n', (1424, 1468), True, 'import numpy as np\n'), ((1826, 1882), 'cv2.circle', 'cv2.circle', (['img', '(x1, y1)', '(15)', '(255, 0, 255)', 'cv2.FILLED'], {}), '(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)\n', (1836, 1882), False, 'import cv2\n'), ((2350, 2364), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2361, 2364), False, 'import cv2\n'), ((1708, 1746), 'autopy.mouse.move', 'autopy.mouse.move', (['(wScr - clocX)', 'clocY'], {}), '(wScr - clocX, clocY)\n', (1725, 1746), False, 'import autopy\n')] |
# Load per-episode rewards from monitor logs and plot the average learning
# curve for each stiffness version.
import tensorflow as tf
import json
import numpy as np
from scipy import signal, stats
from matplotlib import pyplot as plt
import pickle
from warnings import simplefilter
from stable_baselines.results_plotter import load_results, ts2xy
# Experiment configuration.
stiffness_versions = 9
RL_method = "PPO1"
total_MC_runs = 42 # starts from 1
experiment_ID = "experiment_4_pool_with_MC_C"
total_timesteps = 500000
episode_timesteps = 1000
total_episodes = int(total_timesteps/episode_timesteps)
# Rewards indexed as (Monte-Carlo run, stiffness version, episode).
episode_rewards_all = np.zeros([total_MC_runs, stiffness_versions, total_episodes])
for stiffness_value in range(stiffness_versions):
    stiffness_value_str = "stiffness_{}".format(stiffness_value)
    # NOTE(review): only the last MC run (index total_MC_runs-1) is loaded, so
    # episode_rewards_all keeps 41 all-zero rows and mean(0) below averages
    # them in -- confirm whether the loop should start at 0 instead.
    for mc_cntr in range(total_MC_runs-1,total_MC_runs):
        log_dir = "./logs/{}/MC_{}/{}/{}/".format(experiment_ID, mc_cntr, RL_method, stiffness_value_str)
        jsonFile = open(log_dir+"monitor/openaigym.episode_batch.{}.Monitor_info.stats.json".format(0))
        jsonString = jsonFile.read()
        jsonData = json.loads(jsonString)
        print("stiffness_value: ", stiffness_value, "mc_cntr: ", mc_cntr)
        episode_rewards_all[mc_cntr, stiffness_value, :] = np.array(jsonData['episode_rewards'])
# Average over the MC-run axis (see the note above about zero rows).
episode_rewards_average = episode_rewards_all.mean(0)
# One learning curve per stiffness version; legend labels the stiffness values.
for stiffness_value in range(stiffness_versions):
    plt.plot(episode_rewards_average[stiffness_value,:])
plt.legend(['0','500', '1K', '2K', '4K', '7K', '10K', '15K', '20K'])
plt.show()
#import pdb; pdb.set_trace()
"matplotlib.pyplot.show",
"json.loads",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.array"
] | [((490, 551), 'numpy.zeros', 'np.zeros', (['[total_MC_runs, stiffness_versions, total_episodes]'], {}), '([total_MC_runs, stiffness_versions, total_episodes])\n', (498, 551), True, 'import numpy as np\n'), ((1302, 1371), 'matplotlib.pyplot.legend', 'plt.legend', (["['0', '500', '1K', '2K', '4K', '7K', '10K', '15K', '20K']"], {}), "(['0', '500', '1K', '2K', '4K', '7K', '10K', '15K', '20K'])\n", (1312, 1371), True, 'from matplotlib import pyplot as plt\n'), ((1371, 1381), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1379, 1381), True, 'from matplotlib import pyplot as plt\n'), ((1249, 1302), 'matplotlib.pyplot.plot', 'plt.plot', (['episode_rewards_average[stiffness_value, :]'], {}), '(episode_rewards_average[stiffness_value, :])\n', (1257, 1302), True, 'from matplotlib import pyplot as plt\n'), ((961, 983), 'json.loads', 'json.loads', (['jsonString'], {}), '(jsonString)\n', (971, 983), False, 'import json\n'), ((1105, 1142), 'numpy.array', 'np.array', (["jsonData['episode_rewards']"], {}), "(jsonData['episode_rewards'])\n", (1113, 1142), True, 'import numpy as np\n')] |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
def floyd_warshall(graph: np.ndarray, path_reconstruction=False) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
    """Finds all shortest paths in a weighted graph. Works with positive and negative weights,
    but not with negative cycles.

    Time complexity O(|V|^3)
    Space complexity O(|V|^2)

    Args:
        graph (np.ndarray): Adjacency matrix of the graph (np.inf marks a missing edge).
        path_reconstruction (bool, optional): Whether to also compute and return the
            successor matrix needed for path reconstruction.

    Return:
        np.ndarray: Matrix of shortest-path distances between every pair of nodes.
        np.ndarray (optional): Successor matrix for path reconstruction
            (only when path_reconstruction=True).

    Examples:
        >>> graph = np.asarray([[0, np.inf, -2, np.inf], [4, 0, 3, np.inf], [np.inf, np.inf, 0, 2], [np.inf, -1, np.inf, 0]])
        >>> shortest_paths = floyd_warshall(graph)
        >>> print(shortest_paths)
        [[ 0. -1. -2.  0.]
         [ 4.  0.  2.  4.]
         [ 5.  1.  0.  2.]
         [ 3. -1.  1.  0.]]
    """
    assert graph.shape[0] == graph.shape[1], "Input matrix must be a square adjacency matrix"
    n = graph.shape[0]
    dist_matrix = np.full(graph.shape, np.inf)
    if path_reconstruction:
        next_matrix = np.full(graph.shape, 0)
    # Seed with the direct edge weights (and the direct successors).
    for edge, weight in np.ndenumerate(graph):
        dist_matrix[edge[0], edge[1]] = weight
        if path_reconstruction:
            next_matrix[edge[0], edge[1]] = edge[1]
    # A node reaches itself at zero cost (its own successor is itself).
    np.fill_diagonal(dist_matrix, 0)
    if path_reconstruction:
        np.fill_diagonal(next_matrix, range(n))
    # Relax every pair (i, j) through each intermediate node k.
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist_matrix[i][j] > dist_matrix[i][k] + dist_matrix[k][j]:
                    dist_matrix[i][j] = dist_matrix[i][k] + dist_matrix[k][j]
                    if path_reconstruction:
                        next_matrix[i][j] = next_matrix[i][k]
    if path_reconstruction:
        return dist_matrix, next_matrix
    return dist_matrix
def shortest_path(graph: np.ndarray, *waypoints, pretty=False) -> Union[str, None, Tuple[List[int], Dict[Tuple[int, int], int]]]:
    """Reconstructs the shortest path through multiple waypoint nodes.

    Args:
        graph (np.ndarray): Adjacency matrix of the graph.
        waypoints: node indices the path must visit, in order.
        pretty (bool): If True, pretty-print the path and return the formatted text.

    Note:
        If a waypoint node is not part of the graph an error message string is returned.

    Return:
        str: When pretty=True, the formatted report (it is also printed).
        Tuple[List[int], Dict[Tuple[int, int], int]]: Otherwise, the node sequence of
            the shortest path and the cost of every step along it.

    Example:
        >>> graph = np.asarray([[0, np.inf, -2, np.inf], [4, 0, 3, np.inf], [np.inf, np.inf, 0, 2], [np.inf, -1, np.inf, 0]])
        >>> path = shortest_path(graph, 0, 1, 3)
        >>> print(path)
        ([0, 2, 3, 1, 0, 2, 3], {(0, 2): -2.0, (2, 3): 2.0, (3, 1): -1.0, (1, 0): 4.0})
    """
    dist_matrix, path_matrix = floyd_warshall(graph, path_reconstruction=True)
    assert len(waypoints) > 0, "No waypoints supplied."
    assert all(isinstance(waypoint, int) for waypoint in waypoints), "False datatype for at least one waypoint " \
                                                                    "(int needed)"
    # Probe the successor matrix so an out-of-range waypoint is caught early.
    for i in range(len(waypoints) - 1):
        try:
            path_matrix[waypoints[i]][waypoints[i + 1]]
        except IndexError:
            return "IndexError: Certain waypoint nodes are not part of the input graph"

    def calculate_shortest_path():
        # Walk the successor matrix from each waypoint to the next.
        path = [waypoints[0]]
        for i in range(len(waypoints) - 1):
            u = waypoints[i]
            v = waypoints[i + 1]
            while u != v:
                u = path_matrix[u][v]
                path.append(u)
        return path

    def pprint_path():
        # Human-readable report: waypoints, full path, per-step costs.
        _waypoints = " ⟶ ".join([str(node) for node in waypoints])
        _shortest_path = " ⟶ ".join([str(node) for node in path])
        _cost = '\n'.join(
            [f"{node[0]} ⟶ {node[1]} ({dist_matrix[node[0]][node[1]]})" for node in list(zip(path, path[1:]))])
        return f"== Waypoints ==\n{_waypoints}\n\n==Shortest Path ==\n{_shortest_path}\n\n== Cost ==\n{_cost}"

    def calc_path_costs():
        # Cost of each consecutive edge on the reconstructed path.
        return {(node[0], node[1]): dist_matrix[node[0]][node[1]] for node in list(zip(path, path[1:]))}

    path = calculate_shortest_path()
    if pretty:
        # FIX: the docstring promises a str, but this branch previously returned
        # print(...)'s None.  Build the report, print it, and return it
        # (backward compatible: callers that ignored the None are unaffected).
        report = f"{pprint_path()}\nTotal Cost: {sum(calc_path_costs().values())}"
        print(report)
        return report
    return path, calc_path_costs()
| [
"numpy.full",
"numpy.fill_diagonal",
"numpy.ndenumerate"
] | [((1287, 1315), 'numpy.full', 'np.full', (['graph.shape', 'np.inf'], {}), '(graph.shape, np.inf)\n', (1294, 1315), True, 'import numpy as np\n'), ((1415, 1436), 'numpy.ndenumerate', 'np.ndenumerate', (['graph'], {}), '(graph)\n', (1429, 1436), True, 'import numpy as np\n'), ((1575, 1607), 'numpy.fill_diagonal', 'np.fill_diagonal', (['dist_matrix', '(0)'], {}), '(dist_matrix, 0)\n', (1591, 1607), True, 'import numpy as np\n'), ((1366, 1389), 'numpy.full', 'np.full', (['graph.shape', '(0)'], {}), '(graph.shape, 0)\n', (1373, 1389), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright: 2020, <NAME>, Berlin . All rights reserved.
import sys
import numpy as np
import matplotlib.pyplot as plt
from common import load_fft_data
# FFT length and sample period (1 kHz sampling).
N = 1024
T = 1.0 / 1000.0
# Column (frequency-bin) ranges zeroed out before plotting (notches).
NOTCHES = [
    (253, 259),
    (285, 290),
    (464, 472),
]
# Row range of the loaded FFT data to keep.
START = 250
END = 2010
# dB floor; values below this are zeroed after the log transform.
THRESHOLD = -170.0
def main():
    """Load an FFT matrix from the file given on the command line, clean it
    up (low-cut, notches, dB conversion, threshold), and show a spectrogram."""
    spectra = load_fft_data(sys.argv[1])
    # Keep only the row range of interest.
    spectra = spectra[START:END, :]
    # Zero the lowest bins (crude low-cut) and every notch band.
    spectra[:, :20] = 0.0
    for lo, hi in NOTCHES:
        spectra[:, lo:hi] = 0.0
    # Magnitudes to dB.
    spectra = 20 * np.log10(spectra)
    if THRESHOLD is not None:
        spectra[spectra < THRESHOLD] = 0.0
    # Transpose so frequency runs along the y axis.
    plt.imshow(
        spectra.T,
        origin='lower',
        cmap='jet',
        interpolation='nearest',
        aspect='auto'
    )
    plt.grid()
    plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.show",
"common.load_fft_data",
"matplotlib.pyplot.imshow",
"numpy.log10",
"matplotlib.pyplot.grid"
] | [((336, 362), 'common.load_fft_data', 'load_fft_data', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (349, 362), False, 'from common import load_fft_data\n'), ((644, 732), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ffts'], {'origin': '"""lower"""', 'cmap': '"""jet"""', 'interpolation': '"""nearest"""', 'aspect': '"""auto"""'}), "(ffts, origin='lower', cmap='jet', interpolation='nearest',\n aspect='auto')\n", (654, 732), True, 'import matplotlib.pyplot as plt\n'), ((779, 789), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (787, 789), True, 'import matplotlib.pyplot as plt\n'), ((794, 804), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (802, 804), True, 'import matplotlib.pyplot as plt\n'), ((538, 552), 'numpy.log10', 'np.log10', (['ffts'], {}), '(ffts)\n', (546, 552), True, 'import numpy as np\n')] |
import numpy as np
# Easing curves mapping normalized time t in [0, 1] to progress in [0, 1].
# The "InOut" variants splice the In curve (first half) and the Out curve
# (second half) with boolean masks, so they also work element-wise on arrays.
def EaseInSine(t):
    """Sinusoidal ease-in: slow start."""
    return 1 - np.sin((1 - t) * np.pi / 2)
def EaseOutSine(t):
    """Sinusoidal ease-out: slow finish."""
    return np.sin(t * np.pi / 2)
def EaseInOutSine(t):
    """Sinusoidal ease-in/out: slow start and finish."""
    return 0.5 - 0.5 * np.sin((1 - t * 2) * np.pi / 2)
def EaseInQuad(t):
    """Quadratic ease-in."""
    return t ** 2
def EaseOutQuad(t):
    """Quadratic ease-out (mirror of the ease-in)."""
    return 1 - EaseInQuad(1 - t)
def EaseInOutQuad(t):
    """Quadratic ease-in for the first half, ease-out for the second."""
    first_half = (t < 0.5) * 0.5 * EaseInQuad(t * 2)
    second_half = (t >= 0.5) * 0.5 * (EaseOutQuad((t - 0.5) * 2) + 1)
    return first_half + second_half
def EaseInCubic(t):
    """Cubic ease-in."""
    return t ** 3
def EaseOutCubic(t):
    """Cubic ease-out (mirror of the ease-in)."""
    return 1 - EaseInCubic(1 - t)
def EaseInOutCubic(t):
    """Cubic ease-in for the first half, ease-out for the second."""
    first_half = (t < 0.5) * 0.5 * EaseInCubic(t * 2)
    second_half = (t >= 0.5) * 0.5 * (EaseOutCubic((t - 0.5) * 2) + 1)
    return first_half + second_half
def EaseInQuart(t):
    """Quartic ease-in."""
    return t ** 4
def EaseOutQuart(t):
    """Quartic ease-out (mirror of the ease-in)."""
    return 1 - EaseInQuart(1 - t)
def EaseInOutQuart(t):
    """Quartic ease-in for the first half, ease-out for the second."""
    first_half = (t < 0.5) * 0.5 * EaseInQuart(t * 2)
    second_half = (t >= 0.5) * 0.5 * (EaseOutQuart((t - 0.5) * 2) + 1)
    return first_half + second_half
def EaseInQuint(t):
    """Quintic ease-in."""
    return t ** 5
def EaseOutQuint(t):
    """Quintic ease-out (mirror of the ease-in)."""
    return 1 - EaseInQuint(1 - t)
def EaseInOutQuint(t):
    """Quintic ease-in for the first half, ease-out for the second."""
    first_half = (t < 0.5) * 0.5 * EaseInQuint(t * 2)
    second_half = (t >= 0.5) * 0.5 * (EaseOutQuint((t - 0.5) * 2) + 1)
    return first_half + second_half
def EaseInExpo(t):
    """Exponential ease-in, normalized so t=0 maps to 0 and t=1 to 1."""
    return (2 ** (t * 10) - 1) / 1023
def EaseOutExpo(t):
    """Exponential ease-out (mirror of the ease-in)."""
    return 1 - EaseInExpo(1 - t)
def EaseInOutExpo(t):
    """Exponential ease-in for the first half, ease-out for the second."""
    first_half = (t < 0.5) * 0.5 * EaseInExpo(t * 2)
    second_half = (t >= 0.5) * 0.5 * (EaseOutExpo((t - 0.5) * 2) + 1)
    return first_half + second_half
def EaseInCirc(t):
    """Circular ease-in (quarter circle)."""
    return 1 - np.sqrt(1 - t ** 2)
def EaseOutCirc(t):
    """Circular ease-out (mirror of the ease-in)."""
    return 1 - EaseInCirc(1 - t)
def EaseInOutCirc(t):
    """Circular ease-in/out; the inner masks also zero the inactive half's
    argument so the sqrt never sees a value outside [0, 1]."""
    first_half = (t < 0.5) * 0.5 * EaseInCirc((t < 0.5) * t * 2)
    second_half = (t >= 0.5) * 0.5 * (EaseOutCirc((t >= 0.5) * (t - 0.5) * 2) + 1)
    return first_half + second_half
def EaseInBack(t):
    """Back ease-in: dips below zero before accelerating (overshoot factor np.e)."""
    return t ** 2 * (np.e * t - np.e + 1)
def EaseOutBack(t):
    """Back ease-out (mirror of the ease-in)."""
    return 1 - EaseInBack(1 - t)
def EaseInOutBack(t):
    """Back ease-in for the first half, ease-out for the second."""
    first_half = (t < 0.5) * 0.5 * EaseInBack(t * 2)
    second_half = (t >= 0.5) * 0.5 * (EaseOutBack((t - 0.5) * 2) + 1)
    return first_half + second_half
def EaseInElastic(t):
    """Elastic ease-in: exponentially growing oscillation ending at 1."""
    return EaseInExpo(t) * np.sin(t * np.pi * 6.5) / np.sin(np.pi * 6.5)
def EaseOutElastic(t):
    """Elastic ease-out (mirror of the ease-in)."""
    return 1 - EaseInElastic(1 - t)
def EaseInOutElastic(t):
    """Elastic ease-in/out with a linear bridge around the midpoint."""
    rising = (t < 0.46503) * 0.5 * EaseInElastic(t * 2)
    falling = (t > 0.53497) * 0.5 * (EaseOutElastic((t - 0.5) * 2) + 1)
    # Linear segment keeps the first derivative continuous across the join.
    bridge = ((0.46503 <= t) & (t <= 0.53497)) * (13.047404544869156 * t - 6.023702272434578)
    return rising + falling + bridge
def EaseInBounce(t):
    # Four parabolic arcs of increasing amplitude (1/64, 1/16, 1/4, 1),
    # selected by boolean masks so the expression also works element-wise
    # on numpy arrays.
    def _poly(_t):
        # Downward parabola on [0, 1] that peaks (value 1) at _t == 0.5.
        return 1-((_t-0.5)*2)**2
    v1 = (t<0.125) * _poly(t/0.125) * 0.015625
    v2 = ((0.125<=t) & (t<0.375)) * _poly((t-0.125)/0.25) * 0.0625
    v3 = ((0.375<=t) & (t<0.750)) * _poly((t-0.375)/0.375) * 0.25
    v4 = (0.750<=t) * _poly((t-0.75)/0.5) * 1
    return v1+v2+v3+v4
def EaseOutBounce(t):
    # Mirror image of the ease-in curve.
    return 1-EaseInBounce(1-t)
def EaseInOutBounce(t):
    # First half: scaled ease-in; second half: scaled-and-shifted ease-out.
    return (t<0.5) * 0.5 * EaseInBounce(t * 2) + (t>=0.5) * 0.5 * (EaseOutBounce((t-0.5)*2) + 1)
"numpy.sin",
"numpy.sqrt"
] | [((106, 127), 'numpy.sin', 'np.sin', (['(t * np.pi / 2)'], {}), '(t * np.pi / 2)\n', (112, 127), True, 'import numpy as np\n'), ((52, 79), 'numpy.sin', 'np.sin', (['((1 - t) * np.pi / 2)'], {}), '((1 - t) * np.pi / 2)\n', (58, 79), True, 'import numpy as np\n'), ((1265, 1284), 'numpy.sqrt', 'np.sqrt', (['(1 - t ** 2)'], {}), '(1 - t ** 2)\n', (1272, 1284), True, 'import numpy as np\n'), ((1764, 1783), 'numpy.sin', 'np.sin', (['(np.pi * 6.5)'], {}), '(np.pi * 6.5)\n', (1770, 1783), True, 'import numpy as np\n'), ((166, 197), 'numpy.sin', 'np.sin', (['((1 - t * 2) * np.pi / 2)'], {}), '((1 - t * 2) * np.pi / 2)\n', (172, 197), True, 'import numpy as np\n'), ((1738, 1761), 'numpy.sin', 'np.sin', (['(t * np.pi * 6.5)'], {}), '(t * np.pi * 6.5)\n', (1744, 1761), True, 'import numpy as np\n')] |
import cv2
import os
import numpy as np
from PIL import Image
# Local Binary Patterns Histograms face recogniser.
# NOTE(review): createLBPHFaceRecognizer() is the legacy opencv-contrib API;
# newer OpenCV releases renamed it to cv2.face.LBPHFaceRecognizer_create().
recogniser = cv2.face.createLBPHFaceRecognizer()
# Directory holding the training images; getimageIds() below parses the
# person id out of each file name.
path = 'C:\\Users\\MY PC\\PycharmProjects\\untitled\\Images'
def getimageIds(path):
    """Collect grayscale training faces and their numeric labels from *path*.

    Each file name is expected to carry the label in its second dot-separated
    field (e.g. ``user.42.jpg``).  Every sample is also flashed in a preview
    window while loading.  Returns ``(labels, faces)``.
    """
    face_samples = []
    label_list = []
    for image_file in [os.path.join(path, name) for name in os.listdir(path)]:
        grey = Image.open(image_file).convert('L')
        pixels = np.array(grey, 'uint8')
        label = int(os.path.split(image_file)[-1].split('.')[1])
        face_samples.append(pixels)
        print(label)
        label_list.append(label)
        cv2.imshow('Training the dataset', pixels)
        cv2.waitKey(10)
    return label_list, face_samples
# Gather the training data, fit the LBPH model and persist it to disk.
Ids,faces = getimageIds(path)
recogniser.train(faces,np.array(Ids))
recogniser.save('recogniser/recogniser_all.yml')
# Close the preview window opened during training.
cv2.destroyAllWindows()
| [
"cv2.waitKey",
"cv2.destroyAllWindows",
"PIL.Image.open",
"cv2.face.createLBPHFaceRecognizer",
"numpy.array",
"cv2.imshow",
"os.path.join",
"os.listdir",
"os.path.split"
] | [((77, 112), 'cv2.face.createLBPHFaceRecognizer', 'cv2.face.createLBPHFaceRecognizer', ([], {}), '()\n', (110, 112), False, 'import cv2\n'), ((764, 787), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (785, 787), False, 'import cv2\n'), ((700, 713), 'numpy.array', 'np.array', (['Ids'], {}), '(Ids)\n', (708, 713), True, 'import numpy as np\n'), ((216, 237), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (228, 237), False, 'import os\n'), ((395, 421), 'numpy.array', 'np.array', (['faceimg', '"""uint8"""'], {}), "(faceimg, 'uint8')\n", (403, 421), True, 'import numpy as np\n'), ((560, 602), 'cv2.imshow', 'cv2.imshow', (['"""Training the dataset"""', 'facenp'], {}), "('Training the dataset', facenp)\n", (570, 602), False, 'import cv2\n'), ((610, 625), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (621, 625), False, 'import cv2\n'), ((246, 262), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (256, 262), False, 'import os\n'), ((343, 364), 'PIL.Image.open', 'Image.open', (['imagepath'], {}), '(imagepath)\n', (353, 364), False, 'from PIL import Image\n'), ((438, 462), 'os.path.split', 'os.path.split', (['imagepath'], {}), '(imagepath)\n', (451, 462), False, 'import os\n')] |
from flask import Flask, request, Response, jsonify, send_from_directory
import json
import config
import util
import os
import traceback
import numpy as np
import tensorflow as tf
import time
import wikipedia
# Listen port: default 5000, overridable via the PORT environment variable
# (e.g. when the hosting platform assigns the port).
port = 5000
if os.getenv("PORT"):
    port = int(os.getenv("PORT"))
# Serve files under ./static at the /static URL prefix.
app = Flask(__name__, static_url_path='/static')
@app.route('/', methods=['GET'])
def index():
    """Health-check endpoint: report the host name we are running on."""
    import socket
    hostname = socket.getfqdn()
    return "it's working! " + hostname
@app.route('/', methods=['POST'])
def indexPost():
    """Echo endpoint: log the incoming JSON body and acknowledge it."""
    body = json.loads(request.get_data())
    print(body)
    reply = {'type': 'text', 'content': 'Roger that'}
    return jsonify(
        status=200,
        replies=[reply],
        conversation={'memory': {'key': 'value'}},
    )
@app.route('/wiki', methods=['POST'])
def wiki_search():
    """Reply with a two-sentence Wikipedia summary of the asked-about topic.

    The topic is extracted from a Recast.AI-style payload (``nlp.source``);
    pronouns like "it"/"this" fall back to the ``plankton`` entry stored in
    the conversation memory.
    """
    payload = request.get_json(silent=True, force=True)
    print("payload")
    print(payload)
    if payload == None:
        # get_json failed -- try to parse the raw body ourselves.
        if request.get_data() != None:
            payload = json.loads(request.get_data())
            print(payload)
    if payload != None:
        if payload.get("nlp").get("source"):
            print("message: {}".format(payload.get("nlp").get("source")))
            message = payload.get("nlp").get("source")
            # Strip the conversational boilerplate around the actual topic
            # (order matters: applied exactly as listed).
            for filler in (
                'can you tell me more about',
                'can you tell me more information about',
                'can you tell me some information about',
                'can you please tell me more about it',
                'please tell me more information about',
                'can you give me more information about',
                'can you please give me more information about',
                'can you provide more information about',
                'can you please provide more information about',
                'can you explain a little bit more what is a',
                'can you please explain a little bit more what is a',
                '?',
            ):
                message = message.replace(filler, '')
            query = message
            compact = message.replace(" ", "")
            if compact == 'it' or compact == 'this':
                # Pronoun only: fall back to the remembered topic.
                if payload.get("conversation").get("memory") != None:
                    query = payload.get("conversation").get("memory").get("plankton")
            elif payload.get("conversation").get("memory") != None:
                query = payload.get("conversation").get("memory").get("plankton")
    # NOTE(review): if payload is None (or carries no nlp.source), ``query``
    # is unbound here and this raises NameError -- preserved from the
    # original implementation.
    response = wikipedia.summary(query, sentences=2)
    return jsonify(
        status=200,
        replies=[{'type': 'text', 'content': response}],
        conversation={'memory': {}},
    )
@app.route('/tensorclassify', methods=['POST'])
def tf_classify():
    """Classify an image with the retrained MobileNet graph and reply
    with the best label.

    The image URL is taken from a Recast.AI-style payload under
    ``nlp.entities.url[0].raw``; a demo jellyfish image is used as a
    fallback.  The winning label is also stored in the conversation
    memory under ``plankton`` so follow-up questions can refer to it.
    """
    import socket
    print("In tf_classify handler from {}".format(socket.getfqdn()))
    # Default demo image, used when the payload carries no URL entity.
    file_name = "https://www.eopugetsound.org/sites/default/files/styles/magazinewidth_592px/public/topical_article/images/moon_jellyfish.jpg?itok=Esreg6zX"
    payload = request.get_json(silent=True, force=True)
    print("payload")
    print(payload)
    if payload is None:
        # get_json failed -- try to parse the raw body ourselves.
        if request.get_data() is not None:
            payload = json.loads(request.get_data())
    if payload is not None:
        if payload.get("nlp").get("entities").get("url"):
            file_name = payload.get("nlp").get("entities").get("url")[0].get("raw")
    # Retrained MobileNet parameters (must match the training pipeline).
    model_file = "models/mobilenet/retrained_graph.pb"
    label_file = "models/mobilenet/retrained_labels.txt"
    input_height = 224
    input_width = 224
    input_mean = 128
    input_std = 128
    input_layer = "input"
    output_layer = "final_result"
    graph = util.load_graph(model_file)
    t = util.read_tensor_from_image_file(file_name,
                                     input_height=input_height,
                                     input_width=input_width,
                                     input_mean=input_mean,
                                     input_std=input_std)
    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)
    with tf.Session(graph=graph) as sess:
        start = time.time()
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
        end = time.time()
    results = np.squeeze(results)
    # Indices of the five highest-scoring classes, best first.
    top_k = results.argsort()[-5:][::-1]
    labels = util.load_labels(label_file)
    print('\nEvaluation time (1-image): {:.3f}s\n'.format(end - start))
    template = "{} (score={:0.5f})"
    print(top_k)
    for i in top_k:
        print(template.format(labels[i], results[i]))
    # BUG FIX: the original tested results[0] (the score of class 0, not of
    # the top prediction) and then unconditionally overwrote ``response``,
    # which made the low-confidence message dead code.  Test the top
    # prediction's score and keep whichever message was chosen.
    if results[top_k[0]] < 0.1:
        response = "I really don't know, my best guess is that this looks like a " + labels[top_k[0]]
    else:
        response = 'I think this is a ' + labels[top_k[0]]
    return jsonify(
        status=200,
        replies=[{'type': 'text', 'content': response}],
        conversation={'memory': {'plankton': labels[top_k[0]]}},
    )
@app.route('/errors', methods=['POST'])
def errors():
    """Error-webhook endpoint: log the reported error payload."""
    print('in /errors !')
    body = json.loads(request.get_data())
    print(body)
    return jsonify(status=200)
#app.run(port=port)
if __name__ == '__main__':
    print("Launching web application...")
    # Alternative launch configurations kept for reference:
    #app.run(host='0.0.0.0', port=port, ssl_context=('./keys/keys.key', './keys/keys.crt'))
    #app.run(host='0.0.0.0', port=port)
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader --
    # not suitable for production deployments.
    app.run(debug=True,host='0.0.0.0', port=port)
| [
"util.load_graph",
"flask.Flask",
"tensorflow.Session",
"time.time",
"flask.jsonify",
"socket.getfqdn",
"flask.request.get_data",
"wikipedia.summary",
"util.load_labels",
"numpy.squeeze",
"util.read_tensor_from_image_file",
"os.getenv",
"flask.request.get_json"
] | [((227, 244), 'os.getenv', 'os.getenv', (['"""PORT"""'], {}), "('PORT')\n", (236, 244), False, 'import os\n'), ((288, 330), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '"""/static"""'}), "(__name__, static_url_path='/static')\n", (293, 330), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((544, 663), 'flask.jsonify', 'jsonify', ([], {'status': '(200)', 'replies': "[{'type': 'text', 'content': 'Roger that'}]", 'conversation': "{'memory': {'key': 'value'}}"}), "(status=200, replies=[{'type': 'text', 'content': 'Roger that'}],\n conversation={'memory': {'key': 'value'}})\n", (551, 663), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((783, 824), 'flask.request.get_json', 'request.get_json', ([], {'silent': '(True)', 'force': '(True)'}), '(silent=True, force=True)\n', (799, 824), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((2801, 2838), 'wikipedia.summary', 'wikipedia.summary', (['query'], {'sentences': '(2)'}), '(query, sentences=2)\n', (2818, 2838), False, 'import wikipedia\n'), ((2851, 2952), 'flask.jsonify', 'jsonify', ([], {'status': '(200)', 'replies': "[{'type': 'text', 'content': response}]", 'conversation': "{'memory': {}}"}), "(status=200, replies=[{'type': 'text', 'content': response}],\n conversation={'memory': {}})\n", (2858, 2952), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((3623, 3664), 'flask.request.get_json', 'request.get_json', ([], {'silent': '(True)', 'force': '(True)'}), '(silent=True, force=True)\n', (3639, 3664), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((6392, 6411), 'flask.jsonify', 'jsonify', ([], {'status': '(200)'}), '(status=200)\n', (6399, 6411), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((261, 278), 'os.getenv', 'os.getenv', (['"""PORT"""'], {}), "('PORT')\n", (270, 
278), False, 'import os\n'), ((426, 442), 'socket.getfqdn', 'socket.getfqdn', ([], {}), '()\n', (440, 442), False, 'import socket\n'), ((514, 532), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (530, 532), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((901, 919), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (917, 919), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((3264, 3280), 'socket.getfqdn', 'socket.getfqdn', ([], {}), '()\n', (3278, 3280), False, 'import socket\n'), ((3741, 3759), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (3757, 3759), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((4400, 4427), 'util.load_graph', 'util.load_graph', (['model_file'], {}), '(model_file)\n', (4415, 4427), False, 'import util\n'), ((4444, 4587), 'util.read_tensor_from_image_file', 'util.read_tensor_from_image_file', (['file_name'], {'input_height': 'input_height', 'input_width': 'input_width', 'input_mean': 'input_mean', 'input_std': 'input_std'}), '(file_name, input_height=input_height,\n input_width=input_width, input_mean=input_mean, input_std=input_std)\n', (4476, 4587), False, 'import util\n'), ((5270, 5289), 'numpy.squeeze', 'np.squeeze', (['results'], {}), '(results)\n', (5280, 5289), True, 'import numpy as np\n'), ((5361, 5389), 'util.load_labels', 'util.load_labels', (['label_file'], {}), '(label_file)\n', (5377, 5389), False, 'import util\n'), ((5998, 6127), 'flask.jsonify', 'jsonify', ([], {'status': '(200)', 'replies': "[{'type': 'text', 'content': response}]", 'conversation': "{'memory': {'plankton': labels[top_k[0]]}}"}), "(status=200, replies=[{'type': 'text', 'content': response}],\n conversation={'memory': {'plankton': labels[top_k[0]]}})\n", (6005, 6127), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((6362, 6380), 'flask.request.get_data', 
'request.get_data', ([], {}), '()\n', (6378, 6380), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((961, 979), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (977, 979), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((3801, 3819), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (3817, 3819), False, 'from flask import Flask, request, Response, jsonify, send_from_directory\n'), ((5023, 5046), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (5033, 5046), True, 'import tensorflow as tf\n'), ((5078, 5089), 'time.time', 'time.time', ([], {}), '()\n', (5087, 5089), False, 'import time\n'), ((5235, 5246), 'time.time', 'time.time', ([], {}), '()\n', (5244, 5246), False, 'import time\n')] |
import datetime
import time
import typing
import cv2
import imutils
import imutils.video
import numpy as np
from ..common.fps import FPSTracker
class MotionDetector:
    """Frame-differencing motion detector driven by an imutils VideoStream.

    ``run()`` is a generator: it grabs frames, compares each one against a
    reference ("target") frame, and yields ``self`` after every processed
    frame so the caller can inspect ``is_occupied`` / ``marked_frame``.
    """
    video_stream: imutils.video.VideoStream
    # Reference frame that incoming frames are diffed against.
    target_frame: np.array = None
    # Most recently grabbed frame (annotated in place with motion rectangles).
    current_frame: np.array = None
    # Copy of current_frame carrying the status/FPS/timestamp overlay.
    marked_frame: np.array = None
    fps_tracker: FPSTracker
    # Upper bound on the processing rate; run() sleeps to honour it.
    max_fps: int
    # Contours smaller than this area are ignored as noise.
    min_area: int = 500
    # The reference frame is refreshed once it is older than this.
    movement_ttl = datetime.timedelta(seconds=20)
    # True while the last processed frame contained significant motion.
    is_occupied: bool = False
    # When the reference frame was last (re)set.
    last_change_timestamp = None
    # Whether to display the debug windows via cv2.imshow.
    imshow: bool
    def __init__(self, *, video_stream: imutils.video.VideoStream, max_fps: int, imshow: bool) -> None:
        """Store the stream/configuration and create a fresh FPS tracker."""
        self.fps_tracker = FPSTracker()
        self.video_stream = video_stream
        self.max_fps = max_fps
        self.imshow = imshow
    def run(self) -> typing.Iterator:
        """Process frames until the stream ends, yielding self per frame."""
        self.fps_tracker.start()
        while True:
            start_time = datetime.datetime.now()
            self.current_frame = self.video_stream.read()
            # If the frame could not be grabbed, then we have reached the end of the video
            if self.current_frame is None:
                break
            self._process_current_frame()
            yield self
            end_time = datetime.datetime.now()
            self.fps_tracker.update()
            # Sleep off the remainder of the frame budget to cap at max_fps.
            time_to_sleep = 1 / self.max_fps - (end_time - start_time).total_seconds()
            if time_to_sleep > 0:
                time.sleep(time_to_sleep)
        self.realese()
    def _process_current_frame(self) -> None:
        """Diff the current frame against the reference and flag motion."""
        now = datetime.datetime.now()
        # Convert frame to grayscale, and blur it to suppress pixel noise
        gray = cv2.cvtColor(self.current_frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21,), 0)
        self.is_occupied = False
        # (Re)initialise the reference frame on first use or once it is stale.
        if self.target_frame is None or (now - self.last_change_timestamp) > self.movement_ttl:
            self.target_frame = gray
            self.last_change_timestamp = now
            return
        # Compute the absolute difference between the current frame and first frame
        frame_delta = cv2.absdiff(self.target_frame, gray)
        thresh = cv2.threshold(frame_delta, 25, 255, cv2.THRESH_BINARY)[1]
        # Dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)
        contours = cv2.findContours(
            image=thresh,
            mode=cv2.RETR_EXTERNAL,
            method=cv2.CHAIN_APPROX_SIMPLE,
        )
        contours = imutils.grab_contours(contours)
        # Loop over the contours
        for contour in contours:
            # If the contour is too small, ignore it
            if cv2.contourArea(contour) < self.min_area:
                continue
            # Compute the bounding box for the contour, draw it on the frame,
            # and update the text
            x, y, w, h = cv2.boundingRect(contour)
            cv2.rectangle(self.current_frame, (x, y,), (x + w, y + h,), (0, 0, 255,), 1)
            self.is_occupied = True
        self._draw_result(thresh=thresh, frame_delta=frame_delta)
    def realese(self) -> None:
        """Tear down the debug windows.

        NOTE(review): the name is a typo for "release"; kept as-is because
        run() (and possibly external callers) use it.
        """
        if self.imshow:
            cv2.destroyAllWindows()
    def _draw_result(self, thresh: np.array, frame_delta: np.array) -> None:
        """Render the status/FPS/timestamp overlay onto marked_frame."""
        self.marked_frame = np.copy(self.current_frame)
        cv2.putText(
            img=self.marked_frame,
            text=f'Status: {"Occupied" if self.is_occupied else "Unoccupied"}',
            org=(10, 20,),
            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
            fontScale=0.5,
            color=(0, 0, 255,) if self.is_occupied else (0, 255, 0,),
            thickness=1,
        )
        current_fps = round(self.fps_tracker.fps(), 2)
        cv2.putText(
            img=self.marked_frame,
            text=f'FPS: {current_fps}',
            org=(10, 50,),
            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
            fontScale=0.5,
            # Red when measurably below the requested frame rate.
            color=(0, 0, 255,) if self.max_fps - current_fps > 0.1 else (0, 255, 0,),
            thickness=1,
        )
        cv2.putText(
            img=self.marked_frame,
            text=datetime.datetime.now().strftime('%d.%m.%Y, %H:%M:%S'),
            org=(10, self.current_frame.shape[0] - 10,),
            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
            fontScale=0.5,
            color=(0, 0, 255,),
            thickness=1,
        )
        if self.imshow:
            cv2.imshow('Security Feed', self.marked_frame)
            cv2.imshow('Thresh', thresh)
            cv2.imshow('Frame Delta', frame_delta)
| [
"cv2.GaussianBlur",
"cv2.contourArea",
"cv2.putText",
"numpy.copy",
"cv2.dilate",
"cv2.cvtColor",
"cv2.destroyAllWindows",
"cv2.threshold",
"cv2.imshow",
"time.sleep",
"cv2.rectangle",
"datetime.timedelta",
"imutils.grab_contours",
"cv2.absdiff",
"cv2.boundingRect",
"datetime.datetime.... | [((405, 435), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(20)'}), '(seconds=20)\n', (423, 435), False, 'import datetime\n'), ((1522, 1545), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1543, 1545), False, 'import datetime\n'), ((1612, 1664), 'cv2.cvtColor', 'cv2.cvtColor', (['self.current_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(self.current_frame, cv2.COLOR_BGR2GRAY)\n', (1624, 1664), False, 'import cv2\n'), ((1680, 1715), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(21, 21)', '(0)'], {}), '(gray, (21, 21), 0)\n', (1696, 1715), False, 'import cv2\n'), ((2108, 2144), 'cv2.absdiff', 'cv2.absdiff', (['self.target_frame', 'gray'], {}), '(self.target_frame, gray)\n', (2119, 2144), False, 'import cv2\n'), ((2345, 2383), 'cv2.dilate', 'cv2.dilate', (['thresh', 'None'], {'iterations': '(2)'}), '(thresh, None, iterations=2)\n', (2355, 2383), False, 'import cv2\n'), ((2403, 2494), 'cv2.findContours', 'cv2.findContours', ([], {'image': 'thresh', 'mode': 'cv2.RETR_EXTERNAL', 'method': 'cv2.CHAIN_APPROX_SIMPLE'}), '(image=thresh, mode=cv2.RETR_EXTERNAL, method=cv2.\n CHAIN_APPROX_SIMPLE)\n', (2419, 2494), False, 'import cv2\n'), ((2556, 2587), 'imutils.grab_contours', 'imutils.grab_contours', (['contours'], {}), '(contours)\n', (2577, 2587), False, 'import imutils\n'), ((3344, 3371), 'numpy.copy', 'np.copy', (['self.current_frame'], {}), '(self.current_frame)\n', (3351, 3371), True, 'import numpy as np\n'), ((3381, 3630), 'cv2.putText', 'cv2.putText', ([], {'img': 'self.marked_frame', 'text': 'f"""Status: {\'Occupied\' if self.is_occupied else \'Unoccupied\'}"""', 'org': '(10, 20)', 'fontFace': 'cv2.FONT_HERSHEY_SIMPLEX', 'fontScale': '(0.5)', 'color': '((0, 0, 255) if self.is_occupied else (0, 255, 0))', 'thickness': '(1)'}), '(img=self.marked_frame, text=\n f"Status: {\'Occupied\' if self.is_occupied else \'Unoccupied\'}", org=(10,\n 20), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(0, 0, 
255\n ) if self.is_occupied else (0, 255, 0), thickness=1)\n', (3392, 3630), False, 'import cv2\n'), ((3778, 3998), 'cv2.putText', 'cv2.putText', ([], {'img': 'self.marked_frame', 'text': 'f"""FPS: {current_fps}"""', 'org': '(10, 50)', 'fontFace': 'cv2.FONT_HERSHEY_SIMPLEX', 'fontScale': '(0.5)', 'color': '((0, 0, 255) if self.max_fps - current_fps > 0.1 else (0, 255, 0))', 'thickness': '(1)'}), "(img=self.marked_frame, text=f'FPS: {current_fps}', org=(10, 50),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(0, 0, 255) if \n self.max_fps - current_fps > 0.1 else (0, 255, 0), thickness=1)\n", (3789, 3998), False, 'import cv2\n'), ((880, 903), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (901, 903), False, 'import datetime\n'), ((1209, 1232), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1230, 1232), False, 'import datetime\n'), ((2162, 2216), 'cv2.threshold', 'cv2.threshold', (['frame_delta', '(25)', '(255)', 'cv2.THRESH_BINARY'], {}), '(frame_delta, 25, 255, cv2.THRESH_BINARY)\n', (2175, 2216), False, 'import cv2\n'), ((2928, 2953), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (2944, 2953), False, 'import cv2\n'), ((2966, 3039), 'cv2.rectangle', 'cv2.rectangle', (['self.current_frame', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(1)'], {}), '(self.current_frame, (x, y), (x + w, y + h), (0, 0, 255), 1)\n', (2979, 3039), False, 'import cv2\n'), ((3214, 3237), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3235, 3237), False, 'import cv2\n'), ((4452, 4498), 'cv2.imshow', 'cv2.imshow', (['"""Security Feed"""', 'self.marked_frame'], {}), "('Security Feed', self.marked_frame)\n", (4462, 4498), False, 'import cv2\n'), ((4511, 4539), 'cv2.imshow', 'cv2.imshow', (['"""Thresh"""', 'thresh'], {}), "('Thresh', thresh)\n", (4521, 4539), False, 'import cv2\n'), ((4552, 4590), 'cv2.imshow', 'cv2.imshow', (['"""Frame Delta"""', 'frame_delta'], {}), "('Frame Delta', 
frame_delta)\n", (4562, 4590), False, 'import cv2\n'), ((1411, 1436), 'time.sleep', 'time.sleep', (['time_to_sleep'], {}), '(time_to_sleep)\n', (1421, 1436), False, 'import time\n'), ((2723, 2747), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (2738, 2747), False, 'import cv2\n'), ((4161, 4184), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4182, 4184), False, 'import datetime\n')] |
# The MIT License (MIT)
#
# Copyright (c) 2020 ETH Zurich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import exputil
import numpy as np
import os
local_shell = exputil.LocalShell()
# Create the data files
local_shell.remove_force_recursive("data")
local_shell.make_full_dir("data")
with open("data/traffic_goodput_total_data_sent_vs_runtime.csv", "w+") \
        as f_out_data_sent_vs_runtime, \
        open("data/traffic_goodput_rate_vs_slowdown.csv", "w+") \
        as f_out_rate_vs_slowdown, \
        open("data/run_dirs.csv", "w+") \
        as f_out_run_dirs:
    for protocol_chosen in ["tcp","udp"]:
        # Only run directories whose name contains the protocol are relevant.
        for run_dir_details in (run_dirs:=["runs/"+fic for fic in os.listdir("runs") if os.path.isdir("runs/"+fic) and protocol_chosen in fic]):
            run_dir = run_dir_details
            #get simulation duration from directory name
            duration_s = float(run_dir_details.split("Mbps_for_")[1].split("s_with_")[0])
            logs_ns3_dir = run_dir + "/logs_ns3"
            # Finished filename to check if done
            finished_filename = logs_ns3_dir + "/finished.txt"
            if not (exputil.LocalShell().file_exists(finished_filename)
                    and exputil.LocalShell().read_file(finished_filename).strip() == "Yes"):
                print("Skipping: " + run_dir)
            else:
                print("Processing: " + run_dir)
                if protocol_chosen == "tcp":
                    # Sum up all goodput (column 7 = amount sent in byte)
                    tcp_flows_csv_columns = exputil.read_csv_direct_in_columns(
                        logs_ns3_dir + "/tcp_flows.csv",
                        "idx_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,string,string"
                    )
                    amount_sent_byte_list = tcp_flows_csv_columns[7]
                    total_sent_byte = float(np.sum(amount_sent_byte_list))
                elif protocol_chosen == "udp":
                    # Sum up all goodput (column 10 = payload sent in byte)
                    udp_bursts_incoming_csv_columns = exputil.read_csv_direct_in_columns(
                        logs_ns3_dir + "/udp_bursts_incoming.csv",
                        "idx_int,pos_int,pos_int,pos_float,pos_int,pos_int,pos_float,pos_float,pos_float,pos_float,pos_float,string"
                    )
                    amount_payload_sent_byte_list = udp_bursts_incoming_csv_columns[10]
                    total_sent_byte = float(np.sum(amount_payload_sent_byte_list))
                # Sum up total runtime
                timing_results_csv_columns = exputil.read_csv_direct_in_columns(
                    logs_ns3_dir + "/timing_results.csv",
                    "string,pos_int"
                )
                duration_ns_list = timing_results_csv_columns[1]
                total_duration_ns = float(np.sum(duration_ns_list))
                # Write into data files
                # tcp/udp,<total sent (byte),<duration (ns)>
                f_out_data_sent_vs_runtime.write("%s,%.10f,%.10f\n" % (
                    protocol_chosen,
                    total_sent_byte,
                    total_duration_ns
                ))
                # tcp/udp,<Mbit/s>,<slow-down (real s / sim s)
                f_out_rate_vs_slowdown.write("%s,%.10f,%.10f\n" % (
                    protocol_chosen,
                    (total_sent_byte / duration_s) / 125000.0,
                    (total_duration_ns / 1e9) / duration_s
                ))
                # Run directory (to investigate)
                f_out_run_dirs.write(run_dir + "\n")
# Execute plots
local_shell.remove_force_recursive("pdf")
local_shell.make_full_dir("pdf")
local_shell.perfect_exec("cd plots; gnuplot plot_goodput_total_data_sent_vs_runtime.plt")
local_shell.perfect_exec("cd plots; gnuplot plot_goodput_rate_vs_slowdown.plt")
| [
"numpy.sum",
"os.path.isdir",
"exputil.LocalShell",
"exputil.read_csv_direct_in_columns",
"os.listdir"
] | [((1175, 1195), 'exputil.LocalShell', 'exputil.LocalShell', ([], {}), '()\n', (1193, 1195), False, 'import exputil\n'), ((3150, 3244), 'exputil.read_csv_direct_in_columns', 'exputil.read_csv_direct_in_columns', (["(logs_ns3_dir + '/timing_results.csv')", '"""string,pos_int"""'], {}), "(logs_ns3_dir + '/timing_results.csv',\n 'string,pos_int')\n", (3184, 3244), False, 'import exputil\n'), ((1669, 1687), 'os.listdir', 'os.listdir', (['"""runs"""'], {}), "('runs')\n", (1679, 1687), False, 'import os\n'), ((2357, 2514), 'exputil.read_csv_direct_in_columns', 'exputil.read_csv_direct_in_columns', (["(logs_ns3_dir + '/tcp_flows.csv')", '"""idx_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,string,string"""'], {}), "(logs_ns3_dir + '/tcp_flows.csv',\n 'idx_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,pos_int,string,string'\n )\n", (2391, 2514), False, 'import exputil\n'), ((3340, 3364), 'numpy.sum', 'np.sum', (['duration_ns_list'], {}), '(duration_ns_list)\n', (3346, 3364), True, 'import numpy as np\n'), ((1691, 1719), 'os.path.isdir', 'os.path.isdir', (["('runs/' + fic)"], {}), "('runs/' + fic)\n", (1704, 1719), False, 'import os\n'), ((2608, 2637), 'numpy.sum', 'np.sum', (['amount_sent_byte_list'], {}), '(amount_sent_byte_list)\n', (2614, 2637), True, 'import numpy as np\n'), ((2741, 2941), 'exputil.read_csv_direct_in_columns', 'exputil.read_csv_direct_in_columns', (["(logs_ns3_dir + '/udp_bursts_incoming.csv')", '"""idx_int,pos_int,pos_int,pos_float,pos_int,pos_int,pos_float,pos_float,pos_float,pos_float,pos_float,string"""'], {}), "(logs_ns3_dir +\n '/udp_bursts_incoming.csv',\n 'idx_int,pos_int,pos_int,pos_float,pos_int,pos_int,pos_float,pos_float,pos_float,pos_float,pos_float,string'\n )\n", (2775, 2941), False, 'import exputil\n'), ((2057, 2077), 'exputil.LocalShell', 'exputil.LocalShell', ([], {}), '()\n', (2075, 2077), False, 'import exputil\n'), ((3050, 3087), 'numpy.sum', 'np.sum', (['amount_payload_sent_byte_list'], {}), 
'(amount_payload_sent_byte_list)\n', (3056, 3087), True, 'import numpy as np\n'), ((2118, 2138), 'exputil.LocalShell', 'exputil.LocalShell', ([], {}), '()\n', (2136, 2138), False, 'import exputil\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
*This introductory sentence should state the intent and goal of the tutorial. Keep it brief.
This next block should state any assumptions that you, the writer, are making. Present them in list form.*
Let us start with ...
.. math::
    \\nabla\\cdot( A \\cdot \\nabla u ) + B u + C = 0
This is the first tutorial example where we actually use finite elements to compute something.
We will solve a simple version of Poisson's equation with zero boundary values, but a nonzero right-hand side:
.. math::
    - \\Delta u & = 1 \\quad{\\mathrm{in}}\\quad\\Omega\\\\
    u & = 0 \\quad{\\mathrm{on}}\\quad\\partial\\Omega\\\\
We will solve this equation on the square domain $\\Omega=[-1,1]^2$.
First, the library must be imported.
To avoid name clashes with other libraries we suggest importing `pygimli` and aliasing it to the simple abbreviation 'g', e.g., by using
"""
import pygimli as g
from pygimli.solver import solvePoisson
"""
As a result, all :ref:`gimli` objects (classes and functions) can be referred to with a preceding `g.`, e.g.,
printing the version string for gimli.
"""
import numpy as np
#grid = g.createGrid( np.linspace( -1, 1, 5 ), np.linspace( -1, 1, 5 ) )
##.pot( [1. / 3., 1. / 3.], u)
#u = []
#for i in range( 4 ):
#grid = grid.createH2()
#grid.createNeighbourInfos()
#u = solvePoisson( grid, f = 1., u0 = grid.findBoundaryByMarker( 1 ), verbose = True )
#print grid.findCell( [1. / 3., 1. / 3.] ).pot( [1. / 3., 1. / 3.], u)
##grid.createNeighbourInfos( True )
from pygimli.viewer import showMesh
from pygimli.mplviewer import *
import pylab as P
# Regular grid on [-1, 1]^3 with 20 nodes per dimension.
grid = g.createGrid( np.linspace( -1, 1, 20 ), np.linspace( -1, 1, 20 ), np.linspace( -1, 1, 20 ) )
# Solve -laplace(u) = 1 with u = 0 on the outer boundary (marker 1).
u = solvePoisson( grid, f = 1., uBoundary = [ grid.findBoundaryByMarker( 1 ), 0], verbose = True )
#ax = showMesh( grid, data = u, filled = True, showLater = True, colorBar = True, orientation = 'vertical', label = 'Solution er$u$' )[0]
#drawMesh( ax, grid )
"""
.. error::
    do we find an analytical solution for this example?
"""
# Re-tag the outer boundary so different faces can receive different
# Dirichlet values: +x face -> marker 2, +y face -> marker 3,
# -y face -> marker 4.
for b in grid.boundaries():
    if b.marker() == 1:
        if b.norm()[0] == 1:
            b.setMarker( 2 )
        if b.norm()[1] == 1:
            b.setMarker( 3 )
        if b.norm()[1] == -1:
            b.setMarker( 4 )
# Laplace problem (f = 0) with u = 1 on marker 2 and u = 0 on marker 4.
u = solvePoisson( grid, f = 0., uBoundary = [ [ grid.findBoundaryByMarker( 2 ), 1.0 ],
                                             [ grid.findBoundaryByMarker( 4 ), 0.0 ] ], verbose = True )
#ax = showMesh( grid, data = u, filled = True, showLater = True, colorBar = True, orientation = 'vertical', label = 'Solution $u$' )[0]
#drawMesh( ax, grid )
#drawSelectedMeshBoundaries( ax, grid.findBoundaryByMarker( 1 ), color = (1.0, 0.0, 0.0), linewidth=2 )
#drawSelectedMeshBoundaries( ax, grid.findBoundaryByMarker( 2 ), color = (0.0, 1.0, 0.0), linewidth=2 )
#drawSelectedMeshBoundaries( ax, grid.findBoundaryByMarker( 3 ), color = (0.0, 0.0, 1.0), linewidth=2 )
# Attach the solution to the mesh and export for e.g. ParaView.
grid.addExportData('u', u )
grid.exportVTK( "grid" )
P.show()
| [
"pylab.show",
"numpy.linspace"
] | [((3073, 3081), 'pylab.show', 'P.show', ([], {}), '()\n', (3079, 3081), True, 'import pylab as P\n'), ((1682, 1704), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(20)'], {}), '(-1, 1, 20)\n', (1693, 1704), True, 'import numpy as np\n'), ((1708, 1730), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(20)'], {}), '(-1, 1, 20)\n', (1719, 1730), True, 'import numpy as np\n'), ((1734, 1756), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(20)'], {}), '(-1, 1, 20)\n', (1745, 1756), True, 'import numpy as np\n')] |
# Transform a picture into a mesh
from vedo import Picture, dataurl, show
import numpy as np
pic = Picture(dataurl+"images/dog.jpg").smooth(5)
msh = pic.tomesh() # make a quad-mesh out of it
# build a scalar array with intensities
rgb = msh.pointdata["RGBA"]
intensity = np.sum(rgb, axis=1)
intensityz = np.zeros_like(rgb)
intensityz[:,2] = intensity / 10
# set the new vertex points
pts = msh.points() + intensityz
msh.points(pts)
# more cosmetics
msh.triangulate().smooth()
msh.lighting("default").lineWidth(0.1)
msh.cmap("bone", "RGBA").addScalarBar()
msht = pic.clone().threshold(100).lineWidth(0)
show([[pic, "A normal jpg image.."],
[msh, "..becomes a polygonal Mesh"],
[msht, "Thresholding also generates a Mesh"]
], N=3, axes=1, zoom=5, elevation=-20, bg='black').close()
| [
"numpy.zeros_like",
"numpy.sum",
"vedo.show",
"vedo.Picture"
] | [((275, 294), 'numpy.sum', 'np.sum', (['rgb'], {'axis': '(1)'}), '(rgb, axis=1)\n', (281, 294), True, 'import numpy as np\n'), ((308, 326), 'numpy.zeros_like', 'np.zeros_like', (['rgb'], {}), '(rgb)\n', (321, 326), True, 'import numpy as np\n'), ((101, 136), 'vedo.Picture', 'Picture', (["(dataurl + 'images/dog.jpg')"], {}), "(dataurl + 'images/dog.jpg')\n", (108, 136), False, 'from vedo import Picture, dataurl, show\n'), ((610, 787), 'vedo.show', 'show', (["[[pic, 'A normal jpg image..'], [msh, '..becomes a polygonal Mesh'], [msht,\n 'Thresholding also generates a Mesh']]"], {'N': '(3)', 'axes': '(1)', 'zoom': '(5)', 'elevation': '(-20)', 'bg': '"""black"""'}), "([[pic, 'A normal jpg image..'], [msh, '..becomes a polygonal Mesh'], [\n msht, 'Thresholding also generates a Mesh']], N=3, axes=1, zoom=5,\n elevation=-20, bg='black')\n", (614, 787), False, 'from vedo import Picture, dataurl, show\n')] |
import os, sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy.signal
from classy import Class
from helper import cache_grendel, cropsave, grendel_dir
"""
For just one of the boxes, plot the absolute CONCEPT
and GADGET spectra over time, including a = a_begin.
"""
textwidth = 504 # mnras: 240 (single-column), 504 (both columns)
width = textwidth/72.27
height = 4.185
# The general font size is 9 but in captions it is 8.
# We choose to match this exactly.
fontsize = 8 #9/1.2
latex_preamble = r'''
\usepackage{lmodern}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{mathtools}
\usepackage{siunitx}
\usepackage{xfrac}
\usepackage{nicefrac}
\usepackage{relsize}
\newcommand{\CONCEPT}{\textsc{co\textsl{n}cept}}
\newcommand{\CONCEPTONE}{\textsc{co\textsl{n}cept}\,\textscale{.77}{{1.0}}}
\newcommand{\GADGETTWO}{\textsc{gadget}-\textscale{.77}{{2}}}
'''
matplotlib.rcParams.update({
'text.usetex' : True,
'font.family' : 'serif',
'font.serif' : 'cmr10',
'font.size' : fontsize,
'mathtext.fontset' : 'cm',
'axes.formatter.use_mathtext': True,
'text.latex.preamble': latex_preamble,
})
h = 0.67
size = 1024
N = size**3
z_values = [10, 5, 3, 2, 1, 0.5, 0]
a_values = [1/(1 + z) for z in z_values]
boxsizes = [512]
boxsize = boxsizes[0]
nprocs = {2048: 256, 1024: 256, 512: 256, 256: 1024}
concept_standard = ['', 'final', 'symplectic_final'][2]
gadget_standard = ['', ][0]
textwidth = 240 # mnras: 240 (single-column), 504 (both columns)
width = textwidth/72.27
height = 2.09
fig = plt.figure(figsize=(width, height))
n_axis = 6
gs = fig.add_gridspec(n_axis, 1)
ax1 = fig.add_subplot(gs[:(n_axis - 1), 0])
ax2 = fig.add_subplot(gs[n_axis - 1, 0])
axes = [ax1, ax2]
# Load
cache_assume_uptodate = True
output_dir = f'{grendel_dir}/powerspec/concept_vs_gadget'
def load_concept(boxsize, special=''):
directory = f'{output_dir}/nprocs{nprocs[boxsize]}/{boxsize}/{special}'.rstrip('/')
P = []
k = None
for a in a_values:
filename = f'{directory}/powerspec_a={a:.2f}'
if cache_assume_uptodate:
filename = cache_grendel(filename, cache_assume_uptodate=cache_assume_uptodate)
else:
if not os.path.isfile(filename):
continue
filename = cache_grendel(filename, cache_assume_uptodate=cache_assume_uptodate)
k, _P = np.loadtxt(filename, usecols=(0, 2), unpack=True)
P.append(_P)
return k, P
def load_gadget_power(box, special=''):
directory = f'{output_dir}/Gadget2/box{box}_size{size}/{special}'.rstrip('/')
P = []
k = None
for i, a in enumerate(a_values):
filename = f'{directory}/powerspec_snapshot_00{i}'
if cache_assume_uptodate:
filename = cache_grendel(filename, cache_assume_uptodate=cache_assume_uptodate)
else:
if not os.path.isfile(filename):
continue
filename = cache_grendel(filename, cache_assume_uptodate=cache_assume_uptodate)
k, _P = np.loadtxt(filename, usecols=(0, 2), unpack=True)
P.append(_P)
return k, P
k_all = {}
P_concept = {}
P_gadget = {}
for boxsize in boxsizes:
k_all[boxsize], P_concept[boxsize] = load_concept(boxsize, concept_standard)
_, P_gadget[boxsize] = load_gadget_power(boxsize, gadget_standard)
def get_mask(k, k_nyq):
mask = (k < k_nyq)
for i, el in enumerate(reversed(mask)):
if el:
mask[i-3:] = False
break
return mask
def k_nyq_particles(boxsize):
"""Supply box in [Mpc/h] to get k_Nyquist in [1/Mpc]
"""
return 2*np.pi/boxsize*(np.cbrt(N)/2)*h
def smooth(x, y, n=500, num=40):
fac = np.log(1024)/np.log(2048)
num *= fac
num = int(round(num))
if num%2 == 0:
num += 1
x_interp = np.logspace(np.log10(x[0]), np.log10(x[-1]), n)
y_interp = np.interp(np.log10(x_interp), np.log10(x), y)
y_smoothed = scipy.signal.savgol_filter(y_interp, num, 2)
#steepness = 2
#k_smooth = 0.4*np.sqrt(x[0]*x[-1])
#weight = (1 - scipy.special.erf(steepness*np.log10(x_interp/k_smooth)))/2
#y_smoothed = weight*y_interp + (1 - weight)*y_smoothed
return x_interp, y_smoothed
# Also load initial power spectrum
filename = f'{grendel_dir}/powerspec/box512_size1024/powerspec_a=0.01'
filename = cache_grendel(filename, cache_assume_uptodate=cache_assume_uptodate)
k_ini, P_ini = np.loadtxt(filename, usecols=(0, 2), unpack=True)
k = k_all[boxsize]
if not np.all(k_ini == k):
print('Mismatch between initial and sim k!', file=sys.stderr)
a_begin = 0.01
z_values_all = [int(round(1/a_begin - 1))] + z_values
def get_class(boxsize):
k = k_all[boxsize]
cosmo = Class()
Omega_b = 0.049
Omega_cdm = 0.27
params = {
'Omega_b': Omega_b,
'Omega_cdm': Omega_cdm,
'H0': 67.0,
'P_k_max_1/Mpc': np.max(k)*1.01,
'output': 'dTk mPk',
'z_pk': ', '.join([str(float(z)) for z in z_values_all]),
}
cosmo.set(params)
cosmo.compute()
P_class = [np.array([cosmo.pk(ki, z) for ki in k]) for z in z_values_all]
# Scale according to D(a) with and without radiation
bg = cosmo.get_background()
a_bg = 1/(1 + bg['z'])
a_min = 1e-6
mask = (a_bg >= a_min)
a_bg = a_bg[mask]
D_class = bg['gr.fac. D'][mask]
Omega_m = Omega_b + Omega_cdm
Omega_Lambda = 1 - Omega_m
D_concept = a_bg*scipy.special.hyp2f1(1/3, 1, 11/6, -Omega_Lambda/Omega_m*a_bg**3)
D_concept /= D_concept[-1]
D_class_begin = scipy.interpolate.interp1d(np.log(a_bg), np.log(D_class), kind='cubic')(np.log(a_begin))
D_concept_begin = scipy.interpolate.interp1d(np.log(a_bg), np.log(D_concept), kind='cubic')(np.log(a_begin))
D_class = D_class * (D_concept_begin/D_class_begin) # same D at a_begin
facs = scipy.interpolate.interp1d(np.log(a_bg), D_concept/D_class, kind='cubic')(np.log([1/(1 + z) for z in z_values_all]))**2
P_class = [P_cl*fac for P_cl, fac in zip(P_class, facs)]
# Match at a_begin (difference in gauge)
fac = P_ini/P_class[0]
P_class = [P_cl*fac for P_cl in P_class]
return P_class
k_nyq = k_nyq_particles(boxsize)
mask = get_mask(k, k_nyq)
P_class = get_class(boxsize)
def plot(i, ax, P_c, P_g, P_cl, clip_on=True):
zorder = -100*(i+1)
z = z_values_all[i]
if z == 0.5:
z = r'\text{\textonehalf}'
color = f'C{i-1}'
if i == 0:
color = f'C{len(z_values_all)-1}'
x, y = k[mask]/h, ((k/h)**1.5*P_c *h**3)[mask]
x, y = smooth(x, np.log(y))
y = np.exp(y)
ax.loglog(x, y, f'{color}-',
label=f'$z = {z}$', clip_on=clip_on, zorder=zorder)
x, y = k[mask]/h, ((k/h)**1.5*P_g *h**3)[mask]
x, y = smooth(x, np.log(y))
y = np.exp(y)
ax.loglog(x, y, f'k--', clip_on=clip_on, zorder=zorder)
ax.loglog(k[mask]/h, ((k/h)**1.5*P_cl*h**3)[mask], f'k:', lw=1, clip_on=clip_on, zorder=zorder)
# a == a_begin
plot(0, ax2, P_ini, P_ini, P_class[0], clip_on=False)
# a > a_begin
for i, (P_c, P_g, P_cl) in enumerate(zip(P_concept[boxsize], P_gadget[boxsize], P_class[1:]), 1):
plot(i, ax1, P_c, P_g, P_cl, clip_on=(i != 1))
# Legend needs to be made from ax2
z = z_values_all[i]
if z == 0.5:
z = r'\text{\textonehalf}'
color = f'C{i-1}'
if i == 0:
color = f'C{len(z_values_all)-1}'
ax2.loglog(k[mask][0], P_c[mask][0], f'{color}-', label=f'$z = {z}$')
legend1 = ax2.legend(framealpha=0.6)
handles, labels = ax2.get_legend_handles_labels()
jumpy = 0.24
jumpx = 0.0088
legend1 = ax2.legend(handles[::-1], labels[::-1], loc=(0.037 + jumpx, 0.228 + jumpy))
legend1.set_zorder(np.inf)
legend1.set_clip_on(False)
for ax in axes:
ax.set_xlim(k[0]/h, k_nyq/h)
ax2.set_xlabel(r'$k\; [h/\mathrm{Mpc}]$')
ax1.set_ylabel(r'$k^{3/2}P\; [(\mathrm{Mpc}/h)^{3/2}]$ .......')
ax1.fill([-0.155, -0.115, -0.115, -0.155, -0.155], [0.78, 0.78, 0.98, 0.98, 0.78], 'w', ec='none', clip_on=False, transform=ax1.transAxes, zorder=np.inf)
fig.subplots_adjust(wspace=0, hspace=0.205)
# hide the spines between ax and ax2
ax1.spines['bottom'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax1.xaxis.tick_top()
ax1.tick_params(which='both', top=False, labeltop=False)
ax2.xaxis.tick_bottom()
# ylim
ax1.set_ylim(0.185, 6.2e+2)
ax2.set_ylim(2e-3, 0.803e-2)
# Place cut-out slanted lines
mew = 0.8
q = 22*np.pi/180
offset = 0.33
x = np.linspace(offset*np.pi, (2 - offset)*np.pi, 100)
y = np.sin(x)
X, Y = np.array([[np.cos(q), -np.sin(q)], [np.sin(q), np.cos(q)]]) @ np.array([x, y])
X -= np.mean(X)
Y -= np.mean(Y)
scale = 0.01
ex = 0.0024
for xx in (0, 1):
ax1.plot(xx + scale*X, (0 - ex) + scale*Y, 'k-',
lw=mew, transform=ax1.transAxes, zorder=1e+6, clip_on=False)
ax2.plot(xx + scale*X, (1 + ex) + scale*(n_axis - 1)*Y, 'k-',
lw=mew, transform=ax2.transAxes, zorder=1e+6, clip_on=False)
# Clean up
if xx == 1:
ax1.plot(xx + scale*X + 0.001, (0 - ex) + scale*Y - 0.008, 'w-', alpha=1,
lw=mew, transform=ax1.transAxes, zorder=1e+6, clip_on=False)
# old
#d = 0.65 # proportion of vertical to horizontal extent of the slanted line
#kwargs = dict(marker=[(-1, -d), (1, d)], markersize=8.5,
# linestyle='none', color='k', mec='k', mew=mew, clip_on=False)
#ax1.plot([0, 1], [0, 0], transform=ax1.transAxes, **kwargs)
#ax2.plot([0, 1], [1, 1], transform=ax2.transAxes, **kwargs)
# Clean a = a_begin curve at the lower right
ax2.plot([0.996, 1.004, 1.004], [-0.035, -0.035, 0.06], 'w-', lw=1.2, transform=ax2.transAxes, clip_on=False, zorder=-5, alpha=1)
# Clean a = a_begin curve at the lower left
ax2.plot([-0.004]*2, [0.6, 0.8], 'w-', lw=1.2, transform=ax2.transAxes, clip_on=False, zorder=-5, alpha=1)
# Clean a = 0.09 curve at the lower left
ax1.plot([-0.004]*2, [0.08, 0.15], 'w-', lw=1.2, transform=ax1.transAxes, clip_on=False, zorder=-5, alpha=1)
# Draw extra part of spine with tick at lower left
ax2.tick_params(which='both', labelleft=False)
ax2.plot(
[ax2.get_xlim()[0]]*2,
[ax2.get_ylim()[0], 1e-3],
'k-', lw=mew, clip_on=False,
)
ax2.plot(
[ax2.get_xlim()[0], 0.907*ax2.get_xlim()[0]],
[1e-3]*2,
'k-', lw=mew, clip_on=False,
)
ax2.text(0.804*ax2.get_xlim()[0], 0.9356*1e-3, r'$10^{-3}$', ha='right', va='center')
# Extra legend
x_leg_start = 0.3445 + jumpx
legend2 = ax2.legend(
(
ax2.plot(1, 1, '-' , color='w')
+ ax2.plot(1, 1, '--' , color='k')
+ ax2.plot(1, 1, ':', lw=1, color='k')
),
(r'\CONCEPTONE{}', r'\GADGETTWO{}', r'linear'),
loc=(x_leg_start, 0.228 + jumpy), framealpha=0.6,
)
ax2.add_artist(legend1)
ax2.add_artist(legend2)
# Rainbow
y_bot = 1.8935
y_top = y_bot + 0.0862
offsetx = 0.0123
dx = 0.01102*8/len(z_values_all)
for i in range(len(z_values_all)):
color = f'C{i-1}'
if i == 0:
color = f'C{len(z_values_all)-1}'
ax2.fill(
[
x_leg_start + offsetx + dx*i*0.995,
x_leg_start + offsetx + dx*(i+1)/0.995,
x_leg_start + offsetx + dx*(i+1)/0.995,
x_leg_start + offsetx + dx*i*0.995,
x_leg_start + offsetx + dx*i*0.995,
],
np.array([y_bot, y_bot, y_top, y_top, y_bot]) + jumpy,
color, alpha=1.0,
ec='none', transform=ax2.transAxes, zorder=np.inf, clip_on=False,
)
# Remove small legend bits
ax2.plot([0.443 + jumpx]*2, np.array([0.5, 1.5]) + jumpy, 'w', transform=ax2.transAxes, zorder=np.inf, clip_on=False, alpha=1)
ax2.set_xticks([0.1, 1])
ax2.set_xticklabels([r'$0.1$', r'$1$'])
# Save
cropsave(fig, '../figure/abspower.pdf') # no tight_layout() or bbox_inches()
| [
"helper.cache_grendel",
"numpy.cbrt",
"numpy.log",
"matplotlib.rcParams.update",
"helper.cropsave",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.mean",
"numpy.loadtxt",
"numpy.linspace",
"classy.Class",
"numpy.exp",
"numpy.array",
"numpy.log10",
"numpy.max",
"os.path.isfile",
"num... | [((948, 1182), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'text.usetex': True, 'font.family': 'serif', 'font.serif': 'cmr10',\n 'font.size': fontsize, 'mathtext.fontset': 'cm',\n 'axes.formatter.use_mathtext': True, 'text.latex.preamble': latex_preamble}"], {}), "({'text.usetex': True, 'font.family': 'serif',\n 'font.serif': 'cmr10', 'font.size': fontsize, 'mathtext.fontset': 'cm',\n 'axes.formatter.use_mathtext': True, 'text.latex.preamble': latex_preamble}\n )\n", (974, 1182), False, 'import matplotlib\n'), ((1640, 1675), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (1650, 1675), True, 'import matplotlib.pyplot as plt\n'), ((4410, 4478), 'helper.cache_grendel', 'cache_grendel', (['filename'], {'cache_assume_uptodate': 'cache_assume_uptodate'}), '(filename, cache_assume_uptodate=cache_assume_uptodate)\n', (4423, 4478), False, 'from helper import cache_grendel, cropsave, grendel_dir\n'), ((4494, 4543), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'usecols': '(0, 2)', 'unpack': '(True)'}), '(filename, usecols=(0, 2), unpack=True)\n', (4504, 4543), True, 'import numpy as np\n'), ((8456, 8510), 'numpy.linspace', 'np.linspace', (['(offset * np.pi)', '((2 - offset) * np.pi)', '(100)'], {}), '(offset * np.pi, (2 - offset) * np.pi, 100)\n', (8467, 8510), True, 'import numpy as np\n'), ((8511, 8520), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (8517, 8520), True, 'import numpy as np\n'), ((8612, 8622), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (8619, 8622), True, 'import numpy as np\n'), ((8628, 8638), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (8635, 8638), True, 'import numpy as np\n'), ((11597, 11636), 'helper.cropsave', 'cropsave', (['fig', '"""../figure/abspower.pdf"""'], {}), "(fig, '../figure/abspower.pdf')\n", (11605, 11636), False, 'from helper import cache_grendel, cropsave, grendel_dir\n'), ((4570, 4588), 'numpy.all', 'np.all', (['(k_ini == k)'], {}), 
'(k_ini == k)\n', (4576, 4588), True, 'import numpy as np\n'), ((4784, 4791), 'classy.Class', 'Class', ([], {}), '()\n', (4789, 4791), False, 'from classy import Class\n'), ((6623, 6632), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (6629, 6632), True, 'import numpy as np\n'), ((6817, 6826), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (6823, 6826), True, 'import numpy as np\n'), ((8590, 8606), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (8598, 8606), True, 'import numpy as np\n'), ((2464, 2513), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'usecols': '(0, 2)', 'unpack': '(True)'}), '(filename, usecols=(0, 2), unpack=True)\n', (2474, 2513), True, 'import numpy as np\n'), ((3111, 3160), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'usecols': '(0, 2)', 'unpack': '(True)'}), '(filename, usecols=(0, 2), unpack=True)\n', (3121, 3160), True, 'import numpy as np\n'), ((3773, 3785), 'numpy.log', 'np.log', (['(1024)'], {}), '(1024)\n', (3779, 3785), True, 'import numpy as np\n'), ((3786, 3798), 'numpy.log', 'np.log', (['(2048)'], {}), '(2048)\n', (3792, 3798), True, 'import numpy as np\n'), ((3903, 3917), 'numpy.log10', 'np.log10', (['x[0]'], {}), '(x[0])\n', (3911, 3917), True, 'import numpy as np\n'), ((3919, 3934), 'numpy.log10', 'np.log10', (['x[-1]'], {}), '(x[-1])\n', (3927, 3934), True, 'import numpy as np\n'), ((3964, 3982), 'numpy.log10', 'np.log10', (['x_interp'], {}), '(x_interp)\n', (3972, 3982), True, 'import numpy as np\n'), ((3984, 3995), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (3992, 3995), True, 'import numpy as np\n'), ((5683, 5698), 'numpy.log', 'np.log', (['a_begin'], {}), '(a_begin)\n', (5689, 5698), True, 'import numpy as np\n'), ((5796, 5811), 'numpy.log', 'np.log', (['a_begin'], {}), '(a_begin)\n', (5802, 5811), True, 'import numpy as np\n'), ((6604, 6613), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (6610, 6613), True, 'import numpy as np\n'), ((6798, 6807), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (6804, 6807), 
True, 'import numpy as np\n'), ((11424, 11444), 'numpy.array', 'np.array', (['[0.5, 1.5]'], {}), '([0.5, 1.5])\n', (11432, 11444), True, 'import numpy as np\n'), ((2203, 2271), 'helper.cache_grendel', 'cache_grendel', (['filename'], {'cache_assume_uptodate': 'cache_assume_uptodate'}), '(filename, cache_assume_uptodate=cache_assume_uptodate)\n', (2216, 2271), False, 'from helper import cache_grendel, cropsave, grendel_dir\n'), ((2379, 2447), 'helper.cache_grendel', 'cache_grendel', (['filename'], {'cache_assume_uptodate': 'cache_assume_uptodate'}), '(filename, cache_assume_uptodate=cache_assume_uptodate)\n', (2392, 2447), False, 'from helper import cache_grendel, cropsave, grendel_dir\n'), ((2850, 2918), 'helper.cache_grendel', 'cache_grendel', (['filename'], {'cache_assume_uptodate': 'cache_assume_uptodate'}), '(filename, cache_assume_uptodate=cache_assume_uptodate)\n', (2863, 2918), False, 'from helper import cache_grendel, cropsave, grendel_dir\n'), ((3026, 3094), 'helper.cache_grendel', 'cache_grendel', (['filename'], {'cache_assume_uptodate': 'cache_assume_uptodate'}), '(filename, cache_assume_uptodate=cache_assume_uptodate)\n', (3039, 3094), False, 'from helper import cache_grendel, cropsave, grendel_dir\n'), ((4953, 4962), 'numpy.max', 'np.max', (['k'], {}), '(k)\n', (4959, 4962), True, 'import numpy as np\n'), ((5638, 5650), 'numpy.log', 'np.log', (['a_bg'], {}), '(a_bg)\n', (5644, 5650), True, 'import numpy as np\n'), ((5652, 5667), 'numpy.log', 'np.log', (['D_class'], {}), '(D_class)\n', (5658, 5667), True, 'import numpy as np\n'), ((5749, 5761), 'numpy.log', 'np.log', (['a_bg'], {}), '(a_bg)\n', (5755, 5761), True, 'import numpy as np\n'), ((5763, 5780), 'numpy.log', 'np.log', (['D_concept'], {}), '(D_concept)\n', (5769, 5780), True, 'import numpy as np\n'), ((5975, 6020), 'numpy.log', 'np.log', (['[(1 / (1 + z)) for z in z_values_all]'], {}), '([(1 / (1 + z)) for z in z_values_all])\n', (5981, 6020), True, 'import numpy as np\n'), ((11212, 11257), 
'numpy.array', 'np.array', (['[y_bot, y_bot, y_top, y_top, y_bot]'], {}), '([y_bot, y_bot, y_top, y_top, y_bot])\n', (11220, 11257), True, 'import numpy as np\n'), ((2305, 2329), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2319, 2329), False, 'import os, sys\n'), ((2952, 2976), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2966, 2976), False, 'import os, sys\n'), ((3713, 3723), 'numpy.cbrt', 'np.cbrt', (['N'], {}), '(N)\n', (3720, 3723), True, 'import numpy as np\n'), ((5928, 5940), 'numpy.log', 'np.log', (['a_bg'], {}), '(a_bg)\n', (5934, 5940), True, 'import numpy as np\n'), ((8539, 8548), 'numpy.cos', 'np.cos', (['q'], {}), '(q)\n', (8545, 8548), True, 'import numpy as np\n'), ((8564, 8573), 'numpy.sin', 'np.sin', (['q'], {}), '(q)\n', (8570, 8573), True, 'import numpy as np\n'), ((8575, 8584), 'numpy.cos', 'np.cos', (['q'], {}), '(q)\n', (8581, 8584), True, 'import numpy as np\n'), ((8551, 8560), 'numpy.sin', 'np.sin', (['q'], {}), '(q)\n', (8557, 8560), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import h5py
import re
import sys
import operator
import argparse
from random import sample, seed
from math import ceil
import nltk
nltk.data.path.append('/ssd-playpen/home/shiyue/oposum/cache')
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from tqdm import tqdm
def parallel_chunks(l1, l2, l3, l4, n):
"""
Yields chunks of size n from 4 lists in parallel
"""
if len(l1) != len(l2) or len(l2) != len(l3) or len(l3) != len(l4):
raise IndexError
else:
for i in range(0, len(l1), n):
yield l1[i:i+n], l2[i:i+n], l3[i:i+n], l4[i:i+n]
def load_bin_vec(fname, vocab):
"""
Loads 300x1 word vecs from Google (Mikolov) word2vec
"""
word_vecs = {}
with open(fname, "rb") as f:
header = f.readline()
vocab_size, layer1_size = map(int, header.split())
# number of bytes per embedding
binary_len = np.dtype('float32').itemsize * layer1_size
for line in tqdm(range(vocab_size)):
word = []
while True:
ch = f.read(1)
if ch == ' ':
word = ''.join(word)
break
if ch != '\n':
word.append(ch)
# store words in vocab, discard rest
if vocab is None or word in vocab:
word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32')
else:
f.read(binary_len)
return word_vecs
def load_glove_vec(fname, vocab):
with open(fname, 'r') as f:
lines = f.readlines()
word_vecs = {}
for line in tqdm(lines):
items = line.strip().split(' ')
word = items[0]
if vocab is None or word in vocab:
vec = list(map(float, items[1:]))
word_vecs[word] = vec
return word_vecs
def line_to_words(line, min_len, max_len, stop_words=None, lemmatize=True):
"""
Reads a line of text (sentence) and returns a list of tokenized EDUs
"""
if lemmatize:
lemmatizer = WordNetLemmatizer()
# clean sentence and break it into EDUs
clean_line = clean_str(line.strip())
edus = [edu.strip() for edu in clean_line.split('edu_break')]
# original text for each EDU
edus_o = [edu.strip() for edu in line.split('EDU_BREAK')]
segs = []
ids = []
total = 0
original = []
for i, edu in enumerate(edus):
total += 1
words = edu.split()
if stop_words is not None:
words = [word for word in words if word not in stop_words]
if lemmatize:
words = [lemmatizer.lemmatize(word) for word in words]
# discard short segments
if len(words) < min_len:
continue
# truncate long ones
if len(words) > max_len:
words = words[:max_len]
segs.append(words)
ids.append(i) # storing ids to keep track of discarded segments
original.append(edus_o[i])
return segs, original, ids, total
def get_vocab(file, min_len, max_len, stop_words, lemmatize):
"""
Reads an input file and builds vocabulary, product mapping, etc.
"""
max_len_actual = 0
wid = 1
pid = 0
word2id = {}
word2cnt = {}
prod2id = {}
seg_cnt = 0
doc_cnt = 0
f = open(file, 'r')
first_line = True
for line in f:
if not first_line:
if len(line.strip()) != 0:
segs, _, _, _ = line_to_words(line, min_len, max_len, stop_words, lemmatize)
for seg in segs:
seg_cnt += 1
max_len_actual = max(max_len_actual, len(seg))
for word in seg:
if word not in word2id:
word2id[word] = wid
wid += 1
if word not in word2cnt:
word2cnt[word] = 1
else:
word2cnt[word] += 1
else:
first_line = True
doc_cnt += 1
else:
first_line = False
rcode, label = line.split()
prod, _ = rcode.split('-')
if prod not in prod2id:
prod2id[prod] = pid
pid += 1
f.close()
return max_len_actual, seg_cnt, doc_cnt, word2id, prod2id, word2cnt
def load_data(file, args):
"""
Loads dataset into appropriate data structures
"""
padding = args.padding
min_len = args.min_len
max_len = args.max_len
batch_size = args.batch_size
stop_words = args.stop_words
lemmatize = args.lemmatize
max_len_actual, seg_cnt, doc_cnt, word2id, prod2id, word2cnt = get_vocab(file, min_len, max_len,
stop_words, lemmatize)
print('Number of documents:', doc_cnt)
print('Number of edus:', seg_cnt)
print('Number of products:', len(prod2id))
print('Max segment length:', max_len_actual)
print('Vocabulary size:', len(word2id))
data = []
products = []
scodes = []
original = []
f = open(file, 'r')
first_line = True
for line in f:
if not first_line:
if len(line.strip()) != 0:
segs, orig, ids, total = line_to_words(line, min_len, max_len,
stop_words=stop_words, lemmatize=lemmatize)
for i, seg in enumerate(segs):
seg_ids = [word2id[word] for word in seg]
seg_ids = [0] * padding + seg_ids + [0] * padding
scode = '{0}-{1:04d}'.format(rcode, start_idx + ids[i])
data.append(seg_ids)
products.append(pid)
scodes.append(scode)
original.append(orig[i])
start_idx += total
else:
first_line = True
else:
first_line = False
rcode, label = line.split()
prod, _ = rcode.split('-')
pid = prod2id[prod]
start_idx = 0
f.close()
return word2id, prod2id, data, products, scodes, original, word2cnt
def clean_str(string):
"""
String cleaning
"""
string = string.lower()
string = re.sub(r"\s{2,}", " ", string)
string = re.sub(r""", " ", string)
string = re.sub(r"(http://)?www\.[^ ]+", " _url_ ", string)
string = re.sub(r"[^a-z0-9$\'_]", " ", string)
string = re.sub(r"_{2,}", "_", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'m", " \'m", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r"\$+", " $ ", string)
string = re.sub(r"rrb", " ", string)
string = re.sub(r"lrb", " ", string)
string = re.sub(r"rsb", " ", string)
string = re.sub(r"lsb", " ", string)
string = re.sub(r"(?<=[a-z])I", " I", string)
string = re.sub(r"(?<= )[0-9]+(?= )", "<NUM>", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip()
def main():
parser = argparse.ArgumentParser(
description =__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--w2v', help='word2vec binary file', type=str, default='')
parser.add_argument('--data', help='data in appropriate format', type=str, default='')
parser.add_argument('--name', help='name of dataset', type=str, default='')
parser.add_argument('--batch_size', help='number of segments per batch (default: 50)', type=int, default=50)
parser.add_argument('--padding', help='padding around each segment (default: 0)', type=int, default=0)
parser.add_argument('--lemmatize', help='Lemmatize words', action='store_true')
parser.add_argument('--stopfile', help='Stop-word file (default: nltk english stop words)', type=str, default='')
parser.add_argument('--min_len', help='minimum allowed words per segment (default: 1)', type=int, default=1)
parser.add_argument('--max_len', help='maximum allowed words per segment (default: 150)', type=int, default=150)
parser.add_argument('--seed', help='random seed (default: 1)', type=int, default=1)
args = parser.parse_args()
# set stop words policy
if args.stopfile == 'no':
args.stop_words = None
elif args.stopfile != '':
stop_words = set()
fstop = open(args.stopfile, 'r')
for line in fstop:
stop_words.add(line.strip())
fstop.close()
args.stop_words = stop_words
else:
args.stop_words = set(stopwords.words('english'))
# load data
word2id, prod2id, data, products, scodes, original, word2cnt = load_data(args.data, args)
# write mapping files
with open(args.name + '_word_mapping.txt', 'w') as f:
f.write('<PAD> 0\n')
for word, idx in sorted(word2id.items(), key=operator.itemgetter(1)):
f.write("%s %d\n" % (word, idx))
with open(args.name + '_product_mapping.txt', 'w') as f:
for prod, idx in sorted(prod2id.items(), key=operator.itemgetter(1)):
f.write("%s %d\n" % (prod, idx))
with open(args.name + '_word_counts.txt', 'w') as f:
for word, count in sorted(word2cnt.items(), key=operator.itemgetter(1), reverse=True):
f.write("%s %d\n" % (word, count))
# populate embedding matrix
vocab_size = len(word2id) + 1
w2v = load_glove_vec(args.w2v, word2id)
embed = np.random.uniform(-0.25, 0.25, (vocab_size, len(list(w2v.values())[0])))
embed[0] = 0
for word, vec in w2v.items():
embed[word2id[word]] = vec
seed(args.seed)
# sort data by segment length (to minimize padding)
data, products, scodes, original = zip(*sorted(
sample(list(zip(data, products, scodes, original)), len(data)),
key=lambda x:len(x[0])))
filename = args.name + '.hdf5'
print(filename)
with h5py.File(filename, 'w') as f:
f['w2v'] = np.array(embed)
for i, (segments, prods, codes, segs_o), in tqdm(enumerate(parallel_chunks(data, products,
scodes, original, args.batch_size))):
max_len_batch = len(max(segments, key=len))
batch_id = str(i)
for j in range(len(segments)):
segments[j].extend([0] * (max_len_batch - len(segments[j])))
f['data/' + batch_id] = np.array(segments, dtype=np.int32)
f['products/' + batch_id] = np.array(prods, dtype=np.int32)
f.create_dataset('scodes/' + batch_id, (len(codes),), dtype="S{0}".format(len(codes[0])), data=codes)
dt = h5py.special_dtype(vlen=bytes)
f.create_dataset('original/' + batch_id, (len(segs_o),), dtype=dt, data=segs_o)
if __name__ == '__main__':
main()
| [
"tqdm.tqdm",
"h5py.File",
"argparse.ArgumentParser",
"h5py.special_dtype",
"nltk.data.path.append",
"numpy.dtype",
"random.seed",
"numpy.array",
"nltk.corpus.stopwords.words",
"nltk.stem.wordnet.WordNetLemmatizer",
"operator.itemgetter",
"re.sub"
] | [((173, 235), 'nltk.data.path.append', 'nltk.data.path.append', (['"""/ssd-playpen/home/shiyue/oposum/cache"""'], {}), "('/ssd-playpen/home/shiyue/oposum/cache')\n", (194, 235), False, 'import nltk\n'), ((1684, 1695), 'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (1688, 1695), False, 'from tqdm import tqdm\n'), ((6316, 6346), 're.sub', 're.sub', (['"""\\\\s{2,}"""', '""" """', 'string'], {}), "('\\\\s{2,}', ' ', string)\n", (6322, 6346), False, 'import re\n'), ((6360, 6388), 're.sub', 're.sub', (['"""""""', '""" """', 'string'], {}), "('"', ' ', string)\n", (6366, 6388), False, 'import re\n'), ((6403, 6453), 're.sub', 're.sub', (['"""(http://)?www\\\\.[^ ]+"""', '""" _url_ """', 'string'], {}), "('(http://)?www\\\\.[^ ]+', ' _url_ ', string)\n", (6409, 6453), False, 'import re\n'), ((6467, 6504), 're.sub', 're.sub', (['"""[^a-z0-9$\\\\\'_]"""', '""" """', 'string'], {}), '("[^a-z0-9$\\\\\'_]", \' \', string)\n', (6473, 6504), False, 'import re\n'), ((6518, 6546), 're.sub', 're.sub', (['"""_{2,}"""', '"""_"""', 'string'], {}), "('_{2,}', '_', string)\n", (6524, 6546), False, 'import re\n'), ((6561, 6590), 're.sub', 're.sub', (['"""\\\\\'s"""', '""" \'s"""', 'string'], {}), '("\\\\\'s", " \'s", string)\n', (6567, 6590), False, 'import re\n'), ((6605, 6636), 're.sub', 're.sub', (['"""\\\\\'ve"""', '""" \'ve"""', 'string'], {}), '("\\\\\'ve", " \'ve", string)\n', (6611, 6636), False, 'import re\n'), ((6651, 6682), 're.sub', 're.sub', (['"""n\\\\\'t"""', '""" n\'t"""', 'string'], {}), '("n\\\\\'t", " n\'t", string)\n', (6657, 6682), False, 'import re\n'), ((6697, 6728), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" \'re"""', 'string'], {}), '("\\\\\'re", " \'re", string)\n', (6703, 6728), False, 'import re\n'), ((6743, 6772), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" \'d"""', 'string'], {}), '("\\\\\'d", " \'d", string)\n', (6749, 6772), False, 'import re\n'), ((6787, 6816), 're.sub', 're.sub', (['"""\\\\\'m"""', '""" \'m"""', 'string'], {}), '("\\\\\'m", " 
\'m", string)\n', (6793, 6816), False, 'import re\n'), ((6831, 6862), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" \'ll"""', 'string'], {}), '("\\\\\'ll", " \'ll", string)\n', (6837, 6862), False, 'import re\n'), ((6877, 6906), 're.sub', 're.sub', (['"""\\\\$+"""', '""" $ """', 'string'], {}), "('\\\\$+', ' $ ', string)\n", (6883, 6906), False, 'import re\n'), ((6920, 6946), 're.sub', 're.sub', (['"""rrb"""', '""" """', 'string'], {}), "('rrb', ' ', string)\n", (6926, 6946), False, 'import re\n'), ((6961, 6987), 're.sub', 're.sub', (['"""lrb"""', '""" """', 'string'], {}), "('lrb', ' ', string)\n", (6967, 6987), False, 'import re\n'), ((7002, 7028), 're.sub', 're.sub', (['"""rsb"""', '""" """', 'string'], {}), "('rsb', ' ', string)\n", (7008, 7028), False, 'import re\n'), ((7043, 7069), 're.sub', 're.sub', (['"""lsb"""', '""" """', 'string'], {}), "('lsb', ' ', string)\n", (7049, 7069), False, 'import re\n'), ((7084, 7119), 're.sub', 're.sub', (['"""(?<=[a-z])I"""', '""" I"""', 'string'], {}), "('(?<=[a-z])I', ' I', string)\n", (7090, 7119), False, 'import re\n'), ((7134, 7178), 're.sub', 're.sub', (['"""(?<= )[0-9]+(?= )"""', '"""<NUM>"""', 'string'], {}), "('(?<= )[0-9]+(?= )', '<NUM>', string)\n", (7140, 7178), False, 'import re\n'), ((7193, 7223), 're.sub', 're.sub', (['"""\\\\s{2,}"""', '""" """', 'string'], {}), "('\\\\s{2,}', ' ', string)\n", (7199, 7223), False, 'import re\n'), ((7276, 7379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (7299, 7379), False, 'import argparse\n'), ((9796, 9811), 'random.seed', 'seed', (['args.seed'], {}), '(args.seed)\n', (9800, 9811), False, 'from random import sample, seed\n'), ((2111, 2130), 'nltk.stem.wordnet.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (2128, 2130), False, 'from nltk.stem.wordnet import 
WordNetLemmatizer\n'), ((10091, 10115), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (10100, 10115), False, 'import h5py\n'), ((10141, 10156), 'numpy.array', 'np.array', (['embed'], {}), '(embed)\n', (10149, 10156), True, 'import numpy as np\n'), ((10594, 10628), 'numpy.array', 'np.array', (['segments'], {'dtype': 'np.int32'}), '(segments, dtype=np.int32)\n', (10602, 10628), True, 'import numpy as np\n'), ((10669, 10700), 'numpy.array', 'np.array', (['prods'], {'dtype': 'np.int32'}), '(prods, dtype=np.int32)\n', (10677, 10700), True, 'import numpy as np\n'), ((10833, 10863), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'bytes'}), '(vlen=bytes)\n', (10851, 10863), False, 'import h5py\n'), ((965, 984), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (973, 984), True, 'import numpy as np\n'), ((8770, 8796), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (8785, 8796), False, 'from nltk.corpus import stopwords\n'), ((9072, 9094), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (9091, 9094), False, 'import operator\n'), ((9251, 9273), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (9270, 9273), False, 'import operator\n'), ((9429, 9451), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (9448, 9451), False, 'import operator\n')] |
"""
Sources:
https://pjreddie.com/darknet/yolo/
https://www.youtube.com/watch?v=1LCb1PVqzeY
"""
import cv2
import numpy as np
# import serial_cmd
# Instantiate serial command
# control = serial_cmd.Serial_cmd()
# Load yolo weights and configuration files
# (paths are relative to the working directory; both files must exist)
net = cv2.dnn.readNet('yolov3-tiny.weights', 'yolov3-tiny.cfg')
# Open the default webcam (device 0)
cap = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    _, image = cap.read()
    height, width, _ = image.shape
    # Debug output: one channel of the top-left pixel
    print(image[0, 0, 0])
    # Scale the frame to the 416x416 YOLO input, normalized to [0, 1], BGR->RGB
    blob = cv2.dnn.blobFromImage(image, 1/255, (416,416), (0,0,0), swapRB=True, crop=False)
    net.setInput(blob)
    output_layers_names = net.getUnconnectedOutLayersNames()
    layerOutputs = net.forward(output_layers_names)
    boxes = []          # [x, y, w, h] in pixels per kept detection
    confidences = []    # confidence score per kept detection
    class_ids = []      # predicted class index per kept detection
    # Get information from each identified object
    for output in layerOutputs:
        for detection in output:
            # detection layout: [cx, cy, w, h, objectness, class scores...]
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5:
                # Coordinates are normalized; scale back to pixel units and
                # convert the center-based box to a top-left-based one.
                center_x = int(detection[0]*width)
                center_y = int(detection[1]*height)
                w = int(detection[2]*width)
                h = int(detection[3]*height)
                x = int(center_x - w/2)
                y = int(center_y - h/2)
                boxes.append([x, y, w, h])
                confidences.append((float(confidence)))
                class_ids.append(class_id)
    # Apply non-maxima suppression to the bounding boxes
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    font = cv2.FONT_HERSHEY_PLAIN
    colors = np.random.uniform(0, 255, size=(len(boxes), 3))
    if len(indexes) > 0:
        for i in indexes.flatten():
            x, y, w, h = boxes[i]
            # NOTE(review): label is hard-coded and class_ids[i] is collected
            # above but never used -- presumably a class-name lookup (e.g. from
            # coco.names) was intended here. Confirm before relying on labels.
            label = "person"
            confidence = str(round(confidences[i], 2))
            color = colors[i]
            cv2.rectangle(image, (x,y), (x+w, y+h), color, 2)
            cv2.putText(image, str(x) + " " + str(y) + " " + confidence, (x, y+20), font, 2, (255, 255, 255), 2)
    # Display the resulting image
    cv2.imshow('Image', image)
    # Quit when 'q' is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
| [
"cv2.dnn.NMSBoxes",
"numpy.argmax",
"cv2.waitKey",
"cv2.dnn.blobFromImage",
"cv2.imshow",
"cv2.dnn.readNet",
"cv2.VideoCapture",
"cv2.rectangle",
"cv2.destroyAllWindows"
] | [((263, 320), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['"""yolov3-tiny.weights"""', '"""yolov3-tiny.cfg"""'], {}), "('yolov3-tiny.weights', 'yolov3-tiny.cfg')\n", (278, 320), False, 'import cv2\n'), ((328, 347), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (344, 347), False, 'import cv2\n'), ((2226, 2249), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2247, 2249), False, 'import cv2\n'), ((489, 578), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image', '(1 / 255)', '(416, 416)', '(0, 0, 0)'], {'swapRB': '(True)', 'crop': '(False)'}), '(image, 1 / 255, (416, 416), (0, 0, 0), swapRB=True,\n crop=False)\n', (510, 578), False, 'import cv2\n'), ((1514, 1560), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', '(0.5)', '(0.4)'], {}), '(boxes, confidences, 0.5, 0.4)\n', (1530, 1560), False, 'import cv2\n'), ((2081, 2107), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'image'], {}), "('Image', image)\n", (2091, 2107), False, 'import cv2\n'), ((936, 953), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (945, 953), True, 'import numpy as np\n'), ((1879, 1933), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', 'color', '(2)'], {}), '(image, (x, y), (x + w, y + h), color, 2)\n', (1892, 1933), False, 'import cv2\n'), ((2115, 2129), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2126, 2129), False, 'import cv2\n')] |
""" Weather Panel
Displays current weather and some forecast information for multiple locations.
Automatically updates the weather information on a regular basis.
"""
from typing import Optional, Tuple, List, Union
import os
import numpy as np
from datetime import datetime
from tokenize import String
from pyowm import OWM
from pyowm.weatherapi25.one_call import OneCall
from kivy_garden.graph import Graph, MeshLinePlot, ScatterPlot
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.lang.builder import Builder
from kivy.clock import Clock
from kivy.utils import get_color_from_hex as rgb
from kivy.properties import NumericProperty, StringProperty, DictProperty, ListProperty
from kwidgets.text.simpletable import SimpleTable
Builder.load_string('''
<WeatherPanel>:
orientation: 'vertical'
canvas.before:
Color:
rgba: root.bg_color
Rectangle:
pos: self.pos
size: self.size
BoxLayout:
orientation: 'horizontal'
size_hint_y: None
size: 0,275
BoxLayout:
orientation: 'vertical'
Spinner:
id: selected_location
size_hint: 1, None
height: 50
text: 'Loading'
markup: True
background_normal: ''
background_color: root.bg_color
values: []
on_text:
root.update_panel()
Image:
id: current_image
source: root.current_image
SimpleTable:
id: current
key_size_hint_x: .4
data: root.table_data
key_color: root.text_color
value_color: root.text_color
Graph:
id: graph
y_ticks_major: 5
y_grid_label: True
x_ticks_major: 60*60
x_grid_label: True
padding: 5
precision: '%0.2f'
tick_color: root.text_color
border_color: root.text_color
label_options: {'color': root.text_color}
''')
class WeatherResponse:
    """Holds the last weather information retrieved for one location.

    Stores the lat/lon and user-provided location name, along with the
    most recent one-call response from OpenWeatherMap.
    """
    lat_lon: Tuple[float, float]
    # Fixed annotations: location_name is an optional string (the original code
    # annotated it as ``str = None`` and the __init__ parameter as
    # ``Optional[datetime]``, both of which were wrong).
    location_name: Optional[str] = None
    # Last pyowm OneCall response for this location; None until data is fetched.
    # String forward-reference so the class body does not evaluate OneCall.
    response: Optional["OneCall"] = None
    def __init__(self, lat_lon: Tuple[float, float], location_name: Optional[str]) -> None:
        """Create a new WeatherResponse object.

        :param lat_lon: Latitude and longitude of the location of interest
        :type lat_lon: Tuple[float, float]
        :param location_name: Human readable name for the location
            (e.g. "Tampa, FL" or "My House"); when None, ``str(lat_lon)``
            is used instead.
        :type location_name: Optional[str]
        """
        self.lat_lon = lat_lon
        self.location_name = location_name if location_name is not None else str(lat_lon)
class WeatherPanel(BoxLayout):
    """The main WeatherPanel interface.
    The Key User Relevant Properties are:
    * owm_key - The user's key from https://openweathermap.org/. IMPORTANT: This panel will not work without this key.*
      It either needs to be specified as a property in the configuration or it needs to be set as an
      environment variable. You can get a free key by registering at openweathermap.org.
    * temp_units - either fahrenheit or celcius (Default - fahrenheit)
    * text_color - the rgba color of the text and line components of the interface. (Default - [0,0,0,1])
    * bg_color - the background color (Default - [.5, .5, .5, 1])
    * data_update_rate_sec - The number of seconds between data updates
    * location_switch_rate_sec - The number of seconds to show each location
    * locations - a list of WeatherResponse objects, one each for the locations of interest.  This attribute can be
      set by assigning a list in the form of [(lat1, lon1), location_name1, (lat2, lon2), location_name2, ...].
      This is the form to use when assigning locations using the configuration file.  If assigned this way,
      it will be converted in a list of WeatherResponse objects.
    """
    # Seconds between calls to OpenWeatherMap for fresh data (default: 5 min)
    data_update_rate_sec = NumericProperty(60*5)
    # Seconds each location is shown before a new one is chosen at random
    location_switch_rate_sec = NumericProperty(3)
    _locations = ListProperty([WeatherResponse((51.4778, -0.0014), "Royal Observatory")])
    owm_key = StringProperty(None)
    temp_units = StringProperty("fahrenheit")
    text_color = ListProperty([0,0,0,1])
    bg_color = ListProperty([.5, .5, .5, 1])
    table_data = DictProperty({"sunrise": "Unknown", "sunset": "Unknown"})
    current_image = StringProperty(None)
    # True once the periodic Clock callbacks have been scheduled
    started = False
    # RNG used to pick which location to show; class-level, so all
    # WeatherPanel instances share the same RandomState.
    rng = np.random.RandomState()
    def update_initialize(self):
        """Run update_data and then schedule data update and panel update if they are not already
        scheduled.
        """
        self.update_data()
        if not self.started:
            # Guard so repeated calls do not stack extra Clock schedules
            self.started=True
            Clock.schedule_interval(self.update_data, self.data_update_rate_sec)
            Clock.schedule_interval(self.choose_random_location, self.location_switch_rate_sec)
    def dp_start(self):
        """Run when the panel is displayed. Call update_initialize
        """
        self.update_initialize()
    def choose_random_location(self, *args):
        """Choose one of the specified locations to display at random.

        Setting the spinner text triggers update_panel via the kv on_text rule.
        """
        self.ids.selected_location.text = self.rng.choice(self._locations).location_name
    def update_data(self, *args):
        """Call openweather and update the forecasts for all the locations.
        :raises RuntimeError: If owm_key is not set either in the configuration file or as an enviornment
        variable.
        """
        if self.owm_key is None:
            # Fall back to the OWM_KEY environment variable
            self.owm_key = os.environ.get("OWM_KEY")
            if self.owm_key is None:
                raise RuntimeError("OpenWeathermap Key not set")
        for wr in self._locations:
            owm = OWM(self.owm_key)
            mgr = owm.weather_manager()
            ans = mgr.one_call(lat=wr.lat_lon[0], lon=wr.lat_lon[1])
            wr.response = ans
            # First successful fetch for a location adds it to the spinner
            if wr.location_name not in self.ids.selected_location.values:
                self.ids.selected_location.values = self.ids.selected_location.values + [wr.location_name]
                self.ids.selected_location.text = wr.location_name
    def update_panel(self, *args):
        """Update the data displayed on the panel. Called when the spinner text field is set.
        """
        # WeatherResponse whose name matches the current spinner selection
        ans = [r for r in self._locations if r.location_name==self.ids.selected_location.text][0].response
        data = {
            'As of': datetime.fromtimestamp(ans.current.reference_time()).strftime("%H:%M:%S"),
            'Sunrise': datetime.fromtimestamp(ans.current.sunrise_time()).strftime("%H:%M:%S"),
            'Sunset': datetime.fromtimestamp(ans.current.sunset_time()).strftime("%H:%M:%S"),
            'Detailed status': ans.current.detailed_status,
            'Temperature': ans.current.temperature(self.temp_units)["temp"],
            'Feels like': ans.current.temperature(self.temp_units)["feels_like"],
            'Wind speed': ans.current.wind()["speed"],
            'Wind direction': ans.current.wind()["deg"],
            'UVI': ans.current.uvi
        }
        self.table_data = data
        # Weather icons ship in an "images" folder next to this module
        icon_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "images")
        self.current_image = os.path.join(icon_path, ans.current.weather_icon_name+".png")
        # Hourly forecast as (unix timestamp, temperature) pairs
        temps = [(m.reference_time(), m.temperature(self.temp_units)["temp"]) for m in ans.forecast_hourly]
        max_temp = max([x[1] for x in temps])
        min_temp = min([x[1] for x in temps])
        # Clear any plots from the previously displayed location
        for p in list(self.ids.graph.plots):
            self.ids.graph.remove_plot(p)
        # Pad the x axis by one hour and the y axis by 5% of the range
        self.ids.graph.xmin=temps[0][0]-60*60
        self.ids.graph.xmax=temps[-1][0]+60*60
        self.ids.graph.ymin=min_temp - (max_temp-min_temp)*.05
        self.ids.graph.ymax=max_temp + (max_temp-min_temp)*.05
        plot = MeshLinePlot(color=self.text_color)
        plot.points = [(i,c) for i,c in temps]
        self.ids.graph.add_plot(plot)
        # Mark hours that report any rain or snow volume ('1h' key present),
        # drawn as dots at the vertical middle of the graph.
        mean_temp = (min_temp+max_temp)/2.
        hasrain = [(m.reference_time(), ('1h' in m.rain) or ('1h' in m.snow)) for m in ans.forecast_hourly]
        rainpoints = [(i,mean_temp) for i,r in hasrain if r]
        if len(rainpoints)>0:
            rainplot = ScatterPlot(color=[0.2, 0.2, 1, 1], point_size=5)
            rainplot.points = rainpoints
            self.ids.graph.add_plot(rainplot)
    @property
    def locations(self) -> List[WeatherResponse]:
        """Get the list of locations that this panel instance displays.
        :return: A list of WeatherResponse objects, including any retrieved data.
        :rtype: List[WeatherResponse]
        """
        return self._locations
    @locations.setter
    def locations(self, location_list: Union[List[WeatherResponse], List[Union[Tuple[float, float], str]]]):
        """Set the locations that this panel will display
        :param location_list: Either a list of WeatherResponse objects or a list in the form [(lat1, lon1),
            location_name1, (lat2, lon2), location_name2, ...].
            This is the form to use when assigning locations using the configuration file.
            If assigned this way, it will be converted in a list of WeatherResponse objects.
        :type location_list: Union[List[WeatherResponse], List[Union[Tuple[float, float], str]]]
        """
        if isinstance(location_list[0], WeatherResponse):
            self._locations = location_list
        else:
            # Flat [lat_lon, name, lat_lon, name, ...] list: convert pairwise
            resp = []
            for i in range(0,len(location_list), 2):
                resp.append(WeatherResponse(location_list[i], location_list[i+1]))
            self._locations = resp
class WeatherPanelApp(App):
    """Demo application that shows a single WeatherPanel widget."""
    def build(self):
        """Create the root WeatherPanel from a kv snippet, start its
        update loop, and return it as the app's root widget."""
        root = Builder.load_string('''
WeatherPanel:
    locations: (51.4778, -0.0014), 'Royal Observatory', (48.858222, 2.2945), 'Eiffel Tower'
''')
        root.update_initialize()
        return root
# Run the sample application when this module is executed directly.
if __name__ == "__main__":
    WeatherPanelApp().run()
"kivy.properties.ListProperty",
"os.path.abspath",
"pyowm.OWM",
"kivy.properties.DictProperty",
"kivy.clock.Clock.schedule_interval",
"kivy_garden.graph.ScatterPlot",
"kivy.properties.StringProperty",
"numpy.random.RandomState",
"kivy_garden.graph.MeshLinePlot",
"os.environ.get",
"kivy.lang.buil... | [((760, 2086), 'kivy.lang.builder.Builder.load_string', 'Builder.load_string', (['"""\n<WeatherPanel>:\n orientation: \'vertical\'\n canvas.before:\n Color: \n rgba: root.bg_color\n Rectangle:\n pos: self.pos\n size: self.size\n BoxLayout:\n orientation: \'horizontal\'\n size_hint_y: None\n size: 0,275\n BoxLayout:\n orientation: \'vertical\'\n Spinner:\n id: selected_location\n size_hint: 1, None\n height: 50\n text: \'Loading\'\n markup: True\n background_normal: \'\'\n background_color: root.bg_color\n values: []\n on_text:\n root.update_panel()\n Image:\n id: current_image\n source: root.current_image\n SimpleTable:\n id: current\n key_size_hint_x: .4\n data: root.table_data\n key_color: root.text_color\n value_color: root.text_color\n Graph:\n id: graph\n y_ticks_major: 5\n y_grid_label: True\n x_ticks_major: 60*60\n x_grid_label: True\n padding: 5\n precision: \'%0.2f\'\n tick_color: root.text_color\n border_color: root.text_color\n label_options: {\'color\': root.text_color}\n"""'], {}), '(\n """\n<WeatherPanel>:\n orientation: \'vertical\'\n canvas.before:\n Color: \n rgba: root.bg_color\n Rectangle:\n pos: self.pos\n size: self.size\n BoxLayout:\n orientation: \'horizontal\'\n size_hint_y: None\n size: 0,275\n BoxLayout:\n orientation: \'vertical\'\n Spinner:\n id: selected_location\n size_hint: 1, None\n height: 50\n text: \'Loading\'\n markup: True\n background_normal: \'\'\n background_color: root.bg_color\n values: []\n on_text:\n root.update_panel()\n Image:\n id: current_image\n source: root.current_image\n SimpleTable:\n id: current\n key_size_hint_x: .4\n data: root.table_data\n key_color: root.text_color\n value_color: root.text_color\n Graph:\n id: graph\n y_ticks_major: 5\n y_grid_label: True\n x_ticks_major: 60*60\n x_grid_label: True\n padding: 5\n precision: \'%0.2f\'\n tick_color: root.text_color\n border_color: root.text_color\n label_options: {\'color\': root.text_color}\n"""\n )\n', (779, 2086), False, 'from 
kivy.lang.builder import Builder\n'), ((4252, 4275), 'kivy.properties.NumericProperty', 'NumericProperty', (['(60 * 5)'], {}), '(60 * 5)\n', (4267, 4275), False, 'from kivy.properties import NumericProperty, StringProperty, DictProperty, ListProperty\n'), ((4305, 4323), 'kivy.properties.NumericProperty', 'NumericProperty', (['(3)'], {}), '(3)\n', (4320, 4323), False, 'from kivy.properties import NumericProperty, StringProperty, DictProperty, ListProperty\n'), ((4430, 4450), 'kivy.properties.StringProperty', 'StringProperty', (['None'], {}), '(None)\n', (4444, 4450), False, 'from kivy.properties import NumericProperty, StringProperty, DictProperty, ListProperty\n'), ((4468, 4496), 'kivy.properties.StringProperty', 'StringProperty', (['"""fahrenheit"""'], {}), "('fahrenheit')\n", (4482, 4496), False, 'from kivy.properties import NumericProperty, StringProperty, DictProperty, ListProperty\n'), ((4514, 4540), 'kivy.properties.ListProperty', 'ListProperty', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (4526, 4540), False, 'from kivy.properties import NumericProperty, StringProperty, DictProperty, ListProperty\n'), ((4553, 4585), 'kivy.properties.ListProperty', 'ListProperty', (['[0.5, 0.5, 0.5, 1]'], {}), '([0.5, 0.5, 0.5, 1])\n', (4565, 4585), False, 'from kivy.properties import NumericProperty, StringProperty, DictProperty, ListProperty\n'), ((4600, 4657), 'kivy.properties.DictProperty', 'DictProperty', (["{'sunrise': 'Unknown', 'sunset': 'Unknown'}"], {}), "({'sunrise': 'Unknown', 'sunset': 'Unknown'})\n", (4612, 4657), False, 'from kivy.properties import NumericProperty, StringProperty, DictProperty, ListProperty\n'), ((4678, 4698), 'kivy.properties.StringProperty', 'StringProperty', (['None'], {}), '(None)\n', (4692, 4698), False, 'from kivy.properties import NumericProperty, StringProperty, DictProperty, ListProperty\n'), ((4729, 4752), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (4750, 4752), True, 'import numpy as np\n'), ((7521, 7584), 
'os.path.join', 'os.path.join', (['icon_path', "(ans.current.weather_icon_name + '.png')"], {}), "(icon_path, ans.current.weather_icon_name + '.png')\n", (7533, 7584), False, 'import os\n'), ((8107, 8142), 'kivy_garden.graph.MeshLinePlot', 'MeshLinePlot', ([], {'color': 'self.text_color'}), '(color=self.text_color)\n', (8119, 8142), False, 'from kivy_garden.graph import Graph, MeshLinePlot, ScatterPlot\n'), ((10094, 10238), 'kivy.lang.builder.Builder.load_string', 'Builder.load_string', (['"""\nWeatherPanel:\n locations: (51.4778, -0.0014), \'Royal Observatory\', (48.858222, 2.2945), \'Eiffel Tower\'\n"""'], {}), '(\n """\nWeatherPanel:\n locations: (51.4778, -0.0014), \'Royal Observatory\', (48.858222, 2.2945), \'Eiffel Tower\'\n"""\n )\n', (10113, 10238), False, 'from kivy.lang.builder import Builder\n'), ((5014, 5082), 'kivy.clock.Clock.schedule_interval', 'Clock.schedule_interval', (['self.update_data', 'self.data_update_rate_sec'], {}), '(self.update_data, self.data_update_rate_sec)\n', (5037, 5082), False, 'from kivy.clock import Clock\n'), ((5095, 5183), 'kivy.clock.Clock.schedule_interval', 'Clock.schedule_interval', (['self.choose_random_location', 'self.location_switch_rate_sec'], {}), '(self.choose_random_location, self.\n location_switch_rate_sec)\n', (5118, 5183), False, 'from kivy.clock import Clock\n'), ((5852, 5877), 'os.environ.get', 'os.environ.get', (['"""OWM_KEY"""'], {}), "('OWM_KEY')\n", (5866, 5877), False, 'import os\n'), ((6034, 6051), 'pyowm.OWM', 'OWM', (['self.owm_key'], {}), '(self.owm_key)\n', (6037, 6051), False, 'from pyowm import OWM\n'), ((8494, 8543), 'kivy_garden.graph.ScatterPlot', 'ScatterPlot', ([], {'color': '[0.2, 0.2, 1, 1]', 'point_size': '(5)'}), '(color=[0.2, 0.2, 1, 1], point_size=5)\n', (8505, 8543), False, 'from kivy_garden.graph import Graph, MeshLinePlot, ScatterPlot\n'), ((7454, 7479), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (7469, 7479), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import numpy as np
import common
def load_json(filename):
    """Parse *filename* as JSON and return the resulting object."""
    with open(filename) as handle:
        return json.loads(handle.read())
def load_data(filename):
    """Read a measurement JSON file and return (nsamples, vx, vy).

    vx and vy are returned as numpy arrays; nsamples is passed through
    unchanged from the file.
    """
    payload = load_json(filename)
    vx = np.array(payload["vx"])
    vy = np.array(payload["vy"])
    return payload["nsamples"], vx, vy
# PDFs of the velocity components at the four measurement probes
def velocity_pdfs(dir_sol, dir_fig, lx, show_plot, save_plot, plot_id):
    """Plot per-probe histograms (PDFs) of one velocity component.

    :param dir_sol: directory containing measuredData_lx<lx>.json
    :param dir_fig: output directory for the generated PDF figures
    :param lx: mesh-resolution index used in the input/output file names
    :param show_plot: forwarded to common.plotHist; show figures interactively
    :param save_plot: forwarded to common.plotHist; write figures to disk
    :param plot_id: 1 for the x-velocity component, 2 for the y-velocity
    """
    fn = dir_sol+'measuredData_lx'+str(lx)+'.json'
    nsamples, data_vx, data_vy = load_data(fn)
    titles = [r'Measured at $(x,y) = (1, 0.1)$',
              r'Measured at $(x,y) = (1, 0.2)$',
              r'Measured at $(x,y) = (1, 0.3)$',
              r'Measured at $(x,y) = (1, 0.4)$']
    # The two original branches were identical except for the component name,
    # the data array, and the per-probe axis limits; select those here once.
    if plot_id == 1:
        comp = 'x'
        data = data_vx
        xLims = np.array([[0.8, 1.10],
                          [1.3, 1.60],
                          [1.3, 1.60],
                          [0.875, 1.05]])
    elif plot_id == 2:
        comp = 'y'
        data = data_vy
        xLims = np.array([[-0.004, 0.004],
                          [-0.006, 0.006],
                          [-0.006, 0.006],
                          [-0.005, 0.005]])
    else:
        # Unknown plot_id: nothing to plot (matches original behavior).
        return
    nbins = [10]*4
    myPlotDict = {}
    myPlotDict['show_plot'] = show_plot
    myPlotDict['save_plot'] = save_plot
    myPlotDict['xlabel'] = r'velocity in $' + comp + r'$'
    myPlotDict['ylabel'] = r'number of samples'
    myPlotDict['ylim'] = [0, nsamples]
    for n in range(0,4):
        myPlotDict['title'] = titles[n]
        myPlotDict['xlim'] = xLims[n,:]
        myPlotDict['nbins'] = nbins[n]
        myPlotDict['out_filename'] = dir_fig+"cf_v"+comp+str(n)+"_lx"+str(lx)+".pdf"
        common.plotHist(data[n*nsamples:(n+1)*nsamples], myPlotDict)
if __name__ == "__main__":
    show_plot = True
    save_plot = True
    # Reynolds number of the run to post-process
    # Re = 1600
    Re = 3200
    nsamples = 480  # number of Monte Carlo samples in the run
    lx = 3  # mesh-resolution index (selects measuredData_lx<lx>.json)
    plot_id = 2  # 1 -> x-velocity PDFs, 2 -> y-velocity PDFs
    dir_sol = '../../output/uq_pincompNS_cf/Re'+str(Re)+'/samples'+str(nsamples)+'/'
    dir_fig = '../../figures/uq_incompNS/uq_cf/Re'+str(Re)+'/pdfs/samples'+str(nsamples)+'/'
    velocity_pdfs(dir_sol, dir_fig, lx, show_plot, save_plot, plot_id)
# End of file
| [
"common.plotHist",
"json.load",
"numpy.array"
] | [((170, 182), 'json.load', 'json.load', (['f'], {}), '(f)\n', (179, 182), False, 'import json\n'), ((263, 280), 'numpy.array', 'np.array', (["j['vx']"], {}), "(j['vx'])\n", (271, 280), True, 'import numpy as np\n'), ((282, 299), 'numpy.array', 'np.array', (["j['vy']"], {}), "(j['vy'])\n", (290, 299), True, 'import numpy as np\n'), ((873, 934), 'numpy.array', 'np.array', (['[[0.8, 1.1], [1.3, 1.6], [1.3, 1.6], [0.875, 1.05]]'], {}), '([[0.8, 1.1], [1.3, 1.6], [1.3, 1.6], [0.875, 1.05]])\n', (881, 934), True, 'import numpy as np\n'), ((1452, 1521), 'common.plotHist', 'common.plotHist', (['data_vx[n * nsamples:(n + 1) * nsamples]', 'myPlotDict'], {}), '(data_vx[n * nsamples:(n + 1) * nsamples], myPlotDict)\n', (1467, 1521), False, 'import common\n'), ((1569, 1647), 'numpy.array', 'np.array', (['[[-0.004, 0.004], [-0.006, 0.006], [-0.006, 0.006], [-0.005, 0.005]]'], {}), '([[-0.004, 0.004], [-0.006, 0.006], [-0.006, 0.006], [-0.005, 0.005]])\n', (1577, 1647), True, 'import numpy as np\n'), ((2153, 2222), 'common.plotHist', 'common.plotHist', (['data_vy[n * nsamples:(n + 1) * nsamples]', 'myPlotDict'], {}), '(data_vy[n * nsamples:(n + 1) * nsamples], myPlotDict)\n', (2168, 2222), False, 'import common\n')] |
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy as np
from onnx import TensorProto, helper
import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fold_constants import FoldConstants
from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames
from finn.transformation.streamline.reorder import MoveLinearPastEltwiseAdd
from finn.transformation.infer_shapes import InferShapes
from finn.transformation.double_to_single_float import DoubleToSingleFloat
import pytest
# Scratch ONNX file written by test_linear_past_eltwise_add and removed again
# at the end of that test.
export_onnx_path = "test_scalar_past_eltwise.onnx"
# construct a synthetic graph to test:
# topk insertion, topk conversion to hls, add conversion to hls
# graph should just be a sum
def make_model(shape):
    """Build a small ONNX model and return it wrapped in a ModelWrapper.

    The graph computes ``3*(inp1 + 7) + 3*(inp2 + 8)``: each input goes
    through a scalar Add followed by a scalar Mul, and the two branches are
    summed by an elementwise Add.

    :param shape: tensor shape shared by both inputs and the output
    :return: ModelWrapper around the constructed model, with the scalar
        add/mul constants already set as initializers
    """
    # Only the graph inputs and output need value_info entries; the original
    # code also built value_info objects for every intermediate tensor and
    # initializer but never used them (node wiring only needs the names).
    inp1 = helper.make_tensor_value_info("inp1", TensorProto.FLOAT, shape)
    inp2 = helper.make_tensor_value_info("inp2", TensorProto.FLOAT, shape)
    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, shape)
    # Per-branch scalar Add then scalar Mul, followed by an elementwise Add
    add1_node = helper.make_node("Add", ["inp1", "inp1_add_ct"], ["inp1_add"])
    add2_node = helper.make_node("Add", ["inp2", "inp2_add_ct"], ["inp2_add"])
    mul1_node = helper.make_node("Mul", ["inp1_add", "inp1_mul_ct"], ["inp1_mul"])
    mul2_node = helper.make_node("Mul", ["inp2_add", "inp2_mul_ct"], ["inp2_mul"])
    eltwise_add_node = helper.make_node("Add", ["inp1_mul", "inp2_mul"], ["outp"])
    graph = helper.make_graph(
        nodes=[add1_node, add2_node, mul1_node, mul2_node, eltwise_add_node],
        name="graph",
        inputs=[inp1, inp2],
        outputs=[outp],
    )
    model = helper.make_model(graph, producer_name="add-model")
    model = ModelWrapper(model)
    # set initializers for scalar add/mul nodes
    model.set_initializer(add1_node.input[1], np.array([7.0]))
    model.set_initializer(add2_node.input[1], np.array([8.0]))
    model.set_initializer(mul1_node.input[1], np.array([3.0]))
    model.set_initializer(mul2_node.input[1], np.array([3.0]))
    return model
# channels
@pytest.mark.parametrize("ch", [64])
# ifmdim
@pytest.mark.parametrize("ifmdim", [-1, 7])
def test_linear_past_eltwise_add(ch, ifmdim):
    """Check that MoveLinearPastEltwiseAdd merges the per-branch scalar
    add/mul chains past the elementwise Add without changing the output.
    ifmdim == -1 selects a 2D (1, ch) input instead of a 4D NCHW one."""
    # generate test vectors of correct shape
    if ifmdim == -1:
        input_tensor_shape = (1, ch)
    else:
        input_tensor_shape = (1, ch, ifmdim, ifmdim)
    model = make_model(input_tensor_shape)
    # Round-trip through disk so the test exercises save/load as well
    model.save(export_onnx_path)
    model = ModelWrapper(export_onnx_path)
    model = model.transform(DoubleToSingleFloat())
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveReadableTensorNames())
    x1 = np.random.randn(*input_tensor_shape).astype(np.float32)
    x2 = np.random.randn(*input_tensor_shape).astype(np.float32)
    # generate expected value from streamlined net
    input_dict = {model.graph.input[0].name: x1, model.graph.input[1].name: x2}
    output_dict = oxe.execute_onnx(model, input_dict, True)
    produced_sum = output_dict[model.graph.output[0].name]
    # 3*(x1+7) + 3*(x2+8) == 3*((x1+x2) + 15)
    expected_sum = 3.0 * ((x1 + x2) + 15.0)
    assert np.isclose(expected_sum, produced_sum, atol=1e-3).all()
    assert len(model.get_nodes_by_op_type("Add")) == 3
    assert len(model.get_nodes_by_op_type("Mul")) == 2
    model = model.transform(MoveLinearPastEltwiseAdd())
    # verify again, to check we didnt break anything
    output_dict = oxe.execute_onnx(model, input_dict, True)
    produced_sum = output_dict[model.graph.output[0].name]
    assert np.isclose(expected_sum, produced_sum, atol=1e-3).all()
    # One scalar Add and one Mul should have been merged away
    assert len(model.get_nodes_by_op_type("Add")) == 2
    assert len(model.get_nodes_by_op_type("Mul")) == 1
    os.remove(export_onnx_path)
# channels
@pytest.mark.parametrize("ch", [64, 1])
# ifmdim
@pytest.mark.parametrize("ifmdim", [-1, 7])
def test_linear_past_eltwise_add_multiple_forks(ch, ifmdim):
    """Same transform as above, but on a graph where two tensors (fork1,
    fork2) each feed multiple consumers, to check fork handling."""
    # generate test vectors of correct shape
    if ifmdim == -1:
        input_shape = (1, ch)
    else:
        input_shape = (1, ch, ifmdim, ifmdim)
    top_in = helper.make_tensor_value_info("top_in", TensorProto.FLOAT, input_shape)
    top_out = helper.make_tensor_value_info("top_out", TensorProto.FLOAT, input_shape)
    # p0..p5 are elementwise parameter tensors, filled with randoms below
    num_of_params = 6
    value_info = []
    for i in range(num_of_params):
        value_info += [
            helper.make_tensor_value_info("p" + str(i), TensorProto.FLOAT, input_shape)
        ]
    modelproto = helper.make_model(
        helper.make_graph(
            name="test",
            inputs=[top_in],
            outputs=[top_out],
            value_info=value_info,
            nodes=[
                helper.make_node("Add", ["top_in", "p0"], ["fork1"]),
                helper.make_node("Mul", ["fork1", "p1"], ["t2"]),
                helper.make_node("Mul", ["fork1", "p2"], ["t3"]),
                helper.make_node("Add", ["t2", "t3"], ["t4"]),
                helper.make_node("Mul", ["t4", "p3"], ["fork2"]),
                helper.make_node("Add", ["fork2", "p4"], ["t5"]),
                helper.make_node("Add", ["fork2", "p5"], ["t6"]),
                helper.make_node("Add", ["t5", "t6"], ["top_out"]),
            ],
        )
    )
    model = ModelWrapper(modelproto)
    model = model.transform(InferShapes())
    np.random.seed(0)
    for i in range(num_of_params):
        model.set_initializer(
            "p" + str(i), np.random.rand(*input_shape).astype(np.float32)
        )
    # need equal mults:
    model.set_initializer("p2", model.get_initializer("p1"))
    # Transform
    new_model = model.transform(MoveLinearPastEltwiseAdd())
    inp_dict = {"top_in": np.random.rand(*input_shape).astype(np.float32)}
    # Test
    # Transformed model must be numerically equivalent to the original
    assert oxe.compare_execution(model, new_model, inp_dict)
    # Expected post-transform structure: Add, Add, Mul, Mul, Add, Add
    assert new_model.graph.node[0].op_type == "Add"
    assert new_model.graph.node[1].op_type == "Add"
    assert new_model.graph.node[2].op_type == "Mul"
    assert new_model.graph.node[3].op_type == "Mul"
    assert new_model.graph.node[4].op_type == "Add"
    assert new_model.graph.node[5].op_type == "Add"
    assert len(new_model.graph.node) == 6
| [
"onnx.helper.make_node",
"os.remove",
"numpy.random.seed",
"onnx.helper.make_tensor_value_info",
"finn.transformation.double_to_single_float.DoubleToSingleFloat",
"finn.transformation.infer_shapes.InferShapes",
"finn.core.onnx_exec.execute_onnx",
"finn.transformation.general.GiveUniqueNodeNames",
"n... | [((4267, 4302), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ch"""', '[64]'], {}), "('ch', [64])\n", (4290, 4302), False, 'import pytest\n'), ((4313, 4355), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ifmdim"""', '[-1, 7]'], {}), "('ifmdim', [-1, 7])\n", (4336, 4355), False, 'import pytest\n'), ((5980, 6018), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ch"""', '[64, 1]'], {}), "('ch', [64, 1])\n", (6003, 6018), False, 'import pytest\n'), ((6029, 6071), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ifmdim"""', '[-1, 7]'], {}), "('ifmdim', [-1, 7])\n", (6052, 6071), False, 'import pytest\n'), ((2258, 2321), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""inp1"""', 'TensorProto.FLOAT', 'shape'], {}), "('inp1', TensorProto.FLOAT, shape)\n", (2287, 2321), False, 'from onnx import TensorProto, helper\n'), ((2333, 2396), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""inp2"""', 'TensorProto.FLOAT', 'shape'], {}), "('inp2', TensorProto.FLOAT, shape)\n", (2362, 2396), False, 'from onnx import TensorProto, helper\n'), ((2412, 2479), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""inp1_add"""', 'TensorProto.FLOAT', 'shape'], {}), "('inp1_add', TensorProto.FLOAT, shape)\n", (2441, 2479), False, 'from onnx import TensorProto, helper\n'), ((2498, 2566), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""inp1_add_ct"""', 'TensorProto.FLOAT', '[1]'], {}), "('inp1_add_ct', TensorProto.FLOAT, [1])\n", (2527, 2566), False, 'from onnx import TensorProto, helper\n'), ((2582, 2649), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""inp2_add"""', 'TensorProto.FLOAT', 'shape'], {}), "('inp2_add', TensorProto.FLOAT, shape)\n", (2611, 2649), False, 'from onnx import TensorProto, helper\n'), ((2668, 2736), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', 
(['"""inp2_add_ct"""', 'TensorProto.FLOAT', '[1]'], {}), "('inp2_add_ct', TensorProto.FLOAT, [1])\n", (2697, 2736), False, 'from onnx import TensorProto, helper\n'), ((2752, 2819), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""inp1_mul"""', 'TensorProto.FLOAT', 'shape'], {}), "('inp1_mul', TensorProto.FLOAT, shape)\n", (2781, 2819), False, 'from onnx import TensorProto, helper\n'), ((2838, 2906), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""inp1_mul_ct"""', 'TensorProto.FLOAT', '[1]'], {}), "('inp1_mul_ct', TensorProto.FLOAT, [1])\n", (2867, 2906), False, 'from onnx import TensorProto, helper\n'), ((2922, 2989), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""inp2_mul"""', 'TensorProto.FLOAT', 'shape'], {}), "('inp2_mul', TensorProto.FLOAT, shape)\n", (2951, 2989), False, 'from onnx import TensorProto, helper\n'), ((3008, 3076), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""inp2_mul_ct"""', 'TensorProto.FLOAT', '[1]'], {}), "('inp2_mul_ct', TensorProto.FLOAT, [1])\n", (3037, 3076), False, 'from onnx import TensorProto, helper\n'), ((3088, 3151), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""outp"""', 'TensorProto.FLOAT', 'shape'], {}), "('outp', TensorProto.FLOAT, shape)\n", (3117, 3151), False, 'from onnx import TensorProto, helper\n'), ((3169, 3240), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', '[inp1.name, inp1_add_ct.name]', '[inp1_add.name]'], {}), "('Add', [inp1.name, inp1_add_ct.name], [inp1_add.name])\n", (3185, 3240), False, 'from onnx import TensorProto, helper\n'), ((3257, 3328), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', '[inp2.name, inp2_add_ct.name]', '[inp2_add.name]'], {}), "('Add', [inp2.name, inp2_add_ct.name], [inp2_add.name])\n", (3273, 3328), False, 'from onnx import TensorProto, helper\n'), ((3345, 3420), 'onnx.helper.make_node', 'helper.make_node', 
(['"""Mul"""', '[inp1_add.name, inp1_mul_ct.name]', '[inp1_mul.name]'], {}), "('Mul', [inp1_add.name, inp1_mul_ct.name], [inp1_mul.name])\n", (3361, 3420), False, 'from onnx import TensorProto, helper\n'), ((3451, 3526), 'onnx.helper.make_node', 'helper.make_node', (['"""Mul"""', '[inp2_add.name, inp2_mul_ct.name]', '[inp2_mul.name]'], {}), "('Mul', [inp2_add.name, inp2_mul_ct.name], [inp2_mul.name])\n", (3467, 3526), False, 'from onnx import TensorProto, helper\n'), ((3564, 3632), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', '[inp1_mul.name, inp2_mul.name]', '[outp.name]'], {}), "('Add', [inp1_mul.name, inp2_mul.name], [outp.name])\n", (3580, 3632), False, 'from onnx import TensorProto, helper\n'), ((3659, 3801), 'onnx.helper.make_graph', 'helper.make_graph', ([], {'nodes': '[add1_node, add2_node, mul1_node, mul2_node, eltwise_add_node]', 'name': '"""graph"""', 'inputs': '[inp1, inp2]', 'outputs': '[outp]'}), "(nodes=[add1_node, add2_node, mul1_node, mul2_node,\n eltwise_add_node], name='graph', inputs=[inp1, inp2], outputs=[outp])\n", (3676, 3801), False, 'from onnx import TensorProto, helper\n'), ((3850, 3901), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {'producer_name': '"""add-model"""'}), "(graph, producer_name='add-model')\n", (3867, 3901), False, 'from onnx import TensorProto, helper\n'), ((3914, 3933), 'finn.core.modelwrapper.ModelWrapper', 'ModelWrapper', (['model'], {}), '(model)\n', (3926, 3933), False, 'from finn.core.modelwrapper import ModelWrapper\n'), ((4657, 4687), 'finn.core.modelwrapper.ModelWrapper', 'ModelWrapper', (['export_onnx_path'], {}), '(export_onnx_path)\n', (4669, 4687), False, 'from finn.core.modelwrapper import ModelWrapper\n'), ((5215, 5256), 'finn.core.onnx_exec.execute_onnx', 'oxe.execute_onnx', (['model', 'input_dict', '(True)'], {}), '(model, input_dict, True)\n', (5231, 5256), True, 'import finn.core.onnx_exec as oxe\n'), ((5666, 5707), 'finn.core.onnx_exec.execute_onnx', 'oxe.execute_onnx', 
(['model', 'input_dict', '(True)'], {}), '(model, input_dict, True)\n', (5682, 5707), True, 'import finn.core.onnx_exec as oxe\n'), ((5949, 5976), 'os.remove', 'os.remove', (['export_onnx_path'], {}), '(export_onnx_path)\n', (5958, 5976), False, 'import os\n'), ((6299, 6370), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""top_in"""', 'TensorProto.FLOAT', 'input_shape'], {}), "('top_in', TensorProto.FLOAT, input_shape)\n", (6328, 6370), False, 'from onnx import TensorProto, helper\n'), ((6385, 6457), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""top_out"""', 'TensorProto.FLOAT', 'input_shape'], {}), "('top_out', TensorProto.FLOAT, input_shape)\n", (6414, 6457), False, 'from onnx import TensorProto, helper\n'), ((7436, 7460), 'finn.core.modelwrapper.ModelWrapper', 'ModelWrapper', (['modelproto'], {}), '(modelproto)\n', (7448, 7460), False, 'from finn.core.modelwrapper import ModelWrapper\n'), ((7509, 7526), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (7523, 7526), True, 'import numpy as np\n'), ((7938, 7987), 'finn.core.onnx_exec.compare_execution', 'oxe.compare_execution', (['model', 'new_model', 'inp_dict'], {}), '(model, new_model, inp_dict)\n', (7959, 7987), True, 'import finn.core.onnx_exec as oxe\n'), ((4029, 4044), 'numpy.array', 'np.array', (['[7.0]'], {}), '([7.0])\n', (4037, 4044), True, 'import numpy as np\n'), ((4092, 4107), 'numpy.array', 'np.array', (['[8.0]'], {}), '([8.0])\n', (4100, 4107), True, 'import numpy as np\n'), ((4155, 4170), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (4163, 4170), True, 'import numpy as np\n'), ((4218, 4233), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (4226, 4233), True, 'import numpy as np\n'), ((4716, 4737), 'finn.transformation.double_to_single_float.DoubleToSingleFloat', 'DoubleToSingleFloat', ([], {}), '()\n', (4735, 4737), False, 'from finn.transformation.double_to_single_float import DoubleToSingleFloat\n'), 
((4767, 4780), 'finn.transformation.infer_shapes.InferShapes', 'InferShapes', ([], {}), '()\n', (4778, 4780), False, 'from finn.transformation.infer_shapes import InferShapes\n'), ((4810, 4825), 'finn.transformation.fold_constants.FoldConstants', 'FoldConstants', ([], {}), '()\n', (4823, 4825), False, 'from finn.transformation.fold_constants import FoldConstants\n'), ((4855, 4876), 'finn.transformation.general.GiveUniqueNodeNames', 'GiveUniqueNodeNames', ([], {}), '()\n', (4874, 4876), False, 'from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames\n'), ((4906, 4931), 'finn.transformation.general.GiveReadableTensorNames', 'GiveReadableTensorNames', ([], {}), '()\n', (4929, 4931), False, 'from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames\n'), ((5566, 5592), 'finn.transformation.streamline.reorder.MoveLinearPastEltwiseAdd', 'MoveLinearPastEltwiseAdd', ([], {}), '()\n', (5590, 5592), False, 'from finn.transformation.streamline.reorder import MoveLinearPastEltwiseAdd\n'), ((7489, 7502), 'finn.transformation.infer_shapes.InferShapes', 'InferShapes', ([], {}), '()\n', (7500, 7502), False, 'from finn.transformation.infer_shapes import InferShapes\n'), ((7812, 7838), 'finn.transformation.streamline.reorder.MoveLinearPastEltwiseAdd', 'MoveLinearPastEltwiseAdd', ([], {}), '()\n', (7836, 7838), False, 'from finn.transformation.streamline.reorder import MoveLinearPastEltwiseAdd\n'), ((4943, 4979), 'numpy.random.randn', 'np.random.randn', (['*input_tensor_shape'], {}), '(*input_tensor_shape)\n', (4958, 4979), True, 'import numpy as np\n'), ((5008, 5044), 'numpy.random.randn', 'np.random.randn', (['*input_tensor_shape'], {}), '(*input_tensor_shape)\n', (5023, 5044), True, 'import numpy as np\n'), ((5371, 5421), 'numpy.isclose', 'np.isclose', (['expected_sum', 'produced_sum'], {'atol': '(0.001)'}), '(expected_sum, produced_sum, atol=0.001)\n', (5381, 5421), True, 'import numpy as np\n'), ((5778, 5828), 
'numpy.isclose', 'np.isclose', (['expected_sum', 'produced_sum'], {'atol': '(0.001)'}), '(expected_sum, produced_sum, atol=0.001)\n', (5788, 5828), True, 'import numpy as np\n'), ((7866, 7894), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (7880, 7894), True, 'import numpy as np\n'), ((6878, 6930), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['top_in', 'p0']", "['fork1']"], {}), "('Add', ['top_in', 'p0'], ['fork1'])\n", (6894, 6930), False, 'from onnx import TensorProto, helper\n'), ((6948, 6996), 'onnx.helper.make_node', 'helper.make_node', (['"""Mul"""', "['fork1', 'p1']", "['t2']"], {}), "('Mul', ['fork1', 'p1'], ['t2'])\n", (6964, 6996), False, 'from onnx import TensorProto, helper\n'), ((7014, 7062), 'onnx.helper.make_node', 'helper.make_node', (['"""Mul"""', "['fork1', 'p2']", "['t3']"], {}), "('Mul', ['fork1', 'p2'], ['t3'])\n", (7030, 7062), False, 'from onnx import TensorProto, helper\n'), ((7080, 7125), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['t2', 't3']", "['t4']"], {}), "('Add', ['t2', 't3'], ['t4'])\n", (7096, 7125), False, 'from onnx import TensorProto, helper\n'), ((7143, 7191), 'onnx.helper.make_node', 'helper.make_node', (['"""Mul"""', "['t4', 'p3']", "['fork2']"], {}), "('Mul', ['t4', 'p3'], ['fork2'])\n", (7159, 7191), False, 'from onnx import TensorProto, helper\n'), ((7209, 7257), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['fork2', 'p4']", "['t5']"], {}), "('Add', ['fork2', 'p4'], ['t5'])\n", (7225, 7257), False, 'from onnx import TensorProto, helper\n'), ((7275, 7323), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['fork2', 'p5']", "['t6']"], {}), "('Add', ['fork2', 'p5'], ['t6'])\n", (7291, 7323), False, 'from onnx import TensorProto, helper\n'), ((7341, 7391), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['t5', 't6']", "['top_out']"], {}), "('Add', ['t5', 't6'], ['top_out'])\n", (7357, 7391), False, 'from onnx import 
TensorProto, helper\n'), ((7619, 7647), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (7633, 7647), True, 'import numpy as np\n')] |
"""Extract data from MALA file types and output into useful formats."""
from os.path import splitext
from numpy import fromstring
from glob import glob
#from .gpr import get_file_path
#from .filesystem import get_file_path
from .calculations import distance, time
def file_extensions():
    """Return the file extensions produced by MALA GPR equipment output."""
    return ['.cor', '.mrk', '.rad', '.rd3']
def rad2dict(path):
    """Return line header information as a dictionary.

    Header values containing a '.' are parsed as floats, other numeric
    values as ints, and non-numeric values are kept as strings.  On any
    read/parse error the exception is printed and the function falls
    through, returning None (matching the original best-effort contract).

    Attributes:
        path <string>: Full path to MALA '.rad' or '.RAD' file;
    """
    # NOTE(review): get_file_path's import is commented out at the top of
    # this module (.gpr / .filesystem); it must be restored for this to run.
    p = get_file_path(path, ext='.rad')
    try:
        d = {}
        # Context manager guarantees the handle is closed even when a
        # malformed line aborts the parse (the original leaked it).
        with open(p, 'r') as f:
            for line in f:
                # Split on the first ':' only, so values that themselves
                # contain ':' are no longer truncated.
                key, value = line.split(':', 1)
                value = value.strip()
                try:
                    value = float(value) if '.' in value else int(value)
                except ValueError:
                    pass  # best-effort: keep non-numeric values as strings
                d[key] = value
        return d
    except Exception as e:
        print(e)
def rd32arr(path, traces, samples):
    """Read a MALA '.rd3' binary file into a (samples, traces) int16 array.

    Args:
        path: Full path to the '.rd3' data file.
        traces: Number of traces stored in the file (columns of the result).
        samples: Samples per trace (rows of the result).

    Returns:
        numpy.ndarray of shape (samples, traces), dtype int16.
    """
    import numpy as np  # local import, matching arr2rd3's original style
    # np.fromfile replaces the deprecated binary-mode np.fromstring and
    # returns a writable array (fromstring on binary data is deprecated
    # since NumPy 1.14).
    raw = np.fromfile(path, dtype=np.int16)
    return raw.reshape(traces, samples).T
def arr2rd3(array, path):
    """Write a (samples, traces) numpy array to a MALA '.rd3' binary file.

    The array is transposed so the file is written trace-by-trace in the
    array's native dtype — the inverse of ``rd32arr``.

    Args:
        array: 2-D numpy array of shape (samples, traces).
        path: Destination file path (conventionally ending in '.rd3').
    """
    with open(path, 'wb') as f:
        # One bulk write of the transposed bytes replaces the original
        # element-by-element loop: identical bytes, far fewer writes.
        f.write(array.T.tobytes())
class RD3:
    """Create a MALA object for use in general GPR methods. Note that slight variations in frequency between lines require rounding of the time interval to force equality of time-values between adjacent lines. Reading of separate files should be independent of one another so that the class does not fail if one is missing, or if an isolated file needs to be inspected."""
    def __init__(self, path):
        """Load the .rad header and .rd3 data for one GPR line.

        Args:
            path: Path to any of the line's files; sibling extensions
                are resolved via get_file_path.
        """
        # NOTE(review): get_file_path's import is commented out at the top
        # of this module; it must be restored for this class to work.
        p, e = get_file_path, file_extensions
        # Resolve the sibling .cor/.mrk/.rad/.rd3 paths for this line.
        cor, mrk, rad, rd3 = [p(path, i) for i in e()]
        m = rad2dict(rad)            # header metadata
        t = m['LAST TRACE']          # number of traces
        s = m['SAMPLES']             # samples per trace
        d = m['DISTANCE INTERVAL']   # trace spacing along the line
        f = m['FREQUENCY']           # sampling frequency
        a = rd32arr(rd3, t, s)       # (samples, traces) data array
        x = distance(d, t)           # along-line distance axis
        y = time(f, s, precision=5)  # two-way travel-time axis
        self.array = a
        self.x = x
        self.y = y
        self.x_precision = 4
        self.y_precision = 7
        self.traces = t
        self.samples = s
        self.step = d
        self.frequency = f
        self.path = path
    def write(self, path):
        """Write the trace array to *path*, forcing a '.rd3' extension."""
        base, _ = splitext(path)
        # Bug fix: arr2rd3 takes (array, path); the previous call passed
        # the output path as the array and (traces, samples) as the path.
        arr2rd3(self.array, base + '.rd3')
class RD3WorkInProgress:
    """Create a MALA object for use in GPR methods. Note that slight variations in frequency between lines require rounding of the time interval to force equality of time-values between adjacent lines. Reading of separate files should be independent of one another so that the class does not fail if one is missing, or if an isolated file needs to be inspected."""
    def __init__(self, path, traces, samples):
        """Load an '.rd3' file of known dimensions.

        Args:
            path: Full path to the '.rd3' data file.
            traces: Number of traces in the file.
            samples: Number of samples per trace.
        """
        # Bug fix: the original passed the get_file_path *function* to
        # rd32arr instead of the file path.
        self.array = rd32arr(path, traces, samples)
        # Bug fix: store the dimensions (the original never set these,
        # so write() raised AttributeError).
        self.traces = traces
        self.samples = samples
    def write(self, path):
        """Write the trace array to *path*, forcing a '.rd3' extension."""
        base, _ = splitext(path)
        # Bug fix: arr2rd3 takes (array, path), not (path, traces, samples).
        arr2rd3(self.array, base + '.rd3')
class RADWorkInProgress:
    """Read and write MALA RAD files."""
    def __init__(self, path):
        # Path to the '.rad' header file.
        self.p = path
    def to_dict(self):
        """Parse the header file into a dictionary via rad2dict."""
        d = rad2dict(self.p)
        return(d)
    def to_file(self, path):
        # NOTE(review): unimplemented stub — it builds the list of RAD
        # header keys (in MALA file order) but never writes anything to
        # *path*; the method currently returns None.
        lst = [
            'SAMPLES',
            'FREQUENCY',
            'FREQUENCY STEPS',
            'SIGNAL POSITION',
            'RAW SIGNAL POSITION',
            'DISTANCE FLAG',
            'TIME FLAG',
            'PROGRAM FLAG',
            'EXTERNAL FLAG',
            'TIME INTERVAL',
            'DISTANCE INTERVAL',
            'OPERATOR',
            'CUSTOMER',
            'SITE',
            'ANTENNAS',
            'ANTENNA ORIENTATION',
            'ANTENNA SEPARATION',
            'COMMENT',
            'TIMEWINDOW',
            'STACKS',
            'STACK EXPONENT',
            'STACKING TIME',
            'LAST TRACE',
            'STOP POSITION',
            'SYSTEM CALIBRATION',
            'START POSITION',
            'SHORT FLAG',
            'INTERMEDIATE FLAG',
            'LONG FLAG',
            'PREPROCESSING',
            'HIGH',
            'LOW',
            'FIXED INCREMENT',
            'FIXED MOVES UP',
            'FIXED MOVES DOWN',
            'FIXED POSITION',
            'WHEEL CALIBRATION',
            'POSITIVE DIRECTION',
        ]
def combine_rads(lst):
    """Combine a list of rad files into a dictionary of lists. The method is
    useful for comparing the metadata of a group of GPR lines to check for
    compatability in creating a 3D grid. A property can be checked by calling
    set(d['key']); the property for all lines is equal if the length of the
    result is equal to 1."""
    # FIXME(review): 'ramacpath' is undefined in this module (the original
    # author also flagged this function as likely broken by modifications
    # elsewhere); it presumably should be get_file_path — confirm before use.
    d = {}
    for i in lst:
        p = ramacpath(i, '.rad')
        r = rad2dict(p)
        for key, value in r.items():
            # setdefault replaces the original try/except-KeyError grouping.
            d.setdefault(key, []).append(value)
    return d
| [
"os.path.splitext",
"numpy.concatenate"
] | [((1449, 1485), 'numpy.concatenate', 'np.concatenate', (['[i for i in array.T]'], {}), '([i for i in array.T])\n', (1463, 1485), True, 'import numpy as np\n'), ((2678, 2692), 'os.path.splitext', 'splitext', (['path'], {}), '(path)\n', (2686, 2692), False, 'from os.path import splitext\n'), ((3389, 3403), 'os.path.splitext', 'splitext', (['path'], {}), '(path)\n', (3397, 3403), False, 'from os.path import splitext\n')] |
import numpy as np
class RandomGenerator:
    """Emit streams of symbol indices drawn from ``n_symbols`` alternatives.

    In the default mode symbols are drawn i.i.d. uniformly (as a numpy
    array).  In "smart" mode a shuffled permutation of all symbols is
    emitted first, then extended with reshuffled recent symbols (skipping
    the newest ``offset``) until ``n`` symbols are available (as a list).
    """

    def __init__(self, n_symbols, smart=False, offset=2):
        self.n_symbols = n_symbols
        self.smart = smart
        self.offset = offset

    def next_symbols(self, n):
        """Return the next ``n`` symbols."""
        if not self.smart:
            # i.i.d. uniform draw over [0, n_symbols)
            return np.random.randint(0, self.n_symbols, n)
        out = list(range(self.n_symbols))
        np.random.shuffle(out)
        while len(out) < n:
            # Recycle the most recent window, excluding the newest
            # ``offset`` symbols, in a fresh random order.
            extra = out[-self.n_symbols:-self.offset]
            np.random.shuffle(extra)
            out.extend(extra)
        return out[:n]
class SequenceGenerator:
    """Replay a fixed symbol sequence in consecutive chunks.

    The sequence is rotated left by ``global_shift`` on construction.
    shuffle='global' permutes the whole rotated sequence once;
    shuffle='line' permutes each returned chunk independently.
    """

    def __init__(self, sequence, global_shift=0, shuffle=None):
        rotated = sequence[global_shift:] + sequence[:global_shift]
        if shuffle == 'global':
            np.random.shuffle(rotated)
        self.sequence = rotated
        self.shuffle_line = (shuffle == 'line')
        self.shift = 0  # read cursor into self.sequence

    def next_symbols(self, n):
        """Return the next ``n`` symbols and advance the cursor."""
        start = self.shift
        chunk = self.sequence[start:start + n]
        self.shift = start + n
        if self.shuffle_line:
            # The slice is a fresh list, so shuffling it does not
            # disturb the stored sequence.
            np.random.shuffle(chunk)
        return chunk
| [
"numpy.random.randint",
"numpy.random.shuffle"
] | [((301, 340), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.n_symbols', 'n'], {}), '(0, self.n_symbols, n)\n', (318, 340), True, 'import numpy as np\n'), ((417, 443), 'numpy.random.shuffle', 'np.random.shuffle', (['symbols'], {}), '(symbols)\n', (434, 443), True, 'import numpy as np\n'), ((898, 930), 'numpy.random.shuffle', 'np.random.shuffle', (['self.sequence'], {}), '(self.sequence)\n', (915, 930), True, 'import numpy as np\n'), ((1140, 1166), 'numpy.random.shuffle', 'np.random.shuffle', (['symbols'], {}), '(symbols)\n', (1157, 1166), True, 'import numpy as np\n'), ((565, 595), 'numpy.random.shuffle', 'np.random.shuffle', (['new_symbols'], {}), '(new_symbols)\n', (582, 595), True, 'import numpy as np\n')] |
import os
from transformers import (
PreTrainedModel,
PreTrainedTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer
)
import csv, re
import torch, numpy as np, pandas as pd
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from argparse import ArgumentParser
import h5py
import pytorch_lightning as pl
class Roberta_ft(pl.LightningModule):
    """Regression head on top of a fine-tuned RoBERTa masked-LM.

    Pools RoBERTa hidden states through two linear layers to predict a
    scalar score for edited headlines (task-1 data).
    """
    def __init__(self, hparams):
        super(Roberta_ft, self).__init__()
        config_class, model_class, tokenizer_class = RobertaConfig, RobertaForMaskedLM, RobertaTokenizer
        config = config_class.from_pretrained('roberta-base', cache_dir=None)
        # Expose all hidden states so forward() can pool an inner layer.
        config.__dict__["output_hidden_states"] = True
        # NOTE(review): hard-coded absolute checkpoint path — only works
        # on the original author's machine.
        self.model = model_class.from_pretrained(
            '/home/speed/PycharmProjects/transformers/examples/checkpoint-14000-best/pytorch_model_best.bin',
            from_tf=bool(
                ".ckpt" in '/home/speed/PycharmProjects/transformers/examples/checkpoint-14000-best/pytorch_model_best.bin'),
            config=config,
            cache_dir=None,
        )
        self.model.cuda()
        self.model.eval()
        # not the best model...
        self.hparams = hparams
        # Regression head: 768-d pooled features -> 256 -> scalar score.
        self.l1 = torch.nn.Linear(768, 256)
        self.l2 = torch.nn.Linear(256, 1)
    def forward(self, edited_input_id, edited_token_type_id, edited_attention_mask):
        # NOTE(review): leftover debugging — every call prints the input
        # and terminates the process before any computation runs.
        print(edited_input_id)
        exit()
        _, hidden = self.model(input_ids=edited_input_id, attention_mask=edited_attention_mask,
                               token_type_ids=edited_token_type_id)
        # NOTE(review): 'feature' is assigned but never used below; the
        # torch.mean call is applied to 'hidden' instead, and .numpy()
        # breaks the autograd graph — this path looks unfinished.
        feature = hidden[-2]
        x = torch.mean(hidden, dim=1).detach().numpy()
        f1 = torch.relu(self.l1(x.view(x.size(0), -1)))
        out = self.l2(f1)
        return out
    def training_step(self, batch, batch_idx):
        """One training step: MSE between predicted and gold score."""
        id, edited_input_id, edited_token_type_id, edited_attention_mask, unedited_input_id, unedited_token_type_id, unedited_attention_mask, score = batch
        y_hat = self.forward(edited_input_id, edited_token_type_id, edited_attention_mask)
        return {'loss': F.mse_loss(y_hat.squeeze(), score)}
    def validation_step(self, batch, batch_idx):
        # NOTE(review): this 4-item batch layout and the single-argument
        # forward() call do not match training_step/forward above — verify
        # against the intended DataLoader output.
        id, edited, unedited, score = batch
        y_hat = self.forward(edited)
        return {'val_loss': F.mse_loss(y_hat.squeeze(), score)}
    def validation_end(self, outputs):
        """Aggregate per-batch validation losses into one mean value."""
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        return {'avg_val_loss': avg_loss}
    def test_step(self, batch, batch_idx):
        # NOTE(review): same forward() signature mismatch as validation_step.
        id, edited, unedited = batch
        y_hat = self.forward(edited)
        return {'pred': y_hat, 'id': id}
    def test_end(self, outputs):
        """Collect all test predictions and dump them to task-1-output.csv."""
        all_preds = []
        all_ids = []
        for x in outputs:
            all_preds += list(x['pred'])
            all_ids += list(x['id'])
        all_preds = [float(ap) for ap in all_preds]
        all_ids = [int(ai) for ai in all_ids]
        df = pd.DataFrame(data={'id': all_ids, 'pred': all_preds})
        df.to_csv("./task-1-output.csv", sep=',', index=False)
        return {'all_preds': all_preds, 'all_ids': all_ids}
    def configure_optimizers(self):
        # REQUIRED
        # can return multiple optimizers and learning_rate schedulers
        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
    @pl.data_loader
    def train_dataloader(self):
        # REQUIRED
        return DataLoader(S_bert_not_finetuend_Data(train=True), batch_size=self.hparams.batch_size)
    @pl.data_loader
    def val_dataloader(self):
        return DataLoader(S_bert_not_finetuend_Data(val=True), batch_size=self.hparams.batch_size)
    @pl.data_loader
    def test_dataloader(self):
        return DataLoader(S_bert_not_finetuend_Data(test=True), batch_size=self.hparams.batch_size)
    @staticmethod
    def add_model_specific_args(parent_parser):
        """
        Specify the hyperparams for this LightningModule
        """
        # MODEL specific
        parser = ArgumentParser(parents=[parent_parser])
        parser.add_argument('--learning_rate', default=0.02, type=float)
        parser.add_argument('--batch_size', default=32, type=int)
        # training specific (for this model)
        parser.add_argument('--max_nb_epochs', default=2, type=int)
        return parser
class S_bert_not_finetuend_Data(Dataset):
    """Dataset of RoBERTa-tokenized edited/unedited headlines (task-1 CSVs).

    Exactly one of train/val/test is expected to be True; each selects a
    hard-coded CSV path on the original author's machine.
    """
    def __init__(self, train=False, val=False, test=False):
        super(S_bert_not_finetuend_Data, self).__init__()
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base', cache_dir=None)
        # If no split flag is set, csv.reader iterates an empty list and
        # the dataset ends up empty.
        file = []
        file1 = []
        if train:
            file = open('/home/speed/PycharmProjects/funnyAgain/data/task-1/train.csv', 'r')
            file1 = open('/home/speed/PycharmProjects/funnyAgain/data/task-1/train_funlines.csv', 'r')
        if val:
            file = open('/home/speed/PycharmProjects/funnyAgain/data/task-1/dev.csv', 'r')
        if test:
            file = open('/home/speed/PycharmProjects/funnyAgain/data/task-1/test.csv', 'r')
        reader = csv.reader(file)
        ids = []
        unedited = []
        edited = []
        scores = []
        edits = []
        replacements = []
        for i, lines in enumerate(reader):
            # Skip the CSV header row.
            if i == 0:
                continue
            # Columns used: 0=id, 1=headline with the edited word wrapped
            # in <word/>, 2=replacement word, 4=score (train/dev only) —
            # assumed from the indexing below; confirm against the CSVs.
            Id = lines[0]
            line = lines[1]
            edit = lines[2]
            if not test:
                score = lines[4]
                scores.append(score)
            ids.append(Id)
            # The edited word is marked up as '<word/>' in the headline.
            match = re.search(r'<.*/>', line)
            replacements.append(line[match.start() + 1: match.end() - 2])
            # Original headline: strip only the markup characters.
            unedited.append(re.sub(r'[</>]', '', line))
            # Edited headline: substitute the replacement word.
            edited.append(re.sub(r'<.*/>', edit, line))
            edits.append(edit)
        # if train:
        #     reader = csv.reader(file1)
        #     fids = []
        #     funedited = []
        #     fedited = []
        #     fscores = []
        #     fedits = []
        #     freplacements = []
        #     for i, lines in enumerate(reader):
        #         if i == 0:
        #             continue
        #         Id = lines[0]
        #         line = lines[1]
        #         edit = lines[2]
        #         score = lines[4]
        #
        #         fids.append(Id)
        #         match = re.search(r'<.*/>', line)
        #         freplacements.append(line[match.start() + 1: match.end() - 2])
        #         funedited.append(re.sub(r'[</>]', '', line))
        #         fedited.append(re.sub(r'<.*/>', edit, line))
        #         fscores.append(score)
        #         fedits.append(edit)
        #
        #     ids = ids + fids
        #     edited = edited + fedited
        #     unedited = unedited + funedited
        #     scores = scores + fscores
        #     edits = edits + fedits
        # Tokenize all headlines in one batch, capped at 512 tokens.
        edited = tokenizer.batch_encode_plus(edited, add_special_tokens=True, max_length=512)
        unedited = tokenizer.batch_encode_plus(unedited, add_special_tokens=True, max_length=512)
        self.ids = ids
        self.unedited = unedited
        self.edited = edited
        self.scores = scores
        self.edits = edits
        self.replacements = replacements
        self.train = train
        self.val = val
        self.test = test
    def __getitem__(self, index):
        """Return one example: id, edited/unedited encodings and (non-test) score."""
        if not self.test:
            # print(self.ids[index], self.edited[index], self.unedited[index], self.scores[index])
            # id, edited_input_id, edited_token_type_id, edited_attention_mask, unedited_input_id, unedited_token_type_id, unedited_attention_mask, score
            # print(torch.tensor(self.edited['input_ids'][index], dtype=torch.long).unsqueeze(dim=0).shape)
            # return int(self.ids[index]), torch.tensor(self.edited['input_ids'][index], dtype=torch.long).unsqueeze(dim=0), torch.tensor(
            #     self.edited['token_type_ids'][index], dtype=torch.long).unsqueeze(dim=0), torch.tensor(
            #     self.edited['attention_mask'][index], dtype=torch.long).unsqueeze(dim=0), torch.tensor(
            #     self.unedited['input_ids'][index], dtype=torch.long).unsqueeze(dim=0), torch.tensor(
            #     self.unedited['token_type_ids'][index], dtype=torch.long).unsqueeze(dim=0), torch.tensor(
            #     self.unedited['attention_mask'][index], dtype=torch.long).unsqueeze(dim=0), np.array(self.scores[index])
            # print(type(self.edited['input_ids'][index]))
            # print(self.edited['input_ids'][index])
            # print(self.edited['token_type_ids'][index])
            # print(self.edited['attention_mask'][index])
            # print(self.unedited['input_ids'][index])
            # print(self.unedited['token_type_ids'][index])
            # print(self.unedited['attention_mask'][index])
            # print(np.array(self.scores[index]))
            # exit()
            # NOTE(review): scores are the raw CSV strings, so np.array()
            # here yields a 0-d string array — confirm downstream casting.
            return int(self.ids[index]), self.edited['input_ids'][index], self.edited['token_type_ids'][index], \
                   self.edited['attention_mask'][index], self.unedited['input_ids'][index], \
                   self.unedited['token_type_ids'][index], self.unedited['attention_mask'][index], np.array(
                self.scores[index])
        else:
            # NOTE(review): the test branch indexes self.edited/self.unedited
            # directly instead of via 'input_ids' — this looks inconsistent
            # with the branch above; verify before use.
            return int(self.ids[index]), torch.tensor(self.edited[index], dtype=torch.long).unsqueeze(
                dim=0), torch.tensor(
                self.unedited[index], dtype=torch.long).unsqueeze(dim=0)
    def __len__(self):
        """Number of examples loaded from the CSV."""
        return len(self.ids)
| [
"pandas.DataFrame",
"torch.mean",
"csv.reader",
"argparse.ArgumentParser",
"torch.stack",
"transformers.RobertaTokenizer.from_pretrained",
"numpy.array",
"torch.nn.Linear",
"torch.tensor",
"re.search",
"re.sub"
] | [((1238, 1263), 'torch.nn.Linear', 'torch.nn.Linear', (['(768)', '(256)'], {}), '(768, 256)\n', (1253, 1263), False, 'import torch, numpy as np, pandas as pd\n'), ((1282, 1305), 'torch.nn.Linear', 'torch.nn.Linear', (['(256)', '(1)'], {}), '(256, 1)\n', (1297, 1305), False, 'import torch, numpy as np, pandas as pd\n'), ((2947, 3000), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'id': all_ids, 'pred': all_preds}"}), "(data={'id': all_ids, 'pred': all_preds})\n", (2959, 3000), True, 'import torch, numpy as np, pandas as pd\n'), ((3997, 4036), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'parents': '[parent_parser]'}), '(parents=[parent_parser])\n', (4011, 4036), False, 'from argparse import ArgumentParser\n'), ((4496, 4560), 'transformers.RobertaTokenizer.from_pretrained', 'RobertaTokenizer.from_pretrained', (['"""roberta-base"""'], {'cache_dir': 'None'}), "('roberta-base', cache_dir=None)\n", (4528, 4560), False, 'from transformers import PreTrainedModel, PreTrainedTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer\n'), ((5047, 5063), 'csv.reader', 'csv.reader', (['file'], {}), '(file)\n', (5057, 5063), False, 'import csv, re\n'), ((5504, 5528), 're.search', 're.search', (['"""<.*/>"""', 'line'], {}), "('<.*/>', line)\n", (5513, 5528), False, 'import csv, re\n'), ((2398, 2443), 'torch.stack', 'torch.stack', (["[x['val_loss'] for x in outputs]"], {}), "([x['val_loss'] for x in outputs])\n", (2409, 2443), False, 'import torch, numpy as np, pandas as pd\n'), ((5632, 5657), 're.sub', 're.sub', (['"""[</>]"""', '""""""', 'line'], {}), "('[</>]', '', line)\n", (5638, 5657), False, 'import csv, re\n'), ((5686, 5713), 're.sub', 're.sub', (['"""<.*/>"""', 'edit', 'line'], {}), "('<.*/>', edit, line)\n", (5692, 5713), False, 'import csv, re\n'), ((9146, 9174), 'numpy.array', 'np.array', (['self.scores[index]'], {}), '(self.scores[index])\n', (9154, 9174), True, 'import torch, numpy as np, pandas as pd\n'), ((1643, 1668), 'torch.mean', 
'torch.mean', (['hidden'], {'dim': '(1)'}), '(hidden, dim=1)\n', (1653, 1668), False, 'import torch, numpy as np, pandas as pd\n'), ((9247, 9297), 'torch.tensor', 'torch.tensor', (['self.edited[index]'], {'dtype': 'torch.long'}), '(self.edited[index], dtype=torch.long)\n', (9259, 9297), False, 'import torch, numpy as np, pandas as pd\n'), ((9333, 9385), 'torch.tensor', 'torch.tensor', (['self.unedited[index]'], {'dtype': 'torch.long'}), '(self.unedited[index], dtype=torch.long)\n', (9345, 9385), False, 'import torch, numpy as np, pandas as pd\n')] |
from os.path import sep
from pathlib import Path
from typing import List, Tuple
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
import cv2
import numpy as np
import torch
from NaMAZU.functional.image_control import apply_mask_to
from PIL import Image
from PIL.Image import Image as PILImage
from pytorch_lightning import LightningModule
from torch import Tensor
from torchvision import transforms
from .u2net import U2NET, U2NETP, RescaleT, ToTensorLab
__all__ = ["LitU2Net"]
class LitU2Net(LightningModule):
    """Lightning wrapper around the U^2-Net segmentation network.

    Downloads one of four pretrained checkpoints ("basic", "mobile",
    "human", "portrait") and exposes prediction / mask-application
    helpers alongside the Lightning train/validation hooks.
    """
    def __init__(
        self,
        in_chans: int = 3,
        out_chans: int = 1,
        model_type: Literal["basic", "mobile", "human", "portrait"] = "basic",
        train_model: bool = False,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.save_hyperparameters()
        self.__load_model()
        # Rescale to 320px and normalize — the preprocessing the
        # checkpoints were trained with (see u2net.RescaleT/ToTensorLab).
        self.preprocess = transforms.Compose([RescaleT(320), ToTensorLab(flag=0)])
        if train_model:
            self.model.train()
            # NOTE(review): size_average is deprecated in torch.nn.BCELoss;
            # size_average=True corresponds to reduction='mean'.
            self.bce_loss = torch.nn.BCELoss(size_average=True)
    def forward(self, x: Tensor) -> Tuple[Tensor, ...]:
        """Run the network; returns the tuple of side outputs (d0..d6)."""
        if x.dim() == 3:
            # Add the missing batch dimension for single images.
            x = x.unsqueeze(0)
        x = x.to(self.device)
        return self.model(x)
    def configure_optimizers(self):
        # Adam over the U^2-Net parameters with the reference settings.
        optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=0.001,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=0,
        )
        return optimizer
    def training_step(self, batch, batch_idx):
        """Deep-supervision BCE over all side outputs; logs the fused (d0) loss."""
        x, y = batch
        y_hat = self.forward(x)
        leading_loss, loss = self.__multi_bce_loss_fusion(*y_hat, labels_v=y)
        return {"loss": loss, "log": {"train_loss": loss, "train_tar": leading_loss}}
    def validation_step(self, batch, batch_idx):
        """Same loss as training_step, reported under val_* keys."""
        x, y = batch
        y_hat = self.forward(x)
        leading_loss, loss = self.__multi_bce_loss_fusion(*y_hat, labels_v=y)
        return {"val_loss": loss, "log": {"val_loss": loss, "val_tar": leading_loss}}
    def __multi_bce_loss_fusion(self, d0, d1, d2, d3, d4, d5, d6, labels_v):
        """Return (loss on fused output d0, sum of BCE losses over d0..d6)."""
        loss0 = self.bce_loss(d0, labels_v)
        loss1 = self.bce_loss(d1, labels_v)
        loss2 = self.bce_loss(d2, labels_v)
        loss3 = self.bce_loss(d3, labels_v)
        loss4 = self.bce_loss(d4, labels_v)
        loss5 = self.bce_loss(d5, labels_v)
        loss6 = self.bce_loss(d6, labels_v)
        loss = loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6
        # NOTE(review): per-step print of every partial loss — consider
        # routing through the Lightning logger instead.
        print(
            "l0: %3f, l1: %3f, l2: %3f, l3: %3f, l4: %3f, l5: %3f, l6: %3f\n"
            % (
                loss0.data.item(),
                loss1.data.item(),
                loss2.data.item(),
                loss3.data.item(),
                loss4.data.item(),
                loss5.data.item(),
                loss6.data.item(),
            )
        )
        return loss0, loss
    def predict(self, x_path: str, save: bool = False, save_path: str = "",) -> Tensor:
        """Predict a mask for the image at *x_path*.

        Args:
            x_path: Path to the input image (read with OpenCV).
            save: When True, also write the mask as a PNG under save_path.
            save_path: Output directory used when save is True.

        Returns:
            Min-max normalized mask tensor from the fused side output.
        """
        x = cv2.imread(x_path)
        x = self.__input_preprocess(x)
        d1 = self.forward(x)[0]
        pred = d1[:, 0, :, :]
        pred = self.__normPRED(pred)
        if save:
            self.__save_output(x_path, pred, save_path)
        return pred
    def __normPRED(self, d):
        # Min-max normalize d into [0, 1].
        ma = torch.max(d)
        mi = torch.min(d)
        dn = (d - mi) / (ma - mi)
        return dn
    def __save_output(self, original_image: str, pred: Tensor, d_dir: str):
        """Resize the mask to the source image size and save it as '<stem>.png' under d_dir."""
        predict = pred
        predict = predict.squeeze()
        predict_np = predict.cpu().data.numpy()
        im = Image.fromarray(predict_np * 255).convert("RGB")
        img_name = original_image.split(sep)[-1]
        # image = io.imread(original_image)
        image = Image.open(original_image)
        imo = im.resize((image.size[0], image.size[1]), resample=Image.BILINEAR)
        # NOTE(review): pb_np is computed but never used.
        pb_np = np.array(imo)
        # Rebuild the filename stem, preserving any interior dots.
        aaa = img_name.split(".")
        bbb = aaa[0:-1]
        imidx = bbb[0]
        for i in range(1, len(bbb)):
            imidx = imidx + "." + bbb[i]
        out_dir = Path(d_dir)
        if not out_dir.exists():
            out_dir.mkdir(parents=True)
        imo.save(out_dir / (imidx + ".png"))
    def apply_mask(self, prediction: Tensor, original_image: str) -> PILImage:
        """Resize *prediction* to the original image size and apply it via apply_mask_to."""
        predict_np = prediction.squeeze().cpu().data.numpy()
        im = Image.fromarray(predict_np * 255).convert("RGB")
        image = Image.open(original_image)
        mask = im.resize((image.size[0], image.size[1]), resample=Image.BILINEAR)
        return apply_mask_to(image, mask)
    def __input_preprocess(self, x: PILImage) -> Tensor:
        # Rescale/normalize and cast to a float tensor.
        processed = self.preprocess(x).type(torch.FloatTensor)  # type: ignore
        return processed
    def __load_model(self) -> None:
        """Download checkpoint file and load the model.
        """
        # Checkpoints hosted on the NaMAZU GitHub releases page.
        url_dict = {
            "basic": "https://github.com/NMZ0429/NaMAZU/releases/download/Checkpoint/basic.pth",
            "mobile": "https://github.com/NMZ0429/NaMAZU/releases/download/Checkpoint/mobile.pth",
            "human": "https://github.com/NMZ0429/NaMAZU/releases/download/Checkpoint/human_seg.pth",
            "portrait": "https://github.com/NMZ0429/NaMAZU/releases/download/Checkpoint/portrait.pth",
        }
        if not self.hparams.model_type in url_dict:  # type: ignore
            raise ValueError(f"model_type {self.hparams.model_type} is not supported")  # type: ignore
        url = url_dict[self.hparams.model_type]  # type: ignore
        # "mobile" uses the lightweight U2NETP variant; everything else
        # uses the full U2NET.
        if self.hparams.model_type == "mobile":  # type: ignore
            self.model = U2NETP(
                in_chans=self.hparams.in_chans, out_chans=self.hparams.out_chans  # type: ignore
            )
        else:
            self.model = U2NET(
                in_chans=self.hparams.in_chans, out_chans=self.hparams.out_chans  # type: ignore
            )
        st_dict = torch.hub.load_state_dict_from_url(url, map_location=self.device,)
        self.model.load_state_dict(state_dict=st_dict)
        self.model.eval()
| [
"torch.hub.load_state_dict_from_url",
"torch.nn.BCELoss",
"PIL.Image.open",
"cv2.imread",
"pathlib.Path",
"numpy.array",
"torch.max",
"NaMAZU.functional.image_control.apply_mask_to",
"PIL.Image.fromarray",
"torch.min"
] | [((3038, 3056), 'cv2.imread', 'cv2.imread', (['x_path'], {}), '(x_path)\n', (3048, 3056), False, 'import cv2\n'), ((3332, 3344), 'torch.max', 'torch.max', (['d'], {}), '(d)\n', (3341, 3344), False, 'import torch\n'), ((3358, 3370), 'torch.min', 'torch.min', (['d'], {}), '(d)\n', (3367, 3370), False, 'import torch\n'), ((3782, 3808), 'PIL.Image.open', 'Image.open', (['original_image'], {}), '(original_image)\n', (3792, 3808), False, 'from PIL import Image\n'), ((3907, 3920), 'numpy.array', 'np.array', (['imo'], {}), '(imo)\n', (3915, 3920), True, 'import numpy as np\n'), ((4100, 4111), 'pathlib.Path', 'Path', (['d_dir'], {}), '(d_dir)\n', (4104, 4111), False, 'from pathlib import Path\n'), ((4451, 4477), 'PIL.Image.open', 'Image.open', (['original_image'], {}), '(original_image)\n', (4461, 4477), False, 'from PIL import Image\n'), ((4576, 4602), 'NaMAZU.functional.image_control.apply_mask_to', 'apply_mask_to', (['image', 'mask'], {}), '(image, mask)\n', (4589, 4602), False, 'from NaMAZU.functional.image_control import apply_mask_to\n'), ((5921, 5986), 'torch.hub.load_state_dict_from_url', 'torch.hub.load_state_dict_from_url', (['url'], {'map_location': 'self.device'}), '(url, map_location=self.device)\n', (5955, 5986), False, 'import torch\n'), ((1080, 1115), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {'size_average': '(True)'}), '(size_average=True)\n', (1096, 1115), False, 'import torch\n'), ((3624, 3657), 'PIL.Image.fromarray', 'Image.fromarray', (['(predict_np * 255)'], {}), '(predict_np * 255)\n', (3639, 3657), False, 'from PIL import Image\n'), ((4386, 4419), 'PIL.Image.fromarray', 'Image.fromarray', (['(predict_np * 255)'], {}), '(predict_np * 255)\n', (4401, 4419), False, 'from PIL import Image\n')] |
"""
Alanine.py - Python module.
- Includes the AlanineDipeptideSimulation class
- Includes functions for generating the coarse model
- Includes functions for running the reinjection WE process
"""
import numpy as np
from scipy.spatial import KDTree, Voronoi, voronoi_plot_2d
from pathos.multiprocessing import ProcessPool
import random
from random import randint
from math import pi as π
import pickle
from simtk import openmm, unit
from simtk.openmm import *
from simtk.openmm.app import *
from simtk.unit import *
from openmmtools import testsystems
from sys import stdout
import mdtraj
import sys
sys.path.append("../")
import WeightedEnsemble as WE
from WeightedEnsemble import Bins
"""
Alanine Molecule Class
"""
class AlanineDipeptideSimulation():
    """Alanine dipeptide (vacuum) simulation wrapper for the WE driver.

    Bundles OpenMM positions/topology/system with helpers for Langevin
    propagation, dihedral-angle readout, bin lookup, and biased relaxation
    of a walker into a target (phi, psi) bin.
    """
    def __init__(self):
        # Positions are stored as a plain array in nanometers so instances
        # pickle cheaply when shipped to worker processes.
        self.positions = testsystems.AlanineDipeptideVacuum().positions.value_in_unit(nanometer)
        self.topology = testsystems.AlanineDipeptideVacuum().topology
        self.system = testsystems.AlanineDipeptideVacuum().system
        self.Bins = [] #only used for sampling, storing it here saves memory when calling pmap
        self.temperature = 300 #Kelvin
        self.stepsize = 0.002*picoseconds
    def minimize_energy(self):
        """Locally minimize potential energy; store relaxed positions (nm)."""
        integrator = LangevinIntegrator(self.temperature*kelvin, 1/picosecond, self.stepsize)
        simulation = Simulation(self.topology, self.system, integrator)
        simulation.context.setPositions(self.positions)
        simulation.minimizeEnergy()
        state = simulation.context.getState(getPositions=True)
        self.positions = state.getPositions(asNumpy=True).value_in_unit(nanometer)
    def get_energy(self):
        """Return the potential energy (a simtk Quantity) at current positions."""
        integrator = LangevinIntegrator(self.temperature*kelvin, 1/picosecond, self.stepsize)
        simulation = Simulation(self.topology, self.system, integrator)
        simulation.context.setPositions(self.positions)
        state = simulation.context.getState(getEnergy=True)
        return state.getPotentialEnergy()
    def Step(self, num_steps, seed=0):
        """Advance the system `num_steps` Langevin steps and update positions.

        NOTE(review): `seed` is currently unused (the setRandomNumberSeed
        call is commented out), so repeated calls are not reproducible.
        """
        integrator = LangevinIntegrator(self.temperature*kelvin, 1/picosecond, self.stepsize)
        #integrator.setRandomNumberSeed(seed)
        simulation = Simulation(self.topology, self.system, integrator)
        simulation.context.setPositions(self.positions)
        simulation.step(num_steps)
        state = simulation.context.getState(getPositions=True)
        self.positions = state.getPositions(asNumpy=True).value_in_unit(nanometer)
    def getAngles(self):
        """Return the [phi, psi] backbone dihedral angles of the current frame."""
        traj = mdtraj.Trajectory(self.positions, mdtraj.Topology.from_openmm(self.topology))
        # Atom indices of the psi and phi dihedrals for alanine dipeptide.
        psi_indices, phi_indices = [6, 8, 14, 16], [4, 6, 8, 14]
        angles = mdtraj.compute_dihedrals(traj, [phi_indices, psi_indices])
        return angles[0]
    def bin_id(self, B):
        """Return the index of the rectangular bin containing the current
        (phi, psi) angles, or -1 if no bin matches."""
        angles = self.getAngles()
        j = 0
        bin_id = -1
        while j < B.length():
            # Bins are scanned in order; B.Ω[j][1] is the upper corner.
            if angles[0] < B.Ω[j][1][0]:
                if angles[1] < B.Ω[j][1][1]:
                    bin_id = j
                    j = B.length() + 1  # sentinel: break out of the scan
            j = j + 1
        return bin_id
    def bin_id_voronoi(self, B):
        """Return the index of the nearest Voronoi site in B.Ω.

        NOTE(review): KDTree is rebuilt per call and queried with the full
        position array; confirm B.Ω holds flattened site coordinates of the
        same dimensionality as self.positions.
        """
        tree = KDTree(B.Ω)
        d, id = tree.query(self.positions)
        return id
    def AddTorsionForce(self, phi, psi, magnitude):
        """Add a harmonic restraint pulling the phi/psi dihedrals to
        (phi, psi) with strength `magnitude` (kJ)."""
        torsion_force = openmm.CustomTorsionForce("0.5*k*min(dtheta, 2*pi-dtheta)^2; dtheta = abs(theta-theta0)")
        torsion_force.addPerTorsionParameter("k")
        torsion_force.addPerTorsionParameter("theta0")
        torsion_force.addGlobalParameter("pi", 3.141592653589793)
        k = magnitude * kilojoule
        torsion_force.addTorsion(6, 8, 14, 16, [k, psi])
        torsion_force.addTorsion(4, 6, 8, 14, [k, phi])
        self.system.addForce(torsion_force)
    def RemoveTorsionForce(self, phi, psi):
        """Drop every added force by rebuilding the pristine vacuum system."""
        self.system = testsystems.AlanineDipeptideVacuum().system
    def move_particle_to_bin_minenergy(self, phi, psi, magnitude):
        """Relax the walker toward (phi, psi) under a torsional restraint of
        strength `magnitude`, then remove the restraint.  Returns positions."""
        self.AddTorsionForce(phi, psi, magnitude)
        self.minimize_energy()
        self.RemoveTorsionForce(phi, psi)
        return self.positions
    def move_particle_to_bin_minenergy_with_stepping(
            self, phi, psi, magnitude, seed, B, target_id, num_steps=100):
        """Like move_particle_to_bin_minenergy, but follows minimization with
        short MD bursts until the walker lands in bin `target_id` (or 100
        attempts pass).

        Fix: `num_steps` was referenced in the body but missing from the
        original signature (a NameError once the loop ran); it is now a
        backward-compatible keyword argument.
        """
        self.AddTorsionForce(phi, psi, magnitude)
        self.minimize_energy()
        id = -2
        count = 0
        while id != target_id and count < 10**2:
            self.Step(num_steps, seed)
            id = self.bin_id(B)
            if id == -1:
                # Outside every bin: treat as arrived to avoid spinning.
                id = target_id
            count = count + 1
        self.RemoveTorsionForce(phi, psi)
        return self.positions
    def move_particle_to_bin_stepping_old(self, phi, psi, num_steps, seed, B, target_id):
        """Obsolete: moves the walker by restrained stepping alone.

        NOTE(review): calls AddTorsionForce with two arguments although it
        requires three (magnitude); this would raise TypeError if invoked.
        Kept untouched because it is documented as superseded by the
        minimize-energy approach.
        """
        self.AddTorsionForce(phi, psi)
        id = -2
        count = 0
        while id != target_id and count < 10**2:
            self.Step(num_steps, seed)
            id = self.bin_id(B)
            if id == -1:
                id = target_id
            count = count + 1
        self.RemoveTorsionForce(phi, psi)
        return self.positions
    def sample(self, num_steps, seed):
        """Step `num_steps` and return [start_bin, end_bin] rectangular-bin ids."""
        start_bin = self.bin_id(self.Bins)
        self.Step(num_steps, seed)
        end_bin = self.bin_id(self.Bins)
        return [start_bin, end_bin]
    def sample_voronoi(self, num_steps, seed):
        """Step `num_steps` and return [start_bin, end_bin] Voronoi-bin ids."""
        start_bin = self.bin_id_voronoi(self.Bins)
        self.Step(num_steps, seed)
        end_bin = self.bin_id_voronoi(self.Bins)
        return [start_bin, end_bin]
"""
Functions for generating the coarse model
"""
def BinMidpoints(B, bin_no):
    """Return the [ϕ, ψ] midpoint of rectangular bin `bin_no`.

    B.Ω[bin_no] stores a pair of opposite corners [[x_lo, y_lo],
    [x_hi, y_hi]] (see Build_UnifBins).  The original averaged the x and y
    components of a *single* corner — e.g. ϕ = (y_lo + x_lo)/2 — which is
    not a midpoint; this now averages matching components of the two
    corners, consistent with the midpoint formula in find_target_bins.
    """
    ϕ = (B.Ω[bin_no][1][0] + B.Ω[bin_no][0][0]) / 2
    ψ = (B.Ω[bin_no][1][1] + B.Ω[bin_no][0][1]) / 2
    return [ϕ, ψ]
def Build_UnifBins(nx_bins, ny_bins):
    """Tile [-π, π]² with an nx_bins × ny_bins rectangular bin grid.

    Each bin is stored in B.Ω as a pair of opposite corners
    [[x_lo, y_lo], [x_hi, y_hi]]; every bin starts with weight 0.
    """
    x_edges = np.linspace(-π, π, nx_bins + 1)
    y_edges = np.linspace(-π, π, ny_bins + 1)
    B = Bins()
    for ix in range(nx_bins):
        for iy in range(ny_bins):
            corners = np.array(
                [[x_edges[ix], y_edges[iy]],
                 [x_edges[ix + 1], y_edges[iy + 1]]])
            B.push(corners, 0)
    B.dim = [nx_bins, ny_bins]
    return B
def Build_Voronoi_Bins(positions, energy_threshold):
    """
    Requires a selection of particle positions, such as would be generated by Build_UnifBins
    and AllocateParticlesInBins (positionsset).
    Particles above the energy threshold are removed.
    A bin structure element here is a particle, the work is done in the bin_id_voronoi method
    of the AlanineDipeptideSimulation class. Here bins are identified based on which one
    of the particles in B.Ω is closest.
    """
    # NOTE(review): this function appears unfinished -- it builds an empty
    # Bins structure, never pushes `positions` into it (the loop below is
    # commented out), never applies `energy_threshold`, and implicitly
    # returns None.  Confirm intent before relying on it.
    B = Bins()
    #for j in range(np.shape(positions)[0]):
def AllocateParticlesInBins(B, num_nodes, step_data):
    """Generate one representative configuration per bin of B.

    For each bin, a torsional restraint toward the bin midpoint is applied
    and the energy minimized (AlanineDipeptideSimulation.
    move_particle_to_bin_minenergy), in parallel over `num_nodes` workers.

    Args:
        B: bin structure with rectangular (ϕ, ψ) bins.
        num_nodes: number of worker processes for the ProcessPool.
        step_data: unused; kept for interface compatibility.

    Returns:
        list of position arrays, one per bin of B.
    """
    magnitude = 1000.0  # restraint strength (kJ) used for every bin
    phi_data = []
    psi_data = []
    magnitude_copies = []
    for j in range(B.length()):
        phi, psi = BinMidpoints(B, j)
        phi_data.append(phi)
        psi_data.append(psi)
        magnitude_copies.append(magnitude)
    pool = ProcessPool(nodes=num_nodes)
    particle = AlanineDipeptideSimulation()
    # Cleanup: removed dead locals from the original (particle_data,
    # target_ids, B_copies -- B_copies was also seeded with an extra B,
    # an off-by-one had it ever been used).
    positionsset = pool.map(
        particle.move_particle_to_bin_minenergy,
        phi_data, psi_data, magnitude_copies)
    return positionsset
def build_coarse_model(positionsset, B, num_samples_per_bin, num_nodes, num_steps):
    """Estimate a row-stochastic coarse transition matrix over B's bins.

    From each bin's representative configuration, `num_samples_per_bin`
    short trajectories of `num_steps` steps are run in parallel; each
    yields a [start_bin, end_bin] pair that is tallied into T.

    Fix: the original indexed `T[Transitions[j][0], Transitions[j][1]]`
    treating Transitions[j] (a *list* of sample pairs) as a single pair,
    so the per-sample counts were never tallied — numpy fancy-indexing
    incremented wrong entries instead.  The tally now iterates every
    sample of every bin.

    Returns:
        (n_bins, n_bins) numpy array; rows with no observed transitions
        are made absorbing (T[j, j] = 1) before row normalization.
    """
    n_bins = B.length()
    pool = ProcessPool(nodes=num_nodes)
    steps_per_sample = [num_steps for _ in range(num_samples_per_bin)]
    seed_data = [x for x in range(num_samples_per_bin)]
    transitions = []
    particle = AlanineDipeptideSimulation()
    particle.Bins = B
    particle.temperature = 1000  # hot sampling to cross barriers quickly
    T = np.zeros((n_bins, n_bins))
    for j in range(n_bins):
        particle.positions = positionsset[j]
        transitions.append(pool.map(particle.sample, steps_per_sample, seed_data))
    for bin_samples in transitions:
        for start_bin, end_bin in bin_samples:
            T[start_bin, end_bin] += 1
    for j in range(n_bins):
        if sum(T[j, :]) == 0:
            T[j, j] = 1
        T[j, :] = T[j, :] / sum(T[j, :])
    return T
def build_coarse_model_voronoi(B, num_samples_per_bin, num_nodes, num_steps):
    """Estimate a row-stochastic coarse transition matrix over Voronoi bins.

    Same procedure as build_coarse_model, but each Voronoi site B.Ω[j] is
    itself the starting configuration and bin membership comes from
    sample_voronoi (nearest-site lookup).

    Fix: as in build_coarse_model, the original tally treated each bin's
    *list* of [start, end] sample pairs as one pair, so sample counts were
    never accumulated correctly; the tally now iterates every sample.

    Returns:
        (n_bins, n_bins) numpy array; transition-free rows are made
        absorbing before row normalization.
    """
    n_bins = B.length()
    pool = ProcessPool(nodes=num_nodes)
    steps_per_sample = [num_steps for _ in range(num_samples_per_bin)]
    seed_data = [x for x in range(num_samples_per_bin)]
    transitions = []
    particle = AlanineDipeptideSimulation()
    particle.Bins = B
    particle.temperature = 1000  # hot sampling to cross barriers quickly
    T = np.zeros((n_bins, n_bins))
    for j in range(n_bins):
        particle.positions = B.Ω[j]
        transitions.append(pool.map(particle.sample_voronoi, steps_per_sample, seed_data))
    for bin_samples in transitions:
        for start_bin, end_bin in bin_samples:
            T[start_bin, end_bin] += 1
    for j in range(n_bins):
        if sum(T[j, :]) == 0:
            T[j, j] = 1
        T[j, :] = T[j, :] / sum(T[j, :])
    return T
def mutation(pos, stepsize, seed):
    """Propagate configuration `pos` by `stepsize` Langevin steps; return the result."""
    walker = AlanineDipeptideSimulation()
    walker.positions = pos
    walker.Step(stepsize, seed)
    return walker.positions
"""
Functions for running the reinjection process
"""
def In_target(ϕ, ψ):
    """Return 1 if (ϕ, ψ) lies strictly inside the square target region, else 0."""
    centre_ϕ = (54 * π) / 180
    centre_ψ = -π / 4
    tolerance = π / 9  # 20 degrees (copperman suggestion); coarse model limits finer targets
    inside_ϕ = centre_ϕ - tolerance < ϕ < centre_ϕ + tolerance
    inside_ψ = centre_ψ - tolerance < ψ < centre_ψ + tolerance
    return 1 if (inside_ϕ and inside_ψ) else 0
def find_target_bins(B):
    """Flag bins whose midpoint lies inside the target region.

    Returns (u, target_binids): u[j] is 1/0 per bin, target_binids lists
    the indices of the flagged bins.  Prints a warning when none match
    (possible if the target straddles bin boundaries).
    """
    u = []
    target_binids = []
    for j in range(B.length()):
        mid_ϕ = (B.Ω[j][1][0] + B.Ω[j][0][0]) / 2
        mid_ψ = (B.Ω[j][1][1] + B.Ω[j][0][1]) / 2
        flag = In_target(mid_ϕ, mid_ψ)
        u.append(flag)
        if flag == 1:
            target_binids.append(j)
    print(sum(u),' target bins found')
    if sum(u) == 0:
        print("No Target Bins Found, Refine Coarse Model or Expand Target")
    return u, target_binids
def generate_spawn_configuration(target_spawn_angles, positionsset):
    """Return the stored configuration whose dihedrals are closest to
    `target_spawn_angles` (Euclidean distance in angle space).

    NOTE: raises NameError if no candidate is within the initial bound
    of 100, matching the original behavior.
    """
    best_distance = 100
    for candidate in positionsset:
        probe = AlanineDipeptideSimulation()
        probe.positions = candidate
        distance = np.linalg.norm(probe.getAngles() - target_spawn_angles)
        if distance < best_distance:
            best_distance = distance
            spawn_configuration = candidate
    return spawn_configuration
def build_ensemble(spawn_configuration, n_particles):
    """Create an Ensemble of `n_particles` identical walkers.

    Every walker starts at `spawn_configuration` with uniform weight
    1/n_particles (separate arrays for ω and ω̂ so they can evolve
    independently) and is assigned to bin 1.
    """
    E0 = WE.Ensemble()
    E0.ξ = [spawn_configuration] * n_particles
    E0.ξ̂ = [spawn_configuration] * n_particles
    E0.ω = np.ones(n_particles) / n_particles
    E0.ω̂ = np.ones(n_particles) / n_particles
    E0.bin = np.ones(n_particles).astype(int)
    return E0
def bin_id(position, B):
    """Module-level helper: rectangular-bin index of a raw position array."""
    probe = AlanineDipeptideSimulation()
    probe.positions = position
    return probe.bin_id(B)
def bin_id_voronoi(position, B):
    """Module-level helper: Voronoi-bin index of a raw position array."""
    probe = AlanineDipeptideSimulation()
    probe.positions = position
    return probe.bin_id_voronoi(B)
def respawn(E, B, target_binids, spawn_configuration):
    """Alanine reinjection process for rectangular bins.

    Walkers sitting in any of `target_binids` are teleported back to
    `spawn_configuration`; their weight is summed into the flux estimate.

    Returns:
        (E, B, num_target_hits, weight_flux) -- the (mutated) ensemble and
        bins, the number of reinjections, and the total reinjected weight.
    """
    weight_flux = 0
    num_target_hits = 0
    for walker in range(E.length()):
        # One hit is recorded per matching target bin id (as in the
        # original nested scan).
        for target in target_binids:
            if E.bin[walker] == target:
                weight_flux += E.ω[walker]
                E.ξ[walker] = spawn_configuration
                num_target_hits += 1
    return E, B, num_target_hits, weight_flux
def respawn_voronoi(E, B, target_binids, spawn_configuration):
    """Alanine reinjection process for Voronoi bins.

    Walkers sitting in any of `target_binids` are teleported back to
    `spawn_configuration` and their weight summed into the flux estimate;
    bin membership and bin weights are then refreshed from the new
    positions.

    Returns:
        (E, B, num_target_hits, weight_flux).
    """
    weight_flux = 0
    num_target_hits = 0
    for walker in range(E.length()):
        for target in target_binids:
            if E.bin[walker] == target:
                weight_flux += E.ω[walker]
                E.ξ[walker] = spawn_configuration
                num_target_hits += 1
    # Re-derive bin membership and bin weights after teleporting walkers.
    E.update_bin_id(B, bin_id_voronoi)
    B.update_bin_weights(E)
    return E, B, num_target_hits, weight_flux
| [
"sys.path.append",
"pathos.multiprocessing.ProcessPool",
"WeightedEnsemble.Bins",
"simtk.openmm.CustomTorsionForce",
"mdtraj.compute_dihedrals",
"openmmtools.testsystems.AlanineDipeptideVacuum",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.linalg.norm",
"numpy.linspace",
"scipy.spatial.K... | [((660, 682), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (675, 682), False, 'import sys\n'), ((6440, 6471), 'numpy.linspace', 'np.linspace', (['(-π)', 'π', '(nx_bins + 1)'], {}), '(-π, π, nx_bins + 1)\n', (6451, 6471), True, 'import numpy as np\n'), ((6480, 6511), 'numpy.linspace', 'np.linspace', (['(-π)', 'π', '(ny_bins + 1)'], {}), '(-π, π, ny_bins + 1)\n', (6491, 6511), True, 'import numpy as np\n'), ((6519, 6525), 'WeightedEnsemble.Bins', 'Bins', ([], {}), '()\n', (6523, 6525), False, 'from WeightedEnsemble import Bins\n'), ((7298, 7304), 'WeightedEnsemble.Bins', 'Bins', ([], {}), '()\n', (7302, 7304), False, 'from WeightedEnsemble import Bins\n'), ((7865, 7893), 'pathos.multiprocessing.ProcessPool', 'ProcessPool', ([], {'nodes': 'num_nodes'}), '(nodes=num_nodes)\n', (7876, 7893), False, 'from pathos.multiprocessing import ProcessPool\n'), ((8400, 8428), 'pathos.multiprocessing.ProcessPool', 'ProcessPool', ([], {'nodes': 'num_nodes'}), '(nodes=num_nodes)\n', (8411, 8428), False, 'from pathos.multiprocessing import ProcessPool\n'), ((8731, 8757), 'numpy.zeros', 'np.zeros', (['(n_bins, n_bins)'], {}), '((n_bins, n_bins))\n', (8739, 8757), True, 'import numpy as np\n'), ((9375, 9403), 'pathos.multiprocessing.ProcessPool', 'ProcessPool', ([], {'nodes': 'num_nodes'}), '(nodes=num_nodes)\n', (9386, 9403), False, 'from pathos.multiprocessing import ProcessPool\n'), ((9666, 9692), 'numpy.zeros', 'np.zeros', (['(n_bins, n_bins)'], {}), '((n_bins, n_bins))\n', (9674, 9692), True, 'import numpy as np\n'), ((12027, 12040), 'WeightedEnsemble.Ensemble', 'WE.Ensemble', ([], {}), '()\n', (12038, 12040), True, 'import WeightedEnsemble as WE\n'), ((2766, 2824), 'mdtraj.compute_dihedrals', 'mdtraj.compute_dihedrals', (['traj', '[phi_indices, psi_indices]'], {}), '(traj, [phi_indices, psi_indices])\n', (2790, 2824), False, 'import mdtraj\n'), ((3378, 3389), 'scipy.spatial.KDTree', 'KDTree', (['B.Ω'], {}), '(B.Ω)\n', (3384, 3389), 
False, 'from scipy.spatial import KDTree, Voronoi, voronoi_plot_2d\n'), ((3534, 3628), 'simtk.openmm.CustomTorsionForce', 'openmm.CustomTorsionForce', (['"""0.5*k*min(dtheta, 2*pi-dtheta)^2; dtheta = abs(theta-theta0)"""'], {}), "(\n '0.5*k*min(dtheta, 2*pi-dtheta)^2; dtheta = abs(theta-theta0)')\n", (3559, 3628), False, 'from simtk import openmm, unit\n'), ((12153, 12173), 'numpy.ones', 'np.ones', (['n_particles'], {}), '(n_particles)\n', (12160, 12173), True, 'import numpy as np\n'), ((12202, 12222), 'numpy.ones', 'np.ones', (['n_particles'], {}), '(n_particles)\n', (12209, 12222), True, 'import numpy as np\n'), ((974, 1010), 'openmmtools.testsystems.AlanineDipeptideVacuum', 'testsystems.AlanineDipeptideVacuum', ([], {}), '()\n', (1008, 1010), False, 'from openmmtools import testsystems\n'), ((1043, 1079), 'openmmtools.testsystems.AlanineDipeptideVacuum', 'testsystems.AlanineDipeptideVacuum', ([], {}), '()\n', (1077, 1079), False, 'from openmmtools import testsystems\n'), ((2638, 2680), 'mdtraj.Topology.from_openmm', 'mdtraj.Topology.from_openmm', (['self.topology'], {}), '(self.topology)\n', (2665, 2680), False, 'import mdtraj\n'), ((4059, 4095), 'openmmtools.testsystems.AlanineDipeptideVacuum', 'testsystems.AlanineDipeptideVacuum', ([], {}), '()\n', (4093, 4095), False, 'from openmmtools import testsystems\n'), ((6610, 6660), 'numpy.array', 'np.array', (['[[xx[j], yy[k]], [xx[j + 1], yy[k + 1]]]'], {}), '([[xx[j], yy[k]], [xx[j + 1], yy[k + 1]]])\n', (6618, 6660), True, 'import numpy as np\n'), ((11739, 11783), 'numpy.linalg.norm', 'np.linalg.norm', (['(angles - target_spawn_angles)'], {}), '(angles - target_spawn_angles)\n', (11753, 11783), True, 'import numpy as np\n'), ((11832, 11876), 'numpy.linalg.norm', 'np.linalg.norm', (['(angles - target_spawn_angles)'], {}), '(angles - target_spawn_angles)\n', (11846, 11876), True, 'import numpy as np\n'), ((12249, 12269), 'numpy.ones', 'np.ones', (['n_particles'], {}), '(n_particles)\n', (12256, 12269), True, 'import 
numpy as np\n'), ((877, 913), 'openmmtools.testsystems.AlanineDipeptideVacuum', 'testsystems.AlanineDipeptideVacuum', ([], {}), '()\n', (911, 913), False, 'from openmmtools import testsystems\n')] |
# Area, Perimeter, Center, Curvature
# Script: threshold an image, find contours, print each contour's area and
# perimeter, and draw every contour plus its centroid on a blank canvas.
import numpy as np
import cv2
img=cv2.imread('detect_blob.png')
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Adaptive Gaussian threshold: 115-pixel neighborhood, constant 1.
thresh=cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,115,1)
# NOTE(review): the 3-value unpack matches the OpenCV 3.x findContours
# signature; OpenCV 4.x returns only (contours, hierarchy) -- confirm the
# installed version.
_,contours,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
img2=img.copy()
color=(123,255,61)
index=-1  # -1 draws all contours passed to drawContours
tk=2  # contour line thickness
# Blank BGR canvas the same size as the input image.
frame=np.zeros([img.shape[0],img.shape[1],3],'uint8')
i=1
for c in contours:
    cv2.drawContours(frame,[c],index,color,tk)
    area=cv2.contourArea(c)
    arclength=cv2.arcLength(c,True)
    print('{} Area: {}, perimeter: {}'.format(i,area,arclength))
    i+=1
    # Centroid from spatial moments; m00 is the contour area.
    m=cv2.moments(c)
    cx=int(m['m10']/m['m00'])
    cy=int(m['m01']/m['m00'])
    cv2.circle(frame,(cx,cy),1,(0,0,255),-1)
cv2.imshow('Original',frame)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"cv2.contourArea",
"cv2.circle",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.arcLength",
"cv2.moments",
"numpy.zeros",
"cv2.imshow",
"cv2.adaptiveThreshold",
"cv2.imread",
"cv2.drawContours",
"cv2.destroyAllWindows",
"cv2.findContours"
] | [((72, 101), 'cv2.imread', 'cv2.imread', (['"""detect_blob.png"""'], {}), "('detect_blob.png')\n", (82, 101), False, 'import cv2\n'), ((107, 144), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (119, 144), False, 'import cv2\n'), ((151, 247), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(115)', '(1)'], {}), '(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 115, 1)\n', (172, 247), False, 'import cv2\n'), ((259, 323), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (275, 323), False, 'import cv2\n'), ((378, 428), 'numpy.zeros', 'np.zeros', (['[img.shape[0], img.shape[1], 3]', '"""uint8"""'], {}), "([img.shape[0], img.shape[1], 3], 'uint8')\n", (386, 428), True, 'import numpy as np\n'), ((795, 809), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (806, 809), False, 'import cv2\n'), ((810, 833), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (831, 833), False, 'import cv2\n'), ((453, 499), 'cv2.drawContours', 'cv2.drawContours', (['frame', '[c]', 'index', 'color', 'tk'], {}), '(frame, [c], index, color, tk)\n', (469, 499), False, 'import cv2\n'), ((505, 523), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (520, 523), False, 'import cv2\n'), ((538, 560), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (551, 560), False, 'import cv2\n'), ((641, 655), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (652, 655), False, 'import cv2\n'), ((720, 767), 'cv2.circle', 'cv2.circle', (['frame', '(cx, cy)', '(1)', '(0, 0, 255)', '(-1)'], {}), '(frame, (cx, cy), 1, (0, 0, 255), -1)\n', (730, 767), False, 'import cv2\n'), ((765, 794), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'frame'], {}), "('Original', frame)\n", (775, 794), False, 'import cv2\n')] |
# ------------------------------------------------------------------
# PyTorch implementation of
# "ROAM: Recurrently Optimizing Tracking Model", CVPR, 2020
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------
import config
import os
from utils import list_models
from networks import FeatureExtractor
from tracker import Tracker
import time
from utils import compute_success_overlap, get_axis_aligned_bbox
import numpy as np
from PIL import Image
def load_seq_config(data_root, seq_name):
    """Load ground-truth rectangles and frame paths for one OTB sequence.

    Args:
        data_root: root directory containing per-sequence folders.
        seq_name: name of the sequence sub-directory.

    Returns:
        (gt_rects, frame_paths): a list of int lists parsed from
        groundtruth_rect.txt and the sorted list of image paths under
        <seq>/img.

    Fixes: the ground-truth file handle is now closed (the original leaked
    it), and lines are stripped instead of chopping the last character
    with `[:-1]` -- which silently corrupted the final annotation when the
    file had no trailing newline (and crashed on trailing blank lines).
    """
    gt_path = os.path.join(data_root, seq_name, 'groundtruth_rect.txt')
    gt_rects = []
    with open(gt_path) as gt_file:
        for line in gt_file:
            line = line.strip()
            if not line:
                continue  # tolerate trailing blank lines
            gt_rects.append([int(v) for v in line.split(',')])
    img_path = os.path.join(data_root, seq_name, 'img')
    img_names = sorted(os.listdir(img_path))
    frame_paths = [os.path.join(img_path, img_name) for img_name in img_names]
    return gt_rects, frame_paths
def OTB_run(gt_rects, frame_paths, tracker):
    """Track one OTB sequence frame-by-frame; print success overlap and FPS.

    Args:
        gt_rects: per-frame ground-truth rectangles (4 values, or 8 for a
            rotated polygon which is first converted to an axis-aligned box).
        frame_paths: ordered list of frame image paths.
        tracker: Tracker instance -- initialized on frame 0, tracked after.
    """
    start_time = time.time()
    pred_bboxes = []
    for frame_idx in range(len(frame_paths)):
        print('Frame', frame_idx)
        frame = np.array(Image.open(frame_paths[frame_idx]).convert('RGB'))
        if frame_idx == 0:
            first_gt = gt_rects[0]
            if len(first_gt) == 8:
                init_bbox = get_axis_aligned_bbox(np.array(first_gt))
            else:
                init_bbox = first_gt
            bbox = tracker.initialize(frame, init_bbox)
        else:
            bbox = tracker.track(frame)
        pred_bboxes.append(bbox)
    fps = len(pred_bboxes) / (time.time() - start_time)
    success_overlap = compute_success_overlap(gt_rects, pred_bboxes)
    print('success overlap: %.4f, fps:%.2f' % (success_overlap.mean(), fps))
if __name__ == '__main__':
    # Demo entry point: run the tracker on the OTB 'Trans' sequence using
    # the most recent saved model checkpoint.
    gt_rects, frame_paths = load_seq_config(config.otb_dir, 'Trans')
    feat_extractor = FeatureExtractor(config.feat_dir)
    tracker = Tracker(feat_extractor, is_debug=True)
    # list_models returns checkpoints in order; [-1] picks the latest.
    models = list_models(os.path.abspath(config.model_dir))
    tracker.load_models(models[-1])
    OTB_run(gt_rects, frame_paths, tracker)
"os.path.abspath",
"networks.FeatureExtractor",
"utils.compute_success_overlap",
"time.time",
"PIL.Image.open",
"numpy.array",
"tracker.Tracker",
"os.path.join",
"os.listdir"
] | [((610, 667), 'os.path.join', 'os.path.join', (['data_root', 'seq_name', '"""groundtruth_rect.txt"""'], {}), "(data_root, seq_name, 'groundtruth_rect.txt')\n", (622, 667), False, 'import os\n'), ((871, 911), 'os.path.join', 'os.path.join', (['data_root', 'seq_name', '"""img"""'], {}), "(data_root, seq_name, 'img')\n", (883, 911), False, 'import os\n'), ((1127, 1138), 'time.time', 'time.time', ([], {}), '()\n', (1136, 1138), False, 'import time\n'), ((1716, 1754), 'utils.compute_success_overlap', 'compute_success_overlap', (['gt_rects', 'res'], {}), '(gt_rects, res)\n', (1739, 1754), False, 'from utils import compute_success_overlap, get_axis_aligned_bbox\n'), ((1952, 1985), 'networks.FeatureExtractor', 'FeatureExtractor', (['config.feat_dir'], {}), '(config.feat_dir)\n', (1968, 1985), False, 'from networks import FeatureExtractor\n'), ((2000, 2038), 'tracker.Tracker', 'Tracker', (['feat_extractor'], {'is_debug': '(True)'}), '(feat_extractor, is_debug=True)\n', (2007, 2038), False, 'from tracker import Tracker\n'), ((935, 955), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (945, 955), False, 'import os\n'), ((976, 1008), 'os.path.join', 'os.path.join', (['img_path', 'img_name'], {}), '(img_path, img_name)\n', (988, 1008), False, 'import os\n'), ((2064, 2097), 'os.path.abspath', 'os.path.abspath', (['config.model_dir'], {}), '(config.model_dir)\n', (2079, 2097), False, 'import os\n'), ((1675, 1686), 'time.time', 'time.time', ([], {}), '()\n', (1684, 1686), False, 'import time\n'), ((1272, 1300), 'PIL.Image.open', 'Image.open', (['frame_paths[idx]'], {}), '(frame_paths[idx])\n', (1282, 1300), False, 'from PIL import Image\n'), ((1426, 1447), 'numpy.array', 'np.array', (['gt_rects[0]'], {}), '(gt_rects[0])\n', (1434, 1447), True, 'import numpy as np\n')] |
"""Remap landcover codes based on distance from stream."""
import datetime
import os
import sys
import logging
import zipfile
from osgeo import gdal
from osgeo import osr
import pygeoprocessing
import pygeoprocessing.routing
import pygeoprocessing.symbolic
import numpy
import taskgraph
import ecoshard
# Configure root logging once and create the module logger.  (Fix: the
# original assigned `LOGGER = logging.getLogger(__name__)` twice, once on
# each side of basicConfig; the redundant assignment is removed.)
logging.basicConfig(
    level=logging.DEBUG,
    format=('%(message)s'),
    stream=sys.stdout)
LOGGER = logging.getLogger(__name__)

# Number of worker processes for the task graph.
N_CPUS = 4

# Ecoshard-hosted inputs: DEM archive, base landcover raster, stream vector.
DEM_ECOSHARD_URL = 'https://storage.googleapis.com/critical-natural-capital-ecoshards/Dem10cr1_md5_1ec5d8b327316c8adc888dde96595a82.zip'
LULC_ECOSHARD_URL = 'https://storage.googleapis.com/critical-natural-capital-ecoshards/Base_LULC_CR_updated1_md5_a63f1e8a0538e268c6ae8701ccf0291b.tif'
STREAM_LAYER_ECOSHARD_URL = 'https://storage.googleapis.com/critical-natural-capital-ecoshards/Rivers_lascruces_KEL-20190827T205323Z-001_md5_76455ad11ee32423388f0bbf22f07795.zip'
# Output vector paths for the two stream buffer distances.
STREAM_10M_BUFFER_PATH = '10mbuffer.gpkg'
STREAM_50M_BUFFER_PATH = '50mbuffer.gpkg'
WORKSPACE_DIR = 'raster_stream_buffer_workspace'
def conditional_convert_op(
        base_lulc, lulc_nodata, converted_lulc, buffer_10m_array,
        flow_accum_50m_slope_mask_array,
        rasterized_50m_buffer_raster_path,
        stream_array, target_nodata):
    """Remap landcover codes near streams.

    Keeps base codes everywhere, substitutes the converted code inside the
    10 m buffer, or inside the 50 m buffer where the flow-accumulation /
    slope mask is set, and stamps stream pixels with code 4.  Nodata
    pixels of the base raster map to `target_nodata`.
    """
    nodata_mask = base_lulc == lulc_nodata
    data_mask = ~nodata_mask
    result = numpy.full(base_lulc.shape, target_nodata, dtype=numpy.int16)
    result[data_mask] = base_lulc[data_mask]
    convert_zone = data_mask & (
        (buffer_10m_array == 1) | (
            (rasterized_50m_buffer_raster_path == 1) &
            (flow_accum_50m_slope_mask_array >= 1)))
    result[convert_zone] = converted_lulc[convert_zone]
    result[data_mask & (stream_array == 1)] = 4
    return result
def mask_by_value_op(array, value, nodata):
    """Binary mask: 1 where array equals value, 0 elsewhere, 2 at nodata.

    Result dtype follows the input array's dtype.
    """
    result = numpy.zeros_like(array)
    result[array == value] = 1
    result[numpy.isclose(array, nodata)] = 2
    return result
def mask_slope_and_distance(
        slope_array, slope_threshold, slope_nodata,
        dist_array, dist_threshold, dist_nodata, target_nodata):
    """Mask of pixels below both the slope and distance thresholds.

    Returns an int8 array: 1 where both values are strictly under their
    thresholds, 0 where not, and `target_nodata` where either input is
    nodata.
    """
    result = numpy.full(slope_array.shape, target_nodata, dtype=numpy.int8)
    valid_mask = (
        ~numpy.isclose(slope_array, slope_nodata) &
        ~numpy.isclose(dist_array, dist_nodata))
    below_both = (
        (slope_array < slope_threshold) &
        (dist_array < dist_threshold))
    result[valid_mask] = below_both[valid_mask]
    return result
def mask_by_inv_value_op(array, value, nodata):
    """Inverse mask: 0 where array equals value, 1 elsewhere, 2 at nodata.

    Result dtype follows the input array's dtype; nodata wins over the
    equality test because it is applied last.
    """
    result = numpy.zeros_like(array)
    result[array != value] = 1
    result[numpy.isclose(array, nodata)] = 2
    return result
def download_and_unzip(base_url, target_dir, done_token_path):
    """Download and unzip base_url to target_dir and write done token path.

    Args:
        base_url: URL of the zip archive to fetch.
        target_dir: directory the archive is downloaded to and extracted in.
        done_token_path: path of a token file written on success so a
            task graph can skip this work on re-runs.

    Returns:
        None.
    """
    path_to_zip_file = os.path.join(target_dir, os.path.basename(base_url))
    ecoshard.download_url(
        base_url, path_to_zip_file, skip_if_target_exists=False)
    # Context manager guarantees the archive handle is closed even if
    # extraction raises (the original leaked the handle on error).
    with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
        zip_ref.extractall(target_dir)
    with open(done_token_path, 'w') as done_file:
        done_file.write(str(datetime.datetime.now()))
def burn_dem(
        dem_raster_path, streams_raster_path, target_burned_dem_path,
        burn_depth=10):
    """Burn streams into dem.

    Creates a copy of the DEM at `target_burned_dem_path` and lowers every
    stream pixel (value 1 in `streams_raster_path`) by `burn_depth` DEM
    units so downstream routing follows the mapped channels.  Nodata DEM
    pixels are left untouched.
    """
    dem_raster_info = pygeoprocessing.get_raster_info(dem_raster_path)
    dem_nodata = dem_raster_info['nodata'][0]
    # Target raster starts as an empty copy with the DEM's type/nodata.
    pygeoprocessing.new_raster_from_base(
        dem_raster_path, target_burned_dem_path, dem_raster_info['datatype'],
        [dem_nodata])
    burned_dem_raster = gdal.OpenEx(
        target_burned_dem_path, gdal.OF_RASTER | gdal.OF_UPDATE)
    burned_dem_band = burned_dem_raster.GetRasterBand(1)
    stream_raster = gdal.OpenEx(streams_raster_path, gdal.OF_RASTER)
    stream_band = stream_raster.GetRasterBand(1)
    # Process block by block so arbitrarily large rasters fit in memory.
    for offset_dict, dem_block in pygeoprocessing.iterblocks(
            (dem_raster_path, 1)):
        stream_block = stream_band.ReadAsArray(**offset_dict)
        # Only burn where a stream exists AND the DEM has valid data.
        stream_mask = (
            (stream_block == 1) & ~numpy.isclose(dem_block, dem_nodata))
        filled_block = numpy.copy(dem_block)
        filled_block[stream_mask] = filled_block[stream_mask]-burn_depth
        burned_dem_band.WriteArray(
            filled_block, xoff=offset_dict['xoff'], yoff=offset_dict['yoff'])
    # Drop references in band-before-raster order so GDAL flushes to disk.
    stream_band = None
    stream_raster = None
    burned_dem_band = None
    burned_dem_raster = None
def length_of_degree(lat):
    """Approximate the metric length of one degree at latitude `lat`.

    Uses the standard cosine-series expansion for meters per degree of
    latitude and longitude and returns the larger of the two, in meters.
    """
    lat_rad = lat * numpy.pi / 180
    meters_per_lat_degree = (
        111132.92
        - 559.82 * numpy.cos(2 * lat_rad)
        + 1.175 * numpy.cos(4 * lat_rad)
        - 0.0023 * numpy.cos(6 * lat_rad))
    meters_per_lng_degree = abs(
        111412.84 * numpy.cos(lat_rad)
        - 93.5 * numpy.cos(3 * lat_rad)
        + 0.118 * numpy.cos(5 * lat_rad))
    return max(meters_per_lat_degree, meters_per_lng_degree)
def rasterize_streams(
        base_raster_path, stream_vector_path, target_streams_raster_path):
    """Rasterize streams.

    Creates a byte raster aligned with `base_raster_path` filled with 2
    (the non-stream/nodata code used elsewhere in this module), then burns
    value 1 wherever `stream_vector_path` geometry intersects.
    """
    pygeoprocessing.new_raster_from_base(
        base_raster_path, target_streams_raster_path, gdal.GDT_Byte, [2],
        fill_value_list=[2])
    LOGGER.debug(stream_vector_path)
    pygeoprocessing.rasterize(
        stream_vector_path, target_streams_raster_path,
        burn_values=[1])
def hat_distance_kernel(pixel_radius, kernel_filepath):
    """Create a raster-based 0, 1 kernel path.

    Parameters:
        pixel_radius (int): Radius of the kernel in pixels.
        kernel_filepath (string): The path to the file on disk where this
            kernel should be stored. If this file exists, it will be
            overwritten.

    Returns:
        None
    """
    kernel_size = int((pixel_radius)*2+1)
    driver = gdal.GetDriverByName('GTiff')
    kernel_dataset = driver.Create(
        kernel_filepath.encode('utf-8'), kernel_size, kernel_size, 1,
        gdal.GDT_Float32, options=[
            'BIGTIFF=IF_SAFER', 'TILED=YES', 'BLOCKXSIZE=256',
            'BLOCKYSIZE=256'])

    # Make some kind of geotransform, it doesn't matter what but
    # will make GIS libraries behave better if it's all defined
    kernel_dataset.SetGeoTransform([0, 1, 0, 0, 0, -1])
    srs = osr.SpatialReference()
    srs.SetWellKnownGeogCS('WGS84')
    kernel_dataset.SetProjection(srs.ExportToWkt())

    kernel_band = kernel_dataset.GetRasterBand(1)
    kernel_band.SetNoDataValue(-9999)

    # Fix: `numpy.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # use the concrete float64 alias.  (Also dropped the unused
    # GetBlockSize call the original left behind.)
    row_indices, col_indices = numpy.indices(
        (kernel_size, kernel_size), dtype=numpy.float64) - pixel_radius
    kernel_index_distances = numpy.hypot(row_indices, col_indices)
    # Flat-top ("hat") kernel: 1 inside the radius, 0 outside.
    kernel = kernel_index_distances <= pixel_radius

    kernel_band.WriteArray(kernel)
    # Flush both band and dataset so the file is complete on disk before
    # the handles are released.
    kernel_band.FlushCache()
    kernel_dataset.FlushCache()
    kernel_band = None
    kernel_dataset = None
def linear_decay_kernel(pixel_radius, kernel_filepath):
    """Create a raster-based linear decay kernel.

    Kernel values decay linearly from 1 at the kernel center to 0 at
    ``pixel_radius``; pixels beyond the radius are 0.

    Parameters:
        pixel_radius (int): Radius of the kernel in pixels.
        kernel_filepath (string): The path to the file on disk where this
            kernel should be stored.  If this file exists, it will be
            overwritten.

    Returns:
        None
    """
    kernel_size = int(pixel_radius * 2 + 1)
    driver = gdal.GetDriverByName('GTiff')
    # NOTE(review): passing bytes for the filename is a Python 2 GDAL idiom;
    # modern GDAL bindings expect str -- confirm against the pinned GDAL.
    kernel_dataset = driver.Create(
        kernel_filepath.encode('utf-8'), kernel_size, kernel_size, 1,
        gdal.GDT_Float32, options=[
            'BIGTIFF=IF_SAFER', 'TILED=YES', 'BLOCKXSIZE=256',
            'BLOCKYSIZE=256'])

    # Make some kind of geotransform; it doesn't matter what, but GIS
    # libraries behave better if it's all defined.
    kernel_dataset.SetGeoTransform([0, 1, 0, 0, 0, -1])
    srs = osr.SpatialReference()
    srs.SetWellKnownGeogCS('WGS84')
    kernel_dataset.SetProjection(srs.ExportToWkt())

    kernel_band = kernel_dataset.GetRasterBand(1)
    kernel_band.SetNoDataValue(-9999)

    # Distance of every pixel from the kernel center.
    # BUGFIX: `numpy.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # plain `float` is the documented equivalent.
    row_indices, col_indices = numpy.indices(
        (kernel_size, kernel_size), dtype=float) - pixel_radius
    kernel_index_distances = numpy.hypot(row_indices, col_indices)
    # Linear falloff: 1 at center, 0 at the radius, clamped to 0 beyond it.
    inverse_distances = (pixel_radius - kernel_index_distances) / pixel_radius
    inverse_distances[inverse_distances < 0] = 0

    kernel_band.WriteArray(inverse_distances)
    kernel_band.FlushCache()
    kernel_dataset.FlushCache()

    # Dereference the GDAL objects so the file is flushed and closed.
    kernel_band = None
    kernel_dataset = None
if __name__ == '__main__':
    # Create the workspace; tolerate it already existing.
    try:
        os.makedirs(WORKSPACE_DIR)
    except OSError:
        pass
    task_graph = taskgraph.TaskGraph(WORKSPACE_DIR, N_CPUS, 5)

    # --- Stage 1: download DEM, stream vector, and LULC source data. ---
    dem_download_token_path = os.path.join(
        WORKSPACE_DIR, 'dem_downloaded.TOKEN')
    dem_raster_path = os.path.join(WORKSPACE_DIR, 'Dem10cr1', 'Dem10cr1')
    _ = task_graph.add_task(
        func=download_and_unzip,
        args=(DEM_ECOSHARD_URL, WORKSPACE_DIR, dem_download_token_path),
        target_path_list=[dem_download_token_path],
        task_name='download dem')
    stream_vector_path = os.path.join(WORKSPACE_DIR, 'Rivers_lascruces_KEL')
    stream_download_token_path = os.path.join(
        WORKSPACE_DIR, 'stream_downloaded.TOKEN')
    _ = task_graph.add_task(
        func=download_and_unzip,
        args=(STREAM_LAYER_ECOSHARD_URL, WORKSPACE_DIR,
              stream_download_token_path),
        target_path_list=[stream_download_token_path],
        task_name='download stream')
    lulc_raster_path = os.path.join(
        WORKSPACE_DIR, os.path.basename(LULC_ECOSHARD_URL))
    _ = task_graph.add_task(
        func=ecoshard.download_url,
        args=(LULC_ECOSHARD_URL, lulc_raster_path),
        target_path_list=[lulc_raster_path],
        task_name='download lulc')
    # Block until all downloads finish before inspecting the rasters.
    task_graph.join()

    # --- Stage 2: align the DEM and LULC rasters onto a common grid. ---
    base_raster_path_list = [dem_raster_path, lulc_raster_path]
    dem_raster_info = pygeoprocessing.get_raster_info(dem_raster_path)
    lulc_raster_info = pygeoprocessing.get_raster_info(lulc_raster_path)
    LOGGER.debug(dem_raster_info)
    LOGGER.debug(lulc_raster_info)
    aligned_raster_path_list = [
        '%s/aligned_%s' % (os.path.dirname(path), os.path.basename(path))
        for path in base_raster_path_list]
    align_task = task_graph.add_task(
        func=pygeoprocessing.align_and_resize_raster_stack,
        args=(
            base_raster_path_list, aligned_raster_path_list,
            ['near', 'near'], dem_raster_info['pixel_size'],
            'intersection'),
        kwargs={'target_sr_wkt': lulc_raster_info['projection']},
        target_path_list=aligned_raster_path_list,
        task_name='align rasters')

    # --- Stage 3: rasterize the stream network and its 10m/50m buffers. ---
    rasterized_streams_raster_path = os.path.join(
        WORKSPACE_DIR, 'rasterized_streams.tif')
    rasterize_streams_task = task_graph.add_task(
        func=rasterize_streams,
        args=(aligned_raster_path_list[0], stream_vector_path,
              rasterized_streams_raster_path),
        target_path_list=[rasterized_streams_raster_path],
        dependent_task_list=[align_task],
        task_name='rasterize streams')
    rasterized_10m_buffer_raster_path = os.path.join(
        WORKSPACE_DIR, 'rasterized_10m_buffer_streams.tif')
    rasterize_10m_buffer_task = task_graph.add_task(
        func=rasterize_streams,
        args=(aligned_raster_path_list[0], STREAM_10M_BUFFER_PATH,
              rasterized_10m_buffer_raster_path),
        target_path_list=[rasterized_10m_buffer_raster_path],
        dependent_task_list=[align_task],
        task_name='rasterize 10m buffer')
    rasterized_50m_buffer_raster_path = os.path.join(
        WORKSPACE_DIR, 'rasterized_50m_buffer_streams.tif')
    rasterize_50m_buffer_task = task_graph.add_task(
        func=rasterize_streams,
        args=(aligned_raster_path_list[0], STREAM_50M_BUFFER_PATH,
              rasterized_50m_buffer_raster_path),
        target_path_list=[rasterized_50m_buffer_raster_path],
        dependent_task_list=[align_task],
        task_name='rasterize 50m buffer')

    # --- Stage 4: hydrologic routing on the stream-burned, pit-filled DEM. ---
    burned_dem_path = os.path.join(WORKSPACE_DIR, 'burned_dem.tif')
    burn_dem_task = task_graph.add_task(
        func=burn_dem,
        args=(aligned_raster_path_list[0], rasterized_streams_raster_path,
              burned_dem_path),
        target_path_list=[burned_dem_path],
        dependent_task_list=[rasterize_streams_task],
        task_name='burn streams')
    filled_dem_raster_path = os.path.join(
        WORKSPACE_DIR, 'filled_dem.tif')
    fill_pits_task = task_graph.add_task(
        func=pygeoprocessing.routing.fill_pits,
        args=(
            (burned_dem_path, 1), filled_dem_raster_path),
        kwargs={'working_dir': WORKSPACE_DIR},
        dependent_task_list=[burn_dem_task],
        target_path_list=[filled_dem_raster_path],
        task_name='fill pits')
    # NOTE(review): slope is computed from the unburned aligned DEM while
    # flow direction uses the burned/filled DEM -- confirm this is intended.
    slope_raster_path = os.path.join(WORKSPACE_DIR, 'slope.tif')
    slope_task = task_graph.add_task(
        func=pygeoprocessing.calculate_slope,
        args=((aligned_raster_path_list[0], 1), slope_raster_path),
        target_path_list=[slope_raster_path],
        dependent_task_list=[align_task],
        task_name='calculate slope')
    flow_direction_path = os.path.join(WORKSPACE_DIR, 'mfd_flow_dir.tif')
    flow_dir_task = task_graph.add_task(
        func=pygeoprocessing.routing.flow_dir_mfd,
        args=((filled_dem_raster_path, 1), flow_direction_path),
        kwargs={'working_dir': WORKSPACE_DIR},
        target_path_list=[flow_direction_path],
        dependent_task_list=[fill_pits_task],
        task_name='flow dir')
    flow_accum_path = os.path.join(WORKSPACE_DIR, 'flow_accum.tif')
    flow_accum_task = task_graph.add_task(
        func=pygeoprocessing.routing.flow_accumulation_mfd,
        args=((flow_direction_path, 1), flow_accum_path),
        target_path_list=[flow_accum_path],
        dependent_task_list=[flow_dir_task],
        task_name='flow accum')

    # 3) make slope threshold mask: pixels steeper than the threshold that
    # also fall inside the 50m stream buffer.
    slope_threshold = 40.0
    slope_mask_nodata = -9999
    steep_slope_50m_mask_path = os.path.join(
        WORKSPACE_DIR,
        'steep_slope_%.2f_in_50m_mask.tif' % slope_threshold)
    steep_slope_50m_task = task_graph.add_task(
        func=pygeoprocessing.symbolic.evaluate_raster_calculator_expression,
        args=(
            'And(slope > %f, buffer_50m_mask)' % slope_threshold,
            {'slope': (slope_raster_path, 1),
             'buffer_50m_mask': (rasterized_50m_buffer_raster_path, 1)},
            slope_mask_nodata,
            steep_slope_50m_mask_path),
        target_path_list=[steep_slope_50m_mask_path],
        dependent_task_list=[slope_task, rasterize_50m_buffer_task],
        task_name='mask slope to %.2f%%' % slope_threshold)

    # 4) weighted flow accum of slope threshold mask
    flow_accum_slope_mask_path = os.path.join(
        WORKSPACE_DIR, 'flow_accum_masked_high_slope.tif')
    slope_flow_accum_task = task_graph.add_task(
        func=pygeoprocessing.routing.flow_accumulation_mfd,
        args=(
            (flow_direction_path, 1), flow_accum_slope_mask_path),
        kwargs={'weight_raster_path_band': (steep_slope_50m_mask_path, 1)},
        target_path_list=[flow_accum_slope_mask_path],
        dependent_task_list=[steep_slope_50m_task, flow_dir_task],
        task_name='masked slope weighted flow accum')

    # --- Stage 5: landcover reclassification. ---
    # NOTE(review): codes mapped to themselves appear unchanged while others
    # are offset by +100; presumably +100 flags classes eligible for
    # conversion -- confirm against conditional_convert_op.
    lulc_to_converted_map = {
        0: 100,
        1: 1,
        2: 102,
        3: 103,
        4: 4,
        5: 5,
        6: 106,
        7: 7,
        8: 108,
        9: 109,
        10: 110,
        11: 111,
        12: 12,
        13: 113,
        14: 14,
        15: 15,
        16: 16,
        21: 21,
        22: 122,
        23: 123,
        24: 24,
    }
    target_lulc_nodata = -1
    potential_converted_landover_raster_path = os.path.join(
        WORKSPACE_DIR, 'potential_converted_lulc.tif')
    converted_lulc_task = task_graph.add_task(
        func=pygeoprocessing.reclassify_raster,
        args=(
            (aligned_raster_path_list[1], 1), lulc_to_converted_map,
            potential_converted_landover_raster_path, gdal.GDT_Int16,
            target_lulc_nodata),
        kwargs={'values_required': True},
        target_path_list=[potential_converted_landover_raster_path],
        dependent_task_list=[align_task],
        task_name='calculate converted')
    task_graph.join()

    # --- Stage 6: final conditional conversion raster combining the LULC,
    # buffers, streams, and slope-weighted flow accumulation. ---
    converted_landover_raster_path = os.path.join(
        WORKSPACE_DIR, 'converted_lulc.tif')
    base_lulc_nodata = lulc_raster_info['nodata'][0]
    task_graph.add_task(
        func=pygeoprocessing.raster_calculator,
        args=(
            ((aligned_raster_path_list[1], 1), (base_lulc_nodata, 'raw'),
             (potential_converted_landover_raster_path, 1),
             (rasterized_10m_buffer_raster_path, 1),
             (flow_accum_slope_mask_path, 1),
             (rasterized_50m_buffer_raster_path, 1),
             (rasterized_streams_raster_path, 1),
             (target_lulc_nodata, 'raw')),
            conditional_convert_op, converted_landover_raster_path,
            gdal.GDT_Int16, target_lulc_nodata),
        target_path_list=[converted_landover_raster_path],
        task_name='convert landcover')
    task_graph.join()
    task_graph.close()
| [
"pygeoprocessing.rasterize",
"taskgraph.TaskGraph",
"numpy.empty",
"logging.getLogger",
"numpy.isclose",
"os.path.join",
"osgeo.gdal.GetDriverByName",
"numpy.copy",
"os.path.dirname",
"numpy.empty_like",
"ecoshard.download_url",
"datetime.datetime.now",
"pygeoprocessing.get_raster_info",
"... | [((314, 341), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (331, 341), False, 'import logging\n'), ((343, 429), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(message)s"""', 'stream': 'sys.stdout'}), "(level=logging.DEBUG, format='%(message)s', stream=sys.\n stdout)\n", (362, 429), False, 'import logging\n'), ((449, 476), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (466, 476), False, 'import logging\n'), ((1529, 1576), 'numpy.empty', 'numpy.empty', (['base_lulc.shape'], {'dtype': 'numpy.int16'}), '(base_lulc.shape, dtype=numpy.int16)\n', (1540, 1576), False, 'import numpy\n'), ((2171, 2194), 'numpy.empty_like', 'numpy.empty_like', (['array'], {}), '(array)\n', (2187, 2194), False, 'import numpy\n'), ((2468, 2516), 'numpy.empty', 'numpy.empty', (['slope_array.shape'], {'dtype': 'numpy.int8'}), '(slope_array.shape, dtype=numpy.int8)\n', (2479, 2516), False, 'import numpy\n'), ((2931, 2954), 'numpy.empty_like', 'numpy.empty_like', (['array'], {}), '(array)\n', (2947, 2954), False, 'import numpy\n'), ((3291, 3369), 'ecoshard.download_url', 'ecoshard.download_url', (['base_url', 'path_to_zip_file'], {'skip_if_target_exists': '(False)'}), '(base_url, path_to_zip_file, skip_if_target_exists=False)\n', (3312, 3369), False, 'import ecoshard\n'), ((3393, 3431), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path_to_zip_file', '"""r"""'], {}), "(path_to_zip_file, 'r')\n", (3408, 3431), False, 'import zipfile\n'), ((3756, 3804), 'pygeoprocessing.get_raster_info', 'pygeoprocessing.get_raster_info', (['dem_raster_path'], {}), '(dem_raster_path)\n', (3787, 3804), False, 'import pygeoprocessing\n'), ((3855, 3979), 'pygeoprocessing.new_raster_from_base', 'pygeoprocessing.new_raster_from_base', (['dem_raster_path', 'target_burned_dem_path', "dem_raster_info['datatype']", '[dem_nodata]'], {}), "(dem_raster_path,\n target_burned_dem_path, dem_raster_info['datatype'], 
[dem_nodata])\n", (3891, 3979), False, 'import pygeoprocessing\n'), ((4018, 4086), 'osgeo.gdal.OpenEx', 'gdal.OpenEx', (['target_burned_dem_path', '(gdal.OF_RASTER | gdal.OF_UPDATE)'], {}), '(target_burned_dem_path, gdal.OF_RASTER | gdal.OF_UPDATE)\n', (4029, 4086), False, 'from osgeo import gdal\n'), ((4173, 4221), 'osgeo.gdal.OpenEx', 'gdal.OpenEx', (['streams_raster_path', 'gdal.OF_RASTER'], {}), '(streams_raster_path, gdal.OF_RASTER)\n', (4184, 4221), False, 'from osgeo import gdal\n'), ((4305, 4353), 'pygeoprocessing.iterblocks', 'pygeoprocessing.iterblocks', (['(dem_raster_path, 1)'], {}), '((dem_raster_path, 1))\n', (4331, 4353), False, 'import pygeoprocessing\n'), ((5508, 5635), 'pygeoprocessing.new_raster_from_base', 'pygeoprocessing.new_raster_from_base', (['base_raster_path', 'target_streams_raster_path', 'gdal.GDT_Byte', '[2]'], {'fill_value_list': '[2]'}), '(base_raster_path,\n target_streams_raster_path, gdal.GDT_Byte, [2], fill_value_list=[2])\n', (5544, 5635), False, 'import pygeoprocessing\n'), ((5690, 5784), 'pygeoprocessing.rasterize', 'pygeoprocessing.rasterize', (['stream_vector_path', 'target_streams_raster_path'], {'burn_values': '[1]'}), '(stream_vector_path, target_streams_raster_path,\n burn_values=[1])\n', (5715, 5784), False, 'import pygeoprocessing\n'), ((6240, 6269), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (6260, 6269), False, 'from osgeo import gdal\n'), ((6701, 6723), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (6721, 6723), False, 'from osgeo import osr\n'), ((7110, 7147), 'numpy.hypot', 'numpy.hypot', (['row_indices', 'col_indices'], {}), '(row_indices, col_indices)\n', (7121, 7147), False, 'import numpy\n'), ((7796, 7825), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (7816, 7825), False, 'from osgeo import gdal\n'), ((8257, 8279), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', 
(8277, 8279), False, 'from osgeo import osr\n'), ((8666, 8703), 'numpy.hypot', 'numpy.hypot', (['row_indices', 'col_indices'], {}), '(row_indices, col_indices)\n', (8677, 8703), False, 'import numpy\n'), ((9112, 9157), 'taskgraph.TaskGraph', 'taskgraph.TaskGraph', (['WORKSPACE_DIR', 'N_CPUS', '(5)'], {}), '(WORKSPACE_DIR, N_CPUS, 5)\n', (9131, 9157), False, 'import taskgraph\n'), ((9188, 9239), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""dem_downloaded.TOKEN"""'], {}), "(WORKSPACE_DIR, 'dem_downloaded.TOKEN')\n", (9200, 9239), False, 'import os\n'), ((9271, 9322), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""Dem10cr1"""', '"""Dem10cr1"""'], {}), "(WORKSPACE_DIR, 'Dem10cr1', 'Dem10cr1')\n", (9283, 9322), False, 'import os\n'), ((9570, 9621), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""Rivers_lascruces_KEL"""'], {}), "(WORKSPACE_DIR, 'Rivers_lascruces_KEL')\n", (9582, 9621), False, 'import os\n'), ((9655, 9709), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""stream_downloaded.TOKEN"""'], {}), "(WORKSPACE_DIR, 'stream_downloaded.TOKEN')\n", (9667, 9709), False, 'import os\n'), ((10377, 10425), 'pygeoprocessing.get_raster_info', 'pygeoprocessing.get_raster_info', (['dem_raster_path'], {}), '(dem_raster_path)\n', (10408, 10425), False, 'import pygeoprocessing\n'), ((10449, 10498), 'pygeoprocessing.get_raster_info', 'pygeoprocessing.get_raster_info', (['lulc_raster_path'], {}), '(lulc_raster_path)\n', (10480, 10498), False, 'import pygeoprocessing\n'), ((11172, 11225), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""rasterized_streams.tif"""'], {}), "(WORKSPACE_DIR, 'rasterized_streams.tif')\n", (11184, 11225), False, 'import os\n'), ((11632, 11696), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""rasterized_10m_buffer_streams.tif"""'], {}), "(WORKSPACE_DIR, 'rasterized_10m_buffer_streams.tif')\n", (11644, 11696), False, 'import os\n'), ((12119, 12183), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', 
'"""rasterized_50m_buffer_streams.tif"""'], {}), "(WORKSPACE_DIR, 'rasterized_50m_buffer_streams.tif')\n", (12131, 12183), False, 'import os\n'), ((12588, 12633), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""burned_dem.tif"""'], {}), "(WORKSPACE_DIR, 'burned_dem.tif')\n", (12600, 12633), False, 'import os\n'), ((12967, 13012), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""filled_dem.tif"""'], {}), "(WORKSPACE_DIR, 'filled_dem.tif')\n", (12979, 13012), False, 'import os\n'), ((13385, 13425), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""slope.tif"""'], {}), "(WORKSPACE_DIR, 'slope.tif')\n", (13397, 13425), False, 'import os\n'), ((13730, 13777), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""mfd_flow_dir.tif"""'], {}), "(WORKSPACE_DIR, 'mfd_flow_dir.tif')\n", (13742, 13777), False, 'import os\n'), ((14129, 14174), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""flow_accum.tif"""'], {}), "(WORKSPACE_DIR, 'flow_accum.tif')\n", (14141, 14174), False, 'import os\n'), ((14582, 14667), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', "('steep_slope_%.2f_in_50m_mask.tif' % slope_threshold)"], {}), "(WORKSPACE_DIR, 'steep_slope_%.2f_in_50m_mask.tif' %\n slope_threshold)\n", (14594, 14667), False, 'import os\n'), ((15347, 15410), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""flow_accum_masked_high_slope.tif"""'], {}), "(WORKSPACE_DIR, 'flow_accum_masked_high_slope.tif')\n", (15359, 15410), False, 'import os\n'), ((16309, 16368), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""potential_converted_lulc.tif"""'], {}), "(WORKSPACE_DIR, 'potential_converted_lulc.tif')\n", (16321, 16368), False, 'import os\n'), ((16915, 16964), 'os.path.join', 'os.path.join', (['WORKSPACE_DIR', '"""converted_lulc.tif"""'], {}), "(WORKSPACE_DIR, 'converted_lulc.tif')\n", (16927, 16964), False, 'import os\n'), ((2255, 2283), 'numpy.isclose', 'numpy.isclose', (['array', 'nodata'], {}), '(array, nodata)\n', (2268, 2283), False, 'import 
numpy\n'), ((3015, 3043), 'numpy.isclose', 'numpy.isclose', (['array', 'nodata'], {}), '(array, nodata)\n', (3028, 3043), False, 'import numpy\n'), ((3259, 3285), 'os.path.basename', 'os.path.basename', (['base_url'], {}), '(base_url)\n', (3275, 3285), False, 'import os\n'), ((4550, 4571), 'numpy.copy', 'numpy.copy', (['dem_block'], {}), '(dem_block)\n', (4560, 4571), False, 'import numpy\n'), ((6995, 7055), 'numpy.indices', 'numpy.indices', (['(kernel_size, kernel_size)'], {'dtype': 'numpy.float'}), '((kernel_size, kernel_size), dtype=numpy.float)\n', (7008, 7055), False, 'import numpy\n'), ((8551, 8611), 'numpy.indices', 'numpy.indices', (['(kernel_size, kernel_size)'], {'dtype': 'numpy.float'}), '((kernel_size, kernel_size), dtype=numpy.float)\n', (8564, 8611), False, 'import numpy\n'), ((9035, 9061), 'os.makedirs', 'os.makedirs', (['WORKSPACE_DIR'], {}), '(WORKSPACE_DIR)\n', (9046, 9061), False, 'import os\n'), ((10033, 10068), 'os.path.basename', 'os.path.basename', (['LULC_ECOSHARD_URL'], {}), '(LULC_ECOSHARD_URL)\n', (10049, 10068), False, 'import os\n'), ((2575, 2615), 'numpy.isclose', 'numpy.isclose', (['slope_array', 'slope_nodata'], {}), '(slope_array, slope_nodata)\n', (2588, 2615), False, 'import numpy\n'), ((2627, 2665), 'numpy.isclose', 'numpy.isclose', (['dist_array', 'dist_nodata'], {}), '(dist_array, dist_nodata)\n', (2640, 2665), False, 'import numpy\n'), ((5199, 5221), 'numpy.cos', 'numpy.cos', (['(6 * lat_rad)'], {}), '(6 * lat_rad)\n', (5208, 5221), False, 'import numpy\n'), ((3565, 3588), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3586, 3588), False, 'import datetime\n'), ((4489, 4525), 'numpy.isclose', 'numpy.isclose', (['dem_block', 'dem_nodata'], {}), '(dem_block, dem_nodata)\n', (4502, 4525), False, 'import numpy\n'), ((5161, 5183), 'numpy.cos', 'numpy.cos', (['(4 * lat_rad)'], {}), '(4 * lat_rad)\n', (5170, 5183), False, 'import numpy\n'), ((5319, 5341), 'numpy.cos', 'numpy.cos', (['(5 * lat_rad)'], {}), '(5 * 
lat_rad)\n', (5328, 5341), False, 'import numpy\n'), ((10628, 10649), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (10643, 10649), False, 'import os\n'), ((10651, 10673), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (10667, 10673), False, 'import os\n'), ((5131, 5153), 'numpy.cos', 'numpy.cos', (['(2 * lat_rad)'], {}), '(2 * lat_rad)\n', (5140, 5153), False, 'import numpy\n'), ((5255, 5273), 'numpy.cos', 'numpy.cos', (['lat_rad'], {}), '(lat_rad)\n', (5264, 5273), False, 'import numpy\n'), ((5281, 5303), 'numpy.cos', 'numpy.cos', (['(3 * lat_rad)'], {}), '(3 * lat_rad)\n', (5290, 5303), False, 'import numpy\n')] |
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np
# BUGFIX: `cPickle` exists only on Python 2; fall back to `pickle` on
# Python 3 (where cPickle's C implementation is built in).
try:
    import cPickle as pickle
except ImportError:
    import pickle
from sklearn.neural_network import MLPClassifier
from sklearn import preprocessing

# rescale the data, use the traditional train/test split
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
#                     algorithm='sgd', verbose=10, tol=1e-4, random_state=1)

# BUGFIX: load via a context manager so the file handle is closed (the
# original leaked it through pickle.load(open(...))).  The fixture was
# pickled by Python 2, so retry with latin1 decoding on Python 3.
try:
    with open('cifar_2class_py2.p', 'rb') as pickle_file:
        data = pickle.load(pickle_file)
except UnicodeDecodeError:
    with open('cifar_2class_py2.p', 'rb') as pickle_file:
        data = pickle.load(pickle_file, encoding='latin1')

train_x = np.array(data['train_data'])
train_y = np.array(data['train_labels'])
test_x = np.array(data['test_data'])
test_y = np.array(data['test_labels'])

# Flatten (n, 1) label columns to 1-D, as sklearn expects.
train_y = np.ravel(train_y)
test_y = np.ravel(test_y)

# Standardize features to zero mean / unit variance.
train_x = preprocessing.scale(train_x)
test_x = preprocessing.scale(test_x)

mlp = MLPClassifier(hidden_layer_sizes=(10,1), batch_size=100, learning_rate='adaptive',
                    max_iter=9, solver='sgd', verbose=True, learning_rate_init=.1)
mlp.fit(train_x, train_y)
print("Training set score: %f" % mlp.score(train_x, train_y))
print("Test set score: %f" % mlp.score(test_x, test_y))
| [
"sklearn.preprocessing.scale",
"numpy.array",
"sklearn.neural_network.MLPClassifier",
"numpy.ravel"
] | [((445, 473), 'numpy.array', 'np.array', (["data['train_data']"], {}), "(data['train_data'])\n", (453, 473), True, 'import numpy as np\n'), ((484, 514), 'numpy.array', 'np.array', (["data['train_labels']"], {}), "(data['train_labels'])\n", (492, 514), True, 'import numpy as np\n'), ((525, 552), 'numpy.array', 'np.array', (["data['test_data']"], {}), "(data['test_data'])\n", (533, 552), True, 'import numpy as np\n'), ((563, 592), 'numpy.array', 'np.array', (["data['test_labels']"], {}), "(data['test_labels'])\n", (571, 592), True, 'import numpy as np\n'), ((604, 621), 'numpy.ravel', 'np.ravel', (['train_y'], {}), '(train_y)\n', (612, 621), True, 'import numpy as np\n'), ((631, 647), 'numpy.ravel', 'np.ravel', (['test_y'], {}), '(test_y)\n', (639, 647), True, 'import numpy as np\n'), ((659, 687), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['train_x'], {}), '(train_x)\n', (678, 687), False, 'from sklearn import preprocessing\n'), ((697, 724), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['test_x'], {}), '(test_x)\n', (716, 724), False, 'from sklearn import preprocessing\n'), ((731, 883), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '(10, 1)', 'batch_size': '(100)', 'learning_rate': '"""adaptive"""', 'max_iter': '(9)', 'solver': '"""sgd"""', 'verbose': '(True)', 'learning_rate_init': '(0.1)'}), "(hidden_layer_sizes=(10, 1), batch_size=100, learning_rate=\n 'adaptive', max_iter=9, solver='sgd', verbose=True, learning_rate_init=0.1)\n", (744, 883), False, 'from sklearn.neural_network import MLPClassifier\n')] |
import pickle
import pandas as pd
import sklearn.linear_model as linear_model
from sklearn.linear_model import ElasticNetCV
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
import numpy as np
import os
from sklearn.linear_model import LassoCV
from sklearn.feature_selection import SelectFromModel
# Toggle for running against a local "data/" copy of the inputs.
demo_local = False
home_dir=""
if demo_local:
    home_dir = "data/"
# NOTE(review): 237 is presumably the column count of the fixed-effect block
# preceding the biocharacteristics in input_matrix (the code in main() indexes
# with offsets 238/239) -- confirm; this constant is not used below.
number_of_biochars=237

# Pickled design-matrix filenames, all resolved relative to home_dir.
S_matrix_filename="S_matrix.pkl"
S_matrix_filename_new="S_matrix_new.pkl"
Z_matrix_filename="Z_matrix.pkl"
Z_matrix_filename_new="Z_matrix_new.pkl"
Y_matrix_filename="Y_matrix.pkl"
#gamma_ck is the fixed effect dummy matrix on circuit-target
gamma_ck_filename="gamma_ck_new"
#gamma_kt is the fixed effect dummy matrix on target year
gamma_kt_filename="gamma_kt_new"
#gamma_ct is the fixed effect dummy matrix on circuit year
gamma_ct_filename="gamma_ct_new"

# Load the pickled matrices.
# NOTE(review): these open() handles are never closed; consider context
# managers.
S_matrix=pickle.load(open(home_dir+S_matrix_filename,"rb"))
S_matrix_new=pickle.load(open(home_dir+S_matrix_filename_new,"rb"))
# Reshape the new S vector into an explicit (n, 1) column.
S_matrix_new=S_matrix_new.reshape((S_matrix_new.shape[0],1))
Z_matrix=pickle.load(open(home_dir+Z_matrix_filename,"rb"))
Y_matrix=pickle.load(open(home_dir+Y_matrix_filename,"rb"))
gamma_ck=pickle.load(open(home_dir+gamma_ck_filename,"rb"))
gamma_kt=pickle.load(open(home_dir+gamma_kt_filename,"rb"))
gamma_ct=pickle.load(open(home_dir+gamma_ct_filename,"rb"))
print(gamma_ct.shape)

# Names of the judge biocharacteristic instruments; presumably in the column
# order of Z_matrix (main() indexes this list from input_matrix column
# positions) -- confirm.
biocharacteristics_order = ['x_aba', 'x_ageon40orless', 'x_ageon40s', 'x_ageon50s',
       'x_ageon60s', 'x_ageon70ormore', 'x_b10s', 'x_b20s', 'x_b30s',
       'x_b40s', 'x_b50s', 'x_ba_public', 'x_black', 'x_catholic',
       'x_crossa', 'x_dem', 'x_elev', 'x_evangelical', 'x_female',
       'x_instate_ba', 'x_jd_public', 'x_jewish', 'x_llm_sjd', 'x_mainline',
       'x_nonwhite', 'x_noreligion', 'x_paag', 'x_pada', 'x_pag', 'x_pago',
       'x_pasatty', 'x_pausa', 'x_pbank', 'x_pcab', 'x_pcc', 'x_pccoun',
       'x_pda', 'x_pfedjdge', 'x_pgov', 'x_pgovt', 'x_phouse', 'x_pindreg1',
       'x_plawprof', 'x_plocct', 'x_pmag', 'x_pmayor', 'x_pprivate', 'x_protestant',
       'x_psatty', 'x_pscab', 'x_psenate', 'x_psg', 'x_psgo', 'x_pshouse',
       'x_pslc', 'x_psp', 'x_pssc', 'x_pssenate', 'x_pusa', 'x_republican', 'x_unity']

# Instrument matrix: circuit-year fixed effects, the reshaped S column, then
# the biocharacteristic instruments.
input_matrix=np.column_stack(( gamma_ct,S_matrix_new,Z_matrix))
def checkResults(y_predict, y_test):
    """Print and return the root-mean-squared prediction error.

    Parameters
    ----------
    y_predict : numpy.ndarray
        Predicted values.
    y_test : numpy.ndarray
        Observed values; same shape as ``y_predict``.

    Returns
    -------
    float
        The RMSE.  Returning it (the original only printed) is backward
        compatible -- existing callers ignore the return value -- and makes
        the function usable programmatically.
    """
    rmse = np.sqrt(np.mean(np.square(y_predict - y_test)))
    print(rmse)
    return rmse
'''
def feature_selection(df, target, model = LassoCV()):
characteristics_cols = [col for col in list(df) if col.startswith('x_')]
# characteristics_cols += [col for col in list(df) if col.startswith('e_x_')]
# characteristics_cols += [col for col in list(df) if col.startswith('dummy_')]
X, y = df[characteristics_cols].fillna(0), df[target]
# clf = LassoCV()
# Use ExtraTreesClassifier() for Random Forest
sfm = SelectFromModel(model, threshold=0)
sfm.fit(X, y)
n_features = sfm.transform(X).shape[1]
# Reset the threshold till the number of features equals two.
# Note that the attribute can be set directly instead of repeatedly
# fitting the metatransformer.
while n_features > 5:
sfm.threshold += 0.05
X_transform = sfm.transform(X)
n_features = X_transform.shape[1]
features_selected = [x for (x, y) in zip(characteristics_cols, sfm.get_support()) if y == True]
return features_selected
'''
def regression(X_train, y_train):
    """Fit an ordinary least squares model to the training data.

    NOTE(review): ``normalize=True`` was deprecated in scikit-learn 1.0 and
    removed in 1.2, so this call fails on modern scikit-learn; the
    replacement is scaling the features (e.g. ``StandardScaler``) before the
    fit.  Confirm the pinned scikit-learn version before changing.

    Returns the fitted ``LinearRegression`` model.
    """
    #features = feature_selection(X_train, y_train)
    model = linear_model.LinearRegression(normalize=True)
    model.fit(X_train, y_train)
    return model
def predict(model, X_test, y_test):
    """Predict on ``X_test`` with ``model`` and report the RMSE vs ``y_test``."""
    checkResults(model.predict(X_test), y_test)
def main():
    """Run the Lasso instrument-selection / post-selection OLS pipeline.

    Selects instruments for the endogenous regressor with a cross-validated
    Lasso, then (if any survive) runs OLS of the regressor on the selected
    instruments and pickles the results under data/OLS/.
    """
    # X is the endogenous regressor (S_ckt)
    # Z are the instruments (biocharacteristics, weighted and unweighted in a circuit-year)
    Z = input_matrix
    X = S_matrix
    #Implementing a plain regression model
    #model = regression(X_train, y_train)
    #print("Using plain Linear regression. MSE below:")
    #predict(model, X_train, y_train)
    # de-mean and standardize (optional)
    X = (X - X.mean()) / X.std()
    Z = (Z - Z.mean()) / Z.std()
    X = X.flatten()
    # N is the number of datapoints
    # Q is the number of instruments
    # We have only one endogenous regressor
    N = X.shape[0]
    Q = Z.shape[1]
    #configure Lasso
    enetcv = LassoCV(n_alphas=20, n_jobs=4, selection='cyclic', max_iter=1e5, tol=1e-4)
    # #configure elasticnet
    # enetcv = ElasticNetCV(l1_ratio=[.01, .1, .5, .7, .9, .99, 1], n_alphas=20, n_jobs=4,
    #                      selection='cyclic', max_iter=50000, tol=1e-4)
    #
    # fit Lasso/elastic net
    enetcv.fit(Z, X)
    Xhat_enet = enetcv.predict(Z)
    # save index of non-zero coefficients in enetZ
    enetZ = np.where(enetcv.coef_ != 0)[0]
    # Number of instruments selected
    numSelected = len(enetZ)
    print("Total number of data points: {0}".format(N))
    print("Total number of instruments: {0}".format(Q))
    print("Number of instruments selected: {0}".format(numSelected))
    print(S_matrix_new.shape)
    #print(enetZ)
    # NOTE(review): the 238/239 offsets assume the biocharacteristic columns
    # start at input_matrix column 238 (after the gamma_ct block and the S
    # column) -- confirm against the input_matrix construction.
    print("Selected biocharacteristics: " + str([biocharacteristics_order[x-238] for x in enetZ if x>239]))
    # if all zeros, None of the instruments are correlated to X
    if numSelected == 0:
        print("None of the instruments are good. Skipping OLS.")
        return
    else:
        # run OLS with selected instruments; HC0 = heteroskedasticity-robust
        # standard errors.
        postenet = sm.OLS(X, Z[:, enetZ]).fit(cov_type='HC0') # (cov_type='cluster',cov_kwds={'groups':(clusters)})
        print(postenet.summary())
    # Save the predicted endogenous regressor vector of X
    Xhat_post_enet = postenet.predict()
    os.makedirs(os.path.join('data', 'OLS'), exist_ok=True)
    pd.to_pickle(enetZ, os.path.join('data','OLS','enetZ.pkl'))
    pd.to_pickle(Xhat_enet, os.path.join('data','OLS','Xhat-enet.pkl'))
    pd.to_pickle(Xhat_post_enet, os.path.join('data','OLS','Xhat-post-enet.pkl'))
# Script entry point.
if __name__ == "__main__":
    main()
"sklearn.linear_model.LassoCV",
"statsmodels.api.OLS",
"numpy.square",
"sklearn.linear_model.LinearRegression",
"numpy.where",
"numpy.column_stack",
"os.path.join",
"numpy.sqrt"
] | [((2462, 2513), 'numpy.column_stack', 'np.column_stack', (['(gamma_ct, S_matrix_new, Z_matrix)'], {}), '((gamma_ct, S_matrix_new, Z_matrix))\n', (2477, 2513), True, 'import numpy as np\n'), ((3829, 3874), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {'normalize': '(True)'}), '(normalize=True)\n', (3858, 3874), True, 'import sklearn.linear_model as linear_model\n'), ((4723, 4809), 'sklearn.linear_model.LassoCV', 'LassoCV', ([], {'n_alphas': '(20)', 'n_jobs': '(4)', 'selection': '"""cyclic"""', 'max_iter': '(100000.0)', 'tol': '(0.0001)'}), "(n_alphas=20, n_jobs=4, selection='cyclic', max_iter=100000.0, tol=\n 0.0001)\n", (4730, 4809), False, 'from sklearn.linear_model import LassoCV\n'), ((2702, 2717), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (2711, 2717), True, 'import numpy as np\n'), ((2729, 2741), 'numpy.sqrt', 'np.sqrt', (['mse'], {}), '(mse)\n', (2736, 2741), True, 'import numpy as np\n'), ((5146, 5173), 'numpy.where', 'np.where', (['(enetcv.coef_ != 0)'], {}), '(enetcv.coef_ != 0)\n', (5154, 5173), True, 'import numpy as np\n'), ((6081, 6108), 'os.path.join', 'os.path.join', (['"""data"""', '"""OLS"""'], {}), "('data', 'OLS')\n", (6093, 6108), False, 'import os\n'), ((6149, 6189), 'os.path.join', 'os.path.join', (['"""data"""', '"""OLS"""', '"""enetZ.pkl"""'], {}), "('data', 'OLS', 'enetZ.pkl')\n", (6161, 6189), False, 'import os\n'), ((6217, 6261), 'os.path.join', 'os.path.join', (['"""data"""', '"""OLS"""', '"""Xhat-enet.pkl"""'], {}), "('data', 'OLS', 'Xhat-enet.pkl')\n", (6229, 6261), False, 'import os\n'), ((6294, 6343), 'os.path.join', 'os.path.join', (['"""data"""', '"""OLS"""', '"""Xhat-post-enet.pkl"""'], {}), "('data', 'OLS', 'Xhat-post-enet.pkl')\n", (6306, 6343), False, 'import os\n'), ((5825, 5847), 'statsmodels.api.OLS', 'sm.OLS', (['X', 'Z[:, enetZ]'], {}), '(X, Z[:, enetZ])\n', (5831, 5847), True, 'import statsmodels.api as sm\n')] |
import itertools
import math
import copy
import numpy as np
__all__ = ['HybridModeling']
__docformat__ = "restructuredtext en"
class HybridModeling(object):
    """Class containing a collection of methods needed for seismic inversion in
    the frequency domain.

    This collection is designed so that a collection of like-methods can be
    passed to an optimization routine, changing how we compute each part, e.g., in
    time, frequency, or the Laplace domain, without having to reimplement the
    optimization routines.

    A collection of inversion functions must contain a procedure for computing:
    * the forward model: apply script_F (in our notation)
    * migrate: apply F* (in our notation)
    * demigrate: apply F (in our notation)
    * Hessian?

    Attributes
    ----------
    solver : pysit wave solver object
        A wave solver that inherits from pysit.solvers.WaveSolverBase

    """

    # Read-only class descriptors: this "hybrid" scheme pairs a time-domain
    # solver ("time") with frequency-domain modeled data ("frequency").
    @property
    def solver_type(self): return "time"
    @property
    def modeling_type(self): return "frequency"
    def __init__(self, solver, dft_points_per_period=12.0, adjoint_energy_threshold=1e-5):
        """Constructor for the HybridModeling class.

        Parameters
        ----------
        solver : pysit wave solver object
            A wave solver that inherits from pysit.solvers.WaveSolverBase.
            Its ``supports['equation_dynamics']`` must equal
            ``self.solver_type`` (``"time"``).
        dft_points_per_period : float, optional
            Minimum number of DFT sample points per period of a modeled
            frequency; must be at least 2 (Nyquist).
        adjoint_energy_threshold : float, optional
            Stored threshold; presumably used by the adjoint computation
            (not referenced in the code visible here -- confirm).

        Raises
        ------
        TypeError
            If the solver's equation dynamics do not match this modeling
            scheme's solver type.
        ValueError
            If ``dft_points_per_period`` is less than 2.
        """
        if self.solver_type == solver.supports['equation_dynamics']:
            self.solver = solver
        else:
            raise TypeError("Argument 'solver' type {1} does not match modeling solver type {0}.".format(self.solver_type, solver.supports['equation_dynamics']))
        if dft_points_per_period < 2:
            raise ValueError("Must have at least 2 points per period for DFT.")
        self.dft_points_per_period = dft_points_per_period
        self.adjoint_energy_threshold = adjoint_energy_threshold
    def _setup_forward_rhs(self, rhs_array, data):
        """Pad ``data`` onto the solver mesh, writing into ``rhs_array``.

        Returns the padded array (the output of ``mesh.pad_array``).
        """
        return self.solver.mesh.pad_array(data, out_array=rhs_array)
def _compute_subsample_indices(self, frequencies):
dt = self.solver.dt
subsample_indices = dict()
for nu in frequencies:
max_dt = 1./(self.dft_points_per_period*nu) # nyquist is 1/2nu
ratio = max_dt/dt
idx = max(int(math.floor(ratio)),1)
if idx*dt > max_dt:
raise ValueError("Something went wrong in determinining large DFT time steps.")
subsample_indices[nu] = idx
return subsample_indices
    def forward_model(self, shot, m0, frequencies, return_parameters=[]):
        """Applies the forward model to the model for the given solver.

        Parameters
        ----------
        shot : pysit.Shot
            Gives the source signal approximation for the right hand side.
        m0 : solver.ModelParameters
            Background model the wave is propagated in.
        frequencies : float or iterable of float
            Frequencies (Hz) at which DFT components are accumulated.
        return_parameters : list of {'wavefield', 'simdata', 'simdata_time', 'dWaveOp'}

        Returns
        -------
        retval : dict
            Dictionary whose keys are return_parameters that contains the specified data.

        Notes
        -----
        * u is used as the target field universally.  It could be velocity potential, it could be displacement, it could be pressure.
        * uhat is used to generically refer to the DFT of u that is needed to compute the imaging condition.
        * The wave is propagated with the *time* solver; the frequency
          components uhat are built up incrementally as a (subsampled)
          discrete Fourier transform of the time steps.
        """

        # Local references
        solver = self.solver
        solver.model_parameters = m0 # this updates dt and the number of steps so that is appropriate for the current model

        mesh = solver.mesh
        d = solver.domain
        dt = solver.dt
        nsteps = solver.nsteps
        source = shot.sources

        # Sanitize the input: a scalar frequency becomes a one-element list.
        if not np.iterable(frequencies):
            frequencies = [frequencies]

        # Setup data storage for the forward modeled data
        if 'simdata' in return_parameters:
            simdata = dict()
            for nu in frequencies:
                simdata[nu] = np.zeros(shot.receivers.receiver_count)

        # Setup data storage for the forward modeled data (in time, if it is needed, and it frequently is)
        if 'simdata_time' in return_parameters:
            simdata_time = np.zeros((solver.nsteps, shot.receivers.receiver_count))

        # Storage for the derivative of the propagation operator with respect to the model \frac{d\script{L}}{dm}
        if 'dWaveOp' in return_parameters:
            dWaveOp = dict()
            for nu in frequencies:
                dWaveOp[nu] = 0.0

        # Initialize the DFT components (accumulated per frequency).
        uhats = dict()
        for nu in frequencies:
            uhats[nu] = 0.0

        # Per-frequency time-step strides for the subsampled DFT.
        subsample_indices = self._compute_subsample_indices(frequencies)

        # Step k = 0
        # p_0 is a zero array because if we assume the input signal is causal
        # and we assume that the initial system (i.e., p_(-2) and p_(-1)) is
        # uniformly zero, then the leapfrog scheme would compute that p_0 = 0 as
        # well. ukm1 is needed to compute the temporal derivative.
        solver_data = solver.SolverData()

        rhs_k = np.zeros(mesh.shape(include_bc=True))
        rhs_kp1 = np.zeros(mesh.shape(include_bc=True))

        for k in range(nsteps):

            # Local reference
            uk = solver_data.k.primary_wavefield
            uk_bulk = mesh.unpad_array(uk)

            # Record the data at t_k
            if 'simdata_time' in return_parameters:
                shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata_time)

            t = k*dt

            # Accumulate the (subsampled) DFT of the padded wavefield.
            # The factor dt*idx is the effective sampling interval.
            for nu in frequencies:
                idx = subsample_indices[nu]
                if np.mod(k, idx) == 0:
                    uhats[nu] += uk*(np.exp(-1j*2*np.pi*nu*t)*dt*idx)

            if k == 0:
                rhs_k = self._setup_forward_rhs(rhs_k, source.f(k*dt))
                rhs_kp1 = self._setup_forward_rhs(rhs_kp1, source.f((k+1)*dt))
            else:
                # shift time forward
                rhs_k, rhs_kp1 = rhs_kp1, rhs_k
                rhs_kp1 = self._setup_forward_rhs(rhs_kp1, source.f((k+1)*dt))

            # Note, we compute result for k+1 even when k == nsteps-1.  We need
            # it for the time derivative at k=nsteps-1.
            solver.time_step(solver_data, rhs_k, rhs_kp1)

            # When k is the nth step, the next step is uneeded, so don't swap
            # any values.  This way, uk at the end is always the final step
            if(k == (nsteps-1)): break

            # Don't know what data is needed for the solver, so the solver data
            # handles advancing everything forward by one time step.
            # k-1 <-- k, k <-- k+1, etc
            solver_data.advance()

        # Record the data at t_k (sampled from the frequency components).
        if 'simdata' in return_parameters:
            for nu in frequencies:
                simdata[nu] = shot.receivers.sample_data_from_array(mesh.unpad_array(uhats[nu]))

        # Compute time derivative of p at time k
        if 'dWaveOp' in return_parameters:
            for nu in frequencies:
                dWaveOp[nu] += solver.compute_dWaveOp('frequency', uhats[nu], nu)

        retval = dict()

        if 'dWaveOp' in return_parameters:
            retval['dWaveOp'] = dWaveOp
        if 'simdata' in return_parameters:
            retval['simdata'] = simdata
        if 'wavefield' in return_parameters:
            # Return unpadded copies so callers cannot alias solver storage.
            _uhats = dict()
            _uhats = {nu: mesh.unpad_array(uhats[nu], copy=True) for nu in frequencies}
            retval['wavefield'] = _uhats
        if 'simdata_time' in return_parameters:
            retval['simdata_time'] = simdata_time

        return retval
    def migrate_shot(self, shot, m0, operand_simdata, frequencies,
                     operand_dWaveOpAdj=None, operand_model=None,
                     frequency_weights=None,
                     dWaveOp=None,
                     adjointfield=None, dWaveOpAdj=None):
        """Performs migration on a single shot.

        Parameters
        ----------
        shot : pysit.Shot
            Shot for which to compute migration.
        m0 : solver.ModelParameters
            Background model.
        operand_simdata : ndarray
            Operand, i.e., b in F*b.  This data is in TIME to properly compute the adjoint.
        frequencies : float or iterable of float
            Frequencies (Hz) to migrate at.
        dWaveOp : dict, optional
            Imaging condition components from the forward model; recomputed
            here via ``forward_model`` if not supplied.
        adjointfield : dict, optional
            If provided, filled in-place with the adjoint field per frequency.
        dWaveOpAdj : dict, optional
            If provided, filled in-place with the adjoint operator derivative
            per frequency.

        Returns
        -------
        The migrated image (the imaging condition from ``adjoint_model``).
        """

        # If the imaging component has not already been computed, compute it.
        prep_rp = list()
        if dWaveOp is None:
            prep_rp.append('dWaveOp')
            dWaveOp = dict()

        if len(prep_rp) > 0:
            retval = self.forward_model(shot, m0, frequencies, return_parameters=prep_rp)
            if 'dWaveOp' in prep_rp:
                for nu in frequencies:
                    dWaveOp[nu] = retval['dWaveOp'][nu]

        # Request the optional outputs only when the caller passed containers.
        rp = ['imaging_condition']
        if adjointfield is not None:
            rp.append('adjointfield')
        if dWaveOpAdj is not None:
            rp.append('dWaveOpAdj')

        rv = self.adjoint_model(shot, m0, operand_simdata, frequencies, operand_dWaveOpAdj=operand_dWaveOpAdj, operand_model=operand_model, frequency_weights=frequency_weights, return_parameters=rp, dWaveOp=dWaveOp)

        # If the adjoint field is desired as output.
        for nu in frequencies:
            if adjointfield is not None:
                adjointfield[nu] = rv['adjointfield'][nu]
            if dWaveOpAdj is not None:
                dWaveOpAdj[nu] = rv['dWaveOpAdj'][nu]

        # Get the imaging condition part from the result, this is the migrated image.
        ic = rv['imaging_condition']

        return ic
def _setup_adjoint_rhs(self, rhs_array, shot, k, operand_simdata, operand_model, operand_dWaveOpAdj):
# basic rhs is always the pseudodata or residual
rhs_array = self.solver.mesh.pad_array(shot.receivers.extend_data_to_array(k, data=operand_simdata), out_array=rhs_array)
# for Hessians, sometimes there is more to the rhs
if (operand_dWaveOpAdj is not None) and (operand_model is not None):
rhs_array += operand_model*operand_dWaveOpAdj[k]
return rhs_array
def adjoint_model(self, shot, m0,
operand_simdata, frequencies,
operand_dWaveOpAdj=None, operand_model=None,
frequency_weights=None,
return_parameters=[],
dWaveOp=None):
"""Solves for the adjoint field in frequency.
m*q_tt - lap q = resid
Parameters
----------
shot : pysit.Shot
Gives the receiver model for the right hand side.
operand : ndarray
Right hand side, usually the residual.
frequencies : list of 2-tuples
2-tuple, first element is the frequency to use, second element the weight.
return_parameters : list of {'q', 'qhat', 'ic'}
dWaveOp : ndarray
Imaging component from the forward model (in frequency).
Returns
-------
retval : dict
Dictionary whose keys are return_parameters that contains the specified data.
Notes
-----
* q is the adjoint field.
* qhat is the DFT of oq at the specified frequencies
* ic is the imaging component. Because this function computes many of
the things required to compute the imaging condition, there is an option
to compute the imaging condition as we go. This should be used to save
computational effort. If the imaging condition is to be computed, the
optional argument utt must be present.
"""
# Sanitize the input
if not np.iterable(frequencies):
frequencies = [frequencies]
# Local references
solver = self.solver
solver.model_parameters = m0
mesh = solver.mesh
d = solver.domain
dt = solver.dt
nsteps = solver.nsteps
source = shot.sources
# Sanitize the input
if not np.iterable(frequencies):
frequencies = [frequencies]
qhats = dict()
vhats = dict()
for nu in frequencies:
vhats[nu] = 0.0
subsample_indices = self._compute_subsample_indices(frequencies)
if 'dWaveOpAdj' in return_parameters:
dWaveOpAdj = dict()
for nu in frequencies:
dWaveOpAdj[nu] = 0.0
# If we are computing the imaging condition, ensure that all of the parts are there.
if dWaveOp is None and 'imaging_condition' in return_parameters:
raise ValueError('To compute imaging condition, forward component must be specified.')
if operand_model is not None:
operand_model = operand_model.with_padding()
# Time-reversed wave solver
solver_data = solver.SolverData()
rhs_k = np.zeros(mesh.shape(include_bc=True))
rhs_km1 = np.zeros(mesh.shape(include_bc=True))
max_energy = 0.0
# Loop goes over the valid indices backwards
for k in range(nsteps-1, -1, -1): #xrange(int(solver.nsteps)):
# Local references
vk = solver_data.k.primary_wavefield
max_energy = max(max_energy, np.linalg.norm(vk, np.inf))
t = k*dt
# When dpdt is not set, store the current q, otherwise compute the
# relevant gradient portion
for nu in frequencies:
# Note, this compuation is the DFT, but we need the conjugate later, so rather than exp(-1j...) we use exp(1j...) to compute the conjugate now.
idx = subsample_indices[nu]
if np.mod(k, idx) == 0:
vhats[nu] += vk*(np.exp(-1j*2*np.pi*nu*(solver.tf-t))*dt*idx)
if k == nsteps-1:
rhs_k = self._setup_adjoint_rhs( rhs_k, shot, k, operand_simdata, operand_model, operand_dWaveOpAdj)
rhs_km1 = self._setup_adjoint_rhs( rhs_km1, shot, k-1, operand_simdata, operand_model, operand_dWaveOpAdj)
else:
# shift time forward
rhs_k, rhs_km1 = rhs_km1, rhs_k
rhs_km1 = self._setup_adjoint_rhs( rhs_km1, shot, k-1, operand_simdata, operand_model, operand_dWaveOpAdj)
solver.time_step(solver_data, rhs_k, rhs_km1)
# Don't know what data is needed for the solver, so the solver data
# handles advancing everything forward by one time step.
# k-1 <-- k, k <-- k+1, etc
solver_data.advance()
# When computing the adjoint field by DFT, the field, as a function of
# time, must have finite support. To achieve this, the wave must be
# given sufficient time to die out. In practice, an additional solver.tf
# seconds appears to be sufficient, though it may be excessive.
# As of now, no data about the wavefields are stored in this function,
# so this part simply does the DFT on the conjugate (time-reversed)
# adjoint field vk. The right-hand-side should be zero.
rhs_k *= 0
for k in range(1,nsteps):
vk = solver_data.k.primary_wavefield
t = -k*dt
if np.abs(np.linalg.norm(vk, np.inf)/max_energy) < self.adjoint_energy_threshold:
# print "Breaking early:", nsteps + k, k
break
for nu in frequencies:
idx = subsample_indices[nu]
if np.mod(k, idx) == 0:
vhats[nu] += vk*(np.exp(-1j*2*np.pi*nu*(solver.tf-t))*dt*idx)
solver.time_step(solver_data, rhs_k, rhs_k)
solver_data.advance()
retval = dict()
for nu in frequencies:
qhats[nu] = np.conj(vhats[nu],vhats[nu])
# The next line accounts for the fact that not all frequencies are
# integer, in the relationship between the adjoint field q and the
# conjugate adjoint field v.
qhats[nu] *= np.exp(-1j*2*np.pi*nu*solver.tf)
if 'adjointfield' in return_parameters:
_qhats = dict()
_qhats = {nu: mesh.unpad_array(qhats[nu], copy=True) for nu in frequencies}
retval['adjointfield'] = _qhats
if 'dWaveOpAdj' in return_parameters:
for nu in frequencies:
dWaveOpAdj[nu] = solver.compute_dWaveOp('frequency', qhats[nu],nu)
retval['dWaveOpAdj'] = dWaveOpAdj
# If the imaging component needs to be computed, do it
if 'imaging_condition' in return_parameters:
ic = solver.model_parameters.perturbation(dtype=np.complex)
if frequency_weights is None:
frequency_weights = itertools.repeat(1.0)
for nu,weight in zip(frequencies,frequency_weights):
# note, no dnu here because the nus are not generally the complete set, so dnu makes little sense, otherwise dnu = 1./(nsteps*dt)
ic -= weight*qhats[nu]*np.conj(dWaveOp[nu])
retval['imaging_condition'] = ic.without_padding()
return retval
    def linear_forward_model(self, shot, m0, m1, frequencies, return_parameters=[]):
        """Applies the forward model to the model for the given solver.

        Parameters
        ----------
        shot : pysit.Shot
            Gives the source signal approximation for the right hand side.
        m0 : solver.ModelParameters
            Background model.
        m1 : solver.ModelParameters
            Model perturbation that drives the linearized (Born) field u1.
        frequencies : float or iterable of float
            Frequencies (Hz) at which DFT components are accumulated.
        return_parameters : list of {'dWaveOp0', 'wavefield1', 'dWaveOp1', 'simdata', 'simdata_time'}, optional
            Values to return.

        Returns
        -------
        retval : dict
            Dictionary whose keys are return_parameters that contains the specified data.

        Notes
        -----
        * u1 is used as the target field universally.  It could be velocity potential, it could be displacement, it could be pressure.
        * u1tt is used to generically refer to the derivative of u1 that is needed to compute the imaging condition.
        * If u0tt is not specified, it may be computed on the fly at potentially high expense.
        * The background field u0 is propagated *alongside* u1 (one extra
          step ahead) so its operator derivative can feed u1's right-hand
          side without storing the full time history.
        """

        # Sanitize the input: a scalar frequency becomes a one-element list.
        if not np.iterable(frequencies):
            frequencies = [frequencies]

        # Local references
        solver = self.solver
        solver.model_parameters = m0 # this updates dt and the number of steps so that is appropriate for the current model

        mesh = solver.mesh
        d = solver.domain
        dt = solver.dt
        nsteps = solver.nsteps
        source = shot.sources

        m1_padded = m1.with_padding()

        # Storage for the field
        u1hats = dict()
        for nu in frequencies:
            u1hats[nu] = 0.0

        # Setup data storage for the forward modeled data
        if 'simdata' in return_parameters:
            simdata = dict()

        # Setup data storage for the forward modeled data (in time, if it is needed, and it frequently is)
        if 'simdata_time' in return_parameters:
            simdata_time = np.zeros((solver.nsteps, shot.receivers.receiver_count))

        # Storage for the time derivatives of p
        if 'dWaveOp0' in return_parameters:
            dWaveOp0 = dict()
            u0hats = dict()
            for nu in frequencies:
                dWaveOp0[nu] = 0.0
                u0hats[nu] = 0.0

        # Storage for the time derivatives of p
        if 'dWaveOp1' in return_parameters:
            dWaveOp1 = dict()
            for nu in frequencies:
                dWaveOp1[nu] = 0.0

        subsample_indices = self._compute_subsample_indices(frequencies)

        # Step k = 0
        # p_0 is a zero array because if we assume the input signal is causal
        # and we assume that the initial system (i.e., p_(-2) and p_(-1)) is
        # uniformly zero, then the leapfrog scheme would compute that p_0 = 0 as
        # well. ukm1 is needed to compute the temporal derivative.
        solver_data = solver.SolverData()

        # (***) Given that these modeling tools are for frequency methods, we do not
        # have the time derivatives / wave operator derivatives (aka dWaveOp) in
        # time available.  This saves space, but as a result we have to recompute
        # it.
        # Also, because implicit and some ODE methods require uhat_1 at times k
        # and k+1, we need uhat_0 at k, k+1, and k+2, so all of this rigamaroll
        # is to get that.
        solver_data_u0 = solver.SolverData()

        # For u0, set up the right hand sides
        rhs_u0_k = np.zeros(mesh.shape(include_bc=True))
        rhs_u0_kp1 = np.zeros(mesh.shape(include_bc=True))
        rhs_u0_k = self._setup_forward_rhs(rhs_u0_k, source.f(0*dt))
        rhs_u0_kp1 = self._setup_forward_rhs(rhs_u0_kp1, source.f(1*dt))

        # compute u0_kp1 so that we can compute dWaveOp0_k (needed for u1)
        solver.time_step(solver_data_u0, rhs_u0_k, rhs_u0_kp1)

        # compute dwaveop_0 (k=0) and allocate space for kp1 (needed for u1 time step)
        dWaveOp0_k = solver.compute_dWaveOp('time', solver_data_u0)
        dWaveOp0_kp1 = dWaveOp0_k.copy()

        solver_data_u0.advance()

        # from here, it makes more sense to refer to rhs_u0 as kp1 and kp2, because those are the values we need
        # to compute u0_kp2, which is what we need to compute dWaveOp0_kp1
        rhs_u0_kp1, rhs_u0_kp2 = rhs_u0_k, rhs_u0_kp1 # to reuse the allocated space and setup the swap that occurs a few lines down

        for k in range(nsteps):

            uk = solver_data.k.primary_wavefield
            uk_bulk = mesh.unpad_array(uk)
            t = k*dt

            # Record the data at t_k
            if 'simdata_time' in return_parameters:
                shot.receivers.sample_data_from_array(uk_bulk, k, data=simdata_time)

            # Accumulate the (subsampled) DFT of the linearized field u1.
            for nu in frequencies:
                idx = subsample_indices[nu]
                if np.mod(k, idx) == 0:
                    u1hats[nu] += uk*(np.exp(-1j*2*np.pi*nu*t)*dt*idx)

            # Accumulate the (subsampled) DFT of the background field u0.
            if 'dWaveOp0' in return_parameters:
                for nu in frequencies:
                    idx = subsample_indices[nu]
                    if np.mod(k, idx) == 0:
                        u0hats[nu] += solver_data_u0.k.primary_wavefield*(np.exp(-1j*2*np.pi*nu*t)*dt*idx)

            # Note, we compute result for k+1 even when k == nsteps-1.  We need
            # it for the time derivative at k=nsteps-1.
            # See comment (***) above.

            # compute u0_kp2 so we can get dWaveOp0_kp1 for the rhs for u1
            rhs_u0_kp1, rhs_u0_kp2 = rhs_u0_kp2, rhs_u0_kp1
            rhs_u0_kp2 = self._setup_forward_rhs(rhs_u0_kp2, source.f((k+2)*dt))
            solver.time_step(solver_data_u0, rhs_u0_kp1, rhs_u0_kp2)

            # shift the dWaveOp0's (ok at k=0 because they are equal then)
            # The derivative component is computed after the time step so that
            # information from time k+1 can be used to compute the derivative.
            dWaveOp0_k, dWaveOp0_kp1 = dWaveOp0_kp1, dWaveOp0_k
            dWaveOp0_kp1 = solver.compute_dWaveOp('time', solver_data_u0)
            solver_data_u0.advance()

            if k == 0:
                rhs_k = m1_padded*(-1*dWaveOp0_k)
                rhs_kp1 = m1_padded*(-1*dWaveOp0_kp1)
            else:
                rhs_k, rhs_kp1 = rhs_kp1, m1_padded*(-1*dWaveOp0_kp1)

            solver.time_step(solver_data, rhs_k, rhs_kp1)

            # When k is the nth step, the next step is uneeded, so don't swap
            # any values.  This way, uk at the end is always the final step
            if(k == (nsteps-1)): break

            # Don't know what data is needed for the solver, so the solver data
            # handles advancing everything forward by one time step.
            # k-1 <-- k, k <-- k+1, etc
            solver_data.advance()

        # Compute time derivative of p at time k
        if 'dWaveOp0' in return_parameters:
            for nu in frequencies:
                dWaveOp0[nu] = solver.compute_dWaveOp('frequency', u0hats[nu],nu)

        # Compute time derivative of p at time k
        if 'dWaveOp1' in return_parameters:
            for nu in frequencies:
                dWaveOp1[nu] = solver.compute_dWaveOp('frequency', u1hats[nu],nu)

        # Record the data at t_k
        if 'simdata' in return_parameters:
            for nu in frequencies:
                simdata[nu] = shot.receivers.sample_data_from_array(mesh.unpad_array(u1hats[nu]))

        retval = dict()

        if 'dWaveOp0' in return_parameters:
            retval['dWaveOp0'] = dWaveOp0
        if 'wavefield1' in return_parameters:
            _u1hats = dict()
            _u1hats = {nu: mesh.unpad_array(u1hats[nu], copy=True) for nu in frequencies}
            retval['wavefield1'] = _u1hats
        if 'dWaveOp1' in return_parameters:
            retval['dWaveOp1'] = dWaveOp1
        if 'simdata' in return_parameters:
            retval['simdata'] = simdata
        if 'simdata_time' in return_parameters:
            retval['simdata_time'] = simdata_time

        return retval
def adjoint_test(frequencies=[10.0, 10.5, 10.1413515123], plots=False, data_noise=0.0, purefrequency=False):
    """Dot-product (adjoint) test for HybridModeling: checks that
    <F m1, d> matches <m1, F* d> for the linearized forward model F and its
    adjoint, optionally comparing against the pure-frequency implementation.
    """
    # default frequencies are enough to indicate a bug due to integer offsets
    import numpy as np
    import matplotlib.pyplot as plt

    from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis
    from pysit.gallery import horizontal_reflector

    # Define Domain
    pmlx = PML(0.3, 100, ftype='quadratic')
    pmlz = PML(0.3, 100, ftype='quadratic')

    x_config = (0.1, 1.0, pmlx, pmlx)
    z_config = (0.1, 0.8, pmlz, pmlz)

    d = RectangularDomain( x_config, z_config )

    m = CartesianMesh(d, 90, 70)

    # Generate true wave speed
    # (M = C^-2 - C0^-2)
    C0, C = horizontal_reflector(m)

    # Set up shots
    Nshots = 1
    shots = []

    xmin = d.x.lbound
    xmax = d.x.rbound
    nx = m.x.n
    zmin = d.z.lbound
    zmax = d.z.rbound

    point_approx = 'delta'

    for i in range(Nshots):

        # Define source location and type
        source = PointSource(m, (.188888, 0.18888), RickerWavelet(10.0), approximation=point_approx)

        # Define set of receivers
        zpos = zmin + (1./9.)*zmax
        xpos = np.linspace(xmin, xmax, nx)
        receivers = ReceiverSet(m, [PointReceiver(m, (x, zpos)) for x in xpos])

        # Create and store the shot
        shot = Shot(source, receivers)
        shots.append(shot)

    # Define and configure the wave solver
    trange=(0.,3.0)

    solver = ConstantDensityAcousticWave(m,
                                         # formulation='ode',
                                         formulation='scalar',
                                         model_parameters={'C': C},
                                         spatial_accuracy_order=4,
                                         # spatial_shifted_differences=True,
                                         # cfl_safety=0.01,
                                         trange=trange,
                                         time_accuracy_order=4)

    tools = HybridModeling(solver)
    m0 = solver.ModelParameters(m,{'C': C0})

    solver_frequency = ConstantDensityHelmholtz(m,
                                              model_parameters={'C': C0},
                                              spatial_shifted_differences=True,
                                              spatial_accuracy_order=4)
    frequencytools = FrequencyModeling(solver_frequency)
    m0_freq = solver_frequency.ModelParameters(m,{'C': C0})

    np.random.seed(0)

    m1 = m0.perturbation()
    pert = np.random.rand(*m1.data.shape)
    m1 += pert

    # freqs = [10.5514213] #[3.0, 5.0, 10.0]
    # freqs = [10.5]
    # freqs = np.linspace(3,19,8)
    freqs = frequencies

    # Forward model in the background, keeping the pieces the adjoint needs.
    fwdret = tools.forward_model(shot, m0, freqs, ['wavefield', 'dWaveOp', 'simdata_time'])
    dWaveOp0 = fwdret['dWaveOp']
    data = fwdret['simdata_time']
    u0hat = fwdret['wavefield'][freqs[0]]

    data += data_noise*np.random.rand(*data.shape)

    # DFT of the recorded time data at the test frequencies.
    dhat = dict()
    for nu in freqs: dhat[nu]=0

    assert data.shape[0] == solver.nsteps
    for k in range(solver.nsteps):
        t = k*solver.dt
        for nu in freqs:
            dhat[nu] += data[k,:]*np.exp(-1j*2*np.pi*nu*t)*solver.dt

    print("Hybrid:")
    linfwdret = tools.linear_forward_model(shot, m0, m1, freqs, ['simdata','wavefield1','simdata_time'])
    lindata = linfwdret['simdata']
    lindata_time = linfwdret['simdata_time']
    u1hat = linfwdret['wavefield1'][freqs[0]]

    adjret = tools.adjoint_model(shot, m0, data, freqs, return_parameters=['imaging_condition', 'adjointfield'], dWaveOp=dWaveOp0)
    qhat = adjret['adjointfield'][freqs[0]]
    adjmodel = adjret['imaging_condition'].asarray()

    m1_ = m1.asarray()

    # <F m1, d> versus <m1, F* d>: these two prints should (nearly) agree.
    temp_data_prod = 0.0
    for nu in freqs:
        temp_data_prod += np.dot(lindata[nu].reshape(dhat[nu].shape), np.conj(dhat[nu]))

    print(temp_data_prod)
    print(np.dot(m1_.T, np.conj(adjmodel)).squeeze()*np.prod(m.deltas))
    print(np.dot(m1_.T, np.conj(adjmodel)).squeeze()*np.prod(m.deltas) - temp_data_prod)

    if purefrequency:
        # Same test against the pure frequency-domain (Helmholtz) modeling.
        print("Frequency:")
        linfwdret_freq = frequencytools.linear_forward_model(shot, m0, m1, freqs, ['simdata','wavefield1', 'dWaveOp0'])
        lindata_freq = linfwdret_freq['simdata']
        u1hat_freq = linfwdret_freq['wavefield1'][freqs[0]]
        dWaveOp0_freq = linfwdret_freq['dWaveOp0']

        adjret_freq = frequencytools.adjoint_model(shot, m0, dhat, freqs, return_parameters=['imaging_condition', 'adjointfield'], dWaveOp=dWaveOp0_freq)
        qhat_freq = adjret_freq['adjointfield'][freqs[0]]
        adjmodel_freq = adjret_freq['imaging_condition'].asarray()

        temp_data_prod = 0.0
        for nu in freqs:
            temp_data_prod += np.dot(lindata_freq[nu].reshape(dhat[nu].shape).T, np.conj(dhat[nu]))

        print(temp_data_prod.squeeze())
        print(np.dot(m1_.T, np.conj(adjmodel_freq)).squeeze()*np.prod(m.deltas))
        print(np.dot(m1_.T, np.conj(adjmodel_freq)).squeeze()*np.prod(m.deltas) - temp_data_prod.squeeze())

    if plots:
        # NOTE(review): this branch references `Domain` and `display_on_grid`,
        # neither of which is defined or imported here — it will raise
        # NameError if executed; also `qhat_freq`/`u1hat_freq` only exist when
        # purefrequency=True.  Confirm against the intended pysit vis API.
        xx, zz = d.generate_grid()
        sl = [(xx>=0.1) & (xx<=0.99) & (zz>=0.1) & (zz<0.8)]

        pml_null = PML(0.0,100)
        x_bulk = (0.1, 1.0, 90, pml_null, pml_null)
        z_bulk = (0.1, 0.8, 70, pml_null, pml_null)
        d_bulk = Domain( (x_bulk, z_bulk) )

        def clims(*args):
            # Shared real/imag color limits across all supplied fields.
            rclim = min([np.real(x).min() for x in args]), max([np.real(x).max() for x in args])
            iclim = min([np.imag(x).min() for x in args]), max([np.imag(x).max() for x in args])
            return rclim, iclim

        qrclim, qiclim = clims(qhat, qhat_freq)
        u1rclim, u1iclim = clims(u1hat, u1hat_freq)

        plt.figure()
        plt.subplot(2,3,1)
        display_on_grid(np.real(u0hat[sl]), d_bulk)
        plt.title(r're(${\hat u_0}$)')
        plt.subplot(2,3,4)
        display_on_grid(np.imag(u0hat[sl]), d_bulk)
        plt.title(r'im(${\hat u_0}$)')
        plt.subplot(2,3,2)
        display_on_grid(np.real(qhat[sl]), d_bulk, clim=qrclim)
        plt.title(r're(${\hat q}$) H')
        plt.subplot(2,3,5)
        display_on_grid(np.imag(qhat[sl]), d_bulk, clim=qiclim)
        plt.title(r'im(${\hat q}$) H')
        plt.subplot(2,3,3)
        display_on_grid(np.real(u1hat[sl]), d_bulk, clim=u1rclim)
        plt.title(r're(${\hat u_1}$) H')
        plt.subplot(2,3,6)
        display_on_grid(np.imag(u1hat[sl]), d_bulk, clim=u1iclim)
        plt.title(r'im(${\hat u_1}$) H')
        plt.show()

        plt.figure()
        plt.subplot(2,3,1)
        display_on_grid(np.real(u0hat[sl]), d_bulk)
        plt.title(r're(${\hat u_0}$)')
        plt.subplot(2,3,4)
        display_on_grid(np.imag(u0hat[sl]), d_bulk)
        plt.title(r'im(${\hat u_0}$)')
        plt.subplot(2,3,2)
        display_on_grid(np.real(qhat_freq[sl]), d_bulk, clim=qrclim)
        plt.title(r're(${\hat q}$) P')
        plt.subplot(2,3,5)
        display_on_grid(np.imag(qhat_freq[sl]), d_bulk, clim=qiclim)
        plt.title(r'im(${\hat q}$) P')
        plt.subplot(2,3,3)
        display_on_grid(np.real(u1hat_freq[sl]), d_bulk, clim=u1rclim)
        plt.title(r're(${\hat u_1}$) P')
        plt.subplot(2,3,6)
        display_on_grid(np.imag(u1hat_freq[sl]), d_bulk, clim=u1iclim)
        plt.title(r'im(${\hat u_1}$) P')
        plt.show()
if __name__ == '__main__':
    # Benchmark/validation driver: compares this HybridModeling against the
    # packaged pysit.modeling.HybridModeling on the same shot, checking
    # wavefield agreement, per-step timing, and the adjoint dot-product test.
    #adjoint_test(purefrequency=True, frequencies=[10.0, 10.5, 10.1413515123])

    import time

    import numpy as np
    import matplotlib.pyplot as plt

    import pysit
    import pysit.vis as vis
    from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis
    from pysit.gallery import horizontal_reflector

    # Define Domain
    pmlx = PML(0.3, 100, ftype='quadratic')
    pmlz = PML(0.3, 100, ftype='quadratic')

    x_config = (0.1, 1.0, pmlx, pmlx)
    z_config = (0.1, 0.8, pmlz, pmlz)

    d = RectangularDomain( x_config, z_config )

    # The coarser mesh below overrides the finer one (first line kept for
    # quick switching between resolutions).
    m = CartesianMesh(d, 2*90, 2*70)
    m = CartesianMesh(d, 90, 70)

    # Generate true wave speed
    # (M = C^-2 - C0^-2)
    C0, C = horizontal_reflector(m)

    # Set up shots
    shots = list()

    xmin = d.x.lbound
    xmax = d.x.rbound
    nx = m.x.n
    zmin = d.z.lbound
    zmax = d.z.rbound

    point_approx = 'delta'

    # Define source location and type
    source = PointSource(m, (.188888, 0.18888), RickerWavelet(10.0), approximation=point_approx)

    # Define set of receivers
    zpos = zmin + (1./9.)*zmax
    xpos = np.linspace(xmin, xmax, nx)
    receivers = ReceiverSet(m, [PointReceiver(m, (x, zpos)) for x in xpos])

    # Create and store the shot
    shot = Shot(source, receivers)
    shots.append(shot)

    # Define and configure the wave solver
    trange=(0.,3.0)

    solver = ConstantDensityAcousticWave(m,
                                         formulation='scalar',
                                         model_parameters={'C': C},
                                         spatial_accuracy_order=4,
                                         use_cpp_acceleration=True,
                                         trange=trange,)

    class Experiment(object):
        # Bundles one modeling-tool instance with its shot and models, and
        # records wall-clock timings for the forward/linear/adjoint runs.
        def __init__(self, shot, tools, m0, m1, name='', data_noise=0.0):
            self.shot = shot
            self.tools = tools
            self.m0 = m0
            self.m1 = m1
            self.results_fwd = None
            self.name = name
            self.data_noise = data_noise

        def run_fwd(self, freqs):
            # Forward model, then DFT the (optionally noised) time data.
            tt = time.time()
            self.fwd_results = self.tools.forward_model(self.shot, self.m0, freqs, ['wavefield', 'dWaveOp', 'simdata_time'])
            self.fwd_time = time.time() - tt
            print(self.name + ": fwd run time ({0} frequency) -- {1:.6f}s".format(len(freqs), self.fwd_time))

            np.random.seed(1)
            data = self.fwd_results['simdata_time']
            self.data = data + self.data_noise*np.random.rand(*data.shape)

            dhat = dict()
            for nu in freqs: dhat[nu]=0

            assert data.shape[0] == self.tools.solver.nsteps
            for k in range(self.tools.solver.nsteps):
                t = k*self.tools.solver.dt
                for nu in freqs:
                    dhat[nu] += data[k,:]*np.exp(-1j*2*np.pi*nu*t)*self.tools.solver.dt
            self.dhat = dhat

        def run_lin_fwd(self,freqs):
            # Linearized (Born) forward model, timed.
            tt = time.time()
            self.lin_results = self.tools.linear_forward_model(self.shot, self.m0, self.m1, freqs, ['simdata','wavefield1','simdata_time'])
            self.lin_time = time.time() - tt
            print(self.name + ": lin run time ({0} frequency) -- {1:.6f}s".format(len(freqs), self.lin_time))

        def run_adj(self,freqs):
            # Adjoint model (migration), timed; requires run_fwd first.
            tt = time.time()
            self.adj_results = self.tools.adjoint_model(self.shot, self.m0, self.data, freqs, return_parameters=['imaging_condition', 'adjointfield'], dWaveOp=self.fwd_results['dWaveOp'])
            self.adj_time = time.time() - tt
            print(self.name + ": adj run time ({0} frequency) -- {1:.6f}s".format(len(freqs), self.adj_time))

    def compare_fwd(exp1, exp2, freqs, plot=True):
        # Relative error between the two experiments' forward wavefields at
        # each frequency, plus per-step timing comparison.
        for nu in freqs:
            uhat0_1 = exp1.fwd_results['wavefield'][nu]
            uhat0_2 = exp2.fwd_results['wavefield'][nu]
            diff = uhat0_1 - uhat0_2
            print("Error norm ({0} - {1}) {3: 09.4f}Hz: {2:.4e}".format(exp1.name, exp2.name, np.linalg.norm(diff)/np.linalg.norm(uhat0_1), nu))

            if plot:
                clim = min(uhat0_1.min(), uhat0_2.min()), max(uhat0_1.max(), uhat0_2.max())
                plt.figure()
                plt.subplot(3,2,1)
                vis.plot(uhat0_1.real, m,clim=clim)
                plt.colorbar()
                plt.subplot(3,2,3)
                vis.plot(uhat0_2.real, m,clim=clim)
                plt.colorbar()
                plt.subplot(3,2,5)
                vis.plot(diff.real, m,diff)
                plt.colorbar()
                plt.subplot(3,2,2)
                vis.plot(uhat0_1.imag, m,clim=clim)
                plt.colorbar()
                plt.subplot(3,2,4)
                vis.plot(uhat0_2.imag, m,clim=clim)
                plt.colorbar()
                plt.subplot(3,2,6)
                vis.plot(diff.imag, m,diff)
                plt.colorbar()
                plt.show()

        nsteps = exp1.tools.solver.nsteps
        print("\nTime steps: {0}".format(nsteps))
        print("Per step improvement (fwd): {0: .4e} ({1:.4f}x)".format((exp1.fwd_time - exp2.fwd_time)/nsteps, exp1.fwd_time/exp2.fwd_time))
        print("Per step improvement (lin): {0: .4e} ({1:.4f}x)".format((exp1.lin_time - exp2.lin_time)/nsteps, exp1.lin_time/exp2.lin_time))
        print("Per step improvement (adj): {0: .4e} ({1:.4f}x)".format((exp1.adj_time - exp2.adj_time)/nsteps, exp1.adj_time/exp2.adj_time))
        print("")

    def test_adjoints(exp, freqs):
        # Dot-product test: <F m1, d> should match <m1, F* d>.
        deltas = exp.m0.mesh.deltas
        m1_ = exp.m1.asarray()
        lindata = exp.lin_results['simdata']
        dhat = exp.dhat
        adjmodel = exp.adj_results['imaging_condition'].asarray()

        temp_data_prod = 0.0
        for nu in freqs:
            temp_data_prod += np.dot(lindata[nu].reshape(dhat[nu].shape), np.conj(dhat[nu]))

        pt1 = temp_data_prod
        pt2 = np.dot(m1_.T, np.conj(adjmodel)).squeeze()*np.prod(deltas)

        print("{0}: ".format(exp.name))
        print("<Fm1, d> = {0: .4e} ({1:.4e})".format(pt1, np.linalg.norm(pt1)))
        print("<m1, F*d> = {0: .4e} ({1:.4e})".format(pt2, np.linalg.norm(pt2)))
        print("<Fm1, d> - <m1, F*d> = {0: .4e} ({1:.4e})".format(pt1-pt2, np.linalg.norm(pt1-pt2)))
        print("Relative error = {0: .4e}\n".format(np.linalg.norm(pt1-pt2)/np.linalg.norm(pt1)))

    # 'old' uses the packaged implementation; 'new' is this module's.
    tools_old = pysit.modeling.HybridModeling(solver)
    tools_new = HybridModeling(solver, adjoint_energy_threshold=1e-3)

    np.random.seed(0)
    m0 = solver.ModelParameters(m,{'C': C0})
    m1 = m0.perturbation()
    pert = np.random.rand(*m1.data.shape)
    m1 += pert

    # The linspace below overrides the explicit list (kept for switching).
    freqs = [3.0, 5.0, 10.0, 10.5, 10.5514213] #[3.0, 5.0, 10.0]
    # freqs = [10.5]
    freqs = np.linspace(3,19,8)
    # freqs = [20.0]
    # freqs = [3.0]

    # Independent shot copies so the two experiments do not share state.
    shot_old = copy.deepcopy(shot)
    shot_new = copy.deepcopy(shot)

    old = Experiment(shot_old, tools_old, m0, m1, 'old')
    new = Experiment(shot_new, tools_new, m0, m1, 'new')

    old.run_fwd(freqs)
    old.run_lin_fwd(freqs)
    old.run_adj(freqs)
    print("")
    new.run_fwd(freqs)
    new.run_lin_fwd(freqs)
    new.run_adj(freqs)
    print("")

    compare_fwd(old, new, freqs, plot=False)
    test_adjoints(old, freqs)
    test_adjoints(new, freqs)
"matplotlib.pyplot.title",
"numpy.random.seed",
"pysit.PML",
"pysit.modeling.HybridModeling",
"matplotlib.pyplot.figure",
"numpy.imag",
"numpy.linalg.norm",
"numpy.exp",
"pysit.Shot",
"pysit.vis.plot",
"numpy.prod",
"pysit.RickerWavelet",
"pysit.RectangularDomain",
"matplotlib.pyplot.color... | [((26145, 26177), 'pysit.PML', 'PML', (['(0.3)', '(100)'], {'ftype': '"""quadratic"""'}), "(0.3, 100, ftype='quadratic')\n", (26148, 26177), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((26189, 26221), 'pysit.PML', 'PML', (['(0.3)', '(100)'], {'ftype': '"""quadratic"""'}), "(0.3, 100, ftype='quadratic')\n", (26192, 26221), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((26308, 26345), 'pysit.RectangularDomain', 'RectangularDomain', (['x_config', 'z_config'], {}), '(x_config, z_config)\n', (26325, 26345), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((26357, 26381), 'pysit.CartesianMesh', 'CartesianMesh', (['d', '(90)', '(70)'], {}), '(d, 90, 70)\n', (26370, 26381), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((26455, 26478), 'pysit.gallery.horizontal_reflector', 'horizontal_reflector', (['m'], {}), '(m)\n', (26475, 26478), False, 'from pysit.gallery import horizontal_reflector\n'), ((27209, 27356), 'pysit.ConstantDensityAcousticWave', 'ConstantDensityAcousticWave', (['m'], {'formulation': '"""scalar"""', 'model_parameters': "{'C': C}", 'spatial_accuracy_order': '(4)', 'trange': 'trange', 'time_accuracy_order': '(4)'}), "(m, formulation='scalar', model_parameters={'C':\n C}, 
spatial_accuracy_order=4, trange=trange, time_accuracy_order=4)\n", (27236, 27356), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((27857, 27976), 'pysit.ConstantDensityHelmholtz', 'ConstantDensityHelmholtz', (['m'], {'model_parameters': "{'C': C0}", 'spatial_shifted_differences': '(True)', 'spatial_accuracy_order': '(4)'}), "(m, model_parameters={'C': C0},\n spatial_shifted_differences=True, spatial_accuracy_order=4)\n", (27881, 27976), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((28138, 28173), 'pysit.FrequencyModeling', 'FrequencyModeling', (['solver_frequency'], {}), '(solver_frequency)\n', (28155, 28173), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((28239, 28256), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (28253, 28256), True, 'import numpy as np\n'), ((28296, 28326), 'numpy.random.rand', 'np.random.rand', (['*m1.data.shape'], {}), '(*m1.data.shape)\n', (28310, 28326), True, 'import numpy as np\n'), ((33606, 33638), 'pysit.PML', 'PML', (['(0.3)', '(100)'], {'ftype': '"""quadratic"""'}), "(0.3, 100, ftype='quadratic')\n", (33609, 33638), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((33650, 33682), 'pysit.PML', 'PML', (['(0.3)', '(100)'], {'ftype': '"""quadratic"""'}), "(0.3, 100, 
ftype='quadratic')\n", (33653, 33682), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((33769, 33806), 'pysit.RectangularDomain', 'RectangularDomain', (['x_config', 'z_config'], {}), '(x_config, z_config)\n', (33786, 33806), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((33818, 33850), 'pysit.CartesianMesh', 'CartesianMesh', (['d', '(2 * 90)', '(2 * 70)'], {}), '(d, 2 * 90, 2 * 70)\n', (33831, 33850), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((33855, 33879), 'pysit.CartesianMesh', 'CartesianMesh', (['d', '(90)', '(70)'], {}), '(d, 90, 70)\n', (33868, 33879), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((33953, 33976), 'pysit.gallery.horizontal_reflector', 'horizontal_reflector', (['m'], {}), '(m)\n', (33973, 33976), False, 'from pysit.gallery import horizontal_reflector\n'), ((34359, 34386), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nx'], {}), '(xmin, xmax, nx)\n', (34370, 34386), True, 'import numpy as np\n'), ((34507, 34530), 'pysit.Shot', 'Shot', (['source', 'receivers'], {}), '(source, receivers)\n', (34511, 34530), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, 
ConstantDensityHelmholtz, vis\n'), ((34631, 34782), 'pysit.ConstantDensityAcousticWave', 'ConstantDensityAcousticWave', (['m'], {'formulation': '"""scalar"""', 'model_parameters': "{'C': C}", 'spatial_accuracy_order': '(4)', 'use_cpp_acceleration': '(True)', 'trange': 'trange'}), "(m, formulation='scalar', model_parameters={'C':\n C}, spatial_accuracy_order=4, use_cpp_acceleration=True, trange=trange)\n", (34658, 34782), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((39692, 39729), 'pysit.modeling.HybridModeling', 'pysit.modeling.HybridModeling', (['solver'], {}), '(solver)\n', (39721, 39729), False, 'import pysit\n'), ((39805, 39822), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (39819, 39822), True, 'import numpy as np\n'), ((39908, 39938), 'numpy.random.rand', 'np.random.rand', (['*m1.data.shape'], {}), '(*m1.data.shape)\n', (39922, 39938), True, 'import numpy as np\n'), ((40052, 40073), 'numpy.linspace', 'np.linspace', (['(3)', '(19)', '(8)'], {}), '(3, 19, 8)\n', (40063, 40073), True, 'import numpy as np\n'), ((40125, 40144), 'copy.deepcopy', 'copy.deepcopy', (['shot'], {}), '(shot)\n', (40138, 40144), False, 'import copy\n'), ((40160, 40179), 'copy.deepcopy', 'copy.deepcopy', (['shot'], {}), '(shot)\n', (40173, 40179), False, 'import copy\n'), ((26921, 26948), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nx'], {}), '(xmin, xmax, nx)\n', (26932, 26948), True, 'import numpy as np\n'), ((27081, 27104), 'pysit.Shot', 'Shot', (['source', 'receivers'], {}), '(source, receivers)\n', (27085, 27104), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((28688, 28715), 
'numpy.random.rand', 'np.random.rand', (['*data.shape'], {}), '(*data.shape)\n', (28702, 28715), True, 'import numpy as np\n'), ((30920, 30933), 'pysit.PML', 'PML', (['(0.0)', '(100)'], {}), '(0.0, 100)\n', (30923, 30933), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((31444, 31456), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (31454, 31456), True, 'import matplotlib.pyplot as plt\n'), ((31465, 31485), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1)'], {}), '(2, 3, 1)\n', (31476, 31485), True, 'import matplotlib.pyplot as plt\n'), ((31544, 31574), 'matplotlib.pyplot.title', 'plt.title', (['"""re(${\\\\hat u_0}$)"""'], {}), "('re(${\\\\hat u_0}$)')\n", (31553, 31574), True, 'import matplotlib.pyplot as plt\n'), ((31583, 31603), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (31594, 31603), True, 'import matplotlib.pyplot as plt\n'), ((31662, 31692), 'matplotlib.pyplot.title', 'plt.title', (['"""im(${\\\\hat u_0}$)"""'], {}), "('im(${\\\\hat u_0}$)')\n", (31671, 31692), True, 'import matplotlib.pyplot as plt\n'), ((31701, 31721), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (31712, 31721), True, 'import matplotlib.pyplot as plt\n'), ((31792, 31822), 'matplotlib.pyplot.title', 'plt.title', (['"""re(${\\\\hat q}$) H"""'], {}), "('re(${\\\\hat q}$) H')\n", (31801, 31822), True, 'import matplotlib.pyplot as plt\n'), ((31831, 31851), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (31842, 31851), True, 'import matplotlib.pyplot as plt\n'), ((31922, 31952), 'matplotlib.pyplot.title', 'plt.title', (['"""im(${\\\\hat q}$) H"""'], {}), "('im(${\\\\hat q}$) H')\n", (31931, 31952), True, 'import matplotlib.pyplot as plt\n'), ((31961, 
31981), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (31972, 31981), True, 'import matplotlib.pyplot as plt\n'), ((32054, 32086), 'matplotlib.pyplot.title', 'plt.title', (['"""re(${\\\\hat u_1}$) H"""'], {}), "('re(${\\\\hat u_1}$) H')\n", (32063, 32086), True, 'import matplotlib.pyplot as plt\n'), ((32095, 32115), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (32106, 32115), True, 'import matplotlib.pyplot as plt\n'), ((32188, 32220), 'matplotlib.pyplot.title', 'plt.title', (['"""im(${\\\\hat u_1}$) H"""'], {}), "('im(${\\\\hat u_1}$) H')\n", (32197, 32220), True, 'import matplotlib.pyplot as plt\n'), ((32229, 32239), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32237, 32239), True, 'import matplotlib.pyplot as plt\n'), ((32249, 32261), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (32259, 32261), True, 'import matplotlib.pyplot as plt\n'), ((32270, 32290), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1)'], {}), '(2, 3, 1)\n', (32281, 32290), True, 'import matplotlib.pyplot as plt\n'), ((32349, 32379), 'matplotlib.pyplot.title', 'plt.title', (['"""re(${\\\\hat u_0}$)"""'], {}), "('re(${\\\\hat u_0}$)')\n", (32358, 32379), True, 'import matplotlib.pyplot as plt\n'), ((32388, 32408), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (32399, 32408), True, 'import matplotlib.pyplot as plt\n'), ((32467, 32497), 'matplotlib.pyplot.title', 'plt.title', (['"""im(${\\\\hat u_0}$)"""'], {}), "('im(${\\\\hat u_0}$)')\n", (32476, 32497), True, 'import matplotlib.pyplot as plt\n'), ((32506, 32526), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (32517, 32526), True, 'import matplotlib.pyplot as plt\n'), ((32602, 32632), 'matplotlib.pyplot.title', 'plt.title', (['"""re(${\\\\hat q}$) P"""'], {}), "('re(${\\\\hat q}$) P')\n", (32611, 32632), True, 'import matplotlib.pyplot as 
plt\n'), ((32641, 32661), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (32652, 32661), True, 'import matplotlib.pyplot as plt\n'), ((32737, 32767), 'matplotlib.pyplot.title', 'plt.title', (['"""im(${\\\\hat q}$) P"""'], {}), "('im(${\\\\hat q}$) P')\n", (32746, 32767), True, 'import matplotlib.pyplot as plt\n'), ((32776, 32796), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (32787, 32796), True, 'import matplotlib.pyplot as plt\n'), ((32874, 32906), 'matplotlib.pyplot.title', 'plt.title', (['"""re(${\\\\hat u_1}$) P"""'], {}), "('re(${\\\\hat u_1}$) P')\n", (32883, 32906), True, 'import matplotlib.pyplot as plt\n'), ((32915, 32935), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (32926, 32935), True, 'import matplotlib.pyplot as plt\n'), ((33013, 33045), 'matplotlib.pyplot.title', 'plt.title', (['"""im(${\\\\hat u_1}$) P"""'], {}), "('im(${\\\\hat u_1}$) P')\n", (33022, 33045), True, 'import matplotlib.pyplot as plt\n'), ((33054, 33064), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33062, 33064), True, 'import matplotlib.pyplot as plt\n'), ((34237, 34256), 'pysit.RickerWavelet', 'RickerWavelet', (['(10.0)'], {}), '(10.0)\n', (34250, 34256), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((3833, 3857), 'numpy.iterable', 'np.iterable', (['frequencies'], {}), '(frequencies)\n', (3844, 3857), True, 'import numpy as np\n'), ((4318, 4374), 'numpy.zeros', 'np.zeros', (['(solver.nsteps, shot.receivers.receiver_count)'], {}), '((solver.nsteps, shot.receivers.receiver_count))\n', (4326, 4374), True, 'import numpy as np\n'), ((12060, 12084), 'numpy.iterable', 'np.iterable', (['frequencies'], {}), '(frequencies)\n', (12071, 12084), True, 'import numpy as 
np\n'), ((12404, 12428), 'numpy.iterable', 'np.iterable', (['frequencies'], {}), '(frequencies)\n', (12415, 12428), True, 'import numpy as np\n'), ((16145, 16174), 'numpy.conj', 'np.conj', (['vhats[nu]', 'vhats[nu]'], {}), '(vhats[nu], vhats[nu])\n', (16152, 16174), True, 'import numpy as np\n'), ((16398, 16440), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * nu * solver.tf)'], {}), '(-1.0j * 2 * np.pi * nu * solver.tf)\n', (16404, 16440), True, 'import numpy as np\n'), ((18681, 18705), 'numpy.iterable', 'np.iterable', (['frequencies'], {}), '(frequencies)\n', (18692, 18705), True, 'import numpy as np\n'), ((19537, 19593), 'numpy.zeros', 'np.zeros', (['(solver.nsteps, shot.receivers.receiver_count)'], {}), '((solver.nsteps, shot.receivers.receiver_count))\n', (19545, 19593), True, 'import numpy as np\n'), ((26787, 26806), 'pysit.RickerWavelet', 'RickerWavelet', (['(10.0)'], {}), '(10.0)\n', (26800, 26806), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((29585, 29602), 'numpy.conj', 'np.conj', (['dhat[nu]'], {}), '(dhat[nu])\n', (29592, 29602), True, 'import numpy as np\n'), ((29684, 29701), 'numpy.prod', 'np.prod', (['m.deltas'], {}), '(m.deltas)\n', (29691, 29701), True, 'import numpy as np\n'), ((31508, 31526), 'numpy.real', 'np.real', (['u0hat[sl]'], {}), '(u0hat[sl])\n', (31515, 31526), True, 'import numpy as np\n'), ((31626, 31644), 'numpy.imag', 'np.imag', (['u0hat[sl]'], {}), '(u0hat[sl])\n', (31633, 31644), True, 'import numpy as np\n'), ((31744, 31761), 'numpy.real', 'np.real', (['qhat[sl]'], {}), '(qhat[sl])\n', (31751, 31761), True, 'import numpy as np\n'), ((31874, 31891), 'numpy.imag', 'np.imag', (['qhat[sl]'], {}), '(qhat[sl])\n', (31881, 31891), True, 'import numpy as np\n'), ((32004, 32022), 'numpy.real', 'np.real', (['u1hat[sl]'], {}), '(u1hat[sl])\n', (32011, 
32022), True, 'import numpy as np\n'), ((32138, 32156), 'numpy.imag', 'np.imag', (['u1hat[sl]'], {}), '(u1hat[sl])\n', (32145, 32156), True, 'import numpy as np\n'), ((32313, 32331), 'numpy.real', 'np.real', (['u0hat[sl]'], {}), '(u0hat[sl])\n', (32320, 32331), True, 'import numpy as np\n'), ((32431, 32449), 'numpy.imag', 'np.imag', (['u0hat[sl]'], {}), '(u0hat[sl])\n', (32438, 32449), True, 'import numpy as np\n'), ((32549, 32571), 'numpy.real', 'np.real', (['qhat_freq[sl]'], {}), '(qhat_freq[sl])\n', (32556, 32571), True, 'import numpy as np\n'), ((32684, 32706), 'numpy.imag', 'np.imag', (['qhat_freq[sl]'], {}), '(qhat_freq[sl])\n', (32691, 32706), True, 'import numpy as np\n'), ((32819, 32842), 'numpy.real', 'np.real', (['u1hat_freq[sl]'], {}), '(u1hat_freq[sl])\n', (32826, 32842), True, 'import numpy as np\n'), ((32958, 32981), 'numpy.imag', 'np.imag', (['u1hat_freq[sl]'], {}), '(u1hat_freq[sl])\n', (32965, 32981), True, 'import numpy as np\n'), ((34419, 34446), 'pysit.PointReceiver', 'PointReceiver', (['m', '(x, zpos)'], {}), '(m, (x, zpos))\n', (34432, 34446), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((35366, 35377), 'time.time', 'time.time', ([], {}), '()\n', (35375, 35377), False, 'import time\n'), ((35674, 35691), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (35688, 35691), True, 'import numpy as np\n'), ((36250, 36261), 'time.time', 'time.time', ([], {}), '()\n', (36259, 36261), False, 'import time\n'), ((36612, 36623), 'time.time', 'time.time', ([], {}), '()\n', (36621, 36623), False, 'import time\n'), ((39230, 39245), 'numpy.prod', 'np.prod', (['deltas'], {}), '(deltas)\n', (39237, 39245), True, 'import numpy as np\n'), ((4095, 4134), 'numpy.zeros', 'np.zeros', (['shot.receivers.receiver_count'], {}), '(shot.receivers.receiver_count)\n', (4103, 
4134), True, 'import numpy as np\n'), ((13629, 13655), 'numpy.linalg.norm', 'np.linalg.norm', (['vk', 'np.inf'], {}), '(vk, np.inf)\n', (13643, 13655), True, 'import numpy as np\n'), ((17118, 17139), 'itertools.repeat', 'itertools.repeat', (['(1.0)'], {}), '(1.0)\n', (17134, 17139), False, 'import itertools\n'), ((26985, 27012), 'pysit.PointReceiver', 'PointReceiver', (['m', '(x, zpos)'], {}), '(m, (x, zpos))\n', (26998, 27012), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((29756, 29773), 'numpy.prod', 'np.prod', (['m.deltas'], {}), '(m.deltas)\n', (29763, 29773), True, 'import numpy as np\n'), ((30539, 30556), 'numpy.conj', 'np.conj', (['dhat[nu]'], {}), '(dhat[nu])\n', (30546, 30556), True, 'import numpy as np\n'), ((30661, 30678), 'numpy.prod', 'np.prod', (['m.deltas'], {}), '(m.deltas)\n', (30668, 30678), True, 'import numpy as np\n'), ((35533, 35544), 'time.time', 'time.time', ([], {}), '()\n', (35542, 35544), False, 'import time\n'), ((36432, 36443), 'time.time', 'time.time', ([], {}), '()\n', (36441, 36443), False, 'import time\n'), ((36842, 36853), 'time.time', 'time.time', ([], {}), '()\n', (36851, 36853), False, 'import time\n'), ((37481, 37493), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (37491, 37493), True, 'import matplotlib.pyplot as plt\n'), ((37510, 37530), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(1)'], {}), '(3, 2, 1)\n', (37521, 37530), True, 'import matplotlib.pyplot as plt\n'), ((37545, 37581), 'pysit.vis.plot', 'vis.plot', (['uhat0_1.real', 'm'], {'clim': 'clim'}), '(uhat0_1.real, m, clim=clim)\n', (37553, 37581), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, 
ConstantDensityHelmholtz, vis\n'), ((37597, 37611), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (37609, 37611), True, 'import matplotlib.pyplot as plt\n'), ((37628, 37648), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(3)'], {}), '(3, 2, 3)\n', (37639, 37648), True, 'import matplotlib.pyplot as plt\n'), ((37663, 37699), 'pysit.vis.plot', 'vis.plot', (['uhat0_2.real', 'm'], {'clim': 'clim'}), '(uhat0_2.real, m, clim=clim)\n', (37671, 37699), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((37715, 37729), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (37727, 37729), True, 'import matplotlib.pyplot as plt\n'), ((37746, 37766), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(5)'], {}), '(3, 2, 5)\n', (37757, 37766), True, 'import matplotlib.pyplot as plt\n'), ((37781, 37809), 'pysit.vis.plot', 'vis.plot', (['diff.real', 'm', 'diff'], {}), '(diff.real, m, diff)\n', (37789, 37809), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((37825, 37839), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (37837, 37839), True, 'import matplotlib.pyplot as plt\n'), ((37857, 37877), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(2)'], {}), '(3, 2, 2)\n', (37868, 37877), True, 'import matplotlib.pyplot as plt\n'), ((37892, 37928), 'pysit.vis.plot', 'vis.plot', (['uhat0_1.imag', 'm'], {'clim': 'clim'}), '(uhat0_1.imag, m, clim=clim)\n', (37900, 37928), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, 
RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((37944, 37958), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (37956, 37958), True, 'import matplotlib.pyplot as plt\n'), ((37975, 37995), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(4)'], {}), '(3, 2, 4)\n', (37986, 37995), True, 'import matplotlib.pyplot as plt\n'), ((38010, 38046), 'pysit.vis.plot', 'vis.plot', (['uhat0_2.imag', 'm'], {'clim': 'clim'}), '(uhat0_2.imag, m, clim=clim)\n', (38018, 38046), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((38062, 38076), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (38074, 38076), True, 'import matplotlib.pyplot as plt\n'), ((38093, 38113), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(6)'], {}), '(3, 2, 6)\n', (38104, 38113), True, 'import matplotlib.pyplot as plt\n'), ((38128, 38156), 'pysit.vis.plot', 'vis.plot', (['diff.imag', 'm', 'diff'], {}), '(diff.imag, m, diff)\n', (38136, 38156), False, 'from pysit import PML, RectangularDomain, CartesianMesh, PointSource, ReceiverSet, Shot, ConstantDensityAcousticWave, generate_seismic_data, PointReceiver, RickerWavelet, FrequencyModeling, ConstantDensityHelmholtz, vis\n'), ((38172, 38186), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (38184, 38186), True, 'import matplotlib.pyplot as plt\n'), ((38204, 38214), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (38212, 38214), True, 'import matplotlib.pyplot as plt\n'), ((39123, 39140), 'numpy.conj', 'np.conj', (['dhat[nu]'], {}), '(dhat[nu])\n', (39130, 39140), True, 'import numpy as np\n'), ((39357, 39376), 'numpy.linalg.norm', 'np.linalg.norm', (['pt1'], {}), '(pt1)\n', (39371, 39376), True, 'import numpy as np\n'), ((39449, 39468), 'numpy.linalg.norm', 'np.linalg.norm', 
(['pt2'], {}), '(pt2)\n', (39463, 39468), True, 'import numpy as np\n'), ((39545, 39570), 'numpy.linalg.norm', 'np.linalg.norm', (['(pt1 - pt2)'], {}), '(pt1 - pt2)\n', (39559, 39570), True, 'import numpy as np\n'), ((2313, 2330), 'math.floor', 'math.floor', (['ratio'], {}), '(ratio)\n', (2323, 2330), False, 'import math\n'), ((5761, 5775), 'numpy.mod', 'np.mod', (['k', 'idx'], {}), '(k, idx)\n', (5767, 5775), True, 'import numpy as np\n'), ((14058, 14072), 'numpy.mod', 'np.mod', (['k', 'idx'], {}), '(k, idx)\n', (14064, 14072), True, 'import numpy as np\n'), ((15870, 15884), 'numpy.mod', 'np.mod', (['k', 'idx'], {}), '(k, idx)\n', (15876, 15884), True, 'import numpy as np\n'), ((17391, 17411), 'numpy.conj', 'np.conj', (['dWaveOp[nu]'], {}), '(dWaveOp[nu])\n', (17398, 17411), True, 'import numpy as np\n'), ((22400, 22414), 'numpy.mod', 'np.mod', (['k', 'idx'], {}), '(k, idx)\n', (22406, 22414), True, 'import numpy as np\n'), ((28927, 28961), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * nu * t)'], {}), '(-1.0j * 2 * np.pi * nu * t)\n', (28933, 28961), True, 'import numpy as np\n'), ((30742, 30759), 'numpy.prod', 'np.prod', (['m.deltas'], {}), '(m.deltas)\n', (30749, 30759), True, 'import numpy as np\n'), ((35791, 35818), 'numpy.random.rand', 'np.random.rand', (['*data.shape'], {}), '(*data.shape)\n', (35805, 35818), True, 'import numpy as np\n'), ((39629, 39654), 'numpy.linalg.norm', 'np.linalg.norm', (['(pt1 - pt2)'], {}), '(pt1 - pt2)\n', (39643, 39654), True, 'import numpy as np\n'), ((39653, 39672), 'numpy.linalg.norm', 'np.linalg.norm', (['pt1'], {}), '(pt1)\n', (39667, 39672), True, 'import numpy as np\n'), ((15622, 15648), 'numpy.linalg.norm', 'np.linalg.norm', (['vk', 'np.inf'], {}), '(vk, np.inf)\n', (15636, 15648), True, 'import numpy as np\n'), ((22651, 22665), 'numpy.mod', 'np.mod', (['k', 'idx'], {}), '(k, idx)\n', (22657, 22665), True, 'import numpy as np\n'), ((29655, 29672), 'numpy.conj', 'np.conj', (['adjmodel'], {}), '(adjmodel)\n', (29662, 
29672), True, 'import numpy as np\n'), ((37298, 37318), 'numpy.linalg.norm', 'np.linalg.norm', (['diff'], {}), '(diff)\n', (37312, 37318), True, 'import numpy as np\n'), ((37319, 37342), 'numpy.linalg.norm', 'np.linalg.norm', (['uhat0_1'], {}), '(uhat0_1)\n', (37333, 37342), True, 'import numpy as np\n'), ((39201, 39218), 'numpy.conj', 'np.conj', (['adjmodel'], {}), '(adjmodel)\n', (39208, 39218), True, 'import numpy as np\n'), ((29727, 29744), 'numpy.conj', 'np.conj', (['adjmodel'], {}), '(adjmodel)\n', (29734, 29744), True, 'import numpy as np\n'), ((30627, 30649), 'numpy.conj', 'np.conj', (['adjmodel_freq'], {}), '(adjmodel_freq)\n', (30634, 30649), True, 'import numpy as np\n'), ((36119, 36153), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * nu * t)'], {}), '(-1.0j * 2 * np.pi * nu * t)\n', (36125, 36153), True, 'import numpy as np\n'), ((5819, 5853), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * nu * t)'], {}), '(-1.0j * 2 * np.pi * nu * t)\n', (5825, 5853), True, 'import numpy as np\n'), ((14116, 14164), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * nu * (solver.tf - t))'], {}), '(-1.0j * 2 * np.pi * nu * (solver.tf - t))\n', (14122, 14164), True, 'import numpy as np\n'), ((15928, 15976), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * nu * (solver.tf - t))'], {}), '(-1.0j * 2 * np.pi * nu * (solver.tf - t))\n', (15934, 15976), True, 'import numpy as np\n'), ((22459, 22493), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * nu * t)'], {}), '(-1.0j * 2 * np.pi * nu * t)\n', (22465, 22493), True, 'import numpy as np\n'), ((30708, 30730), 'numpy.conj', 'np.conj', (['adjmodel_freq'], {}), '(adjmodel_freq)\n', (30715, 30730), True, 'import numpy as np\n'), ((31133, 31143), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (31140, 31143), True, 'import numpy as np\n'), ((31172, 31182), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (31179, 31182), True, 'import numpy as np\n'), ((31230, 31240), 'numpy.imag', 'np.imag', (['x'], {}), '(x)\n', (31237, 31240), True, 'import 
numpy as np\n'), ((31269, 31279), 'numpy.imag', 'np.imag', (['x'], {}), '(x)\n', (31276, 31279), True, 'import numpy as np\n'), ((22746, 22780), 'numpy.exp', 'np.exp', (['(-1.0j * 2 * np.pi * nu * t)'], {}), '(-1.0j * 2 * np.pi * nu * t)\n', (22752, 22780), True, 'import numpy as np\n')] |
import numpy as np
from pyglet.window import key
from core import constants
from core.entities.tank import tank
class Player(tank.Tank):
    """A keyboard-controlled tank representing the player in-game."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.speed = constants.PLAYER_SPEED
        self.move_skip = constants.PLAYER_MOVE_SKIP
        # Tracks which keys are currently held; the window pushes events here.
        self.key_handler = key.KeyStateHandler()
        # Raw (un-prioritized) WASD input from the last frame, as (dx, dy).
        self.raw_movement_direction = np.zeros(2, int)
        self.health = constants.PLAYER_HEALTH
        # Set on SPACE press, consumed on the next logic tick.
        self.shoot_now = False

    def logic_update(self, game, tick):
        self.handle_movement_controls()
        if self.shoot_now:
            self.shoot(game, player_invoked=True)
            self.shoot_now = False
        super().logic_update(game, tick)

    def on_key_press(self, symbol, modifiers):
        # Defer firing to the next logic tick instead of shooting immediately.
        if symbol == key.SPACE:
            self.shoot_now = True

    def handle_movement_controls(self):
        """Translate held WASD keys into a single-axis movement direction.

        Movement is restricted to the four cardinal axes, with the last
        pressed key taking priority over the movement direction.
        """
        dx = int(self.key_handler[key.D]) - int(self.key_handler[key.A])
        dy = int(self.key_handler[key.W]) - int(self.key_handler[key.S])

        if not np.array_equal(self.raw_movement_direction, (dx, dy)):
            if dx and dy:
                # Diagonal input: switch to whichever axis is not active.
                if self.move_dir[0]:
                    self.move_dir[0] = 0
                    self.move_dir[1] = dy
                else:
                    self.move_dir[1] = 0
                    self.move_dir[0] = dx
            else:
                self.move_dir[0] = dx
                self.move_dir[1] = dy
        self.raw_movement_direction[0] = dx
        self.raw_movement_direction[1] = dy
| [
"numpy.array",
"pyglet.window.key.KeyStateHandler",
"numpy.zeros"
] | [((423, 444), 'pyglet.window.key.KeyStateHandler', 'key.KeyStateHandler', ([], {}), '()\n', (442, 444), False, 'from pyglet.window import key\n'), ((483, 499), 'numpy.zeros', 'np.zeros', (['(2)', 'int'], {}), '(2, int)\n', (491, 499), True, 'import numpy as np\n'), ((1866, 1884), 'numpy.array', 'np.array', (['[dx, dy]'], {}), '([dx, dy])\n', (1874, 1884), True, 'import numpy as np\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset interface for Census dataset.
Census dataset: https://archive.ics.uci.edu/ml/machine-learning-databases/adult
"""
import os
import urllib.request
import numpy as np
import pandas as pd
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
from deep4rec.datasets.dataset import Dataset
import deep4rec.utils as utils
# Column names for the raw census CSV rows, in file order; the raw files
# carry no header row, so these names are applied when reading.
_CSV_COLUMNS = [
    "age",
    "workclass",
    "fnlwgt",
    "education",
    "education_num",
    "marital_status",
    "occupation",
    "relationship",
    "race",
    "gender",
    "capital_gain",
    "capital_loss",
    "hours_per_week",
    "native_country",
    "income_bracket",
]

# Per-column defaults aligned with _CSV_COLUMNS: [0] for integer columns,
# [""] for string columns. NOTE(review): looks like tf.decode_csv-style
# record defaults — not referenced in the visible code; confirm usage.
_CSV_COLUMN_DEFAULTS = [
    [0],
    [""],
    [0],
    [""],
    [0],
    [""],
    [""],
    [""],
    [""],
    [""],
    [0],
    [0],
    [0],
    [""],
    [""],
]
class CensusDataset(Dataset):
    """UCI Census Income ("adult") dataset for wide-and-deep models.

    Downloads the raw train/test CSV files from the UCI repository,
    normalizes their formatting, and preprocesses them into:

      * wide features: ordinal-encoded categorical columns plus a
        bucketized age column,
      * deep features: one-hot encoded categorical columns plus raw
        numerical columns,
      * an ordinal-encoded occupation column (for an embedding input),
      * binary labels (1 when income is ">50K", else 0).
    """

    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult"

    def __init__(self, dataset_name, output_dir, *args, **kwargs):
        super().__init__(dataset_name, output_dir, *args, **kwargs)
        self.train_filename = "adult.data"
        self.test_filename = "adult.test"
        self.train_url = os.path.join(self.url, self.train_filename)
        self.test_url = os.path.join(self.url, self.test_filename)
        self.train_path = os.path.join(self.output_dir, self.train_filename)
        self.test_path = os.path.join(self.output_dir, self.test_filename)
        self.preprocessed_path = os.path.join(self.output_dir, self.dataset_name)
        # Encoders are fit on the training split only and re-used
        # (transform only) on the test split so category codes match.
        self._ord_encoder = OrdinalEncoder()
        self._occupation_ord_encoder = OrdinalEncoder()
        self._one_hot_encoder = OneHotEncoder(sparse=False)

    def _download_and_clean_file(self, url, filename):
        """Downloads data from url, and makes changes to match the CSV format."""
        temp_file, _ = urllib.request.urlretrieve(url)
        with tf.gfile.Open(temp_file, "r") as temp_eval_file:
            with tf.gfile.Open(filename, "w") as eval_file:
                for line in temp_eval_file:
                    line = line.strip()
                    line = line.replace(", ", ",")
                    # Skip blank lines and lines without CSV content.
                    if not line or "," not in line:
                        continue
                    # adult.test terminates each label with a period.
                    if line[-1] == ".":
                        line = line[:-1]
                    line += "\n"
                    eval_file.write(line)
        tf.gfile.Remove(temp_file)

    def download(self):
        """Fetch and clean both splits into ``self.output_dir``."""
        # makedirs(exist_ok=True) is race-free, unlike exists() + mkdir().
        os.makedirs(self.output_dir, exist_ok=True)
        self._download_and_clean_file(self.train_url, self.train_path)
        self._download_and_clean_file(self.test_url, self.test_path)

    def check_downloaded(self):
        """Return True when both split files are already on disk."""
        return os.path.exists(self.train_path) and os.path.exists(self.test_path)

    def check_preprocessed(self):
        # Preprocessing is cheap and stateful (encoders), so always redo it.
        return False

    def _preprocess(self, filename, train_data=False):
        """Read one CSV split and build (wide, deep, occupation, labels).

        When ``train_data`` is True the encoders are fit; otherwise they
        only transform, so codes stay consistent across splits.
        """
        df = pd.read_csv(filename, names=_CSV_COLUMNS)
        # Categorical columns
        df_base_columns = df[
            ["education", "marital_status", "relationship", "workclass"]
        ]
        if train_data:
            base_columns = self._ord_encoder.fit_transform(df_base_columns.values)
            occupation_column = self._occupation_ord_encoder.fit_transform(
                df["occupation"].values.reshape(-1, 1)
            )
            one_hot_base_columns = self._one_hot_encoder.fit_transform(
                df_base_columns.values
            )
        else:
            base_columns = self._ord_encoder.transform(df_base_columns.values)
            occupation_column = self._occupation_ord_encoder.transform(
                df["occupation"].values.reshape(-1, 1)
            )
            one_hot_base_columns = self._one_hot_encoder.transform(
                df_base_columns.values
            )
        # Age buckets: each age is mapped to the index of its bucket.
        buckets = [0, 18, 25, 30, 35, 40, 45, 50, 55, 60, 65, 200]
        age_buckets = np.array(
            pd.cut(df["age"], buckets, labels=range(len(buckets) - 1)).values
        )
        wide_columns = np.concatenate(
            (base_columns, age_buckets.reshape(-1, 1)), axis=1
        )
        numerical_columns = df[
            ["age", "education_num", "capital_gain", "capital_loss", "hours_per_week"]
        ].values
        deep_columns = np.concatenate((one_hot_base_columns, numerical_columns), axis=1)
        labels = np.where(df["income_bracket"].values == ">50K", 1, 0)
        return wide_columns, deep_columns, occupation_column, labels

    def preprocess(self):
        """Preprocess both splits, fitting the encoders on the train split."""
        self.train_wide_data, self.train_deep_data, self.train_embedding_data, self.train_y = self._preprocess(
            self.train_path, train_data=True
        )
        self.test_wide_data, self.test_deep_data, self.test_embedding_data, self.test_y = self._preprocess(
            self.test_path, train_data=False
        )

    @property
    def train_size(self):
        return len(self.train_wide_data)

    @property
    def train_features(self):
        return [self.train_embedding_data, self.train_wide_data, self.train_deep_data]

    @property
    def test_features(self):
        return [self.test_embedding_data, self.test_wide_data, self.test_deep_data]

    @property
    def num_features_one_hot(self):
        # Number of distinct occupation codes (the embedding vocabulary size).
        return len(np.unique(self.train_embedding_data))

    @property
    def num_features(self):
        # A single categorical (occupation) feature feeds the embedding.
        return 1
| [
"os.mkdir",
"os.path.join",
"pandas.read_csv",
"numpy.unique",
"sklearn.preprocessing.OneHotEncoder",
"os.path.exists",
"numpy.where",
"tensorflow.gfile.Open",
"tensorflow.gfile.Remove",
"sklearn.preprocessing.OrdinalEncoder",
"numpy.concatenate"
] | [((1910, 1953), 'os.path.join', 'os.path.join', (['self.url', 'self.train_filename'], {}), '(self.url, self.train_filename)\n', (1922, 1953), False, 'import os\n'), ((1978, 2020), 'os.path.join', 'os.path.join', (['self.url', 'self.test_filename'], {}), '(self.url, self.test_filename)\n', (1990, 2020), False, 'import os\n'), ((2048, 2098), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.train_filename'], {}), '(self.output_dir, self.train_filename)\n', (2060, 2098), False, 'import os\n'), ((2124, 2173), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.test_filename'], {}), '(self.output_dir, self.test_filename)\n', (2136, 2173), False, 'import os\n'), ((2208, 2256), 'os.path.join', 'os.path.join', (['self.output_dir', 'self.dataset_name'], {}), '(self.output_dir, self.dataset_name)\n', (2220, 2256), False, 'import os\n'), ((2285, 2301), 'sklearn.preprocessing.OrdinalEncoder', 'OrdinalEncoder', ([], {}), '()\n', (2299, 2301), False, 'from sklearn.preprocessing import OrdinalEncoder\n'), ((2341, 2357), 'sklearn.preprocessing.OrdinalEncoder', 'OrdinalEncoder', ([], {}), '()\n', (2355, 2357), False, 'from sklearn.preprocessing import OrdinalEncoder\n'), ((2390, 2417), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (2403, 2417), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((3117, 3143), 'tensorflow.gfile.Remove', 'tf.gfile.Remove', (['temp_file'], {}), '(temp_file)\n', (3132, 3143), True, 'import tensorflow as tf\n'), ((3635, 3676), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'names': '_CSV_COLUMNS'}), '(filename, names=_CSV_COLUMNS)\n', (3646, 3676), True, 'import pandas as pd\n'), ((5035, 5100), 'numpy.concatenate', 'np.concatenate', (['(one_hot_base_columns, numerical_columns)'], {'axis': '(1)'}), '((one_hot_base_columns, numerical_columns), axis=1)\n', (5049, 5100), True, 'import numpy as np\n'), ((5119, 5172), 'numpy.where', 'np.where', 
(["(df['income_bracket'].values == '>50K')", '(1)', '(0)'], {}), "(df['income_bracket'].values == '>50K', 1, 0)\n", (5127, 5172), True, 'import numpy as np\n'), ((2624, 2653), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['temp_file', '"""r"""'], {}), "(temp_file, 'r')\n", (2637, 2653), True, 'import tensorflow as tf\n'), ((3184, 3215), 'os.path.exists', 'os.path.exists', (['self.output_dir'], {}), '(self.output_dir)\n', (3198, 3215), False, 'import os\n'), ((3229, 3254), 'os.mkdir', 'os.mkdir', (['self.output_dir'], {}), '(self.output_dir)\n', (3237, 3254), False, 'import os\n'), ((3443, 3474), 'os.path.exists', 'os.path.exists', (['self.train_path'], {}), '(self.train_path)\n', (3457, 3474), False, 'import os\n'), ((3479, 3509), 'os.path.exists', 'os.path.exists', (['self.test_path'], {}), '(self.test_path)\n', (3493, 3509), False, 'import os\n'), ((6012, 6048), 'numpy.unique', 'np.unique', (['self.train_embedding_data'], {}), '(self.train_embedding_data)\n', (6021, 6048), True, 'import numpy as np\n'), ((2690, 2718), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (2703, 2718), True, 'import tensorflow as tf\n')] |
import numpy as np
import tensorflow as tf
# Fixed seed shared by numpy and TensorFlow for reproducible runs.
seed: int = 13371337

# reproducibility
np.random.seed(seed)
tf.set_random_seed(seed)  # NOTE: tf.set_random_seed is the TF1-era API
class ImageDataLoader:
    """Builds aligned low-/high-resolution patch pairs for SR training.

    The HR image is ``scale`` times larger than the LR one, with
    ``scale = sqrt(n_patches)``; each crop is tiled into ``n_patches``
    non-overlapping patches.
    """

    def __init__(self,
                 patch_shape: tuple = (128, 128),
                 channels: int = 3,
                 n_patches: int = 16):
        self.patch_shape = patch_shape
        self.channels = channels
        self.n_patches = n_patches
        # Up-scaling factor between the LR and HR grids.
        self.scale = int(np.sqrt(self.n_patches))
        self.lr_patch_shape = (self.patch_shape[0], self.patch_shape[1])
        self.hr_patch_shape = (self.patch_shape[0] * self.scale,
                               self.patch_shape[1] * self.scale)

    @staticmethod
    def normalize(x):
        """Map pixel values from [0, 255] into [-1, 1]."""
        return (x / 127.5) - 1.

    def random_crop(self, x_lr, x_hr):
        """Crop one random patch pair at aligned LR/HR positions."""
        hr_shape = x_hr.get_shape().as_list()
        # Draw offsets on the HR grid, then snap them onto the LR grid.
        off_w_lr = np.random.randint(0, hr_shape[0] - self.hr_patch_shape[0]) // self.scale
        off_h_lr = np.random.randint(0, hr_shape[1] - self.hr_patch_shape[1]) // self.scale
        off_w_hr = off_w_lr * self.scale
        off_h_hr = off_h_lr * self.scale
        lr_crop = x_lr[off_w_lr:off_w_lr + self.lr_patch_shape[0],
                       off_h_lr:off_h_lr + self.lr_patch_shape[1], :]
        hr_crop = x_hr[off_w_hr:off_w_hr + self.hr_patch_shape[0],
                       off_h_hr:off_h_hr + self.hr_patch_shape[1], :]
        return lr_crop, hr_crop

    def pre_processing(self, fn, use_augmentation: bool = True):
        """Load the (lr_path, hr_path) pair ``fn`` and return patch tensors."""
        def _load(path):
            # One decode + normalize pipeline shared by both resolutions.
            raw = tf.read_file(path)
            img = tf.image.decode_png(raw, channels=self.channels)
            return self.normalize(tf.cast(img, dtype=tf.float32))

        lr = _load(fn[0])
        hr = _load(fn[1])

        # random crop
        lr, hr = self.random_crop(lr, hr)

        if use_augmentation:
            # NOTE: flip/rotate decisions use numpy, i.e. they are fixed
            # at graph-construction time rather than per tf.data element.
            if np.random.randint(0, 2) == 0:
                lr = tf.image.flip_up_down(lr)
                hr = tf.image.flip_up_down(hr)
            if np.random.randint(0, 2) == 0:
                lr = tf.image.rot90(lr)
                hr = tf.image.rot90(hr)

        def _to_patches(img, shape):
            # Tile the crop into non-overlapping patches of `shape`.
            tiles = tf.image.extract_image_patches(
                images=tf.expand_dims(img, axis=0),
                ksizes=(1,) + shape + (1,),
                strides=(1,) + shape + (1,),
                rates=[1, 1, 1, 1],
                padding='VALID'
            )
            return tf.reshape(tiles, (-1,) + shape + (self.channels,))

        return _to_patches(lr, self.lr_patch_shape), _to_patches(hr, self.hr_patch_shape)
| [
"tensorflow.image.rot90",
"numpy.random.seed",
"tensorflow.image.flip_up_down",
"tensorflow.reshape",
"tensorflow.set_random_seed",
"tensorflow.image.decode_png",
"tensorflow.cast",
"numpy.random.randint",
"tensorflow.read_file",
"tensorflow.expand_dims",
"numpy.sqrt"
] | [((84, 104), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (98, 104), True, 'import numpy as np\n'), ((105, 129), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (123, 129), True, 'import tensorflow as tf\n'), ((1512, 1531), 'tensorflow.read_file', 'tf.read_file', (['fn[0]'], {}), '(fn[0])\n', (1524, 1531), True, 'import tensorflow as tf\n'), ((1545, 1592), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['lr'], {'channels': 'self.channels'}), '(lr, channels=self.channels)\n', (1564, 1592), True, 'import tensorflow as tf\n'), ((1666, 1685), 'tensorflow.read_file', 'tf.read_file', (['fn[1]'], {}), '(fn[1])\n', (1678, 1685), True, 'import tensorflow as tf\n'), ((1699, 1746), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['hr'], {'channels': 'self.channels'}), '(hr, channels=self.channels)\n', (1718, 1746), True, 'import tensorflow as tf\n'), ((2496, 2566), 'tensorflow.reshape', 'tf.reshape', (['lr_patches', '((-1,) + self.lr_patch_shape + (self.channels,))'], {}), '(lr_patches, (-1,) + self.lr_patch_shape + (self.channels,))\n', (2506, 2566), True, 'import tensorflow as tf\n'), ((2900, 2970), 'tensorflow.reshape', 'tf.reshape', (['hr_patches', '((-1,) + self.hr_patch_shape + (self.channels,))'], {}), '(hr_patches, (-1,) + self.hr_patch_shape + (self.channels,))\n', (2910, 2970), True, 'import tensorflow as tf\n'), ((435, 458), 'numpy.sqrt', 'np.sqrt', (['self.n_patches'], {}), '(self.n_patches)\n', (442, 458), True, 'import numpy as np\n'), ((875, 935), 'numpy.random.randint', 'np.random.randint', (['(0)', '(x_hr_shape[0] - self.hr_patch_shape[0])'], {}), '(0, x_hr_shape[0] - self.hr_patch_shape[0])\n', (892, 935), True, 'import numpy as np\n'), ((993, 1053), 'numpy.random.randint', 'np.random.randint', (['(0)', '(x_hr_shape[1] - self.hr_patch_shape[1])'], {}), '(0, x_hr_shape[1] - self.hr_patch_shape[1])\n', (1010, 1053), True, 'import numpy as np\n'), ((1621, 1650), 'tensorflow.cast', 
'tf.cast', (['lr'], {'dtype': 'tf.float32'}), '(lr, dtype=tf.float32)\n', (1628, 1650), True, 'import tensorflow as tf\n'), ((1775, 1804), 'tensorflow.cast', 'tf.cast', (['hr'], {'dtype': 'tf.float32'}), '(hr, dtype=tf.float32)\n', (1782, 1804), True, 'import tensorflow as tf\n'), ((1916, 1939), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (1933, 1939), True, 'import numpy as np\n'), ((1967, 1992), 'tensorflow.image.flip_up_down', 'tf.image.flip_up_down', (['lr'], {}), '(lr)\n', (1988, 1992), True, 'import tensorflow as tf\n'), ((2014, 2039), 'tensorflow.image.flip_up_down', 'tf.image.flip_up_down', (['hr'], {}), '(hr)\n', (2035, 2039), True, 'import tensorflow as tf\n'), ((2056, 2079), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (2073, 2079), True, 'import numpy as np\n'), ((2107, 2125), 'tensorflow.image.rot90', 'tf.image.rot90', (['lr'], {}), '(lr)\n', (2121, 2125), True, 'import tensorflow as tf\n'), ((2147, 2165), 'tensorflow.image.rot90', 'tf.image.rot90', (['hr'], {}), '(hr)\n', (2161, 2165), True, 'import tensorflow as tf\n'), ((2268, 2294), 'tensorflow.expand_dims', 'tf.expand_dims', (['lr'], {'axis': '(0)'}), '(lr, axis=0)\n', (2282, 2294), True, 'import tensorflow as tf\n'), ((2672, 2698), 'tensorflow.expand_dims', 'tf.expand_dims', (['hr'], {'axis': '(0)'}), '(hr, axis=0)\n', (2686, 2698), True, 'import tensorflow as tf\n')] |
#pylint: disable=bare-except, invalid-name, too-many-arguments
"""
Merging tools for REF_M
"""
from __future__ import (absolute_import, division, print_function)
import sys
import os
import pytz
import json
import time
import datetime
import pandas
import numpy as np
import mantid
import mantid.simpleapi as api
from .settings import AR_OUT_DIR_TEMPLATE, DATA_DIR_TEMPLATE
from .script_output import write_reduction_script, write_tunable_reduction_script
def match_run_for_cross_section(run, ipts, cross_section):
    """
    Return a list of matching runs to be stitched, walking backwards from
    ``run`` while consecutive reduced curves overlap in Q.

    @param run: run to start with
    @param ipts: experiment identifier
    @param cross_section: polarization entry
    """
    api.logger.notice("Matching for IPTS-%s r%s [%s]" % (ipts, run, cross_section))
    matched_runs = []
    prev_q_min = 0
    prev_q_max = 0
    output_dir = AR_OUT_DIR_TEMPLATE % dict(ipts=ipts)
    # Look back over at most the 10 preceding runs.
    for offset in range(10):
        candidate = run - offset
        file_path = os.path.join(output_dir, "REF_M_%s_%s_autoreduce.dat" % (candidate, cross_section))
        if not os.path.isfile(file_path):
            continue
        ref_data = pandas.read_csv(file_path, delim_whitespace=True,
                               comment='#', names=['q', 'r', 'dr', 'dq', 'a'])
        q_min = min(ref_data['q'])
        q_max = max(ref_data['q'])
        api.logger.notice("%s: [%s %s]" % (candidate, q_min, q_max))
        # Accept the first curve unconditionally, then only curves whose
        # upper edge falls inside the previous curve's range.
        if prev_q_min < q_max < prev_q_max or prev_q_max == 0:
            prev_q_max = q_max
            prev_q_min = q_min
            matched_runs.insert(0, str(candidate))
        else:
            # The series stops here
            break
    return matched_runs
def _extract_sequence_id(file_path):
"""
Extract the sequence id from a data file
@param str file_path: file to process
"""
run_number = None
group_id = None
lowest_q = None
if os.path.isfile(file_path):
with open(file_path, 'r') as fd:
for line in fd.readlines():
if line.startswith("# sequence_id"):
try:
group_id = int(line[len("# sequence_id"):].strip())
except:
api.logger.error("Could not extract group id from line: %s" % line)
if line.startswith("# Input file indices:"):
try:
run_number = int(line[len("# Input file indices:"):].strip())
except:
api.logger.error("Could not extract run number from line: %s" % line)
if not line.startswith("#") and len(line.strip()) > 0:
try:
toks = line.split()
lowest_q = float(toks[0])
except:
api.logger.error("Could not extract lowest q from line: %s" % line)
if run_number is not None and group_id is not None and lowest_q is not None:
return run_number, group_id, lowest_q
return run_number, group_id, lowest_q
def match_run_with_sequence(run, ipts, cross_section):
    """
    Return the list of runs belonging to the same sequence as ``run``,
    falling back to Q-overlap matching when no sequence id is stored.
    #TODO: order the runs with increasing Q.

    @param run: run to start with
    @param ipts: experiment identifier
    @param cross_section: polarization entry
    """
    api.logger.notice("Matching sequence for IPTS-%s r%s [%s]" % (ipts, run, cross_section))
    data_dir = AR_OUT_DIR_TEMPLATE % dict(ipts=ipts)
    # Check to see if we have the sequence information
    reduced_file = os.path.join(data_dir, "REF_M_%s_%s_autoreduce.dat" % (run, cross_section))
    _, group_id, _ = _extract_sequence_id(reduced_file)
    # Without a group id, group together runs of increasing q-values.
    if group_id is None:
        return match_run_for_cross_section(run, ipts, cross_section)
    # Collect every reduced file that carries the same sequence id.
    matched_runs = []
    all_have_lowest_q = True
    suffix = "%s_autoreduce.dat" % cross_section
    for entry in os.listdir(data_dir):
        if not entry.endswith(suffix):
            continue
        _run, _group_id, lowest_q = _extract_sequence_id(os.path.join(data_dir, entry))
        if _group_id == group_id:
            matched_runs.append([str(_run), lowest_q])
            all_have_lowest_q = all_have_lowest_q and lowest_q is not None
    if all_have_lowest_q:
        # Order the series by its lowest q value.
        return [pair[0] for pair in sorted(matched_runs, key=lambda a: a[1])]
    return sorted(matched_runs)
def compute_scaling_factors(matched_runs, ipts, cross_section):
    """
    Stitch the reflectivity curves of ``matched_runs`` for the given cross
    section and gather everything needed to write a combined data file.

    @param matched_runs: ordered list of runs to stitch together
    @param ipts: experiment identifier
    @param cross_section: polarization entry used for the stitching
    @return: (scaling_factors, direct_beam_info, data_info, data_buffer,
        cross_section_label) where scaling_factors[i] is the cumulative
        scale applied to matched_runs[i].
    """
    _previous_ws = None
    running_scale = 1.0
    data_buffer = ""
    direct_beam_info = ""
    data_info = ""
    _cross_section_label = cross_section
    direct_beam_count = 0
    run_count = 0
    # The first run is the reference: its scale is 1 by definition.
    scaling_factors = [1.0]
    for i_run in matched_runs:
        output_dir = AR_OUT_DIR_TEMPLATE % dict(ipts=ipts)
        file_path = os.path.join(output_dir, "REF_M_%s_%s_autoreduce.dat" % (i_run, cross_section))
        if os.path.isfile(file_path):
            # NOTE(review): this file handle appears never to be closed.
            _run_info = open(file_path, 'r')
            ref_data = pandas.read_csv(_run_info,
                                   delim_whitespace=True, comment='#', names=['q','r','dr','dq', 'a'])
            ws = api.CreateWorkspace(DataX=ref_data['q'], DataY=ref_data['r'], DataE=ref_data['dr'])
            ws = api.ConvertToHistogram(ws)
            if _previous_ws is not None:
                # Stitch against the previous run and accumulate the scale.
                _, scale = api.Stitch1D(_previous_ws, ws)
                running_scale *= scale
                scaling_factors.append(running_scale)
            _previous_ws = api.CloneWorkspace(ws)

            # Rewind and get meta-data
            _run_info.seek(0)
            _direct_beams_started = 0
            _data_runs_started = 0
            for line in _run_info.readlines():
                # Look for cross-section label
                if line.find("Extracted states:") > 0:
                    toks = line.split(':')
                    if len(toks) > 1:
                        _cross_section_label = toks[1].strip()
                # If we are in the data run block, copy the data we need
                if _data_runs_started == 1 and line.find(str(i_run)) > 0:
                    toks = ["%8s" % t for t in line.split()]
                    if len(toks)>10:
                        # Overwrite the scale column and renumber the run entry.
                        toks[1] = "%8g" % scaling_factors[run_count]
                        run_count += 1
                        toks[14] = "%8s" % str(run_count)
                        _line = ' '.join(toks).strip() + '\n'
                        data_info += _line.replace("# ", "#")
                # Find out whether we started the direct beam block
                if line.find("Data Runs") > 0:
                    _direct_beams_started = 0
                    _data_runs_started = 1
                # Get the direct beam info
                if _direct_beams_started == 2:
                    toks = ["%8s" % t for t in line.split()]
                    if len(toks)>10:
                        direct_beam_count += 1
                        toks[1] = "%8g" % direct_beam_count
                        _line = ' '.join(toks).strip() + '\n'
                        direct_beam_info += _line.replace("# ", "#")
                # If we are in the direct beam block, we need to skip the column info line
                if _direct_beams_started == 1 and line.find("DB_ID") > 0:
                    _direct_beams_started = 2
                # Find out whether we started the direct beam block
                if line.find("Direct Beam Runs") > 0:
                    _direct_beams_started = 1

            # Append this run's data points, scaled by the cumulative factor.
            for i in range(len(ref_data['q'])):
                data_buffer += "%12.6g %12.6g %12.6g %12.6g %12.6g\n" % (ref_data['q'][i],
                                                                         running_scale*ref_data['r'][i],
                                                                         running_scale*ref_data['dr'][i],
                                                                         ref_data['dq'][i],
                                                                         ref_data['a'][i],
                                                                         )
    return scaling_factors, direct_beam_info, data_info, data_buffer, _cross_section_label
def apply_scaling_factors(matched_runs, ipts, cross_section, scaling_factors):
    """
    Apply pre-computed stitching scaling factors to the other cross sections.

    @param matched_runs: list of runs in the stitched sequence
    @param ipts: experiment identifier
    @param cross_section: cross section the factors were computed with
        (skipped here: its scaled data is already available)
    @param scaling_factors: per-run factors, aligned with matched_runs
    @return: list of (cross_section, data_buffer) tuples
    """
    data_buffers = []
    output_dir = AR_OUT_DIR_TEMPLATE % dict(ipts=ipts)
    for xs in ['Off_Off', 'On_Off', 'Off_On', 'On_On']:
        # Skip the cross section that we computed the scaling factors with
        # since we have that data already
        if xs == cross_section:
            continue
        data_buffer = ""
        for j, i_run in enumerate(matched_runs):
            file_path = os.path.join(output_dir, "REF_M_%s_%s_autoreduce.dat" % (i_run, xs))
            if os.path.isfile(file_path):
                # Use a context manager so the file handle is not leaked.
                with open(file_path, 'r') as _run_info:
                    ref_data = pandas.read_csv(_run_info,
                                           delim_whitespace=True, comment='#', names=['q','r','dr','dq', 'a'])
                for i in range(len(ref_data['q'])):
                    data_buffer += "%12.6g %12.6g %12.6g %12.6g %12.6g\n" % (ref_data['q'][i],
                                                                             scaling_factors[j]*ref_data['r'][i],
                                                                             scaling_factors[j]*ref_data['dr'][i],
                                                                             ref_data['dq'][i],
                                                                             ref_data['a'][i],
                                                                             )
        data_buffers.append((xs, data_buffer))
    return data_buffers
def select_cross_section(run, ipts):
    """
    Pick the cross section with the best statistics (smallest relative
    error) among the reduced outputs available for ``run``.

    @param run: run number to inspect
    @param ipts: experiment identifier
    """
    best_xs = None
    best_error = None
    output_dir = AR_OUT_DIR_TEMPLATE % dict(ipts=ipts)
    for xs in ['Off_Off', 'On_Off', 'Off_On', 'On_On']:
        file_path = os.path.join(output_dir, "REF_M_%s_%s_autoreduce.dat" % (run, xs))
        if not os.path.isfile(file_path):
            api.logger.notice("NOT found: %s" % file_path)
            continue
        api.logger.notice("Found: %s" % file_path)
        ref_data = pandas.read_csv(file_path,
                               delim_whitespace=True, comment='#', names=['q', 'r', 'dr', 'dq', 'a'])
        relative_error = np.sum(ref_data['dr'] * ref_data['dr']) / np.sum(ref_data['r'])
        if best_xs is None or relative_error < best_error:
            best_xs = xs
            best_error = relative_error
    return best_xs
def write_reflectivity_cross_section(run, ipts, cross_section, matched_runs, direct_beam_info, data_info, data_buffer, xs_label):
    """
    Write a combined reflectivity file in the QuickNXS format.

    @param run: first run of the stitched sequence (used in the file name)
    @param ipts: experiment identifier
    @param cross_section: polarization entry being written
    @param matched_runs: list of runs in the sequence
    @param direct_beam_info: pre-formatted direct-beam block
    @param data_info: pre-formatted data-run block
    @param data_buffer: pre-formatted R(q) data points
    @param xs_label: label describing the extracted cross-section states
    @return: path of the file that was written
    """
    direct_beam_options = ['DB_ID', 'P0', 'PN', 'x_pos', 'x_width', 'y_pos', 'y_width',
                           'bg_pos', 'bg_width', 'dpix', 'tth', 'number', 'File']
    dataset_options = ['scale', 'P0', 'PN', 'x_pos', 'x_width', 'y_pos', 'y_width',
                       'bg_pos', 'bg_width', 'fan', 'dpix', 'tth', 'number', 'DB_ID', 'File']
    output_dir = AR_OUT_DIR_TEMPLATE % dict(ipts=ipts)
    file_path = os.path.join(output_dir, "REF_M_%s_%s_combined.dat" % (run, cross_section))
    # Use a context manager so the handle is closed even if a write fails.
    with open(file_path, 'w') as fd:
        fd.write("# Datafile created by QuickNXS 1.0.32\n")
        fd.write("# Datafile created by Mantid %s\n" % mantid.__version__)
        fd.write("# Date: %s\n" % time.strftime(u"%Y-%m-%d %H:%M:%S"))
        fd.write("# Type: Specular\n")
        fd.write("# Input file indices: %s\n" % ','.join(matched_runs))
        fd.write("# Extracted states: %s\n" % xs_label)
        fd.write("#\n")
        fd.write("# [Direct Beam Runs]\n")
        toks = ['%8s' % item for item in direct_beam_options]
        fd.write("# %s\n" % ' '.join(toks))
        fd.write(direct_beam_info)
        fd.write("#\n")
        fd.write("# [Data Runs]\n")
        toks = ['%8s' % item for item in dataset_options]
        fd.write("# %s\n" % ' '.join(toks))
        fd.write(data_info)
        fd.write("#\n")
        fd.write("# [Global Options]\n")
        fd.write("# name value\n")
        fd.write("# sample_length 10\n")
        fd.write("#\n")
        fd.write("# [Data]\n")
        toks = [u'%12s' % item for item in [u'Qz [1/A]', u'R [a.u.]', u'dR [a.u.]', u'dQz [1/A]', u'theta [rad]']]
        fd.write(u"# %s\n" % ' '.join(toks))
        fd.write(data_buffer)
    return file_path
def plot_combined(matched_runs, scaling_factors, ipts, publish=True):
    """
    Produce a combined reflectivity plot covering all cross sections of
    the matched runs and, when possible, publish it.

    @param matched_runs: list of runs in the stitched sequence
    @param scaling_factors: per-run scaling factors, aligned with matched_runs
    @param ipts: experiment identifier
    @param publish: passed through to the plot publisher
    @return: whatever plot1d returns, or None when nothing was plotted or
        no publisher module could be imported
    """
    data_names = []
    data_list = []
    for i, run in enumerate(matched_runs):
        for xs in ['Off_Off', 'On_Off', 'Off_On', 'On_On']:
            output_dir = AR_OUT_DIR_TEMPLATE % dict(ipts=ipts)
            file_path = os.path.join(output_dir, "REF_M_%s_%s_autoreduce.dat" % (run, xs))
            if os.path.isfile(file_path):
                ref_data = pandas.read_csv(file_path,
                                       delim_whitespace=True, comment='#', names=['q','r','dr','dq', 'a'])
                data_list.append([ref_data['q'], scaling_factors[i]*ref_data['r'], scaling_factors[i]*ref_data['dr']])
                data_names.append("r%s [%s]" % (run, xs))

    try:
        # Depending on where we run, we might get our publisher from
        # different places, or not at all.
        try: # version on autoreduce
            from postprocessing.publish_plot import plot1d
        except ImportError: # version on instrument computers
            from finddata.publish_plot import plot1d
        if data_names:
            return plot1d(matched_runs[-1], data_list, data_names=data_names, instrument='REF_M',
                          x_title=u"Q (1/A)", x_log=False,
                          y_title="Reflectivity", y_log=True, show_dx=False, publish=publish)
        else:
            api.logger.notice("Nothing to plot")
    except:
        # Publishing is best-effort: log the failure and carry on.
        api.logger.error(str(sys.exc_info()[1]))
        api.logger.error("No publisher module found")
    return None
def combined_curves(run, ipts):
    """
    Produce combined R(q) curves for the sequence that ``run`` belongs to.

    @param run: run to start with
    @param ipts: experiment identifier
    @return: (matched_runs, scaling_factors, file_list) where file_list
        contains the combined data files that were written.  When the
        scaling-factor computation fails, unit factors and empty file
        names are returned instead.
    """
    # Select the cross section with the best statistics
    high_stat_xs = select_cross_section(run, ipts)
    api.logger.notice("High xs: %s" % high_stat_xs)
    # Match the given run with previous runs if they are overlapping in Q
    matched_runs = match_run_with_sequence(run, ipts, high_stat_xs)
    api.logger.notice("Matched runs: %s" % str(matched_runs))

    # Compute scaling factors for this cross section
    try:
        scaling_factors, direct_beam_info, data_info, data_buffer, xs_label = compute_scaling_factors(matched_runs, ipts, high_stat_xs)
    except:
        # Fall back to unit scaling when stitching fails.
        return matched_runs, np.ones(len(matched_runs)), ['']*len(matched_runs)

    # Write combined python script
    write_reduction_script(matched_runs, scaling_factors, ipts)
    write_tunable_reduction_script(matched_runs, scaling_factors, ipts)

    # Scale the remaining cross sections with the same factors.
    xs_buffers = apply_scaling_factors(matched_runs, ipts, high_stat_xs, scaling_factors)
    xs_buffers.append((high_stat_xs, data_buffer))

    file_list = []
    for item in xs_buffers:
        if item[1]:
            _file_path = write_reflectivity_cross_section(matched_runs[0], ipts, item[0],
                                                             matched_runs, direct_beam_info,
                                                             data_info, item[1], xs_label)
            file_list.append(_file_path)
    return matched_runs, scaling_factors, file_list
def combined_catalog_info(matched_runs, ipts, output_files, run_number=None):
    """
    Produce cataloging information for reduced data

    :param list matched_runs: list of matched runs
    :param str ipts: experiment name
    :param list output_files: list of output files for this reduction process
    :param str run_number: run number we want to associate this reduction with
    """
    tz = pytz.timezone('America/New_York')
    info = dict(user='auto',
                created=tz.localize(datetime.datetime.now()).isoformat(),
                metadata=dict())

    # List of input files (only those actually present on disk).
    data_dir = DATA_DIR_TEMPLATE % dict(ipts=ipts)
    info['input_files'] = [
        dict(location=os.path.join(data_dir, 'REF_M_%s.nxs.h5' % item),
             type='raw',
             purpose='sample-data')
        for item in matched_runs
        if os.path.isfile(os.path.join(data_dir, 'REF_M_%s.nxs.h5' % item))
    ]

    # List of output files
    info['output_files'] = [
        dict(location=item,
             type='processed',
             purpose='reduced-data',
             fields=dict())
        for item in output_files
    ]

    # Default to the first matched run when no run number was given.
    output_dir = AR_OUT_DIR_TEMPLATE % dict(ipts=ipts)
    if run_number is None:
        run_number = matched_runs[0]
    json_path = os.path.join(output_dir, "REF_M_%s.json" % run_number)
    with open(json_path, 'w') as fd:
        fd.write(json.dumps(info, indent=4))
    return json_path
| [
"numpy.sum",
"mantid.simpleapi.Stitch1D",
"pandas.read_csv",
"mantid.simpleapi.CreateWorkspace",
"mantid.simpleapi.ConvertToHistogram",
"time.strftime",
"finddata.publish_plot.plot1d",
"json.dumps",
"datetime.datetime.now",
"os.path.isfile",
"mantid.simpleapi.CloneWorkspace",
"pytz.timezone",
... | [((776, 855), 'mantid.simpleapi.logger.notice', 'api.logger.notice', (["('Matching for IPTS-%s r%s [%s]' % (ipts, run, cross_section))"], {}), "('Matching for IPTS-%s r%s [%s]' % (ipts, run, cross_section))\n", (793, 855), True, 'import mantid.simpleapi as api\n'), ((1976, 2001), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1990, 2001), False, 'import os\n'), ((3465, 3557), 'mantid.simpleapi.logger.notice', 'api.logger.notice', (["('Matching sequence for IPTS-%s r%s [%s]' % (ipts, run, cross_section))"], {}), "('Matching sequence for IPTS-%s r%s [%s]' % (ipts, run,\n cross_section))\n", (3482, 3557), True, 'import mantid.simpleapi as api\n'), ((3679, 3754), 'os.path.join', 'os.path.join', (['data_dir', "('REF_M_%s_%s_autoreduce.dat' % (run, cross_section))"], {}), "(data_dir, 'REF_M_%s_%s_autoreduce.dat' % (run, cross_section))\n", (3691, 3754), False, 'import os\n'), ((4106, 4126), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (4116, 4126), False, 'import os\n'), ((11412, 11487), 'os.path.join', 'os.path.join', (['output_dir', "('REF_M_%s_%s_combined.dat' % (run, cross_section))"], {}), "(output_dir, 'REF_M_%s_%s_combined.dat' % (run, cross_section))\n", (11424, 11487), False, 'import os\n'), ((14367, 14414), 'mantid.simpleapi.logger.notice', 'api.logger.notice', (["('High xs: %s' % high_stat_xs)"], {}), "('High xs: %s' % high_stat_xs)\n", (14384, 14414), True, 'import mantid.simpleapi as api\n'), ((16085, 16118), 'pytz.timezone', 'pytz.timezone', (['"""America/New_York"""'], {}), "('America/New_York')\n", (16098, 16118), False, 'import pytz\n'), ((17156, 17210), 'os.path.join', 'os.path.join', (['output_dir', "('REF_M_%s.json' % run_number)"], {}), "(output_dir, 'REF_M_%s.json' % run_number)\n", (17168, 17210), False, 'import os\n'), ((1005, 1084), 'os.path.join', 'os.path.join', (['output_dir', "('REF_M_%s_%s_autoreduce.dat' % (i_run, cross_section))"], {}), "(output_dir, 'REF_M_%s_%s_autoreduce.dat' % 
(i_run, cross_section))\n", (1017, 1084), False, 'import os\n'), ((1096, 1121), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1110, 1121), False, 'import os\n'), ((5039, 5118), 'os.path.join', 'os.path.join', (['output_dir', "('REF_M_%s_%s_autoreduce.dat' % (i_run, cross_section))"], {}), "(output_dir, 'REF_M_%s_%s_autoreduce.dat' % (i_run, cross_section))\n", (5051, 5118), False, 'import os\n'), ((5130, 5155), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (5144, 5155), False, 'import os\n'), ((10231, 10297), 'os.path.join', 'os.path.join', (['output_dir', "('REF_M_%s_%s_autoreduce.dat' % (run, xs))"], {}), "(output_dir, 'REF_M_%s_%s_autoreduce.dat' % (run, xs))\n", (10243, 10297), False, 'import os\n'), ((10309, 10334), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (10323, 10334), False, 'import os\n'), ((16416, 16464), 'os.path.join', 'os.path.join', (['data_dir', "('REF_M_%s.nxs.h5' % item)"], {}), "(data_dir, 'REF_M_%s.nxs.h5' % item)\n", (16428, 16464), False, 'import os\n'), ((16476, 16501), 'os.path.isfile', 'os.path.isfile', (['data_file'], {}), '(data_file)\n', (16490, 16501), False, 'import os\n'), ((1146, 1247), 'pandas.read_csv', 'pandas.read_csv', (['file_path'], {'delim_whitespace': '(True)', 'comment': '"""#"""', 'names': "['q', 'r', 'dr', 'dq', 'a']"}), "(file_path, delim_whitespace=True, comment='#', names=['q',\n 'r', 'dr', 'dq', 'a'])\n", (1161, 1247), False, 'import pandas\n'), ((1370, 1426), 'mantid.simpleapi.logger.notice', 'api.logger.notice', (["('%s: [%s %s]' % (i_run, q_min, q_max))"], {}), "('%s: [%s %s]' % (i_run, q_min, q_max))\n", (1387, 1426), True, 'import mantid.simpleapi as api\n'), ((5225, 5326), 'pandas.read_csv', 'pandas.read_csv', (['_run_info'], {'delim_whitespace': '(True)', 'comment': '"""#"""', 'names': "['q', 'r', 'dr', 'dq', 'a']"}), "(_run_info, delim_whitespace=True, comment='#', names=['q',\n 'r', 'dr', 'dq', 'a'])\n", (5240, 5326), 
False, 'import pandas\n'), ((5377, 5465), 'mantid.simpleapi.CreateWorkspace', 'api.CreateWorkspace', ([], {'DataX': "ref_data['q']", 'DataY': "ref_data['r']", 'DataE': "ref_data['dr']"}), "(DataX=ref_data['q'], DataY=ref_data['r'], DataE=\n ref_data['dr'])\n", (5396, 5465), True, 'import mantid.simpleapi as api\n'), ((5478, 5504), 'mantid.simpleapi.ConvertToHistogram', 'api.ConvertToHistogram', (['ws'], {}), '(ws)\n', (5500, 5504), True, 'import mantid.simpleapi as api\n'), ((5724, 5746), 'mantid.simpleapi.CloneWorkspace', 'api.CloneWorkspace', (['ws'], {}), '(ws)\n', (5742, 5746), True, 'import mantid.simpleapi as api\n'), ((8949, 9017), 'os.path.join', 'os.path.join', (['output_dir', "('REF_M_%s_%s_autoreduce.dat' % (i_run, xs))"], {}), "(output_dir, 'REF_M_%s_%s_autoreduce.dat' % (i_run, xs))\n", (8961, 9017), False, 'import os\n'), ((9033, 9058), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (9047, 9058), False, 'import os\n'), ((10348, 10390), 'mantid.simpleapi.logger.notice', 'api.logger.notice', (["('Found: %s' % file_path)"], {}), "('Found: %s' % file_path)\n", (10365, 10390), True, 'import mantid.simpleapi as api\n'), ((10414, 10515), 'pandas.read_csv', 'pandas.read_csv', (['file_path'], {'delim_whitespace': '(True)', 'comment': '"""#"""', 'names': "['q', 'r', 'dr', 'dq', 'a']"}), "(file_path, delim_whitespace=True, comment='#', names=['q',\n 'r', 'dr', 'dq', 'a'])\n", (10429, 10515), False, 'import pandas\n'), ((10803, 10849), 'mantid.simpleapi.logger.notice', 'api.logger.notice', (["('NOT found: %s' % file_path)"], {}), "('NOT found: %s' % file_path)\n", (10820, 10849), True, 'import mantid.simpleapi as api\n'), ((11675, 11710), 'time.strftime', 'time.strftime', (['u"""%Y-%m-%d %H:%M:%S"""'], {}), "(u'%Y-%m-%d %H:%M:%S')\n", (11688, 11710), False, 'import time\n'), ((12925, 12991), 'os.path.join', 'os.path.join', (['output_dir', "('REF_M_%s_%s_autoreduce.dat' % (run, xs))"], {}), "(output_dir, 'REF_M_%s_%s_autoreduce.dat' % 
(run, xs))\n", (12937, 12991), False, 'import os\n'), ((13007, 13032), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (13021, 13032), False, 'import os\n'), ((13751, 13940), 'finddata.publish_plot.plot1d', 'plot1d', (['matched_runs[-1]', 'data_list'], {'data_names': 'data_names', 'instrument': '"""REF_M"""', 'x_title': 'u"""Q (1/A)"""', 'x_log': '(False)', 'y_title': '"""Reflectivity"""', 'y_log': '(True)', 'show_dx': '(False)', 'publish': 'publish'}), "(matched_runs[-1], data_list, data_names=data_names, instrument=\n 'REF_M', x_title=u'Q (1/A)', x_log=False, y_title='Reflectivity', y_log\n =True, show_dx=False, publish=publish)\n", (13757, 13940), False, 'from finddata.publish_plot import plot1d\n'), ((14009, 14045), 'mantid.simpleapi.logger.notice', 'api.logger.notice', (['"""Nothing to plot"""'], {}), "('Nothing to plot')\n", (14026, 14045), True, 'import mantid.simpleapi as api\n'), ((14115, 14160), 'mantid.simpleapi.logger.error', 'api.logger.error', (['"""No publisher module found"""'], {}), "('No publisher module found')\n", (14131, 14160), True, 'import mantid.simpleapi as api\n'), ((17265, 17291), 'json.dumps', 'json.dumps', (['info'], {'indent': '(4)'}), '(info, indent=4)\n', (17275, 17291), False, 'import json\n'), ((4252, 4280), 'os.path.join', 'os.path.join', (['data_dir', 'item'], {}), '(data_dir, item)\n', (4264, 4280), False, 'import os\n'), ((5573, 5603), 'mantid.simpleapi.Stitch1D', 'api.Stitch1D', (['_previous_ws', 'ws'], {}), '(_previous_ws, ws)\n', (5585, 5603), True, 'import mantid.simpleapi as api\n'), ((9136, 9237), 'pandas.read_csv', 'pandas.read_csv', (['_run_info'], {'delim_whitespace': '(True)', 'comment': '"""#"""', 'names': "['q', 'r', 'dr', 'dq', 'a']"}), "(_run_info, delim_whitespace=True, comment='#', names=['q',\n 'r', 'dr', 'dq', 'a'])\n", (9151, 9237), False, 'import pandas\n'), ((10577, 10616), 'numpy.sum', 'np.sum', (["(ref_data['dr'] * ref_data['dr'])"], {}), "(ref_data['dr'] * ref_data['dr'])\n", 
(10583, 10616), True, 'import numpy as np\n'), ((10619, 10640), 'numpy.sum', 'np.sum', (["ref_data['r']"], {}), "(ref_data['r'])\n", (10625, 10640), True, 'import numpy as np\n'), ((13061, 13162), 'pandas.read_csv', 'pandas.read_csv', (['file_path'], {'delim_whitespace': '(True)', 'comment': '"""#"""', 'names': "['q', 'r', 'dr', 'dq', 'a']"}), "(file_path, delim_whitespace=True, comment='#', names=['q',\n 'r', 'dr', 'dq', 'a'])\n", (13076, 13162), False, 'import pandas\n'), ((14087, 14101), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (14099, 14101), False, 'import sys\n'), ((16193, 16216), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16214, 16216), False, 'import datetime\n'), ((2290, 2357), 'mantid.simpleapi.logger.error', 'api.logger.error', (["('Could not extract group id from line: %s' % line)"], {}), "('Could not extract group id from line: %s' % line)\n", (2306, 2357), True, 'import mantid.simpleapi as api\n'), ((2582, 2651), 'mantid.simpleapi.logger.error', 'api.logger.error', (["('Could not extract run number from line: %s' % line)"], {}), "('Could not extract run number from line: %s' % line)\n", (2598, 2651), True, 'import mantid.simpleapi as api\n'), ((2894, 2961), 'mantid.simpleapi.logger.error', 'api.logger.error', (["('Could not extract lowest q from line: %s' % line)"], {}), "('Could not extract lowest q from line: %s' % line)\n", (2910, 2961), True, 'import mantid.simpleapi as api\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
# Make the local dgp_iwvi_gpflow2 checkout importable.
# NOTE(review): hard-coded absolute path -- adjust per machine or replace with
# a proper package install.
sys.path.append('/home/dgp_iwvi_gpflow2/')
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import gpflow
import enum
import collections
from typing import Callable, Optional, Tuple, TypeVar, Union, List
from gpflow.kernels import Kernel, MultioutputKernel
from gpflow.mean_functions import MeanFunction, Zero
from gpflow.inducing_variables import SeparateIndependentInducingVariables, SharedIndependentInducingVariables
from gpflow.kullback_leiblers import gauss_kl as gauss_kl_gpflow
import attr
# avoiding use of defaults kwarg, to keep compatibility with Python3.6
class RegularizerType(enum.Enum):
    """How a layer's regularizer enters the bound.

    LOCAL: one term per data point (used by the latent-variable layers below).
    GLOBAL: one term per layer (used by GPLayer's inducing-point KL).
    """
    LOCAL = 0
    GLOBAL = 1
def gauss_kl(q_mu, q_sqrt, K=None):
    """
    Wrapper for gauss_kl from gpflow that returns the negative log prob if q_sqrt is None. This can be
    for use in HMC: all that is required is to set q_sqrt to None and this function substitues the
    negative log prob instead of the KL (so no need to set q_mu.prior = gpflow.priors.Gaussian(0, 1)).
    Also, this allows the use of HMC in the unwhitened case.

    :param q_mu: [M, D] variational means (or HMC state when q_sqrt is None).
    :param q_sqrt: [D, M, M] lower-triangular variational scales, or None.
    :param K: optional prior covariance; identity is assumed when None.
    :return: scalar KL(q || p), or -log N(q_mu | 0, K) when q_sqrt is None.
    """
    if q_sqrt is None:
        # Negative log prob of q_mu under N(0, K) (or N(0, I) when K is None).
        M = tf.shape(q_mu)[0]
        I = tf.eye(M, dtype=q_mu.dtype)
        if K is None:
            L = I
        else:
            # BUGFIX: tf.cholesky was removed in TF2; use tf.linalg.cholesky.
            # Jitter is added for numerical stability of the factorization.
            L = tf.linalg.cholesky(K + I * gpflow.default_jitter())
        return -tf.reduce_sum(gpflow.logdensities.multivariate_normal(q_mu, tf.zeros_like(q_mu), L))
    # Standard case: closed-form KL between q and the (whitened) prior.
    return gauss_kl_gpflow(q_mu, q_sqrt, K=K)
class GPLayer(gpflow.Module):
    """Sparse-variational GP layer for a deep GP.

    Holds a (possibly multi-output) kernel, inducing variables, a mean
    function, and whitened variational parameters ``q_mu``/``q_sqrt`` over the
    inducing outputs.  Contributes a single KL term per layer, hence
    ``RegularizerType.GLOBAL``.
    """
    regularizer_type = RegularizerType.GLOBAL
    def __init__(self,
                 kernel: gpflow.kernels.Kernel,
                 inducing: gpflow.inducing_variables.InducingVariables,
                 mean_func: gpflow.mean_functions.MeanFunction,
                 **kwargs
                 ):
        super().__init__()
        """
        The range of supported options for sample conditional is not complete. The following do not
        work:
        LinearCoregionalization: in order to get the proper behavior, you must have a separate kernel
        for each independent GP. While some things evaluate with a single shared kernel, Kuu and Kuf
        do not work properly and treat the number of latent gps as the number of separate kernels.
        LinearCoregionalization / SharedIndependentInducingVariable: In this case, you can only eval-
        uate the propagation when full_cov = False. However, full_cov is possible if one uses
        SeparateIndependentInducingVariable.
        """
        assert issubclass(type(kernel), gpflow.kernels.Kernel)
        assert issubclass(type(inducing), gpflow.inducing_variables.InducingVariables)
        # Single-output kernel: exactly one latent GP and one output dimension.
        if not issubclass(type(kernel), gpflow.kernels.MultioutputKernel):
            self.num_latent_gps = 1
            self.output_dim = 1
        else:
            self.num_latent_gps = kernel.num_latent_gps
            if not len(kernel.kernels) == self.num_latent_gps:
                # we want to catch the error with LinearCoregionalization mentioned above
                raise ValueError(
                    f"number of kernels should match number of latent gps " \
                    f"({self.num_latent_gps}) got {len(kernel.kernels)} kernels"
                )
            if issubclass(type(kernel), gpflow.kernels.LinearCoregionalization):
                # Latent GPs are mixed through W, so the output dim is W's row count.
                self.output_dim = kernel.W.shape[-2]
            else:
                self.output_dim = self.num_latent_gps
        self.kernel = kernel
        self.inducing = inducing
        # Read off the input dimension and inducing-point count from whichever
        # inducing-variable container was supplied.
        if hasattr(self.inducing, 'inducing_variable_list'):
            # case for separate independent
            assert len(self.inducing.inducing_variable_list) == self.num_latent_gps, \
                f"Got {len(self.inducing.inducing_variable_list)} inducing variables, " \
                f"but expected {self.num_latent_gps} gps from kernel. These should match."
            self.in_features = self.inducing.inducing_variable_list[0].Z.shape[-1]
            self.num_inducing = self.inducing.inducing_variable_list[0].Z.shape[-2]
        elif hasattr(self.inducing, 'inducing_variable'):
            # case for shared independent
            self.in_features = self.inducing.inducing_variable.Z.shape[-1]
            self.num_inducing = self.inducing.inducing_variable.Z.shape[-2]
        else:
            # Plain (single-output) inducing points.
            self.in_features = self.inducing.Z.shape[-1]
            self.num_inducing = self.inducing.Z.shape[-2]
        assert issubclass(type(mean_func), gpflow.mean_functions.MeanFunction)
        self.mean = mean_func
        if type(mean_func) is gpflow.mean_functions.Linear:
            # more consistency checking
            assert self.mean.A.shape[-1] == self.output_dim
        # Now for the storage of variational parameters
        # q_mu: [M, L] means over the inducing outputs (whitened representation).
        self.q_mu = gpflow.Parameter(np.zeros((self.num_inducing, self.num_latent_gps)), transform=None)
        # q_sqrt: [L, M, M] lower-triangular scales, initialised to identity
        # (optionally scaled via the 'scale_init_q_sqrt' kwarg).
        init_sqrt = np.tile(np.eye(self.num_inducing)[None, :, :], [self.num_latent_gps, 1, 1])
        if 'scale_init_q_sqrt' in kwargs:
            init_sqrt *= kwargs['scale_init_q_sqrt']
        self.q_sqrt = gpflow.Parameter(init_sqrt, transform=gpflow.utilities.triangular())
    def propagate(self, F, num_samples=None, full_cov=False, **kwargs):
        """Sample this layer's GP at inputs F.

        :param F: layer inputs.
        :param num_samples: number of samples to draw (extra leading axis when set).
        :param full_cov: whether to compute full output covariances.
        :return: (samples, mean, cov, kl) -- samples/mean include the mean function,
            kl is this layer's global inducing-point KL.
        """
        # In Hugh's code, he forces one to use full_cov = False for the case of a MoK. This has the effect
        # that only the final layer uses full covariance (as his code uses a single output kernel for the
        # final layer). This is inspite of the fact that he passes full_cov=True to all layers in the IWVI
        # case. Since I don't want to hack GPFlow's conditional system, I will let the full_cov pass through
        # and manually implement his behavior in the model.
        samples, mean, cov = gpflow.conditionals.sample_conditional(F,
                                       self.inducing,
                                       self.kernel,
                                       self.q_mu,
                                       full_cov=full_cov,
                                       q_sqrt=self.q_sqrt,
                                       white=True,
                                       num_samples=num_samples,
        )
        kl = gauss_kl(self.q_mu, self.q_sqrt)
        mf = self.mean(F)
        if num_samples is not None:
            # Samples carry an extra leading sample axis; broadcast the mean over it.
            samples = samples + mf[...,None,:,:]
        else:
            samples = samples + mf
        mean = mean + mf
        return samples, mean, cov, kl
    def components(self, F, num_samples=None, full_cov=False, **kwargs):
        """Return the predictive mean/cov (no sampling) at inputs F.

        :return: (mean, cov, kl) with the mean function added to the mean.
        """
        # In Hugh's code, he forces one to use full_cov = False for the case of a MoK. This has the effect
        # that only the final layer uses full covariance (as his code uses a single output kernel for the
        # final layer). This is inspite of the fact that he passes full_cov=True to all layers in the IWVI
        # case. Since I don't want to hack GPFlow's conditional system, I will let the full_cov pass through
        # and manually implement his behavior in the model.
        mean, cov = gpflow.conditionals.conditional(F,
                                  self.inducing,
                                  self.kernel,
                                  self.q_mu,
                                  full_cov=full_cov,
                                  q_sqrt=self.q_sqrt,
                                  white=True,
        )
        kl = gauss_kl(self.q_mu, self.q_sqrt)
        mf = self.mean(F)
        mean = mean + mf
        return mean, cov, kl
class Encoder(gpflow.Module):
    """MLP recognition network.

    Maps inputs of ``input_dim`` features through an MLP with skip
    connections to the mean and softplus-transformed scale of a diagonal
    Gaussian over a ``latent_dim``-dimensional latent variable.

    :param latent_dim: dimension of the latent variable
    :param input_dim: the MLP acts on data of `input_dim` dimensions
    :param network_dims: dimensions of inner MLPs, e.g. [10, 20, 10]
    :param activation_func: TensorFlow op used as the non-linearity between
        layers (default: tanh).
    """

    def __init__(self, latent_dim: int,
                 input_dim: int,
                 network_dims: int,
                 activation_func: Optional[Callable] = None):
        super().__init__()
        self.latent_dim = latent_dim
        self.activation_func = activation_func or tf.nn.tanh
        # Final layer emits both mean and raw scale, hence the factor of 2.
        self.layer_dims = [input_dim, *network_dims, latent_dim * 2]
        weights, biases = [], []
        for fan_in, fan_out in zip(self.layer_dims[:-1], self.layer_dims[1:]):
            # Xavier/Glorot initialisation.
            xavier_std = (2. / (fan_in + fan_out)) ** 0.5
            init_w = np.random.randn(fan_in, fan_out) * xavier_std
            weights.append(gpflow.Parameter(init_w, dtype=gpflow.config.default_float()))
            biases.append(gpflow.Parameter(np.zeros(fan_out), dtype=gpflow.config.default_float()))
        self.Ws, self.bs = weights, biases

    def __call__(self, Z) -> Tuple[tf.Tensor, tf.Tensor]:
        ones = tf.ones_like(Z)[..., :1, :1]  # broadcasting helper
        n_layers = len(self.bs)
        layer_spec = zip(self.Ws, self.bs, self.layer_dims[:-1], self.layer_dims[1:])
        for idx, (W, b, fan_in, fan_out) in enumerate(layer_spec):
            residual = tf.identity(Z)
            Z = tf.matmul(Z, ones * W) + ones * b
            if idx < n_layers - 1:
                Z = self.activation_func(Z)
                if fan_out == fan_in:
                    # Skip connection between equal-width layers.
                    Z = Z + residual
        q_mu, raw_scale = tf.split(Z, 2, axis=-1)
        # The -3 bias keeps the initial scales small.
        q_sqrt = tf.nn.softplus(raw_scale - 3.)
        return q_mu, q_sqrt
class SASEEncoder(gpflow.Module):
    """Recognition MLP for SASE spectra with one latent per (shot, feature).

    Takes an ``...NR`` input and returns mean/scale tensors of shape
    ``...(N*R)L``, where L is the latent dimension.

    :param latent_dim: dimension of the latent variable, i.e. L
    :param input_dim: the MLP acts on data of `input_dim` dimensions, i.e. R
    :param network_dims: dimensions of inner MLPs, e.g. [10, 20, 10]
    :param activation_func: TensorFlow op used as the non-linearity between
        layers (default: tanh).
    """

    def __init__(self, latent_dim: int,
                 input_dim: int,
                 network_dims: int,
                 activation_func: Optional[Callable] = None):
        super().__init__()
        self.latent_dim = tf.convert_to_tensor([latent_dim], tf.int32)
        self.activation_func = activation_func or tf.nn.tanh
        # One (mean, raw scale) pair per input feature: 2 * R * L outputs.
        self.layer_dims = [input_dim, *network_dims, input_dim * latent_dim * 2]
        weights, biases = [], []
        for fan_in, fan_out in zip(self.layer_dims[:-1], self.layer_dims[1:]):
            # Xavier/Glorot initialisation.
            xavier_std = (2. / (fan_in + fan_out)) ** 0.5
            init_w = np.random.randn(fan_in, fan_out) * xavier_std
            weights.append(gpflow.Parameter(init_w, dtype=gpflow.config.default_float()))
            biases.append(gpflow.Parameter(np.zeros(fan_out), dtype=gpflow.config.default_float()))
        self.Ws, self.bs = weights, biases

    def __call__(self, Z) -> Tuple[tf.Tensor, tf.Tensor]:
        n_shots = tf.convert_to_tensor([tf.shape(Z)[-2]], dtype=tf.int32)
        n_feats = tf.convert_to_tensor([tf.shape(Z)[-1]], dtype=tf.int32)
        leading = tf.convert_to_tensor(tf.shape(Z)[:-2], dtype=tf.int32)
        ones = tf.ones_like(Z)[..., :1, :1]  # broadcasting helper
        n_layers = len(self.bs)
        layer_spec = zip(self.Ws, self.bs, self.layer_dims[:-1], self.layer_dims[1:])
        for idx, (W, b, fan_in, fan_out) in enumerate(layer_spec):
            residual = tf.identity(Z)
            Z = tf.matmul(Z, ones * W) + ones * b
            if idx < n_layers - 1:
                Z = self.activation_func(Z)
                if fan_out == fan_in:
                    # Skip connection between equal-width layers.
                    Z = Z + residual
        raw_mu, raw_scale = tf.split(Z, 2, axis=-1)
        # The -3 bias keeps the initial scales small.
        q_sqrt = tf.nn.softplus(raw_scale - 3.)
        # Reshape ...N(L*R) -> ...(N*R)L so each (shot, feature) has its own latent.
        out_shape = tf.concat([leading, n_shots * n_feats, self.latent_dim], 0)
        return tf.reshape(raw_mu, out_shape), tf.reshape(q_sqrt, out_shape)
class EmbeddingEncoder(gpflow.Module):
    """Per-shot latent lookup table.

    Stores a (mean, raw scale) row for every shot index and returns the
    corresponding diagonal-Gaussian parameters via an embedding lookup --
    effectively a non-amortized per-datum variational posterior with random
    access.
    """

    def __init__(self, latent_dim: int,
                 nwords: int):
        super().__init__()
        self.latent_dim = int(latent_dim)
        # Row i holds the concatenated [mean_i, raw_scale_i] for shot i.
        self.embedding = gpflow.Parameter(np.random.randn(nwords, 2 * self.latent_dim),
                                          dtype=gpflow.config.default_float())

    @tf.function
    def __call__(self, Z: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
        # Z carries integer shot indices with a trailing singleton axis.
        rows = tf.nn.embedding_lookup(self.embedding, tf.squeeze(Z, -1))
        q_mu, raw_scale = tf.split(rows, 2, axis=-1)
        # The -3 bias keeps the initial scales small.
        q_sqrt = tf.nn.softplus(raw_scale - 3.)
        return q_mu, q_sqrt
class AmortizedLatentVariableLayer(gpflow.Module):
    """Latent-variable layer with an amortized (MLP) recognition model.

    Concatenates a sampled latent W onto the layer input F.  The divergence
    against a standard-normal prior is a per-data-point (LOCAL) regularizer.
    """
    regularizer_type = RegularizerType.LOCAL
    def __init__(self, latent_dim: int,
                 XY_dim: Optional[int] = None,
                 encoder: Optional[Callable] = None):
        """
        :param latent_dim: dimension of the latent variable W.
        :param XY_dim: input dimension for the default encoder; required when
            no encoder is supplied.
        :param encoder: callable mapping recognition inputs to (q_mu, q_sqrt).
        """
        super().__init__()
        self.latent_dim = latent_dim
        if encoder is None:
            assert XY_dim, 'must pass XY_dim or else an encoder'
            encoder = Encoder(latent_dim, XY_dim, [20, 20])
        self.encoder = encoder
    def propagate(self, F: tf.Tensor,
                  inference_amortization_inputs: Optional[tf.Tensor] = None,
                  is_sampled_local_regularizer: bool = False,
                  **kwargs) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Sample W, concatenate it onto F and return the local regularizer.

        :param F: layer inputs.
        :param inference_amortization_inputs: recognition inputs for the
            encoder; when None, W is sampled from the N(0, 1) prior.
        :param is_sampled_local_regularizer: when True return per-sample
            log q/p (for IW bounds) instead of the closed-form KL.
        :return: (samples, mean, cov, kl) where samples = concat([F, W], -1).
        """
        if inference_amortization_inputs is None:
            # No recognition inputs: fall back to sampling W from the prior.
            shape = tf.concat([F.shape[:-1], tf.TensorShape([self.latent_dim])], 0)
            q_mu = tf.zeros(shape, dtype=gpflow.config.default_float())
            q_sqrt = tf.ones(shape, dtype=gpflow.config.default_float())
        else:
            q_mu, q_sqrt = self.encoder(inference_amortization_inputs)
        # Reparameterization trick: W = mu + eps * sigma.
        eps = tf.random.normal(tf.shape(q_mu), dtype=gpflow.config.default_float())
        W = q_mu + eps * q_sqrt
        samples = tf.concat([F, W], -1)
        mean = tf.concat([F, q_mu], -1)
        cov = tf.concat([tf.zeros_like(F), q_sqrt ** 2], -1)
        # Standard-normal prior regularization.  (Fixed the original's
        # duplicated `p = p = ...` assignment typo.)
        p = tfp.distributions.Normal(
            loc=tf.zeros(1, dtype=gpflow.config.default_float()),
            scale=tf.ones(1, dtype=gpflow.config.default_float()))
        q = tfp.distributions.Normal(loc=q_mu, scale=q_sqrt)
        if is_sampled_local_regularizer:
            # for the IW models, we need to return a log q/p for each sample W
            kl = q.log_prob(W) - p.log_prob(W)
        else:
            # for the VI models, we want E_q log q/p, which is closed form for Gaussians
            kl = tfp.distributions.kl_divergence(q, p)
        return samples, mean, cov, kl
class AmortizedSASELatentVariableLayer(gpflow.Module):
    """Latent-variable layer whose recognition model is amortized over SASE
    spectra (via ``SASEEncoder``); regularized per data point."""

    regularizer_type = RegularizerType.LOCAL

    def __init__(self, latent_dim: int,
                 sase_dim: int,
                 encoder: Optional[Callable] = None):
        super().__init__()
        if encoder is None:
            encoder = SASEEncoder(latent_dim, sase_dim, [50, 10, 50])
        self.latent_dim = tf.convert_to_tensor([latent_dim], dtype=tf.int32)
        self.sase_dim = tf.convert_to_tensor([sase_dim], dtype=tf.int32)
        self.encoder = encoder

    def propagate(self, F: tf.Tensor,
                  inference_amortization_inputs: Optional[tf.Tensor] = None,
                  is_sampled_local_regularizer: bool = False,
                  **kwargs) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Draw a latent W, concatenate it onto F and return the regularizer."""
        if inference_amortization_inputs is not None:
            q_mu, q_sqrt = self.encoder(inference_amortization_inputs)
        else:
            # Without a SASE spectrum, sample W from the N(0, 1) prior; q_mu /
            # q_sqrt may alternatively be fed directly (e.g. for plotting).
            leading = tf.convert_to_tensor(tf.shape(F)[:-2], dtype=tf.int32)
            n_pts = tf.convert_to_tensor([tf.shape(F)[-2]], dtype=tf.int32)
            prior_shape = tf.concat([leading, n_pts, self.latent_dim], 0)  # ...(N)L
            q_mu = tf.zeros(prior_shape, dtype=gpflow.config.default_float())
            q_sqrt = tf.ones(prior_shape, dtype=gpflow.config.default_float())
        # Reparameterization trick: W = mu + eps * sigma.
        noise = tf.random.normal(tf.shape(q_mu), dtype=gpflow.config.default_float())
        W = q_mu + noise * q_sqrt
        samples = tf.concat([F, W], -1)
        mean = tf.concat([F, q_mu], -1)
        cov = tf.concat([tf.zeros_like(F), q_sqrt ** 2], -1)
        # Standard-normal prior regularization.
        float_type = gpflow.config.default_float()
        prior = tfp.distributions.Normal(loc=tf.zeros(1, dtype=float_type),
                                         scale=tf.ones(1, dtype=float_type))
        posterior = tfp.distributions.Normal(loc=q_mu, scale=q_sqrt)
        if is_sampled_local_regularizer:
            # IW models: a log q/p term for each sampled W.
            kl = posterior.log_prob(W) - prior.log_prob(W)
        else:
            # VI models: E_q[log q/p], closed form for Gaussians.
            kl = tfp.distributions.kl_divergence(posterior, prior)
        return samples, mean, cov, kl
class AmortizedEmbeddingLatentVariableLayer(gpflow.Module):
    """Latent-variable layer whose recognition model is a per-shot embedding
    table (see ``EmbeddingEncoder``); regularized per data point.

    NOTE(review): ``nembed`` and ``nhidden`` are accepted but not used by the
    embedding encoder -- kept for call-site compatibility.
    """

    regularizer_type = RegularizerType.LOCAL

    def __init__(self, latent_dim: int, nwords: int, nembed: int = 10, nhidden: int = 20):
        super().__init__()
        self.latent_dim = latent_dim
        self.encoder = EmbeddingEncoder(latent_dim, nwords)

    def propagate(self, F: tf.Tensor,
                  inference_amortization_inputs: Optional[tf.Tensor] = None,
                  is_sampled_local_regularizer: bool = False,
                  **kwargs) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Draw a latent W, concatenate it onto F and return the regularizer."""
        if inference_amortization_inputs is not None:
            q_mu, q_sqrt = self.encoder(inference_amortization_inputs)
        else:
            # No shot indices supplied: sample W from the N(0, 1) prior; q_mu /
            # q_sqrt may alternatively be fed directly (e.g. for plotting).
            prior_shape = tf.concat([F.shape[:-1], tf.TensorShape([self.latent_dim])], 0)
            q_mu = tf.zeros(prior_shape, dtype=gpflow.config.default_float())
            q_sqrt = tf.ones(prior_shape, dtype=gpflow.config.default_float())
        # Reparameterization trick: W = mu + eps * sigma.
        noise = tf.random.normal(tf.shape(q_mu), dtype=gpflow.config.default_float())
        W = q_mu + noise * q_sqrt
        samples = tf.concat([F, W], -1)
        mean = tf.concat([F, q_mu], -1)
        cov = tf.concat([tf.zeros_like(F), q_sqrt ** 2], -1)
        # Standard-normal prior regularization.
        float_type = gpflow.config.default_float()
        prior = tfp.distributions.Normal(loc=tf.zeros(1, dtype=float_type),
                                         scale=tf.ones(1, dtype=float_type))
        posterior = tfp.distributions.Normal(loc=q_mu, scale=q_sqrt)
        if is_sampled_local_regularizer:
            # IW models: a log q/p term for each sampled W.
            kl = posterior.log_prob(W) - prior.log_prob(W)
        else:
            # VI models: E_q[log q/p], closed form for Gaussians.
            kl = tfp.distributions.kl_divergence(posterior, prior)
        return samples, mean, cov, kl
class SASEReducedEncoder(gpflow.Module):
    """Recognition MLP producing a single latent Gaussian per shot.

    Maps an ``...NR`` input to mean/scale tensors of shape ``...NL`` (one
    ``latent_dim``-dimensional diagonal Gaussian per shot), unlike
    ``SASEEncoder``, which emits a latent per (shot, feature) pair and
    reshapes its output.

    :param latent_dim: dimension of the latent variable, i.e. L
    :param input_dim: the MLP acts on data of `input_dim` dimensions, i.e. R
    :param network_dims: dimensions of inner MLPs, e.g. [10, 20, 10]
    :param activation_func: TensorFlow operation that can be used
        as non-linearity between the layers (default: tanh).
    """
    def __init__(self, latent_dim: int,
                 input_dim: int,
                 network_dims: int,
                 activation_func: Optional[Callable] = None):
        super().__init__()
        self.latent_dim = tf.convert_to_tensor([latent_dim], tf.int32)
        self.activation_func = activation_func or tf.nn.tanh
        # Final layer emits both mean and raw scale, hence the factor of 2.
        self.layer_dims = [input_dim, *network_dims, latent_dim * 2]
        Ws, bs = [], []
        for input_dim, output_dim in zip(self.layer_dims[:-1], self.layer_dims[1:]):
            # Xavier/Glorot initialisation.
            xavier_std = (2. / (input_dim + output_dim)) ** 0.5
            W = np.random.randn(input_dim, output_dim) * xavier_std
            Ws.append(gpflow.Parameter(W, dtype=gpflow.config.default_float()))
            bs.append(gpflow.Parameter(np.zeros(output_dim), dtype=gpflow.config.default_float()))
        self.Ws, self.bs = Ws, bs
    def __call__(self, Z) -> Tuple[tf.Tensor, tf.Tensor]:
        # (Removed the unused N / R / batch_shape computations that were
        # copy-paste leftovers from SASEEncoder: this encoder does not reshape
        # its output.)
        o = tf.ones_like(Z)[..., :1, :1]  # for correct broadcasting
        for i, (W, b, dim_in, dim_out) in enumerate(zip(self.Ws, self.bs, self.layer_dims[:-1], self.layer_dims[1:])):
            Z0 = tf.identity(Z)
            Z = tf.matmul(Z, o * W) + o * b
            if i < len(self.bs) - 1:
                Z = self.activation_func(Z)
                if dim_out == dim_in:  # skip connection
                    Z += Z0
        means, log_chol_diag = tf.split(Z, 2, axis=-1)
        # The -3 bias keeps the initial scales small.
        q_sqrt = tf.nn.softplus(log_chol_diag - 3.)
        return means, q_sqrt
class AmortizedSASEReducedLatentVariableLayer(gpflow.Module):
    """Latent-variable layer amortized over SASE spectra with one latent per
    shot (uses ``SASEReducedEncoder``); regularized per data point."""

    regularizer_type = RegularizerType.LOCAL

    def __init__(self, latent_dim: int,
                 sase_dim: int,
                 encoder: Optional[Callable] = None):
        super().__init__()
        if encoder is None:
            encoder = SASEReducedEncoder(latent_dim, sase_dim, [50, 10, 10])
        self.latent_dim = tf.convert_to_tensor([latent_dim], dtype=tf.int32)
        self.sase_dim = tf.convert_to_tensor([sase_dim], dtype=tf.int32)
        self.encoder = encoder

    def propagate(self, F: tf.Tensor,
                  inference_amortization_inputs: Optional[tf.Tensor] = None,
                  is_sampled_local_regularizer: bool = False,
                  **kwargs) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Draw a latent W, concatenate it onto F and return the regularizer."""
        if inference_amortization_inputs is not None:
            q_mu, q_sqrt = self.encoder(inference_amortization_inputs)
        else:
            # Without a SASE spectrum, sample W from the N(0, 1) prior; q_mu /
            # q_sqrt may alternatively be fed directly (e.g. for plotting).
            leading = tf.convert_to_tensor(tf.shape(F)[:-2], dtype=tf.int32)
            n_pts = tf.convert_to_tensor([tf.shape(F)[-2]], dtype=tf.int32)
            prior_shape = tf.concat([leading, n_pts, self.latent_dim], 0)  # ...(N)L
            q_mu = tf.zeros(prior_shape, dtype=gpflow.config.default_float())
            q_sqrt = tf.ones(prior_shape, dtype=gpflow.config.default_float())
        # Reparameterization trick: W = mu + eps * sigma.
        noise = tf.random.normal(tf.shape(q_mu), dtype=gpflow.config.default_float())
        W = q_mu + noise * q_sqrt
        samples = tf.concat([F, W], -1)
        mean = tf.concat([F, q_mu], -1)
        cov = tf.concat([tf.zeros_like(F), q_sqrt ** 2], -1)
        # Standard-normal prior regularization.
        float_type = gpflow.config.default_float()
        prior = tfp.distributions.Normal(loc=tf.zeros(1, dtype=float_type),
                                         scale=tf.ones(1, dtype=float_type))
        posterior = tfp.distributions.Normal(loc=q_mu, scale=q_sqrt)
        if is_sampled_local_regularizer:
            # IW models: a log q/p term for each sampled W.
            kl = posterior.log_prob(W) - prior.log_prob(W)
        else:
            # VI models: E_q[log q/p], closed form for Gaussians.
            kl = tfp.distributions.kl_divergence(posterior, prior)
        return samples, mean, cov, kl
class AmortizedLatentVariableLayer2(gpflow.Module):
    """Latent-variable layer amortized by a plain ``Encoder`` MLP over the
    recognition inputs; regularized per data point."""
    regularizer_type = RegularizerType.LOCAL
    def __init__(self, latent_dim: int,
                 sase_dim: int,
                 encoder_dims: Optional[List[int]] = None):
        super().__init__()
        if encoder_dims is None:
            encoder = Encoder(latent_dim, sase_dim, [50, 10, 10])
        else:
            encoder = Encoder(latent_dim, sase_dim, encoder_dims)
        self.latent_dim = tf.convert_to_tensor([latent_dim], dtype=tf.int32)
        self.sase_dim = tf.convert_to_tensor([sase_dim], dtype=tf.int32)
        self.encoder = encoder
    def propagate(self, F: tf.Tensor,
                  inference_amortization_inputs: Optional[tf.Tensor] = None,
                  is_sampled_local_regularizer: bool = False,
                  **kwargs) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Sample a latent W, concatenate it to F and return (samples, mean, cov, kl)."""
        if inference_amortization_inputs is None:
            """
            If there isn't a SASE spec passed for the recognition model, this samples from the prior.
            Optionally, q_mu and q_sqrt can be fed via a placeholder (e.g. for plotting purposes)
            """
            batch_shape = tf.convert_to_tensor(tf.shape(F)[:-2], dtype=tf.int32) # ...
            N = tf.convert_to_tensor([tf.shape(F)[-2]], dtype=tf.int32)
            shape = tf.concat([batch_shape, N, self.latent_dim], 0) # ...(N)L
            q_mu = tf.zeros(shape, dtype=gpflow.config.default_float())
            q_sqrt = tf.ones(shape, dtype=gpflow.config.default_float())
        else:
            q_mu, q_sqrt = self.encoder(inference_amortization_inputs)
        # reparameterization trick to take a sample for W
        eps = tf.random.normal(tf.shape(q_mu), dtype=gpflow.config.default_float())
        W = q_mu + eps * q_sqrt
        samples = tf.concat([F, W], -1)
        mean = tf.concat([F, q_mu], -1)
        cov = tf.concat([tf.zeros_like(F), q_sqrt ** 2], -1)
        # NOTE(review): removed leftover shouting comments claiming the KL
        # prior scale "is not 1" -- the prior scale here IS 1 (tf.ones below).
        # The 0.1-scaled prior lives in AmortizedLatentVariableLayerTiled.
        # (The duplicated `p = p =` assignment below is harmless.)
        # the prior regularization
        p = p = tfp.distributions.Normal(loc=tf.zeros(1,dtype=gpflow.config.default_float()),
                                    scale=tf.ones(1,dtype=gpflow.config.default_float()))
        q = tfp.distributions.Normal(loc=q_mu, scale=q_sqrt)
        if is_sampled_local_regularizer:
            # for the IW models, we need to return a log q/p for each sample W
            kl = q.log_prob(W) - p.log_prob(W)
        else:
            # for the VI models, we want E_q log q/p, which is closed form for Gaussians
            kl = tfp.distributions.kl_divergence(q, p)
        return samples, mean, cov, kl
class AmortizedLatentVariableLayerTiled(gpflow.Module):
    """Latent-variable layer that tiles each sampled latent W ``sase_dim``
    times along the point axis before concatenating it onto F, so the same
    latent is shared by a whole block of sase_dim rows.  Unlike the other
    latent layers, the prior here has scale 0.1, not 1.
    """
    regularizer_type = RegularizerType.LOCAL
    def __init__(self, latent_dim: int,
                 sase_dim: int,
                 encoder_dims: Optional[List[int]] = None):
        super().__init__()
        if encoder_dims is None:
            encoder = Encoder(latent_dim, sase_dim, [50, 10, 10])
        else:
            encoder = Encoder(latent_dim, sase_dim, encoder_dims)
        self.latent_dim = tf.convert_to_tensor([latent_dim], dtype=tf.int32)
        self.sase_dim = tf.convert_to_tensor([sase_dim], dtype=tf.int32)
        self.encoder = encoder
    def propagate(self, F: tf.Tensor,
                  inference_amortization_inputs: Optional[tf.Tensor] = None,
                  is_sampled_local_regularizer: bool = False,
                  **kwargs) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Sample W, tile it sase_dim times along the point axis, concatenate
        onto F and return (samples, mean, cov, kl)."""
        if inference_amortization_inputs is None:
            """
            If there isn't a SASE spec passed for the recognition model, this samples from the prior.
            Optionally, q_mu and q_sqrt can be fed via a placeholder (e.g. for plotting purposes)
            """
            # One latent per group of sase_dim rows: S = (#rows) // sase_dim.
            batch_shape = tf.convert_to_tensor(tf.shape(F)[:-3], dtype=tf.int32) # ...
            N = tf.convert_to_tensor([tf.shape(F)[-3]], dtype=tf.int32)
            S = tf.convert_to_tensor([tf.shape(F)[-2]//self.sase_dim[0]], dtype=tf.int32)
            shape = tf.concat([batch_shape, N, S, self.latent_dim], 0) # ...(N)L
            q_mu = tf.zeros(shape, dtype=gpflow.config.default_float())
            q_sqrt = tf.ones(shape, dtype=gpflow.config.default_float())
        else:
            q_mu, q_sqrt = self.encoder(inference_amortization_inputs)
        # reparameterization trick to take a sample for W
        eps = tf.random.normal(tf.shape(q_mu), dtype=gpflow.config.default_float())
        W = q_mu + eps * q_sqrt
        # Repeat each latent sase_dim times along the second-to-last axis.
        tile_vec = tf.concat([tf.convert_to_tensor([1], dtype=tf.int32),
                           self.sase_dim, tf.convert_to_tensor([1], dtype=tf.int32)],0)
        TW = tf.tile(W,tile_vec)
        Tmu = tf.tile(q_mu, tile_vec)
        Tsqrt = tf.tile(q_sqrt, tile_vec)
        samples = tf.concat([F, TW], -1)
        mean = tf.concat([F, Tmu], -1)
        cov = tf.concat([tf.zeros_like(F), Tsqrt ** 2], -1)
        # the prior regularization -- NOTE the deliberate 0.1 prior scale
        # below, unlike the unit-scale priors in the other latent layers.
        # (The duplicated `p = p =` assignment is harmless.)
        p = p = tfp.distributions.Normal(loc=tf.zeros(1,dtype=gpflow.config.default_float()),
                                    scale=0.1*tf.ones(1,dtype=gpflow.config.default_float()))
        q = tfp.distributions.Normal(loc=Tmu, scale=Tsqrt)
        if is_sampled_local_regularizer:
            # for the IW models, we need to return a log q/p for each sample W
            kl = q.log_prob(TW) - p.log_prob(TW)
        else:
            # for the VI models, we want E_q log q/p, which is closed form for Gaussians
            kl = tfp.distributions.kl_divergence(q, p)
        return samples, mean, cov, kl
# +
@attr.s(auto_attribs=True)
class GPLayer_Config(object):
    """Configuration record for building a GPLayer."""
    ngps: int        # number of latent GPs in the layer
    ninducing: int   # number of inducing points
@attr.s(auto_attribs=True)
class LatentLayer_Config(object):
    """Configuration record for an amortized latent-variable layer
    (cf. AmortizedLatentVariableLayer's latent_dim / XY_dim arguments)."""
    latent_features: int  # dimension of the latent variable W
    xy_dim: int           # input dimension for the recognition encoder
@attr.s(auto_attribs=True)
class EmbeddingLatentLayer_Config(object):
    """Configuration record for an embedding-backed latent layer
    (cf. AmortizedEmbeddingLatentVariableLayer's latent_dim / nwords)."""
    latent_features: int  # dimension of the latent variable W
    nwords: int           # number of embedding rows (distinct shot indices)
# -
| [
"gpflow.config.default_float",
"attr.s",
"tensorflow.identity",
"tensorflow.zeros_like",
"tensorflow.matmul",
"tensorflow.split",
"gpflow.utilities.triangular",
"gpflow.conditionals.sample_conditional",
"sys.path.append",
"gpflow.kullback_leiblers.gauss_kl",
"numpy.random.randn",
"tensorflow.T... | [((306, 348), 'sys.path.append', 'sys.path.append', (['"""/home/dgp_iwvi_gpflow2/"""'], {}), "('/home/dgp_iwvi_gpflow2/')\n", (321, 348), False, 'import sys\n'), ((31564, 31589), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (31570, 31589), False, 'import attr\n'), ((31663, 31688), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (31669, 31688), False, 'import attr\n'), ((31774, 31799), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (31780, 31799), False, 'import attr\n'), ((1572, 1599), 'tensorflow.eye', 'tf.eye', (['M'], {'dtype': 'q_mu.dtype'}), '(M, dtype=q_mu.dtype)\n', (1578, 1599), True, 'import tensorflow as tf\n'), ((1864, 1898), 'gpflow.kullback_leiblers.gauss_kl', 'gauss_kl_gpflow', (['q_mu', 'q_sqrt'], {'K': 'K'}), '(q_mu, q_sqrt, K=K)\n', (1879, 1898), True, 'from gpflow.kullback_leiblers import gauss_kl as gauss_kl_gpflow\n'), ((6232, 6398), 'gpflow.conditionals.sample_conditional', 'gpflow.conditionals.sample_conditional', (['F', 'self.inducing', 'self.kernel', 'self.q_mu'], {'full_cov': 'full_cov', 'q_sqrt': 'self.q_sqrt', 'white': '(True)', 'num_samples': 'num_samples'}), '(F, self.inducing, self.kernel, self.\n q_mu, full_cov=full_cov, q_sqrt=self.q_sqrt, white=True, num_samples=\n num_samples)\n', (6270, 6398), False, 'import gpflow\n'), ((7630, 7758), 'gpflow.conditionals.conditional', 'gpflow.conditionals.conditional', (['F', 'self.inducing', 'self.kernel', 'self.q_mu'], {'full_cov': 'full_cov', 'q_sqrt': 'self.q_sqrt', 'white': '(True)'}), '(F, self.inducing, self.kernel, self.q_mu,\n full_cov=full_cov, q_sqrt=self.q_sqrt, white=True)\n', (7661, 7758), False, 'import gpflow\n'), ((10121, 10144), 'tensorflow.split', 'tf.split', (['Z', '(2)'], {'axis': '(-1)'}), '(Z, 2, axis=-1)\n', (10129, 10144), True, 'import tensorflow as tf\n'), ((10162, 10197), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(log_chol_diag - 3.0)'], {}), 
'(log_chol_diag - 3.0)\n', (10176, 10197), True, 'import tensorflow as tf\n'), ((11258, 11302), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[latent_dim]', 'tf.int32'], {}), '([latent_dim], tf.int32)\n', (11278, 11302), True, 'import tensorflow as tf\n'), ((12631, 12654), 'tensorflow.split', 'tf.split', (['Z', '(2)'], {'axis': '(-1)'}), '(Z, 2, axis=-1)\n', (12639, 12654), True, 'import tensorflow as tf\n'), ((12672, 12707), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(log_chol_diag - 3.0)'], {}), '(log_chol_diag - 3.0)\n', (12686, 12707), True, 'import tensorflow as tf\n'), ((13776, 13802), 'tensorflow.split', 'tf.split', (['Zenc', '(2)'], {'axis': '(-1)'}), '(Zenc, 2, axis=-1)\n', (13784, 13802), True, 'import tensorflow as tf\n'), ((13820, 13855), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(log_chol_diag - 3.0)'], {}), '(log_chol_diag - 3.0)\n', (13834, 13855), True, 'import tensorflow as tf\n'), ((15479, 15500), 'tensorflow.concat', 'tf.concat', (['[F, W]', '(-1)'], {}), '([F, W], -1)\n', (15488, 15500), True, 'import tensorflow as tf\n'), ((15516, 15540), 'tensorflow.concat', 'tf.concat', (['[F, q_mu]', '(-1)'], {}), '([F, q_mu], -1)\n', (15525, 15540), True, 'import tensorflow as tf\n'), ((15827, 15875), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': 'q_mu', 'scale': 'q_sqrt'}), '(loc=q_mu, scale=q_sqrt)\n', (15851, 15875), True, 'import tensorflow_probability as tfp\n'), ((16620, 16670), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[latent_dim]'], {'dtype': 'tf.int32'}), '([latent_dim], dtype=tf.int32)\n', (16640, 16670), True, 'import tensorflow as tf\n'), ((16695, 16743), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[sase_dim]'], {'dtype': 'tf.int32'}), '([sase_dim], dtype=tf.int32)\n', (16715, 16743), True, 'import tensorflow as tf\n'), ((17978, 17999), 'tensorflow.concat', 'tf.concat', (['[F, W]', '(-1)'], {}), '([F, W], -1)\n', (17987, 17999), True, 'import 
tensorflow as tf\n'), ((18015, 18039), 'tensorflow.concat', 'tf.concat', (['[F, q_mu]', '(-1)'], {}), '([F, q_mu], -1)\n', (18024, 18039), True, 'import tensorflow as tf\n'), ((18326, 18374), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': 'q_mu', 'scale': 'q_sqrt'}), '(loc=q_mu, scale=q_sqrt)\n', (18350, 18374), True, 'import tensorflow_probability as tfp\n'), ((20111, 20132), 'tensorflow.concat', 'tf.concat', (['[F, W]', '(-1)'], {}), '([F, W], -1)\n', (20120, 20132), True, 'import tensorflow as tf\n'), ((20148, 20172), 'tensorflow.concat', 'tf.concat', (['[F, q_mu]', '(-1)'], {}), '([F, q_mu], -1)\n', (20157, 20172), True, 'import tensorflow as tf\n'), ((20459, 20507), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': 'q_mu', 'scale': 'q_sqrt'}), '(loc=q_mu, scale=q_sqrt)\n', (20483, 20507), True, 'import tensorflow_probability as tfp\n'), ((21853, 21897), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[latent_dim]', 'tf.int32'], {}), '([latent_dim], tf.int32)\n', (21873, 21897), True, 'import tensorflow as tf\n'), ((23214, 23237), 'tensorflow.split', 'tf.split', (['Z', '(2)'], {'axis': '(-1)'}), '(Z, 2, axis=-1)\n', (23222, 23237), True, 'import tensorflow as tf\n'), ((23255, 23290), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['(log_chol_diag - 3.0)'], {}), '(log_chol_diag - 3.0)\n', (23269, 23290), True, 'import tensorflow as tf\n'), ((23782, 23832), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[latent_dim]'], {'dtype': 'tf.int32'}), '([latent_dim], dtype=tf.int32)\n', (23802, 23832), True, 'import tensorflow as tf\n'), ((23857, 23905), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[sase_dim]'], {'dtype': 'tf.int32'}), '([sase_dim], dtype=tf.int32)\n', (23877, 23905), True, 'import tensorflow as tf\n'), ((25139, 25160), 'tensorflow.concat', 'tf.concat', (['[F, W]', '(-1)'], {}), '([F, W], -1)\n', (25148, 25160), True, 'import tensorflow as 
tf\n'), ((25176, 25200), 'tensorflow.concat', 'tf.concat', (['[F, q_mu]', '(-1)'], {}), '([F, q_mu], -1)\n', (25185, 25200), True, 'import tensorflow as tf\n'), ((25487, 25535), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': 'q_mu', 'scale': 'q_sqrt'}), '(loc=q_mu, scale=q_sqrt)\n', (25511, 25535), True, 'import tensorflow_probability as tfp\n'), ((26364, 26414), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[latent_dim]'], {'dtype': 'tf.int32'}), '([latent_dim], dtype=tf.int32)\n', (26384, 26414), True, 'import tensorflow as tf\n'), ((26439, 26487), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[sase_dim]'], {'dtype': 'tf.int32'}), '([sase_dim], dtype=tf.int32)\n', (26459, 26487), True, 'import tensorflow as tf\n'), ((27721, 27742), 'tensorflow.concat', 'tf.concat', (['[F, W]', '(-1)'], {}), '([F, W], -1)\n', (27730, 27742), True, 'import tensorflow as tf\n'), ((27758, 27782), 'tensorflow.concat', 'tf.concat', (['[F, q_mu]', '(-1)'], {}), '([F, q_mu], -1)\n', (27767, 27782), True, 'import tensorflow as tf\n'), ((28184, 28232), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': 'q_mu', 'scale': 'q_sqrt'}), '(loc=q_mu, scale=q_sqrt)\n', (28208, 28232), True, 'import tensorflow_probability as tfp\n'), ((29065, 29115), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[latent_dim]'], {'dtype': 'tf.int32'}), '([latent_dim], dtype=tf.int32)\n', (29085, 29115), True, 'import tensorflow as tf\n'), ((29140, 29188), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[sase_dim]'], {'dtype': 'tf.int32'}), '([sase_dim], dtype=tf.int32)\n', (29160, 29188), True, 'import tensorflow as tf\n'), ((30674, 30694), 'tensorflow.tile', 'tf.tile', (['W', 'tile_vec'], {}), '(W, tile_vec)\n', (30681, 30694), True, 'import tensorflow as tf\n'), ((30708, 30731), 'tensorflow.tile', 'tf.tile', (['q_mu', 'tile_vec'], {}), '(q_mu, tile_vec)\n', (30715, 30731), True, 'import 
tensorflow as tf\n'), ((30748, 30773), 'tensorflow.tile', 'tf.tile', (['q_sqrt', 'tile_vec'], {}), '(q_sqrt, tile_vec)\n', (30755, 30773), True, 'import tensorflow as tf\n'), ((30792, 30814), 'tensorflow.concat', 'tf.concat', (['[F, TW]', '(-1)'], {}), '([F, TW], -1)\n', (30801, 30814), True, 'import tensorflow as tf\n'), ((30830, 30853), 'tensorflow.concat', 'tf.concat', (['[F, Tmu]', '(-1)'], {}), '([F, Tmu], -1)\n', (30839, 30853), True, 'import tensorflow as tf\n'), ((31143, 31189), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': 'Tmu', 'scale': 'Tsqrt'}), '(loc=Tmu, scale=Tsqrt)\n', (31167, 31189), True, 'import tensorflow_probability as tfp\n'), ((5287, 5337), 'numpy.zeros', 'np.zeros', (['(self.num_inducing, self.num_latent_gps)'], {}), '((self.num_inducing, self.num_latent_gps))\n', (5295, 5337), True, 'import numpy as np\n'), ((9677, 9692), 'tensorflow.ones_like', 'tf.ones_like', (['Z'], {}), '(Z)\n', (9689, 9692), True, 'import tensorflow as tf\n'), ((9870, 9884), 'tensorflow.identity', 'tf.identity', (['Z'], {}), '(Z)\n', (9881, 9884), True, 'import tensorflow as tf\n'), ((12187, 12202), 'tensorflow.ones_like', 'tf.ones_like', (['Z'], {}), '(Z)\n', (12199, 12202), True, 'import tensorflow as tf\n'), ((12380, 12394), 'tensorflow.identity', 'tf.identity', (['Z'], {}), '(Z)\n', (12391, 12394), True, 'import tensorflow as tf\n'), ((12819, 12870), 'tensorflow.concat', 'tf.concat', (['[batch_shape, N * R, self.latent_dim]', '(0)'], {}), '([batch_shape, N * R, self.latent_dim], 0)\n', (12828, 12870), True, 'import tensorflow as tf\n'), ((12927, 12978), 'tensorflow.concat', 'tf.concat', (['[batch_shape, N * R, self.latent_dim]', '(0)'], {}), '([batch_shape, N * R, self.latent_dim], 0)\n', (12936, 12978), True, 'import tensorflow as tf\n'), ((13470, 13509), 'numpy.random.randn', 'np.random.randn', (['nwords', '(2 * latent_dim)'], {}), '(nwords, 2 * latent_dim)\n', (13485, 13509), True, 'import numpy as np\n'), ((13727, 
13744), 'tensorflow.squeeze', 'tf.squeeze', (['Z', '(-1)'], {}), '(Z, -1)\n', (13737, 13744), True, 'import tensorflow as tf\n'), ((15375, 15389), 'tensorflow.shape', 'tf.shape', (['q_mu'], {}), '(q_mu)\n', (15383, 15389), True, 'import tensorflow as tf\n'), ((16164, 16201), 'tensorflow_probability.distributions.kl_divergence', 'tfp.distributions.kl_divergence', (['q', 'p'], {}), '(q, p)\n', (16195, 16201), True, 'import tensorflow_probability as tfp\n'), ((17496, 17543), 'tensorflow.concat', 'tf.concat', (['[batch_shape, N, self.latent_dim]', '(0)'], {}), '([batch_shape, N, self.latent_dim], 0)\n', (17505, 17543), True, 'import tensorflow as tf\n'), ((17874, 17888), 'tensorflow.shape', 'tf.shape', (['q_mu'], {}), '(q_mu)\n', (17882, 17888), True, 'import tensorflow as tf\n'), ((18663, 18700), 'tensorflow_probability.distributions.kl_divergence', 'tfp.distributions.kl_divergence', (['q', 'p'], {}), '(q, p)\n', (18694, 18700), True, 'import tensorflow_probability as tfp\n'), ((20007, 20021), 'tensorflow.shape', 'tf.shape', (['q_mu'], {}), '(q_mu)\n', (20015, 20021), True, 'import tensorflow as tf\n'), ((20796, 20833), 'tensorflow_probability.distributions.kl_divergence', 'tfp.distributions.kl_divergence', (['q', 'p'], {}), '(q, p)\n', (20827, 20833), True, 'import tensorflow_probability as tfp\n'), ((22770, 22785), 'tensorflow.ones_like', 'tf.ones_like', (['Z'], {}), '(Z)\n', (22782, 22785), True, 'import tensorflow as tf\n'), ((22963, 22977), 'tensorflow.identity', 'tf.identity', (['Z'], {}), '(Z)\n', (22974, 22977), True, 'import tensorflow as tf\n'), ((24658, 24705), 'tensorflow.concat', 'tf.concat', (['[batch_shape, N, self.latent_dim]', '(0)'], {}), '([batch_shape, N, self.latent_dim], 0)\n', (24667, 24705), True, 'import tensorflow as tf\n'), ((25036, 25050), 'tensorflow.shape', 'tf.shape', (['q_mu'], {}), '(q_mu)\n', (25044, 25050), True, 'import tensorflow as tf\n'), ((25824, 25861), 'tensorflow_probability.distributions.kl_divergence', 
'tfp.distributions.kl_divergence', (['q', 'p'], {}), '(q, p)\n', (25855, 25861), True, 'import tensorflow_probability as tfp\n'), ((27240, 27287), 'tensorflow.concat', 'tf.concat', (['[batch_shape, N, self.latent_dim]', '(0)'], {}), '([batch_shape, N, self.latent_dim], 0)\n', (27249, 27287), True, 'import tensorflow as tf\n'), ((27618, 27632), 'tensorflow.shape', 'tf.shape', (['q_mu'], {}), '(q_mu)\n', (27626, 27632), True, 'import tensorflow as tf\n'), ((28521, 28558), 'tensorflow_probability.distributions.kl_divergence', 'tfp.distributions.kl_divergence', (['q', 'p'], {}), '(q, p)\n', (28552, 28558), True, 'import tensorflow_probability as tfp\n'), ((30031, 30081), 'tensorflow.concat', 'tf.concat', (['[batch_shape, N, S, self.latent_dim]', '(0)'], {}), '([batch_shape, N, S, self.latent_dim], 0)\n', (30040, 30081), True, 'import tensorflow as tf\n'), ((30412, 30426), 'tensorflow.shape', 'tf.shape', (['q_mu'], {}), '(q_mu)\n', (30420, 30426), True, 'import tensorflow as tf\n'), ((31480, 31517), 'tensorflow_probability.distributions.kl_divergence', 'tfp.distributions.kl_divergence', (['q', 'p'], {}), '(q, p)\n', (31511, 31517), True, 'import tensorflow_probability as tfp\n'), ((1523, 1537), 'tensorflow.shape', 'tf.shape', (['q_mu'], {}), '(q_mu)\n', (1531, 1537), True, 'import tensorflow as tf\n'), ((1542, 1556), 'tensorflow.shape', 'tf.shape', (['q_mu'], {}), '(q_mu)\n', (1550, 1556), True, 'import tensorflow as tf\n'), ((5383, 5408), 'numpy.eye', 'np.eye', (['self.num_inducing'], {}), '(self.num_inducing)\n', (5389, 5408), True, 'import numpy as np\n'), ((5606, 5635), 'gpflow.utilities.triangular', 'gpflow.utilities.triangular', ([], {}), '()\n', (5633, 5635), False, 'import gpflow\n'), ((9340, 9378), 'numpy.random.randn', 'np.random.randn', (['input_dim', 'output_dim'], {}), '(input_dim, output_dim)\n', (9355, 9378), True, 'import numpy as np\n'), ((9901, 9920), 'tensorflow.matmul', 'tf.matmul', (['Z', '(o * W)'], {}), '(Z, o * W)\n', (9910, 9920), True, 'import 
tensorflow as tf\n'), ((11637, 11675), 'numpy.random.randn', 'np.random.randn', (['input_dim', 'output_dim'], {}), '(input_dim, output_dim)\n', (11652, 11675), True, 'import numpy as np\n'), ((12141, 12152), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (12149, 12152), True, 'import tensorflow as tf\n'), ((12411, 12430), 'tensorflow.matmul', 'tf.matmul', (['Z', '(o * W)'], {}), '(Z, o * W)\n', (12420, 12430), True, 'import tensorflow as tf\n'), ((13556, 13585), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (13583, 13585), False, 'import gpflow\n'), ((15397, 15426), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (15424, 15426), False, 'import gpflow\n'), ((15566, 15582), 'tensorflow.zeros_like', 'tf.zeros_like', (['F'], {}), '(F)\n', (15579, 15582), True, 'import tensorflow as tf\n'), ((17896, 17925), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (17923, 17925), False, 'import gpflow\n'), ((18065, 18081), 'tensorflow.zeros_like', 'tf.zeros_like', (['F'], {}), '(F)\n', (18078, 18081), True, 'import tensorflow as tf\n'), ((20029, 20058), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (20056, 20058), False, 'import gpflow\n'), ((20198, 20214), 'tensorflow.zeros_like', 'tf.zeros_like', (['F'], {}), '(F)\n', (20211, 20214), True, 'import tensorflow as tf\n'), ((22220, 22258), 'numpy.random.randn', 'np.random.randn', (['input_dim', 'output_dim'], {}), '(input_dim, output_dim)\n', (22235, 22258), True, 'import numpy as np\n'), ((22724, 22735), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (22732, 22735), True, 'import tensorflow as tf\n'), ((22994, 23013), 'tensorflow.matmul', 'tf.matmul', (['Z', '(o * W)'], {}), '(Z, o * W)\n', (23003, 23013), True, 'import tensorflow as tf\n'), ((25058, 25087), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (25085, 25087), False, 'import gpflow\n'), ((25226, 
25242), 'tensorflow.zeros_like', 'tf.zeros_like', (['F'], {}), '(F)\n', (25239, 25242), True, 'import tensorflow as tf\n'), ((27640, 27669), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (27667, 27669), False, 'import gpflow\n'), ((27808, 27824), 'tensorflow.zeros_like', 'tf.zeros_like', (['F'], {}), '(F)\n', (27821, 27824), True, 'import tensorflow as tf\n'), ((30434, 30463), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (30461, 30463), False, 'import gpflow\n'), ((30527, 30568), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[1]'], {'dtype': 'tf.int32'}), '([1], dtype=tf.int32)\n', (30547, 30568), True, 'import tensorflow as tf\n'), ((30615, 30656), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[1]'], {'dtype': 'tf.int32'}), '([1], dtype=tf.int32)\n', (30635, 30656), True, 'import tensorflow as tf\n'), ((30879, 30895), 'tensorflow.zeros_like', 'tf.zeros_like', (['F'], {}), '(F)\n', (30892, 30895), True, 'import tensorflow as tf\n'), ((1793, 1812), 'tensorflow.zeros_like', 'tf.zeros_like', (['q_mu'], {}), '(q_mu)\n', (1806, 1812), True, 'import tensorflow as tf\n'), ((9511, 9531), 'numpy.zeros', 'np.zeros', (['output_dim'], {}), '(output_dim)\n', (9519, 9531), True, 'import numpy as np\n'), ((11808, 11828), 'numpy.zeros', 'np.zeros', (['output_dim'], {}), '(output_dim)\n', (11816, 11828), True, 'import numpy as np\n'), ((11996, 12007), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (12004, 12007), True, 'import tensorflow as tf\n'), ((12064, 12075), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (12072, 12075), True, 'import tensorflow as tf\n'), ((15016, 15049), 'tensorflow.TensorShape', 'tf.TensorShape', (['[self.latent_dim]'], {}), '([self.latent_dim])\n', (15030, 15049), True, 'import tensorflow as tf\n'), ((15096, 15125), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (15123, 15125), False, 'import gpflow\n'), ((15169, 
15198), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (15196, 15198), False, 'import gpflow\n'), ((17364, 17375), 'tensorflow.shape', 'tf.shape', (['F'], {}), '(F)\n', (17372, 17375), True, 'import tensorflow as tf\n'), ((17595, 17624), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (17622, 17624), False, 'import gpflow\n'), ((17668, 17697), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (17695, 17697), False, 'import gpflow\n'), ((19648, 19681), 'tensorflow.TensorShape', 'tf.TensorShape', (['[self.latent_dim]'], {}), '([self.latent_dim])\n', (19662, 19681), True, 'import tensorflow as tf\n'), ((19728, 19757), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (19755, 19757), False, 'import gpflow\n'), ((19801, 19830), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (19828, 19830), False, 'import gpflow\n'), ((22391, 22411), 'numpy.zeros', 'np.zeros', (['output_dim'], {}), '(output_dim)\n', (22399, 22411), True, 'import numpy as np\n'), ((22579, 22590), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (22587, 22590), True, 'import tensorflow as tf\n'), ((22647, 22658), 'tensorflow.shape', 'tf.shape', (['Z'], {}), '(Z)\n', (22655, 22658), True, 'import tensorflow as tf\n'), ((24526, 24537), 'tensorflow.shape', 'tf.shape', (['F'], {}), '(F)\n', (24534, 24537), True, 'import tensorflow as tf\n'), ((24757, 24786), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (24784, 24786), False, 'import gpflow\n'), ((24830, 24859), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (24857, 24859), False, 'import gpflow\n'), ((27108, 27119), 'tensorflow.shape', 'tf.shape', (['F'], {}), '(F)\n', (27116, 27119), True, 'import tensorflow as tf\n'), ((27339, 27368), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (27366, 27368), 
False, 'import gpflow\n'), ((27412, 27441), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (27439, 27441), False, 'import gpflow\n'), ((29809, 29820), 'tensorflow.shape', 'tf.shape', (['F'], {}), '(F)\n', (29817, 29820), True, 'import tensorflow as tf\n'), ((30133, 30162), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (30160, 30162), False, 'import gpflow\n'), ((30206, 30235), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (30233, 30235), False, 'import gpflow\n'), ((1691, 1714), 'gpflow.default_jitter', 'gpflow.default_jitter', ([], {}), '()\n', (1712, 1714), False, 'import gpflow\n'), ((9440, 9469), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (9467, 9469), False, 'import gpflow\n'), ((9539, 9568), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (9566, 9568), False, 'import gpflow\n'), ((11737, 11766), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (11764, 11766), False, 'import gpflow\n'), ((11836, 11865), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (11863, 11865), False, 'import gpflow\n'), ((15700, 15729), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (15727, 15729), False, 'import gpflow\n'), ((15783, 15812), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (15810, 15812), False, 'import gpflow\n'), ((17442, 17453), 'tensorflow.shape', 'tf.shape', (['F'], {}), '(F)\n', (17450, 17453), True, 'import tensorflow as tf\n'), ((18199, 18228), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (18226, 18228), False, 'import gpflow\n'), ((18282, 18311), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (18309, 18311), False, 'import gpflow\n'), ((20332, 20361), 'gpflow.config.default_float', 
'gpflow.config.default_float', ([], {}), '()\n', (20359, 20361), False, 'import gpflow\n'), ((20415, 20444), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (20442, 20444), False, 'import gpflow\n'), ((22320, 22349), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (22347, 22349), False, 'import gpflow\n'), ((22419, 22448), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (22446, 22448), False, 'import gpflow\n'), ((24604, 24615), 'tensorflow.shape', 'tf.shape', (['F'], {}), '(F)\n', (24612, 24615), True, 'import tensorflow as tf\n'), ((25360, 25389), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (25387, 25389), False, 'import gpflow\n'), ((25443, 25472), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (25470, 25472), False, 'import gpflow\n'), ((27186, 27197), 'tensorflow.shape', 'tf.shape', (['F'], {}), '(F)\n', (27194, 27197), True, 'import tensorflow as tf\n'), ((28057, 28086), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (28084, 28086), False, 'import gpflow\n'), ((28140, 28169), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (28167, 28169), False, 'import gpflow\n'), ((29887, 29898), 'tensorflow.shape', 'tf.shape', (['F'], {}), '(F)\n', (29895, 29898), True, 'import tensorflow as tf\n'), ((31012, 31041), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (31039, 31041), False, 'import gpflow\n'), ((29959, 29970), 'tensorflow.shape', 'tf.shape', (['F'], {}), '(F)\n', (29967, 29970), True, 'import tensorflow as tf\n'), ((31099, 31128), 'gpflow.config.default_float', 'gpflow.config.default_float', ([], {}), '()\n', (31126, 31128), False, 'import gpflow\n')] |
import os
import sys
import time
import logging
import collections
import csv
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from .datasets import Landmarks
def _read_csv(path: str):
"""Reads a csv file, and returns the content inside a list of dictionaries.
Args:
path: The path to the csv file.
Returns:
A list of dictionaries. Each row in the csv file will be a list entry. The
dictionary is keyed by the column names.
"""
with open(path, 'r') as f:
return list(csv.DictReader(f))
# class Cutout(object):
# def __init__(self, length):
# self.length = length
# def __call__(self, img):
# h, w = img.size(1), img.size(2)
# mask = np.ones((h, w), np.float32)
# y = np.random.randint(h)
# x = np.random.randint(w)
# y1 = np.clip(y - self.length // 2, 0, h)
# y2 = np.clip(y + self.length // 2, 0, h)
# x1 = np.clip(x - self.length // 2, 0, w)
# x2 = np.clip(x + self.length // 2, 0, w)
# mask[y1: y2, x1: x2] = 0.
# mask = torch.from_numpy(mask)
# mask = mask.expand_as(img)
# img *= mask
# return img
# def _data_transforms_landmarks():
# landmarks_MEAN = [0.5071, 0.4865, 0.4409]
# landmarks_STD = [0.2673, 0.2564, 0.2762]
# train_transform = transforms.Compose([
# transforms.ToPILImage(),
# transforms.RandomCrop(32, padding=4),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# transforms.Normalize(landmarks_MEAN, landmarks_STD),
# ])
# train_transform.transforms.append(Cutout(16))
# valid_transform = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize(landmarks_MEAN, landmarks_STD),
# ])
# return train_transform, valid_transform
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
def _data_transforms_landmarks():
# IMAGENET_MEAN = [0.5071, 0.4865, 0.4409]
# IMAGENET_STD = [0.2673, 0.2564, 0.2762]
IMAGENET_MEAN = [0.5, 0.5, 0.5]
IMAGENET_STD = [0.5, 0.5, 0.5]
image_size = 224
train_transform = transforms.Compose([
# transforms.ToPILImage(),
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
])
train_transform.transforms.append(Cutout(16))
valid_transform = transforms.Compose([
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
])
return train_transform, valid_transform
def get_mapping_per_user(fn):
"""
mapping_per_user is {'user_id': [{'user_id': xxx, 'image_id': xxx, 'class': xxx} ... {}],
'user_id': [{'user_id': xxx, 'image_id': xxx, 'class': xxx} ... {}],
} or
[{'user_id': xxx, 'image_id': xxx, 'class': xxx} ...
{'user_id': xxx, 'image_id': xxx, 'class': xxx} ... ]
}
"""
mapping_table = _read_csv(fn)
expected_cols = ['user_id', 'image_id', 'class']
if not all(col in mapping_table[0].keys() for col in expected_cols):
logging.error('%s has wrong format.', mapping_file)
raise ValueError(
'The mapping file must contain user_id, image_id and class columns. '
'The existing columns are %s' % ','.join(mapping_table[0].keys()))
data_local_num_dict = dict()
mapping_per_user = collections.defaultdict(list)
data_files = []
net_dataidx_map = {}
sum_temp = 0
for row in mapping_table:
user_id = row['user_id']
mapping_per_user[user_id].append(row)
for user_id, data in mapping_per_user.items():
num_local = len(mapping_per_user[user_id])
# net_dataidx_map[user_id]= (sum_temp, sum_temp+num_local)
# data_local_num_dict[user_id] = num_local
net_dataidx_map[int(user_id)]= (sum_temp, sum_temp+num_local)
data_local_num_dict[int(user_id)] = num_local
sum_temp += num_local
data_files += mapping_per_user[user_id]
assert sum_temp == len(data_files)
return data_files, data_local_num_dict, net_dataidx_map
# for centralized training
def get_dataloader(dataset, datadir, train_files, test_files, train_bs, test_bs, dataidxs=None):
return get_dataloader_Landmarks(datadir, train_files, test_files, train_bs, test_bs, dataidxs)
# for local devices
def get_dataloader_test(dataset, datadir, train_files, test_files, train_bs, test_bs, dataidxs_train, dataidxs_test):
return get_dataloader_test_Landmarks(datadir, train_files, test_files, train_bs, test_bs, dataidxs_train, dataidxs_test)
def get_dataloader_Landmarks(datadir, train_files, test_files, train_bs, test_bs, dataidxs=None):
dl_obj = Landmarks
transform_train, transform_test = _data_transforms_landmarks()
train_ds = dl_obj(datadir, train_files, dataidxs=dataidxs, train=True, transform=transform_train, download=True)
test_ds = dl_obj(datadir, test_files, dataidxs=None, train=False, transform=transform_test, download=True)
train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=False)
test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False)
return train_dl, test_dl
def get_dataloader_test_Landmarks(datadir, train_files, test_files, train_bs, test_bs, dataidxs_train=None, dataidxs_test=None):
dl_obj = Landmarks
transform_train, transform_test = _data_transforms_landmarks()
train_ds = dl_obj(datadir, train_files, dataidxs=dataidxs_train, train=True, transform=transform_train, download=True)
test_ds = dl_obj(datadir, test_files, dataidxs=dataidxs_test, train=False, transform=transform_test, download=True)
train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=False)
test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False)
return train_dl, test_dl
def load_partition_data_landmarks(dataset, data_dir, fed_train_map_file, fed_test_map_file,
partition_method=None, partition_alpha=None, client_number=233, batch_size=10):
train_files, data_local_num_dict, net_dataidx_map = get_mapping_per_user(fed_train_map_file)
test_files = _read_csv(fed_test_map_file)
class_num = len(np.unique([item['class'] for item in train_files]))
# logging.info("traindata_cls_counts = " + str(traindata_cls_counts))
train_data_num = len(train_files)
train_data_global, test_data_global = get_dataloader(dataset, data_dir, train_files, test_files, batch_size, batch_size)
# logging.info("train_dl_global number = " + str(len(train_data_global)))
# logging.info("test_dl_global number = " + str(len(test_data_global)))
test_data_num = len(test_files)
# get local dataset
data_local_num_dict = data_local_num_dict
train_data_local_dict = dict()
test_data_local_dict = dict()
for client_idx in range(client_number):
dataidxs = net_dataidx_map[client_idx]
# local_data_num = len(dataidxs)
local_data_num = dataidxs[1] - dataidxs[0]
# data_local_num_dict[client_idx] = local_data_num
# logging.info("client_idx = %d, local_sample_number = %d" % (client_idx, local_data_num))
# training batch size = 64; algorithms batch size = 32
train_data_local, test_data_local = get_dataloader(dataset, data_dir, train_files, test_files, batch_size, batch_size,
dataidxs)
# logging.info("client_idx = %d, batch_num_train_local = %d, batch_num_test_local = %d" % (
# client_idx, len(train_data_local), len(test_data_local)))
train_data_local_dict[client_idx] = train_data_local
test_data_local_dict[client_idx] = test_data_local
# logging("data_local_num_dict: %s" % data_local_num_dict)
return train_data_num, test_data_num, train_data_global, test_data_global, \
data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num
if __name__ == '__main__':
data_dir = './cache/images'
fed_g23k_train_map_file = '../../../data/gld/data_user_dict/gld23k_user_dict_train.csv'
fed_g23k_test_map_file = '../../../data/gld/data_user_dict/gld23k_user_dict_test.csv'
fed_g160k_train_map_file = '../../../data/gld/data_user_dict/gld160k_user_dict_train.csv'
fed_g160k_map_file = '../../../data/gld/data_user_dict/gld160k_user_dict_test.csv'
dataset_name = 'g160k'
if dataset_name == 'g23k':
client_number = 233
fed_train_map_file = fed_g23k_train_map_file
fed_test_map_file = fed_g23k_test_map_file
elif dataset_name == 'g160k':
client_number = 1262
fed_train_map_file = fed_g160k_train_map_file
fed_test_map_file = fed_g160k_map_file
train_data_num, test_data_num, train_data_global, test_data_global, \
data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num = \
load_partition_data_landmarks(None, data_dir, fed_train_map_file, fed_test_map_file,
partition_method=None, partition_alpha=None, client_number=client_number, batch_size=10)
print(train_data_num, test_data_num, class_num)
print(data_local_num_dict)
i = 0
for data, label in train_data_global:
print(data)
print(label)
i += 1
if i > 5:
break
print("=============================\n")
for client_idx in range(client_number):
i = 0
for data, label in train_data_local_dict[client_idx]:
print(data)
print(label)
i += 1
if i > 5:
break
| [
"logging.error",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomHorizontalFlip",
"csv.DictReader",
"torchvision.transforms.Normalize",
"numpy.ones",
"numpy.clip",
"torchvision.transforms.ToTensor",
"collections.defaultdict",
"numpy.random.randint",
"torchvision.transforms.CenterCro... | [((4141, 4170), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (4164, 4170), False, 'import collections\n'), ((5791, 5880), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'train_ds', 'batch_size': 'train_bs', 'shuffle': '(True)', 'drop_last': '(False)'}), '(dataset=train_ds, batch_size=train_bs, shuffle=True,\n drop_last=False)\n', (5806, 5880), True, 'import torch.utils.data as data\n'), ((5891, 5979), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'test_ds', 'batch_size': 'test_bs', 'shuffle': '(False)', 'drop_last': '(False)'}), '(dataset=test_ds, batch_size=test_bs, shuffle=False,\n drop_last=False)\n', (5906, 5979), True, 'import torch.utils.data as data\n'), ((6488, 6577), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'train_ds', 'batch_size': 'train_bs', 'shuffle': '(True)', 'drop_last': '(False)'}), '(dataset=train_ds, batch_size=train_bs, shuffle=True,\n drop_last=False)\n', (6503, 6577), True, 'import torch.utils.data as data\n'), ((6588, 6676), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'test_ds', 'batch_size': 'test_bs', 'shuffle': '(False)', 'drop_last': '(False)'}), '(dataset=test_ds, batch_size=test_bs, shuffle=False,\n drop_last=False)\n', (6603, 6676), True, 'import torch.utils.data as data\n'), ((2047, 2074), 'numpy.ones', 'np.ones', (['(h, w)', 'np.float32'], {}), '((h, w), np.float32)\n', (2054, 2074), True, 'import numpy as np\n'), ((2087, 2107), 'numpy.random.randint', 'np.random.randint', (['h'], {}), '(h)\n', (2104, 2107), True, 'import numpy as np\n'), ((2120, 2140), 'numpy.random.randint', 'np.random.randint', (['w'], {}), '(w)\n', (2137, 2140), True, 'import numpy as np\n'), ((2155, 2190), 'numpy.clip', 'np.clip', (['(y - self.length // 2)', '(0)', 'h'], {}), '(y - self.length // 2, 0, h)\n', (2162, 2190), True, 'import numpy as np\n'), ((2204, 2239), 'numpy.clip', 'np.clip', (['(y 
+ self.length // 2)', '(0)', 'h'], {}), '(y + self.length // 2, 0, h)\n', (2211, 2239), True, 'import numpy as np\n'), ((2253, 2288), 'numpy.clip', 'np.clip', (['(x - self.length // 2)', '(0)', 'w'], {}), '(x - self.length // 2, 0, w)\n', (2260, 2288), True, 'import numpy as np\n'), ((2302, 2337), 'numpy.clip', 'np.clip', (['(x + self.length // 2)', '(0)', 'w'], {}), '(x + self.length // 2, 0, w)\n', (2309, 2337), True, 'import numpy as np\n'), ((2388, 2410), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (2404, 2410), False, 'import torch\n'), ((3843, 3894), 'logging.error', 'logging.error', (['"""%s has wrong format."""', 'mapping_file'], {}), "('%s has wrong format.', mapping_file)\n", (3856, 3894), False, 'import logging\n'), ((7071, 7121), 'numpy.unique', 'np.unique', (["[item['class'] for item in train_files]"], {}), "([item['class'] for item in train_files])\n", (7080, 7121), True, 'import numpy as np\n'), ((556, 573), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (570, 573), False, 'import csv\n'), ((2794, 2834), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['image_size'], {}), '(image_size)\n', (2822, 2834), True, 'import torchvision.transforms as transforms\n'), ((2844, 2877), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2875, 2877), True, 'import torchvision.transforms as transforms\n'), ((2887, 2908), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2906, 2908), True, 'import torchvision.transforms as transforms\n'), ((2918, 2967), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['IMAGENET_MEAN', 'IMAGENET_STD'], {}), '(IMAGENET_MEAN, IMAGENET_STD)\n', (2938, 2967), True, 'import torchvision.transforms as transforms\n'), ((3079, 3105), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (3100, 3105), True, 'import torchvision.transforms as transforms\n'), 
((3115, 3136), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3134, 3136), True, 'import torchvision.transforms as transforms\n'), ((3146, 3195), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['IMAGENET_MEAN', 'IMAGENET_STD'], {}), '(IMAGENET_MEAN, IMAGENET_STD)\n', (3166, 3195), True, 'import torchvision.transforms as transforms\n')] |
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import sys
import os
import random
import numpy as np
import torch.nn.functional as F
from torch.utils.data import DataLoader
from apl import models
from apl import memory_store
from datasets import omniglot
# --- Default hyperparameters (overridable via command-line flags below) ---
N_CLASSES = 200         # number of Omniglot classes per episode (RestrictedOmniglot / decoder output size)
N_NEIGHBOURS = 5        # nearest neighbours fetched from the memory store per query
MAX_BATCHES = 3000      # NOTE(review): not used in the visible code — presumably a training-loop cap; confirm
MEMORY_SIZE = 10000     # capacity of the external MemoryStore
SIGMA_RATIO = 0.75      # scales log(n_classes) to form the surprise/NLL threshold
QUERY_EMBED_DIM = 64    # dimensionality of encoder query embeddings
LABEL_EMBED_DIM = 32    # dimensionality of label embeddings inside the decoder
KEY_SIZE = 256          # attention key size in the RSA feed-forward decoder
VALUE_SIZE = 256        # attention value size in the RSA feed-forward decoder
N_HEADS = 2             # number of attention heads in the decoder
NUM_LAYERS = 5          # number of decoder layers
USE_CUDA = True         # request GPU execution if available
SAVE_FREQUENCY = 100    # NOTE(review): not used in the visible code — presumably a checkpoint interval; confirm
def to_one_hot(y, n_dims=None):
    """Convert an integer label tensor ``y`` to a one-hot encoding.

    The result has the same leading shape as ``y`` plus one trailing
    dimension of size ``n_dims``. When ``n_dims`` is None it is inferred
    as ``max(y) + 1``. If ``y`` is a (deprecated) ``torch.autograd.Variable``
    the result is wrapped back into a ``Variable``.
    """
    wrap_as_variable = isinstance(y, torch.autograd.Variable)
    # Flatten labels to a column vector of int64 indices for scatter_.
    raw = y.data if wrap_as_variable else y
    flat = raw.long().view(-1, 1)
    if n_dims is None:
        n_dims = int(torch.max(flat)) + 1
    encoded = torch.zeros(flat.size()[0], n_dims, device=y.device)
    encoded.scatter_(1, flat, 1)  # put a 1 at each label position
    encoded = encoded.view(*y.shape, -1)
    if wrap_as_variable:
        return torch.autograd.Variable(encoded)
    return encoded
def split_batch(batch, nshot, n_classes, n_per_class):
    """Split a class-ordered batch into context (support) and query sets.

    Assumes ``batch`` is laid out as ``n_classes`` consecutive runs of
    ``n_per_class`` examples each. For every class, the first ``nshot``
    examples go to the context set and the remaining
    ``n_per_class - nshot`` go to the query set.

    Args:
        batch: Sequence of examples, grouped contiguously by class.
        nshot: Number of context examples per class.
        n_classes: Number of classes in the batch.
        n_per_class: Number of examples per class.

    Returns:
        A ``(context, query)`` pair of lists.
    """
    context = []
    query = []
    for i in range(n_classes):
        class_start = i * n_per_class
        # Plain slices replace the original element-by-element comprehensions.
        context.extend(batch[class_start:class_start + nshot])
        query.extend(batch[class_start + nshot:class_start + n_per_class])
    return context, query
def _str2bool(value):
    """Parse a command-line boolean string.

    ``type=bool`` is a classic argparse pitfall: ``bool("False")`` is
    True, so any non-empty value (including ``--use_cuda False``) used
    to enable CUDA. This parser accepts the usual textual spellings and
    rejects anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ("true", "t", "yes", "y", "1"):
        return True
    if lowered in ("false", "f", "no", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("expected a boolean, got %r" % value)


def get_arguments():
    """Build and parse the command-line arguments for checkpoint evaluation.

    Defaults come from the module-level hyperparameter constants.

    Returns:
        argparse.Namespace with the parsed flags.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint", type=str)
    parser.add_argument("--n_classes", type=int, default=N_CLASSES)
    parser.add_argument("--n_neighbours", type=int, default=N_NEIGHBOURS)
    parser.add_argument("--memory_size", type=int, default=MEMORY_SIZE)
    parser.add_argument("--sigma_ratio", type=float, default=SIGMA_RATIO)
    parser.add_argument("--query_embed_dim", type=int, default=QUERY_EMBED_DIM)
    parser.add_argument("--label_embed_dim", type=int, default=LABEL_EMBED_DIM)
    parser.add_argument("--key_size", type=int, default=KEY_SIZE)
    parser.add_argument("--value_size", type=int, default=VALUE_SIZE)
    parser.add_argument("--n_heads", type=int, default=N_HEADS)
    parser.add_argument("--num_layers", type=int, default=NUM_LAYERS)
    # was type=bool, which made "--use_cuda False" evaluate to True
    parser.add_argument("--use_cuda", type=_str2bool, default=USE_CUDA)
    return parser.parse_args()
def _score_query(dec, memory, query_embeds, target):
    """Score one embedded query against the memory store.

    Runs the decoder on the query's nearest memory entries and also
    computes two decoder-free baselines (distance-kernel vote, top-1
    neighbour label match).

    Returns:
        (acc, ker_acc, top1_match, batch_loss) where the first three are
        floats in [0, 1] and batch_loss is the per-example NLL tensor.
    """
    buffer_embeds, buffer_labels, distances = memory.get_nearest_entries(query_embeds)
    # Baseline 1: does the single nearest neighbour carry the right label?
    top1_labels = buffer_labels[:, 0]
    top1_match = float(torch.mean((top1_labels == target).double()))
    # Full model: decoder attends over the retrieved neighbours.
    logprob = dec(buffer_embeds, buffer_labels, query_embeds, distances)
    preds = torch.argmax(logprob, dim=1)
    acc = float(torch.mean((preds == target).double()))
    # reduction='none' replaces the deprecated reduce=False.
    batch_loss = F.cross_entropy(logprob, target, reduction='none')
    # Baseline 2: softmax-kernel vote over the neighbours' labels.
    n_classes = memory.n_classes
    dist_probs = F.softmax(-distances, dim=1)
    ker_probs = to_one_hot(
        buffer_labels, n_dims=n_classes + 1)[:, :, :n_classes] * dist_probs.unsqueeze(-1)
    ker_probs = torch.sum(ker_probs, dim=1)
    ker_pred = torch.argmax(ker_probs, dim=1)
    ker_acc = float(torch.mean((ker_pred == target).double()))
    return acc, ker_acc, top1_match, batch_loss


def test_checkpoint():
    """Evaluate a saved APL encoder/decoder checkpoint on RestrictedOmniglot.

    Two evaluations are reported:
      1. Online: a shuffled pass over the full test batch, where
         "surprising" queries (NLL above the sigma threshold) are written
         into the memory as we go.
      2. Fixed-context: the memory is rebuilt from a 1-shot context split
         and the remaining examples are used as queries.
    """
    args = get_arguments()
    use_cuda = args.use_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # Build the model and restore the checkpoint.
    enc = models.Encoder()
    dec = models.RSAFFDecoder(
        args.n_classes, args.query_embed_dim, args.label_embed_dim,
        args.n_neighbours, args.key_size, args.value_size, args.n_heads,
        args.num_layers)
    enc.to(device)
    dec.to(device)
    memory = memory_store.MemoryStore(
        args.memory_size, args.n_classes,
        args.n_neighbours, args.query_embed_dim, device)
    # NOTE(review): train_dataset is never read below; kept because
    # constructing it may prepare the on-disk dataset -- confirm.
    train_dataset = omniglot.RestrictedOmniglot(
        "data/Omniglot", args.n_classes, train=True, noise_std=0.1)
    test_dataset = omniglot.RestrictedOmniglot(
        "data/Omniglot", args.n_classes, train=False, noise_std=0)
    # Queries whose NLL exceeds this threshold are "surprising" and get
    # written to memory during the online pass.
    nll_threshold = args.sigma_ratio * np.log(args.n_classes)
    checkpoint = torch.load(args.checkpoint)
    enc.load_state_dict(checkpoint['encoder_state'])
    dec.load_state_dict(checkpoint['decoder_state'])
    memory.flush()
    enc.eval()
    dec.eval()

    # --- Online evaluation over the shuffled test batch. ---
    accuracy = []
    ker_accuracy = []
    memory_size = []
    top1_matches = []
    test_dataset.shuffle_classes()
    batch = list(test_dataset)
    shuffled_batch = random.sample(batch, len(batch))
    for data, target in shuffled_batch:
        target = torch.Tensor([target]).long()
        data, target = data.to(device), target.to(device)
        query_embeds = enc(data.unsqueeze(0))
        acc, ker_acc, top1_match, batch_loss = _score_query(
            dec, memory, query_embeds, target)
        # Memorize only the surprising queries.
        surprise_indices = torch.nonzero(batch_loss > nll_threshold)
        for idx in surprise_indices:
            memory.add_entry(query_embeds[idx], target[idx])
        accuracy.append(acc)
        ker_accuracy.append(ker_acc)
        memory_size.append(len(memory))
        top1_matches.append(top1_match)
    accuracy = np.array(accuracy)
    ker_accuracy = np.array(ker_accuracy)
    top1_matches = np.array(top1_matches)
    n_classes = memory.n_classes
    print("APL (full) / no decoder (kernel) / no decoder (top1)")
    print("Final accuracy (last n_classes items): {:.3f} / {:.3f} / {:.3f}".format(
        np.mean(accuracy[-n_classes:]), np.mean(ker_accuracy[-n_classes:]),
        np.mean(top1_matches[-n_classes:])))
    print("Final avg. memory size {}".format(int(np.mean(memory_size[-n_classes:]))))

    # --- Fixed-context evaluation: 1-shot context, remainder as queries. ---
    memory.flush()
    context, query = split_batch(batch, nshot=1, n_classes=n_classes, n_per_class=20)
    for data, target in context:
        data = data.unsqueeze(0)
        target = torch.Tensor([target]).long()
        data, target = data.to(device), target.to(device)
        memory.add_entry(enc(data), target)
    accuracy = []
    ker_accuracy = []
    top1_matches = []
    for data, target in query:
        data = data.unsqueeze(0)
        target = torch.Tensor([target]).long()
        data, target = data.to(device), target.to(device)
        acc, ker_acc, top1_match, batch_loss = _score_query(
            dec, memory, enc(data), target)
        accuracy.append(acc)
        ker_accuracy.append(ker_acc)
        top1_matches.append(top1_match)
    print("APL (full) / no decoder (kernel) / no decoder (top1)")
    print("Avg. accuracy: {:.3f} / {:.3f} / {:.3f}".format(
        np.mean(accuracy), np.mean(ker_accuracy), np.mean(top1_matches)))


if __name__ == "__main__":
    test_checkpoint()
| [
"argparse.ArgumentParser",
"torch.argmax",
"numpy.mean",
"apl.memory_store.MemoryStore",
"torch.device",
"torch.load",
"torch.Tensor",
"torch.mean",
"torch.autograd.Variable",
"torch.nn.functional.cross_entropy",
"apl.models.Encoder",
"torch.cuda.is_available",
"torch.max",
"torch.sum",
... | [((1537, 1562), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1560, 1562), False, 'import argparse\n'), ((2553, 2596), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (2565, 2596), False, 'import torch\n'), ((2608, 2624), 'apl.models.Encoder', 'models.Encoder', ([], {}), '()\n', (2622, 2624), False, 'from apl import models\n'), ((2635, 2805), 'apl.models.RSAFFDecoder', 'models.RSAFFDecoder', (['args.n_classes', 'args.query_embed_dim', 'args.label_embed_dim', 'args.n_neighbours', 'args.key_size', 'args.value_size', 'args.n_heads', 'args.num_layers'], {}), '(args.n_classes, args.query_embed_dim, args.\n label_embed_dim, args.n_neighbours, args.key_size, args.value_size,\n args.n_heads, args.num_layers)\n', (2654, 2805), False, 'from apl import models\n'), ((2873, 2985), 'apl.memory_store.MemoryStore', 'memory_store.MemoryStore', (['args.memory_size', 'args.n_classes', 'args.n_neighbours', 'args.query_embed_dim', 'device'], {}), '(args.memory_size, args.n_classes, args.\n n_neighbours, args.query_embed_dim, device)\n', (2897, 2985), False, 'from apl import memory_store\n'), ((3018, 3109), 'datasets.omniglot.RestrictedOmniglot', 'omniglot.RestrictedOmniglot', (['"""data/Omniglot"""', 'args.n_classes'], {'train': '(True)', 'noise_std': '(0.1)'}), "('data/Omniglot', args.n_classes, train=True,\n noise_std=0.1)\n", (3045, 3109), False, 'from datasets import omniglot\n'), ((3134, 3224), 'datasets.omniglot.RestrictedOmniglot', 'omniglot.RestrictedOmniglot', (['"""data/Omniglot"""', 'args.n_classes'], {'train': '(False)', 'noise_std': '(0)'}), "('data/Omniglot', args.n_classes, train=False,\n noise_std=0)\n", (3161, 3224), False, 'from datasets import omniglot\n'), ((3310, 3337), 'torch.load', 'torch.load', (['args.checkpoint'], {}), '(args.checkpoint)\n', (3320, 3337), False, 'import torch\n'), ((5276, 5294), 'numpy.array', 'np.array', (['accuracy'], {}), '(accuracy)\n', (5284, 
5294), True, 'import numpy as np\n'), ((5314, 5336), 'numpy.array', 'np.array', (['ker_accuracy'], {}), '(ker_accuracy)\n', (5322, 5336), True, 'import numpy as np\n'), ((5356, 5378), 'numpy.array', 'np.array', (['top1_matches'], {}), '(top1_matches)\n', (5364, 5378), True, 'import numpy as np\n'), ((1019, 1053), 'torch.autograd.Variable', 'torch.autograd.Variable', (['y_one_hot'], {}), '(y_one_hot)\n', (1042, 1053), False, 'import torch\n'), ((2514, 2539), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2537, 2539), False, 'import torch\n'), ((3269, 3291), 'numpy.log', 'np.log', (['args.n_classes'], {}), '(args.n_classes)\n', (3275, 3291), True, 'import numpy as np\n'), ((4340, 4368), 'torch.argmax', 'torch.argmax', (['logprob'], {'dim': '(1)'}), '(logprob, dim=1)\n', (4352, 4368), False, 'import torch\n'), ((4450, 4496), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logprob', 'target'], {'reduce': '(False)'}), '(logprob, target, reduce=False)\n', (4465, 4496), True, 'import torch.nn.functional as F\n'), ((4556, 4584), 'torch.nn.functional.softmax', 'F.softmax', (['(-distances)'], {'dim': '(1)'}), '(-distances, dim=1)\n', (4565, 4584), True, 'import torch.nn.functional as F\n'), ((4731, 4758), 'torch.sum', 'torch.sum', (['ker_probs'], {'dim': '(1)'}), '(ker_probs, dim=1)\n', (4740, 4758), False, 'import torch\n'), ((4778, 4808), 'torch.argmax', 'torch.argmax', (['ker_probs'], {'dim': '(1)'}), '(ker_probs, dim=1)\n', (4790, 4808), False, 'import torch\n'), ((4917, 4958), 'torch.nonzero', 'torch.nonzero', (['(batch_loss > nll_threshold)'], {}), '(batch_loss > nll_threshold)\n', (4930, 4958), False, 'import torch\n'), ((6724, 6752), 'torch.argmax', 'torch.argmax', (['logprob'], {'dim': '(1)'}), '(logprob, dim=1)\n', (6736, 6752), False, 'import torch\n'), ((6834, 6880), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logprob', 'target'], {'reduce': '(False)'}), '(logprob, target, reduce=False)\n', (6849, 6880), True, 
'import torch.nn.functional as F\n'), ((6940, 6968), 'torch.nn.functional.softmax', 'F.softmax', (['(-distances)'], {'dim': '(1)'}), '(-distances, dim=1)\n', (6949, 6968), True, 'import torch.nn.functional as F\n'), ((7115, 7142), 'torch.sum', 'torch.sum', (['ker_probs'], {'dim': '(1)'}), '(ker_probs, dim=1)\n', (7124, 7142), False, 'import torch\n'), ((7162, 7192), 'torch.argmax', 'torch.argmax', (['ker_probs'], {'dim': '(1)'}), '(ker_probs, dim=1)\n', (7174, 7192), False, 'import torch\n'), ((5538, 5568), 'numpy.mean', 'np.mean', (['accuracy[-n_classes:]'], {}), '(accuracy[-n_classes:])\n', (5545, 5568), True, 'import numpy as np\n'), ((5570, 5604), 'numpy.mean', 'np.mean', (['ker_accuracy[-n_classes:]'], {}), '(ker_accuracy[-n_classes:])\n', (5577, 5604), True, 'import numpy as np\n'), ((5614, 5648), 'numpy.mean', 'np.mean', (['top1_matches[-n_classes:]'], {}), '(top1_matches[-n_classes:])\n', (5621, 5648), True, 'import numpy as np\n'), ((7571, 7588), 'numpy.mean', 'np.mean', (['accuracy'], {}), '(accuracy)\n', (7578, 7588), True, 'import numpy as np\n'), ((7590, 7611), 'numpy.mean', 'np.mean', (['ker_accuracy'], {}), '(ker_accuracy)\n', (7597, 7611), True, 'import numpy as np\n'), ((7613, 7634), 'numpy.mean', 'np.mean', (['top1_matches'], {}), '(top1_matches)\n', (7620, 7634), True, 'import numpy as np\n'), ((831, 850), 'torch.max', 'torch.max', (['y_tensor'], {}), '(y_tensor)\n', (840, 850), False, 'import torch\n'), ((3906, 3928), 'torch.Tensor', 'torch.Tensor', (['[target]'], {}), '([target])\n', (3918, 3928), False, 'import torch\n'), ((5235, 5257), 'torch.mean', 'torch.mean', (['batch_loss'], {}), '(batch_loss)\n', (5245, 5257), False, 'import torch\n'), ((5700, 5733), 'numpy.mean', 'np.mean', (['memory_size[-n_classes:]'], {}), '(memory_size[-n_classes:])\n', (5707, 5733), True, 'import numpy as np\n'), ((5988, 6014), 'torch.Tensor', 'torch.Tensor', (['[example[1]]'], {}), '([example[1]])\n', (6000, 6014), False, 'import torch\n'), ((6302, 6324), 
'torch.Tensor', 'torch.Tensor', (['[target]'], {}), '([target])\n', (6314, 6324), False, 'import torch\n'), ((7411, 7433), 'torch.mean', 'torch.mean', (['batch_loss'], {}), '(batch_loss)\n', (7421, 7433), False, 'import torch\n')] |
from pdb import set_trace as T
import numpy as np
from nmmo.lib import overlay
from nmmo.lib.colors import Neon
from nmmo.systems import combat
class OverlayRegistry:
   '''Creates and drives the client visualization overlays.'''
   def __init__(self, config, realm):
      '''
      Args:
         config: A Config object
         realm: An environment
      '''
      self.initialized = False
      self.config = config
      self.realm = realm
      # Overlay classes; replaced in place by instances on the first step().
      self.overlays = {
            'counts':     Counts,
            'skills':     Skills,
            'wilderness': Wilderness}
   def init(self):
      '''Instantiate every registered overlay class.'''
      self.overlays = {cmd: cls(self.config, self.realm)
                       for cmd, cls in self.overlays.items()}
   def step(self, obs, pos, cmd):
      '''Per-tick overlay updates
      Args:
         obs: Observation returned by the environment
         pos: Client camera focus position
         cmd: User command returned by the client
      '''
      if not self.initialized:
         self.initialized = True
         self.init()
      self.realm.overlayPos = pos
      for active in self.overlays.values():
         active.update(obs)
      if cmd in self.overlays:
         self.overlays[cmd].register(obs)
class Overlay:
   '''Define a overlay for visualization in the client

   Overlays are color images of the same size as the game map.
   They are rendered over the environment with transparency and
   can be used to gain insight about agent behaviors.'''
   def __init__(self, config, realm, *args):
      '''
      Args:
         config: A Config object
         realm: An environment
      '''
      self.config = config
      self.realm = realm
      self.size = config.TERRAIN_SIZE
      # Per-tile scalar buffer; subclasses may replace it with a
      # multi-channel array.
      self.values = np.zeros((self.size, self.size))
   def update(self, obs):
      '''Compute per-tick updates to this overlay. Override per overlay.
      Args:
         obs: Observation returned by the environment
      '''
      pass
   def register(self, obs):
      '''Compute the overlay and register it within realm. Override per overlay.
      Args:
         obs: Observation returned by the environment
      BUG FIX: the base signature previously took no *obs*, but
      OverlayRegistry.step always calls register(obs) and every subclass
      accepts it; the base class now matches.
      '''
      pass
class Skills(Overlay):
   def __init__(self, config, realm, *args):
      '''Indicates whether agents specialize in foraging or combat'''
      super().__init__(config, realm)
      # Channel 0 holds the foraging level, channel 1 the combat level
      # (written in update below).
      self.nSkills = 2
      self.values = np.zeros((self.size, self.size, self.nSkills))
   def update(self, obs):
      '''Records each agent's foraging and combat levels on its tile'''
      for entID, agent in self.realm.realm.players.items():
         r, c = agent.base.pos
         # Foraging proxy: mean of the fishing and hunting levels.
         skillLvl = (agent.skills.fishing.level + agent.skills.hunting.level)/2.0
         combatLvl = combat.level(agent.skills)
         # Skip agents at exactly these levels -- presumably the spawn
         # defaults, so idle agents do not paint the map. TODO confirm.
         if skillLvl == 10 and combatLvl == 3:
            continue
         self.values[r, c, 0] = skillLvl
         self.values[r, c, 1] = combatLvl
   def register(self, obs):
      values = np.zeros((self.size, self.size, self.nSkills))
      # Z-score each channel over its populated (non-zero) tiles.
      for idx in range(self.nSkills):
         ary = self.values[:, :, idx]
         vals = ary[ary != 0]
         mean = np.mean(vals)
         std = np.std(vals)
         # Guard against a constant channel (zero variance).
         if std == 0:
            std = 1
         values[:, :, idx] = (ary - mean) / std
         # NOTE(review): this masks *all* channels wherever the current
         # channel is zero; if per-channel masking was intended it would
         # be values[:, :, idx][ary == 0] = 0 -- confirm.
         values[ary == 0] = 0
      # Color each tile by its dominant channel: Neon.BLUE for channel 0,
      # Neon.BLOOD for channel 1.
      colors = np.array([Neon.BLUE.rgb, Neon.BLOOD.rgb])
      colorized = np.zeros((self.size, self.size, 3))
      amax = np.argmax(values, -1)
      for idx in range(self.nSkills):
         colorized[amax == idx] = colors[idx] / 255
         # Keep unpopulated tiles black.
         colorized[values[:, :, idx] == 0] = 0
      self.realm.register(colorized)
class Counts(Overlay):
   def __init__(self, config, realm, *args):
      '''Tracks per-population tile visitation counts'''
      super().__init__(config, realm)
      # One count channel per population.
      self.values = np.zeros((self.size, self.size, config.NPOP))
   def update(self, obs):
      '''Computes a count-based exploration map by painting
      tiles as agents walk over them'''
      for entID, agent in self.realm.realm.players.items():
         pop = agent.base.population.val
         r, c = agent.base.pos
         # Increment this population's counter on the agent's tile.
         self.values[r, c][pop] += 1
   def register(self, obs):
      # Blend each population's palette color, weighted by its visit counts.
      colors = self.realm.realm.players.palette.colors
      colors = np.array([colors[pop].rgb
            for pop in range(self.config.NPOP)])
      colorized = self.values[:, :, :, None] * colors / 255
      colorized = np.sum(colorized, -2)
      # Total visits per tile; overlay.norm presumably rescales it for use
      # as a brightness factor -- confirm against nmmo.lib.overlay.
      countSum = np.sum(self.values[:, :], -1)
      data = overlay.norm(countSum)[..., None]
      # Avoid division by zero on never-visited tiles.
      countSum[countSum==0] = 1
      colorized = colorized * data / countSum[..., None]
      self.realm.register(colorized)
class Wilderness(Overlay):
   def init(self):
      '''Computes the local wilderness level'''
      levels = np.array(
            [[combat.wilderness(self.config, (row, col))
              for col in range(self.size)]
             for row in range(self.size)], dtype=float)
      self.wildy = overlay.twoTone(
            levels, preprocess='clip', invert=True, periods=5)
   def register(self, obs):
      '''Renders the cached wilderness map, computing it on first use.'''
      if not hasattr(self, 'wildy'):
         print('Initializing Wilderness')
         self.init()
      self.realm.register(self.wildy)
| [
"nmmo.systems.combat.level",
"nmmo.lib.overlay",
"numpy.sum",
"nmmo.systems.combat.wilderness",
"numpy.argmax",
"numpy.std",
"nmmo.lib.overlay.norm",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"nmmo.lib.overlay.twoTone",
"nmmo.lib.overlay.update"
] | [((1731, 1763), 'numpy.zeros', 'np.zeros', (['(self.size, self.size)'], {}), '((self.size, self.size))\n', (1739, 1763), True, 'import numpy as np\n'), ((2301, 2347), 'numpy.zeros', 'np.zeros', (['(self.size, self.size, self.nSkills)'], {}), '((self.size, self.size, self.nSkills))\n', (2309, 2347), True, 'import numpy as np\n'), ((2895, 2941), 'numpy.zeros', 'np.zeros', (['(self.size, self.size, self.nSkills)'], {}), '((self.size, self.size, self.nSkills))\n', (2903, 2941), True, 'import numpy as np\n'), ((3248, 3289), 'numpy.array', 'np.array', (['[Neon.BLUE.rgb, Neon.BLOOD.rgb]'], {}), '([Neon.BLUE.rgb, Neon.BLOOD.rgb])\n', (3256, 3289), True, 'import numpy as np\n'), ((3308, 3343), 'numpy.zeros', 'np.zeros', (['(self.size, self.size, 3)'], {}), '((self.size, self.size, 3))\n', (3316, 3343), True, 'import numpy as np\n'), ((3362, 3383), 'numpy.argmax', 'np.argmax', (['values', '(-1)'], {}), '(values, -1)\n', (3371, 3383), True, 'import numpy as np\n'), ((3687, 3732), 'numpy.zeros', 'np.zeros', (['(self.size, self.size, config.NPOP)'], {}), '((self.size, self.size, config.NPOP))\n', (3695, 3732), True, 'import numpy as np\n'), ((4289, 4310), 'numpy.sum', 'np.sum', (['colorized', '(-2)'], {}), '(colorized, -2)\n', (4295, 4310), True, 'import numpy as np\n'), ((4329, 4358), 'numpy.sum', 'np.sum', (['self.values[:, :]', '(-1)'], {}), '(self.values[:, :], -1)\n', (4335, 4358), True, 'import numpy as np\n'), ((4647, 4679), 'numpy.zeros', 'np.zeros', (['(self.size, self.size)'], {}), '((self.size, self.size))\n', (4655, 4679), True, 'import numpy as np\n'), ((4833, 4897), 'nmmo.lib.overlay.twoTone', 'overlay.twoTone', (['data'], {'preprocess': '"""clip"""', 'invert': '(True)', 'periods': '(5)'}), "(data, preprocess='clip', invert=True, periods=5)\n", (4848, 4897), False, 'from nmmo.lib import overlay\n'), ((655, 687), 'nmmo.lib.overlay', 'overlay', (['self.config', 'self.realm'], {}), '(self.config, self.realm)\n', (662, 687), False, 'from nmmo.lib import 
overlay\n'), ((1107, 1126), 'nmmo.lib.overlay.update', 'overlay.update', (['obs'], {}), '(obs)\n', (1121, 1126), False, 'from nmmo.lib import overlay\n'), ((2671, 2697), 'nmmo.systems.combat.level', 'combat.level', (['agent.skills'], {}), '(agent.skills)\n', (2683, 2697), False, 'from nmmo.systems import combat\n'), ((3065, 3078), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (3072, 3078), True, 'import numpy as np\n'), ((3095, 3107), 'numpy.std', 'np.std', (['vals'], {}), '(vals)\n', (3101, 3107), True, 'import numpy as np\n'), ((4377, 4399), 'nmmo.lib.overlay.norm', 'overlay.norm', (['countSum'], {}), '(countSum)\n', (4389, 4399), False, 'from nmmo.lib import overlay\n'), ((4774, 4812), 'nmmo.systems.combat.wilderness', 'combat.wilderness', (['self.config', '(r, c)'], {}), '(self.config, (r, c))\n', (4791, 4812), False, 'from nmmo.systems import combat\n')] |
"""Evaluate a trained autoencoder checkpoint and sweep a one-class SVM
over its latent codes to find the best ``nu`` hyper-parameter."""
import time
import os
import datetime
import tensorflow as tf
import matplotlib.pyplot as plt
import utils
import numpy as np
from sklearn import svm
import pickle

# Command-line configuration.
tf.flags.DEFINE_string("datasetPath", './data/appearance_spliced_images/appearance.p', "dataset path")
tf.flags.DEFINE_integer("max", 10000, "max number of dataset")
tf.flags.DEFINE_string("checkpoint_dir", "none", "loading latest checkpoint")
tf.flags.DEFINE_string('label_dir', './label/label15.p', "dir of label")
flags = tf.flags.FLAGS

graph = tf.Graph()
with graph.as_default():
    sess = tf.Session()
    with sess.as_default():
        # Restore the latest checkpoint and look up the tensors we need.
        print('loading checkpoint in dir: %s' % flags.checkpoint_dir)
        checkpoint_file = tf.train.latest_checkpoint(flags.checkpoint_dir)
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)
        print('load success')
        encode = graph.get_operation_by_name("hidden_layer_128/Sigmoid").outputs[0]
        recon = graph.get_operation_by_name("finetuning_decoder_3/Sigmoid").outputs[0]
        input_x = graph.get_operation_by_name("input_x").outputs[0]
        mask = graph.get_operation_by_name("mask").outputs[0]

        label = utils.loadlabel(dir=flags.label_dir)
        dataset = utils.load_whole_dataset(max=flags.max, dataset_dir=flags.datasetPath)
        # All-ones mask: no input corruption at evaluation time.
        mask_ts = np.random.binomial(1, 1, dataset.shape)
        encoder_result, recon_result = sess.run(
            [encode, recon], feed_dict={input_x: dataset, mask: mask_ts})
        print('Plot complete now showing...')

        accs = []
        # NOTE(review): dataset_dir here points at what looks like a label
        # file name -- confirm the path is intended.
        test = utils.load_whole_dataset(max=len(label), dataset_dir="./label.appearance15.p")
        mask_ts = np.random.binomial(1, 1, test.shape)
        # BUG FIX: sess.run([encode], ...) returned a one-element list,
        # which is the wrong shape for clf.predict; fetch the tensor
        # directly instead.
        test_result = sess.run(encode, feed_dict={
            input_x: test, mask: mask_ts
        })

        print('starting cross fold validation')
        # BUG FIX: range() does not accept float arguments; sweep nu with
        # np.arange instead. nu must lie in (0, 1], so start just above 0.
        nus = np.arange(0.0001, 0.1, 0.0001)
        for nu in nus:
            print('step nu:{}'.format(nu))
            clf = svm.OneClassSVM(kernel='rbf', gamma='auto', nu=nu)
            clf.fit(encoder_result)
            pre = clf.predict(test_result)
            prediction = np.equal(pre, label)
            # BUG FIX: np.cast[np.float] relies on the removed np.float
            # alias; cast with astype(float) instead.
            accuracy = np.mean(prediction.astype(float))
            print('accuracy:{:g}'.format(accuracy))
            accs.append(accuracy)
        best = int(np.argmax(accs))
        print('biggest nu is:{} acc:{:g}'.format(nus[best], accs[best]))

# To persist the best model:
# clf = svm.OneClassSVM(kernel='rbf', gamma='auto', nu=1e-3)
# clf.fit(encoder_result)
# with open('./svm.model', 'wb') as m:
# pickle.dump(clf, m) | [
"numpy.random.binomial",
"numpy.argmax",
"tensorflow.Session",
"utils.loadlabel",
"numpy.equal",
"sklearn.svm.OneClassSVM",
"tensorflow.train.latest_checkpoint",
"tensorflow.Graph",
"tensorflow.flags.DEFINE_integer",
"utils.load_whole_dataset",
"tensorflow.flags.DEFINE_string"
] | [((165, 271), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""datasetPath"""', '"""./data/appearance_spliced_images/appearance.p"""', '"""dataset path"""'], {}), "('datasetPath',\n './data/appearance_spliced_images/appearance.p', 'dataset path')\n", (187, 271), True, 'import tensorflow as tf\n'), ((268, 330), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""max"""', '(10000)', '"""max number of dataset"""'], {}), "('max', 10000, 'max number of dataset')\n", (291, 330), True, 'import tensorflow as tf\n'), ((331, 408), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""checkpoint_dir"""', '"""none"""', '"""loading latest checkpoint"""'], {}), "('checkpoint_dir', 'none', 'loading latest checkpoint')\n", (353, 408), True, 'import tensorflow as tf\n'), ((409, 481), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""label_dir"""', '"""./label/label15.p"""', '"""dir of label"""'], {}), "('label_dir', './label/label15.p', 'dir of label')\n", (431, 481), True, 'import tensorflow as tf\n'), ((513, 523), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (521, 523), True, 'import tensorflow as tf\n'), ((560, 572), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (570, 572), True, 'import tensorflow as tf\n'), ((697, 745), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['flags.checkpoint_dir'], {}), '(flags.checkpoint_dir)\n', (723, 745), True, 'import tensorflow as tf\n'), ((1288, 1324), 'utils.loadlabel', 'utils.loadlabel', ([], {'dir': 'flags.label_dir'}), '(dir=flags.label_dir)\n', (1303, 1324), False, 'import utils\n'), ((1343, 1413), 'utils.load_whole_dataset', 'utils.load_whole_dataset', ([], {'max': 'flags.max', 'dataset_dir': 'flags.datasetPath'}), '(max=flags.max, dataset_dir=flags.datasetPath)\n', (1367, 1413), False, 'import utils\n'), ((1468, 1507), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(1)', 'dataset.shape'], {}), '(1, 1, dataset.shape)\n', (1486, 
1507), True, 'import numpy as np\n'), ((2358, 2394), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(1)', 'test.shape'], {}), '(1, 1, test.shape)\n', (2376, 2394), True, 'import numpy as np\n'), ((2982, 2997), 'numpy.argmax', 'np.argmax', (['accs'], {}), '(accs)\n', (2991, 2997), True, 'import numpy as np\n'), ((2647, 2696), 'sklearn.svm.OneClassSVM', 'svm.OneClassSVM', ([], {'kernel': '"""rbf"""', 'gamma': '"""auto"""', 'nu': 'i'}), "(kernel='rbf', gamma='auto', nu=i)\n", (2662, 2696), False, 'from sklearn import svm\n'), ((2801, 2821), 'numpy.equal', 'np.equal', (['pre', 'label'], {}), '(pre, label)\n', (2809, 2821), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""This module corresponds to the sdss directory in idlutils.
"""
import os
import re
import numpy as np
from astropy.io import fits
from astropy.utils.data import download_file
from . import PydlutilsException
from .spheregroup import spherematch
from .yanny import yanny
from .. import uniq
#
# Cache for the maskbits file; populated lazily via set_maskbits()
# (see sdss_flagexist / sdss_flagval / sdss_flagname).
#
maskbits = None
#
# Cache the sweep index; presumably keyed by object type and filled on
# first use elsewhere in this module.
#
sweep_cache = {'star': None, 'gal': None, 'sky': None}
#
# Cache for sdss_astrombad() data read from opBadfields.par.
#
opbadfields = None
def default_skyversion():
    """Return the skyversion number to use for photoop imaging.

    Returns
    -------
    :class:`int`
        The default skyversion number, which is hard-coded to 2.

    Examples
    --------
    >>> from pydl.pydlutils.sdss import default_skyversion
    >>> default_skyversion()
    2
    """
    return 2
def sdss_astrombad(run, camcol, field, photolog_version='dr10'):
    """For a list of RUN, CAMCOL, FIELD, return whether each field has bad
    astrometry.

    Parameters
    ----------
    run, camcol, field : :class:`int` or array of :class:`int`
        Run, camcol and field.  If arrays are passed,
        all must have the same length.
    photolog_version : :class:`str`, optional
        Use this version of photolog to obtain the obBadfields.par file,
        if :envvar:`PHOTOLOG_DIR` is not set.

    Returns
    -------
    :class:`numpy.ndarray` of :class:`bool`
        Array of bool.  ``True`` indicates the field is bad.

    Raises
    ------
    :exc:`ValueError`
        If the sizes of the arrays don't match or if the array values are out
        of bounds.

    Notes
    -----
    Reads data from ``$PHOTOLOG_DIR/opfiles/opBadFields.par``.

    If there is a problem with one camcol, we assume a
    problem with all camcols.
    """
    global opbadfields
    #
    # Check inputs
    #
    if isinstance(run, int):
        #
        # Assume all inputs are integers & promote to arrays.
        #
        run = np.array([run], dtype=np.int64)
        camcol = np.array([camcol], dtype=np.int64)
        field = np.array([field], dtype=np.int64)
    else:
        #
        # Check that all inputs have the same shape.
        #
        if run.shape != camcol.shape:
            raise ValueError("camcol.shape does not match run.shape!")
        if run.shape != field.shape:
            raise ValueError("field.shape does not match run.shape!")
    #
    # Check ranges of parameters
    #
    if ((run < 0) | (run >= 2**16)).any():
        raise ValueError("run values are out-of-bounds!")
    if ((camcol < 1) | (camcol > 6)).any():
        raise ValueError("camcol values are out-of-bounds!")
    if ((field < 0) | (field >= 2**12)).any():
        # BUG FIX: this message previously said "camcol".
        raise ValueError("field values are out-of-bounds!")
    #
    # Read the file
    #
    if opbadfields is None:  # pragma: no cover
        if os.getenv('PHOTOLOG_DIR') is None:
            if (photolog_version == 'trunk' or
                    photolog_version.startswith('branches/')):
                iversion = photolog_version
            else:
                iversion = 'tags/'+photolog_version
            baseurl = ('https://svn.sdss.org/public/data/sdss/photolog/' +
                       '{0}/opfiles/opBadfields.par').format(iversion)
            filename = download_file(baseurl, cache=True)
        else:
            filename = os.path.join(os.getenv('PHOTOLOG_DIR'), 'opfiles',
                                    'opBadfields.par')
        astrombadfile = yanny(filename)
        w = ((astrombadfile['BADFIELDS']['problem'] == 'astrom'.encode()) |
             (astrombadfile['BADFIELDS']['problem'] == 'rotator'.encode()))
        opbadfields = astrombadfile['BADFIELDS'][w]
    #
    # opbadfields already has astrom problems selected at this point
    #
    bad = np.zeros(run.shape, dtype=bool)
    for row in opbadfields:
        w = ((run == row['run']) &
             (field >= row['firstfield']) & (field < row['lastfield']))
        if w.any():
            bad[w] = True
    return bad
def sdss_flagexist(flagname, bitname, flagexist=False, whichexist=False):
    """Check for the existence of flags.

    Parameters
    ----------
    flagname : :class:`str`
        The name of a bitmask group. Not case-sensitive.
    bitname : :class:`str` or :class:`list`
        The name(s) of the specific bitmask(s) within the `flagname` group.
    flagexist : :class:`bool`, optional
        If ``True``, also return whether the binary flag named `flagname`
        exists at all, even if `bitname` is wrong.
    whichexist : :class:`bool`, optional
        If ``True``, also return a list containing existence test results
        for each individual flag.

    Returns
    -------
    :class:`bool` or :class:`tuple`
        A boolean value or a tuple of bool.
    """
    global maskbits
    if maskbits is None:  # pragma: no cover
        maskbits = set_maskbits()
    #
    # Normalize the bit name(s) to an upper-case list.
    #
    if isinstance(bitname, str):
        bitnames = [bitname.upper()]
    else:
        bitnames = [b.upper() for b in bitname]
    group = flagname.upper()
    group_exists = group in maskbits
    if group_exists:
        which = [name in maskbits[group] for name in bitnames]
    else:
        which = [False] * len(bitnames)
    all_exist = group_exists and sum(which) == len(which)
    if flagexist and whichexist:
        return (all_exist, group_exists, which)
    if flagexist:
        return (all_exist, group_exists)
    if whichexist:
        return (all_exist, which)
    return all_exist
def sdss_flagname(flagname, flagvalue, concat=False):
    """Return a list of flag names corresponding to the values.

    Parameters
    ----------
    flagname : :class:`str`
        The name of a bitmask group. Not case-sensitive.
    flagvalue : :class:`long`
        The value to be converted into bitmask names.
    concat : :class:`bool`, optional
        If set to ``True``, the list of names is converted to a
        space-separated string.

    Returns
    -------
    :class:`str` or :class:`list`
        The names of the bitmasks encoded in `flagvalue`.

    Raises
    ------
    :exc:`KeyError`
        If `flagname` is invalid.

    Examples
    --------
    >>> from pydl.pydlutils.sdss import sdss_flagname
    >>> sdss_flagname('ANCILLARY_TARGET1',2310346608843161600) # doctest: +REMOTE_DATA
    ['BRIGHTGAL', 'BLAZGX', 'ELG']
    """
    global maskbits
    if maskbits is None:  # pragma: no cover
        maskbits = set_maskbits()
    group = flagname.upper()
    value = np.uint64(flagvalue)
    one = np.uint64(1)
    names = list()
    # Walk the 64 possible bit positions in ascending order.
    for bit in range(64):
        if (value & (one << np.uint64(bit))) == 0:
            continue
        try:
            matches = [name for name, position in maskbits[group].items()
                       if position == bit]
        except KeyError:
            raise KeyError("Unknown flag group {0}!".format(group))
        if matches:
            names.append(matches[0])
    if concat:
        return ' '.join(names)
    return names
def sdss_flagval(flagname, bitname):
    """Convert bitmask names into values.

    Converts human-readable bitmask names into numerical values. The inputs
    are not case-sensitive; all inputs are converted to upper case internally.

    Parameters
    ----------
    flagname : :class:`str`
        The name of a bitmask group.
    bitname : :class:`str` or :class:`list`
        The name(s) of the specific bitmask(s) within the `flagname` group.

    Returns
    -------
    :class:`numpy.uint64`
        The value of the bitmask name(s).

    Raises
    ------
    :exc:`KeyError`
        If `flagname` or `bitname` are invalid names.

    Examples
    --------
    >>> from pydl.pydlutils.sdss import sdss_flagval
    >>> sdss_flagval('ANCILLARY_TARGET1',['BLAZGX','ELG','BRIGHTGAL']) # doctest: +REMOTE_DATA
    2310346608843161600
    """
    global maskbits
    if maskbits is None:  # pragma: no cover
        maskbits = set_maskbits()
    #
    # Normalize the bit name(s) to an upper-case list.
    #
    bitnames = [bitname.upper()] if isinstance(bitname, str) else [
        b.upper() for b in bitname]
    group = flagname.upper()
    flagvalue = np.uint64(0)
    for bit in bitnames:
        if group not in maskbits:
            raise KeyError("Unknown flag group {0}!".format(group))
        if bit not in maskbits[group]:
            raise KeyError("Unknown bit label {0} for flag group {1}!".format(bit, group))
        flagvalue += np.uint64(2)**np.uint64(maskbits[group][bit])
    return flagvalue
def sdss_objid(run, camcol, field, objnum, rerun=301, skyversion=None,
               firstfield=None):
    """Convert SDSS photometric identifiers into CAS-style ObjID.
    Bits are assigned in ObjID thus:
    ===== ========== ===============================================
    Bits  Name       Comment
    ===== ========== ===============================================
    63    empty      unassigned
    59-62 skyVersion resolved sky version (0-15)
    48-58 rerun      number of pipeline rerun
    32-47 run        run number
    29-31 camcol     camera column (1-6)
    28    firstField is this the first field in segment? Usually 0.
    16-27 field      field number within run
    0-15  object     object number within field
    ===== ========== ===============================================
    Parameters
    ----------
    run, camcol, field, objnum : :class:`int` or array of int
        Run, camcol, field and object number within field.  If arrays are
        passed, all must have the same length.
    rerun, skyversion, firstfield : :class:`int` or array of int, optional
        `rerun`, `skyversion` and `firstfield` usually don't change at all,
        especially for ObjIDs in DR8 and later.  If supplied,
        make sure the size matches all the other values.
    Returns
    -------
    :class:`numpy.ndarray` of :class:`numpy.int64`
        The ObjIDs of the objects.
    Raises
    ------
    :exc:`ValueError`
        If the sizes of the arrays don't match or if the array values are
        out of bounds.
    Notes
    -----
    * The ``firstField`` flag is never set in ObjIDs from DR8 and later.
    * On 32-bit systems, makes sure to explicitly declare all inputs as
      64-bit integers.
    Examples
    --------
    >>> from pydl.pydlutils.sdss import sdss_objid
    >>> print(sdss_objid(3704,3,91,146))
    [1237661382772195474]
    """
    if skyversion is None:
        skyversion = default_skyversion()
    if firstfield is None:
        firstfield = 0
    # Promote scalar inputs to length-1 int64 arrays so the shape checks,
    # range checks and bit arithmetic below are fully vectorized.
    if isinstance(run, int):
        run = np.array([run], dtype=np.int64)
    if isinstance(camcol, int):
        camcol = np.array([camcol], dtype=np.int64)
    if isinstance(field, int):
        field = np.array([field], dtype=np.int64)
    if isinstance(objnum, int):
        objnum = np.array([objnum], dtype=np.int64)
    if isinstance(rerun, int):
        # Default-valued scalars are broadcast to run's shape; explicit
        # non-default scalars become length-1 arrays (must match shapes).
        if rerun == 301:
            rerun = np.zeros(run.shape, dtype=np.int64) + 301
        else:
            rerun = np.array([rerun], dtype=np.int64)
    if isinstance(skyversion, int):
        if skyversion == default_skyversion():
            skyversion = np.zeros(run.shape, dtype=np.int64) + default_skyversion()
        else:
            skyversion = np.array([skyversion], dtype=np.int64)
    if isinstance(firstfield, int):
        if firstfield == 0:
            firstfield = np.zeros(run.shape, dtype=np.int64)
        else:
            firstfield = np.array([firstfield], dtype=np.int64)
    #
    # Check that all inputs have the same shape.
    #
    if run.shape != camcol.shape:
        raise ValueError("camcol.shape does not match run.shape!")
    if run.shape != field.shape:
        raise ValueError("field.shape does not match run.shape!")
    if run.shape != objnum.shape:
        raise ValueError("objnum.shape does not match run.shape!")
    if run.shape != rerun.shape:
        raise ValueError("rerun.shape does not match run.shape!")
    if run.shape != skyversion.shape:
        raise ValueError("skyversion.shape does not match run.shape!")
    if run.shape != firstfield.shape:
        raise ValueError("firstfield.shape does not match run.shape!")
    #
    # Check ranges of parameters
    #
    if ((firstfield < 0) | (firstfield > 1)).any():
        raise ValueError("firstfield values are out-of-bounds!")
    if ((skyversion < 0) | (skyversion >= 16)).any():
        raise ValueError("skyversion values are out-of-bounds!")
    if ((rerun < 0) | (rerun >= 2**11)).any():
        raise ValueError("rerun values are out-of-bounds!")
    if ((run < 0) | (run >= 2**16)).any():
        raise ValueError("run values are out-of-bounds!")
    if ((camcol < 1) | (camcol > 6)).any():
        raise ValueError("camcol values are out-of-bounds!")
    # BUGFIX: this message previously said "camcol" (copy-paste from the
    # check above), misreporting which input was out of range.
    if ((field < 0) | (field >= 2**12)).any():
        raise ValueError("field values are out-of-bounds!")
    if ((objnum < 0) | (objnum >= 2**16)).any():
        raise ValueError("id values are out-of-bounds!")
    #
    # Compute the objid by packing each field into its bit range.
    #
    objid = ((skyversion << 59) |
             (rerun << 48) |
             (run << 32) |
             (camcol << 29) |
             (firstfield << 28) |
             (field << 16) |
             (objnum))
    return objid
def sdss_specobjid(plate, fiber, mjd, run2d, line=None, index=None):
    r"""Convert SDSS spectrum identifiers into CAS-style specObjID.
    Bits are assigned in specObjID thus:
    ===== ========== =============================================================
    Bits  Name       Comment
    ===== ========== =============================================================
    50-63 Plate ID   14 bits
    38-49 Fiber ID   12 bits
    24-37 MJD        Date plate was observed minus 50000 (14 bits)
    10-23 run2d      Spectroscopic reduction version
    0-9   line/index 0 for use in SpecObj files see below for other uses (10 bits)
    ===== ========== =============================================================
    Parameters
    ----------
    plate, fiber, mjd : :class:`int` or array of int
        Plate, fiber ID, and MJD for a spectrum.  If arrays are
        passed, all must have the same length.  The MJD value must be
        greater than 50000.
    run2d : :class:`int`, :class:`str` or array of int or str
        The run2d value must be an integer or a string of the form 'vN_M_P'.
        If an array is passed, it must have the same length as the other
        inputs listed above.  If the string form is used, the values are
        restricted to :math:`5 \le N \le 6`, :math:`0 \le M \le 99`,
        :math:`0 \le P \le 99`.
    line : :class:`int`, optional
        A line index, only used for defining specObjID for SpecLine files.
        `line` and `index` cannot both be non-zero.
    index : :class:`int`, optional
        An index measure, only used for defining specObjID for SpecLineIndex
        files.  `line` and `index` cannot both be non-zero.
    Returns
    -------
    :class:`numpy.ndarray` of :class:`numpy.uint64`
        The specObjIDs of the objects.
    Raises
    ------
    :exc:`ValueError`
        If the sizes of the arrays don't match or if the array values are
        out of bounds.
    Notes
    -----
    * On 32-bit systems, makes sure to explicitly declare all inputs as
      64-bit integers.
    * This function defines the SDSS-III/IV version of specObjID, used for
      SDSS DR8 and subsequent data releases.  It is not compatible with
      SDSS DR7 or earlier.
    * If the string form of `run2d` is used, the bits are assigned by
      the formula :math:`(N - 5) \times 10000 + M \times 100 + P`.
    Examples
    --------
    >>> from pydl.pydlutils.sdss import sdss_specobjid
    >>> print(sdss_specobjid(4055,408,55359,'v5_7_0'))
    [4565636362342690816]
    """
    if line is not None and index is not None:
        raise ValueError("line and index inputs cannot both be non-zero!")
    # Promote scalar inputs to length-1 uint64 arrays; the MJD is stored
    # as an offset from 50000 so it fits in 14 bits.
    if isinstance(plate, int):
        plate = np.array([plate], dtype=np.uint64)
    if isinstance(fiber, int):
        fiber = np.array([fiber], dtype=np.uint64)
    if isinstance(mjd, int):
        mjd = np.array([mjd], dtype=np.uint64) - 50000
    if isinstance(run2d, str):
        try:
            run2d = np.array([int(run2d)], dtype=np.uint64)
        except ValueError:
            # Try a "vN_M_P" string.
            m = re.match(r'v(\d+)_(\d+)_(\d+)', run2d)
            if m is None:
                raise ValueError("Could not extract integer run2d value!")
            else:
                N, M, P = m.groups()
                run2d = np.array([(int(N) - 5)*10000 + int(M) * 100 + int(P)],
                                 dtype=np.uint64)
    elif isinstance(run2d, int):
        run2d = np.array([run2d], dtype=np.uint64)
    if line is None:
        line = np.zeros(plate.shape, dtype=plate.dtype)
    else:
        if isinstance(line, int):
            line = np.array([line], dtype=np.uint64)
    if index is None:
        index = np.zeros(plate.shape, dtype=plate.dtype)
    else:
        if isinstance(index, int):
            index = np.array([index], dtype=np.uint64)
    #
    # Check that all inputs have the same shape.
    #
    if plate.shape != fiber.shape:
        raise ValueError("fiber.shape does not match plate.shape!")
    if plate.shape != mjd.shape:
        raise ValueError("mjd.shape does not match plate.shape!")
    if plate.shape != run2d.shape:
        raise ValueError("run2d.shape does not match plate.shape!")
    if plate.shape != line.shape:
        raise ValueError("line.shape does not match plate.shape!")
    if plate.shape != index.shape:
        raise ValueError("index.shape does not match plate.shape!")
    #
    # Check ranges of parameters
    #
    if ((plate < 0) | (plate >= 2**14)).any():
        raise ValueError("plate values are out-of-bounds!")
    if ((fiber < 0) | (fiber >= 2**12)).any():
        raise ValueError("fiber values are out-of-bounds!")
    if ((mjd < 0) | (mjd >= 2**14)).any():
        raise ValueError("MJD values are out-of-bounds!")
    # BUGFIX: this message previously said "MJD" (copy-paste from the
    # check above), misreporting bad run2d inputs.
    if ((run2d < 0) | (run2d >= 2**14)).any():
        raise ValueError("run2d values are out-of-bounds!")
    if ((line < 0) | (line >= 2**10)).any():
        raise ValueError("line values are out-of-bounds!")
    if ((index < 0) | (index >= 2**10)).any():
        raise ValueError("index values are out-of-bounds!")
    #
    # Compute the specObjID by packing each field into its bit range.
    #
    specObjID = ((plate << 50) |
                 (fiber << 38) |
                 (mjd << 24) |
                 (run2d << 10) |
                 (line | index))
    return specObjID
def sdss_sweep_circle(ra, dec, radius, stype='star', allobj=False):
    """Read the SDSS datasweep files and return objects around a location.
    Parameters
    ----------
    ra, dec : :class:`float`
        The sky location to search, J2000 degrees.
    radius : :class:`float`
        The radius around `ra`, `dec` to search.
    stype : :class:`str`, optional
        The type of object to search, 'star', 'gal' or 'sky'.
        The default is 'star'.
    allobj : :class:`bool`, optional
        If set to ``True``, return all objects found, not just SURVEY_PRIMARY.
    Returns
    -------
    :class:`numpy.ndarray`
        The data extracted from the sweep files.
    Raises
    ------
    :exc:`PydlutilsException`
        If :envvar:`PHOTO_SWEEP` is not set.
    Notes
    -----
    Assumes that the sweep files exist in :envvar:`PHOTO_SWEEP` and
    that index files have been created.
    """
    global sweep_cache
    #
    # Check values
    #
    if stype not in ('star', 'gal', 'sky'):
        raise ValueError('Invalid type {0}!'.format(stype))
    sweepdir = os.getenv('PHOTO_SWEEP')
    if sweepdir is None:
        raise PydlutilsException('PHOTO_SWEEP is not set!')
    #
    # Read the index
    #
    # The per-type index table is cached at module level (sweep_cache) so
    # repeated calls only read the index FITS file once per object type.
    if sweep_cache[stype] is None:
        indexfile = os.path.join(sweepdir, 'datasweep-index-{0}.fits'.format(stype))
        with fits.open(indexfile) as f:
            sweep_cache[stype] = f[1].data
    index = sweep_cache[stype]
    #
    # Match
    #
    # NOTE(review): the search radius is padded by 0.36 deg, presumably to
    # cover the angular extent of a field -- confirm against the sweep docs.
    ira = np.array([ra])
    idec = np.array([dec])
    m1, m2, d12 = spherematch(index['RA'], index['DEC'], ira, idec,
                              radius+0.36, maxmatch=0)
    if len(m1) == 0:
        return None
    if not allobj:
        # Keep only index entries that actually contain primary objects.
        w = index['NPRIMARY'][m1] > 0
        if w.any():
            m1 = m1[w]
        else:
            return None
    #
    # Maximum number of objects
    #
    # Upper bound on the number of rows we may return; used to preallocate
    # the output array below.
    if allobj:
        n = index['IEND'][m1] - index['ISTART'][m1] + 1
        ntot = (where(n > 0, n, np.zeros(n.shape, dtype=n.dtype))).sum()
    else:
        ntot = index['NPRIMARY'][m1].sum()
    #
    # Find unique run + camcol
    #
    # Encode (run, camcol) as a single integer so the matched fields can be
    # grouped into one sweep-file read per run/camcol combination.
    rc = index['RUN'][m1]*6 + index['CAMCOL'][m1] - 1
    isort = rc.argsort()
    iuniq = uniq(rc[isort])
    istart = 0
    objs = None
    nobjs = 0
    for i in range(len(iuniq)):
        iend = iuniq[i]
        icurr = isort[istart:iend]
        #
        # Determine which file and range of rows
        #
        run = index['RUN'][m1[icurr[0]]]
        camcol = index['CAMCOL'][m1[icurr[0]]]
        rerun = index['RERUN'][m1[icurr[0]]]
        fields = index[m1[icurr]]
        ist = fields['ISTART'].min()
        ind = fields['IEND'].max()
        if ind >= ist:
            #
            # Read in the rows of that file
            #
            swfile = os.path.join(os.getenv('PHOTO_SWEEP'), rerun,
                                  'calibObj-{0:06d}-{1:1d}-{2}.fits.gz'.format(
                                  int(run), int(camcol), stype))
            with fits.open(swfile) as f:
                tmp_objs = f[1].data[ist:ind]
            if tmp_objs.size > 0:
                #
                # Keep only objects within the desired radius
                #
                tm1, tm2, d12 = spherematch(tmp_objs['RA'], tmp_objs['DEC'],
                                            ira, idec, radius, maxmatch=0)
                if len(tm1) > 0:
                    tmp_objs = tmp_objs[tm1]
                    #
                    # Keep only SURVEY_PRIMARY objects by default
                    #
                    if not allobj:
                        w = ((tmp_objs['RESOLVE_STATUS'] &
                              sdss_flagval('RESOLVE_STATUS',
                                           'SURVEY_PRIMARY')) > 0)
                        if w.any():
                            tmp_objs = tmp_objs[w]
                        else:
                            tmp_objs = None
                    if tmp_objs is not None:
                        # Lazily allocate the output array on first use so
                        # its dtype matches the sweep file's record dtype.
                        if objs is None:
                            objs = np.zeros(ntot, dtype=tmp_objs.dtype)
                        objs[nobjs:nobjs+tmp_objs.size] = tmp_objs
                        nobjs += tmp_objs.size
        istart = iend+1
    # Trim the preallocated array to the rows actually filled.
    if nobjs > 0:
        return objs[0:nobjs]
    else:
        return None
def set_maskbits(idlutils_version='v5_5_24', maskbits_file=None):
    """Build and return the bitmask cache.

    Parameters
    ----------
    idlutils_version : :class:`str`, optional
        Fetch the sdssMaskbits.par file corresponding to this idlutils version.
    maskbits_file : :class:`str`, optional
        Parse this local file instead of downloading the official version.
        This should only be used for tests.

    Returns
    -------
    :class:`dict`
        A nested dictionary mapping flag group to ``{label: bit}``,
        suitable for caching.

    Raises
    ------
    :exc:`URLError`
        If the data file could not be retrieved.
    """
    from astropy.utils.data import download_file
    from .yanny import yanny
    if maskbits_file is None:  # pragma: no cover
        if (idlutils_version == 'trunk' or
                idlutils_version.startswith('branches/')):
            iversion = idlutils_version
        else:
            iversion = 'tags/'+idlutils_version
        baseurl = ('https://svn.sdss.org/public/repo/sdss/idlutils/' +
                   '{0}/data/sdss/sdssMaskbits.par').format(iversion)
        filename = download_file(baseurl, cache=True, show_progress=False)
    else:
        filename = maskbits_file
    maskfile = yanny(filename, raw=True)
    #
    # Parse the file & cache the results in maskbits
    #
    maskbits = dict()
    mask_table = maskfile['MASKBITS']
    for k in range(maskfile.size('MASKBITS')):
        flag = mask_table['flag'][k]
        # setdefault creates the per-group dict on first sight of a flag.
        maskbits.setdefault(flag, dict())[mask_table['label'][k]] = mask_table['bit'][k]
    # Aliases get an independent copy of the bits of the group they name.
    if 'MASKALIAS' in maskfile:
        alias_table = maskfile['MASKALIAS']
        for k in range(maskfile.size('MASKALIAS')):
            maskbits[alias_table['alias'][k]] = maskbits[alias_table['flag'][k]].copy()
    return maskbits
def unwrap_specobjid(specObjID, run2d_integer=False, specLineIndex=False):
    """Unwrap CAS-style specObjID into plate, fiber, mjd, run2d.
    See :func:`~pydl.pydlutils.sdss.sdss_specobjid` for details on how the
    bits within a specObjID are assigned.
    Parameters
    ----------
    specObjID : :class:`numpy.ndarray`
        An array containing 64-bit integers or strings.  If strings are passed,
        they will be converted to integers internally.
    run2d_integer : :class:`bool`, optional
        If ``True``, do *not* attempt to convert the encoded run2d values
        to a string of the form 'vN_M_P'.
    specLineIndex : :class:`bool`, optional
        If ``True`` interpret any low-order bits as being an 'index'
        rather than a 'line'.
    Returns
    -------
    :class:`numpy.recarray`
        A record array with the same length as `specObjID`, with the columns
        'plate', 'fiber', 'mjd', 'run2d', 'line'.
    Examples
    --------
    >>> from numpy import array, uint64
    >>> from pydl.pydlutils.sdss import unwrap_specobjid
    >>> unwrap_specobjid(array([4565636362342690816], dtype=uint64))
    rec.array([(4055, 408, 55359, 'v5_7_0', 0)],
              dtype=[('plate', '<i4'), ('fiber', '<i4'), ('mjd', '<i4'), ('run2d', '<U8'), ('line', '<i4')])
    """
    # BUGFIX: np.string_/np.unicode_ were removed in NumPy 2.0; np.bytes_
    # and np.str_ are the same scalar types and exist in NumPy 1.x too.
    if (specObjID.dtype.type is np.bytes_ or
            specObjID.dtype.type is np.str_):
        tempobjid = specObjID.astype(np.uint64)
    elif specObjID.dtype.type is np.uint64:
        tempobjid = specObjID.copy()
    else:
        raise ValueError('Unrecognized type for specObjID!')
    run2d_dtype = 'U8'
    if run2d_integer:
        run2d_dtype = 'i4'
    line = 'line'
    if specLineIndex:
        line = 'index'
    unwrap = np.recarray(specObjID.shape,
                         dtype=[('plate', 'i4'), ('fiber', 'i4'),
                                ('mjd', 'i4'), ('run2d', run2d_dtype),
                                (line, 'i4')])
    # Mask out each bit field; see sdss_specobjid for the bit layout.
    unwrap.plate = np.bitwise_and(tempobjid >> 50, 2**14 - 1)
    unwrap.fiber = np.bitwise_and(tempobjid >> 38, 2**12 - 1)
    unwrap.mjd = np.bitwise_and(tempobjid >> 24, 2**14 - 1) + 50000
    run2d = np.bitwise_and(tempobjid >> 10, 2**14 - 1)
    if run2d_integer:
        unwrap.run2d = run2d
    else:
        # Decode (N - 5)*10000 + M*100 + P back into 'vN_M_P'.
        N = ((run2d // 10000) + 5).tolist()
        M = ((run2d % 10000) // 100).tolist()
        P = (run2d % 100).tolist()
        unwrap.run2d = ['v{0:d}_{1:d}_{2:d}'.format(n, m, p)
                        for n, m, p in zip(N, M, P)]
    unwrap[line] = np.bitwise_and(tempobjid, 2**10 - 1)
    return unwrap
| [
"numpy.uint64",
"numpy.zeros",
"re.match",
"numpy.recarray",
"numpy.array",
"numpy.bitwise_and",
"astropy.io.fits.open",
"astropy.utils.data.download_file",
"os.getenv"
] | [((3901, 3932), 'numpy.zeros', 'np.zeros', (['run.shape'], {'dtype': 'bool'}), '(run.shape, dtype=bool)\n', (3909, 3932), True, 'import numpy as np\n'), ((6637, 6657), 'numpy.uint64', 'np.uint64', (['flagvalue'], {}), '(flagvalue)\n', (6646, 6657), True, 'import numpy as np\n'), ((6668, 6680), 'numpy.uint64', 'np.uint64', (['(1)'], {}), '(1)\n', (6677, 6680), True, 'import numpy as np\n'), ((8282, 8294), 'numpy.uint64', 'np.uint64', (['(0)'], {}), '(0)\n', (8291, 8294), True, 'import numpy as np\n'), ((19766, 19790), 'os.getenv', 'os.getenv', (['"""PHOTO_SWEEP"""'], {}), "('PHOTO_SWEEP')\n", (19775, 19790), False, 'import os\n'), ((20177, 20191), 'numpy.array', 'np.array', (['[ra]'], {}), '([ra])\n', (20185, 20191), True, 'import numpy as np\n'), ((20203, 20218), 'numpy.array', 'np.array', (['[dec]'], {}), '([dec])\n', (20211, 20218), True, 'import numpy as np\n'), ((26623, 26751), 'numpy.recarray', 'np.recarray', (['specObjID.shape'], {'dtype': "[('plate', 'i4'), ('fiber', 'i4'), ('mjd', 'i4'), ('run2d', run2d_dtype), (\n line, 'i4')]"}), "(specObjID.shape, dtype=[('plate', 'i4'), ('fiber', 'i4'), (\n 'mjd', 'i4'), ('run2d', run2d_dtype), (line, 'i4')])\n", (26634, 26751), True, 'import numpy as np\n'), ((26855, 26899), 'numpy.bitwise_and', 'np.bitwise_and', (['(tempobjid >> 50)', '(2 ** 14 - 1)'], {}), '(tempobjid >> 50, 2 ** 14 - 1)\n', (26869, 26899), True, 'import numpy as np\n'), ((26917, 26961), 'numpy.bitwise_and', 'np.bitwise_and', (['(tempobjid >> 38)', '(2 ** 12 - 1)'], {}), '(tempobjid >> 38, 2 ** 12 - 1)\n', (26931, 26961), True, 'import numpy as np\n'), ((27040, 27084), 'numpy.bitwise_and', 'np.bitwise_and', (['(tempobjid >> 10)', '(2 ** 14 - 1)'], {}), '(tempobjid >> 10, 2 ** 14 - 1)\n', (27054, 27084), True, 'import numpy as np\n'), ((27402, 27440), 'numpy.bitwise_and', 'np.bitwise_and', (['tempobjid', '(2 ** 10 - 1)'], {}), '(tempobjid, 2 ** 10 - 1)\n', (27416, 27440), True, 'import numpy as np\n'), ((2085, 2116), 'numpy.array', 'np.array', 
(['[run]'], {'dtype': 'np.int64'}), '([run], dtype=np.int64)\n', (2093, 2116), True, 'import numpy as np\n'), ((2134, 2168), 'numpy.array', 'np.array', (['[camcol]'], {'dtype': 'np.int64'}), '([camcol], dtype=np.int64)\n', (2142, 2168), True, 'import numpy as np\n'), ((2185, 2218), 'numpy.array', 'np.array', (['[field]'], {'dtype': 'np.int64'}), '([field], dtype=np.int64)\n', (2193, 2218), True, 'import numpy as np\n'), ((10729, 10760), 'numpy.array', 'np.array', (['[run]'], {'dtype': 'np.int64'}), '([run], dtype=np.int64)\n', (10737, 10760), True, 'import numpy as np\n'), ((10810, 10844), 'numpy.array', 'np.array', (['[camcol]'], {'dtype': 'np.int64'}), '([camcol], dtype=np.int64)\n', (10818, 10844), True, 'import numpy as np\n'), ((10892, 10925), 'numpy.array', 'np.array', (['[field]'], {'dtype': 'np.int64'}), '([field], dtype=np.int64)\n', (10900, 10925), True, 'import numpy as np\n'), ((10975, 11009), 'numpy.array', 'np.array', (['[objnum]'], {'dtype': 'np.int64'}), '([objnum], dtype=np.int64)\n', (10983, 11009), True, 'import numpy as np\n'), ((16072, 16106), 'numpy.array', 'np.array', (['[plate]'], {'dtype': 'np.uint64'}), '([plate], dtype=np.uint64)\n', (16080, 16106), True, 'import numpy as np\n'), ((16154, 16188), 'numpy.array', 'np.array', (['[fiber]'], {'dtype': 'np.uint64'}), '([fiber], dtype=np.uint64)\n', (16162, 16188), True, 'import numpy as np\n'), ((16893, 16933), 'numpy.zeros', 'np.zeros', (['plate.shape'], {'dtype': 'plate.dtype'}), '(plate.shape, dtype=plate.dtype)\n', (16901, 16933), True, 'import numpy as np\n'), ((17069, 17109), 'numpy.zeros', 'np.zeros', (['plate.shape'], {'dtype': 'plate.dtype'}), '(plate.shape, dtype=plate.dtype)\n', (17077, 17109), True, 'import numpy as np\n'), ((24075, 24130), 'astropy.utils.data.download_file', 'download_file', (['baseurl'], {'cache': '(True)', 'show_progress': '(False)'}), '(baseurl, cache=True, show_progress=False)\n', (24088, 24130), False, 'from astropy.utils.data import download_file\n'), 
((26977, 27021), 'numpy.bitwise_and', 'np.bitwise_and', (['(tempobjid >> 24)', '(2 ** 14 - 1)'], {}), '(tempobjid >> 24, 2 ** 14 - 1)\n', (26991, 27021), True, 'import numpy as np\n'), ((2968, 2993), 'os.getenv', 'os.getenv', (['"""PHOTOLOG_DIR"""'], {}), "('PHOTOLOG_DIR')\n", (2977, 2993), False, 'import os\n'), ((3397, 3431), 'astropy.utils.data.download_file', 'download_file', (['baseurl'], {'cache': '(True)'}), '(baseurl, cache=True)\n', (3410, 3431), False, 'from astropy.utils.data import download_file\n'), ((11162, 11195), 'numpy.array', 'np.array', (['[rerun]'], {'dtype': 'np.int64'}), '([rerun], dtype=np.int64)\n', (11170, 11195), True, 'import numpy as np\n'), ((11402, 11440), 'numpy.array', 'np.array', (['[skyversion]'], {'dtype': 'np.int64'}), '([skyversion], dtype=np.int64)\n', (11410, 11440), True, 'import numpy as np\n'), ((11530, 11565), 'numpy.zeros', 'np.zeros', (['run.shape'], {'dtype': 'np.int64'}), '(run.shape, dtype=np.int64)\n', (11538, 11565), True, 'import numpy as np\n'), ((11605, 11643), 'numpy.array', 'np.array', (['[firstfield]'], {'dtype': 'np.int64'}), '([firstfield], dtype=np.int64)\n', (11613, 11643), True, 'import numpy as np\n'), ((16232, 16264), 'numpy.array', 'np.array', (['[mjd]'], {'dtype': 'np.uint64'}), '([mjd], dtype=np.uint64)\n', (16240, 16264), True, 'import numpy as np\n'), ((16822, 16856), 'numpy.array', 'np.array', (['[run2d]'], {'dtype': 'np.uint64'}), '([run2d], dtype=np.uint64)\n', (16830, 16856), True, 'import numpy as np\n'), ((16997, 17030), 'numpy.array', 'np.array', (['[line]'], {'dtype': 'np.uint64'}), '([line], dtype=np.uint64)\n', (17005, 17030), True, 'import numpy as np\n'), ((17175, 17209), 'numpy.array', 'np.array', (['[index]'], {'dtype': 'np.uint64'}), '([index], dtype=np.uint64)\n', (17183, 17209), True, 'import numpy as np\n'), ((20042, 20062), 'astropy.io.fits.open', 'fits.open', (['indexfile'], {}), '(indexfile)\n', (20051, 20062), False, 'from astropy.io import fits\n'), ((3482, 3507), 
'os.getenv', 'os.getenv', (['"""PHOTOLOG_DIR"""'], {}), "('PHOTOLOG_DIR')\n", (3491, 3507), False, 'import os\n'), ((11086, 11121), 'numpy.zeros', 'np.zeros', (['run.shape'], {'dtype': 'np.int64'}), '(run.shape, dtype=np.int64)\n', (11094, 11121), True, 'import numpy as np\n'), ((11304, 11339), 'numpy.zeros', 'np.zeros', (['run.shape'], {'dtype': 'np.int64'}), '(run.shape, dtype=np.int64)\n', (11312, 11339), True, 'import numpy as np\n'), ((16457, 16497), 're.match', 're.match', (['"""v(\\\\d+)_(\\\\d+)_(\\\\d+)"""', 'run2d'], {}), "('v(\\\\d+)_(\\\\d+)_(\\\\d+)', run2d)\n", (16465, 16497), False, 'import re\n'), ((21487, 21511), 'os.getenv', 'os.getenv', (['"""PHOTO_SWEEP"""'], {}), "('PHOTO_SWEEP')\n", (21496, 21511), False, 'import os\n'), ((21670, 21687), 'astropy.io.fits.open', 'fits.open', (['swfile'], {}), '(swfile)\n', (21679, 21687), False, 'from astropy.io import fits\n'), ((8418, 8430), 'numpy.uint64', 'np.uint64', (['(2)'], {}), '(2)\n', (8427, 8430), True, 'import numpy as np\n'), ((8432, 8463), 'numpy.uint64', 'np.uint64', (['maskbits[flagu][bit]'], {}), '(maskbits[flagu][bit])\n', (8441, 8463), True, 'import numpy as np\n'), ((20670, 20702), 'numpy.zeros', 'np.zeros', (['n.shape'], {'dtype': 'n.dtype'}), '(n.shape, dtype=n.dtype)\n', (20678, 20702), True, 'import numpy as np\n'), ((6756, 6770), 'numpy.uint64', 'np.uint64', (['bit'], {}), '(bit)\n', (6765, 6770), True, 'import numpy as np\n'), ((22711, 22747), 'numpy.zeros', 'np.zeros', (['ntot'], {'dtype': 'tmp_objs.dtype'}), '(ntot, dtype=tmp_objs.dtype)\n', (22719, 22747), True, 'import numpy as np\n')] |
from __future__ import print_function
from __future__ import division
import time
import sys
import numpy as np
from numpy import *
from scipy.ndimage.filters import gaussian_filter1d
import config
class ExpFilter:
    """Exponentially-weighted smoothing filter with separate rise and
    decay coefficients.  Smaller coefficients mean heavier smoothing.
    """
    def __init__(self, val=0.0, alpha_decay=0.5, alpha_rise=0.5):
        """Store the initial value and the two smoothing factors."""
        self.alpha_decay = alpha_decay
        self.alpha_rise = alpha_rise
        self.value = val
    def update(self, value):
        """Blend *value* into the stored value, element-wise for arrays."""
        if isinstance(self.value, (list, np.ndarray, tuple)):
            # Rising elements use alpha_rise, falling/flat ones alpha_decay.
            diff = value - self.value
            alpha = np.where(diff > 0.0, self.alpha_rise, self.alpha_decay)
        else:
            alpha = self.alpha_rise if value > self.value else self.alpha_decay
        self.value = alpha * value + (1.0 - alpha) * self.value
def getNotesToKeyMatrix(noteList, weights):
    """Build a 12 x len(noteList) matrix mapping note energy onto key bins.

    Row *root* corresponds to the key rooted at pitch class *root*; the
    column for each note holds ``weights`` indexed by that note's scale
    degree within the key (degree -1 wraps to the last weight).
    """
    base = noteList[0]
    matrix = np.zeros((12, len(noteList)))
    for root in range(12):
        for note in noteList:
            degree = (note - root) % 12 - 1
            matrix[root, note - base] = weights[degree]
    return matrix
class Key:
    """Estimate the most likely musical key from smoothed note evidence."""
    def __init__(self, noteList,
                 #        1   2      3   4      5  6      7
                 weights=[3.,-1.,1.,-1.,1.,2.,-5.,3.,-1.,2.,-5.,1.],
                 alpha=0.00025):
        """Set up the key-evidence accumulator and the note-to-key matrix."""
        # Slowly-adapting accumulator of evidence for each of the 12 keys.
        self.keySums = ExpFilter(np.ones(12), alpha_rise=alpha, alpha_decay=alpha)
        self.matrix = getNotesToKeyMatrix(noteList, weights)
        self.keyStringList = ['c ', 'cs ', 'd ', 'ef ',
                              'e ', 'f ', 'fs ', 'g ',
                              'af ', 'a ', 'bf ', 'b ' ]
        self.currentKeyNum = 0
    def update(self, newNoteSpectrum):
        """Fold a new note spectrum into the key evidence."""
        self.keySums.update(np.dot(self.matrix, newNoteSpectrum))
        self.currentKeyNum = np.argmax(self.keySums.value)
    def printKey(self):
        """Report the current best key guess and the runner-up margin."""
        ranked = np.sort(self.keySums.value)
        names = list(self.keyStringList[i] for i in np.argsort(self.keySums.value))
        # Percent lead of the best key over the second-best.
        surety = np.round(100 * (ranked[-1]/ranked[-2] - 1.), 1)
        print("most likely key is: " + self.keyStringList[self.currentKeyNum] + " " + str(surety) + "%")
        print(np.fliplr([names])[0][0:7])
        print(np.round(np.fliplr([ranked])[0], 0)[0:7])
class NoteSums:
    """Accumulate long-term usage statistics for the 12 pitch classes."""
    def __init__(self, noteList, alpha=0.00025):
        """Build a note-collapsing matrix and the smoothed accumulator."""
        self.noteSums = ExpFilter(np.ones(12), alpha_rise=alpha, alpha_decay=alpha)
        # Weight vector selects only the root (scale degree 1) per class.
        self.matrix = getNotesToKeyMatrix(noteList, [1.] + [0.] * 11)
        self.newNoteSums = np.zeros(12)
        self.noteStringList = ['c ', 'cs ', 'd ', 'ef ',
                               'e ', 'f ', 'fs ', 'g ',
                               'af ', 'a ', 'bf ', 'b ' ]
    def update(self, newNoteSpectrum):
        """Fold one note-spectrum sample into the running note sums."""
        self.newNoteSums = np.dot(self.matrix, newNoteSpectrum)
        self.noteSums.update(self.newNoteSums)
    def printNoteSums(self):
        """Print the seven most-used notes and their rounded totals."""
        print("most used notes are: ")
        ranked = np.sort(self.noteSums.value)
        names = list(self.noteStringList[i] for i in np.argsort(self.noteSums.value))
        print(np.fliplr([names])[0][0:7])
        print(np.round(np.fliplr([ranked])[0], 0)[0:7])
class Chord:
    """Track the most likely diatonic chord (I, ii, ..., vii) within the
    current key, using a slowly-smoothed chord-evidence vector.
    """
    def __init__(self, noteList):
        """Precompute, for every possible key, a chords-by-notes weight matrix.

        Parameters
        ----------
        noteList : list of int
            Note numbers covered by the note spectrum; matrix columns are
            indexed by ``note - noteList[0]``.
        """
        # define the 7 x notes matrix for each of 12 possible keys.
        # NOTE(review): every row of chordRefMatrix is the identical range
        # 0..11, so the membership test below is equivalent to
        # ``scaleDegree != -1``, and np.argmin(...) simply recovers
        # scaleDegree as an index -- confirm this is the intended design.
        #                          c  d  e  f  g  a  b
        chordRefMatrix = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] for i in range(6)])
        # Chord templates over the 12 scale-degree slots; only 6 chords are
        # defined here even though each per-key matrix has 7 rows (the 7th
        # row stays all-zero, matching chordStringList's 'vii' entry).
        weights = np.array([[1 ,0 ,0 ,0 ,1 ,0 ,0, 1 ,0 ,0 ,0 , 0 ],
                            [0 ,0 ,1 ,0 ,0 ,1 ,0, 0 ,0 ,1 ,0 , 0 ],
                            [0 ,0 ,0 ,0 ,1 ,0 ,0, 1 ,0 ,0 ,0 , 1 ],
                            [1 ,0 ,0 ,0 ,0 ,1 ,0, 0 ,0 ,1 ,0 , 0 ],
                            [0 ,0 ,1 ,0 ,0 ,0 ,0, 1 ,0 ,0 ,0 , 1 ],
                            [1 ,0 ,0 ,0 ,1 ,0 ,0, 0 ,0 ,1 ,0 , 0 ]])
        self.chordMatrixList = []
        for i in range(12):
            self.chordMatrixList.append(np.zeros([7,len(noteList)]))
        for keyNum in range(12):
            for chordNum in range(6):
                for note in noteList:
                    # scaleDegree is -1 when note mod 12 == keyNum mod 12
                    # (the key root); such notes fall into the else branch
                    # and get weight 0.
                    scaleDegree = (note-keyNum%12)%12 -1
                    if scaleDegree in chordRefMatrix[chordNum]:
                        arg = np.argmin(np.abs(chordRefMatrix[chordNum]-scaleDegree))
                        self.chordMatrixList[keyNum][chordNum, note-noteList[0]] = weights[chordNum, arg]
                    else:
                        self.chordMatrixList[keyNum][chordNum, note-noteList[0]] = 0.0
        # Smoothed evidence for each of the 7 chord slots.
        self.chordSums = ExpFilter(np.zeros(7), alpha_rise=0.01, alpha_decay=0.01)
        self.chordStringList = ['I', 'ii', 'iii', 'IV', 'V', 'vi', 'vii']
        self.currentChordNum = 0
    def update(self, newNoteSpectrum, currentKeyNum):
        """Fold a new note spectrum into the chord evidence for the given key."""
        newChordSums = np.dot(self.chordMatrixList[currentKeyNum], newNoteSpectrum)
        self.chordSums.update(newChordSums)
        self.currentChordNum = np.argmax(self.chordSums.value)
    def printChord(self):
        """Print the current best chord guess and the rounded evidence vector."""
        print("most likely chord is " + self.chordStringList[self.currentChordNum])
        print(np.round(self.chordSums.value,0))
class Beat:
    """Simple beat detector driven by low-frequency spectral power."""
    def __init__(self, freqs, freqMin=2, freqMax=60):
        """Build a 0/1 mask selecting bins with freqMin < f < freqMax."""
        self.matrix = np.zeros_like(freqs)
        for idx, f in enumerate(freqs):
            self.matrix[idx] = 1.0 if freqMin < f < freqMax else 0.0
        # Rolling window of the last five bass-power samples.
        self.bassPower = np.zeros(5)
    def update(self, freqSpectrum):
        """Shift the window left and append the new spectrum's bass power."""
        self.bassPower = np.roll(self.bassPower, -1)
        self.bassPower[-1] = np.dot(self.matrix, freqSpectrum)
    def beatRightNow(self):
        """Return True when bass power rose >20% in each of the last two steps."""
        p = self.bassPower
        return bool(p[2] * 1.2 < p[3] and p[3] * 1.2 < p[4])
| [
"numpy.zeros_like",
"numpy.abs",
"numpy.argmax",
"numpy.roll",
"numpy.zeros",
"numpy.ones",
"numpy.argsort",
"numpy.sort",
"numpy.fliplr",
"numpy.array",
"numpy.dot",
"numpy.round"
] | [((1811, 1847), 'numpy.dot', 'np.dot', (['self.matrix', 'newNoteSpectrum'], {}), '(self.matrix, newNoteSpectrum)\n', (1817, 1847), True, 'import numpy as np\n'), ((1917, 1946), 'numpy.argmax', 'np.argmax', (['self.keySums.value'], {}), '(self.keySums.value)\n', (1926, 1946), True, 'import numpy as np\n'), ((1994, 2021), 'numpy.sort', 'np.sort', (['self.keySums.value'], {}), '(self.keySums.value)\n', (2001, 2021), True, 'import numpy as np\n'), ((2129, 2191), 'numpy.round', 'np.round', (['(100 * (sortedValues[-1] / sortedValues[-2] - 1.0))', '(1)'], {}), '(100 * (sortedValues[-1] / sortedValues[-2] - 1.0), 1)\n', (2137, 2191), True, 'import numpy as np\n'), ((2687, 2699), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (2695, 2699), True, 'import numpy as np\n'), ((2942, 2978), 'numpy.dot', 'np.dot', (['self.matrix', 'newNoteSpectrum'], {}), '(self.matrix, newNoteSpectrum)\n', (2948, 2978), True, 'import numpy as np\n'), ((3117, 3145), 'numpy.sort', 'np.sort', (['self.noteSums.value'], {}), '(self.noteSums.value)\n', (3124, 3145), True, 'import numpy as np\n'), ((3676, 3926), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,\n 0], [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0, 0, 0, 0, 1,\n 0, 0]]'], {}), '([[1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1, 0, 0, 0,\n 1, 0, 0], [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1], [1, 0, 0, 0, 0, 1, 0, 0,\n 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0, 0,\n 0, 0, 1, 0, 0]])\n', (3684, 3926), True, 'import numpy as np\n'), ((5041, 5101), 'numpy.dot', 'np.dot', (['self.chordMatrixList[currentKeyNum]', 'newNoteSpectrum'], {}), '(self.chordMatrixList[currentKeyNum], newNoteSpectrum)\n', (5047, 5101), True, 'import numpy as np\n'), ((5177, 5208), 'numpy.argmax', 'np.argmax', (['self.chordSums.value'], {}), '(self.chordSums.value)\n', (5186, 5208), True, 
'import numpy as np\n'), ((5469, 5489), 'numpy.zeros_like', 'np.zeros_like', (['freqs'], {}), '(freqs)\n', (5482, 5489), True, 'import numpy as np\n'), ((5707, 5718), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (5715, 5718), True, 'import numpy as np\n'), ((5780, 5807), 'numpy.roll', 'np.roll', (['self.bassPower', '(-1)'], {}), '(self.bassPower, -1)\n', (5787, 5807), True, 'import numpy as np\n'), ((5836, 5869), 'numpy.dot', 'np.dot', (['self.matrix', 'freqSpectrum'], {}), '(self.matrix, freqSpectrum)\n', (5842, 5869), True, 'import numpy as np\n'), ((1434, 1445), 'numpy.ones', 'np.ones', (['(12)'], {}), '(12)\n', (1441, 1445), True, 'import numpy as np\n'), ((2519, 2530), 'numpy.ones', 'np.ones', (['(12)'], {}), '(12)\n', (2526, 2530), True, 'import numpy as np\n'), ((4809, 4820), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (4817, 4820), True, 'import numpy as np\n'), ((5333, 5366), 'numpy.round', 'np.round', (['self.chordSums.value', '(0)'], {}), '(self.chordSums.value, 0)\n', (5341, 5366), True, 'import numpy as np\n'), ((2080, 2110), 'numpy.argsort', 'np.argsort', (['self.keySums.value'], {}), '(self.keySums.value)\n', (2090, 2110), True, 'import numpy as np\n'), ((2307, 2331), 'numpy.fliplr', 'np.fliplr', (['[sortedNames]'], {}), '([sortedNames])\n', (2316, 2331), True, 'import numpy as np\n'), ((3205, 3236), 'numpy.argsort', 'np.argsort', (['self.noteSums.value'], {}), '(self.noteSums.value)\n', (3215, 3236), True, 'import numpy as np\n'), ((3252, 3276), 'numpy.fliplr', 'np.fliplr', (['[sortedNames]'], {}), '([sortedNames])\n', (3261, 3276), True, 'import numpy as np\n'), ((2364, 2389), 'numpy.fliplr', 'np.fliplr', (['[sortedValues]'], {}), '([sortedValues])\n', (2373, 2389), True, 'import numpy as np\n'), ((3309, 3334), 'numpy.fliplr', 'np.fliplr', (['[sortedValues]'], {}), '([sortedValues])\n', (3318, 3334), True, 'import numpy as np\n'), ((4509, 4555), 'numpy.abs', 'np.abs', (['(chordRefMatrix[chordNum] - scaleDegree)'], {}), 
'(chordRefMatrix[chordNum] - scaleDegree)\n', (4515, 4555), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
import re
import math
from collections import Counter, defaultdict
from matplotlib.font_manager import FontProperties
from matplotlib import gridspec
from scipy.optimize import minimize
# from .bundle import PathBundle
def checkEqual2(iterator):
    """Return True when every item of *iterator* is equal (vacuously True for empty input)."""
    distinct = set(iterator)
    return len(distinct) <= 1
def findbestxy(N):
    """Return a near-square (rows, cols) grid factorisation of N.

    N is first rounded up to the nearest even number, then split into the
    factor pair closest to its square root (rows <= cols).
    """
    count = N if N % 2 == 0 else N + 1
    rows = int(count ** 0.5)
    # Walk down from sqrt(count) to the nearest divisor.
    while count % rows != 0:
        rows -= 1
    return (rows, count // rows)
def convertPath2Edge(pathlist):
    """Return the consecutive (node, next_node) edge pairs along *pathlist*."""
    return [(here, there) for here, there in zip(pathlist, pathlist[1:])]
def convertLocation2xy(location):
    """Map an orbital location label (e.g. 'LEO3', 'SUR1') to 2-D plot coordinates.

    The orbit band named in the label picks the radius; the trailing digit
    picks one of six 60-degree sectors (sector 1 at +60 degrees, clockwise).
    """
    radius = 2.35  # fallback for labels naming no known band
    for band, band_radius in (('SUR', 0.5), ('LEO', 1.), ('MEO', 1.5), ('GEO', 2)):
        if band in location:
            radius = band_radius
            break
    sector = int(re.search(r'.+(\d)', location).group(1))
    angle = math.pi / 3 - (sector - 1) * math.pi / 3
    return (radius * math.cos(angle), radius * math.sin(angle))
def convertPath2StaticPath(path):
    """Collapse a time-expanded path into its static node sequence.

    Each node name carries a 2-character time suffix; the suffix is stripped
    and repeat visits are dropped while preserving first-seen order.  Asserts
    that the final character of every node from ``path.deltatime`` onward is
    identical, then returns (static_node_list, deltatime).
    """
    visited = set()
    staticpath = []
    for node in path.nodelist:
        base = node[:-2]
        if base not in visited:
            visited.add(base)
            staticpath.append(base)
    deltatime = path.deltatime
    # All time markers after deltatime must agree, otherwise the path is inconsistent.
    assert len(set(node[-1] for node in path.nodelist[deltatime:])) == 1
    return (staticpath, deltatime)
def fillBetween3Points(a, b, c):
    """Shade the triangle with vertices *a*, *b*, *c* on the current axes.

    The vertices are sorted by x-coordinate and the triangle is painted as
    two ``plt.fill_between`` bands, split at the middle vertex; a vertical
    edge makes the corresponding band degenerate, so only the other band is
    drawn.  Output goes to the active matplotlib figure (no return value).
    """
    # Removed unused local `z = zip(a, b, c)` from the original.
    sortedpoints = sorted([a, b, c])
    # Two x-samples per side of the middle vertex are enough for straight edges.
    x1 = np.linspace(sortedpoints[0][0], sortedpoints[1][0], num=2)
    x2 = np.linspace(sortedpoints[1][0], sortedpoints[2][0], num=2)
    # Triangle edge lines evaluated on the matching x-samples; None marks a
    # vertical edge (zero run, slope undefined).
    y1 = (sortedpoints[0][1]-sortedpoints[1][1])*(x1-sortedpoints[0][0])/float(sortedpoints[0][0]-sortedpoints[1][0]) + sortedpoints[0][1] if sortedpoints[0][0]-sortedpoints[1][0] != 0 else None
    y2 = (sortedpoints[0][1]-sortedpoints[2][1])*(x1-sortedpoints[0][0])/float(sortedpoints[0][0]-sortedpoints[2][0]) + sortedpoints[0][1] if sortedpoints[0][0]-sortedpoints[2][0] != 0 else None
    y3 = (sortedpoints[0][1]-sortedpoints[2][1])*(x2-sortedpoints[2][0])/float(sortedpoints[0][0]-sortedpoints[2][0]) + sortedpoints[2][1] if sortedpoints[0][0]-sortedpoints[2][0] != 0 else None
    y4 = (sortedpoints[1][1]-sortedpoints[2][1])*(x2-sortedpoints[2][0])/float(sortedpoints[1][0]-sortedpoints[2][0]) + sortedpoints[2][1] if sortedpoints[1][0]-sortedpoints[2][0] != 0 else None
    col = 'yellow'
    if y1 is None:
        # Left edge vertical: only the right-hand band exists.
        plt.fill_between(x2, y3, y4, color=col)
    elif y4 is None:
        # Right edge vertical: only the left-hand band exists.
        plt.fill_between(x1, y1, y2, color=col)
    elif y2 is None or y3 is None:
        print("there is error with points")
    else:
        plt.fill_between(x1, y1, y2, color=col)
        plt.fill_between(x2, y3, y4, color=col)
def drawGraph(graph, context):
    """Interactively render the current time step of *graph*.

    Draws ground stations (blue), busy satellites (red) and idle satellites
    (green) on concentric orbit rings, highlights the sectors of elements
    that picked up a task last tick, and overlays the edges of all paths of
    tasks activated at ``context.time``.  Blocks until a key/button press.
    """
    # Graph snapshot for the current order/time step.
    G = graph.graphList[graph.graphOrder]
    if not plt.fignum_exists(1):
        # First call: create the interactive figure once.
        plt.figure(1)
        plt.ion()
        plt.show()
    plt.clf()
    nodes = [e.name for e in graph.elements]
    nameselementdict = {x: y for (x, y) in zip(nodes, graph.elements)}
    # print "nodes:", nodes
    satellites = [n for n in nodes if 'GS' not in n]
    # alltuples = set([])
    # for s in satellites:
    #     path = graph.findcheapestpath(s)
    #     pathedges = convertPath2Edge(path)
    #     # print "graphorder & source & path:", s, pathedges
    #     alltuples = alltuples.union(pathedges)
    alltuples = set([])
    print("Number of saved tasks:", [len(element.savedTasks) for element in graph.elements])
    # Flatten per-element saved tasks; each task must appear exactly once.
    currenttasks = [e for l in [element.savedTasks for element in graph.elements] for e in l]
    assert len(set(currenttasks)) == len(currenttasks)
    # Collect the edges of every path whose task activates right now.
    activetasks = [t for t in currenttasks if t.activationTime == context.time]
    for actives in activetasks:
        path = [a.name for a in actives.pathlist]
        pathedges = convertPath2Edge(path)
        alltuples = alltuples.union(pathedges)
    # for s in satellites:
    #     path = graph.findcheapestpath(s)
    #     pathedges = convertPath2Edge(path)
    #     # print "graphorder & source & path:", s, pathedges
    #     alltuples = alltuples.union(pathedges)
    # Elements that received a task on the previous tick get their sector shaded.
    recenttasks = [t.taskid for t in currenttasks if t.initTime == context.time-1]
    # print "recent tasks:", recenttasks
    elementswithrecenttasks = [e for e in graph.elements if set([t.taskid for t in e.savedTasks]).intersection(recenttasks)]
    # print "elementlist with recent tasks:", elementswithrecenttasks
    # Outer boundary points (on the unit circle) of each of the six sectors.
    section2pointsdict = {1: [(0, 1), (0.866, 0.5)], 2: [(0.866, 0.5), (0.866, -0.5)], 3: [(0.866, -0.5), (0, -1)], 4: [(0, -1), (-0.866, -0.5)], 5: [(-0.866, -0.5), (-0.866, 0.5)], 6: [(-0.866, 0.5), (0, 1)]}
    nodeLocations = [e.getLocation() for e in graph.elements]
    pos = {e.name: convertLocation2xy(nodeLocations[i]) for i, e in enumerate(graph.elements)}
    # Triangle (element position + two sector corners) to shade per recent-task element.
    positionsection = [[pos[e.name]]+section2pointsdict[e.section] for e in elementswithrecenttasks]
    # print "position and section :", positionsection
    sec = {e.name: nodeLocations[i] for i, e in enumerate(graph.elements)}
    labels = {n: n[0] + n[-3:] for n in nodes}
    labelpos = {n: [v[0], v[1] + 0.3] for n, v in pos.items()}
    # Implicit circle x^2 + y^2 = 0.75 drawn as the zero contour (the "Earth").
    x = np.linspace(-1.0, 1.0, 50)
    y = np.linspace(-1.0, 1.0, 50)
    X, Y = np.meshgrid(x, y)
    F = X ** 2 + Y ** 2 - 0.75
    plt.contour(X, Y, F, [0])
    # print nodes
    nx.draw_networkx_nodes(G, pos, nodelist=[n for n in nodes if 'GS' not in n and nameselementdict[n].savedTasks],
                           node_color='r', node_size=100)
    nx.draw_networkx_nodes(G, pos, nodelist=[n for n in nodes if 'GS' not in n and not nameselementdict[n].savedTasks],
                           node_color='g', node_size=100)
    # nx.draw_networkx_nodes(Graph, pos, nodelist=[n for n in nodes if 'GS' not in n and 'LE' in sec[n]], node_color='g', node_size=100)
    nx.draw_networkx_nodes(G, pos, nodelist=[n for n in nodes if 'GS' in n], node_color='b', node_size=100)
    # print "Graph all tuples: ", alltuples
    for ps in positionsection:
        fillBetween3Points(*ps)
    nx.draw_networkx_edges(G, pos, edgelist=list(alltuples))
    # nx.draw_networkx_edges(Graph, pos)
    nx.draw_networkx_labels(G, labelpos, labels, font_size=8)
    plt.xticks([])
    plt.yticks([])
    plt.xlim(-2.5, 2.5)
    plt.ylim(-2.5, 2.5)
    # plt.draw()
    plt.draw()
    plt.waitforbuttonpress()
    # plt.pause(0.5)
# Figure is closed
def drawGraphbyDesign(number, design):
    """Render a constellation *design* string and save it as Design_<roman>.pdf.

    *design* is a space-separated list of elements like ``1.Sat@MEO3`` —
    federate number, element type and location.  *number* (1..5) selects the
    Roman numeral used in the output filename.
    """
    elements = design.split(' ')
    # Leading digit of each element token is its federate number.
    federates = set([int(e[0]) for e in elements])
    federates_location_dict = defaultdict(list)
    federates_type_dict = defaultdict(list)
    federate_coordinates_dict = defaultdict(list)
    my_dpi = 150
    plt.figure(figsize=(800/my_dpi, 800/my_dpi), dpi=my_dpi)
    # Orbit rings: implicit circles x^2 + y^2 = r drawn as zero contours.
    for r in [4, 2.25, 1.]:
        x = np.linspace(-1.0*r, 1.0*r, 50)
        y = np.linspace(-1.0*r, 1.0*r, 50)
        X, Y = np.meshgrid(x, y)
        F = X ** 2 + Y ** 2 - r
        plt.contour(X, Y, F, [0], colors='k', linewidths = 0.3, origin = 'lower', zorder = -1)
    font = FontProperties()
    font.set_style('italic')
    font.set_weight('bold')
    font.set_size('x-small')
    # Band labels along the vertical axis.
    for x,y,lab in [(0,0,'SUR'), (0, 1, "LEO"),(0, 1.5, 'MEO'),(0, 2, 'GEO')]:
        # plt.annotate(lab, xy = (x,y), xytext = (x-0.2, y-0.1))
        plt.text(x,y, ha="center", va="center", s = lab, bbox = dict(fc="w", ec="w", lw=2),fontproperties=font)
    # Sector numbers 1..6, placed via the unknown-band radius of 'OOO<i>'.
    for i, (x, y) in enumerate([convertLocation2xy(e) for e in ['OOO'+str(i) for i in range(1,7)]]):
        plt.text(x, y, ha="center", va="center", s=str(i+1), bbox=dict(fc="none", ec="none", lw=2), fontproperties=font)
    font.set_size('medium')
    plt.text(0, 2.3 , ha="left", va="center", s=r'$|\rightarrow \theta$', bbox=dict(fc="w", ec="w", lw=2), fontproperties=font)
    types_dict = {'GroundSta': "G", 'Sat': 'S'}
    colordict = {'F1': 'yellow', 'F2': 'lightcyan', 'F3': 'lightgrey'}
    # Background hexagons for every possible band/sector slot.
    allpossiblelocations = []
    for location in ['SUR', 'LEO', 'MEO', 'GEO']:
        for i in range(1,7):
            allpossiblelocations.append(location + str(i))
    allpossiblecoordinates = [convertLocation2xy(e) for e in allpossiblelocations]
    plt.scatter(*zip(*allpossiblecoordinates), marker = "H", s = 800, color = 'k', facecolors = 'w')
    # Filled hexagons + labels for each federate's actual elements.
    for f in federates:
        types = [re.search(r'\d\.(.+)@(\w+\d)', e).group(1) for e in elements if '%d.' % f in e]
        federates_type_dict['F%d'%f] = [types_dict[t] for t in types]
        federates_location_dict['F%d'%f] = [re.search(r'(.+)@(\w+\d)', e).group(2) for e in elements if '%d.'%f in e]
        federate_coordinates_dict['F%d'%f] = [convertLocation2xy(loc) for loc in federates_location_dict['F%d'%f]]
        plt.scatter(*zip(*federate_coordinates_dict['F%d'%f]), marker = "H", s = 800, edgecolors = 'k', facecolors = colordict['F%d'%f], linewidth='3')
        for x, y in federate_coordinates_dict['F%d'%f]:
            plt.annotate('F%d'%f, xy = (x, y), xytext = (x-0.1, y-0.075))
    plt.xticks([])
    plt.yticks([])
    rlim = 2.5
    plt.xlim(-rlim, rlim)
    plt.ylim(-rlim+0.2, rlim)
    plt.axis('off')
    des_roman_dict = {1: 'I', 2: 'II', 3:'III', 4:'IV', 5:'V'}
    plt.savefig("Design_%s.pdf"%des_roman_dict[number], bbox_inches='tight')
# plt.show()
def drawGraphs(graph):
    """Show every snapshot in ``graph.graphList`` as subplots of one figure.

    Each snapshot is drawn in a near-square subplot grid (via findbestxy),
    colouring LEO satellites green, other satellites red and ground stations
    blue.  Blocks on ``plt.show()``.
    """
    # pos = None
    plt.figure()
    n1, n2 = findbestxy(len(graph.graphList))
    # print n1,n2
    # NOTE(review): `earth` is created but never added to an axes — confirm intended.
    earth = plt.Circle((0, 0), 1.1, color='k', fill=True)
    for j, g in enumerate(graph.graphList):
        nodes = [e.name for e in graph.elements]
        pos = {e.name: convertLocation2xy(graph.nodeLocations[j][i]) for i, e in enumerate(graph.elements)}
        sec = {e.name: graph.nodeLocations[j][i] for i, e in enumerate(graph.elements)}
        labels = {n: n[0] + n[-3:] for n in nodes}
        labelpos = {n: [v[0], v[1] + 0.3] for n, v in pos.items()}
        ax = plt.subplot('%d%d%d' % (n1, n2, j + 1))
        # "Earth" outline: zero contour of x^2 + y^2 = 0.75.
        x = np.linspace(-1.0, 1.0, 50)
        y = np.linspace(-1.0, 1.0, 50)
        X, Y = np.meshgrid(x, y)
        F = X ** 2 + Y ** 2 - 0.75
        plt.contour(X, Y, F, [0])
        # print nodes
        nx.draw_networkx_nodes(g, pos, nodelist=[n for n in nodes if 'Ground' not in n and 'LE' not in sec[n]],
                               node_color='r', node_size=100)
        nx.draw_networkx_nodes(g, pos, nodelist=[n for n in nodes if 'Ground' not in n and 'LE' in sec[n]],
                               node_color='g', node_size=100)
        nx.draw_networkx_nodes(g, pos, nodelist=[n for n in nodes if 'Ground' in n], node_color='b', node_size=100)
        nx.draw_networkx_edges(g, pos)
        nx.draw_networkx_labels(g, labelpos, labels, font_size=8)
        plt.xticks([])
        plt.yticks([])
        plt.xlim(-2.5, 2.5)
        plt.ylim(-2.5, 2.5)
        # ax.set_title('Graph:'+str(j))
        # print j, graph.shortestPathes[j]
    # plt.savefig("Networks_elements%d_.png"%len(graph.elementlist), bbox_inches='tight')
    plt.show()
def bfs_paths(G, source, destination):
    """Yield every simple path from *source* to *destination*, shortest first.

    Breadth-first enumeration; *G* only needs a ``neighbors(node)`` method.
    """
    frontier = [(source, [source])]
    while frontier:
        node, path_so_far = frontier.pop(0)
        # Skip neighbours already on this path to keep paths simple.
        for neighbour in set(G.neighbors(node)) - set(path_so_far):
            extended = path_so_far + [neighbour]
            if neighbour == destination:
                yield extended
            else:
                frontier.append((neighbour, extended))
def findAllPaths(G, sources, destinations):
    """Collect every BFS path from any node in *sources* to any node in *destinations*."""
    return [p
            for src in sources
            for dst in destinations
            for p in bfs_paths(G, src, dst)]
#
# class Path():
# def __init__(self, l):
# self.linklist = l
def findClosestIndex(value, valulist):
    """Return the index of the element of *valulist* nearest to *value* (first on ties)."""
    return min(range(len(valulist)), key=lambda i: abs(valulist[i] - value))
def addDict2Dict(dict1, dict2):
    """Return a copy of *dict1* with the counts of *dict2* added in.

    Neither input is mutated.  Every key of *dict2* must already exist in
    *dict1* (or *dict1* must be a defaultdict), otherwise KeyError is raised.
    """
    merged = dict1.copy()
    for key, count in dict2.items():
        merged[key] = merged[key] + count
    return merged
def returnCompatiblePaths(pathlist, linkcounter, maxlink = 1):
    """Yield combinations taking one candidate path per task whose combined
    link usage (on top of *linkcounter*) never exceeds *maxlink* per link.

    pathlist    -- one list of candidate path objects per task; each path
                   exposes a ``linklist`` sequence of links
    linkcounter -- dict/Counter of link usages already committed
    maxlink     -- maximum total uses allowed for any single link
    """
    # for path in pathlist[0]
    # print("length of pathlist:", len(pathlist))
    # print([p.linklist for p in pathlist[0]], linkcounter)
    if pathlist:
        # BFS over partial selections: (next task index, paths chosen so far,
        # accumulated link-usage counter).
        queue = [(0, [], linkcounter)]
        while queue:
            n, histpath, s = queue.pop(0)
            # print("length of pathlist and n:", len(pathlist), n)
            # if n == len(pathlist) - 1:
            #     yield histpath
            # else:
            # Keep only candidates for task n that fit within maxlink.
            nextpaths = []
            for path in pathlist[n]:
                newcounter = Counter(path.linklist)
                combinedcounter = addDict2Dict(s, newcounter)
                valueset = list(combinedcounter.values())
                # print("counter value set:", valueset)
                # print(combinedcounter)
                if max(valueset) <= maxlink:
                    nextpaths.append(path)
                # else:
                #     print(max(valueset))
            # print(len(pathlist[n]), len(nextpaths))
            # print("current path, next pathf:\n", [e.linklist for e in histpath],'\n', [p.linklist for p in nextpaths])
            # print("set:", s)
            n += 1
            # NOTE(review): `np` below shadows the module-level numpy alias
            # inside this loop body.
            for np in nextpaths:
                # print("new path:", np.linklist)
                if n == len(pathlist):
                    # print([p.linklist for p in histpath + [np]])
                    yield histpath + [np]
                else:
                    # scopy = s.union(set(np.linklist))
                    # NOTE(review): `newcounter` here is whatever remained
                    # from the LAST iteration of the filtering loop above,
                    # not the counter of `np` — this looks like it should be
                    # Counter(np.linklist); confirm before relying on the
                    # accumulated usage being correct.
                    combinedcounter = addDict2Dict(s, newcounter)
                    queue.append((n, histpath + [np], combinedcounter))
def returnAvgPathCost(taskPathDict):
    """Return [(cheapest path cost, task id), ...] sorted ascending by cost.

    For each task id in *taskPathDict* the minimum ``pathCost`` over its
    candidate paths is taken; ties are broken by task id.
    """
    cheapest = [(min(p.pathCost for p in paths), taskid)
                for taskid, paths in taskPathDict.items()]
    return sorted(cheapest)
def combineBundles(bundles):
    """Concatenate the task lists and path lists of every bundle.

    Returns a (tasks, paths) tuple of fresh lists; the bundles are not modified.
    """
    alltasks, allpaths = [], []
    for bundle in bundles:
        alltasks += list(bundle.tasklist)
        allpaths += list(bundle.pathlist)
    return alltasks, allpaths
def generateFops(costrange, storange):
    """Yield federated-operations strategy triples of "x<sgl>,<isl>,<storage>" strings.

    For every cost in *costrange* (used for both the SGL and ISL link cost)
    and every ordered pair of storage penalties (sto, sto2) from *storange*,
    yields a three-element list of strategy strings.
    """
    # Removed unused local `fops = []` from the original.
    for cost in costrange:
        costsgl = cost
        costisl = cost
        for sto in storange:
            stopen = sto
            for sto2 in storange:
                stopen2 = sto2
                # BUG FIX: the third entry previously referenced the
                # undefined name `sotpen` (a NameError at first yield);
                # it is the `stopen` typo corrected here.
                yield ["x%d,%d,%d" % (costsgl, costisl, stopen2),
                       "x%d,%d,%d" % (costsgl, costisl, stopen),
                       "x%d,%d,%d" % (costsgl, costisl, stopen)]
def calGaussianKernel(x, y, M, N, scale = 0.8):
    """Return an (M, N) Gaussian kernel centred at (x, y), normalised to sum 1.

    Distance along the first axis wraps around (circular, period M); the
    second axis uses plain absolute distance.  *scale* widens/narrows both
    standard deviations.
    """
    sigma1 = scale * M / 6.
    sigma2 = scale * N / 10.
    kernel = np.zeros((M, N))
    for i in range(M):
        # Circular distance on the first (wrap-around) axis.
        d1 = min(abs(x - i), M - abs(x - i))
        for j in range(N):
            d2 = abs(y - j)
            kernel[i, j] = math.exp(-d1 ** 2 / (2 * sigma1 ** 2) - d2 ** 2 / (2 * sigma2 ** 2))
    return kernel / kernel.sum()
def matchVariance(a, b, var0):
    """Rescale Beta-distribution parameters (a, b) so the variance becomes *var0*.

    The common factor preserves the mean a / (a + b); returns the scaled
    (a', b') pair.
    """
    total = a + b
    factor = (a * b - var0 * total ** 2) / (var0 * total ** 3)
    return (factor * a, factor * b)
# Scratch globals shared with optimizeCost below (only `linkCountList` is
# actually declared `global` there; the other names are shadowed by locals
# of the same name inside that function).
initcostlist = linkCountList = federatelist = pathLinkCount = taskValueDict = linkCountList_A = None
# NOTE(review): this chained assignment binds all four names to the SAME
# empty list object — an in-place mutation through one name would be visible
# through the others; confirm that is intended.
R_LiA = R_TiA = pathTaskValueList = pathlist = []
def calTaskLinkRevenue(costlist, fi, federatelist, pathlist, pathLinkCount, linkCountDict, taskValueDict):
    """Compute federate *fi*'s (task revenue, link revenue) under link prices *costlist*.

    Task revenue is the federate's total task value minus the link charges
    its own paths accrue (price per owning federate times usage count); link
    revenue is its own price times how often its links are used.
    """
    fed = federatelist[fi]
    # Total link charges paid across the paths this federate owns.
    paid = 0.
    for path, usage in zip(pathlist, pathLinkCount):
        if path.elementOwner.federateOwner.name == fed:
            paid += sum(price * usage[owner]
                        for owner, price in zip(federatelist, costlist))
    linkrevenue = costlist[fi] * linkCountDict[fed]
    taskrevenue = taskValueDict[fed] - paid
    return (taskrevenue, linkrevenue)
class Constraint1():
    """SLSQP inequality constraint: federate *fi* must earn at least as much
    (task + link revenue) under the candidate prices as under the adaptive
    baseline revenues R_TiA / R_LiA.  Non-negative return means satisfied.
    """

    def __init__(self, fi, linkCountDict, pathlist, federatelist, pathLinkCount, taskValueDict, R_LiA, R_TiA):
        self.fi = fi
        self.linkCountDict = linkCountDict
        self.pathlist = pathlist
        self.federatelist = federatelist
        self.pathLinkCount = pathLinkCount
        self.taskValueDict = taskValueDict
        self.R_LiA = R_LiA  # baseline link revenue per federate
        self.R_TiA = R_TiA  # baseline task revenue per federate

    def calTaskLinkRevenue(self, costlist, fi, federatelist, pathlist, pathLinkCount, linkCountDict, taskValueDict):
        """Return (task revenue, link revenue) of federate *fi* under *costlist*."""
        fed = federatelist[fi]
        paid = 0.
        for path, usage in zip(pathlist, pathLinkCount):
            if path.elementOwner.federateOwner.name == fed:
                paid += sum(price * usage[owner]
                            for owner, price in zip(federatelist, costlist))
        linkrevenue = costlist[fi] * linkCountDict[fed]
        taskrevenue = taskValueDict[fed] - paid
        return (taskrevenue, linkrevenue)

    def __call__(self, costlist):
        R_Ti, R_Li = self.calTaskLinkRevenue(costlist, self.fi, self.federatelist, self.pathlist,
                                             self.pathLinkCount, self.linkCountDict, self.taskValueDict)
        # Non-negative when the federate is no worse off than the baseline.
        return R_Li + R_Ti - self.R_LiA[self.fi] - self.R_TiA[self.fi]
class Constraint2():
    """SLSQP inequality constraint: the task value carried over path *pi*
    must cover the link charges that path incurs at the candidate prices.
    Non-negative return means satisfied.
    """

    def __init__(self, pi, path, pathTaskValueList, pathLinkCount, federatelist):
        self.pi = pi
        self.path = path
        self.pathTaskValueList = pathTaskValueList
        self.pathLinkCount = pathLinkCount
        self.federatelist = federatelist

    def __call__(self, costlist):
        # Cache this path's per-federate link usage for inspection.
        self.federateCount = self.pathLinkCount[self.pi]
        value = self.pathTaskValueList[self.pi]
        # Total link charges of this path at the candidate prices.
        charges = 0.
        for fed, price in zip(self.federatelist, costlist):
            charges += price * self.federateCount[fed]
        return value - charges
class Objective():
    """SLSQP objective: negated total link revenue, so minimising it
    maximises the revenue sum(cost_i * link_count_i)."""

    def __init__(self, linkcostlist):
        self.linkcostlist = linkcostlist

    def __call__(self, costlist):
        total = sum(price * count for price, count in zip(costlist, self.linkcostlist))
        return -1 * total
def optimizeCost(initCostDict, adaptiveBestBundle, bestBundle):
    """Solve for per-federate link prices via SLSQP.

    Maximises total link revenue for the centralised *bestBundle* subject to
    (a) every federate earning at least its revenue under the adaptive
    baseline *adaptiveBestBundle* at the initial prices in *initCostDict*
    (Constraint1), and (b) every path's task value covering its link charges
    (Constraint2).  Returns {'F1': price, ...} on success, False otherwise.
    Side effect: rebinds the module-level `linkCountList`.
    """
    global linkCountList
    # global initcostlist, linkCountList, federatelist, pathLinkCount, taskValueDict, pathCostDict0, R_LiA, R_TiA
    # Stable federate order: sorted by federate name.
    initCostItems = sorted(list(initCostDict.items()))
    federatelist = [e[0] for e in initCostItems]
    initcostlist = [e[1] for e in initCostItems]
    # pathCostDict = defaultdict(int)
    pathLinkCount = []
    pathTaskValueList = []
    linkCountDict = defaultdict(int)
    taskValueDict = defaultdict(int)
    # pathCostDict0 = defaultdict(int)
    # Tally task values and cross-federate link usage for the candidate bundle.
    pathlist = bestBundle.pathlist
    taskvalues = bestBundle.taskvalues
    for taskvalue, path in zip(taskvalues, pathlist):
        federateOwner = path.elementOwner.federateOwner.name
        taskValueDict[federateOwner] += taskvalue
        # Only links owned by OTHER federates are chargeable.
        linkfederates = [e.name for e in path.linkfederatelist if federateOwner != e.name]
        pathTaskValueList.append(taskvalue)
        # print(federateOwner, [e.name for e in path.linkfederatelist], linkfederates)
        federateCount = defaultdict(int, Counter(linkfederates))
        pathLinkCount.append(federateCount)
        for f, c in federateCount.items():
            linkCountDict[f] += c
    linkCountList = [e[1] for e in sorted(list(linkCountDict.items()))]
    # print(linkCountDict, linkCountList)
    # Same tally for the adaptive baseline bundle.
    pathLinkCount_A = []
    linkCountDict_A = defaultdict(int)
    taskValueDict_A = defaultdict(int)
    # pathCostDict0 = defaultdict(int)
    pathlist_A = adaptiveBestBundle.pathlist
    taskvalues_A = adaptiveBestBundle.taskvalues
    for taskvalue, path in zip(taskvalues_A, pathlist_A):
        federateOwner = path.elementOwner.federateOwner.name
        taskValueDict_A[federateOwner] += taskvalue
        linkfederates = [e.name for e in path.linkfederatelist if federateOwner != e.name]
        federateCount = defaultdict(int, Counter(linkfederates))
        pathLinkCount_A.append(federateCount)
        for f, c in federateCount.items():
            linkCountDict_A[f] += c
    # linkCountList_A = [e[1] for e in sorted(list(linkCountDict_A.items()))]
    # print("Federate link Count list:", linkCountList_A)
    # pathCostDict_A = defaultdict(int)
    # pathCostDict_A[federateOwner] = sum([federateCount[fed] * cost
    #                                       for federateCount, fed, cost in zip(pathLinkCount_A, federatelist, initcostlist)])
    # if len(pathlist) != len(pathlist_A):
    #     print(len(pathlist), len(pathlist_A))
    # Baseline revenues per federate at the initial prices.
    R_LiA = []
    R_TiA = []
    for i in range(len(federatelist)):
        Rt, Rl = calTaskLinkRevenue(initcostlist, i, federatelist, pathlist_A, pathLinkCount_A, linkCountDict_A, taskValueDict_A)
        R_LiA.append(Rl)
        R_TiA.append(Rt)
    # print("Revenue Adaptive:", sum(R_TiA) + sum(R_LiA))
    # print("zero and adaptive links:", linkCountDict, linkCountDict_A)
    # print("Adaptive task and link revenue:", R_TiA, R_LiA)
    # def objective(costlist):
    #     global linkCountList
    #     # print("objective funciton :", )
    #     # print(linkCountList)
    #     return -1*sum([a*b for a,b in zip(costlist, linkCountList)])
    objective = Objective(linkCountList)
    conslist1 = [{'type': 'ineq', 'fun': Constraint1(i, linkCountDict, pathlist, federatelist, pathLinkCount, taskValueDict, R_LiA, R_TiA)} for i in range(len(initcostlist))]
    conslist2 = [{'type': 'ineq', 'fun': Constraint2(i, path, pathTaskValueList, pathLinkCount, federatelist)} for i, path in enumerate(pathlist)]
    # con1 = {'type': 'ineq', 'fun': constraint1}
    # con2 = {'type': 'ineq', 'fun': constraint2}
    # con3 = {'type': 'ineq', 'fun': constraint3}
    cons = conslist1 + conslist2 # [con1, con2, con3][:len(initCostDict)]
    bnds = [(min(0, 1100), 1101) for c in initcostlist]
    # print("boundaries:", bnds)
    # print("length of constraints:", len(initCostDict), len(cons))
    # Start the solver from all-zero prices (initial prices kept in templist).
    templist = initcostlist[:]
    initcostlist = [0 for i in range(len(initcostlist))]
    sol = minimize(objective, initcostlist, method = 'SLSQP', bounds = bnds, constraints = cons)
    # print("solution:", sol.x)
    # print("constraints:")
    # for con in cons:
    # Re-evaluate all constraints at the solution (rounded to ints).
    cons_changes = [int(round(con['fun'](sol.x))) for con in cons]
    # print(cons_changes)
    # consresults = all([e >= 0 for e in [int(round(con['fun'](sol.x))) for con in cons]])
    # Accept only if every constraint holds and the first two show strict gain.
    if all([e >= 0 for e in cons_changes]) and sum(cons_changes[:2])>0:
    # if True:
        # print(templist, [int(e) for e in sol.x])
        # print("Revenue 2, 1:", [int(round(con['fun'](sol.x))) for con in cons])
        # print('')
        return {'F%d' % (i+1): c for i, c in enumerate(list(sol.x))}
    else:
        return False
# print(calGaussianKernel(0,7,6,10, 0.6))
# nactions = 12
# nstates = 6
# N = nactions * 10
# M = nstates * 10
# n = int(N/3)
# m = int(2*M/3)
# kernelmesh = np.zeros((M, N))
#
# kernelmesh = calGaussianKernel(m,n, M, N)
#
# print(sum(sum(kernelmesh)))
# print(kernelmesh.shape)
#
# # f, (ax1, ax2) = plt.subplots(1, 2, sharex=False, sharey=True)
# gs = gridspec.GridSpec(1, 2, width_ratios=[2, 1])
# ax1 = plt.subplot(gs[0])
# ax2 = plt.subplot(gs[1], sharey=ax1)
# plt.setp(ax2.get_yticklabels(), visible=False)
#
# ax1.plot(100*kernelmesh[m, :], 'k--', zorder = -1)
# ax1.text(1,0.21, ha="left", va="center", s = 'sector = 4')
# ax1.axvline(m, zorder = -2)
# x1 = [i for i in range(0, N-1, 10)]
# y = list(100*kernelmesh[m,:])[::10]
# ax1.scatter(x1, y, marker = 'o', s = 100, facecolors = 'w', edgecolors= 'k')
# ax1.set_xlabel('action (k$)')
# ax1.set_ylabel(r'Q learning factor: $\alpha$')
# # ax1.set_title('sector = %d'%m)
# ax2.plot(list(100*kernelmesh[:, n]), 'k--', zorder = -1)
# ax2.axvline(n, zorder = -2)
# ax2.text(1,0.21, ha="left", va="center", s = 'action = 0.4')
#
# x2 = [i for i in range(0, M-1, 10)]
# y = list(100*kernelmesh[:, n])[::10]
# ax2.scatter(x2, y, marker = 'o', s = 100, facecolors = 'w', edgecolors= 'k')
# ax2.set_xlabel('states (sectors)')
# # ax2.set_title('action = %1.1f'%(n/100.))
# plt.sca(ax1)
# plt.xticks(x1, [100*a/1000 for a in range(nactions)], rotation = 0)
# plt.xlim(-5, N-5)
#
# plt.sca(ax2)
# plt.xticks(x2, [(i+1) for i in range(nstates)], rotation = 0)
# plt.xlim(-5, M-5)
# plt.tight_layout()
#
# plt.savefig("Q_Gaussian_qupdate.pdf", bbox_inches='tight')
# plt.suptitle('state-action-reward: (4, 0.4, 1)')
# plt.subplots_adjust(top=0.93)
# plt.show()
# #
# for path in pathlist[0]:
# print(path)
# tempset = set(path.linklist)
# print("Linkset and temp set:", linkset, tempset)
# inter = linkset.intersection(tempset)
# print(inter)
# if inter:
# continue
# else:
# nextset = linkset.union(tempset)
# print("nextset:", nextset)
# print("length:", len(pathlist))
# if len(pathlist)>1:
#
# yield returnCompatiblePaths(pathlist[1:], nextset, histpath + [path])
# else:
# yield histpath + [path]
# l1 = [(1,2), (2,3), (3,4)]
# l2 = [(2,4), (4,9)]
# l3 = [(1,3), (4,5)]
#
#
# p1 = Path(l1)
# p2 = Path(l2)
# p3 = Path(l3)
# p4 = Path([(1,4),(5,6)])
#
# gen = returnCompatiblePaths([[p1, p2, p3, p4], [p1, p2, p3, p4],[p1, p2, p3, p4]])
# print(len(list(gen)))
# for g in gen:
# print([e.linklist for e in g])
# nodes = range(1,12)
# edges = [(1,7), (4,7), (4,2), (6,2), (4,7), (7,3), (7,5), (2,5), (2,8), (3,11), (3,9), (5,11), (5,9), (8,9), (8,10)]
# sources = [1, 4, 6]
# destinations = [9, 10, 11]
#
# Graph = nx.DiGraph()
# Graph.add_nodes_from(nodes)
# Graph.add_edges_from(edges)
#
# # for s in sources:
# # print s
# # gen = findAllPaths(Graph, [s], destinations)
# # print gen
#
#
# print findAllPathes(Graph, sources, destinations)
# hardcoded_designs = (
# # "1.GroundSta@SUR1 2.GroundSta@SUR4 1.Sat@MEO1 2.Sat@MEO3 1.Sat@LEO1 2.Sat@LEO2",
# # "1.GroundSta@SUR1 2.GroundSta@SUR4 1.Sat@GEO1 1.Sat@MEO1 2.Sat@MEO3 1.Sat@LEO1 2.Sat@LEO2",
# "1.GroundSta@SUR1 2.GroundSta@SUR4 1.Sat@MEO1 1.Sat@MEO4 2.Sat@MEO5 1.Sat@LEO1 2.Sat@LEO2",
# # "1.GroundSta@SUR1 2.GroundSta@SUR4 1.Sat@MEO1 1.Sat@MEO3 1.Sat@MEO4 2.Sat@MEO5 2.Sat@MEO6",
# "1.GroundSta@SUR1 2.GroundSta@SUR4 2.Sat@GEO4 1.Sat@MEO1 1.Sat@MEO4 2.Sat@MEO5 1.Sat@LEO1 2.Sat@LEO2",
# # "1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 2.Sat@GEO3 1.Sat@MEO1 2.Sat@MEO3 3.Sat@MEO6 1.Sat@LEO2",
# "1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 1.Sat@MEO1 1.Sat@MEO2 2.Sat@MEO3 2.Sat@MEO5 3.Sat@MEO6",
# "1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 3.Sat@GEO5 1.Sat@MEO1 1.Sat@MEO2 2.Sat@MEO3 2.Sat@MEO5 3.Sat@MEO6",
# "1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 1.Sat@MEO1 2.Sat@MEO2 3.Sat@MEO5 1.Sat@LEO2 2.Sat@LEO4 3.Sat@LEO6",
# # "1.GroundSta@SUR1 2.GroundSta@SUR3 3.GroundSta@SUR5 1.Sat@GEO1 1.Sat@MEO1 2.Sat@MEO4 3.Sat@MEO5 1.Sat@LEO2 2.Sat@LEO4 3.Sat@LEO6",
# )
#
| [
"matplotlib.pyplot.clf",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"networkx.draw_networkx_nodes",
"matplotlib.pyplot.contour",
"networkx.draw_networkx_labels",
"matplotlib.pyplot.fill_between",
"scipy.optimize.minimize",
"numpy.meshgrid",
"matplotlib.font_manager.FontProperties",
"... | [((1646, 1704), 'numpy.linspace', 'np.linspace', (['sortedpoints[0][0]', 'sortedpoints[1][0]'], {'num': '(2)'}), '(sortedpoints[0][0], sortedpoints[1][0], num=2)\n', (1657, 1704), True, 'import numpy as np\n'), ((1714, 1772), 'numpy.linspace', 'np.linspace', (['sortedpoints[1][0]', 'sortedpoints[2][0]'], {'num': '(2)'}), '(sortedpoints[1][0], sortedpoints[2][0], num=2)\n', (1725, 1772), True, 'import numpy as np\n'), ((3397, 3406), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3404, 3406), True, 'import matplotlib.pyplot as plt\n'), ((5640, 5666), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', '(50)'], {}), '(-1.0, 1.0, 50)\n', (5651, 5666), True, 'import numpy as np\n'), ((5675, 5701), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', '(50)'], {}), '(-1.0, 1.0, 50)\n', (5686, 5701), True, 'import numpy as np\n'), ((5713, 5730), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5724, 5730), True, 'import numpy as np\n'), ((5766, 5791), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'F', '[0]'], {}), '(X, Y, F, [0])\n', (5777, 5791), True, 'import matplotlib.pyplot as plt\n'), ((5814, 5960), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos'], {'nodelist': "[n for n in nodes if 'GS' not in n and nameselementdict[n].savedTasks]", 'node_color': '"""r"""', 'node_size': '(100)'}), "(G, pos, nodelist=[n for n in nodes if 'GS' not in n and\n nameselementdict[n].savedTasks], node_color='r', node_size=100)\n", (5836, 5960), True, 'import networkx as nx\n'), ((5989, 6139), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos'], {'nodelist': "[n for n in nodes if 'GS' not in n and not nameselementdict[n].savedTasks]", 'node_color': '"""g"""', 'node_size': '(100)'}), "(G, pos, nodelist=[n for n in nodes if 'GS' not in n and\n not nameselementdict[n].savedTasks], node_color='g', node_size=100)\n", (6011, 6139), True, 'import networkx as nx\n'), ((6306, 6413), 'networkx.draw_networkx_nodes', 
'nx.draw_networkx_nodes', (['G', 'pos'], {'nodelist': "[n for n in nodes if 'GS' in n]", 'node_color': '"""b"""', 'node_size': '(100)'}), "(G, pos, nodelist=[n for n in nodes if 'GS' in n],\n node_color='b', node_size=100)\n", (6328, 6413), True, 'import networkx as nx\n'), ((6625, 6682), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G', 'labelpos', 'labels'], {'font_size': '(8)'}), '(G, labelpos, labels, font_size=8)\n', (6648, 6682), True, 'import networkx as nx\n'), ((6688, 6702), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6698, 6702), True, 'import matplotlib.pyplot as plt\n'), ((6707, 6721), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6717, 6721), True, 'import matplotlib.pyplot as plt\n'), ((6726, 6745), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2.5)', '(2.5)'], {}), '(-2.5, 2.5)\n', (6734, 6745), True, 'import matplotlib.pyplot as plt\n'), ((6750, 6769), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2.5)', '(2.5)'], {}), '(-2.5, 2.5)\n', (6758, 6769), True, 'import matplotlib.pyplot as plt\n'), ((6791, 6801), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (6799, 6801), True, 'import matplotlib.pyplot as plt\n'), ((6806, 6830), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (6828, 6830), True, 'import matplotlib.pyplot as plt\n'), ((7027, 7044), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7038, 7044), False, 'from collections import Counter, defaultdict\n'), ((7071, 7088), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7082, 7088), False, 'from collections import Counter, defaultdict\n'), ((7121, 7138), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7132, 7138), False, 'from collections import Counter, defaultdict\n'), ((7160, 7220), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(800 / my_dpi, 800 / my_dpi)', 'dpi': 'my_dpi'}), '(figsize=(800 / my_dpi, 800 / 
my_dpi), dpi=my_dpi)\n', (7170, 7220), True, 'import matplotlib.pyplot as plt\n'), ((7503, 7519), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {}), '()\n', (7517, 7519), False, 'from matplotlib.font_manager import FontProperties\n'), ((9427, 9441), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (9437, 9441), True, 'import matplotlib.pyplot as plt\n'), ((9446, 9460), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (9456, 9460), True, 'import matplotlib.pyplot as plt\n'), ((9480, 9501), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-rlim)', 'rlim'], {}), '(-rlim, rlim)\n', (9488, 9501), True, 'import matplotlib.pyplot as plt\n'), ((9506, 9533), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-rlim + 0.2)', 'rlim'], {}), '(-rlim + 0.2, rlim)\n', (9514, 9533), True, 'import matplotlib.pyplot as plt\n'), ((9536, 9551), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9544, 9551), True, 'import matplotlib.pyplot as plt\n'), ((9619, 9693), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Design_%s.pdf' % des_roman_dict[number])"], {'bbox_inches': '"""tight"""'}), "('Design_%s.pdf' % des_roman_dict[number], bbox_inches='tight')\n", (9630, 9693), True, 'import matplotlib.pyplot as plt\n'), ((9757, 9769), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9767, 9769), True, 'import matplotlib.pyplot as plt\n'), ((9846, 9891), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0, 0)', '(1.1)'], {'color': '"""k"""', 'fill': '(True)'}), "((0, 0), 1.1, color='k', fill=True)\n", (9856, 9891), True, 'import matplotlib.pyplot as plt\n'), ((11400, 11410), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11408, 11410), True, 'import matplotlib.pyplot as plt\n'), ((15026, 15042), 'numpy.zeros', 'np.zeros', (['(M, N)'], {}), '((M, N))\n', (15034, 15042), True, 'import numpy as np\n'), ((19284, 19300), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (19295, 19300), False, 'from 
collections import Counter, defaultdict\n'), ((19321, 19337), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (19332, 19337), False, 'from collections import Counter, defaultdict\n'), ((20188, 20204), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (20199, 20204), False, 'from collections import Counter, defaultdict\n'), ((20227, 20243), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (20238, 20243), False, 'from collections import Counter, defaultdict\n'), ((22798, 22883), 'scipy.optimize.minimize', 'minimize', (['objective', 'initcostlist'], {'method': '"""SLSQP"""', 'bounds': 'bnds', 'constraints': 'cons'}), "(objective, initcostlist, method='SLSQP', bounds=bnds, constraints=cons\n )\n", (22806, 22883), False, 'from scipy.optimize import minimize\n'), ((2730, 2769), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x2', 'y3', 'y4'], {'color': 'col'}), '(x2, y3, y4, color=col)\n', (2746, 2769), True, 'import matplotlib.pyplot as plt\n'), ((3311, 3331), 'matplotlib.pyplot.fignum_exists', 'plt.fignum_exists', (['(1)'], {}), '(1)\n', (3328, 3331), True, 'import matplotlib.pyplot as plt\n'), ((3341, 3354), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3351, 3354), True, 'import matplotlib.pyplot as plt\n'), ((3363, 3372), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3370, 3372), True, 'import matplotlib.pyplot as plt\n'), ((3381, 3391), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3389, 3391), True, 'import matplotlib.pyplot as plt\n'), ((7257, 7291), 'numpy.linspace', 'np.linspace', (['(-1.0 * r)', '(1.0 * r)', '(50)'], {}), '(-1.0 * r, 1.0 * r, 50)\n', (7268, 7291), True, 'import numpy as np\n'), ((7300, 7334), 'numpy.linspace', 'np.linspace', (['(-1.0 * r)', '(1.0 * r)', '(50)'], {}), '(-1.0 * r, 1.0 * r, 50)\n', (7311, 7334), True, 'import numpy as np\n'), ((7346, 7363), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (7357, 7363), True, 
'import numpy as np\n'), ((7404, 7489), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'F', '[0]'], {'colors': '"""k"""', 'linewidths': '(0.3)', 'origin': '"""lower"""', 'zorder': '(-1)'}), "(X, Y, F, [0], colors='k', linewidths=0.3, origin='lower', zorder=-1\n )\n", (7415, 7489), True, 'import matplotlib.pyplot as plt\n'), ((10313, 10352), 'matplotlib.pyplot.subplot', 'plt.subplot', (["('%d%d%d' % (n1, n2, j + 1))"], {}), "('%d%d%d' % (n1, n2, j + 1))\n", (10324, 10352), True, 'import matplotlib.pyplot as plt\n'), ((10365, 10391), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', '(50)'], {}), '(-1.0, 1.0, 50)\n', (10376, 10391), True, 'import numpy as np\n'), ((10404, 10430), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', '(50)'], {}), '(-1.0, 1.0, 50)\n', (10415, 10430), True, 'import numpy as np\n'), ((10446, 10463), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (10457, 10463), True, 'import numpy as np\n'), ((10507, 10532), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'F', '[0]'], {}), '(X, Y, F, [0])\n', (10518, 10532), True, 'import matplotlib.pyplot as plt\n'), ((10563, 10701), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['g', 'pos'], {'nodelist': "[n for n in nodes if 'Ground' not in n and 'LE' not in sec[n]]", 'node_color': '"""r"""', 'node_size': '(100)'}), "(g, pos, nodelist=[n for n in nodes if 'Ground' not in\n n and 'LE' not in sec[n]], node_color='r', node_size=100)\n", (10585, 10701), True, 'import networkx as nx\n'), ((10737, 10871), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['g', 'pos'], {'nodelist': "[n for n in nodes if 'Ground' not in n and 'LE' in sec[n]]", 'node_color': '"""g"""', 'node_size': '(100)'}), "(g, pos, nodelist=[n for n in nodes if 'Ground' not in\n n and 'LE' in sec[n]], node_color='g', node_size=100)\n", (10759, 10871), True, 'import networkx as nx\n'), ((10907, 11018), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['g', 'pos'], 
{'nodelist': "[n for n in nodes if 'Ground' in n]", 'node_color': '"""b"""', 'node_size': '(100)'}), "(g, pos, nodelist=[n for n in nodes if 'Ground' in n],\n node_color='b', node_size=100)\n", (10929, 11018), True, 'import networkx as nx\n'), ((11023, 11053), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['g', 'pos'], {}), '(g, pos)\n', (11045, 11053), True, 'import networkx as nx\n'), ((11062, 11119), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['g', 'labelpos', 'labels'], {'font_size': '(8)'}), '(g, labelpos, labels, font_size=8)\n', (11085, 11119), True, 'import networkx as nx\n'), ((11128, 11142), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (11138, 11142), True, 'import matplotlib.pyplot as plt\n'), ((11151, 11165), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (11161, 11165), True, 'import matplotlib.pyplot as plt\n'), ((11174, 11193), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2.5)', '(2.5)'], {}), '(-2.5, 2.5)\n', (11182, 11193), True, 'import matplotlib.pyplot as plt\n'), ((11202, 11221), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2.5)', '(2.5)'], {}), '(-2.5, 2.5)\n', (11210, 11221), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1046), 'math.cos', 'math.cos', (['tetha'], {}), '(tetha)\n', (1039, 1046), False, 'import math\n'), ((1052, 1067), 'math.sin', 'math.sin', (['tetha'], {}), '(tetha)\n', (1060, 1067), False, 'import math\n'), ((2865, 2904), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x1', 'y1', 'y2'], {'color': 'col'}), '(x1, y1, y2, color=col)\n', (2881, 2904), True, 'import matplotlib.pyplot as plt\n'), ((9359, 9422), 'matplotlib.pyplot.annotate', 'plt.annotate', (["('F%d' % f)"], {'xy': '(x, y)', 'xytext': '(x - 0.1, y - 0.075)'}), "('F%d' % f, xy=(x, y), xytext=(x - 0.1, y - 0.075))\n", (9371, 9422), True, 'import matplotlib.pyplot as plt\n'), ((15201, 15277), 'math.exp', 'math.exp', (['(-delta1 ** 2 / (2 * sigma1 ** 2) - delta2 ** 2 / (2 * sigma2 ** 
2))'], {}), '(-delta1 ** 2 / (2 * sigma1 ** 2) - delta2 ** 2 / (2 * sigma2 ** 2))\n', (15209, 15277), False, 'import math\n'), ((19880, 19902), 'collections.Counter', 'Counter', (['linkfederates'], {}), '(linkfederates)\n', (19887, 19902), False, 'from collections import Counter, defaultdict\n'), ((20681, 20703), 'collections.Counter', 'Counter', (['linkfederates'], {}), '(linkfederates)\n', (20688, 20703), False, 'from collections import Counter, defaultdict\n'), ((921, 951), 're.search', 're.search', (['""".+(\\\\d)"""', 'location'], {}), "('.+(\\\\d)', location)\n", (930, 951), False, 'import re\n'), ((3133, 3172), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x1', 'y1', 'y2'], {'color': 'col'}), '(x1, y1, y2, color=col)\n', (3149, 3172), True, 'import matplotlib.pyplot as plt\n'), ((3182, 3221), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x2', 'y3', 'y4'], {'color': 'col'}), '(x2, y3, y4, color=col)\n', (3198, 3221), True, 'import matplotlib.pyplot as plt\n'), ((12824, 12846), 'collections.Counter', 'Counter', (['path.linklist'], {}), '(path.linklist)\n', (12831, 12846), False, 'from collections import Counter, defaultdict\n'), ((8756, 8792), 're.search', 're.search', (['"""\\\\d\\\\.(.+)@(\\\\w+\\\\d)"""', 'e'], {}), "('\\\\d\\\\.(.+)@(\\\\w+\\\\d)', e)\n", (8765, 8792), False, 'import re\n'), ((8950, 8980), 're.search', 're.search', (['"""(.+)@(\\\\w+\\\\d)"""', 'e'], {}), "('(.+)@(\\\\w+\\\\d)', e)\n", (8959, 8980), False, 'import re\n')] |
import numpy as np
from numba import float64, int64, boolean, deferred_type
from numba.experimental import jitclass
from polygon import Polygon
from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt
from linalg_utils import CheckCircleIntersect, Normalize, CircleLineIntersectDiscriminant, FullLineIntersections, MirrorPoints
# Numba must know the concrete type of the `polygons` member before the
# jitclass below is compiled; deferred_type lets us declare it first and
# bind it to the Polygon jitclass type immediately afterwards.
poly_type = deferred_type()
poly_type.define(Polygon.class_type.instance_type)
# Attribute type specification for the CirclePack jitclass:
# per-circle 1D arrays (x, y, r, vx, vy), scalar config (n, precision,
# on_point), the containing polygons, and pairwise 2D matrices (dx, dy, d, dr).
pack_spec = [('x', float64[:]),
             ('y', float64[:]),
             ('r', float64[:]),
             ('n', int64),
             ('precision', int64),
             ('on_point', boolean),
             ('polygons', poly_type),
             ('dx', float64[:, :]),
             ('dy', float64[:, :]),
             ('d', float64[:, :]),
             ('dr', boolean[:, :]),
             ('vx', float64[:]),
             ('vy', float64[:])]
@jitclass(pack_spec)
class CirclePack(object):
    """
    Class storing information on circle properties
    Attributes
    ----------
    x : 1D numpy array, floats
        The x-coordinates of the circle centres.
        Necessary to create class object, initiated on class creation.
    y : 1D numpy array, floats
        The y-coordinates of the circle centres.
        Necessary to create class object, initiated on class creation.
    r : 1D numpy array, floats
        The circle radii.
        Necessary to create class object, initiated on class creation.
    precision: int
        The accuracy to which is rounded.
        This is important for determining whether a point either intersects with a line, or is just very close to it.
        Default is 9
    on_point: boolean
        If True, the circles will bounce on their centers when encountering a polygon edge.
        If False, they will bounce on their edges.
        Default is True.
    polygons: Polygons class
        Special numba class type containing the polygon shapes which will contain the circles.
    n : integer
        The amount of circles.
        Determined on class object initialization.
    vx : 1D numpy array, floats
        Velocity over the x-axis.
        Determined during circlepack.run(). Initializes as random uniform between -0.01 and 0.01.
    vy : 1D numpy array, floats
        Velocity over the y-axis.
        Determined during circlepack.run(). Initializes as random uniform between -0.01 and 0.01.
    dx : 2D numpy array, floats
        Distance between two circles over the x-axis. Determined during initialization.
    dy : 2D numpy array, floats
        Distance between two circles over the y-axis. Determined during initialization.
    """
    def __init__(self, x, y, r, polygons, on_point = True, precision = 9):
        """
        Initialize the CirclePack class object.
        This class functions as a consistent storage of parameters.
        Parameters
        ----------
        r : 1D numpy array, floats
            The circle radii.
        x : 1D numpy array, floats
            The x-coordinates of the circle centres.
        y : 1D numpy array, floats
            The y-coordinates of the circle centres.
        polygons: Polygons class
            Special numba class type containing the polygon shapes which will contain the circles.
        """
        self.x = x
        self.y = y
        self.r = r
        self.polygons = polygons
        self.on_point = on_point
        self.precision = precision
        self.n = len(self.x)
        # Pairwise matrices start zeroed; filled by Distance()/Overlap().
        self.d = np.zeros((self.x.size, self.x.size))
        self.dx = np.zeros((self.x.size, self.x.size))
        self.dy = np.zeros((self.x.size, self.x.size))
        # Small random initial velocities in [-0.01, 0.01].
        self.vx = np.random.uniform(-1, 1, size = self.n)/100
        self.vy = np.random.uniform(-1, 1, size = self.n)/100
    def set_precision(self, new_precision):
        # Setter kept as a method (jitclass-friendly) rather than a property.
        self.precision = new_precision
    @property
    def circle_indeces(self):
        """
        Returns
        -------
        1D numpy array, int
            The indexes of all the circles.
        """
        return np.array(list(range(self.n)))
    @property
    def fp(self):
        """
        fp = future positions
        The points the circles will be in after adding their velocity.
        Returns
        -------
        2D numpy array, float
            A CirclePack.n by 2 numpy array with cartesian coordinates of the future circle center locations
        """
        ball_fp = np.column_stack((self.x + self.vx, self.y + self.vy))
        return NbRound2(ball_fp, self.precision)
    def fp_ball(self, ball):
        """
        fp = future positions
        Returns the future position of a specific circle
        Parameters
        ----------
        ball : int
            Index of the circle of which the future position is requested.
        Returns
        -------
        1D numpy array
            Cartesian coordinates of the future ball position.
        """
        ball_fp = np.array((self.x[ball] + self.vx[ball], self.y[ball] + self.vy[ball]))
        return NbRound1(ball_fp, self.precision)
    @property
    def p(self):
        """
        p = positions
        The cartesian coordinates of the circle positions.
        Returns
        -------
        2D numpy array, float
            A CirclePack.n by 2 numpy array with cartesian coordinates of the circle center locations
        """
        ball_p = np.column_stack((self.x , self.y))
        return NbRound2(ball_p, self.precision)
    def p_ball(self, ball):
        """
        p = positions
        Returns the position of a specific circle
        Parameters
        ----------
        ball : int
            Index of the circle of which the position is requested.
        Returns
        -------
        1D numpy array
            Cartesian coordinates of the ball position.
        """
        ball_p = np.array((self.x[ball], self.y[ball]))
        return NbRound1(ball_p, self.precision)
    def update_positions(self):
        """
        Sets the x- and y-coordinates based on current locations and their velocities.
        Returns
        -------
        None.
        """
        self.x = NbRound1(self.x + self.vx, self.precision)
        self.y = NbRound1(self.y + self.vy, self.precision)
    def CheckEdges(self):
        """
        Check if the circles intersect with their polygon.
        Returns
        -------
        edge_check : 3D numpy array, boolean.
            For each circle is checked if it overlaps with a polygon and on which segment this occurs.
        """
        check_int = np.zeros((self.n, self.polygons.n, self.polygons.polygons.shape[1]))
        edge_check = check_int > 0
        for b in range(self.n):
            for s in range(self.polygons.n):
                # Segment count per polygon: number of non-NaN vertices minus one
                # (polygons are NaN-padded to a fixed-width array).
                for seg in range(np.logical_not(np.isnan(self.polygons.polygons[s][:,0])).sum() - 1):
                    edge_check[b][s][seg] = CheckCircleIntersect(self.fp_ball(b), self.r[b], self.polygons.polygons[s][seg], self.polygons.polygons[s][seg+1])
        return edge_check
    def Distance(self):
        """
        Calculates the distances between points
        Returns
        -------
        d : 2D numpy array, floats
            Cartesian distance between the circle centres
        dx : 2D numpy array, floats
            The distance between the circle centres, over the x-axis
        dy : 2D numpy array, floats
            The distance between the circle centres, over the y-axis.
        """
        # returns a numpy array with the distance between all the balls
        dx = np.zeros((self.n, self.n))
        dx[:] = self.x
        self.dx = dx - dx.T
        dy = np.zeros((self.n, self.n))
        dy[:] = self.y
        self.dy = dy - dy.T
        self.d = np.sqrt(self.dx**2 + self.dy**2)
        # Refresh the overlap matrix alongside the distances.
        self.Overlap()
        return self.d, self.dx, self.dy
    def Overlap(self):
        """
        Sets (and returns) CirclePack.dr.
        Returns
        -------
        dr : 2D numpy array, boolean
            A CirclePack.n by Circlepack.n matrix with boolean values, showing if a circle on a row overlaps with a circle on the columns (True) or not (False).
        """
        dr = np.zeros((self.n, self.n))
        dr[:] = self.r
        # centre distance minus both radii: negative means the circles overlap
        dr = self.d - dr - dr.T
        np.fill_diagonal(dr, np.max(self.r) + 999) # set distances with self to huge
        self.dr = dr < 0
        return self.dr
    def GetForce(self, move):
        """
        When circles overlap, they apply a force on each other to push themselves away.
        Parameters
        ----------
        move : 1D numpy array
            Indices of the circles that overlap and therefore need to be moved.
        Returns
        -------
        fx : 1D numpy array, floats
            Force to be applied on the x-axis.
        fy : 1D numpy array, floats
            Force to be applied on the y-axis.
        """
        fx = np.zeros(self.n)
        fy = np.zeros(self.n)
        for b in move:
            # The balls it is overlapping w/
            ob = np.where(self.dr[b])[0]
            # The distances
            diff_x = self.dx[b][ob]
            diff_y = self.dy[b][ob]
            diff_xy = np.column_stack((diff_x, diff_y)) # The distance between the balls in vectors
            # Normalize the distance to vector with length of 1
            diffn = Normalize(diff_xy)
            # Set force as a function of the absolute distance to other circles
            diff_dist = self.d[b][ob]
            force1 = -1 * diffn / (1 / (diff_dist**2)).reshape(diffn.shape[0], 1)
            # Check for norm bounds
            normbi = np.zeros((force1.shape[0], 2))
            for i, vectorbi in enumerate(force1):
                normbi[i] = np.linalg.norm(vectorbi)
            normbibool = np.where(normbi > 0)[0]
            force = force1.copy()
            for ni in normbibool:
                # Subtract the ball's own velocity from each non-zero force term.
                force[ni] = np.subtract(force1[ni], np.array([self.vx[b], self.vy[b]]))
            net_force = force.sum(axis = 0)/(self.n - 1)
            if np.linalg.norm(net_force) < self.r[b]/10:
                # Enforce a minimum kick of r/10 so overlapping circles keep moving.
                # NOTE(review): AddAxis is not among this module's visible imports
                # (numba_utils / linalg_utils) -- verify it is actually in scope.
                net_force = Normalize(AddAxis(net_force)) * self.r[b] / 10
                net_force = net_force[0]
            # Set force
            fx[b] = net_force[0]
            fy[b] = net_force[1]
        return fx, fy
    def BouncePointCircle(self, edge_detect):
        """
        Returns the average of all the line segments the circles overlaps with.
        TODO: It doesn't need to return a value for all the circles.
        Parameters
        ----------
        edge_detect : 1D numpy array (C.n)
            Boolean array with True for the circles that overlap, False for those who do not.
        Returns
        -------
        1D numpy array floats (len(np.unique(edge_detect[0])), 2)
            The average of all the line segments the circles overlaps with.
        """
        bounce_points = np.full((self.n, self.polygons.n, self.polygons.polygons.shape[1], 2), np.nan)
        for i, ball in enumerate(edge_detect[0]):
            p = edge_detect[1][i]
            seg = edge_detect[2][i]
            cx, cy, dx, dy, dr, big_d, discriminant = CircleLineIntersectDiscriminant(self.fp_ball(ball),
                                                                                   self.r[ball],
                                                                                   self.polygons.polygons[p][seg],
                                                                                   self.polygons.polygons[p][seg + 1])
            intersections = FullLineIntersections(cx, cy, dx, dy, dr, big_d, discriminant)
            # Mean of the two intersection points approximates the contact point.
            bounce_points[ball, p, seg] = Mean0(intersections)
        bounce_point = NanMean12(bounce_points)
        return bounce_point[np.unique(edge_detect[0])]
    def BouncePoint(self):
        """
        Calculates the representative velocity of the bounce with the walls. Circles "bounce" on the circle centre.
        Sets the velocity of the balls to stay within the polygon.
        """
        # Express the path the circle will travel as a line (lp)
        p = self.p # current positions
        fp = self.fp # future ball position
        lp = np.zeros((self.n, 2, 2))
        for i in range(self.n):
            lp[i][0] = p[i]
            lp[i][1] = fp[i]
        # Determine which circles will end up outside the polygon and therefore will have to bounce
        bouncy = np.where(np.logical_not(self.polygons.ContainsPoints(fp)))[0]
        if len(bouncy) > 0:
            bouncy_lines = lp[bouncy]
            # Determine where the traveling path lp intersects the line and with which segment.
            intersect, intersegments = self.polygons.PolygonIntersection(bouncy_lines)
            # Determine where the ball would bounce towards and set velocity to move towards that point
            self.x[bouncy] = intersect[:, 0]
            self.y[bouncy] = intersect[:, 1]
            v = np.transpose(np.vstack((self.vx, self.vy)))
            # Mirror the overshoot across the crossed segment to get the rebound.
            v[bouncy] = MirrorPoints(fp[bouncy], intersegments) - intersect
            self.vx, self.vy = np.transpose(v)
    def BounceEdge(self):
        """
        Calculates the representative velocity of the bounce with the walls. Circles "bounce" on the circle edge.
        Sets the velocity of the balls to stay within the polygon.
        """
        # Determine if balls touch the edges
        p = self.p # current positions
        fp = self.fp # future ball position
        bp = fp.copy()
        edge_check = self.CheckEdges() # which balls intersect with which polygon, on which segment(s)
        pre_edge_detect = np.argwhere(edge_check)
        edge_detect = pre_edge_detect.T
        bouncy = np.unique(edge_detect[0]) # The balls that intersect should move accordingly
        if len(bouncy) > 0:
            # Get the point from which the circles bounces (only an approximation on non-straight lines)
            bp[bouncy] = self.BouncePointCircle(edge_detect)
            # Get the vector between that point and the future ball position
            bounce_vector = np.subtract(bp, fp)
            # Get the vector of the line between the radius and the centre of the circle, along the vector with the bounce point
            radius_vector = np.zeros_like(bounce_vector)
            for i in bouncy:
                radius_vector[i] = bounce_vector[i]*self.r[i]/np.linalg.norm(bounce_vector[i])
            # get the difference between the two vectors to get the vector along which to move the ball to avoid overlap with polygon
            bounce = 2 * (bounce_vector - radius_vector)
            fpbounce = fp + bounce
            self.vx, self.vy = (fpbounce - p).T
    def set_v_to_r(self):
        """
        Set max speed (self.vx, self.vy) to radius of circle. This way, when on_point = False,
        the circle is less likely to overshoot the polygon boundary.
        """
        vr = np.column_stack((self.vx, self.vy))
        vn = np.zeros(self.n)
        for i, vri in enumerate(vr):
            vn[i] = np.linalg.norm(vri)
        # Rescale only the circles whose speed exceeds their own radius.
        set_v = np.where(vn > self.r)[0]
        self.vx[set_v] *= self.r[set_v]/vn[set_v]
        self.vy[set_v] *= self.r[set_v]/vn[set_v]
        self.vx = NbRound1(self.vx, self.precision)
        self.vy = NbRound1(self.vy, self.precision)
    def run(self):
        """
        Calculates forces between balls. Then determines if they should bounce against the surrounding polygon feature.
        Check if they don't escape the polygon none the less (in case of double bounces) and adjust speed vector to point towards
        CirclePack.polygons.centringpoint.
        Returns
        -------
        None.
        """
        # determine the distances between the circles
        self.Distance()
        # The circles that should move, and shouldn't
        move = np.where(Any1(self.dr))[0] # On the index are the balls that are checked upon
        no_move = NotInInt(self.circle_indeces, move) # The balls that shouldn't
        # Stop the circles that don't have to move and apply force to those that do.
        self.vx[no_move] = 0
        self.vy[no_move] = 0
        fx, fy = self.GetForce(move)
        # Velocity because of forces between balls
        self.vx = NbRound1(self.vx + fx, self.precision)
        self.vy = NbRound1(self.vy + fy, self.precision)
        # Make sure the circles do not move too fast. When passing the polygon some might behave differently on bouncing.
        self.set_v_to_r()
        # Determine velocity due to bouncing
        if self.on_point:
            self.BouncePoint()
        else:
            self.BounceEdge()
        # The balls can for some reason still be outside at times. Bring them back home.
        ball_fp = self.fp
        ball_io = np.where(np.logical_not(self.polygons.ContainsPoints(ball_fp)))[0]
        if len(ball_io) > 0:
            # Point the velocity of escaped circles back towards the polygon's centring point.
            diff_v = NbRound2(self.polygons.centringpoint - ball_fp[ball_io], self.precision)
            self.vx[ball_io] = diff_v[:,0]
            self.vy[ball_io] = diff_v[:,1]
            self.set_v_to_r()
        # Move
        self.update_positions()
        return
    def check(self):
        """
        Determine if the circles are not moving, are inside the desired area
        and do not still overlap.
        Returns
        -------
        check : bool
            True if the circles are not moving, are inside the desired area
            and do not still overlap. False if any of them are the opposite.
        list
            To check which one is the culprit.
        """
        vel = self.check_vel() #True when moving
        box = np.invert(self.check_bounds()) # True when outside the box
        dis = self.check_overlap() # True when circles overlap
        check = np.array([vel, box, dis]).any() # when this returns True, the packing is not settled yet
        return check, [vel, box, dis]
    def check_vel(self):
        """
        Check if the balls are still moving.
        Returns
        -------
        bool
            True if still moving, False if not.
        """
        return np.array([self.vx.any(), self.vy.any()]).any()
    def check_bounds(self):
        # True when every circle centre lies inside its polygon.
        return self.polygons.ContainsPoints(self.p).all() # Returns True when all are inside
    def check_overlap(self):
        # True when any pair of circles still overlaps.
        return self.dr.any() # True when overlap
    def run_till_check(self, max_itt):
        # Iterate run() until the circles stop moving or max_itt is reached;
        # returns the number of extra iterations performed.
        itt = 0
        self.run()
        while itt < max_itt and self.check_vel():
            self.run()
            itt+=1
        return itt
| [
"numpy.isnan",
"numpy.linalg.norm",
"numpy.unique",
"numpy.full",
"numpy.zeros_like",
"linalg_utils.MirrorPoints",
"numba_utils.NbRound1",
"numba.experimental.jitclass",
"numpy.transpose",
"numpy.max",
"numba_utils.NotInInt",
"numpy.column_stack",
"numba_utils.NbRound2",
"numba_utils.Mean0... | [((369, 384), 'numba.deferred_type', 'deferred_type', ([], {}), '()\n', (382, 384), False, 'from numba import float64, int64, boolean, deferred_type\n'), ((897, 916), 'numba.experimental.jitclass', 'jitclass', (['pack_spec'], {}), '(pack_spec)\n', (905, 916), False, 'from numba.experimental import jitclass\n'), ((3836, 3872), 'numpy.zeros', 'np.zeros', (['(self.x.size, self.x.size)'], {}), '((self.x.size, self.x.size))\n', (3844, 3872), True, 'import numpy as np\n'), ((3892, 3928), 'numpy.zeros', 'np.zeros', (['(self.x.size, self.x.size)'], {}), '((self.x.size, self.x.size))\n', (3900, 3928), True, 'import numpy as np\n'), ((3948, 3984), 'numpy.zeros', 'np.zeros', (['(self.x.size, self.x.size)'], {}), '((self.x.size, self.x.size))\n', (3956, 3984), True, 'import numpy as np\n'), ((4804, 4857), 'numpy.column_stack', 'np.column_stack', (['(self.x + self.vx, self.y + self.vy)'], {}), '((self.x + self.vx, self.y + self.vy))\n', (4819, 4857), True, 'import numpy as np\n'), ((4874, 4907), 'numba_utils.NbRound2', 'NbRound2', (['ball_fp', 'self.precision'], {}), '(ball_fp, self.precision)\n', (4882, 4907), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((5340, 5410), 'numpy.array', 'np.array', (['(self.x[ball] + self.vx[ball], self.y[ball] + self.vy[ball])'], {}), '((self.x[ball] + self.vx[ball], self.y[ball] + self.vy[ball]))\n', (5348, 5410), True, 'import numpy as np\n'), ((5427, 5460), 'numba_utils.NbRound1', 'NbRound1', (['ball_fp', 'self.precision'], {}), '(ball_fp, self.precision)\n', (5435, 5460), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((5798, 5831), 'numpy.column_stack', 'np.column_stack', (['(self.x, self.y)'], {}), '((self.x, self.y))\n', (5813, 5831), True, 'import numpy as np\n'), ((5854, 5886), 'numba_utils.NbRound2', 'NbRound2', (['ball_p', 'self.precision'], {}), '(ball_p, self.precision)\n', (5862, 5886), False, 'from numba_utils import 
NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((6288, 6326), 'numpy.array', 'np.array', (['(self.x[ball], self.y[ball])'], {}), '((self.x[ball], self.y[ball]))\n', (6296, 6326), True, 'import numpy as np\n'), ((6343, 6375), 'numba_utils.NbRound1', 'NbRound1', (['ball_p', 'self.precision'], {}), '(ball_p, self.precision)\n', (6351, 6375), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((6600, 6642), 'numba_utils.NbRound1', 'NbRound1', (['(self.x + self.vx)', 'self.precision'], {}), '(self.x + self.vx, self.precision)\n', (6608, 6642), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((6661, 6703), 'numba_utils.NbRound1', 'NbRound1', (['(self.y + self.vy)', 'self.precision'], {}), '(self.y + self.vy, self.precision)\n', (6669, 6703), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((7037, 7105), 'numpy.zeros', 'np.zeros', (['(self.n, self.polygons.n, self.polygons.polygons.shape[1])'], {}), '((self.n, self.polygons.n, self.polygons.polygons.shape[1]))\n', (7045, 7105), True, 'import numpy as np\n'), ((8059, 8085), 'numpy.zeros', 'np.zeros', (['(self.n, self.n)'], {}), '((self.n, self.n))\n', (8067, 8085), True, 'import numpy as np\n'), ((8163, 8189), 'numpy.zeros', 'np.zeros', (['(self.n, self.n)'], {}), '((self.n, self.n))\n', (8171, 8189), True, 'import numpy as np\n'), ((8261, 8297), 'numpy.sqrt', 'np.sqrt', (['(self.dx ** 2 + self.dy ** 2)'], {}), '(self.dx ** 2 + self.dy ** 2)\n', (8268, 8297), True, 'import numpy as np\n'), ((8709, 8735), 'numpy.zeros', 'np.zeros', (['(self.n, self.n)'], {}), '((self.n, self.n))\n', (8717, 8735), True, 'import numpy as np\n'), ((9453, 9469), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (9461, 9469), True, 'import numpy as np\n'), ((9484, 9500), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (9492, 9500), True, 'import numpy as np\n'), ((11603, 11681), 'numpy.full', 
'np.full', (['(self.n, self.polygons.n, self.polygons.polygons.shape[1], 2)', 'np.nan'], {}), '((self.n, self.polygons.n, self.polygons.polygons.shape[1], 2), np.nan)\n', (11610, 11681), True, 'import numpy as np\n'), ((12446, 12470), 'numba_utils.NanMean12', 'NanMean12', (['bounce_points'], {}), '(bounce_points)\n', (12455, 12470), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((12968, 12992), 'numpy.zeros', 'np.zeros', (['(self.n, 2, 2)'], {}), '((self.n, 2, 2))\n', (12976, 12992), True, 'import numpy as np\n'), ((14490, 14513), 'numpy.argwhere', 'np.argwhere', (['edge_check'], {}), '(edge_check)\n', (14501, 14513), True, 'import numpy as np\n'), ((14573, 14598), 'numpy.unique', 'np.unique', (['edge_detect[0]'], {}), '(edge_detect[0])\n', (14582, 14598), True, 'import numpy as np\n'), ((15876, 15911), 'numpy.column_stack', 'np.column_stack', (['(self.vx, self.vy)'], {}), '((self.vx, self.vy))\n', (15891, 15911), True, 'import numpy as np\n'), ((15926, 15942), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (15934, 15942), True, 'import numpy as np\n'), ((16215, 16248), 'numba_utils.NbRound1', 'NbRound1', (['self.vx', 'self.precision'], {}), '(self.vx, self.precision)\n', (16223, 16248), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((16268, 16301), 'numba_utils.NbRound1', 'NbRound1', (['self.vy', 'self.precision'], {}), '(self.vy, self.precision)\n', (16276, 16301), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((16975, 17010), 'numba_utils.NotInInt', 'NotInInt', (['self.circle_indeces', 'move'], {}), '(self.circle_indeces, move)\n', (16983, 17010), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((17312, 17350), 'numba_utils.NbRound1', 'NbRound1', (['(self.vx + fx)', 'self.precision'], {}), '(self.vx + fx, self.precision)\n', (17320, 17350), False, 'from numba_utils import 
NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((17370, 17408), 'numba_utils.NbRound1', 'NbRound1', (['(self.vy + fy)', 'self.precision'], {}), '(self.vy + fy, self.precision)\n', (17378, 17408), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((4004, 4041), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'self.n'}), '(-1, 1, size=self.n)\n', (4021, 4041), True, 'import numpy as np\n'), ((4067, 4104), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'self.n'}), '(-1, 1, size=self.n)\n', (4084, 4104), True, 'import numpy as np\n'), ((9753, 9786), 'numpy.column_stack', 'np.column_stack', (['(diff_x, diff_y)'], {}), '((diff_x, diff_y))\n', (9768, 9786), True, 'import numpy as np\n'), ((9931, 9949), 'linalg_utils.Normalize', 'Normalize', (['diff_xy'], {}), '(diff_xy)\n', (9940, 9949), False, 'from linalg_utils import CheckCircleIntersect, Normalize, CircleLineIntersectDiscriminant, FullLineIntersections, MirrorPoints\n'), ((10254, 10284), 'numpy.zeros', 'np.zeros', (['(force1.shape[0], 2)'], {}), '((force1.shape[0], 2))\n', (10262, 10284), True, 'import numpy as np\n'), ((12287, 12349), 'linalg_utils.FullLineIntersections', 'FullLineIntersections', (['cx', 'cy', 'dx', 'dy', 'dr', 'big_d', 'discriminant'], {}), '(cx, cy, dx, dy, dr, big_d, discriminant)\n', (12308, 12349), False, 'from linalg_utils import CheckCircleIntersect, Normalize, CircleLineIntersectDiscriminant, FullLineIntersections, MirrorPoints\n'), ((12393, 12413), 'numba_utils.Mean0', 'Mean0', (['intersections'], {}), '(intersections)\n', (12398, 12413), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((12508, 12533), 'numpy.unique', 'np.unique', (['edge_detect[0]'], {}), '(edge_detect[0])\n', (12517, 12533), True, 'import numpy as np\n'), ((13923, 13938), 'numpy.transpose', 'np.transpose', (['v'], {}), '(v)\n', (13935, 13938), True, 'import numpy as np\n'), ((14983, 
15002), 'numpy.subtract', 'np.subtract', (['bp', 'fp'], {}), '(bp, fp)\n', (14994, 15002), True, 'import numpy as np\n'), ((15177, 15205), 'numpy.zeros_like', 'np.zeros_like', (['bounce_vector'], {}), '(bounce_vector)\n', (15190, 15205), True, 'import numpy as np\n'), ((16014, 16033), 'numpy.linalg.norm', 'np.linalg.norm', (['vri'], {}), '(vri)\n', (16028, 16033), True, 'import numpy as np\n'), ((16069, 16090), 'numpy.where', 'np.where', (['(vn > self.r)'], {}), '(vn > self.r)\n', (16077, 16090), True, 'import numpy as np\n'), ((18006, 18078), 'numba_utils.NbRound2', 'NbRound2', (['(self.polygons.centringpoint - ball_fp[ball_io])', 'self.precision'], {}), '(self.polygons.centringpoint - ball_fp[ball_io], self.precision)\n', (18014, 18078), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((8823, 8837), 'numpy.max', 'np.max', (['self.r'], {}), '(self.r)\n', (8829, 8837), True, 'import numpy as np\n'), ((9589, 9609), 'numpy.where', 'np.where', (['self.dr[b]'], {}), '(self.dr[b])\n', (9597, 9609), True, 'import numpy as np\n'), ((10365, 10389), 'numpy.linalg.norm', 'np.linalg.norm', (['vectorbi'], {}), '(vectorbi)\n', (10379, 10389), True, 'import numpy as np\n'), ((10416, 10436), 'numpy.where', 'np.where', (['(normbi > 0)'], {}), '(normbi > 0)\n', (10424, 10436), True, 'import numpy as np\n'), ((10687, 10712), 'numpy.linalg.norm', 'np.linalg.norm', (['net_force'], {}), '(net_force)\n', (10701, 10712), True, 'import numpy as np\n'), ((13783, 13812), 'numpy.vstack', 'np.vstack', (['(self.vx, self.vy)'], {}), '((self.vx, self.vy))\n', (13792, 13812), True, 'import numpy as np\n'), ((13839, 13878), 'linalg_utils.MirrorPoints', 'MirrorPoints', (['fp[bouncy]', 'intersegments'], {}), '(fp[bouncy], intersegments)\n', (13851, 13878), False, 'from linalg_utils import CheckCircleIntersect, Normalize, CircleLineIntersectDiscriminant, FullLineIntersections, MirrorPoints\n'), ((16887, 16900), 'numba_utils.Any1', 'Any1', (['self.dr'], {}), 
'(self.dr)\n', (16891, 16900), False, 'from numba_utils import NanMean12, NbRound1, NbRound2, Mean0, Any1, NotInInt\n'), ((18974, 18999), 'numpy.array', 'np.array', (['[vel, box, dis]'], {}), '([vel, box, dis])\n', (18982, 18999), True, 'import numpy as np\n'), ((10563, 10597), 'numpy.array', 'np.array', (['[self.vx[b], self.vy[b]]'], {}), '([self.vx[b], self.vy[b]])\n', (10571, 10597), True, 'import numpy as np\n'), ((15302, 15334), 'numpy.linalg.norm', 'np.linalg.norm', (['bounce_vector[i]'], {}), '(bounce_vector[i])\n', (15316, 15334), True, 'import numpy as np\n'), ((7270, 7311), 'numpy.isnan', 'np.isnan', (['self.polygons.polygons[s][:, 0]'], {}), '(self.polygons.polygons[s][:, 0])\n', (7278, 7311), True, 'import numpy as np\n')] |
#-*- coding:utf-8 -*-
"""
qmap.py
author: <NAME> (2013/05/17)
量子系の計算を行います.
具体的には,
1. 与えられた初期条件の時間発展を行います
2. 系の時間発展演算子の固有値及び固有ベクトルを求めますよ.
"""
import numpy
from SimpleQmap.state import *
twopi = 2.0*numpy.pi  # 2*pi; op0 below divides by h together with this factor (h/twopi = hbar)
class SplitHamiltonian(ScaleInfo):
    """Builds q-representation matrices for a split Hamiltonian H = T(p) + V(q)."""
    def __init__(self, dim, domain):
        """
        Parameters
        ----------
        dim : int
            Hilbert-space dimension.
        domain :
            Region of the calculation domain (passed through to ScaleInfo).
        """
        super().__init__(dim, domain)
        self.scaleinfo = ScaleInfo(dim, domain)
    def matVqrep(self, V):
        """
        Return the potential term as a matrix in the q-representation.
        V(q) is diagonal in position space.
        Parameters
        ----------
        V : callable
            Potential function evaluated on the q grid.
        """
        q,p = self.x
        mat = numpy.diag(V(q))
        return Matrix(mat, self.scaleinfo)
    def matTqrep(self, T):
        """
        Return the kinetic term as a matrix in the q-representation.
        Each basis column is transformed to p-space with an FFT, multiplied
        by T(p), and transformed back.
        Parameters
        ----------
        T : callable
            Kinetic-energy function evaluated on the p grid.
        """
        q,p = self.x
        dim = self.dim
        # BUGFIX: `numpy.complex` (an alias of the builtin `complex`) was
        # deprecated in NumPy 1.20 and removed in 1.24; use the concrete
        # complex128 dtype, which is what the alias resolved to.
        eye = numpy.eye(dim, dtype=numpy.complex128)
        mat = numpy.zeros((dim,dim), dtype=numpy.complex128)
        for i, x in enumerate(eye):
            pvec = numpy.fft.fft(x)
            if p[0]*p[-1] < 0:
                # p-domain straddles zero: reorder T(p) to match the FFT's
                # frequency layout before multiplying.
                pvec = numpy.fft.fftshift( T(p) ) * pvec
            else:
                pvec = T(p) * pvec
            mat[:,i] = numpy.fft.ifft(pvec)
        return Matrix(mat, self.scaleinfo)
class SplitUnitary(ScaleInfo):
    """Split-operator one-step time evolution for a Hamiltonian of the form T(p) + V(q)."""
    def __init__(self, dim, domain, tau = 1):
        """
        Parameters
        ----------
        dim : int
            Hilbert-space dimension.
        domain :
            calculation region, ((qmin, qmax), (pmin, pmax)).
        tau : float, optional
            time step of one evolution step (default 1).
        """
        super().__init__(dim, domain)
        self.tau = tau
        self.scaleinfo = ScaleInfo(dim, domain)
    def TVmatrix(self, T, V):
        """Return the matrix of exp(-iT(p)tau/hbar) exp(-iV(q)tau/hbar) in the q representation."""
        mat = numpy.zeros([self.dim, self.dim],dtype=numpy.complex128)
        for i in range(self.dim):
            # evolve each q-basis vector one step and store it as column i
            vec = State(self.scaleinfo)
            vec[i] = 1
            mat[:,i] = self.TVevolve(T, V, vec)
        return Matrix(mat, self.scaleinfo)
    def VTmatrix(self, T, V):
        """Return the matrix of exp(-iV(q)tau/hbar) exp(-iT(p)tau/hbar) in the q representation."""
        mat = numpy.zeros([self.dim, self.dim],dtype=numpy.complex128)
        for i in range(self.dim):
            vec = State(self.scaleinfo)
            vec[i] = 1
            mat[:,i] = self.VTevolve(T, V, vec)
        return Matrix(mat, self.scaleinfo)
    def TVevolve(self, T, V, vec):
        """Apply exp(-iV(q)tau/hbar) first, then exp(-iT(p)tau/hbar), to `vec`.

        Returns the evolved state in the q representation.
        NOTE(review): assumes multiplying a State by an ndarray yields a
        State-like object providing q2p()/p2q() -- confirm in state.py.
        """
        q,p = self.x
        hbar = self.hbar
        qvec = numpy.exp(-1.j*V(q)/hbar * self.tau) * vec
        pvec = numpy.exp(-1.j*T(p)/hbar * self.tau) * qvec.q2p()
        return pvec.p2q()
    def VTevolve(self, T, V, vec):
        """Apply exp(-iT(p)tau/hbar) first, then exp(-iV(q)tau/hbar), to `vec`.

        Returns the evolved state in the q representation.
        """
        q,p = self.x
        hbar = self.hbar
        pvec = numpy.exp(-1.j*T(p)/hbar * self.tau) * vec.q2p()
        qvec = numpy.exp(-1.j*V(q)/hbar * self.tau) * pvec.p2q()
        return qvec
class Qmap(object):
    def __init__(self, map, dim, domain):
        """
        Class that bundles the quantum-mechanical calculation procedures.
        Parameter
        ----------
        map : map instance
        dim : integer
            Hilbert Space dimension.
        domain:
            region of the calculation domain.
        """
        # NOTE: the parameter name `map` shadows the builtin; kept for
        # interface compatibility with existing callers.
        self.map = map
        self.scaleinfo = ScaleInfo(dim, domain)
        self.dim =self.scaleinfo.dim
        self.setOperate()
        self.stateIn = State(self.scaleinfo)
        self.stateOut = State(self.scaleinfo)
    def setOperate(self):
        """
        make operators
        .. seealso::
            Module :qmap:Qmap:`op0` and Module :qmap:Qmap:`op1`
        """
        self.operator = [State(self.scaleinfo) for i in range(2)]
        self.op0(self.scaleinfo.x[0])
        # the kinetic operator needs the p-grid in FFT ordering when the
        # p-domain is symmetric about zero
        if (self.scaleinfo.domain[1][0] == 0.0):
            self.op1(self.scaleinfo.x[1])
        elif (numpy.abs(self.scaleinfo.domain[1][0]) == numpy.abs(self.scaleinfo.domain[1][1])):
            self.op1(numpy.fft.fftshift(self.scaleinfo.x[1]))
        else:
            raise ValueError("unexpected domain.")
    def op0(self,x):
        """
        make operator :math:`\exp[-\\frac{i}{\hbar}V(\hat{q})]`
        """
        self.operator[0] = numpy.exp(-1.j*twopi*self.map.ifunc0(x)/self.scaleinfo.h)
    def op1(self,x):
        """
        make operator :math:`\exp[-\\frac{i}{\hbar}T(\hat{p})]`
        """
        self.operator[1] = numpy.exp(-1.j*twopi*self.map.ifunc1(x)/self.scaleinfo.h)
    def operate(self):
        """
        time evolution of a given state :math:`|\psi_0\\rangle`
        .. math::
            \langle q | \psi_1 \\rangle = \langle q |\hat{U} | \psi_0 \\rangle
        .. note::
            Unless there is a particular reason not to, use evolve() for time evolution.
        """
        pvec = numpy.fft.fft(self.operator[0]*self.stateIn)
        qvec = numpy.fft.ifft(self.operator[1]*pvec)
        self.stateOut = State(self.scaleinfo, qvec)
    def setInit(self, state):
        """
        set initial state
        Parameters
        ----------
        state: State class instance
        """
        if not isinstance(state, State):
            raise TypeError("expected State:",type(state))
        self.stateIn = state.copy()
    def getState(self):
        """ return null state"""
        return State(self.scaleinfo)
    def getIn(self):
        """ return previous state of time evolution"""
        return self.stateIn
    def getOut(self):
        """ return time evolved state"""
        return self.stateOut
    def pull(self):
        """
        substitution time evolved state into previous state
        .. math::
            |\psi_0 \\rangle = |\psi_1 \\rangle
        """
        self.stateIn = self.stateOut
    def evolve(self):
        """
        iteative operation of :math:`\hat{U}` for a given initial state
        """
        self.operate()
        self.pull()
    def setMatrix(self):
        """
        make time evolution operator matrix in position representation
        .. math::
            \langle q_1 | \hat{U} | q_0\\rangle
        where
        .. math::
            \hat{U} = \exp[-\\frac{i}{\hbar}T(\hat{p})]\exp[-\\frac{i}{\hbar}V(\hat{q})]
        """
        self.matrix = numpy.zeros([self.dim, self.dim],dtype=numpy.complex128)
        for i in range(self.dim):
            # evolve each position basis vector |q_i> by one step
            vec = State(self.scaleinfo)
            vec.insert(i,1.0+0j)
            self.setInit(vec)
            self.operate()
            self.matrix[i,:] = self.stateOut
        # rows were filled with evolved columns; transpose to get <q1|U|q0>
        self.matrix = numpy.transpose(self.matrix)
    def eigen(self):
        """
        return eigenvalues and eigenvectors of time evolution operator matrix
        """
        try:
            evals, evecs = numpy.linalg.eig(self.matrix)
            vecs = [State(self.scaleinfo, evec) for evec in evecs.transpose()]
            return evals, vecs
        except AttributeError:
            # matrix not built yet: build it, then diagonalize
            self.setMatrix()
            evals, evecs = numpy.linalg.eig(self.matrix)
            vecs = [State(self.scaleinfo, evec) for evec in evecs.transpose()]
            return evals, vecs
    def getMatrix(self):
        """
        return time evolution operator matrix
        """
        try:
            return self.matrix
        except AttributeError:
            # matrix is built lazily on first access
            self.setMatrix()
            return self.matrix
def _test():
    """Run this module's doctests."""
    import doctest
    doctest.testmod()
if __name__ == "__main__":
_test()
| [
"numpy.fft.ifft",
"numpy.abs",
"numpy.fft.fft",
"numpy.zeros",
"numpy.transpose",
"numpy.linalg.eig",
"numpy.fft.fftshift",
"numpy.eye",
"doctest.testmod"
] | [((6568, 6585), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (6583, 6585), False, 'import doctest\n'), ((587, 622), 'numpy.eye', 'numpy.eye', (['dim'], {'dtype': 'numpy.complex'}), '(dim, dtype=numpy.complex)\n', (596, 622), False, 'import numpy\n'), ((637, 681), 'numpy.zeros', 'numpy.zeros', (['(dim, dim)'], {'dtype': 'numpy.complex'}), '((dim, dim), dtype=numpy.complex)\n', (648, 681), False, 'import numpy\n'), ((1213, 1270), 'numpy.zeros', 'numpy.zeros', (['[self.dim, self.dim]'], {'dtype': 'numpy.complex128'}), '([self.dim, self.dim], dtype=numpy.complex128)\n', (1224, 1270), False, 'import numpy\n'), ((1504, 1561), 'numpy.zeros', 'numpy.zeros', (['[self.dim, self.dim]'], {'dtype': 'numpy.complex128'}), '([self.dim, self.dim], dtype=numpy.complex128)\n', (1515, 1561), False, 'import numpy\n'), ((4010, 4056), 'numpy.fft.fft', 'numpy.fft.fft', (['(self.operator[0] * self.stateIn)'], {}), '(self.operator[0] * self.stateIn)\n', (4023, 4056), False, 'import numpy\n'), ((4070, 4109), 'numpy.fft.ifft', 'numpy.fft.ifft', (['(self.operator[1] * pvec)'], {}), '(self.operator[1] * pvec)\n', (4084, 4109), False, 'import numpy\n'), ((5451, 5508), 'numpy.zeros', 'numpy.zeros', (['[self.dim, self.dim]'], {'dtype': 'numpy.complex128'}), '([self.dim, self.dim], dtype=numpy.complex128)\n', (5462, 5508), False, 'import numpy\n'), ((5740, 5768), 'numpy.transpose', 'numpy.transpose', (['self.matrix'], {}), '(self.matrix)\n', (5755, 5768), False, 'import numpy\n'), ((736, 752), 'numpy.fft.fft', 'numpy.fft.fft', (['x'], {}), '(x)\n', (749, 752), False, 'import numpy\n'), ((917, 937), 'numpy.fft.ifft', 'numpy.fft.ifft', (['pvec'], {}), '(pvec)\n', (931, 937), False, 'import numpy\n'), ((5933, 5962), 'numpy.linalg.eig', 'numpy.linalg.eig', (['self.matrix'], {}), '(self.matrix)\n', (5949, 5962), False, 'import numpy\n'), ((3116, 3154), 'numpy.abs', 'numpy.abs', (['self.scaleinfo.domain[1][0]'], {}), '(self.scaleinfo.domain[1][0])\n', (3125, 3154), False, 'import 
numpy\n'), ((3158, 3196), 'numpy.abs', 'numpy.abs', (['self.scaleinfo.domain[1][1]'], {}), '(self.scaleinfo.domain[1][1])\n', (3167, 3196), False, 'import numpy\n'), ((6160, 6189), 'numpy.linalg.eig', 'numpy.linalg.eig', (['self.matrix'], {}), '(self.matrix)\n', (6176, 6189), False, 'import numpy\n'), ((3220, 3259), 'numpy.fft.fftshift', 'numpy.fft.fftshift', (['self.scaleinfo.x[1]'], {}), '(self.scaleinfo.x[1])\n', (3238, 3259), False, 'import numpy\n')] |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# <NAME> <<EMAIL>>
# Wed Nov 16 13:27:15 2011 +0100
#
# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
"""A combined test for all built-in types of Array/interaction in
python.
"""
import os
import sys
import numpy
import nose.tools
from . import load, write, peek, peek_all, File, test_utils
def test_peek():
  """peek() reports dtype/shape/stride of the first object; peek_all() of the whole file."""
  f = test_utils.datafile('test1.hdf5', __name__)
  assert peek(f) == (numpy.uint16, (3,), (1,))
  assert peek_all(f) == (numpy.uint16, (3,3), (3,1))
def test_iteration():
  """Iterating a File yields the same arrays, in order, as bulk load()."""
  fname = test_utils.datafile('matlab_2d.hdf5', __name__)
  f = File(fname, 'r')
  nose.tools.eq_(len(f), 512)
  objs = load(fname)
  for l, i in zip(objs, f):
    assert numpy.allclose(l, i)
def test_indexing():
  """Positive and negative integer indexing matches the bulk-loaded array."""
  fname = test_utils.datafile('matlab_2d.hdf5', __name__)
  f = File(fname, 'r')
  nose.tools.eq_(len(f), 512)
  objs = load(fname)
  nose.tools.eq_(len(f), len(objs))
  # simple indexing
  assert numpy.allclose(f[0], objs[0])
  assert numpy.allclose(f[1], objs[1])
  assert numpy.allclose(f[-1], objs[-1])
  assert numpy.allclose(f[-2], objs[-2])
def test_slicing_empty():
  """A zero-length slice returns an object with an empty shape."""
  fname = test_utils.datafile('matlab_2d.hdf5', __name__)
  f = File(fname, 'r')
  objs = f[1:1]
  assert objs.shape == tuple()
def test_slicing_0():
  """A full slice [:] returns every object in file order."""
  fname = test_utils.datafile('matlab_2d.hdf5', __name__)
  f = File(fname, 'r')
  objs = f[:]
  for i, k in enumerate(load(fname)):
    assert numpy.allclose(k, objs[i])
def test_slicing_1():
  """A positive stepped slice [3:10:2] picks every other object."""
  fname = test_utils.datafile('matlab_2d.hdf5', __name__)
  f = File(fname, 'r')
  # get slice
  s1 = f[3:10:2]
  nose.tools.eq_(len(s1), 4)
  assert numpy.allclose(s1[0], f[3])
  assert numpy.allclose(s1[1], f[5])
  assert numpy.allclose(s1[2], f[7])
  assert numpy.allclose(s1[3], f[9])
def test_slicing_2():
  """Negative start/stop with a positive step resolves relative to the end."""
  fname = test_utils.datafile('matlab_2d.hdf5', __name__)
  f = File(fname, 'r')
  # get negative slicing
  s = f[-10:-2:3]
  nose.tools.eq_(len(s), 3)
  assert numpy.allclose(s[0], f[len(f)-10])
  assert numpy.allclose(s[1], f[len(f)-7])
  assert numpy.allclose(s[2], f[len(f)-4])
def test_slicing_3():
  """A negative-step slice runs backwards through the file."""
  fname = test_utils.datafile('matlab_2d.hdf5', __name__)
  f = File(fname, 'r')
  # get negative stepping slice
  s = f[20:10:-3]
  nose.tools.eq_(len(s), 4)
  assert numpy.allclose(s[0], f[20])
  assert numpy.allclose(s[1], f[17])
  assert numpy.allclose(s[2], f[14])
  assert numpy.allclose(s[3], f[11])
def test_slicing_4():
  """An all-negative slice with a negative step counts back from the end."""
  fname = test_utils.datafile('matlab_2d.hdf5', __name__)
  f = File(fname, 'r')
  # get all negative slice
  s = f[-10:-20:-3]
  nose.tools.eq_(len(s), 4)
  assert numpy.allclose(s[0], f[len(f)-10])
  assert numpy.allclose(s[1], f[len(f)-13])
  assert numpy.allclose(s[2], f[len(f)-16])
  assert numpy.allclose(s[3], f[len(f)-19])
@nose.tools.raises(TypeError)
def test_indexing_type_check():
  """Non-integer indices raise TypeError."""
  f = File(test_utils.datafile('matlab_2d.hdf5', __name__), 'r')
  nose.tools.eq_(len(f), 512)
  f[4.5]
@nose.tools.raises(IndexError)
def test_indexing_boundaries():
  """Indexing one past the last element raises IndexError."""
  f = File(test_utils.datafile('matlab_2d.hdf5', __name__), 'r')
  nose.tools.eq_(len(f), 512)
  f[512]
@nose.tools.raises(IndexError)
def test_indexing_negative_boundaries():
  """Indexing before -len(f) raises IndexError."""
  f = File(test_utils.datafile('matlab_2d.hdf5', __name__), 'r')
  nose.tools.eq_(len(f), 512)
  f[-513]
def transcode(filename):
  """Runs a complete transcoding test, to and from the binary format."""
  tmpname = test_utils.temporary_filename(suffix=os.path.splitext(filename)[1])
  try:
    # transcode from test format into the test format -- test array access modes
    orig_data = load(filename)
    write(orig_data, tmpname)
    rewritten_data = load(tmpname)
    assert numpy.array_equal(orig_data, rewritten_data)
    # transcode to test format -- test arrayset access modes
    trans_file = File(tmpname, 'w')
    # `index` selects the full extent of every dimension; the first entry is
    # replaced per-iteration below to select one slice at a time
    index = [slice(orig_data.shape[k]) for k in range(len(orig_data.shape))]
    for k in range(orig_data.shape[0]):
      index[0] = k
      trans_file.append(orig_data[index]) #slice from first dimension
    del trans_file
    # re-open and verify each appended slice round-trips exactly
    rewritten_file = File(tmpname, 'r')
    for k in range(orig_data.shape[0]):
      rewritten_data = rewritten_file.read(k)
      index[0] = k
      assert numpy.array_equal(orig_data[index], rewritten_data)
  finally:
    # And we erase both files after this
    if os.path.exists(tmpname): os.unlink(tmpname)
def array_readwrite(extension, arr, close=False):
  """Runs a read/write verify step using the given numpy data.

  When `close` is True the round-trip is verified approximately
  (numpy.allclose) instead of exactly (numpy.array_equal).
  """
  tmpname = test_utils.temporary_filename(suffix=extension)
  try:
    write(arr, tmpname)
    reloaded = load(tmpname)
    if close: assert numpy.allclose(arr, reloaded)
    else: assert numpy.array_equal(arr, reloaded)
  finally:
    if os.path.exists(tmpname): os.unlink(tmpname)
def arrayset_readwrite(extension, arrays, close=False):
  """Runs a read/write verify step using the given numpy data.

  Appends each array to a fresh file, then reads them back one by one;
  `close` selects approximate (allclose) vs exact comparison.
  """
  tmpname = test_utils.temporary_filename(suffix=extension)
  try:
    f = File(tmpname, 'w')
    for k in arrays:
      f.append(k)
    del f
    f = File(tmpname, 'r')
    for k, array in enumerate(arrays):
      reloaded = f.read(k) #read the contents
      if close:
        assert numpy.allclose(array, reloaded)
      else: assert numpy.array_equal(array, reloaded)
  finally:
    if os.path.exists(tmpname): os.unlink(tmpname)
def test_hdf5():
  """Round-trip arrays and array sets of several dtypes/ranks through HDF5."""
  # array writing tests
  a1 = numpy.random.normal(size=(2,3)).astype('float32')
  a2 = numpy.random.normal(size=(2,3,4)).astype('float64')
  a3 = numpy.random.normal(size=(2,3,4,5)).astype('complex128')
  a4 = (10 * numpy.random.normal(size=(3,3))).astype('uint64')
  array_readwrite('.hdf5', a1) # extensions: .hdf5 or .h5
  array_readwrite(".h5", a2)
  array_readwrite('.h5', a3)
  array_readwrite(".h5", a4)
  array_readwrite('.h5', a3[:,::2,::2,::2]) #test non-contiguous
  # arrayset writing tests
  a1 = []
  a2 = []
  a3 = []
  a4 = []
  for k in range(10):
    a1.append(numpy.random.normal(size=(2,3)).astype('float32'))
    a2.append(numpy.random.normal(size=(2,3,4)).astype('float64'))
    a3.append(numpy.random.normal(size=(2,3,4,5)).astype('complex128'))
    a4.append((10*numpy.random.normal(size=(3,3))).astype('uint64'))
  arrayset_readwrite('.h5', a1)
  arrayset_readwrite(".h5", a2)
  arrayset_readwrite('.h5', a3)
  arrayset_readwrite(".h5", a4)
  # complete transcoding tests
  transcode(test_utils.datafile('test1.hdf5', __name__))
  transcode(test_utils.datafile('matlab_1d.hdf5', __name__))
  transcode(test_utils.datafile('matlab_2d.hdf5', __name__))
| [
"os.unlink",
"numpy.allclose",
"os.path.exists",
"os.path.splitext",
"numpy.random.normal",
"numpy.array_equal"
] | [((984, 1013), 'numpy.allclose', 'numpy.allclose', (['f[0]', 'objs[0]'], {}), '(f[0], objs[0])\n', (998, 1013), False, 'import numpy\n'), ((1023, 1052), 'numpy.allclose', 'numpy.allclose', (['f[1]', 'objs[1]'], {}), '(f[1], objs[1])\n', (1037, 1052), False, 'import numpy\n'), ((1062, 1093), 'numpy.allclose', 'numpy.allclose', (['f[-1]', 'objs[-1]'], {}), '(f[-1], objs[-1])\n', (1076, 1093), False, 'import numpy\n'), ((1103, 1134), 'numpy.allclose', 'numpy.allclose', (['f[-2]', 'objs[-2]'], {}), '(f[-2], objs[-2])\n', (1117, 1134), False, 'import numpy\n'), ((1663, 1690), 'numpy.allclose', 'numpy.allclose', (['s1[0]', 'f[3]'], {}), '(s1[0], f[3])\n', (1677, 1690), False, 'import numpy\n'), ((1700, 1727), 'numpy.allclose', 'numpy.allclose', (['s1[1]', 'f[5]'], {}), '(s1[1], f[5])\n', (1714, 1727), False, 'import numpy\n'), ((1737, 1764), 'numpy.allclose', 'numpy.allclose', (['s1[2]', 'f[7]'], {}), '(s1[2], f[7])\n', (1751, 1764), False, 'import numpy\n'), ((1774, 1801), 'numpy.allclose', 'numpy.allclose', (['s1[3]', 'f[9]'], {}), '(s1[3], f[9])\n', (1788, 1801), False, 'import numpy\n'), ((2302, 2329), 'numpy.allclose', 'numpy.allclose', (['s[0]', 'f[20]'], {}), '(s[0], f[20])\n', (2316, 2329), False, 'import numpy\n'), ((2339, 2366), 'numpy.allclose', 'numpy.allclose', (['s[1]', 'f[17]'], {}), '(s[1], f[17])\n', (2353, 2366), False, 'import numpy\n'), ((2376, 2403), 'numpy.allclose', 'numpy.allclose', (['s[2]', 'f[14]'], {}), '(s[2], f[14])\n', (2390, 2403), False, 'import numpy\n'), ((2413, 2440), 'numpy.allclose', 'numpy.allclose', (['s[3]', 'f[11]'], {}), '(s[3], f[11])\n', (2427, 2440), False, 'import numpy\n'), ((741, 761), 'numpy.allclose', 'numpy.allclose', (['l', 'i'], {}), '(l, i)\n', (755, 761), False, 'import numpy\n'), ((1461, 1487), 'numpy.allclose', 'numpy.allclose', (['k', 'objs[i]'], {}), '(k, objs[i])\n', (1475, 1487), False, 'import numpy\n'), ((3690, 3734), 'numpy.array_equal', 'numpy.array_equal', (['orig_data', 'rewritten_data'], {}), 
'(orig_data, rewritten_data)\n', (3707, 3734), False, 'import numpy\n'), ((4330, 4353), 'os.path.exists', 'os.path.exists', (['tmpname'], {}), '(tmpname)\n', (4344, 4353), False, 'import os\n'), ((4729, 4752), 'os.path.exists', 'os.path.exists', (['tmpname'], {}), '(tmpname)\n', (4743, 4752), False, 'import os\n'), ((5285, 5308), 'os.path.exists', 'os.path.exists', (['tmpname'], {}), '(tmpname)\n', (5299, 5308), False, 'import os\n'), ((4218, 4269), 'numpy.array_equal', 'numpy.array_equal', (['orig_data[index]', 'rewritten_data'], {}), '(orig_data[index], rewritten_data)\n', (4235, 4269), False, 'import numpy\n'), ((4355, 4373), 'os.unlink', 'os.unlink', (['tmpname'], {}), '(tmpname)\n', (4364, 4373), False, 'import os\n'), ((4631, 4660), 'numpy.allclose', 'numpy.allclose', (['arr', 'reloaded'], {}), '(arr, reloaded)\n', (4645, 4660), False, 'import numpy\n'), ((4678, 4710), 'numpy.array_equal', 'numpy.array_equal', (['arr', 'reloaded'], {}), '(arr, reloaded)\n', (4695, 4710), False, 'import numpy\n'), ((4754, 4772), 'os.unlink', 'os.unlink', (['tmpname'], {}), '(tmpname)\n', (4763, 4772), False, 'import os\n'), ((5310, 5328), 'os.unlink', 'os.unlink', (['tmpname'], {}), '(tmpname)\n', (5319, 5328), False, 'import os\n'), ((5379, 5411), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '(2, 3)'}), '(size=(2, 3))\n', (5398, 5411), False, 'import numpy\n'), ((5436, 5471), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '(2, 3, 4)'}), '(size=(2, 3, 4))\n', (5455, 5471), False, 'import numpy\n'), ((5495, 5533), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '(2, 3, 4, 5)'}), '(size=(2, 3, 4, 5))\n', (5514, 5533), False, 'import numpy\n'), ((3462, 3488), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (3478, 3488), False, 'import os\n'), ((5181, 5212), 'numpy.allclose', 'numpy.allclose', (['array', 'reloaded'], {}), '(array, reloaded)\n', (5195, 5212), False, 'import numpy\n'), ((5232, 5266), 
'numpy.array_equal', 'numpy.array_equal', (['array', 'reloaded'], {}), '(array, reloaded)\n', (5249, 5266), False, 'import numpy\n'), ((5565, 5597), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (5584, 5597), False, 'import numpy\n'), ((5930, 5962), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '(2, 3)'}), '(size=(2, 3))\n', (5949, 5962), False, 'import numpy\n'), ((5995, 6030), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '(2, 3, 4)'}), '(size=(2, 3, 4))\n', (6014, 6030), False, 'import numpy\n'), ((6062, 6100), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '(2, 3, 4, 5)'}), '(size=(2, 3, 4, 5))\n', (6081, 6100), False, 'import numpy\n'), ((6138, 6170), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (6157, 6170), False, 'import numpy\n')] |
import numpy as np
from scipy.spatial.distance import cdist
class KMeans:
    """K-means clustering with a configurable scipy distance metric."""

    def __init__(
            self,
            k: int,
            metric: str = "euclidean",
            tol: float = 1e-6,
            max_iter: int = 100):
        """
        inputs:
            k: int
                the number of centroids to use in cluster fitting
            metric: str
                the name of the distance metric to use (any metric accepted
                by scipy.spatial.distance.cdist)
            tol: float
                the minimum change in error between iterations below which
                the fit is considered converged
            max_iter: int
                the maximum number of iterations before quitting model fit
        raises:
            AttributeError: if k == 0
        """
        if k == 0:
            raise AttributeError("k must be a positive integer greater than zero")
        self.k = k
        self.tol = tol
        self.max_iter = max_iter
        self.metric = metric
        # clusters[i] holds the row indices of the samples assigned to centroid i
        self.clusters = [[] for _ in range(self.k)]
        # centroids[i] is the mean feature vector of cluster i
        self.centroids = []

    def fit(self, mat: np.ndarray):
        """
        fits the kmeans algorithm onto a provided 2D matrix

        inputs:
            mat: np.ndarray
                A 2D matrix where the rows are observations and columns are features
        raises:
            AttributeError: if k exceeds the number of observations
        """
        self.fit_mat = mat
        self.n = mat.shape[0]  # number of observations (rows)
        self.m = mat.shape[1]  # number of features (columns)
        if self.k > self.n:
            raise AttributeError("k must be less than the number of observations")
        last_mse = 0.0
        for it in range(self.max_iter):
            if it == 0:
                # initialize centroids with k distinct randomly-chosen samples
                # (np.random.seed() with no argument reseeds from OS entropy,
                # so runs are NOT reproducible; seed explicitly if needed)
                np.random.seed()
                rand_idx = np.random.choice(self.n, self.k, replace=False)
                self.centroids = [mat[idx] for idx in rand_idx]
            else:
                # recompute each centroid as the mean of its current cluster
                self.centroids = self.get_centroids()
            # reassign every sample to its nearest centroid
            self.clusters = self._create_clusters(self.centroids)
            cur_mse = self.get_error()
            if it > 0 and abs(cur_mse - last_mse) <= self.tol:
                break  # converged
            last_mse = cur_mse

    def predict(self, mat: np.ndarray) -> np.ndarray:
        """
        predicts the cluster labels for a provided 2D matrix

        inputs:
            mat: np.ndarray
                A 2D matrix where the rows are observations and columns are features
        outputs:
            np.ndarray
                a 1D array with the cluster label for each of the observations in `mat`
        """
        # BUG FIX: this previously returned np.array([labels]) -- a 2D array
        # of shape (1, n) -- although the docstring promises a 1D array.
        labels = [self._closest_centroid(sample, self.centroids) for sample in mat]
        return np.array(labels)

    def get_error(self) -> float:
        """
        returns the mean-squared distance between each fitted sample and its centroid

        outputs:
            float
                the mean-squared error of the current clustering
        """
        squared_errors = []
        for c_idx, cluster in enumerate(self.clusters):
            centroid = np.array([self.centroids[c_idx]])
            for sample_idx in cluster:
                sample = np.array([self.fit_mat[sample_idx]])
                dist = cdist(sample, centroid, self.metric)
                squared_errors.append(dist ** 2)
        return np.mean(squared_errors)

    def get_centroids(self) -> np.ndarray:
        """
        returns the centroid locations of the fit model

        outputs:
            a length-k list of mean feature vectors (one per cluster)
        """
        centroids = [[] for _ in range(self.k)]
        for cluster_idx, cluster in enumerate(self.clusters):
            # mean feature vector over all samples assigned to this cluster
            centroids[cluster_idx] = np.array(self.fit_mat[cluster]).mean(axis=0)
        return centroids

    def _create_clusters(self, centroids):
        """
        assigns every sample of the fitted matrix to its closest centroid

        input:
            centroids: list of mean feature vectors defining the centroids
        output:
            list of lists of sample indices, one inner list per cluster
        """
        clusters = [[] for _ in range(self.k)]
        for idx, sample in enumerate(self.fit_mat):
            clusters[self._closest_centroid(sample, centroids)].append(idx)
        return clusters

    def _closest_centroid(self, sample, centroids):
        """
        gets the index of the centroid closest to the input data point

        inputs:
            sample: input data point in m dimensions
            centroids: list of mean feature vectors representing each centroid
        output:
            index of the centroid closest to `sample`
        """
        # one cdist call computes the distance to every centroid at once
        distances = cdist(np.array([sample]), np.array(centroids), self.metric)
        return np.argmin(distances)
"numpy.random.seed",
"numpy.argmin",
"numpy.mean",
"numpy.array",
"numpy.random.choice"
] | [((2154, 2170), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (2168, 2170), True, 'import numpy as np\n'), ((2328, 2365), 'numpy.random.choice', 'np.random.choice', (['n', 'k'], {'replace': '(False)'}), '(n, k, replace=False)\n', (2344, 2365), True, 'import numpy as np\n'), ((4546, 4564), 'numpy.array', 'np.array', (['[labels]'], {}), '([labels])\n', (4554, 4564), True, 'import numpy as np\n'), ((5783, 5806), 'numpy.mean', 'np.mean', (['squared_errors'], {}), '(squared_errors)\n', (5790, 5806), True, 'import numpy as np\n'), ((8326, 8346), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (8335, 8346), True, 'import numpy as np\n'), ((2811, 2848), 'numpy.random.choice', 'np.random.choice', (['n', 'k'], {'replace': '(False)'}), '(n, k, replace=False)\n', (2827, 2848), True, 'import numpy as np\n'), ((5485, 5503), 'numpy.array', 'np.array', (['[sample]'], {}), '([sample])\n', (5493, 5503), True, 'import numpy as np\n'), ((5505, 5534), 'numpy.array', 'np.array', (['[self.centroids[i]]'], {}), '([self.centroids[i]])\n', (5513, 5534), True, 'import numpy as np\n'), ((6568, 6599), 'numpy.array', 'np.array', (['self.fit_mat[cluster]'], {}), '(self.fit_mat[cluster])\n', (6576, 6599), True, 'import numpy as np\n'), ((8243, 8261), 'numpy.array', 'np.array', (['[sample]'], {}), '([sample])\n', (8251, 8261), True, 'import numpy as np\n'), ((8263, 8287), 'numpy.array', 'np.array', (['[centroids[i]]'], {}), '([centroids[i]])\n', (8271, 8287), True, 'import numpy as np\n')] |
# Inputs: Database containing:
# - origin-destination pairs (table)
# - a subset of the destinations that contain services of interest (table)
# Maximum duration of walking
# Output: Table containing O-D pairs only for the destinations of interest
import pandas as pd
import numpy as np
import sqlite3
import code
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
    """Entry point: subset the O-D database and compute walk scores."""
    # file names and parameters
    # NOTE(review): the original assigned two db paths back-to-back; only the
    # second ever took effect, so the dead assignment was removed.
    db_fn = '../query_results/sea_hospital_5km.db'
    max_dur = 30*60  # 30 minutes, in seconds
    # run main function
    subsetDatabase(db_fn, max_dur)
    # BUG FIX: removed `db.close()` -- `db` is local to subsetDatabase() and
    # was never defined in this scope, so the call raised a NameError.
def subsetDatabase(db_fn, max_dur):
    """Build the `destsubset` table (if missing) and compute walk scores.

    inputs:
        db_fn: path to the sqlite database file
        max_dur: maximum walking duration, in seconds
    """
    # create connection to the database
    logger.info('subsetting the database')
    db = sqlite3.connect(db_fn)
    cursor = db.cursor()
    # create a dataframe with only the relevant O-D pairs
    # NOTE(review): getTabNames is defined elsewhere in this module;
    # presumably it returns the database's table names -- confirm.
    if 'destsubset' not in getTabNames(db):
        createSubsetDataframe(cursor, max_dur)
        db.commit()
    # calculate walk scores for origins
    calcWalkScores(cursor, db, max_dur)
    print('finished!!')
def calcWalkScores(cursor, db, max_dur):
    """
    Compute the HSSA walk score and per-capita investment for every origin,
    then write the investment column back to the `orig` table.

    inputs:
        cursor: sqlite3 cursor on the combined database
        db: the sqlite3 connection (used for the final write and commit)
        max_dur: maximum walking duration, in seconds
    """
    logger.info('calculating walk scores')
    # get np.DataFrame of orig ids and senior population
    orig_ids = getTable(cursor, 'orig', [0, 4], ['orig_id', 'pop_over_65'])
    scores_dict = {}
    # initialize the population reached by each contract
    # NOTE(review): pandas removed the .ix indexer in 1.0; this module assumes
    # an older pandas -- migrate to .loc/.iloc when upgrading.
    contract_per_cap = {}
    contract_data = getTable(cursor, 'contracts', [0, 3], ['ContractNo', 'TotalBudgt'])
    for i in range(contract_data.shape[0]):
        contract_per_cap[contract_data.ix[i,'ContractNo']] = {'amt' : contract_data.ix[i,'TotalBudgt'], 'ppl' : 0}
    # contracts reachable from each origin
    orig_contracts = {}
    # Loop through each origin id
    for i in range(orig_ids.shape[0]):
        if i % 100 == 0:
            print('i = {} / {}'.format(i, orig_ids.shape[0]))
        # find all services within max_dur of this orig
        services_pd = getVendorsForOrig(orig_ids.ix[i, 'orig_id'], cursor).drop_duplicates()
        # initialize contract list for orig i
        orig_contracts[orig_ids.ix[i,'orig_id']] = {'contracts' : [], 'pop' : orig_ids.ix[i, 'pop_over_65']}
        # loop through the services
        for j in range(services_pd.shape[0]):
            # get the walking duration from this origin to the service
            tmp = cursor.execute('''SELECT walking_time FROM destsubset
                                 WHERE dest_id={} AND orig_id={}'''
                                 .format(services_pd.ix[j, 'dest_id'], orig_ids.ix[i, 'orig_id']))
            duration = tmp.fetchall()
            services_pd.ix[j, 'walking_time'] = duration[0][0]
            # add origin pop to the service's reached-population count
            contract_per_cap[services_pd.ix[j, 'ContractNo']]['ppl'] += orig_ids.ix[i,'pop_over_65']
            # add contract id to the origin's contracts
            orig_contracts[orig_ids.ix[i,'orig_id']]['contracts'].append(services_pd.ix[j, 'ContractNo'])
        # calculate the walking (HSSA) score for this origin
        score = calcHSSAScore(services_pd, cursor, max_dur)
        scores_dict[orig_ids.ix[i,'orig_id']] = {'HSSA' : score}
    # calculate per capita spending for each contract
    contract_per_cap = calcPerCapSpending(contract_data, contract_per_cap)
    # calculate spending per origin (updates scores_dict in place)
    scores_dict = calcOrigFunding(orig_contracts, contract_per_cap, scores_dict)
    # build the output frame; only the investment column is written back
    # (HSSA normalization was already disabled upstream, so the unused HSSA
    # list and the misleading "...normalized the scores" print were removed)
    investments = [val['investment'] for val in scores_dict.values()]
    scores_pd = pd.DataFrame({'orig_id' : list(scores_dict.keys()), 'investment' : investments})
    # BUG FIX: removed a leftover `code.interact(local=locals())` that dropped
    # the run into an interactive shell before results were written.
    WriteDB(scores_pd, db, 'investment')
    db.commit()
def calcOrigFunding(orig_contracts, contract_per_cap, scores_dict):
    '''
    calculate the amount of funding (per capita) apportioned to each origin,
    using the contracts in each origin's walkshed and each contract's
    per-capita funding; stores the result under the 'investment' key of
    scores_dict (which is mutated in place and also returned)
    '''
    # (removed an unused `output_dict = {}` local left over from an earlier
    # version of this function)
    for orig_id, orig_data in orig_contracts.items():
        orig_spending = 0
        for contract_id in orig_data['contracts']:
            # this origin receives its population share of the contract's
            # per-capita funding
            per_cap_spend = contract_per_cap[contract_id]['per_cap']
            orig_spending += per_cap_spend * orig_data['pop']
        scores_dict[orig_id].update({'investment' : orig_spending})
    return(scores_dict)
def calcPerCapSpending(contract_data, contract_per_cap):
    """
    For each contract, compute per-capita spending (total budget divided by
    the population reached) and store it under the 'per_cap' key.

    inputs:
        contract_data: pandas DataFrame with a 'ContractNo' column
        contract_per_cap: dict keyed by contract number, each value a dict
            with 'amt' (total budget) and 'ppl' (population reached)
    outputs:
        the same contract_per_cap dict, with 'per_cap' added to each entry
    """
    for i in range(contract_data.shape[0]):
        # .iloc replaces the long-removed pandas .ix indexer; this assumes
        # contract_data carries the default RangeIndex, as built by getTable
        dict_i = contract_per_cap[contract_data.iloc[i]['ContractNo']]
        # guard against division by zero when no one can reach the contract
        if dict_i['ppl']:
            d_per_cap = {'per_cap' : dict_i['amt'] / dict_i['ppl']}
        else:
            d_per_cap = {'per_cap' : 0}
        dict_i.update(d_per_cap)
    return(contract_per_cap)
def WriteDB(df, db, col_name):
    '''
    Add a new REAL column named `col_name` to the `orig` table and fill it
    from `df`.

    inputs:
        df: pandas DataFrame whose FIRST column is orig_id and SECOND column
            holds the value to store (positional access via .values)
        db: open sqlite3 connection containing an `orig` table
        col_name: name of the column to create; it is interpolated directly
            into the SQL, so it must be a trusted internal string -- never
            pass user input here
    '''
    # (removed an unused cursor and leftover commented-out code)
    logging.getLogger(__name__).info('Writing to DB')
    db.execute("ALTER TABLE orig ADD COLUMN {} REAL".format(col_name))
    add_data_str = "UPDATE orig SET {} =(?) WHERE orig_id=(?)".format(col_name)
    for i in range(len(df)):
        value = df.values[i][1]
        idx = df.values[i][0]
        db.execute(add_data_str, (value, idx))
    # commit
    db.commit()
def calcHSSAScore(services, cursor, max_dur):
    '''
    Calculate the HSSA score for a given origin.

    Note: this code is adapted from Logan Noel's code
    https://github.com/GeoDaCenter/contracts/blob/master/analytics/ScoreModel.py

    inputs:
        services: pandas DataFrame of reachable services with 'Project',
            'walking_time' and 'TotalBudgt' columns (default RangeIndex)
        cursor: unused here; kept for interface compatibility with callers
        max_dur: maximum walking duration in seconds (decay horizon)
    outputs:
        the HSSA score (float)
    '''
    WEIGHTS = [.1, .25, .5, .75, 1]
    weight_dict = {}
    score = 0
    for i in range(services.shape[0]):
        # .iloc replaces the removed pandas .ix indexer; `services` is built
        # with a default RangeIndex so positional access is equivalent
        cat = services.iloc[i]['Project']
        if cat not in weight_dict:
            # BUG FIX: the original stored a reference to the single shared
            # WEIGHTS list for every category, so popping a weight for one
            # category consumed it for ALL categories.  Each category now
            # gets its own copy of the weight schedule.
            weight_dict[cat] = list(WEIGHTS)
        if len(weight_dict[cat]) > 0:
            # diminishing returns: weights are popped from the end, so the
            # first service of a category gets weight 1, the next .75, ...
            variety_weight = weight_dict[cat].pop()
        else:
            variety_weight = 0
        distance_weight = linearDecayFunction(services.iloc[i]['walking_time'], max_dur)
        # calculate score
        score += variety_weight * distance_weight * services.iloc[i]['TotalBudgt']
    return(score)
def linearDecayFunction(time, upper):
# penalty function for distance
# taken from https://github.com/GeoDaCenter/contracts/blob/master/analytics/ScoreModel.py
upper = float(upper)
time = float(time)
if time > upper:
return 0
else:
return (upper - time) / upper
# def VendorLookup(cursor, id, kind):
# # look up the value for a specific record, such as Project or TotalBudgt
# # Note: this code is adapted from <NAME>'s code
# # https://github.com/GeoDaCenter/contracts/blob/master/analytics/ScoreModel.py
# query = "SELECT {} FROM contracts WHERE ContractNo is {}".format(kind, id)
# data = cursor.execute(query).fetchone()
# return(data)
def getVendorsForOrig(orig_id, cursor):
# get all of the vendors within reach of a given origin point
# note - doesn't actually get the duration (creates a column with 'None')
tmp = cursor.execute('''SELECT * FROM contracts
WHERE dest_id IN
(SELECT dest_id FROM destsubset WHERE orig_id={})'''
.format(orig_id))
services_tuple = tmp.fetchall()
# convert to pandas data frame
services_list = [x for x in services_tuple]
services_pd = pd.DataFrame(services_list, columns=getColNames(cursor, 'contracts'))
# add column for duration
services_pd['walking_time'] = None
return(services_pd)
def createSubsetDataframe(cursor, max_dur):
# create a pandas dataframe containing the O-D pairs for destinations that contain services
# and adds it to the database
# # get list of dest id's that contain the services of interest
# tmp = cursor.execute("SELECT dest_id FROM contracts")
# service_dest_ids_tuple = tmp.fetchall()
# service_dest_ids = [x[0] for x in service_dest_ids_tuple]
# # filter the database to O-D pairs with duration < specified time
# tmp = cursor.execute("SELECT * FROM origxdest WHERE walking_time < {}".format(max_dur))
logger.info('subsetting the walking results table')
tmp = cursor.execute('''SELECT * FROM walking
WHERE duration < {}
AND dest_id IN (SELECT dest_id FROM contracts)'''.format(max_dur))
#
od_pairs = tmp.fetchall()
#
# create pandas dataframe
data_list = [[row[0], row[1], row[2]] for row in od_pairs]
od_pairs = pd.DataFrame(data_list, columns=['orig_id', 'dest_id', 'walking_time'])
# write this as a table in the database...
# strings
cols_str = "orig_id VARCHAR (15), dest_id VARCHAR (15), walking_time INT"
col_names = ['orig_id', 'dest_id', 'walking_time']
# convert index back to column and format data frame
# od_pairs_subset['dest_id'] = od_pairs_subset.index
# od_pairs_subset = od_pairs_subset[['orig_id', 'dest_id', 'walking_time']]
# add to data base
addPdToDb(od_pairs, cursor, 'destsubset', cols_str, col_names)
return
def addPdToDb(d_frame, cursor, new_table_name, cols_str, col_names):
# add a pandas dataframe (d_frame) to a database (db)
# NOTE: this code is not generalizable (it adds the 3rd column as an int)
# create new table
add_table_str = "CREATE TABLE {}({})".format(new_table_name, cols_str)
cursor.execute(add_table_str)
# add data
add_data_str = "INSERT INTO {}({}) VALUES(?,?,?)".format(new_table_name, ', '.join(col_names))
for i in range(d_frame.shape[0]):
# cursor.execute(add_data_str, (d_frame.ix[i,:]))
cursor.execute(add_data_str, (d_frame.ix[i,0],d_frame.ix[i,1],int(d_frame.ix[i,2])))
def addLatLon(d_frame, cursor, table_name):
# add lat and lon columns to the data
# retrieve the lat and lon from table_name
# match the lat/lon to the d_frame using 'orig_id' column name
# NOTE: this assumes there are three columns in 'table_name' corresponding to id, lon, and lat
# get the lat/lon data
lat_lon_pd = getTable(cursor, table_name, [0,1,2], ['id', 'lon', 'lat'])
# tmp = cursor.execute("SELECT * FROM {}".format(table_name))
# tuple_data = tmp.fetchall()
# # turn into pandas dataframe
# data_list = [[row[0], row[1], row[2]] for row in tuple_data]
# lat_lon_pd = pd.DataFrame(data_list, columns=['id', 'lon', 'lat'])
lat_lon_pd.set_index('id', inplace=True)
# match to the input dataframe
# CHECK THIS! -- does it work with 'id' as index
d_frame_combined = pd.merge(d_frame, lat_lon_pd, left_on='orig_id', right_on='id')
return(d_frame_combined)
def addContractData(d_frame, cursor, table_name):
# Add info about the contracts to the data frame
# get the contract data
contract_pd = getTable(cursor, table_name, [4,1,3], ['dest_id', 'Project', 'TotalBudgt'])
# some destinations may have multiple contracts -- identify these and add together
# get unique contract dests
contract_dests = np.unique(contract_pd.loc[:, 'dest_id'])
# creat pd dataframe to be filled out
nans = [float('NaN')]*len(contract_dests)
pd_data_dict = {'dest_id' : contract_dests, 'tot_budget' : nans, 'con_names' : nans}
comb_unique_contracts = pd.DataFrame(pd_data_dict)
comb_unique_contracts.set_index('dest_id', inplace=True)
for dest in contract_dests:
# find matching ids
ids = np.where(contract_pd.loc[:,'dest_id'] == dest)
# get total budget and contract names
comb_unique_contracts.ix[dest, 'tot_budget'] = sum([contract_pd.loc[id,'TotalBudgt'] for id in ids[0]])
comb_unique_contracts.ix[dest, 'con_names'] = '\n'.join([contract_pd.loc[id, 'Project'] for id in ids[0]])
# match to the data frame
d_frame_combined = pd.merge(d_frame, comb_unique_contracts, left_index=True, right_index=True)
return(d_frame_combined)
def addDemographicData(d_frame, cursor, table_name, dem_ids):
# add demographic data to the data frame
# dem_ids is a dictionary with key=column name and value=column number
# get the demographic data
# NEED TO WAIT UNTIL AGE / POPULATION DATA ADDED TO TABLE
for (col_name, col_num) in dem_names:
dem_pd = getTable(cursor, 'orig', [0, col_num], ['orig_id', col_name])
# merge with the data frame
d_frame_combined = pd.merge(d_frame, dem_pd, left_on='orig_id', right_on='orig_id')
return(d_frame_combined)
def getTable(cursor, table_name, col_nums, col_names):
# get table 'table_name' from the database
# convert to pandas data frame
# col_nums = a list of column numbers
# col_names = a list of column names
tmp = cursor.execute("SELECT * FROM {}".format(table_name))
tuple_data = tmp.fetchall()
# convert to pandas dataframe
data_list = [[row[i] for i in col_nums] for row in tuple_data]
contract_pd = pd.DataFrame(data_list, columns=col_names)
return(contract_pd)
def getTabNames(db):
nms = db.execute("SELECT name FROM sqlite_master WHERE type='table';")
names = [nm[0] for nm in nms]
return(names)
def getColNames(cursor, table_name):
tmp = cursor.execute("SELECT * FROM {}".format(table_name))
tmp.fetchone()
nmes = [description[0] for description in tmp.description]
# print(nmes)
return(nmes)
if __name__ == '__main__':
main()
# scratch
# temp = getTable(db, 'destsubset', [0,1,2], ['orig_id', 'dest_id', 'walking_time'])
# id_list = od_pairs.index.tolist()
# for i in service_dest_ids:
# if i in id_list:
# print ('phew')
# def calculateAccessScore():
# # remove all O-D pairs with no destination or >30min
# for orig in origs:
# # find all dests with services
# score = 0
# for each dest:
# for each service in dest:
# score += new_score
# # option A
# for each orig:
# select from table where dur < 30 AND orig == orig AND (dest in service_dest)
# # option B
# select from table where dur < 30 and (dest in service_desrt)
# for each orig:
# select from new_table hwere orig == orig | [
"pandas.DataFrame",
"logging.basicConfig",
"pandas.merge",
"numpy.unique",
"numpy.where",
"sqlite3.connect",
"logging.getLogger"
] | [((382, 421), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (401, 421), False, 'import logging\n'), ((431, 458), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (448, 458), False, 'import logging\n'), ((857, 879), 'sqlite3.connect', 'sqlite3.connect', (['db_fn'], {}), '(db_fn)\n', (872, 879), False, 'import sqlite3\n'), ((9247, 9318), 'pandas.DataFrame', 'pd.DataFrame', (['data_list'], {'columns': "['orig_id', 'dest_id', 'walking_time']"}), "(data_list, columns=['orig_id', 'dest_id', 'walking_time'])\n", (9259, 9318), True, 'import pandas as pd\n'), ((11286, 11349), 'pandas.merge', 'pd.merge', (['d_frame', 'lat_lon_pd'], {'left_on': '"""orig_id"""', 'right_on': '"""id"""'}), "(d_frame, lat_lon_pd, left_on='orig_id', right_on='id')\n", (11294, 11349), True, 'import pandas as pd\n'), ((11751, 11791), 'numpy.unique', 'np.unique', (["contract_pd.loc[:, 'dest_id']"], {}), "(contract_pd.loc[:, 'dest_id'])\n", (11760, 11791), True, 'import numpy as np\n'), ((11997, 12023), 'pandas.DataFrame', 'pd.DataFrame', (['pd_data_dict'], {}), '(pd_data_dict)\n', (12009, 12023), True, 'import pandas as pd\n'), ((12537, 12612), 'pandas.merge', 'pd.merge', (['d_frame', 'comb_unique_contracts'], {'left_index': '(True)', 'right_index': '(True)'}), '(d_frame, comb_unique_contracts, left_index=True, right_index=True)\n', (12545, 12612), True, 'import pandas as pd\n'), ((13096, 13160), 'pandas.merge', 'pd.merge', (['d_frame', 'dem_pd'], {'left_on': '"""orig_id"""', 'right_on': '"""orig_id"""'}), "(d_frame, dem_pd, left_on='orig_id', right_on='orig_id')\n", (13104, 13160), True, 'import pandas as pd\n'), ((13627, 13669), 'pandas.DataFrame', 'pd.DataFrame', (['data_list'], {'columns': 'col_names'}), '(data_list, columns=col_names)\n', (13639, 13669), True, 'import pandas as pd\n'), ((12159, 12206), 'numpy.where', 'np.where', (["(contract_pd.loc[:, 'dest_id'] == dest)"], {}), "(contract_pd.loc[:, 
'dest_id'] == dest)\n", (12167, 12206), True, 'import numpy as np\n')] |
import pickle
import numpy as np
import pandas as pd
from fugue import FugueWorkflow
from pytest import raises
from sklearn.base import is_classifier, is_regressor
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import StackingRegressor
from fugue_tune import Space
from fugue_tune.sklearn import (
_process_stack_space,
_sk_cv,
_sk_stack_cv,
_to_model,
_to_model_str,
build_sk_cv,
)
from fugue_tune.sklearn import sk_space as ss
from fugue_tune.sklearn import suggest_sk_model, suggest_sk_stacking_model
from fugue_tune.space import Grid
def test_to_model():
assert is_classifier(_to_model("sklearn.ensemble.RandomForestClassifier"))
assert is_regressor(_to_model(LinearRegression))
raises(TypeError, lambda: _to_model("dummy"))
raises(TypeError, lambda: _to_model("fugue_tune.space.Space"))
raises(TypeError, lambda: _to_model(Space))
def test_to_model_str():
assert "sklearn.linear_model._base.LinearRegression" == _to_model_str(
LinearRegression
)
assert "sklearn.linear_model._base.LinearRegression" == _to_model_str(
"sklearn.linear_model.LinearRegression"
)
assert _to_model(_to_model_str(LinearRegression)) is LinearRegression
def test_tunable_sk_cv(tmpdir):
res = _sk_cv(
"sklearn.linear_model.LinearRegression",
_create_mock_data(),
_sk__scoring="neg_mean_absolute_error",
_sk__label_col="l",
_sk__feature_prefix="f_",
fit_intercept=True,
)
assert res["error"] < 0.1
assert _to_model(res["hp"]["_sk__model"]) is LinearRegression
assert res["hp"]["fit_intercept"]
assert isinstance(res["metadata"]["cv_scores"], list)
assert "model_path" not in res["metadata"]
res = _sk_cv(
"sklearn.linear_model.LinearRegression",
_create_mock_data(),
_sk__scoring="neg_mean_absolute_error",
_sk__feature_prefix="f_",
_sk__label_col="l",
_sk__save_path=str(tmpdir),
fit_intercept=True,
)
obj = pickle.load(open(res["metadata"]["model_path"], mode="rb"))
assert isinstance(obj, LinearRegression)
def test_tunable_sk_stack_cv(tmpdir):
res = _sk_stack_cv(
"sklearn.linear_model.LinearRegression",
'[{"_sk__model": "sklearn.linear_model._base.LinearRegression", "normalize": true},'
'{"_sk__model": "sklearn.linear_model._base.LinearRegression", "normalize": false}]',
_create_mock_data(),
_sk__scoring="neg_mean_absolute_error",
_sk__label_col="l",
_sk__feature_prefix="f_",
fit_intercept=True,
_sk__save_path=str(tmpdir),
)
print(res)
assert res["error"] < 0.1
assert _to_model(res["hp"]["_sk__model"]) is StackingRegressor
assert (
_to_model(res["hp"]["_sk__estimators"]["stacking"]["_sk__model"])
is LinearRegression
)
assert res["hp"]["_sk__estimators"]["stacking"]["fit_intercept"]
assert isinstance(res["metadata"]["cv_scores"], list)
obj = pickle.load(open(res["metadata"]["model_path"], mode="rb"))
assert isinstance(obj, StackingRegressor)
def test_build_sk_cv(tmpdir):
space = sum(
[
ss(LinearRegression, fit_intercept=Grid(True, False)),
ss(LinearRegression, normalize=Grid(True, False)),
]
)
dag = FugueWorkflow()
build_sk_cv(
space,
dag.df(_create_mock_data()),
scoring="neg_mean_absolute_error",
cv=4,
label_col="l",
feature_prefix="f_",
save_path=str(tmpdir),
).tune(distributable=False, serialize_path=str(tmpdir)).show()
dag.run()
def test_suggest_sk_model(tmpdir):
space = sum(
[
ss(LinearRegression, fit_intercept=Grid(True, False)),
ss(LinearRegression, normalize=Grid(True, False)),
]
)
res = suggest_sk_model(
space,
_create_mock_data(),
scoring="neg_mean_absolute_error",
serialize_path=str(tmpdir),
label_col="l",
feature_prefix="f_",
save_model=True,
partition_keys=["p"],
visualize_top_n=2,
)
assert len(res) == 4
print(res)
def test_suggest_sk_stacking_model(tmpdir):
space = sum(
[
ss(LinearRegression, fit_intercept=Grid(True, False)),
ss(Ridge, alpha=Grid(0.1, 0.2)),
]
)
space2 = sum(
[
ss(LinearRegression, normalize=Grid(True, False)),
]
)
res = suggest_sk_stacking_model(
space,
space2,
_create_mock_data(),
scoring="neg_mean_absolute_error",
serialize_path=str(tmpdir),
label_col="l",
feature_prefix="f_",
save_model=True,
partition_keys=["p"],
top_n=2,
)
assert len(res) == 4
space = sum(
[
ss(LogisticRegression),
ss(RandomForestClassifier),
]
)
space2 = sum(
[
ss(LogisticRegression),
]
)
res = suggest_sk_stacking_model(
space,
space2,
_create_mock_data(regression=False),
scoring="neg_mean_absolute_error",
serialize_path=str(tmpdir),
label_col="l",
feature_prefix="f_",
save_model=True,
top_n=2,
)
assert len(res) == 1
print(res)
def test_process_stack_space(tmpdir):
space1 = ss(LinearRegression, normalize=Grid(True, False))
space2 = ss(LinearRegression, fit_intercept=Grid(True, False))
dag = FugueWorkflow()
result0 = build_sk_cv(
space1,
dag.df(_create_mock_data()),
scoring="neg_mean_absolute_error",
cv=2,
label_col="l",
feature_prefix="f_",
).tune(distributable=False, serialize_path=str(tmpdir))
res0 = result0.process(_process_stack_space, params=dict(keys=[], space=space2))
res0.show()
result1 = build_sk_cv(
space1,
dag.df(_create_mock_data()).partition(by=["p"]),
scoring="neg_mean_absolute_error",
cv=2,
label_col="l",
feature_prefix="f_",
).tune(distributable=False, serialize_path=str(tmpdir))
res1 = result1.process(_process_stack_space, params=dict(keys=["p"], space=space2))
dag.run()
assert 2 == len(res0.result.as_array())
assert 8 == len(res1.result.as_array())
def _create_mock_data(regression=True):
np.random.seed(0)
df = pd.DataFrame(np.random.rand(100, 3), columns=["f_a", "f_b", "f_c"])
df["d"] = "x"
if regression:
df["l"] = df["f_a"] * 3 + df["f_b"] * 4 + df["f_c"] * 5 + 100
else:
df["l"] = (df["f_a"] * 3 - df["f_b"] * 4 + df["f_c"] * 5) > 0.5
df["p"] = np.random.randint(low=0, high=4, size=(100, 1))
return df
| [
"numpy.random.seed",
"fugue_tune.space.Grid",
"fugue_tune.sklearn._to_model",
"fugue_tune.sklearn.sk_space",
"fugue.FugueWorkflow",
"numpy.random.randint",
"numpy.random.rand",
"fugue_tune.sklearn._to_model_str"
] | [((3430, 3445), 'fugue.FugueWorkflow', 'FugueWorkflow', ([], {}), '()\n', (3443, 3445), False, 'from fugue import FugueWorkflow\n'), ((5628, 5643), 'fugue.FugueWorkflow', 'FugueWorkflow', ([], {}), '()\n', (5641, 5643), False, 'from fugue import FugueWorkflow\n'), ((6501, 6518), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6515, 6518), True, 'import numpy as np\n'), ((6799, 6846), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(4)', 'size': '(100, 1)'}), '(low=0, high=4, size=(100, 1))\n', (6816, 6846), True, 'import numpy as np\n'), ((716, 768), 'fugue_tune.sklearn._to_model', '_to_model', (['"""sklearn.ensemble.RandomForestClassifier"""'], {}), "('sklearn.ensemble.RandomForestClassifier')\n", (725, 768), False, 'from fugue_tune.sklearn import _process_stack_space, _sk_cv, _sk_stack_cv, _to_model, _to_model_str, build_sk_cv\n'), ((794, 821), 'fugue_tune.sklearn._to_model', '_to_model', (['LinearRegression'], {}), '(LinearRegression)\n', (803, 821), False, 'from fugue_tune.sklearn import _process_stack_space, _sk_cv, _sk_stack_cv, _to_model, _to_model_str, build_sk_cv\n'), ((1075, 1106), 'fugue_tune.sklearn._to_model_str', '_to_model_str', (['LinearRegression'], {}), '(LinearRegression)\n', (1088, 1106), False, 'from fugue_tune.sklearn import _process_stack_space, _sk_cv, _sk_stack_cv, _to_model, _to_model_str, build_sk_cv\n'), ((1181, 1235), 'fugue_tune.sklearn._to_model_str', '_to_model_str', (['"""sklearn.linear_model.LinearRegression"""'], {}), "('sklearn.linear_model.LinearRegression')\n", (1194, 1235), False, 'from fugue_tune.sklearn import _process_stack_space, _sk_cv, _sk_stack_cv, _to_model, _to_model_str, build_sk_cv\n'), ((1639, 1673), 'fugue_tune.sklearn._to_model', '_to_model', (["res['hp']['_sk__model']"], {}), "(res['hp']['_sk__model'])\n", (1648, 1673), False, 'from fugue_tune.sklearn import _process_stack_space, _sk_cv, _sk_stack_cv, _to_model, _to_model_str, build_sk_cv\n'), ((2794, 2828), 
'fugue_tune.sklearn._to_model', '_to_model', (["res['hp']['_sk__model']"], {}), "(res['hp']['_sk__model'])\n", (2803, 2828), False, 'from fugue_tune.sklearn import _process_stack_space, _sk_cv, _sk_stack_cv, _to_model, _to_model_str, build_sk_cv\n'), ((2871, 2936), 'fugue_tune.sklearn._to_model', '_to_model', (["res['hp']['_sk__estimators']['stacking']['_sk__model']"], {}), "(res['hp']['_sk__estimators']['stacking']['_sk__model'])\n", (2880, 2936), False, 'from fugue_tune.sklearn import _process_stack_space, _sk_cv, _sk_stack_cv, _to_model, _to_model_str, build_sk_cv\n'), ((6541, 6563), 'numpy.random.rand', 'np.random.rand', (['(100)', '(3)'], {}), '(100, 3)\n', (6555, 6563), True, 'import numpy as np\n'), ((853, 871), 'fugue_tune.sklearn._to_model', '_to_model', (['"""dummy"""'], {}), "('dummy')\n", (862, 871), False, 'from fugue_tune.sklearn import _process_stack_space, _sk_cv, _sk_stack_cv, _to_model, _to_model_str, build_sk_cv\n'), ((903, 938), 'fugue_tune.sklearn._to_model', '_to_model', (['"""fugue_tune.space.Space"""'], {}), "('fugue_tune.space.Space')\n", (912, 938), False, 'from fugue_tune.sklearn import _process_stack_space, _sk_cv, _sk_stack_cv, _to_model, _to_model_str, build_sk_cv\n'), ((970, 986), 'fugue_tune.sklearn._to_model', '_to_model', (['Space'], {}), '(Space)\n', (979, 986), False, 'from fugue_tune.sklearn import _process_stack_space, _sk_cv, _sk_stack_cv, _to_model, _to_model_str, build_sk_cv\n'), ((1271, 1302), 'fugue_tune.sklearn._to_model_str', '_to_model_str', (['LinearRegression'], {}), '(LinearRegression)\n', (1284, 1302), False, 'from fugue_tune.sklearn import _process_stack_space, _sk_cv, _sk_stack_cv, _to_model, _to_model_str, build_sk_cv\n'), ((4956, 4978), 'fugue_tune.sklearn.sk_space', 'ss', (['LogisticRegression'], {}), '(LogisticRegression)\n', (4958, 4978), True, 'from fugue_tune.sklearn import sk_space as ss\n'), ((4992, 5018), 'fugue_tune.sklearn.sk_space', 'ss', (['RandomForestClassifier'], {}), '(RandomForestClassifier)\n', 
(4994, 5018), True, 'from fugue_tune.sklearn import sk_space as ss\n'), ((5076, 5098), 'fugue_tune.sklearn.sk_space', 'ss', (['LogisticRegression'], {}), '(LogisticRegression)\n', (5078, 5098), True, 'from fugue_tune.sklearn import sk_space as ss\n'), ((5532, 5549), 'fugue_tune.space.Grid', 'Grid', (['(True)', '(False)'], {}), '(True, False)\n', (5536, 5549), False, 'from fugue_tune.space import Grid\n'), ((5599, 5616), 'fugue_tune.space.Grid', 'Grid', (['(True)', '(False)'], {}), '(True, False)\n', (5603, 5616), False, 'from fugue_tune.space import Grid\n'), ((3321, 3338), 'fugue_tune.space.Grid', 'Grid', (['(True)', '(False)'], {}), '(True, False)\n', (3325, 3338), False, 'from fugue_tune.space import Grid\n'), ((3384, 3401), 'fugue_tune.space.Grid', 'Grid', (['(True)', '(False)'], {}), '(True, False)\n', (3388, 3401), False, 'from fugue_tune.space import Grid\n'), ((3847, 3864), 'fugue_tune.space.Grid', 'Grid', (['(True)', '(False)'], {}), '(True, False)\n', (3851, 3864), False, 'from fugue_tune.space import Grid\n'), ((3910, 3927), 'fugue_tune.space.Grid', 'Grid', (['(True)', '(False)'], {}), '(True, False)\n', (3914, 3927), False, 'from fugue_tune.space import Grid\n'), ((4397, 4414), 'fugue_tune.space.Grid', 'Grid', (['(True)', '(False)'], {}), '(True, False)\n', (4401, 4414), False, 'from fugue_tune.space import Grid\n'), ((4445, 4459), 'fugue_tune.space.Grid', 'Grid', (['(0.1)', '(0.2)'], {}), '(0.1, 0.2)\n', (4449, 4459), False, 'from fugue_tune.space import Grid\n'), ((4549, 4566), 'fugue_tune.space.Grid', 'Grid', (['(True)', '(False)'], {}), '(True, False)\n', (4553, 4566), False, 'from fugue_tune.space import Grid\n')] |
#!/usr/bin/env python
import numpy as np
import pandas as pd
import torch
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.preprocessing import MinMaxScaler as Scaler
from sklearn.cross_decomposition import PLSRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC, SVR
from sklearn.model_selection import StratifiedKFold, KFold
from torch.utils.data import DataLoader, TensorDataset
import models
import os
import utils
import joblib
from copy import deepcopy
from rdkit import Chem
def SVM(X, y, X_ind, y_ind, reg=False):
""" Cross validation and Independent test for SVM classifion/regression model.
Arguments:
X (np.ndarray): m x n feature matrix for cross validation, where m is the number of samples
and n is the number of features.
y (np.ndarray): m-d label array for cross validation, where m is the number of samples and
equals to row of X.
X_ind (np.ndarray): m x n Feature matrix for independent set, where m is the number of samples
and n is the number of features.
y_ind (np.ndarray): m-d label array for independent set, where m is the number of samples and
equals to row of X_ind, and l is the number of types.
reg (bool): it True, the training is for regression, otherwise for classification.
Returns:
cvs (np.ndarray): m x l result matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
inds (np.ndarray): m x l result matrix for independent test, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
"""
if reg:
folds = KFold(5).split(X)
alg = SVR()
else:
folds = StratifiedKFold(5).split(X, y)
alg = SVC(probability=True)
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
gs = GridSearchCV(deepcopy(alg), {'C': 2.0 ** np.array([-15, 15]), 'gamma': 2.0 ** np.array([-15, 15])}, n_jobs=10)
gs.fit(X, y)
params = gs.best_params_
print(params)
for i, (trained, valided) in enumerate(folds):
model = deepcopy(alg)
model.C = params['C']
model.gamma = params['gamma']
if not reg:
model.probability=True
model.fit(X[trained], y[trained], sample_weight=[1 if v >= 4 else 0.1 for v in y[trained]])
if reg:
cvs[valided] = model.predict(X[valided])
inds += model.predict(X_ind)
else:
cvs[valided] = model.predict_proba(X[valided])[:, 1]
inds += model.predict_proba(X_ind)[:, 1]
return cvs, inds / 5
def RF(X, y, X_ind, y_ind, reg=False):
""" Cross validation and Independent test for RF classifion/regression model.
Arguments:
X (np.ndarray): m x n feature matrix for cross validation, where m is the number of samples
and n is the number of features.
y (np.ndarray): m-d label array for cross validation, where m is the number of samples and
equals to row of X.
X_ind (np.ndarray): m x n Feature matrix for independent set, where m is the number of samples
and n is the number of features.
y_ind (np.ndarray): m-d label array for independent set, where m is the number of samples and
equals to row of X_ind, and l is the number of types.
reg (bool): it True, the training is for regression, otherwise for classification.
Returns:
cvs (np.ndarray): m x l result matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
inds (np.ndarray): m x l result matrix for independent test, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
"""
if reg:
folds = KFold(5).split(X)
alg = RandomForestRegressor
else:
folds = StratifiedKFold(5).split(X, y)
alg = RandomForestClassifier
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
for i, (trained, valided) in enumerate(folds):
model = alg(n_estimators=1000, n_jobs=10)
model.fit(X[trained], y[trained], sample_weight=[1 if v >= 4 else 0.1 for v in y[trained]])
if reg:
cvs[valided] = model.predict(X[valided])
inds += model.predict(X_ind)
else:
cvs[valided] = model.predict_proba(X[valided])[:, 1]
inds += model.predict_proba(X_ind)[:, 1]
return cvs, inds / 5
def KNN(X, y, X_ind, y_ind, reg=False):
""" Cross validation and Independent test for KNN classifion/regression model.
Arguments:
X (np.ndarray): m x n feature matrix for cross validation, where m is the number of samples
and n is the number of features.
y (np.ndarray): m-d label array for cross validation, where m is the number of samples and
equals to row of X.
X_ind (np.ndarray): m x n Feature matrix for independent set, where m is the number of samples
and n is the number of features.
y_ind (np.ndarray): m-d label array for independent set, where m is the number of samples and
equals to row of X_ind, and l is the number of types.
reg (bool): it True, the training is for regression, otherwise for classification.
Returns:
cvs (np.ndarray): m x l result matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
inds (np.ndarray): m x l result matrix for independent test, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
"""
if reg:
folds = KFold(5).split(X)
alg = KNeighborsRegressor
else:
folds = StratifiedKFold(5).split(X, y)
alg = KNeighborsClassifier
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
for i, (trained, valided) in enumerate(folds):
model = alg(n_jobs=10)
model.fit(X[trained], y[trained])
if reg:
cvs[valided] = model.predict(X[valided])
inds += model.predict(X_ind)
else:
cvs[valided] = model.predict_proba(X[valided])[:, 1]
inds += model.predict_proba(X_ind)[:, 1]
return cvs, inds / 5
def NB(X, y, X_ind, y_ind):
""" Cross validation and Independent test for Naive Bayes classifion model.
Arguments:
X (np.ndarray): m x n feature matrix for cross validation, where m is the number of samples
and n is the number of features.
y (np.ndarray): m-d label array for cross validation, where m is the number of samples and
equals to row of X.
X_ind (np.ndarray): m x n Feature matrix for independent set, where m is the number of samples
and n is the number of features.
y_ind (np.ndarray): m-d label array for independent set, where m is the number of samples and
equals to row of X_ind, and l is the number of types.
Returns:
cvs (np.ndarray): m x l result matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
inds (np.ndarray): m x l result matrix for independent test, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
"""
folds = KFold(5).split(X)
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
for i, (trained, valided) in enumerate(folds):
model = GaussianNB()
model.fit(X[trained], y[trained], sample_weight=[1 if v >= 4 else 0.1 for v in y[trained]])
cvs[valided] = model.predict_proba(X[valided])[:, 1]
inds += model.predict_proba(X_ind)[:, 1]
return cvs, inds / 5
def PLS(X, y, X_ind, y_ind):
""" Cross validation and Independent test for PLS regression model.
Arguments:
X (np.ndarray): m x n feature matrix for cross validation, where m is the number of samples
and n is the number of features.
y (np.ndarray): m-d label array for cross validation, where m is the number of samples and
equals to row of X.
X_ind (np.ndarray): m x n Feature matrix for independent set, where m is the number of samples
and n is the number of features.
y_ind (np.ndarray): m-d label array for independent set, where m is the number of samples and
equals to row of X_ind, and l is the number of types.
reg (bool): it True, the training is for regression, otherwise for classification.
Returns:
cvs (np.ndarray): m x l result matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
inds (np.ndarray): m x l result matrix for independent test, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
"""
folds = KFold(5).split(X)
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
for i, (trained, valided) in enumerate(folds):
model = PLSRegression()
model.fit(X[trained], y[trained])
cvs[valided] = model.predict(X[valided])[:, 0]
inds += model.predict(X_ind)[:, 0]
return cvs, inds / 5
def DNN(X, y, X_ind, y_ind, out, reg=False):
""" Cross validation and Independent test for DNN classifion/regression model.
Arguments:
X (np.ndarray): m x n feature matrix for cross validation, where m is the number of samples
and n is the number of features.
y (np.ndarray): m x l label matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types.
X_ind (np.ndarray): m x n Feature matrix for independent set, where m is the number of samples
and n is the number of features.
y_ind (np.ndarray): m-d label arrays for independent set, where m is the number of samples and
equals to row of X_ind, and l is the number of types.
reg (bool): it True, the training is for regression, otherwise for classification.
Returns:
cvs (np.ndarray): m x l result matrix for cross validation, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
inds (np.ndarray): m x l result matrix for independent test, where m is the number of samples and
equals to row of X, and l is the number of types and equals to row of X.
"""
if y.shape[1] > 1 or reg:
folds = KFold(5).split(X)
else:
folds = StratifiedKFold(5).split(X, y[:, 0])
NET = models.STFullyConnected if y.shape[1] == 1 else models.MTFullyConnected
indep_set = TensorDataset(torch.Tensor(X_ind), torch.Tensor(y_ind))
indep_loader = DataLoader(indep_set, batch_size=BATCH_SIZE)
cvs = np.zeros(y.shape)
inds = np.zeros(y_ind.shape)
for i, (trained, valided) in enumerate(folds):
train_set = TensorDataset(torch.Tensor(X[trained]), torch.Tensor(y[trained]))
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE)
valid_set = TensorDataset(torch.Tensor(X[valided]), torch.Tensor(y[valided]))
valid_loader = DataLoader(valid_set, batch_size=BATCH_SIZE)
net = NET(X.shape[1], y.shape[1], is_reg=reg)
net.fit(train_loader, valid_loader, out='%s_%d' % (out, i), epochs=N_EPOCH, lr=LR)
cvs[valided] = net.predict(valid_loader)
inds += net.predict(indep_loader)
return cvs, inds / 5
def Train_RF(X, y, out, reg=False):
if reg:
model = RandomForestRegressor(n_estimators=1000, n_jobs=10)
else:
model = RandomForestClassifier(n_estimators=1000, n_jobs=10)
model.fit(X, y, sample_weight=[1 if v >= 4 else 0.1 for v in y])
joblib.dump(model, out, compress=3)
def mt_task(fname, out, reg=False, is_extra=True, time_split=False):
    """Train/evaluate multi-task and per-target models on a raw ChEMBL export.

    Args:
        fname: path to the raw LIGAND tsv (read with pd.read_table).
        out: output path prefix for model files and result tsv files.
        reg (bool): True for regression, False for classification.
        is_extra (bool): if True, append implied-inactive records to the
            numeric label set.
        time_split (bool): if True, hold out compounds first published after
            2015; otherwise draw a random test set of the same size.

    Relies on module-level globals defined in __main__: `pair` (column
    names), `trgs` (target ids), `th` (activity threshold), plus the helpers
    DNN, Train_RF, cross_validation, Scaler, utils and Chem.
    """
    # Keep relevant columns, drop rows without a SMILES, restrict to targets.
    df = pd.read_table(fname)[pair].dropna(subset=pair[1:2])
    df = df[df.Target_ChEMBL_ID.isin(trgs)]
    # First publication year per compound; > 2015 forms the temporal test pool.
    year = df.groupby(pair[1])[pair[-1:]].min().dropna()
    year = year[year.Document_Year > 2015].index
    df = df[pair].set_index(pair[0:2])
    # Numeric labels: mean pChEMBL value per (target, compound).
    numery = df[pair[2]].groupby(pair[0:2]).mean().dropna()
    # Records that imply inactivity without a usable numeric value.
    comments = df[(df.Comment.str.contains('Not Active') == True)]
    inhibits = df[(df.Standard_Type == 'Inhibition') & df.Standard_Relation.isin(['<', '<='])]
    relations = df[df.Standard_Type.isin(['EC50', 'IC50', 'Kd', 'Ki']) & df.Standard_Relation.isin(['>', '>='])]
    binary = pd.concat([comments, inhibits, relations], axis=0)
    binary = binary[~binary.index.isin(numery.index)]
    # Implied-inactive records get a fixed value just below the pChEMBL 4 mark.
    binary[pair[2]] = 3.99
    binary = binary[pair[2]].groupby(pair[0:2]).first()
    df = numery.append(binary) if is_extra else numery
    if not reg:
        # Binarise at the global threshold `th` for classification.
        # NOTE(review): df is a Series at this point; indexing it with
        # pair[2] relies on label-based access — confirm this path is exercised.
        df[pair[2]] = (df[pair[2]] > th).astype(float)
    # Pivot to one column per target, one row per compound.
    df = df.unstack(pair[0])
    test_ix = set(df.index).intersection(year)
    df_test = df.loc[test_ix] if time_split else df.sample(len(test_ix))
    df_data = df.drop(df_test.index)
    df_data = df_data.sample(len(df_data))  # shuffle the training rows
    for alg in ['RF', 'MT_DNN', 'SVM', 'PLS', 'KNN', 'DNN']:
        if alg == 'MT_DNN':
            # Multi-task DNN: one model predicting all targets at once.
            test_x = utils.Predictor.calc_fp([Chem.MolFromSmiles(mol) for mol in df_test.index])
            data_x = utils.Predictor.calc_fp([Chem.MolFromSmiles(mol) for mol in df_data.index])
            scaler = Scaler(); scaler.fit(data_x)
            test_x = scaler.transform(test_x)
            data_x = scaler.transform(data_x)
            data = df_data.stack().to_frame(name='Label')
            test = df_test.stack().to_frame(name='Label')
            data_p, test_p = DNN(data_x, df_data.values, test_x, df_test.values, out=out, reg=reg)
            # Re-attach predictions to the (compound, target) multi-index.
            data['Score'] = pd.DataFrame(data_p, index=df_data.index, columns=df_data.columns).stack()
            test['Score'] = pd.DataFrame(test_p, index=df_test.index, columns=df_test.columns).stack()
            data.to_csv(out + alg + '_LIGAND.cv.tsv', sep='\t')
            test.to_csv(out + alg + '_LIGAND.ind.tsv', sep='\t')
        else:
            # Single-task baselines: one model per target.
            for trg in trgs:
                test_y = df_test[trg].dropna()
                data_y = df_data[trg].dropna()
                test_x = utils.Predictor.calc_fp([Chem.MolFromSmiles(mol) for mol in test_y.index])
                data_x = utils.Predictor.calc_fp([Chem.MolFromSmiles(mol) for mol in data_y.index])
                if alg != 'RF':
                    # Non-tree models need standardised fingerprints.
                    scaler = Scaler(); scaler.fit(data_x)
                    test_x = scaler.transform(test_x)
                    data_x = scaler.transform(data_x)
                else:
                    # For RF, also fit and persist a model on all data.
                    X = np.concatenate([data_x, test_x], axis=0)
                    y = np.concatenate([data_y.values, test_y.values], axis=0)
                    Train_RF(X, y, out=out + '%s_%s.pkg' % (alg, trg), reg=reg)
                data, test = data_y.to_frame(name='Label'), test_y.to_frame(name='Label')
                a, b = cross_validation(data_x, data.values, test_x, test.values,
                                        alg, out + '%s_%s' % (alg, trg), reg=reg)
                data['Score'], test['Score'] = a, b
                data.to_csv(out + '%s_%s.cv.tsv' % (alg, trg), sep='\t')
                test.to_csv(out + '%s_%s.ind.tsv' % (alg, trg), sep='\t')
def single_task(feat, alg='RF', reg=False, is_extra=True):
    """Train/evaluate one algorithm on one ChEMBL target.

    Args:
        feat: target ChEMBL id selecting the rows of data/LIGAND_RAW.tsv.
        alg: algorithm key passed to cross_validation ('RF', 'SVM', ...).
        reg (bool): True for regression, False for classification.
        is_extra (bool): if True, append implied-inactive records to the
            numeric label set.

    Uses module-level globals `pair` and `th` set in __main__. Results are
    written under output/single/.
    """
    # Load the raw export, drop rows without a SMILES, keep one target.
    df = pd.read_table('data/LIGAND_RAW.tsv').dropna(subset=pair[1:2])
    df = df[df[pair[0]] == feat]
    df = df[pair].set_index(pair[1])
    # Compounds first published after 2015 form the temporal test pool.
    year = df[pair[-1:]].groupby(pair[1]).min().dropna()
    test = year[year[pair[-1]] > 2015].index
    # Numeric labels: mean pChEMBL value per compound.
    numery = df[pair[2]].groupby(pair[1]).mean().dropna()
    # Records that imply inactivity without a usable numeric value.
    comments = df[(df.Comment.str.contains('Not Active') == True)]
    inhibits = df[(df.Standard_Type == 'Inhibition') & df.Standard_Relation.isin(['<', '<='])]
    relations = df[df.Standard_Type.isin(['EC50', 'IC50', 'Kd', 'Ki']) & df.Standard_Relation.isin(['>', '>='])]
    binary = pd.concat([comments, inhibits, relations], axis=0)
    binary = binary[~binary.index.isin(numery.index)]
    # Implied-inactive records get a fixed value just below the pChEMBL 4 mark.
    binary[pair[2]] = 3.99
    binary = binary[pair[2]].groupby(binary.index).first()
    df = numery.append(binary) if is_extra else numery
    if not reg:
        # Binarise at the global threshold `th` for classification.
        df = (df > th).astype(float)
    df = df.sample(len(df))  # shuffle
    print(feat, len(numery[numery >= th]), len(numery[numery < th]), len(binary))
    test_ix = set(df.index).intersection(test)
    test = df.loc[test_ix].dropna()
    data = df.drop(test.index)
    test_x = utils.Predictor.calc_fp([Chem.MolFromSmiles(mol) for mol in test.index])
    data_x = utils.Predictor.calc_fp([Chem.MolFromSmiles(mol) for mol in data.index])
    out = 'output/single/%s_%s_%s' % (alg, 'REG' if reg else 'CLS', feat)
    if alg != 'RF':
        # Non-tree models need standardised fingerprints.
        scaler = Scaler(); scaler.fit(data_x)
        test_x = scaler.transform(test_x)
        data_x = scaler.transform(data_x)
    else:
        # For RF, also fit and persist a model on all data.
        X = np.concatenate([data_x, test_x], axis=0)
        y = np.concatenate([data.values, test.values], axis=0)
        # NOTE(review): data/test are Series here so y is 1-D; y[:, 0]
        # differs from mt_task's plain `y` (L19930) — verify this branch.
        Train_RF(X, y[:, 0], out=out + '.pkg', reg=reg)
    data, test = data.to_frame(name='Label'), test.to_frame(name='Label')
    data['Score'], test['Score'] = cross_validation(data_x, data.values, test_x, test.values, alg, out, reg=reg)
    data.to_csv(out + '.cv.tsv', sep='\t')
    test.to_csv(out + '.ind.tsv', sep='\t')
def cross_validation(X, y, X_ind, y_ind, alg='DNN', out=None, reg=False):
    """Dispatch 5-fold cross validation plus independent test to one algorithm.

    Args:
        X, y: training feature matrix and label matrix/array.
        X_ind, y_ind: independent-test feature matrix and labels.
        alg: one of 'RF', 'SVM', 'KNN', 'NB', 'PLS', 'DNN'.
        out: output path prefix (used by the DNN to save checkpoints).
        reg (bool): True for regression, False for classification.

    Returns:
        (cv, ind): cross-validated scores and independent-test scores.

    Raises:
        ValueError: if `alg` is not a supported algorithm name.
    """
    if alg == 'RF':
        cv, ind = RF(X, y[:, 0], X_ind, y_ind[:, 0], reg=reg)
    elif alg == 'SVM':
        cv, ind = SVM(X, y[:, 0], X_ind, y_ind[:, 0], reg=reg)
    elif alg == 'KNN':
        cv, ind = KNN(X, y[:, 0], X_ind, y_ind[:, 0], reg=reg)
    elif alg == 'NB':
        cv, ind = NB(X, y[:, 0], X_ind, y_ind[:, 0])
    elif alg == 'PLS':
        cv, ind = PLS(X, y[:, 0], X_ind, y_ind[:, 0])
    elif alg == 'DNN':
        cv, ind = DNN(X, y, X_ind, y_ind, out=out, reg=reg)
    else:
        # Previously an unrecognised alg fell through and the `return`
        # raised UnboundLocalError; fail fast with a clear message instead.
        raise ValueError("Unknown algorithm: %s" % alg)
    return cv, ind
if __name__ == '__main__':
    # Column names of the raw ChEMBL export; read as a module-level global
    # by mt_task/single_task.
    pair = ['Target_ChEMBL_ID', 'Smiles', 'pChEMBL_Value', 'Comment',
            'Standard_Type', 'Standard_Relation', 'Document_Year']
    BATCH_SIZE = int(2 ** 11)  # DNN minibatch size (2048)
    N_EPOCH = 1000  # max DNN training epochs
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin training to GPU 0
    th= 6.5  # pChEMBL activity threshold for classification labels
    # Target ChEMBL ids modelled in this run.
    trgs = ['CHEMBL226', 'CHEMBL251', 'CHEMBL240']
    for reg in [False, True]:
        # Learning rate (module-level global read by the DNN helpers).
        LR = 1e-4 if reg else 1e-5
        for chembl in trgs:
            single_task(chembl, 'DNN', reg=reg)
            single_task(chembl, 'RF', reg=reg)
            single_task(chembl, 'SVM', reg=reg)
            if reg:
                # PLS only makes sense for regression.
                single_task(chembl, 'PLS', reg=reg)
            else:
                # Naive Bayes only for classification.
                single_task(chembl, 'NB', reg=reg)
            single_task(chembl, 'KNN', reg=reg)
        mt_task('data/LIGAND_RAW.tsv', 'output/random_split/', reg=reg, time_split=False)
        mt_task('data/LIGAND_RAW.tsv', 'output/time_split/', reg=reg, time_split=True)
| [
"sklearn.preprocessing.MinMaxScaler",
"joblib.dump",
"sklearn.svm.SVC",
"pandas.read_table",
"pandas.DataFrame",
"torch.utils.data.DataLoader",
"torch.Tensor",
"pandas.concat",
"sklearn.ensemble.RandomForestClassifier",
"copy.deepcopy",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.cross_... | [((2135, 2152), 'numpy.zeros', 'np.zeros', (['y.shape'], {}), '(y.shape)\n', (2143, 2152), True, 'import numpy as np\n'), ((2164, 2185), 'numpy.zeros', 'np.zeros', (['y_ind.shape'], {}), '(y_ind.shape)\n', (2172, 2185), True, 'import numpy as np\n'), ((4420, 4437), 'numpy.zeros', 'np.zeros', (['y.shape'], {}), '(y.shape)\n', (4428, 4437), True, 'import numpy as np\n'), ((4449, 4470), 'numpy.zeros', 'np.zeros', (['y_ind.shape'], {}), '(y_ind.shape)\n', (4457, 4470), True, 'import numpy as np\n'), ((6416, 6433), 'numpy.zeros', 'np.zeros', (['y.shape'], {}), '(y.shape)\n', (6424, 6433), True, 'import numpy as np\n'), ((6445, 6466), 'numpy.zeros', 'np.zeros', (['y_ind.shape'], {}), '(y_ind.shape)\n', (6453, 6466), True, 'import numpy as np\n'), ((8081, 8098), 'numpy.zeros', 'np.zeros', (['y.shape'], {}), '(y.shape)\n', (8089, 8098), True, 'import numpy as np\n'), ((8110, 8131), 'numpy.zeros', 'np.zeros', (['y_ind.shape'], {}), '(y_ind.shape)\n', (8118, 8131), True, 'import numpy as np\n'), ((9760, 9777), 'numpy.zeros', 'np.zeros', (['y.shape'], {}), '(y.shape)\n', (9768, 9777), True, 'import numpy as np\n'), ((9789, 9810), 'numpy.zeros', 'np.zeros', (['y_ind.shape'], {}), '(y_ind.shape)\n', (9797, 9810), True, 'import numpy as np\n'), ((11693, 11737), 'torch.utils.data.DataLoader', 'DataLoader', (['indep_set'], {'batch_size': 'BATCH_SIZE'}), '(indep_set, batch_size=BATCH_SIZE)\n', (11703, 11737), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((11748, 11765), 'numpy.zeros', 'np.zeros', (['y.shape'], {}), '(y.shape)\n', (11756, 11765), True, 'import numpy as np\n'), ((11777, 11798), 'numpy.zeros', 'np.zeros', (['y_ind.shape'], {}), '(y_ind.shape)\n', (11785, 11798), True, 'import numpy as np\n'), ((12689, 12724), 'joblib.dump', 'joblib.dump', (['model', 'out'], {'compress': '(3)'}), '(model, out, compress=3)\n', (12700, 12724), False, 'import joblib\n'), ((13395, 13445), 'pandas.concat', 'pd.concat', (['[comments, inhibits, 
relations]'], {'axis': '(0)'}), '([comments, inhibits, relations], axis=0)\n', (13404, 13445), True, 'import pandas as pd\n'), ((16800, 16850), 'pandas.concat', 'pd.concat', (['[comments, inhibits, relations]'], {'axis': '(0)'}), '([comments, inhibits, relations], axis=0)\n', (16809, 16850), True, 'import pandas as pd\n'), ((2026, 2031), 'sklearn.svm.SVR', 'SVR', ([], {}), '()\n', (2029, 2031), False, 'from sklearn.svm import SVC, SVR\n'), ((2103, 2124), 'sklearn.svm.SVC', 'SVC', ([], {'probability': '(True)'}), '(probability=True)\n', (2106, 2124), False, 'from sklearn.svm import SVC, SVR\n'), ((2208, 2221), 'copy.deepcopy', 'deepcopy', (['alg'], {}), '(alg)\n', (2216, 2221), False, 'from copy import deepcopy\n'), ((2437, 2450), 'copy.deepcopy', 'deepcopy', (['alg'], {}), '(alg)\n', (2445, 2450), False, 'from copy import deepcopy\n'), ((8199, 8211), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (8209, 8211), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((9878, 9893), 'sklearn.cross_decomposition.PLSRegression', 'PLSRegression', ([], {}), '()\n', (9891, 9893), False, 'from sklearn.cross_decomposition import PLSRegression\n'), ((11632, 11651), 'torch.Tensor', 'torch.Tensor', (['X_ind'], {}), '(X_ind)\n', (11644, 11651), False, 'import torch\n'), ((11653, 11672), 'torch.Tensor', 'torch.Tensor', (['y_ind'], {}), '(y_ind)\n', (11665, 11672), False, 'import torch\n'), ((11959, 12003), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'BATCH_SIZE'}), '(train_set, batch_size=BATCH_SIZE)\n', (11969, 12003), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((12113, 12157), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_set'], {'batch_size': 'BATCH_SIZE'}), '(valid_set, batch_size=BATCH_SIZE)\n', (12123, 12157), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((12485, 12536), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(1000)', 
'n_jobs': '(10)'}), '(n_estimators=1000, n_jobs=10)\n', (12506, 12536), False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n'), ((12563, 12615), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(1000)', 'n_jobs': '(10)'}), '(n_estimators=1000, n_jobs=10)\n', (12585, 12615), False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n'), ((17608, 17616), 'sklearn.preprocessing.MinMaxScaler', 'Scaler', ([], {}), '()\n', (17614, 17616), True, 'from sklearn.preprocessing import MinMaxScaler as Scaler\n'), ((17743, 17783), 'numpy.concatenate', 'np.concatenate', (['[data_x, test_x]'], {'axis': '(0)'}), '([data_x, test_x], axis=0)\n', (17757, 17783), True, 'import numpy as np\n'), ((17796, 17846), 'numpy.concatenate', 'np.concatenate', (['[data.values, test.values]'], {'axis': '(0)'}), '([data.values, test.values], axis=0)\n', (17810, 17846), True, 'import numpy as np\n'), ((8053, 8061), 'sklearn.model_selection.KFold', 'KFold', (['(5)'], {}), '(5)\n', (8058, 8061), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((9732, 9740), 'sklearn.model_selection.KFold', 'KFold', (['(5)'], {}), '(5)\n', (9737, 9740), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((11884, 11908), 'torch.Tensor', 'torch.Tensor', (['X[trained]'], {}), '(X[trained])\n', (11896, 11908), False, 'import torch\n'), ((11910, 11934), 'torch.Tensor', 'torch.Tensor', (['y[trained]'], {}), '(y[trained])\n', (11922, 11934), False, 'import torch\n'), ((12038, 12062), 'torch.Tensor', 'torch.Tensor', (['X[valided]'], {}), '(X[valided])\n', (12050, 12062), False, 'import torch\n'), ((12064, 12088), 'torch.Tensor', 'torch.Tensor', (['y[valided]'], {}), '(y[valided])\n', (12076, 12088), False, 'import torch\n'), ((14243, 14251), 'sklearn.preprocessing.MinMaxScaler', 'Scaler', ([], {}), '()\n', (14249, 14251), True, 'from sklearn.preprocessing import MinMaxScaler as 
Scaler\n'), ((16219, 16255), 'pandas.read_table', 'pd.read_table', (['"""data/LIGAND_RAW.tsv"""'], {}), "('data/LIGAND_RAW.tsv')\n", (16232, 16255), True, 'import pandas as pd\n'), ((17363, 17386), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['mol'], {}), '(mol)\n', (17381, 17386), False, 'from rdkit import Chem\n'), ((17449, 17472), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['mol'], {}), '(mol)\n', (17467, 17472), False, 'from rdkit import Chem\n'), ((1994, 2002), 'sklearn.model_selection.KFold', 'KFold', (['(5)'], {}), '(5)\n', (1999, 2002), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((2058, 2076), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['(5)'], {}), '(5)\n', (2073, 2076), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((2236, 2255), 'numpy.array', 'np.array', (['[-15, 15]'], {}), '([-15, 15])\n', (2244, 2255), True, 'import numpy as np\n'), ((2273, 2292), 'numpy.array', 'np.array', (['[-15, 15]'], {}), '([-15, 15])\n', (2281, 2292), True, 'import numpy as np\n'), ((4262, 4270), 'sklearn.model_selection.KFold', 'KFold', (['(5)'], {}), '(5)\n', (4267, 4270), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((4342, 4360), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['(5)'], {}), '(5)\n', (4357, 4360), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((6262, 6270), 'sklearn.model_selection.KFold', 'KFold', (['(5)'], {}), '(5)\n', (6267, 6270), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((6340, 6358), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['(5)'], {}), '(5)\n', (6355, 6358), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((11439, 11447), 'sklearn.model_selection.KFold', 'KFold', (['(5)'], {}), '(5)\n', (11444, 11447), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((11483, 11501), 
'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['(5)'], {}), '(5)\n', (11498, 11501), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((12805, 12825), 'pandas.read_table', 'pd.read_table', (['fname'], {}), '(fname)\n', (12818, 12825), True, 'import pandas as pd\n'), ((14074, 14097), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['mol'], {}), '(mol)\n', (14092, 14097), False, 'from rdkit import Chem\n'), ((14171, 14194), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['mol'], {}), '(mol)\n', (14189, 14194), False, 'from rdkit import Chem\n'), ((14608, 14674), 'pandas.DataFrame', 'pd.DataFrame', (['data_p'], {'index': 'df_data.index', 'columns': 'df_data.columns'}), '(data_p, index=df_data.index, columns=df_data.columns)\n', (14620, 14674), True, 'import pandas as pd\n'), ((14711, 14777), 'pandas.DataFrame', 'pd.DataFrame', (['test_p'], {'index': 'df_test.index', 'columns': 'df_test.columns'}), '(test_p, index=df_test.index, columns=df_test.columns)\n', (14723, 14777), True, 'import pandas as pd\n'), ((15313, 15321), 'sklearn.preprocessing.MinMaxScaler', 'Scaler', ([], {}), '()\n', (15319, 15321), True, 'from sklearn.preprocessing import MinMaxScaler as Scaler\n'), ((15496, 15536), 'numpy.concatenate', 'np.concatenate', (['[data_x, test_x]'], {'axis': '(0)'}), '([data_x, test_x], axis=0)\n', (15510, 15536), True, 'import numpy as np\n'), ((15561, 15615), 'numpy.concatenate', 'np.concatenate', (['[data_y.values, test_y.values]'], {'axis': '(0)'}), '([data_y.values, test_y.values], axis=0)\n', (15575, 15615), True, 'import numpy as np\n'), ((15102, 15125), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['mol'], {}), '(mol)\n', (15120, 15125), False, 'from rdkit import Chem\n'), ((15202, 15225), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['mol'], {}), '(mol)\n', (15220, 15225), False, 'from rdkit import Chem\n')] |
import numpy as np
from numpy.linalg import norm
""" This gets the cost function that lead to a certain coordinate x and y. """
def get_point_cost_function(target_x, target_y, x_weight, y_weight, control_weight):
    """Build a quadratic cost penalising distance from a target point and control effort.

    Args:
        target_x, target_y: target coordinates.
        x_weight, y_weight: weights on the squared x/y position errors.
        control_weight: weight on the squared control term.

    Returns:
        cost_function(state, control) where state[0], state[1] are the
        current x/y position.
    """
    def cost_function(state, control):
        dx = target_x - state[0]
        dy = target_y - state[1]
        position_cost = x_weight * dx ** 2 + y_weight * dy ** 2
        return position_cost + control_weight * control ** 2
    return cost_function
"""
This gets the cost function that leads to tracking of a circle trajectory.
circle_center: loiter circle center position.
radius: loiter radius
direction: Direction of travel on circle, 1 for counter clockwise or -1 for clockwise.
direction_weight: Weight of error in direction relative to distance error
direction_tau: Direction weight exponentially decay with respect to distance.
control_weight: weight of control relative to distance error.
"""
def get_circle_cost_function(circle_center, radius, direction, direction_weight, direction_tau, control_weight):
    """Build a loiter-circle tracking cost.

    Args:
        circle_center: (x, y) loiter circle center.
        radius: loiter radius.
        direction: 1 for counter-clockwise travel, -1 for clockwise.
        direction_weight: weight of heading error relative to distance error.
        direction_tau: heading weight decays exponentially with distance
            from the circle.
        control_weight: weight of control relative to distance error.

    Returns:
        cost_function(state, control) with state = (x, y, theta).
    """
    def cost_function(state, control):
        x, y, theta = state[0], state[1], state[2]
        center = np.array([circle_center[0], circle_center[1], 0])
        radial = np.array([x, y, 0]) - center
        # Signed distance from the circle itself (negative when inside).
        dist_to_circle = norm(radial) - radius
        # Current heading as a unit vector.
        heading = np.array([np.cos(theta), np.sin(theta), 0])
        # Tangential travel direction that keeps the aircraft on the circle.
        desired_heading = np.cross(np.array([0, 0, direction]), radial / norm(radial))
        # Normalised heading mismatch in [0, 1].
        heading_error = norm(heading - desired_heading) / 2.0
        heading_cost = direction_weight * heading_error * np.exp(-direction_tau * dist_to_circle)
        return dist_to_circle ** 2 + heading_cost + control_weight * control ** 2
    return cost_function
"""
"""
def get_spline_cost_function(cached_spline, direction_weight, direction_tau, control_weight):
    """Build a spline-tracking cost.

    Args:
        cached_spline: object exposing get_s_distance(position) ->
            (s, distance) and get_velocity(s) -> 2-d velocity vector.
        direction_weight: weight of heading error relative to distance error.
        direction_tau: heading weight decays exponentially with distance
            from the spline.
        control_weight: weight of control relative to distance error.

    Returns:
        cost_function(state, control) with state = (x, y, theta).
    """
    def cost_function(state, control):
        # Closest point on the spline (arc length s) and distance to it.
        s, dist_to_spline = cached_spline.get_s_distance(state[0:2])
        heading = np.array([np.cos(state[2]), np.sin(state[2])])
        tangent = cached_spline.get_velocity(s)
        tangent = tangent / norm(tangent)
        # Normalised heading mismatch in [0, 1].
        heading_error = norm(heading - tangent) / 2.0
        heading_cost = direction_weight * heading_error * np.exp(-direction_tau * dist_to_spline)
        return dist_to_spline ** 2 + heading_cost + control_weight * control ** 2
    return cost_function
| [
"numpy.sin",
"numpy.linalg.norm",
"numpy.array",
"numpy.exp",
"numpy.cos",
"numpy.sqrt"
] | [((1191, 1257), 'numpy.sqrt', 'np.sqrt', (['((circle_center[0] - x) ** 2 + (circle_center[1] - y) ** 2)'], {}), '((circle_center[0] - x) ** 2 + (circle_center[1] - y) ** 2)\n', (1198, 1257), True, 'import numpy as np\n'), ((1470, 1489), 'numpy.array', 'np.array', (['[x, y, 0]'], {}), '([x, y, 0])\n', (1478, 1489), True, 'import numpy as np\n'), ((1492, 1541), 'numpy.array', 'np.array', (['[circle_center[0], circle_center[1], 0]'], {}), '([circle_center[0], circle_center[1], 0])\n', (1500, 1541), True, 'import numpy as np\n'), ((1595, 1618), 'numpy.linalg.norm', 'norm', (['vec_circle_to_pos'], {}), '(vec_circle_to_pos)\n', (1599, 1618), False, 'from numpy.linalg import norm\n'), ((1701, 1728), 'numpy.array', 'np.array', (['[0, 0, direction]'], {}), '([0, 0, direction])\n', (1709, 1728), True, 'import numpy as np\n'), ((1880, 1908), 'numpy.linalg.norm', 'norm', (['(v_unit - desired_v_vec)'], {}), '(v_unit - desired_v_vec)\n', (1884, 1908), False, 'from numpy.linalg import norm\n'), ((1983, 2022), 'numpy.exp', 'np.exp', (['(-direction_tau * dist_to_circle)'], {}), '(-direction_tau * dist_to_circle)\n', (1989, 2022), True, 'import numpy as np\n'), ((2556, 2571), 'numpy.linalg.norm', 'norm', (['desired_v'], {}), '(desired_v)\n', (2560, 2571), False, 'from numpy.linalg import norm\n'), ((2598, 2622), 'numpy.linalg.norm', 'norm', (['(v_unit - desired_v)'], {}), '(v_unit - desired_v)\n', (2602, 2622), False, 'from numpy.linalg import norm\n'), ((2691, 2730), 'numpy.exp', 'np.exp', (['(-direction_tau * dist_to_spline)'], {}), '(-direction_tau * dist_to_spline)\n', (2697, 2730), True, 'import numpy as np\n'), ((1366, 1379), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1372, 1379), True, 'import numpy as np\n'), ((1381, 1394), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1387, 1394), True, 'import numpy as np\n'), ((2443, 2456), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2449, 2456), True, 'import numpy as np\n'), ((2458, 2471), 'numpy.sin', 
'np.sin', (['theta'], {}), '(theta)\n', (2464, 2471), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
from sklearn import preprocessing
with tf.device('/gpu:1'):
    # Needle-insertion state predictor: VGG-style CNN over a camera image
    # fused with a dense branch over the 7-d robot state, trained to predict
    # the next 7-d robot state.
    # NOTE(review): absolute, machine-specific data paths — confirm they
    # exist on the target host before running.
    Arm1_CS_State = pd.read_csv('/home/kiyanoushs/Kiyanoush Codes/Needle Insertion/Data/Arm2_CS_ordered.csv', header=None)
    Arm1_NS_State = pd.read_csv('/home/kiyanoushs/Kiyanoush Codes/Needle Insertion/Data/Arm2_NS_ordered.csv', header=None)
    CS1_names = Arm1_CS_State.columns
    NS1_names = Arm1_NS_State.columns
    # Standardise both tables with a scaler fitted on the current-state data.
    scaler = preprocessing.StandardScaler()
    myScaler = scaler.fit(Arm1_CS_State)
    Arm1_CS_State = myScaler.transform(Arm1_CS_State)
    Arm1_NS_State = myScaler.transform(Arm1_NS_State)
    Arm1_CS_State = pd.DataFrame(Arm1_CS_State, columns=CS1_names)
    Arm1_NS_State = pd.DataFrame(Arm1_NS_State, columns=NS1_names)
    X = np.load('/home/kiyanoushs/Kiyanoush Codes/Needle Insertion/Data/Test1-20imageOrdered.npy')
    print(Arm1_CS_State[0:1])
    # Copy the image stack into a fresh array (the np.ones values are
    # immediately overwritten).
    D3data = np.ones((X.shape[0], X.shape[1], X.shape[2], X.shape[3]))
    D3data[ : , : , : , :] = X
    print(D3data.shape)
    # The last 2000 samples are held out for testing; the rest train.
    train = D3data[0:D3data.shape[0]-2000 , : , : , :]
    train = train.astype(np.float64)
    print("training dataset size : {}".format(train.shape[0]))
    # Per-image max normalisation.
    for i in range(0 , train.shape[0]):
        train[i, : , : , :] = train[i, : , : , :] / np.max(train[i, : , : , :])
    # Robot state rows aligned positionally with the image samples.
    robot_state_train_input = Arm1_CS_State[0:train.shape[0]]
    print("Robot state input trainingset size: {}".format(robot_state_train_input.shape))
    robot_state_train_label = Arm1_NS_State[0:train.shape[0]]
    print("Robot state label trainingset size: {}".format(robot_state_train_label.shape))
    test = D3data[D3data.shape[0]-2000: , : , : , :]
    test = test.astype(np.float64)
    for i in range(0 , test.shape[0]):
        test[i, : , : , :] = test[i, : , : , :] / np.max(test[i, : , : , :])
    robot_state_test_input = Arm1_CS_State[train.shape[0]:train.shape[0]+test.shape[0]]
    print("Robot state input testset size: {}".format(robot_state_test_input.shape))
    robot_state_test_label = Arm1_NS_State[train.shape[0]:train.shape[0]+test.shape[0]]
    print("Robot state label testset size: {}".format(robot_state_test_label.shape))
    # Image branch: VGG16-like stack of 3x3 convs with 2x2 max pooling.
    #image_input_layer = keras.layers.Input(shape=(174,224,1))
    image_input_layer = keras.layers.Input(shape=(224,224,3))
    layer_conv_1 = keras.layers.Conv2D(filters=64, kernel_size=(3,3), padding="same", activation="relu")(image_input_layer)
    layer_conv_2 = keras.layers.Conv2D(filters=64, kernel_size=(3,3), padding="same", activation="relu")(layer_conv_1)
    layer_pooling_1 = keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2))(layer_conv_2)
    layer_conv_3 = keras.layers.Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu")(layer_pooling_1)
    layer_conv_4 = keras.layers.Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu")(layer_conv_3)
    layer_pooling_2 = keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2))(layer_conv_4)
    layer_conv_5 = keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu")(layer_pooling_2)
    layer_conv_6 = keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu")(layer_conv_5)
    layer_conv_7 = keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu")(layer_conv_6)
    layer_pooling_3 = keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2))(layer_conv_7)
    layer_conv_8 = keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")(layer_pooling_3)
    layer_conv_9 = keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")(layer_conv_8)
    layer_conv_10 = keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")(layer_conv_9)
    layer_pooling_4 = keras.layers.MaxPool2D(pool_size=(2,2), strides=(2,2))(layer_conv_10)
    layer_conv_11 = keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")(layer_pooling_4)
    layer_conv_12 = keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")(layer_conv_11)
    layer_conv_13 = keras.layers.Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu")(layer_conv_12)
    cnn_flatten = keras.layers.Flatten()(layer_conv_13)
    # Robot-state branch: 7-d state through two small dense layers.
    robot_state_input_layer = keras.layers.Input(shape=(7,))
    dense_1 = keras.layers.Dense(15, activation="relu")(robot_state_input_layer)
    dense_2 = keras.layers.Dense(25, activation="relu")(dense_1)
    # Fuse both branches and regress the next 7-d robot state.
    concat = keras.layers.concatenate([dense_2 , cnn_flatten])
    dense_3 = keras.layers.Dense(80, activation="relu")(concat)
    dense_4 = keras.layers.Dense(20, activation="relu")(dense_3)
    output_layer = keras.layers.Dense(7, activation="linear")(dense_4)
    model = keras.models.Model(inputs=[image_input_layer , robot_state_input_layer] , outputs=output_layer)
    model.compile(optimizer='adam',loss='mean_absolute_error', metrics=['mse','accuracy'])
    # Secondary model exposing the 80-d fused latent representation (dense_3).
    latent_space = 'dense_3'
    intermediate_layer_model = keras.models.Model(inputs=model.input, outputs=dense_3)
    monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2,
            verbose=1, mode='auto', restore_best_weights=True)
    history = model.fit([train , robot_state_train_input], robot_state_train_label, callbacks=[monitor], batch_size=50, validation_split=0.2, epochs=6)
    score = model.evaluate([test , robot_state_test_input] , robot_state_test_label)
    predict_cnn_dense = model.predict([test , robot_state_test_input])
    # Per-output mean absolute error on the test set.
    err_matrix_cnn_dense = robot_state_test_label - predict_cnn_dense
    cnn_err_mean = np.mean(abs(err_matrix_cnn_dense))
    print("CNN mean error values for each output: ")
    print(cnn_err_mean)
    # Count elements whose error exceeds 0.01.
    a = np.where(err_matrix_cnn_dense > 0.01)
    a = np.asarray(list(zip(*a)))
    print("number of err elements higher than 0.01: {}".format(a.shape))
    predict_cnn_dense.shape  # NOTE(review): no-op expression, likely notebook leftover
    model.save('/home/kiyanoushs/Kiyanoush Codes/Needle Insertion/Models/CNN_Dense_Net.h5')
    intermediate_layer_model.save('/home/kiyanoushs/Kiyanoush Codes/Needle Insertion/Models/CNN_intermediate_layer.h5')
    model.summary()
| [
"pandas.DataFrame",
"numpy.load",
"sklearn.preprocessing.StandardScaler",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"tensorflow.device",
"numpy.ones",
"tensorflow.keras.models.Model",
"numpy.max",
"tensorflow.keras.layers.concatenate",
"numpy.where",... | [((225, 244), 'tensorflow.device', 'tf.device', (['"""/gpu:1"""'], {}), "('/gpu:1')\n", (234, 244), True, 'import tensorflow as tf\n'), ((264, 376), 'pandas.read_csv', 'pd.read_csv', (['"""/home/kiyanoushs/Kiyanoush Codes/Needle Insertion/Data/Arm2_CS_ordered.csv"""'], {'header': 'None'}), "(\n '/home/kiyanoushs/Kiyanoush Codes/Needle Insertion/Data/Arm2_CS_ordered.csv'\n , header=None)\n", (275, 376), True, 'import pandas as pd\n'), ((384, 496), 'pandas.read_csv', 'pd.read_csv', (['"""/home/kiyanoushs/Kiyanoush Codes/Needle Insertion/Data/Arm2_NS_ordered.csv"""'], {'header': 'None'}), "(\n '/home/kiyanoushs/Kiyanoush Codes/Needle Insertion/Data/Arm2_NS_ordered.csv'\n , header=None)\n", (395, 496), True, 'import pandas as pd\n'), ((569, 599), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (597, 599), False, 'from sklearn import preprocessing\n'), ((758, 804), 'pandas.DataFrame', 'pd.DataFrame', (['Arm1_CS_State'], {'columns': 'CS1_names'}), '(Arm1_CS_State, columns=CS1_names)\n', (770, 804), True, 'import pandas as pd\n'), ((822, 868), 'pandas.DataFrame', 'pd.DataFrame', (['Arm1_NS_State'], {'columns': 'NS1_names'}), '(Arm1_NS_State, columns=NS1_names)\n', (834, 868), True, 'import pandas as pd\n'), ((876, 976), 'numpy.load', 'np.load', (['"""/home/kiyanoushs/Kiyanoush Codes/Needle Insertion/Data/Test1-20imageOrdered.npy"""'], {}), "(\n '/home/kiyanoushs/Kiyanoush Codes/Needle Insertion/Data/Test1-20imageOrdered.npy'\n )\n", (883, 976), True, 'import numpy as np\n'), ((1006, 1063), 'numpy.ones', 'np.ones', (['(X.shape[0], X.shape[1], X.shape[2], X.shape[3])'], {}), '((X.shape[0], X.shape[1], X.shape[2], X.shape[3]))\n', (1013, 1063), True, 'import numpy as np\n'), ((2278, 2317), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (2296, 2317), False, 'from tensorflow import keras\n'), ((4298, 4328), 'tensorflow.keras.layers.Input', 
'keras.layers.Input', ([], {'shape': '(7,)'}), '(shape=(7,))\n', (4316, 4328), False, 'from tensorflow import keras\n'), ((4480, 4528), 'tensorflow.keras.layers.concatenate', 'keras.layers.concatenate', (['[dense_2, cnn_flatten]'], {}), '([dense_2, cnn_flatten])\n', (4504, 4528), False, 'from tensorflow import keras\n'), ((4732, 4829), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': '[image_input_layer, robot_state_input_layer]', 'outputs': 'output_layer'}), '(inputs=[image_input_layer, robot_state_input_layer],\n outputs=output_layer)\n', (4750, 4829), False, 'from tensorflow import keras\n'), ((4971, 5026), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'model.input', 'outputs': 'dense_3'}), '(inputs=model.input, outputs=dense_3)\n', (4989, 5026), False, 'from tensorflow import keras\n'), ((5041, 5158), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.001)', 'patience': '(2)', 'verbose': '(1)', 'mode': '"""auto"""', 'restore_best_weights': '(True)'}), "(monitor='val_loss', min_delta=0.001, patience=2, verbose=1,\n mode='auto', restore_best_weights=True)\n", (5054, 5158), False, 'from tensorflow.keras.callbacks import EarlyStopping\n'), ((5662, 5699), 'numpy.where', 'np.where', (['(err_matrix_cnn_dense > 0.01)'], {}), '(err_matrix_cnn_dense > 0.01)\n', (5670, 5699), True, 'import numpy as np\n'), ((2333, 2423), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (2352, 2423), False, 'from tensorflow import keras\n'), ((2454, 2544), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (2473, 
2544), False, 'from tensorflow import keras\n'), ((2573, 2629), 'tensorflow.keras.layers.MaxPool2D', 'keras.layers.MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (2595, 2629), False, 'from tensorflow import keras\n'), ((2659, 2750), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=128, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (2678, 2750), False, 'from tensorflow import keras\n'), ((2779, 2870), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=128, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (2798, 2870), False, 'from tensorflow import keras\n'), ((2899, 2955), 'tensorflow.keras.layers.MaxPool2D', 'keras.layers.MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (2921, 2955), False, 'from tensorflow import keras\n'), ((2985, 3076), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=256, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (3004, 3076), False, 'from tensorflow import keras\n'), ((3105, 3196), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=256, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (3124, 3196), False, 'from tensorflow import keras\n'), ((3222, 3313), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=256, kernel_size=(3, 3), padding='same',\n activation='relu')\n", 
(3241, 3313), False, 'from tensorflow import keras\n'), ((3342, 3398), 'tensorflow.keras.layers.MaxPool2D', 'keras.layers.MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (3364, 3398), False, 'from tensorflow import keras\n'), ((3428, 3519), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(512)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=512, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (3447, 3519), False, 'from tensorflow import keras\n'), ((3548, 3639), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(512)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=512, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (3567, 3639), False, 'from tensorflow import keras\n'), ((3666, 3757), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(512)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=512, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (3685, 3757), False, 'from tensorflow import keras\n'), ((3786, 3842), 'tensorflow.keras.layers.MaxPool2D', 'keras.layers.MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)'}), '(pool_size=(2, 2), strides=(2, 2))\n', (3808, 3842), False, 'from tensorflow import keras\n'), ((3874, 3965), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(512)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=512, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (3893, 3965), False, 'from tensorflow import keras\n'), ((3995, 4086), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(512)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=512, kernel_size=(3, 3), padding='same',\n 
activation='relu')\n", (4014, 4086), False, 'from tensorflow import keras\n'), ((4114, 4205), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(512)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=512, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (4133, 4205), False, 'from tensorflow import keras\n'), ((4232, 4254), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (4252, 4254), False, 'from tensorflow import keras\n'), ((4340, 4381), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(15)'], {'activation': '"""relu"""'}), "(15, activation='relu')\n", (4358, 4381), False, 'from tensorflow import keras\n'), ((4418, 4459), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(25)'], {'activation': '"""relu"""'}), "(25, activation='relu')\n", (4436, 4459), False, 'from tensorflow import keras\n'), ((4542, 4583), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(80)'], {'activation': '"""relu"""'}), "(80, activation='relu')\n", (4560, 4583), False, 'from tensorflow import keras\n'), ((4603, 4644), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(20)'], {'activation': '"""relu"""'}), "(20, activation='relu')\n", (4621, 4644), False, 'from tensorflow import keras\n'), ((4670, 4712), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(7)'], {'activation': '"""linear"""'}), "(7, activation='linear')\n", (4688, 4712), False, 'from tensorflow import keras\n'), ((1346, 1371), 'numpy.max', 'np.max', (['train[i, :, :, :]'], {}), '(train[i, :, :, :])\n', (1352, 1371), True, 'import numpy as np\n'), ((1832, 1856), 'numpy.max', 'np.max', (['test[i, :, :, :]'], {}), '(test[i, :, :, :])\n', (1838, 1856), True, 'import numpy as np\n')] |
"""
Continuous NSGA-II, NSGA-III
"""
from xopt import creator, vocs_tools, fitness_with_constraints
from xopt import creator, vocs_tools, fitness_with_constraints
from xopt.tools import full_path, random_settings_arrays, DummyExecutor, load_config, NpEncoder
from xopt import __version__
from deap import algorithms, base, tools
from tqdm.auto import tqdm
import numpy as np
import json
import logging
logger = logging.getLogger(__name__)
import warnings
from pprint import pprint
import time
import array
import random
import traceback
import os, sys
# Check for continuous integration: CI runs get a plain-text banner
# (presumably so the ASCII art does not clutter or garble CI logs).
if 'CI' in os.environ:
    cnsga_logo = f"""
    Continuous Non-dominated Sorting Genetic Algorithm
    Version {__version__}
    """
else:
    # Full ASCII-art banner for interactive use.
    cnsga_logo = f"""
 ▄████▄   ███▄    █   ██████   ▄████  ▄▄▄
▒██▀ ▀█   ██ ▀█   █ ▒██    ▒  ██▒ ▀█▒▒████▄
▒▓█    ▄ ▓██  ▀█ ██▒░ ▓██▄   ▒██░▄▄▄░▒██  ▀█▄
▒▓▓▄ ▄██▒▓██▒  ▐▌██▒  ▒   ██▒░▓█  ██▓░██▄▄▄▄██
▒ ▓███▀ ░▒██░   ▓██░▒██████▒▒░▒▓███▀▒ ▓█   ▓██▒
░ ░▒ ▒  ░░ ▒░   ▒ ▒ ▒ ▒▓▒ ▒ ░ ░▒   ▒  ▒▒   ▓▒█░
  ░  ▒   ░ ░░   ░ ▒░░ ░▒  ░ ░  ░   ░   ▒   ▒▒ ░
░        ░   ░ ░ ░  ░  ░  ░   ░   ░   ▒
░ ░            ░       ░           ░  ░  ░
░ ░
    Continuous Non-dominated Sorting Genetic Algorithm
    Version {__version__}
    """
def uniform(low, up, size=None):
    """Draw one uniform random sample per (low, up) bound pair.

    ``low`` and ``up`` may be sequences of per-dimension bounds, or scalars
    combined with ``size`` (the scalar bounds are then repeated ``size`` times).
    """
    try:
        bounds = list(zip(low, up))
    except TypeError:
        # Scalar bounds: repeat them to form `size` identical (low, up) pairs.
        bounds = list(zip([low] * size, [up] * size))
    return [random.uniform(lo, hi) for lo, hi in bounds]
def cnsga_toolbox(vocs, selection='auto'):
    """
    Creates a DEAP toolbox from VOCS dict for use with cnsga.

    Args:
        vocs: dict with 'variables', 'objectives' and 'constraints' entries.
        selection: selection algorithm to register. Options:
            nsga2: Standard NSGA2 [Deb2002] selection
            nsga3: NSGA3 [Deb2014] selection
            spea2: SPEA-II [Zitzler2001] selection
            auto: will choose nsga2 for <= 2 objectives, otherwise nsga3

    Returns:
        deap.base.Toolbox with registered individual/population constructors,
        mate/mutate operators, and the chosen selection operator.

    See DEAP code for details.
    """
    var, obj, con = vocs['variables'], vocs['objectives'], vocs['constraints']
    n_var = len(var)
    n_obj = len(obj)
    n_con = len(con)
    # Labels and bounds extracted in a consistent key order via vocs_tools.
    var_labels = vocs_tools.skeys(var)
    obj_labels = vocs_tools.skeys(obj)
    bound_low = vocs_tools.var_mins(var)
    bound_up = vocs_tools.var_maxs(var)
    weights = vocs_tools.weight_list(obj)
    # Create MyFitness. DEAP's creator registers classes globally, so delete
    # any class left over from a previous call before re-creating it.
    if 'MyFitness' in dir(creator):
        del creator.MyFitness
    if n_con == 0:
        # Normal Fitness class
        creator.create('MyFitness', base.Fitness, weights=weights, labels=obj_labels)
    else:
        # Fitness with Constraints
        creator.create('MyFitness', fitness_with_constraints.FitnessWithConstraints,
                       weights=weights, n_constraints=n_con, labels=obj_labels)
    # Create Individual. Check if exists first.
    if 'Individual' in dir(creator):
        del creator.Individual
    # Individuals are flat arrays of C doubles carrying the fitness class above.
    creator.create('Individual', array.array, typecode='d', fitness=creator.MyFitness,
                   labels=var_labels)
    # Make toolbox
    toolbox = base.Toolbox()
    # Register individual and population creation routines
    toolbox.register('attr_float', uniform, bound_low, bound_up)
    toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.attr_float)
    toolbox.register('population', tools.initRepeat, list, toolbox.individual)
    # Register mate and mutate functions (bounded simulated binary crossover
    # and bounded polynomial mutation).
    toolbox.register('mate', tools.cxSimulatedBinaryBounded, low=bound_low, up=bound_up, eta=20.0)
    toolbox.register('mutate', tools.mutPolynomialBounded, low=bound_low, up=bound_up, eta=20.0, indpb=1.0/n_var)
    # Register NSGA selection algorithm.
    # NSGA-III should be better for 3 or more objectives
    if selection == 'auto':
        if len(obj) <= 2:
            selection = 'nsga2'
        else:
            selection='nsga3'
    if selection == 'nsga2':
        toolbox.register('select', tools.selNSGA2)
    # Doesn't work with constraints. TODO: investigate
    #elif selection == 'nsga2_log':
    #    toolbox.register('select', tools.selNSGA2, nd='log')
    elif selection == 'nsga3':
        # Create uniform reference point
        ref_points = tools.uniform_reference_points(n_obj, 12)
        toolbox.register('select', tools.selNSGA3, ref_points=ref_points)
    elif selection == 'spea2':
        toolbox.register('select', tools.selSPEA2)
    else:
        raise ValueError(f'Invalid selection algorithm: {selection}')
    logger.info(f'Created toolbox with {n_var} variables, {n_con} constraints, and {n_obj} objectives.')
    logger.info(f'    Using selection algorithm: {selection}')
    return toolbox
def cnsga_evaluate(vec, evaluate_f=None, vocs=None, include_inputs_and_outputs=True, verbose=True):
    """
    Evaluation function wrapper for use with cnsga. Returns dict with:
        'vec', 'obj', 'con', 'err'

    If a vocs is given, the function evaluate_f is assumed to have labeled
    inputs and outputs, and vocs will be used to form the output above.
    If include_inputs_and_outputs, then:
        'inputs', 'outputs'
    will be included in the returned dict (None when they were never formed,
    e.g. when no vocs is given).

    Otherwise, evaluate_f should return pure numbers as:
        vec -> (objectives, constraints)

    This function will be evaluated by a worker.

    Any exceptions will be caught, and this will return:
        error = True
        0 for all objectives
        -666.0 for all constraints

    Note: `verbose` is accepted for backward compatibility and is unused.
    """
    result = {}

    # Defaults so the result dict can always be filled, even when no vocs is
    # given or the evaluation raised before producing outputs.
    # (Previously this raised NameError in those cases.)
    inputs = None
    outputs = None

    if vocs:
        # labeled inputs -> labeled outputs evaluate_f
        inputs = vocs_tools.inputs_from_vec(vec, vocs=vocs)

    try:
        if vocs:
            # Make a copy, because the evaluate function might modify the inputs.
            inputs0 = inputs.copy()
            outputs = evaluate_f(inputs0)
            obj_eval = vocs_tools.evaluate_objectives(vocs['objectives'], outputs)
            con_eval = vocs_tools.evaluate_constraints(vocs['constraints'], outputs)
        else:
            # Pure number function
            obj_eval, con_eval = evaluate_f(vec)
        err = False

    except Exception:
        # No need to print a nasty logger exception
        logger.error(f'Exception caught in {__name__}')
        outputs = {'Exception': str(traceback.format_exc())}

        # Dummy output
        err = True
        if vocs:
            obj_eval = [0.0] * len(vocs['objectives'])
            con_eval = [-666.0] * len(vocs['constraints'])
        else:
            # Without vocs the expected lengths are unknown; use single dummies.
            obj_eval = [0.0]
            con_eval = [-666.0]

    # Add these to result
    if include_inputs_and_outputs:
        result['inputs'] = inputs
        result['outputs'] = outputs

    result['vec'] = vec
    result['obj'] = obj_eval
    result['con'] = con_eval
    result['err'] = err

    return result
def pop_init(vocs, data):
    """
    Initialize a population (list of creator.Individual) from vocs and keyed data.

    Args:
        vocs: dict with a 'variables' key mapping names to (lower, upper) bounds.
        data: dict with one array of values per variable name in
            vocs['variables'].

    Returns:
        List of creator.Individual, trimmed to a multiple of 4
        (required by the NSGA selection used downstream).

    Raises:
        AssertionError: if any value lies outside its variable's bounds, or if
            the resulting population is empty.
    """
    # Get keys to look for in data
    varkeys = vocs_tools.skeys(vocs['variables'])
    # Stack the per-variable columns into rows of decision vectors.
    vecs = np.array([data[k] for k in varkeys]).T
    # Check bounds
    for i, v in enumerate(varkeys):
        low, up = vocs['variables'][v]
        assert vecs[:, i].min() >= low, f'Data below lower bound for variable {v}'
        assert vecs[:, i].max() <= up, f'Data above upper bound for variable {v}'
    # Pop must be a multiple of 4. Trim off any extras.
    n_extra = len(vecs) % 4
    if n_extra > 0:
        print(f'Warning: trimming {n_extra} from initial population to make a multiple of 4.')
        vecs = vecs[:-n_extra]
    assert len(vecs) > 0, 'Population is empty'
    # Form individuals
    return [creator.Individual(vec) for vec in vecs]
def pop_init_random(vocs, n):
    """Build a population of ``n`` individuals drawn at random from the
    variable bounds in ``vocs``."""
    return pop_init(vocs, random_settings_arrays(vocs, n))
def pop_to_data(vocs, pop, generation=0):
    """
    Serialize a population (list of individuals) into a plain dict.

    Always returns a dict with:
        'variables': dict of per-variable value lists
        'generation': the generation number
        'vocs': the vocs used
    When every individual has a valid fitness, also includes:
        'error', 'inputs', 'outputs'
    """
    varnames = vocs_tools.skeys(vocs['variables'])  # the correct order
    data = {
        'variables': {name: [ind[i] for ind in pop]
                      for i, name in enumerate(varnames)},
        'generation': generation,
        'vocs': vocs,
    }
    # Unevaluated individuals carry no error/inputs/outputs information.
    if all(ind.fitness.valid for ind in pop):
        data['error'] = [ind.error for ind in pop]
        data['inputs'] = [ind.inputs for ind in pop]
        data['outputs'] = [ind.outputs for ind in pop]
    return data
# function to reform individual
def form_ind(res):
vec, obj, con, err = res['vec'], res['obj'], res['con'], res['err']
ind = creator.Individual(vec)
ind.fitness.values = obj
ind.fitness.cvalues = con
ind.error = err
if 'inputs' in res:
ind.inputs = res['inputs']
if 'outputs' in res:
ind.outputs = res['outputs']
return ind
# Only allow vectors to be sent to evaluate
def get_vec(ind):
return array.array('d', [float(x) for x in ind])
def get_vecs(inds):
    """Copy each individual into a plain array of C doubles (see get_vec)."""
    return [array.array('d', [float(x) for x in ind]) for ind in inds]
#--------------------------------------------
#--------------------------------------------
def cnsga(executor=None,
          vocs=None,
          population=None,
          toolbox=None,
          seed=None,
          evaluate_f=None,
          output_path=None,
          max_generations = 2,
          population_size = 4,
          crossover_probability = 0.9,
          mutation_probability = 1.0,
          selection='auto',
          verbose=None,
          show_progress=False):
    """
    Continuous NSGA-II, NSGA-III

    Futures method, uses an executor as defined in:
    https://www.python.org/dev/peps/pep-3148/

    Works with executors instantiated from:
       concurrent.futures.ProcessPoolExecutor
       concurrent.futures.ThreadPoolExecutor
       mpi4py.futures.MPIPoolExecutor
       dask.distributed.Client

    Requires either a DEAP toolbox or a vocs dict.

    Args:
        executor: futures executor; a serial DummyExecutor is used when None.
        vocs: variables/objectives/constraints dict (used to build the toolbox
            and initial population when none are given).
        population: existing population to restart from (any value accepted by
            load_config); must contain a 'variables' key.
        toolbox: pre-built DEAP toolbox; built from vocs when None.
        seed: seed for the random module.
        evaluate_f: user evaluation function.
        output_path: existing directory for per-generation JSON output.
        max_generations: number of generations to run (extended by the restart
            generation when resuming from a population with a 'generation' key).
        population_size: population size MU; must be a multiple of 4.
        crossover_probability: crossover probability passed to varAnd.
        mutation_probability: mutation probability passed to varAnd.
        selection: passed to cnsga_toolbox when the toolbox is built here.
        verbose: deprecated, ignored (a warning is issued when not None).
        show_progress: show a tqdm progress bar.

    If an output_path is given, regular outputs:
        gen_{i}.json
        pop_{i}.json
    will be written for each generation i, and the best population at that
    generation. These files can be used for restarting the function.

    Returns:
        dict (from pop_to_data) describing the final population.
    """

    random.seed(seed)

    MU = population_size
    CXPB = float(crossover_probability)
    MUTPB = float(mutation_probability)

    if verbose is not None:
        warnings.warn('xopt.cnsga verbose option has been deprecated')

    # Initial population

    # Logo
    try:
        logger.info(cnsga_logo)
    except:
        logger.info('CNSGA') # Windows has a problem with the logo

    if not executor:
        executor = DummyExecutor()
        logger.info('No executor given. Running in serial mode.')

    # Setup saving to file
    if output_path:
        path = full_path(output_path)
        assert os.path.exists(path), f'output_path does not exist {path}'

        def save(pop, prefix, generation):
            # Writes one JSON file per call, e.g. gen_3.json / pop_3.json.
            file = f'{prefix}{generation}.json'
            data = pop_to_data(vocs, pop, generation=generation)
            fullfile = os.path.join(path, file)
            with open(fullfile, 'w') as f:
                json.dump(data, f, ensure_ascii=True, cls=NpEncoder, indent=4)
            logger.info(f'Pop written to  {fullfile}')

    else:
        # Dummy save
        def save(pop, prefix, generation):
            pass

    # Toolbox
    if not toolbox:
        logger.info('Creating toolbox from vocs.')
        toolbox = cnsga_toolbox(vocs, selection=selection)
        toolbox.register('evaluate', cnsga_evaluate, evaluate_f=evaluate_f, vocs=vocs)

    # Initial pop
    if population:
        # Load JSON
        population = load_config(population)
        assert 'variables' in population, 'Population must have key: variables'
        pop = pop_init(vocs, population['variables'])
        if 'generation' in population:
            # Resume generation counting where the saved run left off.
            generation = population['generation']+1
            max_generations += generation
        else:
            generation=0
        MU = len(pop)
        logger.info(f'Initializing with existing population, size {MU}')
    else:
        generation = 0
        #pop = toolbox.population(n=MU)
        pop = pop_init_random(vocs, n=MU)
        logger.info(f'Initializing with a new population, size {MU}')
    assert MU % 4 == 0, f'Population size (here {MU}) must be a multiple of 4'

    logger.info(f'Maximum generations: {max_generations}')

    # Individuals that need evaluating
    vecs = [get_vec(ind) for ind in pop if not ind.fitness.valid]

    # Initial population
    futures = [executor.submit(toolbox.evaluate, vec) for vec in vecs]
    logger.info('____________________________________________________')
    logger.info(f'{MU} fitness calculations for initial generation')

    # Clear pop and rebuild it from the completed evaluation results.
    pop = []
    for future in futures:
        res = future.result()
        ind = form_ind(res)
        pop.append(ind)
    logger.info('done.')
    logger.info('Submitting first batch of children')
    save(pop, 'initial_pop_', generation)

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    # Make inital offspring to start the iteration
    vecs0 = get_vecs(algorithms.varAnd(pop, toolbox, CXPB, MUTPB) )

    # Submit evaluation of initial population
    futures = [executor.submit(toolbox.evaluate, vec) for vec in vecs0]

    # Second batch of offspring, queued to refill workers as futures complete.
    new_vecs = get_vecs(algorithms.varAnd(pop, toolbox, CXPB, MUTPB))

    new_offspring = []

    # Continuous loop: each finished future immediately gets replaced by a new
    # child evaluation; a generation boundary occurs when new_vecs runs empty.
    t0 = time.time()
    done = False

    # Nice progress bar
    pbar = tqdm(total=len(futures), disable=not show_progress)
    while not done:
        # Check the status of all futures
        for ix in range(len(futures)):

            # Examine a future
            fut = futures[ix]

            if fut.done():
                res = fut.result()
                ind = form_ind(res)
                new_offspring.append(ind)

                # Increment the progress bar
                pbar.update(1)

                # Refill inputs and save
                if len(new_vecs) == 0:
                    pbar.close()
                    pbar = tqdm(total=(len(futures)))
                    t1 = time.time()
                    dt = t1-t0
                    t0 = t1
                    #logger.info('__________________________________________________________')
                    logger.info(f'Generation {generation} completed in {dt/60:0.5f} minutes')
                    generation += 1

                    save(new_offspring, 'gen_', generation)

                    # Select the next parent population from parents + children.
                    pop = toolbox.select(pop + new_offspring, MU)
                    save(pop, 'pop_', generation)
                    new_offspring = []

                    # New offspring
                    new_vecs = get_vecs(algorithms.varAnd(pop, toolbox, CXPB, MUTPB))

                    if generation >= max_generations:
                        done = True

                # Add new job for worker
                vec = new_vecs.pop()
                future = executor.submit(toolbox.evaluate, vec)
                futures[ix] = future

        # Slow down polling. Needed for MPI to work well.
        time.sleep(0.001)

    # Close any progress bars
    pbar.update(len(futures))
    # pbar.clear()
    pbar.close()

    # Cancel remaining jobs
    for future in futures:
        future.cancel()

    final_population = pop_to_data(vocs, pop, generation=generation)

    return final_population
| [
"xopt.vocs_tools.var_maxs",
"xopt.tools.DummyExecutor",
"xopt.vocs_tools.weight_list",
"xopt.vocs_tools.evaluate_constraints",
"os.path.join",
"deap.base.Toolbox",
"os.path.exists",
"deap.algorithms.varAnd",
"random.seed",
"xopt.tools.random_settings_arrays",
"traceback.format_exc",
"xopt.crea... | [((418, 445), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (435, 445), False, 'import logging\n'), ((2200, 2221), 'xopt.vocs_tools.skeys', 'vocs_tools.skeys', (['var'], {}), '(var)\n', (2216, 2221), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((2239, 2260), 'xopt.vocs_tools.skeys', 'vocs_tools.skeys', (['obj'], {}), '(obj)\n', (2255, 2260), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((2282, 2306), 'xopt.vocs_tools.var_mins', 'vocs_tools.var_mins', (['var'], {}), '(var)\n', (2301, 2306), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((2322, 2346), 'xopt.vocs_tools.var_maxs', 'vocs_tools.var_maxs', (['var'], {}), '(var)\n', (2341, 2346), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((2366, 2393), 'xopt.vocs_tools.weight_list', 'vocs_tools.weight_list', (['obj'], {}), '(obj)\n', (2388, 2393), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((2967, 3073), 'xopt.creator.create', 'creator.create', (['"""Individual"""', 'array.array'], {'typecode': '"""d"""', 'fitness': 'creator.MyFitness', 'labels': 'var_labels'}), "('Individual', array.array, typecode='d', fitness=creator.\n MyFitness, labels=var_labels)\n", (2981, 3073), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((3139, 3153), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (3151, 3153), False, 'from deap import algorithms, base, tools\n'), ((7239, 7274), 'xopt.vocs_tools.skeys', 'vocs_tools.skeys', (["vocs['variables']"], {}), "(vocs['variables'])\n", (7255, 7274), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((8103, 8134), 'xopt.tools.random_settings_arrays', 'random_settings_arrays', (['vocs', 'n'], {}), '(vocs, n)\n', (8125, 8134), False, 'from xopt.tools import full_path, random_settings_arrays, DummyExecutor, load_config, NpEncoder\n'), 
((8627, 8662), 'xopt.vocs_tools.skeys', 'vocs_tools.skeys', (["vocs['variables']"], {}), "(vocs['variables'])\n", (8643, 8662), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((9190, 9213), 'xopt.creator.Individual', 'creator.Individual', (['vec'], {}), '(vec)\n', (9208, 9213), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((10806, 10823), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (10817, 10823), False, 'import random\n'), ((14349, 14360), 'time.time', 'time.time', ([], {}), '()\n', (14358, 14360), False, 'import time\n'), ((2552, 2629), 'xopt.creator.create', 'creator.create', (['"""MyFitness"""', 'base.Fitness'], {'weights': 'weights', 'labels': 'obj_labels'}), "('MyFitness', base.Fitness, weights=weights, labels=obj_labels)\n", (2566, 2629), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((2683, 2820), 'xopt.creator.create', 'creator.create', (['"""MyFitness"""', 'fitness_with_constraints.FitnessWithConstraints'], {'weights': 'weights', 'n_constraints': 'n_con', 'labels': 'obj_labels'}), "('MyFitness', fitness_with_constraints.FitnessWithConstraints,\n weights=weights, n_constraints=n_con, labels=obj_labels)\n", (2697, 2820), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((5738, 5780), 'xopt.vocs_tools.inputs_from_vec', 'vocs_tools.inputs_from_vec', (['vec'], {'vocs': 'vocs'}), '(vec, vocs=vocs)\n', (5764, 5780), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((7306, 7342), 'numpy.array', 'np.array', (['[data[k] for k in varkeys]'], {}), '([data[k] for k in varkeys])\n', (7314, 7342), True, 'import numpy as np\n'), ((7932, 7955), 'xopt.creator.Individual', 'creator.Individual', (['vec'], {}), '(vec)\n', (7950, 7955), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((10975, 11037), 'warnings.warn', 'warnings.warn', (['"""xopt.cnsga verbose option has been deprecated"""'], 
{}), "('xopt.cnsga verbose option has been deprecated')\n", (10988, 11037), False, 'import warnings\n'), ((11315, 11330), 'xopt.tools.DummyExecutor', 'DummyExecutor', ([], {}), '()\n', (11328, 11330), False, 'from xopt.tools import full_path, random_settings_arrays, DummyExecutor, load_config, NpEncoder\n'), ((11469, 11491), 'xopt.tools.full_path', 'full_path', (['output_path'], {}), '(output_path)\n', (11478, 11491), False, 'from xopt.tools import full_path, random_settings_arrays, DummyExecutor, load_config, NpEncoder\n'), ((11507, 11527), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (11521, 11527), False, 'import os, sys\n'), ((12392, 12415), 'xopt.tools.load_config', 'load_config', (['population'], {}), '(population)\n', (12403, 12415), False, 'from xopt.tools import full_path, random_settings_arrays, DummyExecutor, load_config, NpEncoder\n'), ((14040, 14084), 'deap.algorithms.varAnd', 'algorithms.varAnd', (['pop', 'toolbox', 'CXPB', 'MUTPB'], {}), '(pop, toolbox, CXPB, MUTPB)\n', (14057, 14084), False, 'from deap import algorithms, base, tools\n'), ((14241, 14285), 'deap.algorithms.varAnd', 'algorithms.varAnd', (['pop', 'toolbox', 'CXPB', 'MUTPB'], {}), '(pop, toolbox, CXPB, MUTPB)\n', (14258, 14285), False, 'from deap import algorithms, base, tools\n'), ((16322, 16339), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (16332, 16339), False, 'import time\n'), ((1475, 1495), 'random.uniform', 'random.uniform', (['a', 'b'], {}), '(a, b)\n', (1489, 1495), False, 'import random\n'), ((4300, 4341), 'deap.tools.uniform_reference_points', 'tools.uniform_reference_points', (['n_obj', '(12)'], {}), '(n_obj, 12)\n', (4330, 4341), False, 'from deap import algorithms, base, tools\n'), ((6042, 6101), 'xopt.vocs_tools.evaluate_objectives', 'vocs_tools.evaluate_objectives', (["vocs['objectives']", 'outputs'], {}), "(vocs['objectives'], outputs)\n", (6072, 6101), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((6125, 
6186), 'xopt.vocs_tools.evaluate_constraints', 'vocs_tools.evaluate_constraints', (["vocs['constraints']", 'outputs'], {}), "(vocs['constraints'], outputs)\n", (6156, 6186), False, 'from xopt import creator, vocs_tools, fitness_with_constraints\n'), ((11754, 11778), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (11766, 11778), False, 'import os, sys\n'), ((1560, 1580), 'random.uniform', 'random.uniform', (['a', 'b'], {}), '(a, b)\n', (1574, 1580), False, 'import random\n'), ((11838, 11900), 'json.dump', 'json.dump', (['data', 'f'], {'ensure_ascii': '(True)', 'cls': 'NpEncoder', 'indent': '(4)'}), '(data, f, ensure_ascii=True, cls=NpEncoder, indent=4)\n', (11847, 11900), False, 'import json\n'), ((6509, 6531), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6529, 6531), False, 'import traceback\n'), ((15180, 15191), 'time.time', 'time.time', ([], {}), '()\n', (15189, 15191), False, 'import time\n'), ((15830, 15874), 'deap.algorithms.varAnd', 'algorithms.varAnd', (['pop', 'toolbox', 'CXPB', 'MUTPB'], {}), '(pop, toolbox, CXPB, MUTPB)\n', (15847, 15874), False, 'from deap import algorithms, base, tools\n')] |
"""
Extract robot positions from Ignition SQLite log file
"""
import cv2
import numpy as np
import re
import sqlite3
import xml.etree.ElementTree as ET
from datetime import timedelta
from subt.ign_pb2 import Pose_V
# BGR palette used to distinguish individual robot trails in the output image
# (applied via cv2.applyColorMap in draw()).
COLORS = [
    (255, 255, 255),
    (0, 255, 0),
    (0, 0, 255),
    (255, 0, 0),
    (0, 255, 255),
    (255, 0, 255),
    (255, 255, 0),
]
SCALE = 10  # 1 pixel is 1dm
BORDER_PX = 10  # extra border
# Artifact type -> (OpenCV marker shape, palette color index used when drawing).
MARKERS = {
    'backpack': (cv2.MARKER_SQUARE, 3),
    'rescue_randy': (cv2.MARKER_DIAMOND, 2),
    'phone': (cv2.MARKER_TILTED_CROSS, 5),
    'gas': (cv2.MARKER_TRIANGLE_UP, 1),  # Urban
    'vent': (cv2.MARKER_STAR, 4),
    'helmet': (cv2.MARKER_TRIANGLE_UP, 1),  # Cave
    'rope': (cv2.MARKER_STAR, 4),
    'artifact_origin': (cv2.MARKER_CROSS, 6),
}
def read_poses(filename, seconds=3700):
    """Read (timestamp, {robot name: position}) samples from an Ignition log.

    Args:
        filename: path to the SQLite state.tlog file.
        seconds: stop reading once the simulation timestamp exceeds this limit.

    Returns:
        List of (datetime.timedelta, dict) tuples; the dict maps robot name to
        a protobuf position message (with .x/.y/.z fields).
    """
    ret = []
    con = sqlite3.connect(filename)
    cursor = con.cursor()
    # Look up the topic carrying dynamic pose updates.
    cursor.execute(r"SELECT id FROM topics where name LIKE '%/dynamic_pose/info';")
    result = cursor.fetchone()
    dynamic_topic_id = result[0]
    poses = Pose_V()
    try:
        # cannot use WHERE filtering since the state.log is always corrupted
        cursor.execute("SELECT message, topic_id FROM messages")
        for m, topic_id in cursor:
            if topic_id != dynamic_topic_id:
                continue
            poses.ParseFromString(m)
            # Header stamp carries seconds + nanoseconds; convert ns to us.
            timestamp = timedelta(seconds=poses.header.stamp.sec, microseconds=poses.header.stamp.nsec / 1000)
            if timestamp > timedelta(seconds=seconds):
                return ret
            current = dict()
            for pose in poses.pose:
                # Skip names containing '_' (non-robot entities).
                if "_" in pose.name:
                    continue
                current[pose.name] = pose.position
            if len(current) > 0:
                ret.append((timestamp, current))
    except sqlite3.DatabaseError as e:
        # The log is often truncated/corrupted; keep whatever was read so far.
        print(f"{type(e).__name__}: {e}")
    return ret
def read_artifacts(filename):
    """Extract artifact positions from the world SDF embedded in an Ignition log.

    Args:
        filename: path to the SQLite state.tlog file.

    Returns:
        List of [kind, [x, y, z]] entries for every artifact model (and the
        artifact origin) found in the world description.

    Raises:
        ValueError: if the log contains no '/logs/sdf' topic or no SDF message.
    """
    ret = []
    con = sqlite3.connect(filename)
    cursor = con.cursor()
    cursor.execute("SELECT id FROM topics WHERE name == '/logs/sdf'")
    result = cursor.fetchone()
    if result is None:
        # Previously this crashed with TypeError on result[0].
        raise ValueError(f"No '/logs/sdf' topic found in {filename}")
    sdf_id = result[0]

    cursor.execute("SELECT message, topic_id FROM messages")
    world = None
    for message, topic_id in cursor:
        if topic_id == sdf_id:
            world = message
            break
    if world is None:
        # Previously this crashed with NameError when no message matched.
        raise ValueError(f"No SDF message found in {filename}")

    # Skip the 4-byte prefix before the XML payload.
    root = ET.fromstring(world[4:])
    type_re = re.compile('^(backpack|rescue_randy|gas|vent|phone|artifact_origin|rope|helmet)')
    for model in root.iterfind("./world/model"):
        name = model.get('name')
        match = type_re.match(name)
        if match:
            kind = match.group(1)
            x, y, z = [float(a) for a in model.find('./pose').text.split()[:3]]
            ret.append([kind, [x, y, z]])
    return ret
def draw(poses, artifacts):
    """Render robot trails and artifact markers into a BGR image.

    Args:
        poses: list of (timestamp, {name: position}) as returned by read_poses().
        artifacts: list of [kind, [x, y, z]] as returned by read_artifacts().

    Returns:
        BGR uint8 image with one colored pixel per robot pose sample and an
        OpenCV marker per artifact.
    """
    # Compute the bounding box over all robot positions and artifacts.
    min_x, min_y = 10_000, 10_000
    max_x, max_y = -10_000, -10_000
    for timestamp, sample in poses:
        for k, v in sample.items():
            min_x = min_x if v.x > min_x else v.x
            min_y = min_y if v.y > min_y else v.y
            max_x = max_x if v.x < max_x else v.x
            max_y = max_y if v.y < max_y else v.y
    for kind, p in artifacts:
        min_x = min_x if p[0] > min_x else p[0]
        min_y = min_y if p[1] > min_y else p[1]
        max_x = max_x if p[0] < max_x else p[0]
        max_y = max_y if p[1] < max_y else p[1]
    print(f"min x: {min_x:.2f} y: {min_y:.2f}")
    print(f"max x: {max_x:.2f} y: {max_y:.2f}")
    width_px = 2*BORDER_PX + int(SCALE*(max_x - min_x))
    height_px = 2*BORDER_PX + int(SCALE*(max_y - min_y))
    print(f"width: {width_px}px height: {height_px}px")
    colors = dict()
    # Single-channel "index image": pixel values are palette indices that are
    # colorized by cv2.applyColorMap at the end.
    world = np.zeros((height_px, width_px), dtype=np.uint8)
    # draw cross at (0,0)
    #px = int(SCALE * (0 - min_x)) + BORDER_PX
    #py = int(SCALE * (0 - min_y)) + BORDER_PX
    #cv2.line(world, (px, py - 20), (px, py + 20), 255, 3)
    #cv2.line(world, (px - 20, py), (px + 20, py), 255, 3)
    for kind, p in artifacts:
        px = int(SCALE * (p[0] - min_x)) + BORDER_PX
        py = int(SCALE * (p[1] - min_y)) + BORDER_PX
        # Flip the y axis: image rows grow downwards.
        point = (px, height_px - py - 1)
        cv2.drawMarker(world, point, MARKERS[kind][1], markerType=MARKERS[kind][0], thickness=3)
    for timestamp, sample in poses:
        for k, v in sample.items():
            # Assign each robot the next free palette index on first sight.
            if k not in colors:
                colors[k] = len(colors)+1
            px = int(SCALE*(v.x - min_x)) + BORDER_PX
            py = int(SCALE*(v.y - min_y)) + BORDER_PX
            world[height_px - py - 1, px] = colors[k]
    # Build the palette: index 0 stays black, robots map to COLORS, 255 is red.
    user_color_map = np.zeros((256, 1, 3), dtype=np.uint8)
    user_color_map[0] = (0, 0, 0)
    for i in range(len(COLORS)):
        user_color_map[i+1] = COLORS[i]
    user_color_map[255] = (50, 50, 255)  # BGR -> Red
    cimg = cv2.applyColorMap(world, user_color_map)
    return cimg
def main():
    """Command line entry point: plot robot trails or list artifact positions."""
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('filename', help='Ignition file from simulation (state.tlog)')
    parser.add_argument('-s', default=3700, type=float,
                        help='Limit duration time')
    parser.add_argument('-a', '--artifacts', action='store_true', default=False,
                        help='Display list of artifacts with positions')
    args = parser.parse_args()

    if args.artifacts:
        # Report artifact positions relative to the artifact origin.
        artifact_list = read_artifacts(args.filename)
        origin = next(filter(lambda a: a[0] == 'artifact_origin', artifact_list))[1]
        for kind, position in artifact_list:
            coords = [f"{value - offset:.2f}".rstrip('0').rstrip('.')
                      for value, offset in zip(position, origin)]
            corrected = ", ".join(coords)
            print(f"{kind:<15}", f"[{corrected}]")
    else:
        robot_poses = read_poses(args.filename, args.s)
        artifact_list = read_artifacts(args.filename)
        image = draw(robot_poses, artifact_list)
        cv2.imwrite(args.filename + '.png', image)


if __name__ == "__main__":
    main()
| [
"subt.ign_pb2.Pose_V",
"argparse.ArgumentParser",
"xml.etree.ElementTree.fromstring",
"cv2.imwrite",
"numpy.zeros",
"cv2.drawMarker",
"sqlite3.connect",
"datetime.timedelta",
"cv2.applyColorMap",
"re.compile"
] | [((848, 873), 'sqlite3.connect', 'sqlite3.connect', (['filename'], {}), '(filename)\n', (863, 873), False, 'import sqlite3\n'), ((1061, 1069), 'subt.ign_pb2.Pose_V', 'Pose_V', ([], {}), '()\n', (1067, 1069), False, 'from subt.ign_pb2 import Pose_V\n'), ((1972, 1997), 'sqlite3.connect', 'sqlite3.connect', (['filename'], {}), '(filename)\n', (1987, 1997), False, 'import sqlite3\n'), ((2336, 2360), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['world[4:]'], {}), '(world[4:])\n', (2349, 2360), True, 'import xml.etree.ElementTree as ET\n'), ((2375, 2461), 're.compile', 're.compile', (['"""^(backpack|rescue_randy|gas|vent|phone|artifact_origin|rope|helmet)"""'], {}), "(\n '^(backpack|rescue_randy|gas|vent|phone|artifact_origin|rope|helmet)')\n", (2385, 2461), False, 'import re\n'), ((3656, 3703), 'numpy.zeros', 'np.zeros', (['(height_px, width_px)'], {'dtype': 'np.uint8'}), '((height_px, width_px), dtype=np.uint8)\n', (3664, 3703), True, 'import numpy as np\n'), ((4549, 4586), 'numpy.zeros', 'np.zeros', (['(256, 1, 3)'], {'dtype': 'np.uint8'}), '((256, 1, 3), dtype=np.uint8)\n', (4557, 4586), True, 'import numpy as np\n'), ((4759, 4799), 'cv2.applyColorMap', 'cv2.applyColorMap', (['world', 'user_color_map'], {}), '(world, user_color_map)\n', (4776, 4799), False, 'import cv2\n'), ((4863, 4907), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (4886, 4907), False, 'import argparse\n'), ((5752, 5792), 'cv2.imwrite', 'cv2.imwrite', (["(args.filename + '.png')", 'img'], {}), "(args.filename + '.png', img)\n", (5763, 5792), False, 'import cv2\n'), ((4129, 4221), 'cv2.drawMarker', 'cv2.drawMarker', (['world', 'point', 'MARKERS[kind][1]'], {'markerType': 'MARKERS[kind][0]', 'thickness': '(3)'}), '(world, point, MARKERS[kind][1], markerType=MARKERS[kind][0],\n thickness=3)\n', (4143, 4221), False, 'import cv2\n'), ((1387, 1478), 'datetime.timedelta', 'timedelta', ([], {'seconds': 
'poses.header.stamp.sec', 'microseconds': '(poses.header.stamp.nsec / 1000)'}), '(seconds=poses.header.stamp.sec, microseconds=poses.header.stamp.\n nsec / 1000)\n', (1396, 1478), False, 'from datetime import timedelta\n'), ((1501, 1527), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'seconds'}), '(seconds=seconds)\n', (1510, 1527), False, 'from datetime import timedelta\n')] |
"""
Screen timeseries for anomalies and impute missing and anomalous values.
The screening methods were originally designed to identify unrealistic data in the
electricity demand timeseries reported to EIA on Form 930, and have also been
applied to the FERC Form 714, and various historical demand timeseries
published by regional grid operators like MISO, PJM, ERCOT, and SPP.
They are adapted from code published and modified by:
* <NAME> <<EMAIL>>
* <NAME> <<EMAIL>>
And described at:
* https://doi.org/10.1038/s41597-020-0483-x
* https://zenodo.org/record/3737085
* https://github.com/truggles/EIA_Cleaned_Hourly_Electricity_Demand_Code
The imputation methods were designed for multivariate time series forecasting.
They are adapted from code published by:
* <NAME> <<EMAIL>>
And described at:
* https://arxiv.org/abs/2006.10436
* https://arxiv.org/abs/2008.03194
* https://github.com/xinychen/tensor-learning
"""
import functools
import warnings
from typing import Any, Iterable, List, Sequence, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
# ---- Helpers ---- #
def slice_axis(
    x: np.ndarray, start: int = None, end: int = None, step: int = None, axis: int = 0
) -> Tuple[slice, ...]:
    """
    Build an index that slices an array along a single axis.

    Args:
        x: Array to slice.
        start: Start index of the slice.
        end: End index of the slice.
        step: Step size of the slice.
        axis: Axis along which to slice (may be negative).

    Returns:
        Tuple of :class:`slice` objects that applies `start:end:step` on
        axis `axis` and leaves every other axis untouched.

    Examples:
        >>> x = np.random.random((3, 4, 5))
        >>> np.all(x[1:] == x[slice_axis(x, start=1, axis=0)])
        True
        >>> np.all(x[:, 1:] == x[slice_axis(x, start=1, axis=1)])
        True
        >>> np.all(x[:, :, 1:] == x[slice_axis(x, start=1, axis=2)])
        True
    """
    # Normalize a (possibly negative) axis to its non-negative position.
    position = int(np.mod(axis, x.ndim))
    leading = (slice(None),) * position
    return leading + (slice(start, end, step),)
def array_diff(
    x: np.ndarray,
    periods: int = 1,
    axis: int = 0,
    fill: Any = np.nan
) -> np.ndarray:
    """
    First discrete difference of array elements.

    A fast numpy implementation of :meth:`pd.DataFrame.diff`.

    Args:
        periods: Periods to shift for calculating difference, accepts negative values.
        axis: Array axis along which to calculate the difference.
        fill: Value to use at the margins where a difference cannot be calculated.

    Returns:
        Array of same shape and type as `x` with discrete element differences.

    Examples:
        >>> x = np.random.random((4, 2))
        >>> np.all(array_diff(x, 1)[1:] == pd.DataFrame(x).diff(1).values[1:])
        True
        >>> np.all(array_diff(x, -1)[:-1] == pd.DataFrame(x).diff(-1).values[:-1])
        True
    """
    if not periods:
        return x - x
    # Build slicers for the chosen axis without touching any other axis.
    head = [slice(None)] * int(np.mod(axis, x.ndim))

    def along(start=None, end=None):
        return tuple(head + [slice(start, end)])

    out = np.empty_like(x)
    margin, body = along(end=periods), along(start=periods)
    if periods > 0:
        # Leading margin has no predecessor.
        out[margin] = fill
        out[body] = x[body] - x[along(end=-periods)]
    else:
        # Trailing margin has no successor.
        out[margin] = x[margin] - x[along(start=-periods)]
        out[body] = fill
    return out
def encode_run_length(
    x: Union[Sequence, np.ndarray]
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Encode vector with run-length encoding.

    Args:
        x: Vector to encode.

    Returns:
        Values and their run lengths.

    Examples:
        >>> x = np.array([0, 1, 1, 0, 1])
        >>> encode_run_length(x)
        (array([0, 1, 0, 1]), array([1, 2, 1, 1]))
        >>> encode_run_length(x.astype('bool'))
        (array([False,  True, False,  True]), array([1, 2, 1, 1]))
        >>> encode_run_length(np.where(x == 0, np.nan, x))
        (array([nan,  1., nan,  1.]), array([1, 2, 1, 1]))
    """
    # Inspired by https://stackoverflow.com/a/32681075
    x = np.asarray(x)
    n = len(x)
    if n == 0:
        return x, np.array([], dtype=int)
    # Positions where a run ends: pairwise-unequal neighbors, plus the last element.
    boundaries = np.flatnonzero(np.asarray(x[1:] != x[:-1]))
    ends = np.concatenate((boundaries, [n - 1]))
    lengths = np.diff(ends, prepend=-1)
    return x[ends], lengths
def insert_run_length(  # noqa: C901
    x: Union[Sequence, np.ndarray],
    values: Union[Sequence, np.ndarray],
    lengths: Sequence[int],
    mask: Sequence[bool] = None,
    padding: int = 0,
    intersect: bool = False,
) -> np.ndarray:
    """
    Insert run-length encoded values into a vector.

    Args:
        x: Vector to insert values into.
        values: Values to insert.
        lengths: Length of run to insert for each value in `values`.
        mask: Boolean mask, of the same length as `x`, where values can be inserted.
            By default, values can be inserted anywhere in `x`.
        padding: Minimum space between inserted runs and,
            if `mask` is provided, the edges of masked-out areas.
        intersect: Whether to allow inserted runs to intersect each other.

    Raises:
        ValueError: Padding must zero or greater.
        ValueError: Run length must be greater than zero.
        ValueError: Could not find space for run of length {length}.

    Returns:
        Copy of array `x` with values inserted.

    Example:
        >>> x = [0, 0, 0, 0]
        >>> mask = [True, False, True, True]
        >>> insert_run_length(x, values=[1, 2], lengths=[1, 2], mask=mask)
        array([1, 0, 2, 2])

        If we use unique values for the background and each inserted run,
        the run length encoding of the result (ignoring the background)
        is the same as the inserted run, albeit in a different order.

        >>> x = np.zeros(10, dtype=int)
        >>> values = [1, 2, 3]
        >>> lengths = [1, 2, 3]
        >>> x = insert_run_length(x, values=values, lengths=lengths)
        >>> rvalues, rlengths = encode_run_length(x[x != 0])
        >>> order = np.argsort(rvalues)
        >>> all(rvalues[order] == values) and all(rlengths[order] == lengths)
        True

        Null values can be inserted into a vector such that the new null runs
        match the run length encoding of the existing null runs.

        >>> x = [1, 2, np.nan, np.nan, 5, 6, 7, 8, np.nan]
        >>> is_nan = np.isnan(x)
        >>> rvalues, rlengths = encode_run_length(is_nan)
        >>> xi = insert_run_length(
        ...     x,
        ...     values=[np.nan] * rvalues.sum(),
        ...     lengths=rlengths[rvalues],
        ...     mask=~is_nan
        ... )
        >>> np.isnan(xi).sum() == 2 * is_nan.sum()
        True

        The same as above, with non-zero `padding`, yields a unique solution:

        >>> insert_run_length(
        ...     x,
        ...     values=[np.nan] * rvalues.sum(),
        ...     lengths=rlengths[rvalues],
        ...     mask=~is_nan,
        ...     padding=1
        ... )
        array([nan,  2., nan, nan,  5., nan, nan,  8., nan])
    """
    if padding < 0:
        raise ValueError("Padding must zero or greater")
    # Make a new array to modify in place
    x = np.array(x)
    # Compute runs available for insertions: each run is a (start, length) pair
    # of consecutive positions where insertion is permitted.
    if mask is None:
        run_starts = np.array([0])
        run_lengths = np.array([len(x)])
    else:
        mask_values, mask_lengths = encode_run_length(mask)
        # Cumulative lengths give the start of each run; keep only True runs.
        run_starts = np.cumsum(np.append(0, mask_lengths))[:-1][mask_values]
        run_lengths = mask_lengths[mask_values]
    if padding:
        # Constrict runs so inserted runs stay `padding` away from masked-out areas
        run_ends = run_starts + run_lengths
        # Move run starts forward, unless endpoint (no masked-out area before it)
        moved = slice(int(run_starts[0] == 0), None)
        run_starts[moved] += padding
        # Move run ends backward, unless endpoint (no masked-out area after it)
        moved = slice(None, -1 if run_ends[-1] == len(x) else None)
        run_ends[moved] -= padding
        # Recalculate run lengths and keep runs with positive length
        run_lengths = run_ends - run_starts
        keep = run_lengths > 0
        run_starts = run_starts[keep]
        run_lengths = run_lengths[keep]
    # Grow runs by maximum number of insertions (for speed):
    # each non-intersecting insertion can split a run into at most two,
    # so pre-allocate one extra slot per insertion instead of appending.
    n_runs = len(run_starts)
    if not intersect:
        buffer = np.zeros(len(values), dtype=int)
        run_starts = np.concatenate((run_starts, buffer))
        run_lengths = np.concatenate((run_lengths, buffer))
    # Initialize random number generator
    rng = np.random.default_rng()
    # Sort insertions from longest to shortest, so long runs are placed
    # while the most space is still available.
    order = np.argsort(lengths)[::-1]
    values = np.asarray(values)[order]
    lengths = np.asarray(lengths)[order]
    for value, length in zip(values, lengths):
        if length < 1:
            raise ValueError("Run length must be greater than zero")
        # Choose runs of adequate length (only the first n_runs slots are live)
        choices = np.nonzero(run_lengths[:n_runs] >= length)[0]
        if not choices.size:
            raise ValueError(f"Could not find space for run of length {length}")
        idx = rng.choice(choices)
        # Choose adequate start position in run
        offset = rng.integers(0, run_lengths[idx] - length, endpoint=True)
        start = run_starts[idx] + offset
        # Insert value
        x[start:start + length] = value
        if intersect:
            continue
        # Update runs: the chosen run is split/shrunk around the insertion,
        # reserving `padding` positions on each side of it.
        padded_length = length + padding
        if offset:
            tail = run_lengths[idx] - offset - padded_length
            if tail > 0:
                # Insert run for the space remaining after the insertion
                run_starts[n_runs] = start + padded_length
                run_lengths[n_runs] = tail
                n_runs += 1
            # Shorten run to the space remaining before the insertion
            run_lengths[idx] = offset - padding
        else:
            # Shift and shorten run past the insertion
            run_starts[idx] += padded_length
            run_lengths[idx] -= padded_length
    return x
def _mat2ten(matrix: np.ndarray, shape: np.ndarray, mode: int) -> np.ndarray:
"""Fold matrix into a tensor."""
index = [mode] + [i for i in range(len(shape)) if i != mode]
return np.moveaxis(
np.reshape(matrix, newshape=shape[index], order='F'),
source=0,
destination=mode
)
def _ten2mat(tensor: np.ndarray, mode: int) -> np.ndarray:
"""Unfold tensor into a matrix."""
return np.reshape(
np.moveaxis(tensor, source=mode, destination=0),
newshape=(tensor.shape[mode], -1),
order='F'
)
def _svt_tnn(matrix: np.ndarray, tau: float, theta: int) -> np.ndarray:
"""Singular value thresholding (SVT) truncated nuclear norm (TNN) minimization."""
[m, n] = matrix.shape
if 2 * m < n:
u, s, v = np.linalg.svd(matrix @ matrix.T, full_matrices=0)
s = np.sqrt(s)
idx = np.sum(s > tau)
mid = np.zeros(idx)
mid[: theta] = 1
mid[theta:idx] = (s[theta:idx] - tau) / s[theta:idx]
return (u[:, :idx] @ np.diag(mid)) @ (u[:, :idx].T @ matrix)
if m > 2 * n:
return _svt_tnn(matrix.T, tau, theta).T
u, s, v = np.linalg.svd(matrix, full_matrices=0)
idx = np.sum(s > tau)
vec = s[:idx].copy()
vec[theta:idx] = s[theta:idx] - tau
return u[:, :idx] @ np.diag(vec) @ v[:idx, :]
def impute_latc_tnn(
    tensor: np.ndarray,
    lags: Sequence[int] = [1],
    alpha: Sequence[float] = [1 / 3, 1 / 3, 1 / 3],
    rho0: float = 1e-7,
    lambda0: float = 2e-7,
    theta: int = 20,
    epsilon: float = 1e-7,
    maxiter: int = 300
) -> np.ndarray:
    """
    Impute tensor values with LATC-TNN method by Chen and Sun (2020).

    Uses low-rank autoregressive tensor completion (LATC) with
    truncated nuclear norm (TNN) minimization.

    * description: https://arxiv.org/abs/2006.10436
    * code: https://github.com/xinychen/tensor-learning/blob/master/mats

    Args:
        tensor: Observational series in the form (series, groups, periods).
            Null values are replaced with zeros, so any zeros will be treated as null.
        lags: Autoregressive lags (in periods) used for the temporal regression.
        alpha: Weight of each tensor unfolding mode in the truncated nuclear norm.
        rho0: Initial penalty parameter (grown by 5% per iteration, capped at 1e5).
        lambda0: Weight of the autoregressive regularization term.
            If zero, the autoregressive step is skipped.
        theta: Number of leading singular values excluded from shrinkage
            (the truncation of the truncated nuclear norm).
        epsilon: Convergence criterion. A smaller number will result in more iterations.
        maxiter: Maximum number of iterations.

    Returns:
        Tensor with missing values in `tensor` replaced by imputed values.
    """
    # From here on, zeros mark missing entries.
    tensor = np.where(np.isnan(tensor), 0, tensor)
    dim = np.array(tensor.shape)
    dim_time = int(np.prod(dim) / dim[0])
    d = len(lags)
    max_lag = np.max(lags)
    mat = _ten2mat(tensor, mode=0)
    pos_missing = np.where(mat == 0)
    # One tensor copy per unfolding mode: x holds the mode-wise low-rank
    # estimates, t the corresponding dual variables.
    x = np.zeros(np.insert(dim, 0, len(dim)))
    t = np.zeros(np.insert(dim, 0, len(dim)))
    z = mat.copy()
    # Warm-start missing entries at the mean of the observed values.
    z[pos_missing] = np.mean(mat[mat != 0])
    # Autoregressive coefficients, one row per series, one column per lag.
    a = 0.001 * np.random.rand(dim[0], d)
    it = 0
    # ind[i] indexes, for lag i, the time steps used to predict steps
    # max_lag..dim_time (each row is shifted back by its lag).
    ind = np.zeros((d, dim_time - max_lag), dtype=int)
    for i in range(d):
        ind[i, :] = np.arange(max_lag - lags[i], dim_time - lags[i])
    last_mat = mat.copy()
    snorm = np.linalg.norm(mat, 'fro')
    rho = rho0
    while True:
        rho = min(rho * 1.05, 1e5)
        # Mode-wise singular value thresholding of the current estimate.
        for k in range(len(dim)):
            x[k] = _mat2ten(
                _svt_tnn(
                    _ten2mat(_mat2ten(z, shape=dim, mode=0) - t[k] / rho, mode=k),
                    tau=alpha[k] / rho,
                    theta=theta
                ),
                shape=dim,
                mode=k
            )
        # Combine the mode estimates with weights alpha.
        tensor_hat = np.einsum('k, kmnt -> mnt', alpha, x)
        mat_hat = _ten2mat(tensor_hat, 0)
        mat0 = np.zeros((dim[0], dim_time - max_lag))
        if lambda0 > 0:
            # Least-squares fit of the AR coefficients, then AR predictions (mat0).
            for m in range(dim[0]):
                qm = mat_hat[m, ind].T
                a[m, :] = np.linalg.pinv(qm) @ z[m, max_lag:]
                mat0[m, :] = qm @ a[m, :]
            mat1 = _ten2mat(np.mean(rho * x + t, axis=0), 0)
            # Blend the consensus estimate with the AR predictions
            # for the missing entries (first max_lag steps have no AR prediction).
            z[pos_missing] = np.append(
                (mat1[:, :max_lag] / rho),
                (mat1[:, max_lag:] + lambda0 * mat0) / (rho + lambda0),
                axis=1
            )[pos_missing]
        else:
            z[pos_missing] = (_ten2mat(np.mean(x + t / rho, axis=0), 0))[pos_missing]
        # Dual variable update.
        t = t + rho * (x - np.broadcast_to(
            _mat2ten(z, dim, 0), np.insert(dim, 0, len(dim))
        ))
        # Relative change of the estimate: the stopping criterion.
        tol = np.linalg.norm((mat_hat - last_mat), 'fro') / snorm
        last_mat = mat_hat.copy()
        it += 1
        print(f"Iteration: {it}", end="\r")
        if tol < epsilon or it >= maxiter:
            break
    print(f"Iteration: {it}")
    return tensor_hat
def _tsvt(tensor: np.ndarray, phi: np.ndarray, tau: float) -> np.ndarray:
"""Tensor singular value thresholding (TSVT)."""
dim = tensor.shape
x = np.zeros(dim)
tensor = np.einsum('kt, ijk -> ijt', phi, tensor)
for t in range(dim[2]):
u, s, v = np.linalg.svd(tensor[:, :, t], full_matrices=False)
r = len(np.where(s > tau)[0])
if r >= 1:
s = s[:r]
s[: r] = s[:r] - tau
x[:, :, t] = u[:, :r] @ np.diag(s) @ v[:r, :]
return np.einsum('kt, ijt -> ijk', phi, x)
def impute_latc_tubal(  # noqa: C901
    tensor: np.ndarray,
    lags: Sequence[int] = [1],
    rho0: float = 1e-7,
    lambda0: float = 2e-7,
    epsilon: float = 1e-7,
    maxiter: int = 300
) -> np.ndarray:
    """
    Impute tensor values with LATC-Tubal method by Chen, Chen and Sun (2020).

    Uses low-tubal-rank autoregressive tensor completion (LATC-Tubal).
    It is much faster than :func:`impute_latc_tnn` for very large datasets,
    with comparable accuracy.

    * description: https://arxiv.org/abs/2008.03194
    * code: https://github.com/xinychen/tensor-learning/blob/master/mats

    Args:
        tensor: Observational series in the form (series, groups, periods).
            Null values are replaced with zeros, so any zeros will be treated as null.
        lags: Autoregressive lags (in periods) used for the temporal regression.
        rho0: Initial penalty parameter (grown by 5% per iteration, capped at 1e5).
        lambda0: Weight of the autoregressive regularization term.
            If zero, the autoregressive step is skipped.
        epsilon: Convergence criterion. A smaller number will result in more iterations.
        maxiter: Maximum number of iterations.

    Returns:
        Tensor with missing values in `tensor` replaced by imputed values.
    """
    # From here on, zeros mark missing entries.
    tensor = np.where(np.isnan(tensor), 0, tensor)
    dim = np.array(tensor.shape)
    dim_time = int(np.prod(dim) / dim[0])
    d = len(lags)
    max_lag = np.max(lags)
    mat = _ten2mat(tensor, 0)
    pos_missing = np.where(mat == 0)
    t = np.zeros(dim)
    z = mat.copy()
    # Warm-start missing entries at the mean of the observed values.
    z[pos_missing] = np.mean(mat[mat != 0])
    # Autoregressive coefficients, one row per series, one column per lag.
    a = 0.001 * np.random.rand(dim[0], d)
    it = 0
    # ind[i] indexes, for lag i, the time steps used to predict steps
    # max_lag..dim_time (each row is shifted back by its lag).
    ind = np.zeros((d, dim_time - max_lag), dtype=np.int_)
    for i in range(d):
        ind[i, :] = np.arange(max_lag - lags[i], dim_time - lags[i])
    last_mat = mat.copy()
    snorm = np.linalg.norm(mat, 'fro')
    rho = rho0
    # phi: transform for TSVT, from the eigenvectors of the temporal Gram matrix
    # (refreshed every 10 iterations inside the loop below).
    temp1 = _ten2mat(_mat2ten(z, dim, 0), 2)
    _, phi = np.linalg.eig(temp1 @ temp1.T)
    del temp1
    # For long series, the AR regression is fit on a random subsample
    # of time steps; sample_rate is only needed (and defined) in that case.
    if dim_time > 5e3 and dim_time <= 1e4:
        sample_rate = 0.2
    elif dim_time > 1e4:
        sample_rate = 0.1
    while True:
        rho = min(rho * 1.05, 1e5)
        x = _tsvt(_mat2ten(z, dim, 0) - t / rho, phi, 1 / rho)
        mat_hat = _ten2mat(x, 0)
        mat0 = np.zeros((dim[0], dim_time - max_lag))
        temp2 = _ten2mat(rho * x + t, 0)
        if lambda0 > 0:
            if dim_time <= 5e3:
                # Exact least-squares fit of the AR coefficients.
                for m in range(dim[0]):
                    qm = mat_hat[m, ind].T
                    a[m, :] = np.linalg.pinv(qm) @ z[m, max_lag:]
                    mat0[m, :] = qm @ a[m, :]
            elif dim_time > 5e3:
                # Subsampled fit (for speed), but predictions for all steps.
                for m in range(dim[0]):
                    idx = np.arange(0, dim_time - max_lag)
                    np.random.shuffle(idx)
                    idx = idx[: int(sample_rate * (dim_time - max_lag))]
                    qm = mat_hat[m, ind].T
                    a[m, :] = np.linalg.pinv(qm[idx[:], :]) @ z[m, max_lag:][idx[:]]
                    mat0[m, :] = qm @ a[m, :]
            # Blend the consensus estimate with the AR predictions
            # for the missing entries (first max_lag steps have no AR prediction).
            z[pos_missing] = np.append(
                (temp2[:, :max_lag] / rho),
                (temp2[:, max_lag:] + lambda0 * mat0) / (rho + lambda0), axis=1
            )[pos_missing]
        else:
            z[pos_missing] = temp2[pos_missing] / rho
        # Dual variable update.
        t = t + rho * (x - _mat2ten(z, dim, 0))
        # Relative change of the estimate: the stopping criterion.
        tol = np.linalg.norm((mat_hat - last_mat), 'fro') / snorm
        last_mat = mat_hat.copy()
        it += 1
        if not np.mod(it, 10):
            # Refresh the TSVT transform from the current estimate.
            temp1 = _ten2mat(_mat2ten(z, dim, 0) - t / rho, 2)
            _, phi = np.linalg.eig(temp1 @ temp1.T)
            del temp1
        print(f"Iteration: {it}", end="\r")
        if tol < epsilon or it >= maxiter:
            break
    print(f"Iteration: {it}")
    return x
# ---- Anomaly detection ---- #
class Timeseries:
"""
Multivariate timeseries for anomalies detection and imputation.
Attributes:
xi: Reference to the original values (can be null).
Many methods assume that these represent chronological, regular timeseries.
x: Copy of :attr:`xi` with any flagged values replaced with null.
flags: Flag label for each value, or null if not flagged.
flagged: Running list of flags that have been checked so far.
index: Row index.
columns: Column names.
"""
def __init__(self, x: Union[np.ndarray, pd.DataFrame]) -> None:
"""
Initialize a multivariate timeseries.
Args:
x: Timeseries with shape (n observations, m variables).
If :class:`pandas.DataFrame`, :attr:`index` and :attr:`columns`
are equal to `x.index` and `x.columns`, respectively.
Otherwise, :attr:`index` and :attr:`columns` are the default
`pandas.RangeIndex`.
"""
self.xi: np.ndarray
self.index: pd.Index
self.columns: pd.Index
if isinstance(x, pd.DataFrame):
self.xi = x.values
self.index = x.index
self.columns = x.columns
else:
self.xi = x
self.index = pd.RangeIndex(x.shape[0])
self.columns = pd.RangeIndex(x.shape[1])
self.x: np.ndarray = self.xi.copy()
self.flags: np.ndarray = np.empty(self.x.shape, dtype=object)
self.flagged: List[str] = []
def to_dataframe(self, array: np.ndarray = None, copy: bool = True) -> pd.DataFrame:
"""
Return multivariate timeseries as a :class:`pandas.DataFrame`.
Args:
array: Two-dimensional array to use. If `None`, uses :attr:`x`.
copy: Whether to use a copy of `array`.
"""
x = self.x if array is None else array
return pd.DataFrame(x, columns=self.columns, index=self.index, copy=copy)
def flag(self, mask: np.ndarray, flag: str) -> None:
"""
Flag values.
Flags values (if not already flagged) and nulls flagged values.
Args:
mask: Boolean mask of the values to flag.
flag: Flag name.
"""
# Only flag unflagged values
mask = mask & ~np.isnan(self.x)
self.flags[mask] = flag
self.flagged.append(flag)
# Null flagged values
self.x[mask] = np.nan
# Clear cached metrics
for name in dir(self):
attr = getattr(self, name)
if hasattr(attr, 'cache_clear'):
attr.cache_clear()
def unflag(self, flags: Iterable[str] = None) -> None:
"""
Unflag values.
Unflags values by restoring their original values and removing their flag.
Args:
flags: Flag names. If `None`, all flags are removed.
"""
mask = slice(None) if flags is None else np.isin(self.flags, flags)
self.flags[mask] = None
self.x[mask] = self.xi[mask]
self.flagged = [f for f in self.flagged if flags is not None and f not in flags]
def flag_negative_or_zero(self) -> None:
"""Flag negative or zero values (NEGATIVE_OR_ZERO)."""
mask = self.x <= 0
self.flag(mask, "NEGATIVE_OR_ZERO")
def flag_identical_run(self, length: int = 3) -> None:
"""
Flag the last values in identical runs (IDENTICAL_RUN).
Args:
length: Run length to flag.
If `3`, the third (and subsequent) identical values are flagged.
Raises:
ValueError: Run length must be 2 or greater.
"""
if length < 2:
raise ValueError("Run length must be 2 or greater")
mask = np.ones(self.x.shape, dtype=bool)
mask[0] = False
for n in range(1, length):
mask[n:] &= self.x[n:] == self.x[:-n]
self.flag(mask, "IDENTICAL_RUN")
def flag_global_outlier(self, medians: float = 9) -> None:
"""
Flag values greater or less than n times the global median (GLOBAL_OUTLIER).
Args:
medians: Number of times the median the value must exceed the median.
"""
median = np.nanmedian(self.x, axis=0)
mask = np.abs(self.x - median) > np.abs(median * medians)
self.flag(mask, "GLOBAL_OUTLIER")
def flag_global_outlier_neighbor(self, neighbors: int = 1) -> None:
"""
Flag values neighboring global outliers (GLOBAL_OUTLIER_NEIGHBOR).
Args:
neighbors: Number of neighbors to flag on either side of each outlier.
Raises:
ValueError: Global outliers must be flagged first.
"""
if "GLOBAL_OUTLIER" not in self.flagged:
raise ValueError("Global outliers must be flagged first")
mask = np.zeros(self.x.shape, dtype=bool)
outliers = self.flags == "GLOBAL_OUTLIER"
for shift in range(1, neighbors + 1):
# Neighbors before
mask[:-shift][outliers[shift:]] = True
# Neighors after
mask[shift:][outliers[:-shift]] = True
self.flag(mask, "GLOBAL_OUTLIER_NEIGHBOR")
@functools.lru_cache(maxsize=2)
def rolling_median(self, window: int = 48) -> np.ndarray:
"""
Rolling median of values.
Args:
window: Number of values in the moving window.
"""
# RUGGLES: rollingDem, rollingDemLong (window=480)
df = pd.DataFrame(self.x, copy=False)
return df.rolling(window, min_periods=1, center=True).median().values
def rolling_median_offset(self, window: int = 48) -> np.ndarray:
"""
Values minus the rolling median.
Estimates the local cycle in cyclical data by removing longterm trends.
Args:
window: Number of values in the moving window.
"""
# RUGGLES: dem_minus_rolling
return self.x - self.rolling_median(window=window)
    def median_of_rolling_median_offset(
        self,
        window: int = 48,
        shifts: Sequence[int] = range(-240, 241, 24)
    ) -> np.ndarray:
        """
        Median of the offset from the rolling median.

        Calculated by shifting the rolling median offset (:meth:`rolling_median_offset`)
        by different numbers of values, then taking the median at each position.
        Estimates the typical local cycle in cyclical data.

        Args:
            window: Number of values in the moving window for the rolling median.
            shifts: Number of values to shift the rolling median offset by.
        """
        # RUGGLES: vals_dem_minus_rolling
        offset = self.rolling_median_offset(window=window)
        # Fast numpy implementation of pd.DataFrame.shift:
        # shifted[i, r] holds offset[r - shifts[i]] (nan where out of range).
        shifted = np.empty([len(shifts), *offset.shape], dtype=float)
        for i, shift in enumerate(shifts):
            if shift > 0:
                shifted[i, :shift] = np.nan
                shifted[i, shift:] = offset[:-shift]
            elif shift < 0:
                shifted[i, shift:] = np.nan
                shifted[i, :shift] = offset[-shift:]
            else:
                shifted[i, :] = offset
        # Ignore warning for rows with all null values
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", category=RuntimeWarning, message="All-NaN slice encountered"
            )
            return np.nanmedian(shifted, axis=0)
def rolling_iqr_of_rolling_median_offset(
self,
window: int = 48,
iqr_window: int = 240
) -> np.ndarray:
"""
Rolling interquartile range (IQR) of rolling median offset.
Estimates the spread of the local cycles in cyclical data.
Args:
window: Number of values in the moving window for the rolling median.
iqr_window: Number of values in the moving window for the rolling IQR.
"""
# RUGGLES: dem_minus_rolling_IQR
offset = self.rolling_median_offset(window=window)
df = pd.DataFrame(offset, copy=False)
rolling = df.rolling(iqr_window, min_periods=1, center=True)
return (rolling.quantile(0.75) - rolling.quantile(0.25)).values
def median_prediction(
self,
window: int = 48,
shifts: Sequence[int] = range(-240, 241, 24),
long_window: int = 480
) -> np.ndarray:
"""
Values predicted from local and regional rolling medians.
Calculated as `{ local median } +
{ median of local median offset } * { local median } / { regional median }`.
Args:
window: Number of values in the moving window for the local rolling median.
shifts: Positions to shift the local rolling median offset by,
for computing its median.
long_window: Number of values in the moving window
for the regional (long) rolling median.
"""
# RUGGLES: hourly_median_dem_dev (multiplied by rollingDem)
return self.rolling_median(window=window) * (
1 + self.median_of_rolling_median_offset(window=window, shifts=shifts)
/ self.rolling_median(window=long_window)
)
def flag_local_outlier(
self,
window: int = 48,
shifts: Sequence[int] = range(-240, 241, 24),
long_window: int = 480,
iqr_window: int = 240,
multiplier: Tuple[float, float] = (3.5, 2.5)
) -> None:
"""
Flag local outliers (LOCAL_OUTLIER_HIGH, LOCAL_OUTLIER_LOW).
Flags values which are above or below the :meth:`median_prediction` by more than
a `multiplier` times the :meth:`rolling_iqr_of_rolling_median_offset`.
Args:
window: Number of values in the moving window for the local rolling median.
shifts: Positions to shift the local rolling median offset by,
for computing its median.
long_window: Number of values in the moving window
for the regional (long) rolling median.
iqr_window: Number of values in the moving window
for the rolling interquartile range (IQR).
multiplier: Number of times the :meth:`rolling_iqr_of_rolling_median_offset`
the value must be above (HIGH) and below (LOW)
the :meth:`median_prediction` to be flagged.
"""
# Compute constants
prediction = self.median_prediction(
window=window, shifts=shifts, long_window=long_window
)
iqr = self.rolling_iqr_of_rolling_median_offset(
window=window, iqr_window=iqr_window
)
mask = self.x > prediction + multiplier[0] * iqr
self.flag(mask, "LOCAL_OUTLIER_HIGH")
# As in original code, do not recompute constants with new nulls
mask = self.x < prediction - multiplier[1] * iqr
self.flag(mask, "LOCAL_OUTLIER_LOW")
    def diff(self, shift: int = 1) -> np.ndarray:
        """
        Values minus the value of their neighbor.

        Args:
            shift: Positions to shift for calculating the difference.
                Positive values select a preceding (left) neighbor;
                negative values select a following (right) neighbor.

        Returns:
            Array of the same shape as :attr:`x`; margins where no neighbor
            exists are null (the default fill of :func:`array_diff`).
        """
        # RUGGLES: delta_pre (shift=1), delta_post (shift=-1)
        return array_diff(self.x, shift)
def rolling_iqr_of_diff(self, shift: int = 1, window: int = 240) -> np.ndarray:
"""
Rolling interquartile range (IQR) of the difference between neighboring values.
Args:
shift: Positions to shift for calculating the difference.
window: Number of values in the moving window for the rolling IQR.
"""
# RUGGLES: delta_rolling_iqr
diff = self.diff(shift=shift)
df = pd.DataFrame(diff, copy=False)
rolling = df.rolling(window, min_periods=1, center=True)
return (rolling.quantile(0.75) - rolling.quantile(0.25)).values
def flag_double_delta(
self, iqr_window: int = 240, multiplier: float = 2
) -> None:
"""
Flag values very different from their neighbors on either side (DOUBLE_DELTA).
Flags values whose differences to both neighbors on either side exceeds a
`multiplier` times the rolling interquartile range (IQR) of neighbor difference.
Args:
iqr_window: Number of values in the moving window for the rolling IQR
of neighbor difference.
multiplier: Number of times the rolling IQR of neighbor difference
the value's difference to its neighbors must exceed
for the value to be flagged.
"""
before = self.diff(shift=1)
after = self.diff(shift=-1)
iqr = multiplier * self.rolling_iqr_of_diff(shift=1, window=iqr_window)
mask = (np.minimum(before, after) > iqr) | (np.maximum(before, after) < -iqr)
self.flag(mask, "DOUBLE_DELTA")
@functools.lru_cache(maxsize=2)
def relative_median_prediction(self, **kwargs: Any) -> np.ndarray:
"""
Values divided by their value predicted from medians.
Args:
kwargs: Arguments to :meth:`median_prediction`.
"""
# RUGGLES: dem_rel_diff_wrt_hourly, dem_rel_diff_wrt_hourly_long (window=480)
return self.x / self.median_prediction(**kwargs)
def iqr_of_diff_of_relative_median_prediction(
self, shift: int = 1, **kwargs: Any
) -> np.ndarray:
"""
Interquartile range of the running difference of the relative median prediction.
Args:
shift: Positions to shift for calculating the difference.
Positive values select a preceding (left) neighbor.
kwargs: Arguments to :meth:`relative_median_prediction`.
"""
# RUGGLES: iqr_relative_deltas
diff = array_diff(self.relative_median_prediction(**kwargs), shift)
return scipy.stats.iqr(diff, nan_policy='omit', axis=0)
    def _find_single_delta(
        self,
        relative_median_prediction: np.ndarray,
        relative_median_prediction_long: np.ndarray,
        rolling_iqr_of_diff: np.ndarray,
        iqr_of_diff_of_relative_median_prediction: np.ndarray,
        reverse: bool = False
    ) -> np.ndarray:
        """
        Locate values that jump away from their nearest retained neighbor.

        Helper for :meth:`flag_single_delta`. Scans each column's non-null
        values in order (or reverse order), compares each value to the nearest
        value not yet flagged in this scan, and marks values whose jump exceeds
        all of the precomputed thresholds. Flagged values are removed from the
        scan and the comparison repeats until no new values are flagged.

        Args:
            relative_median_prediction: Precomputed
                :meth:`relative_median_prediction` (short window).
            relative_median_prediction_long: Same, computed with the long window.
            rolling_iqr_of_diff: Threshold on the absolute value difference
                (already scaled by the caller's multiplier).
            iqr_of_diff_of_relative_median_prediction: Per-column threshold on
                the difference of relative median predictions (already scaled).
            reverse: Whether to scan each column from the end backwards.

        Returns:
            Boolean mask (same shape as :attr:`x`) of values to flag.
        """
        not_nan = ~np.isnan(self.x)
        mask = np.zeros(self.x.shape, dtype=bool)
        for col in range(self.x.shape[1]):
            # Scan order: non-null positions, reversed when requested.
            indices = np.flatnonzero(not_nan[:, col])[::(-1 if reverse else 1)]
            previous, current = indices[:-1], indices[1:]
            while len(current):
                # Evaluate value pairs
                diff = np.abs(self.x[current, col] - self.x[previous, col])
                diff_relative_median_prediction = np.abs(
                    relative_median_prediction[current, col] -
                    relative_median_prediction[previous, col]
                )
                # Compare max deviation across short and long rolling median
                # to catch when outliers pull short median towards themselves.
                previous_max = np.maximum(
                    np.abs(1 - relative_median_prediction[previous, col]),
                    np.abs(1 - relative_median_prediction_long[previous, col]),
                )
                current_max = np.maximum(
                    np.abs(1 - relative_median_prediction[current, col]),
                    np.abs(1 - relative_median_prediction_long[current, col]),
                )
                # A pair is flagged when the jump is large in absolute terms,
                # large relative to the prediction, and the current value
                # deviates more from its prediction than the previous one.
                flagged = (
                    (diff > rolling_iqr_of_diff[current, col]) &
                    (
                        diff_relative_median_prediction >
                        iqr_of_diff_of_relative_median_prediction[col]
                    ) &
                    (current_max > previous_max)
                )
                flagged_indices = current[flagged]
                if not flagged_indices.size:
                    break
                # Find position of flagged indices in index
                if reverse:
                    indices_idx = indices.size - (
                        indices[::-1].searchsorted(flagged_indices, side='right')
                    )
                else:
                    indices_idx = indices.searchsorted(flagged_indices, side='left')
                # Only flag first of consecutive flagged indices
                # TODO: May not be necessary after first iteration
                unflagged = np.concatenate(([False], np.diff(indices_idx) == 1))
                flagged_indices = np.delete(flagged_indices, unflagged)
                indices_idx = np.delete(indices_idx, unflagged)
                mask[flagged_indices, col] = True
                flagged[flagged] = ~unflagged
                # Bump current index of flagged pairs to next unflagged index
                # Next index always unflagged because flagged runs are not permitted
                next_indices_idx = indices_idx + 1
                if next_indices_idx[-1] == len(indices):
                    # Drop last index if out of range
                    next_indices_idx = next_indices_idx[:-1]
                current = indices[next_indices_idx]
                # Trim previous values to length of current values
                previous = previous[flagged][:len(current)]
                # Delete flagged indices
                indices = np.delete(indices, indices_idx)
        return mask
def flag_single_delta(
self,
window: int = 48,
shifts: Sequence[int] = range(-240, 241, 24),
long_window: int = 480,
iqr_window: int = 240,
multiplier: float = 5,
rel_multiplier: float = 15
) -> None:
"""
Flag values very different from the nearest unflagged value (SINGLE_DELTA).
Flags values whose difference to the nearest unflagged value,
with respect to value and relative median prediction,
differ by less than a multiplier times the rolling interquartile range (IQR)
of the difference -
`multiplier` times :meth:`rolling_iqr_of_diff` and
`rel_multiplier` times :meth:`iqr_of_diff_of_relative_mean_prediction`,
respectively.
Args:
window: Number of values in the moving window for the rolling median
(for the relative median prediction).
shifts: Positions to shift the local rolling median offset by,
for computing its median (for the relative median prediction).
long_window: Number of values in the moving window for the long rolling
median (for the relative median prediction).
iqr_window: Number of values in the moving window for the rolling IQR
of neighbor difference.
multiplier: Number of times the rolling IQR of neighbor difference
the value's difference to its neighbor must exceed
for the value to be flagged.
rel_multiplier: Number of times the rolling IQR of relative median
prediction the value's prediction difference to its neighbor must exceed
for the value to be flagged.
"""
# Compute constants used in both forward and reverse pass
relative_median_prediction = self.relative_median_prediction(
window=window, shifts=shifts, long_window=long_window
)
relative_median_prediction_long = self.relative_median_prediction(
window=long_window, shifts=shifts, long_window=long_window
)
rolling_iqr_of_diff = multiplier * self.rolling_iqr_of_diff(
shift=1, window=iqr_window
)
iqr_of_diff_of_relative_median_prediction = rel_multiplier * (
self.iqr_of_diff_of_relative_median_prediction(
shift=1, window=window, shifts=shifts, long_window=long_window
)
)
# Set values flagged in forward pass to null before reverse pass
mask = self._find_single_delta(
relative_median_prediction,
relative_median_prediction_long,
rolling_iqr_of_diff,
iqr_of_diff_of_relative_median_prediction,
reverse=False
)
self.flag(mask, "SINGLE_DELTA")
# Repeat in reverse to get all options.
# As in original code, do not recompute constants with new nulls
mask = self._find_single_delta(
relative_median_prediction,
relative_median_prediction_long,
rolling_iqr_of_diff,
iqr_of_diff_of_relative_median_prediction,
reverse=True
)
self.flag(mask, "SINGLE_DELTA")
    def flag_anomalous_region(
        self, window: int = 48, threshold: float = 0.15
    ) -> None:
        """
        Flag values surrounded by flagged values (ANOMALOUS_REGION).

        Original null values are not considered flagged values.

        Args:
            window: Width of regions.
            threshold: Fraction of flagged values required for a region to be flagged.
        """
        # Check whether unflagged
        mask = np.equal(self.flags, None)
        # Check whether after or before half-width region with 1+ flagged values
        half_window = window // 2
        is_after = (
            pd.DataFrame(mask, copy=False)
            .rolling(window=half_window)
            .mean()
            .lt(1)
            .values
        )
        is_before = np.roll(is_after, -(half_window - 1), axis=0)
        # Check whether not part of a run of unflagged values longer than a half-width
        is_not_run = np.empty_like(mask)
        for col in range(mask.shape[1]):
            rvalues, rlengths = encode_run_length(mask[:, col])
            # Runs of flagged values (rvalues False) count as length 0 here,
            # so they always pass the short-run test.
            is_short_run = np.where(rvalues, rlengths, 0) <= half_window
            is_not_run[:, col] = np.repeat(is_short_run, rlengths)
        # Check whether within full-width region with too many flagged values
        is_region = (
            pd.DataFrame(~mask, copy=False)
            .rolling(window=window, center=True)
            .mean()
            .gt(threshold)
            .rolling(window=window, center=True)
            .max()
            .eq(True)
            .values
        )
        # Flag if all conditions are met
        mask &= is_after & is_before & is_not_run & is_region
        self.flag(mask, "ANOMALOUS_REGION")
def flag_ruggles(self) -> None:
    """
    Flag values following the method of Ruggles and others (2020).
    Assumes values are hourly electricity demand.
    * description: https://doi.org/10.1038/s41597-020-0483-x
    * code: https://github.com/truggles/EIA_Cleaned_Hourly_Electricity_Demand_Code
    """
    # Stage 1: cheap global screens.
    self.flag_negative_or_zero()
    self.flag_identical_run(length=3)
    self.flag_global_outlier(medians=9)
    self.flag_global_outlier_neighbor(neighbors=1)
    # Stage 2: windowed screens sharing one set of constants.
    # NOTE: In original code, statistics used for the flags below are precomputed
    # here, rather than computed for each flag with nulls added by previous flags.
    short_window = 48
    long_window = 480
    iqr_window = 240
    pattern_shifts = range(-240, 241, 24)
    self.flag_local_outlier(
        window=short_window,
        shifts=pattern_shifts,
        long_window=long_window,
        iqr_window=iqr_window,
        multiplier=(3.5, 2.5),
    )
    self.flag_double_delta(iqr_window=iqr_window, multiplier=2)
    self.flag_single_delta(
        window=short_window,
        shifts=pattern_shifts,
        long_window=long_window,
        iqr_window=iqr_window,
        multiplier=5,
        rel_multiplier=15,
    )
    self.flag_anomalous_region(window=short_window + 1, threshold=0.15)
def summarize_flags(self) -> pd.DataFrame:
    """Summarize flagged values by flag, count and median."""
    per_column = {}
    for j in range(self.xi.shape[1]):
        grouped = pd.Series(self.xi[:, j]).groupby(self.flags[:, j])
        per_column[self.columns[j]] = grouped.agg(['count', 'median'])
    table = pd.concat(per_column, names=['column', 'flag']).reset_index()
    # Order flags by the sequence in which they were first applied.
    flag_order = pd.CategoricalDtype(pd.unique(self.flagged))
    ordered_flags = table['flag'].astype(flag_order)
    return table.assign(flag=ordered_flags).sort_values(['column', 'flag'])
def plot_flags(self, name: Any = 0) -> None:
    """
    Plot cleaned series and anomalous values colored by flag.
    Args:
        name: Series to plot, as either an integer index or name in :attr:`columns`.
    """
    if name not in self.columns:
        name = self.columns[name]
    col = list(self.columns).index(name)
    # Base series in grey behind the flagged points.
    plt.plot(self.index, self.x[:, col], color='lightgrey', marker='.', zorder=1)
    flag_colors = {
        'NEGATIVE_OR_ZERO': 'pink',
        'IDENTICAL_RUN': 'blue',
        'GLOBAL_OUTLIER': 'brown',
        'GLOBAL_OUTLIER_NEIGHBOR': 'brown',
        'LOCAL_OUTLIER_HIGH': 'purple',
        'LOCAL_OUTLIER_LOW': 'purple',
        'DOUBLE_DELTA': 'green',
        'SINGLE_DELTA': 'red',
        'ANOMALOUS_REGION': 'orange',
    }
    for flag_name, color in flag_colors.items():
        selected = self.flags[:, col] == flag_name
        # Set zorder manually to ensure flagged points are drawn on top.
        plt.scatter(
            self.index[selected], self.xi[selected, col],
            c=color, label=flag_name, zorder=2)
    plt.legend()
def simulate_nulls(
    self,
    lengths: Sequence[int] = None,
    padding: int = 1,
    intersect: bool = False,
    overlap: bool = False,
) -> np.ndarray:
    """
    Find non-null values to null to match a run-length distribution.
    Args:
        lengths: Length of null runs to simulate for each series.
            By default, uses the run lengths of null values in each series.
        padding: Minimum number of non-null values between simulated null runs
            and between simulated and existing null runs.
        intersect: Whether simulated null runs can intersect each other.
        overlap: Whether simulated null runs can overlap existing null runs.
            If `True`, `padding` is ignored.
    Returns:
        Boolean mask of current non-null values to set to null.
    Raises:
        ValueError: If no space can be found for a run of the requested
            length (raised by ``insert_run_length``).
    Examples:
        >>> x = np.column_stack([[1, 2, np.nan, 4, 5, 6, 7, np.nan, np.nan]])
        >>> s = Timeseries(x)
        >>> s.simulate_nulls().ravel()
        array([ True, False, False, False,  True,  True, False, False, False])
        >>> s.simulate_nulls(lengths=[4], padding=0).ravel()
        array([False, False, False,  True,  True,  True,  True, False, False])
    """
    simulated = np.zeros(self.x.shape, dtype=bool)
    for col in range(self.x.shape[1]):
        null_here = np.isnan(self.x[:, col])
        if lengths is not None:
            run_lengths = lengths
        else:
            # Mirror the existing null runs of this series.
            run_values, run_counts = encode_run_length(null_here)
            run_lengths = run_counts[run_values]
        placed = insert_run_length(
            simulated[:, col],
            values=np.ones(len(run_lengths), dtype=bool),
            lengths=run_lengths,
            mask=None if overlap else ~null_here,
            padding=0 if overlap else padding,
            intersect=intersect,
        )
        if overlap:
            # Only report positions that are currently non-null.
            placed &= ~null_here
        simulated[:, col] = placed
    return simulated
def fold_tensor(self, x: np.ndarray = None, periods: int = 24) -> np.ndarray:
    """
    Fold into a 3-dimensional tensor representation.
    Folds the series `x` (number of observations, number of series)
    into a 3-d tensor (number of series, number of groups, number of periods),
    splitting observations into groups of length `periods`.
    For example, each group may represent a day and each period the hour of the day.
    Args:
        x: Series array to fold. Uses :attr:`x` by default.
        periods: Number of consecutive values in each series to fold into a group.
    Returns:
        >>> x = np.column_stack([[1, 2, 3, 4, 5, 6], [10, 20, 30, 40, 50, 60]])
        >>> s = Timeseries(x)
        >>> tensor = s.fold_tensor(periods=3)
        >>> tensor[0]
        array([[1, 2, 3],
               [4, 5, 6]])
        >>> np.all(x == s.unfold_tensor(tensor))
        True
    """
    # The target shape is always derived from :attr:`x`, even when an
    # explicit `x` is folded (they are expected to share a shape).
    n_series = self.x.shape[1]
    n_groups = self.x.shape[0] // periods
    series = self.x if x is None else x
    return series.T.reshape(n_series, n_groups, periods)
def unfold_tensor(self, tensor: np.ndarray) -> np.ndarray:
    """
    Unfold a 3-dimensional tensor representation.
    Performs the reverse of :meth:`fold_tensor`.
    """
    transposed = tensor.T
    # Column-major order undoes the row-major grouping done when folding.
    return transposed.reshape(self.x.shape, order='F')
def impute(
    self,
    mask: np.ndarray = None,
    periods: int = 24,
    blocks: int = 1,
    method: str = 'tubal',
    **kwargs: Any
) -> np.ndarray:
    """
    Impute null values.
    .. note::
        The imputation method requires that nulls be replaced by zeros,
        so the series cannot already contain zeros.
    Args:
        mask: Boolean mask of values to impute in addition to
            any null values in :attr:`x`.
        periods: Number of consecutive values in each series to fold into a group.
            See :meth:`fold_tensor`.
        blocks: Number of blocks into which to split the series for imputation.
            This has been found to reduce processing time for `method='tnn'`.
        method: Imputation method to use
            ('tubal': :func:`impute_latc_tubal`, 'tnn': :func:`impute_latc_tnn`).
        kwargs: Optional arguments to `method`.
    Returns:
        Array of same shape as :attr:`x` with all null values
        (and those selected by `mask`) replaced with imputed values.
    Raises:
        ValueError: Zero values present. Replace with very small value.
    """
    imputers = {'tubal': impute_latc_tubal, 'tnn': impute_latc_tnn}
    imputer = imputers[method]
    # Null out the extra positions requested by `mask`, if any.
    x = self.x.copy() if mask is None else np.where(mask, np.nan, self.x)
    if (x == 0).any():
        raise ValueError("Zero values present. Replace with very small value.")
    tensor = self.fold_tensor(x, periods=periods)
    n_groups = tensor.shape[1]
    step = int(np.ceil(n_groups / blocks))
    boundaries = [*range(0, n_groups, step), n_groups]
    for block_i in range(blocks):
        if blocks > 1:
            print(f"Block: {block_i}")
        selection = slice(None), slice(boundaries[block_i], boundaries[block_i + 1]), slice(None)
        tensor[selection] = imputer(tensor[selection], **kwargs)
    return self.unfold_tensor(tensor)
def summarize_imputed(
    self,
    imputed: np.ndarray,
    mask: np.ndarray
) -> pd.DataFrame:
    """
    Summarize the fit of imputed values to actual values.
    Summarizes the agreement between actual and imputed values with the
    following statistics:
    * `mpe`: Mean percent error, `(actual - imputed) / actual`.
    * `mape`: Mean absolute percent error, `abs(mpe)`.
    Args:
        imputed: Series of same shape as :attr:`x` with imputed values.
            See :meth:`impute`.
        mask: Boolean mask of imputed values that were not null in :attr:`x`.
            See :meth:`simulate_nulls`.
    Returns:
        Table of imputed value statistics for each series.
    """
    rows = []
    for j in range(self.x.shape[1]):
        actual = self.x[mask[:, j], j]
        if actual.size == 0:
            # No imputed values for this series; skip it.
            continue
        errors = (actual - imputed[mask[:, j], j]) / actual
        errors = errors[~np.isnan(errors)]
        rows.append({
            'column': self.columns[j],
            'count': actual.size,
            'mpe': np.mean(errors),
            'mape': np.mean(np.abs(errors)),
        })
    return pd.DataFrame(rows)
| [
"numpy.isin",
"numpy.moveaxis",
"numpy.sum",
"numpy.abs",
"numpy.nanmedian",
"numpy.maximum",
"numpy.empty",
"numpy.einsum",
"numpy.ones",
"numpy.isnan",
"numpy.random.default_rng",
"numpy.argsort",
"numpy.linalg.svd",
"numpy.mean",
"numpy.linalg.norm",
"numpy.arange",
"numpy.diag",
... | [((3024, 3040), 'numpy.empty_like', 'np.empty_like', (['x'], {}), '(x)\n', (3037, 3040), True, 'import numpy as np\n'), ((4201, 4214), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (4211, 4214), True, 'import numpy as np\n'), ((4331, 4356), 'numpy.array', 'np.array', (['(x[1:] != x[:-1])'], {}), '(x[1:] != x[:-1])\n', (4339, 4356), True, 'import numpy as np\n'), ((7424, 7435), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (7432, 7435), True, 'import numpy as np\n'), ((8703, 8726), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (8724, 8726), True, 'import numpy as np\n'), ((11258, 11296), 'numpy.linalg.svd', 'np.linalg.svd', (['matrix'], {'full_matrices': '(0)'}), '(matrix, full_matrices=0)\n', (11271, 11296), True, 'import numpy as np\n'), ((11307, 11322), 'numpy.sum', 'np.sum', (['(s > tau)'], {}), '(s > tau)\n', (11313, 11322), True, 'import numpy as np\n'), ((12559, 12581), 'numpy.array', 'np.array', (['tensor.shape'], {}), '(tensor.shape)\n', (12567, 12581), True, 'import numpy as np\n'), ((12656, 12668), 'numpy.max', 'np.max', (['lags'], {}), '(lags)\n', (12662, 12668), True, 'import numpy as np\n'), ((12722, 12740), 'numpy.where', 'np.where', (['(mat == 0)'], {}), '(mat == 0)\n', (12730, 12740), True, 'import numpy as np\n'), ((12873, 12895), 'numpy.mean', 'np.mean', (['mat[mat != 0]'], {}), '(mat[mat != 0])\n', (12880, 12895), True, 'import numpy as np\n'), ((12959, 13003), 'numpy.zeros', 'np.zeros', (['(d, dim_time - max_lag)'], {'dtype': 'int'}), '((d, dim_time - max_lag), dtype=int)\n', (12967, 13003), True, 'import numpy as np\n'), ((13134, 13160), 'numpy.linalg.norm', 'np.linalg.norm', (['mat', '"""fro"""'], {}), "(mat, 'fro')\n", (13148, 13160), True, 'import numpy as np\n'), ((14827, 14840), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (14835, 14840), True, 'import numpy as np\n'), ((14854, 14894), 'numpy.einsum', 'np.einsum', (['"""kt, ijk -> ijt"""', 'phi', 'tensor'], {}), "('kt, ijk -> ijt', phi, 
tensor)\n", (14863, 14894), True, 'import numpy as np\n'), ((15174, 15209), 'numpy.einsum', 'np.einsum', (['"""kt, ijt -> ijk"""', 'phi', 'x'], {}), "('kt, ijt -> ijk', phi, x)\n", (15183, 15209), True, 'import numpy as np\n'), ((16325, 16347), 'numpy.array', 'np.array', (['tensor.shape'], {}), '(tensor.shape)\n', (16333, 16347), True, 'import numpy as np\n'), ((16422, 16434), 'numpy.max', 'np.max', (['lags'], {}), '(lags)\n', (16428, 16434), True, 'import numpy as np\n'), ((16483, 16501), 'numpy.where', 'np.where', (['(mat == 0)'], {}), '(mat == 0)\n', (16491, 16501), True, 'import numpy as np\n'), ((16510, 16523), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (16518, 16523), True, 'import numpy as np\n'), ((16564, 16586), 'numpy.mean', 'np.mean', (['mat[mat != 0]'], {}), '(mat[mat != 0])\n', (16571, 16586), True, 'import numpy as np\n'), ((16650, 16698), 'numpy.zeros', 'np.zeros', (['(d, dim_time - max_lag)'], {'dtype': 'np.int_'}), '((d, dim_time - max_lag), dtype=np.int_)\n', (16658, 16698), True, 'import numpy as np\n'), ((16829, 16855), 'numpy.linalg.norm', 'np.linalg.norm', (['mat', '"""fro"""'], {}), "(mat, 'fro')\n", (16843, 16855), True, 'import numpy as np\n'), ((16929, 16959), 'numpy.linalg.eig', 'np.linalg.eig', (['(temp1 @ temp1.T)'], {}), '(temp1 @ temp1.T)\n', (16942, 16959), True, 'import numpy as np\n'), ((24028, 24058), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(2)'}), '(maxsize=2)\n', (24047, 24058), False, 'import functools\n'), ((31812, 31842), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(2)'}), '(maxsize=2)\n', (31831, 31842), False, 'import functools\n'), ((4416, 4427), 'numpy.where', 'np.where', (['y'], {}), '(y)\n', (4424, 4427), True, 'import numpy as np\n'), ((4458, 4474), 'numpy.append', 'np.append', (['(-1)', 'i'], {}), '(-1, i)\n', (4467, 4474), True, 'import numpy as np\n'), ((7522, 7535), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (7530, 7535), True, 'import numpy as np\n'), 
((8555, 8591), 'numpy.concatenate', 'np.concatenate', (['(run_starts, buffer)'], {}), '((run_starts, buffer))\n', (8569, 8591), True, 'import numpy as np\n'), ((8614, 8651), 'numpy.concatenate', 'np.concatenate', (['(run_lengths, buffer)'], {}), '((run_lengths, buffer))\n', (8628, 8651), True, 'import numpy as np\n'), ((8786, 8805), 'numpy.argsort', 'np.argsort', (['lengths'], {}), '(lengths)\n', (8796, 8805), True, 'import numpy as np\n'), ((8825, 8843), 'numpy.asarray', 'np.asarray', (['values'], {}), '(values)\n', (8835, 8843), True, 'import numpy as np\n'), ((8865, 8884), 'numpy.asarray', 'np.asarray', (['lengths'], {}), '(lengths)\n', (8875, 8884), True, 'import numpy as np\n'), ((10319, 10371), 'numpy.reshape', 'np.reshape', (['matrix'], {'newshape': 'shape[index]', 'order': '"""F"""'}), "(matrix, newshape=shape[index], order='F')\n", (10329, 10371), True, 'import numpy as np\n'), ((10553, 10600), 'numpy.moveaxis', 'np.moveaxis', (['tensor'], {'source': 'mode', 'destination': '(0)'}), '(tensor, source=mode, destination=0)\n', (10564, 10600), True, 'import numpy as np\n'), ((10892, 10941), 'numpy.linalg.svd', 'np.linalg.svd', (['(matrix @ matrix.T)'], {'full_matrices': '(0)'}), '(matrix @ matrix.T, full_matrices=0)\n', (10905, 10941), True, 'import numpy as np\n'), ((10954, 10964), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (10961, 10964), True, 'import numpy as np\n'), ((10979, 10994), 'numpy.sum', 'np.sum', (['(s > tau)'], {}), '(s > tau)\n', (10985, 10994), True, 'import numpy as np\n'), ((11009, 11022), 'numpy.zeros', 'np.zeros', (['idx'], {}), '(idx)\n', (11017, 11022), True, 'import numpy as np\n'), ((12520, 12536), 'numpy.isnan', 'np.isnan', (['tensor'], {}), '(tensor)\n', (12528, 12536), True, 'import numpy as np\n'), ((12912, 12937), 'numpy.random.rand', 'np.random.rand', (['dim[0]', 'd'], {}), '(dim[0], d)\n', (12926, 12937), True, 'import numpy as np\n'), ((13047, 13095), 'numpy.arange', 'np.arange', (['(max_lag - lags[i])', '(dim_time - 
lags[i])'], {}), '(max_lag - lags[i], dim_time - lags[i])\n', (13056, 13095), True, 'import numpy as np\n'), ((13575, 13612), 'numpy.einsum', 'np.einsum', (['"""k, kmnt -> mnt"""', 'alpha', 'x'], {}), "('k, kmnt -> mnt', alpha, x)\n", (13584, 13612), True, 'import numpy as np\n'), ((13670, 13708), 'numpy.zeros', 'np.zeros', (['(dim[0], dim_time - max_lag)'], {}), '((dim[0], dim_time - max_lag))\n', (13678, 13708), True, 'import numpy as np\n'), ((14941, 14992), 'numpy.linalg.svd', 'np.linalg.svd', (['tensor[:, :, t]'], {'full_matrices': '(False)'}), '(tensor[:, :, t], full_matrices=False)\n', (14954, 14992), True, 'import numpy as np\n'), ((16286, 16302), 'numpy.isnan', 'np.isnan', (['tensor'], {}), '(tensor)\n', (16294, 16302), True, 'import numpy as np\n'), ((16603, 16628), 'numpy.random.rand', 'np.random.rand', (['dim[0]', 'd'], {}), '(dim[0], d)\n', (16617, 16628), True, 'import numpy as np\n'), ((16742, 16790), 'numpy.arange', 'np.arange', (['(max_lag - lags[i])', '(dim_time - lags[i])'], {}), '(max_lag - lags[i], dim_time - lags[i])\n', (16751, 16790), True, 'import numpy as np\n'), ((17256, 17294), 'numpy.zeros', 'np.zeros', (['(dim[0], dim_time - max_lag)'], {}), '((dim[0], dim_time - max_lag))\n', (17264, 17294), True, 'import numpy as np\n'), ((20252, 20288), 'numpy.empty', 'np.empty', (['self.x.shape'], {'dtype': 'object'}), '(self.x.shape, dtype=object)\n', (20260, 20288), True, 'import numpy as np\n'), ((20716, 20782), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': 'self.columns', 'index': 'self.index', 'copy': 'copy'}), '(x, columns=self.columns, index=self.index, copy=copy)\n', (20728, 20782), True, 'import pandas as pd\n'), ((22586, 22619), 'numpy.ones', 'np.ones', (['self.x.shape'], {'dtype': 'bool'}), '(self.x.shape, dtype=bool)\n', (22593, 22619), True, 'import numpy as np\n'), ((23057, 23085), 'numpy.nanmedian', 'np.nanmedian', (['self.x'], {'axis': '(0)'}), '(self.x, axis=0)\n', (23069, 23085), True, 'import numpy as np\n'), ((23678, 
23712), 'numpy.zeros', 'np.zeros', (['self.x.shape'], {'dtype': 'bool'}), '(self.x.shape, dtype=bool)\n', (23686, 23712), True, 'import numpy as np\n'), ((24325, 24357), 'pandas.DataFrame', 'pd.DataFrame', (['self.x'], {'copy': '(False)'}), '(self.x, copy=False)\n', (24337, 24357), True, 'import pandas as pd\n'), ((26910, 26942), 'pandas.DataFrame', 'pd.DataFrame', (['offset'], {'copy': '(False)'}), '(offset, copy=False)\n', (26922, 26942), True, 'import pandas as pd\n'), ((30646, 30676), 'pandas.DataFrame', 'pd.DataFrame', (['diff'], {'copy': '(False)'}), '(diff, copy=False)\n', (30658, 30676), True, 'import pandas as pd\n'), ((33199, 33233), 'numpy.zeros', 'np.zeros', (['self.x.shape'], {'dtype': 'bool'}), '(self.x.shape, dtype=bool)\n', (33207, 33233), True, 'import numpy as np\n'), ((39980, 40006), 'numpy.equal', 'np.equal', (['self.flags', 'None'], {}), '(self.flags, None)\n', (39988, 40006), True, 'import numpy as np\n'), ((40316, 40361), 'numpy.roll', 'np.roll', (['is_after', '(-(half_window - 1))'], {'axis': '(0)'}), '(is_after, -(half_window - 1), axis=0)\n', (40323, 40361), True, 'import numpy as np\n'), ((40470, 40489), 'numpy.empty_like', 'np.empty_like', (['mask'], {}), '(mask)\n', (40483, 40489), True, 'import numpy as np\n'), ((43638, 43715), 'matplotlib.pyplot.plot', 'plt.plot', (['self.index', 'self.x[:, col]'], {'color': '"""lightgrey"""', 'marker': '"""."""', 'zorder': '(1)'}), "(self.index, self.x[:, col], color='lightgrey', marker='.', zorder=1)\n", (43646, 43715), True, 'import matplotlib.pyplot as plt\n'), ((44392, 44404), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (44402, 44404), True, 'import matplotlib.pyplot as plt\n'), ((45806, 45840), 'numpy.zeros', 'np.zeros', (['self.x.shape'], {'dtype': 'bool'}), '(self.x.shape, dtype=bool)\n', (45814, 45840), True, 'import numpy as np\n'), ((51194, 51213), 'pandas.DataFrame', 'pd.DataFrame', (['stats'], {}), '(stats)\n', (51206, 51213), True, 'import pandas as pd\n'), ((1971, 1991), 
'numpy.mod', 'np.mod', (['axis', 'x.ndim'], {}), '(axis, x.ndim)\n', (1977, 1991), True, 'import numpy as np\n'), ((4262, 4285), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (4270, 4285), True, 'import numpy as np\n'), ((9090, 9132), 'numpy.nonzero', 'np.nonzero', (['(run_lengths[:n_runs] >= length)'], {}), '(run_lengths[:n_runs] >= length)\n', (9100, 9132), True, 'import numpy as np\n'), ((11412, 11424), 'numpy.diag', 'np.diag', (['vec'], {}), '(vec)\n', (11419, 11424), True, 'import numpy as np\n'), ((12601, 12613), 'numpy.prod', 'np.prod', (['dim'], {}), '(dim)\n', (12608, 12613), True, 'import numpy as np\n'), ((14408, 14449), 'numpy.linalg.norm', 'np.linalg.norm', (['(mat_hat - last_mat)', '"""fro"""'], {}), "(mat_hat - last_mat, 'fro')\n", (14422, 14449), True, 'import numpy as np\n'), ((16367, 16379), 'numpy.prod', 'np.prod', (['dim'], {}), '(dim)\n', (16374, 16379), True, 'import numpy as np\n'), ((18330, 18371), 'numpy.linalg.norm', 'np.linalg.norm', (['(mat_hat - last_mat)', '"""fro"""'], {}), "(mat_hat - last_mat, 'fro')\n", (18344, 18371), True, 'import numpy as np\n'), ((18447, 18461), 'numpy.mod', 'np.mod', (['it', '(10)'], {}), '(it, 10)\n', (18453, 18461), True, 'import numpy as np\n'), ((18547, 18577), 'numpy.linalg.eig', 'np.linalg.eig', (['(temp1 @ temp1.T)'], {}), '(temp1 @ temp1.T)\n', (18560, 18577), True, 'import numpy as np\n'), ((20096, 20121), 'pandas.RangeIndex', 'pd.RangeIndex', (['x.shape[0]'], {}), '(x.shape[0])\n', (20109, 20121), True, 'import pandas as pd\n'), ((20149, 20174), 'pandas.RangeIndex', 'pd.RangeIndex', (['x.shape[1]'], {}), '(x.shape[1])\n', (20162, 20174), True, 'import pandas as pd\n'), ((21761, 21787), 'numpy.isin', 'np.isin', (['self.flags', 'flags'], {}), '(self.flags, flags)\n', (21768, 21787), True, 'import numpy as np\n'), ((23101, 23124), 'numpy.abs', 'np.abs', (['(self.x - median)'], {}), '(self.x - median)\n', (23107, 23124), True, 'import numpy as np\n'), ((23127, 23151), 
'numpy.abs', 'np.abs', (['(median * medians)'], {}), '(median * medians)\n', (23133, 23151), True, 'import numpy as np\n'), ((26105, 26130), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (26128, 26130), False, 'import warnings\n'), ((26144, 26244), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning', 'message': '"""All-NaN slice encountered"""'}), "('ignore', category=RuntimeWarning, message=\n 'All-NaN slice encountered')\n", (26167, 26244), False, 'import warnings\n'), ((26289, 26318), 'numpy.nanmedian', 'np.nanmedian', (['shifted'], {'axis': '(0)'}), '(shifted, axis=0)\n', (26301, 26318), True, 'import numpy as np\n'), ((33167, 33183), 'numpy.isnan', 'np.isnan', (['self.x'], {}), '(self.x)\n', (33175, 33183), True, 'import numpy as np\n'), ((40701, 40734), 'numpy.repeat', 'np.repeat', (['is_short_run', 'rlengths'], {}), '(is_short_run, rlengths)\n', (40710, 40734), True, 'import numpy as np\n'), ((44328, 44383), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'colors[flag]', 'label': 'flag', 'zorder': '(2)'}), '(x, y, c=colors[flag], label=flag, zorder=2)\n', (44339, 44383), True, 'import matplotlib.pyplot as plt\n'), ((45906, 45930), 'numpy.isnan', 'np.isnan', (['self.x[:, col]'], {}), '(self.x[:, col])\n', (45914, 45930), True, 'import numpy as np\n'), ((49409, 49439), 'numpy.where', 'np.where', (['mask', 'np.nan', 'self.x'], {}), '(mask, np.nan, self.x)\n', (49417, 49439), True, 'import numpy as np\n'), ((11138, 11150), 'numpy.diag', 'np.diag', (['mid'], {}), '(mid)\n', (11145, 11150), True, 'import numpy as np\n'), ((13940, 13968), 'numpy.mean', 'np.mean', (['(rho * x + t)'], {'axis': '(0)'}), '(rho * x + t, axis=0)\n', (13947, 13968), True, 'import numpy as np\n'), ((14002, 14105), 'numpy.append', 'np.append', (['(mat1[:, :max_lag] / rho)', '((mat1[:, max_lag:] + lambda0 * mat0) / (rho + lambda0))'], {'axis': '(1)'}), '(mat1[:, :max_lag] / rho, (mat1[:, max_lag:] + 
lambda0 * mat0) / (\n rho + lambda0), axis=1)\n', (14011, 14105), True, 'import numpy as np\n'), ((15009, 15026), 'numpy.where', 'np.where', (['(s > tau)'], {}), '(s > tau)\n', (15017, 15026), True, 'import numpy as np\n'), ((18038, 18142), 'numpy.append', 'np.append', (['(temp2[:, :max_lag] / rho)', '((temp2[:, max_lag:] + lambda0 * mat0) / (rho + lambda0))'], {'axis': '(1)'}), '(temp2[:, :max_lag] / rho, (temp2[:, max_lag:] + lambda0 * mat0) /\n (rho + lambda0), axis=1)\n', (18047, 18142), True, 'import numpy as np\n'), ((21117, 21133), 'numpy.isnan', 'np.isnan', (['self.x'], {}), '(self.x)\n', (21125, 21133), True, 'import numpy as np\n'), ((31696, 31721), 'numpy.minimum', 'np.minimum', (['before', 'after'], {}), '(before, after)\n', (31706, 31721), True, 'import numpy as np\n'), ((31732, 31757), 'numpy.maximum', 'np.maximum', (['before', 'after'], {}), '(before, after)\n', (31742, 31757), True, 'import numpy as np\n'), ((33299, 33330), 'numpy.flatnonzero', 'np.flatnonzero', (['not_nan[:, col]'], {}), '(not_nan[:, col])\n', (33313, 33330), True, 'import numpy as np\n'), ((33509, 33561), 'numpy.abs', 'np.abs', (['(self.x[current, col] - self.x[previous, col])'], {}), '(self.x[current, col] - self.x[previous, col])\n', (33515, 33561), True, 'import numpy as np\n'), ((33612, 33708), 'numpy.abs', 'np.abs', (['(relative_median_prediction[current, col] - relative_median_prediction[\n previous, col])'], {}), '(relative_median_prediction[current, col] -\n relative_median_prediction[previous, col])\n', (33618, 33708), True, 'import numpy as np\n'), ((35402, 35439), 'numpy.delete', 'np.delete', (['flagged_indices', 'unflagged'], {}), '(flagged_indices, unflagged)\n', (35411, 35439), True, 'import numpy as np\n'), ((35470, 35503), 'numpy.delete', 'np.delete', (['indices_idx', 'unflagged'], {}), '(indices_idx, unflagged)\n', (35479, 35503), True, 'import numpy as np\n'), ((36232, 36263), 'numpy.delete', 'np.delete', (['indices', 'indices_idx'], {}), '(indices, 
indices_idx)\n', (36241, 36263), True, 'import numpy as np\n'), ((40622, 40652), 'numpy.where', 'np.where', (['rvalues', 'rlengths', '(0)'], {}), '(rvalues, rlengths, 0)\n', (40630, 40652), True, 'import numpy as np\n'), ((43018, 43060), 'pandas.concat', 'pd.concat', (['stats'], {'names': "['column', 'flag']"}), "(stats, names=['column', 'flag'])\n", (43027, 43060), True, 'import pandas as pd\n'), ((43169, 43192), 'pandas.unique', 'pd.unique', (['self.flagged'], {}), '(self.flagged)\n', (43178, 43192), True, 'import pandas as pd\n'), ((7678, 7704), 'numpy.append', 'np.append', (['(0)', 'mask_lengths'], {}), '(0, mask_lengths)\n', (7687, 7704), True, 'import numpy as np\n'), ((13834, 13852), 'numpy.linalg.pinv', 'np.linalg.pinv', (['qm'], {}), '(qm)\n', (13848, 13852), True, 'import numpy as np\n'), ((14231, 14259), 'numpy.mean', 'np.mean', (['(x + t / rho)'], {'axis': '(0)'}), '(x + t / rho, axis=0)\n', (14238, 14259), True, 'import numpy as np\n'), ((15141, 15151), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (15148, 15151), True, 'import numpy as np\n'), ((33982, 34035), 'numpy.abs', 'np.abs', (['(1 - relative_median_prediction[previous, col])'], {}), '(1 - relative_median_prediction[previous, col])\n', (33988, 34035), True, 'import numpy as np\n'), ((34057, 34115), 'numpy.abs', 'np.abs', (['(1 - relative_median_prediction_long[previous, col])'], {}), '(1 - relative_median_prediction_long[previous, col])\n', (34063, 34115), True, 'import numpy as np\n'), ((34197, 34249), 'numpy.abs', 'np.abs', (['(1 - relative_median_prediction[current, col])'], {}), '(1 - relative_median_prediction[current, col])\n', (34203, 34249), True, 'import numpy as np\n'), ((34271, 34328), 'numpy.abs', 'np.abs', (['(1 - relative_median_prediction_long[current, col])'], {}), '(1 - relative_median_prediction_long[current, col])\n', (34277, 34328), True, 'import numpy as np\n'), ((50964, 50976), 'numpy.isnan', 'np.isnan', (['pe'], {}), '(pe)\n', (50972, 50976), True, 'import numpy as 
np\n'), ((51106, 51117), 'numpy.mean', 'np.mean', (['pe'], {}), '(pe)\n', (51113, 51117), True, 'import numpy as np\n'), ((17505, 17523), 'numpy.linalg.pinv', 'np.linalg.pinv', (['qm'], {}), '(qm)\n', (17519, 17523), True, 'import numpy as np\n'), ((17686, 17718), 'numpy.arange', 'np.arange', (['(0)', '(dim_time - max_lag)'], {}), '(0, dim_time - max_lag)\n', (17695, 17718), True, 'import numpy as np\n'), ((17739, 17761), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (17756, 17761), True, 'import numpy as np\n'), ((49666, 49685), 'numpy.ceil', 'np.ceil', (['(n / blocks)'], {}), '(n / blocks)\n', (49673, 49685), True, 'import numpy as np\n'), ((51151, 51161), 'numpy.abs', 'np.abs', (['pe'], {}), '(pe)\n', (51157, 51161), True, 'import numpy as np\n'), ((17908, 17937), 'numpy.linalg.pinv', 'np.linalg.pinv', (['qm[idx[:], :]'], {}), '(qm[idx[:], :])\n', (17922, 17937), True, 'import numpy as np\n'), ((35340, 35360), 'numpy.diff', 'np.diff', (['indices_idx'], {}), '(indices_idx)\n', (35347, 35360), True, 'import numpy as np\n'), ((42877, 42903), 'pandas.Series', 'pd.Series', (['self.xi[:, col]'], {}), '(self.xi[:, col])\n', (42886, 42903), True, 'import pandas as pd\n'), ((40155, 40185), 'pandas.DataFrame', 'pd.DataFrame', (['mask'], {'copy': '(False)'}), '(mask, copy=False)\n', (40167, 40185), True, 'import pandas as pd\n'), ((40847, 40878), 'pandas.DataFrame', 'pd.DataFrame', (['(~mask)'], {'copy': '(False)'}), '(~mask, copy=False)\n', (40859, 40878), True, 'import pandas as pd\n')] |
# coding=utf-8
# Copyright 2020 The Adp Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Continue training WRN/CNN on SVHN/CIFAR10 w SGD/DPSGD on 10 classes."""
import logging
import operator
import pickle
import time
from absl import app
from absl import flags
from jax import grad
from jax import jit
from jax import partial
from jax import random
from jax import tree_util
from jax import vmap
import jax.experimental.optimizers as optimizers
import jax.experimental.stax as stax
from jax.lax import stop_gradient
import jax.numpy as np
import numpy as onp
from tensorflow.compat.v1.io import gfile
# https://github.com/tensorflow/privacy
from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
from adp import data
from adp import datasets
# Command-line flags controlling the experiment.
FLAGS = flags.FLAGS
flags.DEFINE_string(
    'config', '', 'Configuration.')
flags.DEFINE_boolean('dpsgd', True,
                     'True, train with DP-SGD. False, train with vanilla SGD.')
flags.DEFINE_string('dataset', 'svhn_cropped', 'Dataset, or cifar10')
flags.DEFINE_string('exp_dir', None, 'Experiment directory')
flags.DEFINE_string('pretrained_dir', None, 'Path to pretrained model')
# BEGIN: JAX implementation of WideResNet
# paper: https://arxiv.org/abs/1605.07146
# code : https://github.com/szagoruyko/wide-residual-networks
def wide_resnet_block(num_channels, strides=(1, 1), channel_mismatch=False):
    """Wide ResNet block.

    Pre-activation residual block: BatchNorm+ReLU, two 3x3 convolutions on
    the residual path, summed with an identity (or projecting) shortcut.
    """
    norm_act = stax.serial(stax.BatchNorm(), stax.Relu)
    residual = stax.serial(
        norm_act,
        stax.Conv(num_channels, (3, 3), strides, padding='SAME'),
        stax.BatchNorm(), stax.Relu,
        stax.Conv(num_channels, (3, 3), strides=(1, 1), padding='SAME'))
    if channel_mismatch:
        # Project the shortcut when channel counts (or strides) differ.
        shortcut = stax.serial(
            norm_act,
            stax.Conv(num_channels, (3, 3), strides, padding='SAME'))
    else:
        shortcut = stax.Identity
    return stax.serial(stax.FanOut(2), stax.parallel(residual, shortcut), stax.FanInSum)
def wide_resnet_group(n, num_channels, strides=(1, 1)):
    """Group of `n` wide ResNet blocks; only the first may change channels/strides."""
    layers = [wide_resnet_block(num_channels, strides, channel_mismatch=True)]
    layers.extend(
        wide_resnet_block(num_channels, strides=(1, 1)) for _ in range(1, n))
    return stax.serial(*layers)
def wide_resnet(n, k, num_classes):
    """Original WRN from paper and previous experiments.

    Depth is controlled by `n` (blocks per group), width by multiplier `k`.
    """
    # Channel widths per group, scaled by the widening factor k.
    widths = (16 * k, 32 * k, 64 * k)
    return stax.serial(
        stax.Conv(16, (3, 3), padding='SAME'),
        wide_resnet_group(n, widths[0], strides=(1, 1)),
        wide_resnet_group(n, widths[1], strides=(2, 2)),
        wide_resnet_group(n, widths[2], strides=(2, 2)),
        stax.BatchNorm(), stax.Relu,
        stax.AvgPool((8, 8)), stax.Flatten,
        stax.Dense(num_classes))
# END: JAX implementation of WideResNet
def cnn(num_classes=10):
    """Small tanh CNN: two conv/pool stages, then a 64-unit embedding and logits."""
    layers = [
        stax.Conv(16, (8, 8), padding='SAME', strides=(2, 2)),
        stax.Tanh,
        stax.MaxPool((2, 2), (1, 1)),
        stax.Conv(32, (4, 4), padding='VALID', strides=(2, 2)),
        stax.Tanh,
        stax.MaxPool((2, 2), (1, 1)),
        stax.Flatten,  # (-1, 800)
        stax.Dense(64),
        stax.Tanh,  # embeddings
        stax.Dense(num_classes),  # logits
    ]
    return stax.serial(*layers)
def compute_epsilon(steps, batch_size, num_examples=50000, target_delta=1e-5,
                    noise_multiplier=1.1):
  """Compute the DP epsilon spent by DP-SGD via RDP accounting.

  Args:
    steps: Number of training steps taken.
    batch_size: Number of examples per step.
    num_examples: Total number of training examples (sampling population).
    target_delta: Target delta of the (epsilon, delta)-DP guarantee.
    noise_multiplier: Ratio of noise stddev to the clipping norm.

  Returns:
    The epsilon of the (epsilon, target_delta)-DP guarantee.
  """
  if num_examples * target_delta > 1.:
    logging.warning('Your target_delta might be too high.')
  # Sampling probability of each example per step.
  q = batch_size / float(num_examples)
  # Bug fix: `list + range` raises TypeError in Python 3; the range of
  # integer orders must be materialized as a list before concatenation.
  orders = list(np.linspace(1.1, 10.9, 99)) + list(range(11, 64))
  rdp_const = compute_rdp(q, noise_multiplier, steps, orders)
  eps, _, _ = get_privacy_spent(orders, rdp_const, target_delta=target_delta)
  return eps
def main(_):
  """Fine-tune a small classifier head on frozen pretrained conv features.

  Loads a pretrained optimizer state, keeps its first 7 parameter groups as a
  fixed feature extractor, and trains the dense head with SGD/momentum or with
  DP-SGD (per-example clipped and noised gradients). Checkpoints the optimizer
  state and, for DP-SGD, logs the current privacy spend once per epoch.
  """
  logging.info('Starting experiment.')
  configs = FLAGS.config
  # Create model folder for outputs
  try:
    gfile.MakeDirs(FLAGS.exp_dir)
  except gfile.GOSError:
    pass  # best-effort: the directory may already exist
  stdout_log = gfile.Open('{}/stdout.log'.format(FLAGS.exp_dir), 'w+')
  if configs.optimization == 'sgd':
    lr_schedule = optimizers.make_schedule(configs.learning_rate)
    opt_init, opt_update, get_params = optimizers.sgd(lr_schedule)
  # NOTE(review): `cosine` is defined further down inside this function, so
  # this branch calls it before its `def` statement has executed — verify
  # that the 'momentum' path actually works (or hoist `cosine` above).
  elif configs.optimization == 'momentum':
    lr_schedule = cosine(configs.learning_rate, configs.train_steps)
    opt_init, opt_update, get_params = optimizers.momentum(lr_schedule, 0.9)
  else:
    raise ValueError('Optimizer not implemented.')
  with gfile.Open(FLAGS.pretrained_dir, 'rb') as fpre:
    pretrained_opt_state = optimizers.pack_optimizer_state(
        pickle.load(fpre))
  # Keep the first 7 parameter groups (the conv stack below) frozen.
  fixed_params = get_params(pretrained_opt_state)[:7]
  # BEGIN: define the classifier model
  init_fn_0, apply_fn_0 = stax.serial(
      stax.Conv(16, (8, 8), padding='SAME', strides=(2, 2)),
      stax.Tanh,
      stax.MaxPool((2, 2), (1, 1)),
      stax.Conv(32, (4, 4), padding='VALID', strides=(2, 2)),
      stax.Tanh,
      stax.MaxPool((2, 2), (1, 1)),
      stax.Flatten,  # representations
  )
  init_fn_1, apply_fn_1 = stax.serial(
      stax.Dense(64),
      stax.Tanh,  # embeddings
      stax.Dense(10),  # logits
  )
  def predict(params, inputs):
    # Frozen pretrained features -> trainable dense head.
    representations = apply_fn_0(fixed_params, inputs)  # use pretrained params
    logits = apply_fn_1(params, representations)
    return logits
  # END: define the classifier model
  if configs.seed is not None:
    key = random.PRNGKey(configs.seed)
  else:
    key = random.PRNGKey(int(time.time()))
  # Only the head's params are trained; init_fn_0's output is discarded.
  _, _ = init_fn_0(key, (-1, 32, 32, 3))
  _, params = init_fn_1(key, (-1, 800))
  opt_state = opt_init(params)
  logging.info('Loading data.')
  tic = time.time()
  train_images, train_labels, _ = datasets.get_dataset_split(
      FLAGS.dataset, 'train')
  train_mu, train_std = onp.mean(train_images), onp.std(train_images)
  n_train = len(train_images)
  train = data.DataChunk(
      X=(train_images - train_mu) / train_std,
      Y=train_labels,
      image_size=32, image_channels=3, label_dim=1, label_format='numeric')
  test_images, test_labels, _ = datasets.get_dataset_split(
      FLAGS.dataset, 'test')
  test = data.DataChunk(
      X=(test_images - train_mu) / train_std,  # normalize w train mean/std
      Y=test_labels,
      image_size=32, image_channels=3, label_dim=1, label_format='numeric')
  # Data augmentation
  if configs.augment_data:
    augmentation = data.chain_transforms(
        data.RandomHorizontalFlip(0.5), data.RandomCrop(4), data.ToDevice)
  else:
    augmentation = None
  batch = data.minibatcher(train, configs.batch_size, transform=augmentation)
  # count params of JAX model
  def count_parameters(params):
    return tree_util.tree_reduce(
        operator.add, tree_util.tree_map(lambda x: np.prod(x.shape), params))
  logging.info('Number of parameters: %d', count_parameters(params))
  stdout_log.write('Number of params: {}\n'.format(count_parameters(params)))
  # loss functions
  def cross_entropy_loss(params, x_img, y_lbl):
    return -np.mean(stax.logsoftmax(predict(params, x_img)) * y_lbl)
  def mse_loss(params, x_img, y_lbl):
    return 0.5 * np.mean((y_lbl - predict(params, x_img)) ** 2)
  def accuracy(y_lbl_hat, y_lbl):
    # Labels are one-hot here; argmax recovers the class index.
    target_class = np.argmax(y_lbl, axis=1)
    predicted_class = np.argmax(y_lbl_hat, axis=1)
    return np.mean(predicted_class == target_class)
  # Loss and gradient
  if configs.loss == 'xent':
    loss = cross_entropy_loss
  elif configs.loss == 'mse':
    loss = mse_loss
  else:
    raise ValueError('Loss function not implemented.')
  grad_loss = jit(grad(loss))
  # learning rate schedule and optimizer
  def cosine(initial_step_size, train_steps):
    # Quarter-cosine decay: lr goes from initial_step_size to 0 over train_steps.
    k = np.pi / (2.0 * train_steps)
    def schedule(i):
      return initial_step_size * np.cos(k * i)
    return schedule
  def private_grad(params, batch,
                   rng, l2_norm_clip, noise_multiplier, batch_size):
    """Return differentially private gradients of params, evaluated on batch."""
    def _clipped_grad(params, single_example_batch):
      """Evaluate gradient for a single-example batch and clip its grad norm."""
      grads = grad_loss(params,
                        single_example_batch[0].reshape((-1, 32, 32, 3)),
                        single_example_batch[1])
      nonempty_grads, tree_def = tree_util.tree_flatten(grads)
      # Global l2 norm across all leaves of the gradient pytree.
      total_grad_norm = np.linalg.norm(
          [np.linalg.norm(neg.ravel()) for neg in nonempty_grads])
      # Clip to l2_norm_clip; stop_gradient keeps the divisor out of autodiff.
      divisor = stop_gradient(np.amax((total_grad_norm / l2_norm_clip, 1.)))
      normalized_nonempty_grads = [neg / divisor for neg in nonempty_grads]
      return tree_util.tree_unflatten(tree_def, normalized_nonempty_grads)
    # vmap over the leading (per-example) axis of the batch.
    px_clipped_grad_fn = vmap(partial(_clipped_grad, params))
    std_dev = l2_norm_clip * noise_multiplier
    noise_ = lambda n: n + std_dev * random.normal(rng, n.shape)
    normalize_ = lambda n: n / float(batch_size)
    sum_ = lambda n: np.sum(n, 0)  # aggregate
    aggregated_clipped_grads = tree_util.tree_map(
        sum_, px_clipped_grad_fn(batch))
    noised_aggregated_clipped_grads = tree_util.tree_map(
        noise_, aggregated_clipped_grads)
    normalized_noised_aggregated_clipped_grads = (
        tree_util.tree_map(normalize_, noised_aggregated_clipped_grads)
    )
    return normalized_noised_aggregated_clipped_grads
  # summarize measurements
  steps_per_epoch = n_train // configs.batch_size
  def summarize(step, params):
    """Compute measurements in a zipped way."""
    set_entries = [train, test]
    set_bsizes = [configs.train_eval_bsize, configs.test_eval_bsize]
    set_names, loss_dict, acc_dict = ['train', 'test'], {}, {}
    for set_entry, set_bsize, set_name in zip(
        set_entries, set_bsizes, set_names):
      temp_loss, temp_acc, points = 0.0, 0.0, 0
      for b in data.batch(set_entry, set_bsize):
        # Weight per-batch metrics by batch size for a correct overall mean.
        temp_loss += loss(params, b.X, b.Y) * b.X.shape[0]
        temp_acc += accuracy(predict(params, b.X), b.Y) * b.X.shape[0]
        points += b.X.shape[0]
      loss_dict[set_name] = temp_loss / float(points)
      acc_dict[set_name] = temp_acc / float(points)
    logging.info('Step: %s', str(step))
    logging.info('Train acc : %.4f', acc_dict['train'])
    logging.info('Train loss: %.4f', loss_dict['train'])
    logging.info('Test acc : %.4f', acc_dict['test'])
    logging.info('Test loss : %.4f', loss_dict['test'])
    stdout_log.write('Step: {}\n'.format(step))
    stdout_log.write('Train acc : {}\n'.format(acc_dict['train']))
    stdout_log.write('Train loss: {}\n'.format(loss_dict['train']))
    stdout_log.write('Test acc : {}\n'.format(acc_dict['test']))
    stdout_log.write('Test loss : {}\n'.format(loss_dict['test']))
    stdout_log.flush()
    return acc_dict['test']
  toc = time.time()
  logging.info('Elapsed SETUP time: %s', str(toc - tic))
  stdout_log.write('Elapsed SETUP time: {}\n'.format(toc - tic))
  # BEGIN: training steps
  logging.info('Training network.')
  tic = time.time()
  t = time.time()
  for s in range(configs.train_steps):
    b = next(batch)
    params = get_params(opt_state)
    # t0 = time.time()
    if FLAGS.dpsgd:
      key = random.fold_in(key, s)  # get new key for new random numbers
      # Batch is reshaped to (batch, 1, 32, 32, 3) so vmap sees one example
      # per leading index inside private_grad.
      opt_state = opt_update(
          s,
          private_grad(params, (b.X.reshape((-1, 1, 32, 32, 3)), b.Y),
                       key, configs.l2_norm_clip, configs.noise_multiplier,
                       configs.batch_size),
          opt_state)
    else:
      opt_state = opt_update(s, grad_loss(params, b.X, b.Y), opt_state)
    # t1 = time.time()
    # logging.info('batch update time: %s', str(t1 - t0))
    # Once per epoch: checkpoint, report privacy spend (DP-SGD) and timing.
    if s % steps_per_epoch == 0:
      with gfile.Open('{}/ckpt_{}'.format(
          FLAGS.exp_dir, int(s/steps_per_epoch)), 'wb') as fckpt:
        pickle.dump(optimizers.unpack_optimizer_state(opt_state), fckpt)
      if FLAGS.dpsgd:
        eps = compute_epsilon(s, configs.batch_size, n_train,
                              configs.target_delta, configs.noise_multiplier)
        stdout_log.write(
            'For delta={:.0e}, current epsilon is: {:.2f}\n'.format(
                configs.target_delta, eps))
      logging.info('Elapsed EPOCH time: %s', str(time.time() - t))
      stdout_log.write('Elapsed EPOCH time: {}'.format(time.time() - t))
      stdout_log.flush()
      t = time.time()
  toc = time.time()
  summarize(configs.train_steps, params)
  logging.info('Elapsed TRAIN time: %s', str(toc - tic))
  stdout_log.write('Elapsed TRAIN time: {}'.format(toc - tic))
  stdout_log.close()
  # END: training steps
# Parse absl flags and dispatch to main() when executed as a script.
if __name__ == '__main__':
  app.run(main)
| [
"jax.experimental.stax.BatchNorm",
"jax.experimental.optimizers.unpack_optimizer_state",
"tensorflow.compat.v1.io.gfile.Open",
"jax.experimental.stax.serial",
"jax.random.PRNGKey",
"tensorflow.compat.v1.io.gfile.MakeDirs",
"numpy.mean",
"absl.flags.DEFINE_boolean",
"pickle.load",
"adp.data.DataChu... | [((1378, 1429), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""config"""', '""""""', '"""Configuration."""'], {}), "('config', '', 'Configuration.')\n", (1397, 1429), False, 'from absl import flags\n'), ((1435, 1533), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""dpsgd"""', '(True)', '"""True, train with DP-SGD. False, train with vanilla SGD."""'], {}), "('dpsgd', True,\n 'True, train with DP-SGD. False, train with vanilla SGD.')\n", (1455, 1533), False, 'from absl import flags\n'), ((1551, 1620), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset"""', '"""svhn_cropped"""', '"""Dataset, or cifar10"""'], {}), "('dataset', 'svhn_cropped', 'Dataset, or cifar10')\n", (1570, 1620), False, 'from absl import flags\n'), ((1621, 1681), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""exp_dir"""', 'None', '"""Experiment directory"""'], {}), "('exp_dir', None, 'Experiment directory')\n", (1640, 1681), False, 'from absl import flags\n'), ((1682, 1753), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""pretrained_dir"""', 'None', '"""Path to pretrained model"""'], {}), "('pretrained_dir', None, 'Path to pretrained model')\n", (1701, 1753), False, 'from absl import flags\n'), ((2723, 2743), 'jax.experimental.stax.serial', 'stax.serial', (['*blocks'], {}), '(*blocks)\n', (2734, 2743), True, 'import jax.experimental.stax as stax\n'), ((3959, 4006), 'tensorflow_privacy.privacy.analysis.rdp_accountant.compute_rdp', 'compute_rdp', (['q', 'noise_multiplier', 'steps', 'orders'], {}), '(q, noise_multiplier, steps, orders)\n', (3970, 4006), False, 'from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp\n'), ((4021, 4084), 'tensorflow_privacy.privacy.analysis.rdp_accountant.get_privacy_spent', 'get_privacy_spent', (['orders', 'rdp_const'], {'target_delta': 'target_delta'}), '(orders, rdp_const, target_delta=target_delta)\n', (4038, 4084), False, 'from 
tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent\n'), ((4116, 4152), 'logging.info', 'logging.info', (['"""Starting experiment."""'], {}), "('Starting experiment.')\n", (4128, 4152), False, 'import logging\n'), ((5909, 5938), 'logging.info', 'logging.info', (['"""Loading data."""'], {}), "('Loading data.')\n", (5921, 5938), False, 'import logging\n'), ((5947, 5958), 'time.time', 'time.time', ([], {}), '()\n', (5956, 5958), False, 'import time\n'), ((5994, 6044), 'adp.datasets.get_dataset_split', 'datasets.get_dataset_split', (['FLAGS.dataset', '"""train"""'], {}), "(FLAGS.dataset, 'train')\n", (6020, 6044), False, 'from adp import datasets\n'), ((6162, 6307), 'adp.data.DataChunk', 'data.DataChunk', ([], {'X': '((train_images - train_mu) / train_std)', 'Y': 'train_labels', 'image_size': '(32)', 'image_channels': '(3)', 'label_dim': '(1)', 'label_format': '"""numeric"""'}), "(X=(train_images - train_mu) / train_std, Y=train_labels,\n image_size=32, image_channels=3, label_dim=1, label_format='numeric')\n", (6176, 6307), False, 'from adp import data\n'), ((6356, 6405), 'adp.datasets.get_dataset_split', 'datasets.get_dataset_split', (['FLAGS.dataset', '"""test"""'], {}), "(FLAGS.dataset, 'test')\n", (6382, 6405), False, 'from adp import datasets\n'), ((6422, 6565), 'adp.data.DataChunk', 'data.DataChunk', ([], {'X': '((test_images - train_mu) / train_std)', 'Y': 'test_labels', 'image_size': '(32)', 'image_channels': '(3)', 'label_dim': '(1)', 'label_format': '"""numeric"""'}), "(X=(test_images - train_mu) / train_std, Y=test_labels,\n image_size=32, image_channels=3, label_dim=1, label_format='numeric')\n", (6436, 6565), False, 'from adp import data\n'), ((6820, 6887), 'adp.data.minibatcher', 'data.minibatcher', (['train', 'configs.batch_size'], {'transform': 'augmentation'}), '(train, configs.batch_size, transform=augmentation)\n', (6836, 6887), False, 'from adp import data\n'), ((11010, 11021), 'time.time', 'time.time', ([], {}), '()\n', 
(11019, 11021), False, 'import time\n'), ((11173, 11206), 'logging.info', 'logging.info', (['"""Training network."""'], {}), "('Training network.')\n", (11185, 11206), False, 'import logging\n'), ((11215, 11226), 'time.time', 'time.time', ([], {}), '()\n', (11224, 11226), False, 'import time\n'), ((11233, 11244), 'time.time', 'time.time', ([], {}), '()\n', (11242, 11244), False, 'import time\n'), ((12590, 12601), 'time.time', 'time.time', ([], {}), '()\n', (12599, 12601), False, 'import time\n'), ((12839, 12852), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (12846, 12852), False, 'from absl import app\n'), ((2026, 2042), 'jax.experimental.stax.BatchNorm', 'stax.BatchNorm', ([], {}), '()\n', (2040, 2042), True, 'import jax.experimental.stax as stax\n'), ((2093, 2149), 'jax.experimental.stax.Conv', 'stax.Conv', (['num_channels', '(3, 3)', 'strides'], {'padding': '"""SAME"""'}), "(num_channels, (3, 3), strides, padding='SAME')\n", (2102, 2149), True, 'import jax.experimental.stax as stax\n'), ((2157, 2173), 'jax.experimental.stax.BatchNorm', 'stax.BatchNorm', ([], {}), '()\n', (2171, 2173), True, 'import jax.experimental.stax as stax\n'), ((2192, 2255), 'jax.experimental.stax.Conv', 'stax.Conv', (['num_channels', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""SAME"""'}), "(num_channels, (3, 3), strides=(1, 1), padding='SAME')\n", (2201, 2255), True, 'import jax.experimental.stax as stax\n'), ((2435, 2449), 'jax.experimental.stax.FanOut', 'stax.FanOut', (['(2)'], {}), '(2)\n', (2446, 2449), True, 'import jax.experimental.stax as stax\n'), ((2451, 2474), 'jax.experimental.stax.parallel', 'stax.parallel', (['mid', 'cut'], {}), '(mid, cut)\n', (2464, 2474), True, 'import jax.experimental.stax as stax\n'), ((2868, 2905), 'jax.experimental.stax.Conv', 'stax.Conv', (['(16)', '(3, 3)'], {'padding': '"""SAME"""'}), "(16, (3, 3), padding='SAME')\n", (2877, 2905), True, 'import jax.experimental.stax as stax\n'), ((3069, 3085), 'jax.experimental.stax.BatchNorm', 
'stax.BatchNorm', ([], {}), '()\n', (3083, 3085), True, 'import jax.experimental.stax as stax\n'), ((3104, 3124), 'jax.experimental.stax.AvgPool', 'stax.AvgPool', (['(8, 8)'], {}), '((8, 8))\n', (3116, 3124), True, 'import jax.experimental.stax as stax\n'), ((3146, 3169), 'jax.experimental.stax.Dense', 'stax.Dense', (['num_classes'], {}), '(num_classes)\n', (3156, 3169), True, 'import jax.experimental.stax as stax\n'), ((3266, 3319), 'jax.experimental.stax.Conv', 'stax.Conv', (['(16)', '(8, 8)'], {'padding': '"""SAME"""', 'strides': '(2, 2)'}), "(16, (8, 8), padding='SAME', strides=(2, 2))\n", (3275, 3319), True, 'import jax.experimental.stax as stax\n'), ((3344, 3372), 'jax.experimental.stax.MaxPool', 'stax.MaxPool', (['(2, 2)', '(1, 1)'], {}), '((2, 2), (1, 1))\n', (3356, 3372), True, 'import jax.experimental.stax as stax\n'), ((3380, 3434), 'jax.experimental.stax.Conv', 'stax.Conv', (['(32)', '(4, 4)'], {'padding': '"""VALID"""', 'strides': '(2, 2)'}), "(32, (4, 4), padding='VALID', strides=(2, 2))\n", (3389, 3434), True, 'import jax.experimental.stax as stax\n'), ((3459, 3487), 'jax.experimental.stax.MaxPool', 'stax.MaxPool', (['(2, 2)', '(1, 1)'], {}), '((2, 2), (1, 1))\n', (3471, 3487), True, 'import jax.experimental.stax as stax\n'), ((3528, 3542), 'jax.experimental.stax.Dense', 'stax.Dense', (['(64)'], {}), '(64)\n', (3538, 3542), True, 'import jax.experimental.stax as stax\n'), ((3581, 3604), 'jax.experimental.stax.Dense', 'stax.Dense', (['num_classes'], {}), '(num_classes)\n', (3591, 3604), True, 'import jax.experimental.stax as stax\n'), ((3790, 3845), 'logging.warning', 'logging.warning', (['"""Your target_delta might be too high."""'], {}), "('Your target_delta might be too high.')\n", (3805, 3845), False, 'import logging\n'), ((4226, 4255), 'tensorflow.compat.v1.io.gfile.MakeDirs', 'gfile.MakeDirs', (['FLAGS.exp_dir'], {}), '(FLAGS.exp_dir)\n', (4240, 4255), False, 'from tensorflow.compat.v1.io import gfile\n'), ((4416, 4463), 
'jax.experimental.optimizers.make_schedule', 'optimizers.make_schedule', (['configs.learning_rate'], {}), '(configs.learning_rate)\n', (4440, 4463), True, 'import jax.experimental.optimizers as optimizers\n'), ((4503, 4530), 'jax.experimental.optimizers.sgd', 'optimizers.sgd', (['lr_schedule'], {}), '(lr_schedule)\n', (4517, 4530), True, 'import jax.experimental.optimizers as optimizers\n'), ((4787, 4825), 'tensorflow.compat.v1.io.gfile.Open', 'gfile.Open', (['FLAGS.pretrained_dir', '"""rb"""'], {}), "(FLAGS.pretrained_dir, 'rb')\n", (4797, 4825), False, 'from tensorflow.compat.v1.io import gfile\n'), ((5061, 5114), 'jax.experimental.stax.Conv', 'stax.Conv', (['(16)', '(8, 8)'], {'padding': '"""SAME"""', 'strides': '(2, 2)'}), "(16, (8, 8), padding='SAME', strides=(2, 2))\n", (5070, 5114), True, 'import jax.experimental.stax as stax\n'), ((5139, 5167), 'jax.experimental.stax.MaxPool', 'stax.MaxPool', (['(2, 2)', '(1, 1)'], {}), '((2, 2), (1, 1))\n', (5151, 5167), True, 'import jax.experimental.stax as stax\n'), ((5175, 5229), 'jax.experimental.stax.Conv', 'stax.Conv', (['(32)', '(4, 4)'], {'padding': '"""VALID"""', 'strides': '(2, 2)'}), "(32, (4, 4), padding='VALID', strides=(2, 2))\n", (5184, 5229), True, 'import jax.experimental.stax as stax\n'), ((5254, 5282), 'jax.experimental.stax.MaxPool', 'stax.MaxPool', (['(2, 2)', '(1, 1)'], {}), '((2, 2), (1, 1))\n', (5266, 5282), True, 'import jax.experimental.stax as stax\n'), ((5373, 5387), 'jax.experimental.stax.Dense', 'stax.Dense', (['(64)'], {}), '(64)\n', (5383, 5387), True, 'import jax.experimental.stax as stax\n'), ((5426, 5440), 'jax.experimental.stax.Dense', 'stax.Dense', (['(10)'], {}), '(10)\n', (5436, 5440), True, 'import jax.experimental.stax as stax\n'), ((5714, 5742), 'jax.random.PRNGKey', 'random.PRNGKey', (['configs.seed'], {}), '(configs.seed)\n', (5728, 5742), False, 'from jax import random\n'), ((6076, 6098), 'numpy.mean', 'onp.mean', (['train_images'], {}), '(train_images)\n', (6084, 6098), True, 
'import numpy as onp\n'), ((6100, 6121), 'numpy.std', 'onp.std', (['train_images'], {}), '(train_images)\n', (6107, 6121), True, 'import numpy as onp\n'), ((7504, 7528), 'jax.numpy.argmax', 'np.argmax', (['y_lbl'], {'axis': '(1)'}), '(y_lbl, axis=1)\n', (7513, 7528), True, 'import jax.numpy as np\n'), ((7551, 7579), 'jax.numpy.argmax', 'np.argmax', (['y_lbl_hat'], {'axis': '(1)'}), '(y_lbl_hat, axis=1)\n', (7560, 7579), True, 'import jax.numpy as np\n'), ((7591, 7631), 'jax.numpy.mean', 'np.mean', (['(predicted_class == target_class)'], {}), '(predicted_class == target_class)\n', (7598, 7631), True, 'import jax.numpy as np\n'), ((7845, 7855), 'jax.grad', 'grad', (['loss'], {}), '(loss)\n', (7849, 7855), False, 'from jax import grad\n'), ((9343, 9395), 'jax.tree_util.tree_map', 'tree_util.tree_map', (['noise_', 'aggregated_clipped_grads'], {}), '(noise_, aggregated_clipped_grads)\n', (9361, 9395), False, 'from jax import tree_util\n'), ((9464, 9527), 'jax.tree_util.tree_map', 'tree_util.tree_map', (['normalize_', 'noised_aggregated_clipped_grads'], {}), '(normalize_, noised_aggregated_clipped_grads)\n', (9482, 9527), False, 'from jax import tree_util\n'), ((10412, 10463), 'logging.info', 'logging.info', (['"""Train acc : %.4f"""', "acc_dict['train']"], {}), "('Train acc : %.4f', acc_dict['train'])\n", (10424, 10463), False, 'import logging\n'), ((10468, 10520), 'logging.info', 'logging.info', (['"""Train loss: %.4f"""', "loss_dict['train']"], {}), "('Train loss: %.4f', loss_dict['train'])\n", (10480, 10520), False, 'import logging\n'), ((10525, 10575), 'logging.info', 'logging.info', (['"""Test acc : %.4f"""', "acc_dict['test']"], {}), "('Test acc : %.4f', acc_dict['test'])\n", (10537, 10575), False, 'import logging\n'), ((10580, 10631), 'logging.info', 'logging.info', (['"""Test loss : %.4f"""', "loss_dict['test']"], {}), "('Test loss : %.4f', loss_dict['test'])\n", (10592, 10631), False, 'import logging\n'), ((2324, 2380), 'jax.experimental.stax.Conv', 
'stax.Conv', (['num_channels', '(3, 3)', 'strides'], {'padding': '"""SAME"""'}), "(num_channels, (3, 3), strides, padding='SAME')\n", (2333, 2380), True, 'import jax.experimental.stax as stax\n'), ((3901, 3927), 'jax.numpy.linspace', 'np.linspace', (['(1.1)', '(10.9)', '(99)'], {}), '(1.1, 10.9, 99)\n', (3912, 3927), True, 'import jax.numpy as np\n'), ((4682, 4719), 'jax.experimental.optimizers.momentum', 'optimizers.momentum', (['lr_schedule', '(0.9)'], {}), '(lr_schedule, 0.9)\n', (4701, 4719), True, 'import jax.experimental.optimizers as optimizers\n'), ((4903, 4920), 'pickle.load', 'pickle.load', (['fpre'], {}), '(fpre)\n', (4914, 4920), False, 'import pickle\n'), ((6711, 6741), 'adp.data.RandomHorizontalFlip', 'data.RandomHorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (6736, 6741), False, 'from adp import data\n'), ((6743, 6761), 'adp.data.RandomCrop', 'data.RandomCrop', (['(4)'], {}), '(4)\n', (6758, 6761), False, 'from adp import data\n'), ((8578, 8607), 'jax.tree_util.tree_flatten', 'tree_util.tree_flatten', (['grads'], {}), '(grads)\n', (8600, 8607), False, 'from jax import tree_util\n'), ((8881, 8942), 'jax.tree_util.tree_unflatten', 'tree_util.tree_unflatten', (['tree_def', 'normalized_nonempty_grads'], {}), '(tree_def, normalized_nonempty_grads)\n', (8905, 8942), False, 'from jax import tree_util\n'), ((8974, 9004), 'jax.partial', 'partial', (['_clipped_grad', 'params'], {}), '(_clipped_grad, params)\n', (8981, 9004), False, 'from jax import partial\n'), ((9187, 9199), 'jax.numpy.sum', 'np.sum', (['n', '(0)'], {}), '(n, 0)\n', (9193, 9199), True, 'import jax.numpy as np\n'), ((10066, 10098), 'adp.data.batch', 'data.batch', (['set_entry', 'set_bsize'], {}), '(set_entry, set_bsize)\n', (10076, 10098), False, 'from adp import data\n'), ((11396, 11418), 'jax.random.fold_in', 'random.fold_in', (['key', 's'], {}), '(key, s)\n', (11410, 11418), False, 'from jax import random\n'), ((12569, 12580), 'time.time', 'time.time', ([], {}), '()\n', (12578, 12580), False, 
'import time\n'), ((5780, 5791), 'time.time', 'time.time', ([], {}), '()\n', (5789, 5791), False, 'import time\n'), ((8035, 8048), 'jax.numpy.cos', 'np.cos', (['(k * i)'], {}), '(k * i)\n', (8041, 8048), True, 'import jax.numpy as np\n'), ((8745, 8791), 'jax.numpy.amax', 'np.amax', (['(total_grad_norm / l2_norm_clip, 1.0)'], {}), '((total_grad_norm / l2_norm_clip, 1.0))\n', (8752, 8791), True, 'import jax.numpy as np\n'), ((7036, 7052), 'jax.numpy.prod', 'np.prod', (['x.shape'], {}), '(x.shape)\n', (7043, 7052), True, 'import jax.numpy as np\n'), ((9089, 9116), 'jax.random.normal', 'random.normal', (['rng', 'n.shape'], {}), '(rng, n.shape)\n', (9102, 9116), False, 'from jax import random\n'), ((12038, 12082), 'jax.experimental.optimizers.unpack_optimizer_state', 'optimizers.unpack_optimizer_state', (['opt_state'], {}), '(opt_state)\n', (12071, 12082), True, 'import jax.experimental.optimizers as optimizers\n'), ((12443, 12454), 'time.time', 'time.time', ([], {}), '()\n', (12452, 12454), False, 'import time\n'), ((12516, 12527), 'time.time', 'time.time', ([], {}), '()\n', (12525, 12527), False, 'import time\n')] |
from PIL import Image
from filepath.file_relative_paths import ImagePathAndProps
from filepath.file_relative_paths import GuiCheckImagePathAndPropsOrdered
from filepath.file_relative_paths import FilePaths
from utils import resource_path
from utils import img_to_string
from utils import img_remove_background_and_enhance_word
from utils import bot_print
from enum import Enum
import traceback
import numpy as np
import cv2
from bot_related import aircve as aircv
import io
def cal_similarity(image1, image2):
    """Return the percentage of differing pixels between two images.

    A smaller percentage means the images are more similar (0.0 means
    every channel value is identical).
    """
    diff = cv2.absdiff(image1, image2)
    # Work on an integer image so non-zero counting is well defined.
    diff = diff.astype(np.uint8)
    # Fraction of channel values that changed, expressed as a percentage.
    return np.count_nonzero(diff) * 100 / diff.size
class GuiName(Enum):
    """Identifiers for the top-level game screens the detector recognizes."""
    HOME = 0
    MAP = 1
    WINDOW = 2
    WINDOW_TITLE = 3
    # Values 4, 6 and 7 are reserved by the disabled members kept below.
    # VERIFICATION_CHEST = 4
    VERIFICATION_VERIFY = 5
    # VERIFICATION_VERIFY_TITLE = 6
    # VERIFICATION_CLOSE_REFRESH_OK = 7
class GuiDetector:
    """Detects game GUI state from device screenshots via templates and OCR.

    The wrapped ``device`` only needs a ``screencap()`` method that returns an
    encoded screenshot as bytes (the bytes are decoded both with PIL and with
    ``cv2.imdecode``) — presumably an ADB-style device; TODO confirm.
    """
    def __init__(self, device):
        # When True, intermediate images are shown via cv2.imshow for debugging.
        self.debug = False
        self.__device = device
    def get_curr_device_screen_img_byte_array(self):
        """Return the current screenshot as encoded image bytes."""
        return self.__device.screencap()
    def get_curr_device_screen_img(self):
        """Return the current screenshot as a PIL Image."""
        return Image.open(io.BytesIO(self.__device.screencap()))
    def save_screen(self, file_name):
        """Capture the screen and save it under the test-sources folder."""
        image = Image.open(io.BytesIO(self.__device.screencap()))
        image.save(resource_path(FilePaths.TEST_SRC_FOLDER_PATH.value + file_name))
    def get_curr_gui_name(self):
        """Match the screen against the ordered GUI check templates.

        Returns [gui_name, match_position] for the first matching template,
        or None when nothing matches.
        """
        for image_path_and_props in GuiCheckImagePathAndPropsOrdered:
            result = self.check_any(image_path_and_props.value)
            if result[0]:
                return [result[1], result[2]]
        return None
    def get_windows_name(self):
        """OCR the title of the currently open in-game window.

        Locates the two window-title marks, crops the text between them,
        strips the background and runs OCR. Returns None unless exactly two
        marks are found.
        """
        path, size, box, threshold, least_diff, gui = ImagePathAndProps.WINDOW_TITLE_MARK_IMG_PATH.value
        imsch = cv2.resize(
            cv2.imdecode(np.asarray(self.get_curr_device_screen_img_byte_array(), dtype=np.uint8), cv2.IMREAD_COLOR),
            size
        )
        imsrc = cv2.imread(resource_path(path))
        # find 2 window title mark location
        result = aircv.find_all_template(imsrc, imsch, threshold)
        # get box position from result
        x0, x1, y0, y1 = 0, 0, 0, 0
        if result is not None and len(result) == 2:
            # 50px margins keep the marks themselves out of the OCR crop.
            x0 = result[0]['rectangle'][2][0] + 50
            x1 = result[1]['rectangle'][0][0] - 50
            y0 = result[0]['rectangle'][0][1]
            y1 = result[0]['rectangle'][1][1]
        else:
            return None
        # crop image for ocr
        title_image = imsch[y0:y1, x0:x1]
        title_image = img_remove_background_and_enhance_word(title_image, np.array([0, 0, 160]),
                                                             np.array([255, 255, 255]))
        title_image = Image.fromarray(title_image)
        return img_to_string(title_image)
    def resource_amount_image_to_string(self):
        """OCR the four resource counters from the top bar.

        Suffixes B/M/K are expanded to zeros; unreadable slots are skipped.
        Returns a list padded with -1 entries (see NOTE below).
        """
        result_list = []
        boxes = [
            (695, 10, 770, 34), (820, 10, 890, 34), (943, 10, 1015, 34), (1065, 10, 1140, 34)
        ]
        for box in boxes:
            x0, y0, x1, y1 = box
            imsch = cv2.imdecode(np.asarray(self.get_curr_device_screen_img_byte_array(), dtype=np.uint8),
                                 cv2.IMREAD_COLOR)
            imsch = imsch[y0:y1, x0:x1]
            resource_image = Image.fromarray(imsch)
            try:
                result_list.append(abs(int(img_to_string(resource_image)
                                           .replace('.', '')
                                           .replace('B', '00000000')
                                           .replace('M', '00000')
                                           .replace('K', '00')
                                           ))
                                   )
            except Exception:
                # check
                pass
        # TODO: check whether the town hall is present for the gold reading
        # Pad so the caller always sees 4 slots.
        # NOTE(review): range(len(boxes) - 1, 4) is range(3, 4) since boxes has
        # 4 entries, so exactly one -1 is appended even when all 4 reads
        # succeeded — probably meant range(len(result_list), 4); verify.
        for _ in range(len(boxes) - 1, 4):
            result_list.append(-1)
        return result_list
    def materilal_amount_image_to_string(self):
        """OCR the four material counters; -1 marks an unreadable slot.

        (Method name keeps the historical 'materilal' spelling — callers
        depend on it.)
        """
        result_list = []
        boxes = [
            (710, 245, 800, 264),
            (820, 245, 900, 264),
            (910, 245, 990, 264),
            (1000, 245, 1100, 264),
        ]
        for box in boxes:
            x0, y0, x1, y1 = box
            imsch = cv2.imdecode(np.asarray(self.get_curr_device_screen_img_byte_array(), dtype=np.uint8),
                                 cv2.IMREAD_COLOR)
            imsch = cv2.cvtColor(imsch, cv2.COLOR_BGR2GRAY)
            imsch = imsch[y0:y1, x0:x1]
            # Binarize bright text (>215) for more reliable OCR.
            ret, imsch = cv2.threshold(imsch, 215, 255, cv2.THRESH_BINARY)
            resource_image = Image.fromarray(imsch)
            try:
                result_list.append(int(img_to_string(resource_image)))
            except Exception as e:
                result_list.append(-1)
        return result_list
    def resource_location_image_to_string(self):
        """OCR the digits of the resource-node location label; '' if none."""
        x0, y0, x1, y1 = (885, 190, 1035, 207)
        imsch = cv2.imdecode(np.asarray(self.get_curr_device_screen_img_byte_array(), dtype=np.uint8),
                             cv2.IMREAD_COLOR)
        imsch = cv2.cvtColor(imsch, cv2.COLOR_BGR2GRAY)
        imsch = imsch[y0:y1, x0:x1]
        ret, imsch = cv2.threshold(imsch, 215, 255, cv2.THRESH_BINARY)
        resource_image = Image.fromarray(imsch)
        result = ''.join(c for c in img_to_string(resource_image) if c.isdigit())
        return result
    def match_query_to_string(self):
        """Read the 'x/y' match counter; returns (x, y) digits or (None, None).

        NOTE(review): only the first two OCR'd digits are used, so counts of
        10 or more would be misread — confirm the counter is single-digit.
        """
        x0, y0, x1, y1 = (1211, 162, 1242, 179)
        try:
            imsch = cv2.imdecode(np.asarray(self.get_curr_device_screen_img_byte_array(), dtype=np.uint8),
                                 cv2.IMREAD_COLOR)
            imsch = cv2.cvtColor(imsch, cv2.COLOR_BGR2GRAY)
            imsch = imsch[y0:y1, x0:x1]
            ret, imsch = cv2.threshold(imsch, 215, 255, cv2.THRESH_BINARY)
            resource_image = Image.fromarray(imsch)
            result = ''.join(c for c in img_to_string(resource_image) if c.isdigit())
            return int(result[0]), int(result[1])
        except Exception as e:
            return None, None
    def barbarians_level_image_to_string(self):
        """OCR the barbarian level from its banner; -1 on failure or >99."""
        try:
            x0, y0, x1, y1 = (106, 370, 436, 384)
            imsch = cv2.imdecode(np.asarray(self.get_curr_device_screen_img_byte_array(), dtype=np.uint8),
                                 cv2.IMREAD_COLOR)
            imsch = cv2.cvtColor(imsch, cv2.COLOR_BGR2GRAY)
            imsch = imsch[y0:y1, x0:x1]
            # ret, imsch = cv2.threshold(imsch, 165, 255, cv2.THRESH_BINARY)
            resource_image = Image.fromarray(imsch)
            # NOTE(review): `str` shadows the builtin inside this method.
            str = img_to_string(resource_image)
            if self.debug:
                cv2.imshow('imsch', imsch)
                print(str)
                cv2.waitKey(0)
            result = int(''.join(c for c in str if c.isdigit()))
        except Exception as e:
            traceback.print_exc()
            return -1
        if result > 99:
            # Levels above 99 are treated as an OCR misread.
            return -1
        return result
    def get_building_name(self, box):
        """OCR the building name in `box`, save the crop, and log the result."""
        x0, y0, x1, y1 = box
        title_image = self.get_curr_device_screen_img().crop(box)
        s = img_to_string(title_image)
        title_image.save(resource_path('{}title_x_{}_y_{}.png'.format(FilePaths.TEST_SRC_FOLDER_PATH.value, x0, y0)))
        bot_print("Building <{}> on position [({}, {}), ({}, {})] ".format(s, x0, y0, x1, y1))
    def check_any(self, *props_list):
        """Try each template-props tuple against the current screen.

        Returns (True, gui, match_position) for the first template found,
        otherwise (False, None, None). The screen is captured once and
        reused for every template.
        """
        imsch = cv2.imdecode(np.asarray(self.get_curr_device_screen_img_byte_array(), dtype=np.uint8),
                             cv2.IMREAD_COLOR)
        for props in props_list:
            # `box` and `least_diff` from the props tuple are unused here.
            path, size, box, threshold, least_diff, gui = props
            # x0, y0, x1, y1 = box
            imsrc = cv2.imread(resource_path(path))
            result = aircv.find_template(imsrc, imsch, threshold, True)
            if self.debug:
                cv2.imshow('imsrc', imsrc)
                cv2.imshow('imsch', imsch)
                cv2.waitKey(0)
            if result is not None:
                return True, gui, result['result']
        return False, None, None
    def has_image_props(self, props):
        """Find a single template (from a props tuple) on the current screen."""
        path, size, box, threshold, least_diff, gui = props
        imsch = cv2.imdecode(np.asarray(self.get_curr_device_screen_img_byte_array(), dtype=np.uint8),
                             cv2.IMREAD_COLOR)
        imsrc = cv2.imread(resource_path(path))
        result = aircv.find_template(imsrc, imsch, threshold, True)
        return result
    def find_all_image_props(self, props, max_cnt=3):
        """Find up to `max_cnt` occurrences of a template on the screen."""
        path, size, box, threshold, least_diff, gui = props
        imsch = cv2.imdecode(np.asarray(self.get_curr_device_screen_img_byte_array(), dtype=np.uint8),
                             cv2.IMREAD_COLOR)
        imsrc = cv2.imread(resource_path(path))
        result = aircv.find_all_template(imsrc, imsch, threshold, max_cnt, True)
        return result
    def has_image_cv_img(self, cv_img, threshold=0.90):
        """Find an already-decoded cv2 template image on the current screen."""
        imsch = cv2.imdecode(np.asarray(self.get_curr_device_screen_img_byte_array(), dtype=np.uint8),
                             cv2.IMREAD_COLOR)
        result = aircv.find_template(cv_img, imsch, threshold, True)
        return result
    def get_image_in_box(self, box=(0, 0, 1280, 720)):
        """
        Return the cropped current screen as a cv2 (BGR) image.

        :param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
        """
        x0, y0, x1, y1 = box
        img = cv2.imdecode(np.asarray(self.get_curr_device_screen_img_byte_array(), dtype=np.uint8),
                           cv2.IMREAD_COLOR)
        return img[y0:y1, x0:x1]
| [
"traceback.print_exc",
"numpy.count_nonzero",
"utils.resource_path",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.threshold",
"bot_related.aircve.find_all_template",
"bot_related.aircve.find_template",
"utils.img_to_string",
"numpy.array",
"PIL.Image.fromarray",
"cv2.absdiff",
"cv2.imshow"
] | [((559, 586), 'cv2.absdiff', 'cv2.absdiff', (['image1', 'image2'], {}), '(image1, image2)\n', (570, 586), False, 'import cv2\n'), ((2236, 2284), 'bot_related.aircve.find_all_template', 'aircv.find_all_template', (['imsrc', 'imsch', 'threshold'], {}), '(imsrc, imsch, threshold)\n', (2259, 2284), True, 'from bot_related import aircve as aircv\n'), ((2923, 2951), 'PIL.Image.fromarray', 'Image.fromarray', (['title_image'], {}), '(title_image)\n', (2938, 2951), False, 'from PIL import Image\n'), ((2967, 2993), 'utils.img_to_string', 'img_to_string', (['title_image'], {}), '(title_image)\n', (2980, 2993), False, 'from utils import img_to_string\n'), ((5357, 5396), 'cv2.cvtColor', 'cv2.cvtColor', (['imsch', 'cv2.COLOR_BGR2GRAY'], {}), '(imsch, cv2.COLOR_BGR2GRAY)\n', (5369, 5396), False, 'import cv2\n'), ((5454, 5503), 'cv2.threshold', 'cv2.threshold', (['imsch', '(215)', '(255)', 'cv2.THRESH_BINARY'], {}), '(imsch, 215, 255, cv2.THRESH_BINARY)\n', (5467, 5503), False, 'import cv2\n'), ((5529, 5551), 'PIL.Image.fromarray', 'Image.fromarray', (['imsch'], {}), '(imsch)\n', (5544, 5551), False, 'from PIL import Image\n'), ((7379, 7405), 'utils.img_to_string', 'img_to_string', (['title_image'], {}), '(title_image)\n', (7392, 7405), False, 'from utils import img_to_string\n'), ((8647, 8697), 'bot_related.aircve.find_template', 'aircv.find_template', (['imsrc', 'imsch', 'threshold', '(True)'], {}), '(imsrc, imsch, threshold, True)\n', (8666, 8697), True, 'from bot_related import aircve as aircv\n'), ((9050, 9113), 'bot_related.aircve.find_all_template', 'aircv.find_all_template', (['imsrc', 'imsch', 'threshold', 'max_cnt', '(True)'], {}), '(imsrc, imsch, threshold, max_cnt, True)\n', (9073, 9113), True, 'from bot_related import aircve as aircv\n'), ((9360, 9411), 'bot_related.aircve.find_template', 'aircv.find_template', (['cv_img', 'imsch', 'threshold', '(True)'], {}), '(cv_img, imsch, threshold, True)\n', (9379, 9411), True, 'from bot_related import aircve as aircv\n'), 
((770, 791), 'numpy.count_nonzero', 'np.count_nonzero', (['res'], {}), '(res)\n', (786, 791), True, 'import numpy as np\n'), ((1489, 1552), 'utils.resource_path', 'resource_path', (['(FilePaths.TEST_SRC_FOLDER_PATH.value + file_name)'], {}), '(FilePaths.TEST_SRC_FOLDER_PATH.value + file_name)\n', (1502, 1552), False, 'from utils import resource_path\n'), ((2153, 2172), 'utils.resource_path', 'resource_path', (['path'], {}), '(path)\n', (2166, 2172), False, 'from utils import resource_path\n'), ((2790, 2811), 'numpy.array', 'np.array', (['[0, 0, 160]'], {}), '([0, 0, 160])\n', (2798, 2811), True, 'import numpy as np\n'), ((2874, 2899), 'numpy.array', 'np.array', (['[255, 255, 255]'], {}), '([255, 255, 255])\n', (2882, 2899), True, 'import numpy as np\n'), ((3475, 3497), 'PIL.Image.fromarray', 'Image.fromarray', (['imsch'], {}), '(imsch)\n', (3490, 3497), False, 'from PIL import Image\n'), ((4697, 4736), 'cv2.cvtColor', 'cv2.cvtColor', (['imsch', 'cv2.COLOR_BGR2GRAY'], {}), '(imsch, cv2.COLOR_BGR2GRAY)\n', (4709, 4736), False, 'import cv2\n'), ((4802, 4851), 'cv2.threshold', 'cv2.threshold', (['imsch', '(215)', '(255)', 'cv2.THRESH_BINARY'], {}), '(imsch, 215, 255, cv2.THRESH_BINARY)\n', (4815, 4851), False, 'import cv2\n'), ((4881, 4903), 'PIL.Image.fromarray', 'Image.fromarray', (['imsch'], {}), '(imsch)\n', (4896, 4903), False, 'from PIL import Image\n'), ((5934, 5973), 'cv2.cvtColor', 'cv2.cvtColor', (['imsch', 'cv2.COLOR_BGR2GRAY'], {}), '(imsch, cv2.COLOR_BGR2GRAY)\n', (5946, 5973), False, 'import cv2\n'), ((6039, 6088), 'cv2.threshold', 'cv2.threshold', (['imsch', '(215)', '(255)', 'cv2.THRESH_BINARY'], {}), '(imsch, 215, 255, cv2.THRESH_BINARY)\n', (6052, 6088), False, 'import cv2\n'), ((6118, 6140), 'PIL.Image.fromarray', 'Image.fromarray', (['imsch'], {}), '(imsch)\n', (6133, 6140), False, 'from PIL import Image\n'), ((6628, 6667), 'cv2.cvtColor', 'cv2.cvtColor', (['imsch', 'cv2.COLOR_BGR2GRAY'], {}), '(imsch, cv2.COLOR_BGR2GRAY)\n', (6640, 6667), False, 
'import cv2\n'), ((6814, 6836), 'PIL.Image.fromarray', 'Image.fromarray', (['imsch'], {}), '(imsch)\n', (6829, 6836), False, 'from PIL import Image\n'), ((6855, 6884), 'utils.img_to_string', 'img_to_string', (['resource_image'], {}), '(resource_image)\n', (6868, 6884), False, 'from utils import img_to_string\n'), ((8016, 8066), 'bot_related.aircve.find_template', 'aircv.find_template', (['imsrc', 'imsch', 'threshold', '(True)'], {}), '(imsrc, imsch, threshold, True)\n', (8035, 8066), True, 'from bot_related import aircve as aircv\n'), ((8609, 8628), 'utils.resource_path', 'resource_path', (['path'], {}), '(path)\n', (8622, 8628), False, 'from utils import resource_path\n'), ((9012, 9031), 'utils.resource_path', 'resource_path', (['path'], {}), '(path)\n', (9025, 9031), False, 'from utils import resource_path\n'), ((6928, 6954), 'cv2.imshow', 'cv2.imshow', (['"""imsch"""', 'imsch'], {}), "('imsch', imsch)\n", (6938, 6954), False, 'import cv2\n'), ((6998, 7012), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (7009, 7012), False, 'import cv2\n'), ((7121, 7142), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7140, 7142), False, 'import traceback\n'), ((7973, 7992), 'utils.resource_path', 'resource_path', (['path'], {}), '(path)\n', (7986, 7992), False, 'from utils import resource_path\n'), ((8111, 8137), 'cv2.imshow', 'cv2.imshow', (['"""imsrc"""', 'imsrc'], {}), "('imsrc', imsrc)\n", (8121, 8137), False, 'import cv2\n'), ((8154, 8180), 'cv2.imshow', 'cv2.imshow', (['"""imsch"""', 'imsch'], {}), "('imsch', imsch)\n", (8164, 8180), False, 'import cv2\n'), ((8197, 8211), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8208, 8211), False, 'import cv2\n'), ((5588, 5617), 'utils.img_to_string', 'img_to_string', (['resource_image'], {}), '(resource_image)\n', (5601, 5617), False, 'from utils import img_to_string\n'), ((4960, 4989), 'utils.img_to_string', 'img_to_string', (['resource_image'], {}), '(resource_image)\n', (4973, 4989), False, 
'from utils import img_to_string\n'), ((6181, 6210), 'utils.img_to_string', 'img_to_string', (['resource_image'], {}), '(resource_image)\n', (6194, 6210), False, 'from utils import img_to_string\n'), ((3558, 3587), 'utils.img_to_string', 'img_to_string', (['resource_image'], {}), '(resource_image)\n', (3571, 3587), False, 'from utils import img_to_string\n')] |
import luigi
import numpy as np
from pathlib import Path
from common import utils
from tasks.reg.rollings import RollingFeatures
class PolyFeatures(luigi.Task):
    """Derive polynomial/interaction features on top of the rolling features."""

    uuid = luigi.Parameter()
    task = luigi.Parameter()

    def requires(self):
        # Upstream task that produces the rolling-window feature file.
        return RollingFeatures(self.uuid, self.task)

    def output(self):
        source = Path(self.input().path)
        self.dir = source.absolute().parent
        target = self.dir / ('feature_ploy_' + source.name)
        return luigi.LocalTarget(target.absolute().as_posix())

    def run(self):
        self._source()
        frame = utils.load_data(self.input().path)
        # Drop the rows outside the valid/test windows; they existed only to
        # feed the rolling-window computations upstream.
        trimmed = self._remove_not_in_times(frame)
        enriched = self.ploy_func(trimmed)
        enriched.to_csv(self.output().path, index=False)

    def _remove_not_in_times(self, df):
        # Keep only the morning (6-9) and afternoon (15-18) rush-hour windows.
        in_window = df.time_window_start.map(
            lambda ts: ts.time().hour in [6, 7, 8, 9, 15, 16, 17, 18]
        )
        return df[in_window]

    def _traj_ploy_features(self, df):
        # Interaction features for the travel-time (trajectory) task.
        def sign(flag):
            # Map truthy -> +1 and falsy -> -1 so a boolean column can flip
            # the sign of a numeric one.
            return 1 if flag else -1

        df['is_am_min_diff_13'] = df['is_am'].map(sign) * df['minutes_diff_13']
        df['surprise1'] = (df['before_holiday'] | df['end_holiday']) & df['is_am']
        df['surprise2'] = df['surprise1'].map(sign) * df['minutes_diff_13']
        df['surprise3'] = (df['minutes_diff_13']
                           / np.log(df['neigh_2h_avg_travel_time_avg']))
        return df

    def _vol_ploy_features(self, df):
        # No extra interaction features for the volume task yet.
        return df

    def _source(self):
        # Configure the task-specific metadata columns, the target column
        # name, and the feature-generation callable used by run().
        if self.task == 'volume':
            id_cols = ['tollgate_id', 'direction', 'volume']
            target_col = 'volume'
            generator = self._vol_ploy_features
        else:
            id_cols = ['intersection_id', 'tollgate_id', 'avg_travel_time']
            target_col = 'avg_travel_time'
            generator = self._traj_ploy_features
        self.meta_cols = ['time_window_start', 'time_window_end'] + id_cols
        self.vcol = target_col
        self.ploy_func = generator
| [
"luigi.Parameter",
"numpy.log",
"tasks.reg.rollings.RollingFeatures"
] | [((175, 192), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (190, 192), False, 'import luigi\n'), ((204, 221), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (219, 221), False, 'import luigi\n'), ((262, 299), 'tasks.reg.rollings.RollingFeatures', 'RollingFeatures', (['self.uuid', 'self.task'], {}), '(self.uuid, self.task)\n', (277, 299), False, 'from tasks.reg.rollings import RollingFeatures\n'), ((2287, 2329), 'numpy.log', 'np.log', (["df['neigh_2h_avg_travel_time_avg']"], {}), "(df['neigh_2h_avg_travel_time_avg'])\n", (2293, 2329), True, 'import numpy as np\n')] |
from model_framework import model_loader
from data_generators import get_subject_names
from math import pow, sqrt
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import f
def calculate_f_score(unrestricted_RSS,restricted_RSS,p1,p2,n,ci=0.05,visualize=True):
    """Run an F-test comparing a restricted (nested) model to an unrestricted one.

    Args:
        unrestricted_RSS: residual sum of squares of the larger, p2-parameter model.
        restricted_RSS: residual sum of squares of the smaller, p1-parameter model.
        p1: number of parameters in the restricted model.
        p2: number of parameters in the unrestricted model; must exceed p1.
        n: number of samples used to fit both models.
        ci: significance level for the critical value (default 0.05).
        visualize: when truthy, print the statistic, degrees of freedom,
            critical value and a pass/fail verdict.

    Returns:
        Tuple ``(f_score, critical_f_score)``.
    """
    assert p2 > p1
    # Degrees of freedom: numerator = extra parameters, denominator = residual.
    # Computed unconditionally because f.isf needs them regardless of printing.
    df1 = p2 - p1
    df2 = n - p2
    # Standard F statistic for nested linear models.
    f_score = ((restricted_RSS - unrestricted_RSS) / df1) / (unrestricted_RSS / df2)
    # Critical value from the inverse survival function of the F distribution.
    critical_f_score = f.isf(ci, df1, df2)
    # Gate ALL diagnostics uniformly on truthiness (the old `visualize == True`
    # test silently skipped output for truthy non-bool values, and some prints
    # were not gated at all).
    if visualize:
        print("F_score: " + str(f_score))
        print("DF1: " + str(df1) + " DF2: " + str(df2) + " CI: " + str(ci))
        print("Critical F-score: " + str(critical_f_score))
        # Validate if this works
        if critical_f_score < f_score:
            print("We have a paper")
        else:
            print("gg")
    return f_score, critical_f_score
#Calculate the standard deviation for every point in phase
def standard_deviation_binned_by_phase(filename):
    """Bin each subject's model residuals into 150 phase bins and plot the
    per-phase difference in standard deviation between the un-personalized
    (average xi) and personalized (average xi + fingerprint) models.

    Args:
        filename: path to the pickle file consumed by model_loader.
    """
    list_of_dicts = model_loader(filename)
    gait_fingerprints, expected_model_parameters, personalization_map_dict, regression_matrix_dict, output_map, average_xi_map = tuple(list_of_dicts)
    subject_names = get_subject_names()
    amount_of_subjects = len(subject_names)
    for j, name in enumerate(subject_names):
        #Get the information for a particular person
        gait_fingerprint = gait_fingerprints[name]
        expected_model_parameter = expected_model_parameters[name]
        personalization_map = personalization_map_dict[name]
        regression_matrix = regression_matrix_dict[name]
        output = output_map[name]
        average_xi = average_xi_map[name]
        #Calculate the residual error based on the average xi
        average_xi_residual = output.ravel() - regression_matrix @ average_xi
        #Calcuate the residual error based on the gait figerprint
        individualized_xi = average_xi + personalization_map @ gait_fingerprint
        individialuzed_xi_residual = output.ravel() - regression_matrix @ individualized_xi
        #Bin the residuals based on phase
        #Initialize dicionary for bins
        # NOTE(review): the bin keys are FLOATS built by repeated addition of
        # 1/150. The accumulation loop below must replay the exact same
        # sequence of additions or dict lookups will KeyError — do not
        # "simplify" either loop independently.
        avg_residual_dict = {}
        ind_residual_dict = {}
        phase = 0
        for i in range(150):
            #Create a dictionary for (Residual Squared Sum, Number of people)
            avg_residual_dict[phase] = [0,0]
            ind_residual_dict[phase] = [0,0]
            phase = phase + 1/150
        residual_size = average_xi_residual.shape[0]
        phase = 0
        for i in range(residual_size):
            # Accumulate squared residual and sample count per phase bin.
            avg_residual_dict[phase][0] += pow(average_xi_residual[i],2)
            avg_residual_dict[phase][1] += 1
            ind_residual_dict[phase][0] += pow(individialuzed_xi_residual[i],2)
            ind_residual_dict[phase][1] += 1
            phase += 1/150
            # NOTE(review): the reset relies on the running float sum reaching
            # >= 1 after exactly 150 steps; confirm no drift causes the reset
            # to fire a step early/late and miss a key built above.
            if(phase >= 1):
                phase = 0
        #Convert the dictionary into the standard deviation
        # NOTE(review): sqrt(sum of squares)/count — this is RMS-like but not
        # the textbook sample standard deviation; confirm intended.
        avg_std_dev_dic = {phase: sqrt(value[0])/value[1] for (phase, value) in avg_residual_dict.items()}
        ind_std_dev_dic = {phase: sqrt(value[0])/value[1] for (phase, value) in ind_residual_dict.items()}
        #plot?
        phase = list(avg_std_dev_dic.keys())
        up_std_dev = np.array([avg_std_dev_dic[i] for i in phase])
        p_std_dev = np.array([ind_std_dev_dic[i] for i in phase])
        diff = up_std_dev - p_std_dev
        # plt.subplot(3, 1, 1)
        # plt.plot(phase, up_std_dev)
        # plt.title('Un-Personalized Xi Standard Deviation')
        # plt.legend(['Phase', 'Standard Deviation'])
        # plt.subplot(3, 1, 2)
        # plt.plot(phase, p_std_dev)
        # plt.title('Personalized Xi Standard Deviation')
        # plt.legend(['Phase', 'Standard Deviation'])
        # One subplot row per subject.
        plt.subplot(amount_of_subjects, 1, j+1)
        plt.plot(phase, diff)
        plt.title('Difference in Standard Deviation')
        plt.legend(['Phase', 'Standard Deviation'])
    plt.show()
def standard_deviation_total():
    """Compute each subject's F-score for models with 1..6 gait fingerprints,
    print per-subject verdicts, and plot F-score vs. fingerprint count on a
    log scale.

    Loads one pickle per fingerprint count ('gait_fingerprints_n<i>.pickle'),
    compares the restricted model (average xi only) against the unrestricted
    model (average xi + personalized correction) with a nested-model F-test.
    """
    subject_names = get_subject_names()
    # One list of F-scores per subject, indexed by fingerprint count 1..6.
    f_score_dict = {name: [] for name in subject_names}
    for i in range(1, 7):
        filename = 'gait_fingerprints_n' + str(i) + '.pickle'
        list_of_dicts = model_loader(filename)
        gait_fingerprints, expected_model_parameters, personalization_map_dict, regression_matrix_dict, output_map, average_xi_map = tuple(list_of_dicts)
        for name in subject_names:
            #Get the information for a particular person
            gait_fingerprint = gait_fingerprints[name]
            personalization_map = personalization_map_dict[name]
            regression_matrix = regression_matrix_dict[name]
            output = output_map[name]
            average_xi = average_xi_map[name]
            # Restricted model: average xi only (no fingerprint terms).
            restricted_RSS = np.mean(np.power(output.ravel() - regression_matrix @ average_xi, 2))
            # Unrestricted model: average xi plus the personalized correction.
            individualized_xi = average_xi + personalization_map @ gait_fingerprint
            unrestricted_RSS = np.mean(np.power(output.ravel() - regression_matrix @ individualized_xi, 2))
            # F-test for the i extra fingerprint parameters over n samples.
            n = regression_matrix.shape[0]
            p2 = i
            p1 = 0
            f_score = ((restricted_RSS - unrestricted_RSS) / (p2 - p1)) / (unrestricted_RSS / (n - p2))
            print(name + " f_score: " + str(f_score))
            f_score_dict[name].append(f_score)
            # Critical value at the 5% significance level.
            cf = f.isf(0.05, p2 - p1, n - p2)
            print(name + " Critical F-score: " + str(cf))
            #Validate if this works
            if cf < f_score:
                print("We have a paper")
            else:
                print("gg")
    # BUG FIX: this summary previously printed f_score_dict[name] — the last
    # subject from the loop above — for every key; print each key's own list.
    for key in f_score_dict.keys():
        print(key + str(f_score_dict[key]))
    for name in subject_names:
        # (dropped a redundant double np.array() wrap around the y values)
        plt.plot(np.array(range(1, 7)), np.array(f_score_dict[name]))
    plt.yscale('log')
    plt.ylabel('F_score')
    # Typo fix in the axis label: 'finterprints' -> 'fingerprints'.
    plt.xlabel('gait fingerprints')
    plt.legend(f_score_dict.keys())
    plt.show()
if __name__ == '__main__':
    # Script entry point: run the aggregate F-score analysis across all
    # fingerprint counts (loads pickles and opens a matplotlib window).
    standard_deviation_total()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"math.pow",
"math.sqrt",
"matplotlib.pyplot.legend",
"data_generators.get_subject_names",
"model_framework.model_loader",
"numpy.array",
"matplotlib.pyplot.yl... | [((659, 678), 'scipy.stats.f.isf', 'f.isf', (['ci', 'df1', 'df2'], {}), '(ci, df1, df2)\n', (664, 678), False, 'from scipy.stats import f\n'), ((1057, 1079), 'model_framework.model_loader', 'model_loader', (['filename'], {}), '(filename)\n', (1069, 1079), False, 'from model_framework import model_loader\n'), ((1246, 1265), 'data_generators.get_subject_names', 'get_subject_names', ([], {}), '()\n', (1263, 1265), False, 'from data_generators import get_subject_names\n'), ((3655, 3665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3663, 3665), True, 'import matplotlib.pyplot as plt\n'), ((3720, 3739), 'data_generators.get_subject_names', 'get_subject_names', ([], {}), '()\n', (3737, 3739), False, 'from data_generators import get_subject_names\n'), ((5864, 5881), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5874, 5881), True, 'import matplotlib.pyplot as plt\n'), ((5883, 5904), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F_score"""'], {}), "('F_score')\n", (5893, 5904), True, 'import matplotlib.pyplot as plt\n'), ((5906, 5937), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""gait finterprints"""'], {}), "('gait finterprints')\n", (5916, 5937), True, 'import matplotlib.pyplot as plt\n'), ((5972, 5982), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5980, 5982), True, 'import matplotlib.pyplot as plt\n'), ((3036, 3081), 'numpy.array', 'np.array', (['[avg_std_dev_dic[i] for i in phase]'], {}), '([avg_std_dev_dic[i] for i in phase])\n', (3044, 3081), True, 'import numpy as np\n'), ((3097, 3142), 'numpy.array', 'np.array', (['[ind_std_dev_dic[i] for i in phase]'], {}), '([ind_std_dev_dic[i] for i in phase])\n', (3105, 3142), True, 'import numpy as np\n'), ((3494, 3535), 'matplotlib.pyplot.subplot', 'plt.subplot', (['amount_of_subjects', '(1)', '(j + 1)'], {}), '(amount_of_subjects, 1, j + 1)\n', (3505, 3535), True, 'import matplotlib.pyplot as plt\n'), ((3536, 3557), 
'matplotlib.pyplot.plot', 'plt.plot', (['phase', 'diff'], {}), '(phase, diff)\n', (3544, 3557), True, 'import matplotlib.pyplot as plt\n'), ((3560, 3605), 'matplotlib.pyplot.title', 'plt.title', (['"""Difference in Standard Deviation"""'], {}), "('Difference in Standard Deviation')\n", (3569, 3605), True, 'import matplotlib.pyplot as plt\n'), ((3608, 3651), 'matplotlib.pyplot.legend', 'plt.legend', (["['Phase', 'Standard Deviation']"], {}), "(['Phase', 'Standard Deviation'])\n", (3618, 3651), True, 'import matplotlib.pyplot as plt\n'), ((3929, 3951), 'model_framework.model_loader', 'model_loader', (['filename'], {}), '(filename)\n', (3941, 3951), False, 'from model_framework import model_loader\n'), ((2487, 2517), 'math.pow', 'pow', (['average_xi_residual[i]', '(2)'], {}), '(average_xi_residual[i], 2)\n', (2490, 2517), False, 'from math import pow, sqrt\n'), ((2588, 2625), 'math.pow', 'pow', (['individialuzed_xi_residual[i]', '(2)'], {}), '(individialuzed_xi_residual[i], 2)\n', (2591, 2625), False, 'from math import pow, sqrt\n'), ((5512, 5531), 'scipy.stats.f.isf', 'f.isf', (['ci', 'df1', 'df2'], {}), '(ci, df1, df2)\n', (5517, 5531), False, 'from scipy.stats import f\n'), ((2797, 2811), 'math.sqrt', 'sqrt', (['value[0]'], {}), '(value[0])\n', (2801, 2811), False, 'from math import pow, sqrt\n'), ((2898, 2912), 'math.sqrt', 'sqrt', (['value[0]'], {}), '(value[0])\n', (2902, 2912), False, 'from math import pow, sqrt\n'), ((5829, 5857), 'numpy.array', 'np.array', (['f_score_dict[name]'], {}), '(f_score_dict[name])\n', (5837, 5857), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.