code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from mmdet.apis import init_detector, inference_detector
import numpy as np
import os, glob
from PIL import Image
def solo_infer(model, img, conf):
    """Run SOLO instance segmentation on one image.

    Args:
        model: detector built with ``mmdet.apis.init_detector``.
        img: path of the image to segment.
        conf (float): score threshold; instances scoring below it are dropped.

    Returns:
        Tuple ``(masks, classes, final_mask, image_np)``:
            masks: (N, H, W) uint8 instance masks that survived filtering.
            classes: (N,) class index per kept mask.
            final_mask: (H, W, 2) uint8 array stacking the instance-id map
                and the semantic (class-id) map.
            image_np: the input image with instances of classes 0, 1 and 7
                blended with random colors.
        Implicitly returns ``None`` when the detector yields no result
        (callers that unpack the tuple will then fail).
    """
    image_np = np.array(Image.open(img))
    result, _ = inference_detector(model, img)
    cur_result = result[0]
    if cur_result is not None:
        masks = cur_result[0].cpu().numpy().astype(np.uint8)
        classes = cur_result[1].cpu().numpy()
        scores = cur_result[2].cpu().numpy()
        h, w = masks[0].shape  # assumes at least one raw mask -- TODO confirm
        vis_inds = (scores > conf)
        masks = masks[vis_inds]
        classes = classes[vis_inds]
        # Sort by area (largest first) and suppress masks that are mostly
        # covered (>85% of the other mask's area) by another mask.
        areas = [mask.sum() for mask in masks]
        sorted_inds = np.argsort(areas)[::-1]
        keep_inds = []
        for i in sorted_inds:
            overlapped = False
            for j in range(i):
                # BUGFIX: in the original, `break` fell through to an
                # unconditional append, so no mask was ever suppressed.
                if np.sum((masks[i, :, :] > 0) * (masks[j, :, :] > 0)) / np.sum(masks[j, :, :] > 0) > 0.85:
                    overlapped = True
                    break
            if not overlapped:
                keep_inds.append(i)
        masks = masks[keep_inds]
        classes = classes[keep_inds]
        instance_map = np.zeros((h, w), dtype=np.uint8)
        semantic_map = np.zeros((h, w), dtype=np.uint8)
        if masks is not None:
            for i, (mask, cls) in enumerate(zip(masks, classes)):
                instance_map[mask > 0] = i + 1
                semantic_map[mask > 0] = cls + 1
                if cls in [0, 1, 7]:
                    color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
                    # BUGFIX: np.bool was removed in NumPy 1.24; the builtin
                    # bool is the documented replacement.
                    mask_bool = mask.astype(bool)
                    image_np[mask_bool] = image_np[mask_bool] * 0.5 + color_mask * 0.5
        final_mask = np.stack([instance_map, semantic_map], axis=-1)
        return masks, classes, final_mask, image_np
if __name__ == '__main__':
    # Batch driver: run SOLOv2 over every NYU-Depth-v2 sync image and write
    # (a) a 2-channel instance/semantic PNG and (b) a color-blended preview.
    config_file = 'ade_cfg/solov2_r101_dcn_22.py'
    checkpoint_file = './indoor_dcn.pth'
    model = init_detector(config_file, checkpoint_file, device='cuda:2')
    root = '/versa/dyy/dataset/nyu_depth_v2/'
    # Sorted for deterministic processing order across runs.
    imgs = sorted(glob.glob(os.path.join(root, 'sync/*/*.jpg')))
    mask_dir = os.path.join(root, '2channels')
    if not os.path.exists(mask_dir):
        os.makedirs(mask_dir)
    blend_dir = os.path.join(root, 'blend')
    if not os.path.exists(blend_dir):
        os.makedirs(blend_dir)
    total = 0  # NOTE(review): never incremented or read -- dead counter?
    for i, img in enumerate(imgs):
        # Folder name is prefixed so files from different scenes don't clash.
        name = img.split('/')[-2] + '_' + img.split('/')[-1]
        print(i, name)
        masks, classes, final_mask, img_blend = solo_infer(model, img, conf=0.2)
        # Save the 2-channel map as lossless PNG (JPEG would corrupt ids).
        Image.fromarray(final_mask).save(os.path.join(mask_dir, name.replace('.jpg', '.png')))
Image.fromarray(img_blend).save(os.path.join(blend_dir, name)) | [
"numpy.stack",
"numpy.sum",
"os.makedirs",
"mmdet.apis.init_detector",
"numpy.zeros",
"mmdet.apis.inference_detector",
"os.path.exists",
"PIL.Image.open",
"numpy.argsort",
"numpy.random.randint",
"PIL.Image.fromarray",
"os.path.join"
] | [((207, 237), 'mmdet.apis.inference_detector', 'inference_detector', (['model', 'img'], {}), '(model, img)\n', (225, 237), False, 'from mmdet.apis import init_detector, inference_detector\n'), ((1851, 1911), 'mmdet.apis.init_detector', 'init_detector', (['config_file', 'checkpoint_file'], {'device': '"""cuda:2"""'}), "(config_file, checkpoint_file, device='cuda:2')\n", (1864, 1911), False, 'from mmdet.apis import init_detector, inference_detector\n'), ((2039, 2070), 'os.path.join', 'os.path.join', (['root', '"""2channels"""'], {}), "(root, '2channels')\n", (2051, 2070), False, 'import os, glob\n'), ((2154, 2181), 'os.path.join', 'os.path.join', (['root', '"""blend"""'], {}), "(root, 'blend')\n", (2166, 2181), False, 'import os, glob\n'), ((174, 189), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (184, 189), False, 'from PIL import Image\n'), ((1055, 1087), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (1063, 1087), True, 'import numpy as np\n'), ((1111, 1143), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (1119, 1143), True, 'import numpy as np\n'), ((1618, 1665), 'numpy.stack', 'np.stack', (['[instance_map, semantic_map]'], {'axis': '(-1)'}), '([instance_map, semantic_map], axis=-1)\n', (1626, 1665), True, 'import numpy as np\n'), ((2082, 2106), 'os.path.exists', 'os.path.exists', (['mask_dir'], {}), '(mask_dir)\n', (2096, 2106), False, 'import os, glob\n'), ((2116, 2137), 'os.makedirs', 'os.makedirs', (['mask_dir'], {}), '(mask_dir)\n', (2127, 2137), False, 'import os, glob\n'), ((2193, 2218), 'os.path.exists', 'os.path.exists', (['blend_dir'], {}), '(blend_dir)\n', (2207, 2218), False, 'import os, glob\n'), ((2228, 2250), 'os.makedirs', 'os.makedirs', (['blend_dir'], {}), '(blend_dir)\n', (2239, 2250), False, 'import os, glob\n'), ((652, 669), 'numpy.argsort', 'np.argsort', (['areas'], {}), '(areas)\n', (662, 669), True, 'import numpy as np\n'), 
((1987, 2021), 'os.path.join', 'os.path.join', (['root', '"""sync/*/*.jpg"""'], {}), "(root, 'sync/*/*.jpg')\n", (1999, 2021), False, 'import os, glob\n'), ((2601, 2630), 'os.path.join', 'os.path.join', (['blend_dir', 'name'], {}), '(blend_dir, name)\n', (2613, 2630), False, 'import os, glob\n'), ((2474, 2501), 'PIL.Image.fromarray', 'Image.fromarray', (['final_mask'], {}), '(final_mask)\n', (2489, 2501), False, 'from PIL import Image\n'), ((2569, 2595), 'PIL.Image.fromarray', 'Image.fromarray', (['img_blend'], {}), '(img_blend)\n', (2584, 2595), False, 'from PIL import Image\n'), ((1406, 1455), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(1, 3)'], {'dtype': 'np.uint8'}), '(0, 256, (1, 3), dtype=np.uint8)\n', (1423, 1455), True, 'import numpy as np\n'), ((810, 861), 'numpy.sum', 'np.sum', (['((masks[i, :, :] > 0) * (masks[j, :, :] > 0))'], {}), '((masks[i, :, :] > 0) * (masks[j, :, :] > 0))\n', (816, 861), True, 'import numpy as np\n'), ((864, 890), 'numpy.sum', 'np.sum', (['(masks[j, :, :] > 0)'], {}), '(masks[j, :, :] > 0)\n', (870, 890), True, 'import numpy as np\n')] |
#This weeks code focuses on understanding basic functions of pandas and numpy
#This will help you complete other lab experiments
# Do not change the function definations or the parameters
import numpy as np
import pandas as pd
#input: tuple (x,y) x,y:int
def create_numpy_ones_array(shape):
    """Return a NumPy array of ones with the given shape.

    Args:
        shape: an int or a sequence of ints, e.g. ``(x, y)``; other
            iterables of ints are coerced to a tuple and retried.

    Returns:
        numpy.ndarray filled with ones at every index.
    """
    try:
        return np.ones(shape)
    except TypeError:
        # BUGFIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt etc.); np.ones only needs help with iterables
        # it cannot interpret directly.
        return np.ones(tuple(shape))
#input: tuple (x,y) x,y:int
def create_numpy_zeros_array(shape):
    """Return a NumPy array of zeros with the given shape.

    Args:
        shape: an int or a sequence of ints, e.g. ``(x, y)``; other
            iterables of ints are coerced to a tuple and retried.

    Returns:
        numpy.ndarray filled with zeros at every index.
    """
    try:
        return np.zeros(shape)
    except TypeError:
        # BUGFIX: narrowed from a bare `except:`; only retry shapes that
        # np.zeros rejected for their type.
        return np.zeros(tuple(shape))
#input: int
def create_identity_numpy_array(order):
    """Return the identity matrix of the given order.

    Args:
        order (int): number of rows/columns of the identity matrix.

    Returns:
        numpy.ndarray of shape ``(order, order)`` with ones on the diagonal.
    """
    try:
        return np.identity(order)
    except TypeError:
        # BUGFIX: narrowed from a bare `except:`; np.eye is kept as the
        # original fallback for inputs np.identity rejects.
        return np.eye(order)
def matrix_cofactor(array):
    """Return the cofactor matrix of a square matrix.

    For an invertible matrix the cofactor matrix equals
    ``det(m) * inv(m).T`` (transpose of the adjugate identity); for a
    singular matrix each cofactor is computed explicitly from its minor.
    """
    m = np.array(array)
    if np.linalg.det(m) != 0:
        # Fast path, valid only when m is invertible.
        return np.linalg.inv(m).T * np.linalg.det(m)
    rows, cols = m.shape
    cof_rows = []
    for r in range(rows):
        row_vals = []
        for c in range(cols):
            # Minor: delete row r and column c, then take the determinant.
            minor = np.delete(np.delete(m, r, axis=0), c, axis=1)
            row_vals.append((-1) ** (r + c) * np.linalg.det(np.array(minor)))
        cof_rows.append(row_vals)
    return np.reshape(np.array(cof_rows), m.shape)
#Input: (numpy array, int ,numpy array, int , int , int , int , tuple,tuple)
#tuple (x,y) x,y:int
def f1(X1, coef1, X2, coef2, seed1, seed2, seed3, shape1, shape2):
    """Return ``W1 @ (X1 ** coef1) + W2 @ (X2 ** coef2) + B``.

    W1 is a random matrix of shape ``shape1`` drawn with ``seed1``; W2 is a
    random matrix of shape ``shape2`` drawn with ``seed2``; B is a random
    matrix, drawn with ``seed3``, matching the shape of the weighted sum.
    Shapes are tuples of the form ``(x1, x2)``.

    Returns:
        numpy.ndarray with the result, or ``-1`` on dimension mismatch.
    """
    np.random.seed(seed1)
    W1 = np.random.rand(*shape1)
    np.random.seed(seed2)
    W2 = np.random.rand(*shape2)
    try:
        weighted = np.dot(W1, X1 ** coef1) + np.dot(W2, X2 ** coef2)
    except ValueError:
        # Incompatible shapes in either product or in the sum.
        return -1
    np.random.seed(seed3)
    bias = np.random.rand(*weighted.shape)
    return weighted + bias
def fill_with_mode(filename, column):
    """
    Fill the missing values(NaN) in a column with the mode of that column
    Args:
        filename: Name of the CSV file.
        column: Name of the column to fill
    Returns:
        df: Pandas DataFrame object.
        (Representing entire data and where 'column' does not contain NaN values)
        (Filled with above mentioned rules)
    """
    df = pd.read_csv(filename)
    # mode() may return several values; index 0 picks the smallest/first one.
    most_common = df[column].mode()[0]
    df[column] = df[column].fillna(most_common)
    return df
def fill_with_group_average(df, group, column):
    """
    Fill the missing values(NaN) in column with the mean value of the
    group the row belongs to.
    The rows are grouped based on the values of another column

    Args:
        df: A pandas DataFrame object representing the data.
        group: The column to group the rows with
        column: Name of the column to fill
    Returns:
        df: Pandas DataFrame object.
        (Representing entire data and where 'column' does not contain NaN values)
        (Filled with above mentioned rules)
    """
    # transform('mean') broadcasts each group's mean back to its rows.
    per_group_mean = df.groupby(group)[column].transform('mean')
    df[column] = df[column].fillna(per_group_mean)
    return df
def get_rows_greater_than_avg(df, column):
    """
    Return all the rows(with all columns) where the value in a certain 'column'
    is greater than the average value of that column.

    row where row.column > mean(data.column)

    Args:
        df: A pandas DataFrame object representing the data.
        column: Name of the column to fill
    Returns:
        df: Pandas DataFrame object.
    """
    # Boolean-mask selection keeps every column, only above-average rows.
    return df[df[column] > df[column].mean()]
| [
"numpy.random.seed",
"numpy.eye",
"numpy.dot",
"pandas.read_csv",
"numpy.zeros",
"numpy.ones",
"numpy.identity",
"numpy.array",
"numpy.linalg.inv",
"numpy.random.rand",
"numpy.linalg.det",
"numpy.delete"
] | [((913, 928), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (921, 928), True, 'import numpy as np\n'), ((1824, 1845), 'numpy.random.seed', 'np.random.seed', (['seed1'], {}), '(seed1)\n', (1838, 1845), True, 'import numpy as np\n'), ((1853, 1876), 'numpy.random.rand', 'np.random.rand', (['*shape1'], {}), '(*shape1)\n', (1867, 1876), True, 'import numpy as np\n'), ((1881, 1902), 'numpy.random.seed', 'np.random.seed', (['seed2'], {}), '(seed2)\n', (1895, 1902), True, 'import numpy as np\n'), ((1910, 1933), 'numpy.random.rand', 'np.random.rand', (['*shape2'], {}), '(*shape2)\n', (1924, 1933), True, 'import numpy as np\n'), ((2505, 2526), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (2516, 2526), True, 'import pandas as pd\n'), ((372, 386), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (379, 386), True, 'import numpy as np\n'), ((593, 608), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (601, 608), True, 'import numpy as np\n'), ((807, 825), 'numpy.identity', 'np.identity', (['order'], {}), '(order)\n', (818, 825), True, 'import numpy as np\n'), ((934, 950), 'numpy.linalg.det', 'np.linalg.det', (['m'], {}), '(m)\n', (947, 950), True, 'import numpy as np\n'), ((2008, 2029), 'numpy.random.seed', 'np.random.seed', (['seed3'], {}), '(seed3)\n', (2022, 2029), True, 'import numpy as np\n'), ((2037, 2064), 'numpy.random.rand', 'np.random.rand', (['*temp.shape'], {}), '(*temp.shape)\n', (2051, 2064), True, 'import numpy as np\n'), ((428, 438), 'numpy.ones', 'np.ones', (['s'], {}), '(s)\n', (435, 438), True, 'import numpy as np\n'), ((650, 661), 'numpy.zeros', 'np.zeros', (['s'], {}), '(s)\n', (658, 661), True, 'import numpy as np\n'), ((847, 860), 'numpy.eye', 'np.eye', (['order'], {}), '(order)\n', (853, 860), True, 'import numpy as np\n'), ((1245, 1264), 'numpy.array', 'np.array', (['cofactors'], {}), '(cofactors)\n', (1253, 1264), True, 'import numpy as np\n'), ((1314, 1330), 'numpy.linalg.det', 'np.linalg.det', 
(['m'], {}), '(m)\n', (1327, 1330), True, 'import numpy as np\n'), ((1953, 1976), 'numpy.dot', 'np.dot', (['W1', '(X1 ** coef1)'], {}), '(W1, X1 ** coef1)\n', (1959, 1976), True, 'import numpy as np\n'), ((1980, 2003), 'numpy.dot', 'np.dot', (['W2', '(X2 ** coef2)'], {}), '(W2, X2 ** coef2)\n', (1986, 2003), True, 'import numpy as np\n'), ((1293, 1309), 'numpy.linalg.inv', 'np.linalg.inv', (['m'], {}), '(m)\n', (1306, 1309), True, 'import numpy as np\n'), ((1083, 1106), 'numpy.delete', 'np.delete', (['m', 'r'], {'axis': '(0)'}), '(m, r, axis=0)\n', (1092, 1106), True, 'import numpy as np\n'), ((1171, 1186), 'numpy.array', 'np.array', (['minor'], {}), '(minor)\n', (1179, 1186), True, 'import numpy as np\n')] |
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import io
import json
import struct
import tempfile
import warnings
from collections import namedtuple
from typing import BinaryIO, ByteString, Callable, Mapping, Tuple
import numpy as np
from dimod.variables import iter_deserialize_variables, iter_serialize_variables
__all__ = ['FileView', 'load']
# we want to use SpooledTemporaryFile but have it also include the methods
# from io.IOBase. This is (probably) forthcoming in future python, see
# https://bugs.python.org/issue35112
# Guard related to bpo-35112 (see comment above): once the stdlib class is an
# io.IOBase, this local wrapper is redundant and we warn that it is deprecated.
if issubclass(tempfile.SpooledTemporaryFile, io.IOBase):
    warnings.warn("Using deprecated SpooledTemporaryFile wrapper, "
                  "functionality is now included in SpooledTemporaryFile",
                  DeprecationWarning)

class SpooledTemporaryFile(tempfile.SpooledTemporaryFile):
    """Spooled temporary file that forwards the io.IOBase query methods
    to the underlying file object."""

    # This is not part of io.IOBase, but it is implemented in io.BytesIO
    # and io.TextIOWrapper
    def readinto(self, *args, **kwargs):
        return self._file.readinto(*args, **kwargs)

    def readable(self):
        return self._file.readable()

    def seekable(self):
        return self._file.seekable()

    def writable(self):
        return self._file.writable()
class Section(abc.ABC):
    """Abstract base class for one section of a serialized model file.

    On disk a section is: a 4-byte magic identifier, a 4-byte little-endian
    unsigned payload length, then the payload padded with spaces so that the
    whole section is a multiple of 64 bytes long.
    """

    @property
    @abc.abstractmethod
    def magic(self):
        """A 4-byte section identifier. Must be a class variable."""

    @classmethod
    @abc.abstractmethod
    def loads_data(cls, buff):
        """Accepts a bytes-like object and returns the saved data."""

    @abc.abstractmethod
    def dump_data(self):
        """Returns a bytes-like object encoding the relevant data."""

    def dumps(self):
        """Encode this section: magic, padded payload length, payload."""
        magic = self.magic
        if not isinstance(magic, bytes):
            raise TypeError("magic string should by bytes object")
        if len(magic) != 4:
            raise ValueError("magic string should be 4 bytes in length")

        payload = self.dump_data()
        payload_length = len(payload)

        # magic + 4-byte length placeholder + payload, padded to 64 bytes.
        pieces = [magic, bytes(4), payload]
        prefix_length = len(magic) + 4
        if (payload_length + prefix_length) % 64:
            padding = 64 - (payload_length + prefix_length) % 64
            pieces.append(b' ' * padding)
            payload_length += padding

        # Backfill the placeholder with the (padding-inclusive) length.
        pieces[1] = np.dtype('<u4').type(payload_length).tobytes()

        assert sum(map(len, pieces)) % 64 == 0

        return b''.join(pieces)

    @classmethod
    def load(cls, fp):
        """Decode a section written by :meth:`dumps` from file-like ``fp``."""
        magic = fp.read(len(cls.magic))
        if magic != cls.magic:
            raise ValueError("unknown subheader, expected {} but recieved "
                             "{}".format(cls.magic, magic))
        length = np.frombuffer(fp.read(4), '<u4')[0]
        return cls.loads_data(fp.read(int(length)))
class VariablesSection(Section):
    """File section that stores the model's variable labels."""

    magic = b'VARS'

    def __init__(self, variables):
        self.variables = variables

    def dump_data(self):
        """Encode the variables as ascii-encoded JSON bytes."""
        as_list = list(iter_serialize_variables(self.variables))
        return json.dumps(as_list).encode('ascii')

    @classmethod
    def loads_data(cls, data):
        """Decode variables from ascii-encoded JSON bytes."""
        return iter_deserialize_variables(json.loads(data.decode('ascii')))
def FileView(bqm, version=(1, 0), ignore_labels=False):
    """Deprecated: construct a file view of ``bqm``.

    Use ``bqm.to_file`` directly instead; this wrapper only forwards.
    """
    message = "FileView is deprecated, please use `bqm.to_file` instead"
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return bqm.to_file(version=version, ignore_labels=ignore_labels)
class _BytesIO(io.RawIOBase):
# A stub implementation that mimics io.BytesIO but does not make a copy
# in the case of a memoryview or bytearray. This is necessary because,
# although io.BytesIO avoids a copy of bytes objects in python 3.5+, it
# still copies the mutable versions.
#
# This is based on the version in the _pyio library
# https://github.com/python/cpython/blob/3.5/Lib/_pyio.py#L831
#
# Copyright 2001-2019 Python Software Foundation; All Rights Reserved
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and
# the Individual or Organization ("Licensee") accessing and otherwise using Python
# 3.5.9 software in source or binary form and its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python 3.5.9 alone or in any derivative
# version, provided, however, that PSF's License Agreement and PSF's notice of
# copyright, i.e., "Copyright 2001-2019 Python Software Foundation; All Rights
# Reserved" are retained in Python 3.5.9 alone or in any derivative version
# prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on or
# incorporates Python 3.5.9 or any part thereof, and wants to make the
# derivative work available to others as provided herein, then Licensee hereby
# agrees to include in any such work a brief summary of the changes made to Python
# 3.5.9.
#
# 4. PSF is making Python 3.5.9 available to Licensee on an "AS IS" basis.
# PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
# EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR
# WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE
# USE OF PYTHON 3.5.9 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 3.5.9
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
# MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 3.5.9, OR ANY DERIVATIVE
# THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material breach of
# its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any relationship
# of agency, partnership, or joint venture between PSF and Licensee. This License
# Agreement does not grant permission to use PSF trademarks or trade name in a
# trademark sense to endorse or promote products or services of Licensee, or any
# third party.
#
# 8. By copying, installing or otherwise using Python 3.5.9, Licensee agrees
# to be bound by the terms and conditions of this License Agreement.
def __init__(self, buff):
self._buffer = memoryview(buff)
self._pos = 0
def read(self, size=None):
if size is None:
size = -1
if size < 0:
size = len(self._buffer)
if len(self._buffer) <= self._pos:
return b''
newpos = min(len(self._buffer), self._pos + size)
b = self._buffer[self._pos: newpos]
self._pos = newpos
return bytes(b)
def readable(self):
return True
def seek(self, pos, whence=0):
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("unsupported whence value")
return self._pos
def seekable(self):
return True
# Registry mapping a file's magic-prefix bytes to the loader for that format.
_loaders: Mapping[bytes, Callable] = dict()


def register(prefix: bytes, loader: Callable):
    """Register a new loader keyed by its magic byte prefix."""
    _loaders[prefix] = loader


def load(fp, cls=None):
    """Load a model from a file.

    Args:
        fp (bytes-like/file-like):
            If file-like, should be readable, seekable file-like object. If
            bytes-like it will be wrapped with `io.BytesIO`.

        cls (class, optional):
            Deprecated keyword argument. Is ignored.

    Returns:
        The loaded model.

    Raises:
        ValueError: If ``fp`` is not seekable or no registered loader
            matches its magic prefix.
    """
    if cls is not None:
        warnings.warn("'cls' keyword argument is deprecated and ignored",
                      DeprecationWarning, stacklevel=2)

    if isinstance(fp, ByteString):
        file_like: BinaryIO = _BytesIO(fp)  # type: ignore[assignment]
    else:
        file_like = fp

    # BUGFIX: ``seekable`` is a method; the original tested the bound-method
    # object itself (always truthy), so the check could never fire.
    if not file_like.seekable():
        raise ValueError("expected file-like to be seekable")

    pos = file_like.tell()

    # Try each registered prefix length, shortest first, rewinding after
    # every probe so the matched loader sees the stream from the start.
    lengths = sorted(set(map(len, _loaders)))
    for num_bytes in lengths:
        prefix = file_like.read(num_bytes)
        file_like.seek(pos)

        # BUGFIX: the original wrapped the loader call in `except KeyError`,
        # which silently swallowed KeyErrors raised *inside* the loader.
        loader = _loaders.get(prefix)
        if loader is not None:
            return loader(file_like)

    raise ValueError("cannot load the given file-like")


# for slightly more explicit naming
load.register = register
def make_header(prefix: bytes, data: Mapping, version: Tuple[int, int]) -> bytearray:
    """Construct a header for serializing dimod models.

    Layout: the raw ``prefix`` bytes, then one unsigned byte each for the
    major and minor format version, then a 4-byte little-endian unsigned
    int giving the length of the remaining header data, then that data:
    ``data`` json-serialized (sorted keys), 'ascii'-encoded and
    newline-terminated.  The whole header is padded with spaces so its
    total length is divisible by 64.
    """
    header = bytearray()
    header += prefix
    header += bytes(version)

    length_offset = len(header)
    header += bytes(4)  # placeholder for the data length, backfilled below

    data_offset = len(header)
    header += json.dumps(data, sort_keys=True).encode('ascii')
    header += b'\n'

    # pad with spaces to a multiple of 64 bytes for alignment
    remainder = len(header) % 64
    if remainder:
        header += b' ' * (64 - remainder)
    assert not len(header) % 64

    # Length counts everything after the length field, padding included.
    header[length_offset:length_offset + 4] = struct.pack(
        '<I', len(header) - data_offset)

    return header
def write_header(file_like: BinaryIO, prefix: bytes, data: Mapping, version: Tuple[int, int]):
    """Serialize a header with :func:`.make_header` and write it to ``file_like``."""
    header = make_header(prefix, data, version)
    file_like.write(header)
# Attribute access (info.data / info.version) keeps callers stable if more
# fields are added later.
HeaderInfo = namedtuple('HeaderInfo', ['data', 'version'])


def read_header(file_like: BinaryIO, prefix: bytes) -> HeaderInfo:
    """Read the information from a header constructed by :func:`.make_header`.

    The return value should be accessed by attribute for easy future expansion.
    """
    seen = file_like.read(len(prefix))
    if seen != prefix:
        raise ValueError("unknown file type, expected magic string "
                         f"{prefix!r} but got {seen!r} "
                         "instead")

    version = tuple(file_like.read(2))
    num_bytes, = struct.unpack('<I', file_like.read(4))
    payload = file_like.read(num_bytes).decode('ascii')

    return HeaderInfo(data=json.loads(payload), version=version)
| [
"numpy.dtype",
"json.dumps",
"dimod.variables.iter_serialize_variables",
"collections.namedtuple",
"warnings.warn"
] | [((11149, 11194), 'collections.namedtuple', 'namedtuple', (['"""HeaderInfo"""', "['data', 'version']"], {}), "('HeaderInfo', ['data', 'version'])\n", (11159, 11194), False, 'from collections import namedtuple\n'), ((1175, 1322), 'warnings.warn', 'warnings.warn', (['"""Using deprecated SpooledTemporaryFile wrapper, functionality is now included in SpooledTemporaryFile"""', 'DeprecationWarning'], {}), "(\n 'Using deprecated SpooledTemporaryFile wrapper, functionality is now included in SpooledTemporaryFile'\n , DeprecationWarning)\n", (1188, 1322), False, 'import warnings\n'), ((3998, 4109), 'warnings.warn', 'warnings.warn', (['"""FileView is deprecated, please use `bqm.to_file` instead"""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "('FileView is deprecated, please use `bqm.to_file` instead',\n DeprecationWarning, stacklevel=2)\n", (4011, 4109), False, 'import warnings\n'), ((8890, 8993), 'warnings.warn', 'warnings.warn', (['"""\'cls\' keyword argument is deprecated and ignored"""', 'DeprecationWarning'], {'stacklevel': '(2)'}), '("\'cls\' keyword argument is deprecated and ignored",\n DeprecationWarning, stacklevel=2)\n', (8903, 8993), False, 'import warnings\n'), ((3712, 3752), 'dimod.variables.iter_serialize_variables', 'iter_serialize_variables', (['self.variables'], {}), '(self.variables)\n', (3736, 3752), False, 'from dimod.variables import iter_deserialize_variables, iter_serialize_variables\n'), ((10557, 10589), 'json.dumps', 'json.dumps', (['data'], {'sort_keys': '(True)'}), '(data, sort_keys=True)\n', (10567, 10589), False, 'import json\n'), ((3769, 3793), 'json.dumps', 'json.dumps', (['serializable'], {}), '(serializable)\n', (3779, 3793), False, 'import json\n'), ((2986, 3001), 'numpy.dtype', 'np.dtype', (['"""<u4"""'], {}), "('<u4')\n", (2994, 3001), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#
# Tests the high-dimensional Gaussian log-pdf toy problem.
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
import pints
import pints.toy
import unittest
import numpy as np
import scipy.stats
class TestHighDimensionalGaussianLogPDF(unittest.TestCase):
    """
    Tests the high-dimensional Gaussian log-pdf toy problem.
    """
    def test_high_dimensional_log_pdf(self):
        """End-to-end checks: construction, covariance, evaluation,
        suggested bounds, sampling, KL divergence and error handling."""
        # Test basic usage
        f = pints.toy.HighDimensionalGaussianLogPDF(3)
        self.assertEqual(f.n_parameters(), 3)
        # Expected covariance for dim 3: variances 1, 2, 3 on the diagonal
        # with the default correlation rho = 0.5 off-diagonal.
        cov = np.array([
            [1, 0.5 * np.sqrt(2), 0.5 * np.sqrt(3)],
            [0.5 * np.sqrt(2), 2, 0.5 * np.sqrt(2) * np.sqrt(3)],
            [0.5 * np.sqrt(3), 0.5 * np.sqrt(2) * np.sqrt(3), 3]])
        self.assertTrue(np.all(f._cov == cov))
        # The log-pdf must be scalar-valued and maximal at the zero mean.
        f1 = f([0, 0, 0])
        f2 = f([0.1, 0.1, 0.1])
        self.assertTrue(np.isscalar(f1))
        self.assertTrue(np.isscalar(f2))
        self.assertTrue(f1 > f2)
        f = pints.toy.HighDimensionalGaussianLogPDF(100)
        self.assertEqual(f.n_parameters(), 100)
        f1 = f(np.zeros(100))
        f2 = f(np.ones(100) * 0.1)
        self.assertTrue(np.isscalar(f1))
        self.assertTrue(np.isscalar(f2))
        self.assertTrue(f1 > f2)
        # default
        f = pints.toy.HighDimensionalGaussianLogPDF()
        self.assertEqual(f.n_parameters(), 20)
        self.assertEqual(f.rho(), 0.5)
        # change rho
        f = pints.toy.HighDimensionalGaussianLogPDF(rho=0.9)
        self.assertEqual(f.n_parameters(), 20)
        self.assertEqual(f.rho(), 0.9)
        # change both
        f = pints.toy.HighDimensionalGaussianLogPDF(dimension=15,
                                                    rho=0.8)
        self.assertEqual(f.n_parameters(), 15)
        self.assertEqual(f.rho(), 0.8)
        # For 2d case check value versus Scipy (in case we change to
        # implementing via something other than Scipy)
        f = pints.toy.HighDimensionalGaussianLogPDF(dimension=2)
        cov = [[1.0, np.sqrt(1.0 / 2.0)],
               [np.sqrt(1.0 / 2.0), 2.0]]
        mean = np.zeros(2)
        self.assertEqual(f([1, 2]), scipy.stats.multivariate_normal.logpdf(
            [1, 2], mean, cov))
        # check suggested bounds
        f = pints.toy.HighDimensionalGaussianLogPDF(dimension=2)
        bounds = f.suggested_bounds()
        magnitude = 3 * np.sqrt(2.0)
        bounds1 = np.tile([-magnitude, magnitude], (2, 1))
        bounds1 = np.transpose(bounds1).tolist()
        self.assertTrue(np.array_equal(bounds, bounds1))
        f = pints.toy.HighDimensionalGaussianLogPDF()
        bounds = f.suggested_bounds()
        # NOTE(review): assertTrue treats the second argument as a message,
        # so this does not compare values -- assertEqual/assertAlmostEqual
        # was probably intended here.
        self.assertTrue(bounds[0][0], np.sqrt(20) * 3.0)
        # Test kl_divergence() errors
        n = 1000
        d = f.n_parameters()
        samples1 = f.sample(n)
        self.assertEqual(samples1.shape, (n, d))
        # Wrong number of columns and wrong rank must both be rejected.
        x = np.ones((n, d + 1))
        self.assertRaises(ValueError, f.kl_divergence, x)
        x = np.ones((n, d, 2))
        self.assertRaises(ValueError, f.kl_divergence, x)
        self.assertTrue(f.kl_divergence(samples1) > 0)
        self.assertEqual(f.kl_divergence(samples1), f.distance(samples1))
        self.assertRaises(ValueError, f.sample, 0)
        # Test errors
        self.assertRaises(
            ValueError, pints.toy.HighDimensionalGaussianLogPDF, 0)
        self.assertRaises(
            ValueError, pints.toy.HighDimensionalGaussianLogPDF, 2, 2)
        # in order for matrix to be positive definite there are bounds
        # on the lower value of rho > - 1 / (dims - 1)
        self.assertRaises(
            ValueError, pints.toy.HighDimensionalGaussianLogPDF, 4, -0.34
        )
        self.assertRaises(
            ValueError, pints.toy.HighDimensionalGaussianLogPDF, 11, -0.11
        )
if __name__ == '__main__':
    # Run the test suite; unittest.main() itself parses sys.argv (so -v
    # also enables unittest's verbose mode).
    print('Add -v for more debug output')
    import sys
    if '-v' in sys.argv:
        debug = True  # NOTE(review): `debug` is never read afterwards -- dead flag?
    unittest.main()
| [
"unittest.main",
"numpy.isscalar",
"pints.toy.HighDimensionalGaussianLogPDF",
"numpy.zeros",
"numpy.ones",
"numpy.transpose",
"numpy.tile",
"numpy.array_equal",
"numpy.all",
"numpy.sqrt"
] | [((4068, 4083), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4081, 4083), False, 'import unittest\n'), ((575, 617), 'pints.toy.HighDimensionalGaussianLogPDF', 'pints.toy.HighDimensionalGaussianLogPDF', (['(3)'], {}), '(3)\n', (614, 617), False, 'import pints\n'), ((1108, 1152), 'pints.toy.HighDimensionalGaussianLogPDF', 'pints.toy.HighDimensionalGaussianLogPDF', (['(100)'], {}), '(100)\n', (1147, 1152), False, 'import pints\n'), ((1412, 1453), 'pints.toy.HighDimensionalGaussianLogPDF', 'pints.toy.HighDimensionalGaussianLogPDF', ([], {}), '()\n', (1451, 1453), False, 'import pints\n'), ((1574, 1622), 'pints.toy.HighDimensionalGaussianLogPDF', 'pints.toy.HighDimensionalGaussianLogPDF', ([], {'rho': '(0.9)'}), '(rho=0.9)\n', (1613, 1622), False, 'import pints\n'), ((1744, 1806), 'pints.toy.HighDimensionalGaussianLogPDF', 'pints.toy.HighDimensionalGaussianLogPDF', ([], {'dimension': '(15)', 'rho': '(0.8)'}), '(dimension=15, rho=0.8)\n', (1783, 1806), False, 'import pints\n'), ((2082, 2134), 'pints.toy.HighDimensionalGaussianLogPDF', 'pints.toy.HighDimensionalGaussianLogPDF', ([], {'dimension': '(2)'}), '(dimension=2)\n', (2121, 2134), False, 'import pints\n'), ((2234, 2245), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2242, 2245), True, 'import numpy as np\n'), ((2400, 2452), 'pints.toy.HighDimensionalGaussianLogPDF', 'pints.toy.HighDimensionalGaussianLogPDF', ([], {'dimension': '(2)'}), '(dimension=2)\n', (2439, 2452), False, 'import pints\n'), ((2546, 2586), 'numpy.tile', 'np.tile', (['[-magnitude, magnitude]', '(2, 1)'], {}), '([-magnitude, magnitude], (2, 1))\n', (2553, 2586), True, 'import numpy as np\n'), ((2706, 2747), 'pints.toy.HighDimensionalGaussianLogPDF', 'pints.toy.HighDimensionalGaussianLogPDF', ([], {}), '()\n', (2745, 2747), False, 'import pints\n'), ((3020, 3039), 'numpy.ones', 'np.ones', (['(n, d + 1)'], {}), '((n, d + 1))\n', (3027, 3039), True, 'import numpy as np\n'), ((3110, 3128), 'numpy.ones', 'np.ones', (['(n, d, 2)'], {}), 
'((n, d, 2))\n', (3117, 3128), True, 'import numpy as np\n'), ((899, 920), 'numpy.all', 'np.all', (['(f._cov == cov)'], {}), '(f._cov == cov)\n', (905, 920), True, 'import numpy as np\n'), ((1004, 1019), 'numpy.isscalar', 'np.isscalar', (['f1'], {}), '(f1)\n', (1015, 1019), True, 'import numpy as np\n'), ((1045, 1060), 'numpy.isscalar', 'np.isscalar', (['f2'], {}), '(f2)\n', (1056, 1060), True, 'import numpy as np\n'), ((1216, 1229), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (1224, 1229), True, 'import numpy as np\n'), ((1290, 1305), 'numpy.isscalar', 'np.isscalar', (['f1'], {}), '(f1)\n', (1301, 1305), True, 'import numpy as np\n'), ((1331, 1346), 'numpy.isscalar', 'np.isscalar', (['f2'], {}), '(f2)\n', (1342, 1346), True, 'import numpy as np\n'), ((2515, 2527), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (2522, 2527), True, 'import numpy as np\n'), ((2660, 2691), 'numpy.array_equal', 'np.array_equal', (['bounds', 'bounds1'], {}), '(bounds, bounds1)\n', (2674, 2691), True, 'import numpy as np\n'), ((1246, 1258), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (1253, 1258), True, 'import numpy as np\n'), ((2156, 2174), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 2.0)'], {}), '(1.0 / 2.0)\n', (2163, 2174), True, 'import numpy as np\n'), ((2193, 2211), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 2.0)'], {}), '(1.0 / 2.0)\n', (2200, 2211), True, 'import numpy as np\n'), ((2605, 2626), 'numpy.transpose', 'np.transpose', (['bounds1'], {}), '(bounds1)\n', (2617, 2626), True, 'import numpy as np\n'), ((2824, 2835), 'numpy.sqrt', 'np.sqrt', (['(20)'], {}), '(20)\n', (2831, 2835), True, 'import numpy as np\n'), ((711, 721), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (718, 721), True, 'import numpy as np\n'), ((729, 739), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (736, 739), True, 'import numpy as np\n'), ((761, 771), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (768, 771), True, 'import numpy as np\n'), ((795, 805), 'numpy.sqrt', 'np.sqrt', 
(['(3)'], {}), '(3)\n', (802, 805), True, 'import numpy as np\n'), ((827, 837), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (834, 837), True, 'import numpy as np\n'), ((858, 868), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (865, 868), True, 'import numpy as np\n'), ((782, 792), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (789, 792), True, 'import numpy as np\n'), ((845, 855), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (852, 855), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Description
A simple script for testing either in-built methods or newly added methods with automatic parameter-tuning
"""
import os
import json
import optuna
import numpy as np
import torch
from ptranking.ltr_global import ltr_seed
from ptranking.ltr_auto.tree.ltr_auto_tree import AutoTreeLTREvaluator
np.random.seed(seed=ltr_seed)
if __name__ == '__main__':
    """
    >>> Tree-based Learning-to-Rank Models <<<
    (3) Tree-based Model
    -----------------------------------------------------------------------------------------
    | LightGBMLambdaMART                                                                    |
    -----------------------------------------------------------------------------------------
    >>> Supported Datasets <<<
    -----------------------------------------------------------------------------------------
    | LETTOR      | MQ2007_Super % MQ2008_Super % MQ2007_Semi % MQ2008_Semi                 |
    -----------------------------------------------------------------------------------------
    | MSLRWEB     | MSLRWEB10K % MSLRWEB30K                                                 |
    -----------------------------------------------------------------------------------------
    | Yahoo_LTR   | Set1 % Set2                                                             |
    -----------------------------------------------------------------------------------------
    | ISTELLA_LTR | Istella_S | Istella | Istella_X                                         |
    -----------------------------------------------------------------------------------------
    """
    # --- run-time switches --------------------------------------------------
    cuda = None # the gpu id, e.g., 0 or 1, otherwise, set it as None indicating to use cpu
    debug = True # in a debug mode, we just check whether the model can operate
    config_with_json = True # specify configuration with json files or not
    # One Optuna study is shared across all tuning trials of the evaluator;
    # direction='maximize' because the tuned objective is a ranking metric.
    global_study = optuna.create_study(direction='maximize')
    auto_evaluator = AutoTreeLTREvaluator(cuda=cuda)
    if config_with_json: # specify configuration with json files
        # the directory of json files
        # dir_json = '/Users/dryuhaitao/WorkBench/Dropbox/CodeBench/GitPool/wildltr_ptranking/testing/ltr_adhoc/json/'
        # dir_json = '/Volumes/data_hdd/ptranking.github.io/testing/ltr_adhoc/json/'
        # dir_json = '/Users/solar/WorkBench/Dropbox/CodeBench/GitPool/wildltr_ptranking/testing/ltr_adhoc/json/'
        # dir_json = '/Volumes/data_hdd/ptranking/testing/ltr_auto/tree/json/'
        dir_json = '/Users/iimac/II-Research Dropbox/Hai-Tao Yu/CodeBench/GitPool/auto_ptr/testing/ltr_auto/tree/json/'
        # NOTE(review): run() receives the evaluator itself via auto_evaluator=;
        # presumably required by AutoTreeLTREvaluator's API — confirm upstream.
        auto_evaluator.run(global_study=global_study, auto_evaluator=auto_evaluator, debug=debug,
                           model_id='LightGBMLambdaMART', config_with_json=True, dir_json=dir_json)
    else:
        # Manual (non-json) configuration: dataset id plus input/output paths.
        # data_id = 'MQ2007_Super'
        data_id = 'MQ2008_Super'
        ''' location of the adopted data '''
        # dir_data = '/Users/dryuhaitao/WorkBench/Corpus/' + 'LETOR4.0/MQ2007/'
        # dir_data = '/home/dl-box/WorkBench/Datasets/L2R/LETOR4.0/MQ2007/'
        #dir_data = '/Users/solar/WorkBench/Datasets/L2R/LETOR4.0/MQ2008/'
        dir_data = '/Volumes/data_hdd/dataset/MQ2008/'
        ''' output directory '''
        # dir_output = '/Users/dryuhaitao/WorkBench/CodeBench/Bench_Output/NeuralLTR/Listwise/'
        # dir_output = '/home/dl-box/WorkBench/CodeBench/PyCharmProject/Project_output/Out_L2R/Listwise/'
        # dir_output = '/Users/solar/WorkBench/CodeBench/PyCharmProject/Project_output/Out_L2R/'
        dir_output = '/Volumes/data_hdd/l2r_output/auto/'
        auto_evaluator.run(global_study=global_study, auto_evaluator=auto_evaluator, debug=debug, model_id='LightGBMLambdaMART',
                           config_with_json=False, data_id=data_id, dir_data=dir_data, dir_output=dir_output)
"ptranking.ltr_auto.tree.ltr_auto_tree.AutoTreeLTREvaluator",
"numpy.random.seed",
"optuna.create_study"
] | [((359, 388), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'ltr_seed'}), '(seed=ltr_seed)\n', (373, 388), True, 'import numpy as np\n'), ((1954, 1995), 'optuna.create_study', 'optuna.create_study', ([], {'direction': '"""maximize"""'}), "(direction='maximize')\n", (1973, 1995), False, 'import optuna\n'), ((2017, 2048), 'ptranking.ltr_auto.tree.ltr_auto_tree.AutoTreeLTREvaluator', 'AutoTreeLTREvaluator', ([], {'cuda': 'cuda'}), '(cuda=cuda)\n', (2037, 2048), False, 'from ptranking.ltr_auto.tree.ltr_auto_tree import AutoTreeLTREvaluator\n')] |
import tensorflow as tf
from keras.models import Model
from keras.layers import Input, Dense
from keras import backend as K
from keras import optimizers,applications, callbacks
from keras.callbacks import ModelCheckpoint
from keras.callbacks import LearningRateScheduler
import numpy as np
from wx_hyperparam import WxHyperParameter
import xgboost as xgb
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
import functools
import time
#set default global hyper paramerters
wx_hyperparam = WxHyperParameter(learning_ratio=0.001)
def timeit(func):
    """Decorator that prints the elapsed run time of *func* after each call.

    The wrapped function's arguments and return value are passed through
    unchanged; only a timing line is printed to stdout.
    """
    @functools.wraps(func)
    def newfunc(*args, **kwargs):
        # perf_counter is a monotonic clock, so the measurement cannot be
        # distorted by system clock adjustments mid-call (time.time can).
        startTime = time.perf_counter()
        ret = func(*args, **kwargs)
        elapsedTime = time.perf_counter() - startTime
        print('\nfunction [{}] finished in {} s'.format(
            func.__name__, float(elapsedTime)))
        return ret
    return newfunc
def cw_ann_model(x_train, y_train, x_val, y_val, hyper_param=wx_hyperparam, hidden_layer_size=128, num_cls=2):
    """Build and train the single-hidden-layer ANN used by connection_weight().

    Architecture: Input(input_dim) -> Dense(hidden_layer_size) -> softmax(num_cls),
    trained with SGD + Nesterov momentum and a step-decayed learning rate.

    Args:
        x_train, y_train: training features and one-hot labels.
        x_val, y_val: validation split used for checkpoint selection (val_loss).
        hyper_param: WxHyperParameter carrying lr, decay, momentum, epochs,
            batch size and verbosity.
        hidden_layer_size: width of the single hidden layer.
        num_cls: number of output classes.

    Returns:
        The Keras model with the best-val_loss checkpoint weights reloaded.

    Side effect: writes the checkpoint file '../slp_cw_ann_weights_best.hdf5'.
    """
    input_dim = len(x_train[0])
    inputs = Input((input_dim,))
    hidden = Dense(hidden_layer_size)(inputs)
    fc_out = Dense(num_cls, activation='softmax')(hidden)
    model = Model(input=inputs, output=fc_out)
    # model.summary()
    # build the optimizer (SGD with Nesterov momentum)
    sgd = optimizers.SGD(lr=hyper_param.learning_ratio, decay=hyper_param.weight_decay, momentum=hyper_param.momentum, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    # callbacks
    def step_decay(epoch):
        # divide the base learning rate by another factor of 10 every 10 epochs
        exp_num = int(epoch/10)+1
        return float(hyper_param.learning_ratio/(10 ** exp_num))
    best_model_path="../slp_cw_ann_weights_best"+".hdf5"
    save_best_model = ModelCheckpoint(best_model_path, monitor="val_loss", verbose=hyper_param.verbose, save_best_only=True, mode='min')
    #save_best_model = ModelCheckpoint(best_model_path, monitor="val_acc", verbose=1, save_best_only=True, mode='max')
    change_lr = LearningRateScheduler(step_decay)
    # run training; ModelCheckpoint keeps only the lowest-val_loss weights
    history = model.fit(x_train, y_train, validation_data=(x_val,y_val),
                epochs=hyper_param.epochs, batch_size=hyper_param.batch_size, shuffle=True, callbacks=[save_best_model, change_lr], verbose=hyper_param.verbose)
    # reload the best checkpoint before handing the model back
    model.load_weights(best_model_path)
    return model
@timeit
def connection_weight(x_train, y_train, x_val, y_val, n_selection=100, hidden_layer_size=128, hyper_param=wx_hyperparam, num_cls=2):
    """Rank input features with the connection weight algorithm (Olden 2004).

    Trains a single-hidden-layer ANN via cw_ann_model, then scores feature i
    as sum over hidden units h and output classes c of w[i,h] * w[h,c], and
    keeps the n_selection highest-scoring features.

    Returns:
        (selected_idx, selected_weights, val_acc): indices of the selected
        features in descending score order, their scores, and the validation
        accuracy of the trained model.
    """
    # make model and do train
    model = cw_ann_model(x_train, y_train, x_val, y_val, hyper_param=hyper_param, hidden_layer_size=hidden_layer_size, num_cls=num_cls)
    # Keras get_weights() returns [kernel, bias] per Dense layer:
    #   weights[0] = input->hidden kernel, shape (input_dim, hidden)
    #   weights[1] = hidden bias, shape (hidden,)
    #   weights[2] = hidden->output kernel, shape (hidden, num_cls)
    weights = model.get_weights()
    wt_ih = weights[0]
    # Bug fix: the original used weights[1] here, which is the hidden-layer
    # *bias* vector, not the hidden->output kernel required by Olden's method
    # (the elementwise broadcast (input_dim, hidden) * (hidden,) hid the error).
    wt_ho = weights[2]
    # connection weight: propagate input->hidden->output products and sum
    # over all hidden units and output classes -> one score per input feature
    sum_wt = np.sum(np.dot(wt_ih, wt_ho), axis=1)
    selected_idx = np.argsort(sum_wt)[::-1][0:n_selection]
    selected_weights = sum_wt[selected_idx]
    # get evaluation acc from the best checkpointed model
    loss, val_acc = model.evaluate(x_val, y_val)
    K.clear_session()
    return selected_idx, selected_weights, val_acc
def naive_SLP_model(x_train, y_train, x_val, y_val, hyper_param=wx_hyperparam, num_cls=2):
    """Build and train the single-layer (softmax-only) perceptron used by wx_slp().

    Architecture: Input(input_dim) -> Dense(num_cls, softmax), trained with
    SGD + Nesterov momentum and a step-decayed learning rate.

    For very small datasets (< 10 samples total) the validation split is
    folded back into training and the checkpoint monitors training 'loss';
    otherwise a normal train/val fit monitors 'val_loss'.

    Returns:
        The Keras model with the best checkpoint weights reloaded.

    Side effect: writes the checkpoint file './slp_wx_weights_best.hdf5'.
    """
    input_dim = len(x_train[0])
    inputs = Input((input_dim,))
    #fc_out = Dense(2, kernel_initializer='zeros', bias_initializer='zeros', activation='softmax')(inputs)
    fc_out = Dense(num_cls, activation='softmax')(inputs)
    model = Model(input=inputs, output=fc_out)
    # model.summary()
    # build the optimizer (SGD with Nesterov momentum)
    sgd = optimizers.SGD(lr=hyper_param.learning_ratio, decay=hyper_param.weight_decay, momentum=hyper_param.momentum, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    # callbacks
    def step_decay(epoch):
        # divide the base learning rate by another factor of 10 every 10 epochs
        exp_num = int(epoch/10)+1
        return float(hyper_param.learning_ratio/(10 ** exp_num))
    best_model_path="./slp_wx_weights_best"+".hdf5"
    #save_best_model = ModelCheckpoint(best_model_path, monitor="val_acc", verbose=1, save_best_only=True, mode='max')
    change_lr = LearningRateScheduler(step_decay)
    if len(x_train) + len(x_val) < 10 :
        # tiny dataset: merge val into train and monitor training loss
        save_best_model = ModelCheckpoint(best_model_path, monitor="loss", verbose=1, save_best_only=True, mode='min')
        if len(x_val) != 0 :
            x_train = np.concatenate((x_train, x_val), axis=0)
            y_train = np.concatenate((y_train, y_val), axis=0)
        #run train
        history = model.fit(x_train, y_train,
                    epochs=hyper_param.epochs, batch_size=hyper_param.batch_size, shuffle=True, callbacks=[save_best_model, change_lr])
    else :
        save_best_model = ModelCheckpoint(best_model_path, monitor="val_loss", verbose=1, save_best_only=True, mode='min')
        history = model.fit(x_train, y_train, validation_data=(x_val,y_val),
                    epochs=hyper_param.epochs, batch_size=hyper_param.batch_size, shuffle=True, callbacks=[save_best_model, change_lr])
    # reload the best checkpoint before handing the model back
    model.load_weights(best_model_path)
    return model
def classifier_LOOCV(x_train, y_train, x_val, y_val, x_test, y_test, method_clf='xgb', verbose=False, num_cls=2):
    """Fit one classifier and return its predicted probability for the single
    test sample (leave-one-out style).

    For num_cls == 2 the positive-class probability (a scalar) is returned;
    otherwise the full per-class probability vector of the test sample.
    method_clf selects 'xgb' (XGBoost with early stopping on the validation
    split) or 'svm' (RBF-kernel SVC); any other value returns None.
    """
    binary = num_cls == 2
    if method_clf == 'xgb':
        # objective / eval metric depend on binary vs multi-class
        objective = 'binary:logistic' if binary else 'multi:softprob'
        metric = 'logloss' if binary else 'mlogloss'
        clf = xgb.XGBClassifier(seed=1, objective=objective)
        clf.fit(x_train, y_train, eval_set=[(x_val, y_val)], verbose=verbose,
                eval_metric=metric, early_stopping_rounds=100)
        prob = clf.predict_proba(x_test)
        return prob[0][1] if binary else prob[0]
    if method_clf == 'svm':
        # identical SVC configuration for both class counts; only the
        # returned slice of the probability vector differs
        clf = SVC(kernel='rbf', probability=True, C=1.0, degree=3, verbose=verbose, random_state=0)
        clf.fit(x_train, y_train)
        prob = clf.predict_proba(x_test)
        return prob[0][1] if binary else prob[0]
@timeit
def wx_slp(x_train, y_train, x_val, y_val, n_selection=100, hyper_param=wx_hyperparam, num_cls=2):
    """Select features with the Wx score computed on a single-layer softmax model.

    Trains naive_SLP_model, averages the training inputs per class, and for
    each feature accumulates |avg_c * w_c - avg_c * w_c'| over all ordered
    class pairs (c, c'), c != c'.  The n_selection features with the largest
    accumulated score are returned.

    Returns:
        (selected_idx, selected_weights, val_acc); val_acc is 0 when x_val
        is empty.  Returns None if num_cls < 2.
    """
    if num_cls < 2:
        return
    # sess = tf.Session()
    # K.set_session(sess)
    input_dim = len(x_train[0])
    # make model and do train
    model = naive_SLP_model(x_train, y_train, x_val, y_val, hyper_param=hyper_param, num_cls=num_cls)
    #load weights
    weights = model.get_weights()
    #cacul WX scores
    num_data = {}
    running_avg={}
    tot_avg={}
    Wt = weights[0].transpose() #all weights of model
    Wb = weights[1].transpose() #all bias of model
    # per-class mean of the training inputs
    for i in range(num_cls):
        tot_avg[i] = np.zeros(input_dim) # avg of input data for each output class
        num_data[i] = 0.
    for i in range(len(x_train)):
        c = y_train[i].argmax()
        x = x_train[i]
        tot_avg[c] = tot_avg[c] + x
        num_data[c] = num_data[c] + 1
    for i in range(num_cls):
        tot_avg[i] = tot_avg[i] / num_cls if False else tot_avg[i] / num_data[i]
    #for general multi class problems
    wx_mul = []
    for i in range(0,num_cls):
        wx_mul_at_class = []
        for j in range(0,num_cls):
            wx_mul_at_class.append(tot_avg[i] * Wt[j])
        wx_mul.append(wx_mul_at_class)
    wx_mul = np.asarray(wx_mul)
    # accumulate cross-class contrast per feature
    wx_abs = np.zeros(Wt.shape[1])
    for n in range(0, Wt.shape[1]):
        for i in range(0,num_cls):
            for j in range(0,num_cls):
                if i != j:
                    wx_abs[n] += np.abs(wx_mul[i][i][n] - wx_mul[i][j][n])
    selected_idx = np.argsort(wx_abs)[::-1][0:n_selection]
    selected_weights = wx_abs[selected_idx]
    #get evaluation acc from best model
    if len(x_val) != 0 :
        loss, val_acc = model.evaluate(x_val, y_val)
    else :
        loss = 0
        val_acc = 0
    K.clear_session()
    return selected_idx, selected_weights, val_acc
def sum_fan_in(xi, input_x, layer_num, index, wt, output_class_idx):
    """Recursively accumulate the fan-in contribution of input feature *xi*.

    Walks the weight stack *wt* (layers 0 .. layer_num-1) from the input
    layer up to *index*, multiplying the feature value through each layer's
    weights (wx = ux * uw).  At the output layer the result collapses to a
    scalar for *output_class_idx*; at hidden layers it is a vector with one
    entry per hidden unit.
    """
    if index == layer_num - 1:
        # Output layer: weight the upstream vector by this class's row and
        # collapse to a single scalar.
        upstream = sum_fan_in(xi, input_x, layer_num, index - 1, wt, output_class_idx)
        return np.sum(upstream * wt[index][output_class_idx])
    if index == 0:
        # Input layer: contribution of feature xi into every first-layer unit.
        feature_val = input_x[xi]
        return np.asarray([feature_val * wt[0][unit][xi] for unit in range(len(wt[0]))])
    # Intermediate hidden layer: collapse the upstream vector into each unit.
    upstream = sum_fan_in(xi, input_x, layer_num, index - 1, wt, output_class_idx)
    return np.asarray([np.sum(upstream * wt[index][unit]) for unit in range(len(wt[index]))])
def cal_class_wx_mlp(input_avg, wt, wb, input_class_idx, output_class_idx):
    """Wx contribution of every input feature for one (input class, output class) pair.

    Backpropagates the class-averaged input through the whole weight stack
    via sum_fan_in, yielding one scalar per input feature.  *wb* is accepted
    for API symmetry but not used by the current computation.
    """
    depth = len(wt)
    n_features = len(input_avg[0])
    class_avg = input_avg[input_class_idx]
    return np.asarray([
        sum_fan_in(feat, class_avg, depth, depth - 1, wt, output_class_idx)
        for feat in range(n_features)
    ])
@timeit
def wx_mlp(x_train, y_train, x_val, y_val, n_selection=100, hyper_param=wx_hyperparam, num_cls=2):
    """Select features with the Wx score computed on a 2-hidden-layer MLP.

    Trains Input -> Dense(num_h_unit) -> Dense(num_h_unit/2) -> softmax,
    averages the training inputs per class, backpropagates those averages
    through the trained weights (cal_class_wx_mlp), and accumulates
    cross-class contrasts per input feature.  The n_selection features with
    the largest accumulated score are returned.

    Returns:
        (selected_idx, selected_weights, val_acc).  Returns None if
        num_cls < 2.

    Side effect: writes the checkpoint file './mlp_wx_weights_best.hdf5'.
    """
    if num_cls < 2:
        return
    #sess = tf.Session()
    #K.set_session(sess)
    #build a NN model
    input_dim = len(x_train[0])
    num_hidden_layer = hyper_param.num_hidden_layer
    num_h_unit = hyper_param.num_h_unit
    inputs = Input((input_dim,))
    hidden_1 = Dense(units=num_h_unit)(inputs)
    hidden_2 = Dense(units=int(num_h_unit/2))(hidden_1)
    fc_out = Dense(num_cls, kernel_initializer='zeros', bias_initializer='zeros', activation='softmax')(hidden_2)
    model = Model(input=inputs, output=fc_out)
    # build the optimizer (SGD with Nesterov momentum)
    sgd = optimizers.SGD(lr=hyper_param.learning_ratio, decay=hyper_param.weight_decay, momentum=hyper_param.momentum, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    # model.summary()
    # callbacks
    def step_decay(epoch):
        # divide the base learning rate by another factor of 10 every 10 epochs
        exp_num = int(epoch/10)+1
        return float(hyper_param.learning_ratio/(10 ** exp_num))
    best_model_path="./mlp_wx_weights_best"+".hdf5"
    save_best_model = ModelCheckpoint(best_model_path, monitor="val_loss", verbose=hyper_param.verbose, save_best_only=True, mode='min')
    change_lr = LearningRateScheduler(step_decay)
    #run train
    history = model.fit(x_train, y_train, validation_data=(x_val,y_val),
                epochs=hyper_param.epochs, batch_size=hyper_param.batch_size, shuffle=True, callbacks=[save_best_model, change_lr], verbose=hyper_param.verbose)
    #load best model
    model.load_weights(best_model_path)
    #load weights
    weights = model.get_weights()
    #cacul WX scores
    num_data = {}
    running_avg={}
    tot_avg={}
    # split get_weights() ([kernel, bias] per Dense layer) into transposed
    # kernel dict wt and bias dict wb, indexed by layer
    wt = {}
    wb = {}
    for i in range(0,num_hidden_layer+1):
        wt[i] = weights[i*2].transpose()
        wb[i] = weights[i*2+1].transpose()
    # make avg for input data
    for i in range(num_cls):
        tot_avg[i] = np.zeros(input_dim) # avg of input data for each output class
        num_data[i] = 0.
    for i in range(len(x_train)):
        c = y_train[i].argmax()
        x = x_train[i]
        tot_avg[c] = tot_avg[c] + x
        num_data[c] = num_data[c] + 1
    for i in range(num_cls):
        tot_avg[i] = tot_avg[i] / num_data[i]
    #for general multi class problems
    wx_mul = []
    for i in range(0,num_cls):
        wx_mul_at_class = []
        for j in range(0,num_cls):
            #wx_mul_at_class.append(tot_avg[i] * Wt[j])
            print('Cal mlp wx : input class, weight class = ',i,j)
            wx_mul_at_class.append( cal_class_wx_mlp(tot_avg, wt, wb, i, j) )
        wx_mul.append(wx_mul_at_class)
    wx_mul = np.asarray(wx_mul)
    # accumulate cross-class contrast per input feature
    wx_abs = np.zeros(input_dim)
    for n in range(0, input_dim):
        for i in range(0,num_cls):
            for j in range(0,num_cls):
                if i != j:
                    wx_abs[n] += np.abs(wx_mul[i][i][n] - wx_mul[i][j][n])
    selected_idx = np.argsort(wx_abs)[::-1][0:n_selection]
    selected_weights = wx_abs[selected_idx]
    #get evaluation acc from best model
    loss, val_acc = model.evaluate(x_val, y_val)
    K.clear_session()
    return selected_idx, selected_weights, val_acc
| [
"numpy.sum",
"keras.optimizers.SGD",
"keras.callbacks.LearningRateScheduler",
"numpy.concatenate",
"keras.callbacks.ModelCheckpoint",
"sklearn.svm.SVC",
"numpy.asarray",
"numpy.abs",
"numpy.zeros",
"keras.models.Model",
"time.time",
"numpy.argsort",
"keras.layers.Dense",
"xgboost.XGBClassi... | [((519, 557), 'wx_hyperparam.WxHyperParameter', 'WxHyperParameter', ([], {'learning_ratio': '(0.001)'}), '(learning_ratio=0.001)\n', (535, 557), False, 'from wx_hyperparam import WxHyperParameter\n'), ((582, 603), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (597, 603), False, 'import functools\n'), ((1165, 1184), 'keras.layers.Input', 'Input', (['(input_dim,)'], {}), '((input_dim,))\n', (1170, 1184), False, 'from keras.layers import Input, Dense\n'), ((1302, 1336), 'keras.models.Model', 'Model', ([], {'input': 'inputs', 'output': 'fc_out'}), '(input=inputs, output=fc_out)\n', (1307, 1336), False, 'from keras.models import Model\n'), ((1393, 1521), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': 'hyper_param.learning_ratio', 'decay': 'hyper_param.weight_decay', 'momentum': 'hyper_param.momentum', 'nesterov': '(True)'}), '(lr=hyper_param.learning_ratio, decay=hyper_param.\n weight_decay, momentum=hyper_param.momentum, nesterov=True)\n', (1407, 1521), False, 'from keras import optimizers, applications, callbacks\n'), ((1843, 1962), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['best_model_path'], {'monitor': '"""val_loss"""', 'verbose': 'hyper_param.verbose', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(best_model_path, monitor='val_loss', verbose=hyper_param.\n verbose, save_best_only=True, mode='min')\n", (1858, 1962), False, 'from keras.callbacks import ModelCheckpoint\n'), ((2093, 2126), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['step_decay'], {}), '(step_decay)\n', (2114, 2126), False, 'from keras.callbacks import LearningRateScheduler\n'), ((3109, 3131), 'numpy.sum', 'np.sum', (['dot_wt'], {'axis': '(1)'}), '(dot_wt, axis=1)\n', (3115, 3131), True, 'import numpy as np\n'), ((3330, 3347), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (3345, 3347), True, 'from keras import backend as K\n'), ((3537, 3556), 'keras.layers.Input', 'Input', 
(['(input_dim,)'], {}), '((input_dim,))\n', (3542, 3556), False, 'from keras.layers import Input, Dense\n'), ((3736, 3770), 'keras.models.Model', 'Model', ([], {'input': 'inputs', 'output': 'fc_out'}), '(input=inputs, output=fc_out)\n', (3741, 3770), False, 'from keras.models import Model\n'), ((3827, 3955), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': 'hyper_param.learning_ratio', 'decay': 'hyper_param.weight_decay', 'momentum': 'hyper_param.momentum', 'nesterov': '(True)'}), '(lr=hyper_param.learning_ratio, decay=hyper_param.\n weight_decay, momentum=hyper_param.momentum, nesterov=True)\n', (3841, 3955), False, 'from keras import optimizers, applications, callbacks\n'), ((4385, 4418), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['step_decay'], {}), '(step_decay)\n', (4406, 4418), False, 'from keras.callbacks import LearningRateScheduler\n'), ((8094, 8112), 'numpy.asarray', 'np.asarray', (['wx_mul'], {}), '(wx_mul)\n', (8104, 8112), True, 'import numpy as np\n'), ((8127, 8148), 'numpy.zeros', 'np.zeros', (['Wt.shape[1]'], {}), '(Wt.shape[1])\n', (8135, 8148), True, 'import numpy as np\n'), ((8650, 8667), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (8665, 8667), True, 'from keras import backend as K\n'), ((10098, 10112), 'numpy.asarray', 'np.asarray', (['wx'], {}), '(wx)\n', (10108, 10112), True, 'import numpy as np\n'), ((10475, 10494), 'keras.layers.Input', 'Input', (['(input_dim,)'], {}), '((input_dim,))\n', (10480, 10494), False, 'from keras.layers import Input, Dense\n'), ((10729, 10763), 'keras.models.Model', 'Model', ([], {'input': 'inputs', 'output': 'fc_out'}), '(input=inputs, output=fc_out)\n', (10734, 10763), False, 'from keras.models import Model\n'), ((10798, 10926), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': 'hyper_param.learning_ratio', 'decay': 'hyper_param.weight_decay', 'momentum': 'hyper_param.momentum', 'nesterov': '(True)'}), '(lr=hyper_param.learning_ratio, decay=hyper_param.\n 
weight_decay, momentum=hyper_param.momentum, nesterov=True)\n', (10812, 10926), False, 'from keras import optimizers, applications, callbacks\n'), ((11261, 11380), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['best_model_path'], {'monitor': '"""val_loss"""', 'verbose': 'hyper_param.verbose', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(best_model_path, monitor='val_loss', verbose=hyper_param.\n verbose, save_best_only=True, mode='min')\n", (11276, 11380), False, 'from keras.callbacks import ModelCheckpoint\n'), ((11392, 11425), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['step_decay'], {}), '(step_decay)\n', (11413, 11425), False, 'from keras.callbacks import LearningRateScheduler\n'), ((12893, 12911), 'numpy.asarray', 'np.asarray', (['wx_mul'], {}), '(wx_mul)\n', (12903, 12911), True, 'import numpy as np\n'), ((12926, 12945), 'numpy.zeros', 'np.zeros', (['input_dim'], {}), '(input_dim)\n', (12934, 12945), True, 'import numpy as np\n'), ((13356, 13373), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (13371, 13373), True, 'from keras import backend as K\n'), ((658, 669), 'time.time', 'time.time', ([], {}), '()\n', (667, 669), False, 'import time\n'), ((1198, 1222), 'keras.layers.Dense', 'Dense', (['hidden_layer_size'], {}), '(hidden_layer_size)\n', (1203, 1222), False, 'from keras.layers import Input, Dense\n'), ((1244, 1280), 'keras.layers.Dense', 'Dense', (['num_cls'], {'activation': '"""softmax"""'}), "(num_cls, activation='softmax')\n", (1249, 1280), False, 'from keras.layers import Input, Dense\n'), ((3678, 3714), 'keras.layers.Dense', 'Dense', (['num_cls'], {'activation': '"""softmax"""'}), "(num_cls, activation='softmax')\n", (3683, 3714), False, 'from keras.layers import Input, Dense\n'), ((4518, 4615), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['best_model_path'], {'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(best_model_path, 
monitor='loss', verbose=1, save_best_only=\n True, mode='min')\n", (4533, 4615), False, 'from keras.callbacks import ModelCheckpoint\n'), ((5014, 5114), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['best_model_path'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(best_model_path, monitor='val_loss', verbose=1,\n save_best_only=True, mode='min')\n", (5029, 5114), False, 'from keras.callbacks import ModelCheckpoint\n'), ((7512, 7531), 'numpy.zeros', 'np.zeros', (['input_dim'], {}), '(input_dim)\n', (7520, 7531), True, 'import numpy as np\n'), ((9059, 9073), 'numpy.sum', 'np.sum', (['cur_wx'], {}), '(cur_wx)\n', (9065, 9073), True, 'import numpy as np\n'), ((10510, 10533), 'keras.layers.Dense', 'Dense', ([], {'units': 'num_h_unit'}), '(units=num_h_unit)\n', (10515, 10533), False, 'from keras.layers import Input, Dense\n'), ((10615, 10709), 'keras.layers.Dense', 'Dense', (['num_cls'], {'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""', 'activation': '"""softmax"""'}), "(num_cls, kernel_initializer='zeros', bias_initializer='zeros',\n activation='softmax')\n", (10620, 10709), False, 'from keras.layers import Input, Dense\n'), ((12157, 12176), 'numpy.zeros', 'np.zeros', (['input_dim'], {}), '(input_dim)\n', (12165, 12176), True, 'import numpy as np\n'), ((728, 739), 'time.time', 'time.time', ([], {}), '()\n', (737, 739), False, 'import time\n'), ((3151, 3169), 'numpy.argsort', 'np.argsort', (['sum_wt'], {}), '(sum_wt)\n', (3161, 3169), True, 'import numpy as np\n'), ((4671, 4711), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_val)'], {'axis': '(0)'}), '((x_train, x_val), axis=0)\n', (4685, 4711), True, 'import numpy as np\n'), ((4734, 4774), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_val)'], {'axis': '(0)'}), '((y_train, y_val), axis=0)\n', (4748, 4774), True, 'import numpy as np\n'), ((5589, 5643), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'seed': '(1)', 
'objective': '"""binary:logistic"""'}), "(seed=1, objective='binary:logistic')\n", (5606, 5643), True, 'import xgboost as xgb\n'), ((5893, 5946), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'seed': '(1)', 'objective': '"""multi:softprob"""'}), "(seed=1, objective='multi:softprob')\n", (5910, 5946), True, 'import xgboost as xgb\n'), ((6280, 6369), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'probability': '(True)', 'C': '(1.0)', 'degree': '(3)', 'verbose': 'verbose', 'random_state': '(0)'}), "(kernel='rbf', probability=True, C=1.0, degree=3, verbose=verbose,\n random_state=0)\n", (6283, 6369), False, 'from sklearn.svm import SVC\n'), ((6583, 6672), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'probability': '(True)', 'C': '(1.0)', 'degree': '(3)', 'verbose': 'verbose', 'random_state': '(0)'}), "(kernel='rbf', probability=True, C=1.0, degree=3, verbose=verbose,\n random_state=0)\n", (6586, 6672), False, 'from sklearn.svm import SVC\n'), ((8381, 8399), 'numpy.argsort', 'np.argsort', (['wx_abs'], {}), '(wx_abs)\n', (8391, 8399), True, 'import numpy as np\n'), ((9320, 9338), 'numpy.asarray', 'np.asarray', (['cur_wx'], {}), '(cur_wx)\n', (9330, 9338), True, 'import numpy as np\n'), ((9681, 9699), 'numpy.asarray', 'np.asarray', (['cur_wx'], {}), '(cur_wx)\n', (9691, 9699), True, 'import numpy as np\n'), ((13177, 13195), 'numpy.argsort', 'np.argsort', (['wx_abs'], {}), '(wx_abs)\n', (13187, 13195), True, 'import numpy as np\n'), ((9613, 9629), 'numpy.sum', 'np.sum', (['local_wx'], {}), '(local_wx)\n', (9619, 9629), True, 'import numpy as np\n'), ((8319, 8360), 'numpy.abs', 'np.abs', (['(wx_mul[i][i][n] - wx_mul[i][j][n])'], {}), '(wx_mul[i][i][n] - wx_mul[i][j][n])\n', (8325, 8360), True, 'import numpy as np\n'), ((13114, 13155), 'numpy.abs', 'np.abs', (['(wx_mul[i][i][n] - wx_mul[i][j][n])'], {}), '(wx_mul[i][i][n] - wx_mul[i][j][n])\n', (13120, 13155), True, 'import numpy as np\n')] |
"""
This code was copied from:
https://github.com/hmdolatabadi/LRS_NF/blob/master/nde/transforms/splines/rational_linear.py
"""
import torch
from torch.nn import functional as F
import torch.nn as nn
from nflows.utils import sum_except_batch, searchsorted
import numpy as np
from nflows.transforms import Transform
from src.models.layers.utils import InputOutsideDomain, share_across_batch
DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3
class PiecewiseRationalLinearCDF(Transform):
    """Elementwise monotonic piecewise rational-linear spline transform.

    Each element of the input is mapped through its own learnable spline;
    the per-element parameter tensors have the given *shape* and are shared
    (broadcast) across the batch dimension.

    Args:
        shape: shape of the per-element parameter tensors.
        num_bins: number of spline bins.
        tails: None for a spline on the base interval, or "linear" for
            identity tails outside [-tail_bound, tail_bound].
        tail_bound: half-width of the spline interval when tails are used.
        identity_init: if True, start the parameters at zeros (with the
            derivative constant chosen so the spline begins near identity);
            otherwise initialize uniformly at random.
        min_bin_width, min_bin_height, min_derivative: numerical floors
            forwarded to the spline routine.
    """

    def __init__(
        self,
        shape,
        num_bins=10,
        tails=None,
        tail_bound=1.0,
        identity_init=False,
        min_bin_width=DEFAULT_MIN_BIN_WIDTH,
        min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
        min_derivative=DEFAULT_MIN_DERIVATIVE,
    ):
        super().__init__()

        self.min_bin_width = min_bin_width
        self.min_bin_height = min_bin_height
        self.min_derivative = min_derivative
        self.tail_bound = tail_bound
        self.tails = tails

        if identity_init:
            self.unnormalized_widths = nn.Parameter(torch.zeros(*shape, num_bins))
            self.unnormalized_heights = nn.Parameter(torch.zeros(*shape, num_bins))
            self.unnormalized_lambdas = nn.Parameter(torch.zeros(*shape, num_bins))

            # constant is softplus^{-1}(1 - min_derivative): the spline's
            # derivative floor is min_derivative + softplus(raw), so this
            # yields unit derivatives, i.e. a near-identity initialization.
            constant = np.log(np.exp(1 - min_derivative) - 1)
            # "linear" tails pin the two boundary derivatives elsewhere,
            # hence num_bins - 1 free slots instead of num_bins + 1.
            num_derivatives = (
                (num_bins - 1) if self.tails == "linear" else (num_bins + 1)
            )
            self.unnormalized_derivatives = nn.Parameter(
                constant * torch.ones(*shape, num_derivatives)
            )
        else:
            self.unnormalized_widths = nn.Parameter(torch.rand(*shape, num_bins))
            self.unnormalized_heights = nn.Parameter(torch.rand(*shape, num_bins))
            self.unnormalized_lambdas = nn.Parameter(torch.rand(*shape, num_bins))

            num_derivatives = (
                (num_bins - 1) if self.tails == "linear" else (num_bins + 1)
            )
            self.unnormalized_derivatives = nn.Parameter(
                torch.rand(*shape, num_derivatives)
            )

    def _spline(self, inputs, inverse=False):
        """Apply the spline (or its inverse) elementwise.

        Returns (outputs, logabsdet) where logabsdet is summed over all
        non-batch dimensions.
        """
        batch_size = inputs.shape[0]

        # expand the shared parameters to one copy per batch element
        unnormalized_widths = share_across_batch(self.unnormalized_widths, batch_size)
        unnormalized_heights = share_across_batch(self.unnormalized_heights, batch_size)
        unnormalized_lambdas = share_across_batch(self.unnormalized_lambdas, batch_size)
        unnormalized_derivatives = share_across_batch(
            self.unnormalized_derivatives, batch_size
        )

        if self.tails is None:
            spline_fn = rational_linear_spline
            spline_kwargs = {}
        else:
            spline_fn = unconstrained_rational_linear_spline
            spline_kwargs = {"tails": self.tails, "tail_bound": self.tail_bound}

        outputs, logabsdet = spline_fn(
            inputs=inputs,
            unnormalized_widths=unnormalized_widths,
            unnormalized_heights=unnormalized_heights,
            unnormalized_derivatives=unnormalized_derivatives,
            unnormalized_lambdas=unnormalized_lambdas,
            inverse=inverse,
            min_bin_width=self.min_bin_width,
            min_bin_height=self.min_bin_height,
            min_derivative=self.min_derivative,
            **spline_kwargs
        )

        return outputs, sum_except_batch(logabsdet)

    def forward(self, inputs, context=None):
        """Forward transform; *context* is accepted but unused."""
        return self._spline(inputs, inverse=False)

    def inverse(self, inputs, context=None):
        """Inverse transform; *context* is accepted but unused."""
        return self._spline(inputs, inverse=True)
def unconstrained_rational_linear_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    unnormalized_lambdas,
    inverse=False,
    tails="linear",
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    """Rational-linear spline on [-tail_bound, tail_bound] with identity tails.

    Inputs inside the interval go through rational_linear_spline; inputs
    outside are passed through unchanged with log|det| = 0.  Only
    tails == "linear" is implemented (raises RuntimeError otherwise).

    Returns:
        (outputs, logabsdet), both shaped like *inputs*.
    """
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == "linear":
        # Add one derivative slot on each side and pin both boundary
        # derivatives to softplus^{-1}(1 - min_derivative); the spline's
        # derivative floor is min_derivative + softplus(raw), so the boundary
        # derivative equals 1 and the spline meets the identity tails smoothly.
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        # identity map outside the spline interval
        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError("{} tails are not implemented.".format(tails))

    # fill in the in-interval elements via the core spline routine
    (
        outputs[inside_interval_mask],
        logabsdet[inside_interval_mask],
    ) = rational_linear_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        unnormalized_lambdas=unnormalized_lambdas[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound,
        right=tail_bound,
        bottom=-tail_bound,
        top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
    )

    return outputs, logabsdet
def rational_linear_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    unnormalized_lambdas,
    inverse=False,
    left=0.0,
    right=1.0,
    bottom=0.0,
    top=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    """Elementwise monotone rational-linear spline on [left, right] -> [bottom, top].

    Bin widths/heights come from softmax-normalized parameters (floored at
    min_bin_width / min_bin_height), knot derivatives from a softplus
    (floored at min_derivative).  Returns (outputs, logabsdet), both shaped
    like ``inputs``.  Raises InputOutsideDomain if any input falls outside
    [left, right].
    """
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise InputOutsideDomain()
    num_bins = unnormalized_widths.shape[-1]
    if min_bin_width * num_bins > 1.0:
        raise ValueError("Minimal bin width too large for the number of bins")
    if min_bin_height * num_bins > 1.0:
        raise ValueError("Minimal bin height too large for the number of bins")
    # Bin widths: softmax-normalized, floored at min_bin_width, then mapped
    # onto [left, right] via a zero-padded cumulative sum.  Endpoints are
    # pinned exactly to avoid floating-point drift.
    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
    cumwidths = (right - left) * cumwidths + left
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]
    # Knot derivatives kept strictly positive (>= min_derivative) via softplus.
    derivatives = min_derivative + F.softplus(unnormalized_derivatives)
    # Bin heights: same construction as widths, on [bottom, top].
    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]
    # Locate each input's bin: by y-knots when inverting, by x-knots otherwise.
    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]
    # Gather the per-element bin quantities; the trailing [..., 0] drops the
    # singleton gather dimension.
    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths
    input_delta = delta.gather(-1, bin_idx)[..., 0]
    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
    input_heights = heights.gather(-1, bin_idx)[..., 0]
    # Per-bin lambda squashed into (0.025, 0.975) so the middle knot stays
    # strictly inside the bin.
    lambdas = 0.95 * torch.sigmoid(unnormalized_lambdas) + 0.025
    lam = lambdas.gather(-1, bin_idx)[..., 0]
    # Weights (wa, wb, wc) and knot heights (ya, yb, yc) of the two rational
    # segments that make up each bin.
    wa = 1
    wb = torch.sqrt(input_derivatives / input_derivatives_plus_one) * wa
    wc = (
        lam * wa * input_derivatives + (1 - lam) * wb * input_derivatives_plus_one
    ) / input_delta
    ya = input_cumheights
    yb = input_heights + input_cumheights
    yc = ((1 - lam) * wa * ya + lam * wb * yb) / ((1 - lam) * wa + lam * wb)
    if inverse:
        # Invert each rational segment; the float() masks select the segment
        # (y <= yc vs y > yc) per element.
        numerator = (lam * wa * (ya - inputs)) * (inputs <= yc).float() + (
            (wc - lam * wb) * inputs + lam * wb * yb - wc * yc
        ) * (inputs > yc).float()
        denominator = ((wc - wa) * inputs + wa * ya - wc * yc) * (
            inputs <= yc
        ).float() + ((wc - wb) * inputs + wb * yb - wc * yc) * (inputs > yc).float()
        theta = numerator / denominator
        outputs = theta * input_bin_widths + input_cumwidths
        derivative_numerator = (
            wa * wc * lam * (yc - ya) * (inputs <= yc).float()
            + wb * wc * (1 - lam) * (yb - yc) * (inputs > yc).float()
        ) * input_bin_widths
        # log|d inverse/dy| = log(num) - 2*log(|den|).
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(abs(denominator))
        return outputs, logabsdet
    else:
        # theta is the position within the bin, in [0, 1].
        theta = (inputs - input_cumwidths) / input_bin_widths
        numerator = (wa * ya * (lam - theta) + wc * yc * theta) * (
            theta <= lam
        ).float() + (wc * yc * (1 - theta) + wb * yb * (theta - lam)) * (
            theta > lam
        ).float()
        denominator = (wa * (lam - theta) + wc * theta) * (theta <= lam).float() + (
            wc * (1 - theta) + wb * (theta - lam)
        ) * (theta > lam).float()
        outputs = numerator / denominator
        derivative_numerator = (
            wa * wc * lam * (yc - ya) * (theta <= lam).float()
            + wb * wc * (1 - lam) * (yb - yc) * (theta > lam).float()
        ) / input_bin_widths
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(abs(denominator))
        return outputs, logabsdet
"torch.ones",
"src.models.layers.utils.share_across_batch",
"torch.zeros_like",
"src.models.layers.utils.InputOutsideDomain",
"torch.sqrt",
"nflows.utils.sum_except_batch",
"torch.log",
"torch.nn.functional.softmax",
"torch.cumsum",
"torch.sigmoid",
"torch.max",
"nflows.utils.searchsorted",
... | [((4130, 4154), 'torch.zeros_like', 'torch.zeros_like', (['inputs'], {}), '(inputs)\n', (4146, 4154), False, 'import torch\n'), ((4171, 4195), 'torch.zeros_like', 'torch.zeros_like', (['inputs'], {}), '(inputs)\n', (4187, 4195), False, 'import torch\n'), ((6174, 6212), 'torch.nn.functional.softmax', 'F.softmax', (['unnormalized_widths'], {'dim': '(-1)'}), '(unnormalized_widths, dim=-1)\n', (6183, 6212), True, 'from torch.nn import functional as F\n'), ((6299, 6327), 'torch.cumsum', 'torch.cumsum', (['widths'], {'dim': '(-1)'}), '(widths, dim=-1)\n', (6311, 6327), False, 'import torch\n'), ((6344, 6400), 'torch.nn.functional.pad', 'F.pad', (['cumwidths'], {'pad': '(1, 0)', 'mode': '"""constant"""', 'value': '(0.0)'}), "(cumwidths, pad=(1, 0), mode='constant', value=0.0)\n", (6349, 6400), True, 'from torch.nn import functional as F\n'), ((6654, 6693), 'torch.nn.functional.softmax', 'F.softmax', (['unnormalized_heights'], {'dim': '(-1)'}), '(unnormalized_heights, dim=-1)\n', (6663, 6693), True, 'from torch.nn import functional as F\n'), ((6784, 6813), 'torch.cumsum', 'torch.cumsum', (['heights'], {'dim': '(-1)'}), '(heights, dim=-1)\n', (6796, 6813), False, 'import torch\n'), ((6831, 6888), 'torch.nn.functional.pad', 'F.pad', (['cumheights'], {'pad': '(1, 0)', 'mode': '"""constant"""', 'value': '(0.0)'}), "(cumheights, pad=(1, 0), mode='constant', value=0.0)\n", (6836, 6888), True, 'from torch.nn import functional as F\n'), ((2267, 2323), 'src.models.layers.utils.share_across_batch', 'share_across_batch', (['self.unnormalized_widths', 'batch_size'], {}), '(self.unnormalized_widths, batch_size)\n', (2285, 2323), False, 'from src.models.layers.utils import InputOutsideDomain, share_across_batch\n'), ((2355, 2412), 'src.models.layers.utils.share_across_batch', 'share_across_batch', (['self.unnormalized_heights', 'batch_size'], {}), '(self.unnormalized_heights, batch_size)\n', (2373, 2412), False, 'from src.models.layers.utils import InputOutsideDomain, 
share_across_batch\n'), ((2444, 2501), 'src.models.layers.utils.share_across_batch', 'share_across_batch', (['self.unnormalized_lambdas', 'batch_size'], {}), '(self.unnormalized_lambdas, batch_size)\n', (2462, 2501), False, 'from src.models.layers.utils import InputOutsideDomain, share_across_batch\n'), ((2537, 2598), 'src.models.layers.utils.share_across_batch', 'share_across_batch', (['self.unnormalized_derivatives', 'batch_size'], {}), '(self.unnormalized_derivatives, batch_size)\n', (2555, 2598), False, 'from src.models.layers.utils import InputOutsideDomain, share_across_batch\n'), ((4258, 4301), 'torch.nn.functional.pad', 'F.pad', (['unnormalized_derivatives'], {'pad': '(1, 1)'}), '(unnormalized_derivatives, pad=(1, 1))\n', (4263, 4301), True, 'from torch.nn import functional as F\n'), ((5854, 5874), 'src.models.layers.utils.InputOutsideDomain', 'InputOutsideDomain', ([], {}), '()\n', (5872, 5874), False, 'from src.models.layers.utils import InputOutsideDomain, share_across_batch\n'), ((6602, 6638), 'torch.nn.functional.softplus', 'F.softplus', (['unnormalized_derivatives'], {}), '(unnormalized_derivatives)\n', (6612, 6638), True, 'from torch.nn import functional as F\n'), ((7812, 7870), 'torch.sqrt', 'torch.sqrt', (['(input_derivatives / input_derivatives_plus_one)'], {}), '(input_derivatives / input_derivatives_plus_one)\n', (7822, 7870), False, 'import torch\n'), ((3415, 3442), 'nflows.utils.sum_except_batch', 'sum_except_batch', (['logabsdet'], {}), '(logabsdet)\n', (3431, 3442), False, 'from nflows.utils import sum_except_batch, searchsorted\n'), ((5785, 5802), 'torch.min', 'torch.min', (['inputs'], {}), '(inputs)\n', (5794, 5802), False, 'import torch\n'), ((5813, 5830), 'torch.max', 'torch.max', (['inputs'], {}), '(inputs)\n', (5822, 5830), False, 'import torch\n'), ((7097, 7129), 'nflows.utils.searchsorted', 'searchsorted', (['cumheights', 'inputs'], {}), '(cumheights, inputs)\n', (7109, 7129), False, 'from nflows.utils import sum_except_batch, 
searchsorted\n'), ((7169, 7200), 'nflows.utils.searchsorted', 'searchsorted', (['cumwidths', 'inputs'], {}), '(cumwidths, inputs)\n', (7181, 7200), False, 'from nflows.utils import sum_except_batch, searchsorted\n'), ((7701, 7736), 'torch.sigmoid', 'torch.sigmoid', (['unnormalized_lambdas'], {}), '(unnormalized_lambdas)\n', (7714, 7736), False, 'import torch\n'), ((8824, 8855), 'torch.log', 'torch.log', (['derivative_numerator'], {}), '(derivative_numerator)\n', (8833, 8855), False, 'import torch\n'), ((9638, 9669), 'torch.log', 'torch.log', (['derivative_numerator'], {}), '(derivative_numerator)\n', (9647, 9669), False, 'import torch\n'), ((1121, 1150), 'torch.zeros', 'torch.zeros', (['*shape', 'num_bins'], {}), '(*shape, num_bins)\n', (1132, 1150), False, 'import torch\n'), ((1205, 1234), 'torch.zeros', 'torch.zeros', (['*shape', 'num_bins'], {}), '(*shape, num_bins)\n', (1216, 1234), False, 'import torch\n'), ((1289, 1318), 'torch.zeros', 'torch.zeros', (['*shape', 'num_bins'], {}), '(*shape, num_bins)\n', (1300, 1318), False, 'import torch\n'), ((1708, 1736), 'torch.rand', 'torch.rand', (['*shape', 'num_bins'], {}), '(*shape, num_bins)\n', (1718, 1736), False, 'import torch\n'), ((1791, 1819), 'torch.rand', 'torch.rand', (['*shape', 'num_bins'], {}), '(*shape, num_bins)\n', (1801, 1819), False, 'import torch\n'), ((1874, 1902), 'torch.rand', 'torch.rand', (['*shape', 'num_bins'], {}), '(*shape, num_bins)\n', (1884, 1902), False, 'import torch\n'), ((2102, 2137), 'torch.rand', 'torch.rand', (['*shape', 'num_derivatives'], {}), '(*shape, num_derivatives)\n', (2112, 2137), False, 'import torch\n'), ((4328, 4354), 'numpy.exp', 'np.exp', (['(1 - min_derivative)'], {}), '(1 - min_derivative)\n', (4334, 4354), True, 'import numpy as np\n'), ((1351, 1377), 'numpy.exp', 'np.exp', (['(1 - min_derivative)'], {}), '(1 - min_derivative)\n', (1357, 1377), True, 'import numpy as np\n'), ((1591, 1626), 'torch.ones', 'torch.ones', (['*shape', 'num_derivatives'], {}), '(*shape, 
num_derivatives)\n', (1601, 1626), False, 'import torch\n')] |
#!/usr/bin/env python
"""
Generate a grid of size L*L and give random binary values
"""
import numpy as np
from sys import argv
def gen_binary_rand():
    """Draw one Bernoulli sample: 1 with probability 0.6, else 0."""
    threshold = 0.6
    return 1 if np.random.uniform() < threshold else 0
def randomize_sys(grid):
    """Fill every cell of ``grid`` in place with a random binary value.

    Cells are visited in C (row-major) order, one random draw per cell.
    """
    for idx in np.ndindex(*grid.shape):
        grid[idx] = gen_binary_rand()
def gen_rand_sys(size):
    """Return a ``size`` x ``size`` integer grid of random 0/1 values."""
    # Fortran-ordered zero grid, matching the original memory-layout choice.
    grid = np.zeros((size, size), dtype=int, order='F')
    randomize_sys(grid)
    return grid
def main():
    """CLI entry point: read the grid size from argv and print a random grid.

    Returns 1 (error) when the argument count is wrong, None otherwise.
    """
    if len(argv) != 2:
        print("usage: python gen_rand_sys.py <size of grid>")
        return 1
    print(gen_rand_sys(int(argv[1])))
# Run the CLI entry point only when executed as a script.
if __name__ == "__main__":
    main()
| [
"numpy.random.uniform",
"numpy.zeros"
] | [((246, 265), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (263, 265), True, 'import numpy as np\n'), ((751, 801), 'numpy.zeros', 'np.zeros', ([], {'shape': '(size, size)', 'dtype': 'int', 'order': '"""F"""'}), "(shape=(size, size), dtype=int, order='F')\n", (759, 801), True, 'import numpy as np\n')] |
from med2image import med2image
from keras.models import load_model
from keras.preprocessing import image
import cv2
import numpy as np
import os
import tablib
from flask import Flask, render_template, request, flash, redirect, request, jsonify, session,g, render_template
import tensorflow as tf
from werkzeug import secure_filename
from sklearn.externals import joblib
import pandas as pd
from flask_mysqldb import MySQL
from nipype.interfaces import fsl
import time
from flask_mail import Mail, Message
from keras import backend as K
from flask_session import Session
from nilearn.image.image import mean_img
from nilearn.plotting import plot_epi
import nibabel
from matplotlib import pyplot as plt
nii_file = "" #fmri data file
csv_file = "" #eye movement data file
fmri_status = 0 #fmri data prediction status
preproc_status = 0 #fmri preprocessing status
fex_status = 0 #fmri feature extraction status
em_status = 0 #eye movement data prediction status
def getfMRIModel():
    """Load the pre-trained fMRI CNN from disk and compile it for inference."""
    model = load_model('fmri_model.h5')
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    return model
def getEyeMovementModel():
    """Load the pickled ensemble classifier for eye-movement data."""
    return joblib.load('eye_movement_ensembled.pkl')
def getEyeMovementPrediction():
    """Classify the uploaded eye-movement CSV and return a confidence value.

    Reads the module-global ``csv_file``, runs the ensemble model on every
    row, and returns the proportion of rows voting for the majority class
    (a float in (0, 1]).
    """
    global csv_file
    em_model= getEyeMovementModel()
    predict_data = pd.read_csv(csv_file, index_col=[0])
    # Onehot encoding for gender (binary) column
    # NOTE(review): get_dummies returns a DataFrame with one column per
    # category; assigning it to a single column only works if Gender has one
    # category in the file — confirm against real upload data.
    predict_data['Gender'] = pd.get_dummies(predict_data['Gender'], prefix='Gender')
    predictions = em_model.predict(predict_data)
    # Majority vote over per-row predictions.
    counts = np.bincount(predictions)
    bclass=np.argmax(counts) # Get the highest probable class
    probValue=0
    if(bclass==1):
        probValue=counts[1]/sum(counts)
    else:
        probValue=counts[0]/sum(counts)
    # NOTE(review): a unanimous vote (probValue == 1) is remapped to 0.11 —
    # presumably to avoid reporting 100% confidence; verify this is intended.
    if probValue ==1:
        probValue=0.11
    # Free the Keras/TensorFlow session state between requests.
    K.clear_session()
    return probValue
def preporcessFMRI():
    """Run the FSL preprocessing pipeline on the uploaded NIfTI file.

    Operates in place on the module-global ``nii_file`` (each stage writes
    its output back over the input path) and advances the module-global
    ``preproc_status`` percentage as stages finish.
    Stages: BET skull stripping -> FAST segmentation/bias correction ->
    FLIRT coregistration -> MCFLIRT motion correction -> SUSAN smoothing.
    (Function name misspelling is kept: callers use ``preporcessFMRI``.)
    """
    global nii_file, preproc_status
    # skull stripping (fractional intensity threshold 0.7; output overwrites input)
    btr = fsl.BET()
    btr.inputs.in_file = nii_file
    btr.inputs.frac = 0.7
    btr.inputs.out_file = nii_file
    btr.cmdline
    res = btr.run()
    preproc_status = 20
    # segmentation and bias correction
    fastr = fsl.FAST()
    fastr.inputs.in_files = nii_file
    fastr.cmdline
    out = fastr.run()
    preproc_status = 40
    # coregistration
    # NOTE(review): the reference image is the input itself — registering a
    # volume to itself is a no-op; confirm a template was intended here.
    flt = fsl.FLIRT(bins=640, cost_func='mutualinfo')
    flt.inputs.in_file = nii_file
    flt.inputs.reference = nii_file
    flt.inputs.output_type = "NIFTI_GZ"
    preproc_status = 45
    flt.cmdline
    preproc_status = 50
    res = flt.run()
    preproc_status = 60
    # motion correction
    mcflt = fsl.MCFLIRT()
    mcflt.inputs.in_file = nii_file
    mcflt.inputs.cost = 'mutualinfo'
    mcflt.inputs.out_file = nii_file
    mcflt.cmdline
    res = mcflt.run()
    preproc_status = 80
    # smoothing (SUSAN noise reduction, FWHM 8mm)
    sus = fsl.SUSAN()
    sus.inputs.in_file = nii_file
    sus.inputs.out_file = nii_file
    sus.inputs.brightness_threshold = 2000.0
    sus.inputs.fwhm = 8.0
    result = sus.run()
    preproc_status = 100
def resetValues():
    """Zero the progress counters once a previous run has fully completed.

    The fMRI counters are reset together only when all three reached 100;
    the eye-movement counter is reset independently.
    """
    global fmri_status, preproc_status, fex_status, em_status
    if fmri_status == preproc_status == fex_status == 100:
        fmri_status = preproc_status = fex_status = 0
    if em_status == 100:
        em_status = 0
def getFMRIPrediction():
    """Preprocess the uploaded fMRI, slice it to PNGs, and classify slices.

    Returns the fraction of slices the CNN labels as class 0 (treated as the
    ADHD probability by callers).  Advances the module-global progress
    counters as stages finish.
    """
    global nii_file, fmri_status, fex_status
    preporcessFMRI()
    time.sleep(2)
    fex_status = 50
    # Convert every slice of frame 0 to PNG images under temp9/.
    c_convert = med2image.med2image_nii(inputFile=nii_file, outputDir="temp9", outputFileStem="image",
        outputFileType="png", sliceToConvert='-1', frameToConvert='0', showSlices=False, reslice=False)
    time.sleep(1)
    fex_status = 100
    time.sleep(2)
    med2image.misc.tic()
    c_convert.run()
    fmri_status = 40
    fmri_model = getfMRIModel()
    images = []
    # NOTE(review): listing uses an absolute path but reads via the relative
    # 'temp9/' path — both must resolve to the same directory at runtime.
    for img in os.listdir('/home/adhd/adhd_cnn/dataFolder/temp9/'):
        img = cv2.imread('temp9/'+img)
        # img=img.astype('float')/255.0
        # Resize each slice to the CNN's 73x61 RGB input and add a batch axis.
        img = cv2.resize(img, (73, 61))
        img = np.reshape(img, [1, 73, 61, 3])
        images.append(img)
    images = np.vstack(images)
    clas = fmri_model.predict_classes(images, batch_size=10)
    fmri_status = 60
    print('Possibility of ADHD: ', (clas == 0).sum()/len(clas))
    print('Possibility of non-ADHD: ', (clas == 1).sum()/len(clas))
    # Class 0 fraction is reported as the ADHD score; nadhd is computed but
    # only adhd is returned (callers derive nadhd = 1 - adhd themselves).
    adhd = (clas == 0).sum()/len(clas)
    nadhd = (clas == 1).sum()/len(clas)
    K.clear_session() #To avoid reinstantiation of Tensorflow graph
    return adhd
def storeData(fname, lname, email, age, diag, score, data_type, user, symptoms, chronicDisease):
    """Insert one diagnosis record into the Diagnosis table.

    Test date/time are filled server-side via CURDATE()/CURTIME(); the query
    is parameterized, so user input is never interpolated into the SQL.
    """
    insert_sql = "INSERT INTO Diagnosis (Patient_first_name,Patient_last_name,Email,Age,Diagnosis,Composite_Score,Data_Type,User,Symptoms,Test_date,Test_time,ChronicDisease) VALUES (%s, %s,%s,%s,%s,%s,%s,%s,%s,CURDATE(),CURTIME(),%s)"
    cursor = mysql.connection.cursor()
    cursor.execute(insert_sql,
                   (fname, lname, email, age, diag, score, data_type, user, symptoms, chronicDisease))
    mysql.connection.commit()
    cursor.close()
# Flask application object and configuration.
# SECURITY(review): secret key, MySQL credentials and mail credentials are
# hard-coded here; they should come from environment variables or a config
# file outside version control.
app = Flask(__name__)
app.secret_key = "secret key"
app.config['UPLOAD_FOLDER'] = '/home/adhd/adhd_cnn/dataFolder/uploads'
# MySQL connection settings for the ADHD database.
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = 'password'
app.config['MYSQL_DB'] = 'ADHD'
# Outgoing mail: Gmail SMTP over STARTTLS; credentials are placeholders.
app.config.update(dict(
    DEBUG=True,
    MAIL_SERVER='smtp.gmail.com',
    MAIL_PORT=587,
    MAIL_USE_TLS=True,
    MAIL_USE_SSL=False,
    MAIL_USERNAME='<EMAIL>',
    MAIL_PASSWORD='<PASSWORD>',
))
# Server-side sessions stored on the filesystem.
SESSION_TYPE = 'filesystem'
app.config.from_object(__name__)
Session(app)
mysql = MySQL(app)
mail = Mail(app)
@app.route('/')
def render_homepage():
    """Landing page; clears any completed analysis state first."""
    resetValues()
    page = 'home.html'
    return render_template(page)
@app.route('/fmri_predict')
def render_fmripage():
    """fMRI upload form; clears any completed analysis state first."""
    resetValues()
    page = 'health_info.html'
    return render_template(page)
@app.route('/em_predict')
def render_empage():
    """Eye-movement upload form; clears any completed analysis state first."""
    resetValues()
    page = 'health_info_em.html'
    return render_template(page)
@app.route('/report')
def render_reportpage():
    """Diagnosis report page (data is fetched client-side via /get_data)."""
    page = 'report.html'
    return render_template(page)
@app.route("/predict", methods=['GET', 'POST'])
def predict():
resetValues()
data = {'success': False}
params = request.json
if(params == None):
params = request.args
print(params)
adhd = getPrediction()
nadhd = 1-adhd
if(params != None):
data['adhd'] = str(adhd)
data['nadhd'] = str(nadhd)
data['success'] = True
return jsonify(data)
@app.route("/status", methods=['GET', 'POST'])
def get_status():
global fmri_status, preproc_status, fex_status
status_data = {'data': fmri_status,
'preproc': preproc_status, 'fex': fex_status}
return jsonify(status_data)
@app.route("/em_status", methods=['GET', 'POST'])
def get_em_status():
global em_status
if 'em_status' not in session:
session['em_status']=0
print("em_status", session['em_status'])
status_data = {'data': em_status}
return jsonify(status_data)
@app.route("/fmri_preview", methods=['GET', 'POST'])
def get_fmri_preview():
data={"image":""}
x_size = 64
y_size = 64
n_slice = 64
n_volumes = 96
if request.method == 'POST':
f= request.files['file']
nii_file = os.path.join(
app.config['UPLOAD_FOLDER'], secure_filename(f.filename))
f.save(nii_file)
mean_haxby = mean_img(nii_file)
plot_epi(mean_haxby,output_file="static/img/viz.png")
data = {'image': "static/img/viz.png"}
return jsonify(data)
@app.route("/em_preview" , methods=['GET', 'POST'])
def get_em_preview():
global dataset
if request.method == 'POST':
dataset = tablib.Dataset()
f = request.files['file']
em_file = os.path.join(
app.config['UPLOAD_FOLDER'], secure_filename(f.filename))
df=pd.read_csv(em_file)
dataset=df.head(10)
data = dataset.to_html()
print(data)
dt={'table':data}
#return dataset.html
return jsonify(dt)
@app.route("/fmri_uploader", methods=['GET', 'POST'])
def upload_fmri_file():
global nii_file, fmri_status, preproc_status
resetValues()
preproc_status = 5
if request.method == 'POST':
print(request.form)
f = request.files['file']
nii_file = os.path.join(
app.config['UPLOAD_FOLDER'], secure_filename(f.filename))
f.save(nii_file)
flash('file uploaded suceessfully')
preproc_status = 10
data = {'success': False}
params = request.json
if(params == None):
params = request.args
adhd = getFMRIPrediction()
nadhd = 1-adhd
fmri_status = 85
if(params != None):
data['adhd'] = str(adhd)
data['nadhd'] = str(nadhd)
data['success'] = True
if (adhd > nadhd):
diag = 'ADHD'
score = adhd
else:
diag = 'Non-ADHD'
score = nadhd
fmri_status = 100
print(score)
r = request.form
if session.get('email'):
user = session['email']
else:
user = "Guest"
storeData(r['fname'], r['lname'], r['email'], int(
r['age']), diag, score, 'fmri', user, r['symptoms'],r['chronic'])
time.sleep(1)
# return jsonify(data)
return redirect('/report')
@app.route("/send_mail", methods=['GET', 'POST'])
def index():
r = request.form
fname = r['fname']
lname = r['lname']
to = r['to']
subject = r['subject']
body = r['body']
sender = r['from']
msg = Message(subject, sender=sender, recipients=[to])
msg.body = body
mail.send(msg)
rst = {'result': True}
return jsonify(rst)
@app.route("/get_data", methods=['GET', 'POST'])
def getData():
r = request.form
fname = r['fname']
lname = r['lname']
cur = mysql.connection.cursor()
cur.execute(
"SELECT * FROM Diagnosis where Patient_first_name = %s && Patient_last_name =%s ORDER BY Patient_id DESC LIMIT 1", (fname, lname))
row = cur.fetchone()
data = {}
if len(row) > 0:
data['Patient_id'] = row[0]
data['Patient_first_name'] = row[1]
data['Patient_last_name'] = row[2]
data['Email'] = row[3]
data['Age'] = str(row[4])
data['Diagnosis'] = row[5]
data['Composite_Score'] = str(row[6])
data['Symptoms'] = row[9]
data['ChronicDisease'] = row[12]
print(data)
cur.close()
return jsonify(data)
@app.route("/get_patient_data", methods=['GET', 'POST'])
def get_patient_data():
cid = request.args.get('uid')
cur = mysql.connection.cursor()
if cid != None:
cur.execute("SELECT * FROM Diagnosis where Patient_id = %s", [cid])
row = cur.fetchone()
data = {}
if row != None:
if len(row) > 0:
data['Patient_first_name'] = row[1]
data['Patient_last_name'] = row[2]
data['Email'] = row[3]
data['Age'] = str(row[4])
data['Symptoms'] = row[9]
data['Chronic'] = row[12]
print(data)
cur.close()
return jsonify(data)
else:
return "Invalid pateint ID"
else:
return "Error"
else:
return "Error"
@app.route("/em_uploader", methods=['GET', 'POST'])
def upload_em_file():
global csv_file, em_status
if request.method == 'POST':
f = request.files['file']
csv_file = os.path.join(
app.config['UPLOAD_FOLDER'], secure_filename(f.filename))
f.save(csv_file)
flash('file uploaded suceessfully')
session['em_status'] = 10
em_status = 10
data = {'success': False}
params = request.json
if(params == None):
params = request.args
print(params)
time.sleep(1)
session['em_status'] = 40
em_status = 40
adhd = getEyeMovementPrediction()
nadhd = 1-adhd
session['em_status'] = 60
em_status = 60
if(params != None):
data['adhd'] = str(adhd)
data['nadhd'] = str(nadhd)
data['success'] = True
if (adhd > nadhd):
diag = 'ADHD'
score = adhd
else:
diag = 'Non-ADHD'
score = nadhd
print(score)
time.sleep(1)
session['em_status'] = 80
em_status = 80
r = request.form
if session.get('email'):
user = session['email']
else:
user = "Guest"
storeData(r['fname'], r['lname'], r['email'], int(r['age']), diag, score, 'EM', user, r['symptoms'],r['chronic'])
session['em_status'] = 100
em_status = 100
time.sleep(1)
return redirect('/report')
@app.route('/register', methods=["GET", "POST"])
def register():
    """Create a user account (POST) or show the registration/login page (GET)."""
    if request.method == 'GET':
        if session.get('name') is not None:
            # Already logged in: go straight to the account page.
            if session['name'] != '' and session['email'] != '':
                return redirect('/account')
            # NOTE(review): a session with empty name/email falls through and
            # this handler returns None (a 500 in Flask) — confirm intended.
        else:
            return render_template("login.html")
    else:
        fname = request.form['fname']
        lname = request.form['lname']
        email = request.form['email']
        password = request.form['password']
        # SECURITY(review): passwords are stored in plain text; they should
        # be hashed (e.g. werkzeug.security.generate_password_hash).  Not
        # changed here because /login compares plain text against this column.
        cur = mysql.connection.cursor()
        cur.execute("INSERT INTO User (first_name,last_name, Email, psw) VALUES (%s,%s,%s,%s)",
            (fname, lname, email, password,))
        mysql.connection.commit()
        # Log the new user in immediately.
        session['name'] = request.form['fname']+request.form['lname']
        session['email'] = request.form['email']
        return render_template("register_success.html")
@app.route('/login', methods=["GET", "POST"])
def login():
    """Authenticate a user (POST) or show the login page / existing session (GET).

    Fixes: cursor.fetchone() returns None for an unknown e-mail — the
    original `len(user)` crashed; the GET branch now uses session.get() so a
    visitor with no session no longer triggers a KeyError.
    """
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        curl = mysql.connection.cursor()
        curl.execute("SELECT * FROM User WHERE Email=%s", (email,))
        user = curl.fetchone()
        curl.close()
        if user:
            # SECURITY(review): plain-text password comparison; should use
            # hashed passwords — flagged only, since stored rows would need
            # migrating together with /register.
            if password == user[3]:
                session['name'] = user[1]+user[2]
                session['email'] = user[0]
                return redirect("/account")
            else:
                return render_template('login.html', message="Error password and email not match")
        else:
            return render_template('login.html', message="Error user not found")
    else:
        # session.get avoids a KeyError when no session has been started.
        if session.get('name') and session.get('email'):
            return redirect('/account')
        else:
            return render_template("login.html")
@app.route('/logout', methods=["GET", "POST"])
def logout():
    """Drop all session state and return to the landing page."""
    session.clear()
    return redirect("/")
@app.route('/account', methods=["GET", "POST"])
def account():
    """Show the logged-in user's diagnosis history, or the login page.

    Fix: uses session.get() so a visitor with no session at all no longer
    raises KeyError on session['name'].
    """
    if session.get('name') and session.get('email'):
        print('Session started ....')
        email = session['email']
        curl = mysql.connection.cursor()
        curl.execute(
            "SELECT * FROM Diagnosis inner join User ON Diagnosis.User = User.Email WHERE Diagnosis.User=%s", (email,))
        data = curl.fetchall()
        curl.close()
        return render_template('account.html', data=data, len=len(data))
    else:
        return render_template("login.html")
# Serve on all interfaces with the debugger disabled.
# NOTE(review): binding to 0.0.0.0 exposes the app to the network — confirm
# a production WSGI server / firewall fronts this in deployment.
app.run(host='0.0.0.0', debug=False)
| [
"keras.models.load_model",
"med2image.med2image.med2image_nii",
"flask.flash",
"numpy.argmax",
"pandas.read_csv",
"flask_mail.Mail",
"flask.jsonify",
"med2image.med2image.misc.tic",
"nipype.interfaces.fsl.FAST",
"tablib.Dataset",
"nilearn.image.image.mean_img",
"flask_mysqldb.MySQL",
"flask.... | [((5106, 5121), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (5111, 5121), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((5638, 5650), 'flask_session.Session', 'Session', (['app'], {}), '(app)\n', (5645, 5650), False, 'from flask_session import Session\n'), ((5660, 5670), 'flask_mysqldb.MySQL', 'MySQL', (['app'], {}), '(app)\n', (5665, 5670), False, 'from flask_mysqldb import MySQL\n'), ((5678, 5687), 'flask_mail.Mail', 'Mail', (['app'], {}), '(app)\n', (5682, 5687), False, 'from flask_mail import Mail, Message\n'), ((1004, 1031), 'keras.models.load_model', 'load_model', (['"""fmri_model.h5"""'], {}), "('fmri_model.h5')\n", (1014, 1031), False, 'from keras.models import load_model\n'), ((1190, 1231), 'sklearn.externals.joblib.load', 'joblib.load', (['"""eye_movement_ensembled.pkl"""'], {}), "('eye_movement_ensembled.pkl')\n", (1201, 1231), False, 'from sklearn.externals import joblib\n'), ((1366, 1402), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {'index_col': '[0]'}), '(csv_file, index_col=[0])\n', (1377, 1402), True, 'import pandas as pd\n'), ((1482, 1537), 'pandas.get_dummies', 'pd.get_dummies', (["predict_data['Gender']"], {'prefix': '"""Gender"""'}), "(predict_data['Gender'], prefix='Gender')\n", (1496, 1537), True, 'import pandas as pd\n'), ((1601, 1625), 'numpy.bincount', 'np.bincount', (['predictions'], {}), '(predictions)\n', (1612, 1625), True, 'import numpy as np\n'), ((1637, 1654), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (1646, 1654), True, 'import numpy as np\n'), ((1871, 1888), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (1886, 1888), True, 'from keras import backend as K\n'), ((2002, 2011), 'nipype.interfaces.fsl.BET', 'fsl.BET', ([], {}), '()\n', (2009, 2011), False, 'from nipype.interfaces import fsl\n'), ((2219, 2229), 'nipype.interfaces.fsl.FAST', 'fsl.FAST', ([], {}), '()\n', (2227, 
2229), False, 'from nipype.interfaces import fsl\n'), ((2363, 2406), 'nipype.interfaces.fsl.FLIRT', 'fsl.FLIRT', ([], {'bins': '(640)', 'cost_func': '"""mutualinfo"""'}), "(bins=640, cost_func='mutualinfo')\n", (2372, 2406), False, 'from nipype.interfaces import fsl\n'), ((2662, 2675), 'nipype.interfaces.fsl.MCFLIRT', 'fsl.MCFLIRT', ([], {}), '()\n', (2673, 2675), False, 'from nipype.interfaces import fsl\n'), ((2877, 2888), 'nipype.interfaces.fsl.SUSAN', 'fsl.SUSAN', ([], {}), '()\n', (2886, 2888), False, 'from nipype.interfaces import fsl\n'), ((3458, 3471), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3468, 3471), False, 'import time\n'), ((3509, 3699), 'med2image.med2image.med2image_nii', 'med2image.med2image_nii', ([], {'inputFile': 'nii_file', 'outputDir': '"""temp9"""', 'outputFileStem': '"""image"""', 'outputFileType': '"""png"""', 'sliceToConvert': '"""-1"""', 'frameToConvert': '"""0"""', 'showSlices': '(False)', 'reslice': '(False)'}), "(inputFile=nii_file, outputDir='temp9',\n outputFileStem='image', outputFileType='png', sliceToConvert='-1',\n frameToConvert='0', showSlices=False, reslice=False)\n", (3532, 3699), False, 'from med2image import med2image\n'), ((3736, 3749), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3746, 3749), False, 'import time\n'), ((3775, 3788), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3785, 3788), False, 'import time\n'), ((3794, 3814), 'med2image.med2image.misc.tic', 'med2image.misc.tic', ([], {}), '()\n', (3812, 3814), False, 'from med2image import med2image\n'), ((3926, 3977), 'os.listdir', 'os.listdir', (['"""/home/adhd/adhd_cnn/dataFolder/temp9/"""'], {}), "('/home/adhd/adhd_cnn/dataFolder/temp9/')\n", (3936, 3977), False, 'import os\n'), ((4185, 4202), 'numpy.vstack', 'np.vstack', (['images'], {}), '(images)\n', (4194, 4202), True, 'import numpy as np\n'), ((4503, 4520), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (4518, 4520), True, 'from keras import backend as K\n'), 
((5757, 5785), 'flask.render_template', 'render_template', (['"""home.html"""'], {}), "('home.html')\n", (5772, 5785), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((5867, 5902), 'flask.render_template', 'render_template', (['"""health_info.html"""'], {}), "('health_info.html')\n", (5882, 5902), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((5980, 6018), 'flask.render_template', 'render_template', (['"""health_info_em.html"""'], {}), "('health_info_em.html')\n", (5995, 6018), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((6078, 6108), 'flask.render_template', 'render_template', (['"""report.html"""'], {}), "('report.html')\n", (6093, 6108), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((6501, 6514), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (6508, 6514), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((6748, 6768), 'flask.jsonify', 'jsonify', (['status_data'], {}), '(status_data)\n', (6755, 6768), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((7022, 7042), 'flask.jsonify', 'jsonify', (['status_data'], {}), '(status_data)\n', (7029, 7042), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((7569, 7582), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (7576, 7582), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((8053, 8064), 'flask.jsonify', 'jsonify', (['dt'], {}), '(dt)\n', (8060, 8064), False, 'from flask 
import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((9671, 9719), 'flask_mail.Message', 'Message', (['subject'], {'sender': 'sender', 'recipients': '[to]'}), '(subject, sender=sender, recipients=[to])\n', (9678, 9719), False, 'from flask_mail import Mail, Message\n'), ((9798, 9810), 'flask.jsonify', 'jsonify', (['rst'], {}), '(rst)\n', (9805, 9810), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((10583, 10596), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (10590, 10596), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((10689, 10712), 'flask.request.args.get', 'request.args.get', (['"""uid"""'], {}), "('uid')\n", (10705, 10712), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((14854, 14869), 'flask.session.clear', 'session.clear', ([], {}), '()\n', (14867, 14869), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((14881, 14894), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (14889, 14894), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((3993, 4019), 'cv2.imread', 'cv2.imread', (["('temp9/' + img)"], {}), "('temp9/' + img)\n", (4003, 4019), False, 'import cv2\n'), ((4072, 4097), 'cv2.resize', 'cv2.resize', (['img', '(73, 61)'], {}), '(img, (73, 61))\n', (4082, 4097), False, 'import cv2\n'), ((4112, 4143), 'numpy.reshape', 'np.reshape', (['img', '[1, 73, 61, 3]'], {}), '(img, [1, 73, 61, 3])\n', (4122, 4143), True, 'import numpy as np\n'), ((7428, 7446), 'nilearn.image.image.mean_img', 'mean_img', (['nii_file'], {}), '(nii_file)\n', (7436, 7446), False, 'from nilearn.image.image import 
mean_img\n'), ((7456, 7510), 'nilearn.plotting.plot_epi', 'plot_epi', (['mean_haxby'], {'output_file': '"""static/img/viz.png"""'}), "(mean_haxby, output_file='static/img/viz.png')\n", (7464, 7510), False, 'from nilearn.plotting import plot_epi\n'), ((7732, 7748), 'tablib.Dataset', 'tablib.Dataset', ([], {}), '()\n', (7746, 7748), False, 'import tablib\n'), ((7896, 7916), 'pandas.read_csv', 'pd.read_csv', (['em_file'], {}), '(em_file)\n', (7907, 7916), True, 'import pandas as pd\n'), ((8469, 8504), 'flask.flash', 'flash', (['"""file uploaded suceessfully"""'], {}), "('file uploaded suceessfully')\n", (8474, 8504), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((9115, 9135), 'flask.session.get', 'session.get', (['"""email"""'], {}), "('email')\n", (9126, 9135), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((9360, 9373), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (9370, 9373), False, 'import time\n'), ((9421, 9440), 'flask.redirect', 'redirect', (['"""/report"""'], {}), "('/report')\n", (9429, 9440), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((11751, 11786), 'flask.flash', 'flash', (['"""file uploaded suceessfully"""'], {}), "('file uploaded suceessfully')\n", (11756, 11786), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((12004, 12017), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (12014, 12017), False, 'import time\n'), ((12516, 12529), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (12526, 12529), False, 'import time\n'), ((12625, 12645), 'flask.session.get', 'session.get', (['"""email"""'], {}), "('email')\n", (12636, 12645), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, 
session, g, render_template\n'), ((12931, 12944), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (12941, 12944), False, 'import time\n'), ((12961, 12980), 'flask.redirect', 'redirect', (['"""/report"""'], {}), "('/report')\n", (12969, 12980), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((13826, 13866), 'flask.render_template', 'render_template', (['"""register_success.html"""'], {}), "('register_success.html')\n", (13841, 13866), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((15419, 15448), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html')\n", (15434, 15448), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((7353, 7380), 'werkzeug.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (7368, 7380), False, 'from werkzeug import secure_filename\n'), ((7856, 7883), 'werkzeug.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (7871, 7883), False, 'from werkzeug import secure_filename\n'), ((8407, 8434), 'werkzeug.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (8422, 8434), False, 'from werkzeug import secure_filename\n'), ((11689, 11716), 'werkzeug.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (11704, 11716), False, 'from werkzeug import secure_filename\n'), ((13094, 13113), 'flask.session.get', 'session.get', (['"""name"""'], {}), "('name')\n", (13105, 13113), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((13269, 13298), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html')\n", (13284, 13298), False, 'from flask import Flask, render_template, request, flash, 
redirect, request, jsonify, session, g, render_template\n'), ((14553, 14614), 'flask.render_template', 'render_template', (['"""login.html"""'], {'message': '"""Error user not found"""'}), "('login.html', message='Error user not found')\n", (14568, 14614), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((14704, 14724), 'flask.redirect', 'redirect', (['"""/account"""'], {}), "('/account')\n", (14712, 14724), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((14758, 14787), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html')\n", (14773, 14787), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((11292, 11305), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (11299, 11305), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((13215, 13235), 'flask.redirect', 'redirect', (['"""/account"""'], {}), "('/account')\n", (13223, 13235), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((14383, 14403), 'flask.redirect', 'redirect', (['"""/account"""'], {}), "('/account')\n", (14391, 14403), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n'), ((14445, 14520), 'flask.render_template', 'render_template', (['"""login.html"""'], {'message': '"""Error password and email not match"""'}), "('login.html', message='Error password and email not match')\n", (14460, 14520), False, 'from flask import Flask, render_template, request, flash, redirect, request, jsonify, session, g, render_template\n')] |
import abc
import copy
import itertools
import logging
from collections import defaultdict
from tempfile import NamedTemporaryFile
from typing import List, Optional, Set, Tuple, TypeVar, Union
import numpy
from openff.toolkit.topology import Molecule
from openff.toolkit.utils import (
OpenEyeToolkitWrapper,
RDKitToolkitWrapper,
UndefinedStereochemistryError,
)
from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator
from qcelemental.molutil import guess_connectivity
from qcportal.models.records import (
OptimizationRecord,
RecordBase,
RecordStatusEnum,
ResultRecord,
)
from simtk import unit
from typing_extensions import Literal
from openff.qcsubmit.results.results import (
TorsionDriveResultCollection,
_BaseResult,
_BaseResultCollection,
)
from openff.qcsubmit.validators import check_allowed_elements
# Generic alias for "any concrete result-collection type" so filters can
# return the same collection subclass that they were given.
T = TypeVar("T", bound=_BaseResultCollection)
# Module-level logger used to report how many results each filter removed.
logger = logging.getLogger(__name__)
class ResultFilter(BaseModel, abc.ABC):
    """The base class for a filter which will retain selection of QC records based on
    a specific criterion.
    """

    @abc.abstractmethod
    def _apply(self, result_collection: "T") -> "T":
        """The internal implementation of the ``apply`` method which should apply this
        filter to a results collection and return a new collection containing only the
        retained entries.

        Notes:
            The ``result_collection`` passed to this function will be a copy and
            so can be modified in place if needed.

        Args:
            result_collection: The collection to apply the filter to.

        Returns:
            The collection containing only the retained entries.
        """
        raise NotImplementedError()

    def apply(self, result_collection: "T") -> "T":
        """Apply this filter to a results collection, returning a new collection
        containing only the retained entries.

        Args:
            result_collection: The collection to apply the filter to.

        Returns:
            The collection containing only the retained entries.
        """
        # Filter a deep copy so the caller's collection is never mutated.
        retained = self._apply(result_collection.copy(deep=True))

        # Drop any server addresses whose entry lists the filter emptied.
        non_empty = {}
        for address, entries in retained.entries.items():
            if len(entries) > 0:
                non_empty[address] = entries
        retained.entries = non_empty

        logger.info(
            f"{abs(retained.n_results - result_collection.n_results)} "
            f"results were removed after applying a {self.__class__.__name__} filter."
        )

        # Record this filter (and its settings) in the provenance, keyed by a
        # name made unique with a running counter.
        applied_filters = retained.provenance.setdefault("applied-filters", {})
        filter_name = f"{self.__class__.__name__}-{len(applied_filters)}"
        applied_filters[filter_name] = {**self.dict()}

        return retained
class CMILESResultFilter(ResultFilter, abc.ABC):
    """The base class for a filter which will retain selection of QC records based
    solely on the CMILES ( / InChI key) associated with the record itself, and not
    the actual record.

    If the filter needs to access information from the QC record itself the
    ``ResultRecordFilter`` class should be used instead as it more efficiently
    retrieves the records and associated molecule objects from the QCFractal
    instances.
    """

    @abc.abstractmethod
    def _filter_function(self, result: "_BaseResult") -> bool:
        """A method which should return whether to retain a particular result based
        on some property of the result object.
        """
        raise NotImplementedError()

    def _apply(self, result_collection: "T") -> "T":
        # Retain, per address, only the entries accepted by the filter function.
        filtered_entries = {}
        for address, entries in result_collection.entries.items():
            filtered_entries[address] = [
                entry for entry in entries if self._filter_function(entry)
            ]
        result_collection.entries = filtered_entries
        return result_collection
class ResultRecordFilter(ResultFilter, abc.ABC):
    """The base class for filters which will operate on QC records and their
    corresponding molecules directly."""

    @abc.abstractmethod
    def _filter_function(
        self, result: "_BaseResult", record: RecordBase, molecule: Molecule
    ) -> bool:
        """A method which should return whether to retain a particular result based
        on some property of the associated QC record.
        """
        raise NotImplementedError()

    def _apply(self, result_collection: "T") -> "T":
        # Fetch every record (and its molecule) once, grouped by server address.
        records_by_address = defaultdict(list)
        for record, molecule in result_collection.to_records():
            records_by_address[record.client.address].append((record, molecule))

        retained_entries = {}
        for address, entries in result_collection.entries.items():
            entry_lookup = {entry.record_id: entry for entry in entries}

            # Collect the ids of the records accepted by the filter function.
            keep_ids = set()
            for record, molecule in records_by_address[address]:
                if self._filter_function(entry_lookup[record.id], record, molecule):
                    keep_ids.add(record.id)

            retained_entries[address] = [
                entry for entry in entries if entry.record_id in keep_ids
            ]

        result_collection.entries = retained_entries
        return result_collection
class ResultRecordGroupFilter(ResultFilter, abc.ABC):
    """The base class for filters which reduces repeated molecule entries down to a single
    entry.

    Notes:
        * This filter will only be applied to basic and optimization datasets.
          Torsion drive datasets / entries will be skipped.
    """

    @abc.abstractmethod
    def _filter_function(
        self, entries: List[Tuple["_BaseResult", RecordBase, Molecule, str]]
    ) -> List[Tuple["_BaseResult", str]]:
        """A method which should reduce a set of results down to a single entry based on
        some property of the QC calculation.
        """
        raise NotImplementedError()

    def _apply(self, result_collection: "T") -> "T":
        # Torsion drive collections are left untouched by group filters.
        if isinstance(result_collection, TorsionDriveResultCollection):
            return result_collection

        # Index every retrieved record, molecule and server address by record id.
        record_info = {}
        for record, molecule in result_collection.to_records():
            record_info[record.id] = [record, molecule, record.client.address]

        # Group the entries describing the same molecule by their InChI key.
        entries_by_inchikey = defaultdict(list)
        for entries in result_collection.entries.values():
            for entry in entries:
                entries_by_inchikey[entry.inchi_key].append(entry)

        # Let the subclass reduce each group, then re-bucket the survivors by
        # the address of the server they came from.
        retained_entries = defaultdict(list)
        for group in entries_by_inchikey.values():
            selected = self._filter_function(
                [(entry, *record_info[entry.record_id]) for entry in group]
            )
            for result, address in selected:
                retained_entries[address].append(result)

        result_collection.entries = retained_entries
        return result_collection
class LowestEnergyFilter(ResultRecordGroupFilter):
    """Filter the results collection and only keep the lowest energy entries.

    Notes:
        * This filter will only be applied to basic and optimization datasets.
          Torsion drive datasets / entries will be skipped.
    """

    def _filter_function(
        self,
        entries: List[
            Tuple["_BaseResult", Union[ResultRecord, OptimizationRecord], Molecule, str]
        ],
    ) -> List[Tuple["_BaseResult", str]]:
        """Return the single (entry, address) pair with the lowest energy.

        Optimization records expose their energy via ``get_final_energy()``;
        basic result records expose it as ``properties.return_energy``.
        """
        # Use +inf rather than a magic finite sentinel so any real energy,
        # however large, is always selected.
        low_entry, low_energy, low_address = None, float("inf"), ""

        for entry, record, _, address in entries:
            try:
                # Optimization records.
                energy = record.get_final_energy()
            except AttributeError:
                # Basic (single point) result records.
                energy = record.properties.return_energy

            if energy < low_energy:
                low_entry, low_energy, low_address = entry, energy, address

        return [(low_entry, low_address)]
class ConformerRMSDFilter(ResultRecordGroupFilter):
    """A filter which will retain up to a maximum number of conformers for each unique
    molecule (as determined by an entries InChI key) which are distinct to within a
    specified RMSD tolerance.
    Notes:
        * This filter will only be applied to basic and optimization datasets.
          Torsion drive datasets / entries will be skipped.
        * A greedy selection algorithm is used to select conformers which are most
          distinct in terms of their RMSD values.
    """
    max_conformers: int = Field(
        10,
        description="The maximum number of conformers to retain for each unique molecule.",
    )
    rmsd_tolerance: float = Field(
        0.5,
        description="The minimum RMSD [A] between two conformers for them to be "
        "considered distinct.",
    )
    heavy_atoms_only: bool = Field(
        True,
        description="Whether to only consider heavy atoms when computing the RMSD "
        "between two conformers.",
    )
    check_automorphs: bool = Field(
        True,
        description="Whether to consider automorphs when computing the RMSD between two "
        "conformers. Setting this option to ``True`` may slow down the filter "
        "considerably if ``heavy_atoms_only`` is set to ``False``.",
    )
    def _compute_rmsd_matrix_rd(self, molecule: Molecule) -> numpy.ndarray:
        """Computes the RMSD between all conformers stored on a molecule using an RDKit
        backend."""
        from rdkit import Chem
        from rdkit.Chem import AllChem
        rdkit_molecule: Chem.RWMol = molecule.to_rdkit()
        if self.heavy_atoms_only:
            rdkit_molecule = Chem.RemoveHs(rdkit_molecule)
        n_conformers = len(molecule.conformers)
        conformer_ids = [conf.GetId() for conf in rdkit_molecule.GetConformers()]
        # Symmetric (n_conformers, n_conformers) matrix; only the upper triangle
        # is filled below and then mirrored by adding the transpose.
        rmsd_matrix = numpy.zeros((n_conformers, n_conformers))
        # NOTE(review): ``i`` and ``j`` are RDKit conformer *ids* but are reused
        # both as matrix indices and as indices into ``conformer_ids`` — this
        # assumes ``to_rdkit`` assigns sequential ids 0..n-1; TODO confirm.
        for i, j in itertools.combinations(conformer_ids, 2):
            if self.check_automorphs:
                # GetBestRMS aligns and considers symmetry-equivalent atom
                # orderings (can be slow with hydrogens present).
                rmsd_matrix[i, j] = AllChem.GetBestRMS(
                    rdkit_molecule,
                    rdkit_molecule,
                    conformer_ids[i],
                    conformer_ids[j],
                )
            else:
                rmsd_matrix[i, j] = AllChem.GetConformerRMS(
                    rdkit_molecule,
                    conformer_ids[i],
                    conformer_ids[j],
                )
        rmsd_matrix += rmsd_matrix.T
        return rmsd_matrix
    def _compute_rmsd_matrix_oe(self, molecule: Molecule) -> numpy.ndarray:
        """Computes the RMSD between all conformers stored on a molecule using an OpenEye
        backend."""
        from openeye import oechem
        oe_molecule: oechem.OEMol = molecule.to_openeye()
        oe_conformers = {
            i: oe_conformer for i, oe_conformer in enumerate(oe_molecule.GetConfs())
        }
        n_conformers = len(molecule.conformers)
        rmsd_matrix = numpy.zeros((n_conformers, n_conformers))
        for i, j in itertools.combinations([*oe_conformers], 2):
            # OERMSD(ref, fit, automorph, heavyOnly, overlay); the final True
            # requests an overlay (alignment) before measuring.
            rmsd_matrix[i, j] = oechem.OERMSD(
                oe_conformers[i],
                oe_conformers[j],
                self.check_automorphs,
                self.heavy_atoms_only,
                True,
            )
        rmsd_matrix += rmsd_matrix.T
        return rmsd_matrix
    def _compute_rmsd_matrix(self, molecule: Molecule) -> numpy.ndarray:
        """Computes the RMSD between all conformers stored on a molecule."""
        # Prefer the RDKit backend, falling back to OpenEye if RDKit is absent.
        try:
            rmsd_matrix = self._compute_rmsd_matrix_rd(molecule)
        except ModuleNotFoundError:
            rmsd_matrix = self._compute_rmsd_matrix_oe(molecule)
        return rmsd_matrix
    def _filter_function(
        self,
        entries: List[
            Tuple["_BaseResult", Union[ResultRecord, OptimizationRecord], Molecule, str]
        ],
    ) -> List[Tuple["_BaseResult", str]]:
        """Greedily pick up to ``max_conformers`` RMSD-distinct entries.

        Each entry is expected to carry exactly one conformer; the conformers
        are merged onto a single molecule so the pairwise RMSD matrix can be
        computed, after which a greedy max-min selection is applied.
        """
        # Sanity check that all molecules look as we expect.
        assert all(molecule.n_conformers == 1 for _, _, molecule, _ in entries)
        # Condense the conformers into a single molecule.
        conformers = [
            molecule.canonical_order_atoms().conformers[0]
            for _, _, molecule, _ in entries
        ]
        [_, _, molecule, _] = entries[0]
        molecule = copy.deepcopy(molecule)
        molecule._conformers = conformers
        rmsd_matrix = self._compute_rmsd_matrix(molecule)
        # Select a set N maximally diverse conformers which are distinct in terms
        # of the RMSD tolerance.
        # Apply the greedy selection process.
        # ``closed_list`` holds the indices selected so far (slot 0 is conformer
        # 0 by construction); ``closed_mask`` marks conformers no longer
        # eligible because they are too close to an already-selected one.
        closed_list = numpy.zeros(self.max_conformers).astype(int)
        closed_mask = numpy.zeros(rmsd_matrix.shape[0], dtype=bool)
        n_selected = 1
        for i in range(min(molecule.n_conformers, self.max_conformers - 1)):
            # Summed RMSD from every selected conformer — the greedy "distance".
            distances = rmsd_matrix[closed_list[: i + 1], :].sum(axis=0)
            # Exclude already selected conformers or conformers which are too similar
            # to those already selected.
            closed_mask[
                numpy.any(
                    rmsd_matrix[closed_list[: i + 1], :] < self.rmsd_tolerance, axis=0
                )
            ] = True
            if numpy.all(closed_mask):
                # Stop if there are no more distinct conformers to select from.
                break
            distant_index = numpy.ma.array(distances, mask=closed_mask).argmax()
            closed_list[i + 1] = distant_index
            n_selected += 1
        # Map the selected conformer indices back to (entry, address) pairs.
        return [
            (entries[i.item()][0], entries[i.item()][-1])
            for i in closed_list[:n_selected]
        ]
class SMILESFilter(CMILESResultFilter):
    """A filter which will remove or retain records which were computed for molecules
    described by specific SMILES patterns.
    """

    _inchi_keys_to_include: Optional[Set[str]] = PrivateAttr(None)
    _inchi_keys_to_exclude: Optional[Set[str]] = PrivateAttr(None)

    smiles_to_include: Optional[List[str]] = Field(
        None,
        description="Only QC records computed for molecules whose SMILES representation "
        "appears in this list will be retained. This option is mutually exclusive with "
        "``smiles_to_exclude``.",
    )
    smiles_to_exclude: Optional[List[str]] = Field(
        None,
        description="Any QC records computed for molecules whose SMILES representation "
        "appears in this list will be discarded. This option is mutually exclusive with "
        "``smiles_to_include``.",
    )

    @root_validator
    def _validate_mutually_exclusive(cls, values):
        to_include = values.get("smiles_to_include")
        to_exclude = values.get("smiles_to_exclude")

        message = (
            "exactly one of `smiles_to_include` and `smiles_to_exclude` must be "
            "specified"
        )

        assert to_include is not None or to_exclude is not None, message
        assert to_include is None or to_exclude is None, message

        return values

    @staticmethod
    def _smiles_to_inchi_key(smiles: str) -> str:
        # Compare molecules by InChI key so different-but-equivalent SMILES match.
        molecule = Molecule.from_smiles(smiles, allow_undefined_stereo=True)
        return molecule.to_inchikey(fixed_hydrogens=False)

    def _filter_function(self, entry: "_BaseResult") -> bool:
        if self._inchi_keys_to_include is not None:
            return entry.inchi_key in self._inchi_keys_to_include
        return entry.inchi_key not in self._inchi_keys_to_exclude

    def _apply(self, result_collection: "T") -> "T":
        # Pre-compute the InChI key sets once before delegating to the base class.
        if self.smiles_to_include is None:
            self._inchi_keys_to_include = None
        else:
            self._inchi_keys_to_include = {
                self._smiles_to_inchi_key(smiles) for smiles in self.smiles_to_include
            }

        if self.smiles_to_exclude is None:
            self._inchi_keys_to_exclude = None
        else:
            self._inchi_keys_to_exclude = {
                self._smiles_to_inchi_key(smiles) for smiles in self.smiles_to_exclude
            }

        return super(SMILESFilter, self)._apply(result_collection)
class SMARTSFilter(CMILESResultFilter):
    """A filter which will remove or retain records which were computed for molecules
    which match specific SMARTS patterns.
    """

    smarts_to_include: Optional[List[str]] = Field(
        None,
        description="Only QC records computed for molecules that match one or more of "
        "the SMARTS patterns in this list will be retained. This option is mutually "
        "exclusive with ``smarts_to_exclude``.",
    )
    smarts_to_exclude: Optional[List[str]] = Field(
        None,
        description="Any QC records computed for molecules that match one or more of "
        "the SMARTS patterns in this list will be discarded. This option is mutually "
        "exclusive with ``smarts_to_include``.",
    )

    @root_validator
    def _validate_mutually_exclusive(cls, values):
        to_include = values.get("smarts_to_include")
        to_exclude = values.get("smarts_to_exclude")

        message = (
            "exactly one of `smarts_to_include` and `smarts_to_exclude` must be "
            "specified"
        )

        assert to_include is not None or to_exclude is not None, message
        assert to_include is None or to_exclude is None, message

        return values

    def _filter_function(self, entry: "_BaseResult") -> bool:
        molecule: Molecule = Molecule.from_mapped_smiles(
            entry.cmiles, allow_undefined_stereo=True
        )

        if self.smarts_to_include is not None:
            # Retain as soon as any inclusion pattern matches.
            for smarts in self.smarts_to_include:
                if len(molecule.chemical_environment_matches(smarts)) > 0:
                    return True
            return False

        # Retain only when no exclusion pattern matches.
        for smarts in self.smarts_to_exclude:
            if len(molecule.chemical_environment_matches(smarts)) > 0:
                return False
        return True
class ChargeFilter(CMILESResultFilter):
    """A filter which will only retain records if their formal charge matches allowed values or is not in the
    exclude list."""

    charges_to_include: Optional[List[int]] = Field(
        None,
        description="Only molecules with a net formal charge in this list will be kept. "
        "This option is mutually exclusive with ``charges_to_exclude``.",
    )
    charges_to_exclude: Optional[List[int]] = Field(
        None,
        description="Any molecules with a net formal charge which matches any of these values will be removed. "
        "This option is mutually exclusive with ``charges_to_include``.",
    )

    @root_validator
    def _validate_mutually_exclusive(cls, values):
        to_include = values.get("charges_to_include")
        to_exclude = values.get("charges_to_exclude")

        message = (
            "exactly one of `charges_to_include` and `charges_to_exclude` must be "
            "specified"
        )

        assert to_include is not None or to_exclude is not None, message
        assert to_include is None or to_exclude is None, message

        return values

    def _filter_function(self, entry: "_BaseResult") -> bool:
        molecule: Molecule = Molecule.from_mapped_smiles(
            entry.cmiles, allow_undefined_stereo=True
        )
        # Net formal charge as a plain number (in units of elementary charge).
        net_charge = molecule.total_charge.value_in_unit(unit.elementary_charge)

        if self.charges_to_include is not None:
            return net_charge in self.charges_to_include
        return net_charge not in self.charges_to_exclude
class ElementFilter(CMILESResultFilter):
    """A filter which will only retain records that contain the requested elements."""

    _allowed_atomic_numbers: Optional[Set[int]] = PrivateAttr(None)

    allowed_elements: List[Union[int, str]] = Field(
        ...,
        description="The list of allowed elements as symbols or atomic number ints.",
    )

    _check_elements = validator("allowed_elements", each_item=True, allow_reuse=True)(
        check_allowed_elements
    )

    def _filter_function(self, entry: "_BaseResult") -> bool:
        molecule: Molecule = Molecule.from_mapped_smiles(
            entry.cmiles, allow_undefined_stereo=True
        )
        # Keep the record only when every element present is on the allow list.
        atomic_numbers = {atom.atomic_number for atom in molecule.atoms}
        return atomic_numbers.issubset(self._allowed_atomic_numbers)

    def _apply(self, result_collection: "T") -> "T":
        from simtk.openmm.app import Element

        # Normalise the user-supplied symbols / ints to a set of atomic numbers.
        allowed = set()
        for element in self.allowed_elements:
            if isinstance(element, str):
                allowed.add(Element.getBySymbol(element).atomic_number)
            else:
                allowed.add(element)
        self._allowed_atomic_numbers = allowed

        return super(ElementFilter, self)._apply(result_collection)
class HydrogenBondFilter(ResultRecordFilter):
    """A filter which will remove or retain records which were computed for molecules
    which match specific SMARTS patterns.

    Notes:
        * For ``BasicResultCollection`` objects the single conformer associated with
          each result record will be checked for hydrogen bonds.
        * For ``OptimizationResultCollection`` objects the minimum energy conformer
          associated with each optimization record will be checked for hydrogen bonds.
        * For ``TorsionDriveResultCollection`` objects the minimum energy conformer
          at each grid angle will be checked for hydrogen bonds.
    """

    method: Literal["baker-hubbard"] = Field(
        "baker-hubbard", description="The method to use to detect any hydrogen bonds."
    )

    def _filter_function(
        self, result: "_BaseResult", record: RecordBase, molecule: Molecule
    ) -> bool:
        import mdtraj

        # Stack every conformer into an (n_frames, n_atoms, 3) array in nm.
        frames = []
        for conformer in molecule.conformers:
            frames.append(conformer.value_in_unit(unit.nanometers).tolist())
        frames = numpy.array(frames)

        topology = mdtraj.Topology.from_openmm(molecule.to_topology().to_openmm())
        trajectory = mdtraj.Trajectory(frames * unit.nanometers, topology)

        if self.method != "baker-hubbard":
            raise NotImplementedError()

        # Retain the record only when no frame exhibits a hydrogen bond.
        h_bonds = mdtraj.baker_hubbard(trajectory, freq=0.0, periodic=False)
        return len(h_bonds) == 0
class ConnectivityFilter(ResultRecordFilter):
    """A filter which will remove records whose corresponding molecules changed their
    connectivity during the computation, e.g. a proton transfer occurred.

    The connectivity will be percived from the 'final' conformer (see the Notes section)
    using the ``qcelemental.molutil.guess_connectivity`` function.

    Notes:
        * For ``BasicResultCollection`` objects no filtering will occur.
        * For ``OptimizationResultCollection`` objects the molecules final connectivity
          will be perceived using the minimum energy conformer.
        * For ``TorsionDriveResultCollection`` objects the connectivty will be
          will be perceived using the minimum energy conformer conformer at each grid
          angle.
    """

    tolerance: float = Field(
        1.2, description="Tunes the covalent radii metric safety factor."
    )

    def _filter_function(
        self, result: "_BaseResult", record: RecordBase, molecule: Molecule
    ) -> bool:
        # The bonds the OpenFF molecule says should exist, as sorted index pairs.
        expected_connectivity = {
            tuple(sorted((bond.atom1_index, bond.atom2_index)))
            for bond in molecule.bonds
        }

        # Check each conformer's geometry-perceived connectivity against it.
        for conformer_index in range(molecule.n_conformers):
            qc_molecule = molecule.to_qcschema(conformer=conformer_index)
            perceived_connectivity = {
                tuple(sorted(connection))
                for connection in guess_connectivity(
                    qc_molecule.symbols, qc_molecule.geometry, self.tolerance
                )
            }
            if perceived_connectivity != expected_connectivity:
                return False

        return True
class RecordStatusFilter(ResultRecordFilter):
    """A filter which will only retain records if their status matches a specified
    value.
    """

    status: RecordStatusEnum = Field(
        RecordStatusEnum.complete,
        description="Records whose status match this value will be retained.",
    )

    def _filter_function(
        self, result: "_BaseResult", record: RecordBase, molecule: Molecule
    ) -> bool:
        # Compare the enum string values case-insensitively.
        record_status = record.status.value.upper()
        wanted_status = self.status.value.upper()
        return record_status == wanted_status
class UnperceivableStereoFilter(ResultRecordFilter):
    """A filter which will drop any records computed for molecules whose stereochemistry
    cannot be perceived from the associated 3D conformers when re-loading the molecule
    from an SDF file using the OpenFF toolkit.

    This filter is mainly useful for catching edge cases whereby the stereochemistry
    perceived by an underlying cheminformatics toolkit does not match what the OpenFF
    toolkit expects.
    """

    toolkits: List[Literal["openeye", "rdkit"]] = Field(
        ["openeye", "rdkit"],
        description="The OpenFF toolkit registries that should be able to perceive "
        "the stereochemistry of each conformer.",
    )

    def _filter_function(self, result, record, molecule) -> bool:
        # Round-trip every conformer through an SDF file with each requested
        # toolkit; a failed round trip raises UndefinedStereochemistryError.
        try:
            for toolkit_name in self.toolkits:
                if toolkit_name == "openeye":
                    registry = OpenEyeToolkitWrapper()
                elif toolkit_name == "rdkit":
                    registry = RDKitToolkitWrapper()
                else:
                    raise NotImplementedError()

                for conformer in molecule.conformers:
                    single_conformer = copy.deepcopy(molecule)
                    single_conformer._conformers = [conformer]

                    with NamedTemporaryFile(suffix=".sdf") as file:
                        single_conformer.to_file(file.name, "SDF")
                        single_conformer.from_file(
                            file.name, toolkit_registry=registry
                        )
        except UndefinedStereochemistryError:
            return False

        return True
| [
"openff.toolkit.utils.RDKitToolkitWrapper",
"rdkit.Chem.RemoveHs",
"logging.getLogger",
"collections.defaultdict",
"rdkit.Chem.AllChem.GetConformerRMS",
"openff.toolkit.topology.Molecule.from_smiles",
"openff.toolkit.utils.OpenEyeToolkitWrapper",
"simtk.openmm.app.Element.getBySymbol",
"pydantic.Fie... | [((880, 921), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': '_BaseResultCollection'}), "('T', bound=_BaseResultCollection)\n", (887, 921), False, 'from typing import List, Optional, Set, Tuple, TypeVar, Union\n'), ((932, 959), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (949, 959), False, 'import logging\n'), ((8870, 8968), 'pydantic.Field', 'Field', (['(10)'], {'description': '"""The maximum number of conformers to retain for each unique molecule."""'}), "(10, description=\n 'The maximum number of conformers to retain for each unique molecule.')\n", (8875, 8968), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((9016, 9131), 'pydantic.Field', 'Field', (['(0.5)'], {'description': '"""The minimum RMSD [A] between two conformers for them to be considered distinct."""'}), "(0.5, description=\n 'The minimum RMSD [A] between two conformers for them to be considered distinct.'\n )\n", (9021, 9131), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((9185, 9306), 'pydantic.Field', 'Field', (['(True)'], {'description': '"""Whether to only consider heavy atoms when computing the RMSD between two conformers."""'}), "(True, description=\n 'Whether to only consider heavy atoms when computing the RMSD between two conformers.'\n )\n", (9190, 9306), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((9360, 9590), 'pydantic.Field', 'Field', (['(True)'], {'description': '"""Whether to consider automorphs when computing the RMSD between two conformers. Setting this option to ``True`` may slow down the filter considerably if ``heavy_atoms_only`` is set to ``False``."""'}), "(True, description=\n 'Whether to consider automorphs when computing the RMSD between two conformers. 
Setting this option to ``True`` may slow down the filter considerably if ``heavy_atoms_only`` is set to ``False``.'\n )\n", (9365, 9590), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((14240, 14257), 'pydantic.PrivateAttr', 'PrivateAttr', (['None'], {}), '(None)\n', (14251, 14257), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((14307, 14324), 'pydantic.PrivateAttr', 'PrivateAttr', (['None'], {}), '(None)\n', (14318, 14324), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((14371, 14575), 'pydantic.Field', 'Field', (['None'], {'description': '"""Only QC records computed for molecules whose SMILES representation appears in this list will be retained. This option is mutually exclusive with ``smiles_to_exclude``."""'}), "(None, description=\n 'Only QC records computed for molecules whose SMILES representation appears in this list will be retained. This option is mutually exclusive with ``smiles_to_exclude``.'\n )\n", (14376, 14575), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((14656, 14860), 'pydantic.Field', 'Field', (['None'], {'description': '"""Any QC records computed for molecules whose SMILES representation appears in this list will be discarded. This option is mutually exclusive with ``smiles_to_include``."""'}), "(None, description=\n 'Any QC records computed for molecules whose SMILES representation appears in this list will be discarded. This option is mutually exclusive with ``smiles_to_include``.'\n )\n", (14661, 14860), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((16701, 16915), 'pydantic.Field', 'Field', (['None'], {'description': '"""Only QC records computed for molecules that match one or more of the SMARTS patterns in this list will be retained. 
This option is mutually exclusive with ``smarts_to_exclude``."""'}), "(None, description=\n 'Only QC records computed for molecules that match one or more of the SMARTS patterns in this list will be retained. This option is mutually exclusive with ``smarts_to_exclude``.'\n )\n", (16706, 16915), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((16996, 17210), 'pydantic.Field', 'Field', (['None'], {'description': '"""Any QC records computed for molecules that match one or more of the SMARTS patterns in this list will be discarded. This option is mutually exclusive with ``smarts_to_include``."""'}), "(None, description=\n 'Any QC records computed for molecules that match one or more of the SMARTS patterns in this list will be discarded. This option is mutually exclusive with ``smarts_to_include``.'\n )\n", (17001, 17210), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((18531, 18697), 'pydantic.Field', 'Field', (['None'], {'description': '"""Only molecules with a net formal charge in this list will be kept. This option is mutually exclusive with ``charges_to_exclude``."""'}), "(None, description=\n 'Only molecules with a net formal charge in this list will be kept. This option is mutually exclusive with ``charges_to_exclude``.'\n )\n", (18536, 18697), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((18769, 18958), 'pydantic.Field', 'Field', (['None'], {'description': '"""Any molecules with a net formal charge which matches any of these values will be removed. This option is mutually exclusive with ``charges_to_include``."""'}), "(None, description=\n 'Any molecules with a net formal charge which matches any of these values will be removed. 
This option is mutually exclusive with ``charges_to_include``.'\n )\n", (18774, 18958), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((20131, 20148), 'pydantic.PrivateAttr', 'PrivateAttr', (['None'], {}), '(None)\n', (20142, 20148), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((20196, 20289), 'pydantic.Field', 'Field', (['...'], {'description': '"""The list of allowed elements as symbols or atomic number ints."""'}), "(..., description=\n 'The list of allowed elements as symbols or atomic number ints.')\n", (20201, 20289), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((21920, 22010), 'pydantic.Field', 'Field', (['"""baker-hubbard"""'], {'description': '"""The method to use to detect any hydrogen bonds."""'}), "('baker-hubbard', description=\n 'The method to use to detect any hydrogen bonds.')\n", (21925, 22010), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((23617, 23689), 'pydantic.Field', 'Field', (['(1.2)'], {'description': '"""Tunes the covalent radii metric safety factor."""'}), "(1.2, description='Tunes the covalent radii metric safety factor.')\n", (23622, 23689), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((24691, 24799), 'pydantic.Field', 'Field', (['RecordStatusEnum.complete'], {'description': '"""Records whose status match this value will be retained."""'}), "(RecordStatusEnum.complete, description=\n 'Records whose status match this value will be retained.')\n", (24696, 24799), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((25539, 25692), 'pydantic.Field', 'Field', (["['openeye', 'rdkit']"], {'description': '"""The OpenFF toolkit registries that should be able to perceive the stereochemistry of each conformer."""'}), "(['openeye', 'rdkit'], description=\n 'The OpenFF toolkit 
registries that should be able to perceive the stereochemistry of each conformer.'\n )\n", (25544, 25692), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((4628, 4645), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4639, 4645), False, 'from collections import defaultdict\n'), ((6569, 6586), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6580, 6586), False, 'from collections import defaultdict\n'), ((6776, 6793), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6787, 6793), False, 'from collections import defaultdict\n'), ((10188, 10229), 'numpy.zeros', 'numpy.zeros', (['(n_conformers, n_conformers)'], {}), '((n_conformers, n_conformers))\n', (10199, 10229), False, 'import numpy\n'), ((10251, 10291), 'itertools.combinations', 'itertools.combinations', (['conformer_ids', '(2)'], {}), '(conformer_ids, 2)\n', (10273, 10291), False, 'import itertools\n'), ((11306, 11347), 'numpy.zeros', 'numpy.zeros', (['(n_conformers, n_conformers)'], {}), '((n_conformers, n_conformers))\n', (11317, 11347), False, 'import numpy\n'), ((11369, 11412), 'itertools.combinations', 'itertools.combinations', (['[*oe_conformers]', '(2)'], {}), '([*oe_conformers], 2)\n', (11391, 11412), False, 'import itertools\n'), ((12674, 12697), 'copy.deepcopy', 'copy.deepcopy', (['molecule'], {}), '(molecule)\n', (12687, 12697), False, 'import copy\n'), ((13051, 13096), 'numpy.zeros', 'numpy.zeros', (['rmsd_matrix.shape[0]'], {'dtype': 'bool'}), '(rmsd_matrix.shape[0], dtype=bool)\n', (13062, 13096), False, 'import numpy\n'), ((17859, 17929), 'openff.toolkit.topology.Molecule.from_mapped_smiles', 'Molecule.from_mapped_smiles', (['entry.cmiles'], {'allow_undefined_stereo': '(True)'}), '(entry.cmiles, allow_undefined_stereo=True)\n', (17886, 17929), False, 'from openff.toolkit.topology import Molecule\n'), ((19605, 19675), 'openff.toolkit.topology.Molecule.from_mapped_smiles', 
'Molecule.from_mapped_smiles', (['entry.cmiles'], {'allow_undefined_stereo': '(True)'}), '(entry.cmiles, allow_undefined_stereo=True)\n', (19632, 19675), False, 'from openff.toolkit.topology import Molecule\n'), ((20331, 20394), 'pydantic.validator', 'validator', (['"""allowed_elements"""'], {'each_item': '(True)', 'allow_reuse': '(True)'}), "('allowed_elements', each_item=True, allow_reuse=True)\n", (20340, 20394), False, 'from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator\n'), ((20525, 20595), 'openff.toolkit.topology.Molecule.from_mapped_smiles', 'Molecule.from_mapped_smiles', (['entry.cmiles'], {'allow_undefined_stereo': '(True)'}), '(entry.cmiles, allow_undefined_stereo=True)\n', (20552, 20595), False, 'from openff.toolkit.topology import Molecule\n'), ((22494, 22558), 'mdtraj.Trajectory', 'mdtraj.Trajectory', (['(conformers * unit.nanometers)', 'mdtraj_topology'], {}), '(conformers * unit.nanometers, mdtraj_topology)\n', (22511, 22558), False, 'import mdtraj\n'), ((10004, 10033), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['rdkit_molecule'], {}), '(rdkit_molecule)\n', (10017, 10033), False, 'from rdkit import Chem\n'), ((11447, 11552), 'openeye.oechem.OERMSD', 'oechem.OERMSD', (['oe_conformers[i]', 'oe_conformers[j]', 'self.check_automorphs', 'self.heavy_atoms_only', '(True)'], {}), '(oe_conformers[i], oe_conformers[j], self.check_automorphs,\n self.heavy_atoms_only, True)\n', (11460, 11552), False, 'from openeye import oechem\n'), ((13595, 13617), 'numpy.all', 'numpy.all', (['closed_mask'], {}), '(closed_mask)\n', (13604, 13617), False, 'import numpy\n'), ((22647, 22712), 'mdtraj.baker_hubbard', 'mdtraj.baker_hubbard', (['mdtraj_trajectory'], {'freq': '(0.0)', 'periodic': '(False)'}), '(mdtraj_trajectory, freq=0.0, periodic=False)\n', (22667, 22712), False, 'import mdtraj\n'), ((10369, 10459), 'rdkit.Chem.AllChem.GetBestRMS', 'AllChem.GetBestRMS', (['rdkit_molecule', 'rdkit_molecule', 'conformer_ids[i]', 'conformer_ids[j]'], {}), 
'(rdkit_molecule, rdkit_molecule, conformer_ids[i],\n conformer_ids[j])\n', (10387, 10459), False, 'from rdkit.Chem import AllChem\n'), ((10611, 10686), 'rdkit.Chem.AllChem.GetConformerRMS', 'AllChem.GetConformerRMS', (['rdkit_molecule', 'conformer_ids[i]', 'conformer_ids[j]'], {}), '(rdkit_molecule, conformer_ids[i], conformer_ids[j])\n', (10634, 10686), False, 'from rdkit.Chem import AllChem\n'), ((12984, 13016), 'numpy.zeros', 'numpy.zeros', (['self.max_conformers'], {}), '(self.max_conformers)\n', (12995, 13016), False, 'import numpy\n'), ((13442, 13518), 'numpy.any', 'numpy.any', (['(rmsd_matrix[closed_list[:i + 1], :] < self.rmsd_tolerance)'], {'axis': '(0)'}), '(rmsd_matrix[closed_list[:i + 1], :] < self.rmsd_tolerance, axis=0)\n', (13451, 13518), False, 'import numpy\n'), ((15500, 15557), 'openff.toolkit.topology.Molecule.from_smiles', 'Molecule.from_smiles', (['smiles'], {'allow_undefined_stereo': '(True)'}), '(smiles, allow_undefined_stereo=True)\n', (15520, 15557), False, 'from openff.toolkit.topology import Molecule\n'), ((13750, 13793), 'numpy.ma.array', 'numpy.ma.array', (['distances'], {'mask': 'closed_mask'}), '(distances, mask=closed_mask)\n', (13764, 13793), False, 'import numpy\n'), ((21018, 21042), 'simtk.openmm.app.Element.getBySymbol', 'Element.getBySymbol', (['ele'], {}), '(ele)\n', (21037, 21042), False, 'from simtk.openmm.app import Element\n'), ((24245, 24322), 'qcelemental.molutil.guess_connectivity', 'guess_connectivity', (['qc_molecule.symbols', 'qc_molecule.geometry', 'self.tolerance'], {}), '(qc_molecule.symbols, qc_molecule.geometry, self.tolerance)\n', (24263, 24322), False, 'from qcelemental.molutil import guess_connectivity\n'), ((25968, 25991), 'openff.toolkit.utils.OpenEyeToolkitWrapper', 'OpenEyeToolkitWrapper', ([], {}), '()\n', (25989, 25991), False, 'from openff.toolkit.utils import OpenEyeToolkitWrapper, RDKitToolkitWrapper, UndefinedStereochemistryError\n'), ((26263, 26286), 'copy.deepcopy', 'copy.deepcopy', (['molecule'], 
{}), '(molecule)\n', (26276, 26286), False, 'import copy\n'), ((26077, 26098), 'openff.toolkit.utils.RDKitToolkitWrapper', 'RDKitToolkitWrapper', ([], {}), '()\n', (26096, 26098), False, 'from openff.toolkit.utils import OpenEyeToolkitWrapper, RDKitToolkitWrapper, UndefinedStereochemistryError\n'), ((26375, 26408), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': '""".sdf"""'}), "(suffix='.sdf')\n", (26393, 26408), False, 'from tempfile import NamedTemporaryFile\n')] |
# Implements different statistical learning algorithms to classify Emotions
# Please see https://www.cl.cam.ac.uk/~mmam3/pub/FG2015.pdf for more details and reasons
# Currently support: SVM (as in the paper), RandomForest (new implementation).
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics import classification_report, f1_score
from sklearn.svm import LinearSVC, SVC
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from feat.utils import get_resource_path
import joblib
import os
def load_classifier(cf_path):
    """Deserialize and return a pickled model/transformer stored at *cf_path*."""
    return joblib.load(cf_path)
class EmoRandomForestClassifier():
    """Random-forest emotion classifier operating on HOG features plus facial
    landmarks (see https://www.cl.cam.ac.uk/~mmam3/pub/FG2015.pdf).
    """

    def __init__(self) -> None:
        # Pre-fitted PCA, per-emotion random forests, and the feature scaler
        # are deserialized from the packaged resource directory.
        self.pca_model = load_classifier(os.path.join(
            get_resource_path(), "emo_hog_pca.joblib"))
        self.classifier = load_classifier(
            os.path.join(get_resource_path(), "emoRF36.joblib"))
        self.scaler = load_classifier(os.path.join(
            get_resource_path(), "emo_hog_scalar.joblib"))

    def detect_emo(self, frame, landmarks):
        """Predict emotion probabilities for a single face.

        Parameters
        ----------
        frame : np.ndarray
            HOG feature vector for the face; reshaped to (1, n_hog) if 1-D.
        landmarks : np.ndarray
            Facial landmark coordinates; flattened to (1, n_landmarks).

        Returns
        -------
        np.ndarray
            Array of shape (1, n_emotions): positive-class probability from
            each per-emotion random forest.
        """
        if len(frame.shape) < 2:
            frame = frame.reshape(1, -1)
        if len(landmarks.shape) > 1:
            landmarks = landmarks.flatten().reshape(1, -1)
        # BUGFIX: use the pre-fitted scaler's transform() instead of
        # fit_transform(). Re-fitting StandardScaler on a single sample
        # centers that sample to the zero vector, discarding all HOG
        # information at inference time.
        pca_transformed_frame = self.pca_model.transform(
            self.scaler.transform(frame))
        feature_cbd = np.concatenate((pca_transformed_frame, landmarks), 1)
        pred_aus = []
        for keys in self.classifier:
            au_pred = self.classifier[keys].predict_proba(feature_cbd)
            au_pred = au_pred[0, 1]  # probability of the positive class
            pred_aus.append(au_pred)
        return np.array(pred_aus).reshape(1, -1)
class EmoSVMClassifier():
    """SVM emotion classifier operating on HOG features plus facial landmarks
    (see https://www.cl.cam.ac.uk/~mmam3/pub/FG2015.pdf).
    """

    def __init__(self) -> None:
        # Pre-fitted PCA, per-emotion SVMs, and the feature scaler are
        # deserialized from the packaged resource directory.
        self.pca_model = load_classifier(os.path.join(
            get_resource_path(), "emo_hog_pca.joblib"))
        self.classifier = load_classifier(
            os.path.join(get_resource_path(), "emoSVM38.joblib"))
        self.scaler = load_classifier(os.path.join(
            get_resource_path(), "emo_hog_scalar.joblib"))

    def detect_emo(self, frame, landmarks):
        """Predict emotion labels for a single face.

        Parameters
        ----------
        frame : np.ndarray
            HOG feature vector for the face; reshaped to (1, n_hog) if 1-D.
        landmarks : np.ndarray
            Facial landmark coordinates; flattened to (1, n_landmarks).

        Returns
        -------
        np.ndarray
            Array of shape (1, n_emotions): one SVM prediction per emotion.
        """
        if len(frame.shape) < 2:
            frame = frame.reshape(1, -1)
        if len(landmarks.shape) > 1:
            landmarks = landmarks.flatten().reshape(1, -1)
        # BUGFIX: use the pre-fitted scaler's transform() instead of
        # fit_transform(). Re-fitting StandardScaler on a single sample
        # centers that sample to the zero vector, discarding all HOG
        # information at inference time.
        pca_transformed_frame = self.pca_model.transform(
            self.scaler.transform(frame))
        feature_cbd = np.concatenate((pca_transformed_frame, landmarks), 1)
        pred_aus = []
        for keys in self.classifier:
            au_pred = self.classifier[keys].predict(feature_cbd)
            au_pred = au_pred[0]  # single-sample prediction
            pred_aus.append(au_pred)
        return np.array(pred_aus).reshape(1, -1)
| [
"joblib.load",
"numpy.array",
"feat.utils.get_resource_path",
"numpy.concatenate"
] | [((798, 818), 'joblib.load', 'joblib.load', (['cf_path'], {}), '(cf_path)\n', (809, 818), False, 'import joblib\n'), ((1651, 1704), 'numpy.concatenate', 'np.concatenate', (['(pca_transformed_frame, landmarks)', '(1)'], {}), '((pca_transformed_frame, landmarks), 1)\n', (1665, 1704), True, 'import numpy as np\n'), ((2795, 2848), 'numpy.concatenate', 'np.concatenate', (['(pca_transformed_frame, landmarks)', '(1)'], {}), '((pca_transformed_frame, landmarks), 1)\n', (2809, 2848), True, 'import numpy as np\n'), ((970, 989), 'feat.utils.get_resource_path', 'get_resource_path', ([], {}), '()\n', (987, 989), False, 'from feat.utils import get_resource_path\n'), ((1082, 1101), 'feat.utils.get_resource_path', 'get_resource_path', ([], {}), '()\n', (1099, 1101), False, 'from feat.utils import get_resource_path\n'), ((1186, 1205), 'feat.utils.get_resource_path', 'get_resource_path', ([], {}), '()\n', (1203, 1205), False, 'from feat.utils import get_resource_path\n'), ((1928, 1946), 'numpy.array', 'np.array', (['pred_aus'], {}), '(pred_aus)\n', (1936, 1946), True, 'import numpy as np\n'), ((2113, 2132), 'feat.utils.get_resource_path', 'get_resource_path', ([], {}), '()\n', (2130, 2132), False, 'from feat.utils import get_resource_path\n'), ((2225, 2244), 'feat.utils.get_resource_path', 'get_resource_path', ([], {}), '()\n', (2242, 2244), False, 'from feat.utils import get_resource_path\n'), ((2330, 2349), 'feat.utils.get_resource_path', 'get_resource_path', ([], {}), '()\n', (2347, 2349), False, 'from feat.utils import get_resource_path\n'), ((3095, 3113), 'numpy.array', 'np.array', (['pred_aus'], {}), '(pred_aus)\n', (3103, 3113), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from nilearn import plotting, datasets
from common.paths import POWER
from common.power_atlas import POWER_DATASET, POWER_COORDS, POWER_LABELS, POWER_NODE_COLORS, to_power_fc_matrix
DEFAULT_CMAP = 'bwr'
POS_CMAP = 'YlOrRd'
NEG_CMAP = 'PuBu'
def plot_connections(connections, vmin=None, vmax=None, threshold="99.7%", show_matrix=False, show_pos_neg=True, show_node_strength=False, title=None):
    """
    Visualize functional connectivity in several complementary views: an
    optional correlation matrix, a glass-brain graph, separate positive /
    negative edge plots, and node-strength marker plots.
    """
    fc = to_power_fc_matrix(connections)

    if show_matrix:
        plot_fc_matrix(fc, vmin, vmax)

    plot_fc_graph(fc, vmin, vmax, threshold=threshold, title=title)

    if show_pos_neg:
        # Positive connections only, rendered with a warm colormap.
        pos_fc = np.clip(fc, 0, np.max(fc))
        pos_strength = convert_fc_to_node_strength(pos_fc)
        plot_fc_graph(pos_fc, 0, vmax, POS_CMAP, threshold=threshold)
        plot_node_strengths(pos_strength, 0, POS_CMAP)

        # Negative connections only (sign-flipped for display), cool colormap.
        neg_fc = np.clip(fc, np.min(fc), 0)
        neg_strength = convert_fc_to_node_strength(neg_fc)
        plot_fc_graph(-neg_fc, 0, vmax, NEG_CMAP, threshold=threshold)
        plot_node_strengths(neg_strength, 0, NEG_CMAP)

    if show_node_strength:
        strength = convert_fc_to_node_strength(fc)
        plot_node_strengths(strength, 0, "Greens")
        plot_node_strengths(strength, 0.5, "Greens")
def plot_fc_matrix(fc, vmin=None, vmax=None, cmap=DEFAULT_CMAP, title=None):
    """
    Render the functional connectivity matrix: both axes index nodes and each
    cell is colored by the correlation strength between that pair of nodes.
    """
    display_options = dict(vmin=vmin, vmax=vmax, colorbar=True, cmap=cmap, title=title)
    plotting.plot_matrix(fc, **display_options)
def plot_fc_graph(fc, emin=None, emax=None, cmap=DEFAULT_CMAP, threshold="99.7%", title=None):
    """
    Render the connectivity graph on a glass brain: Power-atlas nodes joined
    by edges whose color encodes correlation strength.
    """
    graph_options = dict(
        node_size=5,
        colorbar=True,
        node_color=POWER_NODE_COLORS,
        edge_vmin=emin,
        edge_vmax=emax,
        edge_cmap=cmap,
        edge_threshold=threshold,
        title=title,
    )
    plotting.plot_connectome(fc, POWER_COORDS, **graph_options)
def plot_node_strengths(node_strength, threshold=None, cmap=DEFAULT_CMAP):
    """
    Render one marker per node, shaded by node strength (the normalized
    absolute sum of edge weights incident on that node); nodes with strength
    below *threshold* are hidden.
    """
    plotting.plot_markers(
        node_strength,
        POWER_COORDS,
        node_threshold=threshold,
        node_vmin=0,
        node_vmax=1,
        node_cmap=cmap,
    )
def convert_fc_to_node_strength(fc):
    """
    Collapse a connectivity matrix into per-node strengths.

    Each node's strength is the sum of the absolute weights of all edges
    incident on it, normalized so the strongest node equals 1.

    Parameters
    ----------
    fc : 2d array
        Functional connectivity matrix (nodes x nodes).

    Returns
    -------
    1d array
        Normalized node strengths in [0, 1].
    """
    node_strength = np.sum(np.abs(fc), axis=0)
    max_strength = np.max(node_strength)
    # Guard against an all-zero matrix (e.g. when no positive or negative
    # edges exist): dividing by zero would fill the result with NaNs.
    if max_strength == 0:
        return node_strength
    return node_strength / max_strength
| [
"nilearn.plotting.plot_connectome",
"numpy.abs",
"common.power_atlas.to_power_fc_matrix",
"numpy.max",
"nilearn.plotting.plot_matrix",
"numpy.min",
"nilearn.plotting.plot_markers"
] | [((614, 645), 'common.power_atlas.to_power_fc_matrix', 'to_power_fc_matrix', (['connections'], {}), '(connections)\n', (632, 645), False, 'from common.power_atlas import POWER_DATASET, POWER_COORDS, POWER_LABELS, POWER_NODE_COLORS, to_power_fc_matrix\n'), ((1787, 1876), 'nilearn.plotting.plot_matrix', 'plotting.plot_matrix', (['fc'], {'vmin': 'vmin', 'vmax': 'vmax', 'colorbar': '(True)', 'cmap': 'cmap', 'title': 'title'}), '(fc, vmin=vmin, vmax=vmax, colorbar=True, cmap=cmap,\n title=title)\n', (1807, 1876), False, 'from nilearn import plotting, datasets\n'), ((2101, 2297), 'nilearn.plotting.plot_connectome', 'plotting.plot_connectome', (['fc', 'POWER_COORDS'], {'node_size': '(5)', 'colorbar': '(True)', 'node_color': 'POWER_NODE_COLORS', 'edge_vmin': 'emin', 'edge_vmax': 'emax', 'edge_cmap': 'cmap', 'edge_threshold': 'threshold', 'title': 'title'}), '(fc, POWER_COORDS, node_size=5, colorbar=True,\n node_color=POWER_NODE_COLORS, edge_vmin=emin, edge_vmax=emax, edge_cmap\n =cmap, edge_threshold=threshold, title=title)\n', (2125, 2297), False, 'from nilearn import plotting, datasets\n'), ((2572, 2694), 'nilearn.plotting.plot_markers', 'plotting.plot_markers', (['node_strength', 'POWER_COORDS'], {'node_threshold': 'threshold', 'node_vmin': '(0)', 'node_vmax': '(1)', 'node_cmap': 'cmap'}), '(node_strength, POWER_COORDS, node_threshold=threshold,\n node_vmin=0, node_vmax=1, node_cmap=cmap)\n', (2593, 2694), False, 'from nilearn import plotting, datasets\n'), ((2972, 2993), 'numpy.max', 'np.max', (['node_strength'], {}), '(node_strength)\n', (2978, 2993), True, 'import numpy as np\n'), ((2931, 2941), 'numpy.abs', 'np.abs', (['fc'], {}), '(fc)\n', (2937, 2941), True, 'import numpy as np\n'), ((840, 850), 'numpy.max', 'np.max', (['fc'], {}), '(fc)\n', (846, 850), True, 'import numpy as np\n'), ((1110, 1120), 'numpy.min', 'np.min', (['fc'], {}), '(fc)\n', (1116, 1120), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 22:19:02 2020
@author: nagaraj
"""
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy.spatial.distance import cosine
import cv2
# Capture frames from the default webcam (device 0).
cam = cv2.VideoCapture(0)
# Create some random colors (one BGR triple per tracked feature, for drawing)
color = np.random.randint(0,255,(100,3))
# Take first frame and find corners in it
ret, old_frame = cam.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
#p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
# Restrict corner detection to two vertical strips at the left and right
# image borders (assumes a 640-pixel-wide frame -- TODO confirm camera width).
mask_features = np.zeros_like(old_gray)
mask_features[:,0:20] = 1
mask_features[:,620:640] = 1
# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 100,
                       qualityLevel = 0.3,
                       minDistance = 3,
                       blockSize = 7,
                       mask = mask_features)
# Parameters for lucas kanade optical flow
lk_params = dict( winSize  = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
def init_new_features(gray_frame):
    """Detect a fresh set of Shi-Tomasi corners in *gray_frame* using the
    module-level feature_params (including the border-strip detection mask)."""
    return cv2.goodFeaturesToTrack(gray_frame, **feature_params)
def calculateDistance(x1,y1,x2,y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2).

    Uses math.hypot, which is equivalent to sqrt(dx**2 + dy**2) but more
    robust against intermediate overflow/underflow.
    """
    return math.hypot(x2 - x1, y2 - y1)
# Seed the tracker with an initial set of corner features.
corners = init_new_features(old_gray)

while True:
    try:
        cam_moved = False
        cam_status = None
        ret,frame = cam.read()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, corners, None, **lk_params)

        # NOTE(review): the status vector `st` is not used to filter lost
        # points (the common idiom is p1[st == 1]); every returned point is
        # treated as successfully tracked -- verify this is intended.
        good_new = p1
        good_old = corners

        # draw the tracks
        for i,(new,old) in enumerate(zip(good_new,good_old)):
            a,b = new.ravel()
            c,d = old.ravel()
            # A large displacement of any single feature is taken to mean the
            # camera itself moved, and the feature set is re-detected.
            distance = calculateDistance(a,b,c,d)
            if distance>8:
                cam_moved = True
                # update the previous frame and previous points
                old_gray = frame_gray.copy()
                corners = init_new_features(old_gray)
            else:
                old_gray = frame_gray.copy()
                corners = good_new.reshape(-1,1,2)
            mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)
            frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1)

        if cam_moved is True:
            it = np.random.rand(1)[0]
            print('Camera moved '+ str(it))
            cam_status = 'Camera moved'
            cv2.putText(frame, cam_status, (20, 320), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        img = cv2.add(frame,mask)
        cv2.imshow('frame',img)
        # The drawing overlay is cleared every iteration, so tracks persist
        # for a single displayed frame only.
        mask = np.zeros_like(old_frame)
        if corners is None:
            corners = init_new_features(old_gray)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            cam.release()
            # NOTE(review): no `break` here -- after releasing the camera the
            # next read/convert is apparently expected to fail and be caught
            # by the TypeError handler below; confirm this shutdown path.
    except TypeError as e:
        print(e)
        break
| [
"numpy.zeros_like",
"cv2.putText",
"math.sqrt",
"cv2.cvtColor",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.VideoCapture",
"numpy.random.randint",
"cv2.goodFeaturesToTrack",
"cv2.calcOpticalFlowPyrLK",
"numpy.random.rand",
"cv2.imshow",
"cv2.add"
] | [((227, 246), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (243, 246), False, 'import cv2\n'), ((287, 322), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(100, 3)'], {}), '(0, 255, (100, 3))\n', (304, 322), True, 'import numpy as np\n'), ((406, 449), 'cv2.cvtColor', 'cv2.cvtColor', (['old_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(old_frame, cv2.COLOR_BGR2GRAY)\n', (418, 449), False, 'import cv2\n'), ((576, 600), 'numpy.zeros_like', 'np.zeros_like', (['old_frame'], {}), '(old_frame)\n', (589, 600), True, 'import numpy as np\n'), ((618, 641), 'numpy.zeros_like', 'np.zeros_like', (['old_gray'], {}), '(old_gray)\n', (631, 641), True, 'import numpy as np\n'), ((1216, 1269), 'cv2.goodFeaturesToTrack', 'cv2.goodFeaturesToTrack', (['gray_frame'], {}), '(gray_frame, **feature_params)\n', (1239, 1269), False, 'import cv2\n'), ((1343, 1385), 'math.sqrt', 'math.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)\n', (1352, 1385), False, 'import math\n'), ((1577, 1616), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1589, 1616), False, 'import cv2\n'), ((1676, 1750), 'cv2.calcOpticalFlowPyrLK', 'cv2.calcOpticalFlowPyrLK', (['old_gray', 'frame_gray', 'corners', 'None'], {}), '(old_gray, frame_gray, corners, None, **lk_params)\n', (1700, 1750), False, 'import cv2\n'), ((2844, 2864), 'cv2.add', 'cv2.add', (['frame', 'mask'], {}), '(frame, mask)\n', (2851, 2864), False, 'import cv2\n'), ((2883, 2907), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'img'], {}), "('frame', img)\n", (2893, 2907), False, 'import cv2\n'), ((2933, 2957), 'numpy.zeros_like', 'np.zeros_like', (['old_frame'], {}), '(old_frame)\n', (2946, 2957), True, 'import numpy as np\n'), ((2730, 2822), 'cv2.putText', 'cv2.putText', (['frame', 'cam_status', '(20, 320)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 0, 255)', '(2)'], {}), '(frame, cam_status, (20, 320), cv2.FONT_HERSHEY_SIMPLEX, 0.7, 
(0,\n 0, 255), 2)\n', (2741, 2822), False, 'import cv2\n'), ((3118, 3141), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3139, 3141), False, 'import cv2\n'), ((2610, 2627), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2624, 2627), True, 'import numpy as np\n'), ((3070, 3084), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3081, 3084), False, 'import cv2\n')] |
import numpy as np
import tensorflow as tf
import os
from data.dataset import _get_training_data, _get_test_data
from model.train_model import TrainModel
from sklearn.metrics import mean_absolute_error, mean_squared_error
# Command-line configuration (TF1 tf.app.flags): data paths, checkpoint
# location, optimization hyper-parameters, and network architecture.
# NOTE(review): "__file__" is passed as a *string literal* in the two flags
# below (os.path.dirname("__file__") == ''), unlike the unquoted __file__
# used for checkpoints_path -- so the tf_records paths resolve relative to
# the current working directory. Verify this is intended.
tf.app.flags.DEFINE_string('tf_records_train_path', 
                           os.path.abspath(os.path.join(os.path.dirname("__file__"), '..', 'data/tf_records/train/')),
                           'Path of the training data.')

tf.app.flags.DEFINE_string('tf_records_test_path', 
                           os.path.abspath(os.path.join(os.path.dirname("__file__"), '..', 'data/tf_records/test/')),
                           'Path of the test data.')

# NOTE(review): the help string says "test data" but this flag holds the
# model checkpoint path.
tf.app.flags.DEFINE_string('checkpoints_path', os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'checkpoints/model.ckpt')),
                           'Path for the test data.')

tf.app.flags.DEFINE_integer('num_epoch', 1000,
                            'Number of training epochs.')

tf.app.flags.DEFINE_integer('batch_size', 16,
                            'Size of the training batch.')

tf.app.flags.DEFINE_float('learning_rate',0.0005,
                          'Learning_Rate')

tf.app.flags.DEFINE_boolean('l2_reg', False,
                            'L2 regularization.'
                            )

tf.app.flags.DEFINE_float('lambda_',0.01,
                          'Wight decay factor.')

tf.app.flags.DEFINE_integer('num_v', 3952,
                            'Number of visible neurons (Number of movies the users rated.)')

tf.app.flags.DEFINE_integer('num_h', 128,
                            'Number of hidden neurons.)')

tf.app.flags.DEFINE_integer('num_samples', 5953,
                            'Number of training samples (Number of users, who gave a rating).')

# Parsed flag container used throughout main().
FLAGS = tf.app.flags.FLAGS
def main(_):
    """Build the graph, open a session and run training of the neural network.

    Each epoch runs all training mini-batches, then evaluates the model once
    per user; a checkpoint is saved whenever the epoch's mean absolute error
    drops below 0.9.
    """
    
    num_batches=int(FLAGS.num_samples/FLAGS.batch_size)

    with tf.Graph().as_default():

        train_data, train_data_infer=_get_training_data(FLAGS)
        test_data=_get_test_data(FLAGS)
        
        # One-shot-style iterators that must be re-initialized every epoch.
        iter_train = train_data.make_initializable_iterator()
        iter_train_infer=train_data_infer.make_initializable_iterator()
        iter_test=test_data.make_initializable_iterator()
        
        x_train= iter_train.get_next()
        x_train_infer=iter_train_infer.get_next()
        x_test=iter_test.get_next()

        model=TrainModel(FLAGS, 'training')

        train_op, train_loss_op=model.train(x_train)
        prediction, labels, test_loss_op, mae_ops=model._validation_loss(x_train_infer, x_test)
        
        saver=tf.train.Saver()
        
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            train_loss=0
            test_loss=[]
            mae=[]
            
            for epoch in range(FLAGS.num_epoch):
                # Rewind the dataset iterators at the start of each epoch.
                sess.run(iter_train.initializer)
                sess.run(iter_train_infer.initializer)
                sess.run(iter_test.initializer)

                for batch_nr in range(num_batches):
                    _, loss_=sess.run((train_op, train_loss_op))
                    train_loss+=loss_
                # Validation: one sample (user) at a time.
                for i in range(FLAGS.num_samples):
                    pred, labels_, loss_, mae_=sess.run((prediction, labels, test_loss_op,mae_ops))
                    test_loss.append(loss_)
                    mae.append(mae_)
                
                print('epoch_nr: %i, train_loss: %.3f, test_loss: %.3f, mean_abs_error: %.3f'
                      %(epoch,(train_loss/num_batches),np.mean(test_loss), np.mean(mae)))
                
                if np.mean(mae)<0.9:
                    saver.save(sess, FLAGS.checkpoints_path)
                # Reset the accumulators for the next epoch.
                train_loss=0
                test_loss=[]
                mae=[]
if __name__ == "__main__":
    # tf.app.run() parses the flags defined above and dispatches to main().
    tf.app.run()
| [
"tensorflow.app.flags.DEFINE_float",
"data.dataset._get_test_data",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"os.path.dirname",
"tensorflow.Session",
"tensorflow.app.flags.DEFINE_boolean",
"numpy.mean",
"model.train_model.TrainModel",
"tensorflow.Graph",
"tensorflow.a... | [((874, 950), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_epoch"""', '(1000)', '"""Number of training epochs."""'], {}), "('num_epoch', 1000, 'Number of training epochs.')\n", (901, 950), True, 'import tensorflow as tf\n'), ((980, 1056), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(16)', '"""Size of the training batch."""'], {}), "('batch_size', 16, 'Size of the training batch.')\n", (1007, 1056), True, 'import tensorflow as tf\n'), ((1086, 1153), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(0.0005)', '"""Learning_Rate"""'], {}), "('learning_rate', 0.0005, 'Learning_Rate')\n", (1111, 1153), True, 'import tensorflow as tf\n'), ((1180, 1246), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""l2_reg"""', '(False)', '"""L2 regularization."""'], {}), "('l2_reg', False, 'L2 regularization.')\n", (1207, 1246), True, 'import tensorflow as tf\n'), ((1304, 1369), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""lambda_"""', '(0.01)', '"""Wight decay factor."""'], {}), "('lambda_', 0.01, 'Wight decay factor.')\n", (1329, 1369), True, 'import tensorflow as tf\n'), ((1396, 1507), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_v"""', '(3952)', '"""Number of visible neurons (Number of movies the users rated.)"""'], {}), "('num_v', 3952,\n 'Number of visible neurons (Number of movies the users rated.)')\n", (1423, 1507), True, 'import tensorflow as tf\n'), ((1533, 1604), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_h"""', '(128)', '"""Number of hidden neurons.)"""'], {}), "('num_h', 128, 'Number of hidden neurons.)')\n", (1560, 1604), True, 'import tensorflow as tf\n'), ((1634, 1754), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_samples"""', '(5953)', '"""Number of training samples 
(Number of users, who gave a rating)."""'], {}), "('num_samples', 5953,\n 'Number of training samples (Number of users, who gave a rating).')\n", (1661, 1754), True, 'import tensorflow as tf\n'), ((4000, 4012), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (4010, 4012), True, 'import tensorflow as tf\n'), ((2056, 2081), 'data.dataset._get_training_data', '_get_training_data', (['FLAGS'], {}), '(FLAGS)\n', (2074, 2081), False, 'from data.dataset import _get_training_data, _get_test_data\n'), ((2100, 2121), 'data.dataset._get_test_data', '_get_test_data', (['FLAGS'], {}), '(FLAGS)\n', (2114, 2121), False, 'from data.dataset import _get_training_data, _get_test_data\n'), ((2472, 2501), 'model.train_model.TrainModel', 'TrainModel', (['FLAGS', '"""training"""'], {}), "(FLAGS, 'training')\n", (2482, 2501), False, 'from model.train_model import TrainModel\n'), ((2675, 2691), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2689, 2691), True, 'import tensorflow as tf\n'), ((334, 361), 'os.path.dirname', 'os.path.dirname', (['"""__file__"""'], {}), "('__file__')\n", (349, 361), False, 'import os\n'), ((563, 590), 'os.path.dirname', 'os.path.dirname', (['"""__file__"""'], {}), "('__file__')\n", (578, 590), False, 'import os\n'), ((755, 780), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (770, 780), False, 'import os\n'), ((2714, 2726), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2724, 2726), True, 'import tensorflow as tf\n'), ((1993, 2003), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2001, 2003), True, 'import tensorflow as tf\n'), ((2770, 2803), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2801, 2803), True, 'import tensorflow as tf\n'), ((3782, 3794), 'numpy.mean', 'np.mean', (['mae'], {}), '(mae)\n', (3789, 3794), True, 'import numpy as np\n'), ((3711, 3729), 'numpy.mean', 'np.mean', (['test_loss'], {}), '(test_loss)\n', (3718, 3729), True, 'import 
numpy as np\n'), ((3731, 3743), 'numpy.mean', 'np.mean', (['mae'], {}), '(mae)\n', (3738, 3743), True, 'import numpy as np\n')] |
"""
Data-manipulation utilities.
"""
import numpy as np
import bottleneck as bn
from scipy import sparse as sp
def one_hot(values, dtype=float):
    """Return a one-hot transform of values

    Parameters
    ----------
    values : 1d array
        Integer values (hopefully 0-max).
    dtype : numpy dtype
        Element type of the returned matrix.

    Returns
    -------
    result
        2d array with ones in respective indicator columns.
    """
    if not len(values):
        # Nothing to encode: an empty (0, 0) matrix.
        return np.zeros((0, 0), dtype=dtype)
    indices = np.asanyarray(values, dtype=int)
    n_columns = int(np.max(values) + 1)
    # Row i of the identity is the one-hot vector for class i.
    return np.eye(n_columns, dtype=dtype)[indices]
def scale(values, min=0, max=1):
    """Return values linearly rescaled to the interval [min, max].

    NaNs are ignored when locating the data range.  If all values are
    equal (zero range) the data is clipped into [min, max] instead.
    An empty input yields an empty array.
    """
    if not len(values):
        return np.array([])
    # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
    minval = np.float64(bn.nanmin(values))
    ptp = bn.nanmax(values) - minval
    if ptp == 0:
        # Constant data: no span to rescale, just clamp into range.
        return np.clip(values, min, max)
    return (-minval + values) / ptp * (max - min) + min
class SharedComputeValue:
    """A base class that separates compute_value computation
    for different variables into shared and specific parts.

    Parameters
    ----------
    compute_shared: Callable[[Orange.data.Table], object]
        A callable that performs computation that is shared between
        multiple variables. Variables sharing computation need to set
        the same instance.
    variable: Orange.data.Variable
        The original variable on which this compute value is set.
    """

    def __init__(self, compute_shared, variable=None):
        self.compute_shared = compute_shared
        self.variable = variable

    def __call__(self, data, shared_data=None):
        """Fallback if common parts are not passed."""
        if shared_data is None:
            # The shared part was not precomputed by the caller; do it now.
            shared_data = self.compute_shared(data)
        return self.compute(data, shared_data)

    def compute(self, data, shared_data):
        """Given precomputed shared data, perform variable-specific
        part of computation and return new variable values.

        Subclasses must override this."""
        raise NotImplementedError
def vstack(arrays):
    """vstack that supports sparse and dense arrays

    If all arrays are dense, result is dense. Otherwise,
    result is a sparse (csr) array.
    """
    has_sparse = any(sp.issparse(a) for a in arrays)
    if not has_sparse:
        return np.vstack(arrays)
    # At least one input is sparse: promote everything to CSR and
    # stack in the sparse domain.
    return sp.vstack([sp.csr_matrix(a) for a in arrays])
def hstack(arrays):
    """hstack that supports sparse and dense arrays

    If all arrays are dense, result is dense. Otherwise,
    result is a sparse (csc) array.
    """
    has_sparse = any(sp.issparse(a) for a in arrays)
    if not has_sparse:
        return np.hstack(arrays)
    # At least one input is sparse: promote everything to CSC and
    # stack in the sparse domain.
    return sp.hstack([sp.csc_matrix(a) for a in arrays])
def assure_array_dense(a):
    """Return `a` as a dense array; sparse inputs are densified,
    dense inputs are returned unchanged."""
    return a.toarray() if sp.issparse(a) else a
def assure_array_sparse(a):
    """Return `a` as a scipy sparse matrix.

    Sparse inputs are returned unchanged.  Dense inputs are converted
    to a CSC matrix; a 1-d input of shape (n,) becomes a (1, n) row.
    """
    if not sp.issparse(a):
        # since a can be a list, cast to np.array
        # since a can come from metas with string, cast to float
        # (builtin float: np.float was removed in NumPy 1.24 and was
        # just an alias of the builtin)
        a = np.asarray(a).astype(float)
        return sp.csc_matrix(a)
    return a
def assure_column_sparse(a):
    """Return `a` as a sparse column vector.

    assure_array_sparse turns a 1-d input of shape (n,) into a (1, n)
    row, so a single-row result is transposed into the expected (n, 1)
    column.
    """
    col = assure_array_sparse(a)
    if col.shape[0] == 1:
        return col.T
    return col
def assure_column_dense(a):
    """Return `a` as a dense 1-d array of shape (n,).

    Column assignments must be of shape (n,) and not (n, 1), hence the
    flattening."""
    return np.ravel(assure_array_dense(a))
| [
"scipy.sparse.vstack",
"numpy.ravel",
"scipy.sparse.issparse",
"numpy.asarray",
"numpy.asanyarray",
"numpy.zeros",
"numpy.clip",
"numpy.hstack",
"bottleneck.nanmin",
"scipy.sparse.csc_matrix",
"scipy.sparse.csr_matrix",
"numpy.array",
"numpy.max",
"scipy.sparse.hstack",
"numpy.vstack",
... | [((2725, 2739), 'scipy.sparse.issparse', 'sp.issparse', (['a'], {}), '(a)\n', (2736, 2739), True, 'from scipy import sparse as sp\n'), ((3455, 3466), 'numpy.ravel', 'np.ravel', (['a'], {}), '(a)\n', (3463, 3466), True, 'import numpy as np\n'), ((429, 458), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {'dtype': 'dtype'}), '((0, 0), dtype=dtype)\n', (437, 458), True, 'import numpy as np\n'), ((524, 556), 'numpy.asanyarray', 'np.asanyarray', (['values'], {'dtype': 'int'}), '(values, dtype=int)\n', (537, 556), True, 'import numpy as np\n'), ((682, 694), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (690, 694), True, 'import numpy as np\n'), ((718, 735), 'bottleneck.nanmin', 'bn.nanmin', (['values'], {}), '(values)\n', (727, 735), True, 'import bottleneck as bn\n'), ((747, 764), 'bottleneck.nanmax', 'bn.nanmax', (['values'], {}), '(values)\n', (756, 764), True, 'import bottleneck as bn\n'), ((806, 831), 'numpy.clip', 'np.clip', (['values', 'min', 'max'], {}), '(values, min, max)\n', (813, 831), True, 'import numpy as np\n'), ((2272, 2289), 'scipy.sparse.vstack', 'sp.vstack', (['arrays'], {}), '(arrays)\n', (2281, 2289), True, 'from scipy import sparse as sp\n'), ((2315, 2332), 'numpy.vstack', 'np.vstack', (['arrays'], {}), '(arrays)\n', (2324, 2332), True, 'import numpy as np\n'), ((2628, 2645), 'scipy.sparse.hstack', 'sp.hstack', (['arrays'], {}), '(arrays)\n', (2637, 2645), True, 'from scipy import sparse as sp\n'), ((2671, 2688), 'numpy.hstack', 'np.hstack', (['arrays'], {}), '(arrays)\n', (2680, 2688), True, 'import numpy as np\n'), ((2819, 2833), 'scipy.sparse.issparse', 'sp.issparse', (['a'], {}), '(a)\n', (2830, 2833), True, 'from scipy import sparse as sp\n'), ((3008, 3024), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['a'], {}), '(a)\n', (3021, 3024), True, 'from scipy import sparse as sp\n'), ((2164, 2180), 'scipy.sparse.issparse', 'sp.issparse', (['arr'], {}), '(arr)\n', (2175, 2180), True, 'from scipy import sparse as sp\n'), ((2219, 2237), 
'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['arr'], {}), '(arr)\n', (2232, 2237), True, 'from scipy import sparse as sp\n'), ((2520, 2536), 'scipy.sparse.issparse', 'sp.issparse', (['arr'], {}), '(arr)\n', (2531, 2536), True, 'from scipy import sparse as sp\n'), ((2575, 2593), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['arr'], {}), '(arr)\n', (2588, 2593), True, 'from scipy import sparse as sp\n'), ((2962, 2975), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (2972, 2975), True, 'import numpy as np\n'), ((481, 495), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (487, 495), True, 'import numpy as np\n')] |
import numpy as np
import numba
import matplotlib.pyplot as plt
import pointprocesses as pp
from pointprocesses.temporal import variable_poisson
@numba.jit("double[:],double[:]", cache=True)
def count_events_by_(events, partition):
    """Count how many events fall strictly inside each bin of `partition`.

    Parameters:
        events: 1-d array of event times.
        partition: 1-d array of bin edges (length m gives m-1 bins).

    Returns:
        1-d array of length m-1 with the per-bin event counts.

    NOTE(review): the jit signature string gives only argument types, not
    the usual "ret(args)" form -- confirm numba accepts it as intended.
    NOTE(review): both inequalities are strict, so events lying exactly on
    a bin edge are counted in neither bin -- confirm this is intended.
    """
    m = len(partition)
    counts = np.zeros((m-1,))
    for i in range(m-1):
        # Bin i spans (partition[i], partition[i+1]), both ends exclusive.
        low = partition[i]
        high = partition[i+1]
        counts[i] = np.sum((low < events) &(events < high))
    return counts
def intensity_estimator(data, partition) -> np.ndarray:
    """
    Inspired by Leemis (2001), "Nonparametric estimation and variate generation for a
    nonhomogeneous Poisson process from event count data"

    Averages the per-realization binned event rates over all simulated
    processes.

    Args:
        data (list): set of simulated processes; each entry's first element
            holds the event times of one realization.
        partition (list): uniform partition of the overarching time interval
            (the bin width is taken from its first two edges).
    """
    n_runs = len(data)
    n_bins = len(partition) - 1
    bin_width = partition[1] - partition[0]
    rates = np.zeros((n_runs, n_bins))
    for run_idx, realization in enumerate(data):
        # Events of this realization, binned and normalized to a rate.
        rates[run_idx, :] = count_events_by_(realization[0], partition) / bin_width
    return rates.mean(axis=0)
# Overall time horizon of each simulated process.
tmax = 8.0
# Dense grid used only for plotting the true intensity.
trange = np.linspace(0, tmax, 201)
# Histogram bin width and the resulting bin edges used by the estimator.
bandwidth = 0.1
partition = np.arange(0, tmax+bandwidth, bandwidth)
def intens(x):
    """Intensity function: a saturating ramp modulated by a slow
    sinusoid, plus an exponentially growing baseline."""
    ramp = 1 - 0.9 * np.exp(-x)
    modulation = 1 + 0.2 * np.sin(1.4 * x)
    baseline = np.exp(0.2 * x)
    return 5.0 * ramp * modulation + baseline
# Thinning bound for the simulation; must dominate intens on [0, tmax].
# NOTE(review): intens(8.0) appears to exceed 10 (exp(1.6) alone is ~4.95
# on top of the ~6 oscillating part) -- confirm max_lbda is a valid bound.
# max_lbda = np.max(1.01*intens(np.linspace(0, tmax, 200)))
max_lbda = 10.0
num_proc_samples = 500
# Simulated samples: one inhomogeneous-Poisson realization per entry.
data = [variable_poisson(tmax, intens, max_lbda) for _ in range(num_proc_samples)]
estimates = intensity_estimator(data, partition)
# Appearance of the estimate markers.
scatter_ops = {
    "s": 18.0,
    "color": "r",
    "linewidths": 0.5,
    "edgecolors": "k",
    "alpha": 0.7
}
# True intensity (dashed) vs. the binned estimate at the bin midpoints.
plt.plot(trange, intens(trange),
    linestyle='--',
    label="actual intensity $\\lambda(t)$")
plt.scatter(0.5*(partition[1:]+partition[:-1]), estimates,
    label="estimate $\\hat{\\lambda}(t)$", **scatter_ops)
plt.xlabel("Time $t$")
plt.legend()
plt.tight_layout()
plt.savefig("estimate.png")
plt.show() | [
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.sin",
"numba.jit",
"numpy.arange",
"numpy.linspace",
"pointprocesses.temporal.variable_poisson",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
... | [((148, 192), 'numba.jit', 'numba.jit', (['"""double[:],double[:]"""'], {'cache': '(True)'}), "('double[:],double[:]', cache=True)\n", (157, 192), False, 'import numba\n'), ((1102, 1127), 'numpy.linspace', 'np.linspace', (['(0)', 'tmax', '(201)'], {}), '(0, tmax, 201)\n', (1113, 1127), True, 'import numpy as np\n'), ((1156, 1197), 'numpy.arange', 'np.arange', (['(0)', '(tmax + bandwidth)', 'bandwidth'], {}), '(0, tmax + bandwidth, bandwidth)\n', (1165, 1197), True, 'import numpy as np\n'), ((1794, 1915), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(0.5 * (partition[1:] + partition[:-1]))', 'estimates'], {'label': '"""estimate $\\\\hat{\\\\lambda}(t)$"""'}), "(0.5 * (partition[1:] + partition[:-1]), estimates, label=\n 'estimate $\\\\hat{\\\\lambda}(t)$', **scatter_ops)\n", (1805, 1915), True, 'import matplotlib.pyplot as plt\n'), ((1912, 1934), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time $t$"""'], {}), "('Time $t$')\n", (1922, 1934), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1947), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1945, 1947), True, 'import matplotlib.pyplot as plt\n'), ((1948, 1966), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1964, 1966), True, 'import matplotlib.pyplot as plt\n'), ((1967, 1994), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""estimate.png"""'], {}), "('estimate.png')\n", (1978, 1994), True, 'import matplotlib.pyplot as plt\n'), ((1995, 2005), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2003, 2005), True, 'import matplotlib.pyplot as plt\n'), ((270, 288), 'numpy.zeros', 'np.zeros', (['(m - 1,)'], {}), '((m - 1,))\n', (278, 288), True, 'import numpy as np\n'), ((898, 914), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (906, 914), True, 'import numpy as np\n'), ((1457, 1497), 'pointprocesses.temporal.variable_poisson', 'variable_poisson', (['tmax', 'intens', 'max_lbda'], {}), '(tmax, intens, max_lbda)\n', (1473, 1497), False, 'from 
pointprocesses.temporal import variable_poisson\n'), ((389, 429), 'numpy.sum', 'np.sum', (['((low < events) & (events < high))'], {}), '((low < events) & (events < high))\n', (395, 429), True, 'import numpy as np\n'), ((1315, 1330), 'numpy.exp', 'np.exp', (['(0.2 * x)'], {}), '(0.2 * x)\n', (1321, 1330), True, 'import numpy as np\n'), ((1282, 1297), 'numpy.sin', 'np.sin', (['(1.4 * x)'], {}), '(1.4 * x)\n', (1288, 1297), True, 'import numpy as np\n'), ((1263, 1273), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1269, 1273), True, 'import numpy as np\n')] |
"""Functions for spectrum alignment."""
import os
import sys
import numpy as np
import scipy
import torch
from scipy import sparse
from shape_library import load_mesh, prepare_mesh, resample
# Device on which all tensors are placed by default; requires a CUDA GPU.
DEFAULT_DEVICE = torch.device("cuda")
class OptimizationParams:
    """Container for the optimization hyperparameters."""

    def __init__(self):
        """Populate every hyperparameter with its default value."""
        # Training schedule
        self.steps = 5000        # total optimization iterations
        self.checkpoint = 100    # iterations between progress dumps
        self.remesh_step = 500   # iterations between re-triangulations
        # Number of eigenvalues to align
        self.evals = [20]
        # Early stopping threshold on the eigenvalue loss
        self.min_eval_loss = 0.05
        # Adam optimizer step size
        self.learning_rate = 0.005
        # Regularizer coefficients
        self.decay_target = 0.05
        self.bound_reg = 2e1
        self.inner_reg = 1e0
        self.flip_penalty_reg = 1e10
def tf_calc_lap(mesh, VERT, device=DEFAULT_DEVICE):
    """Compute the (stiffness) Laplacian of the mesh for embedding VERT.

    Parameters:
        mesh: 17-element mesh description (as produced by prepare_mesh);
            only the combinatorial/operator entries are used here.
        VERT: vertex embedding; its dtype selects the tensor precision.
        device: torch device on which all tensors are created.

    Returns:
        Lx: dense (n, n) Laplacian matrix.
        S:  per-vertex area weights (lumped mass), computed as the sum of
            incident triangle areas divided by 6 below.
        L:  per-edge lengths, shape (m, 1).
        Ak: per-edge triangle areas from fAk.
    """
    # Move the mesh to the target device
    mesh_tensor = []
    for i in range(len(mesh)):
        mesh_tensor.append(torch.as_tensor(mesh[i]).to(device))
    # Unpack the mesh
    [
        _,
        TRIV,
        n,
        m,
        Ik,
        Ih,
        Ik_k,
        Ih_k,
        Tpi,
        Txi,
        Tni,
        iM,
        Windices,
        Ael,
        Bary,
        bound_edges,
        ord_list,
    ] = mesh_tensor
    # Set the data type from the embedding's dtype (numpy dtypes compare
    # equal to their string names).
    # NOTE(review): if VERT is a torch tensor these string comparisons
    # presumably fall through to float32 -- confirm callers' intent.
    dtype = "float32"
    if VERT.dtype == "float64":
        dtype = "float64"
    if VERT.dtype == "float16":
        dtype = "float16"
    # Move the embedding to the target device
    VERT = torch.as_tensor(VERT).to(device)
    # Compute the edge lengths: iM maps vertices to edge vectors.
    L2 = torch.unsqueeze(torch.sum(torch.mm(iM, VERT) ** 2, dim=1), dim=1)
    L = torch.sqrt(L2)

    def fAk(Ik, Ik_k):  # Ik: 1 if (edg1, edg2) in same tri, -1 if same edge
        """Per-edge triangle area via Heron's formula (stabilized)."""
        Ikp = torch.abs(Ik)
        Sk = torch.mm(Ikp, L) / 2  # Perimeter of associated tri for each edge (m, )
        SkL = Sk - L  # NOTE(review): unused local
        # Heron's formula: area^2 = s(s-a)(s-b)(s-c), with s the
        # semi-perimeter Sk and a, b, c the three edge lengths.
        Ak = (
            Sk
            * (torch.mm(Ik_k[:, :, 0], Sk) - torch.mm(Ik_k[:, :, 0], L))
            * (torch.mm(Ik_k[:, :, 0], Sk) - torch.mm(Ik_k[:, :, 1], L))
            * (torch.mm(Ik_k[:, :, 0], Sk) - torch.mm(Ik_k[:, :, 2], L))
        )
        # abs + epsilon guards against tiny negative values from roundoff.
        return torch.sqrt(torch.abs(Ak) + 1e-20)

    Ak = fAk(Ik, Ik_k)  # (m, )
    Ah = fAk(Ih, Ih_k)  # (m, )
    # Sparse representation of the Laplacian matrix (cotangent-style
    # per-edge weights built from edge lengths and triangle areas).
    W = -torch.mm(Ik, L2) / (8 * Ak) - torch.mm(Ih, L2) / (8 * Ah)  # (m, )
    # Compute indices to build the dense Laplacian matrix; the sparse
    # scatter matrix Windtf places each edge weight at its (i*n+j) slot.
    # NOTE(review): torch.sparse.FloatTensor / torch.cuda.* constructors
    # are legacy APIs (torch.sparse_coo_tensor is the modern form), and
    # the float64/float16 branches hard-require CUDA tensors.
    if dtype == "float32":
        Windtf = torch.sparse.FloatTensor(
            torch.tensor(
                Windices.type(torch.long), dtype=torch.long, device=device
            ).t(),  # (2, m) index matrix
            torch.tensor(-np.ones((m), dtype), dtype=torch.float, device=device),
            torch.Size([n * n, m]),
        )
    elif dtype == "float64":
        Windtf = torch.sparse.DoubleTensor(
            torch.cuda.LongTensor(Windices.type(torch.long), device=device).t(),
            torch.cuda.DoubleTensor(-np.ones((m), dtype), device=device),
            torch.Size([n * n, m]),
        )
    elif dtype == "float16":
        Windtf = torch.sparse.HalfTensor(
            torch.cuda.LongTensor(Windices.type(torch.long), device=device).t(),
            torch.cuda.HalfTensor(-np.ones((m), dtype), device=device),
            torch.Size([n * n, m]),
        )
    # Scatter edge weights into a dense (n, n) matrix and symmetrize.
    Wfull = -torch.reshape(torch.mm(Windtf, W), (n, n))
    Wfull = Wfull + torch.t(Wfull)
    # Compute the actual Laplacian: off-diagonal weights minus row sums.
    Lx = Wfull - torch.diag(torch.sum(Wfull, dim=1))  # (n, n)
    S = (torch.mm(Ael, Ak) + torch.mm(Ael, Ah)) / 6  # (n, )
    return Lx, S, L, Ak
def calc_evals(VERT, TRIV):
    """Return the eigenvalue sequence of the mesh (VERT, TRIV).

    Builds the area-normalized Laplacian S^{-1/2} Lx S^{-1/2} and
    returns its eigenvalues."""
    mesh = prepare_mesh(VERT, TRIV)
    Lx, S, _, _ = tf_calc_lap(mesh, mesh[0])
    # Symmetric normalization by the inverse square root of the areas.
    inv_sqrt_area = torch.diag(torch.sqrt(1 / S[:, 0]))
    Lap = torch.mm(inv_sqrt_area, torch.mm(Lx, inv_sqrt_area))
    # NOTE: torch.symeig is deprecated in newer torch releases.
    evals, _ = torch.symeig(Lap)
    return evals
def initialize(mesh, step=1.0, params=OptimizationParams(), device=DEFAULT_DEVICE):
    """Initialize the optimization state.

    Creates a namespace holding the boundary/interior displacement
    fields, their Adam optimizers, the working dtype and the global
    step counter.
    """
    # A lambda serves as a bare attribute container (namespace).
    graph = lambda: None
    graph.global_step = torch.as_tensor(step + 1.0, dtype=torch.float32)
    # Unpack the mesh; only the vertex embedding is needed here, but the
    # full unpack also validates the expected 17-element layout.
    [
        Xori,
        TRIV,
        n,
        m,
        Ik,
        Ih,
        Ik_k,
        Ih_k,
        Tpi,
        Txi,
        Tni,
        iM,
        Windices,
        Ael,
        Bary,
        bound_edges,
        ord_list,
    ] = mesh
    # Mirror the embedding's floating-point precision.
    if Xori.dtype == "float64":
        graph.dtype = "float64"
    elif Xori.dtype == "float16":
        graph.dtype = "float16"
    else:
        graph.dtype = "float32"
    # Shape deformation modeled as displacement fields: dXb moves the
    # boundary vertices, dXi the interior ones; each gets its own Adam.
    for field_name in ("dXb", "dXi"):
        field = torch.zeros(Xori.shape, requires_grad=True, device=device)
        setattr(graph, field_name, field)
        setattr(
            graph,
            "optim_" + field_name,
            torch.optim.Adam([field], lr=params.learning_rate),
        )
    return graph
def l2_loss(t):
    """Half the sum of squared entries of `t` (0.5 * ||t||_F^2)."""
    return torch.sum(t * t) / 2
def forward(
    costType,
    mode,
    graph,
    mesh,
    target_evals,
    nevals,
    step=1.0,
    params=OptimizationParams(),
    device=DEFAULT_DEVICE
):
    """Perform a forward pass and, in train mode, one optimizer step.

    Parameters:
        costType: "bound" optimizes the boundary vertices, "inner" the
            interior ones (only consulted when mode == "train").
        mode: "train" takes a gradient step; "eval" only reports costs.
        graph: state namespace produced by `initialize`.
        mesh: 17-element mesh description (as produced by prepare_mesh).
        target_evals: target eigenvalue sequence (torch tensor).
        nevals: number of leading eigenvalues to align.
        step: current iteration index (decay uses graph.global_step).
        params: OptimizationParams instance.
        device: torch device on which tensors are created.

    Returns (as numpy values):
        "train": [total cost, eigenvalue cost, current embedding X].
        "eval": [cost_bound, cost_evals, inner_reg_cost, bound_reg_cost,
                 decay, cp (triangle orientation values), evals].
    """
    # Unpack the mesh
    [
        Xori,
        TRIV,
        n,
        m,
        Ik,
        Ih,
        Ik_k,
        Ih_k,
        Tpi,
        Txi,
        Tni,
        iM,
        Windices,
        Ael,
        Bary,
        bound_edges,
        ord_list,
    ] = mesh
    # Cosine decay for the regularizers, driven by the global step.
    cosine_decay = 0.5 * (
        1
        + np.cos(
            3.14
            * np.minimum(params.steps / 2.0, graph.global_step)
            / (params.steps / 2.0)
        )
    )
    decay = (1 - params.decay_target) * cosine_decay + params.decay_target
    # np.float was removed in NumPy 1.24; it was an alias of the builtin.
    decay = float(decay)
    scaleX = 1  # not used in shape alignment
    # Model the shape deformation as a displacement vector field:
    # dXb acts on boundary vertices (mask below), dXi on the interior.
    bound_vert = np.zeros((n, 1), graph.dtype)
    bound_vert[ord_list] = 1

    def to_device(t):
        """Move `t` to the target device as a torch tensor."""
        return torch.as_tensor(t).to(device)

    bound_vert = to_device(bound_vert)
    X = (to_device(Xori) + graph.dXb * bound_vert + graph.dXi * (1 - bound_vert)) * scaleX
    Lx, S, L, Ak = tf_calc_lap(mesh, X)
    # Normalized Laplacian: S^{-1/2} Lx S^{-1/2}.
    Si = torch.diag(torch.sqrt(1 / S[:, 0]))
    Lap = torch.mm(Si, torch.mm(Lx, Si))
    # Spectral decomposition.
    # NOTE(review): torch.symeig is removed in torch >= 1.13
    # (torch.linalg.eigh is the successor) -- confirm the pinned version.
    [evals, v] = torch.symeig(Lap, eigenvectors=True)
    # Eigenvalue alignment cost, down-weighting higher eigenvalues by 1/i.
    cost_evals = 1e1 * l2_loss(
        (evals[0:nevals] - target_evals[0:nevals])
        * (
            1
            / torch.as_tensor(np.asarray(range(1, nevals + 1), graph.dtype)).to(device)
        )
    )
    # Triangle flip penalty: cp is a signed orientation value per triangle;
    # cp - |cp| is nonzero only for flipped (negative) triangles.
    Tpi = to_device(Tpi)
    Txi = to_device(Txi)
    Tni = to_device(Tni)
    tp = torch.mm(Tpi[:, :], X)
    tx = torch.mm(Txi[:, :], X)
    tn = torch.mm(Tni[:, :], X)
    Rot = to_device(np.asarray([[0, 1], [-1, 0]], graph.dtype))
    cp = torch.sum(torch.mm(tn, Rot) * (tx - tp), dim=1)
    cp = cp - 1e-4  # small margin so near-degenerate triangles are penalized
    flip_cost = params.flip_penalty_reg * l2_loss(cp - torch.abs(cp))
    # Inner points regularizer: short edges and uniform triangle areas.
    varA = torch.std(Ak, dim=[0])
    inner_reg_cost = params.inner_reg * (l2_loss(L) + l2_loss(varA))
    # Boundary points regularizer: total boundary edge length, decayed.
    bound_reg_cost = params.bound_reg * decay * torch.sum(L[bound_edges[:, 0], :])
    # Cost functions for the boundary and interior optimization phases.
    cost_bound = cost_evals + flip_cost + bound_reg_cost
    cost_inner = inner_reg_cost + flip_cost

    def to_numpy(a):
        """Detach each tensor in `a` and convert it to a numpy array."""
        return [ai.cpu().detach().numpy() for ai in a]

    if mode == "train":
        if costType == "bound":
            graph.optim_dXb.zero_grad()
            cost_bound.backward()
            # Clamp gradients to keep individual updates tiny.
            graph.dXb.grad.data.clamp_(-0.0001, 0.0001)
            graph.optim_dXb.step()
            outList = [cost_bound, cost_evals, X]
            return to_numpy(outList)
        if costType == "inner":
            graph.optim_dXi.zero_grad()
            cost_inner.backward()
            graph.dXi.grad.data.clamp_(-0.0001, 0.0001)
            graph.optim_dXi.step()
            outList = [cost_inner, cost_evals, X]
            return to_numpy(outList)
    elif mode == "eval":
        outList1 = [cost_bound, cost_evals, inner_reg_cost, bound_reg_cost]
        outList1 = to_numpy(outList1)
        outList2 = [cp, evals]
        outList2 = to_numpy(outList2)
        return outList1 + [decay] + outList2
def run_optimization(mesh, target_evals, out_path, params=OptimizationParams()):
    """Run the spectrum-alignment optimization.

    Alternates (in blocks of 10 iterations) between optimizing interior
    and boundary vertices, periodically re-triangulating the shape,
    checkpointing intermediate embeddings/eigenvalues to `out_path`, and
    restarting from a perturbed state when the decomposition fails.

    Parameters:
        mesh: 17-element mesh description (as produced by prepare_mesh).
        target_evals: target eigenvalue sequence (torch tensor).
        out_path: output directory; "ply" and "txt" subfolders are created.
        params: OptimizationParams instance.

    NOTE(review): save_ply, tic and toc are not among the names imported
    at the top of this file -- confirm they are available at runtime.
    """
    # Create the output directories
    os.makedirs(f"{out_path}/ply", exist_ok=True)
    os.makedirs(f"{out_path}/txt", exist_ok=True)
    # Unpack the mesh
    [
        Xopt,
        TRIV,
        n,
        m,
        Ik,
        Ih,
        Ik_k,
        Ih_k,
        Tpi,
        Txi,
        Tni,
        iM,
        Windices,
        Ael,
        Bary,
        bound_edges,
        ord_list,
    ] = mesh
    # Save the initial embedding
    save_ply(Xopt, TRIV, "%s/ply/initial.ply" % out_path)
    # Save the target eigenvalue sequence
    np.savetxt("%s/txt/target.txt" % out_path, target_evals.cpu().detach().numpy())
    iterations = []  # (step, nevals, cost, eval-cost, phase) per iteration
    for nevals in params.evals:
        step = 0
        # `step` is shared between the while loop and the inner for loop:
        # breaking out of the for loop (remesh / failure) resumes here.
        while step < params.steps - 1:
            # Prepare the mesh
            mesh = prepare_mesh(Xopt, TRIV)
            # Unpack the mesh
            [
                Xori,
                TRIV,
                n,
                m,
                Ik,
                Ih,
                Ik_k,
                Ih_k,
                Tpi,
                Txi,
                Tni,
                iM,
                Windices,
                Ael,
                Bary,
                bound_edges,
                ord_list,
            ] = mesh
            # Initialize the model (fresh displacements and optimizers)
            graph = initialize(mesh, step=step)
            tic()
            # Start iteration
            for step in range(step + 1, params.steps):
                # Recompute triangulation every remesh_step iterations
                if step % params.remesh_step == 0:
                    print("RECOMPUTING TRIANGULATION at step %d" % step)
                    break
                try:
                    # Alternate optimization of inner and boundary vertices
                    # in blocks of 10 iterations.
                    if int(step / 10) % 2 == 0:
                        # Optimize over inner points
                        er, ee, Xopt_t = forward(
                            "inner",
                            "train",
                            graph,
                            mesh,
                            target_evals,
                            nevals,
                            step,
                            params,
                        )
                    else:
                        # Optimize over boundary points
                        er, ee, Xopt_t = forward(
                            "bound",
                            "train",
                            graph,
                            mesh,
                            target_evals,
                            nevals,
                            step,
                            params,
                        )
                    iterations.append((step, nevals, er, ee, int(step / 10) % 2))
                    if (
                        step % params.checkpoint == 0
                        or step == params.steps - 1
                        or step == 1
                    ):
                        toc()
                        tic()
                        # Perform a forward pass in eval mode
                        (
                            cost,
                            cost_evals,
                            cost_vcL,
                            cost_vcW,
                            decay,
                            flip,
                            evout,
                        ) = forward(
                            "bound", "eval", graph, mesh, target_evals, nevals, step
                        )
                        print(
                            "Iter %f, cost: %f(evals cost: %f (%f) (%f), smoothness weight: %f). Flip: %d"
                            % (
                                step,
                                cost,
                                cost_evals,
                                cost_vcL,
                                cost_vcW,
                                decay,
                                np.sum(flip < 0),
                            )
                        )
                        # Save the current embedding
                        save_ply(
                            Xopt,
                            TRIV,
                            "%s/ply/evals_%d_iter_%06d.ply" % (out_path, nevals, step),
                        )
                        # Save the current eigenvalue sequence
                        np.savetxt(
                            "%s/txt/evals_%d_iter_%06d.txt" % (out_path, nevals, step),
                            evout,
                        )
                        # Save the training progress statistics
                        np.savetxt("%s/iterations.txt" % (out_path), iterations)
                    # Early stopping
                    if ee < params.min_eval_loss:
                        step = params.steps
                        print("Minimum eigenvalues loss reached")
                        break
                except KeyboardInterrupt:
                    step = params.steps
                    break
                except:
                    # NOTE(review): bare except -- any failure (typically a
                    # diverged spectral decomposition) marks ee as NaN and
                    # triggers the perturbation/restart branch below.
                    print(sys.exc_info())
                    ee = float("nan")
                if ee != ee:
                    # If nan (something went wrong) with the spectral decomposition,
                    # perturbate the last valid state and start over
                    print("iter %d. Perturbating initial condition" % step)
                    Xopt = (
                        Xopt
                        + (np.random.rand(np.shape(Xopt)[0], np.shape(Xopt)[1]) - 0.5)
                        * 1e-3
                    )
                    graph.global_step = step
                else:
                    Xopt = Xopt_t
                    graph.global_step += 1
            if step < params.steps - 1:
                # Left the inner loop early for a remesh: resample the shape.
                [Xopt, TRIV] = resample(Xopt, TRIV)
| [
"numpy.sum",
"torch.sqrt",
"torch.mm",
"numpy.ones",
"numpy.shape",
"torch.std",
"sys.exc_info",
"torch.device",
"torch.t",
"numpy.savetxt",
"torch.zeros",
"numpy.minimum",
"numpy.asarray",
"numpy.float",
"shape_library.resample",
"torch.optim.Adam",
"torch.Size",
"torch.sum",
"o... | [((211, 231), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (223, 231), False, 'import torch\n'), ((1690, 1704), 'torch.sqrt', 'torch.sqrt', (['L2'], {}), '(L2)\n', (1700, 1704), False, 'import torch\n'), ((3703, 3727), 'shape_library.prepare_mesh', 'prepare_mesh', (['VERT', 'TRIV'], {}), '(VERT, TRIV)\n', (3715, 3727), False, 'from shape_library import load_mesh, prepare_mesh, resample\n'), ((3874, 3891), 'torch.symeig', 'torch.symeig', (['Lap'], {}), '(Lap)\n', (3886, 3891), False, 'import torch\n'), ((4092, 4140), 'torch.as_tensor', 'torch.as_tensor', (['(step + 1.0)'], {'dtype': 'torch.float32'}), '(step + 1.0, dtype=torch.float32)\n', (4107, 4140), False, 'import torch\n'), ((4680, 4738), 'torch.zeros', 'torch.zeros', (['Xori.shape'], {'requires_grad': '(True)', 'device': 'device'}), '(Xori.shape, requires_grad=True, device=device)\n', (4691, 4738), False, 'import torch\n'), ((4755, 4813), 'torch.zeros', 'torch.zeros', (['Xori.shape'], {'requires_grad': '(True)', 'device': 'device'}), '(Xori.shape, requires_grad=True, device=device)\n', (4766, 4813), False, 'import torch\n'), ((4858, 4912), 'torch.optim.Adam', 'torch.optim.Adam', (['[graph.dXb]'], {'lr': 'params.learning_rate'}), '([graph.dXb], lr=params.learning_rate)\n', (4874, 4912), False, 'import torch\n'), ((4935, 4989), 'torch.optim.Adam', 'torch.optim.Adam', (['[graph.dXi]'], {'lr': 'params.learning_rate'}), '([graph.dXi], lr=params.learning_rate)\n', (4951, 4989), False, 'import torch\n'), ((5882, 5897), 'numpy.float', 'np.float', (['decay'], {}), '(decay)\n', (5890, 5897), True, 'import numpy as np\n'), ((6028, 6057), 'numpy.zeros', 'np.zeros', (['(n, 1)', 'graph.dtype'], {}), '((n, 1), graph.dtype)\n', (6036, 6057), True, 'import numpy as np\n'), ((6488, 6524), 'torch.symeig', 'torch.symeig', (['Lap'], {'eigenvectors': '(True)'}), '(Lap, eigenvectors=True)\n', (6500, 6524), False, 'import torch\n'), ((6851, 6873), 'torch.mm', 'torch.mm', (['Tpi[:, :]', 'X'], {}), 
'(Tpi[:, :], X)\n', (6859, 6873), False, 'import torch\n'), ((6883, 6905), 'torch.mm', 'torch.mm', (['Txi[:, :]', 'X'], {}), '(Txi[:, :], X)\n', (6891, 6905), False, 'import torch\n'), ((6915, 6937), 'torch.mm', 'torch.mm', (['Tni[:, :]', 'X'], {}), '(Tni[:, :], X)\n', (6923, 6937), False, 'import torch\n'), ((7191, 7213), 'torch.std', 'torch.std', (['Ak'], {'dim': '[0]'}), '(Ak, dim=[0])\n', (7200, 7213), False, 'import torch\n'), ((8691, 8736), 'os.makedirs', 'os.makedirs', (['f"""{out_path}/ply"""'], {'exist_ok': '(True)'}), "(f'{out_path}/ply', exist_ok=True)\n", (8702, 8736), False, 'import os\n'), ((8741, 8786), 'os.makedirs', 'os.makedirs', (['f"""{out_path}/txt"""'], {'exist_ok': '(True)'}), "(f'{out_path}/txt', exist_ok=True)\n", (8752, 8786), False, 'import os\n'), ((1797, 1810), 'torch.abs', 'torch.abs', (['Ik'], {}), '(Ik)\n', (1806, 1810), False, 'import torch\n'), ((3419, 3433), 'torch.t', 'torch.t', (['Wfull'], {}), '(Wfull)\n', (3426, 3433), False, 'import torch\n'), ((3793, 3816), 'torch.sqrt', 'torch.sqrt', (['(1 / S[:, 0])'], {}), '(1 / S[:, 0])\n', (3803, 3816), False, 'import torch\n'), ((3841, 3857), 'torch.mm', 'torch.mm', (['Lx', 'Si'], {}), '(Lx, Si)\n', (3849, 3857), False, 'import torch\n'), ((5072, 5089), 'torch.sum', 'torch.sum', (['(t ** 2)'], {}), '(t ** 2)\n', (5081, 5089), False, 'import torch\n'), ((6375, 6398), 'torch.sqrt', 'torch.sqrt', (['(1 / S[:, 0])'], {}), '(1 / S[:, 0])\n', (6385, 6398), False, 'import torch\n'), ((6423, 6439), 'torch.mm', 'torch.mm', (['Lx', 'Si'], {}), '(Lx, Si)\n', (6431, 6439), False, 'import torch\n'), ((6958, 7000), 'numpy.asarray', 'np.asarray', (['[[0, 1], [-1, 0]]', 'graph.dtype'], {}), '([[0, 1], [-1, 0]], graph.dtype)\n', (6968, 7000), True, 'import numpy as np\n'), ((7366, 7400), 'torch.sum', 'torch.sum', (['L[bound_edges[:, 0], :]'], {}), '(L[bound_edges[:, 0], :])\n', (7375, 7400), False, 'import torch\n'), ((1542, 1563), 'torch.as_tensor', 'torch.as_tensor', (['VERT'], {}), '(VERT)\n', 
(1557, 1563), False, 'import torch\n'), ((1824, 1840), 'torch.mm', 'torch.mm', (['Ikp', 'L'], {}), '(Ikp, L)\n', (1832, 1840), False, 'import torch\n'), ((2382, 2398), 'torch.mm', 'torch.mm', (['Ih', 'L2'], {}), '(Ih, L2)\n', (2390, 2398), False, 'import torch\n'), ((2765, 2787), 'torch.Size', 'torch.Size', (['[n * n, m]'], {}), '([n * n, m])\n', (2775, 2787), False, 'import torch\n'), ((3370, 3389), 'torch.mm', 'torch.mm', (['Windtf', 'W'], {}), '(Windtf, W)\n', (3378, 3389), False, 'import torch\n'), ((3498, 3521), 'torch.sum', 'torch.sum', (['Wfull'], {'dim': '(1)'}), '(Wfull, dim=1)\n', (3507, 3521), False, 'import torch\n'), ((3542, 3559), 'torch.mm', 'torch.mm', (['Ael', 'Ak'], {}), '(Ael, Ak)\n', (3550, 3559), False, 'import torch\n'), ((3562, 3579), 'torch.mm', 'torch.mm', (['Ael', 'Ah'], {}), '(Ael, Ah)\n', (3570, 3579), False, 'import torch\n'), ((7021, 7038), 'torch.mm', 'torch.mm', (['tn', 'Rot'], {}), '(tn, Rot)\n', (7029, 7038), False, 'import torch\n'), ((9446, 9470), 'shape_library.prepare_mesh', 'prepare_mesh', (['Xopt', 'TRIV'], {}), '(Xopt, TRIV)\n', (9458, 9470), False, 'from shape_library import load_mesh, prepare_mesh, resample\n'), ((1642, 1660), 'torch.mm', 'torch.mm', (['iM', 'VERT'], {}), '(iM, VERT)\n', (1650, 1660), False, 'import torch\n'), ((2108, 2135), 'torch.mm', 'torch.mm', (['Ik_k[:, :, 0]', 'Sk'], {}), '(Ik_k[:, :, 0], Sk)\n', (2116, 2135), False, 'import torch\n'), ((2138, 2164), 'torch.mm', 'torch.mm', (['Ik_k[:, :, 2]', 'L'], {}), '(Ik_k[:, :, 2], L)\n', (2146, 2164), False, 'import torch\n'), ((2202, 2215), 'torch.abs', 'torch.abs', (['Ak'], {}), '(Ak)\n', (2211, 2215), False, 'import torch\n'), ((2352, 2368), 'torch.mm', 'torch.mm', (['Ik', 'L2'], {}), '(Ik, L2)\n', (2360, 2368), False, 'import torch\n'), ((3039, 3061), 'torch.Size', 'torch.Size', (['[n * n, m]'], {}), '([n * n, m])\n', (3049, 3061), False, 'import torch\n'), ((6125, 6143), 'torch.as_tensor', 'torch.as_tensor', (['t'], {}), '(t)\n', (6140, 6143), False, 
'import torch\n'), ((7133, 7146), 'torch.abs', 'torch.abs', (['cp'], {}), '(cp)\n', (7142, 7146), False, 'import torch\n'), ((14455, 14475), 'shape_library.resample', 'resample', (['Xopt', 'TRIV'], {}), '(Xopt, TRIV)\n', (14463, 14475), False, 'from shape_library import load_mesh, prepare_mesh, resample\n'), ((1001, 1025), 'torch.as_tensor', 'torch.as_tensor', (['mesh[i]'], {}), '(mesh[i])\n', (1016, 1025), False, 'import torch\n'), ((2035, 2062), 'torch.mm', 'torch.mm', (['Ik_k[:, :, 0]', 'Sk'], {}), '(Ik_k[:, :, 0], Sk)\n', (2043, 2062), False, 'import torch\n'), ((2065, 2091), 'torch.mm', 'torch.mm', (['Ik_k[:, :, 1]', 'L'], {}), '(Ik_k[:, :, 1], L)\n', (2073, 2091), False, 'import torch\n'), ((2697, 2714), 'numpy.ones', 'np.ones', (['m', 'dtype'], {}), '(m, dtype)\n', (2704, 2714), True, 'import numpy as np\n'), ((3309, 3331), 'torch.Size', 'torch.Size', (['[n * n, m]'], {}), '([n * n, m])\n', (3319, 3331), False, 'import torch\n'), ((1962, 1989), 'torch.mm', 'torch.mm', (['Ik_k[:, :, 0]', 'Sk'], {}), '(Ik_k[:, :, 0], Sk)\n', (1970, 1989), False, 'import torch\n'), ((1992, 2018), 'torch.mm', 'torch.mm', (['Ik_k[:, :, 0]', 'L'], {}), '(Ik_k[:, :, 0], L)\n', (2000, 2018), False, 'import torch\n'), ((2990, 3007), 'numpy.ones', 'np.ones', (['m', 'dtype'], {}), '(m, dtype)\n', (2997, 3007), True, 'import numpy as np\n'), ((5694, 5743), 'numpy.minimum', 'np.minimum', (['(params.steps / 2.0)', 'graph.global_step'], {}), '(params.steps / 2.0, graph.global_step)\n', (5704, 5743), True, 'import numpy as np\n'), ((13011, 13088), 'numpy.savetxt', 'np.savetxt', (["('%s/txt/evals_%d_iter_%06d.txt' % (out_path, nevals, step))", 'evout'], {}), "('%s/txt/evals_%d_iter_%06d.txt' % (out_path, nevals, step), evout)\n", (13021, 13088), True, 'import numpy as np\n'), ((13261, 13315), 'numpy.savetxt', 'np.savetxt', (["('%s/iterations.txt' % out_path)", 'iterations'], {}), "('%s/iterations.txt' % out_path, iterations)\n", (13271, 13315), True, 'import numpy as np\n'), ((3260, 3277), 
'numpy.ones', 'np.ones', (['m', 'dtype'], {}), '(m, dtype)\n', (3267, 3277), True, 'import numpy as np\n'), ((13726, 13740), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (13738, 13740), False, 'import sys\n'), ((12579, 12595), 'numpy.sum', 'np.sum', (['(flip < 0)'], {}), '(flip < 0)\n', (12585, 12595), True, 'import numpy as np\n'), ((14140, 14154), 'numpy.shape', 'np.shape', (['Xopt'], {}), '(Xopt)\n', (14148, 14154), True, 'import numpy as np\n'), ((14159, 14173), 'numpy.shape', 'np.shape', (['Xopt'], {}), '(Xopt)\n', (14167, 14173), True, 'import numpy as np\n')] |
import numpy as np
from bokeh.models import ColumnDataSource, Jitter
from bokeh.plotting import figure, show, output_file
p = figure(plot_width=500, plot_height=400, x_range=(0, 3), y_range=(0, 10))

# Two samples of 2500 points: uniform on [0, 10) and normal around 5.
y1 = np.random.random(2500) * 10
y2 = np.random.normal(size=2500)*2 + 5


def jittered(center):
    """x-spec at `center` with uniform horizontal jitter of width 0.4."""
    return {'value': center, 'transform': Jitter(width=0.4)}


p.circle(x=jittered(1), y=y1, color="navy", alpha=0.3)
p.circle(x=jittered(2), y=y2, color="firebrick", alpha=0.3)

output_file("jitter.html")
show(p)
| [
"bokeh.plotting.figure",
"bokeh.models.Jitter",
"bokeh.plotting.output_file",
"numpy.random.random",
"bokeh.plotting.show",
"numpy.random.normal"
] | [((128, 200), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': '(500)', 'plot_height': '(400)', 'x_range': '(0, 3)', 'y_range': '(0, 10)'}), '(plot_width=500, plot_height=400, x_range=(0, 3), y_range=(0, 10))\n', (134, 200), False, 'from bokeh.plotting import figure, show, output_file\n'), ((474, 500), 'bokeh.plotting.output_file', 'output_file', (['"""jitter.html"""'], {}), "('jitter.html')\n", (485, 500), False, 'from bokeh.plotting import figure, show, output_file\n'), ((502, 509), 'bokeh.plotting.show', 'show', (['p'], {}), '(p)\n', (506, 509), False, 'from bokeh.plotting import figure, show, output_file\n'), ((205, 227), 'numpy.random.random', 'np.random.random', (['(2500)'], {}), '(2500)\n', (221, 227), True, 'import numpy as np\n'), ((238, 265), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(2500)'}), '(size=2500)\n', (254, 265), True, 'import numpy as np\n'), ((310, 327), 'bokeh.models.Jitter', 'Jitter', ([], {'width': '(0.4)'}), '(width=0.4)\n', (316, 327), False, 'from bokeh.models import ColumnDataSource, Jitter\n'), ((408, 425), 'bokeh.models.Jitter', 'Jitter', ([], {'width': '(0.4)'}), '(width=0.4)\n', (414, 425), False, 'from bokeh.models import ColumnDataSource, Jitter\n')] |
####################################################################
# Basic plot for two-strain SIR model:
# Time series given some initial conditions
####################################################################
import sys
import numpy as np
import pylab as plt
from matplotlib.font_manager import FontProperties
from two_strain import *
import csv


def write_infecteds_csv(filename, data):
    """Write a (3, T) array of [times, I1, I2] rows to *filename* as CSV.

    Bug fix: csv.writer requires text mode with newline='' in Python 3;
    the previous 'wb' mode raised TypeError (csv writes str, not bytes).
    """
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['times', 'I1', 'I2'])
        writer.writerows(data.T)


# Run parameters
run_num = 1  # sys.argv[1]
end_time = 100*365
output_interval = 1.0
step_size = 0.1

# Strain parameters, including initial conditions
beta = np.array([5, 5])/7.0
epsilon = 0.1
gamma = np.array([1, 1])/7.0
mu = 1/(10*365.0)
alpha = np.array([1., 1.])
a = np.array([1., 1.])
omega = 2*np.pi/365.
obs_sd = 0.01
NSS = 0.2
NIS = 1e-3
NRS = 0.02
NRI = 0.0
NSI = 1e-3
NSR = 0.02
NIR = 0.0

# Organize and run simulation.
# NOTE(review): this builds a ragged array (arrays mixed with scalars);
# recent NumPy versions require dtype=object for that -- confirm against
# the NumPy version used with two_strain.run_two_strain.
params = np.array([gamma, mu, alpha, a, omega, beta, epsilon])
SI = np.array([NSS, NIS, NRS, NRI, NSI, NSR, NIR])
ic = np.array([NSS, NIS, NRS, NRI, NSI, NSR, NIR, 1-np.sum(SI)])
output = run_two_strain(end_time, output_interval, step_size, params, ic)

# Save output (NIS+NIR, NSI+NRI) to csv and plot
infecteds = np.asarray([output[:, 1] + output[:, 6], output[:, 3] + output[:, 4]])
times = np.arange(0, infecteds.shape[1])
infecteds_t = np.vstack((times, infecteds))
write_infecteds_csv('infecteds_' + str(run_num) + '.csv', infecteds_t)

# Add observation error if present
if obs_sd > 0:
    errors = np.random.normal(1, obs_sd, infecteds.shape)
    infecteds_obs = infecteds*errors
    # Bug fix: write the observed (noisy) series; previously the noise-free
    # infecteds_t was written again into the *_obs_* file by mistake.
    infecteds_obs_t = np.vstack((times, infecteds_obs))
    write_infecteds_csv('infecteds_obs_' + str(run_num) + '.csv', infecteds_obs_t)

# Panel 1: uninfected compartments
plt.subplot(3, 1, 1)
plt.plot(output[:, 0], 'b-', label=r'$N_{SS}$')
plt.plot(output[:, 2], 'g-', label=r'$N_{RS}$')
plt.plot(output[:, 5], 'r-', label=r'$N_{SR}$')
plt.plot(output[:, 7], 'c-', label=r'$N_{RR}$')
plt.xlabel('Time')
plt.ylabel('Uninfected')
plt.legend(loc=1, prop=FontProperties(size='smaller'))

# Panel 2: strain-1 infections (weighted by relative infectivity a[0])
plt.subplot(3, 1, 2)
plt.plot(output[:, 1], 'b-', label=r'$N_{IS}$')
plt.plot(output[:, 6], 'g-', label=r'$N_{IR}$')
plt.plot((output[:, 1]+a[0]*output[:, 6]), 'r-', label=r'$I_1$')
plt.xlabel('Time')
plt.ylabel('Infected 1')
plt.legend(loc=1, prop=FontProperties(size='smaller'))

# Panel 3: strain-2 infections (weighted by relative infectivity a[1])
plt.subplot(3, 1, 3)
plt.plot(output[:, 4], 'b-', label=r'$N_{SI}$')
plt.plot(output[:, 3], 'g-', label=r'$N_{RI}$')
plt.plot((output[:, 4]+a[1]*output[:, 3]), 'r-', label=r'$I_2$')
plt.xlabel('Time')
plt.ylabel('Infected 2')
plt.legend(loc=1, prop=FontProperties(size='smaller'))

plt.savefig("time_series_" + str(run_num) + ".png")
plt.show()
plt.close()
| [
"pylab.close",
"pylab.show",
"numpy.sum",
"csv.writer",
"matplotlib.font_manager.FontProperties",
"numpy.asarray",
"pylab.ylabel",
"pylab.subplot",
"numpy.array",
"numpy.arange",
"pylab.xlabel",
"numpy.random.normal",
"pylab.plot",
"numpy.vstack"
] | [((629, 649), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (637, 649), True, 'import numpy as np\n'), ((652, 672), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (660, 672), True, 'import numpy as np\n'), ((821, 874), 'numpy.array', 'np.array', (['[gamma, mu, alpha, a, omega, beta, epsilon]'], {}), '([gamma, mu, alpha, a, omega, beta, epsilon])\n', (829, 874), True, 'import numpy as np\n'), ((880, 925), 'numpy.array', 'np.array', (['[NSS, NIS, NRS, NRI, NSI, NSR, NIR]'], {}), '([NSS, NIS, NRS, NRI, NSI, NSR, NIR])\n', (888, 925), True, 'import numpy as np\n'), ((1127, 1197), 'numpy.asarray', 'np.asarray', (['[output[:, 1] + output[:, 6], output[:, 3] + output[:, 4]]'], {}), '([output[:, 1] + output[:, 6], output[:, 3] + output[:, 4]])\n', (1137, 1197), True, 'import numpy as np\n'), ((1206, 1238), 'numpy.arange', 'np.arange', (['(0)', 'infecteds.shape[1]'], {}), '(0, infecteds.shape[1])\n', (1215, 1238), True, 'import numpy as np\n'), ((1252, 1281), 'numpy.vstack', 'np.vstack', (['(times, infecteds)'], {}), '((times, infecteds))\n', (1261, 1281), True, 'import numpy as np\n'), ((1849, 1869), 'pylab.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (1860, 1869), True, 'import pylab as plt\n'), ((1870, 1916), 'pylab.plot', 'plt.plot', (['output[:, 0]', '"""b-"""'], {'label': '"""$N_{SS}$"""'}), "(output[:, 0], 'b-', label='$N_{SS}$')\n", (1878, 1916), True, 'import pylab as plt\n'), ((1918, 1964), 'pylab.plot', 'plt.plot', (['output[:, 2]', '"""g-"""'], {'label': '"""$N_{RS}$"""'}), "(output[:, 2], 'g-', label='$N_{RS}$')\n", (1926, 1964), True, 'import pylab as plt\n'), ((1966, 2012), 'pylab.plot', 'plt.plot', (['output[:, 5]', '"""r-"""'], {'label': '"""$N_{SR}$"""'}), "(output[:, 5], 'r-', label='$N_{SR}$')\n", (1974, 2012), True, 'import pylab as plt\n'), ((2014, 2060), 'pylab.plot', 'plt.plot', (['output[:, 7]', '"""c-"""'], {'label': '"""$N_{RR}$"""'}), "(output[:, 7], 'c-', label='$N_{RR}$')\n", 
(2022, 2060), True, 'import pylab as plt\n'), ((2062, 2080), 'pylab.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2072, 2080), True, 'import pylab as plt\n'), ((2081, 2105), 'pylab.ylabel', 'plt.ylabel', (['"""Uninfected"""'], {}), "('Uninfected')\n", (2091, 2105), True, 'import pylab as plt\n'), ((2161, 2181), 'pylab.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (2172, 2181), True, 'import pylab as plt\n'), ((2182, 2228), 'pylab.plot', 'plt.plot', (['output[:, 1]', '"""b-"""'], {'label': '"""$N_{IS}$"""'}), "(output[:, 1], 'b-', label='$N_{IS}$')\n", (2190, 2228), True, 'import pylab as plt\n'), ((2230, 2276), 'pylab.plot', 'plt.plot', (['output[:, 6]', '"""g-"""'], {'label': '"""$N_{IR}$"""'}), "(output[:, 6], 'g-', label='$N_{IR}$')\n", (2238, 2276), True, 'import pylab as plt\n'), ((2278, 2343), 'pylab.plot', 'plt.plot', (['(output[:, 1] + a[0] * output[:, 6])', '"""r-"""'], {'label': '"""$I_1$"""'}), "(output[:, 1] + a[0] * output[:, 6], 'r-', label='$I_1$')\n", (2286, 2343), True, 'import pylab as plt\n'), ((2343, 2361), 'pylab.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2353, 2361), True, 'import pylab as plt\n'), ((2362, 2386), 'pylab.ylabel', 'plt.ylabel', (['"""Infected 1"""'], {}), "('Infected 1')\n", (2372, 2386), True, 'import pylab as plt\n'), ((2442, 2462), 'pylab.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (2453, 2462), True, 'import pylab as plt\n'), ((2463, 2509), 'pylab.plot', 'plt.plot', (['output[:, 4]', '"""b-"""'], {'label': '"""$N_{SI}$"""'}), "(output[:, 4], 'b-', label='$N_{SI}$')\n", (2471, 2509), True, 'import pylab as plt\n'), ((2511, 2557), 'pylab.plot', 'plt.plot', (['output[:, 3]', '"""g-"""'], {'label': '"""$N_{RI}$"""'}), "(output[:, 3], 'g-', label='$N_{RI}$')\n", (2519, 2557), True, 'import pylab as plt\n'), ((2559, 2624), 'pylab.plot', 'plt.plot', (['(output[:, 4] + a[1] * output[:, 3])', '"""r-"""'], {'label': '"""$I_2$"""'}), "(output[:, 4] + a[1] * 
output[:, 3], 'r-', label='$I_2$')\n", (2567, 2624), True, 'import pylab as plt\n'), ((2624, 2642), 'pylab.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2634, 2642), True, 'import pylab as plt\n'), ((2643, 2667), 'pylab.ylabel', 'plt.ylabel', (['"""Infected 2"""'], {}), "('Infected 2')\n", (2653, 2667), True, 'import pylab as plt\n'), ((2775, 2785), 'pylab.show', 'plt.show', ([], {}), '()\n', (2783, 2785), True, 'import pylab as plt\n'), ((2786, 2797), 'pylab.close', 'plt.close', ([], {}), '()\n', (2795, 2797), True, 'import pylab as plt\n'), ((539, 555), 'numpy.array', 'np.array', (['[5, 5]'], {}), '([5, 5])\n', (547, 555), True, 'import numpy as np\n'), ((582, 598), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (590, 598), True, 'import numpy as np\n'), ((1381, 1400), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (1391, 1400), False, 'import csv\n'), ((1544, 1588), 'numpy.random.normal', 'np.random.normal', (['(1)', 'obs_sd', 'infecteds.shape'], {}), '(1, obs_sd, infecteds.shape)\n', (1560, 1588), True, 'import numpy as np\n'), ((1741, 1760), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (1751, 1760), False, 'import csv\n'), ((2129, 2159), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'size': '"""smaller"""'}), "(size='smaller')\n", (2143, 2159), False, 'from matplotlib.font_manager import FontProperties\n'), ((2410, 2440), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'size': '"""smaller"""'}), "(size='smaller')\n", (2424, 2440), False, 'from matplotlib.font_manager import FontProperties\n'), ((2691, 2721), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'size': '"""smaller"""'}), "(size='smaller')\n", (2705, 2721), False, 'from matplotlib.font_manager import FontProperties\n'), ((978, 988), 'numpy.sum', 'np.sum', (['SI'], {}), '(SI)\n', (984, 988), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import keras
from keras import models
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization
from keras.optimizers import Adam
def brighter(ori, gamma=0.4, a=1.5, b=20, type=0):
    """Brighten an image pixel array.

    Arguments:
        ori: Pixel values in [0, 255] (numpy array).
        gamma: Exponent for the gamma (power-law) transform (type=0).
        a, b: Gain and offset for the linear transform (type=1).
        type: 0 -> gamma transform, 1 -> linear transform.

    Returns:
        Transformed pixel values, clipped to [0, 255].

    Raises:
        ValueError: If *type* is neither 0 nor 1.

    Bug fix: the original version computed the transform into a local
    `color` but returned an all-zeros array `dst`, so every caller
    received zeros instead of the brightened image.
    """
    if type == 0:
        # Non-linear (gamma / power-law) transform.
        out = np.clip(pow(ori / 255.0, gamma) * 255.0, 0, 255)
    elif type == 1:
        # Linear transform.
        out = np.clip(ori * a + b, 0, 255)
    else:
        raise ValueError("type must be 0 (gamma) or 1 (linear), got %r" % type)
    return out
def read_data(file):
    """Load the fer2013-style CSV and split it into data sets.

    Each row holds (emotion label, space-separated pixel string, usage tag).
    Rows tagged "Training" go to the train set, everything else to the test
    set.  The "all" set contains every sample three times: the original
    pixels plus two brightness-augmented copies (gamma and linear).

    Returns:
        (train_x, train_y, test_x, test_y, all_x, all_y) -- lists of
        normalized pixel arrays and one-hot (7-class) label vectors.
    """
    faces = pd.read_csv(file)
    train_x, train_y = [], []
    test_x, test_y = [], []
    all_x, all_y = [], []
    # Walk the CSV row by row and dispatch each sample by its usage tag.
    for row in range(len(faces)):
        emotion = faces.loc[row][0]
        image = faces.loc[row][1]
        usage = faces.loc[row][2]
        label = keras.utils.to_categorical(emotion, 7)
        pixels = np.array(image.split(" "), 'float32')
        normalized = pixels / 255.0
        if usage == "Training":
            train_y.append(label)
            train_x.append(normalized)
        else:
            test_x.append(normalized)
            test_y.append(label)
        # Augmented "all" set: original + two brightness variants.
        all_y.append(label)
        all_x.append(normalized)
        all_y.append(label)
        all_x.append(brighter(pixels, gamma=0.35) / 255.0)
        all_y.append(label)
        all_x.append(brighter(pixels, type=1) / 255.0)
    return train_x, train_y, test_x, test_y, all_x, all_y
# An adapted (reduced) variant of the VGG16 architecture.
# VGG16:
def my_VGG(in_shape):
    """Build a VGG16-inspired sequential CNN for 7-class classification.

    Arguments:
        in_shape: Shape of a single input sample, e.g. (48, 48, 1).

    Returns:
        An uncompiled keras Sequential model ending in a 7-way softmax.
    """
    def conv(filters, name, **kwargs):
        # Shorthand for the 3x3, same-padded, ReLU convolutions used throughout.
        return Conv2D(filters, (3, 3), activation='relu', padding='same',
                      name=name, **kwargs)

    def pool(name):
        # Shorthand for the 2x2 stride-2 max-pooling used after each block.
        return MaxPool2D((2, 2), strides=(2, 2), name=name)

    layers = [
        # Block 1
        conv(16, 'block1_conv1', input_shape=in_shape),
        BatchNormalization(),
        conv(16, 'block1_conv2'),
        BatchNormalization(),
        pool('block1_pool'),
        Dropout(.25),
        # Block 2
        conv(32, 'block2_conv1'),
        BatchNormalization(),
        conv(64, 'block2_conv2'),
        BatchNormalization(),
        pool('block2_pool'),
        Dropout(.25),
        # Block 3
        conv(64, 'block3_conv1'),
        BatchNormalization(),
        conv(128, 'block3_conv2'),
        BatchNormalization(),
        pool('block3_pool'),
        Dropout(.25),
        # Block 4
        conv(128, 'block4_conv1'),
        BatchNormalization(),
        conv(256, 'block4_conv2'),
        BatchNormalization(),
        pool('block4_pool'),
        Dropout(.25),
        # Block 5
        conv(128, 'block5_conv1'),
        BatchNormalization(),
        pool('block5_pool'),
        Dropout(.25),
        # Classification head
        Flatten(name='flatten'),
        Dense(2048, activation='relu', name='fc1'),
        Dense(2048, activation='relu', name='fc2'),
        Dropout(0.5),
        Dense(512, activation='relu', name='fc3'),
        Dense(7, activation='softmax', name='predictions'),
    ]
    net = models.Sequential()
    for layer in layers:
        net.add(layer)
    return net
if __name__ == '__main__':
    # Load the official fer2013 split plus the augmented full data set.
    train_x, train_y, test_x, test_y, data_x, data_y = read_data('./fer2013.csv')

    # Official train/test split (48x48 grayscale), used for validation.
    train_x = np.array(train_x).reshape(-1, 48, 48, 1)
    train_y = np.array(train_y)
    test_x = np.array(test_x).reshape(-1, 48, 48, 1)
    test_y = np.array(test_y)

    # All data (incl. brightness augmentation) is used for the final model.
    data_x = np.array(data_x).reshape(-1, 48, 48, 1)
    data_y = np.array(data_y)

    augmenter = ImageDataGenerator(
        featurewise_center=False,
        featurewise_std_normalization=False,
        horizontal_flip=True)

    model = my_VGG(data_x[1].shape)
    model.compile(loss='categorical_crossentropy', optimizer=Adam(),
                  metrics=['accuracy'])
    model.summary()
    model.fit_generator(augmenter.flow(data_x, data_y, batch_size=32),
                        epochs=60, verbose=2, validation_data=(test_x, test_y,))
    model.save("emotion-final.h5")
| [
"keras.preprocessing.image.ImageDataGenerator",
"pandas.read_csv",
"keras.layers.Dropout",
"keras.layers.MaxPool2D",
"numpy.zeros",
"keras.optimizers.Adam",
"keras.layers.Flatten",
"numpy.clip",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Conv2D",
"keras.models.Sequential",
"keras.lay... | [((338, 351), 'numpy.zeros', 'np.zeros', (['len'], {}), '(len)\n', (346, 351), True, 'import numpy as np\n'), ((583, 600), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (594, 600), True, 'import pandas as pd\n'), ((1755, 1774), 'keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (1772, 1774), False, 'from keras import models\n'), ((4063, 4080), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (4071, 4080), True, 'import numpy as np\n'), ((4147, 4163), 'numpy.array', 'np.array', (['test_y'], {}), '(test_y)\n', (4155, 4163), True, 'import numpy as np\n'), ((4260, 4276), 'numpy.array', 'np.array', (['data_y'], {}), '(data_y)\n', (4268, 4276), True, 'import numpy as np\n'), ((4299, 4407), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'featurewise_center': '(False)', 'featurewise_std_normalization': '(False)', 'horizontal_flip': '(True)'}), '(featurewise_center=False, featurewise_std_normalization=\n False, horizontal_flip=True)\n', (4317, 4407), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((4578, 4584), 'keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (4582, 4584), False, 'from keras.optimizers import Adam\n'), ((976, 1019), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['emotion_data', '(7)'], {}), '(emotion_data, 7)\n', (1002, 1019), False, 'import keras\n'), ((1073, 1097), 'numpy.array', 'np.array', (['val', '"""float32"""'], {}), "(val, 'float32')\n", (1081, 1097), True, 'import numpy as np\n'), ((1790, 1890), 'keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block1_conv1"""', 'input_shape': 'in_shape'}), "(16, (3, 3), activation='relu', padding='same', name='block1_conv1',\n input_shape=in_shape)\n", (1796, 1890), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((1902, 1922), 'keras.layers.BatchNormalization', 
'BatchNormalization', ([], {}), '()\n', (1920, 1922), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((1938, 2012), 'keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block1_conv2"""'}), "(16, (3, 3), activation='relu', padding='same', name='block1_conv2')\n", (1944, 2012), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2028, 2048), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2046, 2048), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2064, 2117), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {'strides': '(2, 2)', 'name': '"""block1_pool"""'}), "((2, 2), strides=(2, 2), name='block1_pool')\n", (2073, 2117), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2133, 2146), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2140, 2146), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2176, 2250), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block2_conv1"""'}), "(32, (3, 3), activation='relu', padding='same', name='block2_conv1')\n", (2182, 2250), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2266, 2286), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2284, 2286), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2302, 2376), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block2_conv2"""'}), "(64, (3, 3), activation='relu', padding='same', name='block2_conv2')\n", (2308, 2376), False, 'from 
keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2392, 2412), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2410, 2412), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2428, 2481), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {'strides': '(2, 2)', 'name': '"""block2_pool"""'}), "((2, 2), strides=(2, 2), name='block2_pool')\n", (2437, 2481), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2497, 2510), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2504, 2510), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2540, 2614), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block3_conv1"""'}), "(64, (3, 3), activation='relu', padding='same', name='block3_conv1')\n", (2546, 2614), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2630, 2650), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2648, 2650), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2666, 2741), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block3_conv2"""'}), "(128, (3, 3), activation='relu', padding='same', name='block3_conv2')\n", (2672, 2741), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2757, 2777), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2775, 2777), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2793, 2846), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {'strides': '(2, 2)', 'name': '"""block3_pool"""'}), "((2, 
2), strides=(2, 2), name='block3_pool')\n", (2802, 2846), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2862, 2875), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2869, 2875), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2905, 2980), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block4_conv1"""'}), "(128, (3, 3), activation='relu', padding='same', name='block4_conv1')\n", (2911, 2980), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((2996, 3016), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3014, 3016), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3032, 3107), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block4_conv2"""'}), "(256, (3, 3), activation='relu', padding='same', name='block4_conv2')\n", (3038, 3107), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3123, 3143), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3141, 3143), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3159, 3212), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {'strides': '(2, 2)', 'name': '"""block4_pool"""'}), "((2, 2), strides=(2, 2), name='block4_pool')\n", (3168, 3212), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3228, 3241), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (3235, 3241), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3271, 3346), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 
3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block5_conv1"""'}), "(128, (3, 3), activation='relu', padding='same', name='block5_conv1')\n", (3277, 3346), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3362, 3382), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3380, 3382), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3398, 3451), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {'strides': '(2, 2)', 'name': '"""block5_pool"""'}), "((2, 2), strides=(2, 2), name='block5_pool')\n", (3407, 3451), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3467, 3480), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (3474, 3480), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3523, 3546), 'keras.layers.Flatten', 'Flatten', ([], {'name': '"""flatten"""'}), "(name='flatten')\n", (3530, 3546), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3562, 3604), 'keras.layers.Dense', 'Dense', (['(2048)'], {'activation': '"""relu"""', 'name': '"""fc1"""'}), "(2048, activation='relu', name='fc1')\n", (3567, 3604), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3620, 3662), 'keras.layers.Dense', 'Dense', (['(2048)'], {'activation': '"""relu"""', 'name': '"""fc2"""'}), "(2048, activation='relu', name='fc2')\n", (3625, 3662), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3678, 3690), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3685, 3690), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3706, 3747), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': 
'"""relu"""', 'name': '"""fc3"""'}), "(512, activation='relu', name='fc3')\n", (3711, 3747), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((3763, 3813), 'keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""softmax"""', 'name': '"""predictions"""'}), "(7, activation='softmax', name='predictions')\n", (3768, 3813), False, 'from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((498, 526), 'numpy.clip', 'np.clip', (['(ori * a + b)', '(0)', '(255)'], {}), '(ori * a + b, 0, 255)\n', (505, 526), True, 'import numpy as np\n'), ((4008, 4025), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (4016, 4025), True, 'import numpy as np\n'), ((4094, 4110), 'numpy.array', 'np.array', (['test_x'], {}), '(test_x)\n', (4102, 4110), True, 'import numpy as np\n'), ((4207, 4223), 'numpy.array', 'np.array', (['data_x'], {}), '(data_x)\n', (4215, 4223), True, 'import numpy as np\n')] |
import re
import warnings
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..utils.commons import create_progress_bar
from ..patient.patient_data_loader import PatientDataLoader
def ensure_dir(path):
    """Create directory *path* (including parents) if it does not exist.

    Arguments:
        path: Directory path (str or Path). Falsy values (empty string,
            None) are rejected.

    Returns:
        True if the directory exists afterwards, False for a falsy input.
    """
    # Bug fix: check for a missing path BEFORE converting -- Path("")
    # becomes Path(".") and Path objects are always truthy, so the old
    # post-conversion `if not path` guard was dead code.
    if not path:
        return False
    path = Path(path)
    if not path.is_dir():
        path.mkdir(parents=True, exist_ok=True)
    return path.is_dir()
def save_figure(path, fig=None, dpi=300):
    """Save *fig* (or the current figure) to *path*, creating parent dirs.

    Arguments:
        path: Output file path.
        fig: Figure to save; if None, the current matplotlib figure is used.
        dpi: Resolution forwarded to plt.savefig().

    Raises:
        OSError: If the output directory cannot be created.
    """
    path = Path(path)
    if fig is not None:
        # Make fig the current figure so plt.savefig() targets it.
        plt.figure(fig.number)
    if not ensure_dir(path.parent):
        # Bug fix: was `assert False, ...` -- asserts are stripped under
        # `python -O`, which would let savefig fail with a confusing error.
        raise OSError("Failed to create output directory: %s" % path.parent)
    plt.savefig(path, bbox_inches="tight", dpi=dpi)
class SymmetryChecker:
    """
    Patients wear a sensor on the left and right side. This utility analyzes
    and visualizes the relationship between these data sources. Most notably
    it estimates (per-patient) the Pearson correlation coefficient between
    the left and right signals, and creates Bland-Altman plots for a graphical
    analysis.
    """
    def __init__(self, data_dir, out_dir, columns, resample=None):
        """
        Arguments:
            data_dir: Input folder with the .csv files. The following naming
                convention applies for the .csv files:
                    ([0-9]*)(L|R).*
                The first capture group identifies the patient id, the
                second one whether the data is from left or right side.
                    001L_storage-vital.csv
                    001R_storage-vital.csv
            out_dir: Path to output directory
            columns: Columns to select ("columns of interest", _columns)
            resample: Optionally resample the data. For instance, setting
                resample="30s" aggregates the data into 30s bins.
                See doc of pd.DataFrame.resample for details.

        NOTE(review): run() actually globs "*.h5" HDF stores, not .csv
        files; the naming convention described above looks stale -- confirm.
        """
        self._loader = PatientDataLoader()
        self._data_dir = Path(data_dir)
        self._out_dir = Path(out_dir)
        self._resample = resample
        self._columns = columns

    def _format_data(self, df_left, df_right):
        """Join the left/right frames into one frame with two column levels.

        Both inputs are re-indexed by "timestamp" and restricted to the
        columns of interest. The result carries a two-level column index
        (column, "left"/"right"), sorted, and is optionally resampled
        (mean-aggregated) into self._resample bins.
        """
        # Keep the DeMorton exercise flag only if BOTH sides provide it.
        has_morton = ("DeMorton" in df_left) and ("DeMorton" in df_right)
        read_columns = (self._columns+["DeMorton"]) if has_morton else self._columns
        df_left = df_left.set_index("timestamp")
        df_right = df_right.set_index("timestamp")
        df_left = df_left[read_columns]
        df_right = df_right[read_columns]
        # Trick to prepend column level
        df_left = pd.concat([df_left], keys=["left"], axis=1)
        df_right = pd.concat([df_right], keys=["right"], axis=1)
        # Reorder to (column, side) so a column of interest selects both sides.
        df_left = df_left.reorder_levels([1,0], axis=1)
        df_right = df_right.reorder_levels([1,0], axis=1)
        # Outer join keeps timestamps that exist on one side only (-> NaNs).
        df = df_left.join(df_right, how="outer")
        df = df.sort_index(axis=1)
        if self._resample is not None:
            # This introduces a FIXED sampling pattern!
            df = df.resample(self._resample).mean()
            if "DeMorton" in df:
                # Resampling averaged the boolean flag; threshold it back.
                df["DeMorton"] = df["DeMorton"] > 0.5
        return df

    def _analyze_per_patient(self, df, col, pid):
        """Create a Bland-Altman plot for one column/patient; collect stats.

        Arguments:
            df: Output of _format_data() (two-level columns).
            col: Column of interest (first column level).
            pid: Patient identifier, used in the title and output filename.

        Returns:
            A pd.Series with a multi-level index, named (pid, col),
            holding per-side data-quality measures (counts, nan/zero/const
            ratios) and left-right difference statistics incl. the Pearson
            correlation.
        """
        x = df[col]
        n = len(x)
        nans = x.isnull()
        zeros = (x == 0)
        # Debug toggle: exclude zeros as well as NaNs from the valid set.
        if True:
            mask = (nans|zeros).any(axis=1)
        else:
            mask = nans.any(axis=1)
        # Bland-Altman coordinates over the valid (both sides present) rows.
        xx = x[~mask]
        diff = xx["left"] - xx["right"]
        avg = xx.mean(axis=1)
        diff_mean = diff.mean()
        diff_std = diff.std()
        # 95% limits of agreement (mean ± 1.96·sd).
        offset_ci = 1.96*diff_std
        # Vertical offset at which one-sided (missing/zero) samples are drawn.
        offset_miss = diff.abs().max()*1.2
        # One-sided samples: value of the side that IS present.
        x_nl = x.loc[nans["left"],"right"]
        x_nr = x.loc[nans["right"],"left"]
        x_zl = x.loc[zeros["left"],"right"]
        x_zr = x.loc[zeros["right"],"left"]
        # Constant y-offset helper (note: the lambda's x shadows the outer x).
        y_off = lambda x, offset: offset*np.ones_like(x)
        fig, ax = plt.subplots()
        h_valid = ax.scatter(avg, diff, c="black", alpha=0.05)
        # Missing-on-one-side samples: above the cloud if left is missing,
        # below if right is missing.
        h_nans = ax.scatter(x_nl, y_off(x_nl, offset_miss), c="salmon", alpha=0.05)
        h_nans = ax.scatter(x_nr, y_off(x_nr, -offset_miss), c="salmon", alpha=0.05)
        h_zeros = ax.scatter(x_zl, y_off(x_zl, offset_miss), c="pink", alpha=0.2)
        h_zeros = ax.scatter(x_zr, y_off(x_zr, -offset_miss), c="pink", alpha=0.2)
        h_morton = None
        if "DeMorton" in df:
            # Highlight samples recorded during the DeMorton exercise.
            mask_morton = df["DeMorton"].any(axis=1)
            mm = mask_morton[~mask]
            x_morton = avg[mm]
            y_morton = diff[mm]
            h_morton = ax.scatter(x_morton, y_morton, c="yellow", alpha=0.05)
        xlim = ax.get_xlim()
        # Mean difference and the two limits of agreement as horizontal lines.
        h_mean, = ax.plot(xlim, diff_mean*np.ones(2), "b", zorder=100)
        h_cip, = ax.plot(xlim, y_off(np.ones(2), +offset_ci), ":r", zorder=100)
        h_cim, = ax.plot(xlim, y_off(np.ones(2), -offset_ci), ":r", zorder=100)
        # Invisible handle used as a spacer entry in the legend.
        h_dummy, = plt.plot([avg.mean()],[0], color="w", alpha=0)
        ax.grid(True)
        ax.set_title(f"Bland-Altman: {col}, pid={pid}")
        ax.set_xlabel("Mean: (Left+Right)/2")
        ax.set_ylabel("Difference: (Left-Right)")
        legend = [(h_mean, "Mean: %.3f" % diff_mean),
                  (h_cim, "95%% CI: ±%.3f" % (1.96*diff_std)),
                  (h_dummy, ""),
                  (h_valid, "valid"),
                  (h_nans, "nans"),
                  (h_zeros, "zeros")]
        if h_morton:
            # NOTE(review): relies on the artist being truthy;
            # `if h_morton is not None` would be more explicit.
            legend.append((h_morton, "morton"))
        leg = ax.legend(*zip(*legend),
                        title="Difference:",
                        loc="upper left",
                        bbox_to_anchor=(1.05, 1.02))
        ax.set_axisbelow(True)
        plt.tight_layout()
        leg._legend_box.align = "left"
        # Legend markers at full opacity, regardless of the scatter alpha.
        for lh in leg.legendHandles:
            lh.set_alpha(1)
        if self._out_dir:
            filename = ("bland-altman-%s-%s.png" % (col.lower(), pid))
            path = self._out_dir / "plots" / col / filename
            save_figure(path=path, fig=fig)
        plt.close(fig)
        # Per-side data-quality measures.
        info = pd.DataFrame(columns=x.columns, dtype=float)
        info.loc["counts"] = n
        # null: {NaN, None NaT}
        info.loc["nans"] = nans.sum(axis=0)
        info.loc["nan_ratio"] = info.loc["nans"] / n
        info.loc["zero_ratio"] = (x == 0).sum(axis=0) / n
        # Ratio of consecutive samples with identical values.
        info.loc["const_ratio"] = ((x.shift(1)-x) == 0).sum(axis=0) / (n-1)
        # Left-right difference statistics (incl. Pearson correlation).
        info_diff = pd.Series(name="diff", dtype=float)
        info_diff["mean"] = diff_mean
        info_diff["std"] = diff_std
        info_diff["5%"] = diff.quantile(0.05)
        info_diff["25%"] = diff.quantile(0.25)
        info_diff["50%"] = diff.quantile(0.50)
        info_diff["75%"] = diff.quantile(0.75)
        info_diff["95%"] = diff.quantile(0.95)
        info_diff["corr"] = x["left"].corr(x["right"])
        # Output as series with multi-level index
        ret = pd.concat([info.unstack(),
                         info_diff.to_frame().unstack()],
                        axis=0)
        ret.name = (pid, col)
        return ret

    def run(self):
        """Process all patient .h5 stores and write result CSVs.

        For every file in data_dir, reads "vital/left" and "vital/right",
        analyzes each column of interest (producing Bland-Altman plots),
        then writes results.csv (per patient/column) and summary.csv
        (mean/std over patients) into out_dir.
        """
        files = list(sorted(self._data_dir.glob("*.h5")))
        rets = []
        if len(files)==0:
            warnings.warn("No files found under the following location:\n%s" % self._data_dir)
            return
        prefix = "Patient {variables.key:<3}... "
        progress = create_progress_bar(label=None,
                                         size=len(files),
                                         prefix=prefix,
                                         variables={"key": "N/A"})
        progress.start()
        for i, filepath in enumerate(files):
            key = filepath.stem
            progress.update(i, key=key)
            store = pd.HDFStore(filepath, mode="r")
            # Skip patients that do not have BOTH sensor sides recorded.
            if not ("vital/left" in store and "vital/right" in store):
                warnings.warn("Dataset incomplete: %s" % key)
                store.close()
                continue
            df_left = store["vital/left"]
            df_right = store["vital/right"]
            store.close()
            df = self._format_data(df_left=df_left, df_right=df_right)
            for col in self._columns:
                ret = self._analyze_per_patient(df, col=col, pid=key)
                rets.append(ret)
        progress.finish()
        # One column per (patient, column-of-interest) pair.
        rets = pd.concat(rets, axis=1)
        rets.to_csv(self._out_dir / "results.csv")
        # Aggregate over patients (group by the column-of-interest level).
        means = rets.groupby(level=1, axis=1).mean()
        stds = rets.groupby(level=1, axis=1).std()
        summary = pd.concat([means, stds], keys=["mean", "std"], axis=1)
        summary = summary.reorder_levels([1,0], axis=1)
        summary = summary.sort_index(axis=1)
        summary.to_csv(self._out_dir / "summary.csv")
| [
"pandas.DataFrame",
"matplotlib.pyplot.tight_layout",
"pandas.HDFStore",
"numpy.ones_like",
"matplotlib.pyplot.close",
"numpy.ones",
"pathlib.Path",
"matplotlib.pyplot.figure",
"pandas.Series",
"warnings.warn",
"matplotlib.pyplot.subplots",
"pandas.concat",
"matplotlib.pyplot.savefig"
] | [((267, 277), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (271, 277), False, 'from pathlib import Path\n'), ((470, 480), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (474, 480), False, 'from pathlib import Path\n'), ((707, 754), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'bbox_inches': '"""tight"""', 'dpi': 'dpi'}), "(path, bbox_inches='tight', dpi=dpi)\n", (718, 754), True, 'import matplotlib.pyplot as plt\n'), ((567, 589), 'matplotlib.pyplot.figure', 'plt.figure', (['fig.number'], {}), '(fig.number)\n', (577, 589), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2084), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (2074, 2084), False, 'from pathlib import Path\n'), ((2109, 2122), 'pathlib.Path', 'Path', (['out_dir'], {}), '(out_dir)\n', (2113, 2122), False, 'from pathlib import Path\n'), ((2636, 2679), 'pandas.concat', 'pd.concat', (['[df_left]'], {'keys': "['left']", 'axis': '(1)'}), "([df_left], keys=['left'], axis=1)\n", (2645, 2679), True, 'import pandas as pd\n'), ((2699, 2744), 'pandas.concat', 'pd.concat', (['[df_right]'], {'keys': "['right']", 'axis': '(1)'}), "([df_right], keys=['right'], axis=1)\n", (2708, 2744), True, 'import pandas as pd\n'), ((3932, 3946), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3944, 3946), True, 'import matplotlib.pyplot as plt\n'), ((5693, 5711), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5709, 5711), True, 'import matplotlib.pyplot as plt\n'), ((6026, 6040), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6035, 6040), True, 'import matplotlib.pyplot as plt\n'), ((6057, 6101), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'x.columns', 'dtype': 'float'}), '(columns=x.columns, dtype=float)\n', (6069, 6101), True, 'import pandas as pd\n'), ((6417, 6452), 'pandas.Series', 'pd.Series', ([], {'name': '"""diff"""', 'dtype': 'float'}), "(name='diff', dtype=float)\n", (6426, 6452), True, 'import pandas as 
pd\n'), ((8312, 8335), 'pandas.concat', 'pd.concat', (['rets'], {'axis': '(1)'}), '(rets, axis=1)\n', (8321, 8335), True, 'import pandas as pd\n'), ((8509, 8563), 'pandas.concat', 'pd.concat', (['[means, stds]'], {'keys': "['mean', 'std']", 'axis': '(1)'}), "([means, stds], keys=['mean', 'std'], axis=1)\n", (8518, 8563), True, 'import pandas as pd\n'), ((7184, 7274), 'warnings.warn', 'warnings.warn', (['("""No files found under the following location:\n%s""" % self._data_dir)'], {}), '("""No files found under the following location:\n%s""" % self.\n _data_dir)\n', (7197, 7274), False, 'import warnings\n'), ((7725, 7756), 'pandas.HDFStore', 'pd.HDFStore', (['filepath'], {'mode': '"""r"""'}), "(filepath, mode='r')\n", (7736, 7756), True, 'import pandas as pd\n'), ((3897, 3912), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (3909, 3912), True, 'import numpy as np\n'), ((4703, 4713), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4710, 4713), True, 'import numpy as np\n'), ((4769, 4779), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4776, 4779), True, 'import numpy as np\n'), ((4849, 4859), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4856, 4859), True, 'import numpy as np\n'), ((7844, 7889), 'warnings.warn', 'warnings.warn', (["('Dataset incomplete: %s' % key)"], {}), "('Dataset incomplete: %s' % key)\n", (7857, 7889), False, 'import warnings\n')] |
import math, cmath
import numpy as np
import tkinter as tk
from tkinter import ttk
from PIL import Image, ImageDraw, ImageTk
from colour import Color
import Gvars
class Graphic:
"""Handles the displaying and scaling of the FITS image, as well as the drawing, selection, and manipulation of the
boxes.
Attributes
----------
Box : list
        List of per-box lists: each holds the box polygon's canvas item id, the ids of its
        marker/resize items, and a final list of geometry state (midpoint, angle, start corner, direction).
boxColor : str
boxDrawn : bool
box_id : int
box_index : int
box_index_hover : int
box_index_selected : int
box_manip : bool
box_resize : bool
box_selected : bool
canvas : tkinter.Canvas
Main canvas for the FITS image boxes
clearButton : tkinter.ttk.Button
Button to delete selected box
clicked : bool
colorGradient : list
degChange : float
Degrees changed while rotating, before setBox is called
degNow : float
delta_coeff : int
Modifier for event.delta
dirct : int
Index difference from the start position corner to the end corner of the scan line
endPos : tuple
fitsCanvas : int
Canvas image object
fitsPIL : PIL.Image.Image
fitsTk : PIL.ImageTk.PhotoImage
fits_CurSize : tuple
fits_region : int
Bounding box Canvas rectangle object for the FITS image
hbar : tkinter.ttk.Scrollbar
manipBox_initvars : tuple
master : tkinter.Toplevel
over_object : bool
over_selected : bool
regColor : str
resizeFill : str
scBg : list
List of the slider objects and the gradient image
scan_direction : str
slidercanvas_1 : tkinter.Canvas
slidercanvas_2 : tkinter.Canvas
sliderheight : int
sliderwidth : int
startPos_draw : tuple
startxy_c : int
Index of the start position corner
vbar : tkinter.ttk.Scrollbar
zoom : float
Notes
-----
Events bound to FileEdit.Files.currentCoords_update:
<B1-Motion> (left click drag) :
To update parameter values while moving or resizing box
<B2-Motion> (right click drag) :
To update parameter values while rotating box
<Shift-B4-Motion> :
To execute FileEdit.Files.currentCoords_update
This might cause issues on operating systems that use B4 for the mousewheel.
"""
    def __init__(self, master, Gframe, Sframe):
        """Build the graphic area: image canvas plus scrollbars in *Gframe*,
        color sliders and Clear button in *Sframe*.

        Parameters
        ----------
        master : tkinter.Toplevel
            Owning window.
        Gframe : tkinter widget
            Container for the image canvas and its scrollbars.
        Sframe : tkinter widget
            Container for the two color-slider canvases and the Clear button.
        """
        self.master = master
        self.degChange = 0.
        # Outline colors for hand-drawn boxes and pyregion boxes (changed by sliders)
        self.boxColor, self.regColor = "red", "red"
        self.scan_direction = "X"
        self.dirct = 2
        self.startxy_c = 0
        self.Box = []
        # Interaction state flags (see class docstring)
        self.box_selected, self.over_object, self.over_selected, self.clicked, self.box_manip, self.box_resize, self.boxDrawn = \
            False, False, False, False, False, False, False
        self.zoom = 1
        self.delta_coeff_init()
        Gframe.grid_rowconfigure(0, weight=1)
        Gframe.grid_columnconfigure(0, weight=1)
        # Scroll increment of 1 gives pixel-level scrolling on the main canvas
        self.canvas = tk.Canvas(Gframe, bg="gray", highlightthickness=0, yscrollincrement=1, xscrollincrement=1)
        self.cursors(None, "default")
        self.canvas.grid(row=0, column=0, sticky="nsew")
        self.vbar = ttk.Scrollbar(Gframe, orient="vertical", command=self.canvas.yview)
        self.vbar.grid(row=0, column=1, rowspan=1, sticky="ns")
        self.hbar = ttk.Scrollbar(Gframe, orient="horizontal", command=self.canvas.xview)
        self.hbar.grid(row=1, column=0, columnspan=2, sticky="ew")
        Sframe.grid_rowconfigure([0,1], weight=0)
        Sframe.grid_columnconfigure([0], weight=1)
        # Deletes the currently selected box; enabled only while a box is selected
        self.clearButton = ttk.Button(Sframe, text="Clear", state=tk.DISABLED, takefocus=0, command=lambda _=None, mode="select": self.resetBox(_, mode))
        self.clearButton.grid(row=0, column=1, rowspan=2, sticky="nsew")
        # Slider 1 controls the drawn-box color, slider 2 the pyregion-box color
        self.slidercanvas_1 = tk.Canvas(Sframe, bg="gray", height=10, highlightthickness=0, highlightcolor="black", borderwidth=0, relief=tk.GROOVE)
        self.slidercanvas_1.grid(row=0, column=0, rowspan=1, columnspan=1, sticky="ew")
        self.slidercanvas_2 = tk.Canvas(Sframe, bg="gray", width=200, height=10, highlightthickness=0, highlightcolor="black", borderwidth=0, relief=tk.GROOVE)
        self.slidercanvas_2.grid(row=1, column=0, rowspan=1, columnspan=1, sticky="ew")
        self.slider_temp(None)
        Sframe.bind("<Configure>", self.slider_temp)
        self.draw_keybind()
        self.zoom_keybind()
    def draw_keybind(self):
        """Bind the mouse/keyboard events used for drawing, selecting and scrolling."""
        self.canvas.bind("<BackSpace>", lambda event, mode="select": self.resetBox(event, mode))
        self.canvas.bind("<Enter>", lambda event, mode="default": self.cursors(event, mode))
        self.canvas.bind("<Button-1>", self.B12_callback)
        self.canvas.bind("<B1-Motion>", self.B1M_callback)
        self.canvas.bind("<ButtonRelease-1>", self.B1R_callback)
        self.canvas.bind("<ButtonRelease-2>", self.B2R_callback)
        if Gvars.curOS == "Linux":
            # X11 reports the wheel as Button-4/5 instead of <MouseWheel>,
            # so supply the +/-120 delta explicitly.
            self.canvas.bind('<Button-4>', lambda event: self.v_scroll(event, delta=120))
            self.canvas.bind('<Button-5>', lambda event: self.v_scroll(event, delta=-120))
            self.canvas.bind('<Shift-Button-4>', lambda event: self.h_scroll(event, delta=120))
            self.canvas.bind('<Shift-Button-5>', lambda event: self.h_scroll(event, delta=-120))
        else:
            self.canvas.bind('<MouseWheel>', self.v_scroll)
            self.canvas.bind('<Shift-MouseWheel>', self.h_scroll)
    def zoom_keybind(self):
        """Bind zoom-related events (Ctrl/Cmd + wheel, window resize, key release)."""
        self.canvas.bind("<Configure>", lambda event, target=self.canvas: self.update_proxy(event, self.canvas))
        # Redraw the canvas items once the zoom modifier key is released
        self.canvas.bind("<KeyRelease-Meta_L>", lambda event: self.endzoom(event))
        if Gvars.curOS == "Linux":
            # X11 wheel events arrive as Button-4/5; pass the delta explicitly
            self.canvas.bind("<Control-Button-4>", lambda event: self.fits_zoom(event, delta=120))
            self.canvas.bind("<Control-Button-5>", lambda event: self.fits_zoom(event, delta=-120))
        else:
            self.canvas.bind("<Command-MouseWheel>", lambda event: self.fits_zoom(event))
def update_proxy(self, _, target):
target.update()
def B12_callback(self, event):
self.canvas.focus_set()
self.clicked = True
if self.box_selected or self.boxDrawn:
self.deselectBox(event, "B12")
else:
self.setStart_draw(event)
def B12_leave(self, event):
try:
if self.boxDrawn:
endPos_temp = (self.canvas.canvasx(event.x), self.canvas.canvasy(event.y))
boxPos_temp = self.canvas.coords(self.Box[self.box_index_hover][0])
if max(boxPos_temp) in endPos_temp or min(boxPos_temp) in endPos_temp:
self.canvas.event_generate("<Leave>")
except AttributeError:
pass
def B1M_callback(self, event):
if not self.box_selected and not self.boxDrawn and self.clicked:
self.drawBox(event)
def B1R_callback(self, event):
if self.box_selected and not self.over_object:
self.deselectBox(event, "B1R")
elif self.box_manip:
self.setBox(event)
self.clicked = False
def B2R_callback(self, event):
if self.box_selected and self.box_manip:
self.setBox(event)
def delta_coeff_init(self):
if Gvars.curOS == "Darwin":
self.delta_coeff = 1
elif Gvars.curOS == "Windows" or Gvars.curOS == "Linux":
self.delta_coeff = 120
def h_scroll(self, event, delta=None):
if delta is not None:
event.delta = delta
self.canvas.xview_scroll(int(-3 * event.delta / self.delta_coeff), 'units')
self.fits_zoom(event, zoom=False)
def v_scroll(self, event, delta=None):
if delta is not None:
event.delta = delta
self.canvas.yview_scroll(int(-3 * event.delta / self.delta_coeff), 'units')
self.fits_zoom(event, zoom=False)
    def fits_initialize(self, PILimage):
        """Create the necessary image attributes when a FITS file is loaded.

        Parameters
        ----------
        PILimage : PIL.Image.Image
            The rendered FITS bitmap to display on the canvas.
        """
        # Keep reference to the image
        self.fitsPIL = PILimage
        self.fitsTk = ImageTk.PhotoImage(self.fitsPIL)
        self.fitsCanvas = self.canvas.create_image(0, 0, image=self.fitsTk, anchor="nw", tag="fits")
        # Update current size of image
        self.fits_CurSize = (self.fitsTk.width(), self.fitsTk.height())
        # Create bounding box for the image used in determining its scaled size
        self.fits_region = self.canvas.create_rectangle(0, 0, self.fits_CurSize[0], self.fits_CurSize[1],
                                                        width=0, outline="yellow", fill="", tag="fregion")
        # Add binding for window resize
        self.canvas.bind("<Configure>", lambda event: self.fits_zoom(event), add="+")
    def fits_zoom(self, event, zoom=True, delta=None):
        """Handles zooming and scrolling of the zoomed image.

        Parameters
        ----------
        event : Tkinter.Event or None
            Wheel event; only its cursor position/delta are used, and only
            when *zoom* is True.
        zoom : bool
            True to change the zoom factor around the cursor; False to merely
            redraw the currently visible tile (e.g. after a scroll).
        delta : int, optional
            Explicit wheel delta supplied by the Linux Button-4/5 bindings.

        NOTE(review): relies on ``self.fits_OriSize`` (original image size),
        which is set outside this chunk — confirm it is initialized before
        the first call.
        """
        if zoom:
            # Obtain cursor position on the canvas
            # Due to a bug in MacOS the more direct self.canvas.canvasx(event.x) is not used
            self.canvas.focus_set()
            px, py = event.widget.winfo_pointerxy()
            rx, ry = (event.widget.winfo_rootx(), event.widget.winfo_rooty())
            cx, cy = (px-rx, py-ry)
            eventx, eventy = self.canvas.canvasx(cx), self.canvas.canvasy(cy)
            if delta is not None:
                event.delta = delta
            # Set current and overall scaling factor
            scale = 1 - event.delta * 0.01 / self.delta_coeff
            self.zoom *= scale
            if self.zoom < 0.1:
                # Clamp minimum zoom: undo the factor and bail out
                self.zoom /= scale
                return
            self.canvas.delete("marker")
        else:
            scale = 1
            eventx, eventy = 0, 0
        # Determine bounding region of the zoomed image
        # Because of the integer rounding required for some methods, the bounding box is not entirely accurate
        # which causes some of the edge to be trimmed off when zoomed beyond the canvas size
        self.canvas.scale("fregion", eventx, eventy, scale, scale)
        fits_region_bbox = self.canvas.coords(self.fits_region)
        self.canvas.config(scrollregion=fits_region_bbox)
        # Determine display region of the zoomed tile
        display_region_bbox = fits_region_bbox.copy()
        if display_region_bbox[0] < self.canvas.canvasx(0):
            display_region_bbox[0] = self.canvas.canvasx(0)
        if display_region_bbox[1] < self.canvas.canvasy(0):
            display_region_bbox[1] = self.canvas.canvasy(0)
        if display_region_bbox[2] > self.canvas.canvasx(self.canvas.winfo_width()):
            display_region_bbox[2] = self.canvas.canvasx(self.canvas.winfo_width())
        if display_region_bbox[3] > self.canvas.canvasy(self.canvas.winfo_height()):
            display_region_bbox[3] = self.canvas.canvasy(self.canvas.winfo_height())
        # Determine cropping area of original image and execute crop
        crop_area = [max(int(round((display_region_bbox[0]-fits_region_bbox[0])/self.zoom)), 0),
                     max(int(round((display_region_bbox[1]-fits_region_bbox[1])/self.zoom)), 0),
                     min(int(round(self.fits_OriSize[0]-0-(fits_region_bbox[2]-display_region_bbox[2])/self.zoom)), self.fits_OriSize[0]-0),
                     min(int(round(self.fits_OriSize[1]-0-(fits_region_bbox[3]-display_region_bbox[3])/self.zoom)), self.fits_OriSize[1]-0)]
        fitsPIL_cropped = self.fitsPIL.crop(crop_area)
        # Resize cropped tile and redraw image
        final_size = (int(round((crop_area[2]-crop_area[0]+1)*self.zoom))-1,
                      int(round((crop_area[3]-crop_area[1]+1)*self.zoom))-1)
        fitsPIL_cropped_zoomed = fitsPIL_cropped.resize(final_size, Image.NEAREST)
        self.fitsTk = ImageTk.PhotoImage(fitsPIL_cropped_zoomed)
        self.canvas.delete("fits")
        self.fitsCanvas = self.canvas.create_image(-fits_region_bbox[0]+display_region_bbox[0],
                                                   -fits_region_bbox[1]+display_region_bbox[1],
                                                   image=self.fitsTk, anchor="nw", tag="fits")
        if zoom:
            # Adjust position to keep cursor on target while zooming
            self.canvas.xview_scroll(int(round(eventx*(scale-1))), 'units')
            self.canvas.yview_scroll(int(round(eventy*(scale-1))), 'units')
        # Match the bounding box's position to the image
        current_x, current_y, *_ = self.canvas.bbox(self.fits_region)
        self.canvas.move(self.fits_region, -current_x, -current_y)
        # When Tcl/Tk 8.6 is available on GitHub Actions, the following line should
        # be used instead of the preceding 2 lines.
        # self.canvas.moveto("fregion", 0, 0)
        # Update image information
        self.fits_CurSize = (int(round(fits_region_bbox[2]-fits_region_bbox[0])),
                             int(round(fits_region_bbox[3]-fits_region_bbox[1])))
        # Zoom canvas objects
        self.canvas.scale("box", 0, 0, scale, scale)
        self.canvas.tag_lower("fits")
    def endzoom(self, event):
        """Update fits tab after zooming to account for pixel rounding errors."""
        # Remember which box (if any) was selected so it can be restored after
        if self.box_selected:
            ori_selected_state = True
            ori_selected_box_id = self.box_id
        else:
            ori_selected_state = False
            # Temporarily flag as selected so setBox rebuilds every box
            self.box_selected = True
        # Re-commit every box so its peripheral items match the final zoom level
        for i in range(len(self.Box)):
            self.box_id = self.Box[i][0]
            self.setBox(None)
        if ori_selected_state:
            self.box_id = ori_selected_box_id
            # Notify listeners (parameter panel) of the refreshed selection
            self.canvas.event_generate("<Shift-B4-Motion>")
        else:
            self.box_selected = False
    def slider_temp(self, _):
        """Initialize the box color slider.

        Called once at startup and on every <Configure> of the slider frame,
        so the gradient is regenerated at the new width.
        """
        # Create list to hold references
        self.scBg = []
        # Create sliders
        self.colorSlider([self.slidercanvas_1, self.slidercanvas_2], "red", "pink")
        # Bind slider; default arguments bind the per-slider values at
        # definition time (avoids the late-binding closure pitfall)
        for num, s_set in enumerate(((self.slidercanvas_1, ["box","marker"]), (self.slidercanvas_2, ["reg","regmarker"]))):
            s_set[0].bind("<Button-1>",
                          lambda event, canvas=s_set[0], target=[s_set[1],num], pad=1: self.sliderNob(event, canvas, target, pad))
            s_set[0].bind("<B1-Motion>",
                          lambda event, canvas=s_set[0], target=[s_set[1],num], pad=1: self.sliderNob(event, canvas, target, pad))
    def colorSlider(self, canvas_list, color1, color2):
        """Draw the color gradient and slider nob.

        Parameters
        ----------
        canvas_list : list of tkinter.Canvas
            Slider canvases to draw the gradient on (first one sets the size).
        color1, color2 : str
            End colors of the gradient (any name accepted by ``colour.Color``).
        """
        # Get current slider dimensions
        canvas_list[0].update()
        self.sliderwidth, self.sliderheight = canvas_list[0].winfo_width(), canvas_list[0].winfo_height()
        # Create color gradient image, one Color per horizontal pixel
        self.colorGradient = list(Color(color1).range_to(Color(color2), self.sliderwidth))
        gradBg = Image.new("RGB", (self.sliderwidth, self.sliderheight), "#FFFFFF")
        gradBgDraw = ImageDraw.Draw(gradBg)
        for x, color in enumerate(self.colorGradient):
            gradBgDraw.line((x, 0, x, self.sliderheight), fill=str(color), width=1)
        # Create canvas images and keep references
        for canvas in canvas_list:
            self.scBg.append([])
            self.scBg[-1].append(ImageTk.PhotoImage(gradBg))
            self.scBg[-1].append(canvas.create_image(0, 0, image=self.scBg[-1][0], anchor=tk.NW))
            # Vertical line acting as the slider nob
            self.scBg[-1].append(canvas.create_line(1, 0, 1, self.sliderheight, width=2, fill="#444444"))
    def sliderNob(self, event, canvas, target, pad):
        """Determine the color chosen based on the position of the nob.

        Parameters
        ----------
        event : Tkinter.Event
            Click/drag event on the slider canvas.
        canvas : tkinter.Canvas
            Slider canvas the nob lives on.
        target : list
            ``[[outline_tag, marker_tag], slider_index]`` — the canvas tags to
            recolor and the index into ``self.scBg``.
        pad : int
            Pixel padding preventing the nob from leaving the gradient.
        """
        width, height = self.sliderwidth, self.sliderheight
        try:
            # Determine corresponding position of the nob on the color gradient
            if width > event.x > pad:
                canvas.coords(self.scBg[target[1]][2], event.x, pad - 1, event.x, height)
            elif event.x <= pad:
                canvas.coords(self.scBg[target[1]][2], pad, pad - 1, pad, height)
            elif width <= event.x:
                canvas.coords(self.scBg[target[1]][2], width - pad, pad - 1, width - pad, height)
            # Look up the gradient color under the nob for the matching tag set
            if target[0][0] == "box":
                self.boxColor = str(self.colorGradient[int(canvas.coords(self.scBg[target[1]][2])[0] + pad - 1)])
                temp_color = self.boxColor
            elif target[0][0] == "reg":
                self.regColor = str(self.colorGradient[int(canvas.coords(self.scBg[target[1]][2])[0] + pad - 1)])
                temp_color = self.regColor
            # Apply color change
            self.canvas.itemconfig(target[0][0], outline=temp_color)
            self.canvas.itemconfig(target[0][1], fill=temp_color)
            # Hollow out markers if the box is in the "selected" state
            if self.box_selected:
                self.canvas.itemconfig(self.Box[self.box_index][1], fill="")
                self.canvas.itemconfig(self.Box[self.box_index][2], fill="")
            # For debugging with self.resizeFill
            if not self.resizeFill == "":
                self.canvas.itemconfig("resize", fill=temp_color)
        except AttributeError:
            pass
    def slider_master(self, vartuple):
        """Scale and update the FITS image.

        Parameters
        ----------
        vartuple : tuple
            Contains the variable values for the scaling function to be use in color_func.
            c.f. FileEdit.Tabs.slider_callback

        NOTE(review): relies on ``self.fitsNp_ori`` (the original pixel array),
        which is set outside this chunk — verify it exists before the first call.
        """
        self.canvas.focus_set()
        try:
            temp_bitmap = self.color_func(*vartuple)
            temp_bitmap_2 = temp_bitmap.copy()
            # Trim the values larger or smaller than the original's maximum and minimum
            temp_bitmap_2 = np.clip(temp_bitmap_2, np.min(self.fitsNp_ori), np.max(self.fitsNp_ori))
            # Convert to 8-bit and invert to have Y-axis pointing up
            temp_bitmap_2 = (temp_bitmap_2 / np.max(self.fitsNp_ori)) * 255
            temp_bitmap = Image.fromarray(np.flip(temp_bitmap_2, 0))
            self.fitsPIL = temp_bitmap
            self.fits_zoom(None, zoom=False)
        except ValueError:
            # Bad/incomplete slider values: keep the previous image
            pass
def color_func(self, pixel, pixel_min, pixel_max, gamma, gain, bias_x, bias_y, lowerb, upperb, mode):
"""Scale the input pixel by evaluating a piecewise scaling function.
Returns
-------
numpy.ndarray
Scaled pixel value
"""
f1 = pixel_min
f4 = pixel_max
if mode == " Symmetric":
bias_sep_x = lowerb + (bias_x * (upperb - lowerb))
bias_sep_y = pixel_min + (bias_y * (pixel_max - pixel_min))
bound_diff = upperb - lowerb
global_diff = pixel_max - pixel_min
f2 = lambda pixel: -((-(pixel-bias_sep_x)/(0.5*bound_diff))**gamma)*(0.5*global_diff)*gain + bias_sep_y
f3 = lambda pixel: (((pixel-bias_sep_x)/(0.5*bound_diff))**gamma)*(0.5*global_diff)*gain + bias_sep_y
return np.piecewise(pixel, [(pixel<lowerb), (lowerb<=pixel)*(pixel<bias_sep_x),
(bias_sep_x<=pixel)*(pixel<=upperb),(upperb<pixel)], [f1, f2, f3, f4])
elif mode == " Regular ":
bias_sep_x = lowerb + ((bias_x-0.5) * (upperb - lowerb))
bias_sep_y = pixel_min + ((bias_y-0.5) * (pixel_max - pixel_min))
bound_diff = upperb - lowerb
global_diff = pixel_max - pixel_min
print(gamma)
f23 = lambda pixel: (((pixel-bias_sep_x)/bound_diff)**gamma)*global_diff*gain + bias_sep_y
return np.piecewise(pixel, [(pixel<lowerb), (lowerb <= pixel) * (pixel <= upperb), (upperb < pixel)], [f1, f23, f4])
def setStart_draw(self, event):
"""Set the starting click position."""
self.startPos_draw = (self.canvas.canvasx(event.x), self.canvas.canvasy(event.y))
def drawBox(self, event):
"""Draw the box specified by the starting click position and the cursors current position."""
self.box_manip = True
self.canvas.delete("tempbox")
endPos = (self.canvas.canvasx(event.x), self.canvas.canvasy(event.y))
NEPos = (self.canvas.canvasx(event.x), self.startPos_draw[1])
SWPos = (self.startPos_draw[0], self.canvas.canvasy(event.y))
self.canvas.create_polygon(self.startPos_draw, NEPos, endPos, SWPos,
fill="", width=1, outline=self.boxColor, tag="tempbox")
    def setBox(self, _, **kwargs):
        """Records references to box objects and initializes box manipulation functions.
        Parameters
        ----------
        _ : any
            Used to accept the Tkinter.Event argument passed by bind
        **kwargs: dict
            "REG" : tuple
                8-tuple of the pyregion box's polygon's corners
            "REGdeg" : float
                Position angle of the pyregion box
            "startxy_c" : int
                Coordinate index (0, 2, 4 or 6) of the scan start corner;
                defaults to 0 when absent
        Notes
        -----
        This method is generally called every time the box is physically changed.
        """
        # Create box with the appropriate tags
        try:
            # pyregion path: build the polygon directly from the "REG" kwarg
            self.Box.append([self.canvas.create_polygon(kwargs["REG"],
                                                        fill="", width=1, outline=self.regColor, tag="newbox")])
            self.box_id = self.canvas.find_withtag("newbox")[0]
            self.box_index = -1
            self.canvas.itemconfig("newbox", tag=("O", "reg"))
        except KeyError:
            # Interactive path: no "REG" kwarg, promote the rubber-band box
            if not self.box_selected and not self.boxDrawn:
                # Redraw tempbox from drawBox
                tempbox_coords = self.canvas.coords("tempbox")
                self.canvas.delete("tempbox")
                self.Box.append([self.canvas.create_polygon(tempbox_coords,
                                                            fill="", width=1, outline=self.boxColor, tag="newbox")])
                self.box_id = self.canvas.find_withtag("newbox")[0]
                self.box_index = -1
                self.canvas.itemconfig("newbox", tag=("O", "box"))
                self.over_selected = False
                self.boxDrawn = True
        try :
            self.startxy_c = kwargs["startxy_c"]
        except KeyError:
            self.startxy_c = 0
        try:
            id_str = str(self.box_id)
            # Delete peripheral items from the old box
            self.canvas.delete("mid"+id_str, "resize"+id_str, "marker"+id_str, "regmarker"+id_str)
            # Calculate relevant quantities in advance
            box_coords = self.canvas.coords(self.box_id)
            (NWPos, NEPos, SEPos, SWPos) = tuple(box_coords[i:i + 2] for i in range(0, 8, 2))
            UPos = ((NWPos[0] + NEPos[0]) / 2, (NWPos[1] + NEPos[1]) / 2)
            DPos = ((SEPos[0] + SWPos[0]) / 2, (SEPos[1] + SWPos[1]) / 2)
            LPos = ((NWPos[0] + SWPos[0]) / 2, (NWPos[1] + SWPos[1]) / 2)
            RPos = ((SEPos[0] + NEPos[0]) / 2, (SEPos[1] + NEPos[1]) / 2)
            midPos = ((LPos[0] + RPos[0]) / 2, (UPos[1] + DPos[1]) / 2)
            try:
                # Position angle supplied by the pyregion box (degrees -> radians)
                self.degNow = kwargs["REGdeg"]*math.pi/180
            except KeyError:
                # Otherwise recover the angle from the top-edge midpoint
                self.degNow = -(((0.5 * math.pi) - cmath.phase(complex(UPos[0] - midPos[0], midPos[1] - UPos[1])) - (2*math.pi)) % (-2*math.pi))
            # Save references to self.Box
            self.Box[self.box_index] = [self.box_id]
            # Scan direction markers
            self.Box[self.box_index].append(
                self.canvas.create_oval(box_coords[self.startxy_c] - 2, box_coords[self.startxy_c+1] - 2,
                                        box_coords[self.startxy_c] + 2, box_coords[self.startxy_c+1] + 2,
                                        width=1, fill=self.boxColor, outline=self.boxColor, tag=("O", "box", "marker", "marker"+id_str)))
            small_circle_index_temp = (self.startxy_c+self.dirct) % 8
            self.Box[self.box_index].append(
                self.canvas.create_oval(box_coords[small_circle_index_temp] - 1, box_coords[small_circle_index_temp+1] - 1,
                                        box_coords[small_circle_index_temp] + 1, box_coords[small_circle_index_temp+1] + 1,
                                        width=1, fill=self.boxColor, outline=self.boxColor, tag=("O", "box", "marker", "marker"+id_str)))
            # Maintain appropriate tags to differentiate pyregion boxes
            if "reg" in self.canvas.gettags(self.box_id):
                self.canvas.itemconfig(self.Box[self.box_index][1], fill=self.regColor, outline=self.regColor, tag=("O", "reg", "regmarker", "regmarker"+id_str))
                self.canvas.itemconfig(self.Box[self.box_index][2], fill=self.regColor, outline=self.regColor, tag=("O", "reg", "regmarker", "regmarker"+id_str))
            if self.box_selected:
                self.canvas.itemconfig(self.Box[self.box_index][1], fill="")
                self.canvas.itemconfig(self.Box[self.box_index][2], fill="")
            # Items on the perimeter for box manipulation
            self.resizeFill = ""
            self.Box[self.box_index].append(
                self.canvas.create_oval(NWPos[0] - 6, NWPos[1] + 5, NWPos[0] + 5, NWPos[1] - 6,
                                        width=0, fill="", tag=("O", "resize", "resize" + id_str, "C", "NW")))
            self.Box[self.box_index].append(
                self.canvas.create_oval(NEPos[0] - 6, NEPos[1] + 5, NEPos[0] + 5, NEPos[1] - 6,
                                        width=0, fill="", tag=("O", "resize", "resize" + id_str, "C", "NE")))
            self.Box[self.box_index].append(
                self.canvas.create_oval(SEPos[0] - 6, SEPos[1] + 5, SEPos[0] + 5, SEPos[1] - 6,
                                        width=0, fill="", tag=("O", "resize", "resize" + id_str, "C", "SE")))
            self.Box[self.box_index].append(
                self.canvas.create_oval(SWPos[0] - 6, SWPos[1] + 5, SWPos[0] + 5, SWPos[1] - 6,
                                        width=0, fill="", tag=("O", "resize", "resize" + id_str, "C", "SW")))
            self.Box[self.box_index].append(
                self.canvas.create_oval(UPos[0] - 6, UPos[1] + 5, UPos[0] + 5, UPos[1] - 6,
                                        width=0, fill=self.resizeFill, tag=("O", "resize", "resize" + id_str, "UD", "U")))
            self.Box[self.box_index].append(
                self.canvas.create_oval(DPos[0] - 6, DPos[1] + 5, DPos[0] + 5, DPos[1] - 6,
                                        width=0, fill=self.resizeFill, tag=("O", "resize", "resize" + id_str, "UD", "D")))
            self.Box[self.box_index].append(
                self.canvas.create_oval(LPos[0] - 6, LPos[1] + 5, LPos[0] + 5, LPos[1] - 6,
                                        width=0, fill=self.resizeFill, tag=("O", "resize", "resize" + id_str, "LR", "L")))
            self.Box[self.box_index].append(
                self.canvas.create_oval(RPos[0] - 6, RPos[1] + 5, RPos[0] + 5, RPos[1] - 6,
                                        width=0, fill=self.resizeFill, tag=("O", "resize", "resize" + id_str, "LR", "R")))
            self.Box[self.box_index].append(
                self.canvas.create_oval(midPos[0] - 6, midPos[1] + 5, midPos[0] + 5, midPos[1] - 6,
                                        width=0, fill=self.resizeFill, tag=("O", "mid", "mid" + id_str)))
            # Trailing geometry-state record: [midpoint, angle, start corner, direction]
            self.Box[self.box_index].append([midPos, self.degNow, self.startxy_c, self.dirct])
            # Bind the canvas items to their respective functions
            self.canvas.tag_bind("all", "<Enter>", lambda event, mode="Enter": self.hover_detect(event, mode))
            self.canvas.tag_bind("all", "<Leave>", lambda event, mode="Leave": self.hover_detect(event, mode))
            self.canvas.tag_bind("all", "<Button-1>", lambda event: self.manipBox_callback(event))
            self.canvas.tag_bind("C", "<Button-2>", lambda event: self.manipBox_callback(event))
            # self.canvas.tag_bind("C", "<B2-Motion>", lambda event, mode=("rotate", None): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("C", "<Leave>", lambda event, mode="default": self.cursors(event, mode))
            self.canvas.tag_bind("all", "<Leave>", lambda event, mode="default": self.cursors(event, mode))
            self.canvas.tag_bind("box", "<Enter>", lambda event, mode="move": self.cursors(event, mode))
            self.canvas.tag_bind("box", "<B1-Motion>", lambda event, mode=("move", None): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("reg", "<Enter>", lambda event, mode="move": self.cursors(event, mode))
            self.canvas.tag_bind("reg", "<B1-Motion>", lambda event, mode=("move", None): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("mid", "<Enter>", lambda event, mode="move": self.cursors(event, mode))
            self.canvas.tag_bind("mid", "<B1-Motion>", lambda event, mode=("move", None): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("UD", "<Enter>", lambda event, mode="hand": self.cursors(event, mode))
            self.canvas.tag_bind("U", "<B1-Motion>", lambda event, mode=("U", "stretch"): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("D", "<B1-Motion>", lambda event, mode=("D", "stretch"): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("LR", "<Enter>", lambda event, mode="hand": self.cursors(event, mode))
            self.canvas.tag_bind("L", "<B1-Motion>", lambda event, mode=("L", "stretch"): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("R", "<B1-Motion>", lambda event, mode=("R", "stretch"): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("C", "<Enter>", lambda event, mode="hand": self.cursors(event, mode))
            self.canvas.tag_bind("C", "<B2-Motion>", lambda event, mode=("rotate", None): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("C", "<B3-Motion>", lambda event, mode=("rotate", None): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("NW", "<B1-Motion>", lambda event, mode=("NW", "free"): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("NW", "<Shift-B1-Motion>", lambda event, mode=("NW", "ratio"): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("NW", "<Double-Button-1>", lambda event, corner=0, manual=True: self.set_onpos(event, corner, manual))
            self.canvas.tag_bind("NE", "<B1-Motion>", lambda event, mode=("NE", "free"): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("NE", "<Shift-B1-Motion>", lambda event, mode=("NE", "ratio"): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("NE", "<Double-Button-1>", lambda event, corner=1, manual=True: self.set_onpos(event, corner, manual))
            self.canvas.tag_bind("SE", "<B1-Motion>", lambda event, mode=("SE", "free"): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("SE", "<Shift-B1-Motion>", lambda event, mode=("SE", "ratio"): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("SE", "<Double-Button-1>", lambda event, corner=2, manual=True: self.set_onpos(event, corner, manual))
            self.canvas.tag_bind("SW", "<B1-Motion>", lambda event, mode=("SW", "free"): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("SW", "<Shift-B1-Motion>", lambda event, mode=("SW", "ratio"): self.manipBox_callback(event, mode=mode))
            self.canvas.tag_bind("SW", "<Double-Button-1>", lambda event, corner=3, manual=True: self.set_onpos(event, corner, manual))
            self.canvas.tag_bind("all", "<ButtonRelease-1>", lambda event: self.selectBox(event))
            # Final miscellaneous updates
            self.degChange = 0.
            self.box_manip, self.box_resize = False, False
            self.box_index = self.box_index_selected
            self.box_id = self.Box[self.box_index][0]
            self.canvas.event_generate("<Shift-B4-Motion>")
            self.canvas.event_generate("<Enter>")
        except (AttributeError, IndexError):
            # box_index_selected may not exist yet (no box ever selected)
            pass
    def set_onpos(self, event, corner, manual=False):
        """Insert markers at the on position and the ending corner of the first scan.
        Parameters
        ----------
        event : Tkinter.Event
        corner : int
            Takes values 0, 1, 2, or 3 for each corner in order (NW, NE, SE, SW)
        manual : bool
            True if changing interactively
        """
        if self.box_selected:
            box_coord = tuple(self.canvas.coords(self.box_id)[i:i + 2] for i in range(0, 8, 2))
            # Determine relative position of the turning corner
            if self.scan_direction == "X":
                if corner == 0 or corner == 2:
                    self.dirct = 2
                else:
                    self.dirct = -2
            else:
                if corner == 1 or corner == 3:
                    self.dirct = 2
                else:
                    self.dirct = -2
            # Relocate start_pos marker
            (temp1x, temp1y) = box_coord[corner]
            self.canvas.coords(self.Box[self.box_index][1], temp1x - 2, temp1y - 4, temp1x + 2, temp1y + 4)
            # Update attributes (startxy_c is a flat coordinate index: corner*2)
            self.Box[self.box_index][-1][2] = corner*2
            self.startxy_c = corner*2
            if manual:
                # Rebuild the box so the second marker follows the new direction
                self.setBox(None)
    def hover_detect(self, event, mode):
        """Detect cursor status.

        Parameters
        ----------
        event : Tkinter.Event
        mode : str
            "Enter" when the cursor moves onto a canvas item, "Leave" when it
            moves off.
        """
        self.over_selected = False
        if mode == "Enter":
            self.over_object = True
            try:
                # Is the hovered item part of the currently selected box?
                if self.canvas.find_withtag("current")[0] in self.Box[self.box_index] and self.box_selected:
                    self.over_selected = True
                else:
                    # Otherwise remember which box the cursor is over
                    self.box_index_hover = [index for index, box in enumerate(self.Box) if self.canvas.find_withtag("current")[0] in box][0]
            except IndexError:
                # No "current" item or item not in any known box
                pass
        elif mode == "Leave":
            self.over_object = False
            self.cursors(event, "default")
    def selectBox(self, event, simulate=False):
        """Assign special 'selected' state to a clicked box.
        Parameters
        ----------
        event : Tkinter.Event
        simulate : bool, optional
            True if simulating box selection
        Notes
        -----
        A 'selected' box would have a dotted perimeter and hollow markers
        """
        try:
            if not self.over_selected:
                # Check conditions and determine the box index
                if simulate:
                    # Programmatic selection: take the most recently added box
                    self.box_index = len(self.Box) - 1
                    physical = False
                    self.over_object = True
                else:
                    # Physical click: find the box owning the "current" item
                    self.box_index = [index for index, box in enumerate(self.Box) if self.canvas.find_withtag("current")[0] in box][0]
                    physical = True
                # Set selected box details
                self.box_index_selected = self.box_index
                self.box_id = self.Box[self.box_index][0]
                self.startxy_c = self.Box[self.box_index][-1][2]
                # Configure to be 'selected' state
                if self.over_object and not self.box_manip:
                    self.canvas.tag_raise(self.box_id)
                    self.canvas.tag_raise("marker"+str(self.box_id))
                    self.canvas.tag_raise("resize"+str(self.box_id))
                    # Reset every box to the normal look first
                    self.canvas.itemconfig("box", dash=())
                    self.canvas.itemconfig("reg", dash=())
                    self.canvas.itemconfig("marker", fill=self.boxColor)
                    self.canvas.itemconfig("regmarker", fill=self.regColor)
                    # Then mark this one: dashed outline, hollow markers
                    self.canvas.itemconfig(self.box_id, dash=(4, 5))
                    self.canvas.itemconfig(self.Box[self.box_index][1], fill="")
                    self.canvas.itemconfig(self.Box[self.box_index][2], fill="")
                    self.box_selected = True
                    self.over_object = physical
                    self.over_selected = physical
                    self.canvas.event_generate("<Shift-B4-Motion>")
                    self.cursors(event, "move")
                    self.clearButton.config(state=tk.ACTIVE)
        except IndexError:
            # Click landed on no box: treat as a deselect sequence
            self.deselectBox(event, "B12")
            if self.box_selected:
                self.deselectBox(event, "B1R")
def deselectBox(self, event, mode):
"""Deselect the selected box on release of <Button-1>."""
if mode == "B12":
self.clicked = True
elif mode == "B1R":
if self.clicked:
self.canvas.itemconfig("box", dash=())
self.canvas.itemconfig("reg", dash=())
self.canvas.itemconfig("marker", fill=self.boxColor)
self.canvas.itemconfig("regmarker", fill=self.regColor)
self.box_selected = False
self.over_selected = False
self.clicked = False
self.clearButton.config(state=tk.DISABLED)
self.canvas.event_generate("<Shift-B4-Motion>")
    def resetBox(self, _, mode):
        """Delete the selected box.

        Parameters
        ----------
        _ : any
            Accepts the Tkinter.Event argument passed by bind.
        mode : str
            Only "select" (delete the currently selected box) is handled.

        NOTE(review): the canvas items are deleted but the entry stays in
        ``self.Box`` — confirm downstream iteration tolerates stale entries.
        """
        self.canvas.focus_set()
        if mode == "select":
            if self.box_selected:
                self.box_selected = False
                self.canvas.event_generate("<Shift-B4-Motion>")
                # Only a hand-drawn box frees the single-box drawing slot
                if "reg" not in self.canvas.gettags(self.box_id):
                    self.boxDrawn = False
                self.over_selected = False
                self.clicked = False
                # Reset scan parameters to their defaults
                self.startxy_c, self.scan_direction, self.dirct = 0, "X", 2
                self.clearButton.config(state=tk.DISABLED)
                # Remove every canvas item belonging to this box
                for i in range(len(self.Box[self.box_index])):
                    self.canvas.delete(self.Box[self.box_index][i])
    def manipBox_callback(self, event, **kwargs):
        """Callback for box manipulation.
        Parameters
        ----------
        event : Tkinter.event
        **kwargs : dict
            "mode" : tuple
                2-tuple of the activation point and manipulation type to be passed to manipulateBox
        """
        self.box_resize = True
        try:
            # Manipulate box (raises KeyError on the initial press, which has
            # no "mode", or AttributeError before initvars exist)
            self.manipulateBox(event, kwargs["mode"], *self.manipBox_initvars)
        except (KeyError, AttributeError):
            # Initialize manipulation
            if self.over_selected:
                try:
                    # Record cursor's starting position
                    startPos = (self.canvas.canvasx(event.x), self.canvas.canvasy(event.y))
                    # Calculate relevant quantities in advance
                    (NWPos, NEPos, SEPos, SWPos) = tuple(self.canvas.coords(self.box_id)[i:i + 2] for i in range(0, 8, 2))
                    UPos = ((NWPos[0] + NEPos[0]) / 2, (NWPos[1] + NEPos[1]) / 2)
                    DPos = ((SEPos[0] + SWPos[0]) / 2, (SEPos[1] + SWPos[1]) / 2)
                    LPos = ((NWPos[0] + SWPos[0]) / 2, (NWPos[1] + SWPos[1]) / 2)
                    RPos = ((SEPos[0] + NEPos[0]) / 2, (SEPos[1] + NEPos[1]) / 2)
                    boxPos = self.canvas.coords(self.box_id)
                    self.manipBox_initvars = (startPos, NWPos, NEPos, SEPos, SWPos, UPos, DPos, LPos, RPos, boxPos)
                except AttributeError:
                    # No box_id yet: nothing to initialize
                    pass
    def manipulateBox(self, event, mode, startpos, nwpos, nepos, sepos, swpos, upos, dpos, lpos, rpos, boxpos):
        """Calculate and redraw the box when interactively manipulated.

        Parameters
        ----------
        event : Tkinter.event
        mode : tuple
            ``mode[0]`` selects the manipulation ("move", "rotate", or one of the
            side/corner handles "U", "D", "L", "R", "NW", "NE", "SW", "SE");
            ``mode[1]`` is forwarded to ``manipulateVar`` (stretch/free/ratio).
        startpos : tuple
            Cursor canvas position recorded at activation.
        nwpos, nepos, sepos, swpos : list
            Box corner coordinates recorded at activation.
        upos, dpos, lpos, rpos : tuple
            Side midpoints recorded at activation.
        boxpos : list
            Full 8-value polygon coordinate list recorded at activation.
        """
        try:
            self.canvas.tag_unbind("all", "<Leave>")
            finalPos = boxpos
            # Last element of the box's item list stores [center, current angle].
            midPos = self.Box[self.box_index][-1][0]
            self.degNow = self.Box[self.box_index][-1][1]
            if self.over_selected:
                self.box_manip = True
                if mode[0] == "move":
                    # Translate all four corners by the cursor displacement.
                    posChange = [self.canvas.canvasx(event.x) - startpos[0],
                                 self.canvas.canvasy(event.y) - startpos[1]]
                    finalPos = (boxpos[0] + posChange[0], boxpos[1] + posChange[1],
                                boxpos[2] + posChange[0], boxpos[3] + posChange[1],
                                boxpos[4] + posChange[0], boxpos[5] + posChange[1],
                                boxpos[6] + posChange[0], boxpos[7] + posChange[1])
                elif mode[0] == "rotate":
                    # Angle swept by the cursor around the box center, quantized to
                    # multiples of pi/900 (0.2 degrees).
                    self.degChange = -(
                        ((cmath.phase(complex(startpos[0] - midPos[0], midPos[1] - startpos[1]))
                          - cmath.phase(complex(self.canvas.canvasx(event.x) - midPos[0],
                                                midPos[1] - self.canvas.canvasy(event.y))))
                         % (2 * math.pi)) // (math.pi / 900)) * (math.pi / 900) + (2*math.pi)
                    finalPos = []
                    # Rotate each corner about the center via complex multiplication.
                    for i in range(0, 8, 2):
                        dummyPos = complex(boxpos[i] - midPos[0], midPos[1] - boxpos[i + 1])
                        dummyPos = dummyPos * cmath.exp(complex(0, self.degChange))
                        finalPos.append(dummyPos.real + midPos[0])
                        finalPos.append(midPos[1] - dummyPos.imag)
                    finalPos = tuple(finalPos)
                # Side handles: stretch one edge, keeping the opposite edge fixed.
                elif mode[0] == "U":
                    posChange = self.manipulateVar(event, mode[1], upos, dpos, 0.5, 1,-1,0,0, 0,1,0,0,0,0,0,0, 1)
                    finalPos = (boxpos[0] + posChange[0], boxpos[1] + posChange[1],
                                boxpos[2] + posChange[0], boxpos[3] + posChange[1],
                                boxpos[4], boxpos[5], boxpos[6], boxpos[7])
                elif mode[0] == "D":
                    posChange = self.manipulateVar(event, mode[1], dpos, upos, 0.5, 1,-1,0,0, 0,1,0,0,0,0,0,0, 1)
                    finalPos = (boxpos[0], boxpos[1], boxpos[2], boxpos[3],
                                boxpos[4] + posChange[0], boxpos[5] + posChange[1],
                                boxpos[6] + posChange[0], boxpos[7] + posChange[1])
                elif mode[0] == "L":
                    posChange = self.manipulateVar(event, mode[1], lpos, rpos, 1, -1,-1,0,0, 0,0,0,1,0,0,0,0, 1)
                    finalPos = (boxpos[0] + posChange[0], boxpos[1] + posChange[1], boxpos[2], boxpos[3],
                                boxpos[4], boxpos[5], boxpos[6] + posChange[0], boxpos[7] + posChange[1])
                elif mode[0] == "R":
                    posChange = self.manipulateVar(event, mode[1], rpos, lpos, 1, -1,-1,0,0, 0,0,0,1,0,0,0,0, 1)
                    finalPos = (boxpos[0], boxpos[1], boxpos[2] + posChange[0], boxpos[3] + posChange[1],
                                boxpos[4] + posChange[0], boxpos[5] + posChange[1], boxpos[6], boxpos[7])
                # Corner handles: the dragged corner follows self.endPos (set by
                # manipulateVar) and the two adjacent corners follow posChange.
                elif mode[0] == "NW":
                    posChange = self.manipulateVar(event, mode[1], sepos, nwpos, 0.5, 1,-1,1,1, 0,1,0,0,1,0,1,1, 0)
                    finalPos = (self.endPos[0], self.endPos[1], boxpos[2] + posChange[0], boxpos[3] + posChange[1],
                                boxpos[4], boxpos[5], boxpos[6] + posChange[2], boxpos[7] + posChange[3])
                elif mode[0] == "NE":
                    posChange = self.manipulateVar(event, mode[1], swpos, nepos, 0.5, 1,-1,1,1, 0,1,0,0,1,0,1,1, 0)
                    finalPos = (boxpos[0] + posChange[0], boxpos[1] + posChange[1], self.endPos[0], self.endPos[1],
                                boxpos[4] + posChange[2], boxpos[5] + posChange[3], boxpos[6], boxpos[7])
                elif mode[0] == "SW":
                    posChange = self.manipulateVar(event, mode[1], nepos, swpos, 0, 1,1,-1,1, 0,0,0,1,1,1,1,0, 0)
                    finalPos = (boxpos[0] + posChange[0], boxpos[1] + posChange[1], boxpos[2], boxpos[3],
                                boxpos[4] + posChange[2], boxpos[5] + posChange[3], self.endPos[0], self.endPos[1])
                elif mode[0] == "SE":
                    posChange = self.manipulateVar(event, mode[1], nwpos, sepos, 0, 1,1,-1,1, 0,0,0,1,1,1,1,0, 0)
                    finalPos = (boxpos[0], boxpos[1], boxpos[2] + posChange[0], boxpos[3] + posChange[1],
                                self.endPos[0], self.endPos[1], boxpos[6] + posChange[2], boxpos[7] + posChange[3])
                # Redraw the polygon and its two handle circles.
                self.canvas.coords(self.box_id, finalPos)
                small_circle_index_temp = (self.startxy_c+self.dirct) % 8
                self.canvas.coords(self.Box[self.box_index][1],
                                   finalPos[self.startxy_c]-2, finalPos[self.startxy_c+1]-2,
                                   finalPos[self.startxy_c]+2, finalPos[self.startxy_c+1]+2)
                self.canvas.coords(self.Box[self.box_index][2],
                                   finalPos[small_circle_index_temp]-1, finalPos[small_circle_index_temp+1]-1,
                                   finalPos[small_circle_index_temp]+1, finalPos[small_circle_index_temp+1]+1)
        except AttributeError:
            # No box (or no cached geometry) yet; ignore the event.
            pass
    def manipulateVar(self, event, mode1, ref1, ref2, o, a1, a2, a3, a4, phi11, phi12, phi21, phi22, phi31, phi32, phi41, phi42, r):
        """Calculate the final position of box vertices.

        Also sets ``self.endPos`` (the effective cursor end position) as a side
        effect, which corner-handle callers read directly.

        Returns
        -------
        list
            Coordinate change of the box's corners, in the order of the first position of the polygon, excluding the
            corner(s) that do not change and the activation corner if applicable.
        """
        # Offset final position when stretching box sides
        self.endPos = (self.canvas.canvasx(event.x), self.canvas.canvasy(event.y))
        # NOTE(review): pos_offset is never assigned for an unrecognized mode1,
        # which would raise NameError below — confirm callers only pass
        # "stretch", "free" or "ratio".
        if mode1 == "stretch":
            pos_offset = [ref1[0] - ref2[0], ref1[1] - ref2[1]]
        elif mode1 == "free":
            pos_offset = [0, 0]
        elif mode1 == "ratio":
            pos_offset = [0, 0]
        # Different end position to cursor if maintaining box dimensions
        # NOTE(review): this projection onto the ref1->ref2 diagonal runs for
        # every mode, not just "ratio" — confirm that is intended. Also, no
        # division occurs in this block, so the ZeroDivisionError handler
        # appears unreachable.
        try:
            diagVector = [ref2[0] - ref1[0], ref2[1] - ref1[1]]
            refEnd = [self.endPos[0] - ref2[0], self.endPos[1] - ref2[1]]
            diagDeg = cmath.phase(complex(diagVector[0], diagVector[1]))
            projDeg = diagDeg - cmath.phase(complex(refEnd[0], refEnd[1]))
            refEndNorm = np.linalg.norm(refEnd)
            self.endPos = (ref2[0] + refEndNorm * math.cos(projDeg) * math.cos(diagDeg),
                           ref2[1] + refEndNorm * math.cos(projDeg) * math.sin(diagDeg))
        except ZeroDivisionError:
            pass
        # Calculate the projection degree and norm of the end position relative to the reference points
        refEnd = [self.endPos[0] - ref2[0], ref2[1] - self.endPos[1]]
        refEndNorm = np.linalg.norm(refEnd)
        projDeg = (o * math.pi) - (cmath.phase(complex(refEnd[0], refEnd[1]))) + self.degNow
        # Calculate and return final change in coordinates of the relevant points;
        # the phi* flags select 0 or 90-degree phase shifts per component, the a*
        # flags set the sign, and r gates the stretch offset.
        pi_half = 0.5*math.pi
        posChange = \
            [a1 * (refEndNorm * math.cos(projDeg - phi11*pi_half) * math.cos(-self.degNow - phi12*pi_half)) - (r * pos_offset[0]),
             a2 * (refEndNorm * math.cos(projDeg - phi21*pi_half) * math.cos(-self.degNow - phi22*pi_half)) - (r * pos_offset[1]),
             a3 * (refEndNorm * math.cos(projDeg - phi31*pi_half) * math.cos(-self.degNow - phi32*pi_half)),
             a4 * (refEndNorm * math.cos(projDeg - phi41*pi_half) * math.cos(-self.degNow - phi42*pi_half))]
        return posChange
def cursors(self, _, mode):
if self.over_selected:
if mode == "default":
if Gvars.curOS == "Darwin":
cursor = "cross-hair"
else:
cursor = "crosshair"
elif mode == "hand":
if Gvars.curOS == "Darwin":
cursor = "openhand"
else:
cursor = "hand1"
elif mode == "move":
if Gvars.curOS == "Darwin":
cursor = "fleur"
else:
cursor = "fleur"
elif mode == "rotate":
if Gvars.curOS == "Darwin":
cursor = "exchange"
else:
cursor = "exchange"
self.canvas.config(cursor=cursor)
else:
if Gvars.curOS == "Darwin":
cursor = "cross-hair"
else:
cursor = "crosshair"
self.canvas.config(cursor=cursor) | [
"colour.Color",
"PIL.Image.new",
"PIL.ImageTk.PhotoImage",
"numpy.flip",
"tkinter.Canvas",
"tkinter.ttk.Scrollbar",
"math.sin",
"numpy.min",
"numpy.max",
"numpy.linalg.norm",
"math.cos",
"PIL.ImageDraw.Draw",
"numpy.piecewise"
] | [((2901, 2995), 'tkinter.Canvas', 'tk.Canvas', (['Gframe'], {'bg': '"""gray"""', 'highlightthickness': '(0)', 'yscrollincrement': '(1)', 'xscrollincrement': '(1)'}), "(Gframe, bg='gray', highlightthickness=0, yscrollincrement=1,\n xscrollincrement=1)\n", (2910, 2995), True, 'import tkinter as tk\n'), ((3107, 3174), 'tkinter.ttk.Scrollbar', 'ttk.Scrollbar', (['Gframe'], {'orient': '"""vertical"""', 'command': 'self.canvas.yview'}), "(Gframe, orient='vertical', command=self.canvas.yview)\n", (3120, 3174), False, 'from tkinter import ttk\n'), ((3259, 3328), 'tkinter.ttk.Scrollbar', 'ttk.Scrollbar', (['Gframe'], {'orient': '"""horizontal"""', 'command': 'self.canvas.xview'}), "(Gframe, orient='horizontal', command=self.canvas.xview)\n", (3272, 3328), False, 'from tkinter import ttk\n'), ((3755, 3877), 'tkinter.Canvas', 'tk.Canvas', (['Sframe'], {'bg': '"""gray"""', 'height': '(10)', 'highlightthickness': '(0)', 'highlightcolor': '"""black"""', 'borderwidth': '(0)', 'relief': 'tk.GROOVE'}), "(Sframe, bg='gray', height=10, highlightthickness=0,\n highlightcolor='black', borderwidth=0, relief=tk.GROOVE)\n", (3764, 3877), True, 'import tkinter as tk\n'), ((3992, 4125), 'tkinter.Canvas', 'tk.Canvas', (['Sframe'], {'bg': '"""gray"""', 'width': '(200)', 'height': '(10)', 'highlightthickness': '(0)', 'highlightcolor': '"""black"""', 'borderwidth': '(0)', 'relief': 'tk.GROOVE'}), "(Sframe, bg='gray', width=200, height=10, highlightthickness=0,\n highlightcolor='black', borderwidth=0, relief=tk.GROOVE)\n", (4001, 4125), True, 'import tkinter as tk\n'), ((8035, 8067), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['self.fitsPIL'], {}), '(self.fitsPIL)\n', (8053, 8067), False, 'from PIL import Image, ImageDraw, ImageTk\n'), ((11729, 11771), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['fitsPIL_cropped_zoomed'], {}), '(fitsPIL_cropped_zoomed)\n', (11747, 11771), False, 'from PIL import Image, ImageDraw, ImageTk\n'), ((14869, 14935), 'PIL.Image.new', 'Image.new', 
(['"""RGB"""', '(self.sliderwidth, self.sliderheight)', '"""#FFFFFF"""'], {}), "('RGB', (self.sliderwidth, self.sliderheight), '#FFFFFF')\n", (14878, 14935), False, 'from PIL import Image, ImageDraw, ImageTk\n'), ((14957, 14979), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['gradBg'], {}), '(gradBg)\n', (14971, 14979), False, 'from PIL import Image, ImageDraw, ImageTk\n'), ((46774, 46796), 'numpy.linalg.norm', 'np.linalg.norm', (['refEnd'], {}), '(refEnd)\n', (46788, 46796), True, 'import numpy as np\n'), ((19000, 19164), 'numpy.piecewise', 'np.piecewise', (['pixel', '[pixel < lowerb, (lowerb <= pixel) * (pixel < bias_sep_x), (bias_sep_x <=\n pixel) * (pixel <= upperb), upperb < pixel]', '[f1, f2, f3, f4]'], {}), '(pixel, [pixel < lowerb, (lowerb <= pixel) * (pixel <\n bias_sep_x), (bias_sep_x <= pixel) * (pixel <= upperb), upperb < pixel],\n [f1, f2, f3, f4])\n', (19012, 19164), True, 'import numpy as np\n'), ((14818, 14831), 'colour.Color', 'Color', (['color2'], {}), '(color2)\n', (14823, 14831), False, 'from colour import Color\n'), ((15272, 15298), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['gradBg'], {}), '(gradBg)\n', (15290, 15298), False, 'from PIL import Image, ImageDraw, ImageTk\n'), ((17772, 17795), 'numpy.min', 'np.min', (['self.fitsNp_ori'], {}), '(self.fitsNp_ori)\n', (17778, 17795), True, 'import numpy as np\n'), ((17797, 17820), 'numpy.max', 'np.max', (['self.fitsNp_ori'], {}), '(self.fitsNp_ori)\n', (17803, 17820), True, 'import numpy as np\n'), ((18010, 18035), 'numpy.flip', 'np.flip', (['temp_bitmap_2', '(0)'], {}), '(temp_bitmap_2, 0)\n', (18017, 18035), True, 'import numpy as np\n'), ((19601, 19712), 'numpy.piecewise', 'np.piecewise', (['pixel', '[pixel < lowerb, (lowerb <= pixel) * (pixel <= upperb), upperb < pixel]', '[f1, f23, f4]'], {}), '(pixel, [pixel < lowerb, (lowerb <= pixel) * (pixel <= upperb),\n upperb < pixel], [f1, f23, f4])\n', (19613, 19712), True, 'import numpy as np\n'), ((14795, 14808), 'colour.Color', 'Color', 
(['color1'], {}), '(color1)\n', (14800, 14808), False, 'from colour import Color\n'), ((17937, 17960), 'numpy.max', 'np.max', (['self.fitsNp_ori'], {}), '(self.fitsNp_ori)\n', (17943, 17960), True, 'import numpy as np\n'), ((47355, 47395), 'math.cos', 'math.cos', (['(-self.degNow - phi32 * pi_half)'], {}), '(-self.degNow - phi32 * pi_half)\n', (47363, 47395), False, 'import math, cmath\n'), ((47464, 47504), 'math.cos', 'math.cos', (['(-self.degNow - phi42 * pi_half)'], {}), '(-self.degNow - phi42 * pi_half)\n', (47472, 47504), False, 'import math, cmath\n'), ((46310, 46332), 'numpy.linalg.norm', 'np.linalg.norm', (['refEnd'], {}), '(refEnd)\n', (46324, 46332), True, 'import numpy as np\n'), ((47093, 47133), 'math.cos', 'math.cos', (['(-self.degNow - phi12 * pi_half)'], {}), '(-self.degNow - phi12 * pi_half)\n', (47101, 47133), False, 'import math, cmath\n'), ((47224, 47264), 'math.cos', 'math.cos', (['(-self.degNow - phi22 * pi_half)'], {}), '(-self.degNow - phi22 * pi_half)\n', (47232, 47264), False, 'import math, cmath\n'), ((47319, 47354), 'math.cos', 'math.cos', (['(projDeg - phi31 * pi_half)'], {}), '(projDeg - phi31 * pi_half)\n', (47327, 47354), False, 'import math, cmath\n'), ((47428, 47463), 'math.cos', 'math.cos', (['(projDeg - phi41 * pi_half)'], {}), '(projDeg - phi41 * pi_half)\n', (47436, 47463), False, 'import math, cmath\n'), ((47057, 47092), 'math.cos', 'math.cos', (['(projDeg - phi11 * pi_half)'], {}), '(projDeg - phi11 * pi_half)\n', (47065, 47092), False, 'import math, cmath\n'), ((47188, 47223), 'math.cos', 'math.cos', (['(projDeg - phi21 * pi_half)'], {}), '(projDeg - phi21 * pi_half)\n', (47196, 47223), False, 'import math, cmath\n'), ((46407, 46424), 'math.cos', 'math.cos', (['diagDeg'], {}), '(diagDeg)\n', (46415, 46424), False, 'import math, cmath\n'), ((46500, 46517), 'math.sin', 'math.sin', (['diagDeg'], {}), '(diagDeg)\n', (46508, 46517), False, 'import math, cmath\n'), ((46387, 46404), 'math.cos', 'math.cos', (['projDeg'], {}), 
'(projDeg)\n', (46395, 46404), False, 'import math, cmath\n'), ((46480, 46497), 'math.cos', 'math.cos', (['projDeg'], {}), '(projDeg)\n', (46488, 46497), False, 'import math, cmath\n')] |
"""
.. module:: integrators
:platform: Unix, Windows
:synopsis: Unified Free Energy Dynamics Integrators
.. moduleauthor:: <NAME> <<EMAIL>>
.. _Context: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.Context.html
.. _CustomCVForce: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomCVForce.html
.. _CustomIntegrator: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomIntegrator.html
.. _Force: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.Force.html
.. _NonbondedForce: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.NonbondedForce.html
.. _System: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.System.html
"""
import numpy as np
from simtk import openmm, unit
from ufedmm.ufedmm import _standardized
def add_inner_nonbonded_force(system, inner_switch, inner_cutoff, force_group_index):
    """
    To a given OpenMM System_ containing a NonbondedForce_ object, this function adds a new force
    group with the purpose of performing multiple time-scale integration according to the RESPA2
    splitting scheme of Morrone, Zhou, and Berne :cite:`Morrone_2010`. Besides, it assigns the
    provided `force_group_index` to this new group and `force_group_index+1` to the original
    NonbondedForce_. When used in any instance of :class:`AbstractMiddleRespaIntegrator`, the new
    force group must be identified as being embodied by the NonbondedForce_ as opposed to being
    complimentary to it.
    .. warning:
        The new force group is not intended to contribute to the system energy. Its sole purpose
        is to provide a smooth, short-range force calculator for some intermediary time scale in
        a RESPA-type integration.
    Parameters
    ----------
    system : openmm.System
        The system the inner force will be added to, which must contain a NonbondedForce_.
    inner_switch : float or unit.Quantity
        The inner switching distance, where the interaction of an atom pair begins to switch
        off to zero.
    inner_cutoff : float or unit.Quantity
        The inner cutoff distance, where the interaction of an atom pairs completely switches
        off.
    force_group_index : int
        The force group the new interactions will belong to. The old NonbondedForce_ will be
        automatically assigned to `force_group_index+1`.
    Example
    -------
    >>> import ufedmm
    >>> from simtk import unit
    >>> dt = 2*unit.femtoseconds
    >>> temp = 300*unit.kelvin
    >>> tau = 10*unit.femtoseconds
    >>> gamma = 10/unit.picoseconds
    >>> model = ufedmm.AlanineDipeptideModel()
    >>> ufedmm.add_inner_nonbonded_force(model.system, 5*unit.angstroms, 8*unit.angstroms, 1)
    >>> for force in model.system.getForces():
    ...     print(force.__class__.__name__, force.getForceGroup())
    HarmonicBondForce 0
    HarmonicAngleForce 0
    PeriodicTorsionForce 0
    NonbondedForce 2
    CustomNonbondedForce 1
    CustomBondForce 1
    """
    # NOTE(review): lexicographic string comparison — would misclassify a
    # hypothetical version like '7.10'; fine for released OpenMM versions.
    if openmm.__version__ < '7.5':
        raise Exception("add_inner_nonbonded_force requires OpenMM version >= 7.5")
    try:
        nonbonded_force = next(filter(lambda f: isinstance(f, openmm.NonbondedForce), system.getForces()))
    except StopIteration:
        raise Exception("add_inner_nonbonded_force requires system with NonbondedForce")
    if nonbonded_force.getNumParticleParameterOffsets() > 0 or nonbonded_force.getNumExceptionParameterOffsets() > 0:
        raise Exception("add_inner_nonbonded_force does not support parameter offsets")
    periodic = nonbonded_force.usesPeriodicBoundaryConditions()
    # Switching and cutoff distances in OpenMM's standard (md) unit system.
    rs = _standardized(inner_switch)
    rc = _standardized(inner_cutoff)
    # Coefficients of the quintic switching polynomial between rs and rc.
    a = rc+rs
    b = rc*rs
    c = (30/(rc-rs)**5)*np.array([b**2, -2*a*b, a**2 + 2*b, -2*a, 1])
    f0s = sum([c[n]*rs**(n+1)/(n+1) for n in range(5)])
    # Helpers that assemble, term by term, the analytic integral of the
    # switched r^-n interaction as an OpenMM energy-expression string.
    def coeff(n, m): return c[m-1] if m == n else c[m-1]/(m-n)
    def func(n, m): return '*log(r)' if m == n else (f'*r^{m-n}' if m > n else f'/r^{n-m}')
    def val(n, m): return f0s if m == 0 else (coeff(n, m) - coeff(0, m) if n != m else coeff(n, m))
    def sgn(n, m): return '+' if m > 0 and val(n, m) >= 0 else ''
    def S(n): return ''.join(f'{sgn(n, m)}{val(n, m)}{func(n, m)}' for m in range(6))
    # Lennard-Jones + Coulomb, plus the switched correction beyond rs.
    potential = 'eps4*((sigma/r)^12-(sigma/r)^6)+Qprod/r'
    potential += f'+step(r-{rs})*(eps4*(sigma^12*({S(12)})-sigma^6*({S(6)}))+Qprod*({S(1)}))'
    # Lorentz-Berthelot combining rules expressed with per-particle parameters.
    mixing_rules = '; Qprod=Q1*Q2'
    mixing_rules += '; sigma=halfsig1+halfsig2'
    mixing_rules += '; eps4=sqrt4eps1*sqrt4eps2'
    force = openmm.CustomNonbondedForce(potential + mixing_rules)
    for parameter in ['Q', 'halfsig', 'sqrt4eps']:
        force.addPerParticleParameter(parameter)
    force.setNonbondedMethod(force.CutoffPeriodic if periodic else force.CutoffNonPeriodic)
    force.setCutoffDistance(inner_cutoff)
    force.setUseLongRangeCorrection(False)
    # Coulomb constant 1/(4*pi*eps0) in OpenMM's md units (kJ*nm/mol/e^2).
    ONE_4PI_EPS0 = 138.93545764438198
    for index in range(nonbonded_force.getNumParticles()):
        charge, sigma, epsilon = map(_standardized, nonbonded_force.getParticleParameters(index))
        # Pre-scale parameters so the combining rules above stay multiplicative.
        force.addParticle([charge*np.sqrt(ONE_4PI_EPS0), sigma/2, np.sqrt(4*epsilon)])
    non_exclusion_exceptions = []
    for index in range(nonbonded_force.getNumExceptions()):
        i, j, q1q2, sigma, epsilon = nonbonded_force.getExceptionParameters(index)
        q1q2, sigma, epsilon = map(_standardized, [q1q2, sigma, epsilon])
        # Every exception is excluded from the pairwise force; non-zero ones
        # are re-added below as explicit bonded terms.
        force.addExclusion(i, j)
        if q1q2 != 0.0 or epsilon != 0.0:
            non_exclusion_exceptions.append((i, j, q1q2*ONE_4PI_EPS0, sigma, 4*epsilon))
    force.setForceGroup(force_group_index)
    system.addForce(force)
    if non_exclusion_exceptions:
        exceptions = openmm.CustomBondForce(f'step({rc}-r)*({potential})')
        for parameter in ['Qprod', 'sigma', 'eps4']:
            exceptions.addPerBondParameter(parameter)
        for i, j, Qprod, sigma, eps4 in non_exclusion_exceptions:
            exceptions.addBond(i, j, [Qprod, sigma, eps4])
        exceptions.setForceGroup(force_group_index)
        system.addForce(exceptions)
    # The original full-range force moves to the next (outer) group.
    nonbonded_force.setForceGroup(force_group_index+1)
class CustomIntegrator(openmm.CustomIntegrator):
    """
    Extension of OpenMM's CustomIntegrator_ carrying an extra per-dof variable `kT`,
    which holds the thermal energy of the heat bath attached to each degree of
    freedom. Per-dof temperatures are required when the extended-space variables and
    the physical system are coupled adiabatically to thermostats at different
    temperatures. Otherwise, any other OpenMM integrator can be used.
    Parameters
    ----------
    temperature : float or unit.Quantity
        The temperature.
    step_size : float or unit.Quantity
        The step size with which to integrate the equations of motion.
    """
    def __init__(self, temperature, step_size):
        super().__init__(step_size)
        self.temperature = temperature
        # Seed every dof with the same bath temperature; update_temperatures
        # can later assign distinct values to the extended-space dofs.
        self.addPerDofVariable('kT', unit.MOLAR_GAS_CONSTANT_R*temperature)
        self._up_to_date = False
    def __repr__(self):
        """
        A human-readable version of each integrator step (adapted from openmmtools)
        Returns
        -------
        readable_lines : str
            A list of human-readable versions of each step of the integrator
        """
        lines = []
        num_per_dof = self.getNumPerDofVariables()
        if num_per_dof > 0:
            lines.append('Per-dof variables:')
            per_dof = [self.getPerDofVariableName(index) for index in range(num_per_dof)]
            lines.append('    ' + ', '.join(per_dof))
        num_global = self.getNumGlobalVariables()
        if num_global > 0:
            lines.append('Global variables:')
        for index in range(num_global):
            name = self.getGlobalVariableName(index)
            value = self.getGlobalVariable(index)
            lines.append(f'    {name} = {value}')
        lines.append('Computation steps:')
        # Templates indexed by OpenMM's computation-step type codes.
        step_type_str = [
            '{target} <- {expr}',
            '{target} <- {expr}',
            '{target} <- sum({expr})',
            'constrain positions',
            'constrain velocities',
            'allow forces to update the context state',
            'if ({expr}):',
            'while ({expr}):',
            'end'
        ]
        indent_level = 0
        for step in range(self.getNumComputations()):
            step_type, target, expr = self.getComputationStep(step)
            # 'end' (type 8) closes a block: dedent before printing it.
            if step_type == 8:
                indent_level -= 1
            command = step_type_str[step_type].format(target=target, expr=expr)
            lines.append('{:4d}: '.format(step) + '   '*indent_level + command)
            # 'if'/'while' (types 6, 7) open a block: indent what follows.
            if step_type in [6, 7]:
                indent_level += 1
        return '\n'.join(lines)
    def update_temperatures(self, system_temperature, extended_space_temperatures):
        """Assign bath temperatures: physical dofs first, then the extended-space ones."""
        num_physical = len(self.getPerDofVariableByName('kT')) - len(extended_space_temperatures)
        all_temperatures = [system_temperature]*num_physical + extended_space_temperatures
        kT_values = [unit.MOLAR_GAS_CONSTANT_R*T*openmm.Vec3(1, 1, 1) for T in all_temperatures]
        self.setPerDofVariableByName('kT', kT_values)
        self._up_to_date = True
    def step(self, steps):
        """Advance the simulation, lazily seeding the per-dof temperatures first."""
        if not self._up_to_date:
            self.update_temperatures(self.temperature, [])
        super().step(steps)
class AbstractMiddleRespaIntegrator(CustomIntegrator):
    """
    An abstract class for middle-type, multiple time-scale integrators.
    .. warning::
        This class is meant for inheritance only and does not actually include thermostatting.
        Concrete subclasses are available, such as :class:`MiddleMassiveNHCIntegrator` and
        :class:`GeodesicLangevinIntegrator`, for instance.
    Child classes will differ by the thermostat algorithm, which must be implemented
    by overriding the `_bath` method (see the example below).
    Temperature is treated as a per-dof parameter so as to allow adiabatic simulations.
    The following :term:`ODE` system is solved for every degree of freedom in the system,
    with possibly :math:`n_c` holonomic constraints and with forces possibly split into
    :math:`m` parts according to their characteristic time scales:
    .. math::
        & \\dot{r}_i = v_i \\\\
        & \\dot{v}_i = \\frac{\\sum_{k=1}^m F_i^{[k]}}{m_i}
            + \\sum_{k=1}^{n_c} \\lambda_k \\nabla_{r_i} \\sigma_k
            + \\mathrm{bath}(T_i, v_i) \\\\
        & \\sigma_k(\\mathbf{r}) = 0
    An approximate solution is obtained by applying the Trotter-Suzuki splitting formula.
    In the particular case of two time scales, the default splitting scheme goes as follows:
    .. math::
        e^{\\Delta t\\mathcal{L}} =
        e^{\\frac{\\Delta t}{2}\\mathcal{L}^{[1]}_v}
        \\left[
        e^{\\frac{\\Delta t}{2 n_0}\\mathcal{L}^{[0]}_v}
        \\left(
        e^{\\frac{\\Delta t}{2 n_0 n_b}\\mathcal{L}_r}
        e^{\\frac{\\Delta t}{n_0 n_b}\\mathcal{L}_\\mathrm{bath}}
        e^{\\frac{\\Delta t}{2 n_0 n_b}\\mathcal{L}_r}
        \\right)^{n_b}
        e^{\\frac{\\Delta t}{2 n_0}\\mathcal{L}^{[0]}_v}
        \\right]^{n_0}
        e^{\\frac{\\Delta t}{2}\\mathcal{L}^{[1]}_v}
    Each exponential operator is the solution of a particular subsystem of equations.
    If :math:`\\mathrm{bath}(T_i, v_i) = 0`, the scheme above is time-reversible, measure-preserving,
    and symplectic. It is referred to as the ``VV-Middle`` scheme :cite:`Zhang_2019`, where VV
    stands for Velocity Verlet. An alternative approach is also available, which is:
    .. math::
        e^{\\Delta t\\mathcal{L}} =
        \\left[
        \\left(
        e^{\\frac{\\Delta t}{2 n_0 n_b}\\mathcal{L}_r}
        e^{\\frac{\\Delta t}{n_0 n_b}\\mathcal{L}_\\mathrm{bath}}
        e^{\\frac{\\Delta t}{2 n_0 n_b}\\mathcal{L}_r}
        \\right)^{n_b}
        e^{\\frac{\\Delta t}{n_0}\\mathcal{L}^{[0]}_v}
        \\right]^{n_0}
        e^{\\Delta t \\mathcal{L}^{[1]}_v}
    This is referred to as the ``LF-Middle`` scheme :cite:`Zhang_2019`, where LF stands for
    Leap-Frog. In contrast to the previous scheme, it is not time-reversible. However, in single
    time-scale simulations, the two approaches result in equivalent coordinate trajectories,
    while the latter provides a velocity trajectory more consistent with the Maxwell-Boltzmann
    distribution at the specified temperature :cite:`Zhang_2019`.
    Parameters
    ----------
    temperature : unit.Quantity
        The temperature of the heat bath.
    step_size : float or unit.Quantity
        The outer step size with which to integrate the equations of motion.
    Keyword Args
    ------------
    num_rattles : int, default=0
        The number of RATTLE computations for geodesic integration :cite:`Leimkuhler_2016`.
        If ``num_rattles=0``, then no constraints are considered at all.
    scheme : str, default='VV-Middle'
        Which splitting scheme will be used. Valid options are 'VV-Middle' and 'LF-Middle'.
    respa_loops : list(int), default=[1]
        A list of `m` integers, where `respa_loops[k]` determines how many substeps with
        force group `k` are internally executed for every step with force group `k+1`.
    bath_loops : int, default=1
        The number of iterations of the bath operator per each step at time scale `0`. This
        is useful when the bath operator is not exact, but derived from a splitting solution.
    embodied_force_groups : list(int), default=[]
        A list of indices of force groups. The presence of an index `i` is this list means that
        the contribution of force group `i` is embodied in force group `i+1`. Therefore, such
        contribution must be properly subtracted during the integration at time scale `i+1`.
        This feature requires OpenMM 7.5 or a newer version.
    unroll_loops : bool, default=True
        Whether the integrator loops should be unrolled for improving efficiency. Using
        ``unroll_loops=False`` can be useful for printing the integrator steps.
    Example
    -------
    >>> from ufedmm import integrators
    >>> from simtk import unit
    >>> class MiddleNoseHooverIntegrator(integrators.AbstractMiddleRespaIntegrator):
    ...     def __init__(self, ndof, tau, temperature, step_size, num_rattles=1):
    ...         super().__init__(temperature, step_size, num_rattles, 'VV-Middle', [1], 1)
    ...         kB = 8.3144626E-3*unit.kilojoules_per_mole/unit.kelvin
    ...         gkT = ndof*unit.MOLAR_GAS_CONSTANT_R*temperature
    ...         self.addGlobalVariable('gkT', gkT)
    ...         self.addGlobalVariable('Q', gkT*tau**2)
    ...         self.addGlobalVariable('v_eta', 0)
    ...         self.addGlobalVariable('twoK', 0)
    ...         self.addGlobalVariable('scaling', 1)
    ...     def _bath(self, fraction):
    ...         self.addComputeSum('twoK', 'm*v*v')
    ...         self.addComputeGlobal('v_eta', f'v_eta + {0.5*fraction}*dt*(twoK - gkT)/Q')
    ...         self.addComputeGlobal('scaling', f'exp(-{fraction}*dt*v_eta)')
    ...         self.addComputePerDof('v', f'v*scaling')
    ...         self.addComputeGlobal('v_eta', f'v_eta + {0.5*fraction}*dt*(scaling^2*twoK - gkT)/Q')
    >>> integrator = MiddleNoseHooverIntegrator(500, 10*unit.femtoseconds, 300*unit.kelvin,
    ...                                         1*unit.femtoseconds, num_rattles=0)
    >>> print(integrator)
    Per-dof variables:
        kT
    Global variables:
        gkT = 1247.1693927229858
        Q = 0.1247169392722986
        v_eta = 0.0
        twoK = 0.0
        scaling = 1.0
    Computation steps:
       0: allow forces to update the context state
       1: v <- v + 0.5*dt*f/m
       2: x <- x + 0.5*dt*v
       3: twoK <- sum(m*v*v)
       4: v_eta <- v_eta + 0.5*dt*(twoK - gkT)/Q
       5: scaling <- exp(-1.0*dt*v_eta)
       6: v <- v*scaling
       7: v_eta <- v_eta + 0.5*dt*(scaling^2*twoK - gkT)/Q
       8: x <- x + 0.5*dt*v
       9: v <- v + 0.5*dt*f/m
    """
    def __init__(self, temperature, step_size, num_rattles=0, scheme='VV-Middle',
                 respa_loops=[1], bath_loops=1, embodied_force_groups=[], unroll_loops=True):
        # NOTE(review): respa_loops/embodied_force_groups are mutable defaults;
        # they are only read here, so sharing is harmless as written.
        if scheme not in ['LF-Middle', 'VV-Middle']:
            raise Exception(f'Invalid value {scheme} for keyword scheme')
        super().__init__(temperature, step_size)
        self._num_rattles = num_rattles
        self._scheme = scheme
        self._respa_loops = respa_loops
        self._bath_loops = bath_loops
        self._subtractive_groups = embodied_force_groups
        # `cond and call()` is used as a one-line conditional statement.
        num_rattles > 0 and self.addPerDofVariable('x0', 0)
        num_rattles > 1 and self.addGlobalVariable('irattle', 0)
        if not unroll_loops:
            # Loop counters are only needed as integrator globals when the
            # loops are emitted as `while` blocks instead of being unrolled.
            for scale, n in enumerate(respa_loops):
                n > 1 and self.addGlobalVariable(f'irespa{scale}', 0)
            bath_loops > 1 and self.addGlobalVariable('ibath', 0)
        if embodied_force_groups:
            # NOTE(review): lexicographic version comparison; fine for real
            # OpenMM release numbers.
            if openmm.__version__ < '7.5':
                raise Exception('Use of `embodied_force_groups` option requires OpenMM >= 7.5')
            self.addPerDofVariable('f_emb', 0)
            # Only the non-embodied groups are integrated directly.
            integration_groups = set(range(len(respa_loops))) - set(embodied_force_groups)
            self.setIntegrationForceGroups(integration_groups)
        self.addUpdateContextState()
        self._step_initialization()
        # Build the whole splitting scheme, starting at the outermost scale.
        if unroll_loops:
            self._integrate_respa_unrolled(1, len(respa_loops)-1)
        else:
            self._integrate_respa(1, len(respa_loops)-1)
    def _step_initialization(self):
        # Hook for subclasses that need per-step setup; no-op by default.
        pass
    def _integrate_respa(self, fraction, scale):
        # Recursive RESPA emission using integrator `while` loops.
        # scale >= 0: boost with force group `scale`, recurse inward;
        # scale == -1: innermost translation/bath core.
        if scale >= 0:
            n = self._respa_loops[scale]
            if n > 1:
                self.addComputeGlobal(f'irespa{scale}', '0')
                # n - 1/2 guards against floating-point drift of the counter.
                self.beginWhileBlock(f'irespa{scale} < {n-1/2}')
            # VV-Middle applies half kicks around the inner propagation;
            # LF-Middle applies a single full kick.
            self._boost(fraction/(2*n if self._scheme == 'VV-Middle' else n), scale)
            self._integrate_respa(fraction/n, scale-1)
            self._scheme == 'VV-Middle' and self._boost(fraction/(2*n), scale)
            if n > 1:
                self.addComputeGlobal(f'irespa{scale}', f'irespa{scale} + 1')
                self.endBlock()
        else:
            n = self._bath_loops
            if n > 1:
                self.addComputeGlobal('ibath', '0')
                self.beginWhileBlock(f'ibath < {n-1/2}')
            # Middle scheme core: half drift, bath, half drift.
            self._translation(0.5*fraction/n)
            self._bath(fraction/n)
            self._num_rattles > 0 and self.addConstrainVelocities()
            self._translation(0.5*fraction/n)
            if n > 1:
                self.addComputeGlobal('ibath', 'ibath + 1')
                self.endBlock()
    def _integrate_respa_unrolled(self, fraction, scale):
        # Same scheme as _integrate_respa, but with every loop expanded into
        # explicit repeated steps (no integrator-level while blocks).
        if scale >= 0:
            n = self._respa_loops[scale]
            for i in range(n):
                # First iteration of VV-Middle gets a half kick; consecutive
                # half kicks of adjacent iterations are merged into full kicks.
                self._boost(fraction/(2*n if self._scheme == 'VV-Middle' and i == 0 else n), scale)
                self._integrate_respa_unrolled(fraction/n, scale-1)
                self._scheme == 'VV-Middle' and i == n-1 and self._boost(fraction/(2*n), scale)
        else:
            n = self._bath_loops
            for i in range(n):
                self._translation(fraction/(2*n if i == 0 else n))
                self._bath(fraction/n)
                self._num_rattles > 0 and self.addConstrainVelocities()
                i == n-1 and self._translation(fraction/(2*n))
    def _translation(self, fraction):
        # Position update (drift), optionally split into RATTLE sub-iterations
        # for geodesic integration on the constraint manifold.
        if self._num_rattles > 1:
            self.addComputeGlobal('irattle', '0')
            self.beginWhileBlock(f'irattle < {self._num_rattles-1/2}')
        self.addComputePerDof('x', f'x + {fraction/max(1, self._num_rattles)}*dt*v')
        if self._num_rattles > 0:
            # Project positions back onto the constraints, then correct the
            # velocities with the resulting displacement.
            self.addComputePerDof('x0', 'x')
            self.addConstrainPositions()
            self.addComputePerDof('v', f'v + (x - x0)/({fraction/self._num_rattles}*dt)')
            self.addConstrainVelocities()
        if self._num_rattles > 1:
            self.addComputeGlobal('irattle', 'irattle + 1')
            self.endBlock()
    def _boost(self, fraction, scale):
        # Velocity update (kick) with the forces of group `scale`.
        if len(self._respa_loops) > 1:
            if scale-1 in self._subtractive_groups:
                # Group `scale` embodies group `scale-1`: subtract the inner
                # contribution so it is not double counted.
                self.addComputePerDof('f_emb', f'f{scale-1}')
                self.addComputePerDof('v', f'v + {fraction}*dt*(f{scale}-f_emb)/m')
            else:
                self.addComputePerDof('v', f'v + {fraction}*dt*f{scale}/m')
        else:
            # Single time scale: use the total force.
            self.addComputePerDof('v', f'v + {fraction}*dt*f/m')
        self._num_rattles > 0 and self.addConstrainVelocities()
    def _bath(self, fraction):
        # Thermostat hook; subclasses override this (no-op here).
        return
class GeodesicLangevinIntegrator(AbstractMiddleRespaIntegrator):
    """
    A geodesic Langevin integrator :cite:`Leimkuhler_2016`, which can be integrated by using
    either the LF-Middle or the VV-Middle scheme :cite:`Zhang_2019`.
    .. note:
        The VV-Middle scheme is also known as the BAOAB :cite:`Leimkuhler_2016` method.
    Parameters
    ----------
    temperature : float or unit.Quantity
        The temperature.
    friction_coefficient : float or unit.Quantity
        The friction coefficient.
    step_size : float or unit.Quantity
        The time-step size.
    Keyword Args
    ------------
    num_rattles : int, default=1
        The number of RATTLE computations for geodesic integration :cite:`Leimkuhler_2016`.
        If ``num_rattles=0``, then no constraints are considered at all.
    scheme : str, default='LF-Middle'
        Which splitting scheme will be used. Valid options are 'VV-Middle' and 'LF-Middle'.
    **kwargs
        All other keyword arguments in :class:`AbstractMiddleRespaIntegrator`.
    Example
    -------
    >>> import ufedmm
    >>> dt = 2*unit.femtoseconds
    >>> temp = 300*unit.kelvin
    >>> gamma = 10/unit.picoseconds
    >>> ufedmm.GeodesicLangevinIntegrator(temp, gamma, dt, num_rattles=1, scheme='VV-Middle')
    Per-dof variables:
        kT, x0
    Global variables:
        friction = 10.0
    Computation steps:
       0: allow forces to update the context state
       1: v <- v + 0.5*dt*f/m
       2: constrain velocities
       3: x <- x + 0.5*dt*v
       4: x0 <- x
       5: constrain positions
       6: v <- v + (x - x0)/(0.5*dt)
       7: constrain velocities
       8: v <- z*v + sqrt((1 - z*z)*kT/m)*gaussian; z = exp(-friction*1.0*dt)
       9: constrain velocities
      10: x <- x + 0.5*dt*v
      11: x0 <- x
      12: constrain positions
      13: v <- v + (x - x0)/(0.5*dt)
      14: constrain velocities
      15: v <- v + 0.5*dt*f/m
      16: constrain velocities
    """
    def __init__(self, temperature, friction_coefficient, step_size,
                 num_rattles=1, scheme='LF-Middle', **kwargs):
        super().__init__(temperature, step_size, num_rattles=num_rattles, scheme=scheme, **kwargs)
        # Stored as an integrator global so the bath expression can read it.
        self.addGlobalVariable('friction', friction_coefficient)
    def _bath(self, fraction):
        # Exact Ornstein-Uhlenbeck step: v <- z*v + sqrt((1 - z^2) kT/m) N(0,1),
        # with decay factor z = exp(-gamma * fraction * dt).
        decay = f'exp(-friction*{fraction}*dt)'
        self.addComputePerDof('v', 'z*v + sqrt((1 - z*z)*kT/m)*gaussian; z = ' + decay)
class MiddleMassiveNHCIntegrator(AbstractMiddleRespaIntegrator):
"""
A massive, middle-type Nose-Hoover Chain Thermostat solver :cite:`Martyna_1992`
with optional multiple time-scale integration via RESPA.
To enable RESPA, the forces in OpenMM system must be split into distinct force
groups and the keyword ``respa_loop`` (see below) must be a list with multiple entries.
Parameters
----------
temperature : float or unit.Quantity
The temperature.
time_constant : float or unit.Quantity
The characteristic time constant.
step_size : float or unit.Quantity
The time-step size.
Keyword Args
------------
nchain : int, default=2
The number of thermostats in each Nose-Hoover chain.
track_energy : bool, default=False
Whether to track the thermostat energy term.
**kwargs
All keyword arguments in :class:`AbstractMiddleRespaIntegrator`, except ``num_rattles``.
Example
-------
>>> import ufedmm
>>> temp, tau, dt = 300*unit.kelvin, 10*unit.femtoseconds, 2*unit.femtoseconds
>>> integrator = ufedmm.MiddleMassiveNHCIntegrator(temp, tau, dt, respa_loops=[4, 1], unroll_loops=False)
>>> print(integrator)
Per-dof variables:
kT, Q, v1, v2
Global variables:
irespa0 = 0.0
Computation steps:
0: allow forces to update the context state
1: v <- v + 0.5*dt*f1/m
2: irespa0 <- 0
3: while (irespa0 < 3.5):
4: v <- v + 0.125*dt*f0/m
5: x <- x + 0.125*dt*v
6: v2 <- v2 + 0.125*dt*(Q*v1^2 - kT)/Q
7: v1 <- (v1*z + 0.125*dt*(m*v^2 - kT)/Q)*z; z=exp(-0.0625*dt*v2)
8: v <- v*exp(-0.25*dt*v1)
9: v1 <- (v1*z + 0.125*dt*(m*v^2 - kT)/Q)*z; z=exp(-0.0625*dt*v2)
10: v2 <- v2 + 0.125*dt*(Q*v1^2 - kT)/Q
11: x <- x + 0.125*dt*v
12: v <- v + 0.125*dt*f0/m
13: irespa0 <- irespa0 + 1
14: end
15: v <- v + 0.5*dt*f1/m
"""
def __init__(self, temperature, time_constant, step_size, nchain=2, track_energy=False, **kwargs):
if 'num_rattles' in kwargs.keys() and kwargs['num_rattles'] != 0:
raise ValueError(f'{self.__class__.__name__} cannot handle constraints')
self._tau = _standardized(time_constant)
self._nchain = nchain
self._track_energy = track_energy
super().__init__(temperature, step_size, **kwargs)
self.addPerDofVariable('Q', 0)
for i in range(nchain):
self.addPerDofVariable(f'v{i+1}', 0)
if track_energy:
self.addPerDofVariable(f'eta{i+1}', 0)
def update_temperatures(self, system_temperature, extended_space_temperatures):
super().update_temperatures(system_temperature, extended_space_temperatures)
Q = [self._tau**2*kT for kT in self.getPerDofVariableByName('kT')]
self.setPerDofVariableByName('Q', Q)
def _bath(self, fraction):
n = self._nchain
def a(i): return f'(Q*v{i-1}^2 - kT)/Q' if i > 1 else '(m*v^2 - kT)/Q'
def z(i): return f'exp(-{fraction/4}*dt*v{i+1})'
self.addComputePerDof(f'v{n}', f'v{n} + {fraction/2}*dt*{a(n)}')
for i in reversed(range(1, n)):
self.addComputePerDof(f'v{i}', f'(v{i}*z + {fraction/2}*dt*{a(i)})*z; z={z(i)}')
self.addComputePerDof('v', f'v*exp(-{fraction}*dt*v1)')
for i in range(1, n):
self.addComputePerDof(f'v{i}', f'(v{i}*z + {fraction/2}*dt*{a(i)})*z; z={z(i)}')
self.addComputePerDof(f'v{n}', f'v{n} + {fraction/2}*dt*{a(n)}')
class MiddleMassiveGGMTIntegrator(AbstractMiddleRespaIntegrator):
"""
A massive, middle-type Generalized Gaussian Moment Thermostat :cite:`Liu_2000`
solver with optional multiple time-scale integration via RESPA.
To enable RESPA, the forces in OpenMM system must be split into distinct force
groups and the keyword ``respa_loop`` (see below) must be a list with multiple entries.
Parameters
----------
temperature : float or unit.Quantity
The temperature.
time_constant : float or unit.Quantity
The characteristic time constant.
step_size : float or unit.Quantity
The time-step size.
Keyword Args
------------
**kwargs
All keyword arguments in :class:`AbstractMiddleRespaIntegrator`, except ``num_rattles``.
Example
-------
>>> import ufedmm
>>> temp, tau, dt = 300*unit.kelvin, 10*unit.femtoseconds, 2*unit.femtoseconds
>>> integrator = ufedmm.MiddleMassiveGGMTIntegrator(temp, tau, dt)
>>> print(integrator)
Per-dof variables:
kT, Q1, Q2, v1, v2
Computation steps:
0: allow forces to update the context state
1: v <- v + 0.5*dt*f/m
2: x <- x + 0.5*dt*v
3: v1 <- v1 + 0.5*dt*(m*v^2 - kT)/Q1
4: v2 <- v2 + 0.5*dt*((m*v^2)^2/3 - kT^2)/Q2
5: v <- v*exp(-1.0*dt*(v1 + kT*v2))/sqrt(1 + 2.0*dt*m*v^2*v2/3)
6: v1 <- v1 + 0.5*dt*(m*v^2 - kT)/Q1
7: v2 <- v2 + 0.5*dt*((m*v^2)^2/3 - kT^2)/Q2
8: x <- x + 0.5*dt*v
9: v <- v + 0.5*dt*f/m
"""
def __init__(self, temperature, time_constant, step_size, **kwargs):
if 'num_rattles' in kwargs.keys() and kwargs['num_rattles'] != 0:
raise ValueError(f'{self.__class__.__name__} cannot handle constraints')
self._tau = _standardized(time_constant)
super().__init__(temperature, step_size, **kwargs)
self.addPerDofVariable('Q1', 0)
self.addPerDofVariable('Q2', 0)
self.addPerDofVariable('v1', 0)
self.addPerDofVariable('v2', 0)
def set_extended_space_time_constants(self, time_constants):
self._xs_taus = [_standardized(tau) for tau in time_constants]
def update_temperatures(self, system_temperature, extended_space_temperatures):
super().update_temperatures(system_temperature, extended_space_temperatures)
kT_vectors = self.getPerDofVariableByName('kT')
kT3_vectors = [openmm.Vec3(kT.x**3, kT.y**3, kT.z**3) for kT in kT_vectors]
if hasattr(self, '_xs_taus'):
num_particles = len(kT_vectors) - len(extended_space_temperatures)
taus = [self._tau]*num_particles + self._xs_taus
Q1 = [kT*tau**2 for kT, tau in zip(kT_vectors, taus)]
Q2 = [8/3*kT3*tau**2 for kT3, tau in zip(kT3_vectors, taus)]
else:
Q1 = [kT*self._tau**2 for kT in kT_vectors]
Q2 = [8/3*kT3*self._tau**2 for kT3 in kT3_vectors]
self.setPerDofVariableByName('Q1', Q1)
self.setPerDofVariableByName('Q2', Q2)
def _bath(self, fraction):
self.addComputePerDof('v1', f'v1 + {fraction/2}*dt*(m*v^2 - kT)/Q1')
self.addComputePerDof('v2', f'v2 + {fraction/2}*dt*((m*v^2)^2/3 - kT^2)/Q2')
self.addComputePerDof('v', f'v*exp(-{fraction}*dt*(v1 + kT*v2))/sqrt(1 + {2*fraction}*dt*m*v^2*v2/3)')
self.addComputePerDof('v1', f'v1 + {fraction/2}*dt*(m*v^2 - kT)/Q1')
self.addComputePerDof('v2', f'v2 + {fraction/2}*dt*((m*v^2)^2/3 - kT^2)/Q2')
class RegulatedNHLIntegrator(AbstractMiddleRespaIntegrator):
"""
A regulated version of the massive Nose-Hoover-Langevin :cite:`Samoletov_2007,Leimkuhler_2009`
method. Regulation means that the system Hamiltonian is modified so that velocities remain below
a temperature-dependent limit. This is closely related to the SIN(R) method :cite:`Leimkuhler_2013`
and allows multiple time-scale integration with very large outer time steps, without resonance.
.. info:
If `regulation_parameter = 1` (default), this method is equivalent to SIN(R) with a single
thermostat per degree of freedom (that is, `L=1`).
The following :term:`SDE` system is solved for every degree of freedom in the system:
.. math::
& dr_i = v_i dt \\\\
& dp_i = F_i dt - v_{\\eta_i} m_i v_i dt \\\\
& dv_{\\eta_i} = \\frac{1}{Q}\\left(\\frac{n+1}{n} m_i v_i^2 - k_B T\\right) dt
- \\gamma v_{\\eta_i} dt + \\sqrt{\\frac{2\\gamma k_B T}{Q}} dW_i,
where:
.. math::
v_i = c_i \\tanh\\left(\\frac{p_i}{m_i c_i}\\right).
Here, :math:`n` is the regulation parameter and :math:`c_i = \\sqrt{\\frac{n k T}{m_i}}` is
the maximum speed for degree of freedom `i`. The inertial parameter :math:`Q` is defined as
:math:`Q = n k_B T \\tau^2`, with :math:`\\tau` being a relaxation time :cite:`Tuckerman_1992`.
An approximate solution is obtained by applying the Trotter-Suzuki splitting formula:
.. math::
e^{\\Delta t\\mathcal{L}} =
e^{\\frac{\\Delta t}{2}\\mathcal{L}^1_p}
\\left[e^{\\frac{\\delta t}{2}\\mathcal{L}^0_p}
e^{\\frac{\\delta t}{2}\\mathcal{L}_r}
e^{\\delta t \\mathcal{L}_\\mathrm{bath}}
e^{\\frac{\\delta t}{2}\\mathcal{L}_r}
e^{\\frac{\\delta t}{2}\\mathcal{L}^0_p}\\right]^m
e^{\\frac{\\Delta t}{2}\\mathcal{L}^1_p}
where :math:`\\delta t = \\frac{\\Delta t}{m}`. Each exponential operator above is the solution
of a differential equation.
The exact solution for the physical-system part is:
.. math::
r_i(t) = r_i^0 + c_i \\mathrm{tanh}\\left(\\frac{p_i}{m c_i}\\right) t
.. math::
p_i(t) = p_i^0 + F_i t
The bath propagator is further split as:
.. math::
e^{\\delta t \\mathcal{L}_\\mathrm{bath}} =
e^{\\frac{\\delta t}{2m}\\mathcal{L}_B}
e^{\\frac{\\delta t}{2m}\\mathcal{L}_S}
e^{\\frac{\\delta t}{m}\\mathcal{L}_O}
e^{\\frac{\\delta t}{2m}\\mathcal{L}_S}
e^{\\frac{\\delta t}{2m}\\mathcal{L}_B}
Part 'B' is a boost, whose solution is:
.. math::
v_{\\eta_i}(t) = v_{\\eta_i}^0 +
\\frac{1}{Q}\\left(\\frac{n+1}{n} m_i v_i^2 - k_B T\\right) t
Part 'S' is a scaling, whose solution is:
.. math::
p_i(t) = m_i c_i \\mathrm{arcsinh}\\left[
\\sinh\\left(\\frac{p_i^0}{m_i c_i}\\right) e^{- v_{\\eta_i} t}
\\right]
Part 'O' is an Ornstein–Uhlenbeck process, whose solution is:
.. math::
v_{\\eta_i}(t) = v_{\\eta_i}^0 e^{-\\gamma t}
+ \\sqrt{\\frac{k_B T}{Q}(1-e^{-2\\gamma t})} R_N
where :math:`R_N` is a normally distributed random number.
Parameters
----------
step_size : float or unit.Quantity
The outer step size with which to integrate the equations of motion.
loops : int
The number of internal substeps at each time step.
temperature : unit.Quantity
The temperature of the heat bath.
time_scale : unit.Quantity (time)
The relaxation time (:math:`\\tau`) of the Nose-Hoover thermostat.
friction_coefficient : unit.Quantity (1/time)
The friction coefficient (:math:`\\gamma`) of the Langevin thermostat.
regulation_parameter : int or float
The regulation parameter n.
Keyword Args
------------
split_ornstein_uhlenbeck : bool, default=False
Whether to split the drifted Ornstein-Uhlenbeck operator.
semi_regulated : bool, default=False
Whether to use the semi-regulated NHL method instead of its fully-regulated version.
**kwargs
All keyword arguments in :class:`AbstractMiddleRespaIntegrator`, except ``num_rattles``.
"""
def __init__(self, temperature, time_constant, friction_coefficient, step_size,
regulation_parameter, split_ornstein_uhlenbeck=False, semi_regulated=False,
**kwargs):
if 'num_rattles' in kwargs.keys() and kwargs['num_rattles'] != 0:
raise ValueError(f'{self.__class__.__name__} cannot handle constraints')
self._tau = np.sqrt(regulation_parameter)*time_constant
self._n = regulation_parameter
self._split = split_ornstein_uhlenbeck
self._semi_regulated = semi_regulated
super().__init__(temperature, step_size, **kwargs)
self.addPerDofVariable('invQ', 0)
self.addPerDofVariable('v_eta', 0)
self.addPerDofVariable('c', 0)
self.addGlobalVariable('friction', friction_coefficient)
self.addGlobalVariable('omega', 1.0/self._tau)
self.addGlobalVariable('aa', 0)
self.addGlobalVariable('bb', 0)
def update_temperatures(self, system_temperature, extended_space_temperatures):
super().update_temperatures(system_temperature, extended_space_temperatures)
kT_vectors = self.getPerDofVariableByName('kT')
tauSq = _standardized(self._tau)**2
Q = [tauSq*kT for kT in kT_vectors]
invQ = [openmm.Vec3(*map(lambda x: 1/x if x > 0.0 else 0.0, q)) for q in Q]
self.setPerDofVariableByName('invQ', invQ)
def _step_initialization(self):
self.addComputePerDof('c', f'sqrt({self._n}*kT/m)')
n = np.prod(self._respa_loops)*self._bath_loops
self.addComputeGlobal('aa', f'exp(-friction*dt/{n})')
self.addComputeGlobal('bb', 'omega*sqrt(1-aa^2)')
def _translation(self, fraction):
n = self._n
self.setKineticEnergyExpression(f'{0.5*(n+1)/n}*m*(c*tanh(v/c))^2; c=sqrt({n}*kT/m)')
self.addComputePerDof('x', f'x + c*tanh(v/c)*{fraction}*dt')
def _bath(self, fraction):
n = self._n
if self._semi_regulated:
G = '; G=(m*v*c*tanh(v/c) - kT)*invQ'
else:
G = f'; G=({(n+1)/n}*m*(c*tanh(v/c))^2 - kT)*invQ'
if self._split:
boost = f'v_eta + G*{0.5*fraction}*dt' + G
if self._semi_regulated:
scaling = f'v*exp(-v_eta*{0.5*fraction}*dt)'
else:
scaling = 'c*asinh_z'
scaling += '; asinh_z=(2*step(z)-1)*log(select(step(za-1E8),2*za,za+sqrt(1+z*z))); za=abs(z)'
scaling += f'; z=sinh(v/c)*exp(-v_eta*{0.5*fraction}*dt)'
if self._split:
Ornstein_Uhlenbeck = 'v_eta*aa + bb*gaussian'
else:
Ornstein_Uhlenbeck = 'v_eta*aa + G*(1-aa)/friction + bb*gaussian' + G
self._split and self.addComputePerDof('v_eta', boost)
self.addComputePerDof('v', scaling)
self.addComputePerDof('v_eta', Ornstein_Uhlenbeck)
self.addComputePerDof('v', scaling)
self._split and self.addComputePerDof('v_eta', boost)
| [
"simtk.openmm.CustomNonbondedForce",
"simtk.openmm.CustomBondForce",
"simtk.openmm.Vec3",
"numpy.array",
"ufedmm.ufedmm._standardized",
"numpy.prod",
"numpy.sqrt"
] | [((3800, 3827), 'ufedmm.ufedmm._standardized', '_standardized', (['inner_switch'], {}), '(inner_switch)\n', (3813, 3827), False, 'from ufedmm.ufedmm import _standardized\n'), ((3837, 3864), 'ufedmm.ufedmm._standardized', '_standardized', (['inner_cutoff'], {}), '(inner_cutoff)\n', (3850, 3864), False, 'from ufedmm.ufedmm import _standardized\n'), ((4722, 4775), 'simtk.openmm.CustomNonbondedForce', 'openmm.CustomNonbondedForce', (['(potential + mixing_rules)'], {}), '(potential + mixing_rules)\n', (4749, 4775), False, 'from simtk import openmm, unit\n'), ((3917, 3974), 'numpy.array', 'np.array', (['[b ** 2, -2 * a * b, a ** 2 + 2 * b, -2 * a, 1]'], {}), '([b ** 2, -2 * a * b, a ** 2 + 2 * b, -2 * a, 1])\n', (3925, 3974), True, 'import numpy as np\n'), ((5874, 5927), 'simtk.openmm.CustomBondForce', 'openmm.CustomBondForce', (['f"""step({rc}-r)*({potential})"""'], {}), "(f'step({rc}-r)*({potential})')\n", (5896, 5927), False, 'from simtk import openmm, unit\n'), ((26346, 26374), 'ufedmm.ufedmm._standardized', '_standardized', (['time_constant'], {}), '(time_constant)\n', (26359, 26374), False, 'from ufedmm.ufedmm import _standardized\n'), ((29545, 29573), 'ufedmm.ufedmm._standardized', '_standardized', (['time_constant'], {}), '(time_constant)\n', (29558, 29573), False, 'from ufedmm.ufedmm import _standardized\n'), ((29884, 29902), 'ufedmm.ufedmm._standardized', '_standardized', (['tau'], {}), '(tau)\n', (29897, 29902), False, 'from ufedmm.ufedmm import _standardized\n'), ((30179, 30223), 'simtk.openmm.Vec3', 'openmm.Vec3', (['(kT.x ** 3)', '(kT.y ** 3)', '(kT.z ** 3)'], {}), '(kT.x ** 3, kT.y ** 3, kT.z ** 3)\n', (30190, 30223), False, 'from simtk import openmm, unit\n'), ((35974, 36003), 'numpy.sqrt', 'np.sqrt', (['regulation_parameter'], {}), '(regulation_parameter)\n', (35981, 36003), True, 'import numpy as np\n'), ((36775, 36799), 'ufedmm.ufedmm._standardized', '_standardized', (['self._tau'], {}), '(self._tau)\n', (36788, 36799), False, 'from ufedmm.ufedmm 
import _standardized\n'), ((37091, 37117), 'numpy.prod', 'np.prod', (['self._respa_loops'], {}), '(self._respa_loops)\n', (37098, 37117), True, 'import numpy as np\n'), ((5314, 5334), 'numpy.sqrt', 'np.sqrt', (['(4 * epsilon)'], {}), '(4 * epsilon)\n', (5321, 5334), True, 'import numpy as np\n'), ((9390, 9410), 'simtk.openmm.Vec3', 'openmm.Vec3', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (9401, 9410), False, 'from simtk import openmm, unit\n'), ((5282, 5303), 'numpy.sqrt', 'np.sqrt', (['ONE_4PI_EPS0'], {}), '(ONE_4PI_EPS0)\n', (5289, 5303), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from numereval.scores import score_signals
def calculate_max_drawdown(validation_correlations: pd.Series):
rolling_max = (
(validation_correlations + 1).cumprod().rolling(window=100, min_periods=1).max()
)
daily_value = (validation_correlations + 1).cumprod()
max_drawdown = -((rolling_max - daily_value)).max()
#TODO: Add Max drawdown weeks
return max_drawdown
def run_analytics(era_scores, roll_mean=20, plot=False):
'''Calculates some stats and plot cumulative scores.
Taken from <NAME>'s notebook.
'''
metrics = {}
metrics["weeks"] = len(era_scores)
metrics["Mean correlation"] = era_scores.mean()
metrics["Median correlation"] = era_scores.median()
metrics["Std. Dev."] = era_scores.std()
metrics["Mean Pseudo-Sharpe"] = era_scores.mean()/era_scores.std()
metrics["Median Pseudo-Sharpe"] = era_scores.median()/era_scores.std()
metrics["Hit Rate (% positive eras)"] = era_scores.apply(lambda x: np.sign(x)).value_counts()[1]/len(era_scores) * 100
#metrics["Max Drawdown"] = calculate_max_drawdown(era_scores)
if plot:
era_scores.rolling(roll_mean).mean().plot(
kind="line", title="Rolling Per Era Correlation Mean", figsize=(15, 4)
)
plt.axhline(y=0.0, color="black", linestyle="-", linewidth=1)
plt.axhline(y=era_scores.mean(), color="g", linewidth=1, linestyle="--", label='Mean corr')
plt.show()
era_scores.cumsum().plot(title="Cumulative Sum of Era Scores", figsize=(15, 4))
plt.axhline(y=0.0, color="r", linestyle="--")
plt.show()
return pd.Series(metrics).round(4) | [
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.show",
"pandas.Series",
"numpy.sign"
] | [((1344, 1405), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0.0)', 'color': '"""black"""', 'linestyle': '"""-"""', 'linewidth': '(1)'}), "(y=0.0, color='black', linestyle='-', linewidth=1)\n", (1355, 1405), True, 'import matplotlib.pyplot as plt\n'), ((1514, 1524), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1522, 1524), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1667), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0.0)', 'color': '"""r"""', 'linestyle': '"""--"""'}), "(y=0.0, color='r', linestyle='--')\n", (1633, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1676, 1686), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1684, 1686), True, 'import matplotlib.pyplot as plt\n'), ((1703, 1721), 'pandas.Series', 'pd.Series', (['metrics'], {}), '(metrics)\n', (1712, 1721), True, 'import pandas as pd\n'), ((1059, 1069), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (1066, 1069), True, 'import numpy as np\n')] |
from typing import Sequence, Union
import numpy as np
import numpy.testing as npt
from assertpy import assert_that
def sample_weights_simulating_class_distribution(
y_true: Union[Sequence[int], np.ndarray],
hypothetical_class_distribution: Union[Sequence[float], np.ndarray],
) -> np.ndarray:
"""
Computes a 1D array of sample weights that results in the requested
`hypothetical_class_distribution` if applied to the dataset. This is useful when you
know that the class distribution in your dataset deviates from the distribution you
expect to encounter in the environment where your machine learning model is going to
be deployed.
Example:
You have a data set with 40% spam 60% ham emails. However, you expect that
only 4% of the emails in the deployment environment will be spam, and you would
like to measure various performance characteristics on a dataset with 4% spam
and 96% ham. This function will return an array with
* sample weights 4% / 40% = 0.1 for all of the spam examples
* sample weights 96% / 60% = 1.6 for all of the ham examples
if called with:
>>> weights = sample_weights_simulating_class_distribution(
... y_true=[0, 1, 1, 0, 1, 0, 1, 1, 0, 1], # zeros for spam
... hypothetical_class_distribution=[0.04, 0.96]
... )
>>> print(weights)
array([0.1 , 1.6])
Args:
y_true:
1D array of integers with class indices of the dataset. There must be at
least one sample for each class.
hypothetical_class_distribution:
Sequence of floats describing the distribution you assume to encounter in
your deployment environment.
Returns:
1D numpy array with sample weights, same length as `y_true`.
"""
# --- check input ---
assert_that(set(y_true)).is_equal_to(
set(range(len(hypothetical_class_distribution)))
)
assert_that(len(set(y_true))).is_equal_to(len(hypothetical_class_distribution))
y_true = np.asarray(y_true)
hypothetical_class_distribution = np.asarray(hypothetical_class_distribution)
npt.assert_allclose(
hypothetical_class_distribution.sum(),
1.0,
err_msg="Probability distribution does not sum up to 1.0",
)
assert_that(y_true.ndim).is_equal_to(1)
assert_that(hypothetical_class_distribution.ndim).is_equal_to(1)
# --- compute output ---
class_distribution = np.bincount(y_true) / len(y_true)
npt.assert_equal(class_distribution > 0.0, True)
npt.assert_allclose(class_distribution.sum(), 1.0)
weights = [
hypothetical_class_distribution[y] / class_distribution[y] for y in y_true
]
return np.array(weights)
| [
"assertpy.assert_that",
"numpy.asarray",
"numpy.array",
"numpy.testing.assert_equal",
"numpy.bincount"
] | [((2109, 2127), 'numpy.asarray', 'np.asarray', (['y_true'], {}), '(y_true)\n', (2119, 2127), True, 'import numpy as np\n'), ((2166, 2209), 'numpy.asarray', 'np.asarray', (['hypothetical_class_distribution'], {}), '(hypothetical_class_distribution)\n', (2176, 2209), True, 'import numpy as np\n'), ((2575, 2623), 'numpy.testing.assert_equal', 'npt.assert_equal', (['(class_distribution > 0.0)', '(True)'], {}), '(class_distribution > 0.0, True)\n', (2591, 2623), True, 'import numpy.testing as npt\n'), ((2796, 2813), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (2804, 2813), True, 'import numpy as np\n'), ((2537, 2556), 'numpy.bincount', 'np.bincount', (['y_true'], {}), '(y_true)\n', (2548, 2556), True, 'import numpy as np\n'), ((2373, 2397), 'assertpy.assert_that', 'assert_that', (['y_true.ndim'], {}), '(y_true.ndim)\n', (2384, 2397), False, 'from assertpy import assert_that\n'), ((2417, 2466), 'assertpy.assert_that', 'assert_that', (['hypothetical_class_distribution.ndim'], {}), '(hypothetical_class_distribution.ndim)\n', (2428, 2466), False, 'from assertpy import assert_that\n')] |
import numpy as np
from glob import glob
def make_sample(input_dir, add_svs):
x_file = glob('%s/*features_0.npy' % input_dir)[0]
y_file = glob('%s/*truth_1.npy' % input_dir)[0]
X_all = np.load(x_file)
Y_all = np.load(y_file)
# drop Cs or anything else
Y_C = Y_all[:, 2:3] < 0.1
Y_all = Y_all[Y_C.ravel()]
X_all = X_all[Y_C.ravel()]
# get the right labels
isB_all = Y_all[:, 1:2]
isMC_all = Y_all[:, 0:1]
print(isMC_all.shape)
isMC_all = isMC_all.ravel()
print(isMC_all.shape)
# select MC only
if False:
np.random.seed(1234)
isMC_all = np.random.randint(2, size=X_all.shape[0])
isMC_all = np.reshape(isMC_all,(isMC_all.shape[0],1))
X_ptRel = X_all[:, :1]#3
X_2Ds = X_all[:, 5:6]#8
X_3Ds = X_all[:, 10:11]
X_ptPro = X_all[:, 15:16]#18
# now we can increase the smearing
# noise = np.random.randn(X_all.shape[0],5)*0.5
# noise2 = np.random.randn(X_all.shape[0],5)*0.5
# noise_uni = np.random.rand(X_all.shape[0],1) > 0.666666
def addMCStretch(Xin, stretch, data=False):
selected = np.array(isMC_all.ravel(), dtype='float32')#important to copy here
if data:
selected-=1
selected=np.abs(selected)
selected *= isB_all.ravel()
selected *= stretch
selected += 1
selected=np.reshape(selected, (selected.shape[0],1))
Xin = np.multiply(Xin,selected)
return Xin
# X_2Ds=addMCStretch(X_2Ds, 5.5)
# X_3Ds=addMCStretch(X_3Ds, 5.5)
# poisson_b = (np.random.rand(X_all.shape[0], 1) > 0.15) * isB_all
# poisson_qcd = (np.random.rand(X_all.shape[0], 1) > 0.6) * (1 - isB_all)
# SV = poisson_qcd + poisson_b if add_
# svs else np.random.rand(X_all.shape[0], 1)
# X_2Ds_0 = X_2Ds_0 + noise*(isMC_all<.1)
# X_3Ds_0 = X_3Ds_0 + noise2*(isMC_all<.1)
# X_3Ds = X_3Ds + noise * X_3Ds # * X_3Ds * (isMC_all<.1)
# X_2Ds = X_2Ds + noise * X_2Ds #* X_2Ds * (isMC_all<.1)
# X_ptRel= noise #* X_3Ds * (isMC_all<.1)
# X_ptPro= noise #* X_3Ds * (isMC_all<.1)
return np.concatenate(
[X_ptRel, X_2Ds, X_3Ds, X_ptPro], axis=1), isB_all, isMC_all
| [
"numpy.load",
"numpy.random.seed",
"numpy.multiply",
"numpy.abs",
"numpy.random.randint",
"numpy.reshape",
"glob.glob",
"numpy.concatenate"
] | [((200, 215), 'numpy.load', 'np.load', (['x_file'], {}), '(x_file)\n', (207, 215), True, 'import numpy as np\n'), ((228, 243), 'numpy.load', 'np.load', (['y_file'], {}), '(y_file)\n', (235, 243), True, 'import numpy as np\n'), ((94, 132), 'glob.glob', 'glob', (["('%s/*features_0.npy' % input_dir)"], {}), "('%s/*features_0.npy' % input_dir)\n", (98, 132), False, 'from glob import glob\n'), ((149, 184), 'glob.glob', 'glob', (["('%s/*truth_1.npy' % input_dir)"], {}), "('%s/*truth_1.npy' % input_dir)\n", (153, 184), False, 'from glob import glob\n'), ((584, 604), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (598, 604), True, 'import numpy as np\n'), ((624, 665), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'X_all.shape[0]'}), '(2, size=X_all.shape[0])\n', (641, 665), True, 'import numpy as np\n'), ((685, 729), 'numpy.reshape', 'np.reshape', (['isMC_all', '(isMC_all.shape[0], 1)'], {}), '(isMC_all, (isMC_all.shape[0], 1))\n', (695, 729), True, 'import numpy as np\n'), ((1374, 1418), 'numpy.reshape', 'np.reshape', (['selected', '(selected.shape[0], 1)'], {}), '(selected, (selected.shape[0], 1))\n', (1384, 1418), True, 'import numpy as np\n'), ((1432, 1458), 'numpy.multiply', 'np.multiply', (['Xin', 'selected'], {}), '(Xin, selected)\n', (1443, 1458), True, 'import numpy as np\n'), ((2124, 2180), 'numpy.concatenate', 'np.concatenate', (['[X_ptRel, X_2Ds, X_3Ds, X_ptPro]'], {'axis': '(1)'}), '([X_ptRel, X_2Ds, X_3Ds, X_ptPro], axis=1)\n', (2138, 2180), True, 'import numpy as np\n'), ((1254, 1270), 'numpy.abs', 'np.abs', (['selected'], {}), '(selected)\n', (1260, 1270), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 no BOM -*-
"""
Unit test for the orientation module from hexomap package.
"""
import unittest
import numpy as np
from functools import reduce
from hexomap.orientation import Quaternion
from hexomap.orientation import Eulers
from hexomap.orientation import Frame
from hexomap.orientation import Orientation
from hexomap.npmath import ang_between
from hexomap.npmath import random_three_vector
from hexomap.npmath import normalize
class TestQuaternion(unittest.TestCase):
def setUp(self):
self.n_cases = 1000
self.angs = np.random.random(self.n_cases) * np.pi
self.axis = (np.random.random(3) - 0.5)
self.qs = [Quaternion.from_angle_axis(ang, self.axis)
for ang in self.angs
]
def test_reduce(self):
# the cumulative rotations around the same axis should be the same
# as the one single rotation with the total rotation angle
q_reduced = reduce(Quaternion.combine_two, self.qs)
q_target = Quaternion.from_angle_axis(sum(self.angs), self.axis)
np.testing.assert_allclose(q_reduced.as_array, q_target.as_array)
def test_average_fixaxis(self):
q_avg = Quaternion.average_quaternions(self.qs)
q_target = Quaternion.from_angle_axis(np.average(self.angs), self.axis)
np.testing.assert_allclose(q_avg.as_array,
q_target.as_array,
rtol=1e-01,
)
def test_rotate_vec(self):
# the testing rotation axis need to be perpendicular to the vector,
# otherwise the angle is not the same as the input step
ang_step = np.radians(10)
vec = np.array([1,0,0])
axis = np.array([0,0,1])
for _ in range(5):
new_vec = Quaternion.quatrotate(Quaternion.from_angle_axis(ang_step, axis), vec)
np.testing.assert_allclose(ang_step, ang_between(vec, new_vec))
vec = new_vec
def test_conversion_quaternion_eulers(self):
for _ in range(self.n_cases):
euler = Eulers(*((np.random.random(3)-0.5)*2*np.pi))
q = Quaternion.from_eulers(euler)
np.testing.assert_allclose(euler.as_array, q.as_eulers.as_array)
def test_conversion_quaternion_matrix(self):
for _ in range(self.n_cases):
m = Eulers(*((np.random.random(3)-0.5)*2*np.pi)).as_matrix
q = Quaternion.from_matrix(m)
np.testing.assert_allclose(m, q.as_matrix)
class TestFrame(unittest.TestCase):
def setUp(self):
# ref
# http://www.continuummechanics.org/techforms/Tensor.html
self.f1 = Frame(np.array([1, 0, 0]),
np.array([0, 1, 0]),
np.array([0, 0, 1]),
np.array([0, 0, 0]),
'old',
)
# simple 45 degree rotation around z
sqr2 = np.sqrt(2)
self.f2 = Frame(np.array([ 1/sqr2, 1/sqr2, 0]),
np.array([-1/sqr2, 1/sqr2, 0]),
np.array([0, 0, 1]),
np.array([0, 0, 0]),
'r_z_45',
)
# random non-orthogonal bases
q = Quaternion.from_random()
self.f3 = Frame(Quaternion.quatrotate(q, self.f1.e1),
Quaternion.quatrotate(q, self.f1.e2),
Quaternion.quatrotate(q, self.f1.e3),
np.array([0, 0, 0]), 'randomlyRotated')
# tensor in f2 frame
self.tnsr_f2 = np.array([[3, 0, 0],
[0, -1, 0],
[0, 0, 0],
]) # bi-axial along x,y in f2
self.tnsr_f1 = np.array([[ 1, 2, 0],
[ 2, 1, 0],
[ 0, 0, 0],
]) # same tensor in f1
def test_transformation(self):
# f2 bases are described in f1, therefore if we transfer f2.base|f1
# from f1 to f2, we should get identity matrix, which is the natural
# trievel outcome.
_base = np.array(self.f2.base).T # to column space
np.testing.assert_allclose(np.eye(3),
Frame.transform_vector(_base, self.f1, self.f2)
)
# randomly rotated frame should also work
_base = np.array(self.f3.base).T
np.testing.assert_allclose(np.eye(3),
Frame.transform_vector(_base, self.f1, self.f3),
atol=1e-8,
)
# tensor transform
# NOTE:
# need better testing case here
np.testing.assert_allclose(self.tnsr_f2,
Frame.transform_tensor(self.tnsr_f1, self.f1, self.f2),
)
np.testing.assert_allclose(self.tnsr_f1,
Frame.transform_tensor(self.tnsr_f2, self.f2, self.f1),
)
class TestOrientation(unittest.TestCase):
def setUp(self):
pass
def test_misorientation_general(self):
frame_lab = Frame()
# test_0 general case (None)
axis = random_three_vector()
q_0 = Quaternion.from_angle_axis(0, axis)
o_0 = Orientation(q_0, frame_lab)
for _ in range(100):
# avoid symmetrically
ang = (np.random.random())*np.pi/5
o_i = Orientation(Quaternion.from_angle_axis(ang, axis), frame_lab)
np.testing.assert_allclose(ang, o_0.misorientation(o_i, None)[0])
def test_misorientation_cubic(self):
frame_lab = Frame()
# cubic symmetry
axis = np.array([0,0,1])
ang0 = np.random.random()*np.pi
ang1 = ang0 + np.pi/2
o_0 = Orientation(Quaternion.from_angle_axis(ang0, axis), frame_lab)
o_1 = Orientation(Quaternion.from_angle_axis(ang1, axis), frame_lab)
np.testing.assert_allclose(0, o_0.misorientation(o_1, 'cubic')[0])
def test_misorientation_hexagonal(self):
frame_lab = Frame()
# cubic symmetry
axis = np.array([0,0,1])
ang0 = np.random.random()*np.pi
ang1 = ang0 + np.pi/3
o_0 = Orientation(Quaternion.from_angle_axis(ang0, axis), frame_lab)
o_1 = Orientation(Quaternion.from_angle_axis(ang1, axis), frame_lab)
np.testing.assert_allclose(0, o_0.misorientation(o_1, 'hcp')[0])
if __name__ == "__main__":
unittest.main()
| [
"hexomap.orientation.Quaternion.average_quaternions",
"hexomap.orientation.Frame.transform_tensor",
"hexomap.orientation.Frame",
"unittest.main",
"hexomap.npmath.ang_between",
"numpy.testing.assert_allclose",
"numpy.radians",
"numpy.average",
"hexomap.npmath.random_three_vector",
"hexomap.orientat... | [((6709, 6724), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6722, 6724), False, 'import unittest\n'), ((991, 1030), 'functools.reduce', 'reduce', (['Quaternion.combine_two', 'self.qs'], {}), '(Quaternion.combine_two, self.qs)\n', (997, 1030), False, 'from functools import reduce\n'), ((1112, 1177), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['q_reduced.as_array', 'q_target.as_array'], {}), '(q_reduced.as_array, q_target.as_array)\n', (1138, 1177), True, 'import numpy as np\n'), ((1231, 1270), 'hexomap.orientation.Quaternion.average_quaternions', 'Quaternion.average_quaternions', (['self.qs'], {}), '(self.qs)\n', (1261, 1270), False, 'from hexomap.orientation import Quaternion\n'), ((1359, 1430), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['q_avg.as_array', 'q_target.as_array'], {'rtol': '(0.1)'}), '(q_avg.as_array, q_target.as_array, rtol=0.1)\n', (1385, 1430), True, 'import numpy as np\n'), ((1735, 1749), 'numpy.radians', 'np.radians', (['(10)'], {}), '(10)\n', (1745, 1749), True, 'import numpy as np\n'), ((1764, 1783), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (1772, 1783), True, 'import numpy as np\n'), ((1797, 1816), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1805, 1816), True, 'import numpy as np\n'), ((3009, 3019), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3016, 3019), True, 'import numpy as np\n'), ((3331, 3355), 'hexomap.orientation.Quaternion.from_random', 'Quaternion.from_random', ([], {}), '()\n', (3353, 3355), False, 'from hexomap.orientation import Quaternion\n'), ((3667, 3711), 'numpy.array', 'np.array', (['[[3, 0, 0], [0, -1, 0], [0, 0, 0]]'], {}), '([[3, 0, 0], [0, -1, 0], [0, 0, 0]])\n', (3675, 3711), True, 'import numpy as np\n'), ((3865, 3908), 'numpy.array', 'np.array', (['[[1, 2, 0], [2, 1, 0], [0, 0, 0]]'], {}), '([[1, 2, 0], [2, 1, 0], [0, 0, 0]])\n', (3873, 3908), True, 'import numpy as np\n'), ((5372, 5379), 
'hexomap.orientation.Frame', 'Frame', ([], {}), '()\n', (5377, 5379), False, 'from hexomap.orientation import Frame\n'), ((5432, 5453), 'hexomap.npmath.random_three_vector', 'random_three_vector', ([], {}), '()\n', (5451, 5453), False, 'from hexomap.npmath import random_three_vector\n'), ((5468, 5503), 'hexomap.orientation.Quaternion.from_angle_axis', 'Quaternion.from_angle_axis', (['(0)', 'axis'], {}), '(0, axis)\n', (5494, 5503), False, 'from hexomap.orientation import Quaternion\n'), ((5518, 5545), 'hexomap.orientation.Orientation', 'Orientation', (['q_0', 'frame_lab'], {}), '(q_0, frame_lab)\n', (5529, 5545), False, 'from hexomap.orientation import Orientation\n'), ((5878, 5885), 'hexomap.orientation.Frame', 'Frame', ([], {}), '()\n', (5883, 5885), False, 'from hexomap.orientation import Frame\n'), ((5926, 5945), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (5934, 5945), True, 'import numpy as np\n'), ((6313, 6320), 'hexomap.orientation.Frame', 'Frame', ([], {}), '()\n', (6318, 6320), False, 'from hexomap.orientation import Frame\n'), ((6361, 6380), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (6369, 6380), True, 'import numpy as np\n'), ((585, 615), 'numpy.random.random', 'np.random.random', (['self.n_cases'], {}), '(self.n_cases)\n', (601, 615), True, 'import numpy as np\n'), ((645, 664), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (661, 664), True, 'import numpy as np\n'), ((691, 733), 'hexomap.orientation.Quaternion.from_angle_axis', 'Quaternion.from_angle_axis', (['ang', 'self.axis'], {}), '(ang, self.axis)\n', (717, 733), False, 'from hexomap.orientation import Quaternion\n'), ((1317, 1338), 'numpy.average', 'np.average', (['self.angs'], {}), '(self.angs)\n', (1327, 1338), True, 'import numpy as np\n'), ((2206, 2235), 'hexomap.orientation.Quaternion.from_eulers', 'Quaternion.from_eulers', (['euler'], {}), '(euler)\n', (2228, 2235), False, 'from hexomap.orientation import Quaternion\n'), 
((2248, 2312), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['euler.as_array', 'q.as_eulers.as_array'], {}), '(euler.as_array, q.as_eulers.as_array)\n', (2274, 2312), True, 'import numpy as np\n'), ((2492, 2517), 'hexomap.orientation.Quaternion.from_matrix', 'Quaternion.from_matrix', (['m'], {}), '(m)\n', (2514, 2517), False, 'from hexomap.orientation import Quaternion\n'), ((2530, 2572), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['m', 'q.as_matrix'], {}), '(m, q.as_matrix)\n', (2556, 2572), True, 'import numpy as np\n'), ((2737, 2756), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2745, 2756), True, 'import numpy as np\n'), ((2782, 2801), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (2790, 2801), True, 'import numpy as np\n'), ((2827, 2846), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (2835, 2846), True, 'import numpy as np\n'), ((2872, 2891), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2880, 2891), True, 'import numpy as np\n'), ((3044, 3077), 'numpy.array', 'np.array', (['[1 / sqr2, 1 / sqr2, 0]'], {}), '([1 / sqr2, 1 / sqr2, 0])\n', (3052, 3077), True, 'import numpy as np\n'), ((3100, 3134), 'numpy.array', 'np.array', (['[-1 / sqr2, 1 / sqr2, 0]'], {}), '([-1 / sqr2, 1 / sqr2, 0])\n', (3108, 3134), True, 'import numpy as np\n'), ((3156, 3175), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (3164, 3175), True, 'import numpy as np\n'), ((3201, 3220), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3209, 3220), True, 'import numpy as np\n'), ((3380, 3416), 'hexomap.orientation.Quaternion.quatrotate', 'Quaternion.quatrotate', (['q', 'self.f1.e1'], {}), '(q, self.f1.e1)\n', (3401, 3416), False, 'from hexomap.orientation import Quaternion\n'), ((3442, 3478), 'hexomap.orientation.Quaternion.quatrotate', 'Quaternion.quatrotate', (['q', 'self.f1.e2'], {}), '(q, self.f1.e2)\n', (3463, 3478), False, 'from 
hexomap.orientation import Quaternion\n'), ((3504, 3540), 'hexomap.orientation.Quaternion.quatrotate', 'Quaternion.quatrotate', (['q', 'self.f1.e3'], {}), '(q, self.f1.e3)\n', (3525, 3540), False, 'from hexomap.orientation import Quaternion\n'), ((3566, 3585), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3574, 3585), True, 'import numpy as np\n'), ((4269, 4291), 'numpy.array', 'np.array', (['self.f2.base'], {}), '(self.f2.base)\n', (4277, 4291), True, 'import numpy as np\n'), ((4348, 4357), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4354, 4357), True, 'import numpy as np\n'), ((4395, 4442), 'hexomap.orientation.Frame.transform_vector', 'Frame.transform_vector', (['_base', 'self.f1', 'self.f2'], {}), '(_base, self.f1, self.f2)\n', (4417, 4442), False, 'from hexomap.orientation import Frame\n'), ((4552, 4574), 'numpy.array', 'np.array', (['self.f3.base'], {}), '(self.f3.base)\n', (4560, 4574), True, 'import numpy as np\n'), ((4612, 4621), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4618, 4621), True, 'import numpy as np\n'), ((4659, 4706), 'hexomap.orientation.Frame.transform_vector', 'Frame.transform_vector', (['_base', 'self.f1', 'self.f3'], {}), '(_base, self.f1, self.f3)\n', (4681, 4706), False, 'from hexomap.orientation import Frame\n'), ((4965, 5019), 'hexomap.orientation.Frame.transform_tensor', 'Frame.transform_tensor', (['self.tnsr_f1', 'self.f1', 'self.f2'], {}), '(self.tnsr_f1, self.f1, self.f2)\n', (4987, 5019), False, 'from hexomap.orientation import Frame\n'), ((5140, 5194), 'hexomap.orientation.Frame.transform_tensor', 'Frame.transform_tensor', (['self.tnsr_f2', 'self.f2', 'self.f1'], {}), '(self.tnsr_f2, self.f2, self.f1)\n', (5162, 5194), False, 'from hexomap.orientation import Frame\n'), ((5959, 5977), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5975, 5977), True, 'import numpy as np\n'), ((6040, 6078), 'hexomap.orientation.Quaternion.from_angle_axis', 'Quaternion.from_angle_axis', (['ang0', 'axis'], 
{}), '(ang0, axis)\n', (6066, 6078), False, 'from hexomap.orientation import Quaternion\n'), ((6117, 6155), 'hexomap.orientation.Quaternion.from_angle_axis', 'Quaternion.from_angle_axis', (['ang1', 'axis'], {}), '(ang1, axis)\n', (6143, 6155), False, 'from hexomap.orientation import Quaternion\n'), ((6394, 6412), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6410, 6412), True, 'import numpy as np\n'), ((6475, 6513), 'hexomap.orientation.Quaternion.from_angle_axis', 'Quaternion.from_angle_axis', (['ang0', 'axis'], {}), '(ang0, axis)\n', (6501, 6513), False, 'from hexomap.orientation import Quaternion\n'), ((6552, 6590), 'hexomap.orientation.Quaternion.from_angle_axis', 'Quaternion.from_angle_axis', (['ang1', 'axis'], {}), '(ang1, axis)\n', (6578, 6590), False, 'from hexomap.orientation import Quaternion\n'), ((1886, 1928), 'hexomap.orientation.Quaternion.from_angle_axis', 'Quaternion.from_angle_axis', (['ang_step', 'axis'], {}), '(ang_step, axis)\n', (1912, 1928), False, 'from hexomap.orientation import Quaternion\n'), ((1984, 2009), 'hexomap.npmath.ang_between', 'ang_between', (['vec', 'new_vec'], {}), '(vec, new_vec)\n', (1995, 2009), False, 'from hexomap.npmath import ang_between\n'), ((5688, 5725), 'hexomap.orientation.Quaternion.from_angle_axis', 'Quaternion.from_angle_axis', (['ang', 'axis'], {}), '(ang, axis)\n', (5714, 5725), False, 'from hexomap.orientation import Quaternion\n'), ((5630, 5648), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5646, 5648), True, 'import numpy as np\n'), ((2155, 2174), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (2171, 2174), True, 'import numpy as np\n'), ((2431, 2450), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (2447, 2450), True, 'import numpy as np\n')] |
import numpy as np
import torch as th
from torch.utils.data import Dataset
from os.path import join
from sklearn.datasets import olivetti_faces
from PIL import Image
class OlivettiFaces(Dataset):
    """Olivetti faces as a torch Dataset yielding (transformed image, label)."""

    def __init__(self, data_dir, split, transform):
        super().__init__()
        images, labels = load_olivetti_faces(data_dir, split)
        self.data = th.from_numpy(images)
        self.targets = th.from_numpy(labels).long()
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        # Convert the uint8 tensor back to a grayscale PIL image so the
        # torchvision-style transform pipeline can be applied.
        pil_img = Image.fromarray(self.data[i].numpy(), mode='L')
        return self.transform(pil_img), self.targets[i]
def load_olivetti_faces(data_dir, split="train"):
    """Fetch the Olivetti faces and return a reproducible per-class split.

    The 400 faces (40 subjects x 10 images each) are fetched via
    scikit-learn, shuffled with a fixed seed, and partitioned per subject
    into 5 train / 2 val / 3 test images.

    Args:
        data_dir: Directory used as scikit-learn's ``data_home`` cache.
        split: One of "train", "val", "trainval" or "test".

    Returns:
        Tuple ``(X, Y)`` of a uint8 image array with one row per selected
        sample and a uint8 label array of matching length.
    """
    assert split in ("train", "val", "trainval", "test")
    faces = olivetti_faces.fetch_olivetti_faces(
        data_home=join(data_dir),
        shuffle=True,
        random_state=100,  # fixed seed so the per-class split is reproducible
        download_if_missing=True
    )
    X = np.uint8(faces.images * 255.0)  # float [0, 1] -> uint8 [0, 255]
    Y = faces.target
    n_tr = 5  # number of samples for each class in the training set
    n_va = 2  # number of samples for each class in the validation set
    n_te = 3  # number of samples for each class in the test set
    # Per-class index slice and sample count for each split.
    # BUG FIX: the original "val" branch sliced [n_tr:n_va] == [5:2], an
    # empty slice, which produced an empty validation X while Y still held
    # 40 * n_va labels and tripped the shape assertion below.
    split_slices = {
        "train": (slice(None, n_tr), n_tr),
        "val": (slice(n_tr, n_tr + n_va), n_va),
        "trainval": (slice(None, n_tr + n_va), n_tr + n_va),
        "test": (slice(-n_te, None), n_te),
    }
    sel, per_class = split_slices[split]
    X = np.concatenate([X[np.where(Y == c)[0][sel]] for c in range(40)])
    Y = np.concatenate([np.full([per_class], c, dtype='uint8') for c in range(40)])
    assert X.shape[0] == Y.shape[0]
    return X, Y
| [
"numpy.uint8",
"numpy.ones",
"numpy.where",
"os.path.join",
"torch.from_numpy"
] | [((1046, 1076), 'numpy.uint8', 'np.uint8', (['(faces.images * 255.0)'], {}), '(faces.images * 255.0)\n', (1054, 1076), True, 'import numpy as np\n'), ((359, 378), 'torch.from_numpy', 'th.from_numpy', (['data'], {}), '(data)\n', (372, 378), True, 'import torch as th\n'), ((934, 948), 'os.path.join', 'join', (['data_dir'], {}), '(data_dir)\n', (938, 948), False, 'from os.path import join\n'), ((402, 424), 'torch.from_numpy', 'th.from_numpy', (['targets'], {}), '(targets)\n', (415, 424), True, 'import torch as th\n'), ((1437, 1461), 'numpy.ones', 'np.ones', (['[n_tr]', '"""uint8"""'], {}), "([n_tr], 'uint8')\n", (1444, 1461), True, 'import numpy as np\n'), ((1623, 1647), 'numpy.ones', 'np.ones', (['[n_va]', '"""uint8"""'], {}), "([n_va], 'uint8')\n", (1630, 1647), True, 'import numpy as np\n'), ((1360, 1376), 'numpy.where', 'np.where', (['(Y == c)'], {}), '(Y == c)\n', (1368, 1376), True, 'import numpy as np\n'), ((1815, 1846), 'numpy.ones', 'np.ones', (['[n_tr + n_va]', '"""uint8"""'], {}), "([n_tr + n_va], 'uint8')\n", (1822, 1846), True, 'import numpy as np\n'), ((1995, 2019), 'numpy.ones', 'np.ones', (['[n_te]', '"""uint8"""'], {}), "([n_te], 'uint8')\n", (2002, 2019), True, 'import numpy as np\n'), ((1542, 1558), 'numpy.where', 'np.where', (['(Y == c)'], {}), '(Y == c)\n', (1550, 1558), True, 'import numpy as np\n'), ((1733, 1749), 'numpy.where', 'np.where', (['(Y == c)'], {}), '(Y == c)\n', (1741, 1749), True, 'import numpy as np\n'), ((1917, 1933), 'numpy.where', 'np.where', (['(Y == c)'], {}), '(Y == c)\n', (1925, 1933), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TF-GAN's estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from typing import Mapping, Optional
from absl.testing import parameterized
import numpy as np
import six
import tensorflow as tf
import tensorflow_gan as tfgan
# Private functions to test.
from tensorflow_gan.python.estimator.gan_estimator import extract_gan_loss_args_from_params
from tensorflow_gan.python.estimator.gan_estimator import get_eval_estimator_spec
from tensorflow_gan.python.estimator.gan_estimator import get_gan_model
from tensorflow_gan.python.estimator.gan_estimator import get_predict_estimator_spec
from tensorflow_gan.python.estimator.gan_estimator import get_train_estimator_spec
from tensorflow_gan.python.estimator.gan_estimator import Optimizers
def get_sync_optimizer():
  """Return a SyncReplicasOptimizer wrapping plain SGD (lr=1.0, 1 replica)."""
  base_opt = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=1.0)
  return tf.compat.v1.train.SyncReplicasOptimizer(
      base_opt, replicas_to_aggregate=1)
def get_sync_optimizer_hook_type():
  """Return the class of the session-run hook a sync optimizer produces."""
  sample_hook = get_sync_optimizer().make_session_run_hook(is_chief=True)
  return type(sample_hook)
def generator_fn(noise_dict: Mapping[str, tf.Tensor],
                 mode: tf.estimator.ModeKeys) -> tf.Tensor:
  """Minimal generator: one dense layer with the same width as the noise."""
  del mode  # unused; accepted to match the generator_fn signature
  noise = noise_dict['x']
  out_width = tf.compat.dimension_value(noise.shape[1])
  return tf.compat.v1.layers.dense(noise, out_width)
def discriminator_fn(data: tf.Tensor, unused_conditioning: Optional[tf.Tensor],
                     mode: tf.estimator.ModeKeys) -> tf.Tensor:
  """Minimal discriminator: a single dense unit producing one logit."""
  del unused_conditioning, mode  # unused; accepted to match the signature
  logit = tf.compat.v1.layers.dense(data, 1)
  return logit
class GetGANModelTest(tf.test.TestCase, parameterized.TestCase):
  """Tests that `GetGANModel` produces the correct model."""

  # Run the same test body once per estimator mode.
  @parameterized.named_parameters(('train', tf.estimator.ModeKeys.TRAIN),
                                  ('eval', tf.estimator.ModeKeys.EVAL),
                                  ('predict', tf.estimator.ModeKeys.PREDICT))
  def test_get_gan_model(self, mode):
    with tf.Graph().as_default():
      generator_inputs = {'x': tf.ones([3, 4])}
      # PREDICT mode has no labels, so no real data is provided.
      is_predict = mode == tf.estimator.ModeKeys.PREDICT
      real_data = tf.zeros([3, 4]) if not is_predict else None
      gan_model = get_gan_model(
          mode,
          generator_fn,
          discriminator_fn,
          real_data,
          generator_inputs,
          add_summaries=False)

    # The generator side is populated in every mode.
    self.assertEqual(generator_inputs, gan_model.generator_inputs)
    self.assertIsNotNone(gan_model.generated_data)
    self.assertLen(gan_model.generator_variables, 2)  # 1 FC layer
    self.assertIsNotNone(gan_model.generator_fn)
    if mode == tf.estimator.ModeKeys.PREDICT:
      # Without real data the discriminator half of the model is left unset.
      self.assertIsNone(gan_model.real_data)
      self.assertIsNone(gan_model.discriminator_real_outputs)
      self.assertIsNone(gan_model.discriminator_gen_outputs)
      self.assertIsNone(gan_model.discriminator_variables)
      self.assertIsNone(gan_model.discriminator_scope)
      self.assertIsNone(gan_model.discriminator_fn)
    else:
      self.assertIsNotNone(gan_model.real_data)
      self.assertIsNotNone(gan_model.discriminator_real_outputs)
      self.assertIsNotNone(gan_model.discriminator_gen_outputs)
      self.assertLen(gan_model.discriminator_variables, 2)  # 1 FC layer
      self.assertIsNotNone(gan_model.discriminator_scope)
      self.assertIsNotNone(gan_model.discriminator_fn)
def get_dummy_gan_model():
  """Return a minimal `tfgan.GANModel` with one trainable variable per net."""
  # TODO(joelshor): Find a better way of creating a variable scope.
  with tf.compat.v1.variable_scope('generator') as gen_scope:
    gen_var = tf.compat.v1.get_variable('dummy_var', initializer=0.0)
  with tf.compat.v1.variable_scope('discriminator') as dis_scope:
    dis_var = tf.compat.v1.get_variable('dummy_var', initializer=0.0)
  return tfgan.GANModel(
      generator_inputs=None,
      generated_data=tf.ones([3, 4]),
      generator_variables=[gen_var],
      generator_scope=gen_scope,
      generator_fn=None,
      real_data=tf.zeros([3, 4]),
      # The outputs depend on the dummy variables so gradients can flow to
      # both networks during a training step.
      discriminator_real_outputs=tf.ones([1, 2, 3]) * dis_var,
      discriminator_gen_outputs=tf.ones([1, 2, 3]) * gen_var * dis_var,
      discriminator_variables=[dis_var],
      discriminator_scope=dis_scope,
      discriminator_fn=None)
def dummy_loss_fn(gan_model, add_summaries=True):
  """Toy GAN loss: sum of (real - generated) discriminator outputs."""
  del add_summaries  # unused; accepted to match the loss-fn signature
  output_diff = (gan_model.discriminator_real_outputs -
                 gan_model.discriminator_gen_outputs)
  return tf.reduce_sum(input_tensor=output_diff)
def get_metrics(gan_model):
  """Build the custom eval metric ops: MSE between real and generated data."""
  mse_op = tf.compat.v1.metrics.mean_squared_error(gan_model.real_data,
                                               gan_model.generated_data)
  return {'mse_custom_metric': mse_op}
class GetEstimatorSpecTest(tf.test.TestCase, parameterized.TestCase):
  """Tests that the EstimatorSpec is constructed appropriately."""

  @classmethod
  def setUpClass(cls):
    super(GetEstimatorSpecTest, cls).setUpClass()
    # Shared plain-SGD optimizers; reused by every test in this class.
    cls._generator_optimizer = tf.compat.v1.train.GradientDescentOptimizer(1.0)
    cls._discriminator_optimizer = tf.compat.v1.train.GradientDescentOptimizer(
        1.0)

  def test_get_train_estimator_spec(self):
    with tf.Graph().as_default():
      gan_model = get_dummy_gan_model()
      gan_loss = tfgan.gan_loss(gan_model, dummy_loss_fn, dummy_loss_fn)
      spec = get_train_estimator_spec(
          gan_model,
          gan_loss,
          Optimizers(self._generator_optimizer, self._discriminator_optimizer),
          get_hooks_fn=None,  # use default.
          is_chief=True)

    # A TRAIN spec must carry a scalar loss, a train op and training hooks.
    self.assertEqual(tf.estimator.ModeKeys.TRAIN, spec.mode)
    self.assertShapeEqual(np.array(0), spec.loss)  # must be a scalar
    self.assertIsNotNone(spec.train_op)
    self.assertIsNotNone(spec.training_hooks)

  def test_get_eval_estimator_spec(self):
    with tf.Graph().as_default():
      gan_model = get_dummy_gan_model()
      gan_loss = tfgan.gan_loss(gan_model, dummy_loss_fn, dummy_loss_fn)
      spec = get_eval_estimator_spec(
          gan_model,
          gan_loss,
          get_eval_metric_ops_fn=get_metrics)

    # An EVAL spec predicts the generated data and exposes metric ops.
    self.assertEqual(tf.estimator.ModeKeys.EVAL, spec.mode)
    self.assertEqual(gan_model.generated_data, spec.predictions)
    self.assertShapeEqual(np.array(0), spec.loss)  # must be a scalar
    self.assertIsNotNone(spec.eval_metric_ops)

  def test_get_predict_estimator_spec(self):
    with tf.Graph().as_default():
      gan_model = get_dummy_gan_model()
      spec = get_predict_estimator_spec(gan_model)

    # A PREDICT spec only needs the generated data; no loss or train op.
    self.assertEqual(tf.estimator.ModeKeys.PREDICT, spec.mode)
    self.assertEqual(gan_model.generated_data, spec.predictions)
class GANEstimatorIntegrationTest(tf.test.TestCase):
  """End-to-end train / evaluate / predict tests for `GANEstimator`."""

  def setUp(self):
    super(GANEstimatorIntegrationTest, self).setUp()
    # Fresh checkpoint/summary directory per test.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    super(GANEstimatorIntegrationTest, self).tearDown()
    if self._model_dir:
      # Flush cached summary writers before deleting the directory they
      # write into, then remove the temporary model dir.
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(self,
                          train_input_fn,
                          eval_input_fn,
                          predict_input_fn,
                          prediction_size,
                          lr_decay=False):
    """Trains, evaluates and predicts with a `GANEstimator`.

    Args:
      train_input_fn: input_fn passed to `est.train`.
      eval_input_fn: input_fn passed to `est.evaluate`.
      predict_input_fn: input_fn passed to `est.predict`.
      prediction_size: Expected shape of the stacked prediction array.
      lr_decay: If True, both optimizers are built lazily (via a callable)
        with an exponentially decaying learning rate, since the rate must
        be created after the global step exists.
    """
    def make_opt():
      # Optimizer factory used in the lr_decay case: SGD with a learning
      # rate that decays by 0.9 every 10 steps from an initial value of 1.0.
      gstep = tf.compat.v1.train.get_or_create_global_step()
      lr = tf.compat.v1.train.exponential_decay(1.0, gstep, 10, 0.9)
      return tf.compat.v1.train.GradientDescentOptimizer(lr)

    gopt = make_opt if lr_decay else tf.compat.v1.train.GradientDescentOptimizer(
        1.0)
    dopt = make_opt if lr_decay else tf.compat.v1.train.GradientDescentOptimizer(
        1.0)
    est = tfgan.estimator.GANEstimator(
        generator_fn=generator_fn,
        discriminator_fn=discriminator_fn,
        generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
        discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
        generator_optimizer=gopt,
        discriminator_optimizer=dopt,
        get_eval_metric_ops_fn=get_metrics,
        model_dir=self._model_dir)

    # Train.
    num_steps = 10
    est.train(train_input_fn, steps=num_steps)

    # Evaluate.
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))
    # The reported top-level `loss` is expected to equal the discriminator
    # loss, and the custom metric from `get_metrics` must be present.
    self.assertEqual(scores['discriminator_loss'], scores['loss'])
    self.assertIn('mse_custom_metric', six.iterkeys(scores))

    # Predict.
    predictions = np.array([x for x in est.predict(predict_input_fn)])
    self.assertAllEqual(prediction_size, predictions.shape)

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    input_dim = 4
    batch_size = 5
    data = np.zeros([batch_size, input_dim])
    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
        x={'x': data}, y=data, batch_size=batch_size, shuffle=False)
    predict_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
        x={'x': data}, batch_size=batch_size, shuffle=False)

    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        prediction_size=[batch_size, input_dim])

  def test_numpy_input_fn_lrdecay(self):
    """Tests complete flow with numpy_input_fn."""
    input_dim = 4
    batch_size = 5
    data = np.zeros([batch_size, input_dim])
    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
        x={'x': data}, y=data, batch_size=batch_size, shuffle=False)
    predict_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
        x={'x': data}, batch_size=batch_size, shuffle=False)

    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        prediction_size=[batch_size, input_dim],
        lr_decay=True)

  def test_input_fn_from_parse_example(self):
    """Tests complete flow with input_fn constructed from parse_example."""
    input_dim = 4
    batch_size = 6
    data = np.zeros([batch_size, input_dim])

    # Serialize each row as a tf.train.Example whose 'x' (features) and 'y'
    # (labels) hold the same values.
    serialized_examples = []
    for datum in data:
      example = tf.train.Example(
          features=tf.train.Features(
              feature={
                  'x':
                      tf.train.Feature(
                          float_list=tf.train.FloatList(value=datum)),
                  'y':
                      tf.train.Feature(
                          float_list=tf.train.FloatList(value=datum)),
              }))
      serialized_examples.append(example.SerializeToString())

    feature_spec = {
        'x': tf.io.FixedLenFeature([input_dim], tf.float32),
        'y': tf.io.FixedLenFeature([input_dim], tf.float32),
    }

    def _train_input_fn():
      feature_map = tf.io.parse_example(
          serialized=serialized_examples, features=feature_spec)
      features = {'x': feature_map['x']}
      labels = feature_map['y']
      return features, labels

    def _eval_input_fn():
      # limit_epochs makes evaluation stop after one pass over the data.
      feature_map = tf.io.parse_example(
          serialized=tf.compat.v1.train.limit_epochs(
              serialized_examples, num_epochs=1),
          features=feature_spec)
      features = {'x': feature_map['x']}
      labels = feature_map['y']
      return features, labels

    def _predict_input_fn():
      # Prediction input has no labels.
      feature_map = tf.io.parse_example(
          serialized=tf.compat.v1.train.limit_epochs(
              serialized_examples, num_epochs=1),
          features=feature_spec)
      features = {'x': feature_map['x']}
      return features, None

    self._test_complete_flow(
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        prediction_size=[batch_size, input_dim])
class GANEstimatorWarmStartTest(tf.test.TestCase):
  """Tests warm-starting a `GANEstimator` from a previous checkpoint."""

  def setUp(self):
    super(GANEstimatorWarmStartTest, self).setUp()
    self._model_dir = self.get_temp_dir()
    # A variable that exists only in the second (warm-started) generator.
    self.new_variable_name = 'new_var'
    self.new_variable_value = [1, 2, 3]

  def tearDown(self):
    super(GANEstimatorWarmStartTest, self).tearDown()
    tf.compat.v1.summary.FileWriterCache.clear()

  def _test_warm_start(self, warm_start_from=None):
    """Tests whether WarmStartSettings work as intended."""
    def generator_with_new_variable(noise_dict, mode):
      # Same as `generator_fn`, but creates one extra variable that is NOT
      # present in the checkpoint written by the first estimator.
      tf.compat.v1.get_variable(
          name=self.new_variable_name,
          initializer=self.new_variable_value,
          trainable=True)
      return generator_fn(noise_dict, mode)

    def train_input_fn():
      data = np.zeros([3, 4])
      return {'x': data}, data

    # First estimator: trains one step to produce a checkpoint.
    est = tfgan.estimator.GANEstimator(
        generator_fn=generator_fn,
        discriminator_fn=discriminator_fn,
        generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
        discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
        generator_optimizer=tf.compat.v1.train.GradientDescentOptimizer(1.0),
        discriminator_optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            1.0),
        model_dir=self._model_dir)
    est.train(train_input_fn, steps=1)

    # Second estimator: restores from that checkpoint (directly, or via the
    # supplied warm-start settings) even though its generator has a new var.
    est_warm = tfgan.estimator.GANEstimator(
        generator_fn=generator_with_new_variable,
        discriminator_fn=discriminator_fn,
        generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
        discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
        generator_optimizer=tf.compat.v1.train.GradientDescentOptimizer(1.0),
        discriminator_optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            1.0),
        model_dir=None if warm_start_from else self._model_dir,
        warm_start_from=warm_start_from)
    est_warm.train(train_input_fn, steps=1)

    return est_warm

  def test_warm_start_error(self):
    """Test if exception when reloading different estimators."""
    with self.assertRaises(tf.errors.NotFoundError):
      self._test_warm_start()

  def test_warm_start_success(self):
    """Test if GANEstimator allows explicit warm start variable assignment."""
    # Regex matches all variable names in ckpt except for new_var.
    var_regex = '^(?!.*%s.*)' % self.new_variable_name
    warmstart = tf.estimator.WarmStartSettings(
        ckpt_to_initialize_from=self._model_dir, vars_to_warm_start=var_regex)
    est_warm = self._test_warm_start(warm_start_from=warmstart)
    full_variable_name = 'Generator/%s' % self.new_variable_name
    self.assertIn(full_variable_name, est_warm.get_variable_names())
    # The excluded variable must keep its initializer value rather than a
    # value restored from the checkpoint.
    equal_vals = np.array_equal(
        est_warm.get_variable_value(full_variable_name),
        self.new_variable_value)
    self.assertTrue(equal_vals)
class GANEstimatorParamsTest(tf.test.TestCase, parameterized.TestCase):
  """Tests the `params` plumbing of `GANEstimator`."""

  def setUp(self):
    super(GANEstimatorParamsTest, self).setUp()
    self._model_dir = self.get_temp_dir()

  def tearDown(self):
    super(GANEstimatorParamsTest, self).tearDown()
    tf.compat.v1.summary.FileWriterCache.clear()

  @parameterized.named_parameters(
      ('mi_penalty', 1.0),
      ('no_mi_penalty', None))
  def test_params_used(self, mi_penalty):
    def train_input_fn(params):
      # The estimator must forward its `params` dict into the input_fn.
      self.assertIn('batch_size', params)
      data = np.zeros([params['batch_size'], 4])
      return {'x': data}, data

    est = tfgan.estimator.GANEstimator(
        generator_fn=generator_fn,
        discriminator_fn=discriminator_fn,
        generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
        discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
        generator_optimizer=tf.compat.v1.train.GradientDescentOptimizer(1.0),
        discriminator_optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            1.0),
        model_dir=self._model_dir,
        params={
            'batch_size': 4,
            'mutual_information_penalty_weight': mi_penalty
        })

    # NOTE(review): the MI penalty appears to be incompatible with this
    # model setup, so training with it is expected to raise.
    if mi_penalty:
      with self.assertRaises(ValueError):
        est.train(train_input_fn, steps=1)
    else:
      est.train(train_input_fn, steps=1)

  def test_extract_gan_loss_args_from_params(self):
    # Only the recognized gan_loss argument names are extracted.
    params = {'tensor_pool_fn': 1, 'gradient_penalty_target': 2, 'other': 3}
    gan_loss_args = extract_gan_loss_args_from_params(params)
    self.assertEqual(gan_loss_args, {'tensor_pool_fn': 1,
                                     'gradient_penalty_target': 2})

  def test_extract_gan_loss_args_from_params_forbidden(self):
    # Reserved keys (e.g. 'model') must be rejected.
    params = {'tensor_pool_fn': 1, 'model': 2}
    with self.assertRaises(ValueError):
      extract_gan_loss_args_from_params(params)
if __name__ == '__main__':
  # Discover and run all tf.test.TestCase classes in this module.
  tf.test.main()
| [
"tensorflow.reduce_sum",
"tensorflow_gan.python.estimator.gan_estimator.get_gan_model",
"tensorflow.compat.v1.train.exponential_decay",
"tensorflow.train.FloatList",
"shutil.rmtree",
"tensorflow.compat.v1.layers.dense",
"tensorflow.test.main",
"tensorflow.compat.v1.variable_scope",
"six.iterkeys",
... | [((2236, 2270), 'tensorflow.compat.v1.layers.dense', 'tf.compat.v1.layers.dense', (['data', '(1)'], {}), '(data, 1)\n', (2261, 2270), True, 'import tensorflow as tf\n'), ((2403, 2565), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('train', tf.estimator.ModeKeys.TRAIN)", "('eval', tf.estimator.ModeKeys.EVAL)", "('predict', tf.estimator.ModeKeys.PREDICT)"], {}), "(('train', tf.estimator.ModeKeys.TRAIN), (\n 'eval', tf.estimator.ModeKeys.EVAL), ('predict', tf.estimator.ModeKeys.\n PREDICT))\n", (2433, 2565), False, 'from absl.testing import parameterized\n'), ((4942, 5049), 'tensorflow.reduce_sum', 'tf.reduce_sum', ([], {'input_tensor': '(gan_model.discriminator_real_outputs - gan_model.discriminator_gen_outputs)'}), '(input_tensor=gan_model.discriminator_real_outputs - gan_model\n .discriminator_gen_outputs)\n', (4955, 5049), True, 'import tensorflow as tf\n'), ((15846, 15922), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('mi_penalty', 1.0)", "('no_mi_penalty', None)"], {}), "(('mi_penalty', 1.0), ('no_mi_penalty', None))\n", (15876, 15922), False, 'from absl.testing import parameterized\n'), ((17421, 17435), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (17433, 17435), True, 'import tensorflow as tf\n'), ((1548, 1610), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', ([], {'learning_rate': '(1.0)'}), '(learning_rate=1.0)\n', (1591, 1610), True, 'import tensorflow as tf\n'), ((2006, 2047), 'tensorflow.compat.dimension_value', 'tf.compat.dimension_value', (['noise.shape[1]'], {}), '(noise.shape[1])\n', (2031, 2047), True, 'import tensorflow as tf\n'), ((4137, 4177), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""generator"""'], {}), "('generator')\n", (4164, 4177), True, 'import tensorflow as tf\n'), ((4206, 4261), 'tensorflow.compat.v1.get_variable', 'tf.compat.v1.get_variable', 
(['"""dummy_var"""'], {'initializer': '(0.0)'}), "('dummy_var', initializer=0.0)\n", (4231, 4261), True, 'import tensorflow as tf\n'), ((4269, 4313), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""discriminator"""'], {}), "('discriminator')\n", (4296, 4313), True, 'import tensorflow as tf\n'), ((4342, 4397), 'tensorflow.compat.v1.get_variable', 'tf.compat.v1.get_variable', (['"""dummy_var"""'], {'initializer': '(0.0)'}), "('dummy_var', initializer=0.0)\n", (4367, 4397), True, 'import tensorflow as tf\n'), ((5146, 5237), 'tensorflow.compat.v1.metrics.mean_squared_error', 'tf.compat.v1.metrics.mean_squared_error', (['gan_model.real_data', 'gan_model.generated_data'], {}), '(gan_model.real_data, gan_model.\n generated_data)\n', (5185, 5237), True, 'import tensorflow as tf\n'), ((5546, 5594), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (5589, 5594), True, 'import tensorflow as tf\n'), ((5630, 5678), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (5673, 5678), True, 'import tensorflow as tf\n'), ((7336, 7354), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (7352, 7354), False, 'import tempfile\n'), ((8205, 8563), 'tensorflow_gan.estimator.GANEstimator', 'tfgan.estimator.GANEstimator', ([], {'generator_fn': 'generator_fn', 'discriminator_fn': 'discriminator_fn', 'generator_loss_fn': 'tfgan.losses.wasserstein_generator_loss', 'discriminator_loss_fn': 'tfgan.losses.wasserstein_discriminator_loss', 'generator_optimizer': 'gopt', 'discriminator_optimizer': 'dopt', 'get_eval_metric_ops_fn': 'get_metrics', 'model_dir': 'self._model_dir'}), '(generator_fn=generator_fn, discriminator_fn=\n discriminator_fn, generator_loss_fn=tfgan.losses.\n wasserstein_generator_loss, discriminator_loss_fn=tfgan.losses.\n wasserstein_discriminator_loss, generator_optimizer=gopt,\n 
discriminator_optimizer=dopt, get_eval_metric_ops_fn=get_metrics,\n model_dir=self._model_dir)\n', (8233, 8563), True, 'import tensorflow_gan as tfgan\n'), ((9277, 9310), 'numpy.zeros', 'np.zeros', (['[batch_size, input_dim]'], {}), '([batch_size, input_dim])\n', (9285, 9310), True, 'import numpy as np\n'), ((9332, 9457), 'tensorflow.compat.v1.estimator.inputs.numpy_input_fn', 'tf.compat.v1.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': data}", 'y': 'data', 'batch_size': 'batch_size', 'num_epochs': 'None', 'shuffle': '(True)'}), "(x={'x': data}, y=data,\n batch_size=batch_size, num_epochs=None, shuffle=True)\n", (9376, 9457), True, 'import tensorflow as tf\n'), ((9515, 9624), 'tensorflow.compat.v1.estimator.inputs.numpy_input_fn', 'tf.compat.v1.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': data}", 'y': 'data', 'batch_size': 'batch_size', 'shuffle': '(False)'}), "(x={'x': data}, y=data,\n batch_size=batch_size, shuffle=False)\n", (9559, 9624), True, 'import tensorflow as tf\n'), ((9653, 9755), 'tensorflow.compat.v1.estimator.inputs.numpy_input_fn', 'tf.compat.v1.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': data}", 'batch_size': 'batch_size', 'shuffle': '(False)'}), "(x={'x': data}, batch_size=\n batch_size, shuffle=False)\n", (9697, 9755), True, 'import tensorflow as tf\n'), ((10100, 10133), 'numpy.zeros', 'np.zeros', (['[batch_size, input_dim]'], {}), '([batch_size, input_dim])\n', (10108, 10133), True, 'import numpy as np\n'), ((10155, 10280), 'tensorflow.compat.v1.estimator.inputs.numpy_input_fn', 'tf.compat.v1.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': data}", 'y': 'data', 'batch_size': 'batch_size', 'num_epochs': 'None', 'shuffle': '(True)'}), "(x={'x': data}, y=data,\n batch_size=batch_size, num_epochs=None, shuffle=True)\n", (10199, 10280), True, 'import tensorflow as tf\n'), ((10338, 10447), 'tensorflow.compat.v1.estimator.inputs.numpy_input_fn', 'tf.compat.v1.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': data}", 'y': 'data', 
'batch_size': 'batch_size', 'shuffle': '(False)'}), "(x={'x': data}, y=data,\n batch_size=batch_size, shuffle=False)\n", (10382, 10447), True, 'import tensorflow as tf\n'), ((10476, 10578), 'tensorflow.compat.v1.estimator.inputs.numpy_input_fn', 'tf.compat.v1.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': data}", 'batch_size': 'batch_size', 'shuffle': '(False)'}), "(x={'x': data}, batch_size=\n batch_size, shuffle=False)\n", (10520, 10578), True, 'import tensorflow as tf\n'), ((10976, 11009), 'numpy.zeros', 'np.zeros', (['[batch_size, input_dim]'], {}), '([batch_size, input_dim])\n', (10984, 11009), True, 'import numpy as np\n'), ((13007, 13051), 'tensorflow.compat.v1.summary.FileWriterCache.clear', 'tf.compat.v1.summary.FileWriterCache.clear', ([], {}), '()\n', (13049, 13051), True, 'import tensorflow as tf\n'), ((15071, 15176), 'tensorflow.estimator.WarmStartSettings', 'tf.estimator.WarmStartSettings', ([], {'ckpt_to_initialize_from': 'self._model_dir', 'vars_to_warm_start': 'var_regex'}), '(ckpt_to_initialize_from=self._model_dir,\n vars_to_warm_start=var_regex)\n', (15101, 15176), True, 'import tensorflow as tf\n'), ((15797, 15841), 'tensorflow.compat.v1.summary.FileWriterCache.clear', 'tf.compat.v1.summary.FileWriterCache.clear', ([], {}), '()\n', (15839, 15841), True, 'import tensorflow as tf\n'), ((17024, 17065), 'tensorflow_gan.python.estimator.gan_estimator.extract_gan_loss_args_from_params', 'extract_gan_loss_args_from_params', (['params'], {}), '(params)\n', (17057, 17065), False, 'from tensorflow_gan.python.estimator.gan_estimator import extract_gan_loss_args_from_params\n'), ((2882, 2987), 'tensorflow_gan.python.estimator.gan_estimator.get_gan_model', 'get_gan_model', (['mode', 'generator_fn', 'discriminator_fn', 'real_data', 'generator_inputs'], {'add_summaries': '(False)'}), '(mode, generator_fn, discriminator_fn, real_data,\n generator_inputs, add_summaries=False)\n', (2895, 2987), False, 'from tensorflow_gan.python.estimator.gan_estimator 
import get_gan_model\n'), ((4473, 4488), 'tensorflow.ones', 'tf.ones', (['[3, 4]'], {}), '([3, 4])\n', (4480, 4488), True, 'import tensorflow as tf\n'), ((4601, 4617), 'tensorflow.zeros', 'tf.zeros', (['[3, 4]'], {}), '([3, 4])\n', (4609, 4617), True, 'import tensorflow as tf\n'), ((5823, 5878), 'tensorflow_gan.gan_loss', 'tfgan.gan_loss', (['gan_model', 'dummy_loss_fn', 'dummy_loss_fn'], {}), '(gan_model, dummy_loss_fn, dummy_loss_fn)\n', (5837, 5878), True, 'import tensorflow_gan as tfgan\n'), ((6197, 6208), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (6205, 6208), True, 'import numpy as np\n'), ((6461, 6516), 'tensorflow_gan.gan_loss', 'tfgan.gan_loss', (['gan_model', 'dummy_loss_fn', 'dummy_loss_fn'], {}), '(gan_model, dummy_loss_fn, dummy_loss_fn)\n', (6475, 6516), True, 'import tensorflow_gan as tfgan\n'), ((6530, 6615), 'tensorflow_gan.python.estimator.gan_estimator.get_eval_estimator_spec', 'get_eval_estimator_spec', (['gan_model', 'gan_loss'], {'get_eval_metric_ops_fn': 'get_metrics'}), '(gan_model, gan_loss, get_eval_metric_ops_fn=get_metrics\n )\n', (6553, 6615), False, 'from tensorflow_gan.python.estimator.gan_estimator import get_eval_estimator_spec\n'), ((6794, 6805), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (6802, 6805), True, 'import numpy as np\n'), ((7018, 7055), 'tensorflow_gan.python.estimator.gan_estimator.get_predict_estimator_spec', 'get_predict_estimator_spec', (['gan_model'], {}), '(gan_model)\n', (7044, 7055), False, 'from tensorflow_gan.python.estimator.gan_estimator import get_predict_estimator_spec\n'), ((7464, 7508), 'tensorflow.compat.v1.summary.FileWriterCache.clear', 'tf.compat.v1.summary.FileWriterCache.clear', ([], {}), '()\n', (7506, 7508), True, 'import tensorflow as tf\n'), ((7515, 7545), 'shutil.rmtree', 'shutil.rmtree', (['self._model_dir'], {}), '(self._model_dir)\n', (7528, 7545), False, 'import shutil\n'), ((7827, 7873), 'tensorflow.compat.v1.train.get_or_create_global_step', 
'tf.compat.v1.train.get_or_create_global_step', ([], {}), '()\n', (7871, 7873), True, 'import tensorflow as tf\n'), ((7885, 7942), 'tensorflow.compat.v1.train.exponential_decay', 'tf.compat.v1.train.exponential_decay', (['(1.0)', 'gstep', '(10)', '(0.9)'], {}), '(1.0, gstep, 10, 0.9)\n', (7921, 7942), True, 'import tensorflow as tf\n'), ((7956, 8003), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['lr'], {}), '(lr)\n', (7999, 8003), True, 'import tensorflow as tf\n'), ((8042, 8090), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (8085, 8090), True, 'import tensorflow as tf\n'), ((8137, 8185), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (8180, 8185), True, 'import tensorflow as tf\n'), ((8846, 8866), 'six.iterkeys', 'six.iterkeys', (['scores'], {}), '(scores)\n', (8858, 8866), False, 'import six\n'), ((8974, 8994), 'six.iterkeys', 'six.iterkeys', (['scores'], {}), '(scores)\n', (8986, 8994), False, 'import six\n'), ((11542, 11588), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[input_dim]', 'tf.float32'], {}), '([input_dim], tf.float32)\n', (11563, 11588), True, 'import tensorflow as tf\n'), ((11603, 11649), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[input_dim]', 'tf.float32'], {}), '([input_dim], tf.float32)\n', (11624, 11649), True, 'import tensorflow as tf\n'), ((11705, 11779), 'tensorflow.io.parse_example', 'tf.io.parse_example', ([], {'serialized': 'serialized_examples', 'features': 'feature_spec'}), '(serialized=serialized_examples, features=feature_spec)\n', (11724, 11779), True, 'import tensorflow as tf\n'), ((13227, 13339), 'tensorflow.compat.v1.get_variable', 'tf.compat.v1.get_variable', ([], {'name': 'self.new_variable_name', 'initializer': 'self.new_variable_value', 'trainable': '(True)'}), 
'(name=self.new_variable_name, initializer=self.\n new_variable_value, trainable=True)\n', (13252, 13339), True, 'import tensorflow as tf\n'), ((13450, 13466), 'numpy.zeros', 'np.zeros', (['[3, 4]'], {}), '([3, 4])\n', (13458, 13466), True, 'import numpy as np\n'), ((16065, 16100), 'numpy.zeros', 'np.zeros', (["[params['batch_size'], 4]"], {}), "([params['batch_size'], 4])\n", (16073, 16100), True, 'import numpy as np\n'), ((17348, 17389), 'tensorflow_gan.python.estimator.gan_estimator.extract_gan_loss_args_from_params', 'extract_gan_loss_args_from_params', (['params'], {}), '(params)\n', (17381, 17389), False, 'from tensorflow_gan.python.estimator.gan_estimator import extract_gan_loss_args_from_params\n'), ((2727, 2742), 'tensorflow.ones', 'tf.ones', (['[3, 4]'], {}), '([3, 4])\n', (2734, 2742), True, 'import tensorflow as tf\n'), ((2819, 2835), 'tensorflow.zeros', 'tf.zeros', (['[3, 4]'], {}), '([3, 4])\n', (2827, 2835), True, 'import tensorflow as tf\n'), ((4652, 4670), 'tensorflow.ones', 'tf.ones', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (4659, 4670), True, 'import tensorflow as tf\n'), ((5969, 6037), 'tensorflow_gan.python.estimator.gan_estimator.Optimizers', 'Optimizers', (['self._generator_optimizer', 'self._discriminator_optimizer'], {}), '(self._generator_optimizer, self._discriminator_optimizer)\n', (5979, 6037), False, 'from tensorflow_gan.python.estimator.gan_estimator import Optimizers\n'), ((13787, 13835), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (13830, 13835), True, 'import tensorflow as tf\n'), ((13869, 13917), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (13912, 13917), True, 'import tensorflow as tf\n'), ((14316, 14364), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (14359, 14364), True, 'import 
tensorflow as tf\n'), ((14398, 14446), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (14441, 14446), True, 'import tensorflow as tf\n'), ((16421, 16469), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (16464, 16469), True, 'import tensorflow as tf\n'), ((16503, 16551), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.compat.v1.train.GradientDescentOptimizer', (['(1.0)'], {}), '(1.0)\n', (16546, 16551), True, 'import tensorflow as tf\n'), ((2671, 2681), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2679, 2681), True, 'import tensorflow as tf\n'), ((4714, 4732), 'tensorflow.ones', 'tf.ones', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (4721, 4732), True, 'import tensorflow as tf\n'), ((5741, 5751), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5749, 5751), True, 'import tensorflow as tf\n'), ((6379, 6389), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6387, 6389), True, 'import tensorflow as tf\n'), ((6940, 6950), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6948, 6950), True, 'import tensorflow as tf\n'), ((11983, 12049), 'tensorflow.compat.v1.train.limit_epochs', 'tf.compat.v1.train.limit_epochs', (['serialized_examples'], {'num_epochs': '(1)'}), '(serialized_examples, num_epochs=1)\n', (12014, 12049), True, 'import tensorflow as tf\n'), ((12294, 12360), 'tensorflow.compat.v1.train.limit_epochs', 'tf.compat.v1.train.limit_epochs', (['serialized_examples'], {'num_epochs': '(1)'}), '(serialized_examples, num_epochs=1)\n', (12325, 12360), True, 'import tensorflow as tf\n'), ((11259, 11290), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'datum'}), '(value=datum)\n', (11277, 11290), True, 'import tensorflow as tf\n'), ((11393, 11424), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'datum'}), '(value=datum)\n', (11411, 11424), True, 'import 
tensorflow as tf\n')] |
from __future__ import print_function
from __future__ import division
import pytest
import numpy as np
from sklearn.exceptions import ConvergenceWarning
from civismlext.nonnegative import NonNegativeLinearRegression
from civismlext.nonnegative import _rescale_data
# Module-level RNG with a fixed seed so the randomized tests are reproducible.
rng = np.random.RandomState(17)
def test_rescale_data():
    """Check that _rescale_data multiplies rows by sqrt(sample_weight).

    Adapted from sklearn/linear_model/tests/test_base.py.
    """
    n, p = 200, 2
    weights = 1.0 + rng.rand(n)
    X = rng.rand(n, p)
    y = rng.rand(n)
    got_X, got_y = _rescale_data(X, y, weights)
    sqrt_w = np.sqrt(weights)
    np.testing.assert_allclose(got_X, X * sqrt_w[:, np.newaxis])
    np.testing.assert_allclose(got_y, y * sqrt_w)
def test_nonneg_linreg():
    """Fit an exactly-recoverable nonnegative model and verify its parameters."""
    X = np.array([[0, 1], [1, 0], [0, 0], [1, 1]])
    target = X.dot([1, 2]) + 2
    model = NonNegativeLinearRegression(fit_intercept=True)
    model.fit(X, target)
    np.testing.assert_allclose(model.coef_, [1, 2])
    np.testing.assert_allclose(model.intercept_, [2])
    # A new point [1, 1] should map to 1*1 + 1*2 + 2 = 5.
    np.testing.assert_allclose(model.predict(np.array([[1, 1]])), [5])
def test_zero_coef_exception():
    """All-negative targets force every coefficient to zero, raising a warning."""
    X = np.array([[0, 1], [1, 0], [0, 0], [1, 1]])
    y = X.dot([-1, -1]) + -1
    with pytest.raises(ConvergenceWarning):
        NonNegativeLinearRegression(fit_intercept=True).fit(X, y)
def test_preprocess_data():
    """Exercise _preprocess_data with and without centering/normalization.

    Adapted from sklearn/linear_model/tests/test_base.py, with small
    modifications.
    """
    close = np.testing.assert_allclose
    n, p = 200, 2
    X = rng.rand(n, p)
    y = rng.rand(n)
    exp_X_mean = np.mean(X, axis=0)
    exp_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0])
    exp_y_mean = np.mean(y, axis=0)
    model = NonNegativeLinearRegression()

    # No intercept, no normalization: data passes through untouched.
    Xt, yt, X_mean, y_mean, X_norm = \
        model._preprocess_data(X, y, fit_intercept=False, normalize=False)
    close(X_mean, np.zeros(p))
    close(y_mean, 0)
    close(X_norm, np.ones(p))
    close(Xt, X)
    close(yt, y)

    # Intercept only: data is centered but not rescaled.
    Xt, yt, X_mean, y_mean, X_norm = \
        model._preprocess_data(X, y, fit_intercept=True, normalize=False)
    close(X_mean, exp_X_mean)
    close(y_mean, exp_y_mean)
    close(X_norm, np.ones(p))
    close(Xt, X - exp_X_mean)
    close(yt, y - exp_y_mean)

    # Intercept and normalization: centered, then scaled by the column norm.
    Xt, yt, X_mean, y_mean, X_norm = \
        model._preprocess_data(X, y, fit_intercept=True, normalize=True)
    close(X_mean, exp_X_mean)
    close(y_mean, exp_y_mean)
    close(X_norm, exp_X_norm)
    close(Xt, (X - exp_X_mean) / exp_X_norm)
    close(yt, y - exp_y_mean)
def test_preprocess_data_weighted():
    """Exercise _preprocess_data with sample weights.

    Adapted from sklearn/linear_model/tests/test_base.py, with small
    modifications.
    """
    close = np.testing.assert_allclose
    n, p = 200, 2
    X = rng.rand(n, p)
    y = rng.rand(n)
    weights = rng.rand(n)
    exp_X_mean = np.average(X, axis=0, weights=weights)
    exp_y_mean = np.average(y, axis=0, weights=weights)
    model = NonNegativeLinearRegression()
    # XXX: if normalize=True, should we expect a weighted standard deviation?
    # Currently not weighted, but calculated with respect to weighted mean
    exp_X_norm = (np.sqrt(X.shape[0]) *
                  np.mean((X - exp_X_mean) ** 2, axis=0) ** .5)

    # Weighted centering only.
    Xt, yt, X_mean, y_mean, X_norm = \
        model._preprocess_data(X, y, fit_intercept=True, normalize=False,
                               sample_weight=weights)
    close(X_mean, exp_X_mean)
    close(y_mean, exp_y_mean)
    close(X_norm, np.ones(p))
    close(Xt, X - exp_X_mean)
    close(yt, y - exp_y_mean)

    # Weighted centering plus (unweighted) normalization.
    Xt, yt, X_mean, y_mean, X_norm = \
        model._preprocess_data(X, y, fit_intercept=True, normalize=True,
                               sample_weight=weights)
    close(X_mean, exp_X_mean)
    close(y_mean, exp_y_mean)
    close(X_norm, exp_X_norm)
    close(Xt, (X - exp_X_mean) / exp_X_norm)
    close(yt, y - exp_y_mean)
| [
"numpy.average",
"numpy.std",
"numpy.zeros",
"numpy.ones",
"civismlext.nonnegative.NonNegativeLinearRegression",
"numpy.random.RandomState",
"pytest.raises",
"numpy.mean",
"numpy.array",
"civismlext.nonnegative._rescale_data",
"numpy.testing.assert_allclose",
"numpy.sqrt"
] | [((274, 299), 'numpy.random.RandomState', 'np.random.RandomState', (['(17)'], {}), '(17)\n', (295, 299), True, 'import numpy as np\n'), ((577, 611), 'civismlext.nonnegative._rescale_data', '_rescale_data', (['X', 'y', 'sample_weight'], {}), '(X, y, sample_weight)\n', (590, 611), False, 'from civismlext.nonnegative import _rescale_data\n'), ((721, 772), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rescaled_X', 'rescaled_X2'], {}), '(rescaled_X, rescaled_X2)\n', (747, 772), True, 'import numpy as np\n'), ((777, 828), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['rescaled_y', 'rescaled_y2'], {}), '(rescaled_y, rescaled_y2)\n', (803, 828), True, 'import numpy as np\n'), ((865, 907), 'numpy.array', 'np.array', (['[[0, 1], [1, 0], [0, 0], [1, 1]]'], {}), '([[0, 1], [1, 0], [0, 0], [1, 1]])\n', (873, 907), True, 'import numpy as np\n'), ((920, 967), 'civismlext.nonnegative.NonNegativeLinearRegression', 'NonNegativeLinearRegression', ([], {'fit_intercept': '(True)'}), '(fit_intercept=True)\n', (947, 967), False, 'from civismlext.nonnegative import NonNegativeLinearRegression\n'), ((1008, 1054), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['nnlr.coef_', '[1, 2]'], {}), '(nnlr.coef_, [1, 2])\n', (1034, 1054), True, 'import numpy as np\n'), ((1059, 1107), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['nnlr.intercept_', '[2]'], {}), '(nnlr.intercept_, [2])\n', (1085, 1107), True, 'import numpy as np\n'), ((1674, 1692), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1681, 1692), True, 'import numpy as np\n'), ((1777, 1795), 'numpy.mean', 'np.mean', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (1784, 1795), True, 'import numpy as np\n'), ((1807, 1836), 'civismlext.nonnegative.NonNegativeLinearRegression', 'NonNegativeLinearRegression', ([], {}), '()\n', (1834, 1836), False, 'from civismlext.nonnegative import NonNegativeLinearRegression\n'), ((2016, 2053), 
'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_mean', '(0)'], {}), '(y_mean, 0)\n', (2042, 2053), True, 'import numpy as np\n'), ((2118, 2151), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Xt', 'X'], {}), '(Xt, X)\n', (2144, 2151), True, 'import numpy as np\n'), ((2156, 2189), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['yt', 'y'], {}), '(yt, y)\n', (2182, 2189), True, 'import numpy as np\n'), ((2307, 2358), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X_mean', 'expected_X_mean'], {}), '(X_mean, expected_X_mean)\n', (2333, 2358), True, 'import numpy as np\n'), ((2363, 2414), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_mean', 'expected_y_mean'], {}), '(y_mean, expected_y_mean)\n', (2389, 2414), True, 'import numpy as np\n'), ((2479, 2530), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Xt', '(X - expected_X_mean)'], {}), '(Xt, X - expected_X_mean)\n', (2505, 2530), True, 'import numpy as np\n'), ((2535, 2586), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['yt', '(y - expected_y_mean)'], {}), '(yt, y - expected_y_mean)\n', (2561, 2586), True, 'import numpy as np\n'), ((2703, 2754), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X_mean', 'expected_X_mean'], {}), '(X_mean, expected_X_mean)\n', (2729, 2754), True, 'import numpy as np\n'), ((2759, 2810), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_mean', 'expected_y_mean'], {}), '(y_mean, expected_y_mean)\n', (2785, 2810), True, 'import numpy as np\n'), ((2815, 2866), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X_norm', 'expected_X_norm'], {}), '(X_norm, expected_X_norm)\n', (2841, 2866), True, 'import numpy as np\n'), ((2871, 2942), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Xt', '((X - expected_X_mean) / expected_X_norm)'], {}), '(Xt, (X - expected_X_mean) / expected_X_norm)\n', (2897, 
2942), True, 'import numpy as np\n'), ((2947, 2998), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['yt', '(y - expected_y_mean)'], {}), '(yt, y - expected_y_mean)\n', (2973, 2998), True, 'import numpy as np\n'), ((3305, 3349), 'numpy.average', 'np.average', (['X'], {'axis': '(0)', 'weights': 'sample_weight'}), '(X, axis=0, weights=sample_weight)\n', (3315, 3349), True, 'import numpy as np\n'), ((3372, 3416), 'numpy.average', 'np.average', (['y'], {'axis': '(0)', 'weights': 'sample_weight'}), '(y, axis=0, weights=sample_weight)\n', (3382, 3416), True, 'import numpy as np\n'), ((3428, 3457), 'civismlext.nonnegative.NonNegativeLinearRegression', 'NonNegativeLinearRegression', ([], {}), '()\n', (3455, 3457), False, 'from civismlext.nonnegative import NonNegativeLinearRegression\n'), ((3912, 3963), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X_mean', 'expected_X_mean'], {}), '(X_mean, expected_X_mean)\n', (3938, 3963), True, 'import numpy as np\n'), ((3968, 4019), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_mean', 'expected_y_mean'], {}), '(y_mean, expected_y_mean)\n', (3994, 4019), True, 'import numpy as np\n'), ((4084, 4135), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Xt', '(X - expected_X_mean)'], {}), '(Xt, X - expected_X_mean)\n', (4110, 4135), True, 'import numpy as np\n'), ((4140, 4191), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['yt', '(y - expected_y_mean)'], {}), '(yt, y - expected_y_mean)\n', (4166, 4191), True, 'import numpy as np\n'), ((4367, 4418), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X_mean', 'expected_X_mean'], {}), '(X_mean, expected_X_mean)\n', (4393, 4418), True, 'import numpy as np\n'), ((4423, 4474), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_mean', 'expected_y_mean'], {}), '(y_mean, expected_y_mean)\n', (4449, 4474), True, 'import numpy as np\n'), ((4479, 4530), 
'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['X_norm', 'expected_X_norm'], {}), '(X_norm, expected_X_norm)\n', (4505, 4530), True, 'import numpy as np\n'), ((4535, 4606), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Xt', '((X - expected_X_mean) / expected_X_norm)'], {}), '(Xt, (X - expected_X_mean) / expected_X_norm)\n', (4561, 4606), True, 'import numpy as np\n'), ((4611, 4662), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['yt', '(y - expected_y_mean)'], {}), '(yt, y - expected_y_mean)\n', (4637, 4662), True, 'import numpy as np\n'), ((694, 716), 'numpy.sqrt', 'np.sqrt', (['sample_weight'], {}), '(sample_weight)\n', (701, 716), True, 'import numpy as np\n'), ((1221, 1254), 'pytest.raises', 'pytest.raises', (['ConvergenceWarning'], {}), '(ConvergenceWarning)\n', (1234, 1254), False, 'import pytest\n'), ((1268, 1310), 'numpy.array', 'np.array', (['[[0, 1], [1, 0], [0, 0], [1, 1]]'], {}), '([[0, 1], [1, 0], [0, 0], [1, 1]])\n', (1276, 1310), True, 'import numpy as np\n'), ((1327, 1374), 'civismlext.nonnegative.NonNegativeLinearRegression', 'NonNegativeLinearRegression', ([], {'fit_intercept': '(True)'}), '(fit_intercept=True)\n', (1354, 1374), False, 'from civismlext.nonnegative import NonNegativeLinearRegression\n'), ((1715, 1732), 'numpy.std', 'np.std', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1721, 1732), True, 'import numpy as np\n'), ((1735, 1754), 'numpy.sqrt', 'np.sqrt', (['X.shape[0]'], {}), '(X.shape[0])\n', (1742, 1754), True, 'import numpy as np\n'), ((1990, 2010), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (1998, 2010), True, 'import numpy as np\n'), ((2093, 2112), 'numpy.ones', 'np.ones', (['n_features'], {}), '(n_features)\n', (2100, 2112), True, 'import numpy as np\n'), ((2454, 2473), 'numpy.ones', 'np.ones', (['n_features'], {}), '(n_features)\n', (2461, 2473), True, 'import numpy as np\n'), ((3640, 3659), 'numpy.sqrt', 'np.sqrt', (['X.shape[0]'], {}), 
'(X.shape[0])\n', (3647, 3659), True, 'import numpy as np\n'), ((4059, 4078), 'numpy.ones', 'np.ones', (['n_features'], {}), '(n_features)\n', (4066, 4078), True, 'import numpy as np\n'), ((634, 656), 'numpy.sqrt', 'np.sqrt', (['sample_weight'], {}), '(sample_weight)\n', (641, 656), True, 'import numpy as np\n'), ((1152, 1170), 'numpy.array', 'np.array', (['[[1, 1]]'], {}), '([[1, 1]])\n', (1160, 1170), True, 'import numpy as np\n'), ((3685, 3728), 'numpy.mean', 'np.mean', (['((X - expected_X_mean) ** 2)'], {'axis': '(0)'}), '((X - expected_X_mean) ** 2, axis=0)\n', (3692, 3728), True, 'import numpy as np\n')] |
# This file is released under MIT licence, see file LICENSE.
# Author(s): <NAME>
#
# Copyright (C) 2021 Inria
import numpy as np
from scipy.spatial import Voronoi, voronoi_plot_2d
import scipy.spatial.distance as sc
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import gudhi as gd
def _fparab(a,b,x):
return (x - a)**2 / (2 * b) + (b / 2)
def _rotate(t):
res = np.dot(t, np.sqrt(2)/2 * np.array([[1, 1],[-1, 1]]))
return res
def _parab(c):
    """Return the one-argument parabola function for the centre c = (a, b)."""
    a, b = c
    return lambda x: _fparab(a, b, x)
def plot_partition_codebook(cs):
    """Draw the Voronoi partition of a codebook ``cs`` in 45-degree rotated coordinates.

    The codebook points, their Voronoi cells, the lower envelope of the
    associated parabolas (dashed), and the region below the diagonal
    (shaded, labelled with the boundary of Omega) are all drawn on one
    square, axis-free figure.
    """
    xmin, xmax = -7, 3
    ymin, ymax = -2, 8
    x = np.linspace(-10, 10, 100)
    # Lower envelope of the parabolas attached to each codebook point.
    ys = np.array([_parab(c)(x) for c in cs])
    miny = np.min(ys, axis=0)
    r_cs = _rotate(cs)
    vor = Voronoi(r_cs)
    fig, ax = plt.subplots(figsize=(6,6))
    voronoi_plot_2d(vor, ax, show_points=False, show_vertices=False)
    ax.scatter(r_cs[:,0], r_cs[:,1], marker='o', c='b')
    ax.annotate('$c_j$', r_cs[0]+[0.2,0.2], c='blue', fontsize=24)
    #ax.annotate('$c_2$', r_cs[2]+[0.2,0.2], c='green', fontsize=24)
    #ax.annotate('$V_j(\mathbf{c})$', r_cs[1]+[-0.2,1.9], c='blue', fontsize=24)
    #ax.annotate('$V_{k+1}(\mathbf{c})$', r_cs[1]+[2.,-2], c='black', fontsize=24, rotation=45)
    # Rotate the envelope curve into the same coordinates as the codebook.
    tmp = np.zeros((len(x), 2))
    tmp[:,0] = x
    tmp[:,1] = miny
    r_parab = _rotate(tmp)
    ax.plot(r_parab[:,0], r_parab[:,1], linestyle='dashed', color='black', linewidth=3)
    ax.set_aspect('equal')
    # White fills mask the Voronoi edges below the envelope; index 59/60 splits
    # the curve at its sample closest to the diagonal — presumably tuned for
    # this figure, TODO confirm if cs changes.
    ax.fill_between(r_parab[:60,0],y1=r_parab[:60,0],y2=r_parab[:60,1], color='white', alpha=1, zorder=3)
    ax.fill_betweenx(r_parab[59:,1],x1=r_parab[59:,1],x2=r_parab[59:,0], color='white', alpha=1, zorder=3)
    ax.add_patch(mpatches.Polygon([[0,0], [0,r_parab[59,1]], [r_parab[59,1], r_parab[59,1]]], fill=True, color='white', alpha=1, zorder=3))
    # Grey triangle below the diagonal, then the diagonal itself on top.
    ax.add_patch(mpatches.Polygon([[ymin,ymin], [xmax,ymin], [xmax, xmax]], fill=True, color='lightgrey', alpha=1,zorder=3))
    ax.plot([min(xmin,ymin), max(xmax,ymax)], [min(xmin,ymin), max(xmax,ymax)], color='k', linewidth=3,zorder=3)
    ax.annotate('$\partial \Omega$', [-1.3, -1.9], fontsize=24)
    #ax.annotate('$N(\mathbf{c})$', [-6.5,0], fontsize=24, rotation=45)
    ax.set_axis_off()
    ax.set_ylim(ymin,ymax)
    ax.set_xlim(xmin, xmax)
def plot_dgm(dgm, box=None, ax=None, color="blue", label='diagram', alpha=None):
    """Scatter-plot a persistence diagram with the below-diagonal region shaded.

    :param dgm: (n x 2) array of (birth, death) pairs; may be empty.
    :param box: optional (min, max) limits used for both axes; when None they
        are inferred from the diagram with a 0.1 margin (or default to (0, 1)
        for an empty diagram).
    :param ax: matplotlib axis to draw on; a new figure is created when None.
    :param color: marker color forwarded to the scatter call.
    :param label: legend label forwarded to the scatter call.
    :param alpha: marker transparency forwarded to the scatter call.
    """
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    if dgm.size:
        x, y = dgm[:, 0], dgm[:, 1]
        ax.scatter(x, y, marker='o', color=color, label=label, alpha=alpha)
    if box is None:
        if dgm.size:
            m, M = np.min(dgm) - 0.1, np.max(dgm) + 0.1
        else:
            # Fallback limits for an empty diagram.
            m, M = 0, 1
    else:
        m, M = box
    ax.set_xlim(m, M)
    ax.set_ylim(m, M)
    # Shade the (unreachable) half-plane below the diagonal.
    ax.add_patch(mpatches.Polygon([[m, m], [M, m], [M, M]], fill=True, color='lightgrey'))
    ax.set_aspect("equal")
    ax.set_xlabel("Birth", fontsize=24)
    ax.set_ylabel("Death", fontsize=24)
    # ax.legend()
########################
### Experiment utils ###
########################
def _sample_torus(n, r1, r2, radius_eps, ambiant_eps):
# Sample points uniformly on a torus of big radius r1 and small radius r2
theta1 = 2 * np.pi * np.random.rand(n)
theta2 = 2 * np.pi * np.random.rand(n)
r1 = r1 + radius_eps * (2 * np.random.rand() - 1)
r2 = r2 + radius_eps * (2 * np.random.rand() - 1)
x = (r1 + r2 * np.cos(theta2)) * np.cos(theta1)
y = (r1 + r2 * np.cos(theta2)) * np.sin(theta1)
z = r2 * np.sin(theta2)
X = np.array([x, y, z]).T
X = X + ambiant_eps * (2 * np.random.rand(n,3) - 1)
return X
def _compute_pd(X, hdim=1, min_persistence=0.0001, mode="alpha", rotate=False):
    """Compute the persistence diagram of point cloud ``X`` in homology dimension ``hdim``.

    mode="alpha" builds a gudhi Alpha complex; mode="rips" builds a Rips
    complex up to dimension 2. With rotate=True (alpha mode only) the result
    is (birth, persistence) instead of (birth, death).
    """
    if mode == "alpha":
        ac = gd.AlphaComplex(points=X)
        st = ac.create_simplex_tree()
    elif mode == "rips":
        ac = gd.RipsComplex(points=X)
        st = ac.create_simplex_tree(max_dimension=2)
    # Persistence must be computed before the intervals can be extracted.
    pers = st.persistence(min_persistence=min_persistence)
    h1 = st.persistence_intervals_in_dimension(hdim)
    if mode == "alpha":
        # Alpha filtration values are squared radii; take sqrt to get radii.
        h1 = np.sqrt(np.array(h1))
        if rotate:
            h1[:, 1] = h1[:, 1] - h1[:, 0]
        return h1
    else:
        # NOTE(review): `rotate` is silently ignored in rips mode — confirm intended.
        return np.array(h1) / 2 # to make it comparable with Cech
def build_dataset(K, params):
    """Generate ``K`` noisy torus point clouds and their persistence diagrams.

    :param K: number of clouds to sample.
    :param params: dict with keys 'nb_points' (Poisson mean of the cloud
        size), 'r1', 'r2' (torus radii), 'radius_eps' and 'ambiant_eps'
        (noise levels).
    :returns: (clouds, diagrams) — two lists of length K.
    """
    sizes = np.random.poisson(lam=params['nb_points'], size=K)
    clouds = [_sample_torus(m, params['r1'], params['r2'],
                            params['radius_eps'], params['ambiant_eps'])
              for m in sizes]
    diagrams = [_compute_pd(cloud) for cloud in clouds]
    return clouds, diagrams
##############################
### Quantization algorithm ###
##############################
def _dist_to_diag(X, internal_p):
return ((X[:, 1] - X[:, 0]) * 2 ** (1. / internal_p - 1))
def _build_dist_matrix(X, Y, order=2., internal_p=2):
    """Build the augmented cost matrix between two persistence diagrams.

    :param X: (n x 2) numpy.array encoding the (points of the) first diagram.
    :param Y: (m x 2) numpy.array encoding the second diagram.
    :param order: exponent for the Wasserstein metric.
    :param internal_p: ground metric (i.e. norm L^p).
    :returns: (n+1) x (m+1) np.array C where, for 0 <= i < n and 0 <= j < m,
        C[i, j] is the distance (to the power ``order``) between X[i] and Y[j];
        the last column C[i, m] (resp. last row C[n, j]) holds the cost of
        sending X[i] (resp. Y[j]) to its orthogonal projection on the diagonal,
        and C[n, m] = 0 (diagonal to diagonal is free).
    """
    cost_X_diag = _dist_to_diag(X, internal_p=internal_p) ** order
    cost_Y_diag = _dist_to_diag(Y, internal_p=internal_p) ** order
    if np.isinf(internal_p):
        pairwise = sc.cdist(X, Y, metric='chebyshev') ** order
    else:
        pairwise = sc.cdist(X, Y, metric='minkowski', p=internal_p) ** order
    top = np.hstack((pairwise, cost_X_diag[:, None]))
    bottom = np.append(cost_Y_diag, 0)
    return np.vstack((top, bottom[None, :]))
def _get_cells(X, c, withdiag, internal_p):
    """Assign each point of ``X`` to its nearest centroid (optionally the diagonal).

    X: (n x 2) array of diagram points.
    c: (k x 2) array of centroids.
    withdiag: if True, a (k+1)-th cell collects points nearest the diagonal.
    returns: list of k (or k+1) arrays; entry j holds the points of X closest
        to c[j], with the convention that index k stands for the diagonal.
    """
    dmat = _build_dist_matrix(X, c, internal_p=internal_p)  # Note: order is useless here
    candidate_cols = dmat[:-1, :] if withdiag else dmat[:-1, :-1]
    nearest = np.argmin(candidate_cols, axis=1)
    k = len(c)
    cells = [X[nearest == j] for j in range(k)]
    if withdiag:
        cells.append(X[nearest == k])  # points assigned to the diagonal
    return cells
def _get_cost_Rk(X, c, withdiag, order, internal_p):
    """Quantization cost of diagram ``X`` with respect to codebook ``c``.

    :param X: (n x 2) array of diagram points.
    :param c: (k x 2) array of centroids.
    :param withdiag: if True, points nearest the diagonal pay their distance
        to the diagonal instead of to a centroid.
    :param order: cost exponent; ``np.inf`` (with withdiag) gives the sup cost.
    :param internal_p: ground metric L^p used for point-to-centroid distances.
    :returns: the order-th power of the cost (or the max distance when
        ``order`` is infinite).
    """
    cells = _get_cells(X, c, withdiag, internal_p=internal_p)
    k = len(c)
    cost = 0
    # np.inf instead of the deprecated np.infty (removed in NumPy 2.0);
    # the two compare equal, so behavior is unchanged.
    if order == np.inf and withdiag:
        for cells_j, c_j in zip(cells, c):
            if len(cells_j) == 0:
                pass
            else:
                cost_j = np.max(np.linalg.norm(cells_j - c_j, ord=internal_p, axis=1))
                cost = max(cost, cost_j)
        if len(cells[k]) == 0:
            pass
        else:
            dists_diag = _dist_to_diag(cells[k], internal_p=internal_p)
            cost = max(cost, np.max(dists_diag))
        return cost
    for cells_j, c_j in zip(cells, c):
        cost_j = np.linalg.norm(np.linalg.norm(cells_j - c_j, ord=internal_p, axis=1), ord=order) ** order
        cost += cost_j
    if withdiag:
        # BUG FIX: the original called the undefined name `dist_to_diag`,
        # raising NameError whenever withdiag=True with a finite order.
        dists_to_diag = _dist_to_diag(cells[k], internal_p=internal_p) ** order
        cost += np.sum(dists_to_diag)
    return cost  # ** (1./order)
def _from_batch(Xs, batches_indices):
X_batch = np.concatenate([Xs[i] for i in batches_indices if Xs[i].ndim==2])
return X_batch
def init_c(list_diags, k, internal_p=2):
    """Initialize the codebook with the ``k`` most persistent points of the first diagram."""
    first = list_diags[0]
    by_persistence = np.argsort(_dist_to_diag(first, internal_p))
    return first[by_persistence[-k:]]
def quantization(Xs, batch_size, c0, withdiag, order=2., internal_p=2.):
    """Mini-batch stochastic quantization of a family of persistence diagrams.

    :param Xs: list of (n_i x 2) diagrams.
    :param batch_size: diagrams consumed per step; the reshape below assumes
        it is even and divides len(Xs) — TODO confirm callers guarantee this.
    :param c0: (k x 2) initial codebook.
    :param withdiag: whether a diagonal cell is used when assigning points.
    :param order: only order=2. is implemented.
    :param internal_p: ground metric L^p for the cell assignment.
    :returns: list of codebook positions, one per step (initial one included).
    :raises NotImplementedError: if order != 2.
    """
    #np.random.shuffle(Xs)
    k = len(c0)
    c_current = c0.copy()
    n = len(Xs)
    # Each step uses two half-batches: one to detect non-empty cells and set
    # the step size, one to estimate the gradient.
    batches = np.arange(0, n, dtype=int).reshape(int(n / batch_size), 2, int(batch_size / 2))
    nb_step = len(batches)
    positions = [c_current.copy()]
    for t in range(nb_step):
        X_bar_t_1 = _from_batch(Xs, batches[t, 0])
        X_bar_t_2 = _from_batch(Xs, batches[t, 1])
        cells_1 = _get_cells(X_bar_t_1, c_current, withdiag=withdiag, internal_p=internal_p)
        cells_2 = _get_cells(X_bar_t_2, c_current, withdiag=withdiag, internal_p=internal_p)
        s1, s2 = len(batches[t, 0]), len(batches[t, 1])
        if order == 2.:
            for j in range(k):
                lc1 = len(cells_1[j])
                if lc1 > 0:
                    grad = np.sum(c_current[j] - cells_2[j], axis=0) / s2
                    # Step size decays as 1/(t+1), scaled by the cell occupancy.
                    c_current[j] = c_current[j] - grad / ((t + 1) * lc1 / s1)
        else:
            # BUG FIX: the original raised `NotImplemented(...)` — NotImplemented
            # is a sentinel value, not an exception class, and is not callable,
            # so the original line itself failed with a TypeError.
            raise NotImplementedError(
                'Order %s is not available yet. Only order=2. is valid in this notebook' % order)
        positions.append(c_current.copy())
    return positions
###########################
### Plot quantiz result ###
###########################
def plot_result_quantiz(diags, c_final_diag, c_final_vanilla, c0):
    """Plot the quantization outputs over the input diagrams.

    :param diags: list of (n_i x 2) diagrams, drawn as faint orange points.
    :param c_final_diag: final codebook of the run with a diagonal cell (red triangles).
    :param c_final_vanilla: final codebook of the run without a diagonal cell (blue dots).
    :param c0: shared initial codebook (black crosses).
    """
    # Fixed axis limits — assumes all diagrams fit in [-0.2, 3.2]; TODO confirm.
    low, high = -.2, 3.2
    fig, ax2 = plt.subplots(figsize=(6,6))
    for pd in diags:
        ax2.scatter(pd[:,0], pd[:,1], marker='o', c='orange', alpha=0.1)
    # Shade the region below the diagonal.
    ax2.add_patch(mpatches.Polygon([[low,low], [high,low], [high,high]], fill=True, color='lightgrey'))
    ax2.scatter(c_final_diag[:,0], c_final_diag[:,1], marker='^', color='red', s=100,
               label='$\mathbf{c}^{\mathrm{output}}$')
    ax2.scatter(c_final_vanilla[:,0], c_final_vanilla[:,1], marker='o', color='blue', label='Output w/out diag cell')
    ax2.scatter(c0[:,0], c0[:,1], marker='x', color='black', label='initial position')
    ax2.legend(fontsize=12)
    ax2.set_xlim(low, high)
    ax2.set_ylim(low, high)
    ax2.set_xlabel('Birth',fontsize=24)
    ax2.set_ylabel('Death',fontsize=24)
    ax2.set_aspect('equal')
| [
"numpy.sum",
"scipy.spatial.Voronoi",
"numpy.argmin",
"numpy.argsort",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.arange",
"numpy.linalg.norm",
"numpy.append",
"numpy.max",
"numpy.random.poisson",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"scipy.s... | [((692, 717), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(100)'], {}), '(-10, 10, 100)\n', (703, 717), True, 'import numpy as np\n'), ((775, 793), 'numpy.min', 'np.min', (['ys'], {'axis': '(0)'}), '(ys, axis=0)\n', (781, 793), True, 'import numpy as np\n'), ((829, 842), 'scipy.spatial.Voronoi', 'Voronoi', (['r_cs'], {}), '(r_cs)\n', (836, 842), False, 'from scipy.spatial import Voronoi, voronoi_plot_2d\n'), ((858, 886), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (870, 886), True, 'import matplotlib.pyplot as plt\n'), ((891, 955), 'scipy.spatial.voronoi_plot_2d', 'voronoi_plot_2d', (['vor', 'ax'], {'show_points': '(False)', 'show_vertices': '(False)'}), '(vor, ax, show_points=False, show_vertices=False)\n', (906, 955), False, 'from scipy.spatial import Voronoi, voronoi_plot_2d\n'), ((4411, 4456), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': 'average_nb_pts', 'size': 'K'}), '(lam=average_nb_pts, size=K)\n', (4428, 4456), True, 'import numpy as np\n'), ((5894, 5914), 'numpy.isinf', 'np.isinf', (['internal_p'], {}), '(internal_p)\n', (5902, 5914), True, 'import numpy as np\n'), ((6062, 6090), 'numpy.hstack', 'np.hstack', (['(C, Cxd[:, None])'], {}), '((C, Cxd[:, None]))\n', (6071, 6090), True, 'import numpy as np\n'), ((6101, 6118), 'numpy.append', 'np.append', (['Cdy', '(0)'], {}), '(Cdy, 0)\n', (6110, 6118), True, 'import numpy as np\n'), ((6129, 6158), 'numpy.vstack', 'np.vstack', (['(Cf, Cdy[None, :])'], {}), '((Cf, Cdy[None, :]))\n', (6138, 6158), True, 'import numpy as np\n'), ((7976, 8043), 'numpy.concatenate', 'np.concatenate', (['[Xs[i] for i in batches_indices if Xs[i].ndim == 2]'], {}), '([Xs[i] for i in batches_indices if Xs[i].ndim == 2])\n', (7990, 8043), True, 'import numpy as np\n'), ((8175, 8188), 'numpy.argsort', 'np.argsort', (['w'], {}), '(w)\n', (8185, 8188), True, 'import numpy as np\n'), ((9590, 9618), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], 
{'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (9602, 9618), True, 'import matplotlib.pyplot as plt\n'), ((1769, 1899), 'matplotlib.patches.Polygon', 'mpatches.Polygon', (['[[0, 0], [0, r_parab[59, 1]], [r_parab[59, 1], r_parab[59, 1]]]'], {'fill': '(True)', 'color': '"""white"""', 'alpha': '(1)', 'zorder': '(3)'}), "([[0, 0], [0, r_parab[59, 1]], [r_parab[59, 1], r_parab[59,\n 1]]], fill=True, color='white', alpha=1, zorder=3)\n", (1785, 1899), True, 'import matplotlib.patches as mpatches\n'), ((1909, 2022), 'matplotlib.patches.Polygon', 'mpatches.Polygon', (['[[ymin, ymin], [xmax, ymin], [xmax, xmax]]'], {'fill': '(True)', 'color': '"""lightgrey"""', 'alpha': '(1)', 'zorder': '(3)'}), "([[ymin, ymin], [xmax, ymin], [xmax, xmax]], fill=True,\n color='lightgrey', alpha=1, zorder=3)\n", (1925, 2022), True, 'import matplotlib.patches as mpatches\n'), ((2460, 2472), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2470, 2472), True, 'import matplotlib.pyplot as plt\n'), ((2861, 2933), 'matplotlib.patches.Polygon', 'mpatches.Polygon', (['[[m, m], [M, m], [M, M]]'], {'fill': '(True)', 'color': '"""lightgrey"""'}), "([[m, m], [M, m], [M, M]], fill=True, color='lightgrey')\n", (2877, 2933), True, 'import matplotlib.patches as mpatches\n'), ((3297, 3314), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (3311, 3314), True, 'import numpy as np\n'), ((3340, 3357), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (3354, 3357), True, 'import numpy as np\n'), ((3505, 3519), 'numpy.cos', 'np.cos', (['theta1'], {}), '(theta1)\n', (3511, 3519), True, 'import numpy as np\n'), ((3557, 3571), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (3563, 3571), True, 'import numpy as np\n'), ((3585, 3599), 'numpy.sin', 'np.sin', (['theta2'], {}), '(theta2)\n', (3591, 3599), True, 'import numpy as np\n'), ((3609, 3628), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (3617, 3628), True, 'import numpy as np\n'), ((3821, 3846), 
'gudhi.AlphaComplex', 'gd.AlphaComplex', ([], {'points': 'X'}), '(points=X)\n', (3836, 3846), True, 'import gudhi as gd\n'), ((6575, 6603), 'numpy.argmin', 'np.argmin', (['M[:-1, :]'], {'axis': '(1)'}), '(M[:-1, :], axis=1)\n', (6584, 6603), True, 'import numpy as np\n'), ((6626, 6656), 'numpy.argmin', 'np.argmin', (['M[:-1, :-1]'], {'axis': '(1)'}), '(M[:-1, :-1], axis=1)\n', (6635, 6656), True, 'import numpy as np\n'), ((7825, 7846), 'numpy.sum', 'np.sum', (['dists_to_diag'], {}), '(dists_to_diag)\n', (7831, 7846), True, 'import numpy as np\n'), ((9731, 9823), 'matplotlib.patches.Polygon', 'mpatches.Polygon', (['[[low, low], [high, low], [high, high]]'], {'fill': '(True)', 'color': '"""lightgrey"""'}), "([[low, low], [high, low], [high, high]], fill=True, color=\n 'lightgrey')\n", (9747, 9823), True, 'import matplotlib.patches as mpatches\n'), ((429, 456), 'numpy.array', 'np.array', (['[[1, 1], [-1, 1]]'], {}), '([[1, 1], [-1, 1]])\n', (437, 456), True, 'import numpy as np\n'), ((3923, 3947), 'gudhi.RipsComplex', 'gd.RipsComplex', ([], {'points': 'X'}), '(points=X)\n', (3937, 3947), True, 'import gudhi as gd\n'), ((4158, 4170), 'numpy.array', 'np.array', (['h1'], {}), '(h1)\n', (4166, 4170), True, 'import numpy as np\n'), ((4277, 4289), 'numpy.array', 'np.array', (['h1'], {}), '(h1)\n', (4285, 4289), True, 'import numpy as np\n'), ((5928, 5962), 'scipy.spatial.distance.cdist', 'sc.cdist', (['X', 'Y'], {'metric': '"""chebyshev"""'}), "(X, Y, metric='chebyshev')\n", (5936, 5962), True, 'import scipy.spatial.distance as sc\n'), ((5994, 6042), 'scipy.spatial.distance.cdist', 'sc.cdist', (['X', 'Y'], {'metric': '"""minkowski"""', 'p': 'internal_p'}), "(X, Y, metric='minkowski', p=internal_p)\n", (6002, 6042), True, 'import scipy.spatial.distance as sc\n'), ((7444, 7462), 'numpy.max', 'np.max', (['dists_diag'], {}), '(dists_diag)\n', (7450, 7462), True, 'import numpy as np\n'), ((8398, 8424), 'numpy.arange', 'np.arange', (['(0)', 'n'], {'dtype': 'int'}), '(0, n, 
dtype=int)\n', (8407, 8424), True, 'import numpy as np\n'), ((414, 424), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (421, 424), True, 'import numpy as np\n'), ((3487, 3501), 'numpy.cos', 'np.cos', (['theta2'], {}), '(theta2)\n', (3493, 3501), True, 'import numpy as np\n'), ((3539, 3553), 'numpy.cos', 'np.cos', (['theta2'], {}), '(theta2)\n', (3545, 3553), True, 'import numpy as np\n'), ((7614, 7667), 'numpy.linalg.norm', 'np.linalg.norm', (['(cells_j - c_j)'], {'ord': 'internal_p', 'axis': '(1)'}), '(cells_j - c_j, ord=internal_p, axis=1)\n', (7628, 7667), True, 'import numpy as np\n'), ((2696, 2707), 'numpy.min', 'np.min', (['dgm'], {}), '(dgm)\n', (2702, 2707), True, 'import numpy as np\n'), ((2715, 2726), 'numpy.max', 'np.max', (['dgm'], {}), '(dgm)\n', (2721, 2726), True, 'import numpy as np\n'), ((3391, 3407), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3405, 3407), True, 'import numpy as np\n'), ((3445, 3461), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3459, 3461), True, 'import numpy as np\n'), ((3663, 3683), 'numpy.random.rand', 'np.random.rand', (['n', '(3)'], {}), '(n, 3)\n', (3677, 3683), True, 'import numpy as np\n'), ((7181, 7234), 'numpy.linalg.norm', 'np.linalg.norm', (['(cells_j - c_j)'], {'ord': 'internal_p', 'axis': '(1)'}), '(cells_j - c_j, ord=internal_p, axis=1)\n', (7195, 7234), True, 'import numpy as np\n'), ((9066, 9107), 'numpy.sum', 'np.sum', (['(c_current[j] - cells_2[j])'], {'axis': '(0)'}), '(c_current[j] - cells_2[j], axis=0)\n', (9072, 9107), True, 'import numpy as np\n')] |
"""
Unit test for the AbsorptionSpectrum analysis module
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2014, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
from yt.testing import \
assert_allclose_units, requires_file, requires_module, \
assert_almost_equal
from yt.analysis_modules.absorption_spectrum.absorption_line import \
voigt_old, voigt_scipy
from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
from yt.analysis_modules.cosmological_observation.api import LightRay
from yt.utilities.answer_testing.framework import \
GenericArrayTest, \
requires_answer_testing
import tempfile
import os
import shutil
from yt.utilities.on_demand_imports import \
_h5py as h5
from yt.convenience import load
# Datasets exercised by the tests below (resolved by requires_file / load).
COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"  # Enzo compound (multi-output) cosmology run
COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"  # a single output of the run above
GIZMO_PLUS = "gizmo_cosmology_plus/N128L16.param"  # Gizmo run, loaded via the 'Gadget' frontend below
GIZMO_PLUS_SINGLE = "gizmo_cosmology_plus/snap_N128L16_151.hdf5"  # a single Gizmo snapshot
ISO_GALAXY = "IsolatedGalaxy/galaxy0030/galaxy0030"  # isolated-galaxy grid dataset
FIRE = "FIRE_M12i_ref11/snapshot_600.hdf5"  # FIRE particle snapshot
@requires_file(COSMO_PLUS)
@requires_answer_testing()
def test_absorption_spectrum_cosmo():
    """
    This test generates an absorption spectrum from a compound light ray on a
    grid dataset, then yields one answer test per array stored in the
    resulting spectrum file.
    """
    # Set up in a temp dir so the ray/spectrum output files are isolated.
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)
    # Compound ray through the Enzo cosmology run, redshift 0.0 -> 0.03.
    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
    lr.make_light_ray(seed=1234567,
                      fields=['temperature', 'density', 'H_number_density'],
                      data_filename='lightray.h5')
    # Spectrum covering 900-1800 Angstroms with 10000 pixels.
    sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
    # H I Lyman-alpha line parameters.
    my_label = 'HI Lya'
    field = 'H_number_density'
    wavelength = 1215.6700 # Angstroms
    f_value = 4.164E-01  # oscillator strength
    gamma = 6.265e+08  # damping constant [1/s]
    mass = 1.00794  # hydrogen atomic mass [amu]
    sp.add_line(my_label, field, wavelength, f_value,
                gamma, mass, label_threshold=1.e10)
    # Power-law continuum feature at the Lyman limit (~912 Angstroms).
    my_label = 'HI Lya'
    field = 'H_number_density'
    wavelength = 912.323660 # Angstroms
    normalization = 1.6e17
    index = 3.0
    sp.add_continuum(my_label, field, wavelength, normalization, index)
    wavelength, flux = sp.make_spectrum('lightray.h5',
                                        output_file='spectrum.h5',
                                        line_list_file='lines.txt',
                                        use_peculiar_velocity=True)
    # load just-generated hdf5 file of spectral data (for consistency)
    data = h5.File('spectrum.h5', 'r')
    for key in data.keys():
        # Bind `key` as a default argument so each closure reads its own
        # dataset rather than the loop's final value.
        func = lambda x=key: data[x][:]
        func.__name__ = "{}_cosmo".format(key)
        test = GenericArrayTest(None, func)
        test_absorption_spectrum_cosmo.__name__ = test.description
        yield test
    # clean up (the h5 handle is left open: the yielded tests read lazily)
    os.chdir(curdir)
    shutil.rmtree(tmpdir)
@requires_file(COSMO_PLUS_SINGLE)
@requires_answer_testing()
def test_absorption_spectrum_non_cosmo():
    """
    Generate an absorption spectrum from a simple (single-dataset) light
    ray through a grid dataset and yield one answer test per array saved
    in the spectrum file.
    """
    # Run inside a scratch directory so all output files land in one
    # disposable place.
    scratch = tempfile.mkdtemp()
    origin = os.getcwd()
    os.chdir(scratch)
    # Cast a ray across the full domain, corner to corner.
    ray = LightRay(COSMO_PLUS_SINGLE)
    ray.make_light_ray(start_position=[0, 0, 0], end_position=[1, 1, 1],
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
    # Deposit the H I Lyman-alpha line on a 1200-1300 Angstrom grid.
    spectrum = AbsorptionSpectrum(1200.0, 1300.0, 10001)
    spectrum.add_line('HI Lya', 'H_number_density', 1215.6700, 4.164e-01,
                      6.265e+08, 1.00794, label_threshold=1.e10)
    wavelength, flux = spectrum.make_spectrum('lightray.h5',
                                              output_file='spectrum.h5',
                                              line_list_file='lines.txt',
                                              use_peculiar_velocity=True)
    # Re-open the freshly written spectrum file and turn every stored
    # array into an answer test.  The handle is intentionally left open:
    # the yielded tests read their arrays lazily.
    saved = h5.File('spectrum.h5', 'r')
    for array_key in saved.keys():
        fetch = lambda name=array_key: saved[name][:]
        fetch.__name__ = "{}_non_cosmo".format(array_key)
        answer_test = GenericArrayTest(None, fetch)
        test_absorption_spectrum_non_cosmo.__name__ = answer_test.description
        yield answer_test
    os.chdir(origin)
    shutil.rmtree(scratch)
@requires_file(COSMO_PLUS_SINGLE)
@requires_answer_testing()
def test_absorption_spectrum_non_cosmo_novpec():
    """
    This test generates an absorption spectrum from a simple light ray on a
    grid dataset, with peculiar velocities disabled in both the ray and the
    spectrum, and yields one answer test per stored array.
    """
    # Set up in a temp dir so the ray/spectrum output files are isolated.
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)
    lr = LightRay(COSMO_PLUS_SINGLE)
    # Ray across the full domain, corner to corner.
    ray_start = [0,0,0]
    ray_end = [1,1,1]
    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
                      fields=['temperature', 'density', 'H_number_density'],
                      data_filename='lightray.h5', use_peculiar_velocity=False)
    sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
    # H I Lyman-alpha line parameters.
    my_label = 'HI Lya'
    field = 'H_number_density'
    wavelength = 1215.6700 # Angstroms
    f_value = 4.164E-01  # oscillator strength
    gamma = 6.265e+08  # damping constant [1/s]
    mass = 1.00794  # hydrogen atomic mass [amu]
    sp.add_line(my_label, field, wavelength, f_value,
                gamma, mass, label_threshold=1.e10)
    wavelength, flux = sp.make_spectrum('lightray.h5',
                                        output_file='spectrum.h5',
                                        line_list_file='lines.txt',
                                        use_peculiar_velocity=False)
    # load just-generated hdf5 file of spectral data (for consistency)
    data = h5.File('spectrum.h5', 'r')
    for key in data.keys():
        # Bind `key` as a default argument so each closure reads its own
        # dataset rather than the loop's final value.
        func = lambda x=key: data[x][:]
        func.__name__ = "{}_non_cosmo_novpec".format(key)
        test = GenericArrayTest(None, func)
        test_absorption_spectrum_non_cosmo_novpec.__name__ = test.description
        yield test
    # clean up (the h5 handle is left open: the yielded tests read lazily)
    os.chdir(curdir)
    shutil.rmtree(tmpdir)
@requires_file(COSMO_PLUS_SINGLE)
def test_equivalent_width_conserved():
    """
    This tests that the equivalent width of the optical depth is conserved
    regardless of the bin width employed in wavelength space.
    Unresolved lines should still deposit optical depth into the spectrum.
    """
    # Set up in a temp dir so the ray/spectrum output files are isolated.
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)
    lr = LightRay(COSMO_PLUS_SINGLE)
    # Ray across the full domain, corner to corner.
    ray_start = [0, 0, 0]
    ray_end = [1, 1, 1]
    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
                      fields=['temperature', 'density', 'H_number_density'],
                      data_filename='lightray.h5')
    # H I Lyman-alpha line parameters, matching the other tests in this
    # file (the label here had been lost to a '<NAME>' placeholder).
    my_label = 'HI Lya'
    field = 'H_number_density'
    wave = 1215.6700  # Angstroms
    f_value = 4.164E-01  # oscillator strength
    gamma = 6.265e+08  # damping constant [1/s]
    mass = 1.00794  # hydrogen atomic mass [amu]
    lambda_min = 1200
    lambda_max = 1300
    lambda_bin_widths = [1e-3, 1e-2, 1e-1, 1e0, 1e1]
    total_tau = []
    for lambda_bin_width in lambda_bin_widths:
        # The pixel count must be an integer; bare float division can land
        # just below the intended value (e.g. 100/1e-3 -> 99999.999...),
        # so round before converting.
        n_lambda = int(round((lambda_max - lambda_min) / lambda_bin_width)) + 1
        sp = AbsorptionSpectrum(lambda_min=lambda_min, lambda_max=lambda_max,
                                n_lambda=n_lambda)
        sp.add_line(my_label, field, wave, f_value, gamma, mass)
        wavelength, flux = sp.make_spectrum('lightray.h5')
        # Equivalent width ~ sum(tau) * dlambda; this should not depend on
        # the bin width.
        total_tau.append((lambda_bin_width * sp.tau_field).sum())
    # assure that the total tau values are all within 1e-3 of each other
    for tau in total_tau:
        assert_almost_equal(tau, total_tau[0], 3)
    # clean up
    os.chdir(curdir)
    shutil.rmtree(tmpdir)
@requires_file(COSMO_PLUS_SINGLE)
@requires_module("astropy")
def test_absorption_spectrum_fits():
    """
    This test generates an absorption spectrum and saves it as a fits file
    (hence the astropy requirement).
    """
    # Set up in a temp dir so the ray/spectrum output files are isolated.
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)
    lr = LightRay(COSMO_PLUS_SINGLE)
    # Ray across the full domain, corner to corner.
    ray_start = [0,0,0]
    ray_end = [1,1,1]
    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
                      fields=['temperature', 'density', 'H_number_density'],
                      data_filename='lightray.h5')
    sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
    # H I Lyman-alpha line parameters.
    my_label = 'HI Lya'
    field = 'H_number_density'
    wavelength = 1215.6700 # Angstroms
    f_value = 4.164E-01  # oscillator strength
    gamma = 6.265e+08  # damping constant [1/s]
    mass = 1.00794  # hydrogen atomic mass [amu]
    sp.add_line(my_label, field, wavelength, f_value,
                gamma, mass, label_threshold=1.e10)
    # Power-law continuum feature at the Lyman limit (~912 Angstroms).
    my_label = 'HI Lya'
    field = 'H_number_density'
    wavelength = 912.323660 # Angstroms
    normalization = 1.6e17
    index = 3.0
    sp.add_continuum(my_label, field, wavelength, normalization, index)
    # The .fits extension selects FITS output (this is the point of the
    # test); no assertions beyond completing without error.
    wavelength, flux = sp.make_spectrum('lightray.h5',
                                        output_file='spectrum.fits',
                                        line_list_file='lines.txt',
                                        use_peculiar_velocity=True)
    # clean up
    os.chdir(curdir)
    shutil.rmtree(tmpdir)
@requires_module("scipy")
def test_voigt_profiles():
    """
    Verify that the legacy Voigt-profile implementation and the
    scipy-based one agree to within 1e-8 over a range of inputs.
    """
    damping = 1.7e-4
    samples = np.linspace(5.0, -3.6, 60)
    assert_allclose_units(voigt_old(damping, samples),
                          voigt_scipy(damping, samples), 1e-8)
@requires_file(GIZMO_PLUS)
@requires_answer_testing()
def test_absorption_spectrum_cosmo_sph():
    """
    This test generates an absorption spectrum from a compound light ray on a
    particle dataset, then yields one answer test per stored array.
    """
    # Set up in a temp dir so the ray/spectrum output files are isolated.
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)
    # Compound ray through the Gizmo run, redshift 0.0 -> 0.01.
    lr = LightRay(GIZMO_PLUS, 'Gadget', 0.0, 0.01)
    lr.make_light_ray(seed=1234567,
                      fields=[('gas', 'temperature'),
                              ('gas', 'H_number_density')],
                      data_filename='lightray.h5')
    sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
    # H I Lyman-alpha line parameters; note the field is a (type, name)
    # tuple for the particle frontend.
    my_label = 'HI Lya'
    field = ('gas', 'H_number_density')
    wavelength = 1215.6700 # Angstroms
    f_value = 4.164E-01  # oscillator strength
    gamma = 6.265e+08  # damping constant [1/s]
    mass = 1.00794  # hydrogen atomic mass [amu]
    sp.add_line(my_label, field, wavelength, f_value,
                gamma, mass, label_threshold=1.e10)
    # Power-law continuum feature at the Lyman limit (~912 Angstroms).
    my_label = 'HI Lya'
    field = ('gas', 'H_number_density')
    wavelength = 912.323660 # Angstroms
    normalization = 1.6e17
    index = 3.0
    sp.add_continuum(my_label, field, wavelength, normalization, index)
    wavelength, flux = sp.make_spectrum('lightray.h5',
                                        output_file='spectrum.h5',
                                        line_list_file='lines.txt',
                                        use_peculiar_velocity=True)
    # load just-generated hdf5 file of spectral data (for consistency)
    data = h5.File('spectrum.h5', 'r')
    for key in data.keys():
        # Bind `key` as a default argument so each closure reads its own
        # dataset rather than the loop's final value.
        func = lambda x=key: data[x][:]
        func.__name__ = "{}_cosmo_sph".format(key)
        test = GenericArrayTest(None, func)
        test_absorption_spectrum_cosmo_sph.__name__ = test.description
        yield test
    # clean up (the h5 handle is left open: the yielded tests read lazily)
    os.chdir(curdir)
    shutil.rmtree(tmpdir)
@requires_file(GIZMO_PLUS_SINGLE)
@requires_answer_testing()
def test_absorption_spectrum_non_cosmo_sph():
    """
    This test generates an absorption spectrum from a simple light ray on a
    particle dataset, then yields one answer test per stored array.
    """
    # Set up in a temp dir so the ray/spectrum output files are isolated.
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)
    ds = load(GIZMO_PLUS_SINGLE)
    lr = LightRay(ds)
    # Ray spanning the whole domain.
    ray_start = ds.domain_left_edge
    ray_end = ds.domain_right_edge
    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
                      fields=[('gas', 'temperature'),
                              ('gas', 'H_number_density')],
                      data_filename='lightray.h5')
    sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
    # H I Lyman-alpha line parameters; note the field is a (type, name)
    # tuple for the particle frontend.
    my_label = 'HI Lya'
    field = ('gas', 'H_number_density')
    wavelength = 1215.6700 # Angstroms
    f_value = 4.164E-01  # oscillator strength
    gamma = 6.265e+08  # damping constant [1/s]
    mass = 1.00794  # hydrogen atomic mass [amu]
    sp.add_line(my_label, field, wavelength, f_value,
                gamma, mass, label_threshold=1.e10)
    wavelength, flux = sp.make_spectrum('lightray.h5',
                                        output_file='spectrum.h5',
                                        line_list_file='lines.txt',
                                        use_peculiar_velocity=True)
    # load just-generated hdf5 file of spectral data (for consistency)
    data = h5.File('spectrum.h5', 'r')
    for key in data.keys():
        # Bind `key` as a default argument so each closure reads its own
        # dataset rather than the loop's final value.
        func = lambda x=key: data[x][:]
        func.__name__ = "{}_non_cosmo_sph".format(key)
        test = GenericArrayTest(None, func)
        test_absorption_spectrum_non_cosmo_sph.__name__ = test.description
        yield test
    # clean up (the h5 handle is left open: the yielded tests read lazily)
    os.chdir(curdir)
    shutil.rmtree(tmpdir)
@requires_file(ISO_GALAXY)
@requires_answer_testing()
def test_absorption_spectrum_with_continuum():
    """
    This test generates an absorption spectrum from a simple light ray on a
    grid dataset and adds Lyman alpha and Lyman continuum to it, then yields
    one answer test per stored array.
    """
    # Set up in a temp dir so the ray/spectrum output files are isolated.
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)
    ds = load(ISO_GALAXY)
    lr = LightRay(ds)
    # Ray spanning the whole domain.
    ray_start = ds.domain_left_edge
    ray_end = ds.domain_right_edge
    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
                      fields=['temperature', 'density', 'H_number_density'],
                      data_filename='lightray.h5')
    # Range extends below the Lyman limit so the continuum is exercised.
    sp = AbsorptionSpectrum(800.0, 1300.0, 5001)
    # H I Lyman-alpha line parameters.
    my_label = 'HI Lya'
    field = 'H_number_density'
    wavelength = 1215.6700 # Angstroms
    f_value = 4.164E-01  # oscillator strength
    gamma = 6.265e+08  # damping constant [1/s]
    mass = 1.00794  # hydrogen atomic mass [amu]
    sp.add_line(my_label, field, wavelength, f_value,
                gamma, mass, label_threshold=1.e10)
    # Lyman-continuum power law at the Lyman limit (~912 Angstroms).
    my_label = 'Ly C'
    field = 'H_number_density'
    wavelength = 912.323660 # Angstroms
    normalization = 1.6e17
    index = 3.0
    sp.add_continuum(my_label, field, wavelength, normalization, index)
    wavelength, flux = sp.make_spectrum('lightray.h5',
                                        output_file='spectrum.h5',
                                        line_list_file='lines.txt',
                                        use_peculiar_velocity=True)
    # load just-generated hdf5 file of spectral data (for consistency)
    data = h5.File('spectrum.h5', 'r')
    for key in data.keys():
        # Bind `key` as a default argument so each closure reads its own
        # dataset rather than the loop's final value.
        func = lambda x=key: data[x][:]
        func.__name__ = "{}_continuum".format(key)
        test = GenericArrayTest(None, func)
        test_absorption_spectrum_with_continuum.__name__ = test.description
        yield test
    # clean up (the h5 handle is left open: the yielded tests read lazily)
    os.chdir(curdir)
    shutil.rmtree(tmpdir)
@requires_file(FIRE)
def test_absorption_spectrum_with_zero_field():
    """
    This test generates an absorption spectrum from a FIRE particle dataset.
    NOTE(review): the "zero field" in the name presumably refers to the
    deposited field being zero/absent on this snapshot -- confirm; the body
    only checks the pipeline completes without error.
    """
    # Set up in a temp dir so the ray/spectrum output files are isolated.
    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)
    ds = load(FIRE)
    lr = LightRay(ds)
    # Define species and associated parameters to add to continuum
    # Parameters used for both adding the transition to the spectrum
    # and for fitting
    # Note that for single species that produce multiple lines
    # (as in the OVI doublet), 'numLines' will be equal to the number
    # of lines, and f,gamma, and wavelength will have multiple values.
    HI_parameters = {
        'name': 'HI',
        'field': 'H_number_density',  # field deposited along the ray
        'f': [.4164],  # oscillator strength(s)
        'Gamma': [6.265E8],  # damping constant(s) [1/s]
        'wavelength': [1215.67],  # rest wavelength(s) [Angstroms]
        'mass': 1.00794,  # hydrogen atomic mass [amu]
        'numLines': 1,
        'maxN': 1E22, 'minN': 1E11,
        'maxb': 300, 'minb': 1,
        'maxz': 6, 'minz': 0,
        'init_b': 30,
        'init_N': 1E14
    }
    species_dicts = {'HI': HI_parameters}
    # Get all fields that need to be added to the light ray
    fields = [('gas','temperature')]
    for s, params in species_dicts.items():
        fields.append(params['field'])
    # With a single dataset, a start_position and
    # end_position or trajectory must be given.
    # Trajectory should be given as (r, theta, phi)
    lr.make_light_ray(
        start_position=ds.arr([0., 0., 0.], 'unitary'),
        end_position=ds.arr([1., 1., 1.], 'unitary'),
        solution_filename='test_lightraysolution.txt',
        data_filename='test_lightray.h5',
        fields=fields)
    # Create an AbsorptionSpectrum object extending from
    # lambda = 900 to lambda = 1400, with 50000 pixels
    sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
    # Iterate over species
    for s, params in species_dicts.items():
        # Iterate over transitions for a single species
        for i in range(params['numLines']):
            # Add the lines to the spectrum
            sp.add_line(
                s, params['field'],
                params['wavelength'][i], params['f'][i],
                params['Gamma'][i], params['mass'],
                label_threshold=1.e10)
    # Make and save spectrum
    wavelength, flux = sp.make_spectrum(
        'test_lightray.h5',
        output_file='test_spectrum.h5',
        line_list_file='test_lines.txt',
        use_peculiar_velocity=True)
    # clean up
    os.chdir(curdir)
    shutil.rmtree(tmpdir)
| [
"yt.analysis_modules.absorption_spectrum.absorption_line.voigt_scipy",
"yt.analysis_modules.absorption_spectrum.absorption_line.voigt_old",
"yt.utilities.on_demand_imports._h5py.File",
"shutil.rmtree",
"yt.utilities.answer_testing.framework.GenericArrayTest",
"os.getcwd",
"yt.analysis_modules.absorption... | [((1337, 1362), 'yt.testing.requires_file', 'requires_file', (['COSMO_PLUS'], {}), '(COSMO_PLUS)\n', (1350, 1362), False, 'from yt.testing import assert_allclose_units, requires_file, requires_module, assert_almost_equal\n'), ((1364, 1389), 'yt.utilities.answer_testing.framework.requires_answer_testing', 'requires_answer_testing', ([], {}), '()\n', (1387, 1389), False, 'from yt.utilities.answer_testing.framework import GenericArrayTest, requires_answer_testing\n'), ((3073, 3105), 'yt.testing.requires_file', 'requires_file', (['COSMO_PLUS_SINGLE'], {}), '(COSMO_PLUS_SINGLE)\n', (3086, 3105), False, 'from yt.testing import assert_allclose_units, requires_file, requires_module, assert_almost_equal\n'), ((3107, 3132), 'yt.utilities.answer_testing.framework.requires_answer_testing', 'requires_answer_testing', ([], {}), '()\n', (3130, 3132), False, 'from yt.utilities.answer_testing.framework import GenericArrayTest, requires_answer_testing\n'), ((4687, 4719), 'yt.testing.requires_file', 'requires_file', (['COSMO_PLUS_SINGLE'], {}), '(COSMO_PLUS_SINGLE)\n', (4700, 4719), False, 'from yt.testing import assert_allclose_units, requires_file, requires_module, assert_almost_equal\n'), ((4721, 4746), 'yt.utilities.answer_testing.framework.requires_answer_testing', 'requires_answer_testing', ([], {}), '()\n', (4744, 4746), False, 'from yt.utilities.answer_testing.framework import GenericArrayTest, requires_answer_testing\n'), ((6348, 6380), 'yt.testing.requires_file', 'requires_file', (['COSMO_PLUS_SINGLE'], {}), '(COSMO_PLUS_SINGLE)\n', (6361, 6380), False, 'from yt.testing import assert_allclose_units, requires_file, requires_module, assert_almost_equal\n'), ((7970, 8002), 'yt.testing.requires_file', 'requires_file', (['COSMO_PLUS_SINGLE'], {}), '(COSMO_PLUS_SINGLE)\n', (7983, 8002), False, 'from yt.testing import assert_allclose_units, requires_file, requires_module, assert_almost_equal\n'), ((8004, 8030), 'yt.testing.requires_module', 
'requires_module', (['"""astropy"""'], {}), "('astropy')\n", (8019, 8030), False, 'from yt.testing import assert_allclose_units, requires_file, requires_module, assert_almost_equal\n'), ((9409, 9433), 'yt.testing.requires_module', 'requires_module', (['"""scipy"""'], {}), "('scipy')\n", (9424, 9433), False, 'from yt.testing import assert_allclose_units, requires_file, requires_module, assert_almost_equal\n'), ((9581, 9606), 'yt.testing.requires_file', 'requires_file', (['GIZMO_PLUS'], {}), '(GIZMO_PLUS)\n', (9594, 9606), False, 'from yt.testing import assert_allclose_units, requires_file, requires_module, assert_almost_equal\n'), ((9608, 9633), 'yt.utilities.answer_testing.framework.requires_answer_testing', 'requires_answer_testing', ([], {}), '()\n', (9631, 9633), False, 'from yt.utilities.answer_testing.framework import GenericArrayTest, requires_answer_testing\n'), ((11391, 11423), 'yt.testing.requires_file', 'requires_file', (['GIZMO_PLUS_SINGLE'], {}), '(GIZMO_PLUS_SINGLE)\n', (11404, 11423), False, 'from yt.testing import assert_allclose_units, requires_file, requires_module, assert_almost_equal\n'), ((11425, 11450), 'yt.utilities.answer_testing.framework.requires_answer_testing', 'requires_answer_testing', ([], {}), '()\n', (11448, 11450), False, 'from yt.utilities.answer_testing.framework import GenericArrayTest, requires_answer_testing\n'), ((13110, 13135), 'yt.testing.requires_file', 'requires_file', (['ISO_GALAXY'], {}), '(ISO_GALAXY)\n', (13123, 13135), False, 'from yt.testing import assert_allclose_units, requires_file, requires_module, assert_almost_equal\n'), ((13137, 13162), 'yt.utilities.answer_testing.framework.requires_answer_testing', 'requires_answer_testing', ([], {}), '()\n', (13160, 13162), False, 'from yt.utilities.answer_testing.framework import GenericArrayTest, requires_answer_testing\n'), ((15019, 15038), 'yt.testing.requires_file', 'requires_file', (['FIRE'], {}), '(FIRE)\n', (15032, 15038), False, 'from yt.testing import 
assert_allclose_units, requires_file, requires_module, assert_almost_equal\n'), ((1579, 1597), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1595, 1597), False, 'import tempfile\n'), ((1611, 1622), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1620, 1622), False, 'import os\n'), ((1627, 1643), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (1635, 1643), False, 'import os\n'), ((1654, 1693), 'yt.analysis_modules.cosmological_observation.api.LightRay', 'LightRay', (['COSMO_PLUS', '"""Enzo"""', '(0.0)', '(0.03)'], {}), "(COSMO_PLUS, 'Enzo', 0.0, 0.03)\n", (1662, 1693), False, 'from yt.analysis_modules.cosmological_observation.api import LightRay\n'), ((1869, 1909), 'yt.analysis_modules.absorption_spectrum.api.AbsorptionSpectrum', 'AbsorptionSpectrum', (['(900.0)', '(1800.0)', '(10000)'], {}), '(900.0, 1800.0, 10000)\n', (1887, 1909), False, 'from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum\n'), ((2734, 2761), 'yt.utilities.on_demand_imports._h5py.File', 'h5.File', (['"""spectrum.h5"""', '"""r"""'], {}), "('spectrum.h5', 'r')\n", (2741, 2761), True, 'from yt.utilities.on_demand_imports import _h5py as h5\n'), ((3028, 3044), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (3036, 3044), False, 'import os\n'), ((3049, 3070), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (3062, 3070), False, 'import shutil\n'), ((3325, 3343), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3341, 3343), False, 'import tempfile\n'), ((3357, 3368), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3366, 3368), False, 'import os\n'), ((3373, 3389), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (3381, 3389), False, 'import os\n'), ((3400, 3427), 'yt.analysis_modules.cosmological_observation.api.LightRay', 'LightRay', (['COSMO_PLUS_SINGLE'], {}), '(COSMO_PLUS_SINGLE)\n', (3408, 3427), False, 'from yt.analysis_modules.cosmological_observation.api import LightRay\n'), ((3683, 3724), 
'yt.analysis_modules.absorption_spectrum.api.AbsorptionSpectrum', 'AbsorptionSpectrum', (['(1200.0)', '(1300.0)', '(10001)'], {}), '(1200.0, 1300.0, 10001)\n', (3701, 3724), False, 'from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum\n'), ((4336, 4363), 'yt.utilities.on_demand_imports._h5py.File', 'h5.File', (['"""spectrum.h5"""', '"""r"""'], {}), "('spectrum.h5', 'r')\n", (4343, 4363), True, 'from yt.utilities.on_demand_imports import _h5py as h5\n'), ((4642, 4658), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (4650, 4658), False, 'import os\n'), ((4663, 4684), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (4676, 4684), False, 'import shutil\n'), ((4946, 4964), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (4962, 4964), False, 'import tempfile\n'), ((4978, 4989), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4987, 4989), False, 'import os\n'), ((4994, 5010), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (5002, 5010), False, 'import os\n'), ((5021, 5048), 'yt.analysis_modules.cosmological_observation.api.LightRay', 'LightRay', (['COSMO_PLUS_SINGLE'], {}), '(COSMO_PLUS_SINGLE)\n', (5029, 5048), False, 'from yt.analysis_modules.cosmological_observation.api import LightRay\n'), ((5333, 5374), 'yt.analysis_modules.absorption_spectrum.api.AbsorptionSpectrum', 'AbsorptionSpectrum', (['(1200.0)', '(1300.0)', '(10001)'], {}), '(1200.0, 1300.0, 10001)\n', (5351, 5374), False, 'from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum\n'), ((5987, 6014), 'yt.utilities.on_demand_imports._h5py.File', 'h5.File', (['"""spectrum.h5"""', '"""r"""'], {}), "('spectrum.h5', 'r')\n", (5994, 6014), True, 'from yt.utilities.on_demand_imports import _h5py as h5\n'), ((6303, 6319), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (6311, 6319), False, 'import os\n'), ((6324, 6345), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (6337, 6345), False, 'import shutil\n'), 
((6690, 6708), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (6706, 6708), False, 'import tempfile\n'), ((6722, 6733), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6731, 6733), False, 'import os\n'), ((6738, 6754), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (6746, 6754), False, 'import os\n'), ((6765, 6792), 'yt.analysis_modules.cosmological_observation.api.LightRay', 'LightRay', (['COSMO_PLUS_SINGLE'], {}), '(COSMO_PLUS_SINGLE)\n', (6773, 6792), False, 'from yt.analysis_modules.cosmological_observation.api import LightRay\n'), ((7924, 7940), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (7932, 7940), False, 'import os\n'), ((7945, 7966), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (7958, 7966), False, 'import shutil\n'), ((8201, 8219), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (8217, 8219), False, 'import tempfile\n'), ((8233, 8244), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8242, 8244), False, 'import os\n'), ((8249, 8265), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (8257, 8265), False, 'import os\n'), ((8276, 8303), 'yt.analysis_modules.cosmological_observation.api.LightRay', 'LightRay', (['COSMO_PLUS_SINGLE'], {}), '(COSMO_PLUS_SINGLE)\n', (8284, 8303), False, 'from yt.analysis_modules.cosmological_observation.api import LightRay\n'), ((8559, 8599), 'yt.analysis_modules.absorption_spectrum.api.AbsorptionSpectrum', 'AbsorptionSpectrum', (['(900.0)', '(1800.0)', '(10000)'], {}), '(900.0, 1800.0, 10000)\n', (8577, 8599), False, 'from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum\n'), ((9363, 9379), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (9371, 9379), False, 'import os\n'), ((9384, 9405), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (9397, 9405), False, 'import shutil\n'), ((9484, 9510), 'numpy.linspace', 'np.linspace', (['(5.0)', '(-3.6)', '(60)'], {}), '(5.0, -3.6, 60)\n', (9495, 9510), True, 'import numpy as 
np\n'), ((9831, 9849), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (9847, 9849), False, 'import tempfile\n'), ((9863, 9874), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9872, 9874), False, 'import os\n'), ((9879, 9895), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (9887, 9895), False, 'import os\n'), ((9906, 9947), 'yt.analysis_modules.cosmological_observation.api.LightRay', 'LightRay', (['GIZMO_PLUS', '"""Gadget"""', '(0.0)', '(0.01)'], {}), "(GIZMO_PLUS, 'Gadget', 0.0, 0.01)\n", (9914, 9947), False, 'from yt.analysis_modules.cosmological_observation.api import LightRay\n'), ((10161, 10201), 'yt.analysis_modules.absorption_spectrum.api.AbsorptionSpectrum', 'AbsorptionSpectrum', (['(900.0)', '(1800.0)', '(10000)'], {}), '(900.0, 1800.0, 10000)\n', (10179, 10201), False, 'from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum\n'), ((11044, 11071), 'yt.utilities.on_demand_imports._h5py.File', 'h5.File', (['"""spectrum.h5"""', '"""r"""'], {}), "('spectrum.h5', 'r')\n", (11051, 11071), True, 'from yt.utilities.on_demand_imports import _h5py as h5\n'), ((11346, 11362), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (11354, 11362), False, 'import os\n'), ((11367, 11388), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (11380, 11388), False, 'import shutil\n'), ((11651, 11669), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (11667, 11669), False, 'import tempfile\n'), ((11683, 11694), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11692, 11694), False, 'import os\n'), ((11699, 11715), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (11707, 11715), False, 'import os\n'), ((11726, 11749), 'yt.convenience.load', 'load', (['GIZMO_PLUS_SINGLE'], {}), '(GIZMO_PLUS_SINGLE)\n', (11730, 11749), False, 'from yt.convenience import load\n'), ((11759, 11771), 'yt.analysis_modules.cosmological_observation.api.LightRay', 'LightRay', (['ds'], {}), '(ds)\n', (11767, 11771), False, 'from 
yt.analysis_modules.cosmological_observation.api import LightRay\n'), ((12089, 12130), 'yt.analysis_modules.absorption_spectrum.api.AbsorptionSpectrum', 'AbsorptionSpectrum', (['(1200.0)', '(1300.0)', '(10001)'], {}), '(1200.0, 1300.0, 10001)\n', (12107, 12130), False, 'from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum\n'), ((12751, 12778), 'yt.utilities.on_demand_imports._h5py.File', 'h5.File', (['"""spectrum.h5"""', '"""r"""'], {}), "('spectrum.h5', 'r')\n", (12758, 12778), True, 'from yt.utilities.on_demand_imports import _h5py as h5\n'), ((13065, 13081), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (13073, 13081), False, 'import os\n'), ((13086, 13107), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (13099, 13107), False, 'import shutil\n'), ((13407, 13425), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (13423, 13425), False, 'import tempfile\n'), ((13439, 13450), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13448, 13450), False, 'import os\n'), ((13455, 13471), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (13463, 13471), False, 'import os\n'), ((13482, 13498), 'yt.convenience.load', 'load', (['ISO_GALAXY'], {}), '(ISO_GALAXY)\n', (13486, 13498), False, 'from yt.convenience import load\n'), ((13508, 13520), 'yt.analysis_modules.cosmological_observation.api.LightRay', 'LightRay', (['ds'], {}), '(ds)\n', (13516, 13520), False, 'from yt.analysis_modules.cosmological_observation.api import LightRay\n'), ((13801, 13840), 'yt.analysis_modules.absorption_spectrum.api.AbsorptionSpectrum', 'AbsorptionSpectrum', (['(800.0)', '(1300.0)', '(5001)'], {}), '(800.0, 1300.0, 5001)\n', (13819, 13840), False, 'from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum\n'), ((14663, 14690), 'yt.utilities.on_demand_imports._h5py.File', 'h5.File', (['"""spectrum.h5"""', '"""r"""'], {}), "('spectrum.h5', 'r')\n", (14670, 14690), True, 'from yt.utilities.on_demand_imports import _h5py as 
h5\n'), ((14974, 14990), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (14982, 14990), False, 'import os\n'), ((14995, 15016), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (15008, 15016), False, 'import shutil\n'), ((15223, 15241), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (15239, 15241), False, 'import tempfile\n'), ((15255, 15266), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (15264, 15266), False, 'import os\n'), ((15271, 15287), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (15279, 15287), False, 'import os\n'), ((15298, 15308), 'yt.convenience.load', 'load', (['FIRE'], {}), '(FIRE)\n', (15302, 15308), False, 'from yt.convenience import load\n'), ((15318, 15330), 'yt.analysis_modules.cosmological_observation.api.LightRay', 'LightRay', (['ds'], {}), '(ds)\n', (15326, 15330), False, 'from yt.analysis_modules.cosmological_observation.api import LightRay\n'), ((16811, 16851), 'yt.analysis_modules.absorption_spectrum.api.AbsorptionSpectrum', 'AbsorptionSpectrum', (['(900.0)', '(1400.0)', '(50000)'], {}), '(900.0, 1400.0, 50000)\n', (16829, 16851), False, 'from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum\n'), ((17526, 17542), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (17534, 17542), False, 'import os\n'), ((17547, 17568), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (17560, 17568), False, 'import shutil\n'), ((2893, 2921), 'yt.utilities.answer_testing.framework.GenericArrayTest', 'GenericArrayTest', (['None', 'func'], {}), '(None, func)\n', (2909, 2921), False, 'from yt.utilities.answer_testing.framework import GenericArrayTest, requires_answer_testing\n'), ((4503, 4531), 'yt.utilities.answer_testing.framework.GenericArrayTest', 'GenericArrayTest', (['None', 'func'], {}), '(None, func)\n', (4519, 4531), False, 'from yt.utilities.answer_testing.framework import GenericArrayTest, requires_answer_testing\n'), ((6157, 6185), 
'yt.utilities.answer_testing.framework.GenericArrayTest', 'GenericArrayTest', (['None', 'func'], {}), '(None, func)\n', (6173, 6185), False, 'from yt.utilities.answer_testing.framework import GenericArrayTest, requires_answer_testing\n'), ((7439, 7527), 'yt.analysis_modules.absorption_spectrum.api.AbsorptionSpectrum', 'AbsorptionSpectrum', ([], {'lambda_min': 'lambda_min', 'lambda_max': 'lambda_max', 'n_lambda': 'n_lambda'}), '(lambda_min=lambda_min, lambda_max=lambda_max, n_lambda=\n n_lambda)\n', (7457, 7527), False, 'from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum\n'), ((7862, 7903), 'yt.testing.assert_almost_equal', 'assert_almost_equal', (['tau', 'total_tau[0]', '(3)'], {}), '(tau, total_tau[0], 3)\n', (7881, 7903), False, 'from yt.testing import assert_allclose_units, requires_file, requires_module, assert_almost_equal\n'), ((9537, 9552), 'yt.analysis_modules.absorption_spectrum.absorption_line.voigt_old', 'voigt_old', (['a', 'x'], {}), '(a, x)\n', (9546, 9552), False, 'from yt.analysis_modules.absorption_spectrum.absorption_line import voigt_old, voigt_scipy\n'), ((9554, 9571), 'yt.analysis_modules.absorption_spectrum.absorption_line.voigt_scipy', 'voigt_scipy', (['a', 'x'], {}), '(a, x)\n', (9565, 9571), False, 'from yt.analysis_modules.absorption_spectrum.absorption_line import voigt_old, voigt_scipy\n'), ((11207, 11235), 'yt.utilities.answer_testing.framework.GenericArrayTest', 'GenericArrayTest', (['None', 'func'], {}), '(None, func)\n', (11223, 11235), False, 'from yt.utilities.answer_testing.framework import GenericArrayTest, requires_answer_testing\n'), ((12922, 12950), 'yt.utilities.answer_testing.framework.GenericArrayTest', 'GenericArrayTest', (['None', 'func'], {}), '(None, func)\n', (12938, 12950), False, 'from yt.utilities.answer_testing.framework import GenericArrayTest, requires_answer_testing\n'), ((14830, 14858), 'yt.utilities.answer_testing.framework.GenericArrayTest', 'GenericArrayTest', (['None', 'func'], {}), 
'(None, func)\n', (14846, 14858), False, 'from yt.utilities.answer_testing.framework import GenericArrayTest, requires_answer_testing\n')] |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
from collections import defaultdict
import cv2
import numpy as np
from megengine.data import Collator, RandomSampler
from official.vision.detection.tools.data_mapper import data_mapper
from official.vision.detection.tools.nms import py_cpu_nms
class AverageMeter:
    """Accumulates per-slot running sums and reports their means.

    Tracks ``record_len`` parallel values: ``update`` adds one sample per
    slot, ``average`` returns the mean of each slot seen so far.
    """
    def __init__(self, record_len=1):
        self.record_len = record_len
        self.sum = [0] * record_len
        self.cnt = 0
    def reset(self):
        # Drop all accumulated samples and start over.
        self.sum = [0] * self.record_len
        self.cnt = 0
    def update(self, val):
        # Element-wise accumulation of one new sample vector.
        self.sum = [acc + new for acc, new in zip(self.sum, val)]
        self.cnt += 1
    def average(self):
        return [acc / self.cnt for acc in self.sum]
class GroupedRandomSampler(RandomSampler):
    """Random sampler that emits batches whose samples share a group id.

    Indices drawn by the parent sampler are buffered per group id; whenever
    a group's buffer reaches ``batch_size`` it is flushed as one batch.
    """
    def __init__(
        self,
        dataset,
        batch_size,
        group_ids,
        indices=None,
        world_size=None,
        rank=None,
        seed=None,
    ):
        super().__init__(dataset, batch_size, False, indices, world_size, rank, seed)
        self.group_ids = group_ids
        assert len(group_ids) == len(dataset)
        # One pending-index buffer for every distinct group id.
        self.buffer_per_group = {gid: [] for gid in np.unique(self.group_ids).tolist()}
    def batch(self):
        drawn = list(self.sample())
        if self.world_size > 1:
            drawn = self.scatter(drawn)
        batch_index = []
        for idx in drawn:
            gid = self.group_ids[idx]
            bucket = self.buffer_per_group[gid]
            bucket.append(idx)
            if len(bucket) == self.batch_size:
                # Flush the filled bucket as one homogeneous batch.
                batch_index.append(bucket)
                self.buffer_per_group[gid] = []
        return iter(batch_index)
    def __len__(self):
        raise NotImplementedError("len() of GroupedRandomSampler is not well-defined.")
class DetectionPadCollator(Collator):
    """Collator that zero-pads variable-sized detection samples into a batch.

    Each sample is ``(image, boxes, boxes_category, info)``; boxes and their
    class labels are fused into one float32 (N, 5) array and every field is
    padded at the trailing end to the largest shape in the batch.
    """
    def __init__(self, pad_value: float = 0.0):
        super().__init__()
        self.pad_value = pad_value
    def apply(self, inputs):
        """
        assume order = ["image", "boxes", "boxes_category", "info"]
        """
        batch_data = defaultdict(list)
        for image, boxes, boxes_category, info in inputs:
            batch_data["data"].append(image)
            # Fuse box coordinates with the class id column.
            fused = np.concatenate([boxes, boxes_category[:, np.newaxis]], axis=1)
            batch_data["gt_boxes"].append(fused.astype(np.float32))
            _, current_height, current_width = image.shape
            assert len(boxes) == len(boxes_category)
            batch_data["im_info"].append(
                np.array(
                    [
                        current_height,
                        current_width,
                        info[0],
                        info[1],
                        len(boxes),
                    ],
                    dtype=np.float32,
                )
            )
        for key, value in batch_data.items():
            # Target shape per field: element-wise max across the batch.
            pad_shape = list(max(s) for s in zip(*[x.shape for x in value]))
            padded = [
                np.pad(
                    v,
                    self._get_padding(v.shape, pad_shape),
                    constant_values=self.pad_value,
                )
                for v in value
            ]
            batch_data[key] = np.ascontiguousarray(padded)
        return batch_data
    def _get_padding(self, original_shape, target_shape):
        assert len(original_shape) == len(target_shape)
        # (before, after) per axis; pad only at the trailing end.
        return tuple((0, t - o) for o, t in zip(original_shape, target_shape))
class DetEvaluator:
    """Inference helper for a detection model: input pre-processing,
    raw-output post-processing (score threshold + per-class NMS), result
    formatting and visualization."""
    def __init__(self, model):
        self.model = model
    @staticmethod
    def get_hw_by_short_size(im_height, im_width, short_size, max_size):
        """get height and width by short size
        Args:
            im_height(int): height of original image, e.g. 800
            im_width(int): width of original image, e.g. 1000
            short_size(int): short size of transformed image. e.g. 800
            max_size(int): max size of transformed image. e.g. 1333
        Returns:
            resized_height(int): height of transformed image
            resized_width(int): width of transformed image
        """
        im_size_min = np.min([im_height, im_width])
        im_size_max = np.max([im_height, im_width])
        scale = (short_size + 0.0) / im_size_min
        if scale * im_size_max > max_size:
            # Clamp the scale so the longer side never exceeds max_size.
            scale = (max_size + 0.0) / im_size_max
        resized_height, resized_width = (
            int(round(im_height * scale)),
            int(round(im_width * scale)),
        )
        return resized_height, resized_width
    @staticmethod
    def process_inputs(img, short_size, max_size, flip=False):
        """Resize (and optionally horizontally flip) an HWC image into the
        NCHW uint8 tensor plus the (resized_h, resized_w, orig_h, orig_w)
        image-info array the network expects."""
        original_height, original_width, _ = img.shape
        resized_height, resized_width = DetEvaluator.get_hw_by_short_size(
            original_height, original_width, short_size, max_size
        )
        resized_img = cv2.resize(
            img, (resized_width, resized_height), interpolation=cv2.INTER_LINEAR,
        )
        resized_img = cv2.flip(resized_img, 1) if flip else resized_img
        # HWC -> NCHW, contiguous memory for the inference engine.
        trans_img = np.ascontiguousarray(
            resized_img.transpose(2, 0, 1)[None, :, :, :], dtype=np.uint8
        )
        im_info = np.array(
            [(resized_height, resized_width, original_height, original_width)],
            dtype=np.float32,
        )
        return trans_img, im_info
    def predict(self, val_func):
        """
        Args:
            val_func(callable): model inference function
        Returns:
            results boxes: detection model output
        """
        model = self.model
        box_cls, box_delta = val_func()
        box_cls, box_delta = box_cls.numpy(), box_delta.numpy()
        dtboxes_all = list()
        # Candidate (sample, class) pairs above the score threshold.
        all_inds = np.where(box_cls > model.cfg.test_cls_threshold)
        for c in range(0, model.cfg.num_classes):
            inds = np.where(all_inds[1] == c)[0]
            inds = all_inds[0][inds]
            scores = box_cls[inds, c]
            if model.cfg.class_aware_box:
                bboxes = box_delta[inds, c, :]
            else:
                bboxes = box_delta[inds, :]
            dtboxes = np.hstack((bboxes, scores[:, np.newaxis])).astype(np.float32)
            if dtboxes.size > 0:
                # Per-class NMS, then append the class id as a 6th column.
                keep = py_cpu_nms(dtboxes, model.cfg.test_nms)
                dtboxes = np.hstack(
                    (dtboxes[keep], np.ones((len(keep), 1), np.float32) * c)
                ).astype(np.float32)
                dtboxes_all.extend(dtboxes)
        if len(dtboxes_all) > model.cfg.test_max_boxes_per_image:
            # Keep only the highest-scoring boxes (score is column 4).
            dtboxes_all = sorted(dtboxes_all, reverse=True, key=lambda i: i[4])[
                : model.cfg.test_max_boxes_per_image
            ]
        # BUG FIX: `np.float` was a deprecated alias removed in NumPy 1.24;
        # np.float64 is the exact type the alias used to resolve to.
        dtboxes_all = np.array(dtboxes_all, dtype=np.float64)
        return dtboxes_all
    @staticmethod
    def format(results, cfg):
        """Convert raw per-image detections into COCO-style result dicts
        (xywh boxes, score, category id)."""
        dataset_class = data_mapper[cfg.test_dataset["name"]]
        all_results = []
        for record in results:
            image_filename = record["image_id"]
            boxes = record["det_res"]
            if len(boxes) <= 0:
                continue
            # x1y1x2y2 -> x1y1wh as required by the COCO result format.
            boxes[:, 2:4] = boxes[:, 2:4] - boxes[:, 0:2]
            for box in boxes:
                elem = dict()
                elem["image_id"] = image_filename
                elem["bbox"] = box[:4].tolist()
                elem["score"] = box[4]
                if hasattr(dataset_class, "classes_originID"):
                    # Map the contiguous training id back to the dataset's
                    # original category id.
                    elem["category_id"] = dataset_class.classes_originID[
                        dataset_class.class_names[int(box[5])]
                    ]
                else:
                    elem["category_id"] = int(box[5])
                all_results.append(elem)
        return all_results
    @staticmethod
    def vis_det(
        img,
        dets,
        is_show_label=True,
        classes=None,
        thresh=0.3,
        name="detection",
        return_img=True,
    ):
        """Draw detections on ``img``. Returns the drawn image when
        ``return_img`` is True, otherwise shows it in an OpenCV window
        ('d' dismisses, 'n' continues)."""
        img = np.array(img)
        colors = dict()
        font = cv2.FONT_HERSHEY_SIMPLEX
        for det in dets:
            bb = det[:4].astype(int)
            if is_show_label:
                cls_id = int(det[5])
                score = det[4]
                # Class 0 is skipped when labels are shown (background?
                # NOTE(review): confirm against the model's class layout).
                if cls_id == 0:
                    continue
                if score > thresh:
                    if cls_id not in colors:
                        # One random color per class, chosen lazily.
                        colors[cls_id] = (
                            random.random() * 255,
                            random.random() * 255,
                            random.random() * 255,
                        )
                    cv2.rectangle(
                        img, (bb[0], bb[1]), (bb[2], bb[3]), colors[cls_id], 3
                    )
                    if classes and len(classes) > cls_id:
                        cls_name = classes[cls_id]
                    else:
                        cls_name = str(cls_id)
                    cv2.putText(
                        img,
                        "{:s} {:.3f}".format(cls_name, score),
                        (bb[0], bb[1] - 2),
                        font,
                        0.5,
                        (255, 255, 255),
                        1,
                    )
            else:
                cv2.rectangle(img, (bb[0], bb[1]), (bb[2], bb[3]), (0, 0, 255), 2)
        if return_img:
            return img
        cv2.imshow(name, img)
        while True:
            c = cv2.waitKey(100000)
            if c == ord("d"):
                return None
            elif c == ord("n"):
                break
| [
"numpy.concatenate",
"cv2.waitKey",
"numpy.unique",
"official.vision.detection.tools.nms.py_cpu_nms",
"numpy.hstack",
"collections.defaultdict",
"random.random",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.where",
"cv2.rectangle",
"cv2.flip",
"cv2.imshow",
"numpy.ascontiguousarray",
... | [((2645, 2662), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2656, 2662), False, 'from collections import defaultdict\n'), ((4774, 4803), 'numpy.min', 'np.min', (['[im_height, im_width]'], {}), '([im_height, im_width])\n', (4780, 4803), True, 'import numpy as np\n'), ((4826, 4855), 'numpy.max', 'np.max', (['[im_height, im_width]'], {}), '([im_height, im_width])\n', (4832, 4855), True, 'import numpy as np\n'), ((5492, 5577), 'cv2.resize', 'cv2.resize', (['img', '(resized_width, resized_height)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, (resized_width, resized_height), interpolation=cv2.INTER_LINEAR\n )\n', (5502, 5577), False, 'import cv2\n'), ((5812, 5910), 'numpy.array', 'np.array', (['[(resized_height, resized_width, original_height, original_width)]'], {'dtype': 'np.float32'}), '([(resized_height, resized_width, original_height, original_width)],\n dtype=np.float32)\n', (5820, 5910), True, 'import numpy as np\n'), ((6353, 6401), 'numpy.where', 'np.where', (['(box_cls > model.cfg.test_cls_threshold)'], {}), '(box_cls > model.cfg.test_cls_threshold)\n', (6361, 6401), True, 'import numpy as np\n'), ((7343, 7380), 'numpy.array', 'np.array', (['dtboxes_all'], {'dtype': 'np.float'}), '(dtboxes_all, dtype=np.float)\n', (7351, 7380), True, 'import numpy as np\n'), ((8545, 8558), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (8553, 8558), True, 'import numpy as np\n'), ((9942, 9963), 'cv2.imshow', 'cv2.imshow', (['name', 'img'], {}), '(name, img)\n', (9952, 9963), False, 'import cv2\n'), ((3767, 3798), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['pad_value'], {}), '(pad_value)\n', (3787, 3798), True, 'import numpy as np\n'), ((5618, 5642), 'cv2.flip', 'cv2.flip', (['resized_img', '(1)'], {}), '(resized_img, 1)\n', (5626, 5642), False, 'import cv2\n'), ((10000, 10019), 'cv2.waitKey', 'cv2.waitKey', (['(100000)'], {}), '(100000)\n', (10011, 10019), False, 'import cv2\n'), ((1559, 1584), 'numpy.unique', 'np.unique', 
(['self.group_ids'], {}), '(self.group_ids)\n', (1568, 1584), True, 'import numpy as np\n'), ((3332, 3364), 'numpy.array', 'np.array', (['info'], {'dtype': 'np.float32'}), '(info, dtype=np.float32)\n', (3340, 3364), True, 'import numpy as np\n'), ((6472, 6498), 'numpy.where', 'np.where', (['(all_inds[1] == c)'], {}), '(all_inds[1] == c)\n', (6480, 6498), True, 'import numpy as np\n'), ((6870, 6909), 'official.vision.detection.tools.nms.py_cpu_nms', 'py_cpu_nms', (['dtboxes', 'model.cfg.test_nms'], {}), '(dtboxes, model.cfg.test_nms)\n', (6880, 6909), False, 'from official.vision.detection.tools.nms import py_cpu_nms\n'), ((9820, 9886), 'cv2.rectangle', 'cv2.rectangle', (['img', '(bb[0], bb[1])', '(bb[2], bb[3])', '(0, 0, 255)', '(2)'], {}), '(img, (bb[0], bb[1]), (bb[2], bb[3]), (0, 0, 255), 2)\n', (9833, 9886), False, 'import cv2\n'), ((6751, 6793), 'numpy.hstack', 'np.hstack', (['(bboxes, scores[:, np.newaxis])'], {}), '((bboxes, scores[:, np.newaxis]))\n', (6760, 6793), True, 'import numpy as np\n'), ((9169, 9238), 'cv2.rectangle', 'cv2.rectangle', (['img', '(bb[0], bb[1])', '(bb[2], bb[3])', 'colors[cls_id]', '(3)'], {}), '(img, (bb[0], bb[1]), (bb[2], bb[3]), colors[cls_id], 3)\n', (9182, 9238), False, 'import cv2\n'), ((2826, 2888), 'numpy.concatenate', 'np.concatenate', (['[boxes, boxes_category[:, np.newaxis]]'], {'axis': '(1)'}), '([boxes, boxes_category[:, np.newaxis]], axis=1)\n', (2840, 2888), True, 'import numpy as np\n'), ((8997, 9012), 'random.random', 'random.random', ([], {}), '()\n', (9010, 9012), False, 'import random\n'), ((9048, 9063), 'random.random', 'random.random', ([], {}), '()\n', (9061, 9063), False, 'import random\n'), ((9099, 9114), 'random.random', 'random.random', ([], {}), '()\n', (9112, 9114), False, 'import random\n')] |
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
class CCCLoss(nn.Module):
    """Concordance Correlation Coefficient loss, ``1 - CCC``.

    Targets are continuous values in ``range``; predictions are either
    continuous (``digitize_num == 1``) or logits over ``digitize_num`` bins,
    reduced to an expectation over the bin centers via softmax.
    """
    def __init__(self, digitize_num, range=(-1, 1)):
        super(CCCLoss, self).__init__()
        self.digitize_num = digitize_num
        self.range = range
        if self.digitize_num:
            bin_centers = np.linspace(*self.range, num=self.digitize_num)
            self.bins = torch.as_tensor(bin_centers, dtype=torch.float32).view((1, -1))
    def forward(self, x, y):
        # y: continuous targets (BS,); x: continuous (BS,) or bin logits.
        y = y.view(-1)
        if torch.all(y == y[0]):
            # Perturb a constant target so its variance is non-zero.
            y[0] = y[0] + 1e-5
        if self.digitize_num != 1:
            # Expected value of the bin centers under the softmax distribution.
            x = F.softmax(x, dim=-1)
            x = (self.bins.type_as(x) * x).sum(-1)
        x = x.view(-1)
        x_centered = x - torch.mean(x)
        y_centered = y - torch.mean(y)
        rho = torch.sum(x_centered * y_centered) / (
            torch.sqrt(torch.sum(torch.pow(x_centered, 2)))
            * torch.sqrt(torch.sum(torch.pow(y_centered, 2)))
        )
        x_m, y_m = torch.mean(x), torch.mean(y)
        x_s, y_s = torch.std(x), torch.std(y)
        ccc = 2 * rho * x_s * y_s / (
            torch.pow(x_s, 2) + torch.pow(y_s, 2) + torch.pow(x_m - y_m, 2)
        )
        return 1 - ccc
class CrossEntropyLoss(nn.Module):
    """Cross-entropy on a digitized continuous target.

    Continuous targets in ``range`` are bucketized into ``digitize_num``
    equal-width bins; predictions ``x`` are logits over those bins.
    """
    def __init__(self, digitize_num, range=(-1, 1)):
        super(CrossEntropyLoss, self).__init__()
        self.digitize_num = digitize_num
        if self.digitize_num:
            self.range = range
            # digitize_num bins require digitize_num + 1 edges.
            self.edges = torch.linspace(*self.range, steps=self.digitize_num + 1)
    def forward(self, x, y):
        # y: continuous targets (BS,); x: logits over the bins.
        y = y.view(-1)
        bin_idx = torch.bucketize(y, self.edges.to(y.device), right=True) - 1
        # Values exactly on the upper edge fall into the last valid bin.
        bin_idx[bin_idx == self.digitize_num] = self.digitize_num - 1
        return F.cross_entropy(x, bin_idx)
"torch.mean",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.softmax",
"torch.std",
"torch.pow",
"numpy.linspace",
"torch.as_tensor",
"torch.linspace",
"torch.sum",
"torch.all"
] | [((657, 677), 'torch.all', 'torch.all', (['(y == y[0])'], {}), '(y == y[0])\n', (666, 677), False, 'import torch\n'), ((1067, 1080), 'torch.mean', 'torch.mean', (['x'], {}), '(x)\n', (1077, 1080), False, 'import torch\n'), ((1095, 1108), 'torch.mean', 'torch.mean', (['y'], {}), '(y)\n', (1105, 1108), False, 'import torch\n'), ((1123, 1135), 'torch.std', 'torch.std', (['x'], {}), '(x)\n', (1132, 1135), False, 'import torch\n'), ((1150, 1162), 'torch.std', 'torch.std', (['y'], {}), '(y)\n', (1159, 1162), False, 'import torch\n'), ((1932, 1953), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['x', 'y'], {}), '(x, y)\n', (1947, 1953), True, 'import torch.nn.functional as F\n'), ((325, 372), 'numpy.linspace', 'np.linspace', (['*self.range'], {'num': 'self.digitize_num'}), '(*self.range, num=self.digitize_num)\n', (336, 372), True, 'import numpy as np\n'), ((761, 781), 'torch.nn.functional.softmax', 'F.softmax', (['x'], {'dim': '(-1)'}), '(x, dim=-1)\n', (770, 781), True, 'import torch.nn.functional as F\n'), ((889, 902), 'torch.mean', 'torch.mean', (['x'], {}), '(x)\n', (899, 902), False, 'import torch\n'), ((920, 933), 'torch.mean', 'torch.mean', (['y'], {}), '(y)\n', (930, 933), False, 'import torch\n'), ((948, 966), 'torch.sum', 'torch.sum', (['(vx * vy)'], {}), '(vx * vy)\n', (957, 966), False, 'import torch\n'), ((1544, 1600), 'torch.linspace', 'torch.linspace', (['*self.range'], {'steps': '(self.digitize_num + 1)'}), '(*self.range, steps=self.digitize_num + 1)\n', (1558, 1600), False, 'import torch\n'), ((1232, 1255), 'torch.pow', 'torch.pow', (['(x_m - y_m)', '(2)'], {}), '(x_m - y_m, 2)\n', (1241, 1255), False, 'import torch\n'), ((397, 439), 'torch.as_tensor', 'torch.as_tensor', (['bins'], {'dtype': 'torch.float32'}), '(bins, dtype=torch.float32)\n', (412, 439), False, 'import torch\n'), ((1192, 1209), 'torch.pow', 'torch.pow', (['x_s', '(2)'], {}), '(x_s, 2)\n', (1201, 1209), False, 'import torch\n'), ((1212, 1229), 'torch.pow', 'torch.pow', 
(['y_s', '(2)'], {}), '(y_s, 2)\n', (1221, 1229), False, 'import torch\n'), ((991, 1007), 'torch.pow', 'torch.pow', (['vx', '(2)'], {}), '(vx, 2)\n', (1000, 1007), False, 'import torch\n'), ((1033, 1049), 'torch.pow', 'torch.pow', (['vy', '(2)'], {}), '(vy, 2)\n', (1042, 1049), False, 'import torch\n')] |
import numpy as np
import os
import os.path as op
# Two-letter label code (parsed out of pose file names) -> class index.
WORD2CLASS = {
    'NE': 0,
    'AN': 1,
    'FE': 2,
    'SA': 3,
    'HA': 4,
}
def extractPoseRep(pose):
    """Placeholder pose-representation extractor."""
    # NOTE(review): stub — always returns 0 until a real implementation lands.
    return 0
## define read data
def read_pose_data(pose_path, numb_classes):
    """Load flattened 2-D pose vectors and one-hot labels from ``pose_path``.

    The label is decoded from characters 3..4 of each file's stem via
    ``WORD2CLASS``; samples whose flattened pose does not sum to a positive
    value are dropped. Returns ``(poses, labels)`` as numpy arrays.
    """
    poses = []
    labels = []
    for filename in os.listdir(pose_path):
        keypoints = np.array(np.load(op.join(op.join(pose_path, filename))))
        # Keep only the first two columns (x, y) and flatten to one vector.
        flat_pose = keypoints[:, 0:2].flatten()
        stem = filename.split('.')[0]
        label = WORD2CLASS[stem[3:5]]
        one_hot = np.zeros([numb_classes], dtype='int32')
        one_hot[label] = 1
        if np.sum(flat_pose) > 0:
            poses.append(flat_pose)
            labels.append(list(one_hot))
    return np.array(poses), np.array(labels)
class buildTrainData:
    """Mini-batch provider over pose data loaded from ``pose_path_train``.

    ``batches()`` shuffles the samples each call and yields consecutive
    ``batch_size``-sized ``(poses, labels)`` chunks; the final chunk may be
    smaller when the sample count is not a multiple of the batch size.
    """
    def __init__(self, pose_path_train, numb_classes, batch_size):
        poses, labels = read_pose_data(pose_path_train, numb_classes)
        self.datapath = pose_path_train
        self.numb_classes = numb_classes
        self.poses = np.array(poses)
        self.labels = np.array(labels)
        self.batch_size = batch_size
    def batches(self):
        """Yield ``(poses, labels)`` mini-batches in a fresh random order."""
        randindx = np.arange(self.poses.shape[0])
        np.random.shuffle(randindx)
        poses = self.poses[randindx, ...]
        labels = self.labels[randindx, ...]
        numb_samples = self.poses.shape[0]
        for start in range(0, numb_samples, self.batch_size):
            # BUG FIX: the loop index already advances by batch_size, so it
            # is the slice start itself. The old code multiplied it by
            # batch_size again, which skipped most samples and yielded
            # empty batches.
            stop = start + self.batch_size
            yield poses[start:stop, ...], labels[start:stop, ...]
class Dataset(object):
    """Shuffled batch iterator over a wrapped dataset.

    Walks a random permutation of sample indices; in training mode the
    permutation wraps around when an epoch ends, in validation mode the
    final (possibly overlapping) window is read once and iteration stops.
    """
    def __init__(self, dataset, output_dim, code_dim):
        self._dataset = dataset
        self.n_samples = dataset.n_samples
        self._train = dataset.train
        # Per-sample buffers sized up front.
        self._output = np.zeros((self.n_samples, output_dim), dtype=np.float32)
        self._codes = np.zeros((self.n_samples, code_dim), dtype=np.float32)
        self._triplets = np.array([])
        self._trip_index_in_epoch = 0
        self._index_in_epoch = 0
        self._epochs_complete = 0
        self._perm = np.arange(self.n_samples)
        np.random.shuffle(self._perm)
    def next_batch(self, batch_size):
        """Return the next ``batch_size`` samples as ``(data, label)``."""
        begin = self._index_in_epoch
        self._index_in_epoch = begin + batch_size
        if self._index_in_epoch > self.n_samples:
            if self._train:
                # Epoch finished: restart from the front of the permutation.
                self._epochs_complete += 1
                begin = 0
                self._index_in_epoch = batch_size
            else:
                # Validation stage only process once
                begin = self.n_samples - batch_size
                self._index_in_epoch = self.n_samples
        return self._dataset.data(self._perm[begin:self._index_in_epoch])
return data, label
| [
"numpy.sum",
"numpy.zeros",
"numpy.arange",
"numpy.array",
"os.path.join",
"os.listdir",
"numpy.random.shuffle"
] | [((368, 389), 'os.listdir', 'os.listdir', (['pose_path'], {}), '(pose_path)\n', (378, 389), False, 'import os\n'), ((916, 931), 'numpy.array', 'np.array', (['poses'], {}), '(poses)\n', (924, 931), True, 'import numpy as np\n'), ((945, 961), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (953, 961), True, 'import numpy as np\n'), ((732, 771), 'numpy.zeros', 'np.zeros', (['[numb_classes]'], {'dtype': '"""int32"""'}), "([numb_classes], dtype='int32')\n", (740, 771), True, 'import numpy as np\n'), ((1251, 1266), 'numpy.array', 'np.array', (['poses'], {}), '(poses)\n', (1259, 1266), True, 'import numpy as np\n'), ((1289, 1305), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1297, 1305), True, 'import numpy as np\n'), ((1386, 1416), 'numpy.arange', 'np.arange', (['self.poses.shape[0]'], {}), '(self.poses.shape[0])\n', (1395, 1416), True, 'import numpy as np\n'), ((1425, 1452), 'numpy.random.shuffle', 'np.random.shuffle', (['randindx'], {}), '(randindx)\n', (1442, 1452), True, 'import numpy as np\n'), ((2172, 2228), 'numpy.zeros', 'np.zeros', (['(self.n_samples, output_dim)'], {'dtype': 'np.float32'}), '((self.n_samples, output_dim), dtype=np.float32)\n', (2180, 2228), True, 'import numpy as np\n'), ((2251, 2305), 'numpy.zeros', 'np.zeros', (['(self.n_samples, code_dim)'], {'dtype': 'np.float32'}), '((self.n_samples, code_dim), dtype=np.float32)\n', (2259, 2305), True, 'import numpy as np\n'), ((2331, 2343), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2339, 2343), True, 'import numpy as np\n'), ((2470, 2495), 'numpy.arange', 'np.arange', (['self.n_samples'], {}), '(self.n_samples)\n', (2479, 2495), True, 'import numpy as np\n'), ((2504, 2533), 'numpy.random.shuffle', 'np.random.shuffle', (['self._perm'], {}), '(self._perm)\n', (2521, 2533), True, 'import numpy as np\n'), ((812, 824), 'numpy.sum', 'np.sum', (['pose'], {}), '(pose)\n', (818, 824), True, 'import numpy as np\n'), ((510, 538), 'os.path.join', 'op.join', (['pose_path', 
'filename'], {}), '(pose_path, filename)\n', (517, 538), True, 'import os.path as op\n')] |
import json
import shutil
import time
from datetime import datetime
from pathlib import Path
from typing import Tuple, Optional, Sequence
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, Callback
from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU
from keras.layers import Dense, LSTM, BatchNormalization
from keras.models import Sequential, Model
from preprocessing import UrbanSoundData
NUM_CLASSES = 10
class NotTrainedError(Exception):
    """Raised when a model is evaluated or used for prediction before training."""
    pass
class TimerCallback(Callback):
    """Keras callback accumulating total wall-clock seconds spent in epochs."""
    def __init__(self):
        super().__init__()
        self.total_time = 0  # summed epoch durations in seconds
        self.epoch_start = None  # wall-clock stamp of the epoch in progress
    def on_epoch_begin(self, epoch, logs=None):
        # Stamp the start; the duration is accumulated at epoch end.
        self.epoch_start = time.time()
    def on_epoch_end(self, epoch, logs=None):
        elapsed = time.time() - self.epoch_start
        self.total_time += elapsed
class BaseModel:
    """Common training/evaluation scaffolding for the Keras sound classifiers.

    Subclasses implement ``_model`` (build and compile a Keras model) and may
    override ``_process_features`` for model-specific input reshaping.
    """
    def __init__(self, data: UrbanSoundData, hidden_layer_sizes: Sequence[int],
                 dropout_probabilities: Optional[Sequence[Optional[int]]] = None,
                 use_batch_norm: bool = True, model_name: str = None,
                 log_tensorboard: bool = False, save_model: bool = True,
                 overwrite: bool = False):
        # One dropout probability is expected per hidden layer (or None).
        if dropout_probabilities is not None and \
                len(hidden_layer_sizes) != len(dropout_probabilities):
            raise ValueError("Length of hidden_layer_sizes and "
                             "dropout_probabilities need to be the same")
        self.data = data
        self.hidden_layer_sizes = hidden_layer_sizes
        self.dropout_probabilities = dropout_probabilities if dropout_probabilities else \
            [None] * len(hidden_layer_sizes)
        self.use_batch_norm = use_batch_norm
        self.model: Sequential = None  # built lazily in train()
        self.history = None  # keras History from the last train() call
        # Default name: "<ClassName>_<timestamp>", made filesystem-safe
        # by replacing ":" (invalid on Windows) with "-".
        now = datetime.now().isoformat('_', 'seconds')
        self.name = model_name if model_name else \
            f"{self.__class__.__name__}_{now}".replace(":", "-")
        self.log_tensorboard = log_tensorboard
        self.overwrite = overwrite
        self.save_model = save_model
        self.train_seconds_per_sample = None  # filled in by train()
    def train(self, batch_size: Optional[int] = 32, epochs: Optional[int] = 10,
              verbose: int = 0):
        """Build the model and fit it on the train/validation split.

        Returns the keras ``History``; also records the average training
        time per sample per epoch in ``train_seconds_per_sample``.
        """
        train_features, val_features, train_labels, val_labels = self.data.train_data
        train_features = self._process_features(train_features)
        val_features = self._process_features(val_features)
        input_shape = train_features.shape[1:]
        self.model = self._model(input_shape)
        save_dir = Path("..", "model", f"{self.name}")
        if self.save_model or self.log_tensorboard:
            if save_dir.exists():
                if not self.overwrite:
                    raise ValueError(f"Model with name {self.name} exists already. Set overwrite=True "
                                     f"on {self.__class__.__name__} to overwrite old model.")
                # overwrite=True: discard the previous run's artifacts.
                shutil.rmtree(save_dir)
            save_dir.mkdir(parents=True, exist_ok=False)
        save_path = save_dir / "weights.epoch{epoch:02d}-loss{val_categorical_accuracy:.2f}.hdf5"
        early_stop_callback = EarlyStopping(patience=10, verbose=1)
        timer_callback = TimerCallback()
        callbacks = [early_stop_callback, timer_callback]
        if self.save_model:
            # Persist the architecture once; weights are checkpointed below.
            with open(save_dir / "model_structure.json", "w") as model_struc_file:
                json.dump(self.model.to_json(), model_struc_file)
            save_callback = ModelCheckpoint(str(save_path), "val_categorical_accuracy", save_best_only=True)
            callbacks.append(save_callback)
        if self.log_tensorboard:
            tensorboard_callback = TensorBoard(str(save_dir / "logs"), write_grads=True,
                                               write_images=True, histogram_freq=1)
            callbacks.append(tensorboard_callback)
        self.history = self.model.fit(train_features, train_labels,
                                      epochs=epochs,
                                      batch_size=batch_size,
                                      validation_data=(val_features, val_labels),
                                      callbacks=callbacks,
                                      verbose=verbose)
        # Early stopping may end training before `epochs`, so use the
        # actual number of completed epochs for the timing average.
        num_epochs = len(self.history.history["loss"])
        self.train_seconds_per_sample = round(timer_callback.total_time /
                                              num_epochs /
                                              train_features.shape[0], 5)
        return self.history
    def evaluate(self, log_dir: str = None):
        """Return train/test accuracy; optionally append a CSV row to ``log_dir``.

        Raises:
            NotTrainedError: if called before ``train``.
        """
        if self.model is None:
            raise NotTrainedError("Model needs to be trained before evaluation")
        train_features, _, train_labels, _ = self.data.train_data
        test_features, test_labels = self.data.test_data
        train_features = self._process_features(train_features)
        test_features = self._process_features(test_features)
        metrics = {
            "test_acc": round(self.model.evaluate(test_features, test_labels, verbose=0)[1], 5),
            "train_acc": round(self.model.evaluate(train_features, train_labels, verbose=0)[1], 5)
        }
        if log_dir:
            log_dir = Path(log_dir)
            if not log_dir.exists():
                # First write to this log: emit the CSV header row.
                with open(log_dir, "a") as fp:
                    fp.write("name,train_acc,test_acc,layer_sizes,dropout_rates,"
                             "use_batch_norm,train_seconds_per_sample\n")
            with open(log_dir, "a") as fp:
                fp.write(f"{self.name},{metrics['train_acc']},{metrics['test_acc']}"
                         f',"{self.hidden_layer_sizes}","{self.dropout_probabilities}",'
                         f"{self.use_batch_norm},"
                         f"{self.train_seconds_per_sample}\n")
        return metrics
    def predict(self, features: np.ndarray):
        """Predict labels for ``features``, inverse-transformed via the dataset.

        Raises:
            NotTrainedError: if called before ``train``.
        """
        if self.model is None:
            raise NotTrainedError("Model needs to be trained before prediction")
        features = self._process_features(features)
        return self.data.inverse_transform(self.model.predict(features))
    def visualize_training(self):
        """Plot accuracy and loss curves from the last training run."""
        import matplotlib.pyplot as plt
        # accuracy history
        plt.plot(self.history.history['categorical_accuracy'])
        plt.plot(self.history.history['val_categorical_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
        # loss history
        plt.plot(self.history.history['loss'])
        plt.plot(self.history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
    def _process_features(self, features: np.ndarray) -> np.ndarray:
        """Model specific data preprocessing. May be overwritten by derived classes."""
        return features
    def _model(self, input_shape: Tuple[int]) -> Model:
        """Returns a compiled keras model."""
        raise NotImplementedError()
class MLPModel(BaseModel):
    """Fully connected classifier: Dense -> (BatchNorm) -> ReLU -> (Dropout) per hidden layer."""
    def __init__(self, data: UrbanSoundData,
                 hidden_layer_sizes: Sequence[int],
                 dropout_probabilities: Optional[Sequence[Optional[float]]] = None,
                 use_batch_norm: bool = True, model_name: str = None,
                 log_tensorboard: bool = False, save_model: bool = True,
                 overwrite: bool = False):
        super().__init__(data, hidden_layer_sizes,
                         dropout_probabilities, use_batch_norm,
                         model_name, log_tensorboard, save_model, overwrite)
    def _model(self, input_shape: Tuple[int]) -> Model:
        """Build and compile the MLP."""
        net = Sequential()
        first_layer = True
        for width, drop_rate in zip(self.hidden_layer_sizes, self.dropout_probabilities):
            if first_layer:
                # Only the first layer declares the input shape.
                net.add(Dense(width, input_shape=input_shape, activation=None))
                first_layer = False
            else:
                net.add(Dense(width, activation=None))
            if self.use_batch_norm:
                net.add(BatchNormalization())
            net.add(Activation("relu"))
            if drop_rate:
                net.add(Dropout(drop_rate))
        net.add(Dense(NUM_CLASSES, activation="softmax"))
        net.compile(optimizer="adam",
                    loss="categorical_crossentropy",
                    metrics=["categorical_accuracy"])
        return net
    def _process_features(self, features: np.ndarray) -> np.ndarray:
        """Flatten each 2-D sample into a 1-D feature vector."""
        return features.reshape(features.shape[0], -1)
class CNNModel(BaseModel):
    """Simplified Interface to Keras CNN Models

    The last layer is always fully connected; every other hidden layer is a
    Conv2D block (3x3 kernels, 'same' padding) followed by an optional
    BatchNormalization, a ReLU activation, an optional Dropout and a
    MaxPooling2D downsampling step."""
    def __init__(self, data: UrbanSoundData,
                 hidden_layer_sizes: Sequence[int],
                 dropout_probabilities: Optional[Sequence[Optional[float]]] = None,
                 use_batch_norm: bool = True, model_name: str = None,
                 log_tensorboard: bool = False, save_model: bool = True,
                 overwrite: bool = False):
        # The last entry of hidden_layer_sizes is the dense head, so at least
        # one convolutional layer plus the head are required.
        if len(hidden_layer_sizes) < 2:
            raise ValueError("CNNModel needs at least two hidden layers (last layer is fully connected)")
        super().__init__(data, hidden_layer_sizes, dropout_probabilities, use_batch_norm,
                         model_name, log_tensorboard, save_model, overwrite)
    def _model(self, input_shape: Tuple[int]) -> Model:
        """Build the Conv2D stack plus dense softmax head and compile it."""
        model = Sequential()
        # All sizes except the last are convolutional filter counts.
        for layer_idx, (layer_size, dropout_proba) in enumerate(
                zip(self.hidden_layer_sizes[:-1], self.dropout_probabilities[:-1])):
            if layer_idx == 0:
                model.add(Conv2D(layer_size, (3, 3), padding="same",
                                 input_shape=input_shape, activation=None))
            else:
                model.add(Conv2D(layer_size, (3, 3), padding="same",
                                 activation=None))
            if self.use_batch_norm:
                model.add(BatchNormalization())
            model.add(Activation("relu"))
            if dropout_proba:
                model.add(Dropout(dropout_proba))
            model.add(MaxPooling2D())
        model.add(Flatten())
        # NOTE(review): use_bias=False is presumably paired with the batch
        # norm that may follow; it also applies when use_batch_norm is
        # False — confirm that is intended.
        model.add(Dense(self.hidden_layer_sizes[-1], use_bias=False, activation=None))
        if self.use_batch_norm:
            model.add(BatchNormalization())
        model.add(Activation("relu"))
        if self.dropout_probabilities[-1]:
            model.add(Dropout(self.dropout_probabilities[-1]))
        model.add(Dense(NUM_CLASSES, activation="softmax"))
        model.compile(optimizer="adam",
                      loss="categorical_crossentropy",
                      metrics=["categorical_accuracy"])
        return model
    def _process_features(self, features: np.ndarray) -> np.ndarray:
        """Reshape 2D data into a 3D matrix with shape[-1] == 1.
        This last dimension is only 1 wide because we have only one
        channel (i.e. no color information)"""
        return features.reshape((*features.shape, 1))
class LSTMModel(BaseModel):
    """Recurrent classifier whose hidden layers are LSTM cells."""

    def __init__(self, data: UrbanSoundData,
                 hidden_layer_sizes: Sequence[int],
                 dropout_probabilities: Optional[Sequence[Optional[float]]] = None,
                 use_batch_norm: bool = False, model_name: str = None,
                 log_tensorboard: bool = False, save_model: bool = True,
                 overwrite: bool = False):
        # Batch norm is forced off for the recurrent stack.
        if use_batch_norm:
            print("LSTMs do not work with batch_norm, setting use_batch_norm to False")
        super().__init__(data, hidden_layer_sizes, dropout_probabilities,
                         False, model_name, log_tensorboard, save_model,
                         overwrite)

    def _model(self, input_shape: Tuple[int]) -> Model:
        """Stack sequence-returning LSTMs, a final LSTM, then a softmax head."""
        model = Sequential()
        stacked = zip(self.hidden_layer_sizes[:-1], self.dropout_probabilities[:-1])
        for idx, (units, drop_rate) in enumerate(stacked):
            extra = {"input_shape": input_shape} if idx == 0 else {}
            model.add(LSTM(units, return_sequences=True, **extra))
            if drop_rate:
                model.add(Dropout(drop_rate))
        if len(self.hidden_layer_sizes) == 1:
            # The loop above added nothing, so declare the input shape here.
            model.add(LSTM(self.hidden_layer_sizes[-1], input_shape=input_shape))
        else:
            model.add(LSTM(self.hidden_layer_sizes[-1]))
        if self.dropout_probabilities[-1]:
            model.add(Dropout(self.dropout_probabilities[-1]))
        model.add(Dense(NUM_CLASSES, activation="softmax"))
        model.compile(optimizer="adam",
                      loss="categorical_crossentropy",
                      metrics=["categorical_accuracy"])
        return model

    def _process_features(self, features: np.ndarray) -> np.ndarray:
        """Swap axes 1 and 2 so input is (batch_size, time steps, input_dim)."""
        return np.swapaxes(features, 1, 2)
class RNNModel(BaseModel):
    """Recurrent classifier built from SimpleRNN layers (mirrors LSTMModel)."""
    def __init__(self, data: UrbanSoundData,
                 hidden_layer_sizes: Sequence[int],
                 dropout_probabilities: Optional[Sequence[Optional[float]]] = None,
                 use_batch_norm: bool = False, model_name: str = None,
                 log_tensorboard: bool = False, save_model: bool = True,
                 overwrite: bool = False):
        # RNNs do not work well with batch norm, so it is forced to False
        if use_batch_norm:
            print("RNNs do not work with batch_norm, setting use_batch_norm to False")
        super().__init__(data, hidden_layer_sizes,
                         dropout_probabilities, False, model_name,
                         log_tensorboard, save_model, overwrite)
    def _model(self, input_shape: Tuple[int]) -> Model:
        """Build a stack of SimpleRNN layers topped by a softmax classifier."""
        model = Sequential()
        # All but the last hidden layer return full sequences so they can be stacked.
        for layer_idx, (layer_size, dropout_proba) in enumerate(
                zip(self.hidden_layer_sizes[:-1], self.dropout_probabilities[:-1])):
            if layer_idx == 0:
                model.add(SimpleRNN(layer_size, return_sequences=True,
                                    input_shape=input_shape))
            else:
                model.add(SimpleRNN(layer_size, return_sequences=True))
            if dropout_proba:
                model.add(Dropout(dropout_proba))
        # Final recurrent layer returns only the last state.
        if len(self.hidden_layer_sizes) == 1:
            model.add(SimpleRNN(self.hidden_layer_sizes[-1], input_shape=input_shape))
        else:
            model.add(SimpleRNN(self.hidden_layer_sizes[-1]))
        if self.dropout_probabilities[-1]:
            model.add(Dropout(self.dropout_probabilities[-1]))
        model.add(Dense(NUM_CLASSES, activation="softmax"))
        model.compile(optimizer="adam",
                      loss="categorical_crossentropy",
                      metrics=["categorical_accuracy"])
        return model
    def _process_features(self, features: np.ndarray) -> np.ndarray:
        """For RNNs, the input should be of shape (batch_size, time steps, input_dim).
        That means we need to swap the 2nd and 3rd axis.
        """
        return np.swapaxes(features, 1, 2)
class GRUModel(BaseModel):
    """Recurrent classifier whose hidden layers are GRU cells."""

    def __init__(self, data: UrbanSoundData,
                 hidden_layer_sizes: Sequence[int],
                 dropout_probabilities: Optional[Sequence[Optional[float]]] = None,
                 use_batch_norm: bool = False, model_name: str = None,
                 log_tensorboard: bool = False, save_model: bool = True,
                 overwrite: bool = False):
        # Batch norm is forced off for the recurrent stack.
        if use_batch_norm:
            print("RNNs do not work with batch_norm, setting use_batch_norm to False")
        super().__init__(data, hidden_layer_sizes, dropout_probabilities,
                         False, model_name, log_tensorboard, save_model,
                         overwrite)

    def _model(self, input_shape: Tuple[int]) -> Model:
        """Stack sequence-returning GRUs, a final GRU, then a softmax head."""
        model = Sequential()
        stacked = zip(self.hidden_layer_sizes[:-1], self.dropout_probabilities[:-1])
        for idx, (units, drop_rate) in enumerate(stacked):
            extra = {"input_shape": input_shape} if idx == 0 else {}
            model.add(GRU(units, return_sequences=True, **extra))
            if drop_rate:
                model.add(Dropout(drop_rate))
        if len(self.hidden_layer_sizes) == 1:
            # The loop above added nothing, so declare the input shape here.
            model.add(GRU(self.hidden_layer_sizes[-1], input_shape=input_shape))
        else:
            model.add(GRU(self.hidden_layer_sizes[-1]))
        if self.dropout_probabilities[-1]:
            model.add(Dropout(self.dropout_probabilities[-1]))
        model.add(Dense(NUM_CLASSES, activation="softmax"))
        model.compile(optimizer="adam",
                      loss="categorical_crossentropy",
                      metrics=["categorical_accuracy"])
        return model

    def _process_features(self, features: np.ndarray) -> np.ndarray:
        """Swap axes 1 and 2 so input is (batch_size, time steps, input_dim)."""
        return np.swapaxes(features, 1, 2)
| [
"matplotlib.pyplot.title",
"pathlib.Path",
"shutil.rmtree",
"keras.layers.GRU",
"keras.layers.Flatten",
"numpy.swapaxes",
"datetime.datetime.now",
"keras.layers.MaxPooling2D",
"keras.layers.SimpleRNN",
"matplotlib.pyplot.show",
"keras.layers.Dropout",
"matplotlib.pyplot.legend",
"keras.layer... | [((757, 768), 'time.time', 'time.time', ([], {}), '()\n', (766, 768), False, 'import time\n'), ((2607, 2642), 'pathlib.Path', 'Path', (['""".."""', '"""model"""', 'f"""{self.name}"""'], {}), "('..', 'model', f'{self.name}')\n", (2611, 2642), False, 'from pathlib import Path\n'), ((3196, 3233), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(10)', 'verbose': '(1)'}), '(patience=10, verbose=1)\n', (3209, 3233), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, Callback\n'), ((6278, 6332), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['categorical_accuracy']"], {}), "(self.history.history['categorical_accuracy'])\n", (6286, 6332), True, 'import matplotlib.pyplot as plt\n'), ((6341, 6399), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['val_categorical_accuracy']"], {}), "(self.history.history['val_categorical_accuracy'])\n", (6349, 6399), True, 'import matplotlib.pyplot as plt\n'), ((6408, 6435), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (6417, 6435), True, 'import matplotlib.pyplot as plt\n'), ((6444, 6466), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (6454, 6466), True, 'import matplotlib.pyplot as plt\n'), ((6475, 6494), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6485, 6494), True, 'import matplotlib.pyplot as plt\n'), ((6503, 6550), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (6513, 6550), True, 'import matplotlib.pyplot as plt\n'), ((6559, 6569), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6567, 6569), True, 'import matplotlib.pyplot as plt\n'), ((6601, 6639), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['loss']"], {}), "(self.history.history['loss'])\n", (6609, 6639), True, 'import matplotlib.pyplot as 
plt\n'), ((6648, 6690), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['val_loss']"], {}), "(self.history.history['val_loss'])\n", (6656, 6690), True, 'import matplotlib.pyplot as plt\n'), ((6699, 6722), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (6708, 6722), True, 'import matplotlib.pyplot as plt\n'), ((6731, 6749), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (6741, 6749), True, 'import matplotlib.pyplot as plt\n'), ((6758, 6777), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6768, 6777), True, 'import matplotlib.pyplot as plt\n'), ((6786, 6833), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (6796, 6833), True, 'import matplotlib.pyplot as plt\n'), ((6842, 6852), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6850, 6852), True, 'import matplotlib.pyplot as plt\n'), ((7835, 7847), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7845, 7847), False, 'from keras.models import Sequential, Model\n'), ((9783, 9795), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9793, 9795), False, 'from keras.models import Sequential, Model\n'), ((12206, 12218), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (12216, 12218), False, 'from keras.models import Sequential, Model\n'), ((13472, 13499), 'numpy.swapaxes', 'np.swapaxes', (['features', '(1)', '(2)'], {}), '(features, 1, 2)\n', (13483, 13499), True, 'import numpy as np\n'), ((14318, 14330), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (14328, 14330), False, 'from keras.models import Sequential, Model\n'), ((15604, 15631), 'numpy.swapaxes', 'np.swapaxes', (['features', '(1)', '(2)'], {}), '(features, 1, 2)\n', (15615, 15631), True, 'import numpy as np\n'), ((16450, 16462), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (16460, 16462), False, 
'from keras.models import Sequential, Model\n'), ((17712, 17739), 'numpy.swapaxes', 'np.swapaxes', (['features', '(1)', '(2)'], {}), '(features, 1, 2)\n', (17723, 17739), True, 'import numpy as np\n'), ((843, 854), 'time.time', 'time.time', ([], {}), '()\n', (852, 854), False, 'import time\n'), ((5272, 5285), 'pathlib.Path', 'Path', (['log_dir'], {}), '(log_dir)\n', (5276, 5285), False, 'from pathlib import Path\n'), ((8415, 8455), 'keras.layers.Dense', 'Dense', (['NUM_CLASSES'], {'activation': '"""softmax"""'}), "(NUM_CLASSES, activation='softmax')\n", (8420, 8455), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((10528, 10537), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (10535, 10537), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((10557, 10624), 'keras.layers.Dense', 'Dense', (['self.hidden_layer_sizes[-1]'], {'use_bias': '(False)', 'activation': 'None'}), '(self.hidden_layer_sizes[-1], use_bias=False, activation=None)\n', (10562, 10624), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((10722, 10740), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (10732, 10740), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((10868, 10908), 'keras.layers.Dense', 'Dense', (['NUM_CLASSES'], {'activation': '"""softmax"""'}), "(NUM_CLASSES, activation='softmax')\n", (10873, 10908), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((13016, 13056), 'keras.layers.Dense', 'Dense', (['NUM_CLASSES'], {'activation': '"""softmax"""'}), "(NUM_CLASSES, activation='softmax')\n", (13021, 13056), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((15148, 15188), 'keras.layers.Dense', 'Dense', (['NUM_CLASSES'], {'activation': '"""softmax"""'}), "(NUM_CLASSES, activation='softmax')\n", (15153, 15188), False, 'from keras.layers import Dense, LSTM, 
BatchNormalization\n'), ((17256, 17296), 'keras.layers.Dense', 'Dense', (['NUM_CLASSES'], {'activation': '"""softmax"""'}), "(NUM_CLASSES, activation='softmax')\n", (17261, 17296), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((1843, 1857), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1855, 1857), False, 'from datetime import datetime\n'), ((2984, 3007), 'shutil.rmtree', 'shutil.rmtree', (['save_dir'], {}), '(save_dir)\n', (2997, 3007), False, 'import shutil\n'), ((8295, 8313), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8305, 8313), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((10369, 10387), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (10379, 10387), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((10493, 10507), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (10505, 10507), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((10681, 10701), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (10699, 10701), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((10808, 10847), 'keras.layers.Dropout', 'Dropout', (['self.dropout_probabilities[-1]'], {}), '(self.dropout_probabilities[-1])\n', (10815, 10847), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((12759, 12817), 'keras.layers.LSTM', 'LSTM', (['self.hidden_layer_sizes[-1]'], {'input_shape': 'input_shape'}), '(self.hidden_layer_sizes[-1], input_shape=input_shape)\n', (12763, 12817), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((12855, 12888), 'keras.layers.LSTM', 'LSTM', (['self.hidden_layer_sizes[-1]'], {}), '(self.hidden_layer_sizes[-1])\n', (12859, 12888), False, 'from keras.layers import 
Dense, LSTM, BatchNormalization\n'), ((12956, 12995), 'keras.layers.Dropout', 'Dropout', (['self.dropout_probabilities[-1]'], {}), '(self.dropout_probabilities[-1])\n', (12963, 12995), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((14881, 14944), 'keras.layers.SimpleRNN', 'SimpleRNN', (['self.hidden_layer_sizes[-1]'], {'input_shape': 'input_shape'}), '(self.hidden_layer_sizes[-1], input_shape=input_shape)\n', (14890, 14944), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((14982, 15020), 'keras.layers.SimpleRNN', 'SimpleRNN', (['self.hidden_layer_sizes[-1]'], {}), '(self.hidden_layer_sizes[-1])\n', (14991, 15020), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((15088, 15127), 'keras.layers.Dropout', 'Dropout', (['self.dropout_probabilities[-1]'], {}), '(self.dropout_probabilities[-1])\n', (15095, 15127), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((17001, 17058), 'keras.layers.GRU', 'GRU', (['self.hidden_layer_sizes[-1]'], {'input_shape': 'input_shape'}), '(self.hidden_layer_sizes[-1], input_shape=input_shape)\n', (17004, 17058), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((17096, 17128), 'keras.layers.GRU', 'GRU', (['self.hidden_layer_sizes[-1]'], {}), '(self.hidden_layer_sizes[-1])\n', (17099, 17128), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((17196, 17235), 'keras.layers.Dropout', 'Dropout', (['self.dropout_probabilities[-1]'], {}), '(self.dropout_probabilities[-1])\n', (17203, 17235), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((8046, 8105), 'keras.layers.Dense', 'Dense', (['layer_size'], {'input_shape': 'input_shape', 'activation': 'None'}), 
'(layer_size, input_shape=input_shape, activation=None)\n', (8051, 8105), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((8151, 8185), 'keras.layers.Dense', 'Dense', (['layer_size'], {'activation': 'None'}), '(layer_size, activation=None)\n', (8156, 8185), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((8250, 8270), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (8268, 8270), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((8372, 8394), 'keras.layers.Dropout', 'Dropout', (['dropout_proba'], {}), '(dropout_proba)\n', (8379, 8394), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((10004, 10092), 'keras.layers.Conv2D', 'Conv2D', (['layer_size', '(3, 3)'], {'padding': '"""same"""', 'input_shape': 'input_shape', 'activation': 'None'}), "(layer_size, (3, 3), padding='same', input_shape=input_shape,\n activation=None)\n", (10010, 10092), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((10167, 10226), 'keras.layers.Conv2D', 'Conv2D', (['layer_size', '(3, 3)'], {'padding': '"""same"""', 'activation': 'None'}), "(layer_size, (3, 3), padding='same', activation=None)\n", (10173, 10226), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((10324, 10344), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (10342, 10344), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((10446, 10468), 'keras.layers.Dropout', 'Dropout', (['dropout_proba'], {}), '(dropout_proba)\n', (10453, 10468), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((12427, 12491), 'keras.layers.LSTM', 'LSTM', (['layer_size'], {'return_sequences': '(True)', 'input_shape': 'input_shape'}), '(layer_size, return_sequences=True, input_shape=input_shape)\n', 
(12431, 12491), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((12568, 12607), 'keras.layers.LSTM', 'LSTM', (['layer_size'], {'return_sequences': '(True)'}), '(layer_size, return_sequences=True)\n', (12572, 12607), False, 'from keras.layers import Dense, LSTM, BatchNormalization\n'), ((12666, 12688), 'keras.layers.Dropout', 'Dropout', (['dropout_proba'], {}), '(dropout_proba)\n', (12673, 12688), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((14539, 14608), 'keras.layers.SimpleRNN', 'SimpleRNN', (['layer_size'], {'return_sequences': '(True)', 'input_shape': 'input_shape'}), '(layer_size, return_sequences=True, input_shape=input_shape)\n', (14548, 14608), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((14685, 14729), 'keras.layers.SimpleRNN', 'SimpleRNN', (['layer_size'], {'return_sequences': '(True)'}), '(layer_size, return_sequences=True)\n', (14694, 14729), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((14788, 14810), 'keras.layers.Dropout', 'Dropout', (['dropout_proba'], {}), '(dropout_proba)\n', (14795, 14810), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((16671, 16734), 'keras.layers.GRU', 'GRU', (['layer_size'], {'return_sequences': '(True)', 'input_shape': 'input_shape'}), '(layer_size, return_sequences=True, input_shape=input_shape)\n', (16674, 16734), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((16811, 16849), 'keras.layers.GRU', 'GRU', (['layer_size'], {'return_sequences': '(True)'}), '(layer_size, return_sequences=True)\n', (16814, 16849), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n'), ((16908, 16930), 'keras.layers.Dropout', 'Dropout', (['dropout_proba'], {}), 
'(dropout_proba)\n', (16915, 16930), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten, Activation, Dropout, SimpleRNN, GRU\n')] |
"""
Source code for visualization (heads up, this will be eventually hosted as
an external library)
"""
# Import Modules
import pandas as pd
import numpy as np
from matplotlib import ticker
from sklearn.cluster import KMeans
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import matplotlib as mpl
from matplotlib import cm
from itertools import chain
import seaborn as sns
def k_means(df, n_clusters, cluster_columns):
    """Label each row of *df* with a k-means cluster id.

    Fits KMeans (fixed random_state for reproducibility) on the given
    columns and writes 'cluster_<k>' strings into df['Color Index'].
    """
    features = df[cluster_columns].values
    estimator = KMeans(n_clusters=n_clusters, random_state=1008)
    estimator.fit(features)
    df['Color Index'] = [f'cluster_{label}' for label in estimator.labels_.astype(str)]
    return df
def set_color_cluster(df, color_idx, colors, labels):
    """Map categorical 'Color Index' values to explicit colors and labels.

    :param df: DataFrame carrying a 'Color Index' column.
    :param color_idx: 'Color Index' values to map (e.g. 'cluster_0', ...).
    :param colors: one color per entry of *color_idx* (e.g. hex strings).
    :param labels: one legend label per entry of *color_idx*.
    :return: *df* with 'Color' and 'Color Label' columns added in place.
    """
    # Replace only on the relevant column instead of the whole frame:
    # same result, but avoids scanning/replacing every other column.
    df['Color'] = df['Color Index'].replace(dict(zip(color_idx, colors)))
    df['Color Label'] = df['Color Index'].replace(dict(zip(color_idx, labels)))
    # Export
    return df
def set_color_gradient(df, colormap, label):
    """Color every row along a gradient driven by df['Color Index'].

    The numeric 'Color Index' column is normalised so its maximum maps to
    0 and its minimum to 1, then looked up in *colormap*; hex colors are
    written to df['Color'] and *label* to df['Color Label'].

    Note: if all 'Color Index' values are equal the normalisation divides
    by zero — callers must supply a spread of values.
    """
    # Compute Proportion of Each Line
    mixes = (df['Color Index'] - df['Color Index'].max()) /\
        (df['Color Index'].min() - df['Color Index'].max())
    # Look the colormap up once instead of once per row (loop-invariant).
    cmap = cm.get_cmap(colormap)
    df['Color'] = [mpl.colors.rgb2hex(cmap(mix)[:3]) for mix in mixes]
    df['Color Label'] = label
    return df
def format_ticks(
        i,
        ax,
        n_ticks,
        limits,
        cols,
        flip_idx,
        x,
        tick_precision,
        explicit_ticks,
        label_fontsize,
        df,
        rotate_x_labels=False
):
    """Format the x and y axis of subplot *i* of a static parallel plot.

    Parameters
    ----------
    i : index of the axis/column being formatted.
    ax : matplotlib Axes to format.
    n_ticks : per-column tick counts.
    limits : per-column (min, max) value pairs.
    cols : column names used as axis titles.
    flip_idx : indices of columns whose axis direction is inverted.
    x : numeric x positions of the columns.
    tick_precision : per-column rounding digits for computed tick labels.
    explicit_ticks : per-column arrays of exact tick values, or None to
        generate evenly spaced ticks from *limits*.
    label_fontsize : font size of the x tick labels.
    df : source DataFrame (used to locate the data inside the limits).
    rotate_x_labels : rotate the column labels by 90 degrees.

    Returns
    -------
    0 on completion (callers ignore the value).
    """
    # Hide the horizontal spines so only the vertical axes remain visible
    ax.spines['top'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    # Format X axis
    if i == len(cols)-1:
        # The last axis is the twin axis and carries the final two labels
        ax.xaxis.set_major_locator(ticker.FixedLocator([x[-2], x[-1]]))
        ax.set_xticklabels([cols[-2], cols[-1]])
    else:
        ax.xaxis.set_major_locator(ticker.FixedLocator([i]))
        ax.set_xticklabels([cols[i]])
    # Format Y axis
    if explicit_ticks is None:
        # Evenly spaced tick labels between the column limits
        step = (limits[i][1] - limits[i][0]) / (n_ticks[i] - 1)
        tick_labels = \
            [round(limits[i][0] + step * j, tick_precision[i])
             for j in range(n_ticks[i])]
        norm_step = 1 / float(n_ticks[i] - 1)
        # NOTE(review): the comprehension variable shadows the parameter
        # ``i``; harmless in Python 3 (comprehension scope) but easy to
        # misread — ``range(n_ticks[i])`` uses the outer ``i``.
        ticks = [round(0 + norm_step * i, 3) for i in range(n_ticks[i])]
        ax.yaxis.set_ticks(ticks)
        if i in flip_idx:
            tick_labels.reverse()
        ax.set_yticklabels(tick_labels)
    else:
        # Map user-provided tick values into the normalised [0, 1]
        # coordinate system used by the scaled data.
        lower = 0 + (df[cols[i]].min() -
                     limits[i][0]) / (limits[i][1] - limits[i][0])
        upper = \
            0.00001 + (df[cols[i]].max() - limits[i][0]) /\
            (limits[i][1] - limits[i][0])
        scaler = MinMaxScaler(feature_range=(lower, upper))
        scaler.fit_transform(df[cols[i]].values.reshape((-1, 1)))
        ticks = scaler.transform(np.array(explicit_ticks[i]).reshape((-1, 1)))
        if i in flip_idx:
            # Mirror tick positions around 0.5 for inverted axes
            ticks = 0.5 + (0.5 - ticks)
        tick_labels = explicit_ticks[i]
        lims_temp = ax.get_ylim()
        ax.yaxis.set_ticks(list(chain.from_iterable(ticks.tolist())))
        ax.set_yticklabels(tick_labels.astype(str))
        ax.set_ylim(lims_temp)
    if rotate_x_labels:
        ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
    ax.tick_params(axis='x', which='major', labelsize=label_fontsize)
    return 0
def static_parallel(
        df,
        columns,
        limits=None,
        columns_to_flip=None,
        n_ticks=6,
        tick_precision=None,
        explicit_ticks=None,
        label_fontsize=12,
        plot_legend=False,
        plot_colorbar=False,
        colorbar_colormap='viridis',
        figure_size=(11, 3),
        colorbar_adjust_args=(0.90, 0.2, 0.02, 0.70),
        subplots_adjust_args={
            'left': 0.05,
            'bottom': 0.20,
            'right': 0.85,
            'top': 0.95,
            'wspace': 0.0,
            'hspace': 0.0},
        rotate_x_labels=False):
    """
    Draw a static parallel-coordinates plot, one subplot per adjacent
    pair of columns.
    Adapted from https://benalexkeen.com/parallel-coordinates-in-matplotlib/
    (this will eventually be turned into a proper module).

    Expects *df* to carry a 'Color' column (one color per row) and a
    'Color Label' column — see ``set_color_cluster``/``set_color_gradient``.

    Parameters
    ----------
    df : DataFrame with the data plus 'Color'/'Color Label' columns.
    columns : ordered column names to place on the vertical axes.
    limits : optional per-column (min, max) pairs; defaults to data range.
    columns_to_flip : columns whose axis direction is inverted.
    n_ticks : tick count per axis (a single int applies to all columns).
    tick_precision : per-column rounding digits for tick labels.
    explicit_ticks : optional per-column arrays of exact tick values.
    label_fontsize : font size of the axis labels.
    plot_legend : add a legend built from 'Color'/'Color Label'.
    plot_colorbar : add a colorbar driven by the 'Color Index' column.
    colorbar_colormap : colormap name used for the colorbar.
    figure_size : figure size in inches.
    colorbar_adjust_args : axes rectangle for the colorbar.
    subplots_adjust_args : kwargs forwarded to ``plt.subplots_adjust``.
        NOTE(review): mutable default argument — shared across calls if a
        caller ever mutates it.
    rotate_x_labels : rotate the column labels by 90 degrees.

    Returns
    -------
    fig : matplotlib.figure.Figure
    """
    # Set automatic Values
    if limits is None:
        limits = list(zip(df[columns].min().values, df[columns].max().values))
    if tick_precision is None:
        tick_precision = [2]*len(columns)
    if isinstance(n_ticks, int):
        n_ticks = [n_ticks]*len(columns)
    if 'Linestyle' not in df.columns:
        df['Linestyle'] = '-'
    # Compute Numeric List of Columns
    x = [i for i, _ in enumerate(columns)]
    # Compute Indices of Columns to Flip
    # (columns_to_flip=None makes the ``in`` test raise TypeError, in which
    # case an out-of-range index disables flipping everywhere)
    try:
        flip_idx =\
            [i for i, item in enumerate(columns) if item in columns_to_flip]
    except TypeError:
        flip_idx = [len(x) + 1]
    # Initialize Plots: one subplot per adjacent column pair
    fig, axes = plt.subplots(
        1, len(columns) - 1, sharey=False, figsize=figure_size
    )
    if len(columns) == 2:
        axes = [axes]
    # Create Scaled DataFrame: every column normalised into [0, 1]
    df_scaled = df.copy()
    for i, lim in enumerate(limits):
        lower = 0 + (df[columns[i]].min() - lim[0]) / (lim[1] - lim[0])
        upper = 0.0000001 + (df[columns[i]].max() - lim[0]) / (lim[1] - lim[0])
        scaler = MinMaxScaler(feature_range=(lower, upper))
        scaled_data = scaler.fit_transform(
            df[columns[i]].values.reshape((-1, 1))
        )
        if i in flip_idx:
            df_scaled[columns[i]] = 0.5 + (0.5 - scaled_data)
        else:
            df_scaled[columns[i]] = scaled_data
    # Plot each row (color passed positionally as a matplotlib format arg)
    for i, ax in enumerate(axes):
        for idx in df_scaled.index:
            ax.plot(
                x,
                df_scaled.loc[idx, columns],
                df_scaled.loc[idx, 'Color'],
                linestyle=df.loc[idx, 'Linestyle']
            )
        ax.set_xlim([x[i], x[i + 1]])
        ax.set_ylim([0, 1])
    # Format Last Axis: a twin axis carries the final column's scale
    axes = np.append(axes, plt.twinx(axes[-1]))
    axes[-1].set_ylim(axes[-2].get_ylim())
    if rotate_x_labels:
        axes[-1].set_xticklabels(axes[-1].get_xticklabels(), rotation=90)
        axes[-2].set_xticklabels(axes[-2].get_xticklabels(), rotation=90)
    # Format All Axes
    for i, ax in enumerate(axes):
        format_ticks(
            i,
            ax,
            n_ticks=n_ticks,
            limits=limits,
            cols=columns,
            flip_idx=flip_idx,
            x=x,
            tick_precision=tick_precision,
            explicit_ticks=explicit_ticks,
            label_fontsize=label_fontsize,
            df=df,
            rotate_x_labels=rotate_x_labels
        )
    # Remove space between subplots
    plt.subplots_adjust(**subplots_adjust_args)
    # Add legend to plot (one proxy line per distinct 'Color Label')
    if plot_legend:
        plt.legend(
            [plt.Line2D((0, 1), (0, 0), color=i) for i in
             df['Color'][df['Color Label'].drop_duplicates().index].values],
            df['Color Label'][df['Color Label'].drop_duplicates().index],
            bbox_to_anchor=(1.2, 1), loc=2, borderaxespad=0.0
        )
    if plot_colorbar:
        ax = plt.axes(colorbar_adjust_args)
        cmap = cm.get_cmap(colorbar_colormap)
        norm = mpl.colors.Normalize(
            vmin=df['Color Index'].min(), vmax=df['Color Index'].max()
        )
        mpl.colorbar.ColorbarBase(
            ax,
            cmap=cmap.reversed(),
            norm=norm,
            orientation='vertical',
            label=df.iloc[0]['Color Label']
        )
    plt.show()
    return fig
def main():
    """Demo gallery that exercises each ``static_parallel`` option in turn."""
    # Build a tiny example frame with four objectives
    objective_cols = ['Objective 1', 'Objective 2', 'Objective 3', 'Objective 4']
    data = pd.DataFrame({'Objective 1': [0, 0.5, 1],
                         'Objective 2': [0, 0.5, 1],
                         'Objective 3': [1, 0.5, 0],
                         'Objective 4': [100, 50, 10]})
    data['Color Index'] = data['Objective 1']  # color lines by Objective 1
    data = set_color_gradient(data, colormap='viridis', label='Objective 1')
    # Quantitative plots: default appearance
    static_parallel(df=data, columns=objective_cols)
    # ... with a colorbar
    static_parallel(df=data, columns=objective_cols, plot_colorbar=True)
    # ... with a user-chosen number of ticks per axis
    static_parallel(df=data, columns=objective_cols, n_ticks=[10, 20, 10, 10])
    # ... with per-axis tick precision
    static_parallel(df=data, columns=objective_cols,
                    tick_precision=[4, 2, 1, -1])
    # ... with one axis flipped
    static_parallel(df=data, columns=objective_cols,
                    columns_to_flip=['Objective 1'])
    # ... with user-specific column limits
    static_parallel(df=data, columns=objective_cols,
                    limits=[[0, 0.2], [0, 0.6], [0, 0.6], [10, 50]])
    # ... with explicit tick values
    explicit = [
        np.array([0, 0.1, 0.9]),
        np.array([0, 0.6, 0.9]),
        np.array([0, 0.5]),
        np.array([10, 50])
    ]
    static_parallel(df=data, columns=objective_cols, explicit_ticks=explicit)
    # Qualitative plot: categorical labels drive a legend
    data['Color Label'] = ['a', 'b', 'c']
    static_parallel(df=data, columns=objective_cols, plot_legend=True)
    return 0
def correlation_heatmap(df):
    """
    Get correlation heatmap of objectives

    Parameters
    ----------
    df: DataFrame
        DataFrame to visualize

    Returns
    -------
    fig: matplotlib.figure.Figure
        Correlation heatmap figure
    """
    # Compute correlation
    df_corr = df.corr()
    # Mask the upper triangle but keep the diagonal visible.
    # (``np.bool`` was removed in NumPy 1.24 — use the builtin ``bool``.)
    mask = np.triu(np.ones_like(df_corr, dtype=bool))
    np.fill_diagonal(mask, False)
    # Plot
    sns.set()
    g = sns.heatmap(
        df_corr, mask=mask, vmin=-1, vmax=1, annot=True, cmap='BrBG'
    )
    plt.tight_layout()
    fig = g.figure
    # Show Plot
    plt.show()
    sns.reset_orig()
    return fig
# Render the example gallery when this module is run as a script.
if __name__ == '__main__':
    main()
| [
"pandas.DataFrame",
"numpy.fill_diagonal",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"seaborn.heatmap",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.twinx",
"matplotlib.pyplot.axes",
"numpy.ones_like",
"sklearn.cluster.KMeans",
"sklearn.preprocessing.MinMaxScaler",
"matplotli... | [((6884, 6927), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {}), '(**subplots_adjust_args)\n', (6903, 6927), True, 'from matplotlib import pyplot as plt\n'), ((7706, 7716), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7714, 7716), True, 'from matplotlib import pyplot as plt\n'), ((7850, 7982), 'pandas.DataFrame', 'pd.DataFrame', (["{'Objective 1': [0, 0.5, 1], 'Objective 2': [0, 0.5, 1], 'Objective 3': [1,\n 0.5, 0], 'Objective 4': [100, 50, 10]}"], {}), "({'Objective 1': [0, 0.5, 1], 'Objective 2': [0, 0.5, 1],\n 'Objective 3': [1, 0.5, 0], 'Objective 4': [100, 50, 10]})\n", (7862, 7982), True, 'import pandas as pd\n'), ((9753, 9782), 'numpy.fill_diagonal', 'np.fill_diagonal', (['mask', '(False)'], {}), '(mask, False)\n', (9769, 9782), True, 'import numpy as np\n'), ((9799, 9808), 'seaborn.set', 'sns.set', ([], {}), '()\n', (9806, 9808), True, 'import seaborn as sns\n'), ((9817, 9890), 'seaborn.heatmap', 'sns.heatmap', (['df_corr'], {'mask': 'mask', 'vmin': '(-1)', 'vmax': '(1)', 'annot': '(True)', 'cmap': '"""BrBG"""'}), "(df_corr, mask=mask, vmin=-1, vmax=1, annot=True, cmap='BrBG')\n", (9828, 9890), True, 'import seaborn as sns\n'), ((9909, 9927), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9925, 9927), True, 'from matplotlib import pyplot as plt\n'), ((9968, 9978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9976, 9978), True, 'from matplotlib import pyplot as plt\n'), ((9984, 10000), 'seaborn.reset_orig', 'sns.reset_orig', ([], {}), '()\n', (9998, 10000), True, 'import seaborn as sns\n'), ((2697, 2739), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(lower, upper)'}), '(feature_range=(lower, upper))\n', (2709, 2739), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((5468, 5510), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(lower, upper)'}), '(feature_range=(lower, upper))\n', (5480, 5510), 
False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((6167, 6186), 'matplotlib.pyplot.twinx', 'plt.twinx', (['axes[-1]'], {}), '(axes[-1])\n', (6176, 6186), True, 'from matplotlib import pyplot as plt\n'), ((7309, 7339), 'matplotlib.pyplot.axes', 'plt.axes', (['colorbar_adjust_args'], {}), '(colorbar_adjust_args)\n', (7317, 7339), True, 'from matplotlib import pyplot as plt\n'), ((7355, 7385), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['colorbar_colormap'], {}), '(colorbar_colormap)\n', (7366, 7385), False, 'from matplotlib import cm\n'), ((8969, 8992), 'numpy.array', 'np.array', (['[0, 0.1, 0.9]'], {}), '([0, 0.1, 0.9])\n', (8977, 8992), True, 'import numpy as np\n'), ((9002, 9025), 'numpy.array', 'np.array', (['[0, 0.6, 0.9]'], {}), '([0, 0.6, 0.9])\n', (9010, 9025), True, 'import numpy as np\n'), ((9035, 9053), 'numpy.array', 'np.array', (['[0, 0.5]'], {}), '([0, 0.5])\n', (9043, 9053), True, 'import numpy as np\n'), ((9063, 9081), 'numpy.array', 'np.array', (['[10, 50]'], {}), '([10, 50])\n', (9071, 9081), True, 'import numpy as np\n'), ((9711, 9747), 'numpy.ones_like', 'np.ones_like', (['df_corr'], {'dtype': 'np.bool'}), '(df_corr, dtype=np.bool)\n', (9723, 9747), True, 'import numpy as np\n'), ((498, 546), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'random_state': '(1008)'}), '(n_clusters=n_clusters, random_state=1008)\n', (504, 546), False, 'from sklearn.cluster import KMeans\n'), ((1751, 1786), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', (['[x[-2], x[-1]]'], {}), '([x[-2], x[-1]])\n', (1770, 1786), False, 'from matplotlib import ticker\n'), ((1882, 1906), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', (['[i]'], {}), '([i])\n', (1901, 1906), False, 'from matplotlib import ticker\n'), ((7006, 7041), 'matplotlib.pyplot.Line2D', 'plt.Line2D', (['(0, 1)', '(0, 0)'], {'color': 'i'}), '((0, 1), (0, 0), color=i)\n', (7016, 7041), True, 'from matplotlib import pyplot as plt\n'), ((1242, 1263), 
'matplotlib.cm.get_cmap', 'cm.get_cmap', (['colormap'], {}), '(colormap)\n', (1253, 1263), False, 'from matplotlib import cm\n'), ((2839, 2866), 'numpy.array', 'np.array', (['explicit_ticks[i]'], {}), '(explicit_ticks[i])\n', (2847, 2866), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import pytest
from numpy import exp, arcsin
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.SlotM10 import SlotM10
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.Slot import Slot
from pyleecan.Methods import ParentMissingError
# Test cases consumed by the parametrized tests below: each entry bundles a
# lamination object with the expected geometry values for its SlotM10 slot.
Mag10_test = list()
# Internal Slot
lam = LamSlotMag(is_internal=True, Rext=0.1325)
lam.slot = SlotM10(Hmag=5e-3, Wmag=10e-3, H0=5e-3, W0=10e-3, Zs=12)
# Expected values: S_exp = slot surface, SA_exp = active (magnet) surface,
# H_exp / HA_exp = slot / active height, Rmec = mechanical radius.
# Ao is presumably the opening angle [rad] — not referenced by the tests below.
Mag10_test.append(
    {
        "test_obj": lam,
        "S_exp": 5.0629e-5,
        "SA_exp": 5e-5,
        "Ao": 0.078449,
        "H_exp": 5.0943e-3,
        "HA_exp": 5.09437e-3,
        "Rmec": 0.1325,
    }
)
# Outward Slot
lam = LamSlotMag(is_internal=False, Rint=0.1325)
lam.slot = SlotM10(Hmag=5e-3, Wmag=10e-3, H0=5e-3, W0=10e-3, Zs=12)
Mag10_test.append(
    {
        "test_obj": lam,
        "S_exp": 4.93708e-5,
        "SA_exp": 5e-5,
        "Ao": 0.072745,
        "H_exp": 4.9965e-3,
        "HA_exp": 5.0909e-3,
        "Rmec": 0.1324056630650208,
    }
)
# For AlmostEqual: relative tolerance used by all comparisons below
DELTA = 1e-4
class Test_Magnet_Type_10_meth(object):
    """Unit tests for the magnet type 10 (SlotM10) geometry computations."""

    def _check_rel(self, computed, expected):
        """Assert that ``computed`` matches ``expected`` within relative tolerance DELTA."""
        assert abs((computed - expected) / computed) < DELTA, (
            "Return " + str(computed) + " expected " + str(expected)
        )

    @pytest.mark.parametrize("test_dict", Mag10_test)
    def test_comp_surface(self, test_dict):
        """Slot surface: analytical value vs expectation and numerical method."""
        slot = test_dict["test_obj"].slot
        analytical = slot.comp_surface()
        self._check_rel(analytical, test_dict["S_exp"])
        # The generic (numerical) Slot method must agree with the analytical one
        self._check_rel(analytical, Slot.comp_surface(slot))

    @pytest.mark.parametrize("test_dict", Mag10_test)
    def test_comp_surface_active(self, test_dict):
        """Active (magnet) surface: analytical value vs expectation and numerical method."""
        slot = test_dict["test_obj"].slot
        analytical = slot.comp_surface_active()
        self._check_rel(analytical, test_dict["SA_exp"])
        # The generic (numerical) Slot method must agree with the analytical one
        self._check_rel(analytical, Slot.comp_surface_active(slot))

    @pytest.mark.parametrize("test_dict", Mag10_test)
    def test_comp_height(self, test_dict):
        """Slot height: analytical value vs expectation and numerical method."""
        slot = test_dict["test_obj"].slot
        analytical = slot.comp_height()
        self._check_rel(analytical, test_dict["H_exp"])
        # The generic (numerical) Slot method must agree with the analytical one
        self._check_rel(analytical, Slot.comp_height(slot))

    @pytest.mark.parametrize("test_dict", Mag10_test)
    def test_comp_height_active(self, test_dict):
        """Active (magnet) height: analytical value vs expectation and numerical method."""
        slot = test_dict["test_obj"].slot
        analytical = slot.comp_height_active()
        self._check_rel(analytical, test_dict["HA_exp"])
        # The generic (numerical) Slot method must agree with the analytical one
        self._check_rel(analytical, Slot.comp_height_active(slot))

    @pytest.mark.parametrize("test_dict", Mag10_test)
    def test_comp_angle_opening(self, test_dict):
        """Average opening angle: closed-form chord expression and numerical method."""
        slot = test_dict["test_obj"].slot
        analytical = slot.comp_angle_opening()
        # Closed-form value from the chord of width W0 on a 0.1325 m radius
        assert analytical == 2 * arcsin(slot.W0 / (2 * 0.1325))
        # The generic (numerical) Slot method must agree with the analytical one
        self._check_rel(analytical, Slot.comp_angle_opening(slot))

    @pytest.mark.parametrize("test_dict", Mag10_test)
    def test_comp_width_opening(self, test_dict):
        """Average opening width must equal the slot's W0 parameter."""
        slot = test_dict["test_obj"].slot
        assert slot.comp_width_opening() == slot.W0

    @pytest.mark.parametrize("test_dict", Mag10_test)
    def test_comp_mec_radius(self, test_dict):
        """Mechanical radius of the lamination matches the expected value."""
        lam = test_dict["test_obj"]
        assert lam.comp_radius_mec() == pytest.approx(test_dict["Rmec"], rel=DELTA)
| [
"pyleecan.Classes.Slot.Slot.comp_height_active",
"numpy.arcsin",
"pyleecan.Classes.Slot.Slot.comp_height",
"pyleecan.Classes.Slot.Slot.comp_angle_opening",
"pyleecan.Classes.Slot.Slot.comp_surface",
"pyleecan.Classes.SlotM10.SlotM10",
"pytest.mark.parametrize",
"pyleecan.Classes.Slot.Slot.comp_surface... | [((341, 382), 'pyleecan.Classes.LamSlotMag.LamSlotMag', 'LamSlotMag', ([], {'is_internal': '(True)', 'Rext': '(0.1325)'}), '(is_internal=True, Rext=0.1325)\n', (351, 382), False, 'from pyleecan.Classes.LamSlotMag import LamSlotMag\n'), ((394, 450), 'pyleecan.Classes.SlotM10.SlotM10', 'SlotM10', ([], {'Hmag': '(0.005)', 'Wmag': '(0.01)', 'H0': '(0.005)', 'W0': '(0.01)', 'Zs': '(12)'}), '(Hmag=0.005, Wmag=0.01, H0=0.005, W0=0.01, Zs=12)\n', (401, 450), False, 'from pyleecan.Classes.SlotM10 import SlotM10\n'), ((689, 731), 'pyleecan.Classes.LamSlotMag.LamSlotMag', 'LamSlotMag', ([], {'is_internal': '(False)', 'Rint': '(0.1325)'}), '(is_internal=False, Rint=0.1325)\n', (699, 731), False, 'from pyleecan.Classes.LamSlotMag import LamSlotMag\n'), ((743, 799), 'pyleecan.Classes.SlotM10.SlotM10', 'SlotM10', ([], {'Hmag': '(0.005)', 'Wmag': '(0.01)', 'H0': '(0.005)', 'W0': '(0.01)', 'Zs': '(12)'}), '(Hmag=0.005, Wmag=0.01, H0=0.005, W0=0.01, Zs=12)\n', (750, 799), False, 'from pyleecan.Classes.SlotM10 import SlotM10\n'), ((1152, 1200), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_dict"""', 'Mag10_test'], {}), "('test_dict', Mag10_test)\n", (1175, 1200), False, 'import pytest\n'), ((1802, 1850), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_dict"""', 'Mag10_test'], {}), "('test_dict', Mag10_test)\n", (1825, 1850), False, 'import pytest\n'), ((2481, 2529), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_dict"""', 'Mag10_test'], {}), "('test_dict', Mag10_test)\n", (2504, 2529), False, 'import pytest\n'), ((3127, 3175), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_dict"""', 'Mag10_test'], {}), "('test_dict', Mag10_test)\n", (3150, 3175), False, 'import pytest\n'), ((3802, 3850), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_dict"""', 'Mag10_test'], {}), "('test_dict', Mag10_test)\n", (3825, 3850), False, 'import pytest\n'), ((4385, 4433), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_dict"""', 'Mag10_test'], {}), "('test_dict', Mag10_test)\n", (4408, 4433), False, 'import pytest\n'), ((4696, 4744), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_dict"""', 'Mag10_test'], {}), "('test_dict', Mag10_test)\n", (4719, 4744), False, 'import pytest\n'), ((1657, 1689), 'pyleecan.Classes.Slot.Slot.comp_surface', 'Slot.comp_surface', (['test_obj.slot'], {}), '(test_obj.slot)\n', (1674, 1689), False, 'from pyleecan.Classes.Slot import Slot\n'), ((2329, 2368), 'pyleecan.Classes.Slot.Slot.comp_surface_active', 'Slot.comp_surface_active', (['test_obj.slot'], {}), '(test_obj.slot)\n', (2353, 2368), False, 'from pyleecan.Classes.Slot import Slot\n'), ((2983, 3014), 'pyleecan.Classes.Slot.Slot.comp_height', 'Slot.comp_height', (['test_obj.slot'], {}), '(test_obj.slot)\n', (2999, 3014), False, 'from pyleecan.Classes.Slot import Slot\n'), ((3651, 3689), 'pyleecan.Classes.Slot.Slot.comp_height_active', 'Slot.comp_height_active', (['test_obj.slot'], {}), '(test_obj.slot)\n', (3674, 3689), False, 'from pyleecan.Classes.Slot import Slot\n'), ((4234, 4272), 'pyleecan.Classes.Slot.Slot.comp_angle_opening', 'Slot.comp_angle_opening', (['test_obj.slot'], {}), '(test_obj.slot)\n', (4257, 4272), False, 'from pyleecan.Classes.Slot import Slot\n'), ((4969, 5012), 'pytest.approx', 'pytest.approx', (["test_dict['Rmec']"], {'rel': 'DELTA'}), "(test_dict['Rmec'], rel=DELTA)\n", (4982, 5012), False, 'import pytest\n'), ((4094, 4133), 'numpy.arcsin', 'arcsin', (['(test_obj.slot.W0 / (2 * 0.1325))'], {}), '(test_obj.slot.W0 / (2 * 0.1325))\n', (4100, 4133), False, 'from numpy import exp, arcsin\n')] |
import tensorflow as tf
import glob
import json
import os
import numpy as np
import time
# Token emitted by the summarization model for out-of-vocabulary words;
# positions holding it are skipped during scoring (see cal_one_example).
UNKNOWN_TOKEN = '[UNK]'
# May-baseline-252716 ,Average pos-co score of 8000 examples is: 0.8423
# T = -7.2894954506
# May-baseline-pos-238712 ,Average pos-co score of 8000 examples is: 0.8812
# Penn-Treebank POS tags grouped into clusters of related tags; two tags count
# as "similar" when they fall in the same cluster (see they_are_similar).
cluster_list = \
    [
        ['-LRB-', '-RRB-'],
        [','],
        [':'],
        ['.'],
        ['``', '\'\''],
        ['#'],
        ['$'],
        ['CC'],
        ['CD'],
        ['PDT', 'WDT', 'DT'],
        ['EX'],
        ['FW'],
        ['IN'],
        ['JJ', 'JJR', 'JJS'],
        ['LS'],
        ['NN', 'NNS', 'NNP', 'NNPS'],
        ['POS'],
        ['PRP','PRP$'],
        ['RB', 'RBR', 'RBS', 'RP'],
        ['SYM'],
        ['TO'],
        ['UH'],
        ['MD'],
        ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'],
        ['WP', 'WP$', 'WRB']
    ]
# NOTE(review): use_cluster is not referenced by the active code in this file
# chunk — get_score always uses cluster matching.
use_cluster = True
# Counters filled by cal_one_example for examples whose decoded token count
# does not match their POS tag count.
skip_count = 0
skip_index = []
# May-pointer-pos-152199
model_name = 'May-pointer-pos-152199'
# Directory holding per-example attention JSON dumps produced at decode time.
attn_root = \
    '/media/jayson/study/graduation project/paper/experimens/moldes-May/'+ \
    model_name + '/attn_vis'
# Directory holding per-example decoded token / POS / NER text files.
decoded_pos_ner_root = \
    '/media/jayson/study/graduation project/paper/experimens/moldes-May/'+ \
    model_name + '/decoded_pos_ner'
def get_content(filename):
    """Read a text file and return its lines, stripped of surrounding whitespace."""
    with open(filename, 'r') as handle:
        return [raw_line.strip() for raw_line in handle]
def in_witch_cluster(tag):
    """Return the index of the POS cluster containing ``tag``.

    Falls through (implicitly returning None) when no cluster contains it.
    """
    position = 0
    for cluster in cluster_list:
        if tag in cluster:
            return position
        position += 1
def they_are_similar(decoded_tag, src_tag):
    """True when ``src_tag`` belongs to the same POS cluster as ``decoded_tag``."""
    shared_cluster = cluster_list[in_witch_cluster(decoded_tag)]
    return src_tag in shared_cluster
def get_score(idx_lst, src_tags, tag, src_tokens=None):
    """Count indexed source positions whose tag is cluster-similar to ``tag``.

    ``src_tokens`` is kept for interface compatibility (it was only used by
    now-removed debug printing) and is not consulted.
    """
    return sum(1 for position in idx_lst if they_are_similar(tag, src_tags[position]))
def cal_one_example(sess, index, attn_json, tokens_pos_ner):
    '''Score one decoded example by POS co-occurrence with attention targets.

    For each decoded token, look at the two source positions with the highest
    attention weight and count how many carry a POS tag from the same cluster
    as the token's own tag; the per-token counts are averaged.

    Args:
        sess: active TensorFlow session used to run the top-k op.
        index: integer example id, used for logging and skip bookkeeping.
        attn_json: a json object, containing article_lst, article_pos_lst,
            article_ner_lst, decoded_lst, abstract_str, attn_dists,
            p_gens (if pointer_gen is ON).
        tokens_pos_ner: lines of the decoded file; element [1] is the
            space-separated POS tag list of the decoded tokens.

    Returns:
        The average per-token score, or 0 when the example is skipped because
        the decoded token count does not match the POS tag count (in that
        case the module-level skip_count / skip_index are updated).
    '''
    global skip_count
    global skip_index
    decoded_pos_lst = tokens_pos_ner[1].split(' ')
    # decoded token length should match the pos/ner tag list; mismatching
    # examples are recorded and contribute 0 to the total.
    if len(attn_json['decoded_lst']) != len(decoded_pos_lst):
        skip_count += 1
        skip_index.append(index)
        print('Example %d, length of decoded tokens and the pos/ner tag list do NOT match, skip it.' % index)
        return 0
    # Indices of the 2 largest attention weights for every decoded step.
    input_arr = tf.constant(attn_json['attn_dists'], tf.float32)
    _, top_k_indices = tf.nn.top_k(input_arr, 2)
    k_indices = sess.run(top_k_indices)
    # NOTE(review): tokens_pos_ner[1] is re-used here, so despite its name this
    # holds the POS tags, not the tokens; the variable is unused below, but
    # verify whether index 0 was intended.
    decoded_token_lst = tokens_pos_ner[1].split(' ')
    decoded_len = len(decoded_pos_lst)
    t_score = 0
    for idx, tag in enumerate(decoded_pos_lst):
        # if current token is '[UNK]', then skip it.
        if attn_json['decoded_lst'][idx] == UNKNOWN_TOKEN: continue
        score = get_score(k_indices[idx], attn_json['article_pos_lst'], tag, attn_json['article_lst'])
        t_score += score
    t_score /= float(decoded_len)
    return t_score
def get_config():
    """Build a TF session config: soft placement, growable GPU memory, 40% cap."""
    session_config = tf.ConfigProto(allow_soft_placement=True)
    gpu_options = session_config.gpu_options
    gpu_options.allow_growth = True
    gpu_options.per_process_gpu_memory_fraction = 0.4
    return session_config
def start(EXAMPLE_NUM):
    """Score the configured example ranges and persist the results.

    Walks over the attention/decoded dumps for each index range in ``pieces``
    (one TF session per range), computes a POS co-occurrence score per example
    via cal_one_example, saves the score list to a .npy file and prints the
    average over the non-skipped examples.
    """
    global skip_count
    global skip_index
    score = float(0)
    scores = []
    # Index ranges to evaluate; each range gets its own TF session.
    pieces = [[6000,8000]]
    # pieces = [[3000, 3500]]
    aa = range(EXAMPLE_NUM)
    for j in pieces:
        with tf.Session(config=get_config()) as sess:
            for index in aa[j[0]: j[1]]:
                if index%500 == 0: print('Example %d starts...' % index)
                # Per-example dump files are zero-padded by index.
                attn_path = os.path.join(attn_root, '%06d_attn_vis_data.json' % index)
                decoded_pos_ner_path = os.path.join(decoded_pos_ner_root, '%06d_decoded.txt' % index)
                attn_str = get_content(attn_path)[0]
                tokens_pos_ner = get_content(decoded_pos_ner_path)
                scores.append(cal_one_example(sess, index, json.loads(attn_str), tokens_pos_ner))
                score += scores[-1]
        # Cool-down between session batches.
        print('sleep 60 seconds......')
        time.sleep(60)
    # fill extra data: append the skip count, then every skipped index.
    scores.append(float(skip_count))
    for index in skip_index:
        scores.append(index)
    # save the score list for further use.
    np.save('6c-'+model_name+'decoded-pos-co-score.npy',np.array(scores))
    print('Total : %d, Skip Count : %d .' % (EXAMPLE_NUM, skip_count))
    print('%s ,Average pos-co score of %d examples is: %.4f' % (model_name, EXAMPLE_NUM-skip_count, score/float(EXAMPLE_NUM-skip_count)))
    # scores layout: first the per-example scores, then the count of skipped
    # examples, then the skipped examples' indices.
# scores: first EXAMPLE_NUM elements are scores to each decoded_headline,
# EXAMPLE_NUM+1: the count of skipped examples,
# follows are skipped examples' index
def main():
    """Entry point: announce the model and score the full 8000-example set."""
    total_examples = 8000
    print('%s, starts............' % model_name)
    start(total_examples)
if __name__ == '__main__':
tf.app.run() | [
"os.path.join",
"json.loads",
"tensorflow.nn.top_k",
"tensorflow.constant",
"time.sleep",
"tensorflow.ConfigProto",
"numpy.array",
"tensorflow.app.run"
] | [((2646, 2694), 'tensorflow.constant', 'tf.constant', (["attn_json['attn_dists']", 'tf.float32'], {}), "(attn_json['attn_dists'], tf.float32)\n", (2657, 2694), True, 'import tensorflow as tf\n'), ((2715, 2740), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['input_arr', '(2)'], {}), '(input_arr, 2)\n', (2726, 2740), True, 'import tensorflow as tf\n'), ((3355, 3396), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (3369, 3396), True, 'import tensorflow as tf\n'), ((5033, 5045), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (5043, 5045), True, 'import tensorflow as tf\n'), ((4282, 4296), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (4292, 4296), False, 'import time\n'), ((4492, 4508), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (4500, 4508), True, 'import numpy as np\n'), ((3891, 3949), 'os.path.join', 'os.path.join', (['attn_root', "('%06d_attn_vis_data.json' % index)"], {}), "(attn_root, '%06d_attn_vis_data.json' % index)\n", (3903, 3949), False, 'import os\n'), ((3977, 4039), 'os.path.join', 'os.path.join', (['decoded_pos_ner_root', "('%06d_decoded.txt' % index)"], {}), "(decoded_pos_ner_root, '%06d_decoded.txt' % index)\n", (3989, 4039), False, 'import os\n'), ((4183, 4203), 'json.loads', 'json.loads', (['attn_str'], {}), '(attn_str)\n', (4193, 4203), False, 'import json\n')] |
# -*- coding: utf-8 -*-
"""
Copyright 2019 <NAME>, Aprar s.r.o.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Adopted code from https://github.com/rainofmine/Face_Attention_Network
"""
import argparse
import collections
import os
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms
import torch.utils.model_zoo as model_zoo
from identification.model_level_attention import resnet18, resnet34, resnet50, resnet101, resnet152
from torch.utils.data import DataLoader
from identification.csv_eval import evaluate
from identification.dataloader import WIDERDataset, AspectRatioBasedSampler, collater, Resizer, Augmenter, Normalizer, \
CSVDataset
# Whether a CUDA device is available; forwarded to the model constructors and
# used in main() to move tensors and the model to the GPU.
is_cuda = torch.cuda.is_available()
print('CUDA available: {}'.format(is_cuda))
# Download URLs for ImageNet-pretrained ResNet backbone weights.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
# When True, main() resumes from a checkpoint (path currently empty) instead
# of loading a pretrained backbone.
ckpt = False
def main(args=None):
    """Train a RetinaNet-style detector on WIDER or CSV annotations.

    Parses command-line arguments, builds the training (and optional
    validation) datasets, loads a ResNet backbone of the requested depth
    (from a checkpoint or pretrained weights), then runs the training loop,
    evaluating and checkpointing after every epoch.

    Args:
        args: optional argument list forwarded to argparse (defaults to
            sys.argv when None).
    """
    parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--wider_train', help='Path to file containing WIDER training annotations (see readme)')
    parser.add_argument('--wider_val',
                        help='Path to file containing WIDER validation annotations (optional, see readme)')
    parser.add_argument('--wider_train_prefix', help='Prefix path to WIDER train images')
    parser.add_argument('--wider_val_prefix', help='Prefix path to WIDER validation images')
    parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)')
    parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)')
    parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')
    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50)
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=50)
    parser.add_argument('--batch_size', help='Batch size (default 2)', type=int, default=2)
    parser.add_argument('--model_name', help='Name of the model to save')
    parser.add_argument('--parallel', help='Run training with DataParallel', dest='parallel',
                        default=False, action='store_true')
    parser.add_argument('--pretrained', help='Pretrained model name in weight directory')
    parser = parser.parse_args(args)
    # Create the data loaders: WIDER annotations take precedence over CSV ones.
    if parser.wider_train is None:
        dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes,
                                   transform=transforms.Compose([Resizer(), Augmenter(), Normalizer()]))
    else:
        dataset_train = WIDERDataset(train_file=parser.wider_train, img_prefix=parser.wider_train_prefix,
                                     transform=transforms.Compose([Resizer(), Augmenter(), Normalizer()]))
    if parser.wider_val is None:
        if parser.csv_val is None:
            dataset_val = None
            print('No validation annotations provided.')
        else:
            print('Loading CSV validation dataset')
            dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
                                     transform=transforms.Compose([Resizer(), Normalizer()]))
    else:
        print('Loading WIDER validation dataset')
        dataset_val = WIDERDataset(train_file=parser.wider_val, img_prefix=parser.wider_val_prefix,
                                   transform=transforms.Compose([Resizer(), Normalizer()]))
    print('Loading training dataset')
    sampler = AspectRatioBasedSampler(dataset_train, batch_size=parser.batch_size, drop_last=False)
    if parser.parallel:
        dataloader_train = DataLoader(dataset_train, num_workers=16, collate_fn=collater, batch_sampler=sampler)
    else:
        dataloader_train = DataLoader(dataset_train, collate_fn=collater, batch_sampler=sampler)
    # Create the model_pose_level_attention: pick the backbone by depth.
    if parser.depth == 18:
        retinanet = resnet18(num_classes=dataset_train.num_classes(), is_cuda=is_cuda)
    elif parser.depth == 34:
        retinanet = resnet34(num_classes=dataset_train.num_classes(), is_cuda=is_cuda)
    elif parser.depth == 50:
        retinanet = resnet50(num_classes=dataset_train.num_classes(), is_cuda=is_cuda)
    elif parser.depth == 101:
        retinanet = resnet101(num_classes=dataset_train.num_classes(), is_cuda=is_cuda)
    elif parser.depth == 152:
        retinanet = resnet152(num_classes=dataset_train.num_classes(), is_cuda=is_cuda)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')
    if ckpt:
        # NOTE(review): checkpoint path is hard-coded empty — must be filled in
        # before the ckpt=True branch can work.
        retinanet = torch.load('')
        print('Loading checkpoint')
    else:
        print('Loading pretrained model')
        retinanet_dict = retinanet.state_dict()
        if parser.pretrained is None:
            pretrained_dict = model_zoo.load_url(model_urls['resnet' + str(parser.depth)])
        else:
            pretrained_dict = torch.load(parser.pretrained)
        # Keep only the pretrained weights whose keys exist in this model.
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in retinanet_dict}
        retinanet_dict.update(pretrained_dict)
        retinanet.load_state_dict(retinanet_dict)
        print('load pretrained backbone')
    print(retinanet)
    if parser.parallel:
        retinanet = torch.nn.DataParallel(retinanet, device_ids=[0])
    if is_cuda:
        retinanet.cuda()
    retinanet.training = True
    optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
    # optimizer = optim.SGD(retinanet.parameters(), lr=1e-3, momentum=0.9, weight_decay=1e-4)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
    # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
    loss_hist = collections.deque(maxlen=500)
    retinanet.train()
    # Keep the batch-norm layers frozen during training.
    if parser.parallel:
        retinanet.module.freeze_bn()
    else:
        retinanet.freeze_bn()
    print('Num training images: {}'.format(len(dataset_train)))
    iters = 0
    for epoch_num in range(0, parser.epochs):
        retinanet.train()
        if parser.parallel:
            retinanet.module.freeze_bn()
        else:
            retinanet.freeze_bn()
        epoch_loss = []
        for iter_num, data in enumerate(dataloader_train):
            iters += 1
            optimizer.zero_grad()
            img_data = data['img'].float()
            annot_data = data['annot']
            if is_cuda:
                img_data = img_data.cuda()
                annot_data = annot_data.cuda()
                # Report GPU memory usage in MiB.
                print("GPU memory allocated: %d max memory allocated: %d memory cached: %d max memory cached: %d" % (
                    torch.cuda.memory_allocated() / 1024 ** 2, torch.cuda.max_memory_allocated() / 1024 ** 2,
                    torch.cuda.memory_cached() / 1024 ** 2, torch.cuda.max_memory_cached() / 1024 ** 2))
            classification_loss, regression_loss, mask_loss = retinanet([img_data, annot_data])
            del img_data
            del annot_data
            classification_loss = classification_loss.mean()
            regression_loss = regression_loss.mean()
            mask_loss = mask_loss.mean()
            loss = classification_loss + regression_loss + mask_loss
            if bool(loss == 0):
                continue
            loss.backward()
            # Gradient clipping to stabilize training.
            torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
            optimizer.step()
            loss_hist.append(float(loss.item()))
            epoch_loss.append(float(loss.item()))
            print(
                'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | '
                'mask_loss {:1.5f} | Running loss: {:1.5f}'.format(
                    epoch_num, iter_num, float(classification_loss), float(regression_loss), float(mask_loss),
                    np.mean(loss_hist)))
            del classification_loss
            del regression_loss
            del loss
        if parser.wider_val is not None:
            print('Evaluating dataset')
            evaluate(dataset_val, retinanet, is_cuda=is_cuda)
        # LR scheduler steps on the mean epoch loss.
        scheduler.step(np.mean(epoch_loss))
        # TODO remove makedir
        os.makedirs('./ckpt', exist_ok=True)
        if parser.parallel:
            torch.save(retinanet.module, './ckpt/' + parser.model_name + '_{}.pt'.format(epoch_num))
        else:
            torch.save(retinanet, './ckpt/' + parser.model_name + '_{}.pt'.format(epoch_num))
# Script entry point: run training when executed directly.
if __name__ == '__main__':
    main()
| [
"identification.dataloader.Resizer",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"os.makedirs",
"torch.cuda.max_memory_allocated",
"torch.load",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"identification.dataloader.Augmenter",
"identification.dataloader.AspectRatioBasedSampler",
"... | [((1216, 1241), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1239, 1241), False, 'import torch\n'), ((1743, 1843), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Simple training script for training a RetinaNet network."""'}), "(description=\n 'Simple training script for training a RetinaNet network.')\n", (1766, 1843), False, 'import argparse\n'), ((4455, 4544), 'identification.dataloader.AspectRatioBasedSampler', 'AspectRatioBasedSampler', (['dataset_train'], {'batch_size': 'parser.batch_size', 'drop_last': '(False)'}), '(dataset_train, batch_size=parser.batch_size,\n drop_last=False)\n', (4478, 4544), False, 'from identification.dataloader import WIDERDataset, AspectRatioBasedSampler, collater, Resizer, Augmenter, Normalizer, CSVDataset\n'), ((6489, 6562), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'patience': '(3)', 'verbose': '(True)'}), '(optimizer, patience=3, verbose=True)\n', (6525, 6562), True, 'import torch.optim as optim\n'), ((6660, 6689), 'collections.deque', 'collections.deque', ([], {'maxlen': '(500)'}), '(maxlen=500)\n', (6677, 6689), False, 'import collections\n'), ((4592, 4681), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {'num_workers': '(16)', 'collate_fn': 'collater', 'batch_sampler': 'sampler'}), '(dataset_train, num_workers=16, collate_fn=collater,\n batch_sampler=sampler)\n', (4602, 4681), False, 'from torch.utils.data import DataLoader\n'), ((4715, 4784), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {'collate_fn': 'collater', 'batch_sampler': 'sampler'}), '(dataset_train, collate_fn=collater, batch_sampler=sampler)\n', (4725, 4784), False, 'from torch.utils.data import DataLoader\n'), ((5545, 5559), 'torch.load', 'torch.load', (['""""""'], {}), "('')\n", (5555, 5559), False, 'import torch\n'), ((6196, 6244), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['retinanet'], 
{'device_ids': '[0]'}), '(retinanet, device_ids=[0])\n', (6217, 6244), False, 'import torch\n'), ((9067, 9103), 'os.makedirs', 'os.makedirs', (['"""./ckpt"""'], {'exist_ok': '(True)'}), "('./ckpt', exist_ok=True)\n", (9078, 9103), False, 'import os\n'), ((5869, 5898), 'torch.load', 'torch.load', (['parser.pretrained'], {}), '(parser.pretrained)\n', (5879, 5898), False, 'import torch\n'), ((8933, 8982), 'identification.csv_eval.evaluate', 'evaluate', (['dataset_val', 'retinanet'], {'is_cuda': 'is_cuda'}), '(dataset_val, retinanet, is_cuda=is_cuda)\n', (8941, 8982), False, 'from identification.csv_eval import evaluate\n'), ((9007, 9026), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (9014, 9026), True, 'import numpy as np\n'), ((8728, 8746), 'numpy.mean', 'np.mean', (['loss_hist'], {}), '(loss_hist)\n', (8735, 8746), True, 'import numpy as np\n'), ((3475, 3484), 'identification.dataloader.Resizer', 'Resizer', ([], {}), '()\n', (3482, 3484), False, 'from identification.dataloader import WIDERDataset, AspectRatioBasedSampler, collater, Resizer, Augmenter, Normalizer, CSVDataset\n'), ((3486, 3497), 'identification.dataloader.Augmenter', 'Augmenter', ([], {}), '()\n', (3495, 3497), False, 'from identification.dataloader import WIDERDataset, AspectRatioBasedSampler, collater, Resizer, Augmenter, Normalizer, CSVDataset\n'), ((3499, 3511), 'identification.dataloader.Normalizer', 'Normalizer', ([], {}), '()\n', (3509, 3511), False, 'from identification.dataloader import WIDERDataset, AspectRatioBasedSampler, collater, Resizer, Augmenter, Normalizer, CSVDataset\n'), ((3698, 3707), 'identification.dataloader.Resizer', 'Resizer', ([], {}), '()\n', (3705, 3707), False, 'from identification.dataloader import WIDERDataset, AspectRatioBasedSampler, collater, Resizer, Augmenter, Normalizer, CSVDataset\n'), ((3709, 3720), 'identification.dataloader.Augmenter', 'Augmenter', ([], {}), '()\n', (3718, 3720), False, 'from identification.dataloader import WIDERDataset, 
AspectRatioBasedSampler, collater, Resizer, Augmenter, Normalizer, CSVDataset\n'), ((3722, 3734), 'identification.dataloader.Normalizer', 'Normalizer', ([], {}), '()\n', (3732, 3734), False, 'from identification.dataloader import WIDERDataset, AspectRatioBasedSampler, collater, Resizer, Augmenter, Normalizer, CSVDataset\n'), ((4375, 4384), 'identification.dataloader.Resizer', 'Resizer', ([], {}), '()\n', (4382, 4384), False, 'from identification.dataloader import WIDERDataset, AspectRatioBasedSampler, collater, Resizer, Augmenter, Normalizer, CSVDataset\n'), ((4386, 4398), 'identification.dataloader.Normalizer', 'Normalizer', ([], {}), '()\n', (4396, 4398), False, 'from identification.dataloader import WIDERDataset, AspectRatioBasedSampler, collater, Resizer, Augmenter, Normalizer, CSVDataset\n'), ((4123, 4132), 'identification.dataloader.Resizer', 'Resizer', ([], {}), '()\n', (4130, 4132), False, 'from identification.dataloader import WIDERDataset, AspectRatioBasedSampler, collater, Resizer, Augmenter, Normalizer, CSVDataset\n'), ((4134, 4146), 'identification.dataloader.Normalizer', 'Normalizer', ([], {}), '()\n', (4144, 4146), False, 'from identification.dataloader import WIDERDataset, AspectRatioBasedSampler, collater, Resizer, Augmenter, Normalizer, CSVDataset\n'), ((7551, 7580), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {}), '()\n', (7578, 7580), False, 'import torch\n'), ((7594, 7627), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (7625, 7627), False, 'import torch\n'), ((7653, 7679), 'torch.cuda.memory_cached', 'torch.cuda.memory_cached', ([], {}), '()\n', (7677, 7679), False, 'import torch\n'), ((7693, 7723), 'torch.cuda.max_memory_cached', 'torch.cuda.max_memory_cached', ([], {}), '()\n', (7721, 7723), False, 'import torch\n')] |
#!/usr/bin/env python
import sys
import json
from types import SimpleNamespace
def main():
    """CLI entry point: validate the argument count and launch the evaluation."""
    if len(sys.argv) < 3:
        print("Usage : " + sys.argv[0] + " [keras_model_filename] [config_json_filename]")
        sys.exit()
    # Positional arguments: trained Keras model, then the JSON configuration.
    keras_model_filename, config_json_filename = sys.argv[1], sys.argv[2]
    deeplucia_eval(keras_model_filename, config_json_filename)
def deeplucia_eval(keras_model_filename,config_json_filename):
import os
import functools
import itertools
from pathlib import Path
from functional import seq
from functional import pseq # seq with parallelism
from deeplucia_toolkit import prep_matrix
from deeplucia_toolkit import prep_label
from deeplucia_toolkit import make_dataset
from deeplucia_toolkit import make_model
from deeplucia_toolkit import misc
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Model
import numpy
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import matthews_corrcoef
# open config file
with open(config_json_filename) as config_json_file:
config_dict = json.load(config_json_file)
config = SimpleNamespace(**config_dict)
# obvious parameter setting
local_path_base = Path (Path.cwd() / "Features")
chrominfo_filename = local_path_base / "ChromInfo_hg19.txt"
chrom_set = set(["chr1","chr2","chr3","chr4","chr5","chr6","chr7","chr8","chr9","chr10","chr11","chr12","chr13","chr14","chr15","chr16","chr17","chr18","chr19","chr20","chr21","chr22","chrX"])
test_chrom_set = set(config.test_chrom_list)
n2p_ratio = config.n2p_ratio
num_pos = max(1,int(config.num_pos/config.n2p_ratio))
# set filepath
loop_label_txtgz_filename = Path(local_path_base) / "Label" / "isHC.loop_list.txt.gz"
anchor_label_txtgz_filename = Path(local_path_base) / "Label" / "isHC.anchor_list.txt.gz"
seq_numpy_filename = Path(local_path_base) / "GenomicFeature" / "isHC" / "isHC.seq_onehot.npy"
sample_list = config.sample_id_list
mark_list = ["DNase","H2AFZ","H3K27ac","H3K27me3","H3K36me3","H3K4me1","H3K4me2","H3K4me3","H3K79me2","H3K9ac","H3K9me3","H4K20me1"]
sample_mark_to_epi_numpy_filename = {}
for epi_numpy_filename in Path(local_path_base).glob("EpigenomicFeature/*/*.npy"):
sample_mark = tuple(epi_numpy_filename.name.split(".")[:2])
sample_mark_to_epi_numpy_filename[sample_mark] = epi_numpy_filename
sample_to_sample_index = misc.get_sample_index(sample_list)
# load feature array
seq_array = prep_matrix.load_seq_array(seq_numpy_filename)
multisample_multimark_epi_array = prep_matrix.load_multisample_multimark_epi_array(sample_list,mark_list,sample_mark_to_epi_numpy_filename,cap_crit=0.95)
print("load feature array => DONE")
# load loop label
chrom_to_size = misc.load_chrominfo(chrominfo_filename)
pos_sample_locus_index_pair_set = prep_label.load_loop_label(sample_list,loop_label_txtgz_filename)
anchor_locus_index_set = prep_label.get_anchor_locus(sample_list,pos_sample_locus_index_pair_set)
chrom_to_locus_index_range= prep_label.get_chrom_range(anchor_label_txtgz_filename,chrom_to_size)
index_max = max(itertools.chain(*list(chrom_to_locus_index_range.values())))
test_locus_index_range_set = {chrom_to_locus_index_range[chrom] for chrom in test_chrom_set}
print("load loop label => DONE")
# curryize basic functions
is_intra_chrom = functools.partial(misc.is_intra_chrom,chrom_to_locus_index_range=chrom_to_locus_index_range)
is_anchor_bearing = functools.partial(misc.is_anchor_bearing,anchor_locus_index_set=anchor_locus_index_set)
permute_same_distance = functools.partial(prep_label.permute_same_distance,index_max = index_max)
gen_neg_sample_locus_index_pair = functools.partial(prep_label.gen_neg_sample_locus_index_pair,sample_list=sample_list,index_max=index_max)
is_in_test_range_set = functools.partial(misc.is_in_desired_range_set,index_range_set=test_locus_index_range_set)
def permute_pos_sample_locus_index_pair(pos_sample_locus_index_pair,n2p_ratio,is_in_range_set):
neg_sample_locus_index_pair_list = (
seq(gen_neg_sample_locus_index_pair(pos_sample_locus_index_pair))
.filter(is_in_range_set)
.filter(is_intra_chrom)
.filter_not(is_anchor_bearing)
.take(n2p_ratio)
.to_list())
return neg_sample_locus_index_pair_list
permute_pos_sample_locus_index_pair_test = functools.partial(permute_pos_sample_locus_index_pair,n2p_ratio = n2p_ratio,is_in_range_set = is_in_test_range_set)
print("curryize basic functions => DONE")
# split train/validate label
test_pos_sample_locus_index_pair_list = (pseq(pos_sample_locus_index_pair_set).filter(is_in_test_range_set).to_list())
test_neg_sample_locus_index_pair_list = (pseq(test_pos_sample_locus_index_pair_list).flat_map(permute_pos_sample_locus_index_pair_test).to_list())
print("split test/train/validate label => DONE")
# merge pos/neg label
test_sample_locus_index_pair_list = test_pos_sample_locus_index_pair_list + test_neg_sample_locus_index_pair_list
print("merge pos/neg label => DONE")
# prepare model
model = keras.models.load_model(keras_model_filename)
print("load model => DONE")
prob_pred_list = []
label_pred_list = []
label_true_list = []
chunk_size = 10000
for _,chunk in itertools.groupby(enumerate(test_sample_locus_index_pair_list), lambda x: x[0]// chunk_size ):
test_sample_locus_index_pair_sublist = [ indexed_sample_locus_index_pair[1] for indexed_sample_locus_index_pair in chunk ]
feature_test,label_true = make_dataset.extract_seq_epi_dataset_unshuffled(test_sample_locus_index_pair_sublist,pos_sample_locus_index_pair_set,seq_array,multisample_multimark_epi_array,sample_to_sample_index)
output = model.predict(feature_test)
prob_pred = numpy.squeeze(output,axis=1)
label_pred = list(map(lambda prob: int(round(prob)), prob_pred))
prob_pred_list.append(prob_pred)
label_pred_list.append(label_pred)
label_true_list.append(label_true)
prob_pred = list(itertools.chain(*prob_pred_list))
label_pred = list(itertools.chain(*label_pred_list))
label_true = list(itertools.chain(*label_true_list))
f1 = f1_score (label_true ,label_pred)
mcc = matthews_corrcoef(label_true ,label_pred)
au_ro_curve = roc_auc_score(label_true ,prob_pred)
au_pr_curve = average_precision_score(label_true ,prob_pred)
model_evaluation_filename = "eval_model/" + keras_model_filename.split("/")[-1] + "." + config.log_id + ".xls"
with open(model_evaluation_filename,"wt") as model_evaluation_file:
model_evaluation_file.write(keras_model_filename.split("/")[-1][:-20] + "\t" + config.log_id + "\tAUROC\t" + str(au_ro_curve)+"\n")
model_evaluation_file.write(keras_model_filename.split("/")[-1][:-20] + "\t" + config.log_id + "\tAUPRC\t" + str(au_pr_curve)+"\n")
model_evaluation_file.write(keras_model_filename.split("/")[-1][:-20] + "\t" + config.log_id + "\tMCC\t" + str(mcc)+"\n")
model_evaluation_file.write(keras_model_filename.split("/")[-1][:-20] + "\t" + config.log_id + "\tF1\t" + str(f1)+"\n")
if __name__ == "__main__":
main()
| [
"sklearn.metrics.f1_score",
"deeplucia_toolkit.make_dataset.extract_seq_epi_dataset_unshuffled",
"pathlib.Path",
"functional.pseq",
"deeplucia_toolkit.prep_label.get_chrom_range",
"sklearn.metrics.average_precision_score",
"deeplucia_toolkit.prep_label.get_anchor_locus",
"itertools.chain",
"types.Si... | [((2637, 2671), 'deeplucia_toolkit.misc.get_sample_index', 'misc.get_sample_index', (['sample_list'], {}), '(sample_list)\n', (2658, 2671), False, 'from deeplucia_toolkit import misc\n'), ((2708, 2754), 'deeplucia_toolkit.prep_matrix.load_seq_array', 'prep_matrix.load_seq_array', (['seq_numpy_filename'], {}), '(seq_numpy_filename)\n', (2734, 2754), False, 'from deeplucia_toolkit import prep_matrix\n'), ((2790, 2916), 'deeplucia_toolkit.prep_matrix.load_multisample_multimark_epi_array', 'prep_matrix.load_multisample_multimark_epi_array', (['sample_list', 'mark_list', 'sample_mark_to_epi_numpy_filename'], {'cap_crit': '(0.95)'}), '(sample_list, mark_list,\n sample_mark_to_epi_numpy_filename, cap_crit=0.95)\n', (2838, 2916), False, 'from deeplucia_toolkit import prep_matrix\n'), ((2984, 3023), 'deeplucia_toolkit.misc.load_chrominfo', 'misc.load_chrominfo', (['chrominfo_filename'], {}), '(chrominfo_filename)\n', (3003, 3023), False, 'from deeplucia_toolkit import misc\n'), ((3060, 3126), 'deeplucia_toolkit.prep_label.load_loop_label', 'prep_label.load_loop_label', (['sample_list', 'loop_label_txtgz_filename'], {}), '(sample_list, loop_label_txtgz_filename)\n', (3086, 3126), False, 'from deeplucia_toolkit import prep_label\n'), ((3152, 3225), 'deeplucia_toolkit.prep_label.get_anchor_locus', 'prep_label.get_anchor_locus', (['sample_list', 'pos_sample_locus_index_pair_set'], {}), '(sample_list, pos_sample_locus_index_pair_set)\n', (3179, 3225), False, 'from deeplucia_toolkit import prep_label\n'), ((3254, 3324), 'deeplucia_toolkit.prep_label.get_chrom_range', 'prep_label.get_chrom_range', (['anchor_label_txtgz_filename', 'chrom_to_size'], {}), '(anchor_label_txtgz_filename, chrom_to_size)\n', (3280, 3324), False, 'from deeplucia_toolkit import prep_label\n'), ((3579, 3677), 'functools.partial', 'functools.partial', (['misc.is_intra_chrom'], {'chrom_to_locus_index_range': 'chrom_to_locus_index_range'}), '(misc.is_intra_chrom, chrom_to_locus_index_range=\n 
chrom_to_locus_index_range)\n', (3596, 3677), False, 'import functools\n'), ((3693, 3786), 'functools.partial', 'functools.partial', (['misc.is_anchor_bearing'], {'anchor_locus_index_set': 'anchor_locus_index_set'}), '(misc.is_anchor_bearing, anchor_locus_index_set=\n anchor_locus_index_set)\n', (3710, 3786), False, 'import functools\n'), ((3806, 3878), 'functools.partial', 'functools.partial', (['prep_label.permute_same_distance'], {'index_max': 'index_max'}), '(prep_label.permute_same_distance, index_max=index_max)\n', (3823, 3878), False, 'import functools\n'), ((3915, 4027), 'functools.partial', 'functools.partial', (['prep_label.gen_neg_sample_locus_index_pair'], {'sample_list': 'sample_list', 'index_max': 'index_max'}), '(prep_label.gen_neg_sample_locus_index_pair, sample_list=\n sample_list, index_max=index_max)\n', (3932, 4027), False, 'import functools\n'), ((4046, 4142), 'functools.partial', 'functools.partial', (['misc.is_in_desired_range_set'], {'index_range_set': 'test_locus_index_range_set'}), '(misc.is_in_desired_range_set, index_range_set=\n test_locus_index_range_set)\n', (4063, 4142), False, 'import functools\n'), ((4555, 4672), 'functools.partial', 'functools.partial', (['permute_pos_sample_locus_index_pair'], {'n2p_ratio': 'n2p_ratio', 'is_in_range_set': 'is_in_test_range_set'}), '(permute_pos_sample_locus_index_pair, n2p_ratio=n2p_ratio,\n is_in_range_set=is_in_test_range_set)\n', (4572, 4672), False, 'import functools\n'), ((5270, 5315), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['keras_model_filename'], {}), '(keras_model_filename)\n', (5293, 5315), False, 'from tensorflow import keras\n'), ((6308, 6340), 'sklearn.metrics.f1_score', 'f1_score', (['label_true', 'label_pred'], {}), '(label_true, label_pred)\n', (6316, 6340), False, 'from sklearn.metrics import f1_score\n'), ((6349, 6390), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['label_true', 'label_pred'], {}), '(label_true, label_pred)\n', (6366, 
6390), False, 'from sklearn.metrics import matthews_corrcoef\n'), ((6406, 6442), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['label_true', 'prob_pred'], {}), '(label_true, prob_pred)\n', (6419, 6442), False, 'from sklearn.metrics import roc_auc_score\n'), ((6458, 6504), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['label_true', 'prob_pred'], {}), '(label_true, prob_pred)\n', (6481, 6504), False, 'from sklearn.metrics import average_precision_score\n'), ((205, 215), 'sys.exit', 'sys.exit', ([], {}), '()\n', (213, 215), False, 'import sys\n'), ((1349, 1376), 'json.load', 'json.load', (['config_json_file'], {}), '(config_json_file)\n', (1358, 1376), False, 'import json\n'), ((1388, 1418), 'types.SimpleNamespace', 'SimpleNamespace', ([], {}), '(**config_dict)\n', (1403, 1418), False, 'from types import SimpleNamespace\n'), ((5697, 5892), 'deeplucia_toolkit.make_dataset.extract_seq_epi_dataset_unshuffled', 'make_dataset.extract_seq_epi_dataset_unshuffled', (['test_sample_locus_index_pair_sublist', 'pos_sample_locus_index_pair_set', 'seq_array', 'multisample_multimark_epi_array', 'sample_to_sample_index'], {}), '(\n test_sample_locus_index_pair_sublist, pos_sample_locus_index_pair_set,\n seq_array, multisample_multimark_epi_array, sample_to_sample_index)\n', (5744, 5892), False, 'from deeplucia_toolkit import make_dataset\n'), ((5933, 5962), 'numpy.squeeze', 'numpy.squeeze', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (5946, 5962), False, 'import numpy\n'), ((6158, 6190), 'itertools.chain', 'itertools.chain', (['*prob_pred_list'], {}), '(*prob_pred_list)\n', (6173, 6190), False, 'import itertools\n'), ((6211, 6244), 'itertools.chain', 'itertools.chain', (['*label_pred_list'], {}), '(*label_pred_list)\n', (6226, 6244), False, 'import itertools\n'), ((6265, 6298), 'itertools.chain', 'itertools.chain', (['*label_true_list'], {}), '(*label_true_list)\n', (6280, 6298), False, 'import itertools\n'), ((1475, 1485), 'pathlib.Path.cwd', 
'Path.cwd', ([], {}), '()\n', (1483, 1485), False, 'from pathlib import Path\n'), ((1936, 1957), 'pathlib.Path', 'Path', (['local_path_base'], {}), '(local_path_base)\n', (1940, 1957), False, 'from pathlib import Path\n'), ((2025, 2046), 'pathlib.Path', 'Path', (['local_path_base'], {}), '(local_path_base)\n', (2029, 2046), False, 'from pathlib import Path\n'), ((2421, 2442), 'pathlib.Path', 'Path', (['local_path_base'], {}), '(local_path_base)\n', (2425, 2442), False, 'from pathlib import Path\n'), ((2108, 2129), 'pathlib.Path', 'Path', (['local_path_base'], {}), '(local_path_base)\n', (2112, 2129), False, 'from pathlib import Path\n'), ((4787, 4824), 'functional.pseq', 'pseq', (['pos_sample_locus_index_pair_set'], {}), '(pos_sample_locus_index_pair_set)\n', (4791, 4824), False, 'from functional import pseq\n'), ((4907, 4950), 'functional.pseq', 'pseq', (['test_pos_sample_locus_index_pair_list'], {}), '(test_pos_sample_locus_index_pair_list)\n', (4911, 4950), False, 'from functional import pseq\n')] |
#!/usr/bin/env python
from __future__ import print_function
import sys
sys.path.append('.')
import example
try:
import numpy as np
except ImportError:
print('NumPy missing')
exit(0)
from example import vectorized_func
from example import vectorized_func2
from example import vectorized_func3
print(vectorized_func3(np.array(3+7j)))
for f in [vectorized_func, vectorized_func2]:
print(f(1, 2, 3))
print(f(np.array(1), np.array(2), 3))
print(f(np.array([1, 3]), np.array([2, 4]), 3))
print(f(np.array([[1, 3, 5], [7, 9, 11]]), np.array([[2, 4, 6], [8, 10, 12]]), 3))
print(np.array([[1, 3, 5], [7, 9, 11]])* np.array([[2, 4, 6], [8, 10, 12]])*3)
print(f(np.array([[1, 2, 3], [4, 5, 6]]), np.array([2, 3, 4]), 2))
print(np.array([[1, 2, 3], [4, 5, 6]])* np.array([2, 3, 4])* 2)
print(f(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[2], [3]]), 2))
print(np.array([[1, 2, 3], [4, 5, 6]])* np.array([[2], [3]])* 2)
from example import selective_func
selective_func(np.array([1], dtype=np.int32))
selective_func(np.array([1.0], dtype=np.float32))
selective_func(np.array([1.0j], dtype=np.complex64))
| [
"sys.path.append",
"numpy.array"
] | [((71, 91), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (86, 91), False, 'import sys\n'), ((1012, 1041), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), '([1], dtype=np.int32)\n', (1020, 1041), True, 'import numpy as np\n'), ((1058, 1091), 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (1066, 1091), True, 'import numpy as np\n'), ((1108, 1144), 'numpy.array', 'np.array', (['[1.0j]'], {'dtype': 'np.complex64'}), '([1.0j], dtype=np.complex64)\n', (1116, 1144), True, 'import numpy as np\n'), ((330, 348), 'numpy.array', 'np.array', (['(3 + 7.0j)'], {}), '(3 + 7.0j)\n', (338, 348), True, 'import numpy as np\n'), ((428, 439), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (436, 439), True, 'import numpy as np\n'), ((441, 452), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (449, 452), True, 'import numpy as np\n'), ((470, 486), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (478, 486), True, 'import numpy as np\n'), ((488, 504), 'numpy.array', 'np.array', (['[2, 4]'], {}), '([2, 4])\n', (496, 504), True, 'import numpy as np\n'), ((522, 555), 'numpy.array', 'np.array', (['[[1, 3, 5], [7, 9, 11]]'], {}), '([[1, 3, 5], [7, 9, 11]])\n', (530, 555), True, 'import numpy as np\n'), ((557, 591), 'numpy.array', 'np.array', (['[[2, 4, 6], [8, 10, 12]]'], {}), '([[2, 4, 6], [8, 10, 12]])\n', (565, 591), True, 'import numpy as np\n'), ((692, 724), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (700, 724), True, 'import numpy as np\n'), ((726, 745), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (734, 745), True, 'import numpy as np\n'), ((831, 863), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (839, 863), True, 'import numpy as np\n'), ((865, 885), 'numpy.array', 'np.array', (['[[2], [3]]'], {}), '([[2], [3]])\n', (873, 885), True, 'import numpy as np\n'), ((607, 640), 
'numpy.array', 'np.array', (['[[1, 3, 5], [7, 9, 11]]'], {}), '([[1, 3, 5], [7, 9, 11]])\n', (615, 640), True, 'import numpy as np\n'), ((642, 676), 'numpy.array', 'np.array', (['[[2, 4, 6], [8, 10, 12]]'], {}), '([[2, 4, 6], [8, 10, 12]])\n', (650, 676), True, 'import numpy as np\n'), ((761, 793), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (769, 793), True, 'import numpy as np\n'), ((795, 814), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (803, 814), True, 'import numpy as np\n'), ((901, 933), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (909, 933), True, 'import numpy as np\n'), ((935, 955), 'numpy.array', 'np.array', (['[[2], [3]]'], {}), '([[2], [3]])\n', (943, 955), True, 'import numpy as np\n')] |
import os
import pickle
import cv2
import imageio
import pandas as pd
import numpy as np
from scipy.spatial.transform import Rotation as R
from ossid.utils import normalizeImage
from ossid.config import OSSID_DATA_ROOT
class TemplateDataset():
'''
A class providing utilities for loading templates from rendering grids
'''
def __init__(self, grid_root, obj_ids, obj_id_offset=0, preload = False, use_provided_template=False):
self.grid_root = grid_root
self.obj_ids = obj_ids
self.obj_id_offset = obj_id_offset
self.preload = preload
# Whether to load the templates rendered by ourself or provided by DTOID authors
self.use_provided_template = use_provided_template
if self.use_provided_template:
self.grid_root = os.path.join(OSSID_DATA_ROOT, "templates_LMO_DTOID")
print("TemplateDataset: using provided templates from", self.grid_root)
self.obj_id_offset = 0
pose_file = os.path.join(self.grid_root, "hinterstoisser_01/poses.txt")
poses = pd.read_csv(pose_file, sep=" ", header=None).values
self.grid_poses = poses.reshape((-1, 4, 4))
self.view_ids = list(range(len(self.grid_poses)))
# Convert the pose matrices to quaternions
self.grid_rots = self.grid_poses[:, :3, :3]
self.grid_quats = R.from_matrix(self.grid_rots).as_quat()
self.template_z_values = self.grid_poses[:, 2, 3]
else:
# Load the relative transformation for each renders
vid2rot_path = os.path.join(self.grid_root, "vid2rot.pkl")
self.vid2rot = pickle.load(open(vid2rot_path, "rb"))
self.view_ids = list(self.vid2rot.keys())
self.view_ids.sort()
# Extract the rotation for each view and convert them into quaternions
self.grid_rots = np.stack([v for k, v in self.vid2rot.items()], axis=0)
self.grid_quats = R.from_matrix(self.grid_rots).as_quat()
# The cache of all the templates of all objects
self.template_cache = {}
if preload:
print("TemplateDataset: Preloading all templates for all objects")
# Pre-load all templates for all objects
for obj_id in self.obj_ids:
self.template_cache[obj_id] = (self.getTemplatesAll(obj_id))
def getTemplate(self, obj_id, view_id):
view_id = int(view_id)
if self.preload and obj_id in self.template_cache:
all_img, all_xyz, all_mask = self.template_cache[obj_id]
return all_img[view_id], all_xyz[view_id], all_mask[view_id]
else:
obj_id = int(obj_id)
if self.use_provided_template:
template_folder = os.path.join(self.grid_root, "hinterstoisser_%02d" % obj_id)
color_path = os.path.join(template_folder, "%06d_a.png" % view_id)
depth_path = os.path.join(template_folder, "%06d_d.png" % view_id)
mask_path = os.path.join(template_folder, "%06d_m.png" % view_id)
img = cv2.imread(color_path)[:, :, ::-1]
# (124, 124, 3) in np.uint8. Do not know how to recover it to XYZ
depth = cv2.imread(depth_path)
# TODO: Figure out the camera intrinsics matrix if needed
xyz = depth
mask = cv2.imread(mask_path)[:, :, 0].astype(np.float32) / 255.0
else:
# The offset is only used when actually loading data from the disk
obj_id = obj_id + self.obj_id_offset
render_folder = os.path.join(self.grid_root, "%06d" % obj_id)
color_path = os.path.join(render_folder, "%04d_color.png" % view_id)
xyz_path = os.path.join(render_folder, "%04d_xyz.npy" % view_id)
mask_path = os.path.join(render_folder, "%04d_mask.npy" % view_id)
img = imageio.imread(color_path)
xyz = np.load(xyz_path)
mask = np.load(mask_path)
# Processing for PyTroch
img = img.transpose(2, 0, 1)
img = normalizeImage(img).astype(np.float32)
mask = mask[None].astype(np.float32)
xyz = xyz.transpose(2, 0, 1).astype(np.float32)
return img, xyz, mask
def getTemplatesAll(self, obj_id):
if self.preload and obj_id in self.template_cache:
return self.template_cache[obj_id]
else:
all_img = []
all_xyz = []
all_mask = []
for vid in self.view_ids:
img, xyz, mask = self.getTemplate(obj_id, vid)
all_img.append(img)
all_xyz.append(xyz)
all_mask.append(mask)
all_img = np.stack(all_img, 0)
all_xyz = np.stack(all_xyz, 0)
all_mask = np.stack(all_mask, 0)
return all_img, all_xyz, all_mask | [
"numpy.stack",
"numpy.load",
"ossid.utils.normalizeImage",
"pandas.read_csv",
"imageio.imread",
"cv2.imread",
"scipy.spatial.transform.Rotation.from_matrix",
"os.path.join"
] | [((803, 855), 'os.path.join', 'os.path.join', (['OSSID_DATA_ROOT', '"""templates_LMO_DTOID"""'], {}), "(OSSID_DATA_ROOT, 'templates_LMO_DTOID')\n", (815, 855), False, 'import os\n'), ((999, 1058), 'os.path.join', 'os.path.join', (['self.grid_root', '"""hinterstoisser_01/poses.txt"""'], {}), "(self.grid_root, 'hinterstoisser_01/poses.txt')\n", (1011, 1058), False, 'import os\n'), ((1608, 1651), 'os.path.join', 'os.path.join', (['self.grid_root', '"""vid2rot.pkl"""'], {}), "(self.grid_root, 'vid2rot.pkl')\n", (1620, 1651), False, 'import os\n'), ((4845, 4865), 'numpy.stack', 'np.stack', (['all_img', '(0)'], {}), '(all_img, 0)\n', (4853, 4865), True, 'import numpy as np\n'), ((4888, 4908), 'numpy.stack', 'np.stack', (['all_xyz', '(0)'], {}), '(all_xyz, 0)\n', (4896, 4908), True, 'import numpy as np\n'), ((4932, 4953), 'numpy.stack', 'np.stack', (['all_mask', '(0)'], {}), '(all_mask, 0)\n', (4940, 4953), True, 'import numpy as np\n'), ((1079, 1123), 'pandas.read_csv', 'pd.read_csv', (['pose_file'], {'sep': '""" """', 'header': 'None'}), "(pose_file, sep=' ', header=None)\n", (1090, 1123), True, 'import pandas as pd\n'), ((2802, 2862), 'os.path.join', 'os.path.join', (['self.grid_root', "('hinterstoisser_%02d' % obj_id)"], {}), "(self.grid_root, 'hinterstoisser_%02d' % obj_id)\n", (2814, 2862), False, 'import os\n'), ((2892, 2945), 'os.path.join', 'os.path.join', (['template_folder', "('%06d_a.png' % view_id)"], {}), "(template_folder, '%06d_a.png' % view_id)\n", (2904, 2945), False, 'import os\n'), ((2975, 3028), 'os.path.join', 'os.path.join', (['template_folder', "('%06d_d.png' % view_id)"], {}), "(template_folder, '%06d_d.png' % view_id)\n", (2987, 3028), False, 'import os\n'), ((3057, 3110), 'os.path.join', 'os.path.join', (['template_folder', "('%06d_m.png' % view_id)"], {}), "(template_folder, '%06d_m.png' % view_id)\n", (3069, 3110), False, 'import os\n'), ((3275, 3297), 'cv2.imread', 'cv2.imread', (['depth_path'], {}), '(depth_path)\n', (3285, 3297), False, 
'import cv2\n'), ((3668, 3713), 'os.path.join', 'os.path.join', (['self.grid_root', "('%06d' % obj_id)"], {}), "(self.grid_root, '%06d' % obj_id)\n", (3680, 3713), False, 'import os\n'), ((3743, 3798), 'os.path.join', 'os.path.join', (['render_folder', "('%04d_color.png' % view_id)"], {}), "(render_folder, '%04d_color.png' % view_id)\n", (3755, 3798), False, 'import os\n'), ((3826, 3879), 'os.path.join', 'os.path.join', (['render_folder', "('%04d_xyz.npy' % view_id)"], {}), "(render_folder, '%04d_xyz.npy' % view_id)\n", (3838, 3879), False, 'import os\n'), ((3908, 3962), 'os.path.join', 'os.path.join', (['render_folder', "('%04d_mask.npy' % view_id)"], {}), "(render_folder, '%04d_mask.npy' % view_id)\n", (3920, 3962), False, 'import os\n'), ((3985, 4011), 'imageio.imread', 'imageio.imread', (['color_path'], {}), '(color_path)\n', (3999, 4011), False, 'import imageio\n'), ((4034, 4051), 'numpy.load', 'np.load', (['xyz_path'], {}), '(xyz_path)\n', (4041, 4051), True, 'import numpy as np\n'), ((4075, 4093), 'numpy.load', 'np.load', (['mask_path'], {}), '(mask_path)\n', (4082, 4093), True, 'import numpy as np\n'), ((4179, 4198), 'ossid.utils.normalizeImage', 'normalizeImage', (['img'], {}), '(img)\n', (4193, 4198), False, 'from ossid.utils import normalizeImage\n'), ((1392, 1421), 'scipy.spatial.transform.Rotation.from_matrix', 'R.from_matrix', (['self.grid_rots'], {}), '(self.grid_rots)\n', (1405, 1421), True, 'from scipy.spatial.transform import Rotation as R\n'), ((2002, 2031), 'scipy.spatial.transform.Rotation.from_matrix', 'R.from_matrix', (['self.grid_rots'], {}), '(self.grid_rots)\n', (2015, 2031), True, 'from scipy.spatial.transform import Rotation as R\n'), ((3134, 3156), 'cv2.imread', 'cv2.imread', (['color_path'], {}), '(color_path)\n', (3144, 3156), False, 'import cv2\n'), ((3423, 3444), 'cv2.imread', 'cv2.imread', (['mask_path'], {}), '(mask_path)\n', (3433, 3444), False, 'import cv2\n')] |
import os
import numpy as np
from holtztools import plots
from ..utils import apload, bitmask
from astropy.io import fits
chips=['a','b','c']
def comp1d(frame,apred='test',rows=range(300)) :
load=apload.ApLoad(apred=apred)
new=load.ap1D(frame)
old={}
mjd=55562+int(frame//10000)
fig,ax = plots.multi(1,3,hspace=0.001)
x=np.arange(2048)
for ichip,chip in enumerate(chips) :
old[chip]=fits.open(os.environ['APOGEE_REDUX']+'/r8/red/{:d}/ap1D-{:s}-{:d}.fits'.format(mjd,chip,frame))
for row in rows :
plots.plotl(ax[ichip],x,new[chip][1].data[row,:]/old[chip][1].data[row,:],yr=[0,1.5])
def compCframe(plate,frame,apred='test',ratio=True,rows=range(300),yr=None,hdu=1) :
load=apload.ApLoad(apred=apred)
mjd=55562+int(frame//10000)
new=load.apCframe('M67',plate,mjd,frame)
old={}
fig,ax = plots.multi(1,3,hspace=0.001)
x=np.arange(2048)
for ichip,chip in enumerate(chips) :
old[chip]=fits.open(os.environ['APOGEE_REDUX']+'/r8/apo25m/{:d}/{:d}/apCframe-{:s}-{:d}.fits'.format(plate,mjd,chip,frame))
for row in rows :
if ratio :
plots.plotl(ax[ichip],x,new[chip][hdu].data[row,:]/old[chip][hdu].data[row,:],yr=[0,1.5])
else :
plots.plotl(ax[ichip],x,new[chip][hdu].data[row,:],yr=yr)
plots.plotl(ax[ichip],x,old[chip][hdu].data[row,:],yr=yr)
plots.plotl(ax[ichip],x,new[chip][hdu].data[row,:]-old[chip][hdu].data[row,:],yr=yr)
#def comp2d(frame) :
#
def comp(plate=7267,mjd=56654,fiber=150,frame=10920059,field='M67') :
r11=apload.ApLoad(apred='r11')
v=r11.apVisit(plate,mjd,fiber)
a=r11.ap1D(frame)
c=r11.apCframe(field,plate,mjd,frame)
v14=fits.open(os.environ['APOGEE_REDUX']+'/r8/apo25m/{:d}/{:d}/apVisit-r8-{:d}-{:d}-{:03d}.fits'.format(plate,mjd,plate,mjd,fiber))
a14={}
c14={}
for chip in chips:
a14[chip]=fits.open(os.environ['APOGEE_REDUX']+'/r8/red/{:d}/ap1D-{:s}-{:d}.fits'.format(mjd,chip,frame))
c14[chip]=fits.open(os.environ['APOGEE_REDUX']+'/r8/apo25m/{:d}/{:d}/apCframe-{:s}-{:08d}.fits'.format(plate,mjd,chip,frame))
fig,ax=plots.multi(1,3,hspace=0.01)
x=np.arange(4096)
pixmask=bitmask.PixelBitMask()
for ichip,chip in enumerate(chips) :
y=v[1].data[ichip,:]
plots.plotl(ax[ichip],x,v[1].data[ichip,:]/v14[1].data[ichip,:])
bd = np.where( ((v[3].data[ichip,:] & pixmask.badval()) > 0) |
((v[3].data[ichip,:] & pixmask.getval('SIG_SKYLINE')) > 0) ) [0]
y[bd]=np.nan
plots.plotl(ax[ichip],x,y/v14[1].data[ichip,:])
fig,ax=plots.multi(3,3,hspace=0.01)
x=np.arange(2048)
for ichip,chip in enumerate(chips) :
plots.plotl(ax[ichip,0],x,c[chip][1].data[300-fiber,:])
plots.plotl(ax[ichip,0],x,c14[chip][1].data[300-fiber,:])
plots.plotl(ax[ichip,1],x,c[chip][1].data[300-fiber,:]/c14[chip][1].data[300-fiber])
plots.plotl(ax[ichip,2],x,a[chip][1].data[300-fiber,:]/a14[chip][1].data[300-fiber])
| [
"holtztools.plots.plotl",
"holtztools.plots.multi",
"numpy.arange"
] | [((310, 341), 'holtztools.plots.multi', 'plots.multi', (['(1)', '(3)'], {'hspace': '(0.001)'}), '(1, 3, hspace=0.001)\n', (321, 341), False, 'from holtztools import plots\n'), ((346, 361), 'numpy.arange', 'np.arange', (['(2048)'], {}), '(2048)\n', (355, 361), True, 'import numpy as np\n'), ((863, 894), 'holtztools.plots.multi', 'plots.multi', (['(1)', '(3)'], {'hspace': '(0.001)'}), '(1, 3, hspace=0.001)\n', (874, 894), False, 'from holtztools import plots\n'), ((899, 914), 'numpy.arange', 'np.arange', (['(2048)'], {}), '(2048)\n', (908, 914), True, 'import numpy as np\n'), ((2179, 2209), 'holtztools.plots.multi', 'plots.multi', (['(1)', '(3)'], {'hspace': '(0.01)'}), '(1, 3, hspace=0.01)\n', (2190, 2209), False, 'from holtztools import plots\n'), ((2214, 2229), 'numpy.arange', 'np.arange', (['(4096)'], {}), '(4096)\n', (2223, 2229), True, 'import numpy as np\n'), ((2657, 2687), 'holtztools.plots.multi', 'plots.multi', (['(3)', '(3)'], {'hspace': '(0.01)'}), '(3, 3, hspace=0.01)\n', (2668, 2687), False, 'from holtztools import plots\n'), ((2692, 2707), 'numpy.arange', 'np.arange', (['(2048)'], {}), '(2048)\n', (2701, 2707), True, 'import numpy as np\n'), ((2343, 2413), 'holtztools.plots.plotl', 'plots.plotl', (['ax[ichip]', 'x', '(v[1].data[ichip, :] / v14[1].data[ichip, :])'], {}), '(ax[ichip], x, v[1].data[ichip, :] / v14[1].data[ichip, :])\n', (2354, 2413), False, 'from holtztools import plots\n'), ((2596, 2648), 'holtztools.plots.plotl', 'plots.plotl', (['ax[ichip]', 'x', '(y / v14[1].data[ichip, :])'], {}), '(ax[ichip], x, y / v14[1].data[ichip, :])\n', (2607, 2648), False, 'from holtztools import plots\n'), ((2757, 2818), 'holtztools.plots.plotl', 'plots.plotl', (['ax[ichip, 0]', 'x', 'c[chip][1].data[300 - fiber, :]'], {}), '(ax[ichip, 0], x, c[chip][1].data[300 - fiber, :])\n', (2768, 2818), False, 'from holtztools import plots\n'), ((2821, 2884), 'holtztools.plots.plotl', 'plots.plotl', (['ax[ichip, 0]', 'x', 'c14[chip][1].data[300 - fiber, :]'], {}), 
'(ax[ichip, 0], x, c14[chip][1].data[300 - fiber, :])\n', (2832, 2884), False, 'from holtztools import plots\n'), ((2887, 2986), 'holtztools.plots.plotl', 'plots.plotl', (['ax[ichip, 1]', 'x', '(c[chip][1].data[300 - fiber, :] / c14[chip][1].data[300 - fiber])'], {}), '(ax[ichip, 1], x, c[chip][1].data[300 - fiber, :] / c14[chip][1]\n .data[300 - fiber])\n', (2898, 2986), False, 'from holtztools import plots\n'), ((2980, 3079), 'holtztools.plots.plotl', 'plots.plotl', (['ax[ichip, 2]', 'x', '(a[chip][1].data[300 - fiber, :] / a14[chip][1].data[300 - fiber])'], {}), '(ax[ichip, 2], x, a[chip][1].data[300 - fiber, :] / a14[chip][1]\n .data[300 - fiber])\n', (2991, 3079), False, 'from holtztools import plots\n'), ((555, 652), 'holtztools.plots.plotl', 'plots.plotl', (['ax[ichip]', 'x', '(new[chip][1].data[row, :] / old[chip][1].data[row, :])'], {'yr': '[0, 1.5]'}), '(ax[ichip], x, new[chip][1].data[row, :] / old[chip][1].data[row,\n :], yr=[0, 1.5])\n', (566, 652), False, 'from holtztools import plots\n'), ((1152, 1254), 'holtztools.plots.plotl', 'plots.plotl', (['ax[ichip]', 'x', '(new[chip][hdu].data[row, :] / old[chip][hdu].data[row, :])'], {'yr': '[0, 1.5]'}), '(ax[ichip], x, new[chip][hdu].data[row, :] / old[chip][hdu].data\n [row, :], yr=[0, 1.5])\n', (1163, 1254), False, 'from holtztools import plots\n'), ((1276, 1337), 'holtztools.plots.plotl', 'plots.plotl', (['ax[ichip]', 'x', 'new[chip][hdu].data[row, :]'], {'yr': 'yr'}), '(ax[ichip], x, new[chip][hdu].data[row, :], yr=yr)\n', (1287, 1337), False, 'from holtztools import plots\n'), ((1349, 1410), 'holtztools.plots.plotl', 'plots.plotl', (['ax[ichip]', 'x', 'old[chip][hdu].data[row, :]'], {'yr': 'yr'}), '(ax[ichip], x, old[chip][hdu].data[row, :], yr=yr)\n', (1360, 1410), False, 'from holtztools import plots\n'), ((1422, 1518), 'holtztools.plots.plotl', 'plots.plotl', (['ax[ichip]', 'x', '(new[chip][hdu].data[row, :] - old[chip][hdu].data[row, :])'], {'yr': 'yr'}), '(ax[ichip], x, new[chip][hdu].data[row, :] 
- old[chip][hdu].data\n [row, :], yr=yr)\n', (1433, 1518), False, 'from holtztools import plots\n')] |
# This program allows you to select the color ranges of OpenCv using sliders.
# Slide them around till only your object is visible.
# Press esc when you are done.
# Written by Aruldd on http://stackoverflow.com/questions/10948589/choosing-
# correct-hsv-values-for-opencv-thresholding-with-inranges
# <NAME> added 3 more sliders to make the range truly a range
# And combined <NAME>'s cvpicker.py from gist.github.com/trhura
import cv2
import numpy as np
colors = []
FILENAME = '/Users/josh/Desktop/jump_freq.m4v'
WEBCAM = False
def nothing(x):
pass
def on_mouse_click(event, x, y, flags, frame):
if event == cv2.EVENT_LBUTTONUP:
colors.append(frame[y, x].tolist())
def main():
# if using a webcam, load the camera
if WEBCAM:
capture = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
capture = cv2.VideoCapture(FILENAME)
# while True:
# Reads a video frames
_, frame = capture.read()
# hsv = frame
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
cv2.putText(hsv, 'Press q when done', (500, 50),
cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 2,)
if colors:
cv2.putText(hsv, str(colors[-1]), (10, 50), cv2.FONT_HERSHEY_PLAIN,
2, (0, 0, 0), 2)
cv2.imshow('frame', hsv)
cv2.setMouseCallback('frame', on_mouse_click, hsv)
while not cv2.waitKey(1) & 0xFF == ord('q'):
pass
# releases the windows
capture.release()
cv2.destroyAllWindows()
h_low = min(c[0] for c in colors)
s_low = min(c[1] for c in colors)
v_low = min(c[2] for c in colors)
maxb = max(c[0] for c in colors)
maxg = max(c[1] for c in colors)
maxr = max(c[2] for c in colors)
# print h_low, s_low, v_low, maxr, maxg, maxb
lb = [h_low, s_low, v_low]
ub = [maxb, maxg, maxr]
print('Lower boundary: {} \n Upper boundary: {}'.format(lb, ub))
# if using a webcam, load the camera
if WEBCAM:
capture = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
capture = cv2.VideoCapture(FILENAME)
# Creating a window for later use
cv2.namedWindow('result')
# Starting with 100's to prevent error while masking
# h_low, s_low, v_low = 100, 100, 100
# h_high, s_high, v_high = 200, 200, 200
# Creating track bar
cv2.createTrackbar('h_low', 'result', h_low, 179, nothing)
cv2.createTrackbar('s_low', 'result', s_low, 255, nothing)
cv2.createTrackbar('v_low', 'result', v_low, 255, nothing)
cv2.createTrackbar('h_high', 'result', h_low + 25, 179, nothing)
cv2.createTrackbar('s_high', 'result', 255, 255, nothing)
cv2.createTrackbar('v_high', 'result', 255, 255, nothing)
# capture another frame to test the result
_, frame = capture.read()
# converting to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# --- Phase 2: interactively tune the HSV bounds with trackbars ---
while(1):
    # get info from track bar and apply to result
    h_low = cv2.getTrackbarPos('h_low', 'result')
    s_low = cv2.getTrackbarPos('s_low', 'result')
    v_low = cv2.getTrackbarPos('v_low', 'result')
    h_high = cv2.getTrackbarPos('h_high', 'result')
    s_high = cv2.getTrackbarPos('s_high', 'result')
    v_high = cv2.getTrackbarPos('v_high', 'result')
    # Normal masking algorithm: keep only pixels inside the HSV box.
    lower_blue = np.array([h_low, s_low, v_low])
    upper_blue = np.array([h_high, s_high, v_high])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('result', result)
    cv2.putText(hsv, 'Press q when done', (500, 50),
                cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 2,)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:  # ESC: report the chosen bounds and quit
        print('Hue lower: {}'.format(h_low))
        print('Saturation lower: {}'.format(s_low))
        print('Value lower: {}'.format(v_low))
        print('Hue upper: {}'.format(h_high))
        print('Saturation upper: {}'.format(s_high))
        print('Value upper: {}'.format(v_high))
        break
# Bug fix: the original called `cap.release()`, but `cap` is never defined;
# the open VideoCapture is named `capture`, so exiting raised NameError.
capture.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
    # Entry point; `main` is defined earlier in the original script
    # (not visible in this chunk).
    main()
| [
"cv2.createTrackbar",
"cv2.putText",
"cv2.bitwise_and",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.setMouseCallback",
"numpy.array",
"cv2.getTrackbarPos",
"cv2.destroyAllWindows",
"cv2.inRange",
"cv2.namedWindow"
] | [((1008, 1046), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (1020, 1046), False, 'import cv2\n'), ((1051, 1150), 'cv2.putText', 'cv2.putText', (['hsv', '"""Press q when done"""', '(500, 50)', 'cv2.FONT_HERSHEY_PLAIN', '(2)', '(255, 255, 255)', '(2)'], {}), "(hsv, 'Press q when done', (500, 50), cv2.FONT_HERSHEY_PLAIN, 2,\n (255, 255, 255), 2)\n", (1062, 1150), False, 'import cv2\n'), ((1296, 1320), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'hsv'], {}), "('frame', hsv)\n", (1306, 1320), False, 'import cv2\n'), ((1325, 1375), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""frame"""', 'on_mouse_click', 'hsv'], {}), "('frame', on_mouse_click, hsv)\n", (1345, 1375), False, 'import cv2\n'), ((1493, 1516), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1514, 1516), False, 'import cv2\n'), ((2167, 2192), 'cv2.namedWindow', 'cv2.namedWindow', (['"""result"""'], {}), "('result')\n", (2182, 2192), False, 'import cv2\n'), ((2368, 2426), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""h_low"""', '"""result"""', 'h_low', '(179)', 'nothing'], {}), "('h_low', 'result', h_low, 179, nothing)\n", (2386, 2426), False, 'import cv2\n'), ((2431, 2489), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""s_low"""', '"""result"""', 's_low', '(255)', 'nothing'], {}), "('s_low', 'result', s_low, 255, nothing)\n", (2449, 2489), False, 'import cv2\n'), ((2494, 2552), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""v_low"""', '"""result"""', 'v_low', '(255)', 'nothing'], {}), "('v_low', 'result', v_low, 255, nothing)\n", (2512, 2552), False, 'import cv2\n'), ((2557, 2621), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""h_high"""', '"""result"""', '(h_low + 25)', '(179)', 'nothing'], {}), "('h_high', 'result', h_low + 25, 179, nothing)\n", (2575, 2621), False, 'import cv2\n'), ((2626, 2683), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""s_high"""', '"""result"""', '(255)', '(255)', 'nothing'], 
{}), "('s_high', 'result', 255, 255, nothing)\n", (2644, 2683), False, 'import cv2\n'), ((2688, 2745), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""v_high"""', '"""result"""', '(255)', '(255)', 'nothing'], {}), "('v_high', 'result', 255, 255, nothing)\n", (2706, 2745), False, 'import cv2\n'), ((2860, 2898), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (2872, 2898), False, 'import cv2\n'), ((4144, 4167), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4165, 4167), False, 'import cv2\n'), ((777, 796), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (793, 796), False, 'import cv2\n'), ((877, 903), 'cv2.VideoCapture', 'cv2.VideoCapture', (['FILENAME'], {}), '(FILENAME)\n', (893, 903), False, 'import cv2\n'), ((1997, 2016), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2013, 2016), False, 'import cv2\n'), ((2097, 2123), 'cv2.VideoCapture', 'cv2.VideoCapture', (['FILENAME'], {}), '(FILENAME)\n', (2113, 2123), False, 'import cv2\n'), ((2983, 3020), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""h_low"""', '"""result"""'], {}), "('h_low', 'result')\n", (3001, 3020), False, 'import cv2\n'), ((3037, 3074), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""s_low"""', '"""result"""'], {}), "('s_low', 'result')\n", (3055, 3074), False, 'import cv2\n'), ((3091, 3128), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""v_low"""', '"""result"""'], {}), "('v_low', 'result')\n", (3109, 3128), False, 'import cv2\n'), ((3147, 3185), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""h_high"""', '"""result"""'], {}), "('h_high', 'result')\n", (3165, 3185), False, 'import cv2\n'), ((3203, 3241), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""s_high"""', '"""result"""'], {}), "('s_high', 'result')\n", (3221, 3241), False, 'import cv2\n'), ((3259, 3297), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""v_high"""', '"""result"""'], {}), "('v_high', 'result')\n", (3277, 
3297), False, 'import cv2\n'), ((3355, 3386), 'numpy.array', 'np.array', (['[h_low, s_low, v_low]'], {}), '([h_low, s_low, v_low])\n', (3363, 3386), True, 'import numpy as np\n'), ((3408, 3442), 'numpy.array', 'np.array', (['[h_high, s_high, v_high]'], {}), '([h_high, s_high, v_high])\n', (3416, 3442), True, 'import numpy as np\n'), ((3459, 3499), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_blue', 'upper_blue'], {}), '(hsv, lower_blue, upper_blue)\n', (3470, 3499), False, 'import cv2\n'), ((3518, 3558), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (3533, 3558), False, 'import cv2\n'), ((3568, 3596), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'result'], {}), "('result', result)\n", (3578, 3596), False, 'import cv2\n'), ((3606, 3705), 'cv2.putText', 'cv2.putText', (['hsv', '"""Press q when done"""', '(500, 50)', 'cv2.FONT_HERSHEY_PLAIN', '(2)', '(255, 255, 255)', '(2)'], {}), "(hsv, 'Press q when done', (500, 50), cv2.FONT_HERSHEY_PLAIN, 2,\n (255, 255, 255), 2)\n", (3617, 3705), False, 'import cv2\n'), ((3736, 3750), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (3747, 3750), False, 'import cv2\n'), ((1391, 1405), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1402, 1405), False, 'import cv2\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/models/models.ar.ipynb (unless otherwise specified).
__all__ = ['AssosiationRules']
# Cell
import numpy as np
import pandas as pd
import collections as col
# Cell
class AssosiationRules:
'''
    AssosiationRules(pruning=10, session_key='SessionID', item_keys=['ItemID'])
    Parameters
    --------
    pruning : int
        Prune the results per item to a list of the top N co-occurrences. (Default value: 10)
    session_key : string
        The data frame key for the session identifier. (Default value: SessionID)
    item_keys : list of strings
        The data frame list of keys for the item identifier as first item in list
        and features keys next. (Default value: [ItemID])
'''
def __init__( self, pruning=10, session_key='SessionID', item_keys=['ItemID'] ):
self.pruning = pruning
self.session_key = session_key
self.item_keys = item_keys
self.items_features = {}
self.predict_for_item_ids = []
    def fit( self, data):
        '''
        Trains the predictor.
        Mines, for the item-ID column and for every feature column, a
        symmetric within-session co-occurrence count table, prunes each
        table, and caches them in ``self.all_rules``.
        Parameters
        --------
        data: pandas.DataFrame
            Training data. It contains the transactions of the sessions.
            It has one column for session IDs, one for item IDs and many for the
            item features if exist.
            It must have a header. Column names are arbitrary, but must
            correspond to the ones you set during the initialization of the
            network (session_key, item_keys).
        '''
        cur_session = -1
        last_items = []
        all_rules = []      # one co-occurrence dict per column in item_keys
        indices_item = []   # positional indices of those columns
        for i in self.item_keys:
            all_rules.append(dict())
            indices_item.append( data.columns.get_loc(i) )
        # NOTE(review): this sorts the *caller's* DataFrame in place.
        data.sort_values(self.session_key, inplace=True)
        index_session = data.columns.get_loc(self.session_key)
        #Create Dictionary of items and their features
        # (first entry is the item ID itself, the rest are its feature values)
        for row in data.itertuples( index=False ):
            item_id = row[indices_item[0]]
            if not item_id in self.items_features.keys() :
                self.items_features[item_id] = []
                for i in indices_item:
                    self.items_features[item_id].append(row[i])
        # One pass per column: count symmetric co-occurrences of values that
        # appear together within the same session.
        for i in range(len(self.item_keys)):
            rules = all_rules[i]
            index_item = indices_item[i]
            for row in data.itertuples( index=False ):
                session_id, item_id = row[index_session], row[index_item]
                if session_id != cur_session:
                    # Session boundary: start collecting a fresh item list.
                    cur_session = session_id
                    last_items = []
                else:
                    # Pair the current value with every earlier value of the
                    # same session, incrementing both directions of the count.
                    for item_id2 in last_items:
                        if not item_id in rules :
                            rules[item_id] = dict()
                        if not item_id2 in rules :
                            rules[item_id2] = dict()
                        if not item_id in rules[item_id2]:
                            rules[item_id2][item_id] = 0
                        if not item_id2 in rules[item_id]:
                            rules[item_id][item_id2] = 0
                        rules[item_id][item_id2] += 1
                        rules[item_id2][item_id] += 1
                last_items.append(item_id)
            # NOTE(review): cur_session/last_items are not reset between the
            # per-column passes; this only matters if a pass's first session
            # ID equals the previous pass's last session ID — confirm.
            if self.pruning > 0:
                rules = self.prune(rules)
            all_rules[i] = rules
        self.all_rules = all_rules
        # Candidate list for predict_next: every item ID seen in training.
        self.predict_for_item_ids = list(self.all_rules[0].keys())
def predict_next(self, session_items, k = 20):
'''
Gives predicton scores for a selected set of items on how likely they be the next item in the session.
Parameters
--------
session_items : List
Items IDs in current session.
k : Integer
How many items to recommend
Returns
--------
out : pandas.Series
Prediction scores for selected items on how likely to be the next item of this session.
Indexed by the item IDs.
'''
all_len = len(self.predict_for_item_ids)
input_item_id = session_items[-1]
preds = np.zeros( all_len )
if input_item_id in self.all_rules[0].keys():
for k_ind in range(all_len):
key = self.predict_for_item_ids[k_ind]
if key in session_items:
continue
try:
preds[ k_ind ] += self.all_rules[0][input_item_id][key]
except:
pass
for i in range(1, len(self.all_rules)):
input_item_feature = self.items_features[input_item_id][i]
key_feature = self.items_features[key][i]
try:
preds[ k_ind ] += self.all_rules[i][input_item_feature][key_feature]
except:
pass
series = pd.Series(data=preds, index=self.predict_for_item_ids)
series = series / series.max()
return series.nlargest(k).index.values
    def prune(self, rules):
        '''
        Prune each item's co-occurrence dict in place, keeping only the most
        frequent co-occurrences.

        If ``self.pruning`` >= 1 it is the number of entries kept per item;
        if 0 < ``self.pruning`` < 1 it is the fraction of entries dropped.

        Parameters
        --------
        rules : dict of dicts
            The rules mined from the training data
        '''
        for k1 in rules:
            tmp = rules[k1]
            if self.pruning < 1:
                keep = len(tmp) - int( len(tmp) * self.pruning )
            elif self.pruning >= 1:
                keep = self.pruning
            # Keep the `keep` highest-count co-occurrences for this item.
            counter = col.Counter( tmp )
            rules[k1] = dict()
            for k2, v in counter.most_common( keep ):
                rules[k1][k2] = v
return rules | [
"collections.Counter",
"numpy.zeros",
"pandas.Series"
] | [((4158, 4175), 'numpy.zeros', 'np.zeros', (['all_len'], {}), '(all_len)\n', (4166, 4175), True, 'import numpy as np\n'), ((4935, 4989), 'pandas.Series', 'pd.Series', ([], {'data': 'preds', 'index': 'self.predict_for_item_ids'}), '(data=preds, index=self.predict_for_item_ids)\n', (4944, 4989), True, 'import pandas as pd\n'), ((5615, 5631), 'collections.Counter', 'col.Counter', (['tmp'], {}), '(tmp)\n', (5626, 5631), True, 'import collections as col\n')] |
import math
import matplotlib.pyplot as plt
import numpy as np
from .vec_3d import Vec3D
def make_noe_hist(my_path, violations):
    """Plot and save a bar histogram of NOE distance violations.

    Parameters
    ----------
    my_path : str
        Directory into which "NOE_hist.svg" is written.
    violations : dict
        Maps the violation-range labels ("0-0.5", ..., "3<") to counts,
        as produced by ``noe_violations``.
    """
    plt.figure(figsize=(6, 5), dpi=80)
    n_groups = len(violations)
    ticks = ['0-0.5', '0.5-1', '1-1.5', '1.5-2', '2-2.5', '2.5-3', '3<']
    # Read the counts in the fixed bin order so bars and tick labels agree.
    means_men = [violations[t] for t in ticks]
    index = np.arange(n_groups)
    bar_width = 0.7
    plt.bar(index, means_men, bar_width, alpha=.7, color='b')
    plt.xlabel("Violation (Å)")
    plt.ylabel("# of NOE distance violations")
    plt.title("NOE distance violations")
    plt.xticks(index + bar_width / 2, ticks)
    # Bug fix: the original used `ax = plt.axes()`, which in modern
    # matplotlib adds a NEW, empty axes on top of the bar plot;
    # plt.gca() returns the axes the bars were actually drawn on.
    ax = plt.gca()
    ax.yaxis.grid()
    plt.tight_layout()
    plt.savefig(my_path + "/NOE_hist.svg", format="svg")
    plt.close()
def pdb2coords(model_data):
    """Load every model's PDB coordinates into a nested dictionary.

    Returns ``{model_index: {residue_number: {atom_name: Vec3D}}}``.
    Assumes atoms belonging to one residue appear contiguously in the
    atom group — a residue's dict is reset whenever the residue number
    changes from the previous atom.
    """
    coords_by_model = {}
    last_resnum = -1  # deliberately carried across models, like the original
    for model_idx in range(model_data.coordsets):
        # Switch the active coordinate set to this model.
        model_data.atomgroup.setACSIndex(model_idx)
        model_coords = {}
        coords_by_model[model_idx] = model_coords
        for atom in model_data.atomgroup:
            resnum = int(atom.getResnum())
            atom_name = str(atom.getName())
            if resnum != last_resnum:
                # New residue encountered: start a fresh atom dict for it.
                model_coords[resnum] = {}
                last_resnum = resnum
            model_coords[resnum][atom_name] = Vec3D(atom.getCoords())
    return coords_by_model
def noe_violations(model_data, my_path, db_entry, noe_restraints, bme_weights):
    """Back calculate NOE distance violations from given RDC lists and PDB
    models.

    Computes r^-6 (or r^-3) averaged distances per restraint group and per
    model, ensemble-averages them (BME-weighted if ``bme_weights`` is given),
    writes BME input files ("noe_exp.dat"/"noe_calc.dat"), bins the
    violations into a histogram via ``make_noe_hist`` and returns the total
    violation count.
    """
    r3_averaging = db_entry.r3average
    restraints = noe_restraints.resolved_restraints
    pdb_coords = pdb2coords(model_data)
    prev_id = -1
    avg_distances = {}   # model -> restraint group id -> distances (later: averaged distance)
    all_distances = {}   # model -> restraint index -> raw distance
    measured_avg = {}    # restraint group id -> ensemble-averaged distance
    str_distaces = {}    # restraint group id -> allowed maximum distance
    # Raw atom-atom distances for every model and restraint; restraints that
    # share a "csx_id" form one group, collected in avg_distances.
    for model in list(pdb_coords.keys()):
        avg_distances[model] = {}
        all_distances[model] = {}
        for restraint_num, restraint in enumerate(restraints):
            rest_id = int(restraint["csx_id"])
            resnum1 = restraint["seq_ID1"]
            atom1 = restraint["atom_ID1"]
            resnum2 = restraint["seq_ID2"]
            atom2 = restraint["atom_ID2"]
            atom_coord1 = pdb_coords[model][resnum1][atom1]
            atom_coord2 = pdb_coords[model][resnum2][atom2]
            distance = (atom_coord1 - atom_coord2).magnitude()
            all_distances[model][restraint_num] = distance
            if prev_id == rest_id:
                avg_distances[model][rest_id].append(distance)
            else:
                prev_id = rest_id
                avg_distances[model][rest_id] = []
                str_distaces[rest_id] = restraint["dist_max"]
                avg_distances[model][rest_id].append(distance)
    # Debug-only loop: builds a per-restraint distance string (print disabled).
    for restraint_num, restraint in enumerate(restraints):
        rest_id = int(restraint["csx_id"])
        resnum1 = restraint["seq_ID1"]
        atom1 = restraint["atom_ID1"]
        resnum2 = restraint["seq_ID2"]
        atom2 = restraint["atom_ID2"]
        dist_str = "> {} {} {} {} {} | ".format(
            rest_id, resnum1, atom1, resnum2, atom2
        )
        for model in list(pdb_coords.keys()):
            dist_str += "{0:.2f} ".format(all_distances[model][restraint_num])
        # print("DISTS", dist_str)
    # at this point avg_distances[model][curr_id] contains distances for one
    # model and one restraint GROUP identified with "csx_id" number
    prev_id = -1
    # Collapse each restraint group into a single averaged distance per model
    # (r^-3 or r^-6 averaging depending on the database entry).
    for model in list(pdb_coords.keys()):
        for restraint in restraints:
            curr_id = int(restraint["csx_id"])
            if prev_id == curr_id:
                continue
            else:
                prev_id = curr_id
            avg = 0.0
            for distance in avg_distances[model][curr_id]:
                if r3_averaging:
                    avg += math.pow(float(distance), -3)
                else:
                    avg += math.pow(float(distance), -6)
            avg /= len(avg_distances[model][curr_id])
            if r3_averaging:
                avg_distances[model][curr_id] = math.pow(avg, -1.0/3)
            else:
                avg_distances[model][curr_id] = math.pow(avg, -1.0/6)
    # at this point avg_distances[model][curr_id] contain a single (r-6)
    # averaged distance for one model and one restraint GROUP identified with
    # "csx_id" number. Averaging is done on "in GROUP" distances
    # Ensemble average over models (optionally weighted by BME weights).
    for restraint in restraints:
        # NOTE(review): every other loop keys the group by "csx_id"; this one
        # reads "curr_distID" — confirm the two keys are the same value.
        curr_id = int(restraint["curr_distID"])
        avg = 0.0
        if bme_weights:
            for model in list(pdb_coords.keys()):
                avg += math.pow(
                    avg_distances[model][curr_id], -6
                ) * bme_weights[model]
            avg /= sum(bme_weights)
        else:
            for model in list(pdb_coords.keys()):
                avg += math.pow(avg_distances[model][curr_id], -6)
            avg /= len(list(pdb_coords.keys()))
        measured_avg[curr_id] = math.pow(avg, -1.0/6)
    # Write BME input files: experimental upper bounds and per-model
    # calculated distances.
    bme_exp_filename = "noe_exp.dat"
    bme_calc_filename = "noe_calc.dat"
    with open(my_path + bme_exp_filename, "w") as exp_dat_file:
        exp_dat_file.write("# DATA=NOE PRIOR=GAUSS POWER=6\n")
        prev_id = -1
        for restraint in restraints:
            if prev_id == restraint["csx_id"]:
                continue
            prev_id = restraint["csx_id"]
            exp_dat_file.write(
                str(restraint["csx_id"]) + "\t" +
                str(restraint["dist_max"]) + "\t0.1\n"
            )
    with open(my_path + bme_calc_filename, "w") as calc_dat_file:
        for model in list(pdb_coords.keys()):
            for i in avg_distances[model]:
                calc_dat_file.write(
                    str(avg_distances[model][i]) + " "
                )
            calc_dat_file.write("\n")
    # at this point measured_avg[curr_id] is a simple dictionary containing the
    # model averaged distances for the given "csx_id" number
    avg_dist_keys = list(measured_avg.keys())
    avg_dist_keys.sort()
    # Bin every violated restraint by how far it exceeds its upper bound (Å).
    violations = {"0-0.5": 0, "0.5-1": 0, "1-1.5": 0,
                  "1.5-2": 0, "2-2.5": 0, "2.5-3": 0, "3<": 0}
    viol_count = 0
    for key in avg_dist_keys:
        if measured_avg[key] > str_distaces[key]:
            viol_count += 1
            diff = measured_avg[key] - str_distaces[key]
            if diff <= 0.5:
                violations["0-0.5"] += 1
            elif 0.5 < diff <= 1:
                violations["0.5-1"] += 1
            elif 1 < diff <= 1.5:
                violations["1-1.5"] += 1
            elif 1.5 < diff <= 2:
                violations["1.5-2"] += 1
            elif 2 < diff <= 2.5:
                violations["2-2.5"] += 1
            elif 2.5 < diff <= 3:
                violations["2.5-3"] += 1
            else:
                violations["3<"] += 1
    print("Total # of violations:", viol_count)
    make_noe_hist(my_path, violations)
    return viol_count
| [
"matplotlib.pyplot.title",
"math.pow",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.close",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyp... | [((136, 170), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5)', 'dpi': '(80)'}), '(figsize=(6, 5), dpi=80)\n', (146, 170), True, 'import matplotlib.pyplot as plt\n'), ((480, 499), 'numpy.arange', 'np.arange', (['n_groups'], {}), '(n_groups)\n', (489, 499), True, 'import numpy as np\n'), ((524, 582), 'matplotlib.pyplot.bar', 'plt.bar', (['index', 'means_men', 'bar_width'], {'alpha': '(0.7)', 'color': '"""b"""'}), "(index, means_men, bar_width, alpha=0.7, color='b')\n", (531, 582), True, 'import matplotlib.pyplot as plt\n'), ((587, 614), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Violation (Å)"""'], {}), "('Violation (Å)')\n", (597, 614), True, 'import matplotlib.pyplot as plt\n'), ((619, 661), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# of NOE distance violations"""'], {}), "('# of NOE distance violations')\n", (629, 661), True, 'import matplotlib.pyplot as plt\n'), ((666, 702), 'matplotlib.pyplot.title', 'plt.title', (['"""NOE distance violations"""'], {}), "('NOE distance violations')\n", (675, 702), True, 'import matplotlib.pyplot as plt\n'), ((707, 747), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(index + bar_width / 2)', 'ticks'], {}), '(index + bar_width / 2, ticks)\n', (717, 747), True, 'import matplotlib.pyplot as plt\n'), ((757, 767), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (765, 767), True, 'import matplotlib.pyplot as plt\n'), ((793, 811), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (809, 811), True, 'import matplotlib.pyplot as plt\n'), ((816, 868), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(my_path + '/NOE_hist.svg')"], {'format': '"""svg"""'}), "(my_path + '/NOE_hist.svg', format='svg')\n", (827, 868), True, 'import matplotlib.pyplot as plt\n'), ((873, 884), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (882, 884), True, 'import matplotlib.pyplot as plt\n'), ((5136, 5159), 'math.pow', 'math.pow', (['avg', '(-1.0 / 6)'], {}), '(avg, -1.0 / 
6)\n', (5144, 5159), False, 'import math\n'), ((4258, 4281), 'math.pow', 'math.pow', (['avg', '(-1.0 / 3)'], {}), '(avg, -1.0 / 3)\n', (4266, 4281), False, 'import math\n'), ((4346, 4369), 'math.pow', 'math.pow', (['avg', '(-1.0 / 6)'], {}), '(avg, -1.0 / 6)\n', (4354, 4369), False, 'import math\n'), ((5010, 5053), 'math.pow', 'math.pow', (['avg_distances[model][curr_id]', '(-6)'], {}), '(avg_distances[model][curr_id], -6)\n', (5018, 5053), False, 'import math\n'), ((4783, 4826), 'math.pow', 'math.pow', (['avg_distances[model][curr_id]', '(-6)'], {}), '(avg_distances[model][curr_id], -6)\n', (4791, 4826), False, 'import math\n')] |
import time
import torch
import numpy as np
import scipy.optimize as opt
import fenics as fa
import dolfin_adjoint as da
from .optimizer import Optimizer
from .. import arguments
from ..graph.visualization import scalar_field_paraview, save_solution
from ..pde.poisson_dolfin import PoissonDolfin
from ..ml.trainer import batch_mat_vec
from ..ml.trainer_dolfin import TrainerDolfin
from ..ml.models import MLP_2
from ipopt import minimize_ipopt
class OptimizerDolfin(Optimizer):
    """
    Level 1: base optimizer that owns the Dolfin Poisson problem.
    """
    def __init__(self, args):
        super().__init__(args)
        # The PDE model shared by all concrete optimizers below.
        self.poisson = PoissonDolfin(self.args)
class OptimizerDolfinReconstruction(OptimizerDolfin):
    """
    Level 2: source-reconstruction optimizer.

    Stores the target produced by ``target_solution_rc`` and minimizes the
    regularized reconstruction objective with scipy's CG method.
    """
    def __init__(self, args):
        super().__init__(args)
        # Target data: dof tensor, FEM solution and the exact source field.
        self.target_dof, self.target_u, self.perfect_init_guess = target_solution_rc(
            self.args, self.poisson)
        self.args.input_size = self.poisson.num_dofs
        self.alpha = 1e-3  # regularization weight

    def optimize(self):
        """Run CG on the objective; returns (x, elapsed_seconds, nfev)."""
        # Random starting point; `perfect_init_guess` would be cheating.
        init_guess = 1e-0*np.random.randn(self.args.input_size)
        # CG observed to behave better here than BFGS or Newton-CG.
        solver_options = {'eps': 1e-15, 'maxiter': 1000, 'disp': True}
        t_start = time.time()
        result = opt.minimize(fun=self._objective,
                              x0=init_guess,
                              method='CG',
                              jac=self._derivative,
                              callback=None,
                              options=solver_options)
        t_end = time.time()
        time_elapsed = t_end - t_start
        print("Time elasped {}".format(time_elapsed))
        return result.x, time_elapsed, result.nfev
class OptimizerDolfinAdjoint(OptimizerDolfin):
    """
    Level 2: marker base class for adjoint (FEM-gradient) optimizers.
    """
    def __init__(self, args):
        super().__init__(args)
class OptimizerDolfinSurrogate(OptimizerDolfin):
    """
    Level 2: optimizer backed by a trained MLP surrogate of the PDE solver.
    """
    def __init__(self, args):
        super().__init__(args)
        self.trainer = TrainerDolfin(args)
        # Location of the pretrained surrogate weights.
        self.path = (self.args.root_path + '/' + self.args.model_path +
                     '/' + self.poisson.name + '/model_mlp_2')
        self.model = MLP_2(self.args, self.trainer.graph_info)
        self.model.load_state_dict(torch.load(self.path))
        # Densify the sparse operators once, up front.
        self.B_sp = self.trainer.B_sp.to_dense()
        self.A_sp = self.trainer.A_sp.to_dense()
        # self.model.adj = self.model.adj.to_dense()
        self.model.B_sp = self.trainer.B_sp.to_dense()
class OptimizerDolfinReconstructionSurrogate(OptimizerDolfinReconstruction, OptimizerDolfinSurrogate):
    """
    Level 3: reconstruction objective evaluated through the neural-network
    surrogate instead of the FEM solver.
    """
    def __init__(self, args):
        super().__init__(args)

    def _obj(self, source):
        """Surrogate objective: 0.5*(||u - u_t||_B^2 + alpha*||f||_B^2)."""
        source = source.unsqueeze(0)  # add batch dimension for the model
        solution = self.model(source)
        residual = solution - self.target_dof
        weighted_residual = batch_mat_vec(self.B_sp, residual)
        data_term = residual * weighted_residual
        weighted_source = batch_mat_vec(self.B_sp, source)
        reg_term = source * weighted_source
        return 0.5 * (data_term.sum() + self.alpha * reg_term.sum())
class OptimizerDolfinReconstructionAdjoint(OptimizerDolfinReconstruction, OptimizerDolfinAdjoint):
    """
    Level 3: reconstruction objective/gradient via the FEM solver and
    dolfin-adjoint automatic differentiation.
    """
    def __init__(self, args):
        super().__init__(args)

    def _obj(self, x):
        """Assemble the loss; returns (float loss, taped loss, source fn)."""
        src = da.Function(self.poisson.V)
        src.vector()[:] = x
        self.poisson.source = src
        u = self.poisson.solve_problem_variational_form()
        loss_form = (0.5 * fa.inner(u - self.target_u, u - self.target_u)
                     + 0.5 * self.alpha * fa.inner(src, src)) * fa.dx
        loss_tape = da.assemble(loss_form)
        return float(loss_tape), loss_tape, src

    def _objective(self, x):
        loss, _, _ = self._obj(x)
        return loss

    def _derivative(self, x):
        # Gradient of the taped loss w.r.t. the source, via the adjoint.
        _, loss_tape, src = self._obj(x)
        grad_tape = da.compute_gradient(loss_tape, da.Control(src))
        return np.array(grad_tape.vector()[:])
'''Helpers'''
def target_solution_rc(args, pde):
    """Build the ground-truth Gaussian source and solve the PDE for it.

    Returns (target dof tensor of shape (1, n), target FEM solution,
    source dof vector) for use as reconstruction targets.
    """
    pde.source = da.Expression("k*100*exp( (-(x[0]-x0)*(x[0]-x0) -(x[1]-x1)*(x[1]-x1)) / (2*0.01*l) )",
                               k=1, l=1, x0=0.1, x1=0.1, degree=3)
    source_field = da.interpolate(pde.source, pde.V)
    save_solution(args, source_field, 'opt_fem_f')
    target_u = pde.solve_problem_variational_form()
    target_dof = torch.tensor(target_u.vector()[:], dtype=torch.float).unsqueeze(0)
    return target_dof, target_u, source_field.vector()[:]
def produce_solution(pde, x):
    """Set ``x`` as the PDE control variable and return the resulting solution."""
    pde.set_control_variable(x)
    return pde.solve_problem_variational_form()
def run_rec(args, seed):
    """Compare surrogate vs. adjoint reconstruction for one random seed.

    For each regularization weight, optimizes with both methods, evaluates
    both minimizers under the *true* (adjoint/FEM) objective, saves the
    results to .npy files and returns them.
    """
    print("seed {}".format(seed))
    alpha_list = [1e-6, 1e-3, 1e-0]
    optimizer_nn = OptimizerDolfinReconstructionSurrogate(args)
    optimizer_ad = OptimizerDolfinReconstructionAdjoint(args)
    np.random.seed(seed)
    objective_nn, objective_ad = [], []
    time_nn, time_ad = [], []
    for alpha in alpha_list:
        # Surrogate-based optimization.
        optimizer_nn.alpha = alpha
        x_nn, t_nn, _ = optimizer_nn.optimize()
        scalar_field_paraview(args, x_nn, optimizer_nn.poisson, "opt_nn_f")
        # Adjoint-based optimization.
        optimizer_ad.alpha = alpha
        x_ad, t_ad, _ = optimizer_ad.optimize()
        scalar_field_paraview(args, x_ad, optimizer_ad.poisson, "opt_ad_f")
        # To compute the true objective, use optimizer_ad for both results.
        objective_nn.append(optimizer_ad._objective(x_nn))
        objective_ad.append(optimizer_ad._objective(x_ad))
        time_nn.append(t_nn)
        time_ad.append(t_ad)
    objective_nn = np.asarray(objective_nn)
    objective_ad = np.asarray(objective_ad)
    time_nn = np.asarray(time_nn)
    time_ad = np.asarray(time_ad)
    print("true optimized objective nn", objective_nn)
    print("true optimized objective ad", objective_ad)
    print("time nn", time_nn)
    print("time ad", time_ad)
    np.save('data/numpy/dolfin/opt/seed{}/objective_nn.npy'.format(seed), objective_nn)
    np.save('data/numpy/dolfin/opt/seed{}/objective_ad.npy'.format(seed), objective_ad)
    np.save('data/numpy/dolfin/opt/seed{}/time_nn.npy'.format(seed), time_nn)
    np.save('data/numpy/dolfin/opt/seed{}/time_ad.npy'.format(seed), time_ad)
    return objective_nn, objective_ad, time_nn, time_ad
def run(args):
    """Aggregate saved per-seed results and print mean/std per alpha."""
    all_obj_nn, all_obj_ad = [], []
    all_time_nn, all_time_ad = [], []
    for i, seed in enumerate(range(2, 7, 1)):
        print("\n\nround {}".format(i))
        # objective_nn, objective_ad, time_nn, time_ad = run_rec(args, seed)
        all_obj_nn.append(np.load('data/numpy/dolfin/opt/seed{}/objective_nn.npy'.format(seed)))
        all_obj_ad.append(np.load('data/numpy/dolfin/opt/seed{}/objective_ad.npy'.format(seed)))
        all_time_nn.append(np.load('data/numpy/dolfin/opt/seed{}/time_nn.npy'.format(seed)))
        all_time_ad.append(np.load('data/numpy/dolfin/opt/seed{}/time_ad.npy'.format(seed)))
    objective_nn_collect = np.asarray(all_obj_nn)
    objective_ad_collect = np.asarray(all_obj_ad)
    time_nn_collect = 1000*np.asarray(all_time_nn)  # seconds -> milliseconds
    time_ad_collect = 1000*np.asarray(all_time_ad)
    # One statistics line per regularization weight (3 alphas in run_rec).
    for i in range(3):
        print("\n")
        print("mean of nn obj is {:.6f}, std is {:.6f}, for alpha {}".format(
            objective_nn_collect[:, i].mean(),
            objective_nn_collect[:, i].std(), i))
        print("mean of ad obj is {:.6f}, std is {:.6f}, for alpha {}".format(
            objective_ad_collect[:, i].mean(),
            objective_ad_collect[:, i].std(), i))
        print("mean of nn time is {:.1f}, std is {:.1f}, for alpha {}".format(
            time_nn_collect[:, i].mean(),
            time_nn_collect[:, i].std(), i))
        print("mean of ad time is {:.1f}, std is {:.1f}, for alpha {}".format(
            time_ad_collect[:, i].mean(),
            time_ad_collect[:, i].std(), i))
if __name__ == '__main__':
    # Script entry point: aggregate and report saved optimization results.
    args = arguments.args
    np.set_printoptions(precision=10)
    run(args)
    # run_rec(args, 2)
| [
"scipy.optimize.minimize",
"numpy.set_printoptions",
"numpy.random.seed",
"numpy.random.randn",
"dolfin_adjoint.Control",
"numpy.asarray",
"torch.load",
"dolfin_adjoint.Function",
"time.time",
"fenics.inner",
"dolfin_adjoint.compute_gradient",
"dolfin_adjoint.Expression",
"dolfin_adjoint.int... | [((4257, 4388), 'dolfin_adjoint.Expression', 'da.Expression', (['"""k*100*exp( (-(x[0]-x0)*(x[0]-x0) -(x[1]-x1)*(x[1]-x1)) / (2*0.01*l) )"""'], {'k': '(1)', 'l': '(1)', 'x0': '(0.1)', 'x1': '(0.1)', 'degree': '(3)'}), "(\n 'k*100*exp( (-(x[0]-x0)*(x[0]-x0) -(x[1]-x1)*(x[1]-x1)) / (2*0.01*l) )',\n k=1, l=1, x0=0.1, x1=0.1, degree=3)\n", (4270, 4388), True, 'import dolfin_adjoint as da\n'), ((4428, 4461), 'dolfin_adjoint.interpolate', 'da.interpolate', (['pde.source', 'pde.V'], {}), '(pde.source, pde.V)\n', (4442, 4461), True, 'import dolfin_adjoint as da\n'), ((5028, 5048), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5042, 5048), True, 'import numpy as np\n'), ((5786, 5810), 'numpy.asarray', 'np.asarray', (['objective_nn'], {}), '(objective_nn)\n', (5796, 5810), True, 'import numpy as np\n'), ((5830, 5854), 'numpy.asarray', 'np.asarray', (['objective_ad'], {}), '(objective_ad)\n', (5840, 5854), True, 'import numpy as np\n'), ((5869, 5888), 'numpy.asarray', 'np.asarray', (['time_nn'], {}), '(time_nn)\n', (5879, 5888), True, 'import numpy as np\n'), ((5903, 5922), 'numpy.asarray', 'np.asarray', (['time_ad'], {}), '(time_ad)\n', (5913, 5922), True, 'import numpy as np\n'), ((7342, 7374), 'numpy.asarray', 'np.asarray', (['objective_nn_collect'], {}), '(objective_nn_collect)\n', (7352, 7374), True, 'import numpy as np\n'), ((7402, 7434), 'numpy.asarray', 'np.asarray', (['objective_ad_collect'], {}), '(objective_ad_collect)\n', (7412, 7434), True, 'import numpy as np\n'), ((8394, 8427), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(10)'}), '(precision=10)\n', (8413, 8427), True, 'import numpy as np\n'), ((1335, 1346), 'time.time', 'time.time', ([], {}), '()\n', (1344, 1346), False, 'import time\n'), ((1361, 1480), 'scipy.optimize.minimize', 'opt.minimize', ([], {'fun': 'self._objective', 'x0': 'x_initial', 'method': '"""CG"""', 'jac': 'self._derivative', 'callback': 'None', 'options': 'options'}), 
"(fun=self._objective, x0=x_initial, method='CG', jac=self.\n _derivative, callback=None, options=options)\n", (1373, 1480), True, 'import scipy.optimize as opt\n'), ((1626, 1637), 'time.time', 'time.time', ([], {}), '()\n', (1635, 1637), False, 'import time\n'), ((3543, 3570), 'dolfin_adjoint.Function', 'da.Function', (['self.poisson.V'], {}), '(self.poisson.V)\n', (3554, 3570), True, 'import dolfin_adjoint as da\n'), ((4063, 4076), 'dolfin_adjoint.Control', 'da.Control', (['f'], {}), '(f)\n', (4073, 4076), True, 'import dolfin_adjoint as da\n'), ((4094, 4130), 'dolfin_adjoint.compute_gradient', 'da.compute_gradient', (['L_tape', 'control'], {}), '(L_tape, control)\n', (4113, 4130), True, 'import dolfin_adjoint as da\n'), ((7462, 7489), 'numpy.asarray', 'np.asarray', (['time_nn_collect'], {}), '(time_nn_collect)\n', (7472, 7489), True, 'import numpy as np\n'), ((7517, 7544), 'numpy.asarray', 'np.asarray', (['time_ad_collect'], {}), '(time_ad_collect)\n', (7527, 7544), True, 'import numpy as np\n'), ((1073, 1110), 'numpy.random.randn', 'np.random.randn', (['self.args.input_size'], {}), '(self.args.input_size)\n', (1088, 1110), True, 'import numpy as np\n'), ((2377, 2398), 'torch.load', 'torch.load', (['self.path'], {}), '(self.path)\n', (2387, 2398), False, 'import torch\n'), ((3723, 3769), 'fenics.inner', 'fa.inner', (['(u - self.target_u)', '(u - self.target_u)'], {}), '(u - self.target_u, u - self.target_u)\n', (3731, 3769), True, 'import fenics as fa\n'), ((3821, 3835), 'fenics.inner', 'fa.inner', (['f', 'f'], {}), '(f, f)\n', (3829, 3835), True, 'import fenics as fa\n')] |
import numpy as np
from numba import guvectorize
from pyfftw import FFTW
def dft(buf_in, buf_out):
    """
    Factory for a Numba gufunc that computes forward DFTs with FFTW.

    FFTW plans (optimizes) the transform for the concrete buffer sizes and
    dtypes, and operates on fixed memory buffers, so the input/output
    buffers must be supplied up front. With ProcessingChain, pass the
    internal buffers via ProcessingChain.get_variable('var_name') (with
    raw_to_dsp, giving the variable name is enough). Supported
    input -> output dtypes:
    - float32/float              (size n) -> complex64             (size n/2+1)
    - float64/double             (size n) -> complex128            (size n/2+1)
    - float128/longdouble        (size n) -> complex256/clongdouble (size n/2+1)
    - complex64                  (size n) -> complex64             (size n  )
    - complex128                 (size n) -> complex128            (size n  )
    - complex256/clongdouble     (size n) -> complex256/clongdouble (size n  )
    """
    try:
        # Plan the forward transform along the last axis for these buffers.
        fftw_plan = FFTW(buf_in, buf_out, axes=(-1,), direction='FFTW_FORWARD')
    except ValueError:
        raise ValueError("""Incompatible array types/shapes. Allowed:
        - float32/float (size n) -> complex64 (size n/2+1)
        - float64/double (size n) -> complex128 (size n/2+1)
        - float128/longdouble (size n) -> complex256/clongdouble (size n/2+1)
        - complex64 (size n) -> complex64 (size n)
        - complex128 (size n) -> complex128 (size n)
        - complex256/clongdouble (size n) -> complex256/clongdouble (size n)""")
    # Build the gufunc signature from the concrete buffer dtypes/shapes.
    typesig = f'void({buf_in.dtype}[:, :], {buf_out.dtype}[:, :])'
    sizesig = '(m, n)->(m, n)' if buf_in.shape == buf_out.shape else '(m, n),(m, l)'
    @guvectorize([typesig], sizesig, forceobj=True)
    def fftw_forward(wf_in, dft_out):
        fftw_plan(wf_in, dft_out)
    return fftw_forward
def inv_dft(buf_in, buf_out):
"""
Perform inverse discrete Fourier transforms using the FFTW library. FFTW
optimizes the FFT algorithm based on the size of the arrays, with SIMD parallelized
commands. This optimization requires initialization, so this is a factory
function that returns a Numba gufunc that performs the FFT. FFTW works on
fixed memory buffers, so you must tell it what memory to use ahead of time.
When using this with ProcessingChain, to ensure the correct buffers are used,
call ProcessingChain.get_variable('var_name') to give it the internal memory
buffer directly---with raw_to_dsp, you can just give it the name, and it will
automatically happen. The possible dtypes for the input/outputs are:
- complex64 (size n/2+1) -> float32/float (size n)
- complex128 (size n/2+1) -> float64/double (size n)
- complex256/clongdouble (size n/2+1) -> float128/longdouble (size n)
- complex64 (size n ) -> complex64 (size n)
- complex128 (size n ) -> complex128 (size n)
- complex256/clongdouble (size n ) -> complex256/clongdouble (size n)
"""
try:
idft_fun = FFTW(buf_in, buf_out, axes=(-1,), direction='FFTW_BACKWARD')
except ValueError:
raise ValueError("""Incompatible array types/shapes. Allowed:
- complex64 (size n/2+1) -> float32/float (size n)
- complex128 (size n/2+1) -> float64/double (size n)
- complex256/clongdouble (size n/2+1) -> float128/longdouble (size n)
- complex64 (size n) -> complex64 (size n)
- complex128 (size n) -> complex128 (size n)
- complex256/clongdouble (size n) -> complex256/clongdouble (size n)""")
typesig = 'void(' + str(buf_in.dtype) + '[:, :], ' + str(buf_out.dtype) + '[:, :])'
sizesig = '(m, n)->(m, n)' if buf_in.shape == buf_out.shape else '(m, n),(m, l)'
@guvectorize([typesig], sizesig, forceobj=True)
def inv_dft(wf_in, dft_out):
idft_fun(wf_in, dft_out)
return inv_dft
def psd(buf_in, buf_out):
"""
Perform discrete Fourier transforms using the FFTW library, and use it to get
the power spectral density. FFTW optimizes the FFT algorithm based on the
size of the arrays, with SIMD parallelized commands. This optimization
requires initialization, so this is a factory function that returns a Numba gufunc
that performs the FFT. FFTW works on fixed memory buffers, so you must
tell it what memory to use ahead of time. When using this with ProcessingChain,
to ensure the correct buffers are used, call ProcessingChain.get_variable('var_name')
to give it the internal memory buffer directly---with raw_to_dsp, you can just
give it the name, and it will automatically happen. The possible dtypes for the
input/outputs are:
- complex64 (size n) -> float32/float (size n )
- complex128 (size n) -> float64/double (size n )
- complex256/clongdouble (size n) -> float128/longdouble (size n )
- float32/float (size n) -> float32/float (size n/2+1)
- float64/double (size n) -> float64/double (size n/2+1)
- float128/longdouble (size n) -> float128/longdouble (size n/2+1)
"""
# build intermediate array for the dft, which will be abs'd to get the PSD
buf_dft = np.ndarray(buf_out.shape, np.dtype('complex' + str(buf_out.dtype.itemsize * 16)))
try:
dft_fun = FFTW(buf_in, buf_dft, axes=(-1,), direction='FFTW_FORWARD')
except ValueError:
raise ValueError("""Incompatible array types/shapes. Allowed:
- complex64 (size n) -> float32/float (size n)
- complex128 (size n) -> float64/double (size n)
- complex256/clongdouble (size n) -> float128/longdouble (size n)
- float32/float (size n) -> float32/float (size n/2+1)
- float64/double (size n) -> float64/double (size n/2+1)
- float128/longdouble (size n) -> float128/longdouble (size n/2+1)""")
typesig = 'void(' + str(buf_in.dtype) + '[:, :], ' + str(buf_out.dtype) + '[:, :])'
sizesig = '(m, n)->(m, n)' if buf_in.shape == buf_out.shape else '(m, n),(m, l)'
@guvectorize([typesig], sizesig, forceobj=True)
def psd(wf_in, psd_out):
dft_fun(wf_in, buf_dft)
np.abs(buf_dft, psd_out)
return psd
| [
"numpy.abs",
"numba.guvectorize",
"pyfftw.FFTW"
] | [((2021, 2067), 'numba.guvectorize', 'guvectorize', (['[typesig]', 'sizesig'], {'forceobj': '(True)'}), '([typesig], sizesig, forceobj=True)\n', (2032, 2067), False, 'from numba import guvectorize\n'), ((4132, 4178), 'numba.guvectorize', 'guvectorize', (['[typesig]', 'sizesig'], {'forceobj': '(True)'}), '([typesig], sizesig, forceobj=True)\n', (4143, 4178), False, 'from numba import guvectorize\n'), ((6457, 6503), 'numba.guvectorize', 'guvectorize', (['[typesig]', 'sizesig'], {'forceobj': '(True)'}), '([typesig], sizesig, forceobj=True)\n', (6468, 6503), False, 'from numba import guvectorize\n'), ((1320, 1379), 'pyfftw.FFTW', 'FFTW', (['buf_in', 'buf_out'], {'axes': '(-1,)', 'direction': '"""FFTW_FORWARD"""'}), "(buf_in, buf_out, axes=(-1,), direction='FFTW_FORWARD')\n", (1324, 1379), False, 'from pyfftw import FFTW\n'), ((3405, 3465), 'pyfftw.FFTW', 'FFTW', (['buf_in', 'buf_out'], {'axes': '(-1,)', 'direction': '"""FFTW_BACKWARD"""'}), "(buf_in, buf_out, axes=(-1,), direction='FFTW_BACKWARD')\n", (3409, 3465), False, 'from pyfftw import FFTW\n'), ((5721, 5780), 'pyfftw.FFTW', 'FFTW', (['buf_in', 'buf_dft'], {'axes': '(-1,)', 'direction': '"""FFTW_FORWARD"""'}), "(buf_in, buf_dft, axes=(-1,), direction='FFTW_FORWARD')\n", (5725, 5780), False, 'from pyfftw import FFTW\n'), ((6573, 6597), 'numpy.abs', 'np.abs', (['buf_dft', 'psd_out'], {}), '(buf_dft, psd_out)\n', (6579, 6597), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import logging
import numpy as np
def get_spectral_norm(L):
if L is None:
return 1
elif hasattr(L, "spectral_norm"):
return L.spectral_norm
else: # linearized ADMM
LTL = L.T.dot(L)
# need spectral norm of L
import scipy.sparse
if scipy.sparse.issparse(L):
if min(L.shape) <= 2:
L2 = np.real(np.linalg.eigvals(LTL.toarray()).max())
else:
import scipy.sparse.linalg
L2 = np.real(scipy.sparse.linalg.eigs(LTL, k=1, return_eigenvectors=False)[0])
else:
L2 = np.real(np.linalg.eigvals(LTL).max())
return L2
class MatrixAdapter(object):
"""Matrix adapter to deal with None and per-component application.
"""
def __init__(self, L, axis=None):
# prevent cascade
spec_norm = None
while isinstance(L, MatrixAdapter):
spec_norm = L._spec_norm
axis = L.axis
L = L.L
self.L = L
self.axis = axis
self._spec_norm = spec_norm
@property
def spectral_norm(self):
if self._spec_norm is None:
if self.L is not None:
self._spec_norm = get_spectral_norm(self.L)
else:
self._spec_norm = 1
return self._spec_norm
@property
def T(self):
if self.L is None:
return self # NOT: self.L !!!
# because we need to preserve axis for dot(), create a new adapter
return MatrixAdapter(self.L.T, axis=self.axis)
def dot(self, X):
if self.L is None:
# CAVEAT: This is not a copy (for performance reasons)
# so make sure you're not binding it to another variable
# OK for all temporary arguments X
return X
if self.axis is None:
return self.L.dot(X)
# axis=0 is not needed because it can be done with a normal matrix
# dot product
if self.axis == 1:
return self.L.dot(X.reshape(-1)).reshape(X.shape[0], -1)
raise NotImplementedError("MatrixAdapter.dot() is not useful with axis=0.\n"
"Use regular matrix dot product instead!")
def __len__(self):
return len(self.L)
@property
def shape(self):
return self.L.shape
@property
def size(self):
return self.L.size
@property
def ndim(self):
return self.L.ndim
class Traceback(object):
"""Container structure for traceback of algorithm behavior.
"""
def __init__(self, N=1):
# offset is used when the iteration counter is reset
# so that the number of iterations can be used to make sure that
# all of the variables are being updated properly
self.offset = 0
# Number of variables
self.N = N
self.history = [{} for n in range(N)]
def __repr__(self):
message = "Traceback:\n"
for k,v in self.__dict__.items():
message += "\t%s: %r\n" % (k,v)
return message
def __len__(self):
h = self.history[0]
return len(h[next(iter(h))][0])
@property
def it(self):
# number of iterations since last reset, minus initialization record
return self.__len__() - self.offset - 1
def __getitem__(self, key):
"""Get the history of a variable
Parameters
----------
key: string or tuple
- If key is a string it should be the name of the variable to lookup.
- If key is a tuple it should be of the form (k,j) or (k,j,m), where
`k` is the name of the variable, `j` is the index of the variable,
and `m` is the index of the constraint.
If `m` is not specified then `m=0`.
Returns
-------
self.history[j][k][m]
"""
if not isinstance(key, str):
if len(key) == 2:
k, j = key
m = 0
elif len(key) == 3:
k, j, m = key
else:
j = m = 0
k = key
return np.array(self.history[j][k][m])
def reset(self):
"""Reset the iteration offset
When the algorithm resets the iterations, we need to subtract the number of entries
in the history to enable the length counter to correctly check for the proper iteration numbers.
"""
self.offset = self.__len__()
def _store_variable(self, j, key, m, value):
"""Store a copy of the variable in the history
"""
if hasattr(value, 'copy'):
v = value.copy()
else:
v = value
self.history[j][key][m].append(v)
def update_history(self, it, j=0, M=None, **kwargs):
"""Add the current state for all kwargs to the history
"""
# Create a new entry in the history for new variables (if they don't exist)
if not np.any([k in self.history[j] for k in kwargs]):
for k in kwargs:
if M is None or M == 0:
self.history[j][k] = [[]]
else:
self.history[j][k] = [[] for m in range(M)]
"""
# Check that the variables have been updated once per iteration
elif np.any([[len(h)!=it+self.offset for h in self.history[j][k]] for k in kwargs.keys()]):
for k in kwargs.keys():
for n,h in enumerate(self.history[j][k]):
if len(h) != it+self.offset:
err_str = "At iteration {0}, {1}[{2}] already has {3} entries"
raise Exception(err_str.format(it, k, n, len(h)-self.offset))
"""
# Add the variables to the history
for k,v in kwargs.items():
if M is None or M == 0:
self._store_variable(j, k, 0, v)
else:
for m in range(M):
self._store_variable(j, k, m, v[m])
class ApproximateCache(object):
"""Cache function evaluations that don't change much
Certain calculations take a long time but have little change after a few
iterations, so that these functions become an unnecessary time sink.
This class allows the user to store these (and similar) values and
only calculate them when there is a substantial change in the value.
This method works by comparing the stored value to the last value calculated.
If the relative difference between the two is less than `slack`/2, then the
change is deemed to be "small" and the `stride` (number of iterations to skip)
is increased to skip calculating the value for several iterations.
"""
def __init__(self, func, slack=0.1, max_stride=100):
"""Constructor
Parameters
----------
func: function
The slow function that calculates the value
slack: float, default=0.1
A measure of how much the value can differ before
it needs to be recalculated
max_stride: int, default=100
Maximum `stride` between calculations of the value.
"""
self.func = func
assert slack >= 0 and slack < 1
self.slack = slack
self.max_stride = max_stride
self.it = 0
self.stride = 1
self.last = -1
self.stored = None
def __len__(self):
"""The current `stride` value
"""
return len(self.stride)
def __call__(self, *args, **kwargs):
"""Calculate the value (if necessary)
This method checks whether or not the value needs to be recalculated,
and if so, calculates it and sets the number of `stride` (iterations)
until it is calculated again.
"""
if self.slack == 0:
self.it += 1
return self.func(*args, **kwargs)
if self.it >= self.last + self.stride:
self.last = self.it
val = self.func(*args, **kwargs)
# increase stride when rel. changes in L are smaller than (1-slack)/2
if self.it > 1 and self.slack > 0:
rel_error = np.abs(self.stored - val) / self.stored
budget = self.slack/2
if rel_error < budget and rel_error > 0:
self.stride += max(1,int(budget/rel_error * self.stride))
self.stride = min(self.max_stride, self.stride)
# updated last value
self.stored = val
else:
self.it += 1
return self.stored
class NesterovStepper(object):
def __init__(self, accelerated=False):
self.t = 1.
self.accelerated = accelerated
@property
def omega(self):
if self.accelerated:
t_ = 0.5*(1 + np.sqrt(4*self.t*self.t + 1))
om = (self.t - 1)/t_
self.t = t_
return om
else:
return 0
def initZU(X, L):
if not isinstance(L, list):
Z = L.dot(X).copy()
U = np.zeros(Z.shape, dtype=Z.dtype)
else:
Z = []
U = []
for i in range(len(L)):
Z.append(L[i].dot(X).copy())
U.append(np.zeros(Z[i].shape, dtype=Z[i].dtype))
return Z,U
def l2sq(x):
"""Sum the matrix elements squared
"""
return (x**2).sum()
def l2(x):
"""Square root of the sum of the matrix elements squared
"""
return np.sqrt((x**2).sum())
def get_step_g(step_f, norm_L2, N=1, M=1):
"""Get step_g compatible with step_f (and L) for ADMM, SDMM, GLMM.
"""
# Nominally: minimum step size is step_f * norm_L2
# see Parikh 2013, sect. 4.4.2
#
# BUT: For multiple constraints, need to multiply by M.
# AND: For multiple variables, need to multiply by N.
# Worst case of constraints being totally correlated, otherwise Z-updates
# overwhelm X-updates entirely -> blow-up
return step_f * norm_L2 * N * M
def get_step_f(step_f, lR2, lS2):
"""Update the stepsize of given the primal and dual errors.
See Boyd (2011), section 3.4.1
"""
mu, tau = 10, 2
if lR2 > mu*lS2:
return step_f * tau
elif lS2 > mu*lR2:
return step_f / tau
return step_f
def do_the_mm(X, step_f, Z, U, prox_g, step_g, L):
LX = L.dot(X)
Z_ = prox_g(LX + U, step_g)
# primal and dual errors
R = LX - Z_
S = -1/step_g * L.T.dot(Z_ - Z)
Z[:] = Z_[:] # force the copy
# this uses relaxation parameter of 1
U[:] += R
return LX, R, S
def update_variables(X, Z, U, prox_f, step_f, prox_g, step_g, L):
"""Update the primal and dual variables
Note: X, Z, U are updated inline
Returns: LX, R, S
"""
if not hasattr(prox_g, '__iter__'):
if prox_g is not None:
dX = step_f/step_g * L.T.dot(L.dot(X) - Z + U)
X[:] = prox_f(X - dX, step_f)
LX, R, S = do_the_mm(X, step_f, Z, U, prox_g, step_g, L)
else:
# fall back to simple fixed-point method for f
# see do_the_mm for normal definitions of LX,Z,R,S
S = -X.copy()
X[:] = prox_f(X, step_f)
LX = X
Z[:] = X[:]
R = np.zeros(X.shape, dtype=X.dtype)
S += X
else:
M = len(prox_g)
dX = np.sum([step_f/step_g[i] * L[i].T.dot(L[i].dot(X) - Z[i] + U[i]) for i in range(M)], axis=0)
X[:] = prox_f(X - dX, step_f)
LX = [None] * M
R = [None] * M
S = [None] * M
for i in range(M):
LX[i], R[i], S[i] = do_the_mm(X, step_f, Z[i], U[i], prox_g[i], step_g[i], L[i])
return LX, R, S
def get_variable_errors(X, L, LX, Z, U, step_g, e_rel, e_abs=0):
"""Get the errors in a single multiplier method step
For a given linear operator A, (and its dot product with X to save time),
calculate the errors in the prime and dual variables, used by the
Boyd 2011 Section 3 stopping criteria.
"""
n = X.size
p = Z.size
e_pri2 = np.sqrt(p)*e_abs/L.spectral_norm + e_rel*np.max([l2(LX), l2(Z)])
if step_g is not None:
e_dual2 = np.sqrt(n)*e_abs/L.spectral_norm + e_rel*l2(L.T.dot(U)/step_g)
else:
e_dual2 = np.sqrt(n)*e_abs/L.spectral_norm + e_rel*l2(L.T.dot(U))
return e_pri2, e_dual2
def check_constraint_convergence(X, L, LX, Z, U, R, S, step_f, step_g, e_rel, e_abs):
"""Calculate if all constraints have converged.
Using the stopping criteria from Boyd 2011, Sec 3.3.1, calculate whether the
variables for each constraint have converged.
"""
if isinstance(L, list):
M = len(L)
convergence = True
errors = []
# recursive call
for i in range(M):
c, e = check_constraint_convergence(X, L[i], LX[i], Z[i], U[i], R[i], S[i],
step_f, step_g[i], e_rel, e_abs)
convergence &= c
errors.append(e)
return convergence, errors
else:
# check convergence of prime residual R and dual residual S
e_pri, e_dual = get_variable_errors(X, L, LX, Z, U, step_g, e_rel, e_abs)
lR = l2(R)
lS = l2(S)
convergence = (lR <= e_pri) and (lS <= e_dual)
return convergence, (e_pri, e_dual, lR, lS)
def check_convergence(newX, oldX, e_rel):
"""Check that the algorithm converges using Langville 2014 criteria
Uses the check from Langville 2014, Section 5, to check if the NMF
algorithm has converged.
"""
# Calculate the norm for columns and rows, which can be used for debugging
# Otherwise skip, since it takes extra processing time
new_old = newX*oldX
old2 = oldX**2
norms = [np.sum(new_old), np.sum(old2)]
convergent = norms[0] >= (1-e_rel**2)*norms[1]
return convergent, norms
def hasNotNone(l):
i = 0
for ll in l:
if ll is not None:
if hasattr(ll, '__iter__'):
for lll in ll:
if lll is not None:
return len(l) - i
i += 1
return 0
| [
"numpy.linalg.eigvals",
"numpy.sum",
"numpy.abs",
"numpy.zeros",
"numpy.any",
"numpy.array",
"numpy.sqrt"
] | [((4194, 4225), 'numpy.array', 'np.array', (['self.history[j][k][m]'], {}), '(self.history[j][k][m])\n', (4202, 4225), True, 'import numpy as np\n'), ((9090, 9122), 'numpy.zeros', 'np.zeros', (['Z.shape'], {'dtype': 'Z.dtype'}), '(Z.shape, dtype=Z.dtype)\n', (9098, 9122), True, 'import numpy as np\n'), ((13763, 13778), 'numpy.sum', 'np.sum', (['new_old'], {}), '(new_old)\n', (13769, 13778), True, 'import numpy as np\n'), ((13780, 13792), 'numpy.sum', 'np.sum', (['old2'], {}), '(old2)\n', (13786, 13792), True, 'import numpy as np\n'), ((5025, 5073), 'numpy.any', 'np.any', (['[(k in self.history[j]) for k in kwargs]'], {}), '([(k in self.history[j]) for k in kwargs])\n', (5031, 5073), True, 'import numpy as np\n'), ((11261, 11293), 'numpy.zeros', 'np.zeros', (['X.shape'], {'dtype': 'X.dtype'}), '(X.shape, dtype=X.dtype)\n', (11269, 11293), True, 'import numpy as np\n'), ((9257, 9295), 'numpy.zeros', 'np.zeros', (['Z[i].shape'], {'dtype': 'Z[i].dtype'}), '(Z[i].shape, dtype=Z[i].dtype)\n', (9265, 9295), True, 'import numpy as np\n'), ((12068, 12078), 'numpy.sqrt', 'np.sqrt', (['p'], {}), '(p)\n', (12075, 12078), True, 'import numpy as np\n'), ((8220, 8245), 'numpy.abs', 'np.abs', (['(self.stored - val)'], {}), '(self.stored - val)\n', (8226, 8245), True, 'import numpy as np\n'), ((8855, 8887), 'numpy.sqrt', 'np.sqrt', (['(4 * self.t * self.t + 1)'], {}), '(4 * self.t * self.t + 1)\n', (8862, 8887), True, 'import numpy as np\n'), ((12178, 12188), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (12185, 12188), True, 'import numpy as np\n'), ((12269, 12279), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (12276, 12279), True, 'import numpy as np\n'), ((664, 686), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['LTL'], {}), '(LTL)\n', (681, 686), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import gray
system_shape = (1, 8, 16, 25, 25, 2)
submodule_shape = (1, 5, 5,)
module_shape = (1, 5, 5,)
rsector_shape = (1, 4, 4,)
no_crystals = np.prod(system_shape)
map_dtype = np.dtype([
('detector', int),
('crystal', int),
('submodule', int),
('module', int),
('rsector', int),
('head', int),
('layer', int),
('crystal_local', int),
('submodule_local', int),
('module_local', int),
('rsector_local', int),
('head_local', int),
('bz_crystal', int),
('by_crystal', int),
('bx_crystal', int),
('bz_submodule', int),
('by_submodule', int),
('bx_submodule', int),
('bz_module', int),
('by_module', int),
('bx_module', int),
('axial', int),
('transaxial', int),
])
# bench_map = np.empty((1,), dtype=map_dtype)
bench_map = np.empty((no_crystals,), dtype=map_dtype)
bench_map['detector'] = np.arange(bench_map.size)
bench_map['crystal'] = bench_map['detector'] // system_shape[5]
bench_map['submodule'] = bench_map['crystal'] // system_shape[4]
bench_map['module'] = bench_map['submodule'] // system_shape[3]
bench_map['rsector'] = bench_map['module'] // system_shape[2]
bench_map['head'] = bench_map['rsector'] // system_shape[1]
bench_map['layer'] = bench_map['detector'] % system_shape[5]
bench_map['crystal_local'] = bench_map['crystal'] % system_shape[4]
bench_map['submodule_local'] = bench_map['submodule'] % system_shape[3]
bench_map['module_local'] = bench_map['module'] % system_shape[2]
bench_map['rsector_local'] = bench_map['rsector'] % system_shape[1]
bench_map['head_local'] = bench_map['head'] % system_shape[0]
bench_map['bz_crystal'] = bench_map['crystal_local'] // submodule_shape[1]
bench_map['by_crystal'] = bench_map['crystal_local'] % submodule_shape[1]
bench_map['bx_crystal'][:] = 0
bench_map['bz_submodule'] = bench_map['submodule_local'] // module_shape[1]
bench_map['by_submodule'] = bench_map['submodule_local'] % module_shape[1]
bench_map['bx_submodule'][:] = 0
bench_map['bz_module'] = bench_map['module_local'] // rsector_shape[1]
bench_map['by_module'] = bench_map['module_local'] % rsector_shape[1]
bench_map['bx_module'][:] = 0
bench_map['axial'] = bench_map['bz_crystal'] + submodule_shape[2] * (
bench_map['bz_submodule'] + module_shape[2] * bench_map['bz_module'])
bench_map['transaxial'] = bench_map['by_crystal'] + submodule_shape[1] * (
bench_map['by_submodule'] + module_shape[1] * (bench_map['by_module'] +
bench_map['rsector'] * rsector_shape[1])
)
gray.save_mapping_file('gate_benchmark.map', bench_map)
| [
"gray.save_mapping_file",
"numpy.empty",
"numpy.dtype",
"numpy.arange",
"numpy.prod"
] | [((189, 210), 'numpy.prod', 'np.prod', (['system_shape'], {}), '(system_shape)\n', (196, 210), True, 'import numpy as np\n'), ((224, 738), 'numpy.dtype', 'np.dtype', (["[('detector', int), ('crystal', int), ('submodule', int), ('module', int),\n ('rsector', int), ('head', int), ('layer', int), ('crystal_local', int),\n ('submodule_local', int), ('module_local', int), ('rsector_local', int),\n ('head_local', int), ('bz_crystal', int), ('by_crystal', int), (\n 'bx_crystal', int), ('bz_submodule', int), ('by_submodule', int), (\n 'bx_submodule', int), ('bz_module', int), ('by_module', int), (\n 'bx_module', int), ('axial', int), ('transaxial', int)]"], {}), "([('detector', int), ('crystal', int), ('submodule', int), (\n 'module', int), ('rsector', int), ('head', int), ('layer', int), (\n 'crystal_local', int), ('submodule_local', int), ('module_local', int),\n ('rsector_local', int), ('head_local', int), ('bz_crystal', int), (\n 'by_crystal', int), ('bx_crystal', int), ('bz_submodule', int), (\n 'by_submodule', int), ('bx_submodule', int), ('bz_module', int), (\n 'by_module', int), ('bx_module', int), ('axial', int), ('transaxial', int)]\n )\n", (232, 738), True, 'import numpy as np\n'), ((863, 904), 'numpy.empty', 'np.empty', (['(no_crystals,)'], {'dtype': 'map_dtype'}), '((no_crystals,), dtype=map_dtype)\n', (871, 904), True, 'import numpy as np\n'), ((930, 955), 'numpy.arange', 'np.arange', (['bench_map.size'], {}), '(bench_map.size)\n', (939, 955), True, 'import numpy as np\n'), ((2565, 2620), 'gray.save_mapping_file', 'gray.save_mapping_file', (['"""gate_benchmark.map"""', 'bench_map'], {}), "('gate_benchmark.map', bench_map)\n", (2587, 2620), False, 'import gray\n')] |
import numpy as np
from learnware.algorithm.anomaly_detect.polynomial_interpolation import PolynomialInterpolation
class TestTimeSeriesAnomalyDetectPolynomailInterpolation:
def test_pi_anomaly_detect_one_predict(self):
rng = np.random.RandomState(42)
X_train = rng.randn(100, 1)
model = PolynomialInterpolation()
result = model.predict_one(X_train)
print(result)
assert result == -1
def test_pi_anomaly_detect_predict(self):
rng = np.random.RandomState(42)
X_train = rng.randn(100, 1)
model = PolynomialInterpolation()
result = model.predict(X_train)
print(result)
assert len(result) == len(X_train)
| [
"learnware.algorithm.anomaly_detect.polynomial_interpolation.PolynomialInterpolation",
"numpy.random.RandomState"
] | [((241, 266), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (262, 266), True, 'import numpy as np\n'), ((320, 345), 'learnware.algorithm.anomaly_detect.polynomial_interpolation.PolynomialInterpolation', 'PolynomialInterpolation', ([], {}), '()\n', (343, 345), False, 'from learnware.algorithm.anomaly_detect.polynomial_interpolation import PolynomialInterpolation\n'), ((501, 526), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (522, 526), True, 'import numpy as np\n'), ((580, 605), 'learnware.algorithm.anomaly_detect.polynomial_interpolation.PolynomialInterpolation', 'PolynomialInterpolation', ([], {}), '()\n', (603, 605), False, 'from learnware.algorithm.anomaly_detect.polynomial_interpolation import PolynomialInterpolation\n')] |
import sys
import numpy as np
train_subset = sys.argv[1]
crop_window_len = np.int(sys.argv[2])
crop_linear_len = (crop_window_len*2 + 1) * (crop_window_len*2 + 1) * (crop_window_len*2 + 1)
saving_mm_name = str(crop_window_len * 2 +1) + 'mm'
import random
import os
root_path = '/LUNA16/Train_data/' + saving_mm_name + '/'+train_subset + '/'
saving_path = root_path + 'much_data/'
if not os.path.exists(saving_path):
os.makedirs(saving_path)
nodule_data = np.load(saving_path + 'nodule_data.npy')
nodule_label = np.load(saving_path + 'nodule_label.npy')
len_nodule = len(nodule_data)
augment_nodule_data = np.load(saving_path + 'augment_nodule_data.npy')
augment_nodule_label = np.load(saving_path + 'augment_nodule_label.npy')
aug_idx = np.random.choice(len(augment_nodule_data), 100 * len_nodule, replace = False)
non_nodule_boxes_data = np.load(saving_path + 'non_nodule_boxes_data.npy')
non_nodule_boxes_label = np.load(saving_path + 'non_nodule_boxes_label.npy')
boxes_idx = np.random.choice(len(non_nodule_boxes_data), 100 * len_nodule, replace = False)
much_data = np.append(nodule_data, augment_nodule_data[aug_idx], axis = 0)
much_label = np.append(nodule_label, augment_nodule_label[aug_idx], axis = 0)
much_data = np.append(much_data, non_nodule_boxes_data[boxes_idx], axis = 0)
much_label = np.append(much_label, non_nodule_boxes_label[boxes_idx], axis = 0)
idx = np.array(range(len(much_data)))
random.shuffle(idx)
random.shuffle(idx)
much_data_shuffled = much_data[idx]
much_label_shuffled = much_label[idx]
np.save(saving_path + 'subsampled_much_data.npy', much_data_shuffled)
np.save(saving_path + 'subsampled_much_label.npy', much_label_shuffled)
| [
"numpy.load",
"numpy.save",
"os.makedirs",
"random.shuffle",
"os.path.exists",
"numpy.append",
"numpy.int"
] | [((76, 95), 'numpy.int', 'np.int', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (82, 95), True, 'import numpy as np\n'), ((465, 505), 'numpy.load', 'np.load', (["(saving_path + 'nodule_data.npy')"], {}), "(saving_path + 'nodule_data.npy')\n", (472, 505), True, 'import numpy as np\n'), ((521, 562), 'numpy.load', 'np.load', (["(saving_path + 'nodule_label.npy')"], {}), "(saving_path + 'nodule_label.npy')\n", (528, 562), True, 'import numpy as np\n'), ((616, 664), 'numpy.load', 'np.load', (["(saving_path + 'augment_nodule_data.npy')"], {}), "(saving_path + 'augment_nodule_data.npy')\n", (623, 664), True, 'import numpy as np\n'), ((688, 737), 'numpy.load', 'np.load', (["(saving_path + 'augment_nodule_label.npy')"], {}), "(saving_path + 'augment_nodule_label.npy')\n", (695, 737), True, 'import numpy as np\n'), ((851, 901), 'numpy.load', 'np.load', (["(saving_path + 'non_nodule_boxes_data.npy')"], {}), "(saving_path + 'non_nodule_boxes_data.npy')\n", (858, 901), True, 'import numpy as np\n'), ((927, 978), 'numpy.load', 'np.load', (["(saving_path + 'non_nodule_boxes_label.npy')"], {}), "(saving_path + 'non_nodule_boxes_label.npy')\n", (934, 978), True, 'import numpy as np\n'), ((1085, 1145), 'numpy.append', 'np.append', (['nodule_data', 'augment_nodule_data[aug_idx]'], {'axis': '(0)'}), '(nodule_data, augment_nodule_data[aug_idx], axis=0)\n', (1094, 1145), True, 'import numpy as np\n'), ((1161, 1223), 'numpy.append', 'np.append', (['nodule_label', 'augment_nodule_label[aug_idx]'], {'axis': '(0)'}), '(nodule_label, augment_nodule_label[aug_idx], axis=0)\n', (1170, 1223), True, 'import numpy as np\n'), ((1239, 1301), 'numpy.append', 'np.append', (['much_data', 'non_nodule_boxes_data[boxes_idx]'], {'axis': '(0)'}), '(much_data, non_nodule_boxes_data[boxes_idx], axis=0)\n', (1248, 1301), True, 'import numpy as np\n'), ((1317, 1381), 'numpy.append', 'np.append', (['much_label', 'non_nodule_boxes_label[boxes_idx]'], {'axis': '(0)'}), '(much_label, 
non_nodule_boxes_label[boxes_idx], axis=0)\n', (1326, 1381), True, 'import numpy as np\n'), ((1424, 1443), 'random.shuffle', 'random.shuffle', (['idx'], {}), '(idx)\n', (1438, 1443), False, 'import random\n'), ((1444, 1463), 'random.shuffle', 'random.shuffle', (['idx'], {}), '(idx)\n', (1458, 1463), False, 'import random\n'), ((1539, 1608), 'numpy.save', 'np.save', (["(saving_path + 'subsampled_much_data.npy')", 'much_data_shuffled'], {}), "(saving_path + 'subsampled_much_data.npy', much_data_shuffled)\n", (1546, 1608), True, 'import numpy as np\n'), ((1609, 1680), 'numpy.save', 'np.save', (["(saving_path + 'subsampled_much_label.npy')", 'much_label_shuffled'], {}), "(saving_path + 'subsampled_much_label.npy', much_label_shuffled)\n", (1616, 1680), True, 'import numpy as np\n'), ((394, 421), 'os.path.exists', 'os.path.exists', (['saving_path'], {}), '(saving_path)\n', (408, 421), False, 'import os\n'), ((425, 449), 'os.makedirs', 'os.makedirs', (['saving_path'], {}), '(saving_path)\n', (436, 449), False, 'import os\n')] |
#!/usr/bin/env python
import respirnet
import numpy as np
import networkx as nx
pTypes = [0, 0.25, 0.45, 0.3]
n0 = 100
n1 = 100
# pI = 0.2
# gE = 2.0
# gI = 5.0
# a = 2.5
# b = 0.5
# c = 0.5
# d = 2.5
# these work:
# n0 = 200
# n1 = 100
pI = 0.2
gE = 2.5
gI = 5.0
a = 3.0
b = 0.5
c = 0.5
d = 3.0
pMatE = np.array([(a/(n0-1), b/(n1-1)),
(b/(n0-1), a/(n1-1))])
pMatI = np.array([(c/(n0-1), d/(n1-1)),
(d/(n0-1), c/(n1-1))])
g = respirnet.er_prebot_bot(n0, n1, pMatI, pMatE, pTypes, pI, gE, gI)
nx.write_gml(g, 'test_prebot_bot.gml')
| [
"respirnet.er_prebot_bot",
"numpy.array",
"networkx.write_gml"
] | [((308, 378), 'numpy.array', 'np.array', (['[(a / (n0 - 1), b / (n1 - 1)), (b / (n0 - 1), a / (n1 - 1))]'], {}), '([(a / (n0 - 1), b / (n1 - 1)), (b / (n0 - 1), a / (n1 - 1))])\n', (316, 378), True, 'import numpy as np\n'), ((389, 459), 'numpy.array', 'np.array', (['[(c / (n0 - 1), d / (n1 - 1)), (d / (n0 - 1), c / (n1 - 1))]'], {}), '([(c / (n0 - 1), d / (n1 - 1)), (d / (n0 - 1), c / (n1 - 1))])\n', (397, 459), True, 'import numpy as np\n'), ((466, 531), 'respirnet.er_prebot_bot', 'respirnet.er_prebot_bot', (['n0', 'n1', 'pMatI', 'pMatE', 'pTypes', 'pI', 'gE', 'gI'], {}), '(n0, n1, pMatI, pMatE, pTypes, pI, gE, gI)\n', (489, 531), False, 'import respirnet\n'), ((532, 570), 'networkx.write_gml', 'nx.write_gml', (['g', '"""test_prebot_bot.gml"""'], {}), "(g, 'test_prebot_bot.gml')\n", (544, 570), True, 'import networkx as nx\n')] |
# __protected__ from numba import njit
import numpy as np
# __protected__ @njit(cache=True, fastmath=True)
def row_sum(arr, columns):
return arr.T[columns].sum(0)
# __protected__ @njit(cache=True, fastmath=True)
def row_sum_loops(arr, columns):
# locals type annotations are used only for Cython
# arr.dtype not supported for memoryview
dtype = type(arr[0, 0])
res = np.empty(arr.shape[0], dtype=dtype)
for i in range(arr.shape[0]):
sum_ = dtype(0)
for j in range(columns.shape[0]):
sum_ += arr[i, columns[j]]
res[i] = sum_
return res
__transonic__ = ("0.4.7",)
| [
"numpy.empty"
] | [((394, 429), 'numpy.empty', 'np.empty', (['arr.shape[0]'], {'dtype': 'dtype'}), '(arr.shape[0], dtype=dtype)\n', (402, 429), True, 'import numpy as np\n')] |
from __future__ import annotations
from typing import Sequence, Union
import numbers
from ortools.sat.python.cp_model import (
CpModel, CpSolver, Domain, IntVar, LinearExpr, BoundedLinearExpression
)
import itertools
import numpy as np
import pandas as pd
from numpy.lib.mixins import NDArrayOperatorsMixin
from pandas.core.algorithms import take
from pandas.api.extensions import (
ExtensionDtype, ExtensionArray, register_extension_dtype
)
class LpDtype(ExtensionDtype):
pass
class CpDtype(ExtensionDtype):
type = LinearExpr
name = "cp expression"
na_value = np.nan
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return CpArray
class ORArray(ExtensionArray, NDArrayOperatorsMixin):
"""An array class of variables used for optimization.
It can also be treated as a pandas column, and can be formulated
using convenient aggregation functions.
This class itself cannot be used, it is for inheritance.
"""
# The inherited class sets this value to avoid
# redefining the class for compatibility.
_dtype = None
def __init__(self, exprs: Sequence[object]):
if(isinstance(exprs, (np.ndarray, Sequence))):
self.exprs = exprs
elif(isinstance(exprs, LinearExpr)):
return exprs
else:
raise ValueError("")
# The methods from here are defined to be compatible with pandas.
# those code is "dirty" because of my limited understanding of pandas and numpy.
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls(scalars)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def __getitem__(self, idx):
get = self.exprs[idx]
if(isinstance(get, Sequence)):
return self.__class__(get)
else:
return get
def __len__(self):
return len(self.exprs)
@property
def dtype(self):
return self._dtype
@property
def nbytes(self):
return self.exprs.nbytes
def isna(self):
return np.isnan(self.exprs)
def take(self, indices, allow_fill=False, fill_value=None):
"""Take elements from an array.
This is almost the same as the official implementation of pandas.extensionArray.
"""
result = take(self.exprs, indices, allow_fill=allow_fill, fill_value=fill_value)
return self._from_sequence(result, dtype=self.dtype)
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
"""Defines operations on aggregate functions.
The result after aggregation should be different depending on the class
after inheritance, so we do not define anything here.
"""
raise NotImplemented
def copy(self):
return self.__class__(self.exprs)
@classmethod
def _concat_same_type(cls, to_concat):
data = np.concatenate([ga.data for ga in to_concat])
return cls(data)
# Override functions for assignment operations
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
"""This is exactly the same state as the official numpy sample.
If you find any problems, please let us know via the issue tracker.
"""
out = kwargs.get('out', ())
for x in inputs + out:
# Only support operations with instances of _HANDLED_TYPES.
# Use ArrayLike instead of type(self) for isinstance to
# allow subclasses that don't override __array_ufunc__ to
# handle ArrayLike objects.
if not isinstance(x, self._HANDLED_TYPES + (ORArray,)):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x.exprs if isinstance(x, ORArray) else x
for x in inputs)
if out:
kwargs['out'] = tuple(
x.value if isinstance(x, ORArray) else x
for x in out)
result = getattr(ufunc, method)(*inputs, **kwargs)
if not isinstance(result, Sequence):
# no array value
return result
else:
# array values
return type(self)(result)
# Override functions for comparison operations
def __comp_object(self, method, arg):
"""by explicitly specifying dtype as object, that can get
the value as a constraint expression instead of a bool value.
it is defined in the same way for other comparison operators.
"""
return method(self.to_numpy(), arg, dtype=object)
def __eq__(self, arg):
return self.__comp_object(np.equal, arg)
def __ne__(self, arg):
return self.__comp_object(np.not_equal, arg)
def __gt__(self, arg):
return self.__comp_object(np.greater, arg)
def __ge__(self, arg):
return self.__comp_object(np.greater_equal, arg)
def __lt__(self, arg):
return self.__comp_object(np.less, arg)
def __le__(self, arg):
return self.__comp_object(np.less_equal, arg)
class CpArray(ORArray):
"""An array class of variables used for CP optimization.
It can also be treated as a pandas column, and can be formulated
using convenient aggregation functions.
It comes with simple functions to create variables,
add constraints to the model, and get the solution.
```py
# 0. Prepare an empty model first.
model = cp_model.CpModel()
# 1. Initialize dataframe
df = pd.read_csv("sample.csv")
# 2. Set the index that can be used as the key of the variable array.
df = df.set_index(["key"])
# 3. Create an array of variables.
df["var"] = CpArray.IntVariables(model, df, lb=1, ub=10, name="test")
# 4. Create and add constraints as needed.
CpArray.AddToModel(model, (df["var"] > 0))
model.Minimize(df["var"].sum())
# 5. Solve it
solver = cp_model.Solver()
solver.solve(model)
# 6. Get the solution.
df["solution"] = CpArray.ToValues(solver, df["var"])
```
"""
_dtype = CpDtype
def __init__(self, exprs: Sequence[LinearExpr]):
"""initialize array.
Args:
exprs (Sequence[LinearExpr]): array of CP variables.
"""
super().__init__(exprs)
@classmethod
def IntVariables(cls, model: CpModel, df: pd.DataFrame, *,
lb: Union[Sequence[int], int],
ub: Union[Sequence[int], int],
name: str = "") -> CpArray[IntVar]:
"""Creates an array of integer variables.
Name the variable based on its index by passing `DataFrame` as an argument.
Args:
model (CpModel):
df (pd.DataFrame): the model where the variables you created will be used.
lb (Sequence[int] | int): lower bound for variables.
If a non-array value is passed,
the same value will be used in all variable definitions.
ub (Sequence[int] | int): upper bound for variables.
If a non-array value is passed,
the same value will be used in all variable definitions.
name (str, optional): Prefix for variable names.
If omitted, the variable will not be named.
Returns:
CpArray: cp-array of integer variables.
"""
def __name(i: pd.Index):
return f"{name}{str(i)}" if name else ""
if(not isinstance(lb, Sequence)):
lb = [lb] * len(df)
if(not isinstance(ub, Sequence)):
ub = [ub] * len(df)
return cls([
model.NewIntVar(lb=lb[i], ub=ub[i], name=__name(idx))
for i,idx in enumerate(df.index)
])
@classmethod
def BooleanVariables(cls, model: CpModel, df: pd.DataFrame,
*, name: str = ""):
"""Creates an array of boolean variables.
Name the variable based on its index by passing `DataFrame` as an argument.
Args:
model (CpModel):
df (pd.DataFrame): the model where the variables you created will be used.
name (str, optional): Prefix for variable names.
If omitted, the variable will not be named.
Returns:
CpArray: cp-array of integer variables.
"""
return cls.IntVariables(model, df, lb=0, ub=1, name=name)
@classmethod
def AddToModel(cls, model: CpModel, constraints: Sequence[BoundedLinearExpression]):
return [model.Add(ct) for ct in constraints]
@classmethod
def ToValues(cls, solver: CpSolver, exprs: CpArray) -> list[int]:
return [solver.Value(x) for x in exprs]
@classmethod
def ToBooleanValues(cls, solver: CpSolver, exprs: CpArray) -> list[bool]:
return [solver.BooleanValue(x) for x in exprs]
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
"""Defines operations on aggregate functions.
Args:
name (str): Name of the aggregate function.
Anything other than `sum` will not work well with CP
and will throw an exception.
"""
if name == "sum":
# return LinearExpr.Sum(self.exprs)
return LinearExpr.Sum(self.exprs)
elif name == "min":
raise ValueError("If you want to compare the minimum value of variables with other variable/constant values, use `AddMinEquality`.")
elif name == "max":
raise ValueError("If you want to compare the maximum value of variables with other variable/constant values, use `AddMaxEquality`.")
elif name == "prod":
raise ValueError("products of variables are not allowed in linear programming.")
else:
raise ValueError(f"{name} operation cannot be applied to model variables.")
# register_extension_dtype(LpDtype)
register_extension_dtype(CpDtype)
| [
"ortools.sat.python.cp_model.LinearExpr.Sum",
"pandas.api.extensions.register_extension_dtype",
"numpy.isnan",
"numpy.concatenate",
"pandas.core.algorithms.take"
] | [((10470, 10503), 'pandas.api.extensions.register_extension_dtype', 'register_extension_dtype', (['CpDtype'], {}), '(CpDtype)\n', (10494, 10503), False, 'from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype\n'), ((2542, 2562), 'numpy.isnan', 'np.isnan', (['self.exprs'], {}), '(self.exprs)\n', (2550, 2562), True, 'import numpy as np\n'), ((2795, 2866), 'pandas.core.algorithms.take', 'take', (['self.exprs', 'indices'], {'allow_fill': 'allow_fill', 'fill_value': 'fill_value'}), '(self.exprs, indices, allow_fill=allow_fill, fill_value=fill_value)\n', (2799, 2866), False, 'from pandas.core.algorithms import take\n'), ((3381, 3426), 'numpy.concatenate', 'np.concatenate', (['[ga.data for ga in to_concat]'], {}), '([ga.data for ga in to_concat])\n', (3395, 3426), True, 'import numpy as np\n'), ((9830, 9856), 'ortools.sat.python.cp_model.LinearExpr.Sum', 'LinearExpr.Sum', (['self.exprs'], {}), '(self.exprs)\n', (9844, 9856), False, 'from ortools.sat.python.cp_model import CpModel, CpSolver, Domain, IntVar, LinearExpr, BoundedLinearExpression\n')] |
import logging
import numpy as np
from tensorflow.keras import backend as K
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
__author__ = "<NAME>, Science Data Processing Branch"
__email__ = "<EMAIL>"
__status__ = "Production"
# ---------------------------------------------------------------------------
# module metrics
#
# General functions to compute custom metrics.
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Module Methods
# ---------------------------------------------------------------------------
# ------------------------------ Metric Functions -------------------------- #
def iou_coef(y_true, y_pred, smooth=1):
intersection = K.sum(K.abs(y_true * y_pred), axis=[1, 2, 3])
union = K.sum(y_true, [1, 2, 3])+K.sum(y_pred, [1, 2, 3])-intersection
iou = K.mean((intersection + smooth) / (union + smooth), axis=0)
return iou
def dice_coef(y_true, y_pred, smooth=1):
intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
dice = K.mean((2. * intersection + smooth)/(union + smooth), axis=0)
return dice
def iou_val(y_true, y_pred):
intersection = np.logical_and(y_true, y_pred)
union = np.logical_or(y_true, y_pred)
iou_score = np.sum(intersection) / np.sum(union)
return iou_score
def acc_val(y_true, y_pred):
return accuracy_score(y_true, y_pred)
def prec_val(y_true, y_pred):
return precision_score(y_true, y_pred, average='macro'), \
precision_score(y_true, y_pred, average=None)
def recall_val(y_true, y_pred):
return recall_score(y_true, y_pred, average='macro'), \
recall_score(y_true, y_pred, average=None)
# -------------------------------------------------------------------------------
# module metrics Unit Tests
# -------------------------------------------------------------------------------
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# Add unit tests here
| [
"tensorflow.keras.backend.sum",
"numpy.sum",
"numpy.logical_and",
"logging.basicConfig",
"tensorflow.keras.backend.mean",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.recall_score",
"tensorflow.keras.backend.abs",
"numpy.logical_or",
"sklearn.metrics.precision_score"
] | [((976, 1034), 'tensorflow.keras.backend.mean', 'K.mean', (['((intersection + smooth) / (union + smooth))'], {'axis': '(0)'}), '((intersection + smooth) / (union + smooth), axis=0)\n', (982, 1034), True, 'from tensorflow.keras import backend as K\n'), ((1112, 1150), 'tensorflow.keras.backend.sum', 'K.sum', (['(y_true * y_pred)'], {'axis': '[1, 2, 3]'}), '(y_true * y_pred, axis=[1, 2, 3])\n', (1117, 1150), True, 'from tensorflow.keras import backend as K\n'), ((1236, 1300), 'tensorflow.keras.backend.mean', 'K.mean', (['((2.0 * intersection + smooth) / (union + smooth))'], {'axis': '(0)'}), '((2.0 * intersection + smooth) / (union + smooth), axis=0)\n', (1242, 1300), True, 'from tensorflow.keras import backend as K\n'), ((1364, 1394), 'numpy.logical_and', 'np.logical_and', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1378, 1394), True, 'import numpy as np\n'), ((1407, 1436), 'numpy.logical_or', 'np.logical_or', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1420, 1436), True, 'import numpy as np\n'), ((1553, 1583), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1567, 1583), False, 'from sklearn.metrics import accuracy_score\n'), ((2105, 2144), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (2124, 2144), False, 'import logging\n'), ((851, 873), 'tensorflow.keras.backend.abs', 'K.abs', (['(y_true * y_pred)'], {}), '(y_true * y_pred)\n', (856, 873), True, 'from tensorflow.keras import backend as K\n'), ((1163, 1192), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true'], {'axis': '[1, 2, 3]'}), '(y_true, axis=[1, 2, 3])\n', (1168, 1192), True, 'from tensorflow.keras import backend as K\n'), ((1195, 1224), 'tensorflow.keras.backend.sum', 'K.sum', (['y_pred'], {'axis': '[1, 2, 3]'}), '(y_pred, axis=[1, 2, 3])\n', (1200, 1224), True, 'from tensorflow.keras import backend as K\n'), ((1453, 1473), 'numpy.sum', 'np.sum', (['intersection'], {}), 
'(intersection)\n', (1459, 1473), True, 'import numpy as np\n'), ((1476, 1489), 'numpy.sum', 'np.sum', (['union'], {}), '(union)\n', (1482, 1489), True, 'import numpy as np\n'), ((1627, 1675), 'sklearn.metrics.precision_score', 'precision_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (1642, 1675), False, 'from sklearn.metrics import precision_score\n'), ((1687, 1732), 'sklearn.metrics.precision_score', 'precision_score', (['y_true', 'y_pred'], {'average': 'None'}), '(y_true, y_pred, average=None)\n', (1702, 1732), False, 'from sklearn.metrics import precision_score\n'), ((1778, 1823), 'sklearn.metrics.recall_score', 'recall_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (1790, 1823), False, 'from sklearn.metrics import recall_score\n'), ((1835, 1877), 'sklearn.metrics.recall_score', 'recall_score', (['y_true', 'y_pred'], {'average': 'None'}), '(y_true, y_pred, average=None)\n', (1847, 1877), False, 'from sklearn.metrics import recall_score\n'), ((903, 927), 'tensorflow.keras.backend.sum', 'K.sum', (['y_true', '[1, 2, 3]'], {}), '(y_true, [1, 2, 3])\n', (908, 927), True, 'from tensorflow.keras import backend as K\n'), ((928, 952), 'tensorflow.keras.backend.sum', 'K.sum', (['y_pred', '[1, 2, 3]'], {}), '(y_pred, [1, 2, 3])\n', (933, 952), True, 'from tensorflow.keras import backend as K\n')] |
from compas.geometry import angle_vectors_xy
from numpy import array
from numpy import float64
from numpy import where
from compas.numerical import connectivity_matrix
from compas.numerical import normrow
from compas.numerical import normalizerow
from compas_tna.diagrams import FormDiagram
from compas_tna.diagrams import ForceDiagram
from compas_tna.utilities import rot90
from compas_tna.utilities import apply_bounds
from compas_tna.utilities import parallelise_sparse
from compas_tna.utilities import parallelise_nodal
__all__ = [
'horizontal_numpy',
'horizontal_nodal_numpy',
'horizontal_numpy_proxy',
'horizontal_nodal_numpy_proxy'
]
def horizontal_numpy_proxy(formdata, forcedata, *args, **kwargs):
form = FormDiagram.from_data(formdata)
force = ForceDiagram.from_data(forcedata)
horizontal_numpy(form, force, *args, **kwargs)
return form.to_data(), force.to_data()
def horizontal_nodal_numpy_proxy(formdata, forcedata, *args, **kwargs):
form = FormDiagram.from_data(formdata)
force = ForceDiagram.from_data(forcedata)
horizontal_nodal_numpy(form, force, *args, **kwargs)
return form.to_data(), force.to_data()
def horizontal_numpy(form, force, alpha=100.0, kmax=100):
r"""Compute horizontal equilibrium.
Parameters
----------
form : compas_tna.diagrams.formdiagram.FormDiagram
force : compas_tna.diagrams.forcediagram.ForceDiagram
alpha : float
Weighting factor for computation of the target vectors (the default is
100.0, which implies that the target vectors are the edges of the form diagram).
If 0.0, the target vectors are the edges of the force diagram.
kmax : int
Maximum number of iterations (the default is 100).
Notes
-----
This implementation is based on the following formulation
.. math::
\mathbf{C}^{T} \mathbf{C} \mathbf{xy} = \mathbf{C}^{T} \mathbf{t}
with :math:`\mathbf{C}` the connectivity matrix and :math:`\mathbf{t}` the
target vectors.
"""
# --------------------------------------------------------------------------
# alpha == 1 : form diagram fixed
# alpha == 0 : force diagram fixed
# --------------------------------------------------------------------------
alpha = max(0., min(1., float(alpha) / 100.0))
# --------------------------------------------------------------------------
# form diagram
# --------------------------------------------------------------------------
k_i = form.key_index()
uv_i = form.uv_index()
fixed = set(list(form.anchors()) + list(form.fixed()))
fixed = [k_i[key] for key in fixed]
xy = array(form.vertices_attributes('xy'), dtype=float64)
edges = list(form.edges_where({'_is_edge': True}))
lmin = array(form.edges_attribute('lmin', keys=edges), dtype=float64).reshape((-1, 1))
lmax = array(form.edges_attribute('lmax', keys=edges), dtype=float64).reshape((-1, 1))
hmin = array(form.edges_attribute('hmin', keys=edges), dtype=float64).reshape((-1, 1))
hmax = array(form.edges_attribute('hmax', keys=edges), dtype=float64).reshape((-1, 1))
edges = [[k_i[u], k_i[v]] for u, v in edges]
C = connectivity_matrix(edges, 'csr')
Ct = C.transpose()
CtC = Ct.dot(C)
# --------------------------------------------------------------------------
# force diagram
# --------------------------------------------------------------------------
_k_i = force.key_index()
_uv_i = force.uv_index(form=form)
_fixed = list(force.fixed())
_fixed = [_k_i[key] for key in _fixed]
_fixed = _fixed or [0]
_xy = array(force.vertices_attributes('xy'), dtype=float64)
_edges = force.ordered_edges(form)
_lmin = array(force.edges_attribute('lmin', keys=_edges), dtype=float64).reshape((-1, 1))
_lmax = array(force.edges_attribute('lmax', keys=_edges), dtype=float64).reshape((-1, 1))
_edges = [[_k_i[u], _k_i[v]] for u, v in _edges]
_C = connectivity_matrix(_edges, 'csr')
_Ct = _C.transpose()
_Ct_C = _Ct.dot(_C)
scale = force.attributes.get('scale', 1.0)
# --------------------------------------------------------------------------
# rotate force diagram to make it parallel to the form diagram
# use CCW direction (opposite of cycle direction)
# --------------------------------------------------------------------------
_xy[:] = rot90(_xy, +1.0)
# --------------------------------------------------------------------------
# make the diagrams parallel to a target vector
# that is the (alpha) weighted average of the directions of corresponding
# edges of the two diagrams
# --------------------------------------------------------------------------
uv = C.dot(xy)
_uv = _C.dot(_xy)
l = normrow(uv) # noqa: E741
_l = normrow(_uv)
t = alpha * normalizerow(uv) + (1 - alpha) * normalizerow(_uv)
# proper bounds
hmin /= scale
hmax /= scale
_lmin = where(hmin > _lmin, hmin, _lmin)
_lmax = where(hmax < _lmax, hmax, _lmax)
# parallelise
# add the outer loop to the parallelise function
for k in range(kmax):
# apply length bounds
apply_bounds(l, lmin, lmax)
apply_bounds(_l, _lmin, _lmax)
if alpha != 1.0:
# if emphasis is not entirely on the form
# update the form diagram
xy = parallelise_sparse(CtC, Ct.dot(l * t), xy, fixed, 'CtC')
uv = C.dot(xy)
l = normrow(uv) # noqa: E741
if alpha != 0.0:
# if emphasis is not entirely on the force
# update the force diagram
_xy = parallelise_sparse(_Ct_C, _Ct.dot(_l * t), _xy, _fixed, '_Ct_C')
_uv = _C.dot(_xy)
_l = normrow(_uv)
# --------------------------------------------------------------------------
# compute the force densities
# --------------------------------------------------------------------------
f = _l
q = (f / l).astype(float64)
# --------------------------------------------------------------------------
# rotate the force diagram 90 degrees in CW direction
# this way the relation between the two diagrams is easier to read
# --------------------------------------------------------------------------
_xy[:] = rot90(_xy, -1.0)
# --------------------------------------------------------------------------
# angle deviations
# note that this does not account for flipped edges!
# --------------------------------------------------------------------------
a = [angle_vectors_xy(uv[i], _uv[i], deg=True) for i in range(len(edges))]
# --------------------------------------------------------------------------
# update form
# --------------------------------------------------------------------------
for key, attr in form.vertices(True):
i = k_i[key]
attr['x'] = xy[i, 0]
attr['y'] = xy[i, 1]
for (u, v), attr in form.edges_where({'_is_edge': True}, True):
i = uv_i[(u, v)]
attr['q'] = q[i, 0]
attr['_f'] = f[i, 0]
attr['_l'] = l[i, 0]
attr['_a'] = a[i]
# --------------------------------------------------------------------------
# update force
# --------------------------------------------------------------------------
for key, attr in force.vertices(True):
i = _k_i[key]
attr['x'] = _xy[i, 0]
attr['y'] = _xy[i, 1]
for (u, v), attr in force.edges(True):
if (u, v) in _uv_i:
i = _uv_i[(u, v)]
else:
i = _uv_i[(v, u)]
attr['_l'] = _l[i, 0]
attr['_a'] = a[i]
def horizontal_nodal_numpy(form, force, alpha=100, kmax=100):
"""Compute horizontal equilibrium using a node-per-node approach.
Parameters
----------
form : compas_tna.diagrams.FormDiagram
force : compas_tna.diagrams.ForceDiagram
alpha : float
Weighting factor for computation of the target vectors (the default is
100.0, which implies that the target vectors are the edges of the form diagram).
If 0.0, the target vectors are the edges of the force diagram.
kmax : int
Maximum number of iterations (the default is 100).
"""
alpha = float(alpha) / 100.0
alpha = max(0., min(1., alpha))
# --------------------------------------------------------------------------
# form diagram
# --------------------------------------------------------------------------
k_i = form.key_index()
uv_i = form.uv_index()
i_nbrs = {k_i[key]: [k_i[nbr] for nbr in form.vertex_neighbors(key)] for key in form.vertices()}
ij_e = {(k_i[u], k_i[v]): index for (u, v), index in iter(uv_i.items())}
fixed = set(list(form.anchors()) + list(form.fixed()))
fixed = [k_i[key] for key in fixed]
edges = [[k_i[u], k_i[v]] for u, v in form.edges_where({'_is_edge': True})]
lmin = array([attr.get('lmin', 1e-7) for key, attr in form.edges_where({'_is_edge': True}, True)], dtype=float64).reshape((-1, 1))
lmax = array([attr.get('lmax', 1e+7) for key, attr in form.edges_where({'_is_edge': True}, True)], dtype=float64).reshape((-1, 1))
hmin = array([attr.get('hmin', 1e-7) for key, attr in form.edges_where({'_is_edge': True}, True)], dtype=float64).reshape((-1, 1))
hmax = array([attr.get('hmax', 1e+7) for key, attr in form.edges_where({'_is_edge': True}, True)], dtype=float64).reshape((-1, 1))
flipmask = array([1.0 if not attr['_is_tension'] else -1.0 for key, attr in form.edges_where({'_is_edge': True}, True)], dtype=float).reshape((-1, 1))
xy = array(form.vertices_attributes('xy'), dtype=float64)
C = connectivity_matrix(edges, 'csr')
# --------------------------------------------------------------------------
# force diagram
# --------------------------------------------------------------------------
_k_i = force.key_index()
_uv_i = force.uv_index(form=form)
_i_nbrs = {_k_i[key]: [_k_i[nbr] for nbr in force.vertex_neighbors(key)] for key in force.vertices()}
_ij_e = {(_k_i[u], _k_i[v]): index for (u, v), index in iter(_uv_i.items())}
_fixed = list(force.fixed())
_fixed = [_k_i[key] for key in _fixed]
_fixed = _fixed or [0]
_edges = force.ordered_edges(form)
_xy = array(force.vertices_attributes('xy'), dtype=float64)
_lmin = array([attr.get('lmin', 1e-7) for key, attr in force.edges(True)], dtype=float64).reshape((-1, 1))
_lmax = array([attr.get('lmax', 1e+7) for key, attr in force.edges(True)], dtype=float64).reshape((-1, 1))
_edges = [[_k_i[u], _k_i[v]] for u, v in _edges]
_C = connectivity_matrix(_edges, 'csr')
scale = force.attributes.get('scale', 1.0)
# --------------------------------------------------------------------------
# rotate force diagram to make it parallel to the form diagram
# use CCW direction (opposite of cycle direction)
# --------------------------------------------------------------------------
_xy[:] = rot90(_xy, +1.0)
# --------------------------------------------------------------------------
# make the diagrams parallel to a target vector
# that is the (alpha) weighted average of the directions of corresponding
# edges of the two diagrams
# --------------------------------------------------------------------------
uv = flipmask * C.dot(xy)
_uv = _C.dot(_xy)
l = normrow(uv) # noqa: E741
_l = normrow(_uv)
# --------------------------------------------------------------------------
# the target vectors
# --------------------------------------------------------------------------
targets = alpha * normalizerow(uv) + (1 - alpha) * normalizerow(_uv)
# --------------------------------------------------------------------------
# proper force bounds
# --------------------------------------------------------------------------
hmin /= scale
hmax /= scale
_lmin = where(hmin > _lmin, hmin, _lmin)
_lmax = where(hmax < _lmax, hmax, _lmax)
# --------------------------------------------------------------------------
# parallelise
# --------------------------------------------------------------------------
if alpha < 1:
parallelise_nodal(xy, C, targets, i_nbrs, ij_e, fixed=fixed, kmax=kmax, lmin=lmin, lmax=lmax)
if alpha > 0:
parallelise_nodal(_xy, _C, targets, _i_nbrs, _ij_e, kmax=kmax, lmin=_lmin, lmax=_lmax)
# --------------------------------------------------------------------------
# update the coordinate difference vectors
# --------------------------------------------------------------------------
uv = C.dot(xy)
_uv = _C.dot(_xy)
l = normrow(uv) # noqa: E741
_l = normrow(_uv)
# --------------------------------------------------------------------------
# compute the force densities
# --------------------------------------------------------------------------
f = flipmask * _l
q = (f / l).astype(float64)
# --------------------------------------------------------------------------
# rotate the force diagram 90 degrees in CW direction
# this way the relation between the two diagrams is easier to read
# --------------------------------------------------------------------------
_xy[:] = rot90(_xy, -1.0)
# --------------------------------------------------------------------------
# angle deviations
# note that this does not account for flipped edges!
# --------------------------------------------------------------------------
a = [angle_vectors_xy(uv[i], _uv[i], deg=True) for i in range(len(edges))]
# --------------------------------------------------------------------------
# update form
# --------------------------------------------------------------------------
for key, attr in form.vertices(True):
i = k_i[key]
attr['x'] = xy[i, 0]
attr['y'] = xy[i, 1]
for (u, v), attr in form.edges_where({'_is_edge': True}, True):
i = uv_i[(u, v)]
attr['q'] = q[i, 0]
attr['_f'] = f[i, 0]
attr['_l'] = l[i, 0]
attr['_a'] = a[i]
# --------------------------------------------------------------------------
# update force
# --------------------------------------------------------------------------
for key, attr in force.vertices(True):
i = _k_i[key]
attr['x'] = _xy[i, 0]
attr['y'] = _xy[i, 1]
for (u, v), attr in force.edges(True):
if (u, v) in _uv_i:
i = _uv_i[(u, v)]
else:
i = _uv_i[(v, u)]
attr['_l'] = _l[i, 0]
attr['_a'] = a[i]
| [
"compas_tna.utilities.parallelise_nodal",
"compas_tna.utilities.apply_bounds",
"compas_tna.diagrams.ForceDiagram.from_data",
"compas_tna.diagrams.FormDiagram.from_data",
"compas.numerical.normrow",
"numpy.where",
"compas.geometry.angle_vectors_xy",
"compas.numerical.normalizerow",
"compas.numerical.... | [((742, 773), 'compas_tna.diagrams.FormDiagram.from_data', 'FormDiagram.from_data', (['formdata'], {}), '(formdata)\n', (763, 773), False, 'from compas_tna.diagrams import FormDiagram\n'), ((786, 819), 'compas_tna.diagrams.ForceDiagram.from_data', 'ForceDiagram.from_data', (['forcedata'], {}), '(forcedata)\n', (808, 819), False, 'from compas_tna.diagrams import ForceDiagram\n'), ((999, 1030), 'compas_tna.diagrams.FormDiagram.from_data', 'FormDiagram.from_data', (['formdata'], {}), '(formdata)\n', (1020, 1030), False, 'from compas_tna.diagrams import FormDiagram\n'), ((1043, 1076), 'compas_tna.diagrams.ForceDiagram.from_data', 'ForceDiagram.from_data', (['forcedata'], {}), '(forcedata)\n', (1065, 1076), False, 'from compas_tna.diagrams import ForceDiagram\n'), ((3197, 3230), 'compas.numerical.connectivity_matrix', 'connectivity_matrix', (['edges', '"""csr"""'], {}), "(edges, 'csr')\n", (3216, 3230), False, 'from compas.numerical import connectivity_matrix\n'), ((3981, 4015), 'compas.numerical.connectivity_matrix', 'connectivity_matrix', (['_edges', '"""csr"""'], {}), "(_edges, 'csr')\n", (4000, 4015), False, 'from compas.numerical import connectivity_matrix\n'), ((4409, 4425), 'compas_tna.utilities.rot90', 'rot90', (['_xy', '(+1.0)'], {}), '(_xy, +1.0)\n', (4414, 4425), False, 'from compas_tna.utilities import rot90\n'), ((4799, 4810), 'compas.numerical.normrow', 'normrow', (['uv'], {}), '(uv)\n', (4806, 4810), False, 'from compas.numerical import normrow\n'), ((4834, 4846), 'compas.numerical.normrow', 'normrow', (['_uv'], {}), '(_uv)\n', (4841, 4846), False, 'from compas.numerical import normrow\n'), ((4982, 5014), 'numpy.where', 'where', (['(hmin > _lmin)', 'hmin', '_lmin'], {}), '(hmin > _lmin, hmin, _lmin)\n', (4987, 5014), False, 'from numpy import where\n'), ((5027, 5059), 'numpy.where', 'where', (['(hmax < _lmax)', 'hmax', '_lmax'], {}), '(hmax < _lmax, hmax, _lmax)\n', (5032, 5059), False, 'from numpy import where\n'), ((6327, 6343), 
'compas_tna.utilities.rot90', 'rot90', (['_xy', '(-1.0)'], {}), '(_xy, -1.0)\n', (6332, 6343), False, 'from compas_tna.utilities import rot90\n'), ((9696, 9729), 'compas.numerical.connectivity_matrix', 'connectivity_matrix', (['edges', '"""csr"""'], {}), "(edges, 'csr')\n", (9715, 9729), False, 'from compas.numerical import connectivity_matrix\n'), ((10658, 10692), 'compas.numerical.connectivity_matrix', 'connectivity_matrix', (['_edges', '"""csr"""'], {}), "(_edges, 'csr')\n", (10677, 10692), False, 'from compas.numerical import connectivity_matrix\n'), ((11036, 11052), 'compas_tna.utilities.rot90', 'rot90', (['_xy', '(+1.0)'], {}), '(_xy, +1.0)\n', (11041, 11052), False, 'from compas_tna.utilities import rot90\n'), ((11437, 11448), 'compas.numerical.normrow', 'normrow', (['uv'], {}), '(uv)\n', (11444, 11448), False, 'from compas.numerical import normrow\n'), ((11472, 11484), 'compas.numerical.normrow', 'normrow', (['_uv'], {}), '(_uv)\n', (11479, 11484), False, 'from compas.numerical import normrow\n'), ((11981, 12013), 'numpy.where', 'where', (['(hmin > _lmin)', 'hmin', '_lmin'], {}), '(hmin > _lmin, hmin, _lmin)\n', (11986, 12013), False, 'from numpy import where\n'), ((12026, 12058), 'numpy.where', 'where', (['(hmax < _lmax)', 'hmax', '_lmax'], {}), '(hmax < _lmax, hmax, _lmax)\n', (12031, 12058), False, 'from numpy import where\n'), ((12730, 12741), 'compas.numerical.normrow', 'normrow', (['uv'], {}), '(uv)\n', (12737, 12741), False, 'from compas.numerical import normrow\n'), ((12765, 12777), 'compas.numerical.normrow', 'normrow', (['_uv'], {}), '(_uv)\n', (12772, 12777), False, 'from compas.numerical import normrow\n'), ((13332, 13348), 'compas_tna.utilities.rot90', 'rot90', (['_xy', '(-1.0)'], {}), '(_xy, -1.0)\n', (13337, 13348), False, 'from compas_tna.utilities import rot90\n'), ((5195, 5222), 'compas_tna.utilities.apply_bounds', 'apply_bounds', (['l', 'lmin', 'lmax'], {}), '(l, lmin, lmax)\n', (5207, 5222), False, 'from compas_tna.utilities import 
apply_bounds\n'), ((5231, 5261), 'compas_tna.utilities.apply_bounds', 'apply_bounds', (['_l', '_lmin', '_lmax'], {}), '(_l, _lmin, _lmax)\n', (5243, 5261), False, 'from compas_tna.utilities import apply_bounds\n'), ((6595, 6636), 'compas.geometry.angle_vectors_xy', 'angle_vectors_xy', (['uv[i]', '_uv[i]'], {'deg': '(True)'}), '(uv[i], _uv[i], deg=True)\n', (6611, 6636), False, 'from compas.geometry import angle_vectors_xy\n'), ((12265, 12362), 'compas_tna.utilities.parallelise_nodal', 'parallelise_nodal', (['xy', 'C', 'targets', 'i_nbrs', 'ij_e'], {'fixed': 'fixed', 'kmax': 'kmax', 'lmin': 'lmin', 'lmax': 'lmax'}), '(xy, C, targets, i_nbrs, ij_e, fixed=fixed, kmax=kmax,\n lmin=lmin, lmax=lmax)\n', (12282, 12362), False, 'from compas_tna.utilities import parallelise_nodal\n'), ((12385, 12475), 'compas_tna.utilities.parallelise_nodal', 'parallelise_nodal', (['_xy', '_C', 'targets', '_i_nbrs', '_ij_e'], {'kmax': 'kmax', 'lmin': '_lmin', 'lmax': '_lmax'}), '(_xy, _C, targets, _i_nbrs, _ij_e, kmax=kmax, lmin=_lmin,\n lmax=_lmax)\n', (12402, 12475), False, 'from compas_tna.utilities import parallelise_nodal\n'), ((13600, 13641), 'compas.geometry.angle_vectors_xy', 'angle_vectors_xy', (['uv[i]', '_uv[i]'], {'deg': '(True)'}), '(uv[i], _uv[i], deg=True)\n', (13616, 13641), False, 'from compas.geometry import angle_vectors_xy\n'), ((4863, 4879), 'compas.numerical.normalizerow', 'normalizerow', (['uv'], {}), '(uv)\n', (4875, 4879), False, 'from compas.numerical import normalizerow\n'), ((4896, 4913), 'compas.numerical.normalizerow', 'normalizerow', (['_uv'], {}), '(_uv)\n', (4908, 4913), False, 'from compas.numerical import normalizerow\n'), ((5496, 5507), 'compas.numerical.normrow', 'normrow', (['uv'], {}), '(uv)\n', (5503, 5507), False, 'from compas.numerical import normrow\n'), ((5771, 5783), 'compas.numerical.normrow', 'normrow', (['_uv'], {}), '(_uv)\n', (5778, 5783), False, 'from compas.numerical import normrow\n'), ((11694, 11710), 'compas.numerical.normalizerow', 
'normalizerow', (['uv'], {}), '(uv)\n', (11706, 11710), False, 'from compas.numerical import normalizerow\n'), ((11727, 11744), 'compas.numerical.normalizerow', 'normalizerow', (['_uv'], {}), '(_uv)\n', (11739, 11744), False, 'from compas.numerical import normalizerow\n')] |
#!/usr/bin/env python
from collections import OrderedDict
import numpy as np
from scipy import ndimage
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision
import matplotlib.pyplot as plt
import time
try:
import efficientnet_pytorch
from efficientnet_pytorch import EfficientNet
except ImportError:
print('efficientnet_pytorch is not available, using densenet. '
'Try installing https://github.com/ahundt/EfficientNet-PyTorch for all features (recommended): '
' pip3 install --user --upgrade git+https://github.com/ahundt/EfficientNet-PyTorch.git'
'A version of EfficientNets without dilation can be installed with the command (not recommended):'
' pip3 install efficientnet-pytorch --user --upgrade'
'See https://github.com/lukemelas/EfficientNet-PyTorch for details')
efficientnet_pytorch = None
def tile_vector_as_image_channels_torch(vector_op, image_shape):
    """Broadcast a (batch, channels) vector across the spatial dims of an image.

    The vector is reshaped to (batch, channels, 1, 1) and then expanded (a
    zero-copy view, not a tiled copy) so that every pixel of an image shaped
    like ``image_shape`` (BCHW) carries the vector as its channel values.
    Code source: https://github.com/ahundt/costar_dataset/blob/master/costar_dataset/block_stacking_reader_torch.py

    # Params
        vector_op: tensor of shape (batch, channels) to broadcast.
        image_shape: BCHW sizes; only the spatial entries [2] and [3] are used.

    # Returns
        A (batch, channels, H, W) view of ``vector_op``.
    """
    batch, channels = vector_op.shape[0], vector_op.shape[1]
    # collapse to a single "pixel" per sample ...
    as_pixel = vector_op.reshape([batch, channels, 1, 1])
    # ... then broadcast that pixel over the requested spatial extent
    return as_pixel.expand([batch, channels, image_shape[2], image_shape[3]])
def trunk_net(name='', fc_channels=2048, second_fc_channels=None, goal_condition_len=0, channels_out=3):
    """Build a 1x1-convolution branch head: BN -> ReLU -> conv -> BN -> ReLU -> conv.

    # Params
        name: prefix used for the layer names inside the Sequential.
        fc_channels: channel count of the incoming trunk features.
        second_fc_channels: width of the middle conv; ``None`` keeps it equal
            to the input width (the original behavior).
        goal_condition_len: extra channels appended by goal conditioning;
            added to both conv widths.
        channels_out: channels of the final conv (e.g. 1 score map).

    # Returns
        An ``nn.Sequential`` of named layers.
    """
    in_width = fc_channels + goal_condition_len
    # middle width falls back to the full input width when unspecified
    mid_width = in_width if second_fc_channels is None else second_fc_channels + goal_condition_len
    named_layers = [
        (name + '-norm0', nn.BatchNorm2d(in_width)),
        (name + '-relu0', nn.ReLU(inplace=True)),
        (name + '-conv0', nn.Conv2d(in_width, mid_width, kernel_size=1, stride=1, bias=False)),
        (name + '-norm1', nn.BatchNorm2d(mid_width)),
        (name + '-relu1', nn.ReLU(inplace=True)),
        (name + '-conv1', nn.Conv2d(mid_width, channels_out, kernel_size=1, stride=1, bias=False)),
        # ('push-upsample2', nn.Upsample(scale_factor=4, mode='bilinear'))
    ]
    return nn.Sequential(OrderedDict(named_layers))
def vector_block(name='', channels_in=4, fc_channels=2048, channels_out=2048):
    """Two-layer MLP (Linear -> ReLU -> Linear -> ReLU) for goal-condition vectors.

    # Params
        name: prefix used for the layer names inside the Sequential.
        channels_in: length of the incoming goal vector.
        fc_channels: hidden width.
        channels_out: output width.

    # Returns
        An ``nn.Sequential`` of named layers (both Linears are bias-free).
    """
    prefix = name + '-vectorblock-'
    return nn.Sequential(OrderedDict([
        (prefix + 'lin0', nn.Linear(channels_in, fc_channels, bias=False)),
        (prefix + 'relu0', nn.ReLU(inplace=True)),
        # TODO(ahundt) re-enable batchnorm https://github.com/pytorch/pytorch/issues/4534
        # (name + '-vectorblock-norm0', nn.BatchNorm1d(fc_channels)),
        (prefix + 'lin1', nn.Linear(fc_channels, channels_out, bias=False)),
        (prefix + 'relu1', nn.ReLU(inplace=True)),
        # TODO(ahundt) re-enable batchnorm https://github.com/pytorch/pytorch/issues/4534
        # (name + '-vectorblock-norm1', nn.BatchNorm1d(channels_out))
    ]))
def init_trunk_weights(model, branch=None):
    """ Initializes the trunk network weight layer weights.

    Conv2d weights get Kaiming-normal init; BatchNorm2d weights are set to 1
    and biases to 0. Only modules whose name contains the requested branch
    prefix are touched.

    # Arguments
        model: module whose ``named_modules()`` are scanned.
        branch: string indicating the specific branch to initialize. Default of None will initialize 'push-', 'grasp-' and 'place-'.
    """
    for name, module in model.named_modules():
        # BUG FIX: the original condition mixed `and`/`or` without parentheses,
        # so it parsed as `(branch is None and 'push-' in name) or 'grasp-' in
        # name or 'place-' in name` — grasp/place layers were re-initialized
        # even when a different specific branch was requested. Group the
        # branch-is-None alternatives explicitly.
        if ((branch is None and ('push-' in name or 'grasp-' in name or 'place-' in name)) or
                (branch is not None and branch in name)):
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight.data)
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
def rot_to_affine_mat(rotate_theta, batch_size=1):
    """Build the batched 2x3 affine rotation matrices used by F.affine_grid.

    # Arguments
        rotate_theta: rotation angle in radians.
        batch_size: number of identical matrices to stack.

    # Returns
        A float tensor of shape (batch_size, 2, 3) where every batch entry is
        [[cos, sin, 0], [-sin, cos, 0]].
    """
    cos, sin = np.cos(rotate_theta), np.sin(rotate_theta)
    affine_mat_after = np.asarray([[cos, sin, 0], [-sin, cos, 0]])
    # BUG FIX: the previous `np.tile(arr[np.newaxis], batch_size)` tiled along
    # the LAST axis and the in-place `.shape = (2, 3, batch_size)` then
    # reinterpreted the flat buffer, interleaving the matrix entries for
    # batch_size > 1 (e.g. theta=0 produced [[1,0,0],[0,0,1]] instead of the
    # identity). Tiling over an explicit leading batch axis keeps each batch
    # entry an intact rotation matrix and needs no permute.
    affine_mat_after = np.tile(affine_mat_after[np.newaxis], (batch_size, 1, 1))
    return torch.from_numpy(affine_mat_after).float()
class PixelNet(nn.Module):
    """Fully-convolutional multi-branch net producing per-pixel push/grasp(/place) score maps.

    Color and depth images are rotated into ``num_rotations`` discrete
    orientations, run through shared feature trunks (DenseNet or EfficientNet,
    chosen by ``network``), concatenated (optionally with a tiled goal-condition
    vector), passed through small 1x1-conv branch heads (pushnet/graspnet/
    placenet from ``trunk_net``), then rotated back and upsampled to input
    resolution with ``F.grid_sample`` + ``nn.Upsample``.
    """
    def __init__(self, use_cuda=True, goal_condition_len=0, place=False, network='efficientnet', use_vector_block=False, pretrained=True, align_corners=False, num_dilation=1, num_rotations=16): # , snapshot=None
        """Construct trunks and branch heads.

        # Arguments
            use_cuda: move the whole model to the GPU after construction.
            goal_condition_len: length of the goal vector concatenated into
                the branch inputs (0 disables goal conditioning).
            place: also build the place branch (block-stacking experiments).
            network: 'efficientnet' or 'densenet' trunk family; falls back to
                densenet when efficientnet_pytorch failed to import.
            use_vector_block: pass the goal vector through a small MLP
                (``vector_block``) instead of tiling it raw.
            pretrained: load ImageNet-pretrained trunk weights.
            align_corners: forwarded to affine_grid/grid_sample/Upsample.
            num_dilation: number of dilated stages appended to EfficientNet
                (requires the ahundt fork; 0 uses the stock model).
            num_rotations: number of discrete image rotations evaluated.
        """
        super(PixelNet, self).__init__()
        self.use_cuda = use_cuda
        self.place = place
        self.use_vector_block = use_vector_block
        # upsample factor from trunk feature resolution back to input resolution;
        # reduced below when dilation shrinks the downsampling of the trunk
        self.upsample_scale = 16
        self.num_rotations = num_rotations
        self.network = network
        self.align_corners = align_corners
        if self.use_vector_block:
            channels_out = 2048
            self.push_vector_block = vector_block('push', goal_condition_len, channels_out=channels_out)
            self.grasp_vector_block = vector_block('grasp', goal_condition_len, channels_out=channels_out)
            if place:
                self.place_vector_block = vector_block('place', goal_condition_len, channels_out=channels_out)
            # TODO(ahundt) this variable overwrite is confusing, write the code better
            goal_condition_len = channels_out
        if network == 'densenet' or efficientnet_pytorch is None:
            # Initialize network trunks with DenseNet pre-trained on ImageNet
            # (separate color/depth trunks per branch)
            self.push_color_trunk = torchvision.models.densenet.densenet121(pretrained=pretrained)
            self.push_depth_trunk = torchvision.models.densenet.densenet121(pretrained=pretrained)
            self.grasp_color_trunk = torchvision.models.densenet.densenet121(pretrained=pretrained)
            self.grasp_depth_trunk = torchvision.models.densenet.densenet121(pretrained=pretrained)
            # placenet tests block stacking
            if self.place:
                self.place_color_trunk = torchvision.models.densenet.densenet121(pretrained=pretrained)
                self.place_depth_trunk = torchvision.models.densenet.densenet121(pretrained=pretrained)
            fc_channels = 2048
            second_fc_channels = 64
        else:
            # how many dilations to do at the end of the network
            # num_dilation = 1
            if num_dilation == 0:
                # stock EfficientNet-b0: push gets its own trunk, grasp (and
                # place) share image_trunk
                if pretrained:
                    self.image_trunk = EfficientNet.from_pretrained('efficientnet-b0')
                    self.push_trunk = EfficientNet.from_pretrained('efficientnet-b0')
                else:
                    self.image_trunk = EfficientNet.from_name('efficientnet-b0')
                    self.push_trunk = EfficientNet.from_name('efficientnet-b0')
            else:
                # Initialize network trunks with DenseNet pre-trained on ImageNet
                try:
                    # num_dilation kwarg only exists in the ahundt fork; the
                    # stock package raises here and we fall back below
                    if pretrained:
                        self.image_trunk = EfficientNet.from_pretrained('efficientnet-b0', num_dilation=num_dilation)
                        self.push_trunk = EfficientNet.from_pretrained('efficientnet-b0', num_dilation=num_dilation)
                    else:
                        self.image_trunk = EfficientNet.from_name('efficientnet-b0', num_dilation=num_dilation)
                        self.push_trunk = EfficientNet.from_name('efficientnet-b0', num_dilation=num_dilation)
                    print('DILATED EfficientNet models created, num_dilation: ' + str(num_dilation))
                except:
                    print('WARNING: Could not dilate, try installing https://github.com/ahundt/EfficientNet-PyTorch '
                          'instead of the original efficientnet pytorch')
                    num_dilation = 0
                    if pretrained:
                        self.image_trunk = EfficientNet.from_pretrained('efficientnet-b0')
                        self.push_trunk = EfficientNet.from_pretrained('efficientnet-b0')
                    else:
                        self.image_trunk = EfficientNet.from_name('efficientnet-b0')
                        self.push_trunk = EfficientNet.from_name('efficientnet-b0')
            # how much will the dilations affect the upsample step
            self.upsample_scale = self.upsample_scale / 2 ** num_dilation
            fc_channels = 1280 * 2
            # second_fc_channels = None
            second_fc_channels = 64
        # Construct network branches for pushing and grasping
        self.pushnet = trunk_net('push', fc_channels, second_fc_channels, goal_condition_len, 1)
        self.graspnet = trunk_net('grasp', fc_channels, second_fc_channels, goal_condition_len, 1)
        # placenet tests block stacking
        if place:
            self.placenet = trunk_net('place', fc_channels, second_fc_channels, goal_condition_len, 1)
        init_trunk_weights(self)
        if self.use_cuda:
            self.cuda()

    def forward(self, input_color_data, input_depth_data, is_volatile=False, specific_rotation=-1, goal_condition=None, keep_action_feat=False, use_demo=False):
        """Evaluate all rotations (is_volatile) or one (specific_rotation).

        # Arguments
            input_color_data, input_depth_data: BCHW image tensors.
            is_volatile: when True, run every rotation under torch.no_grad().
            specific_rotation: rotation index used when is_volatile is False.
            goal_condition: optional goal vector tiled into branch features.
            keep_action_feat: also return the pre-final-layer branch features.
            use_demo: with keep_action_feat, put the stripped branch output
                directly into output_prob instead of applying the final layer.

        # Returns
            (output_prob, interm_feat, output_prob_feat) — lists with one
            entry per evaluated rotation; output_prob_feat may stay empty.
        """
        if goal_condition is not None:
            # TODO(ahundt) is there a better place for this? Is doing this before is_volatile sloppy?
            if self.use_cuda:
                goal_condition = torch.tensor(goal_condition).float().cuda()
            else:
                goal_condition = torch.tensor(goal_condition).float()
        tiled_goal_condition = None
        if is_volatile:
            output_prob = []
            interm_feat = []
            output_prob_feat = []
            with torch.no_grad():
                # if we want to keep action features, strip last layer of push/grasp/placenet
                if keep_action_feat:
                    pushnet = self.pushnet[:-1]
                    graspnet = self.graspnet[:-1]
                    if self.place:
                        placenet = self.placenet[:-1]
                else:
                    pushnet = self.pushnet
                    graspnet = self.graspnet
                    if self.place:
                        placenet = self.placenet
                # store the final layer of each network
                final_layer_push = self.pushnet[-1]
                final_layer_grasp = self.graspnet[-1]
                if self.place:
                    final_layer_place = self.placenet[-1]
                # Apply rotations to images
                for rotate_idx in range(self.num_rotations):
                    rotate_theta = np.radians(rotate_idx*(360/self.num_rotations))
                    # Compute sample grid for rotation BEFORE neural network
                    interm_push_feat, interm_grasp_feat, interm_place_feat, tiled_goal_condition = self.layers_forward(rotate_theta,
                        input_color_data, input_depth_data, goal_condition, tiled_goal_condition)
                    if self.place:
                        interm_feat.append([interm_push_feat, interm_grasp_feat, interm_place_feat])
                    else:
                        interm_feat.append([interm_push_feat, interm_grasp_feat])
                    # Compute sample grid for rotation AFTER branches
                    affine_mat_after = rot_to_affine_mat(rotate_theta)
                    if self.use_cuda:
                        flow_grid_after = F.affine_grid(Variable(affine_mat_after, requires_grad=False).cuda(), interm_push_feat.data.size(), align_corners=self.align_corners)
                    else:
                        flow_grid_after = F.affine_grid(Variable(affine_mat_after, requires_grad=False), interm_push_feat.data.size(), align_corners=self.align_corners)
                    # this is the case where we need to return both the action embedding and softmax-ed action mask
                    if keep_action_feat and not use_demo:
                        push_action_feat = pushnet(interm_push_feat)
                        grasp_action_feat = graspnet(interm_grasp_feat)
                        if self.place:
                            place_action_feat = placenet(interm_place_feat)
                            # append upsampled mask to output_prob_feat
                            output_prob_feat.append([nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                     align_corners=self.align_corners).forward(F.grid_sample(push_action_feat,
                                                     flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                                     nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                     align_corners=self.align_corners).forward(F.grid_sample(grasp_action_feat,
                                                     flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                                     nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                     align_corners=self.align_corners).forward(F.grid_sample(place_action_feat,
                                                     flow_grid_after, mode='nearest', align_corners=self.align_corners))])
                            # append softmax-ed mask to output_prob
                            output_prob.append([nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                align_corners=self.align_corners).forward(F.grid_sample(final_layer_push(push_action_feat),
                                                flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                                nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                align_corners=self.align_corners).forward(F.grid_sample(final_layer_grasp(grasp_action_feat),
                                                flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                                nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                align_corners=self.align_corners).forward(F.grid_sample(final_layer_place(place_action_feat),
                                                flow_grid_after, mode='nearest', align_corners=self.align_corners))])
                        else:
                            # append upsampled mask to output_prob_feat
                            output_prob_feat.append([nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                     align_corners=self.align_corners).forward(F.grid_sample(push_action_feat,
                                                     flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                                     nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                     align_corners=self.align_corners).forward(F.grid_sample(grasp_action_feat,
                                                     flow_grid_after, mode='nearest', align_corners=self.align_corners))])
                            # upsample output_prob
                            output_prob.append([nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                align_corners=self.align_corners).forward(F.grid_sample(final_layer_push(push_action_feat),
                                                flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                                nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                align_corners=self.align_corners).forward(F.grid_sample(final_layer_grasp(grasp_action_feat),
                                                flow_grid_after, mode='nearest', align_corners=self.align_corners))])
                    # this is the case where we are either not keeping action features or not keeping final action mask
                    else:
                        # Forward pass through branches, undo rotation on output predictions, upsample results
                        push_action_feat = pushnet(interm_push_feat)
                        grasp_action_feat = graspnet(interm_grasp_feat)
                        # placenet tests block stacking
                        if self.place:
                            place_action_feat = placenet(interm_place_feat)
                            output_prob.append([nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                align_corners=self.align_corners).forward(F.grid_sample(push_action_feat,
                                                flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                                nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                align_corners=self.align_corners).forward(F.grid_sample(grasp_action_feat,
                                                flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                                nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                align_corners=self.align_corners).forward(F.grid_sample(place_action_feat,
                                                flow_grid_after, mode='nearest', align_corners=self.align_corners))])
                        else:
                            output_prob.append([nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                align_corners=self.align_corners).forward(F.grid_sample(push_action_feat,
                                                flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                                nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear',
                                                align_corners=self.align_corners).forward(F.grid_sample(grasp_action_feat,
                                                flow_grid_after, mode='nearest', align_corners=self.align_corners))])
            return output_prob, interm_feat, output_prob_feat
        else:
            output_prob = []
            interm_feat = []
            output_prob_feat = []
            # Apply rotations to intermediate features
            # for rotate_idx in range(self.num_rotations):
            rotate_idx = specific_rotation
            rotate_theta = np.radians(rotate_idx*(360/self.num_rotations))
            # Compute sample grid for rotation BEFORE branches
            interm_push_feat, interm_grasp_feat, interm_place_feat, tiled_goal_condition = self.layers_forward(rotate_theta, input_color_data, input_depth_data, goal_condition, tiled_goal_condition)
            if self.place:
                interm_feat.append([interm_push_feat, interm_grasp_feat, interm_place_feat])
            else:
                interm_feat.append([interm_push_feat, interm_grasp_feat])
            # Compute sample grid for rotation AFTER branches
            affine_mat_after = rot_to_affine_mat(rotate_theta, batch_size=input_color_data.size(0))
            if self.use_cuda:
                flow_grid_after = F.affine_grid(Variable(affine_mat_after, requires_grad=False).cuda(), interm_push_feat.data.size(), align_corners=self.align_corners)
            else:
                flow_grid_after = F.affine_grid(Variable(affine_mat_after, requires_grad=False), interm_push_feat.data.size(), align_corners=self.align_corners)
            # print('goal_condition: ' + str(goal_condition))
            # Forward pass through branches, undo rotation on output predictions, upsample results
            # placenet tests block stacking
            if self.place:
                output_prob.append([nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear', align_corners=self.align_corners).forward(F.grid_sample(self.pushnet(interm_push_feat), flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                    nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear', align_corners=self.align_corners).forward(F.grid_sample(self.graspnet(interm_grasp_feat), flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                    nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear', align_corners=self.align_corners).forward(F.grid_sample(self.placenet(interm_place_feat), flow_grid_after, mode='nearest', align_corners=self.align_corners))])
            else:
                output_prob.append([nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear', align_corners=self.align_corners).forward(F.grid_sample(self.pushnet(interm_push_feat), flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                    nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear', align_corners=self.align_corners).forward(F.grid_sample(self.graspnet(interm_grasp_feat), flow_grid_after, mode='nearest', align_corners=self.align_corners))])
            # print('output prob shapes: ' + str(self.output_prob[0][0].shape))
            return output_prob, interm_feat, output_prob_feat

    def layers_forward(self, rotate_theta, input_color_data, input_depth_data, goal_condition, tiled_goal_condition=None, requires_grad=True):
        """ Reduces the repetitive forward pass code across multiple model classes. See PixelNet forward() and responsive_net forward().

        Rotates the input color/depth images by -rotate_theta, runs them
        through the trunks, and concatenates color+depth (plus the optional
        goal condition) per branch.

        Returns (interm_push_feat, interm_grasp_feat, interm_place_feat,
        tiled_goal_condition); interm_place_feat is None when place is off,
        and tiled_goal_condition is cached so repeated calls skip re-tiling.
        """
        interm_place_feat = None
        # Compute sample grid for rotation BEFORE neural network
        affine_mat_before = rot_to_affine_mat(-rotate_theta, batch_size=input_color_data.size(0))
        if self.use_cuda:
            flow_grid_before = F.affine_grid(Variable(affine_mat_before, requires_grad=requires_grad).cuda(), input_color_data.size(), align_corners=self.align_corners)
        else:
            flow_grid_before = F.affine_grid(Variable(affine_mat_before, requires_grad=requires_grad), input_color_data.size(), align_corners=self.align_corners)
        # Rotate images clockwise
        if self.use_cuda:
            rotate_color = F.grid_sample(Variable(input_color_data).cuda(), flow_grid_before, mode='nearest', align_corners=self.align_corners)
            rotate_depth = F.grid_sample(Variable(input_depth_data).cuda(), flow_grid_before, mode='nearest', align_corners=self.align_corners)
        else:
            rotate_color = F.grid_sample(Variable(input_color_data), flow_grid_before, mode='nearest', align_corners=self.align_corners)
            rotate_depth = F.grid_sample(Variable(input_depth_data), flow_grid_before, mode='nearest', align_corners=self.align_corners)
        # Compute intermediate features
        if efficientnet_pytorch is None or self.network == 'densenet':
            # densenet
            interm_push_color_feat = self.push_color_trunk.features(rotate_color)
            interm_push_depth_feat = self.push_depth_trunk.features(rotate_depth)
            interm_grasp_color_feat = self.grasp_color_trunk.features(rotate_color)
            interm_grasp_depth_feat = self.grasp_depth_trunk.features(rotate_depth)
            # placenet tests block stacking
            if self.place:
                interm_place_color_feat = self.place_color_trunk.features(rotate_color)
                interm_place_depth_feat = self.place_depth_trunk.features(rotate_depth)
        else:
            # efficientnet
            interm_push_color_feat = self.push_trunk.extract_features(rotate_color)
            interm_push_depth_feat = self.push_trunk.extract_features(rotate_depth)
            interm_grasp_color_feat = self.image_trunk.extract_features(rotate_color)
            interm_grasp_depth_feat = self.image_trunk.extract_features(rotate_depth)
            # interm_grasp_color_feat = interm_push_color_feat
            # interm_grasp_depth_feat = interm_push_depth_feat
            # placenet tests block stacking
            if self.place:
                # place shares the grasp (image_trunk) features
                interm_place_color_feat = interm_grasp_color_feat
                interm_place_depth_feat = interm_grasp_depth_feat
        # Combine features, including the goal condition if appropriate
        if goal_condition is None:
            interm_push_feat = torch.cat((interm_push_color_feat, interm_push_depth_feat), dim=1)
            interm_grasp_feat = torch.cat((interm_grasp_color_feat, interm_grasp_depth_feat), dim=1)
            if self.place:
                interm_place_feat = torch.cat((interm_place_color_feat, interm_place_depth_feat), dim=1)
        else:
            if self.use_vector_block:
                # run the goal vector through per-branch MLPs, then tile
                push_goal_vec = tile_vector_as_image_channels_torch(self.push_vector_block(goal_condition), interm_push_color_feat.shape)
                grasp_goal_vec = tile_vector_as_image_channels_torch(self.grasp_vector_block(goal_condition), interm_push_color_feat.shape)
                interm_push_feat = torch.cat((interm_push_color_feat, interm_push_depth_feat, push_goal_vec), dim=1)
                interm_grasp_feat = torch.cat((interm_grasp_color_feat, interm_grasp_depth_feat, grasp_goal_vec), dim=1)
                if self.place:
                    place_goal_vec = tile_vector_as_image_channels_torch(self.place_vector_block(goal_condition), interm_push_color_feat.shape)
                    interm_place_feat = torch.cat((interm_place_color_feat, interm_place_depth_feat, place_goal_vec), dim=1)
            else:
                if tiled_goal_condition is None:
                    # This is part of a big for loop, but tiling only needs to be done once.
                    # Sorry that this code is a bit confusing, but we need the shape of the output of interm_*_color_feat
                    tiled_goal_condition = tile_vector_as_image_channels_torch(goal_condition, interm_push_color_feat.shape)
                interm_push_feat = torch.cat((interm_push_color_feat, interm_push_depth_feat, tiled_goal_condition), dim=1)
                interm_grasp_feat = torch.cat((interm_grasp_color_feat, interm_grasp_depth_feat, tiled_goal_condition), dim=1)
                if self.place:
                    interm_place_feat = torch.cat((interm_place_color_feat, interm_place_depth_feat, tiled_goal_condition), dim=1)
        return interm_push_feat, interm_grasp_feat, interm_place_feat, tiled_goal_condition

    def transfer_grasp_to_place(self):
        """Copy grasp-branch weights into the place branch.

        Densenet trunks are copied state-dict to state-dict; the branch head
        is copied by renaming the 'grasp' keys to 'place'. The efficientnet
        trunk is already shared between grasp and place, so only the head
        needs transferring in that case.
        """
        if self.network == 'densenet' or efficientnet_pytorch is None:
            # placenet tests block stacking
            if self.place:
                self.place_color_trunk.load_state_dict(self.grasp_color_trunk.state_dict())
                self.place_depth_trunk.load_state_dict(self.grasp_depth_trunk.state_dict())
            # NOTE(review): these locals are never used — presumably leftover
            # from a copy/paste of __init__; confirm before removing.
            fc_channels = 2048
            second_fc_channels = 64
        # The push and place efficientnet model is shared, so we don't need to transfer that.
        if self.place:
            # we rename the dictionary names of the grasp weights to place, then load them into the placenet
            self.placenet.load_state_dict(dict(map(lambda t: (t[0].replace('grasp', 'place'), t[1]), self.graspnet.state_dict().items())))
class reinforcement_net(nn.Module):
    """Older reinforcement-learning variant of PixelNet with a fixed 16-rotation sweep.

    Construction mirrors PixelNet (densenet/efficientnet trunks plus
    ``trunk_net`` branch heads), but forward() inlines the densenet feature
    extraction directly (it uses self.*_color_trunk/.features, which only
    exist when the densenet path was taken) and returns only
    (output_prob, interm_feat). The ``goal_condition`` argument is accepted
    but unused in this forward pass.
    """
    def __init__(self, use_cuda=True, goal_condition_len=0, place=False, network='densenet', use_vector_block=False, pretrained=True, align_corners=False, num_dilation=1): # , snapshot=None
        """Construct trunks and branch heads; see PixelNet.__init__ for the
        meaning of each argument (num_rotations is fixed at 16 here)."""
        super(reinforcement_net, self).__init__()
        # super(PixelNet, self).__init__()
        self.use_cuda = use_cuda
        self.place = place
        self.use_vector_block = use_vector_block
        # upsample factor from trunk feature resolution back to input resolution
        self.upsample_scale = 16
        self.num_rotations = 16
        self.network = network
        self.align_corners = align_corners
        if self.use_vector_block:
            channels_out = 2048
            self.push_vector_block = vector_block('push', goal_condition_len, channels_out=channels_out)
            self.grasp_vector_block = vector_block('grasp', goal_condition_len, channels_out=channels_out)
            if place:
                self.place_vector_block = vector_block('place', goal_condition_len, channels_out=channels_out)
            # TODO(ahundt) this variable overwrite is confusing, write the code better
            goal_condition_len = channels_out
        if network == 'densenet' or efficientnet_pytorch is None:
            # Initialize network trunks with DenseNet pre-trained on ImageNet
            self.push_color_trunk = torchvision.models.densenet.densenet121(pretrained=pretrained)
            self.push_depth_trunk = torchvision.models.densenet.densenet121(pretrained=pretrained)
            self.grasp_color_trunk = torchvision.models.densenet.densenet121(pretrained=pretrained)
            self.grasp_depth_trunk = torchvision.models.densenet.densenet121(pretrained=pretrained)
            # placenet tests block stacking
            if self.place:
                self.place_color_trunk = torchvision.models.densenet.densenet121(pretrained=pretrained)
                self.place_depth_trunk = torchvision.models.densenet.densenet121(pretrained=pretrained)
            fc_channels = 2048
            second_fc_channels = 64
        else:
            # how many dilations to do at the end of the network
            # num_dilation = 1
            if num_dilation == 0:
                if pretrained:
                    self.image_trunk = EfficientNet.from_pretrained('efficientnet-b0')
                    self.push_trunk = EfficientNet.from_pretrained('efficientnet-b0')
                else:
                    self.image_trunk = EfficientNet.from_name('efficientnet-b0')
                    self.push_trunk = EfficientNet.from_name('efficientnet-b0')
            else:
                # Initialize network trunks with DenseNet pre-trained on ImageNet
                try:
                    # num_dilation kwarg only exists in the ahundt fork; the
                    # stock package raises here and we fall back below
                    if pretrained:
                        self.image_trunk = EfficientNet.from_pretrained('efficientnet-b0', num_dilation=num_dilation)
                        self.push_trunk = EfficientNet.from_pretrained('efficientnet-b0', num_dilation=num_dilation)
                    else:
                        self.image_trunk = EfficientNet.from_name('efficientnet-b0', num_dilation=num_dilation)
                        self.push_trunk = EfficientNet.from_name('efficientnet-b0', num_dilation=num_dilation)
                    print('DILATED EfficientNet models created, num_dilation: ' + str(num_dilation))
                except:
                    print('WARNING: Could not dilate, try installing https://github.com/ahundt/EfficientNet-PyTorch '
                          'instead of the original efficientnet pytorch')
                    num_dilation = 0
                    if pretrained:
                        self.image_trunk = EfficientNet.from_pretrained('efficientnet-b0')
                        self.push_trunk = EfficientNet.from_pretrained('efficientnet-b0')
                    else:
                        self.image_trunk = EfficientNet.from_name('efficientnet-b0')
                        self.push_trunk = EfficientNet.from_name('efficientnet-b0')
            # how much will the dilations affect the upsample step
            self.upsample_scale = self.upsample_scale / 2 ** num_dilation
            fc_channels = 1280 * 2
            # second_fc_channels = None
            second_fc_channels = 64
        # Construct network branches for pushing and grasping
        self.pushnet = trunk_net('push', fc_channels, second_fc_channels, goal_condition_len, 1)
        self.graspnet = trunk_net('grasp', fc_channels, second_fc_channels, goal_condition_len, 1)
        # placenet tests block stacking
        if place:
            self.placenet = trunk_net('place', fc_channels, second_fc_channels, goal_condition_len, 1)
        init_trunk_weights(self)
        if self.use_cuda:
            self.cuda()

    def forward(self, input_color_data, input_depth_data, is_volatile=False, specific_rotation=-1, goal_condition=None):
        """Evaluate all 16 rotations (is_volatile) or one (specific_rotation).

        Returns (output_prob, interm_feat): per-rotation lists of branch
        score maps (push, grasp[, place]) and of concatenated trunk features.
        NOTE(review): ``Variable(..., volatile=True)`` below is legacy PyTorch
        API; under modern PyTorch the flag is ignored and torch.no_grad()
        does the work.
        """
        if is_volatile:
            with torch.no_grad():
                output_prob = []
                interm_feat = []
                # Apply rotations to images
                for rotate_idx in range(self.num_rotations):
                    rotate_theta = np.radians(rotate_idx*(360/self.num_rotations))
                    # Compute sample grid for rotation BEFORE neural network
                    affine_mat_before = rot_to_affine_mat(-rotate_theta, batch_size=input_color_data.size(0))
                    if self.use_cuda:
                        flow_grid_before = F.affine_grid(Variable(affine_mat_before, requires_grad=False).cuda(), input_color_data.size())
                    else:
                        flow_grid_before = F.affine_grid(Variable(affine_mat_before, requires_grad=False), input_color_data.size())
                    # Rotate images clockwise
                    if self.use_cuda:
                        rotate_color = F.grid_sample(Variable(input_color_data, volatile=True).cuda(), flow_grid_before, mode='nearest')
                        rotate_depth = F.grid_sample(Variable(input_depth_data, volatile=True).cuda(), flow_grid_before, mode='nearest')
                    else:
                        rotate_color = F.grid_sample(Variable(input_color_data, volatile=True), flow_grid_before, mode='nearest')
                        rotate_depth = F.grid_sample(Variable(input_depth_data, volatile=True), flow_grid_before, mode='nearest')
                    # Compute intermediate features
                    interm_push_color_feat = self.push_color_trunk.features(rotate_color)
                    interm_push_depth_feat = self.push_depth_trunk.features(rotate_depth)
                    interm_push_feat = torch.cat((interm_push_color_feat, interm_push_depth_feat), dim=1)
                    interm_grasp_color_feat = self.grasp_color_trunk.features(rotate_color)
                    interm_grasp_depth_feat = self.grasp_depth_trunk.features(rotate_depth)
                    interm_grasp_feat = torch.cat((interm_grasp_color_feat, interm_grasp_depth_feat), dim=1)
                    part_interm_feat = [interm_push_feat, interm_grasp_feat]
                    if self.place:
                        interm_place_color_feat = self.place_color_trunk.features(rotate_color)
                        interm_place_depth_feat = self.place_depth_trunk.features(rotate_depth)
                        interm_place_feat = torch.cat((interm_place_color_feat, interm_place_depth_feat), dim=1)
                        part_interm_feat += [interm_place_feat]
                    interm_feat.append(part_interm_feat)
                    # Compute sample grid for rotation AFTER branches
                    affine_mat_after = rot_to_affine_mat(rotate_theta, batch_size=input_color_data.size(0))
                    if self.use_cuda:
                        flow_grid_after = F.affine_grid(Variable(affine_mat_after, requires_grad=False).cuda(), interm_push_feat.data.size())
                    else:
                        flow_grid_after = F.affine_grid(Variable(affine_mat_after, requires_grad=False), interm_push_feat.data.size())
                    # Forward pass through branches, undo rotation on output predictions, upsample results
                    part_output_prob = [nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear', align_corners=self.align_corners).forward(F.grid_sample(self.pushnet(interm_push_feat), flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                        nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear', align_corners=self.align_corners).forward(F.grid_sample(self.graspnet(interm_grasp_feat), flow_grid_after, mode='nearest', align_corners=self.align_corners))]
                    if self.place:
                        part_output_prob += [nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear', align_corners=self.align_corners).forward(F.grid_sample(self.placenet(interm_place_feat), flow_grid_after, mode='nearest', align_corners=self.align_corners))]
                    # Forward pass through branches, undo rotation on output predictions, upsample results
                    output_prob.append(part_output_prob)
            return output_prob, interm_feat
        else:
            output_prob = []
            interm_feat = []
            # Apply rotations to intermediate features
            # for rotate_idx in range(self.num_rotations):
            rotate_idx = specific_rotation
            rotate_theta = np.radians(rotate_idx*(360/self.num_rotations))
            # Compute sample grid for rotation BEFORE branches
            affine_mat_before = rot_to_affine_mat(-rotate_theta, batch_size=input_color_data.size(0))
            if self.use_cuda:
                flow_grid_before = F.affine_grid(Variable(affine_mat_before, requires_grad=False).cuda(), input_color_data.size())
            else:
                flow_grid_before = F.affine_grid(Variable(affine_mat_before, requires_grad=False), input_color_data.size())
            # Rotate images clockwise
            if self.use_cuda:
                rotate_color = F.grid_sample(Variable(input_color_data, requires_grad=False).cuda(), flow_grid_before, mode='nearest')
                rotate_depth = F.grid_sample(Variable(input_depth_data, requires_grad=False).cuda(), flow_grid_before, mode='nearest')
            else:
                rotate_color = F.grid_sample(Variable(input_color_data, requires_grad=False), flow_grid_before, mode='nearest')
                rotate_depth = F.grid_sample(Variable(input_depth_data, requires_grad=False), flow_grid_before, mode='nearest')
            # Compute intermediate features
            interm_push_color_feat = self.push_color_trunk.features(rotate_color)
            interm_push_depth_feat = self.push_depth_trunk.features(rotate_depth)
            interm_push_feat = torch.cat((interm_push_color_feat, interm_push_depth_feat), dim=1)
            interm_grasp_color_feat = self.grasp_color_trunk.features(rotate_color)
            interm_grasp_depth_feat = self.grasp_depth_trunk.features(rotate_depth)
            interm_grasp_feat = torch.cat((interm_grasp_color_feat, interm_grasp_depth_feat), dim=1)
            part_interm_feat = [interm_push_feat, interm_grasp_feat]
            if self.place:
                interm_place_color_feat = self.place_color_trunk.features(rotate_color)
                interm_place_depth_feat = self.place_depth_trunk.features(rotate_depth)
                interm_place_feat = torch.cat((interm_place_color_feat, interm_place_depth_feat), dim=1)
                part_interm_feat += [interm_place_feat]
            interm_feat.append(part_interm_feat)
            # Compute sample grid for rotation AFTER branches
            affine_mat_after = rot_to_affine_mat(rotate_theta, batch_size=input_color_data.size(0))
            if self.use_cuda:
                flow_grid_after = F.affine_grid(Variable(affine_mat_after, requires_grad=False).cuda(), interm_push_feat.data.size())
            else:
                flow_grid_after = F.affine_grid(Variable(affine_mat_after, requires_grad=False), interm_push_feat.data.size())
            part_output_prob = [nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear', align_corners=self.align_corners).forward(F.grid_sample(self.pushnet(interm_push_feat), flow_grid_after, mode='nearest', align_corners=self.align_corners)),
                                nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear', align_corners=self.align_corners).forward(F.grid_sample(self.graspnet(interm_grasp_feat), flow_grid_after, mode='nearest', align_corners=self.align_corners))]
            if self.place:
                part_output_prob += [nn.Upsample(scale_factor=self.upsample_scale, mode='bilinear', align_corners=self.align_corners).forward(F.grid_sample(self.placenet(interm_place_feat), flow_grid_after, mode='nearest', align_corners=self.align_corners))]
            # Forward pass through branches, undo rotation on output predictions, upsample results
            output_prob.append(part_output_prob)
            return output_prob, interm_feat
| [
"torch.cat",
"numpy.sin",
"numpy.tile",
"torchvision.models.densenet.densenet121",
"torch.no_grad",
"torch.nn.init.kaiming_normal_",
"torch.nn.functional.grid_sample",
"torch.nn.Upsample",
"torch.nn.Linear",
"numpy.radians",
"efficientnet_pytorch.EfficientNet.from_pretrained",
"torch.autograd.... | [((4703, 4752), 'numpy.tile', 'np.tile', (['affine_mat_after[np.newaxis]', 'batch_size'], {}), '(affine_mat_after[np.newaxis], batch_size)\n', (4710, 4752), True, 'import numpy as np\n'), ((6176, 6238), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (6215, 6238), False, 'import torchvision\n'), ((6275, 6337), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (6314, 6337), False, 'import torchvision\n'), ((6375, 6437), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (6414, 6437), False, 'import torchvision\n'), ((6475, 6537), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (6514, 6537), False, 'import torchvision\n'), ((18747, 18798), 'numpy.radians', 'np.radians', (['(rotate_idx * (360 / self.num_rotations))'], {}), '(rotate_idx * (360 / self.num_rotations))\n', (18757, 18798), True, 'import numpy as np\n'), ((24556, 24622), 'torch.cat', 'torch.cat', (['(interm_push_color_feat, interm_push_depth_feat)'], {'dim': '(1)'}), '((interm_push_color_feat, interm_push_depth_feat), dim=1)\n', (24565, 24622), False, 'import torch\n'), ((24655, 24723), 'torch.cat', 'torch.cat', (['(interm_grasp_color_feat, interm_grasp_depth_feat)'], {'dim': '(1)'}), '((interm_grasp_color_feat, interm_grasp_depth_feat), dim=1)\n', (24664, 24723), False, 'import torch\n'), ((28732, 28794), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (28771, 28794), False, 'import torchvision\n'), ((28831, 28893), 'torchvision.models.densenet.densenet121', 
'torchvision.models.densenet.densenet121', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (28870, 28893), False, 'import torchvision\n'), ((28931, 28993), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (28970, 28993), False, 'import torchvision\n'), ((29031, 29093), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (29070, 29093), False, 'import torchvision\n'), ((36826, 36877), 'numpy.radians', 'np.radians', (['(rotate_idx * (360 / self.num_rotations))'], {}), '(rotate_idx * (360 / self.num_rotations))\n', (36836, 36877), True, 'import numpy as np\n'), ((38196, 38262), 'torch.cat', 'torch.cat', (['(interm_push_color_feat, interm_push_depth_feat)'], {'dim': '(1)'}), '((interm_push_color_feat, interm_push_depth_feat), dim=1)\n', (38205, 38262), False, 'import torch\n'), ((38463, 38531), 'torch.cat', 'torch.cat', (['(interm_grasp_color_feat, interm_grasp_depth_feat)'], {'dim': '(1)'}), '((interm_grasp_color_feat, interm_grasp_depth_feat), dim=1)\n', (38472, 38531), False, 'import torch\n'), ((4319, 4360), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m[1].weight.data'], {}), '(m[1].weight.data)\n', (4342, 4360), True, 'import torch.nn as nn\n'), ((4582, 4602), 'numpy.cos', 'np.cos', (['rotate_theta'], {}), '(rotate_theta)\n', (4588, 4602), True, 'import numpy as np\n'), ((4604, 4624), 'numpy.sin', 'np.sin', (['rotate_theta'], {}), '(rotate_theta)\n', (4610, 4624), True, 'import numpy as np\n'), ((4653, 4673), 'numpy.cos', 'np.cos', (['rotate_theta'], {}), '(rotate_theta)\n', (4659, 4673), True, 'import numpy as np\n'), ((6651, 6713), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (6690, 6713), False, 'import torchvision\n'), 
((6755, 6817), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (6794, 6817), False, 'import torchvision\n'), ((10243, 10258), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10256, 10258), False, 'import torch\n'), ((22237, 22293), 'torch.autograd.Variable', 'Variable', (['affine_mat_before'], {'requires_grad': 'requires_grad'}), '(affine_mat_before, requires_grad=requires_grad)\n', (22245, 22293), False, 'from torch.autograd import Variable\n'), ((22758, 22784), 'torch.autograd.Variable', 'Variable', (['input_color_data'], {}), '(input_color_data)\n', (22766, 22784), False, 'from torch.autograd import Variable\n'), ((22895, 22921), 'torch.autograd.Variable', 'Variable', (['input_depth_data'], {}), '(input_depth_data)\n', (22903, 22921), False, 'from torch.autograd import Variable\n'), ((24787, 24855), 'torch.cat', 'torch.cat', (['(interm_place_color_feat, interm_place_depth_feat)'], {'dim': '(1)'}), '((interm_place_color_feat, interm_place_depth_feat), dim=1)\n', (24796, 24855), False, 'import torch\n'), ((25222, 25307), 'torch.cat', 'torch.cat', (['(interm_push_color_feat, interm_push_depth_feat, push_goal_vec)'], {'dim': '(1)'}), '((interm_push_color_feat, interm_push_depth_feat, push_goal_vec),\n dim=1)\n', (25231, 25307), False, 'import torch\n'), ((25340, 25429), 'torch.cat', 'torch.cat', (['(interm_grasp_color_feat, interm_grasp_depth_feat, grasp_goal_vec)'], {'dim': '(1)'}), '((interm_grasp_color_feat, interm_grasp_depth_feat, grasp_goal_vec\n ), dim=1)\n', (25349, 25429), False, 'import torch\n'), ((26168, 26260), 'torch.cat', 'torch.cat', (['(interm_push_color_feat, interm_push_depth_feat, tiled_goal_condition)'], {'dim': '(1)'}), '((interm_push_color_feat, interm_push_depth_feat,\n tiled_goal_condition), dim=1)\n', (26177, 26260), False, 'import torch\n'), ((26293, 26387), 'torch.cat', 'torch.cat', (['(interm_grasp_color_feat, 
interm_grasp_depth_feat, tiled_goal_condition)'], {'dim': '(1)'}), '((interm_grasp_color_feat, interm_grasp_depth_feat,\n tiled_goal_condition), dim=1)\n', (26302, 26387), False, 'import torch\n'), ((29207, 29269), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (29246, 29269), False, 'import torchvision\n'), ((29311, 29373), 'torchvision.models.densenet.densenet121', 'torchvision.models.densenet.densenet121', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (29350, 29373), False, 'import torchvision\n'), ((32297, 32312), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (32310, 32312), False, 'import torch\n'), ((38840, 38908), 'torch.cat', 'torch.cat', (['(interm_place_color_feat, interm_place_depth_feat)'], {'dim': '(1)'}), '((interm_place_color_feat, interm_place_depth_feat), dim=1)\n', (38849, 38908), False, 'import torch\n'), ((2462, 2486), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['first_fc'], {}), '(first_fc)\n', (2476, 2486), True, 'import torch.nn as nn\n'), ((2519, 2540), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2526, 2540), True, 'import torch.nn as nn\n'), ((2573, 2640), 'torch.nn.Conv2d', 'nn.Conv2d', (['first_fc', 'second_fc'], {'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(first_fc, second_fc, kernel_size=1, stride=1, bias=False)\n', (2582, 2640), True, 'import torch.nn as nn\n'), ((2673, 2698), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['second_fc'], {}), '(second_fc)\n', (2687, 2698), True, 'import torch.nn as nn\n'), ((2731, 2752), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2738, 2752), True, 'import torch.nn as nn\n'), ((2785, 2856), 'torch.nn.Conv2d', 'nn.Conv2d', (['second_fc', 'channels_out'], {'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(second_fc, channels_out, kernel_size=1, stride=1, bias=False)\n', (2794, 2856), 
True, 'import torch.nn as nn\n'), ((3110, 3157), 'torch.nn.Linear', 'nn.Linear', (['channels_in', 'fc_channels'], {'bias': '(False)'}), '(channels_in, fc_channels, bias=False)\n', (3119, 3157), True, 'import torch.nn as nn\n'), ((3202, 3223), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3209, 3223), True, 'import torch.nn as nn\n'), ((3435, 3483), 'torch.nn.Linear', 'nn.Linear', (['fc_channels', 'channels_out'], {'bias': '(False)'}), '(fc_channels, channels_out, bias=False)\n', (3444, 3483), True, 'import torch.nn as nn\n'), ((3528, 3549), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3535, 3549), True, 'import torch.nn as nn\n'), ((4631, 4651), 'numpy.sin', 'np.sin', (['rotate_theta'], {}), '(rotate_theta)\n', (4637, 4651), True, 'import numpy as np\n'), ((4822, 4856), 'torch.from_numpy', 'torch.from_numpy', (['affine_mat_after'], {}), '(affine_mat_after)\n', (4838, 4856), False, 'import torch\n'), ((7099, 7146), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (7127, 7146), False, 'from efficientnet_pytorch import EfficientNet\n'), ((7185, 7232), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (7213, 7232), False, 'from efficientnet_pytorch import EfficientNet\n'), ((7294, 7335), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (7316, 7335), False, 'from efficientnet_pytorch import EfficientNet\n'), ((7374, 7415), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (7396, 7415), False, 'from efficientnet_pytorch import EfficientNet\n'), ((11165, 11216), 'numpy.radians', 'np.radians', (['(rotate_idx * (360 / self.num_rotations))'], {}), '(rotate_idx * (360 
/ self.num_rotations))\n', (11175, 11216), True, 'import numpy as np\n'), ((19698, 19745), 'torch.autograd.Variable', 'Variable', (['affine_mat_after'], {'requires_grad': '(False)'}), '(affine_mat_after, requires_grad=False)\n', (19706, 19745), False, 'from torch.autograd import Variable\n'), ((25640, 25729), 'torch.cat', 'torch.cat', (['(interm_place_color_feat, interm_place_depth_feat, place_goal_vec)'], {'dim': '(1)'}), '((interm_place_color_feat, interm_place_depth_feat, place_goal_vec\n ), dim=1)\n', (25649, 25729), False, 'import torch\n'), ((26455, 26549), 'torch.cat', 'torch.cat', (['(interm_place_color_feat, interm_place_depth_feat, tiled_goal_condition)'], {'dim': '(1)'}), '((interm_place_color_feat, interm_place_depth_feat,\n tiled_goal_condition), dim=1)\n', (26464, 26549), False, 'import torch\n'), ((29655, 29702), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (29683, 29702), False, 'from efficientnet_pytorch import EfficientNet\n'), ((29741, 29788), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (29769, 29788), False, 'from efficientnet_pytorch import EfficientNet\n'), ((29850, 29891), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (29872, 29891), False, 'from efficientnet_pytorch import EfficientNet\n'), ((29930, 29971), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (29952, 29971), False, 'from efficientnet_pytorch import EfficientNet\n'), ((32521, 32572), 'numpy.radians', 'np.radians', (['(rotate_idx * (360 / self.num_rotations))'], {}), '(rotate_idx * (360 / self.num_rotations))\n', (32531, 32572), True, 'import numpy as np\n'), ((34009, 34075), 'torch.cat', 'torch.cat', 
(['(interm_push_color_feat, interm_push_depth_feat)'], {'dim': '(1)'}), '((interm_push_color_feat, interm_push_depth_feat), dim=1)\n', (34018, 34075), False, 'import torch\n'), ((34300, 34368), 'torch.cat', 'torch.cat', (['(interm_grasp_color_feat, interm_grasp_depth_feat)'], {'dim': '(1)'}), '((interm_grasp_color_feat, interm_grasp_depth_feat), dim=1)\n', (34309, 34368), False, 'import torch\n'), ((37268, 37316), 'torch.autograd.Variable', 'Variable', (['affine_mat_before'], {'requires_grad': '(False)'}), '(affine_mat_before, requires_grad=False)\n', (37276, 37316), False, 'from torch.autograd import Variable\n'), ((37745, 37792), 'torch.autograd.Variable', 'Variable', (['input_color_data'], {'requires_grad': '(False)'}), '(input_color_data, requires_grad=False)\n', (37753, 37792), False, 'from torch.autograd import Variable\n'), ((37873, 37920), 'torch.autograd.Variable', 'Variable', (['input_depth_data'], {'requires_grad': '(False)'}), '(input_depth_data, requires_grad=False)\n', (37881, 37920), False, 'from torch.autograd import Variable\n'), ((39407, 39454), 'torch.autograd.Variable', 'Variable', (['affine_mat_after'], {'requires_grad': '(False)'}), '(affine_mat_after, requires_grad=False)\n', (39415, 39454), False, 'from torch.autograd import Variable\n'), ((7615, 7689), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {'num_dilation': 'num_dilation'}), "('efficientnet-b0', num_dilation=num_dilation)\n", (7643, 7689), False, 'from efficientnet_pytorch import EfficientNet\n'), ((7732, 7806), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {'num_dilation': 'num_dilation'}), "('efficientnet-b0', num_dilation=num_dilation)\n", (7760, 7806), False, 'from efficientnet_pytorch import EfficientNet\n'), ((7876, 7944), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {'num_dilation': 
'num_dilation'}), "('efficientnet-b0', num_dilation=num_dilation)\n", (7898, 7944), False, 'from efficientnet_pytorch import EfficientNet\n'), ((7987, 8055), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {'num_dilation': 'num_dilation'}), "('efficientnet-b0', num_dilation=num_dilation)\n", (8009, 8055), False, 'from efficientnet_pytorch import EfficientNet\n'), ((10036, 10064), 'torch.tensor', 'torch.tensor', (['goal_condition'], {}), '(goal_condition)\n', (10048, 10064), False, 'import torch\n'), ((22054, 22110), 'torch.autograd.Variable', 'Variable', (['affine_mat_before'], {'requires_grad': 'requires_grad'}), '(affine_mat_before, requires_grad=requires_grad)\n', (22062, 22110), False, 'from torch.autograd import Variable\n'), ((22456, 22482), 'torch.autograd.Variable', 'Variable', (['input_color_data'], {}), '(input_color_data)\n', (22464, 22482), False, 'from torch.autograd import Variable\n'), ((22600, 22626), 'torch.autograd.Variable', 'Variable', (['input_depth_data'], {}), '(input_depth_data)\n', (22608, 22626), False, 'from torch.autograd import Variable\n'), ((30171, 30245), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {'num_dilation': 'num_dilation'}), "('efficientnet-b0', num_dilation=num_dilation)\n", (30199, 30245), False, 'from efficientnet_pytorch import EfficientNet\n'), ((30288, 30362), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {'num_dilation': 'num_dilation'}), "('efficientnet-b0', num_dilation=num_dilation)\n", (30316, 30362), False, 'from efficientnet_pytorch import EfficientNet\n'), ((30432, 30500), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {'num_dilation': 'num_dilation'}), "('efficientnet-b0', num_dilation=num_dilation)\n", (30454, 30500), False, 'from efficientnet_pytorch import 
EfficientNet\n'), ((30543, 30611), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {'num_dilation': 'num_dilation'}), "('efficientnet-b0', num_dilation=num_dilation)\n", (30565, 30611), False, 'from efficientnet_pytorch import EfficientNet\n'), ((34717, 34785), 'torch.cat', 'torch.cat', (['(interm_place_color_feat, interm_place_depth_feat)'], {'dim': '(1)'}), '((interm_place_color_feat, interm_place_depth_feat), dim=1)\n', (34726, 34785), False, 'import torch\n'), ((39519, 39619), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (39530, 39619), True, 'import torch.nn as nn\n'), ((39771, 39871), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (39782, 39871), True, 'import torch.nn as nn\n'), ((8486, 8533), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (8514, 8533), False, 'from efficientnet_pytorch import EfficientNet\n'), ((8576, 8623), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (8604, 8623), False, 'from efficientnet_pytorch import EfficientNet\n'), ((8693, 8734), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (8715, 8734), False, 'from efficientnet_pytorch import EfficientNet\n'), ((8777, 8818), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (8799, 8818), False, 'from 
efficientnet_pytorch import EfficientNet\n'), ((12208, 12255), 'torch.autograd.Variable', 'Variable', (['affine_mat_after'], {'requires_grad': '(False)'}), '(affine_mat_after, requires_grad=False)\n', (12216, 12255), False, 'from torch.autograd import Variable\n'), ((19512, 19559), 'torch.autograd.Variable', 'Variable', (['affine_mat_after'], {'requires_grad': '(False)'}), '(affine_mat_after, requires_grad=False)\n', (19520, 19559), False, 'from torch.autograd import Variable\n'), ((31044, 31091), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (31072, 31091), False, 'from efficientnet_pytorch import EfficientNet\n'), ((31134, 31181), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (31162, 31181), False, 'from efficientnet_pytorch import EfficientNet\n'), ((31251, 31292), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (31273, 31292), False, 'from efficientnet_pytorch import EfficientNet\n'), ((31335, 31376), 'efficientnet_pytorch.EfficientNet.from_name', 'EfficientNet.from_name', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (31357, 31376), False, 'from efficientnet_pytorch import EfficientNet\n'), ((33017, 33065), 'torch.autograd.Variable', 'Variable', (['affine_mat_before'], {'requires_grad': '(False)'}), '(affine_mat_before, requires_grad=False)\n', (33025, 33065), False, 'from torch.autograd import Variable\n'), ((33530, 33571), 'torch.autograd.Variable', 'Variable', (['input_color_data'], {'volatile': '(True)'}), '(input_color_data, volatile=True)\n', (33538, 33571), False, 'from torch.autograd import Variable\n'), ((33660, 33701), 'torch.autograd.Variable', 'Variable', (['input_depth_data'], {'volatile': '(True)'}), '(input_depth_data, volatile=True)\n', (33668, 33701), False, 
'from torch.autograd import Variable\n'), ((35348, 35395), 'torch.autograd.Variable', 'Variable', (['affine_mat_after'], {'requires_grad': '(False)'}), '(affine_mat_after, requires_grad=False)\n', (35356, 35395), False, 'from torch.autograd import Variable\n'), ((37119, 37167), 'torch.autograd.Variable', 'Variable', (['affine_mat_before'], {'requires_grad': '(False)'}), '(affine_mat_before, requires_grad=False)\n', (37127, 37167), False, 'from torch.autograd import Variable\n'), ((37457, 37504), 'torch.autograd.Variable', 'Variable', (['input_color_data'], {'requires_grad': '(False)'}), '(input_color_data, requires_grad=False)\n', (37465, 37504), False, 'from torch.autograd import Variable\n'), ((37592, 37639), 'torch.autograd.Variable', 'Variable', (['input_depth_data'], {'requires_grad': '(False)'}), '(input_depth_data, requires_grad=False)\n', (37600, 37639), False, 'from torch.autograd import Variable\n'), ((39255, 39302), 'torch.autograd.Variable', 'Variable', (['affine_mat_after'], {'requires_grad': '(False)'}), '(affine_mat_after, requires_grad=False)\n', (39263, 39302), False, 'from torch.autograd import Variable\n'), ((40057, 40157), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (40068, 40157), True, 'import torch.nn as nn\n'), ((9941, 9969), 'torch.tensor', 'torch.tensor', (['goal_condition'], {}), '(goal_condition)\n', (9953, 9969), False, 'import torch\n'), ((20079, 20179), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (20090, 20179), True, 'import torch.nn as nn\n'), ((20336, 20436), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 
'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (20347, 20436), True, 'import torch.nn as nn\n'), ((20595, 20695), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (20606, 20695), True, 'import torch.nn as nn\n'), ((20872, 20972), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (20883, 20972), True, 'import torch.nn as nn\n'), ((21129, 21229), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (21140, 21229), True, 'import torch.nn as nn\n'), ((35575, 35675), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (35586, 35675), True, 'import torch.nn as nn\n'), ((35835, 35935), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (35846, 35935), True, 'import torch.nn as nn\n'), ((12006, 12053), 'torch.autograd.Variable', 'Variable', (['affine_mat_after'], {'requires_grad': '(False)'}), '(affine_mat_after, requires_grad=False)\n', (12014, 12053), False, 'from torch.autograd import Variable\n'), ((32852, 32900), 
'torch.autograd.Variable', 'Variable', (['affine_mat_before'], {'requires_grad': '(False)'}), '(affine_mat_before, requires_grad=False)\n', (32860, 32900), False, 'from torch.autograd import Variable\n'), ((33230, 33271), 'torch.autograd.Variable', 'Variable', (['input_color_data'], {'volatile': '(True)'}), '(input_color_data, volatile=True)\n', (33238, 33271), False, 'from torch.autograd import Variable\n'), ((33367, 33408), 'torch.autograd.Variable', 'Variable', (['input_depth_data'], {'volatile': '(True)'}), '(input_depth_data, volatile=True)\n', (33375, 33408), False, 'from torch.autograd import Variable\n'), ((35180, 35227), 'torch.autograd.Variable', 'Variable', (['affine_mat_after'], {'requires_grad': '(False)'}), '(affine_mat_after, requires_grad=False)\n', (35188, 35227), False, 'from torch.autograd import Variable\n'), ((36137, 36237), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (36148, 36237), True, 'import torch.nn as nn\n'), ((13014, 13116), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['push_action_feat', 'flow_grid_after'], {'mode': '"""nearest"""', 'align_corners': 'self.align_corners'}), "(push_action_feat, flow_grid_after, mode='nearest',\n align_corners=self.align_corners)\n", (13027, 13116), True, 'import torch.nn.functional as F\n'), ((13316, 13419), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['grasp_action_feat', 'flow_grid_after'], {'mode': '"""nearest"""', 'align_corners': 'self.align_corners'}), "(grasp_action_feat, flow_grid_after, mode='nearest',\n align_corners=self.align_corners)\n", (13329, 13419), True, 'import torch.nn.functional as F\n'), ((13619, 13722), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['place_action_feat', 'flow_grid_after'], {'mode': '"""nearest"""', 'align_corners': 'self.align_corners'}), 
"(place_action_feat, flow_grid_after, mode='nearest',\n align_corners=self.align_corners)\n", (13632, 13722), True, 'import torch.nn.functional as F\n'), ((15097, 15199), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['push_action_feat', 'flow_grid_after'], {'mode': '"""nearest"""', 'align_corners': 'self.align_corners'}), "(push_action_feat, flow_grid_after, mode='nearest',\n align_corners=self.align_corners)\n", (15110, 15199), True, 'import torch.nn.functional as F\n'), ((15399, 15502), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['grasp_action_feat', 'flow_grid_after'], {'mode': '"""nearest"""', 'align_corners': 'self.align_corners'}), "(grasp_action_feat, flow_grid_after, mode='nearest',\n align_corners=self.align_corners)\n", (15412, 15502), True, 'import torch.nn.functional as F\n'), ((17000, 17102), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['push_action_feat', 'flow_grid_after'], {'mode': '"""nearest"""', 'align_corners': 'self.align_corners'}), "(push_action_feat, flow_grid_after, mode='nearest',\n align_corners=self.align_corners)\n", (17013, 17102), True, 'import torch.nn.functional as F\n'), ((17302, 17405), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['grasp_action_feat', 'flow_grid_after'], {'mode': '"""nearest"""', 'align_corners': 'self.align_corners'}), "(grasp_action_feat, flow_grid_after, mode='nearest',\n align_corners=self.align_corners)\n", (17315, 17405), True, 'import torch.nn.functional as F\n'), ((17605, 17708), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['place_action_feat', 'flow_grid_after'], {'mode': '"""nearest"""', 'align_corners': 'self.align_corners'}), "(place_action_feat, flow_grid_after, mode='nearest',\n align_corners=self.align_corners)\n", (17618, 17708), True, 'import torch.nn.functional as F\n'), ((17955, 18057), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['push_action_feat', 'flow_grid_after'], {'mode': '"""nearest"""', 'align_corners': 'self.align_corners'}), 
"(push_action_feat, flow_grid_after, mode='nearest',\n align_corners=self.align_corners)\n", (17968, 18057), True, 'import torch.nn.functional as F\n'), ((18257, 18360), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['grasp_action_feat', 'flow_grid_after'], {'mode': '"""nearest"""', 'align_corners': 'self.align_corners'}), "(grasp_action_feat, flow_grid_after, mode='nearest',\n align_corners=self.align_corners)\n", (18270, 18360), True, 'import torch.nn.functional as F\n'), ((12877, 12977), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (12888, 12977), True, 'import torch.nn as nn\n'), ((13179, 13279), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (13190, 13279), True, 'import torch.nn as nn\n'), ((13482, 13582), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (13493, 13582), True, 'import torch.nn as nn\n'), ((13871, 13971), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (13882, 13971), True, 'import torch.nn as nn\n'), ((14191, 14291), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (14202, 14291), True, 
'import torch.nn as nn\n'), ((14513, 14613), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (14524, 14613), True, 'import torch.nn as nn\n'), ((14960, 15060), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (14971, 15060), True, 'import torch.nn as nn\n'), ((15262, 15362), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (15273, 15362), True, 'import torch.nn as nn\n'), ((15634, 15734), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (15645, 15734), True, 'import torch.nn as nn\n'), ((15954, 16054), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (15965, 16054), True, 'import torch.nn as nn\n'), ((16863, 16963), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (16874, 16963), True, 'import torch.nn as nn\n'), ((17165, 17265), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 
'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (17176, 17265), True, 'import torch.nn as nn\n'), ((17468, 17568), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (17479, 17568), True, 'import torch.nn as nn\n'), ((17818, 17918), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (17829, 17918), True, 'import torch.nn as nn\n'), ((18120, 18220), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'self.upsample_scale', 'mode': '"""bilinear"""', 'align_corners': 'self.align_corners'}), "(scale_factor=self.upsample_scale, mode='bilinear',\n align_corners=self.align_corners)\n", (18131, 18220), True, 'import torch.nn as nn\n')] |
"""
Scatter Plot with LOESS Lines
-----------------------------
This example shows how to add a trend line to a scatter plot using
the LOESS transform (LOcally Estimated Scatterplot Smoothing).
"""
# category: scatter plots
import altair as alt
import numpy as np
import pandas as pd

# Seed the RNG so the example renders the same data on every run.
np.random.seed(1)

# Build three random-walk series over a common x axis.  Use pandas
# directly rather than the ``alt.pd`` re-export, which is deprecated
# and was removed in Altair 5.
source = pd.DataFrame(
    {
        "x": np.arange(100),
        "A": np.random.randn(100).cumsum(),
        "B": np.random.randn(100).cumsum(),
        "C": np.random.randn(100).cumsum(),
    }
)

# Fold the wide columns A/B/C into long form and draw one colored
# point cloud per series.
base = (
    alt.Chart(source)
    .mark_circle(opacity=0.5)
    .transform_fold(fold=["A", "B", "C"], as_=["category", "y"])
    .encode(alt.X("x:Q"), alt.Y("y:Q"), alt.Color("category:N"))
)

# Overlay a per-category LOESS trend line on the scatter plot.
base + base.transform_loess("x", "y", groupby=["category"]).mark_line(size=4)
| [
"numpy.random.seed",
"altair.Y",
"numpy.random.randn",
"altair.Chart",
"altair.X",
"numpy.arange",
"altair.Color"
] | [((266, 283), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (280, 283), True, 'import numpy as np\n'), ((626, 638), 'altair.X', 'alt.X', (['"""x:Q"""'], {}), "('x:Q')\n", (631, 638), True, 'import altair as alt\n'), ((640, 652), 'altair.Y', 'alt.Y', (['"""y:Q"""'], {}), "('y:Q')\n", (645, 652), True, 'import altair as alt\n'), ((654, 677), 'altair.Color', 'alt.Color', (['"""category:N"""'], {}), "('category:N')\n", (663, 677), True, 'import altair as alt\n'), ((331, 345), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (340, 345), True, 'import numpy as np\n'), ((360, 380), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (375, 380), True, 'import numpy as np\n'), ((404, 424), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (419, 424), True, 'import numpy as np\n'), ((448, 468), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (463, 468), True, 'import numpy as np\n'), ((501, 518), 'altair.Chart', 'alt.Chart', (['source'], {}), '(source)\n', (510, 518), True, 'import altair as alt\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pandas_datareader as web
import datetime as dt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM

# --- Training data -------------------------------------------------------
company = 'FB'
start = dt.datetime(2010, 1, 1)
end = dt.datetime(2021, 1, 1)
data = web.DataReader(company, 'yahoo', start, end)

# Scale closing prices into [0, 1] for the LSTM.
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data['Close'].values.reshape(-1, 1))

# Each training sample is a window of the previous `prediction_days`
# scaled closing prices; the label is the next day's scaled price.
prediction_days = 120
x_train = []
y_train = []
for x in range(prediction_days, len(scaled_data)):
    x_train.append(scaled_data[x - prediction_days:x, 0])
    y_train.append(scaled_data[x, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
# LSTM expects (samples, timesteps, features).
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))

# --- Model: stacked LSTM with dropout, single-value regression head ------
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(x_train, y_train, epochs=50, batch_size=32)

# --- Evaluation on unseen data -------------------------------------------
test_start = dt.datetime(2020, 1, 1)
test_end = dt.datetime.now()
test_data = web.DataReader(company, 'yahoo', test_start, test_end)
actual_prices = test_data['Close'].values

# Prepend the last `prediction_days` of training prices so the first
# test sample has a full window, then scale with the *training* scaler.
total_dataset = pd.concat((data['Close'], test_data['Close']), axis=0)
model_inputs = total_dataset[len(total_dataset) - len(test_data) - prediction_days:].values
model_inputs = model_inputs.reshape(-1, 1)
model_inputs = scaler.transform(model_inputs)

x_test = []
for x in range(prediction_days, len(model_inputs)):
    x_test.append(model_inputs[x - prediction_days:x, 0])
x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

predicted_prices = model.predict(x_test)
predicted_prices = scaler.inverse_transform(predicted_prices)

plt.plot(actual_prices, color="black", label=f"Actual {company} Price")
plt.plot(predicted_prices, color='green', label=f"Predicted {company} Price")
plt.title(f"{company} Share Price")
plt.xlabel('Time')
plt.ylabel(f"{company} Share price")
plt.legend()
plt.show()

# --- Next-day prediction -------------------------------------------------
# BUGFIX: the original sliced
#   model_inputs[len(model_inputs)+1-prediction_days : len(model_inputs+1), 0]
# which yields only `prediction_days - 1` timesteps (and `model_inputs+1`
# adds 1 to every element, not to the length). The LSTM was built for
# exactly `prediction_days` timesteps, so feed it the final
# `prediction_days` scaled inputs.
real_data = [model_inputs[len(model_inputs) - prediction_days:, 0]]
real_data = np.array(real_data)
real_data = np.reshape(real_data, (real_data.shape[0], real_data.shape[1], 1))
prediction = model.predict(real_data)
prediction = scaler.inverse_transform(prediction)
print(f"Prediction: {prediction}") | [
"pandas_datareader.DataReader",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.legend",
"sklearn.preprocessing.MinMaxScaler",
"datetime.datetime",
"numpy.array",
"tensorflow.ker... | [((312, 335), 'datetime.datetime', 'dt.datetime', (['(2010)', '(1)', '(1)'], {}), '(2010, 1, 1)\n', (323, 335), True, 'import datetime as dt\n'), ((339, 362), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(1)'], {}), '(2021, 1, 1)\n', (350, 362), True, 'import datetime as dt\n'), ((369, 413), 'pandas_datareader.DataReader', 'web.DataReader', (['company', '"""yahoo"""', 'start', 'end'], {}), "(company, 'yahoo', start, end)\n", (383, 413), True, 'import pandas_datareader as web\n'), ((423, 457), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (435, 457), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((789, 849), 'numpy.reshape', 'np.reshape', (['x_train', '(x_train.shape[0], x_train.shape[1], 1)'], {}), '(x_train, (x_train.shape[0], x_train.shape[1], 1))\n', (799, 849), True, 'import numpy as np\n'), ((856, 868), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (866, 868), False, 'from tensorflow.keras.models import Sequential\n'), ((1260, 1283), 'datetime.datetime', 'dt.datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (1271, 1283), True, 'import datetime as dt\n'), ((1292, 1309), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1307, 1309), True, 'import datetime as dt\n'), ((1321, 1375), 'pandas_datareader.DataReader', 'web.DataReader', (['company', '"""yahoo"""', 'test_start', 'test_end'], {}), "(company, 'yahoo', test_start, test_end)\n", (1335, 1375), True, 'import pandas_datareader as web\n'), ((1431, 1485), 'pandas.concat', 'pd.concat', (["(data['Close'], test_data['Close'])"], {'axis': '(0)'}), "((data['Close'], test_data['Close']), axis=0)\n", (1440, 1485), True, 'import pandas as pd\n'), ((1790, 1806), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (1798, 1806), True, 'import numpy as np\n'), ((1815, 1872), 'numpy.reshape', 'np.reshape', (['x_test', '(x_test.shape[0], x_test.shape[1], 
1)'], {}), '(x_test, (x_test.shape[0], x_test.shape[1], 1))\n', (1825, 1872), True, 'import numpy as np\n'), ((1975, 2046), 'matplotlib.pyplot.plot', 'plt.plot', (['actual_prices'], {'color': '"""black"""', 'label': 'f"""Actual {company} Price"""'}), "(actual_prices, color='black', label=f'Actual {company} Price')\n", (1983, 2046), True, 'import matplotlib.pyplot as plt\n'), ((2046, 2123), 'matplotlib.pyplot.plot', 'plt.plot', (['predicted_prices'], {'color': '"""green"""', 'label': 'f"""Predicted {company} Price"""'}), "(predicted_prices, color='green', label=f'Predicted {company} Price')\n", (2054, 2123), True, 'import matplotlib.pyplot as plt\n'), ((2123, 2158), 'matplotlib.pyplot.title', 'plt.title', (['f"""{company} Share Price"""'], {}), "(f'{company} Share Price')\n", (2132, 2158), True, 'import matplotlib.pyplot as plt\n'), ((2160, 2178), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2170, 2178), True, 'import matplotlib.pyplot as plt\n'), ((2180, 2216), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""{company} Share price"""'], {}), "(f'{company} Share price')\n", (2190, 2216), True, 'import matplotlib.pyplot as plt\n'), ((2219, 2231), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2229, 2231), True, 'import matplotlib.pyplot as plt\n'), ((2233, 2243), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2241, 2243), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2363), 'numpy.array', 'np.array', (['real_data'], {}), '(real_data)\n', (2352, 2363), True, 'import numpy as np\n'), ((2375, 2441), 'numpy.reshape', 'np.reshape', (['real_data', '(real_data.shape[0], real_data.shape[1], 1)'], {}), '(real_data, (real_data.shape[0], real_data.shape[1], 1))\n', (2385, 2441), True, 'import numpy as np\n'), ((744, 761), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (752, 761), True, 'import numpy as np\n'), ((762, 779), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (770, 779), True, 
'import numpy as np\n'), ((880, 952), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': '(50)', 'return_sequences': '(True)', 'input_shape': '(x_train.shape[1], 1)'}), '(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1))\n', (884, 952), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM\n'), ((964, 976), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (971, 976), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM\n'), ((989, 1026), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': '(50)', 'return_sequences': '(True)'}), '(units=50, return_sequences=True)\n', (993, 1026), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM\n'), ((1039, 1051), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1046, 1051), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM\n'), ((1064, 1078), 'tensorflow.keras.layers.LSTM', 'LSTM', ([], {'units': '(50)'}), '(units=50)\n', (1068, 1078), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM\n'), ((1091, 1103), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1098, 1103), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM\n'), ((1116, 1130), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(1)'}), '(units=1)\n', (1121, 1130), False, 'from tensorflow.keras.layers import Dense, Dropout, LSTM\n')] |
"""
This module provides the RandomPositionsSampler class.
TODO: Currently, only works with sequences from `selene_sdk.sequences.Genome`.
We would like to generalize this to `selene_sdk.sequences.Sequence` if possible.
"""
from collections import namedtuple
from collections import defaultdict
import logging
import random
import numpy as np
from .online_sampler import OnlineSampler
from ..utils import get_indices_and_probabilities
logger = logging.getLogger(__name__)


SampleIndices = namedtuple("SampleIndices", ("indices", "weights"))
"""
Pairs the numeric indices of some samples with the weight to allot to
each index when randomly drawing from them.

TODO: this is common to both the intervals sampler and the
random positions sampler. Can we move this to utils or
somewhere else?

Parameters
----------
indices : list(int)
    The numeric index of each sample.
weights : list(float)
    The amount of weight assigned to each sample.

Attributes
----------
indices : list(int)
    The numeric index of each sample.
weights : list(float)
    The amount of weight assigned to each sample.
"""
class RandomPositionsSampler(OnlineSampler):
    """This sampler randomly selects a position in the genome and queries for
    a sequence centered at that position for input to the model.

    TODO: generalize to selene_sdk.sequences.Sequence?

    Parameters
    ----------
    reference_sequence : selene_sdk.sequences.Genome
        A reference sequence from which to create examples.
    target_path : str
        Path to tabix-indexed, compressed BED file (`*.bed.gz`) of genomic
        coordinates mapped to the genomic features we want to predict.
    features : list(str)
        List of distinct features that we aim to predict.
    train_size : int
        Total sample size for train set.
    validation_size : int
        Total sample size of validation set.
    test_size : int
        Total sample size of test set.
    seed : int, optional
        Default is 436. Sets the random seed for sampling.
    validation_holdout : list(str) or float, optional
        Default is `['chr6', 'chr7']`. Holdout can be regional or
        proportional. If regional, expects a list (e.g. `['chrX', 'chrY']`).
        Regions must match those specified in the first column of the
        tabix-indexed BED file. If proportional, specify a percentage
        between (0.0, 1.0). Typically 0.10 or 0.20. NOTE: only the
        proportional mode is currently implemented (see `__init__`).
    test_holdout : list(str) or float, optional
        Default is `['chr8', 'chr9']`. See documentation for
        `validation_holdout` for additional information.
    sequence_length : int, optional
        Default is 1000. Model is trained on sequences of `sequence_length`
        where genomic features are annotated to the center regions of
        these sequences.
    center_bin_to_predict : int, optional
        Default is 200. Query the tabix-indexed file for a region of
        length `center_bin_to_predict`.
    feature_thresholds : float [0.0, 1.0], optional
        Default is 0.5. The `feature_threshold` to pass to the
        `GenomicFeatures` object.
    mode : {'train', 'validate', 'test'}
        Default is `'train'`. The mode to run the sampler in.
    save_datasets : list(str), optional
        Default is `[]`. The list of modes for which we should
        save the sampled data to file.
    output_dir : str or None, optional
        Default is None. The path to the directory where we should
        save sampled examples for a mode. If `save_datasets` is
        a non-empty list, `output_dir` must be specified. If
        the path in `output_dir` does not exist it will be created
        automatically.

    Attributes
    ----------
    reference_sequence : selene_sdk.sequences.Genome
        The reference sequence that examples are created from.
    target : selene_sdk.targets.Target
        The `selene_sdk.targets.Target` object holding the features that we
        would like to predict.
    validation_holdout : list(str) or float
        The samples to hold out for validating model performance.
    test_holdout : list(str) or float
        The samples to hold out for testing model performance. See the
        documentation for `validation_holdout` for more details.
    sequence_length : int
        The length of the sequences to train the model on.
    bin_radius : int
        From the center of the sequence, the radius in which to detect
        a feature annotation in order to include it as a sample's label.
    surrounding_sequence_radius : int
        The length of sequence falling outside of the feature detection
        bin (i.e. `bin_radius`) center, but still within the
        `sequence_length`.
    modes : list(str)
        The list of modes that the sampler can be run in.
    mode : str
        The current mode that the sampler is running in. Must be one of
        the modes listed in `modes`.
    """

    def __init__(self,
                 reference_sequence,
                 target_path,
                 features,
                 train_size,
                 validation_size,
                 test_size,
                 seed=436,
                 validation_holdout=['chr6', 'chr7'],
                 test_holdout=['chr8', 'chr9'],
                 sequence_length=1000,
                 center_bin_to_predict=200,
                 feature_thresholds=0.5,
                 mode="train",
                 save_datasets=[],
                 output_dir=None):
        super(RandomPositionsSampler, self).__init__(
            reference_sequence,
            target_path,
            features,
            seed=seed,
            validation_holdout=validation_holdout,
            test_holdout=test_holdout,
            sequence_length=sequence_length,
            center_bin_to_predict=center_bin_to_predict,
            feature_thresholds=feature_thresholds,
            mode=mode,
            save_datasets=save_datasets,
            output_dir=output_dir)
        self.sample_from_intervals = []
        self.interval_lengths = []

        # Count chromosomes and total bases so every genomic position can be
        # addressed by one index into a conceptual flat genome-wide array.
        self._num_chroms = 0
        self._genome_n_bases = 0
        for _chrom, len_chrom in self.reference_sequence.get_chr_lens():
            self._num_chroms += 1
            self._genome_n_bases += len_chrom

        self._validation_holdout = validation_holdout
        self._N_validation = validation_size
        if test_holdout:
            self._test_holdout = test_holdout
        # BUGFIX: `_N_test` was only assigned when `test_holdout` was truthy,
        # so building the partition dict below raised AttributeError otherwise.
        self._N_test = test_size if test_holdout else 0
        self._N_train = train_size

        # BUGFIX: the keys were capitalized ("Train", "Validate", "Test")
        # while every consumer (`_assign_samples`, `_sample`) looks them up
        # by the lowercase mode names.
        self._partition_ixs = {
            "train": np.zeros(self._N_train, dtype=np.int64),
            "validate": np.zeros(self._N_validation, dtype=np.int64),
            "test": np.zeros(self._N_test, dtype=np.int64)}

        # Per-chromosome bookkeeping: "Total" holds the chromosome names,
        # "Starts"/"Ends" hold each chromosome's [start, end) span in the
        # flat genome-wide index.
        self.chroms_info = {
            "Total": [],
            "Starts": np.zeros(self._num_chroms, dtype=np.int64),
            "Ends": np.zeros(self._num_chroms, dtype=np.int64)}

        if isinstance(validation_holdout, float):
            self._partition_by_proportion()
        else:
            # BUGFIX: the original called `isinstance` with a single argument
            # (a TypeError) and then `self._partition_by_chromosome()`, a
            # method that does not exist. Chromosome-based partitioning is
            # still TODO, so fail loudly and clearly instead.
            raise NotImplementedError(
                "Partitioning by chromosome holdout is not implemented yet; "
                "pass a float `validation_holdout` to partition by "
                "proportion.")

    def _init_chroms(self):
        """Populate `self.chroms_info` with each chromosome's name and its
        [start, end) coordinates within the flat genome-wide index."""
        tot_len = 0
        for counter, (chrom, len_chrom) in enumerate(
                self.reference_sequence.get_chr_lens()):
            self.chroms_info["Total"].append(chrom)
            self.chroms_info["Starts"][counter] = tot_len
            self.chroms_info["Ends"][counter] = tot_len + len_chrom
            tot_len += len_chrom

    def _assign_proportions(self):
        """Compute how many flat-genome positions are allotted to each mode,
        treating the holdouts as proportions of the genome.

        Returns
        -------
        tuple(int)
            `(test_N, validation_N, training_N)` when a test holdout is
            configured, otherwise `(validation_N, training_N)`.
        """
        validation_prop_N = self._genome_n_bases * self._validation_holdout
        if self.test_holdout:
            test_prop_N = self._genome_n_bases * self._test_holdout
            training_prop_N = self._genome_n_bases * \
                (1 - self._test_holdout - self._validation_holdout)
            return (int(test_prop_N), int(validation_prop_N),
                    int(training_prop_N))
        # BUGFIX: the original referenced a bare `_validation_holdout` name
        # here, which raised NameError.
        training_prop_N = self._genome_n_bases * (1 - self._validation_holdout)
        return int(validation_prop_N), int(training_prop_N)

    def _partition_by_proportion(self):
        """Partition the flat-genome positions into train/validate/test
        using the proportional holdouts."""
        self._init_chroms()
        self._assign_samples()

    def _psuedoshuffle(self):
        """Return the flat-genome position indices.

        TODO: the chunk-wise shuffling in the original implementation was
        commented out (probably for memory/time reasons), so positions are
        currently returned in genome order.
        """
        return np.arange(self._genome_n_bases, dtype=np.int64)

    def _assign_samples(self):
        """Slice the flat-genome index array into disjoint test, validate,
        and train partitions (in that order), capping each partition at the
        number of positions its proportion allows."""
        genome_positions_arr = self._psuedoshuffle()
        start = 0
        if self.test_holdout:
            test_prop_N, validation_prop_N, training_prop_N = \
                self._assign_proportions()
            # BUGFIX: partitions are stored under the lowercase mode names
            # used by `_sample` (the original mixed "Test" and "test"), and
            # the while-loops that repeatedly overwrote each partition with
            # the same slice have been collapsed to one slice per mode.
            get_N = min(test_prop_N, self._N_test)
            self._partition_ixs["test"] = \
                genome_positions_arr[start:start + get_N]
            start += get_N
        else:
            # BUGFIX: the original called `_assign_proportions()` without
            # `self.`, which raised NameError.
            validation_prop_N, training_prop_N = self._assign_proportions()
        get_N = min(validation_prop_N, self._N_validation)
        self._partition_ixs["validate"] = \
            genome_positions_arr[start:start + get_N]
        start += get_N
        get_N = min(training_prop_N, self._N_train)
        self._partition_ixs["train"] = \
            genome_positions_arr[start:start + get_N]

    def _retrieve(self, chrom, position):
        """Build the (sequence encoding, target labels) pair for a window
        centered at `position` on `chrom`.

        Returns
        -------
        tuple(numpy.ndarray, numpy.ndarray) or None
            None when the window is malformed, the sequence could not be
            retrieved, or the sequence is mostly ambiguous bases, so the
            caller should sample a different position.
        """
        bin_start = position - self._start_radius
        bin_end = position + self._end_radius
        retrieved_targets = self.target.get_feature_data(
            chrom, bin_start, bin_end)
        window_start = bin_start - self.surrounding_sequence_radius
        window_end = bin_end + self.surrounding_sequence_radius
        if window_end - window_start < self.sequence_length:
            # BUGFIX: this diagnostic used a bare print; route it through
            # the module logger like the other messages in this method.
            logger.info(
                "Window [{0}, {1}] with radii ({2}, {3}, {4}) is shorter "
                "than the required sequence length. Sampling again.".format(
                    bin_start, bin_end, self._start_radius, self._end_radius,
                    self.surrounding_sequence_radius))
            return None
        # Sample the strand uniformly at random.
        strand = self.STRAND_SIDES[random.randint(0, 1)]
        retrieved_seq = \
            self.reference_sequence.get_encoding_from_coords(
                chrom, window_start, window_end, strand)
        if retrieved_seq.shape[0] == 0:
            logger.info("Full sequence centered at {0} position {1} "
                        "could not be retrieved. Sampling again.".format(
                            chrom, position))
            return None
        elif np.sum(retrieved_seq) / float(retrieved_seq.shape[0]) < 0.60:
            # NOTE(review): the threshold is 0.60 while the message says
            # "Over 30%" — the exact fraction depends on how the encoding
            # represents 'N'; confirm against Genome.get_encoding_from_coords.
            logger.info("Over 30% of the bases in the sequence centered "
                        "at {0} position {1} are ambiguous ('N'). "
                        "Sampling again.".format(chrom, position))
            return None
        if retrieved_seq.shape[0] < self.sequence_length:
            # TODO: remove after investigating this bug.
            logger.info(
                "Warning: sequence retrieved for {0}, {1}, {2}, {3} "
                "had length less than required sequence length {4}. "
                "This bug will be investigated and addressed in the next "
                "version of Selene.".format(
                    chrom, window_start, window_end, strand,
                    self.sequence_length))
            return None
        if self.mode in self._save_datasets:
            feature_indices = ';'.join(
                [str(f) for f in np.nonzero(retrieved_targets)[0]])
            self._save_datasets[self.mode].append(
                [chrom,
                 window_start,
                 window_end,
                 strand,
                 feature_indices])
            # Flush periodically so the in-memory buffer stays bounded.
            if len(self._save_datasets[self.mode]) > 200000:
                self.save_dataset_to_file(self.mode)
        return (retrieved_seq, retrieved_targets)

    def _pair_from_index(self, index):
        """Map an index into the flat genome-wide array back to a
        (chromosome name, position-within-chromosome) pair."""
        # BUGFIX: renamed locals that shadowed the builtins `all` and `min`,
        # and removed the unused `curr_index`.
        matches = np.where(self.chroms_info["Ends"] >= index)
        first = matches[0][0]
        chrom = self.chroms_info["Total"][first]
        pos = int(index - self.chroms_info["Starts"][first])
        return chrom, pos

    def _sample(self):
        """Consume and return the next (chrom, position) for the current
        mode's partition."""
        sample_index = self._partition_ixs[self.mode][0]
        chrom, pos = self._pair_from_index(sample_index)
        # BUGFIX: np.delete returns a new array; the original discarded the
        # result, so the same position was drawn forever.
        self._partition_ixs[self.mode] = np.delete(
            self._partition_ixs[self.mode], 0)
        return chrom, pos

    def sample(self, batch_size):
        """
        Draws a mini-batch of examples and their corresponding labels.

        Parameters
        ----------
        batch_size : int
            The number of examples to include in the mini-batch.

        Returns
        -------
        sequences, targets : tuple(numpy.ndarray, numpy.ndarray)
            A tuple containing the numeric representation of the
            sequence examples and their corresponding labels. The
            shape of `sequences` will be
            :math:`B \\times L \\times N`, where :math:`B` is
            `batch_size`, :math:`L` is the sequence length, and
            :math:`N` is the size of the sequence type's alphabet.
            The shape of `targets` will be :math:`B \\times F`,
            where :math:`F` is the number of features.
        """
        sequences = np.zeros((batch_size, self.sequence_length, 4))
        targets = np.zeros((batch_size, self.n_features))
        n_samples_drawn = 0
        # Keep drawing until `batch_size` positions yield valid windows;
        # _retrieve returns None for positions that must be re-sampled.
        while n_samples_drawn < batch_size:
            chrom, position = self._sample()
            retrieve_output = self._retrieve(chrom, position)
            if not retrieve_output:
                continue
            seq, seq_targets = retrieve_output
            sequences[n_samples_drawn, :, :] = seq
            targets[n_samples_drawn, :] = seq_targets
            n_samples_drawn += 1
        return (sequences, targets)
| [
"numpy.sum",
"random.randint",
"numpy.zeros",
"numpy.nonzero",
"numpy.where",
"numpy.arange",
"collections.namedtuple",
"numpy.delete",
"logging.getLogger"
] | [((447, 474), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (464, 474), False, 'import logging\n'), ((493, 544), 'collections.namedtuple', 'namedtuple', (['"""SampleIndices"""', "['indices', 'weights']"], {}), "('SampleIndices', ['indices', 'weights'])\n", (503, 544), False, 'from collections import namedtuple\n'), ((9400, 9432), 'numpy.arange', 'np.arange', (['total'], {'dtype': 'np.int64'}), '(total, dtype=np.int64)\n', (9409, 9432), True, 'import numpy as np\n'), ((16289, 16332), 'numpy.where', 'np.where', (["(self.chroms_info['Ends'] >= index)"], {}), "(self.chroms_info['Ends'] >= index)\n", (16297, 16332), True, 'import numpy as np\n'), ((16912, 16956), 'numpy.delete', 'np.delete', (['self._partition_ixs[self.mode]', '(0)'], {}), '(self._partition_ixs[self.mode], 0)\n', (16921, 16956), True, 'import numpy as np\n'), ((17898, 17945), 'numpy.zeros', 'np.zeros', (['(batch_size, self.sequence_length, 4)'], {}), '((batch_size, self.sequence_length, 4))\n', (17906, 17945), True, 'import numpy as np\n'), ((17964, 18003), 'numpy.zeros', 'np.zeros', (['(batch_size, self.n_features)'], {}), '((batch_size, self.n_features))\n', (17972, 18003), True, 'import numpy as np\n'), ((6863, 6902), 'numpy.zeros', 'np.zeros', (['self._N_train'], {'dtype': 'np.int64'}), '(self._N_train, dtype=np.int64)\n', (6871, 6902), True, 'import numpy as np\n'), ((6948, 6992), 'numpy.zeros', 'np.zeros', (['self._N_validation'], {'dtype': 'np.int64'}), '(self._N_validation, dtype=np.int64)\n', (6956, 6992), True, 'import numpy as np\n'), ((7034, 7072), 'numpy.zeros', 'np.zeros', (['self._N_test'], {'dtype': 'np.int64'}), '(self._N_test, dtype=np.int64)\n', (7042, 7072), True, 'import numpy as np\n'), ((7360, 7386), 'numpy.zeros', 'np.zeros', (['self._num_chroms'], {}), '(self._num_chroms)\n', (7368, 7386), True, 'import numpy as np\n'), ((7424, 7450), 'numpy.zeros', 'np.zeros', (['self._num_chroms'], {}), '(self._num_chroms)\n', (7432, 7450), True, 'import 
numpy as np\n'), ((14374, 14394), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (14388, 14394), False, 'import random\n'), ((14808, 14829), 'numpy.sum', 'np.sum', (['retrieved_seq'], {}), '(retrieved_seq)\n', (14814, 14829), True, 'import numpy as np\n'), ((15738, 15767), 'numpy.nonzero', 'np.nonzero', (['retrieved_targets'], {}), '(retrieved_targets)\n', (15748, 15767), True, 'import numpy as np\n')] |
#!/usr/bin/env python2.7
import sys
import os
import numpy as np
import tensorflow as tf
import keras
from keras.models import model_from_json
#from tensorflow.python.keras.models import load_model
from keras.models import load_model
from std_msgs.msg import Float32
#import tensorflow.keras.backend as K
from keras import backend as K
import math
import cv2
import csv
#import model
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
# Module-level CvBridge used by the image callback to convert ROS Image
# messages to OpenCV frames.
bridge = CvBridge()
import rospkg
# NOTE(review): `global` at module level is a no-op statement; kept as-is.
global graph
# Capture TensorFlow's default graph at import time; nncontroller re-enters
# it (`with graph.as_default():`) when predicting from the callback thread.
graph = tf.get_default_graph()
from deepnncar_components.msg import drive_param
from deepnncar_components.msg import angle_msg
# Denormalization range: network outputs are mapped to [miny, maxy] by
# LEC_Node.denormalization — presumably steering degrees; TODO confirm units.
miny=10
maxy=20
class LEC_Node:
    """ROS node wrapper around a Keras steering model (LEC).

    Subscribes to the racecar's image topic, runs the network on each
    frame, and publishes the denormalized steering angle.

    Parameters
    ----------
    racecar_name : str
        Name used to build the image and angle topic names.
    model : str
        Path to the Keras model file to load.
    height, width : int
        Accepted for interface compatibility; the callback resizes frames
        to the fixed (200, 66) input the network was trained on.
    """

    def __init__(self, racecar_name, model, height, width):
        self.cv_bridge = CvBridge()
        self.image_topic = '/' + str(racecar_name) + '/Image'
        self.model = load_model(model)
        self.pub = rospy.Publisher('/' + str(racecar_name) + '/angle_msg',
                                   angle_msg, queue_size=1)

    def image_callback(self, data):
        """Convert the incoming Image, run inference, publish the angle."""
        # BUGFIX: use the bridge created in __init__ instead of the unrelated
        # module-global `bridge` (the instance attribute was never used).
        frame = self.cv_bridge.imgmsg_to_cv2(data,
                                             desired_encoding="passthrough")
        # Resize and scale pixels to [0, 1] to match training preprocessing.
        img = cv2.resize(frame, (200, 66))
        img = img / 255.
        # BUGFIX: the original passed the module-global `model` (the weights
        # *path string*) here; pass the loaded model for consistency. The
        # argument was unused either way, so behavior is unchanged.
        steer_val = self.nncontroller(img, self.model)
        steering = self.denormalization(steer_val)
        steeringNN = float("{0:.2f}".format(steering))
        msg = angle_msg()
        msg.header.stamp = rospy.Time.now()
        msg.steering_angle = steeringNN
        self.pub.publish(msg)

    def nncontroller(self, img, model):
        """Run the network on one preprocessed frame and return the raw
        (normalized) steering prediction.

        `model` is kept for backward compatibility but inference always
        uses the model loaded in __init__.
        """
        inputs = np.array(img)[np.newaxis]
        # Re-enter the graph captured at import time so prediction works
        # from the ROS callback thread.
        with graph.as_default():
            outputs = self.model.predict(inputs, batch_size=1)
        return float(outputs[0][0])

    def denormalization(self, steer):
        """Map a normalized network output to the [miny, maxy] range."""
        # Reading module globals needs no `global` declaration; the original
        # `global maxy, miny` statement was a no-op and has been dropped.
        return (float(steer) * (maxy - miny)) + miny
if __name__ == '__main__':
    rospy.init_node("ros_daev_node", anonymous=True)
    # Launch-file arguments; the first names the racecar whose topics we use.
    cli_args = rospy.myargv()[1:]
    racecar_name = cli_args[0]
    print(racecar_name)
    # Resolve the Keras weights file shipped inside the ROS package.
    load_path_root = rospkg.RosPack().get_path('deepnncar_components') + '/src/'
    print(load_path_root)
    model = load_path_root + 'weights.best.hdf5'
    print(model)
    il = LEC_Node(racecar_name, model, 66, 200)
    image_sub = rospy.Subscriber(il.image_topic, Image, il.image_callback)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting Down")
    cv2.destroyAllWindows()
| [
"keras.models.load_model",
"cv_bridge.CvBridge",
"rospy.Subscriber",
"rospy.Time.now",
"cv2.destroyAllWindows",
"deepnncar_components.msg.angle_msg",
"rospkg.RosPack",
"numpy.array",
"rospy.init_node",
"rospy.spin",
"tensorflow.get_default_graph",
"rospy.myargv",
"cv2.resize"
] | [((518, 528), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (526, 528), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((564, 586), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (584, 586), True, 'import tensorflow as tf\n'), ((1910, 1958), 'rospy.init_node', 'rospy.init_node', (['"""ros_daev_node"""'], {'anonymous': '(True)'}), "('ros_daev_node', anonymous=True)\n", (1925, 1958), False, 'import rospy\n'), ((2398, 2456), 'rospy.Subscriber', 'rospy.Subscriber', (['il.image_topic', 'Image', 'il.image_callback'], {}), '(il.image_topic, Image, il.image_callback)\n', (2414, 2456), False, 'import rospy\n'), ((825, 835), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (833, 835), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((915, 932), 'keras.models.load_model', 'load_model', (['model'], {}), '(model)\n', (925, 932), False, 'from keras.models import load_model\n'), ((1175, 1203), 'cv2.resize', 'cv2.resize', (['frame', '(200, 66)'], {}), '(frame, (200, 66))\n', (1185, 1203), False, 'import cv2\n'), ((1429, 1440), 'deepnncar_components.msg.angle_msg', 'angle_msg', ([], {}), '()\n', (1438, 1440), False, 'from deepnncar_components.msg import angle_msg\n'), ((1466, 1482), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (1480, 1482), False, 'import rospy\n'), ((2020, 2034), 'rospy.myargv', 'rospy.myargv', ([], {}), '()\n', (2032, 2034), False, 'import rospy\n'), ((2472, 2484), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2482, 2484), False, 'import rospy\n'), ((1611, 1624), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1619, 1624), True, 'import numpy as np\n'), ((2554, 2577), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2575, 2577), False, 'import cv2\n'), ((2191, 2207), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (2205, 2207), False, 'import rospkg\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import glob
import logging
import os
from queue import Empty
from typing import List, Iterable, Iterator, Optional
import numpy as np
from allennlp.data.instance import Instance
from torch.multiprocessing import Process, Queue, Value, log_to_stderr
class logger:
    """
    Lazily-instantiated stderr logger.

    ``multiprocessing.log_to_stderr`` produces some log output as soon as it
    is called, even when this dataset reader is never used.  To avoid that,
    the underlying logger is only created on the first ``info()`` call, i.e.
    only when the MultiprocessDatasetReader is actually exercised.
    """
    _logger = None

    @classmethod
    def info(cls, message: str) -> None:
        if cls._logger is None:
            stderr_logger = log_to_stderr()
            stderr_logger.setLevel(logging.INFO)
            cls._logger = stderr_logger
        cls._logger.info(message)
def _worker(
    call_back,
    input_queue: Queue,
    output_queue: Queue,
    num_active_workers: Value,
    num_inflight_items: Value,
    worker_id: int,
) -> None:
    """
    A worker that pulls filenames off the input queue, uses the dataset reader
    to read them, and places the generated instances on the output queue. When
    there are no filenames left on the input queue, it decrements
    num_active_workers to signal completion.

    ``call_back`` maps a file path to the object that is placed on the
    output queue, one object per file path pulled from the input queue.
    """
    logger.info(f"Reader worker: {worker_id} PID: {os.getpid()}")
    # Keep going until you get a file_path that's None.
    while True:
        file_path = input_queue.get()
        if file_path is None:
            # It's important that we close and join the queue here before
            # decrementing num_active_workers. Otherwise our parent may join us
            # before the queue's feeder thread has passed all buffered items to
            # the underlying pipe resulting in a deadlock.
            #
            # See:
            # https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#pipes-and-queues
            # https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#programming-guidelines
            output_queue.close()
            output_queue.join_thread()
            # Decrementing is not atomic, so it must happen under the lock.
            # See https://docs.python.org/2/library/multiprocessing.html#multiprocessing.Value.
            with num_active_workers.get_lock():
                num_active_workers.value -= 1
            logger.info(f"Reader worker {worker_id} finished")
            break
        logger.info(f"reading instances from {file_path}")
        instance = call_back(file_path)
        # Count the item as in flight *before* putting it on the queue, so the
        # consumer never observes an empty queue together with a zero
        # in-flight count while an item is still buffered.
        with num_inflight_items.get_lock():
            num_inflight_items.value += 1
        output_queue.put(instance)
class QIterable(Iterable[Instance]):
    """
    You can't set attributes on Iterators, so this is just a dumb wrapper
    that exposes the output_queue.

    Lifecycle: ``__iter__`` calls ``start()`` (spawning the worker
    processes), yields instances until the workers are done and nothing is
    in flight, then calls ``join()``.
    """
    def __init__(self, output_queue_size, epochs_per_read, num_workers, call_back, file_path) -> None:
        self.output_queue = Queue(output_queue_size)
        self.epochs_per_read = epochs_per_read
        self.num_workers = num_workers
        self.file_path = file_path
        self.call_back = call_back
        # Initialized in start.
        self.input_queue: Optional[Queue] = None
        self.processes: List[Process] = []
        # The num_active_workers and num_inflight_items counts in conjunction
        # determine whether there could be any outstanding instances.
        self.num_active_workers: Optional[Value] = None
        self.num_inflight_items: Optional[Value] = None
    def __iter__(self) -> Iterator[Instance]:
        """Spawn workers, drain the output queue, then join the workers."""
        self.start()
        # Keep going as long as not all the workers have finished or there are items in flight.
        while self.num_active_workers.value > 0 or self.num_inflight_items.value > 0:
            # Inner loop to minimize locking on self.num_active_workers.
            while True:
                try:
                    # Non-blocking to handle the empty-queue case.
                    yield self.output_queue.get(block=False, timeout=1.0)
                    with self.num_inflight_items.get_lock():
                        self.num_inflight_items.value -= 1
                except Empty:
                    # The queue could be empty because the workers are
                    # all finished or because they're busy processing.
                    # The outer loop distinguishes between these two
                    # cases.
                    break
        self.join()
    def start(self) -> None:
        """Fill the input queue with shard paths and launch the worker processes."""
        shards = glob.glob(self.file_path)
        # Ensure a consistent order before shuffling for testing.
        shards.sort()
        num_shards = len(shards)
        # If we want multiple epochs per read, put shards in the queue multiple times.
        self.input_queue = Queue(num_shards * self.epochs_per_read + self.num_workers)
        for _ in range(self.epochs_per_read):
            np.random.shuffle(shards)
            for shard in shards:
                self.input_queue.put(shard)
        # Then put a None per worker to signify no more files.
        for _ in range(self.num_workers):
            self.input_queue.put(None)
        assert (
            not self.processes
        ), "Process list non-empty! You must call QIterable.join() before restarting."
        self.num_active_workers = Value("i", self.num_workers)
        self.num_inflight_items = Value("i", 0)
        for worker_id in range(self.num_workers):
            process = Process(
                target=_worker,
                args=(
                    self.call_back,
                    self.input_queue,
                    self.output_queue,
                    self.num_active_workers,
                    self.num_inflight_items,
                    worker_id,
                ),
            )
            logger.info(f"starting worker {worker_id}")
            process.start()
            self.processes.append(process)
    def join(self) -> None:
        """Wait for all worker processes and clear the process list."""
        for process in self.processes:
            process.join()
        self.processes.clear()
    def __del__(self) -> None:
        """
        Terminate processes if the user hasn't joined. This is necessary as
        leaving stray processes running can corrupt shared state. In brief,
        we've observed shared memory counters being reused (when the memory was
        free from the perspective of the parent process) while the stray
        workers still held a reference to them.
        For a discussion of using destructors in Python in this manner, see
        https://eli.thegreenplace.net/2009/06/12/safely-using-destructors-in-python/.
        """
        for process in self.processes:
            process.terminate()
| [
"os.getpid",
"torch.multiprocessing.log_to_stderr",
"torch.multiprocessing.Process",
"torch.multiprocessing.Queue",
"glob.glob",
"torch.multiprocessing.Value",
"numpy.random.shuffle"
] | [((2986, 3010), 'torch.multiprocessing.Queue', 'Queue', (['output_queue_size'], {}), '(output_queue_size)\n', (2991, 3010), False, 'from torch.multiprocessing import Process, Queue, Value, log_to_stderr\n'), ((4546, 4571), 'glob.glob', 'glob.glob', (['self.file_path'], {}), '(self.file_path)\n', (4555, 4571), False, 'import glob\n'), ((4808, 4867), 'torch.multiprocessing.Queue', 'Queue', (['(num_shards * self.epochs_per_read + self.num_workers)'], {}), '(num_shards * self.epochs_per_read + self.num_workers)\n', (4813, 4867), False, 'from torch.multiprocessing import Process, Queue, Value, log_to_stderr\n'), ((5344, 5372), 'torch.multiprocessing.Value', 'Value', (['"""i"""', 'self.num_workers'], {}), "('i', self.num_workers)\n", (5349, 5372), False, 'from torch.multiprocessing import Process, Queue, Value, log_to_stderr\n'), ((5407, 5420), 'torch.multiprocessing.Value', 'Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (5412, 5420), False, 'from torch.multiprocessing import Process, Queue, Value, log_to_stderr\n'), ((754, 769), 'torch.multiprocessing.log_to_stderr', 'log_to_stderr', ([], {}), '()\n', (767, 769), False, 'from torch.multiprocessing import Process, Queue, Value, log_to_stderr\n'), ((4926, 4951), 'numpy.random.shuffle', 'np.random.shuffle', (['shards'], {}), '(shards)\n', (4943, 4951), True, 'import numpy as np\n'), ((5493, 5642), 'torch.multiprocessing.Process', 'Process', ([], {'target': '_worker', 'args': '(self.call_back, self.input_queue, self.output_queue, self.\n num_active_workers, self.num_inflight_items, worker_id)'}), '(target=_worker, args=(self.call_back, self.input_queue, self.\n output_queue, self.num_active_workers, self.num_inflight_items, worker_id))\n', (5500, 5642), False, 'from torch.multiprocessing import Process, Queue, Value, log_to_stderr\n'), ((1385, 1396), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1394, 1396), False, 'import os\n')] |
import numpy as np
import pytest
import autofit as af
import autofit.graphical as ep
from test_autofit.graphical.gaussian.model import Gaussian, make_data, Analysis
@pytest.fixture(name="make_model_factor")
def make_make_model_factor(normalization, normalization_prior, x):
    """
    Fixture producing a factory for AnalysisFactor instances.

    Each factor fits one Gaussian; the normalization value (for the data)
    and the normalization prior are shared across all factors built by the
    returned factory.
    """
    def build(centre: float, sigma: float, optimiser=None) -> ep.AnalysisFactor:
        # Synthesize the data for this particular Gaussian.  The
        # normalization value comes from the shared fixture.
        observed = make_data(
            Gaussian(centre=centre, normalization=normalization, sigma=sigma),
            x,
        )
        # Prior model for the fit; the normalization prior is the shared one.
        model = af.PriorModel(
            Gaussian,
            centre=af.GaussianPrior(mean=50, sigma=20),
            normalization=normalization_prior,
            sigma=af.GaussianPrior(mean=10, sigma=10),
        )
        # Combine the likelihood with the prior model into a factor-graph
        # node.  A custom optimiser, when given, replaces the default one
        # for this factor only.
        return ep.AnalysisFactor(
            model,
            analysis=Analysis(x=x, y=observed),
            optimiser=optimiser,
        )
    return build
@pytest.fixture(name="normalization")
def make_normalization():
    """The normalization value shared by all generated Gaussians."""
    return 25.0
@pytest.fixture(name="normalization_prior")
def make_normalization_prior():
    """The normalization prior shared by all factors."""
    return af.GaussianPrior(mean=25, sigma=10)
@pytest.fixture(name="factor_model")
def make_factor_model_collection(make_model_factor):
    """
    Two Gaussians fit with a shared variable: the normalization value and
    its prior are common to both factors.  Combining the two
    LikelihoodModels yields a factor model, which can compute all the
    variables and messages required as well as construct a factor graph
    representing a fit on the ensemble.
    """
    factors = [
        make_model_factor(centre=centre, sigma=sigma)
        for centre, sigma in ((40, 10), (60, 15))
    ]
    return ep.FactorGraphModel(*factors)
def test_custom_optimiser(make_model_factor):
    """A per-factor optimiser takes precedence over the default one."""
    custom = ep.LaplaceFactorOptimiser()
    with_custom = make_model_factor(centre=40, sigma=10, optimiser=custom)
    without_custom = make_model_factor(centre=60, sigma=15)
    graph_model = ep.FactorGraphModel(with_custom, without_custom)
    fallback = ep.LaplaceFactorOptimiser()
    optimisers = graph_model._make_ep_optimiser(fallback).factor_optimisers
    # The factor constructed with its own optimiser keeps it; the other
    # falls back to the default passed to _make_ep_optimiser.
    assert optimisers[with_custom] is custom
    assert optimisers[without_custom] is fallback
def test_factor_model_attributes(factor_model):
    """
    There are:
    - 5 messages, one per prior
    - 7 factors, one per prior plus one per likelihood
    """
    message_count = len(factor_model.message_dict)
    factor_count = len(factor_model.graph.factors)
    assert message_count == 5
    assert factor_count == 7
def _test_optimise_factor_model(factor_model):
    """
    Optimise the ensemble model.  (Leading underscore keeps pytest's
    default collection from running this.)
    """
    optimiser = ep.LaplaceFactorOptimiser()
    collection = factor_model.optimise(optimiser)
    # What we get back is actually a PriorModelCollection: the shared
    # normalization is recovered and remains a single shared object.
    assert pytest.approx(collection[0].normalization.mean, rel=0.1) == 25.0
    assert collection[0].normalization is collection[1].normalization
def test_gaussian():
    """End-to-end: fitting one Gaussian recovers the generating parameters."""
    n_observations = 100
    xs = np.arange(n_observations)
    ys = make_data(Gaussian(centre=50.0, normalization=25.0, sigma=10.0), xs)
    prior_model = af.PriorModel(
        Gaussian,
        centre=af.GaussianPrior(mean=50, sigma=20),
        normalization=af.GaussianPrior(mean=25, sigma=10),
        sigma=af.GaussianPrior(mean=10, sigma=10),
    )
    factor = ep.AnalysisFactor(prior_model, analysis=Analysis(x=xs, y=ys))
    model = factor.optimise(ep.LaplaceFactorOptimiser())
    # Each posterior mean should land near the value used to generate the data.
    for attribute, expected in (("centre", 50), ("normalization", 25), ("sigma", 10)):
        assert getattr(model, attribute).mean == pytest.approx(expected, rel=0.1)
@pytest.fixture(name="prior_model")
def make_prior_model():
    """A bare PriorModel wrapping the Gaussian profile."""
    return af.PriorModel(Gaussian)
@pytest.fixture(name="likelihood_model")
def make_factor_model(prior_model):
    """An AnalysisFactor whose analysis reports a constant log likelihood."""
    class ConstantAnalysis(af.Analysis):
        @staticmethod
        def log_likelihood_function(*_):
            return 1
    return ep.AnalysisFactor(prior_model, analysis=ConstantAnalysis())
def test_messages(likelihood_model):
    """One message per Gaussian prior."""
    message_count = len(likelihood_model.message_dict)
    assert message_count == 3
def test_graph(likelihood_model):
    """Three prior factors plus the likelihood factor."""
    graph = likelihood_model.graph
    factor_count = len(graph.factors)
    assert factor_count == 4
def test_prior_model_node(likelihood_model):
    """Calling the graph with one value per variable yields a FactorValue."""
    node = likelihood_model.graph
    inputs = {variable: np.array([0.5]) for variable in node.variables}
    assert isinstance(node(inputs), ep.FactorValue)
| [
"test_autofit.graphical.gaussian.model.Gaussian",
"test_autofit.graphical.gaussian.model.Analysis",
"autofit.PriorModel",
"pytest.fixture",
"numpy.arange",
"numpy.array",
"autofit.graphical.LaplaceFactorOptimiser",
"autofit.graphical.FactorGraphModel",
"pytest.approx",
"autofit.GaussianPrior"
] | [((177, 217), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""make_model_factor"""'}), "(name='make_model_factor')\n", (191, 217), False, 'import pytest\n'), ((1848, 1884), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""normalization"""'}), "(name='normalization')\n", (1862, 1884), False, 'import pytest\n'), ((1943, 1985), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""normalization_prior"""'}), "(name='normalization_prior')\n", (1957, 1985), False, 'import pytest\n'), ((2081, 2116), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""factor_model"""'}), "(name='factor_model')\n", (2095, 2116), False, 'import pytest\n'), ((5065, 5099), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""prior_model"""'}), "(name='prior_model')\n", (5079, 5099), False, 'import pytest\n'), ((5167, 5206), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""likelihood_model"""'}), "(name='likelihood_model')\n", (5181, 5206), False, 'import pytest\n'), ((2039, 2074), 'autofit.GaussianPrior', 'af.GaussianPrior', ([], {'mean': '(25)', 'sigma': '(10)'}), '(mean=25, sigma=10)\n', (2055, 2074), True, 'import autofit as af\n'), ((2888, 2915), 'autofit.graphical.LaplaceFactorOptimiser', 'ep.LaplaceFactorOptimiser', ([], {}), '()\n', (2913, 2915), True, 'import autofit.graphical as ep\n'), ((3136, 3175), 'autofit.graphical.FactorGraphModel', 'ep.FactorGraphModel', (['factor_1', 'factor_2'], {}), '(factor_1, factor_2)\n', (3155, 3175), True, 'import autofit.graphical as ep\n'), ((3219, 3246), 'autofit.graphical.LaplaceFactorOptimiser', 'ep.LaplaceFactorOptimiser', ([], {}), '()\n', (3244, 3246), True, 'import autofit.graphical as ep\n'), ((3939, 3966), 'autofit.graphical.LaplaceFactorOptimiser', 'ep.LaplaceFactorOptimiser', ([], {}), '()\n', (3964, 3966), True, 'import autofit.graphical as ep\n'), ((4308, 4333), 'numpy.arange', 'np.arange', (['n_observations'], {}), '(n_observations)\n', (4317, 4333), True, 'import numpy as np\n'), ((4799, 4826), 
'autofit.graphical.LaplaceFactorOptimiser', 'ep.LaplaceFactorOptimiser', ([], {}), '()\n', (4824, 4826), True, 'import autofit.graphical as ep\n'), ((5137, 5160), 'autofit.PriorModel', 'af.PriorModel', (['Gaussian'], {}), '(Gaussian)\n', (5150, 5160), True, 'import autofit as af\n'), ((4119, 4175), 'pytest.approx', 'pytest.approx', (['collection[0].normalization.mean'], {'rel': '(0.1)'}), '(collection[0].normalization.mean, rel=0.1)\n', (4132, 4175), False, 'import pytest\n'), ((4353, 4406), 'test_autofit.graphical.gaussian.model.Gaussian', 'Gaussian', ([], {'centre': '(50.0)', 'normalization': '(25.0)', 'sigma': '(10.0)'}), '(centre=50.0, normalization=25.0, sigma=10.0)\n', (4361, 4406), False, 'from test_autofit.graphical.gaussian.model import Gaussian, make_data, Analysis\n'), ((4906, 4932), 'pytest.approx', 'pytest.approx', (['(50)'], {'rel': '(0.1)'}), '(50, rel=0.1)\n', (4919, 4932), False, 'import pytest\n'), ((4973, 4999), 'pytest.approx', 'pytest.approx', (['(25)'], {'rel': '(0.1)'}), '(25, rel=0.1)\n', (4986, 4999), False, 'import pytest\n'), ((5032, 5058), 'pytest.approx', 'pytest.approx', (['(10)'], {'rel': '(0.1)'}), '(10, rel=0.1)\n', (5045, 5058), False, 'import pytest\n'), ((709, 774), 'test_autofit.graphical.gaussian.model.Gaussian', 'Gaussian', ([], {'centre': 'centre', 'normalization': 'normalization', 'sigma': 'sigma'}), '(centre=centre, normalization=normalization, sigma=sigma)\n', (717, 774), False, 'from test_autofit.graphical.gaussian.model import Gaussian, make_data, Analysis\n'), ((4482, 4517), 'autofit.GaussianPrior', 'af.GaussianPrior', ([], {'mean': '(50)', 'sigma': '(20)'}), '(mean=50, sigma=20)\n', (4498, 4517), True, 'import autofit as af\n'), ((4542, 4577), 'autofit.GaussianPrior', 'af.GaussianPrior', ([], {'mean': '(25)', 'sigma': '(10)'}), '(mean=25, sigma=10)\n', (4558, 4577), True, 'import autofit as af\n'), ((4594, 4629), 'autofit.GaussianPrior', 'af.GaussianPrior', ([], {'mean': '(10)', 'sigma': '(10)'}), '(mean=10, 
sigma=10)\n', (4610, 4629), True, 'import autofit as af\n'), ((4719, 4737), 'test_autofit.graphical.gaussian.model.Analysis', 'Analysis', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (4727, 4737), False, 'from test_autofit.graphical.gaussian.model import Gaussian, make_data, Analysis\n'), ((5820, 5835), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (5828, 5835), True, 'import numpy as np\n'), ((1074, 1109), 'autofit.GaussianPrior', 'af.GaussianPrior', ([], {'mean': '(50)', 'sigma': '(20)'}), '(mean=50, sigma=20)\n', (1090, 1109), True, 'import autofit as af\n'), ((1178, 1213), 'autofit.GaussianPrior', 'af.GaussianPrior', ([], {'mean': '(10)', 'sigma': '(10)'}), '(mean=10, sigma=10)\n', (1194, 1213), True, 'import autofit as af\n'), ((1697, 1715), 'test_autofit.graphical.gaussian.model.Analysis', 'Analysis', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (1705, 1715), False, 'from test_autofit.graphical.gaussian.model import Gaussian, make_data, Analysis\n')] |
from .layer import Layer
from ..util import make_list
import numpy as np
from numpy.random import default_rng
rng = default_rng()  # module-level RNG; used for weight initialization (see RNN.configure)
class RNNLayer(Layer):
    """
    Base class for recurrent layers.

    Subclasses implement the per-timestep transition (``init_state``,
    ``forward``, ``backward``); this class drives the time loop in
    ``compute`` and the reverse-time loop in ``grads``.  Inputs arrive as
    [batch, time, width] and are transposed internally to
    [time, batch, width].
    """

    def __init__(self, return_sequences=True, reversed=False, **args):
        """
        return_sequences: if True, emit one output per timestep
            ([batch, time, out]); otherwise only the final timestep's output.
        reversed: if True, process timesteps from last to first.  (The
            parameter name shadows the builtin intentionally, for API
            compatibility with existing callers.)
        """
        Layer.__init__(self, **args)
        self.Reversed = reversed
        self.ReturnSequences = return_sequences

    def init_state(self, mb):
        """Return the initial recurrent state for a minibatch of size ``mb``."""
        # Previously followed by unreachable `return initial_state`, which
        # referenced an undefined name; removed.
        raise NotImplementedError()

    def forward(self, t, x, s, context):
        """Single-step transition: return (y_t, new_state, context)."""
        raise NotImplementedError()

    def backward(self, t, gy_t, gstate_t, gw, context):
        """
        Single-step backward pass.  Given dL/dy_t, dL/dstate_t and the
        accumulated weight gradients ``gw`` (initially zeros), return
        (dL/dx_t, gw, dL/dstate_{t-1}).  dL/dx_t is an ndarray, not a list.
        """
        raise NotImplementedError()

    def init_context(self, x, state_in):
        """Optional per-sequence scratch storage threaded through forward/backward."""
        return None

    def compute(self, xs, state_in=None):
        """
        Run the recurrence over a whole sequence.

        xs: a single input of shape [batch, time, width] (or a one-element
            list of such).
        state_in: optional initial state; defaults to ``init_state``.
        Returns (y, final_state, context); y is [batch, time, out] when
        ReturnSequences is set, otherwise the final timestep's output.
        """
        xs = make_list(xs)
        assert len(xs) == 1
        x = xs[0]
        assert len(x.shape) == 3, f"Invalid input shape for RNN: {x.shape}. Expected 3 dimensions"
        x = x.transpose((1, 0, 2))      # -> [time, batch, width]
        n, mb, xw = x.shape
        assert n > 0
        if state_in is None:
            state_in = self.init_state(mb)
        context = self.init_context(x, state_in)
        s = state_in
        y = []
        for i in range(n):
            t = n - 1 - i if self.Reversed else i
            xt = x[t]
            yt, c, context = self.forward(t, xt, s, context)
            s = c
            if self.ReturnSequences:
                y.append(yt)
        if self.ReturnSequences:
            y = np.array(y).transpose((1, 0, 2))   # back to [batch, time, out]
        else:
            y = yt
        return y, c, context

    def grads(self, y_grads, s_out_grads, xs, y, context):
        """
        Backpropagate through the whole sequence, visiting timesteps in the
        reverse of the order ``compute`` used.

        y_grads: dL/dy; [batch, time, out] when ReturnSequences, otherwise
            [batch, out], treated as the gradient at the final timestep.
        s_out_grads: dL/d(final state), or None for zeros.
        Returns ([dL/dx], weight gradients, dL/d(state_in)).
        """
        assert len(xs) == 1
        x = xs[0]
        assert len(x.shape) == 3    # at [mb, t, w]
        x = x.transpose((1, 0, 2))
        n, mb, xw = x.shape
        assert n > 0
        if s_out_grads is None:
            s_out_grads = np.zeros_like(self.init_state(mb))
        if not self.ReturnSequences:
            # Only the last timestep produced output; pad earlier steps with zeros.
            yg = np.zeros((n,) + y_grads.shape)
            yg[-1] = y_grads
            y_grads = yg
        else:
            y_grads = y_grads.transpose((1, 0, 2))
        x_grads = np.zeros_like(x)
        gw = [np.zeros_like(w) for w in self.params]
        gc = s_out_grads
        for i in range(n):
            t = i if self.Reversed else n - 1 - i   # reverse of the forward order
            gyt = y_grads[t]
            gxt, gw, gs = self.backward(t, gyt, gc, gw, context)
            x_grads[t, :, :] = gxt
            gc = gs
        return [x_grads.transpose((1, 0, 2))], gw, gc
class RNN(RNNLayer):
    """
    Simple linear recurrent layer.

    At each timestep the row [1 (bias), x_t, state_t] is multiplied by a
    single weight matrix W; the product is split into the output y_t
    (first Nout columns) and the next state c_t (remaining NC columns).
    No nonlinearity is applied inside this layer.
    """

    def __init__(self, out_size, recurrent_size=None, return_sequences=True,
                 reversed=False, **args):
        """
        out_size: width of the per-timestep output.
        recurrent_size: width of the recurrent state (defaults to out_size).
        return_sequences: see RNNLayer.
        reversed: process timesteps last-to-first.
        """
        # BUG FIX: the previous code passed `reversed=reversed` without a
        # `reversed` parameter in scope, so the *builtin* function `reversed`
        # (always truthy) was forwarded — every RNN silently ran in reversed
        # order, and passing reversed=... via **args raised a duplicate
        # keyword TypeError.  A real keyword with default False restores the
        # intended behavior and stays backward-compatible.
        RNNLayer.__init__(self, return_sequences=return_sequences,
                          reversed=reversed, **args)
        self.Nout = out_size
        self.NC = recurrent_size or out_size
        self.Nin = None     # set by configure() from the input shape

    def configure(self, inputs):
        """
        Infer Nin from the single input's shape (time, features), allocate
        W of shape (1 + Nin + NC, Nout + NC), and return the output shape.
        """
        assert len(inputs) == 1
        shape = inputs[0].Shape
        # Note: the old assertion message claimed "3 dimensions" while the
        # check requires 2 — the message is corrected here.
        assert len(shape) == 2, f"Expected shape with 2 dimensions, got {shape}"
        self.Nin = shape[-1]
        # Scaled-normal initialization; row 0 multiplies the constant 1 in
        # the context row and therefore acts as the bias.
        self.W = rng.normal(
            size=(1 + self.Nin + self.NC, self.Nout + self.NC),
            scale=1.0 / np.sqrt(self.Nin + 2 * self.NC + self.Nout),
        )
        self.W[0, :] = 0    # initialize biases to zero
        return (shape[0], self.Nout) if self.ReturnSequences else (self.Nout,)

    @property
    def params(self):
        """Trainable parameters: the single weight matrix."""
        return [self.W]

    def set_weights(self, w):
        """Install weights produced by `params` (a one-element list)."""
        self.W = w[0]

    def init_state(self, mb):
        """Zero initial state, one row per minibatch sample."""
        return np.zeros((mb, self.NC))

    def init_context(self, x, state_in):
        """
        Pre-build U, the per-timestep rows [1, x_t, state_t] multiplied by W
        in forward(); the state slot (last NC columns) is filled lazily.
        """
        n, b = x.shape[:2]
        d = self.NC
        U = np.empty((n, b, 1 + d + self.Nin))
        U[:, :, 0] = 1.0                # constant 1 for the bias row of W
        U[:, :, 1:1 + self.Nin] = x
        return U

    def forward(self, t, x, s, context):
        """One step: fill the state slot of U[t], multiply by W, split y/c."""
        b = x.shape[0]
        d = self.NC
        if s is None:
            s = np.zeros((b, d))
        U = context
        U[t, :, -d:] = s
        Vt = U[t].dot(self.W)
        Yt = Vt[:, :self.Nout]
        Ct = Vt[:, self.Nout:]
        return Yt, Ct, context

    def backward(self, t, gy_t, gstate_t, gw, context):
        """
        One backward step: combine output/state gradients, push through W,
        accumulate the weight gradient, and split input/state gradients.
        """
        U = context
        d = self.NC
        dVt = np.concatenate([gy_t, gstate_t], axis=-1)
        dUt = dVt.dot(self.W.T)
        gw = gw[0]
        gw += U[t].T.dot(dVt)           # accumulate dL/dW
        gx = dUt[:, 1:1 + self.Nin]
        gs = dUt[:, -d:]
        return gx, [gw], gs             # gx is ndarray, not list!
| [
"numpy.zeros_like",
"numpy.empty",
"numpy.zeros",
"numpy.random.default_rng",
"numpy.array",
"numpy.concatenate",
"numpy.sqrt"
] | [((117, 130), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (128, 130), False, 'from numpy.random import default_rng\n'), ((2487, 2503), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2500, 2503), True, 'import numpy as np\n'), ((4786, 4809), 'numpy.zeros', 'np.zeros', (['(mb, self.NC)'], {}), '((mb, self.NC))\n', (4794, 4809), True, 'import numpy as np\n'), ((4972, 5006), 'numpy.empty', 'np.empty', (['(n, b, 1 + d + self.Nin)'], {}), '((n, b, 1 + d + self.Nin))\n', (4980, 5006), True, 'import numpy as np\n'), ((5896, 5937), 'numpy.concatenate', 'np.concatenate', (['[gy_t, gstate_t]'], {'axis': '(-1)'}), '([gy_t, gstate_t], axis=-1)\n', (5910, 5937), True, 'import numpy as np\n'), ((2310, 2340), 'numpy.zeros', 'np.zeros', (['((n,) + y_grads.shape)'], {}), '((n,) + y_grads.shape)\n', (2318, 2340), True, 'import numpy as np\n'), ((2571, 2587), 'numpy.zeros_like', 'np.zeros_like', (['w'], {}), '(w)\n', (2584, 2587), True, 'import numpy as np\n'), ((5322, 5338), 'numpy.zeros', 'np.zeros', (['(b, d)'], {}), '((b, d))\n', (5330, 5338), True, 'import numpy as np\n'), ((1801, 1812), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1809, 1812), True, 'import numpy as np\n'), ((4017, 4060), 'numpy.sqrt', 'np.sqrt', (['(self.Nin + 2 * self.NC + self.Nout)'], {}), '(self.Nin + 2 * self.NC + self.Nout)\n', (4024, 4060), True, 'import numpy as np\n')] |
import util
import numpy as np
import random
import os
from os.path import join
import json
import pickle
import logging
import torch
logger = logging.getLogger(__name__)  # module-level logger
class CorefDataProcessor:
    """Loads coref examples, tensorizes them, and caches the tensors on disk."""

    def __init__(self, config, language='english'):
        self.config = config
        self.language = language
        self.max_seg_len = config['max_segment_len']
        self.max_training_seg = config['max_training_sentences']
        self.data_dir = config['data_dir']
        self.tokenizer = util.get_tokenizer(config['bert_tokenizer_name'])
        # Dataset tensors are loaded lazily by get_tensor_examples().
        self.tensor_samples = None
        self.stored_info = None

    def get_tensor_examples_from_custom_input(self, samples):
        """Tensorize interactive samples; never touches the on-disk cache."""
        tensorizer = Tensorizer(self.config, self.tokenizer)
        tensorized = [
            (doc_key, self.convert_to_torch_tensor(*tensor))
            for doc_key, tensor in (
                tensorizer.tensorize_example(sample, False) for sample in samples
            )
        ]
        return tensorized, tensorizer.stored_info

    def get_tensor_examples(self):
        """Return (train, dev, test) tensorized samples, using a pickle cache."""
        cache_path = self.get_cache_path()
        if os.path.exists(cache_path):
            # Load cached tensors if they exist.
            with open(cache_path, 'rb') as f:
                self.tensor_samples, self.stored_info = pickle.load(f)
            logger.info('Loaded tensorized examples from cache')
        else:
            # Generate tensorized samples for every split, then cache them.
            self.tensor_samples = {}
            tensorizer = Tensorizer(self.config, self.tokenizer)
            for split, name in zip(('trn', 'dev', 'tst'), ('train', 'dev', 'test')):
                path = join(self.data_dir, f'{name}.{self.language}.{self.max_seg_len}.jsonlines')
                logger.info('Tensorizing examples from %s; results will be cached)' % path)
                with open(path, 'r') as f:
                    samples = [json.loads(line) for line in f.readlines()]
                self.tensor_samples[split] = [
                    (doc_key, self.convert_to_torch_tensor(*tensor))
                    for doc_key, tensor in (
                        tensorizer.tensorize_example(sample, split == 'trn')
                        for sample in samples
                    )
                ]
            self.stored_info = tensorizer.stored_info
            with open(cache_path, 'wb') as f:
                pickle.dump((self.tensor_samples, self.stored_info), f)
        return self.tensor_samples['trn'], self.tensor_samples['dev'], self.tensor_samples['tst']

    def get_stored_info(self):
        """Metadata gathered during tensorization (subtoken maps, gold, genres)."""
        return self.stored_info

    @classmethod
    def convert_to_torch_tensor(cls, input_ids, input_mask, speaker_ids, sentence_len, genre, sentence_map,
                                is_training, gold_starts, gold_ends, gold_mention_cluster_map):
        """Wrap each field in a torch tensor of the appropriate dtype."""
        def as_long(values):
            return torch.tensor(values, dtype=torch.long)
        return (
            as_long(input_ids),
            as_long(input_mask),
            as_long(speaker_ids),
            as_long(sentence_len),
            as_long(genre),
            as_long(sentence_map),
            torch.tensor(is_training, dtype=torch.bool),
            as_long(gold_starts),
            as_long(gold_ends),
            as_long(gold_mention_cluster_map),
        )

    def get_cache_path(self):
        """Cache-file path keyed by language and segment configuration."""
        return join(self.data_dir,
                    f'cached.tensors.{self.language}.{self.max_seg_len}.{self.max_training_seg}.bin')
class Tensorizer:
    """
    Converts a raw coref example (a dict with 'sentences', 'speakers',
    'clusters', 'sentence_map', 'doc_key', ...) into the flat tuple of
    arrays consumed by CorefDataProcessor.convert_to_torch_tensor, while
    recording per-document metadata in ``stored_info``.
    """
    def __init__(self, config, tokenizer):
        self.config = config
        self.tokenizer = tokenizer
        # Will be used in evaluation
        self.stored_info = {}
        self.stored_info['tokens'] = {}  # {doc_key: ...}
        self.stored_info['subtoken_maps'] = {}  # {doc_key: ...}; mapping back to tokens
        self.stored_info['gold'] = {}  # {doc_key: ...}
        self.stored_info['genre_dict'] = {genre: idx for idx, genre in enumerate(config['genres'])}
    def _tensorize_spans(self, spans):
        """Split a list of (start, end) pairs into parallel start/end arrays."""
        if len(spans) > 0:
            starts, ends = zip(*spans)
        else:
            starts, ends = [], []
        return np.array(starts), np.array(ends)
    def _tensorize_span_w_labels(self, spans, label_dict):
        """Like _tensorize_spans, but for (start, end, label) triples; labels are mapped through label_dict."""
        if len(spans) > 0:
            starts, ends, labels = zip(*spans)
        else:
            starts, ends, labels = [], [], []
        return np.array(starts), np.array(ends), np.array([label_dict[label] for label in labels])
    def _get_speaker_dict(self, speakers):
        """Map speaker names to ids; 0 is UNK and 1 is the [SPL] separator."""
        speaker_dict = {'UNK': 0, '[SPL]': 1}
        for speaker in speakers:
            # NOTE(review): this condition currently has no effect — the body
            # is `pass`, so max_num_speakers is never actually enforced; a
            # `break` here would cap the dict as the comment suggests.
            if len(speaker_dict) > self.config['max_num_speakers']:
                pass  # 'break' to limit # speakers
            if speaker not in speaker_dict:
                speaker_dict[speaker] = len(speaker_dict)
        return speaker_dict
    def tensorize_example(self, example, is_training):
        """
        Turn one example dict into (doc_key, example_tensor).

        The tensor tuple is (input_ids, input_mask, speaker_ids,
        sentence_len, genre, sentence_map, is_training, gold_starts,
        gold_ends, gold_mention_cluster_map); long training documents are
        truncated to max_training_sentences segments.
        """
        # Mentions and clusters
        clusters = example['clusters']
        gold_mentions = sorted(tuple(mention) for mention in util.flatten(clusters))
        gold_mention_map = {mention: idx for idx, mention in enumerate(gold_mentions)}
        gold_mention_cluster_map = np.zeros(len(gold_mentions))  # 0: no cluster
        for cluster_id, cluster in enumerate(clusters):
            for mention in cluster:
                # Cluster ids are 1-based so that 0 can mean "no cluster".
                gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1
        # Speakers
        speakers = example['speakers']
        speaker_dict = self._get_speaker_dict(util.flatten(speakers))
        # Sentences/segments
        sentences = example['sentences']  # Segments
        sentence_map = example['sentence_map']
        num_words = sum([len(s) for s in sentences])
        max_sentence_len = self.config['max_segment_len']
        sentence_len = np.array([len(s) for s in sentences])
        # Bert input: pad every segment to max_segment_len with zeros.
        input_ids, input_mask, speaker_ids = [], [], []
        for idx, (sent_tokens, sent_speakers) in enumerate(zip(sentences, speakers)):
            sent_input_ids = self.tokenizer.convert_tokens_to_ids(sent_tokens)
            sent_input_mask = [1] * len(sent_input_ids)
            sent_speaker_ids = [speaker_dict[speaker] for speaker in sent_speakers]
            while len(sent_input_ids) < max_sentence_len:
                sent_input_ids.append(0)
                sent_input_mask.append(0)
                sent_speaker_ids.append(0)
            input_ids.append(sent_input_ids)
            input_mask.append(sent_input_mask)
            speaker_ids.append(sent_speaker_ids)
        input_ids = np.array(input_ids)
        input_mask = np.array(input_mask)
        speaker_ids = np.array(speaker_ids)
        # Sanity check: the mask must cover exactly the unpadded words.
        assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask))
        # Keep info to store
        doc_key = example['doc_key']
        self.stored_info['subtoken_maps'][doc_key] = example.get('subtoken_map', None)
        self.stored_info['gold'][doc_key] = example['clusters']
        # self.stored_info['tokens'][doc_key] = example['tokens']
        # Construct example; the genre is looked up from the doc_key prefix.
        genre = self.stored_info['genre_dict'].get(doc_key[:2], 0)
        gold_starts, gold_ends = self._tensorize_spans(gold_mentions)
        example_tensor = (input_ids, input_mask, speaker_ids, sentence_len, genre, sentence_map, is_training,
                          gold_starts, gold_ends, gold_mention_cluster_map)
        if is_training and len(sentences) > self.config['max_training_sentences']:
            return doc_key, self.truncate_example(*example_tensor)
        else:
            return doc_key, example_tensor
    def truncate_example(self, input_ids, input_mask, speaker_ids, sentence_len, genre, sentence_map, is_training,
                         gold_starts, gold_ends, gold_mention_cluster_map, sentence_offset=None):
        """
        Keep a window of max_training_sentences consecutive segments
        (randomly placed unless sentence_offset is given) and re-index all
        word-level fields and gold spans relative to the window start.
        """
        max_sentences = self.config["max_training_sentences"]
        num_sentences = input_ids.shape[0]
        assert num_sentences > max_sentences
        sent_offset = sentence_offset
        if sent_offset is None:
            sent_offset = random.randint(0, num_sentences - max_sentences)
        word_offset = sentence_len[:sent_offset].sum()
        num_words = sentence_len[sent_offset: sent_offset + max_sentences].sum()
        input_ids = input_ids[sent_offset: sent_offset + max_sentences, :]
        input_mask = input_mask[sent_offset: sent_offset + max_sentences, :]
        speaker_ids = speaker_ids[sent_offset: sent_offset + max_sentences, :]
        sentence_len = sentence_len[sent_offset: sent_offset + max_sentences]
        sentence_map = sentence_map[word_offset: word_offset + num_words]
        # Keep only gold spans that overlap the window, shifted to its origin.
        gold_spans = (gold_starts < word_offset + num_words) & (gold_ends >= word_offset)
        gold_starts = gold_starts[gold_spans] - word_offset
        gold_ends = gold_ends[gold_spans] - word_offset
        gold_mention_cluster_map = gold_mention_cluster_map[gold_spans]
        return input_ids, input_mask, speaker_ids, sentence_len, genre, sentence_map, \
               is_training, gold_starts, gold_ends, gold_mention_cluster_map
| [
"pickle.dump",
"numpy.sum",
"random.randint",
"json.loads",
"os.path.exists",
"util.get_tokenizer",
"logging.getLogger",
"util.flatten",
"pickle.load",
"numpy.array",
"os.path.join",
"torch.tensor"
] | [((144, 171), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (161, 171), False, 'import logging\n'), ((502, 551), 'util.get_tokenizer', 'util.get_tokenizer', (["config['bert_tokenizer_name']"], {}), "(config['bert_tokenizer_name'])\n", (520, 551), False, 'import util\n'), ((1211, 1237), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (1225, 1237), False, 'import os\n'), ((3161, 3202), 'torch.tensor', 'torch.tensor', (['input_ids'], {'dtype': 'torch.long'}), '(input_ids, dtype=torch.long)\n', (3173, 3202), False, 'import torch\n'), ((3224, 3266), 'torch.tensor', 'torch.tensor', (['input_mask'], {'dtype': 'torch.long'}), '(input_mask, dtype=torch.long)\n', (3236, 3266), False, 'import torch\n'), ((3289, 3332), 'torch.tensor', 'torch.tensor', (['speaker_ids'], {'dtype': 'torch.long'}), '(speaker_ids, dtype=torch.long)\n', (3301, 3332), False, 'import torch\n'), ((3356, 3400), 'torch.tensor', 'torch.tensor', (['sentence_len'], {'dtype': 'torch.long'}), '(sentence_len, dtype=torch.long)\n', (3368, 3400), False, 'import torch\n'), ((3417, 3454), 'torch.tensor', 'torch.tensor', (['genre'], {'dtype': 'torch.long'}), '(genre, dtype=torch.long)\n', (3429, 3454), False, 'import torch\n'), ((3478, 3522), 'torch.tensor', 'torch.tensor', (['sentence_map'], {'dtype': 'torch.long'}), '(sentence_map, dtype=torch.long)\n', (3490, 3522), False, 'import torch\n'), ((3545, 3588), 'torch.tensor', 'torch.tensor', (['is_training'], {'dtype': 'torch.bool'}), '(is_training, dtype=torch.bool)\n', (3557, 3588), False, 'import torch\n'), ((3611, 3654), 'torch.tensor', 'torch.tensor', (['gold_starts'], {'dtype': 'torch.long'}), '(gold_starts, dtype=torch.long)\n', (3623, 3654), False, 'import torch\n'), ((3675, 3716), 'torch.tensor', 'torch.tensor', (['gold_ends'], {'dtype': 'torch.long'}), '(gold_ends, dtype=torch.long)\n', (3687, 3716), False, 'import torch\n'), ((3752, 3808), 'torch.tensor', 'torch.tensor', 
(['gold_mention_cluster_map'], {'dtype': 'torch.long'}), '(gold_mention_cluster_map, dtype=torch.long)\n', (3764, 3808), False, 'import torch\n'), ((4027, 4137), 'os.path.join', 'join', (['self.data_dir', 'f"""cached.tensors.{self.language}.{self.max_seg_len}.{self.max_training_seg}.bin"""'], {}), "(self.data_dir,\n f'cached.tensors.{self.language}.{self.max_seg_len}.{self.max_training_seg}.bin'\n )\n", (4031, 4137), False, 'from os.path import join\n'), ((7244, 7263), 'numpy.array', 'np.array', (['input_ids'], {}), '(input_ids)\n', (7252, 7263), True, 'import numpy as np\n'), ((7285, 7305), 'numpy.array', 'np.array', (['input_mask'], {}), '(input_mask)\n', (7293, 7305), True, 'import numpy as np\n'), ((7328, 7349), 'numpy.array', 'np.array', (['speaker_ids'], {}), '(speaker_ids)\n', (7336, 7349), True, 'import numpy as np\n'), ((4822, 4838), 'numpy.array', 'np.array', (['starts'], {}), '(starts)\n', (4830, 4838), True, 'import numpy as np\n'), ((4840, 4854), 'numpy.array', 'np.array', (['ends'], {}), '(ends)\n', (4848, 4854), True, 'import numpy as np\n'), ((5064, 5080), 'numpy.array', 'np.array', (['starts'], {}), '(starts)\n', (5072, 5080), True, 'import numpy as np\n'), ((5082, 5096), 'numpy.array', 'np.array', (['ends'], {}), '(ends)\n', (5090, 5096), True, 'import numpy as np\n'), ((5098, 5147), 'numpy.array', 'np.array', (['[label_dict[label] for label in labels]'], {}), '([label_dict[label] for label in labels])\n', (5106, 5147), True, 'import numpy as np\n'), ((6190, 6212), 'util.flatten', 'util.flatten', (['speakers'], {}), '(speakers)\n', (6202, 6212), False, 'import util\n'), ((7378, 7396), 'numpy.sum', 'np.sum', (['input_mask'], {}), '(input_mask)\n', (7384, 7396), True, 'import numpy as np\n'), ((7410, 7428), 'numpy.sum', 'np.sum', (['input_mask'], {}), '(input_mask)\n', (7416, 7428), True, 'import numpy as np\n'), ((8735, 8783), 'random.randint', 'random.randint', (['(0)', '(num_sentences - max_sentences)'], {}), '(0, num_sentences - 
max_sentences)\n', (8749, 8783), False, 'import random\n'), ((1385, 1399), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1396, 1399), False, 'import pickle\n'), ((1672, 1746), 'os.path.join', 'join', (['self.data_dir', 'f"""train.{self.language}.{self.max_seg_len}.jsonlines"""'], {}), "(self.data_dir, f'train.{self.language}.{self.max_seg_len}.jsonlines')\n", (1676, 1746), False, 'from os.path import join\n'), ((1771, 1843), 'os.path.join', 'join', (['self.data_dir', 'f"""dev.{self.language}.{self.max_seg_len}.jsonlines"""'], {}), "(self.data_dir, f'dev.{self.language}.{self.max_seg_len}.jsonlines')\n", (1775, 1843), False, 'from os.path import join\n'), ((1868, 1941), 'os.path.join', 'join', (['self.data_dir', 'f"""test.{self.language}.{self.max_seg_len}.jsonlines"""'], {}), "(self.data_dir, f'test.{self.language}.{self.max_seg_len}.jsonlines')\n", (1872, 1941), False, 'from os.path import join\n'), ((2701, 2756), 'pickle.dump', 'pickle.dump', (['(self.tensor_samples, self.stored_info)', 'f'], {}), '((self.tensor_samples, self.stored_info), f)\n', (2712, 2756), False, 'import pickle\n'), ((5709, 5731), 'util.flatten', 'util.flatten', (['clusters'], {}), '(clusters)\n', (5721, 5731), False, 'import util\n'), ((2215, 2231), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2225, 2231), False, 'import json\n')] |
import numpy as np
import numba
@numba.jit
def f_numba(x):
return x ** 2 - x
@numba.jit
def integrate_f_numba(a, b, N):
s = 0
dx = (b - a) / N
for i in range(N):
s += f_numba(a + i * dx)
return s * dx
@numba.jit
def apply_integrate_f_numba(col_a, col_b, col_N):
n = len(col_N)
res = np.empty(n,dtype=np.float64)
for i in range(n):
res[i] = integrate_f_numba(col_a[i], col_b[i], col_N[i])
return res
| [
"numpy.empty"
] | [((322, 351), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'np.float64'}), '(n, dtype=np.float64)\n', (330, 351), True, 'import numpy as np\n')] |
import os
import glob
import torch
import pickle
import numpy as np
from utils.spectral_graph_partition import *
__all__ = ['GraphData']
class GraphData(object):
def __init__(self, config, split='train'):
assert split == 'train' or split == 'dev' or split == 'test', "no such split"
self.split = split
self.config = config
self.seed = config.seed
self.npr = np.random.RandomState(self.seed)
self.data_path = config.dataset.data_path
self.num_edgetype = config.dataset.num_edge_type
self.model_name = config.model.name
self.use_eigs = True if hasattr(config.model, 'num_eig_vec') else False
if self.use_eigs:
self.num_eigs = config.model.num_eig_vec
if self.model_name == 'GraphSAGE':
self.num_sample_neighbors = config.model.num_sample_neighbors
self.train_data_files = glob.glob(
os.path.join(self.data_path, 'synthetic_train_*.p'))
self.dev_data_files = glob.glob(
os.path.join(self.data_path, 'synthetic_dev_*.p'))
self.test_data_files = glob.glob(
os.path.join(self.data_path, 'synthetic_test_*.p'))
self.num_train = len(self.train_data_files)
self.num_dev = len(self.dev_data_files)
self.num_test = len(self.test_data_files)
self.num_graphs = self.num_train + self.num_dev + self.num_test
def __getitem__(self, index):
if self.split == 'train':
return pickle.load(open(self.train_data_files[index], 'rb'))
elif self.split == 'dev':
return pickle.load(open(self.dev_data_files[index], 'rb'))
else:
return pickle.load(open(self.test_data_files[index], 'rb'))
def __len__(self):
if self.split == 'train':
return self.num_train
elif self.split == 'dev':
return self.num_dev
else:
return self.num_test
def collate_fn(self, batch):
"""
Collate function for mini-batch
N.B.: we pad all samples to the maximum of the mini-batch
"""
assert isinstance(batch, list)
data = {}
batch_size = len(batch)
node_size = [bb['node_feat'].shape[0] for bb in batch]
batch_node_size = max(node_size) # value -> N
pad_node_size = [batch_node_size - nn for nn in node_size]
# pad feature: shape (B, N, D)
data['node_feat'] = torch.stack([
torch.from_numpy(
np.pad(
bb['node_feat'], ((0, pad_node_size[ii]), (0, 0)),
'constant',
constant_values=0.0)) for ii, bb in enumerate(batch)
]).float()
# binary mask: shape (B, N)
data['node_mask'] = torch.stack([
torch.from_numpy(
np.pad(
np.ones(node_size[ii]), (0, pad_node_size[ii]),
'constant',
constant_values=0.0)) for ii, bb in enumerate(batch)
]).byte()
# label: shape (B, O)
data['label'] = torch.cat(
[torch.from_numpy(bb['label']) for bb in batch], dim=0).float()
if self.model_name == 'GPNN':
#########################################################################
# GPNN
# N.B.: one can perform graph partition offline to speed up
#########################################################################
# graph Laplacian of multi-graph: shape (B, N, N, E)
L_multi = np.stack(
[
np.pad(
bb['L_multi'], ((0, pad_node_size[ii]),
(0, pad_node_size[ii]), (0, 0)),
'constant',
constant_values=0.0) for ii, bb in enumerate(batch)
],
axis=0)
# graph Laplacian of simple graph: shape (B, N, N, 1)
L_simple = np.stack(
[
np.expand_dims(
np.pad(
bb['L_simple_4'], (0, pad_node_size[ii]),
'constant',
constant_values=0.0),
axis=3) for ii, bb in enumerate(batch)
],
axis=0)
L = np.concatenate([L_simple, L_multi], axis=3)
data['L'] = torch.from_numpy(L).float()
# graph partition
L_cluster, L_cut = [], []
for ii in range(batch_size):
node_label = spectral_clustering(L_simple[ii, :, :, 0], self.config.model.num_partition)
# Laplacian of clusters and cut
L_cluster_tmp, L_cut_tmp = get_L_cluster_cut(L_simple[ii, :, :, 0], node_label)
L_cluster += [L_cluster_tmp]
L_cut += [L_cut_tmp]
data['L_cluster'] = torch.from_numpy(np.stack(L_cluster, axis=0)).float()
data['L_cut'] = torch.from_numpy(np.stack(L_cut, axis=0)).float()
elif self.model_name == 'GraphSAGE':
#########################################################################
# GraphSAGE
#########################################################################
# N.B.: adjacency mat of GraphSAGE is asymmetric
nonempty_mask = np.zeros((batch_size, batch_node_size, 1))
nn_idx = np.zeros((batch_size, batch_node_size, self.num_sample_neighbors,
self.num_edgetype + 1))
for ii in range(batch_size):
for jj in range(self.num_edgetype + 1):
if jj == 0:
tmp_L = batch[ii]['L_simple_4']
else:
tmp_L = batch[ii]['L_multi'][:, :, jj - 1]
for nn in range(tmp_L.shape[0]):
nn_list = np.nonzero(tmp_L[nn, :])[0]
if len(nn_list) >= self.num_sample_neighbors:
nn_idx[ii, nn, :, jj] = self.npr.choice(
nn_list, size=self.num_sample_neighbors, replace=False)
nonempty_mask[ii, nn] = 1
elif len(nn_list) > 0:
nn_idx[ii, nn, :, jj] = self.npr.choice(
nn_list, size=self.num_sample_neighbors, replace=True)
nonempty_mask[ii, nn] = 1
data['nn_idx'] = torch.from_numpy(nn_idx).long()
data['nonempty_mask'] = torch.from_numpy(nonempty_mask).float()
elif self.model_name == 'GAT':
#########################################################################
# GAT
#########################################################################
# graph Laplacian of multi-graph: shape (B, N, N, E)
L_multi = np.stack(
[
np.pad(
bb['L_multi'], ((0, pad_node_size[ii]),
(0, pad_node_size[ii]), (0, 0)),
'constant',
constant_values=0.0) for ii, bb in enumerate(batch)
],
axis=0)
# graph Laplacian of simple graph: shape (B, N, N, 1)
L_simple = np.stack(
[
np.expand_dims(
np.pad(
bb['L_simple_4'], (0, pad_node_size[ii]),
'constant',
constant_values=0.0),
axis=3) for ii, bb in enumerate(batch)
],
axis=0)
L = np.concatenate([L_simple, L_multi], axis=3)
# trick of graph attention networks
def adj_to_bias(adj, sizes, nhood=1):
nb_graphs = adj.shape[0]
mt = np.empty(adj.shape)
for g in range(nb_graphs):
mt[g] = np.eye(adj.shape[1])
for _ in range(nhood):
mt[g] = np.matmul(mt[g], (adj[g] + np.eye(adj.shape[1])))
for i in range(sizes[g]):
for j in range(sizes[g]):
if mt[g][i][j] > 0.0:
mt[g][i][j] = 1.0
return -1e9 * (1.0 - mt)
L_new = []
for ii in range(batch_size):
L_new += [
np.transpose(
adj_to_bias(
np.transpose(L[ii, :, :, :], (2, 0, 1)),
[batch_node_size] * L.shape[3]), (1, 2, 0))
]
data['L'] = torch.from_numpy(np.stack(L_new, axis=0)).float()
else:
#########################################################################
# All other models
#########################################################################
# graph Laplacian of multi-graph: shape (B, N, N, E)
L_multi = torch.stack([
torch.from_numpy(
np.pad(
bb['L_multi'], ((0, pad_node_size[ii]),
(0, pad_node_size[ii]), (0, 0)),
'constant',
constant_values=0.0)) for ii, bb in enumerate(batch)
]).float()
# graph Laplacian of simple graph: shape (B, N, N, 1)
L_simple_key = 'L_simple_4'
if self.model_name == 'DCNN':
L_simple_key = 'L_simple_7'
elif self.model_name in ['ChebyNet']:
L_simple_key = 'L_simple_6'
if self.model_name == 'ChebyNet':
L_simple = torch.stack([
torch.from_numpy(
np.expand_dims(
np.pad(
-bb[L_simple_key], (0, pad_node_size[ii]),
'constant',
constant_values=0.0),
axis=3)) for ii, bb in enumerate(batch)
]).float()
else:
L_simple = torch.stack([
torch.from_numpy(
np.expand_dims(
np.pad(
bb[L_simple_key], (0, pad_node_size[ii]),
'constant',
constant_values=0.0),
axis=3)) for ii, bb in enumerate(batch)
]).float()
data['L'] = torch.cat([L_simple, L_multi], dim=3)
# eigenvalues & eigenvectors of simple graph
if self.use_eigs:
eigs, eig_vecs = [], []
for ii, bb in enumerate(batch):
pad_eigs_len = self.num_eigs - len(bb['D_simple'])
eigs += [
bb['D_simple'][:self.num_eigs] if pad_eigs_len <= 0 else np.pad(
bb['D_simple'], (0, pad_eigs_len),
'constant',
constant_values=0.0)
]
# pad eigenvectors
pad_eig_vec = np.pad(
bb['V_simple'], ((0, pad_node_size[ii]), (0, 0)),
'constant',
constant_values=0.0)
eig_vecs += [
pad_eig_vec[:, :self.num_eigs] if pad_eigs_len <= 0 else np.pad(
pad_eig_vec, ((0, 0), (0, pad_eigs_len)),
'constant',
constant_values=0.0)
]
data['D'] = torch.stack([torch.from_numpy(ee) for ee in eigs]).float()
data['V'] = torch.stack(
[torch.from_numpy(vv) for vv in eig_vecs]).float()
return data
| [
"numpy.pad",
"numpy.stack",
"numpy.empty",
"numpy.zeros",
"torch.cat",
"numpy.random.RandomState",
"numpy.ones",
"numpy.nonzero",
"numpy.transpose",
"numpy.eye",
"os.path.join",
"numpy.concatenate",
"torch.from_numpy"
] | [((384, 416), 'numpy.random.RandomState', 'np.random.RandomState', (['self.seed'], {}), '(self.seed)\n', (405, 416), True, 'import numpy as np\n'), ((857, 908), 'os.path.join', 'os.path.join', (['self.data_path', '"""synthetic_train_*.p"""'], {}), "(self.data_path, 'synthetic_train_*.p')\n", (869, 908), False, 'import os\n'), ((955, 1004), 'os.path.join', 'os.path.join', (['self.data_path', '"""synthetic_dev_*.p"""'], {}), "(self.data_path, 'synthetic_dev_*.p')\n", (967, 1004), False, 'import os\n'), ((1052, 1102), 'os.path.join', 'os.path.join', (['self.data_path', '"""synthetic_test_*.p"""'], {}), "(self.data_path, 'synthetic_test_*.p')\n", (1064, 1102), False, 'import os\n'), ((3967, 4010), 'numpy.concatenate', 'np.concatenate', (['[L_simple, L_multi]'], {'axis': '(3)'}), '([L_simple, L_multi], axis=3)\n', (3981, 4010), True, 'import numpy as np\n'), ((4900, 4942), 'numpy.zeros', 'np.zeros', (['(batch_size, batch_node_size, 1)'], {}), '((batch_size, batch_node_size, 1))\n', (4908, 4942), True, 'import numpy as np\n'), ((4958, 5052), 'numpy.zeros', 'np.zeros', (['(batch_size, batch_node_size, self.num_sample_neighbors, self.num_edgetype + 1)'], {}), '((batch_size, batch_node_size, self.num_sample_neighbors, self.\n num_edgetype + 1))\n', (4966, 5052), True, 'import numpy as np\n'), ((3306, 3423), 'numpy.pad', 'np.pad', (["bb['L_multi']", '((0, pad_node_size[ii]), (0, pad_node_size[ii]), (0, 0))', '"""constant"""'], {'constant_values': '(0.0)'}), "(bb['L_multi'], ((0, pad_node_size[ii]), (0, pad_node_size[ii]), (0, \n 0)), 'constant', constant_values=0.0)\n", (3312, 3423), True, 'import numpy as np\n'), ((4029, 4048), 'torch.from_numpy', 'torch.from_numpy', (['L'], {}), '(L)\n', (4045, 4048), False, 'import torch\n'), ((6924, 6967), 'numpy.concatenate', 'np.concatenate', (['[L_simple, L_multi]'], {'axis': '(3)'}), '([L_simple, L_multi], axis=3)\n', (6938, 6967), True, 'import numpy as np\n'), ((9409, 9446), 'torch.cat', 'torch.cat', (['[L_simple, L_multi]'], 
{'dim': '(3)'}), '([L_simple, L_multi], dim=3)\n', (9418, 9446), False, 'import torch\n'), ((2852, 2881), 'torch.from_numpy', 'torch.from_numpy', (["bb['label']"], {}), "(bb['label'])\n", (2868, 2881), False, 'import torch\n'), ((3718, 3803), 'numpy.pad', 'np.pad', (["bb['L_simple_4']", '(0, pad_node_size[ii])', '"""constant"""'], {'constant_values': '(0.0)'}), "(bb['L_simple_4'], (0, pad_node_size[ii]), 'constant',\n constant_values=0.0)\n", (3724, 3803), True, 'import numpy as np\n'), ((4495, 4522), 'numpy.stack', 'np.stack', (['L_cluster'], {'axis': '(0)'}), '(L_cluster, axis=0)\n', (4503, 4522), True, 'import numpy as np\n'), ((4571, 4594), 'numpy.stack', 'np.stack', (['L_cut'], {'axis': '(0)'}), '(L_cut, axis=0)\n', (4579, 4594), True, 'import numpy as np\n'), ((5843, 5867), 'torch.from_numpy', 'torch.from_numpy', (['nn_idx'], {}), '(nn_idx)\n', (5859, 5867), False, 'import torch\n'), ((5905, 5936), 'torch.from_numpy', 'torch.from_numpy', (['nonempty_mask'], {}), '(nonempty_mask)\n', (5921, 5936), False, 'import torch\n'), ((7101, 7120), 'numpy.empty', 'np.empty', (['adj.shape'], {}), '(adj.shape)\n', (7109, 7120), True, 'import numpy as np\n'), ((2302, 2396), 'numpy.pad', 'np.pad', (["bb['node_feat']", '((0, pad_node_size[ii]), (0, 0))', '"""constant"""'], {'constant_values': '(0.0)'}), "(bb['node_feat'], ((0, pad_node_size[ii]), (0, 0)), 'constant',\n constant_values=0.0)\n", (2308, 2396), True, 'import numpy as np\n'), ((6263, 6380), 'numpy.pad', 'np.pad', (["bb['L_multi']", '((0, pad_node_size[ii]), (0, pad_node_size[ii]), (0, 0))', '"""constant"""'], {'constant_values': '(0.0)'}), "(bb['L_multi'], ((0, pad_node_size[ii]), (0, pad_node_size[ii]), (0, \n 0)), 'constant', constant_values=0.0)\n", (6269, 6380), True, 'import numpy as np\n'), ((7174, 7194), 'numpy.eye', 'np.eye', (['adj.shape[1]'], {}), '(adj.shape[1])\n', (7180, 7194), True, 'import numpy as np\n'), ((9943, 10036), 'numpy.pad', 'np.pad', (["bb['V_simple']", '((0, pad_node_size[ii]), (0, 0))', 
'"""constant"""'], {'constant_values': '(0.0)'}), "(bb['V_simple'], ((0, pad_node_size[ii]), (0, 0)), 'constant',\n constant_values=0.0)\n", (9949, 10036), True, 'import numpy as np\n'), ((2626, 2648), 'numpy.ones', 'np.ones', (['node_size[ii]'], {}), '(node_size[ii])\n', (2633, 2648), True, 'import numpy as np\n'), ((5360, 5384), 'numpy.nonzero', 'np.nonzero', (['tmp_L[nn, :]'], {}), '(tmp_L[nn, :])\n', (5370, 5384), True, 'import numpy as np\n'), ((6675, 6760), 'numpy.pad', 'np.pad', (["bb['L_simple_4']", '(0, pad_node_size[ii])', '"""constant"""'], {'constant_values': '(0.0)'}), "(bb['L_simple_4'], (0, pad_node_size[ii]), 'constant',\n constant_values=0.0)\n", (6681, 6760), True, 'import numpy as np\n'), ((7773, 7796), 'numpy.stack', 'np.stack', (['L_new'], {'axis': '(0)'}), '(L_new, axis=0)\n', (7781, 7796), True, 'import numpy as np\n'), ((7622, 7661), 'numpy.transpose', 'np.transpose', (['L[ii, :, :, :]', '(2, 0, 1)'], {}), '(L[ii, :, :, :], (2, 0, 1))\n', (7634, 7661), True, 'import numpy as np\n'), ((9747, 9821), 'numpy.pad', 'np.pad', (["bb['D_simple']", '(0, pad_eigs_len)', '"""constant"""'], {'constant_values': '(0.0)'}), "(bb['D_simple'], (0, pad_eigs_len), 'constant', constant_values=0.0)\n", (9753, 9821), True, 'import numpy as np\n'), ((10172, 10257), 'numpy.pad', 'np.pad', (['pad_eig_vec', '((0, 0), (0, pad_eigs_len))', '"""constant"""'], {'constant_values': '(0.0)'}), "(pad_eig_vec, ((0, 0), (0, pad_eigs_len)), 'constant',\n constant_values=0.0)\n", (10178, 10257), True, 'import numpy as np\n'), ((7275, 7295), 'numpy.eye', 'np.eye', (['adj.shape[1]'], {}), '(adj.shape[1])\n', (7281, 7295), True, 'import numpy as np\n'), ((8138, 8255), 'numpy.pad', 'np.pad', (["bb['L_multi']", '((0, pad_node_size[ii]), (0, pad_node_size[ii]), (0, 0))', '"""constant"""'], {'constant_values': '(0.0)'}), "(bb['L_multi'], ((0, pad_node_size[ii]), (0, pad_node_size[ii]), (0, \n 0)), 'constant', constant_values=0.0)\n", (8144, 8255), True, 'import numpy as np\n'), 
((10355, 10375), 'torch.from_numpy', 'torch.from_numpy', (['ee'], {}), '(ee)\n', (10371, 10375), False, 'import torch\n'), ((10447, 10467), 'torch.from_numpy', 'torch.from_numpy', (['vv'], {}), '(vv)\n', (10463, 10467), False, 'import torch\n'), ((8792, 8878), 'numpy.pad', 'np.pad', (['(-bb[L_simple_key])', '(0, pad_node_size[ii])', '"""constant"""'], {'constant_values': '(0.0)'}), "(-bb[L_simple_key], (0, pad_node_size[ii]), 'constant',\n constant_values=0.0)\n", (8798, 8878), True, 'import numpy as np\n'), ((9155, 9240), 'numpy.pad', 'np.pad', (['bb[L_simple_key]', '(0, pad_node_size[ii])', '"""constant"""'], {'constant_values': '(0.0)'}), "(bb[L_simple_key], (0, pad_node_size[ii]), 'constant',\n constant_values=0.0)\n", (9161, 9240), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Parts of octant package."""
import numpy as np
import pandas as pd
from .decor import ReprTrackSettings
from .exceptions import LoadError
from .params import HOUR, M2KM
from .utils import great_circle, total_dist
__all__ = ("OctantTrack", "TrackSettings")
class _OctantSeries(pd.Series):
"""`pandas.Series` subclass used in octant library."""
@property
def _constructor(self):
return _OctantSeries
class OctantTrack(pd.DataFrame):
"""
Instance of cyclone track.
DataFrame with a bunch of extra methods and properties.
"""
def __init__(self, *args, **kw):
"""Initialise octant.core.OctantTrack."""
super(OctantTrack, self).__init__(*args, **kw)
@property
def _constructor(self):
return OctantTrack # replace with self.__class__?
_constructor_sliced = _OctantSeries
@property
def gb(self):
"""Shortcut to group by track_idx index."""
return self.groupby("track_idx")
@classmethod
def from_df(cls, df):
"""Create OctantTrack from pandas.DataFrame."""
return cls.from_records(df.to_records(index=False))
@classmethod
def from_mux_df(cls, df):
"""Create OctantTrack from a multi-index pandas.DataFrame."""
if df.shape[0] > 0:
return cls.from_records(df.to_records(index=True), index=df.index.names)
else:
return cls(columns=df.columns, index=df.index)
@property
def coord_view(self):
"""Numpy view of track coordinates: longitude, latitude, time."""
return (
self.lon.values.view("double"),
self.lat.values.view("double"),
self.time.values.view("int64"),
)
@property
def lonlat(self):
"""Values of longitude and latitude as 2D numpy array."""
return self[["lon", "lat"]].values
@property
def lonlat_c(self):
"""Values of longitude and latitude as C-ordered 2D numpy array."""
return self.lonlat.astype("double", order="C")
@property
def tridlonlat(self):
"""Values of track index, longitude, latitude as 2D numpy array."""
return self.reset_index("track_idx")[["track_idx", "lon", "lat"]].values
@property
def tridlonlat_c(self):
"""Values of track index, longitude, latitude as C-order 2D array."""
return self.tridlonlat.astype("double", order="C")
@property
def lifetime_h(self):
"""Track duration in hours."""
if self.shape[0] > 0:
return (self.time.values[-1] - self.time.values[0]) / HOUR
else:
return 0
@property
def gen_lys_dist_km(self):
"""Distance between genesis and lysis of the cyclone track in km."""
# TODO: include planet radius
if self.shape[0] > 0:
return (
great_circle(
self.lonlat[0, 0], self.lonlat[-1, 0], self.lonlat[0, 1], self.lonlat[-1, 1]
)
* M2KM
)
else:
return 0
@property
def total_dist_km(self):
"""Total track distance in km."""
return total_dist(self.lonlat_c) * M2KM
@property
def average_speed(self):
"""Average cyclone propagation speed in km per hour."""
if self.lifetime_h == 0:
return np.nan
else:
return self.total_dist_km / self.lifetime_h
@property
def max_vort(self):
"""Maximum vorticity of the cyclone track."""
return np.nanmax(self.vo.values)
def within_rectangle(self, lon0, lon1, lat0, lat1, time_frac=1):
"""
Check that OctantTrack is within a rectangle for a fraction of its lifetime.
Parameters
----------
lon0, lon1, lat0, lat1: float
Boundaries of longitude-latitude rectangle (lon_min, lon_max, lat_min, lat_max)
time_frac: float, optional
Time fraction threshold.
By default, set to maximum, i.e. track should be within the box entirely.
Returns
-------
bool
Examples
--------
Test that cyclone spends no more than a third of its life time outside the box
>>> bbox = [-10, 25, 68, 78]
>>> ot.within_rectangle(*bbox, time_frac=0.67)
True
See Also
--------
octant.misc.check_far_from_boundaries
"""
_within = self[
(self.lon >= lon0) & (self.lon <= lon1) & (self.lat >= lat0) & (self.lat <= lat1)
]
if self.lifetime_h == 0:
return _within.shape[0] == 1
else:
return _within.lifetime_h / self.lifetime_h >= time_frac
def plot_track(self, ax=None, **kwargs):
"""
Plot cyclone track using as plot function from plotting submodule.
Filled circle shows the beginning, empty circle - the end of the track.
Parameters
----------
ax: matplotlib axes object, optional
Axes in which to plot the track
If not given, a new figure with cartopy geoaxes is created
transform: matplotlib transform, optional
Default: cartopy.crs.PlateCarree()
kwargs: dict, optional
Options to pass to matplotlib plot() function
Returns
-------
ax: matplotlib axes object
The same ax as the input ax (if given), or a newly created axes
"""
from .plotting import plot
return plot(self, ax=ax, **kwargs)
class TrackSettings:
"""
Dictionary-like container of tracking settings.
TrackSettings object is constructed by reading `.conf` file used by
the tracking algorithm.
Note: the `.conf` file can only have lines with key-value pairs, e.g.
`lon1=20` or comment lines starting with #
"""
def __init__(self, fname_path=None):
"""
Initialise TrackSettings.
Parameters
----------
fname_path: pathlib.Path, optional
Path to `.conf` file with settings
(usually is in the same folder as the tracking output)
"""
self._fields = []
if fname_path is not None:
try:
with fname_path.open("r") as f:
conf_list = [
line
for line in f.read().split("\n")
if not line.startswith("#") and len(line) > 0
]
for line in conf_list:
if not line.startswith("#"):
k, v = line.split("=")
self._fields.append(k)
try:
self.__dict__.update({k: int(v)})
except ValueError:
try:
self.__dict__.update({k: float(v)})
except ValueError:
v = str(v).strip('"').strip("'")
self.__dict__.update({k: v})
# try:
# exec(line, None, self.__dict__)
# except SyntaxError:
# k, v = line.split('=')
# self.__dict__.update({k: str(v)})
# self._fields.append(k)
except (FileNotFoundError, AttributeError):
raise LoadError("Check that `fname_path` is a correct Path and formatted correctly")
self._fields = tuple(self._fields)
def copy(self):
"""Create a copy of TrackSettings."""
new = self.__class__()
new.__dict__ = self.__dict__.copy()
return new
@property
def extent(self):
"""List of lon1, lon2, lat1, lat2 showing the region used for tracking."""
extent_keys = ["lon1", "lon2", "lat1", "lat2"]
extent = []
for k in extent_keys:
try:
extent.append(getattr(self, k, None))
except AttributeError:
extent.append(None)
return extent
def __len__(self): # noqa
return len(self._fields)
def __repr__(self): # noqa
rtr = ReprTrackSettings(self)
return rtr.str_repr(short=True)
def __str__(self): # noqa
rtr = ReprTrackSettings(self)
return rtr.str_repr(short=False)
def _repr_html_(self):
rtr = ReprTrackSettings(self)
return rtr.html_repr()
def to_dict(self):
"""Convert TrackSettings to a dictionary."""
return {k: self.__dict__[k] for k in self._fields}
@classmethod
def from_dict(cls, data):
"""
Construct TrackSettings from a dictionary.
Parameters
----------
data: dict
Dictionary with appropriate keys
Returns
-------
octant.parts.TrackSettings
"""
ts = cls()
ts.__dict__.update(data)
ts._fields = list(data.keys())
return ts
| [
"numpy.nanmax"
] | [((3563, 3588), 'numpy.nanmax', 'np.nanmax', (['self.vo.values'], {}), '(self.vo.values)\n', (3572, 3588), True, 'import numpy as np\n')] |
import scipy
import numpy as np
import math as mp
import warnings
def pcaOrig(X, no_dims):
# Ported from MATLAB to Python by <NAME>, 5/07/2019
# INPUT:
# X: A higher dimension array data field.
# no_dims: An integer representation of the
# number of dimensions in X.
# OUTPUT: A reduced dimensionality matrix containing valuable data.
#
if (no_dims is None):
no_dims = 2
#Normalize the matrix
X = X-np.mean(X, axis = 0)
#Compute covariance matrix,
if (np.shape(X)[1] < np.shape(X)[0]):
C = np.cov(X, rowvar= False)
else:
C = np.dot(1 / np.shape(X)[0], np.dot(X, X.T) )
#Remove all NaN or inf elements
C[C == np.inf] = 0
C[C == np.nan] = 0
#lam: Eigval, #M = Eigvec
lam, M = np.linalg.eig(C)
lam = np.sort(lam)[::-1]
idx = np.argsort(lam)[::-1]
M = M[idx]
lam = lam[idx]
if (no_dims > np.shape(X)[1]):
no_dims = np.shape(X)[1]
warnings.warn('Target dimensionality reduced to %d.' % no_dims)
if (no_dims < 1):
#TODO: Needs testing
no_dims = np.where(np.cumsum(lam / np.sum(lam)) >= no_dims)[0]
M = M[:,idx[0:no_dims]]
lam = lam[0:no_dims]
if (not (np.shape(X)[1] < np.shape(X)[0])):
#TODO: Needs testing
#FIXME: Must be adapted from MATLAB code
M = np.dot(X.T , M);
M = np.multiply(M, np.transpose(np.divide(1, np.sqrt( np.multiply(np.shape(X)[0],lam)))))
mappedX = np.dot(X, M)
mappingM = M
#Match behaviour of MATLAB.
return mappedX, mappingM
| [
"numpy.sum",
"numpy.linalg.eig",
"numpy.argsort",
"numpy.sort",
"numpy.shape",
"numpy.mean",
"numpy.dot",
"numpy.cov",
"warnings.warn"
] | [((824, 840), 'numpy.linalg.eig', 'np.linalg.eig', (['C'], {}), '(C)\n', (837, 840), True, 'import numpy as np\n'), ((1547, 1559), 'numpy.dot', 'np.dot', (['X', 'M'], {}), '(X, M)\n', (1553, 1559), True, 'import numpy as np\n'), ((485, 503), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (492, 503), True, 'import numpy as np\n'), ((597, 620), 'numpy.cov', 'np.cov', (['X'], {'rowvar': '(False)'}), '(X, rowvar=False)\n', (603, 620), True, 'import numpy as np\n'), ((856, 868), 'numpy.sort', 'np.sort', (['lam'], {}), '(lam)\n', (863, 868), True, 'import numpy as np\n'), ((885, 900), 'numpy.argsort', 'np.argsort', (['lam'], {}), '(lam)\n', (895, 900), True, 'import numpy as np\n'), ((1026, 1089), 'warnings.warn', 'warnings.warn', (["('Target dimensionality reduced to %d.' % no_dims)"], {}), "('Target dimensionality reduced to %d.' % no_dims)\n", (1039, 1089), False, 'import warnings\n'), ((1413, 1427), 'numpy.dot', 'np.dot', (['X.T', 'M'], {}), '(X.T, M)\n', (1419, 1427), True, 'import numpy as np\n'), ((551, 562), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (559, 562), True, 'import numpy as np\n'), ((568, 579), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (576, 579), True, 'import numpy as np\n'), ((671, 685), 'numpy.dot', 'np.dot', (['X', 'X.T'], {}), '(X, X.T)\n', (677, 685), True, 'import numpy as np\n'), ((968, 979), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (976, 979), True, 'import numpy as np\n'), ((1003, 1014), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1011, 1014), True, 'import numpy as np\n'), ((1288, 1299), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1296, 1299), True, 'import numpy as np\n'), ((1305, 1316), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1313, 1316), True, 'import numpy as np\n'), ((655, 666), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (663, 666), True, 'import numpy as np\n'), ((1184, 1195), 'numpy.sum', 'np.sum', (['lam'], {}), '(lam)\n', (1190, 1195), True, 'import numpy as 
np\n'), ((1504, 1515), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1512, 1515), True, 'import numpy as np\n')] |
"""
The TypedDict class
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
def _columndict_to_dataframe(columns, seriestypes):
import pandas as _pandas
columns_as_series = {}
for colname, lst in columns.items():
seriestype = seriestypes[colname]
if seriestype == 'float':
s = _np.array(lst, dtype='d')
elif seriestype == 'int':
s = _np.array(lst, dtype=int) # or pd.Series w/dtype?
elif seriestype == 'category':
if len(lst) > 0 and isinstance(lst[0], tuple):
# special case when the values for a category are tuples. Often they're different lengths
# (e.g. qubit labels) and we want the Categorical to use an object-type numpy array to
# avoid any "ragged nested sequences" warnings, so do this:
lst = _pandas.Series(lst, dtype=object)
s = _pandas.Categorical(lst)
elif seriestype == 'object':
s = _pandas.Series(lst, dtype=object)
else:
s = lst # will infer an object array?
columns_as_series[colname] = s
return _pandas.DataFrame(columns_as_series)
class TypedDict(dict):
    """
    A dict that additionally records a data-frame type string per key.

    Instances act as the "leaves" in a tree of nested :class:`NamedDict`
    objects: each key names a *column* of an eventual data frame, and each
    value supplies that column's entry for a single row.  The per-key type
    strings (e.g. ``"int"``, ``"float"``, ``"category"``) determine the
    dtype of the resulting series.

    Parameters
    ----------
    types : dict, optional
        Maps each key that can appear in this dictionary to a valid data
        frame type string describing its values.

    items : dict or list
        Initial data, used for serialization.
    """
    def __init__(self, types=None, items=()):
        super().__init__(items)
        self._types = {} if types is None else types

    def __reduce__(self):
        # Pickle by reconstructing from (types, items).
        return (TypedDict, (self._types, list(self.items())), None)

    def as_dataframe(self):
        """
        Render this dict as a pandas data frame.

        Returns
        -------
        pandas.DataFrame
        """
        columns = {}
        seriestypes = {}
        self._add_to_columns(columns, seriestypes, {})
        return _columndict_to_dataframe(columns, seriestypes)

    def _add_to_columns(self, columns, seriestypes, row_prefix):
        """Append one row -- ``row_prefix`` plus this dict's items -- to ``columns``,
        keeping ``seriestypes`` consistent (conflicts collapse to ``None``)."""
        num_rows = len(next(iter(columns.values()))) if columns else 0
        for key in self:
            declared = self._types.get(key, None)
            if key not in columns:
                # New column: back-fill every earlier row with None.
                columns[key] = [None] * num_rows
                seriestypes[key] = declared
            elif seriestypes[key] != declared:
                seriestypes[key] = None  # conflicting types, so set to None
            assert(key not in row_prefix), \
                ("Column %s is assigned at multiple dict-levels (latter levels will "
                 "overwrite the values of earlier levels)! keys-so-far=%s") % (key, tuple(row_prefix.keys()))
        # Assemble and append this row's values.
        full_row = row_prefix.copy()
        full_row.update(self)
        for col, val in full_row.items():
            columns[col].append(val)
        # Pad columns this row doesn't touch so all columns stay the same length.
        for col in set(columns.keys()) - set(full_row.keys()):
            columns[col].append(None)
            seriestypes[col] = None  # can't store Nones in special types
| [
"pandas.DataFrame",
"numpy.array",
"pandas.Series",
"pandas.Categorical"
] | [((1786, 1822), 'pandas.DataFrame', '_pandas.DataFrame', (['columns_as_series'], {}), '(columns_as_series)\n', (1803, 1822), True, 'import pandas as _pandas\n'), ((975, 1000), 'numpy.array', '_np.array', (['lst'], {'dtype': '"""d"""'}), "(lst, dtype='d')\n", (984, 1000), True, 'import numpy as _np\n'), ((1051, 1076), 'numpy.array', '_np.array', (['lst'], {'dtype': 'int'}), '(lst, dtype=int)\n', (1060, 1076), True, 'import numpy as _np\n'), ((1558, 1582), 'pandas.Categorical', '_pandas.Categorical', (['lst'], {}), '(lst)\n', (1577, 1582), True, 'import pandas as _pandas\n'), ((1508, 1541), 'pandas.Series', '_pandas.Series', (['lst'], {'dtype': 'object'}), '(lst, dtype=object)\n', (1522, 1541), True, 'import pandas as _pandas\n'), ((1636, 1669), 'pandas.Series', '_pandas.Series', (['lst'], {'dtype': 'object'}), '(lst, dtype=object)\n', (1650, 1669), True, 'import pandas as _pandas\n')] |
# text_blob_classifier.py
import math
import numpy as np
import requests
import cv2
from scipy.spatial import ConvexHull
from collections import namedtuple
# Per-box geometric summary used for blob grouping:
#   text_size        -- distance between the midpoints of two opposite edges
#   text_angle       -- box orientation in radians (from math.atan2)
#   center           -- midpoint of the box
#   left_side_middle -- midpoint of the corner0-corner3 edge (presumably the
#                       left side of the text box -- verify against caller)
TextBoxParams = namedtuple('TextBoxParams', 'text_size text_angle center left_side_middle')
def check_same_blob(box1_params, box2_params):
if not (box1_params.text_size <= box2_params.text_size * 1.5 and
box1_params.text_size * 1.5 >= box2_params.text_size):
return False
ang_diff = min(abs(box1_params.text_angle - box2_params.text_angle),
abs(2 * math.pi - abs(box1_params.text_angle - box2_params.text_angle)))
if not (ang_diff <= math.radians(10)):
return False
dist_side = np.linalg.norm(box1_params.left_side_middle - box2_params.left_side_middle)
dist_cent = np.linalg.norm(box1_params.center - box2_params.center)
avg_text_size = (box1_params.text_size + box2_params.text_size) // 2
if not ((avg_text_size * 2 >= dist_side) or
(avg_text_size * 2 >= dist_cent)):
return False
return True
def update_bbox(bbox, text_box):
    """Append the four corner points of ``text_box`` to ``bbox`` (in place).

    ``text_box[1]`` holds eight flat coordinates x0,y0,...,x3,y3; each (x, y)
    pair is appended as a 2-element numpy array.  Returns the same list.
    """
    coords = text_box[1]
    for i in range(0, 8, 2):
        bbox.append(np.array([coords[i], coords[i + 1]]))
    return bbox
def triangle_params(p1, p2, p3):
    """Return ``(area, centroid)`` of the triangle with 2-D vertices p1, p2, p3."""
    v1 = p2 - p1
    v2 = p3 - p1
    # Half the absolute cross product of the two edge vectors.
    area = abs(v1[0] * v2[1] - v2[0] * v1[1]) / 2
    centroid = (p1 + p2 + p3) / 3
    return area, centroid
def get_direction_enlargement(center, p1, p2):
    """Outward push that moves the p1-p2 midpoint to at least 30 px from center.

    Returns a 2-vector: zero when the midpoint is already 30+ away, otherwise
    the shortfall along the center-to-midpoint direction.
    """
    outward = (p1 + p2) / 2 - center
    length = np.linalg.norm(outward)
    if length >= 30:
        return 0 * outward
    return (30 - length) * outward / length
def enlarge_bbox(bbox):
    """Grow a polygon ~20% about its area-weighted centroid, then push each
    vertex out by the adjustments of its two adjacent edges.

    ``bbox`` is a list of 2-element numpy arrays (hull-ordered vertices).
    """
    n = len(bbox)
    # Fan triangulation from vertex 0: (area, triangle-centroid) pairs.
    fan = [triangle_params(bbox[0], bbox[k - 1], bbox[k]) for k in range(1, n)]
    total_area = sum(t[0] for t in fan)
    centroid = sum(t[0] * t[1] for t in fan) / total_area
    scaled = [1.2 * (pt - centroid) + centroid for pt in bbox]
    # Edge k runs from bbox[k-1] to bbox[k] (wrapping at k == 0).
    pushes = [get_direction_enlargement(centroid, bbox[k], bbox[k - 1]) for k in range(n)]
    enlarged = []
    for k in range(n):
        nxt = 0 if k == n - 1 else k + 1
        enlarged.append(scaled[k] + pushes[k] + pushes[nxt])
    return enlarged
def group_into_blobs(text_boxes, box_params):
    """Chain compatible text boxes into blobs and merge their text/geometry.

    Each box links to at most one later box (the first compatible one per
    check_same_blob); following those links yields a chain whose words are
    concatenated (a trailing '-' joins without a space) and whose corners are
    reduced to an enlarged convex hull.  Returns a list of
    ``(text, hull_points)`` tuples.
    """
    successor = {}
    n = len(box_params)
    for a in range(n):
        for b in range(a + 1, n):
            if check_same_blob(box_params[a], box_params[b]):
                successor[a] = b
                break
    blobs = []
    consumed = {}
    for start in range(n):
        if start in consumed:
            continue
        text = ''
        corners = []
        idx = start
        while True:
            box = text_boxes[idx]
            if not text:
                text = box[0]
            elif text.endswith('-'):
                # Hyphenated line break: join directly.
                text = text[:-1] + box[0]
            else:
                text = text + ' ' + box[0]
            corners = update_bbox(corners, box)
            consumed[idx] = True
            if idx not in successor:
                break
            idx = successor[idx]
        hull_order = ConvexHull(np.array(corners)).vertices
        hull_pts = [corners[v] for v in hull_order]
        hull_pts = enlarge_bbox(hull_pts)
        blobs.append((text, np.array(hull_pts).tolist()))
    return blobs
def analyze_image(text_boxes):
    """Compute geometry parameters for every text box, then group the boxes
    into blobs via group_into_blobs.

    Each entry of ``text_boxes`` is ``(text, [x0, y0, ..., x3, y3])``.
    """
    box_params = []
    for box in text_boxes:
        coords = box[1]
        corners = np.array(
            [[coords[0], coords[1]],
             [coords[2], coords[3]],
             [coords[4], coords[5]],
             [coords[6], coords[7]]], np.int32)
        edge_mid_a = (corners[1] + corners[0]) // 2
        edge_mid_b = (corners[2] + corners[3]) // 2
        left_mid = (corners[0] + corners[3]) // 2
        text_size = np.linalg.norm(edge_mid_a - edge_mid_b)
        # NOTE(review): atan2(dx, dy) -- arguments swapped relative to the
        # conventional atan2(dy, dx); preserved as-is.
        text_angle = math.atan2(edge_mid_a[0] - edge_mid_b[0], edge_mid_a[1] - edge_mid_b[1])
        center = (np.array(edge_mid_b) + np.array(edge_mid_a)) / 2
        box_params.append(TextBoxParams(text_size, text_angle, center, left_mid))
    return group_into_blobs(text_boxes, box_params)
# Drawing functions
def extract_image(image_url):
    """Download the image at ``image_url`` and decode it into an OpenCV
    color (BGR) array."""
    response = requests.get(image_url, allow_redirects=True)
    raw = np.asarray(bytearray(response.content), dtype="uint8")
    return cv2.imdecode(raw, cv2.IMREAD_COLOR)
def image_add_boundary(img, p1, p2, pts, s):
    """Annotate ``img``: draw segment p1-p2, mark its midpoint and point ``s``
    with small circles, and outline the closed polygon ``pts``."""
    contour = pts.reshape((-1, 1, 2))
    img = cv2.line(img, p1, p2, (255, 255, 0), 5)
    segment_mid = (np.array(p1) + np.array(p2)) // 2
    img = cv2.circle(img, tuple(segment_mid), 3, (0, 255, 0), 3)
    img = cv2.circle(img, tuple(s), 3, (0, 0, 255), 3)
    return cv2.polylines(img, [contour], True, (0, 255, 255), 2)
def image_add_bbox(img, pts):
    """Draw ``pts`` (possibly float-valued) as a closed polyline on ``img``."""
    int_pts = np.int32([pts])
    return cv2.polylines(img, int_pts, True, (0, 255, 255), 2)
| [
"cv2.line",
"cv2.polylines",
"math.atan2",
"math.radians",
"numpy.linalg.norm",
"collections.namedtuple",
"requests.get",
"numpy.array",
"numpy.int32"
] | [((175, 250), 'collections.namedtuple', 'namedtuple', (['"""TextBoxParams"""', '"""text_size text_angle center left_side_middle"""'], {}), "('TextBoxParams', 'text_size text_angle center left_side_middle')\n", (185, 250), False, 'from collections import namedtuple\n'), ((704, 779), 'numpy.linalg.norm', 'np.linalg.norm', (['(box1_params.left_side_middle - box2_params.left_side_middle)'], {}), '(box1_params.left_side_middle - box2_params.left_side_middle)\n', (718, 779), True, 'import numpy as np\n'), ((796, 851), 'numpy.linalg.norm', 'np.linalg.norm', (['(box1_params.center - box2_params.center)'], {}), '(box1_params.center - box2_params.center)\n', (810, 851), True, 'import numpy as np\n'), ((1615, 1640), 'numpy.linalg.norm', 'np.linalg.norm', (['direction'], {}), '(direction)\n', (1629, 1640), True, 'import numpy as np\n'), ((4079, 4124), 'requests.get', 'requests.get', (['image_url'], {'allow_redirects': '(True)'}), '(image_url, allow_redirects=True)\n', (4091, 4124), False, 'import requests\n'), ((4321, 4360), 'cv2.line', 'cv2.line', (['img', 'p1', 'p2', '(255, 255, 0)', '(5)'], {}), '(img, p1, p2, (255, 255, 0), 5)\n', (4329, 4360), False, 'import cv2\n'), ((4527, 4576), 'cv2.polylines', 'cv2.polylines', (['img', '[pts]', '(True)', '(0, 255, 255)', '(2)'], {}), '(img, [pts], True, (0, 255, 255), 2)\n', (4540, 4576), False, 'import cv2\n'), ((3414, 3579), 'numpy.array', 'np.array', (['[[text_box[1][0], text_box[1][1]], [text_box[1][2], text_box[1][3]], [\n text_box[1][4], text_box[1][5]], [text_box[1][6], text_box[1][7]]]', 'np.int32'], {}), '([[text_box[1][0], text_box[1][1]], [text_box[1][2], text_box[1][3]\n ], [text_box[1][4], text_box[1][5]], [text_box[1][6], text_box[1][7]]],\n np.int32)\n', (3422, 3579), True, 'import numpy as np\n'), ((3755, 3782), 'numpy.linalg.norm', 'np.linalg.norm', (['(mid1 - mid2)'], {}), '(mid1 - mid2)\n', (3769, 3782), True, 'import numpy as np\n'), ((3799, 3847), 'math.atan2', 'math.atan2', (['(mid1[0] - mid2[0])', '(mid1[1] 
- mid2[1])'], {}), '(mid1[0] - mid2[0], mid1[1] - mid2[1])\n', (3809, 3847), False, 'import math\n'), ((4639, 4654), 'numpy.int32', 'np.int32', (['[pts]'], {}), '([pts])\n', (4647, 4654), True, 'import numpy as np\n'), ((647, 663), 'math.radians', 'math.radians', (['(10)'], {}), '(10)\n', (659, 663), False, 'import math\n'), ((1110, 1152), 'numpy.array', 'np.array', (['[text_box[1][0], text_box[1][1]]'], {}), '([text_box[1][0], text_box[1][1]])\n', (1118, 1152), True, 'import numpy as np\n'), ((1170, 1212), 'numpy.array', 'np.array', (['[text_box[1][2], text_box[1][3]]'], {}), '([text_box[1][2], text_box[1][3]])\n', (1178, 1212), True, 'import numpy as np\n'), ((1230, 1272), 'numpy.array', 'np.array', (['[text_box[1][4], text_box[1][5]]'], {}), '([text_box[1][4], text_box[1][5]])\n', (1238, 1272), True, 'import numpy as np\n'), ((1290, 1332), 'numpy.array', 'np.array', (['[text_box[1][6], text_box[1][7]]'], {}), '([text_box[1][6], text_box[1][7]])\n', (1298, 1332), True, 'import numpy as np\n'), ((4371, 4383), 'numpy.array', 'np.array', (['p1'], {}), '(p1)\n', (4379, 4383), True, 'import numpy as np\n'), ((4386, 4398), 'numpy.array', 'np.array', (['p2'], {}), '(p2)\n', (4394, 4398), True, 'import numpy as np\n'), ((3127, 3141), 'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (3135, 3141), True, 'import numpy as np\n'), ((3904, 3918), 'numpy.array', 'np.array', (['mid2'], {}), '(mid2)\n', (3912, 3918), True, 'import numpy as np\n'), ((3921, 3935), 'numpy.array', 'np.array', (['mid1'], {}), '(mid1)\n', (3929, 3935), True, 'import numpy as np\n'), ((3271, 3285), 'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (3279, 3285), True, 'import numpy as np\n')] |
"""Test substitution capability across gpkit"""
import unittest
import sys
import numpy as np
import numpy.testing as npt
from ad import adnumber, ADV
import gpkit
from gpkit import SignomialsEnabled, NamedVariables
from gpkit import Variable, VectorVariable, Model, Signomial
from gpkit.small_scripts import mag
from gpkit.tests.helpers import run_tests
# pylint: disable=invalid-name,attribute-defined-outside-init,unused-variable
# Python 2/3 compatibility shim: on Python 3 there is no `unicode` builtin,
# so alias it to `str` for the isinstance checks below.
if sys.version_info >= (3, 0):
    unicode = str  # pylint:disable=redefined-builtin,invalid-name
class TestNomialSubs(unittest.TestCase):
    """Test substitution for nomial-family objects"""
    def test_vectorized_linked(self):
        # A vectorized variable whose value is computed from another variable;
        # the linking function must also accept automatic-differentiation
        # (ADV) inputs so sensitivities can be propagated.
        class VectorLinked(Model):
            "simple vectorized link"
            def setup(self):
                self.y = y = Variable("y", 1)
                def vectorlink(c):
                    "linked vector function"
                    if isinstance(c[y], ADV):
                        return np.array(c[y])+adnumber([1, 2, 3])
                    return c[y]+np.array([1, 2, 3])
                self.x = x = VectorVariable(3, "x", vectorlink)
        m = VectorLinked()
        # with y = 1, the linked x should evaluate to [2, 3, 4]
        self.assertEqual(m.substitutions[m.x[0].key](m.substitutions), 2)
        self.assertEqual(m.gp().substitutions[m.x[0].key], 2)
        self.assertEqual(m.gp().substitutions[m.x[1].key], 3)
        self.assertEqual(m.gp().substitutions[m.x[2].key], 4)
    def test_numeric(self):
        """Basic substitution of numeric value"""
        x = Variable("x")
        p = x**2
        # substitution accepts the Variable itself, its key, or its name
        self.assertEqual(p.sub({x: 3}), 9)
        self.assertEqual(p.sub({x.key: 3}), 9)
        self.assertEqual(p.sub({"x": 3}), 9)
    def test_dimensionless_units(self):
        x = Variable('x', 3, 'ft')
        y = Variable('y', 1, 'm')
        if x.units is not None:
            # units are enabled: 3 ft / 1 m == 0.9144
            self.assertAlmostEqual((x/y).value, 0.9144)
    def test_vector(self):
        x = Variable("x")
        y = Variable("y")
        z = VectorVariable(2, "z")
        p = x*y*z
        # substituting the scalars leaves a monomial proportional to z
        self.assertTrue(all(p.sub({x: 1, "y": 2}) == 2*z))
        self.assertTrue(all(p.sub({x: 1, y: 2, "z": [1, 2]}) ==
                            z.sub({z: [2, 4]})))
        # partial-vector substitution: only the first two elements are summed
        xvec = VectorVariable(3, "x", "m")
        xs = xvec[:2].sum()
        for x_ in ["x", xvec]:
            self.assertAlmostEqual(mag(xs.sub({x_: [1, 2, 3]}).c), 3.0)
    def test_variable(self):
        """Test special single-argument substitution for Variable"""
        x = Variable('x')
        y = Variable('y')
        m = x*y**2
        self.assertEqual(x.sub(3), 3)
        # make sure x was not mutated
        self.assertEqual(x, Variable('x'))
        self.assertNotEqual(x.sub(3), Variable('x'))
        # also make sure the old way works
        self.assertEqual(x.sub({x: 3}), 3)
        # and for vectors
        xvec = VectorVariable(3, 'x')
        self.assertEqual(xvec[1].sub(3), 3)
    def test_signomial(self):
        """Test Signomial substitution"""
        D = Variable('D', units="N")
        x = Variable('x', units="N")
        y = Variable('y', units="N")
        a = Variable('a')
        with SignomialsEnabled():
            sc = (a*x + (1 - a)*y - D)
            # substituting the scalar weight keeps the result a Signomial
            subbed = sc.sub({a: 0.1})
            self.assertEqual(subbed, 0.1*x + 0.9*y - D)
            self.assertTrue(isinstance(subbed, Signomial))
            subbed = sc.sub({a: 2.0})
            self.assertTrue(isinstance(subbed, Signomial))
            self.assertEqual(subbed, 2*x - y - D)
            _ = a.sub({a: -1}).value  # fix monomial assumptions
class TestModelSubs(unittest.TestCase):
    """Test substitution for Model objects"""
    def test_bad_gp_sub(self):
        # substituting a Variable (rather than a number) for a constant
        # should surface as a ValueError at solve time
        x = Variable("x")
        y = Variable("y")
        m = Model(x, [y >= 1], {y: x})
        with self.assertRaises(ValueError):
            m.solve()
    def test_quantity_sub(self):
        if gpkit.units:
            x = Variable("x", 1, "cm")
            y = Variable("y", 1)
            # quantity substitutions are converted to the variable's units
            self.assertEqual(x.sub({x: 1*gpkit.units.m}).c.magnitude, 100)
            # NOTE: uncomment the below if requiring Quantity substitutions
            # self.assertRaises(ValueError, x.sub, x, 1)
            # dimensionally-incompatible substitutions must raise
            self.assertRaises(ValueError, x.sub, {x: 1*gpkit.ureg.N})
            self.assertRaises(ValueError, y.sub, {y: 1*gpkit.ureg.N})
            v = gpkit.VectorVariable(3, "v", "cm")
            subbed = v.sub({v: [1, 2, 3]*gpkit.ureg.m})
            self.assertEqual([z.c.magnitude for z in subbed], [100, 200, 300])
            v = VectorVariable(1, "v", "km")
            v_min = VectorVariable(1, "v_min", "km")
            # both list-of-quantities and quantity-array substitutions work
            m = Model(v.prod(), [v >= v_min],
                      {v_min: [2*gpkit.units("nmi")]})
            cost = m.solve(verbosity=0)["cost"]
            self.assertAlmostEqual(cost/(3.704*gpkit.ureg("km")), 1.0)
            m = Model(v.prod(), [v >= v_min],
                      {v_min: np.array([2])*gpkit.units("nmi")})
            cost = m.solve(verbosity=0)["cost"]
            self.assertAlmostEqual(cost/(3.704*gpkit.ureg("km")), 1.0)
    def test_phantoms(self):
        # variables that differ only by lineage make bare string lookups
        # ambiguous; such lookups should raise
        x = Variable("x")
        x_ = Variable("x", 1, lineage=[("test", 0)])
        xv = VectorVariable(2, "x", [1, 1], lineage=[("vec", 0)])
        m = Model(x, [x >= x_, x_ == xv.prod()])
        m.solve(verbosity=0)
        with self.assertRaises(ValueError):
            _ = m.substitutions["x"]
        with self.assertRaises(KeyError):
            _ = m.substitutions["y"]
        with self.assertRaises(ValueError):
            _ = m["x"]
        self.assertIn(x, m.variables_byname("x"))
        self.assertIn(x_, m.variables_byname("x"))
    def test_persistence(self):
        # substitutions set on one model instance should not leak into a
        # freshly-constructed model over the same variables
        x = gpkit.Variable("x")
        y = gpkit.Variable("y")
        ymax = gpkit.Variable("y_{max}", 0.1)
        with gpkit.SignomialsEnabled():
            m = gpkit.Model(x, [x >= 1-y, y <= ymax])
            m.substitutions[ymax] = 0.2
            self.assertAlmostEqual(m.localsolve(verbosity=0)["cost"], 0.8, 3)
            m = gpkit.Model(x, [x >= 1-y, y <= ymax])
            with self.assertRaises(ValueError):  # from unbounded ymax
                m.localsolve(verbosity=0)
            m = gpkit.Model(x, [x >= 1-y, y <= ymax])
            m.substitutions[ymax] = 0.1
            self.assertAlmostEqual(m.localsolve(verbosity=0)["cost"], 0.9, 3)
    def test_united_sub_sweep(self):
        # sweeping one constant while the others carry units
        A = Variable("A", "USD")
        h = Variable("h", "USD/count")
        Q = Variable("Q", "count")
        Y = Variable("Y", "USD")
        m = Model(Y, [Y >= h*Q + A/Q])
        m.substitutions.update({A: 500*gpkit.units("USD"),
                                h: 35*gpkit.units("USD"),
                                Q: ("sweep", [50, 100, 500])})
        firstcost = m.solve(verbosity=0)["cost"][0]
        self.assertAlmostEqual(1760*gpkit.ureg("USD")/firstcost, 1, 5)
    def test_skipfailures(self):
        # x_min = 2 makes the second sweep point infeasible (x <= 1)
        x = Variable("x")
        x_min = Variable("x_{min}", [1, 2])
        m = Model(x, [x <= 1, x >= x_min])
        sol = m.solve(verbosity=0, skipsweepfailures=True)
        sol.table()
        self.assertEqual(len(sol), 1)
        with self.assertRaises(RuntimeWarning):
            sol = m.solve(verbosity=0, skipsweepfailures=False)
        m.substitutions[x_min][1][0] = 5  # so no sweeps solve
        with self.assertRaises(RuntimeWarning):
            sol = m.solve(verbosity=0, skipsweepfailures=True)
    def test_vector_sweep(self):
        """Test sweep involving VectorVariables"""
        x = Variable("x")
        x_min = Variable("x_min", 1)
        y = VectorVariable(2, "y")
        m = Model(x, [x >= y.prod()])
        # sweeping a vector takes the cartesian product of element values
        m.substitutions.update({y: ('sweep', [[2, 3], [5, 7], [9, 11]])})
        a = m.solve(verbosity=0)["cost"]
        b = [6, 15, 27, 14, 35, 63, 22, 55, 99]
        self.assertTrue(all(abs(a-b)/(a+b) < 1e-7))
        x_min = Variable("x_min", 1)  # constant to check array indexing
        m = Model(x, [x >= y.prod(), x >= x_min])
        m.substitutions.update({y: ('sweep', [[2, 3], [5, 7, 11]])})
        sol = m.solve(verbosity=0)
        a = sol["cost"]
        b = [10, 15, 14, 21, 22, 33]
        self.assertTrue(all(abs(a-b)/(a+b) < 1e-7))
        self.assertEqual(sol["constants"][x_min], 1)
        for i, bi in enumerate(b):
            self.assertEqual(sol.atindex(i)["constants"][x_min], 1)
            ai = m.solution.atindex(i)["cost"]
            self.assertTrue(abs(ai-bi)/(ai+bi) < 1e-7)
        # mismatched per-element sweep lengths should raise
        m = Model(x, [x >= y.prod()])
        m.substitutions.update({y: ('sweep', [[2, 3, 9], [5, 7, 11]])})
        self.assertRaises(ValueError, m.solve, verbosity=0)
    def test_calcconst(self):
        x = Variable("x", "hours")
        t_day = Variable("t_{day}", 12, "hours")
        # t_night is calculated from t_day at solve time
        t_night = Variable("t_{night}", lambda c: 24 - c[t_day], "hours")
        m = Model(x, [x >= t_day, x >= t_night])
        sol = m.solve(verbosity=0)
        self.assertAlmostEqual(sol(t_night)/gpkit.ureg.hours, 12)
        m.substitutions.update({t_day: ("sweep", [6, 8, 9, 13])})
        sol = m.solve(verbosity=0)
        npt.assert_allclose(sol["sensitivities"]["constants"][t_day],
                            [-1./3, -0.5, -0.6, +1], 1e-5)
        self.assertEqual(len(sol["cost"]), 4)
        npt.assert_allclose([float(l) for l in
                             (sol(t_day) + sol(t_night))/gpkit.ureg.hours], 24)
    def test_vector_init(self):
        N = 6
        Weight = 50000
        # load distribution over N bins, quadratic in the bin index
        xi_dist = 6*Weight/float(N)*(
            (np.array(range(1, N+1)) - .5/float(N))/float(N) -
            (np.array(range(1, N+1)) - .5/float(N))**2/float(N)**2)
        xi = VectorVariable(N, "xi", xi_dist, "N", "Constant Thrust per Bin")
        P = Variable("P", "N", "Total Power")
        phys_constraints = [P >= xi.sum()]
        objective = P
        eqns = phys_constraints
        m = Model(objective, eqns)
        sol = m.solve(verbosity=0)
        a, b = sol("xi"), xi_dist*gpkit.ureg.N
        self.assertTrue(all(abs(a-b)/(a+b) < 1e-7))
    def test_model_composition_units(self):
        class Above(Model):
            """A simple upper bound on x
            Lower Unbounded
            ---------------
            x
            """
            def setup(self):
                x = self.x = Variable("x", "ft")
                x_max = Variable("x_{max}", 1, "yard")
                self.cost = 1/x
                return [x <= x_max]
        class Below(Model):
            """A simple lower bound on x
            Upper Unbounded
            ---------------
            x
            """
            def setup(self):
                x = self.x = Variable("x", "m")
                x_min = Variable("x_{min}", 1, "cm")
                self.cost = x
                return [x >= x_min]
        a, b = Above(), Below()
        concatm = Model(a.cost*b.cost, [a, b])
        concat_cost = concatm.solve(verbosity=0)["cost"]
        almostequal = self.assertAlmostEqual
        yard, cm = gpkit.ureg("yard"), gpkit.ureg("cm")
        if not isinstance(a["x"].key.units, unicode):
            # units are real Quantity objects, so costs carry units
            almostequal(1/yard/a.solve(verbosity=0)["cost"], 1, 5)
            almostequal(1*cm/b.solve(verbosity=0)["cost"], 1, 5)
            almostequal(1*cm/yard/concat_cost, 1, 5)
        NamedVariables.reset_modelnumbers()
        # after the reset, model numbering restarts at 0
        a1, b1 = Above(), Below()
        self.assertEqual(a1["x"].key.lineage, (("Above", 0),))
        m = Model(a1["x"], [a1, b1, b1["x"] == a1["x"]])
        sol = m.solve(verbosity=0)
        if not isinstance(a1["x"].key.units, unicode):
            almostequal(1*cm/sol["cost"], 1, 5)
        a1, b1 = Above(), Below()
        self.assertEqual(a1["x"].key.lineage, (("Above", 1),))
        m = Model(b1["x"], [a1, b1, b1["x"] == a1["x"]])
        sol = m.solve(verbosity=0)
        if not isinstance(b1["x"].key.units, unicode):
            almostequal(1*gpkit.ureg.cm/sol["cost"], 1, 5)
        # only the latest instances' variables appear in the solution
        self.assertIn(a1["x"], sol["variables"])
        self.assertIn(b1["x"], sol["variables"])
        self.assertNotIn(a["x"], sol["variables"])
        self.assertNotIn(b["x"], sol["variables"])
    def test_getkey(self):
        class Top(Model):
            """Some high level model
            Upper Unbounded
            ---------------
            y
            """
            def setup(self):
                y = self.y = Variable('y')
                s = Sub()
                sy = s["y"]
                self.cost = y
                return [s, y >= sy, sy >= 1]
        class Sub(Model):
            """A simple sub model
            Upper Unbounded
            ---------------
            y
            """
            def setup(self):
                y = self.y = Variable('y')
                self.cost = y
                return [y >= 2]
        sol = Top().solve(verbosity=0)
        self.assertAlmostEqual(sol['cost'], 2)
    def test_model_recursion(self):
        class Top(Model):
            """Some high level model
            Upper Unbounded
            ---------------
            x
            """
            def setup(self):
                sub = Sub()
                x = self.x = Variable("x")
                self.cost = x
                return sub, [x >= sub["y"], sub["y"] >= 1]
        class Sub(Model):
            """A simple sub model
            Upper Unbounded
            ---------------
            y
            """
            def setup(self):
                y = self.y = Variable('y')
                self.cost = y
                return [y >= 2]
        sol = Top().solve(verbosity=0)
        self.assertAlmostEqual(sol['cost'], 2)
    def test_vector_sub(self):
        x = VectorVariable(3, "x")
        y = VectorVariable(3, "y")
        ymax = VectorVariable(3, "ymax")
        with SignomialsEnabled():
            # issue1077 links to a case that failed for SPs only
            m = Model(x.prod(), [x + y >= 1, y <= ymax])
        m.substitutions["ymax"] = [0.3, 0.5, 0.8]
        m.localsolve(verbosity=0)
    def test_spsubs(self):
        x = Variable("x", 5)
        y = Variable("y", lambda c: 2*c[x])
        z = Variable("z")
        w = Variable("w")
        with SignomialsEnabled():
            cnstr = [z + w >= y*x, w <= y]
        m = Model(z, cnstr)
        m.localsolve(verbosity=0)
        self.assertTrue(m.substitutions["y"], "__call__")
# Test classes run by gpkit's test harness, in order.
TESTS = [TestNomialSubs, TestModelSubs]
if __name__ == '__main__':
    run_tests(TESTS)
| [
"gpkit.VectorVariable",
"gpkit.units",
"ad.adnumber",
"gpkit.tests.helpers.run_tests",
"gpkit.Model",
"numpy.array",
"gpkit.SignomialsEnabled",
"numpy.testing.assert_allclose",
"gpkit.Variable",
"gpkit.ureg",
"gpkit.NamedVariables.reset_modelnumbers"
] | [((14354, 14370), 'gpkit.tests.helpers.run_tests', 'run_tests', (['TESTS'], {}), '(TESTS)\n', (14363, 14370), False, 'from gpkit.tests.helpers import run_tests\n'), ((1503, 1516), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (1511, 1516), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((1722, 1744), 'gpkit.Variable', 'Variable', (['"""x"""', '(3)', '"""ft"""'], {}), "('x', 3, 'ft')\n", (1730, 1744), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((1757, 1778), 'gpkit.Variable', 'Variable', (['"""y"""', '(1)', '"""m"""'], {}), "('y', 1, 'm')\n", (1765, 1778), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((1939, 1952), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (1947, 1952), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((1965, 1978), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (1973, 1978), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((1991, 2013), 'gpkit.VectorVariable', 'VectorVariable', (['(2)', '"""z"""'], {}), "(2, 'z')\n", (2005, 2013), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((2220, 2247), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""x"""', '"""m"""'], {}), "(3, 'x', 'm')\n", (2234, 2247), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((2490, 2503), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (2498, 2503), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((2516, 2529), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (2524, 2529), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((2848, 2870), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""x"""'], {}), "(3, 'x')\n", (2862, 2870), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((3000, 3024), 'gpkit.Variable', 'Variable', (['"""D"""'], 
{'units': '"""N"""'}), "('D', units='N')\n", (3008, 3024), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((3037, 3061), 'gpkit.Variable', 'Variable', (['"""x"""'], {'units': '"""N"""'}), "('x', units='N')\n", (3045, 3061), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((3074, 3098), 'gpkit.Variable', 'Variable', (['"""y"""'], {'units': '"""N"""'}), "('y', units='N')\n", (3082, 3098), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((3111, 3124), 'gpkit.Variable', 'Variable', (['"""a"""'], {}), "('a')\n", (3119, 3124), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((3695, 3708), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (3703, 3708), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((3721, 3734), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (3729, 3734), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((3747, 3773), 'gpkit.Model', 'Model', (['x', '[y >= 1]', '{y: x}'], {}), '(x, [y >= 1], {y: x})\n', (3752, 3773), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((5094, 5107), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (5102, 5107), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((5121, 5160), 'gpkit.Variable', 'Variable', (['"""x"""', '(1)'], {'lineage': "[('test', 0)]"}), "('x', 1, lineage=[('test', 0)])\n", (5129, 5160), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((5174, 5226), 'gpkit.VectorVariable', 'VectorVariable', (['(2)', '"""x"""', '[1, 1]'], {'lineage': "[('vec', 0)]"}), "(2, 'x', [1, 1], lineage=[('vec', 0)])\n", (5188, 5226), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((5678, 5697), 'gpkit.Variable', 'gpkit.Variable', (['"""x"""'], {}), "('x')\n", (5692, 5697), False, 'import gpkit\n'), ((5710, 5729), 'gpkit.Variable', 
'gpkit.Variable', (['"""y"""'], {}), "('y')\n", (5724, 5729), False, 'import gpkit\n'), ((5745, 5775), 'gpkit.Variable', 'gpkit.Variable', (['"""y_{max}"""', '(0.1)'], {}), "('y_{max}', 0.1)\n", (5759, 5775), False, 'import gpkit\n'), ((6378, 6398), 'gpkit.Variable', 'Variable', (['"""A"""', '"""USD"""'], {}), "('A', 'USD')\n", (6386, 6398), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((6411, 6437), 'gpkit.Variable', 'Variable', (['"""h"""', '"""USD/count"""'], {}), "('h', 'USD/count')\n", (6419, 6437), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((6450, 6472), 'gpkit.Variable', 'Variable', (['"""Q"""', '"""count"""'], {}), "('Q', 'count')\n", (6458, 6472), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((6485, 6505), 'gpkit.Variable', 'Variable', (['"""Y"""', '"""USD"""'], {}), "('Y', 'USD')\n", (6493, 6505), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((6518, 6548), 'gpkit.Model', 'Model', (['Y', '[Y >= h * Q + A / Q]'], {}), '(Y, [Y >= h * Q + A / Q])\n', (6523, 6548), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((6894, 6907), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (6902, 6907), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((6924, 6951), 'gpkit.Variable', 'Variable', (['"""x_{min}"""', '[1, 2]'], {}), "('x_{min}', [1, 2])\n", (6932, 6951), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((6965, 6995), 'gpkit.Model', 'Model', (['x', '[x <= 1, x >= x_min]'], {}), '(x, [x <= 1, x >= x_min])\n', (6970, 6995), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((7498, 7511), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (7506, 7511), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((7528, 7548), 'gpkit.Variable', 'Variable', (['"""x_min"""', '(1)'], {}), "('x_min', 1)\n", (7536, 7548), 
False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((7561, 7583), 'gpkit.VectorVariable', 'VectorVariable', (['(2)', '"""y"""'], {}), "(2, 'y')\n", (7575, 7583), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((7853, 7873), 'gpkit.Variable', 'Variable', (['"""x_min"""', '(1)'], {}), "('x_min', 1)\n", (7861, 7873), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((8648, 8670), 'gpkit.Variable', 'Variable', (['"""x"""', '"""hours"""'], {}), "('x', 'hours')\n", (8656, 8670), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((8687, 8719), 'gpkit.Variable', 'Variable', (['"""t_{day}"""', '(12)', '"""hours"""'], {}), "('t_{day}', 12, 'hours')\n", (8695, 8719), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((8738, 8793), 'gpkit.Variable', 'Variable', (['"""t_{night}"""', '(lambda c: 24 - c[t_day])', '"""hours"""'], {}), "('t_{night}', lambda c: 24 - c[t_day], 'hours')\n", (8746, 8793), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((8806, 8842), 'gpkit.Model', 'Model', (['x', '[x >= t_day, x >= t_night]'], {}), '(x, [x >= t_day, x >= t_night])\n', (8811, 8842), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((9053, 9154), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (["sol['sensitivities']['constants'][t_day]", '[-1.0 / 3, -0.5, -0.6, +1]', '(1e-05)'], {}), "(sol['sensitivities']['constants'][t_day], [-1.0 / 3, -\n 0.5, -0.6, +1], 1e-05)\n", (9072, 9154), True, 'import numpy.testing as npt\n'), ((9600, 9664), 'gpkit.VectorVariable', 'VectorVariable', (['N', '"""xi"""', 'xi_dist', '"""N"""', '"""Constant Thrust per Bin"""'], {}), "(N, 'xi', xi_dist, 'N', 'Constant Thrust per Bin')\n", (9614, 9664), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((9677, 9710), 'gpkit.Variable', 'Variable', (['"""P"""', '"""N"""', '"""Total Power"""'], {}), "('P', 'N', 
'Total Power')\n", (9685, 9710), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((9820, 9842), 'gpkit.Model', 'Model', (['objective', 'eqns'], {}), '(objective, eqns)\n', (9825, 9842), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((10783, 10813), 'gpkit.Model', 'Model', (['(a.cost * b.cost)', '[a, b]'], {}), '(a.cost * b.cost, [a, b])\n', (10788, 10813), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((11217, 11252), 'gpkit.NamedVariables.reset_modelnumbers', 'NamedVariables.reset_modelnumbers', ([], {}), '()\n', (11250, 11252), False, 'from gpkit import SignomialsEnabled, NamedVariables\n'), ((11362, 11406), 'gpkit.Model', 'Model', (["a1['x']", "[a1, b1, b1['x'] == a1['x']]"], {}), "(a1['x'], [a1, b1, b1['x'] == a1['x']])\n", (11367, 11406), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((11654, 11698), 'gpkit.Model', 'Model', (["b1['x']", "[a1, b1, b1['x'] == a1['x']]"], {}), "(b1['x'], [a1, b1, b1['x'] == a1['x']])\n", (11659, 11698), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((13587, 13609), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""x"""'], {}), "(3, 'x')\n", (13601, 13609), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((13622, 13644), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""y"""'], {}), "(3, 'y')\n", (13636, 13644), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((13660, 13685), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""ymax"""'], {}), "(3, 'ymax')\n", (13674, 13685), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((13968, 13984), 'gpkit.Variable', 'Variable', (['"""x"""', '(5)'], {}), "('x', 5)\n", (13976, 13984), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((13997, 14030), 'gpkit.Variable', 'Variable', (['"""y"""', '(lambda c: 2 * c[x])'], {}), "('y', lambda c: 2 * 
c[x])\n", (14005, 14030), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((14041, 14054), 'gpkit.Variable', 'Variable', (['"""z"""'], {}), "('z')\n", (14049, 14054), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((14067, 14080), 'gpkit.Variable', 'Variable', (['"""w"""'], {}), "('w')\n", (14075, 14080), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((14172, 14187), 'gpkit.Model', 'Model', (['z', 'cnstr'], {}), '(z, cnstr)\n', (14177, 14187), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((2653, 2666), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (2661, 2666), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((2706, 2719), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (2714, 2719), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((3138, 3157), 'gpkit.SignomialsEnabled', 'SignomialsEnabled', ([], {}), '()\n', (3155, 3157), False, 'from gpkit import SignomialsEnabled, NamedVariables\n'), ((3914, 3936), 'gpkit.Variable', 'Variable', (['"""x"""', '(1)', '"""cm"""'], {}), "('x', 1, 'cm')\n", (3922, 3936), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((3953, 3969), 'gpkit.Variable', 'Variable', (['"""y"""', '(1)'], {}), "('y', 1)\n", (3961, 3969), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((4334, 4368), 'gpkit.VectorVariable', 'gpkit.VectorVariable', (['(3)', '"""v"""', '"""cm"""'], {}), "(3, 'v', 'cm')\n", (4354, 4368), False, 'import gpkit\n'), ((4520, 4548), 'gpkit.VectorVariable', 'VectorVariable', (['(1)', '"""v"""', '"""km"""'], {}), "(1, 'v', 'km')\n", (4534, 4548), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((4569, 4601), 'gpkit.VectorVariable', 'VectorVariable', (['(1)', '"""v_min"""', '"""km"""'], {}), "(1, 'v_min', 'km')\n", (4583, 4601), False, 'from gpkit import Variable, 
VectorVariable, Model, Signomial\n'), ((5790, 5815), 'gpkit.SignomialsEnabled', 'gpkit.SignomialsEnabled', ([], {}), '()\n', (5813, 5815), False, 'import gpkit\n'), ((5833, 5872), 'gpkit.Model', 'gpkit.Model', (['x', '[x >= 1 - y, y <= ymax]'], {}), '(x, [x >= 1 - y, y <= ymax])\n', (5844, 5872), False, 'import gpkit\n'), ((6005, 6044), 'gpkit.Model', 'gpkit.Model', (['x', '[x >= 1 - y, y <= ymax]'], {}), '(x, [x >= 1 - y, y <= ymax])\n', (6016, 6044), False, 'import gpkit\n'), ((6172, 6211), 'gpkit.Model', 'gpkit.Model', (['x', '[x >= 1 - y, y <= ymax]'], {}), '(x, [x >= 1 - y, y <= ymax])\n', (6183, 6211), False, 'import gpkit\n'), ((10933, 10951), 'gpkit.ureg', 'gpkit.ureg', (['"""yard"""'], {}), "('yard')\n", (10943, 10951), False, 'import gpkit\n'), ((10953, 10969), 'gpkit.ureg', 'gpkit.ureg', (['"""cm"""'], {}), "('cm')\n", (10963, 10969), False, 'import gpkit\n'), ((13700, 13719), 'gpkit.SignomialsEnabled', 'SignomialsEnabled', ([], {}), '()\n', (13717, 13719), False, 'from gpkit import SignomialsEnabled, NamedVariables\n'), ((14095, 14114), 'gpkit.SignomialsEnabled', 'SignomialsEnabled', ([], {}), '()\n', (14112, 14114), False, 'from gpkit import SignomialsEnabled, NamedVariables\n'), ((799, 815), 'gpkit.Variable', 'Variable', (['"""y"""', '(1)'], {}), "('y', 1)\n", (807, 815), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((1090, 1124), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""x"""', 'vectorlink'], {}), "(3, 'x', vectorlink)\n", (1104, 1124), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((10236, 10255), 'gpkit.Variable', 'Variable', (['"""x"""', '"""ft"""'], {}), "('x', 'ft')\n", (10244, 10255), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((10280, 10310), 'gpkit.Variable', 'Variable', (['"""x_{max}"""', '(1)', '"""yard"""'], {}), "('x_{max}', 1, 'yard')\n", (10288, 10310), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((10594, 
10612), 'gpkit.Variable', 'Variable', (['"""x"""', '"""m"""'], {}), "('x', 'm')\n", (10602, 10612), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((10637, 10665), 'gpkit.Variable', 'Variable', (['"""x_{min}"""', '(1)', '"""cm"""'], {}), "('x_{min}', 1, 'cm')\n", (10645, 10665), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((12284, 12297), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (12292, 12297), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((12633, 12646), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (12641, 12646), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((13070, 13083), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (13078, 13083), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((13380, 13393), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (13388, 13393), False, 'from gpkit import Variable, VectorVariable, Model, Signomial\n'), ((6584, 6602), 'gpkit.units', 'gpkit.units', (['"""USD"""'], {}), "('USD')\n", (6595, 6602), False, 'import gpkit\n'), ((6642, 6660), 'gpkit.units', 'gpkit.units', (['"""USD"""'], {}), "('USD')\n", (6653, 6660), False, 'import gpkit\n'), ((6813, 6830), 'gpkit.ureg', 'gpkit.ureg', (['"""USD"""'], {}), "('USD')\n", (6823, 6830), False, 'import gpkit\n'), ((1041, 1060), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1049, 1060), True, 'import numpy as np\n'), ((4798, 4814), 'gpkit.ureg', 'gpkit.ureg', (['"""km"""'], {}), "('km')\n", (4808, 4814), False, 'import gpkit\n'), ((4898, 4911), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (4906, 4911), True, 'import numpy as np\n'), ((4912, 4930), 'gpkit.units', 'gpkit.units', (['"""nmi"""'], {}), "('nmi')\n", (4923, 4930), False, 'import gpkit\n'), ((5028, 5044), 'gpkit.ureg', 'gpkit.ureg', (['"""km"""'], {}), "('km')\n", (5038, 5044), False, 'import gpkit\n'), ((974, 
988), 'numpy.array', 'np.array', (['c[y]'], {}), '(c[y])\n', (982, 988), True, 'import numpy as np\n'), ((989, 1008), 'ad.adnumber', 'adnumber', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (997, 1008), False, 'from ad import adnumber, ADV\n'), ((4681, 4699), 'gpkit.units', 'gpkit.units', (['"""nmi"""'], {}), "('nmi')\n", (4692, 4699), False, 'import gpkit\n')] |
from __future__ import print_function
import torch
# Quantization related import
from aimet_torch.quantsim import QuantizationSimModel
import logging
from aimet_torch.cross_layer_equalization import equalize_model
from torch.utils.data import DataLoader
from torchvision import models
from aimet_common.utils import AimetLogger
from aimet_common.defs import QuantScheme
from aimet_torch.utils import create_fake_data_loader
from aimet_torch.model_preparer import prepare_model
import sys
import os
import argparse
import numpy as np
import time
import pickle
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.utils.data as data
from PIL import Image
from libs.networks.vgg_refinedet import VGGRefineDet
from libs.networks.resnet_refinedet import ResNetRefineDet
from libs.utils.config import voc320, voc512, coco320, coco512, MEANS
from libs.data_layers.transform import detection_collate, BaseTransform
from libs.data_layers.roidb import combined_roidb, get_output_dir
from libs.data_layers.blob_dataset import BlobDataset
import pdb
class Timer(object):
    """Accumulating wall-clock stopwatch.

    Call ``tic`` to start an interval and ``toc`` to stop it.  Elapsed
    intervals are accumulated so both the most recent interval and the
    running mean over all intervals are available.
    """

    def __init__(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.

    def tic(self):
        # time.time() (wall clock) is used rather than a per-process CPU
        # clock so the measurement is meaningful across threads.
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop the current interval and return its duration.

        When ``average`` is true, return the running mean over all
        completed intervals instead of the last interval alone.
        """
        now = time.time()
        self.diff = now - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff
def str2bool(v):
    """Interpret a command-line string as a boolean flag.

    Returns True for (case-insensitive) 'yes', 'true', 't' or '1';
    any other string yields False.
    """
    truthy = ('yes', 'true', 't', '1')
    return v.lower() in truthy
# --- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(
    description='RefineDet Test With Pytorch')
parser.add_argument('--dataset', default='voc', choices=['voc', 'coco'],
                    type=str, help='voc or coco')
parser.add_argument('--network', default='vgg16',
                    help='Base network')
parser.add_argument('--input_size', default=320, type=int,
                    help='Input size for evaluation')
parser.add_argument('--batch_size', default=1, type=int,
                    help='Batch size for evaluation')
parser.add_argument('--model_path', default=None, type=str,
                    help='Checkpoint state_dict file to test from')
parser.add_argument('--result_path', default='./detection_output', type=str,
                    help='Path to store detection results in evaluation')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use CUDA to evaluate model')
args = parser.parse_args()
# Report available CUDA devices (informational prints only).
if torch.cuda.is_available():
    print('CUDA devices: ', torch.cuda.device)
    print('GPU numbers: ', torch.cuda.device_count())
    num_gpus = torch.cuda.device_count()
# NOTE(review): this unconditionally overrides the detected GPU count and
# forces single-GPU execution (DataParallel in evaluate() is never used).
# Presumably intentional — confirm before removing.
num_gpus = 1
def evaluate(model: torch.nn.Module, forward_pass_callback_args):
    """Forward-pass callback handed to AIMET for computing encodings.

    AIMET requires the ``(model, forward_pass_callback_args)`` signature;
    wrap any evaluation function that does not match it.

    This implementation runs full RefineDet detection over the evaluation
    imdb selected by the module-level ``args``, collects per-class boxes,
    pickles them to disk and runs the dataset's detection evaluation.

    :param model: Model to evaluate (forward passes drive AIMET's
        encoding computation).
    :param forward_pass_callback_args: Unused; present only to satisfy
        AIMET's callback signature.
    """
    # Assign imdb_name and imdbval_name according to args.dataset.
    if args.dataset == "voc":
        args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
        args.imdbval_name = "voc_2007_test"
    elif args.dataset == "coco":
        args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
        args.imdbval_name = "coco_2014_minival"
    # Pick the config matching dataset and input size (320 vs 512).
    if args.dataset == 'coco':
        cfg = (coco320, coco512)[args.input_size==512]
    elif args.dataset == 'voc':
        cfg = (voc320, voc512)[args.input_size==512]
    # Create imdb, roidb and blob_dataset for the evaluation split.
    print('Create or load an evaluted imdb.')
    imdb, roidb = combined_roidb(args.imdbval_name, False)
    imdb.competition_mode(on=True)
    print('{:d} roidb entries'.format(len(roidb)))
    blob_dataset = BlobDataset(
        imdb, roidb, transform=BaseTransform(cfg['min_dim'], MEANS),
        target_normalization=True)
    '''
    # Construct networks.
    print('Construct {}_refinedet network.'.format(args.network))
    if args.network == 'vgg16':
        refinedet = VGGRefineDet(cfg['num_classes'], cfg)
    elif args.network == 'resnet101':
        refinedet = ResNetRefineDet(cfg['num_classes'], cfg)
    '''
    # The network to evaluate is the one AIMET passes in, not a fresh build.
    refinedet = model
    #refinedet.create_architecture()
    # For CPU
    net = refinedet
    # For GPU/GPUs
    if args.cuda:
        net = refinedet.cuda()
        if num_gpus > 1:
            net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    # Load weights
    #net.load_weights(args.model_path)
    net.eval()
    print('Test RefineDet on:', args.imdbval_name)
    print('Using the specified args:')
    print(args)
    num_images = len(imdb.image_index)
    num_classes = imdb.num_classes
    # all_boxes[class][image] = N x 5 array of detections [x1, y1, x2, y2, score]
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(num_classes)]
    empty_array = np.transpose(np.array([[], [], [], [], []]), (1, 0))
    output_dir = get_output_dir(imdb, args.result_path)
    _t = {'im_detect': Timer(), 'misc': Timer()}
    det_file = os.path.join(output_dir, 'detections.pkl')
    # Inference only: disable autograd bookkeeping globally.
    torch.set_grad_enabled(False)
    for idx in range(num_images):
        img, gt, h, w = blob_dataset.pull_item(idx)
        input = Variable(img.unsqueeze(0))
        if args.cuda:
            input = input.cuda()
        # timers forward
        _t['im_detect'].tic()
        detection = net(input)
        detect_time = _t['im_detect'].toc(average=True)
        print('im_detect: {:d}/{:d} {:.3f}s'.format(
            idx + 1, num_images, detect_time))
        # skip jc = 0, because it's the background class
        for jc in range(1, num_classes):
            dets = detection[0, jc, :]
            # Keep rows whose confidence (column 0) is positive.
            mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
            dets = torch.masked_select(dets, mask).view(-1, 5)
            if (len(dets) > 0) and (dets.dim() > 0):
                # Boxes are normalized coordinates; scale back to pixels.
                boxes = dets[:, 1:]
                boxes[:, 0] *= w
                boxes[:, 2] *= w
                boxes[:, 1] *= h
                boxes[:, 3] *= h
                scores = dets[:, 0].cpu().numpy()
                cls_dets = np.hstack((boxes.cpu().numpy(),
                                      scores[:, np.newaxis])).astype(np.float32,
                                                                        copy=False)
                all_boxes[jc][idx] = cls_dets
            else:
                all_boxes[jc][idx] = empty_array
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir)
# NOTE(review): the triple-quoted block below is a dead, commented-out
# earlier variant of evaluate() (a minimal random-input forward pass),
# kept verbatim as reference. It is never executed.
'''
def evaluate(model: torch.nn.Module, forward_pass_callback_args):
    """
    This is intended to be the user-defined model evaluation function. AIMET requires the above signature. So if the
    user's eval function does not match this signature, please create a simple wrapper.
    Use representative dataset that covers diversity in training data to compute optimal encodings.
    :param model: Model to evaluate
    :param forward_pass_callback_args: These argument(s) are passed to the forward_pass_callback as-is. Up to
        the user to determine the type of this parameter. E.g. could be simply an integer representing the number
        of data samples to use. Or could be a tuple of parameters or an object representing something more complex.
        If set to None, forward_pass_callback will be invoked with no parameters.
    """
    #print(model)
    #exit()
    model.eval()
    dummy_input = torch.randn(32, 3,320, 320).to(torch.device('cuda'))
    #refinedet320.to(torch.device('cpu'))
    with torch.no_grad():
        model(dummy_input)
'''
def quantsim_refinedet(
        checkpoint_path='/home/ava/sarala/RefineDet_PreTrained_Checkpoints/'
                        'vgg16_refinedet320_voc_120000.pth',
        export_path='/home/ava/sarala/RefineDet/refinedet-onnxvalidation/'
                    'pytorch_ptq/',
        export_prefix='refinedet_sarala'):
    """Quantize RefineDet-320 (VGG16 backbone) with AIMET PTQ and export it.

    Builds the model, loads pretrained weights, applies cross-layer
    equalization, inserts 8-bit quantization simulation nodes, computes
    encodings via the module-level ``evaluate`` callback, and exports the
    model plus JSON encodings.

    The paths were previously hard-coded; they are now parameters whose
    defaults preserve the original behavior, so ``quantsim_refinedet()``
    still works unchanged.

    :param checkpoint_path: pretrained state_dict file to load.
    :param export_path: directory where the exported model and encodings
        are written.
    :param export_prefix: filename prefix for the exported artifacts.
    """
    AimetLogger.set_level_for_all_areas(logging.INFO)
    cfg = (voc320, voc512)[0]  # 320-input VOC configuration
    refinedet320 = VGGRefineDet(cfg['num_classes'], cfg)
    refinedet320.create_architecture()
    refinedet320.load_state_dict(torch.load(checkpoint_path))
    refinedet320.eval()
    refinedet320.cuda()
    input_shape = (32, 3, 320, 320)
    dummy_input = torch.randn(input_shape).cuda()
    # Cross-layer equalization rebalances weight ranges across adjacent
    # layers so per-tensor quantization loses less accuracy.
    equalize_model(refinedet320, input_shape)
    # Insert quantization simulation nodes (8-bit params and activations).
    quant_sim = QuantizationSimModel(refinedet320, dummy_input=dummy_input,
                                    quant_scheme=QuantScheme.post_training_tf_enhanced,
                                    default_param_bw=8, default_output_bw=8)
    # Compute encodings (min, max, delta, offset) for activations and
    # parameters using forward passes from the evaluation callback.
    quant_sim.compute_encodings(evaluate, forward_pass_callback_args=None)
    # Export the plain pytorch model (simulation nodes removed) together
    # with the encodings file in JSON format.
    quant_sim.export(path=export_path, filename_prefix=export_prefix,
                     dummy_input=dummy_input.cpu())
quantsim_refinedet() | [
"libs.data_layers.roidb.combined_roidb",
"pickle.dump",
"torch.masked_select",
"argparse.ArgumentParser",
"torch.randn",
"torch.cuda.device_count",
"libs.networks.vgg_refinedet.VGGRefineDet",
"os.path.join",
"aimet_torch.cross_layer_equalization.equalize_model",
"torch.load",
"libs.data_layers.t... | [((2001, 2067), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""RefineDet Test With Pytorch"""'}), "(description='RefineDet Test With Pytorch')\n", (2024, 2067), False, 'import argparse\n'), ((2957, 2982), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2980, 2982), False, 'import torch\n'), ((3103, 3128), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3126, 3128), False, 'import torch\n'), ((4620, 4660), 'libs.data_layers.roidb.combined_roidb', 'combined_roidb', (['args.imdbval_name', '(False)'], {}), '(args.imdbval_name, False)\n', (4634, 4660), False, 'from libs.data_layers.roidb import combined_roidb, get_output_dir\n'), ((5923, 5961), 'libs.data_layers.roidb.get_output_dir', 'get_output_dir', (['imdb', 'args.result_path'], {}), '(imdb, args.result_path)\n', (5937, 5961), False, 'from libs.data_layers.roidb import combined_roidb, get_output_dir\n'), ((6028, 6070), 'os.path.join', 'os.path.join', (['output_dir', '"""detections.pkl"""'], {}), "(output_dir, 'detections.pkl')\n", (6040, 6070), False, 'import os\n'), ((6109, 6138), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (6131, 6138), False, 'import torch\n'), ((8830, 8879), 'aimet_common.utils.AimetLogger.set_level_for_all_areas', 'AimetLogger.set_level_for_all_areas', (['logging.INFO'], {}), '(logging.INFO)\n', (8865, 8879), False, 'from aimet_common.utils import AimetLogger\n'), ((8937, 8974), 'libs.networks.vgg_refinedet.VGGRefineDet', 'VGGRefineDet', (["cfg['num_classes']", 'cfg'], {}), "(cfg['num_classes'], cfg)\n", (8949, 8974), False, 'from libs.networks.vgg_refinedet import VGGRefineDet\n'), ((9550, 9591), 'aimet_torch.cross_layer_equalization.equalize_model', 'equalize_model', (['refinedet320', 'input_shape'], {}), '(refinedet320, input_shape)\n', (9564, 9591), False, 'from aimet_torch.cross_layer_equalization import equalize_model\n'), ((9699, 9860), 
'aimet_torch.quantsim.QuantizationSimModel', 'QuantizationSimModel', (['refinedet320'], {'dummy_input': 'dummy_input', 'quant_scheme': 'QuantScheme.post_training_tf_enhanced', 'default_param_bw': '(8)', 'default_output_bw': '(8)'}), '(refinedet320, dummy_input=dummy_input, quant_scheme=\n QuantScheme.post_training_tf_enhanced, default_param_bw=8,\n default_output_bw=8)\n', (9719, 9860), False, 'from aimet_torch.quantsim import QuantizationSimModel\n'), ((1584, 1595), 'time.time', 'time.time', ([], {}), '()\n', (1593, 1595), False, 'import time\n'), ((3060, 3085), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3083, 3085), False, 'import torch\n'), ((5859, 5889), 'numpy.array', 'np.array', (['[[], [], [], [], []]'], {}), '([[], [], [], [], []])\n', (5867, 5889), True, 'import numpy as np\n'), ((7516, 7566), 'pickle.dump', 'pickle.dump', (['all_boxes', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n', (7527, 7566), False, 'import pickle\n'), ((9055, 9162), 'torch.load', 'torch.load', (['"""/home/ava/sarala/RefineDet_PreTrained_Checkpoints/vgg16_refinedet320_voc_120000.pth"""'], {}), "(\n '/home/ava/sarala/RefineDet_PreTrained_Checkpoints/vgg16_refinedet320_voc_120000.pth'\n )\n", (9065, 9162), False, 'import torch\n'), ((1653, 1664), 'time.time', 'time.time', ([], {}), '()\n', (1662, 1664), False, 'import time\n'), ((4814, 4850), 'libs.data_layers.transform.BaseTransform', 'BaseTransform', (["cfg['min_dim']", 'MEANS'], {}), "(cfg['min_dim'], MEANS)\n", (4827, 4850), False, 'from libs.data_layers.transform import detection_collate, BaseTransform\n'), ((5408, 5434), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (5429, 5434), False, 'import torch\n'), ((9265, 9289), 'torch.randn', 'torch.randn', (['input_shape'], {}), '(input_shape)\n', (9276, 9289), False, 'import torch\n'), ((6802, 6833), 'torch.masked_select', 'torch.masked_select', (['dets', 'mask'], {}), '(dets, mask)\n', 
(6821, 6833), False, 'import torch\n')] |
"""Simulate the generative process of LDA and generate corpus based on it
"""
import numpy as np
from scipy.sparse import coo_matrix
from scipy.stats import poisson
from sklearn.utils import check_random_state
from six.moves import xrange
class LdaSampleGenerator(object):
    """Generate corpora by sampling from the LDA generative process.

    Parameters
    ----------
    n_topics : int
        Number of topics

    n_words : int
        Number of words in corpus

    min_doc_size : int
        Min word count in a document

    mean_doc_size : int
        Mean word count in a document

    doc_topic_prior : double
        Uniform Dirichlet prior of a document

    topic_word_prior : double
        Uniform Dirichlet prior of a topic

    random_state : int, RandomState instance or None
        Seed/state for reproducible sampling.

    Attributes
    ----------
    topic_word_distr_ : array, [n_topics, n_words]
        Topic word distribution (one Dirichlet draw per topic).
    """

    def __init__(self, n_topics, n_words, min_doc_size,
                 mean_doc_size, doc_topic_prior,
                 topic_word_prior, random_state=None):
        self.n_topics = n_topics
        self.n_words = n_words
        self.min_doc_size = min_doc_size
        self.mean_doc_size = mean_doc_size
        self.doc_topic_prior = doc_topic_prior
        self.topic_word_prior = topic_word_prior
        self.random_state = random_state
        self.random_state_ = check_random_state(self.random_state)

        # hidden variables (symmetric Dirichlet priors expanded to vectors)
        self.topic_word_prior_ = np.repeat(topic_word_prior, n_words)
        self.doc_topic_prior_ = np.repeat(self.doc_topic_prior, n_topics)
        # (n_topics, n_words): each row is one topic's word distribution
        self.topic_word_distr_ = self.random_state_.dirichlet(
            self.topic_word_prior_, n_topics)

    def generate_documents(self, n_docs):
        """Generate a random document-word matrix.

        Parameters
        ----------
        n_docs : int
            number of documents

        Returns
        -------
        docs_distr : array, [n_docs, n_topics]
            per-document topic mixtures

        doc_word_mtx : sparse matrix, [n_docs, n_words]
            document-word count matrix (CSR)
        """
        rs = self.random_state_
        n_words = self.n_words

        # Document lengths: shifted Poisson so every document has at least
        # min_doc_size words.
        docs_size = poisson.rvs(mu=(self.mean_doc_size - self.min_doc_size),
                               size=n_docs, random_state=rs)
        docs_size += self.min_doc_size

        # (n_docs, n_topics) topic mixture per document; reuse the prior
        # vector already built in __init__ instead of recomputing it.
        docs_distr = rs.dirichlet(self.doc_topic_prior_, n_docs)

        # Collect COO coordinates in Python lists: extending lists is
        # amortized O(1), whereas the previous np.append-per-document
        # pattern re-allocated the whole array each iteration (O(n^2)).
        rows = []
        cols = []
        for i in range(n_docs):
            # Marginal word distribution for document i.
            word_dist = np.dot(self.topic_word_distr_.T, docs_distr[i, :])
            word_idx = rs.choice(n_words, docs_size[i], p=word_dist, replace=True)
            rows.extend([i] * docs_size[i])
            cols.extend(word_idx)

        data = np.ones(len(rows))
        doc_word_mtx = coo_matrix((data, (rows, cols)),
                                  shape=(n_docs, n_words)).tocsr()
        return docs_distr, doc_word_mtx
| [
"sklearn.utils.check_random_state",
"scipy.stats.poisson.rvs",
"numpy.append",
"six.moves.xrange",
"scipy.sparse.coo_matrix",
"numpy.dot",
"numpy.repeat"
] | [((1397, 1434), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (1415, 1434), False, 'from sklearn.utils import check_random_state\n'), ((1495, 1531), 'numpy.repeat', 'np.repeat', (['topic_word_prior', 'n_words'], {}), '(topic_word_prior, n_words)\n', (1504, 1531), True, 'import numpy as np\n'), ((1594, 1635), 'numpy.repeat', 'np.repeat', (['self.doc_topic_prior', 'n_topics'], {}), '(self.doc_topic_prior, n_topics)\n', (1603, 1635), True, 'import numpy as np\n'), ((2191, 2279), 'scipy.stats.poisson.rvs', 'poisson.rvs', ([], {'mu': '(self.mean_doc_size - self.min_doc_size)', 'size': 'n_docs', 'random_state': 'rs'}), '(mu=self.mean_doc_size - self.min_doc_size, size=n_docs,\n random_state=rs)\n', (2202, 2279), False, 'from scipy.stats import poisson\n'), ((2369, 2410), 'numpy.repeat', 'np.repeat', (['self.doc_topic_prior', 'n_topics'], {}), '(self.doc_topic_prior, n_topics)\n', (2378, 2410), True, 'import numpy as np\n'), ((2547, 2561), 'six.moves.xrange', 'xrange', (['n_docs'], {}), '(n_docs)\n', (2553, 2561), False, 'from six.moves import xrange\n'), ((2587, 2637), 'numpy.dot', 'np.dot', (['self.topic_word_distr_.T', 'docs_distr[i, :]'], {}), '(self.topic_word_distr_.T, docs_distr[i, :])\n', (2593, 2637), True, 'import numpy as np\n'), ((2803, 2828), 'numpy.append', 'np.append', (['cols', 'word_idx'], {}), '(cols, word_idx)\n', (2812, 2828), True, 'import numpy as np\n'), ((2756, 2782), 'numpy.repeat', 'np.repeat', (['i', 'docs_size[i]'], {}), '(i, docs_size[i])\n', (2765, 2782), True, 'import numpy as np\n'), ((2886, 2943), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(data, (rows, cols))'], {'shape': '(n_docs, n_words)'}), '((data, (rows, cols)), shape=(n_docs, n_words))\n', (2896, 2943), False, 'from scipy.sparse import coo_matrix\n')] |
import numpy
import pytest
import cupy
from cupy import testing
from cupy._core._gufuncs import _GUFunc
class TestGUFuncSignature:
    """Unit tests for the gufunc signature parser."""

    @pytest.mark.parametrize('signature', [
        ('(i,j)->(i,j)', [('i', 'j')], [('i', 'j')]),
        ('->(i)', [()], [('i',)]),
        ('(i,j),(j,k)->(k,l)', [('i', 'j'), ('j', 'k')], [('k', 'l')]),
        ('()->()', [()], [()])])
    def test_signature_parsing(self, signature):
        # Each case is (signature text, expected input dims, expected
        # output dims).
        text, expected_in, expected_out = signature
        parsed_in, parsed_out = (
            cupy._core._gufuncs._parse_gufunc_signature(text))
        assert parsed_in == expected_in
        assert parsed_out == expected_out

    @pytest.mark.parametrize('signature', [
        '(i,j)(i,j)',
        '(i,j)-(i,j)',
        '(i,j)(i,j)->(i,j)',
        'j->(i',
        '',
        '()->()->'])
    def test_invalid_signature_parsing(self, signature):
        # Malformed signatures must be rejected with ValueError.
        with pytest.raises(ValueError):
            cupy._core._gufuncs._parse_gufunc_signature(signature)
class TestGUFuncAxes:
    """Tests for the ``axes``/``axis`` arguments of ``_GUFunc.__call__``."""
    def _get_gufunc(self, signature):
        # Identity gufunc: output equals input, so any observed change in
        # the result comes purely from the axes handling under test.
        def func(x):
            return x
        return _GUFunc(func, signature)
    def _get_gufunc_scalar(self, signature):
        # Reducing gufunc: sums away the core dimension(s).
        def func(x):
            return x.sum()
        return _GUFunc(func, signature)
    @pytest.mark.parametrize('axes', [
        ((-1, -2), (-1, -2)),
        ((0, 1), (0, 1)),
        ((0, 1), (-1, -2)),
        ((1, 2), (-1, -2)),
        ((1, 2), (1, 2)),
        ((1, 2), (2, 3)),
        ((2, 3), (-1, -2)),
        ((2, 3), (0, 1)),
        ((2, 3), (1, 2)),
        ((0, 3), (1, 2)),
        ((0, 3), (2, 0)),
    ])
    @testing.numpy_cupy_array_equal()
    def test_axes_selection(self, xp, axes):
        # For a 2-core-dim identity gufunc, axes=[(src), (dst)] must act
        # like numpy.moveaxis on the input.
        x = testing.shaped_arange((2, 3, 4, 5), xp=xp)
        if xp is cupy:
            return self._get_gufunc('(i,j)->(i,j)')(x, axes=list(axes))
        else:
            return numpy.moveaxis(x, axes[0], axes[1])
    @pytest.mark.parametrize('axes', [
        (-1, -2),
        (0, 1),
        (1, 2),
        (2, 3),
        (0, 2),
        (0, 3),
        (1, 3),
        (3, 0),
        (2, 0),
        (2, 1),
        (1, 0),
    ])
    @testing.numpy_cupy_array_equal()
    def test_axes_selection_single(self, xp, axes):
        # Same as above but with a single core dimension per operand.
        x = testing.shaped_arange((2, 3, 4, 5), xp=xp)
        if xp is cupy:
            return self._get_gufunc('(i)->(i)')(x, axes=list(axes))
        else:
            return numpy.moveaxis(x, axes[0], axes[1])
    @pytest.mark.parametrize('axis', [0, 1, 2, 3])
    @testing.numpy_cupy_array_equal()
    def test_axis(self, xp, axis):
        # axis= selects the single core dimension of a reduction gufunc,
        # matching ndarray.sum(axis=...).
        x = testing.shaped_arange((2, 3, 4, 5), xp=xp)
        if xp is cupy:
            return self._get_gufunc_scalar('(i)->()')(x, axis=axis)
        else:
            return x.sum(axis=axis)
    def test_axis_invalid(self):
        # axis= only accepts a single integer; tuple-of-tuples (the axes=
        # format) must be rejected.
        x = testing.shaped_arange((2, 3, 4, 5))
        with pytest.raises(ValueError):
            self._get_gufunc('(i, j)->(i, j)')(x, axis=((0, 1), (0, 1)))
    @pytest.mark.parametrize('supports_batched', [True, False])
    def test_supports_batched(self, supports_batched):
        # With supports_batched=True the user function receives the full
        # batched array (ndim 4); otherwise only the core dims (ndim 2).
        x = testing.shaped_arange((2, 3, 4, 5))
        def func(x):
            nonlocal supports_batched
            if supports_batched:
                assert x.ndim == 4
            else:
                assert x.ndim == 2
            return x
        gu_func = _GUFunc(func, '(i,j)->(i,j)',
                          supports_batched=supports_batched)
        gu_func(x)
class TestGUFuncOut:
    """Tests for the ``out=`` argument of ``_GUFunc.__call__``."""
    def _get_gufunc(self):
        # Identity gufunc used by all out= tests.
        def func(x):
            return x
        return _GUFunc(func, '(i,j)->(i,j)')
    def test_out_array(self):
        # The result must be written into the caller-provided array.
        x = testing.shaped_arange((2, 3, 4, 5))
        out = cupy.empty((2, 3, 4, 5))
        self._get_gufunc()(x, out=out)
        testing.assert_allclose(x, out)
    def test_supports_out(self):
        # With supports_out=True the user function receives (a view of)
        # the caller-provided buffer and fills it in place.
        x = testing.shaped_arange((2, 3, 4, 5))
        out = cupy.empty((2, 3, 4, 5))
        out_ptr = out.data.ptr
        def func(x, out=None):
            nonlocal out_ptr
            # Base is a view of the output due to the batching
            assert out.base.data.ptr == out_ptr
            out[:] = x
        gu_func = _GUFunc(func, '(i,j)->(i,j)', supports_out=True)
        gu_func(x, out=out)
        testing.assert_allclose(x, out)
    def test_invalid_output_shape(self):
        # A mismatched out shape must be rejected.
        x = testing.shaped_arange((2, 3, 4, 5))
        out = cupy.empty((3, 3, 4, 5))
        with pytest.raises(ValueError):
            self._get_gufunc()(x, out=out)
    def test_invalid_output_dtype(self):
        # An out dtype the result cannot be safely cast to raises TypeError.
        x = testing.shaped_arange((2, 3, 4, 5))
        out = cupy.empty((2, 3, 4, 5), dtype='h')
        with pytest.raises(TypeError):
            self._get_gufunc()(x, out=out)
class TestGUFuncDtype:
    """Check that the ``dtype=`` argument controls the output dtype."""

    @testing.for_all_dtypes(name='dtype_i', no_bool=True, no_complex=True)
    @testing.for_all_dtypes(name='dtype_o', no_bool=True, no_complex=True)
    def test_dtypes(self, dtype_i, dtype_o):
        x = testing.shaped_arange((2, 3, 4, 5), dtype=dtype_i)
        if not numpy.can_cast(dtype_o, x.dtype):
            # Only dtype pairs where the requested output dtype can be
            # cast to the input dtype are exercised.
            return

        def identity(a):
            return a

        gufunc = _GUFunc(identity, '(i,j)->(i,j)')
        result = gufunc(x, dtype=dtype_o)
        assert result.dtype == dtype_o
        testing.assert_allclose(result, x)
class TestGUFuncOrder():
    """Tests for the memory-order (``order=``) argument."""
    @pytest.mark.parametrize('order', ['C', 'F', 'K'])
    @testing.numpy_cupy_array_equal(strides_check=True)
    def test_order(self, xp, order):
        # Output strides must match numpy's asarray(..., order=order)
        # (strides_check=True compares strides, not just values).
        x = testing.shaped_arange((2, 3, 4), xp=xp)
        if xp is cupy:
            def default(x):
                return x
            gu_func = _GUFunc(default, '(i, j, k)->(i, j, k)')
            return gu_func(x, order=order)
        else:
            return xp.asarray(x, order=order)
    @pytest.mark.parametrize('order', [('F', 'C', 'C'), ('F', 'F', 'F')])
    def test_order_a(self, order):
        # order='A': each case is (order of x, order of y, expected output
        # order) — the output is F-contiguous only when all inputs are F.
        x = testing.shaped_arange((2, 3, 4), order=order[0])
        y = testing.shaped_arange((2, 3, 4), order=order[1])
        def default(x, y):
            return x
        gu_func = _GUFunc(default, '(i,j,k),(i,j,k)->(i,j,k)')
        z = gu_func(x, y, order='A')
        if order[2] == 'C':
            assert z.flags.c_contiguous
        else:
            assert z.flags.f_contiguous
class TestGUFuncSignatures():
    """Tests for per-dtype implementations registered via ``signatures``."""
    def test_signatures(self):
        # dtypes_access records which implementation handled each dtype.
        dtypes = 'fdihq'
        dtypes_access = {d: None for d in dtypes}
        def integers(x, y):
            nonlocal dtypes_access
            dtypes_access[numpy.dtype(x.dtype).char] = integers
            return x + y
        def floats(x, y):
            nonlocal dtypes_access
            dtypes_access[numpy.dtype(x.dtype).char] = floats
            return x + y
        def default(x, y):
            nonlocal dtypes_access
            dtypes_access[numpy.dtype(x.dtype).char] = default
            return x + y
        sigs = (('ii->i', integers), ('dd->d', floats))
        gu_func = _GUFunc(default, '(i),(i)->(i)', signatures=sigs)
        for dtype in dtypes:
            x = cupy.array([10], dtype=dtype)
            y = x
            gu_func(x, y, casting='no')
            # 'i' must hit the integer kernel, 'd' the float kernel, and
            # every other dtype the default implementation.
            if dtype in 'i':
                assert dtypes_access[dtype] == integers
            elif dtype in 'd':
                assert dtypes_access[dtype] == floats
            else:
                assert dtypes_access[dtype] == default
    @pytest.mark.parametrize('sig,', ['ii->i', 'i', ('i', 'i', 'i')])
    def test_signature_lookup(self, sig):
        called = False
        def func(x, y):
            nonlocal called
            called = True
            return x + y
        def default(x, y):
            return x + y
        dtypes = 'fdhq'
        sigs = (('ii->i', func),)
        gu_func = _GUFunc(default, '(i),(i)->(i)', signatures=sigs)
        # With casting='no', none of these dtypes exactly match 'ii->i',
        # so the registered kernel must not fire.
        for dtype in dtypes:
            x = cupy.array([10], dtype=dtype)
            y = x
            gu_func(x, y, casting='no')
        assert not called
        # An explicit signature= plus unsafe casting forces the registered
        # int32 kernel even for double input.
        x = cupy.array([10], dtype='d')
        y = x
        z = gu_func(x, y, casting='unsafe', signature=sig)
        assert z.dtype == numpy.int32
        assert called
    @pytest.mark.parametrize('sigs,', [('i',), ('',), ('iii->i',), ('ii->',)])
    def test_invalid_signatures(self, sigs):
        # Malformed signature registrations are rejected at construction.
        def default(x, y):
            return x + y
        with pytest.raises(ValueError):
            _GUFunc(default, '(i),(i)->(i)', signatures=sigs)
    @pytest.mark.parametrize('sig,', ['i->i', 'id->i', ''])
    def test_invalid_lookup(self, sig):
        # signature= strings matching no registered kernel raise TypeError.
        def default(x, y):
            return x + y
        sigs = (('ii->i', default),)
        gu_func = _GUFunc(default, '(i),(i)->(i)', signatures=sigs)
        # NOTE(review): the construction below is unused (result discarded)
        # — likely a leftover duplicate of the line above.
        _GUFunc(default, '(i),(i)->(i)', signatures=sigs)
        x = cupy.array([10], dtype='d')
        y = x
        with pytest.raises(TypeError):
            gu_func(x, y, casting='unsafe', signature=sig)
| [
"cupy.testing.for_all_dtypes",
"cupy.testing.shaped_arange",
"numpy.moveaxis",
"cupy._core._gufuncs._GUFunc",
"cupy.empty",
"numpy.dtype",
"cupy.array",
"numpy.can_cast",
"pytest.raises",
"cupy._core._gufuncs._parse_gufunc_signature",
"pytest.mark.parametrize",
"cupy.testing.numpy_cupy_array_e... | [((140, 347), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""signature"""', "[('(i,j)->(i,j)', [('i', 'j')], [('i', 'j')]), ('->(i)', [()], [('i',)]), (\n '(i,j),(j,k)->(k,l)', [('i', 'j'), ('j', 'k')], [('k', 'l')]), (\n '()->()', [()], [()])]"], {}), "('signature', [('(i,j)->(i,j)', [('i', 'j')], [('i',\n 'j')]), ('->(i)', [()], [('i',)]), ('(i,j),(j,k)->(k,l)', [('i', 'j'),\n ('j', 'k')], [('k', 'l')]), ('()->()', [()], [()])])\n", (163, 347), False, 'import pytest\n'), ((567, 684), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""signature"""', "['(i,j)(i,j)', '(i,j)-(i,j)', '(i,j)(i,j)->(i,j)', 'j->(i', '', '()->()->']"], {}), "('signature', ['(i,j)(i,j)', '(i,j)-(i,j)',\n '(i,j)(i,j)->(i,j)', 'j->(i', '', '()->()->'])\n", (590, 684), False, 'import pytest\n'), ((1178, 1433), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axes"""', '[((-1, -2), (-1, -2)), ((0, 1), (0, 1)), ((0, 1), (-1, -2)), ((1, 2), (-1, \n -2)), ((1, 2), (1, 2)), ((1, 2), (2, 3)), ((2, 3), (-1, -2)), ((2, 3),\n (0, 1)), ((2, 3), (1, 2)), ((0, 3), (1, 2)), ((0, 3), (2, 0))]'], {}), "('axes', [((-1, -2), (-1, -2)), ((0, 1), (0, 1)), ((\n 0, 1), (-1, -2)), ((1, 2), (-1, -2)), ((1, 2), (1, 2)), ((1, 2), (2, 3)\n ), ((2, 3), (-1, -2)), ((2, 3), (0, 1)), ((2, 3), (1, 2)), ((0, 3), (1,\n 2)), ((0, 3), (2, 0))])\n", (1201, 1433), False, 'import pytest\n'), ((1520, 1552), 'cupy.testing.numpy_cupy_array_equal', 'testing.numpy_cupy_array_equal', ([], {}), '()\n', (1550, 1552), False, 'from cupy import testing\n'), ((1823, 1950), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axes"""', '[(-1, -2), (0, 1), (1, 2), (2, 3), (0, 2), (0, 3), (1, 3), (3, 0), (2, 0),\n (2, 1), (1, 0)]'], {}), "('axes', [(-1, -2), (0, 1), (1, 2), (2, 3), (0, 2),\n (0, 3), (1, 3), (3, 0), (2, 0), (2, 1), (1, 0)])\n", (1846, 1950), False, 'import pytest\n'), ((2047, 2079), 'cupy.testing.numpy_cupy_array_equal', 
'testing.numpy_cupy_array_equal', ([], {}), '()\n', (2077, 2079), False, 'from cupy import testing\n'), ((2353, 2398), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axis"""', '[0, 1, 2, 3]'], {}), "('axis', [0, 1, 2, 3])\n", (2376, 2398), False, 'import pytest\n'), ((2404, 2436), 'cupy.testing.numpy_cupy_array_equal', 'testing.numpy_cupy_array_equal', ([], {}), '()\n', (2434, 2436), False, 'from cupy import testing\n'), ((2869, 2927), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""supports_batched"""', '[True, False]'], {}), "('supports_batched', [True, False])\n", (2892, 2927), False, 'import pytest\n'), ((4641, 4710), 'cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'name': '"""dtype_i"""', 'no_bool': '(True)', 'no_complex': '(True)'}), "(name='dtype_i', no_bool=True, no_complex=True)\n", (4663, 4710), False, 'from cupy import testing\n'), ((4716, 4785), 'cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'name': '"""dtype_o"""', 'no_bool': '(True)', 'no_complex': '(True)'}), "(name='dtype_o', no_bool=True, no_complex=True)\n", (4738, 4785), False, 'from cupy import testing\n'), ((5194, 5243), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', "['C', 'F', 'K']"], {}), "('order', ['C', 'F', 'K'])\n", (5217, 5243), False, 'import pytest\n'), ((5249, 5299), 'cupy.testing.numpy_cupy_array_equal', 'testing.numpy_cupy_array_equal', ([], {'strides_check': '(True)'}), '(strides_check=True)\n', (5279, 5299), False, 'from cupy import testing\n'), ((5637, 5705), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', "[('F', 'C', 'C'), ('F', 'F', 'F')]"], {}), "('order', [('F', 'C', 'C'), ('F', 'F', 'F')])\n", (5660, 5705), False, 'import pytest\n'), ((7233, 7297), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sig,"""', "['ii->i', 'i', ('i', 'i', 'i')]"], {}), "('sig,', ['ii->i', 'i', ('i', 'i', 'i')])\n", (7256, 7297), False, 'import pytest\n'), ((7991, 8064), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigs,"""', "[('i',), ('',), ('iii->i',), ('ii->',)]"], {}), "('sigs,', [('i',), ('',), ('iii->i',), ('ii->',)])\n", (8014, 8064), False, 'import pytest\n'), ((8272, 8326), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sig,"""', "['i->i', 'id->i', '']"], {}), "('sig,', ['i->i', 'id->i', ''])\n", (8295, 8326), False, 'import pytest\n'), ((437, 494), 'cupy._core._gufuncs._parse_gufunc_signature', 'cupy._core._gufuncs._parse_gufunc_signature', (['signature[0]'], {}), '(signature[0])\n', (480, 494), False, 'import cupy\n'), ((1013, 1037), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['func', 'signature'], {}), '(func, signature)\n', (1020, 1037), False, 'from cupy._core._gufuncs import _GUFunc\n'), ((1147, 1171), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['func', 'signature'], {}), '(func, signature)\n', (1154, 1171), False, 'from cupy._core._gufuncs import _GUFunc\n'), ((1610, 1652), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4, 5)'], {'xp': 'xp'}), '((2, 3, 4, 5), xp=xp)\n', (1631, 1652), False, 'from cupy import testing\n'), ((2144, 2186), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4, 5)'], {'xp': 'xp'}), '((2, 3, 4, 5), xp=xp)\n', (2165, 2186), False, 'from cupy import testing\n'), ((2484, 2526), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4, 5)'], {'xp': 'xp'}), '((2, 3, 4, 5), xp=xp)\n', (2505, 2526), False, 'from cupy import testing\n'), ((2714, 2749), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (2735, 2749), False, 'from cupy import testing\n'), ((2995, 3030), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (3016, 3030), False, 'from cupy import testing\n'), ((3251, 3315), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['func', '"""(i,j)->(i,j)"""'], {'supports_batched': 'supports_batched'}), "(func, '(i,j)->(i,j)', 
supports_batched=supports_batched)\n", (3258, 3315), False, 'from cupy._core._gufuncs import _GUFunc\n'), ((3468, 3497), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['func', '"""(i,j)->(i,j)"""'], {}), "(func, '(i,j)->(i,j)')\n", (3475, 3497), False, 'from cupy._core._gufuncs import _GUFunc\n'), ((3541, 3576), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (3562, 3576), False, 'from cupy import testing\n'), ((3591, 3615), 'cupy.empty', 'cupy.empty', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (3601, 3615), False, 'import cupy\n'), ((3663, 3694), 'cupy.testing.assert_allclose', 'testing.assert_allclose', (['x', 'out'], {}), '(x, out)\n', (3686, 3694), False, 'from cupy import testing\n'), ((3741, 3776), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (3762, 3776), False, 'from cupy import testing\n'), ((3791, 3815), 'cupy.empty', 'cupy.empty', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (3801, 3815), False, 'import cupy\n'), ((4060, 4108), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['func', '"""(i,j)->(i,j)"""'], {'supports_out': '(True)'}), "(func, '(i,j)->(i,j)', supports_out=True)\n", (4067, 4108), False, 'from cupy._core._gufuncs import _GUFunc\n'), ((4145, 4176), 'cupy.testing.assert_allclose', 'testing.assert_allclose', (['x', 'out'], {}), '(x, out)\n', (4168, 4176), False, 'from cupy import testing\n'), ((4231, 4266), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (4252, 4266), False, 'from cupy import testing\n'), ((4281, 4305), 'cupy.empty', 'cupy.empty', (['(3, 3, 4, 5)'], {}), '((3, 3, 4, 5))\n', (4291, 4305), False, 'import cupy\n'), ((4443, 4478), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (4464, 4478), False, 'from cupy import testing\n'), ((4493, 4528), 'cupy.empty', 'cupy.empty', (['(2, 3, 4, 5)'], {'dtype': '"""h"""'}), "((2, 3, 4, 5), 
dtype='h')\n", (4503, 4528), False, 'import cupy\n'), ((4843, 4893), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4, 5)'], {'dtype': 'dtype_i'}), '((2, 3, 4, 5), dtype=dtype_i)\n', (4864, 4893), False, 'from cupy import testing\n'), ((4905, 4937), 'numpy.can_cast', 'numpy.can_cast', (['dtype_o', 'x.dtype'], {}), '(dtype_o, x.dtype)\n', (4919, 4937), False, 'import numpy\n'), ((5349, 5388), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4)'], {'xp': 'xp'}), '((2, 3, 4), xp=xp)\n', (5370, 5388), False, 'from cupy import testing\n'), ((5753, 5801), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4)'], {'order': 'order[0]'}), '((2, 3, 4), order=order[0])\n', (5774, 5801), False, 'from cupy import testing\n'), ((5814, 5862), 'cupy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4)'], {'order': 'order[1]'}), '((2, 3, 4), order=order[1])\n', (5835, 5862), False, 'from cupy import testing\n'), ((5931, 5975), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['default', '"""(i,j,k),(i,j,k)->(i,j,k)"""'], {}), "(default, '(i,j,k),(i,j,k)->(i,j,k)')\n", (5938, 5975), False, 'from cupy._core._gufuncs import _GUFunc\n'), ((6801, 6850), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['default', '"""(i),(i)->(i)"""'], {'signatures': 'sigs'}), "(default, '(i),(i)->(i)', signatures=sigs)\n", (6808, 6850), False, 'from cupy._core._gufuncs import _GUFunc\n'), ((7598, 7647), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['default', '"""(i),(i)->(i)"""'], {'signatures': 'sigs'}), "(default, '(i),(i)->(i)', signatures=sigs)\n", (7605, 7647), False, 'from cupy._core._gufuncs import _GUFunc\n'), ((7824, 7851), 'cupy.array', 'cupy.array', (['[10]'], {'dtype': '"""d"""'}), "([10], dtype='d')\n", (7834, 7851), False, 'import cupy\n'), ((8476, 8525), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['default', '"""(i),(i)->(i)"""'], {'signatures': 'sigs'}), "(default, '(i),(i)->(i)', signatures=sigs)\n", (8483, 8525), False, 'from 
cupy._core._gufuncs import _GUFunc\n'), ((8534, 8583), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['default', '"""(i),(i)->(i)"""'], {'signatures': 'sigs'}), "(default, '(i),(i)->(i)', signatures=sigs)\n", (8541, 8583), False, 'from cupy._core._gufuncs import _GUFunc\n'), ((8597, 8624), 'cupy.array', 'cupy.array', (['[10]'], {'dtype': '"""d"""'}), "([10], dtype='d')\n", (8607, 8624), False, 'import cupy\n'), ((800, 825), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (813, 825), False, 'import pytest\n'), ((839, 893), 'cupy._core._gufuncs._parse_gufunc_signature', 'cupy._core._gufuncs._parse_gufunc_signature', (['signature'], {}), '(signature)\n', (882, 893), False, 'import cupy\n'), ((1781, 1816), 'numpy.moveaxis', 'numpy.moveaxis', (['x', 'axes[0]', 'axes[1]'], {}), '(x, axes[0], axes[1])\n', (1795, 1816), False, 'import numpy\n'), ((2311, 2346), 'numpy.moveaxis', 'numpy.moveaxis', (['x', 'axes[0]', 'axes[1]'], {}), '(x, axes[0], axes[1])\n', (2325, 2346), False, 'import numpy\n'), ((2763, 2788), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2776, 2788), False, 'import pytest\n'), ((4319, 4344), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4332, 4344), False, 'import pytest\n'), ((4542, 4566), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4555, 4566), False, 'import pytest\n'), ((5010, 5039), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['func', '"""(i,j)->(i,j)"""'], {}), "(func, '(i,j)->(i,j)')\n", (5017, 5039), False, 'from cupy._core._gufuncs import _GUFunc\n'), ((5131, 5160), 'cupy.testing.assert_allclose', 'testing.assert_allclose', (['z', 'x'], {}), '(z, x)\n', (5154, 5160), False, 'from cupy import testing\n'), ((5487, 5527), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['default', '"""(i, j, k)->(i, j, k)"""'], {}), "(default, '(i, j, k)->(i, j, k)')\n", (5494, 5527), False, 'from cupy._core._gufuncs import _GUFunc\n'), ((6896, 6925), 
'cupy.array', 'cupy.array', (['[10]'], {'dtype': 'dtype'}), '([10], dtype=dtype)\n', (6906, 6925), False, 'import cupy\n'), ((7693, 7722), 'cupy.array', 'cupy.array', (['[10]'], {'dtype': 'dtype'}), '([10], dtype=dtype)\n', (7703, 7722), False, 'import cupy\n'), ((8177, 8202), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8190, 8202), False, 'import pytest\n'), ((8216, 8265), 'cupy._core._gufuncs._GUFunc', '_GUFunc', (['default', '"""(i),(i)->(i)"""'], {'signatures': 'sigs'}), "(default, '(i),(i)->(i)', signatures=sigs)\n", (8223, 8265), False, 'from cupy._core._gufuncs import _GUFunc\n'), ((8652, 8676), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (8665, 8676), False, 'import pytest\n'), ((6363, 6383), 'numpy.dtype', 'numpy.dtype', (['x.dtype'], {}), '(x.dtype)\n', (6374, 6383), False, 'import numpy\n'), ((6514, 6534), 'numpy.dtype', 'numpy.dtype', (['x.dtype'], {}), '(x.dtype)\n', (6525, 6534), False, 'import numpy\n'), ((6664, 6684), 'numpy.dtype', 'numpy.dtype', (['x.dtype'], {}), '(x.dtype)\n', (6675, 6684), False, 'import numpy\n')] |
import gym
from gym import spaces
from gym.utils import seeding
import pandas as pd
import numpy as np
from enum import Enum
import matplotlib.pyplot as plt
import csv
import gym_anytrading.datasets.b3 as b3
class TradingEnv(gym.Env):
    """Gym environment for portfolio allocation over B3 stock data."""

    def __init__(self):
        # Portfolio setup: n_stocks assets plus one cash position.
        self.n_stocks = 10
        self.W = 2  # number of past days kept in each observation window
        self.count = 0
        self.count_episodes = -1
        self.max_steps = 5
        self.state = None
        # Daily quotes for the traded stocks (path relative to this module).
        csv_filename = '../../../gym_anytrading/datasets/data/B3_COTAHIST.csv'
        self.df = pd.read_csv(csv_filename, parse_dates=True, index_col='Date')
        # Action: one non-negative weight per asset plus cash (normalized
        # again in normalizeAction before use).
        self.action_space = spaces.Box(low=0, high=1.0, shape=(self.n_stocks+1,), dtype=np.float32)
        # Observation: W+1 stacked rows of per-asset relative price changes
        # (including the constant cash entry), flattened.
        self.observation_space = spaces.Box(low=0.0, high=10.0, shape=((self.W+1)*(self.n_stocks+1), ), dtype=np.float32)
        self.beta = 1  # reward scaling factor
def seed(self, seed=None):
pass
def reset(self):
self.count = 0
self.count_episodes += 1
return self.receive_state().flatten()
#self._done = False
#self._current_tick = self._start_tick
#self._last_trade_tick = self._current_tick - 1
#self._position = Positions.Short
#self._position_history = (self.window_size * [None]) + [self._position]
#self._total_reward = 0.
#self._total_profit = 1. # unit
#self._first_rendering = True
#self.history = {}
#return self._get_observation()
#pass
def normalizeAction(self, action):
new_action = []
action = np.array(action)
for i in action: #range(len(action)):
new_action.append(i/action.sum())
#print(new_action, np.array(new_action).sum())
return new_action
def receive_state(self):
state = []
#print("AQUI.......")
for j in range(self.W, -1, -1):
start_point =self.n_stocks*self.W + self.count_episodes*self.max_steps*self.n_stocks + (self.count-j)*self.n_stocks
df_new = self.df.iloc[start_point:start_point+10]
df_new = df_new.iloc[:,[1,4]]
#print(self.count, df_new)
obs = [1]
for i in range(self.n_stocks):
#print(line)
obs.append(df_new.iloc[i, 1]/df_new.iloc[i, 0])
#print(obs)
state.append(np.array(obs))
#print(np.array(state))
return np.array(state)
#start_point = self.count_episodes*self.max_steps*self.n_stocks + self.count*self.n_stocks
#df_new = self.df.iloc[start_point:start_point+10]
#df_new = df_new.iloc[:,[1,4]]
#print(self.count, df_new)
#obs = [1]
#for i in range(self.n_stocks):
# #print(line)
# obs.append(df_new.iloc[i, 1]/df_new.iloc[i, 0])
#print(obs)
#state.append(obs)
#self.holdings = self.holdings -
#new_action = normalizeAction(action)
return []
def calculate_reward(self, action):
#self.state = self.observation_space.sample()
#print(self.state)
reward = self.beta*np.dot(self.state[-1], action)
done = False
if(self.count>=self.max_steps):
done = True
#print("REWARD ", reward)
return reward, done
#valueOfHolding = data["Close"]
#self.portifolio = valueOfHolding*self.holdings
def step(self, action):
action = self.normalizeAction(action)
self.state = self.receive_state()
#print(state)
self.count +=1
reward, done = self.calculate_reward(action)
#self.history.insert(0, [self.count, state, reward])
#if(len(self.history)>3):
# self.history.pop(3)
#print(self.history[0][1])
#self._done = False
#self._current_tick += 1
#if self._current_tick == self._end_tick:
# self._done = True
#step_reward = self._calculate_reward(action)
#self._total_reward += step_reward
#self._update_profit(action)
#trade = False
#if ((action == Actions.Buy.value and self._position == Positions.Short) or
# (action == Actions.Sell.value and self._position == Positions.Long)):
# trade = True
#if trade:
# self._position = self._position.opposite()
# self._last_trade_tick = self._current_tick
#self._position_history.append(self._position)
#observation = self._get_observation()
#info = dict(
# total_reward = self._total_reward,
# total_profit = self._total_profit,
# position = self._position.value
#)
#self._update_history(info)
return self.state.flatten(), reward, done, []
    def readData(self):
        """Open the AMBEV stock CSV and return a csv.DictReader over it."""
        # NOTE(review): the file handle is never closed; since the returned
        # DictReader consumes it lazily, closing it here would break
        # iteration — consider materializing the rows inside a `with` block.
        ficheiro = open('gym_anytrading/datasets/data/STOCKS_AMBEV.csv', 'r')
        reader = csv.DictReader(ficheiro, delimiter = ',')
        #print(reader)
        #for linha in reader:
        #    print (linha["Close"])
return reader | [
"pandas.read_csv",
"csv.DictReader",
"numpy.array",
"gym.spaces.Box",
"numpy.dot"
] | [((647, 708), 'pandas.read_csv', 'pd.read_csv', (['csv_filename'], {'parse_dates': '(True)', 'index_col': '"""Date"""'}), "(csv_filename, parse_dates=True, index_col='Date')\n", (658, 708), True, 'import pandas as pd\n'), ((788, 861), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1.0)', 'shape': '(self.n_stocks + 1,)', 'dtype': 'np.float32'}), '(low=0, high=1.0, shape=(self.n_stocks + 1,), dtype=np.float32)\n', (798, 861), False, 'from gym import spaces\n'), ((893, 990), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0.0)', 'high': '(10.0)', 'shape': '((self.W + 1) * (self.n_stocks + 1),)', 'dtype': 'np.float32'}), '(low=0.0, high=10.0, shape=((self.W + 1) * (self.n_stocks + 1),),\n dtype=np.float32)\n', (903, 990), False, 'from gym import spaces\n'), ((1701, 1717), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (1709, 1717), True, 'import numpy as np\n'), ((2551, 2566), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (2559, 2566), True, 'import numpy as np\n'), ((5034, 5073), 'csv.DictReader', 'csv.DictReader', (['ficheiro'], {'delimiter': '""","""'}), "(ficheiro, delimiter=',')\n", (5048, 5073), False, 'import csv\n'), ((3252, 3282), 'numpy.dot', 'np.dot', (['self.state[-1]', 'action'], {}), '(self.state[-1], action)\n', (3258, 3282), True, 'import numpy as np\n'), ((2489, 2502), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (2497, 2502), True, 'import numpy as np\n')] |
import xml.etree.ElementTree as ET
import numpy as np
import collections
import sys
from mmcv.utils.progressbar import ProgressBar
def parse_xml(args):
    """Convert one VOC-style XML annotation into an mmdet-style dict.

    Args:
        args: tuple ``(xml_path, img_path, flag)`` where ``flag`` names
            the dataset split.

    Only 'person' objects that are not difficult, not occluded and at
    least 50 px tall are kept (label 1); everything else goes into the
    ignore arrays (label 0). For the 'train' split, images without any
    kept pedestrian return None so they can be dropped.
    """
    xml_path, img_path, flag = args
    root = ET.parse(xml_path).getroot()
    size_node = root.find('size')
    width = int(size_node.find('width').text)
    height = int(size_node.find('height').text)

    kept_boxes, kept_labels = [], []
    ignored_boxes, ignored_labels = [], []
    for obj in root.findall('object'):
        label_name = obj.find('name').text
        # 'difficult' is reported to always be zero in this dataset.
        difficult = int(obj.find('difficult').text)
        occlusion = int(obj.find('occlusion').text)
        box_node = obj.find('bndbox')
        box = [int(float(box_node.find(tag).text))
               for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
        is_ignored = (label_name != 'person' or difficult > 0 or
                      occlusion > 0 or box[3] - box[1] + 1 < 50)
        if is_ignored:
            ignored_boxes.append(box)
            ignored_labels.append(0)
        else:
            kept_boxes.append(box)
            kept_labels.append(1)

    if kept_boxes:
        bboxes = np.array(kept_boxes, ndmin=2)
        labels = np.array(kept_labels)
    elif flag == 'train':
        # Training images without any pedestrian can be ignored.
        return None
    else:
        bboxes = np.zeros((0, 4))
        labels = np.zeros((0,))

    if ignored_boxes:
        bboxes_ignore = np.array(ignored_boxes, ndmin=2)
        labels_ignore = np.array(ignored_labels)
    else:
        bboxes_ignore = np.zeros((0, 4))
        labels_ignore = np.zeros((0,))

    return {
        'filename': img_path,
        'width': width,
        'height': height,
        'flag': 0,
        'ann': {
            'bboxes': bboxes.astype(np.float32),
            'labels': labels.astype(np.int64),
            'bboxes_ignore': bboxes_ignore.astype(np.float32),
            'labels_ignore': labels_ignore.astype(np.int64)
        }
    }
"""
Author:<NAME>
Date:2019/03/08
Description:Prepare data for Faster RCNN which deals with cross-model
"""
def parse_xml_cross(args):
    """Parse a VOC annotation and tag it with a cross-model flag.

    Args:
        args: tuple ``(xml_path, img_path, flag, flag_model)``; the first
            three are forwarded to :func:`parse_xml`, ``flag_model`` is
            stored under the result's ``'flag'`` key.
    """
    xml_path, img_path, flag, flag_model = args
    parsed = parse_xml((xml_path, img_path, flag))
    if parsed is None:
        return None
    parsed['flag'] = flag_model
    return parsed
"""
Author:<NAME>
Date:2019/03/05
Description:Prepare data for auto-encoder
"""
def parse_xml_coder(args):
    """Build an annotation stub (no boxes) for auto-encoder training.

    Args:
        args: tuple ``(xml_path, img_path, flag_coder)``. Only the image
            size is read from the XML; all box/label fields stay None.
    """
    xml_path, img_path, flag_coder = args
    size_node = ET.parse(xml_path).getroot().find('size')
    return {
        'filename': img_path,
        'width': int(size_node.find('width').text),
        'height': int(size_node.find('height').text),
        'flag': flag_coder,
        'ann': {
            'bboxes': None,
            'labels': None,
            'bboxes_ignore': None,
            'labels_ignore': None
        }
    }
def track_progress_yuan(func, tasks, bar_width=50, **kwargs):
    """Track the progress of tasks execution with a progress bar.

    Tasks are done with a simple for-loop; results that are ``None`` are
    dropped from the returned list.

    Args:
        func (callable): The function to be applied to each task.
        tasks (list or tuple[Iterable, int]): A list of tasks or
            (tasks, total num).
        bar_width (int): Width of progress bar.

    Returns:
        list: The non-None task results.
    """
    if isinstance(tasks, tuple):
        assert len(tasks) == 2
        # Fixed: ``collections.Iterable`` was a deprecated alias removed in
        # Python 3.10; the ABCs live in ``collections.abc``.
        assert isinstance(tasks[0], collections.abc.Iterable)
        assert isinstance(tasks[1], int)
        task_num = tasks[1]
        tasks = tasks[0]
    elif isinstance(tasks, collections.abc.Iterable):
        # NOTE: this branch still requires len(); a sized container is
        # expected here, matching the original behavior.
        task_num = len(tasks)
    else:
        raise TypeError(
            '"tasks" must be an iterable object or a (iterator, int) tuple')
    prog_bar = ProgressBar(task_num, bar_width)
    results = []
    for task in tasks:
        temp = func(task, **kwargs)
        if temp is not None:
            results.append(temp)
        prog_bar.update()
    sys.stdout.write('\n')
    return results
| [
"sys.stdout.write",
"xml.etree.ElementTree.parse",
"mmcv.utils.progressbar.ProgressBar",
"numpy.zeros",
"numpy.array"
] | [((201, 219), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (209, 219), True, 'import xml.etree.ElementTree as ET\n'), ((2598, 2616), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (2606, 2616), True, 'import xml.etree.ElementTree as ET\n'), ((3918, 3950), 'mmcv.utils.progressbar.ProgressBar', 'ProgressBar', (['task_num', 'bar_width'], {}), '(task_num, bar_width)\n', (3929, 3950), False, 'from mmcv.utils.progressbar import ProgressBar\n'), ((4123, 4145), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (4139, 4145), False, 'import sys\n'), ((1422, 1447), 'numpy.array', 'np.array', (['bboxes'], {'ndmin': '(2)'}), '(bboxes, ndmin=2)\n', (1430, 1447), True, 'import numpy as np\n'), ((1465, 1481), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1473, 1481), True, 'import numpy as np\n'), ((1532, 1548), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (1540, 1548), True, 'import numpy as np\n'), ((1573, 1587), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (1581, 1587), True, 'import numpy as np\n'), ((1622, 1654), 'numpy.array', 'np.array', (['bboxes_ignore'], {'ndmin': '(2)'}), '(bboxes_ignore, ndmin=2)\n', (1630, 1654), True, 'import numpy as np\n'), ((1679, 1702), 'numpy.array', 'np.array', (['labels_ignore'], {}), '(labels_ignore)\n', (1687, 1702), True, 'import numpy as np\n'), ((1342, 1358), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (1350, 1358), True, 'import numpy as np\n'), ((1380, 1394), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (1388, 1394), True, 'import numpy as np\n')] |
import numpy as np
from chainer0.function import Function
class Sin(Function):
    """Elementwise sine, with gradient d/dx sin(x) = cos(x)."""

    def forward(self, x):
        return np.sin(x)

    def backward(self, gy):
        inp = self.inputs[0]
        return cos(inp) * gy
class Cos(Function):
    """Elementwise cosine, with gradient d/dx cos(x) = -sin(x)."""

    def forward(self, x):
        return np.cos(x)

    def backward(self, gy):
        inp = self.inputs[0]
        return -sin(inp) * gy
def sin(x):
    """Elementwise sine function."""
    return Sin()(x)
def cos(x):
    """Elementwise cosine function."""
    return Cos()(x)
| [
"numpy.sin",
"numpy.cos"
] | [((123, 132), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (129, 132), True, 'import numpy as np\n'), ((296, 305), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (302, 305), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import numpy.testing as npt
from wilson import wcxf
import wilson
from wilson.run.smeft.smpar import p
import ckmutil
from math import pi, log
from wilson.util.wetutil import C_symm_keys
import wilson.match.smeft_loop
import wilson.match.smeft_tree
# Fixed seed so the randomly generated Wilson coefficients below (and hence
# the symmetry tests that consume them) are reproducible across runs.
np.random.seed(39)
# generate a random WC instance for the SMEFT Warsaw basis
C_Warsaw_random = {}
basis = wcxf.Basis['SMEFT', 'Warsaw']
for sector, wcs in basis.sectors.items():
    for name, d in wcs.items():
        # O(1e-6) real part for every coefficient ...
        C_Warsaw_random[name] = 1e-6*np.random.rand()
        # ... plus an imaginary part unless the basis marks it as real.
        # (The call order of np.random.rand matters for reproducibility.)
        if 'real' not in d or d['real'] == False:
            C_Warsaw_random[name] += 1j*1e-6*np.random.rand()
class TestMatch(unittest.TestCase):
    """Tree-level matching of single Warsaw-up coefficients onto WET (JMS)."""

    @staticmethod
    def _matched(name, value):
        """Match one SMEFT 'Warsaw up' coefficient set at 1 TeV onto WET/JMS."""
        wc = wcxf.WC(values={name: value}, scale=1e3,
                     eft='SMEFT', basis='Warsaw up')
        return wc.match('WET', 'JMS')

    def test_match_qq3_1122(self):
        # Q_qq^(3) of Warsaw matches onto O_ud^V8,LL in the JMS basis
        jms = self._matched('qq3_1122', 2e-6)
        V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
        ratio = jms['V8udLL_1221'] / V[0, 0].conjugate() / V[1, 1].conjugate()
        self.assertAlmostEqual(ratio, 8e-6)

    def test_match_qq3_1322(self):
        # Q_qq^(3) of Warsaw matches onto O_ud^V8,LL in the JMS basis
        jms = self._matched('qq3_1322', 3e-6)
        V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
        ratio = jms['V8udLL_1223'] / V[2, 2].conjugate() / V[1, 1].conjugate()
        self.assertAlmostEqual(ratio, 12e-6)

    def test_match_ll_1212(self):
        # Q_ll of Warsaw matches onto O_nue^V,LL in the JMS basis
        jms = self._matched('ll_1212', 2e-6)
        self.assertAlmostEqual(jms['VnueLL_1212'], 4e-6)

    def test_match_ll_1312(self):
        # Q_ll of Warsaw matches onto O_nue^V,LL in the JMS basis
        jms = self._matched('ll_1213', 20e-6)
        self.assertAlmostEqual(jms['VnueLL_1312'], 20e-6)

    def test_match_lq1_1233(self):
        # Q_lq^1 of Warsaw matches onto O_ed^V,LL in the JMS basis
        jms = self._matched('lq1_1233', 12e-6)
        self.assertAlmostEqual(jms['VedLL_1233'], 12e-6)

    def test_match_ee_1233(self):
        # Q_ee of Warsaw matches onto O_ee^V,RR in the JMS basis
        jms = self._matched('ee_1233', 100e-6)
        self.assertAlmostEqual(jms['VeeRR_1233'], 100e-6)

    def test_match_uu_1112(self):
        # Q_uu of Warsaw matches onto O_uu^V,RR in the JMS basis
        jms = self._matched('uu_1112', 5e-6)
        self.assertAlmostEqual(jms['VuuRR_1112'], 5e-6)

    def test_match_dd_1223(self):
        # Q_dd of Warsaw matches onto O_dd^V,RR in the JMS basis
        jms = self._matched('dd_1223', 51e-6)
        self.assertAlmostEqual(jms['VddRR_1223'], 51e-6)
class TestRun(unittest.TestCase):
    """SMEFT RG running checked against the known leading-log expression."""

    def test_run_lq3_3333(self):
        w = wilson.Wilson({'lq3_2333': 1e-6}, 1000, 'SMEFT', 'Warsaw')
        # gauge coupling g at the input scale
        g = wilson.run.smeft.SMEFT(w.wc).C_in['g']
        # run down to 100 GeV
        run_down = w.match_run(100, 'SMEFT', 'Warsaw')
        # symmetry factor of 2 since our 2333 convention is twice as large
        symm = 2
        expected = symm * 1e-6 / (16 * pi**2) * (-g**2) * log(100 / 1000)
        self.assertAlmostEqual(run_down['ll_2333'], expected)
class TestMatchingSymmetryFactors(unittest.TestCase):
    # Each C_symm_keys category groups WET operator keys by the index
    # symmetry of the operator; the matched coefficient arrays must share
    # that symmetry. NOTE(review): `decimal=20` is tighter than double
    # precision guarantees in general; presumably the symmetrized arrays
    # agree exactly here — confirm before relying on it.
    def test_match_symmfac(self):
        """Test that the WET WCs coming out of the matching fulfill
        the correct symmetry relations (namely, have the same symmetries
        as the operators)."""
        C_SMEFT = wilson.util.smeftutil.wcxf2arrays_symmetrized(C_Warsaw_random)
        # tree-level matching of all SMEFT Warsaw arrays onto WET
        C = wilson.match.smeft_tree.match_all_array(C_SMEFT, p)
        for k in C:
            if k in C_symm_keys[41] + C_symm_keys[4] + C_symm_keys[6]:
                a = np.einsum('klij', C[k]) # C_ijkl = C_klij
                npt.assert_array_almost_equal(C[k], a, err_msg="Failed for {}".format(k),
                                              decimal=20)
            if k in C_symm_keys[5] + C_symm_keys[4] + C_symm_keys[6]:
                a = np.einsum('jilk', C[k]).conj() # C_ijkl = C_jilk*
                npt.assert_array_almost_equal(C[k], a, err_msg="Failed for {}".format(k),
                                              decimal=20)
            if k in C_symm_keys[4] + C_symm_keys[6]:
                a = np.einsum('lkji', C[k]).conj() # C_ijkl = C_lkji*
                npt.assert_array_almost_equal(C[k], a, err_msg="Failed for {}".format(k),
                                              decimal=20)
            if k in C_symm_keys[6]:
                a = np.einsum('ilkj', C[k]) # C_ijkl = C_ilkj
                npt.assert_array_almost_equal(C[k], a, err_msg="Failed for {}".format(k),
                                              decimal=20)
            if k in C_symm_keys[9]:
                a = -np.einsum('jikl', C[k]) # C_ijkl = -C_jikl
                npt.assert_array_almost_equal(C[k], a, err_msg="Failed for {}".format(k),
                                              decimal=20)

    def test_match_symmfac_loop(self):
        """Test that the WET WCs coming out of the matching fulfill
        the correct symmetry relations (namely, have the same symmetries
        as the operators)."""
        C_SMEFT = wilson.util.smeftutil.wcxf2arrays_symmetrized(C_Warsaw_random)
        # one-loop matching at scale 120 GeV; the explicit complex casts
        # below presumably normalize mixed-dtype entries — confirm.
        C = wilson.match.smeft_loop.match_all_array(C_SMEFT, p, scale=120)
        for k in C:
            if k in C_symm_keys[41] + C_symm_keys[4] + C_symm_keys[6]:
                a = np.einsum('klij', C[k]) # C_ijkl = C_klij
                npt.assert_array_almost_equal(np.array(C[k], complex), np.array(a, complex), err_msg="Failed for {}".format(k),
                                              decimal=20)
            if k in C_symm_keys[5] + C_symm_keys[4] + C_symm_keys[6]:
                a = np.einsum('jilk', C[k]).conj() # C_ijkl = C_jilk*
                npt.assert_array_almost_equal(np.array(C[k], complex), np.array(a, complex), err_msg="Failed for {}".format(k),
                                              decimal=20)
            if k in C_symm_keys[4] + C_symm_keys[6]:
                a = np.einsum('lkji', C[k]).conj() # C_ijkl = C_lkji*
                npt.assert_array_almost_equal(np.array(C[k], complex), np.array(a, complex), err_msg="Failed for {}".format(k),
                                              decimal=20)
            if k in C_symm_keys[6]:
                a = np.einsum('ilkj', C[k]) # C_ijkl = C_ilkj
                npt.assert_array_almost_equal(np.array(C[k], complex), np.array(a, complex), err_msg="Failed for {}".format(k),
                                              decimal=20)
            if k in C_symm_keys[9]:
                a = -np.einsum('jikl', C[k]) # C_ijkl = -C_jikl
                npt.assert_array_almost_equal(np.array(C[k], complex), np.array(a, complex), err_msg="Failed for {}".format(k),
                                              decimal=20)
| [
"wilson.Wilson",
"ckmutil.ckm.ckm_tree",
"wilson.wcxf.WC",
"numpy.random.seed",
"wilson.match.smeft_tree.match_all_array",
"numpy.einsum",
"numpy.array",
"wilson.match.smeft_loop.match_all_array",
"numpy.random.rand",
"math.log",
"wilson.run.smeft.SMEFT",
"wilson.util.smeftutil.wcxf2arrays_sym... | [((286, 304), 'numpy.random.seed', 'np.random.seed', (['(39)'], {}), '(39)\n', (300, 304), True, 'import numpy as np\n'), ((835, 921), 'wilson.wcxf.WC', 'wcxf.WC', ([], {'values': "{'qq3_1122': 2e-06}", 'scale': '(1000.0)', 'eft': '"""SMEFT"""', 'basis': '"""Warsaw up"""'}), "(values={'qq3_1122': 2e-06}, scale=1000.0, eft='SMEFT', basis=\n 'Warsaw up')\n", (842, 921), False, 'from wilson import wcxf\n'), ((1000, 1062), 'ckmutil.ckm.ckm_tree', 'ckmutil.ckm.ckm_tree', (["p['Vus']", "p['Vub']", "p['Vcb']", "p['delta']"], {}), "(p['Vus'], p['Vub'], p['Vcb'], p['delta'])\n", (1020, 1062), False, 'import ckmutil\n'), ((1299, 1385), 'wilson.wcxf.WC', 'wcxf.WC', ([], {'values': "{'qq3_1322': 3e-06}", 'scale': '(1000.0)', 'eft': '"""SMEFT"""', 'basis': '"""Warsaw up"""'}), "(values={'qq3_1322': 3e-06}, scale=1000.0, eft='SMEFT', basis=\n 'Warsaw up')\n", (1306, 1385), False, 'from wilson import wcxf\n'), ((1464, 1526), 'ckmutil.ckm.ckm_tree', 'ckmutil.ckm.ckm_tree', (["p['Vus']", "p['Vub']", "p['Vcb']", "p['delta']"], {}), "(p['Vus'], p['Vub'], p['Vcb'], p['delta'])\n", (1484, 1526), False, 'import ckmutil\n'), ((1759, 1844), 'wilson.wcxf.WC', 'wcxf.WC', ([], {'values': "{'ll_1212': 2e-06}", 'scale': '(1000.0)', 'eft': '"""SMEFT"""', 'basis': '"""Warsaw up"""'}), "(values={'ll_1212': 2e-06}, scale=1000.0, eft='SMEFT', basis='Warsaw up'\n )\n", (1766, 1844), False, 'from wilson import wcxf\n'), ((2095, 2180), 'wilson.wcxf.WC', 'wcxf.WC', ([], {'values': "{'ll_1213': 2e-05}", 'scale': '(1000.0)', 'eft': '"""SMEFT"""', 'basis': '"""Warsaw up"""'}), "(values={'ll_1213': 2e-05}, scale=1000.0, eft='SMEFT', basis='Warsaw up'\n )\n", (2102, 2180), False, 'from wilson import wcxf\n'), ((2435, 2523), 'wilson.wcxf.WC', 'wcxf.WC', ([], {'values': "{'lq1_1233': 1.2e-05}", 'scale': '(1000.0)', 'eft': '"""SMEFT"""', 'basis': '"""Warsaw up"""'}), "(values={'lq1_1233': 1.2e-05}, scale=1000.0, eft='SMEFT', basis=\n 'Warsaw up')\n", (2442, 2523), 
False, 'from wilson import wcxf\n'), ((2772, 2858), 'wilson.wcxf.WC', 'wcxf.WC', ([], {'values': "{'ee_1233': 0.0001}", 'scale': '(1000.0)', 'eft': '"""SMEFT"""', 'basis': '"""Warsaw up"""'}), "(values={'ee_1233': 0.0001}, scale=1000.0, eft='SMEFT', basis=\n 'Warsaw up')\n", (2779, 2858), False, 'from wilson import wcxf\n'), ((3110, 3195), 'wilson.wcxf.WC', 'wcxf.WC', ([], {'values': "{'uu_1112': 5e-06}", 'scale': '(1000.0)', 'eft': '"""SMEFT"""', 'basis': '"""Warsaw up"""'}), "(values={'uu_1112': 5e-06}, scale=1000.0, eft='SMEFT', basis='Warsaw up'\n )\n", (3117, 3195), False, 'from wilson import wcxf\n'), ((3444, 3531), 'wilson.wcxf.WC', 'wcxf.WC', ([], {'values': "{'dd_1223': 5.1e-05}", 'scale': '(1000.0)', 'eft': '"""SMEFT"""', 'basis': '"""Warsaw up"""'}), "(values={'dd_1223': 5.1e-05}, scale=1000.0, eft='SMEFT', basis=\n 'Warsaw up')\n", (3451, 3531), False, 'from wilson import wcxf\n'), ((3736, 3795), 'wilson.Wilson', 'wilson.Wilson', (["{'lq3_2333': 1e-06}", '(1000)', '"""SMEFT"""', '"""Warsaw"""'], {}), "({'lq3_2333': 1e-06}, 1000, 'SMEFT', 'Warsaw')\n", (3749, 3795), False, 'import wilson\n'), ((4439, 4501), 'wilson.util.smeftutil.wcxf2arrays_symmetrized', 'wilson.util.smeftutil.wcxf2arrays_symmetrized', (['C_Warsaw_random'], {}), '(C_Warsaw_random)\n', (4484, 4501), False, 'import wilson\n'), ((4514, 4565), 'wilson.match.smeft_tree.match_all_array', 'wilson.match.smeft_tree.match_all_array', (['C_SMEFT', 'p'], {}), '(C_SMEFT, p)\n', (4553, 4565), False, 'import wilson\n'), ((6149, 6211), 'wilson.util.smeftutil.wcxf2arrays_symmetrized', 'wilson.util.smeftutil.wcxf2arrays_symmetrized', (['C_Warsaw_random'], {}), '(C_Warsaw_random)\n', (6194, 6211), False, 'import wilson\n'), ((6224, 6286), 'wilson.match.smeft_loop.match_all_array', 'wilson.match.smeft_loop.match_all_array', (['C_SMEFT', 'p'], {'scale': '(120)'}), '(C_SMEFT, p, scale=120)\n', (6263, 6286), False, 'import wilson\n'), ((536, 552), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (550, 
552), True, 'import numpy as np\n'), ((650, 666), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (664, 666), True, 'import numpy as np\n'), ((3844, 3872), 'wilson.run.smeft.SMEFT', 'wilson.run.smeft.SMEFT', (['w.wc'], {}), '(w.wc)\n', (3866, 3872), False, 'import wilson\n'), ((4143, 4158), 'math.log', 'log', (['(100 / 1000)'], {}), '(100 / 1000)\n', (4146, 4158), False, 'from math import pi, log\n'), ((4677, 4700), 'numpy.einsum', 'np.einsum', (['"""klij"""', 'C[k]'], {}), "('klij', C[k])\n", (4686, 4700), True, 'import numpy as np\n'), ((5482, 5505), 'numpy.einsum', 'np.einsum', (['"""ilkj"""', 'C[k]'], {}), "('ilkj', C[k])\n", (5491, 5505), True, 'import numpy as np\n'), ((6398, 6421), 'numpy.einsum', 'np.einsum', (['"""klij"""', 'C[k]'], {}), "('klij', C[k])\n", (6407, 6421), True, 'import numpy as np\n'), ((7317, 7340), 'numpy.einsum', 'np.einsum', (['"""ilkj"""', 'C[k]'], {}), "('ilkj', C[k])\n", (7326, 7340), True, 'import numpy as np\n'), ((5729, 5752), 'numpy.einsum', 'np.einsum', (['"""jikl"""', 'C[k]'], {}), "('jikl', C[k])\n", (5738, 5752), True, 'import numpy as np\n'), ((6486, 6509), 'numpy.array', 'np.array', (['C[k]', 'complex'], {}), '(C[k], complex)\n', (6494, 6509), True, 'import numpy as np\n'), ((6511, 6531), 'numpy.array', 'np.array', (['a', 'complex'], {}), '(a, complex)\n', (6519, 6531), True, 'import numpy as np\n'), ((6812, 6835), 'numpy.array', 'np.array', (['C[k]', 'complex'], {}), '(C[k], complex)\n', (6820, 6835), True, 'import numpy as np\n'), ((6837, 6857), 'numpy.array', 'np.array', (['a', 'complex'], {}), '(a, complex)\n', (6845, 6857), True, 'import numpy as np\n'), ((7121, 7144), 'numpy.array', 'np.array', (['C[k]', 'complex'], {}), '(C[k], complex)\n', (7129, 7144), True, 'import numpy as np\n'), ((7146, 7166), 'numpy.array', 'np.array', (['a', 'complex'], {}), '(a, complex)\n', (7154, 7166), True, 'import numpy as np\n'), ((7405, 7428), 'numpy.array', 'np.array', (['C[k]', 'complex'], {}), '(C[k], complex)\n', (7413, 
7428), True, 'import numpy as np\n'), ((7430, 7450), 'numpy.array', 'np.array', (['a', 'complex'], {}), '(a, complex)\n', (7438, 7450), True, 'import numpy as np\n'), ((7602, 7625), 'numpy.einsum', 'np.einsum', (['"""jikl"""', 'C[k]'], {}), "('jikl', C[k])\n", (7611, 7625), True, 'import numpy as np\n'), ((7691, 7714), 'numpy.array', 'np.array', (['C[k]', 'complex'], {}), '(C[k], complex)\n', (7699, 7714), True, 'import numpy as np\n'), ((7716, 7736), 'numpy.array', 'np.array', (['a', 'complex'], {}), '(a, complex)\n', (7724, 7736), True, 'import numpy as np\n'), ((4957, 4980), 'numpy.einsum', 'np.einsum', (['"""jilk"""', 'C[k]'], {}), "('jilk', C[k])\n", (4966, 4980), True, 'import numpy as np\n'), ((5228, 5251), 'numpy.einsum', 'np.einsum', (['"""lkji"""', 'C[k]'], {}), "('lkji', C[k])\n", (5237, 5251), True, 'import numpy as np\n'), ((6716, 6739), 'numpy.einsum', 'np.einsum', (['"""jilk"""', 'C[k]'], {}), "('jilk', C[k])\n", (6725, 6739), True, 'import numpy as np\n'), ((7025, 7048), 'numpy.einsum', 'np.einsum', (['"""lkji"""', 'C[k]'], {}), "('lkji', C[k])\n", (7034, 7048), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset
from torchvision import transforms, utils
class ImageDataset(Dataset):
    """Dataset backed by pre-exported ``.npy`` image/label arrays.

    Expects ``<root>/{Train,Val,Test}_img.npy`` (NCHW image arrays) and
    ``<root>/{train,val,test}_label.npy`` label arrays.  Images are
    transposed to HWC on load so ``PIL.Image.fromarray`` can consume them
    in ``__getitem__``.
    """

    def _load_data(self, npy_file):
        """Load one ``.npy`` array from disk and log its shape."""
        data = np.load(npy_file)
        print('Shape: ', data.shape)
        return data

    def __init__(self, root, split='train+val', transform=None, target_transform=None):
        """
        Args:
            root: directory containing the ``.npy`` files.
            split: one of ``'train+val'``, ``'train'``, ``'val'``, ``'test'``.
            transform: optional callable applied to each PIL image.
            target_transform: optional callable (currently unused in
                ``__getitem__``; kept for interface compatibility).
        """
        self.root = root
        if split == 'train+val':
            train_data = self._load_data(os.path.join(self.root, 'Train_img.npy'))
            train_targets = self._load_data(os.path.join(self.root, 'train_label.npy'))
            self.data = np.concatenate(
                (train_data, self._load_data(os.path.join(self.root, 'Val_img.npy'))), axis=0)
            self.targets = np.concatenate(
                (train_targets, self._load_data(os.path.join(self.root, 'val_label.npy'))), axis=0)
        elif split == 'train':
            self.data = self._load_data(os.path.join(self.root, 'Train_img.npy'))
            self.targets = self._load_data(os.path.join(self.root, 'train_label.npy'))
        elif split == 'val':
            self.data = self._load_data(os.path.join(self.root, 'Val_img.npy'))
            self.targets = self._load_data(os.path.join(self.root, 'val_label.npy'))
        elif split == 'test':
            self.data = self._load_data(os.path.join(self.root, 'Test_img.npy'))
            self.targets = self._load_data(os.path.join(self.root, 'test_label.npy'))
        self.transform = transform
        self.target_transform = target_transform
        self.data = self.data.transpose((0, 2, 3, 1))  # NCHW -> NHWC for PIL
        assert len(self.data) == len(self.targets)

    def _ensure_stats(self):
        """Lazily compute per-dataset mean/std of the [0, 1]-scaled images.

        BUG FIX: the eager computation that used to run in ``__init__`` was
        commented out, which left ``_train_data_transform`` and
        ``_test_data_transform`` raising ``AttributeError`` on
        ``self.img_mean``/``self.img_std``.  Computing on first use fixes
        that without paying the cost when the stats are never needed.
        """
        if not hasattr(self, 'img_mean'):
            self.img_mean = [np.mean(self.data / 255)]
            self.img_std = [np.std(self.data / 255)]

    def __len__(self):
        return len(self.targets)

    def __getitem__(self, idx):
        """Return ``(PIL image, int label)`` for sample ``idx``."""
        img, target = self.data[idx], int(self.targets[idx])
        # squeeze drops a trailing singleton channel so grayscale arrays
        # become 2-D, which Image.fromarray interprets as mode 'L'
        img = Image.fromarray(np.uint8(np.squeeze(img)))
        if self.transform is not None:
            img = self.transform(img)
        return img, target

    def add_data(self, dataset):
        """Append another ImageDataset's samples to this one in place."""
        self.data = np.concatenate((self.data, dataset.data), axis=0)
        self.targets = np.concatenate((self.targets, dataset.targets), axis=0)

    def _train_data_transform(self):
        """Augmenting transform (affine jitter) + normalization for training."""
        self._ensure_stats()
        data_transform = transforms.Compose([
            transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=0.1),
            transforms.ToTensor(),
            transforms.Normalize(self.img_mean, self.img_std),
        ])
        return data_transform

    def _test_data_transform(self):
        """Deterministic tensor conversion + normalization for evaluation."""
        self._ensure_stats()
        data_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(self.img_mean, self.img_std),
        ])
        return data_transform
if __name__ == '__main__':
    base_path = '/home/grtzsohalf/Desktop/NVIDIA/image_data'
    # BUG FIX: ImageDataset takes (root, split=...).  The old call
    # ImageDataset(train_img_npy, train_lbl_npy) passed a label-file path
    # as `split`, so no branch of __init__ matched and the constructor
    # crashed on the missing self.data attribute.
    train_img_dataset = ImageDataset(base_path, split='train')
    fig = plt.figure()
    print(train_img_dataset.data[0][0])
    # __getitem__ returns a PIL image when no transform is set, so convert
    # to an ndarray before reshaping (PIL images have no .reshape).
    # NOTE(review): assumes 96x160 single-channel images -- confirm against
    # the exported .npy shapes.
    sample = np.asarray(train_img_dataset[200][0]).reshape(96, 160)
    imgplot = plt.imshow(sample)
    plt.show()
| [
"numpy.load",
"torchvision.transforms.RandomAffine",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.figure",
"numpy.squeeze",
"torchvision.transforms.Normalize",
"os.path.join",
"numpy.concatenate",
"torchvision.transforms.ToTensor"
] | [((3891, 3931), 'os.path.join', 'os.path.join', (['base_path', '"""Train_img.npy"""'], {}), "(base_path, 'Train_img.npy')\n", (3903, 3931), False, 'import os\n'), ((3952, 3994), 'os.path.join', 'os.path.join', (['base_path', '"""train_label.npy"""'], {}), "(base_path, 'train_label.npy')\n", (3964, 3994), False, 'import os\n'), ((4014, 4053), 'os.path.join', 'os.path.join', (['base_path', '"""Test_img.npy"""'], {}), "(base_path, 'Test_img.npy')\n", (4026, 4053), False, 'import os\n'), ((4073, 4114), 'os.path.join', 'os.path.join', (['base_path', '"""test_label.npy"""'], {}), "(base_path, 'test_label.npy')\n", (4085, 4114), False, 'import os\n'), ((4133, 4171), 'os.path.join', 'os.path.join', (['base_path', '"""Val_img.npy"""'], {}), "(base_path, 'Val_img.npy')\n", (4145, 4171), False, 'import os\n'), ((4190, 4230), 'os.path.join', 'os.path.join', (['base_path', '"""val_label.npy"""'], {}), "(base_path, 'val_label.npy')\n", (4202, 4230), False, 'import os\n'), ((4549, 4561), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4559, 4561), True, 'import matplotlib.pyplot as plt\n'), ((4672, 4690), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sample'], {}), '(sample)\n', (4682, 4690), True, 'import matplotlib.pyplot as plt\n'), ((4695, 4705), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4703, 4705), True, 'import matplotlib.pyplot as plt\n'), ((359, 376), 'numpy.load', 'np.load', (['npy_file'], {}), '(npy_file)\n', (366, 376), True, 'import numpy as np\n'), ((2877, 2926), 'numpy.concatenate', 'np.concatenate', (['(self.data, dataset.data)'], {'axis': '(0)'}), '((self.data, dataset.data), axis=0)\n', (2891, 2926), True, 'import numpy as np\n'), ((2950, 3005), 'numpy.concatenate', 'np.concatenate', (['(self.targets, dataset.targets)'], {'axis': '(0)'}), '((self.targets, dataset.targets), axis=0)\n', (2964, 3005), True, 'import numpy as np\n'), ((605, 645), 'os.path.join', 'os.path.join', (['self.root', '"""Train_img.npy"""'], {}), "(self.root, 
'Train_img.npy')\n", (617, 645), False, 'import os\n'), ((668, 710), 'os.path.join', 'os.path.join', (['self.root', '"""train_label.npy"""'], {}), "(self.root, 'train_label.npy')\n", (680, 710), False, 'import os\n'), ((836, 874), 'os.path.join', 'os.path.join', (['self.root', '"""Val_img.npy"""'], {}), "(self.root, 'Val_img.npy')\n", (848, 874), False, 'import os\n'), ((897, 937), 'os.path.join', 'os.path.join', (['self.root', '"""val_label.npy"""'], {}), "(self.root, 'val_label.npy')\n", (909, 937), False, 'import os\n'), ((1171, 1211), 'os.path.join', 'os.path.join', (['self.root', '"""Train_img.npy"""'], {}), "(self.root, 'Train_img.npy')\n", (1183, 1211), False, 'import os\n'), ((1234, 1276), 'os.path.join', 'os.path.join', (['self.root', '"""train_label.npy"""'], {}), "(self.root, 'train_label.npy')\n", (1246, 1276), False, 'import os\n'), ((2601, 2616), 'numpy.squeeze', 'np.squeeze', (['img'], {}), '(img)\n', (2611, 2616), True, 'import numpy as np\n'), ((3102, 3192), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '(15)', 'translate': '(0.1, 0.1)', 'scale': '(0.9, 1.1)', 'shear': '(0.1)'}), '(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1),\n shear=0.1)\n', (3125, 3192), False, 'from torchvision import transforms, utils\n'), ((3202, 3223), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3221, 3223), False, 'from torchvision import transforms, utils\n'), ((3237, 3286), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['self.img_mean', 'self.img_std'], {}), '(self.img_mean, self.img_std)\n', (3257, 3286), False, 'from torchvision import transforms, utils\n'), ((3539, 3560), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3558, 3560), False, 'from torchvision import transforms, utils\n'), ((3574, 3623), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['self.img_mean', 'self.img_std'], {}), '(self.img_mean, self.img_std)\n', (3594, 3623), 
False, 'from torchvision import transforms, utils\n'), ((1429, 1467), 'os.path.join', 'os.path.join', (['self.root', '"""Val_img.npy"""'], {}), "(self.root, 'Val_img.npy')\n", (1441, 1467), False, 'import os\n'), ((1490, 1530), 'os.path.join', 'os.path.join', (['self.root', '"""val_label.npy"""'], {}), "(self.root, 'val_label.npy')\n", (1502, 1530), False, 'import os\n'), ((1684, 1723), 'os.path.join', 'os.path.join', (['self.root', '"""Test_img.npy"""'], {}), "(self.root, 'Test_img.npy')\n", (1696, 1723), False, 'import os\n'), ((1746, 1787), 'os.path.join', 'os.path.join', (['self.root', '"""test_label.npy"""'], {}), "(self.root, 'test_label.npy')\n", (1758, 1787), False, 'import os\n')] |
import copy
import math
import os
from os import listdir
from os.path import isfile, join, splitext
#import cv2
import numpy as np
import SimpleITK as sitk
import skimage.transform
from DataManager import DataManager
from tqdm import tqdm
from pathos.multiprocessing import ProcessingPool as Pool
class DataManagerNii(DataManager):
    """DataManager specialisation that reads NIfTI volumes via SimpleITK."""

    def __init__(self, srcFolder, resultsDir, parameters, probabilityMap=False):
        self.num = 0
        # Normalise every loaded image into the [0, 1] intensity range.
        self.resacle_filter = sitk.RescaleIntensityImageFilter()
        self.resacle_filter.SetOutputMaximum(1)
        self.resacle_filter.SetOutputMinimum(0)
        return super().__init__(srcFolder, resultsDir, parameters, probabilityMap=probabilityMap)

    def loadImages(self):
        """Read every case's img.nii.gz as a rescaled float32 sitk image."""
        self.sitkImages = {}
        for case in tqdm(self.fileList):
            raw = sitk.ReadImage(join(self.srcFolder, case, 'img.nii.gz'))
            self.sitkImages[case] = self.resacle_filter.Execute(
                sitk.Cast(raw, sitk.sitkFloat32)
            )

    def loadGT(self):
        """Read label.nii.gz per case; missing labels are stored as None."""
        self.sitkGTs = {}
        for case in tqdm(self.gtList):
            label_path = join(self.srcFolder, case, 'label.nii.gz')
            if isfile(label_path):
                self.sitkGTs[case] = sitk.Cast(sitk.ReadImage(label_path), sitk.sitkFloat32)
            else:
                self.sitkGTs[case] = None

    def loadData(self):
        """Populate file lists, sitk handles and numpy arrays, then pad."""
        self.createImageFileList()
        self.createGTFileList()
        self.loadImages()
        self.loadGT()
        self.numpyImages = self.getNumpyImages()
        self.numpyGTs = self.getNumpyGTs()
        assert len(self.numpyImages) == len(self.numpyGTs)
        self.padNumpyData()
        self.num = len(self.numpyImages)

    def getNumpyImages(self):
        """Convert the sitk images to float32 numpy arrays in (x, y, z) order."""
        arrays = {}
        for key, img in tqdm(self.sitkImages.items()):
            arrays[key] = sitk.GetArrayFromImage(img).astype(dtype=np.float32).transpose([2, 1, 0])
        return arrays

    def getNumpyGTs(self):
        """Same as getNumpyImages, with all-zero volumes for missing labels."""
        arrays = {}
        for key, img in tqdm(self.sitkGTs.items()):
            if img is not None:
                arrays[key] = sitk.GetArrayFromImage(img).astype(dtype=np.float32).transpose([2, 1, 0])
            else:
                arrays[key] = np.zeros(self.sitkImages[key].GetSize(), dtype=np.float32)
        return arrays

    def writeResultsFromNumpyLabel(self, result, key, original_image=False):
        """Persist a predicted volume as <resultsDir>/<key>_result.nii.gz."""
        if self.probabilityMap:
            result = result * 255
        volume = sitk.GetImageFromArray(np.transpose(result, [2, 1, 0]))
        target_type = sitk.sitkFloat32 if original_image else sitk.sitkUInt8
        volume = sitk.Cast(volume, target_type)
        stem, _ = splitext(key)
        writer = sitk.ImageFileWriter()
        writer.SetFileName(join(self.resultsDir, stem + '_result.nii.gz'))
        writer.Execute(volume)

    def padNumpyData(self):
        """Zero-pad each volume (and its GT) up to params['VolSize']."""
        for key, image in self.numpyImages.items():
            gt = self.numpyGTs[key]
            deficit = [max(target - actual, 0)
                       for actual, target in zip(image.shape, self.params['VolSize'])]
            if any(deficit):
                pad_width = tuple((0, d) for d in deficit)
                self.numpyImages[key] = np.pad(image, pad_width, 'constant').astype(dtype=np.float32)
                self.numpyGTs[key] = np.pad(gt, pad_width, 'constant').astype(dtype=np.float32)
class DataManagerNiiLazyLoad(DataManagerNii):
    """Variant that defers volume loading until explicitly requested."""

    def loadData(self):
        # Only discover the image files up front; volumes are read on
        # demand via loadImgandLabel.
        self.createImageFileList()
        #self.createGTFileList()

    def loadImgandLabel(self, f):
        """Read one case's image as an (x, y, z) float32 array plus an all-zero label."""
        volume = sitk.Cast(sitk.ReadImage(join(self.srcFolder, f, 'img.nii.gz')), sitk.sitkFloat32)
        img = np.transpose(sitk.GetArrayFromImage(volume).astype(dtype=np.float32), [2, 1, 0])
        label = np.zeros(img.shape)
        return img, label
| [
"numpy.pad",
"tqdm.tqdm",
"SimpleITK.ImageFileWriter",
"SimpleITK.ReadImage",
"numpy.transpose",
"numpy.zeros",
"SimpleITK.GetArrayFromImage",
"os.path.isfile",
"os.path.splitext",
"SimpleITK.GetImageFromArray",
"SimpleITK.RescaleIntensityImageFilter",
"os.path.join",
"SimpleITK.Cast"
] | [((468, 502), 'SimpleITK.RescaleIntensityImageFilter', 'sitk.RescaleIntensityImageFilter', ([], {}), '()\n', (500, 502), True, 'import SimpleITK as sitk\n'), ((777, 796), 'tqdm.tqdm', 'tqdm', (['self.fileList'], {}), '(self.fileList)\n', (781, 796), False, 'from tqdm import tqdm\n'), ((1088, 1105), 'tqdm.tqdm', 'tqdm', (['self.gtList'], {}), '(self.gtList)\n', (1092, 1105), False, 'from tqdm import tqdm\n'), ((2429, 2460), 'numpy.transpose', 'np.transpose', (['result', '[2, 1, 0]'], {}), '(result, [2, 1, 0])\n', (2441, 2460), True, 'import numpy as np\n'), ((2479, 2509), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['result'], {}), '(result)\n', (2501, 2509), True, 'import SimpleITK as sitk\n'), ((2686, 2708), 'SimpleITK.ImageFileWriter', 'sitk.ImageFileWriter', ([], {}), '()\n', (2706, 2708), True, 'import SimpleITK as sitk\n'), ((2733, 2746), 'os.path.splitext', 'splitext', (['key'], {}), '(key)\n', (2741, 2746), False, 'from os.path import isfile, join, splitext\n'), ((3746, 3774), 'numpy.transpose', 'np.transpose', (['img', '[2, 1, 0]'], {}), '(img, [2, 1, 0])\n', (3758, 3774), True, 'import numpy as np\n'), ((3791, 3810), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (3799, 3810), True, 'import numpy as np\n'), ((823, 863), 'os.path.join', 'join', (['self.srcFolder', 'path', '"""img.nii.gz"""'], {}), "(self.srcFolder, path, 'img.nii.gz')\n", (827, 863), False, 'from os.path import isfile, join, splitext\n'), ((1129, 1171), 'os.path.join', 'join', (['self.srcFolder', 'path', '"""label.nii.gz"""'], {}), "(self.srcFolder, path, 'label.nii.gz')\n", (1133, 1171), False, 'from os.path import isfile, join, splitext\n'), ((2560, 2596), 'SimpleITK.Cast', 'sitk.Cast', (['toWrite', 'sitk.sitkFloat32'], {}), '(toWrite, sitk.sitkFloat32)\n', (2569, 2596), True, 'import SimpleITK as sitk\n'), ((2633, 2667), 'SimpleITK.Cast', 'sitk.Cast', (['toWrite', 'sitk.sitkUInt8'], {}), '(toWrite, sitk.sitkUInt8)\n', (2642, 2667), True, 'import 
SimpleITK as sitk\n'), ((2774, 2824), 'os.path.join', 'join', (['self.resultsDir', "(filename + '_result.nii.gz')"], {}), "(self.resultsDir, filename + '_result.nii.gz')\n", (2778, 2824), False, 'from os.path import isfile, join, splitext\n'), ((1291, 1306), 'os.path.isfile', 'isfile', (['gt_name'], {}), '(gt_name)\n', (1297, 1306), False, 'from os.path import isfile, join, splitext\n'), ((3607, 3644), 'os.path.join', 'join', (['self.srcFolder', 'f', '"""img.nii.gz"""'], {}), "(self.srcFolder, f, 'img.nii.gz')\n", (3611, 3644), False, 'from os.path import isfile, join, splitext\n'), ((3679, 3706), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img'], {}), '(img)\n', (3701, 3706), True, 'import SimpleITK as sitk\n'), ((955, 981), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['image_name'], {}), '(image_name)\n', (969, 981), True, 'import SimpleITK as sitk\n'), ((1232, 1255), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['gt_name'], {}), '(gt_name)\n', (1246, 1255), True, 'import SimpleITK as sitk\n'), ((3229, 3268), 'numpy.pad', 'np.pad', (['image', 'padding_size', '"""constant"""'], {}), "(image, padding_size, 'constant')\n", (3235, 3268), True, 'import numpy as np\n'), ((3331, 3367), 'numpy.pad', 'np.pad', (['gt', 'padding_size', '"""constant"""'], {}), "(gt, padding_size, 'constant')\n", (3337, 3367), True, 'import numpy as np\n'), ((1750, 1777), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img'], {}), '(img)\n', (1772, 1777), True, 'import SimpleITK as sitk\n'), ((2004, 2031), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img'], {}), '(img)\n', (2026, 2031), True, 'import SimpleITK as sitk\n')] |
import json
import random
from collections import Counter
from typing import List, Union
import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from tqdm import tqdm
from .models import transformer_tabular
class DeepTabular:
    """Base class for transformer-over-tabular-data models.

    Categorical cells are tokenised as ``"<col>_<value>"`` strings and mapped
    to integer ids (0 is reserved for unseen tokens); numeric columns
    contribute their column-name token plus the raw cell value.
    """

    def __init__(
        self,
        cat_cols=None,
        num_cols=None,
        n_targets=None,
        num_layers=4,
        dropout=0.01,
        d_model=64,
    ):
        self.num_layers = num_layers
        self.dropout = dropout
        self.d_model = d_model
        self.model = None
        self.mapping = None
        self.frequency = None
        self.cat_cols = cat_cols
        self.num_cols = num_cols
        self.n_targets = n_targets

    def fit_mapping(self, df: pd.DataFrame):
        """Build the token -> id mapping (ids start at 1; 0 means OOV)."""
        self.frequency = dict()
        for col in tqdm(self.cat_cols):
            values = df[col].apply(lambda x: "%s_%s" % (col, str(x))).tolist()
            count = Counter(values)
            self.frequency.update(count)
        self.mapping = {
            k: i + 1 for i, k in enumerate(list(self.frequency.keys()) + self.num_cols)
        }

    def prepare_data(self, df, add_distractors=False):
        """Encode df rows into parallel (token-id, value) sequences.

        When ``add_distractors`` is True, random vocabulary tokens are
        appended together with a 0/1 indicator of whether they actually
        occur in the row (the unsupervised pre-training objective).
        """
        data_x1 = []
        data_x2 = []
        for index, row in tqdm(df.iterrows(), total=df.shape[0]):
            sample_x1 = []
            sample_x2 = []
            sample_x1 += ["%s_%s" % (col, str(row[col])) for col in self.cat_cols]
            sample_x1 += [col for col in self.num_cols]
            sample_x2 += [1 for _ in self.cat_cols]
            sample_x2 += [row[col] for col in self.num_cols]
            if add_distractors and len(self.cat_cols):
                distractors_x1 = random.sample(list(self.mapping), len(self.cat_cols))
                distractors_x2 = [1 if a in sample_x1 else 0 for a in distractors_x1]
                sample_x1 += distractors_x1
                sample_x2 += distractors_x2
            sample_x1 = [self.mapping.get(x, 0) for x in sample_x1]  # 0 = unseen token
            data_x1.append(sample_x1)
            data_x2.append(sample_x2)
        return data_x1, data_x2

    @staticmethod
    def build_callbacks(monitor, patience_early, patience_reduce, save_path):
        """Standard Keras callback stack: optional checkpoint, LR decay, early stop."""
        callbacks = []
        if save_path is not None:
            checkpoint = ModelCheckpoint(
                save_path,
                monitor=monitor,
                verbose=1,
                save_best_only=True,
                save_weights_only=True,
            )
            callbacks.append(checkpoint)
        reduce = ReduceLROnPlateau(
            monitor=monitor, patience=patience_reduce, min_lr=1e-7
        )
        callbacks.append(reduce)
        early = EarlyStopping(monitor=monitor, patience=patience_early)
        callbacks.append(early)
        return callbacks

    def build_model(self):
        raise NotImplementedError

    def save_config(self, path):
        """Serialise hyper-parameters and the token mapping as JSON."""
        with open(path, "w") as f:
            json.dump(
                {
                    "mapping": self.mapping,
                    "cat_cols": self.cat_cols,
                    "num_cols": self.num_cols,
                    "n_targets": self.n_targets,
                    "num_layers": self.num_layers,
                    "dropout": self.dropout,
                    "frequency": self.frequency,
                    "d_model": self.d_model,
                },
                f,
                indent=4,
            )

    def save_weigts(self, path):
        # Kept under its historical (misspelled) name for backward
        # compatibility with existing callers; prefer save_weights().
        self.model.save_weights(path)

    def save_weights(self, path):
        """Correctly-spelled alias for :meth:`save_weigts`."""
        self.save_weigts(path)

    def load_config(self, path):
        """Restore the state written by :meth:`save_config`.

        An ``n_targets`` passed to ``__init__`` takes precedence over the
        stored value, so a pre-trained config can be reused with a
        different output head.
        """
        with open(path, "r") as f:
            config = json.load(f)
        self.mapping = config["mapping"]
        self.cat_cols = config["cat_cols"]
        self.num_cols = config["num_cols"]
        self.n_targets = (
            config["n_targets"] if self.n_targets is None else self.n_targets
        )
        self.num_layers = config["num_layers"]
        self.dropout = config["dropout"]
        self.frequency = config["frequency"]
        self.d_model = config["d_model"]

    def load_weights(self, path, by_name=False):
        """Build the model (if needed) and load weights from ``path``."""
        self.build_model()
        self.model.load_weights(path, by_name=by_name)
class DeepTabularClassifier(DeepTabular):
    """Transformer-based classifier for tabular data.

    ``n_targets == 1`` is treated as binary classification (sigmoid output,
    0.5 threshold); ``n_targets > 1`` as multi-class (argmax over outputs).
    """

    def __init__(
        self,
        cat_cols=None,
        num_cols=None,
        n_targets=None,
        num_layers=4,
        dropout=0.1,
        d_model=64,
    ):
        super().__init__(
            cat_cols=cat_cols,
            num_cols=num_cols,
            n_targets=n_targets,
            num_layers=num_layers,
            dropout=dropout,
            d_model=d_model,
        )

    def build_model(self):
        """Instantiate the underlying transformer in classification mode."""
        model = transformer_tabular(
            n_categories=len(self.mapping) + 1,
            n_targets=self.n_targets,
            num_layers=self.num_layers,
            dropout=self.dropout,
            d_model=self.d_model,
            seq_len=(0 if self.cat_cols is None else len(self.cat_cols))
            + (0 if self.num_cols is None else len(self.num_cols)),
            embeds_size=50,
        )
        self.model = model

    def fit(
        self,
        df: pd.DataFrame,
        target_col: str,
        monitor: str = "val_acc",
        patience_early: int = 15,
        patience_reduce: int = 9,
        save_path: Union[str, None] = "classifier.h5",
        epochs=128,
        batch_size=128,
    ):
        """Train on ``df`` using ``target_col`` as the label column.

        A stratified 90/10 split is attempted first; when stratification is
        impossible (e.g. a class with a single sample) it falls back to a
        plain random split.
        """
        if self.mapping is None:
            self.fit_mapping(df)
        if self.model is None:
            self.build_model()
        data_x1, data_x2 = self.prepare_data(df)
        data_y = df[target_col].tolist()
        try:
            train_x1, val_x1, train_x2, val_x2, train_y, val_y = train_test_split(
                data_x1,
                data_x2,
                data_y,
                test_size=0.1,
                random_state=1337,
                stratify=data_y,
            )
        except ValueError:
            # stratify raises when some class is too rare to split
            train_x1, val_x1, train_x2, val_x2, train_y, val_y = train_test_split(
                data_x1, data_x2, data_y, test_size=0.1, random_state=1337
            )
        train_x1 = np.array(train_x1)
        val_x1 = np.array(val_x1)
        train_x2 = np.array(train_x2)[..., np.newaxis]
        val_x2 = np.array(val_x2)[..., np.newaxis]
        train_y = np.array(train_y)[..., np.newaxis]
        val_y = np.array(val_y)[..., np.newaxis]
        callbacks = self.build_callbacks(
            monitor, patience_early, patience_reduce, save_path
        )
        self.model.fit(
            [train_x1, train_x2],
            train_y,
            validation_data=([val_x1, val_x2], val_y),
            epochs=epochs,
            callbacks=callbacks,
            batch_size=batch_size,
        )

    def predict(self, test):
        """Return predicted class indices for the rows of ``test``."""
        data_x1, data_x2 = self.prepare_data(test)
        data_x1 = np.array(data_x1)
        data_x2 = np.array(data_x2)[..., np.newaxis]
        predict = self.model.predict([data_x1, data_x2])
        if self.n_targets > 1:
            pred_classes = np.argmax(predict.squeeze(), axis=-1).ravel()
        else:
            # BUG FIX: the np.int alias was removed in NumPy 1.24; the
            # builtin int keeps the original semantics.
            pred_classes = (predict.squeeze() > 0.5).ravel().astype(int)
        return pred_classes
class DeepTabularRegressor(DeepTabular):
    """Transformer-based regressor for tabular data (multi-target capable)."""

    def __init__(
        self,
        cat_cols=None,
        num_cols=None,
        n_targets=None,
        num_layers=4,
        dropout=0.1,
        d_model=64,
    ):
        super().__init__(
            cat_cols=cat_cols,
            num_cols=num_cols,
            n_targets=n_targets,
            num_layers=num_layers,
            dropout=dropout,
            d_model=d_model,
        )

    def build_model(self):
        """Instantiate the underlying transformer in regression mode."""
        n_cat = 0 if self.cat_cols is None else len(self.cat_cols)
        n_num = 0 if self.num_cols is None else len(self.num_cols)
        self.model = transformer_tabular(
            n_categories=len(self.mapping) + 1,
            n_targets=self.n_targets,
            num_layers=self.num_layers,
            dropout=self.dropout,
            d_model=self.d_model,
            seq_len=n_cat + n_num,
            embeds_size=50,
            task="regression",
        )

    def fit(
        self,
        df: pd.DataFrame,
        target_cols: List[str],
        monitor: str = "val_loss",
        patience_early: int = 15,
        patience_reduce: int = 9,
        save_path: Union[str, None] = "regressor.h5",
        epochs=128,
        batch_size=128,
    ):
        """Train on ``df``, regressing the ``target_cols`` columns."""
        if self.mapping is None:
            self.fit_mapping(df)
        if self.model is None:
            self.build_model()
        tokens, values = self.prepare_data(df)
        labels = df[target_cols].values
        tok_tr, tok_va, val_tr, val_va, y_tr, y_va = train_test_split(
            tokens, values, labels, test_size=0.1, random_state=1337
        )
        tok_tr = np.array(tok_tr)
        tok_va = np.array(tok_va)
        # the value channel needs an explicit trailing feature axis
        val_tr = np.array(val_tr)[..., np.newaxis]
        val_va = np.array(val_va)[..., np.newaxis]
        y_tr = np.array(y_tr)
        y_va = np.array(y_va)
        callbacks = self.build_callbacks(
            monitor, patience_early, patience_reduce, save_path
        )
        self.model.fit(
            [tok_tr, val_tr],
            y_tr,
            validation_data=([tok_va, val_va], y_va),
            epochs=epochs,
            callbacks=callbacks,
            batch_size=batch_size,
        )

    def predict(self, test):
        """Return the raw model outputs for the rows of ``test``."""
        tokens, values = self.prepare_data(test)
        x1 = np.array(tokens)
        x2 = np.array(values)[..., np.newaxis]
        return self.model.predict([x1, x2])
class DeepTabularUnsupervised(DeepTabular):
    """Self-supervised pre-trainer: the model reconstructs randomly masked
    cell values (and distractor indicators) from the visible ones."""

    def __init__(
        self,
        cat_cols=None,
        num_cols=None,
        n_targets=None,
        num_layers=4,
        dropout=0.1,
        d_model=64,
    ):
        super().__init__(
            cat_cols=cat_cols,
            num_cols=num_cols,
            n_targets=n_targets,
            num_layers=num_layers,
            dropout=dropout,
            d_model=d_model,
        )

    def build_model(self):
        # seq_len doubles the categorical part because prepare_data(...,
        # add_distractors=True) appends one distractor per categorical column.
        model = transformer_tabular(
            n_categories=len(self.mapping) + 1,
            n_targets=self.n_targets,
            num_layers=self.num_layers,
            dropout=self.dropout,
            d_model=self.d_model,
            seq_len=2 * (0 if self.cat_cols is None else len(self.cat_cols))
            + (0 if self.num_cols is None else len(self.num_cols)),
            embeds_size=50,
            task="pretrain",
        )
        self.model = model

    def fit(
        self,
        df: pd.DataFrame,
        monitor: str = "loss",
        patience_early: int = 15,
        patience_reduce: int = 9,
        save_path: Union[str, None] = "unsupervised.h5",
        epochs=128,
        batch_size=128,
    ):
        """Pre-train on ``df``; config and weights are saved every 10 epochs.

        NOTE(review): artifacts go to "<save_path>_config.json" and
        "<save_path>_weights.h5" (e.g. "unsupervised.h5_weights.h5") --
        presumably intentional, but worth confirming.
        """
        if self.mapping is None:
            self.fit_mapping(df)
        if self.model is None:
            self.build_model()
        data_x1, data_x2 = self.prepare_data(df, add_distractors=True)
        train_x1, val_x1, train_x2, val_x2 = train_test_split(
            data_x1, data_x2, test_size=0.1, random_state=1337
        )
        train_x1 = np.array(train_x1)
        val_x1 = np.array(val_x1)
        train_x2 = np.array(train_x2)[..., np.newaxis]
        val_x2 = np.array(val_x2)[..., np.newaxis]
        # No checkpoint callback here (save_path=None): saving is done
        # manually after every 10-epoch chunk below.
        callbacks = self.build_callbacks(
            monitor, patience_early, patience_reduce, save_path=None
        )
        for _ in range(epochs // 10):
            # Zero out roughly half of the input values with a fresh random
            # mask each chunk; the unmasked arrays stay the targets
            # (denoising objective).
            train_x2_mask = train_x2.copy()
            val_x2_mask = val_x2.copy()
            train_x2_mask[np.random.uniform(size=train_x2_mask.shape) > 0.5] = 0
            val_x2_mask[np.random.uniform(size=val_x2_mask.shape) > 0.5] = 0
            self.model.fit(
                [train_x1, train_x2_mask],
                train_x2,
                validation_data=([val_x1, val_x2_mask], val_x2),
                epochs=10,
                callbacks=callbacks,
                batch_size=batch_size,
            )
            self.save_config("%s_config.json" % save_path)
            self.save_weigts("%s_weights.h5" % save_path)
            del train_x2_mask
            del val_x2_mask
| [
"json.dump",
"tqdm.tqdm",
"json.load",
"numpy.random.uniform",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.callbacks.ModelCheckpoint",
"numpy.array",
"collections.Counter",
"tensorflow.keras.callbacks.EarlyStopping"
] | [((948, 967), 'tqdm.tqdm', 'tqdm', (['self.cat_cols'], {}), '(self.cat_cols)\n', (952, 967), False, 'from tqdm import tqdm\n'), ((2652, 2726), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': 'monitor', 'patience': 'patience_reduce', 'min_lr': '(1e-07)'}), '(monitor=monitor, patience=patience_reduce, min_lr=1e-07)\n', (2669, 2726), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint\n'), ((2798, 2853), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': 'monitor', 'patience': 'patience_early'}), '(monitor=monitor, patience=patience_early)\n', (2811, 2853), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint\n'), ((6183, 6201), 'numpy.array', 'np.array', (['train_x1'], {}), '(train_x1)\n', (6191, 6201), True, 'import numpy as np\n'), ((6219, 6235), 'numpy.array', 'np.array', (['val_x1'], {}), '(val_x1)\n', (6227, 6235), True, 'import numpy as np\n'), ((6901, 6918), 'numpy.array', 'np.array', (['data_x1'], {}), '(data_x1)\n', (6909, 6918), True, 'import numpy as np\n'), ((8766, 8842), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data_x1', 'data_x2', 'data_y'], {'test_size': '(0.1)', 'random_state': '(1337)'}), '(data_x1, data_x2, data_y, test_size=0.1, random_state=1337)\n', (8782, 8842), False, 'from sklearn.model_selection import train_test_split\n'), ((8885, 8903), 'numpy.array', 'np.array', (['train_x1'], {}), '(train_x1)\n', (8893, 8903), True, 'import numpy as np\n'), ((8921, 8937), 'numpy.array', 'np.array', (['val_x1'], {}), '(val_x1)\n', (8929, 8937), True, 'import numpy as np\n'), ((9062, 9079), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (9070, 9079), True, 'import numpy as np\n'), ((9096, 9111), 'numpy.array', 'np.array', (['val_y'], {}), '(val_y)\n', (9104, 9111), True, 'import numpy as np\n'), ((9569, 9586), 'numpy.array', 'np.array', (['data_x1'], {}), 
'(data_x1)\n', (9577, 9586), True, 'import numpy as np\n'), ((11171, 11239), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data_x1', 'data_x2'], {'test_size': '(0.1)', 'random_state': '(1337)'}), '(data_x1, data_x2, test_size=0.1, random_state=1337)\n', (11187, 11239), False, 'from sklearn.model_selection import train_test_split\n'), ((11282, 11300), 'numpy.array', 'np.array', (['train_x1'], {}), '(train_x1)\n', (11290, 11300), True, 'import numpy as np\n'), ((11318, 11334), 'numpy.array', 'np.array', (['val_x1'], {}), '(val_x1)\n', (11326, 11334), True, 'import numpy as np\n'), ((1068, 1083), 'collections.Counter', 'Counter', (['values'], {}), '(values)\n', (1075, 1083), False, 'from collections import Counter\n'), ((2398, 2501), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['save_path'], {'monitor': 'monitor', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(True)'}), '(save_path, monitor=monitor, verbose=1, save_best_only=True,\n save_weights_only=True)\n', (2413, 2501), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint\n'), ((3056, 3311), 'json.dump', 'json.dump', (["{'mapping': self.mapping, 'cat_cols': self.cat_cols, 'num_cols': self.\n num_cols, 'n_targets': self.n_targets, 'num_layers': self.num_layers,\n 'dropout': self.dropout, 'frequency': self.frequency, 'd_model': self.\n d_model}", 'f'], {'indent': '(4)'}), "({'mapping': self.mapping, 'cat_cols': self.cat_cols, 'num_cols':\n self.num_cols, 'n_targets': self.n_targets, 'num_layers': self.\n num_layers, 'dropout': self.dropout, 'frequency': self.frequency,\n 'd_model': self.d_model}, f, indent=4)\n", (3065, 3311), False, 'import json\n'), ((3703, 3715), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3712, 3715), False, 'import json\n'), ((5759, 5856), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data_x1', 'data_x2', 'data_y'], {'test_size': '(0.1)', 'random_state': 
'(1337)', 'stratify': 'data_y'}), '(data_x1, data_x2, data_y, test_size=0.1, random_state=1337,\n stratify=data_y)\n', (5775, 5856), False, 'from sklearn.model_selection import train_test_split\n'), ((6255, 6273), 'numpy.array', 'np.array', (['train_x2'], {}), '(train_x2)\n', (6263, 6273), True, 'import numpy as np\n'), ((6308, 6324), 'numpy.array', 'np.array', (['val_x2'], {}), '(val_x2)\n', (6316, 6324), True, 'import numpy as np\n'), ((6360, 6377), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (6368, 6377), True, 'import numpy as np\n'), ((6411, 6426), 'numpy.array', 'np.array', (['val_y'], {}), '(val_y)\n', (6419, 6426), True, 'import numpy as np\n'), ((6937, 6954), 'numpy.array', 'np.array', (['data_x2'], {}), '(data_x2)\n', (6945, 6954), True, 'import numpy as np\n'), ((8957, 8975), 'numpy.array', 'np.array', (['train_x2'], {}), '(train_x2)\n', (8965, 8975), True, 'import numpy as np\n'), ((9010, 9026), 'numpy.array', 'np.array', (['val_x2'], {}), '(val_x2)\n', (9018, 9026), True, 'import numpy as np\n'), ((9605, 9622), 'numpy.array', 'np.array', (['data_x2'], {}), '(data_x2)\n', (9613, 9622), True, 'import numpy as np\n'), ((11354, 11372), 'numpy.array', 'np.array', (['train_x2'], {}), '(train_x2)\n', (11362, 11372), True, 'import numpy as np\n'), ((11407, 11423), 'numpy.array', 'np.array', (['val_x2'], {}), '(val_x2)\n', (11415, 11423), True, 'import numpy as np\n'), ((6056, 6132), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data_x1', 'data_x2', 'data_y'], {'test_size': '(0.1)', 'random_state': '(1337)'}), '(data_x1, data_x2, data_y, test_size=0.1, random_state=1337)\n', (6072, 6132), False, 'from sklearn.model_selection import train_test_split\n'), ((11713, 11756), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'train_x2_mask.shape'}), '(size=train_x2_mask.shape)\n', (11730, 11756), True, 'import numpy as np\n'), ((11792, 11833), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 
'val_x2_mask.shape'}), '(size=val_x2_mask.shape)\n', (11809, 11833), True, 'import numpy as np\n')] |
import time
import random
import math
import numpy as np
import matplotlib.pyplot as plt
#------------------------------------------------------------------------------
# Customization section: tune these to control the annealing run.
initial_temperature = 100  # starting temperature of the cooling schedule
cooling = 0.8  # geometric cooling coefficient (T <- T * cooling per level)
number_variables = 2  # dimensionality of the search space
upper_bounds = [3, 3]  # per-variable upper bounds of the search box
lower_bounds = [-3, -3]  # per-variable lower bounds of the search box
computing_time = 1 # second(s) -- wall-clock budget for the whole search
def objective_function(X):
x=X[0]
y=X[1]
value = 3*(1-x)**2*math.exp(-x**2 - (y+1)**2) - 10*(x/5 - x**3 - y**5)*math.exp(-x**2 - y**2) -1/3*math.exp(-(x+1)**2 - y**2)
return value
#------------------------------------------------------------------------------
# Simulated Annealing Algorithm:
initial_solution=np.zeros((number_variables))
for v in range(number_variables):
initial_solution[v] = random.uniform(lower_bounds[v],upper_bounds[v])
current_solution = initial_solution
best_solution = initial_solution
n = 1 # no of solutions accepted
best_fitness = objective_function(best_solution)
current_temperature = initial_temperature # current temperature
start = time.time()
no_attempts = 100 # number of attempts in each level of temperature
record_best_fitness =[]
for i in range(9999999):
for j in range(no_attempts):
for k in range(number_variables):
current_solution[k] = best_solution[k] + 0.1*(random.uniform(lower_bounds[k],upper_bounds[k]))
current_solution[k] = max(min(current_solution[k],upper_bounds[k]),lower_bounds[k]) # repair the solution respecting the bounds
current_fitness = objective_function(current_solution)
E = abs(current_fitness - best_fitness)
if i == 0 and j == 0:
EA = E
if current_fitness < best_fitness:
p = math.exp(-E/(EA*current_temperature))
# make a decision to accept the worse solution or not
if random.random()<p:
accept = True # this worse solution is accepted
else:
accept = False # this worse solution is not accepted
else:
accept = True # accept better solution
if accept==True:
best_solution = current_solution # update the best solution
best_fitness = objective_function(best_solution)
n = n + 1 # count the solutions accepted
EA = (EA *(n-1) + E)/n # update EA
print('interation: {}, best_solution: {}, best_fitness: {}'.format(i, best_solution, best_fitness))
record_best_fitness.append(best_fitness)
# Cooling the temperature
current_temperature = current_temperature*cooling
# Stop by computing time
end = time.time()
if end-start >= computing_time:
break
plt.plot(record_best_fitness)
| [
"math.exp",
"matplotlib.pyplot.plot",
"random.uniform",
"numpy.zeros",
"time.time",
"random.random"
] | [((690, 716), 'numpy.zeros', 'np.zeros', (['number_variables'], {}), '(number_variables)\n', (698, 716), True, 'import numpy as np\n'), ((1057, 1068), 'time.time', 'time.time', ([], {}), '()\n', (1066, 1068), False, 'import time\n'), ((2701, 2730), 'matplotlib.pyplot.plot', 'plt.plot', (['record_best_fitness'], {}), '(record_best_fitness)\n', (2709, 2730), True, 'import matplotlib.pyplot as plt\n'), ((779, 827), 'random.uniform', 'random.uniform', (['lower_bounds[v]', 'upper_bounds[v]'], {}), '(lower_bounds[v], upper_bounds[v])\n', (793, 827), False, 'import random\n'), ((2639, 2650), 'time.time', 'time.time', ([], {}), '()\n', (2648, 2650), False, 'import time\n'), ((514, 546), 'math.exp', 'math.exp', (['(-(x + 1) ** 2 - y ** 2)'], {}), '(-(x + 1) ** 2 - y ** 2)\n', (522, 546), False, 'import math\n'), ((1749, 1790), 'math.exp', 'math.exp', (['(-E / (EA * current_temperature))'], {}), '(-E / (EA * current_temperature))\n', (1757, 1790), False, 'import math\n'), ((434, 466), 'math.exp', 'math.exp', (['(-x ** 2 - (y + 1) ** 2)'], {}), '(-x ** 2 - (y + 1) ** 2)\n', (442, 466), False, 'import math\n'), ((486, 512), 'math.exp', 'math.exp', (['(-x ** 2 - y ** 2)'], {}), '(-x ** 2 - y ** 2)\n', (494, 512), False, 'import math\n'), ((1868, 1883), 'random.random', 'random.random', ([], {}), '()\n', (1881, 1883), False, 'import random\n'), ((1323, 1371), 'random.uniform', 'random.uniform', (['lower_bounds[k]', 'upper_bounds[k]'], {}), '(lower_bounds[k], upper_bounds[k])\n', (1337, 1371), False, 'import random\n')] |
import os
from tqdm import *
from skimage import io
from shutil import copyfile
import cv2
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
from skimage import transform as trans
from shutil import copyfile
import face_alignment
from os.path import join, isdir, basename
import glob
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="" # don't use GPU
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False, device = 'cpu')#device='cuda:0'
def alignment(cv_img, dst, dst_w, dst_h):
if dst_w == 96 and dst_h == 112:
src = np.array([
[30.2946, 51.6963],
[65.5318, 51.5014],
[48.0252, 71.7366],
[33.5493, 92.3655],
[62.7299, 92.2041] ], dtype=np.float32)
elif dst_w == 112 and dst_h == 112:
src = np.array([
[38.2946, 51.6963],
[73.5318, 51.5014],
[56.0252, 71.7366],
[41.5493, 92.3655],
[70.7299, 92.2041] ], dtype=np.float32)
elif dst_w == 150 and dst_h == 150:
src = np.array([
[51.287415, 69.23612],
[98.48009, 68.97509],
[75.03375, 96.075806],
[55.646385, 123.7038],
[94.72754, 123.48763]], dtype=np.float32)
elif dst_w == 160 and dst_h == 160:
src = np.array([
[54.706573, 73.85186],
[105.045425, 73.573425],
[80.036, 102.48086],
[59.356144, 131.95071],
[101.04271, 131.72014]], dtype=np.float32)
elif dst_w == 224 and dst_h == 224:
src = np.array([
[76.589195, 103.3926],
[147.0636, 103.0028],
[112.0504, 143.4732],
[83.098595, 184.731],
[141.4598, 184.4082]], dtype=np.float32)
else:
return None
tform = trans.SimilarityTransform()
tform.estimate(dst, src)
M = tform.params[0:2,:]
face_img = cv2.warpAffine(cv_img,M,(dst_w,dst_h), borderValue = 0.0)
return face_img
def find_landmark(image):
# image = io.imread(file)
landmarks = fa.get_landmarks(image)
check = False
if landmarks is None:
print('Step1: unknown ') #img_path
for sigma in np.linspace(0.0, 3.0, num=11).tolist():
seq = iaa.GaussianBlur(sigma)
image_aug = seq.augment_image(image)
landmarks = fa.get_landmarks(image_aug)
if landmarks is not None:
print('sigma:',sigma)
points = landmarks[0]
p1 = np.mean(points[36:42,:], axis=0)
p2 = np.mean(points[42:48,:], axis=0)
p3 = points[33,:]
p4 = points[48,:]
p5 = points[54,:]
if np.mean([p1[1],p2[1]]) < p3[1] \
and p3[1] < np.mean([p4[1],p5[1]]) \
and np.min([p4[1], p5[1]]) > np.max([p1[1], p2[1]]) \
and np.min([p1[1], p2[1]]) < p3[1] \
and p3[1] < np.max([p4[1], p5[1]]):
dst = np.array([p1,p2,p3,p4,p5],dtype=np.float32)
cv_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# for (x, y) in [(96, 112), (112, 112), (150, 150), (160, 160), (224, 224)]:
# face_xy = alignment(cv_img, dst, x, y)
# cv2.imwrite('../datasets/aligned/%s/%sx%s/%s/%s'%(mset,x, y, folder, basename(file)), face_xy)
check = True
return cv_img, dst, check
else:
points = landmarks[0]
p1 = np.mean(points[36:42,:], axis=0)
p2 = np.mean(points[42:48,:], axis=0)
p3 = points[33,:]
p4 = points[48,:]
p5 = points[54,:]
dst = np.array([p1,p2,p3,p4,p5],dtype=np.float32)
cv_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# for (x, y) in [(96, 112), (112, 112), (150, 150), (160, 160), (224, 224)]:
# face_xy = alignment(cv_img, dst, x, y)
# cv2.imwrite('../datasets/aligned/%s/%sx%s/%s/%s'%(mset,x, y, folder, basename(file)), face_xy)
check = True
return cv_img, dst, check
if __name__ == "__main__":
for mset in ['atvn_emb']: #
count = 0
total = 0
if not os.path.exists('../datasets/aligned/%s/96x112'%mset):
os.makedirs('../datasets/aligned/%s/96x112'%mset)
if not os.path.exists('../datasets/aligned/%s/112x112'%mset):
os.makedirs('../datasets/aligned/%s/112x112'%mset)
if not os.path.exists('../datasets/aligned/%s/150x150'%mset):
os.makedirs('../datasets/aligned/%s/150x150'%mset)
if not os.path.exists('../datasets/aligned/%s/160x160'%mset):
os.makedirs('../datasets/aligned/%s/160x160'%mset)
if not os.path.exists('../datasets/aligned/%s/224x224'%mset):
os.makedirs('../datasets/aligned/%s/224x224'%mset)
if not os.path.exists('../datasets/unknown/%s'%mset):
os.makedirs('../datasets/unknown/%s'%mset)
# for rdir, _, files in os.walk('../datasets/%s'%mset):
unknown_file = open('../datasets/unknown/%s.txt'%mset,'w')
############
image_parent_dir = '../datasets/%s'%mset
src_dirs = [d for d in os.listdir(image_parent_dir) if isdir(join(image_parent_dir, d))]
for folder in src_dirs:
files = glob.glob(join(image_parent_dir, folder)+'/*')
############
for x in ['96x112', '112x112', '150x150', '160x160', '224x224']:
if not os.path.exists('../datasets/aligned/%s/%s/%s'%(mset, x, folder)):
os.makedirs('../datasets/aligned/%s/%s/%s'%(mset, x, folder))
for file in tqdm(files):
print(file)
if True in [file.lower().endswith(ext) for ext in ["jpg", "jpeg", "bmp", 'png']]:
image = io.imread(file)
cv_img, dst, check = find_landmark(image)
for (x, y) in [(96, 112), (112, 112), (150, 150), (160, 160), (224, 224)]:
face_xy = alignment(cv_img, dst, x, y)
cv2.imwrite('../datasets/aligned/%s/%sx%s/%s/%s'%(mset,x, y, folder, basename(file)), face_xy)
# check = True
if check == False:
count += 1
print(file + '\t' + 'corrupted') #img_path
unknown_file.write(file + '\n')
if mset == 'test':
cv_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
for (x, y) in [(96, 112), (112, 112), (150, 150), (160, 160), (224, 224)]:
face_xy = cv2.resize(cv_img, (x, y), interpolation = cv2.INTER_CUBIC)
cv2.imwrite('../datasets/aligned/%s/%sx%s/%s/%s'%(mset,x, y, folder, basename(file)), face_xy)
# face_96x112 = cv2.resize(cv_img, (96, 112), interpolation = cv2.INTER_CUBIC)
# cv2.imwrite('../datasets/aligned/%s/96x112/%s'%(mset,file), face_96x112)
copyfile(file, '../datasets/unknown/%s/%s'%(mset,file)) #img_path
total += 1
unknown_file.close()
print(mset, count, total) | [
"cv2.resize",
"os.makedirs",
"face_alignment.FaceAlignment",
"os.path.basename",
"cv2.cvtColor",
"os.path.exists",
"skimage.transform.SimilarityTransform",
"cv2.warpAffine",
"numpy.min",
"numpy.mean",
"numpy.array",
"numpy.max",
"numpy.linspace",
"shutil.copyfile",
"os.path.join",
"os.... | [((428, 527), 'face_alignment.FaceAlignment', 'face_alignment.FaceAlignment', (['face_alignment.LandmarksType._2D'], {'flip_input': '(False)', 'device': '"""cpu"""'}), "(face_alignment.LandmarksType._2D, flip_input=\n False, device='cpu')\n", (456, 527), False, 'import face_alignment\n'), ((1887, 1914), 'skimage.transform.SimilarityTransform', 'trans.SimilarityTransform', ([], {}), '()\n', (1912, 1914), True, 'from skimage import transform as trans\n'), ((1987, 2045), 'cv2.warpAffine', 'cv2.warpAffine', (['cv_img', 'M', '(dst_w, dst_h)'], {'borderValue': '(0.0)'}), '(cv_img, M, (dst_w, dst_h), borderValue=0.0)\n', (2001, 2045), False, 'import cv2\n'), ((635, 768), 'numpy.array', 'np.array', (['[[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366], [33.5493, \n 92.3655], [62.7299, 92.2041]]'], {'dtype': 'np.float32'}), '([[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366], [\n 33.5493, 92.3655], [62.7299, 92.2041]], dtype=np.float32)\n', (643, 768), True, 'import numpy as np\n'), ((3665, 3698), 'numpy.mean', 'np.mean', (['points[36:42, :]'], {'axis': '(0)'}), '(points[36:42, :], axis=0)\n', (3672, 3698), True, 'import numpy as np\n'), ((3711, 3744), 'numpy.mean', 'np.mean', (['points[42:48, :]'], {'axis': '(0)'}), '(points[42:48, :], axis=0)\n', (3718, 3744), True, 'import numpy as np\n'), ((3837, 3885), 'numpy.array', 'np.array', (['[p1, p2, p3, p4, p5]'], {'dtype': 'np.float32'}), '([p1, p2, p3, p4, p5], dtype=np.float32)\n', (3845, 3885), True, 'import numpy as np\n'), ((3898, 3936), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (3910, 3936), False, 'import cv2\n'), ((880, 1013), 'numpy.array', 'np.array', (['[[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [41.5493, \n 92.3655], [70.7299, 92.2041]]'], {'dtype': 'np.float32'}), '([[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [\n 41.5493, 92.3655], [70.7299, 92.2041]], dtype=np.float32)\n', (888, 1013), True, 
'import numpy as np\n'), ((2330, 2353), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', (['sigma'], {}), '(sigma)\n', (2346, 2353), True, 'from imgaug import augmenters as iaa\n'), ((4355, 4409), 'os.path.exists', 'os.path.exists', (["('../datasets/aligned/%s/96x112' % mset)"], {}), "('../datasets/aligned/%s/96x112' % mset)\n", (4369, 4409), False, 'import os\n'), ((4421, 4472), 'os.makedirs', 'os.makedirs', (["('../datasets/aligned/%s/96x112' % mset)"], {}), "('../datasets/aligned/%s/96x112' % mset)\n", (4432, 4472), False, 'import os\n'), ((4486, 4541), 'os.path.exists', 'os.path.exists', (["('../datasets/aligned/%s/112x112' % mset)"], {}), "('../datasets/aligned/%s/112x112' % mset)\n", (4500, 4541), False, 'import os\n'), ((4553, 4605), 'os.makedirs', 'os.makedirs', (["('../datasets/aligned/%s/112x112' % mset)"], {}), "('../datasets/aligned/%s/112x112' % mset)\n", (4564, 4605), False, 'import os\n'), ((4619, 4674), 'os.path.exists', 'os.path.exists', (["('../datasets/aligned/%s/150x150' % mset)"], {}), "('../datasets/aligned/%s/150x150' % mset)\n", (4633, 4674), False, 'import os\n'), ((4686, 4738), 'os.makedirs', 'os.makedirs', (["('../datasets/aligned/%s/150x150' % mset)"], {}), "('../datasets/aligned/%s/150x150' % mset)\n", (4697, 4738), False, 'import os\n'), ((4752, 4807), 'os.path.exists', 'os.path.exists', (["('../datasets/aligned/%s/160x160' % mset)"], {}), "('../datasets/aligned/%s/160x160' % mset)\n", (4766, 4807), False, 'import os\n'), ((4819, 4871), 'os.makedirs', 'os.makedirs', (["('../datasets/aligned/%s/160x160' % mset)"], {}), "('../datasets/aligned/%s/160x160' % mset)\n", (4830, 4871), False, 'import os\n'), ((4885, 4940), 'os.path.exists', 'os.path.exists', (["('../datasets/aligned/%s/224x224' % mset)"], {}), "('../datasets/aligned/%s/224x224' % mset)\n", (4899, 4940), False, 'import os\n'), ((4952, 5004), 'os.makedirs', 'os.makedirs', (["('../datasets/aligned/%s/224x224' % mset)"], {}), "('../datasets/aligned/%s/224x224' % mset)\n", 
(4963, 5004), False, 'import os\n'), ((5019, 5066), 'os.path.exists', 'os.path.exists', (["('../datasets/unknown/%s' % mset)"], {}), "('../datasets/unknown/%s' % mset)\n", (5033, 5066), False, 'import os\n'), ((5078, 5122), 'os.makedirs', 'os.makedirs', (["('../datasets/unknown/%s' % mset)"], {}), "('../datasets/unknown/%s' % mset)\n", (5089, 5122), False, 'import os\n'), ((1125, 1272), 'numpy.array', 'np.array', (['[[51.287415, 69.23612], [98.48009, 68.97509], [75.03375, 96.075806], [\n 55.646385, 123.7038], [94.72754, 123.48763]]'], {'dtype': 'np.float32'}), '([[51.287415, 69.23612], [98.48009, 68.97509], [75.03375, 96.075806\n ], [55.646385, 123.7038], [94.72754, 123.48763]], dtype=np.float32)\n', (1133, 1272), True, 'import numpy as np\n'), ((2272, 2301), 'numpy.linspace', 'np.linspace', (['(0.0)', '(3.0)'], {'num': '(11)'}), '(0.0, 3.0, num=11)\n', (2283, 2301), True, 'import numpy as np\n'), ((2590, 2623), 'numpy.mean', 'np.mean', (['points[36:42, :]'], {'axis': '(0)'}), '(points[36:42, :], axis=0)\n', (2597, 2623), True, 'import numpy as np\n'), ((2644, 2677), 'numpy.mean', 'np.mean', (['points[42:48, :]'], {'axis': '(0)'}), '(points[42:48, :], axis=0)\n', (2651, 2677), True, 'import numpy as np\n'), ((5354, 5382), 'os.listdir', 'os.listdir', (['image_parent_dir'], {}), '(image_parent_dir)\n', (5364, 5382), False, 'import os\n'), ((1383, 1538), 'numpy.array', 'np.array', (['[[54.706573, 73.85186], [105.045425, 73.573425], [80.036, 102.48086], [\n 59.356144, 131.95071], [101.04271, 131.72014]]'], {'dtype': 'np.float32'}), '([[54.706573, 73.85186], [105.045425, 73.573425], [80.036, \n 102.48086], [59.356144, 131.95071], [101.04271, 131.72014]], dtype=np.\n float32)\n', (1391, 1538), True, 'import numpy as np\n'), ((3119, 3167), 'numpy.array', 'np.array', (['[p1, p2, p3, p4, p5]'], {'dtype': 'np.float32'}), '([p1, p2, p3, p4, p5], dtype=np.float32)\n', (3127, 3167), True, 'import numpy as np\n'), ((3192, 3230), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 
'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (3204, 3230), False, 'import cv2\n'), ((5392, 5417), 'os.path.join', 'join', (['image_parent_dir', 'd'], {}), '(image_parent_dir, d)\n', (5396, 5417), False, 'from os.path import join, isdir, basename\n'), ((5482, 5512), 'os.path.join', 'join', (['image_parent_dir', 'folder'], {}), '(image_parent_dir, folder)\n', (5486, 5512), False, 'from os.path import join, isdir, basename\n'), ((5645, 5711), 'os.path.exists', 'os.path.exists', (["('../datasets/aligned/%s/%s/%s' % (mset, x, folder))"], {}), "('../datasets/aligned/%s/%s/%s' % (mset, x, folder))\n", (5659, 5711), False, 'import os\n'), ((5731, 5794), 'os.makedirs', 'os.makedirs', (["('../datasets/aligned/%s/%s/%s' % (mset, x, folder))"], {}), "('../datasets/aligned/%s/%s/%s' % (mset, x, folder))\n", (5742, 5794), False, 'import os\n'), ((5984, 5999), 'skimage.io.imread', 'io.imread', (['file'], {}), '(file)\n', (5993, 5999), False, 'from skimage import io\n'), ((1644, 1787), 'numpy.array', 'np.array', (['[[76.589195, 103.3926], [147.0636, 103.0028], [112.0504, 143.4732], [\n 83.098595, 184.731], [141.4598, 184.4082]]'], {'dtype': 'np.float32'}), '([[76.589195, 103.3926], [147.0636, 103.0028], [112.0504, 143.4732],\n [83.098595, 184.731], [141.4598, 184.4082]], dtype=np.float32)\n', (1652, 1787), True, 'import numpy as np\n'), ((2815, 2838), 'numpy.mean', 'np.mean', (['[p1[1], p2[1]]'], {}), '([p1[1], p2[1]])\n', (2822, 2838), True, 'import numpy as np\n'), ((2880, 2903), 'numpy.mean', 'np.mean', (['[p4[1], p5[1]]'], {}), '([p4[1], p5[1]])\n', (2887, 2903), True, 'import numpy as np\n'), ((2929, 2951), 'numpy.min', 'np.min', (['[p4[1], p5[1]]'], {}), '([p4[1], p5[1]])\n', (2935, 2951), True, 'import numpy as np\n'), ((2954, 2976), 'numpy.max', 'np.max', (['[p1[1], p2[1]]'], {}), '([p1[1], p2[1]])\n', (2960, 2976), True, 'import numpy as np\n'), ((3003, 3025), 'numpy.min', 'np.min', (['[p1[1], p2[1]]'], {}), '([p1[1], p2[1]])\n', (3009, 3025), True, 'import 
numpy as np\n'), ((3068, 3090), 'numpy.max', 'np.max', (['[p4[1], p5[1]]'], {}), '([p4[1], p5[1]])\n', (3074, 3090), True, 'import numpy as np\n'), ((7267, 7325), 'shutil.copyfile', 'copyfile', (['file', "('../datasets/unknown/%s/%s' % (mset, file))"], {}), "(file, '../datasets/unknown/%s/%s' % (mset, file))\n", (7275, 7325), False, 'from shutil import copyfile\n'), ((6659, 6697), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (6671, 6697), False, 'import cv2\n'), ((6844, 6901), 'cv2.resize', 'cv2.resize', (['cv_img', '(x, y)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(cv_img, (x, y), interpolation=cv2.INTER_CUBIC)\n', (6854, 6901), False, 'import cv2\n'), ((6314, 6328), 'os.path.basename', 'basename', (['file'], {}), '(file)\n', (6322, 6328), False, 'from os.path import join, isdir, basename\n'), ((7005, 7019), 'os.path.basename', 'basename', (['file'], {}), '(file)\n', (7013, 7019), False, 'from os.path import join, isdir, basename\n')] |
from scipy.spatial.distance import pdist, squareform
import scipy
from numpy import dot
from numpy.linalg import norm
import numpy as np
def rbf(X, sigma=0.5):
pairwise_dists = squareform(pdist(X, 'euclidean'))
A = scipy.exp(-pairwise_dists ** 2 / (2. * sigma ** 2))
return A
def cosine_similarity(X):
d=[]
cos_sim = lambda a,b: dot(a, b)/(norm(a)*norm(b))
for i in range(X.shape[0]):
td=[]
for j in range(X.shape[0]):
td.append(cos_sim(X[i], X[j]))
d.append(td)
A= np.array(d)
return A | [
"scipy.exp",
"numpy.array",
"scipy.spatial.distance.pdist",
"numpy.linalg.norm",
"numpy.dot"
] | [((218, 270), 'scipy.exp', 'scipy.exp', (['(-pairwise_dists ** 2 / (2.0 * sigma ** 2))'], {}), '(-pairwise_dists ** 2 / (2.0 * sigma ** 2))\n', (227, 270), False, 'import scipy\n'), ((484, 495), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (492, 495), True, 'import numpy as np\n'), ((190, 211), 'scipy.spatial.distance.pdist', 'pdist', (['X', '"""euclidean"""'], {}), "(X, 'euclidean')\n", (195, 211), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((336, 345), 'numpy.dot', 'dot', (['a', 'b'], {}), '(a, b)\n', (339, 345), False, 'from numpy import dot\n'), ((347, 354), 'numpy.linalg.norm', 'norm', (['a'], {}), '(a)\n', (351, 354), False, 'from numpy.linalg import norm\n'), ((355, 362), 'numpy.linalg.norm', 'norm', (['b'], {}), '(b)\n', (359, 362), False, 'from numpy.linalg import norm\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 19 21:02:22 2020
@author: Jeff
"""
import numpy as np
from scipy import ndimage
from scipy import misc
import matplotlib.pyplot as plt
image = misc.face()
divY = 0
divX = 0
def cutPuzzle(image_to_cut):
vertical_slices = np.array_split(image_to_cut, divY)
handler = 0
image_slices = np.zeros([int(image_pieces), int(image.shape[0] / divY), int(image.shape[1] / divX), 3], dtype=int)
for slice in vertical_slices:
horizontal_slices = np.hsplit(slice, divX)
for h_slice in horizontal_slices:
image_slices[handler] = h_slice
handler += 1
return image_slices
def getMinPrime(number, factor):
if(number / factor == 1):
return factor
elif(number % factor == 0):
return getMinPrime(number / factor, 2)
else:
return getMinPrime(number, factor + 1)
def generatePuzzle(image_pieces):
global divX
global divY
divY = getMinPrime(int(image_pieces), 2)
divX = int(int(image_pieces) / divY)
image_slices = cutPuzzle(image)
np.random.shuffle(image_slices)
return rebuildPuzzle(image_slices)
def rebuildPuzzle(image_slices):
puzzle = np.zeros([divY, int(image.shape[0] / divY), image.shape[1], 3], dtype=int)
for i in range(1, divY + 1):
puzzle[i - 1] = np.concatenate(image_slices[(i -1) * divX: i * divX], 1)
puzzle_final = np.concatenate(puzzle, 0)
return puzzle_final
def showPuzzle():
plt.figure(1)
plt.subplot(121)
plt.imshow(puzzle)
plt.subplot(122)
plt.imshow(image)
plt.show()
def movement_menu():
print("Seleccione el desplazamiento de la pieza:")
print("1. Arriba")
print("2. Abajo")
print("3. Izquierda")
print("4. Derecha")
return int(input("Seleccione una opción: "))
def movement(piece, order):
actual_puzzle = cutPuzzle(puzzle)
if(order == 1):
toMove = (piece - divX) % int(image_pieces)
elif(order == 2):
toMove = (piece + divX) % int(image_pieces)
elif(order == 3):
toMove = (piece - 1) % int(image_pieces)
else:
toMove = (piece + 1) % int(image_pieces)
actual_puzzle[[toMove, piece]] = actual_puzzle[[piece,toMove]]
return rebuildPuzzle(actual_puzzle)
def completed(puzzle):
return np.array_equal(image, puzzle)
image_pieces = input("Ingrese el número de piezas: ")
puzzle = generatePuzzle(image_pieces)
while(not completed(puzzle)):
showPuzzle()
print("E.g:"+"\n"+
"1 | 2"+"\n"+
"3 | 4"+"\n")
selected_piece = input(f"Seleccione una pieza del 1 al {image_pieces}: ")
selected_movement = movement_menu()
puzzle = movement(int(selected_piece) - 1, selected_movement)
showPuzzle() | [
"matplotlib.pyplot.subplot",
"numpy.random.shuffle",
"matplotlib.pyplot.show",
"numpy.array_equal",
"matplotlib.pyplot.imshow",
"numpy.hsplit",
"matplotlib.pyplot.figure",
"numpy.array_split",
"scipy.misc.face",
"numpy.concatenate"
] | [((194, 205), 'scipy.misc.face', 'misc.face', ([], {}), '()\n', (203, 205), False, 'from scipy import misc\n'), ((276, 310), 'numpy.array_split', 'np.array_split', (['image_to_cut', 'divY'], {}), '(image_to_cut, divY)\n', (290, 310), True, 'import numpy as np\n'), ((1089, 1120), 'numpy.random.shuffle', 'np.random.shuffle', (['image_slices'], {}), '(image_slices)\n', (1106, 1120), True, 'import numpy as np\n'), ((1415, 1440), 'numpy.concatenate', 'np.concatenate', (['puzzle', '(0)'], {}), '(puzzle, 0)\n', (1429, 1440), True, 'import numpy as np\n'), ((1488, 1501), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1498, 1501), True, 'import matplotlib.pyplot as plt\n'), ((1506, 1522), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1517, 1522), True, 'import matplotlib.pyplot as plt\n'), ((1527, 1545), 'matplotlib.pyplot.imshow', 'plt.imshow', (['puzzle'], {}), '(puzzle)\n', (1537, 1545), True, 'import matplotlib.pyplot as plt\n'), ((1550, 1566), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (1561, 1566), True, 'import matplotlib.pyplot as plt\n'), ((1571, 1588), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (1581, 1588), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1603), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1601, 1603), True, 'import matplotlib.pyplot as plt\n'), ((2311, 2340), 'numpy.array_equal', 'np.array_equal', (['image', 'puzzle'], {}), '(image, puzzle)\n', (2325, 2340), True, 'import numpy as np\n'), ((508, 530), 'numpy.hsplit', 'np.hsplit', (['slice', 'divX'], {}), '(slice, divX)\n', (517, 530), True, 'import numpy as np\n'), ((1339, 1395), 'numpy.concatenate', 'np.concatenate', (['image_slices[(i - 1) * divX:i * divX]', '(1)'], {}), '(image_slices[(i - 1) * divX:i * divX], 1)\n', (1353, 1395), True, 'import numpy as np\n')] |
# Copyright (c) 2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
==============
Four Panel Map
==============
By reading model output data from a netCDF file, we can create a four panel plot showing:
* 300 hPa heights and winds
* 500 hPa heights and absolute vorticity
* Surface temperatures
* Precipitable water
"""
###########################################
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage as ndimage
import xarray as xr
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo
###########################################
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
###########################################
# Function used to create the map subplots
def plot_background(ax):
ax.set_extent([235., 290., 20., 55.])
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.5)
ax.add_feature(cfeature.STATES, linewidth=0.5)
ax.add_feature(cfeature.BORDERS, linewidth=0.5)
return ax
###########################################
# Open the example netCDF data
ds = xr.open_dataset(get_test_data('gfs_output.nc', False))
print(ds)
###########################################
# Combine 1D latitude and longitudes into a 2D grid of locations
lon_2d, lat_2d = np.meshgrid(ds['lon'], ds['lat'])
###########################################
# Pull out the data
vort_500 = ds['vort_500'][0]
surface_temp = ds['temp'][0]
precip_water = ds['precip_water'][0]
winds_300 = ds['winds_300'][0]
###########################################
# Do unit conversions to what we wish to plot
vort_500 = vort_500 * 1e5
surface_temp = surface_temp.metpy.convert_units('degF')
precip_water = precip_water.metpy.convert_units('inches')
winds_300 = winds_300.metpy.convert_units('knots')
###########################################
# Smooth the height data
heights_300 = ndimage.gaussian_filter(ds['heights_300'][0], sigma=1.5, order=0)
heights_500 = ndimage.gaussian_filter(ds['heights_500'][0], sigma=1.5, order=0)
###########################################
# Create the figure and plot background on different axes
fig, axarr = plt.subplots(nrows=2, ncols=2, figsize=(20, 13), constrained_layout=True,
subplot_kw={'projection': crs})
add_metpy_logo(fig, 140, 120, size='large')
axlist = axarr.flatten()
for ax in axlist:
plot_background(ax)
# Upper left plot - 300-hPa winds and geopotential heights
cf1 = axlist[0].contourf(lon_2d, lat_2d, winds_300, cmap='cool', transform=ccrs.PlateCarree())
c1 = axlist[0].contour(lon_2d, lat_2d, heights_300, colors='black', linewidths=2,
transform=ccrs.PlateCarree())
axlist[0].clabel(c1, fontsize=10, inline=1, inline_spacing=1, fmt='%i', rightside_up=True)
axlist[0].set_title('300-hPa Wind Speeds and Heights', fontsize=16)
cb1 = fig.colorbar(cf1, ax=axlist[0], orientation='horizontal', shrink=0.74, pad=0)
cb1.set_label('knots', size='x-large')
# Upper right plot - 500mb absolute vorticity and geopotential heights
cf2 = axlist[1].contourf(lon_2d, lat_2d, vort_500, cmap='BrBG', transform=ccrs.PlateCarree(),
zorder=0, norm=plt.Normalize(-32, 32))
c2 = axlist[1].contour(lon_2d, lat_2d, heights_500, colors='k', linewidths=2,
transform=ccrs.PlateCarree())
axlist[1].clabel(c2, fontsize=10, inline=1, inline_spacing=1, fmt='%i', rightside_up=True)
axlist[1].set_title('500-hPa Absolute Vorticity and Heights', fontsize=16)
cb2 = fig.colorbar(cf2, ax=axlist[1], orientation='horizontal', shrink=0.74, pad=0)
cb2.set_label(r'$10^{-5}$ s$^{-1}$', size='x-large')
# Lower left plot - surface temperatures
cf3 = axlist[2].contourf(lon_2d, lat_2d, surface_temp, cmap='YlOrRd',
transform=ccrs.PlateCarree(), zorder=0)
axlist[2].set_title('Surface Temperatures', fontsize=16)
cb3 = fig.colorbar(cf3, ax=axlist[2], orientation='horizontal', shrink=0.74, pad=0)
cb3.set_label('\N{DEGREE FAHRENHEIT}', size='x-large')
# Lower right plot - precipitable water entire atmosphere
cf4 = axlist[3].contourf(lon_2d, lat_2d, precip_water, cmap='Greens',
transform=ccrs.PlateCarree(), zorder=0)
axlist[3].set_title('Precipitable Water', fontsize=16)
cb4 = fig.colorbar(cf4, ax=axlist[3], orientation='horizontal', shrink=0.74, pad=0)
cb4.set_label('in.', size='x-large')
# Set height padding for plots
fig.set_constrained_layout_pads(w_pad=0., h_pad=0.1, hspace=0., wspace=0.)
# Set figure title
fig.suptitle(ds['time'][0].dt.strftime('%d %B %Y %H:%MZ').values, fontsize=24)
# Display the plot
plt.show()
| [
"cartopy.crs.LambertConformal",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"metpy.cbook.get_test_data",
"cartopy.feature.COASTLINE.with_scale",
"scipy.ndimage.gaussian_filter",
"metpy.plots.add_metpy_logo",
"cartopy.crs.PlateCarree",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.Normalize"
] | [((736, 806), 'cartopy.crs.LambertConformal', 'ccrs.LambertConformal', ([], {'central_longitude': '(-100.0)', 'central_latitude': '(45.0)'}), '(central_longitude=-100.0, central_latitude=45.0)\n', (757, 806), True, 'import cartopy.crs as ccrs\n'), ((1429, 1462), 'numpy.meshgrid', 'np.meshgrid', (["ds['lon']", "ds['lat']"], {}), "(ds['lon'], ds['lat'])\n", (1440, 1462), True, 'import numpy as np\n'), ((2023, 2088), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (["ds['heights_300'][0]"], {'sigma': '(1.5)', 'order': '(0)'}), "(ds['heights_300'][0], sigma=1.5, order=0)\n", (2046, 2088), True, 'import scipy.ndimage as ndimage\n'), ((2103, 2168), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (["ds['heights_500'][0]"], {'sigma': '(1.5)', 'order': '(0)'}), "(ds['heights_500'][0], sigma=1.5, order=0)\n", (2126, 2168), True, 'import scipy.ndimage as ndimage\n'), ((2286, 2395), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(20, 13)', 'constrained_layout': '(True)', 'subplot_kw': "{'projection': crs}"}), "(nrows=2, ncols=2, figsize=(20, 13), constrained_layout=True,\n subplot_kw={'projection': crs})\n", (2298, 2395), True, 'import matplotlib.pyplot as plt\n'), ((2418, 2461), 'metpy.plots.add_metpy_logo', 'add_metpy_logo', (['fig', '(140)', '(120)'], {'size': '"""large"""'}), "(fig, 140, 120, size='large')\n", (2432, 2461), False, 'from metpy.plots import add_metpy_logo\n'), ((4734, 4744), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4742, 4744), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1289), 'metpy.cbook.get_test_data', 'get_test_data', (['"""gfs_output.nc"""', '(False)'], {}), "('gfs_output.nc', False)\n", (1265, 1289), False, 'from metpy.cbook import get_test_data\n'), ((983, 1019), 'cartopy.feature.COASTLINE.with_scale', 'cfeature.COASTLINE.with_scale', (['"""50m"""'], {}), "('50m')\n", (1012, 1019), True, 'import cartopy.feature as cfeature\n'), ((2664, 2682), 
'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2680, 2682), True, 'import cartopy.crs as ccrs\n'), ((2799, 2817), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2815, 2817), True, 'import cartopy.crs as ccrs\n'), ((3247, 3265), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3263, 3265), True, 'import cartopy.crs as ccrs\n'), ((3307, 3329), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['(-32)', '(32)'], {}), '(-32, 32)\n', (3320, 3329), True, 'import matplotlib.pyplot as plt\n'), ((3442, 3460), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3458, 3460), True, 'import cartopy.crs as ccrs\n'), ((3912, 3930), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3928, 3930), True, 'import cartopy.crs as ccrs\n'), ((4302, 4320), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4318, 4320), True, 'import cartopy.crs as ccrs\n')] |
"""
Test some individual ONNX operator calls. We should probably find a better way to
test these more exhaustively by hooking into ONNX's testing framework. For now,
I have hand coded some tests for operators that are used in the examples we have
worked on. There could be broken operators.
"""
import numpy as np
import modeci_mdf.onnx_functions as onnx_ops
def test_conv():
    """Exercise onnx_ops.conv with a 5x5 image and a 3x3 box kernel."""
    # (1, 1, 5, 5) input tensor holding the values 0..24 in row-major order.
    image = np.arange(25, dtype=np.float32).reshape(1, 1, 5, 5)
    # (1, 1, 3, 3) all-ones convolution weights.
    kernel = np.ones((1, 1, 3, 3), dtype=np.float32)
    result = onnx_ops.conv(image, kernel)
def test_pad():
    """Exercise onnx_ops.pad with keyword and positional mode arguments."""
    data = np.zeros((3, 2))
    const_val = np.array(1.5)
    pad_widths = np.array([0, 1, 0, 1], dtype=np.int64)
    padded_kw = onnx_ops.pad(data, pad_widths, const_val, mode="constant")
    # Passing the mode positionally must produce the same result.
    padded_pos = onnx_ops.pad(data, pad_widths, const_val, "constant")
    assert np.all(padded_kw == padded_pos)
def test_pad_diff_types():
    """Check that Pad accepts a constant_value whose type differs from the data's type."""
    kwargs = dict(
        data=np.zeros((1, 48, 32, 32), dtype=np.float32),
        pads=np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype=np.int64),
        constant_value=1.5,
        mode="constant",
    )
    onnx_ops.pad(**kwargs)
def test_unsqueeze():
    """Check that unsqueeze inserts a new leading axis of length one."""
    original = np.zeros((3, 2))
    expanded = onnx_ops.unsqueeze(data=original, axes=[0])
    assert expanded.ndim == 3
    assert expanded.shape == (1, 3, 2)
def test_mul():
    """Check element-wise Mul against numpy's * for broadcast and scalar inputs."""
    # Broadcasting case: (1, 3) * (3, 1) -> (3, 3).
    lhs = np.ones((1, 3)) * 2.0
    rhs = np.ones((3, 1))
    assert np.allclose(lhs * rhs, onnx_ops.mul(lhs, rhs))
    # Plain Python scalars.
    lhs, rhs = 1, 2
    assert np.allclose(lhs * rhs, onnx_ops.mul(lhs, rhs))
def test_constantofshape():
    """Check that ConstantOfShape builds a zero-filled array of the requested shape."""
    shape = np.array([4, 4], dtype=np.int64)
    filled = onnx_ops.constantofshape(shape, value=[0])
    assert np.allclose(filled, np.zeros((4, 4), dtype=np.int64))
def test_concat():
    """Check Concat, a variadic-input operator, against numpy.concatenate."""
    pieces = (np.ones(3), np.ones(3), np.ones(3))
    joined = onnx_ops.concat(*pieces, axis=0)
    assert np.allclose(joined, np.concatenate(pieces, axis=0))
| [
"modeci_mdf.onnx_functions.mul",
"modeci_mdf.onnx_functions.conv",
"modeci_mdf.onnx_functions.pad",
"modeci_mdf.onnx_functions.concat",
"numpy.concatenate",
"modeci_mdf.onnx_functions.unsqueeze",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.all"
] | [((1114, 1133), 'modeci_mdf.onnx_functions.conv', 'onnx_ops.conv', (['x', 'W'], {}), '(x, W)\n', (1127, 1133), True, 'import modeci_mdf.onnx_functions as onnx_ops\n'), ((1193, 1209), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (1201, 1209), True, 'import numpy as np\n'), ((1222, 1235), 'numpy.array', 'np.array', (['(1.5)'], {}), '(1.5)\n', (1230, 1235), True, 'import numpy as np\n'), ((1298, 1343), 'modeci_mdf.onnx_functions.pad', 'onnx_ops.pad', (['x', 'pads', 'value'], {'mode': '"""constant"""'}), "(x, pads, value, mode='constant')\n", (1310, 1343), True, 'import modeci_mdf.onnx_functions as onnx_ops\n'), ((1393, 1433), 'modeci_mdf.onnx_functions.pad', 'onnx_ops.pad', (['x', 'pads', 'value', '"""constant"""'], {}), "(x, pads, value, 'constant')\n", (1405, 1433), True, 'import modeci_mdf.onnx_functions as onnx_ops\n'), ((1446, 1465), 'numpy.all', 'np.all', (['(out == out2)'], {}), '(out == out2)\n', (1452, 1465), True, 'import numpy as np\n'), ((1827, 1847), 'modeci_mdf.onnx_functions.pad', 'onnx_ops.pad', ([], {}), '(**args)\n', (1839, 1847), True, 'import modeci_mdf.onnx_functions as onnx_ops\n'), ((1923, 1939), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (1931, 1939), True, 'import numpy as np\n'), ((1966, 2006), 'modeci_mdf.onnx_functions.unsqueeze', 'onnx_ops.unsqueeze', ([], {'data': 'data', 'axes': 'axes'}), '(data=data, axes=axes)\n', (1984, 2006), True, 'import modeci_mdf.onnx_functions as onnx_ops\n'), ((2179, 2194), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (2186, 2194), True, 'import numpy as np\n'), ((2692, 2723), 'modeci_mdf.onnx_functions.concat', 'onnx_ops.concat', (['*input'], {'axis': '(0)'}), '(*input, axis=0)\n', (2707, 2723), True, 'import modeci_mdf.onnx_functions as onnx_ops\n'), ((1644, 1687), 'numpy.zeros', 'np.zeros', (['(1, 48, 32, 32)'], {'dtype': 'np.float32'}), '((1, 48, 32, 32), dtype=np.float32)\n', (1652, 1687), True, 'import numpy as np\n'), ((1705, 1755), 'numpy.array', 'np.array', 
(['[0, 0, 0, 0, 0, 0, 1, 1]'], {'dtype': 'np.int64'}), '([0, 0, 0, 0, 0, 0, 1, 1], dtype=np.int64)\n', (1713, 1755), True, 'import numpy as np\n'), ((2149, 2164), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (2156, 2164), True, 'import numpy as np\n'), ((2225, 2243), 'modeci_mdf.onnx_functions.mul', 'onnx_ops.mul', (['A', 'B'], {}), '(A, B)\n', (2237, 2243), True, 'import modeci_mdf.onnx_functions as onnx_ops\n'), ((2296, 2314), 'modeci_mdf.onnx_functions.mul', 'onnx_ops.mul', (['A', 'B'], {}), '(A, B)\n', (2308, 2314), True, 'import modeci_mdf.onnx_functions as onnx_ops\n'), ((2422, 2454), 'numpy.array', 'np.array', (['[4, 4]'], {'dtype': 'np.int64'}), '([4, 4], dtype=np.int64)\n', (2430, 2454), True, 'import numpy as np\n'), ((2495, 2527), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {'dtype': 'np.int64'}), '((4, 4), dtype=np.int64)\n', (2503, 2527), True, 'import numpy as np\n'), ((2646, 2656), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2653, 2656), True, 'import numpy as np\n'), ((2658, 2668), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2665, 2668), True, 'import numpy as np\n'), ((2670, 2680), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2677, 2680), True, 'import numpy as np\n'), ((2752, 2781), 'numpy.concatenate', 'np.concatenate', (['input'], {'axis': '(0)'}), '(input, axis=0)\n', (2766, 2781), True, 'import numpy as np\n'), ((420, 594), 'numpy.array', 'np.array', (['[[[[0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0, 9.0], [10.0, 11.0, 12.0,\n 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], [20.0, 21.0, 22.0, 23.0, \n 24.0]]]]'], {}), '([[[[0.0, 1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0, 9.0], [10.0, \n 11.0, 12.0, 13.0, 14.0], [15.0, 16.0, 17.0, 18.0, 19.0], [20.0, 21.0, \n 22.0, 23.0, 24.0]]]])\n', (428, 594), True, 'import numpy as np\n'), ((826, 891), 'numpy.array', 'np.array', (['[[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]]'], {}), '([[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]])\n', (834, 891), True, 'import 
numpy as np\n'), ((1247, 1269), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (1255, 1269), True, 'import numpy as np\n')] |
import torch
from torch.utils.data import DataLoader, SubsetRandomSampler
from data import DistanceDataset
from PIL import ImageFile
from utils import AverageMeter
from tqdm import tqdm
import visdom
from options import translation_parse
from pytorch_msssim import ssim
import numpy as np
# Evaluation script: compares translated images with their originals using
# L1 distance and SSIM, and shows sample pairs side by side in Visdom.
# Let PIL load truncated image files instead of raising an error.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# The checkpoint/model under evaluation is chosen via command-line options.
trans_args = translation_parse().parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
visualizer = visdom.Visdom(env='translation vs original')
# Randomly select a validation subset (20%) of the data for evaluation.
validation_split = .2
shuffle_dataset = True
random_seed = 42
distance_dataset = DistanceDataset('datasets/freiburg', translate_name=trans_args.checkpoint_name.replace('.pth', ''))
# Creating data indices for training and validation splits:
dataset_size = len(distance_dataset)
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset:
    # Seed before shuffling so the validation split is reproducible.
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Creating PT data samplers and loaders:
val_sampler = SubsetRandomSampler(val_indices)
distance_dataloader = DataLoader(distance_dataset, batch_size=64, shuffle=False, num_workers=2, pin_memory=True,
                                 sampler=val_sampler, drop_last=True)
# L1 distance between original and translated images, averaged over batches.
distance_func = torch.nn.L1Loss()
distances = AverageMeter('distance', ':3.4f')
ssim_scores = []
for i, data in enumerate(tqdm(distance_dataloader)):
    # data[0] is the original image batch, data[1] the translated batch
    # (presumably produced under translate_name above — confirm in DistanceDataset).
    ori_image = data[0].to(device)
    # trans_image = torch.flip(data[1].to(device), dims=[0])
    trans_image = data[1].to(device)
    distance = distance_func(ori_image, trans_image)
    # SSIM over the batch; data_range=1 assumes images are scaled to [0, 1].
    ssim_score = np.array(ssim(ori_image, trans_image, data_range=1, size_average=True).cpu())
    distances.update(distance.item(), ori_image.size(0))
    ssim_scores = np.append(ssim_scores, ssim_score)
    # Every 5th batch, push one original/translation pair to Visdom for inspection.
    if i % 5 == 0:
        visualizer.images(ori_image[0], win='original{}'.format(i / 5), padding=2,
                          opts=dict(title='original{}'.format(i / 5), caption='original{}'.format(i / 5)))
        visualizer.images(trans_image[0], win='translation{}'.format(i / 5), padding=2,
                          opts=dict(title='translation{}'.format(i / 5), caption='translation{}'.format(i / 5)))
# model is selected in translation_parse().
print('Model: ' + str(trans_args.checkpoint_name.replace('.pth', '')))
print('L1 distance: ' + str(distances.avg))
print('SSIM score: ' + str(np.mean(ssim_scores)))
| [
"tqdm.tqdm",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"utils.AverageMeter",
"torch.nn.L1Loss",
"numpy.floor",
"visdom.Visdom",
"pytorch_msssim.ssim",
"numpy.append",
"numpy.mean",
"torch.cuda.is_available",
"options.translation_parse",
"torch.utils.data.SubsetRandomSampler",
"nu... | [((459, 503), 'visdom.Visdom', 'visdom.Visdom', ([], {'env': '"""translation vs original"""'}), "(env='translation vs original')\n", (472, 503), False, 'import visdom\n'), ((1111, 1143), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['val_indices'], {}), '(val_indices)\n', (1130, 1143), False, 'from torch.utils.data import DataLoader, SubsetRandomSampler\n'), ((1167, 1298), 'torch.utils.data.DataLoader', 'DataLoader', (['distance_dataset'], {'batch_size': '(64)', 'shuffle': '(False)', 'num_workers': '(2)', 'pin_memory': '(True)', 'sampler': 'val_sampler', 'drop_last': '(True)'}), '(distance_dataset, batch_size=64, shuffle=False, num_workers=2,\n pin_memory=True, sampler=val_sampler, drop_last=True)\n', (1177, 1298), False, 'from torch.utils.data import DataLoader, SubsetRandomSampler\n'), ((1345, 1362), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (1360, 1362), False, 'import torch\n'), ((1375, 1408), 'utils.AverageMeter', 'AverageMeter', (['"""distance"""', '""":3.4f"""'], {}), "('distance', ':3.4f')\n", (1387, 1408), False, 'from utils import AverageMeter\n'), ((867, 908), 'numpy.floor', 'np.floor', (['(validation_split * dataset_size)'], {}), '(validation_split * dataset_size)\n', (875, 908), True, 'import numpy as np\n'), ((934, 961), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (948, 961), True, 'import numpy as np\n'), ((966, 992), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (983, 992), True, 'import numpy as np\n'), ((1452, 1477), 'tqdm.tqdm', 'tqdm', (['distance_dataloader'], {}), '(distance_dataloader)\n', (1456, 1477), False, 'from tqdm import tqdm\n'), ((1836, 1870), 'numpy.append', 'np.append', (['ssim_scores', 'ssim_score'], {}), '(ssim_scores, ssim_score)\n', (1845, 1870), True, 'import numpy as np\n'), ((343, 362), 'options.translation_parse', 'translation_parse', ([], {}), '()\n', (360, 362), False, 'from options import translation_parse\n'), 
((408, 433), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (431, 433), False, 'import torch\n'), ((2468, 2488), 'numpy.mean', 'np.mean', (['ssim_scores'], {}), '(ssim_scores)\n', (2475, 2488), True, 'import numpy as np\n'), ((1692, 1753), 'pytorch_msssim.ssim', 'ssim', (['ori_image', 'trans_image'], {'data_range': '(1)', 'size_average': '(True)'}), '(ori_image, trans_image, data_range=1, size_average=True)\n', (1696, 1753), False, 'from pytorch_msssim import ssim\n')] |
# -*- coding: utf-8 -*-
# This file is part of RRMPG.
#
# RRMPG is free software with the aim to provide a playground for experiments
# with hydrological rainfall-runoff-models while achieving competitive
# performance results.
#
# You should have received a copy of the MIT License along with RRMPG. If not,
# see <https://opensource.org/licenses/MIT>
import numpy as np
from numba import njit
@njit
def run_abcmodel(prec, initial_state, params):
    """Simulate the ABC-Model over a precipitation time series.

    This routine is meant to be called through the .simulate() method of the
    ABCModel class rather than directly; it lives in its own file so the
    actual model loop is easy to inspect. Variable naming follows the
    original publication [1].

    Args:
        prec: Numpy [t] array, which contains the precipitation input.
        initial_state: Scalar for the intial state of the storage.
        params: Numpy array of custom dtype, which contains the model parameter.

    Returns:
        qsim: Numpy [t] array with the simulated streamflow.
        storage: Numpy [t] array with the state of the storage of each timestep.

    [1] <NAME> "Streamflow synthesis" Cambridge, Harvard University
        Press, 1967. 139 P. (1967).
    """
    n_steps = len(prec)

    # Unpack the three model parameters from the structured array.
    a = params['a']
    b = params['b']
    c = params['c']

    # Output arrays: simulated streamflow and storage state per timestep.
    qsim = np.zeros(n_steps, np.float64)
    storage = np.zeros(n_steps, np.float64)
    storage[0] = initial_state

    for step in range(1, n_steps):
        prev_storage = storage[step - 1]
        # Streamflow: direct fraction of precipitation plus storage release.
        qsim[step] = (1 - a - b) * prec[step] + c * prev_storage
        # Storage: retains (1 - c) of itself and gains fraction a of rainfall.
        storage[step] = (1 - c) * prev_storage + a * prec[step]

    return qsim, storage
return qsim, storage | [
"numpy.zeros"
] | [((1541, 1576), 'numpy.zeros', 'np.zeros', (['num_timesteps', 'np.float64'], {}), '(num_timesteps, np.float64)\n', (1549, 1576), True, 'import numpy as np\n'), ((1591, 1626), 'numpy.zeros', 'np.zeros', (['num_timesteps', 'np.float64'], {}), '(num_timesteps, np.float64)\n', (1599, 1626), True, 'import numpy as np\n')] |
#<NAME> (c) 2015, MIT License
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt

np.set_printoptions(suppress=True)

# Sampling interval of the position measurements, in seconds.
DT = 0.025

#data = np.loadtxt("gravity.csv",delimiter=",")
data = np.loadtxt("gravity.txt")
# Column 0: position p [cm]; column 1: time t [s].
p, t = data[:, 0], data[:, 1]

# Average velocity over each sampling interval (backward finite differences).
# The first sample assumes the object was at p = 0 one interval earlier.
v = [p[0] / DT]
for position in range(1, len(p)):
    v.append((p[position] - p[position - 1]) / DT)

plt.plot(t, p, 'o', label="Position vs Time", markersize=3)
plt.legend()
plt.show()
plt.plot(t, v, 'o', label="Velocity (from avg velocity) vs Time", markersize=3)
plt.legend()
plt.show()

# Quadratic fit p(t) = a*t^2 + b*t + c; the acceleration is then 2*a.
poly = np.polyfit(t, p, 2)
print("p(t), ax^2+bx+c = {0}x^2 + {1}x + {2}".format(poly[0], poly[1], poly[2]))

# Linear least-squares fit of the finite-difference velocities: v(t) = m*t + c.
# rcond=None selects numpy's modern default cutoff (silences the FutureWarning).
A = np.vstack([t, np.ones(len(t))]).T
vtm, vtc = np.linalg.lstsq(A, v, rcond=None)[0]

# BUG FIX: the derivative of the quadratic fit, p'(t) = 2*a*t + b, must be
# evaluated at the measured times t[i], not at the sample index i (the
# original code used `position`, which is only correct when t[i] == i).
vderiv = [2 * poly[0] * ti + poly[1] for ti in t]

plt.plot(t, vderiv, 'o', label="Velocity (from deriv) vs Time", markersize=3)
plt.legend()
plt.show()

print("v(t) from v_avg, cm/s = {0}x + {1}".format(vtm, vtc))
print("a(t) from v_avg, cm/m^s = {0}".format(vtm))
print("a(t) from v_avg, m/m^s = {0}".format(vtm/100))
print("a(t) from p(T), m/m^s = 2*a/100 = {0}".format(2*poly[0]/100))
| [
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"numpy.linalg.lstsq",
"matplotlib.pyplot.plot",
"numpy.polyfit",
"matplotlib.pyplot.legend",
"numpy.loadtxt"
] | [((105, 139), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (124, 139), True, 'import numpy as np\n'), ((196, 221), 'numpy.loadtxt', 'np.loadtxt', (['"""gravity.txt"""'], {}), "('gravity.txt')\n", (206, 221), True, 'import numpy as np\n'), ((451, 510), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'p', '"""o"""'], {'label': '"""Position vs Time"""', 'markersize': '(3)'}), "(t, p, 'o', label='Position vs Time', markersize=3)\n", (459, 510), True, 'import matplotlib.pyplot as plt\n'), ((512, 524), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (522, 524), True, 'import matplotlib.pyplot as plt\n'), ((525, 535), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (533, 535), True, 'import matplotlib.pyplot as plt\n'), ((536, 615), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'v', '"""o"""'], {'label': '"""Velocity (from avg velocity) vs Time"""', 'markersize': '(3)'}), "(t, v, 'o', label='Velocity (from avg velocity) vs Time', markersize=3)\n", (544, 615), True, 'import matplotlib.pyplot as plt\n'), ((616, 628), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (626, 628), True, 'import matplotlib.pyplot as plt\n'), ((629, 639), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (637, 639), True, 'import matplotlib.pyplot as plt\n'), ((648, 667), 'numpy.polyfit', 'np.polyfit', (['t', 'p', '(2)'], {}), '(t, p, 2)\n', (658, 667), True, 'import numpy as np\n'), ((913, 990), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'vderiv', '"""o"""'], {'label': '"""Velocity (from deriv) vs Time"""', 'markersize': '(3)'}), "(t, vderiv, 'o', label='Velocity (from deriv) vs Time', markersize=3)\n", (921, 990), True, 'import matplotlib.pyplot as plt\n'), ((991, 1003), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1001, 1003), True, 'import matplotlib.pyplot as plt\n'), ((1004, 1014), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1012, 1014), True, 'import matplotlib.pyplot 
as plt\n'), ((796, 817), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'v'], {}), '(A, v)\n', (811, 817), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
import math
def CreateTableSensitivityWoutL2(dictInd, dictL2Ind, dictGr, seq_vectorM, sim):
    """Build a (sim * len(seq_vectorM)) x 23 summary table for three methods.

    Each row holds the per-node budget, the run index, and then — for each of
    the Ind, L2Ind and Gr result dicts (keyed by the per-node budget) — the
    sums of result entries 6, 5, 4, 1, 2, 3 followed by
    (sum(vector_m) - sum(entry 0)) / sum(vector_m), presumably the fraction
    of the total budget that was spent.
    """
    methods = (dictInd, dictL2Ind, dictGr)
    table = np.zeros((sim * len(seq_vectorM), 23))
    for z, vector_m in enumerate(seq_vectorM):
        bud_per_node = vector_m[0]
        total_budget = np.sum(vector_m)
        for run_idx in range(sim):
            row = table[run_idx + z * sim]
            row[0] = bud_per_node
            row[1] = run_idx
            col = 2
            for method in methods:
                run = method[bud_per_node][run_idx]
                # Column order matches the original layout: 6, 5, 4, 1, 2, 3.
                for k in (6, 5, 4, 1, 2, 3):
                    row[col] = np.sum(run[k])
                    col += 1
                row[col] = (total_budget - np.sum(run[0])) / total_budget
                col += 1
    return table
def CreateTableSensitivity(dictInd, dictL2, dictL2Ind, dictGr, seq_vectorM, sim):
    """Build a (sim * len(seq_vectorM)) x 30 summary table for four methods.

    Each row holds the per-node budget, the run index, and then — for each of
    the Ind, L2, L2Ind and Gr result dicts (keyed by the position z within
    seq_vectorM, unlike the WoutL2 variant) — the sums of result entries
    6, 5, 4, 1, 2, 3 followed by
    (sum(vector_m) - sum(entry 0)) / sum(vector_m), presumably the fraction
    of the total budget that was spent.
    """
    methods = (dictInd, dictL2, dictL2Ind, dictGr)
    table = np.zeros((sim * len(seq_vectorM), 30))
    for z, vector_m in enumerate(seq_vectorM):
        total_budget = np.sum(vector_m)
        for run_idx in range(sim):
            row = table[run_idx + z * sim]
            row[0] = vector_m[0]
            row[1] = run_idx
            col = 2
            for method in methods:
                run = method[z][run_idx]
                # Column order matches the original layout: 6, 5, 4, 1, 2, 3.
                for k in (6, 5, 4, 1, 2, 3):
                    row[col] = np.sum(run[k])
                    col += 1
                row[col] = (total_budget - np.sum(run[0])) / total_budget
                col += 1
    return table
def CreateTableParetoL2_L2Ind_Gr(dictL2, dictL2Ind, dictGr, vector_m, multL2, multGr, sim):
    """Build a (sim * len(multL2)) x 24 summary table over multiplier pairs.

    For every pair (multL2[z], multGr[z]) each row holds both multipliers, the
    run index, and then — for the L2 and L2Ind dicts (keyed by the L2
    multiplier) and the Gr dict (keyed by the Gr multiplier) — the sums of
    result entries 6, 5, 4, 1, 2, 3 followed by
    (sum(vector_m) - sum(entry 0)) / sum(vector_m), presumably the fraction
    of the total budget that was spent.
    """
    table = np.zeros((sim * len(multL2), 24))
    total_budget = np.sum(vector_m)
    for z, (l2_key, gr_key) in enumerate(zip(multL2, multGr)):
        for run_idx in range(sim):
            row = table[run_idx + z * sim]
            row[0] = l2_key
            row[1] = gr_key
            row[2] = run_idx
            col = 3
            runs = (dictL2[l2_key][run_idx], dictL2Ind[l2_key][run_idx], dictGr[gr_key][run_idx])
            for run in runs:
                # Column order matches the original layout: 6, 5, 4, 1, 2, 3.
                for k in (6, 5, 4, 1, 2, 3):
                    row[col] = np.sum(run[k])
                    col += 1
                row[col] = (total_budget - np.sum(run[0])) / total_budget
                col += 1
    return table
# def CreateTableParetoInd(dataSP, dataFP, vector_m, sim):
# toRet = []
# for i in range(sim):
# toRet.append([i, 'SP', np.sum(dataSP[i][6]), np.sum(dataSP[i][5]), np.sum(dataSP[i][4]), \
# np.sum(dataSP[i][1]), np.sum(dataSP[i][2]), np.sum(dataSP[i][3]), \
# ((np.sum(vector_m)-np.sum(dataSP[i][0]))/(np.sum(vector_m)))])
# for i in range(sim):
# toRet.append([i, 'FP', np.sum(dataFP[i][6]), np.sum(dataFP[i][5]), np.sum(dataFP[i][4]), \
# np.sum(dataFP[i][1]), np.sum(dataFP[i][2]), np.sum(dataFP[i][3]), \
# ((np.sum(vector_m)-np.sum(dataFP[i][0]))/(np.sum(vector_m)))])
# return toRet
def CreateTableParetoInd(data, vector_m, sim, name = "FP"):
    """Build one summary row per run as a list of lists.

    Each row is [run index, sums of result entries 6, 5, 4, 1, 2, 3,
    (sum(vector_m) - sum(entry 0)) / sum(vector_m)].  The `name` argument is
    kept for call-site compatibility but is not used here.
    """
    total_budget = np.sum(vector_m)
    rows = []
    for run_idx in range(sim):
        run = data[run_idx]
        row = [run_idx]
        # Column order matches the original layout: 6, 5, 4, 1, 2, 3.
        row.extend(np.sum(run[k]) for k in (6, 5, 4, 1, 2, 3))
        row.append((total_budget - np.sum(run[0])) / total_budget)
        rows.append(row)
    return rows
| [
"numpy.sum"
] | [((451, 484), 'numpy.sum', 'np.sum', (['dictInd[budPerNode][i][6]'], {}), '(dictInd[budPerNode][i][6])\n', (457, 484), True, 'import numpy as np\n'), ((522, 555), 'numpy.sum', 'np.sum', (['dictInd[budPerNode][i][5]'], {}), '(dictInd[budPerNode][i][5])\n', (528, 555), True, 'import numpy as np\n'), ((593, 626), 'numpy.sum', 'np.sum', (['dictInd[budPerNode][i][4]'], {}), '(dictInd[budPerNode][i][4])\n', (599, 626), True, 'import numpy as np\n'), ((664, 697), 'numpy.sum', 'np.sum', (['dictInd[budPerNode][i][1]'], {}), '(dictInd[budPerNode][i][1])\n', (670, 697), True, 'import numpy as np\n'), ((735, 768), 'numpy.sum', 'np.sum', (['dictInd[budPerNode][i][2]'], {}), '(dictInd[budPerNode][i][2])\n', (741, 768), True, 'import numpy as np\n'), ((806, 839), 'numpy.sum', 'np.sum', (['dictInd[budPerNode][i][3]'], {}), '(dictInd[budPerNode][i][3])\n', (812, 839), True, 'import numpy as np\n'), ((986, 1021), 'numpy.sum', 'np.sum', (['dictL2Ind[budPerNode][i][6]'], {}), '(dictL2Ind[budPerNode][i][6])\n', (992, 1021), True, 'import numpy as np\n'), ((1060, 1095), 'numpy.sum', 'np.sum', (['dictL2Ind[budPerNode][i][5]'], {}), '(dictL2Ind[budPerNode][i][5])\n', (1066, 1095), True, 'import numpy as np\n'), ((1134, 1169), 'numpy.sum', 'np.sum', (['dictL2Ind[budPerNode][i][4]'], {}), '(dictL2Ind[budPerNode][i][4])\n', (1140, 1169), True, 'import numpy as np\n'), ((1208, 1243), 'numpy.sum', 'np.sum', (['dictL2Ind[budPerNode][i][1]'], {}), '(dictL2Ind[budPerNode][i][1])\n', (1214, 1243), True, 'import numpy as np\n'), ((1282, 1317), 'numpy.sum', 'np.sum', (['dictL2Ind[budPerNode][i][2]'], {}), '(dictL2Ind[budPerNode][i][2])\n', (1288, 1317), True, 'import numpy as np\n'), ((1356, 1391), 'numpy.sum', 'np.sum', (['dictL2Ind[budPerNode][i][3]'], {}), '(dictL2Ind[budPerNode][i][3])\n', (1362, 1391), True, 'import numpy as np\n'), ((1542, 1574), 'numpy.sum', 'np.sum', (['dictGr[budPerNode][i][6]'], {}), '(dictGr[budPerNode][i][6])\n', (1548, 1574), True, 'import numpy as np\n'), ((1613, 
1645), 'numpy.sum', 'np.sum', (['dictGr[budPerNode][i][5]'], {}), '(dictGr[budPerNode][i][5])\n', (1619, 1645), True, 'import numpy as np\n'), ((1684, 1716), 'numpy.sum', 'np.sum', (['dictGr[budPerNode][i][4]'], {}), '(dictGr[budPerNode][i][4])\n', (1690, 1716), True, 'import numpy as np\n'), ((1755, 1787), 'numpy.sum', 'np.sum', (['dictGr[budPerNode][i][1]'], {}), '(dictGr[budPerNode][i][1])\n', (1761, 1787), True, 'import numpy as np\n'), ((1826, 1858), 'numpy.sum', 'np.sum', (['dictGr[budPerNode][i][2]'], {}), '(dictGr[budPerNode][i][2])\n', (1832, 1858), True, 'import numpy as np\n'), ((1897, 1929), 'numpy.sum', 'np.sum', (['dictGr[budPerNode][i][3]'], {}), '(dictGr[budPerNode][i][3])\n', (1903, 1929), True, 'import numpy as np\n'), ((2431, 2455), 'numpy.sum', 'np.sum', (['dictInd[z][i][6]'], {}), '(dictInd[z][i][6])\n', (2437, 2455), True, 'import numpy as np\n'), ((2493, 2517), 'numpy.sum', 'np.sum', (['dictInd[z][i][5]'], {}), '(dictInd[z][i][5])\n', (2499, 2517), True, 'import numpy as np\n'), ((2555, 2579), 'numpy.sum', 'np.sum', (['dictInd[z][i][4]'], {}), '(dictInd[z][i][4])\n', (2561, 2579), True, 'import numpy as np\n'), ((2617, 2641), 'numpy.sum', 'np.sum', (['dictInd[z][i][1]'], {}), '(dictInd[z][i][1])\n', (2623, 2641), True, 'import numpy as np\n'), ((2679, 2703), 'numpy.sum', 'np.sum', (['dictInd[z][i][2]'], {}), '(dictInd[z][i][2])\n', (2685, 2703), True, 'import numpy as np\n'), ((2741, 2765), 'numpy.sum', 'np.sum', (['dictInd[z][i][3]'], {}), '(dictInd[z][i][3])\n', (2747, 2765), True, 'import numpy as np\n'), ((2903, 2926), 'numpy.sum', 'np.sum', (['dictL2[z][i][6]'], {}), '(dictL2[z][i][6])\n', (2909, 2926), True, 'import numpy as np\n'), ((2965, 2988), 'numpy.sum', 'np.sum', (['dictL2[z][i][5]'], {}), '(dictL2[z][i][5])\n', (2971, 2988), True, 'import numpy as np\n'), ((3027, 3050), 'numpy.sum', 'np.sum', (['dictL2[z][i][4]'], {}), '(dictL2[z][i][4])\n', (3033, 3050), True, 'import numpy as np\n'), ((3089, 3112), 'numpy.sum', 'np.sum', 
(['dictL2[z][i][1]'], {}), '(dictL2[z][i][1])\n', (3095, 3112), True, 'import numpy as np\n'), ((3151, 3174), 'numpy.sum', 'np.sum', (['dictL2[z][i][2]'], {}), '(dictL2[z][i][2])\n', (3157, 3174), True, 'import numpy as np\n'), ((3213, 3236), 'numpy.sum', 'np.sum', (['dictL2[z][i][3]'], {}), '(dictL2[z][i][3])\n', (3219, 3236), True, 'import numpy as np\n'), ((3375, 3401), 'numpy.sum', 'np.sum', (['dictL2Ind[z][i][6]'], {}), '(dictL2Ind[z][i][6])\n', (3381, 3401), True, 'import numpy as np\n'), ((3440, 3466), 'numpy.sum', 'np.sum', (['dictL2Ind[z][i][5]'], {}), '(dictL2Ind[z][i][5])\n', (3446, 3466), True, 'import numpy as np\n'), ((3505, 3531), 'numpy.sum', 'np.sum', (['dictL2Ind[z][i][4]'], {}), '(dictL2Ind[z][i][4])\n', (3511, 3531), True, 'import numpy as np\n'), ((3570, 3596), 'numpy.sum', 'np.sum', (['dictL2Ind[z][i][1]'], {}), '(dictL2Ind[z][i][1])\n', (3576, 3596), True, 'import numpy as np\n'), ((3635, 3661), 'numpy.sum', 'np.sum', (['dictL2Ind[z][i][2]'], {}), '(dictL2Ind[z][i][2])\n', (3641, 3661), True, 'import numpy as np\n'), ((3700, 3726), 'numpy.sum', 'np.sum', (['dictL2Ind[z][i][3]'], {}), '(dictL2Ind[z][i][3])\n', (3706, 3726), True, 'import numpy as np\n'), ((3868, 3891), 'numpy.sum', 'np.sum', (['dictGr[z][i][6]'], {}), '(dictGr[z][i][6])\n', (3874, 3891), True, 'import numpy as np\n'), ((3930, 3953), 'numpy.sum', 'np.sum', (['dictGr[z][i][5]'], {}), '(dictGr[z][i][5])\n', (3936, 3953), True, 'import numpy as np\n'), ((3992, 4015), 'numpy.sum', 'np.sum', (['dictGr[z][i][4]'], {}), '(dictGr[z][i][4])\n', (3998, 4015), True, 'import numpy as np\n'), ((4054, 4077), 'numpy.sum', 'np.sum', (['dictGr[z][i][1]'], {}), '(dictGr[z][i][1])\n', (4060, 4077), True, 'import numpy as np\n'), ((4116, 4139), 'numpy.sum', 'np.sum', (['dictGr[z][i][2]'], {}), '(dictGr[z][i][2])\n', (4122, 4139), True, 'import numpy as np\n'), ((4178, 4201), 'numpy.sum', 'np.sum', (['dictGr[z][i][3]'], {}), '(dictGr[z][i][3])\n', (4184, 4201), True, 'import numpy as np\n'), 
((4746, 4774), 'numpy.sum', 'np.sum', (['dictL2[l2Mult][i][6]'], {}), '(dictL2[l2Mult][i][6])\n', (4752, 4774), True, 'import numpy as np\n'), ((4812, 4840), 'numpy.sum', 'np.sum', (['dictL2[l2Mult][i][5]'], {}), '(dictL2[l2Mult][i][5])\n', (4818, 4840), True, 'import numpy as np\n'), ((4878, 4906), 'numpy.sum', 'np.sum', (['dictL2[l2Mult][i][4]'], {}), '(dictL2[l2Mult][i][4])\n', (4884, 4906), True, 'import numpy as np\n'), ((4944, 4972), 'numpy.sum', 'np.sum', (['dictL2[l2Mult][i][1]'], {}), '(dictL2[l2Mult][i][1])\n', (4950, 4972), True, 'import numpy as np\n'), ((5010, 5038), 'numpy.sum', 'np.sum', (['dictL2[l2Mult][i][2]'], {}), '(dictL2[l2Mult][i][2])\n', (5016, 5038), True, 'import numpy as np\n'), ((5076, 5104), 'numpy.sum', 'np.sum', (['dictL2[l2Mult][i][3]'], {}), '(dictL2[l2Mult][i][3])\n', (5082, 5104), True, 'import numpy as np\n'), ((5247, 5278), 'numpy.sum', 'np.sum', (['dictL2Ind[l2Mult][i][6]'], {}), '(dictL2Ind[l2Mult][i][6])\n', (5253, 5278), True, 'import numpy as np\n'), ((5317, 5348), 'numpy.sum', 'np.sum', (['dictL2Ind[l2Mult][i][5]'], {}), '(dictL2Ind[l2Mult][i][5])\n', (5323, 5348), True, 'import numpy as np\n'), ((5387, 5418), 'numpy.sum', 'np.sum', (['dictL2Ind[l2Mult][i][4]'], {}), '(dictL2Ind[l2Mult][i][4])\n', (5393, 5418), True, 'import numpy as np\n'), ((5457, 5488), 'numpy.sum', 'np.sum', (['dictL2Ind[l2Mult][i][1]'], {}), '(dictL2Ind[l2Mult][i][1])\n', (5463, 5488), True, 'import numpy as np\n'), ((5527, 5558), 'numpy.sum', 'np.sum', (['dictL2Ind[l2Mult][i][2]'], {}), '(dictL2Ind[l2Mult][i][2])\n', (5533, 5558), True, 'import numpy as np\n'), ((5597, 5628), 'numpy.sum', 'np.sum', (['dictL2Ind[l2Mult][i][3]'], {}), '(dictL2Ind[l2Mult][i][3])\n', (5603, 5628), True, 'import numpy as np\n'), ((5775, 5803), 'numpy.sum', 'np.sum', (['dictGr[grMult][i][6]'], {}), '(dictGr[grMult][i][6])\n', (5781, 5803), True, 'import numpy as np\n'), ((5842, 5870), 'numpy.sum', 'np.sum', (['dictGr[grMult][i][5]'], {}), '(dictGr[grMult][i][5])\n', (5848, 
5870), True, 'import numpy as np\n'), ((5909, 5937), 'numpy.sum', 'np.sum', (['dictGr[grMult][i][4]'], {}), '(dictGr[grMult][i][4])\n', (5915, 5937), True, 'import numpy as np\n'), ((5976, 6004), 'numpy.sum', 'np.sum', (['dictGr[grMult][i][1]'], {}), '(dictGr[grMult][i][1])\n', (5982, 6004), True, 'import numpy as np\n'), ((6043, 6071), 'numpy.sum', 'np.sum', (['dictGr[grMult][i][2]'], {}), '(dictGr[grMult][i][2])\n', (6049, 6071), True, 'import numpy as np\n'), ((6110, 6138), 'numpy.sum', 'np.sum', (['dictGr[grMult][i][3]'], {}), '(dictGr[grMult][i][3])\n', (6116, 6138), True, 'import numpy as np\n'), ((931, 947), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (937, 947), True, 'import numpy as np\n'), ((1486, 1502), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (1492, 1502), True, 'import numpy as np\n'), ((2021, 2037), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (2027, 2037), True, 'import numpy as np\n'), ((2848, 2864), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (2854, 2864), True, 'import numpy as np\n'), ((3319, 3335), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (3325, 3335), True, 'import numpy as np\n'), ((3812, 3828), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (3818, 3828), True, 'import numpy as np\n'), ((4284, 4300), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (4290, 4300), True, 'import numpy as np\n'), ((5191, 5207), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (5197, 5207), True, 'import numpy as np\n'), ((5719, 5735), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (5725, 5735), True, 'import numpy as np\n'), ((6226, 6242), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (6232, 6242), True, 'import numpy as np\n'), ((7047, 7065), 'numpy.sum', 'np.sum', (['data[i][6]'], {}), '(data[i][6])\n', (7053, 7065), True, 'import numpy as np\n'), ((7067, 7085), 'numpy.sum', 'np.sum', (['data[i][5]'], {}), '(data[i][5])\n', 
(7073, 7085), True, 'import numpy as np\n'), ((7087, 7105), 'numpy.sum', 'np.sum', (['data[i][4]'], {}), '(data[i][4])\n', (7093, 7105), True, 'import numpy as np\n'), ((7117, 7135), 'numpy.sum', 'np.sum', (['data[i][1]'], {}), '(data[i][1])\n', (7123, 7135), True, 'import numpy as np\n'), ((7137, 7155), 'numpy.sum', 'np.sum', (['data[i][2]'], {}), '(data[i][2])\n', (7143, 7155), True, 'import numpy as np\n'), ((7157, 7175), 'numpy.sum', 'np.sum', (['data[i][3]'], {}), '(data[i][3])\n', (7163, 7175), True, 'import numpy as np\n'), ((878, 894), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (884, 894), True, 'import numpy as np\n'), ((895, 928), 'numpy.sum', 'np.sum', (['dictInd[budPerNode][i][0]'], {}), '(dictInd[budPerNode][i][0])\n', (901, 928), True, 'import numpy as np\n'), ((1431, 1447), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (1437, 1447), True, 'import numpy as np\n'), ((1448, 1483), 'numpy.sum', 'np.sum', (['dictL2Ind[budPerNode][i][0]'], {}), '(dictL2Ind[budPerNode][i][0])\n', (1454, 1483), True, 'import numpy as np\n'), ((1969, 1985), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (1975, 1985), True, 'import numpy as np\n'), ((1986, 2018), 'numpy.sum', 'np.sum', (['dictGr[budPerNode][i][0]'], {}), '(dictGr[budPerNode][i][0])\n', (1992, 2018), True, 'import numpy as np\n'), ((2804, 2820), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (2810, 2820), True, 'import numpy as np\n'), ((2821, 2845), 'numpy.sum', 'np.sum', (['dictInd[z][i][0]'], {}), '(dictInd[z][i][0])\n', (2827, 2845), True, 'import numpy as np\n'), ((3276, 3292), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (3282, 3292), True, 'import numpy as np\n'), ((3293, 3316), 'numpy.sum', 'np.sum', (['dictL2[z][i][0]'], {}), '(dictL2[z][i][0])\n', (3299, 3316), True, 'import numpy as np\n'), ((3766, 3782), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (3772, 3782), True, 'import numpy as np\n'), ((3783, 3809), 'numpy.sum', 
'np.sum', (['dictL2Ind[z][i][0]'], {}), '(dictL2Ind[z][i][0])\n', (3789, 3809), True, 'import numpy as np\n'), ((4241, 4257), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (4247, 4257), True, 'import numpy as np\n'), ((4258, 4281), 'numpy.sum', 'np.sum', (['dictGr[z][i][0]'], {}), '(dictGr[z][i][0])\n', (4264, 4281), True, 'import numpy as np\n'), ((5143, 5159), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (5149, 5159), True, 'import numpy as np\n'), ((5160, 5188), 'numpy.sum', 'np.sum', (['dictL2[l2Mult][i][0]'], {}), '(dictL2[l2Mult][i][0])\n', (5166, 5188), True, 'import numpy as np\n'), ((5668, 5684), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (5674, 5684), True, 'import numpy as np\n'), ((5685, 5716), 'numpy.sum', 'np.sum', (['dictL2Ind[l2Mult][i][0]'], {}), '(dictL2Ind[l2Mult][i][0])\n', (5691, 5716), True, 'import numpy as np\n'), ((6178, 6194), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (6184, 6194), True, 'import numpy as np\n'), ((6195, 6223), 'numpy.sum', 'np.sum', (['dictGr[grMult][i][0]'], {}), '(dictGr[grMult][i][0])\n', (6201, 6223), True, 'import numpy as np\n'), ((7227, 7243), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (7233, 7243), True, 'import numpy as np\n'), ((7189, 7205), 'numpy.sum', 'np.sum', (['vector_m'], {}), '(vector_m)\n', (7195, 7205), True, 'import numpy as np\n'), ((7206, 7224), 'numpy.sum', 'np.sum', (['data[i][0]'], {}), '(data[i][0])\n', (7212, 7224), True, 'import numpy as np\n')] |
import numpy as np
from . import utils
def _cutoff(scores: np.array, cutoff_threshold: float, unsorted: bool = True):
if unsorted:
indices = np.arange(start=0, stop=scores.shape[0])
scores = scores
else:
indices = np.argsort(scores, axis=0)
scores = scores[indices]
scores_mean = np.mean(scores)
scores_std = np.std(scores)
threshold = scores_mean + scores_std * cutoff_threshold
if threshold > np.max(scores): # use 90th percentile outlier score
threshold = np.percentile(scores, 90)
candidate_outlier_indices = np.where(scores > threshold)[0]
candidate_inlier_indices = np.where(scores <= threshold)[0]
if not unsorted:
candidate_outlier_indices = indices[candidate_outlier_indices]
candidate_inlier_indices = indices[candidate_inlier_indices]
return candidate_inlier_indices, candidate_outlier_indices
def _generate_one_batch(
data: np.array,
inlier_indices: np.array,
positive_weights: np.array,
outlier_indices: np.array,
negative_weights: np.array,
batch_size: int,
):
anchors = np.zeros([batch_size], dtype=int)
positives = np.zeros([batch_size], dtype=int)
negatives = np.zeros([batch_size], dtype=int)
for i in range(batch_size):
anchor_sample = np.random.choice(inlier_indices.shape[0], p=positive_weights)
anchors[i] = inlier_indices[anchor_sample]
positive_sample = np.random.choice(inlier_indices.shape[0])
while anchor_sample == positive_sample:
positive_sample = np.random.choice(inlier_indices.shape[0], 1)
positives[i] = inlier_indices[positive_sample]
negative_sample = np.random.choice(outlier_indices.shape[0], p=negative_weights)
negatives[i] = outlier_indices[negative_sample]
anchors = data[anchors]
positives = data[positives]
negatives = data[negatives]
return anchors, positives, negatives
def batch_generator(
    data: np.array,
    candidate_scores: np.array,
    batch_size: int,
    cutoff_threshold: float = None,
    unsorted: bool = True,
):
    """Infinite generator of triplet batches for metric-learning training.

    The candidate scores are split into inliers and outliers via `_cutoff`.
    Inliers act as anchors/positives, weighted inversely to their score;
    outliers act as negatives, weighted proportionally to their score.
    Yields ``([anchors, positives, negatives], None)`` forever.
    """
    if cutoff_threshold is None:
        cutoff_threshold = np.sqrt(3)
    inlier_indices, outlier_indices = _cutoff(candidate_scores, cutoff_threshold, unsorted)

    inlier_scores = candidate_scores[inlier_indices]
    # Invert inlier scores so that lower outlier scores get higher weight.
    inverted = np.sum(inlier_scores) - inlier_scores
    positive_weights = (inverted / np.sum(inverted)).flatten()

    outlier_scores = candidate_scores[outlier_indices]
    negative_weights = (outlier_scores / np.sum(outlier_scores)).flatten()

    while True:
        anchors, positives, negatives = _generate_one_batch(
            data=data,
            inlier_indices=inlier_indices,
            positive_weights=positive_weights,
            outlier_indices=outlier_indices,
            negative_weights=negative_weights,
            batch_size=batch_size,
        )
        yield [anchors, positives, negatives], None
| [
"numpy.sum",
"numpy.std",
"numpy.zeros",
"numpy.argsort",
"numpy.percentile",
"numpy.max",
"numpy.mean",
"numpy.arange",
"numpy.where",
"numpy.random.choice",
"numpy.sqrt"
] | [((328, 343), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (335, 343), True, 'import numpy as np\n'), ((361, 375), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (367, 375), True, 'import numpy as np\n'), ((1143, 1176), 'numpy.zeros', 'np.zeros', (['[batch_size]'], {'dtype': 'int'}), '([batch_size], dtype=int)\n', (1151, 1176), True, 'import numpy as np\n'), ((1193, 1226), 'numpy.zeros', 'np.zeros', (['[batch_size]'], {'dtype': 'int'}), '([batch_size], dtype=int)\n', (1201, 1226), True, 'import numpy as np\n'), ((1243, 1276), 'numpy.zeros', 'np.zeros', (['[batch_size]'], {'dtype': 'int'}), '([batch_size], dtype=int)\n', (1251, 1276), True, 'import numpy as np\n'), ((2464, 2482), 'numpy.sum', 'np.sum', (['transforms'], {}), '(transforms)\n', (2470, 2482), True, 'import numpy as np\n'), ((2584, 2625), 'numpy.sum', 'np.sum', (['candidate_scores[outlier_indices]'], {}), '(candidate_scores[outlier_indices])\n', (2590, 2625), True, 'import numpy as np\n'), ((156, 196), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'scores.shape[0]'}), '(start=0, stop=scores.shape[0])\n', (165, 196), True, 'import numpy as np\n'), ((249, 275), 'numpy.argsort', 'np.argsort', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (259, 275), True, 'import numpy as np\n'), ((455, 469), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (461, 469), True, 'import numpy as np\n'), ((528, 553), 'numpy.percentile', 'np.percentile', (['scores', '(90)'], {}), '(scores, 90)\n', (541, 553), True, 'import numpy as np\n'), ((587, 615), 'numpy.where', 'np.where', (['(scores > threshold)'], {}), '(scores > threshold)\n', (595, 615), True, 'import numpy as np\n'), ((650, 679), 'numpy.where', 'np.where', (['(scores <= threshold)'], {}), '(scores <= threshold)\n', (658, 679), True, 'import numpy as np\n'), ((1334, 1395), 'numpy.random.choice', 'np.random.choice', (['inlier_indices.shape[0]'], {'p': 'positive_weights'}), '(inlier_indices.shape[0], 
p=positive_weights)\n', (1350, 1395), True, 'import numpy as np\n'), ((1474, 1515), 'numpy.random.choice', 'np.random.choice', (['inlier_indices.shape[0]'], {}), '(inlier_indices.shape[0])\n', (1490, 1515), True, 'import numpy as np\n'), ((1721, 1783), 'numpy.random.choice', 'np.random.choice', (['outlier_indices.shape[0]'], {'p': 'negative_weights'}), '(outlier_indices.shape[0], p=negative_weights)\n', (1737, 1783), True, 'import numpy as np\n'), ((2187, 2197), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (2194, 2197), True, 'import numpy as np\n'), ((2358, 2398), 'numpy.sum', 'np.sum', (['candidate_scores[inlier_indices]'], {}), '(candidate_scores[inlier_indices])\n', (2364, 2398), True, 'import numpy as np\n'), ((1594, 1638), 'numpy.random.choice', 'np.random.choice', (['inlier_indices.shape[0]', '(1)'], {}), '(inlier_indices.shape[0], 1)\n', (1610, 1638), True, 'import numpy as np\n')] |
'''
BSD 3-Clause License
Copyright (c) 2017, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import numpy as np
import matplotlib.pyplot as plt
def pca(X, num_principal_components, show_scree = False, save_scree = False):
    """
    Performs Principal Component Analysis for data dimensionality reduction.
    Assumes that rows pertain to data points and columns to variables.

    The columns are mean-centred, the covariance matrix of the centred data
    is eigendecomposed, and X is projected onto the eigenvectors belonging to
    the largest eigenvalues.

    Args:
        X: (n_samples, n_features) array.
        num_principal_components: target dimensionality.
        show_scree: display a scree plot of the eigenvalues.
        save_scree: additionally save the scree plot to 'scree.png'.

    Returns:
        (n_samples, num_principal_components) array, or X itself when
        num_principal_components equals the number of features.

    Raises:
        ValueError: for an empty matrix or when more components are requested
            than there are features.
    """
    #Do some sanity checking.
    if X.shape[0] == 0:
        raise ValueError("Cannot perform PCA on an empty matrix.")
    if X.shape[1] < num_principal_components:
        raise ValueError("Cannot reduce %s dimensional data to %d dimensions." % (X.shape[1], num_principal_components))
    if X.shape[1] == num_principal_components:
        return X
    #Subtract the mean from each column (broadcasting replaces the old
    #row-replicated mean matrix, which allocated an n x d copy).
    X_mean_reduced = X - np.mean(X, axis=0)
    #Get the covariance matrix of the mean subtracted data.
    cov = np.cov(X_mean_reduced, rowvar = False)
    #BUG FIX: np.linalg.eig on a (symmetric) covariance matrix can return
    #complex eigenvalues/eigenvectors due to numerical noise; eigh is meant
    #for symmetric matrices and guarantees real output.
    eig_vals, eig_vecs = np.linalg.eigh(cov)
    sorted_indices = eig_vals.argsort()[::-1]
    #Reduce dimensionality.
    X_reduced = np.dot(X, eig_vecs[:, sorted_indices[0:num_principal_components]])
    #Plot, if requested.
    if show_scree:
        __plot_scree(eig_vals, sorted_indices[::-1], num_principal_components, save_scree)
    return X_reduced
def __plot_scree(eig_vals, sorted_indices, num_principal_components, save_plot = False):
    """
    Displays a scree plot (sorted and normalised Eigenvalues).
    Optionally, one can save the plot to a file named 'scree.png'

    BUG FIX: the previous version sorted `eig_vals` and then re-indexed the
    *sorted* array with argsort indices of the *original* array, scrambling
    the plotted order; it also computed the eigenvalue sum but never applied
    it, so the "Normalised Eigenvalues" label was wrong.  The eigenvalues are
    now plotted in descending order, normalised to sum to 1.
    (`sorted_indices` and `num_principal_components` are kept in the
    signature for interface compatibility.)
    """
    #Sort (descending) and normalise the eigenvalues.
    eig_sum = np.sum(eig_vals)
    values = np.sort(eig_vals)[::-1] / eig_sum
    #Plot.
    x = np.arange(1, values.shape[0] + 1)
    plt.figure()
    plt.plot(x, values)
    plt.xticks(x)
    plt.xlabel("Sorted Eigenvalue IDs")
    plt.ylabel("Normalised Eigenvalues")
    plt.title("PCA Scree Plot")
    plt.grid(True)
    if save_plot:
        plt.savefig("scree.png")
    #plt.show()
| [
"matplotlib.pyplot.title",
"numpy.sum",
"numpy.subtract",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"numpy.linalg.eig",
"numpy.sort",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.xticks",
"numpy.dot",
"numpy.cov",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.sav... | [((2304, 2325), 'numpy.subtract', 'np.subtract', (['X', 'means'], {}), '(X, means)\n', (2315, 2325), True, 'import numpy as np\n'), ((2401, 2437), 'numpy.cov', 'np.cov', (['X_mean_reduced'], {'rowvar': '(False)'}), '(X_mean_reduced, rowvar=False)\n', (2407, 2437), True, 'import numpy as np\n'), ((2537, 2555), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (2550, 2555), True, 'import numpy as np\n'), ((2651, 2717), 'numpy.dot', 'np.dot', (['X', 'eig_vecs[:, sorted_indices[0:num_principal_components]]'], {}), '(X, eig_vecs[:, sorted_indices[0:num_principal_components]])\n', (2657, 2717), True, 'import numpy as np\n'), ((3168, 3185), 'numpy.sort', 'np.sort', (['eig_vals'], {}), '(eig_vals)\n', (3175, 3185), True, 'import numpy as np\n'), ((3200, 3216), 'numpy.sum', 'np.sum', (['eig_vals'], {}), '(eig_vals)\n', (3206, 3216), True, 'import numpy as np\n'), ((3287, 3299), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3297, 3299), True, 'import matplotlib.pyplot as plt\n'), ((3304, 3341), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'eig_vals[sorted_indices]'], {}), '(x, eig_vals[sorted_indices])\n', (3312, 3341), True, 'import matplotlib.pyplot as plt\n'), ((3346, 3359), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x'], {}), '(x)\n', (3356, 3359), True, 'import matplotlib.pyplot as plt\n'), ((3364, 3399), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sorted Eigenvalue IDs"""'], {}), "('Sorted Eigenvalue IDs')\n", (3374, 3399), True, 'import matplotlib.pyplot as plt\n'), ((3404, 3440), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalised Eigenvalues"""'], {}), "('Normalised Eigenvalues')\n", (3414, 3440), True, 'import matplotlib.pyplot as plt\n'), ((3445, 3472), 'matplotlib.pyplot.title', 'plt.title', (['"""PCA Scree Plot"""'], {}), "('PCA Scree Plot')\n", (3454, 3472), True, 'import matplotlib.pyplot as plt\n'), ((3477, 3491), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3485, 
3491), True, 'import matplotlib.pyplot as plt\n'), ((3518, 3542), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""scree.png"""'], {}), "('scree.png')\n", (3529, 3542), True, 'import matplotlib.pyplot as plt\n'), ((2248, 2266), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2255, 2266), True, 'import numpy as np\n')] |
import copy
import random
import sys
from functools import wraps
from inspect import isclass
import numpy as np
from deap.gp import PrimitiveTree, compile, cxOnePoint, mutUniform, mutShrink, mutInsert
from scipy.special import softmax
class MultipleGeneGP:
    """An individual made of several GP trees ("genes").

    NOTE(review): ``self.coef`` is assigned outside this class (presumably
    per-gene importance weights from a fitted linear model); weight_select
    and deterministic_select assume it already exists.
    """

    def __init__(self, content, gene_num):
        # Draw `gene_num` independent trees from the `content` generator.
        self.gene_num = gene_num
        self.gene = [PrimitiveTree(content()) for _ in range(self.gene_num)]

    def random_select(self):
        """Return one gene chosen uniformly at random."""
        return self.gene[random.randint(0, self.gene_num - 1)]

    def weight_select(self, positive=True):
        """Sample a gene with probability softmax(|coef|) (or softmax(-|coef|))."""
        magnitudes = np.abs(self.coef)
        probs = softmax(magnitudes) if positive else softmax(-magnitudes)
        chosen = np.random.choice(np.arange(len(magnitudes)), p=probs)
        return self.gene[chosen]

    def deterministic_select(self):
        """Return the gene with the smallest |coef| (least important)."""
        return self.gene[np.argmin(np.abs(self.coef))]

    def __len__(self):
        # Total node count across all genes.
        return sum(len(g) for g in self.gene)
def multiple_gene_evaluation(compiled_genes, x):
    """Evaluate every compiled gene on the shared argument sequence `x`."""
    return [gene(*x) for gene in compiled_genes]
def multiple_gene_initialization(container, generator, gene_num=5):
    # Thin factory: delegate to `container` (typically MultipleGeneGP), which
    # draws `gene_num` trees from `generator`.
    return container(generator, gene_num)
def multiple_gene_compile(expr: MultipleGeneGP, pset):
    """Compile every gene of `expr` into a callable against the primitive set."""
    return [compile(gene, pset) for gene in expr.gene]
def cxOnePoint_multiple_gene(ind1: MultipleGeneGP, ind2: MultipleGeneGP):
    """One-point crossover between one uniformly chosen gene of each parent."""
    gene_a = ind1.random_select()
    gene_b = ind2.random_select()
    cxOnePoint(gene_a, gene_b)
    return ind1, ind2
def cxOnePoint_multiple_all_gene(ind1: MultipleGeneGP, ind2: MultipleGeneGP, permutation=False):
    """Cross over every gene of `ind1` with a gene of `ind2`.

    With ``permutation=False`` genes are paired positionally; otherwise both
    gene orders are shuffled independently before pairing.
    """
    order_a = np.arange(0, len(ind1.gene))
    order_b = np.arange(0, len(ind2.gene))
    if permutation:
        order_a = np.random.permutation(order_a)
        order_b = np.random.permutation(order_b)
    for idx_a, idx_b in zip(order_a, order_b):
        cxOnePoint(ind1.gene[idx_a], ind2.gene[idx_b])
    return ind1, ind2
def mutUniform_multiple_gene(individual: MultipleGeneGP, expr, pset):
    """Apply deap's uniform mutation to one uniformly chosen gene."""
    target = individual.random_select()
    mutUniform(target, expr, pset)
    return individual,
def mutUniform_multiple_gene_with_prob(individual: MultipleGeneGP, expr, pset, terminal_probs, primitive_probs):
    # Mutate a random subtree of a random gene, regrowing it with the
    # supplied terminal/primitive sampling probabilities (see
    # genFull_with_prob / generate_with_prob).
    root_individual = individual
    individual = individual.random_select()
    # Pick a random node and replace the entire subtree rooted there.
    index = random.randrange(len(individual))
    slice_ = individual.searchSubtree(index)
    type_ = individual[index].ret
    individual[slice_] = expr(pset=pset, type_=type_, terminal_probs=terminal_probs, primitive_probs=primitive_probs)
    return root_individual,
def mutShrink_multiple_gene(individual: MultipleGeneGP):
    """Shrink-mutate one uniformly chosen gene of the individual."""
    target = individual.random_select()
    mutShrink(target)
    return individual,
def mutInsert_multiple_gene(individual: MultipleGeneGP, pset):
    """Insert-mutate one uniformly chosen gene of the individual."""
    target = individual.random_select()
    mutInsert(target, pset)
    return individual,
def cxOnePoint_multiple_gene_weight(ind1: MultipleGeneGP, ind2: MultipleGeneGP):
    """Crossover between genes sampled proportionally to |coef| importance."""
    gene_a = ind1.weight_select()
    gene_b = ind2.weight_select()
    cxOnePoint(gene_a, gene_b)
    return ind1, ind2
def mutWeight_multiple_gene(individual: MultipleGeneGP, expr, pset, threshold_ratio=0.2):
    """Replace every low-importance gene with a mutated copy of a good one.

    A pool of important ("good") features is built from the individual
    itself; each gene whose coefficient falls below the returned threshold is
    replaced by a uniform mutation of a randomly chosen good feature.

    Returns:
        A 1-tuple containing the (mutated) individual, deap-style.
    """
    good_features, threshold = construct_feature_pools([individual], True, threshold_ratio=threshold_ratio)
    # The original nested helper carried a hard-coded `positive = False`
    # flag, so only the "coefficient below threshold" branch was reachable;
    # the dead branch has been removed and the helper inlined.
    for i, c in enumerate(individual.coef):
        if c < threshold:
            donor = copy.deepcopy(random.choice(good_features))
            new_features = mutUniform(donor, expr, pset)
            individual.gene[i] = new_features[0]
    return individual,
def construct_feature_pools(pop, positive, threshold_ratio=0.2,
                            good_features_threshold=None):
    """Collect a pool of genes from `pop`, split by coefficient importance.

    positive=True  -> gather important features (coef >= threshold);
    positive=False -> gather unimportant features (coef < threshold).

    `good_features_threshold` selects how the collection threshold is
    computed: None -> quantile(threshold_ratio); 'mean' -> mean; any other
    value -> quantile at that level.

    NOTE(review): after collecting, `threshold` is unconditionally
    recomputed as quantile(threshold_ratio) and that value is returned, so
    `good_features_threshold` only affects which features are collected,
    never the returned threshold.  Verify this is intended.

    Returns:
        (good_features, threshold)
    """
    # positive: get all important features
    # negative: get all unimportant features
    good_features = []
    if good_features_threshold == None:
        threshold = np.quantile([ind.coef for ind in pop], threshold_ratio)
    elif good_features_threshold == 'mean':
        threshold = np.mean([ind.coef for ind in pop])
    else:
        threshold = np.quantile([ind.coef for ind in pop], good_features_threshold)
    def add_features(ind):
        # Closure over the `threshold` computed above and the shared pool.
        for c, x in zip(ind.coef, ind.gene):
            if (positive and c >= threshold) or (not positive and c < threshold):
                good_features.append(x)
    for ind in pop:
        add_features(ind)
    threshold = np.quantile([ind.coef for ind in pop], threshold_ratio)
    return good_features, threshold
def feature_crossover_cross(ind1, ind2, threshold_ratio):
    """Crossover both parents against a pool of important features drawn
    from the two parents themselves; low-importance genes are replaced."""
    parents = [ind1, ind2]
    good_features, threshold = construct_feature_pools(parents, True, threshold_ratio=threshold_ratio)
    return [
        cxOnePoint_multiple_gene_weight_plus(parent, good_features, threshold, False)
        for parent in parents
    ]
def feature_crossover_cross_global(ind1, ind2, regressor):
    """Crossover both parents against the regressor's global pool of good
    features, replacing genes below the regressor's crossover threshold."""
    return [
        cxOnePoint_multiple_gene_weight_plus(parent, regressor.good_features, regressor.cx_threshold, False)
        for parent in [ind1, ind2]
    ]
def feature_mutation_global(individual: MultipleGeneGP, expr, pset, regressor):
    """Replace each gene whose coefficient is below the regressor's crossover
    threshold with a uniform mutation of a globally good feature."""
    threshold = regressor.cx_threshold
    for i, c in enumerate(individual.coef):
        if c < threshold:
            donor = copy.deepcopy(random.choice(regressor.good_features))
            individual.gene[i] = mutUniform(donor, expr, pset)[0]
    return individual,
def feature_crossover(ind1, ind2, positive, threshold_ratio):
    """Crossover both parents against a feature pool built from them.

    `positive` controls both which features enter the pool (important vs
    unimportant) and which genes of each parent are replaced.
    """
    parents = [ind1, ind2]
    good_features, threshold = construct_feature_pools(parents, positive, threshold_ratio=threshold_ratio)
    return [
        cxOnePoint_multiple_gene_weight_plus(parent, good_features, threshold, positive)
        for parent in parents
    ]
def cxOnePoint_multiple_gene_weight_plus(ind: MultipleGeneGP, good_features, threshold, positive):
    # Replace selected genes of `ind` with offspring of two random pool
    # features: for each gene whose coefficient is on the `positive` side of
    # `threshold`, cross two (deep-copied) pool features and keep one child.
    def replaces_features(ind: MultipleGeneGP):
        for i, c in enumerate(ind.coef):
            if (positive and c >= threshold) or (not positive and c < threshold):
                # Deep copies keep the shared pool itself unmodified.
                new_features = cxOnePoint(copy.deepcopy(random.choice(good_features)),
                                          copy.deepcopy(random.choice(good_features)))
                ind.gene[i] = random.choice(new_features)
    replaces_features(ind)
    return ind
def mutUniform_multiple_gene_weight(individual: MultipleGeneGP, expr, pset):
    """Uniform-mutate a gene sampled proportionally to |coef| importance."""
    target = individual.weight_select()
    mutUniform(target, expr, pset)
    return individual,
def cxOnePoint_multiple_gene_deterministic(ind1: MultipleGeneGP, ind2: MultipleGeneGP):
    """One-point crossover between each parent's deterministically selected gene."""
    gene_a = ind1.deterministic_select()
    gene_b = ind2.deterministic_select()
    cxOnePoint(gene_a, gene_b)
    return ind1, ind2
def mutUniform_multiple_gene_deterministic(individual: MultipleGeneGP, expr, pset):
    """Uniform-mutate the deterministically selected gene of the individual."""
    target = individual.deterministic_select()
    mutUniform(target, expr, pset)
    return individual,
def staticLimit_multiple_gene(key, max_value):
    """Decorator factory enforcing a per-gene static limit on offspring.

    `key` measures a gene (e.g. a height getter or ``len``) and `max_value`
    is the limit, given either as a plain number or as a zero-argument
    callable that is re-evaluated for every check.  Any offspring containing
    a gene over the limit is discarded and replaced by a randomly chosen
    deep copy of the (pre-operator) input individuals.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Snapshot the inputs before the (in-place) variation runs.
            backups = [copy.deepcopy(ind) for ind in args]
            offspring = list(func(*args, **kwargs))
            for idx, child in enumerate(offspring):
                over_limit = any(
                    key(gene) > (max_value() if callable(max_value) else max_value)
                    for gene in child.gene
                )
                if over_limit:
                    # Reject the child: fall back to one of the saved inputs.
                    offspring[idx] = random.choice(backups)
            return offspring
        return wrapper
    return decorator
def result_calculation(func, data, original_features):
    # Evaluate every compiled gene on the transposed data (each column of
    # `data` becomes one gene argument), then coerce each gene's output to a
    # 1-D array of length len(data) and stack the results column-wise.
    result = multiple_gene_evaluation(func, data.T)
    for i in range(len(result)):
        yp = result[i]
        if not isinstance(yp, np.ndarray):
            # NOTE(review): a plain Python scalar (e.g. a constant-only
            # tree) is replaced by zeros rather than broadcast to its value,
            # unlike the single-element ndarray case below -- verify intended.
            yp = np.full(len(data), 0)
        elif yp.size == 1:
            # A single-element ndarray is broadcast to its value.
            yp = np.full(len(data), yp)
        result[i] = yp
    if original_features:
        # Append the raw input features after the evolved ones.
        result = np.concatenate([np.array(result).T, data], axis=1)
    else:
        result = np.array(result).T
    # Replace NaN/inf produced by unsafe operators with finite numbers.
    result = np.nan_to_num(result)
    return result
def genFull_with_prob(pset, min_, max_, terminal_probs, primitive_probs, type_=None):
    """Generate a full tree with biased terminal/primitive sampling.

    Expression generation stops exactly when the depth reaches the randomly
    drawn height, i.e. every branch is grown to the same depth.
    """
    return generate_with_prob(
        pset, min_, max_,
        lambda height, depth: depth == height,
        terminal_probs, primitive_probs, type_,
    )
def generate_with_prob(pset, min_, max_, condition, terminal_probs, primitive_probs, type_=None):
    # Iterative (stack-based) tree generation, adapted from deap's
    # gp.generate, but sampling terminals/primitives with the supplied
    # probability vectors instead of uniformly.
    #
    # `condition(height, depth)` decides when a branch is capped with a
    # terminal; `height` is drawn uniformly from [min_, max_].
    # Returns the tree as a flat prefix-ordered list of nodes.
    if type_ is None:
        type_ = pset.ret
    expr = []
    height = random.randint(min_, max_)
    stack = [(0, type_)]
    while len(stack) != 0:
        depth, type_ = stack.pop()
        if condition(height, depth):
            # Cap this branch: sample a terminal of the required type.
            try:
                term = np.random.choice(pset.terminals[type_], p=terminal_probs.flatten())
            except IndexError:
                _, _, traceback = sys.exc_info()
                raise IndexError("The gp.generate function tried to add " \
                                 "a terminal of type '%s', but there is " \
                                 "none available." % (type_,)).with_traceback(traceback)
            if isclass(term):
                # Ephemeral-constant classes are instantiated on use.
                term = term()
            expr.append(term)
        else:
            # Grow: sample a primitive and schedule its arguments.
            try:
                prim = np.random.choice(pset.primitives[type_], p=primitive_probs.flatten())
            except IndexError:
                _, _, traceback = sys.exc_info()
                raise IndexError("The gp.generate function tried to add " \
                                 "a primitive of type '%s', but there is " \
                                 "none available." % (type_,)).with_traceback(traceback)
            expr.append(prim)
            # Reversed so arguments pop in left-to-right (prefix) order.
            for arg in reversed(prim.args):
                stack.append((depth + 1, arg))
    return expr
| [
"copy.deepcopy",
"numpy.quantile",
"numpy.abs",
"random.randint",
"numpy.nan_to_num",
"numpy.argmax",
"inspect.isclass",
"random.choice",
"deap.gp.cxOnePoint",
"numpy.mean",
"deap.gp.compile",
"numpy.array",
"functools.wraps",
"sys.exc_info"
] | [((4531, 4586), 'numpy.quantile', 'np.quantile', (['[ind.coef for ind in pop]', 'threshold_ratio'], {}), '([ind.coef for ind in pop], threshold_ratio)\n', (4542, 4586), True, 'import numpy as np\n'), ((8488, 8509), 'numpy.nan_to_num', 'np.nan_to_num', (['result'], {}), '(result)\n', (8501, 8509), True, 'import numpy as np\n'), ((9032, 9058), 'random.randint', 'random.randint', (['min_', 'max_'], {}), '(min_, max_)\n', (9046, 9058), False, 'import random\n'), ((610, 627), 'numpy.abs', 'np.abs', (['self.coef'], {}), '(self.coef)\n', (616, 627), True, 'import numpy as np\n'), ((886, 903), 'numpy.abs', 'np.abs', (['self.coef'], {}), '(self.coef)\n', (892, 903), True, 'import numpy as np\n'), ((2025, 2063), 'deap.gp.cxOnePoint', 'cxOnePoint', (['ind1.gene[a]', 'ind2.gene[b]'], {}), '(ind1.gene[a], ind2.gene[b])\n', (2035, 2063), False, 'from deap.gp import PrimitiveTree, compile, cxOnePoint, mutUniform, mutShrink, mutInsert\n'), ((4023, 4078), 'numpy.quantile', 'np.quantile', (['[ind.coef for ind in pop]', 'threshold_ratio'], {}), '([ind.coef for ind in pop], threshold_ratio)\n', (4034, 4078), True, 'import numpy as np\n'), ((7221, 7232), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (7226, 7232), False, 'from functools import wraps\n'), ((510, 546), 'random.randint', 'random.randint', (['(0)', '(self.gene_num - 1)'], {}), '(0, self.gene_num - 1)\n', (524, 546), False, 'import random\n'), ((929, 947), 'numpy.argmax', 'np.argmax', (['(-weight)'], {}), '(-weight)\n', (938, 947), True, 'import numpy as np\n'), ((1418, 1437), 'deap.gp.compile', 'compile', (['gene', 'pset'], {}), '(gene, pset)\n', (1425, 1437), False, 'from deap.gp import PrimitiveTree, compile, cxOnePoint, mutUniform, mutShrink, mutInsert\n'), ((4143, 4177), 'numpy.mean', 'np.mean', (['[ind.coef for ind in pop]'], {}), '([ind.coef for ind in pop])\n', (4150, 4177), True, 'import numpy as np\n'), ((4208, 4271), 'numpy.quantile', 'np.quantile', (['[ind.coef for ind in pop]', 
'good_features_threshold'], {}), '([ind.coef for ind in pop], good_features_threshold)\n', (4219, 4271), True, 'import numpy as np\n'), ((8456, 8472), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (8464, 8472), True, 'import numpy as np\n'), ((9627, 9640), 'inspect.isclass', 'isclass', (['term'], {}), '(term)\n', (9634, 9640), False, 'from inspect import isclass\n'), ((6554, 6581), 'random.choice', 'random.choice', (['new_features'], {}), '(new_features)\n', (6567, 6581), False, 'import random\n'), ((7296, 7314), 'copy.deepcopy', 'copy.deepcopy', (['ind'], {}), '(ind)\n', (7309, 7314), False, 'import copy\n'), ((7899, 7923), 'random.choice', 'random.choice', (['keep_inds'], {}), '(keep_inds)\n', (7912, 7923), False, 'import random\n'), ((8394, 8410), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (8402, 8410), True, 'import numpy as np\n'), ((9356, 9370), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (9368, 9370), False, 'import sys\n'), ((9891, 9905), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (9903, 9905), False, 'import sys\n'), ((3580, 3608), 'random.choice', 'random.choice', (['good_features'], {}), '(good_features)\n', (3593, 3608), False, 'import random\n'), ((5555, 5593), 'random.choice', 'random.choice', (['regressor.good_features'], {}), '(regressor.good_features)\n', (5568, 5593), False, 'import random\n'), ((6406, 6434), 'random.choice', 'random.choice', (['good_features'], {}), '(good_features)\n', (6419, 6434), False, 'import random\n'), ((6493, 6521), 'random.choice', 'random.choice', (['good_features'], {}), '(good_features)\n', (6506, 6521), False, 'import random\n')] |
import csv
from sklearn.ensemble import *
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
import pickle
from sklearn import datasets
import numpy as np
from pprint import pprint
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import *
import pickle, time, io
from sklearn.tree import *
from sklearn import tree
from sklearn.datasets import fetch_openml
import time
import matplotlib.pyplot as plt
import numpy as np
import json, os
import argparse
from sklearn.datasets import fetch_openml
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
import codecs
from joblib import dump, load
######## GLOBALS #########
def save_dataset_csv(X, y, filename):
    """
    Saves training data to a CSV file, labels appended as the last column.
    :param X: training data (features)
    :param y: training data (labels)
    :param filename: name of the file to write
    :return: null
    """
    features = np.array(X)
    labels = np.reshape(np.array(y).T, (-1, 1))
    concat = np.concatenate((features, labels), axis=1)
    # Debug output describing the assembled matrix.
    print('X size: ', end=' ')
    print(features.shape)
    print('y size: ', end=' ')
    print(labels.shape)
    print('concat size: ', end=' ')
    print(concat.shape)
    print(concat.shape)
    np.savetxt(filename, concat, delimiter=",", fmt='%s')
def argmax_1(a):
    """Return the index of the first maximal element of sequence `a`."""
    return max(range(len(a)), key=a.__getitem__)
def write_to_json(model1, filename, regression=False):
    """Serialize a fitted sklearn tree ensemble to a compact JSON file.

    For each estimator the tree's node table is flattened into
    (left_child, right_child, feature, threshold, n_node_samples) tuples --
    indices 0,1,2,3,5 of sklearn's internal node record; confirm the field
    order against the installed sklearn version.  On leaf nodes
    (left_child == -1) the otherwise-unused 'feature' slot is overwritten
    with the argmax class index of the leaf's value vector, so prediction
    needs no separate value array.

    :param model1: fitted ensemble exposing `estimators_` (and `n_classes_`
        unless regression)
    :param filename: output JSON path
    :param regression: store -1 instead of n_classes for regression models
    """
    start_time = time.time()
    final_count = 0
    # NOTE(review): the 'values' list is created but never populated.
    new_dict = {'estimators': {'nodes': [], 'values': [] } }
    for count, estimator in enumerate(model1.estimators_):
        nodes = estimator.tree_.__getstate__()['nodes'].tolist()
        newnodes = [list((i[0], i[1], i[2], i[3], i[5])) for i in nodes]
        length = len(nodes)
        values = estimator.tree_.__getstate__()['values']
        for i in range(length):
            if newnodes[i][0] == -1:
                # Leaf: replace the feature slot with the predicted class index.
                newnodes[i][2] = argmax_1(list(values[i][0]))
        new_dict['estimators']['nodes'].append(newnodes)
        final_count = count
    if regression:
        new_dict['n_classes'] = -1
    else:
        new_dict['n_classes'] = model1.n_classes_
    new_dict['n_estimators'] = final_count+1
    json_obj = json.dumps(new_dict)
    print('finish dumping')
    with open(filename, "w") as outfile:
        outfile.write(json_obj)
    end_time = time.time()
    print('time taken for manual json conversion: ', end='')
    print(end_time - start_time)
def write_to_json_gbt(model, filename, regression=False):
    """Serialize a fitted sklearn gradient-boosted ensemble to JSON.

    Like write_to_json, but `model.estimators_` is a 2-D grid (one row per
    boosting stage, one column per class); all trees are flattened into a
    single node list.  On leaf nodes (left_child == -1) the 'threshold' slot
    is overwritten with the leaf's raw regression value (values[i][0][0]).

    :param model: fitted gradient-boosting model exposing `estimators_`
        (and `n_classes_` unless regression)
    :param filename: output JSON path
    :param regression: store -1 instead of n_classes for regression models
    """
    start_time = time.time()
    final_count = 0
    # NOTE(review): the 'values' list is created but never populated.
    new_dict = {'estimators': {'nodes': [], 'values': [] } }
    final_count = 0
    print('tot est: ')
    times_add=0
    print (len(model.estimators_))
    for estimators in model.estimators_:
        print(len(estimators))
        for count, estimator in enumerate(estimators):
            nodes = estimator.tree_.__getstate__()['nodes'].tolist()
            # Keep fields 0,1,2,3,5: left, right, feature, threshold, n_node_samples.
            newnodes = [list((i[0], i[1], i[2], i[3], i[5])) for i in nodes]
            length = len(nodes)
            values = estimator.tree_.__getstate__()['values']
            for i in range(length):
                if newnodes[i][0] == -1:
                    #print(values[i][0])
                    # Leaf: stash the regression value in the threshold slot.
                    newnodes[i][3] = values[i][0][0]
                    #newnodes[i][2] = argmax_1(list(values[i][0]))
            final_count += 1
            new_dict['estimators']['nodes'].append(newnodes)
            times_add+=1
    print("total total: ")
    print(times_add)
    if regression:
        new_dict['n_classes'] = -1
    else:
        new_dict['n_classes'] = model.n_classes_
    new_dict['n_estimators'] = final_count
    json_obj = json.dumps(new_dict)
    print('finish dumping')
    with open(filename, "w") as outfile:
        outfile.write(json_obj)
    end_time = time.time()
    print('time taken for manual json conversion: ', end='')
    print(end_time - start_time)
def load_csv(filename, label_col, num_test):
    """
    Loads a comma-separated data file and splits off the label column.

    Reads at most ``num_test + 1`` rows (the original bound is inclusive and
    is preserved for compatibility).  NUL cells (``'\\0'``) are dropped
    before parsing.

    :param filename: path to the CSV file
    :param label_col: index of the column holding the (integer) label
    :param num_test: row-count bound; ``num_test + 1`` rows are read
    :return: tuple (features, labels) -- a list of float rows with the label
             column removed, and a list of int labels
    """
    features = []
    labels = []
    rows_read = 0
    with open(filename, 'rt') as f:
        for row in csv.reader(f, delimiter=','):
            values = [float(item) for item in row if item != '\0']
            labels.append(int(values.pop(label_col)))
            features.append(values)
            rows_read += 1
            if rows_read > num_test:
                break
    # Fixed: the original docstring claimed numpy train/test arrays were
    # returned; it also called f.close() redundantly after the with-block.
    return features, labels
def parseCmdArgs():
    """Build and evaluate the command-line parser for this script.

    Recognised options:
      --labelcol       index of the label column (int)
      --datafilename   path of the CSV dataset
      --modelfilename  path of the serialized model
      --numtest        optional sample count (const 100 when given bare)

    :return: the argparse namespace with the parsed values
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--labelcol', action='store', dest='label_column',
                        type=int, help='Label column')
    parser.add_argument('--datafilename', action='store', dest='data_filename',
                        help='Dataset name')
    parser.add_argument('--modelfilename', action='store', dest='model_filename',
                        help='Dataset name')
    parser.add_argument('--numtest', action='store', dest='num_test', nargs='?',
                        type=int, const=100, help='Number of test samples')
    return parser.parse_args()
# --- Script entry: parse CLI args, load data and model, time the model load ---
results = parseCmdArgs()
label_column = int(results.label_column)
data_path_filename = results.data_filename
model_filename = results.model_filename
num_test = int(results.num_test)
# NOTE(review): the label column is hard-coded to 0 here; `label_column`
# parsed from the CLI above is never passed to load_csv -- confirm intended.
X, y = load_csv(data_path_filename, 0, num_test)
print('csv loaded')
start_time = time.time()
model1 = load(model_filename)  # joblib.load of the serialized model
end_time = time.time()
model1.predict(X)  # prediction result is discarded; only the load is timed
print('time: ', end_time - start_time)
| [
"csv.reader",
"argparse.ArgumentParser",
"numpy.savetxt",
"json.dumps",
"time.time",
"numpy.array",
"joblib.load"
] | [((5830, 5841), 'time.time', 'time.time', ([], {}), '()\n', (5839, 5841), False, 'import time\n'), ((5851, 5871), 'joblib.load', 'load', (['model_filename'], {}), '(model_filename)\n', (5855, 5871), False, 'from joblib import dump, load\n'), ((5883, 5894), 'time.time', 'time.time', ([], {}), '()\n', (5892, 5894), False, 'import time\n'), ((1476, 1529), 'numpy.savetxt', 'np.savetxt', (['filename', 'concat'], {'delimiter': '""","""', 'fmt': '"""%s"""'}), "(filename, concat, delimiter=',', fmt='%s')\n", (1486, 1529), True, 'import numpy as np\n'), ((1673, 1684), 'time.time', 'time.time', ([], {}), '()\n', (1682, 1684), False, 'import time\n'), ((2447, 2467), 'json.dumps', 'json.dumps', (['new_dict'], {}), '(new_dict)\n', (2457, 2467), False, 'import json, os\n'), ((2587, 2598), 'time.time', 'time.time', ([], {}), '()\n', (2596, 2598), False, 'import time\n'), ((2770, 2781), 'time.time', 'time.time', ([], {}), '()\n', (2779, 2781), False, 'import time\n'), ((3898, 3918), 'json.dumps', 'json.dumps', (['new_dict'], {}), '(new_dict)\n', (3908, 3918), False, 'import json, os\n'), ((4036, 4047), 'time.time', 'time.time', ([], {}), '()\n', (4045, 4047), False, 'import time\n'), ((4950, 4975), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4973, 4975), False, 'import argparse\n'), ((4560, 4588), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (4570, 4588), False, 'import csv\n'), ((1180, 1191), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1188, 1191), True, 'import numpy as np\n'), ((1282, 1293), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1290, 1293), True, 'import numpy as np\n'), ((1204, 1215), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1212, 1215), True, 'import numpy as np\n'), ((1354, 1365), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1362, 1365), True, 'import numpy as np\n')] |
import math
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
def get_dataset_size(dataset):
    """Return the total number of examples across all clients.

    :param dataset: a TFF ClientData-like object exposing ``client_ids``
        and ``create_tf_dataset_for_client``
    :return: sum of the per-client dataset lengths (0 for no clients)
    """
    # sum() over a generator replaces the manual accumulation loop.
    return sum(
        len(dataset.create_tf_dataset_for_client(client))
        for client in dataset.client_ids
    )
def encode_element(x):
    """Map a raw EMNIST example dict to an (image, one-hot label) pair."""
    pixels = tf.expand_dims(x['pixels'], -1)  # add a trailing channel axis
    label = tf.one_hot(x['label'], 10)       # 10 classes
    return pixels, label
def encode_dataset(dataset, batch):
    """Shuffle, batch, encode, and prefetch a tf.data pipeline.

    Batches are formed with ``drop_remainder=True``, so up to ``batch - 1``
    trailing examples are discarded.
    """
    pipeline = dataset.shuffle(2 * batch, reshuffle_each_iteration=True)
    pipeline = pipeline.batch(batch, drop_remainder=True)
    pipeline = pipeline.map(encode_element, num_parallel_calls=4)
    return pipeline.prefetch(1)
def load_flattened_dataset(batch):
    """Load EMNIST train/test splits flattened across all clients.

    :param batch: batch size used for both pipelines
    :return: ``(train_pipeline, train_batches, test_pipeline, test_batches)``
        where the sizes are whole-batch counts (floor of examples / batch)
    """
    emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
    # (The unused `client_id` lookup that used to sit here was removed.)
    onehot_train = encode_dataset(emnist_train.create_tf_dataset_from_all_clients(), batch)
    onehot_test = encode_dataset(emnist_test.create_tf_dataset_from_all_clients(), batch)
    train_size = math.floor(get_dataset_size(emnist_train)/batch)
    test_size = math.floor(get_dataset_size(emnist_test)/batch)
    return onehot_train, train_size, onehot_test, test_size
def make_federated_data(train_data, test_data, batch):
    """Split EMNIST clients into pre-generated groups and build pipelines.

    Loads a pickled list of client-id groups (roughly 30 even splits) and,
    for each group, concatenates the group's per-client datasets into one
    train pipeline and one test pipeline.  A validation pipeline over all
    test clients is built as well.

    :return: (train_datasets, train_sizes, test_datasets, test_sizes,
              val_dataset, val_size)
    """
    print("Beginning dataset generation.", flush=True)
    # A pregenerated list of clients in 30 (roughly even) splits.
    # The full per-client split is too slow for TFF in simulation.
    client_ids_split = np.load("src/data/test_split.npy", allow_pickle=True)
    train_datasets, train_sizes = [], []
    test_datasets, test_sizes = [], []
    for group in client_ids_split:
        first, rest = group[0], group[1:]
        train_ds = train_data.create_tf_dataset_for_client(first)
        test_ds = test_data.create_tf_dataset_for_client(first)
        for cid in rest:
            train_ds = train_ds.concatenate(train_data.create_tf_dataset_for_client(cid))
            test_ds = test_ds.concatenate(test_data.create_tf_dataset_for_client(cid))
        train_datasets.append(encode_dataset(train_ds, batch))
        test_datasets.append(encode_dataset(test_ds, batch))
        train_sizes.append(math.floor(len(train_ds)/batch))
        test_sizes.append(math.floor(len(test_ds)/batch))
    val_dataset = encode_dataset(test_data.create_tf_dataset_from_all_clients(), batch)
    # NOTE(review): val_size is a raw example count, unlike the per-group
    # sizes which are whole-batch counts -- confirm downstream usage.
    val_size = get_dataset_size(test_data)
    return train_datasets, train_sizes, test_datasets, test_sizes, val_dataset, val_size
def load_federated_dataset(batch):
    """Load EMNIST and build the grouped federated train/test pipelines.

    :param batch: batch size for every pipeline
    :return: see :func:`make_federated_data`
    """
    emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
    # The per-client encoded dataset lists previously built here were never
    # used and only wasted time and memory, so they have been removed.
    return make_federated_data(emnist_train, emnist_test, batch)
| [
"tensorflow.expand_dims",
"numpy.load",
"tensorflow.one_hot",
"tensorflow_federated.simulation.datasets.emnist.load_data"
] | [((649, 691), 'tensorflow_federated.simulation.datasets.emnist.load_data', 'tff.simulation.datasets.emnist.load_data', ([], {}), '()\n', (689, 691), True, 'import tensorflow_federated as tff\n'), ((1419, 1472), 'numpy.load', 'np.load', (['"""src/data/test_split.npy"""'], {'allow_pickle': '(True)'}), "('src/data/test_split.npy', allow_pickle=True)\n", (1426, 1472), True, 'import numpy as np\n'), ((2569, 2611), 'tensorflow_federated.simulation.datasets.emnist.load_data', 'tff.simulation.datasets.emnist.load_data', ([], {}), '()\n', (2609, 2611), True, 'import tensorflow_federated as tff\n'), ((293, 324), 'tensorflow.expand_dims', 'tf.expand_dims', (["x['pixels']", '(-1)'], {}), "(x['pixels'], -1)\n", (307, 324), True, 'import tensorflow as tf\n'), ((326, 352), 'tensorflow.one_hot', 'tf.one_hot', (["x['label']", '(10)'], {}), "(x['label'], 10)\n", (336, 352), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# Main - 1D elastic bar - Linear FEM
# Prepare environment and import libraries
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rc('font', size=14)
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import array_ops
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import constraints
from tensorflow.python.framework import tensor_shape
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Input, Multiply, Add, Lambda, Dense, Dot, Reshape
from tensorflow.keras.optimizers import RMSprop, Adam, SGD
from tensorflow.keras.losses import mean_squared_error as mse
from tensorflow.keras.losses import mean_absolute_error as mae
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ReduceLROnPlateau, TerminateOnNaN, ModelCheckpoint
from pinn.layers import getScalingDenseLayer
def build_mlp(dLInputScaling, mlp_name):
    """Build and compile a small fully-connected Keras model.

    Architecture: Dense(5, tanh) -> Dense(64, linear), compiled with
    RMSprop(1e-2) on mean squared error.

    :param dLInputScaling: input-scaling layer (currently unused; kept for
        interface compatibility)
    :param mlp_name: name given to the Sequential model
    :return: the compiled model
    """
    layers = [
        Dense(5, activation='tanh'),
        Dense(64, activation='linear'),
    ]
    model = Sequential(layers, name=mlp_name)
    model.compile(loss='mean_squared_error',
                  optimizer=RMSprop(1e-2),
                  metrics=['mean_absolute_error', 'mean_squared_error'])
    return model
def build_force_mlp(dLInputScaling, mlp_name):
    """Build and compile the small force-correction Keras model.

    Architecture: Dense(3, tanh) -> Dense(8, linear), compiled with
    RMSprop(1e-2) on mean squared error.

    :param dLInputScaling: input-scaling layer (currently unused; kept for
        interface compatibility)
    :param mlp_name: name given to the Sequential model
    :return: the compiled model
    """
    layers = [
        Dense(3, activation='tanh'),
        Dense(8, activation='linear'),
    ]
    model = Sequential(layers, name=mlp_name)
    model.compile(loss='mean_squared_error',
                  optimizer=RMSprop(1e-2),
                  metrics=['mean_absolute_error', 'mean_squared_error'])
    return model
class AMatrix(Layer):
    """
    Elastic stiffness matrix.

    Keras layer holding a constant (non-trainable in this script) 1x8x8
    kernel.  ``call`` ignores its inputs and simply returns the kernel, so
    the layer acts as a container for a pre-assembled FE stiffness matrix
    that is injected from outside via ``set_weights``.
    """
    def __init__(self,
                 kernel_initializer = 'glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        # Accept the legacy `input_dim` kwarg by translating it to `input_shape`.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(AMatrix, self).__init__(**kwargs)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
    def build(self, input_shape, **kwargs):
        # Fixed 1x8x8 kernel: a leading axis of 1, then the 8x8 stiffness
        # matrix (8 free DOFs in this problem).
        self.kernel = self.add_weight("kernel",
                                      shape = [1,8,8],
                                      initializer = self.kernel_initializer,
                                      dtype = self.dtype,
                                      trainable = self.trainable,
                                      constraint = self.kernel_constraint,
                                      **kwargs)
        self.built = True
    def call(self, inputs):
        # The input tensor is ignored; the stored stiffness matrix is returned.
        output = self.kernel
        # output = array_ops.reshape(output,(array_ops.shape(output)[0],1))
        return output
    def compute_output_shape(self, input_shape):
        # Declares a (None, 1)-derived shape for Keras shape inference.
        aux_shape = tensor_shape.TensorShape((None,1))
        return aux_shape[:-1].concatenate(1)
class FMatrix(Layer):
    """
    Force matrix.

    Keras layer holding a constant length-8 force vector.  ``call`` adds the
    layer input (the applied load) into the last entry of the stored vector
    through a fixed 0/1 mask, so the returned force vector tracks the input
    load at the end DOF.
    """
    def __init__(self,
                 kernel_initializer = 'glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        # Accept the legacy `input_dim` kwarg by translating it to `input_shape`.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(FMatrix, self).__init__(**kwargs)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
    def build(self, input_shape, **kwargs):
        # Length-8 force vector (one entry per free DOF); values are injected
        # later through set_weights.
        self.kernel = self.add_weight("kernel",
                                      shape = [8],
                                      initializer = self.kernel_initializer,
                                      dtype = self.dtype,
                                      trainable = self.trainable,
                                      constraint = self.kernel_constraint,
                                      **kwargs)
        self.built = True
    def call(self, inputs):
        # The mask places the scalar input only in the last DOF slot.
        kernel_enhanced = inputs * tf.constant([0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0])
        # kernel_enhanced = tf.expand_dims(self.kernel,0)
        output = self.kernel + kernel_enhanced
        # output = array_ops.reshape(output,(array_ops.shape(output)[0],1))
        return output
    def compute_output_shape(self, input_shape):
        # Declares a (None, 1)-derived shape for Keras shape inference.
        aux_shape = tensor_shape.TensorShape((None,1))
        return aux_shape[:-1].concatenate(1)
def create_fe_model(force_mlp, delta_stiffness_mlp, elastic_stiffness, force_vector,
                force_low, force_up, stiffness_low, stiffness_up, batch_input_shape, myDtype):
    """Assemble the hybrid physics/ML finite-element Keras model.

    Deflection is computed as ``inv(K_e + dK(F)) . f(F)`` where ``K_e`` is
    the fixed elastic stiffness (AMatrix), ``dK`` is an 8x8 correction
    predicted by *delta_stiffness_mlp* from the scalar load input, and
    ``f(F)`` is the fixed force vector with the input load injected into its
    last DOF (FMatrix).

    :param force_mlp: force-correction MLP (currently unused; kept for
        interface compatibility)
    :param delta_stiffness_mlp: MLP mapping the scalar load to a flattened
        8x8 stiffness correction (64 outputs)
    :param elastic_stiffness: 1x8x8 array loaded into the AMatrix kernel
    :param force_vector: length-8 array loaded into the FMatrix kernel
    :param force_low, force_up, stiffness_low, stiffness_up: scaling bounds
        (currently unused; kept for interface compatibility)
    :param batch_input_shape: unused; kept for interface compatibility
    :param myDtype: dtype for the constant-kernel layers
    :return: compiled Model mapping load -> 8-DOF deflection (MSE, RMSprop 5e-1)
    """
    load_input = Input(shape=(1,))

    stiffness_layer = AMatrix(input_shape=load_input.shape, dtype=myDtype, trainable=False)
    stiffness_layer.build(input_shape=load_input.shape)
    stiffness_layer.set_weights([np.asarray(elastic_stiffness, dtype=stiffness_layer.dtype)])
    stiffness_out = stiffness_layer(load_input)

    force_layer = FMatrix(input_shape=load_input.shape, dtype=myDtype, trainable=False)
    force_layer.build(input_shape=load_input.shape)
    force_layer.set_weights([np.asarray(force_vector, dtype=force_layer.dtype)])
    force_out = force_layer(load_input)

    delta_flat = delta_stiffness_mlp(load_input)
    delta_matrix = Reshape((8, 8))(delta_flat)
    corrected = Add()([stiffness_out, delta_matrix])
    inverse = Lambda(lambda x: tf.linalg.inv(x))(corrected)
    deflection = Dot((1))([inverse, force_out])

    fe_model = Model(inputs=[load_input], outputs=[deflection])
    fe_model.compile(loss=mse,
                     optimizer=RMSprop(5e-1),
                     metrics=[mae])
    return fe_model
def create_physics_model(elastic_stiffness, force_vector,
                     stiffness_low, stiffness_up, batch_input_shape, myDtype):
    """Assemble the purely elastic finite-element Keras model.

    Deflection is computed as ``inv(K_e) . f(F)`` -- the fixed elastic
    stiffness (AMatrix) inverted and dotted with the force vector, whose
    last DOF tracks the scalar load input (FMatrix).  No learned correction
    is applied.

    :param elastic_stiffness: 1x8x8 array loaded into the AMatrix kernel
    :param force_vector: length-8 array loaded into the FMatrix kernel
    :param stiffness_low, stiffness_up: scaling bounds (currently unused;
        kept for interface compatibility)
    :param batch_input_shape: unused; kept for interface compatibility
    :param myDtype: dtype for the constant-kernel layers
    :return: compiled Model mapping load -> 8-DOF deflection (MAE, RMSprop 5e-3)
    """
    load_input = Input(shape=(1,))

    stiffness_layer = AMatrix(input_shape=load_input.shape, dtype=myDtype, trainable=False)
    stiffness_layer.build(input_shape=load_input.shape)
    stiffness_layer.set_weights([np.asarray(elastic_stiffness, dtype=stiffness_layer.dtype)])
    stiffness_out = stiffness_layer(load_input)

    force_layer = FMatrix(input_shape=load_input.shape, dtype=myDtype, trainable=False)
    force_layer.build(input_shape=load_input.shape)
    force_layer.set_weights([np.asarray(force_vector, dtype=force_layer.dtype)])
    force_out = force_layer(load_input)

    inverse = Lambda(lambda x: tf.linalg.inv(x))(stiffness_out)
    deflection = Dot((1))([inverse, force_out])

    physics_model = Model(inputs=[load_input], outputs=[deflection])
    physics_model.compile(loss=mae,
                          optimizer=RMSprop(5e-3),
                          metrics=[mse])
    return physics_model
# --------------------------
# Functions definition
def Mesh1D(L1, Nx):
    """Generate nodes and connectivity for a uniform 1D mesh of linear elements.

    :param L1: bar length
    :param Nx: number of elements
    :return: ``(Nodes, Connectivity)`` where ``Nodes`` is an ``(Nx+1,)``
        array of nodal positions and ``Connectivity`` is an ``(Nx, 2)`` int
        array holding the two node numbers of each element, one per row
    """
    Nodes = np.linspace(0, L1, Nx + 1)
    # Element e connects nodes (e, e+1); build the table vectorised instead
    # of filling a zeros array with an explicit Python loop.
    element_ids = np.arange(Nx, dtype='int')
    Connectivity = np.column_stack((element_ids, element_ids + 1))
    return Nodes, Connectivity
def LinElement1D(Nodes_el, EA, q):
    """Element stiffness matrix and load vector for a 2-node linear bar element.

    :param Nodes_el: pair of nodal coordinates ``(x1, x2)`` of the element
    :param EA: axial stiffness of the element
    :param q: constant distributed load
    :return: ``(K_el, q_el)`` -- 2x2 stiffness matrix and length-2 load vector
    """
    length = Nodes_el[1] - Nodes_el[0]
    K_el = (EA / length) * np.array([[1, -1], [-1, 1]])
    q_el = (q * length / 2) * np.transpose(np.array([1, 1]))
    return K_el, q_el
#
# --------------------------
# MAIN
#
# Input ------------------------------------------------------
#
L1 = 200.0 # Length of elastic bar (mm)
Nx = 8 # Number of elements
# Material Properties
EA = np.ones(shape=(Nx, 1))
for i in range(0, Nx): # Modify this loop to assign different material properties per element
    EA[i,0] = 73084*100
# EBC
EBC = np.array([0, 0], dtype='int') # Assign EBC in the form [dof, dof value]
# Distributed loads and NBC
q = 100 # Distributed load (assumed constant)
NBC = [Nx, 21000] # Assign NBC in the form [dof, load value]
#
# Meshing ----------------------------------------------------
#
Nodes, Connectivity = Mesh1D(L1, Nx)
#
# Element calculations and assembly --------------------------
#
K_model = np.zeros(shape=(Nx+1, Nx+1))
f_model = np.zeros(shape=(Nx+1, 1))
for e in range(0, Nx):
    # Scatter the 2x2 element stiffness and 2x1 element load into the
    # global system using the element's two node numbers.
    Nodes_el = Connectivity[e]
    Nodes_loc = np.array([Nodes[Nodes_el[0]],Nodes[Nodes_el[1]]])
    K_el, q_el = LinElement1D(Nodes_loc, EA[e], q)
    K_model[Nodes_el[0], Nodes_el[0]] += K_el[0,0]
    K_model[Nodes_el[0], Nodes_el[1]] += K_el[0,1]
    K_model[Nodes_el[1], Nodes_el[0]] += K_el[1,0]
    K_model[Nodes_el[1], Nodes_el[1]] += K_el[1,1]
    f_model[Nodes_el[0]] += q_el[0]
    f_model[Nodes_el[1]] += q_el[1]
    if e == NBC[0]-1:
        # The last element's end node carries the applied load (natural BC).
        f_model[Nodes_el[1]] += NBC[1]
#
# Apply element EBC --------------------------
#
# Partition the system: row/column 0 is the constrained DOF (u(0) = EBC[1]).
A_matrix = K_model[EBC[0]+1:,EBC[0]+1:]
B_matrix = K_model[EBC[0]+1:,0]
C_matrix = K_model[EBC[0],0]
F_matrix = f_model[EBC[0]+1:,0]
#
# Solve for displacements and reaction forces - plot solution ----------------
#
# Free DOFs: A u = F - B*u0; support reaction: R = B.u + C*u0
u = np.linalg.solve(A_matrix,F_matrix-np.transpose(B_matrix*EBC[1]))
R = np.dot(B_matrix,u) + C_matrix * EBC[1]
fig = plt.figure()
plt.plot(Nodes, np.zeros(Nx+1),'k-', linewidth = 10,label = 'Bar')
# Prepend the prescribed displacement (EBC value index 0) to the free-DOF solution.
plt.plot(Nodes, np.append(np.array(EBC[0]),u),'r-o', linewidth = 3,label = 'Solution')
plt.xlabel('x (mm)')
plt.ylabel('u (mm)')
plt.xlim(0,L1)
plt.xticks(Nodes)
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
# Reference data: row 0 holds the applied loads, the remaining rows hold the
# nodal deflections (one column per load case).
plastic_io = pd.read_csv('./plastic_deflections2.csv', index_col = False, header = None)
force_input = np.asarray(plastic_io)[0,:]
plastic_deflections = np.asarray(plastic_io)[1:,:]
# Remove the fixed end load from the assembled force vector: FMatrix
# re-injects the (variable) input load into the last DOF inside the model.
F_matrix[-1] += -NBC[-1]
force_vector = F_matrix
physics_model = create_physics_model(np.array([A_matrix]), force_vector,
                    2e5, 6e5, force_input.shape, 'float32')
elastic_deflection = physics_model.predict(force_input)
# Input-scaling layer built from the min/range of the observed loads.
dLInputScaling = getScalingDenseLayer(np.array([force_input.min(axis=0)]), np.array([force_input.max(axis=0)-force_input.min(axis=0)]))
delta_stiffness_mlp = build_mlp(dLInputScaling, 'delta_stiffness')
delta_stiffness_mlp.trainable = True
force_mlp = build_force_mlp(dLInputScaling, 'delta_force')
force_mlp.trainable = True
fe_model = create_fe_model(force_mlp, delta_stiffness_mlp, np.array([A_matrix]), force_vector,
                    1e1, 1e5, -1e3, 1e3, force_input.shape, 'float32')
# Training pipeline (disabled): the pre-trained weights loaded below were
# produced by this loop; uncomment to retrain.
#weight_path = "./test_50000EP/cp.ckpt"
#
#ModelCP = ModelCheckpoint(filepath=weight_path, monitor='loss',
#                             verbose=1, save_best_only=True,
#                             mode='min', save_weights_only=True)
#ReduceLR = ReduceLROnPlateau(monitor='loss', factor=0.85,
#                                   min_lr = 1e-15, patience=100, verbose=1, mode='min')
#ToNaN = TerminateOnNaN()
#callbacks_list = [ReduceLR, ToNaN]
#EPOCHS = 500000
#
#history = fe_model.fit(force_input, np.transpose(plastic_deflections), epochs=EPOCHS, verbose=1, callbacks=callbacks_list)
#fe_model.save_weights("./test_500000EP.h5py")
fe_model.load_weights("./test_500000EP.h5py")
prediction = fe_model.predict(force_input)
# Compare elastic, reference (Ramberg-Osgood), and adjusted-model deflections
# for each load case in a 3x4 grid of panels.
fig, ax = plt.subplots(3,4, sharex = True, sharey = True, figsize = (7*4/3,6))
fig.text(0.5, 0.01, 'Normalized x (mm)', ha='center')
fig.text(0.01, 0.5, 'u (mm)', va='center', rotation='vertical')
ctr = -1
for i in range(3):
    for j in range(4):
        ctr += 1
        # The 12th panel (ctr == 11) is left empty: only 11 cases are plotted.
        if ctr != 11:
            ax[i,j].plot(Nodes, np.zeros(Nx+1),'k-', linewidth = 5,label = 'Bar')
            ax[i,j].plot(Nodes, np.append(np.array(EBC[0]),elastic_deflection[ctr,:]),'g--', linewidth = 2,label = 'Elastic Deflections')
            ax[i,j].plot(Nodes, np.append(np.array(EBC[0]),plastic_deflections[:,ctr]),'r--', linewidth = 2,label = 'Ramberg-Osgood')
            ax[i,j].plot(Nodes, np.append(np.array(EBC[0]),prediction[ctr,:]),'b--', linewidth = 2,label = 'Adjusted Model')
            # plt.xlabel('x (mm)')
            # plt.ylabel('u (mm)')
            ax[i,j].set_xlim(0,L1)
            ax[i,j].set_ylim(0,12.0)
            ax[i,j].set_xticks([0,50,100,150,200])
            ax[i,j].set_xticklabels([0,0.25,0.5,0.75,1])
            ax[i,j].set_yticks([0,4,8,12])
            ax[i,j].grid(True)
ax[0,0].legend(fontsize=9, loc = 'upper left')
fig.tight_layout()
fig.show()
| [
"matplotlib.rc",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"numpy.ones",
"matplotlib.pyplot.figure",
"tensorflow.linalg.inv",
"tensorflow.python.framework.tensor_shape.TensorShape",
"matplotlib.pyplot.tight_layout",
"tensorflow.keras.optimizers.RMSprop"... | [((155, 185), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {'size': '(14)'}), "('font', size=14)\n", (168, 185), False, 'import matplotlib\n'), ((9860, 9882), 'numpy.ones', 'np.ones', ([], {'shape': '(Nx, 1)'}), '(shape=(Nx, 1))\n', (9867, 9882), True, 'import numpy as np\n'), ((10028, 10057), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': '"""int"""'}), "([0, 0], dtype='int')\n", (10036, 10057), True, 'import numpy as np\n'), ((10448, 10480), 'numpy.zeros', 'np.zeros', ([], {'shape': '(Nx + 1, Nx + 1)'}), '(shape=(Nx + 1, Nx + 1))\n', (10456, 10480), True, 'import numpy as np\n'), ((10487, 10514), 'numpy.zeros', 'np.zeros', ([], {'shape': '(Nx + 1, 1)'}), '(shape=(Nx + 1, 1))\n', (10495, 10514), True, 'import numpy as np\n'), ((11481, 11493), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11491, 11493), True, 'import matplotlib.pyplot as plt\n'), ((11648, 11668), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (mm)"""'], {}), "('x (mm)')\n", (11658, 11668), True, 'import matplotlib.pyplot as plt\n'), ((11669, 11689), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""u (mm)"""'], {}), "('u (mm)')\n", (11679, 11689), True, 'import matplotlib.pyplot as plt\n'), ((11690, 11705), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'L1'], {}), '(0, L1)\n', (11698, 11705), True, 'import matplotlib.pyplot as plt\n'), ((11705, 11722), 'matplotlib.pyplot.xticks', 'plt.xticks', (['Nodes'], {}), '(Nodes)\n', (11715, 11722), True, 'import matplotlib.pyplot as plt\n'), ((11723, 11737), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (11731, 11737), True, 'import matplotlib.pyplot as plt\n'), ((11738, 11750), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11748, 11750), True, 'import matplotlib.pyplot as plt\n'), ((11751, 11769), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11767, 11769), True, 'import matplotlib.pyplot as plt\n'), ((11770, 11780), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11778, 11780), True, 'import matplotlib.pyplot as plt\n'), ((11796, 11867), 'pandas.read_csv', 'pd.read_csv', (['"""./plastic_deflections2.csv"""'], {'index_col': '(False)', 'header': 'None'}), "('./plastic_deflections2.csv', index_col=False, header=None)\n", (11807, 11867), True, 'import pandas as pd\n'), ((13500, 13568), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(4)'], {'sharex': '(True)', 'sharey': '(True)', 'figsize': '(7 * 4 / 3, 6)'}), '(3, 4, sharex=True, sharey=True, figsize=(7 * 4 / 3, 6))\n', (13512, 13568), True, 'import matplotlib.pyplot as plt\n'), ((1407, 1420), 'tensorflow.keras.optimizers.RMSprop', 'RMSprop', (['(0.01)'], {}), '(0.01)\n', (1414, 1420), False, 'from tensorflow.keras.optimizers import RMSprop, Adam, SGD\n'), ((1981, 1994), 'tensorflow.keras.optimizers.RMSprop', 'RMSprop', (['(0.01)'], {}), '(0.01)\n', (1988, 1994), False, 'from tensorflow.keras.optimizers import RMSprop, Adam, SGD\n'), ((5473, 5490), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (5478, 5490), False, 'from tensorflow.keras.layers import Input, Multiply, Add, Lambda, Dense, Dot, Reshape\n'), ((7149, 7208), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputLayer]', 'outputs': '[deflectionOutputLayer]'}), '(inputs=[inputLayer], outputs=[deflectionOutputLayer])\n', (7154, 7208), False, 'from tensorflow.keras.models import Model\n'), ((7520, 7537), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (7525, 7537), False, 'from tensorflow.keras.layers import Input, Multiply, Add, Lambda, Dense, Dot, Reshape\n'), ((8514, 8573), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputLayer]', 'outputs': '[deflectionOutputLayer]'}), '(inputs=[inputLayer], outputs=[deflectionOutputLayer])\n', (8519, 8573), False, 'from tensorflow.keras.models import Model\n'), ((9119, 9145), 'numpy.linspace', 'np.linspace', (['(0)', 
'L1', '(Nx + 1)'], {}), '(0, L1, Nx + 1)\n', (9130, 9145), True, 'import numpy as np\n'), ((9161, 9197), 'numpy.zeros', 'np.zeros', ([], {'shape': '(Nx, 2)', 'dtype': '"""int"""'}), "(shape=(Nx, 2), dtype='int')\n", (9169, 9197), True, 'import numpy as np\n'), ((10594, 10644), 'numpy.array', 'np.array', (['[Nodes[Nodes_el[0]], Nodes[Nodes_el[1]]]'], {}), '([Nodes[Nodes_el[0]], Nodes[Nodes_el[1]]])\n', (10602, 10644), True, 'import numpy as np\n'), ((11435, 11454), 'numpy.dot', 'np.dot', (['B_matrix', 'u'], {}), '(B_matrix, u)\n', (11441, 11454), True, 'import numpy as np\n'), ((11510, 11526), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (11518, 11526), True, 'import numpy as np\n'), ((11887, 11909), 'numpy.asarray', 'np.asarray', (['plastic_io'], {}), '(plastic_io)\n', (11897, 11909), True, 'import numpy as np\n'), ((11937, 11959), 'numpy.asarray', 'np.asarray', (['plastic_io'], {}), '(plastic_io)\n', (11947, 11959), True, 'import numpy as np\n'), ((12054, 12074), 'numpy.array', 'np.array', (['[A_matrix]'], {}), '([A_matrix])\n', (12062, 12074), True, 'import numpy as np\n'), ((12597, 12617), 'numpy.array', 'np.array', (['[A_matrix]'], {}), '([A_matrix])\n', (12605, 12617), True, 'import numpy as np\n'), ((2669, 2705), 'tensorflow.python.keras.initializers.get', 'initializers.get', (['kernel_initializer'], {}), '(kernel_initializer)\n', (2685, 2705), False, 'from tensorflow.python.keras import initializers\n'), ((2740, 2776), 'tensorflow.python.keras.regularizers.get', 'regularizers.get', (['kernel_regularizer'], {}), '(kernel_regularizer)\n', (2756, 2776), False, 'from tensorflow.python.keras import regularizers\n'), ((2811, 2845), 'tensorflow.python.keras.constraints.get', 'constraints.get', (['kernel_constraint'], {}), '(kernel_constraint)\n', (2826, 2845), False, 'from tensorflow.python.keras import constraints\n'), ((3577, 3612), 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', (['(None, 1)'], {}), '((None, 
1))\n', (3601, 3612), False, 'from tensorflow.python.framework import tensor_shape\n'), ((4123, 4159), 'tensorflow.python.keras.initializers.get', 'initializers.get', (['kernel_initializer'], {}), '(kernel_initializer)\n', (4139, 4159), False, 'from tensorflow.python.keras import initializers\n'), ((4194, 4230), 'tensorflow.python.keras.regularizers.get', 'regularizers.get', (['kernel_regularizer'], {}), '(kernel_regularizer)\n', (4210, 4230), False, 'from tensorflow.python.keras import regularizers\n'), ((4265, 4299), 'tensorflow.python.keras.constraints.get', 'constraints.get', (['kernel_constraint'], {}), '(kernel_constraint)\n', (4280, 4299), False, 'from tensorflow.python.keras import constraints\n'), ((5184, 5219), 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', (['(None, 1)'], {}), '((None, 1))\n', (5208, 5219), False, 'from tensorflow.python.framework import tensor_shape\n'), ((6423, 6438), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(8, 8)'], {}), '((8, 8))\n', (6430, 6438), False, 'from tensorflow.keras.layers import Input, Multiply, Add, Lambda, Dense, Dot, Reshape\n'), ((6495, 6500), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (6498, 6500), False, 'from tensorflow.keras.layers import Input, Multiply, Add, Lambda, Dense, Dot, Reshape\n'), ((7070, 7076), 'tensorflow.keras.layers.Dot', 'Dot', (['(1)'], {}), '(1)\n', (7073, 7076), False, 'from tensorflow.keras.layers import Input, Multiply, Add, Lambda, Dense, Dot, Reshape\n'), ((8435, 8441), 'tensorflow.keras.layers.Dot', 'Dot', (['(1)'], {}), '(1)\n', (8438, 8441), False, 'from tensorflow.keras.layers import Input, Multiply, Add, Lambda, Dense, Dot, Reshape\n'), ((9512, 9540), 'numpy.array', 'np.array', (['[[1, -1], [-1, 1]]'], {}), '([[1, -1], [-1, 1]])\n', (9520, 9540), True, 'import numpy as np\n'), ((11400, 11431), 'numpy.transpose', 'np.transpose', (['(B_matrix * EBC[1])'], {}), '(B_matrix * EBC[1])\n', (11412, 11431), True, 'import numpy as 
np\n'), ((11587, 11603), 'numpy.array', 'np.array', (['EBC[0]'], {}), '(EBC[0])\n', (11595, 11603), True, 'import numpy as np\n'), ((1154, 1181), 'tensorflow.keras.layers.Dense', 'Dense', (['(5)'], {'activation': '"""tanh"""'}), "(5, activation='tanh')\n", (1159, 1181), False, 'from tensorflow.keras.layers import Input, Multiply, Add, Lambda, Dense, Dot, Reshape\n'), ((1325, 1355), 'tensorflow.keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""linear"""'}), "(64, activation='linear')\n", (1330, 1355), False, 'from tensorflow.keras.layers import Input, Multiply, Add, Lambda, Dense, Dot, Reshape\n'), ((1729, 1756), 'tensorflow.keras.layers.Dense', 'Dense', (['(3)'], {'activation': '"""tanh"""'}), "(3, activation='tanh')\n", (1734, 1756), False, 'from tensorflow.keras.layers import Input, Multiply, Add, Lambda, Dense, Dot, Reshape\n'), ((1900, 1929), 'tensorflow.keras.layers.Dense', 'Dense', (['(8)'], {'activation': '"""linear"""'}), "(8, activation='linear')\n", (1905, 1929), False, 'from tensorflow.keras.layers import Input, Multiply, Add, Lambda, Dense, Dot, Reshape\n'), ((4866, 4919), 'tensorflow.constant', 'tf.constant', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])\n', (4877, 4919), True, 'import tensorflow as tf\n'), ((5704, 5768), 'numpy.asarray', 'np.asarray', (['elastic_stiffness'], {'dtype': 'elasticStiffnessLayer.dtype'}), '(elastic_stiffness, dtype=elasticStiffnessLayer.dtype)\n', (5714, 5768), True, 'import numpy as np\n'), ((6031, 6085), 'numpy.asarray', 'np.asarray', (['force_vector'], {'dtype': 'forceMatrixLayer.dtype'}), '(force_vector, dtype=forceMatrixLayer.dtype)\n', (6041, 6085), True, 'import numpy as np\n'), ((7284, 7296), 'tensorflow.keras.optimizers.RMSprop', 'RMSprop', (['(0.5)'], {}), '(0.5)\n', (7291, 7296), False, 'from tensorflow.keras.optimizers import RMSprop, Adam, SGD\n'), ((7751, 7815), 'numpy.asarray', 'np.asarray', (['elastic_stiffness'], {'dtype': 
'elasticStiffnessLayer.dtype'}), '(elastic_stiffness, dtype=elasticStiffnessLayer.dtype)\n', (7761, 7815), True, 'import numpy as np\n'), ((8078, 8132), 'numpy.asarray', 'np.asarray', (['force_vector'], {'dtype': 'forceMatrixLayer.dtype'}), '(force_vector, dtype=forceMatrixLayer.dtype)\n', (8088, 8132), True, 'import numpy as np\n'), ((8649, 8663), 'tensorflow.keras.optimizers.RMSprop', 'RMSprop', (['(0.005)'], {}), '(0.005)\n', (8656, 8663), False, 'from tensorflow.keras.optimizers import RMSprop, Adam, SGD\n'), ((9602, 9618), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (9610, 9618), True, 'import numpy as np\n'), ((6605, 6621), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['x'], {}), '(x)\n', (6618, 6621), True, 'import tensorflow as tf\n'), ((8243, 8259), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['x'], {}), '(x)\n', (8256, 8259), True, 'import tensorflow as tf\n'), ((13809, 13825), 'numpy.zeros', 'np.zeros', (['(Nx + 1)'], {}), '(Nx + 1)\n', (13817, 13825), True, 'import numpy as np\n'), ((13901, 13917), 'numpy.array', 'np.array', (['EBC[0]'], {}), '(EBC[0])\n', (13909, 13917), True, 'import numpy as np\n'), ((14039, 14055), 'numpy.array', 'np.array', (['EBC[0]'], {}), '(EBC[0])\n', (14047, 14055), True, 'import numpy as np\n'), ((14173, 14189), 'numpy.array', 'np.array', (['EBC[0]'], {}), '(EBC[0])\n', (14181, 14189), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.