code
stringlengths
31
1.05M
apis
list
extract_api
stringlengths
97
1.91M
from rsqsim_api.fault.multifault import RsqSimMultiFault, RsqSimSegment import multiprocessing as mp from typing import Union import h5py import netCDF4 as nc import numpy as np import random sentinel = None def multiprocess_gf_to_hdf(fault: Union[RsqSimSegment, RsqSimMultiFault], x_range: np.ndarray, y_range: np.ndarray, out_file_prefix: str, x_grid: np.ndarray = None, y_grid: np.ndarray = None, z_grid: np.ndarray = None, slip_magnitude: Union[float, int] = 1., num_processors: int = None, num_write: int = 8): assert all([isinstance(a, np.ndarray) for a in [x_range, y_range]]) assert all([x_range.ndim == 1, y_range.ndim == 1]) # Check sites arrays if all([a is not None for a in (x_grid, y_grid)]): assert all([isinstance(a, np.ndarray) for a in [x_grid, y_grid]]) assert x_grid.shape == (y_range.size, x_range.size) assert x_grid.shape == y_grid.shape assert x_grid.ndim <= 2 else: x_grid, y_grid = np.meshgrid(x_range, y_range) if z_grid is not None: assert isinstance(z_grid, np.ndarray) assert z_grid.shape == x_grid.shape else: z_grid = np.zeros(x_grid.shape) n_patches = len(fault.patch_dic) if x_grid.ndim == 2: x_array = x_grid.flatten() y_array = y_grid.flatten() z_array = z_grid.flatten() dset_shape = (n_patches, x_grid.shape[0], x_grid.shape[1]) else: x_array = x_grid y_array = y_grid z_array = z_grid dset_shape = (n_patches, x_grid.size) if num_processors is None: num_processes = int(np.round(mp.cpu_count() / 2)) else: assert isinstance(num_processors, int) num_processes = num_processors all_patch_ls = [] if isinstance(fault, RsqSimSegment): for patch in fault.patch_outlines: all_patch_ls.append([patch.patch_number, patch]) else: for patch_i, patch in fault.patch_dic.items(): all_patch_ls.append([patch_i, patch]) num_per_write = int(np.round(len(all_patch_ls) / num_write)) all_patches_with_write_indices = [] separate_write_index_dic = {} for i in range(num_write): range_min = i * num_per_write range_max = (i + 1) * num_per_write index_ls = [] for file_index, 
patch_tuple in enumerate(all_patch_ls[range_min:range_max]): new_ls = [i, file_index] + patch_tuple all_patches_with_write_indices.append(new_ls) index_ls.append(patch_tuple[0]) separate_write_index_dic[i] = np.array(index_ls) random.shuffle(all_patches_with_write_indices) out_queue_dic = {} out_proc_ls = [] for i in range(num_write): patch_indices = separate_write_index_dic[i] dset_shape_i = (len(patch_indices), dset_shape[1], dset_shape[-1]) out_queue = mp.Queue(maxsize=1000) out_file_name = out_file_prefix + "{:d}.nc".format(i) out_queue_dic[i] = out_queue output_proc = mp.Process(target=handle_output_netcdf, args=(out_queue, separate_write_index_dic[i], out_file_name, dset_shape_i, x_range, y_range)) out_proc_ls.append(output_proc) output_proc.start() jobs = [] in_queue = mp.Queue() for i in range(num_processes): p = mp.Process(target=patch_greens_functions, args=(in_queue, x_array, y_array, z_array, out_queue_dic, dset_shape, slip_magnitude)) jobs.append(p) p.start() for row in all_patches_with_write_indices: file_no, file_index, patch_index, patch = row in_queue.put((file_no, file_index, patch_index, patch)) for i in range(num_processes): in_queue.put(sentinel) for p in jobs: p.join() for i in range(num_write): out_queue_dic[i].put(sentinel) out_proc_ls[i].join() in_queue.close() for i in range(num_write): out_queue_dic[i].close() def handle_output(output_queue: mp.Queue, output_file: str, dset_shape: tuple): f = h5py.File(output_file, "w") disp_dset = f.create_dataset("ssd_1m", shape=dset_shape, dtype="f") while True: args = output_queue.get() if args: index, vert_disp = args disp_dset[index] = vert_disp else: break f.close() def handle_output_netcdf(output_queue: mp.Queue, patch_indices: np.ndarray, output_file: str, dset_shape: tuple, x_range: np.ndarray, y_range: np.ndarray): assert len(dset_shape) == 3 assert len(patch_indices) == dset_shape[0] dset = nc.Dataset(output_file, "w") dset.set_always_mask(False) for dim, dim_len in zip(("npatch", "y", "x"), 
dset_shape): dset.createDimension(dim, dim_len) patch_var = dset.createVariable("index", np.int, ("npatch",)) dset.createVariable("x", np.float32, ("x",)) dset.createVariable("y", np.float32, ("y",)) dset["x"][:] = x_range dset["y"][:] = y_range patch_var[:] = patch_indices ssd = dset.createVariable("ssd", np.float32, ("npatch", "y", "x"), least_significant_digit=4) counter = 0 num_patch = len(patch_indices) while True: args = output_queue.get() if args: index, patch_index, vert_disp = args assert patch_index in patch_indices ssd[index] = vert_disp counter += 1 print("{:d}/{:d} complete".format(counter, num_patch)) else: break dset.close() def patch_greens_functions(in_queue: mp.Queue, x_sites: np.ndarray, y_sites: np.ndarray, z_sites: np.ndarray, out_queue_dic: dict, grid_shape: tuple, slip_magnitude: Union[int, float] = 1): while True: queue_contents = in_queue.get() if queue_contents: file_no, file_index, patch_number, patch = queue_contents out_queue_dic[file_no].put((file_index, patch_number, patch.calculate_tsunami_greens_functions(x_sites, y_sites, z_sites, grid_shape, ))) else: break
[ "random.shuffle", "multiprocessing.Process", "netCDF4.Dataset", "multiprocessing.cpu_count", "h5py.File", "numpy.array", "numpy.zeros", "numpy.meshgrid", "multiprocessing.Queue" ]
[((2635, 2681), 'random.shuffle', 'random.shuffle', (['all_patches_with_write_indices'], {}), '(all_patches_with_write_indices)\n', (2649, 2681), False, 'import random\n'), ((3354, 3364), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (3362, 3364), True, 'import multiprocessing as mp\n'), ((4152, 4179), 'h5py.File', 'h5py.File', (['output_file', '"""w"""'], {}), "(output_file, 'w')\n", (4161, 4179), False, 'import h5py\n'), ((4717, 4745), 'netCDF4.Dataset', 'nc.Dataset', (['output_file', '"""w"""'], {}), "(output_file, 'w')\n", (4727, 4745), True, 'import netCDF4 as nc\n'), ((1026, 1055), 'numpy.meshgrid', 'np.meshgrid', (['x_range', 'y_range'], {}), '(x_range, y_range)\n', (1037, 1055), True, 'import numpy as np\n'), ((1201, 1223), 'numpy.zeros', 'np.zeros', (['x_grid.shape'], {}), '(x_grid.shape)\n', (1209, 1223), True, 'import numpy as np\n'), ((2611, 2629), 'numpy.array', 'np.array', (['index_ls'], {}), '(index_ls)\n', (2619, 2629), True, 'import numpy as np\n'), ((2906, 2928), 'multiprocessing.Queue', 'mp.Queue', ([], {'maxsize': '(1000)'}), '(maxsize=1000)\n', (2914, 2928), True, 'import multiprocessing as mp\n'), ((3050, 3192), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'handle_output_netcdf', 'args': '(out_queue, separate_write_index_dic[i], out_file_name, dset_shape_i,\n x_range, y_range)'}), '(target=handle_output_netcdf, args=(out_queue,\n separate_write_index_dic[i], out_file_name, dset_shape_i, x_range, y_range)\n )\n', (3060, 3192), True, 'import multiprocessing as mp\n'), ((3412, 3544), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'patch_greens_functions', 'args': '(in_queue, x_array, y_array, z_array, out_queue_dic, dset_shape, slip_magnitude\n )'}), '(target=patch_greens_functions, args=(in_queue, x_array, y_array,\n z_array, out_queue_dic, dset_shape, slip_magnitude))\n', (3422, 3544), True, 'import multiprocessing as mp\n'), ((1660, 1674), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1672, 
1674), True, 'import multiprocessing as mp\n')]
import numpy as np import pandas as pd import pytest import tabmat as tm @pytest.fixture() def X(): df = pd.read_pickle("tests/real_matrix.pkl") X_split = tm.from_pandas(df, np.float64) wts = np.ones(df.shape[0]) / df.shape[0] X_std = X_split.standardize(wts, True, True)[0] return X_std def test_full_sandwich(X): X_dense = tm.DenseMatrix(X.toarray()) r = np.random.rand(X.shape[0]) simple = X_dense.sandwich(r) fancy = X.sandwich(r) np.testing.assert_almost_equal(simple, fancy, 12) def test_split_sandwich_rows_cols(X): X_split = X.mat X_split_dense = tm.DenseMatrix(X_split.toarray()) r = np.random.rand(X.shape[0]) rows = np.arange(X.shape[0]) cols = np.arange(X.shape[1]) simple = X_split_dense.sandwich(r, rows, cols) fancy = X_split.sandwich(r, rows, cols) np.testing.assert_almost_equal(simple, fancy, 12)
[ "pandas.read_pickle", "numpy.random.rand", "numpy.ones", "numpy.testing.assert_almost_equal", "tabmat.from_pandas", "pytest.fixture", "numpy.arange" ]
[((77, 93), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (91, 93), False, 'import pytest\n'), ((112, 151), 'pandas.read_pickle', 'pd.read_pickle', (['"""tests/real_matrix.pkl"""'], {}), "('tests/real_matrix.pkl')\n", (126, 151), True, 'import pandas as pd\n'), ((166, 196), 'tabmat.from_pandas', 'tm.from_pandas', (['df', 'np.float64'], {}), '(df, np.float64)\n', (180, 196), True, 'import tabmat as tm\n'), ((390, 416), 'numpy.random.rand', 'np.random.rand', (['X.shape[0]'], {}), '(X.shape[0])\n', (404, 416), True, 'import numpy as np\n'), ((480, 529), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['simple', 'fancy', '(12)'], {}), '(simple, fancy, 12)\n', (510, 529), True, 'import numpy as np\n'), ((652, 678), 'numpy.random.rand', 'np.random.rand', (['X.shape[0]'], {}), '(X.shape[0])\n', (666, 678), True, 'import numpy as np\n'), ((690, 711), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (699, 711), True, 'import numpy as np\n'), ((723, 744), 'numpy.arange', 'np.arange', (['X.shape[1]'], {}), '(X.shape[1])\n', (732, 744), True, 'import numpy as np\n'), ((844, 893), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['simple', 'fancy', '(12)'], {}), '(simple, fancy, 12)\n', (874, 893), True, 'import numpy as np\n'), ((207, 227), 'numpy.ones', 'np.ones', (['df.shape[0]'], {}), '(df.shape[0])\n', (214, 227), True, 'import numpy as np\n')]
import gym import gym.spaces import json import numpy as np import os import socket gym_version = tuple(int(x) for x in gym.__version__.split('.')) class Channel: def __init__(self): self.sock = None self.dirty = False self._value = None self.annotations = {} def set_socket(self, sock): self.sock = sock def set_base(self, base): pass def parse(self, value): return value def unparse(self, value): return value @property def value(self): return self.unparse(self._value) @value.setter def value(self, value): self._value = self.parse(value) self.dirty = True def serialize(self): return self._value def deserialize(self, value): self._value = self.parse(value) self.dirty = False @staticmethod def make(type, shape, annotations): types = { 'int': IntChannel, 'float': FloatChannel, 'bool': BoolChannel, 'int_fold': IntFoldChannel, 'np': NpChannel, } cls = types[type] if shape: ob = cls(*eval(shape, {}, {'dtype': np.dtype})) else: ob = cls() if annotations: for key, value in annotations.items(): ob.annotate(key, value) return ob def annotate(self, name, value): self.annotations[name] = str(value) class IntChannel(Channel): TYPE = 'int' SHAPE = None def parse(self, value): return int(value) class FloatChannel(Channel): TYPE = 'float' SHAPE = None def parse(self, value): return float(value) class BoolChannel(Channel): TYPE = 'bool' SHAPE = None def parse(self, value): return bool(value) class IntFoldChannel(Channel): TYPE = 'int_fold' def __init__(self, folds, dtype=np.int8): super(IntFoldChannel, self).__init__() self.folds = np.multiply.accumulate([1] + list(folds)[:-1], dtype=int) self.ranges = np.array(folds, dtype=int) self.dtype = dtype self.SHAPE = str(folds) + ',' def parse(self, value): folded = np.dot(self.folds, value % self.ranges) return int(folded) def unparse(self, value): if value is None: return None unfolded = np.full(self.ranges.shape, value) // self.folds % self.ranges return unfolded.astype(self.dtype) def deserialize(self, value): self._value = int(value) 
self.dirty = False class NpChannel(Channel): TYPE = 'np' def __init__(self, shape, dtype): super(NpChannel, self).__init__() self.SHAPE = '%s, %s' % (shape, 'dtype("%s")' % np.dtype(dtype).str) self.shape = shape self.dtype = dtype def set_base(self, base): self._value = np.memmap(base, mode='w+', dtype=self.dtype, shape=self.shape) @property def value(self): return self._value @value.setter def value(self, value): np.copyto(self._value, value) self.dirty = True def serialize(self): return True def deserialize(self, value): self.dirty = False class Bridge: Timeout = socket.timeout Closed = BrokenPipeError def __init__(self, base): self.base = base self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) def close(message): self.close() if 'exception' in message: import gym_remote.exceptions as gre exception = gre.make(message['exception'], message['reason']) else: exception = self.Closed(message['reason']) raise exception def exception(message): from . import exceptions as gre raise gre.make(message['exception'], message['reason']) self._channels = {} self.connection = None self._buffer = [] self._message_handlers = { 'update': self.update_vars, 'close': close, 'exception': exception } def __del__(self): self.close() def add_channel(self, name, channel): if name in self._channels: raise KeyError(name) self._channels[name] = channel channel.set_base(os.path.join(self.base, name)) return channel def wrap(self, name, space): channel = None if isinstance(space, gym.spaces.MultiBinary): if space.n < 64: channel = IntFoldChannel([2] * space.n, np.uint8) else: channel = NpChannel((space.n,), np.uint8) channel.annotate('n', space.n) channel.annotate('type', 'MultiBinary') elif isinstance(space, gym.spaces.Discrete): channel = IntChannel() channel.annotate('n', space.n) channel.annotate('type', 'Discrete') elif isinstance(space, gym.spaces.MultiDiscrete): if gym_version >= (0, 9, 6): channel = NpChannel(space.shape, np.int64) channel.annotate('shape', space.shape[0]) 
else: channel = NpChannel((space.shape,), np.int64) channel.annotate('shape', space.shape) channel.annotate('type', 'MultiDiscrete') elif isinstance(space, gym.spaces.Box): channel = NpChannel(space.shape, space.high.dtype) channel.annotate('type', 'Box') channel.annotate('shape', space.shape) if not channel: raise NotImplementedError('Unsupported space') return self.add_channel(name, channel) @staticmethod def unwrap(space): if space.annotations['type'] == 'MultiBinary': return gym.spaces.MultiBinary(int(space.annotations['n'])) if space.annotations['type'] == 'Discrete': return gym.spaces.Discrete(int(space.annotations['n'])) if space.annotations['type'] == 'MultiDiscrete': if gym_version >= (0, 9, 6): return gym.spaces.MultiDiscrete(space.shape[0]) else: return gym.spaces.MultiDiscrete(space.shape) if space.annotations['type'] == 'Box': kwargs = {} if gym_version >= (0, 9, 6): kwargs['dtype'] = space.dtype return gym.spaces.Box(low=0, high=255, shape=space.shape, **kwargs) def configure_channels(self, channel_info): for name, info in channel_info.items(): self._channels[name] = Channel.make(*info) def describe_channels(self): description = {} for name, channel in self._channels.items(): description[name] = (channel.TYPE, channel.SHAPE, channel.annotations) return description def listen(self): sock_path = os.path.join(self.base, 'sock') self.sock.bind(sock_path) self.sock.listen(1) def connect(self): sock_path = os.path.join(self.base, 'sock') self.sock.connect(sock_path) self.connection = self.sock def server_accept(self): self.connection, _ = self.sock.accept() for name, channel in self._channels.items(): channel.set_socket(self.connection) description = self.describe_channels() self._send_message('description', description) def configure_client(self): description = self._recv_message() assert description['type'] == 'description' self.configure_channels(description['content']) for name, channel in self._channels.items(): channel.set_socket(self.connection) 
channel.set_base(os.path.join(self.base, name)) return dict(self._channels) def _try_send(self, type, content): try: self._send_message(type, content) except self.Closed as e: try: while True: self.recv() except self.Closed as f: e = f self.close() raise e def _send_message(self, type, content): if not self.connection: raise self.Closed message = { 'type': type, 'content': content } # All messages end in a form feed message = json.dumps(message) + '\f' self.connection.sendall(message.encode('utf8')) def _recv_message(self): if not self.connection: raise self.Closed while len(self._buffer) < 2: # There are no fully buffered messages message = self.connection.recv(4096) if not message: raise self.Closed message = message.split(b'\f') if self._buffer: self._buffer[-1] += message.pop(0) self._buffer.extend(message) message = self._buffer.pop(0) return json.loads(message.decode('utf8')) def update_vars(self, vars): for name, value in vars.items(): self._channels[name].deserialize(value) def send(self): content = {} for name, channel in self._channels.items(): if channel.dirty: content[name] = channel.serialize() self._try_send('update', content) def recv(self): message = self._recv_message() if not message: raise self.Closed self._message_handlers[message['type']](message['content']) return True def close(self, reason=None, exception=None): if self.sock: try: kwargs = {'reason': reason} if exception: kwargs['exception'] = exception.ID self._send_message('close', kwargs) except self.Closed: pass self.sock.close() if self.sock and self.connection != self.sock: if self.connection: self.connection.close() try: os.unlink(os.path.join(self.base, 'sock')) except OSError: pass for name, channel in self._channels.items(): try: os.unlink(os.path.join(self.base, name)) except OSError: pass self.connection = None self.sock = None def exception(self, exception, reason=None): content = {'reason': reason, 'exception': exception.ID} self._try_send('exception', content) def 
settimeout(self, timeout): self.sock.settimeout(timeout) if self.connection: self.connection.settimeout(timeout) def __del__(self): self.close()
[ "numpy.copyto", "socket.socket", "gym.spaces.MultiDiscrete", "numpy.memmap", "os.path.join", "json.dumps", "gym.spaces.Box", "numpy.array", "numpy.dot", "gym_remote.exceptions.make", "gym.__version__.split", "numpy.full", "numpy.dtype" ]
[((2072, 2098), 'numpy.array', 'np.array', (['folds'], {'dtype': 'int'}), '(folds, dtype=int)\n', (2080, 2098), True, 'import numpy as np\n'), ((2210, 2249), 'numpy.dot', 'np.dot', (['self.folds', '(value % self.ranges)'], {}), '(self.folds, value % self.ranges)\n', (2216, 2249), True, 'import numpy as np\n'), ((2886, 2948), 'numpy.memmap', 'np.memmap', (['base'], {'mode': '"""w+"""', 'dtype': 'self.dtype', 'shape': 'self.shape'}), "(base, mode='w+', dtype=self.dtype, shape=self.shape)\n", (2895, 2948), True, 'import numpy as np\n'), ((3067, 3096), 'numpy.copyto', 'np.copyto', (['self._value', 'value'], {}), '(self._value, value)\n', (3076, 3096), True, 'import numpy as np\n'), ((3381, 3430), 'socket.socket', 'socket.socket', (['socket.AF_UNIX', 'socket.SOCK_STREAM'], {}), '(socket.AF_UNIX, socket.SOCK_STREAM)\n', (3394, 3430), False, 'import socket\n'), ((6896, 6927), 'os.path.join', 'os.path.join', (['self.base', '"""sock"""'], {}), "(self.base, 'sock')\n", (6908, 6927), False, 'import os\n'), ((7034, 7065), 'os.path.join', 'os.path.join', (['self.base', '"""sock"""'], {}), "(self.base, 'sock')\n", (7046, 7065), False, 'import os\n'), ((121, 147), 'gym.__version__.split', 'gym.__version__.split', (['"""."""'], {}), "('.')\n", (142, 147), False, 'import gym\n'), ((3854, 3903), 'gym_remote.exceptions.make', 'gre.make', (["message['exception']", "message['reason']"], {}), "(message['exception'], message['reason'])\n", (3862, 3903), True, 'import gym_remote.exceptions as gre\n'), ((4358, 4387), 'os.path.join', 'os.path.join', (['self.base', 'name'], {}), '(self.base, name)\n', (4370, 4387), False, 'import os\n'), ((6418, 6478), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': 'space.shape'}), '(low=0, high=255, shape=space.shape, **kwargs)\n', (6432, 6478), False, 'import gym\n'), ((8369, 8388), 'json.dumps', 'json.dumps', (['message'], {}), '(message)\n', (8379, 8388), False, 'import json\n'), ((2377, 2410), 'numpy.full', 'np.full', 
(['self.ranges.shape', 'value'], {}), '(self.ranges.shape, value)\n', (2384, 2410), True, 'import numpy as np\n'), ((3604, 3653), 'gym_remote.exceptions.make', 'gre.make', (["message['exception']", "message['reason']"], {}), "(message['exception'], message['reason'])\n", (3612, 3653), True, 'import gym_remote.exceptions as gre\n'), ((6121, 6161), 'gym.spaces.MultiDiscrete', 'gym.spaces.MultiDiscrete', (['space.shape[0]'], {}), '(space.shape[0])\n', (6145, 6161), False, 'import gym\n'), ((6203, 6240), 'gym.spaces.MultiDiscrete', 'gym.spaces.MultiDiscrete', (['space.shape'], {}), '(space.shape)\n', (6227, 6240), False, 'import gym\n'), ((7734, 7763), 'os.path.join', 'os.path.join', (['self.base', 'name'], {}), '(self.base, name)\n', (7746, 7763), False, 'import os\n'), ((10067, 10098), 'os.path.join', 'os.path.join', (['self.base', '"""sock"""'], {}), "(self.base, 'sock')\n", (10079, 10098), False, 'import os\n'), ((2758, 2773), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (2766, 2773), True, 'import numpy as np\n'), ((10257, 10286), 'os.path.join', 'os.path.join', (['self.base', 'name'], {}), '(self.base, name)\n', (10269, 10286), False, 'import os\n')]
from __future__ import print_function # Copyright (c) 2015-2016, Danish Geodata Agency <<EMAIL>> # Copyright (c) 2016, Danish Agency for Data Supply and Efficiency <<EMAIL>> # # Permission to use, copy, modify, and/or distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # import os,sys from osgeo import gdal import numpy as np def WriteRaster(fname,A,geo,dtype=gdal.GDT_Float32,nd_value=None,colortable=None): gdal.AllRegister() driver=gdal.GetDriverByName("GTiff") if os.path.exists(fname): try: driver.Delete(fname) except Exception as msg: print(msg) else: print("Overwriting %s..." %fname) else: print("Saving %s..."%fname) dst_ds=driver.Create(fname,A.shape[1],A.shape[0],1,dtype) dst_ds.SetGeoTransform(geo) band=dst_ds.GetRasterBand(1) if nd_value is not None: band.SetNoDataValue(nd_value) band.WriteArray(A) dst_ds=None def main(args): grid=np.load(args[1]).astype(np.float32) geo_ref_name=args[2] outname=args[3] geo_ref=np.loadtxt(geo_ref_name) WriteRaster(outname,grid,geo_ref,nd_value=-999) if __name__=="__main__": main(sys.argv)
[ "os.path.exists", "osgeo.gdal.AllRegister", "numpy.loadtxt", "numpy.load", "osgeo.gdal.GetDriverByName" ]
[((1056, 1074), 'osgeo.gdal.AllRegister', 'gdal.AllRegister', ([], {}), '()\n', (1072, 1074), False, 'from osgeo import gdal\n'), ((1084, 1113), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (1104, 1113), False, 'from osgeo import gdal\n'), ((1119, 1140), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (1133, 1140), False, 'import os, sys\n'), ((1636, 1660), 'numpy.loadtxt', 'np.loadtxt', (['geo_ref_name'], {}), '(geo_ref_name)\n', (1646, 1660), True, 'import numpy as np\n'), ((1549, 1565), 'numpy.load', 'np.load', (['args[1]'], {}), '(args[1])\n', (1556, 1565), True, 'import numpy as np\n')]
""" =========================== Plotting feature importance =========================== A simple example showing how to compute and display feature importances, it is also compared with the feature importances obtained using random forests. Feature importance is a measure of the effect of the features on the outputs. For each feature, the values go from 0 to 1 where a higher the value means that the feature will have a higher effect on the outputs. Currently three criteria are supported : 'gcv', 'rss' and 'nb_subsets'. See [1], section 12.3 for more information about the criteria. .. [1] http://www.milbo.org/doc/earth-notes.pdf """ import numpy import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestRegressor from pyearth import Earth # Create some fake data numpy.random.seed(2) m = 10000 n = 10 X = numpy.random.uniform(size=(m, n)) y = (10 * numpy.sin(numpy.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4] + numpy.random.uniform(size=m)) # Fit an Earth model criteria = ('rss', 'gcv', 'nb_subsets') model = Earth(max_degree=3, max_terms=10, minspan_alpha=.5, feature_importance_type=criteria, verbose=True) model.fit(X, y) rf = RandomForestRegressor() rf.fit(X, y) # Print the model print(model.trace()) print(model.summary()) print(model.summary_feature_importances(sort_by='gcv')) # Plot the feature importances importances = model.feature_importances_ importances['random_forest'] = rf.feature_importances_ criteria = criteria + ('random_forest',) idx = 1 fig = plt.figure(figsize=(20, 10)) labels = ['$x_{}$'.format(i) for i in range(n)] for crit in criteria: plt.subplot(2, 2, idx) plt.bar(numpy.arange(len(labels)), importances[crit], align='center', color='red') plt.xticks(numpy.arange(len(labels)), labels) plt.title(crit) plt.ylabel('importances') idx += 1 title = '$x_0,...x_9 \sim \mathcal{N}(0, 1)$\n$y= 10sin(\pi x_{0}x_{1}) + 20(x_2 - 0.5)^2 + 10x_3 + 5x_4 + Unif(0, 1)$' fig.suptitle(title, fontsize="x-large") plt.show()
[ "sklearn.ensemble.RandomForestRegressor", "pyearth.Earth", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.figure", "numpy.random.seed", "numpy.random.uniform", "numpy.sin", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show" ]
[((793, 813), 'numpy.random.seed', 'numpy.random.seed', (['(2)'], {}), '(2)\n', (810, 813), False, 'import numpy\n'), ((836, 869), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': '(m, n)'}), '(size=(m, n))\n', (856, 869), False, 'import numpy\n'), ((1093, 1197), 'pyearth.Earth', 'Earth', ([], {'max_degree': '(3)', 'max_terms': '(10)', 'minspan_alpha': '(0.5)', 'feature_importance_type': 'criteria', 'verbose': '(True)'}), '(max_degree=3, max_terms=10, minspan_alpha=0.5,\n feature_importance_type=criteria, verbose=True)\n', (1098, 1197), False, 'from pyearth import Earth\n'), ((1270, 1293), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (1291, 1293), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1609, 1637), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (1619, 1637), True, 'import matplotlib.pyplot as plt\n'), ((2131, 2141), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2139, 2141), True, 'import matplotlib.pyplot as plt\n'), ((994, 1022), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': 'm'}), '(size=m)\n', (1014, 1022), False, 'import numpy\n'), ((1712, 1734), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', 'idx'], {}), '(2, 2, idx)\n', (1723, 1734), True, 'import matplotlib.pyplot as plt\n'), ((1912, 1927), 'matplotlib.pyplot.title', 'plt.title', (['crit'], {}), '(crit)\n', (1921, 1927), True, 'import matplotlib.pyplot as plt\n'), ((1932, 1957), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""importances"""'], {}), "('importances')\n", (1942, 1957), True, 'import matplotlib.pyplot as plt\n'), ((880, 919), 'numpy.sin', 'numpy.sin', (['(numpy.pi * X[:, 0] * X[:, 1])'], {}), '(numpy.pi * X[:, 0] * X[:, 1])\n', (889, 919), False, 'import numpy\n')]
#!/usr/bin/env python """ Displaying large NumPy arrays with TabularEditor A demonstration of how the TabularEditor can be used to display (large) NumPy arrays, in this case 100,000 random 3D points from a unit cube. In addition to showing the coordinates of each point, it also displays the index of each point in the array, as well as a red flag if the point lies within 0.25 of the center of the cube. """ #-- Imports -------------------------------------------------------------- from numpy import sqrt from numpy.random import random from traits.api import HasTraits, Property, Array, Font from traitsui.api import View, Item, TabularEditor from traitsui.tabular_adapter import TabularAdapter #-- Tabular Adapter Definition ------------------------------------------- class ArrayAdapter(TabularAdapter): columns = [('i', 'index'), ('x', 0), ('y', 1), ('z', 2)] # Font fails with wx in OSX; see traitsui issue #13: # font = Font('Courier 10') alignment = 'right' format = '%.4f' index_text = Property index_image = Property def _get_index_text(self): return str(self.row) def _get_index_image(self): x, y, z = self.item if sqrt((x - 0.5) ** 2 + (y - 0.5) ** 2 + (z - 0.5) ** 2) <= 0.25: return '@icons:red_ball' return None #-- ShowArray Class Definition ------------------------------------------- class ShowArray(HasTraits): data = Array view = View( Item('data', show_label=False, style='readonly', editor=TabularEditor(adapter=ArrayAdapter()) ), title='Array Viewer', width=0.3, height=0.8, resizable=True ) # Create the demo: demo = ShowArray(data=random((100000, 3))) # Run the demo (if invoked from the command line): if __name__ == '__main__': demo.configure_traits()
[ "numpy.random.random", "numpy.sqrt" ]
[((1767, 1786), 'numpy.random.random', 'random', (['(100000, 3)'], {}), '((100000, 3))\n', (1773, 1786), False, 'from numpy.random import random\n'), ((1206, 1260), 'numpy.sqrt', 'sqrt', (['((x - 0.5) ** 2 + (y - 0.5) ** 2 + (z - 0.5) ** 2)'], {}), '((x - 0.5) ** 2 + (y - 0.5) ** 2 + (z - 0.5) ** 2)\n', (1210, 1260), False, 'from numpy import sqrt\n')]
#========= import time import numpy as np import matplotlib.pyplot as plt from moviepy.editor import VideoClip from moviepy.video.io.bindings import mplfig_to_npimage fps = 2 f_dt = 1/fps fig, ax = plt.subplots( figsize=(6,6), facecolor=[1,1,1] ) x = np.arange(0, 2*np.pi, 0.01) line, = ax.plot(x, np.sin(x), lw=3) def make_frame(t): line.set_ydata(np.sin(x+2*t)) # update the data return mplfig_to_npimage(fig) anim = VideoClip(make_frame, duration=10) t = time.time() anim.write_videofile("test_mpl_mpy.mp4", fps=30) print ("Animation with MoviePy : %.02f seconds"%(time.time() - t))
[ "moviepy.video.io.bindings.mplfig_to_npimage", "moviepy.editor.VideoClip", "numpy.sin", "time.time", "matplotlib.pyplot.subplots", "numpy.arange" ]
[((205, 254), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)', 'facecolor': '[1, 1, 1]'}), '(figsize=(6, 6), facecolor=[1, 1, 1])\n', (217, 254), True, 'import matplotlib.pyplot as plt\n'), ((258, 287), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(0.01)'], {}), '(0, 2 * np.pi, 0.01)\n', (267, 287), True, 'import numpy as np\n'), ((437, 471), 'moviepy.editor.VideoClip', 'VideoClip', (['make_frame'], {'duration': '(10)'}), '(make_frame, duration=10)\n', (446, 471), False, 'from moviepy.editor import VideoClip\n'), ((477, 488), 'time.time', 'time.time', ([], {}), '()\n', (486, 488), False, 'import time\n'), ((305, 314), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (311, 314), True, 'import numpy as np\n'), ((406, 428), 'moviepy.video.io.bindings.mplfig_to_npimage', 'mplfig_to_npimage', (['fig'], {}), '(fig)\n', (423, 428), False, 'from moviepy.video.io.bindings import mplfig_to_npimage\n'), ((361, 378), 'numpy.sin', 'np.sin', (['(x + 2 * t)'], {}), '(x + 2 * t)\n', (367, 378), True, 'import numpy as np\n'), ((587, 598), 'time.time', 'time.time', ([], {}), '()\n', (596, 598), False, 'import time\n')]
import cPickle as pickle from shapely.geometry import Point, Polygon import numpy as np from netCDF4 import Dataset from scipy.interpolate import griddata p_lev = 925 geopotential, longitude_dom, latitude_dom, time_dom, time_hour = pickle.load\ (open('/nfs/a90/eepdw/Data/Saved_data/era_i/era_i_emb_time_update_large_geopotential.p', 'rb')) pressure_levels = pickle.load(open('/nfs/a90/eepdw/Data/Saved_data/era_i/era_i_emb_pressure_levels.p', 'rb')) # Monsoon Trough and Ganga Basin combined polygon = Polygon(((73., 21.), (83., 16.), (87., 22.), (90.,22.), (90.,23.8), (83., 24.2), (76.3, 28.))) # Ganga Basin #polygon = Polygon(((87., 22), (75., 27), (76.3, 30.), (83, 26.2), (90, 25.8), (90., 22))) lons_data= longitude_dom[0] lats_data = latitude_dom[0] lons_data,lats_data = np.meshgrid(lons_data, lats_data) # Find points that are within defined polygon points = np.array([[long,lat] for long, lat in zip(lons_data.flatten(), lats_data.flatten())]) intersects = np.array(map(polygon.intersects, map(Point, points))).reshape(lons_data.shape) p_lev_idx = np.where(pressure_levels==p_lev) geopotential_polygon = geopotential[:, p_lev_idx, intersects] # Do the same for surface geopotential (still in netcdf format) #nc = Dataset('/nfs/a90/eepdw/Data/Era_Interim/LandSeaMask/Land_Sea_Mask.nc') nc = Dataset('/nfs/a90/eepdw/Data/Era_Interim/Orography/era_i_geopotential.nc') ''' ECMWF give orography as geopotential, which is apparently converted to height by using the WMO gravity constant 9.80665. 
Ithought latitude would affect it as well but no mention http://www.ecmwf.int/en/geopotential-defined-units-m2/s2-both-pressure-levels-and-surface-orography-how-can-height-metres ''' lons,lats = np.meshgrid(nc.variables['longitude'][:], nc.variables['latitude'][:]) oro_regrid = griddata((lats.flatten(), lons.flatten()), nc.variables['z'][:].flatten(), (lats_data,lons_data), method='linear') oro_polygon = oro_regrid[intersects] vals = np.where(geopotential_polygon>(oro_polygon/9.80665), geopotential_polygon, np.nan) min_geop_full_time = np.min(vals,axis=-1)[:,0] day_mean_min_geop = [np.mean(min_geop_full_time[np.where(time_dom==day)]) for day in np.unique(time_dom)] np.savez( '/nfs/a90/eepdw/Data/Era_Interim/Era_interim_TimeVar_on_p_levs_mean_by_day_land_domain_'\ 'constrain__and_oro_not_greater_than_data_monsoon_trough_%s' % p_lev, data=day_mean_min_geop, time_coords=np.unique(time_dom), pressures=pressure_levels[p_lev_idx])
[ "numpy.unique", "numpy.where", "netCDF4.Dataset", "shapely.geometry.Polygon", "numpy.min", "numpy.meshgrid" ]
[((558, 670), 'shapely.geometry.Polygon', 'Polygon', (['((73.0, 21.0), (83.0, 16.0), (87.0, 22.0), (90.0, 22.0), (90.0, 23.8), (\n 83.0, 24.2), (76.3, 28.0))'], {}), '(((73.0, 21.0), (83.0, 16.0), (87.0, 22.0), (90.0, 22.0), (90.0, \n 23.8), (83.0, 24.2), (76.3, 28.0)))\n', (565, 670), False, 'from shapely.geometry import Point, Polygon\n'), ((839, 872), 'numpy.meshgrid', 'np.meshgrid', (['lons_data', 'lats_data'], {}), '(lons_data, lats_data)\n', (850, 872), True, 'import numpy as np\n'), ((1122, 1156), 'numpy.where', 'np.where', (['(pressure_levels == p_lev)'], {}), '(pressure_levels == p_lev)\n', (1130, 1156), True, 'import numpy as np\n'), ((1368, 1442), 'netCDF4.Dataset', 'Dataset', (['"""/nfs/a90/eepdw/Data/Era_Interim/Orography/era_i_geopotential.nc"""'], {}), "('/nfs/a90/eepdw/Data/Era_Interim/Orography/era_i_geopotential.nc')\n", (1375, 1442), False, 'from netCDF4 import Dataset\n'), ((1766, 1836), 'numpy.meshgrid', 'np.meshgrid', (["nc.variables['longitude'][:]", "nc.variables['latitude'][:]"], {}), "(nc.variables['longitude'][:], nc.variables['latitude'][:])\n", (1777, 1836), True, 'import numpy as np\n'), ((2011, 2099), 'numpy.where', 'np.where', (['(geopotential_polygon > oro_polygon / 9.80665)', 'geopotential_polygon', 'np.nan'], {}), '(geopotential_polygon > oro_polygon / 9.80665, geopotential_polygon,\n np.nan)\n', (2019, 2099), True, 'import numpy as np\n'), ((2116, 2137), 'numpy.min', 'np.min', (['vals'], {'axis': '(-1)'}), '(vals, axis=-1)\n', (2122, 2137), True, 'import numpy as np\n'), ((2227, 2246), 'numpy.unique', 'np.unique', (['time_dom'], {}), '(time_dom)\n', (2236, 2246), True, 'import numpy as np\n'), ((2468, 2487), 'numpy.unique', 'np.unique', (['time_dom'], {}), '(time_dom)\n', (2477, 2487), True, 'import numpy as np\n'), ((2190, 2215), 'numpy.where', 'np.where', (['(time_dom == day)'], {}), '(time_dom == day)\n', (2198, 2215), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*- import wx import ui import numpy as np class wxFrame(wx.Frame): def __init__(self): wx.Frame.__init__(self,parent=None,title="ABC",size=(600,450)) self.init_ctrls() self.SetBackgroundColour("#E5E5E5") self.Show() def init_ctrls(self): self.msz = wx.BoxSizer(wx.VERTICAL) self.numsz = wx.BoxSizer(wx.HORIZONTAL) self.dep = wx.StaticBox(self, -1, 'Datos de entrada') self.desz = wx.StaticBoxSizer(self.dep, wx.VERTICAL) self.dsp = wx.StaticBox(self, -1, 'Datos de salida') self.dssz = wx.StaticBoxSizer(self.dsp, wx.VERTICAL) lb = wx.StaticText(self, -1, u"Número de elementos", size=(120,-1)) self.numel = wx.TextCtrl(self, -1, "", size=(80,-1)) self.oknumel = wx.Button(self, -1, "OK", size=(40,-1)) # Datos de entrada self.input_data = ui.DataGrid(self,(5,5)) self.desz.Add(self.input_data, 1, wx.EXPAND) # Datos de salida self.output_data = ui.DataGrid(self,(5,2)) self.dssz.Add(self.output_data, 1, wx.EXPAND) # Botón calcular self.calc = wx.Button(self, -1, "Calcular") self.numsz.Add(lb, 0, wx.ALIGN_CENTRE|wx.ALL, 5) self.numsz.Add(self.numel, 0, wx.ALIGN_CENTRE|wx.ALL, 5) self.numsz.Add(self.oknumel, 0, wx.ALIGN_CENTRE|wx.ALL, 5) self.msz.Add(self.numsz, 1, wx.EXPAND) self.msz.Add(self.desz, 5, wx.EXPAND|wx.ALL, 5) self.msz.Add(self.dssz, 5, wx.EXPAND|wx.ALL, 5) self.msz.Add(self.calc, 1, wx.ALL|wx.ALIGN_CENTRE, 5) self.SetSizer(self.msz) colnames = "ID,OD,L,T,G".split(",") for k,col in enumerate(colnames): self.input_data.SetColLabelValue(k,col) colnames_out = u"\N{GREEK SMALL LETTER TAU},\N{GREEK SMALL LETTER PHI}".split(",") for k,col in enumerate(colnames_out): self.output_data.SetColLabelValue(k,col) self.Bind(wx.EVT_BUTTON, self.on_numel, self.oknumel) self.Bind(wx.EVT_BUTTON, self.calcular, self.calc) def on_numel(self,event): numel = int(self.numel.GetValue()) self.input_data.UpdateGridSize(numel,5) self.output_data.UpdateGridSize(numel,2) def calcular(self,event): data = self.input_data.GetArrayData() ID = data[:,0] OD = data[:,1] L = 
data[:,2] T = data[:,3] G = data[:,4] J = np.pi/2*((OD/2)**4-(ID/2)**4) TS = [] for k in range(len(T)): _ts = sum(T[0:k+1]) TS.append(_ts) TS = np.array(TS) phi = ((TS*L)/(J*G))*(180/np.pi) tau = (TS*OD)/J self.output_data.SetArrayData(np.column_stack((tau,phi))) if __name__ == '__main__': app = wx.App() fr = wxFrame() app.MainLoop()
[ "wx.Button", "wx.BoxSizer", "numpy.column_stack", "wx.StaticBoxSizer", "wx.StaticText", "wx.TextCtrl", "numpy.array", "ui.DataGrid", "wx.Frame.__init__", "wx.StaticBox", "wx.App" ]
[((2923, 2931), 'wx.App', 'wx.App', ([], {}), '()\n', (2929, 2931), False, 'import wx\n'), ((121, 187), 'wx.Frame.__init__', 'wx.Frame.__init__', (['self'], {'parent': 'None', 'title': '"""ABC"""', 'size': '(600, 450)'}), "(self, parent=None, title='ABC', size=(600, 450))\n", (138, 187), False, 'import wx\n'), ((328, 352), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (339, 352), False, 'import wx\n'), ((374, 400), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (385, 400), False, 'import wx\n'), ((420, 462), 'wx.StaticBox', 'wx.StaticBox', (['self', '(-1)', '"""Datos de entrada"""'], {}), "(self, -1, 'Datos de entrada')\n", (432, 462), False, 'import wx\n'), ((483, 523), 'wx.StaticBoxSizer', 'wx.StaticBoxSizer', (['self.dep', 'wx.VERTICAL'], {}), '(self.dep, wx.VERTICAL)\n', (500, 523), False, 'import wx\n'), ((544, 585), 'wx.StaticBox', 'wx.StaticBox', (['self', '(-1)', '"""Datos de salida"""'], {}), "(self, -1, 'Datos de salida')\n", (556, 585), False, 'import wx\n'), ((606, 646), 'wx.StaticBoxSizer', 'wx.StaticBoxSizer', (['self.dsp', 'wx.VERTICAL'], {}), '(self.dsp, wx.VERTICAL)\n', (623, 646), False, 'import wx\n'), ((669, 732), 'wx.StaticText', 'wx.StaticText', (['self', '(-1)', 'u"""Número de elementos"""'], {'size': '(120, -1)'}), "(self, -1, u'Número de elementos', size=(120, -1))\n", (682, 732), False, 'import wx\n'), ((753, 793), 'wx.TextCtrl', 'wx.TextCtrl', (['self', '(-1)', '""""""'], {'size': '(80, -1)'}), "(self, -1, '', size=(80, -1))\n", (764, 793), False, 'import wx\n'), ((816, 856), 'wx.Button', 'wx.Button', (['self', '(-1)', '"""OK"""'], {'size': '(40, -1)'}), "(self, -1, 'OK', size=(40, -1))\n", (825, 856), False, 'import wx\n'), ((918, 943), 'ui.DataGrid', 'ui.DataGrid', (['self', '(5, 5)'], {}), '(self, (5, 5))\n', (929, 943), False, 'import ui\n'), ((1057, 1082), 'ui.DataGrid', 'ui.DataGrid', (['self', '(5, 2)'], {}), '(self, (5, 2))\n', (1068, 1082), False, 'import ui\n'), ((1189, 
1220), 'wx.Button', 'wx.Button', (['self', '(-1)', '"""Calcular"""'], {}), "(self, -1, 'Calcular')\n", (1198, 1220), False, 'import wx\n'), ((2715, 2727), 'numpy.array', 'np.array', (['TS'], {}), '(TS)\n', (2723, 2727), True, 'import numpy as np\n'), ((2840, 2867), 'numpy.column_stack', 'np.column_stack', (['(tau, phi)'], {}), '((tau, phi))\n', (2855, 2867), True, 'import numpy as np\n')]
"""Parse different types of camera streams. This module is used to parse different types of camera streams. The module provides the StreamParser base class which provides a uniform way of parsing all camera streams. The module provides different subclasses, each for a different type of camera streams (e.g. image streams, and MJPEG streams). Examples -------- Example 1: To parse a camera image stream: 1. Initialize an object of ImageStreamParser using the URL of the camera image stream. 2. Use the get_frame method to get the most recent frame at any point of time, as well as the frame size. There is no need to call open_stream or close_stream. parser = ImageStreamParser('http://128.10.29.33/axis-cgi/jpg/image.cgi') frame, frame_size = parser.get_frame() cv2.imshow('frame', frame) print frame_size cv2.waitKey() Example 2: To parse a camera MJPEG stream: 1. Initialize an object of MJPEGStreamParser using the URL of the camera MJPEG stream. 2. Open the stream by calling the open_stream method. 3. Use the get_frame method to get the most recent frame at any point of time, as well as the frame size. 4. At the end when no more frames are needed, close the stream by calling the close_stream method. parser = MJPEGStreamParser('http://128.10.29.33/axis-cgi/mjpg/video.cgi') parser.open_stream() t = time.time() while time.time() - t < 5: frame, frame_size = parser.get_frame() cv2.imshow('frame', frame) print frame_size cv2.waitKey(30) parser.close_stream() """ import urllib2 import cv2 import numpy as np import error # NOTE Causes problems in case of slow internet connection DOWNLOAD_TIMEOUT = 10 class StreamParser(object): """Represent the base class for camera stream parsers. Parameters ---------- url : str The URL of the stream. Attributes ---------- url : str The URL of the stream. """ def __init__(self, url): self.url = url def open_stream(self): """Open the stream. Raises ------ error.UnreachableCameraError If the camera is unreachable. 
""" pass def close_stream(self): """Close the MJPEG stream. """ pass def restart_stream(self): """Restart the stream. This method restarts the stream by closing then opening it. This is useful because some cameras closes a stream if it is open for a long period of time. """ self.close_stream() self.open_stream() def get_frame(self): """Get the most recent frame from the camera stream. This method is an abstract method that must be overridden by subclasses. Returns ------- numpy.ndarray The downloaded frame. int The size of the downloaded frame in bytes. Raises ------ error.CorruptedFrameError If the frame is corrupted. error.UnreachableCameraError If the camera is unreachable. error.ClosedStreamError If the stream needs to be opened first. NotImplementedError If the method is not overridden in the subclass. """ raise NotImplementedError('The get_frame method has to be overridden.') class ImageStreamParser(StreamParser): """Represent a parser for a camera image stream. This class subclasses the StreamParser class and inherits its attributes and constructor. Notes ----- A camera that provides an image stream is a camera that provides a URL to get the most recent frame (regardless of how recent it is). Hence, Parsing an image stream is as simple as downloading the most recent frame from the given URL whenever requested. There is no need to call open_stream or close_stream since they do nothing. """ def get_frame(self): """Get the most recent frame from the camera image stream. Returns ------- frame : numpy.ndarray The downloaded frame. frame_size : int The size of the downloaded frame in bytes. Raises ------ error.CorruptedFrameError If the frame is corrupted. error.UnreachableCameraError If the camera is unreachable. """ try: # Download the frame data. frame = urllib2.urlopen( self.url, timeout=DOWNLOAD_TIMEOUT).read() except urllib2.URLError: raise error.UnreachableCameraError # Handle the cameras that return empty content. 
if frame == '': raise error.CorruptedFrameError # Get the size of the downloaded frame in bytes. frame_size = len(frame) # Decode the frame data to a numpy.ndarray image. frame = cv2.imdecode(np.fromstring(frame, dtype=np.uint8), -1) # Handle the cameras whose URLs return 1x1 images. The method # cv2.imdecode returns None if the input buffer is too short # or contains invalid data. if frame is None: raise error.CorruptedFrameError return frame, frame_size class MJPEGStreamParser(StreamParser): """Represent a parser for a camera MJPEG stream. This class subclasses the StreamParser class and inherits its attributes and extends its constructor. Parameters ---------- url : str The URL of the MJPEG stream. Attributes ---------- mjpeg_stream : file-like object The handle to the camera MJPEG stream. """ def __init__(self, url): super(MJPEGStreamParser, self).__init__(url) self.mjpeg_stream = None def open_stream(self): """Open the MJPEG stream. Raises ------ error.UnreachableCameraError If the camera is unreachable. """ try: self.mjpeg_stream = urllib2.urlopen( self.url, timeout=DOWNLOAD_TIMEOUT) except urllib2.URLError: raise error.UnreachableCameraError def close_stream(self): """Close the MJPEG stream. """ if self.mjpeg_stream is not None: self.mjpeg_stream.close() self.mjpeg_stream = None def get_frame(self): """Get the most recent frame from the camera MJPEG stream. Returns ------- frame : numpy.ndarray The downloaded frame. frame_size : int The size of the downloaded frame in bytes. Raises ------ error.CorruptedFrameError If the frame is corrupted. error.ClosedStreamError If the MJPEG stream needs to be opened first. Notes ----- MJPEG Stream Format: --myboundary Content-Type: image/jpeg Content-Length: [size of image in bytes] [empty line] ..... binary data ..... [empty line] --myboundary Content-Type: image/jpeg Content-Length: [size of image in bytes] [empty line] ..... binary data ..... 
[empty line] """ if self.mjpeg_stream is None: raise error.ClosedStreamError # Skip the boundary line. if self.mjpeg_stream.readline().rstrip() != '--myboundary': raise error.CorruptedFrameError # Skip the second line that has "Content-Type: image/jpeg". if self.mjpeg_stream.readline().rstrip() != 'Content-Type: image/jpeg': raise error.CorruptedFrameError # Verify the format of the third line, and get the frame size. line = [s.strip() for s in self.mjpeg_stream.readline().split(':')] if len(line) == 2 and line[0] == 'Content-Length' and line[1].isdigit(): frame_size = int(line[1]) else: raise error.CorruptedFrameError # Skip the empty line before the binary frame data. if self.mjpeg_stream.readline().strip() != '': raise error.CorruptedFrameError # Read the binary frame data. try: frame = self.mjpeg_stream.read(frame_size) except: self.restart_stream() raise error.CorruptedFrameError # Skip the empty line after the binary frame data. if self.mjpeg_stream.readline().strip() != '': raise error.CorruptedFrameError # Decode the frame data to a numpy.ndarray image. frame = cv2.imdecode(np.fromstring(frame, dtype=np.uint8), -1) # Handle the cameras whose URLs return 1x1 images. The method # cv2.imdecode returns None if the input buffer is too short or # contains invalid data. if frame is None: raise error.CorruptedFrameError return frame, frame_size def __del__(self): """Close the MJPEG stream when the object is about to be destroyed. This destructor is a backup plan in case the user of this class did not call the close_stream method. The close_stream method has to be called, without relying on this destructor, because __del__ is not guaranteed to be called in some cases and it is also better to close the stream as soon as possible to avoid unnecessary network workload. """ self.close_stream()
[ "urllib2.urlopen", "numpy.fromstring" ]
[((4874, 4910), 'numpy.fromstring', 'np.fromstring', (['frame'], {'dtype': 'np.uint8'}), '(frame, dtype=np.uint8)\n', (4887, 4910), True, 'import numpy as np\n'), ((5952, 6003), 'urllib2.urlopen', 'urllib2.urlopen', (['self.url'], {'timeout': 'DOWNLOAD_TIMEOUT'}), '(self.url, timeout=DOWNLOAD_TIMEOUT)\n', (5967, 6003), False, 'import urllib2\n'), ((8537, 8573), 'numpy.fromstring', 'np.fromstring', (['frame'], {'dtype': 'np.uint8'}), '(frame, dtype=np.uint8)\n', (8550, 8573), True, 'import numpy as np\n'), ((4415, 4466), 'urllib2.urlopen', 'urllib2.urlopen', (['self.url'], {'timeout': 'DOWNLOAD_TIMEOUT'}), '(self.url, timeout=DOWNLOAD_TIMEOUT)\n', (4430, 4466), False, 'import urllib2\n')]
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université # "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. " import tvb.simulator.lab as lab from nest_elephant_tvb.Tvb.modify_tvb import Interface_co_simulation import numpy as np # reference simulation np.random.seed(42) model = lab.models.ReducedWongWang(tau_s=np.random.rand(76)) connectivity = lab.connectivity.Connectivity().from_file() connectivity.speed = np.array([4.0]) connectivity.configure() coupling = lab.coupling.Linear(a=np.array(0.0154)) integrator = lab.integrators.HeunDeterministic(dt=0.1,bounded_state_variable_indices=np.array([0]),state_variable_boundaries=np.array([[0.0, 1.0]])) monitors = lab.monitors.Raw(period=0.1, variables_of_interest=np.array(0,dtype=np.int)) # Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors. sim = lab.simulator.Simulator(model=model, connectivity=connectivity, coupling=coupling, integrator=integrator, monitors=(monitors,), # initial_conditions=np.repeat(0.0,1*1*nb_region).reshape(1,1,nb_region,1) ) sim.configure() result_all=sim.run(simulation_length=10.0) # New simulator with proxy np.random.seed(42) model_1 = lab.models.ReducedWongWang(tau_s=np.random.rand(76)) monitors_1 = (Interface_co_simulation(period=0.1, id_proxy=np.array([0], dtype=np.int), time_synchronize=10.0)) # Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors. 
sim_1 = lab.simulator.Simulator(model=model_1, connectivity=connectivity, coupling=coupling, integrator=integrator, monitors=(monitors,monitors_1,), # initial_conditions=np.repeat(0.0,1*1*nb_region).reshape(1,1,nb_region,1) ) sim_1.configure() result_1_all = [np.empty((0,)),np.empty((0,1,76,1))] for j in range(5): result_1_all_step = sim_1.run( simulation_length=2.0, proxy_data=[(2.0*j)+np.arange(0.1,2.1,0.1), np.array([ result_all[0][1][(20*j)+i][0][0] for i in range(20) ]).reshape((20,1,1,1))]) result_1_all[0] = np.concatenate((result_1_all[0],result_1_all_step[0][0])) result_1_all[1] = np.concatenate((result_1_all[1], result_1_all_step[0][1])) for i in range(100): diff = result_all[0][1][i][0][1:] - result_1_all[1][i,0,1:] diff_2 = result_all[0][1][i][0][:1] - result_1_all[1][i,0,:1] if np.sum(diff,where=np.logical_not(np.isnan(diff))) == 0.0 and np.sum(diff_2 ,where=np.logical_not(np.isnan(diff_2))) == 0.0: print('test succeeds') else: print(np.sum(diff_2)) print('test FAIL')
[ "numpy.random.rand", "tvb.simulator.lab.simulator.Simulator", "numpy.array", "tvb.simulator.lab.connectivity.Connectivity", "numpy.sum", "numpy.empty", "numpy.random.seed", "numpy.concatenate", "numpy.isnan", "numpy.arange" ]
[((376, 394), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (390, 394), True, 'import numpy as np\n'), ((536, 551), 'numpy.array', 'np.array', (['[4.0]'], {}), '([4.0])\n', (544, 551), True, 'import numpy as np\n'), ((946, 1078), 'tvb.simulator.lab.simulator.Simulator', 'lab.simulator.Simulator', ([], {'model': 'model', 'connectivity': 'connectivity', 'coupling': 'coupling', 'integrator': 'integrator', 'monitors': '(monitors,)'}), '(model=model, connectivity=connectivity, coupling=\n coupling, integrator=integrator, monitors=(monitors,))\n', (969, 1078), True, 'import tvb.simulator.lab as lab\n'), ((1418, 1436), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1432, 1436), True, 'import numpy as np\n'), ((1695, 1840), 'tvb.simulator.lab.simulator.Simulator', 'lab.simulator.Simulator', ([], {'model': 'model_1', 'connectivity': 'connectivity', 'coupling': 'coupling', 'integrator': 'integrator', 'monitors': '(monitors, monitors_1)'}), '(model=model_1, connectivity=connectivity, coupling=\n coupling, integrator=integrator, monitors=(monitors, monitors_1))\n', (1718, 1840), True, 'import tvb.simulator.lab as lab\n'), ((2127, 2141), 'numpy.empty', 'np.empty', (['(0,)'], {}), '((0,))\n', (2135, 2141), True, 'import numpy as np\n'), ((2142, 2165), 'numpy.empty', 'np.empty', (['(0, 1, 76, 1)'], {}), '((0, 1, 76, 1))\n', (2150, 2165), True, 'import numpy as np\n'), ((2431, 2489), 'numpy.concatenate', 'np.concatenate', (['(result_1_all[0], result_1_all_step[0][0])'], {}), '((result_1_all[0], result_1_all_step[0][0]))\n', (2445, 2489), True, 'import numpy as np\n'), ((2511, 2569), 'numpy.concatenate', 'np.concatenate', (['(result_1_all[1], result_1_all_step[0][1])'], {}), '((result_1_all[1], result_1_all_step[0][1]))\n', (2525, 2569), True, 'import numpy as np\n'), ((436, 454), 'numpy.random.rand', 'np.random.rand', (['(76)'], {}), '(76)\n', (450, 454), True, 'import numpy as np\n'), ((471, 502), 
'tvb.simulator.lab.connectivity.Connectivity', 'lab.connectivity.Connectivity', ([], {}), '()\n', (500, 502), True, 'import tvb.simulator.lab as lab\n'), ((610, 626), 'numpy.array', 'np.array', (['(0.0154)'], {}), '(0.0154)\n', (618, 626), True, 'import numpy as np\n'), ((713, 726), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (721, 726), True, 'import numpy as np\n'), ((753, 775), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (761, 775), True, 'import numpy as np\n'), ((839, 864), 'numpy.array', 'np.array', (['(0)'], {'dtype': 'np.int'}), '(0, dtype=np.int)\n', (847, 864), True, 'import numpy as np\n'), ((1480, 1498), 'numpy.random.rand', 'np.random.rand', (['(76)'], {}), '(76)\n', (1494, 1498), True, 'import numpy as np\n'), ((1559, 1586), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.int'}), '([0], dtype=np.int)\n', (1567, 1586), True, 'import numpy as np\n'), ((2909, 2923), 'numpy.sum', 'np.sum', (['diff_2'], {}), '(diff_2)\n', (2915, 2923), True, 'import numpy as np\n'), ((2277, 2301), 'numpy.arange', 'np.arange', (['(0.1)', '(2.1)', '(0.1)'], {}), '(0.1, 2.1, 0.1)\n', (2286, 2301), True, 'import numpy as np\n'), ((2762, 2776), 'numpy.isnan', 'np.isnan', (['diff'], {}), '(diff)\n', (2770, 2776), True, 'import numpy as np\n'), ((2827, 2843), 'numpy.isnan', 'np.isnan', (['diff_2'], {}), '(diff_2)\n', (2835, 2843), True, 'import numpy as np\n')]
from keras.layers import Flatten, Input from keras.layers import AveragePooling3D, MaxPooling3D from keras.models import Model from keras import backend as K import numpy as np import pandas as pd from sklearn.externals import joblib def generate_spatial_agg_features(X, input_shape=(11, 11, 11, 256)): img_input = Input(shape=input_shape) x = MaxPooling3D((3, 3, 3), strides=(3, 3, 3), name='block1_pool', padding='same')(img_input) # x = AveragePooling3D((3, 3, 3), strides=(3, 3, 3), name='block1_pool', padding='same')(img_input) x = Flatten(name='flatten')(x) model = Model(inputs=img_input, outputs=x) return model.predict(X) K.set_image_data_format('channels_last') df_train = pd.read_csv('data/stage1_labels.csv') df_val = pd.read_csv('data/stage1_solution.csv') train_chunks = df_train.id.apply(lambda x: np.load('feature/%s.npy' % str(x))) train_chunks = np.dstack(train_chunks) train_chunks = np.rollaxis(train_chunks, -1).reshape(train_chunks.shape[2], 11, 11, 11, 256) X_train = generate_spatial_agg_features(train_chunks) y_train = df_train.cancer.astype(int) joblib.dump(X_train, 'data/X_train.pkl') joblib.dump(y_train, 'data/y_train.pkl') val_chunks = df_val.id.apply(lambda x: np.load('feature/%s.npy' % str(x))) val_chunks = np.dstack(val_chunks) val_chunks = np.rollaxis(val_chunks, -1).reshape(val_chunks.shape[2], 11, 11, 11, 256) X_val = generate_spatial_agg_features(val_chunks) y_val = df_val.cancer.astype(int) joblib.dump(X_val, 'data/X_val.pkl') joblib.dump(y_val, 'data/y_val.pkl')
[ "keras.backend.set_image_data_format", "numpy.dstack", "keras.layers.Flatten", "pandas.read_csv", "numpy.rollaxis", "keras.layers.Input", "keras.models.Model", "sklearn.externals.joblib.dump", "keras.layers.MaxPooling3D" ]
[((666, 706), 'keras.backend.set_image_data_format', 'K.set_image_data_format', (['"""channels_last"""'], {}), "('channels_last')\n", (689, 706), True, 'from keras import backend as K\n'), ((719, 756), 'pandas.read_csv', 'pd.read_csv', (['"""data/stage1_labels.csv"""'], {}), "('data/stage1_labels.csv')\n", (730, 756), True, 'import pandas as pd\n'), ((766, 805), 'pandas.read_csv', 'pd.read_csv', (['"""data/stage1_solution.csv"""'], {}), "('data/stage1_solution.csv')\n", (777, 805), True, 'import pandas as pd\n'), ((902, 925), 'numpy.dstack', 'np.dstack', (['train_chunks'], {}), '(train_chunks)\n', (911, 925), True, 'import numpy as np\n'), ((1112, 1152), 'sklearn.externals.joblib.dump', 'joblib.dump', (['X_train', '"""data/X_train.pkl"""'], {}), "(X_train, 'data/X_train.pkl')\n", (1123, 1152), False, 'from sklearn.externals import joblib\n'), ((1153, 1193), 'sklearn.externals.joblib.dump', 'joblib.dump', (['y_train', '"""data/y_train.pkl"""'], {}), "(y_train, 'data/y_train.pkl')\n", (1164, 1193), False, 'from sklearn.externals import joblib\n'), ((1284, 1305), 'numpy.dstack', 'np.dstack', (['val_chunks'], {}), '(val_chunks)\n', (1293, 1305), True, 'import numpy as np\n'), ((1478, 1514), 'sklearn.externals.joblib.dump', 'joblib.dump', (['X_val', '"""data/X_val.pkl"""'], {}), "(X_val, 'data/X_val.pkl')\n", (1489, 1514), False, 'from sklearn.externals import joblib\n'), ((1515, 1551), 'sklearn.externals.joblib.dump', 'joblib.dump', (['y_val', '"""data/y_val.pkl"""'], {}), "(y_val, 'data/y_val.pkl')\n", (1526, 1551), False, 'from sklearn.externals import joblib\n'), ((325, 349), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (330, 349), False, 'from keras.layers import Flatten, Input\n'), ((601, 635), 'keras.models.Model', 'Model', ([], {'inputs': 'img_input', 'outputs': 'x'}), '(inputs=img_input, outputs=x)\n', (606, 635), False, 'from keras.models import Model\n'), ((359, 437), 'keras.layers.MaxPooling3D', 'MaxPooling3D', 
(['(3, 3, 3)'], {'strides': '(3, 3, 3)', 'name': '"""block1_pool"""', 'padding': '"""same"""'}), "((3, 3, 3), strides=(3, 3, 3), name='block1_pool', padding='same')\n", (371, 437), False, 'from keras.layers import AveragePooling3D, MaxPooling3D\n'), ((561, 584), 'keras.layers.Flatten', 'Flatten', ([], {'name': '"""flatten"""'}), "(name='flatten')\n", (568, 584), False, 'from keras.layers import Flatten, Input\n'), ((941, 970), 'numpy.rollaxis', 'np.rollaxis', (['train_chunks', '(-1)'], {}), '(train_chunks, -1)\n', (952, 970), True, 'import numpy as np\n'), ((1319, 1346), 'numpy.rollaxis', 'np.rollaxis', (['val_chunks', '(-1)'], {}), '(val_chunks, -1)\n', (1330, 1346), True, 'import numpy as np\n')]
import argparse import os from functools import partial from multiprocessing.pool import Pool os.environ["MKL_NUM_THREADS"] = "1" os.environ["NUMEXPR_NUM_THREADS"] = "1" os.environ["OMP_NUM_THREADS"] = "1" from tqdm import tqdm import cv2 cv2.ocl.setUseOpenCL(False) cv2.setNumThreads(0) from preprocessing.utils import get_original_video_paths from PIL import Image from facenet_pytorch.models.mtcnn import MTCNN import numpy as np detector = MTCNN(margin=0, thresholds=[0.65, 0.75, 0.75], device="cpu") def save_landmarks(ori_id, root_dir): ori_id = ori_id[:-4] ori_dir = os.path.join(root_dir, "crops", ori_id) landmark_dir = os.path.join(root_dir, "landmarks", ori_id) os.makedirs(landmark_dir, exist_ok=True) for frame in range(320): if frame % 10 != 0: continue for actor in range(2): image_id = "{}_{}.png".format(frame, actor) landmarks_id = "{}_{}".format(frame, actor) ori_path = os.path.join(ori_dir, image_id) landmark_path = os.path.join(landmark_dir, landmarks_id) if os.path.exists(ori_path): try: image_ori = cv2.imread(ori_path, cv2.IMREAD_COLOR)[...,::-1] frame_img = Image.fromarray(image_ori) batch_boxes, conf, landmarks = detector.detect(frame_img, landmarks=True) if landmarks is not None: landmarks = np.around(landmarks[0]).astype(np.int16) np.save(landmark_path, landmarks) except Exception as e: print(e) pass def parse_args(): parser = argparse.ArgumentParser( description="Extract image landmarks") parser.add_argument("--root-dir", help="root directory", default="/mnt/sota/datasets/deepfake") args = parser.parse_args() return args def main(): args = parse_args() ids = get_original_video_paths(args.root_dir, basename=True) os.makedirs(os.path.join(args.root_dir, "landmarks"), exist_ok=True) with Pool(processes=os.cpu_count()) as p: with tqdm(total=len(ids)) as pbar: func = partial(save_landmarks, root_dir=args.root_dir) for v in p.imap_unordered(func, ids): pbar.update() if __name__ == '__main__': main()
[ "cv2.ocl.setUseOpenCL", "os.path.exists", "PIL.Image.fromarray", "cv2.setNumThreads", "os.makedirs", "argparse.ArgumentParser", "facenet_pytorch.models.mtcnn.MTCNN", "os.path.join", "functools.partial", "numpy.around", "os.cpu_count", "preprocessing.utils.get_original_video_paths", "cv2.imre...
[((246, 273), 'cv2.ocl.setUseOpenCL', 'cv2.ocl.setUseOpenCL', (['(False)'], {}), '(False)\n', (266, 273), False, 'import cv2\n'), ((274, 294), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(0)'], {}), '(0)\n', (291, 294), False, 'import cv2\n'), ((453, 513), 'facenet_pytorch.models.mtcnn.MTCNN', 'MTCNN', ([], {'margin': '(0)', 'thresholds': '[0.65, 0.75, 0.75]', 'device': '"""cpu"""'}), "(margin=0, thresholds=[0.65, 0.75, 0.75], device='cpu')\n", (458, 513), False, 'from facenet_pytorch.models.mtcnn import MTCNN\n'), ((593, 632), 'os.path.join', 'os.path.join', (['root_dir', '"""crops"""', 'ori_id'], {}), "(root_dir, 'crops', ori_id)\n", (605, 632), False, 'import os\n'), ((652, 695), 'os.path.join', 'os.path.join', (['root_dir', '"""landmarks"""', 'ori_id'], {}), "(root_dir, 'landmarks', ori_id)\n", (664, 695), False, 'import os\n'), ((700, 740), 'os.makedirs', 'os.makedirs', (['landmark_dir'], {'exist_ok': '(True)'}), '(landmark_dir, exist_ok=True)\n', (711, 740), False, 'import os\n'), ((1690, 1752), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract image landmarks"""'}), "(description='Extract image landmarks')\n", (1713, 1752), False, 'import argparse\n'), ((1957, 2011), 'preprocessing.utils.get_original_video_paths', 'get_original_video_paths', (['args.root_dir'], {'basename': '(True)'}), '(args.root_dir, basename=True)\n', (1981, 2011), False, 'from preprocessing.utils import get_original_video_paths\n'), ((2028, 2068), 'os.path.join', 'os.path.join', (['args.root_dir', '"""landmarks"""'], {}), "(args.root_dir, 'landmarks')\n", (2040, 2068), False, 'import os\n'), ((985, 1016), 'os.path.join', 'os.path.join', (['ori_dir', 'image_id'], {}), '(ori_dir, image_id)\n', (997, 1016), False, 'import os\n'), ((1045, 1085), 'os.path.join', 'os.path.join', (['landmark_dir', 'landmarks_id'], {}), '(landmark_dir, landmarks_id)\n', (1057, 1085), False, 'import os\n'), ((1102, 1126), 'os.path.exists', 'os.path.exists', (['ori_path'], 
{}), '(ori_path)\n', (1116, 1126), False, 'import os\n'), ((2193, 2240), 'functools.partial', 'partial', (['save_landmarks'], {'root_dir': 'args.root_dir'}), '(save_landmarks, root_dir=args.root_dir)\n', (2200, 2240), False, 'from functools import partial\n'), ((2109, 2123), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (2121, 2123), False, 'import os\n'), ((1262, 1288), 'PIL.Image.fromarray', 'Image.fromarray', (['image_ori'], {}), '(image_ori)\n', (1277, 1288), False, 'from PIL import Image\n'), ((1181, 1219), 'cv2.imread', 'cv2.imread', (['ori_path', 'cv2.IMREAD_COLOR'], {}), '(ori_path, cv2.IMREAD_COLOR)\n', (1191, 1219), False, 'import cv2\n'), ((1530, 1563), 'numpy.save', 'np.save', (['landmark_path', 'landmarks'], {}), '(landmark_path, landmarks)\n', (1537, 1563), True, 'import numpy as np\n'), ((1465, 1488), 'numpy.around', 'np.around', (['landmarks[0]'], {}), '(landmarks[0])\n', (1474, 1488), True, 'import numpy as np\n')]
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regression tests for zipline's rolling risk metrics.

Observed values from ``risk.RiskReport`` / ``risk.RiskMetricsPeriod`` are
compared against pre-computed reference tables loaded from ``answer_key``.
"""
import unittest
import datetime
import calendar
from itertools import chain

import numpy as np
import pytz
from six import itervalues

import zipline.finance.risk as risk
from zipline.utils import factory
from zipline.finance.trading import SimulationParameters, TradingEnvironment

from . import answer_key
from .answer_key import AnswerKey

ANSWER_KEY = AnswerKey()
RETURNS = ANSWER_KEY.RETURNS


class TestRisk(unittest.TestCase):
    """Checks of risk metrics over monthly/quarterly/semiannual/annual windows."""

    # (attribute holding the rolling windows on a RiskReport,
    #  column name of the matching reference table in the answer key)
    _WINDOWS = (
        ('month_periods', 'Monthly'),
        ('three_month_periods', '3-Month'),
        ('six_month_periods', '6-month'),
        ('year_periods', 'year'),
    )

    @classmethod
    def setUpClass(cls):
        cls.env = TradingEnvironment()

    @classmethod
    def tearDownClass(cls):
        del cls.env

    def setUp(self):
        # 2006 fixture: algorithm returns from the answer key over the full year.
        start_date = datetime.datetime(
            year=2006, month=1, day=1, hour=0, minute=0, tzinfo=pytz.utc)
        end_date = datetime.datetime(
            year=2006, month=12, day=31, tzinfo=pytz.utc)
        self.sim_params = SimulationParameters(
            period_start=start_date,
            period_end=end_date,
            env=self.env,
        )
        self.algo_returns_06 = factory.create_returns_from_list(
            RETURNS,
            self.sim_params,
        )
        self.benchmark_returns_06 = \
            answer_key.RETURNS_DATA['Benchmark Returns']
        self.metrics_06 = risk.RiskReport(
            self.algo_returns_06,
            self.sim_params,
            benchmark_returns=self.benchmark_returns_06,
            env=self.env,
        )
        # 2008 fixture: only simulation parameters; tests build their own returns.
        start_08 = datetime.datetime(
            year=2008, month=1, day=1, hour=0, minute=0, tzinfo=pytz.utc)
        end_08 = datetime.datetime(
            year=2008, month=12, day=31, tzinfo=pytz.utc)
        self.sim_params08 = SimulationParameters(
            period_start=start_08,
            period_end=end_08,
            env=self.env,
        )

    def tearDown(self):
        return

    def _assert_answer_key_metric(self, attr, expected, decimal=7):
        """Compare metric ``attr`` across every 2006 window length.

        ``expected`` maps each answer-key column name (see ``_WINDOWS``) to the
        list of reference values; ``decimal`` is forwarded to
        ``np.testing.assert_almost_equal`` (7 is numpy's own default).
        """
        for periods_attr, column in self._WINDOWS:
            observed = [getattr(period, attr)
                        for period in getattr(self.metrics_06, periods_attr)]
            np.testing.assert_almost_equal(observed, expected[column],
                                           decimal=decimal)

    def test_factory(self):
        returns = [0.1] * 100
        r_objects = factory.create_returns_from_list(returns, self.sim_params)
        # The generated series must not run past the simulation end date.
        self.assertTrue(r_objects.index[-1] <= datetime.datetime(
            year=2006, month=12, day=31, tzinfo=pytz.utc))

    def test_drawdown(self):
        returns = factory.create_returns_from_list(
            [1.0, -0.5, 0.8, 0.17, 1.0, -0.1, -0.45], self.sim_params)
        # cumulative values: 200, 100, 180, 210.6, 421.2, 379.8, 208.494
        metrics = risk.RiskMetricsPeriod(
            returns.index[0],
            returns.index[-1],
            returns,
            env=self.env,
            benchmark_returns=self.env.benchmark_returns,
        )
        self.assertEqual(metrics.max_drawdown, 0.505)

    def test_benchmark_returns_06(self):
        self._assert_answer_key_metric('benchmark_period_returns',
                                       ANSWER_KEY.BENCHMARK_PERIOD_RETURNS)

    def test_trading_days_06(self):
        returns = factory.create_returns_from_range(self.sim_params)
        metrics = risk.RiskReport(returns, self.sim_params, env=self.env)
        self.assertEqual([x.num_trading_days for x in metrics.year_periods],
                         [251])
        self.assertEqual([x.num_trading_days for x in metrics.month_periods],
                         [20, 19, 23, 19, 22, 22, 20, 23, 20, 22, 21, 20])

    def test_benchmark_volatility_06(self):
        self._assert_answer_key_metric('benchmark_volatility',
                                       ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY)

    def test_algorithm_returns_06(self):
        self._assert_answer_key_metric('algorithm_period_returns',
                                       ANSWER_KEY.ALGORITHM_PERIOD_RETURNS)

    def test_algorithm_volatility_06(self):
        self._assert_answer_key_metric('algorithm_volatility',
                                       ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY)

    def test_algorithm_sharpe_06(self):
        self._assert_answer_key_metric('sharpe',
                                       ANSWER_KEY.ALGORITHM_PERIOD_SHARPE)

    def test_algorithm_downside_risk_06(self):
        self._assert_answer_key_metric(
            'downside_risk',
            ANSWER_KEY.ALGORITHM_PERIOD_DOWNSIDE_RISK,
            decimal=4)

    def test_algorithm_sortino_06(self):
        self._assert_answer_key_metric(
            'sortino',
            ANSWER_KEY.ALGORITHM_PERIOD_SORTINO,
            decimal=3)

    def test_algorithm_information_06(self):
        self.assertEqual(
            [round(x.information, 3) for x in self.metrics_06.month_periods],
            [0.131, -0.11, -0.067, 0.136, 0.301, -0.387,
             0.107, -0.032, -0.058, 0.069, 0.095, -0.123])
        self.assertEqual(
            [round(x.information, 3)
             for x in self.metrics_06.three_month_periods],
            [-0.013, -0.009, 0.111, -0.014, -0.017,
             -0.108, 0.011, -0.004, 0.032, 0.011])
        self.assertEqual(
            [round(x.information, 3)
             for x in self.metrics_06.six_month_periods],
            [-0.013, -0.014, -0.003, -0.002, -0.011, -0.041, 0.011])
        self.assertEqual(
            [round(x.information, 3) for x in self.metrics_06.year_periods],
            [-0.001])

    def test_algorithm_beta_06(self):
        self._assert_answer_key_metric('beta',
                                       ANSWER_KEY.ALGORITHM_PERIOD_BETA)

    def test_algorithm_alpha_06(self):
        self._assert_answer_key_metric('alpha',
                                       ANSWER_KEY.ALGORITHM_PERIOD_ALPHA)

    # FIXME: Covariance is not matching excel precisely enough to run the
    # test. Month 4 seems to be the problem. Variance is disabled just to
    # avoid distraction - it is much closer than covariance and can probably
    # pass with 6 significant digits instead of 7.
    # re-enable variance, alpha, and beta tests once this is resolved
    def test_algorithm_covariance_06(self):
        self._assert_answer_key_metric('algorithm_covariance',
                                       ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE)

    def test_benchmark_variance_06(self):
        self._assert_answer_key_metric(
            'benchmark_variance',
            ANSWER_KEY.ALGORITHM_PERIOD_BENCHMARK_VARIANCE)

    def test_benchmark_returns_08(self):
        returns = factory.create_returns_from_range(self.sim_params08)
        metrics = risk.RiskReport(returns, self.sim_params08, env=self.env)
        self.assertEqual(
            [round(x.benchmark_period_returns, 3)
             for x in metrics.month_periods],
            [-0.061, -0.035, -0.006, 0.048, 0.011, -0.086,
             -0.01, 0.012, -0.091, -0.169, -0.075, 0.008])
        self.assertEqual(
            [round(x.benchmark_period_returns, 3)
             for x in metrics.three_month_periods],
            [-0.099, 0.005, 0.052, -0.032, -0.085,
             -0.084, -0.089, -0.236, -0.301, -0.226])
        self.assertEqual(
            [round(x.benchmark_period_returns, 3)
             for x in metrics.six_month_periods],
            [-0.128, -0.081, -0.036, -0.118, -0.301, -0.36, -0.294])
        self.assertEqual(
            [round(x.benchmark_period_returns, 3)
             for x in metrics.year_periods],
            [-0.385])

    def test_trading_days_08(self):
        returns = factory.create_returns_from_range(self.sim_params08)
        metrics = risk.RiskReport(returns, self.sim_params08, env=self.env)
        self.assertEqual([x.num_trading_days for x in metrics.year_periods],
                         [253])
        self.assertEqual([x.num_trading_days for x in metrics.month_periods],
                         [21, 20, 20, 22, 21, 21, 22, 21, 21, 23, 19, 22])

    def test_benchmark_volatility_08(self):
        returns = factory.create_returns_from_range(self.sim_params08)
        metrics = risk.RiskReport(returns, self.sim_params08, env=self.env)
        self.assertEqual(
            [round(x.benchmark_volatility, 3) for x in metrics.month_periods],
            [0.07, 0.058, 0.082, 0.054, 0.041, 0.057,
             0.068, 0.06, 0.157, 0.244, 0.195, 0.145])
        self.assertEqual(
            [round(x.benchmark_volatility, 3)
             for x in metrics.three_month_periods],
            [0.12, 0.113, 0.105, 0.09, 0.098,
             0.107, 0.179, 0.293, 0.344, 0.34])
        self.assertEqual(
            [round(x.benchmark_volatility, 3)
             for x in metrics.six_month_periods],
            [0.15, 0.149, 0.15, 0.2, 0.308, 0.36, 0.383])
        # TODO: ugly, but I can't get the rounded float to match.
        # maybe we need a different test that checks the
        # difference between the numbers
        self.assertEqual(
            [round(x.benchmark_volatility, 3) for x in metrics.year_periods],
            [0.411])

    def test_treasury_returns_06(self):
        returns = factory.create_returns_from_range(self.sim_params)
        metrics = risk.RiskReport(returns, self.sim_params, env=self.env)
        self.assertEqual(
            [round(x.treasury_period_return, 4)
             for x in metrics.month_periods],
            [0.0037, 0.0034, 0.0039, 0.0038, 0.0040, 0.0037,
             0.0043, 0.0043, 0.0038, 0.0044, 0.0043, 0.004])
        self.assertEqual(
            [round(x.treasury_period_return, 4)
             for x in metrics.three_month_periods],
            [0.0114, 0.0116, 0.0122, 0.0125, 0.0129,
             0.0127, 0.0123, 0.0128, 0.0125, 0.0127])
        self.assertEqual(
            [round(x.treasury_period_return, 4)
             for x in metrics.six_month_periods],
            [0.0260, 0.0257, 0.0258, 0.0252, 0.0259, 0.0256, 0.0257])
        self.assertEqual(
            [round(x.treasury_period_return, 4)
             for x in metrics.year_periods],
            [0.0500])

    def test_benchmarkrange(self):
        self.check_year_range(
            datetime.datetime(year=2008, month=1, day=1, tzinfo=pytz.utc),
            2)

    def test_partial_month(self):
        start = datetime.datetime(
            year=1991, month=1, day=1, hour=0, minute=0, tzinfo=pytz.utc)
        # 1992 and 1996 were leap years
        total_days = 365 * 5 + 2
        end = start + datetime.timedelta(days=total_days)
        sim_params90s = SimulationParameters(
            period_start=start,
            period_end=end,
            env=self.env,
        )
        returns = factory.create_returns_from_range(sim_params90s)
        # truncate the returns series to end mid-month
        returns = returns[:-10]
        metrics = risk.RiskReport(returns, sim_params90s, env=self.env)
        total_months = 60
        self.check_metrics(metrics, total_months, start)

    def check_year_range(self, start_date, years):
        """Build ``years`` of returns from ``start_date`` and verify window counts."""
        sim_params = SimulationParameters(
            period_start=start_date,
            period_end=start_date.replace(year=(start_date.year + years)),
            env=self.env,
        )
        returns = factory.create_returns_from_range(sim_params)
        # NOTE(review): passes self.sim_params (the 2006 fixture) rather than
        # the freshly built sim_params above -- looks unintended; confirm.
        metrics = risk.RiskReport(returns, self.sim_params, env=self.env)
        total_months = years * 12
        self.check_metrics(metrics, total_months, start_date)

    def check_metrics(self, metrics, total_months, start_date):
        """
        confirm that the right number of riskmetrics were calculated for
        each window length.
        """
        for periods, window_months in ((metrics.month_periods, 1),
                                       (metrics.three_month_periods, 3),
                                       (metrics.six_month_periods, 6),
                                       (metrics.year_periods, 12)):
            self.assert_range_length(periods, total_months,
                                     window_months, start_date)

    def assert_last_day(self, period_end):
        """Assert ``period_end`` falls on the true last calendar day of its month."""
        # 30 days has september, april, june and november
        if period_end.month in [9, 4, 6, 11]:
            self.assertEqual(period_end.day, 30)
        # all the rest have 31, except for february
        elif period_end.month != 2:
            self.assertEqual(period_end.day, 31)
        elif calendar.isleap(period_end.year):
            self.assertEqual(period_end.day, 29)
        else:
            self.assertEqual(period_end.day, 28)

    def assert_month(self, start_month, actual_end_month):
        # a series starting in January ends in December (of the prior year)
        expected_end_month = 12 if start_month == 1 else start_month - 1
        self.assertEqual(expected_end_month, actual_end_month)

    def assert_range_length(self, col, total_months,
                            period_length, start_date):
        """Check window count for one period length, then its end-date sanity."""
        if period_length > total_months:
            # Window longer than the whole series: no periods at all.
            self.assertEqual(len(col), 0)
            return
        self.assertEqual(
            len(col),
            total_months - (period_length - 1),
            "mismatch for total months - "
            "expected:{total_months}/actual:{actual}, "
            "period:{period_length}, start:{start_date}, "
            "calculated end:{end}".format(total_months=total_months,
                                          period_length=period_length,
                                          start_date=start_date,
                                          end=col[-1].end_date,
                                          actual=len(col)),
        )
        self.assert_month(start_date.month, col[-1].end_date.month)
        self.assert_last_day(col[-1].end_date)

    def test_sparse_benchmark(self):
        benchmark_returns = self.benchmark_returns_06.copy()
        # Set every other day to nan.
        benchmark_returns.iloc[::2] = np.nan
        report = risk.RiskReport(
            self.algo_returns_06,
            self.sim_params,
            benchmark_returns=benchmark_returns,
            env=self.env,
        )
        # With gaps in the benchmark, beta must be reported as missing.
        for risk_period in chain.from_iterable(itervalues(report.to_dict())):
            self.assertIsNone(risk_period['beta'])
[ "datetime.datetime", "zipline.finance.risk.RiskMetricsPeriod", "zipline.utils.factory.create_returns_from_list", "zipline.utils.factory.create_returns_from_range", "zipline.finance.risk.RiskReport", "zipline.finance.trading.TradingEnvironment", "numpy.testing.assert_almost_equal", "calendar.isleap", ...
[((1082, 1102), 'zipline.finance.trading.TradingEnvironment', 'TradingEnvironment', ([], {}), '()\n', (1100, 1102), False, 'from zipline.finance.trading import SimulationParameters, TradingEnvironment\n'), ((1213, 1292), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2006)', 'month': '(1)', 'day': '(1)', 'hour': '(0)', 'minute': '(0)', 'tzinfo': 'pytz.utc'}), '(year=2006, month=1, day=1, hour=0, minute=0, tzinfo=pytz.utc)\n', (1230, 1292), False, 'import datetime\n'), ((1385, 1448), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2006)', 'month': '(12)', 'day': '(31)', 'tzinfo': 'pytz.utc'}), '(year=2006, month=12, day=31, tzinfo=pytz.utc)\n', (1402, 1448), False, 'import datetime\n'), ((1489, 1574), 'zipline.finance.trading.SimulationParameters', 'SimulationParameters', ([], {'period_start': 'start_date', 'period_end': 'end_date', 'env': 'self.env'}), '(period_start=start_date, period_end=end_date, env=self.env\n )\n', (1509, 1574), False, 'from zipline.finance.trading import SimulationParameters, TradingEnvironment\n'), ((1649, 1707), 'zipline.utils.factory.create_returns_from_list', 'factory.create_returns_from_list', (['RETURNS', 'self.sim_params'], {}), '(RETURNS, self.sim_params)\n', (1681, 1707), False, 'from zipline.utils import factory\n'), ((1865, 1983), 'zipline.finance.risk.RiskReport', 'risk.RiskReport', (['self.algo_returns_06', 'self.sim_params'], {'benchmark_returns': 'self.benchmark_returns_06', 'env': 'self.env'}), '(self.algo_returns_06, self.sim_params, benchmark_returns=\n self.benchmark_returns_06, env=self.env)\n', (1880, 1983), True, 'import zipline.finance.risk as risk\n'), ((2058, 2137), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2008)', 'month': '(1)', 'day': '(1)', 'hour': '(0)', 'minute': '(0)', 'tzinfo': 'pytz.utc'}), '(year=2008, month=1, day=1, hour=0, minute=0, tzinfo=pytz.utc)\n', (2075, 2137), False, 'import datetime\n'), ((2229, 2292), 'datetime.datetime', 'datetime.datetime', ([], {'year': 
'(2008)', 'month': '(12)', 'day': '(31)', 'tzinfo': 'pytz.utc'}), '(year=2008, month=12, day=31, tzinfo=pytz.utc)\n', (2246, 2292), False, 'import datetime\n'), ((2379, 2455), 'zipline.finance.trading.SimulationParameters', 'SimulationParameters', ([], {'period_start': 'start_08', 'period_end': 'end_08', 'env': 'self.env'}), '(period_start=start_08, period_end=end_08, env=self.env)\n', (2399, 2455), False, 'from zipline.finance.trading import SimulationParameters, TradingEnvironment\n'), ((2622, 2680), 'zipline.utils.factory.create_returns_from_list', 'factory.create_returns_from_list', (['returns', 'self.sim_params'], {}), '(returns, self.sim_params)\n', (2654, 2680), False, 'from zipline.utils import factory\n'), ((2894, 2989), 'zipline.utils.factory.create_returns_from_list', 'factory.create_returns_from_list', (['[1.0, -0.5, 0.8, 0.17, 1.0, -0.1, -0.45]', 'self.sim_params'], {}), '([1.0, -0.5, 0.8, 0.17, 1.0, -0.1, -0.45],\n self.sim_params)\n', (2926, 2989), False, 'from zipline.utils import factory\n'), ((3070, 3203), 'zipline.finance.risk.RiskMetricsPeriod', 'risk.RiskMetricsPeriod', (['returns.index[0]', 'returns.index[-1]', 'returns'], {'env': 'self.env', 'benchmark_returns': 'self.env.benchmark_returns'}), '(returns.index[0], returns.index[-1], returns, env=\n self.env, benchmark_returns=self.env.benchmark_returns)\n', (3092, 3203), True, 'import zipline.finance.risk as risk\n'), ((3379, 3531), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.benchmark_period_returns for x in self.metrics_06.month_periods]', "ANSWER_KEY.BENCHMARK_PERIOD_RETURNS['Monthly']"], {}), "([x.benchmark_period_returns for x in self.\n metrics_06.month_periods], ANSWER_KEY.BENCHMARK_PERIOD_RETURNS['Monthly'])\n", (3409, 3531), True, 'import numpy as np\n'), ((3573, 3736), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.benchmark_period_returns for x in self.metrics_06.three_month_periods]', 
"ANSWER_KEY.BENCHMARK_PERIOD_RETURNS['3-Month']"], {}), "([x.benchmark_period_returns for x in self.\n metrics_06.three_month_periods], ANSWER_KEY.BENCHMARK_PERIOD_RETURNS[\n '3-Month'])\n", (3603, 3736), True, 'import numpy as np\n'), ((3773, 3934), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.benchmark_period_returns for x in self.metrics_06.six_month_periods]', "ANSWER_KEY.BENCHMARK_PERIOD_RETURNS['6-month']"], {}), "([x.benchmark_period_returns for x in self.\n metrics_06.six_month_periods], ANSWER_KEY.BENCHMARK_PERIOD_RETURNS[\n '6-month'])\n", (3803, 3934), True, 'import numpy as np\n'), ((3971, 4119), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.benchmark_period_returns for x in self.metrics_06.year_periods]', "ANSWER_KEY.BENCHMARK_PERIOD_RETURNS['year']"], {}), "([x.benchmark_period_returns for x in self.\n metrics_06.year_periods], ANSWER_KEY.BENCHMARK_PERIOD_RETURNS['year'])\n", (4001, 4119), True, 'import numpy as np\n'), ((4208, 4258), 'zipline.utils.factory.create_returns_from_range', 'factory.create_returns_from_range', (['self.sim_params'], {}), '(self.sim_params)\n', (4241, 4258), False, 'from zipline.utils import factory\n'), ((4277, 4332), 'zipline.finance.risk.RiskReport', 'risk.RiskReport', (['returns', 'self.sim_params'], {'env': 'self.env'}), '(returns, self.sim_params, env=self.env)\n', (4292, 4332), True, 'import zipline.finance.risk as risk\n'), ((4649, 4805), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.benchmark_volatility for x in self.metrics_06.month_periods]', "ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY['Monthly']"], {}), "([x.benchmark_volatility for x in self.\n metrics_06.month_periods], ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY[\n 'Monthly'])\n", (4679, 4805), True, 'import numpy as np\n'), ((4842, 5004), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.benchmark_volatility for x in 
self.metrics_06.three_month_periods]', "ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY['3-Month']"], {}), "([x.benchmark_volatility for x in self.\n metrics_06.three_month_periods], ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY\n ['3-Month'])\n", (4872, 5004), True, 'import numpy as np\n'), ((5041, 5201), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.benchmark_volatility for x in self.metrics_06.six_month_periods]', "ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY['6-month']"], {}), "([x.benchmark_volatility for x in self.\n metrics_06.six_month_periods], ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY[\n '6-month'])\n", (5071, 5201), True, 'import numpy as np\n'), ((5238, 5385), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.benchmark_volatility for x in self.metrics_06.year_periods]', "ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY['year']"], {}), "([x.benchmark_volatility for x in self.\n metrics_06.year_periods], ANSWER_KEY.BENCHMARK_PERIOD_VOLATILITY['year'])\n", (5268, 5385), True, 'import numpy as np\n'), ((5469, 5621), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.algorithm_period_returns for x in self.metrics_06.month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_RETURNS['Monthly']"], {}), "([x.algorithm_period_returns for x in self.\n metrics_06.month_periods], ANSWER_KEY.ALGORITHM_PERIOD_RETURNS['Monthly'])\n", (5499, 5621), True, 'import numpy as np\n'), ((5663, 5826), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.algorithm_period_returns for x in self.metrics_06.three_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_RETURNS['3-Month']"], {}), "([x.algorithm_period_returns for x in self.\n metrics_06.three_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_RETURNS[\n '3-Month'])\n", (5693, 5826), True, 'import numpy as np\n'), ((5863, 6024), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.algorithm_period_returns for x in self.metrics_06.six_month_periods]', 
"ANSWER_KEY.ALGORITHM_PERIOD_RETURNS['6-month']"], {}), "([x.algorithm_period_returns for x in self.\n metrics_06.six_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_RETURNS[\n '6-month'])\n", (5893, 6024), True, 'import numpy as np\n'), ((6061, 6209), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.algorithm_period_returns for x in self.metrics_06.year_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_RETURNS['year']"], {}), "([x.algorithm_period_returns for x in self.\n metrics_06.year_periods], ANSWER_KEY.ALGORITHM_PERIOD_RETURNS['year'])\n", (6091, 6209), True, 'import numpy as np\n'), ((6296, 6452), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.algorithm_volatility for x in self.metrics_06.month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY['Monthly']"], {}), "([x.algorithm_volatility for x in self.\n metrics_06.month_periods], ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY[\n 'Monthly'])\n", (6326, 6452), True, 'import numpy as np\n'), ((6489, 6651), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.algorithm_volatility for x in self.metrics_06.three_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY['3-Month']"], {}), "([x.algorithm_volatility for x in self.\n metrics_06.three_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY\n ['3-Month'])\n", (6519, 6651), True, 'import numpy as np\n'), ((6688, 6848), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.algorithm_volatility for x in self.metrics_06.six_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY['6-month']"], {}), "([x.algorithm_volatility for x in self.\n metrics_06.six_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY[\n '6-month'])\n", (6718, 6848), True, 'import numpy as np\n'), ((6885, 7032), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.algorithm_volatility for x in self.metrics_06.year_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY['year']"], 
{}), "([x.algorithm_volatility for x in self.\n metrics_06.year_periods], ANSWER_KEY.ALGORITHM_PERIOD_VOLATILITY['year'])\n", (6915, 7032), True, 'import numpy as np\n'), ((7115, 7248), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.sharpe for x in self.metrics_06.month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_SHARPE['Monthly']"], {}), "([x.sharpe for x in self.metrics_06.\n month_periods], ANSWER_KEY.ALGORITHM_PERIOD_SHARPE['Monthly'])\n", (7145, 7248), True, 'import numpy as np\n'), ((7277, 7416), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.sharpe for x in self.metrics_06.three_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_SHARPE['3-Month']"], {}), "([x.sharpe for x in self.metrics_06.\n three_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_SHARPE['3-Month'])\n", (7307, 7416), True, 'import numpy as np\n'), ((7445, 7582), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.sharpe for x in self.metrics_06.six_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_SHARPE['6-month']"], {}), "([x.sharpe for x in self.metrics_06.\n six_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_SHARPE['6-month'])\n", (7475, 7582), True, 'import numpy as np\n'), ((7611, 7740), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.sharpe for x in self.metrics_06.year_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_SHARPE['year']"], {}), "([x.sharpe for x in self.metrics_06.\n year_periods], ANSWER_KEY.ALGORITHM_PERIOD_SHARPE['year'])\n", (7641, 7740), True, 'import numpy as np\n'), ((7817, 7979), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.downside_risk for x in self.metrics_06.month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_DOWNSIDE_RISK['Monthly']"], {'decimal': '(4)'}), "([x.downside_risk for x in self.metrics_06.\n month_periods], ANSWER_KEY.ALGORITHM_PERIOD_DOWNSIDE_RISK['Monthly'],\n decimal=4)\n", (7847, 7979), True, 'import numpy as np\n'), ((8016, 8185), 
'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.downside_risk for x in self.metrics_06.three_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_DOWNSIDE_RISK['3-Month']"], {'decimal': '(4)'}), "([x.downside_risk for x in self.metrics_06.\n three_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_DOWNSIDE_RISK[\n '3-Month'], decimal=4)\n", (8046, 8185), True, 'import numpy as np\n'), ((8221, 8388), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.downside_risk for x in self.metrics_06.six_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_DOWNSIDE_RISK['6-month']"], {'decimal': '(4)'}), "([x.downside_risk for x in self.metrics_06.\n six_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_DOWNSIDE_RISK['6-month'\n ], decimal=4)\n", (8251, 8388), True, 'import numpy as np\n'), ((8424, 8583), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.downside_risk for x in self.metrics_06.year_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_DOWNSIDE_RISK['year']"], {'decimal': '(4)'}), "([x.downside_risk for x in self.metrics_06.\n year_periods], ANSWER_KEY.ALGORITHM_PERIOD_DOWNSIDE_RISK['year'], decimal=4\n )\n", (8454, 8583), True, 'import numpy as np\n'), ((8661, 8807), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.sortino for x in self.metrics_06.month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_SORTINO['Monthly']"], {'decimal': '(3)'}), "([x.sortino for x in self.metrics_06.\n month_periods], ANSWER_KEY.ALGORITHM_PERIOD_SORTINO['Monthly'], decimal=3)\n", (8691, 8807), True, 'import numpy as np\n'), ((8849, 9005), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.sortino for x in self.metrics_06.three_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_SORTINO['3-Month']"], {'decimal': '(3)'}), "([x.sortino for x in self.metrics_06.\n three_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_SORTINO['3-Month'],\n decimal=3)\n", (8879, 9005), True, 'import numpy as np\n'), ((9043, 
9197), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.sortino for x in self.metrics_06.six_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_SORTINO['6-month']"], {'decimal': '(3)'}), "([x.sortino for x in self.metrics_06.\n six_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_SORTINO['6-month'],\n decimal=3)\n", (9073, 9197), True, 'import numpy as np\n'), ((9235, 9377), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.sortino for x in self.metrics_06.year_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_SORTINO['year']"], {'decimal': '(3)'}), "([x.sortino for x in self.metrics_06.\n year_periods], ANSWER_KEY.ALGORITHM_PERIOD_SORTINO['year'], decimal=3)\n", (9265, 9377), True, 'import numpy as np\n'), ((10992, 11121), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.beta for x in self.metrics_06.month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_BETA['Monthly']"], {}), "([x.beta for x in self.metrics_06.\n month_periods], ANSWER_KEY.ALGORITHM_PERIOD_BETA['Monthly'])\n", (11022, 11121), True, 'import numpy as np\n'), ((11150, 11285), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.beta for x in self.metrics_06.three_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_BETA['3-Month']"], {}), "([x.beta for x in self.metrics_06.\n three_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_BETA['3-Month'])\n", (11180, 11285), True, 'import numpy as np\n'), ((11314, 11447), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.beta for x in self.metrics_06.six_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_BETA['6-month']"], {}), "([x.beta for x in self.metrics_06.\n six_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_BETA['6-month'])\n", (11344, 11447), True, 'import numpy as np\n'), ((11476, 11601), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.beta for x in self.metrics_06.year_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_BETA['year']"], {}), 
"([x.beta for x in self.metrics_06.\n year_periods], ANSWER_KEY.ALGORITHM_PERIOD_BETA['year'])\n", (11506, 11601), True, 'import numpy as np\n'), ((11670, 11801), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.alpha for x in self.metrics_06.month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_ALPHA['Monthly']"], {}), "([x.alpha for x in self.metrics_06.\n month_periods], ANSWER_KEY.ALGORITHM_PERIOD_ALPHA['Monthly'])\n", (11700, 11801), True, 'import numpy as np\n'), ((11830, 11967), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.alpha for x in self.metrics_06.three_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_ALPHA['3-Month']"], {}), "([x.alpha for x in self.metrics_06.\n three_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_ALPHA['3-Month'])\n", (11860, 11967), True, 'import numpy as np\n'), ((11996, 12131), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.alpha for x in self.metrics_06.six_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_ALPHA['6-month']"], {}), "([x.alpha for x in self.metrics_06.\n six_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_ALPHA['6-month'])\n", (12026, 12131), True, 'import numpy as np\n'), ((12160, 12287), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.alpha for x in self.metrics_06.year_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_ALPHA['year']"], {}), "([x.alpha for x in self.metrics_06.\n year_periods], ANSWER_KEY.ALGORITHM_PERIOD_ALPHA['year'])\n", (12190, 12287), True, 'import numpy as np\n'), ((12707, 12863), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.algorithm_covariance for x in self.metrics_06.month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE['Monthly']"], {}), "([x.algorithm_covariance for x in self.\n metrics_06.month_periods], ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE[\n 'Monthly'])\n", (12737, 12863), True, 'import numpy as np\n'), ((12887, 13049), 'numpy.testing.assert_almost_equal', 
'np.testing.assert_almost_equal', (['[x.algorithm_covariance for x in self.metrics_06.three_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE['3-Month']"], {}), "([x.algorithm_covariance for x in self.\n metrics_06.three_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE\n ['3-Month'])\n", (12917, 13049), True, 'import numpy as np\n'), ((13086, 13246), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.algorithm_covariance for x in self.metrics_06.six_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE['6-month']"], {}), "([x.algorithm_covariance for x in self.\n metrics_06.six_month_periods], ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE[\n '6-month'])\n", (13116, 13246), True, 'import numpy as np\n'), ((13283, 13430), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.algorithm_covariance for x in self.metrics_06.year_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE['year']"], {}), "([x.algorithm_covariance for x in self.\n metrics_06.year_periods], ANSWER_KEY.ALGORITHM_PERIOD_COVARIANCE['year'])\n", (13313, 13430), True, 'import numpy as np\n'), ((13515, 13677), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.benchmark_variance for x in self.metrics_06.month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_BENCHMARK_VARIANCE['Monthly']"], {}), "([x.benchmark_variance for x in self.\n metrics_06.month_periods], ANSWER_KEY.\n ALGORITHM_PERIOD_BENCHMARK_VARIANCE['Monthly'])\n", (13545, 13677), True, 'import numpy as np\n'), ((13714, 13882), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.benchmark_variance for x in self.metrics_06.three_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_BENCHMARK_VARIANCE['3-Month']"], {}), "([x.benchmark_variance for x in self.\n metrics_06.three_month_periods], ANSWER_KEY.\n ALGORITHM_PERIOD_BENCHMARK_VARIANCE['3-Month'])\n", (13744, 13882), True, 'import numpy as np\n'), ((13919, 14085), 'numpy.testing.assert_almost_equal', 
'np.testing.assert_almost_equal', (['[x.benchmark_variance for x in self.metrics_06.six_month_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_BENCHMARK_VARIANCE['6-month']"], {}), "([x.benchmark_variance for x in self.\n metrics_06.six_month_periods], ANSWER_KEY.\n ALGORITHM_PERIOD_BENCHMARK_VARIANCE['6-month'])\n", (13949, 14085), True, 'import numpy as np\n'), ((14122, 14280), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['[x.benchmark_variance for x in self.metrics_06.year_periods]', "ANSWER_KEY.ALGORITHM_PERIOD_BENCHMARK_VARIANCE['year']"], {}), "([x.benchmark_variance for x in self.\n metrics_06.year_periods], ANSWER_KEY.\n ALGORITHM_PERIOD_BENCHMARK_VARIANCE['year'])\n", (14152, 14280), True, 'import numpy as np\n'), ((14369, 14421), 'zipline.utils.factory.create_returns_from_range', 'factory.create_returns_from_range', (['self.sim_params08'], {}), '(self.sim_params08)\n', (14402, 14421), False, 'from zipline.utils import factory\n'), ((14440, 14497), 'zipline.finance.risk.RiskReport', 'risk.RiskReport', (['returns', 'self.sim_params08'], {'env': 'self.env'}), '(returns, self.sim_params08, env=self.env)\n', (14455, 14497), True, 'import zipline.finance.risk as risk\n'), ((16070, 16122), 'zipline.utils.factory.create_returns_from_range', 'factory.create_returns_from_range', (['self.sim_params08'], {}), '(self.sim_params08)\n', (16103, 16122), False, 'from zipline.utils import factory\n'), ((16141, 16198), 'zipline.finance.risk.RiskReport', 'risk.RiskReport', (['returns', 'self.sim_params08'], {'env': 'self.env'}), '(returns, self.sim_params08, env=self.env)\n', (16156, 16198), True, 'import zipline.finance.risk as risk\n'), ((16525, 16577), 'zipline.utils.factory.create_returns_from_range', 'factory.create_returns_from_range', (['self.sim_params08'], {}), '(self.sim_params08)\n', (16558, 16577), False, 'from zipline.utils import factory\n'), ((16596, 16653), 'zipline.finance.risk.RiskReport', 'risk.RiskReport', (['returns', 
'self.sim_params08'], {'env': 'self.env'}), '(returns, self.sim_params08, env=self.env)\n', (16611, 16653), True, 'import zipline.finance.risk as risk\n'), ((18345, 18395), 'zipline.utils.factory.create_returns_from_range', 'factory.create_returns_from_range', (['self.sim_params'], {}), '(self.sim_params)\n', (18378, 18395), False, 'from zipline.utils import factory\n'), ((18414, 18469), 'zipline.finance.risk.RiskReport', 'risk.RiskReport', (['returns', 'self.sim_params'], {'env': 'self.env'}), '(returns, self.sim_params, env=self.env)\n', (18429, 18469), True, 'import zipline.finance.risk as risk\n'), ((20210, 20289), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(1991)', 'month': '(1)', 'day': '(1)', 'hour': '(0)', 'minute': '(0)', 'tzinfo': 'pytz.utc'}), '(year=1991, month=1, day=1, hour=0, minute=0, tzinfo=pytz.utc)\n', (20227, 20289), False, 'import datetime\n'), ((20519, 20589), 'zipline.finance.trading.SimulationParameters', 'SimulationParameters', ([], {'period_start': 'start', 'period_end': 'end', 'env': 'self.env'}), '(period_start=start, period_end=end, env=self.env)\n', (20539, 20589), False, 'from zipline.finance.trading import SimulationParameters, TradingEnvironment\n'), ((20656, 20704), 'zipline.utils.factory.create_returns_from_range', 'factory.create_returns_from_range', (['sim_params90s'], {}), '(sim_params90s)\n', (20689, 20704), False, 'from zipline.utils import factory\n'), ((20803, 20856), 'zipline.finance.risk.RiskReport', 'risk.RiskReport', (['returns', 'sim_params90s'], {'env': 'self.env'}), '(returns, sim_params90s, env=self.env)\n', (20818, 20856), True, 'import zipline.finance.risk as risk\n'), ((21201, 21246), 'zipline.utils.factory.create_returns_from_range', 'factory.create_returns_from_range', (['sim_params'], {}), '(sim_params)\n', (21234, 21246), False, 'from zipline.utils import factory\n'), ((21265, 21320), 'zipline.finance.risk.RiskReport', 'risk.RiskReport', (['returns', 'self.sim_params'], {'env': 'self.env'}), 
'(returns, self.sim_params, env=self.env)\n', (21280, 21320), True, 'import zipline.finance.risk as risk\n'), ((24130, 24240), 'zipline.finance.risk.RiskReport', 'risk.RiskReport', (['self.algo_returns_06', 'self.sim_params'], {'benchmark_returns': 'benchmark_returns', 'env': 'self.env'}), '(self.algo_returns_06, self.sim_params, benchmark_returns=\n benchmark_returns, env=self.env)\n', (24145, 24240), True, 'import zipline.finance.risk as risk\n'), ((20063, 20124), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2008)', 'month': '(1)', 'day': '(1)', 'tzinfo': 'pytz.utc'}), '(year=2008, month=1, day=1, tzinfo=pytz.utc)\n', (20080, 20124), False, 'import datetime\n'), ((20459, 20494), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'total_days'}), '(days=total_days)\n', (20477, 20494), False, 'import datetime\n'), ((2752, 2815), 'datetime.datetime', 'datetime.datetime', ([], {'year': '(2006)', 'month': '(12)', 'day': '(31)', 'tzinfo': 'pytz.utc'}), '(year=2006, month=12, day=31, tzinfo=pytz.utc)\n', (2769, 2815), False, 'import datetime\n'), ((22556, 22588), 'calendar.isleap', 'calendar.isleap', (['period_end.year'], {}), '(period_end.year)\n', (22571, 22588), False, 'import calendar\n')]
import sys
import numpy
import helper
import bayesianClusterEvaluationLaplacian
import experiments
import baselineMultiLogReg
import pickle
import syntheticDataGeneration
import sklearn.metrics
import constants


def getANMI(allResults, criteriaID):
    """Return the ANMI of the clustering picked by a model-selection criterion.

    Parameters
    ----------
    allResults : ndarray, shape (nClusterings, nColumns)
        One row per candidate clustering; columns are indexed by the ids
        declared in ``constants`` (e.g. ``constants.ANMI_ID``).
    criteriaID : int
        Column index of the selection criterion to maximize.

    Returns
    -------
    float
        The ANMI (column ``constants.ANMI_ID``) of the clustering with the
        highest value of the given criterion.
    """
    bestId = numpy.argmax(allResults[:, criteriaID])
    return allResults[bestId, constants.ANMI_ID]


def showDetails(trueClusterIds, allClusterings, allResults, criteriaID):
    """Print diagnostics for the clusterings ranked by the given criterion.

    Shows the clustering ranked best by the criterion, a LaTeX table row
    fragment for the top-5 clusterings, and the result that an ANMI-oracle
    would have selected.

    Parameters
    ----------
    trueClusterIds : array-like
        Ground-truth cluster id per covariate.
    allClusterings : sequence
        Candidate clusterings, aligned with the rows of ``allResults``.
    allResults : ndarray, shape (nClusterings, nColumns)
        Evaluation scores; columns indexed by the ids in ``constants``.
    criteriaID : int
        Column index of the selection criterion used for ranking.
    """
    # indices of the clusterings, sorted by decreasing criterion value
    idsForSorting = numpy.argsort(- allResults[:, criteriaID])

    outputForLatex = ""
    for nr, i in enumerate(idsForSorting):
        if nr == 0:
            # show the clustering that the criterion ranks best
            helper.showVecInt(numpy.asarray(allClusterings[i]))
        if nr >= 5:
            break
        outputForLatex += " & " + str(round(allResults[i, constants.ANMI_ID], 1)) \
            + " & " + str(int(allResults[i, constants.EFFECTIVE_NUMBER_OF_COVARIATES_ID])) \
            + " & " + str(allResults[i, criteriaID]) + " \\\\ " + "\n"

    # BUGFIX: the original printed the LaTeX fragment only from the loop's
    # else-branch (reached only when there were MORE than 5 clusterings);
    # print it unconditionally after collecting the top-5 rows.
    print("outputForLatex = ")
    print(outputForLatex)

    # oracle result: the clustering an ANMI-oracle would have chosen
    bestId = numpy.argmax(allResults[:, constants.ANMI_ID])
    print("RESULT WITH BEST ANMI = " + str(allResults[bestId, constants.ANMI_ID]) + " & " + str(int(allResults[bestId, constants.EFFECTIVE_NUMBER_OF_COVARIATES_ID])) + " & " + str(allResults[bestId, criteriaID]))
    helper.showVecInt(numpy.asarray(allClusterings[bestId]))
    bestClusteringFound = helper.getClusterIds(allClusterings[bestId])
    print("bestClusteringFound = ", bestClusteringFound)
    print("trueClusterIds = ", trueClusterIds)
    adjustedNMI = sklearn.metrics.adjusted_mutual_info_score(trueClusterIds, bestClusteringFound)
    print("adjustedNMI = ", adjustedNMI)
    return


def showResults(testSetting, NUMBER_OF_SAMPLES_PER_CLASS, METHOD):
    """Summarize the clustering-evaluation results over all folds.

    Loads, for every cross-validation fold, the saved evaluation scores
    (``.npy``) and candidate clusterings (``.pkl``), prints per-fold
    diagnostics, and finally reports the average ANMI obtained when model
    selection is done with the marginal likelihood, train-CV log-probability,
    train-CV accuracy, and an ANMI oracle.

    Parameters
    ----------
    testSetting : str
        Name of the synthetic test setting.
    NUMBER_OF_SAMPLES_PER_CLASS : int
        Samples per class used when the data was generated.
    METHOD : str
        One of ``"onlyNu"``, ``"kMeansClustering"``, ``"convexClustering"``.

    Raises
    ------
    ValueError
        If ``METHOD`` is not one of the supported method names.
    """
    assert METHOD != "onlyGamma"

    TOTAL_NUMBER_OF_FOLDS = 10

    covariateSims, dataFeature_allFolds, dataLabels_allFolds, trueClusterIds, trueRelevantCovariates, NUMBER_OF_CLASSES, NUMBER_OF_LATENT_CLUSTERS, NUMBER_OF_COVARIATES_PER_CLUSTER, IRRELEVANT_CLUSTERS, CONTRADICTING_CLUSTERS = syntheticDataGeneration.generateData(testSetting, NUMBER_OF_SAMPLES_PER_CLASS, TOTAL_NUMBER_OF_FOLDS)

    print("trueClusterIds = ")
    print(trueClusterIds)

    avgNeighbours = "all"
    DATA_NAME = "SYNTHETIC_DATA"
    TRAIN_DATA_SEPCIFIER = testSetting + "_" + str(NUMBER_OF_SAMPLES_PER_CLASS) + "sc"

    # LaTeX row label for the method (kept for manual table assembly);
    # the if/elif chain also validates METHOD.
    # BUGFIX: the original used `assert(False)` here, which is stripped
    # under `python -O` and would let an unknown METHOD fall through.
    if METHOD == "onlyNu":
        outputForLatex = "\multirowcell{5}{+ CLAW \\\\ Clustering} "
    elif METHOD == "kMeansClustering":
        outputForLatex = "\multirowcell{5}{+ k-means \\\\ Clustering} "
    elif METHOD == "convexClustering":
        outputForLatex = "\multirowcell{5}{+ Convex \\\\ Clustering} "
    else:
        raise ValueError("unknown METHOD: " + str(METHOD))

    allANMIs_marginalLikelihood = numpy.zeros(TOTAL_NUMBER_OF_FOLDS)
    allANMIs_trainCVlogProb = numpy.zeros(TOTAL_NUMBER_OF_FOLDS)
    allANMIs_trainCVacc = numpy.zeros(TOTAL_NUMBER_OF_FOLDS)
    allANMIs_oracle = numpy.zeros(TOTAL_NUMBER_OF_FOLDS)

    for foldId in range(TOTAL_NUMBER_OF_FOLDS):
        print("**************** RESULTS FOR FOLD " + str(foldId) + " ********************************")
        filename = helper.EVALUATION_RESULTS_FOLDER + DATA_NAME + TRAIN_DATA_SEPCIFIER + "_" + METHOD + "_" + str(avgNeighbours) + "Neighbours_" + str(foldId) + "fold"
        allResults = numpy.load(filename + ".npy")
        with open(filename + "_clusterings.pkl", "rb") as f:
            allClusterings = pickle.load(f)

        # ANMI of the clustering each selection criterion would pick
        allANMIs_marginalLikelihood[foldId] = getANMI(allResults, constants.LOG_MARGINAL_LAPLACE_DIAG_VALIDATION_CRITERIA_ID)
        allANMIs_trainCVlogProb[foldId] = getANMI(allResults, constants.TRAIN_CV_LOG_PROB_ID)
        allANMIs_trainCVacc[foldId] = getANMI(allResults, constants.TRAIN_CV_ACC_ID)
        allANMIs_oracle[foldId] = getANMI(allResults, constants.ANMI_ID)

        print("LOG_MARGINAL_LAPLACE_DIAG_VALIDATION_CRITERIA_ID")
        showDetails(trueClusterIds, allClusterings, allResults, constants.LOG_MARGINAL_LAPLACE_DIAG_VALIDATION_CRITERIA_ID)
        print("TRAIN_CV_ACC_ID")
        showDetails(trueClusterIds, allClusterings, allResults, constants.TRAIN_CV_ACC_ID)
        print("TOTAL NUMBER OF CLUSTERING RESULTS = ", allResults.shape[0])

    print(METHOD)
    print(testSetting + " " + str(NUMBER_OF_SAMPLES_PER_CLASS))
    print("AVERAGE ANMI SELECED WITH MARGINAL LIKELIHOOD = ", helper.showAvgAndStd(allANMIs_marginalLikelihood, digits = 2))
    print("AVERAGE ANMI SELECED WITH TRAIN-CV-LOGPROB = ", helper.showAvgAndStd(allANMIs_trainCVlogProb, digits = 2))
    print("AVERAGE ANMI SELECED WITH TRAIN-CV-ACC = ", helper.showAvgAndStd(allANMIs_trainCVacc, digits = 2))
    print("AVERAGE ANMI ORACLE PERFORMANCE = ", helper.showAvgAndStd(allANMIs_oracle, digits = 2))
[ "helper.getClusterIds", "numpy.asarray", "numpy.argmax", "pickle.load", "numpy.argsort", "syntheticDataGeneration.generateData", "numpy.zeros", "helper.showAvgAndStd", "numpy.load" ]
[((263, 302), 'numpy.argmax', 'numpy.argmax', (['allResults[:, criteriaID]'], {}), '(allResults[:, criteriaID])\n', (275, 302), False, 'import numpy\n'), ((447, 488), 'numpy.argsort', 'numpy.argsort', (['(-allResults[:, criteriaID])'], {}), '(-allResults[:, criteriaID])\n', (460, 488), False, 'import numpy\n'), ((1035, 1081), 'numpy.argmax', 'numpy.argmax', (['allResults[:, constants.ANMI_ID]'], {}), '(allResults[:, constants.ANMI_ID])\n', (1047, 1081), False, 'import numpy\n'), ((1588, 1632), 'helper.getClusterIds', 'helper.getClusterIds', (['allClusterings[bestId]'], {}), '(allClusterings[bestId])\n', (1608, 1632), False, 'import helper\n'), ((2367, 2472), 'syntheticDataGeneration.generateData', 'syntheticDataGeneration.generateData', (['testSetting', 'NUMBER_OF_SAMPLES_PER_CLASS', 'TOTAL_NUMBER_OF_FOLDS'], {}), '(testSetting,\n NUMBER_OF_SAMPLES_PER_CLASS, TOTAL_NUMBER_OF_FOLDS)\n', (2403, 2472), False, 'import syntheticDataGeneration\n'), ((3099, 3133), 'numpy.zeros', 'numpy.zeros', (['TOTAL_NUMBER_OF_FOLDS'], {}), '(TOTAL_NUMBER_OF_FOLDS)\n', (3110, 3133), False, 'import numpy\n'), ((3164, 3198), 'numpy.zeros', 'numpy.zeros', (['TOTAL_NUMBER_OF_FOLDS'], {}), '(TOTAL_NUMBER_OF_FOLDS)\n', (3175, 3198), False, 'import numpy\n'), ((3225, 3259), 'numpy.zeros', 'numpy.zeros', (['TOTAL_NUMBER_OF_FOLDS'], {}), '(TOTAL_NUMBER_OF_FOLDS)\n', (3236, 3259), False, 'import numpy\n'), ((3282, 3316), 'numpy.zeros', 'numpy.zeros', (['TOTAL_NUMBER_OF_FOLDS'], {}), '(TOTAL_NUMBER_OF_FOLDS)\n', (3293, 3316), False, 'import numpy\n'), ((1523, 1560), 'numpy.asarray', 'numpy.asarray', (['allClusterings[bestId]'], {}), '(allClusterings[bestId])\n', (1536, 1560), False, 'import numpy\n'), ((3677, 3706), 'numpy.load', 'numpy.load', (["(filename + '.npy')"], {}), "(filename + '.npy')\n", (3687, 3706), False, 'import numpy\n'), ((4826, 4885), 'helper.showAvgAndStd', 'helper.showAvgAndStd', (['allANMIs_marginalLikelihood'], {'digits': '(2)'}), '(allANMIs_marginalLikelihood, digits=2)\n', 
(4846, 4885), False, 'import helper\n'), ((4948, 5003), 'helper.showAvgAndStd', 'helper.showAvgAndStd', (['allANMIs_trainCVlogProb'], {'digits': '(2)'}), '(allANMIs_trainCVlogProb, digits=2)\n', (4968, 5003), False, 'import helper\n'), ((5062, 5113), 'helper.showAvgAndStd', 'helper.showAvgAndStd', (['allANMIs_trainCVacc'], {'digits': '(2)'}), '(allANMIs_trainCVacc, digits=2)\n', (5082, 5113), False, 'import helper\n'), ((5165, 5212), 'helper.showAvgAndStd', 'helper.showAvgAndStd', (['allANMIs_oracle'], {'digits': '(2)'}), '(allANMIs_oracle, digits=2)\n', (5185, 5212), False, 'import helper\n'), ((3836, 3850), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3847, 3850), False, 'import pickle\n'), ((616, 648), 'numpy.asarray', 'numpy.asarray', (['allClusterings[i]'], {}), '(allClusterings[i])\n', (629, 648), False, 'import numpy\n')]
# Authors:
#     <NAME> <<EMAIL>>
#     <NAME> <<EMAIL>>
#
# License: BSD 3 clause
"""
Base element
"""

# pylint: disable=invalid-name

import logging
from abc import ABC, abstractmethod

from six.moves import range
import numpy as np

from .utils import distance_lines, distance_ellipse

__all__ = ['Element', 'BaseCircle', 'BaseEllipse',
           'BaseTriangle', 'BaseParallelogram']

log = logging.getLogger(__name__)  # pylint: disable=invalid-name


class Element(ABC):
    """
    Class Element

    generic class for the elements
    """
    # number of boundary pieces of the element; subclasses override it
    number_of_bounds = -1

    def __init__(self, label, isfluid):
        self.isfluid = isfluid
        # an int label is broadcast to every bound of the element
        if isinstance(label, int):
            self.label = [label]*self.number_of_bounds
        else:
            self.label = label
        self.test_label()

    @abstractmethod
    def get_bounds(self):
        """
        return the smallest box where the element is.
        """

    @abstractmethod
    def point_inside(self, grid):
        """
        return a boolean array which defines
        if a point is inside or outside of the element.

        Notes
        -----

        the edges of the element are considered as inside.

        Parameters
        ----------

        grid : ndarray
            coordinates of the points

        Returns
        -------

        ndarray
            Array of boolean (True inside the element, False otherwise)
        """

    @abstractmethod
    def visualize(self,
                  viewer, color, viewlabel=False,
                  scale=np.ones(2), alpha=1.
                  ):
        """
        visualize the element

        NOTE: the default ``scale`` array is created once at class-definition
        time and shared between calls; implementations must not mutate it
        in place.

        Parameters
        ----------

        viewer : Viewer
            a viewer (default matplotlib_viewer)

        color : color
            color of the element

        viewlabel : bool
            activate the labels mark (default False)

        scale : ndarray
            scale the distance of the labels (default ones)

        alpha : double
            transparency of the element (default 1)

        """

    def __repr__(self):
        return self.__str__()

    def test_label(self):
        """
        test if the number of labels is equal to the number of bounds.
        """
        return len(self.label) == self.number_of_bounds


class BaseCircle:
    """
    Class BaseCircle

    Parameters
    ----------

    center : list
        the three coordinates of the center

    v1 : list
        the three coordinates of the first vector defining the circular base

    v2 : list
        the three coordinates of the second vector defining the circular base

    """
    def __init__(self, center, v1, v2):
        self.center = np.asarray(center)
        radius = np.linalg.norm(v1)
        # BUGFIX: use a tolerant float comparison; exact `!=` on two computed
        # norms spuriously flags legitimate input because of rounding.
        if not np.isclose(radius, np.linalg.norm(v2)):
            err_msg = "Error in BaseCircle: "
            err_msg += "vectors v1 and v2 must have the same norm"
            log.error(err_msg)
        self.radius = radius
        # orthogonalization of the two vectors (Gram-Schmidt step on v2)
        self.v1 = np.asarray(v1)
        v2 = np.asarray(v2)
        self.v2 = v2 - np.inner(v2, self.v1) * self.v1 \
            / np.inner(self.v1, self.v1)
        nv2 = np.linalg.norm(self.v2)
        if nv2 == 0:
            # v2 collapsed to zero after orthogonalization => colinear input
            err_msg = "Error in the definition of the cylinder: "
            err_msg += "the vectors are colinear"
            log.error(err_msg)
        log.info(self.__str__())

    def get_bounds(self):
        """
        Get the bounds of the base
        """
        return self.center - self.radius, self.center + self.radius

    # pylint: disable=no-self-use
    def point_inside(self, grid):
        """
        return a boolean array which defines
        if a point is inside or outside of the element.

        Notes
        -----

        the edges of the element are considered as inside.

        Parameters
        ----------

        grid : ndarray
            coordinates of the points

        Returns
        -------

        ndarray
            Array of boolean (True inside the element, False otherwise)
        """
        # grid coordinates are expected in the normalized base frame
        x, y = grid
        return (x**2 + y**2) <= 1.

    @staticmethod
    def distance(grid, v, dmax, label):
        """
        Compute the distance in the v direction between
        the element and the points defined by (x, y)
        for a given label.

        Parameters
        ----------

        grid : ndarray
            coordinates of the points

        v : ndarray
            direction of interest

        dmax : float
            distance max

        label : int
            the label of interest

        Returns
        -------

        ndarray
            array of distances

        """
        x, y = grid
        # unit circle in the normalized frame
        c = np.zeros((2,))
        v1 = np.asarray([1, 0])
        v2 = np.asarray([0, 1])
        return distance_ellipse(x, y, v, c, v1, v2, dmax, label)

    @staticmethod
    def _visualize():
        t = np.linspace(0, 2.*np.pi, 100)
        lx_b = [np.cos(t), np.cos(t), np.cos(t[::-1])]
        ly_b = [np.sin(t), np.sin(t), np.sin(t[::-1])]
        return lx_b, ly_b

    def __str__(self):
        s = "Circular base with radius {} ".format(self.radius)
        s += "centered in " + str(self.center) + "\n"
        s += "     in the plane spanned by " + str(self.v1)
        s += " and " + str(self.v2) + "\n"
        return s


class BaseEllipse:
    """
    Class BaseEllipse

    Parameters
    ----------

    center : list
        the three coordinates of the center

    v1 : list
        the three coordinates of the first vector defining the ellipsoidal base

    v2 : list
        the three coordinates of the second vector defining the ellipsoidal base

    Warnings
    --------

    The vectors v1 and v2 have to be orthogonal.

    """
    def __init__(self, center, v1, v2):
        self.center = np.asarray(center)
        self.v1 = np.asarray(v1)
        self.v2 = np.asarray(v2)
        # the two axes must be orthogonal for the ellipse parameterization
        if abs(np.inner(self.v1, self.v2)) > 1.e-10:
            err_msg = "Error in the definition of the cylinder: "
            err_msg += "the vectors have to be orthogonal"
            log.error(err_msg)
        log.info(self.__str__())

    def get_bounds(self):
        """
        Get the bounds of the base
        """
        r = max(np.linalg.norm(self.v1),
                np.linalg.norm(self.v2))
        return self.center - r, self.center + r

    # pylint: disable=no-self-use
    def point_inside(self, grid):
        """
        return a boolean array which defines
        if a point is inside or outside of the element.

        Notes
        -----

        the edges of the element are considered as inside.

        Parameters
        ----------

        grid : ndarray
            coordinates of the points

        Returns
        -------

        ndarray
            Array of boolean (True inside the element, False otherwise)
        """
        # grid coordinates are expected in the normalized base frame
        x, y = grid
        return (x**2 + y**2) <= 1.

    @staticmethod
    def distance(grid, v, dmax, label):
        """
        Compute the distance in the v direction between
        the element and the points defined by (x, y)
        for a given label.

        Parameters
        ----------

        grid : ndarray
            coordinates of the points

        v : ndarray
            direction of interest

        dmax : float
            distance max

        label : int
            the label of interest

        Returns
        -------

        ndarray
            array of distances

        """
        x, y = grid
        # unit circle in the normalized frame
        c = np.zeros((2,))
        v1 = np.asarray([1, 0])
        v2 = np.asarray([0, 1])
        return distance_ellipse(x, y, v, c, v1, v2, dmax, label)

    @staticmethod
    def _visualize():
        t = np.linspace(0, 2.*np.pi, 100)
        lx_b = [np.cos(t), np.cos(t), np.cos(t[::-1])]
        ly_b = [np.sin(t), np.sin(t), np.sin(t[::-1])]
        return lx_b, ly_b

    def __str__(self):
        s = 'Ellipsoidal base centered in ' + str(self.center) + '\n'
        s += '     in the plane spanned by ' + str(self.v1)
        s += ' and ' + str(self.v2) + '\n'
        return s


class BaseTriangle:
    """
    Class BaseTriangle

    Parameters
    ----------

    center : list
        the three coordinates of the center

    v1 : list
        the three coordinates of the first vector defining the triangular base

    v2 : list
        the three coordinates of the second vector defining the triangular base

    """
    def __init__(self, center, v1, v2):
        self.center = np.asarray(center)
        self.v1 = np.asarray(v1)
        self.v2 = np.asarray(v2)
        # BUGFIX: detect degenerate (colinear) spanning vectors with the
        # cross product; the original test `allclose(nv1*v2, nv2*v1)` missed
        # anti-parallel vectors, which are equally degenerate.
        if np.allclose(np.cross(self.v1, self.v2), 0.):
            err_msg = "Error in the definition of the cylinder: "
            err_msg += "the vectors are not free"
            log.error(err_msg)
        log.info(self.__str__())

    def get_bounds(self):
        """
        Get the bounds of the base
        """
        box = np.asarray(
            [
                self.center,
                self.center + self.v1,
                self.center + self.v1 + self.v2,
                self.center + self.v2
            ]
        )
        return np.min(box, axis=0), np.max(box, axis=0)

    # pylint: disable=no-self-use
    def point_inside(self, grid):
        """
        return a boolean array which defines
        if a point is inside or outside of the element.

        Notes
        -----

        the edges of the element are considered as inside.

        Parameters
        ----------

        grid : ndarray
            coordinates of the points

        Returns
        -------

        ndarray
            Array of boolean (True inside the element, False otherwise)
        """
        # unit triangle (0,0)-(1,0)-(0,1) in the normalized base frame
        x, y = grid
        return np.logical_and(np.logical_and(x >= 0, y >= 0), x + y <= 1)

    @staticmethod
    def distance(grid, v, dmax, label):
        """
        Compute the distance in the v direction between
        the element and the points defined by (x, y)
        for a given label.

        Parameters
        ----------

        grid : ndarray
            coordinates of the points

        v : ndarray
            direction of interest

        dmax : float
            distance max

        label : int
            the label of interest

        Returns
        -------

        ndarray
            array of distances

        """
        x, y = grid
        # the three edges of the unit triangle: anchor points and directions
        p = [[0, 0], [0, 0], [1, 0]]
        vt = [[1, 0], [0, 1], [-1, 1]]
        return distance_lines(x, y, v, p, vt, dmax, label)

    @staticmethod
    def _visualize():
        p = np.asarray([[0, 0],
                        [1, 0],
                        [0, 1],
                        [0, 0],
                        [1, 0]]
                       ).T
        lx_b = []
        ly_b = []
        for k in range(3):
            lx_b.append(p[0, k:k+2])
            ly_b.append(p[1, k:k+2])
        lx_b.append(p[0, :4])
        ly_b.append(p[1, :4])
        lx_b.append(p[0, 3::-1])
        ly_b.append(p[1, 3::-1])
        return lx_b, ly_b

    def __str__(self):
        s = 'Triangular base centered in ' + str(self.center) + '\n'
        s += '     in the plane spanned by ' + str(self.v1)
        s += ' and ' + str(self.v2) + '\n'
        return s


class BaseParallelogram:
    """
    Class BaseParallelogram

    Parameters
    ----------

    center : list
        the three coordinates of the center

    v1 : list
        the three coordinates of the first vector that defines the base

    v2 : list
        the three coordinates of the second vector that defines the base

    """
    def __init__(self, center, v1, v2):
        self.center = np.asarray(center)
        self.v1 = np.asarray(v1)
        self.v2 = np.asarray(v2)
        # BUGFIX: detect degenerate (colinear) spanning vectors with the
        # cross product; the original test `allclose(nv1*v2, nv2*v1)` missed
        # anti-parallel vectors, which are equally degenerate.
        if np.allclose(np.cross(self.v1, self.v2), 0.):
            err_msg = "Error in the definition of the cylinder: "
            err_msg += "the vectors are not free"
            log.error(err_msg)
        log.info(self.__str__())

    def get_bounds(self):
        """
        Get the bounds of the base
        """
        box = np.asarray(
            [
                self.center,
                self.center + self.v1,
                self.center + self.v1 + self.v2,
                self.center + self.v2
            ]
        )
        return np.min(box, axis=0), np.max(box, axis=0)

    # pylint: disable=no-self-use
    def point_inside(self, grid):
        """
        return a boolean array which defines
        if a point is inside or outside of the element.

        Notes
        -----

        the edges of the element are considered as inside.

        Parameters
        ----------

        grid : ndarray
            coordinates of the points

        Returns
        -------

        ndarray
            Array of boolean (True inside the element, False otherwise)
        """
        # unit square [0, 1] x [0, 1] in the normalized base frame
        x, y = grid
        return np.logical_and(np.logical_and(x >= 0, y >= 0),
                              np.logical_and(x <= 1, y <= 1))

    @staticmethod
    def distance(grid, v, dmax, label):
        """
        Compute the distance in the v direction between
        the element and the points defined by (x, y)
        for a given label.

        Parameters
        ----------

        grid : ndarray
            coordinates of the points

        v : ndarray
            direction of interest

        dmax : float
            distance max

        label : int
            the label of interest

        Returns
        -------

        ndarray
            array of distances

        """
        x, y = grid
        # the four edges of the unit square: anchor points and directions
        p = [[0, 0], [0, 0], [1, 0], [0, 1]]
        vt = [[1, 0], [0, 1], [0, 1], [1, 0]]
        return distance_lines(x, y, v, p, vt, dmax, label)

    @staticmethod
    def _visualize():
        p = np.asarray([[0, 0],
                        [1, 0],
                        [1, 1],
                        [0, 1],
                        [0, 0],
                        [1, 0]]).T
        lx_b = []
        ly_b = []
        for k in range(4):
            lx_b.append(p[0, k:k+2])
            ly_b.append(p[1, k:k+2])
        lx_b.append(p[0, :5])
        ly_b.append(p[1, :5])
        lx_b.append(p[0, 4::-1])
        ly_b.append(p[1, 4::-1])
        return lx_b, ly_b

    def __str__(self):
        s = 'Parallelogram base centered in ' + str(self.center) + '\n'
        s += '     in the plane spanned by ' + str(self.v1)
        s += ' and ' + str(self.v2) + '\n'
        return s
[ "logging.getLogger", "numpy.allclose", "six.moves.range", "numpy.ones", "numpy.logical_and", "numpy.asarray", "numpy.min", "numpy.max", "numpy.inner", "numpy.zeros", "numpy.linspace", "numpy.cos", "numpy.linalg.norm", "numpy.sin" ]
[((396, 423), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (413, 423), False, 'import logging\n'), ((1546, 1556), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1553, 1556), True, 'import numpy as np\n'), ((2681, 2699), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (2691, 2699), True, 'import numpy as np\n'), ((2717, 2735), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (2731, 2735), True, 'import numpy as np\n'), ((3015, 3029), 'numpy.asarray', 'np.asarray', (['v1'], {}), '(v1)\n', (3025, 3029), True, 'import numpy as np\n'), ((3043, 3057), 'numpy.asarray', 'np.asarray', (['v2'], {}), '(v2)\n', (3053, 3057), True, 'import numpy as np\n'), ((3170, 3193), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v2'], {}), '(self.v2)\n', (3184, 3193), True, 'import numpy as np\n'), ((4691, 4705), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (4699, 4705), True, 'import numpy as np\n'), ((4719, 4737), 'numpy.asarray', 'np.asarray', (['[1, 0]'], {}), '([1, 0])\n', (4729, 4737), True, 'import numpy as np\n'), ((4751, 4769), 'numpy.asarray', 'np.asarray', (['[0, 1]'], {}), '([0, 1])\n', (4761, 4769), True, 'import numpy as np\n'), ((4888, 4920), 'numpy.linspace', 'np.linspace', (['(0)', '(2.0 * np.pi)', '(100)'], {}), '(0, 2.0 * np.pi, 100)\n', (4899, 4920), True, 'import numpy as np\n'), ((5806, 5824), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (5816, 5824), True, 'import numpy as np\n'), ((5843, 5857), 'numpy.asarray', 'np.asarray', (['v1'], {}), '(v1)\n', (5853, 5857), True, 'import numpy as np\n'), ((5876, 5890), 'numpy.asarray', 'np.asarray', (['v2'], {}), '(v2)\n', (5886, 5890), True, 'import numpy as np\n'), ((7475, 7489), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (7483, 7489), True, 'import numpy as np\n'), ((7503, 7521), 'numpy.asarray', 'np.asarray', (['[1, 0]'], {}), '([1, 0])\n', (7513, 7521), True, 'import numpy as np\n'), ((7535, 7553), 'numpy.asarray', 
'np.asarray', (['[0, 1]'], {}), '([0, 1])\n', (7545, 7553), True, 'import numpy as np\n'), ((7672, 7704), 'numpy.linspace', 'np.linspace', (['(0)', '(2.0 * np.pi)', '(100)'], {}), '(0, 2.0 * np.pi, 100)\n', (7683, 7704), True, 'import numpy as np\n'), ((8457, 8475), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (8467, 8475), True, 'import numpy as np\n'), ((8494, 8508), 'numpy.asarray', 'np.asarray', (['v1'], {}), '(v1)\n', (8504, 8508), True, 'import numpy as np\n'), ((8527, 8541), 'numpy.asarray', 'np.asarray', (['v2'], {}), '(v2)\n', (8537, 8541), True, 'import numpy as np\n'), ((8556, 8579), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v1'], {}), '(self.v1)\n', (8570, 8579), True, 'import numpy as np\n'), ((8594, 8617), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v2'], {}), '(self.v2)\n', (8608, 8617), True, 'import numpy as np\n'), ((8629, 8670), 'numpy.allclose', 'np.allclose', (['(nv1 * self.v2)', '(nv2 * self.v1)'], {}), '(nv1 * self.v2, nv2 * self.v1)\n', (8640, 8670), True, 'import numpy as np\n'), ((8948, 9056), 'numpy.asarray', 'np.asarray', (['[self.center, self.center + self.v1, self.center + self.v1 + self.v2, self.\n center + self.v2]'], {}), '([self.center, self.center + self.v1, self.center + self.v1 +\n self.v2, self.center + self.v2])\n', (8958, 9056), True, 'import numpy as np\n'), ((10794, 10802), 'six.moves.range', 'range', (['(3)'], {}), '(3)\n', (10799, 10802), False, 'from six.moves import range\n'), ((11643, 11661), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (11653, 11661), True, 'import numpy as np\n'), ((11680, 11694), 'numpy.asarray', 'np.asarray', (['v1'], {}), '(v1)\n', (11690, 11694), True, 'import numpy as np\n'), ((11713, 11727), 'numpy.asarray', 'np.asarray', (['v2'], {}), '(v2)\n', (11723, 11727), True, 'import numpy as np\n'), ((11742, 11765), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v1'], {}), '(self.v1)\n', (11756, 11765), True, 'import numpy as np\n'), ((11780, 11803), 
'numpy.linalg.norm', 'np.linalg.norm', (['self.v2'], {}), '(self.v2)\n', (11794, 11803), True, 'import numpy as np\n'), ((11815, 11856), 'numpy.allclose', 'np.allclose', (['(nv1 * self.v2)', '(nv2 * self.v1)'], {}), '(nv1 * self.v2, nv2 * self.v1)\n', (11826, 11856), True, 'import numpy as np\n'), ((12134, 12242), 'numpy.asarray', 'np.asarray', (['[self.center, self.center + self.v1, self.center + self.v1 + self.v2, self.\n center + self.v2]'], {}), '([self.center, self.center + self.v1, self.center + self.v1 +\n self.v2, self.center + self.v2])\n', (12144, 12242), True, 'import numpy as np\n'), ((13933, 13941), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (13938, 13941), False, 'from six.moves import range\n'), ((2757, 2775), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (2771, 2775), True, 'import numpy as np\n'), ((4934, 4943), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (4940, 4943), True, 'import numpy as np\n'), ((4945, 4954), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (4951, 4954), True, 'import numpy as np\n'), ((4956, 4971), 'numpy.cos', 'np.cos', (['t[::-1]'], {}), '(t[::-1])\n', (4962, 4971), True, 'import numpy as np\n'), ((4989, 4998), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (4995, 4998), True, 'import numpy as np\n'), ((5000, 5009), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (5006, 5009), True, 'import numpy as np\n'), ((5011, 5026), 'numpy.sin', 'np.sin', (['t[::-1]'], {}), '(t[::-1])\n', (5017, 5026), True, 'import numpy as np\n'), ((6235, 6258), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v1'], {}), '(self.v1)\n', (6249, 6258), True, 'import numpy as np\n'), ((6260, 6283), 'numpy.linalg.norm', 'np.linalg.norm', (['self.v2'], {}), '(self.v2)\n', (6274, 6283), True, 'import numpy as np\n'), ((7718, 7727), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (7724, 7727), True, 'import numpy as np\n'), ((7729, 7738), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (7735, 7738), True, 'import numpy as np\n'), ((7740, 7755), 
'numpy.cos', 'np.cos', (['t[::-1]'], {}), '(t[::-1])\n', (7746, 7755), True, 'import numpy as np\n'), ((7773, 7782), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (7779, 7782), True, 'import numpy as np\n'), ((7784, 7793), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (7790, 7793), True, 'import numpy as np\n'), ((7795, 7810), 'numpy.sin', 'np.sin', (['t[::-1]'], {}), '(t[::-1])\n', (7801, 7810), True, 'import numpy as np\n'), ((9168, 9187), 'numpy.min', 'np.min', (['box'], {'axis': '(0)'}), '(box, axis=0)\n', (9174, 9187), True, 'import numpy as np\n'), ((9189, 9208), 'numpy.max', 'np.max', (['box'], {'axis': '(0)'}), '(box, axis=0)\n', (9195, 9208), True, 'import numpy as np\n'), ((9766, 9796), 'numpy.logical_and', 'np.logical_and', (['(x >= 0)', '(y >= 0)'], {}), '(x >= 0, y >= 0)\n', (9780, 9796), True, 'import numpy as np\n'), ((10566, 10618), 'numpy.asarray', 'np.asarray', (['[[0, 0], [1, 0], [0, 1], [0, 0], [1, 0]]'], {}), '([[0, 0], [1, 0], [0, 1], [0, 0], [1, 0]])\n', (10576, 10618), True, 'import numpy as np\n'), ((12354, 12373), 'numpy.min', 'np.min', (['box'], {'axis': '(0)'}), '(box, axis=0)\n', (12360, 12373), True, 'import numpy as np\n'), ((12375, 12394), 'numpy.max', 'np.max', (['box'], {'axis': '(0)'}), '(box, axis=0)\n', (12381, 12394), True, 'import numpy as np\n'), ((12952, 12982), 'numpy.logical_and', 'np.logical_and', (['(x >= 0)', '(y >= 0)'], {}), '(x >= 0, y >= 0)\n', (12966, 12982), True, 'import numpy as np\n'), ((13014, 13044), 'numpy.logical_and', 'np.logical_and', (['(x <= 1)', '(y <= 1)'], {}), '(x <= 1, y <= 1)\n', (13028, 13044), True, 'import numpy as np\n'), ((13817, 13877), 'numpy.asarray', 'np.asarray', (['[[0, 0], [1, 0], [1, 1], [0, 1], [0, 0], [1, 0]]'], {}), '([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0], [1, 0]])\n', (13827, 13877), True, 'import numpy as np\n'), ((3129, 3155), 'numpy.inner', 'np.inner', (['self.v1', 'self.v1'], {}), '(self.v1, self.v1)\n', (3137, 3155), True, 'import numpy as np\n'), ((5906, 5932), 
'numpy.inner', 'np.inner', (['self.v1', 'self.v2'], {}), '(self.v1, self.v2)\n', (5914, 5932), True, 'import numpy as np\n'), ((3081, 3102), 'numpy.inner', 'np.inner', (['v2', 'self.v1'], {}), '(v2, self.v1)\n', (3089, 3102), True, 'import numpy as np\n')]
# importing libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib matplotlib.rcParams["figure.figsize"] = (20,10) # importing the dataset dataset = pd.read_csv(r"C:\00git\Bangalore-Real-Estate-Price-Prediction-WebApp-master\dataset\Bengaluru_House_Data.csv") print(dataset.head(10)) print(dataset.shape) # Data preprocessing ## getting the count of area type in the dataset print(dataset.groupby('area_type')['area_type'].agg('count')) ## droping unnecessary columns dataset.drop(['area_type','society','availability','balcony'], axis='columns', inplace=True) print(dataset.shape) ## data cleaning print(dataset.isnull().sum()) dataset.dropna(inplace=True) print(dataset.shape) ### data engineering print(dataset['size'].unique()) dataset['bhk'] = dataset['size'].apply(lambda x: float(x.split(' ')[0])) ### exploring 'total_sqft' column print(dataset['total_sqft'].unique()) #### defining a function to check whether the value is float or not def is_float(x): try: float(x) except : return False return True print(dataset[~dataset['total_sqft'].apply(is_float)].head(10)) #### defining a function to convert the range of column values to a single value def convert_sqft_to_num(x): tokens = x.split('-') if len(tokens) == 2: return (float(tokens[0]) + float(tokens[1]))/2 try: return float(x) except: return None #### testing the function print(convert_sqft_to_num('290')) print(convert_sqft_to_num('2100 - 2850')) print(convert_sqft_to_num('4.46Sq. Meter')) #### applying this function to the dataset dataset['total_sqft'] = dataset['total_sqft'].apply(convert_sqft_to_num) print(dataset['total_sqft'].head(10)) print(dataset.loc[30]) ## feature engineering print(dataset.head(10)) ### creating new colomn 'price_per_sqft' as we know ### in real estate market, price per sqft matters alot. 
dataset['price_per_sqft'] = dataset['price']*100000/dataset['total_sqft'] print(dataset['price_per_sqft']) ### exploring 'location' column print(len(dataset['location'].unique())) dataset['location'] = dataset['location'].apply(lambda x: x.strip()) location_stats = dataset.groupby('location')['location'].agg('count').sort_values(ascending=False) print(location_stats[0:10]) #### creating 'location_stats' to get the location with total count or occurance #### occurance, and 'location_stats_less_than_10' to get the location with <= 10 #### occurance print(len(location_stats[location_stats <= 10])) location_stats_less_than_10 = location_stats[location_stats <= 10] print(location_stats_less_than_10) #### redefining the 'location' column as 'other' value where location count #### is <= 10 dataset['location'] = dataset['location'].apply(lambda x: 'other' if x in location_stats_less_than_10 else x) print(dataset['location'].head(10)) print(len(dataset['location'].unique())) ## Outlier detection and removal ### checking that 'total_sqft'/'bhk', if it's very less than there is some ### anomaly and we have to remove these outliers print(dataset[dataset['total_sqft'] / dataset['bhk'] < 300].sort_values(by='total_sqft').head(10)) print(dataset.shape) dataset = dataset[~(dataset['total_sqft'] / dataset['bhk'] < 300)] print(dataset.shape) ### checking columns where 'price_per_sqft' is very low ### where it should not be that low, so it's an anomaly and ### we have to remove those rows print(dataset['price_per_sqft'].describe()) ### function to remove these extreme cases of very high or low values ### of 'price_per_sqft' based on std() def remove_pps_outliers(df): df_out = pd.DataFrame() for key, subdf in df.groupby('location'): mean = np.mean(subdf['price_per_sqft']) std = np.std(subdf['price_per_sqft']) reduced_df = subdf[(subdf['price_per_sqft'] > (mean - std)) & (subdf['price_per_sqft'] <= (mean + std))] df_out = pd.concat([df_out, reduced_df], ignore_index=True) return df_out 
dataset = remove_pps_outliers(dataset) print(dataset.shape) ### plotting graoh where we can visualize that properties with same location ### and the price of 3 bhk properties with higher 'total_sqft' is less than ### 2 bhk properties with lower 'total_sqft' def plot_scatter_chart(df,location): bhk2 = df[(df['location'] == location) & (df['bhk'] == 2)] bhk3 = df[(df['location'] == location) & (df['bhk'] == 3)] matplotlib.rcParams['figure.figsize'] = (15,10) plt.scatter(bhk2['total_sqft'], bhk2['price'], color='blue', label='2 BHK', s=50 ) plt.scatter(bhk3['total_sqft'], bhk3['price'], marker='+', color='green', label='3 BHK', s=50 ) plt.xlabel('Total Square Feet Area') plt.ylabel('Price') plt.title(location) plt.legend() plt.show() plot_scatter_chart(dataset,"Hebbal") plot_scatter_chart(dataset,"<NAME>") ### defining a funcion where we can get the rows where 'bhk' & 'location' ### is same but the property with less 'bhk' have more price than the property ### which have more 'bhk'. So, it's also an anomalu and we have to remove these ### properties def remove_bhk_outliers(df): exclude_indices = np.array([]) for location, location_df in df.groupby('location'): bhk_stats = {} for bhk, bhk_df in location_df.groupby('bhk'): bhk_stats[bhk] = { 'mean': np.mean(bhk_df['price_per_sqft']), 'std': np.std(bhk_df['price_per_sqft']), 'count': bhk_df.shape[0] } for bhk, bhk_df in location_df.groupby('bhk'): stats = bhk_stats.get(bhk-1) if stats and stats['count'] > 5: exclude_indices = np.append(exclude_indices, bhk_df[bhk_df['price_per_sqft'] < (stats['mean'])].index.values) return df.drop(exclude_indices, axis='index') dataset = remove_bhk_outliers(dataset) print(dataset.shape) def plot_scatter_chart(df,location): bhk2 = df[(df['location'] == location) & (df['bhk'] == 2)] bhk3 = df[(df['location'] == location) & (df['bhk'] == 3)] matplotlib.rcParams['figure.figsize'] = (15,10) plt.scatter(bhk2['total_sqft'], bhk2['price'], color='blue', label='2 BHK', s=50 ) 
plt.scatter(bhk3['total_sqft'], bhk3['price'], marker='+', color='green', label='3 BHK', s=50 ) plt.xlabel('Total Square Feet Area') plt.ylabel('Price') plt.title(location) plt.legend() plt.show() plot_scatter_chart(dataset,"Hebbal") plot_scatter_chart(dataset,"<NAME>") ### histogram for properties per sqaure feet area matplotlib.rcParams['figure.figsize'] = (20,10) plt.hist(dataset['price_per_sqft'], rwidth=0.8) plt.xlabel('Price Per Square Feet') plt.ylabel('Count') plt.title('Histogram of Properties by Price Per Square Feet') plt.show() ### exploring bathroom feature print(dataset['bath'].unique()) #### having 10 bedrooms and bathroom > 10 is unusual #### so, we will remove these anomalies print(dataset[dataset['bath'] > 10]) #### plotting histogram of bathroom plt.hist(dataset['bath'], rwidth=0.8, color='red') plt.xlabel('Number of Bathrooms') plt.ylabel('Count') plt.title('Histogram of Bathroom per Property') plt.show() print(dataset[dataset['bath'] > dataset['bhk'] + 2]) dataset = dataset[dataset['bath'] < dataset['bhk'] + 2] print(dataset.shape) ### after removing outliers, dropping unwanted features dataset.drop(['size','price_per_sqft'], axis='columns', inplace=True) print(dataset.head()) ## one hot encoding the 'location' column dummies = pd.get_dummies(dataset['location']) print(dummies.head()) dataset = pd.concat([dataset,dummies.drop('other', axis='columns')], axis='columns') dataset.drop('location', axis=1, inplace=True) print(dataset.head()) print(dataset.shape) ## distributing independent features in 'X' and dependent feature in 'y' X = dataset.drop(['price'],axis= 'columns') y = dataset['price'] print(X.shape) print(y.shape) ## splitting the dataset into training set and test set from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=10) ## training the model from sklearn.linear_model import LinearRegression regressor = LinearRegression() regressor.fit(X_train,y_train) 
print(regressor.score(X_test,y_test)) ## k-fold cross validation from sklearn.model_selection import ShuffleSplit, cross_val_score cv = ShuffleSplit(n_splits=5, test_size = 0.2, random_state=0) cross_val_score(regressor,X,y,cv=cv) ## grid search, hyper parameter tuning from sklearn.model_selection import GridSearchCV from sklearn.linear_model import Lasso from sklearn.tree import DecisionTreeRegressor def find_best_model_using_gridsearch(X,y): algos = { 'linear_regression': { 'model': LinearRegression(), 'params': { 'normalize': [True, False]} }, 'lasso': { 'model': Lasso(), 'params': { 'alpha': [1,2], 'selection': ['random','cyclic'] } }, 'decision_tree':{ 'model': DecisionTreeRegressor(), 'params': { 'criterion': ['mse','friedman_mse'], 'splitter': ['best','random'] } } } scores = [] cv = ShuffleSplit(n_splits=5,test_size=0.2,random_state=0) for algo_name,config in algos.items(): gs = GridSearchCV(config['model'], config['params'], cv=cv, n_jobs=-1, return_train_score=False ) gs.fit(X,y) scores.append({ 'model': algo_name, 'best_score': gs.best_score_, 'best_params': gs.best_params_ }) return pd.DataFrame(scores,columns=['model','best_score','best_params']) model_scores = find_best_model_using_gridsearch(X,y) print(model_scores) ### so after running grid search, linear regression model have the best score ### so i will use linear regression model on the whole dataset from sklearn.linear_model import LinearRegression regressor = LinearRegression() regressor.fit(X,y) ## evaluating the model def predict_price(location,sqft,bath,bhk): loc_index = np.where(X.columns == location)[0][0] x = np.zeros(len(X.columns)) x[0] = sqft x[1] = bath x[2] = bhk if loc_index >= 0: x[loc_index] = 1 return regressor.predict([x])[0] print(predict_price('1st Phase JP Nagar',1000,2,2)) print(predict_price('1st Phase JP Nagar',1000,3,3)) print(predict_price('Indira Nagar',1000,3,3)) # saving the model import pickle with open('bangalore_home_prices_model.pickle','wb') as f: 
pickle.dump(regressor,f) # exporting columns import json columns = {'data_columns': [col.lower() for col in X.columns]} with open("columns.json","w") as f: f.write(json.dumps(columns))
[ "sklearn.model_selection.GridSearchCV", "matplotlib.pyplot.hist", "sklearn.tree.DecisionTreeRegressor", "pandas.read_csv", "matplotlib.pyplot.ylabel", "sklearn.linear_model.Lasso", "numpy.array", "numpy.mean", "numpy.where", "matplotlib.pyplot.xlabel", "json.dumps", "sklearn.model_selection.Sh...
[((194, 317), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\00git\\\\Bangalore-Real-Estate-Price-Prediction-WebApp-master\\\\dataset\\\\Bengaluru_House_Data.csv"""'], {}), "(\n 'C:\\\\00git\\\\Bangalore-Real-Estate-Price-Prediction-WebApp-master\\\\dataset\\\\Bengaluru_House_Data.csv'\n )\n", (205, 317), True, 'import pandas as pd\n'), ((6966, 7013), 'matplotlib.pyplot.hist', 'plt.hist', (["dataset['price_per_sqft']"], {'rwidth': '(0.8)'}), "(dataset['price_per_sqft'], rwidth=0.8)\n", (6974, 7013), True, 'import matplotlib.pyplot as plt\n'), ((7014, 7049), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Price Per Square Feet"""'], {}), "('Price Per Square Feet')\n", (7024, 7049), True, 'import matplotlib.pyplot as plt\n'), ((7050, 7069), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (7060, 7069), True, 'import matplotlib.pyplot as plt\n'), ((7070, 7131), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram of Properties by Price Per Square Feet"""'], {}), "('Histogram of Properties by Price Per Square Feet')\n", (7079, 7131), True, 'import matplotlib.pyplot as plt\n'), ((7132, 7142), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7140, 7142), True, 'import matplotlib.pyplot as plt\n'), ((7376, 7426), 'matplotlib.pyplot.hist', 'plt.hist', (["dataset['bath']"], {'rwidth': '(0.8)', 'color': '"""red"""'}), "(dataset['bath'], rwidth=0.8, color='red')\n", (7384, 7426), True, 'import matplotlib.pyplot as plt\n'), ((7427, 7460), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Bathrooms"""'], {}), "('Number of Bathrooms')\n", (7437, 7460), True, 'import matplotlib.pyplot as plt\n'), ((7461, 7480), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (7471, 7480), True, 'import matplotlib.pyplot as plt\n'), ((7481, 7528), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram of Bathroom per Property"""'], {}), "('Histogram of Bathroom per Property')\n", (7490, 7528), True, 'import 
matplotlib.pyplot as plt\n'), ((7529, 7539), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7537, 7539), True, 'import matplotlib.pyplot as plt\n'), ((7873, 7908), 'pandas.get_dummies', 'pd.get_dummies', (["dataset['location']"], {}), "(dataset['location'])\n", (7887, 7908), True, 'import pandas as pd\n'), ((8418, 8472), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(10)'}), '(X, y, test_size=0.2, random_state=10)\n', (8434, 8472), False, 'from sklearn.model_selection import train_test_split\n'), ((8556, 8574), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (8572, 8574), False, 'from sklearn.linear_model import LinearRegression\n'), ((8743, 8798), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(5)', 'test_size': '(0.2)', 'random_state': '(0)'}), '(n_splits=5, test_size=0.2, random_state=0)\n', (8755, 8798), False, 'from sklearn.model_selection import ShuffleSplit, cross_val_score\n'), ((8801, 8840), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['regressor', 'X', 'y'], {'cv': 'cv'}), '(regressor, X, y, cv=cv)\n', (8816, 8840), False, 'from sklearn.model_selection import ShuffleSplit, cross_val_score\n'), ((10523, 10541), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (10539, 10541), False, 'from sklearn.linear_model import LinearRegression\n'), ((3622, 3636), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3634, 3636), True, 'import pandas as pd\n'), ((4456, 4541), 'matplotlib.pyplot.scatter', 'plt.scatter', (["bhk2['total_sqft']", "bhk2['price']"], {'color': '"""blue"""', 'label': '"""2 BHK"""', 's': '(50)'}), "(bhk2['total_sqft'], bhk2['price'], color='blue', label='2 BHK',\n s=50)\n", (4467, 4541), True, 'import matplotlib.pyplot as plt\n'), ((4627, 4725), 'matplotlib.pyplot.scatter', 'plt.scatter', (["bhk3['total_sqft']", "bhk3['price']"], {'marker': '"""+"""', 
'color': '"""green"""', 'label': '"""3 BHK"""', 's': '(50)'}), "(bhk3['total_sqft'], bhk3['price'], marker='+', color='green',\n label='3 BHK', s=50)\n", (4638, 4725), True, 'import matplotlib.pyplot as plt\n'), ((4827, 4863), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Total Square Feet Area"""'], {}), "('Total Square Feet Area')\n", (4837, 4863), True, 'import matplotlib.pyplot as plt\n'), ((4868, 4887), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (4878, 4887), True, 'import matplotlib.pyplot as plt\n'), ((4892, 4911), 'matplotlib.pyplot.title', 'plt.title', (['location'], {}), '(location)\n', (4901, 4911), True, 'import matplotlib.pyplot as plt\n'), ((4916, 4928), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4926, 4928), True, 'import matplotlib.pyplot as plt\n'), ((4933, 4943), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4941, 4943), True, 'import matplotlib.pyplot as plt\n'), ((5320, 5332), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5328, 5332), True, 'import numpy as np\n'), ((6304, 6389), 'matplotlib.pyplot.scatter', 'plt.scatter', (["bhk2['total_sqft']", "bhk2['price']"], {'color': '"""blue"""', 'label': '"""2 BHK"""', 's': '(50)'}), "(bhk2['total_sqft'], bhk2['price'], color='blue', label='2 BHK',\n s=50)\n", (6315, 6389), True, 'import matplotlib.pyplot as plt\n'), ((6475, 6573), 'matplotlib.pyplot.scatter', 'plt.scatter', (["bhk3['total_sqft']", "bhk3['price']"], {'marker': '"""+"""', 'color': '"""green"""', 'label': '"""3 BHK"""', 's': '(50)'}), "(bhk3['total_sqft'], bhk3['price'], marker='+', color='green',\n label='3 BHK', s=50)\n", (6486, 6573), True, 'import matplotlib.pyplot as plt\n'), ((6675, 6711), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Total Square Feet Area"""'], {}), "('Total Square Feet Area')\n", (6685, 6711), True, 'import matplotlib.pyplot as plt\n'), ((6716, 6735), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (6726, 
6735), True, 'import matplotlib.pyplot as plt\n'), ((6740, 6759), 'matplotlib.pyplot.title', 'plt.title', (['location'], {}), '(location)\n', (6749, 6759), True, 'import matplotlib.pyplot as plt\n'), ((6764, 6776), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6774, 6776), True, 'import matplotlib.pyplot as plt\n'), ((6781, 6791), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6789, 6791), True, 'import matplotlib.pyplot as plt\n'), ((9659, 9714), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(5)', 'test_size': '(0.2)', 'random_state': '(0)'}), '(n_splits=5, test_size=0.2, random_state=0)\n', (9671, 9714), False, 'from sklearn.model_selection import ShuffleSplit, cross_val_score\n'), ((10179, 10247), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {'columns': "['model', 'best_score', 'best_params']"}), "(scores, columns=['model', 'best_score', 'best_params'])\n", (10191, 10247), True, 'import pandas as pd\n'), ((11101, 11126), 'pickle.dump', 'pickle.dump', (['regressor', 'f'], {}), '(regressor, f)\n', (11112, 11126), False, 'import pickle\n'), ((3698, 3730), 'numpy.mean', 'np.mean', (["subdf['price_per_sqft']"], {}), "(subdf['price_per_sqft'])\n", (3705, 3730), True, 'import numpy as np\n'), ((3745, 3776), 'numpy.std', 'np.std', (["subdf['price_per_sqft']"], {}), "(subdf['price_per_sqft'])\n", (3751, 3776), True, 'import numpy as np\n'), ((3907, 3957), 'pandas.concat', 'pd.concat', (['[df_out, reduced_df]'], {'ignore_index': '(True)'}), '([df_out, reduced_df], ignore_index=True)\n', (3916, 3957), True, 'import pandas as pd\n'), ((9769, 9864), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (["config['model']", "config['params']"], {'cv': 'cv', 'n_jobs': '(-1)', 'return_train_score': '(False)'}), "(config['model'], config['params'], cv=cv, n_jobs=-1,\n return_train_score=False)\n", (9781, 9864), False, 'from sklearn.model_selection import GridSearchCV\n'), ((11270, 11289), 'json.dumps', 'json.dumps', 
(['columns'], {}), '(columns)\n', (11280, 11289), False, 'import json\n'), ((9123, 9141), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (9139, 9141), False, 'from sklearn.linear_model import LinearRegression\n'), ((9250, 9257), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (9255, 9257), False, 'from sklearn.linear_model import Lasso\n'), ((9444, 9467), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (9465, 9467), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((10645, 10676), 'numpy.where', 'np.where', (['(X.columns == location)'], {}), '(X.columns == location)\n', (10653, 10676), True, 'import numpy as np\n'), ((5527, 5560), 'numpy.mean', 'np.mean', (["bhk_df['price_per_sqft']"], {}), "(bhk_df['price_per_sqft'])\n", (5534, 5560), True, 'import numpy as np\n'), ((5589, 5621), 'numpy.std', 'np.std', (["bhk_df['price_per_sqft']"], {}), "(bhk_df['price_per_sqft'])\n", (5595, 5621), True, 'import numpy as np\n'), ((5861, 5955), 'numpy.append', 'np.append', (['exclude_indices', "bhk_df[bhk_df['price_per_sqft'] < stats['mean']].index.values"], {}), "(exclude_indices, bhk_df[bhk_df['price_per_sqft'] < stats['mean']]\n .index.values)\n", (5870, 5955), True, 'import numpy as np\n')]
import numpy as np import pandas as pd class CellDischargeData: """ Battery cell data from discharge test. """ def __init__(self, path): """ Initialize with path to discharge data file. Parameters ---------- path : str Path to discharge data file. Attributes ---------- time : vector Time vector for battery test data [s] current : vector Current from battery during test [A] voltage : vector Voltage from battery during test [V] data : vector Data flags from battery test [-] dt : vector Time step [s] """ df = pd.read_csv(path) self.time = df['Time(s)'].values self.current = df['Current(A)'].values self.voltage = df['Voltage(V)'].values self.data = df['Data'].fillna(' ').values self.ti = 0 self.tf = 0 def get_ids(self): """ Find indices in data that represent the `S` flag. Start and stop procedures in the experiment are depicted by the `S` flag. Returns ------- ids : vector Indices of start and stop points in data. """ ids = np.where(self.data == 'S')[0] return ids def get_idx(self): """ Find indices in discharge data that represent a single section. Returns ------- id0, id1, id2, id3 : tuple Indices representing section of discharge data. id0 = start of discharge id1 = end of discharge id2 = start of charge id3 = end of charge """ ids = self.get_ids() if max(abs(self.current)) > 35: # 2c and 3c discharge tests id0 = ids[3] id1 = ids[4] id2 = ids[5] id3 = ids[6] else: # 1c discharge test id0 = ids[2] id1 = ids[3] id2 = ids[4] id3 = ids[5] return id0, id1, id2, id3 @classmethod def process(cls, path): """ Process the original discharge data for one section. This section of data is used for model development. 
""" data = cls(path) id0, id1, id2, id3 = data.get_idx() data.ti = data.time[id0] data.tf = data.time[id2] data.current = data.current[id0:id2 + 1] data.voltage = data.voltage[id0:id2 + 1] data.time = data.time[id0:id2 + 1] - data.time[id0:id2 + 1].min() return data @classmethod def process_discharge_only(cls, path): """ Process the original discharge data for just the discharge portion. """ data = cls(path) id0, id1, _, _ = data.get_idx() data.ti = data.time[id0] data.tf = data.time[id1] data.current = data.current[id0:id1 + 1] data.voltage = data.voltage[id0:id1 + 1] data.time = data.time[id0:id1 + 1] - data.time[id0:id1 + 1][0] return data
[ "numpy.where", "pandas.read_csv" ]
[((717, 734), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (728, 734), True, 'import pandas as pd\n'), ((1270, 1296), 'numpy.where', 'np.where', (["(self.data == 'S')"], {}), "(self.data == 'S')\n", (1278, 1296), True, 'import numpy as np\n')]
import pandas as pd import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.feature_selection import SelectFromModel from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier #import data data = pd.read_csv('airline_tweets.csv') print(data) #show one column print(data['text']) #show two columns print(data[['text','airline_sentiment']]) #show one record print(data.loc[0,:]) #show some more records print(data.loc[0:5,:]) #show one column of one record print(data.loc[0,'text']) #select columns data = data[['tweet_id','text','airline_sentiment','airline']] print(data) #convert sentiment into numbers def sentiment2int(sentiment): if sentiment == 'positive': return 1 elif sentiment == 'neutral': return 0 elif sentiment == 'negative': return -1 else: return np.NaN data['rating'] = data['airline_sentiment'].apply(sentiment2int) print(data) #alternatively, use encoder encoder = LabelEncoder() encoder.fit(data['airline_sentiment']) data['encoded'] = encoder.transform(data['airline_sentiment']) print(data) #average sentiment of airlines filter = data['airline'] == 'Virgin America' virgin = data[filter] print(virgin['rating'].mean()) #tfidf vectorizer = TfidfVectorizer(min_df=3, stop_words='english',ngram_range=(1, 2)) vectorizer.fit(data['text']) X = vectorizer.transform(data['text']) #get labels y = np.array(data['rating']) print(X) print(X.shape) print(y) print(y.shape) #test train split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, stratify=y, random_state=1234) print(X_train.shape) print(X_test.shape) #naive bayes nb = MultinomialNB() nb.fit(X_train,y_train) print(nb.score(X_test,y_test)) nb_preds = nb.predict(X_test) print(nb_preds) #logistic regression lr = LogisticRegression(penalty='l1',C=1) 
lr.fit(X_train,y_train) print(lr.score(X_test,y_test)) lr_preds = lr.predict(X_test) print(lr_preds) #random forest rf = RandomForestClassifier(n_estimators=100) rf.fit(X_train,y_train) print(rf.score(X_test,y_test)) rf_preds = rf.predict(X_test) print(rf_preds)
[ "sklearn.preprocessing.LabelEncoder", "pandas.read_csv", "sklearn.model_selection.train_test_split", "sklearn.ensemble.RandomForestClassifier", "sklearn.linear_model.LogisticRegression", "numpy.array", "sklearn.feature_extraction.text.TfidfVectorizer", "sklearn.naive_bayes.MultinomialNB" ]
[((446, 479), 'pandas.read_csv', 'pd.read_csv', (['"""airline_tweets.csv"""'], {}), "('airline_tweets.csv')\n", (457, 479), True, 'import pandas as pd\n'), ((1198, 1212), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1210, 1212), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1494, 1561), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(3)', 'stop_words': '"""english"""', 'ngram_range': '(1, 2)'}), "(min_df=3, stop_words='english', ngram_range=(1, 2))\n", (1509, 1561), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1646, 1670), 'numpy.array', 'np.array', (["data['rating']"], {}), "(data['rating'])\n", (1654, 1670), True, 'import numpy as np\n'), ((1774, 1842), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)', 'stratify': 'y', 'random_state': '(1234)'}), '(X, y, test_size=0.1, stratify=y, random_state=1234)\n', (1790, 1842), False, 'from sklearn.model_selection import train_test_split\n'), ((1903, 1918), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (1916, 1918), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((2047, 2084), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l1"""', 'C': '(1)'}), "(penalty='l1', C=1)\n", (2065, 2084), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2206, 2246), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (2228, 2246), False, 'from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\n')]
import numpy as np import matplotlib.pylab as plt from .bipolar import bipolar from matplotlib.colors import LinearSegmentedColormap # get colormap ncolors = 256 color_array = plt.get_cmap('jet')(range(ncolors)) # change alpha values color_array[:,-1] = np.linspace(0.0, 1.0,ncolors) # create a colormap object map_object = LinearSegmentedColormap.from_list(name='jet_alpha',colors=color_array) # register this new colormap with matplotlib plt.register_cmap(cmap=map_object) plt.rcParams['font.size'] = 12 plt.rcParams['axes.labelsize'] = 10 plt.rcParams['axes.titlesize'] = 10 plt.rcParams['xtick.labelsize'] = 8 plt.rcParams['ytick.labelsize'] = 8 plt.rcParams['legend.fontsize'] = 12 plt.rcParams['figure.titlesize'] = 14 def show(data, epsr, sources, intensity=False, theme='bright', label=None, saveas=''): fig, ax = plt.subplots(1, 1, figsize=(4, 4), dpi=100) if theme == 'dark': colors = 'magma' if intensity else bipolar(neutral=0.0) outline = 'white' source_color = 'lightyellow' else: colors = 'jet_alpha' if intensity else 'RdBu' outline = 'gray' source_color = 'gray' ax.imshow(data.T, cmap=colors) ax.contour(epsr.T, colors=outline, alpha=0.1) for src in sources: ax.plot(src[0], src[1], color=source_color) ax.invert_yaxis() if label: offset = int(data.shape[0] / 12) ax.text(offset, data.shape[1]-1.5*offset, label, color=outline) ax.axis('off') # ax.add_artist(ScaleBar(resolution)) plt.tight_layout() if saveas: plt.savefig(saveas) plt.close(fig) else: plt.show() def show_design(epsr, *sources): fig, ax = plt.subplots(1, 1, dpi=100) ax.imshow(epsr.T, cmap='Blues_r') for src in sources: ax.plot(src[0], src[1], color="red") ax.invert_yaxis() # Get nice a nice grid going ax.minorticks_on() ax.grid(which='both', color='white', linestyle='-') ax.grid(which='minor', linestyle=':', linewidth='0.5', color='lightgray') # Also show tics on top and right ax.xaxis.set_ticks_position('both') ax.yaxis.set_ticks_position('both') ax.set_xlabel("px") ax.set_ylabel("px") plt.show() """ 
Utilities for plotting and visualization """ def real(val, outline=None, ax=None, cbar=False, cmap='RdBu', outline_alpha=0.5): """Plots the real part of 'val', optionally overlaying an outline of 'outline' """ if ax is None: fig, ax = plt.subplots(1, 1, constrained_layout=True) vmax = np.abs(val).max() h = ax.imshow(np.real(val.T), cmap=cmap, origin='lower', vmin=-vmax, vmax=vmax) if outline is not None: ax.contour(outline.T, 0, colors='k', alpha=outline_alpha) ax.set_ylabel('y') ax.set_xlabel('x') if cbar: plt.colorbar(h, ax=ax) return ax def abs(val, outline=None, ax=None, cbar=False, cmap='magma', outline_alpha=0.5, outline_val=None): """Plots the absolute value of 'val', optionally overlaying an outline of 'outline' """ if ax is None: fig, ax = plt.subplots(1, 1, constrained_layout=True) vmax = np.abs(val).max() h = ax.imshow(np.abs(val.T), cmap=cmap, origin='lower', vmin=0, vmax=vmax) if outline_val is None and outline is not None: outline_val = 0.5*(outline.min()+outline.max()) if outline is not None: ax.contour(outline.T, [outline_val], colors='w', alpha=outline_alpha) ax.set_ylabel('y') ax.set_xlabel('x') if cbar: plt.colorbar(h, ax=ax) return ax
[ "numpy.abs", "matplotlib.pylab.subplots", "matplotlib.pylab.savefig", "matplotlib.pylab.tight_layout", "matplotlib.pylab.colorbar", "numpy.real", "numpy.linspace", "matplotlib.pylab.register_cmap", "matplotlib.pylab.show", "matplotlib.pylab.close", "matplotlib.pylab.get_cmap", "matplotlib.colo...
[((256, 286), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'ncolors'], {}), '(0.0, 1.0, ncolors)\n', (267, 286), True, 'import numpy as np\n'), ((327, 398), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', ([], {'name': '"""jet_alpha"""', 'colors': 'color_array'}), "(name='jet_alpha', colors=color_array)\n", (360, 398), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((444, 478), 'matplotlib.pylab.register_cmap', 'plt.register_cmap', ([], {'cmap': 'map_object'}), '(cmap=map_object)\n', (461, 478), True, 'import matplotlib.pylab as plt\n'), ((177, 196), 'matplotlib.pylab.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (189, 196), True, 'import matplotlib.pylab as plt\n'), ((833, 876), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(4, 4)', 'dpi': '(100)'}), '(1, 1, figsize=(4, 4), dpi=100)\n', (845, 876), True, 'import matplotlib.pylab as plt\n'), ((1528, 1546), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1544, 1546), True, 'import matplotlib.pylab as plt\n'), ((1691, 1718), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'dpi': '(100)'}), '(1, 1, dpi=100)\n', (1703, 1718), True, 'import matplotlib.pylab as plt\n'), ((2214, 2224), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (2222, 2224), True, 'import matplotlib.pylab as plt\n'), ((1570, 1589), 'matplotlib.pylab.savefig', 'plt.savefig', (['saveas'], {}), '(saveas)\n', (1581, 1589), True, 'import matplotlib.pylab as plt\n'), ((1598, 1612), 'matplotlib.pylab.close', 'plt.close', (['fig'], {}), '(fig)\n', (1607, 1612), True, 'import matplotlib.pylab as plt\n'), ((1631, 1641), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (1639, 1641), True, 'import matplotlib.pylab as plt\n'), ((2488, 2531), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'constrained_layout': '(True)'}), '(1, 1, constrained_layout=True)\n', (2500, 2531), True, 
'import matplotlib.pylab as plt\n'), ((2580, 2594), 'numpy.real', 'np.real', (['val.T'], {}), '(val.T)\n', (2587, 2594), True, 'import numpy as np\n'), ((2809, 2831), 'matplotlib.pylab.colorbar', 'plt.colorbar', (['h'], {'ax': 'ax'}), '(h, ax=ax)\n', (2821, 2831), True, 'import matplotlib.pylab as plt\n'), ((3082, 3125), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'constrained_layout': '(True)'}), '(1, 1, constrained_layout=True)\n', (3094, 3125), True, 'import matplotlib.pylab as plt\n'), ((3174, 3187), 'numpy.abs', 'np.abs', (['val.T'], {}), '(val.T)\n', (3180, 3187), True, 'import numpy as np\n'), ((3510, 3532), 'matplotlib.pylab.colorbar', 'plt.colorbar', (['h'], {'ax': 'ax'}), '(h, ax=ax)\n', (3522, 3532), True, 'import matplotlib.pylab as plt\n'), ((2544, 2555), 'numpy.abs', 'np.abs', (['val'], {}), '(val)\n', (2550, 2555), True, 'import numpy as np\n'), ((3138, 3149), 'numpy.abs', 'np.abs', (['val'], {}), '(val)\n', (3144, 3149), True, 'import numpy as np\n')]
import time import numpy as np import scanpy as sc from datetime import timedelta, datetime def record_time(start_time, end_time, step_name, fp): fp.write("{step} starts at: {time}\n".format(step = step_name, time = datetime.fromtimestamp(start_time))) fp.write("{step} ends at: {time}\n".format(step = step_name, time = datetime.fromtimestamp(end_time))) fp.write("Time spent for {step}: {time}.\n\n".format(step = step_name, time = timedelta(seconds = end_time - start_time))) sc.settings.n_jobs = 28 sc.settings.verbosity = 3 rand_seed = 0 src_data = "/data/MantonBM_nonmix_10x.h5" log_file = "mantonbm_scanpy.log" fp = open(log_file, 'w') start_read = time.time() adata = sc.read_10x_h5(src_data, genome = "GRCh38") adata.var_names_make_unique() adata.obs['Channel'] = adata.obs.index.map(lambda s: s.split('-')[0]).values end_read = time.time() record_time(start_read, end_read, "Read", fp) start_filter = time.time() n_cells = adata.shape[0] sc.pp.filter_cells(adata, min_counts = 100) sc.pp.filter_cells(adata, max_counts = 600000) sc.pp.filter_cells(adata, min_genes = 500) sc.pp.filter_cells(adata, max_genes = 6000) sc.pp.filter_genes(adata, min_cells = n_cells * 0.0005) mito_genes = adata.var_names.str.startswith('MT-') adata.obs['percent_mito'] = np.sum(adata[:, mito_genes].X, axis = 1).A1 / np.sum(adata.X, axis = 1).A1 adata = adata[adata.obs.percent_mito < 0.1, :] end_filter = time.time() record_time(start_filter, end_filter, "Filter", fp) start_norm = time.time() sc.pp.normalize_total(adata, target_sum = 1e5) sc.pp.log1p(adata) end_norm = time.time() record_time(start_norm, end_norm, "LogNorm", fp) start_hvg = time.time() sc.pp.highly_variable_genes(adata, min_mean=0.0125, max_mean=7, min_disp=0.5, n_top_genes = 2000, batch_key = 'Channel') end_hvg = time.time() record_time(start_hvg, end_hvg, "HVG", fp) adata = adata[:, adata.var.highly_variable] start_pca = time.time() sc.pp.scale(adata, max_value = 10) sc.tl.pca(adata, n_comps = 50, random_state = rand_seed) 
end_pca = time.time() record_time(start_pca, end_pca, "PCA", fp) start_knn = time.time() sc.pp.neighbors(adata, n_neighbors = 100, n_pcs = 50, random_state = 0) end_knn = time.time() record_time(start_knn, end_knn, "KNN", fp) start_louvain = time.time() sc.tl.louvain(adata, resolution = 1.3, random_state = rand_seed) end_louvain = time.time() record_time(start_louvain, end_louvain, "Louvain", fp) start_tsne = time.time() sc.tl.tsne(adata, use_rep = 'X_pca', use_fast_tsne = True, random_state = 0) end_tsne = time.time() record_time(start_tsne, end_tsne, "tSNE", fp) start_umap = time.time() sc.tl.umap(adata, random_state = 0) end_umap = time.time() record_time(start_umap, end_umap, "UMAP", fp) start_write = time.time() adata.write("mantonbm_scanpy_result.h5ad") end_write = time.time() record_time(start_write, end_write, "Write", fp) fp.close()
[ "scanpy.pp.normalize_total", "datetime.datetime.fromtimestamp", "scanpy.pp.highly_variable_genes", "scanpy.tl.pca", "scanpy.read_10x_h5", "scanpy.pp.scale", "scanpy.pp.log1p", "scanpy.pp.filter_cells", "scanpy.pp.filter_genes", "scanpy.tl.louvain", "scanpy.pp.neighbors", "scanpy.tl.umap", "n...
[((673, 684), 'time.time', 'time.time', ([], {}), '()\n', (682, 684), False, 'import time\n'), ((693, 734), 'scanpy.read_10x_h5', 'sc.read_10x_h5', (['src_data'], {'genome': '"""GRCh38"""'}), "(src_data, genome='GRCh38')\n", (707, 734), True, 'import scanpy as sc\n'), ((855, 866), 'time.time', 'time.time', ([], {}), '()\n', (864, 866), False, 'import time\n'), ((929, 940), 'time.time', 'time.time', ([], {}), '()\n', (938, 940), False, 'import time\n'), ((966, 1007), 'scanpy.pp.filter_cells', 'sc.pp.filter_cells', (['adata'], {'min_counts': '(100)'}), '(adata, min_counts=100)\n', (984, 1007), True, 'import scanpy as sc\n'), ((1010, 1054), 'scanpy.pp.filter_cells', 'sc.pp.filter_cells', (['adata'], {'max_counts': '(600000)'}), '(adata, max_counts=600000)\n', (1028, 1054), True, 'import scanpy as sc\n'), ((1057, 1097), 'scanpy.pp.filter_cells', 'sc.pp.filter_cells', (['adata'], {'min_genes': '(500)'}), '(adata, min_genes=500)\n', (1075, 1097), True, 'import scanpy as sc\n'), ((1100, 1141), 'scanpy.pp.filter_cells', 'sc.pp.filter_cells', (['adata'], {'max_genes': '(6000)'}), '(adata, max_genes=6000)\n', (1118, 1141), True, 'import scanpy as sc\n'), ((1144, 1197), 'scanpy.pp.filter_genes', 'sc.pp.filter_genes', (['adata'], {'min_cells': '(n_cells * 0.0005)'}), '(adata, min_cells=n_cells * 0.0005)\n', (1162, 1197), True, 'import scanpy as sc\n'), ((1414, 1425), 'time.time', 'time.time', ([], {}), '()\n', (1423, 1425), False, 'import time\n'), ((1492, 1503), 'time.time', 'time.time', ([], {}), '()\n', (1501, 1503), False, 'import time\n'), ((1504, 1553), 'scanpy.pp.normalize_total', 'sc.pp.normalize_total', (['adata'], {'target_sum': '(100000.0)'}), '(adata, target_sum=100000.0)\n', (1525, 1553), True, 'import scanpy as sc\n'), ((1551, 1569), 'scanpy.pp.log1p', 'sc.pp.log1p', (['adata'], {}), '(adata)\n', (1562, 1569), True, 'import scanpy as sc\n'), ((1581, 1592), 'time.time', 'time.time', ([], {}), '()\n', (1590, 1592), False, 'import time\n'), ((1655, 1666), 
'time.time', 'time.time', ([], {}), '()\n', (1664, 1666), False, 'import time\n'), ((1667, 1788), 'scanpy.pp.highly_variable_genes', 'sc.pp.highly_variable_genes', (['adata'], {'min_mean': '(0.0125)', 'max_mean': '(7)', 'min_disp': '(0.5)', 'n_top_genes': '(2000)', 'batch_key': '"""Channel"""'}), "(adata, min_mean=0.0125, max_mean=7, min_disp=\n 0.5, n_top_genes=2000, batch_key='Channel')\n", (1694, 1788), True, 'import scanpy as sc\n'), ((1798, 1809), 'time.time', 'time.time', ([], {}), '()\n', (1807, 1809), False, 'import time\n'), ((1911, 1922), 'time.time', 'time.time', ([], {}), '()\n', (1920, 1922), False, 'import time\n'), ((1923, 1955), 'scanpy.pp.scale', 'sc.pp.scale', (['adata'], {'max_value': '(10)'}), '(adata, max_value=10)\n', (1934, 1955), True, 'import scanpy as sc\n'), ((1958, 2010), 'scanpy.tl.pca', 'sc.tl.pca', (['adata'], {'n_comps': '(50)', 'random_state': 'rand_seed'}), '(adata, n_comps=50, random_state=rand_seed)\n', (1967, 2010), True, 'import scanpy as sc\n'), ((2025, 2036), 'time.time', 'time.time', ([], {}), '()\n', (2034, 2036), False, 'import time\n'), ((2093, 2104), 'time.time', 'time.time', ([], {}), '()\n', (2102, 2104), False, 'import time\n'), ((2105, 2170), 'scanpy.pp.neighbors', 'sc.pp.neighbors', (['adata'], {'n_neighbors': '(100)', 'n_pcs': '(50)', 'random_state': '(0)'}), '(adata, n_neighbors=100, n_pcs=50, random_state=0)\n', (2120, 2170), True, 'import scanpy as sc\n'), ((2187, 2198), 'time.time', 'time.time', ([], {}), '()\n', (2196, 2198), False, 'import time\n'), ((2259, 2270), 'time.time', 'time.time', ([], {}), '()\n', (2268, 2270), False, 'import time\n'), ((2271, 2331), 'scanpy.tl.louvain', 'sc.tl.louvain', (['adata'], {'resolution': '(1.3)', 'random_state': 'rand_seed'}), '(adata, resolution=1.3, random_state=rand_seed)\n', (2284, 2331), True, 'import scanpy as sc\n'), ((2350, 2361), 'time.time', 'time.time', ([], {}), '()\n', (2359, 2361), False, 'import time\n'), ((2431, 2442), 'time.time', 'time.time', ([], {}), 
'()\n', (2440, 2442), False, 'import time\n'), ((2443, 2513), 'scanpy.tl.tsne', 'sc.tl.tsne', (['adata'], {'use_rep': '"""X_pca"""', 'use_fast_tsne': '(True)', 'random_state': '(0)'}), "(adata, use_rep='X_pca', use_fast_tsne=True, random_state=0)\n", (2453, 2513), True, 'import scanpy as sc\n'), ((2531, 2542), 'time.time', 'time.time', ([], {}), '()\n', (2540, 2542), False, 'import time\n'), ((2603, 2614), 'time.time', 'time.time', ([], {}), '()\n', (2612, 2614), False, 'import time\n'), ((2615, 2648), 'scanpy.tl.umap', 'sc.tl.umap', (['adata'], {'random_state': '(0)'}), '(adata, random_state=0)\n', (2625, 2648), True, 'import scanpy as sc\n'), ((2662, 2673), 'time.time', 'time.time', ([], {}), '()\n', (2671, 2673), False, 'import time\n'), ((2735, 2746), 'time.time', 'time.time', ([], {}), '()\n', (2744, 2746), False, 'import time\n'), ((2802, 2813), 'time.time', 'time.time', ([], {}), '()\n', (2811, 2813), False, 'import time\n'), ((1279, 1317), 'numpy.sum', 'np.sum', (['adata[:, mito_genes].X'], {'axis': '(1)'}), '(adata[:, mito_genes].X, axis=1)\n', (1285, 1317), True, 'import numpy as np\n'), ((1325, 1348), 'numpy.sum', 'np.sum', (['adata.X'], {'axis': '(1)'}), '(adata.X, axis=1)\n', (1331, 1348), True, 'import numpy as np\n'), ((221, 255), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['start_time'], {}), '(start_time)\n', (243, 255), False, 'from datetime import timedelta, datetime\n'), ((330, 362), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['end_time'], {}), '(end_time)\n', (352, 362), False, 'from datetime import timedelta, datetime\n'), ((447, 487), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(end_time - start_time)'}), '(seconds=end_time - start_time)\n', (456, 487), False, 'from datetime import timedelta, datetime\n')]
from __future__ import print_function from __future__ import absolute_import from __future__ import division from past.builtins import basestring from moby2.libactpol import freq_space_waterfall from moby2.libactpol import time_space_waterfall from moby2.scripting import products import numpy try: import pylab except: pass import matplotlib as mpl from matplotlib import pyplot as plt from matplotlib import cm, patches import matplotlib.animation as animation from matplotlib.collections import PatchCollection import moby2 moby2.pointing.set_bulletin_A() ArrayData = moby2.tod.array_data.ArrayData np = numpy def plot_with_cuts(tod, det, interactive=True): if interactive: pylab.ion() else: pylab.ioff() sel = numpy.ones(tod.nsamps, dtype=bool) for c in tod.cuts.cuts[det]: sel[c[0]:c[1]] = False t = tod.ctime - tod.ctime[0] pylab.plot(t[sel], tod.data[det][sel], "k.", markersize=1) pylab.plot(t[~sel], tod.data[det][~sel], "r.", markersize=1) return pylab.gca() class freqSpaceWaterfall( object ): """ Functions to generate the waterfall data (power spectrum) and shelve it to a depot """ def __init__( self, tod, nfreq = 400, fmin = 0.1, fmax = 200, logx = True): """ @brief Generates the waterfall matrix from the precalculated power spectrums @param tod TOD to analyze. @param nfreq Number of frequency points in waterfall plot. 
@param fmin Minimum frequency to consider in waterfall @param fmax Maximum frequency to consider in waterfall @param logx Whether to use a logarithmic or linear scale in the x axis """ self.ndet, self.ndata = tod.data.shape self.data = tod.data self.sampleTime = (tod.ctime[-1]-tod.ctime[0])/(tod.nsamps-1) self.name = tod.info.basename self.array = tod.info.array self.resultsDict = {} self.rows = tod.info.array_data["row"] self.Nrows = np.unique(self.rows).size self.cols = tod.info.array_data["col"] self.Ncols = np.unique(self.cols).size self.dets = tod.info.det_uid self.arrayQual = None self.arrayQual2 = None self.qual = None self.keys = [] self.sort = [] for i in range(len(self.rows)): self.sort.append((self.rows[i], self.cols[i])) self.sort = numpy.array(self.sort, dtype = [('rows', int),('cols', int)]) self.nfreq = nfreq self.logx = logx self.matfreqs = numpy.zeros(self.nfreq) if (logx): self.matfreqs = numpy.logspace(numpy.log10(fmin), numpy.log10(fmax), nfreq) else: self.matfreqs = numpy.linspace(fmin, fmax, nfreq) self.mat = freq_space_waterfall(self.data, self.matfreqs, self.sampleTime) def plot( self, selection = None, vmin = None, vmax = None, title = None, filename = None, rowDominance = False, units = 'DAC', separators = True, show = True, logy = True, ratio = 1.2, size = [10.0, 10.5], dpi = None, linTickSep = None, hideYLabel = False, forceAll = False, **kargs): """ @brief plot function to visualize the waterfall plot. @param selection bool array with selection of detectors to include in plot. @param vmin Minimum value in scale of waterfall plot. @param vmax Maximum value in scale of waterfall plot. params logy Whether to use logarithmic scale in the Y axis. @param title Title in plot. @param filename Filename where to save the plot. @param rowDominance Sort rows in waterfall by detector rows or columns. @param units Units of data in TOD (DAC or uK) for label. @param separators Whether to draw a line separating detector rows or columns. 
@param show Whether to show or not the plot on screen. @param ratio Aspect ratio of the plot @param size Size of the plot @param dpi Resolution of the plot @param linTickSep Separation of the frequency ticks for a linear space plot. @param hideYLabel Do not display Y label. @param forceAll Force all detectors to appear. Unselected detectors appear in black. """ if show: pylab.ion() else: pylab.ioff() if self.mat is None: self.analyze() if selection is None: sel = numpy.ones(len(self.dets), dtype = 'bool') elif forceAll: sel = numpy.ones(len(self.dets), dtype = 'bool') fsel = selection[self.dets] else: sel = selection[self.dets] if linTickSep is None: linTickSep = (self.matfreqs[-1] - self.matfreqs[0])/5 p = numpy.power(10,numpy.floor(numpy.log10(linTickSep))) linTickSep = numpy.floor(linTickSep/p)*p if rowDominance: order = numpy.argsort(self.sort[sel], order = ['rows', 'cols']) tmp = self.rows[sel][order] ylabel = "Row" else: order = numpy.argsort(self.sort[sel], order = ['cols', 'rows']) tmp = self.cols[sel][order] ylabel = "Column" # Find frequency axis which has a resolution given by self.nfreq # Produce a linear scale over a logaritmic scale. 
if self.logx: f = numpy.log10(self.matfreqs) step = (f[-1]-f[0])/(float(self.nfreq-1)) ini = numpy.floor(f[0]) end = numpy.floor(f[-1]) xt = (numpy.arange(ini, end+1) - f[0]) / step xtl = numpy.array(numpy.power(10., numpy.arange(ini, end+1, dtype = "int")), dtype = 'str') if logy: mat = numpy.log10(self.mat[sel][order]+1e-20) else: mat = self.mat[sel][order] if forceAll: fsel = fsel[order] if vmin is None or vmax is None: if forceAll: fmat = numpy.sort(self.mat[fsel].flatten()) else: fmat = numpy.sort(mat.flatten()) # fmat = fmat[fmat > 0] ntot = len(fmat) if vmin is None: vmin = fmat[int(ntot*0.02)] if vmax is None: vmax = fmat[int(ntot*0.98)] sep = [0] z = numpy.zeros(self.nfreq) j = 0; yt = []; ytl = [] yt.append(0) ytl.append('') while j < len(tmp): i = tmp[j] ini = j while tmp[j] == i: j += 1 if j == len(tmp): break assert ini != j yt.append((j+ini)//2) ytl.append(str(i)) sep.append(j) if separators: if j < len(tmp): mat = numpy.vstack([mat[:j],z,mat[j:]]) tmp = numpy.hstack([tmp[:j],-1,tmp[j:]]) if forceAll: fsel = numpy.hstack([fsel[:j], True, fsel[j:]]) j += 1 yt.append(j) ytl.append('') if forceAll: mask = numpy.ones(mat.shape, dtype = bool) mask[fsel] = False mmat = numpy.ma.array(mat, mask = mask) m = pylab.matshow(mmat, **kargs) else: m = pylab.matshow(mat, **kargs) b = pylab.colorbar(shrink=0.8) if logy: b.set_label("log10("+units+"$^2$/Hz)") else: b.set_label(units+"$^2$/Hz") if not(hideYLabel): pylab.ylabel(ylabel) pylab.xlabel("Frequency [Hz]") if self.logx: m.axes.set_xticks(xt) m.axes.set_xticklabels(xtl) else: ini = self.matfreqs[0]-numpy.mod(self.matfreqs[0], linTickSep)+linTickSep end = self.matfreqs[-1]-numpy.mod(self.matfreqs[-1], linTickSep) f = numpy.linspace(ini, end, (end-ini)/linTickSep + 1) step = (self.matfreqs[-1] - self.matfreqs[0]) / self.nfreq xt = (f-self.matfreqs[0])/step xtl = numpy.array(f, dtype = str) m.axes.set_xticks(xt) m.axes.set_xticklabels(xtl) rat = self.nfreq / len(tmp) * ratio 
m.axes.xaxis.set_ticks_position("bottom") m.axes.set_yticks(yt) if hideYLabel: m.axes.set_yticklabels([]) else: m.axes.set_yticklabels(ytl) if separators: for pos in sep: pylab.axhline(y=pos, color='black', linewidth=1) m.axes.set_aspect(rat) m.figure.set_size_inches(size[0], size[1], forward = True) m.set_clim(vmin = vmin, vmax = vmax) if "cmap" in kargs: cmap = kargs["cmap"] else: cmap = pylab.cm.RdYlBu_r cmap.set_bad([0.3, 0.3, 0.3],1.) m.set_cmap(cmap) if title is None: title = "Watefall TOD %s %s " % \ (self.name.split('.')[0], self.array) if rowDominance: title += "(Row Dominated)" else: title += "(Column Dominated)" pylab.title(title) if filename is not None: pylab.savefig(filename, dpi = dpi) if show: pylab.show() else: pylab.close() def cumQualplot( self, filename = None, forceNew = False, nbins = 50, show = True, selection = None, title = None, f0 = 10., f1 = 200., Nmin = 30): """ """ self.analyze(fmin = f0, fmax = f1, logx = False) self.qual = numpy.zeros(len(self.mat)) for i in range(len(self.mat)): p = self.mat[i][(self.matfreqs > f0)*(self.matfreqs < f1)] p -= p.mean() self.qual[i] = numpy.sqrt(p.std()/2.0/self.sampleTime) if selection is not None: self.qual = self.qual[selection] order = numpy.argsort(self.qual) self.qualDets = self.dets[order] self.qual = self.qual[order] self.qualDets = self.qualDets[self.qual != 0.0] self.qual = self.qual[self.qual != 0.0] cum = numpy.flipud(numpy.arange(len(self.qual))[Nmin:]) q = self.qual[:-Nmin] pylab.plot(q, cum) pylab.xlabel('Noise Quality') pylab.ylabel('Number of Detectors') if title is not None: pylab.title(title) if filename is not None: pylab.savefig(filename) if show: pylab.show() else: pylab.close() return q, cum def plotArray( self, vmin = None, vmax = None, title = None, filename = None, selection = None, units = 'DAC', f0 = 10., f1 = 200., forceNew = False, show = True): """ @brief Plot the quality of the power spectrum across the array in a 2D plot. 
The quality is defined as the variance of the power spectrum between 2 specified frequencies (default 10 and 200 Hz). @param vmin Minimum value in scale of the plot. @param vmax Maximum value in scale of the plot. @param title Title in plot. @param filename Filename where to save the plot. @param units Units of data in TOD (DAC or uK) for label. @param f0 Minimum frequency in quality calculation. @param f1 Maximum frequency in quality calculation. @param forceNew Whether to recalculate the quality or not. @param show Whether to show or not the plot on screen. """ if show: pylab.ion() else: pylab.ioff() if forceNew or self.mat is None: self.analyze(fmin = f0, fmax = f1, logx = False) if forceNew or self.qual is None: self.qual = numpy.zeros(len(self.mat)) for i in range(len(self.mat)): p = self.mat[i][(self.matfreqs > f0)*(self.matfreqs < f1)] p -= p.mean() self.qual[i] = numpy.sqrt(p.std()/2.0) if selection is None: self.arrayQual = self.qual.reshape([self.Nrows,self.Ncol]) else: q = self.qual.copy() q[~selection] = 0.0 self.arrayQual = q.reshape([self.Nrows,self.Ncols]) if vmin is None or vmax is None: vals = self.arrayQual.flatten() vals = numpy.sort(vals[vals != 0.0]) if vmin is None: vmin = vals[int(len(vals)*0.02)] if vmax is None: vmax = vals[int(len(vals)*0.98)] m = pylab.matshow(self.arrayQual.transpose()) m.set_clim(vmin = vmin, vmax = vmax) m.axes.xaxis.set_ticks_position("bottom") b = pylab.colorbar(shrink=0.8) b.formatter.set_powerlimits([-2,-2]) #b.set_label('%s*rtsec'%units) b.draw_all() pylab.xlabel("rows") pylab.ylabel("cols") if title is None: title = "Noise Quality TOD %s %s " % \ (self.name.split('.')[0], self.array) pylab.title(title) if filename is not None: pylab.savefig(filename) if show: pylab.show() else: pylab.close() class timeSpaceWaterfall( object ): """ @brief Class object intended to visualize a TOD in time space as a waterfall plot """ def __init__( self, tod, ntime = 1000, tmin = None, tmax = None): """ @brief Initialization 
function for the timeSpaceWaterfall class object @param tod TOD for which to produce the waterfall plot @param ntime Number of time points in waterfall plot. @param tmin Minimum time to consider in waterfall @param tmax Maximum time to consider in waterfall """ DT = (tod.ctime[-1]-tod.ctime[0]) self.sampleTime = DT/(tod.nsamps-1) if tmin is None or tmin < 0.0: tmin = 0.0 if tmax is None or tmax > DT: tmax = DT self.times = numpy.linspace(tmin,tmax, ntime) self.ntime = ntime self.ndet, self.ndata = tod.data.shape self.resultsDict = {} self.rows = tod.info.array_data["row"] self.cols = tod.info.array_data["col"] self.dets = tod.info.det_uid self.mat = time_space_waterfall(tod.data, self.times, self.sampleTime) self.sort = [] for i in range(len(self.rows)): self.sort.append((self.rows[i], self.cols[i])) self.sort = numpy.array(self.sort, dtype = [('rows', int),('cols', int)]) def plot( self, selection = None, vmin = None, vmax = None, level = 0.95, units = 'DAC', title = None, rowDominance = False, separators = True, filename = None, show = True): """ @brief Plot function to visualize the time space waterfall plot. @param selection Bool array with selection of detectors to show in plot. @param vmin Minimum value in scale of waterfall plot @param vmax Maximum value in scale of waterfall plot @param level Fraction of values to consider in scale range (1 => min-max). @param units Units of the TOD (DAC or uK). @param title Title to add to the plot. @param rowDominance Whether to sort the waterfall by rows or columns. @param filename Name of the file where to store the plot. @param show Whether to display or not the plot. 
""" if show: pylab.ion() else: pylab.ioff() if selection is None: sel = numpy.ones(numpy.shape(self.mat)[0], dtype = 'bool') else: sel = selection if vmin is None or vmax is None: val = numpy.sort(numpy.reshape(self.mat[sel], numpy.size(self.mat[sel]))) N = len(val)-1 if vmin is None: vmin = val[int(N*(1.0-level)/2.0)] if vmax is None: vmax = val[int(N*(level+1.0)/2.0)] if rowDominance: order = numpy.argsort(self.sort[sel], order = ['rows', 'cols']) tmp = self.rows[sel][order] ylabel = "Row" else: order = numpy.argsort(self.sort[sel], order = ['cols', 'rows']) tmp = self.cols[sel][order] ylabel = "Column" mat = self.mat[sel][order] sep = [0] z = numpy.zeros(numpy.shape(mat)[1]) j = 0; yt = []; ytl = [] yt.append(0) ytl.append('') while j < len(tmp): i = tmp[j] ini = j while tmp[j] == i: j += 1 if j == len(tmp): break assert ini != j yt.append((j+ini)//2) ytl.append(str(i)) sep.append(j) if j < len(tmp): mat = numpy.vstack([mat[:j],z,mat[j:]]) tmp = numpy.hstack([tmp[:j],-1,tmp[j:]]) j += 1 yt.append(j) ytl.append('') m = pylab.matshow(mat) b = pylab.colorbar(shrink=0.8) b.set_label(units) pylab.ylabel(ylabel) pylab.xlabel("Time [s]") shape = numpy.shape(self.mat[sel]) rat = shape[1] / shape[0]*1.2 m.axes.set_aspect(rat) xt = m.axes.get_xticks(); xtl = [] st = numpy.mean(self.times[1:]-self.times[:-1]) ti = self.times[0] for x in xt: xtl.append("%12.3f"%(x*st+ti)) m.axes.set_xticklabels(xtl) m.axes.xaxis.set_ticks_position("bottom") m.axes.set_yticks(yt) m.axes.set_yticklabels(ytl) if separators: for pos in sep: pylab.axhline(y=pos, color='black', linewidth=1) m.figure.set_size_inches(10., 10.5, forward = True) m.set_clim(vmin = vmin, vmax = vmax) if title is not None: pylab.title(title) if filename is not None: pylab.savefig(filename) if not(show): pylab.clf() class scanWaterfall( object ): """ @brief a waterfall plot of roughly azimuth angle versus time, stitching together the common mode in pieces of left and right going scans. 
""" def __init__( self, tod, selection = None ): """ """ if selection is not None: sel = selection else: a = numpy.min(tod.data, axis = 1) b = numpy.max(tod.data, axis = 1) sel = ~((a == 0.0)*(b == 0.0)) cm = numpy.mean(tod.data[sel], axis = 0) self.cm = cm self.sampleTime = (tod.ctime[-1]-tod.ctime[0])/(tod.nsamps-1) T = int(1./tod.scanFreq/self.sampleTime/2) az = tod.az[:2*T] pivot = numpy.where(az == az.min())[0][0] i = pivot dir = 1 k = 0 self.time = [0] self.mat = numpy.zeros([int(tod.nsamps-pivot)//T,T]) while i+T < tod.nsamps: if dir == 1: self.mat[k] = cm[i:i+T]; else: self.mat[k] = numpy.flipud(cm[i:i+T]); self.time.append((self.time[-1]+T)) i += T dir *= -1 k += 1 self.time = numpy.array(self.time)*self.sampleTime/60 self.az = tod.az[pivot:pivot+T]*180/numpy.pi self.az -= self.az.mean() def plot( self, vmin = None, vmax = None, units = 'DAC', title = None, filename = None, show = True): """ @brief Plot function to visualize the time space waterfall plot. @param vmin Minimum value in scale of waterfall plot @param vmax Maximum value in scale of waterfall plot @param units Units of the TOD (DAC or uK). @param title Title to add to the plot. @param filename Name of the file where to store the plot. @param show Whether to display or not the plot. 
""" if vmin is None: numpy.median(numpy.min(self.mat, axis = 1)) if vmax is None: numpy.median(numpy.max(self.mat, axis = 1)) m = pylab.matshow(self.mat) b = pylab.colorbar(shrink=0.8) b.set_label(units) pylab.ylabel("Time [min]") pylab.xlabel("dAz [deg]") shape = numpy.shape(self.mat) rat = shape[1] / shape[0] * 1.2 m.axes.set_aspect(rat) m.axes.invert_yaxis() daz = (self.az.max()-self.az.min())/(len(self.az)-1) x_max = int(self.az.max()) x = numpy.arange(2*x_max+1)-x_max xt = list((numpy.arange(2*x_max+1)-x_max)/daz + len(self.az)//2) xtl = list(numpy.array(x, dtype = 'str')) m.axes.set_xticks(xt) m.axes.set_xticklabels(xtl) m.axes.xaxis.set_ticks_position("bottom") y = numpy.arange(self.time[-1]) yt = list(y/self.time[1]) ytl = list(numpy.array(y, dtype = 'str')) m.axes.set_yticks(yt) m.axes.set_yticklabels(ytl) m.figure.set_size_inches(10., 10.5, forward = True) m.set_clim(vmin = vmin, vmax = vmax) if title is not None: pylab.title(title) if filename is not None: pylab.savefig(filename) if show: pylab.show() else: pylab.clf() class quality( object ): """ @brief Object to cuantify the quality of the scan harmonics in the TOD. 
""" def __init__( self, tod, f0 = 1.0, f1 = 200 ): """ """ self.name = tod.info.basename self.array = tod.info.array d, r, c = tod.listUncut() self.dets = numpy.array(d) self.rows = numpy.array(r) self.cols = numpy.array(c) sel = self.dets[(self.rows>13)*(self.rows<17)*(self.cols>13)*(self.cols<17)] print(len(sel)) f = numpy.zeros(len(sel)) for i in range(len(sel)): p, nu, w = mobyUtils.power(tod.data[sel[i]], dt = tod.sampleTime) f[i] = tuneScanFreq(p, nu, tod.scanFreq) f[i] = tuneScanFreq(p, nu, f[i], scope = 0.0001) self.sf = numpy.median(f) self.f = f mask = generateArmonicMask(nu, self.sf, window = 6) sel = (nu > f0)*(nu < f1) print("Start arrayQual calculation") self.arrayQual = numpy.zeros([tod.ncol, tod.nrow]) for i in range(tod.ndet): p, nu, w = mobyUtils.power(tod.data[i], dt = tod.sampleTime) mean1 = p[sel*mask].mean() mean2 = p[sel*~mask].mean() self.arrayQual[tod.cols[i]][tod.rows[i]] = mean1/mean2 - 1.0 def plotQual( self, vmin = None, vmax = None, title = None, filename = None, units = 'DAC', f0 = 10., f1 = 200., forceNew = False, show = True): """ @brief Plot the quality of the power spectrum across the array in a 2D plot. The quality is defined as the variance of the power spectrum between 2 specified frequencies (default 10 and 200 Hz). @param vmin Minimum value in scale of the plot. @param vmax Maximum value in scale of the plot. @param title Title in plot. @param filename Filename where to save the plot. @param units Units of data in TOD (DAC or uK) for label. @param f0 Minimum frequency in quality calculation. @param f1 Maximum frequency in quality calculation. @param forceNew Whether to recalculate the quality or not. @param show Whether to show or not the plot on screen. 
""" if vmin is None or vmax is None: vals = self.arrayQual.flatten() vals = numpy.sort(vals[vals != 0.0]) if vmin is None: vmin = vals[int(len(vals)*0.02)] if vmax is None: vmax = vals[int(len(vals)*0.98)] m = pylab.matshow(self.arrayQual) m.set_clim(vmin = vmin, vmax = vmax) m.axes.xaxis.set_ticks_position("bottom") b = pylab.colorbar(shrink=0.8) b.set_label(units+" rms") pylab.xlabel("rows") pylab.ylabel("cols") if title is None: title = "Noise Quality TOD %s %s " % \ (self.name.split('.')[0], self.array) pylab.title(title) if filename is not None: pylab.savefig(filename) if show: pylab.show() else: pylab.clf() def generateArmonicMask(freqs, scanFreq, window = 10): """ @brief Generates a mask that isolates those frequencies which are near a scan armonic. """ w = window//2 df = freqs[2]-freqs[1] index = numpy.where(numpy.mod(freqs,scanFreq) < df)[0] mask = numpy.zeros(len(freqs), dtype = 'bool') for i in index: if i-w < 0: mask[:i+w] = True elif i+w >= len(freqs): mask[i-w:] = True else: mask[i-w:i+w] = True return mask def tuneScanFreq(p, nu, scanFreq, scope = 0.002, nsamp = 100, plot = False): """ @brief Find the scan frequency by maximizing the harmonic content of a signal """ df = nu[2] - nu[1] freqs = (numpy.arange(nsamp, dtype = 'float')/nsamp-0.5)*scope + scanFreq pow = numpy.zeros(nsamp) for i in range(nsamp): index = numpy.where(numpy.mod(nu,freqs[i]) < df)[0] pow[i] = numpy.mean(numpy.log(p[index])) if plot: pylab.plot(pow), pylab.show() mf = freqs[pow == pow.max()] if numpy.ndim(mf) > 0: return mf[0] else: return mf def array_plots( param, det=None, instrument = 'actpol', array = None, season = None, tod=None, darks=True, pmax=None, pmin=None, outrange=True, param_name='', param_units='', title='', display = 'show', save_name = 'newfig.png' ): """Plot a parameter across the array Arguments: ---------- param: parameter to plot Optional: --------- |det: list of detectors to plot (can be a list of det_uid or a tuple (row,col)) |instrument: instrument 
|array: array name ('ar1', 'pa2', 'pa3', 'pa4') |season: observing season ('s13', 's14', 's15'...) or |tod: tod object (tod.det, tod.info.array_name, tod.info.season) pmax/pmin: range for parameter values outrange: if True, parameters outside [pmin,pmax] will be plotted in black param_name, param_units, title: informations to add to the plot display: ['show', 'save'] either show the plot, or directly save it save_name: if display == 'save', name of the output file """ if det is None and array is None and tod is None: print("List of (dets, season and array name) or tod object must be prov\ ided") return 0 det = np.asarray(det, dtype = int) param = np.asarray(param, dtype = float) if tod is not None: array_data = tod.info.array_data instrument = tod.info.instrument array = tod.info.array season = tod.info.season else: if instrument is None: print("Please provide the instrument if no TOD is provided") return 0 if array is None: print("Please provide the array if no TOD is provided") return 0 if season is None: print("Please provide the season if no TOD is provided") return 0 array_data = products.get_array_data( {"instrument":instrument, "array_name":array, "season":season}) if len(det) == 2: Row, Col = det Detid = Row*32 + Col else: Detid = det pos, polfamily, freq = get_position( Detid, instrument, array, season ) x, y = pos if pmin == None: pmin = param.min() if pmax == None: pmax = param.max() if param.min() == param.max(): color = ['b']*param.size else: color = get_color( param, pmax = pmax, pmin = pmin, outrange = outrange ) if np.unique(freq).size == 1: patchlist = get_patches( pos, color, polfamily, freq ) else: patchlist = get_patches( pos, color, polfamily, freq, radius=0.012 ) # Add other detectors in grey det_uid = array_data['det_uid'] if np.unique(freq).size == 1: det_uid = det_uid[array_data['nom_freq']==np.unique(freq)] if Detid.size < det_uid.size: _dets = set( np.arange(det_uid.size) ) _dets.difference_update( set( Detid ) ) _dets = np.asarray( 
list(_dets) ) _pos, _polfamily, _freq = get_position( _dets, instrument, array, season ) _x, _y = _pos _color = np.array([[0.,0.,0.,0.3]]).repeat( _x.size, axis = 0) if np.unique(freq).size==1: _patchlist = get_patches( _pos, _color, _polfamily, _freq ) else: _patchlist = get_patches( _pos, _color, _polfamily, _freq, radius=0.012 ) else: _patchlist = [] #Add dark detectors as black circles if darks: dd = array_data['det_uid'][array_data['det_type']=='dark_tes'] x_dark, y_dark = array_data['sky_x'][dd], array_data['sky_y'][dd] for xd, yd in zip( x_dark, y_dark ): patchlist.append( patches.Wedge( [xd,yd], 0.02, 0., 360., width=0.003, fc = 'k', ec='k' ) ) x_lim, y_lim = get_array_plot_lims(array, season) plt.ioff() plt.figure( figsize = (10,10) ) ax = create_plot( patchlist+_patchlist, pmin, pmax, x_lim, y_lim, array ) set_infos( param_name, param_units, title, ax ) if display == 'show': plt.ion() plt.show() elif display == 'save': plt.savefig(save_name) plt.close() def get_position(Detid, instrument, array, season): """Return the position in the focal plane for a list of detectors """ params = { 'instrument' : instrument, 'array_name' : array, 'season' : season, } arraydata = moby2.scripting.get_array_data(params) polfamily = arraydata['pol_family'] x = arraydata['sky_x'] y = arraydata['sky_y'] col = arraydata['col'] row = arraydata['row'] freq = arraydata['nom_freq'] detid = row*32+col idx_sort = detid.argsort() x = x[idx_sort] y = y[idx_sort] polfamily = polfamily[idx_sort] freq = freq[idx_sort] return (x[Detid], y[Detid]), polfamily[Detid], freq[Detid] def get_color(param, pmax=None, pmin=None, cmap = cm.RdYlBu_r, outrange = False): if pmax==None: pmax = param.max() if pmin==None: pmin = param.min() color = cmap( (param-pmin)/(pmax-pmin) ) if outrange: color[param>pmax,:] = (0., 0., 0., 0.) color[param<pmin,:] = (0., 0., 0., 0.) 
return color def get_patches(pos, color,polfamily, freq, radius=0.015): patchlist = [] if len(np.unique(freq)) == 1: for x, y, c, pf in zip( pos[0], pos[1], color, polfamily ): if pf == 'A': theta1, theta2 = (90, 270) elif pf == 'B': theta1, theta2 = (270, 90) elif pf == 'X': theta1, theta2 = (0,360) patchlist.append( patches.Wedge( [x,y], radius, theta1, theta2, fc = c, ec=c ) ) else: f1, f2 = np.unique(freq)[-2:] for x, y, c, pf, f in zip( pos[0], pos[1], color, polfamily, freq ): if pf == 'A': if f == f1: theta1, theta2 = (90, 180) elif f == f2: theta1, theta2 = (180, 270) elif pf == 'B': if f == f1: theta1, theta2 = (0, 90) elif f == f2: theta1, theta2 = (270, 360) elif pf == 'X': theta1, theta2 = (0,360) patchlist.append( patches.Wedge( [x,y], radius*1.2, theta1, theta2, fc = c, ec=c ) ) return patchlist def create_plot(patchlist, pmin, pmax, x_lim, y_lim, array_name): if pmin != pmax: ax1 = plt.subplot2grid((1,10),(0,0),colspan=9,aspect='equal') ax2 = plt.subplot2grid((1,10),(0,9)) else: ax1 = plt.axes() for p in patchlist: ax1.add_patch( p ) ax1.set_xlim( x_lim ) ax1.set_ylim( y_lim ) ax1.tick_params( bottom = 'off', top='off', right='off', left='off', labelbottom='off', labelleft='off', which = 'both' ) plt.text(0.05, 0.05, 'Detector not available', color = 'grey', transform = ax1.transAxes) plt.text(0.05, 0.025, 'Detector out of range', color = 'black', transform = ax1.transAxes) plot_wafer_names(ax1, array_name) # PA = {'ar1':'PA1', # 'ar2':'PA2', # 'ar3':'PA3', # 'pa4':'PA4', # "ar3_90":"PA3", # "ar3_150":"PA3", # } plt.text(0.9, 0.05, array_name, fontsize='xx-large', ha='center', va='center', transform = ax1.transAxes) # if array_name in ['ar3', 'ar5', 'ar6']: # plt.text(0.1,0.95,'150', ha='center', va='center', # transform=ax1.transAxes) # plt.text(0.1,0.925,'90', ha='center', va='center', # transform=ax1.transAxes) # ax1.add_patch( # patches.Wedge((0.1,0.94),0.03,0,180,transform=ax1.transAxes,edgecolor='k',fc='none') # ) # ax1.add_patch( # 
patches.Wedge((0.1,0.94),0.03,180,360,transform=ax1.transAxes,edgecolor='k',fc='none') # ) # elif array_name == 'ar4': # plt.text(0.1,0.95,'220', ha='center', va='center', # transform=ax1.transAxes) # plt.text(0.1,0.925,'150', ha='center', va='center', # transform=ax1.transAxes) # ax1.add_patch( # patches.Wedge((0.1,0.94),0.03,0,180,transform=ax1.transAxes,ec='k',fc='none') # ) # ax1.add_patch( # patches.Wedge((0.1,0.94),0.03,180,360,transform=ax1.transAxes,ec='k',fc='none') # ) if pmin != pmax: norm = mpl.colors.Normalize(vmin=pmin,vmax=pmax) cb1 = mpl.colorbar.ColorbarBase(ax2,cmap=cm.RdYlBu_r,norm=norm,orientation='vertical') return ax1, ax2 else: return ax1 def set_infos( param_name, units, title, ax ): if type(ax) == tuple: ax1, ax2 = ax ax1.set_title( title, fontsize = 15 ) ax2.set_ylabel( '%s [%s]' %(param_name, units), rotation = 270, fontsize = 20 ) else: ax.set_title( title, fontsize = 15 ) def tod3D(tod, dets=None, time_resolution=2., prange=[None,None], sky_coords=False, pointingpar = None, anim_time = 10., display='show', filename=None, **kwargs): """3D visualization of a TOD: animation of the 2D focal plane through the TOD Arguments: ---------- - tod: TOD object to visualize Optional: --------- - dets: list of detectors to plot (default will use tod.det_uid) - time_resolution: Tod will be downsample to reach a time resolution as close as possible to the input using a power of 2 resampling (every 2**n samples). Default is 2s - prange: scale - sky_coords: if True, project the focal plane on the sky and show the TOD animation in RA, DEC (must provide pointingpar) - pointingpar: focal plane model, eg. {'source': 'fp_file', 'filename': '.../RelativeOffsets/template_ar2_150529s.txt'} - anim_time: total time of the animated TOD in seconds - display: 'show' or 'save' - filename: only if display=='save', by default: todname.gif - other args to define the animation (interval, repeat, repeat_delay...) 
""" if dets is None: dets = tod.det_uid # Re-sampling tres = (tod.ctime[-1] - tod.ctime[0]) / tod.nsamps r = time_resolution / tres Nresamp = 2**np.ceil(np.log2(r)) tod_ds = tod.copy(resample=Nresamp) print("Downsampled tod has %i samples, with time resolution of %.2fs" %( tod_ds.nsamps, (tod_ds.ctime[-1] - tod_ds.ctime[0]) / tod_ds.nsamps )) # Get focal plane infos if sky_coords: x, y = get_sky_coords(tod_ds, pointingpar) x = x[dets] y = y[dets] x *= 180. / np.pi y *= 180. / np.pi if x.max() - x.min() > 180.: x[x<0] += 360. if y.max() - y.min() > 180.: y[y<0] += 360. else: x = tod.info.array_data['sky_x'][dets] y = tod.info.array_data['sky_y'][dets] polfamily = tod.info.array_data['pol_family'][dets] # Get limits fot the plot if sky_coords: x_lim = (x.min(), x.max()) y_lim = (y.min(), y.max()) else: x_lim, y_lim = get_array_plot_lims( tod.info.array, tod.info.season) pmin, pmax = prange if pmin == None: pmin = tod.data[dets].min() if pmax == None: pmax = tod.data[dets].max() plt.ioff() # Create animation fig = plt.figure(figsize=(7,8)) ax1 = fig.add_axes([0.1,0.2,0.8,0.7]) if sky_coords: ax1.tick_params( bottom = 'off', top='on', right='off', left='on', labelbottom='off', labelleft='on', labeltop='on', which = 'both' ) else: ax1.tick_params( bottom = 'off', top='off', right='off', left='off', labelbottom='off', labelleft='off', which = 'both' ) ax2 = fig.add_axes([0.1,0.1,0.8,0.1]) ax2.plot(tod_ds.ctime - tod_ds.ctime[0], tod_ds.data[dets].T, 'b', alpha=0.1) ax1.set_xlim( x_lim ) ax1.set_ylim( y_lim ) ax2.set_xlim((0,tod_ds.ctime[-1]-tod_ds.ctime[0])) ax2.set_ylim((pmin,pmax)) ax2.set_yticks((pmin,0,pmax)) if sky_coords: ax1.set_xlabel('RA (degrees)') ax1.set_ylabel('DEC (degrees)') ax2.set_xlabel('Time [s]') ax2.set_ylabel('pW') plt.figtext(0.5, 0.95, '%s' %tod_ds.info.basename, ha = 'center', va = 'center', fontsize='xx-large') if not sky_coords: plot_wafer_names(ax1, tod_ds.info.array) line = ax2.axvline(0, color='r') color = get_color( tod_ds.data[dets,0], 
pmax = pmax, pmin = pmin ) colors = [ get_color( tod_ds.data[dets,i], pmax = pmax, pmin = pmin ) for i in range(tod_ds.nsamps) ] if sky_coords: # pos = x[:,0], y[:,0] patchlists = [ PatchCollection( get_patches( (x[:,i],y[:,i]), colors[i], polfamily ) ) for i in range(tod_ds.nsamps) ] ax1.add_collection(patchlists[0]) else: pos = x, y patchlist = PatchCollection( get_patches( pos, colors[0], polfamily ) ) print(patchlist.get_facecolor()) # p = patchlist[0] # ax1.add_patch(p) ax1.add_collection( patchlist ) def animate(i): if sky_coords: patchlist = patchlists[i] color = colors[i] patchlist.set_facecolors(color) patchlist.set_edgecolors(color) ax1.collections.pop() ax1.add_collection(patchlist) else: patchlist = ax1.collections[0] color = colors[i] patchlist.set_facecolors(color) patchlist.set_edgecolors(color) line.set_xdata(tod_ds.ctime[i] - tod_ds.ctime[0]) return line interval = float(anim_time) / tod_ds.nsamps * 1000 ani = animation.FuncAnimation(fig, animate, np.arange(tod_ds.nsamps), interval=interval, **kwargs) if display == 'show': plt.ion() plt.show() plt.draw() elif display == 'save': if filename is None: filename = '%s.gif' %tod_ds.info.basename Writer = animation.writers['imagemagick_file'] writer = Writer(fps=1/interval*1000) ani.save(filename,writer=writer) plt.close() def get_sky_coords(tod, pointingpar): tod.fplane = products.get_focal_plane( pointingpar, det_uid=tod.det_uid, tod_info=tod.info, tod=tod) ra, dec = moby2.pointing.get_coords( tod.ctime, tod.az, tod.alt, focal_plane=tod.fplane) return ra, dec def get_array_plot_lims(array, season): if array == 'ar1': if season == '2013': xmin, xmax = -1.3094665029667607, -0.41013064910400088 ymin, ymax = -1.220441626351696, -0.30605809090138458 else: xmin, xmax = -1.3287624329466337, -0.42355235864871088 ymin, ymax = -1.3491856371418196, -0.43444330849940416 elif array == 'ar2': xmin, xmax = -0.048737122029174233, 0.85432442834243771 ymin, ymax = -1.3379718650336261, -0.41940152960224164 elif array == 
'ar3_90' or array == 'ar3_150' or array == "ar3": xmin, xmax = -0.69483493785298767, 0.18037663135240153 ymin, ymax = -0.15016921848482107, 0.80671585130610191 elif array == 'ar4': if season == '2016': xmin, xmax = -1.3172299710057627, -0.57135351330445694 ymin, ymax = -1.2802741932198245, -0.41247231671467965 else: xmin, xmax = -0.98219689986339409, -0.23632044216208847 ymin, ymax = -0.82135665065746755, 0.046445225847677393 elif array == 'ar5': xmin, xmax = 0.2634513877020111, 1.0299899366437952 ymin, ymax = -0.85103798177410184, 0.03837494139098363 elif array == 'ar6': xmin, xmax = -0.34516751588843669, 0.39094596571401696 ymin, ymax = 0.3670596582454384, 1.3154743040302765 else: print("Array must be from the list ['ar1', 'pa2', 'ar3', 'ar4', 'ar5', 'ar6']") x_lim = ( xmin - (xmax-xmin)*0.1, xmax + (xmax-xmin)*0.1 ) y_lim = ( ymin - (ymax-ymin)*0.1, ymax + (ymax-ymin)*0.1 ) return x_lim, y_lim def plot_wafer_names(ax, array): if array == 'ar1': plt.text(0.05, 0.2, 'W10', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.7, 0.1, 'SH2B', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.9, 0.35, 'W08', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.75, 0.85, 'SH1A', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.1, 0.85, 'W09', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.01, 0.45, 'SH2A', color='gray', transform = ax.transAxes, fontsize = 'x-large') elif array == 'ar2': plt.text(0.15, 0.09, 'FH3C', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.75, 0.15, 'SH4B', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.9, 0.4, 'FH6', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.6, 0.85, 'SH3B', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.1, 0.85, 'FHC1', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.01, 0.45, 'SH4A', 
color='gray', transform = ax.transAxes, fontsize = 'x-large', rotation=90) elif 'ar3' in array: plt.text(0.5, 0.02, 'FH3', color='gray', transform = ax.transAxes, fontsize = 'x-large', ha='center') plt.text(0.85, 0.25, 'SH1A', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.9, 0.7, 'FH4', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.5, 0.95, 'SH1B', color='gray', transform = ax.transAxes, fontsize = 'x-large', ha='center') plt.text(0.05, 0.75, 'FH2', color='gray', transform = ax.transAxes, fontsize = 'x-large') plt.text(0.03, 0.25, 'SH8B', color='gray', transform = ax.transAxes, fontsize = 'x-large')
[ "pylab.title", "numpy.log10", "numpy.hstack", "matplotlib.colorbar.ColorbarBase", "pylab.savefig", "pylab.xlabel", "numpy.log", "numpy.argsort", "numpy.array", "moby2.pointing.get_coords", "matplotlib.pyplot.subplot2grid", "pylab.gca", "numpy.mod", "numpy.arange", "numpy.mean", "pylab....
[((537, 568), 'moby2.pointing.set_bulletin_A', 'moby2.pointing.set_bulletin_A', ([], {}), '()\n', (566, 568), False, 'import moby2\n'), ((737, 771), 'numpy.ones', 'numpy.ones', (['tod.nsamps'], {'dtype': 'bool'}), '(tod.nsamps, dtype=bool)\n', (747, 771), False, 'import numpy\n'), ((873, 931), 'pylab.plot', 'pylab.plot', (['t[sel]', 'tod.data[det][sel]', '"""k."""'], {'markersize': '(1)'}), "(t[sel], tod.data[det][sel], 'k.', markersize=1)\n", (883, 931), False, 'import pylab\n'), ((936, 996), 'pylab.plot', 'pylab.plot', (['t[~sel]', 'tod.data[det][~sel]', '"""r."""'], {'markersize': '(1)'}), "(t[~sel], tod.data[det][~sel], 'r.', markersize=1)\n", (946, 996), False, 'import pylab\n'), ((1008, 1019), 'pylab.gca', 'pylab.gca', ([], {}), '()\n', (1017, 1019), False, 'import pylab\n'), ((24729, 24747), 'numpy.zeros', 'numpy.zeros', (['nsamp'], {}), '(nsamp)\n', (24740, 24747), False, 'import numpy\n'), ((29891, 29901), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (29899, 29901), True, 'from matplotlib import pyplot as plt\n'), ((29906, 29934), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (29916, 29934), True, 'from matplotlib import pyplot as plt\n'), ((30480, 30518), 'moby2.scripting.get_array_data', 'moby2.scripting.get_array_data', (['params'], {}), '(params)\n', (30510, 30518), False, 'import moby2\n'), ((32993, 33083), 'matplotlib.pyplot.text', 'plt.text', (['(0.05)', '(0.05)', '"""Detector not available"""'], {'color': '"""grey"""', 'transform': 'ax1.transAxes'}), "(0.05, 0.05, 'Detector not available', color='grey', transform=ax1.\n transAxes)\n", (33001, 33083), True, 'from matplotlib import pyplot as plt\n'), ((33100, 33191), 'matplotlib.pyplot.text', 'plt.text', (['(0.05)', '(0.025)', '"""Detector out of range"""'], {'color': '"""black"""', 'transform': 'ax1.transAxes'}), "(0.05, 0.025, 'Detector out of range', color='black', transform=ax1\n .transAxes)\n", (33108, 33191), True, 'from matplotlib 
import pyplot as plt\n'), ((33417, 33525), 'matplotlib.pyplot.text', 'plt.text', (['(0.9)', '(0.05)', 'array_name'], {'fontsize': '"""xx-large"""', 'ha': '"""center"""', 'va': '"""center"""', 'transform': 'ax1.transAxes'}), "(0.9, 0.05, array_name, fontsize='xx-large', ha='center', va=\n 'center', transform=ax1.transAxes)\n", (33425, 33525), True, 'from matplotlib import pyplot as plt\n'), ((37555, 37565), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (37563, 37565), True, 'from matplotlib import pyplot as plt\n'), ((37599, 37625), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 8)'}), '(figsize=(7, 8))\n', (37609, 37625), True, 'from matplotlib import pyplot as plt\n'), ((38469, 38572), 'matplotlib.pyplot.figtext', 'plt.figtext', (['(0.5)', '(0.95)', "('%s' % tod_ds.info.basename)"], {'ha': '"""center"""', 'va': '"""center"""', 'fontsize': '"""xx-large"""'}), "(0.5, 0.95, '%s' % tod_ds.info.basename, ha='center', va=\n 'center', fontsize='xx-large')\n", (38480, 38572), True, 'from matplotlib import pyplot as plt\n'), ((40688, 40779), 'moby2.scripting.products.get_focal_plane', 'products.get_focal_plane', (['pointingpar'], {'det_uid': 'tod.det_uid', 'tod_info': 'tod.info', 'tod': 'tod'}), '(pointingpar, det_uid=tod.det_uid, tod_info=tod.\n info, tod=tod)\n', (40712, 40779), False, 'from moby2.scripting import products\n'), ((40798, 40875), 'moby2.pointing.get_coords', 'moby2.pointing.get_coords', (['tod.ctime', 'tod.az', 'tod.alt'], {'focal_plane': 'tod.fplane'}), '(tod.ctime, tod.az, tod.alt, focal_plane=tod.fplane)\n', (40823, 40875), False, 'import moby2\n'), ((692, 703), 'pylab.ion', 'pylab.ion', ([], {}), '()\n', (701, 703), False, 'import pylab\n'), ((714, 726), 'pylab.ioff', 'pylab.ioff', ([], {}), '()\n', (724, 726), False, 'import pylab\n'), ((2398, 2458), 'numpy.array', 'numpy.array', (['self.sort'], {'dtype': "[('rows', int), ('cols', int)]"}), "(self.sort, dtype=[('rows', int), ('cols', int)])\n", (2409, 2458), False, 'import 
numpy\n'), ((2536, 2559), 'numpy.zeros', 'numpy.zeros', (['self.nfreq'], {}), '(self.nfreq)\n', (2547, 2559), False, 'import numpy\n'), ((2763, 2826), 'moby2.libactpol.freq_space_waterfall', 'freq_space_waterfall', (['self.data', 'self.matfreqs', 'self.sampleTime'], {}), '(self.data, self.matfreqs, self.sampleTime)\n', (2783, 2826), False, 'from moby2.libactpol import freq_space_waterfall\n'), ((6283, 6306), 'numpy.zeros', 'numpy.zeros', (['self.nfreq'], {}), '(self.nfreq)\n', (6294, 6306), False, 'import numpy\n'), ((7307, 7333), 'pylab.colorbar', 'pylab.colorbar', ([], {'shrink': '(0.8)'}), '(shrink=0.8)\n', (7321, 7333), False, 'import pylab\n'), ((7514, 7544), 'pylab.xlabel', 'pylab.xlabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (7526, 7544), False, 'import pylab\n'), ((8987, 9005), 'pylab.title', 'pylab.title', (['title'], {}), '(title)\n', (8998, 9005), False, 'import pylab\n'), ((9775, 9799), 'numpy.argsort', 'numpy.argsort', (['self.qual'], {}), '(self.qual)\n', (9788, 9799), False, 'import numpy\n'), ((10084, 10102), 'pylab.plot', 'pylab.plot', (['q', 'cum'], {}), '(q, cum)\n', (10094, 10102), False, 'import pylab\n'), ((10111, 10140), 'pylab.xlabel', 'pylab.xlabel', (['"""Noise Quality"""'], {}), "('Noise Quality')\n", (10123, 10140), False, 'import pylab\n'), ((10149, 10184), 'pylab.ylabel', 'pylab.ylabel', (['"""Number of Detectors"""'], {}), "('Number of Detectors')\n", (10161, 10184), False, 'import pylab\n'), ((12518, 12544), 'pylab.colorbar', 'pylab.colorbar', ([], {'shrink': '(0.8)'}), '(shrink=0.8)\n', (12532, 12544), False, 'import pylab\n'), ((12658, 12678), 'pylab.xlabel', 'pylab.xlabel', (['"""rows"""'], {}), "('rows')\n", (12670, 12678), False, 'import pylab\n'), ((12687, 12707), 'pylab.ylabel', 'pylab.ylabel', (['"""cols"""'], {}), "('cols')\n", (12699, 12707), False, 'import pylab\n'), ((12844, 12862), 'pylab.title', 'pylab.title', (['title'], {}), '(title)\n', (12855, 12862), False, 'import pylab\n'), ((13752, 13785), 
'numpy.linspace', 'numpy.linspace', (['tmin', 'tmax', 'ntime'], {}), '(tmin, tmax, ntime)\n', (13766, 13785), False, 'import numpy\n'), ((14039, 14098), 'moby2.libactpol.time_space_waterfall', 'time_space_waterfall', (['tod.data', 'self.times', 'self.sampleTime'], {}), '(tod.data, self.times, self.sampleTime)\n', (14059, 14098), False, 'from moby2.libactpol import time_space_waterfall\n'), ((14241, 14301), 'numpy.array', 'numpy.array', (['self.sort'], {'dtype': "[('rows', int), ('cols', int)]"}), "(self.sort, dtype=[('rows', int), ('cols', int)])\n", (14252, 14301), False, 'import numpy\n'), ((16717, 16735), 'pylab.matshow', 'pylab.matshow', (['mat'], {}), '(mat)\n', (16730, 16735), False, 'import pylab\n'), ((16748, 16774), 'pylab.colorbar', 'pylab.colorbar', ([], {'shrink': '(0.8)'}), '(shrink=0.8)\n', (16762, 16774), False, 'import pylab\n'), ((16810, 16830), 'pylab.ylabel', 'pylab.ylabel', (['ylabel'], {}), '(ylabel)\n', (16822, 16830), False, 'import pylab\n'), ((16839, 16863), 'pylab.xlabel', 'pylab.xlabel', (['"""Time [s]"""'], {}), "('Time [s]')\n", (16851, 16863), False, 'import pylab\n'), ((16881, 16907), 'numpy.shape', 'numpy.shape', (['self.mat[sel]'], {}), '(self.mat[sel])\n', (16892, 16907), False, 'import numpy\n'), ((17034, 17078), 'numpy.mean', 'numpy.mean', (['(self.times[1:] - self.times[:-1])'], {}), '(self.times[1:] - self.times[:-1])\n', (17044, 17078), False, 'import numpy\n'), ((18170, 18203), 'numpy.mean', 'numpy.mean', (['tod.data[sel]'], {'axis': '(0)'}), '(tod.data[sel], axis=0)\n', (18180, 18203), False, 'import numpy\n'), ((19731, 19754), 'pylab.matshow', 'pylab.matshow', (['self.mat'], {}), '(self.mat)\n', (19744, 19754), False, 'import pylab\n'), ((19767, 19793), 'pylab.colorbar', 'pylab.colorbar', ([], {'shrink': '(0.8)'}), '(shrink=0.8)\n', (19781, 19793), False, 'import pylab\n'), ((19829, 19855), 'pylab.ylabel', 'pylab.ylabel', (['"""Time [min]"""'], {}), "('Time [min]')\n", (19841, 19855), False, 'import pylab\n'), ((19864, 
19889), 'pylab.xlabel', 'pylab.xlabel', (['"""dAz [deg]"""'], {}), "('dAz [deg]')\n", (19876, 19889), False, 'import pylab\n'), ((19907, 19928), 'numpy.shape', 'numpy.shape', (['self.mat'], {}), '(self.mat)\n', (19918, 19928), False, 'import numpy\n'), ((20421, 20448), 'numpy.arange', 'numpy.arange', (['self.time[-1]'], {}), '(self.time[-1])\n', (20433, 20448), False, 'import numpy\n'), ((21189, 21203), 'numpy.array', 'numpy.array', (['d'], {}), '(d)\n', (21200, 21203), False, 'import numpy\n'), ((21225, 21239), 'numpy.array', 'numpy.array', (['r'], {}), '(r)\n', (21236, 21239), False, 'import numpy\n'), ((21261, 21275), 'numpy.array', 'numpy.array', (['c'], {}), '(c)\n', (21272, 21275), False, 'import numpy\n'), ((21674, 21689), 'numpy.median', 'numpy.median', (['f'], {}), '(f)\n', (21686, 21689), False, 'import numpy\n'), ((21875, 21908), 'numpy.zeros', 'numpy.zeros', (['[tod.ncol, tod.nrow]'], {}), '([tod.ncol, tod.nrow])\n', (21886, 21908), False, 'import numpy\n'), ((23429, 23458), 'pylab.matshow', 'pylab.matshow', (['self.arrayQual'], {}), '(self.arrayQual)\n', (23442, 23458), False, 'import pylab\n'), ((23566, 23592), 'pylab.colorbar', 'pylab.colorbar', ([], {'shrink': '(0.8)'}), '(shrink=0.8)\n', (23580, 23592), False, 'import pylab\n'), ((23635, 23655), 'pylab.xlabel', 'pylab.xlabel', (['"""rows"""'], {}), "('rows')\n", (23647, 23655), False, 'import pylab\n'), ((23664, 23684), 'pylab.ylabel', 'pylab.ylabel', (['"""cols"""'], {}), "('cols')\n", (23676, 23684), False, 'import pylab\n'), ((23821, 23839), 'pylab.title', 'pylab.title', (['title'], {}), '(title)\n', (23832, 23839), False, 'import pylab\n'), ((24967, 24981), 'numpy.ndim', 'numpy.ndim', (['mf'], {}), '(mf)\n', (24977, 24981), False, 'import numpy\n'), ((27861, 27955), 'moby2.scripting.products.get_array_data', 'products.get_array_data', (["{'instrument': instrument, 'array_name': array, 'season': season}"], {}), "({'instrument': instrument, 'array_name': array,\n 'season': season})\n", (27884, 
27955), False, 'from moby2.scripting import products\n'), ((30107, 30116), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (30114, 30116), True, 'from matplotlib import pyplot as plt\n'), ((30125, 30135), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30133, 30135), True, 'from matplotlib import pyplot as plt\n'), ((32622, 32682), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 10)', '(0, 0)'], {'colspan': '(9)', 'aspect': '"""equal"""'}), "((1, 10), (0, 0), colspan=9, aspect='equal')\n", (32638, 32682), True, 'from matplotlib import pyplot as plt\n'), ((32692, 32725), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 10)', '(0, 9)'], {}), '((1, 10), (0, 9))\n', (32708, 32725), True, 'from matplotlib import pyplot as plt\n'), ((32747, 32757), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (32755, 32757), True, 'from matplotlib import pyplot as plt\n'), ((34640, 34682), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'pmin', 'vmax': 'pmax'}), '(vmin=pmin, vmax=pmax)\n', (34660, 34682), True, 'import matplotlib as mpl\n'), ((34696, 34784), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['ax2'], {'cmap': 'cm.RdYlBu_r', 'norm': 'norm', 'orientation': '"""vertical"""'}), "(ax2, cmap=cm.RdYlBu_r, norm=norm, orientation=\n 'vertical')\n", (34721, 34784), True, 'import matplotlib as mpl\n'), ((40302, 40311), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (40309, 40311), True, 'from matplotlib import pyplot as plt\n'), ((40320, 40330), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (40328, 40330), True, 'from matplotlib import pyplot as plt\n'), ((40339, 40349), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (40347, 40349), True, 'from matplotlib import pyplot as plt\n'), ((42575, 42664), 'matplotlib.pyplot.text', 'plt.text', (['(0.05)', '(0.2)', '"""W10"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.05, 0.2, 'W10', 
color='gray', transform=ax.transAxes, fontsize=\n 'x-large')\n", (42583, 42664), True, 'from matplotlib import pyplot as plt\n'), ((42689, 42778), 'matplotlib.pyplot.text', 'plt.text', (['(0.7)', '(0.1)', '"""SH2B"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.7, 0.1, 'SH2B', color='gray', transform=ax.transAxes, fontsize=\n 'x-large')\n", (42697, 42778), True, 'from matplotlib import pyplot as plt\n'), ((42803, 42892), 'matplotlib.pyplot.text', 'plt.text', (['(0.9)', '(0.35)', '"""W08"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.9, 0.35, 'W08', color='gray', transform=ax.transAxes, fontsize=\n 'x-large')\n", (42811, 42892), True, 'from matplotlib import pyplot as plt\n'), ((42917, 43008), 'matplotlib.pyplot.text', 'plt.text', (['(0.75)', '(0.85)', '"""SH1A"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.75, 0.85, 'SH1A', color='gray', transform=ax.transAxes, fontsize\n ='x-large')\n", (42925, 43008), True, 'from matplotlib import pyplot as plt\n'), ((43033, 43122), 'matplotlib.pyplot.text', 'plt.text', (['(0.1)', '(0.85)', '"""W09"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.1, 0.85, 'W09', color='gray', transform=ax.transAxes, fontsize=\n 'x-large')\n", (43041, 43122), True, 'from matplotlib import pyplot as plt\n'), ((43147, 43238), 'matplotlib.pyplot.text', 'plt.text', (['(0.01)', '(0.45)', '"""SH2A"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.01, 0.45, 'SH2A', color='gray', transform=ax.transAxes, fontsize\n ='x-large')\n", (43155, 43238), True, 'from matplotlib import pyplot as plt\n'), ((2710, 2743), 'numpy.linspace', 'numpy.linspace', (['fmin', 'fmax', 'nfreq'], {}), '(fmin, fmax, nfreq)\n', (2724, 2743), False, 'import numpy\n'), ((4369, 4380), 'pylab.ion', 'pylab.ion', ([], {}), '()\n', (4378, 4380), False, 'import 
pylab\n'), ((4395, 4407), 'pylab.ioff', 'pylab.ioff', ([], {}), '()\n', (4405, 4407), False, 'import pylab\n'), ((4975, 5028), 'numpy.argsort', 'numpy.argsort', (['self.sort[sel]'], {'order': "['rows', 'cols']"}), "(self.sort[sel], order=['rows', 'cols'])\n", (4988, 5028), False, 'import numpy\n'), ((5132, 5185), 'numpy.argsort', 'numpy.argsort', (['self.sort[sel]'], {'order': "['cols', 'rows']"}), "(self.sort[sel], order=['cols', 'rows'])\n", (5145, 5185), False, 'import numpy\n'), ((5428, 5454), 'numpy.log10', 'numpy.log10', (['self.matfreqs'], {}), '(self.matfreqs)\n', (5439, 5454), False, 'import numpy\n'), ((5527, 5544), 'numpy.floor', 'numpy.floor', (['f[0]'], {}), '(f[0])\n', (5538, 5544), False, 'import numpy\n'), ((5563, 5581), 'numpy.floor', 'numpy.floor', (['f[-1]'], {}), '(f[-1])\n', (5574, 5581), False, 'import numpy\n'), ((5799, 5840), 'numpy.log10', 'numpy.log10', (['(self.mat[sel][order] + 1e-20)'], {}), '(self.mat[sel][order] + 1e-20)\n', (5810, 5840), False, 'import numpy\n'), ((7073, 7106), 'numpy.ones', 'numpy.ones', (['mat.shape'], {'dtype': 'bool'}), '(mat.shape, dtype=bool)\n', (7083, 7106), False, 'import numpy\n'), ((7159, 7189), 'numpy.ma.array', 'numpy.ma.array', (['mat'], {'mask': 'mask'}), '(mat, mask=mask)\n', (7173, 7189), False, 'import numpy\n'), ((7208, 7236), 'pylab.matshow', 'pylab.matshow', (['mmat'], {}), '(mmat, **kargs)\n', (7221, 7236), False, 'import pylab\n'), ((7267, 7294), 'pylab.matshow', 'pylab.matshow', (['mat'], {}), '(mat, **kargs)\n', (7280, 7294), False, 'import pylab\n'), ((7485, 7505), 'pylab.ylabel', 'pylab.ylabel', (['ylabel'], {}), '(ylabel)\n', (7497, 7505), False, 'import pylab\n'), ((7834, 7888), 'numpy.linspace', 'numpy.linspace', (['ini', 'end', '((end - ini) / linTickSep + 1)'], {}), '(ini, end, (end - ini) / linTickSep + 1)\n', (7848, 7888), False, 'import numpy\n'), ((8017, 8042), 'numpy.array', 'numpy.array', (['f'], {'dtype': 'str'}), '(f, dtype=str)\n', (8028, 8042), False, 'import numpy\n'), 
((9039, 9071), 'pylab.savefig', 'pylab.savefig', (['filename'], {'dpi': 'dpi'}), '(filename, dpi=dpi)\n', (9052, 9071), False, 'import pylab\n'), ((9091, 9103), 'pylab.show', 'pylab.show', ([], {}), '()\n', (9101, 9103), False, 'import pylab\n'), ((9118, 9131), 'pylab.close', 'pylab.close', ([], {}), '()\n', (9129, 9131), False, 'import pylab\n'), ((10215, 10233), 'pylab.title', 'pylab.title', (['title'], {}), '(title)\n', (10226, 10233), False, 'import pylab\n'), ((10267, 10290), 'pylab.savefig', 'pylab.savefig', (['filename'], {}), '(filename)\n', (10280, 10290), False, 'import pylab\n'), ((10308, 10320), 'pylab.show', 'pylab.show', ([], {}), '()\n', (10318, 10320), False, 'import pylab\n'), ((10335, 10348), 'pylab.close', 'pylab.close', ([], {}), '()\n', (10346, 10348), False, 'import pylab\n'), ((11426, 11437), 'pylab.ion', 'pylab.ion', ([], {}), '()\n', (11435, 11437), False, 'import pylab\n'), ((11452, 11464), 'pylab.ioff', 'pylab.ioff', ([], {}), '()\n', (11462, 11464), False, 'import pylab\n'), ((12211, 12240), 'numpy.sort', 'numpy.sort', (['vals[vals != 0.0]'], {}), '(vals[vals != 0.0])\n', (12221, 12240), False, 'import numpy\n'), ((12896, 12919), 'pylab.savefig', 'pylab.savefig', (['filename'], {}), '(filename)\n', (12909, 12919), False, 'import pylab\n'), ((12937, 12949), 'pylab.show', 'pylab.show', ([], {}), '()\n', (12947, 12949), False, 'import pylab\n'), ((12964, 12977), 'pylab.close', 'pylab.close', ([], {}), '()\n', (12975, 12977), False, 'import pylab\n'), ((15264, 15275), 'pylab.ion', 'pylab.ion', ([], {}), '()\n', (15273, 15275), False, 'import pylab\n'), ((15290, 15302), 'pylab.ioff', 'pylab.ioff', ([], {}), '()\n', (15300, 15302), False, 'import pylab\n'), ((15754, 15807), 'numpy.argsort', 'numpy.argsort', (['self.sort[sel]'], {'order': "['rows', 'cols']"}), "(self.sort[sel], order=['rows', 'cols'])\n", (15767, 15807), False, 'import numpy\n'), ((15911, 15964), 'numpy.argsort', 'numpy.argsort', (['self.sort[sel]'], {'order': "['cols', 
'rows']"}), "(self.sort[sel], order=['cols', 'rows'])\n", (15924, 15964), False, 'import numpy\n'), ((17571, 17589), 'pylab.title', 'pylab.title', (['title'], {}), '(title)\n', (17582, 17589), False, 'import pylab\n'), ((17623, 17646), 'pylab.savefig', 'pylab.savefig', (['filename'], {}), '(filename)\n', (17636, 17646), False, 'import pylab\n'), ((17669, 17680), 'pylab.clf', 'pylab.clf', ([], {}), '()\n', (17678, 17680), False, 'import pylab\n'), ((18037, 18064), 'numpy.min', 'numpy.min', (['tod.data'], {'axis': '(1)'}), '(tod.data, axis=1)\n', (18046, 18064), False, 'import numpy\n'), ((18083, 18110), 'numpy.max', 'numpy.max', (['tod.data'], {'axis': '(1)'}), '(tod.data, axis=1)\n', (18092, 18110), False, 'import numpy\n'), ((20140, 20167), 'numpy.arange', 'numpy.arange', (['(2 * x_max + 1)'], {}), '(2 * x_max + 1)\n', (20152, 20167), False, 'import numpy\n'), ((20262, 20289), 'numpy.array', 'numpy.array', (['x'], {'dtype': '"""str"""'}), "(x, dtype='str')\n", (20273, 20289), False, 'import numpy\n'), ((20502, 20529), 'numpy.array', 'numpy.array', (['y'], {'dtype': '"""str"""'}), "(y, dtype='str')\n", (20513, 20529), False, 'import numpy\n'), ((20735, 20753), 'pylab.title', 'pylab.title', (['title'], {}), '(title)\n', (20746, 20753), False, 'import pylab\n'), ((20787, 20810), 'pylab.savefig', 'pylab.savefig', (['filename'], {}), '(filename)\n', (20800, 20810), False, 'import pylab\n'), ((20828, 20840), 'pylab.show', 'pylab.show', ([], {}), '()\n', (20838, 20840), False, 'import pylab\n'), ((20855, 20866), 'pylab.clf', 'pylab.clf', ([], {}), '()\n', (20864, 20866), False, 'import pylab\n'), ((23271, 23300), 'numpy.sort', 'numpy.sort', (['vals[vals != 0.0]'], {}), '(vals[vals != 0.0])\n', (23281, 23300), False, 'import numpy\n'), ((23873, 23896), 'pylab.savefig', 'pylab.savefig', (['filename'], {}), '(filename)\n', (23886, 23896), False, 'import pylab\n'), ((23914, 23926), 'pylab.show', 'pylab.show', ([], {}), '()\n', (23924, 23926), False, 'import pylab\n'), 
((23941, 23952), 'pylab.clf', 'pylab.clf', ([], {}), '()\n', (23950, 23952), False, 'import pylab\n'), ((24863, 24882), 'numpy.log', 'numpy.log', (['p[index]'], {}), '(p[index])\n', (24872, 24882), False, 'import numpy\n'), ((24897, 24912), 'pylab.plot', 'pylab.plot', (['pow'], {}), '(pow)\n', (24907, 24912), False, 'import pylab\n'), ((24914, 24926), 'pylab.show', 'pylab.show', ([], {}), '()\n', (24924, 24926), False, 'import pylab\n'), ((30172, 30194), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_name'], {}), '(save_name)\n', (30183, 30194), True, 'from matplotlib import pyplot as plt\n'), ((30203, 30214), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (30212, 30214), True, 'from matplotlib import pyplot as plt\n'), ((40610, 40621), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (40619, 40621), True, 'from matplotlib import pyplot as plt\n'), ((43288, 43379), 'matplotlib.pyplot.text', 'plt.text', (['(0.15)', '(0.09)', '"""FH3C"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.15, 0.09, 'FH3C', color='gray', transform=ax.transAxes, fontsize\n ='x-large')\n", (43296, 43379), True, 'from matplotlib import pyplot as plt\n'), ((43404, 43495), 'matplotlib.pyplot.text', 'plt.text', (['(0.75)', '(0.15)', '"""SH4B"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.75, 0.15, 'SH4B', color='gray', transform=ax.transAxes, fontsize\n ='x-large')\n", (43412, 43495), True, 'from matplotlib import pyplot as plt\n'), ((43520, 43608), 'matplotlib.pyplot.text', 'plt.text', (['(0.9)', '(0.4)', '"""FH6"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.9, 0.4, 'FH6', color='gray', transform=ax.transAxes, fontsize=\n 'x-large')\n", (43528, 43608), True, 'from matplotlib import pyplot as plt\n'), ((43633, 43723), 'matplotlib.pyplot.text', 'plt.text', (['(0.6)', '(0.85)', '"""SH3B"""'], {'color': '"""gray"""', 'transform': 
'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.6, 0.85, 'SH3B', color='gray', transform=ax.transAxes, fontsize=\n 'x-large')\n", (43641, 43723), True, 'from matplotlib import pyplot as plt\n'), ((43748, 43838), 'matplotlib.pyplot.text', 'plt.text', (['(0.1)', '(0.85)', '"""FHC1"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.1, 0.85, 'FHC1', color='gray', transform=ax.transAxes, fontsize=\n 'x-large')\n", (43756, 43838), True, 'from matplotlib import pyplot as plt\n'), ((43863, 43967), 'matplotlib.pyplot.text', 'plt.text', (['(0.01)', '(0.45)', '"""SH4A"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""', 'rotation': '(90)'}), "(0.01, 0.45, 'SH4A', color='gray', transform=ax.transAxes, fontsize\n ='x-large', rotation=90)\n", (43871, 43967), True, 'from matplotlib import pyplot as plt\n'), ((2623, 2640), 'numpy.log10', 'numpy.log10', (['fmin'], {}), '(fmin)\n', (2634, 2640), False, 'import numpy\n'), ((2642, 2659), 'numpy.log10', 'numpy.log10', (['fmax'], {}), '(fmax)\n', (2653, 2659), False, 'import numpy\n'), ((4901, 4928), 'numpy.floor', 'numpy.floor', (['(linTickSep / p)'], {}), '(linTickSep / p)\n', (4912, 4928), False, 'import numpy\n'), ((7777, 7817), 'numpy.mod', 'numpy.mod', (['self.matfreqs[-1]', 'linTickSep'], {}), '(self.matfreqs[-1], linTickSep)\n', (7786, 7817), False, 'import numpy\n'), ((8402, 8450), 'pylab.axhline', 'pylab.axhline', ([], {'y': 'pos', 'color': '"""black"""', 'linewidth': '(1)'}), "(y=pos, color='black', linewidth=1)\n", (8415, 8450), False, 'import pylab\n'), ((16115, 16131), 'numpy.shape', 'numpy.shape', (['mat'], {}), '(mat)\n', (16126, 16131), False, 'import numpy\n'), ((16550, 16585), 'numpy.vstack', 'numpy.vstack', (['[mat[:j], z, mat[j:]]'], {}), '([mat[:j], z, mat[j:]])\n', (16562, 16585), False, 'import numpy\n'), ((16606, 16642), 'numpy.hstack', 'numpy.hstack', (['[tmp[:j], -1, tmp[j:]]'], {}), '([tmp[:j], -1, tmp[j:]])\n', (16618, 16642), 
False, 'import numpy\n'), ((17387, 17435), 'pylab.axhline', 'pylab.axhline', ([], {'y': 'pos', 'color': '"""black"""', 'linewidth': '(1)'}), "(y=pos, color='black', linewidth=1)\n", (17400, 17435), False, 'import pylab\n'), ((18682, 18707), 'numpy.flipud', 'numpy.flipud', (['cm[i:i + T]'], {}), '(cm[i:i + T])\n', (18694, 18707), False, 'import numpy\n'), ((18835, 18857), 'numpy.array', 'numpy.array', (['self.time'], {}), '(self.time)\n', (18846, 18857), False, 'import numpy\n'), ((19618, 19645), 'numpy.min', 'numpy.min', (['self.mat'], {'axis': '(1)'}), '(self.mat, axis=1)\n', (19627, 19645), False, 'import numpy\n'), ((19687, 19714), 'numpy.max', 'numpy.max', (['self.mat'], {'axis': '(1)'}), '(self.mat, axis=1)\n', (19696, 19714), False, 'import numpy\n'), ((24197, 24223), 'numpy.mod', 'numpy.mod', (['freqs', 'scanFreq'], {}), '(freqs, scanFreq)\n', (24206, 24223), False, 'import numpy\n'), ((29707, 29777), 'matplotlib.patches.Wedge', 'patches.Wedge', (['[xd, yd]', '(0.02)', '(0.0)', '(360.0)'], {'width': '(0.003)', 'fc': '"""k"""', 'ec': '"""k"""'}), "([xd, yd], 0.02, 0.0, 360.0, width=0.003, fc='k', ec='k')\n", (29720, 29777), False, 'from matplotlib import cm, patches\n'), ((31690, 31747), 'matplotlib.patches.Wedge', 'patches.Wedge', (['[x, y]', 'radius', 'theta1', 'theta2'], {'fc': 'c', 'ec': 'c'}), '([x, y], radius, theta1, theta2, fc=c, ec=c)\n', (31703, 31747), False, 'from matplotlib import cm, patches\n'), ((32381, 32444), 'matplotlib.patches.Wedge', 'patches.Wedge', (['[x, y]', '(radius * 1.2)', 'theta1', 'theta2'], {'fc': 'c', 'ec': 'c'}), '([x, y], radius * 1.2, theta1, theta2, fc=c, ec=c)\n', (32394, 32444), False, 'from matplotlib import cm, patches\n'), ((44034, 44136), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.02)', '"""FH3"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""', 'ha': '"""center"""'}), "(0.5, 0.02, 'FH3', color='gray', transform=ax.transAxes, fontsize=\n 'x-large', ha='center')\n", (44042, 
44136), True, 'from matplotlib import pyplot as plt\n'), ((44178, 44269), 'matplotlib.pyplot.text', 'plt.text', (['(0.85)', '(0.25)', '"""SH1A"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.85, 0.25, 'SH1A', color='gray', transform=ax.transAxes, fontsize\n ='x-large')\n", (44186, 44269), True, 'from matplotlib import pyplot as plt\n'), ((44294, 44382), 'matplotlib.pyplot.text', 'plt.text', (['(0.9)', '(0.7)', '"""FH4"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.9, 0.7, 'FH4', color='gray', transform=ax.transAxes, fontsize=\n 'x-large')\n", (44302, 44382), True, 'from matplotlib import pyplot as plt\n'), ((44407, 44510), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.95)', '"""SH1B"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""', 'ha': '"""center"""'}), "(0.5, 0.95, 'SH1B', color='gray', transform=ax.transAxes, fontsize=\n 'x-large', ha='center')\n", (44415, 44510), True, 'from matplotlib import pyplot as plt\n'), ((44552, 44642), 'matplotlib.pyplot.text', 'plt.text', (['(0.05)', '(0.75)', '"""FH2"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.05, 0.75, 'FH2', color='gray', transform=ax.transAxes, fontsize=\n 'x-large')\n", (44560, 44642), True, 'from matplotlib import pyplot as plt\n'), ((44667, 44758), 'matplotlib.pyplot.text', 'plt.text', (['(0.03)', '(0.25)', '"""SH8B"""'], {'color': '"""gray"""', 'transform': 'ax.transAxes', 'fontsize': '"""x-large"""'}), "(0.03, 0.25, 'SH8B', color='gray', transform=ax.transAxes, fontsize\n ='x-large')\n", (44675, 44758), True, 'from matplotlib import pyplot as plt\n'), ((4850, 4873), 'numpy.log10', 'numpy.log10', (['linTickSep'], {}), '(linTickSep)\n', (4861, 4873), False, 'import numpy\n'), ((5600, 5626), 'numpy.arange', 'numpy.arange', (['ini', '(end + 1)'], {}), '(ini, end + 1)\n', (5612, 5626), False, 'import numpy\n'), ((5709, 5748), 
'numpy.arange', 'numpy.arange', (['ini', '(end + 1)'], {'dtype': '"""int"""'}), "(ini, end + 1, dtype='int')\n", (5721, 5748), False, 'import numpy\n'), ((6756, 6791), 'numpy.vstack', 'numpy.vstack', (['[mat[:j], z, mat[j:]]'], {}), '([mat[:j], z, mat[j:]])\n', (6768, 6791), False, 'import numpy\n'), ((6816, 6852), 'numpy.hstack', 'numpy.hstack', (['[tmp[:j], -1, tmp[j:]]'], {}), '([tmp[:j], -1, tmp[j:]])\n', (6828, 6852), False, 'import numpy\n'), ((7690, 7729), 'numpy.mod', 'numpy.mod', (['self.matfreqs[0]', 'linTickSep'], {}), '(self.matfreqs[0], linTickSep)\n', (7699, 7729), False, 'import numpy\n'), ((15362, 15383), 'numpy.shape', 'numpy.shape', (['self.mat'], {}), '(self.mat)\n', (15373, 15383), False, 'import numpy\n'), ((15534, 15559), 'numpy.size', 'numpy.size', (['self.mat[sel]'], {}), '(self.mat[sel])\n', (15544, 15559), False, 'import numpy\n'), ((24654, 24688), 'numpy.arange', 'numpy.arange', (['nsamp'], {'dtype': '"""float"""'}), "(nsamp, dtype='float')\n", (24666, 24688), False, 'import numpy\n'), ((24803, 24826), 'numpy.mod', 'numpy.mod', (['nu', 'freqs[i]'], {}), '(nu, freqs[i])\n', (24812, 24826), False, 'import numpy\n'), ((6915, 6955), 'numpy.hstack', 'numpy.hstack', (['[fsel[:j], True, fsel[j:]]'], {}), '([fsel[:j], True, fsel[j:]])\n', (6927, 6955), False, 'import numpy\n'), ((20189, 20216), 'numpy.arange', 'numpy.arange', (['(2 * x_max + 1)'], {}), '(2 * x_max + 1)\n', (20201, 20216), False, 'import numpy\n')]
# -*- coding: utf-8 -*-
"""Download the CMMD image collection from TCIA.

Fetches the list of DICOM series for the collection, optionally restricts it
to a random subset of patients, downloads each series as a zip archive,
extracts it, and writes a CSV index of all downloaded files.
"""
import click
import logging
from pathlib import Path
import os
from dotenv import find_dotenv, load_dotenv
import json
import pandas as pd
import numpy as np
import urllib
import zipfile
from tqdm import tqdm
from glob import glob
from collections import defaultdict

from src.data.tcia import TCIAClient

# N_PATIENTS=00 # if zero download all patients
SEED=42 # to download the same N_patients
CLINICAL_DATA_URL='https://wiki.cancerimagingarchive.net/download/attachments/70230508/CMMD_clinicaldata_revision.xlsx?api=v2'


@click.command()
@click.argument('collection_dir', type=click.Path())
@click.argument('collection_reference_filename', type=click.STRING, default='collection_files_list.csv')
@click.option('N_PATIENTS', '-n', default=0, show_default=True, type=click.IntRange(min=0, max=1775, clamp=True))
def main(collection_dir, collection_reference_filename, N_PATIENTS):
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).

        collection_dir: destination directory for the downloaded images.
        collection_reference_filename: name of the CSV index written inside
            collection_dir.
        N_PATIENTS: if non-zero, only download series for this many randomly
            chosen patients (seeded with SEED for reproducibility).
    """
    logger = logging.getLogger(__name__)
    collection_reference_fp = os.path.join(collection_dir, collection_reference_filename)
    # If the index CSV already exists, assume the collection was downloaded
    # on a previous run and return its contents without re-downloading.
    if os.path.exists(collection_reference_fp):
        logger.info(f'output file {collection_reference_fp} already exists, skipping')
        return pd.read_csv(collection_reference_fp)

    # series_fp is defined at module level in the __main__ block below;
    # it caches the TCIA series metadata between runs.
    if os.path.exists(series_fp):
        logger.info(f'getting collection series uids from {series_fp}')
        series_df = pd.read_csv(series_fp)
    else:
        logger.info(f'downloading collection series uids data from TCIA')
        series_df = get_series_uids(series_fp)
    logger.debug(f'showing 3 samples out of {len(series_df)} from {series_fp}...')
    logger.debug(series_df.sample(3, random_state=SEED, replace=False))

    # Filter by patient id
    # patient information is needed mainly for testing on a small number of samples
    if N_PATIENTS:
        np.random.seed(SEED)
        # Find whichever column carries the patient identifier; the exact
        # column name varies between TCIA metadata dumps.
        patientid_cols = [c for c in series_df.columns if 'patientname' in c.lower() or 'patientid' in c.lower()]
        patientid_col = patientid_cols[0]
        logger.debug(f'found {len(patientid_cols)} `patient_name` column(s), using {patientid_col}')
        selected_pids = np.random.choice(series_df[patientid_col].unique(), N_PATIENTS, replace=False)
        series_df = series_df.loc[series_df[patientid_col].isin(selected_pids)]
        logger.debug(f'number of filtered series uids: {len(series_df)}')

    logger.info(f'downloading collection images to {collection_dir}')
    if not os.path.exists(collection_dir):
        logger.info(f'creating directory {collection_dir} as it doesn\'t exist')
        os.makedirs(collection_dir, exist_ok=True)
    collection_ref_df = download_collection(series_df['SeriesInstanceUID'], collection_dir, remove_zip=True)
    collection_ref_df.to_csv(collection_reference_fp)
    logger.info(f'{len(collection_ref_df)} collection references saved to {collection_reference_fp}')
    logger.debug(f'3 samples:\n{collection_ref_df.sample(3, random_state=SEED, replace=False)}')
    return collection_ref_df


def get_series_uids(series_fp, collection = 'CMMD'):
    """ Obtain series details from TCIA and save to csv.
        SeriesInstanceUID attribute is needed to download collection images
        return pd.DataFrame about series details
    """
    df = fetch_series_instance(collection)
    df.to_csv(series_fp, index=False)
    # Re-read the CSV so the returned frame matches what later runs will load.
    return pd.read_csv(series_fp)


def fetch_series_instance(collection):
    """Query TCIA for series data.

    Returns a DataFrame built from the JSON response of the TCIA
    ``getSeries`` endpoint. On HTTP error the error is printed and
    ``response`` is left as the raw (undecoded) reply, so callers should
    expect the request to normally succeed.
    """
    try:
        response = tcia_client.get_series(collection = collection, outputFormat='json')
        response = json.loads(response.read().decode(response.info().get_content_charset('utf8')))
    except urllib.error.HTTPError as err:
        print ("Error executing " + tcia_client.GET_SERIES + ":\nError Code: ", str(err.code) , "\nMessage: " , err.read())
    return pd.DataFrame.from_dict(response)


def download_collection(series_uids, output_basedir, remove_zip=True):
    """ download (if necessary) series images from TCIA
        return dataframe of collected files for each series id
        dictionary {series_uid: [downloaded_files]} mapping series_uid to downloaded images filepaths
        {'uid': ['1.dcm', '2.dcm', 'unexpected.foo']}
    """
    series_to_fps = defaultdict(list)
    for series_uid in tqdm(series_uids):
        series_dir = get_series(series_uid, dest_basedir=output_basedir, remove_zip=remove_zip)
        series_to_fps[series_uid] = glob(os.path.join(series_dir, '*')) # list downloaded `everything` (ie .dcm)
    collection_ref_df = pd.DataFrame.from_dict(series_to_fps, orient='index')
    collection_ref_df.index.name = series_uids.name
    # Flatten the per-series file lists into one row per file, indexed by
    # series uid only.
    collection_ref_df = pd.DataFrame(collection_ref_df.unstack().dropna().sort_values(), columns = ['filepath'])
    collection_ref_df.index = collection_ref_df.index.droplevel(0) # files order is not relevant
    return collection_ref_df


def get_series(seriesuid, dest_basedir, remove_zip=True):
    """Download and extract one series archive; return its extraction dir.

    Skips work already done: an existing non-empty extraction directory is
    returned as-is, and an existing valid zip archive is reused instead of
    re-downloading.
    """
    # arbitrary choice to put downloaded series images inside
    # a folder named after sereis uid
    # if know a-priori dicom filenames are all unique in the collection
    # adding this subdirectory could be avoided
    series_destdir = os.path.join(dest_basedir, seriesuid)
    # check if folder exists and is not empty, assumes unzipping won't fail!
    if os.path.exists(series_destdir) and os.path.isdir(series_destdir) and os.listdir(series_destdir):
        return series_destdir
    zip_fn = f'{seriesuid}.zip'
    zip_fp = os.path.join(dest_basedir, zip_fn)
    # if an archive is already there, use it
    if not os.path.exists(zip_fp):
        # This check assumes archive file names are unique (i.e. different) in the collection
        # otherwise will always extract same archive
        tcia_client.get_image(seriesuid, dest_basedir, zip_fn)
    else:
        # if something breaks during download archive will be corrupted
        # so that next time the program is run it will break when resuming from last patient
        try:
            # probe the archive for corruption; the context manager closes
            # the file handle immediately so it is not leaked
            with zipfile.ZipFile(zip_fp):
                pass
        except (IOError, zipfile.BadZipfile) as e:
            # remove archive and retry download
            print('Bad zip file given as input. (%s) %s' % (zip_fp, e))
            # TODO: check if get_image endpoint can resume download from partial archives
            os.remove(zip_fp)
            tcia_client.get_image(seriesuid, dest_basedir, zip_fn)
    with zipfile.ZipFile(zip_fp, 'r') as zp:
        zp.extractall(series_destdir)
    if remove_zip:
        os.remove(zip_fp)
    return series_destdir


if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=log_fmt)
    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    # secret_env = dotenv_values('.env')
    tcia_key = os.environ.get('TCIA_API_KEY', 'tcia key not found!')
    baseurl = os.environ.get('BASEURL', 'https://services.cancerimagingarchive.net/services/v4/TCIA/query')
    tcia_client = TCIAClient(credentials = tcia_key, baseUrl = baseurl)
    # series path used to retrieve DICOM images using SeriesInstanceUID attribute
    series_fp = os.path.join(project_dir, 'data', 'external', 'series.csv')
    # clean_dicom_metadata = True # whether to clean raw dicom attributes
    # collection_reference_fp = os.path.join(project_dir, 'references', 'collection_files_list.csv')
    # preprocessed_reference_fp = os.path.join(project_dir, 'references', 'preprocessed_files_list.csv')
    main()
[ "logging.getLogger", "pandas.read_csv", "zipfile.ZipFile", "os.remove", "os.path.exists", "os.listdir", "click.IntRange", "pathlib.Path", "pandas.DataFrame.from_dict", "os.path.isdir", "numpy.random.seed", "click.command", "click.argument", "dotenv.find_dotenv", "src.data.tcia.TCIAClient...
[((552, 567), 'click.command', 'click.command', ([], {}), '()\n', (565, 567), False, 'import click\n'), ((622, 730), 'click.argument', 'click.argument', (['"""collection_reference_filename"""'], {'type': 'click.STRING', 'default': '"""collection_files_list.csv"""'}), "('collection_reference_filename', type=click.STRING, default=\n 'collection_files_list.csv')\n", (636, 730), False, 'import click\n'), ((1070, 1097), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1087, 1097), False, 'import logging\n'), ((1128, 1187), 'os.path.join', 'os.path.join', (['collection_dir', 'collection_reference_filename'], {}), '(collection_dir, collection_reference_filename)\n', (1140, 1187), False, 'import os\n'), ((1195, 1234), 'os.path.exists', 'os.path.exists', (['collection_reference_fp'], {}), '(collection_reference_fp)\n', (1209, 1234), False, 'import os\n'), ((1383, 1408), 'os.path.exists', 'os.path.exists', (['series_fp'], {}), '(series_fp)\n', (1397, 1408), False, 'import os\n'), ((3466, 3488), 'pandas.read_csv', 'pd.read_csv', (['series_fp'], {}), '(series_fp)\n', (3477, 3488), True, 'import pandas as pd\n'), ((3944, 3976), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['response'], {}), '(response)\n', (3966, 3976), True, 'import pandas as pd\n'), ((4350, 4367), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4361, 4367), False, 'from collections import defaultdict\n'), ((4390, 4407), 'tqdm.tqdm', 'tqdm', (['series_uids'], {}), '(series_uids)\n', (4394, 4407), False, 'from tqdm import tqdm\n'), ((4648, 4701), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['series_to_fps'], {'orient': '"""index"""'}), "(series_to_fps, orient='index')\n", (4670, 4701), True, 'import pandas as pd\n'), ((5298, 5335), 'os.path.join', 'os.path.join', (['dest_basedir', 'seriesuid'], {}), '(dest_basedir, seriesuid)\n', (5310, 5335), False, 'import os\n'), ((5593, 5627), 'os.path.join', 'os.path.join', (['dest_basedir', 
'zip_fn'], {}), '(dest_basedir, zip_fn)\n', (5605, 5627), False, 'import os\n'), ((6763, 6819), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': 'log_fmt'}), '(level=logging.DEBUG, format=log_fmt)\n', (6782, 6819), False, 'import logging\n'), ((7170, 7223), 'os.environ.get', 'os.environ.get', (['"""TCIA_API_KEY"""', '"""tcia key not found!"""'], {}), "('TCIA_API_KEY', 'tcia key not found!')\n", (7184, 7223), False, 'import os\n'), ((7238, 7335), 'os.environ.get', 'os.environ.get', (['"""BASEURL"""', '"""https://services.cancerimagingarchive.net/services/v4/TCIA/query"""'], {}), "('BASEURL',\n 'https://services.cancerimagingarchive.net/services/v4/TCIA/query')\n", (7252, 7335), False, 'import os\n'), ((7350, 7399), 'src.data.tcia.TCIAClient', 'TCIAClient', ([], {'credentials': 'tcia_key', 'baseUrl': 'baseurl'}), '(credentials=tcia_key, baseUrl=baseurl)\n', (7360, 7399), False, 'from src.data.tcia import TCIAClient\n'), ((7503, 7562), 'os.path.join', 'os.path.join', (['project_dir', '"""data"""', '"""external"""', '"""series.csv"""'], {}), "(project_dir, 'data', 'external', 'series.csv')\n", (7515, 7562), False, 'import os\n'), ((1338, 1374), 'pandas.read_csv', 'pd.read_csv', (['collection_reference_fp'], {}), '(collection_reference_fp)\n', (1349, 1374), True, 'import pandas as pd\n'), ((1502, 1524), 'pandas.read_csv', 'pd.read_csv', (['series_fp'], {}), '(series_fp)\n', (1513, 1524), True, 'import pandas as pd\n'), ((1950, 1970), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (1964, 1970), True, 'import numpy as np\n'), ((2563, 2593), 'os.path.exists', 'os.path.exists', (['collection_dir'], {}), '(collection_dir)\n', (2577, 2593), False, 'import os\n'), ((2684, 2726), 'os.makedirs', 'os.makedirs', (['collection_dir'], {'exist_ok': '(True)'}), '(collection_dir, exist_ok=True)\n', (2695, 2726), False, 'import os\n'), ((607, 619), 'click.Path', 'click.Path', ([], {}), '()\n', (617, 619), False, 'import 
click\n'), ((795, 838), 'click.IntRange', 'click.IntRange', ([], {'min': '(0)', 'max': '(1775)', 'clamp': '(True)'}), '(min=0, max=1775, clamp=True)\n', (809, 838), False, 'import click\n'), ((5420, 5450), 'os.path.exists', 'os.path.exists', (['series_destdir'], {}), '(series_destdir)\n', (5434, 5450), False, 'import os\n'), ((5455, 5484), 'os.path.isdir', 'os.path.isdir', (['series_destdir'], {}), '(series_destdir)\n', (5468, 5484), False, 'import os\n'), ((5489, 5515), 'os.listdir', 'os.listdir', (['series_destdir'], {}), '(series_destdir)\n', (5499, 5515), False, 'import os\n'), ((5685, 5707), 'os.path.exists', 'os.path.exists', (['zip_fp'], {}), '(zip_fp)\n', (5699, 5707), False, 'import os\n'), ((6515, 6543), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_fp', '"""r"""'], {}), "(zip_fp, 'r')\n", (6530, 6543), False, 'import zipfile\n'), ((6616, 6633), 'os.remove', 'os.remove', (['zip_fp'], {}), '(zip_fp)\n', (6625, 6633), False, 'import os\n'), ((7098, 7111), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (7109, 7111), False, 'from dotenv import find_dotenv, load_dotenv\n'), ((4546, 4575), 'os.path.join', 'os.path.join', (['series_dir', '"""*"""'], {}), "(series_dir, '*')\n", (4558, 4575), False, 'import os\n'), ((6122, 6145), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_fp'], {}), '(zip_fp)\n', (6137, 6145), False, 'import zipfile\n'), ((6420, 6437), 'os.remove', 'os.remove', (['zip_fp'], {}), '(zip_fp)\n', (6429, 6437), False, 'import os\n'), ((6910, 6924), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (6914, 6924), False, 'from pathlib import Path\n')]
import numpy as np
import pandas as pd
import re
from joblib import dump, load
from rdflib import Graph, Literal, RDF, URIRef, Namespace
import pathlib


def get_dir(path=''):
    """Return the full path to the provided files in the OpenPredict data folder
    Where models and features for runs are stored

    :param path: path relative to the package data folder (default: '')
    :return: absolute path string
    """
    return str(pathlib.Path(__file__).parent.absolute()) + "/" + path


def query_classifier_from_sparql(parsed_query):
    """Run the OpenPredict classifier for every `treats`/`treated_by` triple
    found in a parsed SPARQL query.

    :param parsed_query: rdflib parsed query; triples are read from
        ``parsed_query.algebra.p.p.triples``
    :return: list of prediction lists, one per matching triple
    """
    predictions_list = []
    for triple in parsed_query.algebra.p.p.triples:
        if str(triple[1]).endswith('://w3id.org/biolink/vocab/treats') or str(triple[1]).endswith('://w3id.org/biolink/vocab/treated_by'):
            # if predicate is treats then we get the entities to search
            curie_to_predict = None
            curie_to_predict2 = None
            # Bound URIs become CURIEs by stripping the identifiers.org prefix;
            # unbound variables are left as None.
            # if triple[0].startswith('http://') or triple[0].startswith('https://'):
            if isinstance(triple[0], URIRef):
                curie_to_predict = str(triple[0]).replace('https://identifiers.org/', '')
            # if triple[2].startswith('http://') or triple[0].startswith('https://'):
            if isinstance(triple[2], URIRef):
                if curie_to_predict:
                    curie_to_predict2 = str(triple[2]).replace('https://identifiers.org/', '')
                else:
                    curie_to_predict = str(triple[2]).replace('https://identifiers.org/', '')
            predictions_list.append(query_openpredict_classifier(curie_to_predict))
    return predictions_list


def query_openpredict_classifier(input_curie, model_id='openpredict-baseline-omim-drugbank'):
    """The main function to query the drug-disease OpenPredict classifier,
    It queries the previously generated classifier a `.joblib` file in the `data/models` folder

    :param input_curie: CURIE of a drug (DRUGBANK:...) or disease (OMIM:...)
    :param model_id: identifier of the features/model run to load
    :return: Predictions and scores
    """
    parsed_curie = re.search('(.*?):(.*)', input_curie)
    input_namespace = parsed_curie.group(1)
    input_id = parsed_curie.group(2)

    # resources_folder = "data/resources/"
    #features_folder = "data/features/"
    #drugfeatfiles = ['drugs-fingerprint-sim.csv','drugs-se-sim.csv',
    #                'drugs-ppi-sim.csv', 'drugs-target-go-sim.csv','drugs-target-seq-sim.csv']
    #diseasefeatfiles =['diseases-hpo-sim.csv',  'diseases-pheno-sim.csv' ]
    #drugfeatfiles = [ os.path.join(features_folder, fn) for fn in drugfeatfiles]
    #diseasefeatfiles = [ os.path.join(features_folder, fn) for fn in diseasefeatfiles]

    ## Get all DFs
    # Merge feature matrix
    #drug_df, disease_df = mergeFeatureMatrix(drugfeatfiles, diseasefeatfiles)
    # (drug_df, disease_df)= load('data/features/drug_disease_dataframes.joblib')

    # drug_df / disease_df are feature DataFrames with MultiIndex columns:
    # level 0 = feature name, level 1 = entity id (see .columns.levels below)
    print("📥 Loading features " + 'features/' + model_id + '.joblib')
    (drug_df, disease_df)= load(get_dir('data/features/' + model_id + '.joblib'))

    # TODO: should we update this file too when we create new runs?
    drugDiseaseKnown = pd.read_csv(get_dir('data/resources/openpredict-omim-drug.csv'),delimiter=',')
    drugDiseaseKnown.rename(columns={'drugid':'Drug','omimid':'Disease'}, inplace=True)
    drugDiseaseKnown.Disease = drugDiseaseKnown.Disease.astype(str)
    # TODO: save json?
    # Set of known (drug, disease) association tuples for O(1) lookup
    drugDiseaseDict = {tuple(x) for x in drugDiseaseKnown[['Drug','Disease']].values}

    drugwithfeatures = set(drug_df.columns.levels[1].tolist())
    diseaseswithfeatures = set(disease_df.columns.levels[1].tolist())

    # TODO: save json?
    # Only entities that have both features and known associations are scored
    commonDrugs= drugwithfeatures.intersection( drugDiseaseKnown.Drug.unique())
    commonDiseases=  diseaseswithfeatures.intersection(drugDiseaseKnown.Disease.unique() )

    # clf = load('data/models/openpredict-baseline-omim-drugbank.joblib')
    print('📥 Loading classifier models/' + model_id + '.joblib')
    clf = load(get_dir('data/models/' + model_id + '.joblib'))

    # Build every candidate pair between the query entity and the common
    # entities of the opposite kind; class 1 marks already-known associations.
    pairs=[]
    classes=[]
    if input_namespace.lower() == "drugbank":
        # Input is a drug, we only iterate on disease
        dr = input_id
        for di in commonDiseases:
            cls = (1 if (dr,di) in drugDiseaseDict else 0)
            pairs.append((dr,di))
            classes.append(cls)
    else:
        # Input is a disease
        di = input_id
        for dr in commonDrugs:
            cls = (1 if (dr,di) in drugDiseaseDict else 0)
            pairs.append((dr,di))
            classes.append(cls)

    classes = np.array(classes)
    pairs = np.array(pairs)

    # test_df = createFeaturesSparkOrDF(pairs, classes, drug_df, disease_df)
    test_df = createFeatureDF(pairs, classes, pairs[classes==1], drug_df, disease_df)

    # Score with the classifier; keep the probability of the positive class
    features = list(test_df.columns.difference(['Drug','Disease','Class']))
    y_proba = clf.predict_proba(test_df[features])

    prediction_df = pd.DataFrame( list(zip(pairs[:,0], pairs[:,1], y_proba[:,1])), columns =['drug','disease','score'])
    prediction_df.sort_values(by='score', inplace=True, ascending=False)
    # prediction_df = pd.DataFrame( list(zip(pairs[:,0], pairs[:,1], y_proba[:,1])), columns =[drug_column_label,disease_column_label,'score'])

    # Add namespace to get CURIEs from IDs
    prediction_df["drug"]= "DRUGBANK:" + prediction_df["drug"]
    prediction_df["disease"] ="OMIM:" + prediction_df["disease"]

    # prediction_results=prediction_df.to_json(orient='records')
    prediction_results=prediction_df.to_dict(orient='records')
    return prediction_results


def createFeatureDF(pairs, classes, knownDrugDisease, drugDFs, diseaseDFs):
    """Create the features dataframes.

    :param pairs: Generated pairs
    :param classes: Classes corresponding to the pairs
    :param knownDrugDisease: Known drug-disease associations
    :param drugDFs: Drug dataframes
    :param diseaseDFs: Disease dataframes
    :return: The features dataframe
    """
    df = pd.DataFrame(list(zip(pairs[:,0], pairs[:,1], classes)), columns =['Drug','Disease','Class'])
    # One feature column per (drug feature, disease feature) combination,
    # each scored by the geometric-mean similarity to known associations.
    for drug_col in drugDFs.columns.levels[0]:
        for disease_col in diseaseDFs.columns.levels[0]:
            drugDF = drugDFs[drug_col]
            diseaseDF = diseaseDFs[disease_col]
            feature_series = df.apply(lambda row: geometricMean(
                row.Drug, row.Disease, knownDrugDisease, drugDF, diseaseDF), axis=1)
            #print (feature_series)
            df["Feature_"+str(drug_col)+'_'+str(disease_col)] = feature_series
    return df


def geometricMean(drug, disease, knownDrugDisease, drugDF, diseaseDF):
    """Compute the geometric means of a drug-disease association using previously generated dataframes

    :param drug: Drug
    :param disease: Disease
    :param knownDrugDisease: Known drug-disease associations
    :param drugDF: Drug dataframe
    :param diseaseDF: Disease dataframe
    :return: max geometric-mean similarity to any known association,
        excluding the (drug, disease) pair itself
    """
    a = drugDF.loc[knownDrugDisease[:,0]][drug].values
    b = diseaseDF.loc[knownDrugDisease[:,1]][disease].values
    c = np.sqrt( np.multiply(a,b) )
    # zero out the entry for the queried pair itself so it cannot dominate
    ix2 = (knownDrugDisease == [drug, disease])
    c[ix2[:,1]& ix2[:,0]]=0.0
    return float(max(c))
[ "numpy.array", "numpy.multiply", "pathlib.Path", "re.search" ]
[((1865, 1901), 're.search', 're.search', (['"""(.*?):(.*)"""', 'input_curie'], {}), "('(.*?):(.*)', input_curie)\n", (1874, 1901), False, 'import re\n'), ((4525, 4542), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (4533, 4542), True, 'import numpy as np\n'), ((4555, 4570), 'numpy.array', 'np.array', (['pairs'], {}), '(pairs)\n', (4563, 4570), True, 'import numpy as np\n'), ((7524, 7541), 'numpy.multiply', 'np.multiply', (['a', 'b'], {}), '(a, b)\n', (7535, 7541), True, 'import numpy as np\n'), ((329, 351), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (341, 351), False, 'import pathlib\n')]
# -*- coding: utf-8 -*-
"""
computeKey

computes the musical key of an input audio file

  Args:
    afAudioData: array with floating point audio data.
    f_s: sample rate
    afWindow: FFT window of length iBlockLength (default: hann)
    iBlockLength: internal block length (default: 4096 samples)
    iHopLength: internal hop length (default: 2048 samples)

  Returns:
    key string
"""

import numpy as np
from scipy.signal import spectrogram
from ToolComputeHann import ToolComputeHann
from FeatureSpectralPitchChroma import FeatureSpectralPitchChroma


def computeKey(afAudioData, f_s, afWindow=None, iBlockLength=4096, iHopLength=2048):
    """Estimate the musical key by matching the averaged pitch chroma of the
    signal against rotated Krumhansl major/minor key profiles (Manhattan
    distance); returns the key name string (see module docstring for args).
    """
    # compute window function for FFT
    if afWindow is None:
        afWindow = ToolComputeHann(iBlockLength)

    assert(afWindow.shape[0] == iBlockLength), "parameter error: invalid window dimension"

    # key names: 12 major keys followed by 12 minor keys, chromatic order
    cKeyNames = np.array(['C Maj','C# Maj','D Maj','D# Maj','E Maj','F Maj','F# Maj','G Maj','G# Maj','A Maj','A# Maj','B Maj',
                          'c min','c# min','d min','d# min','e min','f min','f# min','g min','g# min','a min','a# min','b min'])

    # template pitch chroma (Krumhansl major/minor), normalized to a sum of 1
    t_pc = np.array([[6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52 ,5.19, 2.39, 3.66, 2.29, 2.88],
                     [6.33, 2.68, 3.52, 5.38, 2.60, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34, 3.17]])
    t_pc = t_pc/t_pc.sum(axis=1,keepdims=True)

    # pre-processing: downmixing
    if afAudioData.ndim > 1:
        afAudioData = afAudioData.mean(axis=1)

    # pre-processing: normalization (guard against an all-zero signal)
    fNorm = np.max(np.abs(afAudioData))
    if fNorm != 0:
        afAudioData = afAudioData/fNorm

    # in the real world, we would do this block by block...
    # frequency and time vectors from spectrogram are not needed here
    _, _, X = spectrogram(afAudioData,
                           f_s,
                           afWindow,
                           iBlockLength,
                           iBlockLength - iHopLength,
                           iBlockLength,
                           False,
                           True,
                           'spectrum')

    # scale the same as for matlab
    X = np.sqrt(X/2)

    # compute instantaneous pitch chroma
    v_pc = FeatureSpectralPitchChroma(X,f_s)

    # average pitch chroma over all blocks
    v_pc = v_pc.mean(axis=1)

    # compute manhattan distances for modes (major and minor):
    # duplicate the averaged chroma so both template rows (major, minor)
    # are compared against it for each of the 12 rotations
    d = np.zeros(t_pc.shape)
    v_pc = np.concatenate((v_pc,v_pc),axis = 0).reshape(2,12)
    for i in range(12):
        d[:,i] = np.sum(np.abs(v_pc - np.roll(t_pc, i, axis = 1)), axis = 1)

    # get unwrapped key index (row-major argmin: 0-11 major, 12-23 minor)
    iKeyIdx = d.argmin()

    cKey = cKeyNames[iKeyIdx]

    return (cKey)


def computeKeyCl(cPath):
    """Read an audio file from cPath, detect its key, and print the result."""
    from ToolReadAudio import ToolReadAudio

    [f_s,afAudioData] = ToolReadAudio(cPath)
    #afAudioData = np.sin(2*np.pi * np.arange(f_s*1)*440./f_s)
    cKey = computeKey(afAudioData, f_s)

    print("\ndetected key: ", cKey)


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Compute key of wav file')
    parser.add_argument('--infile', metavar='path', required=False, help='path to input audio file')
    cPath = parser.parse_args().infile

    #only for debugging
    if not cPath:
        cPath = "c:/temp/test.wav"

    # call the function
    computeKeyCl(cPath)
[ "numpy.abs", "numpy.sqrt", "numpy.roll", "argparse.ArgumentParser", "scipy.signal.spectrogram", "ToolComputeHann.ToolComputeHann", "numpy.array", "numpy.zeros", "numpy.concatenate", "FeatureSpectralPitchChroma.FeatureSpectralPitchChroma", "ToolReadAudio.ToolReadAudio" ]
[((911, 1159), 'numpy.array', 'np.array', (["['C Maj', 'C# Maj', 'D Maj', 'D# Maj', 'E Maj', 'F Maj', 'F# Maj', 'G Maj',\n 'G# Maj', 'A Maj', 'A# Maj', 'B Maj', 'c min', 'c# min', 'd min',\n 'd# min', 'e min', 'f min', 'f# min', 'g min', 'g# min', 'a min',\n 'a# min', 'b min']"], {}), "(['C Maj', 'C# Maj', 'D Maj', 'D# Maj', 'E Maj', 'F Maj', 'F# Maj',\n 'G Maj', 'G# Maj', 'A Maj', 'A# Maj', 'B Maj', 'c min', 'c# min',\n 'd min', 'd# min', 'e min', 'f min', 'f# min', 'g min', 'g# min',\n 'a min', 'a# min', 'b min'])\n", (919, 1159), True, 'import numpy as np\n'), ((1241, 1406), 'numpy.array', 'np.array', (['[[6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29, 2.88],\n [6.33, 2.68, 3.52, 5.38, 2.6, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34, 3.17]]'], {}), '([[6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29,\n 2.88], [6.33, 2.68, 3.52, 5.38, 2.6, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34,\n 3.17]])\n', (1249, 1406), True, 'import numpy as np\n'), ((1808, 1931), 'scipy.signal.spectrogram', 'spectrogram', (['afAudioData', 'f_s', 'afWindow', 'iBlockLength', '(iBlockLength - iHopLength)', 'iBlockLength', '(False)', '(True)', '"""spectrum"""'], {}), "(afAudioData, f_s, afWindow, iBlockLength, iBlockLength -\n iHopLength, iBlockLength, False, True, 'spectrum')\n", (1819, 1931), False, 'from scipy.signal import spectrogram\n'), ((2199, 2213), 'numpy.sqrt', 'np.sqrt', (['(X / 2)'], {}), '(X / 2)\n', (2206, 2213), True, 'import numpy as np\n'), ((2269, 2303), 'FeatureSpectralPitchChroma.FeatureSpectralPitchChroma', 'FeatureSpectralPitchChroma', (['X', 'f_s'], {}), '(X, f_s)\n', (2295, 2303), False, 'from FeatureSpectralPitchChroma import FeatureSpectralPitchChroma\n'), ((2440, 2460), 'numpy.zeros', 'np.zeros', (['t_pc.shape'], {}), '(t_pc.shape)\n', (2448, 2460), True, 'import numpy as np\n'), ((2849, 2869), 'ToolReadAudio.ToolReadAudio', 'ToolReadAudio', (['cPath'], {}), '(cPath)\n', (2862, 2869), False, 'from ToolReadAudio import ToolReadAudio\n'), ((3091, 
3153), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute key of wav file"""'}), "(description='Compute key of wav file')\n", (3114, 3153), False, 'import argparse\n'), ((756, 785), 'ToolComputeHann.ToolComputeHann', 'ToolComputeHann', (['iBlockLength'], {}), '(iBlockLength)\n', (771, 785), False, 'from ToolComputeHann import ToolComputeHann\n'), ((1644, 1663), 'numpy.abs', 'np.abs', (['afAudioData'], {}), '(afAudioData)\n', (1650, 1663), True, 'import numpy as np\n'), ((2473, 2509), 'numpy.concatenate', 'np.concatenate', (['(v_pc, v_pc)'], {'axis': '(0)'}), '((v_pc, v_pc), axis=0)\n', (2487, 2509), True, 'import numpy as np\n'), ((2588, 2612), 'numpy.roll', 'np.roll', (['t_pc', 'i'], {'axis': '(1)'}), '(t_pc, i, axis=1)\n', (2595, 2612), True, 'import numpy as np\n')]
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the staff page processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from absl.testing import absltest import numpy as np from moonlight import engine from moonlight import structure as structure_module from moonlight.glyphs import testing as glyphs_testing from moonlight.protobuf import musicscore_pb2 from moonlight.staves import staff_processor from moonlight.staves import testing as staves_testing class StaffProcessorTest(absltest.TestCase): def testGetPage_x_scale(self): # Random staffline images matching the dimensions of PREDICTIONS. dummy_stafflines = np.random.random((2, 3, 5, 6)) classifier = glyphs_testing.DummyGlyphClassifier(glyphs_testing.PREDICTIONS) image = np.random.randint(0, 255, (30, 20), dtype=np.uint8) staves = staves_testing.FakeStaves( image_t=image, staves_t=np.asarray([[[0, 10], [19, 10]], [[0, 20], [19, 20]]], np.int32), staffline_distance_t=np.asarray([5, 20], np.int32), staffline_thickness_t=np.asarray(1, np.int32)) structure = structure_module.create_structure( image, lambda unused_image: staves) class DummyStafflineExtractor(object): """A placeholder for StafflineExtractor. It only contains the constants necessary to scale the x coordinates. 
""" staffline_distance_multiple = 2 target_height = 10 omr = engine.OMREngine(lambda _: classifier) page = omr.process_image( # Feed in a dummy image. It doesn't matter because FakeStaves has # hard-coded staff values. np.random.randint(0, 255, (100, 100)), process_structure=False) page = staff_processor.StaffProcessor(structure, DummyStafflineExtractor()).apply(page) self.assertEqual(len(page.system[0].staff), 2) # The first staff has a staffline distance of 5. # The extracted staffline slices have an original height of # staffline_distance * staffline_distance_multiple (10), which equals # target_height here, so there is no scaling. self.assertEqual( musicscore_pb2.Staff(glyph=page.system[0].staff[0].glyph), glyphs_testing.GLYPHS_PAGE.system[0].staff[0]) # Glyphs in the second staff have a scaled x coordinate. self.assertEqual( len(page.system[0].staff[1].glyph), len(glyphs_testing.GLYPHS_PAGE.system[0].staff[1].glyph)) for glyph in glyphs_testing.GLYPHS_PAGE.system[0].staff[1].glyph: expected_glyph = copy.deepcopy(glyph) # The second staff has a staffline distance of 20. The extracted staffline # slice would be 4 times the size of the scaled staffline, so x # coordinates are scaled by 4. Also, the glyphs may be in a different # order. expected_glyph.x *= 4 self.assertIn(expected_glyph, page.system[0].staff[1].glyph) if __name__ == '__main__': absltest.main()
[ "numpy.random.random", "moonlight.glyphs.testing.DummyGlyphClassifier", "numpy.asarray", "absl.testing.absltest.main", "moonlight.protobuf.musicscore_pb2.Staff", "numpy.random.randint", "moonlight.structure.create_structure", "copy.deepcopy", "moonlight.engine.OMREngine" ]
[((3565, 3580), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3578, 3580), False, 'from absl.testing import absltest\n'), ((1251, 1281), 'numpy.random.random', 'np.random.random', (['(2, 3, 5, 6)'], {}), '((2, 3, 5, 6))\n', (1267, 1281), True, 'import numpy as np\n'), ((1299, 1362), 'moonlight.glyphs.testing.DummyGlyphClassifier', 'glyphs_testing.DummyGlyphClassifier', (['glyphs_testing.PREDICTIONS'], {}), '(glyphs_testing.PREDICTIONS)\n', (1334, 1362), True, 'from moonlight.glyphs import testing as glyphs_testing\n'), ((1375, 1426), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(30, 20)'], {'dtype': 'np.uint8'}), '(0, 255, (30, 20), dtype=np.uint8)\n', (1392, 1426), True, 'import numpy as np\n'), ((1732, 1801), 'moonlight.structure.create_structure', 'structure_module.create_structure', (['image', '(lambda unused_image: staves)'], {}), '(image, lambda unused_image: staves)\n', (1765, 1801), True, 'from moonlight import structure as structure_module\n'), ((2062, 2100), 'moonlight.engine.OMREngine', 'engine.OMREngine', (['(lambda _: classifier)'], {}), '(lambda _: classifier)\n', (2078, 2100), False, 'from moonlight import engine\n'), ((2248, 2285), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(100, 100)'], {}), '(0, 255, (100, 100))\n', (2265, 2285), True, 'import numpy as np\n'), ((2776, 2833), 'moonlight.protobuf.musicscore_pb2.Staff', 'musicscore_pb2.Staff', ([], {'glyph': 'page.system[0].staff[0].glyph'}), '(glyph=page.system[0].staff[0].glyph)\n', (2796, 2833), False, 'from moonlight.protobuf import musicscore_pb2\n'), ((3176, 3196), 'copy.deepcopy', 'copy.deepcopy', (['glyph'], {}), '(glyph)\n', (3189, 3196), False, 'import copy\n'), ((1507, 1571), 'numpy.asarray', 'np.asarray', (['[[[0, 10], [19, 10]], [[0, 20], [19, 20]]]', 'np.int32'], {}), '([[[0, 10], [19, 10]], [[0, 20], [19, 20]]], np.int32)\n', (1517, 1571), True, 'import numpy as np\n'), ((1630, 1659), 'numpy.asarray', 'np.asarray', (['[5, 
20]', 'np.int32'], {}), '([5, 20], np.int32)\n', (1640, 1659), True, 'import numpy as np\n'), ((1691, 1714), 'numpy.asarray', 'np.asarray', (['(1)', 'np.int32'], {}), '(1, np.int32)\n', (1701, 1714), True, 'import numpy as np\n')]
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Tests for the photometry module. """ import pytest import numpy as np from numpy.testing import (assert_allclose, assert_array_equal, assert_array_less) from astropy.coordinates import SkyCoord from astropy.io import fits from astropy.nddata import NDData, StdDevUncertainty from astropy.table import Table import astropy.units as u from astropy.utils.exceptions import AstropyDeprecationWarning from astropy.wcs import WCS from ..photometry import aperture_photometry from ..circle import (CircularAperture, CircularAnnulus, SkyCircularAperture, SkyCircularAnnulus) from ..ellipse import (EllipticalAperture, EllipticalAnnulus, SkyEllipticalAperture, SkyEllipticalAnnulus) from ..rectangle import (RectangularAperture, RectangularAnnulus, SkyRectangularAperture, SkyRectangularAnnulus) from ...datasets import get_path, make_4gaussians_image, make_wcs, make_gwcs from ...utils._optional_deps import HAS_GWCS, HAS_MATPLOTLIB # noqa APERTURE_CL = [CircularAperture, CircularAnnulus, EllipticalAperture, EllipticalAnnulus, RectangularAperture, RectangularAnnulus] TEST_APERTURES = list(zip(APERTURE_CL, ((3.,), (3., 5.), (3., 5., 1.), (3., 5., 4., 12./5., 1.), (5, 8, np.pi / 4), (8, 12, 8, 16./3., np.pi / 8)))) @pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES) def test_outside_array(aperture_class, params): data = np.ones((10, 10), dtype=float) aperture = aperture_class((-60, 60), *params) fluxtable = aperture_photometry(data, aperture) # aperture is fully outside array: assert np.isnan(fluxtable['aperture_sum']) @pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES) def test_inside_array_simple(aperture_class, params): data = np.ones((40, 40), dtype=float) aperture = aperture_class((20., 20.), *params) table1 = aperture_photometry(data, aperture, method='center', subpixels=10) table2 = aperture_photometry(data, aperture, method='subpixel', subpixels=10) table3 = 
aperture_photometry(data, aperture, method='exact', subpixels=10) true_flux = aperture.area assert table1['aperture_sum'] < table3['aperture_sum'] if not isinstance(aperture, (RectangularAperture, RectangularAnnulus)): assert_allclose(table3['aperture_sum'], true_flux) assert_allclose(table2['aperture_sum'], table3['aperture_sum'], atol=0.1) @pytest.mark.skipif('not HAS_MATPLOTLIB') @pytest.mark.parametrize(('aperture_class', 'params'), TEST_APERTURES) def test_aperture_plots(aperture_class, params): # This test should run without any errors, and there is no return # value. # TODO: check the content of the plot aperture = aperture_class((20., 20.), *params) aperture.plot() def test_aperture_pixel_positions(): pos1 = (10, 20) pos2 = [(10, 20)] r = 3 ap1 = CircularAperture(pos1, r) ap2 = CircularAperture(pos2, r) assert not np.array_equal(ap1.positions, ap2.positions) class BaseTestAperturePhotometry: def test_array_error(self): # Array error error = np.ones(self.data.shape, dtype=float) if not hasattr(self, 'mask'): mask = None true_error = np.sqrt(self.area) else: mask = self.mask # 1 masked pixel true_error = np.sqrt(self.area - 1) table1 = aperture_photometry(self.data, self.aperture, method='center', mask=mask, error=error) table2 = aperture_photometry(self.data, self.aperture, method='subpixel', subpixels=12, mask=mask, error=error) table3 = aperture_photometry(self.data, self.aperture, method='exact', mask=mask, error=error) if not isinstance(self.aperture, (RectangularAperture, RectangularAnnulus)): assert_allclose(table3['aperture_sum'], self.true_flux) assert_allclose(table2['aperture_sum'], table3['aperture_sum'], atol=0.1) assert np.all(table1['aperture_sum'] < table3['aperture_sum']) if not isinstance(self.aperture, (RectangularAperture, RectangularAnnulus)): assert_allclose(table3['aperture_sum_err'], true_error) assert_allclose(table2['aperture_sum_err'], table3['aperture_sum_err'], atol=0.1) assert np.all(table1['aperture_sum_err'] < 
table3['aperture_sum_err']) class TestCircular(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=float) position = (20., 20.) r = 10. self.aperture = CircularAperture(position, r) self.area = np.pi * r * r self.true_flux = self.area class TestCircularArray(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=float) position = ((20., 20.), (25., 25.)) r = 10. self.aperture = CircularAperture(position, r) self.area = np.pi * r * r self.area = np.array((self.area, ) * 2) self.true_flux = self.area class TestCircularAnnulus(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=float) position = (20., 20.) r_in = 8. r_out = 10. self.aperture = CircularAnnulus(position, r_in, r_out) self.area = np.pi * (r_out * r_out - r_in * r_in) self.true_flux = self.area class TestCircularAnnulusArray(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=float) position = ((20., 20.), (25., 25.)) r_in = 8. r_out = 10. self.aperture = CircularAnnulus(position, r_in, r_out) self.area = np.pi * (r_out * r_out - r_in * r_in) self.area = np.array((self.area, ) * 2) self.true_flux = self.area class TestElliptical(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=float) position = (20., 20.) a = 10. b = 5. theta = -np.pi / 4. self.aperture = EllipticalAperture(position, a, b, theta=theta) self.area = np.pi * a * b self.true_flux = self.area class TestEllipticalAnnulus(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=float) position = (20., 20.) a_in = 5. a_out = 8. b_out = 5. theta = -np.pi / 4. 
self.aperture = EllipticalAnnulus(position, a_in, a_out, b_out, theta=theta) self.area = (np.pi * (a_out * b_out) - np.pi * (a_in * b_out * a_in / a_out)) self.true_flux = self.area class TestRectangularAperture(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=float) position = (20., 20.) h = 5. w = 8. theta = np.pi / 4. self.aperture = RectangularAperture(position, w, h, theta=theta) self.area = h * w self.true_flux = self.area class TestRectangularAnnulus(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=float) position = (20., 20.) h_out = 8. w_in = 8. w_out = 12. h_in = w_in * h_out / w_out theta = np.pi / 8. self.aperture = RectangularAnnulus(position, w_in, w_out, h_out, theta=theta) self.area = h_out * w_out - h_in * w_in self.true_flux = self.area class TestMaskedSkipCircular(BaseTestAperturePhotometry): def setup_class(self): self.data = np.ones((40, 40), dtype=float) self.mask = np.zeros((40, 40), dtype=bool) self.mask[20, 20] = True position = (20., 20.) r = 10. 
self.aperture = CircularAperture(position, r) self.area = np.pi * r * r self.true_flux = self.area - 1 class BaseTestDifferentData: def test_basic_circular_aperture_photometry(self): aperture = CircularAperture(self.position, self.radius) table = aperture_photometry(self.data, aperture, method='exact') assert_allclose(table['aperture_sum'].value, self.true_flux) assert table['aperture_sum'].unit, self.fluxunit assert np.all(table['xcenter'].value == np.transpose(self.position)[0]) assert np.all(table['ycenter'].value == np.transpose(self.position)[1]) class TestInputNDData(BaseTestDifferentData): def setup_class(self): data = np.ones((40, 40), dtype=float) self.data = NDData(data, unit=u.adu) self.radius = 3 self.position = [(20, 20), (30, 30)] self.true_flux = np.pi * self.radius * self.radius self.fluxunit = u.adu @pytest.mark.remote_data def test_wcs_based_photometry_to_catalogue(): pathcat = get_path('spitzer_example_catalog.xml', location='remote') pathhdu = get_path('spitzer_example_image.fits', location='remote') hdu = fits.open(pathhdu) data = u.Quantity(hdu[0].data, unit=hdu[0].header['BUNIT']) wcs = WCS(hdu[0].header) catalog = Table.read(pathcat) pos_skycoord = SkyCoord(catalog['l'], catalog['b'], frame='galactic') photometry_skycoord = aperture_photometry( data, SkyCircularAperture(pos_skycoord, 4 * u.arcsec), wcs=wcs) # Photometric unit conversion is needed to match the catalogue factor = (1.2 * u.arcsec) ** 2 / u.pixel converted_aperture_sum = (photometry_skycoord['aperture_sum'] * factor).to(u.mJy / u.pixel) fluxes_catalog = catalog['f4_5'].filled() # There shouldn't be large outliers, but some differences is OK, as # fluxes_catalog is based on PSF photometry, etc. 
assert_allclose(fluxes_catalog, converted_aperture_sum.value, rtol=1e0) assert(np.mean(np.fabs(((fluxes_catalog - converted_aperture_sum.value) / fluxes_catalog))) < 0.1) # close the file hdu.close() def test_wcs_based_photometry(): data = make_4gaussians_image() wcs = make_wcs(data.shape) # hard wired positions in make_4gaussian_image pos_orig_pixel = u.Quantity(([160., 25., 150., 90.], [70., 40., 25., 60.]), unit=u.pixel) pos_skycoord = wcs.pixel_to_world(pos_orig_pixel[0], pos_orig_pixel[1]) pos_skycoord_s = pos_skycoord[2] photometry_skycoord_circ = aperture_photometry( data, SkyCircularAperture(pos_skycoord, 3 * u.arcsec), wcs=wcs) photometry_skycoord_circ_2 = aperture_photometry( data, SkyCircularAperture(pos_skycoord, 2 * u.arcsec), wcs=wcs) photometry_skycoord_circ_s = aperture_photometry( data, SkyCircularAperture(pos_skycoord_s, 3 * u.arcsec), wcs=wcs) assert_allclose(photometry_skycoord_circ['aperture_sum'][2], photometry_skycoord_circ_s['aperture_sum']) photometry_skycoord_circ_ann = aperture_photometry( data, SkyCircularAnnulus(pos_skycoord, 2 * u.arcsec, 3 * u.arcsec), wcs=wcs) photometry_skycoord_circ_ann_s = aperture_photometry( data, SkyCircularAnnulus(pos_skycoord_s, 2 * u.arcsec, 3 * u.arcsec), wcs=wcs) assert_allclose(photometry_skycoord_circ_ann['aperture_sum'][2], photometry_skycoord_circ_ann_s['aperture_sum']) assert_allclose(photometry_skycoord_circ_ann['aperture_sum'], photometry_skycoord_circ['aperture_sum'] - photometry_skycoord_circ_2['aperture_sum']) photometry_skycoord_ell = aperture_photometry( data, SkyEllipticalAperture(pos_skycoord, 3 * u.arcsec, 3.0001 * u.arcsec, theta=45 * u.arcsec), wcs=wcs) photometry_skycoord_ell_2 = aperture_photometry( data, SkyEllipticalAperture(pos_skycoord, 2 * u.arcsec, 2.0001 * u.arcsec, theta=45 * u.arcsec), wcs=wcs) photometry_skycoord_ell_s = aperture_photometry( data, SkyEllipticalAperture(pos_skycoord_s, 3 * u.arcsec, 3.0001 * u.arcsec, theta=45 * u.arcsec), wcs=wcs) photometry_skycoord_ell_ann = 
aperture_photometry( data, SkyEllipticalAnnulus(pos_skycoord, 2 * u.arcsec, 3 * u.arcsec, 3.0001 * u.arcsec, theta=45 * u.arcsec), wcs=wcs) photometry_skycoord_ell_ann_s = aperture_photometry( data, SkyEllipticalAnnulus(pos_skycoord_s, 2 * u.arcsec, 3 * u.arcsec, 3.0001 * u.arcsec, theta=45 * u.arcsec), wcs=wcs) assert_allclose(photometry_skycoord_ell['aperture_sum'][2], photometry_skycoord_ell_s['aperture_sum']) assert_allclose(photometry_skycoord_ell_ann['aperture_sum'][2], photometry_skycoord_ell_ann_s['aperture_sum']) assert_allclose(photometry_skycoord_ell['aperture_sum'], photometry_skycoord_circ['aperture_sum'], rtol=5e-3) assert_allclose(photometry_skycoord_ell_ann['aperture_sum'], photometry_skycoord_ell['aperture_sum'] - photometry_skycoord_ell_2['aperture_sum'], rtol=1e-4) photometry_skycoord_rec = aperture_photometry( data, SkyRectangularAperture(pos_skycoord, 6 * u.arcsec, 6 * u.arcsec, 0 * u.arcsec), method='subpixel', subpixels=20, wcs=wcs) photometry_skycoord_rec_4 = aperture_photometry( data, SkyRectangularAperture(pos_skycoord, 4 * u.arcsec, 4 * u.arcsec, 0 * u.arcsec), method='subpixel', subpixels=20, wcs=wcs) photometry_skycoord_rec_s = aperture_photometry( data, SkyRectangularAperture(pos_skycoord_s, 6 * u.arcsec, 6 * u.arcsec, 0 * u.arcsec), method='subpixel', subpixels=20, wcs=wcs) photometry_skycoord_rec_ann = aperture_photometry( data, SkyRectangularAnnulus(pos_skycoord, 4 * u.arcsec, 6 * u.arcsec, 6 * u.arcsec, theta=0 * u.arcsec), method='subpixel', subpixels=20, wcs=wcs) photometry_skycoord_rec_ann_s = aperture_photometry( data, SkyRectangularAnnulus(pos_skycoord_s, 4 * u.arcsec, 6 * u.arcsec, 6 * u.arcsec, theta=0 * u.arcsec), method='subpixel', subpixels=20, wcs=wcs) assert_allclose(photometry_skycoord_rec['aperture_sum'][2], photometry_skycoord_rec_s['aperture_sum']) assert np.all(photometry_skycoord_rec['aperture_sum'] > photometry_skycoord_circ['aperture_sum']) assert_allclose(photometry_skycoord_rec_ann['aperture_sum'][2], 
photometry_skycoord_rec_ann_s['aperture_sum']) assert_allclose(photometry_skycoord_rec_ann['aperture_sum'], photometry_skycoord_rec['aperture_sum'] - photometry_skycoord_rec_4['aperture_sum'], rtol=1e-4) def test_basic_circular_aperture_photometry_unit(): radius = 3 true_flux = np.pi * radius * radius aper = CircularAperture((12, 12), radius) data1 = np.ones((25, 25), dtype=float) table1 = aperture_photometry(data1, aper) assert_allclose(table1['aperture_sum'], true_flux) unit = u.adu data2 = u.Quantity(data1 * unit) table2 = aperture_photometry(data2, aper) assert_allclose(table2['aperture_sum'].value, true_flux) assert table2['aperture_sum'].unit == data2.unit == unit error1 = np.ones((25, 25)) with pytest.raises(ValueError): # data has unit, but error does not aperture_photometry(data2, aper, error=error1) error2 = u.Quantity(error1 * u.Jy) with pytest.raises(ValueError): # data and error have different units aperture_photometry(data2, aper, error=error2) def test_aperture_photometry_with_error_units(): """Test aperture_photometry when error has units (see #176).""" data1 = np.ones((40, 40), dtype=float) data2 = u.Quantity(data1, unit=u.adu) error = u.Quantity(data1, unit=u.adu) radius = 3 true_flux = np.pi * radius * radius unit = u.adu position = (20, 20) table1 = aperture_photometry(data2, CircularAperture(position, radius), error=error) assert_allclose(table1['aperture_sum'].value, true_flux) assert_allclose(table1['aperture_sum_err'].value, np.sqrt(true_flux)) assert table1['aperture_sum'].unit == unit assert table1['aperture_sum_err'].unit == unit def test_aperture_photometry_inputs_with_mask(): """ Test that aperture_photometry does not modify the input data or error array when a mask is input. """ data = np.ones((5, 5)) aperture = CircularAperture((2, 2), 2.) mask = np.zeros_like(data, dtype=bool) data[2, 2] = 100. 
# bad pixel mask[2, 2] = True error = np.sqrt(data) data_in = data.copy() error_in = error.copy() t1 = aperture_photometry(data, aperture, error=error, mask=mask) assert_array_equal(data, data_in) assert_array_equal(error, error_in) assert_allclose(t1['aperture_sum'][0], 11.5663706144) t2 = aperture_photometry(data, aperture) assert_allclose(t2['aperture_sum'][0], 111.566370614) TEST_ELLIPSE_EXACT_APERTURES = [(3.469906, 3.923861394, 3.), (0.3834415188257778, 0.3834415188257778, 0.3)] @pytest.mark.parametrize('x,y,r', TEST_ELLIPSE_EXACT_APERTURES) def test_ellipse_exact_grid(x, y, r): """ Test elliptical exact aperture photometry on a grid of pixel positions. This is a regression test for the bug discovered in this issue: https://github.com/astropy/photutils/issues/198 """ data = np.ones((10, 10)) aperture = EllipticalAperture((x, y), r, r, 0.) t = aperture_photometry(data, aperture, method='exact') actual = t['aperture_sum'][0] / (np.pi * r ** 2) assert_allclose(actual, 1) @pytest.mark.parametrize('value', [np.nan, np.inf]) def test_nan_inf_mask(value): """Test that nans and infs are properly masked [267].""" data = np.ones((9, 9)) mask = np.zeros_like(data, dtype=bool) data[4, 4] = value mask[4, 4] = True radius = 2. aper = CircularAperture((4, 4), radius) tbl = aperture_photometry(data, aper, mask=mask) desired = (np.pi * radius**2) - 1 assert_allclose(tbl['aperture_sum'], desired) def test_aperture_partial_overlap(): data = np.ones((20, 20)) error = np.ones((20, 20)) xypos = [(10, 10), (0, 0), (0, 19), (19, 0), (19, 19)] r = 5. 
aper = CircularAperture(xypos, r=r) tbl = aperture_photometry(data, aper, error=error) assert_allclose(tbl['aperture_sum'][0], np.pi * r ** 2) assert_array_less(tbl['aperture_sum'][1:], np.pi * r ** 2) unit = u.MJy / u.sr tbl = aperture_photometry(data * unit, aper, error=error * unit) assert_allclose(tbl['aperture_sum'][0].value, np.pi * r ** 2) assert_array_less(tbl['aperture_sum'][1:].value, np.pi * r ** 2) assert_array_less(tbl['aperture_sum_err'][1:].value, np.pi * r ** 2) assert tbl['aperture_sum'].unit == unit assert tbl['aperture_sum_err'].unit == unit def test_pixel_aperture_repr(): aper = CircularAperture((10, 20), r=3.0) assert '<CircularAperture(' in repr(aper) assert 'Aperture: CircularAperture' in str(aper) aper = CircularAnnulus((10, 20), r_in=3.0, r_out=5.0) assert '<CircularAnnulus(' in repr(aper) assert 'Aperture: CircularAnnulus' in str(aper) aper = EllipticalAperture((10, 20), a=5.0, b=3.0, theta=15.0) assert '<EllipticalAperture(' in repr(aper) assert 'Aperture: EllipticalAperture' in str(aper) aper = EllipticalAnnulus((10, 20), a_in=4.0, a_out=8.0, b_out=4.0, theta=15.0) assert '<EllipticalAnnulus(' in repr(aper) assert 'Aperture: EllipticalAnnulus' in str(aper) aper = RectangularAperture((10, 20), w=5.0, h=3.0, theta=15.0) assert '<RectangularAperture(' in repr(aper) assert 'Aperture: RectangularAperture' in str(aper) aper = RectangularAnnulus((10, 20), w_in=4.0, w_out=8.0, h_out=4.0, theta=15.0) assert '<RectangularAnnulus(' in repr(aper) assert 'Aperture: RectangularAnnulus' in str(aper) def test_sky_aperture_repr(): s = SkyCoord([1, 2], [3, 4], unit='deg') aper = SkyCircularAperture(s, r=3*u.deg) a_repr = ('<SkyCircularAperture(<SkyCoord (ICRS): (ra, dec) in deg\n' ' [(1., 3.), (2., 4.)]>, r=3.0 deg)>') a_str = ('Aperture: SkyCircularAperture\npositions: <SkyCoord ' '(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n' 'r: 3.0 deg') assert repr(aper) == a_repr assert str(aper) == a_str aper = SkyCircularAnnulus(s, r_in=3.*u.deg, 
r_out=5*u.deg) a_repr = ('<SkyCircularAnnulus(<SkyCoord (ICRS): (ra, dec) in deg\n' ' [(1., 3.), (2., 4.)]>, r_in=3.0 deg, r_out=5.0 deg)>') a_str = ('Aperture: SkyCircularAnnulus\npositions: <SkyCoord ' '(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n' 'r_in: 3.0 deg\nr_out: 5.0 deg') assert repr(aper) == a_repr assert str(aper) == a_str aper = SkyEllipticalAperture(s, a=3*u.deg, b=5*u.deg, theta=15*u.deg) a_repr = ('<SkyEllipticalAperture(<SkyCoord (ICRS): (ra, dec) in ' 'deg\n [(1., 3.), (2., 4.)]>, a=3.0 deg, b=5.0 deg, ' 'theta=15.0 deg)>') a_str = ('Aperture: SkyEllipticalAperture\npositions: <SkyCoord ' '(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n' 'a: 3.0 deg\nb: 5.0 deg\ntheta: 15.0 deg') assert repr(aper) == a_repr assert str(aper) == a_str aper = SkyEllipticalAnnulus(s, a_in=3*u.deg, a_out=5*u.deg, b_out=3*u.deg, theta=15*u.deg) a_repr = ('<SkyEllipticalAnnulus(<SkyCoord (ICRS): (ra, dec) in ' 'deg\n [(1., 3.), (2., 4.)]>, a_in=3.0 deg, ' 'a_out=5.0 deg, b_in=1.8 deg, b_out=3.0 deg, ' 'theta=15.0 deg)>') a_str = ('Aperture: SkyEllipticalAnnulus\npositions: <SkyCoord ' '(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n' 'a_in: 3.0 deg\na_out: 5.0 deg\nb_in: 1.8 deg\n' 'b_out: 3.0 deg\ntheta: 15.0 deg') assert repr(aper) == a_repr assert str(aper) == a_str aper = SkyRectangularAperture(s, w=3*u.deg, h=5*u.deg, theta=15*u.deg) a_repr = ('<SkyRectangularAperture(<SkyCoord (ICRS): (ra, dec) in ' 'deg\n [(1., 3.), (2., 4.)]>, w=3.0 deg, h=5.0 deg' ', theta=15.0 deg)>') a_str = ('Aperture: SkyRectangularAperture\npositions: <SkyCoord ' '(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n' 'w: 3.0 deg\nh: 5.0 deg\ntheta: 15.0 deg') assert repr(aper) == a_repr assert str(aper) == a_str aper = SkyRectangularAnnulus(s, w_in=5*u.deg, w_out=10*u.deg, h_out=6*u.deg, theta=15*u.deg) a_repr = ('<SkyRectangularAnnulus(<SkyCoord (ICRS): (ra, dec) in deg' '\n [(1., 3.), (2., 4.)]>, w_in=5.0 deg, ' 'w_out=10.0 deg, h_in=3.0 deg, h_out=6.0 deg, ' 'theta=15.0 
deg)>') a_str = ('Aperture: SkyRectangularAnnulus\npositions: <SkyCoord ' '(ICRS): (ra, dec) in deg\n [(1., 3.), (2., 4.)]>\n' 'w_in: 5.0 deg\nw_out: 10.0 deg\nh_in: 3.0 deg\n' 'h_out: 6.0 deg\ntheta: 15.0 deg') assert repr(aper) == a_repr assert str(aper) == a_str def test_rectangular_bbox(): # odd sizes width = 7 height = 3 a = RectangularAperture((50, 50), w=width, h=height, theta=0) assert a.bbox.shape == (height, width) a = RectangularAperture((50.5, 50.5), w=width, h=height, theta=0) assert a.bbox.shape == (height + 1, width + 1) a = RectangularAperture((50, 50), w=width, h=height, theta=90.*np.pi/180.) assert a.bbox.shape == (width, height) # even sizes width = 8 height = 4 a = RectangularAperture((50, 50), w=width, h=height, theta=0) assert a.bbox.shape == (height + 1, width + 1) a = RectangularAperture((50.5, 50.5), w=width, h=height, theta=0) assert a.bbox.shape == (height, width) a = RectangularAperture((50.5, 50.5), w=width, h=height, theta=90.*np.pi/180.) assert a.bbox.shape == (width, height) def test_elliptical_bbox(): # integer axes a = 7 b = 3 ap = EllipticalAperture((50, 50), a=a, b=b, theta=0) assert ap.bbox.shape == (2*b + 1, 2*a + 1) ap = EllipticalAperture((50.5, 50.5), a=a, b=b, theta=0) assert ap.bbox.shape == (2*b, 2*a) ap = EllipticalAperture((50, 50), a=a, b=b, theta=90.*np.pi/180.) assert ap.bbox.shape == (2*a + 1, 2*b + 1) # fractional axes a = 7.5 b = 4.5 ap = EllipticalAperture((50, 50), a=a, b=b, theta=0) assert ap.bbox.shape == (2*b, 2*a) ap = EllipticalAperture((50.5, 50.5), a=a, b=b, theta=0) assert ap.bbox.shape == (2*b + 1, 2*a + 1) ap = EllipticalAperture((50, 50), a=a, b=b, theta=90.*np.pi/180.) 
assert ap.bbox.shape == (2*a, 2*b) @pytest.mark.skipif('not HAS_GWCS') @pytest.mark.parametrize('wcs_type', ('wcs', 'gwcs')) def test_to_sky_pixel(wcs_type): data = make_4gaussians_image() if wcs_type == 'wcs': wcs = make_wcs(data.shape) elif wcs_type == 'gwcs': wcs = make_gwcs(data.shape) ap = CircularAperture(((12.3, 15.7), (48.19, 98.14)), r=3.14) ap2 = ap.to_sky(wcs).to_pixel(wcs) assert_allclose(ap.positions, ap2.positions) assert_allclose(ap.r, ap2.r) ap = CircularAnnulus(((12.3, 15.7), (48.19, 98.14)), r_in=3.14, r_out=5.32) ap2 = ap.to_sky(wcs).to_pixel(wcs) assert_allclose(ap.positions, ap2.positions) assert_allclose(ap.r_in, ap2.r_in) assert_allclose(ap.r_out, ap2.r_out) ap = EllipticalAperture(((12.3, 15.7), (48.19, 98.14)), a=3.14, b=5.32, theta=103.*np.pi/180.) ap2 = ap.to_sky(wcs).to_pixel(wcs) assert_allclose(ap.positions, ap2.positions) assert_allclose(ap.a, ap2.a) assert_allclose(ap.b, ap2.b) assert_allclose(ap.theta, ap2.theta) ap = EllipticalAnnulus(((12.3, 15.7), (48.19, 98.14)), a_in=3.14, a_out=15.32, b_out=4.89, theta=103.*np.pi/180.) ap2 = ap.to_sky(wcs).to_pixel(wcs) assert_allclose(ap.positions, ap2.positions) assert_allclose(ap.a_in, ap2.a_in) assert_allclose(ap.a_out, ap2.a_out) assert_allclose(ap.b_out, ap2.b_out) assert_allclose(ap.theta, ap2.theta) ap = RectangularAperture(((12.3, 15.7), (48.19, 98.14)), w=3.14, h=5.32, theta=103.*np.pi/180.) ap2 = ap.to_sky(wcs).to_pixel(wcs) assert_allclose(ap.positions, ap2.positions) assert_allclose(ap.w, ap2.w) assert_allclose(ap.h, ap2.h) assert_allclose(ap.theta, ap2.theta) ap = RectangularAnnulus(((12.3, 15.7), (48.19, 98.14)), w_in=3.14, w_out=15.32, h_out=4.89, theta=103.*np.pi/180.) 
ap2 = ap.to_sky(wcs).to_pixel(wcs) assert_allclose(ap.positions, ap2.positions) assert_allclose(ap.w_in, ap2.w_in) assert_allclose(ap.w_out, ap2.w_out) assert_allclose(ap.h_out, ap2.h_out) assert_allclose(ap.theta, ap2.theta) def test_position_units(): """Regression test for unit check.""" pos = (10, 10) * u.pix pos = np.sqrt(pos**2) with pytest.warns(AstropyDeprecationWarning): ap = CircularAperture(pos, r=3.) assert_allclose(ap.positions, np.array([10, 10])) def test_radius_units(): """Regression test for unit check.""" pos = SkyCoord(10, 10, unit='deg') r = 3.*u.pix r = np.sqrt(r**2) with pytest.warns(AstropyDeprecationWarning): ap = SkyCircularAperture(pos, r=r) assert ap.r.value == 3.0 assert ap.r.unit == u.pix def test_scalar_aperture(): """ Regression test to check that length-1 aperture list appends a "_0" on the column names to be consistent with list inputs. """ data = np.ones((20, 20), dtype=float) ap = CircularAperture((10, 10), r=3.) colnames1 = aperture_photometry(data, ap, error=data).colnames assert (colnames1 == ['id', 'xcenter', 'ycenter', 'aperture_sum', 'aperture_sum_err']) colnames2 = aperture_photometry(data, [ap], error=data).colnames assert (colnames2 == ['id', 'xcenter', 'ycenter', 'aperture_sum_0', 'aperture_sum_err_0']) colnames3 = aperture_photometry(data, [ap, ap], error=data).colnames assert (colnames3 == ['id', 'xcenter', 'ycenter', 'aperture_sum_0', 'aperture_sum_err_0', 'aperture_sum_1', 'aperture_sum_err_1']) def test_nan_in_bbox(): """ Regression test that non-finite data values outside of the aperture mask but within the bounding box do not affect the photometry. """ data1 = np.ones((101, 101)) data2 = data1.copy() data1[33, 33] = np.nan data1[67, 67] = np.inf data1[33, 67] = -np.inf data1[22, 22] = np.nan data1[22, 23] = np.inf error = data1.copy() aper1 = CircularAperture((50, 50), r=20.) aper2 = CircularAperture((5, 5), r=20.) 
tbl1 = aperture_photometry(data1, aper1, error=error) tbl2 = aperture_photometry(data2, aper1, error=error) assert_allclose(tbl1['aperture_sum'], tbl2['aperture_sum']) assert_allclose(tbl1['aperture_sum_err'], tbl2['aperture_sum_err']) tbl3 = aperture_photometry(data1, aper2, error=error) tbl4 = aperture_photometry(data2, aper2, error=error) assert_allclose(tbl3['aperture_sum'], tbl4['aperture_sum']) assert_allclose(tbl3['aperture_sum_err'], tbl4['aperture_sum_err']) def test_scalar_skycoord(): """ Regression test to check that scalar SkyCoords are added to the table as a length-1 SkyCoord array. """ data = make_4gaussians_image() wcs = make_wcs(data.shape) skycoord = wcs.pixel_to_world(90, 60) aper = SkyCircularAperture(skycoord, r=0.1*u.arcsec) tbl = aperture_photometry(data, aper, wcs=wcs) assert isinstance(tbl['sky_center'], SkyCoord) def test_nddata_input(): data = np.arange(400).reshape((20, 20)) error = np.sqrt(data) mask = np.zeros((20, 20), dtype=bool) mask[8:13, 8:13] = True unit = 'adu' wcs = make_wcs(data.shape) skycoord = wcs.pixel_to_world(10, 10) aper = SkyCircularAperture(skycoord, r=0.7*u.arcsec) tbl1 = aperture_photometry(data*u.adu, aper, error=error*u.adu, mask=mask, wcs=wcs) uncertainty = StdDevUncertainty(error) nddata = NDData(data, uncertainty=uncertainty, mask=mask, wcs=wcs, unit=unit) tbl2 = aperture_photometry(nddata, aper) for column in tbl1.columns: if column == 'sky_center': # cannot test SkyCoord equality continue assert_allclose(tbl1[column], tbl2[column])
[ "numpy.sqrt", "numpy.array", "astropy.io.fits.open", "astropy.nddata.NDData", "numpy.arange", "numpy.testing.assert_array_less", "numpy.testing.assert_allclose", "astropy.nddata.StdDevUncertainty", "pytest.mark.skipif", "numpy.testing.assert_array_equal", "numpy.ones", "pytest.warns", "numpy...
[((1661, 1730), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('aperture_class', 'params')", 'TEST_APERTURES'], {}), "(('aperture_class', 'params'), TEST_APERTURES)\n", (1684, 1730), False, 'import pytest\n'), ((2012, 2081), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('aperture_class', 'params')", 'TEST_APERTURES'], {}), "(('aperture_class', 'params'), TEST_APERTURES)\n", (2035, 2081), False, 'import pytest\n'), ((2870, 2910), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""not HAS_MATPLOTLIB"""'], {}), "('not HAS_MATPLOTLIB')\n", (2888, 2910), False, 'import pytest\n'), ((2912, 2981), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('aperture_class', 'params')", 'TEST_APERTURES'], {}), "(('aperture_class', 'params'), TEST_APERTURES)\n", (2935, 2981), False, 'import pytest\n'), ((18541, 18603), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x,y,r"""', 'TEST_ELLIPSE_EXACT_APERTURES'], {}), "('x,y,r', TEST_ELLIPSE_EXACT_APERTURES)\n", (18564, 18603), False, 'import pytest\n'), ((19085, 19135), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value"""', '[np.nan, np.inf]'], {}), "('value', [np.nan, np.inf])\n", (19108, 19135), False, 'import pytest\n'), ((26351, 26385), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""not HAS_GWCS"""'], {}), "('not HAS_GWCS')\n", (26369, 26385), False, 'import pytest\n'), ((26387, 26439), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""wcs_type"""', "('wcs', 'gwcs')"], {}), "('wcs_type', ('wcs', 'gwcs'))\n", (26410, 26439), False, 'import pytest\n'), ((1790, 1820), 'numpy.ones', 'np.ones', (['(10, 10)'], {'dtype': 'float'}), '((10, 10), dtype=float)\n', (1797, 1820), True, 'import numpy as np\n'), ((1973, 2008), 'numpy.isnan', 'np.isnan', (["fluxtable['aperture_sum']"], {}), "(fluxtable['aperture_sum'])\n", (1981, 2008), True, 'import numpy as np\n'), ((2147, 2177), 'numpy.ones', 'np.ones', (['(40, 40)'], {'dtype': 'float'}), '((40, 40), dtype=float)\n', (2154, 
2177), True, 'import numpy as np\n'), ((9921, 9939), 'astropy.io.fits.open', 'fits.open', (['pathhdu'], {}), '(pathhdu)\n', (9930, 9939), False, 'from astropy.io import fits\n'), ((9951, 10003), 'astropy.units.Quantity', 'u.Quantity', (['hdu[0].data'], {'unit': "hdu[0].header['BUNIT']"}), "(hdu[0].data, unit=hdu[0].header['BUNIT'])\n", (9961, 10003), True, 'import astropy.units as u\n'), ((10014, 10032), 'astropy.wcs.WCS', 'WCS', (['hdu[0].header'], {}), '(hdu[0].header)\n', (10017, 10032), False, 'from astropy.wcs import WCS\n'), ((10048, 10067), 'astropy.table.Table.read', 'Table.read', (['pathcat'], {}), '(pathcat)\n', (10058, 10067), False, 'from astropy.table import Table\n'), ((10088, 10142), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["catalog['l']", "catalog['b']"], {'frame': '"""galactic"""'}), "(catalog['l'], catalog['b'], frame='galactic')\n", (10096, 10142), False, 'from astropy.coordinates import SkyCoord\n'), ((10680, 10751), 'numpy.testing.assert_allclose', 'assert_allclose', (['fluxes_catalog', 'converted_aperture_sum.value'], {'rtol': '(1.0)'}), '(fluxes_catalog, converted_aperture_sum.value, rtol=1.0)\n', (10695, 10751), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((11096, 11181), 'astropy.units.Quantity', 'u.Quantity', (['([160.0, 25.0, 150.0, 90.0], [70.0, 40.0, 25.0, 60.0])'], {'unit': 'u.pixel'}), '(([160.0, 25.0, 150.0, 90.0], [70.0, 40.0, 25.0, 60.0]), unit=u.pixel\n )\n', (11106, 11181), True, 'import astropy.units as u\n'), ((11700, 11808), 'numpy.testing.assert_allclose', 'assert_allclose', (["photometry_skycoord_circ['aperture_sum'][2]", "photometry_skycoord_circ_s['aperture_sum']"], {}), "(photometry_skycoord_circ['aperture_sum'][2],\n photometry_skycoord_circ_s['aperture_sum'])\n", (11715, 11808), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((12133, 12249), 'numpy.testing.assert_allclose', 'assert_allclose', 
(["photometry_skycoord_circ_ann['aperture_sum'][2]", "photometry_skycoord_circ_ann_s['aperture_sum']"], {}), "(photometry_skycoord_circ_ann['aperture_sum'][2],\n photometry_skycoord_circ_ann_s['aperture_sum'])\n", (12148, 12249), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((12271, 12429), 'numpy.testing.assert_allclose', 'assert_allclose', (["photometry_skycoord_circ_ann['aperture_sum']", "(photometry_skycoord_circ['aperture_sum'] - photometry_skycoord_circ_2[\n 'aperture_sum'])"], {}), "(photometry_skycoord_circ_ann['aperture_sum'], \n photometry_skycoord_circ['aperture_sum'] - photometry_skycoord_circ_2[\n 'aperture_sum'])\n", (12286, 12429), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((13553, 13659), 'numpy.testing.assert_allclose', 'assert_allclose', (["photometry_skycoord_ell['aperture_sum'][2]", "photometry_skycoord_ell_s['aperture_sum']"], {}), "(photometry_skycoord_ell['aperture_sum'][2],\n photometry_skycoord_ell_s['aperture_sum'])\n", (13568, 13659), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((13681, 13795), 'numpy.testing.assert_allclose', 'assert_allclose', (["photometry_skycoord_ell_ann['aperture_sum'][2]", "photometry_skycoord_ell_ann_s['aperture_sum']"], {}), "(photometry_skycoord_ell_ann['aperture_sum'][2],\n photometry_skycoord_ell_ann_s['aperture_sum'])\n", (13696, 13795), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((13817, 13931), 'numpy.testing.assert_allclose', 'assert_allclose', (["photometry_skycoord_ell['aperture_sum']", "photometry_skycoord_circ['aperture_sum']"], {'rtol': '(0.005)'}), "(photometry_skycoord_ell['aperture_sum'],\n photometry_skycoord_circ['aperture_sum'], rtol=0.005)\n", (13832, 13931), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((13952, 14120), 
'numpy.testing.assert_allclose', 'assert_allclose', (["photometry_skycoord_ell_ann['aperture_sum']", "(photometry_skycoord_ell['aperture_sum'] - photometry_skycoord_ell_2[\n 'aperture_sum'])"], {'rtol': '(0.0001)'}), "(photometry_skycoord_ell_ann['aperture_sum'], \n photometry_skycoord_ell['aperture_sum'] - photometry_skycoord_ell_2[\n 'aperture_sum'], rtol=0.0001)\n", (13967, 14120), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((15516, 15622), 'numpy.testing.assert_allclose', 'assert_allclose', (["photometry_skycoord_rec['aperture_sum'][2]", "photometry_skycoord_rec_s['aperture_sum']"], {}), "(photometry_skycoord_rec['aperture_sum'][2],\n photometry_skycoord_rec_s['aperture_sum'])\n", (15531, 15622), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((15651, 15746), 'numpy.all', 'np.all', (["(photometry_skycoord_rec['aperture_sum'] > photometry_skycoord_circ[\n 'aperture_sum'])"], {}), "(photometry_skycoord_rec['aperture_sum'] > photometry_skycoord_circ[\n 'aperture_sum'])\n", (15657, 15746), True, 'import numpy as np\n'), ((15765, 15879), 'numpy.testing.assert_allclose', 'assert_allclose', (["photometry_skycoord_rec_ann['aperture_sum'][2]", "photometry_skycoord_rec_ann_s['aperture_sum']"], {}), "(photometry_skycoord_rec_ann['aperture_sum'][2],\n photometry_skycoord_rec_ann_s['aperture_sum'])\n", (15780, 15879), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((15901, 16069), 'numpy.testing.assert_allclose', 'assert_allclose', (["photometry_skycoord_rec_ann['aperture_sum']", "(photometry_skycoord_rec['aperture_sum'] - photometry_skycoord_rec_4[\n 'aperture_sum'])"], {'rtol': '(0.0001)'}), "(photometry_skycoord_rec_ann['aperture_sum'], \n photometry_skycoord_rec['aperture_sum'] - photometry_skycoord_rec_4[\n 'aperture_sum'], rtol=0.0001)\n", (15916, 16069), False, 'from numpy.testing import assert_allclose, assert_array_equal, 
assert_array_less\n'), ((16266, 16296), 'numpy.ones', 'np.ones', (['(25, 25)'], {'dtype': 'float'}), '((25, 25), dtype=float)\n', (16273, 16296), True, 'import numpy as np\n'), ((16347, 16397), 'numpy.testing.assert_allclose', 'assert_allclose', (["table1['aperture_sum']", 'true_flux'], {}), "(table1['aperture_sum'], true_flux)\n", (16362, 16397), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((16428, 16452), 'astropy.units.Quantity', 'u.Quantity', (['(data1 * unit)'], {}), '(data1 * unit)\n', (16438, 16452), True, 'import astropy.units as u\n'), ((16503, 16559), 'numpy.testing.assert_allclose', 'assert_allclose', (["table2['aperture_sum'].value", 'true_flux'], {}), "(table2['aperture_sum'].value, true_flux)\n", (16518, 16559), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((16635, 16652), 'numpy.ones', 'np.ones', (['(25, 25)'], {}), '((25, 25))\n', (16642, 16652), True, 'import numpy as np\n'), ((16802, 16827), 'astropy.units.Quantity', 'u.Quantity', (['(error1 * u.Jy)'], {}), '(error1 * u.Jy)\n', (16812, 16827), True, 'import astropy.units as u\n'), ((17097, 17127), 'numpy.ones', 'np.ones', (['(40, 40)'], {'dtype': 'float'}), '((40, 40), dtype=float)\n', (17104, 17127), True, 'import numpy as np\n'), ((17140, 17169), 'astropy.units.Quantity', 'u.Quantity', (['data1'], {'unit': 'u.adu'}), '(data1, unit=u.adu)\n', (17150, 17169), True, 'import astropy.units as u\n'), ((17182, 17211), 'astropy.units.Quantity', 'u.Quantity', (['data1'], {'unit': 'u.adu'}), '(data1, unit=u.adu)\n', (17192, 17211), True, 'import astropy.units as u\n'), ((17434, 17490), 'numpy.testing.assert_allclose', 'assert_allclose', (["table1['aperture_sum'].value", 'true_flux'], {}), "(table1['aperture_sum'].value, true_flux)\n", (17449, 17490), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((17848, 17863), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 
5))\n', (17855, 17863), True, 'import numpy as np\n'), ((17919, 17950), 'numpy.zeros_like', 'np.zeros_like', (['data'], {'dtype': 'bool'}), '(data, dtype=bool)\n', (17932, 17950), True, 'import numpy as np\n'), ((18020, 18033), 'numpy.sqrt', 'np.sqrt', (['data'], {}), '(data)\n', (18027, 18033), True, 'import numpy as np\n'), ((18161, 18194), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['data', 'data_in'], {}), '(data, data_in)\n', (18179, 18194), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((18199, 18234), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['error', 'error_in'], {}), '(error, error_in)\n', (18217, 18234), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((18239, 18292), 'numpy.testing.assert_allclose', 'assert_allclose', (["t1['aperture_sum'][0]", '(11.5663706144)'], {}), "(t1['aperture_sum'][0], 11.5663706144)\n", (18254, 18292), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((18342, 18395), 'numpy.testing.assert_allclose', 'assert_allclose', (["t2['aperture_sum'][0]", '(111.566370614)'], {}), "(t2['aperture_sum'][0], 111.566370614)\n", (18357, 18395), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((18867, 18884), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (18874, 18884), True, 'import numpy as np\n'), ((19055, 19081), 'numpy.testing.assert_allclose', 'assert_allclose', (['actual', '(1)'], {}), '(actual, 1)\n', (19070, 19081), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((19239, 19254), 'numpy.ones', 'np.ones', (['(9, 9)'], {}), '((9, 9))\n', (19246, 19254), True, 'import numpy as np\n'), ((19266, 19297), 'numpy.zeros_like', 'np.zeros_like', (['data'], {'dtype': 'bool'}), '(data, dtype=bool)\n', (19279, 19297), True, 'import numpy as np\n'), ((19498, 19543), 
'numpy.testing.assert_allclose', 'assert_allclose', (["tbl['aperture_sum']", 'desired'], {}), "(tbl['aperture_sum'], desired)\n", (19513, 19543), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((19594, 19611), 'numpy.ones', 'np.ones', (['(20, 20)'], {}), '((20, 20))\n', (19601, 19611), True, 'import numpy as np\n'), ((19624, 19641), 'numpy.ones', 'np.ones', (['(20, 20)'], {}), '((20, 20))\n', (19631, 19641), True, 'import numpy as np\n'), ((19811, 19866), 'numpy.testing.assert_allclose', 'assert_allclose', (["tbl['aperture_sum'][0]", '(np.pi * r ** 2)'], {}), "(tbl['aperture_sum'][0], np.pi * r ** 2)\n", (19826, 19866), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((19871, 19929), 'numpy.testing.assert_array_less', 'assert_array_less', (["tbl['aperture_sum'][1:]", '(np.pi * r ** 2)'], {}), "(tbl['aperture_sum'][1:], np.pi * r ** 2)\n", (19888, 19929), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((20028, 20089), 'numpy.testing.assert_allclose', 'assert_allclose', (["tbl['aperture_sum'][0].value", '(np.pi * r ** 2)'], {}), "(tbl['aperture_sum'][0].value, np.pi * r ** 2)\n", (20043, 20089), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((20094, 20158), 'numpy.testing.assert_array_less', 'assert_array_less', (["tbl['aperture_sum'][1:].value", '(np.pi * r ** 2)'], {}), "(tbl['aperture_sum'][1:].value, np.pi * r ** 2)\n", (20111, 20158), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((20163, 20231), 'numpy.testing.assert_array_less', 'assert_array_less', (["tbl['aperture_sum_err'][1:].value", '(np.pi * r ** 2)'], {}), "(tbl['aperture_sum_err'][1:].value, np.pi * r ** 2)\n", (20180, 20231), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((21473, 21509), 'astropy.coordinates.SkyCoord', 
'SkyCoord', (['[1, 2]', '[3, 4]'], {'unit': '"""deg"""'}), "([1, 2], [3, 4], unit='deg')\n", (21481, 21509), False, 'from astropy.coordinates import SkyCoord\n'), ((26745, 26789), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.positions', 'ap2.positions'], {}), '(ap.positions, ap2.positions)\n', (26760, 26789), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((26794, 26822), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.r', 'ap2.r'], {}), '(ap.r, ap2.r)\n', (26809, 26822), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((26972, 27016), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.positions', 'ap2.positions'], {}), '(ap.positions, ap2.positions)\n', (26987, 27016), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((27021, 27055), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.r_in', 'ap2.r_in'], {}), '(ap.r_in, ap2.r_in)\n', (27036, 27055), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((27060, 27096), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.r_out', 'ap2.r_out'], {}), '(ap.r_out, ap2.r_out)\n', (27075, 27096), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((27268, 27312), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.positions', 'ap2.positions'], {}), '(ap.positions, ap2.positions)\n', (27283, 27312), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((27317, 27345), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.a', 'ap2.a'], {}), '(ap.a, ap2.a)\n', (27332, 27345), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((27350, 27378), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.b', 'ap2.b'], {}), '(ap.b, ap2.b)\n', (27365, 27378), False, 'from 
numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((27383, 27419), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.theta', 'ap2.theta'], {}), '(ap.theta, ap2.theta)\n', (27398, 27419), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((27609, 27653), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.positions', 'ap2.positions'], {}), '(ap.positions, ap2.positions)\n', (27624, 27653), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((27658, 27692), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.a_in', 'ap2.a_in'], {}), '(ap.a_in, ap2.a_in)\n', (27673, 27692), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((27697, 27733), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.a_out', 'ap2.a_out'], {}), '(ap.a_out, ap2.a_out)\n', (27712, 27733), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((27738, 27774), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.b_out', 'ap2.b_out'], {}), '(ap.b_out, ap2.b_out)\n', (27753, 27774), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((27779, 27815), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.theta', 'ap2.theta'], {}), '(ap.theta, ap2.theta)\n', (27794, 27815), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((27989, 28033), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.positions', 'ap2.positions'], {}), '(ap.positions, ap2.positions)\n', (28004, 28033), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((28038, 28066), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.w', 'ap2.w'], {}), '(ap.w, ap2.w)\n', (28053, 28066), False, 'from numpy.testing import assert_allclose, assert_array_equal, 
assert_array_less\n'), ((28071, 28099), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.h', 'ap2.h'], {}), '(ap.h, ap2.h)\n', (28086, 28099), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((28104, 28140), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.theta', 'ap2.theta'], {}), '(ap.theta, ap2.theta)\n', (28119, 28140), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((28332, 28376), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.positions', 'ap2.positions'], {}), '(ap.positions, ap2.positions)\n', (28347, 28376), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((28381, 28415), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.w_in', 'ap2.w_in'], {}), '(ap.w_in, ap2.w_in)\n', (28396, 28415), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((28420, 28456), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.w_out', 'ap2.w_out'], {}), '(ap.w_out, ap2.w_out)\n', (28435, 28456), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((28461, 28497), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.h_out', 'ap2.h_out'], {}), '(ap.h_out, ap2.h_out)\n', (28476, 28497), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((28502, 28538), 'numpy.testing.assert_allclose', 'assert_allclose', (['ap.theta', 'ap2.theta'], {}), '(ap.theta, ap2.theta)\n', (28517, 28538), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((28647, 28664), 'numpy.sqrt', 'np.sqrt', (['(pos ** 2)'], {}), '(pos ** 2)\n', (28654, 28664), True, 'import numpy as np\n'), ((28891, 28919), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(10)', '(10)'], {'unit': '"""deg"""'}), "(10, 10, unit='deg')\n", (28899, 28919), False, 'from astropy.coordinates 
import SkyCoord\n'), ((28945, 28960), 'numpy.sqrt', 'np.sqrt', (['(r ** 2)'], {}), '(r ** 2)\n', (28952, 28960), True, 'import numpy as np\n'), ((29308, 29338), 'numpy.ones', 'np.ones', (['(20, 20)'], {'dtype': 'float'}), '((20, 20), dtype=float)\n', (29315, 29338), True, 'import numpy as np\n'), ((30212, 30231), 'numpy.ones', 'np.ones', (['(101, 101)'], {}), '((101, 101))\n', (30219, 30231), True, 'import numpy as np\n'), ((30630, 30689), 'numpy.testing.assert_allclose', 'assert_allclose', (["tbl1['aperture_sum']", "tbl2['aperture_sum']"], {}), "(tbl1['aperture_sum'], tbl2['aperture_sum'])\n", (30645, 30689), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((30694, 30761), 'numpy.testing.assert_allclose', 'assert_allclose', (["tbl1['aperture_sum_err']", "tbl2['aperture_sum_err']"], {}), "(tbl1['aperture_sum_err'], tbl2['aperture_sum_err'])\n", (30709, 30761), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((30883, 30942), 'numpy.testing.assert_allclose', 'assert_allclose', (["tbl3['aperture_sum']", "tbl4['aperture_sum']"], {}), "(tbl3['aperture_sum'], tbl4['aperture_sum'])\n", (30898, 30942), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((30947, 31014), 'numpy.testing.assert_allclose', 'assert_allclose', (["tbl3['aperture_sum_err']", "tbl4['aperture_sum_err']"], {}), "(tbl3['aperture_sum_err'], tbl4['aperture_sum_err'])\n", (30962, 31014), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((31520, 31533), 'numpy.sqrt', 'np.sqrt', (['data'], {}), '(data)\n', (31527, 31533), True, 'import numpy as np\n'), ((31545, 31575), 'numpy.zeros', 'np.zeros', (['(20, 20)'], {'dtype': 'bool'}), '((20, 20), dtype=bool)\n', (31553, 31575), True, 'import numpy as np\n'), ((31890, 31914), 'astropy.nddata.StdDevUncertainty', 'StdDevUncertainty', (['error'], {}), '(error)\n', (31907, 31914), False, 
'from astropy.nddata import NDData, StdDevUncertainty\n'), ((31928, 31996), 'astropy.nddata.NDData', 'NDData', (['data'], {'uncertainty': 'uncertainty', 'mask': 'mask', 'wcs': 'wcs', 'unit': 'unit'}), '(data, uncertainty=uncertainty, mask=mask, wcs=wcs, unit=unit)\n', (31934, 31996), False, 'from astropy.nddata import NDData, StdDevUncertainty\n'), ((2710, 2760), 'numpy.testing.assert_allclose', 'assert_allclose', (["table3['aperture_sum']", 'true_flux'], {}), "(table3['aperture_sum'], true_flux)\n", (2725, 2760), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((2769, 2842), 'numpy.testing.assert_allclose', 'assert_allclose', (["table2['aperture_sum']", "table3['aperture_sum']"], {'atol': '(0.1)'}), "(table2['aperture_sum'], table3['aperture_sum'], atol=0.1)\n", (2784, 2842), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((3405, 3449), 'numpy.array_equal', 'np.array_equal', (['ap1.positions', 'ap2.positions'], {}), '(ap1.positions, ap2.positions)\n', (3419, 3449), True, 'import numpy as np\n'), ((3557, 3594), 'numpy.ones', 'np.ones', (['self.data.shape'], {'dtype': 'float'}), '(self.data.shape, dtype=float)\n', (3564, 3594), True, 'import numpy as np\n'), ((4733, 4788), 'numpy.all', 'np.all', (["(table1['aperture_sum'] < table3['aperture_sum'])"], {}), "(table1['aperture_sum'] < table3['aperture_sum'])\n", (4739, 4788), True, 'import numpy as np\n'), ((5122, 5185), 'numpy.all', 'np.all', (["(table1['aperture_sum_err'] < table3['aperture_sum_err'])"], {}), "(table1['aperture_sum_err'] < table3['aperture_sum_err'])\n", (5128, 5185), True, 'import numpy as np\n'), ((5284, 5314), 'numpy.ones', 'np.ones', (['(40, 40)'], {'dtype': 'float'}), '((40, 40), dtype=float)\n', (5291, 5314), True, 'import numpy as np\n'), ((5587, 5617), 'numpy.ones', 'np.ones', (['(40, 40)'], {'dtype': 'float'}), '((40, 40), dtype=float)\n', (5594, 5617), True, 'import numpy as np\n'), ((5786, 5812), 
'numpy.array', 'np.array', (['((self.area,) * 2)'], {}), '((self.area,) * 2)\n', (5794, 5812), True, 'import numpy as np\n'), ((5954, 5984), 'numpy.ones', 'np.ones', (['(40, 40)'], {'dtype': 'float'}), '((40, 40), dtype=float)\n', (5961, 5984), True, 'import numpy as np\n'), ((6319, 6349), 'numpy.ones', 'np.ones', (['(40, 40)'], {'dtype': 'float'}), '((40, 40), dtype=float)\n', (6326, 6349), True, 'import numpy as np\n'), ((6573, 6599), 'numpy.array', 'np.array', (['((self.area,) * 2)'], {}), '((self.area,) * 2)\n', (6581, 6599), True, 'import numpy as np\n'), ((6736, 6766), 'numpy.ones', 'np.ones', (['(40, 40)'], {'dtype': 'float'}), '((40, 40), dtype=float)\n', (6743, 6766), True, 'import numpy as np\n'), ((7104, 7134), 'numpy.ones', 'np.ones', (['(40, 40)'], {'dtype': 'float'}), '((40, 40), dtype=float)\n', (7111, 7134), True, 'import numpy as np\n'), ((7627, 7657), 'numpy.ones', 'np.ones', (['(40, 40)'], {'dtype': 'float'}), '((40, 40), dtype=float)\n', (7634, 7657), True, 'import numpy as np\n'), ((7987, 8017), 'numpy.ones', 'np.ones', (['(40, 40)'], {'dtype': 'float'}), '((40, 40), dtype=float)\n', (7994, 8017), True, 'import numpy as np\n'), ((8488, 8518), 'numpy.ones', 'np.ones', (['(40, 40)'], {'dtype': 'float'}), '((40, 40), dtype=float)\n', (8495, 8518), True, 'import numpy as np\n'), ((8539, 8569), 'numpy.zeros', 'np.zeros', (['(40, 40)'], {'dtype': 'bool'}), '((40, 40), dtype=bool)\n', (8547, 8569), True, 'import numpy as np\n'), ((9045, 9105), 'numpy.testing.assert_allclose', 'assert_allclose', (["table['aperture_sum'].value", 'self.true_flux'], {}), "(table['aperture_sum'].value, self.true_flux)\n", (9060, 9105), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((9459, 9489), 'numpy.ones', 'np.ones', (['(40, 40)'], {'dtype': 'float'}), '((40, 40), dtype=float)\n', (9466, 9489), True, 'import numpy as np\n'), ((9510, 9534), 'astropy.nddata.NDData', 'NDData', (['data'], {'unit': 'u.adu'}), '(data, 
unit=u.adu)\n', (9516, 9534), False, 'from astropy.nddata import NDData, StdDevUncertainty\n'), ((16662, 16687), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16675, 16687), False, 'import pytest\n'), ((16837, 16862), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16850, 16862), False, 'import pytest\n'), ((17545, 17563), 'numpy.sqrt', 'np.sqrt', (['true_flux'], {}), '(true_flux)\n', (17552, 17563), True, 'import numpy as np\n'), ((28672, 28711), 'pytest.warns', 'pytest.warns', (['AstropyDeprecationWarning'], {}), '(AstropyDeprecationWarning)\n', (28684, 28711), False, 'import pytest\n'), ((28968, 29007), 'pytest.warns', 'pytest.warns', (['AstropyDeprecationWarning'], {}), '(AstropyDeprecationWarning)\n', (28980, 29007), False, 'import pytest\n'), ((32192, 32235), 'numpy.testing.assert_allclose', 'assert_allclose', (['tbl1[column]', 'tbl2[column]'], {}), '(tbl1[column], tbl2[column])\n', (32207, 32235), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((3682, 3700), 'numpy.sqrt', 'np.sqrt', (['self.area'], {}), '(self.area)\n', (3689, 3700), True, 'import numpy as np\n'), ((3798, 3820), 'numpy.sqrt', 'np.sqrt', (['(self.area - 1)'], {}), '(self.area - 1)\n', (3805, 3820), True, 'import numpy as np\n'), ((4548, 4603), 'numpy.testing.assert_allclose', 'assert_allclose', (["table3['aperture_sum']", 'self.true_flux'], {}), "(table3['aperture_sum'], self.true_flux)\n", (4563, 4603), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((4616, 4689), 'numpy.testing.assert_allclose', 'assert_allclose', (["table2['aperture_sum']", "table3['aperture_sum']"], {'atol': '(0.1)'}), "(table2['aperture_sum'], table3['aperture_sum'], atol=0.1)\n", (4631, 4689), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((4929, 4984), 'numpy.testing.assert_allclose', 'assert_allclose', 
(["table3['aperture_sum_err']", 'true_error'], {}), "(table3['aperture_sum_err'], true_error)\n", (4944, 4984), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((4997, 5082), 'numpy.testing.assert_allclose', 'assert_allclose', (["table2['aperture_sum_err']", "table3['aperture_sum_err']"], {'atol': '(0.1)'}), "(table2['aperture_sum_err'], table3['aperture_sum_err'],\n atol=0.1)\n", (5012, 5082), False, 'from numpy.testing import assert_allclose, assert_array_equal, assert_array_less\n'), ((10772, 10845), 'numpy.fabs', 'np.fabs', (['((fluxes_catalog - converted_aperture_sum.value) / fluxes_catalog)'], {}), '((fluxes_catalog - converted_aperture_sum.value) / fluxes_catalog)\n', (10779, 10845), True, 'import numpy as np\n'), ((28792, 28810), 'numpy.array', 'np.array', (['[10, 10]'], {}), '([10, 10])\n', (28800, 28810), True, 'import numpy as np\n'), ((31475, 31489), 'numpy.arange', 'np.arange', (['(400)'], {}), '(400)\n', (31484, 31489), True, 'import numpy as np\n'), ((9234, 9261), 'numpy.transpose', 'np.transpose', (['self.position'], {}), '(self.position)\n', (9246, 9261), True, 'import numpy as np\n'), ((9336, 9363), 'numpy.transpose', 'np.transpose', (['self.position'], {}), '(self.position)\n', (9348, 9363), True, 'import numpy as np\n')]
from solver import * from armatures import * from models import * import numpy as np import config np.random.seed(20160923) pose_glb = np.zeros([1, 3]) # global rotation ########################## mano settings ######################### n_pose = 12 # number of pose pca coefficients, in mano the maximum is 45 n_shape = 10 # number of shape pca coefficients pose_pca = np.random.normal(size=n_pose) shape = np.random.normal(size=n_shape) mesh = KinematicModel(config.MANO_MODEL_PATH, MANOArmature, scale=1000) ########################## smpl settings ########################## # note that in smpl and smpl-h no pca for pose is provided # therefore in the model we fake an identity matrix as the pca coefficients # to make the code compatible # n_pose = 23 * 3 # degrees of freedom, (n_joints - 1) * 3 # n_shape = 10 # pose_pca = np.random.uniform(-0.2, 0.2, size=n_pose) # shape = np.random.normal(size=n_shape) # mesh = KinematicModel(config.SMPL_MODEL_PATH, SMPLArmature, scale=10) ########################## smpl-h settings ########################## # n_pose = 51 * 3 # n_shape = 16 # pose_pca = np.random.uniform(-0.2, 0.2, size=n_pose) # shape = np.random.normal(size=n_shape) # mesh = KinematicModel(config.SMPLH_MODEL_PATH, SMPLHArmature, scale=10) ########################## solving example ############################ wrapper = KinematicPCAWrapper(mesh, n_pose=n_pose) solver = Solver(verbose=True) _, keypoints = \ mesh.set_params(pose_pca=pose_pca, pose_glb=pose_glb, shape=shape) params_est = solver.solve(wrapper, keypoints) shape_est, pose_pca_est, pose_glb_est = wrapper.decode(params_est) print('----------------------------------------------------------------------') print('ground truth parameters') print('pose pca coefficients:', pose_pca) print('pose global rotation:', pose_glb) print('shape: pca coefficients:', shape) print('----------------------------------------------------------------------') print('estimated parameters') print('pose pca coefficients:', 
pose_pca_est) print('pose global rotation:', pose_glb_est) print('shape: pca coefficients:', shape_est) mesh.set_params(pose_pca=pose_pca) mesh.save_obj('./gt.obj') mesh.set_params(pose_pca=pose_pca_est) mesh.save_obj('./est.obj') print('ground truth and estimated meshes are saved into gt.obj and est.obj')
[ "numpy.random.normal", "numpy.zeros", "numpy.random.seed" ]
[((101, 125), 'numpy.random.seed', 'np.random.seed', (['(20160923)'], {}), '(20160923)\n', (115, 125), True, 'import numpy as np\n'), ((137, 153), 'numpy.zeros', 'np.zeros', (['[1, 3]'], {}), '([1, 3])\n', (145, 153), True, 'import numpy as np\n'), ((373, 402), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n_pose'}), '(size=n_pose)\n', (389, 402), True, 'import numpy as np\n'), ((411, 441), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n_shape'}), '(size=n_shape)\n', (427, 441), True, 'import numpy as np\n')]
# A sample spatial model with agents eating grass off patches. # No visualization as of yet #=============== # SETUP #=============== from helipad import Helipad heli = Helipad() heli.name = 'Grass Eating' heli.order = 'random' heli.stages = 5 heli.addParameter('energy', 'Energy from grass', 'slider', dflt=2, opts={'low': 2, 'high': 10, 'step': 1}) heli.addParameter('smart', 'Smart consumption', 'check', dflt=True) heli.addParameter('e2reproduce', 'Energy to reproduce', 'slider', dflt=25, opts={'low': 0, 'high': 100, 'step': 5}) heli.addParameter('maleportion', 'Male portion reproduction', 'slider', dflt=40, opts={'low': 0, 'high': 100, 'step': 5}) heli.addParameter('maxLife', 'Max Lifespan', 'slider', dflt=200, opts={'low': 100, 'high': 1000, 'step': 10}) heli.addParameter('grassrate', 'Grass Rate', 'slider', dflt=10, opts={'low': 1, 'high': 100, 'step': 1}) heli.params['num_agent'].opts = {'low': 1, 'high': 200, 'step': 1} heli.param('num_agent', 200) heli.addBreed('male', 'blue') heli.addBreed('female', 'pink') heli.addGood('energy', 'red', 5) from random import choice, randint from numpy import mean #=============== # BEHAVIOR #=============== #Dividing it into stages like this (like in the NetLogo version) appears to make it more viable, #perhaps because it encourages bunching onto the same fecund patch, with more opportunities for #reproduction, whereas if they do it all at once, they avoid each other too much @heli.hook def agentStep(agent, model, stage): #Look for the neighboring patch with the most grass and move to it, if smart if stage==1: if model.param('smart'): maxenergy = max([n.stocks['energy'] for n in agent.patch.neighbors]) prospects = [n for n in agent.patch.neighbors if n.stocks['energy'] == maxenergy] else: prospects = agent.patch.neighbors agent.orientTo(choice(prospects)) agent.forward() agent.stocks['energy'] -= 1 #Eat grass elif stage==2: if agent.patch.stocks['energy'] > 0: agent.patch.stocks['energy'] -= 1 agent.stocks['energy'] += 
model.param('energy') #Reproduce elif stage==3: if agent.breed == 'male': e = model.param('e2reproduce') p = model.param('maleportion') me, fe = e*p/100, e*(100-p)/100 if agent.stocks['energy'] > me: prospects = [f for f in agent.patch.agentsOn if f.breed=='female' and f.stocks['energy']>fe] if len(prospects): mate = choice(prospects) agent.stocks['energy'] -= me mate.stocks['energy'] -= fe child = mate.reproduce(inherit=[('breed', 'rand')], partners=[agent]) child.stocks['energy'] = e #Die elif stage==4: if agent.stocks['energy'] <= 0 or agent.age > model.param('maxLife'): agent.die() model.deathAge.append(agent.age) if len(model.deathAge) > 100: model.deathAge.pop(0) @heli.hook def patchStep(patch, model, stage): #Regrow grass if stage==5 and patch.stocks['energy'] < 5 and randint(1,100) <= model.param('grassrate'): patch.stocks['energy'] += 1 @heli.hook def modelPostSetup(model): model.deathAge = [] #Stop the model when we have no more females left to reproduce @heli.event def nofemales(model): return len(model.agent('female')) <= 1 heli.param('stopafter', 'nofemales') heli.param('refresh', 1) #=============== # DATA AND VISUALIZATION #=============== # from helipad.visualize import TimeSeries # viz = heli.useVisual(TimeSeries) heli.data.addReporter('grass', heli.data.agentReporter('stocks', 'patch', good='energy', stat='sum')) heli.data.addReporter('age', heli.data.agentReporter('age', 'agent')) heli.data.addReporter('num_agent', lambda model: len(model.agents['agent'])) heli.data.addReporter('sexratio', lambda model: len(model.agent('male', 'agent'))/len(model.agent('female', 'agent'))) heli.data.addReporter('expectancy', lambda model: mean(model.deathAge)) heli.data.addReporter('agentenergy', heli.data.agentReporter('stocks', 'agent', good='energy', percentiles=[0,100])) mapPlot = heli.spatial(x=16, diag=True) mapPlot.config({ 'patchProperty': 'good:energy', 'patchColormap': 'Greens', 'agentSize': 'good:energy', 'lockLayout': True }) pop = 
heli.visual.addPlot('pop', 'Population', 'timeseries', logscale=True) sexratio = heli.visual.addPlot('sexratio', 'Sex Ratio', 'timeseries', logscale=True) age = heli.visual.addPlot('age', 'Age', 'timeseries') energy = heli.visual.addPlot('energy', 'Energy', 'timeseries') pop.addSeries('num_agent', 'Population', 'black') pop.addSeries('grass', 'Grass', 'green') sexratio.addSeries('sexratio', 'M/F Sex Ratio', 'brown') age.addSeries('age', 'Average Age', 'blue') pop.addSeries('expectancy', 'Life Expectancy', 'black') energy.addSeries('agentenergy', 'Energy', 'green') @heli.hook def agentClick(agent, plot, t): print([f'Agent {a.id} at ({a.x}, {a.y})' for a in agent if a is not None]) @heli.hook def patchClick(patch, plot, t): print('Patch at',patch.position) #=============== # LAUNCH THE GUI #=============== heli.launchCpanel()
[ "numpy.mean", "helipad.Helipad", "random.randint", "random.choice" ]
[((172, 181), 'helipad.Helipad', 'Helipad', ([], {}), '()\n', (179, 181), False, 'from helipad import Helipad\n'), ((3782, 3802), 'numpy.mean', 'mean', (['model.deathAge'], {}), '(model.deathAge)\n', (3786, 3802), False, 'from numpy import mean\n'), ((1834, 1851), 'random.choice', 'choice', (['prospects'], {}), '(prospects)\n', (1840, 1851), False, 'from random import choice, randint\n'), ((2891, 2906), 'random.randint', 'randint', (['(1)', '(100)'], {}), '(1, 100)\n', (2898, 2906), False, 'from random import choice, randint\n'), ((2386, 2403), 'random.choice', 'choice', (['prospects'], {}), '(prospects)\n', (2392, 2403), False, 'from random import choice, randint\n')]
#================================ # RESEARCH GROUP PROJECT [RGP] #================================ # This file is part of the COMP3096 Research Group Project. # System import logging # Gym Imports import gym from gym.spaces import Box, Discrete, Tuple # PySC2 Imports from pysc2.lib.actions import FUNCTIONS, FunctionCall from pysc2.lib.features import SCREEN_FEATURES # Numpy import numpy as np # Typing from typing import List from sc2g.env.unit_tracking import UnitTrackingEnv # Setup logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) # ========================================================== # Only applies to movement-based mini-games with # two friendly player units (eg. CollectMineralShards) # ========================================================== class MultiMovementDirectedEnv(UnitTrackingEnv): def __init__(self, sc2_env, **kwargs): super().__init__(sc2_env, **kwargs) # Number of marines and adjacency (hardcoded) self.number_of_marines = 2 self.number_adjacency = 8 # Specify observation and action space screen_shape_observation = self.screen_shape + (1,) self.observation_space = Box(low=0, high=SCREEN_FEATURES.player_relative.scale, shape=screen_shape_observation) self.resolution = self.screen_shape[0] * self.screen_shape[1] # (width x height) self.action_space = Discrete(self.resolution) self.unravel_shape = (self.screen_shape[0], self.screen_shape[1]) def get_sc2_action(self, gym_action) -> List[FunctionCall]: if len(self.state["player_units_stable"]) == 0: return [FUNCTIONS.no_op()] # Get coords by unravelling action. DQN only supports returning an integer as action. 
# How unravel works: # Ref: https://www.quora.com/What-is-a-simple-intuitive-example-for-the-unravel_index-in-Python coords = np.unravel_index(gym_action, self.unravel_shape) # Get gym action for each marine gym_action_1, gym_action_2 = (coords[0] % self.number_adjacency, coords[1] % self.number_adjacency) # Get current coordinates for each marine marine_1_stable = self.state["player_units_stable"][0] marine_2_stable = self.state["player_units_stable"][1] # Get tags for each marine marine_1_tag = marine_1_stable.tag.item() marine_2_tag = marine_2_stable.tag.item() # Get target coordinates for each marine marine_1_curr_xy = next((unit.x, unit.y) for unit in self.state["player_units"] if unit.tag.item() == marine_1_tag) marine_2_curr_xy = next((unit.x, unit.y) for unit in self.state["player_units"] if unit.tag.item() == marine_2_tag) def get_target_xy(num, curr_coords): # 0: Up # 1: Down # 2: Left # 3: Right # 4: Up + Left # 5: Up + Right # 6: Down + Left # 7: Down + Right target_xy = list(curr_coords) # Determine target position if num in (0, 4, 5): # Up target_xy[1] = max(0, curr_coords[1]-1) if num in (1, 6, 7): # Down target_xy[1] = min(self.screen_shape[1]-1, curr_coords[1]+1) if num in (2, 4, 6): # Left target_xy[0] = max(0, curr_coords[0]-1) if num in (3, 5, 7): # Right target_xy[0] = min(self.screen_shape[0]-1, curr_coords[0]+1) return tuple(target_xy) marine_1_target_xy = get_target_xy(gym_action_1, marine_1_curr_xy) marine_2_target_xy = get_target_xy(gym_action_2, marine_2_curr_xy) # Assign action functions actions = [FUNCTIONS.move_unit(marine_1_tag, "now", marine_1_target_xy), FUNCTIONS.move_unit(marine_2_tag, "now", marine_2_target_xy)] return actions
[ "logging.getLogger", "gym.spaces.Discrete", "gym.spaces.Box", "pysc2.lib.actions.FUNCTIONS.move_unit", "numpy.unravel_index", "pysc2.lib.actions.FUNCTIONS.no_op" ]
[((504, 531), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (521, 531), False, 'import logging\n'), ((1190, 1281), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': 'SCREEN_FEATURES.player_relative.scale', 'shape': 'screen_shape_observation'}), '(low=0, high=SCREEN_FEATURES.player_relative.scale, shape=\n screen_shape_observation)\n', (1193, 1281), False, 'from gym.spaces import Box, Discrete, Tuple\n'), ((1395, 1420), 'gym.spaces.Discrete', 'Discrete', (['self.resolution'], {}), '(self.resolution)\n', (1403, 1420), False, 'from gym.spaces import Box, Discrete, Tuple\n'), ((1900, 1948), 'numpy.unravel_index', 'np.unravel_index', (['gym_action', 'self.unravel_shape'], {}), '(gym_action, self.unravel_shape)\n', (1916, 1948), True, 'import numpy as np\n'), ((3781, 3841), 'pysc2.lib.actions.FUNCTIONS.move_unit', 'FUNCTIONS.move_unit', (['marine_1_tag', '"""now"""', 'marine_1_target_xy'], {}), "(marine_1_tag, 'now', marine_1_target_xy)\n", (3800, 3841), False, 'from pysc2.lib.actions import FUNCTIONS, FunctionCall\n'), ((3843, 3903), 'pysc2.lib.actions.FUNCTIONS.move_unit', 'FUNCTIONS.move_unit', (['marine_2_tag', '"""now"""', 'marine_2_target_xy'], {}), "(marine_2_tag, 'now', marine_2_target_xy)\n", (3862, 3903), False, 'from pysc2.lib.actions import FUNCTIONS, FunctionCall\n'), ((1636, 1653), 'pysc2.lib.actions.FUNCTIONS.no_op', 'FUNCTIONS.no_op', ([], {}), '()\n', (1651, 1653), False, 'from pysc2.lib.actions import FUNCTIONS, FunctionCall\n')]
import pandas as pd import numpy as np import math import tkinter as tk from tkinter import ttk from PIL import Image, ImageTk import os import sqlite3 from sqlite3 import OperationalError #file = str(os.path.realpath(__file__)) csv_file = "/Users/nedimdrekovic/Python/DB/simplemaps_worldcities/" + "world_cities.csv" sql_file = "/Users/nedimdrekovic/Python/DB/simplemaps_worldcities/" + "world_cities.sql" db_file = "/Users/nedimdrekovic/Python/DB/simplemaps_worldcities/" + "world_cities.db" backgroundImage_file = "/Users/nedimdrekovic/Python/DB/simplemaps_worldcities/Erde.jpg" backgroundColor = "DeepSkyBlue3" # eigentlich eher dumm geloest, aber es reicht fuers erste imSelbenLand = False # um zu checken ob country doppelt vorhanden n = 10000 # Anzal an zu suchende Städte digits_after_point = 2 def rad(grad): return (math.pi * grad)/180 def isValid(i): while True: city = input(str(i) + ".Stadt: ") if city in df["city"].tolist(): return city print("Diese Stadt exisiert nicht. Bitte existierende Stadt eingeben: ") def getCity(city): stadt = city.split(' (') command_string = "Select latitude, longitude From cities Where name = '" + stadt[0] + "'" if len(stadt) >= 2: # falls es die Stadt also doppelt gibt if len(stadt[1].split(', ')) == 1: # dann unterscheiden sich die Staedte im Land, weil es dann ein Komma gibt land = stadt[1][:-1] command_string += " and country = '" + land + "'" else: # im gleichen Land, also Region vorhanden land = stadt[1].split(',')[0] region = stadt[1].split(', ')[1][:-1] command_string += " and country = '" + land + "'" + " and region = '" + region + "'" zeiger.execute(command_string) latitude, longitude = zeiger.fetchone() return latitude, longitude def entfernung(): city1 = combo1.get() city2 = combo2.get() latitude1, longitude1 = getCity(city1) latitude2, longitude2 = getCity(city2) # print("\n(Längengrad/Breitengrad) von", city1 + ("(" + df.loc[df.index[index1], "admin_name"] + ")" if imSelbenLand else "") + ": (" + str(longitude1) + 
u'\N{DEGREE SIGN}/' + str(latitude1) + u'\N{DEGREE SIGN}' + ")") # print("(Längengrad/Breitengrad) von", city2 + ("(" + df.loc[df.index[index2], "admin_name"] + ")" if imSelbenLand else "") + ": (" + str(longitude2) + u'\N{DEGREE SIGN}/' + str(latitude2) + u'\N{DEGREE SIGN}' + ")") durchmesser = 12756.27 radius = durchmesser / 2 lat1 = rad(latitude1) lon1 = rad(longitude1) lat2 = rad(latitude2) lon2 = rad(longitude2) dlon = lon2 - lon1 dlat = lat2 - lat1 a = np.sin(dlat / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2)**2 c = 2 * math.atan2(np.sqrt(a), np.sqrt(1 - a)) distance = radius * c l1_text = "(" + str(round(longitude1, digits_after_point)) + u'\N{DEGREE SIGN}/' + str(round(latitude1, digits_after_point)) + u'\N{DEGREE SIGN}' + ")" l2_text = "(" + str(round(longitude2, digits_after_point)) + u'\N{DEGREE SIGN}/' + str(round(latitude2, digits_after_point)) + u'\N{DEGREE SIGN}' + ")" tk.Label(tkFenster, text=l1_text, bg="red", fg="white").grid(row=2, column=3) tk.Label(tkFenster, text=l2_text, bg="blue", fg="orange").grid(row=3, column=3) tk.Label(tkFenster, text="(Längengrad/Breitengrad) von\n" + combo1.get() + " =", bg="red", fg="white").grid(row=2, column=2, pady=3) tk.Label(tkFenster, text="(Längengrad/Breitengrad) von\n" + combo2.get() + " =", bg="blue", fg="orange").grid(row=3, column=2, pady=3) tk.Label(tkFenster, text="Entfernung zwischen \"" + combo1.get() + "\"\nund \"" + combo2.get() + "\" = ", bg="yellow", fg="dark green").grid(row=4, column=2, padx=10, pady=10) resultText = str(round(distance, digits_after_point)).replace(".", ",") + " km\n(" + str(round(distance/1.60934, digits_after_point)) + " miles)" result = tk.Label(tkFenster, text=resultText, bg="yellow", fg="dark green").grid(row=4, column=3) print("Entfernung zwischen",city1,"und",city2,":",str(round(distance, digits_after_point)),"km (= " + str(round(distance/1.60934, digits_after_point)) + " miles)") if __name__ == '__main__': connection = sqlite3.connect(db_file, timeout=10) 
zeiger = connection.cursor() sql_as_string = open(sql_file, 'r').read() cmds = sql_as_string.split(';')[:n] # suchen nur die ersten 1000 Werte zeiger.execute("DROP TABLE IF EXISTS `cities`;") for index in range(len(cmds)): try: zeiger.execute(cmds[index]) except OperationalError: print("Fehler: " + cmds[index]) zeiger.execute("Select name, country, region From cities Where name != '';") cities = sorted(zeiger.fetchall())[:n] cities_array = [] staedte = [city[0] for city in cities] for index, city in enumerate(cities): if staedte.count(city[0]) >= 2: # bedeutet dass 2 Mal die selbe Stadt in der Liste ist und man nun das Land ueberprueft if ((cities[index][0] == cities[index+1][0]) & (cities[index][1] == cities[index+1][1])) | ((cities[index][0] == cities[index-1][0]) & (cities[index][1] == cities[index-1][1])): # Stadt und Land gleich cities_array.append(cities[index][0] + " (" + cities[index][1] + ", " + cities[index][2] + ")") else: # Städte sind gleich, Länder aber nicht cities_array.append(cities[index][0] + " (" + cities[index][1] + ")") else: # wenn Stadt nur einmal vorhanden cities_array.append(cities[index][0]) # Ein Fenster erstellen tkFenster = tk.Tk(className='AutocompleteCombobox') # Den Fenstertitle erstellen tkFenster.title("Entfernung zweier Städte (Luftlinie)") tkFenster.geometry("1000x225") tkFenster.configure(background=backgroundColor) # combo1_cities = df["city"].tolist() # combo1_cities = [city if city not in (combo1_cities[:index] + combo1_cities[index+1:]) else (city + " (" + str(df.loc[index, "country"]) + ", " + str(df.loc[index, "admin_name"]) + ")") if (len(df[(df["city"] == city) & (df["country"] == df.iloc[index]["country"])]) >= 2) else (city + " (" + str(df.loc[index, "country"]) + ")") for index, city in enumerate(combo1_cities)] # wenn bis auf city das gleiche element enthalten ist combo1 = ttk.Combobox(tkFenster, state="readonly", values=sorted(cities_array)) combo2 = ttk.Combobox(tkFenster, state="readonly", 
values=sorted(cities_array)) combo1.grid(column=0, row=1, padx=5) combo2.grid(column=1, row=1, padx=5) combo1.current(0) combo2.current(0) label1 = tk.Label(tkFenster, text="1.Stadt", bg="red", fg="white").grid(row=0, column=0, padx=3) label2 = tk.Label(tkFenster, text="2.Stadt", bg="blue", fg="orange").grid(row=0, column=1, padx=3) berechne = ttk.Button(tkFenster, text="Berechne die Entfernung", command=entfernung).grid(row=1, column=2, padx=3) quit = ttk.Button(tkFenster, text="Schließe die Anwendung", command=tkFenster.quit).grid(row=1, column=3, padx=3) # In der Ereignisschleife auf Eingabe des Benutzers warten. tkFenster.mainloop() connection.commit() connection.close()
[ "numpy.sqrt", "sqlite3.connect", "tkinter.ttk.Button", "tkinter.Tk", "numpy.cos", "tkinter.Label", "numpy.sin" ]
[((4251, 4287), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {'timeout': '(10)'}), '(db_file, timeout=10)\n', (4266, 4287), False, 'import sqlite3\n'), ((5661, 5700), 'tkinter.Tk', 'tk.Tk', ([], {'className': '"""AutocompleteCombobox"""'}), "(className='AutocompleteCombobox')\n", (5666, 5700), True, 'import tkinter as tk\n'), ((2699, 2715), 'numpy.sin', 'np.sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (2705, 2715), True, 'import numpy as np\n'), ((2794, 2804), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (2801, 2804), True, 'import numpy as np\n'), ((2806, 2820), 'numpy.sqrt', 'np.sqrt', (['(1 - a)'], {}), '(1 - a)\n', (2813, 2820), True, 'import numpy as np\n'), ((3166, 3221), 'tkinter.Label', 'tk.Label', (['tkFenster'], {'text': 'l1_text', 'bg': '"""red"""', 'fg': '"""white"""'}), "(tkFenster, text=l1_text, bg='red', fg='white')\n", (3174, 3221), True, 'import tkinter as tk\n'), ((3248, 3305), 'tkinter.Label', 'tk.Label', (['tkFenster'], {'text': 'l2_text', 'bg': '"""blue"""', 'fg': '"""orange"""'}), "(tkFenster, text=l2_text, bg='blue', fg='orange')\n", (3256, 3305), True, 'import tkinter as tk\n'), ((3948, 4014), 'tkinter.Label', 'tk.Label', (['tkFenster'], {'text': 'resultText', 'bg': '"""yellow"""', 'fg': '"""dark green"""'}), "(tkFenster, text=resultText, bg='yellow', fg='dark green')\n", (3956, 4014), True, 'import tkinter as tk\n'), ((6667, 6724), 'tkinter.Label', 'tk.Label', (['tkFenster'], {'text': '"""1.Stadt"""', 'bg': '"""red"""', 'fg': '"""white"""'}), "(tkFenster, text='1.Stadt', bg='red', fg='white')\n", (6675, 6724), True, 'import tkinter as tk\n'), ((6768, 6827), 'tkinter.Label', 'tk.Label', (['tkFenster'], {'text': '"""2.Stadt"""', 'bg': '"""blue"""', 'fg': '"""orange"""'}), "(tkFenster, text='2.Stadt', bg='blue', fg='orange')\n", (6776, 6827), True, 'import tkinter as tk\n'), ((6873, 6946), 'tkinter.ttk.Button', 'ttk.Button', (['tkFenster'], {'text': '"""Berechne die Entfernung"""', 'command': 'entfernung'}), "(tkFenster, text='Berechne 
die Entfernung', command=entfernung)\n", (6883, 6946), False, 'from tkinter import ttk\n'), ((6988, 7064), 'tkinter.ttk.Button', 'ttk.Button', (['tkFenster'], {'text': '"""Schließe die Anwendung"""', 'command': 'tkFenster.quit'}), "(tkFenster, text='Schließe die Anwendung', command=tkFenster.quit)\n", (6998, 7064), False, 'from tkinter import ttk\n'), ((2721, 2733), 'numpy.cos', 'np.cos', (['lat1'], {}), '(lat1)\n', (2727, 2733), True, 'import numpy as np\n'), ((2736, 2748), 'numpy.cos', 'np.cos', (['lat2'], {}), '(lat2)\n', (2742, 2748), True, 'import numpy as np\n'), ((2751, 2767), 'numpy.sin', 'np.sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (2757, 2767), True, 'import numpy as np\n')]
import os from os import listdir from os.path import isfile, join import logging import numpy as np from ase import Atoms import mff from mff import models, calculators, utility from mff import configurations as cfg def get_potential(confs): pot = 0 for conf in confs: el1 = conf[:, 3] el2 = conf[:, 4] dist = np.sum(conf[:, :3]**2, axis=1)**0.5 pot += np.sum(el1**0.5*el2**0.5*pot_profile(dist)) return pot def pot_profile(dist): return ((dist-1)**2 - 0.5)*np.exp(-dist) def force_profile(dist): a = (dist-1)**2 - 0.5 da = 2*(dist-1) b = np.exp(-dist) db = -np.exp(-dist) return a*db+b*da def get_potentials(many_confs): pots = np.zeros(len(many_confs)) for i, confs in enumerate(many_confs): pots[i] = get_potential(confs) return pots def get_force(conf): el1 = conf[:, 3] el2 = conf[:, 4] dist = np.sum(conf[:, :3]**2, axis=1)**0.5 vers = conf[:, :3]/dist[:, None] force = np.sum(vers * (el1[:, None]**0.5*el2[:, None] ** 0.5*(force_profile(dist[:, None]))), axis=0) return force def get_forces(many_confs): forces = np.zeros((len(many_confs), 3)) for i, confs in enumerate(many_confs): forces[i] = get_force(confs) return forces def generate_confs(n, elements, r_cut): phi = np.random.uniform(0, 2*np.pi, size=n*2) costheta = np.random.uniform(-1, 1, size=n*2) u = np.random.uniform(0, 1, size=n*2) theta = np.arccos(costheta) r = r_cut * u**(1/3) x = r * np.sin(theta) * np.cos(phi) y = r * np.sin(theta) * np.sin(phi) z = r * np.cos(theta) xyz = np.vstack((x, y, z)).T glob_confs = [] loc_confs = [] for i in range(n): conf1 = np.zeros((2, 5)) conf2 = np.zeros((2, 5)) conf3 = np.zeros((2, 5)) conf1[0, :3] = xyz[2*i] conf1[1, :3] = xyz[2*i+1] conf2[0, :3] = -xyz[2*i] conf2[1, :3] = xyz[2*i+1] - xyz[2*i] conf3[0, :3] = xyz[2*i] - xyz[2*i+1] conf3[1, :3] = -xyz[2*i+1] if len(elements) == 1: conf1[:, 3] = elements conf1[:, 4] = elements conf2[:, 3] = elements conf2[:, 4] = elements conf3[:, 3] = elements conf3[:, 4] = elements elif len(elements) >= 2: a, b, c = np.random.choice(elements), 
np.random.choice( elements), np.random.choice(elements) conf1[:, 3] = a conf1[0, 4] = b conf1[1, 4] = c conf2[:, 3] = b conf2[0, 4] = a conf2[1, 4] = c conf3[:, 3] = c conf3[0, 4] = b conf3[1, 4] = a this_conf = np.array([conf1, conf2, conf3]) glob_confs.append(this_conf) loc_confs.append(conf1) loc_confs.append(conf2) loc_confs.append(conf3) loc_confs = np.array(loc_confs) glob_confs = np.array(glob_confs) return (glob_confs, loc_confs) def fit_test(m, loc_confs, forces, glob_confs, energies, ntr, ntest, elements, fit_type, r_cut, ncores = 1): if fit_type == 'force': m.fit(loc_confs[:ntr], forces[:ntr], ncores=ncores) elif fit_type == 'energy': m.fit_energy(glob_confs[:ntr], energies[:ntr], ncores=ncores) elif fit_type == 'force_and_energy': m.fit_force_and_energy( loc_confs[:ntr], forces[:ntr], glob_confs[:ntr], energies[:ntr], ncores=ncores) pred_forces = m.predict(loc_confs[-ntest:], ncores=ncores) pred_energies = m.predict_energy(glob_confs[-ntest:], ncores=ncores) # print("MAEF: %.4f eV/A " %(np.mean(np.sum(forces[-ntest:] - pred_forces, axis = 1)**2)**0.5)) # print("MAEE: %.4f eV" %( np.mean(abs(energies[-ntest:] - pred_energies)))) mtype = str(type(m)).split('.')[-1].split("'")[0] if mtype == "TwoBodySingleSpeciesModel" or mtype == "ThreeBodySingleSpeciesModel" or mtype == "TwoBodyManySpeciesModel" or mtype == "ThreeBodyManySpeciesModel": m.build_grid(0.0, 5, ncores=2) if mtype == "TwoBodySingleSpeciesModel": calc = calculators.TwoBodySingleSpecies(r_cut*2, m.grid) elif mtype == "ThreeBodySingleSpeciesModel": calc = calculators.ThreeBodySingleSpecies(r_cut*2, m.grid) elif mtype == "TwoBodyManySpeciesModel": calc = calculators.TwoBodyManySpecies(r_cut*2, elements, m.grid) elif mtype == "ThreeBodyManySpeciesModel": calc = calculators.ThreeBodyManySpecies(r_cut*2, elements, m.grid) elif mtype == "CombinedSingleSpeciesModel" or mtype == "CombinedManySpeciesModel": m.build_grid(0.0, 5, 5, ncores=2) if mtype == "CombinedSingleSpeciesModel": calc = 
calculators.CombinedSingleSpecies( r_cut*2, m.grid_2b, m.grid_3b) elif mtype == "CombinedManySpeciesModel": calc = calculators.CombinedManySpecies( r_cut*2, elements, m.grid_2b, m.grid_3b) elif mtype == "EamSingleSpeciesModel" or mtype == "EamManySpeciesModel": m.build_grid(5, ncores=2) if mtype == "EamSingleSpeciesModel": calc = calculators.EamSingleSpecies( r_cut*2, m.grid, m.gp.kernel.theta[2], m.gp.kernel.theta[3]) elif mtype == "EamManySpeciesModel": calc = calculators.EamManySpecies( r_cut*2, elements, m.grid, m.gp.kernel.theta[2], m.gp.kernel.theta[3]) elif mtype == "TwoThreeEamSingleSpeciesModel" or mtype == "TwoThreeEamManySpeciesModel": m.build_grid(0, 5, 5, 5, ncores=2) if mtype == "TwoThreeEamSingleSpeciesModel": calc = calculators.TwoThreeEamSingleSpecies( r_cut*2, m.grid_2b, m.grid_3b, m.grid_eam, m.gp_eam.kernel.theta[2], m.gp_eam.kernel.theta[3]) elif mtype == "TwoThreeEamManySpeciesModel": calc = calculators.TwoThreeEamManySpecies( r_cut*2, elements, m.grid_2b, m.grid_3b, m.grid_eam, m.gp_eam.kernel.theta[2], m.gp_eam.kernel.theta[3]) map_forces = np.zeros((len(pred_forces), 3)) map_energies = np.zeros_like(pred_energies) for i in np.arange(ntest): coords = np.vstack(([0, 0, 0], glob_confs[-ntest:][i][0, 0:3, 0:3])) atoms = Atoms(positions=coords + 20) atoms.set_atomic_numbers([glob_confs[-ntest:][i][0, 0, 3], glob_confs[-ntest:][i][0, 0, 4], glob_confs[-ntest:][i][0, 1, 4]]) atoms.set_cell([100, 100, 100]) atoms.set_calculator(calc) map_energies[i] = atoms.get_potential_energy() for i in np.arange(ntest): coords = np.vstack(([0, 0, 0], loc_confs[-ntest:][i][0:3, 0:3])) atoms = Atoms(positions=coords + 20) atoms.set_atomic_numbers( [loc_confs[-ntest:][i][0, 3], loc_confs[-ntest:][i][0, 4], loc_confs[-ntest:][i][1, 4]]) atoms.set_cell([100, 100, 100]) atoms.set_calculator(calc) map_forces[i] = atoms.get_forces()[0, :] error_f = np.sum((pred_forces - map_forces)**2, axis=1)**0.5 error_e = pred_energies - map_energies # print("Force Error: %.4f eV/A 
Energy Error: %.4f eV " %(np.mean(error_f), np.mean(error_e))) m.save('MODELS/') class Tests(): def __init__(self, elements, noise, sigma, r_cut, theta, ntr_f, ntr_e, ntest, alpha, r0, ncores): self.elements = elements self.noise = noise self.sigma = sigma self.r_cut = r_cut self.theta = theta self.ntr_f = ntr_f self.ntr_e = ntr_e self.ntest = ntest self.alpha = alpha self.r0 = r0 self.ncores = ncores self.glob_confs, self.loc_confs = generate_confs(self.ntr_f+self.ntr_e+self.ntest, self.elements, self.r_cut) self.forces = get_forces(self.loc_confs) self.energies = get_potentials(self.glob_confs) def test_2_body_single(self): for fit_type in ("force", "energy", "force_and_energy"): m = models.TwoBodySingleSpeciesModel( element=self.elements, noise=self.noise, sigma=self.sigma, r_cut=self.r_cut*2, theta=self.theta, rep_sig=0) try: fit_test(m, self.loc_confs, self.forces, self.glob_confs, self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores) except: print("ERROR in 2-body Single %s fit" % (fit_type)) def test_3_body_single(self): for fit_type in ("force", "energy", "force_and_energy"): m = models.ThreeBodySingleSpeciesModel( element=self.elements, noise=self.noise, sigma=self.sigma, r_cut=self.r_cut*2, theta=self.theta) try: fit_test(m, self.loc_confs, self.forces, self.glob_confs, self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores) except: print("ERROR in 3-body Single %s fit" % (fit_type)) def test_combined_body_single(self): for fit_type in ("force", "energy", "force_and_energy"): m = models.CombinedSingleSpeciesModel( element=self.elements, noise=self.noise, sigma_2b=self.sigma, sigma_3b=self.sigma, r_cut=self.r_cut*2, theta_2b=self.theta, theta_3b=self.theta, rep_sig=0) try: fit_test(m, self.loc_confs, self.forces, self.glob_confs, self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores) except: print("ERROR in combined Single %s fit" % (fit_type)) def 
test_eam_single(self): for fit_type in ("force", "energy", "force_and_energy"): m = models.EamSingleSpeciesModel( element=self.elements, noise=self.noise, sigma=self.sigma, r_cut=self.r_cut*2, alpha=self.alpha, r0=self.r0) try: fit_test(m, self.loc_confs, self.forces, self.glob_confs, self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores) except: print("ERROR in Eam Single %s fit" % (fit_type)) def test_23eam_single(self): for fit_type in ("force", "energy", "force_and_energy"): m = models.TwoThreeEamSingleSpeciesModel( self.elements, self.r_cut*2, self.sigma, self.sigma, self.sigma, self.theta, self.theta, self.alpha, self.r0, self.noise, 0) try: fit_test(m, self.loc_confs, self.forces, self.glob_confs, self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores) except: print("ERROR in 23 Eam Single %s fit" % (fit_type)) def test_2_body_many(self): for fit_type in ("force", "energy", "force_and_energy"): m = models.TwoBodyManySpeciesModel( elements=self.elements, noise=self.noise, sigma=self.sigma, r_cut=self.r_cut*2, theta=self.theta, rep_sig=0) try: fit_test(m, self.loc_confs, self.forces, self.glob_confs, self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores) except: print("ERROR in 2-body Many %s fit" % (fit_type)) def test_3_body_many(self): for fit_type in ("force", "energy", "force_and_energy"): m = models.ThreeBodyManySpeciesModel( elements=self.elements, noise=self.noise, sigma=self.sigma, r_cut=self.r_cut*2, theta=self.theta) try: fit_test(m, self.loc_confs, self.forces, self.glob_confs, self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores) except: print("ERROR in 3-body Many %s fit" % (fit_type)) def test_combined_body_many(self): for fit_type in ("force", "energy", "force_and_energy"): m = models.CombinedManySpeciesModel(elements=self.elements, noise=self.noise, sigma_2b=self.sigma, sigma_3b=self.sigma, r_cut=self.r_cut*2, 
theta_2b=self.theta, theta_3b=self.theta, rep_sig=0) try: fit_test(m, self.loc_confs, self.forces, self.glob_confs, self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores) except: print("ERROR in combined Many %s fit" % (fit_type)) def test_eam_many(self): for fit_type in ("force", "energy", "force_and_energy"): m = models.EamManySpeciesModel( elements=self.elements, noise=self.noise, sigma=self.sigma, r_cut=self.r_cut*2, alpha=self.alpha, r0=self.r0) try: fit_test(m, self.loc_confs, self.forces, self.glob_confs, self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores) except: print("ERROR in eam Many %s fit" % (fit_type)) def test_23eam_many(self): for fit_type in ("force", "energy", "force_and_energy"): m = models.TwoThreeEamManySpeciesModel( self.elements, self.r_cut*2, self.sigma, self.sigma, self.sigma, self.theta, self.theta, self.alpha, self.r0, self.noise, 0) try: fit_test(m, self.loc_confs, self.forces, self.glob_confs, self.energies, self.ntr_f, self.ntest, self.elements, fit_type, self.r_cut, self.ncores) except: print("ERROR in 23 eam Many %s fit" % (fit_type)) def test_load(self): onlyfiles = [f for f in listdir("MODELS") if isfile(join("MODELS", f))] for file in onlyfiles: if file.endswith(".json"): try: m2 = utility.load_model("MODELS/" + file) except: print("ERROR: %s not loaded" % (file)) if __name__ == '__main__': # GP Parameters sigma = 1.0 # Angstrom - typical value 0.2-0.6 noise = .001 # Number - Typical values 0.01 - 0.0001 theta = 0.1 # Cutoff decay lengthscale in Angstrom - Typical value r_cut/5 - r_cut/10 r_cut = 3.0 ntr_f = 10 ntr_e = 10 ntest = 10 elements = [1] ncores = 2 alpha = 1 r0 = 10 test = Tests(elements, noise, sigma, r_cut, theta, ntr_f, ntr_e, ntest, alpha, r0, ncores) test.test_2_body_single() test.test_3_body_single() test.test_combined_body_single() test.test_eam_single() test.test_23eam_single() test.test_2_body_many() test.test_3_body_many() 
test.test_combined_body_many() test.test_eam_many() test.test_23eam_many() test.test_load()
[ "mff.models.TwoThreeEamManySpeciesModel", "numpy.arccos", "ase.Atoms", "numpy.array", "mff.models.ThreeBodyManySpeciesModel", "numpy.sin", "numpy.arange", "os.listdir", "mff.calculators.EamSingleSpecies", "mff.models.TwoBodyManySpeciesModel", "mff.calculators.EamManySpecies", "numpy.exp", "n...
[((605, 618), 'numpy.exp', 'np.exp', (['(-dist)'], {}), '(-dist)\n', (611, 618), True, 'import numpy as np\n'), ((1357, 1400), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)'], {'size': '(n * 2)'}), '(0, 2 * np.pi, size=n * 2)\n', (1374, 1400), True, 'import numpy as np\n'), ((1412, 1448), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(n * 2)'}), '(-1, 1, size=n * 2)\n', (1429, 1448), True, 'import numpy as np\n'), ((1455, 1490), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(n * 2)'}), '(0, 1, size=n * 2)\n', (1472, 1490), True, 'import numpy as np\n'), ((1502, 1521), 'numpy.arccos', 'np.arccos', (['costheta'], {}), '(costheta)\n', (1511, 1521), True, 'import numpy as np\n'), ((2928, 2947), 'numpy.array', 'np.array', (['loc_confs'], {}), '(loc_confs)\n', (2936, 2947), True, 'import numpy as np\n'), ((2965, 2985), 'numpy.array', 'np.array', (['glob_confs'], {}), '(glob_confs)\n', (2973, 2985), True, 'import numpy as np\n'), ((6127, 6155), 'numpy.zeros_like', 'np.zeros_like', (['pred_energies'], {}), '(pred_energies)\n', (6140, 6155), True, 'import numpy as np\n'), ((6170, 6186), 'numpy.arange', 'np.arange', (['ntest'], {}), '(ntest)\n', (6179, 6186), True, 'import numpy as np\n'), ((6622, 6638), 'numpy.arange', 'np.arange', (['ntest'], {}), '(ntest)\n', (6631, 6638), True, 'import numpy as np\n'), ((510, 523), 'numpy.exp', 'np.exp', (['(-dist)'], {}), '(-dist)\n', (516, 523), True, 'import numpy as np\n'), ((629, 642), 'numpy.exp', 'np.exp', (['(-dist)'], {}), '(-dist)\n', (635, 642), True, 'import numpy as np\n'), ((910, 942), 'numpy.sum', 'np.sum', (['(conf[:, :3] ** 2)'], {'axis': '(1)'}), '(conf[:, :3] ** 2, axis=1)\n', (916, 942), True, 'import numpy as np\n'), ((1575, 1586), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1581, 1586), True, 'import numpy as np\n'), ((1615, 1626), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1621, 1626), True, 'import numpy as np\n'), ((1639, 
1652), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1645, 1652), True, 'import numpy as np\n'), ((1664, 1684), 'numpy.vstack', 'np.vstack', (['(x, y, z)'], {}), '((x, y, z))\n', (1673, 1684), True, 'import numpy as np\n'), ((1767, 1783), 'numpy.zeros', 'np.zeros', (['(2, 5)'], {}), '((2, 5))\n', (1775, 1783), True, 'import numpy as np\n'), ((1800, 1816), 'numpy.zeros', 'np.zeros', (['(2, 5)'], {}), '((2, 5))\n', (1808, 1816), True, 'import numpy as np\n'), ((1833, 1849), 'numpy.zeros', 'np.zeros', (['(2, 5)'], {}), '((2, 5))\n', (1841, 1849), True, 'import numpy as np\n'), ((2746, 2777), 'numpy.array', 'np.array', (['[conf1, conf2, conf3]'], {}), '([conf1, conf2, conf3])\n', (2754, 2777), True, 'import numpy as np\n'), ((6205, 6264), 'numpy.vstack', 'np.vstack', (['([0, 0, 0], glob_confs[-ntest:][i][0, 0:3, 0:3])'], {}), '(([0, 0, 0], glob_confs[-ntest:][i][0, 0:3, 0:3]))\n', (6214, 6264), True, 'import numpy as np\n'), ((6281, 6309), 'ase.Atoms', 'Atoms', ([], {'positions': '(coords + 20)'}), '(positions=coords + 20)\n', (6286, 6309), False, 'from ase import Atoms\n'), ((6657, 6712), 'numpy.vstack', 'np.vstack', (['([0, 0, 0], loc_confs[-ntest:][i][0:3, 0:3])'], {}), '(([0, 0, 0], loc_confs[-ntest:][i][0:3, 0:3]))\n', (6666, 6712), True, 'import numpy as np\n'), ((6729, 6757), 'ase.Atoms', 'Atoms', ([], {'positions': '(coords + 20)'}), '(positions=coords + 20)\n', (6734, 6757), False, 'from ase import Atoms\n'), ((7032, 7079), 'numpy.sum', 'np.sum', (['((pred_forces - map_forces) ** 2)'], {'axis': '(1)'}), '((pred_forces - map_forces) ** 2, axis=1)\n', (7038, 7079), True, 'import numpy as np\n'), ((344, 376), 'numpy.sum', 'np.sum', (['(conf[:, :3] ** 2)'], {'axis': '(1)'}), '(conf[:, :3] ** 2, axis=1)\n', (350, 376), True, 'import numpy as np\n'), ((1559, 1572), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1565, 1572), True, 'import numpy as np\n'), ((1599, 1612), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1605, 1612), True, 'import 
numpy as np\n'), ((4131, 4182), 'mff.calculators.TwoBodySingleSpecies', 'calculators.TwoBodySingleSpecies', (['(r_cut * 2)', 'm.grid'], {}), '(r_cut * 2, m.grid)\n', (4163, 4182), False, 'from mff import models, calculators, utility\n'), ((8082, 8228), 'mff.models.TwoBodySingleSpeciesModel', 'models.TwoBodySingleSpeciesModel', ([], {'element': 'self.elements', 'noise': 'self.noise', 'sigma': 'self.sigma', 'r_cut': '(self.r_cut * 2)', 'theta': 'self.theta', 'rep_sig': '(0)'}), '(element=self.elements, noise=self.noise,\n sigma=self.sigma, r_cut=self.r_cut * 2, theta=self.theta, rep_sig=0)\n', (8114, 8228), False, 'from mff import models, calculators, utility\n'), ((8665, 8802), 'mff.models.ThreeBodySingleSpeciesModel', 'models.ThreeBodySingleSpeciesModel', ([], {'element': 'self.elements', 'noise': 'self.noise', 'sigma': 'self.sigma', 'r_cut': '(self.r_cut * 2)', 'theta': 'self.theta'}), '(element=self.elements, noise=self.noise,\n sigma=self.sigma, r_cut=self.r_cut * 2, theta=self.theta)\n', (8699, 8802), False, 'from mff import models, calculators, utility\n'), ((9246, 9445), 'mff.models.CombinedSingleSpeciesModel', 'models.CombinedSingleSpeciesModel', ([], {'element': 'self.elements', 'noise': 'self.noise', 'sigma_2b': 'self.sigma', 'sigma_3b': 'self.sigma', 'r_cut': '(self.r_cut * 2)', 'theta_2b': 'self.theta', 'theta_3b': 'self.theta', 'rep_sig': '(0)'}), '(element=self.elements, noise=self.noise,\n sigma_2b=self.sigma, sigma_3b=self.sigma, r_cut=self.r_cut * 2,\n theta_2b=self.theta, theta_3b=self.theta, rep_sig=0)\n', (9279, 9445), False, 'from mff import models, calculators, utility\n'), ((9861, 10005), 'mff.models.EamSingleSpeciesModel', 'models.EamSingleSpeciesModel', ([], {'element': 'self.elements', 'noise': 'self.noise', 'sigma': 'self.sigma', 'r_cut': '(self.r_cut * 2)', 'alpha': 'self.alpha', 'r0': 'self.r0'}), '(element=self.elements, noise=self.noise, sigma\n =self.sigma, r_cut=self.r_cut * 2, alpha=self.alpha, r0=self.r0)\n', (9889, 10005), False, 
'from mff import models, calculators, utility\n'), ((10421, 10594), 'mff.models.TwoThreeEamSingleSpeciesModel', 'models.TwoThreeEamSingleSpeciesModel', (['self.elements', '(self.r_cut * 2)', 'self.sigma', 'self.sigma', 'self.sigma', 'self.theta', 'self.theta', 'self.alpha', 'self.r0', 'self.noise', '(0)'], {}), '(self.elements, self.r_cut * 2, self.\n sigma, self.sigma, self.sigma, self.theta, self.theta, self.alpha, self\n .r0, self.noise, 0)\n', (10457, 10594), False, 'from mff import models, calculators, utility\n'), ((11007, 11152), 'mff.models.TwoBodyManySpeciesModel', 'models.TwoBodyManySpeciesModel', ([], {'elements': 'self.elements', 'noise': 'self.noise', 'sigma': 'self.sigma', 'r_cut': '(self.r_cut * 2)', 'theta': 'self.theta', 'rep_sig': '(0)'}), '(elements=self.elements, noise=self.noise,\n sigma=self.sigma, r_cut=self.r_cut * 2, theta=self.theta, rep_sig=0)\n', (11037, 11152), False, 'from mff import models, calculators, utility\n'), ((11569, 11705), 'mff.models.ThreeBodyManySpeciesModel', 'models.ThreeBodyManySpeciesModel', ([], {'elements': 'self.elements', 'noise': 'self.noise', 'sigma': 'self.sigma', 'r_cut': '(self.r_cut * 2)', 'theta': 'self.theta'}), '(elements=self.elements, noise=self.noise,\n sigma=self.sigma, r_cut=self.r_cut * 2, theta=self.theta)\n', (11601, 11705), False, 'from mff import models, calculators, utility\n'), ((12129, 12327), 'mff.models.CombinedManySpeciesModel', 'models.CombinedManySpeciesModel', ([], {'elements': 'self.elements', 'noise': 'self.noise', 'sigma_2b': 'self.sigma', 'sigma_3b': 'self.sigma', 'r_cut': '(self.r_cut * 2)', 'theta_2b': 'self.theta', 'theta_3b': 'self.theta', 'rep_sig': '(0)'}), '(elements=self.elements, noise=self.noise,\n sigma_2b=self.sigma, sigma_3b=self.sigma, r_cut=self.r_cut * 2,\n theta_2b=self.theta, theta_3b=self.theta, rep_sig=0)\n', (12160, 12327), False, 'from mff import models, calculators, utility\n'), ((12770, 12913), 'mff.models.EamManySpeciesModel', 'models.EamManySpeciesModel', 
([], {'elements': 'self.elements', 'noise': 'self.noise', 'sigma': 'self.sigma', 'r_cut': '(self.r_cut * 2)', 'alpha': 'self.alpha', 'r0': 'self.r0'}), '(elements=self.elements, noise=self.noise, sigma=\n self.sigma, r_cut=self.r_cut * 2, alpha=self.alpha, r0=self.r0)\n', (12796, 12913), False, 'from mff import models, calculators, utility\n'), ((13325, 13496), 'mff.models.TwoThreeEamManySpeciesModel', 'models.TwoThreeEamManySpeciesModel', (['self.elements', '(self.r_cut * 2)', 'self.sigma', 'self.sigma', 'self.sigma', 'self.theta', 'self.theta', 'self.alpha', 'self.r0', 'self.noise', '(0)'], {}), '(self.elements, self.r_cut * 2, self.\n sigma, self.sigma, self.sigma, self.theta, self.theta, self.alpha, self\n .r0, self.noise, 0)\n', (13359, 13496), False, 'from mff import models, calculators, utility\n'), ((4254, 4307), 'mff.calculators.ThreeBodySingleSpecies', 'calculators.ThreeBodySingleSpecies', (['(r_cut * 2)', 'm.grid'], {}), '(r_cut * 2, m.grid)\n', (4288, 4307), False, 'from mff import models, calculators, utility\n'), ((4762, 4828), 'mff.calculators.CombinedSingleSpecies', 'calculators.CombinedSingleSpecies', (['(r_cut * 2)', 'm.grid_2b', 'm.grid_3b'], {}), '(r_cut * 2, m.grid_2b, m.grid_3b)\n', (4795, 4828), False, 'from mff import models, calculators, utility\n'), ((13851, 13868), 'os.listdir', 'listdir', (['"""MODELS"""'], {}), "('MODELS')\n", (13858, 13868), False, 'from os import listdir\n'), ((2373, 2399), 'numpy.random.choice', 'np.random.choice', (['elements'], {}), '(elements)\n', (2389, 2399), True, 'import numpy as np\n'), ((2401, 2427), 'numpy.random.choice', 'np.random.choice', (['elements'], {}), '(elements)\n', (2417, 2427), True, 'import numpy as np\n'), ((2446, 2472), 'numpy.random.choice', 'np.random.choice', (['elements'], {}), '(elements)\n', (2462, 2472), True, 'import numpy as np\n'), ((4375, 4434), 'mff.calculators.TwoBodyManySpecies', 'calculators.TwoBodyManySpecies', (['(r_cut * 2)', 'elements', 'm.grid'], {}), '(r_cut * 2, 
elements, m.grid)\n', (4405, 4434), False, 'from mff import models, calculators, utility\n'), ((4914, 4988), 'mff.calculators.CombinedManySpecies', 'calculators.CombinedManySpecies', (['(r_cut * 2)', 'elements', 'm.grid_2b', 'm.grid_3b'], {}), '(r_cut * 2, elements, m.grid_2b, m.grid_3b)\n', (4945, 4988), False, 'from mff import models, calculators, utility\n'), ((5181, 5277), 'mff.calculators.EamSingleSpecies', 'calculators.EamSingleSpecies', (['(r_cut * 2)', 'm.grid', 'm.gp.kernel.theta[2]', 'm.gp.kernel.theta[3]'], {}), '(r_cut * 2, m.grid, m.gp.kernel.theta[2], m.gp.\n kernel.theta[3])\n', (5209, 5277), False, 'from mff import models, calculators, utility\n'), ((13879, 13896), 'os.path.join', 'join', (['"""MODELS"""', 'f'], {}), "('MODELS', f)\n", (13883, 13896), False, 'from os.path import isfile, join\n'), ((14015, 14051), 'mff.utility.load_model', 'utility.load_model', (["('MODELS/' + file)"], {}), "('MODELS/' + file)\n", (14033, 14051), False, 'from mff import models, calculators, utility\n'), ((4503, 4564), 'mff.calculators.ThreeBodyManySpecies', 'calculators.ThreeBodyManySpecies', (['(r_cut * 2)', 'elements', 'm.grid'], {}), '(r_cut * 2, elements, m.grid)\n', (4535, 4564), False, 'from mff import models, calculators, utility\n'), ((5353, 5457), 'mff.calculators.EamManySpecies', 'calculators.EamManySpecies', (['(r_cut * 2)', 'elements', 'm.grid', 'm.gp.kernel.theta[2]', 'm.gp.kernel.theta[3]'], {}), '(r_cut * 2, elements, m.grid, m.gp.kernel.theta[2\n ], m.gp.kernel.theta[3])\n', (5379, 5457), False, 'from mff import models, calculators, utility\n'), ((5678, 5816), 'mff.calculators.TwoThreeEamSingleSpecies', 'calculators.TwoThreeEamSingleSpecies', (['(r_cut * 2)', 'm.grid_2b', 'm.grid_3b', 'm.grid_eam', 'm.gp_eam.kernel.theta[2]', 'm.gp_eam.kernel.theta[3]'], {}), '(r_cut * 2, m.grid_2b, m.grid_3b, m.\n grid_eam, m.gp_eam.kernel.theta[2], m.gp_eam.kernel.theta[3])\n', (5714, 5816), False, 'from mff import models, calculators, utility\n'), ((5900, 6046), 
'mff.calculators.TwoThreeEamManySpecies', 'calculators.TwoThreeEamManySpecies', (['(r_cut * 2)', 'elements', 'm.grid_2b', 'm.grid_3b', 'm.grid_eam', 'm.gp_eam.kernel.theta[2]', 'm.gp_eam.kernel.theta[3]'], {}), '(r_cut * 2, elements, m.grid_2b, m.\n grid_3b, m.grid_eam, m.gp_eam.kernel.theta[2], m.gp_eam.kernel.theta[3])\n', (5934, 6046), False, 'from mff import models, calculators, utility\n')]
from __future__ import print_function import torch import numpy as np from PIL import Image import inspect import re import numpy as np import os import collections import pickle # Converts a Tensor into a Numpy array # |imtype|: the desired type of the converted numpy array def tensor2im(image_tensor, imtype=np.uint8, cvt_rgb=True): image_numpy = image_tensor[0].cpu().float().numpy() if image_numpy.shape[0] == 1 and cvt_rgb: image_numpy = np.tile(image_numpy, (3, 1, 1)) image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 return image_numpy.astype(imtype) def tensor2vec(vector_tensor): numpy_vec = vector_tensor.data.cpu().numpy() if numpy_vec.ndim == 4: return numpy_vec[:, :, 0, 0] else: return numpy_vec def pickle_load(file_name): data = None with open(file_name, 'rb') as f: data = pickle.load(f) return data def pickle_save(file_name, data): with open(file_name, 'wb') as f: pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL) def diagnose_network(net, name='network'): mean = 0.0 count = 0 for param in net.parameters(): if param.grad is not None: mean += torch.mean(torch.abs(param.grad.data)) count += 1 if count > 0: mean = mean / count print(name) print(mean) def interp_z(z0, z1, num_frames, interp_mode='linear'): zs = [] if interp_mode == 'linear': for n in range(num_frames): ratio = n / float(num_frames - 1) z_t = (1 - ratio) * z0 + ratio * z1 zs.append(z_t[np.newaxis, :]) zs = np.concatenate(zs, axis=0).astype(np.float32) if interp_mode == 'slerp': # st() z0_n = z0 / (np.linalg.norm(z0)+1e-10) z1_n = z1 / (np.linalg.norm(z1)+1e-10) omega = np.arccos(np.dot(z0_n, z1_n)) sin_omega = np.sin(omega) if sin_omega < 1e-10 and sin_omega > -1e-10: zs = interp_z(z0, z1, num_frames, interp_mode='linear') else: for n in range(num_frames): ratio = n / float(num_frames - 1) z_t = np.sin((1 - ratio) * omega) / sin_omega * z0 + np.sin(ratio * omega) / sin_omega * z1 zs.append(z_t[np.newaxis, :]) zs = np.concatenate(zs, axis=0).astype(np.float32) return zs 
def save_image(image_numpy, image_path): image_pil = Image.fromarray(image_numpy) image_pil.save(image_path, 'JPEG', quality=100) def info(object, spacing=10, collapse=1): """Print methods and doc strings. Takes module, class, list, dictionary, or string.""" methodList = [e for e in dir(object) if isinstance(getattr(object, e), collections.Callable)] processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s) print("\n".join(["%s %s" % (method.ljust(spacing), processFunc(str(getattr(object, method).__doc__))) for method in methodList])) def varname(p): for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]: m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line) if m: return m.group(1) def print_numpy(x, val=True, shp=False): x = x.astype(np.float64) if shp: print('shape,', x.shape) if val: x = x.flatten() print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) def mkdirs(paths): if isinstance(paths, list) and not isinstance(paths, str): for path in paths: mkdir(path) else: mkdir(paths) def mkdir(path): if not os.path.exists(path): os.makedirs(path) def normalize_tensor(in_feat, eps=1e-10): norm_factor = torch.sqrt(torch.sum(in_feat**2, dim=1)).repeat(1, in_feat.size()[1], 1, 1) return in_feat / (norm_factor+eps) def cos_sim(in0, in1): in0_norm = normalize_tensor(in0) in1_norm = normalize_tensor(in1) return torch.mean(torch.sum(in0_norm*in1_norm, dim=1))
[ "torch.sum", "numpy.linalg.norm", "numpy.sin", "re.search", "os.path.exists", "numpy.mean", "numpy.max", "numpy.dot", "numpy.concatenate", "numpy.min", "numpy.tile", "torch.abs", "pickle.load", "numpy.std", "numpy.transpose", "PIL.Image.fromarray", "numpy.median", "pickle.dump", ...
[((2424, 2452), 'PIL.Image.fromarray', 'Image.fromarray', (['image_numpy'], {}), '(image_numpy)\n', (2439, 2452), False, 'from PIL import Image\n'), ((462, 493), 'numpy.tile', 'np.tile', (['image_numpy', '(3, 1, 1)'], {}), '(image_numpy, (3, 1, 1))\n', (469, 493), True, 'import numpy as np\n'), ((887, 901), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (898, 901), False, 'import pickle\n'), ((999, 1053), 'pickle.dump', 'pickle.dump', (['data', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(data, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (1010, 1053), False, 'import pickle\n'), ((1898, 1911), 'numpy.sin', 'np.sin', (['omega'], {}), '(omega)\n', (1904, 1911), True, 'import numpy as np\n'), ((3122, 3193), 're.search', 're.search', (['"""\\\\bvarname\\\\s*\\\\(\\\\s*([A-Za-z_][A-Za-z0-9_]*)\\\\s*\\\\)"""', 'line'], {}), "('\\\\bvarname\\\\s*\\\\(\\\\s*([A-Za-z_][A-Za-z0-9_]*)\\\\s*\\\\)', line)\n", (3131, 3193), False, 'import re\n'), ((3740, 3760), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3754, 3760), False, 'import os\n'), ((3770, 3787), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (3781, 3787), False, 'import os\n'), ((4086, 4123), 'torch.sum', 'torch.sum', (['(in0_norm * in1_norm)'], {'dim': '(1)'}), '(in0_norm * in1_norm, dim=1)\n', (4095, 4123), False, 'import torch\n'), ((1858, 1876), 'numpy.dot', 'np.dot', (['z0_n', 'z1_n'], {}), '(z0_n, z1_n)\n', (1864, 1876), True, 'import numpy as np\n'), ((513, 549), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (525, 549), True, 'import numpy as np\n'), ((1229, 1255), 'torch.abs', 'torch.abs', (['param.grad.data'], {}), '(param.grad.data)\n', (1238, 1255), False, 'import torch\n'), ((1645, 1671), 'numpy.concatenate', 'np.concatenate', (['zs'], {'axis': '(0)'}), '(zs, axis=0)\n', (1659, 1671), True, 'import numpy as np\n'), ((1759, 1777), 'numpy.linalg.norm', 'np.linalg.norm', (['z0'], {}), '(z0)\n', (1773, 1777), True, 
'import numpy as np\n'), ((1806, 1824), 'numpy.linalg.norm', 'np.linalg.norm', (['z1'], {}), '(z1)\n', (1820, 1824), True, 'import numpy as np\n'), ((2304, 2330), 'numpy.concatenate', 'np.concatenate', (['zs'], {'axis': '(0)'}), '(zs, axis=0)\n', (2318, 2330), True, 'import numpy as np\n'), ((3075, 3097), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (3095, 3097), False, 'import inspect\n'), ((3861, 3891), 'torch.sum', 'torch.sum', (['(in_feat ** 2)'], {'dim': '(1)'}), '(in_feat ** 2, dim=1)\n', (3870, 3891), False, 'import torch\n'), ((3484, 3494), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3491, 3494), True, 'import numpy as np\n'), ((3496, 3505), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (3502, 3505), True, 'import numpy as np\n'), ((3507, 3516), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (3513, 3516), True, 'import numpy as np\n'), ((3518, 3530), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (3527, 3530), True, 'import numpy as np\n'), ((3532, 3541), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (3538, 3541), True, 'import numpy as np\n'), ((2159, 2186), 'numpy.sin', 'np.sin', (['((1 - ratio) * omega)'], {}), '((1 - ratio) * omega)\n', (2165, 2186), True, 'import numpy as np\n'), ((2206, 2227), 'numpy.sin', 'np.sin', (['(ratio * omega)'], {}), '(ratio * omega)\n', (2212, 2227), True, 'import numpy as np\n')]
# Created on 2018/12 # Author: <NAME> import os import time import numpy as np import torch from torch.utils.tensorboard import SummaryWriter class Solver(object): def __init__(self, data, model, optimizer, epochs, save_folder, checkpoint, continue_from, model_path, print_freq, early_stop, max_norm, lr, lr_override, log_dir, lamb, decay_period, config, multidecoder, decay): self.tr_loader = data['tr_loader'] self.cv_loader = data['cv_loader'] self.model = model self.optimizer = optimizer self.lr_override = lr_override # Training config self.epochs = epochs self.early_stop = early_stop self.max_norm = max_norm self.lamb = lamb self.decay_period = decay_period self.decay = decay self.multidecoder = multidecoder if multidecoder: from loss_multidecoder import cal_loss else: from loss_hungarian import cal_loss self.loss_func = cal_loss # save and load model self.save_folder = save_folder self.checkpoint = checkpoint self.continue_from = continue_from self.model_path = model_path self.config = config # logging self.print_freq = print_freq # visualizing loss using visdom self.tr_loss = torch.Tensor(self.epochs) self.cv_loss = torch.Tensor(self.epochs) self._reset() self.writer = SummaryWriter(log_dir) def _reset(self): # Reset load = self.continue_from and os.path.exists(self.continue_from) self.start_epoch = 0 self.val_no_impv = 0 self.prev_val_loss = float("inf") self.best_val_loss = float("inf") if load: # if the checkpoint model exists print('Loading checkpoint model %s' % self.continue_from) package = torch.load(self.continue_from) self.model.module.load_state_dict(package['state_dict']) if not self.lr_override: self.optimizer.load_state_dict(package['optim_dict']) print('load lr at %s' % str(self.optimizer.state_dict()['param_groups'])) else: print('lr override to %s' % str(self.optimizer.state_dict()['param_groups'])) self.start_epoch = int(package.get('epoch', 1)) self.tr_loss[:self.start_epoch] = package['tr_loss'][:self.start_epoch] 
self.cv_loss[:self.start_epoch] = package['cv_loss'][:self.start_epoch] self.val_no_impv = package.get('val_no_impv', 0) if 'random_state' in package: torch.set_rng_state(package['random_state']) self.prev_val_loss = self.cv_loss[self.start_epoch - 1] self.best_val_loss = min(self.cv_loss[:self.start_epoch]) # Create save folder os.makedirs(self.save_folder, exist_ok=True) self.halving = False def train(self): # Train model multi-epoches for epoch in range(self.start_epoch, self.epochs): if epoch % self.decay_period == (self.decay_period - 1): optim_state = self.optimizer.state_dict() for param_group in optim_state['param_groups']: param_group['lr'] = param_group['lr'] * self.decay self.optimizer.load_state_dict(optim_state) print('Learning rate adjusted to: %s' % str(optim_state['param_groups'])) self.writer.add_scalar('LR/lr', self.optimizer.state_dict()["param_groups"][0]["lr"], epoch) # Train one epoch print("Training...") self.model.train() # Turn on BatchNorm & Dropout start = time.time() tr_avg_loss, tr_avg_snr, tr_avg_acc = self._run_one_epoch(epoch) print('-' * 85) print('Train Summary | End of Epoch {0} | Time {1:.2f}s | ' 'Train Loss {2:.3f}'.format( epoch + 1, time.time() - start, tr_avg_loss)) print('-' * 85) # Cross validation print('Cross validation...') self.model.eval() # Turn off Batchnorm & Dropout val_loss, val_snr, val_acc = self._run_one_epoch(epoch, cross_valid=True) print('-' * 85) print('Valid Summary | End of Epoch {0} | Time {1:.2f}s | ' 'Valid Loss {2:.3f}'.format( epoch + 1, time.time() - start, val_loss)) print('-' * 85) self.writer.add_scalar('Loss/per_epoch_cv', val_loss, epoch) self.writer.add_scalar('SNR/per_epoch_cv', val_snr.mean(), epoch) self.writer.add_scalar('Accuracy/per_epoch_cv', val_acc, epoch) self.writer.add_scalar('snr2/per_epoch_cv', val_snr[0], epoch) self.writer.add_scalar('snr3/per_epoch_cv', val_snr[1], epoch) self.writer.add_scalar('snr4/per_epoch_cv', val_snr[2], epoch) 
self.writer.add_scalar('snr5/per_epoch_cv', val_snr[3], epoch) # Adjust learning rate (halving) if val_loss >= self.prev_val_loss: self.val_no_impv += 1 if self.val_no_impv >= 10 and self.early_stop: print("No improvement for 10 epochs, early stopping.") break else: self.val_no_impv = 0 self.prev_val_loss = val_loss # Save the best model self.tr_loss[epoch] = tr_avg_loss self.cv_loss[epoch] = val_loss package = self.model.module.serialize(self.model.module, self.optimizer, epoch + 1, tr_loss=self.tr_loss, cv_loss=self.cv_loss, val_no_impv = self.val_no_impv, random_state=torch.get_rng_state()) if val_loss < self.best_val_loss: self.best_val_loss = val_loss file_path = os.path.join(self.save_folder, self.model_path) torch.save(package, file_path) print("Find better validated model, saving to %s" % file_path) # Save model each epoch, nd make a copy at last.pth if self.checkpoint: file_path = os.path.join( self.save_folder, 'epoch%d.pth.tar' % (epoch + 1)) torch.save(package, file_path) print('Saving checkpoint model to %s' % file_path) # update config#.pth torch.save(package, os.path.join(self.save_folder, self.config + '.pth')) def _run_one_epoch(self, epoch, cross_valid=False): start = time.time() total_loss = 0 total_snr = np.zeros(4) total_accuracy = 0 data_loader = self.tr_loader if not cross_valid else self.cv_loader current_device = next(self.model.module.parameters()).device counts = np.zeros(4) for i, (padded_mixture, mixture_lengths, padded_source) in enumerate(data_loader): for tmp_ps in padded_source: counts[tmp_ps.size(0) - 2] += 1 B = len(padded_source) padded_mixture = padded_mixture.cuda(current_device) padded_source = [tmp_ps.cuda(current_device) for tmp_ps in padded_source] num_sources = torch.Tensor([tmps_ps.size(0) for tmps_ps in padded_source]).long() try: if not cross_valid: estimate_source_list, vad_list = self.model(padded_mixture, num_sources, True) else: with torch.no_grad(): estimate_source_list, vad_list = self.model(padded_mixture, 
num_sources, True) except Exception as e: print('forward prop failed', padded_mixture.shape, e) continue if not self.multidecoder: # [#stages, B, ...] estimate_source_list = estimate_source_list.transpose(0, 1) vad_list = vad_list.transpose(0, 1) loss = [] snr = [] accuracy = [] for (estimate_source, vad) in zip(estimate_source_list, vad_list): step_loss, step_snr, acc = \ self.loss_func(padded_source, estimate_source, mixture_lengths, vad, lamb=self.lamb) loss.append(step_loss) snr.append(step_snr) accuracy.append(acc) loss = torch.stack(loss) snr = torch.stack(snr) accuracy = torch.stack(accuracy) else: # if using multidecoder # list of B, each [num_stages, spks, T] estimate_sources = [estimate_source_list[k, :, :num_sources[k], :] for k in range(B)] loss = [] snr = [] accuracy = [] for idx in range(B): # list of [num_stages, spks, T] # [num_stages, num_decoders] vad = vad_list[idx] step_loss, step_snr, acc = \ self.loss_func(padded_source[idx], estimate_sources[idx], mixture_lengths[idx], vad, self.lamb) # [num_stages] loss.append(step_loss) snr.append(step_snr) accuracy.append(acc) total_snr[num_sources[idx] - 2] += step_snr[-1].item() loss = torch.stack(loss, dim=0).mean(dim=0) snr = torch.stack(snr, dim=0).mean(dim=0) accuracy = torch.stack(accuracy, dim=0).mean(dim=0) if not cross_valid: # training loss = loss.mean() snr = snr.mean() accuracy = accuracy.mean() else: loss = loss[-1] snr = snr[-1] accuracy = accuracy[-1] try: if not cross_valid: self.optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.max_norm) self.optimizer.step() except Exception as e: print('backprop failed', padded_mixture.shape, e) continue total_loss += loss.item() total_accuracy += accuracy.item() if i % self.print_freq == 0: print(f'Epoch {epoch + 1} | Iter {i + 1} | Average Loss {total_loss / (i + 1): .2f} | ' f'Current Loss {loss.item(): .2f} | Average SNR {str(total_snr / counts)} | ' f'Average accuracy {total_accuracy / (i + 1):.2f} | 
{1000 * (time.time() - start) / (i + 1):.2f} ms/batch', flush=True) mode = 'cv' if cross_valid else 'train' self.writer.add_scalar(f'Loss/{mode}', loss.item(), epoch*len(data_loader)+i) self.writer.add_scalar(f'SNR/{mode}', snr.item(), epoch*len(data_loader)+i) self.writer.add_scalar(f'Accuracy/{mode}', accuracy.item(), epoch*len(data_loader)+i) self.writer.add_scalar(f'snr2/{mode}', total_snr[0] / counts[0], epoch*len(data_loader)+i) self.writer.add_scalar(f'snr3/{mode}', total_snr[1] / counts[1], epoch*len(data_loader)+i) self.writer.add_scalar(f'snr4/{mode}', total_snr[2] / counts[2], epoch*len(data_loader)+i) self.writer.add_scalar(f'snr5/{mode}', total_snr[3] / counts[3], epoch*len(data_loader)+i) if i <= 20: self.writer.add_audio(f"Speech/{i}_original {mode}", padded_mixture[0], epoch, sample_rate=8000) output_example = estimate_sources[0][-1] for channel, example in enumerate(output_example): self.writer.add_audio(f"Speech/{i}_reconstructed {mode} {channel}", example / (example.max() - example.min()), epoch, sample_rate=8000) self.writer.add_text(f'counts/{mode}', str(counts), global_step=epoch) return total_loss / (i + 1), total_snr / counts, total_accuracy / (i + 1)
[ "torch.utils.tensorboard.SummaryWriter", "os.path.exists", "os.makedirs", "torch.load", "torch.stack", "torch.Tensor", "os.path.join", "torch.set_rng_state", "numpy.zeros", "torch.get_rng_state", "torch.save", "torch.no_grad", "time.time" ]
[((1354, 1379), 'torch.Tensor', 'torch.Tensor', (['self.epochs'], {}), '(self.epochs)\n', (1366, 1379), False, 'import torch\n'), ((1403, 1428), 'torch.Tensor', 'torch.Tensor', (['self.epochs'], {}), '(self.epochs)\n', (1415, 1428), False, 'import torch\n'), ((1475, 1497), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['log_dir'], {}), '(log_dir)\n', (1488, 1497), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2884, 2928), 'os.makedirs', 'os.makedirs', (['self.save_folder'], {'exist_ok': '(True)'}), '(self.save_folder, exist_ok=True)\n', (2895, 2928), False, 'import os\n'), ((6894, 6905), 'time.time', 'time.time', ([], {}), '()\n', (6903, 6905), False, 'import time\n'), ((6949, 6960), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (6957, 6960), True, 'import numpy as np\n'), ((7150, 7161), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (7158, 7161), True, 'import numpy as np\n'), ((1575, 1609), 'os.path.exists', 'os.path.exists', (['self.continue_from'], {}), '(self.continue_from)\n', (1589, 1609), False, 'import os\n'), ((1894, 1924), 'torch.load', 'torch.load', (['self.continue_from'], {}), '(self.continue_from)\n', (1904, 1924), False, 'import torch\n'), ((3739, 3750), 'time.time', 'time.time', ([], {}), '()\n', (3748, 3750), False, 'import time\n'), ((2650, 2694), 'torch.set_rng_state', 'torch.set_rng_state', (["package['random_state']"], {}), "(package['random_state'])\n", (2669, 2694), False, 'import torch\n'), ((6201, 6248), 'os.path.join', 'os.path.join', (['self.save_folder', 'self.model_path'], {}), '(self.save_folder, self.model_path)\n', (6213, 6248), False, 'import os\n'), ((6265, 6295), 'torch.save', 'torch.save', (['package', 'file_path'], {}), '(package, file_path)\n', (6275, 6295), False, 'import torch\n'), ((6500, 6563), 'os.path.join', 'os.path.join', (['self.save_folder', "('epoch%d.pth.tar' % (epoch + 1))"], {}), "(self.save_folder, 'epoch%d.pth.tar' % (epoch + 1))\n", (6512, 6563), False, 'import os\n'), 
((6601, 6631), 'torch.save', 'torch.save', (['package', 'file_path'], {}), '(package, file_path)\n', (6611, 6631), False, 'import torch\n'), ((6765, 6817), 'os.path.join', 'os.path.join', (['self.save_folder', "(self.config + '.pth')"], {}), "(self.save_folder, self.config + '.pth')\n", (6777, 6817), False, 'import os\n'), ((8765, 8782), 'torch.stack', 'torch.stack', (['loss'], {}), '(loss)\n', (8776, 8782), False, 'import torch\n'), ((8805, 8821), 'torch.stack', 'torch.stack', (['snr'], {}), '(snr)\n', (8816, 8821), False, 'import torch\n'), ((8849, 8870), 'torch.stack', 'torch.stack', (['accuracy'], {}), '(accuracy)\n', (8860, 8870), False, 'import torch\n'), ((6058, 6079), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (6077, 6079), False, 'import torch\n'), ((4008, 4019), 'time.time', 'time.time', ([], {}), '()\n', (4017, 4019), False, 'import time\n'), ((4473, 4484), 'time.time', 'time.time', ([], {}), '()\n', (4482, 4484), False, 'import time\n'), ((7839, 7854), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7852, 7854), False, 'import torch\n'), ((9761, 9785), 'torch.stack', 'torch.stack', (['loss'], {'dim': '(0)'}), '(loss, dim=0)\n', (9772, 9785), False, 'import torch\n'), ((9820, 9843), 'torch.stack', 'torch.stack', (['snr'], {'dim': '(0)'}), '(snr, dim=0)\n', (9831, 9843), False, 'import torch\n'), ((9883, 9911), 'torch.stack', 'torch.stack', (['accuracy'], {'dim': '(0)'}), '(accuracy, dim=0)\n', (9894, 9911), False, 'import torch\n'), ((11054, 11065), 'time.time', 'time.time', ([], {}), '()\n', (11063, 11065), False, 'import time\n')]
from pathlib import Path

import cv2
import numpy as np
from pfrl.wrappers import atari_wrappers
from matplotlib import pyplot as plt
from sklearn.metrics import accuracy_score

from bovw.utils import prepare_data
from bovw import BOVWClassifier


def get_player_position(ram):
    """Given the emulator RAM state, return the (x, y) position of the player.

    Parameters
    ----------
    ram : array-like
        Full Atari RAM dump, as returned by ``env.unwrapped.ale.getRAM()``.

    Returns
    -------
    tuple[int, int]
        Player (x, y) coordinates read from RAM addresses 'aa' and 'ab'.
    """
    def _get_index(address):
        # RAM addresses are two hex digits; row indexing starts at 0x8.
        assert type(address) == str and len(address) == 2
        row, col = tuple(address)
        row = int(row, 16) - 8
        col = int(col, 16)
        return row * 16 + col

    def _get_byte(ram, address):
        # Return the byte at the specified emulator RAM location.
        return ram[_get_index(address)]

    x = int(_get_byte(ram, 'aa'))
    y = int(_get_byte(ram, 'ab'))
    return x, y


def distance(pos1, pos2):
    """Euclidean (L2) distance between two positions."""
    return np.linalg.norm(np.array(pos1) - np.array(pos2))


def collect_monte_frames(total_steps=200, save_dir='data/monte'):
    """Run Montezuma's Revenge with random actions and save frames to disk.

    Frames where the player is at the start position (77, 235) are written
    to ``<save_dir>/start``; all other frames go to ``<save_dir>/non_start``.

    Parameters
    ----------
    total_steps : int
        Number of environment steps (and saved frames) to collect.
    save_dir : str
        Root directory for the 'start' / 'non_start' subdirectories.
    """
    env = atari_wrappers.wrap_deepmind(
        atari_wrappers.make_atari('MontezumaRevengeNoFrameskip-v4',
                                  max_frames=30 * 60 * 60),
        episode_life=True,
        clip_rewards=True,
    )
    env.seed(0)

    # Create both target directories once up front (previously re-created
    # on every single step).
    start_dir = Path(save_dir) / 'start'
    non_start_dir = Path(save_dir) / 'non_start'
    start_dir.mkdir(exist_ok=True, parents=True)
    non_start_dir.mkdir(exist_ok=True, parents=True)

    state = env.reset()
    step = 0
    while step < total_steps:
        # The wrapped observation is a frame stack; keep the newest frame.
        im = np.array(state)[-1, :, :]
        pos = get_player_position(env.unwrapped.ale.getRAM())
        # (77, 235) is the player's spawn location; distance < 1 == "at spawn".
        file_dir = start_dir if distance(pos, (77, 235)) < 1 else non_start_dir
        plt.imsave(file_dir.joinpath(f"step_{step}.png"), im)

        # Random-policy control.
        action = env.action_space.sample()
        state, reward, done, info = env.step(action)
        if done or info.get('needs_reset', False):
            state = env.reset()
        step += 1


def visualize_sift_features(images, save_dir='results/monte_sift'):
    """Save a visualization of SIFT keypoints for each input image.

    Fixes vs. the previous revision:
    - SIFT is now run on the grayscale image that was already being computed
      (it was computed but then ignored in favor of the color image).
    - The annotated image is rendered into a fresh buffer instead of
      overwriting the caller's input array via the out-image parameter.

    Parameters
    ----------
    images : list of np.ndarray
        BGR images as loaded by ``cv2.imread``.
    save_dir : str
        Directory where the ``<i>_sift.png`` visualizations are written.
    """
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True, parents=True)

    sift = cv2.SIFT_create()  # hoisted: one detector instance for all images
    for i, image in enumerate(images):
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        keypoints, descriptors = sift.detectAndCompute(gray_image, None)
        # outImage=None -> draw into a new array; do not clobber the input.
        sift_image = cv2.drawKeypoints(gray_image, keypoints, None)
        cv2.imwrite(str(save_dir.joinpath(f"{i}_sift.png")), sift_image)
    # No-ops without open HighGUI windows; kept for interactive use.
    cv2.waitKey(0)
    cv2.destroyAllWindows()


if __name__ == '__main__':
    # collect frames
    collect_monte_frames(total_steps=200, save_dir='data/monte_train')
    collect_monte_frames(total_steps=30, save_dir='data/monte_test')

    # load data once (previously 'data/monte_train' was read and decoded twice)
    training_image_paths, training_classes = prepare_data('data/monte_train')
    training_images = [cv2.imread(path, cv2.IMREAD_COLOR) for path in training_image_paths]
    testing_image_paths, testing_classes = prepare_data('data/monte_test')
    testing_images = [cv2.imread(path, cv2.IMREAD_COLOR) for path in testing_image_paths]

    # visualize sift features of the training images
    visualize_sift_features(training_images, save_dir='results/monte_sift')

    # train the classifier
    classifier = BOVWClassifier(num_clusters=50)
    classifier.fit(training_images, training_classes)

    # test the classifier
    predictions = classifier.predict(testing_images)
    accuracy = accuracy_score(testing_classes, predictions)
    print("accuracy: {}".format(accuracy))
[ "bovw.BOVWClassifier", "cv2.drawKeypoints", "pathlib.Path", "pfrl.wrappers.atari_wrappers.make_atari", "numpy.array", "cv2.SIFT_create", "cv2.destroyAllWindows", "cv2.cvtColor", "bovw.utils.prepare_data", "cv2.waitKey", "sklearn.metrics.accuracy_score", "cv2.imread" ]
[((2268, 2282), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (2272, 2282), False, 'from pathlib import Path\n'), ((3098, 3130), 'bovw.utils.prepare_data', 'prepare_data', (['"""data/monte_train"""'], {}), "('data/monte_train')\n", (3110, 3130), False, 'from bovw.utils import prepare_data\n'), ((3334, 3366), 'bovw.utils.prepare_data', 'prepare_data', (['"""data/monte_train"""'], {}), "('data/monte_train')\n", (3346, 3366), False, 'from bovw.utils import prepare_data\n'), ((3502, 3533), 'bovw.utils.prepare_data', 'prepare_data', (['"""data/monte_test"""'], {}), "('data/monte_test')\n", (3514, 3533), False, 'from bovw.utils import prepare_data\n'), ((3669, 3700), 'bovw.BOVWClassifier', 'BOVWClassifier', ([], {'num_clusters': '(50)'}), '(num_clusters=50)\n', (3683, 3700), False, 'from bovw import BOVWClassifier\n'), ((3850, 3894), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['testing_classes', 'predictions'], {}), '(testing_classes, predictions)\n', (3864, 3894), False, 'from sklearn.metrics import accuracy_score\n'), ((1251, 1339), 'pfrl.wrappers.atari_wrappers.make_atari', 'atari_wrappers.make_atari', (['"""MontezumaRevengeNoFrameskip-v4"""'], {'max_frames': '(30 * 60 * 60)'}), "('MontezumaRevengeNoFrameskip-v4', max_frames=30 *\n 60 * 60)\n", (1276, 1339), False, 'from pfrl.wrappers import atari_wrappers\n'), ((2421, 2460), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2433, 2460), False, 'import cv2\n'), ((2502, 2519), 'cv2.SIFT_create', 'cv2.SIFT_create', ([], {}), '()\n', (2517, 2519), False, 'import cv2\n'), ((2649, 2696), 'cv2.drawKeypoints', 'cv2.drawKeypoints', (['gray_image', 'keypoints', 'image'], {}), '(gray_image, keypoints, image)\n', (2666, 2696), False, 'import cv2\n'), ((2803, 2817), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2814, 2817), False, 'import cv2\n'), ((2826, 2849), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2847, 2849), 
False, 'import cv2\n'), ((3145, 3179), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_COLOR'], {}), '(path, cv2.IMREAD_COLOR)\n', (3155, 3179), False, 'import cv2\n'), ((3390, 3424), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_COLOR'], {}), '(path, cv2.IMREAD_COLOR)\n', (3400, 3424), False, 'import cv2\n'), ((3556, 3590), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_COLOR'], {}), '(path, cv2.IMREAD_COLOR)\n', (3566, 3590), False, 'import cv2\n'), ((932, 946), 'numpy.array', 'np.array', (['pos1'], {}), '(pos1)\n', (940, 946), True, 'import numpy as np\n'), ((949, 963), 'numpy.array', 'np.array', (['pos2'], {}), '(pos2)\n', (957, 963), True, 'import numpy as np\n'), ((1529, 1544), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (1537, 1544), True, 'import numpy as np\n'), ((1681, 1695), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (1685, 1695), False, 'from pathlib import Path\n'), ((1743, 1757), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (1747, 1757), False, 'from pathlib import Path\n')]
#!/usr/bin/python ######################################################################################################################## # # Copyright (c) 2014, Regents of the University of California # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the # following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following # disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the # following disclaimer in the documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ########################################################################################################################
"""ADC library """
import laygo
import numpy as np
from math import log
import yaml
import os
import laygo.GridLayoutGeneratorHelper as laygenhelper #utility functions
#import logging;logging.basicConfig(level=logging.DEBUG)

def generate_boundary(laygen, objectname_pfix, placement_grid,
                      devname_bottom, devname_top, devname_left, devname_right,
                      shape_bottom=None, shape_top=None, shape_left=None, shape_right=None,
                      transform_bottom=None, transform_top=None, transform_left=None, transform_right=None,
                      origin=np.array([0, 0])):
    """Place a ring of boundary cells (bottom row, left/right columns, top row)
    to resolve boundary design rules around a layout block.

    Each devname_* argument is a list of template names for that edge; the
    matching shape_*/transform_* lists default to [1, 1] arrays and 'R0'.
    Returns [dev_bottom, dev_top, dev_left, dev_right] instance lists.
    """
    # generate a boundary structure to resolve boundary design rules
    pg = placement_grid
    # parameters -- fill in per-edge defaults when not provided
    if shape_bottom == None:
        shape_bottom = [np.array([1, 1]) for d in devname_bottom]
    if shape_top == None:
        shape_top = [np.array([1, 1]) for d in devname_top]
    if shape_left == None:
        shape_left = [np.array([1, 1]) for d in devname_left]
    if shape_right == None:
        shape_right = [np.array([1, 1]) for d in devname_right]
    if transform_bottom == None:
        transform_bottom = ['R0' for d in devname_bottom]
    if transform_top == None:
        transform_top = ['R0' for d in devname_top]
    if transform_left == None:
        transform_left = ['R0' for d in devname_left]
    if transform_right == None:
        transform_right = ['R0' for d in devname_right]
    # bottom: first cell placed at origin, the rest chained to its right
    dev_bottom = []
    dev_bottom.append(laygen.place("I" + objectname_pfix + 'BNDBTM0', devname_bottom[0], pg, xy=origin,
                                   shape=shape_bottom[0], transform=transform_bottom[0]))
    for i, d in enumerate(devname_bottom[1:]):
        dev_bottom.append(
            laygen.relplace(name="I" + objectname_pfix + 'BNDBTM' + str(i + 1), templatename=d, gridname=pg,
                            refinstname=dev_bottom[-1].name, shape=shape_bottom[i + 1],
                            transform=transform_bottom[i + 1]))
    # left column: stacked upward from the first bottom cell
    dev_left = []
    dev_left.append(laygen.relplace(name="I" + objectname_pfix + 'BNDLFT0', templatename=devname_left[0], gridname=pg,
                                    refinstname=dev_bottom[0].name, direction='top', shape=shape_left[0],
                                    transform=transform_left[0]))
    for i, d in enumerate(devname_left[1:]):
        dev_left.append(laygen.relplace(name="I" + objectname_pfix + 'BNDLFT' + str(i + 1), templatename=d, gridname=pg,
                                        refinstname=dev_left[-1].name, direction='top', shape=shape_left[i + 1],
                                        transform=transform_left[i + 1]))
    # right column: stacked upward from the last bottom cell
    dev_right = []
    dev_right.append(laygen.relplace(name="I" + objectname_pfix + 'BNDRHT0', templatename=devname_right[0], gridname=pg,
                                     refinstname=dev_bottom[-1].name, direction='top', shape=shape_right[0],
                                     transform=transform_right[0]))
    for i, d in enumerate(devname_right[1:]):
        dev_right.append(
            laygen.relplace(name="I" + objectname_pfix + 'BNDRHT' + str(i + 1), templatename=d, gridname=pg,
                            refinstname=dev_right[-1].name, direction='top', shape=shape_right[i + 1],
                            transform=transform_right[i + 1]))
    # top row: starts above the last left-column cell, chained to the right
    dev_top = []
    dev_top.append(laygen.relplace(name="I" + objectname_pfix + 'BNDTOP0', templatename=devname_top[0], gridname=pg,
                                   refinstname=dev_left[-1].name, direction='top', shape=shape_top[0],
                                   transform=transform_top[0]))
    for i, d in enumerate(devname_top[1:]):
        dev_top.append(laygen.relplace(name="I" + objectname_pfix + 'BNDTOP' + str(i + 1), templatename=d, gridname=pg,
                                       refinstname=dev_top[-1].name, shape=shape_top[i + 1],
                                       transform=transform_top[i + 1]))
    return [dev_bottom, dev_top, dev_left, dev_right]

def create_power_pin_from_inst(laygen, layer, gridname, inst_left, inst_right):
    """Create VDD/VSS power pins spanning from inst_left's rail to inst_right's rail."""
    rvdd0_pin_xy = laygen.get_inst_pin_xy(inst_left.name, 'VDD', gridname, sort=True)
    rvdd1_pin_xy = laygen.get_inst_pin_xy(inst_right.name, 'VDD', gridname, sort=True)
    rvss0_pin_xy = laygen.get_inst_pin_xy(inst_left.name, 'VSS', gridname, sort=True)
    rvss1_pin_xy = laygen.get_inst_pin_xy(inst_right.name, 'VSS', gridname, sort=True)
    # pin rectangle: lower-left of the left rail to upper-right of the right rail
    laygen.pin(name='VDD', layer=layer, xy=np.vstack((rvdd0_pin_xy[0], rvdd1_pin_xy[1])), gridname=gridname)
    laygen.pin(name='VSS', layer=layer, xy=np.vstack((rvss0_pin_xy[0], rvss1_pin_xy[1])), gridname=gridname)

def generate_r2rdac_unit(laygen, objectname_pfix, templib_logic, placement_grid,
                         routing_grid_m2m3, routing_grid_m3m4, m=2, m_series=4, origin=np.array([0, 0])):
    """Generate one R-2R DAC switch unit: m_series tgate cells in series,
    with shared EN/ENB control rails and I/O pins on the chain ends.

    NOTE(review): the body reads `rg_m2m3` which is not assigned from
    routing_grid_m2m3 here -- presumably a module-level global set by the
    top-level script; verify before reuse.
    """
    pg = placement_grid
    rg_m3m4 = routing_grid_m3m4
    tgate_name = 'tgate_'+str(m)+'x'
    # placement: a row of m_series tgate cells
    itgate = laygen.place(name="I" + objectname_pfix + 'TG0', templatename=tgate_name, gridname=pg, xy=origin,
                          template_libname=templib_logic, shape=np.array([m_series,1]))
    # reference coordinates (top-right corner of the last tgate's VDD pin)
    x0 = laygen.get_inst_pin_xy(itgate.name, 'VDD', rg_m2m3, index=[m_series-1, 0])[1][0]
    y0 = laygen.get_inst_pin_xy(itgate.name, 'VDD', rg_m2m3, index=[m_series-1, 0])[1][1]
    # internal routes: daisy-chain O[i] -> I[i+1] on metal4, alternating track by i%2
    for i in range(m_series-1):
        laygen.route(None, laygen.layers['metal'][4],
                     xy0=laygen.get_inst_pin_xy(itgate.name, 'O', rg_m3m4, index=[i,0])[0] - [0, i%2],
                     xy1=laygen.get_inst_pin_xy(itgate.name, 'I', rg_m3m4, index=[i+1,0])[0] - [0, i%2],
                     gridname0=rg_m3m4, via0=[0,0], via1=[0,0])
    # shared EN / ENB rails across all tgates (offset 1 and 2 tracks above the pin)
    ren = laygen.route(None, laygen.layers['metal'][4],
                       xy0=laygen.get_inst_pin_xy(itgate.name, 'EN', rg_m3m4, index=[0, 0])[0] + [0, 1],
                       xy1=laygen.get_inst_pin_xy(itgate.name, 'EN', rg_m3m4, index=[m_series-1, 0])[0] + [0, 1],
                       gridname0=rg_m3m4)
    renb = laygen.route(None, laygen.layers['metal'][4],
                        xy0=laygen.get_inst_pin_xy(itgate.name, 'ENB', rg_m3m4, index=[0, 0])[0] + [0, 2],
                        xy1=laygen.get_inst_pin_xy(itgate.name, 'ENB', rg_m3m4, index=[m_series-1, 0])[0] + [0, 2],
                        gridname0=rg_m3m4)
    # drop vias from every tgate's EN/ENB pin onto the shared rails
    for i in range(m_series):
        laygen.via(None, laygen.get_inst_pin_xy(itgate.name, 'EN', rg_m3m4, index=[i, 0])[0] + [0, 1], rg_m3m4)
        laygen.via(None, laygen.get_inst_pin_xy(itgate.name, 'ENB', rg_m3m4, index=[i, 0])[0] + [0, 2], rg_m3m4)
    # VDD/VSS rails on metal2 spanning the unit
    rvdd = laygen.route(None, laygen.layers['metal'][2], xy0=np.array([0, y0]), xy1=np.array([x0, y0]), gridname0=rg_m2m3)
    rvss = laygen.route(None, laygen.layers['metal'][2], xy0=np.array([0, 0]), xy1=np.array([x0, 0]), gridname0=rg_m2m3)
    # pins: controls on metal4, supplies on metal2, I/O on the chain ends (metal3)
    laygen.pin_from_rect('EN', laygen.layers['pin'][4], ren, rg_m3m4)
    laygen.pin_from_rect('ENB', laygen.layers['pin'][4], renb, rg_m3m4)
    laygen.pin_from_rect('VDD', laygen.layers['pin'][2], rvdd, rg_m2m3)
    laygen.pin_from_rect('VSS', laygen.layers['pin'][2], rvss, rg_m2m3)
    laygen.pin(name='I', layer=laygen.layers['pin'][3],
               xy=laygen.get_inst_pin_xy(itgate.name, 'I', rg_m3m4, index=[0, 0]), gridname=rg_m3m4)
    laygen.pin(name='O', layer=laygen.layers['pin'][3],
               xy=laygen.get_inst_pin_xy(itgate.name, 'O', rg_m3m4, index=[m_series-1, 0]), gridname=rg_m3m4)

def generate_r2r_dac(laygen, objectname_pfix, templib_logic, placement_grid,
                     routing_grid_m2m3, routing_grid_m3m4, rg_m3m4_basic_thick, rg_m4m5_thick,
                     num_bits=9, origin=np.array([0, 0])):
    """Generate a num_bits R-2R ladder DAC: per-bit 2R-to-VDD / 2R-to-VSS
    switch units, series-R ladder units, control buffers, tap columns,
    boundary ring, and stacked M3/M4/M5 power rails.

    NOTE(review): relies on module-level globals (`workinglib`, `utemplib`,
    `logictemplib`, `rg_m4m5`) that are not defined in this function --
    presumably set by the top-level generation script; verify before reuse.
    """
    inv_name='inv_2x'
    tap_name='tap'
    r2r_unit_name='r2r_dac_unit'
    r2r_unit_half_name='r2r_dac_unit_half'

    pg = placement_grid
    rg_m2m3 = routing_grid_m2m3
    rg_m3m4 = routing_grid_m3m4
    # rg_m4m5 = routing_grid_m4m5
    # rg_m4m5_basic_thick = routing_grid_m4m5_basic_thick
    # rg_m4m5_thick = routing_grid_m4m5_thick
    # rg_m5m6 = routing_grid_m5m6
    # rg_m5m6_thick = routing_grid_m5m6_thick
    # rg_m5m6_thick_basic = routing_grid_m5m6_thick_basic
    # rg_m6m7_thick = routing_grid_m6m7_thick

    #boundaries -- block width is matched to the capdac template; m_bnd is
    # the number of bottom boundary cells needed to cover it (rounded up)
    x0=laygen.templates.get_template('capdac', workinglib).xy[1][0] - \
       laygen.templates.get_template('boundary_bottomleft').xy[1][0]*2
    m_bnd_float = x0 / laygen.templates.get_template('boundary_bottom').xy[1][0]
    m_bnd = int(m_bnd_float)
    if not m_bnd_float == m_bnd:
        m_bnd += 1
    devname_bnd_left = []
    devname_bnd_right = []
    transform_bnd_left = []
    transform_bnd_right = []
    # 4 device rows per bit: 2R-VSS, R, 2R-VDD, buffer
    num_row=num_bits*4
    # alternate nmos/pmos boundary order row by row to match the mirrored rows
    for i in range(num_row):
        if i%2==0:
            devname_bnd_left += ['nmos4_fast_left', 'pmos4_fast_left']
            devname_bnd_right += ['nmos4_fast_right', 'pmos4_fast_right']
            transform_bnd_left += ['R0', 'MX']
            transform_bnd_right += ['R0', 'MX']
        else:
            devname_bnd_left += ['pmos4_fast_left', 'nmos4_fast_left']
            devname_bnd_right += ['pmos4_fast_right', 'nmos4_fast_right']
            transform_bnd_left += ['R0', 'MX']
            transform_bnd_right += ['R0', 'MX']
    [bnd_bottom, bnd_top, bnd_left, bnd_right] = generate_boundary(
        laygen, objectname_pfix='BND0', placement_grid=pg,
        devname_bottom=['boundary_bottomleft', 'boundary_bottom', 'boundary_bottomright'],
        shape_bottom=[np.array([1, 1]), np.array([m_bnd, 1]), np.array([1, 1])],
        devname_top=['boundary_topleft', 'boundary_top', 'boundary_topright'],
        shape_top=[np.array([1, 1]), np.array([m_bnd, 1]), np.array([1, 1])],
        devname_left=devname_bnd_left,
        transform_left=transform_bnd_left,
        devname_right=devname_bnd_right,
        transform_right=transform_bnd_right,
        origin=np.array([0, 0]))
    #Calculate layout size: left tap column just inside the left boundary,
    # right tap column flush with the capdac width minus its own width
    array_origin = origin + laygen.get_template_xy(name='boundary_bottomleft', gridname=pg, libname=utemplib)
    tapr_origin = np.array([laygen.get_template_xy(name='capdac', gridname=pg, libname=workinglib)[0], 0]) \
                  + np.array([0, laygen.get_template_xy(name='boundary_bottomleft', gridname=pg, libname=utemplib)[1]]) \
                  - np.array([laygen.get_template_xy(name='boundary_bottomleft', gridname=pg, libname=utemplib)[0], 0]) \
                  - np.array([laygen.get_template_xy(name=tap_name, gridname=pg, libname=templib_logic)[0], 0])
    # placement: left tap column, one tap per row, alternating R0/MX
    itapl = []
    for i in range(num_row):
        if i%2 == 0:
            tf='R0'
        else:
            tf='MX'
        if i == 0:
            itapl.append(laygen.relplace(name="I" + objectname_pfix + 'ITAPL'+str(i), templatename=tap_name,
                                         gridname=pg, refinstname=None, xy=array_origin, template_libname=templib_logic))
        else:
            itapl.append(laygen.relplace(name="I" + objectname_pfix + 'ITAPL'+str(i), templatename=tap_name,
                                         gridname=pg, refinstname=itapl[-1].name, template_libname=templib_logic,
                                         direction='top', transform=tf))
    # right tap column, mirrored arrangement
    itapr = []
    for i in range(num_row):
        if i%2 == 0:
            tf='R0'
        else:
            tf='MX'
        if i == 0:
            itapr.append(laygen.relplace(name="I" + objectname_pfix + 'ITAPR'+str(i), templatename=tap_name,
                                         gridname=pg, refinstname=None, xy=tapr_origin, template_libname=templib_logic))
        else:
            itapr.append(laygen.relplace(name="I" + objectname_pfix + 'ITAPR'+str(i), templatename=tap_name,
                                         gridname=pg, refinstname=itapr[-1].name, template_libname=templib_logic,
                                         direction='top', transform=tf))
    # per-bit 2R units switching to VDD (row 2 of each 4-row bit slice)
    i2rvdd = []
    for i in range(num_bits):
        if i == 0:
            i2rvdd.append(laygen.relplace(name="I" + objectname_pfix + 'I2RVDD'+str(i), templatename=r2r_unit_name,
                                          gridname=pg, refinstname=itapl[2].name, template_libname=workinglib))
        else:
            i2rvdd.append(laygen.relplace(name="I" + objectname_pfix + 'I2RVDD'+str(i), templatename=r2r_unit_name,
                                          xy=np.array([0, 3*laygen.get_template_xy(name=r2r_unit_name, gridname=pg, libname=workinglib)[1]]),
                                          gridname=pg, refinstname=i2rvdd[-1].name, template_libname=workinglib, direction='top'))
    # series-R ladder units (full unit for bit 0, half units above)
    ir = []
    for i in range(num_bits):
        if i == 0:
            ir.append(laygen.relplace(name="I" + objectname_pfix + 'IR'+str(i), templatename=r2r_unit_name,
                                      gridname=pg, refinstname=itapl[1].name, template_libname=workinglib, transform='MX'))
        # elif i == 0:
        #     ir.append(laygen.relplace(name="I" + objectname_pfix + 'IR'+str(i), templatename=r2r_unit_name,
        #                               gridname=pg, refinstname=itapl[4*(num_bits-1)+1].name, template_libname=workinglib, direction='right', transform='MX'))
        else:
            ir.append(laygen.relplace(name="I" + objectname_pfix + 'IR'+str(i), templatename=r2r_unit_half_name,
                                      xy=np.array([0, 0]),
                                      gridname=pg, refinstname=itapl[4*i+1].name, template_libname=workinglib,
                                      direction='right', transform='MX'))
    # per-bit 2R units switching to VSS (row 0 of each bit slice)
    i2rvss = []
    for i in range(num_bits):
        if i == 0:
            i2rvss.append(laygen.relplace(name="I" + objectname_pfix + 'I2RVSS'+str(i), templatename=r2r_unit_name,
                                          gridname=pg, refinstname=itapl[0].name, template_libname=workinglib))
        else:
            i2rvss.append(laygen.relplace(name="I" + objectname_pfix + 'I2RVSS'+str(i), templatename=r2r_unit_name,
                                          xy=np.array([0, 3*laygen.get_template_xy(name=r2r_unit_name, gridname=pg, libname=workinglib)[1]]),
                                          gridname=pg, refinstname=i2rvss[-1].name, template_libname=workinglib, direction='top'))
    # per-bit control buffer chain: ibuf0 -> ibuf1 drives EN/ENB
    ibuf0 = []
    ibuf1 = []
    for i in range(num_bits):
        if i == 0:
            ibuf0.append(laygen.relplace(name="I" + objectname_pfix + 'IBUF0'+str(i), templatename=inv_name, gridname=pg,
                                         refinstname=itapl[3].name, template_libname=logictemplib, transform='MX'))
        else:
            ibuf0.append(laygen.relplace(name="I" + objectname_pfix + 'IBUF0'+str(i), templatename=inv_name,
                                         xy=np.array([0, 3*laygen.get_template_xy(name=inv_name, gridname=pg, libname=logictemplib)[1]]),
                                         gridname=pg, refinstname=ibuf0[-1].name, template_libname=logictemplib,
                                         direction='top', transform='MX'))
        ibuf1.append(laygen.relplace(name="I" + objectname_pfix + 'IBUF1'+str(i), templatename=inv_name, gridname=pg,
                                     refinstname=ibuf0[-1].name, template_libname=logictemplib, transform='MX'))
    # Space calculation -- fill the gap between each row's last cell and the
    # right tap column with 4x spacers plus 1x spacers for the remainder
    space_name = 'space_1x'
    space4x_name = 'space_4x'
    space_width = laygen.get_template_xy(name = space_name, gridname = pg, libname = templib_logic)[0]
    space4_width = laygen.get_template_xy(name = space4x_name, gridname = pg, libname = templib_logic)[0]
    blank_2r = laygen.get_inst_xy(itapr[0].name, pg)[0] - laygen.get_inst_bbox(i2rvdd[0].name, pg)[1][0]
    blank_r = laygen.get_inst_xy(itapr[1].name, pg)[0] - laygen.get_inst_bbox(ir[1].name, pg)[1][0]
    blank_buf = laygen.get_inst_xy(itapr[0].name, pg)[0] - laygen.get_inst_bbox(ibuf1[0].name, pg)[1][0]
    m_sp4x_2r = int(blank_2r/space4_width)
    m_sp1x_2r = int(blank_2r/space_width)-4*m_sp4x_2r
    m_sp4x_r = int(blank_r/space4_width)
    m_sp1x_r = int(blank_r/space_width)-4*m_sp4x_r
    m_sp4x_buf = int(blank_buf/space4_width)
    m_sp1x_buf = int(blank_buf/space_width)-4*m_sp4x_buf
    isp_2rvdd_4x = []
    isp_2rvss_4x = []
    isp_r_4x = []
    isp_buf_4x = []
    isp_2rvdd_1x = []
    isp_2rvss_1x = []
    isp_r_1x = []
    isp_buf_1x = []
    for i in range(num_bits):
        isp_2rvdd_4x.append(laygen.relplace(name="I" + objectname_pfix + 'SP2RVDD_4x'+str(i), templatename=space4x_name,
                                            gridname=pg, refinstname=i2rvdd[i].name, template_libname=logictemplib,
                                            shape=[m_sp4x_2r, 1], transform='R0'))
        isp_2rvdd_1x.append(laygen.relplace(name="I" + objectname_pfix + 'SP2RVDD_1x'+str(i), templatename=space_name,
                                            gridname=pg, refinstname=isp_2rvdd_4x[i].name, template_libname=logictemplib,
                                            shape=[m_sp1x_2r, 1], transform='R0'))
        isp_2rvss_4x.append(laygen.relplace(name="I" + objectname_pfix + 'SP2RVSS_4x'+str(i), templatename=space4x_name,
                                            gridname=pg, refinstname=i2rvss[i].name, template_libname=logictemplib,
                                            shape=[m_sp4x_2r, 1], transform='R0'))
        isp_2rvss_1x.append(laygen.relplace(name="I" + objectname_pfix + 'SP2RVSS_1x'+str(i), templatename=space_name,
                                            gridname=pg, refinstname=isp_2rvss_4x[i].name, template_libname=logictemplib,
                                            shape=[m_sp1x_2r, 1], transform='R0'))
        if i==0:
            # bit 0 uses a full-width R unit, so its gap equals the 2R rows' gap
            isp_r_4x.append(laygen.relplace(name="I" + objectname_pfix + 'SPR_4x' + str(i), templatename=space4x_name,
                                            gridname=pg, refinstname=ir[i].name, template_libname=logictemplib,
                                            shape=[m_sp4x_2r, 1], transform='MX'))
            isp_r_1x.append(laygen.relplace(name="I" + objectname_pfix + 'SPR_1x' + str(i), templatename=space_name,
                                            gridname=pg, refinstname=isp_r_4x[i].name, template_libname=logictemplib,
                                            shape=[m_sp1x_2r, 1], transform='MX'))
        else:
            isp_r_4x.append(laygen.relplace(name="I" + objectname_pfix + 'SPR_4x'+str(i), templatename=space4x_name,
                                            gridname=pg, refinstname=ir[i].name, template_libname=logictemplib,
                                            shape=[m_sp4x_r, 1], transform='MX'))
            isp_r_1x.append(laygen.relplace(name="I" + objectname_pfix + 'SPR_1x'+str(i), templatename=space_name,
                                            gridname=pg, refinstname=isp_r_4x[i].name, template_libname=logictemplib,
                                            shape=[m_sp1x_r, 1], transform='MX'))
        isp_buf_4x.append(laygen.relplace(name="I" + objectname_pfix + 'SPBUF_4x'+str(i), templatename=space4x_name,
                                          gridname=pg, refinstname=ibuf1[i].name, template_libname=logictemplib,
                                          shape=[m_sp4x_buf, 1], transform='MX'))
        isp_buf_1x.append(laygen.relplace(name="I" + objectname_pfix + 'SPBUF_1x'+str(i), templatename=space_name,
                                          gridname=pg, refinstname=isp_buf_4x[i].name, template_libname=logictemplib,
                                          shape=[m_sp1x_buf, 1], transform='MX'))
    # internal pins
    pdict = laygen.get_inst_pin_xy(None, None, rg_m3m4)
    # routing
    # 2RVDD to VDD & 2RVSS to VSS -- tie each 2R unit's input to its supply rail
    for i in range(num_bits):
        rh0, rv0 = laygen.route_hv(laygen.layers['metal'][2], laygen.layers['metal'][3],
                                   xy0=laygen.get_inst_pin_xy(i2rvdd[i].name, 'VDD', rg_m2m3)[0],
                                   xy1=laygen.get_inst_pin_xy(i2rvdd[i].name, 'I', rg_m2m3)[0], gridname0=rg_m2m3)
        rh0, rv0 = laygen.route_hv(laygen.layers['metal'][2], laygen.layers['metal'][3],
                                   xy0=laygen.get_inst_pin_xy(i2rvss[i].name, 'VSS', rg_m2m3)[0],
                                   xy1=laygen.get_inst_pin_xy(i2rvss[i].name, 'I', rg_m2m3)[0], gridname0=rg_m2m3)
    # NOTE(review): `i` here is the leftover loop variable (num_bits-1)
    x1 = laygen.get_inst_xy(ir[i].name, rg_m3m4)[0]
    for i in range(num_bits):
        # 2R unit outputs joined to the ladder node at R unit i
        [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
                                           laygen.get_inst_pin_xy(i2rvdd[i].name, 'O', rg_m3m4)[0],
                                           laygen.get_inst_pin_xy(ir[i].name, 'I', rg_m4m5)[0]+[2,0],
                                           laygen.get_inst_pin_xy(ir[i].name, 'EN', rg_m3m4)[0][1] + 6, rg_m3m4,
                                           layerv1=laygen.layers['metal'][5], gridname1=rg_m4m5)
        if i == num_bits-1:
            # MSB node is the DAC output
            laygen.pin_from_rect('out', laygen.layers['pin'][4], rh0, rg_m3m4)
        [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
                                           laygen.get_inst_pin_xy(i2rvss[i].name, 'O', rg_m3m4)[0],
                                           laygen.get_inst_pin_xy(ir[i].name, 'I', rg_m4m5)[0]+[2,0],
                                           laygen.get_inst_pin_xy(ir[i].name, 'EN', rg_m3m4)[0][1] - 6, rg_m3m4,
                                           layerv1=laygen.layers['metal'][5], gridname1=rg_m4m5)
        laygen.route(None, laygen.layers['metal'][4],
                     xy0=laygen.get_inst_pin_xy(ir[i].name, 'I', rg_m4m5)[0]+[0,2],
                     xy1=laygen.get_inst_pin_xy(ir[i].name, 'I', rg_m4m5)[0]+[2,2],
                     gridname0=rg_m3m4, gridname1=rg_m4m5, via0=[0,0], via1=[0,0])
        y1 = laygen.get_inst_pin_xy(ir[i].name, 'VDD', rg_m3m4)[0][1]+2
        # R path routing -- chain R unit i's output down to R unit i-1's input
        if not i == 0:
            rv0, rh0, rv1 = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
                                             laygen.get_inst_pin_xy(ir[i].name, 'O', rg_m3m4)[0],
                                             laygen.get_inst_pin_xy(ir[i-1].name, 'I', rg_m4m5)[0]-[2,-6], y1, rg_m3m4,
                                             layerv1=laygen.layers['metal'][5], gridname1=rg_m4m5)
            rv0, rh0, rv1 = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
                                             laygen.get_inst_pin_xy(ir[i-1].name, 'I', rg_m3m4)[0],
                                             laygen.get_inst_pin_xy(ir[i-1].name, 'I', rg_m4m5)[0]-[2,-6],
                                             laygen.get_inst_pin_xy(ir[i - 1].name, 'I', rg_m3m4)[0][1]+2, rg_m3m4,
                                             layerv1=laygen.layers['metal'][5], gridname1=rg_m4m5)
        else:
            # LSB R unit terminates the ladder into VSS
            # rh0, rv0 = laygen.route_hv(laygen.layers['metal'][2], laygen.layers['metal'][3],
            #                            xy0=laygen.get_inst_pin_xy(ir[i].name, 'VSS', rg_m2m3)[0],
            #                            xy1=laygen.get_inst_pin_xy(ir[i].name, 'O', rg_m2m3)[0], gridname0=rg_m2m3)
            rv0, rh0, rv1 = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
                                             laygen.get_inst_pin_xy(ir[i].name, 'O', rg_m3m4)[0],
                                             laygen.get_inst_pin_xy(ir[i].name, 'VSS', rg_m3m4)[1],
                                             laygen.get_inst_pin_xy(ir[i].name, 'O', rg_m3m4)[0][1]-1, rg_m3m4)
            laygen.via(None, xy=laygen.get_inst_pin_xy(ir[i].name, 'VSS', rg_m2m3)[1], gridname=rg_m2m3)
        # R EN/ENB -- series R units are always on: EN tied to VDD, ENB to VSS
        rv0, rh0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][2],
                                   xy0=laygen.get_inst_pin_xy(ir[i].name, 'EN', rg_m2m3)[1],
                                   xy1=laygen.get_inst_pin_xy(ir[i].name, 'VDD', rg_m2m3)[0], gridname0=rg_m2m3)
        rv0, rh0 = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][2],
                                   xy0=laygen.get_inst_pin_xy(ir[i].name, 'ENB', rg_m2m3)[1],
                                   xy1=laygen.get_inst_pin_xy(ir[i].name, 'VSS', rg_m2m3)[0], gridname0=rg_m2m3)
        # 2R EN/ENB -- complementary switch control from the buffer pair:
        # ibuf1 (non-inverted) drives 2RVDD.EN and 2RVSS.ENB,
        # ibuf0 (inverted) drives 2RVDD.ENB and 2RVSS.EN
        x_en = laygen.get_inst_pin_xy(ibuf1[i].name, 'O', rg_m3m4)[0][0]+4
        x_enb = laygen.get_inst_pin_xy(ibuf1[i].name, 'O', rg_m3m4)[0][0]+2
        rv0, rh0, rv1 = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
                                         laygen.get_inst_pin_xy(ibuf1[i].name, 'O', rg_m3m4)[0],
                                         np.array([x_en, laygen.get_inst_pin_xy(i2rvdd[i].name, 'EN', rg_m4m5)[0][1]]),
                                         laygen.get_inst_pin_xy(ibuf1[i].name, 'O', rg_m3m4)[1][1], rg_m3m4,
                                         layerv1=laygen.layers['metal'][5], gridname1=rg_m4m5)
        laygen.via(None, xy=np.array([x_en, laygen.get_inst_pin_xy(i2rvdd[i].name, 'EN', rg_m4m5)[0][1]]), gridname=rg_m4m5)
        rv0, rh0, rv1 = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
                                         laygen.get_inst_pin_xy(ibuf1[i].name, 'O', rg_m3m4)[0],
                                         np.array([x_en, laygen.get_inst_pin_xy(i2rvss[i].name, 'ENB', rg_m4m5)[0][1]]),
                                         laygen.get_inst_pin_xy(ibuf1[i].name, 'O', rg_m3m4)[1][1], rg_m3m4,
                                         layerv1=laygen.layers['metal'][5], gridname1=rg_m4m5)
        laygen.via(None, xy=np.array([x_en, laygen.get_inst_pin_xy(i2rvss[i].name, 'ENB', rg_m4m5)[0][1]]), gridname=rg_m4m5)
        rv0, rh0, rv1 = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
                                         laygen.get_inst_pin_xy(ibuf0[i].name, 'O', rg_m3m4)[0],
                                         np.array([x_enb, laygen.get_inst_pin_xy(i2rvdd[i].name, 'ENB', rg_m4m5)[0][1]]),
                                         laygen.get_inst_pin_xy(ibuf0[i].name, 'O', rg_m3m4)[0][1], rg_m3m4,
                                         layerv1=laygen.layers['metal'][5], gridname1=rg_m4m5)
        laygen.via(None, xy=np.array([x_enb, laygen.get_inst_pin_xy(i2rvdd[i].name, 'ENB', rg_m4m5)[0][1]]), gridname=rg_m4m5)
        rv0, rh0, rv1 = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],
                                         laygen.get_inst_pin_xy(ibuf0[i].name, 'O', rg_m3m4)[0],
                                         np.array([x_enb, laygen.get_inst_pin_xy(i2rvss[i].name, 'EN', rg_m4m5)[0][1]]),
                                         laygen.get_inst_pin_xy(ibuf0[i].name, 'O', rg_m3m4)[0][1], rg_m3m4,
                                         layerv1=laygen.layers['metal'][5], gridname1=rg_m4m5)
        laygen.via(None, xy=np.array([x_enb, laygen.get_inst_pin_xy(i2rvss[i].name, 'EN', rg_m4m5)[0][1]]), gridname=rg_m4m5)
        # buffer routing: ibuf0.O -> ibuf1.I
        laygen.route(None, laygen.layers['metal'][4],
                     xy0=laygen.get_inst_pin_xy(ibuf0[i].name, 'O', rg_m3m4)[0],
                     xy1=laygen.get_inst_pin_xy(ibuf1[i].name, 'I', rg_m3m4)[0],
                     gridname0=rg_m3m4, via0=[0,0], via1=[0,0])
        # Sel pins -- one digital select input per bit
        rv0, rsel = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4],
                                    xy0=laygen.get_inst_pin_xy(ibuf0[i].name, 'I', rg_m3m4)[0],
                                    xy1=laygen.get_inst_pin_xy(ibuf0[i].name, 'I', rg_m3m4)[0]+[4,2], gridname0=rg_m3m4)
        laygen.pin_from_rect('SEL<'+str(i)+'>', laygen.layers['pin'][4], rsel, rg_m3m4)
    # # power
    # for i in range(num_row):
    #     laygen.pin(name='VSS'+str(i), layer=laygen.layers['pin'][2], xy=laygen.get_inst_pin_xy(itapl[i].name, 'VSS', rg_m2m3),
    #                gridname=rg_m2m3, netname='VSS:')
    #     laygen.pin(name='VDD'+str(i), layer=laygen.layers['pin'][2], xy=laygen.get_inst_pin_xy(itapl[i].name, 'VDD', rg_m2m3),
    #                gridname=rg_m2m3, netname='VDD:')
    # power pin -- vertical M3 straps through the tap columns (odd tracks VDD,
    # even tracks VSS), then consolidated to M4 and M5 rails
    pwr_dim=laygen.get_template_xy(name=itapl[0].cellname, gridname=rg_m2m3, libname=itapl[0].libname)
    rvddl_m3 = []
    rvssl_m3 = []
    rvddr_m3 = []
    rvssr_m3 = []
    for i in range(0, int(pwr_dim[0]/2)):
        rvddl_m3.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+1, 0]), xy1=np.array([2*i+1, 0]), gridname0=rg_m2m3,
                                     refinstname0=itapl[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
                                     refinstname1=itapl[num_row-1].name, refpinname1='VSS', refinstindex1=np.array([0, 0])))
        rvssl_m3.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+2, 0]), xy1=np.array([2*i+2, 0]), gridname0=rg_m2m3,
                                     refinstname0=itapl[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
                                     refinstname1=itapl[num_row-1].name, refpinname1='VSS', refinstindex1=np.array([0, 0])))
        for j in range(num_row):
            laygen.via(None, xy=np.array([2*i+1, 0]), gridname=rg_m2m3, refinstname=itapl[j].name, refpinname='VDD')
            laygen.via(None, xy=np.array([2 * i + 2, 0]), gridname=rg_m2m3, refinstname=itapl[j].name, refpinname='VSS')
        # laygen.pin(name = 'VDDL'+str(i), layer = laygen.layers['pin'][3], refobj = rvddl_m3[-1], gridname=rg_m2m3, netname='VDD')
        # laygen.pin(name = 'VSSL'+str(i), layer = laygen.layers['pin'][3], refobj = rvssl_m3[-1], gridname=rg_m2m3, netname='VSS')
        rvddr_m3.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+1, 0]), xy1=np.array([2*i+1, 0]), gridname0=rg_m2m3,
                                     refinstname0=itapr[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
                                     refinstname1=itapr[num_row-1].name, refpinname1='VSS', refinstindex1=np.array([0, 0])))
        rvssr_m3.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+2, 0]), xy1=np.array([2*i+2, 0]), gridname0=rg_m2m3,
                                     refinstname0=itapr[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),
                                     refinstname1=itapr[num_row-1].name, refpinname1='VSS', refinstindex1=np.array([0, 0])))
        for j in range(num_row):
            laygen.via(None, xy=np.array([2*i+1, 0]), gridname=rg_m2m3, refinstname=itapr[j].name, refpinname='VDD')
            laygen.via(None, xy=np.array([2 * i + 2, 0]), gridname=rg_m2m3, refinstname=itapr[j].name, refpinname='VSS')
        # laygen.pin(name = 'VDDR'+str(i), layer = laygen.layers['pin'][3], refobj = rvddr_m3[-1], gridname=rg_m2m3, netname='VDD')
        # laygen.pin(name = 'VSSR'+str(i), layer = laygen.layers['pin'][3], refobj = rvssr_m3[-1], gridname=rg_m2m3, netname='VSS')
    #m4 -- horizontal rails consolidated from the M3 straps (no pins yet)
    input_rails_rect = [rvddl_m3, rvssl_m3]
    rvddl_m4, rvssl_m4 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='L_M4_', layer=laygen.layers['metal'][4],
                                                                           gridname=rg_m3m4_basic_thick, netnames=['VDD', 'VSS'], direction='x',
                                                                           input_rails_rect=input_rails_rect, generate_pin=False,
                                                                           overwrite_start_coord=2, overwrite_end_coord=None,
                                                                           offset_start_index=0, offset_end_index=0)
    x1_phy = laygen.get_xy(obj =bnd_right[0])[0]\
             +laygen.get_xy(obj =bnd_right[0].template)[0]
    x1 = laygen.grids.get_absgrid_x(rg_m3m4_basic_thick, x1_phy)
    input_rails_rect = [rvddr_m3, rvssr_m3]
    rvddr_m4, rvssr_m4 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='R_M4_', layer=laygen.layers['metal'][4],
                                                                           gridname=rg_m3m4_basic_thick, netnames=['VDD', 'VSS'], direction='x',
                                                                           input_rails_rect=input_rails_rect, generate_pin=False,
                                                                           overwrite_start_coord=None, overwrite_end_coord=x1-2,
                                                                           offset_start_index=0, offset_end_index=0)
    #m5 -- vertical rails with the block's VDD/VSS pins
    input_rails_rect = [rvddl_m4, rvssl_m4]
    rvddl_m5, rvssl_m5 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='L_M5_', layer=laygen.layers['pin'][5],
                                                                           gridname=rg_m4m5_thick, netnames=['VDD', 'VSS'], direction='y',
                                                                           input_rails_rect=input_rails_rect, generate_pin=True,
                                                                           overwrite_start_coord=None, overwrite_end_coord=None,
                                                                           offset_start_index=0, offset_end_index=0)
    y1_phy = laygen.get_xy(obj =bnd_top[0])[1]\
             +laygen.get_xy(obj =bnd_top[0].template)[1]
    y1 = laygen.grids.get_absgrid_x(rg_m4m5_thick, y1_phy)
    input_rails_rect = [rvddr_m4, rvssr_m4]
    rvddr_m5, rvssr_m5 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='R_M5_', layer=laygen.layers['pin'][5],
                                                                           gridname=rg_m4m5_thick, netnames=['VDD', 'VSS'], direction='y',
                                                                           input_rails_rect=input_rails_rect, generate_pin=True,
                                                                           overwrite_start_coord=None, overwrite_end_coord=None,
                                                                           offset_start_index=0, offset_end_index=0)

def generate_r2rdac_bcap_unit(laygen, objectname_pfix, templib_logic, placement_grid,
                              routing_grid_m2m3, routing_grid_m3m4_basic_thick, m=2, origin=np.array([0, 0])):
    """Generate a row of m bcap2_8x decoupling-cap cells with their inputs
    tied together on a thick metal4 rail (continues past this chunk)."""
    pg = placement_grid
    rg_m2m3 = routing_grid_m2m3
    rg_m3m4_basic_thick = routing_grid_m3m4_basic_thick
    bcap_name = 'bcap2_8x'
    # placement: m cap cells in a row
    ibcap = laygen.place(name="I" + objectname_pfix + 'BCAP0', templatename=bcap_name, gridname=pg, xy=origin,
                         template_libname=templib_logic, shape=np.array([m,1]))
    # reference coordinates (top-right corner of the last cell's VDD pin)
    x0 = laygen.get_inst_pin_xy(ibcap.name, 'VDD', rg_m2m3, index=[m-1, 0])[1][0]
    y0 = laygen.get_inst_pin_xy(ibcap.name, 'VDD', rg_m2m3, index=[m-1, 0])[1][1]
    # internal routes: join neighboring cells' I pins on thick metal4
    for i in range(m-1):
        laygen.route(None, laygen.layers['metal'][4],
                     xy0=laygen.get_inst_pin_xy(ibcap.name, 'I', rg_m3m4_basic_thick, index=[i,0])[0],
                     xy1=laygen.get_inst_pin_xy(ibcap.name, 'I', rg_m3m4_basic_thick, index=[i+1,0])[0],
                     gridname0=rg_m3m4_basic_thick, via0=[0,0], via1=[0,0])
    # vertical metal3 stubs from each I pin down to the rail level
    for i in range(m):
        laygen.route(None, laygen.layers['metal'][3],
                     xy0=np.array([laygen.get_inst_pin_xy(ibcap.name, 'I', rg_m2m3, index=[i,0])[0][0], 0]),
                     xy1=np.array([laygen.get_inst_pin_xy(ibcap.name, 'I', rg_m2m3, index=[i,0])[0][0], y0]),
                     gridname0=rg_m2m3)
    rin = laygen.route(None, laygen.layers['metal'][4],
                       xy0=laygen.get_inst_pin_xy(ibcap.name, 'I', rg_m3m4_basic_thick, index=[0, 0])[0],
                       xy1=laygen.get_inst_pin_xy(ibcap.name, 'I', rg_m3m4_basic_thick, index=[m-1, 0])[0],
                       gridname0=rg_m3m4_basic_thick)
    # VDD/VSS rails
    rvdd = laygen.route(None, laygen.layers['metal'][2], xy0=np.array([0, y0]), xy1=np.array([x0, y0]), gridname0=rg_m2m3)
    rvss = laygen.route(None, laygen.layers['metal'][2],
xy0=np.array([0, 0]), xy1=np.array([x0, 0]), gridname0=rg_m2m3) # pins laygen.pin_from_rect('I', laygen.layers['pin'][4], rin, rg_m3m4_basic_thick) laygen.pin_from_rect('VDD', laygen.layers['pin'][2], rvdd, rg_m2m3) laygen.pin_from_rect('VSS', laygen.layers['pin'][2], rvss, rg_m2m3) def generate_r2r_dac_bcap(laygen, objectname_pfix, templib_logic, placement_grid, routing_grid_m2m3, routing_grid_m3m4, rg_m3m4_basic_thick, rg_m4m5_thick, num_bits=9, origin=np.array([0, 0])): """generate r2rdac """ inv_name='inv_2x' tap_name='tap' bcap_unit_name='r2r_dac_bcap_unit' pg = placement_grid rg_m2m3 = routing_grid_m2m3 rg_m3m4 = routing_grid_m3m4 # rg_m4m5 = routing_grid_m4m5 # rg_m4m5_basic_thick = routing_grid_m4m5_basic_thick # rg_m4m5_thick = routing_grid_m4m5_thick # rg_m5m6 = routing_grid_m5m6 # rg_m5m6_thick = routing_grid_m5m6_thick # rg_m5m6_thick_basic = routing_grid_m5m6_thick_basic # rg_m6m7_thick = routing_grid_m6m7_thick #boundaries x0=laygen.templates.get_template('r2r_dac_bcap_unit', workinglib).xy[1][0] + \ laygen.templates.get_template('tap', templib_logic).xy[1][0]*2 m_bnd_float = x0 / laygen.templates.get_template('boundary_bottom').xy[1][0] m_bnd = int(m_bnd_float) if not m_bnd_float == m_bnd: m_bnd += 1 devname_bnd_left = [] devname_bnd_right = [] transform_bnd_left = [] transform_bnd_right = [] num_row=num_bits*4 for i in range(num_row): if i%2==0: devname_bnd_left += ['nmos4_fast_left', 'pmos4_fast_left'] devname_bnd_right += ['nmos4_fast_right', 'pmos4_fast_right'] transform_bnd_left += ['R0', 'MX'] transform_bnd_right += ['R0', 'MX'] else: devname_bnd_left += ['pmos4_fast_left', 'nmos4_fast_left'] devname_bnd_right += ['pmos4_fast_right', 'nmos4_fast_right'] transform_bnd_left += ['R0', 'MX'] transform_bnd_right += ['R0', 'MX'] [bnd_bottom, bnd_top, bnd_left, bnd_right] = generate_boundary(laygen, objectname_pfix='BND0', placement_grid=pg, devname_bottom=['boundary_bottomleft', 'boundary_bottom', 'boundary_bottomright'], shape_bottom=[np.array([1, 
1]), np.array([m_bnd, 1]), np.array([1, 1])], devname_top=['boundary_topleft', 'boundary_top', 'boundary_topright'], shape_top=[np.array([1, 1]), np.array([m_bnd, 1]), np.array([1, 1])], devname_left=devname_bnd_left, transform_left=transform_bnd_left, devname_right=devname_bnd_right, transform_right=transform_bnd_right, origin=np.array([0, 0])) #Calculate layout size array_origin = origin + laygen.get_template_xy(name='boundary_bottomleft', gridname=pg, libname=utemplib) tapr_origin = np.array([laygen.get_template_xy(name='r2r_dac_bcap_unit', gridname=pg, libname=workinglib)[0], 0]) \ + np.array([0, laygen.get_template_xy(name='boundary_bottomleft', gridname=pg, libname=utemplib)[1]]) \ + np.array([laygen.get_template_xy(name='boundary_bottomleft', gridname=pg, libname=utemplib)[0], 0]) \ + np.array([laygen.get_template_xy(name=tap_name, gridname=pg, libname=templib_logic)[0], 0]) # placement itapl = [] ibcap = [] for i in range(num_row): if i%2 == 0: tf='R0' else: tf='MX' if i == 0: itapl.append(laygen.relplace(name="I" + objectname_pfix + 'ITAPL'+str(i), templatename=tap_name, gridname=pg, refinstname=None, xy=array_origin, template_libname=templib_logic)) else: itapl.append(laygen.relplace(name="I" + objectname_pfix + 'ITAPL'+str(i), templatename=tap_name, gridname=pg, refinstname=itapl[-1].name, template_libname=templib_logic, direction='top', transform=tf)) ibcap.append(laygen.relplace(name="I" + objectname_pfix + 'IBCAP'+str(i), templatename=bcap_unit_name, gridname=pg, refinstname=itapl[-1].name, template_libname=workinglib, direction='right', transform=tf)) itapr = [] for i in range(num_row): if i%2 == 0: tf='R0' else: tf='MX' if i == 0: itapr.append(laygen.relplace(name="I" + objectname_pfix + 'ITAPR'+str(i), templatename=tap_name, gridname=pg, refinstname=None, xy=tapr_origin, template_libname=templib_logic)) else: itapr.append(laygen.relplace(name="I" + objectname_pfix + 'ITAPR'+str(i), templatename=tap_name, gridname=pg, refinstname=itapr[-1].name, 
template_libname=templib_logic, direction='top', transform=tf)) # pins pdict = laygen.get_inst_pin_xy(None, None, rg_m3m4_basic_thick) laygen.pin(name='I', layer=laygen.layers['pin'][4], xy=laygen.get_inst_pin_xy(ibcap[-1].name, 'I', rg_m3m4_basic_thick), gridname=rg_m3m4_basic_thick) # power pin pwr_dim=laygen.get_template_xy(name=itapl[0].cellname, gridname=rg_m2m3, libname=itapl[0].libname) rvddl_m3 = [] rvssl_m3 = [] rvddr_m3 = [] rvssr_m3 = [] for i in range(0, int(pwr_dim[0]/2)): rvddl_m3.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+1, 0]), xy1=np.array([2*i+1, 0]), gridname0=rg_m2m3, refinstname0=itapl[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]), refinstname1=itapl[num_row-1].name, refpinname1='VSS', refinstindex1=np.array([0, 0]))) rvssl_m3.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+2, 0]), xy1=np.array([2*i+2, 0]), gridname0=rg_m2m3, refinstname0=itapl[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]), refinstname1=itapl[num_row-1].name, refpinname1='VSS', refinstindex1=np.array([0, 0]))) for j in range(num_row): laygen.via(None, xy=np.array([2*i+1, 0]), gridname=rg_m2m3, refinstname=itapl[j].name, refpinname='VDD') laygen.via(None, xy=np.array([2 * i + 2, 0]), gridname=rg_m2m3, refinstname=itapl[j].name, refpinname='VSS') # laygen.pin(name = 'VDDL'+str(i), layer = laygen.layers['pin'][3], refobj = rvddl_m3[-1], gridname=rg_m2m3, netname='VDD') # laygen.pin(name = 'VSSL'+str(i), layer = laygen.layers['pin'][3], refobj = rvssl_m3[-1], gridname=rg_m2m3, netname='VSS') rvddr_m3.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+1, 0]), xy1=np.array([2*i+1, 0]), gridname0=rg_m2m3, refinstname0=itapr[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]), refinstname1=itapr[num_row-1].name, refpinname1='VSS', refinstindex1=np.array([0, 0]))) rvssr_m3.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+2, 0]), xy1=np.array([2*i+2, 0]), 
gridname0=rg_m2m3, refinstname0=itapr[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]), refinstname1=itapr[num_row-1].name, refpinname1='VSS', refinstindex1=np.array([0, 0]))) for j in range(num_row): laygen.via(None, xy=np.array([2*i+1, 0]), gridname=rg_m2m3, refinstname=itapr[j].name, refpinname='VDD') laygen.via(None, xy=np.array([2 * i + 2, 0]), gridname=rg_m2m3, refinstname=itapr[j].name, refpinname='VSS') # laygen.pin(name = 'VDDR'+str(i), layer = laygen.layers['pin'][3], refobj = rvddr_m3[-1], gridname=rg_m2m3, netname='VDD') # laygen.pin(name = 'VSSR'+str(i), layer = laygen.layers['pin'][3], refobj = rvssr_m3[-1], gridname=rg_m2m3, netname='VSS') #m4 input_rails_rect = [rvddl_m3, rvssl_m3] rvddl_m4, rvssl_m4 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='L_M4_', layer=laygen.layers['metal'][4], gridname=rg_m3m4_basic_thick, netnames=['VDD', 'VSS'], direction='x', input_rails_rect=input_rails_rect, generate_pin=False, overwrite_start_coord=2, overwrite_end_coord=None, offset_start_index=0, offset_end_index=0) x1_phy = laygen.get_xy(obj =bnd_right[0])[0]\ +laygen.get_xy(obj =bnd_right[0].template)[0] x1 = laygen.grids.get_absgrid_x(rg_m3m4_basic_thick, x1_phy) input_rails_rect = [rvddr_m3, rvssr_m3] rvddr_m4, rvssr_m4 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='R_M4_', layer=laygen.layers['metal'][4], gridname=rg_m3m4_basic_thick, netnames=['VDD', 'VSS'], direction='x', input_rails_rect=input_rails_rect, generate_pin=False, overwrite_start_coord=None, overwrite_end_coord=x1-2, offset_start_index=0, offset_end_index=0) #m5 input_rails_rect = [rvddl_m4, rvssl_m4] rvddl_m5, rvssl_m5 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='L_M5_', layer=laygen.layers['pin'][5], gridname=rg_m4m5_thick, netnames=['VDD', 'VSS'], direction='y', input_rails_rect=input_rails_rect, generate_pin=True, overwrite_start_coord=None, overwrite_end_coord=None, offset_start_index=0, 
offset_end_index=0) y1_phy = laygen.get_xy(obj =bnd_top[0])[1]\ +laygen.get_xy(obj =bnd_top[0].template)[1] y1 = laygen.grids.get_absgrid_x(rg_m4m5_thick, y1_phy) input_rails_rect = [rvddr_m4, rvssr_m4] rvddr_m5, rvssr_m5 = laygenhelper.generate_power_rails_from_rails_rect(laygen, routename_tag='R_M5_', layer=laygen.layers['pin'][5], gridname=rg_m4m5_thick, netnames=['VDD', 'VSS'], direction='y', input_rails_rect=input_rails_rect, generate_pin=True, overwrite_start_coord=None, overwrite_end_coord=None, offset_start_index=0, offset_end_index=0) if __name__ == '__main__': laygen = laygo.GridLayoutGenerator(config_file="laygo_config.yaml") import imp try: imp.find_module('bag') laygen.use_phantom = False except ImportError: laygen.use_phantom = True tech=laygen.tech utemplib = tech+'_microtemplates_dense' logictemplib = tech+'_logic_templates' ret_libname = 'adc_retimer_ec' clkdist_libname = 'clk_dis_generated' laygen.load_template(filename=tech+'_microtemplates_dense_templates.yaml', libname=utemplib) laygen.load_grid(filename=tech+'_microtemplates_dense_grids.yaml', libname=utemplib) laygen.load_template(filename=logictemplib+'.yaml', libname=logictemplib) # laygen.load_template(filename='adc_retimer.yaml', libname=ret_libname) #laygen.load_template(filename=ret_libname+'.yaml', libname=ret_libname) laygen.load_template(filename=clkdist_libname+'.yaml', libname=clkdist_libname) laygen.templates.sel_library(utemplib) laygen.grids.sel_library(utemplib) #library load or generation workinglib = 'adc_sar_generated' laygen.add_library(workinglib) laygen.sel_library(workinglib) if os.path.exists(workinglib+'.yaml'): #generated layout file exists laygen.load_template(filename=workinglib+'.yaml', libname=workinglib) laygen.templates.sel_library(utemplib) #grid pg = 'placement_basic' #placement grid rg_m1m2 = 'route_M1_M2_cmos' rg_m1m2_thick = 'route_M1_M2_thick' rg_m2m3 = 'route_M2_M3_cmos' rg_m3m4 = 'route_M3_M4_basic' rg_m3m4_thick = 'route_M3_M4_thick' rg_m3m4_basic_thick 
= 'route_M3_M4_basic_thick' rg_m4m5 = 'route_M4_M5_basic' rg_m4m5_thick = 'route_M4_M5_thick' rg_m4m5_basic_thick = 'route_M4_M5_basic_thick' rg_m5m6 = 'route_M5_M6_basic' rg_m5m6_thick = 'route_M5_M6_thick' rg_m5m6_thick_basic = 'route_M5_M6_thick_basic' rg_m5m6_basic_thick = 'route_M5_M6_basic_thick' rg_m5m6_thick2_thick = 'route_M5_M6_thick2_thick' rg_m6m7_thick = 'route_M6_M7_thick' rg_m6m7_thick2_thick = 'route_M6_M7_thick2_thick' rg_m1m2_pin = 'route_M1_M2_basic' rg_m2m3_pin = 'route_M2_M3_basic' mycell_list = [] num_bits=9 num_slices=9 slice_order=[0,2,4,6,1,3,5,7] #load from preset load_from_file=True yamlfile_spec="adc_sar_spec.yaml" yamlfile_size="adc_sar_size.yaml" if load_from_file==True: with open(yamlfile_spec, 'r') as stream: specdict = yaml.load(stream) with open(yamlfile_size, 'r') as stream: sizedict = yaml.load(stream) num_bits=sizedict['r2rdac']['num_bits'] num_slices=specdict['n_interleave'] m_latch=sizedict['retimer']['ret_m_latch'] m_ibuf=sizedict['retimer']['ret_m_ibuf'] m_obuf=sizedict['retimer']['ret_m_obuf'] m_srbuf=sizedict['retimer']['ret_m_srbuf'] m_sr=sizedict['retimer']['ret_m_sr'] slice_order=sizedict['slice_order'] m=sizedict['r2rdac']['m'] m_bcap=sizedict['r2rdac']['m_bcap'] num_series=sizedict['r2rdac']['num_series'] sar_name = 'sar_wsamp_bb_doubleSA_array' ret_name = 'adc_retimer' clkdist_name = 'clk_dis_viadel_htree' #tisar_space_name = 'tisaradc_body_space' space_1x_name = 'space_1x' #r2r unit cellname='r2r_dac_unit' print(cellname+" generating") mycell_list.append(cellname) laygen.add_cell(cellname) laygen.sel_cell(cellname) generate_r2rdac_unit(laygen, objectname_pfix='R2RUNIT', templib_logic=logictemplib, placement_grid=pg, routing_grid_m2m3=rg_m2m3, routing_grid_m3m4=rg_m3m4, m=m, m_series=num_series, origin=np.array([0, 0])) laygen.add_template_from_cell() #r2r half unit cellname='r2r_dac_unit_half' print(cellname+" generating") mycell_list.append(cellname) laygen.add_cell(cellname) laygen.sel_cell(cellname) 
generate_r2rdac_unit(laygen, objectname_pfix='R2RUNIT_half', templib_logic=logictemplib, placement_grid=pg, routing_grid_m2m3=rg_m2m3, routing_grid_m3m4=rg_m3m4, m=m, m_series=int(num_series/2), origin=np.array([0, 0])) laygen.add_template_from_cell() # r2r dac cellname = 'r2r_dac' print(cellname + " generating") mycell_list.append(cellname) laygen.add_cell(cellname) laygen.sel_cell(cellname) generate_r2r_dac(laygen, objectname_pfix='R2R', templib_logic=logictemplib, placement_grid=pg, routing_grid_m2m3=rg_m2m3, routing_grid_m3m4=rg_m3m4, rg_m3m4_basic_thick=rg_m3m4_basic_thick, rg_m4m5_thick=rg_m4m5_thick, num_bits=num_bits, origin=np.array([0, 0])) laygen.add_template_from_cell() # r2r dac bcap unit cellname = 'r2r_dac_bcap_unit' print(cellname + " generating") mycell_list.append(cellname) laygen.add_cell(cellname) laygen.sel_cell(cellname) generate_r2rdac_bcap_unit(laygen, objectname_pfix='BCAPUNIT', templib_logic=logictemplib, placement_grid=pg, routing_grid_m2m3=rg_m2m3, routing_grid_m3m4_basic_thick=rg_m3m4_basic_thick, m=m_bcap, origin=np.array([0, 0])) laygen.add_template_from_cell() # r2r dac cellname = 'r2r_dac_bcap' print(cellname + " generating") mycell_list.append(cellname) laygen.add_cell(cellname) laygen.sel_cell(cellname) generate_r2r_dac_bcap(laygen, objectname_pfix='R2R_bcap', templib_logic=logictemplib, placement_grid=pg, routing_grid_m2m3=rg_m2m3, routing_grid_m3m4=rg_m3m4, rg_m3m4_basic_thick=rg_m3m4_basic_thick, rg_m4m5_thick=rg_m4m5_thick, num_bits=num_bits, origin=np.array([0, 0])) laygen.add_template_from_cell() laygen.save_template(filename=workinglib+'.yaml', libname=workinglib) #bag export, if bag does not exist, gds export import imp try: imp.find_module('bag') import bag prj = bag.BagProject() for mycell in mycell_list: laygen.sel_cell(mycell) laygen.export_BAG(prj, array_delimiter=['[', ']']) except ImportError: laygen.export_GDS('output.gds', cellname=mycell_list, layermapfile=tech+".layermap") # change layermapfile
[ "os.path.exists", "laygo.GridLayoutGeneratorHelper.generate_power_rails_from_rails_rect", "laygo.GridLayoutGenerator", "yaml.load", "numpy.array", "numpy.vstack", "bag.BagProject", "imp.find_module" ]
[((2218, 2234), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2226, 2234), True, 'import numpy as np\n'), ((6472, 6488), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (6480, 6488), True, 'import numpy as np\n'), ((9362, 9378), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9370, 9378), True, 'import numpy as np\n'), ((32589, 32938), 'laygo.GridLayoutGeneratorHelper.generate_power_rails_from_rails_rect', 'laygenhelper.generate_power_rails_from_rails_rect', (['laygen'], {'routename_tag': '"""L_M4_"""', 'layer': "laygen.layers['metal'][4]", 'gridname': 'rg_m3m4_basic_thick', 'netnames': "['VDD', 'VSS']", 'direction': '"""x"""', 'input_rails_rect': 'input_rails_rect', 'generate_pin': '(False)', 'overwrite_start_coord': '(2)', 'overwrite_end_coord': 'None', 'offset_start_index': '(0)', 'offset_end_index': '(0)'}), "(laygen, routename_tag=\n 'L_M4_', layer=laygen.layers['metal'][4], gridname=rg_m3m4_basic_thick,\n netnames=['VDD', 'VSS'], direction='x', input_rails_rect=\n input_rails_rect, generate_pin=False, overwrite_start_coord=2,\n overwrite_end_coord=None, offset_start_index=0, offset_end_index=0)\n", (32638, 32938), True, 'import laygo.GridLayoutGeneratorHelper as laygenhelper\n'), ((33208, 33562), 'laygo.GridLayoutGeneratorHelper.generate_power_rails_from_rails_rect', 'laygenhelper.generate_power_rails_from_rails_rect', (['laygen'], {'routename_tag': '"""R_M4_"""', 'layer': "laygen.layers['metal'][4]", 'gridname': 'rg_m3m4_basic_thick', 'netnames': "['VDD', 'VSS']", 'direction': '"""x"""', 'input_rails_rect': 'input_rails_rect', 'generate_pin': '(False)', 'overwrite_start_coord': 'None', 'overwrite_end_coord': '(x1 - 2)', 'offset_start_index': '(0)', 'offset_end_index': '(0)'}), "(laygen, routename_tag=\n 'R_M4_', layer=laygen.layers['metal'][4], gridname=rg_m3m4_basic_thick,\n netnames=['VDD', 'VSS'], direction='x', input_rails_rect=\n input_rails_rect, generate_pin=False, overwrite_start_coord=None,\n 
overwrite_end_coord=x1 - 2, offset_start_index=0, offset_end_index=0)\n", (33257, 33562), True, 'import laygo.GridLayoutGeneratorHelper as laygenhelper\n'), ((33669, 34012), 'laygo.GridLayoutGeneratorHelper.generate_power_rails_from_rails_rect', 'laygenhelper.generate_power_rails_from_rails_rect', (['laygen'], {'routename_tag': '"""L_M5_"""', 'layer': "laygen.layers['pin'][5]", 'gridname': 'rg_m4m5_thick', 'netnames': "['VDD', 'VSS']", 'direction': '"""y"""', 'input_rails_rect': 'input_rails_rect', 'generate_pin': '(True)', 'overwrite_start_coord': 'None', 'overwrite_end_coord': 'None', 'offset_start_index': '(0)', 'offset_end_index': '(0)'}), "(laygen, routename_tag=\n 'L_M5_', layer=laygen.layers['pin'][5], gridname=rg_m4m5_thick,\n netnames=['VDD', 'VSS'], direction='y', input_rails_rect=\n input_rails_rect, generate_pin=True, overwrite_start_coord=None,\n overwrite_end_coord=None, offset_start_index=0, offset_end_index=0)\n", (33718, 34012), True, 'import laygo.GridLayoutGeneratorHelper as laygenhelper\n'), ((34272, 34615), 'laygo.GridLayoutGeneratorHelper.generate_power_rails_from_rails_rect', 'laygenhelper.generate_power_rails_from_rails_rect', (['laygen'], {'routename_tag': '"""R_M5_"""', 'layer': "laygen.layers['pin'][5]", 'gridname': 'rg_m4m5_thick', 'netnames': "['VDD', 'VSS']", 'direction': '"""y"""', 'input_rails_rect': 'input_rails_rect', 'generate_pin': '(True)', 'overwrite_start_coord': 'None', 'overwrite_end_coord': 'None', 'offset_start_index': '(0)', 'offset_end_index': '(0)'}), "(laygen, routename_tag=\n 'R_M5_', layer=laygen.layers['pin'][5], gridname=rg_m4m5_thick,\n netnames=['VDD', 'VSS'], direction='y', input_rails_rect=\n input_rails_rect, generate_pin=True, overwrite_start_coord=None,\n overwrite_end_coord=None, offset_start_index=0, offset_end_index=0)\n", (34321, 34615), True, 'import laygo.GridLayoutGeneratorHelper as laygenhelper\n'), ((34820, 34836), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (34828, 34836), True, 
'import numpy as np\n'), ((37113, 37129), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (37121, 37129), True, 'import numpy as np\n'), ((45419, 45768), 'laygo.GridLayoutGeneratorHelper.generate_power_rails_from_rails_rect', 'laygenhelper.generate_power_rails_from_rails_rect', (['laygen'], {'routename_tag': '"""L_M4_"""', 'layer': "laygen.layers['metal'][4]", 'gridname': 'rg_m3m4_basic_thick', 'netnames': "['VDD', 'VSS']", 'direction': '"""x"""', 'input_rails_rect': 'input_rails_rect', 'generate_pin': '(False)', 'overwrite_start_coord': '(2)', 'overwrite_end_coord': 'None', 'offset_start_index': '(0)', 'offset_end_index': '(0)'}), "(laygen, routename_tag=\n 'L_M4_', layer=laygen.layers['metal'][4], gridname=rg_m3m4_basic_thick,\n netnames=['VDD', 'VSS'], direction='x', input_rails_rect=\n input_rails_rect, generate_pin=False, overwrite_start_coord=2,\n overwrite_end_coord=None, offset_start_index=0, offset_end_index=0)\n", (45468, 45768), True, 'import laygo.GridLayoutGeneratorHelper as laygenhelper\n'), ((46038, 46392), 'laygo.GridLayoutGeneratorHelper.generate_power_rails_from_rails_rect', 'laygenhelper.generate_power_rails_from_rails_rect', (['laygen'], {'routename_tag': '"""R_M4_"""', 'layer': "laygen.layers['metal'][4]", 'gridname': 'rg_m3m4_basic_thick', 'netnames': "['VDD', 'VSS']", 'direction': '"""x"""', 'input_rails_rect': 'input_rails_rect', 'generate_pin': '(False)', 'overwrite_start_coord': 'None', 'overwrite_end_coord': '(x1 - 2)', 'offset_start_index': '(0)', 'offset_end_index': '(0)'}), "(laygen, routename_tag=\n 'R_M4_', layer=laygen.layers['metal'][4], gridname=rg_m3m4_basic_thick,\n netnames=['VDD', 'VSS'], direction='x', input_rails_rect=\n input_rails_rect, generate_pin=False, overwrite_start_coord=None,\n overwrite_end_coord=x1 - 2, offset_start_index=0, offset_end_index=0)\n", (46087, 46392), True, 'import laygo.GridLayoutGeneratorHelper as laygenhelper\n'), ((46499, 46842), 
'laygo.GridLayoutGeneratorHelper.generate_power_rails_from_rails_rect', 'laygenhelper.generate_power_rails_from_rails_rect', (['laygen'], {'routename_tag': '"""L_M5_"""', 'layer': "laygen.layers['pin'][5]", 'gridname': 'rg_m4m5_thick', 'netnames': "['VDD', 'VSS']", 'direction': '"""y"""', 'input_rails_rect': 'input_rails_rect', 'generate_pin': '(True)', 'overwrite_start_coord': 'None', 'overwrite_end_coord': 'None', 'offset_start_index': '(0)', 'offset_end_index': '(0)'}), "(laygen, routename_tag=\n 'L_M5_', layer=laygen.layers['pin'][5], gridname=rg_m4m5_thick,\n netnames=['VDD', 'VSS'], direction='y', input_rails_rect=\n input_rails_rect, generate_pin=True, overwrite_start_coord=None,\n overwrite_end_coord=None, offset_start_index=0, offset_end_index=0)\n", (46548, 46842), True, 'import laygo.GridLayoutGeneratorHelper as laygenhelper\n'), ((47102, 47445), 'laygo.GridLayoutGeneratorHelper.generate_power_rails_from_rails_rect', 'laygenhelper.generate_power_rails_from_rails_rect', (['laygen'], {'routename_tag': '"""R_M5_"""', 'layer': "laygen.layers['pin'][5]", 'gridname': 'rg_m4m5_thick', 'netnames': "['VDD', 'VSS']", 'direction': '"""y"""', 'input_rails_rect': 'input_rails_rect', 'generate_pin': '(True)', 'overwrite_start_coord': 'None', 'overwrite_end_coord': 'None', 'offset_start_index': '(0)', 'offset_end_index': '(0)'}), "(laygen, routename_tag=\n 'R_M5_', layer=laygen.layers['pin'][5], gridname=rg_m4m5_thick,\n netnames=['VDD', 'VSS'], direction='y', input_rails_rect=\n input_rails_rect, generate_pin=True, overwrite_start_coord=None,\n overwrite_end_coord=None, offset_start_index=0, offset_end_index=0)\n", (47151, 47445), True, 'import laygo.GridLayoutGeneratorHelper as laygenhelper\n'), ((47517, 47575), 'laygo.GridLayoutGenerator', 'laygo.GridLayoutGenerator', ([], {'config_file': '"""laygo_config.yaml"""'}), "(config_file='laygo_config.yaml')\n", (47542, 47575), False, 'import laygo\n'), ((48642, 48678), 'os.path.exists', 'os.path.exists', (["(workinglib + 
'.yaml')"], {}), "(workinglib + '.yaml')\n", (48656, 48678), False, 'import os\n'), ((47609, 47631), 'imp.find_module', 'imp.find_module', (['"""bag"""'], {}), "('bag')\n", (47624, 47631), False, 'import imp\n'), ((53369, 53391), 'imp.find_module', 'imp.find_module', (['"""bag"""'], {}), "('bag')\n", (53384, 53391), False, 'import imp\n'), ((53425, 53441), 'bag.BagProject', 'bag.BagProject', ([], {}), '()\n', (53439, 53441), False, 'import bag\n'), ((2400, 2416), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2408, 2416), True, 'import numpy as np\n'), ((2489, 2505), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2497, 2505), True, 'import numpy as np\n'), ((2577, 2593), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2585, 2593), True, 'import numpy as np\n'), ((2668, 2684), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2676, 2684), True, 'import numpy as np\n'), ((6127, 6172), 'numpy.vstack', 'np.vstack', (['(rvdd0_pin_xy[0], rvdd1_pin_xy[1])'], {}), '((rvdd0_pin_xy[0], rvdd1_pin_xy[1]))\n', (6136, 6172), True, 'import numpy as np\n'), ((6236, 6281), 'numpy.vstack', 'np.vstack', (['(rvss0_pin_xy[0], rvss1_pin_xy[1])'], {}), '((rvss0_pin_xy[0], rvss1_pin_xy[1]))\n', (6245, 6281), True, 'import numpy as np\n'), ((6808, 6831), 'numpy.array', 'np.array', (['[m_series, 1]'], {}), '([m_series, 1])\n', (6816, 6831), True, 'import numpy as np\n'), ((8360, 8377), 'numpy.array', 'np.array', (['[0, y0]'], {}), '([0, y0])\n', (8368, 8377), True, 'import numpy as np\n'), ((8383, 8401), 'numpy.array', 'np.array', (['[x0, y0]'], {}), '([x0, y0])\n', (8391, 8401), True, 'import numpy as np\n'), ((8483, 8499), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (8491, 8499), True, 'import numpy as np\n'), ((8505, 8522), 'numpy.array', 'np.array', (['[x0, 0]'], {}), '([x0, 0])\n', (8513, 8522), True, 'import numpy as np\n'), ((12571, 12587), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (12579, 12587), True, 
'import numpy as np\n'), ((35170, 35186), 'numpy.array', 'np.array', (['[m, 1]'], {}), '([m, 1])\n', (35178, 35186), True, 'import numpy as np\n'), ((36490, 36507), 'numpy.array', 'np.array', (['[0, y0]'], {}), '([0, y0])\n', (36498, 36507), True, 'import numpy as np\n'), ((36513, 36531), 'numpy.array', 'np.array', (['[x0, y0]'], {}), '([x0, y0])\n', (36521, 36531), True, 'import numpy as np\n'), ((36613, 36629), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (36621, 36629), True, 'import numpy as np\n'), ((36635, 36652), 'numpy.array', 'np.array', (['[x0, 0]'], {}), '([x0, 0])\n', (36643, 36652), True, 'import numpy as np\n'), ((40295, 40311), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (40303, 40311), True, 'import numpy as np\n'), ((49958, 49975), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (49967, 49975), False, 'import yaml\n'), ((50048, 50065), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (50057, 50065), False, 'import yaml\n'), ((51165, 51181), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (51173, 51181), True, 'import numpy as np\n'), ((51630, 51646), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (51638, 51646), True, 'import numpy as np\n'), ((52148, 52164), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (52156, 52164), True, 'import numpy as np\n'), ((52624, 52640), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (52632, 52640), True, 'import numpy as np\n'), ((53157, 53173), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (53165, 53173), True, 'import numpy as np\n'), ((11520, 11536), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (11528, 11536), True, 'import numpy as np\n'), ((11538, 11558), 'numpy.array', 'np.array', (['[m_bnd, 1]'], {}), '([m_bnd, 1])\n', (11546, 11558), True, 'import numpy as np\n'), ((11641, 11657), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (11649, 11657), True, 'import numpy as np\n'), 
((11956, 11972), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (11964, 11972), True, 'import numpy as np\n'), ((11974, 11994), 'numpy.array', 'np.array', (['[m_bnd, 1]'], {}), '([m_bnd, 1])\n', (11982, 11994), True, 'import numpy as np\n'), ((12074, 12090), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (12082, 12090), True, 'import numpy as np\n'), ((39244, 39260), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (39252, 39260), True, 'import numpy as np\n'), ((39262, 39282), 'numpy.array', 'np.array', (['[m_bnd, 1]'], {}), '([m_bnd, 1])\n', (39270, 39282), True, 'import numpy as np\n'), ((39365, 39381), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (39373, 39381), True, 'import numpy as np\n'), ((39680, 39696), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (39688, 39696), True, 'import numpy as np\n'), ((39698, 39718), 'numpy.array', 'np.array', (['[m_bnd, 1]'], {}), '([m_bnd, 1])\n', (39706, 39718), True, 'import numpy as np\n'), ((39798, 39814), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (39806, 39814), True, 'import numpy as np\n'), ((30115, 30139), 'numpy.array', 'np.array', (['[2 * i + 1, 0]'], {}), '([2 * i + 1, 0])\n', (30123, 30139), True, 'import numpy as np\n'), ((30141, 30165), 'numpy.array', 'np.array', (['[2 * i + 1, 0]'], {}), '([2 * i + 1, 0])\n', (30149, 30165), True, 'import numpy as np\n'), ((30264, 30280), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (30272, 30280), True, 'import numpy as np\n'), ((30372, 30388), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (30380, 30388), True, 'import numpy as np\n'), ((30465, 30489), 'numpy.array', 'np.array', (['[2 * i + 2, 0]'], {}), '([2 * i + 2, 0])\n', (30473, 30489), True, 'import numpy as np\n'), ((30491, 30515), 'numpy.array', 'np.array', (['[2 * i + 2, 0]'], {}), '([2 * i + 2, 0])\n', (30499, 30515), True, 'import numpy as np\n'), ((30614, 30630), 'numpy.array', 'np.array', (['[0, 0]'], {}), 
'([0, 0])\n', (30622, 30630), True, 'import numpy as np\n'), ((30722, 30738), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (30730, 30738), True, 'import numpy as np\n'), ((30806, 30830), 'numpy.array', 'np.array', (['[2 * i + 1, 0]'], {}), '([2 * i + 1, 0])\n', (30814, 30830), True, 'import numpy as np\n'), ((30923, 30947), 'numpy.array', 'np.array', (['[2 * i + 2, 0]'], {}), '([2 * i + 2, 0])\n', (30931, 30947), True, 'import numpy as np\n'), ((31350, 31374), 'numpy.array', 'np.array', (['[2 * i + 1, 0]'], {}), '([2 * i + 1, 0])\n', (31358, 31374), True, 'import numpy as np\n'), ((31376, 31400), 'numpy.array', 'np.array', (['[2 * i + 1, 0]'], {}), '([2 * i + 1, 0])\n', (31384, 31400), True, 'import numpy as np\n'), ((31499, 31515), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (31507, 31515), True, 'import numpy as np\n'), ((31607, 31623), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (31615, 31623), True, 'import numpy as np\n'), ((31700, 31724), 'numpy.array', 'np.array', (['[2 * i + 2, 0]'], {}), '([2 * i + 2, 0])\n', (31708, 31724), True, 'import numpy as np\n'), ((31726, 31750), 'numpy.array', 'np.array', (['[2 * i + 2, 0]'], {}), '([2 * i + 2, 0])\n', (31734, 31750), True, 'import numpy as np\n'), ((31849, 31865), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (31857, 31865), True, 'import numpy as np\n'), ((31957, 31973), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (31965, 31973), True, 'import numpy as np\n'), ((32041, 32065), 'numpy.array', 'np.array', (['[2 * i + 1, 0]'], {}), '([2 * i + 1, 0])\n', (32049, 32065), True, 'import numpy as np\n'), ((32158, 32182), 'numpy.array', 'np.array', (['[2 * i + 2, 0]'], {}), '([2 * i + 2, 0])\n', (32166, 32182), True, 'import numpy as np\n'), ((42945, 42969), 'numpy.array', 'np.array', (['[2 * i + 1, 0]'], {}), '([2 * i + 1, 0])\n', (42953, 42969), True, 'import numpy as np\n'), ((42971, 42995), 'numpy.array', 'np.array', (['[2 * i + 1, 0]'], {}), 
'([2 * i + 1, 0])\n', (42979, 42995), True, 'import numpy as np\n'), ((43094, 43110), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (43102, 43110), True, 'import numpy as np\n'), ((43202, 43218), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (43210, 43218), True, 'import numpy as np\n'), ((43295, 43319), 'numpy.array', 'np.array', (['[2 * i + 2, 0]'], {}), '([2 * i + 2, 0])\n', (43303, 43319), True, 'import numpy as np\n'), ((43321, 43345), 'numpy.array', 'np.array', (['[2 * i + 2, 0]'], {}), '([2 * i + 2, 0])\n', (43329, 43345), True, 'import numpy as np\n'), ((43444, 43460), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (43452, 43460), True, 'import numpy as np\n'), ((43552, 43568), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (43560, 43568), True, 'import numpy as np\n'), ((43636, 43660), 'numpy.array', 'np.array', (['[2 * i + 1, 0]'], {}), '([2 * i + 1, 0])\n', (43644, 43660), True, 'import numpy as np\n'), ((43753, 43777), 'numpy.array', 'np.array', (['[2 * i + 2, 0]'], {}), '([2 * i + 2, 0])\n', (43761, 43777), True, 'import numpy as np\n'), ((44180, 44204), 'numpy.array', 'np.array', (['[2 * i + 1, 0]'], {}), '([2 * i + 1, 0])\n', (44188, 44204), True, 'import numpy as np\n'), ((44206, 44230), 'numpy.array', 'np.array', (['[2 * i + 1, 0]'], {}), '([2 * i + 1, 0])\n', (44214, 44230), True, 'import numpy as np\n'), ((44329, 44345), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (44337, 44345), True, 'import numpy as np\n'), ((44437, 44453), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (44445, 44453), True, 'import numpy as np\n'), ((44530, 44554), 'numpy.array', 'np.array', (['[2 * i + 2, 0]'], {}), '([2 * i + 2, 0])\n', (44538, 44554), True, 'import numpy as np\n'), ((44556, 44580), 'numpy.array', 'np.array', (['[2 * i + 2, 0]'], {}), '([2 * i + 2, 0])\n', (44564, 44580), True, 'import numpy as np\n'), ((44679, 44695), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', 
(44687, 44695), True, 'import numpy as np\n'), ((44787, 44803), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (44795, 44803), True, 'import numpy as np\n'), ((44871, 44895), 'numpy.array', 'np.array', (['[2 * i + 1, 0]'], {}), '([2 * i + 1, 0])\n', (44879, 44895), True, 'import numpy as np\n'), ((44988, 45012), 'numpy.array', 'np.array', (['[2 * i + 2, 0]'], {}), '([2 * i + 2, 0])\n', (44996, 45012), True, 'import numpy as np\n'), ((15782, 15798), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (15790, 15798), True, 'import numpy as np\n')]
import math import unittest from copy import copy import numpy as np from omicron.core import talib as ta class LibTest(unittest.TestCase): def test_barssince(self): condition = [False, True] self.assertEqual(0, ta.barssince(condition)) condition = [True, False] self.assertEqual(1, ta.barssince(condition)) condition = [True, True, False] self.assertEqual(1, ta.barssince(condition)) condition = [True, True, False, True] self.assertEqual(0, ta.barssince(condition)) condition = [True, True, False, False] self.assertEqual(2, ta.barssince(condition)) def test_cross(self): y1 = np.array([i + 5 for i in range(10)]) y2 = np.array([0.3 * i ** 2 for i in range(10)]) flag, index = ta.cross(y1, y2) self.assertEqual(-1, flag) self.assertEqual(6, index) flag, index = ta.cross(y2, y1) self.assertEqual(1, flag) self.assertEqual(6, index) # y1 == y2 when index == 4 y2 = np.array([0.5 * i ** 2 for i in range(10)]) flag, index = ta.cross(y1, y2) self.assertEqual(-1, flag) self.assertEqual(4, index) flag, index = ta.cross(y2, y1) self.assertEqual(1, flag) self.assertEqual(4, index) # no cross y2 = np.array([i + 3 for i in range(10)]) flag, index = ta.cross(y1, y2) self.assertEqual(0, flag) def test_vcross(self): f = np.array([3 * i ** 2 - 20 * i + 2 for i in range(10)]) g = np.array([i - 5 for i in range(10)]) flag, indices = ta.vcross(f, g) self.assertTrue(flag) self.assertTupleEqual((0, 6), indices) def test_moving_average(self): ts = [i for i in range(5)] ma = ta.moving_average(ts, 3) self.assertEqual(3, len(ma)) self.assertListEqual([1., 2., 3.], ma.tolist()) self.assertFalse(math.isnan(ma[0])) def test_mae(self): y = np.array([i for i in range(5)]) y_hat = copy(y) y_hat[4] = 0 self.assertEqual(0, ta.mean_absolute_error(y, y)) self.assertAlmostEquals(0.8, ta.mean_absolute_error(y, y_hat)) self.assertAlmostEquals(0.8, ta.mean_absolute_error(y_hat, y)) def test_relative_error(self): y = np.arange(5) y_hat = copy(y) y_hat[4] = 0 print(ta.relative_error(y, y_hat)) def 
test_normalize(self): # unit_vector X = [[1.0, -1.0, 2.0], [2.0, 0.0, 0.0], [0.0, 1.0, -1.0]] expected = [[0.4082, -0.4082, 0.8165], [1.0, 0.0, 0.0], [0.0, 0.7071, -0.7071]] X_hat = ta.normalize(X, scaler="unit_vector") np.testing.assert_array_almost_equal(expected, X_hat, decimal=4) # max_abs X = np.array([[1.0, -1.0, 2.0], [2.0, 0.0, 0.0], [0.0, 1.0, -1.0]]) expected = [[0.5, -1.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, -0.5]] X_hat = ta.normalize(X, scaler="maxabs") np.testing.assert_array_almost_equal(expected, X_hat, decimal=2) # min_max expected = [[0.5, 0.0, 1.0], [1.0, 0.5, 0.33333333], [0.0, 1.0, 0.0]] X_hat = ta.normalize(X, scaler="minmax") np.testing.assert_array_almost_equal(expected, X_hat, decimal=3) # standard X = [[0, 0], [0, 0], [1, 1], [1, 1]] expected = [[-1.0, -1.0], [-1.0, -1.0], [1.0, 1.0], [1.0, 1.0]] X_hat = ta.normalize(X, scaler="standard") np.testing.assert_array_almost_equal(expected, X_hat, decimal=3) def test_polyfit(self): ts = [i for i in range(5)] err, (a, b) = ta.polyfit(ts, deg=1) self.assertTrue(err < 1e-13) self.assertAlmostEquals(1, a) self.assertAlmostEqual(0, b, 7) ts = np.array([0.2 * i ** 2 + 2 * i + 3 for i in range(5)]) err, (a, b, c), (x, y) = ta.polyfit(ts) self.assertTrue(err < 1e-13) self.assertAlmostEquals(0.2, a) self.assertAlmostEquals(2, b) self.assertAlmostEquals(3, c) ts[2] = np.NaN err, _, _ = ta.polyfit(ts) self.assertTrue(err >= 1e9) def test_angle(self): ts = np.array([i for i in range(5)]) err, angle = ta.angle(ts) self.assertTrue(err < 0.01) self.assertAlmostEquals(0.707, angle, places=3) # degree: 45, rad: pi/2 ts = np.array([np.sqrt(3) / 3 * i for i in range(10)]) err, angle = ta.angle(ts) self.assertTrue(err < 0.01) self.assertAlmostEquals(0.866, angle, places=3) # degree: 30, rad: pi/6 ts = np.array([-np.sqrt(3) / 3 * i for i in range(7)]) err, angle = ta.angle(ts) self.assertTrue(err < 0.01) self.assertAlmostEquals(-0.866, angle, places=3) # degree: 150, rad: 5*pi/6 def test_inverse_vcross(self): f = 
np.array([-3 * i ** 2 + 20 * i - 10 for i in range(10)]) g = np.array([i - 5 for i in range(10)]) flag, indices = ta.inverse_vcross(f, g) self.assertTrue(flag) self.assertTupleEqual((0, 6), indices) def test_slope(self): ts = [i for i in range(5)] err, a = ta.slope(ts) self.assertTrue(err < 1e-13) self.assertAlmostEquals(1, a, places=7) def test_max_drawdown(self): ts = np.sin(np.arange(10) * np.pi / 10) dd, start, end = ta.max_drawdown(ts) self.assertAlmostEquals(-0.691, dd, places=3) self.assertListEqual([5, 9], [start, end])
[ "omicron.core.talib.vcross", "numpy.testing.assert_array_almost_equal", "omicron.core.talib.max_drawdown", "omicron.core.talib.angle", "omicron.core.talib.relative_error", "numpy.sqrt", "omicron.core.talib.cross", "omicron.core.talib.barssince", "numpy.array", "omicron.core.talib.slope", "omicro...
[((801, 817), 'omicron.core.talib.cross', 'ta.cross', (['y1', 'y2'], {}), '(y1, y2)\n', (809, 817), True, 'from omicron.core import talib as ta\n'), ((911, 927), 'omicron.core.talib.cross', 'ta.cross', (['y2', 'y1'], {}), '(y2, y1)\n', (919, 927), True, 'from omicron.core import talib as ta\n'), ((1112, 1128), 'omicron.core.talib.cross', 'ta.cross', (['y1', 'y2'], {}), '(y1, y2)\n', (1120, 1128), True, 'from omicron.core import talib as ta\n'), ((1222, 1238), 'omicron.core.talib.cross', 'ta.cross', (['y2', 'y1'], {}), '(y2, y1)\n', (1230, 1238), True, 'from omicron.core import talib as ta\n'), ((1400, 1416), 'omicron.core.talib.cross', 'ta.cross', (['y1', 'y2'], {}), '(y1, y2)\n', (1408, 1416), True, 'from omicron.core import talib as ta\n'), ((1620, 1635), 'omicron.core.talib.vcross', 'ta.vcross', (['f', 'g'], {}), '(f, g)\n', (1629, 1635), True, 'from omicron.core import talib as ta\n'), ((1797, 1821), 'omicron.core.talib.moving_average', 'ta.moving_average', (['ts', '(3)'], {}), '(ts, 3)\n', (1814, 1821), True, 'from omicron.core import talib as ta\n'), ((2044, 2051), 'copy.copy', 'copy', (['y'], {}), '(y)\n', (2048, 2051), False, 'from copy import copy\n'), ((2322, 2334), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2331, 2334), True, 'import numpy as np\n'), ((2351, 2358), 'copy.copy', 'copy', (['y'], {}), '(y)\n', (2355, 2358), False, 'from copy import copy\n'), ((2649, 2686), 'omicron.core.talib.normalize', 'ta.normalize', (['X'], {'scaler': '"""unit_vector"""'}), "(X, scaler='unit_vector')\n", (2661, 2686), True, 'from omicron.core import talib as ta\n'), ((2695, 2759), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expected', 'X_hat'], {'decimal': '(4)'}), '(expected, X_hat, decimal=4)\n', (2731, 2759), True, 'import numpy as np\n'), ((2791, 2854), 'numpy.array', 'np.array', (['[[1.0, -1.0, 2.0], [2.0, 0.0, 0.0], [0.0, 1.0, -1.0]]'], {}), '([[1.0, -1.0, 2.0], [2.0, 0.0, 0.0], [0.0, 1.0, -1.0]])\n', (2799, 
2854), True, 'import numpy as np\n'), ((2946, 2978), 'omicron.core.talib.normalize', 'ta.normalize', (['X'], {'scaler': '"""maxabs"""'}), "(X, scaler='maxabs')\n", (2958, 2978), True, 'from omicron.core import talib as ta\n'), ((2987, 3051), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expected', 'X_hat'], {'decimal': '(2)'}), '(expected, X_hat, decimal=2)\n', (3023, 3051), True, 'import numpy as np\n'), ((3165, 3197), 'omicron.core.talib.normalize', 'ta.normalize', (['X'], {'scaler': '"""minmax"""'}), "(X, scaler='minmax')\n", (3177, 3197), True, 'from omicron.core import talib as ta\n'), ((3206, 3270), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expected', 'X_hat'], {'decimal': '(3)'}), '(expected, X_hat, decimal=3)\n', (3242, 3270), True, 'import numpy as np\n'), ((3424, 3458), 'omicron.core.talib.normalize', 'ta.normalize', (['X'], {'scaler': '"""standard"""'}), "(X, scaler='standard')\n", (3436, 3458), True, 'from omicron.core import talib as ta\n'), ((3467, 3531), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expected', 'X_hat'], {'decimal': '(3)'}), '(expected, X_hat, decimal=3)\n', (3503, 3531), True, 'import numpy as np\n'), ((3619, 3640), 'omicron.core.talib.polyfit', 'ta.polyfit', (['ts'], {'deg': '(1)'}), '(ts, deg=1)\n', (3629, 3640), True, 'from omicron.core import talib as ta\n'), ((3858, 3872), 'omicron.core.talib.polyfit', 'ta.polyfit', (['ts'], {}), '(ts)\n', (3868, 3872), True, 'from omicron.core import talib as ta\n'), ((4070, 4084), 'omicron.core.talib.polyfit', 'ta.polyfit', (['ts'], {}), '(ts)\n', (4080, 4084), True, 'from omicron.core import talib as ta\n'), ((4214, 4226), 'omicron.core.talib.angle', 'ta.angle', (['ts'], {}), '(ts)\n', (4222, 4226), True, 'from omicron.core import talib as ta\n'), ((4430, 4442), 'omicron.core.talib.angle', 'ta.angle', (['ts'], {}), '(ts)\n', (4438, 4442), True, 'from omicron.core import 
talib as ta\n'), ((4645, 4657), 'omicron.core.talib.angle', 'ta.angle', (['ts'], {}), '(ts)\n', (4653, 4657), True, 'from omicron.core import talib as ta\n'), ((4958, 4981), 'omicron.core.talib.inverse_vcross', 'ta.inverse_vcross', (['f', 'g'], {}), '(f, g)\n', (4975, 4981), True, 'from omicron.core import talib as ta\n'), ((5139, 5151), 'omicron.core.talib.slope', 'ta.slope', (['ts'], {}), '(ts)\n', (5147, 5151), True, 'from omicron.core import talib as ta\n'), ((5344, 5363), 'omicron.core.talib.max_drawdown', 'ta.max_drawdown', (['ts'], {}), '(ts)\n', (5359, 5363), True, 'from omicron.core import talib as ta\n'), ((236, 259), 'omicron.core.talib.barssince', 'ta.barssince', (['condition'], {}), '(condition)\n', (248, 259), True, 'from omicron.core import talib as ta\n'), ((324, 347), 'omicron.core.talib.barssince', 'ta.barssince', (['condition'], {}), '(condition)\n', (336, 347), True, 'from omicron.core import talib as ta\n'), ((418, 441), 'omicron.core.talib.barssince', 'ta.barssince', (['condition'], {}), '(condition)\n', (430, 441), True, 'from omicron.core import talib as ta\n'), ((518, 541), 'omicron.core.talib.barssince', 'ta.barssince', (['condition'], {}), '(condition)\n', (530, 541), True, 'from omicron.core import talib as ta\n'), ((619, 642), 'omicron.core.talib.barssince', 'ta.barssince', (['condition'], {}), '(condition)\n', (631, 642), True, 'from omicron.core import talib as ta\n'), ((1940, 1957), 'math.isnan', 'math.isnan', (['ma[0]'], {}), '(ma[0])\n', (1950, 1957), False, 'import math\n'), ((2102, 2130), 'omicron.core.talib.mean_absolute_error', 'ta.mean_absolute_error', (['y', 'y'], {}), '(y, y)\n', (2124, 2130), True, 'from omicron.core import talib as ta\n'), ((2169, 2201), 'omicron.core.talib.mean_absolute_error', 'ta.mean_absolute_error', (['y', 'y_hat'], {}), '(y, y_hat)\n', (2191, 2201), True, 'from omicron.core import talib as ta\n'), ((2240, 2272), 'omicron.core.talib.mean_absolute_error', 'ta.mean_absolute_error', (['y_hat', 'y'], {}), 
'(y_hat, y)\n', (2262, 2272), True, 'from omicron.core import talib as ta\n'), ((2395, 2422), 'omicron.core.talib.relative_error', 'ta.relative_error', (['y', 'y_hat'], {}), '(y, y_hat)\n', (2412, 2422), True, 'from omicron.core import talib as ta\n'), ((5291, 5304), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5300, 5304), True, 'import numpy as np\n'), ((4369, 4379), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (4376, 4379), True, 'import numpy as np\n'), ((4585, 4595), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (4592, 4595), True, 'import numpy as np\n')]
import matplotlib.pyplot as plt import numpy as np with open("RangeN.dat","r") as temp_file: data = temp_file.readlines() NN = [] ts = [] tbh = [] for line in data: NN.append(int(line.split()[0])) ts.append(float(line.split()[1])) tbh.append(float(line.split()[2])) NNl = np.array([np.log(itm) for itm in NN]) tsl = np.array([np.log(itm) for itm in ts]) tbhl = np.array([np.log(itm) for itm in tbh]) A = np.vstack([NNl, np.ones(len(NNl))]).T a_s, b_s = np.linalg.lstsq(A, tsl, rcond=None)[0] a_b, b_b = np.linalg.lstsq(A, tbhl, rcond=None)[0] print("For standard: alpha={:.8f} and beta={:.6f}".format(np.exp(b_s),a_s)) print("For Barnes-Hut: alpha={:.8f} and beta={:.6f}".format(np.exp(b_b),a_b)) xx= np.linspace(1000,15848,200) yy_s = [np.exp(b_s)*np.power(itm, a_s) for itm in xx] yy_b = [np.exp(b_b)*np.power(itm, a_b) for itm in xx] fig, ax = plt.subplots(1, figsize=(7,5)) ax.loglog(NN, ts, 'x', color = '#2f20bc', alpha = 1.0, label = r'Initial Method') ax.loglog(xx, yy_s, '-', color = '#2f20bc', alpha=0.7, label=r'$\alpha N^\beta$ - $\alpha\simeq 1.0e-6$, $\beta\simeq 1.823$') ax.loglog(NN, tbh, 'x', color = '#d66f02', alpha =1.0, label = r'Barnes-Hut Method') ax.loglog(xx, yy_b, '-', color = '#d66f02', alpha=0.7, label=r'$\alpha N^\beta$ - $\alpha\simeq 5.2e-3$, $\beta\simeq 0.714$') ax.set_xlabel(r'Number of particles $N$', fontsize = 13) ax.set_ylabel(r'Execution Time (s)', fontsize = 13) ax.set_xlim([1000,15868]) ax.set_title("Comparison initial method and Barnes-Hut\nimplementation "+r'with $J=0.1$, $K=1$, $\theta=0.5$', fontsize=14, fontweight='bold') plt.legend(loc='upper left', frameon=True, fancybox=True, facecolor='w', fontsize=11) plt.grid(which='both') plt.savefig("ExecTimeN.png",dpi=300) plt.show()
[ "matplotlib.pyplot.grid", "matplotlib.pyplot.savefig", "numpy.power", "numpy.log", "numpy.exp", "numpy.linspace", "numpy.linalg.lstsq", "matplotlib.pyplot.subplots", "matplotlib.pyplot.legend", "matplotlib.pyplot.show" ]
[((720, 749), 'numpy.linspace', 'np.linspace', (['(1000)', '(15848)', '(200)'], {}), '(1000, 15848, 200)\n', (731, 749), True, 'import numpy as np\n'), ((868, 899), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(7, 5)'}), '(1, figsize=(7, 5))\n', (880, 899), True, 'import matplotlib.pyplot as plt\n'), ((1598, 1687), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'frameon': '(True)', 'fancybox': '(True)', 'facecolor': '"""w"""', 'fontsize': '(11)'}), "(loc='upper left', frameon=True, fancybox=True, facecolor='w',\n fontsize=11)\n", (1608, 1687), True, 'import matplotlib.pyplot as plt\n'), ((1684, 1706), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""both"""'}), "(which='both')\n", (1692, 1706), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1744), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ExecTimeN.png"""'], {'dpi': '(300)'}), "('ExecTimeN.png', dpi=300)\n", (1718, 1744), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1754), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1752, 1754), True, 'import matplotlib.pyplot as plt\n'), ((472, 507), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'tsl'], {'rcond': 'None'}), '(A, tsl, rcond=None)\n', (487, 507), True, 'import numpy as np\n'), ((522, 558), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'tbhl'], {'rcond': 'None'}), '(A, tbhl, rcond=None)\n', (537, 558), True, 'import numpy as np\n'), ((301, 312), 'numpy.log', 'np.log', (['itm'], {}), '(itm)\n', (307, 312), True, 'import numpy as np\n'), ((345, 356), 'numpy.log', 'np.log', (['itm'], {}), '(itm)\n', (351, 356), True, 'import numpy as np\n'), ((390, 401), 'numpy.log', 'np.log', (['itm'], {}), '(itm)\n', (396, 401), True, 'import numpy as np\n'), ((620, 631), 'numpy.exp', 'np.exp', (['b_s'], {}), '(b_s)\n', (626, 631), True, 'import numpy as np\n'), ((698, 709), 'numpy.exp', 'np.exp', (['b_b'], {}), '(b_b)\n', (704, 709), True, 'import numpy as np\n'), ((757, 768), 'numpy.exp', 
'np.exp', (['b_s'], {}), '(b_s)\n', (763, 768), True, 'import numpy as np\n'), ((769, 787), 'numpy.power', 'np.power', (['itm', 'a_s'], {}), '(itm, a_s)\n', (777, 787), True, 'import numpy as np\n'), ((811, 822), 'numpy.exp', 'np.exp', (['b_b'], {}), '(b_b)\n', (817, 822), True, 'import numpy as np\n'), ((823, 841), 'numpy.power', 'np.power', (['itm', 'a_b'], {}), '(itm, a_b)\n', (831, 841), True, 'import numpy as np\n')]
import timeit, functools def dist_test(): pp_sketchlib.queryDatabase("listeria", "listeria", names, names, kmers, 1) setup = """ import sys sys.path.insert(0, "build/lib.macosx-10.9-x86_64-3.7") import pp_sketchlib """ #import numpy as np # #from __main__ import dist_test # #kmers = np.arange(15, 30, 3) # #names = [] #sequences = [] #with open("rfiles.txt", 'r') as refFile: # for refLine in refFile: # refFields = refLine.rstrip().split("\t") # names.append(refFields[0]) # sequences.append(list(refFields[1:])) #""" if __name__ == '__main__': import numpy as np import sys sys.path.insert(0, "build/lib.macosx-10.9-x86_64-3.7") import pp_sketchlib #from __main__ import dist_test kmers = np.arange(15, 30, 3) names = [] sequences = [] with open("rfiles.txt", 'r') as refFile: for refLine in refFile: refFields = refLine.rstrip().split("\t") names.append(refFields[0]) sequences.append(list(refFields[1:])) t = timeit.Timer(functools.partial(pp_sketchlib.queryDatabase, "listeria", "listeria", names, names, kmers, 1), setup=setup) print(t.timeit(100))
[ "sys.path.insert", "functools.partial", "pp_sketchlib.queryDatabase", "numpy.arange" ]
[((47, 121), 'pp_sketchlib.queryDatabase', 'pp_sketchlib.queryDatabase', (['"""listeria"""', '"""listeria"""', 'names', 'names', 'kmers', '(1)'], {}), "('listeria', 'listeria', names, names, kmers, 1)\n", (73, 121), False, 'import pp_sketchlib\n'), ((624, 678), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""build/lib.macosx-10.9-x86_64-3.7"""'], {}), "(0, 'build/lib.macosx-10.9-x86_64-3.7')\n", (639, 678), False, 'import sys\n'), ((753, 773), 'numpy.arange', 'np.arange', (['(15)', '(30)', '(3)'], {}), '(15, 30, 3)\n', (762, 773), True, 'import numpy as np\n'), ((1047, 1144), 'functools.partial', 'functools.partial', (['pp_sketchlib.queryDatabase', '"""listeria"""', '"""listeria"""', 'names', 'names', 'kmers', '(1)'], {}), "(pp_sketchlib.queryDatabase, 'listeria', 'listeria', names,\n names, kmers, 1)\n", (1064, 1144), False, 'import timeit, functools\n')]
######################################################################################## # Compare two systems using bootstrap resampling # # adapted from https://github.com/neubig/util-scripts/blob/master/paired-bootstrap.py # # # # See, e.g. the following paper for references # # # # Statistical Significance Tests for Machine Translation Evaluation # # <NAME> # # http://www.aclweb.org/anthology/W04-3250 # # # ######################################################################################## import numpy as np def eval_with_paired_bootstrap(ref, outs, src, scorer, compare_directions=[(0, 1)], num_samples=1000, sample_ratio=0.5, cache_stats=None): """ Evaluate with paired boostrap. This compares several systems, performing a signifiance tests with paired bootstrap resampling to compare the accuracy of the specified systems. Args: ref: The correct labels outs: The output of systems src: The source corpus scorer: The scorer compare_directions: A string specifying which two systems to compare num_samples: The number of bootstrap samples to take sample_ratio: The ratio of samples to take every time cache_stats: The precomputed statistics Returns: A tuple containing the win ratios, statistics for systems """ sys_scores = [[] for _ in outs] wins = [[0, 0, 0] for _ in compare_directions] if compare_directions is not None else None n = len(ref) ids = list(range(n)) if cache_stats is None: cache_stats = [scorer.cache_stats(ref, out, src=src) for out in outs] sample_size = int(n*sample_ratio) for _ in range(num_samples): # Subsample the gold and system outputs (with replacement) reduced_ids = np.random.choice(ids, size=sample_size, replace=True) # Calculate accuracy on the reduced sample and save stats if cache_stats[0]: sys_score, _ = zip(*[scorer.score_cached_corpus(reduced_ids, cache_stat) for cache_stat in cache_stats]) else: reduced_ref = [ref[i] for i in reduced_ids] reduced_outs = [[out[i] for i in reduced_ids] for out in outs] reduced_src = [src[i] for i in 
reduced_ids] sys_score, _ = zip(*[scorer.score_corpus(reduced_ref, reduced_out, reduced_src) for reduced_out in reduced_outs]) if wins is not None: for i, compare_direction in enumerate(compare_directions): left, right = compare_direction if sys_score[left] > sys_score[right]: wins[i][0] += 1 if sys_score[left] < sys_score[right]: wins[i][1] += 1 else: wins[i][2] += 1 for i in range(len(outs)): sys_scores[i].append(sys_score[i]) # Print win stats wins = [[x/float(num_samples) for x in win] for win in wins] if wins is not None else None # Print system stats sys_stats = [] for i in range(len(outs)): sys_scores[i].sort() sys_stats.append({ 'mean':np.mean(sys_scores[i]), 'median':np.median(sys_scores[i]), 'lower_bound':sys_scores[i][int(num_samples * 0.025)], 'upper_bound':sys_scores[i][int(num_samples * 0.975)] }) return wins, sys_stats
[ "numpy.random.choice", "numpy.mean", "numpy.median" ]
[((2291, 2344), 'numpy.random.choice', 'np.random.choice', (['ids'], {'size': 'sample_size', 'replace': '(True)'}), '(ids, size=sample_size, replace=True)\n', (2307, 2344), True, 'import numpy as np\n'), ((3482, 3504), 'numpy.mean', 'np.mean', (['sys_scores[i]'], {}), '(sys_scores[i])\n', (3489, 3504), True, 'import numpy as np\n'), ((3521, 3545), 'numpy.median', 'np.median', (['sys_scores[i]'], {}), '(sys_scores[i])\n', (3530, 3545), True, 'import numpy as np\n')]
# """Pytorch Dataset object that loads 27x27 patches that contain single cells.""" import os import random import scipy.io import numpy as np from PIL import Image import torch import torch.utils.data as data_utils import torchvision.transforms as transforms from torch.nn.functional import pad import dataloaders.additional_transforms as AT class ColonCancerBagsCross(data_utils.Dataset): def __init__(self, path, train_val_idxs=None, test_idxs=None, train=True, shuffle_bag=False, data_augmentation=False, padding=True, base_att=False): self.path = path self.train_val_idxs = train_val_idxs self.test_idxs = test_idxs self.train = train self.shuffle_bag = shuffle_bag self.data_augmentation = data_augmentation self.padding = padding self.base_att = base_att if self.base_att: # Trace # print('Normalization enabled on the Colon Cancer dataset.') self.data_augmentation_img_transform = transforms.Compose( [ AT.RandomHEStain(), AT.HistoNormalize(), AT.RandomRotate(), AT.RandomVerticalFlip(), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) self.normalize_to_tensor_transform = transforms.Compose( [ AT.HistoNormalize(), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) else: # Trace # print('Normalization disabled on the Colon Cancer dataset.') self.data_augmentation_img_transform = transforms.Compose( [ AT.RandomHEStain(), AT.HistoNormalize(), AT.RandomRotate(), AT.RandomVerticalFlip(), transforms.RandomHorizontalFlip(), transforms.ToTensor(), ]) self.normalize_to_tensor_transform = transforms.Compose( [ AT.HistoNormalize(), transforms.ToTensor(), ]) self.dir_list_train, self.dir_list_test = self.split_dir_list( self.path, self.train_val_idxs, self.test_idxs) if self.train: self.bag_list_train, self.labels_list_train = self.create_bags(self.dir_list_train) else: self.bag_list_test, self.labels_list_test = self.create_bags(self.dir_list_test) @staticmethod def split_dir_list(path, 
train_val_idxs, test_idxs): dirs = [x[0] for x in os.walk(path)] dirs.pop(0) dirs.sort() dir_list_train = [dirs[i] for i in train_val_idxs] dir_list_test = [dirs[i] for i in test_idxs] return dir_list_train, dir_list_test def create_bags(self, dir_list): bag_list = [] labels_list = [] for dir in dir_list: # Get image name img_name = dir.split('/')[-1] # bmp to pillow img_dir = dir + '/' + img_name + '.bmp' with open(img_dir, 'rb') as f: with Image.open(f) as img: img = img.convert('RGB') # crop malignant cells dir_epithelial = dir + '/' + img_name + '_epithelial.mat' with open(dir_epithelial, 'rb') as f: mat_epithelial = scipy.io.loadmat(f) cropped_cells_epithelial = [] for (x, y) in mat_epithelial['detection']: x = np.round(x) y = np.round(y) if self.data_augmentation: x = x + np.round(np.random.normal(0, 3, 1)) y = y + np.round(np.random.normal(0, 3, 1)) # If it is a numpy array if type(x) == np.ndarray: x = x[0] if x < 13: x_start = 0 x_end = 27 elif x > 500 - 13: x_start = 500 - 27 x_end = 500 else: x_start = x - 13 x_end = x + 14 # If it is a numpy array if type(y) == np.ndarray: y = y[0] if y < 13: y_start = 0 y_end = 27 elif y > 500 - 13: y_start = 500 - 27 y_end = 500 else: y_start = y - 13 y_end = y + 14 cropped_cells_epithelial.append(img.crop((x_start, y_start, x_end, y_end))) # crop all other cells dir_inflammatory = dir + '/' + img_name + '_inflammatory.mat' dir_fibroblast = dir + '/' + img_name + '_fibroblast.mat' dir_others = dir + '/' + img_name + '_others.mat' with open(dir_inflammatory, 'rb') as f: mat_inflammatory = scipy.io.loadmat(f) with open(dir_fibroblast, 'rb') as f: mat_fibroblast = scipy.io.loadmat(f) with open(dir_others, 'rb') as f: mat_others = scipy.io.loadmat(f) all_coordinates = np.concatenate( (mat_inflammatory['detection'], mat_fibroblast['detection'], mat_others['detection']), axis=0) cropped_cells_others = [] for (x, y) in all_coordinates: x = np.round(x) y = np.round(y) if self.data_augmentation: x = x + 
np.round(np.random.normal(0, 3, 1)) y = y + np.round(np.random.normal(0, 3, 1)) # If it is a numpy array if type(x) == np.ndarray: x = x[0] if x < 13: x_start = 0 x_end = 27 elif x > 500 - 13: x_start = 500 - 27 x_end = 500 else: x_start = x - 13 x_end = x + 14 # If it is a numpy array if type(y) == np.ndarray: y = y[0] if y < 13: y_start = 0 y_end = 27 elif y > 500 - 13: y_start = 500 - 27 y_end = 500 else: y_start = y - 13 y_end = y + 14 cropped_cells_others.append(img.crop((x_start, y_start, x_end, y_end))) # generate bag bag = cropped_cells_epithelial + cropped_cells_others # store single cell labels labels = np.concatenate((np.ones(len(cropped_cells_epithelial)), np.zeros(len(cropped_cells_others))), axis=0) # shuffle if self.shuffle_bag: zip_bag_labels = list(zip(bag, labels)) random.shuffle(zip_bag_labels) bag, labels = zip(*zip_bag_labels) # append every bag two times if training if self.train: for _ in [0, 1]: bag_list.append(bag) labels_list.append(labels) else: bag_list.append(bag) labels_list.append(labels) # bag_list.append(bag) # labels_list.append(labels) return bag_list, labels_list def transform_and_data_augmentation(self, bag): if self.data_augmentation: img_transform = self.data_augmentation_img_transform else: img_transform = self.normalize_to_tensor_transform bag_tensors = [] for img in bag: # If padding is True if self.padding: bag_tensors.append(pad(img_transform(img), (0, 1, 0, 1), mode='constant')) # Otherwise else: bag_tensors.append(img_transform(img)) return torch.stack(bag_tensors) def __len__(self): if self.train: return len(self.labels_list_train) else: return len(self.labels_list_test) def __getitem__(self, index): if self.train: bag = self.bag_list_train[index] label = [max(self.labels_list_train[index]), self.labels_list_train[index]] else: bag = self.bag_list_test[index] label = [max(self.labels_list_test[index]), self.labels_list_test[index]] return self.transform_and_data_augmentation(bag), label
[ "numpy.random.normal", "dataloaders.additional_transforms.RandomHEStain", "dataloaders.additional_transforms.RandomVerticalFlip", "dataloaders.additional_transforms.HistoNormalize", "random.shuffle", "dataloaders.additional_transforms.RandomRotate", "PIL.Image.open", "torch.stack", "os.walk", "tor...
[((8439, 8463), 'torch.stack', 'torch.stack', (['bag_tensors'], {}), '(bag_tensors)\n', (8450, 8463), False, 'import torch\n'), ((5635, 5748), 'numpy.concatenate', 'np.concatenate', (["(mat_inflammatory['detection'], mat_fibroblast['detection'], mat_others[\n 'detection'])"], {'axis': '(0)'}), "((mat_inflammatory['detection'], mat_fibroblast['detection'],\n mat_others['detection']), axis=0)\n", (5649, 5748), True, 'import numpy as np\n'), ((3003, 3016), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (3010, 3016), False, 'import os\n'), ((3941, 3952), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (3949, 3952), True, 'import numpy as np\n'), ((3973, 3984), 'numpy.round', 'np.round', (['y'], {}), '(y)\n', (3981, 3984), True, 'import numpy as np\n'), ((5864, 5875), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (5872, 5875), True, 'import numpy as np\n'), ((5896, 5907), 'numpy.round', 'np.round', (['y'], {}), '(y)\n', (5904, 5907), True, 'import numpy as np\n'), ((7397, 7427), 'random.shuffle', 'random.shuffle', (['zip_bag_labels'], {}), '(zip_bag_labels)\n', (7411, 7427), False, 'import random\n'), ((1185, 1203), 'dataloaders.additional_transforms.RandomHEStain', 'AT.RandomHEStain', ([], {}), '()\n', (1201, 1203), True, 'import dataloaders.additional_transforms as AT\n'), ((1225, 1244), 'dataloaders.additional_transforms.HistoNormalize', 'AT.HistoNormalize', ([], {}), '()\n', (1242, 1244), True, 'import dataloaders.additional_transforms as AT\n'), ((1266, 1283), 'dataloaders.additional_transforms.RandomRotate', 'AT.RandomRotate', ([], {}), '()\n', (1281, 1283), True, 'import dataloaders.additional_transforms as AT\n'), ((1305, 1328), 'dataloaders.additional_transforms.RandomVerticalFlip', 'AT.RandomVerticalFlip', ([], {}), '()\n', (1326, 1328), True, 'import dataloaders.additional_transforms as AT\n'), ((1350, 1383), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1381, 1383), True, 'import 
torchvision.transforms as transforms\n'), ((1405, 1426), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1424, 1426), True, 'import torchvision.transforms as transforms\n'), ((1448, 1502), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1468, 1502), True, 'import torchvision.transforms as transforms\n'), ((1670, 1689), 'dataloaders.additional_transforms.HistoNormalize', 'AT.HistoNormalize', ([], {}), '()\n', (1687, 1689), True, 'import dataloaders.additional_transforms as AT\n'), ((1711, 1732), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1730, 1732), True, 'import torchvision.transforms as transforms\n'), ((1754, 1808), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1774, 1808), True, 'import torchvision.transforms as transforms\n'), ((2087, 2105), 'dataloaders.additional_transforms.RandomHEStain', 'AT.RandomHEStain', ([], {}), '()\n', (2103, 2105), True, 'import dataloaders.additional_transforms as AT\n'), ((2127, 2146), 'dataloaders.additional_transforms.HistoNormalize', 'AT.HistoNormalize', ([], {}), '()\n', (2144, 2146), True, 'import dataloaders.additional_transforms as AT\n'), ((2168, 2185), 'dataloaders.additional_transforms.RandomRotate', 'AT.RandomRotate', ([], {}), '()\n', (2183, 2185), True, 'import dataloaders.additional_transforms as AT\n'), ((2207, 2230), 'dataloaders.additional_transforms.RandomVerticalFlip', 'AT.RandomVerticalFlip', ([], {}), '()\n', (2228, 2230), True, 'import dataloaders.additional_transforms as AT\n'), ((2252, 2285), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2283, 2285), True, 'import torchvision.transforms as transforms\n'), ((2307, 2328), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', 
(2326, 2328), True, 'import torchvision.transforms as transforms\n'), ((2456, 2475), 'dataloaders.additional_transforms.HistoNormalize', 'AT.HistoNormalize', ([], {}), '()\n', (2473, 2475), True, 'import dataloaders.additional_transforms as AT\n'), ((2497, 2518), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2516, 2518), True, 'import torchvision.transforms as transforms\n'), ((3547, 3560), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (3557, 3560), False, 'from PIL import Image\n'), ((4066, 4091), 'numpy.random.normal', 'np.random.normal', (['(0)', '(3)', '(1)'], {}), '(0, 3, 1)\n', (4082, 4091), True, 'import numpy as np\n'), ((4130, 4155), 'numpy.random.normal', 'np.random.normal', (['(0)', '(3)', '(1)'], {}), '(0, 3, 1)\n', (4146, 4155), True, 'import numpy as np\n'), ((5989, 6014), 'numpy.random.normal', 'np.random.normal', (['(0)', '(3)', '(1)'], {}), '(0, 3, 1)\n', (6005, 6014), True, 'import numpy as np\n'), ((6053, 6078), 'numpy.random.normal', 'np.random.normal', (['(0)', '(3)', '(1)'], {}), '(0, 3, 1)\n', (6069, 6078), True, 'import numpy as np\n')]
# =============================================================================== # Copyright 2013 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= from __future__ import absolute_import from traits.api import Array, Event, Range, Bool from traitsui.api import UItem, Item, VGroup # ============= standard library imports ======================== from numpy import asarray, array, ndarray from PIL import Image # ============= local library imports ========================== from pychron.viewable import Viewable from pychron.core.ui.image_editor import ImageEditor class FrameImage(Viewable): source_frame = Array refresh_needed = Event alpha = Range(0.0, 1.0) overlays = None alpha_enabled = Bool(True) def load(self, frame, swap_rb=False): self.source_frame = array(frame) self.refresh_needed = True def set_frame(self, frame): if not isinstance(frame, ndarray): frame = asarray(frame) self.overlays = None self.source_frame = frame self.refresh_needed = True def overlay(self, frame, alpha): im0 = Image.fromarray(self.source_frame) im1 = Image.fromarray(frame) self.overlays = (im0, im1) o = self.alpha self.alpha = alpha if alpha == o: self._alpha_changed() def _overlay(self, im0, im1, alpha): try: arr = Image.blend(im1, im0, alpha) self.source_frame = asarray(arr) self.refresh_needed = True except ValueError: pass def _alpha_changed(self): 
if self.overlays: im0, im1 = self.overlays self._overlay(im0, im1, self.alpha) class StandAloneImage(FrameImage): def traits_view(self): img = UItem('source_frame', editor=ImageEditor(refresh='refresh')) if self.alpha_enabled: vv = VGroup(Item('alpha'), img) else: vv = img v = self.view_factory(VGroup(vv)) return v # ============= EOF =============================================
[ "traitsui.api.VGroup", "PIL.Image.fromarray", "PIL.Image.blend", "numpy.asarray", "numpy.array", "traitsui.api.Item", "traits.api.Range", "pychron.core.ui.image_editor.ImageEditor", "traits.api.Bool" ]
[((1313, 1328), 'traits.api.Range', 'Range', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1318, 1328), False, 'from traits.api import Array, Event, Range, Bool\n'), ((1369, 1379), 'traits.api.Bool', 'Bool', (['(True)'], {}), '(True)\n', (1373, 1379), False, 'from traits.api import Array, Event, Range, Bool\n'), ((1451, 1463), 'numpy.array', 'array', (['frame'], {}), '(frame)\n', (1456, 1463), False, 'from numpy import asarray, array, ndarray\n'), ((1761, 1795), 'PIL.Image.fromarray', 'Image.fromarray', (['self.source_frame'], {}), '(self.source_frame)\n', (1776, 1795), False, 'from PIL import Image\n'), ((1810, 1832), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (1825, 1832), False, 'from PIL import Image\n'), ((1595, 1609), 'numpy.asarray', 'asarray', (['frame'], {}), '(frame)\n', (1602, 1609), False, 'from numpy import asarray, array, ndarray\n'), ((2050, 2078), 'PIL.Image.blend', 'Image.blend', (['im1', 'im0', 'alpha'], {}), '(im1, im0, alpha)\n', (2061, 2078), False, 'from PIL import Image\n'), ((2111, 2123), 'numpy.asarray', 'asarray', (['arr'], {}), '(arr)\n', (2118, 2123), False, 'from numpy import asarray, array, ndarray\n'), ((2628, 2638), 'traitsui.api.VGroup', 'VGroup', (['vv'], {}), '(vv)\n', (2634, 2638), False, 'from traitsui.api import UItem, Item, VGroup\n'), ((2456, 2486), 'pychron.core.ui.image_editor.ImageEditor', 'ImageEditor', ([], {'refresh': '"""refresh"""'}), "(refresh='refresh')\n", (2467, 2486), False, 'from pychron.core.ui.image_editor import ImageEditor\n'), ((2543, 2556), 'traitsui.api.Item', 'Item', (['"""alpha"""'], {}), "('alpha')\n", (2547, 2556), False, 'from traitsui.api import UItem, Item, VGroup\n')]
''' Generate instance groundtruth .txt files (for evaluation) ''' import numpy as np import glob import torch import os semantic_label_idxs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39] semantic_label_names = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] if __name__ == '__main__': split = 'val' files = sorted(glob.glob('{}/scene*_inst_nostuff.pth'.format(split))) rooms = [torch.load(i) for i in files] if not os.path.exists(split + '_gt'): os.mkdir(split + '_gt') for i in range(len(rooms)): xyz, rgb, label, instance_label = rooms[i] # label 0~19 -100; instance_label 0~instance_num-1 -100 scene_name = files[i].split('/')[-1][:12] print('{}/{} {}'.format(i + 1, len(rooms), scene_name)) instance_label_new = np.zeros(instance_label.shape, dtype=np.int32) # 0 for unannotated, xx00y: x for semantic_label, y for inst_id (1~instance_num) instance_num = int(instance_label.max()) + 1 for inst_id in range(instance_num): instance_mask = np.where(instance_label == inst_id)[0] sem_id = int(label[instance_mask[0]]) if(sem_id == -100): sem_id = 0 semantic_label = semantic_label_idxs[sem_id] instance_label_new[instance_mask] = semantic_label * 1000 + inst_id + 1 np.savetxt(os.path.join(split + '_gt', scene_name + '.txt'), instance_label_new, fmt='%d')
[ "os.path.exists", "numpy.where", "torch.load", "os.path.join", "numpy.zeros", "os.mkdir" ]
[((588, 601), 'torch.load', 'torch.load', (['i'], {}), '(i)\n', (598, 601), False, 'import torch\n'), ((630, 659), 'os.path.exists', 'os.path.exists', (["(split + '_gt')"], {}), "(split + '_gt')\n", (644, 659), False, 'import os\n'), ((669, 692), 'os.mkdir', 'os.mkdir', (["(split + '_gt')"], {}), "(split + '_gt')\n", (677, 692), False, 'import os\n'), ((980, 1026), 'numpy.zeros', 'np.zeros', (['instance_label.shape'], {'dtype': 'np.int32'}), '(instance_label.shape, dtype=np.int32)\n', (988, 1026), True, 'import numpy as np\n'), ((1528, 1576), 'os.path.join', 'os.path.join', (["(split + '_gt')", "(scene_name + '.txt')"], {}), "(split + '_gt', scene_name + '.txt')\n", (1540, 1576), False, 'import os\n'), ((1235, 1270), 'numpy.where', 'np.where', (['(instance_label == inst_id)'], {}), '(instance_label == inst_id)\n', (1243, 1270), True, 'import numpy as np\n')]
import numpy as np import matplotlib.pyplot as plt class ToySquares: """A set of squares that grow and shift to the right over time Parameters ---------- canvas_size : int size of the canvas on which the toy squares fall, in pixels n_objects : int number of toy squares to spawn """ def __init__(self, canvas_size, n_objects): self.canvas_size = canvas_size self.n_objects = n_objects self.initialize_positions() self.initialize_sizes() self.set_growth_rates() self.rightward_shift = 2 # pixels def initialize_positions(self): """Initialize the initial positions of the squares, with respect to the lower left of the square """ # Initialize x on the left half, so it doesn't fall out of bounds too quickly as it moves rightward across the canvas self.x_pos = (np.random.rand(self.n_objects)*self.canvas_size*0.5).astype(int) self.y_pos = (np.random.rand(self.n_objects)*self.canvas_size).astype(int) self.in_the_canvas = np.ones(self.n_objects).astype(bool) def initialize_sizes(self): """Initialize the initial sizes of the squares, as the number of pixels per edge """ allowed_sizes = np.arange(1, 5) prob = np.ones(len(allowed_sizes)) prob /= np.sum(prob) sizes = np.random.choice(allowed_sizes, size=self.n_objects, p=prob, replace=True) self.x_sizes = sizes self.y_sizes = sizes def set_growth_rates(self): """Randomly set the size increase that is applied every time step """ allowed_growth_rates = np.arange(1, 3) prob = np.ones(len(allowed_growth_rates)) prob /= np.sum(prob) self.growth_rates = np.random.choice(allowed_growth_rates, size=self.n_objects, p=prob, replace=True) def increment_time_step(self): """Advance one time step, updating object properties accordingly """ self.grow() self.shift_right() self.update_in_canvas() def grow(self): """Grow the sizes of the objects by their respective growth rates """ self.x_sizes += self.growth_rates self.y_sizes += self.growth_rates def shift_right(self): """Shift the objects to the right by two pixels """ self.x_pos += 
self.rightward_shift def update_in_canvas(self): """Evaluate whether the objects fall within the canvas and, if they get truncated by the canvas bounds, what the effective sizes are """ self.x_sizes = np.minimum(self.x_sizes, self.canvas_size - self.x_pos) self.y_sizes = np.minimum(self.x_sizes, self.canvas_size - self.y_pos) x_in_canvas = (self.x_sizes > 0.0) y_in_canvas = (self.y_sizes > 0.0) self.in_canvas = np.logical_and(x_in_canvas, y_in_canvas) def export_image(self, img_path): """Export the current object states to disk as an npy file Paramters --------- img_path : str or os.path object path of image file to be saved """ canvas = np.zeros((self.canvas_size, self.canvas_size)) for obj in range(self.n_objects): canvas[self.x_pos[obj]:self.x_pos[obj] + self.x_sizes[obj], self.y_pos[obj]:self.y_pos[obj] + self.y_sizes[obj]] = 1.0 np.save(img_path, canvas.T) # transpose b/c numpy indexing conventions if __name__ == '__main__': toy_squares = ToySquares(canvas_size=224, n_objects=3) toy_squares.increment_time_step()
[ "numpy.minimum", "numpy.logical_and", "numpy.ones", "numpy.random.choice", "numpy.random.rand", "numpy.sum", "numpy.zeros", "numpy.save", "numpy.arange" ]
[((1266, 1281), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (1275, 1281), True, 'import numpy as np\n'), ((1341, 1353), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (1347, 1353), True, 'import numpy as np\n'), ((1370, 1444), 'numpy.random.choice', 'np.random.choice', (['allowed_sizes'], {'size': 'self.n_objects', 'p': 'prob', 'replace': '(True)'}), '(allowed_sizes, size=self.n_objects, p=prob, replace=True)\n', (1386, 1444), True, 'import numpy as np\n'), ((1655, 1670), 'numpy.arange', 'np.arange', (['(1)', '(3)'], {}), '(1, 3)\n', (1664, 1670), True, 'import numpy as np\n'), ((1737, 1749), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (1743, 1749), True, 'import numpy as np\n'), ((1778, 1864), 'numpy.random.choice', 'np.random.choice', (['allowed_growth_rates'], {'size': 'self.n_objects', 'p': 'prob', 'replace': '(True)'}), '(allowed_growth_rates, size=self.n_objects, p=prob, replace\n =True)\n', (1794, 1864), True, 'import numpy as np\n'), ((2603, 2658), 'numpy.minimum', 'np.minimum', (['self.x_sizes', '(self.canvas_size - self.x_pos)'], {}), '(self.x_sizes, self.canvas_size - self.x_pos)\n', (2613, 2658), True, 'import numpy as np\n'), ((2682, 2737), 'numpy.minimum', 'np.minimum', (['self.x_sizes', '(self.canvas_size - self.y_pos)'], {}), '(self.x_sizes, self.canvas_size - self.y_pos)\n', (2692, 2737), True, 'import numpy as np\n'), ((2849, 2889), 'numpy.logical_and', 'np.logical_and', (['x_in_canvas', 'y_in_canvas'], {}), '(x_in_canvas, y_in_canvas)\n', (2863, 2889), True, 'import numpy as np\n'), ((3147, 3193), 'numpy.zeros', 'np.zeros', (['(self.canvas_size, self.canvas_size)'], {}), '((self.canvas_size, self.canvas_size))\n', (3155, 3193), True, 'import numpy as np\n'), ((3387, 3414), 'numpy.save', 'np.save', (['img_path', 'canvas.T'], {}), '(img_path, canvas.T)\n', (3394, 3414), True, 'import numpy as np\n'), ((1070, 1093), 'numpy.ones', 'np.ones', (['self.n_objects'], {}), '(self.n_objects)\n', (1077, 1093), True, 'import 
numpy as np\n'), ((980, 1010), 'numpy.random.rand', 'np.random.rand', (['self.n_objects'], {}), '(self.n_objects)\n', (994, 1010), True, 'import numpy as np\n'), ((893, 923), 'numpy.random.rand', 'np.random.rand', (['self.n_objects'], {}), '(self.n_objects)\n', (907, 923), True, 'import numpy as np\n')]
import gym import numpy as np import os,sys,time import math if 'SUMO_HOME' in os.environ: tools = os.path.join(os.environ['SUMO_HOME'],'tools') sys.path.append(tools) else: sys.exit("please declare environment variable 'SUMO_HOME'") import xml.etree.ElementTree as ET from xml.dom import minidom import torch from lib import seeding from collections import deque import traci from sumolib import checkBinary #Environment Constants WARM_UP_TIME = 300 TOTAL_TIME = 9000 VEHICLE_MEAN_LENGTH = 5 EDGE=[146, 45, 2346, 145] #LX, LY, RX, RY class SumoEnv(gym.Env): '''Sumo Environment is a simulation environment which provides necessary parameters for training. On-ramp simulation environment could be modified in xml files in project.''' #Memory Organization __slots__ = ['frameskip', 'run_step', 'scenario',\ 'lanearea_ob', 'action_set', 'evaluation',\ 'sumoBinary', 'projectFile', 'observation_space', 'action_space', 'shape',\ 'return_reward', 'downsample'] def __init__(self, frameskip=15, downsamples=10, device='cpu', evaluation=False): super(SumoEnv, self).__init__() #create environment if isinstance(frameskip, int): self.frameskip = frameskip else: self.frameskip = np.random.randint(frameskip[0], frameskip[1]) self.run_step = 0 self.lanearea_ob = list() self.downsample = downsamples self.shape = (int((EDGE[3]-EDGE[1])/self.downsample), int((EDGE[2]-EDGE[0])/self.downsample)) self.evaluation = evaluation if self.evaluation: self.return_reward = 0.0 # initialize sumo path self.projectFile = './Project/' # initialize observation space #self.observation_space = (2 * self.downsample, self.shape[0], self.shape[1]) #(samples, h, w) self.observation_space = (2, self.shape[0], self.shape[1]) # initialize action set self.action_set = [11.11, 12.51, 13.89, 15.28, 16.67, 18.05, 19.44, 20.83, 22.22] #self.action_set = [8.33, 11.11, 13.89, 16.67, 19.44, 22.22] # possible actions collection self.action_space = len(self.action_set) # initialize lanearea_dec_list dec_tree = 
ET.parse("./Project/ramp.add.xml") for lanearea_dec in dec_tree.iter("laneAreaDetector"): if 'obs' in lanearea_dec.attrib["id"]: self.lanearea_ob.append(lanearea_dec.attrib["id"]) def seed(self, seed= None): _, seed1 = seeding.np_random(seed) # Derive a random seed. This gets passed as a uint, but gets # checked as an int elsewhere, so we need to keep it below # 2**31. seed2 = seeding.hash_seed(seed1 + 1) % 2**31 return [seed1, seed2] def is_episode(self): if self.run_step > TOTAL_TIME: print('Scenario finished.') self.close() return True '''if self.evaluation == False and self._getmainlinespeed() < 14: print('Traffic jammed! at phase %d' % (self.run_step / 1800 + 1)) traci.close() return True''' return False def warm_up_simulation(self): # Warm up simulation. warm_step=0 while warm_step <= WARM_UP_TIME: traci.simulationStep() warm_step += 1 def get_state(self): position = np.zeros(self.shape, dtype=np.float32) velocity = np.zeros(self.shape, dtype=np.float32) #First get vehicle info on mainline lane = ['ramp_0', 'mainline_up_0', 'mainline_up_1', 'mainline_up_2'] for idx in lane: curent_veh = traci.lane.getLastStepVehicleIDs(idx) for veh in curent_veh: pos = traci.vehicle.getPosition(veh) y = math.floor((pos[0]-146)/self.downsample) x = math.floor((pos[1]-45)/self.downsample) #print(x,y) if position[x][y] == 0: velocity[x][y] += traci.vehicle.getSpeed(veh) else: velocity[x][y] = (velocity[x][y] * position[x][y] + traci.vehicle.getSpeed(veh)) / (position[x][y] + 1) position[x][y] += 1.0 #state = np.concatenate((np.stack(np.hsplit(position,self.downsample)), np.stack(np.hsplit(velocity,self.downsample))), axis=0) state = np.stack((position, velocity)) return state def _getmergingspeed(self): ms = list() for lanearea in self.lanearea_ob: if "merging" in lanearea: ms.append(traci.lanearea.getLastStepMeanSpeed(lanearea)) return np.mean(ms) def _getmainlinespeed(self): ms = list() for lanearea in self.lanearea_ob: if "mainline" in lanearea: 
ms.append(traci.lanearea.getLastStepMeanSpeed(lanearea)) return np.mean(ms) def _gettraveltime(self): mainline=['ramp', 'merging', 'mainline_down'] ramp=['mainline_up', 'merging', 'mainline_down'] ttr=0.0 ttm=0.0 for item in ramp: ttr += traci.edge.getTraveltime(item) for item in mainline: ttm += traci.edge.getTraveltime(item) return np.mean([ttr, ttm]) def _gettotalvehiclelength(self): vl = 0.0 for lanearea in self.lanearea_ob: vl += traci.lanearea.getJamLengthVehicle(lanearea) return vl def _getvarmainline(self): mls = list() for lanearea in self.lanearea_ob: if "mainline" in lanearea: mls.append(traci.lanearea.getLastStepMeanSpeed(lanearea)) var = np.var(mls) return var def _getflow(self): ms = list() vn = list() l = list() for lanearea in self.lanearea_ob: if "mainline" in lanearea: ms.append(traci.lanearea.getLastStepMeanSpeed(lanearea)) vn.append(traci.lanearea.getLastStepVehicleNumber(lanearea)) l.append(traci.lanearea.getLength(lanearea)) return np.mean(ms) * np.mean(vn) / np.mean(l) * 3600 def step_reward(self): #Reward = w1 * var(mainline) + w2 * queue + w3 * speed queue = self._gettotalvehiclelength() speed = self._getmergingspeed() #flow = self._getflow() #tt = self._gettraveltime() #maxdec = self._getvehdec() #var = self._getvarmainline() reward = .1 * speed - .4 * queue #reward = .03* flow - .07 * queue #print("queue: {}, speed: {}".format(queue,speed)) return reward def _GetMergingOccupancy(self): vn = list() l = list() for lanearea in self.lanearea_ob: if "merging" in lanearea: vn.append(traci.lanearea.getLastStepVehicleNumber(lanearea)) l.append(traci.lanearea.getLength(lanearea)) return VEHICLE_MEAN_LENGTH * np.mean(vn) / np.mean(l) * 100 def status_report(self): num_arrow = int(self.run_step * 50 / TOTAL_TIME) num_line = 50 - num_arrow percent = self.run_step * 100.0 / TOTAL_TIME process_bar = 'Scenario Running... 
[' + '>' * num_arrow + '-' * num_line + ']' + '%.2f' % percent + '%' + '\r' sys.stdout.write(process_bar) sys.stdout.flush() def step(self, a): '''Conduct an action, update observation and collect reward.''' reward = 0.0 info = dict() if a <= self.action_space: action = self.action_set[a] else: action = a #print(action) traci.edge.setMaxSpeed('mainline_up', action) for _ in range(self.frameskip): traci.simulationStep() reward += self.step_reward() self.status_report() self.run_step += 1 observation = self.get_state() # Update observation of environment state. #reward = self.step_reward() #reward = reward / self.frameskip #print(reward) if self.evaluation: info = {'flow':self._getflow(),'speed': self._getmainlinespeed(), 'ms':self._getmergingspeed(), \ 'tt': self._gettraveltime(),'occ':self._GetMergingOccupancy(), 'var': self._getvarmainline()} #print(info) done = self.is_episode() return observation, reward, done, info def reset(self, label=None, eval_seed=None): # Reset simulation with the random seed randomly selected the pool. 
if self.evaluation: self.sumoBinary = "sumo" seed = eval_seed traci.start([self.sumoBinary, '-c', self.projectFile + 'ramp.sumo.cfg', '--start',\ '--seed', str(seed), '--netstate-dump', self.projectFile + \ 'Output/Traj/traj{}.xml'.format(label), \ '--emergencydecel.warning-threshold', '1.2', '--quit-on-end'], label='evaluation') self.scenario = traci.getConnection('evaluation') else: self.sumoBinary = "sumo" seeds = self.seed()[1] traci.start([self.sumoBinary, '-c', self.projectFile + 'ramp.sumo.cfg','--start','--seed',\ str(seeds), '--emergencydecel.warning-threshold', '1.2', '--quit-on-end'], label='training') self.scenario = traci.getConnection('training') self.warm_up_simulation() obs = self.get_state() self.run_step = 0 return obs def close(self): self.scenario.close() #*************************************************# ''' # initialize lane_list and edge_list net_tree = ET.parse("./Project/ramp.net.xml") for lane in net_tree.iter("lane"): self.lane_list.append(lane.attrib["id"]) self.observation_space = (2, self.shape[0], self.shape[1]) # initialize lanearea_dec_list dec_tree = ET.parse("./Project/ramp.add.xml") for lanearea_dec in dec_tree.iter("laneAreaDetector"): if 'obs' in lanearea_dec.attrib["id"]: self.lanearea_ob.append(lanearea_dec.attrib["id"]) if 'vsl' in lanearea_dec.attrib["id"]: self.lanearea_dec_list.append(lanearea_dec.attrib["id"]) self.lanearea_max_speed[lanearea_dec.attrib["id"]] = 22.22 #Original state extractor def update_observation(self): state = np.zeros((1, 3*len(self.lane_list), self.maxlen), dtype = np.float32) vehicle_position = np.zeros((len(self.lane_list),self.maxlen),dtype = np.float32) vehicle_speed = np.zeros((len(self.lane_list),self.maxlen),dtype = np.float32) vehicle_acceleration = np.zeros((len(self.lane_list),self.maxlen),dtype = np.float32) #originally set -1 on no road sections (abandoned) for lane in self.lane_list: lane_index = self.lane_list.index(lane) lane_len = traci.lane.getLength(lane) lane_stop = int 
(lane_len / VEHICLE_MEAN_LENGTH/self.downsample) for i in range(lane_stop, self.maxlen): vehicle_position[lane_index][i] = -1.0 current_step_vehicle = list() for lane in self.lane_list: current_step_vehicle += (traci.lane.getLastStepVehicleIDs(lane)) for vehicle in current_step_vehicle: vehicle_in_lane = traci.vehicle.getLaneID(vehicle) lane_index = self.lane_list.index(vehicle_in_lane) vehicle_pos= traci.vehicle.getPosition(vehicle) lane_shape = traci.lane.getShape(vehicle_in_lane) vehicle_index = abs(int((vehicle_pos[0]-lane_shape[0][0])/VEHICLE_MEAN_LENGTH)) vehicle_index = round(vehicle_index / self.downsample) vehicle_position[lane_index][vehicle_index] += 1.0 vehicle_speed[lane_index][vehicle_index] += traci.vehicle.getSpeed(vehicle) vehicle_acceleration[lane_index][vehicle_index] += traci.vehicle.getAcceleration(vehicle) for lane_num in range(len(self.lane_list)): for vehicle_num in range(len(vehicle_position[lane_num])): if vehicle_position[lane_num][vehicle_num] == 0 or vehicle_position[lane_num][vehicle_num] == -1: continue vehicle_speed[lane_num][vehicle_num] /= vehicle_position[lane_num][vehicle_num] vehicle_acceleration[lane_num][vehicle_num] /= vehicle_position[lane_num][vehicle_num] state = np.concatenate((vehicle_position, vehicle_speed, vehicle_acceleration), axis= 0) return np.expand_dims(state, 0) #Original action function def reset_vehicle_maxspeed(self): for lane in self.lane_list: max_speed = 22.22 for vehicle in traci.lane.getLastStepVehicleIDs(lane): traci.vehicle.setMaxSpeed(vehicle,max_speed) for dec_lane in self.lanearea_dec_list: vehicle_list = traci.lanearea.getLastStepVehicleIDs(dec_lane) max_speed = self.lanearea_max_speed[dec_lane] for vehicle in vehicle_list: traci.vehicle.setMaxSpeed(vehicle,max_speed)'''
[ "lib.seeding.np_random", "math.floor", "traci.vehicle.getSpeed", "sys.exit", "traci.lanearea.getJamLengthVehicle", "sys.path.append", "traci.lanearea.getLastStepMeanSpeed", "traci.vehicle.getPosition", "traci.lanearea.getLastStepVehicleNumber", "numpy.mean", "xml.etree.ElementTree.parse", "num...
[((108, 154), 'os.path.join', 'os.path.join', (["os.environ['SUMO_HOME']", '"""tools"""'], {}), "(os.environ['SUMO_HOME'], 'tools')\n", (120, 154), False, 'import os, sys, time\n'), ((159, 181), 'sys.path.append', 'sys.path.append', (['tools'], {}), '(tools)\n', (174, 181), False, 'import os, sys, time\n'), ((194, 253), 'sys.exit', 'sys.exit', (['"""please declare environment variable \'SUMO_HOME\'"""'], {}), '("please declare environment variable \'SUMO_HOME\'")\n', (202, 253), False, 'import os, sys, time\n'), ((2330, 2364), 'xml.etree.ElementTree.parse', 'ET.parse', (['"""./Project/ramp.add.xml"""'], {}), "('./Project/ramp.add.xml')\n", (2338, 2364), True, 'import xml.etree.ElementTree as ET\n'), ((2604, 2627), 'lib.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (2621, 2627), False, 'from lib import seeding\n'), ((3518, 3556), 'numpy.zeros', 'np.zeros', (['self.shape'], {'dtype': 'np.float32'}), '(self.shape, dtype=np.float32)\n', (3526, 3556), True, 'import numpy as np\n'), ((3577, 3615), 'numpy.zeros', 'np.zeros', (['self.shape'], {'dtype': 'np.float32'}), '(self.shape, dtype=np.float32)\n', (3585, 3615), True, 'import numpy as np\n'), ((4530, 4560), 'numpy.stack', 'np.stack', (['(position, velocity)'], {}), '((position, velocity))\n', (4538, 4560), True, 'import numpy as np\n'), ((4815, 4826), 'numpy.mean', 'np.mean', (['ms'], {}), '(ms)\n', (4822, 4826), True, 'import numpy as np\n'), ((5061, 5072), 'numpy.mean', 'np.mean', (['ms'], {}), '(ms)\n', (5068, 5072), True, 'import numpy as np\n'), ((5429, 5448), 'numpy.mean', 'np.mean', (['[ttr, ttm]'], {}), '([ttr, ttm])\n', (5436, 5448), True, 'import numpy as np\n'), ((5871, 5882), 'numpy.var', 'np.var', (['mls'], {}), '(mls)\n', (5877, 5882), True, 'import numpy as np\n'), ((7541, 7570), 'sys.stdout.write', 'sys.stdout.write', (['process_bar'], {}), '(process_bar)\n', (7557, 7570), False, 'import os, sys, time\n'), ((7580, 7598), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', 
(7596, 7598), False, 'import os, sys, time\n'), ((7896, 7941), 'traci.edge.setMaxSpeed', 'traci.edge.setMaxSpeed', (['"""mainline_up"""', 'action'], {}), "('mainline_up', action)\n", (7918, 7941), False, 'import traci\n'), ((1341, 1386), 'numpy.random.randint', 'np.random.randint', (['frameskip[0]', 'frameskip[1]'], {}), '(frameskip[0], frameskip[1])\n', (1358, 1386), True, 'import numpy as np\n'), ((2801, 2829), 'lib.seeding.hash_seed', 'seeding.hash_seed', (['(seed1 + 1)'], {}), '(seed1 + 1)\n', (2818, 2829), False, 'from lib import seeding\n'), ((3405, 3427), 'traci.simulationStep', 'traci.simulationStep', ([], {}), '()\n', (3425, 3427), False, 'import traci\n'), ((3801, 3838), 'traci.lane.getLastStepVehicleIDs', 'traci.lane.getLastStepVehicleIDs', (['idx'], {}), '(idx)\n', (3833, 3838), False, 'import traci\n'), ((5300, 5330), 'traci.edge.getTraveltime', 'traci.edge.getTraveltime', (['item'], {}), '(item)\n', (5324, 5330), False, 'import traci\n'), ((5382, 5412), 'traci.edge.getTraveltime', 'traci.edge.getTraveltime', (['item'], {}), '(item)\n', (5406, 5412), False, 'import traci\n'), ((5574, 5618), 'traci.lanearea.getJamLengthVehicle', 'traci.lanearea.getJamLengthVehicle', (['lanearea'], {}), '(lanearea)\n', (5608, 5618), False, 'import traci\n'), ((7996, 8018), 'traci.simulationStep', 'traci.simulationStep', ([], {}), '()\n', (8016, 8018), False, 'import traci\n'), ((9281, 9314), 'traci.getConnection', 'traci.getConnection', (['"""evaluation"""'], {}), "('evaluation')\n", (9300, 9314), False, 'import traci\n'), ((9649, 9680), 'traci.getConnection', 'traci.getConnection', (['"""training"""'], {}), "('training')\n", (9668, 9680), False, 'import traci\n'), ((3898, 3928), 'traci.vehicle.getPosition', 'traci.vehicle.getPosition', (['veh'], {}), '(veh)\n', (3923, 3928), False, 'import traci\n'), ((3950, 3994), 'math.floor', 'math.floor', (['((pos[0] - 146) / self.downsample)'], {}), '((pos[0] - 146) / self.downsample)\n', (3960, 3994), False, 'import math\n'), 
((4012, 4055), 'math.floor', 'math.floor', (['((pos[1] - 45) / self.downsample)'], {}), '((pos[1] - 45) / self.downsample)\n', (4022, 4055), False, 'import math\n'), ((6333, 6343), 'numpy.mean', 'np.mean', (['l'], {}), '(l)\n', (6340, 6343), True, 'import numpy as np\n'), ((7216, 7226), 'numpy.mean', 'np.mean', (['l'], {}), '(l)\n', (7223, 7226), True, 'import numpy as np\n'), ((4161, 4188), 'traci.vehicle.getSpeed', 'traci.vehicle.getSpeed', (['veh'], {}), '(veh)\n', (4183, 4188), False, 'import traci\n'), ((4752, 4797), 'traci.lanearea.getLastStepMeanSpeed', 'traci.lanearea.getLastStepMeanSpeed', (['lanearea'], {}), '(lanearea)\n', (4787, 4797), False, 'import traci\n'), ((4998, 5043), 'traci.lanearea.getLastStepMeanSpeed', 'traci.lanearea.getLastStepMeanSpeed', (['lanearea'], {}), '(lanearea)\n', (5033, 5043), False, 'import traci\n'), ((5809, 5854), 'traci.lanearea.getLastStepMeanSpeed', 'traci.lanearea.getLastStepMeanSpeed', (['lanearea'], {}), '(lanearea)\n', (5844, 5854), False, 'import traci\n'), ((6102, 6147), 'traci.lanearea.getLastStepMeanSpeed', 'traci.lanearea.getLastStepMeanSpeed', (['lanearea'], {}), '(lanearea)\n', (6137, 6147), False, 'import traci\n'), ((6176, 6225), 'traci.lanearea.getLastStepVehicleNumber', 'traci.lanearea.getLastStepVehicleNumber', (['lanearea'], {}), '(lanearea)\n', (6215, 6225), False, 'import traci\n'), ((6253, 6287), 'traci.lanearea.getLength', 'traci.lanearea.getLength', (['lanearea'], {}), '(lanearea)\n', (6277, 6287), False, 'import traci\n'), ((6305, 6316), 'numpy.mean', 'np.mean', (['ms'], {}), '(ms)\n', (6312, 6316), True, 'import numpy as np\n'), ((6319, 6330), 'numpy.mean', 'np.mean', (['vn'], {}), '(vn)\n', (6326, 6330), True, 'import numpy as np\n'), ((7051, 7100), 'traci.lanearea.getLastStepVehicleNumber', 'traci.lanearea.getLastStepVehicleNumber', (['lanearea'], {}), '(lanearea)\n', (7090, 7100), False, 'import traci\n'), ((7128, 7162), 'traci.lanearea.getLength', 'traci.lanearea.getLength', (['lanearea'], {}), 
'(lanearea)\n', (7152, 7162), False, 'import traci\n'), ((7202, 7213), 'numpy.mean', 'np.mean', (['vn'], {}), '(vn)\n', (7209, 7213), True, 'import numpy as np\n'), ((4285, 4312), 'traci.vehicle.getSpeed', 'traci.vehicle.getSpeed', (['veh'], {}), '(veh)\n', (4307, 4312), False, 'import traci\n')]
# ---------------------------------------------------------------------- # # <NAME>, U.S. Geological Survey # <NAME>, GNS Science # <NAME>, University of Chicago # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2018 University of California, Davis # # See COPYING for license information. # # ---------------------------------------------------------------------- # # @file tests/fullscale/viscoelasticity/nofaults-3d/axialtraction_maxwell_soln.py # # @brief Analytical solution to axial traction problem for a Maxwell viscoelastic material. # # 3-D axial traction solution for linear Maxwell viscoelastic material. # # Tz=T0 # ---------- # | | # Ux=0 | | Ux=0 # | | # | | # ---------- # Uz=0 # # Dirichlet boundary conditions # Ux(-4000,y,z) = 0 # Ux(+4000,y,z) = 0 # Uy(x,-4000,z) = 0 # Uy(x,+4000,z) = 0 # Uz(x,y,-8000) = 0 # # Neumann boundary conditions # Tz(x,y,0) = T0 import numpy # Physical properties. p_density = 2500.0 p_vs = 3464.1016 p_vp = 6000.0 p_viscosity = 9.46728e17 p_mu = p_density*p_vs*p_vs p_lambda = p_density*p_vp*p_vp - 2.0*p_mu p_youngs = p_mu*(3.0*p_lambda + 2.0*p_mu)/(p_lambda + p_mu) p_poissons = 0.5*p_lambda/(p_lambda + p_mu) # Time information. year = 60.0*60.0*24.0*365.25 dt = 0.025*year startTime = dt endTime = 0.5*year numSteps = 20 timeArray = numpy.linspace(startTime, endTime, num=numSteps, dtype=numpy.float64) # Uniform stress field (plane strain). T0 = -1.0e7 szz = T0*numpy.ones(numSteps, dtype=numpy.float64) timeFac = numpy.exp(-p_youngs*timeArray/(6.0*p_viscosity*(1.0 - p_poissons))) poisFac = (2.0*p_poissons - 1.0)/(1.0 - p_poissons) sxx = T0*(1.0 + poisFac*timeFac) syy = T0*(1.0 + poisFac*timeFac) sxy = numpy.zeros(numSteps, dtype=numpy.float64) syz = numpy.zeros(numSteps, dtype=numpy.float64) sxz = numpy.zeros(numSteps, dtype=numpy.float64) # Deviatoric stress. 
meanStress = (sxx + syy + szz)/3.0 sDevxx = sxx - meanStress sDevyy = syy - meanStress sDevzz = szz - meanStress # Uniform strain field. exx = numpy.zeros(numSteps, dtype=numpy.float64) eyy = numpy.zeros(numSteps, dtype=numpy.float64) ezz = T0*(1.0 - 2.0*p_poissons)*(3.0 + 2.0*poisFac*timeFac)/p_youngs exy = numpy.zeros(numSteps, dtype=numpy.float64) eyz = numpy.zeros(numSteps, dtype=numpy.float64) exz = numpy.zeros(numSteps, dtype=numpy.float64) # outArray = numpy.column_stack((timeArray, syy, ezz)) # numpy.savetxt('axialtraction_maxwell_analytical.txt', outArray) # Get viscous strains from deviatoric stress. eVisxx = 0.5*sDevxx/p_mu eVisyy = 0.5*sDevyy/p_mu eViszz = 0.5*sDevzz/p_mu eVisxy = 0.5*sxy/p_mu eVisyz = 0.5*syz/p_mu eVisxz = 0.5*sxz/p_mu # ---------------------------------------------------------------------- class AnalyticalSoln(object): """Analytical solution to axial extension problem. """ SPACE_DIM = 3 TENSOR_SIZE = 6 def __init__(self): self.fields = { "displacement": self.displacement, "density": self.density, "shear_modulus": self.shear_modulus, "bulk_modulus": self.bulk_modulus, "maxwell_time": self.maxwell_time, "cauchy_strain": self.strain, "cauchy_stress": self.stress, "viscous_strain": self.viscous_strain, "initial_amplitude": { "bc_xneg": self.initial_displacement, "bc_yneg": self.initial_displacement, "bc_zneg": self.initial_displacement, "bc_xpos": self.initial_displacement, "bc_ypos": self.initial_displacement, "bc_zpos": self.initial_traction } } self.key = None return def getField(self, name, pts): if self.key is None: field = self.fields[name](pts) else: field = self.fields[name][self.key](pts) return field def displacement(self, locs): """Compute displacement field at locations. 
""" (npts, dim) = locs.shape disp = numpy.zeros((numSteps, npts, self.SPACE_DIM), dtype=numpy.float64) disp[:,:, 2] = numpy.dot(ezz.reshape(numSteps, 1), (locs[:, 2] + 8000.0).reshape(1, npts)) return disp def initial_displacement(self, locs): """Compute initial displacement field at locations. """ (npts, dim) = locs.shape disp = numpy.zeros((1, npts, self.SPACE_DIM), dtype=numpy.float64) return disp def initial_traction(self, locs): """Compute initial traction field at locations. """ (npts, dim) = locs.shape traction = numpy.zeros((1, npts, self.SPACE_DIM), dtype=numpy.float64) traction[:,:, 2] = T0 return traction def density(self, locs): """Compute density field at locations. """ (npts, dim) = locs.shape density = p_density * numpy.ones((1, npts, 1), dtype=numpy.float64) return density def shear_modulus(self, locs): """Compute shear modulus field at locations. """ (npts, dim) = locs.shape shear_modulus = p_mu * numpy.ones((1, npts, 1), dtype=numpy.float64) return shear_modulus def bulk_modulus(self, locs): """Compute bulk modulus field at locations. """ (npts, dim) = locs.shape bulk_modulus = (p_lambda + 2.0 / 3.0 * p_mu) * numpy.ones((1, npts, 1), dtype=numpy.float64) return bulk_modulus def maxwell_time(self, locs): """Compute Maxwell time field at locations. """ (npts, dim) = locs.shape maxwell_time = p_viscosity * numpy.ones((1, npts, 1), dtype=numpy.float64)/p_mu return maxwell_time def strain(self, locs): """Compute strain field at locations. """ (npts, dim) = locs.shape strain = numpy.zeros((numSteps, npts, self.TENSOR_SIZE), dtype=numpy.float64) strain[:,:, 0] = exx.reshape(numSteps, 1) strain[:,:, 1] = eyy.reshape(numSteps, 1) strain[:,:, 2] = ezz.reshape(numSteps, 1) strain[:,:, 3] = exy.reshape(numSteps, 1) strain[:,:, 4] = eyz.reshape(numSteps, 1) strain[:,:, 5] = exz.reshape(numSteps, 1) return strain def stress(self, locs): """Compute stress field at locations. 
""" (npts, dim) = locs.shape stress = numpy.zeros((numSteps, npts, self.TENSOR_SIZE), dtype=numpy.float64) stress[:,:, 0] = sxx.reshape(numSteps, 1) stress[:,:, 1] = syy.reshape(numSteps, 1) stress[:,:, 2] = szz.reshape(numSteps, 1) stress[:,:, 3] = sxy.reshape(numSteps, 1) stress[:,:, 4] = syz.reshape(numSteps, 1) stress[:,:, 5] = sxz.reshape(numSteps, 1) return stress def viscous_strain(self, locs): """Compute viscous strain field at locations. """ (npts, dim) = locs.shape viscous_strain = numpy.zeros((numSteps, npts, self.TENSOR_SIZE), dtype=numpy.float64) viscous_strain[:,:, 0] = eVisxx.reshape(numSteps, 1) viscous_strain[:,:, 1] = eVisyy.reshape(numSteps, 1) viscous_strain[:,:, 2] = eViszz.reshape(numSteps, 1) viscous_strain[:,:, 3] = eVisxy.reshape(numSteps, 1) viscous_strain[:,:, 4] = eVisyz.reshape(numSteps, 1) viscous_strain[:,:, 5] = eVisxz.reshape(numSteps, 1) return viscous_strain # End of file
[ "numpy.exp", "numpy.linspace", "numpy.ones", "numpy.zeros" ]
[((1480, 1549), 'numpy.linspace', 'numpy.linspace', (['startTime', 'endTime'], {'num': 'numSteps', 'dtype': 'numpy.float64'}), '(startTime, endTime, num=numSteps, dtype=numpy.float64)\n', (1494, 1549), False, 'import numpy\n'), ((1663, 1738), 'numpy.exp', 'numpy.exp', (['(-p_youngs * timeArray / (6.0 * p_viscosity * (1.0 - p_poissons)))'], {}), '(-p_youngs * timeArray / (6.0 * p_viscosity * (1.0 - p_poissons)))\n', (1672, 1738), False, 'import numpy\n'), ((1855, 1897), 'numpy.zeros', 'numpy.zeros', (['numSteps'], {'dtype': 'numpy.float64'}), '(numSteps, dtype=numpy.float64)\n', (1866, 1897), False, 'import numpy\n'), ((1904, 1946), 'numpy.zeros', 'numpy.zeros', (['numSteps'], {'dtype': 'numpy.float64'}), '(numSteps, dtype=numpy.float64)\n', (1915, 1946), False, 'import numpy\n'), ((1953, 1995), 'numpy.zeros', 'numpy.zeros', (['numSteps'], {'dtype': 'numpy.float64'}), '(numSteps, dtype=numpy.float64)\n', (1964, 1995), False, 'import numpy\n'), ((2162, 2204), 'numpy.zeros', 'numpy.zeros', (['numSteps'], {'dtype': 'numpy.float64'}), '(numSteps, dtype=numpy.float64)\n', (2173, 2204), False, 'import numpy\n'), ((2211, 2253), 'numpy.zeros', 'numpy.zeros', (['numSteps'], {'dtype': 'numpy.float64'}), '(numSteps, dtype=numpy.float64)\n', (2222, 2253), False, 'import numpy\n'), ((2329, 2371), 'numpy.zeros', 'numpy.zeros', (['numSteps'], {'dtype': 'numpy.float64'}), '(numSteps, dtype=numpy.float64)\n', (2340, 2371), False, 'import numpy\n'), ((2378, 2420), 'numpy.zeros', 'numpy.zeros', (['numSteps'], {'dtype': 'numpy.float64'}), '(numSteps, dtype=numpy.float64)\n', (2389, 2420), False, 'import numpy\n'), ((2427, 2469), 'numpy.zeros', 'numpy.zeros', (['numSteps'], {'dtype': 'numpy.float64'}), '(numSteps, dtype=numpy.float64)\n', (2438, 2469), False, 'import numpy\n'), ((1611, 1652), 'numpy.ones', 'numpy.ones', (['numSteps'], {'dtype': 'numpy.float64'}), '(numSteps, dtype=numpy.float64)\n', (1621, 1652), False, 'import numpy\n'), ((4156, 4222), 'numpy.zeros', 'numpy.zeros', 
(['(numSteps, npts, self.SPACE_DIM)'], {'dtype': 'numpy.float64'}), '((numSteps, npts, self.SPACE_DIM), dtype=numpy.float64)\n', (4167, 4222), False, 'import numpy\n'), ((4505, 4564), 'numpy.zeros', 'numpy.zeros', (['(1, npts, self.SPACE_DIM)'], {'dtype': 'numpy.float64'}), '((1, npts, self.SPACE_DIM), dtype=numpy.float64)\n', (4516, 4564), False, 'import numpy\n'), ((4744, 4803), 'numpy.zeros', 'numpy.zeros', (['(1, npts, self.SPACE_DIM)'], {'dtype': 'numpy.float64'}), '((1, npts, self.SPACE_DIM), dtype=numpy.float64)\n', (4755, 4803), False, 'import numpy\n'), ((5965, 6033), 'numpy.zeros', 'numpy.zeros', (['(numSteps, npts, self.TENSOR_SIZE)'], {'dtype': 'numpy.float64'}), '((numSteps, npts, self.TENSOR_SIZE), dtype=numpy.float64)\n', (5976, 6033), False, 'import numpy\n'), ((6493, 6561), 'numpy.zeros', 'numpy.zeros', (['(numSteps, npts, self.TENSOR_SIZE)'], {'dtype': 'numpy.float64'}), '((numSteps, npts, self.TENSOR_SIZE), dtype=numpy.float64)\n', (6504, 6561), False, 'import numpy\n'), ((7045, 7113), 'numpy.zeros', 'numpy.zeros', (['(numSteps, npts, self.TENSOR_SIZE)'], {'dtype': 'numpy.float64'}), '((numSteps, npts, self.TENSOR_SIZE), dtype=numpy.float64)\n', (7056, 7113), False, 'import numpy\n'), ((5010, 5055), 'numpy.ones', 'numpy.ones', (['(1, npts, 1)'], {'dtype': 'numpy.float64'}), '((1, npts, 1), dtype=numpy.float64)\n', (5020, 5055), False, 'import numpy\n'), ((5244, 5289), 'numpy.ones', 'numpy.ones', (['(1, npts, 1)'], {'dtype': 'numpy.float64'}), '((1, npts, 1), dtype=numpy.float64)\n', (5254, 5289), False, 'import numpy\n'), ((5506, 5551), 'numpy.ones', 'numpy.ones', (['(1, npts, 1)'], {'dtype': 'numpy.float64'}), '((1, npts, 1), dtype=numpy.float64)\n', (5516, 5551), False, 'import numpy\n'), ((5749, 5794), 'numpy.ones', 'numpy.ones', (['(1, npts, 1)'], {'dtype': 'numpy.float64'}), '((1, npts, 1), dtype=numpy.float64)\n', (5759, 5794), False, 'import numpy\n')]
#### All this code needs to be modified. We need to modify for LiTS. ##### Neeed to probably do some kind of from promise2012.Vnet.model_vnet3d import Vnet3dModule from promise2012.Vnet.util import convertMetaModelToPbModel import numpy as np import pandas as pd import cv2 def train(): ''' Preprocessing for dataset ''' # Read data set (Train data from CSV file) csvmaskdata = pd.read_csv('trainY.csv') csvimagedata = pd.read_csv('trainX.csv') maskdata = csvmaskdata.iloc[:, :].values imagedata = csvimagedata.iloc[:, :].values # shuffle imagedata and maskdata together perm = np.arange(len(csvimagedata)) np.random.shuffle(perm) imagedata = imagedata[perm] maskdata = maskdata[perm] Vnet3d = Vnet3dModule(128, 128, 64, channels=1, costname="dice coefficient") Vnet3d.train(imagedata, maskdata, "model\\Vnet3dModule.pd", "log\\", 0.001, 0.7, 100000, 1) def predict0(): Vnet3d = Vnet3dModule(256, 256, 64, inference=True, model_path="model\\Vnet3dModule.pd") for filenumber in range(30): batch_xs = np.zeros(shape=(64, 256, 256)) for index in range(64): imgs = cv2.imread( "D:\Data\PROMISE2012\Vnet3d_data\\test\image\\" + str(filenumber) + "\\" + str(index) + ".bmp", 0) batch_xs[index, :, :] = imgs[128:384, 128:384] predictvalue = Vnet3d.prediction(batch_xs) for index in range(64): result = np.zeros(shape=(512, 512), dtype=np.uint8) result[128:384, 128:384] = predictvalue[index] kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)) result = cv2.morphologyEx(result, cv2.MORPH_CLOSE, kernel) cv2.imwrite( "D:\Data\PROMISE2012\Vnet3d_data\\test\image\\" + str(filenumber) + "\\" + str(index) + "mask.bmp", result) def meta2pd(): convertMetaModelToPbModel(meta_model="model\\Vnet3dModule.pd", pb_model="model") train() #predict0() #meta2pd()
[ "pandas.read_csv", "promise2012.Vnet.model_vnet3d.Vnet3dModule", "promise2012.Vnet.util.convertMetaModelToPbModel", "cv2.morphologyEx", "numpy.zeros", "cv2.getStructuringElement", "numpy.random.shuffle" ]
[((422, 447), 'pandas.read_csv', 'pd.read_csv', (['"""trainY.csv"""'], {}), "('trainY.csv')\n", (433, 447), True, 'import pandas as pd\n'), ((468, 493), 'pandas.read_csv', 'pd.read_csv', (['"""trainX.csv"""'], {}), "('trainX.csv')\n", (479, 493), True, 'import pandas as pd\n'), ((681, 704), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (698, 704), True, 'import numpy as np\n'), ((785, 852), 'promise2012.Vnet.model_vnet3d.Vnet3dModule', 'Vnet3dModule', (['(128)', '(128)', '(64)'], {'channels': '(1)', 'costname': '"""dice coefficient"""'}), "(128, 128, 64, channels=1, costname='dice coefficient')\n", (797, 852), False, 'from promise2012.Vnet.model_vnet3d import Vnet3dModule\n'), ((985, 1064), 'promise2012.Vnet.model_vnet3d.Vnet3dModule', 'Vnet3dModule', (['(256)', '(256)', '(64)'], {'inference': '(True)', 'model_path': '"""model\\\\Vnet3dModule.pd"""'}), "(256, 256, 64, inference=True, model_path='model\\\\Vnet3dModule.pd')\n", (997, 1064), False, 'from promise2012.Vnet.model_vnet3d import Vnet3dModule\n'), ((1942, 2027), 'promise2012.Vnet.util.convertMetaModelToPbModel', 'convertMetaModelToPbModel', ([], {'meta_model': '"""model\\\\Vnet3dModule.pd"""', 'pb_model': '"""model"""'}), "(meta_model='model\\\\Vnet3dModule.pd', pb_model='model'\n )\n", (1967, 2027), False, 'from promise2012.Vnet.util import convertMetaModelToPbModel\n'), ((1119, 1149), 'numpy.zeros', 'np.zeros', ([], {'shape': '(64, 256, 256)'}), '(shape=(64, 256, 256))\n', (1127, 1149), True, 'import numpy as np\n'), ((1502, 1544), 'numpy.zeros', 'np.zeros', ([], {'shape': '(512, 512)', 'dtype': 'np.uint8'}), '(shape=(512, 512), dtype=np.uint8)\n', (1510, 1544), True, 'import numpy as np\n'), ((1627, 1676), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(5, 5)'], {}), '(cv2.MORPH_RECT, (5, 5))\n', (1652, 1676), False, 'import cv2\n'), ((1699, 1748), 'cv2.morphologyEx', 'cv2.morphologyEx', (['result', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(result, 
cv2.MORPH_CLOSE, kernel)\n', (1715, 1748), False, 'import cv2\n')]
# Tencent is pleased to support the open source community by making PocketFlow available. # # Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. # # Licensed under the BSD 3-Clause License (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Channel Pruned Learner""" import os import math import pathlib import string import random from collections import deque from timeit import default_timer as timer import numpy as np import tensorflow as tf from tensorflow.contrib import graph_editor from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw from utils.lrn_rate_utils import setup_lrn_rate from learners.distillation_helper import DistillationHelper from learners.abstract_learner import AbstractLearner from learners.channel_pruning.model_wrapper import Model from learners.channel_pruning.channel_pruner import ChannelPruner from rl_agents.ddpg.agent import Agent as DdpgAgent FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string( 'cp_prune_option', 'auto', """the action we want to prune the channel you can select one of the following option: uniform: prune with a uniform compression ratio list: prune with a list of compression ratio""") tf.app.flags.DEFINE_string( 'cp_prune_list_file', 'ratio.list', 'the prune list file which contains the compression ratio of each convolution layers') tf.app.flags.DEFINE_string( 'cp_best_path', './models/best_model.ckpt', 'channel pruned model\'s temporary save path') 
tf.app.flags.DEFINE_string( 'cp_original_path', './models/original_model.ckpt', 'channel pruned model\'s temporary save path') tf.app.flags.DEFINE_float( 'cp_preserve_ratio', 0.5, 'How much computation cost desired to be preserved after pruning') tf.app.flags.DEFINE_float( 'cp_uniform_preserve_ratio', 0.6, 'How much computation cost desired to be preserved each layer') tf.app.flags.DEFINE_float( 'cp_noise_tolerance', 0.15, 'the noise tolerance which is used to restrict the maximum reward to avoid an unexpected speedup') tf.app.flags.DEFINE_float('cp_lrn_rate_ft', 1e-4, 'CP: learning rate for global fine-tuning') tf.app.flags.DEFINE_float('cp_nb_iters_ft_ratio', 0.2, 'CP: the ratio of total iterations for global fine-tuning') tf.app.flags.DEFINE_boolean('cp_finetune', False, 'CP: whether finetuning between each list group') tf.app.flags.DEFINE_boolean('cp_retrain', False, 'CP: whether retraining between each list group') tf.app.flags.DEFINE_integer('cp_list_group', 1000, 'CP: # of iterations for fast evaluation') tf.app.flags.DEFINE_integer('cp_nb_rlouts', 200, 'CP: # of roll-outs for the RL agent') tf.app.flags.DEFINE_integer('cp_nb_rlouts_min', 50, 'CP: # of roll-outs for the RL agent') class ChannelPrunedLearner(AbstractLearner): # pylint: disable=too-many-instance-attributes """Learner with channel/filter pruning""" def __init__(self, sm_writer, model_helper): # class-independent initialization super(ChannelPrunedLearner, self).__init__(sm_writer, model_helper) # class-dependent initialization if FLAGS.enbl_dst: self.learner_dst = DistillationHelper(sm_writer, model_helper, self.mpi_comm) self.model_scope = 'model' self.sm_writer = sm_writer #self.max_eval_acc = 0 self.max_save_path = '' self.saver = None self.saver_train = None self.saver_eval = None self.model = None self.pruner = None self.sess_train = None self.sess_eval = None self.log_op = None self.train_op = None self.bcast_op = None self.train_init_op = None self.time_prev = None self.agent = None 
self.idx_iter = None self.accuracy_keys = None self.eval_op = None self.global_step = None self.summary_op = None self.nb_iters_train = 0 self.bestinfo = None self.__build(is_train=True) self.__build(is_train=False) channel_pruned_path = './models/pruned_model.ckpt' best_model_path = './models/best_model.ckpt' if FLAGS.enbl_multi_gpu: self.parent_path = '' if self.mpi_comm.rank == 0: self.parent_path = '/opt/ml/disk/' + \ ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8)) pathlib.Path(self.parent_path).mkdir(parents=True, exist_ok=True) channel_pruned_path = self.parent_path + '/' + channel_pruned_path best_model_path = self.parent_path + '/' + best_model_path channel_pruned_path = self.mpi_comm.bcast(channel_pruned_path, root=0) best_model_path = self.mpi_comm.bcast(best_model_path, root=0) self.parent_path = self.mpi_comm.bcast(self.parent_path, root=0) tf.app.flags.DEFINE_string( 'cp_channel_pruned_path', channel_pruned_path, 'channel pruned model\'s save path') tf.app.flags.DEFINE_string( 'cp_best_model_path', best_model_path, 'channel best model\'s save path') def train(self): """Train the pruned model""" # download pre-trained model if self.__is_primary_worker(): self.download_model() self.__restore_model(True) self.saver_train.save(self.sess_train, FLAGS.cp_original_path) self.create_pruner() if FLAGS.enbl_multi_gpu: self.mpi_comm.Barrier() tf.logging.info('Start pruning') # channel pruning and finetuning if FLAGS.cp_prune_option == 'list': self.__prune_and_finetune_list() elif FLAGS.cp_prune_option == 'auto': self.__prune_and_finetune_auto() elif FLAGS.cp_prune_option == 'uniform': self.__prune_and_finetune_uniform() def create_pruner(self): """create a pruner""" with tf.Graph().as_default(): config = tf.ConfigProto() config.gpu_options.visible_device_list = str(0) # pylint: disable=no-member sess = tf.Session(config=config) self.saver = tf.train.import_meta_graph(FLAGS.cp_original_path + '.meta') self.saver.restore(sess, 
FLAGS.cp_original_path) self.sess_train = sess self.sm_writer.add_graph(sess.graph) train_images = tf.get_collection('train_images')[0] train_labels = tf.get_collection('train_labels')[0] mem_images = tf.get_collection('mem_images')[0] mem_labels = tf.get_collection('mem_labels')[0] summary_op = tf.get_collection('summary_op')[0] loss = tf.get_collection('loss')[0] accuracy = tf.get_collection('accuracy')[0] #accuracy1 = tf.get_collection('top1')[0] #metrics = {'loss': loss, 'accuracy': accuracy['top1']} metrics = {'loss': loss, 'accuracy': accuracy} for key in self.accuracy_keys: metrics[key] = tf.get_collection(key)[0] self.model = Model(self.sess_train) pruner = ChannelPruner( self.model, images=train_images, labels=train_labels, mem_images=mem_images, mem_labels=mem_labels, metrics=metrics, lbound=self.lbound, summary_op=summary_op, sm_writer=self.sm_writer) self.pruner = pruner def evaluate(self): """evaluate the model""" # early break for non-primary workers if not self.__is_primary_worker(): return if self.saver_eval is None: self.saver_eval = tf.train.Saver() self.__restore_model(is_train=False) losses, accuracy = [], [] nb_iters = FLAGS.nb_smpls_eval // FLAGS.batch_size_eval self.sm_writer.add_graph(self.sess_eval.graph) accuracies = [[] for i in range(len(self.accuracy_keys))] for _ in range(nb_iters): eval_rslt = self.sess_eval.run(self.eval_op) losses.append(eval_rslt[0]) for i in range(len(self.accuracy_keys)): accuracies[i].append(eval_rslt[i + 1]) loss = np.mean(np.array(losses)) tf.logging.info('loss: {}'.format(loss)) for i in range(len(self.accuracy_keys)): accuracy.append(np.mean(np.array(accuracies[i]))) tf.logging.info('{}: {}'.format(self.accuracy_keys[i], accuracy[i])) # save the checkpoint if its evaluatin result is best so far #if accuracy[0] > self.max_eval_acc: # self.max_eval_acc = accuracy[0] # self.__save_in_progress_pruned_model() def __build(self, is_train): # pylint: disable=too-many-locals # early break for non-primary workers if 
not self.__is_primary_worker(): return if not is_train: self.__build_pruned_evaluate_model() return with tf.Graph().as_default(): # create a TF session for the current graph config = tf.ConfigProto() config.gpu_options.visible_device_list = str(0) # pylint: disable=no-member sess = tf.Session(config=config) # data input pipeline with tf.variable_scope(self.data_scope): train_images, train_labels = self.build_dataset_train().get_next() eval_images, eval_labels = self.build_dataset_eval().get_next() image_shape = train_images.shape.as_list() label_shape = train_labels.shape.as_list() image_shape[0] = FLAGS.batch_size label_shape[0] = FLAGS.batch_size mem_images = tf.placeholder(dtype=train_images.dtype, shape=image_shape) mem_labels = tf.placeholder(dtype=train_labels.dtype, shape=label_shape) tf.add_to_collection('train_images', train_images) tf.add_to_collection('train_labels', train_labels) tf.add_to_collection('eval_images', eval_images) tf.add_to_collection('eval_labels', eval_labels) tf.add_to_collection('mem_images', mem_images) tf.add_to_collection('mem_labels', mem_labels) # model definition with tf.variable_scope(self.model_scope): # forward pass logits = self.forward_train(mem_images) loss, accuracy = self.calc_loss(mem_labels, logits, self.trainable_vars) self.accuracy_keys = list(accuracy.keys()) for key in self.accuracy_keys: tf.add_to_collection(key, accuracy[key]) tf.add_to_collection('loss', loss) tf.add_to_collection('logits', logits) #self.loss = loss tf.summary.scalar('loss', loss) for key in accuracy.keys(): tf.summary.scalar(key, accuracy[key]) # learning rate & pruning ratio self.sess_train = sess self.summary_op = tf.summary.merge_all() tf.add_to_collection('summary_op', self.summary_op) self.saver_train = tf.train.Saver(self.vars) self.lbound = math.log(FLAGS.cp_preserve_ratio + 1, 10) * 1.5 self.rbound = 1.0 def __build_pruned_evaluate_model(self, path=None): ''' build a evaluation model from pruned model ''' # early break for non-primary 
workers if not self.__is_primary_worker(): return if path is None: path = FLAGS.save_path if not tf.train.checkpoint_exists(path): return with tf.Graph().as_default(): config = tf.ConfigProto() config.gpu_options.visible_device_list = str(# pylint: disable=no-member mgw.local_rank() if FLAGS.enbl_multi_gpu else 0) self.sess_eval = tf.Session(config=config) self.saver_eval = tf.train.import_meta_graph(path + '.meta') self.saver_eval.restore(self.sess_eval, path) eval_logits = tf.get_collection('logits')[0] eval_images = tf.get_collection('eval_images')[0] eval_labels = tf.get_collection('eval_labels')[0] mem_images = tf.get_collection('mem_images')[0] mem_labels = tf.get_collection('mem_labels')[0] self.sess_eval.close() graph_editor.reroute_ts(eval_images, mem_images) graph_editor.reroute_ts(eval_labels, mem_labels) self.sess_eval = tf.Session(config=config) self.saver_eval.restore(self.sess_eval, path) trainable_vars = self.trainable_vars loss, accuracy = self.calc_loss(eval_labels, eval_logits, trainable_vars) self.eval_op = [loss] + list(accuracy.values()) self.sm_writer.add_graph(self.sess_eval.graph) def __build_pruned_train_model(self, path=None, finetune=False): # pylint: disable=too-many-locals ''' build a training model from pruned model ''' if path is None: path = FLAGS.save_path with tf.Graph().as_default(): config = tf.ConfigProto() config.gpu_options.visible_device_list = str(# pylint: disable=no-member mgw.local_rank() if FLAGS.enbl_multi_gpu else 0) self.sess_train = tf.Session(config=config) self.saver_train = tf.train.import_meta_graph(path + '.meta') self.saver_train.restore(self.sess_train, path) logits = tf.get_collection('logits')[0] train_images = tf.get_collection('train_images')[0] train_labels = tf.get_collection('train_labels')[0] mem_images = tf.get_collection('mem_images')[0] mem_labels = tf.get_collection('mem_labels')[0] self.sess_train.close() graph_editor.reroute_ts(train_images, mem_images) graph_editor.reroute_ts(train_labels, 
mem_labels) self.sess_train = tf.Session(config=config) self.saver_train.restore(self.sess_train, path) trainable_vars = self.trainable_vars loss, accuracy = self.calc_loss(train_labels, logits, trainable_vars) self.accuracy_keys = list(accuracy.keys()) if FLAGS.enbl_dst: logits_dst = self.learner_dst.calc_logits(self.sess_train, train_images) loss += self.learner_dst.calc_loss(logits, logits_dst) tf.summary.scalar('loss', loss) for key in accuracy.keys(): tf.summary.scalar(key, accuracy[key]) self.summary_op = tf.summary.merge_all() global_step = tf.get_variable('global_step', shape=[], dtype=tf.int32, trainable=False) self.global_step = global_step lrn_rate, self.nb_iters_train = setup_lrn_rate( self.global_step, self.model_name, self.dataset_name) if finetune and not FLAGS.cp_retrain: mom_optimizer = tf.train.AdamOptimizer(FLAGS.cp_lrn_rate_ft) self.log_op = [tf.constant(FLAGS.cp_lrn_rate_ft), loss, list(accuracy.values())] else: mom_optimizer = tf.train.MomentumOptimizer(lrn_rate, FLAGS.momentum) self.log_op = [lrn_rate, loss, list(accuracy.values())] if FLAGS.enbl_multi_gpu: optimizer = mgw.DistributedOptimizer(mom_optimizer) else: optimizer = mom_optimizer grads_origin = optimizer.compute_gradients(loss, trainable_vars) grads_pruned, masks = self.__calc_grads_pruned(grads_origin) with tf.control_dependencies(self.update_ops): self.train_op = optimizer.apply_gradients(grads_pruned, global_step=global_step) self.sm_writer.add_graph(tf.get_default_graph()) self.train_init_op = \ tf.initialize_variables(mom_optimizer.variables() + [global_step] + masks) if FLAGS.enbl_multi_gpu: self.bcast_op = mgw.broadcast_global_variables(0) def __calc_grads_pruned(self, grads_origin): """Calculate the pruned gradients Args: * grads_origin: the original gradient Return: * the pruned gradients * the corresponding mask of the pruned gradients """ grads_pruned = [] masks = [] maskable_var_names = {} fake_pruning_dict = {} if self.__is_primary_worker(): fake_pruning_dict = 
self.pruner.fake_pruning_dict maskable_var_names = { self.pruner.model.get_var_by_op( self.pruner.model.g.get_operation_by_name(op_name)).name: \ op_name for op_name, ratio in fake_pruning_dict.items()} tf.logging.debug('maskable var names {}'.format(maskable_var_names)) if FLAGS.enbl_multi_gpu: fake_pruning_dict = self.mpi_comm.bcast(fake_pruning_dict, root=0) maskable_var_names = self.mpi_comm.bcast(maskable_var_names, root=0) for grad in grads_origin: if grad[1].name not in maskable_var_names.keys(): grads_pruned.append(grad) else: pruned_idxs = fake_pruning_dict[maskable_var_names[grad[1].name]] mask_tensor = np.ones(grad[0].shape) mask_tensor[:, :, [not i for i in pruned_idxs[0]], :] = 0 mask_tensor[:, :, :, [not i for i in pruned_idxs[1]]] = 0 mask_initializer = tf.constant_initializer(mask_tensor) mask = tf.get_variable( grad[1].name.split(':')[0] + '_mask', shape=mask_tensor.shape, initializer=mask_initializer, trainable=False) masks.append(mask) grads_pruned.append((grad[0] * mask, grad[1])) return grads_pruned, masks def __train_pruned_model(self, finetune=False): """Train pruned model""" # Initialize varialbes self.sess_train.run(self.train_init_op) if FLAGS.enbl_multi_gpu: self.sess_train.run(self.bcast_op) ## Fintuning & distilling self.time_prev = timer() nb_iters = int(FLAGS.cp_nb_iters_ft_ratio * self.nb_iters_train) \ if finetune and not FLAGS.cp_retrain else self.nb_iters_train for self.idx_iter in range(nb_iters): # train the model if (self.idx_iter + 1) % FLAGS.summ_step != 0: self.sess_train.run(self.train_op) else: __, summary, log_rslt = self.sess_train.run([self.train_op, self.summary_op, self.log_op]) self.__monitor_progress(summary, log_rslt) # save the model at certain steps if (self.idx_iter + 1) % FLAGS.save_step == 0: #summary, log_rslt = self.sess_train.run([self.summary_op, self.log_op]) #self.__monitor_progress(summary, log_rslt) if self.__is_primary_worker(): self.__save_model() self.evaluate() if FLAGS.enbl_multi_gpu: 
self.mpi_comm.Barrier() if self.__is_primary_worker(): self.__save_model() self.evaluate() self.__save_in_progress_pruned_model() if FLAGS.enbl_multi_gpu: self.max_save_path = self.mpi_comm.bcast(self.max_save_path, root=0) if self.__is_primary_worker(): with self.pruner.model.g.as_default(): #save_path = tf.train.latest_checkpoint(os.path.dirname(FLAGS.channel_pruned_path)) self.pruner.saver = tf.train.Saver() self.pruner.saver.restore(self.pruner.model.sess, self.max_save_path) #self.pruner.save_model() #self.saver_train.restore(self.sess_train, self.max_save_path) #self.__save_model() def __save_best_pruned_model(self): """ save a in best purned model with a max evaluation result""" best_path = tf.train.Saver().save(self.pruner.model.sess, FLAGS.cp_best_path) tf.logging.info('model saved best model to ' + best_path) def __save_in_progress_pruned_model(self): """ save a in progress training model with a max evaluation result""" self.max_save_path = self.saver_eval.save(self.sess_eval, FLAGS.cp_best_model_path) tf.logging.info('model saved best model to ' + self.max_save_path) def __save_model(self): save_path = self.saver_train.save(self.sess_train, FLAGS.save_path, self.global_step) tf.logging.info('model saved to ' + save_path) def __restore_model(self, is_train): save_path = tf.train.latest_checkpoint(os.path.dirname(FLAGS.save_path)) if is_train: self.saver_train.restore(self.sess_train, save_path) else: self.saver_eval.restore(self.sess_eval, save_path) tf.logging.info('model restored from ' + save_path) def __monitor_progress(self, summary, log_rslt): # early break for non-primary workers if not self.__is_primary_worker(): return # write summaries for TensorBoard visualization self.sm_writer.add_summary(summary, self.idx_iter) # display monitored statistics lrn_rate, loss, accuracy = log_rslt[0], log_rslt[1], log_rslt[2] speed = FLAGS.batch_size * FLAGS.summ_step / (timer() - self.time_prev) if FLAGS.enbl_multi_gpu: speed *= mgw.size() tf.logging.info('iter 
#%d: lr = %e | loss = %e | speed = %.2f pics / sec' % (self.idx_iter + 1, lrn_rate, loss, speed)) for i in range(len(self.accuracy_keys)): tf.logging.info('{} = {}'.format(self.accuracy_keys[i], accuracy[i])) self.time_prev = timer() def __prune_and_finetune_uniform(self): '''prune with a list of compression ratio''' if self.__is_primary_worker(): done = False self.pruner.extract_features() start = timer() while not done: _, _, done, _ = self.pruner.compress(FLAGS.cp_uniform_preserve_ratio) tf.logging.info('uniform channl pruning time cost: {}s'.format(timer() - start)) self.pruner.save_model() if FLAGS.enbl_multi_gpu: self.mpi_comm.Barrier() self.__finetune_pruned_model(path=FLAGS.cp_channel_pruned_path) def __prune_and_finetune_list(self): '''prune with a list of compression ratio''' try: ratio_list = np.loadtxt(FLAGS.cp_prune_list_file, delimiter=',') ratio_list = list(ratio_list) except IOError as err: tf.logging.error('The prune list file format is not correct. \n \ It\'s content should be a float list delimited by a comma.') raise err ratio_list.reverse() queue = deque(ratio_list) done = False while not done: done = self.__prune_list_layers(queue, [FLAGS.cp_list_group]) def __prune_list_layers(self, queue, ps=None): for p in ps: done = self.__prune_n_layers(p, queue) return done def __prune_n_layers(self, n, queue): #self.max_eval_acc = 0 done = False if self.__is_primary_worker(): self.pruner.extract_features() done = False i = 0 while not done and i < n: if not queue: ratio = 1 else: ratio = queue.pop() _, _, done, _ = self.pruner.compress(ratio) i += 1 self.pruner.save_model() if FLAGS.enbl_multi_gpu: self.mpi_comm.Barrier() done = self.mpi_comm.bcast(done, root=0) if done: self.__finetune_pruned_model(path=FLAGS.cp_channel_pruned_path, finetune=False) else: self.__finetune_pruned_model(path=FLAGS.cp_channel_pruned_path, finetune=FLAGS.cp_finetune) return done def __finetune_pruned_model(self, path=None, finetune=False): if path is None: path = 
FLAGS.cp_channel_pruned_path start = timer() tf.logging.info('build pruned evaluating model') self.__build_pruned_evaluate_model(path) tf.logging.info('build pruned training model') self.__build_pruned_train_model(path, finetune=finetune) tf.logging.info('training pruned model') self.__train_pruned_model(finetune=finetune) tf.logging.info('fintuning time cost: {}s'.format(timer() - start)) def __prune_and_finetune_auto(self): if self.__is_primary_worker(): self.__prune_rl() self.pruner.initialize_state() if FLAGS.enbl_multi_gpu: self.mpi_comm.Barrier() self.bestinfo = self.mpi_comm.bcast(self.bestinfo, root=0) ratio_list = self.bestinfo[0] tf.logging.info('best split ratio is: {}'.format(ratio_list)) ratio_list.reverse() queue = deque(ratio_list) done = False while not done: done = self.__prune_list_layers(queue, [FLAGS.cp_list_group]) @classmethod def __calc_reward(cls, accuracy, flops): if FLAGS.cp_reward_policy == 'accuracy': reward = accuracy * np.ones((1, 1)) elif FLAGS.cp_reward_policy == 'flops': reward = -np.maximum( FLAGS.cp_noise_tolerance, (1 - accuracy)) * np.log(flops) * np.ones((1, 1)) else: raise ValueError('unrecognized reward type: ' + FLAGS.cp_reward_policy) return reward def __prune_rl(self): # pylint: disable=too-many-locals """ search pruning strategy with reinforcement learning""" tf.logging.info( 'preserve lower bound: {}, preserve ratio: {}, preserve upper bound: {}'.format( self.lbound, FLAGS.cp_preserve_ratio, self.rbound)) config = tf.ConfigProto() config.gpu_options.visible_device_list = str(0) # pylint: disable=no-member buf_size = len(self.pruner.states) * FLAGS.cp_nb_rlouts_min nb_rlouts = FLAGS.cp_nb_rlouts self.agent = DdpgAgent( tf.Session(config=config), len(self.pruner.states.loc[0].tolist()), 1, nb_rlouts, buf_size, self.lbound, self.rbound) self.agent.init() self.bestinfo = None reward_best = np.NINF # pylint: disable=no-member for idx_rlout in range(FLAGS.cp_nb_rlouts): # execute roll-outs to obtain pruning ratios 
self.agent.init_rlout() states_n_actions = [] self.create_pruner() self.pruner.initialize_state() self.pruner.extract_features() state = np.array(self.pruner.currentStates.loc[0].tolist())[None, :] start = timer() while True: tf.logging.info('state is {}'.format(state)) action = self.agent.sess.run(self.agent.actions_noisy, feed_dict={self.agent.states: state}) tf.logging.info('RL choosed preserv ratio: {}'.format(action)) state_next, acc_flops, done, real_action = self.pruner.compress(action) tf.logging.info('Actural preserv ratio: {}'.format(real_action)) states_n_actions += [(state, real_action * np.ones((1, 1)))] state = state_next[None, :] actor_loss, critic_loss, noise_std = self.agent.train() if done: break tf.logging.info('roll-out #%d: a-loss = %.2e | c-loss = %.2e | noise std. = %.2e' % (idx_rlout, actor_loss, critic_loss, noise_std)) reward = self.__calc_reward(acc_flops[0], acc_flops[1]) rewards = reward * np.ones(len(self.pruner.states)) self.agent.finalize_rlout(rewards) # record transactions for RL training strategy = [] for idx, (state, action) in enumerate(states_n_actions): strategy.append(action[0, 0]) if idx != len(states_n_actions) - 1: terminal = np.zeros((1, 1)) state_next = states_n_actions[idx + 1][0] else: terminal = np.ones((1, 1)) state_next = np.zeros_like(state) self.agent.record(state, action, reward, terminal, state_next) # record the best combination of pruning ratios if reward_best < reward: tf.logging.info('best reward updated: %.4f -> %.4f' % (reward_best, reward)) reward_best = reward self.bestinfo = [strategy, acc_flops[0], acc_flops[1]] tf.logging.info("""The best pruned model occured with strategy: {}, accuracy: {} and pruned ratio: {}""".format(self.bestinfo[0], self.bestinfo[1], self.bestinfo[2])) with self.pruner.model.g.as_default(): self.__save_best_pruned_model() tf.logging.info('automatic channl pruning time cost: {}s'.format(timer() - start)) @classmethod def __is_primary_worker(cls): """Weather it is the primary 
worker""" return not FLAGS.enbl_multi_gpu or mgw.rank() == 0
[ "learners.channel_pruning.channel_pruner.ChannelPruner", "tensorflow.get_variable", "learners.distillation_helper.DistillationHelper", "numpy.log", "math.log", "numpy.array", "utils.multi_gpu_wrapper.MultiGpuWrapper.DistributedOptimizer", "tensorflow.control_dependencies", "tensorflow.Graph", "ten...
[((1471, 1743), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""cp_prune_option"""', '"""auto"""', '"""the action we want to prune the channel you can select one of the following option:\n uniform:\n prune with a uniform compression ratio\n list:\n prune with a list of compression ratio"""'], {}), '(\'cp_prune_option\', \'auto\',\n """the action we want to prune the channel you can select one of the following option:\n uniform:\n prune with a uniform compression ratio\n list:\n prune with a list of compression ratio"""\n )\n', (1497, 1743), True, 'import tensorflow as tf\n'), ((1743, 1901), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""cp_prune_list_file"""', '"""ratio.list"""', '"""the prune list file which contains the compression ratio of each convolution layers"""'], {}), "('cp_prune_list_file', 'ratio.list',\n 'the prune list file which contains the compression ratio of each convolution layers'\n )\n", (1769, 1901), True, 'import tensorflow as tf\n'), ((1900, 2020), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""cp_best_path"""', '"""./models/best_model.ckpt"""', '"""channel pruned model\'s temporary save path"""'], {}), '(\'cp_best_path\', \'./models/best_model.ckpt\',\n "channel pruned model\'s temporary save path")\n', (1926, 2020), True, 'import tensorflow as tf\n'), ((2025, 2157), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""cp_original_path"""', '"""./models/original_model.ckpt"""', '"""channel pruned model\'s temporary save path"""'], {}), '(\'cp_original_path\',\n \'./models/original_model.ckpt\',\n "channel pruned model\'s temporary save path")\n', (2051, 2157), True, 'import tensorflow as tf\n'), ((2158, 2280), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""cp_preserve_ratio"""', '(0.5)', '"""How much computation cost desired to be preserved after pruning"""'], {}), "('cp_preserve_ratio', 0.5,\n 'How much computation 
cost desired to be preserved after pruning')\n", (2183, 2280), True, 'import tensorflow as tf\n'), ((2282, 2409), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""cp_uniform_preserve_ratio"""', '(0.6)', '"""How much computation cost desired to be preserved each layer"""'], {}), "('cp_uniform_preserve_ratio', 0.6,\n 'How much computation cost desired to be preserved each layer')\n", (2307, 2409), True, 'import tensorflow as tf\n'), ((2411, 2572), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""cp_noise_tolerance"""', '(0.15)', '"""the noise tolerance which is used to restrict the maximum reward to avoid an unexpected speedup"""'], {}), "('cp_noise_tolerance', 0.15,\n 'the noise tolerance which is used to restrict the maximum reward to avoid an unexpected speedup'\n )\n", (2436, 2572), True, 'import tensorflow as tf\n'), ((2571, 2670), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""cp_lrn_rate_ft"""', '(0.0001)', '"""CP: learning rate for global fine-tuning"""'], {}), "('cp_lrn_rate_ft', 0.0001,\n 'CP: learning rate for global fine-tuning')\n", (2596, 2670), True, 'import tensorflow as tf\n'), ((2665, 2783), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""cp_nb_iters_ft_ratio"""', '(0.2)', '"""CP: the ratio of total iterations for global fine-tuning"""'], {}), "('cp_nb_iters_ft_ratio', 0.2,\n 'CP: the ratio of total iterations for global fine-tuning')\n", (2690, 2783), True, 'import tensorflow as tf\n'), ((2806, 2909), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""cp_finetune"""', '(False)', '"""CP: whether finetuning between each list group"""'], {}), "('cp_finetune', False,\n 'CP: whether finetuning between each list group')\n", (2833, 2909), True, 'import tensorflow as tf\n'), ((2906, 3008), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""cp_retrain"""', '(False)', '"""CP: whether retraining between each list 
group"""'], {}), "('cp_retrain', False,\n 'CP: whether retraining between each list group')\n", (2933, 3008), True, 'import tensorflow as tf\n'), ((3005, 3102), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""cp_list_group"""', '(1000)', '"""CP: # of iterations for fast evaluation"""'], {}), "('cp_list_group', 1000,\n 'CP: # of iterations for fast evaluation')\n", (3032, 3102), True, 'import tensorflow as tf\n'), ((3099, 3190), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""cp_nb_rlouts"""', '(200)', '"""CP: # of roll-outs for the RL agent"""'], {}), "('cp_nb_rlouts', 200,\n 'CP: # of roll-outs for the RL agent')\n", (3126, 3190), True, 'import tensorflow as tf\n'), ((3187, 3281), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""cp_nb_rlouts_min"""', '(50)', '"""CP: # of roll-outs for the RL agent"""'], {}), "('cp_nb_rlouts_min', 50,\n 'CP: # of roll-outs for the RL agent')\n", (3214, 3281), True, 'import tensorflow as tf\n'), ((5191, 5304), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""cp_channel_pruned_path"""', 'channel_pruned_path', '"""channel pruned model\'s save path"""'], {}), '(\'cp_channel_pruned_path\', channel_pruned_path,\n "channel pruned model\'s save path")\n', (5217, 5304), True, 'import tensorflow as tf\n'), ((5325, 5428), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""cp_best_model_path"""', 'best_model_path', '"""channel best model\'s save path"""'], {}), '(\'cp_best_model_path\', best_model_path,\n "channel best model\'s save path")\n', (5351, 5428), True, 'import tensorflow as tf\n'), ((5788, 5820), 'tensorflow.logging.info', 'tf.logging.info', (['"""Start pruning"""'], {}), "('Start pruning')\n", (5803, 5820), True, 'import tensorflow as tf\n'), ((17243, 17250), 'timeit.default_timer', 'timer', ([], {}), '()\n', (17248, 17250), True, 'from timeit import default_timer as timer\n'), ((18959, 19016), 
'tensorflow.logging.info', 'tf.logging.info', (["('model saved best model to ' + best_path)"], {}), "('model saved best model to ' + best_path)\n", (18974, 19016), True, 'import tensorflow as tf\n'), ((19229, 19295), 'tensorflow.logging.info', 'tf.logging.info', (["('model saved best model to ' + self.max_save_path)"], {}), "('model saved best model to ' + self.max_save_path)\n", (19244, 19295), True, 'import tensorflow as tf\n'), ((19417, 19463), 'tensorflow.logging.info', 'tf.logging.info', (["('model saved to ' + save_path)"], {}), "('model saved to ' + save_path)\n", (19432, 19463), True, 'import tensorflow as tf\n'), ((19728, 19779), 'tensorflow.logging.info', 'tf.logging.info', (["('model restored from ' + save_path)"], {}), "('model restored from ' + save_path)\n", (19743, 19779), True, 'import tensorflow as tf\n'), ((20273, 20396), 'tensorflow.logging.info', 'tf.logging.info', (["('iter #%d: lr = %e | loss = %e | speed = %.2f pics / sec' % (self.idx_iter +\n 1, lrn_rate, loss, speed))"], {}), "('iter #%d: lr = %e | loss = %e | speed = %.2f pics / sec' %\n (self.idx_iter + 1, lrn_rate, loss, speed))\n", (20288, 20396), True, 'import tensorflow as tf\n'), ((20555, 20562), 'timeit.default_timer', 'timer', ([], {}), '()\n', (20560, 20562), True, 'from timeit import default_timer as timer\n'), ((21543, 21560), 'collections.deque', 'deque', (['ratio_list'], {}), '(ratio_list)\n', (21548, 21560), False, 'from collections import deque\n'), ((22673, 22680), 'timeit.default_timer', 'timer', ([], {}), '()\n', (22678, 22680), True, 'from timeit import default_timer as timer\n'), ((22685, 22733), 'tensorflow.logging.info', 'tf.logging.info', (['"""build pruned evaluating model"""'], {}), "('build pruned evaluating model')\n", (22700, 22733), True, 'import tensorflow as tf\n'), ((22783, 22829), 'tensorflow.logging.info', 'tf.logging.info', (['"""build pruned training model"""'], {}), "('build pruned training model')\n", (22798, 22829), True, 'import tensorflow as tf\n'), 
((22895, 22935), 'tensorflow.logging.info', 'tf.logging.info', (['"""training pruned model"""'], {}), "('training pruned model')\n", (22910, 22935), True, 'import tensorflow as tf\n'), ((23456, 23473), 'collections.deque', 'deque', (['ratio_list'], {}), '(ratio_list)\n', (23461, 23473), False, 'from collections import deque\n'), ((24292, 24308), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (24306, 24308), True, 'import tensorflow as tf\n'), ((3661, 3719), 'learners.distillation_helper.DistillationHelper', 'DistillationHelper', (['sm_writer', 'model_helper', 'self.mpi_comm'], {}), '(sm_writer, model_helper, self.mpi_comm)\n', (3679, 3719), False, 'from learners.distillation_helper import DistillationHelper\n'), ((6209, 6225), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (6223, 6225), True, 'import tensorflow as tf\n'), ((6321, 6346), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (6331, 6346), True, 'import tensorflow as tf\n'), ((6366, 6426), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(FLAGS.cp_original_path + '.meta')"], {}), "(FLAGS.cp_original_path + '.meta')\n", (6392, 6426), True, 'import tensorflow as tf\n'), ((7193, 7215), 'learners.channel_pruning.model_wrapper.Model', 'Model', (['self.sess_train'], {}), '(self.sess_train)\n', (7198, 7215), False, 'from learners.channel_pruning.model_wrapper import Model\n'), ((7231, 7439), 'learners.channel_pruning.channel_pruner.ChannelPruner', 'ChannelPruner', (['self.model'], {'images': 'train_images', 'labels': 'train_labels', 'mem_images': 'mem_images', 'mem_labels': 'mem_labels', 'metrics': 'metrics', 'lbound': 'self.lbound', 'summary_op': 'summary_op', 'sm_writer': 'self.sm_writer'}), '(self.model, images=train_images, labels=train_labels,\n mem_images=mem_images, mem_labels=mem_labels, metrics=metrics, lbound=\n self.lbound, summary_op=summary_op, sm_writer=self.sm_writer)\n', (7244, 7439), False, 'from 
learners.channel_pruning.channel_pruner import ChannelPruner\n'), ((7735, 7751), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7749, 7751), True, 'import tensorflow as tf\n'), ((8227, 8243), 'numpy.array', 'np.array', (['losses'], {}), '(losses)\n', (8235, 8243), True, 'import numpy as np\n'), ((8996, 9012), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (9010, 9012), True, 'import tensorflow as tf\n'), ((9108, 9133), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (9118, 9133), True, 'import tensorflow as tf\n'), ((10821, 10843), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (10841, 10843), True, 'import tensorflow as tf\n'), ((10850, 10901), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""summary_op"""', 'self.summary_op'], {}), "('summary_op', self.summary_op)\n", (10870, 10901), True, 'import tensorflow as tf\n'), ((10927, 10952), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.vars'], {}), '(self.vars)\n', (10941, 10952), True, 'import tensorflow as tf\n'), ((11313, 11345), 'tensorflow.train.checkpoint_exists', 'tf.train.checkpoint_exists', (['path'], {}), '(path)\n', (11339, 11345), True, 'import tensorflow as tf\n'), ((11410, 11426), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (11424, 11426), True, 'import tensorflow as tf\n'), ((11586, 11611), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (11596, 11611), True, 'import tensorflow as tf\n'), ((11636, 11678), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(path + '.meta')"], {}), "(path + '.meta')\n", (11662, 11678), True, 'import tensorflow as tf\n'), ((12039, 12087), 'tensorflow.contrib.graph_editor.reroute_ts', 'graph_editor.reroute_ts', (['eval_images', 'mem_images'], {}), '(eval_images, mem_images)\n', (12062, 12087), False, 'from tensorflow.contrib import graph_editor\n'), ((12094, 12142), 
'tensorflow.contrib.graph_editor.reroute_ts', 'graph_editor.reroute_ts', (['eval_labels', 'mem_labels'], {}), '(eval_labels, mem_labels)\n', (12117, 12142), False, 'from tensorflow.contrib import graph_editor\n'), ((12167, 12192), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (12177, 12192), True, 'import tensorflow as tf\n'), ((12730, 12746), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (12744, 12746), True, 'import tensorflow as tf\n'), ((12907, 12932), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (12917, 12932), True, 'import tensorflow as tf\n'), ((12958, 13000), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(path + '.meta')"], {}), "(path + '.meta')\n", (12984, 13000), True, 'import tensorflow as tf\n'), ((13363, 13412), 'tensorflow.contrib.graph_editor.reroute_ts', 'graph_editor.reroute_ts', (['train_images', 'mem_images'], {}), '(train_images, mem_images)\n', (13386, 13412), False, 'from tensorflow.contrib import graph_editor\n'), ((13419, 13468), 'tensorflow.contrib.graph_editor.reroute_ts', 'graph_editor.reroute_ts', (['train_labels', 'mem_labels'], {}), '(train_labels, mem_labels)\n', (13442, 13468), False, 'from tensorflow.contrib import graph_editor\n'), ((13494, 13519), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (13504, 13519), True, 'import tensorflow as tf\n'), ((13920, 13951), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (13937, 13951), True, 'import tensorflow as tf\n'), ((14056, 14078), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (14076, 14078), True, 'import tensorflow as tf\n'), ((14100, 14173), 'tensorflow.get_variable', 'tf.get_variable', (['"""global_step"""'], {'shape': '[]', 'dtype': 'tf.int32', 'trainable': '(False)'}), "('global_step', shape=[], dtype=tf.int32, trainable=False)\n", (14115, 
14173), True, 'import tensorflow as tf\n'), ((14249, 14317), 'utils.lrn_rate_utils.setup_lrn_rate', 'setup_lrn_rate', (['self.global_step', 'self.model_name', 'self.dataset_name'], {}), '(self.global_step, self.model_name, self.dataset_name)\n', (14263, 14317), False, 'from utils.lrn_rate_utils import setup_lrn_rate\n'), ((19547, 19579), 'os.path.dirname', 'os.path.dirname', (['FLAGS.save_path'], {}), '(FLAGS.save_path)\n', (19562, 19579), False, 'import os\n'), ((20258, 20268), 'utils.multi_gpu_wrapper.MultiGpuWrapper.size', 'mgw.size', ([], {}), '()\n', (20266, 20268), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((20761, 20768), 'timeit.default_timer', 'timer', ([], {}), '()\n', (20766, 20768), True, 'from timeit import default_timer as timer\n'), ((21234, 21285), 'numpy.loadtxt', 'np.loadtxt', (['FLAGS.cp_prune_list_file'], {'delimiter': '""","""'}), "(FLAGS.cp_prune_list_file, delimiter=',')\n", (21244, 21285), True, 'import numpy as np\n'), ((24522, 24547), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (24532, 24547), True, 'import tensorflow as tf\n'), ((25127, 25134), 'timeit.default_timer', 'timer', ([], {}), '()\n', (25132, 25134), True, 'from timeit import default_timer as timer\n'), ((25739, 25881), 'tensorflow.logging.info', 'tf.logging.info', (["('roll-out #%d: a-loss = %.2e | c-loss = %.2e | noise std. = %.2e' % (\n idx_rlout, actor_loss, critic_loss, noise_std))"], {}), "(\n 'roll-out #%d: a-loss = %.2e | c-loss = %.2e | noise std. 
= %.2e' % (\n idx_rlout, actor_loss, critic_loss, noise_std))\n", (25754, 25881), True, 'import tensorflow as tf\n'), ((6575, 6608), 'tensorflow.get_collection', 'tf.get_collection', (['"""train_images"""'], {}), "('train_images')\n", (6592, 6608), True, 'import tensorflow as tf\n'), ((6633, 6666), 'tensorflow.get_collection', 'tf.get_collection', (['"""train_labels"""'], {}), "('train_labels')\n", (6650, 6666), True, 'import tensorflow as tf\n'), ((6689, 6720), 'tensorflow.get_collection', 'tf.get_collection', (['"""mem_images"""'], {}), "('mem_images')\n", (6706, 6720), True, 'import tensorflow as tf\n'), ((6743, 6774), 'tensorflow.get_collection', 'tf.get_collection', (['"""mem_labels"""'], {}), "('mem_labels')\n", (6760, 6774), True, 'import tensorflow as tf\n'), ((6797, 6828), 'tensorflow.get_collection', 'tf.get_collection', (['"""summary_op"""'], {}), "('summary_op')\n", (6814, 6828), True, 'import tensorflow as tf\n'), ((6845, 6870), 'tensorflow.get_collection', 'tf.get_collection', (['"""loss"""'], {}), "('loss')\n", (6862, 6870), True, 'import tensorflow as tf\n'), ((6892, 6921), 'tensorflow.get_collection', 'tf.get_collection', (['"""accuracy"""'], {}), "('accuracy')\n", (6909, 6921), True, 'import tensorflow as tf\n'), ((9174, 9208), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.data_scope'], {}), '(self.data_scope)\n', (9191, 9208), True, 'import tensorflow as tf\n'), ((9565, 9624), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'train_images.dtype', 'shape': 'image_shape'}), '(dtype=train_images.dtype, shape=image_shape)\n', (9579, 9624), True, 'import tensorflow as tf\n'), ((9682, 9741), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'train_labels.dtype', 'shape': 'label_shape'}), '(dtype=train_labels.dtype, shape=label_shape)\n', (9696, 9741), True, 'import tensorflow as tf\n'), ((9787, 9837), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""train_images"""', 'train_images'], {}), "('train_images', 
train_images)\n", (9807, 9837), True, 'import tensorflow as tf\n'), ((9846, 9896), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""train_labels"""', 'train_labels'], {}), "('train_labels', train_labels)\n", (9866, 9896), True, 'import tensorflow as tf\n'), ((9905, 9953), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""eval_images"""', 'eval_images'], {}), "('eval_images', eval_images)\n", (9925, 9953), True, 'import tensorflow as tf\n'), ((9962, 10010), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""eval_labels"""', 'eval_labels'], {}), "('eval_labels', eval_labels)\n", (9982, 10010), True, 'import tensorflow as tf\n'), ((10019, 10065), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""mem_images"""', 'mem_images'], {}), "('mem_images', mem_images)\n", (10039, 10065), True, 'import tensorflow as tf\n'), ((10074, 10120), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""mem_labels"""', 'mem_labels'], {}), "('mem_labels', mem_labels)\n", (10094, 10120), True, 'import tensorflow as tf\n'), ((10158, 10193), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.model_scope'], {}), '(self.model_scope)\n', (10175, 10193), True, 'import tensorflow as tf\n'), ((10496, 10530), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (10516, 10530), True, 'import tensorflow as tf\n'), ((10539, 10577), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""logits"""', 'logits'], {}), "('logits', logits)\n", (10559, 10577), True, 'import tensorflow as tf\n'), ((10613, 10644), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (10630, 10644), True, 'import tensorflow as tf\n'), ((10974, 11015), 'math.log', 'math.log', (['(FLAGS.cp_preserve_ratio + 1)', '(10)'], {}), '(FLAGS.cp_preserve_ratio + 1, 10)\n', (10982, 11015), False, 'import math\n'), ((11751, 11778), 'tensorflow.get_collection', 
'tf.get_collection', (['"""logits"""'], {}), "('logits')\n", (11768, 11778), True, 'import tensorflow as tf\n'), ((11802, 11834), 'tensorflow.get_collection', 'tf.get_collection', (['"""eval_images"""'], {}), "('eval_images')\n", (11819, 11834), True, 'import tensorflow as tf\n'), ((11858, 11890), 'tensorflow.get_collection', 'tf.get_collection', (['"""eval_labels"""'], {}), "('eval_labels')\n", (11875, 11890), True, 'import tensorflow as tf\n'), ((11913, 11944), 'tensorflow.get_collection', 'tf.get_collection', (['"""mem_images"""'], {}), "('mem_images')\n", (11930, 11944), True, 'import tensorflow as tf\n'), ((11967, 11998), 'tensorflow.get_collection', 'tf.get_collection', (['"""mem_labels"""'], {}), "('mem_labels')\n", (11984, 11998), True, 'import tensorflow as tf\n'), ((13070, 13097), 'tensorflow.get_collection', 'tf.get_collection', (['"""logits"""'], {}), "('logits')\n", (13087, 13097), True, 'import tensorflow as tf\n'), ((13122, 13155), 'tensorflow.get_collection', 'tf.get_collection', (['"""train_images"""'], {}), "('train_images')\n", (13139, 13155), True, 'import tensorflow as tf\n'), ((13180, 13213), 'tensorflow.get_collection', 'tf.get_collection', (['"""train_labels"""'], {}), "('train_labels')\n", (13197, 13213), True, 'import tensorflow as tf\n'), ((13236, 13267), 'tensorflow.get_collection', 'tf.get_collection', (['"""mem_images"""'], {}), "('mem_images')\n", (13253, 13267), True, 'import tensorflow as tf\n'), ((13290, 13321), 'tensorflow.get_collection', 'tf.get_collection', (['"""mem_labels"""'], {}), "('mem_labels')\n", (13307, 13321), True, 'import tensorflow as tf\n'), ((13994, 14031), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['key', 'accuracy[key]'], {}), '(key, accuracy[key])\n', (14011, 14031), True, 'import tensorflow as tf\n'), ((14396, 14440), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['FLAGS.cp_lrn_rate_ft'], {}), '(FLAGS.cp_lrn_rate_ft)\n', (14418, 14440), True, 'import tensorflow as tf\n'), ((14566, 
14618), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['lrn_rate', 'FLAGS.momentum'], {}), '(lrn_rate, FLAGS.momentum)\n', (14592, 14618), True, 'import tensorflow as tf\n'), ((14735, 14774), 'utils.multi_gpu_wrapper.MultiGpuWrapper.DistributedOptimizer', 'mgw.DistributedOptimizer', (['mom_optimizer'], {}), '(mom_optimizer)\n', (14759, 14774), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((14972, 15012), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['self.update_ops'], {}), '(self.update_ops)\n', (14995, 15012), True, 'import tensorflow as tf\n'), ((15135, 15157), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (15155, 15157), True, 'import tensorflow as tf\n'), ((15327, 15360), 'utils.multi_gpu_wrapper.MultiGpuWrapper.broadcast_global_variables', 'mgw.broadcast_global_variables', (['(0)'], {}), '(0)\n', (15357, 15360), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((16474, 16496), 'numpy.ones', 'np.ones', (['grad[0].shape'], {}), '(grad[0].shape)\n', (16481, 16496), True, 'import numpy as np\n'), ((16656, 16692), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['mask_tensor'], {}), '(mask_tensor)\n', (16679, 16692), True, 'import tensorflow as tf\n'), ((18540, 18556), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (18554, 18556), True, 'import tensorflow as tf\n'), ((18889, 18905), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (18903, 18905), True, 'import tensorflow as tf\n'), ((20188, 20195), 'timeit.default_timer', 'timer', ([], {}), '()\n', (20193, 20195), True, 'from timeit import default_timer as timer\n'), ((21355, 21499), 'tensorflow.logging.error', 'tf.logging.error', (['"""The prune list file format is not correct. \n It\'s content should be a float list delimited by a comma."""'], {}), '(\n """The prune list file format is not correct. 
\n It\'s content should be a float list delimited by a comma."""\n )\n', (21371, 21499), True, 'import tensorflow as tf\n'), ((23710, 23725), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (23717, 23725), True, 'import numpy as np\n'), ((26618, 26694), 'tensorflow.logging.info', 'tf.logging.info', (["('best reward updated: %.4f -> %.4f' % (reward_best, reward))"], {}), "('best reward updated: %.4f -> %.4f' % (reward_best, reward))\n", (26633, 26694), True, 'import tensorflow as tf\n'), ((27320, 27330), 'utils.multi_gpu_wrapper.MultiGpuWrapper.rank', 'mgw.rank', ([], {}), '()\n', (27328, 27330), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((6169, 6179), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6177, 6179), True, 'import tensorflow as tf\n'), ((7148, 7170), 'tensorflow.get_collection', 'tf.get_collection', (['key'], {}), '(key)\n', (7165, 7170), True, 'import tensorflow as tf\n'), ((8365, 8388), 'numpy.array', 'np.array', (['accuracies[i]'], {}), '(accuracies[i])\n', (8373, 8388), True, 'import numpy as np\n'), ((8906, 8916), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (8914, 8916), True, 'import tensorflow as tf\n'), ((10447, 10487), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['key', 'accuracy[key]'], {}), '(key, accuracy[key])\n', (10467, 10487), True, 'import tensorflow as tf\n'), ((10691, 10728), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['key', 'accuracy[key]'], {}), '(key, accuracy[key])\n', (10708, 10728), True, 'import tensorflow as tf\n'), ((11370, 11380), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (11378, 11380), True, 'import tensorflow as tf\n'), ((11514, 11530), 'utils.multi_gpu_wrapper.MultiGpuWrapper.local_rank', 'mgw.local_rank', ([], {}), '()\n', (11528, 11530), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((12690, 12700), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (12698, 12700), True, 'import tensorflow as tf\n'), ((12834, 12850), 
'utils.multi_gpu_wrapper.MultiGpuWrapper.local_rank', 'mgw.local_rank', ([], {}), '()\n', (12848, 12850), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), ((14464, 14497), 'tensorflow.constant', 'tf.constant', (['FLAGS.cp_lrn_rate_ft'], {}), '(FLAGS.cp_lrn_rate_ft)\n', (14475, 14497), True, 'import tensorflow as tf\n'), ((23039, 23046), 'timeit.default_timer', 'timer', ([], {}), '()\n', (23044, 23046), True, 'from timeit import default_timer as timer\n'), ((23866, 23881), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (23873, 23881), True, 'import numpy as np\n'), ((26289, 26305), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (26297, 26305), True, 'import numpy as np\n'), ((26393, 26408), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (26400, 26408), True, 'import numpy as np\n'), ((26432, 26452), 'numpy.zeros_like', 'np.zeros_like', (['state'], {}), '(state)\n', (26445, 26452), True, 'import numpy as np\n'), ((4760, 4790), 'pathlib.Path', 'pathlib.Path', (['self.parent_path'], {}), '(self.parent_path)\n', (4772, 4790), False, 'import pathlib\n'), ((20939, 20946), 'timeit.default_timer', 'timer', ([], {}), '()\n', (20944, 20946), True, 'from timeit import default_timer as timer\n'), ((23850, 23863), 'numpy.log', 'np.log', (['flops'], {}), '(flops)\n', (23856, 23863), True, 'import numpy as np\n'), ((27171, 27178), 'timeit.default_timer', 'timer', ([], {}), '()\n', (27176, 27178), True, 'from timeit import default_timer as timer\n'), ((4679, 4732), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (4692, 4732), False, 'import random\n'), ((23786, 23836), 'numpy.maximum', 'np.maximum', (['FLAGS.cp_noise_tolerance', '(1 - accuracy)'], {}), '(FLAGS.cp_noise_tolerance, 1 - accuracy)\n', (23796, 23836), True, 'import numpy as np\n'), ((25582, 25597), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (25589, 25597), True, 'import 
numpy as np\n')]
import numpy as np import os from train import parseArgs FLAGS = parseArgs() model_dir = FLAGS.model_dir from matplotlib import pyplot as plt import matplotlib font = {'size' : 8} matplotlib.rc('font', **font) fig = plt.figure() ax = fig.add_subplot(211) ax.set_title("Actor Loss") ax.set_xlabel("Train Steps") ax2 = fig.add_subplot(212) ax2.set_title("Critic Loss") ax2.set_xlabel("Train Steps") filename = os.path.join(model_dir, "train_stats.npy") stats = np.load(filename) #stats[2] = stats[2] + np.abs(np.min(stats[2])) total_actor_losses = stats[0] total_critic_losses = stats[1] rewards = stats[2]#/np.max(stats[2]) steps = stats[3] ax.plot(np.arange(len(total_actor_losses)), np.array(total_actor_losses)) ax2.plot(np.arange(len(total_critic_losses)), np.array(total_critic_losses)) fig2 = plt.figure() ax3 = fig2.add_subplot(111) ax3.set_title("Rewards per Episode") ax3.set_xlabel("Episodes") ax3.plot(np.arange(len(rewards)), np.array(rewards)) plt.show() fig.savefig(os.path.join(model_dir, "train_errors.png")) fig2.savefig(os.path.join(model_dir, "rewards.png"))
[ "os.path.join", "numpy.array", "matplotlib.pyplot.figure", "matplotlib.rc", "train.parseArgs", "numpy.load", "matplotlib.pyplot.show" ]
[((65, 76), 'train.parseArgs', 'parseArgs', ([], {}), '()\n', (74, 76), False, 'from train import parseArgs\n'), ((183, 212), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (196, 212), False, 'import matplotlib\n'), ((221, 233), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (231, 233), True, 'from matplotlib import pyplot as plt\n'), ((414, 456), 'os.path.join', 'os.path.join', (['model_dir', '"""train_stats.npy"""'], {}), "(model_dir, 'train_stats.npy')\n", (426, 456), False, 'import os\n'), ((465, 482), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (472, 482), True, 'import numpy as np\n'), ((808, 820), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (818, 820), True, 'from matplotlib import pyplot as plt\n'), ((967, 977), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (975, 977), True, 'from matplotlib import pyplot as plt\n'), ((692, 720), 'numpy.array', 'np.array', (['total_actor_losses'], {}), '(total_actor_losses)\n', (700, 720), True, 'import numpy as np\n'), ((768, 797), 'numpy.array', 'np.array', (['total_critic_losses'], {}), '(total_critic_losses)\n', (776, 797), True, 'import numpy as np\n'), ((947, 964), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (955, 964), True, 'import numpy as np\n'), ((991, 1034), 'os.path.join', 'os.path.join', (['model_dir', '"""train_errors.png"""'], {}), "(model_dir, 'train_errors.png')\n", (1003, 1034), False, 'import os\n'), ((1049, 1087), 'os.path.join', 'os.path.join', (['model_dir', '"""rewards.png"""'], {}), "(model_dir, 'rewards.png')\n", (1061, 1087), False, 'import os\n')]
import numpy as np import gym from envs.utils import goal_distance, goal_distance_obs from utils.os_utils import remove_color class CustomGoalEnv(): def __init__(self, args): self.args = args self.env = gym.make(args.env) self.np_random = self.env.env.np_random self.distance_threshold = self.env.env.distance_threshold self.action_space = self.env.action_space self.observation_space = self.env.observation_space self.max_episode_steps = self.env._max_episode_steps self.fixed_obj = False self.has_object = self.env.env.has_object self.obj_range = self.env.env.obj_range # self.target_range = self.env.env.target_range self.target_offset = self.env.env.target_offset self.target_in_the_air = self.env.env.target_in_the_air if self.has_object: self.height_offset = self.env.env.height_offset self.render = self.env.render self.get_obs = self.env.env._get_obs self.reset_sim = self.env.env._reset_sim self.reset_ep() self.env_info = { 'Rewards': self.process_info_rewards, # episode cumulative rewards 'Distance': self.process_info_distance, # distance in the last step 'Success@green': self.process_info_success # is_success in the last step } self.env.reset() self.fixed_obj = True def compute_reward(self, achieved, goal): # achieved is a tuple of two goals return self.env.env.compute_reward(achieved[0], goal, None) # Original # dis = goal_distance(achieved[0], goal) # return -1.0 if dis > self.distance_threshold else 0.0 def compute_distance(self, achieved, goal): return np.sqrt(np.sum(np.square(achieved - goal))) def process_info_rewards(self, obs, reward, info): self.rewards += reward return self.rewards def process_info_distance(self, obs, reward, info): return self.compute_distance(obs['achieved_goal'], obs['desired_goal']) def process_info_success(self, obs, reward, info): return info['is_success'] def process_info(self, obs, reward, info): return { remove_color(key): value_func(obs, reward, info) for key, value_func in self.env_info.items() } def step(self, action): # 
imaginary infinity horizon (without done signal) obs, reward, done, info = self.env.step(action) info = self.process_info(obs, reward, info) reward = self.compute_reward((obs['achieved_goal'], self.last_obs['achieved_goal']), obs['desired_goal']) # TODO: why the heck second argument if it is then ignored?? self.last_obs = obs.copy() return obs, reward, False, info def reset_ep(self): self.rewards = 0.0 def reset(self): self.reset_ep() self.last_obs = (self.env.reset()).copy() return self.last_obs.copy() @property def sim(self): return self.env.env.sim @sim.setter def sim(self, new_sim): self.env.env.sim = new_sim @property def initial_state(self): return self.env.env.initial_state @property def initial_gripper_xpos(self): return self.env.env.initial_gripper_xpos.copy() @property def goal(self): return self.env.env.goal.copy() @goal.setter def goal(self, value): self.env.env.goal = value.copy() def generate_goal(self): """ if self.has_object: goal = self.initial_gripper_xpos[:3] + self.target_offset if self.args.env == 'FetchSlide-v1': goal[0] += self.target_range * 0.5 goal[1] += np.random.uniform(-self.target_range, self.target_range) * 0.5 else: goal[0] += np.random.uniform(-self.target_range, self.target_range) goal[1] += self.target_range # goal[1] += np.random.uniform(-self.target_range, self.target_range) # TODO: changed goal[2] = self.height_offset + int(self.target_in_the_air) * 0.45 else: goal = self.initial_gripper_xpos[:3] + np.array( [np.random.uniform(-self.target_range, self.target_range), self.target_range, self.target_range]) return goal.copy() """ return self.env.env._sample_goal() def reset(self): self.reset_ep() self.env.env._reset_sim() """ self.sim.set_state(self.initial_state) if self.has_object: object_xpos = self.initial_gripper_xpos[:2].copy() random_offset = np.random.uniform(-1, 1) * self.obj_range * self.args.init_offset if self.args.env == 'FetchSlide-v1': object_xpos -= np.array([self.obj_range * 0.5, random_offset]) else: object_xpos 
-= np.array([random_offset, self.obj_range]) object_qpos = self.sim.data.get_joint_qpos('object0:joint') assert object_qpos.shape == (7,) object_qpos[:2] = object_xpos self.sim.data.set_joint_qpos('object0:joint', object_qpos) self.sim.forward() """ self.goal = self.generate_goal() self.last_obs = (self.get_obs()).copy() return self.get_obs() def generate_goal(self): return self.env.env._sample_goal()
[ "utils.os_utils.remove_color", "gym.make", "numpy.square" ]
[((225, 243), 'gym.make', 'gym.make', (['args.env'], {}), '(args.env)\n', (233, 243), False, 'import gym\n'), ((2223, 2240), 'utils.os_utils.remove_color', 'remove_color', (['key'], {}), '(key)\n', (2235, 2240), False, 'from utils.os_utils import remove_color\n'), ((1775, 1801), 'numpy.square', 'np.square', (['(achieved - goal)'], {}), '(achieved - goal)\n', (1784, 1801), True, 'import numpy as np\n')]
import numpy from numpy.testing import assert_raises, assert_equal, assert_allclose from fuel.datasets import Iris from tests import skip_if_not_available def test_iris_all(): skip_if_not_available(datasets=['iris.hdf5']) dataset = Iris(('all',), load_in_memory=False) handle = dataset.open() data, labels = dataset.get_data(handle, slice(0, 10)) assert data.dtype == 'float32' assert data.shape == (10, 4) assert labels.shape == (10, 1) known = numpy.array([5.1, 3.5, 1.4, 0.2]) assert_allclose(data[0], known) assert labels[0][0] == 0 assert dataset.num_examples == 150 dataset.close(handle) def test_iris_axes(): skip_if_not_available(datasets=['iris.hdf5']) dataset = Iris(('all',), load_in_memory=False) assert_equal(dataset.axis_labels['features'], ('batch', 'feature')) def test_iris_invalid_split(): skip_if_not_available(datasets=['iris.hdf5']) assert_raises(ValueError, Iris, ('dummy',))
[ "numpy.testing.assert_equal", "fuel.datasets.Iris", "numpy.testing.assert_allclose", "numpy.testing.assert_raises", "numpy.array", "tests.skip_if_not_available" ]
[((184, 229), 'tests.skip_if_not_available', 'skip_if_not_available', ([], {'datasets': "['iris.hdf5']"}), "(datasets=['iris.hdf5'])\n", (205, 229), False, 'from tests import skip_if_not_available\n'), ((245, 281), 'fuel.datasets.Iris', 'Iris', (["('all',)"], {'load_in_memory': '(False)'}), "(('all',), load_in_memory=False)\n", (249, 281), False, 'from fuel.datasets import Iris\n'), ((483, 516), 'numpy.array', 'numpy.array', (['[5.1, 3.5, 1.4, 0.2]'], {}), '([5.1, 3.5, 1.4, 0.2])\n', (494, 516), False, 'import numpy\n'), ((521, 552), 'numpy.testing.assert_allclose', 'assert_allclose', (['data[0]', 'known'], {}), '(data[0], known)\n', (536, 552), False, 'from numpy.testing import assert_raises, assert_equal, assert_allclose\n'), ((675, 720), 'tests.skip_if_not_available', 'skip_if_not_available', ([], {'datasets': "['iris.hdf5']"}), "(datasets=['iris.hdf5'])\n", (696, 720), False, 'from tests import skip_if_not_available\n'), ((736, 772), 'fuel.datasets.Iris', 'Iris', (["('all',)"], {'load_in_memory': '(False)'}), "(('all',), load_in_memory=False)\n", (740, 772), False, 'from fuel.datasets import Iris\n'), ((777, 844), 'numpy.testing.assert_equal', 'assert_equal', (["dataset.axis_labels['features']", "('batch', 'feature')"], {}), "(dataset.axis_labels['features'], ('batch', 'feature'))\n", (789, 844), False, 'from numpy.testing import assert_raises, assert_equal, assert_allclose\n'), ((899, 944), 'tests.skip_if_not_available', 'skip_if_not_available', ([], {'datasets': "['iris.hdf5']"}), "(datasets=['iris.hdf5'])\n", (920, 944), False, 'from tests import skip_if_not_available\n'), ((950, 993), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'Iris', "('dummy',)"], {}), "(ValueError, Iris, ('dummy',))\n", (963, 993), False, 'from numpy.testing import assert_raises, assert_equal, assert_allclose\n')]
# coding: utf-8 import base64 from keras import models import tensorflow as tf import os import cv2 import numpy as np import scipy.fftpack graph = tf.get_default_graph() PATH = lambda p: os.path.abspath( os.path.join(os.path.dirname(__file__), p) ) TEXT_MODEL = "" IMG_MODEL = "" def pretreatment_get_text(img, offset=0): # 得到图像中的文本部分 return img[3:22, 120 + offset:177 + offset] def phash(im): im = cv2.resize(im, (32, 32), interpolation=cv2.INTER_CUBIC) im = scipy.fftpack.dct(scipy.fftpack.dct(im, axis=0), axis=1) im = im[:8, :8] med = np.median(im) im = im > med im = np.packbits(im) return im def _get_imgs(img): interval = 5 length = 67 for x in range(40, img.shape[0] - length, interval + length): for y in range(interval, img.shape[1] - length, interval + length): yield img[x:x + length, y:y + length] def get_imgs(img): imgs = [] for img in _get_imgs(img): imgs.append(phash(img)) return imgs def get_text(img, offset=0): text = pretreatment_get_text(img, offset) text = cv2.cvtColor(text, cv2.COLOR_BGR2GRAY) text = text / 255.0 h, w = text.shape text.shape = (1, h, w, 1) return text def base64_to_image(base64_code): # base64解码 img_data = base64.b64decode(base64_code) # 转换为np数组 img_array = np.fromstring(img_data, np.uint8) # 转换成opencv可用格式 img = cv2.imdecode(img_array, cv2.COLOR_RGB2BGR) return img def preprocess_input(x): x = x.astype('float32') # 我是用cv2来读取的图片,其已经是BGR格式了 mean = [103.939, 116.779, 123.68] x -= mean return x def code_xy(Ofset=None, is_raw_input=True): """ 获取验证码 :return: str """ if is_raw_input: print(u""" ***************** | 1 | 2 | 3 | 4 | ***************** | 5 | 6 | 7 | 8 | ***************** """) print(u"验证码分为8个,对应上面数字,例如第一和第二张,输入1, 2 如果开启cdn查询的话,会冲掉提示,直接鼠标点击命令行获取焦点,输入即可,不要输入空格") print(u"如果是linux无图形界面,请使用自动打码,is_auto_code: True") print(u"如果没有弹出验证码,请手动双击根目录下的tkcode.png文件") Ofset = input(u"输入对应的验证码: ") if isinstance(Ofset, list): select = Ofset else: Ofset = Ofset.replace(",", ",") select = Ofset.split(',') post = [] offsetsX = 0 # 
选择的答案的left值,通过浏览器点击8个小图的中点得到的,这样基本没问题 offsetsY = 0 # 选择的答案的top值 for ofset in select: if ofset == '1': offsetsY = 77 offsetsX = 40 elif ofset == '2': offsetsY = 77 offsetsX = 112 elif ofset == '3': offsetsY = 77 offsetsX = 184 elif ofset == '4': offsetsY = 77 offsetsX = 256 elif ofset == '5': offsetsY = 149 offsetsX = 40 elif ofset == '6': offsetsY = 149 offsetsX = 112 elif ofset == '7': offsetsY = 149 offsetsX = 184 elif ofset == '8': offsetsY = 149 offsetsX = 256 else: pass post.append(offsetsX) post.append(offsetsY) randCode = str(post).replace(']', '').replace('[', '').replace("'", '').replace(' ', '') print(u"验证码识别坐标为{0}".format(randCode)) return randCode class Verify: def __init__(self): self.textModel = "" self.imgModel = "" self.loadImgModel() self.loadTextModel() def loadTextModel(self): if not self.textModel: self.textModel = models.load_model(PATH('model.v2.0.h5')) else: print("无需加载模型model.v2.0.h5") def loadImgModel(self): if not self.imgModel: self.imgModel = models.load_model(PATH('12306.image.model.h5')) def verify(self, fn): verify_titles = ['打字机', '调色板', '跑步机', '毛线', '老虎', '安全帽', '沙包', '盘子', '本子', '药片', '双面胶', '龙舟', '红酒', '拖把', '卷尺', '海苔', '红豆', '黑板', '热水袋', '烛台', '钟表', '路灯', '沙拉', '海报', '公交卡', '樱桃', '创可贴', '牌坊', '苍蝇拍', '高压锅', '电线', '网球拍', '海鸥', '风铃', '订书机', '冰箱', '话梅', '排风机', '锅铲', '绿豆', '航母', '电子秤', '红枣', '金字塔', '鞭炮', '菠萝', '开瓶器', '电饭煲', '仪表盘', '棉棒', '篮球', '狮子', '蚂蚁', '蜡烛', '茶盅', '印章', '茶几', '啤酒', '档案袋', '挂钟', '刺绣', '铃铛', '护腕', '手掌印', '锦旗', '文具盒', '辣椒酱', '耳塞', '中国结', '蜥蜴', '剪纸', '漏斗', '锣', '蒸笼', '珊瑚', '雨靴', '薯条', '蜜蜂', '日历', '口哨'] # 读取并预处理验证码 img = base64_to_image(fn) text = get_text(img) imgs = np.array(list(_get_imgs(img))) imgs = preprocess_input(imgs) text_list = [] # 识别文字 self.loadTextModel() global graph with graph.as_default(): label = self.textModel.predict(text) label = label.argmax() text = verify_titles[label] text_list.append(text) # 获取下一个词 # 根据第一个词的长度来定位第二个词的位置 if len(text) == 1: offset = 27 elif len(text) == 2: offset = 47 else: 
offset = 60 text = get_text(img, offset=offset) if text.mean() < 0.95: with graph.as_default(): label = self.textModel.predict(text) label = label.argmax() text = verify_titles[label] text_list.append(text) print("题目为{}".format(text_list)) # 加载图片分类器 self.loadImgModel() with graph.as_default(): labels = self.imgModel.predict(imgs) labels = labels.argmax(axis=1) results = [] for pos, label in enumerate(labels): l = verify_titles[label] print(pos + 1, l) if l in text_list: results.append(str(pos + 1)) return results
[ "numpy.packbits", "numpy.median", "base64.b64decode", "os.path.dirname", "cv2.imdecode", "cv2.cvtColor", "cv2.resize", "numpy.fromstring", "tensorflow.get_default_graph" ]
[((150, 172), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (170, 172), True, 'import tensorflow as tf\n'), ((423, 478), 'cv2.resize', 'cv2.resize', (['im', '(32, 32)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(im, (32, 32), interpolation=cv2.INTER_CUBIC)\n', (433, 478), False, 'import cv2\n'), ((575, 588), 'numpy.median', 'np.median', (['im'], {}), '(im)\n', (584, 588), True, 'import numpy as np\n'), ((616, 631), 'numpy.packbits', 'np.packbits', (['im'], {}), '(im)\n', (627, 631), True, 'import numpy as np\n'), ((1095, 1133), 'cv2.cvtColor', 'cv2.cvtColor', (['text', 'cv2.COLOR_BGR2GRAY'], {}), '(text, cv2.COLOR_BGR2GRAY)\n', (1107, 1133), False, 'import cv2\n'), ((1292, 1321), 'base64.b64decode', 'base64.b64decode', (['base64_code'], {}), '(base64_code)\n', (1308, 1321), False, 'import base64\n'), ((1352, 1385), 'numpy.fromstring', 'np.fromstring', (['img_data', 'np.uint8'], {}), '(img_data, np.uint8)\n', (1365, 1385), True, 'import numpy as np\n'), ((1416, 1458), 'cv2.imdecode', 'cv2.imdecode', (['img_array', 'cv2.COLOR_RGB2BGR'], {}), '(img_array, cv2.COLOR_RGB2BGR)\n', (1428, 1458), False, 'import cv2\n'), ((224, 249), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (239, 249), False, 'import os\n')]
################################################################################ # Numba-DPPY # # Copyright 2020-2021 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ import dpctl import numpy as np import pytest from numba import njit import numba_dppy as dppy from numba_dppy.tests._helper import ( dpnp_debug, filter_strings_with_skips_for_opencl, skip_no_dpnp, ) from ._helper import wrapper_function pytestmark = skip_no_dpnp list_of_dtypes = [ np.int32, np.int64, np.float32, np.float64, ] @pytest.fixture(params=list_of_dtypes) def input_arrays(request): # The size of input and out arrays to be used N = 100 a = np.array(np.random.random(N), request.param) b = np.array(np.random.random(N), request.param) return a, b list_of_shape = [ (100), (50, 2), (10, 5, 2), ] @pytest.fixture(params=list_of_shape) def get_shape(request): return request.param list_of_unary_ops = [ "max", "amax", "min", "amin", "median", "mean", "cov", ] @pytest.fixture(params=list_of_unary_ops) def unary_op(request): return ( wrapper_function("a", f"np.{request.param}(a)", globals()), request.param, ) @pytest.mark.parametrize("filter_str", filter_strings_with_skips_for_opencl) def test_unary_ops(filter_str, unary_op, input_arrays, get_shape, capfd): a = input_arrays[0] op, name = unary_op if name != "cov": a = np.reshape(a, get_shape) actual = np.empty(shape=a.shape, dtype=a.dtype) expected = np.empty(shape=a.shape, 
dtype=a.dtype) f = njit(op) device = dpctl.SyclDevice(filter_str) with dpctl.device_context(device), dpnp_debug(): actual = f(a) captured = capfd.readouterr() assert "dpnp implementation" in captured.out expected = op(a) np.testing.assert_allclose(actual, expected, rtol=1e-3, atol=0)
[ "numpy.reshape", "numpy.random.random", "numpy.testing.assert_allclose", "numba.njit", "dpctl.SyclDevice", "numba_dppy.tests._helper.dpnp_debug", "pytest.mark.parametrize", "dpctl.device_context", "numpy.empty", "pytest.fixture" ]
[((1159, 1196), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'list_of_dtypes'}), '(params=list_of_dtypes)\n', (1173, 1196), False, 'import pytest\n'), ((1473, 1509), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'list_of_shape'}), '(params=list_of_shape)\n', (1487, 1509), False, 'import pytest\n'), ((1671, 1711), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'list_of_unary_ops'}), '(params=list_of_unary_ops)\n', (1685, 1711), False, 'import pytest\n'), ((1848, 1923), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""filter_str"""', 'filter_strings_with_skips_for_opencl'], {}), "('filter_str', filter_strings_with_skips_for_opencl)\n", (1871, 1923), False, 'import pytest\n'), ((2119, 2157), 'numpy.empty', 'np.empty', ([], {'shape': 'a.shape', 'dtype': 'a.dtype'}), '(shape=a.shape, dtype=a.dtype)\n', (2127, 2157), True, 'import numpy as np\n'), ((2173, 2211), 'numpy.empty', 'np.empty', ([], {'shape': 'a.shape', 'dtype': 'a.dtype'}), '(shape=a.shape, dtype=a.dtype)\n', (2181, 2211), True, 'import numpy as np\n'), ((2221, 2229), 'numba.njit', 'njit', (['op'], {}), '(op)\n', (2225, 2229), False, 'from numba import njit\n'), ((2243, 2271), 'dpctl.SyclDevice', 'dpctl.SyclDevice', (['filter_str'], {}), '(filter_str)\n', (2259, 2271), False, 'import dpctl\n'), ((2464, 2528), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual', 'expected'], {'rtol': '(0.001)', 'atol': '(0)'}), '(actual, expected, rtol=0.001, atol=0)\n', (2490, 2528), True, 'import numpy as np\n'), ((1303, 1322), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (1319, 1322), True, 'import numpy as np\n'), ((1356, 1375), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (1372, 1375), True, 'import numpy as np\n'), ((2080, 2104), 'numpy.reshape', 'np.reshape', (['a', 'get_shape'], {}), '(a, get_shape)\n', (2090, 2104), True, 'import numpy as np\n'), ((2281, 2309), 'dpctl.device_context', 'dpctl.device_context', (['device'], {}), 
'(device)\n', (2301, 2309), False, 'import dpctl\n'), ((2311, 2323), 'numba_dppy.tests._helper.dpnp_debug', 'dpnp_debug', ([], {}), '()\n', (2321, 2323), False, 'from numba_dppy.tests._helper import dpnp_debug, filter_strings_with_skips_for_opencl, skip_no_dpnp\n')]
import numpy as np from skimage import morphology import hy # setup tensorflow import os os.environ["CUDA_VISIBLE_DEVICES"]="" import tensorflow as tf print(f'tensorflow version = {tf.__version__}') tf_device = '/cpu:0' setup = { # based on 3_compare_re_nn.py 'in_size': 64, 'undersample_target': False, # Number of cycles and AD iterations 'n_cycles': 1, 'n_iters': 1000, 'learn_rate': 1e-2, # Shrink wrap variables (Gaussian smoothing) 'dilate_support': 0, # NN as initial shrink wrap support 'isig': 1, # Sigma of G for first support update 'fsig': 1, # Sigma for final support update 'severy': 50, # Update support every how many iterations? 'sfrac': 0.15, # Fraction of maximum intensity to use for shrink wrap boundary } if setup['undersample_target']: typ = 'downsampled' else: typ = 'crop' setup['target'] = f"ft_{typ}_{setup['in_size']}.npy" # Shrink wrap support with increasing iterations # by scaling guassian smoothening def update_support(obj:np.ndarray, i:int, setup:dict): # Only shrink support during the first cycle ntot = setup['n_iters']/setup['n_cycles'] # Calculate initial and final update points xo, xf = 1., ntot/setup['severy'] yo, yf = setup['isig'], float(setup['fsig']) i /= float(setup['severy']) #Count in number of updates if xf == 1: # 0 division of only 1 update done sig = (setup['isig']+setup['fsig'])/2. 
else: # Linearly scale sigma between initial and final values sig = yo+(i-xo)*((yf-yo)/(xf-xo)) #if not i%setup['severy']: print ("%d Real space sigma"%i, sig) rimage = np.abs(obj) smooth = np.abs(hy.gauss_conv_fft(rimage,[sig,sig,sig])) smooth /= smooth.max() supp = (smooth>=setup['sfrac'])*1 # Threshold as fraction of max #xyz_save(supp,'visuals/supp%d.xyz' %i) return supp # Simple Reconstruction: tf.compat.v1.disable_eager_execution() def reconstruct( target_diffraction: np.ndarray, initial_guess: np.ndarray, initial_support: np.ndarray, setup: dict, ): guess0 = np.copy(initial_guess) support0 = np.copy(initial_support) tf.compat.v1.reset_default_graph() with tf.device(tf_device): tf_diffs = tf.constant(target_diffraction, dtype='float32') tf_obj_real = tf.Variable(np.real(guess0), dtype='float32') tf_obj_imag = tf.Variable(np.imag(guess0), dtype='float32') tf_obj = tf.complex(tf_obj_real, tf_obj_imag) tf_support = tf.compat.v1.placeholder(tf.float32, shape=support0.shape) tf_support = tf.complex(tf_support, tf.zeros_like(tf_support)) tf_obj *= tf_support # Finally the loss function exitwave = tf.abs(tf.signal.fft3d(tf_obj)) exitwave /= tf.reduce_max(exitwave) loss = tf.reduce_sum((exitwave - tf_diffs)**2) print('learning rate: %g\n'%setup['learn_rate']) opt = tf.compat.v1.train.AdamOptimizer(setup['learn_rate']) minimize_op = opt.minimize(loss) sess_config = tf.compat.v1.ConfigProto() #sess_config.gpu_options.allow_growth = True #sess_config.allow_soft_placement = True session = tf.compat.v1.Session(config=sess_config) session.run(tf.compat.v1.global_variables_initializer()) lossvals = [] lowest = np.inf print('update support every: %d'%setup['severy']) print('sigma of G: from %g to %g'%(setup['isig'],setup['fsig'])) print('shrink wrap boundary threshold fraction: %g'%setup['sfrac']) print('\nrunning %d iterations ...'%setup['n_iters']) for i in range(setup['n_iters']): lossval, _ = session.run([loss, minimize_op], feed_dict={tf_support: support0}) 
lossvals.append(lossval) if i % 100 == 0: print(f"{i}| current loss {lossval:4.3g}, "+ f"loss before last shrinkwrap {lowest:4.3g}") if (lowest - lossval) < 0.1 * lowest or i % setup['severy'] != 0: continue lowest = lossval recon = session.run(tf_obj, feed_dict={tf_support:support0}) if i % setup['severy'] == 0: support0 = update_support(recon, i, setup) print('lowest lossval: ',np.min(lossvals)) support0 = update_support(recon, i, setup) recon = recon * support0 recon /= np.max(np.abs(recon)) return { 'output': recon, 'loss': lossvals, 'support': support0, } def run_ad(setup): # load the diffraction patterns fp = np.fft.fftshift(np.load(setup['target'])) print("ft input shape =",fp.shape,", minmax =",np.min(fp),np.max(fp)) # initial guess guess = np.load(f"ad_input_{setup['in_size']}.npy") support = (np.abs(guess) > 1e-4) support = morphology.dilation(support, selem=morphology.ball(setup['dilate_support'])) # reconstruct refined = reconstruct(fp, guess, support, setup) # output hy.to_vtk("compare/ad_output", { 'shape': np.abs(refined['output']), 'phase': np.angle(refined['output']), }) np.save(f"ad_output_{setup['in_size']}.npy",refined['output']) if __name__ == '__main__': run_ad(setup)
[ "tensorflow.reduce_sum", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.signal.fft3d", "tensorflow.compat.v1.Session", "numpy.save", "numpy.imag", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.global_variables_initializer", "numpy.max", "numpy.real", "numpy.min", "tensorfl...
[((1861, 1899), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (1897, 1899), True, 'import tensorflow as tf\n'), ((1603, 1614), 'numpy.abs', 'np.abs', (['obj'], {}), '(obj)\n', (1609, 1614), True, 'import numpy as np\n'), ((2051, 2073), 'numpy.copy', 'np.copy', (['initial_guess'], {}), '(initial_guess)\n', (2058, 2073), True, 'import numpy as np\n'), ((2089, 2113), 'numpy.copy', 'np.copy', (['initial_support'], {}), '(initial_support)\n', (2096, 2113), True, 'import numpy as np\n'), ((2119, 2153), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (2151, 2153), True, 'import tensorflow as tf\n'), ((4671, 4714), 'numpy.load', 'np.load', (['f"""ad_input_{setup[\'in_size\']}.npy"""'], {}), '(f"ad_input_{setup[\'in_size\']}.npy")\n', (4678, 4714), True, 'import numpy as np\n'), ((5068, 5131), 'numpy.save', 'np.save', (['f"""ad_output_{setup[\'in_size\']}.npy"""', "refined['output']"], {}), '(f"ad_output_{setup[\'in_size\']}.npy", refined[\'output\'])\n', (5075, 5131), True, 'import numpy as np\n'), ((1636, 1678), 'hy.gauss_conv_fft', 'hy.gauss_conv_fft', (['rimage', '[sig, sig, sig]'], {}), '(rimage, [sig, sig, sig])\n', (1653, 1678), False, 'import hy\n'), ((2163, 2183), 'tensorflow.device', 'tf.device', (['tf_device'], {}), '(tf_device)\n', (2172, 2183), True, 'import tensorflow as tf\n'), ((2205, 2253), 'tensorflow.constant', 'tf.constant', (['target_diffraction'], {'dtype': '"""float32"""'}), "(target_diffraction, dtype='float32')\n", (2216, 2253), True, 'import tensorflow as tf\n'), ((2408, 2444), 'tensorflow.complex', 'tf.complex', (['tf_obj_real', 'tf_obj_imag'], {}), '(tf_obj_real, tf_obj_imag)\n', (2418, 2444), True, 'import tensorflow as tf\n'), ((2467, 2525), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': 'support0.shape'}), '(tf.float32, shape=support0.shape)\n', (2491, 2525), True, 'import tensorflow as 
tf\n'), ((2734, 2757), 'tensorflow.reduce_max', 'tf.reduce_max', (['exitwave'], {}), '(exitwave)\n', (2747, 2757), True, 'import tensorflow as tf\n'), ((2773, 2814), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['((exitwave - tf_diffs) ** 2)'], {}), '((exitwave - tf_diffs) ** 2)\n', (2786, 2814), True, 'import tensorflow as tf\n'), ((2885, 2938), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', (["setup['learn_rate']"], {}), "(setup['learn_rate'])\n", (2917, 2938), True, 'import tensorflow as tf\n'), ((3003, 3029), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (3027, 3029), True, 'import tensorflow as tf\n'), ((3151, 3191), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'sess_config'}), '(config=sess_config)\n', (3171, 3191), True, 'import tensorflow as tf\n'), ((4224, 4240), 'numpy.min', 'np.min', (['lossvals'], {}), '(lossvals)\n', (4230, 4240), True, 'import numpy as np\n'), ((4340, 4353), 'numpy.abs', 'np.abs', (['recon'], {}), '(recon)\n', (4346, 4353), True, 'import numpy as np\n'), ((4538, 4562), 'numpy.load', 'np.load', (["setup['target']"], {}), "(setup['target'])\n", (4545, 4562), True, 'import numpy as np\n'), ((4615, 4625), 'numpy.min', 'np.min', (['fp'], {}), '(fp)\n', (4621, 4625), True, 'import numpy as np\n'), ((4626, 4636), 'numpy.max', 'np.max', (['fp'], {}), '(fp)\n', (4632, 4636), True, 'import numpy as np\n'), ((4730, 4743), 'numpy.abs', 'np.abs', (['guess'], {}), '(guess)\n', (4736, 4743), True, 'import numpy as np\n'), ((2289, 2304), 'numpy.real', 'np.real', (['guess0'], {}), '(guess0)\n', (2296, 2304), True, 'import numpy as np\n'), ((2357, 2372), 'numpy.imag', 'np.imag', (['guess0'], {}), '(guess0)\n', (2364, 2372), True, 'import numpy as np\n'), ((2570, 2595), 'tensorflow.zeros_like', 'tf.zeros_like', (['tf_support'], {}), '(tf_support)\n', (2583, 2595), True, 'import tensorflow as tf\n'), ((2689, 2712), 'tensorflow.signal.fft3d', 'tf.signal.fft3d', 
(['tf_obj'], {}), '(tf_obj)\n', (2704, 2712), True, 'import tensorflow as tf\n'), ((3212, 3255), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (3253, 3255), True, 'import tensorflow as tf\n'), ((4801, 4841), 'skimage.morphology.ball', 'morphology.ball', (["setup['dilate_support']"], {}), "(setup['dilate_support'])\n", (4816, 4841), False, 'from skimage import morphology\n'), ((4983, 5008), 'numpy.abs', 'np.abs', (["refined['output']"], {}), "(refined['output'])\n", (4989, 5008), True, 'import numpy as np\n'), ((5027, 5054), 'numpy.angle', 'np.angle', (["refined['output']"], {}), "(refined['output'])\n", (5035, 5054), True, 'import numpy as np\n')]
import numpy as np def ReLU(x): return np.maximum(x,0) def dReLU(x,y): x[y == 0]=0 return x
[ "numpy.maximum" ]
[((47, 63), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (57, 63), True, 'import numpy as np\n')]
from obspy import UTCDateTime from obspy.clients.fdsn import Client from obspy.taup import TauPyModel import numpy as np import seisutils as su import os import shutil import time # -------------------------------------------------------------------------------------------------------------- # newFetch.py # # This is an updated version of fetchRFdata.py which allows the user to download the data without performing # any pre-processing. The instrument response information is saved in both 'RESP' and 'SACPZ' output formats. # Since downloading data is often slower than processing it, all subsequent processing is done with subsequent # scripts. # # -------------------------------------------------------------------------------------------------------------- # Last updated 12/05/2020 by <EMAIL> # -------------------------------------------------------------------------------------------------------------- def fetch_rf_data(network, location, channel, data_directory, output_units, minimum_magnitude, maximum_magnitude, station): # Track execution time for logging purposes t1 = time.time() ntwk = network stat = station loc = location chan = channel # Define the client that hosts the desired data client = Client("IRIS") # Define directory where seismic data will be saved as SAC files if output_units == 'counts': sac_dir = data_directory + ntwk + '/' + stat + '/' + loc + '/RFQUAKES_COUNTS/' elif output_units == 'displacement': sac_dir = data_directory + ntwk + '/' + stat + '/' + loc + '/RFQUAKES_DISP/' elif output_units == 'velocity': sac_dir = data_directory + ntwk + '/' + stat + '/' + loc + '/RFQUAKES_VEL/' elif output_units == 'acceleration': sac_dir = data_directory + ntwk + '/' + stat + '/' + loc + '/RFQUAKES_ACC/' else: print('ERROR: Invalid output units. Acceptable options are \'displacement,\' \'velocity,\' or \'counts\'') quit() # For now: delete the directory if it exists... if os.path.exists(sac_dir): print('Directory exists. 
Terminiating process...') quit() # shutil.rmtree(sac_dir) if not os.path.exists(sac_dir): os.makedirs(sac_dir) # Define amount of data desired (minutes) duration = 60 # Log potential errors to a .log file logFileName = sac_dir + ntwk + '.' + stat + '.log' # Fetch station information for data retrieval if loc == "NULL": loc = "" try: inv = client.get_stations(network=ntwk, station=stat, channel=chan, level="response") except Exception as error: with open(logFileName, "a") as log: log.write(str(error)) log.write('Error fetching station information with the IRIS client...') return else: try: inv = client.get_stations(network=ntwk, station=stat, loc=loc, channel=chan, level="response") except Exception as error: with open(logFileName, "a") as log: log.write(str(error)) log.write('Error fetching station information with the IRIS client...') return # Save the pole zero files nstats = len(inv.networks[0]) resp_t0 = [] resp_tf = [] pre_filt = [] for i in range(0, nstats): nresp = len(inv.networks[0].stations[i].channels) # Tag the PZ files and SAC files with a number indicating the period of operation for j in range(0, nresp): fileName = sac_dir + "SAC_PZs_" + ntwk + '_' + stat + '_' + inv.networks[0].stations[i].channels[j].code + \ '.' 
+ str(j) with open(fileName, "a") as pzFile: pzFile.write('* **********************************\n') pzFile.write('* NETWORK (KNETWK): ' + inv.networks[0].code + '\n') pzFile.write('* STATION (KSTNM): ' + inv.networks[0].stations[i].code + '\n') pzFile.write('* LOCATION (KHOLE): ' + inv.networks[0].stations[i].channels[j].location_code + '\n') pzFile.write('* CHANNEL (KCMPNM): ' + inv.networks[0].stations[i].channels[j].code + '\n') pzFile.write('* CREATED : ' + str(UTCDateTime.now()).split('.')[0] + '\n') pzFile.write('* START : ' + str(inv.networks[0].stations[i].channels[j].start_date).split('.')[0] + '\n') pzFile.write('* END : ' + str(inv.networks[0].stations[i].channels[j].end_date).split('.')[0] + '\n') pzFile.write('* DESCRIPTION : ' + inv.networks[0].stations[i].site.name + '\n') pzFile.write('* LATITUDE : %0.6f\n' % inv.networks[0].stations[i].latitude) pzFile.write('* LONGITUDE : %0.6f\n' % inv.networks[0].stations[i].longitude) pzFile.write('* ELEVATION : %0.1f\n' % inv.networks[0].stations[i].channels[j].elevation) pzFile.write('* DEPTH : %0.1f\n' % inv.networks[0].stations[i].channels[j].depth) pzFile.write('* DIP : %0.1f\n' % (90.0 - np.abs(inv.networks[0].stations[i].channels[j].dip))) pzFile.write('* AZIMUTH : %0.1f\n' % inv.networks[0].stations[i].channels[j].azimuth) pzFile.write('* SAMPLE RATE : %0.1f\n' % inv.networks[0].stations[i].channels[j].sample_rate) pzFile.write('* INPUT UNIT : M\n') pzFile.write('* OUTPUT UNIT : COUNTS\n') pzFile.write('* INSTTYPE : ' + inv.networks[0].stations[i].channels[j].sensor.description +'\n') pzFile.write('* INSTGAIN : %e (M/S)\n' % inv.networks[0].stations[i].channels[j].response.get_paz().stage_gain) pzFile.write('* COMMENT : \n') pzFile.write('* SENSITIVITY : %e (M/S)\n' % inv.networks[0].stations[i].channels[j].response.instrument_sensitivity.value) pzFile.write('* A0 : %e\n' % inv.networks[0].stations[i].channels[j].response.get_paz().normalization_factor) pzFile.write('* 
**********************************\n') # Save the poles, zeros, and constant nzeros = 3 zeros = inv.networks[0].stations[i].channels[j].response.get_paz().zeros nz = np.nonzero(zeros) pzFile.write('ZEROS ' + str(len(nz[0]) + nzeros) + '\n') pzFile.write(" %+e %+e\n" % (0, 0)) pzFile.write(" %+e %+e\n" % (0, 0)) pzFile.write(" %+e %+e\n" % (0, 0)) if len(nz[0]) != 0: for k in range(0, len(nz[0])): pzFile.write(" %+e %+e\n" % (np.real(zeros[nz[0][k]]), np.imag(zeros[nz[0][k]]))) poles = inv.networks[0].stations[i].channels[j].response.get_paz().poles pzFile.write('POLES ' + str(len(poles)) + '\n') for k in range(0, len(poles)): pzFile.write(" %+e %+e\n" % (np.real(inv.networks[0].stations[i].channels[j].response.get_paz().poles[k]), np.imag(inv.networks[0].stations[i].channels[j].response.get_paz().poles[k]))) pzFile.write('CONSTANT %e' % (inv.networks[0].stations[i].channels[j].response.get_paz().normalization_factor * inv.networks[0].stations[i].channels[j].response.instrument_sensitivity.value)) # pzFile.write(inv.networks[0].stations[i].channels[j].response.get_sacpz()) # Loop over time-periods during which the station was operational and fetch data for i in range(0, nstats): for j in range(0, nresp): if inv.networks[0].stations[i].channels[j].end_date > UTCDateTime.now(): t0 = inv.networks[0].stations[i].channels[j].start_date tf = UTCDateTime.now() else: t0 = inv.networks[0].stations[i].channels[j].start_date tf = inv.networks[0].stations[i].channels[j].end_date # Get station coordinates for event selection stla = inv.networks[0].stations[i].latitude stlo = inv.networks[0].stations[i].longitude # Fetch relevant events in time-window during which station was operational try: catalog = client.get_events(starttime=t0, endtime=tf, minmagnitude=minimum_magnitude, maxmagnitude=maximum_magnitude, latitude=stla, longitude=stlo, minradius=30, maxradius=90) except Exception as error: with open(logFileName, "a") as log: log.write(str(error)) log.write('Error fetching 
event catalog...') continue nEvents = len(catalog.events) # Initialize list of events used for bulk request bulk = [] # Fill 'bulk' with desired event information for k in range(0, nEvents): teq = catalog.events[k].origins[0].time chan = inv.networks[0].stations[i].channels[j].code bulk.append((ntwk, stat, loc, chan, teq, teq + duration*60)) # Fetch the data! if output_units == 'counts': try: st = client.get_waveforms_bulk(bulk) except Exception as error: with open(logFileName, "a") as log: log.write(str(error)) log.write('Unable to complete fetch request for: ' + stat + '.' + loc + '.' + chan) continue else: try: st = client.get_waveforms_bulk(bulk, attach_response=True) except Exception as error: with open(logFileName, "a") as log: log.write(str(error)) log.write('Unable to complete fetch request for: ' + stat + '.' + loc + '.' + chan) continue # Do some file-formatting and optional minor pre-processing for k in range(0, len(st)): teq = st[k].meta.starttime # Optional instrument response removal goes here... # Prepare filename for saving evchan = st[k].meta.channel evid = st[k].meta.starttime.isoformat().replace('-', '.').replace('T', '.').replace(':', '.').split('.')[:-1] evid.extend([ntwk, stat, loc, evchan, str(j), 'SAC']) evid = ".".join(evid) # Add station specific metadata to SAC files st[k].stats.sac = {} st[k].stats.sac.stla = stla st[k].stats.sac.stlo = stlo # Channel orientation (CMPAZ) azid = [ntwk, stat, loc, evchan] azid = ".".join(azid) st[k].stats.sac.cmpaz = inv.get_orientation(azid, teq)["azimuth"] # Add event-specific metadata to SAC files (surely there must be a faster way to do this...?) 
for l in range(0, nEvents): if catalog.events[l].origins[0].time - 5 <= st[k].meta.starttime <= \ catalog.events[l].origins[0].time + 5: st[k].stats.sac.evla = catalog.events[l].origins[0].latitude if st[k].stats.sac.evla is None: with open(logFileName, "a") as log: log.write('Couldn''t find event latitude for: ' + evid + '\n') st[k].stats.sac.evla = 0.0 st[k].stats.sac.evlo = catalog.events[l].origins[0].longitude if st[k].stats.sac.evlo is None: with open(logFileName, "a") as log: log.write('Couldn''t find event longitude for: ' + evid + '\n') st[k].stats.sac.evlo = 0.0 st[k].stats.sac.evdp = catalog.events[l].origins[0].depth if st[k].stats.sac.evdp is None: with open(logFileName, "a") as log: log.write('Couldn''t find event depth for: ' + evid + '\n') st[k].stats.sac.evdp = 0.0 st[k].stats.sac.mag = catalog.events[l].magnitudes[0].mag if st[k].stats.sac.mag is None: with open(logFileName, "a") as log: log.write('Couldn''t find event magnitude for: ' + evid + '\n') st[k].stats.sac.mag = 0.0 # Calculate great circle distance and back-azimuth gcarc, baz = su.haversine(stla, stlo, st[k].stats.sac.evla, st[k].stats.sac.evlo) st[k].stats.sac.gcarc = gcarc st[k].stats.sac.baz = baz # Get theoretical P arrival time, and assign to header 'T0' model = TauPyModel(model="iasp91") phases = ["P"] arrivals = model.get_travel_times(source_depth_in_km=st[k].stats.sac.evdp/1000.0, distance_in_degree=gcarc, phase_list=phases) st[k].stats.sac.t0 = arrivals[0].time # Save the Pole Zero file index in 'USER0' Header st[k].stats.sac.user0 = j # Save the P-wave ray parameter in 'USER9' Header st[k].stats.sac.user9 = arrivals[0].ray_param*(np.pi/180) # Write the data to a SAC file st[k].write(sac_dir + evid, format='SAC') elapsed = time.time() - t1 with open(logFileName, "a") as log: log.write('Time required to complete fetch request: ' + str(elapsed))
[ "os.path.exists", "numpy.abs", "os.makedirs", "obspy.taup.TauPyModel", "numpy.real", "obspy.UTCDateTime.now", "numpy.nonzero", "seisutils.haversine", "obspy.clients.fdsn.Client", "time.time", "numpy.imag" ]
[((1118, 1129), 'time.time', 'time.time', ([], {}), '()\n', (1127, 1129), False, 'import time\n'), ((1273, 1287), 'obspy.clients.fdsn.Client', 'Client', (['"""IRIS"""'], {}), "('IRIS')\n", (1279, 1287), False, 'from obspy.clients.fdsn import Client\n'), ((2050, 2073), 'os.path.exists', 'os.path.exists', (['sac_dir'], {}), '(sac_dir)\n', (2064, 2073), False, 'import os\n'), ((2194, 2217), 'os.path.exists', 'os.path.exists', (['sac_dir'], {}), '(sac_dir)\n', (2208, 2217), False, 'import os\n'), ((2227, 2247), 'os.makedirs', 'os.makedirs', (['sac_dir'], {}), '(sac_dir)\n', (2238, 2247), False, 'import os\n'), ((14364, 14375), 'time.time', 'time.time', ([], {}), '()\n', (14373, 14375), False, 'import time\n'), ((6653, 6670), 'numpy.nonzero', 'np.nonzero', (['zeros'], {}), '(zeros)\n', (6663, 6670), True, 'import numpy as np\n'), ((8201, 8218), 'obspy.UTCDateTime.now', 'UTCDateTime.now', ([], {}), '()\n', (8216, 8218), False, 'from obspy import UTCDateTime\n'), ((8313, 8330), 'obspy.UTCDateTime.now', 'UTCDateTime.now', ([], {}), '()\n', (8328, 8330), False, 'from obspy import UTCDateTime\n'), ((13319, 13387), 'seisutils.haversine', 'su.haversine', (['stla', 'stlo', 'st[k].stats.sac.evla', 'st[k].stats.sac.evlo'], {}), '(stla, stlo, st[k].stats.sac.evla, st[k].stats.sac.evlo)\n', (13331, 13387), True, 'import seisutils as su\n'), ((13608, 13634), 'obspy.taup.TauPyModel', 'TauPyModel', ([], {'model': '"""iasp91"""'}), "(model='iasp91')\n", (13618, 13634), False, 'from obspy.taup import TauPyModel\n'), ((5269, 5320), 'numpy.abs', 'np.abs', (['inv.networks[0].stations[i].channels[j].dip'], {}), '(inv.networks[0].stations[i].channels[j].dip)\n', (5275, 5320), True, 'import numpy as np\n'), ((7078, 7102), 'numpy.real', 'np.real', (['zeros[nz[0][k]]'], {}), '(zeros[nz[0][k]])\n', (7085, 7102), True, 'import numpy as np\n'), ((7104, 7128), 'numpy.imag', 'np.imag', (['zeros[nz[0][k]]'], {}), '(zeros[nz[0][k]])\n', (7111, 7128), True, 'import numpy as np\n'), ((4275, 4292), 
'obspy.UTCDateTime.now', 'UTCDateTime.now', ([], {}), '()\n', (4290, 4292), False, 'from obspy import UTCDateTime\n')]
import cv2 import numpy as np import keras.models import glob from datetime import datetime PATH_TEST = "../image_dataset_keras_color/" VIDEO_INFERENCE = 0 IMG_INFERNECE = 1 model_color = keras.models.load_model('saved_models/keras_RAS_model_color_3.h5') color_class = ['Yellow', 'Green', 'Orange', 'Red', 'Blue', 'Purple', 'Nothing'] if VIDEO_INFERENCE: #cap = cv2.VideoCapture('/home/driverless/ras_perception/DL_training/ras_labeling') cap = cv2.VideoCapture(0) while cap.isOpened(): a = datetime.now() ret, image = cap.read() if ret: input_img = [] input_img.append(cv2.resize(image, (32,32))) input_img = np.array(input_img) prediction = model_color.predict(input_img) print(prediction) cv2.imshow('image', image) cv2.waitKey(10) b = datetime.now() c = b - a fps = 1.0/(c.total_seconds()) print('## FPS: ' + str(fps)) print('') elif IMG_INFERNECE: try: # while True: # label = np.random.randint(0,7) # if label == 0: # dirname = 'Ball' # elif label == 1: # dirname = 'Cube' # elif label == 2: # dirname = 'Cylinder' # elif label == 3: # dirname = 'Hollow Cube' # elif label == 4: # dirname = 'Cross' # elif label == 5: # dirname = 'Triangle' # elif label == 6: # dirname = 'Star' # for file in glob.glob(PATH_TEST + dirname + "/*.jpg"): # image = cv2.imread(file) # input_img = [] # input_img.append(cv2.resize(image, (32,32))) # input_img = np.array(input_img) # prediction = model_colordel.predict(input_img) # print('Actual: ' + str(dirname) + ' detected: ' + str(prediction)) # cv2.waitKey(3000) # #cv2.imshow('image', image) # #cv2.waitKey(0) for file in glob.glob('../RAS_DATASET'+ "/*.jpg"): #image = cv2.imread(file) image = cv2.imread('../ras_objects.jpg') input_img = [] input_img.append(cv2.resize(image, (32,32))) input_img = np.array(input_img) prediction = model_color.predict(input_img) #print('Actual: ' + str(dirname) + ' detected: ' + shape_class[np.argmax(prediction)]) print('detected: ' + color_class[np.argmax(prediction)]) cv2.imshow('image', cv2.resize(image, 
(640,480))) cv2.waitKey(3000) except KeyboardInterrupt: pass
[ "numpy.argmax", "cv2.imshow", "datetime.datetime.now", "numpy.array", "cv2.VideoCapture", "cv2.resize", "cv2.waitKey", "glob.glob", "cv2.imread" ]
[((462, 481), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (478, 481), False, 'import cv2\n'), ((521, 535), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (533, 535), False, 'from datetime import datetime\n'), ((694, 713), 'numpy.array', 'np.array', (['input_img'], {}), '(input_img)\n', (702, 713), True, 'import numpy as np\n'), ((814, 840), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (824, 840), False, 'import cv2\n'), ((853, 868), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (864, 868), False, 'import cv2\n'), ((886, 900), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (898, 900), False, 'from datetime import datetime\n'), ((2162, 2200), 'glob.glob', 'glob.glob', (["('../RAS_DATASET' + '/*.jpg')"], {}), "('../RAS_DATASET' + '/*.jpg')\n", (2171, 2200), False, 'import glob\n'), ((642, 669), 'cv2.resize', 'cv2.resize', (['image', '(32, 32)'], {}), '(image, (32, 32))\n', (652, 669), False, 'import cv2\n'), ((2259, 2291), 'cv2.imread', 'cv2.imread', (['"""../ras_objects.jpg"""'], {}), "('../ras_objects.jpg')\n", (2269, 2291), False, 'import cv2\n'), ((2401, 2420), 'numpy.array', 'np.array', (['input_img'], {}), '(input_img)\n', (2409, 2420), True, 'import numpy as np\n'), ((2724, 2741), 'cv2.waitKey', 'cv2.waitKey', (['(3000)'], {}), '(3000)\n', (2735, 2741), False, 'import cv2\n'), ((2349, 2376), 'cv2.resize', 'cv2.resize', (['image', '(32, 32)'], {}), '(image, (32, 32))\n', (2359, 2376), False, 'import cv2\n'), ((2682, 2711), 'cv2.resize', 'cv2.resize', (['image', '(640, 480)'], {}), '(image, (640, 480))\n', (2692, 2711), False, 'import cv2\n'), ((2626, 2647), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (2635, 2647), True, 'import numpy as np\n')]
from keras.models import Sequential from keras.layers.core import Dense import numpy as np import time number_neuron_connections = 3000000 u = list() for i in range(112): u.append(np.random.rand(number_neuron_connections, 6).astype(np.float32)) def create_mlp(): model = Sequential() model.add(Dense(16, input_dim=6, activation="relu")) model.add(Dense(32, activation="relu")) model.add(Dense(16, activation="relu")) model.add(Dense(8, activation="relu")) model.add(Dense(1, activation="linear")) return model trainX = np.load('trainX.npy') trainY = np.load('trainY.npy') testX = np.load('testX.npy') testY = np.load('testY.npy') model = create_mlp() #model.compile(loss="mean_absolute_percentage_error") model.compile(loss="mean_squared_error") # train the model model.fit(trainX, trainY, validation_data=(testX, testY), epochs=5) # make predictions on the testing data preds = model.predict(testX) preds2 = preds[:,0] diff = preds2 - testY perc = 100 - (preds2/testY)*100 ######### # start = time.time() # z = model.predict(z1) # end = time.time() # print(end - start) ######### start = time.time() for u_i in u: z = model.predict_on_batch(u_i) end = time.time() print(end - start)
[ "numpy.random.rand", "keras.models.Sequential", "numpy.load", "time.time", "keras.layers.core.Dense" ]
[((559, 580), 'numpy.load', 'np.load', (['"""trainX.npy"""'], {}), "('trainX.npy')\n", (566, 580), True, 'import numpy as np\n'), ((590, 611), 'numpy.load', 'np.load', (['"""trainY.npy"""'], {}), "('trainY.npy')\n", (597, 611), True, 'import numpy as np\n'), ((620, 640), 'numpy.load', 'np.load', (['"""testX.npy"""'], {}), "('testX.npy')\n", (627, 640), True, 'import numpy as np\n'), ((649, 669), 'numpy.load', 'np.load', (['"""testY.npy"""'], {}), "('testY.npy')\n", (656, 669), True, 'import numpy as np\n'), ((1140, 1151), 'time.time', 'time.time', ([], {}), '()\n', (1149, 1151), False, 'import time\n'), ((1210, 1221), 'time.time', 'time.time', ([], {}), '()\n', (1219, 1221), False, 'import time\n'), ((284, 296), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (294, 296), False, 'from keras.models import Sequential\n'), ((311, 352), 'keras.layers.core.Dense', 'Dense', (['(16)'], {'input_dim': '(6)', 'activation': '"""relu"""'}), "(16, input_dim=6, activation='relu')\n", (316, 352), False, 'from keras.layers.core import Dense\n'), ((368, 396), 'keras.layers.core.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (373, 396), False, 'from keras.layers.core import Dense\n'), ((412, 440), 'keras.layers.core.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (417, 440), False, 'from keras.layers.core import Dense\n'), ((456, 483), 'keras.layers.core.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (461, 483), False, 'from keras.layers.core import Dense\n'), ((499, 528), 'keras.layers.core.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (504, 528), False, 'from keras.layers.core import Dense\n'), ((186, 230), 'numpy.random.rand', 'np.random.rand', (['number_neuron_connections', '(6)'], {}), '(number_neuron_connections, 6)\n', (200, 230), True, 'import numpy as np\n')]
""" =========== Convex Hull =========== The convex hull of a binary image is the set of pixels included in the smallest convex polygon that surround all white pixels in the input. In this example, we show how the input pixels (white) get filled in by the convex hull (white and grey). A good overview of the algorithm is given on `Steve Eddin's blog <http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/>`__. """ import numpy as np import matplotlib.pyplot as plt from skimage.morphology import convex_hull_image image = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=float) original_image = np.copy(image) chull = convex_hull_image(image) image[chull] += 1 # image is now: # [[ 0. 0. 0. 0. 0. 0. 0. 0. 0.] # [ 0. 0. 0. 0. 2. 0. 0. 0. 0.] # [ 0. 0. 0. 2. 1. 2. 0. 0. 0.] # [ 0. 0. 2. 1. 1. 1. 2. 0. 0.] # [ 0. 2. 1. 1. 1. 1. 1. 2. 0.] # [ 0. 0. 0. 0. 0. 0. 0. 0. 0.]] fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6)) ax1.set_title('Original picture') ax1.imshow(original_image, cmap=plt.cm.gray, interpolation='nearest') ax1.set_xticks([]), ax1.set_yticks([]) ax2.set_title('Transformed picture') ax2.imshow(image, cmap=plt.cm.gray, interpolation='nearest') ax2.set_xticks([]), ax2.set_yticks([]) plt.show()
[ "numpy.copy", "numpy.array", "skimage.morphology.convex_hull_image", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ]
[((561, 767), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, \n 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0]]'], {'dtype': 'float'}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, \n 0, 1, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 0,\n 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=float)\n', (569, 767), True, 'import numpy as np\n'), ((807, 821), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (814, 821), True, 'import numpy as np\n'), ((831, 855), 'skimage.morphology.convex_hull_image', 'convex_hull_image', (['image'], {}), '(image)\n', (848, 855), False, 'from skimage.morphology import convex_hull_image\n'), ((1157, 1192), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 6)'}), '(1, 2, figsize=(10, 6))\n', (1169, 1192), True, 'import matplotlib.pyplot as plt\n'), ((1476, 1486), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1484, 1486), True, 'import matplotlib.pyplot as plt\n')]
import cv2 import torch import scipy.special import numpy as np import torchvision import torchvision.transforms as transforms from PIL import Image from enum import Enum from scipy.spatial.distance import cdist from ultrafastLaneDetector.model import parsingNet lane_colors = [(0,0,255),(0,255,0),(255,0,0),(0,255,255)] tusimple_row_anchor = [ 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152, 156, 160, 164, 168, 172, 176, 180, 184, 188, 192, 196, 200, 204, 208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 248, 252, 256, 260, 264, 268, 272, 276, 280, 284] culane_row_anchor = [121, 131, 141, 150, 160, 170, 180, 189, 199, 209, 219, 228, 238, 248, 258, 267, 277, 287] class ModelType(Enum): TUSIMPLE = 0 CULANE = 1 class ModelConfig(): def __init__(self, model_type): if model_type == ModelType.TUSIMPLE: self.init_tusimple_config() else: self.init_culane_config() def init_tusimple_config(self): self.img_w = 1280 self.img_h = 720 self.row_anchor = tusimple_row_anchor self.griding_num = 100 self.cls_num_per_lane = 56 def init_culane_config(self): self.img_w = 1640 self.img_h = 590 self.row_anchor = culane_row_anchor self.griding_num = 200 self.cls_num_per_lane = 18 class UltrafastLaneDetector(): def __init__(self, model_path, model_type=ModelType.TUSIMPLE, use_gpu=False): self.use_gpu = use_gpu # Load model configuration based on the model type self.cfg = ModelConfig(model_type) # Initialize model self.model = self.initialize_model(model_path, self.cfg, use_gpu) # Initialize image transformation self.img_transform = self.initialize_image_transform() @staticmethod def initialize_model(model_path, cfg, use_gpu): # Load the model architecture net = parsingNet(pretrained = False, backbone='18', cls_dim = (cfg.griding_num+1,cfg.cls_num_per_lane,4), use_aux=False) # we dont need auxiliary segmentation in testing # Load the weights from the downloaded model if use_gpu: net = net.cuda() state_dict = torch.load(model_path, 
map_location='cuda')['model'] # CUDA else: state_dict = torch.load(model_path, map_location='cpu')['model'] # CPU compatible_state_dict = {} for k, v in state_dict.items(): if 'module.' in k: compatible_state_dict[k[7:]] = v else: compatible_state_dict[k] = v # Load the weights into the model net.load_state_dict(compatible_state_dict, strict=False) net.eval() return net @staticmethod def initialize_image_transform(): # Create transfom operation to resize and normalize the input images img_transforms = transforms.Compose([ transforms.Resize((288, 800)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) return img_transforms def detect_lanes(self, image, draw_points=True): input_tensor = self.prepare_input(image) # Perform inference on the image output = self.inference(input_tensor) # Process output data self.lanes_points, self.lanes_detected = self.process_output(output, self.cfg) # Draw depth image visualization_img = self.draw_lanes(image, self.lanes_points, self.lanes_detected, self.cfg, draw_points) return visualization_img def prepare_input(self, img): # Transform the image for inference img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img_pil = Image.fromarray(img) input_img = self.img_transform(img_pil) input_tensor = input_img[None, ...] 
if self.use_gpu: input_tensor = input_tensor.cuda() return input_tensor def inference(self, input_tensor): with torch.no_grad(): output = self.model(input_tensor) return output @staticmethod def process_output(output, cfg): # Parse the output of the model processed_output = output[0].data.cpu().numpy() processed_output = processed_output[:, ::-1, :] prob = scipy.special.softmax(processed_output[:-1, :, :], axis=0) idx = np.arange(cfg.griding_num) + 1 idx = idx.reshape(-1, 1, 1) loc = np.sum(prob * idx, axis=0) processed_output = np.argmax(processed_output, axis=0) loc[processed_output == cfg.griding_num] = 0 processed_output = loc col_sample = np.linspace(0, 800 - 1, cfg.griding_num) col_sample_w = col_sample[1] - col_sample[0] lanes_points = [] lanes_detected = [] max_lanes = processed_output.shape[1] for lane_num in range(max_lanes): lane_points = [] # Check if there are any points detected in the lane if np.sum(processed_output[:, lane_num] != 0) > 2: lanes_detected.append(True) # Process each of the points for each lane for point_num in range(processed_output.shape[0]): if processed_output[point_num, lane_num] > 0: lane_point = [int(processed_output[point_num, lane_num] * col_sample_w * cfg.img_w / 800) - 1, int(cfg.img_h * (cfg.row_anchor[cfg.cls_num_per_lane-1-point_num]/288)) - 1 ] lane_points.append(lane_point) else: lanes_detected.append(False) lanes_points.append(lane_points) return np.array(lanes_points), np.array(lanes_detected) @staticmethod def draw_lanes(input_img, lanes_points, lanes_detected, cfg, draw_points=True): # Write the detected line points in the image visualization_img = cv2.resize(input_img, (cfg.img_w, cfg.img_h), interpolation = cv2.INTER_AREA) # Draw a mask for the current lane if(lanes_detected[1] and lanes_detected[2]): lane_segment_img = visualization_img.copy() cv2.fillPoly(lane_segment_img, pts = [np.vstack((lanes_points[1],np.flipud(lanes_points[2])))], color =(255,191,0)) visualization_img = cv2.addWeighted(visualization_img, 
0.7, lane_segment_img, 0.3, 0) if(draw_points): for lane_num,lane_points in enumerate(lanes_points): for lane_point in lane_points: cv2.circle(visualization_img, (lane_point[0],lane_point[1]), 3, lane_colors[lane_num], -1) return visualization_img
[ "PIL.Image.fromarray", "numpy.flipud", "torch.load", "torchvision.transforms.Resize", "numpy.argmax", "numpy.sum", "numpy.linspace", "numpy.array", "cv2.addWeighted", "cv2.circle", "cv2.cvtColor", "torchvision.transforms.Normalize", "ultrafastLaneDetector.model.parsingNet", "torch.no_grad"...
[((1814, 1932), 'ultrafastLaneDetector.model.parsingNet', 'parsingNet', ([], {'pretrained': '(False)', 'backbone': '"""18"""', 'cls_dim': '(cfg.griding_num + 1, cfg.cls_num_per_lane, 4)', 'use_aux': '(False)'}), "(pretrained=False, backbone='18', cls_dim=(cfg.griding_num + 1,\n cfg.cls_num_per_lane, 4), use_aux=False)\n", (1824, 1932), False, 'from ultrafastLaneDetector.model import parsingNet\n'), ((3349, 3385), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3361, 3385), False, 'import cv2\n'), ((3398, 3418), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (3413, 3418), False, 'from PIL import Image\n'), ((4026, 4052), 'numpy.sum', 'np.sum', (['(prob * idx)'], {'axis': '(0)'}), '(prob * idx, axis=0)\n', (4032, 4052), True, 'import numpy as np\n'), ((4074, 4109), 'numpy.argmax', 'np.argmax', (['processed_output'], {'axis': '(0)'}), '(processed_output, axis=0)\n', (4083, 4109), True, 'import numpy as np\n'), ((4199, 4239), 'numpy.linspace', 'np.linspace', (['(0)', '(800 - 1)', 'cfg.griding_num'], {}), '(0, 800 - 1, cfg.griding_num)\n', (4210, 4239), True, 'import numpy as np\n'), ((5244, 5319), 'cv2.resize', 'cv2.resize', (['input_img', '(cfg.img_w, cfg.img_h)'], {'interpolation': 'cv2.INTER_AREA'}), '(input_img, (cfg.img_w, cfg.img_h), interpolation=cv2.INTER_AREA)\n', (5254, 5319), False, 'import cv2\n'), ((3624, 3639), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3637, 3639), False, 'import torch\n'), ((3957, 3983), 'numpy.arange', 'np.arange', (['cfg.griding_num'], {}), '(cfg.griding_num)\n', (3966, 3983), True, 'import numpy as np\n'), ((5028, 5050), 'numpy.array', 'np.array', (['lanes_points'], {}), '(lanes_points)\n', (5036, 5050), True, 'import numpy as np\n'), ((5052, 5076), 'numpy.array', 'np.array', (['lanes_detected'], {}), '(lanes_detected)\n', (5060, 5076), True, 'import numpy as np\n'), ((5600, 5665), 'cv2.addWeighted', 'cv2.addWeighted', (['visualization_img', '(0.7)', 
'lane_segment_img', '(0.3)', '(0)'], {}), '(visualization_img, 0.7, lane_segment_img, 0.3, 0)\n', (5615, 5665), False, 'import cv2\n'), ((2083, 2126), 'torch.load', 'torch.load', (['model_path'], {'map_location': '"""cuda"""'}), "(model_path, map_location='cuda')\n", (2093, 2126), False, 'import torch\n'), ((2167, 2209), 'torch.load', 'torch.load', (['model_path'], {'map_location': '"""cpu"""'}), "(model_path, map_location='cpu')\n", (2177, 2209), False, 'import torch\n'), ((2678, 2707), 'torchvision.transforms.Resize', 'transforms.Resize', (['(288, 800)'], {}), '((288, 800))\n', (2695, 2707), True, 'import torchvision.transforms as transforms\n'), ((2712, 2733), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2731, 2733), True, 'import torchvision.transforms as transforms\n'), ((2738, 2804), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (2758, 2804), True, 'import torchvision.transforms as transforms\n'), ((4489, 4531), 'numpy.sum', 'np.sum', (['(processed_output[:, lane_num] != 0)'], {}), '(processed_output[:, lane_num] != 0)\n', (4495, 4531), True, 'import numpy as np\n'), ((5782, 5877), 'cv2.circle', 'cv2.circle', (['visualization_img', '(lane_point[0], lane_point[1])', '(3)', 'lane_colors[lane_num]', '(-1)'], {}), '(visualization_img, (lane_point[0], lane_point[1]), 3,\n lane_colors[lane_num], -1)\n', (5792, 5877), False, 'import cv2\n'), ((5526, 5552), 'numpy.flipud', 'np.flipud', (['lanes_points[2]'], {}), '(lanes_points[2])\n', (5535, 5552), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*- """ Analysis + Visualization functions for planning """ # Author: <NAME> <<EMAIL>> # License: MIT import warnings import numpy as np import matplotlib.pyplot as plt import opencda.core.plan.drive_profile_plotting as open_plt class PlanDebugHelper(object): """This class aims to save statistics for planner behaviour Attributes: speed_list (list): The list containing speed info(m/s) of all time-steps acc_list(list): The list containing acceleration info(m^2/s) of all time-steps ttc_list(list): The list containing ttc info(s) for all time-steps count(int): Used to count how many simulation steps have been executed. """ def __init__(self, actor_id): self.actor_id = actor_id self.speed_list = [[]] self.acc_list = [[]] self.ttc_list = [[]] self.count = 0 def update(self, ego_speed, ttc): """ Update the speed info. Args: ego_speed(km/h): Ego speed. ttc(s): time to collision. Returns: """ self.count += 1 # at the very beginning, the vehicle is in a spawn state, so we should filter out the first 100 data points. 
if self.count > 100: self.speed_list[0].append(ego_speed / 3.6) if len(self.speed_list[0]) <= 1: self.acc_list[0].append(0) else: self.acc_list[0].append((self.speed_list[0][-1] - self.speed_list[0][-2]) / 0.05) self.ttc_list[0].append(ttc) def evaluate(self): warnings.filterwarnings('ignore') # draw speed, acc and ttc plotting figure = plt.figure() plt.subplot(311) open_plt.draw_velocity_profile_single_plot(self.speed_list) plt.subplot(312) open_plt.draw_acceleration_profile_single_plot(self.acc_list) plt.subplot(313) open_plt.draw_ttc_profile_single_plot(self.ttc_list) figure.suptitle('planning profile of actor id %d' % self.actor_id) # calculate the statistics spd_avg = np.mean(np.array(self.speed_list[0])) spd_std = np.std(np.array(self.speed_list[0])) acc_avg = np.mean(np.array(self.acc_list[0])) acc_std = np.std(np.array(self.acc_list[0])) ttc_array = np.array(self.ttc_list[0]) ttc_array = ttc_array[ttc_array < 1000] ttc_avg = np.mean(ttc_array) ttc_std = np.std(ttc_array) perform_txt = 'Speed average: %f (m/s), ' \ 'Speed std: %f (m/s) \n' % (spd_avg, spd_std) perform_txt += 'Acceleration average: %f (m/s), ' \ 'Acceleration std: %f (m/s) \n' % (acc_avg, acc_std) perform_txt += 'TTC average: %f (m/s), ' \ 'TTC std: %f (m/s) \n' % (ttc_avg, ttc_std) return figure, perform_txt
[ "numpy.mean", "opencda.core.plan.drive_profile_plotting.draw_velocity_profile_single_plot", "opencda.core.plan.drive_profile_plotting.draw_ttc_profile_single_plot", "numpy.array", "matplotlib.pyplot.figure", "opencda.core.plan.drive_profile_plotting.draw_acceleration_profile_single_plot", "numpy.std", ...
[((1584, 1617), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1607, 1617), False, 'import warnings\n'), ((1678, 1690), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1688, 1690), True, 'import matplotlib.pyplot as plt\n'), ((1699, 1715), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (1710, 1715), True, 'import matplotlib.pyplot as plt\n'), ((1724, 1783), 'opencda.core.plan.drive_profile_plotting.draw_velocity_profile_single_plot', 'open_plt.draw_velocity_profile_single_plot', (['self.speed_list'], {}), '(self.speed_list)\n', (1766, 1783), True, 'import opencda.core.plan.drive_profile_plotting as open_plt\n'), ((1793, 1809), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (1804, 1809), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1879), 'opencda.core.plan.drive_profile_plotting.draw_acceleration_profile_single_plot', 'open_plt.draw_acceleration_profile_single_plot', (['self.acc_list'], {}), '(self.acc_list)\n', (1864, 1879), True, 'import opencda.core.plan.drive_profile_plotting as open_plt\n'), ((1889, 1905), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (1900, 1905), True, 'import matplotlib.pyplot as plt\n'), ((1914, 1966), 'opencda.core.plan.drive_profile_plotting.draw_ttc_profile_single_plot', 'open_plt.draw_ttc_profile_single_plot', (['self.ttc_list'], {}), '(self.ttc_list)\n', (1951, 1966), True, 'import opencda.core.plan.drive_profile_plotting as open_plt\n'), ((2319, 2345), 'numpy.array', 'np.array', (['self.ttc_list[0]'], {}), '(self.ttc_list[0])\n', (2327, 2345), True, 'import numpy as np\n'), ((2412, 2430), 'numpy.mean', 'np.mean', (['ttc_array'], {}), '(ttc_array)\n', (2419, 2430), True, 'import numpy as np\n'), ((2449, 2466), 'numpy.std', 'np.std', (['ttc_array'], {}), '(ttc_array)\n', (2455, 2466), True, 'import numpy as np\n'), ((2105, 2133), 'numpy.array', 'np.array', (['self.speed_list[0]'], {}), 
'(self.speed_list[0])\n', (2113, 2133), True, 'import numpy as np\n'), ((2160, 2188), 'numpy.array', 'np.array', (['self.speed_list[0]'], {}), '(self.speed_list[0])\n', (2168, 2188), True, 'import numpy as np\n'), ((2217, 2243), 'numpy.array', 'np.array', (['self.acc_list[0]'], {}), '(self.acc_list[0])\n', (2225, 2243), True, 'import numpy as np\n'), ((2270, 2296), 'numpy.array', 'np.array', (['self.acc_list[0]'], {}), '(self.acc_list[0])\n', (2278, 2296), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*- import numpy as np import pprint from envs.GridWorld import GridworldEnv pp = pprint.PrettyPrinter(indent=2) env = GridworldEnv() import matplotlib.pyplot as pl def value_iteration(env, theta=0.0001, discount_factor=1.0): """ Value Iteration Algorithm. Args: env: OpenAI env. env.P represents the transition probabilities of the environment. env.P[s][a] is a list of transition tuples (prob, next_state, reward, done). env.nS is a number of states in the environment. env.nA is a number of actions in the environment. theta: We stop evaluation once our value function change is less than theta for all states. discount_factor: Gamma discount factor. Returns: A tuple (policy, V) of the optimal policy and the optimal value function. """ V = np.zeros(env.nS) policy = np.zeros([env.nS, env.nA]) k=0 historical=[] errors=[] while True: delta=0 historical+=[np.copy(V)] error=0 for s in range(env.nS): v=V[s] choiceActions=computeTerm(env,discount_factor,V,s) V[s]=np.max(choiceActions) delta=max(delta,np.abs(v-V[s])) error+=np.abs(v-V[s]) k+=1 print('iteration ',k, 'error %.3e'%delta) errors+=[error/env.nS] if delta<theta: break for s in range(env.nS): policy[s][np.argmax(V[s])]=1 return policy, V,historical,errors def computeTerm(env,discount_factor,V,s): output=np.zeros([env.nA]) for a in range(env.nA): temp=0 for sp in range(len(env.P[s][a])): transition=env.P[s][a][sp] temp+=transition[0]*(transition[2]+discount_factor*V[transition[1]]) output[a]=temp return(output) policy, v,historical,errors = value_iteration(env) print("Policy Probability Distribution:") print(policy) print("") print("Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):") print(np.reshape(np.argmax(policy, axis=1), env.shape)) print("") print("Value Function:") print(v) print("") print("Reshaped Grid Value Function:") print(v.reshape(env.shape)) print("") # Test the value function expected_v = np.array([ 0, -1, -2, -3, -1, -2, -3, -2, -2, -3, -2, -1, -3, -2, -1, 0]) 
np.testing.assert_array_almost_equal(v, expected_v, decimal=2) pl.clf() fig=pl.figure(figsize=(12,5)) for i in range(len(historical)): ax=fig.add_subplot('15%d'%(i+1)) ax.imshow(np.reshape(historical[i],env.shape)) pl.title('iteration {}'.format(i)) ax=fig.add_subplot('155') ax.imshow(np.reshape(expected_v,env.shape)) pl.title('expected value') pl.savefig('ValueIteration.png') pl.legend() pl.show() fig=pl.figure() pl.plot(errors) pl.xlabel('iteration') pl.ylabel('L1 difference') pl.title('variation of the Value Function') pl.legend() fig.savefig('errorValueIteration.png') pl.show()
[ "matplotlib.pyplot.ylabel", "numpy.array", "numpy.testing.assert_array_almost_equal", "envs.GridWorld.GridworldEnv", "numpy.reshape", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "numpy.max", "pprint.PrettyPrinter", "numpy.abs", "matplotlib.pyplot.savefig", "numpy.argmax", "matplotl...
[((106, 136), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(2)'}), '(indent=2)\n', (126, 136), False, 'import pprint\n'), ((143, 157), 'envs.GridWorld.GridworldEnv', 'GridworldEnv', ([], {}), '()\n', (155, 157), False, 'from envs.GridWorld import GridworldEnv\n'), ((2304, 2376), 'numpy.array', 'np.array', (['[0, -1, -2, -3, -1, -2, -3, -2, -2, -3, -2, -1, -3, -2, -1, 0]'], {}), '([0, -1, -2, -3, -1, -2, -3, -2, -2, -3, -2, -1, -3, -2, -1, 0])\n', (2312, 2376), True, 'import numpy as np\n'), ((2379, 2441), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['v', 'expected_v'], {'decimal': '(2)'}), '(v, expected_v, decimal=2)\n', (2415, 2441), True, 'import numpy as np\n'), ((2443, 2451), 'matplotlib.pyplot.clf', 'pl.clf', ([], {}), '()\n', (2449, 2451), True, 'import matplotlib.pyplot as pl\n'), ((2456, 2482), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (2465, 2482), True, 'import matplotlib.pyplot as pl\n'), ((2722, 2748), 'matplotlib.pyplot.title', 'pl.title', (['"""expected value"""'], {}), "('expected value')\n", (2730, 2748), True, 'import matplotlib.pyplot as pl\n'), ((2750, 2782), 'matplotlib.pyplot.savefig', 'pl.savefig', (['"""ValueIteration.png"""'], {}), "('ValueIteration.png')\n", (2760, 2782), True, 'import matplotlib.pyplot as pl\n'), ((2783, 2794), 'matplotlib.pyplot.legend', 'pl.legend', ([], {}), '()\n', (2792, 2794), True, 'import matplotlib.pyplot as pl\n'), ((2795, 2804), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (2802, 2804), True, 'import matplotlib.pyplot as pl\n'), ((2811, 2822), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (2820, 2822), True, 'import matplotlib.pyplot as pl\n'), ((2823, 2838), 'matplotlib.pyplot.plot', 'pl.plot', (['errors'], {}), '(errors)\n', (2830, 2838), True, 'import matplotlib.pyplot as pl\n'), ((2839, 2861), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""iteration"""'], {}), 
"('iteration')\n", (2848, 2861), True, 'import matplotlib.pyplot as pl\n'), ((2862, 2888), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""L1 difference"""'], {}), "('L1 difference')\n", (2871, 2888), True, 'import matplotlib.pyplot as pl\n'), ((2889, 2932), 'matplotlib.pyplot.title', 'pl.title', (['"""variation of the Value Function"""'], {}), "('variation of the Value Function')\n", (2897, 2932), True, 'import matplotlib.pyplot as pl\n'), ((2933, 2944), 'matplotlib.pyplot.legend', 'pl.legend', ([], {}), '()\n', (2942, 2944), True, 'import matplotlib.pyplot as pl\n'), ((2985, 2994), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (2992, 2994), True, 'import matplotlib.pyplot as pl\n'), ((892, 908), 'numpy.zeros', 'np.zeros', (['env.nS'], {}), '(env.nS)\n', (900, 908), True, 'import numpy as np\n'), ((922, 948), 'numpy.zeros', 'np.zeros', (['[env.nS, env.nA]'], {}), '([env.nS, env.nA])\n', (930, 948), True, 'import numpy as np\n'), ((1622, 1640), 'numpy.zeros', 'np.zeros', (['[env.nA]'], {}), '([env.nA])\n', (1630, 1640), True, 'import numpy as np\n'), ((2688, 2721), 'numpy.reshape', 'np.reshape', (['expected_v', 'env.shape'], {}), '(expected_v, env.shape)\n', (2698, 2721), True, 'import numpy as np\n'), ((2091, 2116), 'numpy.argmax', 'np.argmax', (['policy'], {'axis': '(1)'}), '(policy, axis=1)\n', (2100, 2116), True, 'import numpy as np\n'), ((2566, 2602), 'numpy.reshape', 'np.reshape', (['historical[i]', 'env.shape'], {}), '(historical[i], env.shape)\n', (2576, 2602), True, 'import numpy as np\n'), ((1042, 1052), 'numpy.copy', 'np.copy', (['V'], {}), '(V)\n', (1049, 1052), True, 'import numpy as np\n'), ((1201, 1222), 'numpy.max', 'np.max', (['choiceActions'], {}), '(choiceActions)\n', (1207, 1222), True, 'import numpy as np\n'), ((1286, 1302), 'numpy.abs', 'np.abs', (['(v - V[s])'], {}), '(v - V[s])\n', (1292, 1302), True, 'import numpy as np\n'), ((1488, 1503), 'numpy.argmax', 'np.argmax', (['V[s]'], {}), '(V[s])\n', (1497, 1503), True, 'import numpy as 
np\n'), ((1251, 1267), 'numpy.abs', 'np.abs', (['(v - V[s])'], {}), '(v - V[s])\n', (1257, 1267), True, 'import numpy as np\n')]
import numpy as np import numba as nb _signatures = [ (nb.float32[:], nb.float32[:], nb.float32[:]), (nb.float64[:], nb.float64[:], nb.float64[:]), ] @nb.njit(_signatures, cache=True) def _de_castlejau(z, beta, res): # De Casteljau algorithm, numerically stable n = len(beta) if n == 0: res[:] = np.nan else: betai = np.empty_like(beta) for iz, zi in enumerate(z): azi = 1.0 - zi betai[:] = beta for j in range(1, n): for k in range(n - j): betai[k] = betai[k] * azi + betai[k + 1] * zi res[iz] = betai[0] return res _signatures = [ nb.float32[:](nb.float32[:]), nb.float64[:](nb.float64[:]), ] @nb.njit(_signatures, cache=True) def _beta_int(beta): n = len(beta) r = np.zeros(n + 1, dtype=beta.dtype) for j in range(1, n + 1): for k in range(j): r[j] += beta[k] r *= 1.0 / n return r @nb.njit(cache=True) def _prepare_z_beta(x, xmin, xmax, beta): inverse_scale = 1 / (xmax - xmin) z = x.copy() z -= xmin z *= inverse_scale # beta = beta.copy() # inverse_scale /= len(beta) + 1 # beta *= inverse_scale return z, beta def _prepare_array(x): x = np.atleast_1d(x) if x.dtype.kind != "f": x = x.astype(np.float64) return x _signatures = [ (nb.float32[:], nb.float32[:], nb.float32[:], nb.float32[:], nb.float32[:]), (nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:]), ] @nb.guvectorize(_signatures, "(),(n),(),()->()", cache=True) def scaled_pdf(x, beta, xmin, xmax, res): z, beta = _prepare_z_beta(x, xmin, xmax, beta) _de_castlejau(z, beta, res) @nb.guvectorize(_signatures, "(),(n),(),()->()", cache=True) def scaled_cdf(x, beta, xmin, xmax, res): z, beta = _prepare_z_beta(x, xmin, xmax, beta) beta = _beta_int(beta) _de_castlejau(z, beta, res) @nb.extending.overload(scaled_pdf) def bernstein_scaled_pdf_ol(x, beta, xmin, xmax): from numba.core.errors import TypingError from numba.types import Array, Float if not isinstance(x, Array): raise TypingError("x must be a Numpy array") if not isinstance(beta, Array): raise TypingError("beta must be a Numpy array") if not 
isinstance(xmin, Float): raise TypingError("xmin must be float") if not isinstance(xmax, Float): raise TypingError("xmax must be float") def impl(x, beta, xmin, xmax): z, beta = _prepare_z_beta(x, xmin, xmax, beta) res = np.empty_like(z) _de_castlejau(z, beta, res) return res return impl @nb.extending.overload(scaled_cdf) def bernstein_scaled_cdf_ol(x, beta, xmin, xmax): from numba.core.errors import TypingError from numba.types import Array, Float if not isinstance(x, Array): raise TypingError("x must be a Numpy array") if not isinstance(beta, Array): raise TypingError("beta must be a Numpy array") if not isinstance(xmin, Float): raise TypingError("xmin must be float") if not isinstance(xmax, Float): raise TypingError("xmax must be float") def impl(x, beta, xmin, xmax): z, beta = _prepare_z_beta(x, xmin, xmax, beta) beta = _beta_int(beta) res = np.empty_like(z) _de_castlejau(z, beta, res) return res return impl density = scaled_pdf
[ "numba.extending.overload", "numba.njit", "numba.core.errors.TypingError", "numba.guvectorize", "numpy.zeros", "numpy.empty_like", "numpy.atleast_1d" ]
[((163, 195), 'numba.njit', 'nb.njit', (['_signatures'], {'cache': '(True)'}), '(_signatures, cache=True)\n', (170, 195), True, 'import numba as nb\n'), ((748, 780), 'numba.njit', 'nb.njit', (['_signatures'], {'cache': '(True)'}), '(_signatures, cache=True)\n', (755, 780), True, 'import numba as nb\n'), ((980, 999), 'numba.njit', 'nb.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (987, 999), True, 'import numba as nb\n'), ((1552, 1611), 'numba.guvectorize', 'nb.guvectorize', (['_signatures', '"""(),(n),(),()->()"""'], {'cache': '(True)'}), "(_signatures, '(),(n),(),()->()', cache=True)\n", (1566, 1611), True, 'import numba as nb\n'), ((1740, 1799), 'numba.guvectorize', 'nb.guvectorize', (['_signatures', '"""(),(n),(),()->()"""'], {'cache': '(True)'}), "(_signatures, '(),(n),(),()->()', cache=True)\n", (1754, 1799), True, 'import numba as nb\n'), ((1955, 1988), 'numba.extending.overload', 'nb.extending.overload', (['scaled_pdf'], {}), '(scaled_pdf)\n', (1976, 1988), True, 'import numba as nb\n'), ((2670, 2703), 'numba.extending.overload', 'nb.extending.overload', (['scaled_cdf'], {}), '(scaled_cdf)\n', (2691, 2703), True, 'import numba as nb\n'), ((828, 861), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {'dtype': 'beta.dtype'}), '(n + 1, dtype=beta.dtype)\n', (836, 861), True, 'import numpy as np\n'), ((1276, 1292), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (1289, 1292), True, 'import numpy as np\n'), ((361, 380), 'numpy.empty_like', 'np.empty_like', (['beta'], {}), '(beta)\n', (374, 380), True, 'import numpy as np\n'), ((2174, 2212), 'numba.core.errors.TypingError', 'TypingError', (['"""x must be a Numpy array"""'], {}), "('x must be a Numpy array')\n", (2185, 2212), False, 'from numba.core.errors import TypingError\n'), ((2263, 2304), 'numba.core.errors.TypingError', 'TypingError', (['"""beta must be a Numpy array"""'], {}), "('beta must be a Numpy array')\n", (2274, 2304), False, 'from numba.core.errors import TypingError\n'), ((2355, 2388), 
'numba.core.errors.TypingError', 'TypingError', (['"""xmin must be float"""'], {}), "('xmin must be float')\n", (2366, 2388), False, 'from numba.core.errors import TypingError\n'), ((2439, 2472), 'numba.core.errors.TypingError', 'TypingError', (['"""xmax must be float"""'], {}), "('xmax must be float')\n", (2450, 2472), False, 'from numba.core.errors import TypingError\n'), ((2578, 2594), 'numpy.empty_like', 'np.empty_like', (['z'], {}), '(z)\n', (2591, 2594), True, 'import numpy as np\n'), ((2889, 2927), 'numba.core.errors.TypingError', 'TypingError', (['"""x must be a Numpy array"""'], {}), "('x must be a Numpy array')\n", (2900, 2927), False, 'from numba.core.errors import TypingError\n'), ((2978, 3019), 'numba.core.errors.TypingError', 'TypingError', (['"""beta must be a Numpy array"""'], {}), "('beta must be a Numpy array')\n", (2989, 3019), False, 'from numba.core.errors import TypingError\n'), ((3070, 3103), 'numba.core.errors.TypingError', 'TypingError', (['"""xmin must be float"""'], {}), "('xmin must be float')\n", (3081, 3103), False, 'from numba.core.errors import TypingError\n'), ((3154, 3187), 'numba.core.errors.TypingError', 'TypingError', (['"""xmax must be float"""'], {}), "('xmax must be float')\n", (3165, 3187), False, 'from numba.core.errors import TypingError\n'), ((3324, 3340), 'numpy.empty_like', 'np.empty_like', (['z'], {}), '(z)\n', (3337, 3340), True, 'import numpy as np\n')]
# Experiment script to compare ReLU dAs versus Gaussian-Bernoulli dAs # Train each with varying amounts of noise, import numpy import theano import theano.tensor as T from theano.tensor.shared_randomstreams import RandomStreams from AutoEncoder import AutoEncoder from AutoEncoder import ReluAutoEncoder from AutoEncoder import GaussianAutoEncoder from extract_datasets import extract_labeled_chunkrange from load_shared import load_data_labeled from tables import * import os import sys import time from datetime import datetime from optparse import OptionParser def drive_dA(learning_rate=0.00001, training_epochs=100, batch_size=32): """ This dA is driven with foci data :type learning_rate: float :param learning_rate: learning rate used for training the DeNosing AutoEncoder :type training_epochs: int :param training_epochs: number of epochs used for training :type batch_size: int :param batch_size: size of each minibatch """ parser = OptionParser() parser.add_option("-d", "--dir", dest="dir", help="test output directory") parser.add_option("-c", "--corruption", dest="corruption", help="use this amount of corruption for the denoising AE", type="float") parser.add_option("-i", "--inputfile", dest="inputfile", help="the hdf5 filename as an absolute pathname") (options, args) = parser.parse_args() #current_dir = os.getcwd() #os.chdir(options.dir) today = datetime.today() day = str(today.date()) hour = str(today.time()) corruptn = str(options.corruption) #output_filename = "gb_da." + "corruption_" + corruptn + "_" + day + "." 
+ hour #output_file = open(output_filename,'w') #print >> output_file, "Run on " + str(datetime.now()) #os.chdir(current_dir) data_set_file = openFile(str(options.inputfile), mode = 'r') datafiles, labels = extract_labeled_chunkrange(data_set_file, num_files = 10) datasets = load_data_labeled(datafiles, labels) train_set_x, train_set_y = datasets[0] data_set_file.close() # compute number of minibatches for training, validation and testing n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size n_cols = train_set_x.get_value(borrow=True).shape[1] # allocate symbolic variables for the data index = T.lscalar() # index to a [mini]batch x = T.matrix('x') # the data matrix ################################## # Build the GaussianBernoulli dA # ################################## #rng = numpy.random.RandomState(2345) #theano_rng = RandomStreams(rng.randint(2 ** 30)) #da = GaussianAutoEncoder(numpy_rng=rng, theano_rng=theano_rng, input=x, #n_visible=n_cols, n_hidden=800) #cost, updates = da.get_cost_updates(corruption_level=options.corruption, #learning_rate=learning_rate) #train_da = theano.function([index], cost, updates=updates, #givens={x: train_set_x[index * batch_size: #(index + 1) * batch_size]}) #start_time = time.clock() ############# ## TRAINING # ############# ## go through training epochs #for epoch in xrange(training_epochs): ## go through training set #c = [] #for batch_index in xrange(n_train_batches): #c.append(train_da(batch_index)) #print >> output_file, 'Training epoch %d, cost ' % epoch, numpy.mean(c) #end_time = time.clock() #training_time = (end_time - start_time) #print >> output_file, ('The ' + str(options.corruption) + ' corruption code for file ' + #os.path.split(__file__)[1] + #' ran for %.2fm' % ((training_time) / 60.)) #output_file.close() ########## # Build the ReLU dA ########## output_filename = "relu_da." + "corruption_" + corruptn + "_" + day + "." 
+ hour current_dir = os.getcwd() os.chdir(options.dir) output_file = open(output_filename,'w') os.chdir(current_dir) print >> output_file, "Run on " + str(datetime.now()) rng = numpy.random.RandomState(6789) theano_rng = RandomStreams(rng.randint(2 ** 30)) da = ReluAutoEncoder(numpy_rng=rng, theano_rng=theano_rng, input=x, n_visible=n_cols, n_hidden=800) cost, updates = da.get_cost_updates_safe(corruption_level=float(options.corruption), learning_rate=learning_rate,mb_size=batch_size) train_da = theano.function([index], cost, updates=updates, givens={x: train_set_x[index * batch_size: (index + 1) * batch_size]}) start_time = time.clock() ########## # Train the model ########## # go through training epochs for epoch in xrange(training_epochs): # go through trainng set c = [] for batch_index in xrange(n_train_batches): c.append(train_da(batch_index)) print >> output_file, 'Training epoch %d, cost ' % epoch, numpy.mean(c) end_time = time.clock() training_time = (end_time - start_time) print >> output_file, ('The ' + str(options.corruption) + ' corruption code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((training_time) / 60.)) output_file.close() if __name__ == '__main__': drive_dA()
[ "load_shared.load_data_labeled", "numpy.mean", "AutoEncoder.ReluAutoEncoder", "theano.tensor.lscalar", "theano.function", "time.clock", "theano.tensor.matrix", "optparse.OptionParser", "extract_datasets.extract_labeled_chunkrange", "os.getcwd", "os.chdir", "datetime.datetime.now", "os.path.s...
[((1081, 1095), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (1093, 1095), False, 'from optparse import OptionParser\n'), ((1544, 1560), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1558, 1560), False, 'from datetime import datetime\n'), ((1980, 2035), 'extract_datasets.extract_labeled_chunkrange', 'extract_labeled_chunkrange', (['data_set_file'], {'num_files': '(10)'}), '(data_set_file, num_files=10)\n', (2006, 2035), False, 'from extract_datasets import extract_labeled_chunkrange\n'), ((2053, 2089), 'load_shared.load_data_labeled', 'load_data_labeled', (['datafiles', 'labels'], {}), '(datafiles, labels)\n', (2070, 2089), False, 'from load_shared import load_data_labeled\n'), ((2430, 2441), 'theano.tensor.lscalar', 'T.lscalar', ([], {}), '()\n', (2439, 2441), True, 'import theano.tensor as T\n'), ((2478, 2491), 'theano.tensor.matrix', 'T.matrix', (['"""x"""'], {}), "('x')\n", (2486, 2491), True, 'import theano.tensor as T\n'), ((4082, 4093), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4091, 4093), False, 'import os\n'), ((4102, 4123), 'os.chdir', 'os.chdir', (['options.dir'], {}), '(options.dir)\n', (4110, 4123), False, 'import os\n'), ((4176, 4197), 'os.chdir', 'os.chdir', (['current_dir'], {}), '(current_dir)\n', (4184, 4197), False, 'import os\n'), ((4276, 4306), 'numpy.random.RandomState', 'numpy.random.RandomState', (['(6789)'], {}), '(6789)\n', (4300, 4306), False, 'import numpy\n'), ((4371, 4470), 'AutoEncoder.ReluAutoEncoder', 'ReluAutoEncoder', ([], {'numpy_rng': 'rng', 'theano_rng': 'theano_rng', 'input': 'x', 'n_visible': 'n_cols', 'n_hidden': '(800)'}), '(numpy_rng=rng, theano_rng=theano_rng, input=x, n_visible=\n n_cols, n_hidden=800)\n', (4386, 4470), False, 'from AutoEncoder import ReluAutoEncoder\n'), ((4684, 4806), 'theano.function', 'theano.function', (['[index]', 'cost'], {'updates': 'updates', 'givens': '{x: train_set_x[index * batch_size:(index + 1) * batch_size]}'}), '([index], cost, updates=updates, 
givens={x: train_set_x[\n index * batch_size:(index + 1) * batch_size]})\n', (4699, 4806), False, 'import theano\n'), ((4915, 4927), 'time.clock', 'time.clock', ([], {}), '()\n', (4925, 4927), False, 'import time\n'), ((5306, 5318), 'time.clock', 'time.clock', ([], {}), '()\n', (5316, 5318), False, 'import time\n'), ((5276, 5289), 'numpy.mean', 'numpy.mean', (['c'], {}), '(c)\n', (5286, 5289), False, 'import numpy\n'), ((4240, 4254), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4252, 4254), False, 'from datetime import datetime\n'), ((5484, 5507), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (5497, 5507), False, 'import os\n')]
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys from scipy.io import loadmat import numpy as np from scipy import linalg import glob import pickle from six.moves import xrange # pylint: disable=redefined-builtin from six.moves import urllib import tensorflow as tf from dataset_utils import * DATA_URL_TRAIN = 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat' DATA_URL_TEST = 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat' FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('data_dir', '/cache/vat-tf/svhn', "") tf.app.flags.DEFINE_integer('num_labeled_examples', 1000, "The number of labeled examples") tf.app.flags.DEFINE_integer('num_valid_examples', 1000, "The number of validation examples") tf.app.flags.DEFINE_integer('dataset_seed', 1, "dataset seed") NUM_EXAMPLES_TRAIN = 73257 NUM_EXAMPLES_TEST = 26032 def maybe_download_and_extract(): if not os.path.exists(FLAGS.data_dir): os.makedirs(FLAGS.data_dir) filepath_train_mat = os.path.join(FLAGS.data_dir, 'train_32x32.mat') filepath_test_mat = os.path.join(FLAGS.data_dir, 'test_32x32.mat') if not os.path.exists(filepath_train_mat) or not os.path.exists(filepath_test_mat): def _progress(count, block_size, total_size): sys.stdout.write('\r>> Downloading %.1f%%' % (float(count * block_size) / float(total_size) * 100.0)) sys.stdout.flush() urllib.request.urlretrieve(DATA_URL_TRAIN, filepath_train_mat, _progress) urllib.request.urlretrieve(DATA_URL_TEST, filepath_test_mat, _progress) # Training set print("Loading training data...") print("Preprocessing training data...") train_data = loadmat(FLAGS.data_dir + '/train_32x32.mat') train_x = (-127.5 + train_data['X']) / 255. 
train_x = train_x.transpose((3, 0, 1, 2)) train_x = train_x.reshape([train_x.shape[0], -1]) train_y = train_data['y'].flatten().astype(np.int32) train_y[train_y == 10] = 0 # Test set print("Loading test data...") test_data = loadmat(FLAGS.data_dir + '/test_32x32.mat') test_x = (-127.5 + test_data['X']) / 255. test_x = test_x.transpose((3, 0, 1, 2)) test_x = test_x.reshape((test_x.shape[0], -1)) test_y = test_data['y'].flatten().astype(np.int32) test_y[test_y == 10] = 0 np.save('{}/train_images'.format(FLAGS.data_dir), train_x) np.save('{}/train_labels'.format(FLAGS.data_dir), train_y) np.save('{}/test_images'.format(FLAGS.data_dir), test_x) np.save('{}/test_labels'.format(FLAGS.data_dir), test_y) def load_svhn(): maybe_download_and_extract() train_images = np.load('{}/train_images.npy'.format(FLAGS.data_dir)).astype(np.float32) train_labels = np.load('{}/train_labels.npy'.format(FLAGS.data_dir)).astype(np.float32) test_images = np.load('{}/test_images.npy'.format(FLAGS.data_dir)).astype(np.float32) test_labels = np.load('{}/test_labels.npy'.format(FLAGS.data_dir)).astype(np.float32) return (train_images, train_labels), (test_images, test_labels) def prepare_dataset(): (train_images, train_labels), (test_images, test_labels) = load_svhn() dirpath = os.path.join(FLAGS.data_dir, 'seed' + str(FLAGS.dataset_seed)) if not os.path.exists(dirpath): os.makedirs(dirpath) rng = np.random.RandomState(FLAGS.dataset_seed) rand_ix = rng.permutation(NUM_EXAMPLES_TRAIN) print(rand_ix) _train_images, _train_labels = train_images[rand_ix], train_labels[rand_ix] labeled_ind = np.arange(FLAGS.num_labeled_examples) labeled_train_images, labeled_train_labels = _train_images[labeled_ind], _train_labels[labeled_ind] _train_images = np.delete(_train_images, labeled_ind, 0) _train_labels = np.delete(_train_labels, labeled_ind, 0) convert_images_and_labels(labeled_train_images, labeled_train_labels, os.path.join(dirpath, 'labeled_train.tfrecords')) convert_images_and_labels(train_images, 
train_labels, os.path.join(dirpath, 'unlabeled_train.tfrecords')) convert_images_and_labels(test_images, test_labels, os.path.join(dirpath, 'test.tfrecords')) # Construct dataset for validation train_images_valid, train_labels_valid = labeled_train_images, labeled_train_labels test_images_valid, test_labels_valid = \ _train_images[:FLAGS.num_valid_examples], _train_labels[:FLAGS.num_valid_examples] unlabeled_train_images_valid = np.concatenate( (train_images_valid, _train_images[FLAGS.num_valid_examples:]), axis=0) unlabeled_train_labels_valid = np.concatenate( (train_labels_valid, _train_labels[FLAGS.num_valid_examples:]), axis=0) convert_images_and_labels(train_images_valid, train_labels_valid, os.path.join(dirpath, 'labeled_train_val.tfrecords')) convert_images_and_labels(unlabeled_train_images_valid, unlabeled_train_labels_valid, os.path.join(dirpath, 'unlabeled_train_val.tfrecords')) convert_images_and_labels(test_images_valid, test_labels_valid, os.path.join(dirpath, 'test_val.tfrecords')) def inputs(batch_size=100, train=True, validation=False, shuffle=True, num_epochs=None): if validation: if train: filenames = ['labeled_train_val.tfrecords'] num_examples = FLAGS.num_labeled_examples else: filenames = ['test_val.tfrecords'] num_examples = FLAGS.num_valid_examples else: if train: filenames = ['labeled_train.tfrecords'] num_examples = FLAGS.num_labeled_examples else: filenames = ['test.tfrecords'] num_examples = NUM_EXAMPLES_TEST filenames = [os.path.join('seed' + str(FLAGS.dataset_seed), filename) for filename in filenames] filename_queue = generate_filename_queue(filenames, FLAGS.data_dir, num_epochs) image, label = read(filename_queue) image = transform(tf.cast(image, tf.float32)) if train else image return generate_batch([image, label], num_examples, batch_size, shuffle) def unlabeled_inputs(batch_size=100, validation=False, shuffle=True): if validation: filenames = ['unlabeled_train_val.tfrecords'] num_examples = NUM_EXAMPLES_TRAIN - 
FLAGS.num_valid_examples else: filenames = ['unlabeled_train.tfrecords'] num_examples = NUM_EXAMPLES_TRAIN filenames = [os.path.join('seed' + str(FLAGS.dataset_seed), filename) for filename in filenames] filename_queue = generate_filename_queue(filenames, data_dir=FLAGS.data_dir) image, label = read(filename_queue) image = transform(tf.cast(image, tf.float32)) return generate_batch([image], num_examples, batch_size, shuffle) def main(argv): prepare_dataset() if __name__ == "__main__": tf.app.run()
[ "os.path.exists", "tensorflow.app.flags.DEFINE_integer", "numpy.arange", "os.makedirs", "numpy.delete", "scipy.io.loadmat", "os.path.join", "tensorflow.app.flags.DEFINE_string", "six.moves.urllib.request.urlretrieve", "numpy.concatenate", "sys.stdout.flush", "tensorflow.cast", "numpy.random....
[((555, 619), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""data_dir"""', '"""/cache/vat-tf/svhn"""', '""""""'], {}), "('data_dir', '/cache/vat-tf/svhn', '')\n", (581, 619), True, 'import tensorflow as tf\n'), ((620, 715), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_labeled_examples"""', '(1000)', '"""The number of labeled examples"""'], {}), "('num_labeled_examples', 1000,\n 'The number of labeled examples')\n", (647, 715), True, 'import tensorflow as tf\n'), ((712, 808), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_valid_examples"""', '(1000)', '"""The number of validation examples"""'], {}), "('num_valid_examples', 1000,\n 'The number of validation examples')\n", (739, 808), True, 'import tensorflow as tf\n'), ((805, 867), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""dataset_seed"""', '(1)', '"""dataset seed"""'], {}), "('dataset_seed', 1, 'dataset seed')\n", (832, 867), True, 'import tensorflow as tf\n'), ((1062, 1109), 'os.path.join', 'os.path.join', (['FLAGS.data_dir', '"""train_32x32.mat"""'], {}), "(FLAGS.data_dir, 'train_32x32.mat')\n", (1074, 1109), False, 'import os\n'), ((1134, 1180), 'os.path.join', 'os.path.join', (['FLAGS.data_dir', '"""test_32x32.mat"""'], {}), "(FLAGS.data_dir, 'test_32x32.mat')\n", (1146, 1180), False, 'import os\n'), ((1750, 1794), 'scipy.io.loadmat', 'loadmat', (["(FLAGS.data_dir + '/train_32x32.mat')"], {}), "(FLAGS.data_dir + '/train_32x32.mat')\n", (1757, 1794), False, 'from scipy.io import loadmat\n'), ((2097, 2140), 'scipy.io.loadmat', 'loadmat', (["(FLAGS.data_dir + '/test_32x32.mat')"], {}), "(FLAGS.data_dir + '/test_32x32.mat')\n", (2104, 2140), False, 'from scipy.io import loadmat\n'), ((3352, 3393), 'numpy.random.RandomState', 'np.random.RandomState', (['FLAGS.dataset_seed'], {}), '(FLAGS.dataset_seed)\n', (3373, 3393), True, 'import numpy as np\n'), ((3562, 3599), 'numpy.arange', 
'np.arange', (['FLAGS.num_labeled_examples'], {}), '(FLAGS.num_labeled_examples)\n', (3571, 3599), True, 'import numpy as np\n'), ((3724, 3764), 'numpy.delete', 'np.delete', (['_train_images', 'labeled_ind', '(0)'], {}), '(_train_images, labeled_ind, 0)\n', (3733, 3764), True, 'import numpy as np\n'), ((3785, 3825), 'numpy.delete', 'np.delete', (['_train_labels', 'labeled_ind', '(0)'], {}), '(_train_labels, labeled_ind, 0)\n', (3794, 3825), True, 'import numpy as np\n'), ((4606, 4697), 'numpy.concatenate', 'np.concatenate', (['(train_images_valid, _train_images[FLAGS.num_valid_examples:])'], {'axis': '(0)'}), '((train_images_valid, _train_images[FLAGS.num_valid_examples:\n ]), axis=0)\n', (4620, 4697), True, 'import numpy as np\n'), ((4737, 4828), 'numpy.concatenate', 'np.concatenate', (['(train_labels_valid, _train_labels[FLAGS.num_valid_examples:])'], {'axis': '(0)'}), '((train_labels_valid, _train_labels[FLAGS.num_valid_examples:\n ]), axis=0)\n', (4751, 4828), True, 'import numpy as np\n'), ((7152, 7164), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (7162, 7164), True, 'import tensorflow as tf\n'), ((969, 999), 'os.path.exists', 'os.path.exists', (['FLAGS.data_dir'], {}), '(FLAGS.data_dir)\n', (983, 999), False, 'import os\n'), ((1009, 1036), 'os.makedirs', 'os.makedirs', (['FLAGS.data_dir'], {}), '(FLAGS.data_dir)\n', (1020, 1036), False, 'import os\n'), ((1477, 1550), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['DATA_URL_TRAIN', 'filepath_train_mat', '_progress'], {}), '(DATA_URL_TRAIN, filepath_train_mat, _progress)\n', (1503, 1550), False, 'from six.moves import urllib\n'), ((1559, 1630), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['DATA_URL_TEST', 'filepath_test_mat', '_progress'], {}), '(DATA_URL_TEST, filepath_test_mat, _progress)\n', (1585, 1630), False, 'from six.moves import urllib\n'), ((3287, 3310), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (3301, 3310), 
False, 'import os\n'), ((3320, 3340), 'os.makedirs', 'os.makedirs', (['dirpath'], {}), '(dirpath)\n', (3331, 3340), False, 'import os\n'), ((3960, 4008), 'os.path.join', 'os.path.join', (['dirpath', '"""labeled_train.tfrecords"""'], {}), "(dirpath, 'labeled_train.tfrecords')\n", (3972, 4008), False, 'import os\n'), ((4098, 4148), 'os.path.join', 'os.path.join', (['dirpath', '"""unlabeled_train.tfrecords"""'], {}), "(dirpath, 'unlabeled_train.tfrecords')\n", (4110, 4148), False, 'import os\n'), ((4266, 4305), 'os.path.join', 'os.path.join', (['dirpath', '"""test.tfrecords"""'], {}), "(dirpath, 'test.tfrecords')\n", (4278, 4305), False, 'import os\n'), ((4963, 5015), 'os.path.join', 'os.path.join', (['dirpath', '"""labeled_train_val.tfrecords"""'], {}), "(dirpath, 'labeled_train_val.tfrecords')\n", (4975, 5015), False, 'import os\n'), ((5167, 5221), 'os.path.join', 'os.path.join', (['dirpath', '"""unlabeled_train_val.tfrecords"""'], {}), "(dirpath, 'unlabeled_train_val.tfrecords')\n", (5179, 5221), False, 'import os\n'), ((5351, 5394), 'os.path.join', 'os.path.join', (['dirpath', '"""test_val.tfrecords"""'], {}), "(dirpath, 'test_val.tfrecords')\n", (5363, 5394), False, 'import os\n'), ((6981, 7007), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (6988, 7007), True, 'import tensorflow as tf\n'), ((1192, 1226), 'os.path.exists', 'os.path.exists', (['filepath_train_mat'], {}), '(filepath_train_mat)\n', (1206, 1226), False, 'import os\n'), ((1234, 1267), 'os.path.exists', 'os.path.exists', (['filepath_test_mat'], {}), '(filepath_test_mat)\n', (1248, 1267), False, 'import os\n'), ((1449, 1467), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1465, 1467), False, 'import sys\n'), ((6253, 6279), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (6260, 6279), True, 'import tensorflow as tf\n')]
# -*- coding: utf-8 -*- """ Created on Fri May 8 23:00:16 2020 @author: Han """ import numpy as np import seaborn as sns import matplotlib import matplotlib.pyplot as plt from scipy.stats import pearsonr def softmax(x, softmax_temperature, bias = 0): # Put the bias outside /sigma to make it comparable across different softmax_temperatures. if len(x.shape) == 1: X = x/softmax_temperature + bias # Backward compatibility else: X = np.sum(x/softmax_temperature, axis=0) + bias # Allow more than one kernels (e.g., choice kernel) max_temp = np.max(X) if max_temp > 700: # To prevent explosion of EXP greedy = np.zeros(len(x)) greedy[np.random.choice(np.where(X == np.max(X))[0])] = 1 return greedy else: # Normal softmax return np.exp(X)/np.sum(np.exp(X)) # Accept np. def choose_ps(ps): ''' "Poisson"-choice process ''' ps = ps/np.sum(ps) return np.max(np.argwhere(np.hstack([-1e-16, np.cumsum(ps)]) < np.random.rand())) def seaborn_style(): """ Set seaborn style for plotting figures """ sns.set(style="ticks", context="paper", font_scale=1.4) # sns.set(style="ticks", context="talk", font_scale=2) sns.despine(trim=True) matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 def moving_average(a, n=3) : ret = np.nancumsum(a, dtype=float) ret[n:] = ret[n:] - ret[:-n] return ret[n - 1:] / n def plot_corr(x, y, **kws): (r, p) = pearsonr(x, y) ax = plt.gca() title_obj = ax.set_title("r = %.3f, p = %.4f " % (r, p), fontsize = 8) if p < 0.05: plt.setp(title_obj, color='r')
[ "matplotlib.pyplot.setp", "seaborn.set", "numpy.random.rand", "seaborn.despine", "matplotlib.pyplot.gca", "numpy.max", "numpy.exp", "numpy.nancumsum", "numpy.sum", "scipy.stats.pearsonr", "numpy.cumsum" ]
[((589, 598), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (595, 598), True, 'import numpy as np\n'), ((1126, 1181), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""', 'context': '"""paper"""', 'font_scale': '(1.4)'}), "(style='ticks', context='paper', font_scale=1.4)\n", (1133, 1181), True, 'import seaborn as sns\n'), ((1245, 1267), 'seaborn.despine', 'sns.despine', ([], {'trim': '(True)'}), '(trim=True)\n', (1256, 1267), True, 'import seaborn as sns\n'), ((1398, 1426), 'numpy.nancumsum', 'np.nancumsum', (['a'], {'dtype': 'float'}), '(a, dtype=float)\n', (1410, 1426), True, 'import numpy as np\n'), ((1530, 1544), 'scipy.stats.pearsonr', 'pearsonr', (['x', 'y'], {}), '(x, y)\n', (1538, 1544), False, 'from scipy.stats import pearsonr\n'), ((1554, 1563), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1561, 1563), True, 'import matplotlib.pyplot as plt\n'), ((943, 953), 'numpy.sum', 'np.sum', (['ps'], {}), '(ps)\n', (949, 953), True, 'import numpy as np\n'), ((1664, 1694), 'matplotlib.pyplot.setp', 'plt.setp', (['title_obj'], {'color': '"""r"""'}), "(title_obj, color='r')\n", (1672, 1694), True, 'import matplotlib.pyplot as plt\n'), ((471, 510), 'numpy.sum', 'np.sum', (['(x / softmax_temperature)'], {'axis': '(0)'}), '(x / softmax_temperature, axis=0)\n', (477, 510), True, 'import numpy as np\n'), ((823, 832), 'numpy.exp', 'np.exp', (['X'], {}), '(X)\n', (829, 832), True, 'import numpy as np\n'), ((840, 849), 'numpy.exp', 'np.exp', (['X'], {}), '(X)\n', (846, 849), True, 'import numpy as np\n'), ((1021, 1037), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1035, 1037), True, 'import numpy as np\n'), ((1003, 1016), 'numpy.cumsum', 'np.cumsum', (['ps'], {}), '(ps)\n', (1012, 1016), True, 'import numpy as np\n'), ((737, 746), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (743, 746), True, 'import numpy as np\n')]
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE import pytest # noqa: F401 import numpy as np # noqa: F401 import awkward as ak # noqa: F401 numba = pytest.importorskip("numba") def test_unmasked(): @numba.njit def find_it(array): for item in array: if item is None: pass elif item.x == 3: return item return None content = ak.Array([{"x": 1}, {"x": 2}, {"x": 3}]).layout unmasked = ak.layout.UnmaskedArray(content) array = ak.Array(unmasked) assert ak.to_list(find_it(array)) == {"x": 3} def test_indexedoption(): @numba.njit def find_it(array): for item in array: if item is None: pass elif item.x == 3: return item return None array = ak.Array([{"x": 1}, {"x": 2}, None, {"x": 3}]) assert ak.to_list(find_it(array)) == {"x": 3} def test_indexed_1(): @numba.njit def f1(array, check): for i in range(len(array)): item = array[i] if item.x == check: return i return 999 content = ak.Array([{"x": 100}, {"x": 101}, {"x": 102}]).layout index = ak.layout.Index64(np.array([2, 0, 1], dtype=np.int64)) indexedarray = ak.layout.IndexedArray64(index, content) array = ak.Array(indexedarray) assert f1(array, 100) == 1 assert f1(array, 101) == 2 assert f1(array, 102) == 0 assert f1(array, 12345) == 999 def test_indexed_2(): @numba.njit def f1(array, check): for item in array: if item.x == check: return item return None content = ak.Array([{"x": 100}, {"x": 101}, {"x": 102}]).layout index = ak.layout.Index64(np.array([2, 0, 1], dtype=np.int64)) indexedarray = ak.layout.IndexedArray64(index, content) array = ak.Array(indexedarray) assert f1(array, 100).tolist() == {"x": 100} assert f1(array, 101).tolist() == {"x": 101} assert f1(array, 102).tolist() == {"x": 102} assert f1(array, 12345) is None
[ "awkward.layout.IndexedArray64", "awkward.Array", "awkward.layout.UnmaskedArray", "numpy.array", "pytest.importorskip" ]
[((195, 223), 'pytest.importorskip', 'pytest.importorskip', (['"""numba"""'], {}), "('numba')\n", (214, 223), False, 'import pytest\n'), ((520, 552), 'awkward.layout.UnmaskedArray', 'ak.layout.UnmaskedArray', (['content'], {}), '(content)\n', (543, 552), True, 'import awkward as ak\n'), ((565, 583), 'awkward.Array', 'ak.Array', (['unmasked'], {}), '(unmasked)\n', (573, 583), True, 'import awkward as ak\n'), ((870, 916), 'awkward.Array', 'ak.Array', (["[{'x': 1}, {'x': 2}, None, {'x': 3}]"], {}), "([{'x': 1}, {'x': 2}, None, {'x': 3}])\n", (878, 916), True, 'import awkward as ak\n'), ((1328, 1368), 'awkward.layout.IndexedArray64', 'ak.layout.IndexedArray64', (['index', 'content'], {}), '(index, content)\n', (1352, 1368), True, 'import awkward as ak\n'), ((1381, 1403), 'awkward.Array', 'ak.Array', (['indexedarray'], {}), '(indexedarray)\n', (1389, 1403), True, 'import awkward as ak\n'), ((1861, 1901), 'awkward.layout.IndexedArray64', 'ak.layout.IndexedArray64', (['index', 'content'], {}), '(index, content)\n', (1885, 1901), True, 'import awkward as ak\n'), ((1914, 1936), 'awkward.Array', 'ak.Array', (['indexedarray'], {}), '(indexedarray)\n', (1922, 1936), True, 'import awkward as ak\n'), ((457, 497), 'awkward.Array', 'ak.Array', (["[{'x': 1}, {'x': 2}, {'x': 3}]"], {}), "([{'x': 1}, {'x': 2}, {'x': 3}])\n", (465, 497), True, 'import awkward as ak\n'), ((1188, 1234), 'awkward.Array', 'ak.Array', (["[{'x': 100}, {'x': 101}, {'x': 102}]"], {}), "([{'x': 100}, {'x': 101}, {'x': 102}])\n", (1196, 1234), True, 'import awkward as ak\n'), ((1272, 1307), 'numpy.array', 'np.array', (['[2, 0, 1]'], {'dtype': 'np.int64'}), '([2, 0, 1], dtype=np.int64)\n', (1280, 1307), True, 'import numpy as np\n'), ((1721, 1767), 'awkward.Array', 'ak.Array', (["[{'x': 100}, {'x': 101}, {'x': 102}]"], {}), "([{'x': 100}, {'x': 101}, {'x': 102}])\n", (1729, 1767), True, 'import awkward as ak\n'), ((1805, 1840), 'numpy.array', 'np.array', (['[2, 0, 1]'], {'dtype': 'np.int64'}), '([2, 0, 1], 
dtype=np.int64)\n', (1813, 1840), True, 'import numpy as np\n')]
# Import dependencies. import os import numpy as np import cv2 from scipy.io import savemat # Create labels. C = np.ones((349,)) N = np.zeros((397,)) labels = np.concatenate((C, N), axis=0) # Load the datased and resize to imagenet size. covid = os.listdir('CT_COVID') n_covid = os.listdir('CT_NonCOVID') data=[] for img_path in covid: img = cv2.imread('CT_COVID/'+img_path, cv2.IMREAD_COLOR) data.append(cv2.resize(img, (224, 224))) for img_path in n_covid: img = cv2.imread('CT_NonCOVID/'+img_path, cv2.IMREAD_COLOR) data.append(cv2.resize(img, (224, 224))) # Normalization. data = np.array(data)/255. print(data.shape) print(labels.shape) # Save the data. savemat('images.mat', {'data': data, 'labels': labels})
[ "os.listdir", "scipy.io.savemat", "numpy.ones", "numpy.array", "numpy.zeros", "numpy.concatenate", "cv2.resize", "cv2.imread" ]
[((114, 129), 'numpy.ones', 'np.ones', (['(349,)'], {}), '((349,))\n', (121, 129), True, 'import numpy as np\n'), ((134, 150), 'numpy.zeros', 'np.zeros', (['(397,)'], {}), '((397,))\n', (142, 150), True, 'import numpy as np\n'), ((160, 190), 'numpy.concatenate', 'np.concatenate', (['(C, N)'], {'axis': '(0)'}), '((C, N), axis=0)\n', (174, 190), True, 'import numpy as np\n'), ((248, 270), 'os.listdir', 'os.listdir', (['"""CT_COVID"""'], {}), "('CT_COVID')\n", (258, 270), False, 'import os\n'), ((281, 306), 'os.listdir', 'os.listdir', (['"""CT_NonCOVID"""'], {}), "('CT_NonCOVID')\n", (291, 306), False, 'import os\n'), ((679, 734), 'scipy.io.savemat', 'savemat', (['"""images.mat"""', "{'data': data, 'labels': labels}"], {}), "('images.mat', {'data': data, 'labels': labels})\n", (686, 734), False, 'from scipy.io import savemat\n'), ((348, 400), 'cv2.imread', 'cv2.imread', (["('CT_COVID/' + img_path)", 'cv2.IMREAD_COLOR'], {}), "('CT_COVID/' + img_path, cv2.IMREAD_COLOR)\n", (358, 400), False, 'import cv2\n'), ((479, 534), 'cv2.imread', 'cv2.imread', (["('CT_NonCOVID/' + img_path)", 'cv2.IMREAD_COLOR'], {}), "('CT_NonCOVID/' + img_path, cv2.IMREAD_COLOR)\n", (489, 534), False, 'import cv2\n'), ((603, 617), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (611, 617), True, 'import numpy as np\n'), ((413, 440), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (423, 440), False, 'import cv2\n'), ((547, 574), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (557, 574), False, 'import cv2\n')]
import inspect import logging import hashlib import gym import numpy as np from gym.spaces import Box, Tuple, Dict from mujoco_py import MjSimState from mujoco_worldgen.util.types import enforce_is_callable from mujoco_worldgen.util.sim_funcs import ( empty_get_info, flatten_get_obs, false_get_diverged, ctrl_set_action, zero_get_reward, ) logger = logging.getLogger(__name__) class Env(gym.Env): metadata = { 'render.modes': ['human', 'rgb_array'], } def __init__(self, get_sim, get_obs=flatten_get_obs, get_reward=zero_get_reward, get_info=empty_get_info, get_diverged=false_get_diverged, set_action=ctrl_set_action, action_space=None, horizon=100, start_seed=None, deterministic_mode=False): """ Env is a Gym environment subclass tuned for robotics learning research. Args: - get_sim (callable): a callable that returns an MjSim. - get_obs (callable): callable with an MjSim object as the sole argument and should return observations. - set_action (callable): callable which takes an MjSim object and updates its data and buffer directly. - get_reward (callable): callable which takes an MjSim object and returns a scalar reward. - get_info (callable): callable which takes an MjSim object and returns info (dictionary). - get_diverged (callable): callable which takes an MjSim object and returns a (bool, float) tuple. First value is True if simulator diverged and second value is the reward at divergence. - action_space: a space of allowed actions or a two-tuple of a ranges if number of actions is unknown until the simulation is instantiated - horizon (int): horizon of environment (i.e. max number of steps). - start_seed (int or string): seed for random state generator (None for random seed). Strings will be hashed. A non-None value implies deterministic_mode=True. This argument allows us to run a deterministic series of goals/randomizations for a given policy. Then applying the same seed to another policy will allow the comparison of results more accurately. 
The reason a string is allowed is so that we can more easily find and share seeds that are farther from 0, which is the default starting point for deterministic_mode, and thus have more likelihood of getting a performant sequence of goals. """ if (horizon is not None) and not isinstance(horizon, int): raise TypeError('horizon must be an int') self.get_sim = enforce_is_callable(get_sim, ( 'get_sim should be callable and should return an MjSim object')) self.get_obs = enforce_is_callable(get_obs, ( 'get_obs should be callable with an MjSim object as the sole ' 'argument and should return observations')) self.set_action = enforce_is_callable(set_action, ( 'set_action should be a callable which takes an MjSim object and ' 'updates its data and buffer directly')) self.get_reward = enforce_is_callable(get_reward, ( 'get_reward should be a callable which takes an MjSim object and ' 'returns a scalar reward')) self.get_info = enforce_is_callable(get_info, ( 'get_info should be a callable which takes an MjSim object and ' 'returns a dictionary')) self.get_diverged = enforce_is_callable(get_diverged, ( 'get_diverged should be a callable which takes an MjSim object ' 'and returns a (bool, float) tuple. 
First value is whether ' 'simulator is diverged (or done) and second value is the reward at ' 'that time.')) self.sim = None self.horizon = horizon self.t = None self.deterministic_mode = deterministic_mode # Numpy Random State if isinstance(start_seed, str): start_seed = int(hashlib.sha1(start_seed.encode()).hexdigest(), 16) % (2**32) self.deterministic_mode = True elif isinstance(start_seed, int): self.deterministic_mode = True else: start_seed = 0 if self.deterministic_mode else np.random.randint(2**32) self._random_state = np.random.RandomState(start_seed) # Seed that will be used on next _reset() self._next_seed = start_seed # Seed that was used in last _reset() self._current_seed = None # For rendering self.viewer = None # These are required by Gym self._action_space = action_space self._observation_space = None self._spec = Spec(max_episode_steps=horizon, timestep_limit=horizon) self._name = None # This is to mitigate issues with old/new envs @property def unwrapped(self): return self @property def name(self): if self._name is None: name = str(inspect.getfile(self.get_sim)) if name.endswith(".py"): name = name[:-3] self._name = name return self._name def set_state(self, state, call_forward=True): """ Sets the state of the enviroment to the given value. It does not set time. Warning: This only sets the MuJoCo state by setting qpos/qvel (and the user-defined state "udd_state"). It doesn't set the state of objects which don't have joints. Args: - state (MjSimState): desired state. - call_forward (bool): if True, forward simulation after setting state. """ if not isinstance(state, MjSimState): raise TypeError("state must be an MjSimState") if self.sim is None: raise EmptyEnvException( "You must call reset() or reset_to_state() before setting the " "state the first time") # Call forward to write out values in the MuJoCo data. # Note: if udd_callback is set on the MjSim instance, then the # user will need to call forward() manually before calling step. 
self.sim.set_state(state) if call_forward: self.sim.forward() def get_state(self): """ Returns a copy of the current environment state. Returns: - state (MjSimState): state of the environment's MjSim object. """ if self.sim is None: raise EmptyEnvException( "You must call reset() or reset_to_state() before accessing " "the state the first time") return self.sim.get_state() def get_xml(self): ''' :return: full state of the simulator serialized as XML (won't contain meshes, textures, and data information). ''' return self.sim.model.get_xml() def get_mjb(self): ''' :return: full state of the simulator serialized as mjb. ''' return self.sim.model.get_mjb() def reset_to_state(self, state, call_forward=True): """ Reset to given state. Args: - state (MjSimState): desired state. """ if not isinstance(state, MjSimState): raise TypeError( "You must reset to an explicit state (MjSimState).") if self.sim is None: if self._current_seed is None: self._update_seed() self.sim = self.get_sim(self._current_seed) else: # Ensure environment state not captured in MuJoCo's qpos/qvel # is reset to the state defined by the model. self.sim.reset() self.set_state(state, call_forward=call_forward) self.t = 0 return self._reset_sim_and_spaces() def _update_seed(self, force_seed=None): if force_seed is not None: self._next_seed = force_seed self._current_seed = self._next_seed assert self._current_seed is not None # if in deterministic mode, then simply increment seed, otherwise randomize if self.deterministic_mode: self._next_seed = self._next_seed + 1 else: self._next_seed = np.random.randint(2**32) # immediately update the seed in the random state object self._random_state.seed(self._current_seed) @property def current_seed(self): # Note: this is a property rather than just instance variable # for legacy and backwards compatibility reasons. 
return self._current_seed def _reset_sim_and_spaces(self): obs = self.get_obs(self.sim) # Mocaps are defined by 3-dim position and 4-dim quaternion if isinstance(self._action_space, tuple): assert len(self._action_space) == 2 self._action_space = Box( self._action_space[0], self._action_space[1], (self.sim.model.nmocap * 7 + self.sim.model.nu, ), np.float32) elif self._action_space is None: self._action_space = Box( -np.inf, np.inf, (self.sim.model.nmocap * 7 + self.sim.model.nu, ), np.float32) self._action_space.flatten_dim = np.prod(self._action_space.shape) self._observation_space = gym_space_from_arrays(obs) if self.viewer is not None: self.viewer.update_sim(self.sim) return obs # # Custom pickling # def __getstate__(self): excluded_attrs = frozenset( ("sim", "viewer", "_monitor")) attr_values = {k: v for k, v in self.__dict__.items() if k not in excluded_attrs} if self.sim is not None: attr_values['sim_state'] = self.get_state() return attr_values def __setstate__(self, attr_values): for k, v in attr_values.items(): if k != 'sim_state': self.__dict__[k] = v self.sim = None self.viewer = None if 'sim_state' in attr_values: if self.sim is None: assert self._current_seed is not None self.sim = self.get_sim(self._current_seed) self.set_state(attr_values['sim_state']) self._reset_sim_and_spaces() return self def logs(self): logs = [] if hasattr(self.env, 'logs'): logs += self.env.logs() return logs # # GYM REQUIREMENTS: these are methods required to be compatible with Gym # @property def action_space(self): if self._action_space is None: raise EmptyEnvException( "You have to reset environment before accessing action_space.") return self._action_space @property def observation_space(self): if self._observation_space is None: raise EmptyEnvException( "You have to reset environment before accessing " "observation_space.") return self._observation_space def reset(self, force_seed=None): self._update_seed(force_seed=force_seed) # get sim with current seed self.sim = 
self.get_sim(self._current_seed) # init sim self.sim.forward() self.t = 0 self.sim.data.time = 0.0 return self._reset_sim_and_spaces() def seed(self, seed=None): """ Use `env.seed(some_seed)` to set the seed that'll be used in `env.reset()`. More specifically, this is the seed that will be passed into `env.get_sim` during `env.reset()`. The seed will then be incremented in consequent calls to `env.reset()`. For example: env.seed(0) env.reset() -> gives seed(0) world env.reset() -> gives seed(1) world ... env.seed(0) env.reset() -> gives seed(0) world """ if isinstance(seed, list): # Support list of seeds as required by Gym. assert len(seed) == 1, "Only a single seed supported." self._next_seed = seed[0] elif isinstance(seed, int): self._next_seed = seed elif seed is not None: # If seed is None, we just return current seed. raise ValueError("Seed must be an integer.") # Return list of seeds to conform to Gym specs return [self._next_seed] def step(self, action): action = np.asarray(action) action = np.minimum(action, self.action_space.high) action = np.maximum(action, self.action_space.low) assert self.action_space.contains(action), ( 'Action should be in action_space:\nSPACE=%s\nACTION=%s' % (self.action_space, action)) self.set_action(self.sim, action) self.sim.step() # Need to call forward() so that sites etc are updated, # since they're used in the reward computations. 
self.sim.forward() self.t += 1 reward = self.get_reward(self.sim) if not isinstance(reward, float): raise TypeError("The return value of get_reward must be a float") obs = self.get_obs(self.sim) diverged, divergence_reward = self.get_diverged(self.sim) if not isinstance(diverged, bool): raise TypeError( "The first return value of get_diverged must be boolean") if not isinstance(divergence_reward, float): raise TypeError( "The second return value of get_diverged must be float") if diverged: done = True if divergence_reward is not None: reward = divergence_reward elif self.horizon is not None: done = (self.t >= self.horizon) else: done = False info = self.get_info(self.sim) info["diverged"] = divergence_reward # Return value as required by Gym return obs, reward, done, info def observe(self): """ Gets a new observation from the environment. """ self.sim.forward() return self.get_obs(self.sim) def render(self, mode='human', close=False): if close: # TODO: actually close the inspection viewer return assert self.sim is not None, \ "Please reset environment before render()." if mode == 'human': # Use a nicely-interactive version of the mujoco viewer if self.viewer is None: # Inline import since this is only relevant on platforms # which have GLFW support. 
from mujoco_py.mjviewer import MjViewer # noqa self.viewer = MjViewer(self.sim) self.viewer.render() elif mode == 'rgb_array': return self.sim.render(500, 500) else: raise ValueError("Unsupported mode %s" % mode) class EmptyEnvException(Exception): pass # Helpers ############################################################################### class Spec(object): # required by gym.wrappers.Monitor def __init__(self, max_episode_steps=np.inf, timestep_limit=np.inf): self.id = "worldgen.env" self.max_episode_steps = max_episode_steps self.timestep_limit = timestep_limit def gym_space_from_arrays(arrays): if isinstance(arrays, np.ndarray): ret = Box(-np.inf, np.inf, arrays.shape, np.float32) ret.flatten_dim = np.prod(ret.shape) elif isinstance(arrays, (tuple, list)): ret = Tuple([gym_space_from_arrays(arr) for arr in arrays]) elif isinstance(arrays, dict): ret = Dict(dict([(k, gym_space_from_arrays(v)) for k, v in arrays.items()])) else: raise TypeError("Array is of unsupported type.") return ret
[ "logging.getLogger", "numpy.prod", "numpy.minimum", "numpy.asarray", "mujoco_worldgen.util.types.enforce_is_callable", "gym.spaces.Box", "inspect.getfile", "mujoco_py.mjviewer.MjViewer", "numpy.random.randint", "numpy.maximum", "numpy.random.RandomState" ]
[((378, 405), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (395, 405), False, 'import logging\n'), ((2871, 2967), 'mujoco_worldgen.util.types.enforce_is_callable', 'enforce_is_callable', (['get_sim', '"""get_sim should be callable and should return an MjSim object"""'], {}), "(get_sim,\n 'get_sim should be callable and should return an MjSim object')\n", (2890, 2967), False, 'from mujoco_worldgen.util.types import enforce_is_callable\n'), ((3002, 3142), 'mujoco_worldgen.util.types.enforce_is_callable', 'enforce_is_callable', (['get_obs', '"""get_obs should be callable with an MjSim object as the sole argument and should return observations"""'], {}), "(get_obs,\n 'get_obs should be callable with an MjSim object as the sole argument and should return observations'\n )\n", (3021, 3142), False, 'from mujoco_worldgen.util.types import enforce_is_callable\n'), ((3190, 3334), 'mujoco_worldgen.util.types.enforce_is_callable', 'enforce_is_callable', (['set_action', '"""set_action should be a callable which takes an MjSim object and updates its data and buffer directly"""'], {}), "(set_action,\n 'set_action should be a callable which takes an MjSim object and updates its data and buffer directly'\n )\n", (3209, 3334), False, 'from mujoco_worldgen.util.types import enforce_is_callable\n'), ((3382, 3513), 'mujoco_worldgen.util.types.enforce_is_callable', 'enforce_is_callable', (['get_reward', '"""get_reward should be a callable which takes an MjSim object and returns a scalar reward"""'], {}), "(get_reward,\n 'get_reward should be a callable which takes an MjSim object and returns a scalar reward'\n )\n", (3401, 3513), False, 'from mujoco_worldgen.util.types import enforce_is_callable\n'), ((3559, 3683), 'mujoco_worldgen.util.types.enforce_is_callable', 'enforce_is_callable', (['get_info', '"""get_info should be a callable which takes an MjSim object and returns a dictionary"""'], {}), "(get_info,\n 'get_info should be a callable which takes 
an MjSim object and returns a dictionary'\n )\n", (3578, 3683), False, 'from mujoco_worldgen.util.types import enforce_is_callable\n'), ((3733, 3975), 'mujoco_worldgen.util.types.enforce_is_callable', 'enforce_is_callable', (['get_diverged', '"""get_diverged should be a callable which takes an MjSim object and returns a (bool, float) tuple. First value is whether simulator is diverged (or done) and second value is the reward at that time."""'], {}), "(get_diverged,\n 'get_diverged should be a callable which takes an MjSim object and returns a (bool, float) tuple. First value is whether simulator is diverged (or done) and second value is the reward at that time.'\n )\n", (3752, 3975), False, 'from mujoco_worldgen.util.types import enforce_is_callable\n'), ((4582, 4615), 'numpy.random.RandomState', 'np.random.RandomState', (['start_seed'], {}), '(start_seed)\n', (4603, 4615), True, 'import numpy as np\n'), ((9504, 9537), 'numpy.prod', 'np.prod', (['self._action_space.shape'], {}), '(self._action_space.shape)\n', (9511, 9537), True, 'import numpy as np\n'), ((12737, 12755), 'numpy.asarray', 'np.asarray', (['action'], {}), '(action)\n', (12747, 12755), True, 'import numpy as np\n'), ((12773, 12815), 'numpy.minimum', 'np.minimum', (['action', 'self.action_space.high'], {}), '(action, self.action_space.high)\n', (12783, 12815), True, 'import numpy as np\n'), ((12833, 12874), 'numpy.maximum', 'np.maximum', (['action', 'self.action_space.low'], {}), '(action, self.action_space.low)\n', (12843, 12874), True, 'import numpy as np\n'), ((15694, 15740), 'gym.spaces.Box', 'Box', (['(-np.inf)', 'np.inf', 'arrays.shape', 'np.float32'], {}), '(-np.inf, np.inf, arrays.shape, np.float32)\n', (15697, 15740), False, 'from gym.spaces import Box, Tuple, Dict\n'), ((15767, 15785), 'numpy.prod', 'np.prod', (['ret.shape'], {}), '(ret.shape)\n', (15774, 15785), True, 'import numpy as np\n'), ((8520, 8546), 'numpy.random.randint', 'np.random.randint', (['(2 ** 32)'], {}), '(2 ** 32)\n', 
(8537, 8546), True, 'import numpy as np\n'), ((9142, 9258), 'gym.spaces.Box', 'Box', (['self._action_space[0]', 'self._action_space[1]', '(self.sim.model.nmocap * 7 + self.sim.model.nu,)', 'np.float32'], {}), '(self._action_space[0], self._action_space[1], (self.sim.model.nmocap * \n 7 + self.sim.model.nu,), np.float32)\n', (9145, 9258), False, 'from gym.spaces import Box, Tuple, Dict\n'), ((5256, 5285), 'inspect.getfile', 'inspect.getfile', (['self.get_sim'], {}), '(self.get_sim)\n', (5271, 5285), False, 'import inspect\n'), ((9362, 9449), 'gym.spaces.Box', 'Box', (['(-np.inf)', 'np.inf', '(self.sim.model.nmocap * 7 + self.sim.model.nu,)', 'np.float32'], {}), '(-np.inf, np.inf, (self.sim.model.nmocap * 7 + self.sim.model.nu,), np.\n float32)\n', (9365, 9449), False, 'from gym.spaces import Box, Tuple, Dict\n'), ((14998, 15016), 'mujoco_py.mjviewer.MjViewer', 'MjViewer', (['self.sim'], {}), '(self.sim)\n', (15006, 15016), False, 'from mujoco_py.mjviewer import MjViewer\n'), ((4519, 4545), 'numpy.random.randint', 'np.random.randint', (['(2 ** 32)'], {}), '(2 ** 32)\n', (4536, 4545), True, 'import numpy as np\n')]
import numpy as np class Team(object): def __init__(self,teamId,rank=1000,scored=0): self.teamId = teamId self.rank = rank self.scored = scored self.new_rank = 0 self.perf = 0 class Match(object): def __init__(self,matchId,level,homeTeam,awayTeam,homeTeam_goals,awayTeam_goals): self.matchId = matchId self.level = level self.homeTeam = homeTeam self.homeTeam.scored = homeTeam_goals self.awayTeam = awayTeam self.awayTeam.scored = awayTeam_goals self.proba = 0 self.goalDiff = self.homeTeam.scored - self.awayTeam.scored self.home = 0 self.away = 0 self.probaHome = 0 self.probaAway = 0 def get_res_match(self): if self.homeTeam.scored > self.awayTeam.scored: self.homeTeam.perf = 1 self.awayTeam.perf = 0 elif self.homeTeam.scored == self.awayTeam.scored: self.homeTeam.perf = 0.5 self.awayTeam.perf = 0.5 else: self.homeTeam.perf = 0 self.awayTeam.perf = 1 return 1 def get_proba(self): self.probaHome = 1.0/(1+np.power(10,(self.awayTeam.rank - (self.homeTeam.rank))/400.0)) self.probaAway = 1.0 - self.probaHome return 1 def G_index(self, avg_goalDiff): """ Weighting the result so that the defeat is not linear (8-0 or 6-0 is roughly the same : you got destroyed). Here use custom G function defined to be constant (1) in case of W/D/L, give diminishing returns with score difference and fit world football index as closely as possible. 
""" ### Generalised G_index Factor # G = max(1, 1.6*np.log((np.abs(self.goalDiff)/avg_goalDiff)+1)) # return G ### World Football specific G_index factor ### if self.goalDiff==0 or np.abs(self.goalDiff) == 1: return 1 elif np.abs(self.goalDiff) == 2: return 3/2.0 else : return (11+np.abs(self.goalDiff))/8.0 def get_point(self, avg_goalDiff): self.get_proba() self.pointHome = round(self.level * self.G_index(avg_goalDiff) *(self.homeTeam.perf - self.probaHome)) self.pointAway = round(self.level * self.G_index(avg_goalDiff) *(self.awayTeam.perf - self.probaAway)) return 1 def update_team_rank(self, avg_goalDiff): self.get_res_match() self.get_point(avg_goalDiff) self.homeTeam.new_rank = self.homeTeam.rank + self.pointHome self.awayTeam.new_rank = self.awayTeam.rank + self.pointAway return 1 def write_rankings(self): return {"matchId":self.matchId ,"homeTeam_goals":self.homeTeam.scored ,"awayTeam_goals":self.awayTeam.scored ,"homeTeamId":self.homeTeam.teamId ,"awayTeamId":self.awayTeam.teamId ,"homeTeam_rank":self.homeTeam.rank ,"awayTeam_rank":self.awayTeam.rank ,"homeTeam_new_rank":self.homeTeam.new_rank ,"awayTeam_new_rank":self.awayTeam.new_rank ,"rank_change_home":self.pointHome ,"ELOprob_home":self.probaHome ,"ELOprob_away":self.probaAway ,"rank_change_away":self.pointAway}
[ "numpy.abs", "numpy.power" ]
[((1204, 1267), 'numpy.power', 'np.power', (['(10)', '((self.awayTeam.rank - self.homeTeam.rank) / 400.0)'], {}), '(10, (self.awayTeam.rank - self.homeTeam.rank) / 400.0)\n', (1212, 1267), True, 'import numpy as np\n'), ((1905, 1926), 'numpy.abs', 'np.abs', (['self.goalDiff'], {}), '(self.goalDiff)\n', (1911, 1926), True, 'import numpy as np\n'), ((1967, 1988), 'numpy.abs', 'np.abs', (['self.goalDiff'], {}), '(self.goalDiff)\n', (1973, 1988), True, 'import numpy as np\n'), ((2058, 2079), 'numpy.abs', 'np.abs', (['self.goalDiff'], {}), '(self.goalDiff)\n', (2064, 2079), True, 'import numpy as np\n')]
from collections import OrderedDict from functools import partial import matplotlib.pyplot as plt from scipy.linalg import toeplitz import scipy.sparse as sps import numpy as np import pandas as pd import bioframe import cooler from .lib.numutils import LazyToeplitz def make_bin_aligned_windows(binsize, chroms, centers_bp, flank_bp=0, region_start_bp=0, ignore_index=False): """ Convert genomic loci into bin spans on a fixed bin-size segmentation of a genomic region. Window limits are adjusted to align with bin edges. Parameters ----------- binsize : int Bin size (resolution) in base pairs. chroms : 1D array-like Column of chromosome names. centers_bp : 1D or nx2 array-like If 1D, center points of each window. If 2D, the starts and ends. flank_bp : int Distance in base pairs to extend windows on either side. region_start_bp : int, optional If region is a subset of a chromosome, shift coordinates by this amount. Default is 0. Returns ------- DataFrame with columns: 'chrom' - chromosome 'start', 'end' - window limits in base pairs 'lo', 'hi' - window limits in bins """ if not (flank_bp % binsize == 0): raise ValueError( "Flanking distance must be divisible by the bin size.") if isinstance(chroms, pd.Series) and not ignore_index: index = chroms.index else: index = None chroms = np.asarray(chroms) centers_bp = np.asarray(centers_bp) if len(centers_bp.shape) == 2: left_bp = centers_bp[:, 0] right_bp = centers_bp[:, 1] else: left_bp = right_bp = centers_bp if np.any(left_bp > right_bp): raise ValueError("Found interval with end > start.") left = left_bp - region_start_bp right = right_bp - region_start_bp left_bin = (left / binsize).astype(int) right_bin = (right / binsize).astype(int) flank_bin = flank_bp // binsize lo = left_bin - flank_bin hi = right_bin + flank_bin + 1 windows = pd.DataFrame(index=index) windows['chrom'] = chroms windows['start'] = lo * binsize windows['end'] = hi * binsize windows['lo'] = lo windows['hi'] = hi return windows def 
assign_regions(features, supports): """ """ features = features.copy() # on-diagonal features if 'chrom' in features.columns: for i, region in enumerate(supports): if len(region) == 3: sel = (features.chrom == region[0]) sel &= (features.end >= region[1]) if region[2] is not None: sel &= (features.start < region[2]) features.loc[sel, 'region'] = i elif len(region) == 2: region1, region2 = region sel1 = (features.chrom == region1[0]) sel1 &= (features.end >= region1[1]) if region1[2] is not None: sel1 &= (features.start < region1[2]) sel2 = (features.chrom == region2[0]) sel2 &= (features.end >= region2[1]) if region2[2] is not None: sel2 &= (features.start < region2[2]) features.loc[(sel1 | sel2), 'region'] = i # off-diagonal features elif 'chrom1' in features.columns: for i, region in enumerate(supports): if len(region) == 3: region1, region2 = region, region elif len(region) == 2: region1, region2 = region[0], region[1] sel1 = (features.chrom1 == region1[0]) sel1 &= (features.end1 >= region1[1]) if region1[2] is not None: sel1 &= (features.start1 < region1[2]) sel2 = (features.chrom2 == region2[0]) sel2 &= (features.end2 >= region2[1]) if region2[2] is not None: sel2 &= (features.start2 < region2[2]) features.loc[(sel1 | sel2), 'region'] = i else: raise ValueError('Could not parse `features` data frame.') features['region'] = features['region'].map( lambda i: '{}:{}-{}'.format(*supports[int(i)]), na_action='ignore') return features def _pileup(data_select, data_snip, arg): support, feature_group = arg # check if support region is on- or off-diagonal if len(support) == 2: region1, region2 = map(bioframe.parse_region_string, support) else: region1 = region2 = bioframe.parse_region_string(support) # check if features are on- or off-diagonal if 'start' in feature_group: s1 = feature_group['start'].values e1 = feature_group['end'].values s2, e2 = s1, e1 else: s1 = feature_group['start1'].values e1 = feature_group['end1'].values s2 = feature_group['start2'].values e2 = 
feature_group['end2'].values data = data_select(region1, region2) stack = list(map(partial(data_snip, data, region1, region2), zip(s1, e1, s2, e2))) return np.dstack(stack), feature_group['_rank'].values def pileup(features, data_select, data_snip, map=map): """ Handles on-diagonal and off-diagonal cases. Parameters ---------- features : DataFrame Table of features. Requires columns ['chrom', 'start', 'end']. Or ['chrom1', 'start1', 'end1', 'chrom1', 'start2', 'end2']. start, end are bp coordinates. lo, hi are bin coordinates. data_select : callable Callable that takes a region as argument and returns the data, mask and bin offset of a support region data_snip : callable Callable that takes data, mask and a 2D bin span (lo1, hi1, lo2, hi2) and returns a snippet from the selected support region """ if features.region.isnull().any(): raise ValueError( 'Drop features with no region assignment before calling pileup!') features = features.copy() features['_rank'] = range(len(features)) # cumul_stack = [] # orig_rank = [] cumul_stack, orig_rank = zip(*map( partial(_pileup, data_select, data_snip), features.groupby('region', sort=False) )) # Restore the original rank of the input features cumul_stack = np.dstack(cumul_stack) orig_rank = np.concatenate(orig_rank) idx = np.argsort(orig_rank) cumul_stack = cumul_stack[:, :, idx] return cumul_stack def pair_sites(sites, separation, slop): """ Create "hand" intervals to the right and to the left of each site. Then join right hands with left hands to pair sites together. 
""" from bioframe.tools import tsv, bedtools mids = (sites['start'] + sites['end']) // 2 left_hand = sites[['chrom']].copy() left_hand['start'] = mids - separation - slop left_hand['end'] = mids - separation + slop left_hand['site_id'] = left_hand.index left_hand['direction'] = 'L' left_hand['snip_mid'] = mids left_hand['snip_strand'] = sites['strand'] right_hand = sites[['chrom']].copy() right_hand['start'] = mids + separation - slop right_hand['end'] = mids + separation + slop right_hand['site_id'] = right_hand.index right_hand['direction'] = 'R' right_hand['snip_mid'] = mids right_hand['snip_strand'] = sites['strand'] # ignore out-of-bounds hands mask = (left_hand['start'] > 0) & (right_hand['start'] > 0) left_hand = left_hand[mask].copy() right_hand = right_hand[mask].copy() # intersect right hands (left anchor site) # with left hands (right anchor site) with tsv(right_hand) as R, tsv(left_hand) as L: out = bedtools.intersect(a=R.name, b=L.name, wa=True, wb=True) out.columns = ([c+'_r' for c in right_hand.columns] + [c+'_l' for c in left_hand.columns]) return out class CoolerSnipper: def __init__(self, clr, cooler_opts=None): self.clr = clr self.binsize = self.clr.binsize self.offsets = {} self.pad = True self.cooler_opts = {} if cooler_opts is None else cooler_opts self.cooler_opts.setdefault('sparse', True) def select(self, region1, region2): self.offsets[region1] = self.clr.offset( region1) - self.clr.offset(region1[0]) self.offsets[region2] = self.clr.offset( region2) - self.clr.offset(region2[0]) self._isnan1 = np.isnan(self.clr.bins()['weight'].fetch(region1).values) self._isnan2 = np.isnan(self.clr.bins()['weight'].fetch(region2).values) matrix = (self.clr.matrix(**self.cooler_opts) .fetch(region1, region2)) if self.cooler_opts['sparse']: matrix = matrix.tocsr() return matrix def snip(self, matrix, region1, region2, tup): s1, e1, s2, e2 = tup offset1 = self.offsets[region1] offset2 = self.offsets[region2] binsize = self.binsize lo1, hi1 = (s1 // binsize) 
- offset1, (e1 // binsize) - offset1 lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2 assert hi1 >= 0 assert hi2 >= 0 m, n = matrix.shape dm, dn = hi1 - lo1, hi2 - lo2 out_of_bounds = False pad_left = pad_right = pad_bottom = pad_top = None if lo1 < 0: pad_bottom = -lo1 out_of_bounds = True if lo2 < 0: pad_left = -lo2 out_of_bounds = True if hi1 > m: pad_top = dm - (hi1 - m) out_of_bounds = True if hi2 > n: pad_right = dn - (hi2 - n) out_of_bounds = True if out_of_bounds: i0 = max(lo1, 0) i1 = min(hi1, m) j0 = max(lo2, 0) j1 = min(hi2, n) snippet = np.full((dm, dn), np.nan) # snippet[pad_bottom:pad_top, # pad_left:pad_right] = matrix[i0:i1, j0:j1].toarray() else: snippet = matrix[lo1:hi1, lo2:hi2].toarray() snippet[self._isnan1[lo1:hi1], :] = np.nan snippet[:, self._isnan2[lo2:hi2]] = np.nan return snippet class ObsExpSnipper: def __init__(self, clr, expected, cooler_opts=None): self.clr = clr self.expected = expected # Detecting the columns for the detection of regions columns = expected.columns assert len(columns)>0 if 'chrom' in columns and 'start' in columns and 'end' in columns: self.regions_columns = ['chrom', 'start', 'end'] # Chromosome arms encoded by multiple columns elif 'chrom' in columns: self.regions_columns = ['chrom'] # Chromosomes or regions encoded in string mode: "chr3:XXXXXXX-YYYYYYYY" elif 'region' in columns: self.regions_columns = ['region'] # Regions encoded in string mode: "chr3:XXXXXXX-YYYYYYYY" elif len(columns)>0: self.regions_columns = columns[0] # The first columns is treated as chromosome/region annotation else: raise ValueError('Expected dataframe has no columns.') self.binsize = self.clr.binsize self.offsets = {} self.pad = True self.cooler_opts = {} if cooler_opts is None else cooler_opts self.cooler_opts.setdefault('sparse', True) def select(self, region1, region2): assert region1==region2, "ObsExpSnipper is implemented for cis contacts only." 
self.offsets[region1] = self.clr.offset( region1) - self.clr.offset(region1[0]) self.offsets[region2] = self.clr.offset( region2) - self.clr.offset(region2[0]) matrix = (self.clr.matrix(**self.cooler_opts) .fetch(region1, region2)) if self.cooler_opts['sparse']: matrix = matrix.tocsr() self._isnan1 = np.isnan(self.clr.bins()['weight'].fetch(region1).values) self._isnan2 = np.isnan(self.clr.bins()['weight'].fetch(region2).values) self._expected = LazyToeplitz(self.expected .groupby(self.regions_columns) .get_group(region1[0] if len(self.regions_columns)>0 else region1) ['balanced.avg'] .values) return matrix def snip(self, matrix, region1, region2, tup): s1, e1, s2, e2 = tup offset1 = self.offsets[region1] offset2 = self.offsets[region2] binsize = self.binsize lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1 lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2 assert hi1 >= 0 assert hi2 >= 0 m, n = matrix.shape dm, dn = hi1 - lo1, hi2 - lo2 out_of_bounds = False pad_left = pad_right = pad_bottom = pad_top = None if lo1 < 0: pad_bottom = -lo1 out_of_bounds = True if lo2 < 0: pad_left = -lo2 out_of_bounds = True if hi1 > m: pad_top = dm - (hi1 - m) out_of_bounds = True if hi2 > n: pad_right = dn - (hi2 - n) out_of_bounds = True if out_of_bounds: i0 = max(lo1, 0) i1 = min(hi1, m) j0 = max(lo2, 0) j1 = min(hi2, n) return np.full((dm, dn), np.nan) # snippet[pad_bottom:pad_top, # pad_left:pad_right] = matrix[i0:i1, j0:j1].toarray() else: snippet = matrix[lo1:hi1, lo2:hi2].toarray() snippet[self._isnan1[lo1:hi1], :] = np.nan snippet[:, self._isnan2[lo2:hi2]] = np.nan e = self._expected[lo1:hi1, lo2:hi2] return snippet / e class ExpectedSnipper: def __init__(self, clr, expected): self.clr = clr self.expected = expected # Detecting the columns for the detection of regions columns = expected.columns assert len(columns)>0 if 'chrom' in columns and 'start' in columns and 'end' in columns: self.regions_columns = ['chrom', 'start', 'end'] # Chromosome 
arms encoded by multiple columns elif 'chrom' in columns: self.regions_columns = ['chrom'] # Chromosomes or regions encoded in string mode: "chr3:XXXXXXX-YYYYYYYY" elif 'region' in columns: self.regions_columns = ['region'] # Regions encoded in string mode: "chr3:XXXXXXX-YYYYYYYY" elif len(columns)>0: self.regions_columns = columns[0] # The first columns is treated as chromosome/region annotation else: raise ValueError('Expected dataframe has no columns.') self.binsize = self.clr.binsize self.offsets = {} def select(self, region1, region2): assert region1==region2, "ExpectedSnipper is implemented for cis contacts only." self.offsets[region1] = \ self.clr.offset(region1) - self.clr.offset(region1[0]) self.offsets[region2] = \ self.clr.offset(region2) - self.clr.offset(region2[0]) self.m = np.diff(self.clr.extent(region1)) self.n = np.diff(self.clr.extent(region2)) self._expected = LazyToeplitz(self.expected .groupby(self.regions_columns) .get_group(region1[0] if len(self.regions_columns)>0 else region1) ['balanced.avg'] .values) return self._expected def snip(self, exp, region1, region2, tup): s1, e1, s2, e2 = tup offset1 = self.offsets[region1] offset2 = self.offsets[region2] binsize = self.binsize lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1 lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2 assert hi1 >= 0 assert hi2 >= 0 dm, dn = hi1 - lo1, hi2 - lo2 if (lo1 < 0 or lo2 < 0 or hi1 > self.m or hi2 > self.n): return np.full((dm, dn), np.nan) snippet = exp[lo1:hi1, lo2:hi2] return snippet
[ "numpy.dstack", "bioframe.tools.tsv", "numpy.asarray", "bioframe.tools.bedtools.intersect", "numpy.any", "numpy.argsort", "functools.partial", "bioframe.parse_region_string", "numpy.concatenate", "pandas.DataFrame", "numpy.full" ]
[((1524, 1542), 'numpy.asarray', 'np.asarray', (['chroms'], {}), '(chroms)\n', (1534, 1542), True, 'import numpy as np\n'), ((1560, 1582), 'numpy.asarray', 'np.asarray', (['centers_bp'], {}), '(centers_bp)\n', (1570, 1582), True, 'import numpy as np\n'), ((1747, 1773), 'numpy.any', 'np.any', (['(left_bp > right_bp)'], {}), '(left_bp > right_bp)\n', (1753, 1773), True, 'import numpy as np\n'), ((2119, 2144), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index'}), '(index=index)\n', (2131, 2144), True, 'import pandas as pd\n'), ((6386, 6408), 'numpy.dstack', 'np.dstack', (['cumul_stack'], {}), '(cumul_stack)\n', (6395, 6408), True, 'import numpy as np\n'), ((6425, 6450), 'numpy.concatenate', 'np.concatenate', (['orig_rank'], {}), '(orig_rank)\n', (6439, 6450), True, 'import numpy as np\n'), ((6462, 6483), 'numpy.argsort', 'np.argsort', (['orig_rank'], {}), '(orig_rank)\n', (6472, 6483), True, 'import numpy as np\n'), ((4577, 4614), 'bioframe.parse_region_string', 'bioframe.parse_region_string', (['support'], {}), '(support)\n', (4605, 4614), False, 'import bioframe\n'), ((5149, 5165), 'numpy.dstack', 'np.dstack', (['stack'], {}), '(stack)\n', (5158, 5165), True, 'import numpy as np\n'), ((7713, 7728), 'bioframe.tools.tsv', 'tsv', (['right_hand'], {}), '(right_hand)\n', (7716, 7728), False, 'from bioframe.tools import tsv, bedtools\n'), ((7735, 7749), 'bioframe.tools.tsv', 'tsv', (['left_hand'], {}), '(left_hand)\n', (7738, 7749), False, 'from bioframe.tools import tsv, bedtools\n'), ((7770, 7826), 'bioframe.tools.bedtools.intersect', 'bedtools.intersect', ([], {'a': 'R.name', 'b': 'L.name', 'wa': '(True)', 'wb': '(True)'}), '(a=R.name, b=L.name, wa=True, wb=True)\n', (7788, 7826), False, 'from bioframe.tools import tsv, bedtools\n'), ((5050, 5092), 'functools.partial', 'partial', (['data_snip', 'data', 'region1', 'region2'], {}), '(data_snip, data, region1, region2)\n', (5057, 5092), False, 'from functools import partial\n'), ((9926, 9951), 'numpy.full', 
'np.full', (['(dm, dn)', 'np.nan'], {}), '((dm, dn), np.nan)\n', (9933, 9951), True, 'import numpy as np\n'), ((13440, 13465), 'numpy.full', 'np.full', (['(dm, dn)', 'np.nan'], {}), '((dm, dn), np.nan)\n', (13447, 13465), True, 'import numpy as np\n'), ((16069, 16094), 'numpy.full', 'np.full', (['(dm, dn)', 'np.nan'], {}), '((dm, dn), np.nan)\n', (16076, 16094), True, 'import numpy as np\n'), ((6217, 6257), 'functools.partial', 'partial', (['_pileup', 'data_select', 'data_snip'], {}), '(_pileup, data_select, data_snip)\n', (6224, 6257), False, 'from functools import partial\n')]
#!/usr/bin/env python """ Small demonstration of the hlines and vlines plots. """ from matplotlib import pyplot as plt from numpy import sin, exp, absolute, pi, arange from numpy.random import normal def f(t): s1 = sin(2 * pi * t) e1 = exp(-t) return absolute((s1 * e1)) + .05 t = arange(0.0, 5.0, 0.1) s = f(t) nse = normal(0.0, 0.3, t.shape) * s fig = plt.figure(figsize=(12, 6)) vax = fig.add_subplot(121) hax = fig.add_subplot(122) vax.plot(t, s + nse, 'b^') vax.vlines(t, [0], s) vax.set_xlabel('time (s)') vax.set_title('Vertical lines demo') hax.plot(s + nse, t, 'b^') hax.hlines(t, [0], s, lw=2) hax.set_xlabel('time (s)') hax.set_title('Horizontal lines demo') plt.show()
[ "numpy.random.normal", "numpy.absolute", "numpy.exp", "matplotlib.pyplot.figure", "numpy.sin", "numpy.arange", "matplotlib.pyplot.show" ]
[((300, 321), 'numpy.arange', 'arange', (['(0.0)', '(5.0)', '(0.1)'], {}), '(0.0, 5.0, 0.1)\n', (306, 321), False, 'from numpy import sin, exp, absolute, pi, arange\n'), ((374, 401), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (384, 401), True, 'from matplotlib import pyplot as plt\n'), ((693, 703), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (701, 703), True, 'from matplotlib import pyplot as plt\n'), ((224, 239), 'numpy.sin', 'sin', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (227, 239), False, 'from numpy import sin, exp, absolute, pi, arange\n'), ((249, 256), 'numpy.exp', 'exp', (['(-t)'], {}), '(-t)\n', (252, 256), False, 'from numpy import sin, exp, absolute, pi, arange\n'), ((337, 362), 'numpy.random.normal', 'normal', (['(0.0)', '(0.3)', 't.shape'], {}), '(0.0, 0.3, t.shape)\n', (343, 362), False, 'from numpy.random import normal\n'), ((268, 285), 'numpy.absolute', 'absolute', (['(s1 * e1)'], {}), '(s1 * e1)\n', (276, 285), False, 'from numpy import sin, exp, absolute, pi, arange\n')]
import os import torch from torchvision.datasets import CelebA, CIFAR10, LSUN, ImageFolder from torch.utils.data import Dataset, DataLoader, random_split, Subset from utils import CropTransform import torchvision.transforms as transforms import numpy as np from tqdm import tqdm import cv2 from PIL import Image # Change the below to the actual dataset root folders celeba_root = 'datasets/CelebA' ffhq_root = 'datasets/FFHQ' shoes_root = 'datasets/edges2shoes' class Shoes(Dataset): """ Dataset format is the same as used in pix2pix. We take only trainB and testB. """ def __init__(self, root_dir, split='train', transform=None): self.root_dir = root_dir self.transform = transform self.split = split self.im_list = [f for f in os.listdir(os.path.join(root_dir, split+'B')) if f.endswith('jpg')] print('Got {} shoes in split {}.'.format(len(self.im_list), split)) def __len__(self): return len(self.im_list) def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() img_path = os.path.join(self.root_dir, self.split+'B', self.im_list[idx]) image = Image.open(img_path) if not image.mode == 'RGB': image = image.convert('RGB') if self.transform: image = self.transform(image) return image class FFHQ(Dataset): """ FFHQ folder should contain images1024x1024 and thumbnails128x128 """ def __init__(self, root_dir, split='train', transform=None, use_thumbnails=False): self.root_dir = root_dir self.transform = transform self.split = split self.use_thumbnails = use_thumbnails self.split_ranges = {'train': (0, 60000), 'test': (60000, 70000)} def __len__(self): return self.split_ranges[self.split][1] - self.split_ranges[self.split][0] def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() subfolder = 'thumbnails128x128' if self.use_thumbnails else 'images1024x1024' img_name = os.path.join(self.root_dir, subfolder, '%05i.png' % (idx+self.split_ranges[self.split][0])) image = Image.open(img_name) if self.transform: image = self.transform(image) return image def 
load_data(dataset, num_samples=None, w=128, shuffle=True, has_cls=False): if num_samples: if shuffle: dataset = random_split(dataset, [num_samples, len(dataset)-num_samples])[0] else: dataset = Subset(dataset, np.arange(num_samples)) loader = DataLoader(dataset, shuffle=shuffle, num_workers=8) if has_cls: return np.vstack([x.numpy() for x, _ in tqdm(loader)]).transpose([0, 2, 3, 1]).reshape(-1, w*w*3) return np.vstack([x.numpy() for x in tqdm(loader)]).transpose([0, 2, 3, 1]).reshape(-1, w*w*3) def get_ffhq_data(split='train', num_samples=None, w=128, shuffle=True): ffhq = FFHQ(ffhq_root, split=split, transform=transforms.Compose([transforms.Resize(w), transforms.ToTensor()]), use_thumbnails=(w <= 128)) return load_data(ffhq, num_samples, w, shuffle) def get_celeba_data(split='train', num_samples=None, w=128, attr_num=None, attr_value=None, shuffle=True): celeba = CelebA(root=celeba_root, split=split, download=False, target_type='attr', transform=transforms.Compose([CropTransform((25, 50, 25+128, 50+128)), transforms.Resize(w), transforms.ToTensor()])) return load_data(celeba, num_samples, w, shuffle, has_cls=True) def get_shoes_data(split='train', num_samples=None, w=128, shuffle=True): shoes = Shoes(shoes_root, split=split, transform=transforms.Compose([transforms.CenterCrop((256, 256)), transforms.Resize((w, w)), transforms.ToTensor()])) return load_data(shoes, num_samples, w, shuffle) def true_transform(X, ttype='identity', w=128): """ Apply a synthetic transformation to a set of images :param X: Images (ch last) flattened - each image as row vector in X :param ttype: The required transformation :param w: The image resolution (w=h) :return: Transformed images """ X = X.reshape(-1, w, w, 3) if ttype == 'rot90': X = np.rot90(X, k=1, axes=(1, 2)) elif ttype == 'inpaint': mask = cv2.imread('data/inpaint_mask_simple.png').astype(np.float32)/255.0 # mask = cv2.imread('data/inpaint_mask.png').astype(np.float32)/255.0 # mask[:, 64:, :] = 1.0 - mask[:, 64:, :] if not 
mask.shape[0] == w: mask = cv2.resize(mask, (w, w), interpolation=cv2.INTER_NEAREST) X = X.copy() * mask.reshape(1, w, w, 3) elif ttype == 'vflip': X = X[:, ::-1] elif ttype == 'colorize': X = np.repeat(np.mean(X, axis=3, keepdims=True), 3, axis=3) elif ttype == 'edges': ksize = 1 if w == 64 else 3 X = np.stack([cv2.Laplacian(X[i], cv2.CV_32F, ksize=ksize) for i in range(X.shape[0])]) elif ttype == 'Canny-edges': edges = np.stack([cv2.Canny((np.mean(X[i], axis=2)*255.0).astype(np.uint8), 80, 200) for i in range(X.shape[0])]) X = np.repeat(np.expand_dims(edges.astype(np.float32)*(1.0/255.0), 3), 3, axis=3) elif ttype == 'super-res': X = np.stack([cv2.resize(cv2.resize(X[i], (w//8, w//8), interpolation=cv2.INTER_LINEAR), (w, w), interpolation=cv2.INTER_LINEAR) for i in range(X.shape[0])]) elif ttype == 'identity': pass else: assert False, ttype return X.reshape(-1, w*w*3) def get_data(args): """ Load samples from a dataset and apply a synthetic transformation to half of the data ("A") :param args: Relevant options are: dataset: Name of the dataset to be loaded n_train: Number of training images n_test: Number of test images resolution: Images will be resized to [resolution x resolution] pairing: 'paired' = supervised - X_A[i] = T(X_B[i]) 'matching' = The same original images are used for X_A and X_B, but in different random order 'nonmatching' = X_A and X_B are disjoint sets (i.e. 
split the dataset to two parts) 'few-matches' = Only 1/8 of the images in X_A and X_B match a_transform: The synthetic transformation applied to X_A (see function true_transform) :return: X_A, X_B, X_A_test, X_B_test """ if args.dataset == 'celeba': train_x = get_celeba_data(num_samples=args.n_train, w=args.resolution) test_x = get_celeba_data('test', num_samples=args.n_test, w=args.resolution, shuffle=False) elif args.dataset == 'ffhq': train_x = get_ffhq_data(num_samples=args.n_train, w=args.resolution) test_x = get_ffhq_data('test', num_samples=args.n_test, w=args.resolution, shuffle=False) elif args.dataset == 'shoes': train_x = get_shoes_data(num_samples=args.n_train, w=args.resolution) test_x = get_shoes_data('test', num_samples=args.n_test, w=args.resolution, shuffle=False) n_train = train_x.shape[0] if args.pairing == 'nonmatching': X_A = train_x[:n_train//2] X_B = train_x[n_train//2:] elif args.pairing == 'few-matches': n_matches = n_train//8 if (n_train-n_matches) % 2 == 1: n_matches += 1 print('Inserting {}/{} matching pairs...'.format(n_matches, n_train)) n_per_part = (n_train-n_matches) // 2 X_A = train_x[:(n_per_part+n_matches)].copy() X_B = train_x[n_per_part:] else: X_A = train_x X_B = train_x.copy() if not args.pairing == 'paired': np.random.shuffle(X_B) X_A = true_transform(X_A, ttype=args.a_transform, w=args.resolution) X_B_test = test_x.copy() X_A_test = true_transform(test_x, ttype=args.a_transform, w=args.resolution) return X_A, X_B, X_A_test, X_B_test
[ "torchvision.transforms.CenterCrop", "numpy.mean", "cv2.Laplacian", "PIL.Image.open", "cv2.imread", "utils.CropTransform", "tqdm.tqdm", "os.path.join", "torch.is_tensor", "numpy.rot90", "torch.utils.data.DataLoader", "torchvision.transforms.Resize", "cv2.resize", "torchvision.transforms.To...
[((2598, 2649), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'shuffle': 'shuffle', 'num_workers': '(8)'}), '(dataset, shuffle=shuffle, num_workers=8)\n', (2608, 2649), False, 'from torch.utils.data import Dataset, DataLoader, random_split, Subset\n'), ((1025, 1045), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (1040, 1045), False, 'import torch\n'), ((1097, 1161), 'os.path.join', 'os.path.join', (['self.root_dir', "(self.split + 'B')", 'self.im_list[idx]'], {}), "(self.root_dir, self.split + 'B', self.im_list[idx])\n", (1109, 1161), False, 'import os\n'), ((1176, 1196), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1186, 1196), False, 'from PIL import Image\n'), ((1924, 1944), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (1939, 1944), False, 'import torch\n'), ((2082, 2180), 'os.path.join', 'os.path.join', (['self.root_dir', 'subfolder', "('%05i.png' % (idx + self.split_ranges[self.split][0]))"], {}), "(self.root_dir, subfolder, '%05i.png' % (idx + self.\n split_ranges[self.split][0]))\n", (2094, 2180), False, 'import os\n'), ((2190, 2210), 'PIL.Image.open', 'Image.open', (['img_name'], {}), '(img_name)\n', (2200, 2210), False, 'from PIL import Image\n'), ((4481, 4510), 'numpy.rot90', 'np.rot90', (['X'], {'k': '(1)', 'axes': '(1, 2)'}), '(X, k=1, axes=(1, 2))\n', (4489, 4510), True, 'import numpy as np\n'), ((2561, 2583), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (2570, 2583), True, 'import numpy as np\n'), ((4806, 4863), 'cv2.resize', 'cv2.resize', (['mask', '(w, w)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(mask, (w, w), interpolation=cv2.INTER_NEAREST)\n', (4816, 4863), False, 'import cv2\n'), ((7898, 7920), 'numpy.random.shuffle', 'np.random.shuffle', (['X_B'], {}), '(X_B)\n', (7915, 7920), True, 'import numpy as np\n'), ((791, 826), 'os.path.join', 'os.path.join', (['root_dir', "(split + 'B')"], {}), "(root_dir, split + 'B')\n", (803, 826), False, 
'import os\n'), ((3018, 3038), 'torchvision.transforms.Resize', 'transforms.Resize', (['w'], {}), '(w)\n', (3035, 3038), True, 'import torchvision.transforms as transforms\n'), ((3040, 3061), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3059, 3061), True, 'import torchvision.transforms as transforms\n'), ((3407, 3450), 'utils.CropTransform', 'CropTransform', (['(25, 50, 25 + 128, 50 + 128)'], {}), '((25, 50, 25 + 128, 50 + 128))\n', (3420, 3450), False, 'from utils import CropTransform\n'), ((3498, 3518), 'torchvision.transforms.Resize', 'transforms.Resize', (['w'], {}), '(w)\n', (3515, 3518), True, 'import torchvision.transforms as transforms\n'), ((3570, 3591), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3589, 3591), True, 'import torchvision.transforms as transforms\n'), ((3812, 3845), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(256, 256)'], {}), '((256, 256))\n', (3833, 3845), True, 'import torchvision.transforms as transforms\n'), ((3920, 3945), 'torchvision.transforms.Resize', 'transforms.Resize', (['(w, w)'], {}), '((w, w))\n', (3937, 3945), True, 'import torchvision.transforms as transforms\n'), ((4020, 4041), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4039, 4041), True, 'import torchvision.transforms as transforms\n'), ((4556, 4598), 'cv2.imread', 'cv2.imread', (['"""data/inpaint_mask_simple.png"""'], {}), "('data/inpaint_mask_simple.png')\n", (4566, 4598), False, 'import cv2\n'), ((5016, 5049), 'numpy.mean', 'np.mean', (['X'], {'axis': '(3)', 'keepdims': '(True)'}), '(X, axis=3, keepdims=True)\n', (5023, 5049), True, 'import numpy as np\n'), ((2815, 2827), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (2819, 2827), False, 'from tqdm import tqdm\n'), ((5148, 5192), 'cv2.Laplacian', 'cv2.Laplacian', (['X[i]', 'cv2.CV_32F'], {'ksize': 'ksize'}), '(X[i], cv2.CV_32F, ksize=ksize)\n', (5161, 5192), False, 'import cv2\n'), ((2715, 
2727), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (2719, 2727), False, 'from tqdm import tqdm\n'), ((5533, 5599), 'cv2.resize', 'cv2.resize', (['X[i]', '(w // 8, w // 8)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(X[i], (w // 8, w // 8), interpolation=cv2.INTER_LINEAR)\n', (5543, 5599), False, 'import cv2\n'), ((5293, 5314), 'numpy.mean', 'np.mean', (['X[i]'], {'axis': '(2)'}), '(X[i], axis=2)\n', (5300, 5314), True, 'import numpy as np\n')]
"""Dummy Policy for algo tests..""" import numpy as np from garage.np.policies import Policy class DummyPolicy(Policy): """Dummy Policy. Args: env_spec (garage.envs.env_spec.EnvSpec): Environment specification. """ def __init__(self, env_spec): # pylint: disable=super-init-not-called self._env_spec = env_spec self._param = [] self._param_values = np.random.uniform(-1, 1, 1000) def get_action(self, observation): """Get single action from this policy for the input observation. Args: observation (numpy.ndarray): Observation from environment. Returns: numpy.ndarray: Predicted action. dict: Distribution parameters. """ return self.action_space.sample(), dict(dummy='dummy', mean=0.) def get_actions(self, observations): """Get multiple actions from this policy for the input observations. Args: observations (numpy.ndarray): Observations from environment. Returns: numpy.ndarray: Predicted actions. dict: Distribution parameters. """ n = len(observations) action, action_info = self.get_action(None) return [action] * n, action_info def get_params_internal(self): """Return a list of policy internal params. Returns: list: Policy parameters. """ return self._param def get_param_values(self): """Return values of params. Returns: np.ndarray: Policy parameters values. """ return self._param_values @property def vectorized(self): """Vectorized or not. Returns: bool: True if vectorized. """ return True @property def env_spec(self): """Policy environment specification. Returns: garage.EnvSpec: Environment specification. """ return self._env_spec class DummyPolicyWithoutVectorized(DummyPolicy): """Dummy Policy without vectorized. Args: env_spec (garage.envs.env_spec.EnvSpec): Environment specification. """ def __init__(self, env_spec): super().__init__(env_spec=env_spec) @property def vectorized(self): """Vectorized or not. Returns: bool: True if vectorized. """ return False
[ "numpy.random.uniform" ]
[((411, 441), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (428, 441), True, 'import numpy as np\n')]
# This file contains code from https://github.com/tensorflow/models/blob/master/research/deeplab/deeplab_demo.ipynb # and was released under an Apache 2 license import os import tarfile import numpy as np import tensorflow as tf import warnings from config import _FULL_MODEL_PATH from config import _MOBILE_MODEL_PATH from PIL import Image import io import logging logger = logging.getLogger() # Import model parameters as environmental variables if they were passed to docker run model_type = os.environ.get('MODEL_TYPE', default='mobile') image_size = int(os.environ.get('IMAGE_SIZE', default=513)) if (image_size < 16) or (image_size > 1024): image_size = 513 warnings.warn('image size not in range 16 to 1024, reverted to default image size of 513') if (model_type != 'full') and (model_type != 'mobile'): model_type = 'mobile' warnings.warn('model type not mobile or full, reverted to default model type mobile') class DeepLabModel(object): """Class to load deeplab model and run inference.""" INPUT_TENSOR_NAME = 'ImageTensor:0' OUTPUT_TENSOR_NAME = 'SemanticPredictions:0' FROZEN_GRAPH_NAME = 'frozen_inference_graph' def __init__(self, tarball_path): """Creates and loads pre-trained deeplab model.""" self.graph = tf.Graph() graph_def = None # Extract frozen graph from tar archive. tar_file = tarfile.open(tarball_path) for tar_info in tar_file.getmembers(): if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name): file_handle = tar_file.extractfile(tar_info) graph_def = tf.GraphDef.FromString(file_handle.read()) break tar_file.close() if graph_def is None: raise RuntimeError('Cannot find inference graph in tar archive.') with self.graph.as_default(): tf.import_graph_def(graph_def, name='') self.sess = tf.Session(graph=self.graph) def run(self, image): """Runs inference on a single image. Args: image: A PIL.Image object, raw input image. Returns: resized_image: RGB image resized from original input image. seg_map: Segmentation map of `resized_image`. 
""" width, height = image.size resize_ratio = 1.0 * image_size / max(width, height) target_size = (int(resize_ratio * width), int(resize_ratio * height)) resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS) batch_seg_map = self.sess.run( self.OUTPUT_TENSOR_NAME, feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]}) seg_map = batch_seg_map[0] return resized_image, seg_map def read_image(image_data): try: image = Image.open(io.BytesIO(image_data)) except Exception as excptn: print(str(excptn)) from flask import abort abort(400, "The provided input is not a valid image.") return image class ModelWrapper(object): """Model wrapper for TensorFlow models in SavedModel format""" def __init__(self): # Set model path based on environmental variable if model_type == 'full': self.model = DeepLabModel(_FULL_MODEL_PATH) if model_type == 'mobile': self.model = DeepLabModel(_MOBILE_MODEL_PATH) def predict(self, x): resized_im, seg_map = self.model.run(x) return resized_im, seg_map
[ "logging.getLogger", "tensorflow.Graph", "tarfile.open", "tensorflow.Session", "os.environ.get", "io.BytesIO", "numpy.asarray", "os.path.basename", "tensorflow.import_graph_def", "warnings.warn", "flask.abort" ]
[((376, 395), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (393, 395), False, 'import logging\n'), ((498, 544), 'os.environ.get', 'os.environ.get', (['"""MODEL_TYPE"""'], {'default': '"""mobile"""'}), "('MODEL_TYPE', default='mobile')\n", (512, 544), False, 'import os\n'), ((562, 603), 'os.environ.get', 'os.environ.get', (['"""IMAGE_SIZE"""'], {'default': '(513)'}), "('IMAGE_SIZE', default=513)\n", (576, 603), False, 'import os\n'), ((676, 776), 'warnings.warn', 'warnings.warn', (['"""image size not in range 16 to 1024, reverted to default image size of 513"""'], {}), "(\n 'image size not in range 16 to 1024, reverted to default image size of 513'\n )\n", (689, 776), False, 'import warnings\n'), ((854, 944), 'warnings.warn', 'warnings.warn', (['"""model type not mobile or full, reverted to default model type mobile"""'], {}), "(\n 'model type not mobile or full, reverted to default model type mobile')\n", (867, 944), False, 'import warnings\n'), ((1285, 1295), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1293, 1295), True, 'import tensorflow as tf\n'), ((1390, 1416), 'tarfile.open', 'tarfile.open', (['tarball_path'], {}), '(tarball_path)\n', (1402, 1416), False, 'import tarfile\n'), ((1939, 1967), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (1949, 1967), True, 'import tensorflow as tf\n'), ((1878, 1917), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (1897, 1917), True, 'import tensorflow as tf\n'), ((2813, 2835), 'io.BytesIO', 'io.BytesIO', (['image_data'], {}), '(image_data)\n', (2823, 2835), False, 'import io\n'), ((2936, 2990), 'flask.abort', 'abort', (['(400)', '"""The provided input is not a valid image."""'], {}), "(400, 'The provided input is not a valid image.')\n", (2941, 2990), False, 'from flask import abort\n'), ((1505, 1536), 'os.path.basename', 'os.path.basename', (['tar_info.name'], {}), '(tar_info.name)\n', 
(1521, 1536), False, 'import os\n'), ((2645, 2670), 'numpy.asarray', 'np.asarray', (['resized_image'], {}), '(resized_image)\n', (2655, 2670), True, 'import numpy as np\n')]
""" MAP Client Plugin Step """ import json import os import numpy as np from mapclientplugins.cimconverterstep.SurfaceExtractor import Subdivision_Surface from PySide2 import QtGui from mapclient.mountpoints.workflowstep import WorkflowStepMountPoint from mapclientplugins.cimconverterstep.configuredialog import ConfigureDialog class CIMConverterStep(WorkflowStepMountPoint): """ Skeleton step which is intended to be a helpful starting point for new steps. """ def __init__(self, location): super(CIMConverterStep, self).__init__('CIM Converter', location) self._configured = False # A step cannot be executed until it has been configured. self._category = 'Source' # Add any other initialisation code here: self._icon = QtGui.QImage(':/cimconverterstep/images/data-source.png') # Ports: self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port', 'http://physiomeproject.org/workflow/1.0/rdf-schema#provides', 'http://physiomeproject.org/workflow/1.0/rdf-schema#file_location')) # Port data: self._portData0 = None # http://physiomeproject.org/workflow/1.0/rdf-schema#file_location # Config: self._config = {} self._config['identifier'] = '' def execute(self): """ Add your code here that will kick off the execution of the step. Make sure you call the _doneExecution() method when finished. This method may be connected up to a button in a widget for example. """ # Put your execute step code here before calling the '_doneExecution' method. 
input_path = os.path.abspath(os.path.join(self._location, self._config['file'])) output_path=os.path.join(input_path+'\\csv') if not os.path.isdir(output_path): os.makedirs(output_path) model = Subdivision_Surface('InitFromCIM',input_path) time_frame = np.shape(model.etPos)[2] for i in range(time_frame): LVendo = model.etPos[range(model.etVertexStartEnd[0,0],model.etVertexStartEnd[0,1]+1),:,i] RV_S = model.etPos[range(model.etVertexStartEnd[1,0],model.etVertexStartEnd[1,1]+1),:,i] RV_FW = model.etPos[range(model.etVertexStartEnd[2,0],model.etVertexStartEnd[2,1]+1),:,i] Epi = model.etPos[range(model.etVertexStartEnd[3,0],model.etVertexStartEnd[3,1]+1),:,i] MV = model.etPos[range(model.etVertexStartEnd[4,0],model.etVertexStartEnd[4,1]+1),:,i] AV = model.etPos[range(model.etVertexStartEnd[5,0],model.etVertexStartEnd[5,1]+1),:,i] TV = model.etPos[range(model.etVertexStartEnd[6,0],model.etVertexStartEnd[6,1]+1),:,i] PV = model.etPos[range(model.etVertexStartEnd[7,0],model.etVertexStartEnd[7,1]+1),:,i] np.savetxt(os.path.abspath(output_path)+'\\'+'LVendo_'+str(i+1)+'.csv',LVendo,delimiter=',') np.savetxt(os.path.abspath(output_path)+'\\'+'RV_septum_'+str(i+1)+'.csv',RV_S,delimiter=',') np.savetxt(os.path.abspath(output_path)+'\\'+'RV_freewall_'+str(i+1)+'.csv',RV_FW,delimiter=',') np.savetxt(os.path.abspath(output_path)+'\\'+'Epi_'+str(i+1)+'.csv',Epi,delimiter=',') np.savetxt(os.path.abspath(output_path)+'\\'+'MV_'+str(i+1)+'.csv',MV[:-1,:],delimiter=',') np.savetxt(os.path.abspath(output_path)+'\\'+'AV_'+str(i+1)+'.csv',AV[:-1,:],delimiter=',') np.savetxt(os.path.abspath(output_path)+'\\'+'TV_'+str(i+1)+'.csv',TV[:-1,:],delimiter=',') np.savetxt(os.path.abspath(output_path)+'\\'+'PV_'+str(i+1)+'.csv',PV[:-1,:],delimiter=',') self._portData0=output_path self._doneExecution() def getPortData(self, index): """ Add your code here that will return the appropriate objects for this step. The index is the index of the port in the port list. 
If there is only one provides port for this step then the index can be ignored. :param index: Index of the port to return. """ return self._portData0 # http://physiomeproject.org/workflow/1.0/rdf-schema#file_location def configure(self): """ This function will be called when the configure icon on the step is clicked. It is appropriate to display a configuration dialog at this time. If the conditions for the configuration of this step are complete then set: self._configured = True """ dlg = ConfigureDialog(self._main_window) dlg.setWorkflowLocation(self._location) dlg.identifierOccursCount = self._identifierOccursCount dlg.setConfig(self._config) dlg.validate() dlg.setModal(True) if dlg.exec_(): self._config = dlg.getConfig() self._configured = dlg.validate() self._configuredObserver() def getIdentifier(self): """ The identifier is a string that must be unique within a workflow. """ return self._config['identifier'] def setIdentifier(self, identifier): """ The framework will set the identifier for this step when it is loaded. """ self._config['identifier'] = identifier def serialize(self): """ Add code to serialize this step to string. This method should implement the opposite of 'deserialize'. """ return json.dumps(self._config, default=lambda o: o.__dict__, sort_keys=True, indent=4) def deserialize(self, string): """ Add code to deserialize this step from string. This method should implement the opposite of 'serialize'. :param string: JSON representation of the configuration in a string. """ self._config.update(json.loads(string)) d = ConfigureDialog() d.setWorkflowLocation(self._location) d.identifierOccursCount = self._identifierOccursCount d.setConfig(self._config) self._configured = d.validate()
[ "json.loads", "mapclientplugins.cimconverterstep.configuredialog.ConfigureDialog", "os.makedirs", "json.dumps", "os.path.join", "os.path.isdir", "mapclientplugins.cimconverterstep.SurfaceExtractor.Subdivision_Surface", "numpy.shape", "PySide2.QtGui.QImage", "os.path.abspath" ]
[((789, 846), 'PySide2.QtGui.QImage', 'QtGui.QImage', (['""":/cimconverterstep/images/data-source.png"""'], {}), "(':/cimconverterstep/images/data-source.png')\n", (801, 846), False, 'from PySide2 import QtGui\n'), ((1788, 1822), 'os.path.join', 'os.path.join', (["(input_path + '\\\\csv')"], {}), "(input_path + '\\\\csv')\n", (1800, 1822), False, 'import os\n'), ((1917, 1963), 'mapclientplugins.cimconverterstep.SurfaceExtractor.Subdivision_Surface', 'Subdivision_Surface', (['"""InitFromCIM"""', 'input_path'], {}), "('InitFromCIM', input_path)\n", (1936, 1963), False, 'from mapclientplugins.cimconverterstep.SurfaceExtractor import Subdivision_Surface\n'), ((4551, 4585), 'mapclientplugins.cimconverterstep.configuredialog.ConfigureDialog', 'ConfigureDialog', (['self._main_window'], {}), '(self._main_window)\n', (4566, 4585), False, 'from mapclientplugins.cimconverterstep.configuredialog import ConfigureDialog\n'), ((5478, 5563), 'json.dumps', 'json.dumps', (['self._config'], {'default': '(lambda o: o.__dict__)', 'sort_keys': '(True)', 'indent': '(4)'}), '(self._config, default=lambda o: o.__dict__, sort_keys=True, indent=4\n )\n', (5488, 5563), False, 'import json\n'), ((5880, 5897), 'mapclientplugins.cimconverterstep.configuredialog.ConfigureDialog', 'ConfigureDialog', ([], {}), '()\n', (5895, 5897), False, 'from mapclientplugins.cimconverterstep.configuredialog import ConfigureDialog\n'), ((1716, 1766), 'os.path.join', 'os.path.join', (['self._location', "self._config['file']"], {}), "(self._location, self._config['file'])\n", (1728, 1766), False, 'import os\n'), ((1836, 1862), 'os.path.isdir', 'os.path.isdir', (['output_path'], {}), '(output_path)\n', (1849, 1862), False, 'import os\n'), ((1876, 1900), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (1887, 1900), False, 'import os\n'), ((1984, 2005), 'numpy.shape', 'np.shape', (['model.etPos'], {}), '(model.etPos)\n', (1992, 2005), True, 'import numpy as np\n'), ((5847, 5865), 'json.loads', 
'json.loads', (['string'], {}), '(string)\n', (5857, 5865), False, 'import json\n'), ((2870, 2898), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (2885, 2898), False, 'import os\n'), ((2975, 3003), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (2990, 3003), False, 'import os\n'), ((3081, 3109), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (3096, 3109), False, 'import os\n'), ((3190, 3218), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (3205, 3218), False, 'import os\n'), ((3289, 3317), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (3304, 3317), False, 'import os\n'), ((3393, 3421), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (3408, 3421), False, 'import os\n'), ((3497, 3525), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (3512, 3525), False, 'import os\n'), ((3601, 3629), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (3616, 3629), False, 'import os\n')]
"""Client to access DICOM Part10 files through a layer of abstraction.""" import collections import io import logging import math import os import re import sqlite3 import sys import time import traceback from collections import OrderedDict from enum import Enum from pathlib import Path from typing import ( Any, Dict, Iterator, Iterable, List, Mapping, Optional, Sequence, Tuple, Union, ) import numpy as np from PIL import Image from PIL.ImageCms import ImageCmsProfile, createProfile from pydicom.dataset import Dataset, FileMetaDataset from pydicom.encaps import encapsulate, get_frame_offsets from pydicom.errors import InvalidDicomError from pydicom.filebase import DicomFileLike from pydicom.datadict import dictionary_VR, keyword_for_tag, tag_for_keyword from pydicom.filereader import ( data_element_offset_to_value, dcmread, read_file_meta_info, read_partial, ) from pydicom.filewriter import dcmwrite from pydicom.pixel_data_handlers.numpy_handler import unpack_bits from pydicom.tag import ( BaseTag, ItemTag, SequenceDelimiterTag, Tag, TupleTag, ) from pydicom.uid import UID from pydicom.valuerep import DA, DT, TM logger = logging.getLogger(__name__) _FLOAT_PIXEL_DATA_TAGS = {0x7FE00008, 0x7FE00009, } _UINT_PIXEL_DATA_TAGS = {0x7FE00010, } _PIXEL_DATA_TAGS = _FLOAT_PIXEL_DATA_TAGS.union(_UINT_PIXEL_DATA_TAGS) _JPEG_SOI_MARKER = b'\xFF\xD8' # also JPEG-LS _JPEG_EOI_MARKER = b'\xFF\xD9' # also JPEG-LS _JPEG2000_SOC_MARKER = b'\xFF\x4F' _JPEG2000_EOC_MARKER = b'\xFF\xD9' _START_MARKERS = {_JPEG_SOI_MARKER, _JPEG2000_SOC_MARKER} _END_MARKERS = {_JPEG_EOI_MARKER, _JPEG2000_EOC_MARKER} def _get_bot(fp: DicomFileLike, number_of_frames: int) -> List[int]: """Read or build the Basic Offset Table (BOT). 
Parameters ---------- fp: pydicom.filebase.DicomFileLike Pointer for DICOM PS3.10 file stream positioned at the first byte of the Pixel Data element number_of_frames: int Number of frames contained in the Pixel Data element Returns ------- List[int] Offset of each Frame item in bytes from the first byte of the Pixel Data element following the BOT item Note ---- Moves the pointer to the first byte of the open file following the BOT item (the first byte of the first Frame item). """ logger.debug('read Basic Offset Table') basic_offset_table = _read_bot(fp) first_frame_offset = fp.tell() tag = TupleTag(fp.read_tag()) if int(tag) != ItemTag: raise ValueError('Reading of Basic Offset Table failed') fp.seek(first_frame_offset, 0) # Basic Offset Table item must be present, but it may be empty if len(basic_offset_table) == 0: logger.debug('Basic Offset Table item is empty') if len(basic_offset_table) != number_of_frames: logger.debug('build Basic Offset Table item') basic_offset_table = _build_bot( fp, number_of_frames=number_of_frames ) return basic_offset_table def _read_bot(fp: DicomFileLike) -> List[int]: """Read the Basic Offset Table (BOT) of an encapsulated Pixel Data element. Parameters ---------- fp: pydicom.filebase.DicomFileLike Pointer for DICOM PS3.10 file stream positioned at the first byte of the Pixel Data element Returns ------- List[int] Offset of each Frame item in bytes from the first byte of the Pixel Data element following the BOT item Note ---- Moves the pointer to the first byte of the open file following the BOT item (the first byte of the first Frame item). Raises ------ IOError When file pointer is not positioned at first byte of Pixel Data element """ tag = TupleTag(fp.read_tag()) if int(tag) not in _PIXEL_DATA_TAGS: raise IOError( 'Expected file pointer at first byte of Pixel Data element.' 
def _build_bot(fp: DicomFileLike, number_of_frames: int) -> List[int]:
    """Build a Basic Offset Table (BOT) for an encapsulated Pixel Data element.

    Parameters
    ----------
    fp: pydicom.filebase.DicomFileLike
        Pointer for DICOM PS3.10 file stream positioned at the first byte of
        the Pixel Data element following the empty Basic Offset Table (BOT)
    number_of_frames: int
        Total number of frames in the dataset

    Returns
    -------
    List[int]
        Offset of each Frame item in bytes from the first byte of the Pixel
        Data element following the BOT item

    Note
    ----
    Moves the pointer back to the first byte of the Pixel Data element
    following the BOT item (the first byte of the first Frame item).

    Raises
    ------
    IOError
        When file pointer is not positioned at first byte of first Frame item
        after Basic Offset Table item or when parsing of Frame item headers
        fails
    ValueError
        When the number of offsets doesn't match the specified number of
        frames

    """
    initial_position = fp.tell()
    offset_values = []
    current_offset = 0
    i = 0  # counts Frame items (not fragments) found so far
    while True:
        frame_position = fp.tell()
        tag = TupleTag(fp.read_tag())
        # The sequence delimiter marks the end of the Pixel Data element.
        if int(tag) == SequenceDelimiterTag:
            break
        if int(tag) != ItemTag:
            # Restore the pointer before raising so the stream stays usable.
            fp.seek(initial_position, 0)
            raise IOError(
                'Building Basic Offset Table (BOT) failed. Expected tag of '
                f'Frame item #{i} at position {frame_position}.'
            )
        length = fp.read_UL()
        if length % 2:
            fp.seek(initial_position, 0)
            raise IOError(
                'Building Basic Offset Table (BOT) failed. '
                f'Length of Frame item #{i} is not a multiple of 2.'
            )
        elif length == 0:
            fp.seek(initial_position, 0)
            raise IOError(
                'Building Basic Offset Table (BOT) failed. '
                f'Length of Frame item #{i} is zero.'
            )

        first_two_bytes = fp.read(2)
        if not fp.is_little_endian:
            first_two_bytes = first_two_bytes[::-1]

        # In case of fragmentation, we only want to get the offsets to the
        # first fragment of a given frame. We can identify those based on the
        # JPEG and JPEG 2000 markers that should be found at the beginning and
        # end of the compressed byte stream.
        if first_two_bytes in _START_MARKERS:
            current_offset = frame_position - initial_position
            offset_values.append(current_offset)
            i += 1

        fp.seek(length - 2, 1)  # minus the first two bytes

    if len(offset_values) != number_of_frames:
        raise ValueError(
            'Number of frame items does not match specified Number of Frames.'
        )
    else:
        basic_offset_table = offset_values

    fp.seek(initial_position, 0)
    return basic_offset_table
def _check_file_format(self, fp: DicomFileLike) -> Tuple[bool, bool]:
    """Check whether file object represents a DICOM Part 10 file.

    Parameters
    ----------
    fp: pydicom.filebase.DicomFileLike
        DICOM file object

    Returns
    -------
    is_little_endian: bool
        Whether the data set is encoded in little endian transfer syntax
    is_implicit_VR: bool
        Whether value representations of data elements in the data set
        are implicit

    Raises
    ------
    InvalidDicomError
        If the file object does not represent a DICOM Part 10 file

    """
    def is_main_tag(tag: BaseTag, VR: Optional[str], length: int) -> bool:
        # Stop once the first element past the file meta group is reached.
        return tag >= 0x00040000

    saved_position = fp.tell()
    partial_dataset = read_partial(fp, stop_when=is_main_tag)  # type: ignore
    # Rewind so the caller sees the stream exactly as it was handed in.
    fp.seek(saved_position)
    uid = UID(partial_dataset.file_meta.TransferSyntaxUID)
    return (uid.is_little_endian, uid.is_implicit_VR)
def open(self) -> None:
    """Open file for reading.

    Raises
    ------
    FileNotFoundError
        When file cannot be found
    OSError
        When file cannot be opened
    IOError
        When DICOM metadata cannot be read from file
    ValueError
        When DICOM dataset contained in file does not represent an image

    Note
    ----
    Reads the metadata of the DICOM Data Set contained in the file and
    builds a Basic Offset Table to speed up subsequent frame-level access.

    """
    # This method sets several attributes on the object, which cannot
    # (or should not) be set in the constructor. Other methods assert that
    # this method has been called first by checking the value of the
    # "_is_open" attribute.
    if self._is_open:
        return

    if self._filepointer is None:
        # This should not happen; the check is just to keep mypy happy.
        if self._filepath is None:
            raise ValueError(f'File not found: "{self._filepath}".')

        logger.debug('read File Meta Information')
        try:
            file_meta = read_file_meta_info(self._filepath)
        except FileNotFoundError:
            # NOTE(review): message suggests a missing path, but this branch
            # fires when the file itself is absent — confirm intent.
            raise ValueError('No file path was set.')
        except InvalidDicomError:
            raise InvalidDicomError(
                f'File is not a valid DICOM file: "{self._filepath}".'
            )
        except Exception:
            raise IOError(f'Could not read file: "{self._filepath}".')

        transfer_syntax_uid = UID(file_meta.TransferSyntaxUID)
        if transfer_syntax_uid is None:
            # FIX: the original string lacked the "f" prefix, so the
            # "{self._filepath}" placeholder was emitted verbatim; it also
            # lacked the space between the two sentences.
            raise IOError(
                f'File is not a valid DICOM file: "{self._filepath}". '
                'It lacks File Meta Information.'
            )
        # NOTE(review): _transfer_syntax_uid is only assigned on this path;
        # confirm instances constructed from an already-open DicomFileLike
        # get it set before the encapsulation check below.
        self._transfer_syntax_uid: UID = transfer_syntax_uid
        is_little_endian = transfer_syntax_uid.is_little_endian
        is_implicit_VR = transfer_syntax_uid.is_implicit_VR
        self._filepointer = DicomFileLike(open(self._filepath, 'rb'))
        self._filepointer.is_little_endian = is_little_endian
        self._filepointer.is_implicit_VR = is_implicit_VR

    logger.debug('read metadata elements')
    try:
        tmp = dcmread(self._fp, stop_before_pixels=True)
    except Exception as error:
        raise IOError(
            f'DICOM metadata cannot be read from file: "{error}"'
        )

    # Construct a new Dataset that is fully decoupled from the file,
    # i.e., that does not contain any File Meta Information
    del tmp.file_meta
    self._metadata = Dataset(tmp)

    # FIX: np.product was deprecated and removed in NumPy 2.0; a plain
    # integer product is equivalent here.
    self._pixels_per_frame = int(
        self._metadata.Rows
        * self._metadata.Columns
        * self._metadata.SamplesPerPixel
    )

    self._pixel_data_offset = self._fp.tell()
    # Determine whether dataset contains a Pixel Data element
    try:
        tag = TupleTag(self._fp.read_tag())
    except EOFError:
        raise ValueError(
            'Dataset does not represent an image information entity.'
        )
    if int(tag) not in _PIXEL_DATA_TAGS:
        raise ValueError(
            'Dataset does not represent an image information entity.'
        )
    self._as_float = False
    if int(tag) in _FLOAT_PIXEL_DATA_TAGS:
        self._as_float = True

    # Reset the file pointer to the beginning of the Pixel Data element
    self._fp.seek(self._pixel_data_offset, 0)

    logger.debug('build Basic Offset Table')
    try:
        number_of_frames = int(self._metadata.NumberOfFrames)
    except AttributeError:
        number_of_frames = 1

    if self._transfer_syntax_uid.is_encapsulated:
        try:
            self._basic_offset_table = _get_bot(
                self._fp,
                number_of_frames=number_of_frames
            )
        except Exception as error:
            raise IOError(
                f'Failed to build Basic Offset Table: "{error}"'
            )
        self._first_frame_offset = self._fp.tell()
    else:
        if self._fp.is_implicit_VR:
            header_offset = 4 + 4  # tag and length
        else:
            header_offset = 4 + 2 + 2 + 4  # tag, VR, reserved, and length
        self._first_frame_offset = self._pixel_data_offset + header_offset
        n_pixels = self._pixels_per_frame
        bits_allocated = self._metadata.BitsAllocated
        if bits_allocated == 1:
            # Determine the nearest whole number of bytes needed to contain
            # 1-bit pixel data. e.g. 10 x 10 1-bit pixels is 100 bits,
            # which are packed into 12.5 -> 13 bytes
            self._bytes_per_frame_uncompressed = (
                n_pixels // 8 + (n_pixels % 8 > 0)
            )
            # Frames of 1-bit data need not start on byte boundaries.
            self._basic_offset_table = [
                int(math.floor(i * n_pixels / 8))
                for i in range(number_of_frames)
            ]
        else:
            self._bytes_per_frame_uncompressed = (
                n_pixels * bits_allocated // 8
            )
            self._basic_offset_table = [
                i * self._bytes_per_frame_uncompressed
                for i in range(number_of_frames)
            ]

    if len(self._basic_offset_table) != number_of_frames:
        raise ValueError(
            'Length of Basic Offset Table does not match '
            'Number of Frames.'
        )

    self._is_open = True
def read_frame(self, index: int) -> bytes:
    """Read the pixel data of an individual frame item.

    Parameters
    ----------
    index: int
        Zero-based frame index

    Returns
    -------
    bytes
        Pixel data of a given frame item encoded in the transfer syntax.

    Raises
    ------
    ValueError
        When `index` is out of range or a frame item cannot be parsed
    IOError
        When frame could not be read

    """
    self._assert_is_open()
    # FIX: "index" is zero-based, so the largest valid value is
    # number_of_frames - 1. The original ">" comparison allowed
    # index == number_of_frames through to an IndexError below.
    if index >= self.number_of_frames:
        raise ValueError(
            f'Frame index {index} exceeds number of frames in image: '
            f'{self.number_of_frames}.'
        )
    logger.debug(f'read frame #{index}')

    frame_offset = self._basic_offset_table[index]
    self._fp.seek(self._first_frame_offset + frame_offset, 0)
    if self._transfer_syntax_uid.is_encapsulated:
        try:
            stop_at = self._basic_offset_table[index + 1] - frame_offset
        except IndexError:
            # For the last frame, there is no next offset available.
            stop_at = -1
        n = 0
        # A frame may consist of multiple items (fragments).
        fragments = []
        while True:
            tag = TupleTag(self._fp.read_tag())
            if n == stop_at or int(tag) == SequenceDelimiterTag:
                break
            if int(tag) != ItemTag:
                raise ValueError(f'Failed to read frame #{index}.')
            length = self._fp.read_UL()
            fragments.append(self._fp.read(length))
            # 4 tag bytes + 4 length bytes + item payload
            n += 4 + 4 + length
        frame_data = b''.join(fragments)
    else:
        frame_data = self._fp.read(self._bytes_per_frame_uncompressed)
    if len(frame_data) == 0:
        raise IOError(f'Failed to read frame #{index}.')
    return frame_data
def decode_frame(self, index: int, value: bytes):
    """Decode the pixel data of an individual frame item.

    Parameters
    ----------
    index: int
        Zero-based frame index
    value: bytes
        Value of a Frame item

    Returns
    -------
    numpy.ndarray
        Array of decoded pixels of the frame with shape (Rows x Columns)
        in case of a monochrome image or (Rows x Columns x SamplesPerPixel)
        in case of a color image.

    """
    self._assert_is_open()
    logger.debug(f'decode frame #{index}')

    metadata = self.metadata
    if metadata.BitsAllocated == 1:
        unpacked_frame = unpack_bits(value)
        rows, columns = metadata.Rows, self.metadata.Columns
        n_pixels = self._pixels_per_frame
        # Frames of 1-bit data need not start on byte boundaries. The
        # expression below is equivalent to (index * n_pixels) % 8: the
        # bit offset of this frame's first pixel within its first byte.
        pixel_offset = int(((index * n_pixels / 8) % 1) * 8)
        pixel_array = unpacked_frame[pixel_offset:pixel_offset + n_pixels]
        return pixel_array.reshape(rows, columns)
    else:
        # This hack creates a small dataset containing a Pixel Data element
        # with only a single frame item, which can then be decoded using the
        # existing pydicom API.
        ds = Dataset()
        ds.file_meta = FileMetaDataset()
        ds.file_meta.TransferSyntaxUID = self._transfer_syntax_uid
        ds.Rows = metadata.Rows
        ds.Columns = metadata.Columns
        ds.SamplesPerPixel = metadata.SamplesPerPixel
        ds.PhotometricInterpretation = metadata.PhotometricInterpretation
        ds.PixelRepresentation = metadata.PixelRepresentation
        ds.PlanarConfiguration = metadata.get('PlanarConfiguration', None)
        ds.BitsAllocated = metadata.BitsAllocated
        ds.BitsStored = metadata.BitsStored
        ds.HighBit = metadata.HighBit
        if self._transfer_syntax_uid.is_encapsulated:
            ds.PixelData = encapsulate(frames=[value])
        else:
            ds.PixelData = value
        return ds.pixel_array
def _build_acceptable_media_type_lut(
    media_types: Tuple[Union[str, Tuple[str, str]], ...],
    supported_media_type_lut: Mapping[str, Iterable[str]]
) -> Mapping[str, Iterable[str]]:
    """Map each requested media type to its acceptable transfer syntaxes.

    A plain media type accepts every supported transfer syntax for that
    type; a ``(media_type, transfer_syntax_uid)`` tuple restricts the
    entry to that one transfer syntax.
    """
    # If no acceptable transfer syntax has been specified, then we just return
    # the instance in whatever transfer syntax is has been stored. This
    # behavior should be compliant with the standard (Part 18 Section 8.7.3.4):
    # If the Transfer Syntax is not specified in a message, then the Default
    # Transfer Syntax shall be used, unless the origin server has only access
    # to the pixel data in lossy compressed form or the pixel data in a
    # lossless compressed form that is of such length that it cannot be encoded
    # in the Explicit VR Little Endian Transfer Syntax.
    lut = collections.defaultdict(set)
    for requested in media_types:
        if isinstance(requested, str):
            media_type = str(requested)
            if media_type not in supported_media_type_lut:
                raise ValueError(
                    f'Media type "{media_type}" is not a valid for '
                    'retrieval of instance frames.'
                )
            lut[media_type].update(supported_media_type_lut[media_type])
        elif isinstance(requested, tuple):
            media_type = str(requested[0])
            if media_type not in supported_media_type_lut:
                raise ValueError(
                    f'Media type "{media_type}" is not a valid for '
                    'retrieval of instance frames.'
                )
            if len(requested) > 1:
                ts_uid = str(requested[1])
                if ts_uid not in supported_media_type_lut[media_type]:
                    raise ValueError(
                        f'Transfer syntax "{ts_uid}" is not a valid for '
                        'retrieval of instance frames with media type '
                        f'"{media_type}".'
                    )
                lut[media_type].add(ts_uid)
            else:
                lut[media_type].update(supported_media_type_lut[media_type])
        else:
            raise ValueError('Argument "media_types" is malformatted.')
    return lut
def __init__(
    self,
    base_dir: Union[Path, str],
    update_db: bool = False,
    recreate_db: bool = False,
    in_memory: bool = False
):
    """Instantiate client.

    Parameters
    ----------
    base_dir: Union[pathlib.Path, str]
        Path to base directory containing DICOM files
    update_db: bool, optional
        Whether the database should be updated (default: ``False``). If
        ``True``, the client will search `base_dir` recursively for new
        DICOM Part10 files and create database entries for each file.
        The client will further delete any database entries for files
        that no longer exist on the file system.
    recreate_db: bool, optional
        Whether the database should be recreated (default: ``False``).
        If ``True``, the client will search `base_dir` recursively for
        DICOM Part10 files and create database entries for each file.
    in_memory: bool, optional
        Whether the database should only be stored in memory (default:
        ``False``).

    """
    self.base_dir = Path(base_dir).resolve()

    if in_memory:
        filename = ':memory:'
    else:
        filename = '.dicom-file-client.db'

    # NOTE(review): for in_memory=True the ':memory:' name is still joined
    # onto base_dir here — confirm sqlite3 receives the intended target.
    self._db_filepath = self.base_dir.joinpath(filename)
    if not self._db_filepath.exists():
        # A database file that does not exist yet has no entries: force
        # an initial indexing pass.
        update_db = True

    self._db_connection_handle: Union[sqlite3.Connection, None] = None
    self._db_cursor_handle: Union[sqlite3.Cursor, None] = None
    if recreate_db:
        self._drop_db()
        update_db = True
    self._create_db()

    # Cache the searchable column names per query resource type.
    self._attributes = {
        _QueryResourceType.STUDIES: self._get_attributes(
            _QueryResourceType.STUDIES
        ),
        _QueryResourceType.SERIES: self._get_attributes(
            _QueryResourceType.SERIES
        ),
        _QueryResourceType.INSTANCES: self._get_attributes(
            _QueryResourceType.INSTANCES
        ),
    }

    if update_db:
        logger.info('updating database...')
        start = time.time()
        self._update_db()
        end = time.time()
        elapsed = round(end - start)
        logger.info(f'updated database in {elapsed} seconds')

    # Bounded least-recently-used cache of open image file readers.
    self._reader_cache: OrderedDict[Path, _ImageFileReader] = OrderedDict()
    self._max_reader_cache_size = 50
try: if self._db_cursor_handle is not None: self._db_cursor_handle.execute('PRAGMA optimize') self._db_cursor_handle.close() if self._db_connection_handle is not None: self._db_connection_handle.commit() self._db_connection_handle.close() for image_file_reader in self._reader_cache.values(): image_file_reader.close() finally: contents['_db_connection_handle'] = None contents['_db_cursor_handle'] = None contents['_reader_cache'] = OrderedDict() return contents @property def _connection(self) -> sqlite3.Connection: """sqlite3.Connection: database connection""" if self._db_connection_handle is None: self._db_connection_handle = sqlite3.connect(str(self._db_filepath)) self._db_connection_handle.row_factory = sqlite3.Row return self._db_connection_handle @property def _cursor(self) -> sqlite3.Cursor: if self._db_cursor_handle is None: self._db_cursor_handle = self._connection.cursor() return self._db_cursor_handle def _create_db(self): """Creating database tables and indices.""" with self._connection as connection: cursor = connection.cursor() cursor.execute('PRAGMA journal_mode = WAL') cursor.execute('PRAGMA synchronous = off') cursor.execute('PRAGMA temp_store = memory') cursor.execute('PRAGMA mmap_size = 30000000000') cursor.execute(''' CREATE TABLE IF NOT EXISTS studies ( StudyInstanceUID TEXT NOT NULL, StudyID TEXT, StudyDate TEXT, StudyTime TEXT, PatientName TEXT, PatientID TEXT, PatientSex TEXT, PatientBirthDate TEXT, ReferringPhysicianName TEXT, PRIMARY KEY (StudyInstanceUID) ) ''') cursor.execute(''' CREATE INDEX IF NOT EXISTS study_index_patient_id ON studies (PatientID) ''') cursor.execute(''' CREATE INDEX IF NOT EXISTS study_index_study_id ON studies (StudyID) ''') cursor.execute(''' CREATE TABLE IF NOT EXISTS series ( StudyInstanceUID TEXT NOT NULL, SeriesInstanceUID TEXT NOT NULL, Modality VARCHAR(2), AccessionNumber TEXT, SeriesNumber INTEGER, PRIMARY KEY (StudyInstanceUID, SeriesInstanceUID) FOREIGN KEY (StudyInstanceUID) REFERENCES 
studies(StudyInstanceUID) ) ''') cursor.execute(''' CREATE INDEX IF NOT EXISTS series_index_modality ON series ( Modality ) ''') cursor.execute(''' CREATE TABLE IF NOT EXISTS instances ( StudyInstanceUID TEXT NOT NULL, SeriesInstanceUID TEXT NOT NULL, SOPInstanceUID TEXT NOT NULL, SOPClassUID TEXT NOT NULL, InstanceNumber INTEGER, Rows INTEGER, Columns INTEGER, BitsAllocated INTEGER, NumberOfFrames INTEGER, TransferSyntaxUID TEXT NOT NULL, _file_path TEXT, PRIMARY KEY ( StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID ) FOREIGN KEY (SeriesInstanceUID) REFERENCES series(SeriesInstanceUID) FOREIGN KEY (StudyInstanceUID) REFERENCES studies(StudyInstanceUID) ) ''') cursor.execute( 'CREATE INDEX IF NOT EXISTS instances_index_sop_class_uid ' 'ON instances (SOPClassUID)' ) cursor.close() def _drop_db(self): """Drop database tables and indices.""" with self._connection as connection: cursor = connection.cursor() cursor.execute('DROP TABLE IF EXISTS instances') cursor.execute('DROP TABLE IF EXISTS series') cursor.execute('DROP TABLE IF EXISTS studies') cursor.close() def _update_db(self): """Update database.""" all_attributes = ( self._attributes[_QueryResourceType.STUDIES] + self._attributes[_QueryResourceType.SERIES] + self._attributes[_QueryResourceType.INSTANCES] ) tags = [ tag_for_keyword(attr) for attr in all_attributes ] def is_stop_tag(tag: BaseTag, VR: Optional[str], length: int) -> bool: return tag > max(tags) indexed_file_paths = set(self._get_indexed_file_paths()) found_file_paths = set() studies = {} series = {} instances = {} n = 100 for i, file_path in enumerate(self.base_dir.glob('**/*')): if not file_path.is_file() or file_path.name == 'DICOMDIR': continue rel_file_path = file_path.relative_to(self.base_dir) found_file_paths.add(rel_file_path) if file_path in indexed_file_paths: logger.debug(f'skip indexed file {file_path}') continue logger.debug(f'index file {file_path}') with open(file_path, 'rb') as fp: try: ds = read_partial( fp, 
def _get_data_element_value(
    self,
    dataset: Dataset,
    keyword: str
) -> Union[str, int, None]:
    """Return a data element value normalized for database storage.

    Missing attributes map to ``None``; integers pass through unchanged;
    every other value is stringified.
    """
    # TODO: consider converting date and time to ISO format
    attribute_value = getattr(dataset, keyword, None)
    if attribute_value is None:
        return None
    if isinstance(attribute_value, int):
        return attribute_value
    return str(attribute_value)
def _extract_instance_metadata(
    self,
    dataset: Dataset,
    file_path: Union[Path, str]
) -> Tuple[
    str,
    str,
    str,
    str,
    Optional[int],
    Optional[int],
    Optional[int],
    Optional[int],
    Optional[int],
    str,
    str,
]:
    """Collect the instance-level database column values for one dataset.

    The tuple holds the configured instance attributes followed by the
    transfer syntax UID and the (relative) file path.
    """
    values = [
        self._get_data_element_value(dataset, attribute)
        for attribute in self._attributes[_QueryResourceType.INSTANCES]
    ]
    values.append(str(dataset.file_meta.TransferSyntaxUID))
    values.append(str(file_path))
    return tuple(values)  # type: ignore
def _delete_instances_from_db(
    self,
    uids: Sequence[Tuple[str, str, str]]
) -> None:
    """Delete instances (and then-empty series/studies) from the database.

    Parameters
    ----------
    uids: Sequence[Tuple[str, str, str]]
        (StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID) triplets of
        the instances whose database entries should be removed
    """
    with self._connection as connection:
        cursor = connection.cursor()
        # Delete instances as well as any parent series or studies that
        # would be empty after the instances are deleted.
        studies_to_check = set()
        series_to_check = set()
        for study_instance_uid, series_instance_uid, sop_instance_uid in uids:  # noqa
            # FIX: the original called executemany() with a bare string,
            # which makes the driver treat every character as a separate
            # parameter set. Bind each UID as a one-element tuple instead.
            cursor.execute(
                'DELETE FROM instances WHERE SOPInstanceUID=?',
                (sop_instance_uid,)
            )
            studies_to_check.add(study_instance_uid)
            series_to_check.add((study_instance_uid, series_instance_uid))
        for study_instance_uid, series_instance_uid in series_to_check:
            n_in_series = self._count_instances_in_series(
                series_instance_uid
            )
            if n_in_series == 0:
                cursor.execute(
                    'DELETE FROM series WHERE SeriesInstanceUID=?',
                    (series_instance_uid,)
                )
        for study_instance_uid in studies_to_check:
            n_in_study = self._count_instances_in_study(
                study_instance_uid
            )
            if n_in_study == 0:
                cursor.execute(
                    'DELETE FROM studies WHERE StudyInstanceUID=?',
                    (study_instance_uid,)
                )
        cursor.close()
def _build_query(
    self,
    searchable_keywords: Sequence[str],
    fuzzymatching: Optional[bool] = None,
    limit: Optional[int] = None,
    offset: Optional[int] = None,
    fields: Optional[Sequence[str]] = None,
    search_filters: Optional[Dict[str, Any]] = None
) -> Tuple[str, Dict[str, Union[int, str]]]:
    """Build the WHERE/LIMIT/OFFSET tail of a search query.

    Returns
    -------
    Tuple[str, Dict[str, Union[int, str]]]
        SQL fragment (possibly empty) to append to a SELECT statement,
        and the named parameters referenced by that fragment

    Note
    ----
    Unknown or unsupported filter attributes are skipped with a warning
    rather than raising.
    """
    if fuzzymatching is None:
        fuzzymatching = False

    if fields is not None:
        logger.warning('argument "fields" is ignored')

    query_expressions = []
    query_params = {}
    if search_filters is not None:
        # Value representations for which "*" and "?" wildcard matching
        # applies.
        wildcard_search_vrs = (
            'AE', 'CS', 'LO', 'LT', 'PN', 'SH', 'ST', 'UC', 'UR', 'UT',
        )
        first_filter_expression = True
        for i, (key, value) in enumerate(search_filters.items()):
            filter_expressions = []
            filter_params = {}
            if value is None or len(str(value)) == 0:
                logger.warning(f'skip search filter "{key}" - empty value')
                continue
            try:
                # "key" may be a tag; resolve it to a keyword and its VR.
                keyword = self.lookup_keyword(key)
                vr = dictionary_VR(key)
            except Exception:
                # Otherwise assume "key" already is a keyword.
                keyword = key
                try:
                    tag = tag_for_keyword(keyword)
                    if tag is None:
                        raise
                    vr = dictionary_VR(tag)
                except Exception:
                    logger.warning(
                        f'skip search filter "{key}" - not a known '
                        'attribute'
                    )
                    continue
            if keyword not in searchable_keywords:
                logger.warning(
                    f'skip search filter "{key}" - queries based on this '
                    'attribute are not supported'
                )
                continue

            if vr in wildcard_search_vrs:
                # Translate DICOM wildcards to SQL LIKE wildcards.
                if '*' in value:
                    filter_expressions.append(f'{keyword} LIKE :{keyword}')
                    filter_params[keyword] = value.replace('*', '%')
                elif '?' in value:
                    filter_expressions.append(f'{keyword} LIKE :{keyword}')
                    filter_params[keyword] = value.replace('?', '_')
                elif vr == 'PN' and fuzzymatching:
                    filter_expressions.append(f'{keyword} LIKE :{keyword}')
                    filter_params[keyword] = f'%{value}%'
                else:
                    filter_expressions.append(f'{keyword} = :{keyword}')
                    filter_params[keyword] = str(value)
            else:
                # Date/time values are validated and matched by prefix.
                if vr == 'DA':
                    try:
                        DA(value)
                    except ValueError:
                        logger.warning(
                            f'skip search filter "{key}" - not a valid '
                            f'value for value representation DA: {value}'
                        )
                        continue
                    filter_expressions.append(f'{keyword} LIKE :{keyword}')
                    filter_params[keyword] = f'%{value}'
                elif vr == 'DT':
                    try:
                        DT(value)
                    except ValueError:
                        logger.warning(
                            f'skip search filter "{key}" - not a valid '
                            f'value for value representation DT: {value}'
                        )
                        continue
                    filter_expressions.append(f'{keyword} LIKE :{keyword}')
                    filter_params[keyword] = f'%{value}'
                elif vr == 'TM':
                    try:
                        TM(value)
                    except ValueError:
                        logger.warning(
                            f'skip search filter "{key}" - not a valid '
                            f'value for value representation TM: {value}'
                        )
                        continue
                    filter_expressions.append(f'{keyword} LIKE :{keyword}')
                    filter_params[keyword] = f'%{value}'
                else:
                    filter_expressions.append(f'{keyword} = :{keyword}')
                    filter_params[keyword] = str(value)

            if first_filter_expression:
                query_expressions.append('WHERE')
                first_filter_expression = False
            else:
                query_expressions.append('AND')
            query_expressions.extend(filter_expressions)
            query_params.update(filter_params)

    if limit is not None:
        if limit < 0:
            raise ValueError('Limit must be a positive integer.')
        query_expressions.append('LIMIT :limit')
        query_params['limit'] = limit

    if offset is not None:
        if offset < 0:
            raise ValueError('Offset must be a positive integer.')
        if limit is None:
            # SQLite only allows OFFSET after a LIMIT clause;
            # -1 means "no limit".
            query_expressions.append('LIMIT :limit')
            query_params['limit'] = -1
        query_expressions.append('OFFSET :offset')
        query_params['offset'] = offset

    query_string = ' '.join(query_expressions)
    return (query_string, query_params)
def _get_series(
    self,
    study_instance_uid: Optional[str] = None
) -> List[Tuple[str, str]]:
    """Look up (Study Instance UID, Series Instance UID) pairs.

    Parameters
    ----------
    study_instance_uid: Union[str, None], optional
        When given, restrict the result to series of this study

    Returns
    -------
    List[Tuple[str, str]]
        One (StudyInstanceUID, SeriesInstanceUID) pair per matching row
    """
    sql = 'SELECT StudyInstanceUID, SeriesInstanceUID FROM series'
    params = {}
    if study_instance_uid is not None:
        sql += ' WHERE StudyInstanceUID = :study_instance_uid'
        params['study_instance_uid'] = study_instance_uid
    self._cursor.execute(sql, params)
    rows = self._cursor.fetchall()
    return [
        (row['StudyInstanceUID'], row['SeriesInstanceUID'])
        for row in rows
    ]
def _get_image_file_reader(self, file_path: Path) -> '_ImageFileReader':
    """Get the reader for a given image file.

    Parameters
    ----------
    file_path: pathlib.Path
        Path to the DICOM file containing a data set of an image

    Returns
    -------
    dicomweb_client.file._ImageFileReader
        Reader object

    Note
    ----
    The instance of the class caches reader objects to improve performance
    for repeated frame-level file access. Entries are kept in
    least- to most-recently-used order.
    """
    # FIX: the original moved cache hits to the FRONT (move_to_end with
    # last=False) yet also evicted from the front (popitem(last=False)),
    # i.e. it evicted the most-recently-used entry. Use the canonical
    # scheme instead: end of the OrderedDict is most recently used.
    try:
        image_file_reader = self._reader_cache[file_path]
        # Cache hit: mark as most recently used.
        self._reader_cache.move_to_end(file_path)
    except KeyError:
        image_file_reader = _ImageFileReader(file_path)
        image_file_reader.open()
        # Insertion appends at the end, i.e. as most recently used.
        self._reader_cache[file_path] = image_file_reader
        if len(self._reader_cache) > self._max_reader_cache_size:
            # Evict the least recently used entry (front of the dict).
            _, evicted_reader = self._reader_cache.popitem(last=False)
            evicted_reader.close()
    return image_file_reader
) return self.base_dir.joinpath(result['_file_path']) def _count_series_in_study(self, study_instance_uid: str) -> int: self._cursor.execute( 'SELECT COUNT(SeriesInstanceUID) AS count FROM series ' 'WHERE StudyInstanceUID = :study_instance_uid', {'study_instance_uid': study_instance_uid} ) result = self._cursor.fetchone() return int(result['count']) def _count_instances_in_study(self, study_instance_uid: str) -> int: self._cursor.execute( 'SELECT COUNT(SOPInstanceUID) AS count FROM instances ' 'WHERE StudyInstanceUID = :study_instance_uid', {'study_instance_uid': study_instance_uid} ) result = self._cursor.fetchone() return int(result['count']) def _count_instances_in_series(self, series_instance_uid: str) -> int: self._cursor.execute( 'SELECT COUNT(SOPInstanceUID) AS count FROM instances ' 'WHERE SeriesInstanceUID = :series_instance_uid', { 'series_instance_uid': series_instance_uid, } ) result = self._cursor.fetchone() return int(result['count']) def search_for_studies( self, fuzzymatching: Optional[bool] = None, limit: Optional[int] = None, offset: Optional[int] = None, fields: Optional[Sequence[str]] = None, search_filters: Optional[Dict[str, Any]] = None, get_remaining: bool = False ) -> List[Dict[str, dict]]: """Search for studies. 
Parameters ---------- fuzzymatching: Union[bool, None], optional Whether fuzzy semantic matching should be performed limit: Union[int, None], optional Maximum number of results that should be returned offset: Union[int, None], optional Number of results that should be skipped fields: Union[Sequence[str], None], optional Names of fields (attributes) that should be included in results search_filters: Union[dict, None], optional Search filter criteria as key-value pairs, where *key* is a keyword or a tag of the attribute and *value* is the expected value that should match get_remaining: bool, optional Whether remaining results should be included Returns ------- List[Dict[str, dict]] Studies (see `Study Result Attributes <http://dicom.nema.org/medical/dicom/current/output/chtml/part18/sect_6.7.html#table_6.7.1-2>`_) Note ---- No additional `fields` are currently supported. """ # noqa: E501 logger.info('search for studies') query_filter_string, query_params = self._build_query( searchable_keywords=self._attributes[_QueryResourceType.STUDIES], fuzzymatching=fuzzymatching, limit=limit, offset=offset, fields=fields, search_filters=search_filters ) query_string = ' '.join([ 'SELECT * FROM studies', query_filter_string ]) self._cursor.execute(query_string, query_params) results = self._cursor.fetchall() collection = [] for row in results: dataset = Dataset() for key in row.keys(): if not key.startswith('_'): setattr(dataset, key, row[key]) n_series_in_study = self._count_series_in_study( study_instance_uid=dataset.StudyInstanceUID ) dataset.NumberOfStudyRelatedSeries = n_series_in_study n_instances_in_study = self._count_instances_in_study( study_instance_uid=dataset.StudyInstanceUID ) dataset.NumberOfStudyRelatedInstances = n_instances_in_study modalities_in_study = self._get_modalities_in_study( study_instance_uid=dataset.StudyInstanceUID ) dataset.ModalitiesInStudy = modalities_in_study collection.append(dataset.to_json_dict()) return collection def search_for_series( 
self, study_instance_uid: Optional[str] = None, fuzzymatching: Optional[bool] = None, limit: Optional[int] = None, offset: Optional[int] = None, fields: Optional[Sequence[str]] = None, search_filters: Optional[Dict[str, Any]] = None, get_remaining: bool = False ) -> List[Dict[str, dict]]: """Search for series. Parameters ---------- study_instance_uid: Union[str, None], optional Study Instance UID fuzzymatching: Union[bool, None], optional Whether fuzzy semantic matching should be performed limit: Union[int, None], optional Maximum number of results that should be returned offset: Union[int, None], optional Number of results that should be skipped fields: Union[Sequence[str], None], optional Names of fields (attributes) that should be included in results search_filters: Union[dict, None], optional Search filter criteria as key-value pairs, where *key* is a keyword or a tag of the attribute and *value* is the expected value that should match get_remaining: bool, optional Whether remaining results should be included Returns ------- List[Dict[str, dict]] Series (see `Series Result Attributes <http://dicom.nema.org/medical/dicom/current/output/chtml/part18/sect_6.7.html#table_6.7.1-2a>`_) """ # noqa: E501 if study_instance_uid is None: logger.info('search for series') else: logger.info(f'search for series of study "{study_instance_uid}"') if search_filters is None: search_params = {} else: search_params = dict(search_filters) all_series = True if study_instance_uid is not None: search_params['StudyInstanceUID'] = study_instance_uid all_series = False searchable_keywords = list(self._attributes[_QueryResourceType.STUDIES]) searchable_keywords.extend( self._attributes[_QueryResourceType.SERIES] ) query_filter_string, query_params = self._build_query( searchable_keywords=searchable_keywords, fuzzymatching=fuzzymatching, limit=limit, offset=offset, fields=fields, search_filters=search_params ) query_filter_string = re.sub( r'StudyInstanceUID =', 'series.StudyInstanceUID =', 
query_filter_string ) if all_series: query_string = ' '.join([ 'SELECT * FROM series', 'INNER JOIN studies', 'ON series.StudyInstanceUID = studies.StudyInstanceUID', query_filter_string ]) else: includefields = [ 'Modality', 'SeriesInstanceUID', 'SeriesNumber', ] if fields is not None: includefields += [ f for f in fields if f in { 'StudyInstanceUID', 'StudyID', 'StudyDate', 'StudyTime', 'PatientName', 'PatientID', 'PatientSex', 'PatientBirthDate', } ] includefields_string = ', '.join(includefields) includefields_string = includefields_string.replace( 'StudyInstanceUID', 'studies.StudyInstanceUID' ) query_string = ' '.join([ f'SELECT {includefields_string} FROM series', 'INNER JOIN studies', 'ON series.StudyInstanceUID = studies.StudyInstanceUID', query_filter_string ]) self._cursor.execute(query_string, query_params) results = self._cursor.fetchall() collection = [] for row in results: dataset = Dataset() for key in row.keys(): if not key.startswith('_'): setattr(dataset, key, row[key]) if all_series: n_series_in_study = self._count_series_in_study( study_instance_uid=dataset.StudyInstanceUID ) dataset.NumberOfStudyRelatedSeries = n_series_in_study n_instances_in_study = self._count_instances_in_study( study_instance_uid=dataset.StudyInstanceUID ) dataset.NumberOfStudyRelatedInstances = n_instances_in_study modalities_in_study = self._get_modalities_in_study( study_instance_uid=dataset.StudyInstanceUID ) dataset.ModalitiesInStudy = modalities_in_study n_instances_in_series = self._count_instances_in_series( series_instance_uid=dataset.SeriesInstanceUID, ) dataset.NumberOfSeriesRelatedInstances = n_instances_in_series collection.append(dataset.to_json_dict()) return collection def search_for_instances( self, study_instance_uid: Optional[str] = None, series_instance_uid: Optional[str] = None, fuzzymatching: Optional[bool] = None, limit: Optional[int] = None, offset: Optional[int] = None, fields: Optional[Sequence[str]] = None, search_filters: Optional[Dict[str, 
Any]] = None, get_remaining: bool = False ) -> List[Dict[str, dict]]: """Search for instances. Parameters ---------- study_instance_uid: Union[str, None], optional Study Instance UID series_instance_uid: Union[str, None], optional Series Instance UID fuzzymatching: Union[bool, None], optional Whether fuzzy semantic matching should be performed limit: Union[int, None], optional Maximum number of results that should be returned offset: Union[int, None], optional Number of results that should be skipped fields: Union[Sequence[str], None], optional Names of fields (attributes) that should be included in results search_filters: Union[dict, None], optional Search filter criteria as key-value pairs, where *key* is a keyword or a tag of the attribute and *value* is the expected value that should match get_remaining: bool, optional Whether remaining results should be included Returns ------- List[Dict[str, dict]] Instances (see `Instance Result Attributes <http://dicom.nema.org/medical/dicom/current/output/chtml/part18/sect_6.7.html#table_6.7.1-2b>`_) Note ---- No additional `fields` are currently supported. """ # noqa: E501 if search_filters is None: search_params = {} else: search_params = dict(search_filters) all_instances = True study_instances = True if study_instance_uid is None and series_instance_uid is None: logger.info('search for instances') else: if study_instance_uid is None: raise TypeError( 'Study Instance UID must be specified if ' 'Series Instance UID is specified.' 
) if series_instance_uid is None: all_instances = False search_params['StudyInstanceUID'] = study_instance_uid logger.info( f'search for instances of study "{study_instance_uid}"' ) else: all_instances = False study_instances = False search_params['StudyInstanceUID'] = study_instance_uid search_params['SeriesInstanceUID'] = series_instance_uid logger.info( f'search for instances of series "{series_instance_uid}" ' f'of study "{study_instance_uid}"' ) searchable_keywords = list(self._attributes[_QueryResourceType.STUDIES]) searchable_keywords.extend( self._attributes[_QueryResourceType.SERIES] ) searchable_keywords.extend( self._attributes[_QueryResourceType.INSTANCES] ) query_filter_string, query_params = self._build_query( searchable_keywords=searchable_keywords, fuzzymatching=fuzzymatching, limit=limit, offset=offset, fields=fields, search_filters=search_params ) query_filter_string = re.sub( r'StudyInstanceUID =', 'instances.StudyInstanceUID =', query_filter_string ) query_filter_string = re.sub( r'SeriesInstanceUID =', 'instances.SeriesInstanceUID =', query_filter_string ) if all_instances: query_string = ' '.join([ 'SELECT * FROM instances', 'INNER JOIN series', 'ON instances.SeriesInstanceUID = series.SeriesInstanceUID', 'INNER JOIN studies', 'ON instances.StudyInstanceUID = studies.StudyInstanceUID', query_filter_string ]) else: includefields = [ 'SOPClassUID', 'SOPInstanceUID', 'InstanceNumber', 'Rows', 'Columns', 'BitsAllocated', 'NumberOfFrames', 'TransferSyntaxUID', ] if study_instances: includefields += [ 'Modality', 'SeriesInstanceUID', 'SeriesNumber', ] if fields is not None: includefields += [ f for f in fields if f in { 'StudyInstanceUID', 'StudyID', 'StudyDate', 'StudyTime', 'PatientName', 'PatientID', 'PatientSex', 'PatientBirthDate', } ] else: if fields is not None: includefields += [ f for f in fields if f in { 'StudyInstanceUID', 'StudyID', 'StudyDate', 'StudyTime', 'PatientName', 'PatientID', 'PatientSex', 'PatientBirthDate', 'Modality', 
'SeriesInstanceUID', 'SeriesNumber', } ] includefields_string = ', '.join(includefields) includefields_string = includefields_string.replace( 'SeriesInstanceUID', 'series.SeriesInstanceUID' ) includefields_string = includefields_string.replace( 'StudyInstanceUID', 'studies.StudyInstanceUID' ) query_string = ' '.join([ f'SELECT {includefields_string} FROM instances', 'INNER JOIN series', 'ON instances.SeriesInstanceUID = series.SeriesInstanceUID', 'INNER JOIN studies', 'ON instances.StudyInstanceUID = studies.StudyInstanceUID', query_filter_string ]) self._cursor.execute(query_string, query_params) results = self._cursor.fetchall() collection = [] for row in results: dataset = Dataset() for key in row.keys(): if not key.startswith('_'): setattr(dataset, key, row[key]) if all_instances: n_series_in_study = self._count_series_in_study( study_instance_uid=dataset.StudyInstanceUID ) dataset.NumberOfStudyRelatedSeries = n_series_in_study n_instances_in_study = self._count_instances_in_study( study_instance_uid=dataset.StudyInstanceUID ) dataset.NumberOfStudyRelatedInstances = n_instances_in_study modalities_in_study = self._get_modalities_in_study( study_instance_uid=dataset.StudyInstanceUID ) dataset.ModalitiesInStudy = modalities_in_study if all_instances or study_instances: n_instances_in_series = self._count_instances_in_series( series_instance_uid=dataset.SeriesInstanceUID, ) dataset.NumberOfSeriesRelatedInstances = n_instances_in_series collection.append(dataset.to_json_dict()) return collection def retrieve_bulkdata( self, url: str, media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None, byte_range: Optional[Tuple[int, int]] = None ) -> List[bytes]: """Retrieve bulk data at a given location. 
Parameters ---------- url: str Location of the bulk data media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional Acceptable media types and optionally the UIDs of the corresponding transfer syntaxes byte_range: Union[Tuple[int, int], None], optional Start and end of byte range Returns ------- Iterator[bytes] Bulk data items Raises ------ IOError When requested resource is not found at `url` """ # noqa: E501 iterator = self.iter_bulkdata( url=url, media_types=media_types, byte_range=byte_range ) return list(iterator) def iter_bulkdata( self, url: str, media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None, byte_range: Optional[Tuple[int, int]] = None ) -> Iterator[bytes]: """Iterate over bulk data items at a given location. Parameters ---------- url: str Location of the bulk data media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional Acceptable media types and optionally the UIDs of the corresponding transfer syntaxes byte_range: Union[Tuple[int, int], None], optional Start and end of byte range Returns ------- Iterator[bytes] Bulk data items Raises ------ IOError When requested resource is not found at `url` """ # noqa: E501 # The retrieve_study_metadata, retrieve_series_metadata, and # retrieve_instance_metadata methods currently include all bulkdata # into metadata resources by value rather than by reference, i.e., # using the "InlineBinary" rather than the "BulkdataURI" key. # Therefore, no valid URL should exist for any bulkdata at this point. # If that behavior gets changed, i.e., if bulkdata gets included into # metadata using "BulkdataURI", then the implementation of this method # will need to change as well. raise IOError(f'Resource does not exist: "{url}".') def retrieve_study_metadata( self, study_instance_uid: str, ) -> List[Dict[str, dict]]: """Retrieve metadata of instances in a study. 
Parameters ---------- study_instance_uid: str Study Instance UID Returns ------- List[Dict[str, Any]] Metadata of each instance in study """ logger.info( 'retrieve metadata of all instances ' f'of study "{study_instance_uid}"' ) series_index = self._get_series(study_instance_uid) collection = [] for series_instance_uid, study_instance_uid in series_index: collection.extend( self.retrieve_series_metadata( study_instance_uid=study_instance_uid, series_instance_uid=series_instance_uid, ) ) return collection def iter_study( self, study_instance_uid: str, media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None ) -> Iterator[Dataset]: """Iterate over all instances of a study. Parameters ---------- study_instance_uid: str Study Instance UID media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional Acceptable media types and optionally the UIDs of the corresponding transfer syntaxes Returns ------- Iterator[pydicom.dataset.Dataset] Instances """ # noqa: E501 logger.info( f'iterate over all instances of study "{study_instance_uid}"' ) series_index = self._get_series(study_instance_uid) for study_instance_uid, series_instance_uid in series_index: uids = self._get_instances( study_instance_uid=study_instance_uid, series_instance_uid=series_instance_uid, ) for study_instance_uid, series_instance_uid, sop_instance_uid in uids: # noqa yield self.retrieve_instance( study_instance_uid=study_instance_uid, series_instance_uid=series_instance_uid, sop_instance_uid=sop_instance_uid, media_types=media_types ) def retrieve_study( self, study_instance_uid: str, media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None ) -> List[Dataset]: """Retrieve all instances of a study. 
Parameters ---------- study_instance_uid: str Study Instance UID media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional Acceptable media types and optionally the UIDs of the corresponding transfer syntaxes Returns ------- Sequence[pydicom.dataset.Dataset] Instances """ # noqa: E501 logger.info(f'retrieve all instances of study "{study_instance_uid}"') iterator = self.iter_study( study_instance_uid=study_instance_uid, media_types=media_types, ) return list(iterator) def iter_series( self, study_instance_uid: str, series_instance_uid: str, media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None ) -> Iterator[Dataset]: """Iterate over all instances of a series. Parameters ---------- study_instance_uid: str Study Instance UID series_instance_uid: str Series Instance UID media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional Acceptable media types and optionally the UIDs of the corresponding transfer syntaxes Returns ------- Iterator[pydicom.dataset.Dataset] Instances """ # noqa: E501 logger.info( f'iterate over all instances of series "{series_instance_uid}" ' f'of study "{study_instance_uid}"' ) instance_index = self._get_instances( study_instance_uid=study_instance_uid, series_instance_uid=series_instance_uid, ) for i in instance_index: study_instance_uid, series_instance_uid, sop_instance_uid = i yield self.retrieve_instance( study_instance_uid=study_instance_uid, series_instance_uid=series_instance_uid, sop_instance_uid=sop_instance_uid, media_types=media_types ) def retrieve_series( self, study_instance_uid: str, series_instance_uid: str, media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None ) -> List[Dataset]: """Retrieve all instances of a series. 
Parameters ---------- study_instance_uid: str Study Instance UID series_instance_uid: str Series Instance UID media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional Acceptable media types and optionally the UIDs of the corresponding transfer syntaxes Returns ------- Sequence[pydicom.dataset.Dataset] Instances """ # noqa: E501 logger.info( f'retrieve all instances of series "{series_instance_uid}" ' f'of study "{study_instance_uid}"' ) iterator = self.iter_series( study_instance_uid=study_instance_uid, series_instance_uid=series_instance_uid, media_types=media_types, ) return list(iterator) def retrieve_series_rendered( self, study_instance_uid, series_instance_uid, media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None, params: Optional[Dict[str, Any]] = None ) -> bytes: """Retrieve rendered representation of a series. Parameters ---------- study_instance_uid: str Study Instance UID series_instance_uid: str Series Instance UID media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional Acceptable media types (choices: ``"image/jpeg"``, ``"image/jp2"``, ``"image/gif"``, ``"image/png"``, ``"video/gif"``, ``"video/mp4"``, ``"video/h265"``, ``"text/html"``, ``"text/plain"``, ``"text/xml"``, ``"text/rtf"``, ``"application/pdf"``) params: Union[Dict[str, Any], None], optional Additional parameters relevant for given `media_type`, e.g., ``{"quality": 95}`` for ``"image/jpeg"`` Returns ------- bytes Rendered representation of series """ # noqa: E501 raise ValueError('Retrieval of rendered series is not supported.') def retrieve_series_metadata( self, study_instance_uid: str, series_instance_uid: str ) -> List[Dict[str, dict]]: """Retrieve metadata of instances in a series. 
Parameters ---------- study_instance_uid: str Study Instance UID series_instance_uid: str Series Instance UID Returns ------- List[Dict[str, Any]] Metadata of each instance in series """ logger.info( 'retrieve metadata of all instances of ' f'series "{series_instance_uid}" of study "{study_instance_uid}"' ) collection = [] instance_index = self._get_instances( study_instance_uid=study_instance_uid, series_instance_uid=series_instance_uid, ) for i in instance_index: study_instance_uid, series_instance_uid, sop_instance_uid = i metadata = self.retrieve_instance_metadata( study_instance_uid=study_instance_uid, series_instance_uid=series_instance_uid, sop_instance_uid=sop_instance_uid, ) collection.append(metadata) return collection def retrieve_instance_metadata( self, study_instance_uid: str, series_instance_uid: str, sop_instance_uid: str, ) -> Dict[str, dict]: """Retrieve metadata of a single instance. Parameters ---------- study_instance_uid: str Study Instance UID series_instance_uid: str Series Instance UID sop_instance_uid: str SOP Instance UID Returns ------- Dict[str, Any] Metadata of instance """ logger.info( f'retrieve metadata of instance "{sop_instance_uid}" of ' f'series "{series_instance_uid}" of study "{study_instance_uid}"' ) file_path = self._get_instance_file_path( study_instance_uid, series_instance_uid, sop_instance_uid, ) metadata = dcmread(file_path, stop_before_pixels=True) return metadata.to_json_dict() def retrieve_instance( self, study_instance_uid: str, series_instance_uid: str, sop_instance_uid: str, media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None ) -> Dataset: """Retrieve metadata of a single instance. 
Parameters ---------- study_instance_uid: str Study Instance UID series_instance_uid: str Series Instance UID sop_instance_uid: str SOP Instance UID media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional Acceptable media types and optionally the UIDs of the corresponding transfer syntaxes Returns ------- pydicom.dataset.Dataset Instance """ # noqa: E501 logger.info( f'retrieve instance "{sop_instance_uid}" of ' f'series "{series_instance_uid}" of study "{study_instance_uid}"' ) transfer_syntax_uid_lut = { '1.2.840.10008.1.2.1': 'application/dicom', '1.2.840.10008.192.168.3.11': 'application/dicom', '1.2.840.10008.172.16.58.3': 'application/dicom', '1.2.840.10008.172.16.31.10': 'application/dicom', '1.2.840.10008.172.16.58.3': 'application/dicom', '1.2.840.10008.192.168.127.12': 'application/dicom', '1.2.840.10008.172.16.31.10': 'application/dicom', '1.2.840.10008.1.2.4.90': 'application/dicom', '1.2.840.10008.1.2.4.91': 'application/dicom', '1.2.840.10008.1.2.4.92': 'application/dicom', '1.2.840.10008.1.2.4.93': 'application/dicom', } supported_media_type_lut = { 'application/dicom': { '1.2.840.10008.1.2.1', '1.2.840.10008.1.2.4.50', '1.2.840.10008.1.2.4.51', '1.2.840.10008.1.2.4.57', '1.2.840.10008.1.2.4.70', '1.2.840.10008.1.2.4.80', '1.2.840.10008.1.2.4.81', '1.2.840.10008.1.2.4.90', '1.2.840.10008.1.2.4.91', '1.2.840.10008.1.2.4.92', '1.2.840.10008.1.2.4.93', '*', }, } supported_media_type_lut['application/*'] = set( supported_media_type_lut['application/dicom'] ) supported_media_type_lut['application/'] = set( supported_media_type_lut['application/dicom'] ) supported_media_type_lut['*/*'] = set( supported_media_type_lut['application/dicom'] ) supported_media_type_lut['*/'] = set( supported_media_type_lut['application/dicom'] ) if media_types is None: media_types = (('application/dicom', '*'), ) acceptable_media_type_lut = _build_acceptable_media_type_lut( media_types, supported_media_type_lut ) file_path = self._get_instance_file_path( 
study_instance_uid, series_instance_uid, sop_instance_uid, ) dataset = dcmread(file_path) transfer_syntax_uid = dataset.file_meta.TransferSyntaxUID # Check whether the expected media is specified as one of the # acceptable media types. expected_media_type = transfer_syntax_uid_lut[transfer_syntax_uid] found_matching_media_type = False wildcards = {'*/*', '*/', 'application/*', 'application/'} if any([w in acceptable_media_type_lut for w in wildcards]): found_matching_media_type = True elif expected_media_type in acceptable_media_type_lut: found_matching_media_type = True # If expected media type is specified as one of the acceptable # media types, check whether the corresponding transfer syntax is # appropriate. expected_transfer_syntaxes = acceptable_media_type_lut[ expected_media_type ] if ( transfer_syntax_uid not in expected_transfer_syntaxes and '*' not in expected_transfer_syntaxes ): raise ValueError( 'Instance cannot be retrieved using media type "{}" ' 'with any of the specified transfer syntaxes: "{}".'.format( expected_media_type, '", "'.join(expected_transfer_syntaxes) ) ) if not found_matching_media_type: raise ValueError( 'Instance cannot be retrieved using any of the ' f'acceptable media types: {media_types}.' ) return dataset def retrieve_instance_rendered( self, study_instance_uid: str, series_instance_uid: str, sop_instance_uid: str, media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None, params: Optional[Dict[str, Any]] = None ) -> bytes: """Retrieve an individual, server-side rendered instance. 
Parameters ---------- study_instance_uid: str Study Instance UID series_instance_uid: str Series Instance UID sop_instance_uid: str SOP Instance UID media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional Acceptable media types (choices: ``"image/jpeg"``, ``"image/jp2"``, ``"image/gif"``, ``"image/png"``, ``"video/gif"``, ``"video/mp4"``, ``"video/h265"``, ``"text/html"``, ``"text/plain"``, ``"text/xml"``, ``"text/rtf"``, ``"application/pdf"``) params: Union[Dict[str, Any], None], optional Additional parameters relevant for given `media_type`, e.g., ``{"quality": 95}`` for ``"image/jpeg"`` Returns ------- bytes Rendered representation of instance Note ---- Only rendering of single-frame image instances is currently supported. """ # noqa: E501 file_path = self._get_instance_file_path( study_instance_uid, series_instance_uid, sop_instance_uid, ) image_file_reader = self._get_image_file_reader(file_path) metadata = image_file_reader.metadata if int(getattr(metadata, 'NumberOfFrames', '1')) > 1: raise ValueError( 'Rendering of multi-frame image instance is not supported.' 
) frame_index = 0 frame = image_file_reader.read_frame(frame_index) transfer_syntax_uid = image_file_reader.transfer_syntax_uid codec_name, codec_kwargs = self._get_image_codec_parameters( metadata=metadata, transfer_syntax_uid=transfer_syntax_uid, media_types=media_types, params=params ) if codec_name is None: pixels = frame else: array = image_file_reader.decode_frame(frame_index, frame) image = Image.fromarray(array) with io.BytesIO() as fp: image.save(fp, codec_name, **codec_kwargs) # type: ignore fp.seek(0) pixels = fp.read() return pixels def _check_media_types_for_instance_frames( self, transfer_syntax_uid: UID, media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None ) -> Union[str, None]: transfer_syntax_uid_lut = { '1.2.840.10008.1.2.1': 'application/octet-stream', '1.2.840.10008.1.2.4.50': 'image/jpeg', '1.2.840.10008.1.2.4.51': 'image/jpeg', '1.2.840.10008.1.2.4.57': 'image/jpeg', '1.2.840.10008.1.2.4.70': 'image/jpeg', '1.2.840.10008.1.2.4.80': 'image/jls', '1.2.840.10008.1.2.4.81': 'image/jls', '1.2.840.10008.1.2.4.90': 'image/jp2', '1.2.840.10008.172.16.17.321': 'image/jp2', '1.2.840.10008.172.16.17.322': 'image/jpx', '1.2.840.10008.172.16.17.323': 'image/jpx', } supported_media_type_lut = { 'image/jpeg': { '1.2.840.10008.1.2.4.50', '1.2.840.10008.1.2.4.51', '1.2.840.10008.1.2.4.57', '1.2.840.10008.1.2.4.70', '*', }, 'image/jls': { '1.2.840.10008.192.168.127.12', '1.2.840.10008.1.2.4.81', '*', }, 'image/jp2': { '1.2.840.10008.172.16.17.320', '1.2.840.10008.172.16.17.321', '*', }, 'image/jpx': { '1.2.840.10008.172.16.17.32', '1.2.840.10008.172.16.17.323', '*', }, 'application/octet-stream': { '1.2.840.10008.1.2.1', '*', }, } supported_media_type_lut['image/*'] = set().union(*[ supported_media_type_lut['image/jpeg'], supported_media_type_lut['image/jls'], supported_media_type_lut['image/jp2'], supported_media_type_lut['image/jpx'], ]) supported_media_type_lut['image/'] = set().union(*[ supported_media_type_lut['image/jpeg'], 
supported_media_type_lut['image/jls'], supported_media_type_lut['image/jp2'], supported_media_type_lut['image/jpx'], ]) supported_media_type_lut['application/*'] = set().union(*[ supported_media_type_lut['application/octet-stream'], ]) supported_media_type_lut['application/'] = set().union(*[ supported_media_type_lut['application/octet-stream'], ]) supported_media_type_lut['*/*'] = set().union(*[ supported_media_type_lut['image/*'], supported_media_type_lut['application/*'], ]) supported_media_type_lut['*/'] = set().union(*[ supported_media_type_lut['image/*'], supported_media_type_lut['application/*'], ]) if media_types is None: media_types = ('*/*', ) acceptable_media_type_lut = _build_acceptable_media_type_lut( media_types, supported_media_type_lut ) # Check whether the expected media is specified as one of the # acceptable media types. expected_media_type = transfer_syntax_uid_lut[transfer_syntax_uid] found_matching_media_type = False if transfer_syntax_uid.is_encapsulated: wildcards = {'*/*', '*/', 'image/*', 'image/'} if any([w in acceptable_media_type_lut for w in wildcards]): found_matching_media_type = True else: wildcards = {'*/*', '*/', 'application/*', 'application/'} if any([w in acceptable_media_type_lut for w in wildcards]): found_matching_media_type = True if expected_media_type in acceptable_media_type_lut: found_matching_media_type = True # If expected media type is specified as one of the acceptable # media types, check whether the corresponding transfer syntax is # appropriate. 
expected_transfer_syntaxes = acceptable_media_type_lut[ expected_media_type ] if ( transfer_syntax_uid not in expected_transfer_syntaxes and '*' not in expected_transfer_syntaxes ): raise ValueError( 'Instance frames cannot be retrieved using media type "{}" ' 'with any of the specified transfer syntaxes: "{}".'.format( expected_media_type, '", "'.join(expected_transfer_syntaxes) ) ) if found_matching_media_type: image_type = None else: # If expected media is not specified as one of the acceptable media # types, check whether one of the acceptable media types is # suitable for lossless recompression of the frame. if ( 'image/jp2' in acceptable_media_type_lut and ( '1.2.840.10008.1.2.4.90' in acceptable_media_type_lut['image/jp2'] ) ): image_type = 'image/jp2' else: raise ValueError( 'Instance frames cannot be retrieved using any of the ' f'acceptable media types: {media_types}.' ) return image_type def iter_instance_frames( self, study_instance_uid: str, series_instance_uid: str, sop_instance_uid: str, frame_numbers: List[int], media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None ) -> Iterator[bytes]: """Iterate over frames of an image instance. 
Parameters ---------- study_instance_uid: str Study Instance UID series_instance_uid: str Series Instance UID sop_instance_uid: str SOP Instance UID frame_numbers: List[int] Frame numbers media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional Acceptable media types and optionally the UIDs of the corresponding transfer syntaxes Returns ------- Iterator[bytes] Frames """ # noqa: E501 logger.info( f'iterate over frames of instance "{sop_instance_uid}" of ' f'series "{series_instance_uid}" of study "{study_instance_uid}"' ) file_path = self._get_instance_file_path( study_instance_uid, series_instance_uid, sop_instance_uid, ) if len(frame_numbers) == 0: raise ValueError('At least one frame number must be provided.') image_file_reader = self._get_image_file_reader(file_path) metadata = image_file_reader.metadata transfer_syntax_uid = image_file_reader.transfer_syntax_uid reencoding_media_type = self._check_media_types_for_instance_frames( transfer_syntax_uid, media_types ) for frame_number in frame_numbers: frame_index = frame_number - 1 frame = image_file_reader.read_frame(frame_index) if frame_number > int(getattr(metadata, 'NumberOfFrames', '1')): raise ValueError( f'Provided frame number {frame_number} exceeds number ' 'of available frames.' ) if not transfer_syntax_uid.is_encapsulated: pixels = frame else: if reencoding_media_type is None: pixels = frame elif reencoding_media_type == 'image/jp2': image_type = 'jpeg2000' image_kwargs = {'irreversible': False} array = image_file_reader.decode_frame(frame_index, frame) image = Image.fromarray(array) with io.BytesIO() as fp: image.save( fp, image_type, **image_kwargs # type: ignore ) pixels = fp.getvalue() else: raise ValueError( 'Cannot re-encode frames using media type ' f'"{reencoding_media_type}".' 
) yield pixels def retrieve_instance_frames( self, study_instance_uid: str, series_instance_uid: str, sop_instance_uid: str, frame_numbers: List[int], media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None ) -> List[bytes]: """Retrieve one or more frames of an image instance. Parameters ---------- study_instance_uid: str Study Instance UID series_instance_uid: str Series Instance UID sop_instance_uid: str SOP Instance UID frame_numbers: List[int] Frame numbers media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional Acceptable media types and optionally the UIDs of the corresponding transfer syntaxes Returns ------- List[bytes] Frames """ # noqa: E501 logger.info( f'retrieve frames of instance "{sop_instance_uid}" of ' f'series "{series_instance_uid}" of study "{study_instance_uid}"' ) file_path = self._get_instance_file_path( study_instance_uid, series_instance_uid, sop_instance_uid, ) if len(frame_numbers) == 0: raise ValueError('At least one frame number must be provided.') image_file_reader = self._get_image_file_reader(file_path) metadata = image_file_reader.metadata transfer_syntax_uid = image_file_reader.transfer_syntax_uid reencoding_media_type = self._check_media_types_for_instance_frames( transfer_syntax_uid, media_types ) frame_indices = [] for frame_number in frame_numbers: if frame_number > int(getattr(metadata, 'NumberOfFrames', '1')): raise ValueError( f'Provided frame number {frame_number} exceeds number ' 'of available frames.' 
) frame_index = frame_number - 1 frame_indices.append(frame_index) reencoded_frames = [] for frame_index in frame_indices: frame = image_file_reader.read_frame(frame_index) if not transfer_syntax_uid.is_encapsulated: reencoded_frame = frame else: if reencoding_media_type is None: reencoded_frame = frame elif reencoding_media_type == 'image/jp2': image_type = 'jpeg2000' image_kwargs = {'irreversible': False} array = image_file_reader.decode_frame(frame_index, frame) image = Image.fromarray(array) with io.BytesIO() as fp: image.save( fp, image_type, **image_kwargs # type: ignore ) reencoded_frame = fp.getvalue() else: raise ValueError( 'Cannot re-encode frames using media type ' f'"{reencoding_media_type}".' ) reencoded_frames.append(reencoded_frame) return reencoded_frames def retrieve_instance_frames_rendered( self, study_instance_uid: str, series_instance_uid: str, sop_instance_uid: str, frame_numbers: List[int], media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None, params: Optional[Dict[str, str]] = None, ) -> bytes: """Retrieve server-side rendered frames of an image instance. Parameters ---------- study_instance_uid: str Study Instance UID series_instance_uid: str Series Instance UID sop_instance_uid: str SOP Instance UID frame_numbers: List[int] Frame numbers media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional Acceptable media types and optionally the UIDs of the corresponding transfer syntaxes params: Union[Dict[str, str], None], optional Additional query parameters Returns ------- bytes Rendered representation of frames """ # noqa: E501 logger.info( f'retrieve rendered frames of instance "{sop_instance_uid}" of ' f'series "{series_instance_uid}" of study "{study_instance_uid}"' ) if len(frame_numbers) == 0: raise ValueError('A frame number must be provided.') elif len(frame_numbers) > 1: raise ValueError( 'Only rendering of a single frame is supported for now.' 
) frame_number = frame_numbers[0] file_path = self._get_instance_file_path( study_instance_uid, series_instance_uid, sop_instance_uid, ) image_file_reader = self._get_image_file_reader(file_path) frame_index = frame_number - 1 frame = image_file_reader.read_frame(frame_index) metadata = image_file_reader.metadata transfer_syntax_uid = image_file_reader.transfer_syntax_uid if frame_number > int(getattr(metadata, 'NumberOfFrames', '1')): raise ValueError( 'Provided frame number exceeds number of frames.' ) codec_name, codec_kwargs = self._get_image_codec_parameters( metadata=metadata, transfer_syntax_uid=transfer_syntax_uid, media_types=media_types, params=params ) if codec_name is None: pixels = frame else: array = image_file_reader.decode_frame(frame_index, frame) image = Image.fromarray(array) with io.BytesIO() as fp: image.save(fp, codec_name, **codec_kwargs) fp.seek(0) pixels = fp.read() return pixels def _get_image_codec_parameters( self, metadata: Dataset, transfer_syntax_uid: str, media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None, params: Optional[Dict[str, str]] = None, ) -> Tuple[Optional[str], Dict[str, Any]]: if media_types is not None: acceptable_media_types = list(set([ m[0] if isinstance(m, tuple) else m for m in media_types ])) are_media_types_valid = all( m.startswith('image') for m in acceptable_media_types ) if not are_media_types_valid: raise ValueError( 'Compressed instance frames can only be retrieved in ' 'rendered format using media type "image".' ) if 'image/png' in acceptable_media_types: image_type = 'png' elif 'image/jp2' in acceptable_media_types: if transfer_syntax_uid == '1.2.840.10008.1.2.4.90': image_type = None else: # Lossless recompression image_type = 'jpeg2000' elif 'image/jpeg' in acceptable_media_types: if transfer_syntax_uid == '1.2.840.10008.1.2.4.50': # Avoid lossy recompression of lossy compressed frames. image_type = None else: # Allow lossy recompression in case of retrieve rendered. 
logger.warn( 'frames of instance "{sop_instance_uid}" are lossy ' 'recompressed upon retrieval' ) image_type = 'jpeg' else: raise ValueError( 'Cannot retrieve frames of instance in rendered ' 'format using any of the acceptable media types: ' '"{}".'.format('", "'.join(acceptable_media_types)) ) else: if transfer_syntax_uid == '1.2.840.10008.1.2.4.50': # Avoid lossy recompression of lossy compressed frames. image_type = None else: image_type = 'jpeg' image_kwargs: Dict[str, Any] = { # Avoid re-compression when encoding in PNG format 'png': {'compress_level': 0, 'optimize': False}, 'jpeg': {'quality': 100, 'optimize': False}, 'jpeg2000': {'irreversible': False}, } if params is not None and image_type is not None: include_icc_profile = params.get('icc_profile', 'no') if include_icc_profile == 'yes': icc_profile = metadata.OpticalPathSequence[0].ICCProfile image_kwargs[image_type]['icc_profile'] = ImageCmsProfile( icc_profile ) elif include_icc_profile == 'srgb': icc_profile = createProfile('sRGB') image_kwargs[image_type]['icc_profile'] = ImageCmsProfile( icc_profile ) elif include_icc_profile == 'no': pass else: raise ValueError( f'ICC Profile "{include_icc_profile}" is not supported.' ) if image_type is None: return (image_type, {}) return (image_type, image_kwargs[image_type]) @staticmethod def lookup_keyword( tag: Union[int, str, Tuple[int, int], BaseTag] ) -> str: """Look up the keyword of a DICOM attribute. Parameters ---------- tag: Union[str, int, Tuple[int, int], pydicom.tag.BaseTag] Attribute tag (e.g. ``"00080018"``) Returns ------- str Attribute keyword (e.g. ``"SOPInstanceUID"``) """ keyword = keyword_for_tag(tag) if keyword is None: raise KeyError(f'Could not find a keyword for tag {tag}.') return keyword @staticmethod def lookup_tag(keyword: str) -> str: """Look up the tag of a DICOM attribute. Parameters ---------- keyword: str Attribute keyword (e.g. ``"SOPInstanceUID"``) Returns ------- str Attribute tag as HEX string (e.g. 
``"00080018"``) """ tag = tag_for_keyword(keyword) if tag is None: raise KeyError(f'Could not find a tag for "{keyword}".') tag = Tag(tag) return '{0:04x}{1:04x}'.format(tag.group, tag.element).upper() def store_instances( self, datasets: Sequence[Dataset], study_instance_uid: Optional[str] = None ) -> Dataset: """Store instances. Parameters ---------- datasets: Sequence[pydicom.dataset.Dataset] Instances that should be stored study_instance_uid: Union[str, None], optional Study Instance UID Returns ------- pydicom.dataset.Dataset Information about status of stored instances """ message = 'store instances' if study_instance_uid is not None: message += f' of study "{study_instance_uid}"' logger.info(message) # We first encode all data sets and temporarily store them in memory # before inserting the metadata into the database and writing the data # sets to files on disk. This will allow us to "roll back" in case of # an error. We may want to consider implementing this in a more # sophisticated way in case it becomes a performance bottleneck. 
studies: Dict[ str, Tuple[ str, Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], ] ] = {} series: Dict[ str, Tuple[ str, str, str, Optional[str], Optional[int], ] ] = {} instances: Dict[ str, Tuple[ str, str, str, str, Optional[int], Optional[int], Optional[int], Optional[int], Optional[int], str, str, ] ] = {} successes = [] failures = [] for ds in datasets: logger.info( f'store instance "{ds.SOPInstanceUID}" ' f'of series "{ds.SeriesInstanceUID}" ' f'of study "{ds.StudyInstanceUID}" ' ) try: if study_instance_uid is not None: if ds.StudyInstanceUID != study_instance_uid: continue else: study_instance_uid = ds.StudyInstanceUID study_metadata = self._extract_study_metadata(ds) studies[study_instance_uid] = study_metadata series_metadata = self._extract_series_metadata(ds) series_instance_uid = ds.SeriesInstanceUID series[series_instance_uid] = series_metadata sop_instance_uid = ds.SOPInstanceUID rel_file_path = '/'.join([ 'studies', study_instance_uid, 'series', series_instance_uid, 'instances', sop_instance_uid ]) instance_metadata = self._extract_instance_metadata( ds, rel_file_path ) instances[sop_instance_uid] = instance_metadata with io.BytesIO() as b: dcmwrite(b, ds, write_like_original=False) file_content = b.getvalue() file_path = self.base_dir.joinpath(rel_file_path) successes.append((ds, file_path, file_content)) except Exception as error: logger.error( f'failed to store instance "{ds.SOPInstanceUID}" ' f'of series "{ds.SeriesInstanceUID}" ' f'of study "{ds.StudyInstanceUID}": {error}' ) failures.append(ds) self._insert_into_db( studies.values(), series.values(), instances.values() ) response = Dataset() response.RetrieveURL = None if len(successes) > 0: response.ReferencedSOPSequence = [] for ds, file_path, file_content in successes: directory = file_path.parent directory.mkdir(exist_ok=True, parents=True) with open(file_path, 'wb') as fp: fp.write(file_content) success_item = 
Dataset() success_item.ReferencedSOPClassUID = ds.SOPClassUID success_item.ReferencedSOPInstanceUID = ds.SOPInstanceUID success_item.RetrieveURL = None if len(failures) > 0: response.FailedSOPSequence = [] for ds in failures: failure_item = Dataset() failure_item.FailureReason = 272 failure_item.ReferencedSOPClassUID = ds.SOPClassUID failure_item.ReferencedSOPInstanceUID = ds.SOPInstanceUID response.FailedSOPSequence.append(failure_item) return response def delete_study(self, study_instance_uid: str) -> None: """Delete all instances of a study. Parameters ---------- study_instance_uid: str Study Instance UID """ if study_instance_uid is None: raise ValueError( 'Study Instance UID is required for deletion of a study.' ) uids = self._get_instances(study_instance_uid) for study_instance_uid, series_instance_uid, sop_instance_uid in uids: self.delete_instance( study_instance_uid=study_instance_uid, series_instance_uid=series_instance_uid, sop_instance_uid=sop_instance_uid, ) def delete_series( self, study_instance_uid: str, series_instance_uid: str ) -> None: """Delete all instances of a series. Parameters ---------- study_instance_uid: str Study Instance UID series_instance_uid: str Series Instance UID """ if study_instance_uid is None: raise ValueError( 'Study Instance UID is required for deletion of a series.' ) if series_instance_uid is None: raise ValueError( 'Series Instance UID is required for deletion of a series.' ) uids = self._get_instances( study_instance_uid=study_instance_uid, series_instance_uid=series_instance_uid, ) for study_instance_uid, series_instance_uid, sop_instance_uid in uids: self.delete_instance( study_instance_uid=study_instance_uid, series_instance_uid=series_instance_uid, sop_instance_uid=sop_instance_uid, ) def delete_instance( self, study_instance_uid: str, series_instance_uid: str, sop_instance_uid: str ) -> None: """Delete specified instance. 
Parameters ---------- study_instance_uid: str Study Instance UID series_instance_uid: str Series Instance UID sop_instance_uid: str SOP Instance UID """ if study_instance_uid is None: raise ValueError( 'Study Instance UID is required for deletion of an instance.' ) if series_instance_uid is None: raise ValueError( 'Series Instance UID is required for deletion of an instance.' ) if sop_instance_uid is None: raise ValueError( 'SOP Instance UID is required for deletion of an instance.' ) file_path = self._get_instance_file_path( study_instance_uid=study_instance_uid, series_instance_uid=series_instance_uid, sop_instance_uid=sop_instance_uid, ) self._delete_instances_from_db( uids=[ (study_instance_uid, series_instance_uid, sop_instance_uid) ] ) os.remove(file_path)
[ "logging.getLogger", "numpy.product", "pydicom.filereader.data_element_offset_to_value", "traceback.format_tb", "math.floor", "io.BytesIO", "pydicom.filewriter.dcmwrite", "pydicom.valuerep.DA", "pydicom.dataset.Dataset", "os.remove", "pydicom.filereader.dcmread", "pydicom.dataset.FileMetaDatas...
[((1217, 1244), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1234, 1244), False, 'import logging\n'), ((4090, 4143), 'pydicom.filereader.data_element_offset_to_value', 'data_element_offset_to_value', (['fp.is_implicit_VR', '"""OB"""'], {}), "(fp.is_implicit_VR, 'OB')\n", (4118, 4143), False, 'from pydicom.filereader import data_element_offset_to_value, dcmread, read_file_meta_info, read_partial\n'), ((4234, 4255), 'pydicom.encaps.get_frame_offsets', 'get_frame_offsets', (['fp'], {}), '(fp)\n', (4251, 4255), False, 'from pydicom.encaps import encapsulate, get_frame_offsets\n'), ((23856, 23884), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (23879, 23884), False, 'import collections\n'), ((9503, 9512), 'pydicom.dataset.Dataset', 'Dataset', ([], {}), '()\n', (9510, 9512), False, 'from pydicom.dataset import Dataset, FileMetaDataset\n'), ((10624, 10663), 'pydicom.filereader.read_partial', 'read_partial', (['fp'], {'stop_when': 'is_main_tag'}), '(fp, stop_when=is_main_tag)\n', (10636, 10663), False, 'from pydicom.filereader import data_element_offset_to_value, dcmread, read_file_meta_info, read_partial\n'), ((10731, 10766), 'pydicom.uid.UID', 'UID', (['ds.file_meta.TransferSyntaxUID'], {}), '(ds.file_meta.TransferSyntaxUID)\n', (10734, 10766), False, 'from pydicom.uid import UID\n'), ((14335, 14347), 'pydicom.dataset.Dataset', 'Dataset', (['tmp'], {}), '(tmp)\n', (14342, 14347), False, 'from pydicom.dataset import Dataset, FileMetaDataset\n'), ((28745, 28758), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (28756, 28758), False, 'from collections import OrderedDict\n'), ((61446, 61524), 're.sub', 're.sub', (['"""StudyInstanceUID ="""', '"""series.StudyInstanceUID ="""', 'query_filter_string'], {}), "('StudyInstanceUID =', 'series.StudyInstanceUID =', query_filter_string)\n", (61452, 61524), False, 'import re\n'), ((67930, 68015), 're.sub', 're.sub', (['"""StudyInstanceUID ="""', 
'"""instances.StudyInstanceUID ="""', 'query_filter_string'], {}), "('StudyInstanceUID =', 'instances.StudyInstanceUID =',\n query_filter_string)\n", (67936, 68015), False, 'import re\n'), ((68089, 68176), 're.sub', 're.sub', (['"""SeriesInstanceUID ="""', '"""instances.SeriesInstanceUID ="""', 'query_filter_string'], {}), "('SeriesInstanceUID =', 'instances.SeriesInstanceUID =',\n query_filter_string)\n", (68095, 68176), False, 'import re\n'), ((84219, 84262), 'pydicom.filereader.dcmread', 'dcmread', (['file_path'], {'stop_before_pixels': '(True)'}), '(file_path, stop_before_pixels=True)\n', (84226, 84262), False, 'from pydicom.filereader import data_element_offset_to_value, dcmread, read_file_meta_info, read_partial\n'), ((87469, 87487), 'pydicom.filereader.dcmread', 'dcmread', (['file_path'], {}), '(file_path)\n', (87476, 87487), False, 'from pydicom.filereader import data_element_offset_to_value, dcmread, read_file_meta_info, read_partial\n'), ((110347, 110367), 'pydicom.datadict.keyword_for_tag', 'keyword_for_tag', (['tag'], {}), '(tag)\n', (110362, 110367), False, 'from pydicom.datadict import dictionary_VR, keyword_for_tag, tag_for_keyword\n'), ((110851, 110875), 'pydicom.datadict.tag_for_keyword', 'tag_for_keyword', (['keyword'], {}), '(keyword)\n', (110866, 110875), False, 'from pydicom.datadict import dictionary_VR, keyword_for_tag, tag_for_keyword\n'), ((110983, 110991), 'pydicom.tag.Tag', 'Tag', (['tag'], {}), '(tag)\n', (110986, 110991), False, 'from pydicom.tag import BaseTag, ItemTag, SequenceDelimiterTag, Tag, TupleTag\n'), ((115212, 115221), 'pydicom.dataset.Dataset', 'Dataset', ([], {}), '()\n', (115219, 115221), False, 'from pydicom.dataset import Dataset, FileMetaDataset\n'), ((119362, 119382), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (119371, 119382), False, 'import os\n'), ((11305, 11338), 'traceback.format_tb', 'traceback.format_tb', (['except_trace'], {}), '(except_trace)\n', (11324, 11338), False, 'import 
traceback\n'), ((13225, 13257), 'pydicom.uid.UID', 'UID', (['file_meta.TransferSyntaxUID'], {}), '(file_meta.TransferSyntaxUID)\n', (13228, 13257), False, 'from pydicom.uid import UID\n'), ((13957, 13999), 'pydicom.filereader.dcmread', 'dcmread', (['self._fp'], {'stop_before_pixels': '(True)'}), '(self._fp, stop_before_pixels=True)\n', (13964, 13999), False, 'from pydicom.filereader import data_element_offset_to_value, dcmread, read_file_meta_info, read_partial\n'), ((14386, 14480), 'numpy.product', 'np.product', (['[self._metadata.Rows, self._metadata.Columns, self._metadata.SamplesPerPixel]'], {}), '([self._metadata.Rows, self._metadata.Columns, self._metadata.\n SamplesPerPixel])\n', (14396, 14480), True, 'import numpy as np\n'), ((20607, 20625), 'pydicom.pixel_data_handlers.numpy_handler.unpack_bits', 'unpack_bits', (['value'], {}), '(value)\n', (20618, 20625), False, 'from pydicom.pixel_data_handlers.numpy_handler import unpack_bits\n'), ((21163, 21172), 'pydicom.dataset.Dataset', 'Dataset', ([], {}), '()\n', (21170, 21172), False, 'from pydicom.dataset import Dataset, FileMetaDataset\n'), ((21200, 21217), 'pydicom.dataset.FileMetaDataset', 'FileMetaDataset', ([], {}), '()\n', (21215, 21217), False, 'from pydicom.dataset import Dataset, FileMetaDataset\n'), ((28499, 28510), 'time.time', 'time.time', ([], {}), '()\n', (28508, 28510), False, 'import time\n'), ((28559, 28570), 'time.time', 'time.time', ([], {}), '()\n', (28568, 28570), False, 'import time\n'), ((29970, 29983), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (29981, 29983), False, 'from collections import OrderedDict\n'), ((34371, 34392), 'pydicom.datadict.tag_for_keyword', 'tag_for_keyword', (['attr'], {}), '(attr)\n', (34386, 34392), False, 'from pydicom.datadict import dictionary_VR, keyword_for_tag, tag_for_keyword\n'), ((58065, 58074), 'pydicom.dataset.Dataset', 'Dataset', ([], {}), '()\n', (58072, 58074), False, 'from pydicom.dataset import Dataset, FileMetaDataset\n'), ((63155, 
63164), 'pydicom.dataset.Dataset', 'Dataset', ([], {}), '()\n', (63162, 63164), False, 'from pydicom.dataset import Dataset, FileMetaDataset\n'), ((71336, 71345), 'pydicom.dataset.Dataset', 'Dataset', ([], {}), '()\n', (71343, 71345), False, 'from pydicom.dataset import Dataset, FileMetaDataset\n'), ((91454, 91476), 'PIL.Image.fromarray', 'Image.fromarray', (['array'], {}), '(array)\n', (91469, 91476), False, 'from PIL import Image\n'), ((106148, 106170), 'PIL.Image.fromarray', 'Image.fromarray', (['array'], {}), '(array)\n', (106163, 106170), False, 'from PIL import Image\n'), ((9171, 9179), 'pathlib.Path', 'Path', (['fp'], {}), '(fp)\n', (9175, 9179), False, 'from pathlib import Path\n'), ((11356, 11376), 'sys.stdout.write', 'sys.stdout.write', (['tb'], {}), '(tb)\n', (11372, 11376), False, 'import sys\n'), ((12781, 12816), 'pydicom.filereader.read_file_meta_info', 'read_file_meta_info', (['self._filepath'], {}), '(self._filepath)\n', (12800, 12816), False, 'from pydicom.filereader import data_element_offset_to_value, dcmread, read_file_meta_info, read_partial\n'), ((21881, 21908), 'pydicom.encaps.encapsulate', 'encapsulate', ([], {'frames': '[value]'}), '(frames=[value])\n', (21892, 21908), False, 'from pydicom.encaps import encapsulate, get_frame_offsets\n'), ((27480, 27494), 'pathlib.Path', 'Path', (['base_dir'], {}), '(base_dir)\n', (27484, 27494), False, 'from pathlib import Path\n'), ((91494, 91506), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (91504, 91506), False, 'import io\n'), ((106188, 106200), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (106198, 106200), False, 'import io\n'), ((109265, 109293), 'PIL.ImageCms.ImageCmsProfile', 'ImageCmsProfile', (['icc_profile'], {}), '(icc_profile)\n', (109280, 109293), False, 'from PIL.ImageCms import ImageCmsProfile, createProfile\n'), ((115627, 115636), 'pydicom.dataset.Dataset', 'Dataset', ([], {}), '()\n', (115634, 115636), False, 'from pydicom.dataset import Dataset, FileMetaDataset\n'), ((115965, 115974), 
'pydicom.dataset.Dataset', 'Dataset', ([], {}), '()\n', (115972, 115974), False, 'from pydicom.dataset import Dataset, FileMetaDataset\n'), ((12973, 13046), 'pydicom.errors.InvalidDicomError', 'InvalidDicomError', (['f"""File is not a valid DICOM file: "{self._filepath}"."""'], {}), '(f\'File is not a valid DICOM file: "{self._filepath}".\')\n', (12990, 13046), False, 'from pydicom.errors import InvalidDicomError\n'), ((35296, 35355), 'pydicom.filereader.read_partial', 'read_partial', (['fp'], {'stop_when': 'is_stop_tag', 'specific_tags': 'tags'}), '(fp, stop_when=is_stop_tag, specific_tags=tags)\n', (35308, 35355), False, 'from pydicom.filereader import data_element_offset_to_value, dcmread, read_file_meta_info, read_partial\n'), ((45372, 45390), 'pydicom.datadict.dictionary_VR', 'dictionary_VR', (['key'], {}), '(key)\n', (45385, 45390), False, 'from pydicom.datadict import dictionary_VR, keyword_for_tag, tag_for_keyword\n'), ((109410, 109431), 'PIL.ImageCms.createProfile', 'createProfile', (['"""sRGB"""'], {}), "('sRGB')\n", (109423, 109431), False, 'from PIL.ImageCms import ImageCmsProfile, createProfile\n'), ((109490, 109518), 'PIL.ImageCms.ImageCmsProfile', 'ImageCmsProfile', (['icc_profile'], {}), '(icc_profile)\n', (109505, 109518), False, 'from PIL.ImageCms import ImageCmsProfile, createProfile\n'), ((114482, 114494), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (114492, 114494), False, 'import io\n'), ((114521, 114563), 'pydicom.filewriter.dcmwrite', 'dcmwrite', (['b', 'ds'], {'write_like_original': '(False)'}), '(b, ds, write_like_original=False)\n', (114529, 114563), False, 'from pydicom.filewriter import dcmwrite\n'), ((16712, 16740), 'math.floor', 'math.floor', (['(i * n_pixels / 8)'], {}), '(i * n_pixels / 8)\n', (16722, 16740), False, 'import math\n'), ((99701, 99723), 'PIL.Image.fromarray', 'Image.fromarray', (['array'], {}), '(array)\n', (99716, 99723), False, 'from PIL import Image\n'), ((102952, 102974), 'PIL.Image.fromarray', 'Image.fromarray', 
(['array'], {}), '(array)\n', (102967, 102974), False, 'from PIL import Image\n'), ((45514, 45538), 'pydicom.datadict.tag_for_keyword', 'tag_for_keyword', (['keyword'], {}), '(keyword)\n', (45529, 45538), False, 'from pydicom.datadict import dictionary_VR, keyword_for_tag, tag_for_keyword\n'), ((45642, 45660), 'pydicom.datadict.dictionary_VR', 'dictionary_VR', (['tag'], {}), '(tag)\n', (45655, 45660), False, 'from pydicom.datadict import dictionary_VR, keyword_for_tag, tag_for_keyword\n'), ((47090, 47099), 'pydicom.valuerep.DA', 'DA', (['value'], {}), '(value)\n', (47092, 47099), False, 'from pydicom.valuerep import DA, DT, TM\n'), ((99749, 99761), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (99759, 99761), False, 'import io\n'), ((103000, 103012), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (103010, 103012), False, 'import io\n'), ((47644, 47653), 'pydicom.valuerep.DT', 'DT', (['value'], {}), '(value)\n', (47646, 47653), False, 'from pydicom.valuerep import DA, DT, TM\n'), ((48198, 48207), 'pydicom.valuerep.TM', 'TM', (['value'], {}), '(value)\n', (48200, 48207), False, 'from pydicom.valuerep import DA, DT, TM\n')]
import math
from skimage import io
from skimage.feature import blob_log
from skimage import exposure
from skimage.morphology import extrema
import cv2
import os
import sys
import numpy as np


# Beware: omitting min/max sigma prompts skimage to estimate them, which takes
# at least twice as long, so for large-scale use supplying both is recommended.
def laplacianOfGaussianBlobDetector(image, min_sigma=None, max_sigma=None):
    """Wrapper for ``skimage.feature.blob_log`` with sigma-based outlier filtering.

    Warning: the image is temporarily converted to 8-bit for improved spot
    detection.

    Parameters
    ----------
    image : np.ndarray
        Image on which spot detection is to be performed.
    min_sigma : int, optional
        Minimum sigma of the Laplacian-of-Gaussian kernel; estimated by
        skimage when omitted (slow), by default None.
    max_sigma : int, optional
        Maximum sigma of the Laplacian-of-Gaussian kernel; estimated by
        skimage when omitted (slow), by default None.

    Returns
    -------
    np.ndarray
        (N, 3) array of detected blobs as (row, col, sigma), filtered to
        keep only blobs whose sigma lies within two standard deviations of
        the mean sigma.
    """
    image = image.astype('uint8')
    if min_sigma is None or max_sigma is None:
        blobs = blob_log(image)
        print("No sigma's received as input for the spot detection. This will increase computation time.")
    else:
        blobs = blob_log(image, min_sigma=int(min_sigma), max_sigma=int(max_sigma))

    # QC based on sigma values: discard blobs whose sigma is an outlier,
    # i.e. outside mean +/- 2 * stdev of all detected sigmas.
    try:
        average_sigma = np.mean(blobs[:, 2])
        stdev_sigma = np.std(blobs[:, 2])
        upper_bound = math.ceil(average_sigma + (2 * stdev_sigma))
        lower_bound = math.floor(average_sigma - (2 * stdev_sigma))
        mask = np.where(np.logical_or(blobs[:, 2] > upper_bound, blobs[:, 2] < lower_bound), False, True)
        blobs = blobs[mask]
    except ValueError:
        # It might be the case that no blobs were found, in which case the
        # statistics above can raise; return the (empty) result unfiltered.
        pass
    return blobs


def localMaximaBlobDetection(image_path: str):
    """Detect blobs as local intensity maxima of the image at *image_path*.

    Parameters
    ----------
    image_path : str
        Path of the image file to load.

    Returns
    -------
    np.ndarray
        Local-maxima mask as produced by
        ``skimage.morphology.extrema.local_maxima``.
    """
    image = io.imread(image_path)
    local_maxima = extrema.local_maxima(image)
    # Bug fix: the computed maxima were previously discarded (no return
    # statement), making the function useless to callers.
    return local_maxima
[ "skimage.morphology.extrema.local_maxima", "numpy.mean", "math.ceil", "math.floor", "numpy.logical_or", "skimage.io.imread", "numpy.std", "skimage.feature.blob_log" ]
[((1952, 1973), 'skimage.io.imread', 'io.imread', (['image_path'], {}), '(image_path)\n', (1961, 1973), False, 'from skimage import io\n'), ((1993, 2020), 'skimage.morphology.extrema.local_maxima', 'extrema.local_maxima', (['image'], {}), '(image)\n', (2013, 2020), False, 'from skimage.morphology import extrema\n'), ((1107, 1122), 'skimage.feature.blob_log', 'blob_log', (['image'], {}), '(image)\n', (1115, 1122), False, 'from skimage.feature import blob_log\n'), ((1389, 1409), 'numpy.mean', 'np.mean', (['blobs[:, 2]'], {}), '(blobs[:, 2])\n', (1396, 1409), True, 'import numpy as np\n'), ((1431, 1450), 'numpy.std', 'np.std', (['blobs[:, 2]'], {}), '(blobs[:, 2])\n', (1437, 1450), True, 'import numpy as np\n'), ((1471, 1513), 'math.ceil', 'math.ceil', (['(average_sigma + 2 * stdev_sigma)'], {}), '(average_sigma + 2 * stdev_sigma)\n', (1480, 1513), False, 'import math\n'), ((1534, 1577), 'math.floor', 'math.floor', (['(average_sigma - 2 * stdev_sigma)'], {}), '(average_sigma - 2 * stdev_sigma)\n', (1544, 1577), False, 'import math\n'), ((1601, 1668), 'numpy.logical_or', 'np.logical_or', (['(blobs[:, 2] > upper_bound)', '(blobs[:, 2] < lower_bound)'], {}), '(blobs[:, 2] > upper_bound, blobs[:, 2] < lower_bound)\n', (1614, 1668), True, 'import numpy as np\n')]
import numpy as np
import csv


class Rullo:

    def __init__(self, content, row_constraints, column_constraints):
        """Creates a rullo board.

        Parameters
        ----------
        content : 2-dim array-like
            Values on the board.
        row_constraints : 1-dim array-like
            Target sums for the rows;
            ``len(row_constraints) == content.shape[0]``.
        column_constraints : 1-dim array-like
            Target sums for the columns;
            ``len(column_constraints) == content.shape[1]``.
        """
        # Bug fix: ``np.int`` was deprecated in NumPy 1.20 and removed in
        # 1.24.  The builtin ``int`` selects NumPy's platform-default
        # integer dtype, matching the behaviour of the old alias.
        self.content = np.asarray(content, dtype=int)
        self.row_constraints = np.asarray(row_constraints, dtype=int)
        self.column_constraints = np.asarray(column_constraints, dtype=int)

    @classmethod
    def from_csv(cls, f):
        """Builds a board from a CSV stream.

        The first CSV row holds the column constraints, the second the row
        constraints, and each subsequent row one row of board values.

        Parameters
        ----------
        f : iterable of str
            Open text stream (or any iterable of CSV lines).

        Returns
        -------
        Rullo
            The parsed board.
        """
        reader = csv.reader(f)
        column_constraints = np.array(next(reader), dtype=int)
        row_constraints = np.array(next(reader), dtype=int)
        # Board shape is (n_rows, n_columns), derived from the constraints.
        content_shape = row_constraints.shape + column_constraints.shape
        content = np.zeros(content_shape, dtype=int)
        for i, line in enumerate(reader):
            content[i, :] = np.array(line, dtype=int)
        return cls(content, row_constraints, column_constraints)
[ "numpy.array", "numpy.zeros", "numpy.asarray", "csv.reader" ]
[((616, 649), 'numpy.asarray', 'np.asarray', (['content'], {'dtype': 'np.int'}), '(content, dtype=np.int)\n', (626, 649), True, 'import numpy as np\n'), ((681, 722), 'numpy.asarray', 'np.asarray', (['row_constraints'], {'dtype': 'np.int'}), '(row_constraints, dtype=np.int)\n', (691, 722), True, 'import numpy as np\n'), ((757, 801), 'numpy.asarray', 'np.asarray', (['column_constraints'], {'dtype': 'np.int'}), '(column_constraints, dtype=np.int)\n', (767, 801), True, 'import numpy as np\n'), ((872, 885), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (882, 885), False, 'import csv\n'), ((1107, 1144), 'numpy.zeros', 'np.zeros', (['content_shape'], {'dtype': 'np.int'}), '(content_shape, dtype=np.int)\n', (1115, 1144), True, 'import numpy as np\n'), ((1205, 1233), 'numpy.array', 'np.array', (['line'], {'dtype': 'np.int'}), '(line, dtype=np.int)\n', (1213, 1233), True, 'import numpy as np\n')]
#!/usr/bin/env python3 #################################################################################################### # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See LICENSE in the project root for license information. #################################################################################################### # Tip: to run a particular test / set of tests: # python -m unittest discover -k "test_input_array" path_to_accera/test dsl_tests.py # python -m unittest discover -k "DSLTest_01" path_to_accera/test dsl_tests.py import logging import sys import unittest import os import pathlib import numpy as np from enum import Enum from typing import Callable, Tuple DEV_MODE = False if "@CMAKE_INSTALL_PREFIX@"[1:-1] != "CMAKE_INSTALL_PREFIX": sys.path.insert(1, "@CMAKE_INSTALL_PREFIX@") else: DEV_MODE = True sys.path.insert(1, os.getcwd()) from accera import ScalarType, Array, Function, Nest, Target, Package from accera.test import verifiers TEST_MODE = Package.Mode.DEBUG if DEV_MODE else Package.Mode.RELEASE TEST_FORMAT = Package.Format.MLIR_DYNAMIC if DEV_MODE else Package.Format.HAT_DYNAMIC TEST_PACKAGE_DIR = "test_acccgen" # Groups of types commonly used for tests INT_TYPES = [ ScalarType.int8, ScalarType.int16, ScalarType.int32, ScalarType.int64, ScalarType.uint8, ScalarType.uint16, ScalarType.uint32, ScalarType.uint64 ] FLOAT_TYPES = [ScalarType.float16, ScalarType.float32, ScalarType.float64] logger = logging.getLogger() logger.setLevel(logging.DEBUG) os.environ["OMP_DISPLAY_AFFINITY"] = "TRUE" # TODO: Remove all @expectedFailure decorators as implementation converges with spec class FailedReason(Enum): NOT_IN_CORE = "Not yet implemented (core)" NOT_IN_PY = "Not yet implemented (python)" UNKNOWN = "Unknown failure" BUG = "Bug" def expectedFailure(reason: FailedReason, msg: str, condition: bool = True) -> Callable: "Extends the unittest.expectedFailure decorator to print failure details and takes an 
class DSLTest_01Arrays(unittest.TestCase):
    """Tests for the accera `Array` DSL type: roles, layouts, temp arrays, sub-arrays."""

    def _verify_nest(self, nest, args: Tuple[Array], package_name, correctness_check_values=None) -> None:
        """Build *nest* into a HAT package; optionally verify numerical correctness.

        `correctness_check_values` is a dict with "pre"/"post" argument tuples.
        """
        # create a HAT package and add the function to it
        package = Package()
        function = package.add(nest, args, base_name=package_name)
        output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name

        # build the HAT package
        with verifiers.VerifyPackage(self, package_name, output_dir) as v:
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=output_dir)
            if correctness_check_values:
                v.check_correctness(
                    function.name,
                    before=correctness_check_values["pre"],
                    after=correctness_check_values["post"]
                )

    def test_input_array(self) -> None:
        """An INPUT array can be declared from an explicit shape and element type."""
        A = Array(shape=(10, 20), role=Array.Role.INPUT, element_type=ScalarType.float32)
        self.assertIsNotNone(A)

    def test_input_array_standard_layout(self) -> None:
        """An INPUT array accepts a symbolic LAST_MAJOR layout."""
        A = Array(shape=(10, 20), role=Array.Role.INPUT, layout=Array.Layout.LAST_MAJOR)
        # A = Array(shape=(10, 20), layout=Array.Layout.LAST_MAJOR, role=Array.Role.INPUT, element_type=ScalarType.float32)
        self.assertIsNotNone(A)

    def test_input_array_dimension_layout(self) -> None:
        """An INPUT array accepts explicit per-dimension stride tuples as layout."""
        A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, 20), layout=(1, 10))
        self.assertIsNotNone(A)
        A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, 20), layout=(10, 1))
        self.assertIsNotNone(A)
        A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, ), layout=(1, ))
        self.assertIsNotNone(A)
        A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, 20, 50), layout=(1, 10, 200))
        self.assertIsNotNone(A)
        A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, 20, 50), layout=(200, 10, 1))
        self.assertIsNotNone(A)
        A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, 20, 50), layout=(1, 200, 10))
        self.assertIsNotNone(A)
        A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, 20, 50), layout=(10, 200, 1))
        self.assertIsNotNone(A)

    def test_input_array_infinite_major_dimension(self) -> None:
        """Only one (major) dimension may be `inf`; it is concretized when the nest is added."""
        from accera import inf

        # two infinite dimensions are rejected
        with self.assertRaises(ValueError):
            Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(inf, inf))

        A = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(10, inf))
        self.assertIsNotNone(A)
        self.assertEqual(A.shape[1], inf)

        nest = Nest(shape=(10, 16))
        i, j = nest.get_indices()

        @nest.iteration_logic
        def _():
            A[i, j] += A[i, j]

        package = Package()
        package.add(nest, (A, ), base_name="inf_test")
        # after package.add, the inf dimension is resolved to the nest extent (16)
        self.assertEqual(A.shape[1], 16)

        package_name = "input_array_inf_test"
        with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR):
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR)

    def test_input_output_array(self) -> None:
        """An INPUT_OUTPUT array can be declared from shape and element type."""
        A = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(10, 20))
        self.assertIsNotNone(A)

    def test_const_array(self) -> None:
        """A CONST array infers its type/shape from the backing numpy data."""
        for dt in [
                bool,    # np.bool is deprecated in favor of bool
                np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
                np.float32, np.float64
        ]:
            D = np.ones((128, 256), dtype=dt)
            A = Array(role=Array.Role.CONST, data=D)
            self.assertIsNotNone(A)

    def test_const_array_type_layout(self) -> None:
        """A CONST array accepts an explicit element type and layout overriding the data's."""
        D = np.ones((128, 256), dtype=np.float64)
        for t in [ScalarType.bool] + INT_TYPES + FLOAT_TYPES:
            A = Array(role=Array.Role.CONST, element_type=t, layout=Array.Layout.LAST_MAJOR, data=D)
            self.assertIsNotNone(A)

    def test_temp_array(self) -> None:
        """A TEMP array can be declared with either symbolic layout."""
        A = Array(role=Array.Role.TEMP, element_type=ScalarType.float32, layout=Array.Layout.LAST_MAJOR, shape=(10, 20))
        self.assertIsNotNone(A)
        B = Array(
            role=Array.Role.TEMP, element_type=ScalarType.float32, layout=Array.Layout.FIRST_MAJOR, shape=(10, 20)
        )
        self.assertIsNotNone(B)

    def test_temp_array_materialization_1(self) -> None:
        # Materializes (allocates) a TEMP array externally to an added function
        def make_test_fn(package, A, B, C):
            T = Array(role=Array.Role.TEMP, element_type=A.element_type, shape=A.shape)

            nest = Nest(A.shape)
            i, j = nest.get_indices()

            @nest.iteration_logic
            def _():
                T[i, j] = A[i, j] + B[i, j]
                C[i, j] += T[i, j]**2.

            return package.add(nest, args=(A, B, C))

        A = Array(shape=(256, 32), role=Array.Role.INPUT)
        B = Array(shape=(256, 32), role=Array.Role.INPUT)
        C = Array(shape=(256, 32), role=Array.Role.INPUT_OUTPUT)

        package = Package()
        make_test_fn(package, A, B, C)
        package_name = "test_temp_array_materialization_1"
        with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR):
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR)

    def test_temp_array_materialization_2(self) -> None:
        # Materializes (allocates) a TEMP array within an added function
        package = Package()
        A = Array(shape=(256, 32), role=Array.Role.INPUT)
        B = Array(shape=(256, 32), role=Array.Role.INPUT_OUTPUT)

        def make_init_function(package, A):
            """Add a function that fills A with a constant."""
            nest = Nest(A.shape)
            i, j = nest.get_indices()

            @nest.iteration_logic
            def _():
                A[i, j] = 3.14

            return package.add(nest, args=(A, ))

        init_fn = make_init_function(package, B)

        def make_helper_function2(package, A, B):
            """Add a function that accumulates 2*A into B."""
            nest = Nest(A.shape)
            i, j = nest.get_indices()

            @nest.iteration_logic
            def _():
                B[i, j] += A[i, j] * 2.

            return package.add(nest, args=(A, B))

        helper_fn2 = make_helper_function2(package, A, B)

        def test_fn(A, B):
            # TEMP role: the array is allocated inside the generated function
            T = Array(role=Array.Role.TEMP, element_type=A.element_type, shape=A.shape)
            init_fn(T)
            helper_fn2(T, B)
            helper_fn2(A, B)

        package.add(test_fn, args=(A, B))

        package_name = "test_temp_array_materialization_2"
        with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR):
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR)

        def test_fn_wrong_role(A, B):
            # INPUT_OUTPUT cannot be materialized locally — build must fail
            T = Array(role=Array.Role.INPUT_OUTPUT, element_type=A.element_type, shape=A.shape)
            init_fn(T)
            helper_fn2(T, B)
            helper_fn2(A, B)

        package.add(test_fn_wrong_role, args=(A, B))

        package_name = "test_temp_array_materialization_2_wrong_role"
        with self.assertRaises(ValueError):
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR)

    def test_temp_array_materialization_3(self) -> None:
        # Materializes (allocates) a TEMP array within some nest iteration logic
        # *without* passing the array as a function argument
        package = Package()
        A = Array(shape=(256, 32), role=Array.Role.INPUT_OUTPUT)
        B = Array(shape=(256, 32), role=Array.Role.INPUT_OUTPUT)

        nest = Nest(A.shape)
        i, j = nest.get_indices()

        @nest.iteration_logic
        def _():
            T = Array(role=Array.Role.TEMP, element_type=A.element_type, shape=(1, ))

            # TODO: inject via introspection if we need to support this scenario
            T._allocate()
            T = T._get_native_array()

            T[0] = B[i, j]
            B[i, j] += A[i, j] * 2.
            A[i, j] = T[0]

        package.add(nest, args=(A, B))

        package_name = "test_temp_array_materialization_3"
        with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR):
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR)

    def test_first_major_array_access(self) -> None:
        """Writes through a FIRST_MAJOR (row-major) array are verified numerically."""
        A = Array(shape=(256, 32), role=Array.Role.INPUT, layout=Array.Layout.FIRST_MAJOR)

        nest = Nest(shape=(256, 32))
        i, j = nest.get_indices()

        @nest.iteration_logic
        def _():
            A[i, j] = 5.0

        A_test = np.random.random((256, 32)).astype(np.float32)
        A_expected = np.ndarray((256, 32)).astype(np.float32)
        A_expected.fill(5.0)
        correctness_check_values = {
            "pre": (A_test, ),
            "post": (A_expected, )
        }

        self._verify_nest(
            nest, (A, ), "test_first_major_array_access", correctness_check_values=correctness_check_values
        )

    def test_last_major_array_access(self) -> None:
        """Writes through a LAST_MAJOR (column-major) array are verified numerically."""
        A = Array(shape=(256, 32), role=Array.Role.INPUT, layout=Array.Layout.LAST_MAJOR)

        nest = Nest(shape=(256, 32))
        i, j = nest.get_indices()

        @nest.iteration_logic
        def _():
            A[i, j] = 5.0

        A_test = np.random.random((256, 32)).astype(np.float32, order="F")
        A_expected = np.ndarray((256, 32)).astype(np.float32, order="F")
        A_expected.fill(5.0)
        correctness_check_values = {
            "pre": (A_test, ),
            "post": (A_expected, )
        }

        self._verify_nest(
            nest, (A, ), "test_last_major_array_access", correctness_check_values=correctness_check_values
        )

    def test_array_value_type_cast(self) -> None:
        """Integer literals assigned to float/int32 arrays are implicitly cast."""
        A = Array(shape=(256, 32), role=Array.Role.INPUT, layout=Array.Layout.FIRST_MAJOR)
        B = Array(
            shape=(256, 32), role=Array.Role.INPUT, layout=Array.Layout.FIRST_MAJOR, element_type=ScalarType.int32
        )

        nest = Nest(shape=(256, 32))
        i, j = nest.get_indices()

        @nest.iteration_logic
        def _():
            A[i, j] = 5    # implicit cast from int8 to float
            B[i, j] = 10    # implicit cast from int8 to int32

        A_test = np.random.random((256, 32)).astype(np.float32)
        A_expected = np.ndarray((256, 32)).astype(np.float32)
        A_expected.fill(5.0)

        B_test = np.random.random((256, 32)).astype(np.int32)
        B_expected = np.ndarray((256, 32)).astype(np.int32)
        B_expected.fill(10)

        correctness_check_values = {
            "pre": (A_test, B_test),
            "post": (A_expected, B_expected)
        }

        self._verify_nest(nest, (A, B), "test_array_value_type_cast", correctness_check_values=correctness_check_values)

    def test_subarray(self) -> None:
        """A sub-array view shares layout with an equivalent view taken inside a function."""
        package = Package()

        arr = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(256, 256))
        arr0 = arr.sub_array(offsets=(0, 0), shape=(128, 128))
        self.assertEqual(arr0.shape, [128, 128])
        self.assertEqual(arr0.element_type, arr.element_type)
        print(arr0.layout)

        # add a function that utilizes a subarray layout
        def make_subarray_fn(arr0):
            nest = Nest(shape=arr0.shape)
            i, j = nest.get_indices()

            @nest.iteration_logic
            def _():
                arr0[i, j] += 1.

            return package.add(nest, args=(arr0, ))

        subarray_fn = make_subarray_fn(arr0)

        # add a function that instantiates a subarray of the input array and calls the function above
        def main(arr):
            arr1 = arr.sub_array(offsets=(0, 0), shape=(128, 128))
            print(arr1.layout)
            self.assertEqual(arr0.layout, arr1.layout)
            subarray_fn(arr1)

        package.add(main, args=(arr, ))

        package_name = "test_subarray"
        with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR):
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR)

    def test_subarray_l2(self) -> None:
        """Nested (two-level) sub-array views preserve shape, element type and layout."""
        package = Package()

        arr = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(256, 256))
        arr0 = arr.sub_array(offsets=(0, 0), shape=(128, 128))
        self.assertEqual(arr0.shape, [128, 128])
        self.assertEqual(arr0.element_type, arr.element_type)
        arr00 = arr0.sub_array(offsets=(64, 64), shape=(64, 64))
        self.assertEqual(arr00.shape, [64, 64])
        self.assertEqual(arr00.element_type, arr0.element_type)

        # add a function that utilizes a subarray layout
        def make_fn(A):
            nest = Nest(shape=A.shape)
            i, j = nest.get_indices()

            @nest.iteration_logic
            def _():
                A[i, j] += 1.

            return package.add(nest, args=(A, ))

        subarray_fn = make_fn(arr0)
        subarray_fn1 = make_fn(arr00)

        # add a function that instantiates a subarray of the input array and calls the function above
        def main(arr):
            arr1 = arr.sub_array(offsets=(0, 0), shape=(128, 128))
            arr11 = arr1.sub_array(offsets=(64, 64), shape=(64, 64))
            print(f"{arr1.layout}\n{arr11.layout}")
            self.assertEqual(arr0.layout, arr1.layout)
            self.assertEqual(arr00.layout, arr11.layout)
            subarray_fn(arr1)
            subarray_fn1(arr11)

        package.add(main, args=(arr, ))

        package_name = "test_subarray_l2"
        with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR):
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR)
class DSLTest_02SimpleAffineLoopNests(unittest.TestCase):
    """Tests for simple affine loop nests across the supported scalar types and operators."""

    def _create_nest(self, shape: Tuple[int], type=ScalarType.float32) -> Tuple:
        # helper function to create a nest so that we can focus on the logic function
        M, N, S = shape
        A = Array(role=Array.Role.INPUT, element_type=type, shape=(M, S))
        B = Array(role=Array.Role.INPUT, element_type=type, shape=(S, N))
        C = Array(role=Array.Role.INPUT_OUTPUT, element_type=type, shape=(M, N))

        return Nest(shape=(M, N, S)), A, B, C

    def _build_nest(self, nest, args: Tuple[Array], package_name, correctness_check_values=None) -> None:
        # helper function to build a nest so that we can focus on the logic function
        # create a HAT package and add the nest to it
        package = Package()
        function = package.add(nest, args, base_name=package_name)

        # build the HAT package
        with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR) as v:
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR)
            if correctness_check_values:
                v.check_correctness(
                    function.name,
                    before=correctness_check_values["pre"],
                    after=correctness_check_values["post"]
                )

    def test_signed_types(self) -> None:
        """+ - * / accumulate correctly for signed integer and float element types."""
        for t in [ScalarType.int16, ScalarType.int32, ScalarType.int64] + FLOAT_TYPES:
            A = Array(role=Array.Role.INPUT, element_type=t, shape=(16, 16))
            B = Array(role=Array.Role.INPUT, element_type=t, shape=(16, 16))
            C = Array(role=Array.Role.INPUT_OUTPUT, element_type=t, shape=(16, 16))

            nest = Nest(shape=(16, 16))
            i, j = nest.get_indices()

            @nest.iteration_logic
            def _():
                C[i, j] += A[i, j] + B[i, j]
                C[i, j] += A[i, j] - B[i, j]
                C[i, j] += A[i, j] * B[i, j]
                C[i, j] += A[i, j] / B[i, j]

            dtype = np.dtype(t.name)
            A_test = np.random.random(A.shape).astype(dtype)
            B_test = np.ones((C.shape)).astype(dtype)    # avoid divide by zero
            C_test = np.random.random(C.shape).astype(dtype)

            C_ref = C_test + A_test + B_test
            C_ref = C_ref + A_test - B_test
            C_ref = C_ref + A_test * B_test
            C_ref = C_ref + A_test / B_test

            if t == ScalarType.float16:
                # TODO: verification issue with correctness check?
                correctness_check_values = None
            else:
                correctness_check_values = {
                    "pre": [A_test, B_test, C_test],
                    "post": [A_test, B_test, C_ref]
                }
            self._build_nest(nest, [A, B, C], f"test_types_{t.name}", correctness_check_values)

    def test_unsigned_types(self) -> None:
        """+ - * / accumulate correctly for unsigned integer element types."""
        for t in [ScalarType.uint8, ScalarType.uint16, ScalarType.uint32, ScalarType.uint64]:
            A = Array(role=Array.Role.INPUT, element_type=t, shape=(16, 16))
            B = Array(role=Array.Role.INPUT, element_type=t, shape=(16, 16))
            C = Array(role=Array.Role.INPUT_OUTPUT, element_type=t, shape=(16, 16))

            nest = Nest(shape=(16, 16))
            i, j = nest.get_indices()

            @nest.iteration_logic
            def _():
                C[i, j] += A[i, j] + B[i, j]
                C[i, j] += A[i, j] - B[i, j]
                C[i, j] += A[i, j] * B[i, j]
                C[i, j] += A[i, j] / B[i, j]

            dtype = np.dtype(t.name)
            A_test = np.random.random(A.shape).astype(dtype)
            B_test = np.ones((C.shape)).astype(dtype)    # avoid divide by zero
            C_test = np.random.random(C.shape).astype(dtype)

            C_ref = C_test + A_test + B_test
            C_ref = C_ref + A_test - B_test
            C_ref = C_ref + A_test * B_test
            C_ref = C_ref + A_test / B_test

            correctness_check_values = {
                "pre": [A_test, B_test, C_test],
                "post": [A_test, B_test, C_ref]
            }
            self._build_nest(nest, [A, B, C], f"test_types_{t.name}", correctness_check_values)

    def test_arithmetic_operations(self) -> None:
        """All arithmetic operators (incl. // % ** and unary -) build for every numeric type."""
        for t in INT_TYPES + FLOAT_TYPES:
            nest, A, B, C = self._create_nest((16, 10, 11), type=t)
            i, j, k = nest.get_indices()

            @nest.iteration_logic
            def _():
                C[i, j] = A[i, k] + B[k, j]    # test assignment
                C[i, j] += A[i, k] - B[k, j]
                C[i, j] += A[i, k] * B[k, j]
                C[i, j] += A[i, k] / B[k, j]
                C[i, j] += -A[i, k]
                C[i, j] += A[i, k] // B[k, j]
                C[i, j] += A[i, k] % B[k, j]
                C[i, j] += A[i, k]**B[k, j]

            self._build_nest(nest, [A, B, C], f"test_arithmetic_operations_{t.name}")

    def test_relational_operations(self) -> None:
        """All relational operators build inside conditional (_If) iteration logic."""
        from accera._lang_python._lang import _If

        for t in [ScalarType.bool] + INT_TYPES + FLOAT_TYPES:
            nest, A, B, C = self._create_nest((16, 10, 11))
            i, j, k = nest.get_indices()

            @nest.iteration_logic
            def _():
                def f1():
                    C[i, j] += A[i, k] + B[k, j]

                def f2():
                    C[i, j] -= A[i, k] + B[k, j]

                def f3():
                    C[i, j] *= A[i, k] + B[k, j]

                def f4():
                    C[i, j] /= A[i, k] + B[k, j]

                # BUGBUG: this syntax probably needs to change
                _If(A[i, k] == B[k, j], f1)
                _If(A[i, k] != B[k, j], f2)
                _If(A[i, k] < B[k, j], f3)
                _If(A[i, k] <= B[k, j], f4)
                _If(A[i, k] > B[k, j], f1)
                _If(A[i, k] >= B[k, j], f2)

            self._build_nest(nest, [A, B, C], f"test_relational_operations_{t.name}")

    def test_logical_operations(self) -> None:
        """logical_not/and/or build for bool and integer element types."""
        from accera import logical_and, logical_or, logical_not

        for t in [ScalarType.bool] + INT_TYPES:
            nest, A, B, C = self._create_nest((16, 10, 11), type=t)
            i, j, k = nest.get_indices()

            @nest.iteration_logic
            def _():
                C[i, j] += logical_not(A[i, k])
                C[i, j] += logical_and(A[i, k], B[k, j])
                C[i, j] += logical_or(A[i, k], B[k, j])

            self._build_nest(nest, [A, B, C], f"test_logical_operations_{t.name}")

    def test_bitwise_operations(self) -> None:
        """Shift, and/or/xor and bitwise-not build for integer element types."""
        for t in INT_TYPES:
            nest, A, B, C = self._create_nest((16, 10, 11), type=t)
            i, j, k = nest.get_indices()

            @nest.iteration_logic
            def _():
                C[i, j] += B[j, k] >> 1
                C[i, j] += A[i, j] << 2
                C[i, j] += A[i, j] & B[j, k]
                C[i, j] += A[i, j] | B[j, k]
                C[i, j] += A[i, j] ^ B[j, k]
                C[i, j] += ~A[i, j]

            self._build_nest(nest, [A, B, C], f"test_bitwise_operations_{t.name}")

    def test_intrinsics(self) -> None:
        """max/min intrinsics build for every numeric element type."""
        from accera import max, min

        for t in INT_TYPES + FLOAT_TYPES:
            nest, A, B, C = self._create_nest((16, 10, 11), type=t)
            i, j, k = nest.get_indices()

            @nest.iteration_logic
            def _():
                C[i, j] += max(A[i, j], B[j, k])
                C[i, j] += min(A[i, j], B[j, k])

            self._build_nest(nest, [A, B, C], f"test_intrinsics_{t.name}")

    def test_intrinsics_float(self) -> None:
        """Float-only math intrinsics (exp, log, trig, etc.) build for float types."""
        from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh
        # from accera._lang_python import fast_exp, fast_exp_mlas

        for t in FLOAT_TYPES:
            nest, A, B, C = self._create_nest((16, 10, 11), type=t)
            i, j, k = nest.get_indices()

            @nest.iteration_logic
            def _():
                C[i, j] += abs(A[i, j])
                C[i, j] += exp(A[i, j])
                # C[i, j] += fast_exp(A[i, j])
                # C[i, j] += fast_exp_mlas(A[i, j])
                C[i, j] += log(B[j, k])
                C[i, j] += log2(B[j, k])
                C[i, j] += log10(A[i, j])
                C[i, j] += sin(A[i, j])
                C[i, j] += cos(B[j, k])
                C[i, j] += tan(A[i, j])
                C[i, j] += sqrt(B[j, k])
                C[i, j] += ceil(B[j, k])
                C[i, j] += floor(A[i, j])
                C[i, j] += sinh(A[i, j])
                C[i, j] += cosh(B[j, k])
                C[i, j] += tanh(A[i, j])

            self._build_nest(nest, [A, B, C], f"test_intrinsics_float_{t.name}")

    def test_convenience_syntax_1(self) -> None:
        """A Nest can be added to a package directly (without an explicit plan)."""
        nest, A, B, C = self._create_nest((16, 10, 11))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] + B[k, j]

        package = Package()
        # fixed: was "test_convenience_syntax_2" (copy-paste slip), which mislabeled
        # this package and collided with test_convenience_syntax_2's build artifacts
        package_name = "test_convenience_syntax_1"

        package.add(nest, args=(A, B, C), base_name="matmul")
        with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR):
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR)

    def test_convenience_syntax_2(self) -> None:
        """A Plan created from a Nest can be added to a package directly."""
        nest, A, B, C = self._create_nest((16, 10, 11))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        plan = nest.create_plan()

        package = Package()
        package_name = "test_convenience_syntax_2"
        package.add(plan, args=(A, B, C), base_name="matmul")
        with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR):
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR)
class DSLTest_03Schedules(unittest.TestCase):
    """Tests for Schedule transformations: reorder, split, tile, skew, pad."""

    def _create_nest(self, shape: Tuple[int], type=ScalarType.float32) -> Tuple:
        """Create a matmul-shaped (M, N, S) nest plus its A, B, C arrays."""
        M, N, S = shape

        A = Array(role=Array.Role.INPUT, element_type=type, shape=(M, S))
        B = Array(role=Array.Role.INPUT, element_type=type, shape=(S, N))
        C = Array(role=Array.Role.INPUT_OUTPUT, element_type=type, shape=(M, N))

        return Nest(shape=(M, N, S)), A, B, C

    def _verify_schedule(self, schedule, args: Tuple[Array], package_name, correctness_check_values=None) -> None:
        """Build *schedule* into a HAT package; optionally verify numerical correctness."""
        # create a HAT package and add the function to it
        package = Package()
        function = package.add(schedule, args, base_name="schedule_test")
        output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name

        # build the HAT package
        with verifiers.VerifyPackage(self, package_name, output_dir) as v:
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=output_dir)
            if correctness_check_values:
                v.check_correctness(
                    function.name,
                    before=correctness_check_values["pre"],
                    after=correctness_check_values["post"]
                )

    def test_schedule_reorder(self) -> None:
        """reorder() accepts both positional indices and an `order=` tuple."""
        nest, A, B, C = self._create_nest((16, 10, 11))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        schedule = nest.create_schedule()
        schedule.reorder(k, i, j)
        self.assertEqual(schedule._indices, [k, i, j])
        schedule.reorder(order=(j, i, k))
        self.assertEqual(schedule._indices, [j, i, k])
        self._verify_schedule(schedule, [A, B, C], "test_schedule_reorder")

    def test_schedule_split(self) -> None:
        """split() handles nested splits and sizes that don't divide, equal, or exceed the dimension."""
        nest, A, B, C = self._create_nest((16, 10, 11))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        schedule = nest.create_schedule()
        ii = schedule.split(i, 4)
        iii = schedule.split(i, 2)
        iiii = schedule.split(ii, 2)
        for index in [ii, iii, iiii]:
            self.assertIsNotNone(index)
        self.assertEqual(schedule._indices, [i, iii, ii, iiii, j, k])
        self._verify_schedule(schedule, [A, B, C], "test_schedule_split1")

        # split size does not divide the dimension size
        schedule2 = nest.create_schedule()
        kk = schedule2.split(k, 4)    # original size of dimension k was 11
        self.assertIsNotNone(kk)
        self.assertEqual(schedule2._indices, [i, j, k, kk])
        self._verify_schedule(schedule2, [A, B, C], "test_schedule_split2")

        # split size == dimension size
        schedule3 = nest.create_schedule()
        kk = schedule3.split(k, 11)    # original size of dimension k was 11
        self.assertIsNotNone(kk)
        self.assertEqual(schedule3._indices, [i, j, k, kk])
        self._verify_schedule(schedule3, [A, B, C], "test_schedule_split3")

        # split size > dimension size
        schedule4 = nest.create_schedule()
        kk = schedule4.split(k, 13)    # original size of dimension k was 11
        self.assertIsNotNone(kk)
        self.assertEqual(schedule4._indices, [i, j, k, kk])
        self._verify_schedule(schedule4, [A, B, C], "test_schedule_split4")

    def test_schedule_set_invalid_order(self) -> None:
        """reorder() rejects orders where an inner (split) index precedes its outer index."""
        nest, A, B, C = self._create_nest((16, 10, 11))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        schedule = nest.create_schedule()
        ii = schedule.split(i, 2)
        iii = schedule.split(ii, 2)
        jj = schedule.split(j, 5)
        self.assertEqual(schedule._indices, [i, ii, iii, j, jj, k])

        with self.assertRaises(ValueError):
            schedule.reorder(k, i, jj, j)
        self.assertEqual(schedule._indices, [i, ii, iii, j, jj, k])

        with self.assertRaises(ValueError):
            schedule.reorder(k, ii, iii, j, jj, i)
        self.assertEqual(schedule._indices, [i, ii, iii, j, jj, k])

        schedule.reorder(i, j, ii, jj, iii, k)
        self.assertEqual(schedule._indices, [i, j, ii, jj, iii, k])

    def test_schedule_tile(self) -> None:
        """tile() splits all — or a subset of — the iteration-space dimensions at once."""
        nest, A, B, C = self._create_nest((16, 10, 11))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        schedule = nest.create_schedule()
        ii, jj, kk = schedule.tile({
            i: 8,
            j: 2,
            k: 3
        })
        self.assertIsNotNone(ii)
        self.assertIsNotNone(jj)
        self.assertIsNotNone(kk)
        self.assertEqual(schedule._indices, [i, ii, j, jj, k, kk])
        self._verify_schedule(schedule, [A, B, C], "test_schedule_tile")

        # tile a subset of the iteration space
        schedule1 = nest.create_schedule()
        iii, kkk = schedule1.tile({
            i: 8,
            k: 3
        })
        self.assertIsNotNone(iii)
        self.assertIsNotNone(kkk)
        self.assertEqual(schedule1._indices, [i, iii, j, k, kkk])
        self._verify_schedule(schedule1, [A, B, C], "test_schedule_tile_subset")

    def test_schedule_skew(self) -> None:
        """skew() produces a correct 1-D convolution for several input/filter sizes."""
        for N in [10, 224]:    # input sizes
            for K in [1, 3, 5]:    # filter sizes
                M = N - K + 1    # output size

                A = Array(role=Array.Role.INPUT, shape=(N, ))
                B = Array(role=Array.Role.INPUT, shape=(K, ))
                C = Array(role=Array.Role.INPUT_OUTPUT, shape=(M, ))

                nest = Nest(shape=(M, K))
                i, j = nest.get_indices()

                @nest.iteration_logic
                def _():
                    C[i] += A[i + j] * B[j]

                schedule = nest.create_schedule()

                A_test = np.random.random(A.shape).astype(np.float32)
                B_test = np.random.random(B.shape).astype(np.float32)
                C_test = np.random.random(C.shape).astype(np.float32)
                correctness_check_values = {
                    "pre": [A_test, B_test, C_test],
                    "post": [A_test, B_test, C_test + np.convolve(np.flip(B_test), A_test, "valid")]
                }

                # Skew dimension i with respect to dimension j.
                schedule.skew(i, j)
                self._verify_schedule(schedule, [A, B, C], f"test_schedule_skew_i_j_{N}_{K}", correctness_check_values)

                # Skew dimension j with respect to dimension i.
                schedule1 = nest.create_schedule()
                schedule1.skew(j, i)
                self._verify_schedule(schedule1, [A, B, C], f"test_schedule_skew_j_i_{N}_{K}", correctness_check_values)

    def test_schedule_skew_unrolling(self) -> None:
        """skew() with unroll_loops_smaller_than still computes a correct convolution."""
        N = 10    # input size
        K = 3    # filter size
        M = N - K + 1    # output size = 8

        A = Array(role=Array.Role.INPUT, shape=(N, ))
        B = Array(role=Array.Role.INPUT, shape=(K, ))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(M, ))

        nest = Nest(shape=(M, K))
        i, j = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i] += A[i + j] * B[j]

        A_test = np.random.random(A.shape).astype(np.float32)
        B_test = np.random.random(B.shape).astype(np.float32)
        C_test = np.random.random(C.shape).astype(np.float32)
        correctness_check_values = {
            "pre": [A_test, B_test, C_test],
            "post": [A_test, B_test, C_test + np.convolve(np.flip(B_test), A_test, "valid")]
        }

        # Skew dimension i with respect to dimension j, with unrolling.
        schedule = nest.create_schedule()
        schedule.skew(i, j, unroll_loops_smaller_than=3)
        self._verify_schedule(schedule, [A, B, C], "test_schedule_skew_i_j_with_unrolling", correctness_check_values)

        # Skew dimension j with respect to dimension i, with unrolling.
        schedule1 = nest.create_schedule()
        schedule1.skew(j, i, unroll_loops_smaller_than=3)
        self._verify_schedule(schedule1, [A, B, C], f"test_schedule_skew_j_i_with_unrolling", correctness_check_values)

    def test_schedule_pad(self) -> None:
        """pad() + split() produce front/back-padded loop structures and a correct matmul."""
        nest, A, B, C = self._create_nest((16, 10, 11))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        schedule = nest.create_schedule()
        # Adds empty elements to the beginning of dimension i, j, k
        schedule.pad(i, 2)
        ii = schedule.split(i, 3)    # (2 + 16) // 3
        # should result in these loops for i, ii
        # i: [2, 3:3), ii: [0, 1:1) <-- partial (front padding)
        # i: [3: 18:3), ii: [0, 3:1) <-- full

        schedule.pad(j, 3)
        jj = schedule.split(j, 3)    # (3 + 10) // 3
        # should result in these loops for j, jj
        # j: [3, 12:3), jj: [0, 3:3) <-- full (front padding == split size)
        # j: [12, 13:3), jj: [0, 1:1) <-- partial (automatic back padding)

        schedule.pad(k, 11)
        kk = schedule.split(k, 4)    # (11 + 11) // 4
        # should result in these loops for k, kk
        # k: [11, 12:1), kk: [0, 1: 1) <-- partial
        # k: [12, 20:4), kk: [0: 4: 1) <-- full
        # k: [20, 22:4), kk: [0: 2: 1) <-- partial (automatic back padding)

        schedule.reorder(i, ii, k, j, jj, kk)

        A_test = np.random.random(A.shape).astype(np.float32)
        B_test = np.random.random(B.shape).astype(np.float32)
        C_test = np.random.random(C.shape).astype(np.float32)
        correctness_check_values = {
            "pre": [A_test, B_test, C_test],
            "post": [A_test, B_test, C_test + A_test @ B_test]
        }
        self._verify_schedule(schedule, [A, B, C], "test_schedule_pad", correctness_check_values)

    def test_convenience_syntax(self) -> None:
        """A Schedule can be added to a package directly."""
        nest, A, B, C = self._create_nest((16, 10, 11))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        schedule = nest.create_schedule()

        package = Package()
        package_name = "test_convenience_syntax"
        package.add(schedule, args=(A, B, C), base_name="plan_test")
        with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR):
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR)
class DSLTest_04Fusing(unittest.TestCase):
    """Tests for schedule fusing: full, partial, unequal-size and concat fusing."""

    def _verify_schedule(self, schedule, args: Tuple[Array], package_name, correctness_check_values) -> None:
        """Build *schedule* into a HAT package; verify correctness when values are given."""
        # create a HAT package and add the function to it
        package = Package()
        function = package.add(schedule, args, base_name="fusing_test")
        output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name

        # build the HAT package
        with verifiers.VerifyPackage(self, package_name, output_dir) as v:
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=output_dir)
            if correctness_check_values:
                v.check_correctness(
                    function.name,
                    before=correctness_check_values["pre"],
                    after=correctness_check_values["post"]
                )

    def test_full_iteration_space_fusing(self) -> None:
        """Fusing two same-shaped schedules interleaves both kernels per element."""
        from accera import fuse, Nest

        A = Array(role=Array.Role.INPUT, shape=(16, 16))
        B = Array(role=Array.Role.INPUT, shape=(16, 16))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(16, 16))

        # Create nest0 and schedule
        nest0 = Nest(shape=(16, 16))
        i0, j0 = nest0.get_indices()

        @nest0.iteration_logic
        def _():
            C[i0, j0] += A[i0, j0]

        schedule0 = nest0.create_schedule()

        # Create nest1 and schedule1
        nest1 = Nest(shape=(16, 16))
        i1, j1 = nest1.get_indices()

        @nest1.iteration_logic
        def _():
            C[i1, j1] *= B[i1, j1]

        schedule1 = nest1.create_schedule()

        # Create a fused schedule
        schedule = fuse(schedule0, schedule1)
        f, i, j = schedule.get_indices()
        schedule.reorder(i, j, f)

        A_test = np.random.random(A.shape).astype(np.float32)
        B_test = np.random.random(B.shape).astype(np.float32)
        C_test = np.random.random(C.shape).astype(np.float32)
        correctness_check_values = {
            "pre": [A_test, B_test, C_test],
            "post": [A_test, B_test, (C_test + A_test) * B_test]
        }
        self._verify_schedule(schedule, (A, B, C), "test_full_iteration_space_fusing1", correctness_check_values)

        # computing the output block-by-block:
        # first computing C[0:4, 0:4] += A[0:4, 0:4]
        # then computing C[0:4, 0:4] *= B[0:4, 0:4]
        ii, jj = schedule.tile({
            i: 4,
            j: 4
        })
        schedule.reorder(i, j, f, ii, jj)
        self._verify_schedule(schedule, (A, B, C), "test_full_iteration_space_fusing2", correctness_check_values)

    def test_partial_iteration_space_fusing_1(self) -> None:
        """Partial fusing (2 of 3 dims) implements matmul followed by a ReLU-style max."""
        from accera import fuse, Nest, max
        from accera._lang_python._lang import Scalar

        A = Array(role=Array.Role.INPUT, shape=(16, 11))
        B = Array(role=Array.Role.INPUT, shape=(11, 10))
        C = Array(role=Array.Role.INPUT, shape=(16, 10))

        # Fully-connected neural layer with activation: C = op(C + A @ B)

        # Create nest0 and schedule0
        nest0 = Nest(shape=(16, 10, 11))
        i0, j0, k0 = nest0.get_indices()

        @nest0.iteration_logic
        def _():
            C[i0, j0] += A[i0, k0] * B[k0, j0]

        schedule0 = nest0.create_schedule()

        # Create nest1 and schedule1
        nest1 = Nest(shape=(16, 10))
        i1, j1 = nest1.get_indices()

        @nest1.iteration_logic
        def _():
            # BUGBUG: should implicitly convert Scalar
            C[i1, j1] = max(C[i1, j1], Scalar(0.))

        schedule1 = nest1.create_schedule()

        schedule = fuse((schedule0, schedule1), partial=2)
        f, i, j, k = schedule.get_indices()
        schedule.reorder(i, j, f, k)

        # unfused indices (k) must not precede the fusing index (f)
        with self.assertRaises(ValueError):
            schedule.reorder(i, j, k, f)
        self.assertEqual(schedule._indices, [i, j, f, k])

        A_test = np.random.random(A.shape).astype(np.float32)
        B_test = np.random.random(B.shape).astype(np.float32)
        C_test = np.random.random(C.shape).astype(np.float32)
        correctness_check_values = {
            "pre": [A_test, B_test, C_test],
            "post": [A_test, B_test, np.maximum(C_test + A_test @ B_test, 0.)]
        }

        self._verify_schedule(schedule, (A, B, C), "test_partial_iteration_space_fusing_1", correctness_check_values)

    def test_partial_iteration_space_fusing_2(self) -> None:
        """Partial fusing (1 dim) of a 1-D squaring nest with a 2-D reduction nest."""
        from accera import fuse, Nest

        A = Array(role=Array.Role.INPUT_OUTPUT, shape=(16, ))
        B = Array(role=Array.Role.INPUT_OUTPUT, shape=(4, ))

        n0 = Nest([16])
        i0 = n0.get_indices()

        @n0.iteration_logic
        def _():
            A[i0] *= A[i0]

        s0 = n0.create_schedule()

        n1 = Nest([16, 4])
        i1, j1 = n1.get_indices()

        @n1.iteration_logic
        def _():
            B[j1] += A[i1]

        s1 = n1.create_schedule()

        fs = fuse((s0, s1), partial=1)
        f, i, j = fs.get_indices()
        jj = fs.split(j, 2)
        fs.reorder(i, f, j, jj)

        A_test_pre = np.random.random(A.shape).astype(np.float32)
        B_test_pre = np.random.random(B.shape).astype(np.float32)
        A_test_post = A_test_pre * A_test_pre
        B_test_post = B_test_pre + np.sum(A_test_post)
        correctness_check_values = {
            "pre": [A_test_pre, B_test_pre],
            "post": [A_test_post, B_test_post]
        }

        self._verify_schedule(fs, (A, B), "test_partial_iteration_space_fusing_2", correctness_check_values)

    def test_unequal_iteration_space_fusing_1(self) -> None:
        """The smaller second schedule is automatically end-padded with no-ops."""
        from accera import fuse, Nest

        A = Array(role=Array.Role.INPUT, shape=(16, 16))
        B = Array(role=Array.Role.INPUT, shape=(16, 10))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(16, 16))

        # Create nest0 and schedule
        nest0 = Nest(shape=(16, 16))
        i0, j0 = nest0.get_indices()

        @nest0.iteration_logic
        def _():
            C[i0, j0] += A[i0, j0]

        schedule0 = nest0.create_schedule()

        # Create nest1 and schedule1 with a smaller iteration space size
        nest1 = Nest(shape=(16, 10))
        i1, j1 = nest1.get_indices()

        @nest1.iteration_logic
        def _():
            C[i1, j1] *= B[i1, j1]

        schedule1 = nest1.create_schedule()

        # Create a fused schedule: the smaller iteration space (nest1) should
        # be automatically end-padded with no-ops
        schedule = fuse(schedule0, schedule1)
        f, i, j = schedule.get_indices()
        schedule.reorder(i, j, f)

        # Emitted fused loop should look like:
        # for i in range(0, 16):
        #     for j in range(0, 10):
        #         for f in range(2):
        #             if f == 0:
        #                 C[i, j] += A[i, j]
        #             if f == 1:
        #                 C[i, j] *= B[i, j]
        #     for j in range(10, 16):
        #         for f in range(2):
        #             if f == 0:
        #                 C[i, j] += A[i, j]
        A_test = np.random.random(A.shape).astype(np.float32)
        B_test = np.random.random(B.shape).astype(np.float32)
        C_test = np.random.random(C.shape).astype(np.float32)

        C_ref = C_test + A_test    # nest0
        C_ref[:, :B.shape[1]] = C_ref[:, :B.shape[1]] * B_test    # nest1

        correctness_check_values = {
            "pre": [A_test, B_test, C_test],
            "post": [A_test, B_test, C_ref]
        }
        self._verify_schedule(schedule, (A, B, C), "test_unequal_iteration_space_fusing_1", correctness_check_values)

    def test_unequal_iteration_space_fusing_2(self) -> None:
        """The smaller *first* schedule is automatically end-padded with no-ops."""
        from accera import fuse, Nest

        A = Array(role=Array.Role.INPUT, shape=(16, 10))
        B = Array(role=Array.Role.INPUT, shape=(16, 16))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(16, 16))

        # Create nest0 and schedule
        nest0 = Nest(shape=(16, 10))
        i0, j0 = nest0.get_indices()

        @nest0.iteration_logic
        def _():
            C[i0, j0] += A[i0, j0]

        schedule0 = nest0.create_schedule()

        # Create nest1 and schedule1 with a larger iteration space size
        nest1 = Nest(shape=(16, 16))
        i1, j1 = nest1.get_indices()

        @nest1.iteration_logic
        def _():
            C[i1, j1] *= B[i1, j1]

        schedule1 = nest1.create_schedule()

        # Create a fused schedule: the smaller iteration space (nest0) should
        # be automatically end-padded with no-ops
        schedule = fuse(schedule0, schedule1)
        f, i, j = schedule.get_indices()
        schedule.reorder(i, j, f)

        # Emitted fused loop should look like:
        # for i in range(0, 16):
        #     for j in range(0, 10):
        #         for f in range(2):
        #             if f == 0:
        #                 C[i, j] += A[i, j]
        #             if f == 1:
        #                 C[i, j] *= B[i, j]
        #     for j in range(10, 16):
        #         for f in range(2):
        #             if f == 1:
        #                 C[i, j] *= B[i, j]
        A_test = np.random.random(A.shape).astype(np.float32)
        B_test = np.random.random(B.shape).astype(np.float32)
        C_test = np.random.random(C.shape).astype(np.float32)

        C_ref = np.copy(C_test)
        C_ref[:, :A.shape[1]] = C_test[:, :A.shape[1]] + A_test    # nest0
        C_ref *= B_test    # nest1

        correctness_check_values = {
            "pre": [A_test, B_test, C_test],
            "post": [A_test, B_test, C_ref]
        }
        self._verify_schedule(schedule, (A, B, C), "test_unequal_iteration_space_fusing_2", correctness_check_values)

    def test_unequal_iteration_space_fusing_3(self) -> None:
        """Tiling an unequal fused schedule yields full, partial, and unfused boundary blocks."""
        from accera import fuse, Nest

        A = Array(role=Array.Role.INPUT, shape=(16, 16))
        B = Array(role=Array.Role.INPUT, shape=(16, 10))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(16, 16))

        # Create nest0 and schedule
        nest0 = Nest(shape=(16, 16))
        i0, j0 = nest0.get_indices()

        @nest0.iteration_logic
        def _():
            C[i0, j0] += A[i0, j0]

        schedule0 = nest0.create_schedule()

        # Create nest1 and schedule1 with a smaller iteration space size
        nest1 = Nest(shape=(16, 10))
        i1, j1 = nest1.get_indices()

        @nest1.iteration_logic
        def _():
            C[i1, j1] *= B[i1, j1]

        schedule1 = nest1.create_schedule()

        # Create a fused schedule: the smaller iteration space (nest1) should
        # be automatically end-padded with no-ops
        schedule = fuse(schedule0, schedule1)
        f, i, j = schedule.get_indices()

        # computing the output block-by-block:
        # first computing C[0:4, 0:4] += A[0:4, 0:4]
        # then computing C[0:4, 0:4] *= B[0:4, 0:4]
        ii, jj = schedule.tile({
            i: 4,
            j: 4
        })
        schedule.reorder(i, j, f, ii, jj)

        # Emitted fused loop should look like:
        # for i in range(0, 16, 4):
        #     # run both kernels in the smaller iteration spaces
        #     # (tiled block)
        #     for j in range(0, 8, 4):
        #         for f in range(2):
        #             if f == 0:
        #                 for ii in range(0, 4):
        #                     for jj in range(0, 4):
        #                         C[i+ii, j+jj] += A[i+ii, j+jj]
        #             if f == 1:
        #                 for ii in range(0, 4):
        #                     for jj in range(0, 4):
        #                         C[i+ii, j+jj] *= B[i+ii, j+jj]
        #
        #     # run both kernels in the smaller iteration space
        #     # (boundary block for split)
        #     for j in range(8, 10):    # range < split size
        #         for f in range(2):
        #             if f == 0:
        #                 for ii in range(0, 4):
        #                     C[i+ii, j] += A[i+ii, j]
        #             if f == 1:
        #                 for ii in range(0, 4):
        #                     C[i+ii, j] *= B[i+ii, j]
        #
        #     # run kernel with the larger iteration space
        #     # (boundary block for split)
        #     for j in range(10, 16):    # range < split size
        #         for f in range(2):
        #             if f == 0:
        #                 for ii in range(0, 4):
        #                     C[i+ii, j] += A[i+ii, j]
        A_test = np.random.random(A.shape).astype(np.float32)
        B_test = np.random.random(B.shape).astype(np.float32)
        C_test = np.random.random(C.shape).astype(np.float32)

        C_ref = C_test + A_test    # nest0
        C_ref[:, :B.shape[1]] = C_ref[:, :B.shape[1]] * B_test    # nest1

        correctness_check_values = {
            "pre": [A_test, B_test, C_test],
            "post": [A_test, B_test, C_ref]
        }
        self._verify_schedule(schedule, (A, B, C), "test_unequal_iteration_space_fusing_3", correctness_check_values)
"test_unequal_iteration_space_fusing_3", correctness_check_values) def test_concat_fusing_1(self) -> None: from accera import fuse, Nest A = Array(role=Array.Role.INPUT_OUTPUT, shape=(3, )) B = Array(role=Array.Role.INPUT_OUTPUT, shape=(7, )) n1 = Nest(A.shape) n2 = Nest(B.shape) n1_i = n1.get_indices() @n1.iteration_logic def _(): A[n1_i] /= A[n1_i] n2_i = n2.get_indices() @n2.iteration_logic def _(): B[n2_i] *= B[n2_i] fused = fuse([n.create_schedule() for n in [n1, n2]], partial=0) # Emitted fused loop should look like: # for f in range(3): # if f == 0: # for i in range(3): # A[i] /= A[i] # if f == 1: # for i in range(7): # B[i] *= B[i] A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) A_ref = A_test / A_test B_ref = B_test * B_test correctness_check_values = { "pre": [A_test, B_test], "post": [A_ref, B_ref] } self._verify_schedule(fused, (A, B), "test_concat_fusing_1", correctness_check_values) @expectedFailure(FailedReason.BUG, "Concat fusing is broken") def test_concat_fusing_2(self) -> None: from accera import fuse, Nest A = Array(role=Array.Role.INPUT_OUTPUT, shape=(11, )) B = Array(role=Array.Role.INPUT_OUTPUT, shape=(7, )) C = Array(role=Array.Role.INPUT_OUTPUT, shape=(5, )) n1 = Nest(A.shape) n2 = Nest(B.shape) n3 = Nest(C.shape) n1_i = n1.get_indices() @n1.iteration_logic def _(): A[n1_i] += A[n1_i] n2_i = n2.get_indices() @n2.iteration_logic def _(): B[n2_i] *= B[n2_i] n3_i = n3.get_indices() @n3.iteration_logic def _(): C[n3_i] /= C[n3_i] fused = fuse([n.create_schedule() for n in [n1, n2, n3]], partial=0) # Emitted fused loop should look like: # for f in range(3): # if f == 0: # for i in range(11): # A[i}] += A[i}] # if f == 1: # for i in range(7): # B[i}] *= B[i}] # if f == 2: # for i in range(5): # C[i}] /= C[i}] A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) C_test = np.random.random(C.shape).astype(np.float32) A_ref = A_test + 
A_test B_ref = B_test * B_test C_ref = C_test / C_test correctness_check_values = { "pre": [A_test, B_test, C_test], "post": [A_ref, B_ref, C_ref] } self._verify_schedule(fused, (A, B, C), "test_concat_fusing_2", correctness_check_values) def test_concat_fusing_3(self) -> None: from accera import fuse, Nest A = Array(role=Array.Role.INPUT_OUTPUT, shape=(3, 16)) B = Array(role=Array.Role.INPUT_OUTPUT, shape=(7, 16)) n1 = Nest(A.shape) n2 = Nest(B.shape) n1_i, n1_j = n1.get_indices() @n1.iteration_logic def _(): A[n1_i, n1_j] /= A[n1_i, n1_j] n2_i, n2_j = n2.get_indices() @n2.iteration_logic def _(): B[n2_i, n2_j] *= B[n2_i, n2_j] fused = fuse([n.create_schedule() for n in [n1, n2]], partial=0) # Emitted fused loop should look like: # for f in range(3): # if f == 0: # for i in range(3): # for j in range(16): # A[i,j] /= A[i,j] # if f == 1: # for i in range(7): # for j in range(16): # B[i,j] *= B[i,j] A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) A_ref = A_test / A_test B_ref = B_test * B_test correctness_check_values = { "pre": [A_test, B_test], "post": [A_ref, B_ref] } self._verify_schedule(fused, (A, B), "test_concat_fusing_3", correctness_check_values) @expectedFailure(FailedReason.BUG, "Concat fusing is broken") def test_concat_fusing_4(self) -> None: from accera import fuse, Nest A = Array(role=Array.Role.INPUT_OUTPUT, shape=(11, 16)) B = Array(role=Array.Role.INPUT_OUTPUT, shape=(7, 16)) C = Array(role=Array.Role.INPUT_OUTPUT, shape=(5, 16)) n1 = Nest(A.shape) n2 = Nest(B.shape) n3 = Nest(C.shape) n1_i, n1_j = n1.get_indices() @n1.iteration_logic def _(): A[n1_i, n1_j] += A[n1_i, n1_j] n2_i, n2_j = n2.get_indices() @n2.iteration_logic def _(): B[n2_i, n2_j] *= B[n2_i, n2_j] n3_i, n3_j = n3.get_indices() @n3.iteration_logic def _(): C[n3_i, n3_j] /= C[n3_i, n3_j] fused = fuse([n.create_schedule() for n in [n1, n2, n3]], partial=0) # Emitted fused loop should look like: # for f in range(3): # if f == 
0: # for i in range(11): # for j in range(16): # A[i,j] += A[i,j] # if f == 1: # for i in range(7): # for j in range(16): # B[i,j] *= B[i,j] # if f == 2: # for i in range(5): # for j in range(16): # C[i,j] /= C[i,j] A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) C_test = np.random.random(C.shape).astype(np.float32) A_ref = A_test + A_test B_ref = B_test * B_test C_ref = C_test / C_test correctness_check_values = { "pre": [A_test, B_test, C_test], "post": [A_ref, B_ref, C_ref] } self._verify_schedule(fused, (A, B, C), "test_concat_fusing_4", correctness_check_values) @unittest.skip("BUG: Compilation takes too long") def test_multi_concat_fusing_1(self) -> None: from accera import fuse, Nest A = Array(role=Array.Role.INPUT_OUTPUT, shape=(1024 + 13, )) B = Array(role=Array.Role.INPUT_OUTPUT, shape=(1024 + 11, )) C = Array(role=Array.Role.INPUT_OUTPUT, shape=(1024 + 7, )) D = Array(role=Array.Role.INPUT_OUTPUT, shape=(1024 + 3, )) # Create nest0 and schedule nest0 = Nest(A.shape) i0 = nest0.get_indices() @nest0.iteration_logic def _(): A[i0] += A[i0] # Create nest1 and schedule1 nest1 = Nest(B.shape) i1 = nest1.get_indices() @nest1.iteration_logic def _(): B[i1] *= B[i1] # Create a fused schedule s0, s1 = [n.create_schedule() for n in [nest0, nest1]] s0.split(i0, 11) s1.split(i1, 5) fused1 = fuse([s0, s1], partial=0) nest2 = Nest(C.shape) i2 = nest2.get_indices() @nest2.iteration_logic def _(): C[i2] *= C[i2] s2 = nest2.create_schedule() s2.split(i2, 13) fused2 = fuse([fused1, s2], partial=0) nest3 = Nest(D.shape) i3 = nest3.get_indices() @nest3.iteration_logic def _(): D[i3] *= D[i3] s3 = nest3.create_schedule() s3.split(i3, 7) fused3 = fuse([fused2, s3], partial=0) A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) C_test = np.random.random(C.shape).astype(np.float32) D_test = np.random.random(D.shape).astype(np.float32) correctness_check_values = { 
"pre": [A_test, B_test, C_test, D_test], "post": [A_test + A_test, B_test * B_test, C_test * C_test, D_test * D_test] } self._verify_schedule(fused3, (A, B, C, D), "test_multi_concat_fusing_1", correctness_check_values) class DSLTest_05Targets(unittest.TestCase): def test_known_targets(self) -> None: intel_name = "Intel 6400" intel = Target(known_name=intel_name, num_threads=44) self.assertEqual(intel.name, intel_name) self.assertEqual(intel.num_threads, 44) # override self.assertEqual(intel.vector_bytes, 32) # default self.assertEqual(intel.vector_registers, 16) # default self.assertEqual(intel.category, Target.Category.CPU) # default pi3_name = "Raspberry Pi 3B" pi3 = Target(Target.Model.RASPBERRY_PI_3B, category=Target.Category.CPU, frequency_GHz=1.2) self.assertEqual(pi3.name, pi3_name) self.assertEqual(pi3.num_threads, 8) self.assertEqual(pi3.category, Target.Category.CPU) def test_custom_targets(self) -> None: my_target = Target( name="Custom processor", category=Target.Category.CPU, architecture="x86_64", family="Broadwell", extensions=["MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE4", "SSE4.1", "SSE4.2", "AVX", "AVX2", "FMA3"], num_cores=22, num_threads=44, frequency_GHz=3.2, turbo_frequency_GHz=3.8, cache_sizes=[32, 256, 56320], cache_lines=[64, 64, 64] ) self.assertEqual(my_target.name, "Custom processor") self.assertEqual(my_target.category, Target.Category.CPU) self.assertEqual(my_target.architecture, "x86_64") self.assertTrue("SSE3" in my_target.extensions) def test_gpu_targets(self) -> None: v100_name = "NVidia V100" v100 = Target(Target.Model.NVIDIA_V100, category=Target.Category.GPU) self.assertEqual(v100.name, v100_name) self.assertEqual(v100.category, Target.Category.GPU) self.assertEqual(v100.warp_size, 32) mi100 = Target(Target.Model.AMD_MI100) self.assertEqual(mi100.warp_size, 64) self.assertEqual(mi100.frequency_GHz, 1.502) a100 = Target(Target.Model.NVIDIA_A100) self.assertEqual(a100.warp_size, 32) class 
DSLTest_06PlansCaching(unittest.TestCase): def _create_plan(self, shape: Tuple[int], type=ScalarType.float32) -> Tuple: M, N, S = shape A = Array(role=Array.Role.INPUT, element_type=type, shape=(M, S)) B = Array( role=Array.Role.INPUT, element_type=type, shape=(S, N), layout=Array.Layout.LAST_MAJOR ) # use a different caching layout C = Array(role=Array.Role.INPUT_OUTPUT, element_type=type, shape=(M, N)) nest = Nest(shape=(M, N, S)) i, j, k = nest.get_indices() @nest.iteration_logic def _(): C[i, j] += A[i, k] * B[k, j] plan = nest.create_plan() return plan, [A, B, C], [i, j, k] def _verify_plan(self, plan, args: Tuple[Array], package_name, correctness_check_values=None) -> None: # create a HAT package and add the function to it package = Package() function = package.add(plan, args, base_name="caching_test") output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name # build the HAT package with verifiers.VerifyPackage(self, package_name, output_dir) as v: package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=output_dir) if correctness_check_values: v.check_correctness( function.name, before=correctness_check_values["pre"], after=correctness_check_values["post"] ) def test_caching_by_level(self) -> None: plan, args, indices = self._create_plan((16, 10, 11)) A, B, C = args _, j, _ = indices AA = plan.cache(A, level=2) self.assertEqual(AA.index, j) # input, different layout BB = plan.cache(B, level=2, layout=Array.Layout.FIRST_MAJOR) self.assertEqual(BB.index, j) self._verify_plan(plan, [A, B, C], "test_caching_by_level") def test_caching_by_index(self) -> None: plan, args, indices = self._create_plan((16, 10, 11)) A, B, C = args _, j, _ = indices with self.assertRaises(ValueError): AA = plan.cache(A, index=j, level=1) AA = plan.cache(A, index=j) # input self.assertEqual(AA.index, j) # input, different layout BB = plan.cache(B, index=j, layout=Array.Layout.FIRST_MAJOR) self.assertEqual(BB.index, j) CC = plan.cache(C, index=j) # input/output 
        self.assertEqual(CC.index, j)

        self._verify_plan(plan, [A, B, C], "test_caching_by_index")

    def test_caching_by_element_budget(self) -> None:
        """A max_elements cache has no associated index, only a budget."""
        plan, args, _ = self._create_plan((256, 10, 11))
        A, B, C = args

        AA = plan.cache(A, max_elements=1024)
        self.assertEqual(AA.index, None)
        self.assertEqual(AA.max_elements, 1024)

        self._verify_plan(plan, [A, B, C], "test_caching_by_element_budget")

    def test_thrifty_caching(self) -> None:
        """Thrifty caching still returns cache handles for both layouts."""
        plan, args, indices = self._create_plan((16, 10, 11))
        A, B, C = args
        _, j, k = indices

        # A is row-major, thrifty mode should skip caching
        AA = plan.cache(A, thrifty=True, index=j)
        self.assertIsNotNone(AA)

        # B is column-major, thrifty mode should cache
        BB = plan.cache(B, thrifty=True, index=k)
        self.assertIsNotNone(BB)

        self._verify_plan(plan, [A, B, C], "test_thrifty_caching")

    @expectedFailure(FailedReason.NOT_IN_PY, "Various target memory identifiers")
    def test_cache_mapping(self) -> None:
        """Caching into a GPU memory space (not yet exposed in Python)."""
        A = Array(role=Array.Role.INPUT, shape=(1024, ))

        nest = Nest(shape=(64, ))
        i = nest.get_indices()

        @nest.iteration_logic
        def _():
            A[i] += 2

        v100 = Target(Target.Model.NVIDIA_V100, category=Target.Category.GPU, num_threads=16)
        plan = nest.create_plan(v100)
        plan.cache(i, type=v100.MemorySpace.SHARED)
        self._verify_plan(plan, [A], "test_cache_mapping")

    def test_cache_trigger_level(self) -> None:
        """A cache whose fill is triggered at an outer (trigger) index."""
        A = Array(role=Array.Role.INPUT, shape=(1024, 1024))
        B = Array(role=Array.Role.INPUT_OUTPUT, shape=(1024, 1024))

        nest = Nest(shape=(1024, 1024))
        i, j = nest.get_indices()

        @nest.iteration_logic
        def _():
            B[i, j] += A[i, j]

        schedule = nest.create_schedule()
        ii = schedule.split(i, 128)
        jj = schedule.split(j, 256)
        schedule.reorder(i, j, ii, jj)
        plan = schedule.create_plan()
        plan.cache(A, index=ii, trigger_index=j)

        self._verify_plan(plan, [A, B], "test_cache_trigger_level")

    def test_cache_trigger_level_matmul(self) -> None:
        """Trigger-level caching on a deeply split 1024^3 matmul schedule,
        with numerical correctness checked against numpy."""
        M = 1024
        N = 1024
        S = 1024
        A = Array(role=Array.Role.INPUT, shape=(M, S))
        B = Array(role=Array.Role.INPUT, shape=(S, N))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(M, N))

        nest = Nest(shape=(M, N, S))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        schedule = nest.create_schedule()
        jj = schedule.split(j, 128)
        kk = schedule.split(k, 256)
        kkk = schedule.split(kk, 4)
        jjj = schedule.split(jj, 16)
        jjjj = schedule.split(jjj, 8)
        ii = schedule.split(i, 6)
        schedule.reorder(j, k, i, jj, kk, kkk, ii, jjj, jjjj)
        plan = schedule.create_plan()
        plan.cache(B, index=kkk, trigger_index=k, layout=Array.Layout.FIRST_MAJOR)

        A_test = np.random.random(A.shape).astype(np.float32)
        B_test = np.random.random(B.shape).astype(np.float32)
        C_test = np.random.random(C.shape).astype(np.float32)
        correctness_check_values = {
            "pre": [A_test, B_test, C_test],
            "post": [A_test, B_test, C_test + A_test @ B_test]
        }
        self._verify_plan(
            plan, [A, B, C], "test_cache_trigger_level_matmul", correctness_check_values=correctness_check_values
        )

    # NOTE(review): method name misspells "hierarchical"; the package name
    # below uses the correct spelling. Left unrenamed to keep the discovered
    # test id stable.
    def test_hierachical_caching(self) -> None:
        """Stacked (multi-level) caches of A, B and C on a split matmul."""
        M = 1024
        N = 1024
        S = 1024
        A = Array(role=Array.Role.INPUT, shape=(M, S))
        B = Array(role=Array.Role.INPUT, shape=(S, N))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(M, N))

        nest = Nest(shape=(M, N, S))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        schedule = nest.create_schedule()
        jj = schedule.split(j, 128)
        kk = schedule.split(k, 256)
        kkk = schedule.split(kk, 4)
        jjj = schedule.split(jj, 16)
        jjjj = schedule.split(jjj, 8)
        ii = schedule.split(i, 6)
        schedule.reorder(j, k, i, jj, kk, kkk, ii, jjj, jjjj)
        plan = schedule.create_plan()
        # Second-level caches are created from the first-level cache handles.
        AA = plan.cache(A, level=5, trigger_level=7, layout=Array.Layout.FIRST_MAJOR)
        AAA = plan.cache(AA, level=3, trigger_level=5, layout=Array.Layout.LAST_MAJOR)
        BB = plan.cache(B, level=6, trigger_level=7, layout=Array.Layout.FIRST_MAJOR)
        BBB = plan.cache(BB, level=2, trigger_level=5, layout=Array.Layout.LAST_MAJOR)
        CC = plan.cache(C, level=8, layout=Array.Layout.FIRST_MAJOR)
        CCC = plan.cache(CC, level=6, layout=Array.Layout.LAST_MAJOR)

        A_test = np.random.random(A.shape).astype(np.float32)
        B_test = np.random.random(B.shape).astype(np.float32)
        C_test = np.random.random(C.shape).astype(np.float32)
        correctness_check_values = {
            "pre": [A_test, B_test, C_test],
            "post": [A_test, B_test, C_test + A_test @ B_test]
        }
        self._verify_plan(
            plan, [A, B, C], "test_hierarchical_caching", correctness_check_values=correctness_check_values
        )


class DSLTest_07PlansVectorizationParallelization(unittest.TestCase):
    """Tests for unroll, vectorize, kernelize, GPU binding and CPU
    parallelization scheduling strategies."""

    def _verify_plan(self, plan, args: Tuple[int], package_name, correctness_check_values=None) -> None:
        """Build the plan into a HAT package and optionally check numerical
        correctness against pre/post values."""
        package = Package()
        function = package.add(plan, args, base_name="vectorization_parallelization_test")
        output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name

        with verifiers.VerifyPackage(self, package_name, output_dir) as v:
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=output_dir)
            if correctness_check_values:
                v.check_correctness(
                    function.name,
                    before=correctness_check_values["pre"],
                    after=correctness_check_values["post"]
                )

    def test_unroll(self) -> None:
        """Unrolling either index of a small 2-D nest builds successfully."""
        from accera import Target, Nest

        A = Array(role=Array.Role.INPUT, shape=(3, 5))
        my_target = Target(category=Target.Category.CPU)

        nest = Nest(shape=(3, 5))
        i, j = nest.get_indices()

        @nest.iteration_logic
        def _():
            A[i, j] *= 2.0

        plan1 = nest.create_plan(my_target)
        plan1.unroll(index=j)
        self._verify_plan(plan1, [A], "test_unroll1")

        plan2 = nest.create_plan(my_target)
        plan2.unroll(index=i)
        self._verify_plan(plan2, [A], "test_unroll2")

    def test_vectorize(self) -> None:
        """Vectorizing an elementwise multiply on a vector-capable target."""
        from accera import Target, Nest

        A = Array(role=Array.Role.INPUT, shape=(64, ))
        B = Array(role=Array.Role.INPUT, shape=(64, ))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(64, ))

        my_target = Target(category=Target.Category.CPU, vector_bytes=16, vector_registers=2)

        nest = Nest(shape=(64, ))
        i = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i] = A[i] * B[i]

        plan = nest.create_plan(my_target)
        plan.vectorize(index=i)
        self._verify_plan(plan, [A, B, C], "test_vectorize")

    def test_kernelize(self) -> None:
        """kernelize() as shorthand for unrolling (i, j) and vectorizing k."""
        from accera import Target, Nest

        A = Array(role=Array.Role.INPUT, shape=(16, 11))
        B = Array(role=Array.Role.INPUT, shape=(11, 10))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(16, 10))

        nest = Nest(shape=(16, 10, 11))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        my_target = Target(category=Target.Category.CPU, vector_bytes=16, vector_registers=2)
        plan = nest.create_plan(my_target)

        # Shorthand for:
        # plan.unroll(i)
        # plan.unroll(j)
        # plan.vectorize(k)
        plan.kernelize(unroll_indices=(i, j), vectorize_indices=k)
        self._verify_plan(plan, [A, B, C], "test_kernelize")

    def test_kernelize_2(self) -> None:
        """kernelize() with multiple vectorized indices."""
        from accera import Target, Nest

        A = Array(role=Array.Role.INPUT, shape=(16, 16))
        B = Array(role=Array.Role.INPUT, shape=(16, 16))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(16, 16))

        nest = Nest(shape=(16, 16, 16))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        my_target = Target(category=Target.Category.CPU, vector_bytes=16, vector_registers=2)
        plan = nest.create_plan(my_target)

        # Shorthand for:
        # plan.unroll(i)
        # plan.vectorize(j)
        # plan.vectorize(k)
        plan.kernelize(unroll_indices=(i, ), vectorize_indices=(j, k))
        self._verify_plan(plan, [A, B, C], "test_kernelize_2")

    @expectedFailure(FailedReason.NOT_IN_PY, "pinning parallelization to CPU cores")
    def test_cpu_bind(self) -> None:
        """Pinning parallelized indices to specific CPU cores (not yet in Python)."""
        A = Array(role=Array.Role.INPUT, shape=(16, 11))
        B = Array(role=Array.Role.INPUT, shape=(11, 10))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(16, 10))

        nest = Nest(shape=(16, 10, 11))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        target = Target("HOST", num_threads=16)
        plan = nest.create_plan(target)
        plan.parallelize(indices=(i, j, k), pin=(target.cores[0], target.cores[1]))    # TODO: confirm syntax
        self._verify_plan(plan, [A, B, C], "test_cpu_bind")

    def test_gpu_bind(self) -> None:
        """Binding loop indices to GPU grid units and emitting a CUDA package."""
        M = 128
        N = 256
        K = 256
        A = Array(role=Array.Role.INPUT, shape=(M, K))
        B = Array(role=Array.Role.INPUT, shape=(K, N))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(M, N))

        nest = Nest(shape=(M, N, K))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        v100 = Target(Target.Model.NVIDIA_V100, category=Target.Category.GPU)
        plan = nest.create_plan(v100)
        plan.bind(mapping={
            i: v100.GridUnit.BLOCK_X,
            j: v100.GridUnit.THREAD_X,
            k: v100.GridUnit.THREAD_Y
        })

        test_name = "test_gpu_bind"
        package = Package()
        function = package.add(plan, args=(A, B, C), base_name=test_name)
        output_dir = pathlib.Path(TEST_PACKAGE_DIR) / test_name
        with verifiers.VerifyPackage(self, test_name, output_dir, file_list=[f"{test_name}.cu", f"{test_name}.hat"]) as v:
            package.build(
                name=test_name,
                format=Package.Format.MLIR | Package.Format.CUDA | Package.Format.HAT_PACKAGE,
                mode=Package.Mode.RELEASE,    # Package.Mode.DEBUG,
                output_dir=output_dir
            )

    def test_scheduling_strategies(self) -> None:
        """parallelize() with static/dynamic policies; invalid index orders
        and non-contiguous index sets must raise ValueError."""
        A = Array(role=Array.Role.INPUT, shape=(256, 1024))
        B = Array(role=Array.Role.INPUT, shape=(1024, 512))
        C = Array(role=Array.Role.INPUT_OUTPUT, shape=(256, 512))

        nest = Nest(shape=(256, 512, 1024))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += A[i, k] * B[k, j]

        target = Target("HOST", num_threads=16)

        # disable correctness checking on windows because the
        # install location of libomp.dll is non-standard as of now
        if sys.platform.startswith('win'):
            correctness_check_values = None
        else:
            A_test = np.random.random(A.shape).astype(np.float32)
            B_test = np.random.random(B.shape).astype(np.float32)
            C_test = np.random.random(C.shape).astype(np.float32)
            correctness_check_values = {
                "pre": [A_test, B_test, C_test],
                "post": [A_test, B_test, C_test + A_test @ B_test]
            }

        schedule = nest.create_schedule()
        ii = schedule.split(i, A.shape[0] // min(4, target.num_threads))

        # set the index (k) that cannot be parallelized as innermost
        schedule.reorder(i, ii, j, k)

        for policy in ["static", "dynamic"]:
            plan = schedule.create_plan(target)

            # wrong order
            with self.assertRaises(ValueError):
                plan.parallelize(indices=(k, ii), policy=policy)

            # non-contiguous
            with self.assertRaises(ValueError):
                plan.parallelize(indices=(i, j), policy=policy)

            # non-collapsed
            plan.parallelize(indices=i, policy=policy)
            self._verify_plan(plan, [A, B, C], f"test_parallelize_i_{policy}", correctness_check_values)

            # parallelizing middle index
            plan_ii = schedule.create_plan(target)
            plan_ii.parallelize(indices=ii, policy=policy)
            self._verify_plan(plan_ii, [A, B, C], f"test_parallelize_ii_{policy}", correctness_check_values)

            # partial collapsed
            plan_partial = schedule.create_plan(target)
            plan_partial.parallelize(indices=(i, ii, j), policy=policy)
            self._verify_plan(plan_partial, [A, B, C], f"test_parallelize_i_ii_j_{policy}", correctness_check_values)

            # partial collapsed inner indices
            plan_partial_inner = schedule.create_plan(target)
            plan_partial_inner.parallelize(indices=(ii, j), policy=policy)
            self._verify_plan(
                plan_partial_inner, [A, B, C], f"test_parallelize_ii_j_{policy}", correctness_check_values
            )

            # fully collapsed will result in correctness issues because parallelizing k can stomp on the C matrix
            # where multiple threads try to update C[i, j] for different values of k


class DSLTest_08DeferredLayout(unittest.TestCase):
    """Tests for CONST arrays with deferred layouts resolved from caches."""

    def _verify_package(self, plan, args, package_name, correctness_check_values) -> None:
        """Build the plan into a HAT package and check numerical correctness."""
        package = Package()
        function = package.add(plan, args, base_name="deferred_layout")
        output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name

        with verifiers.VerifyPackage(self, package_name, output_dir) as v:
            package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=output_dir)
            if correctness_check_values:
                v.check_correctness(
                    function.name,
                    before=correctness_check_values["pre"],
                    after=correctness_check_values["post"]
                )

    def test_deferred_layout_predefined(self) -> None:
        """Resolve a DEFERRED layout from a cache using predefined layouts;
        misuse (non-const array, wrong cache, duplicate) must raise."""
        matrix = np.random.rand(128, 128).astype(np.float32)
        B_test = np.random.random(matrix.shape).astype(np.float32)

        for layout in [Array.Layout.FIRST_MAJOR, Array.Layout.LAST_MAJOR]:
            A = Array(role=Array.Role.CONST, data=matrix, layout=Array.Layout.DEFERRED)
            B = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=matrix.shape)

            nest = Nest(shape=matrix.shape)
            i, j = nest.get_indices()

            @nest.iteration_logic
            def _():
                B[i, j] += A[i, j]

            # create a cache for the constant array
            plan1 = nest.create_plan()
            AA = plan1.cache(A, i, layout=layout)    # , thrifty=True) # TODO

            # create another cache, using a different plan, for testing purposes
            plan2 = nest.create_plan()
            BB = plan2.cache(B, i)

            with self.assertRaises(ValueError):
                B.deferred_layout(cache=BB)    # non-const array

            with self.assertRaises(ValueError):
                A.deferred_layout(cache=BB)    # wrong cache

            # update the constant array's layout based on the cache
            A.deferred_layout(cache=AA)
            self.assertEqual(A.layout, AA.layout)

            with self.assertRaises(ValueError):
                A.deferred_layout(cache=AA)    # duplicate

            package_name = f"test_deferred_layout_predefined_{layout}".replace(".", "_")    # sanitize path name
            self._verify_package(plan1, (B, ), package_name, {
                "pre": [B_test],
                "post": [B_test + matrix]
            })

    def test_deferred_layout_coefficients(self) -> None:
        """Resolve a DEFERRED layout from a cache using coefficient layouts."""
        matrix = np.random.rand(128, 128).astype(np.float32)
        B_test = np.random.random(matrix.shape).astype(np.float32)

        for layout in [(128, 1), (1, 128)]:
            A = Array(role=Array.Role.CONST, data=matrix, layout=Array.Layout.DEFERRED)
            B = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=matrix.shape)

            nest = Nest(shape=matrix.shape)
            i, j = nest.get_indices()

            @nest.iteration_logic
            def _():
                B[i, j] += A[i, j]

            plan = nest.create_plan()
            AA = plan.cache(A, i, layout=layout)    # , thrifty=True) # TODO
            A.deferred_layout(cache=AA)
            self.assertEqual(A.layout, AA.layout)

            package_name = f"test_deferred_layout_coefficients_{'_'.join(map(str, layout))}"
            self._verify_package(plan, (B, ), package_name, {
                "pre": [B_test],
                "post": [B_test + matrix]
            })


class DSLTest_09Parameters(unittest.TestCase):
    """Tests for parameterized nests, schedules and plans."""

    def test_parameterization_1(self) -> None:
        """One templated nest instantiated with several parameter bindings,
        including a parameterized split size, cache level and loop order."""
        from accera import create_parameters, Nest

        P0, P1, P2, P3 = create_parameters(4)

        A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P0, P2))
        B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P2, P1))
        C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(P0, P1))

        nest = Nest(shape=(P0, P1, P2))
        i, j, k = nest.get_indices()

        @nest.iteration_logic
        def _():
            C[i, j] += P3 * A[i, k] * B[k, j]

        package = Package()
        package_name = "test_parameterization_1"

        # Use the templated nest to add two different functions to the package
        package.add(
            nest,
            args=(A, B, C),
            parameters={
                P0: 16,
                P1: 16,
                P2: 16,
                P3: 1.0
            },
            base_name="matmul_16_16_16_1"
        )
        package.add(
            nest,
            args=(A, B, C),
            parameters={
                P0: 32,
                P1: 32,
                P2: 32,
                P3: 2.0
            },
            base_name="matmul_32_32_32_2"
        )

        P4, P5 = create_parameters(2)

        # Create a parameterized schedule
        schedule = nest.create_schedule()
        ii = schedule.split(i, size=P4)

        P6 = create_parameters(1)
        schedule.reorder(order=P6)

        # Create a parameterized plan
        plan = schedule.create_plan()
        plan.cache(A, level=P5)

        # Add another function to the package
        package.add(
            plan,
            args=(A, B, C),
            parameters={
                P0: 16,
                P1: 16,
                P2: 16,
                P3: 1.0,
                P4: 4,
                P5: 2,
                P6: (j, k, i, ii)
            },
            base_name="alternative_matmul_16_16_16"
        )

        with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR):
            package.build(name=package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR)

    def test_parameterization_2(self) -> None:
        """Parameterized splits, loop order and kernelize indices bound at
        package-add time for a 256^3 matmul."""
        from accera import create_parameters, Nest

        P0, P1, P2, P3 = create_parameters(4)

        A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P0, P2))
        B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P2, P1))
        C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(P0, P1))

        nest = Nest(shape=(P0, P1, P2))
        i, j, k = nest.get_indices()
@nest.iteration_logic def _(): C[i, j] += P3 * A[i, k] * B[k, j] package = Package() package_name = "test_parameterization_2" P4, P5 = create_parameters(2) # Create a parameterized schedule schedule = nest.create_schedule() ii = schedule.split(i, size=P4) jj = schedule.split(j, size=P4) kk = schedule.split(k, size=P4) P6, P7, P8 = create_parameters(3) schedule.reorder(order=P6) # Create a parameterized plan plan = schedule.create_plan() plan.cache(A, level=P5) plan.kernelize(unroll_indices=P7, vectorize_indices=P8) # Add another function to the package package.add( plan, args=(A, B, C), parameters={ P0: 256, P1: 256, P2: 256, P3: 1.0, P4: 4, P5: 2, P6: (j, k, i, ii, jj, kk), P7: (ii, jj), P8: kk }, base_name="matmul_256_256_256" ) with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR): package.build(name=package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR) def test_parameterization_3(self) -> None: from accera import create_parameters, Nest for N in [10, 224]: # input sizes for K in [1, 3, 5]: # filter sizes M = N - K + 1 # output size P = create_parameters(1) A = Array(role=Array.Role.INPUT, shape=(N, )) B = Array(role=Array.Role.INPUT, shape=(K, )) C = Array(role=Array.Role.INPUT_OUTPUT, shape=(M, )) nest = Nest(shape=(M, K)) i, j = nest.get_indices() @nest.iteration_logic def _(): C[i] += A[i + j] * B[j] schedule = nest.create_schedule() A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) C_test = np.random.random(C.shape).astype(np.float32) correctness_check_values = { "pre": [A_test, B_test, C_test], "post": [A_test, B_test, C_test + np.convolve(np.flip(B_test), A_test, "valid")] } # Skew dimension i with respect to dimension j with unroll loop not smaller than P. 
schedule.skew(i, j, P) # create a HAT package and add the function to it package = Package() package_name = f"test_parameterization_3_skew_i_j_{N}_{K}" function = package.add( schedule, args=(A, B, C), parameters={P: 0}, base_name=f"schedule_test_skew_i_j_{N}_{K}" ) output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name # build the HAT package with verifiers.VerifyPackage(self, package_name, output_dir) as v: package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=output_dir) if correctness_check_values: v.check_correctness( function.name, before=correctness_check_values["pre"], after=correctness_check_values["post"] ) def test_parameterization_4(self) -> None: from accera import create_parameters, Nest M = 16 N = 10 S = 11 type = ScalarType.float32 A = Array(role=Array.Role.INPUT, element_type=type, shape=(M, S)) B = Array(role=Array.Role.INPUT, element_type=type, shape=(S, N)) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=type, shape=(M, N)) nest = Nest(shape=(M, N, S)) i, j, k = nest.get_indices() @nest.iteration_logic def _(): C[i, j] += A[i, k] * B[k, j] schedule = nest.create_schedule() P1, P2, P3, P4, P5, P6 = create_parameters(6) # Adds empty elements to the beginning of dimension i, j, k schedule.pad(i, P1) ii = schedule.split(i, P2) # (2 + 16) // 3 # should result in these loops for i, ii # i: [2, 3:3), ii: [0, 1:1) <-- partial (front padding) # i: [3: 18:3), ii: [0, 3:1) <-- full schedule.pad(j, P3) jj = schedule.split(j, P4) # (3 + 10) // 3 # should result in these loops for j, jj # j: [3, 12:3), jj: [0, 3:3) <-- full (front padding == split size) # j: [12, 13:3), jj: [0, 1:1) <-- partial (automatic back padding) schedule.pad(k, P5) kk = schedule.split(k, P6) # (11 + 11) // 4 # should result in these loops for k, kk # k: [11, 12:1), kk: [0, 1: 1) <-- partial # k: [12, 20:4), kk: [0: 4: 1) <-- full # k: [20, 22:4), kk: [0: 2: 1) <-- partial (automatic back padding) schedule.reorder(i, ii, k, j, jj, kk) A_test = 
np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) C_test = np.random.random(C.shape).astype(np.float32) correctness_check_values = { "pre": [A_test, B_test, C_test], "post": [A_test, B_test, C_test + A_test @ B_test] } # create a HAT package and add the function to it package = Package() package_name = "test_parameterization_4_pad" function = package.add( schedule, args=(A, B, C), parameters={ P1: 2, P2: 3, P3: 3, P4: 3, P5: 11, P6: 4 }, base_name="schedule_test_pad_parameter" ) output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name # build the HAT package with verifiers.VerifyPackage(self, package_name, output_dir) as v: package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=output_dir) if correctness_check_values: v.check_correctness( function.name, before=correctness_check_values["pre"], after=correctness_check_values["post"] ) def test_parameterization_5(self) -> None: from accera import create_parameters A = Array(role=Array.Role.INPUT, shape=(256, 1024)) B = Array(role=Array.Role.INPUT, shape=(1024, 512)) C = Array(role=Array.Role.INPUT_OUTPUT, shape=(256, 512)) nest = Nest(shape=(256, 512, 1024)) i, j, k = nest.get_indices() @nest.iteration_logic def _(): C[i, j] += A[i, k] * B[k, j] target = Target("HOST", num_threads=16) assert target.architecture == Target.Architecture.HOST # disable correctness checking on windows because the # install location of libomp.dll is non-standard as of now if sys.platform.startswith('win'): correctness_check_values = None else: A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) C_test = np.random.random(C.shape).astype(np.float32) correctness_check_values = { "pre": [A_test, B_test, C_test], "post": [A_test, B_test, C_test + A_test @ B_test] } schedule = nest.create_schedule() ii = schedule.split(i, A.shape[0] // target.num_threads) # set the index (k) that cannot be parallelized as innermost 
schedule.reorder(i, ii, j, k) P1, P2, P3, P4, P5, P6, P7, P8 = create_parameters(8) for policy in ["static", "dynamic"]: plan = schedule.create_plan(target) # non-collapsed plan.parallelize(indices=P1, policy=P2) package_name = f"parameterized_test_parallelize_i_{policy}" package = Package() function = package.add( plan, args=[A, B, C], parameters={ P1: i, P2: policy }, base_name=f"parameterized_vectorization_parallelization_test_i_{policy}" ) output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name with verifiers.VerifyPackage(self, package_name, output_dir) as v: package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=output_dir) if correctness_check_values: v.check_correctness( function.name, before=correctness_check_values["pre"], after=correctness_check_values["post"] ) # parallelizing middle index plan_ii = schedule.create_plan(target) plan_ii.parallelize(indices=P3, policy=P4) package_name = f"parameterized_test_parallelize_ii_{policy}" package_ii = Package() function_ii = package_ii.add( plan_ii, args=[A, B, C], parameters={ P3: ii, P4: policy }, base_name=f"parameterized_vectorization_parallelization_test_ii_{policy}" ) output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name with verifiers.VerifyPackage(self, package_name, output_dir) as v: package_ii.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=output_dir) if correctness_check_values: v.check_correctness( function_ii.name, before=correctness_check_values["pre"], after=correctness_check_values["post"] ) # partial collapsed plan_partial = schedule.create_plan(target) plan_partial.parallelize(indices=P5, policy=P6) package_name = f"parameterized_test_parallelize_i_ii_j_{policy}" package_partial = Package() function_partial = package_partial.add( plan_ii, args=[A, B, C], parameters={ P5: (i, ii, j), P6: policy }, base_name=f"parameterized_vectorization_parallelization_test_i_ii_j_{policy}" ) output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name with 
verifiers.VerifyPackage(self, package_name, output_dir) as v: package_partial.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=output_dir) if correctness_check_values: v.check_correctness( function_partial.name, before=correctness_check_values["pre"], after=correctness_check_values["post"] ) # partial collapsed inner indices plan_partial_inner = schedule.create_plan(target) plan_partial_inner.parallelize(indices=P7, policy=P8) package_name = f"parameterized_test_parallelize_ii_j_{policy}" package_partial_inner = Package() function_partial_inner = package_partial_inner.add( plan, args=[A, B, C], parameters={ P7: (ii, j), P8: policy }, base_name=f"parameterized_vectorization_parallelization_test_ii_j_{policy}" ) output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name with verifiers.VerifyPackage(self, package_name, output_dir) as v: package_partial_inner.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=output_dir) if correctness_check_values: v.check_correctness( function_partial_inner.name, before=correctness_check_values["pre"], after=correctness_check_values["post"] ) def test_parameterization_grid(self) -> None: from accera import create_parameters, create_parameter_grid, Nest, Schedule P0, P1, P2, P3, P4 = create_parameters(5) A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P0, P2)) B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P2, P1)) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(P0, P1)) nest = Nest(shape=(P0, P1, P2)) i, j, k = nest.get_indices() @nest.iteration_logic def _(): C[i, j] += P3 * A[i, k] * B[k, j] sched: Schedule = nest.create_schedule() sched.split(j, P4) package = Package() package_name = "test_parameter_grid_generation" parameter_grid = { P0: [8, 16], P1: [16, 32], P2: [16], P3: [1.0, 2.0], P4: [3, 5, 7] } parameters = create_parameter_grid(parameter_grid) package.add(sched, args=(A, B, C), base_name="matmul", 
parameters=parameters) with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR): package.build(name=package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR) def test_fusion_parameterization_1(self) -> None: from accera import create_parameters, Nest, fuse A = Array(role=Array.Role.INPUT, element_type=float, shape=(32, )) B = Array(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(32, )) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(1, )) n0 = Nest([32, 32]) i0, j0 = n0.get_indices() @n0.iteration_logic def _(): B[i0] += A[i0] * A[j0] s0 = n0.create_schedule() n0_up = Nest(n0.get_shape()) i0_up, j0_up = n0_up.get_indices() @n0_up.iteration_logic def _(): B[i0_up] += A[i0_up] * A[j0_up] s0_up = n0_up.create_schedule() n1 = Nest([32]) i1 = n1.get_indices() @n1.iteration_logic def _(): C[0] += B[i1] s1 = n1.create_schedule() P0 = create_parameters(1) jj0 = s0.split(j0, P0) jj0_up = s0_up.split(j0_up, 16) fs = fuse((s0, s1), partial=1) f, i, j, jj = fs.get_indices() fs.reorder(i, f, j, jj) fs_up = fuse((s0_up, s1), partial=1) f_up, i_up, j_up, jj_up = fs_up.get_indices() fs_up.reorder(i_up, f_up, j_up, jj_up) package = Package() package_name = "test_fusion_parameterization_1" package.add(fs_up, args=(A, B, C), base_name="fuse_unparameterized_1") package.add( fs, args=(A, B, C), parameters={ P0: 16, }, base_name="fuse_1" ) package.add( fs, args=(A, B, C), parameters={ P0: 3, }, base_name="fuse_2" ) package.add( fs, args=(A, B, C), parameters=[{ P0: 5 }, { P0: 7 }], base_name="fuse_3" ) with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR): package.build(name=package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR) def test_fusion_parameterization_2(self) -> None: """ Goes through a different codepath from the above tests because the schedules are emitted directly prior to the fused schedule, which matters because the fused schedule has references to the schedule """ from accera 
import create_parameters, Nest, fuse A = Array(role=Array.Role.INPUT, element_type=float, shape=(32, )) B = Array(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(32, )) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(1, )) n0 = Nest([32, 32]) i0, j0 = n0.get_indices() @n0.iteration_logic def _(): B[i0] += A[i0] * A[j0] s0 = n0.create_schedule() n1 = Nest([32]) i1 = n1.get_indices() @n1.iteration_logic def _(): C[0] += B[i1] s1 = n1.create_schedule() P0 = create_parameters(1) jj0 = s0.split(j0, P0) fs = fuse((s0, s1), partial=1) package = Package() package_name = "test_fusion_parameterization_2" package.add( s0, args=(A, B), parameters={P0: 16}, base_name="s0_1" ) package.add( s0, args=(A, B), parameters={P0: 32}, base_name="s0_2" ) package.add( s1, args=(C, B), parameters={P0: 16}, base_name="s1_1" ) package.add( fs, args=(A, B, C), parameters={ P0: 16, }, base_name="fuse_1" ) package.add( fs, args=(A, B, C), parameters={ P0: 32, }, base_name="fuse_2" ) with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR): package.build(name=package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR) def test_fusion_parameterization_3(self) -> None: from accera import create_parameters, Nest, fuse A = Array(role=Array.Role.INPUT, element_type=float, shape=(32, )) B = Array(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(32, )) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(1, )) n0 = Nest([32, 32]) i0, j0 = n0.get_indices() @n0.iteration_logic def _(): B[i0] += A[i0] * A[j0] s0 = n0.create_schedule() n1 = Nest([32]) i1 = n1.get_indices() @n1.iteration_logic def _(): C[0] += B[i1] s1 = n1.create_schedule() P0, P1 = create_parameters(2) jj0 = s0.split(j0, P0) fs = fuse((s0, s1), partial=1) f, i, j, jj = fs.get_indices() ii = fs.split(i, P1) fs.reorder(f, i, j, ii, jj) package = Package() package_name = "test_fusion_parameterization_3" package.add( fs, args=(A, B, C), parameters={ P0: 16, P1: 8 
}, base_name="fuse_1" ) package.add( fs, args=(A, B, C), parameters={ P0: 32, P1: 4, }, base_name="fuse_2" ) with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR): package.build(name=package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR) def test_fusion_parameterization_4(self) -> None: from accera import create_parameters, Nest, fuse, create_parameter_grid A = Array(role=Array.Role.INPUT, element_type=float, shape=(128, )) B = Array(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(128, )) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(1, )) n0 = Nest([128, 128]) i0, j0 = n0.get_indices() @n0.iteration_logic def _(): B[i0] += A[i0] * A[j0] s0 = n0.create_schedule() n1 = Nest([128]) i1 = n1.get_indices() @n1.iteration_logic def _(): C[0] += B[i1] s1 = n1.create_schedule() P0, P1, P2 = create_parameters(3) jj0 = s0.split(j0, P0) fs = fuse((s0, s1), partial=1) f, i, j, jj = fs.get_indices() ii = fs.split(i, P1) fs.reorder(i, f, j, ii, jj) jjj = fs.split(jj, P2) package = Package() package_name = "test_fusion_parameterization_4" # Expected loop structure # P0 = 16 # P1 = 8 # P2 = 4 # for i in range(128, step=P1): # for f in range(2): # if f == 0: # for j in range(128, step=P0): # for ii in range(P1): # for jj in range(P0, step=P2): # for jjj in range(P2): # ... # if f == 1: # for ii in range(P1): # ... package.add( fs, args=(A, B, C), parameters={ P0: 16, P1: 8, P2: 4 }, base_name="fuse_1" ) # Expected loop structure # P0 = 32 # P1 = 4 # P2 = 8 # for i in range(128, step=P1): # for f in range(2): # if f == 0: # for j in range(128, step=P0): # for ii in range(P1): # for jj in range(P0, step=P2): # for jjj in range(P2): # ... # if f == 1: # for ii in range(P1): # ... 
package.add( fs, args=(A, B, C), parameters={ P0: 32, P1: 4, P2: 8 }, base_name="fuse_2" ) package.add( fs, args=(A, B, C), parameters=create_parameter_grid({ P0: [64, 8], P1: [12, 16, 20], P2: [2, 10] }), base_name="fuse_grid" ) with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR): package.build(name=package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR) def test_parameterization_auxiliary_data(self) -> None: from accera import create_parameters, create_parameter_grid, Nest, Schedule from hatlib import HATPackage P0, P1, P2, P3, P4 = create_parameters(5) A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P0, P2)) B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P2, P1)) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(P0, P1)) nest = Nest(shape=(P0, P1, P2)) i, j, k = nest.get_indices() @nest.iteration_logic def _(): C[i, j] += P3 * A[i, k] * B[k, j] sched: Schedule = nest.create_schedule() sched.split(j, P4) package = Package() package_name = "test_parameterization_auxiliary_data" parameter_grid = { P0: [8, 16], P1: [16, 32], P2: [16], P3: [1.0, 2.0], P4: [3, 5, 7] } parameters = create_parameter_grid(parameter_grid) package.add(sched, args=(A, B, C), base_name="matmul", parameters=parameters) with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR): package.build(name=package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR) hat_package = HATPackage(pathlib.Path(TEST_PACKAGE_DIR) / f"{package_name}.hat") functions = [fn for fn in hat_package.get_functions()] for function in functions: data_point = function.auxiliary['accera']['parameters'] if data_point: self.assertIn(int(data_point["P0"]), [8, 16]) self.assertIn(int(data_point["P1"]), [16, 32]) self.assertIn(int(data_point["P2"]), [16]) self.assertIn(float(data_point["P3"]), [1.0, 2.0]) self.assertIn(int(data_point["P4"]), [3, 5, 7]) class 
DSLTest_10Packages(unittest.TestCase): def _create_plan(self, target=Target.HOST) -> Function: A = Array(role=Array.Role.INPUT_OUTPUT, shape=(64, )) nest = Nest(shape=(64, )) i = nest.get_indices() @nest.iteration_logic def _(): A[i] += 2. plan = nest.create_plan(target) return plan, A def test_HAT_packages(self) -> None: from accera import Target pi3 = Target(Target.Model.RASPBERRY_PI_3B, category=Target.Category.CPU) plan, A = self._create_plan(pi3) package = Package() package_name = "MyPackage" package.add(plan, args=(A, ), base_name="func1") package.add(plan, args=(A, ), base_name="func2") with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR): package.build( package_name, format=Package.Format.HAT_STATIC, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR, platform=Package.Platform.RASPBIAN ) def test_MLIR_packages(self) -> None: plan, A = self._create_plan() package = Package() package_name = "MyPackage" package.add(plan, args=(A, ), base_name="func1") package.add(plan, args=(A, ), base_name="func2") with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR): package.build(package_name, format=Package.Format.MLIR_STATIC, output_dir=TEST_PACKAGE_DIR) def test_default_output_dir(self) -> None: plan, A = self._create_plan() package = Package() package_name = "MyPackage" package.add(plan, args=(A, ), base_name="func1") package.add(plan, args=(A, ), base_name="func2") with verifiers.VerifyPackage(self, package_name): package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE) def test_debug_mode_1(self) -> None: M = N = K = 16 A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, K)) B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(K, N)) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(M, N)) nest = Nest(shape=(M, N, K)) i, j, k = nest.get_indices() @nest.iteration_logic def _(): C[i, j] += A[i, k] * B[k, j] schedule = nest.create_schedule() ii = schedule.split(i, 4) 
schedule.reorder(i, k, j, ii) plan = schedule.create_plan() plan.unroll(ii) package = Package() package_name = "MyDebugPackage" function = package.add(plan, args=(A, B, C), base_name="func1") output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name with verifiers.VerifyPackage(self, package_name, output_dir) as v: package.build( package_name, format=TEST_FORMAT, output_dir=output_dir, mode=Package.Mode.DEBUG, tolerance=1e-5 ) A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) C_test = np.random.random(C.shape).astype(np.float32) v.check_correctness( function.name, before=[A_test, B_test, C_test], after=[A_test, B_test, C_test + A_test @ B_test] ) def test_debug_mode_2(self) -> None: M = N = K = 16 A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, K)) B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(K, N)) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(M, N)) nest = Nest(shape=(M, N, K)) i, j, k = nest.get_indices() @nest.iteration_logic def _(): C[i, j] += A[i, k] * B[k, j] schedule = nest.create_schedule() ii = schedule.split(i, 4) schedule.reorder(i, k, j, ii) plan = schedule.create_plan() plan.unroll(ii) # deliberately introduce a correctness issue plan.parallelize(indices=k) package = Package() package_name = "MyDebugPackageIncorrect" function = package.add(plan, args=(A, B, C), base_name="func1") output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name with verifiers.VerifyPackage(self, package_name, output_dir) as v: package.build( package_name, format=TEST_FORMAT, output_dir=output_dir, mode=Package.Mode.DEBUG, tolerance=1e-5 ) A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) C_test = np.random.random(C.shape).astype(np.float32) try: v.check_correctness( function.name, before=[A_test, B_test, C_test], after=[A_test, B_test, C_test + A_test @ B_test] ) except 
Exception as e: print(e) def test_debug_mode_fusion_1(self) -> None: from accera import fuse M = N = 16 A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N)) B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N)) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(M, N)) nest0 = Nest(shape=(M, N)) i0, j0 = nest0.get_indices() @nest0.iteration_logic def _(): C[i0, j0] += A[i0, j0] schedule0 = nest0.create_schedule() nest1 = Nest(shape=(M, N)) i1, j1 = nest1.get_indices() @nest1.iteration_logic def _(): C[i1, j1] *= B[i1, j1] schedule1 = nest1.create_schedule() schedule = fuse(schedule0, schedule1, partial=1) f, i, j0, j1 = schedule.get_indices() ii = schedule.split(i, 2) schedule.reorder(i, ii, f, j0, j1) package = Package() package_name = "MyFusionDebugPackage" function = package.add(schedule, args=(A, B, C), base_name="fusion_func1") output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name with verifiers.VerifyPackage(self, package_name, output_dir) as v: package.build( package_name, format=TEST_FORMAT, output_dir=output_dir, mode=Package.Mode.DEBUG, tolerance=1e-5 ) A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) C_test = np.random.random(C.shape).astype(np.float32) v.check_correctness( function.name, before=[A_test, B_test, C_test], after=[A_test, B_test, (C_test + A_test) * B_test] ) def test_debug_mode_fusion_2(self) -> None: from accera import fuse M = N = 16 A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N)) B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N)) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(M, N)) nest0 = Nest(shape=(M, N)) i0, j0 = nest0.get_indices() @nest0.iteration_logic def _(): C[i0, j0] += A[i0, j0] schedule0 = nest0.create_schedule() nest1 = Nest(shape=(M, N)) i1, j1 = nest1.get_indices() 
@nest1.iteration_logic def _(): C[i1, j1] *= B[i1, j1] schedule1 = nest1.create_schedule() # Reorder schedule1 before fusing schedule1.reorder(j1, i1) # Fuse schedule0 with the reordered schedule1 schedule = fuse(schedule0, schedule1) f, a, b = schedule.get_indices() # Deliberately break logical equivalence # before: C[1,0] = C[1,0] * B[1,0] + A[1,0] # after: C[1,0] = (C[1,0] + A[1,0]) * B[1,0] schedule.reorder(a, b, f) package = Package() package_name = "MyFusionDebugPackageIncorrect" function = package.add(schedule, args=(A, B, C), base_name="fusion_func1") output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name with verifiers.VerifyPackage(self, package_name, output_dir) as v: package.build( package_name, format=TEST_FORMAT, output_dir=output_dir, mode=Package.Mode.DEBUG, tolerance=1e-5 ) A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) C_test = np.random.random(C.shape).astype(np.float32) try: v.check_correctness( function.name, before=[A_test, B_test, C_test], after=[A_test, B_test, (C_test + A_test) * B_test] ) except Exception as e: print(e) def test_debug_mode_fusion_cascading_1(self) -> None: from accera import fuse M = N = 16 A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N)) B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N)) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(M, N)) nest0 = Nest(shape=(M, N)) i0, j0 = nest0.get_indices() @nest0.iteration_logic def _(): C[i0, j0] += A[i0, j0] schedule0 = nest0.create_schedule() nest1 = Nest(shape=(M, N)) i1, j1 = nest1.get_indices() @nest1.iteration_logic def _(): C[i1, j1] *= B[i1, j1] schedule1 = nest1.create_schedule() schedule_f1 = fuse(schedule0, schedule1) f, i, j = schedule_f1.get_indices() schedule_f1.reorder(i, j, f) nest2 = Nest(shape=(M, N)) i2, j2 = nest2.get_indices() @nest2.iteration_logic def _(): C[i2, j2] -= 1.0 schedule2 = 
nest2.create_schedule() # set the fused schedule first in the fusing order schedule_f2 = fuse(schedule_f1, schedule2, partial=2) package = Package() package_name = "MyFusionDebugPackageCascade1" function = package.add(schedule_f2, args=(A, B, C), base_name="fusion_func1") output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name with verifiers.VerifyPackage(self, package_name, output_dir) as v: package.build( package_name, format=TEST_FORMAT, output_dir=output_dir, mode=Package.Mode.DEBUG, tolerance=1e-5 ) A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) C_test = np.random.random(C.shape).astype(np.float32) v.check_correctness( function.name, before=[A_test, B_test, C_test], after=[A_test, B_test, (C_test + A_test) * B_test - 1.0] ) def test_debug_mode_fusion_cascading_2(self) -> None: from accera import fuse M = N = 16 A = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N)) B = Array(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N)) C = Array(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=(M, N)) nest0 = Nest(shape=(M, N)) i0, j0 = nest0.get_indices() @nest0.iteration_logic def _(): C[i0, j0] += A[i0, j0] schedule0 = nest0.create_schedule() nest1 = Nest(shape=(M, N)) i1, j1 = nest1.get_indices() @nest1.iteration_logic def _(): C[i1, j1] *= B[i1, j1] schedule1 = nest1.create_schedule() schedule_f1 = fuse(schedule0, schedule1) f, i, j = schedule_f1.get_indices() schedule_f1.reorder(i, j, f) nest2 = Nest(shape=(M, N)) i2, j2 = nest2.get_indices() @nest2.iteration_logic def _(): C[i2, j2] -= 1.0 schedule2 = nest2.create_schedule() # set an unfused schedule first in the fusing order schedule_f2 = fuse(schedule2, schedule_f1, partial=2) package = Package() package_name = "MyFusionDebugPackageCascade2" function = package.add(schedule_f2, args=(A, B, C), base_name="fusion_func1") output_dir = pathlib.Path(TEST_PACKAGE_DIR) / package_name with 
verifiers.VerifyPackage(self, package_name, output_dir) as v: package.build( package_name, format=TEST_FORMAT, output_dir=output_dir, mode=Package.Mode.DEBUG, tolerance=1e-5 ) A_test = np.random.random(A.shape).astype(np.float32) B_test = np.random.random(B.shape).astype(np.float32) C_test = np.random.random(C.shape).astype(np.float32) v.check_correctness( function.name, before=[A_test, B_test, C_test], after=[A_test, B_test, (C_test - 1.0 + A_test) * B_test] ) def test_add_description(self) -> None: from hatlib import HATFile plan, A, = self._create_plan() package = Package() package_name = "MyPackage" package.add(plan, args=(A, ), base_name="func1") package.add(plan, args=(A, ), base_name="func2") description1 = { "Dependencies": ["numpy", "onnx", "scipy"], "Documentation": "https://docs.readthedocs.io.", "SHA": "0bb913ce84afa28127ea3fd2a9995e219dad322a" } package.add_description( other=description1, version="1.0", author="Microsoft Research", license="https://mit-license.org" ) description2 = { "Documentation": "", # clearing a value "SHA": None, # removing a value "Release Notes": "https://stackoverflow.com" # adding an entry } package.add_description(other=description2) package.add_description(version="2.0") with verifiers.VerifyPackage(self, package_name, TEST_PACKAGE_DIR): package.build(package_name, format=TEST_FORMAT, mode=TEST_MODE, output_dir=TEST_PACKAGE_DIR) hat_file = HATFile.Deserialize(pathlib.Path(TEST_PACKAGE_DIR) / f"{package_name}.hat") hat_description = hat_file.description.auxiliary self.assertEqual(hat_description["Dependencies"], description1["Dependencies"]) self.assertEqual(hat_description["Documentation"], description2["Documentation"]) self.assertNotIn("SHA", hat_description) self.assertEqual(hat_description["Release Notes"], description2["Release Notes"]) self.assertEqual(hat_file.description.version, "2.0") self.assertEqual(hat_file.description.author, "Microsoft Research") self.assertEqual(hat_file.description.license_url, 
"https://mit-license.org") if __name__ == '__main__': unittest.main(verbosity=10)
[ "logging.getLogger", "accera.tanh", "sys.path.insert", "accera.ceil", "numpy.random.rand", "accera.Nest", "sys.platform.startswith", "accera.Array", "accera.sqrt", "accera.log10", "unittest.main", "accera.min", "accera.floor", "accera.logical_or", "numpy.flip", "accera.max", "pathlib...
[((1515, 1534), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1532, 1534), False, 'import logging\n'), ((816, 860), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""@CMAKE_INSTALL_PREFIX@"""'], {}), "(1, '@CMAKE_INSTALL_PREFIX@')\n", (831, 860), False, 'import sys\n'), ((55966, 56014), 'unittest.skip', 'unittest.skip', (['"""BUG: Compilation takes too long"""'], {}), "('BUG: Compilation takes too long')\n", (55979, 56014), False, 'import unittest\n'), ((121554, 121581), 'unittest.main', 'unittest.main', ([], {'verbosity': '(10)'}), '(verbosity=10)\n', (121567, 121581), False, 'import unittest\n'), ((910, 921), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (919, 921), False, 'import os\n'), ((2641, 2650), 'accera.Package', 'Package', ([], {}), '()\n', (2648, 2650), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((3255, 3332), 'accera.Array', 'Array', ([], {'shape': '(10, 20)', 'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32'}), '(shape=(10, 20), role=Array.Role.INPUT, element_type=ScalarType.float32)\n', (3260, 3332), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((3434, 3510), 'accera.Array', 'Array', ([], {'shape': '(10, 20)', 'role': 'Array.Role.INPUT', 'layout': 'Array.Layout.LAST_MAJOR'}), '(shape=(10, 20), role=Array.Role.INPUT, layout=Array.Layout.LAST_MAJOR)\n', (3439, 3510), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((3737, 3835), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(10, 20)', 'layout': '(1, 10)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, 20\n ), layout=(1, 10))\n', (3742, 3835), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((3876, 3974), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(10, 20)', 'layout': '(10, 1)'}), 
'(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, 20\n ), layout=(10, 1))\n', (3881, 3974), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((4015, 4106), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(10,)', 'layout': '(1,)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10,),\n layout=(1,))\n', (4020, 4106), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((4150, 4256), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(10, 20, 50)', 'layout': '(1, 10, 200)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, 20,\n 50), layout=(1, 10, 200))\n', (4155, 4256), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((4298, 4404), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(10, 20, 50)', 'layout': '(200, 10, 1)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, 20,\n 50), layout=(200, 10, 1))\n', (4303, 4404), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((4446, 4552), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(10, 20, 50)', 'layout': '(1, 200, 10)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, 20,\n 50), layout=(1, 200, 10))\n', (4451, 4552), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((4594, 4700), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(10, 20, 50)', 'layout': '(10, 200, 1)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(10, 20,\n 50), layout=(10, 200, 1))\n', (4599, 4700), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), 
((4983, 5073), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(10, inf)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (10, inf))\n', (4988, 5073), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((5159, 5179), 'accera.Nest', 'Nest', ([], {'shape': '(10, 16)'}), '(shape=(10, 16))\n', (5163, 5179), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((5312, 5321), 'accera.Package', 'Package', ([], {}), '()\n', (5319, 5321), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((5706, 5795), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(10, 20)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (10, 20))\n', (5711, 5795), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((6454, 6491), 'numpy.ones', 'np.ones', (['(128, 256)'], {'dtype': 'np.float64'}), '((128, 256), dtype=np.float64)\n', (6461, 6491), True, 'import numpy as np\n'), ((6743, 6856), 'accera.Array', 'Array', ([], {'role': 'Array.Role.TEMP', 'element_type': 'ScalarType.float32', 'layout': 'Array.Layout.LAST_MAJOR', 'shape': '(10, 20)'}), '(role=Array.Role.TEMP, element_type=ScalarType.float32, layout=Array.\n Layout.LAST_MAJOR, shape=(10, 20))\n', (6748, 6856), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((6896, 7010), 'accera.Array', 'Array', ([], {'role': 'Array.Role.TEMP', 'element_type': 'ScalarType.float32', 'layout': 'Array.Layout.FIRST_MAJOR', 'shape': '(10, 20)'}), '(role=Array.Role.TEMP, element_type=ScalarType.float32, layout=Array.\n Layout.FIRST_MAJOR, shape=(10, 20))\n', (6901, 7010), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((7609, 7654), 'accera.Array', 'Array', ([], {'shape': '(256, 
32)', 'role': 'Array.Role.INPUT'}), '(shape=(256, 32), role=Array.Role.INPUT)\n', (7614, 7654), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((7667, 7712), 'accera.Array', 'Array', ([], {'shape': '(256, 32)', 'role': 'Array.Role.INPUT'}), '(shape=(256, 32), role=Array.Role.INPUT)\n', (7672, 7712), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((7725, 7777), 'accera.Array', 'Array', ([], {'shape': '(256, 32)', 'role': 'Array.Role.INPUT_OUTPUT'}), '(shape=(256, 32), role=Array.Role.INPUT_OUTPUT)\n', (7730, 7777), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((7797, 7806), 'accera.Package', 'Package', ([], {}), '()\n', (7804, 7806), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((8236, 8245), 'accera.Package', 'Package', ([], {}), '()\n', (8243, 8245), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((8258, 8303), 'accera.Array', 'Array', ([], {'shape': '(256, 32)', 'role': 'Array.Role.INPUT'}), '(shape=(256, 32), role=Array.Role.INPUT)\n', (8263, 8303), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((8316, 8368), 'accera.Array', 'Array', ([], {'shape': '(256, 32)', 'role': 'Array.Role.INPUT_OUTPUT'}), '(shape=(256, 32), role=Array.Role.INPUT_OUTPUT)\n', (8321, 8368), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((10191, 10200), 'accera.Package', 'Package', ([], {}), '()\n', (10198, 10200), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((10213, 10265), 'accera.Array', 'Array', ([], {'shape': '(256, 32)', 'role': 'Array.Role.INPUT_OUTPUT'}), '(shape=(256, 32), role=Array.Role.INPUT_OUTPUT)\n', (10218, 10265), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((10278, 10330), 'accera.Array', 'Array', ([], {'shape': '(256, 32)', 'role': 
'Array.Role.INPUT_OUTPUT'}), '(shape=(256, 32), role=Array.Role.INPUT_OUTPUT)\n', (10283, 10330), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((10347, 10360), 'accera.Nest', 'Nest', (['A.shape'], {}), '(A.shape)\n', (10351, 10360), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((11112, 11190), 'accera.Array', 'Array', ([], {'shape': '(256, 32)', 'role': 'Array.Role.INPUT', 'layout': 'Array.Layout.FIRST_MAJOR'}), '(shape=(256, 32), role=Array.Role.INPUT, layout=Array.Layout.FIRST_MAJOR)\n', (11117, 11190), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((11207, 11228), 'accera.Nest', 'Nest', ([], {'shape': '(256, 32)'}), '(shape=(256, 32))\n', (11211, 11228), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((11816, 11893), 'accera.Array', 'Array', ([], {'shape': '(256, 32)', 'role': 'Array.Role.INPUT', 'layout': 'Array.Layout.LAST_MAJOR'}), '(shape=(256, 32), role=Array.Role.INPUT, layout=Array.Layout.LAST_MAJOR)\n', (11821, 11893), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((11910, 11931), 'accera.Nest', 'Nest', ([], {'shape': '(256, 32)'}), '(shape=(256, 32))\n', (11914, 11931), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((12538, 12616), 'accera.Array', 'Array', ([], {'shape': '(256, 32)', 'role': 'Array.Role.INPUT', 'layout': 'Array.Layout.FIRST_MAJOR'}), '(shape=(256, 32), role=Array.Role.INPUT, layout=Array.Layout.FIRST_MAJOR)\n', (12543, 12616), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((12629, 12743), 'accera.Array', 'Array', ([], {'shape': '(256, 32)', 'role': 'Array.Role.INPUT', 'layout': 'Array.Layout.FIRST_MAJOR', 'element_type': 'ScalarType.int32'}), '(shape=(256, 32), role=Array.Role.INPUT, layout=Array.Layout.\n FIRST_MAJOR, element_type=ScalarType.int32)\n', (12634, 
12743), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((12777, 12798), 'accera.Nest', 'Nest', ([], {'shape': '(256, 32)'}), '(shape=(256, 32))\n', (12781, 12798), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((13620, 13629), 'accera.Package', 'Package', ([], {}), '()\n', (13627, 13629), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((13645, 13736), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(256, 256)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (256, 256))\n', (13650, 13736), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((14925, 14934), 'accera.Package', 'Package', ([], {}), '()\n', (14932, 14934), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((14950, 15041), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(256, 256)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (256, 256))\n', (14955, 15041), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((16775, 16836), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'type', 'shape': '(M, S)'}), '(role=Array.Role.INPUT, element_type=type, shape=(M, S))\n', (16780, 16836), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((16849, 16910), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'type', 'shape': '(S, N)'}), '(role=Array.Role.INPUT, element_type=type, shape=(S, N))\n', (16854, 16910), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((16923, 16991), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'type', 'shape': '(M, N)'}), 
'(role=Array.Role.INPUT_OUTPUT, element_type=type, shape=(M, N))\n', (16928, 16991), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((17303, 17312), 'accera.Package', 'Package', ([], {}), '()\n', (17310, 17312), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((25473, 25482), 'accera.Package', 'Package', ([], {}), '()\n', (25480, 25482), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((26065, 26074), 'accera.Package', 'Package', ([], {}), '()\n', (26072, 26074), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((26536, 26597), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'type', 'shape': '(M, S)'}), '(role=Array.Role.INPUT, element_type=type, shape=(M, S))\n', (26541, 26597), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((26610, 26671), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'type', 'shape': '(S, N)'}), '(role=Array.Role.INPUT, element_type=type, shape=(S, N))\n', (26615, 26671), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((26684, 26752), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'type', 'shape': '(M, N)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=type, shape=(M, N))\n', (26689, 26752), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((26993, 27002), 'accera.Package', 'Package', ([], {}), '()\n', (27000, 27002), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((33297, 33337), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(N,)'}), '(role=Array.Role.INPUT, shape=(N,))\n', (33302, 33337), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((33351, 33391), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': 
'(K,)'}), '(role=Array.Role.INPUT, shape=(K,))\n', (33356, 33391), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((33405, 33452), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(M,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(M,))\n', (33410, 33452), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((33470, 33488), 'accera.Nest', 'Nest', ([], {'shape': '(M, K)'}), '(shape=(M, K))\n', (33474, 33488), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((36517, 36526), 'accera.Package', 'Package', ([], {}), '()\n', (36524, 36526), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((37058, 37067), 'accera.Package', 'Package', ([], {}), '()\n', (37065, 37067), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((37732, 37776), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT, shape=(16, 16))\n', (37737, 37776), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((37789, 37833), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT, shape=(16, 16))\n', (37794, 37833), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((37846, 37897), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(16, 16))\n', (37851, 37897), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((37951, 37971), 'accera.Nest', 'Nest', ([], {'shape': '(16, 16)'}), '(shape=(16, 16))\n', (37955, 37971), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((38192, 38212), 'accera.Nest', 'Nest', ([], {'shape': '(16, 16)'}), '(shape=(16, 16))\n', (38196, 38212), False, 'from accera import 
create_parameters, create_parameter_grid, Nest, Schedule\n'), ((38433, 38459), 'accera.fuse', 'fuse', (['schedule0', 'schedule1'], {}), '(schedule0, schedule1)\n', (38437, 38459), False, 'from accera import fuse\n'), ((39556, 39600), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 11)'}), '(role=Array.Role.INPUT, shape=(16, 11))\n', (39561, 39600), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((39613, 39657), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(11, 10)'}), '(role=Array.Role.INPUT, shape=(11, 10))\n', (39618, 39657), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((39670, 39714), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 10)'}), '(role=Array.Role.INPUT, shape=(16, 10))\n', (39675, 39714), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((39843, 39867), 'accera.Nest', 'Nest', ([], {'shape': '(16, 10, 11)'}), '(shape=(16, 10, 11))\n', (39847, 39867), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((40104, 40124), 'accera.Nest', 'Nest', ([], {'shape': '(16, 10)'}), '(shape=(16, 10))\n', (40108, 40124), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((40382, 40421), 'accera.fuse', 'fuse', (['(schedule0, schedule1)'], {'partial': '(2)'}), '((schedule0, schedule1), partial=2)\n', (40386, 40421), False, 'from accera import fuse\n'), ((41304, 41352), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(16,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(16,))\n', (41309, 41352), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((41366, 41413), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(4,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(4,))\n', (41371, 41413), False, 'from accera import ScalarType, Array, 
Function, Nest, Target, Package\n'), ((41429, 41439), 'accera.Nest', 'Nest', (['[16]'], {}), '([16])\n', (41433, 41439), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((41592, 41605), 'accera.Nest', 'Nest', (['[16, 4]'], {}), '([16, 4])\n', (41596, 41605), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((41762, 41787), 'accera.fuse', 'fuse', (['(s0, s1)'], {'partial': '(1)'}), '((s0, s1), partial=1)\n', (41766, 41787), False, 'from accera import fuse\n'), ((42479, 42523), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT, shape=(16, 16))\n', (42484, 42523), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((42536, 42580), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 10)'}), '(role=Array.Role.INPUT, shape=(16, 10))\n', (42541, 42580), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((42593, 42644), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(16, 16))\n', (42598, 42644), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((42698, 42718), 'accera.Nest', 'Nest', ([], {'shape': '(16, 16)'}), '(shape=(16, 16))\n', (42702, 42718), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((42975, 42995), 'accera.Nest', 'Nest', ([], {'shape': '(16, 10)'}), '(shape=(16, 10))\n', (42979, 42995), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((43311, 43337), 'accera.fuse', 'fuse', (['schedule0', 'schedule1'], {}), '(schedule0, schedule1)\n', (43315, 43337), False, 'from accera import fuse\n'), ((44510, 44554), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 10)'}), '(role=Array.Role.INPUT, shape=(16, 10))\n', (44515, 44554), 
False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((44567, 44611), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT, shape=(16, 16))\n', (44572, 44611), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((44624, 44675), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(16, 16))\n', (44629, 44675), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((44729, 44749), 'accera.Nest', 'Nest', ([], {'shape': '(16, 10)'}), '(shape=(16, 10))\n', (44733, 44749), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((45005, 45025), 'accera.Nest', 'Nest', ([], {'shape': '(16, 16)'}), '(shape=(16, 16))\n', (45009, 45025), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((45341, 45367), 'accera.fuse', 'fuse', (['schedule0', 'schedule1'], {}), '(schedule0, schedule1)\n', (45345, 45367), False, 'from accera import fuse\n'), ((46070, 46085), 'numpy.copy', 'np.copy', (['C_test'], {}), '(C_test)\n', (46077, 46085), True, 'import numpy as np\n'), ((46565, 46609), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT, shape=(16, 16))\n', (46570, 46609), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((46622, 46666), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 10)'}), '(role=Array.Role.INPUT, shape=(16, 10))\n', (46627, 46666), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((46679, 46730), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(16, 16))\n', (46684, 46730), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), 
((46784, 46804), 'accera.Nest', 'Nest', ([], {'shape': '(16, 16)'}), '(shape=(16, 16))\n', (46788, 46804), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((47061, 47081), 'accera.Nest', 'Nest', ([], {'shape': '(16, 10)'}), '(shape=(16, 10))\n', (47065, 47081), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((47396, 47422), 'accera.fuse', 'fuse', (['schedule0', 'schedule1'], {}), '(schedule0, schedule1)\n', (47400, 47422), False, 'from accera import fuse\n'), ((49824, 49871), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(3,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(3,))\n', (49829, 49871), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((49885, 49932), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(7,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(7,))\n', (49890, 49932), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((49948, 49961), 'accera.Nest', 'Nest', (['A.shape'], {}), '(A.shape)\n', (49952, 49961), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((49975, 49988), 'accera.Nest', 'Nest', (['B.shape'], {}), '(B.shape)\n', (49979, 49988), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((51121, 51169), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(11,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(11,))\n', (51126, 51169), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((51183, 51230), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(7,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(7,))\n', (51188, 51230), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((51244, 51291), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': 
'(5,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(5,))\n', (51249, 51291), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((51307, 51320), 'accera.Nest', 'Nest', (['A.shape'], {}), '(A.shape)\n', (51311, 51320), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((51334, 51347), 'accera.Nest', 'Nest', (['B.shape'], {}), '(B.shape)\n', (51338, 51347), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((51361, 51374), 'accera.Nest', 'Nest', (['C.shape'], {}), '(C.shape)\n', (51365, 51374), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((52765, 52815), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(3, 16)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(3, 16))\n', (52770, 52815), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((52828, 52878), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(7, 16)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(7, 16))\n', (52833, 52878), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((52893, 52906), 'accera.Nest', 'Nest', (['A.shape'], {}), '(A.shape)\n', (52897, 52906), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((52920, 52933), 'accera.Nest', 'Nest', (['B.shape'], {}), '(B.shape)\n', (52924, 52933), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((54202, 54253), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(11, 16)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(11, 16))\n', (54207, 54253), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((54266, 54316), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(7, 16)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(7, 16))\n', (54271, 54316), False, 
'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((54329, 54379), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(5, 16)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(5, 16))\n', (54334, 54379), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((54394, 54407), 'accera.Nest', 'Nest', (['A.shape'], {}), '(A.shape)\n', (54398, 54407), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((54421, 54434), 'accera.Nest', 'Nest', (['B.shape'], {}), '(B.shape)\n', (54425, 54434), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((54448, 54461), 'accera.Nest', 'Nest', (['C.shape'], {}), '(C.shape)\n', (54452, 54461), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((56116, 56171), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(1024 + 13,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(1024 + 13,))\n', (56121, 56171), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((56185, 56240), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(1024 + 11,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(1024 + 11,))\n', (56190, 56240), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((56254, 56308), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(1024 + 7,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(1024 + 7,))\n', (56259, 56308), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((56322, 56376), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(1024 + 3,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(1024 + 3,))\n', (56327, 56376), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((56431, 56444), 'accera.Nest', 'Nest', (['A.shape'], {}), '(A.shape)\n', 
(56435, 56444), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((56608, 56621), 'accera.Nest', 'Nest', (['B.shape'], {}), '(B.shape)\n', (56612, 56621), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((56895, 56920), 'accera.fuse', 'fuse', (['[s0, s1]'], {'partial': '(0)'}), '([s0, s1], partial=0)\n', (56899, 56920), False, 'from accera import fuse\n'), ((56938, 56951), 'accera.Nest', 'Nest', (['C.shape'], {}), '(C.shape)\n', (56942, 56951), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((57141, 57170), 'accera.fuse', 'fuse', (['[fused1, s2]'], {'partial': '(0)'}), '([fused1, s2], partial=0)\n', (57145, 57170), False, 'from accera import fuse\n'), ((57188, 57201), 'accera.Nest', 'Nest', (['D.shape'], {}), '(D.shape)\n', (57192, 57201), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((57390, 57419), 'accera.fuse', 'fuse', (['[fused2, s3]'], {'partial': '(0)'}), '([fused2, s3], partial=0)\n', (57394, 57419), False, 'from accera import fuse\n'), ((58104, 58149), 'accera.Target', 'Target', ([], {'known_name': 'intel_name', 'num_threads': '(44)'}), '(known_name=intel_name, num_threads=44)\n', (58110, 58149), False, 'from accera import Target\n'), ((58516, 58605), 'accera.Target', 'Target', (['Target.Model.RASPBERRY_PI_3B'], {'category': 'Target.Category.CPU', 'frequency_GHz': '(1.2)'}), '(Target.Model.RASPBERRY_PI_3B, category=Target.Category.CPU,\n frequency_GHz=1.2)\n', (58522, 58605), False, 'from accera import Target\n'), ((58816, 59171), 'accera.Target', 'Target', ([], {'name': '"""Custom processor"""', 'category': 'Target.Category.CPU', 'architecture': '"""x86_64"""', 'family': '"""Broadwell"""', 'extensions': "['MMX', 'SSE', 'SSE2', 'SSE3', 'SSSE3', 'SSE4', 'SSE4.1', 'SSE4.2', 'AVX',\n 'AVX2', 'FMA3']", 'num_cores': '(22)', 'num_threads': '(44)', 'frequency_GHz': '(3.2)', 'turbo_frequency_GHz': 
'(3.8)', 'cache_sizes': '[32, 256, 56320]', 'cache_lines': '[64, 64, 64]'}), "(name='Custom processor', category=Target.Category.CPU, architecture=\n 'x86_64', family='Broadwell', extensions=['MMX', 'SSE', 'SSE2', 'SSE3',\n 'SSSE3', 'SSE4', 'SSE4.1', 'SSE4.2', 'AVX', 'AVX2', 'FMA3'], num_cores=\n 22, num_threads=44, frequency_GHz=3.2, turbo_frequency_GHz=3.8,\n cache_sizes=[32, 256, 56320], cache_lines=[64, 64, 64])\n", (58822, 59171), False, 'from accera import Target\n'), ((59628, 59690), 'accera.Target', 'Target', (['Target.Model.NVIDIA_V100'], {'category': 'Target.Category.GPU'}), '(Target.Model.NVIDIA_V100, category=Target.Category.GPU)\n', (59634, 59690), False, 'from accera import Target\n'), ((59861, 59891), 'accera.Target', 'Target', (['Target.Model.AMD_MI100'], {}), '(Target.Model.AMD_MI100)\n', (59867, 59891), False, 'from accera import Target\n'), ((60007, 60039), 'accera.Target', 'Target', (['Target.Model.NVIDIA_A100'], {}), '(Target.Model.NVIDIA_A100)\n', (60013, 60039), False, 'from accera import Target\n'), ((60254, 60315), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'type', 'shape': '(M, S)'}), '(role=Array.Role.INPUT, element_type=type, shape=(M, S))\n', (60259, 60315), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((60328, 60426), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'type', 'shape': '(S, N)', 'layout': 'Array.Layout.LAST_MAJOR'}), '(role=Array.Role.INPUT, element_type=type, shape=(S, N), layout=Array.\n Layout.LAST_MAJOR)\n', (60333, 60426), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((60492, 60560), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'type', 'shape': '(M, N)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=type, shape=(M, N))\n', (60497, 60560), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((60577, 60598), 
'accera.Nest', 'Nest', ([], {'shape': '(M, N, S)'}), '(shape=(M, N, S))\n', (60581, 60598), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((60987, 60996), 'accera.Package', 'Package', ([], {}), '()\n', (60994, 60996), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((63626, 63669), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(1024,)'}), '(role=Array.Role.INPUT, shape=(1024,))\n', (63631, 63669), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((63687, 63704), 'accera.Nest', 'Nest', ([], {'shape': '(64,)'}), '(shape=(64,))\n', (63691, 63704), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((63823, 63901), 'accera.Target', 'Target', (['Target.Model.NVIDIA_V100'], {'category': 'Target.Category.GPU', 'num_threads': '(16)'}), '(Target.Model.NVIDIA_V100, category=Target.Category.GPU, num_threads=16)\n', (63829, 63901), False, 'from accera import Target\n'), ((64113, 64161), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(1024, 1024)'}), '(role=Array.Role.INPUT, shape=(1024, 1024))\n', (64118, 64161), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((64174, 64229), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(1024, 1024)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(1024, 1024))\n', (64179, 64229), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((64246, 64270), 'accera.Nest', 'Nest', ([], {'shape': '(1024, 1024)'}), '(shape=(1024, 1024))\n', (64250, 64270), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((64816, 64858), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(M, S)'}), '(role=Array.Role.INPUT, shape=(M, S))\n', (64821, 64858), False, 'from accera import ScalarType, Array, Function, Nest, Target, 
Package\n'), ((64871, 64913), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(S, N)'}), '(role=Array.Role.INPUT, shape=(S, N))\n', (64876, 64913), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((64926, 64975), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(M, N)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(M, N))\n', (64931, 64975), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((64992, 65013), 'accera.Nest', 'Nest', ([], {'shape': '(M, N, S)'}), '(shape=(M, N, S))\n', (64996, 65013), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((66192, 66234), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(M, S)'}), '(role=Array.Role.INPUT, shape=(M, S))\n', (66197, 66234), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((66247, 66289), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(S, N)'}), '(role=Array.Role.INPUT, shape=(S, N))\n', (66252, 66289), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((66302, 66351), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(M, N)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(M, N))\n', (66307, 66351), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((66368, 66389), 'accera.Nest', 'Nest', ([], {'shape': '(M, N, S)'}), '(shape=(M, N, S))\n', (66372, 66389), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((68047, 68056), 'accera.Package', 'Package', ([], {}), '()\n', (68054, 68056), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((68689, 68731), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(3, 5)'}), '(role=Array.Role.INPUT, shape=(3, 5))\n', (68694, 68731), False, 'from accera import ScalarType, Array, 
Function, Nest, Target, Package\n'), ((68753, 68789), 'accera.Target', 'Target', ([], {'category': 'Target.Category.CPU'}), '(category=Target.Category.CPU)\n', (68759, 68789), False, 'from accera import Target\n'), ((68806, 68824), 'accera.Nest', 'Nest', ([], {'shape': '(3, 5)'}), '(shape=(3, 5))\n', (68810, 68824), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((69284, 69325), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(64,)'}), '(role=Array.Role.INPUT, shape=(64,))\n', (69289, 69325), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((69339, 69380), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(64,)'}), '(role=Array.Role.INPUT, shape=(64,))\n', (69344, 69380), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((69394, 69442), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(64,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(64,))\n', (69399, 69442), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((69465, 69538), 'accera.Target', 'Target', ([], {'category': 'Target.Category.CPU', 'vector_bytes': '(16)', 'vector_registers': '(2)'}), '(category=Target.Category.CPU, vector_bytes=16, vector_registers=2)\n', (69471, 69538), False, 'from accera import Target\n'), ((69555, 69572), 'accera.Nest', 'Nest', ([], {'shape': '(64,)'}), '(shape=(64,))\n', (69559, 69572), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((69913, 69957), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 11)'}), '(role=Array.Role.INPUT, shape=(16, 11))\n', (69918, 69957), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((69970, 70014), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(11, 10)'}), '(role=Array.Role.INPUT, shape=(11, 10))\n', (69975, 70014), False, 
'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((70027, 70078), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(16, 10)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(16, 10))\n', (70032, 70078), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((70095, 70119), 'accera.Nest', 'Nest', ([], {'shape': '(16, 10, 11)'}), '(shape=(16, 10, 11))\n', (70099, 70119), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((70267, 70340), 'accera.Target', 'Target', ([], {'category': 'Target.Category.CPU', 'vector_bytes': '(16)', 'vector_registers': '(2)'}), '(category=Target.Category.CPU, vector_bytes=16, vector_registers=2)\n', (70273, 70340), False, 'from accera import Target\n'), ((70710, 70754), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT, shape=(16, 16))\n', (70715, 70754), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((70767, 70811), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT, shape=(16, 16))\n', (70772, 70811), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((70824, 70875), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(16, 16))\n', (70829, 70875), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((70892, 70916), 'accera.Nest', 'Nest', ([], {'shape': '(16, 16, 16)'}), '(shape=(16, 16, 16))\n', (70896, 70916), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((71064, 71137), 'accera.Target', 'Target', ([], {'category': 'Target.Category.CPU', 'vector_bytes': '(16)', 'vector_registers': '(2)'}), '(category=Target.Category.CPU, vector_bytes=16, vector_registers=2)\n', (71070, 71137), False, 'from accera 
import Target\n'), ((71557, 71601), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(16, 11)'}), '(role=Array.Role.INPUT, shape=(16, 11))\n', (71562, 71601), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((71614, 71658), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(11, 10)'}), '(role=Array.Role.INPUT, shape=(11, 10))\n', (71619, 71658), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((71671, 71722), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(16, 10)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(16, 10))\n', (71676, 71722), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((71739, 71763), 'accera.Nest', 'Nest', ([], {'shape': '(16, 10, 11)'}), '(shape=(16, 10, 11))\n', (71743, 71763), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((71908, 71938), 'accera.Target', 'Target', (['"""HOST"""'], {'num_threads': '(16)'}), "('HOST', num_threads=16)\n", (71914, 71938), False, 'from accera import Target\n'), ((72248, 72290), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(M, K)'}), '(role=Array.Role.INPUT, shape=(M, K))\n', (72253, 72290), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((72303, 72345), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(K, N)'}), '(role=Array.Role.INPUT, shape=(K, N))\n', (72308, 72345), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((72358, 72407), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(M, N)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(M, N))\n', (72363, 72407), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((72424, 72445), 'accera.Nest', 'Nest', ([], {'shape': '(M, N, K)'}), '(shape=(M, N, K))\n', (72428, 72445), False, 'from accera 
import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((72588, 72650), 'accera.Target', 'Target', (['Target.Model.NVIDIA_V100'], {'category': 'Target.Category.GPU'}), '(Target.Model.NVIDIA_V100, category=Target.Category.GPU)\n', (72594, 72650), False, 'from accera import Target\n'), ((72899, 72908), 'accera.Package', 'Package', ([], {}), '()\n', (72906, 72908), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((73586, 73633), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(256, 1024)'}), '(role=Array.Role.INPUT, shape=(256, 1024))\n', (73591, 73633), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((73646, 73693), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(1024, 512)'}), '(role=Array.Role.INPUT, shape=(1024, 512))\n', (73651, 73693), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((73706, 73759), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(256, 512)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(256, 512))\n', (73711, 73759), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((73776, 73804), 'accera.Nest', 'Nest', ([], {'shape': '(256, 512, 1024)'}), '(shape=(256, 512, 1024))\n', (73780, 73804), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((73949, 73979), 'accera.Target', 'Target', (['"""HOST"""'], {'num_threads': '(16)'}), "('HOST', num_threads=16)\n", (73955, 73979), False, 'from accera import Target\n'), ((74121, 74151), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (74144, 74151), False, 'import sys\n'), ((76606, 76615), 'accera.Package', 'Package', ([], {}), '()\n', (76613, 76615), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((80076, 80096), 'accera.create_parameters', 'create_parameters', (['(4)'], {}), 
'(4)\n', (80093, 80096), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((80110, 80187), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(P0, P2)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P0, P2))\n', (80115, 80187), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((80200, 80277), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(P2, P1)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P2, P1))\n', (80205, 80277), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((80290, 80379), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(P0, P1)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (P0, P1))\n', (80295, 80379), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((80391, 80415), 'accera.Nest', 'Nest', ([], {'shape': '(P0, P1, P2)'}), '(shape=(P0, P1, P2))\n', (80395, 80415), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((80566, 80575), 'accera.Package', 'Package', ([], {}), '()\n', (80573, 80575), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((81161, 81181), 'accera.create_parameters', 'create_parameters', (['(2)'], {}), '(2)\n', (81178, 81181), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((81321, 81341), 'accera.create_parameters', 'create_parameters', (['(1)'], {}), '(1)\n', (81338, 81341), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((82191, 82211), 'accera.create_parameters', 'create_parameters', (['(4)'], {}), '(4)\n', (82208, 82211), False, 'from accera import create_parameters, 
create_parameter_grid, Nest, Schedule\n'), ((82225, 82302), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(P0, P2)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P0, P2))\n', (82230, 82302), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((82315, 82392), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(P2, P1)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P2, P1))\n', (82320, 82392), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((82405, 82494), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(P0, P1)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (P0, P1))\n', (82410, 82494), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((82506, 82530), 'accera.Nest', 'Nest', ([], {'shape': '(P0, P1, P2)'}), '(shape=(P0, P1, P2))\n', (82510, 82530), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((82681, 82690), 'accera.Package', 'Package', ([], {}), '()\n', (82688, 82690), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((82758, 82778), 'accera.create_parameters', 'create_parameters', (['(2)'], {}), '(2)\n', (82775, 82778), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((83006, 83026), 'accera.create_parameters', 'create_parameters', (['(3)'], {}), '(3)\n', (83023, 83026), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((86301, 86362), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'type', 'shape': '(M, S)'}), '(role=Array.Role.INPUT, element_type=type, shape=(M, S))\n', (86306, 86362), False, 'from accera import ScalarType, 
Array, Function, Nest, Target, Package\n'), ((86375, 86436), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'type', 'shape': '(S, N)'}), '(role=Array.Role.INPUT, element_type=type, shape=(S, N))\n', (86380, 86436), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((86449, 86517), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'type', 'shape': '(M, N)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=type, shape=(M, N))\n', (86454, 86517), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((86534, 86555), 'accera.Nest', 'Nest', ([], {'shape': '(M, N, S)'}), '(shape=(M, N, S))\n', (86538, 86555), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((86760, 86780), 'accera.create_parameters', 'create_parameters', (['(6)'], {}), '(6)\n', (86777, 86780), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((88159, 88168), 'accera.Package', 'Package', ([], {}), '()\n', (88166, 88168), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((89134, 89181), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(256, 1024)'}), '(role=Array.Role.INPUT, shape=(256, 1024))\n', (89139, 89181), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((89194, 89241), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(1024, 512)'}), '(role=Array.Role.INPUT, shape=(1024, 512))\n', (89199, 89241), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((89254, 89307), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(256, 512)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(256, 512))\n', (89259, 89307), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((89324, 89352), 'accera.Nest', 'Nest', ([], {'shape': '(256, 
512, 1024)'}), '(shape=(256, 512, 1024))\n', (89328, 89352), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((89497, 89527), 'accera.Target', 'Target', (['"""HOST"""'], {'num_threads': '(16)'}), "('HOST', num_threads=16)\n", (89503, 89527), False, 'from accera import Target\n'), ((89732, 89762), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (89755, 89762), False, 'import sys\n'), ((90448, 90468), 'accera.create_parameters', 'create_parameters', (['(8)'], {}), '(8)\n', (90465, 90468), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((95062, 95082), 'accera.create_parameters', 'create_parameters', (['(5)'], {}), '(5)\n', (95079, 95082), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((95096, 95173), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(P0, P2)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P0, P2))\n', (95101, 95173), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((95186, 95263), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(P2, P1)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P2, P1))\n', (95191, 95263), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((95276, 95365), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(P0, P1)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (P0, P1))\n', (95281, 95365), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((95377, 95401), 'accera.Nest', 'Nest', ([], {'shape': '(P0, P1, P2)'}), '(shape=(P0, P1, P2))\n', (95381, 95401), False, 'from accera import create_parameters, create_parameter_grid, 
Nest, Schedule\n'), ((95629, 95638), 'accera.Package', 'Package', ([], {}), '()\n', (95636, 95638), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((95882, 95919), 'accera.create_parameter_grid', 'create_parameter_grid', (['parameter_grid'], {}), '(parameter_grid)\n', (95903, 95919), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((96318, 96379), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'float', 'shape': '(32,)'}), '(role=Array.Role.INPUT, element_type=float, shape=(32,))\n', (96323, 96379), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((96393, 96461), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'float', 'shape': '(32,)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(32,))\n', (96398, 96461), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((96475, 96542), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'float', 'shape': '(1,)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(1,))\n', (96480, 96542), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((96558, 96572), 'accera.Nest', 'Nest', (['[32, 32]'], {}), '([32, 32])\n', (96562, 96572), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((96952, 96962), 'accera.Nest', 'Nest', (['[32]'], {}), '([32])\n', (96956, 96962), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((97114, 97134), 'accera.create_parameters', 'create_parameters', (['(1)'], {}), '(1)\n', (97131, 97134), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((97221, 97246), 'accera.fuse', 'fuse', (['(s0, s1)'], {'partial': '(1)'}), '((s0, s1), partial=1)\n', (97225, 97246), False, 'from accera import fuse\n'), ((97335, 
97363), 'accera.fuse', 'fuse', (['(s0_up, s1)'], {'partial': '(1)'}), '((s0_up, s1), partial=1)\n', (97339, 97363), False, 'from accera import fuse\n'), ((97484, 97493), 'accera.Package', 'Package', ([], {}), '()\n', (97491, 97493), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((98630, 98691), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'float', 'shape': '(32,)'}), '(role=Array.Role.INPUT, element_type=float, shape=(32,))\n', (98635, 98691), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((98705, 98773), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'float', 'shape': '(32,)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(32,))\n', (98710, 98773), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((98787, 98854), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'float', 'shape': '(1,)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(1,))\n', (98792, 98854), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((98870, 98884), 'accera.Nest', 'Nest', (['[32, 32]'], {}), '([32, 32])\n', (98874, 98884), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((99049, 99059), 'accera.Nest', 'Nest', (['[32]'], {}), '([32])\n', (99053, 99059), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((99211, 99231), 'accera.create_parameters', 'create_parameters', (['(1)'], {}), '(1)\n', (99228, 99231), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((99277, 99302), 'accera.fuse', 'fuse', (['(s0, s1)'], {'partial': '(1)'}), '((s0, s1), partial=1)\n', (99281, 99302), False, 'from accera import fuse\n'), ((99322, 99331), 'accera.Package', 'Package', ([], {}), '()\n', (99329, 99331), False, 'from accera import 
ScalarType, Array, Function, Nest, Target, Package\n'), ((100263, 100324), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'float', 'shape': '(32,)'}), '(role=Array.Role.INPUT, element_type=float, shape=(32,))\n', (100268, 100324), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((100338, 100406), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'float', 'shape': '(32,)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(32,))\n', (100343, 100406), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((100420, 100487), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'float', 'shape': '(1,)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(1,))\n', (100425, 100487), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((100503, 100517), 'accera.Nest', 'Nest', (['[32, 32]'], {}), '([32, 32])\n', (100507, 100517), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((100682, 100692), 'accera.Nest', 'Nest', (['[32]'], {}), '([32])\n', (100686, 100692), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((100848, 100868), 'accera.create_parameters', 'create_parameters', (['(2)'], {}), '(2)\n', (100865, 100868), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((100914, 100939), 'accera.fuse', 'fuse', (['(s0, s1)'], {'partial': '(1)'}), '((s0, s1), partial=1)\n', (100918, 100939), False, 'from accera import fuse\n'), ((101063, 101072), 'accera.Package', 'Package', ([], {}), '()\n', (101070, 101072), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((101778, 101840), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'float', 'shape': '(128,)'}), '(role=Array.Role.INPUT, element_type=float, 
shape=(128,))\n', (101783, 101840), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((101854, 101923), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'float', 'shape': '(128,)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(128,))\n', (101859, 101923), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((101937, 102004), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'float', 'shape': '(1,)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=float, shape=(1,))\n', (101942, 102004), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((102020, 102036), 'accera.Nest', 'Nest', (['[128, 128]'], {}), '([128, 128])\n', (102024, 102036), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((102201, 102212), 'accera.Nest', 'Nest', (['[128]'], {}), '([128])\n', (102205, 102212), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((102372, 102392), 'accera.create_parameters', 'create_parameters', (['(3)'], {}), '(3)\n', (102389, 102392), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((102438, 102463), 'accera.fuse', 'fuse', (['(s0, s1)'], {'partial': '(1)'}), '((s0, s1), partial=1)\n', (102442, 102463), False, 'from accera import fuse\n'), ((102618, 102627), 'accera.Package', 'Package', ([], {}), '()\n', (102625, 102627), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((104801, 104821), 'accera.create_parameters', 'create_parameters', (['(5)'], {}), '(5)\n', (104818, 104821), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((104835, 104912), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(P0, P2)'}), '(role=Array.Role.INPUT, 
element_type=ScalarType.float32, shape=(P0, P2))\n', (104840, 104912), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((104925, 105002), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(P2, P1)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(P2, P1))\n', (104930, 105002), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((105015, 105104), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(P0, P1)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (P0, P1))\n', (105020, 105104), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((105116, 105140), 'accera.Nest', 'Nest', ([], {'shape': '(P0, P1, P2)'}), '(shape=(P0, P1, P2))\n', (105120, 105140), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((105368, 105377), 'accera.Package', 'Package', ([], {}), '()\n', (105375, 105377), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((105627, 105664), 'accera.create_parameter_grid', 'create_parameter_grid', (['parameter_grid'], {}), '(parameter_grid)\n', (105648, 105664), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((106655, 106703), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(64,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(64,))\n', (106660, 106703), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((106721, 106738), 'accera.Nest', 'Nest', ([], {'shape': '(64,)'}), '(shape=(64,))\n', (106725, 106738), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((106997, 107063), 'accera.Target', 'Target', (['Target.Model.RASPBERRY_PI_3B'], {'category': 'Target.Category.CPU'}), 
'(Target.Model.RASPBERRY_PI_3B, category=Target.Category.CPU)\n', (107003, 107063), False, 'from accera import Target\n'), ((107124, 107133), 'accera.Package', 'Package', ([], {}), '()\n', (107131, 107133), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((107709, 107718), 'accera.Package', 'Package', ([], {}), '()\n', (107716, 107718), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((108154, 108163), 'accera.Package', 'Package', ([], {}), '()\n', (108161, 108163), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((108525, 108600), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, K)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, K))\n', (108530, 108600), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((108613, 108688), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(K, N)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(K, N))\n', (108618, 108688), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((108701, 108788), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (M, N))\n', (108706, 108788), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((108800, 108821), 'accera.Nest', 'Nest', ([], {'shape': '(M, N, K)'}), '(shape=(M, N, K))\n', (108804, 108821), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((109145, 109154), 'accera.Package', 'Package', ([], {}), '()\n', (109152, 109154), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((110001, 110076), 'accera.Array', 'Array', 
([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, K)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, K))\n', (110006, 110076), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((110089, 110164), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(K, N)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(K, N))\n', (110094, 110164), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((110177, 110264), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (M, N))\n', (110182, 110264), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((110276, 110297), 'accera.Nest', 'Nest', ([], {'shape': '(M, N, K)'}), '(shape=(M, N, K))\n', (110280, 110297), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((110710, 110719), 'accera.Package', 'Package', ([], {}), '()\n', (110717, 110719), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((111700, 111775), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N))\n', (111705, 111775), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((111788, 111863), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N))\n', (111793, 111863), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((111876, 111963), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 
'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (M, N))\n', (111881, 111963), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((111976, 111994), 'accera.Nest', 'Nest', ([], {'shape': '(M, N)'}), '(shape=(M, N))\n', (111980, 111994), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((112178, 112196), 'accera.Nest', 'Nest', ([], {'shape': '(M, N)'}), '(shape=(M, N))\n', (112182, 112196), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((112383, 112420), 'accera.fuse', 'fuse', (['schedule0', 'schedule1'], {'partial': '(1)'}), '(schedule0, schedule1, partial=1)\n', (112387, 112420), False, 'from accera import fuse\n'), ((112563, 112572), 'accera.Package', 'Package', ([], {}), '()\n', (112570, 112572), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((113474, 113549), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N))\n', (113479, 113549), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((113562, 113637), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N))\n', (113567, 113637), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((113650, 113737), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (M, N))\n', (113655, 113737), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((113750, 113768), 'accera.Nest', 'Nest', ([], 
{'shape': '(M, N)'}), '(shape=(M, N))\n', (113754, 113768), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((113952, 113970), 'accera.Nest', 'Nest', ([], {'shape': '(M, N)'}), '(shape=(M, N))\n', (113956, 113970), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((114287, 114313), 'accera.fuse', 'fuse', (['schedule0', 'schedule1'], {}), '(schedule0, schedule1)\n', (114291, 114313), False, 'from accera import fuse\n'), ((114563, 114572), 'accera.Package', 'Package', ([], {}), '()\n', (114570, 114572), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((115582, 115657), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N))\n', (115587, 115657), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((115670, 115745), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N))\n', (115675, 115745), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((115758, 115845), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (M, N))\n', (115763, 115845), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((115858, 115876), 'accera.Nest', 'Nest', ([], {'shape': '(M, N)'}), '(shape=(M, N))\n', (115862, 115876), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((116060, 116078), 'accera.Nest', 'Nest', ([], {'shape': '(M, N)'}), '(shape=(M, N))\n', (116064, 116078), False, 'from accera import create_parameters, 
create_parameter_grid, Nest, Schedule\n'), ((116268, 116294), 'accera.fuse', 'fuse', (['schedule0', 'schedule1'], {}), '(schedule0, schedule1)\n', (116272, 116294), False, 'from accera import fuse\n'), ((116393, 116411), 'accera.Nest', 'Nest', ([], {'shape': '(M, N)'}), '(shape=(M, N))\n', (116397, 116411), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((116654, 116693), 'accera.fuse', 'fuse', (['schedule_f1', 'schedule2'], {'partial': '(2)'}), '(schedule_f1, schedule2, partial=2)\n', (116658, 116693), False, 'from accera import fuse\n'), ((116713, 116722), 'accera.Package', 'Package', ([], {}), '()\n', (116720, 116722), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((117683, 117758), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N))\n', (117688, 117758), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((117771, 117846), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT, element_type=ScalarType.float32, shape=(M, N))\n', (117776, 117846), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((117859, 117946), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(M, N)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (M, N))\n', (117864, 117946), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((117959, 117977), 'accera.Nest', 'Nest', ([], {'shape': '(M, N)'}), '(shape=(M, N))\n', (117963, 117977), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((118161, 118179), 'accera.Nest', 'Nest', ([], {'shape': '(M, N)'}), '(shape=(M, N))\n', 
(118165, 118179), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((118369, 118395), 'accera.fuse', 'fuse', (['schedule0', 'schedule1'], {}), '(schedule0, schedule1)\n', (118373, 118395), False, 'from accera import fuse\n'), ((118494, 118512), 'accera.Nest', 'Nest', ([], {'shape': '(M, N)'}), '(shape=(M, N))\n', (118498, 118512), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((118756, 118795), 'accera.fuse', 'fuse', (['schedule2', 'schedule_f1'], {'partial': '(2)'}), '(schedule2, schedule_f1, partial=2)\n', (118760, 118795), False, 'from accera import fuse\n'), ((118815, 118824), 'accera.Package', 'Package', ([], {}), '()\n', (118822, 118824), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((119801, 119810), 'accera.Package', 'Package', ([], {}), '()\n', (119808, 119810), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((2739, 2769), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (2751, 2769), False, 'import pathlib\n'), ((2831, 2886), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (2854, 2886), False, 'from accera.test import verifiers\n'), ((4883, 4974), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': '(inf, inf)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n (inf, inf))\n', (4888, 4974), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((5478, 5539), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (5501, 5539), False, 'from accera.test import verifiers\n'), ((6270, 6299), 'numpy.ones', 'np.ones', (['(128, 256)'], {'dtype': 'dt'}), 
'((128, 256), dtype=dt)\n', (6277, 6299), True, 'import numpy as np\n'), ((6316, 6352), 'accera.Array', 'Array', ([], {'role': 'Array.Role.CONST', 'data': 'D'}), '(role=Array.Role.CONST, data=D)\n', (6321, 6352), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((6570, 6658), 'accera.Array', 'Array', ([], {'role': 'Array.Role.CONST', 'element_type': 't', 'layout': 'Array.Layout.LAST_MAJOR', 'data': 'D'}), '(role=Array.Role.CONST, element_type=t, layout=Array.Layout.LAST_MAJOR,\n data=D)\n', (6575, 6658), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((7259, 7330), 'accera.Array', 'Array', ([], {'role': 'Array.Role.TEMP', 'element_type': 'A.element_type', 'shape': 'A.shape'}), '(role=Array.Role.TEMP, element_type=A.element_type, shape=A.shape)\n', (7264, 7330), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((7351, 7364), 'accera.Nest', 'Nest', (['A.shape'], {}), '(A.shape)\n', (7355, 7364), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((7918, 7979), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (7941, 7979), False, 'from accera.test import verifiers\n'), ((8433, 8446), 'accera.Nest', 'Nest', (['A.shape'], {}), '(A.shape)\n', (8437, 8446), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((8743, 8756), 'accera.Nest', 'Nest', (['A.shape'], {}), '(A.shape)\n', (8747, 8756), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((9045, 9116), 'accera.Array', 'Array', ([], {'role': 'Array.Role.TEMP', 'element_type': 'A.element_type', 'shape': 'A.shape'}), '(role=Array.Role.TEMP, element_type=A.element_type, shape=A.shape)\n', (9050, 9116), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((9314, 
9375), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (9337, 9375), False, 'from accera.test import verifiers\n'), ((9537, 9616), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'A.element_type', 'shape': 'A.shape'}), '(role=Array.Role.INPUT_OUTPUT, element_type=A.element_type, shape=A.shape)\n', (9542, 9616), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((10459, 10527), 'accera.Array', 'Array', ([], {'role': 'Array.Role.TEMP', 'element_type': 'A.element_type', 'shape': '(1,)'}), '(role=Array.Role.TEMP, element_type=A.element_type, shape=(1,))\n', (10464, 10527), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((10878, 10939), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (10901, 10939), False, 'from accera.test import verifiers\n'), ((14046, 14068), 'accera.Nest', 'Nest', ([], {'shape': 'arr0.shape'}), '(shape=arr0.shape)\n', (14050, 14068), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((14698, 14759), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (14721, 14759), False, 'from accera.test import verifiers\n'), ((15489, 15508), 'accera.Nest', 'Nest', ([], {'shape': 'A.shape'}), '(shape=A.shape)\n', (15493, 15508), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((16343, 16404), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (16366, 16404), False, 'from accera.test import verifiers\n'), ((17008, 17029), 'accera.Nest', 'Nest', 
([], {'shape': '(M, N, S)'}), '(shape=(M, N, S))\n', (17012, 17029), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((17426, 17487), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (17449, 17487), False, 'from accera.test import verifiers\n'), ((17955, 18015), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 't', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT, element_type=t, shape=(16, 16))\n', (17960, 18015), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((18032, 18092), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 't', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT, element_type=t, shape=(16, 16))\n', (18037, 18092), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((18109, 18176), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 't', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=t, shape=(16, 16))\n', (18114, 18176), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((18197, 18217), 'accera.Nest', 'Nest', ([], {'shape': '(16, 16)'}), '(shape=(16, 16))\n', (18201, 18217), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((18513, 18529), 'numpy.dtype', 'np.dtype', (['t.name'], {}), '(t.name)\n', (18521, 18529), True, 'import numpy as np\n'), ((19492, 19552), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 't', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT, element_type=t, shape=(16, 16))\n', (19497, 19552), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((19569, 19629), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'element_type': 't', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT, 
element_type=t, shape=(16, 16))\n', (19574, 19629), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((19646, 19713), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 't', 'shape': '(16, 16)'}), '(role=Array.Role.INPUT_OUTPUT, element_type=t, shape=(16, 16))\n', (19651, 19713), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((19734, 19754), 'accera.Nest', 'Nest', ([], {'shape': '(16, 16)'}), '(shape=(16, 16))\n', (19738, 19754), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((20050, 20066), 'numpy.dtype', 'np.dtype', (['t.name'], {}), '(t.name)\n', (20058, 20066), True, 'import numpy as np\n'), ((25610, 25671), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (25633, 25671), False, 'from accera.test import verifiers\n'), ((26202, 26263), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (26225, 26263), False, 'from accera.test import verifiers\n'), ((26769, 26790), 'accera.Nest', 'Nest', ([], {'shape': '(M, N, S)'}), '(shape=(M, N, S))\n', (26773, 26790), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((27098, 27128), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (27110, 27128), False, 'import pathlib\n'), ((27190, 27245), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (27213, 27245), False, 'from accera.test import verifiers\n'), ((36659, 36720), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (36682, 36720), 
False, 'from accera.test import verifiers\n'), ((37161, 37191), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (37173, 37191), False, 'import pathlib\n'), ((37253, 37308), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (37276, 37308), False, 'from accera.test import verifiers\n'), ((42097, 42116), 'numpy.sum', 'np.sum', (['A_test_post'], {}), '(A_test_post)\n', (42103, 42116), True, 'import numpy as np\n'), ((61087, 61117), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (61099, 61117), False, 'import pathlib\n'), ((61179, 61234), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (61202, 61234), False, 'from accera.test import verifiers\n'), ((68170, 68200), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (68182, 68200), False, 'import pathlib\n'), ((68229, 68284), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (68252, 68284), False, 'from accera.test import verifiers\n'), ((73005, 73035), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (73017, 73035), False, 'import pathlib\n'), ((73062, 73170), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'test_name', 'output_dir'], {'file_list': "[f'{test_name}.cu', f'{test_name}.hat']"}), "(self, test_name, output_dir, file_list=[\n f'{test_name}.cu', f'{test_name}.hat'])\n", (73085, 73170), False, 'from accera.test import verifiers\n'), ((76710, 76740), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (76722, 76740), False, 'import pathlib\n'), ((76769, 76824), 'accera.test.verifiers.VerifyPackage', 
'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (76792, 76824), False, 'from accera.test import verifiers\n'), ((77416, 77487), 'accera.Array', 'Array', ([], {'role': 'Array.Role.CONST', 'data': 'matrix', 'layout': 'Array.Layout.DEFERRED'}), '(role=Array.Role.CONST, data=matrix, layout=Array.Layout.DEFERRED)\n', (77421, 77487), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((77504, 77597), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': 'matrix.shape'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n matrix.shape)\n', (77509, 77597), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((77613, 77637), 'accera.Nest', 'Nest', ([], {'shape': 'matrix.shape'}), '(shape=matrix.shape)\n', (77617, 77637), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((79099, 79170), 'accera.Array', 'Array', ([], {'role': 'Array.Role.CONST', 'data': 'matrix', 'layout': 'Array.Layout.DEFERRED'}), '(role=Array.Role.CONST, data=matrix, layout=Array.Layout.DEFERRED)\n', (79104, 79170), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((79187, 79280), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'element_type': 'ScalarType.float32', 'shape': 'matrix.shape'}), '(role=Array.Role.INPUT_OUTPUT, element_type=ScalarType.float32, shape=\n matrix.shape)\n', (79192, 79280), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((79296, 79320), 'accera.Nest', 'Nest', ([], {'shape': 'matrix.shape'}), '(shape=matrix.shape)\n', (79300, 79320), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((81893, 81954), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), 
'(self, package_name, TEST_PACKAGE_DIR)\n', (81916, 81954), False, 'from accera.test import verifiers\n'), ((83698, 83759), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (83721, 83759), False, 'from accera.test import verifiers\n'), ((88565, 88595), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (88577, 88595), False, 'import pathlib\n'), ((88657, 88712), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (88680, 88712), False, 'from accera.test import verifiers\n'), ((90739, 90748), 'accera.Package', 'Package', ([], {}), '()\n', (90746, 90748), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((91775, 91784), 'accera.Package', 'Package', ([], {}), '()\n', (91782, 91784), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((92822, 92831), 'accera.Package', 'Package', ([], {}), '()\n', (92829, 92831), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((93995, 94004), 'accera.Package', 'Package', ([], {}), '()\n', (94002, 94004), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((96020, 96081), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (96043, 96081), False, 'from accera.test import verifiers\n'), ((98085, 98146), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (98108, 98146), False, 'from accera.test import verifiers\n'), ((99965, 100026), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), 
'(self, package_name, TEST_PACKAGE_DIR)\n', (99988, 100026), False, 'from accera.test import verifiers\n'), ((101457, 101518), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (101480, 101518), False, 'from accera.test import verifiers\n'), ((104415, 104476), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (104438, 104476), False, 'from accera.test import verifiers\n'), ((105765, 105826), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (105788, 105826), False, 'from accera.test import verifiers\n'), ((107297, 107358), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (107320, 107358), False, 'from accera.test import verifiers\n'), ((107882, 107943), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], {}), '(self, package_name, TEST_PACKAGE_DIR)\n', (107905, 107943), False, 'from accera.test import verifiers\n'), ((108327, 108370), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name'], {}), '(self, package_name)\n', (108350, 108370), False, 'from accera.test import verifiers\n'), ((109288, 109318), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (109300, 109318), False, 'import pathlib\n'), ((109348, 109403), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (109371, 109403), False, 'from accera.test import verifiers\n'), ((110862, 110892), 'pathlib.Path', 'pathlib.Path', 
(['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (110874, 110892), False, 'import pathlib\n'), ((110922, 110977), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (110945, 110977), False, 'from accera.test import verifiers\n'), ((112723, 112753), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (112735, 112753), False, 'import pathlib\n'), ((112783, 112838), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (112806, 112838), False, 'from accera.test import verifiers\n'), ((114732, 114762), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (114744, 114762), False, 'import pathlib\n'), ((114792, 114847), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (114815, 114847), False, 'from accera.test import verifiers\n'), ((116884, 116914), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (116896, 116914), False, 'import pathlib\n'), ((116944, 116999), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (116967, 116999), False, 'from accera.test import verifiers\n'), ((118986, 119016), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (118998, 119016), False, 'import pathlib\n'), ((119046, 119101), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (119069, 119101), False, 'from accera.test import verifiers\n'), ((120659, 120720), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'TEST_PACKAGE_DIR'], 
{}), '(self, package_name, TEST_PACKAGE_DIR)\n', (120682, 120720), False, 'from accera.test import verifiers\n'), ((11355, 11382), 'numpy.random.random', 'np.random.random', (['(256, 32)'], {}), '((256, 32))\n', (11371, 11382), True, 'import numpy as np\n'), ((11423, 11444), 'numpy.ndarray', 'np.ndarray', (['(256, 32)'], {}), '((256, 32))\n', (11433, 11444), True, 'import numpy as np\n'), ((12058, 12085), 'numpy.random.random', 'np.random.random', (['(256, 32)'], {}), '((256, 32))\n', (12074, 12085), True, 'import numpy as np\n'), ((12137, 12158), 'numpy.ndarray', 'np.ndarray', (['(256, 32)'], {}), '((256, 32))\n', (12147, 12158), True, 'import numpy as np\n'), ((13024, 13051), 'numpy.random.random', 'np.random.random', (['(256, 32)'], {}), '((256, 32))\n', (13040, 13051), True, 'import numpy as np\n'), ((13092, 13113), 'numpy.ndarray', 'np.ndarray', (['(256, 32)'], {}), '((256, 32))\n', (13102, 13113), True, 'import numpy as np\n'), ((13180, 13207), 'numpy.random.random', 'np.random.random', (['(256, 32)'], {}), '((256, 32))\n', (13196, 13207), True, 'import numpy as np\n'), ((13246, 13267), 'numpy.ndarray', 'np.ndarray', (['(256, 32)'], {}), '((256, 32))\n', (13256, 13267), True, 'import numpy as np\n'), ((22117, 22144), 'accera._lang_python._lang._If', '_If', (['(A[i, k] == B[k, j])', 'f1'], {}), '(A[i, k] == B[k, j], f1)\n', (22120, 22144), False, 'from accera._lang_python._lang import _If\n'), ((22161, 22188), 'accera._lang_python._lang._If', '_If', (['(A[i, k] != B[k, j])', 'f2'], {}), '(A[i, k] != B[k, j], f2)\n', (22164, 22188), False, 'from accera._lang_python._lang import _If\n'), ((22205, 22231), 'accera._lang_python._lang._If', '_If', (['(A[i, k] < B[k, j])', 'f3'], {}), '(A[i, k] < B[k, j], f3)\n', (22208, 22231), False, 'from accera._lang_python._lang import _If\n'), ((22248, 22275), 'accera._lang_python._lang._If', '_If', (['(A[i, k] <= B[k, j])', 'f4'], {}), '(A[i, k] <= B[k, j], f4)\n', (22251, 22275), False, 'from accera._lang_python._lang import 
_If\n'), ((22292, 22318), 'accera._lang_python._lang._If', '_If', (['(A[i, k] > B[k, j])', 'f1'], {}), '(A[i, k] > B[k, j], f1)\n', (22295, 22318), False, 'from accera._lang_python._lang import _If\n'), ((22335, 22362), 'accera._lang_python._lang._If', '_If', (['(A[i, k] >= B[k, j])', 'f2'], {}), '(A[i, k] >= B[k, j], f2)\n', (22338, 22362), False, 'from accera._lang_python._lang import _If\n'), ((22803, 22823), 'accera.logical_not', 'logical_not', (['A[i, k]'], {}), '(A[i, k])\n', (22814, 22823), False, 'from accera import logical_and, logical_or, logical_not\n'), ((22851, 22880), 'accera.logical_and', 'logical_and', (['A[i, k]', 'B[k, j]'], {}), '(A[i, k], B[k, j])\n', (22862, 22880), False, 'from accera import logical_and, logical_or, logical_not\n'), ((22908, 22936), 'accera.logical_or', 'logical_or', (['A[i, k]', 'B[k, j]'], {}), '(A[i, k], B[k, j])\n', (22918, 22936), False, 'from accera import logical_and, logical_or, logical_not\n'), ((23909, 23930), 'accera.max', 'max', (['A[i, j]', 'B[j, k]'], {}), '(A[i, j], B[j, k])\n', (23912, 23930), False, 'from accera import fuse, Nest, max\n'), ((23958, 23979), 'accera.min', 'min', (['A[i, j]', 'B[j, k]'], {}), '(A[i, j], B[j, k])\n', (23961, 23979), False, 'from accera import max, min\n'), ((24498, 24510), 'accera.abs', 'abs', (['A[i, j]'], {}), '(A[i, j])\n', (24501, 24510), False, 'from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((24538, 24550), 'accera.exp', 'exp', (['A[i, j]'], {}), '(A[i, j])\n', (24541, 24550), False, 'from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((24677, 24689), 'accera.log', 'log', (['B[j, k]'], {}), '(B[j, k])\n', (24680, 24689), False, 'from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((24717, 24730), 'accera.log2', 'log2', (['B[j, k]'], {}), '(B[j, k])\n', (24721, 24730), False, 'from accera import abs, sqrt, exp, log, 
log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((24758, 24772), 'accera.log10', 'log10', (['A[i, j]'], {}), '(A[i, j])\n', (24763, 24772), False, 'from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((24800, 24812), 'accera.sin', 'sin', (['A[i, j]'], {}), '(A[i, j])\n', (24803, 24812), False, 'from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((24840, 24852), 'accera.cos', 'cos', (['B[j, k]'], {}), '(B[j, k])\n', (24843, 24852), False, 'from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((24880, 24892), 'accera.tan', 'tan', (['A[i, j]'], {}), '(A[i, j])\n', (24883, 24892), False, 'from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((24920, 24933), 'accera.sqrt', 'sqrt', (['B[j, k]'], {}), '(B[j, k])\n', (24924, 24933), False, 'from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((24961, 24974), 'accera.ceil', 'ceil', (['B[j, k]'], {}), '(B[j, k])\n', (24965, 24974), False, 'from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((25002, 25016), 'accera.floor', 'floor', (['A[i, j]'], {}), '(A[i, j])\n', (25007, 25016), False, 'from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((25044, 25057), 'accera.sinh', 'sinh', (['A[i, j]'], {}), '(A[i, j])\n', (25048, 25057), False, 'from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((25085, 25098), 'accera.cosh', 'cosh', (['B[j, k]'], {}), '(B[j, k])\n', (25089, 25098), False, 'from accera import abs, sqrt, exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((25126, 25139), 'accera.tanh', 'tanh', (['A[i, j]'], {}), '(A[i, j])\n', (25130, 25139), False, 'from accera import abs, sqrt, 
exp, log, log10, log2, sin, cos, ceil, floor, tan, cosh, sinh, tanh\n'), ((31786, 31826), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(N,)'}), '(role=Array.Role.INPUT, shape=(N,))\n', (31791, 31826), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((31848, 31888), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(K,)'}), '(role=Array.Role.INPUT, shape=(K,))\n', (31853, 31888), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((31910, 31957), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(M,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(M,))\n', (31915, 31957), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((31983, 32001), 'accera.Nest', 'Nest', ([], {'shape': '(M, K)'}), '(shape=(M, K))\n', (31987, 32001), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((33625, 33650), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (33641, 33650), True, 'import numpy as np\n'), ((33687, 33712), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (33703, 33712), True, 'import numpy as np\n'), ((33749, 33774), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (33765, 33774), True, 'import numpy as np\n'), ((35802, 35827), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (35818, 35827), True, 'import numpy as np\n'), ((35864, 35889), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (35880, 35889), True, 'import numpy as np\n'), ((35926, 35951), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (35942, 35951), True, 'import numpy as np\n'), ((38554, 38579), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (38570, 38579), True, 'import numpy as np\n'), ((38616, 38641), 
'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (38632, 38641), True, 'import numpy as np\n'), ((38678, 38703), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (38694, 38703), True, 'import numpy as np\n'), ((40305, 40316), 'accera._lang_python._lang.Scalar', 'Scalar', (['(0.0)'], {}), '(0.0)\n', (40311, 40316), False, 'from accera._lang_python._lang import Scalar\n'), ((40733, 40758), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (40749, 40758), True, 'import numpy as np\n'), ((40795, 40820), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (40811, 40820), True, 'import numpy as np\n'), ((40857, 40882), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (40873, 40882), True, 'import numpy as np\n'), ((41021, 41062), 'numpy.maximum', 'np.maximum', (['(C_test + A_test @ B_test)', '(0.0)'], {}), '(C_test + A_test @ B_test, 0.0)\n', (41031, 41062), True, 'import numpy as np\n'), ((41905, 41930), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (41921, 41930), True, 'import numpy as np\n'), ((41971, 41996), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (41987, 41996), True, 'import numpy as np\n'), ((43855, 43880), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (43871, 43880), True, 'import numpy as np\n'), ((43917, 43942), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (43933, 43942), True, 'import numpy as np\n'), ((43979, 44004), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (43995, 44004), True, 'import numpy as np\n'), ((45885, 45910), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (45901, 45910), True, 'import numpy as np\n'), ((45947, 45972), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (45963, 45972), True, 
'import numpy as np\n'), ((46009, 46034), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (46025, 46034), True, 'import numpy as np\n'), ((49186, 49211), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (49202, 49211), True, 'import numpy as np\n'), ((49248, 49273), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (49264, 49273), True, 'import numpy as np\n'), ((49310, 49335), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (49326, 49335), True, 'import numpy as np\n'), ((50572, 50597), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (50588, 50597), True, 'import numpy as np\n'), ((50634, 50659), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (50650, 50659), True, 'import numpy as np\n'), ((52170, 52195), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (52186, 52195), True, 'import numpy as np\n'), ((52232, 52257), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (52248, 52257), True, 'import numpy as np\n'), ((52294, 52319), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (52310, 52319), True, 'import numpy as np\n'), ((53653, 53678), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (53669, 53678), True, 'import numpy as np\n'), ((53715, 53740), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (53731, 53740), True, 'import numpy as np\n'), ((55461, 55486), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (55477, 55486), True, 'import numpy as np\n'), ((55523, 55548), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (55539, 55548), True, 'import numpy as np\n'), ((55585, 55610), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (55601, 55610), True, 'import numpy as np\n'), 
((57438, 57463), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (57454, 57463), True, 'import numpy as np\n'), ((57500, 57525), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (57516, 57525), True, 'import numpy as np\n'), ((57562, 57587), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (57578, 57587), True, 'import numpy as np\n'), ((57624, 57649), 'numpy.random.random', 'np.random.random', (['D.shape'], {}), '(D.shape)\n', (57640, 57649), True, 'import numpy as np\n'), ((65603, 65628), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (65619, 65628), True, 'import numpy as np\n'), ((65665, 65690), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (65681, 65690), True, 'import numpy as np\n'), ((65727, 65752), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (65743, 65752), True, 'import numpy as np\n'), ((67382, 67407), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (67398, 67407), True, 'import numpy as np\n'), ((67444, 67469), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (67460, 67469), True, 'import numpy as np\n'), ((67506, 67531), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (67522, 67531), True, 'import numpy as np\n'), ((74668, 74694), 'accera.min', 'min', (['(4)', 'target.num_threads'], {}), '(4, target.num_threads)\n', (74671, 74694), False, 'from accera import max, min\n'), ((77213, 77237), 'numpy.random.rand', 'np.random.rand', (['(128)', '(128)'], {}), '(128, 128)\n', (77227, 77237), True, 'import numpy as np\n'), ((77274, 77304), 'numpy.random.random', 'np.random.random', (['matrix.shape'], {}), '(matrix.shape)\n', (77290, 77304), True, 'import numpy as np\n'), ((78927, 78951), 'numpy.random.rand', 'np.random.rand', (['(128)', '(128)'], {}), '(128, 128)\n', (78941, 78951), True, 'import 
numpy as np\n'), ((78988, 79018), 'numpy.random.random', 'np.random.random', (['matrix.shape'], {}), '(matrix.shape)\n', (79004, 79018), True, 'import numpy as np\n'), ((84134, 84154), 'accera.create_parameters', 'create_parameters', (['(1)'], {}), '(1)\n', (84151, 84154), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((84176, 84216), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(N,)'}), '(role=Array.Role.INPUT, shape=(N,))\n', (84181, 84216), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((84238, 84278), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT', 'shape': '(K,)'}), '(role=Array.Role.INPUT, shape=(K,))\n', (84243, 84278), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((84300, 84347), 'accera.Array', 'Array', ([], {'role': 'Array.Role.INPUT_OUTPUT', 'shape': '(M,)'}), '(role=Array.Role.INPUT_OUTPUT, shape=(M,))\n', (84305, 84347), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((84373, 84391), 'accera.Nest', 'Nest', ([], {'shape': '(M, K)'}), '(shape=(M, K))\n', (84377, 84391), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((85254, 85263), 'accera.Package', 'Package', ([], {}), '()\n', (85261, 85263), False, 'from accera import ScalarType, Array, Function, Nest, Target, Package\n'), ((87758, 87783), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (87774, 87783), True, 'import numpy as np\n'), ((87820, 87845), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (87836, 87845), True, 'import numpy as np\n'), ((87882, 87907), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (87898, 87907), True, 'import numpy as np\n'), ((91074, 91104), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (91086, 91104), False, 'import 
pathlib\n'), ((91137, 91192), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (91160, 91192), False, 'from accera.test import verifiers\n'), ((92121, 92151), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (92133, 92151), False, 'import pathlib\n'), ((92184, 92239), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (92207, 92239), False, 'from accera.test import verifiers\n'), ((93190, 93220), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (93202, 93220), False, 'import pathlib\n'), ((93253, 93308), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (93276, 93308), False, 'from accera.test import verifiers\n'), ((94367, 94397), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (94379, 94397), False, 'import pathlib\n'), ((94430, 94485), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (94453, 94485), False, 'from accera.test import verifiers\n'), ((104226, 104293), 'accera.create_parameter_grid', 'create_parameter_grid', (['{P0: [64, 8], P1: [12, 16, 20], P2: [2, 10]}'], {}), '({P0: [64, 8], P1: [12, 16, 20], P2: [2, 10]})\n', (104247, 104293), False, 'from accera import create_parameters, create_parameter_grid, Nest, Schedule\n'), ((105972, 106002), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (105984, 106002), False, 'import pathlib\n'), ((120867, 120897), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (120879, 120897), False, 'import pathlib\n'), ((18552, 18577), 'numpy.random.random', 
'np.random.random', (['A.shape'], {}), '(A.shape)\n', (18568, 18577), True, 'import numpy as np\n'), ((18613, 18629), 'numpy.ones', 'np.ones', (['C.shape'], {}), '(C.shape)\n', (18620, 18629), True, 'import numpy as np\n'), ((18693, 18718), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (18709, 18718), True, 'import numpy as np\n'), ((20088, 20113), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (20104, 20113), True, 'import numpy as np\n'), ((20149, 20165), 'numpy.ones', 'np.ones', (['C.shape'], {}), '(C.shape)\n', (20156, 20165), True, 'import numpy as np\n'), ((20229, 20254), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (20245, 20254), True, 'import numpy as np\n'), ((74232, 74257), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (74248, 74257), True, 'import numpy as np\n'), ((74298, 74323), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (74314, 74323), True, 'import numpy as np\n'), ((74364, 74389), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (74380, 74389), True, 'import numpy as np\n'), ((85535, 85565), 'pathlib.Path', 'pathlib.Path', (['TEST_PACKAGE_DIR'], {}), '(TEST_PACKAGE_DIR)\n', (85547, 85565), False, 'import pathlib\n'), ((85643, 85698), 'accera.test.verifiers.VerifyPackage', 'verifiers.VerifyPackage', (['self', 'package_name', 'output_dir'], {}), '(self, package_name, output_dir)\n', (85666, 85698), False, 'from accera.test import verifiers\n'), ((89843, 89868), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (89859, 89868), True, 'import numpy as np\n'), ((89909, 89934), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (89925, 89934), True, 'import numpy as np\n'), ((89975, 90000), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (89991, 90000), True, 'import numpy as np\n'), 
((109586, 109611), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (109602, 109611), True, 'import numpy as np\n'), ((109652, 109677), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (109668, 109677), True, 'import numpy as np\n'), ((109718, 109743), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (109734, 109743), True, 'import numpy as np\n'), ((111160, 111185), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (111176, 111185), True, 'import numpy as np\n'), ((111226, 111251), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (111242, 111251), True, 'import numpy as np\n'), ((111292, 111317), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (111308, 111317), True, 'import numpy as np\n'), ((113021, 113046), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (113037, 113046), True, 'import numpy as np\n'), ((113087, 113112), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (113103, 113112), True, 'import numpy as np\n'), ((113153, 113178), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (113169, 113178), True, 'import numpy as np\n'), ((115030, 115055), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (115046, 115055), True, 'import numpy as np\n'), ((115096, 115121), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (115112, 115121), True, 'import numpy as np\n'), ((115162, 115187), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (115178, 115187), True, 'import numpy as np\n'), ((117182, 117207), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (117198, 117207), True, 'import numpy as np\n'), ((117248, 117273), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (117264, 117273), 
True, 'import numpy as np\n'), ((117314, 117339), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (117330, 117339), True, 'import numpy as np\n'), ((119284, 119309), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (119300, 119309), True, 'import numpy as np\n'), ((119350, 119375), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (119366, 119375), True, 'import numpy as np\n'), ((119416, 119441), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (119432, 119441), True, 'import numpy as np\n'), ((32229, 32254), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (32245, 32254), True, 'import numpy as np\n'), ((32299, 32324), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (32315, 32324), True, 'import numpy as np\n'), ((32369, 32394), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (32385, 32394), True, 'import numpy as np\n'), ((33934, 33949), 'numpy.flip', 'np.flip', (['B_test'], {}), '(B_test)\n', (33941, 33949), True, 'import numpy as np\n'), ((84619, 84644), 'numpy.random.random', 'np.random.random', (['A.shape'], {}), '(A.shape)\n', (84635, 84644), True, 'import numpy as np\n'), ((84689, 84714), 'numpy.random.random', 'np.random.random', (['B.shape'], {}), '(B.shape)\n', (84705, 84714), True, 'import numpy as np\n'), ((84759, 84784), 'numpy.random.random', 'np.random.random', (['C.shape'], {}), '(C.shape)\n', (84775, 84784), True, 'import numpy as np\n'), ((32578, 32593), 'numpy.flip', 'np.flip', (['B_test'], {}), '(B_test)\n', (32585, 32593), True, 'import numpy as np\n'), ((84968, 84983), 'numpy.flip', 'np.flip', (['B_test'], {}), '(B_test)\n', (84975, 84983), True, 'import numpy as np\n')]
# pylint: disable=no-member, invalid-name, redefined-outer-name # pylint: disable=too-many-lines from collections import namedtuple, OrderedDict import os from urllib.parse import urlunsplit import numpy as np from numpy import ma import pymc3 as pm import pytest from arviz import ( concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData, ) from ..data.base import generate_dims_coords, make_attrs from ..data.io_pystan import get_draws, get_draws_stan3 # pylint: disable=unused-import from ..data.datasets import REMOTE_DATASETS, LOCAL_DATASETS, RemoteFileMetadata from .helpers import ( # pylint: disable=unused-import check_multiple_attrs, _emcee_lnprior as emcee_lnprior, _emcee_lnprob as emcee_lnprob, needs_emcee3, eight_schools_params, load_cached_models, pystan_extract_unpermuted, stan_extract_dict, pystan_version, ) @pytest.fixture(scope="module") def draws(): return 500 @pytest.fixture(scope="module") def chains(): return 2 @pytest.fixture(autouse=True) def no_remote_data(monkeypatch, tmpdir): """Delete all remote data and replace it with a local dataset.""" keys = list(REMOTE_DATASETS) for key in keys: monkeypatch.delitem(REMOTE_DATASETS, key) centered = LOCAL_DATASETS["centered_eight"] filename = os.path.join(str(tmpdir), os.path.basename(centered.filename)) url = urlunsplit(("file", "", centered.filename, "", "")) monkeypatch.setitem( REMOTE_DATASETS, "test_remote", RemoteFileMetadata( filename=filename, url=url, checksum="9ae00c83654b3f061d32c882ec0a270d10838fa36515ecb162b89a290e014849", description=centered.description, ), ) monkeypatch.setitem( REMOTE_DATASETS, "bad_checksum", RemoteFileMetadata( filename=filename, url=url, checksum="bad!", description=centered.description ), ) UnknownFileMetaData = namedtuple( "UnknownFileMetaData", ["filename", "url", "checksum", 
"description"] ) monkeypatch.setitem( REMOTE_DATASETS, "test_unknown", UnknownFileMetaData( filename=filename, url=url, checksum="9ae00c83654b3f061d32c882ec0a270d10838fa36515ecb162b89a290e014849", description="Test bad REMOTE_DATASET", ), ) def test_load_local_arviz_data(): assert load_arviz_data("centered_eight") def test_clear_data_home(): resource = REMOTE_DATASETS["test_remote"] assert not os.path.exists(resource.filename) load_arviz_data("test_remote") assert os.path.exists(resource.filename) clear_data_home(data_home=os.path.dirname(resource.filename)) assert not os.path.exists(resource.filename) def test_load_remote_arviz_data(): assert load_arviz_data("test_remote") def test_bad_checksum(): with pytest.raises(IOError): load_arviz_data("bad_checksum") def test_missing_dataset(): with pytest.raises(ValueError): load_arviz_data("does not exist") def test_list_datasets(): dataset_string = list_datasets() # make sure all the names of the data sets are in the dataset description for key in ( "centered_eight", "non_centered_eight", "test_remote", "bad_checksum", "test_unknown", ): assert key in dataset_string def test_dims_coords(): shape = 4, 20, 5 var_name = "x" dims, coords = generate_dims_coords(shape, var_name) assert "x_dim_0" in dims assert "x_dim_1" in dims assert "x_dim_2" in dims assert len(coords["x_dim_0"]) == 4 assert len(coords["x_dim_1"]) == 20 assert len(coords["x_dim_2"]) == 5 def test_dims_coords_extra_dims(): shape = 4, 20 var_name = "x" with pytest.warns(SyntaxWarning): dims, coords = generate_dims_coords(shape, var_name, dims=["xx", "xy", "xz"]) assert "xx" in dims assert "xy" in dims assert "xz" in dims assert len(coords["xx"]) == 4 assert len(coords["xy"]) == 20 def test_make_attrs(): extra_attrs = {"key": "Value"} attrs = make_attrs(attrs=extra_attrs) assert "key" in attrs assert attrs["key"] == "Value" def test_addition(): idata1 = from_dict( posterior={"A": np.random.randn(2, 10, 2), "B": np.random.randn(2, 10, 5, 2)} ) idata2 = 
from_dict(prior={"C": np.random.randn(2, 10, 2), "D": np.random.randn(2, 10, 5, 2)}) new_idata = idata1 + idata2 assert new_idata is not None test_dict = {"posterior": ["A", "B"], "prior": ["C", "D"]} fails = check_multiple_attrs(test_dict, new_idata) assert not fails @pytest.mark.parametrize("copy", [True, False]) @pytest.mark.parametrize("inplace", [True, False]) @pytest.mark.parametrize("sequence", [True, False]) def test_concat(copy, inplace, sequence): idata1 = from_dict( posterior={"A": np.random.randn(2, 10, 2), "B": np.random.randn(2, 10, 5, 2)} ) if copy and inplace: original_idata1_posterior_id = id(idata1.posterior) idata2 = from_dict(prior={"C": np.random.randn(2, 10, 2), "D": np.random.randn(2, 10, 5, 2)}) idata3 = from_dict(observed_data={"E": np.random.randn(100), "F": np.random.randn(2, 100)}) # basic case assert concat(idata1, idata2, copy=True, inplace=False) is not None if sequence: new_idata = concat((idata1, idata2, idata3), copy=copy, inplace=inplace) else: new_idata = concat(idata1, idata2, idata3, copy=copy, inplace=inplace) if inplace: assert new_idata is None new_idata = idata1 assert new_idata is not None test_dict = {"posterior": ["A", "B"], "prior": ["C", "D"], "observed_data": ["E", "F"]} fails = check_multiple_attrs(test_dict, new_idata) assert not fails if copy: if inplace: assert id(new_idata.posterior) == original_idata1_posterior_id else: assert id(new_idata.posterior) != id(idata1.posterior) assert id(new_idata.prior) != id(idata2.prior) assert id(new_idata.observed_data) != id(idata3.observed_data) else: assert id(new_idata.posterior) == id(idata1.posterior) assert id(new_idata.prior) == id(idata2.prior) assert id(new_idata.observed_data) == id(idata3.observed_data) @pytest.mark.parametrize("copy", [True, False]) @pytest.mark.parametrize("inplace", [True, False]) @pytest.mark.parametrize("sequence", [True, False]) def test_concat_edgecases(copy, inplace, sequence): idata = from_dict(posterior={"A": np.random.randn(2, 10, 2), 
"B": np.random.randn(2, 10, 5, 2)}) empty = concat() assert empty is not None if sequence: new_idata = concat([idata], copy=copy, inplace=inplace) else: new_idata = concat(idata, copy=copy, inplace=inplace) if inplace: assert new_idata is None new_idata = idata else: assert new_idata is not None test_dict = {"posterior": ["A", "B"]} fails = check_multiple_attrs(test_dict, new_idata) assert not fails if copy and not inplace: assert id(new_idata.posterior) != id(idata.posterior) else: assert id(new_idata.posterior) == id(idata.posterior) def test_concat_bad(): with pytest.raises(TypeError): concat("hello", "hello") idata = from_dict(posterior={"A": np.random.randn(2, 10, 2), "B": np.random.randn(2, 10, 5, 2)}) with pytest.raises(TypeError): concat(idata, np.array([1, 2, 3, 4, 5])) with pytest.raises(NotImplementedError): concat(idata, idata) class TestNumpyToDataArray: def test_1d_dataset(self): size = 100 dataset = convert_to_dataset(np.random.randn(size)) assert len(dataset.data_vars) == 1 assert set(dataset.coords) == {"chain", "draw"} assert dataset.chain.shape == (1,) assert dataset.draw.shape == (size,) def test_warns_bad_shape(self): # Shape should be (chain, draw, *shape) with pytest.warns(SyntaxWarning): convert_to_dataset(np.random.randn(100, 4)) def test_nd_to_dataset(self): shape = (1, 2, 3, 4, 5) dataset = convert_to_dataset(np.random.randn(*shape)) assert len(dataset.data_vars) == 1 var_name = list(dataset.data_vars)[0] assert len(dataset.coords) == len(shape) assert dataset.chain.shape == shape[:1] assert dataset.draw.shape == shape[1:2] assert dataset[var_name].shape == shape def test_nd_to_inference_data(self): shape = (1, 2, 3, 4, 5) inference_data = convert_to_inference_data(np.random.randn(*shape), group="foo") assert hasattr(inference_data, "foo") assert len(inference_data.foo.data_vars) == 1 var_name = list(inference_data.foo.data_vars)[0] assert len(inference_data.foo.coords) == len(shape) assert inference_data.foo.chain.shape == shape[:1] 
assert inference_data.foo.draw.shape == shape[1:2] assert inference_data.foo[var_name].shape == shape assert repr(inference_data).startswith("Inference data with groups") def test_more_chains_than_draws(self): shape = (10, 4) with pytest.warns(SyntaxWarning): inference_data = convert_to_inference_data(np.random.randn(*shape), group="foo") assert hasattr(inference_data, "foo") assert len(inference_data.foo.data_vars) == 1 var_name = list(inference_data.foo.data_vars)[0] assert len(inference_data.foo.coords) == len(shape) assert inference_data.foo.chain.shape == shape[:1] assert inference_data.foo.draw.shape == shape[1:2] assert inference_data.foo[var_name].shape == shape class TestConvertToDataset: @pytest.fixture(scope="class") def data(self): # pylint: disable=attribute-defined-outside-init class Data: datadict = { "a": np.random.randn(100), "b": np.random.randn(1, 100, 10), "c": np.random.randn(1, 100, 3, 4), } coords = {"c1": np.arange(3), "c2": np.arange(4), "b1": np.arange(10)} dims = {"b": ["b1"], "c": ["c1", "c2"]} return Data def test_use_all(self, data): dataset = convert_to_dataset(data.datadict, coords=data.coords, dims=data.dims) assert set(dataset.data_vars) == {"a", "b", "c"} assert set(dataset.coords) == {"chain", "draw", "c1", "c2", "b1"} assert set(dataset.a.coords) == {"chain", "draw"} assert set(dataset.b.coords) == {"chain", "draw", "b1"} assert set(dataset.c.coords) == {"chain", "draw", "c1", "c2"} def test_missing_coords(self, data): dataset = convert_to_dataset(data.datadict, coords=None, dims=data.dims) assert set(dataset.data_vars) == {"a", "b", "c"} assert set(dataset.coords) == {"chain", "draw", "c1", "c2", "b1"} assert set(dataset.a.coords) == {"chain", "draw"} assert set(dataset.b.coords) == {"chain", "draw", "b1"} assert set(dataset.c.coords) == {"chain", "draw", "c1", "c2"} def test_missing_dims(self, data): # missing dims coords = {"c_dim_0": np.arange(3), "c_dim_1": np.arange(4), "b_dim_0": np.arange(10)} dataset = 
convert_to_dataset(data.datadict, coords=coords, dims=None)
        assert set(dataset.data_vars) == {"a", "b", "c"}
        assert set(dataset.coords) == {"chain", "draw", "c_dim_0", "c_dim_1", "b_dim_0"}
        assert set(dataset.a.coords) == {"chain", "draw"}
        assert set(dataset.b.coords) == {"chain", "draw", "b_dim_0"}
        assert set(dataset.c.coords) == {"chain", "draw", "c_dim_0", "c_dim_1"}

    def test_skip_dim_0(self, data):
        """A None entry in a dims list falls back to the default <var>_dim_<i> name."""
        dims = {"c": [None, "c2"]}
        coords = {"c_dim_0": np.arange(3), "c2": np.arange(4), "b_dim_0": np.arange(10)}
        dataset = convert_to_dataset(data.datadict, coords=coords, dims=dims)
        assert set(dataset.data_vars) == {"a", "b", "c"}
        assert set(dataset.coords) == {"chain", "draw", "c_dim_0", "c2", "b_dim_0"}
        assert set(dataset.a.coords) == {"chain", "draw"}
        assert set(dataset.b.coords) == {"chain", "draw", "b_dim_0"}
        assert set(dataset.c.coords) == {"chain", "draw", "c_dim_0", "c2"}


def test_dict_to_dataset():
    """A plain dict of arrays converts with user-supplied coords/dims applied."""
    datadict = {"a": np.random.randn(100), "b": np.random.randn(1, 100, 10)}
    dataset = convert_to_dataset(datadict, coords={"c": np.arange(10)}, dims={"b": ["c"]})
    assert set(dataset.data_vars) == {"a", "b"}
    assert set(dataset.coords) == {"chain", "draw", "c"}
    assert set(dataset.a.coords) == {"chain", "draw"}
    assert set(dataset.b.coords) == {"chain", "draw", "c"}


def test_convert_to_dataset_idempotent():
    """Converting an already-converted Dataset yields an equal Dataset."""
    first = convert_to_dataset(np.random.randn(100))
    second = convert_to_dataset(first)
    assert first.equals(second)


def test_convert_to_inference_data_idempotent():
    """Re-converting InferenceData keeps the very same group object."""
    first = convert_to_inference_data(np.random.randn(100), group="foo")
    second = convert_to_inference_data(first)
    assert first.foo is second.foo


def test_convert_to_inference_data_from_file(tmpdir):
    """A netCDF file path round-trips back into equal InferenceData groups."""
    first = convert_to_inference_data(np.random.randn(100), group="foo")
    filename = str(tmpdir.join("test_file.nc"))
    first.to_netcdf(filename)
    second = convert_to_inference_data(filename)
    assert first.foo.equals(second.foo)


def test_convert_to_inference_data_bad():
    """Unconvertible input (a bare int) raises ValueError."""
    with pytest.raises(ValueError):
        convert_to_inference_data(1)
def test_convert_to_dataset_bad(tmpdir): first = convert_to_inference_data(np.random.randn(100), group="foo") filename = str(tmpdir.join("test_file.nc")) first.to_netcdf(filename) with pytest.raises(ValueError): convert_to_dataset(filename, group="bar") def test_bad_inference_data(): with pytest.raises(ValueError): InferenceData(posterior=[1, 2, 3]) class TestDictNetCDFUtils: @pytest.fixture(scope="class") def data(self, eight_schools_params, draws, chains): # Data of the Eight Schools Model class Data: _, stan_fit = load_cached_models(eight_schools_params, draws, chains)["pystan"] if pystan_version() == 2: stan_dict = pystan_extract_unpermuted(stan_fit) obj = {} for name, vals in stan_dict.items(): if name not in {"y_hat", "log_lik"}: # extra vars obj[name] = np.swapaxes(vals, 0, 1) else: stan_dict = stan_extract_dict(stan_fit) obj = {} for name, vals in stan_dict.items(): if name not in {"y_hat", "log_lik"}: # extra vars obj[name] = vals return Data def check_var_names_coords_dims(self, dataset): assert set(dataset.data_vars) == {"mu", "tau", "eta", "theta"} assert set(dataset.coords) == {"chain", "draw", "school"} def get_inference_data(self, data, eight_schools_params): return convert_to_inference_data( data.obj, group="posterior", coords={"school": np.arange(eight_schools_params["J"])}, dims={"theta": ["school"], "eta": ["school"]}, ) def test_testing_extract(self, data): if pystan_version() == 2: extract_func = pystan_extract_unpermuted parameters = data.stan_fit.model_pars else: extract_func = stan_extract_dict parameters = data.stan_fit.param_names assert isinstance(extract_func(data.stan_fit, var_names=None), dict) assert isinstance(extract_func(data.stan_fit, var_names=parameters[0]), dict) assert isinstance(extract_func(data.stan_fit, var_names=parameters), dict) assert isinstance( extract_func(data.stan_fit, var_names=[parameters[0], parameters[0]]), dict ) def test_convert_to_inference_data(self, data, eight_schools_params): inference_data = 
self.get_inference_data(data, eight_schools_params) assert hasattr(inference_data, "posterior") self.check_var_names_coords_dims(inference_data.posterior) def test_convert_to_dataset(self, eight_schools_params, draws, chains, data): dataset = convert_to_dataset( data.obj, group="posterior", coords={"school": np.arange(eight_schools_params["J"])}, dims={"theta": ["school"], "eta": ["school"]}, ) assert dataset.draw.shape == (draws,) assert dataset.chain.shape == (chains,) assert dataset.school.shape == (eight_schools_params["J"],) assert dataset.theta.shape == (chains, draws, eight_schools_params["J"]) class TestDictIONetCDFUtils: @pytest.fixture(scope="class") def data(self, eight_schools_params, draws, chains): # Data of the Eight Schools Model class Data: _, stan_fit = load_cached_models(eight_schools_params, draws, chains)["pystan"] if pystan_version() == 2: stan_dict = pystan_extract_unpermuted(stan_fit) obj = {} for name, vals in stan_dict.items(): if name not in {"y_hat", "log_lik"}: # extra vars obj[name] = np.swapaxes(vals, 0, 1) else: stan_dict = stan_extract_dict(stan_fit) obj = {} for name, vals in stan_dict.items(): if name not in {"y_hat", "log_lik"}: # extra vars obj[name] = vals return Data def check_var_names_coords_dims(self, dataset): assert set(dataset.data_vars) == {"mu", "tau", "eta", "theta"} assert set(dataset.coords) == {"chain", "draw", "school"} def get_inference_data(self, data, eight_schools_params): return from_dict( posterior=data.obj, posterior_predictive=data.obj, sample_stats=data.obj, prior=data.obj, prior_predictive=data.obj, sample_stats_prior=data.obj, observed_data=eight_schools_params, coords={"school": np.arange(eight_schools_params["J"])}, dims={"theta": ["school"], "eta": ["school"]}, ) def test_inference_data(self, data, eight_schools_params): inference_data = self.get_inference_data(data, eight_schools_params) test_dict = { "posterior": [], "prior": [], "sample_stats": [], "posterior_predictive": [], "prior_predictive": 
[], "sample_stats_prior": [], "observed_data": ["J", "y", "sigma"], } fails = check_multiple_attrs(test_dict, inference_data) assert not fails self.check_var_names_coords_dims(inference_data.posterior) self.check_var_names_coords_dims(inference_data.posterior_predictive) self.check_var_names_coords_dims(inference_data.sample_stats) self.check_var_names_coords_dims(inference_data.prior) self.check_var_names_coords_dims(inference_data.prior_predictive) self.check_var_names_coords_dims(inference_data.sample_stats_prior) def test_inference_data_edge_cases(self): # create data log_likelihood = { "y": np.random.randn(4, 100), "log_likelihood": np.random.randn(4, 100, 8), } # log_likelihood to posterior assert from_dict(posterior=log_likelihood) is not None # dims == None assert from_dict(observed_data=log_likelihood, dims=None) is not None def test_inference_data_bad(self): # create data x = np.random.randn(4, 100) # input ndarray with pytest.raises(TypeError): from_dict(posterior=x) with pytest.raises(TypeError): from_dict(posterior_predictive=x) with pytest.raises(TypeError): from_dict(sample_stats=x) with pytest.raises(TypeError): from_dict(prior=x) with pytest.raises(TypeError): from_dict(prior_predictive=x) with pytest.raises(TypeError): from_dict(sample_stats_prior=x) with pytest.raises(TypeError): from_dict(observed_data=x) class TestEmceeNetCDFUtils: @pytest.fixture(scope="class") def data(self, draws, chains): class Data: # chains are not used # emcee uses lots of walkers obj = load_cached_models(eight_schools_params, draws, chains)["emcee"] return Data def get_inference_data(self, data): return from_emcee(data.obj, var_names=["ln(f)", "b", "m"]) def get_inference_data_reader(self): from emcee import backends # pylint: disable=no-name-in-module here = os.path.dirname(os.path.abspath(__file__)) data_directory = os.path.join(here, "saved_models") filepath = os.path.join(data_directory, "reader_testfile.h5") assert os.path.exists(filepath) assert 
os.path.getsize(filepath) reader = backends.HDFBackend(filepath, read_only=True) return from_emcee(reader, var_names=["ln(f)", "b", "m"]) def test_inference_data(self, data): inference_data = self.get_inference_data(data) test_dict = {"posterior": ["ln(f)", "b", "m"]} fails = check_multiple_attrs(test_dict, inference_data) assert not fails @needs_emcee3 def test_inference_data_reader(self): inference_data = self.get_inference_data_reader() test_dict = {"posterior": ["ln(f)", "b", "m"]} fails = check_multiple_attrs(test_dict, inference_data) assert not fails def test_verify_var_names(self, data): with pytest.raises(ValueError): from_emcee(data.obj, var_names=["not", "enough"]) def test_verify_arg_names(self, data): with pytest.raises(ValueError): from_emcee(data.obj, arg_names=["not", "enough"]) def test_ln_funcs_for_infinity(self): # after dropping Python 3.5 support use underscore 1_000_000 assert np.isinf(emcee_lnprior([1000, 10000, 1000000])) assert np.isinf(emcee_lnprob([1000, 10000, 1000000], 0, 0, 0)) class TestIONetCDFUtils: @pytest.fixture(scope="class") def data(self, draws, chains): class Data: model, obj = load_cached_models(eight_schools_params, draws, chains)["pymc3"] return Data def get_inference_data(self, data, eight_schools_params): # pylint: disable=W0613 with data.model: prior = pm.sample_prior_predictive() posterior_predictive = pm.sample_posterior_predictive(data.obj) return from_pymc3( trace=data.obj, prior=prior, posterior_predictive=posterior_predictive, coords={"school": np.arange(eight_schools_params["J"])}, dims={"theta": ["school"], "eta": ["school"]}, ) def test_io_function(self, data, eight_schools_params): inference_data = self.get_inference_data( # pylint: disable=W0612 data, eight_schools_params ) assert hasattr(inference_data, "posterior") here = os.path.dirname(os.path.abspath(__file__)) data_directory = os.path.join(here, "saved_models") filepath = os.path.join(data_directory, "io_function_testfile.nc") # az -function 
to_netcdf(inference_data, filepath) assert os.path.exists(filepath) assert os.path.getsize(filepath) > 0 inference_data2 = from_netcdf(filepath) assert hasattr(inference_data2, "posterior") os.remove(filepath) assert not os.path.exists(filepath) # Test deprecated functions save_data(inference_data, filepath) assert os.path.exists(filepath) assert os.path.getsize(filepath) > 0 inference_data3 = load_data(filepath) assert hasattr(inference_data3, "posterior") os.remove(filepath) assert not os.path.exists(filepath) def test_io_method(self, data, eight_schools_params): inference_data = self.get_inference_data( # pylint: disable=W0612 data, eight_schools_params ) assert hasattr(inference_data, "posterior") here = os.path.dirname(os.path.abspath(__file__)) data_directory = os.path.join(here, "saved_models") filepath = os.path.join(data_directory, "io_method_testfile.nc") assert not os.path.exists(filepath) # InferenceData method inference_data.to_netcdf(filepath) assert os.path.exists(filepath) assert os.path.getsize(filepath) > 0 inference_data2 = InferenceData.from_netcdf(filepath) assert hasattr(inference_data2, "posterior") os.remove(filepath) assert not os.path.exists(filepath) def test_empty_inference_data_object(self): inference_data = InferenceData() here = os.path.dirname(os.path.abspath(__file__)) data_directory = os.path.join(here, "saved_models") filepath = os.path.join(data_directory, "empty_test_file.nc") assert not os.path.exists(filepath) inference_data.to_netcdf(filepath) assert os.path.exists(filepath) os.remove(filepath) assert not os.path.exists(filepath) class TestPyMC3NetCDFUtils: @pytest.fixture(scope="class") def data(self, draws, chains): class Data: model, obj = load_cached_models(eight_schools_params, draws, chains)["pymc3"] return Data def get_inference_data(self, data, eight_schools_params): with data.model: prior = pm.sample_prior_predictive() posterior_predictive = pm.sample_posterior_predictive(data.obj) return from_pymc3( trace=data.obj, 
prior=prior, posterior_predictive=posterior_predictive, coords={"school": np.arange(eight_schools_params["J"])}, dims={"theta": ["school"], "eta": ["school"]}, ) def test_posterior(self, data, eight_schools_params): inference_data = self.get_inference_data(data, eight_schools_params) assert hasattr(inference_data, "posterior") def test_sampler_stats(self, data, eight_schools_params): inference_data = self.get_inference_data(data, eight_schools_params) assert hasattr(inference_data, "sample_stats") def test_posterior_predictive(self, data, eight_schools_params): inference_data = self.get_inference_data(data, eight_schools_params) assert hasattr(inference_data, "posterior_predictive") def test_prior(self, data, eight_schools_params): inference_data = self.get_inference_data(data, eight_schools_params) assert hasattr(inference_data, "prior") def test_missing_data_model(self): # source pymc3/pymc3/tests/test_missing.py data = ma.masked_values([1, 2, -1, 4, -1], value=-1) model = pm.Model() with model: x = pm.Normal("x", 1, 1) pm.Normal("y", x, 1, observed=data) trace = pm.sample(100, chains=2) # make sure that data is really missing y_missing, = model.missing_values assert y_missing.tag.test_value.shape == (2,) inference_data = from_pymc3(trace=trace) test_dict = {"posterior": ["x"], "observed_data": ["y"], "sample_stats": ["log_likelihood"]} fails = check_multiple_attrs(test_dict, inference_data) assert not fails def test_multiple_observed_rv(self): y1_data = np.random.randn(10) y2_data = np.random.randn(100) with pm.Model(): x = pm.Normal("x", 1, 1) pm.Normal("y1", x, 1, observed=y1_data) pm.Normal("y2", x, 1, observed=y2_data) trace = pm.sample(100, chains=2) inference_data = from_pymc3(trace=trace) test_dict = {"posterior": ["x"], "observed_data": ["y1", "y2"], "sample_stats": ["lp"]} fails = check_multiple_attrs(test_dict, inference_data) assert not fails assert not hasattr(inference_data.sample_stats, "log_likelihood") class TestPyroNetCDFUtils: 
@pytest.fixture(scope="class") def data(self, draws, chains): class Data: obj = load_cached_models(eight_schools_params, draws, chains)["pyro"] return Data def get_inference_data(self, data): return from_pyro(posterior=data.obj) def test_inference_data(self, data): inference_data = self.get_inference_data(data) assert hasattr(inference_data, "posterior") class TestPyStanNetCDFUtils: @pytest.fixture(scope="class") def data(self, draws, chains): class Data: model, obj = load_cached_models(eight_schools_params, draws, chains)["pystan"] return Data def get_inference_data(self, data, eight_schools_params): """vars as str.""" return from_pystan( posterior=data.obj, posterior_predictive="y_hat", prior=data.obj, prior_predictive="y_hat", observed_data="y", log_likelihood="log_lik", coords={"school": np.arange(eight_schools_params["J"])}, dims={ "theta": ["school"], "y": ["school"], "log_lik": ["school"], "y_hat": ["school"], "eta": ["school"], }, posterior_model=data.model, prior_model=data.model, ) def get_inference_data2(self, data, eight_schools_params): """vars as lists.""" return from_pystan( posterior=data.obj, posterior_predictive=["y_hat"], prior=data.obj, prior_predictive=["y_hat"], observed_data="y", log_likelihood="log_lik", coords={ "school": np.arange(eight_schools_params["J"]), "log_likelihood_dim": np.arange(eight_schools_params["J"]), }, dims={ "theta": ["school"], "y": ["school"], "y_hat": ["school"], "eta": ["school"], "log_lik": ["log_likelihood_dim"], }, posterior_model=data.model, prior_model=data.model, ) def get_inference_data3(self, data, eight_schools_params): """multiple vars as lists.""" return from_pystan( posterior=data.obj, posterior_predictive=["y_hat", "log_lik"], prior=data.obj, prior_predictive=["y_hat", "log_lik"], observed_data="y", coords={"school": np.arange(eight_schools_params["J"])}, dims={"theta": ["school"], "y": ["school"], "y_hat": ["school"], "eta": ["school"]}, posterior_model=data.model, prior_model=data.model, ) def 
get_inference_data4(self, data): """multiple vars as lists.""" return from_pystan( posterior=data.obj, posterior_predictive=None, prior=data.obj, prior_predictive=None, observed_data="y", coords=None, dims=None, posterior_model=data.model, prior_model=data.model, ) def test_sampler_stats(self, data, eight_schools_params): inference_data = self.get_inference_data(data, eight_schools_params) test_dict = {"sample_stats": ["lp", "diverging"]} fails = check_multiple_attrs(test_dict, inference_data) assert not fails def test_inference_data(self, data, eight_schools_params): inference_data1 = self.get_inference_data(data, eight_schools_params) inference_data2 = self.get_inference_data2(data, eight_schools_params) inference_data3 = self.get_inference_data3(data, eight_schools_params) inference_data4 = self.get_inference_data4(data) # inference_data 1 test_dict = { "posterior": ["theta"], "observed_data": ["y"], "sample_stats": ["log_likelihood"], "prior": ["theta"], } fails = check_multiple_attrs(test_dict, inference_data1) assert not fails # inference_data 2 test_dict = { "posterior_predictive": ["y_hat"], "observed_data": ["y"], "sample_stats_prior": ["lp"], "sample_stats": ["lp"], "prior_predictive": ["y_hat"], } fails = check_multiple_attrs(test_dict, inference_data2) assert not fails # inference_data 3 test_dict = { "posterior_predictive": ["y_hat"], "observed_data": ["y"], "sample_stats_prior": ["lp"], "sample_stats": ["lp"], "prior_predictive": ["y_hat"], } fails = check_multiple_attrs(test_dict, inference_data3) assert not fails # inference_data 4 test_dict = {"posterior": ["theta"], "prior": ["theta"]} fails = check_multiple_attrs(test_dict, inference_data4) assert not fails def test_invalid_fit(self, data): if pystan_version() == 2: model = data.model model_data = { "J": 8, "y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]), "sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]), } fit_test_grad = model.sampling( data=model_data, 
test_grad=True, check_hmc_diagnostics=False ) with pytest.raises(AttributeError): _ = from_pystan(posterior=fit_test_grad) fit = model.sampling(data=model_data, iter=100, chains=1, check_hmc_diagnostics=False) del fit.sim["samples"] with pytest.raises(AttributeError): _ = from_pystan(posterior=fit) def test_empty_parameter(self): if pystan_version() == 2: model_code = """ parameters { real y; vector[0] z; } model { y ~ normal(0,1); } """ from pystan import StanModel model = StanModel(model_code=model_code) fit = model.sampling(iter=10, chains=2, check_hmc_diagnostics=False) posterior = from_pystan(posterior=fit) test_dict = {"posterior": ["y"], "sample_stats": ["lp"]} fails = check_multiple_attrs(test_dict, posterior) assert not fails def test_get_draws(self, data): fit = data.obj if pystan_version() == 2: draws = get_draws(fit, variables=["theta", "theta"]) assert draws.get("theta") is not None else: draws = get_draws_stan3(fit, variables=["theta", "theta"]) assert draws.get("theta") is not None @pytest.mark.skipif(pystan_version() != 2, reason="PyStan 2.x required") def test_index_order(self, data, eight_schools_params): """Test 0-indexed data.""" import pystan fit = data.model.sampling(data=eight_schools_params) if pystan.__version__ >= "2.18": # make 1-indexed to 0-indexed for holder in fit.sim["samples"]: new_chains = OrderedDict() for i, (key, values) in enumerate(holder.chains.items()): if "[" in key: name, *shape = key.replace("]", "").split("[") shape = [str(int(item) - 1) for items in shape for item in items.split(",")] key = name + "[{}]".format(",".join(shape)) new_chains[key] = np.full_like(values, fill_value=float(i)) setattr(holder, "chains", new_chains) fit.sim["fnames_oi"] = list(fit.sim["samples"][0].chains.keys()) idata = from_pystan(posterior=fit) assert idata is not None for j, fpar in enumerate(fit.sim["fnames_oi"]): if fpar == "lp__": continue par, *shape = fpar.replace("]", "").split("[") assert hasattr(idata.posterior, par) if shape: shape 
= [slice(None), slice(None)] + list(map(int, shape)) assert idata.posterior[par][tuple(shape)].values.mean() == float(j) else: assert idata.posterior[par].values.mean() == float(j) class TestTfpNetCDFUtils: @pytest.fixture(scope="class") def data(self, draws, chains): class Data: # Returns result of from_tfp model, obj = load_cached_models(eight_schools_params, draws, chains)[ "tensorflow_probability" ] return Data def get_inference_data(self, data, eight_schools_params): """Normal read with observed and var_names.""" inference_data = from_tfp( data.obj, var_names=["mu", "tau", "eta"], model_fn=lambda: data.model( eight_schools_params["J"], eight_schools_params["sigma"].astype(np.float32) ), observed=eight_schools_params["y"].astype(np.float32), ) return inference_data def get_inference_data2(self, data): """Fit only.""" inference_data = from_tfp(data.obj) return inference_data def get_inference_data3(self, data, eight_schools_params): """Read with observed Tensor var_names and dims.""" import tensorflow as tf inference_data = from_tfp( data.obj, var_names=["mu", "tau", "eta"], model_fn=lambda: data.model( eight_schools_params["J"], eight_schools_params["sigma"].astype(np.float32) ), posterior_predictive_samples=100, posterior_predictive_size=3, observed=tf.convert_to_tensor( np.vstack( ( eight_schools_params["y"], eight_schools_params["y"], eight_schools_params["y"], ) ).astype(np.float32), np.float32, ), coords={"school": np.arange(eight_schools_params["J"])}, dims={"eta": ["school"], "obs": ["size_dim", "school"]}, ) return inference_data def get_inference_data4(self, data, eight_schools_params): """Test setter.""" inference_data = from_tfp( data.obj + [np.ones_like(data.obj[0]).astype(np.float32)], var_names=["mu", "tau", "eta", "avg_effect"], model_fn=lambda: data.model( eight_schools_params["J"], eight_schools_params["sigma"].astype(np.float32) ), observed=eight_schools_params["y"].astype(np.float32), ) return inference_data def test_inference_data(self, 
data, eight_schools_params): inference_data = self.get_inference_data(data, eight_schools_params) test_dict = { "posterior": ["mu", "tau", "eta"], "observed_data": ["obs"], "posterior_predictive": ["obs"], } fails = check_multiple_attrs(test_dict, inference_data) assert not fails def test_inference_data2(self, data): inference_data = self.get_inference_data2(data) assert hasattr(inference_data, "posterior") def test_inference_data3(self, data, eight_schools_params): inference_data = self.get_inference_data3(data, eight_schools_params) test_dict = { "posterior": ["mu", "tau", "eta"], "observed_data": ["obs"], "posterior_predictive": ["obs"], } fails = check_multiple_attrs(test_dict, inference_data) assert not fails def test_inference_data4(self, data, eight_schools_params): inference_data = self.get_inference_data4(data, eight_schools_params) test_dict = { "posterior": ["mu", "tau", "eta", "avg_effect"], "observed_data": ["obs"], "posterior_predictive": ["obs"], } fails = check_multiple_attrs(test_dict, inference_data) assert not fails class TestCmdStanNetCDFUtils: @pytest.fixture(scope="session") def data_directory(self): here = os.path.dirname(os.path.abspath(__file__)) data_directory = os.path.join(here, "saved_models") return data_directory @pytest.fixture(scope="class") def paths(self, data_directory): paths = { "no_warmup": [ os.path.join(data_directory, "cmdstan/output_no_warmup1.csv"), os.path.join(data_directory, "cmdstan/output_no_warmup2.csv"), os.path.join(data_directory, "cmdstan/output_no_warmup3.csv"), os.path.join(data_directory, "cmdstan/output_no_warmup4.csv"), ], "warmup": [ os.path.join(data_directory, "cmdstan/output_warmup1.csv"), os.path.join(data_directory, "cmdstan/output_warmup2.csv"), os.path.join(data_directory, "cmdstan/output_warmup3.csv"), os.path.join(data_directory, "cmdstan/output_warmup4.csv"), ], "no_warmup_glob": os.path.join(data_directory, "cmdstan/output_no_warmup[0-9].csv"), "warmup_glob": os.path.join(data_directory, 
"cmdstan/output_warmup[0-9].csv"), "combined_no_warmup": [ os.path.join(data_directory, "cmdstan/combined_output_no_warmup.csv") ], "combined_warmup": [os.path.join(data_directory, "cmdstan/combined_output_warmup.csv")], "combined_no_warmup_glob": os.path.join( data_directory, "cmdstan/combined_output_no_warmup.csv" ), "combined_warmup_glob": os.path.join( data_directory, "cmdstan/combined_output_warmup.csv" ), "eight_schools_glob": os.path.join( data_directory, "cmdstan/eight_schools_output[0-9].csv" ), "eight_schools": [ os.path.join(data_directory, "cmdstan/eight_schools_output1.csv"), os.path.join(data_directory, "cmdstan/eight_schools_output2.csv"), os.path.join(data_directory, "cmdstan/eight_schools_output3.csv"), os.path.join(data_directory, "cmdstan/eight_schools_output4.csv"), ], "missing_files": [ os.path.join(data_directory, "cmdstan/combined_missing_config.csv"), os.path.join(data_directory, "cmdstan/combined_missing_adaptation.csv"), os.path.join(data_directory, "cmdstan/combined_missing_timing1.csv"), os.path.join(data_directory, "cmdstan/combined_missing_timing2.csv"), ], } return paths @pytest.fixture(scope="class") def observed_data_paths(self, data_directory): observed_data_paths = [ os.path.join(data_directory, "cmdstan/eight_schools.data.R"), os.path.join(data_directory, "cmdstan/example_stan.data.R"), ] return observed_data_paths def get_inference_data(self, posterior, **kwargs): return from_cmdstan(posterior=posterior, **kwargs) def test_sample_stats(self, paths): for key, path in paths.items(): if "missing" in key: continue inference_data = self.get_inference_data(path) assert hasattr(inference_data, "sample_stats") def test_inference_data_shapes(self, paths): """Assert that shapes are transformed correctly""" for key, path in paths.items(): if "eight" in key or "missing" in key: continue inference_data = self.get_inference_data(path) test_dict = {"posterior": ["x", "y", "Z"]} fails = check_multiple_attrs(test_dict, inference_data) assert not 
fails assert inference_data.posterior["y"].shape == (4, 100) assert inference_data.posterior["x"].shape == (4, 100, 3) assert inference_data.posterior["Z"].shape == (4, 100, 4, 6) dims = ["chain", "draw"] y_mean_true = 0 y_mean = inference_data.posterior["y"].mean(dim=dims) assert np.isclose(y_mean, y_mean_true, atol=1e-1) x_mean_true = np.array([1, 2, 3]) x_mean = inference_data.posterior["x"].mean(dim=dims) assert np.isclose(x_mean, x_mean_true, atol=1e-1).all() Z_mean_true = np.array([1, 2, 3, 4]) Z_mean = inference_data.posterior["Z"].mean(dim=dims).mean(axis=1) assert np.isclose(Z_mean, Z_mean_true, atol=7e-1).all() def test_inference_data_input_types1(self, paths, observed_data_paths): """Check input types posterior --> str, list of str prior --> str, list of str posterior_predictive --> str, variable in posterior observed_data --> Rdump format observed_data_var --> str, variable log_likelihood --> str coords --> one to many dims --> one to many """ for key, path in paths.items(): if "eight" not in key: continue inference_data = self.get_inference_data( posterior=path, posterior_predictive="y_hat", prior=path, prior_predictive="y_hat", observed_data=observed_data_paths[0], observed_data_var="y", log_likelihood="log_lik", coords={"school": np.arange(8)}, dims={ "theta": ["school"], "y": ["school"], "log_lik": ["school"], "y_hat": ["school"], "eta": ["school"], }, ) test_dict = { "posterior": ["mu", "tau", "theta_tilde", "theta"], "prior": ["mu", "tau", "theta_tilde", "theta"], "prior_predictive": ["y_hat"], "sample_stats": ["log_likelihood"], "observed_data": ["y"], "posterior_predictive": ["y_hat"], } fails = check_multiple_attrs(test_dict, inference_data) assert not fails def test_inference_data_input_types2(self, paths, observed_data_paths): """Check input types (change, see earlier) posterior_predictive --> List[str], variable in posterior observed_data_var --> List[str], variable """ for key, path in paths.items(): if "eight" not in key: continue 
inference_data = self.get_inference_data( posterior=path, posterior_predictive=["y_hat"], prior=path, prior_predictive=["y_hat"], observed_data=observed_data_paths[0], observed_data_var=["y"], log_likelihood="log_lik", coords={"school": np.arange(8)}, dims={ "theta": ["school"], "y": ["school"], "log_lik": ["school"], "y_hat": ["school"], "eta": ["school"], }, ) test_dict = { "posterior": ["mu", "tau", "theta_tilde", "theta"], "prior": ["mu", "tau", "theta_tilde", "theta"], "prior_predictive": ["y_hat"], "sample_stats": ["log_likelihood"], "observed_data": ["y"], "posterior_predictive": ["y_hat"], } fails = check_multiple_attrs(test_dict, inference_data) assert not fails def test_inference_data_input_types3(self, paths, observed_data_paths): """Check input types (change, see earlier) posterior_predictive --> str, csv file coords --> one to many + one to one (default dim) dims --> one to many """ for key, path in paths.items(): if "eight" not in key: continue post_pred = paths["eight_schools_glob"] inference_data = self.get_inference_data( posterior=path, posterior_predictive=post_pred, prior=path, prior_predictive=post_pred, observed_data=observed_data_paths[0], observed_data_var=["y"], log_likelihood="log_lik", coords={"school": np.arange(8), "log_lik_dim_0": np.arange(8)}, dims={"theta": ["school"], "y": ["school"], "y_hat": ["school"], "eta": ["school"]}, ) test_dict = { "posterior": ["mu", "tau", "theta_tilde", "theta"], "prior": ["mu", "tau", "theta_tilde", "theta"], "prior_predictive": ["y_hat"], "sample_stats": ["log_likelihood"], "observed_data": ["y"], "posterior_predictive": ["y_hat"], } fails = check_multiple_attrs(test_dict, inference_data) assert not fails def test_inference_data_input_types4(self, paths): """Check input types (change, see earlier) coords --> one to many + one to one (non-default dim) dims --> one to many + one to one """ path = paths["combined_no_warmup"] for path in [path, path[0]]: inference_data = self.get_inference_data( 
posterior=path, posterior_predictive=path, prior=path, prior_predictive=path, observed_data=None, observed_data_var=None, coords={"rand": np.arange(3)}, dims={"x": ["rand"]}, ) test_dict = { "posterior": ["x", "y", "Z"], "prior": ["x", "y", "Z"], "prior_predictive": ["x", "y", "Z"], "sample_stats": ["lp"], "sample_stats_prior": ["lp"], "posterior_predictive": ["x", "y", "Z"], } fails = check_multiple_attrs(test_dict, inference_data) assert not fails def test_inference_data_input_types5(self, paths, observed_data_paths): """Check input types (change, see earlier) posterior_predictive is None prior_predictive is None """ for key, path in paths.items(): if "eight" not in key: continue inference_data = self.get_inference_data( posterior=path, posterior_predictive=None, prior=path, prior_predictive=None, observed_data=observed_data_paths[0], observed_data_var=["y"], log_likelihood=["log_lik"], coords={"school": np.arange(8), "log_lik_dim": np.arange(8)}, dims={ "theta": ["school"], "y": ["school"], "log_lik": ["log_lik_dim"], "y_hat": ["school"], "eta": ["school"], }, ) test_dict = { "posterior": ["mu", "tau", "theta_tilde", "theta"], "prior": ["mu", "tau", "theta_tilde", "theta"], "sample_stats": ["log_likelihood"], "observed_data": ["y"], "sample_stats_prior": ["lp"], } fails = check_multiple_attrs(test_dict, inference_data) assert not fails def test_inference_data_bad_csv(self, paths): """Check ValueError for csv with missing headers""" for key, _paths in paths.items(): if "missing" not in key: continue for path in _paths: with pytest.raises(ValueError): self.get_inference_data(posterior=path) def test_inference_data_observed_data1(self, observed_data_paths): """Read Rdump, check shapes are correct All variables """ path = observed_data_paths[1] inference_data = self.get_inference_data(posterior=None, observed_data=path) assert hasattr(inference_data, "observed_data") assert len(inference_data.observed_data.data_vars) == 3 assert 
inference_data.observed_data["x"].shape == (1,) assert inference_data.observed_data["y"].shape == (3,) assert inference_data.observed_data["Z"].shape == (4, 5) def test_inference_data_observed_data2(self, observed_data_paths): """Read Rdump, check shapes are correct One variable as str """ path = observed_data_paths[1] inference_data = self.get_inference_data( posterior=None, observed_data=path, observed_data_var="x" ) assert hasattr(inference_data, "observed_data") assert len(inference_data.observed_data.data_vars) == 1 assert inference_data.observed_data["x"].shape == (1,) def test_inference_data_observed_data3(self, observed_data_paths): """Read Rdump, check shapes are correct One variable as a list """ path = observed_data_paths[1] inference_data = self.get_inference_data( posterior=None, observed_data=path, observed_data_var=["x"] ) assert hasattr(inference_data, "observed_data") assert len(inference_data.observed_data.data_vars) == 1 assert inference_data.observed_data["x"].shape == (1,) def test_inference_data_observed_data4(self, observed_data_paths): """Read Rdump, check shapes are correct Many variables as list """ path = observed_data_paths[1] inference_data = self.get_inference_data( posterior=None, observed_data=path, observed_data_var=["y", "Z"] ) assert hasattr(inference_data, "observed_data") assert len(inference_data.observed_data.data_vars) == 2 assert inference_data.observed_data["y"].shape == (3,) assert inference_data.observed_data["Z"].shape == (4, 5)
[ "numpy.ma.masked_values", "arviz.load_data", "arviz.from_pyro", "arviz.from_pystan", "numpy.array", "pymc3.sample", "emcee.backends.HDFBackend", "pytest.fixture", "numpy.arange", "os.remove", "pystan.StanModel", "os.path.exists", "arviz.convert_to_dataset", "arviz.from_emcee", "arviz.lis...
[((1182, 1212), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1196, 1212), False, 'import pytest\n'), ((1249, 1279), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1263, 1279), False, 'import pytest\n'), ((1315, 1343), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (1329, 1343), False, 'import pytest\n'), ((5118, 5164), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""copy"""', '[True, False]'], {}), "('copy', [True, False])\n", (5141, 5164), False, 'import pytest\n'), ((5167, 5216), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inplace"""', '[True, False]'], {}), "('inplace', [True, False])\n", (5190, 5216), False, 'import pytest\n'), ((5219, 5269), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sequence"""', '[True, False]'], {}), "('sequence', [True, False])\n", (5242, 5269), False, 'import pytest\n'), ((6812, 6858), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""copy"""', '[True, False]'], {}), "('copy', [True, False])\n", (6835, 6858), False, 'import pytest\n'), ((6861, 6910), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inplace"""', '[True, False]'], {}), "('inplace', [True, False])\n", (6884, 6910), False, 'import pytest\n'), ((6913, 6963), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sequence"""', '[True, False]'], {}), "('sequence', [True, False])\n", (6936, 6963), False, 'import pytest\n'), ((1707, 1758), 'urllib.parse.urlunsplit', 'urlunsplit', (["('file', '', centered.filename, '', '')"], {}), "(('file', '', centered.filename, '', ''))\n", (1717, 1758), False, 'from urllib.parse import urlunsplit\n'), ((2319, 2404), 'collections.namedtuple', 'namedtuple', (['"""UnknownFileMetaData"""', "['filename', 'url', 'checksum', 'description']"], {}), "('UnknownFileMetaData', ['filename', 'url', 'checksum',\n 'description'])\n", (2329, 2404), False, 'from 
collections import namedtuple, OrderedDict\n'), ((2790, 2823), 'arviz.load_arviz_data', 'load_arviz_data', (['"""centered_eight"""'], {}), "('centered_eight')\n", (2805, 2823), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((2959, 2989), 'arviz.load_arviz_data', 'load_arviz_data', (['"""test_remote"""'], {}), "('test_remote')\n", (2974, 2989), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((3002, 3035), 'os.path.exists', 'os.path.exists', (['resource.filename'], {}), '(resource.filename)\n', (3016, 3035), False, 'import os\n'), ((3205, 3235), 'arviz.load_arviz_data', 'load_arviz_data', (['"""test_remote"""'], {}), "('test_remote')\n", (3220, 3235), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((3507, 3522), 'arviz.list_datasets', 'list_datasets', ([], {}), '()\n', (3520, 3522), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((7132, 7140), 'arviz.concat', 'concat', ([], {}), '()\n', (7138, 7140), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, 
from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((10353, 10382), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (10367, 10382), False, 'import pytest\n'), ((13481, 13506), 'arviz.convert_to_dataset', 'convert_to_dataset', (['first'], {}), '(first)\n', (13499, 13506), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((13682, 13714), 'arviz.convert_to_inference_data', 'convert_to_inference_data', (['first'], {}), '(first)\n', (13707, 13714), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((13978, 14013), 'arviz.convert_to_inference_data', 'convert_to_inference_data', (['filename'], {}), '(filename)\n', (14003, 14013), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((14620, 14649), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (14634, 14649), False, 'import pytest\n'), ((17533, 17562), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (17547, 17562), False, 'import pytest\n'), ((21203, 21232), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (21217, 21232), False, 'import pytest\n'), ((23212, 23241), 'pytest.fixture', 'pytest.fixture', ([], {'scope': 
'"""class"""'}), "(scope='class')\n", (23226, 23241), False, 'import pytest\n'), ((26415, 26444), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (26429, 26444), False, 'import pytest\n'), ((29325, 29354), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (29339, 29354), False, 'import pytest\n'), ((29800, 29829), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (29814, 29829), False, 'import pytest\n'), ((37963, 37992), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (37977, 37992), False, 'import pytest\n'), ((41874, 41905), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (41888, 41905), False, 'import pytest\n'), ((42096, 42125), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (42110, 42125), False, 'import pytest\n'), ((44569, 44598), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (44583, 44598), False, 'import pytest\n'), ((1657, 1692), 'os.path.basename', 'os.path.basename', (['centered.filename'], {}), '(centered.filename)\n', (1673, 1692), False, 'import os\n'), ((2920, 2953), 'os.path.exists', 'os.path.exists', (['resource.filename'], {}), '(resource.filename)\n', (2934, 2953), False, 'import os\n'), ((3119, 3152), 'os.path.exists', 'os.path.exists', (['resource.filename'], {}), '(resource.filename)\n', (3133, 3152), False, 'import os\n'), ((3276, 3298), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (3289, 3298), False, 'import pytest\n'), ((3309, 3340), 'arviz.load_arviz_data', 'load_arviz_data', (['"""bad_checksum"""'], {}), "('bad_checksum')\n", (3324, 3340), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, 
load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((3384, 3409), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3397, 3409), False, 'import pytest\n'), ((3420, 3453), 'arviz.load_arviz_data', 'load_arviz_data', (['"""does not exist"""'], {}), "('does not exist')\n", (3435, 3453), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((4227, 4254), 'pytest.warns', 'pytest.warns', (['SyntaxWarning'], {}), '(SyntaxWarning)\n', (4239, 4254), False, 'import pytest\n'), ((5745, 5793), 'arviz.concat', 'concat', (['idata1', 'idata2'], {'copy': '(True)', 'inplace': '(False)'}), '(idata1, idata2, copy=True, inplace=False)\n', (5751, 5793), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((5845, 5905), 'arviz.concat', 'concat', (['(idata1, idata2, idata3)'], {'copy': 'copy', 'inplace': 'inplace'}), '((idata1, idata2, idata3), copy=copy, inplace=inplace)\n', (5851, 5905), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((5938, 5996), 'arviz.concat', 'concat', (['idata1', 'idata2', 'idata3'], {'copy': 'copy', 'inplace': 'inplace'}), '(idata1, idata2, idata3, copy=copy, inplace=inplace)\n', (5944, 5996), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, 
from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((7210, 7253), 'arviz.concat', 'concat', (['[idata]'], {'copy': 'copy', 'inplace': 'inplace'}), '([idata], copy=copy, inplace=inplace)\n', (7216, 7253), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((7286, 7327), 'arviz.concat', 'concat', (['idata'], {'copy': 'copy', 'inplace': 'inplace'}), '(idata, copy=copy, inplace=inplace)\n', (7292, 7327), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((7781, 7805), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (7794, 7805), False, 'import pytest\n'), ((7816, 7840), 'arviz.concat', 'concat', (['"""hello"""', '"""hello"""'], {}), "('hello', 'hello')\n", (7822, 7840), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((7953, 7977), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (7966, 7977), False, 'import pytest\n'), ((8039, 8073), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (8052, 8073), False, 'import pytest\n'), ((8084, 8104), 'arviz.concat', 'concat', (['idata', 'idata'], {}), '(idata, idata)\n', (8090, 8104), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, 
from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((10888, 10957), 'arviz.convert_to_dataset', 'convert_to_dataset', (['data.datadict'], {'coords': 'data.coords', 'dims': 'data.dims'}), '(data.datadict, coords=data.coords, dims=data.dims)\n', (10906, 10957), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((11351, 11413), 'arviz.convert_to_dataset', 'convert_to_dataset', (['data.datadict'], {'coords': 'None', 'dims': 'data.dims'}), '(data.datadict, coords=None, dims=data.dims)\n', (11369, 11413), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((11924, 11983), 'arviz.convert_to_dataset', 'convert_to_dataset', (['data.datadict'], {'coords': 'coords', 'dims': 'None'}), '(data.datadict, coords=coords, dims=None)\n', (11942, 11983), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((12529, 12588), 'arviz.convert_to_dataset', 'convert_to_dataset', (['data.datadict'], {'coords': 'coords', 'dims': 'dims'}), '(data.datadict, coords=coords, dims=dims)\n', (12547, 12588), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, 
save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((12994, 13014), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (13009, 13014), True, 'import numpy as np\n'), ((13021, 13048), 'numpy.random.randn', 'np.random.randn', (['(1)', '(100)', '(10)'], {}), '(1, 100, 10)\n', (13036, 13048), True, 'import numpy as np\n'), ((13445, 13465), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (13460, 13465), True, 'import numpy as np\n'), ((13633, 13653), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (13648, 13653), True, 'import numpy as np\n'), ((13849, 13869), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (13864, 13869), True, 'import numpy as np\n'), ((14112, 14137), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14125, 14137), False, 'import pytest\n'), ((14148, 14176), 'arviz.convert_to_inference_data', 'convert_to_inference_data', (['(1)'], {}), '(1)\n', (14173, 14176), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((14262, 14282), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (14277, 14282), True, 'import numpy as np\n'), ((14387, 14412), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14400, 14412), False, 'import pytest\n'), ((14423, 14464), 'arviz.convert_to_dataset', 'convert_to_dataset', (['filename'], {'group': '"""bar"""'}), "(filename, group='bar')\n", (14441, 14464), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), 
((14511, 14536), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14524, 14536), False, 'import pytest\n'), ((14547, 14581), 'arviz.InferenceData', 'InferenceData', ([], {'posterior': '[1, 2, 3]'}), '(posterior=[1, 2, 3])\n', (14560, 14581), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((20551, 20574), 'numpy.random.randn', 'np.random.randn', (['(4)', '(100)'], {}), '(4, 100)\n', (20566, 20574), True, 'import numpy as np\n'), ((21533, 21584), 'arviz.from_emcee', 'from_emcee', (['data.obj'], {'var_names': "['ln(f)', 'b', 'm']"}), "(data.obj, var_names=['ln(f)', 'b', 'm'])\n", (21543, 21584), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((21789, 21823), 'os.path.join', 'os.path.join', (['here', '"""saved_models"""'], {}), "(here, 'saved_models')\n", (21801, 21823), False, 'import os\n'), ((21844, 21894), 'os.path.join', 'os.path.join', (['data_directory', '"""reader_testfile.h5"""'], {}), "(data_directory, 'reader_testfile.h5')\n", (21856, 21894), False, 'import os\n'), ((21911, 21935), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (21925, 21935), False, 'import os\n'), ((21952, 21977), 'os.path.getsize', 'os.path.getsize', (['filepath'], {}), '(filepath)\n', (21967, 21977), False, 'import os\n'), ((21996, 22041), 'emcee.backends.HDFBackend', 'backends.HDFBackend', (['filepath'], {'read_only': '(True)'}), '(filepath, read_only=True)\n', (22015, 22041), False, 'from emcee import backends\n'), ((22058, 22107), 'arviz.from_emcee', 'from_emcee', (['reader'], 
{'var_names': "['ln(f)', 'b', 'm']"}), "(reader, var_names=['ln(f)', 'b', 'm'])\n", (22068, 22107), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((24266, 24300), 'os.path.join', 'os.path.join', (['here', '"""saved_models"""'], {}), "(here, 'saved_models')\n", (24278, 24300), False, 'import os\n'), ((24321, 24376), 'os.path.join', 'os.path.join', (['data_directory', '"""io_function_testfile.nc"""'], {}), "(data_directory, 'io_function_testfile.nc')\n", (24333, 24376), False, 'import os\n'), ((24410, 24445), 'arviz.to_netcdf', 'to_netcdf', (['inference_data', 'filepath'], {}), '(inference_data, filepath)\n', (24419, 24445), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((24462, 24486), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (24476, 24486), False, 'import os\n'), ((24560, 24581), 'arviz.from_netcdf', 'from_netcdf', (['filepath'], {}), '(filepath)\n', (24571, 24581), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((24645, 24664), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (24654, 24664), False, 'import os\n'), ((24756, 24791), 'arviz.save_data', 'save_data', (['inference_data', 'filepath'], {}), '(inference_data, filepath)\n', (24765, 24791), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, 
from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((24808, 24832), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (24822, 24832), False, 'import os\n'), ((24906, 24925), 'arviz.load_data', 'load_data', (['filepath'], {}), '(filepath)\n', (24915, 24925), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((24989, 25008), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (24998, 25008), False, 'import os\n'), ((25380, 25414), 'os.path.join', 'os.path.join', (['here', '"""saved_models"""'], {}), "(here, 'saved_models')\n", (25392, 25414), False, 'import os\n'), ((25435, 25488), 'os.path.join', 'os.path.join', (['data_directory', '"""io_method_testfile.nc"""'], {}), "(data_directory, 'io_method_testfile.nc')\n", (25447, 25488), False, 'import os\n'), ((25626, 25650), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (25640, 25650), False, 'import os\n'), ((25724, 25759), 'arviz.InferenceData.from_netcdf', 'InferenceData.from_netcdf', (['filepath'], {}), '(filepath)\n', (25749, 25759), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((25823, 25842), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (25832, 25842), False, 'import os\n'), ((25965, 25980), 'arviz.InferenceData', 'InferenceData', ([], {}), '()\n', (25978, 25980), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, 
from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((26066, 26100), 'os.path.join', 'os.path.join', (['here', '"""saved_models"""'], {}), "(here, 'saved_models')\n", (26078, 26100), False, 'import os\n'), ((26121, 26171), 'os.path.join', 'os.path.join', (['data_directory', '"""empty_test_file.nc"""'], {}), "(data_directory, 'empty_test_file.nc')\n", (26133, 26171), False, 'import os\n'), ((26277, 26301), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (26291, 26301), False, 'import os\n'), ((26311, 26330), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (26320, 26330), False, 'import os\n'), ((28015, 28060), 'numpy.ma.masked_values', 'ma.masked_values', (['[1, 2, -1, 4, -1]'], {'value': '(-1)'}), '([1, 2, -1, 4, -1], value=-1)\n', (28031, 28060), False, 'from numpy import ma\n'), ((28078, 28088), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (28086, 28088), True, 'import pymc3 as pm\n'), ((28418, 28441), 'arviz.from_pymc3', 'from_pymc3', ([], {'trace': 'trace'}), '(trace=trace)\n', (28428, 28441), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((28698, 28717), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (28713, 28717), True, 'import numpy as np\n'), ((28737, 28757), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (28752, 28757), True, 'import numpy as np\n'), ((29000, 29023), 'arviz.from_pymc3', 'from_pymc3', ([], {'trace': 'trace'}), '(trace=trace)\n', (29010, 29023), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, 
from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((29577, 29606), 'arviz.from_pyro', 'from_pyro', ([], {'posterior': 'data.obj'}), '(posterior=data.obj)\n', (29586, 29606), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((32259, 32459), 'arviz.from_pystan', 'from_pystan', ([], {'posterior': 'data.obj', 'posterior_predictive': 'None', 'prior': 'data.obj', 'prior_predictive': 'None', 'observed_data': '"""y"""', 'coords': 'None', 'dims': 'None', 'posterior_model': 'data.model', 'prior_model': 'data.model'}), "(posterior=data.obj, posterior_predictive=None, prior=data.obj,\n prior_predictive=None, observed_data='y', coords=None, dims=None,\n posterior_model=data.model, prior_model=data.model)\n", (32270, 32459), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((37366, 37392), 'arviz.from_pystan', 'from_pystan', ([], {'posterior': 'fit'}), '(posterior=fit)\n', (37377, 37392), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((38836, 38854), 'arviz.from_tfp', 'from_tfp', (['data.obj'], {}), '(data.obj)\n', (38844, 38854), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, 
save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((42022, 42056), 'os.path.join', 'os.path.join', (['here', '"""saved_models"""'], {}), "(here, 'saved_models')\n", (42034, 42056), False, 'import os\n'), ((44956, 44999), 'arviz.from_cmdstan', 'from_cmdstan', ([], {'posterior': 'posterior'}), '(posterior=posterior, **kwargs)\n', (44968, 44999), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((3067, 3101), 'os.path.dirname', 'os.path.dirname', (['resource.filename'], {}), '(resource.filename)\n', (3082, 3101), False, 'import os\n'), ((8002, 8027), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (8010, 8027), True, 'import numpy as np\n'), ((8228, 8249), 'numpy.random.randn', 'np.random.randn', (['size'], {}), '(size)\n', (8243, 8249), True, 'import numpy as np\n'), ((8546, 8573), 'pytest.warns', 'pytest.warns', (['SyntaxWarning'], {}), '(SyntaxWarning)\n', (8558, 8573), False, 'import pytest\n'), ((8740, 8763), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (8755, 8763), True, 'import numpy as np\n'), ((9184, 9207), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (9199, 9207), True, 'import numpy as np\n'), ((9788, 9815), 'pytest.warns', 'pytest.warns', (['SyntaxWarning'], {}), '(SyntaxWarning)\n', (9800, 9815), False, 'import pytest\n'), ((11840, 11852), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (11849, 11852), True, 'import numpy as np\n'), ((11865, 11877), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (11874, 11877), True, 'import numpy as np\n'), ((11890, 11903), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (11899, 11903), True, 'import numpy as np\n'), ((12450, 12462), 'numpy.arange', 'np.arange', 
(['(3)'], {}), '(3)\n', (12459, 12462), True, 'import numpy as np\n'), ((12470, 12482), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (12479, 12482), True, 'import numpy as np\n'), ((12495, 12508), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (12504, 12508), True, 'import numpy as np\n'), ((20168, 20191), 'numpy.random.randn', 'np.random.randn', (['(4)', '(100)'], {}), '(4, 100)\n', (20183, 20191), True, 'import numpy as np\n'), ((20224, 20250), 'numpy.random.randn', 'np.random.randn', (['(4)', '(100)', '(8)'], {}), '(4, 100, 8)\n', (20239, 20250), True, 'import numpy as np\n'), ((20320, 20355), 'arviz.from_dict', 'from_dict', ([], {'posterior': 'log_likelihood'}), '(posterior=log_likelihood)\n', (20329, 20355), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((20410, 20460), 'arviz.from_dict', 'from_dict', ([], {'observed_data': 'log_likelihood', 'dims': 'None'}), '(observed_data=log_likelihood, dims=None)\n', (20419, 20460), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((20616, 20640), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (20629, 20640), False, 'import pytest\n'), ((20655, 20677), 'arviz.from_dict', 'from_dict', ([], {'posterior': 'x'}), '(posterior=x)\n', (20664, 20677), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((20692, 
20716), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (20705, 20716), False, 'import pytest\n'), ((20731, 20764), 'arviz.from_dict', 'from_dict', ([], {'posterior_predictive': 'x'}), '(posterior_predictive=x)\n', (20740, 20764), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((20779, 20803), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (20792, 20803), False, 'import pytest\n'), ((20818, 20843), 'arviz.from_dict', 'from_dict', ([], {'sample_stats': 'x'}), '(sample_stats=x)\n', (20827, 20843), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((20858, 20882), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (20871, 20882), False, 'import pytest\n'), ((20897, 20915), 'arviz.from_dict', 'from_dict', ([], {'prior': 'x'}), '(prior=x)\n', (20906, 20915), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((20930, 20954), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (20943, 20954), False, 'import pytest\n'), ((20969, 20998), 'arviz.from_dict', 'from_dict', ([], {'prior_predictive': 'x'}), '(prior_predictive=x)\n', (20978, 20998), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, 
from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((21013, 21037), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (21026, 21037), False, 'import pytest\n'), ((21052, 21083), 'arviz.from_dict', 'from_dict', ([], {'sample_stats_prior': 'x'}), '(sample_stats_prior=x)\n', (21061, 21083), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((21098, 21122), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (21111, 21122), False, 'import pytest\n'), ((21137, 21163), 'arviz.from_dict', 'from_dict', ([], {'observed_data': 'x'}), '(observed_data=x)\n', (21146, 21163), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((21736, 21761), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (21751, 21761), False, 'import os\n'), ((22685, 22710), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (22698, 22710), False, 'import pytest\n'), ((22725, 22774), 'arviz.from_emcee', 'from_emcee', (['data.obj'], {'var_names': "['not', 'enough']"}), "(data.obj, var_names=['not', 'enough'])\n", (22735, 22774), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((22835, 22860), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (22848, 22860), False, 
'import pytest\n'), ((22875, 22924), 'arviz.from_emcee', 'from_emcee', (['data.obj'], {'arg_names': "['not', 'enough']"}), "(data.obj, arg_names=['not', 'enough'])\n", (22885, 22924), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((23550, 23578), 'pymc3.sample_prior_predictive', 'pm.sample_prior_predictive', ([], {}), '()\n', (23576, 23578), True, 'import pymc3 as pm\n'), ((23615, 23655), 'pymc3.sample_posterior_predictive', 'pm.sample_posterior_predictive', (['data.obj'], {}), '(data.obj)\n', (23645, 23655), True, 'import pymc3 as pm\n'), ((24213, 24238), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (24228, 24238), False, 'import os\n'), ((24503, 24528), 'os.path.getsize', 'os.path.getsize', (['filepath'], {}), '(filepath)\n', (24518, 24528), False, 'import os\n'), ((24685, 24709), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (24699, 24709), False, 'import os\n'), ((24849, 24874), 'os.path.getsize', 'os.path.getsize', (['filepath'], {}), '(filepath)\n', (24864, 24874), False, 'import os\n'), ((25029, 25053), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (25043, 25053), False, 'import os\n'), ((25327, 25352), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (25342, 25352), False, 'import os\n'), ((25509, 25533), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (25523, 25533), False, 'import os\n'), ((25667, 25692), 'os.path.getsize', 'os.path.getsize', (['filepath'], {}), '(filepath)\n', (25682, 25692), False, 'import os\n'), ((25863, 25887), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (25877, 25887), False, 'import os\n'), ((26013, 26038), 'os.path.abspath', 'os.path.abspath', 
(['__file__'], {}), '(__file__)\n', (26028, 26038), False, 'import os\n'), ((26192, 26216), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (26206, 26216), False, 'import os\n'), ((26351, 26375), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (26365, 26375), False, 'import os\n'), ((26728, 26756), 'pymc3.sample_prior_predictive', 'pm.sample_prior_predictive', ([], {}), '()\n', (26754, 26756), True, 'import pymc3 as pm\n'), ((26793, 26833), 'pymc3.sample_posterior_predictive', 'pm.sample_posterior_predictive', (['data.obj'], {}), '(data.obj)\n', (26823, 26833), True, 'import pymc3 as pm\n'), ((28127, 28147), 'pymc3.Normal', 'pm.Normal', (['"""x"""', '(1)', '(1)'], {}), "('x', 1, 1)\n", (28136, 28147), True, 'import pymc3 as pm\n'), ((28161, 28196), 'pymc3.Normal', 'pm.Normal', (['"""y"""', 'x', '(1)'], {'observed': 'data'}), "('y', x, 1, observed=data)\n", (28170, 28196), True, 'import pymc3 as pm\n'), ((28218, 28242), 'pymc3.sample', 'pm.sample', (['(100)'], {'chains': '(2)'}), '(100, chains=2)\n', (28227, 28242), True, 'import pymc3 as pm\n'), ((28772, 28782), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (28780, 28782), True, 'import pymc3 as pm\n'), ((28801, 28821), 'pymc3.Normal', 'pm.Normal', (['"""x"""', '(1)', '(1)'], {}), "('x', 1, 1)\n", (28810, 28821), True, 'import pymc3 as pm\n'), ((28835, 28874), 'pymc3.Normal', 'pm.Normal', (['"""y1"""', 'x', '(1)'], {'observed': 'y1_data'}), "('y1', x, 1, observed=y1_data)\n", (28844, 28874), True, 'import pymc3 as pm\n'), ((28888, 28927), 'pymc3.Normal', 'pm.Normal', (['"""y2"""', 'x', '(1)'], {'observed': 'y2_data'}), "('y2', x, 1, observed=y2_data)\n", (28897, 28927), True, 'import pymc3 as pm\n'), ((28949, 28973), 'pymc3.sample', 'pm.sample', (['(100)'], {'chains': '(2)'}), '(100, chains=2)\n', (28958, 28973), True, 'import pymc3 as pm\n'), ((35657, 35689), 'pystan.StanModel', 'StanModel', ([], {'model_code': 'model_code'}), '(model_code=model_code)\n', (35666, 
35689), False, 'from pystan import StanModel\n'), ((35797, 35823), 'arviz.from_pystan', 'from_pystan', ([], {'posterior': 'fit'}), '(posterior=fit)\n', (35808, 35823), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((41969, 41994), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (41984, 41994), False, 'import os\n'), ((42927, 42992), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/output_no_warmup[0-9].csv"""'], {}), "(data_directory, 'cmdstan/output_no_warmup[0-9].csv')\n", (42939, 42992), False, 'import os\n'), ((43022, 43084), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/output_warmup[0-9].csv"""'], {}), "(data_directory, 'cmdstan/output_warmup[0-9].csv')\n", (43034, 43084), False, 'import os\n'), ((43368, 43437), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/combined_output_no_warmup.csv"""'], {}), "(data_directory, 'cmdstan/combined_output_no_warmup.csv')\n", (43380, 43437), False, 'import os\n'), ((43508, 43574), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/combined_output_warmup.csv"""'], {}), "(data_directory, 'cmdstan/combined_output_warmup.csv')\n", (43520, 43574), False, 'import os\n'), ((43643, 43712), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/eight_schools_output[0-9].csv"""'], {}), "(data_directory, 'cmdstan/eight_schools_output[0-9].csv')\n", (43655, 43712), False, 'import os\n'), ((44697, 44757), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/eight_schools.data.R"""'], {}), "(data_directory, 'cmdstan/eight_schools.data.R')\n", (44709, 44757), False, 'import os\n'), ((44772, 44831), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/example_stan.data.R"""'], {}), 
"(data_directory, 'cmdstan/example_stan.data.R')\n", (44784, 44831), False, 'import os\n'), ((46077, 46118), 'numpy.isclose', 'np.isclose', (['y_mean', 'y_mean_true'], {'atol': '(0.1)'}), '(y_mean, y_mean_true, atol=0.1)\n', (46087, 46118), True, 'import numpy as np\n'), ((46147, 46166), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (46155, 46166), True, 'import numpy as np\n'), ((46330, 46352), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (46338, 46352), True, 'import numpy as np\n'), ((4735, 4760), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(2)'], {}), '(2, 10, 2)\n', (4750, 4760), True, 'import numpy as np\n'), ((4767, 4795), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(5)', '(2)'], {}), '(2, 10, 5, 2)\n', (4782, 4795), True, 'import numpy as np\n'), ((4840, 4865), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(2)'], {}), '(2, 10, 2)\n', (4855, 4865), True, 'import numpy as np\n'), ((4872, 4900), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(5)', '(2)'], {}), '(2, 10, 5, 2)\n', (4887, 4900), True, 'import numpy as np\n'), ((5363, 5388), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(2)'], {}), '(2, 10, 2)\n', (5378, 5388), True, 'import numpy as np\n'), ((5395, 5423), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(5)', '(2)'], {}), '(2, 10, 5, 2)\n', (5410, 5423), True, 'import numpy as np\n'), ((5555, 5580), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(2)'], {}), '(2, 10, 2)\n', (5570, 5580), True, 'import numpy as np\n'), ((5587, 5615), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(5)', '(2)'], {}), '(2, 10, 5, 2)\n', (5602, 5615), True, 'import numpy as np\n'), ((5662, 5682), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (5677, 5682), True, 'import numpy as np\n'), ((5689, 5712), 'numpy.random.randn', 'np.random.randn', (['(2)', '(100)'], {}), '(2, 100)\n', (5704, 5712), True, 
'import numpy as np\n'), ((7056, 7081), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(2)'], {}), '(2, 10, 2)\n', (7071, 7081), True, 'import numpy as np\n'), ((7088, 7116), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(5)', '(2)'], {}), '(2, 10, 5, 2)\n', (7103, 7116), True, 'import numpy as np\n'), ((7880, 7905), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(2)'], {}), '(2, 10, 2)\n', (7895, 7905), True, 'import numpy as np\n'), ((7912, 7940), 'numpy.random.randn', 'np.random.randn', (['(2)', '(10)', '(5)', '(2)'], {}), '(2, 10, 5, 2)\n', (7927, 7940), True, 'import numpy as np\n'), ((8607, 8630), 'numpy.random.randn', 'np.random.randn', (['(100)', '(4)'], {}), '(100, 4)\n', (8622, 8630), True, 'import numpy as np\n'), ((9873, 9896), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (9888, 9896), True, 'import numpy as np\n'), ((10531, 10551), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (10546, 10551), True, 'import numpy as np\n'), ((10575, 10602), 'numpy.random.randn', 'np.random.randn', (['(1)', '(100)', '(10)'], {}), '(1, 100, 10)\n', (10590, 10602), True, 'import numpy as np\n'), ((10626, 10655), 'numpy.random.randn', 'np.random.randn', (['(1)', '(100)', '(3)', '(4)'], {}), '(1, 100, 3, 4)\n', (10641, 10655), True, 'import numpy as np\n'), ((10701, 10713), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (10710, 10713), True, 'import numpy as np\n'), ((10721, 10733), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (10730, 10733), True, 'import numpy as np\n'), ((10741, 10754), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (10750, 10754), True, 'import numpy as np\n'), ((13107, 13120), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (13116, 13120), True, 'import numpy as np\n'), ((34642, 34697), 'numpy.array', 'np.array', (['[28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]'], {}), '([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0])\n', (34650, 34697), 
True, 'import numpy as np\n'), ((34725, 34782), 'numpy.array', 'np.array', (['[15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]'], {}), '([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0])\n', (34733, 34782), True, 'import numpy as np\n'), ((34955, 34984), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (34968, 34984), False, 'import pytest\n'), ((35007, 35043), 'arviz.from_pystan', 'from_pystan', ([], {'posterior': 'fit_test_grad'}), '(posterior=fit_test_grad)\n', (35018, 35043), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((35198, 35227), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (35211, 35227), False, 'import pytest\n'), ((35250, 35276), 'arviz.from_pystan', 'from_pystan', ([], {'posterior': 'fit'}), '(posterior=fit)\n', (35261, 35276), False, 'from arviz import concat, convert_to_inference_data, convert_to_dataset, from_cmdstan, from_dict, from_pymc3, from_pystan, from_pyro, from_emcee, from_netcdf, from_tfp, to_netcdf, load_data, save_data, load_arviz_data, list_datasets, clear_data_home, InferenceData\n'), ((36767, 36780), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (36778, 36780), False, 'from collections import namedtuple, OrderedDict\n'), ((42228, 42289), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/output_no_warmup1.csv"""'], {}), "(data_directory, 'cmdstan/output_no_warmup1.csv')\n", (42240, 42289), False, 'import os\n'), ((42308, 42369), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/output_no_warmup2.csv"""'], {}), "(data_directory, 'cmdstan/output_no_warmup2.csv')\n", (42320, 42369), False, 'import os\n'), ((42388, 42449), 'os.path.join', 'os.path.join', (['data_directory', 
'"""cmdstan/output_no_warmup3.csv"""'], {}), "(data_directory, 'cmdstan/output_no_warmup3.csv')\n", (42400, 42449), False, 'import os\n'), ((42468, 42529), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/output_no_warmup4.csv"""'], {}), "(data_directory, 'cmdstan/output_no_warmup4.csv')\n", (42480, 42529), False, 'import os\n'), ((42589, 42647), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/output_warmup1.csv"""'], {}), "(data_directory, 'cmdstan/output_warmup1.csv')\n", (42601, 42647), False, 'import os\n'), ((42666, 42724), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/output_warmup2.csv"""'], {}), "(data_directory, 'cmdstan/output_warmup2.csv')\n", (42678, 42724), False, 'import os\n'), ((42743, 42801), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/output_warmup3.csv"""'], {}), "(data_directory, 'cmdstan/output_warmup3.csv')\n", (42755, 42801), False, 'import os\n'), ((42820, 42878), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/output_warmup4.csv"""'], {}), "(data_directory, 'cmdstan/output_warmup4.csv')\n", (42832, 42878), False, 'import os\n'), ((43140, 43209), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/combined_output_no_warmup.csv"""'], {}), "(data_directory, 'cmdstan/combined_output_no_warmup.csv')\n", (43152, 43209), False, 'import os\n'), ((43259, 43325), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/combined_output_warmup.csv"""'], {}), "(data_directory, 'cmdstan/combined_output_warmup.csv')\n", (43271, 43325), False, 'import os\n'), ((43795, 43860), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/eight_schools_output1.csv"""'], {}), "(data_directory, 'cmdstan/eight_schools_output1.csv')\n", (43807, 43860), False, 'import os\n'), ((43879, 43944), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/eight_schools_output2.csv"""'], {}), "(data_directory, 'cmdstan/eight_schools_output2.csv')\n", (43891, 
43944), False, 'import os\n'), ((43963, 44028), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/eight_schools_output3.csv"""'], {}), "(data_directory, 'cmdstan/eight_schools_output3.csv')\n", (43975, 44028), False, 'import os\n'), ((44047, 44112), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/eight_schools_output4.csv"""'], {}), "(data_directory, 'cmdstan/eight_schools_output4.csv')\n", (44059, 44112), False, 'import os\n'), ((44179, 44246), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/combined_missing_config.csv"""'], {}), "(data_directory, 'cmdstan/combined_missing_config.csv')\n", (44191, 44246), False, 'import os\n'), ((44265, 44336), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/combined_missing_adaptation.csv"""'], {}), "(data_directory, 'cmdstan/combined_missing_adaptation.csv')\n", (44277, 44336), False, 'import os\n'), ((44355, 44423), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/combined_missing_timing1.csv"""'], {}), "(data_directory, 'cmdstan/combined_missing_timing1.csv')\n", (44367, 44423), False, 'import os\n'), ((44442, 44510), 'os.path.join', 'os.path.join', (['data_directory', '"""cmdstan/combined_missing_timing2.csv"""'], {}), "(data_directory, 'cmdstan/combined_missing_timing2.csv')\n", (44454, 44510), False, 'import os\n'), ((15865, 15901), 'numpy.arange', 'np.arange', (["eight_schools_params['J']"], {}), "(eight_schools_params['J'])\n", (15874, 15901), True, 'import numpy as np\n'), ((17136, 17172), 'numpy.arange', 'np.arange', (["eight_schools_params['J']"], {}), "(eight_schools_params['J'])\n", (17145, 17172), True, 'import numpy as np\n'), ((18980, 19016), 'numpy.arange', 'np.arange', (["eight_schools_params['J']"], {}), "(eight_schools_params['J'])\n", (18989, 19016), True, 'import numpy as np\n'), ((23828, 23864), 'numpy.arange', 'np.arange', (["eight_schools_params['J']"], {}), "(eight_schools_params['J'])\n", (23837, 23864), True, 'import numpy as 
np\n'), ((27006, 27042), 'numpy.arange', 'np.arange', (["eight_schools_params['J']"], {}), "(eight_schools_params['J'])\n", (27015, 27042), True, 'import numpy as np\n'), ((30370, 30406), 'numpy.arange', 'np.arange', (["eight_schools_params['J']"], {}), "(eight_schools_params['J'])\n", (30379, 30406), True, 'import numpy as np\n'), ((31113, 31149), 'numpy.arange', 'np.arange', (["eight_schools_params['J']"], {}), "(eight_schools_params['J'])\n", (31122, 31149), True, 'import numpy as np\n'), ((31190, 31226), 'numpy.arange', 'np.arange', (["eight_schools_params['J']"], {}), "(eight_schools_params['J'])\n", (31199, 31226), True, 'import numpy as np\n'), ((31934, 31970), 'numpy.arange', 'np.arange', (["eight_schools_params['J']"], {}), "(eight_schools_params['J'])\n", (31943, 31970), True, 'import numpy as np\n'), ((39781, 39817), 'numpy.arange', 'np.arange', (["eight_schools_params['J']"], {}), "(eight_schools_params['J'])\n", (39790, 39817), True, 'import numpy as np\n'), ((46254, 46295), 'numpy.isclose', 'np.isclose', (['x_mean', 'x_mean_true'], {'atol': '(0.1)'}), '(x_mean, x_mean_true, atol=0.1)\n', (46264, 46295), True, 'import numpy as np\n'), ((46453, 46494), 'numpy.isclose', 'np.isclose', (['Z_mean', 'Z_mean_true'], {'atol': '(0.7)'}), '(Z_mean, Z_mean_true, atol=0.7)\n', (46463, 46494), True, 'import numpy as np\n'), ((54033, 54058), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (54046, 54058), False, 'import pytest\n'), ((15160, 15183), 'numpy.swapaxes', 'np.swapaxes', (['vals', '(0)', '(1)'], {}), '(vals, 0, 1)\n', (15171, 15183), True, 'import numpy as np\n'), ((18073, 18096), 'numpy.swapaxes', 'np.swapaxes', (['vals', '(0)', '(1)'], {}), '(vals, 0, 1)\n', (18084, 18096), True, 'import numpy as np\n'), ((47457, 47469), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (47466, 47469), True, 'import numpy as np\n'), ((48967, 48979), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (48976, 48979), True, 'import numpy as 
np\n'), ((50553, 50565), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (50562, 50565), True, 'import numpy as np\n'), ((50584, 50596), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (50593, 50596), True, 'import numpy as np\n'), ((51820, 51832), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (51829, 51832), True, 'import numpy as np\n'), ((53027, 53039), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (53036, 53039), True, 'import numpy as np\n'), ((53056, 53068), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (53065, 53068), True, 'import numpy as np\n'), ((39453, 39549), 'numpy.vstack', 'np.vstack', (["(eight_schools_params['y'], eight_schools_params['y'], eight_schools_params\n ['y'])"], {}), "((eight_schools_params['y'], eight_schools_params['y'],\n eight_schools_params['y']))\n", (39462, 39549), True, 'import numpy as np\n'), ((40087, 40112), 'numpy.ones_like', 'np.ones_like', (['data.obj[0]'], {}), '(data.obj[0])\n', (40099, 40112), True, 'import numpy as np\n')]
import tensorflow as tf import numpy as np import input_data def sample_prob(probs): return tf.floor(probs + tf.random_uniform(tf.shape(probs), 0, 1)) # return tf.select((tf.random_uniform(tf.shape(probs), 0, 1) - probs) > 0.5, tf.ones(tf.shape(probs)), tf.zeros(tf.shape(probs))) learning_rate = 0.1 momentum = 0.9 batchsize = 100 mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) trX = mnist.train.images X = tf.placeholder("float", [None, 784]) Y = tf.placeholder("float", [None, 10]) rbm_w = tf.placeholder("float", [784, 500]) rbm_vb = tf.placeholder("float", [784]) rbm_hb = tf.placeholder("float", [500]) rbm_w_inc = tf.placeholder("float", [784, 500]) rbm_vb_inc = tf.placeholder("float", [784]) rbm_hb_inc = tf.placeholder("float", [500]) h0_a = tf.nn.sigmoid(tf.matmul(X, rbm_w) + rbm_hb) h0 = sample_prob(h0_a) v1_a = tf.nn.sigmoid(tf.matmul(h0, tf.transpose(rbm_w)) + rbm_vb) v1 = sample_prob(v1_a) h1_a = tf.nn.sigmoid(tf.matmul(v1, rbm_w) + rbm_hb) # h1 = sample_prob(h1_a) w_positive_grad = tf.matmul(tf.transpose(X), h0_a) w_negative_grad = tf.matmul(tf.transpose(v1_a), h1_a) grad_w = (w_positive_grad - w_negative_grad) / tf.to_float(tf.shape(X)[0]) grad_vb = tf.reduce_mean(X - v1_a, 0) grad_hb = tf.reduce_mean(h0 - h1_a, 0) update_w_inc = momentum * rbm_w_inc + (learning_rate / batchsize) * grad_w update_vb_inc = momentum * rbm_vb_inc + (learning_rate / batchsize) * grad_vb update_hb_inc = momentum * rbm_hb_inc + (learning_rate / batchsize) * grad_hb update_w = rbm_w + update_w_inc update_vb = rbm_vb + update_vb_inc update_hb = rbm_hb + update_hb_inc err = X - v1_a err_sum = tf.reduce_mean(err * err) sess = tf.Session() init = tf.initialize_all_variables() sess.run(init) o_w = np.zeros([784, 500], np.float32) o_vb = np.zeros([784], np.float32) o_hb = np.zeros([500], np.float32) o_w_inc = np.zeros([784, 500], np.float32) o_vb_inc = np.zeros([784], np.float32) o_hb_inc = np.zeros([500], np.float32) print(sess.run(err_sum, feed_dict={X: trX, rbm_w: o_w, 
rbm_vb: o_vb, rbm_hb: o_hb})) for e in range(0, 50): for start, end in zip(range(0, len(trX), batchsize), range(batchsize, len(trX), batchsize)): batch = trX[start:end] o_w_inc, o_vb_inc, o_hb_inc, o_w, o_vb, o_hb = sess.run([update_w_inc, update_vb_inc, update_hb_inc, update_w, update_vb, update_hb], feed_dict={X: batch, rbm_w_inc: o_w_inc, rbm_vb_inc: o_vb_inc, rbm_hb_inc: o_hb_inc, rbm_w: o_w, rbm_vb: o_vb, rbm_hb: o_hb}) print(sess.run(err_sum, feed_dict={X: trX, rbm_w: o_w, rbm_vb: o_vb, rbm_hb: o_hb}))
[ "tensorflow.initialize_all_variables", "tensorflow.shape", "tensorflow.transpose", "tensorflow.placeholder", "tensorflow.Session", "numpy.zeros", "tensorflow.matmul", "tensorflow.reduce_mean", "input_data.read_data_sets" ]
[((351, 405), 'input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data/"""'], {'one_hot': '(True)'}), "('MNIST_data/', one_hot=True)\n", (376, 405), False, 'import input_data\n'), ((436, 472), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 784]'], {}), "('float', [None, 784])\n", (450, 472), True, 'import tensorflow as tf\n'), ((477, 512), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 10]'], {}), "('float', [None, 10])\n", (491, 512), True, 'import tensorflow as tf\n'), ((522, 557), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[784, 500]'], {}), "('float', [784, 500])\n", (536, 557), True, 'import tensorflow as tf\n'), ((567, 597), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[784]'], {}), "('float', [784])\n", (581, 597), True, 'import tensorflow as tf\n'), ((607, 637), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[500]'], {}), "('float', [500])\n", (621, 637), True, 'import tensorflow as tf\n'), ((651, 686), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[784, 500]'], {}), "('float', [784, 500])\n", (665, 686), True, 'import tensorflow as tf\n'), ((700, 730), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[784]'], {}), "('float', [784])\n", (714, 730), True, 'import tensorflow as tf\n'), ((744, 774), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[500]'], {}), "('float', [500])\n", (758, 774), True, 'import tensorflow as tf\n'), ((1210, 1237), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(X - v1_a)', '(0)'], {}), '(X - v1_a, 0)\n', (1224, 1237), True, 'import tensorflow as tf\n'), ((1248, 1276), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(h0 - h1_a)', '(0)'], {}), '(h0 - h1_a, 0)\n', (1262, 1276), True, 'import tensorflow as tf\n'), ((1638, 1663), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(err * err)'], {}), '(err * err)\n', (1652, 1663), True, 'import tensorflow as tf\n'), ((1672, 1684), 
'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1682, 1684), True, 'import tensorflow as tf\n'), ((1692, 1721), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1719, 1721), True, 'import tensorflow as tf\n'), ((1744, 1776), 'numpy.zeros', 'np.zeros', (['[784, 500]', 'np.float32'], {}), '([784, 500], np.float32)\n', (1752, 1776), True, 'import numpy as np\n'), ((1784, 1811), 'numpy.zeros', 'np.zeros', (['[784]', 'np.float32'], {}), '([784], np.float32)\n', (1792, 1811), True, 'import numpy as np\n'), ((1819, 1846), 'numpy.zeros', 'np.zeros', (['[500]', 'np.float32'], {}), '([500], np.float32)\n', (1827, 1846), True, 'import numpy as np\n'), ((1858, 1890), 'numpy.zeros', 'np.zeros', (['[784, 500]', 'np.float32'], {}), '([784, 500], np.float32)\n', (1866, 1890), True, 'import numpy as np\n'), ((1902, 1929), 'numpy.zeros', 'np.zeros', (['[784]', 'np.float32'], {}), '([784], np.float32)\n', (1910, 1929), True, 'import numpy as np\n'), ((1941, 1968), 'numpy.zeros', 'np.zeros', (['[500]', 'np.float32'], {}), '([500], np.float32)\n', (1949, 1968), True, 'import numpy as np\n'), ((1047, 1062), 'tensorflow.transpose', 'tf.transpose', (['X'], {}), '(X)\n', (1059, 1062), True, 'import tensorflow as tf\n'), ((1098, 1116), 'tensorflow.transpose', 'tf.transpose', (['v1_a'], {}), '(v1_a)\n', (1110, 1116), True, 'import tensorflow as tf\n'), ((797, 816), 'tensorflow.matmul', 'tf.matmul', (['X', 'rbm_w'], {}), '(X, rbm_w)\n', (806, 816), True, 'import tensorflow as tf\n'), ((962, 982), 'tensorflow.matmul', 'tf.matmul', (['v1', 'rbm_w'], {}), '(v1, rbm_w)\n', (971, 982), True, 'import tensorflow as tf\n'), ((886, 905), 'tensorflow.transpose', 'tf.transpose', (['rbm_w'], {}), '(rbm_w)\n', (898, 905), True, 'import tensorflow as tf\n'), ((1184, 1195), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (1192, 1195), True, 'import tensorflow as tf\n'), ((132, 147), 'tensorflow.shape', 'tf.shape', (['probs'], {}), '(probs)\n', (140, 147), 
True, 'import tensorflow as tf\n')]
import numpy as np import pandas as pd from copy import deepcopy def super_str(x): if isinstance(x,np.int64): x=float(x) if isinstance(x,int): x=float(x) ans=str(x) return ans def convert_to_array(x): if isinstance(x, np.ndarray): return x else: return np.array(x) def special_sort(a, order='ascending'): n=len(a) if order=='ascending': for i in range(1,n): j=deepcopy(i) while j>0 and a[j][1]<a[j-1][1]: temp=a[j-1] a[j-1]=a[j] a[j]=temp j=j-1 elif order=='descending': for i in range(1,n): j=deepcopy(i) while j>0 and a[j][1]>a[j-1][1]: temp=a[j-1] a[j-1]=a[j] a[j]=temp j=j-1 return a def dissimilarity(arr1, arr2, weighted): n=arr1.shape[0] s=0 if weighted==True: for i in range(0,n): diff=abs(arr1[i]-arr2[i]) s = s + (diff*(n-i)/n) else: for i in range(0,n): diff=abs(arr1[i]-arr2[i]) s = s + (diff) return s def create_utility_matrix(data, formatizer = {'user':0, 'item': 1, 'value': 2}): """ :param data: pandas dataframe, 2D, nx3 :param formatizer: dict having the column name or ids for users, items and ratings/values :return: 1. the utility matrix. (2D, n x m, n=users, m=items) 2. list of users (in order with the utility matrix rows) 3. list of items (in order with the utility matrix columns) """ itemField = formatizer['item'] userField = formatizer['user'] valueField = formatizer['value'] userList = data.ix[:,userField].tolist() itemList = data.ix[:,itemField].tolist() valueList = data.ix[:,valueField].tolist() users = list(set(data.ix[:,userField])) items = list(set(data.ix[:,itemField])) users_index = {users[i]: i for i in range(len(users))} pd_dict = {item: [np.nan for i in range(len(users))] for item in items} for i in range(0,len(data)): item = itemList[i] user = userList[i] value = valueList[i] pd_dict[item][users_index[user]] = value X = pd.DataFrame(pd_dict) X.index = users users = list(X.index) items = list(X.columns) return np.array(X), users, items
[ "pandas.DataFrame", "numpy.array", "copy.deepcopy" ]
[((2333, 2354), 'pandas.DataFrame', 'pd.DataFrame', (['pd_dict'], {}), '(pd_dict)\n', (2345, 2354), True, 'import pandas as pd\n'), ((316, 327), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (324, 327), True, 'import numpy as np\n'), ((2442, 2453), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2450, 2453), True, 'import numpy as np\n'), ((453, 464), 'copy.deepcopy', 'deepcopy', (['i'], {}), '(i)\n', (461, 464), False, 'from copy import deepcopy\n'), ((690, 701), 'copy.deepcopy', 'deepcopy', (['i'], {}), '(i)\n', (698, 701), False, 'from copy import deepcopy\n')]
""" Copyright 2018 <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import itertools import multiprocessing as mp import matplotlib.pyplot as plt import numpy as np from matplotlib.patches import Circle from scipy.spatial.distance import pdist from nuart.clustering import DualVigilanceHypersphereART from nuart.preprocessing import vat __author__ = "<NAME>" def dvha_cluster(args): from sklearn.metrics import adjusted_rand_score rho_ub, rho_lb, r_bar, inputs, targets = args return adjusted_rand_score( targets, DualVigilanceHypersphereART(rho_ub, rho_lb, r_bar, shuffle=False, max_epochs=1).fit(inputs) ) if __name__ == '__main__': # load the data data = np.loadtxt('data/csv/Target.csv', delimiter=',') inputs = data[:, :-1] targets = data[:, -1] # visualize the data fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(inputs[:, 0], inputs[:, 1], c=targets, cmap='Set1') fig.show() # apply vat ordering vat_ix = vat(inputs) inputs = inputs[vat_ix, :] targets = targets[vat_ix] # run a parameter study r_max = pdist(inputs).max() / 2 rho_step, r_bar_step = 0.05, 0.05 rho_values = np.arange(0, 1, rho_step) r_bar_values = np.arange(r_max, 2 * r_max, r_bar_step) args_comb = list(itertools.product(rho_values, r_bar_values)) with mp.Pool() as pool: ari_values = list( pool.map(dvha_cluster, [(rho, max(0, rho - 0.01), r_bar, inputs, targets) for rho, r_bar in args_comb])) # find the best performance parameters best_ix = np.argmax(ari_values) best_rho, best_r_bar = args_comb[best_ix] best_ari = 
ari_values[best_ix] # show ARI scores plt.figure() plt.plot(ari_values) plt.title('Best rho_ub: {:.2}, r_bar: {:.4} (ARI: {:.2})'.format(best_rho, best_r_bar, best_ari)) plt.show() # visualize ARI scores on a rho/r_bar matrix ari_matrix = np.zeros((rho_values.size, r_bar_values.size)) - 1 for ix, ari in enumerate(ari_values): i, j = (int(round((y - args_comb[0][x]) / (rho_step, r_bar_step)[x])) for x, y in enumerate(args_comb[ix])) ari_matrix[i, j] = ari plt.matshow(ari_matrix, aspect='auto', extent=[r_bar_values[0], r_bar_values[-1], rho_values[0], rho_values[-1]]) plt.xlabel('r_bar') plt.ylabel('rho') plt.colorbar() plt.show() # rerun the best settings to get categories dvha = DualVigilanceHypersphereART(best_rho, max(0, best_rho - 0.01), best_r_bar, shuffle=False, max_epochs=1) labels = dvha.fit(inputs) # visualize clusters fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(inputs[:, 0], inputs[:, 1], c=labels, cmap='Set1') for category in dvha.w: r, (x, y) = category[0], category[1:] ax.add_patch(Circle((x, y), r, fill=False)) plt.show()
[ "matplotlib.pyplot.ylabel", "scipy.spatial.distance.pdist", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.colorbar", "numpy.argmax", "itertools.product", "nuart.clustering.DualVigilanceHypersphereART", "matplotlib.pyplot.figure", "numpy.zeros", "matplotlib.pyplot.matsh...
[((1214, 1262), 'numpy.loadtxt', 'np.loadtxt', (['"""data/csv/Target.csv"""'], {'delimiter': '""","""'}), "('data/csv/Target.csv', delimiter=',')\n", (1224, 1262), True, 'import numpy as np\n'), ((1351, 1363), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1361, 1363), True, 'import matplotlib.pyplot as plt\n'), ((1515, 1526), 'nuart.preprocessing.vat', 'vat', (['inputs'], {}), '(inputs)\n', (1518, 1526), False, 'from nuart.preprocessing import vat\n'), ((1708, 1733), 'numpy.arange', 'np.arange', (['(0)', '(1)', 'rho_step'], {}), '(0, 1, rho_step)\n', (1717, 1733), True, 'import numpy as np\n'), ((1753, 1792), 'numpy.arange', 'np.arange', (['r_max', '(2 * r_max)', 'r_bar_step'], {}), '(r_max, 2 * r_max, r_bar_step)\n', (1762, 1792), True, 'import numpy as np\n'), ((2089, 2110), 'numpy.argmax', 'np.argmax', (['ari_values'], {}), '(ari_values)\n', (2098, 2110), True, 'import numpy as np\n'), ((2219, 2231), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2229, 2231), True, 'import matplotlib.pyplot as plt\n'), ((2236, 2256), 'matplotlib.pyplot.plot', 'plt.plot', (['ari_values'], {}), '(ari_values)\n', (2244, 2256), True, 'import matplotlib.pyplot as plt\n'), ((2363, 2373), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2371, 2373), True, 'import matplotlib.pyplot as plt\n'), ((2686, 2803), 'matplotlib.pyplot.matshow', 'plt.matshow', (['ari_matrix'], {'aspect': '"""auto"""', 'extent': '[r_bar_values[0], r_bar_values[-1], rho_values[0], rho_values[-1]]'}), "(ari_matrix, aspect='auto', extent=[r_bar_values[0],\n r_bar_values[-1], rho_values[0], rho_values[-1]])\n", (2697, 2803), True, 'import matplotlib.pyplot as plt\n'), ((2804, 2823), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""r_bar"""'], {}), "('r_bar')\n", (2814, 2823), True, 'import matplotlib.pyplot as plt\n'), ((2828, 2845), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""rho"""'], {}), "('rho')\n", (2838, 2845), True, 'import matplotlib.pyplot as plt\n'), ((2850, 2864), 
'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2862, 2864), True, 'import matplotlib.pyplot as plt\n'), ((2869, 2879), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2877, 2879), True, 'import matplotlib.pyplot as plt\n'), ((3110, 3122), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3120, 3122), True, 'import matplotlib.pyplot as plt\n'), ((3349, 3359), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3357, 3359), True, 'import matplotlib.pyplot as plt\n'), ((1814, 1857), 'itertools.product', 'itertools.product', (['rho_values', 'r_bar_values'], {}), '(rho_values, r_bar_values)\n', (1831, 1857), False, 'import itertools\n'), ((1868, 1877), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (1875, 1877), True, 'import multiprocessing as mp\n'), ((2441, 2487), 'numpy.zeros', 'np.zeros', (['(rho_values.size, r_bar_values.size)'], {}), '((rho_values.size, r_bar_values.size))\n', (2449, 2487), True, 'import numpy as np\n'), ((3314, 3343), 'matplotlib.patches.Circle', 'Circle', (['(x, y)', 'r'], {'fill': '(False)'}), '((x, y), r, fill=False)\n', (3320, 3343), False, 'from matplotlib.patches import Circle\n'), ((1056, 1135), 'nuart.clustering.DualVigilanceHypersphereART', 'DualVigilanceHypersphereART', (['rho_ub', 'rho_lb', 'r_bar'], {'shuffle': '(False)', 'max_epochs': '(1)'}), '(rho_ub, rho_lb, r_bar, shuffle=False, max_epochs=1)\n', (1083, 1135), False, 'from nuart.clustering import DualVigilanceHypersphereART\n'), ((1629, 1642), 'scipy.spatial.distance.pdist', 'pdist', (['inputs'], {}), '(inputs)\n', (1634, 1642), False, 'from scipy.spatial.distance import pdist\n')]
from scipy.optimize import minimize import numpy as np import pylab as pl from mpl_toolkits.mplot3d import Axes3D import math def f(x): """ Function that returns x_0^2 + e^{0.5*x_0} + 10*sin(x_1) + x_1^2. """ return x[0] ** 2 + math.exp(0.5 * x[0]) + 10 * math.sin(x[1]) + x[1] ** 2 def fprime(x): """ The derivative of f. """ ddx0 = 2 * x[0] + 0.5 * math.exp(0.5 * x[0]) ddx1 = 10 * math.cos(x[1]) + 2 * x[1] return np.array([ddx0, ddx1]) opt_out = minimize(f, x0=np.array( [10, 10]), jac=fprime, tol=1e-8, method='BFGS', options={'disp': True}) # Plotting pl.close('all') r = 6 x_range = np.linspace(-r, r) y_range = np.linspace(-r, r) X, Y = np.meshgrid(x_range, y_range) Z = np.zeros(X.shape) for i in range(X.shape[0]): for j in range(X.shape[1]): Z[i, j] = f(np.array([X[i, j], Y[i, j]])) fig = pl.figure('Cost function') ax = fig.add_subplot(111, projection='3d') surf = ax.plot_surface(X, Y, Z, cmap=pl.cm.coolwarm, alpha=0.6) ax.scatter(opt_out.x[0], opt_out.x[1], f(opt_out.x), c='r', s=50) pl.show(block=False)
[ "pylab.close", "pylab.figure", "numpy.array", "numpy.linspace", "numpy.zeros", "math.cos", "numpy.meshgrid", "math.exp", "math.sin", "pylab.show" ]
[((581, 596), 'pylab.close', 'pl.close', (['"""all"""'], {}), "('all')\n", (589, 596), True, 'import pylab as pl\n'), ((613, 631), 'numpy.linspace', 'np.linspace', (['(-r)', 'r'], {}), '(-r, r)\n', (624, 631), True, 'import numpy as np\n'), ((642, 660), 'numpy.linspace', 'np.linspace', (['(-r)', 'r'], {}), '(-r, r)\n', (653, 660), True, 'import numpy as np\n'), ((668, 697), 'numpy.meshgrid', 'np.meshgrid', (['x_range', 'y_range'], {}), '(x_range, y_range)\n', (679, 697), True, 'import numpy as np\n'), ((702, 719), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (710, 719), True, 'import numpy as np\n'), ((831, 857), 'pylab.figure', 'pl.figure', (['"""Cost function"""'], {}), "('Cost function')\n", (840, 857), True, 'import pylab as pl\n'), ((1032, 1052), 'pylab.show', 'pl.show', ([], {'block': '(False)'}), '(block=False)\n', (1039, 1052), True, 'import pylab as pl\n'), ((433, 455), 'numpy.array', 'np.array', (['[ddx0, ddx1]'], {}), '([ddx0, ddx1])\n', (441, 455), True, 'import numpy as np\n'), ((483, 501), 'numpy.array', 'np.array', (['[10, 10]'], {}), '([10, 10])\n', (491, 501), True, 'import numpy as np\n'), ((363, 383), 'math.exp', 'math.exp', (['(0.5 * x[0])'], {}), '(0.5 * x[0])\n', (371, 383), False, 'import math\n'), ((398, 412), 'math.cos', 'math.cos', (['x[1]'], {}), '(x[1])\n', (406, 412), False, 'import math\n'), ((794, 822), 'numpy.array', 'np.array', (['[X[i, j], Y[i, j]]'], {}), '([X[i, j], Y[i, j]])\n', (802, 822), True, 'import numpy as np\n'), ((234, 254), 'math.exp', 'math.exp', (['(0.5 * x[0])'], {}), '(0.5 * x[0])\n', (242, 254), False, 'import math\n'), ((262, 276), 'math.sin', 'math.sin', (['x[1]'], {}), '(x[1])\n', (270, 276), False, 'import math\n')]
from contextlib import contextmanager from copy import deepcopy from functools import partial import sys import warnings import numpy as np from numpy.testing import assert_equal import pytest from numpy.testing import assert_allclose from expyfun import ExperimentController, visual, _experiment_controller from expyfun._experiment_controller import _get_dev_db from expyfun._utils import (_TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet) from expyfun._sound_controllers._sound_controller import _SOUND_CARD_KEYS from expyfun.stimuli import get_tdt_rates std_args = ['test'] # experiment name std_kwargs = dict(output_dir=None, full_screen=False, window_size=(8, 8), participant='foo', session='01', stim_db=0.0, noise_db=0.0, verbose=True, version='dev') def dummy_print(string): """Print.""" print(string) @pytest.mark.parametrize('ws', [(2, 1), (1, 1)]) def test_unit_conversions(hide_window, ws): """Test unit conversions.""" kwargs = deepcopy(std_kwargs) kwargs['stim_fs'] = 44100 kwargs['window_size'] = ws with ExperimentController(*std_args, **kwargs) as ec: verts = np.random.rand(2, 4) for to in ['norm', 'pix', 'deg', 'cm']: for fro in ['norm', 'pix', 'deg', 'cm']: v2 = ec._convert_units(verts, fro, to) v2 = ec._convert_units(v2, to, fro) assert_allclose(verts, v2) # test that degrees yield equiv. 
pixels in both directions verts = np.ones((2, 1)) v0 = ec._convert_units(verts, 'deg', 'pix') verts = np.zeros((2, 1)) v1 = ec._convert_units(verts, 'deg', 'pix') v2 = v0 - v1 # must check deviation from zero position assert_allclose(v2[0], v2[1]) pytest.raises(ValueError, ec._convert_units, verts, 'deg', 'nothing') pytest.raises(RuntimeError, ec._convert_units, verts[0], 'deg', 'pix') def test_validate_audio(hide_window): """Test that validate_audio can pass through samples.""" with ExperimentController(*std_args, suppress_resamp=True, **std_kwargs) as ec: ec.set_stim_db(_get_dev_db(ec.audio_type) - 40) # 0.01 RMS assert ec._stim_scaler == 1. for shape in ((1000,), (1, 1000), (2, 1000)): samples_in = np.zeros(shape) samples_out = ec._validate_audio(samples_in) assert samples_out.shape == (1000, 2) assert samples_out.dtype == np.float32 assert samples_out is not samples_in for order in 'CF': samples_in = np.zeros((2, 1000), dtype=np.float32, order=order) samples_out = ec._validate_audio(samples_in) assert samples_out.shape == samples_in.shape[::-1] assert samples_out.dtype == np.float32 # ensure that we have not bade a copy, just a view assert samples_out.base is samples_in def test_data_line(hide_window): """Test writing of data lines.""" entries = [['foo'], ['bar', 'bar\tbar'], ['bar2', r'bar\tbar'], ['fb', None, -0.5]] # this is what should be written to the file for each one goal_vals = ['None', 'bar\\tbar', 'bar\\\\tbar', 'None'] assert_equal(len(entries), len(goal_vals)) temp_dir = _TempDir() with std_kwargs_changed(output_dir=temp_dir): with ExperimentController(*std_args, stim_fs=44100, **std_kwargs) as ec: for ent in entries: ec.write_data_line(*ent) fname = ec._data_file.name with open(fname) as fid: lines = fid.readlines() # check the header assert_equal(len(lines), len(entries) + 4) # header, colnames, flip, stop assert_equal(lines[0][0], '#') # first line is a comment for x in ['timestamp', 'event', 'value']: # second line is col header assert (x in 
lines[1]) assert ('flip' in lines[2]) # ec.__init__ ends with a flip assert ('stop' in lines[-1]) # last line is stop (from __exit__) outs = lines[1].strip().split('\t') assert (all(l1 == l2 for l1, l2 in zip(outs, ['timestamp', 'event', 'value']))) # check the entries ts = [] for line, ent, gv in zip(lines[3:], entries, goal_vals): outs = line.strip().split('\t') assert_equal(len(outs), 3) # check timestamping if len(ent) == 3 and ent[2] is not None: assert_equal(outs[0], str(ent[2])) else: ts.append(float(outs[0])) # check events assert_equal(outs[1], ent[0]) # check values assert_equal(outs[2], gv) # make sure we got monotonically increasing timestamps ts = np.array(ts) assert (np.all(ts[1:] >= ts[:-1])) @contextmanager def std_kwargs_changed(**kwargs): """Use modified std_kwargs.""" old_vals = dict() for key, val in kwargs.items(): old_vals[key] = std_kwargs[key] std_kwargs[key] = val try: yield finally: for key, val in old_vals.items(): std_kwargs[key] = val def test_degenerate(): """Test degenerate EC conditions.""" pytest.raises(TypeError, ExperimentController, *std_args, audio_controller=1, stim_fs=44100, **std_kwargs) pytest.raises(ValueError, ExperimentController, *std_args, audio_controller='foo', stim_fs=44100, **std_kwargs) pytest.raises(ValueError, ExperimentController, *std_args, audio_controller=dict(TYPE='foo'), stim_fs=44100, **std_kwargs) # monitor, etc. 
pytest.raises(TypeError, ExperimentController, *std_args, monitor='foo', **std_kwargs) pytest.raises(KeyError, ExperimentController, *std_args, monitor=dict(), **std_kwargs) pytest.raises(ValueError, ExperimentController, *std_args, response_device='foo', **std_kwargs) with std_kwargs_changed(window_size=10.): pytest.raises(ValueError, ExperimentController, *std_args, **std_kwargs) pytest.raises(ValueError, ExperimentController, *std_args, audio_controller='sound_card', response_device='tdt', **std_kwargs) pytest.raises(ValueError, ExperimentController, *std_args, audio_controller='pyglet', response_device='keyboard', trigger_controller='sound_card', **std_kwargs) # test type checking for 'session' with std_kwargs_changed(session=1): pytest.raises(TypeError, ExperimentController, *std_args, audio_controller='sound_card', stim_fs=44100, **std_kwargs) # test value checking for trigger controller pytest.raises(ValueError, ExperimentController, *std_args, audio_controller='sound_card', trigger_controller='foo', stim_fs=44100, **std_kwargs) # test value checking for RMS checker pytest.raises(ValueError, ExperimentController, *std_args, audio_controller='sound_card', check_rms=True, stim_fs=44100, **std_kwargs) @pytest.mark.timeout(20) def test_ec(ac, hide_window, monkeypatch): """Test EC methods.""" if ac == 'tdt': rd, tc, fs = 'tdt', 'tdt', get_tdt_rates()['25k'] pytest.raises(ValueError, ExperimentController, *std_args, audio_controller=dict(TYPE=ac, TDT_MODEL='foo'), **std_kwargs) else: _check_skip_backend(ac) rd, tc, fs = 'keyboard', 'dummy', 44100 for suppress in (True, False): with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') with ExperimentController( *std_args, audio_controller=ac, response_device=rd, trigger_controller=tc, stim_fs=100., suppress_resamp=suppress, **std_kwargs) as ec: pass w = [ww for ww in w if 'TDT is in dummy mode' in str(ww.message)] assert len(w) == (1 if ac == 'tdt' else 0) SAFE_DELAY = 0.2 with 
ExperimentController( *std_args, audio_controller=ac, response_device=rd, trigger_controller=tc, stim_fs=fs, **std_kwargs) as ec: assert (ec.participant == std_kwargs['participant']) assert (ec.session == std_kwargs['session']) assert (ec.exp_name == std_args[0]) stamp = ec.current_time ec.write_data_line('hello') ec.wait_until(stamp + 0.02) ec.screen_prompt('test', 0.01, 0, None) ec.screen_prompt('test', 0.01, 0, ['1']) ec.screen_prompt(['test', 'ing'], 0.01, 0, ['1']) ec.screen_prompt('test', 1e-3, click=True) pytest.raises(ValueError, ec.screen_prompt, 'foo', np.inf, 0, []) pytest.raises(TypeError, ec.screen_prompt, 3, 0.01, 0, None) assert_equal(ec.wait_one_press(0.01), (None, None)) assert (ec.wait_one_press(0.01, timestamp=False) is None) assert_equal(ec.wait_for_presses(0.01), []) assert_equal(ec.wait_for_presses(0.01, timestamp=False), []) pytest.raises(ValueError, ec.get_presses) ec.listen_presses() assert_equal(ec.get_presses(), []) assert_equal(ec.get_presses(kind='presses'), []) pytest.raises(ValueError, ec.get_presses, kind='foo') if rd == 'tdt': # TDT does not have key release events, so should raise an # exception if asked for them: pytest.raises(RuntimeError, ec.get_presses, kind='releases') pytest.raises(RuntimeError, ec.get_presses, kind='both') else: assert_equal(ec.get_presses(kind='both'), []) assert_equal(ec.get_presses(kind='releases'), []) ec.set_noise_db(0) ec.set_stim_db(20) # test buffer data handling ec.set_rms_checking(None) ec.load_buffer([0, 0, 0, 0, 0, 0]) ec.load_buffer([]) pytest.raises(ValueError, ec.load_buffer, [0, 2, 0, 0, 0, 0]) ec.load_buffer(np.zeros((100,))) with pytest.raises(ValueError, match='100 did not match .* count 2'): ec.load_buffer(np.zeros((100, 1))) with pytest.raises(ValueError, match='100 did not match .* count 2'): ec.load_buffer(np.zeros((100, 2))) ec.load_buffer(np.zeros((1, 100))) ec.load_buffer(np.zeros((2, 100))) data = np.zeros(int(5e6), np.float32) # too long for TDT if fs == get_tdt_rates()['25k']: 
pytest.raises(RuntimeError, ec.load_buffer, data) else: ec.load_buffer(data) ec.load_buffer(np.zeros(2)) del data pytest.raises(ValueError, ec.stamp_triggers, 'foo') pytest.raises(ValueError, ec.stamp_triggers, 0) pytest.raises(ValueError, ec.stamp_triggers, 3) pytest.raises(ValueError, ec.stamp_triggers, 1, check='foo') print(ec._tc) # test __repr__ if tc == 'dummy': assert_equal(ec._tc._trigger_list, []) ec.stamp_triggers(3, check='int4') ec.stamp_triggers(2) ec.stamp_triggers([2, 4, 8]) if tc == 'dummy': assert_equal(ec._tc._trigger_list, [3, 2, 2, 4, 8]) ec._tc._trigger_list = list() pytest.raises(ValueError, ec.load_buffer, np.zeros((100, 3))) pytest.raises(ValueError, ec.load_buffer, np.zeros((3, 100))) pytest.raises(ValueError, ec.load_buffer, np.zeros((1, 1, 1))) # test RMS checking pytest.raises(ValueError, ec.set_rms_checking, 'foo') # click: RMS 0.0135, should pass 'fullfile' and fail 'windowed' click = np.zeros((int(ec.fs / 4),)) # 250 ms click[len(click) // 2] = 1. click[len(click) // 2 + 1] = -1. 
# noise: RMS 0.03, should fail both 'fullfile' and 'windowed' noise = np.random.normal(scale=0.03, size=(int(ec.fs / 4),)) ec.set_rms_checking(None) ec.load_buffer(click) # should go unchecked ec.load_buffer(noise) # should go unchecked ec.set_rms_checking('wholefile') ec.load_buffer(click) # should pass with pytest.warns(UserWarning, match='exceeds stated'): ec.load_buffer(noise) ec.wait_secs(SAFE_DELAY) ec.set_rms_checking('windowed') with pytest.warns(UserWarning, match='exceeds stated'): ec.load_buffer(click) ec.wait_secs(SAFE_DELAY) with pytest.warns(UserWarning, match='exceeds stated'): ec.load_buffer(noise) if ac != 'tdt': # too many samples there monkeypatch.setattr(_experiment_controller, '_SLOW_LIMIT', 1) with pytest.warns(UserWarning, match='samples is slow'): ec.load_buffer(np.zeros(2, dtype=np.float32)) monkeypatch.setattr(_experiment_controller, '_SLOW_LIMIT', 1e7) ec.stop() ec.set_visible() ec.set_visible(False) ec.call_on_every_flip(partial(dummy_print, 'called start stimuli')) ec.wait_secs(SAFE_DELAY) # Note: we put some wait_secs in here because otherwise the delay in # play start (e.g. for trigdel and onsetdel) can # mess things up! So we probably eventually should add # some safeguard against stopping too quickly after starting... 
# # First: identify_trial # noise = np.random.normal(scale=0.01, size=(int(ec.fs),)) ec.load_buffer(noise) pytest.raises(RuntimeError, ec.start_stimulus) # order violation assert (ec._playing is False) if tc == 'dummy': assert_equal(ec._tc._trigger_list, []) ec.start_stimulus(start_of_trial=False) # should work if tc == 'dummy': assert_equal(ec._tc._trigger_list, [1]) ec.wait_secs(SAFE_DELAY) assert (ec._playing is True) pytest.raises(RuntimeError, ec.trial_ok) # order violation ec.stop() assert (ec._playing is False) # only binary for TTL pytest.raises(KeyError, ec.identify_trial, ec_id='foo') # need ttl_id pytest.raises(TypeError, ec.identify_trial, ec_id='foo', ttl_id='bar') pytest.raises(ValueError, ec.identify_trial, ec_id='foo', ttl_id=[2]) assert (ec._playing is False) if tc == 'dummy': ec._tc._trigger_list = list() ec.identify_trial(ec_id='foo', ttl_id=[0, 1]) assert (ec._playing is False) # # Second: start_stimuli # pytest.raises(RuntimeError, ec.identify_trial, ec_id='foo', ttl_id=[0]) assert (ec._playing is False) pytest.raises(RuntimeError, ec.trial_ok) # order violation assert (ec._playing is False) ec.start_stimulus(flip=False, when=-1) if tc == 'dummy': assert_equal(ec._tc._trigger_list, [4, 8, 1]) if ac != 'tdt': # dummy TDT version won't do this check properly, as # ec._ac._playing -> GetTagVal('playing') always gives False pytest.raises(RuntimeError, ec.play) # already played, must stop ec.wait_secs(SAFE_DELAY) ec.stop() assert (ec._playing is False) # # Third: trial_ok # pytest.raises(RuntimeError, ec.start_stimulus) # order violation pytest.raises(RuntimeError, ec.identify_trial) # order violation ec.trial_ok() # double-check pytest.raises(RuntimeError, ec.start_stimulus) # order violation ec.start_stimulus(start_of_trial=False) # should work pytest.raises(RuntimeError, ec.trial_ok) # order violation ec.wait_secs(SAFE_DELAY) ec.stop() assert (ec._playing is False) ec.flip(-np.inf) assert (ec._playing is False) ec.estimate_screen_fs() assert 
(ec._playing is False) ec.play() ec.wait_secs(SAFE_DELAY) assert (ec._playing is True) ec.call_on_every_flip(None) # something funny with the ring buffer in testing on OSX if sys.platform != 'darwin': ec.call_on_next_flip(ec.start_noise()) ec.flip() ec.wait_secs(SAFE_DELAY) ec.stop_noise() ec.stop() assert (ec._playing is False) ec.stop_noise() ec.wait_secs(SAFE_DELAY) ec.start_stimulus(start_of_trial=False) ec.stop() ec.start_stimulus(start_of_trial=False) ec.get_mouse_position() ec.listen_clicks() ec.get_clicks() ec.toggle_cursor(False) ec.toggle_cursor(True, True) ec.move_mouse_to((0, 0)) # center of the window ec.wait_secs(0.001) print(ec.id_types) print(ec.stim_db) print(ec.noise_db) print(ec.on_next_flip_functions) print(ec.on_every_flip_functions) print(ec.window) # we need to monkey-patch for old Pyglet try: from PIL import Image Image.fromstring except AttributeError: Image.fromstring = None data = ec.screenshot() # HiDPI sizes = [tuple(std_kwargs['window_size']), tuple(np.array(std_kwargs['window_size']) * 2)] assert data.shape[:2] in sizes print(ec.fs) # test fs support wait_secs(0.01) test_pix = (11.3, 0.5, 110003) print(test_pix) # test __repr__ assert all([x in repr(ec) for x in ['foo', '"test"', '01']]) ec.refocus() # smoke test for refocusing del ec @pytest.mark.parametrize('screen_num', (None, 0)) @pytest.mark.parametrize('monitor', ( None, dict(SCREEN_WIDTH=10, SCREEN_DISTANCE=10, SCREEN_SIZE_PIX=(1000, 1000)), )) def test_screen_monitor(screen_num, monitor, hide_window): """Test screen and monitor option support.""" with ExperimentController( *std_args, screen_num=screen_num, monitor=monitor, **std_kwargs): pass full_kwargs = deepcopy(std_kwargs) full_kwargs['full_screen'] = True with pytest.raises(RuntimeError, match='resolution set incorrectly'): ExperimentController(*std_args, **full_kwargs) with pytest.raises(TypeError, match='must be a dict'): ExperimentController(*std_args, monitor=1, **std_kwargs) with pytest.raises(KeyError, match='is 
missing required keys'): ExperimentController(*std_args, monitor={}, **std_kwargs) def test_tdtpy_failure(hide_window): """Test that failed TDTpy import raises ImportError.""" try: from tdt.util import connect_rpcox # noqa, analysis:ignore except ImportError: pass else: pytest.skip('Cannot test TDT import failure') ac = dict(TYPE='tdt', TDT_MODEL='RP2') with pytest.raises(ImportError, match='No module named'): ExperimentController( *std_args, audio_controller=ac, response_device='keyboard', trigger_controller='tdt', stim_fs=100., suppress_resamp=True, **std_kwargs) @pytest.mark.timeout(10) def test_button_presses_and_window_size(hide_window): """Test EC window_size=None and button press capture.""" with ExperimentController(*std_args, audio_controller='sound_card', response_device='keyboard', window_size=None, output_dir=None, full_screen=False, session='01', participant='foo', trigger_controller='dummy', force_quit='escape', version='dev') as ec: ec.listen_presses() ec.get_presses() assert_equal(ec.get_presses(), []) fake_button_press(ec, '1', 0.5) assert_equal(ec.screen_prompt('press 1', live_keys=['1'], max_wait=1.5), '1') ec.listen_presses() assert_equal(ec.get_presses(), []) fake_button_press(ec, '1') assert_equal(ec.get_presses(timestamp=False), [('1',)]) ec.listen_presses() fake_button_press(ec, '1') presses = ec.get_presses(timestamp=True, relative_to=0.2) assert_equal(len(presses), 1) assert_equal(len(presses[0]), 2) assert_equal(presses[0][0], '1') assert (isinstance(presses[0][1], float)) ec.listen_presses() fake_button_press(ec, '1') presses = ec.get_presses(timestamp=True, relative_to=0.1, return_kinds=True) assert_equal(len(presses), 1) assert_equal(len(presses[0]), 3) assert_equal(presses[0][::2], ('1', 'press')) assert (isinstance(presses[0][1], float)) ec.listen_presses() fake_button_press(ec, '1') presses = ec.get_presses(timestamp=False, return_kinds=True) assert_equal(presses, [('1', 'press')]) ec.listen_presses() ec.screen_text('press 1 again') 
ec.flip() fake_button_press(ec, '1', 0.3) assert_equal(ec.wait_one_press(1.5, live_keys=[1])[0], '1') ec.screen_text('press 1 one last time') ec.flip() fake_button_press(ec, '1', 0.3) out = ec.wait_for_presses(1.5, live_keys=['1'], timestamp=False) assert_equal(out[0], '1') fake_button_press(ec, 'a', 0.3) fake_button_press(ec, 'return', 0.5) assert ec.text_input() == 'A' fake_button_press(ec, 'a', 0.3) fake_button_press(ec, 'space', 0.35) fake_button_press(ec, 'backspace', 0.4) fake_button_press(ec, 'comma', 0.45) fake_button_press(ec, 'return', 0.5) # XXX this fails on OSX travis for some reason new_pyglet = _new_pyglet() bad = sys.platform == 'darwin' bad |= sys.platform == 'win32' and new_pyglet if not bad: assert ec.text_input(all_caps=False).strip() == 'a' @pytest.mark.timeout(10) @requires_opengl21 def test_mouse_clicks(hide_window): """Test EC mouse click support.""" with ExperimentController(*std_args, participant='foo', session='01', output_dir=None, version='dev') as ec: rect = visual.Rectangle(ec, [0, 0, 2, 2]) fake_mouse_click(ec, [1, 2], delay=0.3) assert_equal(ec.wait_for_click_on(rect, 1.5, timestamp=False)[0], ('left', 1, 2)) pytest.raises(TypeError, ec.wait_for_click_on, (rect, rect), 1.5) fake_mouse_click(ec, [2, 1], 'middle', delay=0.3) out = ec.wait_one_click(1.5, 0., ['middle'], timestamp=True) assert (out[3] < 1.5) assert_equal(out[:3], ('middle', 2, 1)) fake_mouse_click(ec, [3, 2], 'left', delay=0.3) fake_mouse_click(ec, [4, 5], 'right', delay=0.3) out = ec.wait_for_clicks(1.5, timestamp=False) assert_equal(len(out), 2) assert (any(o == ('left', 3, 2) for o in out)) assert (any(o == ('right', 4, 5) for o in out)) out = ec.wait_for_clicks(0.1) assert_equal(len(out), 0) @requires_opengl21 @pytest.mark.timeout(30) def test_background_color(hide_window): """Test setting background color""" with ExperimentController(*std_args, participant='foo', session='01', output_dir=None, version='dev') as ec: print((ec.window.width, ec.window.height)) 
ec.set_background_color('red') ss = ec.screenshot()[:, :, :3] red_mask = (ss == [255, 0, 0]).all(axis=-1) assert (red_mask.all()) ec.set_background_color('white') ss = ec.screenshot()[:, :, :3] white_mask = (ss == [255] * 3).all(axis=-1) assert (white_mask.all()) ec.flip() ec.set_background_color('0.5') visual.Rectangle(ec, [0, 0, 1, 1], fill_color='black').draw() ss = ec.screenshot()[:, :, :3] gray_mask = ((ss == [127] * 3).all(axis=-1) | (ss == [128] * 3).all(axis=-1)) assert (gray_mask.any()) black_mask = (ss == [0] * 3).all(axis=-1) assert (black_mask.any()) assert (np.logical_or(gray_mask, black_mask).all()) def test_tdt_delay(hide_window): """Test the tdt_delay parameter.""" with ExperimentController(*std_args, audio_controller=dict(TYPE='tdt', TDT_DELAY=0), **std_kwargs) as ec: assert_equal(ec._ac._used_params['TDT_DELAY'], 0) with ExperimentController(*std_args, audio_controller=dict(TYPE='tdt', TDT_DELAY=1), **std_kwargs) as ec: assert_equal(ec._ac._used_params['TDT_DELAY'], 1) pytest.raises(ValueError, ExperimentController, *std_args, audio_controller=dict(TYPE='tdt', TDT_DELAY='foo'), **std_kwargs) pytest.raises(OverflowError, ExperimentController, *std_args, audio_controller=dict(TYPE='tdt', TDT_DELAY=np.inf), **std_kwargs) pytest.raises(TypeError, ExperimentController, *std_args, audio_controller=dict(TYPE='tdt', TDT_DELAY=np.ones(2)), **std_kwargs) pytest.raises(ValueError, ExperimentController, *std_args, audio_controller=dict(TYPE='tdt', TDT_DELAY=-1), **std_kwargs) def test_sound_card_triggering(hide_window): """Test using the sound card as a trigger controller.""" audio_controller = dict(TYPE='sound_card', SOUND_CARD_TRIGGER_CHANNELS='0') with pytest.raises(ValueError, match='SOUND_CARD_TRIGGER_CHANNELS is zer'): ExperimentController(*std_args, audio_controller=audio_controller, trigger_controller='sound_card', suppress_resamp=True, **std_kwargs) audio_controller.update(SOUND_CARD_TRIGGER_CHANNELS='1') # Use 1 trigger ch and 1 output ch because 
this should work on all systems with ExperimentController(*std_args, audio_controller=audio_controller, trigger_controller='sound_card', n_channels=1, suppress_resamp=True, **std_kwargs) as ec: ec.identify_trial(ttl_id=[1, 0], ec_id='') ec.load_buffer([1e-2]) ec.start_stimulus() ec.stop() # Test the drift triggers audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=0.001) with ExperimentController(*std_args, audio_controller=audio_controller, trigger_controller='sound_card', n_channels=1, **std_kwargs) as ec: ec.identify_trial(ttl_id=[1, 0], ec_id='') with pytest.warns(UserWarning, match='Drift triggers overlap with ' 'onset triggers.'): ec.load_buffer(np.zeros(ec.stim_fs)) ec.start_stimulus() ec.stop() audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=[1.1, 0.3, -0.3, 'end']) with ExperimentController(*std_args, audio_controller=audio_controller, trigger_controller='sound_card', n_channels=1, **std_kwargs) as ec: ec.identify_trial(ttl_id=[1, 0], ec_id='') with pytest.warns(UserWarning, match='Drift trigger at 1.1 seconds ' 'occurs outside stimulus window, not stamping ' 'trigger.'): ec.load_buffer(np.zeros(ec.stim_fs)) ec.start_stimulus() ec.stop() audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=[0.5, 0.501]) with ExperimentController(*std_args, audio_controller=audio_controller, trigger_controller='sound_card', n_channels=1, **std_kwargs) as ec: ec.identify_trial(ttl_id=[1, 0], ec_id='') with pytest.warns(UserWarning, match='Some 2-triggers overlap.*'): ec.load_buffer(np.zeros(ec.stim_fs)) ec.start_stimulus() ec.stop() audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=[]) with ExperimentController(*std_args, audio_controller=audio_controller, trigger_controller='sound_card', n_channels=1, **std_kwargs) as ec: ec.identify_trial(ttl_id=[1, 0], ec_id='') ec.load_buffer(np.zeros(ec.stim_fs)) ec.start_stimulus() ec.stop() audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=[0.2, 0.5, -0.3]) with ExperimentController(*std_args, audio_controller=audio_controller, 
trigger_controller='sound_card', n_channels=1, **std_kwargs) as ec: ec.identify_trial(ttl_id=[1, 0], ec_id='') ec.load_buffer(np.zeros(ec.stim_fs)) ec.start_stimulus() ec.stop() class _FakeJoystick(object): device = 'FakeJoystick' on_joybutton_press = lambda self, joystick, button: None # noqa open = lambda self, window, exclusive: None # noqa x = 0.125 def test_joystick(hide_window, monkeypatch): """Test joystick support.""" import pyglet fake = _FakeJoystick() monkeypatch.setattr(pyglet.input, 'get_joysticks', lambda: [fake]) with ExperimentController(*std_args, joystick=True, **std_kwargs) as ec: ec.listen_joystick_button_presses() fake.on_joybutton_press(fake, 1) presses = ec.get_joystick_button_presses() assert len(presses) == 1 assert presses[0][0] == '1' assert ec.get_joystick_value('x') == 0.125 def test_sound_card_params(): """Test that sound card params are known keys.""" for key in _SOUND_CARD_KEYS: if key != 'TYPE': assert key in known_config_types, key
[ "expyfun._experiment_controller._get_dev_db", "numpy.testing.assert_equal", "numpy.random.rand", "numpy.array", "copy.deepcopy", "pytest.mark.timeout", "expyfun._utils.fake_mouse_click", "expyfun.ExperimentController", "numpy.testing.assert_allclose", "expyfun._utils._new_pyglet", "expyfun._util...
[((1042, 1089), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ws"""', '[(2, 1), (1, 1)]'], {}), "('ws', [(2, 1), (1, 1)])\n", (1065, 1089), False, 'import pytest\n'), ((7380, 7403), 'pytest.mark.timeout', 'pytest.mark.timeout', (['(20)'], {}), '(20)\n', (7399, 7403), False, 'import pytest\n'), ((17843, 17891), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""screen_num"""', '(None, 0)'], {}), "('screen_num', (None, 0))\n", (17866, 17891), False, 'import pytest\n'), ((19314, 19337), 'pytest.mark.timeout', 'pytest.mark.timeout', (['(10)'], {}), '(10)\n', (19333, 19337), False, 'import pytest\n'), ((22132, 22155), 'pytest.mark.timeout', 'pytest.mark.timeout', (['(10)'], {}), '(10)\n', (22151, 22155), False, 'import pytest\n'), ((23288, 23311), 'pytest.mark.timeout', 'pytest.mark.timeout', (['(30)'], {}), '(30)\n', (23307, 23311), False, 'import pytest\n'), ((1180, 1200), 'copy.deepcopy', 'deepcopy', (['std_kwargs'], {}), '(std_kwargs)\n', (1188, 1200), False, 'from copy import deepcopy\n'), ((1684, 1699), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (1691, 1699), True, 'import numpy as np\n'), ((1760, 1776), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (1768, 1776), True, 'import numpy as np\n'), ((1889, 1918), 'numpy.testing.assert_allclose', 'assert_allclose', (['v2[0]', 'v2[1]'], {}), '(v2[0], v2[1])\n', (1904, 1918), False, 'from numpy.testing import assert_allclose\n'), ((1923, 1992), 'pytest.raises', 'pytest.raises', (['ValueError', 'ec._convert_units', 'verts', '"""deg"""', '"""nothing"""'], {}), "(ValueError, ec._convert_units, verts, 'deg', 'nothing')\n", (1936, 1992), False, 'import pytest\n'), ((1997, 2067), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec._convert_units', 'verts[0]', '"""deg"""', '"""pix"""'], {}), "(RuntimeError, ec._convert_units, verts[0], 'deg', 'pix')\n", (2010, 2067), False, 'import pytest\n'), ((3468, 3478), 'expyfun._utils._TempDir', '_TempDir', ([], {}), '()\n', (3476, 
3478), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((3923, 3953), 'numpy.testing.assert_equal', 'assert_equal', (['lines[0][0]', '"""#"""'], {}), "(lines[0][0], '#')\n", (3935, 3953), False, 'from numpy.testing import assert_equal\n'), ((4930, 4942), 'numpy.array', 'np.array', (['ts'], {}), '(ts)\n', (4938, 4942), True, 'import numpy as np\n'), ((4955, 4980), 'numpy.all', 'np.all', (['(ts[1:] >= ts[:-1])'], {}), '(ts[1:] >= ts[:-1])\n', (4961, 4980), True, 'import numpy as np\n'), ((5379, 5490), 'pytest.raises', 'pytest.raises', (['TypeError', 'ExperimentController', '*std_args'], {'audio_controller': '(1)', 'stim_fs': '(44100)'}), '(TypeError, ExperimentController, *std_args, audio_controller=\n 1, stim_fs=44100, **std_kwargs)\n', (5392, 5490), False, 'import pytest\n'), ((5508, 5624), 'pytest.raises', 'pytest.raises', (['ValueError', 'ExperimentController', '*std_args'], {'audio_controller': '"""foo"""', 'stim_fs': '(44100)'}), "(ValueError, ExperimentController, *std_args, audio_controller\n ='foo', stim_fs=44100, **std_kwargs)\n", (5521, 5624), False, 'import pytest\n'), ((5825, 5916), 'pytest.raises', 'pytest.raises', (['TypeError', 'ExperimentController', '*std_args'], {'monitor': '"""foo"""'}), "(TypeError, ExperimentController, *std_args, monitor='foo', **\n std_kwargs)\n", (5838, 5916), False, 'import pytest\n'), ((6043, 6143), 'pytest.raises', 'pytest.raises', (['ValueError', 'ExperimentController', '*std_args'], {'response_device': '"""foo"""'}), "(ValueError, ExperimentController, *std_args, response_device=\n 'foo', **std_kwargs)\n", (6056, 6143), False, 'import pytest\n'), ((6310, 6441), 'pytest.raises', 'pytest.raises', (['ValueError', 'ExperimentController', '*std_args'], {'audio_controller': '"""sound_card"""', 'response_device': '"""tdt"""'}), "(ValueError, ExperimentController, *std_args, audio_controller\n 
='sound_card', response_device='tdt', **std_kwargs)\n", (6323, 6441), False, 'import pytest\n'), ((6477, 6646), 'pytest.raises', 'pytest.raises', (['ValueError', 'ExperimentController', '*std_args'], {'audio_controller': '"""pyglet"""', 'response_device': '"""keyboard"""', 'trigger_controller': '"""sound_card"""'}), "(ValueError, ExperimentController, *std_args, audio_controller\n ='pyglet', response_device='keyboard', trigger_controller='sound_card',\n **std_kwargs)\n", (6490, 6646), False, 'import pytest\n'), ((6978, 7127), 'pytest.raises', 'pytest.raises', (['ValueError', 'ExperimentController', '*std_args'], {'audio_controller': '"""sound_card"""', 'trigger_controller': '"""foo"""', 'stim_fs': '(44100)'}), "(ValueError, ExperimentController, *std_args, audio_controller\n ='sound_card', trigger_controller='foo', stim_fs=44100, **std_kwargs)\n", (6991, 7127), False, 'import pytest\n'), ((7206, 7345), 'pytest.raises', 'pytest.raises', (['ValueError', 'ExperimentController', '*std_args'], {'audio_controller': '"""sound_card"""', 'check_rms': '(True)', 'stim_fs': '(44100)'}), "(ValueError, ExperimentController, *std_args, audio_controller\n ='sound_card', check_rms=True, stim_fs=44100, **std_kwargs)\n", (7219, 7345), False, 'import pytest\n'), ((18281, 18301), 'copy.deepcopy', 'deepcopy', (['std_kwargs'], {}), '(std_kwargs)\n', (18289, 18301), False, 'from copy import deepcopy\n'), ((1271, 1312), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {}), '(*std_args, **kwargs)\n', (1291, 1312), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((1336, 1356), 'numpy.random.rand', 'np.random.rand', (['(2)', '(4)'], {}), '(2, 4)\n', (1350, 1356), True, 'import numpy as np\n'), ((2178, 2245), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'suppress_resamp': '(True)'}), '(*std_args, suppress_resamp=True, **std_kwargs)\n', (2198, 2245), False, 'from expyfun import ExperimentController, visual, 
_experiment_controller\n'), ((4775, 4804), 'numpy.testing.assert_equal', 'assert_equal', (['outs[1]', 'ent[0]'], {}), '(outs[1], ent[0])\n', (4787, 4804), False, 'from numpy.testing import assert_equal\n'), ((4836, 4861), 'numpy.testing.assert_equal', 'assert_equal', (['outs[2]', 'gv'], {}), '(outs[2], gv)\n', (4848, 4861), False, 'from numpy.testing import assert_equal\n'), ((6211, 6283), 'pytest.raises', 'pytest.raises', (['ValueError', 'ExperimentController', '*std_args'], {}), '(ValueError, ExperimentController, *std_args, **std_kwargs)\n', (6224, 6283), False, 'import pytest\n'), ((6762, 6884), 'pytest.raises', 'pytest.raises', (['TypeError', 'ExperimentController', '*std_args'], {'audio_controller': '"""sound_card"""', 'stim_fs': '(44100)'}), "(TypeError, ExperimentController, *std_args, audio_controller=\n 'sound_card', stim_fs=44100, **std_kwargs)\n", (6775, 6884), False, 'import pytest\n'), ((7744, 7767), 'expyfun._utils._check_skip_backend', '_check_skip_backend', (['ac'], {}), '(ac)\n', (7763, 7767), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((8362, 8487), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'audio_controller': 'ac', 'response_device': 'rd', 'trigger_controller': 'tc', 'stim_fs': 'fs'}), '(*std_args, audio_controller=ac, response_device=rd,\n trigger_controller=tc, stim_fs=fs, **std_kwargs)\n', (8382, 8487), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((8992, 9057), 'pytest.raises', 'pytest.raises', (['ValueError', 'ec.screen_prompt', '"""foo"""', 'np.inf', '(0)', '[]'], {}), "(ValueError, ec.screen_prompt, 'foo', np.inf, 0, [])\n", (9005, 9057), False, 'import pytest\n'), ((9066, 9126), 'pytest.raises', 'pytest.raises', (['TypeError', 'ec.screen_prompt', '(3)', '(0.01)', '(0)', 'None'], {}), '(TypeError, ec.screen_prompt, 3, 0.01, 0, 
None)\n', (9079, 9126), False, 'import pytest\n'), ((9382, 9423), 'pytest.raises', 'pytest.raises', (['ValueError', 'ec.get_presses'], {}), '(ValueError, ec.get_presses)\n', (9395, 9423), False, 'import pytest\n'), ((9560, 9613), 'pytest.raises', 'pytest.raises', (['ValueError', 'ec.get_presses'], {'kind': '"""foo"""'}), "(ValueError, ec.get_presses, kind='foo')\n", (9573, 9613), False, 'import pytest\n'), ((10230, 10291), 'pytest.raises', 'pytest.raises', (['ValueError', 'ec.load_buffer', '[0, 2, 0, 0, 0, 0]'], {}), '(ValueError, ec.load_buffer, [0, 2, 0, 0, 0, 0])\n', (10243, 10291), False, 'import pytest\n'), ((10946, 10997), 'pytest.raises', 'pytest.raises', (['ValueError', 'ec.stamp_triggers', '"""foo"""'], {}), "(ValueError, ec.stamp_triggers, 'foo')\n", (10959, 10997), False, 'import pytest\n'), ((11006, 11053), 'pytest.raises', 'pytest.raises', (['ValueError', 'ec.stamp_triggers', '(0)'], {}), '(ValueError, ec.stamp_triggers, 0)\n', (11019, 11053), False, 'import pytest\n'), ((11062, 11109), 'pytest.raises', 'pytest.raises', (['ValueError', 'ec.stamp_triggers', '(3)'], {}), '(ValueError, ec.stamp_triggers, 3)\n', (11075, 11109), False, 'import pytest\n'), ((11118, 11178), 'pytest.raises', 'pytest.raises', (['ValueError', 'ec.stamp_triggers', '(1)'], {'check': '"""foo"""'}), "(ValueError, ec.stamp_triggers, 1, check='foo')\n", (11131, 11178), False, 'import pytest\n'), ((11784, 11837), 'pytest.raises', 'pytest.raises', (['ValueError', 'ec.set_rms_checking', '"""foo"""'], {}), "(ValueError, ec.set_rms_checking, 'foo')\n", (11797, 11837), False, 'import pytest\n'), ((13746, 13792), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec.start_stimulus'], {}), '(RuntimeError, ec.start_stimulus)\n', (13759, 13792), False, 'import pytest\n'), ((14153, 14193), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec.trial_ok'], {}), '(RuntimeError, ec.trial_ok)\n', (14166, 14193), False, 'import pytest\n'), ((14313, 14368), 'pytest.raises', 'pytest.raises', 
(['KeyError', 'ec.identify_trial'], {'ec_id': '"""foo"""'}), "(KeyError, ec.identify_trial, ec_id='foo')\n", (14326, 14368), False, 'import pytest\n'), ((14392, 14462), 'pytest.raises', 'pytest.raises', (['TypeError', 'ec.identify_trial'], {'ec_id': '"""foo"""', 'ttl_id': '"""bar"""'}), "(TypeError, ec.identify_trial, ec_id='foo', ttl_id='bar')\n", (14405, 14462), False, 'import pytest\n'), ((14471, 14540), 'pytest.raises', 'pytest.raises', (['ValueError', 'ec.identify_trial'], {'ec_id': '"""foo"""', 'ttl_id': '[2]'}), "(ValueError, ec.identify_trial, ec_id='foo', ttl_id=[2])\n", (14484, 14540), False, 'import pytest\n'), ((14799, 14870), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec.identify_trial'], {'ec_id': '"""foo"""', 'ttl_id': '[0]'}), "(RuntimeError, ec.identify_trial, ec_id='foo', ttl_id=[0])\n", (14812, 14870), False, 'import pytest\n'), ((14917, 14957), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec.trial_ok'], {}), '(RuntimeError, ec.trial_ok)\n', (14930, 14957), False, 'import pytest\n'), ((15535, 15581), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec.start_stimulus'], {}), '(RuntimeError, ec.start_stimulus)\n', (15548, 15581), False, 'import pytest\n'), ((15609, 15655), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec.identify_trial'], {}), '(RuntimeError, ec.identify_trial)\n', (15622, 15655), False, 'import pytest\n'), ((15728, 15774), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec.start_stimulus'], {}), '(RuntimeError, ec.start_stimulus)\n', (15741, 15774), False, 'import pytest\n'), ((15872, 15912), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec.trial_ok'], {}), '(RuntimeError, ec.trial_ok)\n', (15885, 15912), False, 'import pytest\n'), ((17607, 17622), 'expyfun._utils._wait_secs', 'wait_secs', (['(0.01)'], {}), '(0.01)\n', (17616, 17622), True, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, 
known_config_types, _new_pyglet\n'), ((18138, 18228), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'screen_num': 'screen_num', 'monitor': 'monitor'}), '(*std_args, screen_num=screen_num, monitor=monitor, **\n std_kwargs)\n', (18158, 18228), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((18349, 18412), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""resolution set incorrectly"""'}), "(RuntimeError, match='resolution set incorrectly')\n", (18362, 18412), False, 'import pytest\n'), ((18422, 18468), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {}), '(*std_args, **full_kwargs)\n', (18442, 18468), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((18478, 18526), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""must be a dict"""'}), "(TypeError, match='must be a dict')\n", (18491, 18526), False, 'import pytest\n'), ((18536, 18592), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'monitor': '(1)'}), '(*std_args, monitor=1, **std_kwargs)\n', (18556, 18592), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((18602, 18659), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""is missing required keys"""'}), "(KeyError, match='is missing required keys')\n", (18615, 18659), False, 'import pytest\n'), ((18669, 18726), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'monitor': '{}'}), '(*std_args, monitor={}, **std_kwargs)\n', (18689, 18726), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((18958, 19003), 'pytest.skip', 'pytest.skip', (['"""Cannot test TDT import failure"""'], {}), "('Cannot test TDT import failure')\n", (18969, 19003), False, 'import pytest\n'), ((19056, 19107), 'pytest.raises', 'pytest.raises', (['ImportError'], {'match': '"""No module named"""'}), "(ImportError, 
match='No module named')\n", (19069, 19107), False, 'import pytest\n'), ((19117, 19284), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'audio_controller': 'ac', 'response_device': '"""keyboard"""', 'trigger_controller': '"""tdt"""', 'stim_fs': '(100.0)', 'suppress_resamp': '(True)'}), "(*std_args, audio_controller=ac, response_device=\n 'keyboard', trigger_controller='tdt', stim_fs=100.0, suppress_resamp=\n True, **std_kwargs)\n", (19137, 19284), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((19462, 19716), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'audio_controller': '"""sound_card"""', 'response_device': '"""keyboard"""', 'window_size': 'None', 'output_dir': 'None', 'full_screen': '(False)', 'session': '"""01"""', 'participant': '"""foo"""', 'trigger_controller': '"""dummy"""', 'force_quit': '"""escape"""', 'version': '"""dev"""'}), "(*std_args, audio_controller='sound_card',\n response_device='keyboard', window_size=None, output_dir=None,\n full_screen=False, session='01', participant='foo', trigger_controller=\n 'dummy', force_quit='escape', version='dev')\n", (19482, 19716), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((19936, 19967), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""1"""', '(0.5)'], {}), "(ec, '1', 0.5)\n", (19953, 19967), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((20171, 20197), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""1"""'], {}), "(ec, '1')\n", (20188, 20197), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((20299, 20325), 'expyfun._utils.fake_button_press', 'fake_button_press', 
(['ec', '"""1"""'], {}), "(ec, '1')\n", (20316, 20325), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((20479, 20511), 'numpy.testing.assert_equal', 'assert_equal', (['presses[0][0]', '"""1"""'], {}), "(presses[0][0], '1')\n", (20491, 20511), False, 'from numpy.testing import assert_equal\n'), ((20599, 20625), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""1"""'], {}), "(ec, '1')\n", (20616, 20625), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((20831, 20876), 'numpy.testing.assert_equal', 'assert_equal', (['presses[0][::2]', "('1', 'press')"], {}), "(presses[0][::2], ('1', 'press'))\n", (20843, 20876), False, 'from numpy.testing import assert_equal\n'), ((20964, 20990), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""1"""'], {}), "(ec, '1')\n", (20981, 20990), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((21068, 21107), 'numpy.testing.assert_equal', 'assert_equal', (['presses', "[('1', 'press')]"], {}), "(presses, [('1', 'press')])\n", (21080, 21107), False, 'from numpy.testing import assert_equal\n'), ((21203, 21234), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""1"""', '(0.3)'], {}), "(ec, '1', 0.3)\n", (21220, 21234), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((21377, 21408), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""1"""', '(0.3)'], {}), "(ec, '1', 0.3)\n", (21394, 21408), False, 'from expyfun._utils import _TempDir, 
fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((21490, 21515), 'numpy.testing.assert_equal', 'assert_equal', (['out[0]', '"""1"""'], {}), "(out[0], '1')\n", (21502, 21515), False, 'from numpy.testing import assert_equal\n'), ((21524, 21555), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""a"""', '(0.3)'], {}), "(ec, 'a', 0.3)\n", (21541, 21555), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((21564, 21600), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""return"""', '(0.5)'], {}), "(ec, 'return', 0.5)\n", (21581, 21600), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((21647, 21678), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""a"""', '(0.3)'], {}), "(ec, 'a', 0.3)\n", (21664, 21678), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((21687, 21723), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""space"""', '(0.35)'], {}), "(ec, 'space', 0.35)\n", (21704, 21723), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((21732, 21771), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""backspace"""', '(0.4)'], {}), "(ec, 'backspace', 0.4)\n", (21749, 21771), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), 
((21780, 21816), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""comma"""', '(0.45)'], {}), "(ec, 'comma', 0.45)\n", (21797, 21816), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((21825, 21861), 'expyfun._utils.fake_button_press', 'fake_button_press', (['ec', '"""return"""', '(0.5)'], {}), "(ec, 'return', 0.5)\n", (21842, 21861), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((21938, 21951), 'expyfun._utils._new_pyglet', '_new_pyglet', ([], {}), '()\n', (21949, 21951), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((22259, 22360), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'participant': '"""foo"""', 'session': '"""01"""', 'output_dir': 'None', 'version': '"""dev"""'}), "(*std_args, participant='foo', session='01', output_dir\n =None, version='dev')\n", (22279, 22360), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((22408, 22442), 'expyfun.visual.Rectangle', 'visual.Rectangle', (['ec', '[0, 0, 2, 2]'], {}), '(ec, [0, 0, 2, 2])\n', (22424, 22442), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((22451, 22490), 'expyfun._utils.fake_mouse_click', 'fake_mouse_click', (['ec', '[1, 2]'], {'delay': '(0.3)'}), '(ec, [1, 2], delay=0.3)\n', (22467, 22490), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((22610, 22675), 'pytest.raises', 'pytest.raises', (['TypeError', 'ec.wait_for_click_on', '(rect, rect)', 
'(1.5)'], {}), '(TypeError, ec.wait_for_click_on, (rect, rect), 1.5)\n', (22623, 22675), False, 'import pytest\n'), ((22684, 22733), 'expyfun._utils.fake_mouse_click', 'fake_mouse_click', (['ec', '[2, 1]', '"""middle"""'], {'delay': '(0.3)'}), "(ec, [2, 1], 'middle', delay=0.3)\n", (22700, 22733), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((22841, 22880), 'numpy.testing.assert_equal', 'assert_equal', (['out[:3]', "('middle', 2, 1)"], {}), "(out[:3], ('middle', 2, 1))\n", (22853, 22880), False, 'from numpy.testing import assert_equal\n'), ((22889, 22936), 'expyfun._utils.fake_mouse_click', 'fake_mouse_click', (['ec', '[3, 2]', '"""left"""'], {'delay': '(0.3)'}), "(ec, [3, 2], 'left', delay=0.3)\n", (22905, 22936), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((22945, 22993), 'expyfun._utils.fake_mouse_click', 'fake_mouse_click', (['ec', '[4, 5]', '"""right"""'], {'delay': '(0.3)'}), "(ec, [4, 5], 'right', delay=0.3)\n", (22961, 22993), False, 'from expyfun._utils import _TempDir, fake_button_press, _check_skip_backend, fake_mouse_click, requires_opengl21, _wait_secs as wait_secs, known_config_types, _new_pyglet\n'), ((23401, 23502), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'participant': '"""foo"""', 'session': '"""01"""', 'output_dir': 'None', 'version': '"""dev"""'}), "(*std_args, participant='foo', session='01', output_dir\n =None, version='dev')\n", (23421, 23502), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((24617, 24666), 'numpy.testing.assert_equal', 'assert_equal', (["ec._ac._used_params['TDT_DELAY']", '(0)'], {}), "(ec._ac._used_params['TDT_DELAY'], 0)\n", (24629, 24666), False, 'from numpy.testing import 
assert_equal\n'), ((24845, 24894), 'numpy.testing.assert_equal', 'assert_equal', (["ec._ac._used_params['TDT_DELAY']", '(1)'], {}), "(ec._ac._used_params['TDT_DELAY'], 1)\n", (24857, 24894), False, 'from numpy.testing import assert_equal\n'), ((25757, 25826), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""SOUND_CARD_TRIGGER_CHANNELS is zer"""'}), "(ValueError, match='SOUND_CARD_TRIGGER_CHANNELS is zer')\n", (25770, 25826), False, 'import pytest\n'), ((25836, 25975), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'audio_controller': 'audio_controller', 'trigger_controller': '"""sound_card"""', 'suppress_resamp': '(True)'}), "(*std_args, audio_controller=audio_controller,\n trigger_controller='sound_card', suppress_resamp=True, **std_kwargs)\n", (25856, 25975), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((26237, 26395), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'audio_controller': 'audio_controller', 'trigger_controller': '"""sound_card"""', 'n_channels': '(1)', 'suppress_resamp': '(True)'}), "(*std_args, audio_controller=audio_controller,\n trigger_controller='sound_card', n_channels=1, suppress_resamp=True, **\n std_kwargs)\n", (26257, 26395), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((26771, 26902), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'audio_controller': 'audio_controller', 'trigger_controller': '"""sound_card"""', 'n_channels': '(1)'}), "(*std_args, audio_controller=audio_controller,\n trigger_controller='sound_card', n_channels=1, **std_kwargs)\n", (26791, 26902), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((27435, 27566), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'audio_controller': 'audio_controller', 'trigger_controller': '"""sound_card"""', 'n_channels': '(1)'}), "(*std_args, 
audio_controller=audio_controller,\n trigger_controller='sound_card', n_channels=1, **std_kwargs)\n", (27455, 27566), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((28102, 28233), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'audio_controller': 'audio_controller', 'trigger_controller': '"""sound_card"""', 'n_channels': '(1)'}), "(*std_args, audio_controller=audio_controller,\n trigger_controller='sound_card', n_channels=1, **std_kwargs)\n", (28122, 28233), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((28644, 28775), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'audio_controller': 'audio_controller', 'trigger_controller': '"""sound_card"""', 'n_channels': '(1)'}), "(*std_args, audio_controller=audio_controller,\n trigger_controller='sound_card', n_channels=1, **std_kwargs)\n", (28664, 28775), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((29121, 29252), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'audio_controller': 'audio_controller', 'trigger_controller': '"""sound_card"""', 'n_channels': '(1)'}), "(*std_args, audio_controller=audio_controller,\n trigger_controller='sound_card', n_channels=1, **std_kwargs)\n", (29141, 29252), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((29921, 29981), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'joystick': '(True)'}), '(*std_args, joystick=True, **std_kwargs)\n', (29941, 29981), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((2467, 2482), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2475, 2482), True, 'import numpy as np\n'), ((2742, 2792), 'numpy.zeros', 'np.zeros', (['(2, 1000)'], {'dtype': 'np.float32', 'order': 'order'}), '((2, 1000), dtype=np.float32, order=order)\n', (2750, 2792), True, 'import 
numpy as np\n'), ((3542, 3602), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'stim_fs': '(44100)'}), '(*std_args, stim_fs=44100, **std_kwargs)\n', (3562, 3602), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((7864, 7900), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (7887, 7900), False, 'import warnings\n'), ((7919, 7950), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (7940, 7950), False, 'import warnings\n'), ((9764, 9824), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec.get_presses'], {'kind': '"""releases"""'}), "(RuntimeError, ec.get_presses, kind='releases')\n", (9777, 9824), False, 'import pytest\n'), ((9837, 9893), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec.get_presses'], {'kind': '"""both"""'}), "(RuntimeError, ec.get_presses, kind='both')\n", (9850, 9893), False, 'import pytest\n'), ((10315, 10331), 'numpy.zeros', 'np.zeros', (['(100,)'], {}), '((100,))\n', (10323, 10331), True, 'import numpy as np\n'), ((10346, 10409), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""100 did not match .* count 2"""'}), "(ValueError, match='100 did not match .* count 2')\n", (10359, 10409), False, 'import pytest\n'), ((10471, 10534), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""100 did not match .* count 2"""'}), "(ValueError, match='100 did not match .* count 2')\n", (10484, 10534), False, 'import pytest\n'), ((10606, 10624), 'numpy.zeros', 'np.zeros', (['(1, 100)'], {}), '((1, 100))\n', (10614, 10624), True, 'import numpy as np\n'), ((10649, 10667), 'numpy.zeros', 'np.zeros', (['(2, 100)'], {}), '((2, 100))\n', (10657, 10667), True, 'import numpy as np\n'), ((10788, 10837), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec.load_buffer', 'data'], {}), '(RuntimeError, ec.load_buffer, data)\n', (10801, 10837), False, 'import pytest\n'), ((10908, 
10919), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (10916, 10919), True, 'import numpy as np\n'), ((11256, 11294), 'numpy.testing.assert_equal', 'assert_equal', (['ec._tc._trigger_list', '[]'], {}), '(ec._tc._trigger_list, [])\n', (11268, 11294), False, 'from numpy.testing import assert_equal\n'), ((11442, 11493), 'numpy.testing.assert_equal', 'assert_equal', (['ec._tc._trigger_list', '[3, 2, 2, 4, 8]'], {}), '(ec._tc._trigger_list, [3, 2, 2, 4, 8])\n', (11454, 11493), False, 'from numpy.testing import assert_equal\n'), ((11586, 11604), 'numpy.zeros', 'np.zeros', (['(100, 3)'], {}), '((100, 3))\n', (11594, 11604), True, 'import numpy as np\n'), ((11656, 11674), 'numpy.zeros', 'np.zeros', (['(3, 100)'], {}), '((3, 100))\n', (11664, 11674), True, 'import numpy as np\n'), ((11726, 11745), 'numpy.zeros', 'np.zeros', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (11734, 11745), True, 'import numpy as np\n'), ((12419, 12468), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""exceeds stated"""'}), "(UserWarning, match='exceeds stated')\n", (12431, 12468), False, 'import pytest\n'), ((12590, 12639), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""exceeds stated"""'}), "(UserWarning, match='exceeds stated')\n", (12602, 12639), False, 'import pytest\n'), ((12721, 12770), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""exceeds stated"""'}), "(UserWarning, match='exceeds stated')\n", (12733, 12770), False, 'import pytest\n'), ((13241, 13285), 'functools.partial', 'partial', (['dummy_print', '"""called start stimuli"""'], {}), "(dummy_print, 'called start stimuli')\n", (13248, 13285), False, 'from functools import partial\n'), ((13888, 13926), 'numpy.testing.assert_equal', 'assert_equal', (['ec._tc._trigger_list', '[]'], {}), '(ec._tc._trigger_list, [])\n', (13900, 13926), False, 'from numpy.testing import assert_equal\n'), ((14035, 14074), 'numpy.testing.assert_equal', 'assert_equal', (['ec._tc._trigger_list', '[1]'], {}), 
'(ec._tc._trigger_list, [1])\n', (14047, 14074), False, 'from numpy.testing import assert_equal\n'), ((15106, 15151), 'numpy.testing.assert_equal', 'assert_equal', (['ec._tc._trigger_list', '[4, 8, 1]'], {}), '(ec._tc._trigger_list, [4, 8, 1])\n', (15118, 15151), False, 'from numpy.testing import assert_equal\n'), ((15326, 15362), 'pytest.raises', 'pytest.raises', (['RuntimeError', 'ec.play'], {}), '(RuntimeError, ec.play)\n', (15339, 15362), False, 'import pytest\n'), ((27090, 27168), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Drift triggers overlap with onset triggers."""'}), "(UserWarning, match='Drift triggers overlap with onset triggers.')\n", (27102, 27168), False, 'import pytest\n'), ((27754, 27881), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Drift trigger at 1.1 seconds occurs outside stimulus window, not stamping trigger."""'}), "(UserWarning, match=\n 'Drift trigger at 1.1 seconds occurs outside stimulus window, not stamping trigger.'\n )\n", (27766, 27881), False, 'import pytest\n'), ((28421, 28481), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Some 2-triggers overlap.*"""'}), "(UserWarning, match='Some 2-triggers overlap.*')\n", (28433, 28481), False, 'import pytest\n'), ((28973, 28993), 'numpy.zeros', 'np.zeros', (['ec.stim_fs'], {}), '(ec.stim_fs)\n', (28981, 28993), True, 'import numpy as np\n'), ((29450, 29470), 'numpy.zeros', 'np.zeros', (['ec.stim_fs'], {}), '(ec.stim_fs)\n', (29458, 29470), True, 'import numpy as np\n'), ((1581, 1607), 'numpy.testing.assert_allclose', 'assert_allclose', (['verts', 'v2'], {}), '(verts, v2)\n', (1596, 1607), False, 'from numpy.testing import assert_allclose\n'), ((2306, 2332), 'expyfun._experiment_controller._get_dev_db', '_get_dev_db', (['ec.audio_type'], {}), '(ec.audio_type)\n', (2317, 2332), False, 'from expyfun._experiment_controller import _get_dev_db\n'), ((7529, 7544), 'expyfun.stimuli.get_tdt_rates', 'get_tdt_rates', ([], {}), '()\n', (7542, 
7544), False, 'from expyfun.stimuli import get_tdt_rates\n'), ((7968, 8127), 'expyfun.ExperimentController', 'ExperimentController', (['*std_args'], {'audio_controller': 'ac', 'response_device': 'rd', 'trigger_controller': 'tc', 'stim_fs': '(100.0)', 'suppress_resamp': 'suppress'}), '(*std_args, audio_controller=ac, response_device=rd,\n trigger_controller=tc, stim_fs=100.0, suppress_resamp=suppress, **\n std_kwargs)\n', (7988, 8127), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((10438, 10456), 'numpy.zeros', 'np.zeros', (['(100, 1)'], {}), '((100, 1))\n', (10446, 10456), True, 'import numpy as np\n'), ((10563, 10581), 'numpy.zeros', 'np.zeros', (['(100, 2)'], {}), '((100, 2))\n', (10571, 10581), True, 'import numpy as np\n'), ((10752, 10767), 'expyfun.stimuli.get_tdt_rates', 'get_tdt_rates', ([], {}), '()\n', (10765, 10767), False, 'from expyfun.stimuli import get_tdt_rates\n'), ((12947, 12997), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""samples is slow"""'}), "(UserWarning, match='samples is slow')\n", (12959, 12997), False, 'import pytest\n'), ((23979, 24033), 'expyfun.visual.Rectangle', 'visual.Rectangle', (['ec', '[0, 0, 1, 1]'], {'fill_color': '"""black"""'}), "(ec, [0, 0, 1, 1], fill_color='black')\n", (23995, 24033), False, 'from expyfun import ExperimentController, visual, _experiment_controller\n'), ((24320, 24356), 'numpy.logical_or', 'np.logical_or', (['gray_mask', 'black_mask'], {}), '(gray_mask, black_mask)\n', (24333, 24356), True, 'import numpy as np\n'), ((27226, 27246), 'numpy.zeros', 'np.zeros', (['ec.stim_fs'], {}), '(ec.stim_fs)\n', (27234, 27246), True, 'import numpy as np\n'), ((27958, 27978), 'numpy.zeros', 'np.zeros', (['ec.stim_fs'], {}), '(ec.stim_fs)\n', (27966, 27978), True, 'import numpy as np\n'), ((28510, 28530), 'numpy.zeros', 'np.zeros', (['ec.stim_fs'], {}), '(ec.stim_fs)\n', (28518, 28530), True, 'import numpy as np\n'), ((13030, 13059), 'numpy.zeros', 'np.zeros', 
(['(2)'], {'dtype': 'np.float32'}), '(2, dtype=np.float32)\n', (13038, 13059), True, 'import numpy as np\n'), ((17478, 17513), 'numpy.array', 'np.array', (["std_kwargs['window_size']"], {}), "(std_kwargs['window_size'])\n", (17486, 17513), True, 'import numpy as np\n'), ((25353, 25363), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (25360, 25363), True, 'import numpy as np\n')]
import dgl
from . import register_model, BaseModel
import torch.nn as nn
import numpy as np
import dgl.nn.pytorch as dglnn
import torch
import torch.nn.functional as F


@register_model('DMGI')
class DMGI(BaseModel):
    r"""
    Description
    -----------
    **Title:** Unsupervised Attributed Multiplex Network Embedding

    DMGI was introduced in `[paper] <https://ojs.aaai.org//index.php/AAAI/article/view/5985>`_.

    Each relation type (meta-path) :math:`r` is encoded with its own
    single-layer GCN

    .. math::
        \begin{equation}
        \mathbf{H}^{(r)}=g_{r}\left(\mathbf{X}, \mathbf{A}^{(r)} \mid \mathbf{W}^{(r)}\right)
        =\sigma\left(\hat{\mathbf{D}}_{r}^{-\frac{1}{2}} \hat{\mathbf{A}}^{(r)}
        \hat{\mathbf{D}}_{r}^{-\frac{1}{2}} \mathbf{X} \mathbf{W}^{(r)}\right)
        \end{equation}

    where :math:`\hat{\mathbf{A}}^{(r)}=\mathbf{A}^{(r)}+w \mathbf{I}_{n}` and
    :math:`\hat{D}_{i i}=\sum_{j} \hat{A}_{i j}`.  A shared consensus embedding
    ``self.H`` is trained to stay close to the positive relation embeddings and
    far from the corrupted (row-shuffled) ones.

    Parameters
    ----------
    meta_paths : list
        Meta-paths (pairs of mutually inverse canonical edge types) extracted
        from the graph.
    sc : int
        How many times a self-loop is added to each meta-path graph
        (weight of self-connections).
    category : str
        The node type whose nodes are embedded / classified.
    in_size : int
        Input feature size.
    hid_unit : int
        Hidden (embedding) size.
    nheads : int
        Number of attention heads (only used when ``isAttn`` is True).
    dropout : float
        Dropout rate applied to the (shuffled) features. Defaults: ``0.5``.
    num_nodes : int
        The number of nodes of ``category`` in the graph.
    num_classes : int
        The number of classes of ``category`` nodes.
    isBias : bool
        If True, the GCN encoders learn an additive bias. Defaults: ``False``.
    isAttn : bool
        If True, relation embeddings are fused with a learned attention
        mechanism instead of a plain average. Defaults: ``False``.
    isSemi : bool
        If True, a semi-supervised logistic-regression head over ``self.H``
        is added to the loss.

    Attributes
    ----------
    H : torch.nn.Parameter
        The trainable consensus embedding of shape ``(1, num_nodes, hid_unit)``.
    """

    @classmethod
    def build_model_from_args(cls, args, hg):
        """Extract the symmetric meta-paths of ``args.category`` from ``hg``
        and construct the model.

        A meta-path here is a pair ``(src->dst, dst->src)`` of mutually
        inverse relations that starts and ends at the target category and
        passes through a *different* node type.
        """
        etypes = hg.canonical_etypes
        mps = []
        for etype in etypes:
            if etype[0] == args.category:
                for dst_e in etypes:
                    # inverse relation, and not a self-relation
                    if etype[0] == dst_e[2] and etype[2] == dst_e[0] and etype[0] != etype[2]:
                        mps.append([etype, dst_e])
        num_nodes = hg.num_nodes(args.category)
        return cls(meta_paths=mps, sc=args.sc,
                   category=args.category,
                   in_size=args.in_dim, hid_unit=args.hid_unit,
                   nheads=args.num_heads, dropout=args.dropout,
                   num_nodes=num_nodes, num_classes=args.num_classes,
                   isSemi=args.isSemi, isAttn=args.isAttn, isBias=args.isBias)

    def __init__(self, meta_paths, sc, category, in_size, hid_unit, nheads,
                 dropout, num_nodes, num_classes, isBias, isAttn, isSemi):
        super().__init__()
        self.category = category
        self.hid = hid_unit
        self.meta_paths = meta_paths
        self.nheads = nheads
        self.isAttn = isAttn
        self.isSemi = isSemi
        self.sc = sc
        # One single-layer GCN encoder per meta-path (relation type).
        self.gcn = nn.ModuleList([
            dglnn.GraphConv(in_feats=in_size, out_feats=hid_unit,
                            activation=nn.ReLU(), bias=isBias,
                            allow_zero_in_degree=True)
            for _ in range(len(meta_paths))
        ])
        self.disc = Discriminator(hid_unit)
        self.readout = AvgReadout()
        self.readout_act_func = nn.Sigmoid()
        self.dropout = dropout
        self.num_nodes = num_nodes
        # Consensus embedding Z, shared across all relations.
        self.H = nn.Parameter(torch.FloatTensor(1, num_nodes, hid_unit))
        self.logistic = LogReg(hid_unit, num_classes)
        if self.isAttn:
            # NOTE: ``num_ndoes`` is a historical typo in Attention's
            # signature; kept for backward compatibility with callers.
            self.attn = nn.ModuleList(
                Attention(hid_units=hid_unit, num_mps=len(meta_paths),
                          num_ndoes=num_nodes)
                for _ in range(nheads))
        self.init_weight()
        print("category:{}, category's classes:{}, isBias:{},"
              " isAttn:{}, isSemi:{}".format(category, num_classes, isBias, isAttn, isSemi))

    def init_weight(self):
        """Xavier-initialize the consensus embedding."""
        nn.init.xavier_normal_(self.H)

    def forward(self, hg, samp_bias1=None, samp_bias2=None):
        r"""Compute the per-relation discriminator logits, the consensus
        regularization loss and (optionally) the semi-supervised logits.

        The relation-type specific cross entropy is

        .. math::
            \begin{equation}
            \mathcal{L}^{(r)}=\sum_{v_{i} \in \mathcal{V}}^{n}
            \log \mathcal{D}\left(\mathbf{h}_{i}^{(r)}, \mathbf{s}^{(r)}\right)
            +\sum_{j=1}^{n} \log \left(1-\mathcal{D}\left(
            \tilde{\mathbf{h}}_{j}^{(r)}, \mathbf{s}^{(r)}\right)\right)
            \end{equation}

        where :math:`\mathbf{s}^{(r)}=\operatorname{Readout}(\mathbf{H}^{(r)})`
        and :math:`\tilde{\mathbf{h}}^{(r)}` comes from the row-shuffled
        (corrupted) attribute matrix.

        Parameters
        ----------
        hg : dgl.DGLHeteroGraph
            The heterogeneous graph; node features are read from
            ``hg.srcdata['h']``.
        samp_bias1, samp_bias2 : torch.Tensor, optional
            Optional score biases forwarded to the discriminator.
            Default: ``None``.

        Returns
        -------
        dict
            ``'logits'`` (list of per-relation discriminator logits),
            ``'reg_loss'`` (consensus regularization) and, when ``isSemi``,
            ``'semi'`` (classification logits over ``self.H``).
        """
        result = {}
        pos_embeds, neg_embeds, summaries, logits = [], [], [], []

        # Row-normalized features and their row-shuffled corruption.
        features = hg.srcdata['h']
        feats = self.normal_feat(features, self.meta_paths)
        shuf_feats = self.shuf_feats(feats)

        for idx, meta_path in enumerate(self.meta_paths):
            new_g = dgl.metapath_reachable_graph(hg, meta_path)
            # Each pass adds one self-loop per node: self-connection weight sc.
            for _ in range(self.sc):
                new_g = dgl.add_self_loop(new_g)
            feats[idx] = F.dropout(feats[idx], self.dropout, training=self.training)
            shuf_feats[idx] = F.dropout(shuf_feats[idx], self.dropout, training=self.training)

            h_1 = self.gcn[idx](new_g, feats[idx])          # positive patch embeddings
            c = self.readout_act_func(self.readout(h_1))    # summary s^(r)
            h_2 = self.gcn[idx](new_g, shuf_feats[idx])     # corrupted embeddings
            logit = self.disc(c, h_1, h_2, samp_bias1, samp_bias2)

            pos_embeds.append(h_1.unsqueeze(0))
            neg_embeds.append(h_2.unsqueeze(0))
            summaries.append(c)
            logits.append(logit)
        result['logits'] = logits

        if self.isAttn:
            r"""
            .. math::
                \begin{equation}
                \mathbf{h}_{i}=\mathcal{Q}\left(\left\{\mathbf{h}^{(r)}
                \mid r \in \mathcal{R}\right\}\right)
                =\sum_{r \in \mathcal{R}} a_{i}^{(r)} \mathbf{h}^{(r)}
                \end{equation}

            with

            .. math::
                \begin{equation}
                a_{i}^{(r)}=\frac{\exp \left(\mathbf{q}^{(r)} \cdot
                \mathbf{h}_{i}^{(r)}\right)}{\sum_{r^{\prime} \in \mathcal{R}}
                \exp \left(\mathbf{q}^{\left(r^{\prime}\right)} \cdot
                \mathbf{h}_{i}^{r^{\prime}}\right)}
                \end{equation}
            """
            head_pos, head_neg = [], []
            for h_idx in range(self.nheads):
                p, n, _ = self.attn[h_idx](pos_embeds, neg_embeds, summaries)
                head_pos.append(p)
                head_neg.append(n)
            h_1_all = torch.mean(torch.cat(head_pos, 0), 0).unsqueeze(0)
            h_2_all = torch.mean(torch.cat(head_neg, 0), 0).unsqueeze(0)
        else:
            h_1_all = torch.mean(torch.cat(pos_embeds, 0), 0).unsqueeze(0)
            h_2_all = torch.mean(torch.cat(neg_embeds, 0), 0).unsqueeze(0)

        # Lcs = [Z - AVG{H^(r)}]^2 - [Z - AVG{~H^(r)}]^2
        pos_reg_loss = ((self.H - h_1_all) ** 2).sum()
        neg_reg_loss = ((self.H - h_2_all) ** 2).sum()
        result['reg_loss'] = pos_reg_loss - neg_reg_loss

        if self.isSemi:
            r"""
            Extension to semi-supervised learning:

            .. math::
                \begin{equation}
                \ell_{\text {sup }}=-\frac{1}{\left|\mathcal{Y}_{L}\right|}
                \sum_{l \in \mathcal{Y}_{L}} \sum_{i=1}^{c} Y_{l i} \ln \hat{Y}_{l i}
                \end{equation}

            where :math:`\mathcal{Y}_{L}` is the set of labelled node indices.
            """
            result['semi'] = self.logistic(self.H).squeeze(0)

        # result keys: ['logits', 'reg_loss'] (+ 'semi' when isSemi)
        return result

    def normal_feat(self, feats, meta_paths):
        """Row-normalize the raw features of ``self.category`` nodes and
        return one normalized tensor per meta-path.

        Fix vs. the original implementation: it built an ``n x n`` dense
        diagonal matrix (``torch.diag`` + ``torch.spmm``) — O(n^2) memory —
        and re-ran the normalization once per meta-path even though row
        normalization is idempotent (after the first pass each row sums to
        1 or 0, so later passes are exact identities).  Here we normalize
        once with an element-wise broadcast multiply, O(n * d).
        """
        feats = feats[self.category].data
        r_inv = torch.pow(feats.sum(1), -1).flatten()
        r_inv[torch.isinf(r_inv)] = 0.  # all-zero rows stay zero
        normalized = feats * r_inv.unsqueeze(1)  # == diag(r_inv) @ feats
        # The same tensor can be shared: downstream code only rebinds the
        # list slots (F.dropout / fancy indexing), never mutates in place.
        return [normalized for _ in meta_paths]

    def shuf_feats(self, feats):
        """Corrupt each attribute matrix by shuffling its rows
        (negative samples for the discriminator)."""
        shuffled = []
        for feat in feats:
            perm = np.random.permutation(feat.shape[0])
            shuffled.append(feat[perm])
        return shuffled


class Attention(nn.Module):
    """Attention over relation types: some relations are more useful for a
    given downstream task than others, so their embeddings are fused with
    learned, per-node weights.

    Parameters
    ----------
    hid_units : int
        Embedding size.
    num_mps : int
        Number of meta-paths (relation types).
    num_ndoes : int
        Number of nodes.  (The parameter name is a historical typo, kept
        for backward compatibility with keyword callers.)
    """

    def __init__(self, hid_units, num_mps, num_ndoes):
        super().__init__()
        self.num_mps = num_mps
        self.hid_units = hid_units
        self.num_nodes = num_ndoes
        # One scoring vector q^(r) per relation.
        self.A = nn.ModuleList([nn.Linear(hid_units, 1) for _ in range(num_mps)])
        self.weight_init()

    def weight_init(self):
        """Xavier weights, zero biases, for every scoring vector."""
        for layer in self.A:
            nn.init.xavier_normal_(layer.weight)
            layer.bias.data.fill_(0.0)

    def forward(self, feat_pos, feat_neg, summary):
        """Fuse the positive embeddings, negative embeddings and summaries
        with attention; attention weights themselves are discarded."""
        feat_pos, _ = self.attn_feature(feat_pos)
        feat_neg, _ = self.attn_feature(feat_neg)
        summary, _ = self.attn_summary(summary)
        return feat_pos, feat_neg, summary

    def attn_feature(self, features):
        """Weight the per-relation node embeddings by softmax attention and
        sum them into a single ``(1, num_nodes, hid_units)`` tensor."""
        scores = [self.A[i](features[i].squeeze()) for i in range(self.num_mps)]
        features_attn = F.softmax(torch.cat(scores, 1), -1)
        flat = torch.cat(features, 1).squeeze(0)
        attn_col = features_attn.transpose(1, 0).contiguous().view(-1, 1)
        weighted = flat * attn_col.expand_as(flat)
        fused = weighted.view(self.num_mps, self.num_nodes, self.hid_units).sum(0).unsqueeze(0)
        return fused, features_attn

    def attn_summary(self, features):
        """Weight the per-relation summary vectors by softmax attention and
        sum them into a single summary."""
        scores = [self.A[i](features[i].squeeze()) for i in range(self.num_mps)]
        features_attn = F.softmax(torch.cat(scores), dim=-1).unsqueeze(1)
        stacked = torch.stack(features, 0)
        weights = features_attn.expand_as(stacked)
        fused = (stacked * weights).sum(0).unsqueeze(0)
        return fused, features_attn


class Discriminator(nn.Module):
    r"""Discriminator scoring patch/summary representation pairs with a
    simple bilinear form (empirically the best-performing choice):

    .. math::
        \begin{equation}
        \mathcal{D}\left(\mathbf{h}_{i}^{(r)}, \mathbf{s}^{(r)}\right)
        =\sigma\left(\mathbf{h}_{i}^{(r) T} \mathbf{M}^{(r)} \mathbf{s}^{(r)}\right)
        \end{equation}

    where :math:`M^{(r)}` is a trainable scoring matrix.

    Parameters
    ----------
    n_h : int
        Embedding size.
    """

    def __init__(self, n_h):
        super().__init__()
        self.f_k_bilinear = nn.Bilinear(n_h, n_h, 1)
        for m in self.modules():
            self.weights_init(m)

    def weights_init(self, m):
        """Xavier weights, zero bias, for the bilinear layer."""
        if isinstance(m, nn.Bilinear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
        """Score positive (``h_pl``) and corrupted (``h_mi``) embeddings
        against the broadcast summary ``c``; returns the concatenated
        (positive then negative) scores."""
        c_x = c.expand_as(h_pl)
        sc_1 = torch.squeeze(self.f_k_bilinear(h_pl, c_x), 1)  # 1 x nb_nodes
        sc_2 = torch.squeeze(self.f_k_bilinear(h_mi, c_x), 1)  # 1 x nb_nodes
        if s_bias1 is not None:
            sc_1 += s_bias1
        if s_bias2 is not None:
            sc_2 += s_bias2
        return torch.cat((sc_1, sc_2), 0)


class AvgReadout(nn.Module):
    r"""Average-pooling readout — for efficiency we simply average the set
    of embedding matrices:

    .. math::
        \begin{equation}
        \mathbf{H}=\mathcal{Q}\left(\left\{\mathbf{H}^{(r)} \mid r \in
        \mathcal{R}\right\}\right)=\frac{1}{|\mathcal{R}|}
        \sum_{r \in \mathcal{R}} \mathbf{H}^{(r)}
        \end{equation}
    """

    def __init__(self):
        super().__init__()

    def forward(self, seq):
        """Mean over the first dimension."""
        return torch.mean(seq, 0)


class LogReg(nn.Module):
    """Logistic-regression head for the semi-supervised extension.

    Parameters
    ----------
    ft_in : int
        Input (hidden) size.
    nb_classes : int
        Number of target classes.
    """

    def __init__(self, ft_in, nb_classes):
        super().__init__()
        self.fc = nn.Linear(ft_in, nb_classes)
        for m in self.modules():
            self.weights_init(m)

    def weights_init(self, m):
        """Xavier weights, zero bias, for the linear layer."""
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def forward(self, seq):
        """Return the class logits for ``seq``."""
        return self.fc(seq)
[ "torch.nn.Sigmoid", "dgl.add_self_loop", "torch.nn.ReLU", "torch.mean", "torch.nn.init.xavier_uniform_", "torch.nn.Bilinear", "torch.stack", "dgl.metapath_reachable_graph", "torch.diag", "torch.nn.functional.dropout", "torch.nn.init.xavier_normal_", "torch.pow", "torch.nn.Linear", "torch.s...
[((3781, 3793), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3791, 3793), True, 'import torch.nn as nn\n'), ((4589, 4619), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.H'], {}), '(self.H)\n', (4611, 4619), True, 'import torch.nn as nn\n'), ((11598, 11622), 'torch.stack', 'torch.stack', (['features', '(0)'], {}), '(features, 0)\n', (11609, 11622), False, 'import torch\n'), ((12438, 12462), 'torch.nn.Bilinear', 'nn.Bilinear', (['n_h', 'n_h', '(1)'], {}), '(n_h, n_h, 1)\n', (12449, 12462), True, 'import torch.nn as nn\n'), ((13138, 13164), 'torch.cat', 'torch.cat', (['(sc_1, sc_2)', '(0)'], {}), '((sc_1, sc_2), 0)\n', (13147, 13164), False, 'import torch\n'), ((13786, 13804), 'torch.mean', 'torch.mean', (['seq', '(0)'], {}), '(seq, 0)\n', (13796, 13804), False, 'import torch\n'), ((14091, 14119), 'torch.nn.Linear', 'nn.Linear', (['ft_in', 'nb_classes'], {}), '(ft_in, nb_classes)\n', (14100, 14119), True, 'import torch.nn as nn\n'), ((3913, 3954), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', 'num_nodes', 'hid_unit'], {}), '(1, num_nodes, hid_unit)\n', (3930, 3954), False, 'import torch\n'), ((6031, 6074), 'dgl.metapath_reachable_graph', 'dgl.metapath_reachable_graph', (['hg', 'meta_path'], {}), '(hg, meta_path)\n', (6059, 6074), False, 'import dgl\n'), ((6187, 6246), 'torch.nn.functional.dropout', 'F.dropout', (['feats[idx]', 'self.dropout'], {'training': 'self.training'}), '(feats[idx], self.dropout, training=self.training)\n', (6196, 6246), True, 'import torch.nn.functional as F\n'), ((6277, 6341), 'torch.nn.functional.dropout', 'F.dropout', (['shuf_feats[idx]', 'self.dropout'], {'training': 'self.training'}), '(shuf_feats[idx], self.dropout, training=self.training)\n', (6286, 6341), True, 'import torch.nn.functional as F\n'), ((9384, 9401), 'torch.diag', 'torch.diag', (['r_inv'], {}), '(r_inv)\n', (9394, 9401), False, 'import torch\n'), ((9422, 9450), 'torch.spmm', 'torch.spmm', (['r_mat_inv', 'feats'], {}), '(r_mat_inv, feats)\n', 
(9432, 9450), False, 'import torch\n'), ((9668, 9704), 'numpy.random.permutation', 'np.random.permutation', (['feat.shape[0]'], {}), '(feat.shape[0])\n', (9689, 9704), True, 'import numpy as np\n'), ((10367, 10407), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.A[i].weight'], {}), '(self.A[i].weight)\n', (10389, 10407), True, 'import torch.nn as nn\n'), ((10938, 10965), 'torch.cat', 'torch.cat', (['features_attn', '(1)'], {}), '(features_attn, 1)\n', (10947, 10965), False, 'import torch\n'), ((12613, 12657), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['m.weight.data'], {}), '(m.weight.data)\n', (12642, 12657), False, 'import torch\n'), ((14268, 14312), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['m.weight.data'], {}), '(m.weight.data)\n', (14297, 14312), False, 'import torch\n'), ((6136, 6160), 'dgl.add_self_loop', 'dgl.add_self_loop', (['new_g'], {}), '(new_g)\n', (6153, 6160), False, 'import dgl\n'), ((9335, 9353), 'torch.isinf', 'torch.isinf', (['r_inv'], {}), '(r_inv)\n', (9346, 9353), False, 'import torch\n'), ((10213, 10236), 'torch.nn.Linear', 'nn.Linear', (['hid_units', '(1)'], {}), '(hid_units, 1)\n', (10222, 10236), True, 'import torch.nn as nn\n'), ((10991, 11013), 'torch.cat', 'torch.cat', (['features', '(1)'], {}), '(features, 1)\n', (11000, 11013), False, 'import torch\n'), ((9285, 9306), 'torch.pow', 'torch.pow', (['rowsum', '(-1)'], {}), '(rowsum, -1)\n', (9294, 9306), False, 'import torch\n'), ((11532, 11556), 'torch.cat', 'torch.cat', (['features_attn'], {}), '(features_attn)\n', (11541, 11556), False, 'import torch\n'), ((3483, 3492), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3490, 3492), True, 'import torch.nn as nn\n'), ((7913, 7938), 'torch.cat', 'torch.cat', (['h_1_all_lst', '(0)'], {}), '(h_1_all_lst, 0)\n', (7922, 7938), False, 'import torch\n'), ((7989, 8014), 'torch.cat', 'torch.cat', (['h_2_all_lst', '(0)'], {}), '(h_2_all_lst, 0)\n', (7998, 8014), False, 'import 
torch\n'), ((8080, 8101), 'torch.cat', 'torch.cat', (['h_1_all', '(0)'], {}), '(h_1_all, 0)\n', (8089, 8101), False, 'import torch\n'), ((8152, 8173), 'torch.cat', 'torch.cat', (['h_2_all', '(0)'], {}), '(h_2_all, 0)\n', (8161, 8173), False, 'import torch\n')]
# MIT License # # Copyright (c) 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import numpy as np from PIL import Image from dpemu.nodes import Array from dpemu.filters import Constant, Identity, Subtraction # generate image with bitwise operations data = [] for y in range(0, 512): data.append([]) for x in range(0, 512): data[y].append((x ^ y, x & y, 0)) data = np.array(data, dtype=np.uint8) # show original image img_original = Image.fromarray(data, "RGB") img_original.show() # generate error root_node = Array() # add filter which subtracts each pixel value from 255 root_node.addfilter(Subtraction("const", "identity")) out = root_node.generate_error(data, {'c': 255, 'const': Constant("c"), 'identity': Identity()}) # show modified image img_modified = Image.fromarray(out, "RGB") img_modified.show() print(out) print("output shape:", out.shape, ", output dtype:", out.dtype)
[ "PIL.Image.fromarray", "dpemu.filters.Constant", "dpemu.filters.Identity", "dpemu.filters.Subtraction", "numpy.array", "dpemu.nodes.Array" ]
[((1437, 1467), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.uint8'}), '(data, dtype=np.uint8)\n', (1445, 1467), True, 'import numpy as np\n'), ((1506, 1534), 'PIL.Image.fromarray', 'Image.fromarray', (['data', '"""RGB"""'], {}), "(data, 'RGB')\n", (1521, 1534), False, 'from PIL import Image\n'), ((1585, 1592), 'dpemu.nodes.Array', 'Array', ([], {}), '()\n', (1590, 1592), False, 'from dpemu.nodes import Array\n'), ((1837, 1864), 'PIL.Image.fromarray', 'Image.fromarray', (['out', '"""RGB"""'], {}), "(out, 'RGB')\n", (1852, 1864), False, 'from PIL import Image\n'), ((1668, 1700), 'dpemu.filters.Subtraction', 'Subtraction', (['"""const"""', '"""identity"""'], {}), "('const', 'identity')\n", (1679, 1700), False, 'from dpemu.filters import Constant, Identity, Subtraction\n'), ((1759, 1772), 'dpemu.filters.Constant', 'Constant', (['"""c"""'], {}), "('c')\n", (1767, 1772), False, 'from dpemu.filters import Constant, Identity, Subtraction\n'), ((1786, 1796), 'dpemu.filters.Identity', 'Identity', ([], {}), '()\n', (1794, 1796), False, 'from dpemu.filters import Constant, Identity, Subtraction\n')]
#!/usr/bin/env python # coding: utf-8 # In[2]: import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() # # Importing dataset # 1.Since data is in form of excel file we have to use pandas read_excel to load the data. # # 2.After loading it is important to check the complete information of data as it can indicate many of the hidden infomation such as null values in a column or a row # # 3.Check whether any null values are there or not. If it is present then following can be done, # # A.Imputing data using Imputation method in sklearn # # B.Filling NaN values with mean, median and mode using fillna() method # # 4.Describe data --> which can give statistical analysis # In[5]: train_data = pd.read_excel(r"/Users/dhanyashreegowda/Desktop/github/Flight-Fare-Prediction/Data_Train.xlsx") # In[7]: pd.set_option('display.max_columns', None) # In[8]: train_data.head() # In[9]: train_data.info() # In[10]: train_data["Duration"].value_counts() # In[11]: train_data.shape # In[12]: train_data.dropna(inplace = True) # In[13]: train_data.isnull().sum() # In[14]: train_data.shape # ### EDA # From description we can see that Date_of_Journey is a object data type. # # Therefore, we have to convert this datatype into timestamp so as to use this column properly for prediction # # For this we require pandas to_datetime to convert object data type to datetime dtype. # # **.dt.day method will extract only day of that date**\ # **.dt.month method will extract only month of that date** # In[15]: train_data["Journey_day"] = pd.to_datetime(train_data.Date_of_Journey, format="%d/%m/%Y").dt.day # In[16]: train_data["Journey_month"] = pd.to_datetime(train_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month # In[17]: train_data.head() # In[18]: # Since we have converted Date_of_Journey column into integers, Now we can drop as it is of no use. 
train_data.drop(["Date_of_Journey"], axis = 1, inplace = True) # In[19]: # Departure time is when a plane leaves the gate. # Similar to Date_of_Journey we can extract values from Dep_Time # Extracting Hours train_data["Dep_hour"] = pd.to_datetime(train_data["Dep_Time"]).dt.hour # Extracting Minutes train_data["Dep_min"] = pd.to_datetime(train_data["Dep_Time"]).dt.minute # Now we can drop Dep_Time as it is of no use train_data.drop(["Dep_Time"], axis = 1, inplace = True) # In[20]: train_data.head() # In[21]: # Arrival time is when the plane pulls up to the gate. # Similar to Date_of_Journey we can extract values from Arrival_Time # Extracting Hours train_data["Arrival_hour"] = pd.to_datetime(train_data.Arrival_Time).dt.hour # Extracting Minutes train_data["Arrival_min"] = pd.to_datetime(train_data.Arrival_Time).dt.minute # Now we can drop Arrival_Time as it is of no use train_data.drop(["Arrival_Time"], axis = 1, inplace = True) # In[22]: train_data.head() # In[23]: # Time taken by plane to reach destination is called Duration # It is the differnce betwwen Departure Time and Arrival time # Assigning and converting Duration column into list duration = list(train_data["Duration"]) for i in range(len(duration)): if len(duration[i].split()) != 2: # Check if duration contains only hour or mins if "h" in duration[i]: duration[i] = duration[i].strip() + " 0m" # Adds 0 minute else: duration[i] = "0h " + duration[i] # Adds 0 hour duration_hours = [] duration_mins = [] for i in range(len(duration)): duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration # In[25]: # Adding duration_hours and duration_mins list to train_data dataframe train_data["Duration_hours"] = duration_hours train_data["Duration_mins"] = duration_mins # In[26]: train_data.head() # In[27]: train_data.drop(["Duration"], axis = 1, inplace = True) # In[28]: 
train_data.head() # # ### Handling Categorical Data # One can find many ways to handle categorical data. Some of them categorical data are, # # **Nominal data** --> data are not in any order --> **OneHotEncoder** is used in this case # # **Ordinal data** --> data are in order --> **LabelEncoder** is used in this case # In[30]: train_data["Airline"].value_counts() # In[31]: # From graph we can see that Jet Airways Business have the highest Price. # Apart from the first Airline almost all are having similar median # Airline vs Price sns.catplot(y = "Price", x = "Airline", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 6, aspect = 3) plt.show() # In[32]: # As Airline is Nominal Categorical data we will perform OneHotEncoding Airline = train_data[["Airline"]] Airline = pd.get_dummies(Airline, drop_first= True) Airline.head() # In[33]: train_data["Source"].value_counts() # In[34]: # Source vs Price sns.catplot(y = "Price", x = "Source", data = train_data.sort_values("Price", ascending = False), kind="boxen", height = 4, aspect = 3) plt.show() # In[35]: # As Source is Nominal Categorical data we will perform OneHotEncoding Source = train_data[["Source"]] Source = pd.get_dummies(Source, drop_first= True) Source.head() # In[36]: train_data["Destination"].value_counts() # In[37]: # As Destination is Nominal Categorical data we will perform OneHotEncoding Destination = train_data[["Destination"]] Destination = pd.get_dummies(Destination, drop_first = True) Destination.head() # In[38]: train_data["Route"] # In[39]: # Additional_Info contains almost 80% no_info # Route and Total_Stops are related to each other train_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True) # In[40]: train_data.head() # In[41]: train_data["Total_Stops"].value_counts() # In[42]: # As this is case of Ordinal Categorical type we perform LabelEncoder # Here Values are assigned with corresponding keys train_data.replace({"non-stop": 0, "1 stop": 1, "2 
stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True) # In[43]: train_data.head() # In[44]: # Concatenate dataframe --> train_data + Airline + Source + Destination data_train = pd.concat([train_data, Airline, Source, Destination], axis = 1) # In[45]: data_train.head() # In[46]: data_train.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True) # In[47]: data_train.head() # In[48]: data_train.shape # ## Test set # # In[49]: test_data = pd.read_excel(r"/Users/dhanyashreegowda/Desktop/github/Flight-Fare-Prediction/Test_set.xlsx") # In[50]: test_data.head() # In[51]: # Preprocessing print("Test data Info") print("-"*75) print(test_data.info()) print() print() print("Null values :") print("-"*75) test_data.dropna(inplace = True) print(test_data.isnull().sum()) # EDA # Date_of_Journey test_data["Journey_day"] = pd.to_datetime(test_data.Date_of_Journey, format="%d/%m/%Y").dt.day test_data["Journey_month"] = pd.to_datetime(test_data["Date_of_Journey"], format = "%d/%m/%Y").dt.month test_data.drop(["Date_of_Journey"], axis = 1, inplace = True) # Dep_Time test_data["Dep_hour"] = pd.to_datetime(test_data["Dep_Time"]).dt.hour test_data["Dep_min"] = pd.to_datetime(test_data["Dep_Time"]).dt.minute test_data.drop(["Dep_Time"], axis = 1, inplace = True) # Arrival_Time test_data["Arrival_hour"] = pd.to_datetime(test_data.Arrival_Time).dt.hour test_data["Arrival_min"] = pd.to_datetime(test_data.Arrival_Time).dt.minute test_data.drop(["Arrival_Time"], axis = 1, inplace = True) # Duration duration = list(test_data["Duration"]) for i in range(len(duration)): if len(duration[i].split()) != 2: # Check if duration contains only hour or mins if "h" in duration[i]: duration[i] = duration[i].strip() + " 0m" # Adds 0 minute else: duration[i] = "0h " + duration[i] # Adds 0 hour duration_hours = [] duration_mins = [] for i in range(len(duration)): duration_hours.append(int(duration[i].split(sep = "h")[0])) # Extract hours from duration 
duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) # Extracts only minutes from duration # Adding Duration column to test set test_data["Duration_hours"] = duration_hours test_data["Duration_mins"] = duration_mins test_data.drop(["Duration"], axis = 1, inplace = True) # Categorical data print("Airline") print("-"*75) print(test_data["Airline"].value_counts()) Airline = pd.get_dummies(test_data["Airline"], drop_first= True) print() print("Source") print("-"*75) print(test_data["Source"].value_counts()) Source = pd.get_dummies(test_data["Source"], drop_first= True) print() print("Destination") print("-"*75) print(test_data["Destination"].value_counts()) Destination = pd.get_dummies(test_data["Destination"], drop_first = True) # Additional_Info contains almost 80% no_info # Route and Total_Stops are related to each other test_data.drop(["Route", "Additional_Info"], axis = 1, inplace = True) # Replacing Total_Stops test_data.replace({"non-stop": 0, "1 stop": 1, "2 stops": 2, "3 stops": 3, "4 stops": 4}, inplace = True) # Concatenate dataframe --> test_data + Airline + Source + Destination data_test = pd.concat([test_data, Airline, Source, Destination], axis = 1) data_test.drop(["Airline", "Source", "Destination"], axis = 1, inplace = True) print() print() print("Shape of test data : ", data_test.shape) # In[52]: data_test.head() # ## Feature Selection # Finding out the best feature which will contribute and have good relation with target variable. 
Following are some of the feature selection methods, # # 1.**heatmap** # # 2.**feature_importance_** # # 3.**SelectKBest** # # In[54]: data_train.shape # In[55]: data_train.columns # In[56]: X = data_train.loc[:, ['Total_Stops', 'Journey_day', 'Journey_month', 'Dep_hour', 'Dep_min', 'Arrival_hour', 'Arrival_min', 'Duration_hours', 'Duration_mins', 'Airline_Air India', 'Airline_GoAir', 'Airline_IndiGo', 'Airline_Jet Airways', 'Airline_Jet Airways Business', 'Airline_Multiple carriers', 'Airline_Multiple carriers Premium economy', 'Airline_SpiceJet', 'Airline_Trujet', 'Airline_Vistara', 'Airline_Vistara Premium economy', 'Source_Chennai', 'Source_Delhi', 'Source_Kolkata', 'Source_Mumbai', 'Destination_Cochin', 'Destination_Delhi', 'Destination_Hyderabad', 'Destination_Kolkata', 'Destination_New Delhi']] X.head() # In[57]: y = data_train.iloc[:, 1] y.head() # In[58]: # Finds correlation between Independent and dependent attributes plt.figure(figsize = (18,18)) sns.heatmap(train_data.corr(), annot = True, cmap = "RdYlGn") plt.show() # In[63]: # Important feature using ExtraTreesRegressor from sklearn.ensemble import ExtraTreesRegressor selection = ExtraTreesRegressor() selection.fit(X, y) # In[61]: print(selection.feature_importances_) # In[62]: #plot graph of feature importances for better visualization plt.figure(figsize = (12,8)) feat_importances = pd.Series(selection.feature_importances_, index=X.columns) feat_importances.nlargest(20).plot(kind='barh') plt.show() # ## Fitting model using Random Forest # # 1.Split dataset into train and test set in order to prediction w.r.t X_test # # 2.If needed do scaling of data # # 3.Scaling is not done in Random forest # # 4.Import model # # 5.Fit the data # # 6.Predict w.r.t X_test # # 7.In regression check RSME Score # # 8.Plot graph # In[64]: from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) # In[65]: from sklearn.ensemble import 
RandomForestRegressor reg_rf = RandomForestRegressor() reg_rf.fit(X_train, y_train) # In[66]: y_pred = reg_rf.predict(X_test) # In[67]: reg_rf.score(X_train, y_train) # In[68]: reg_rf.score(X_test, y_test) # In[69]: sns.distplot(y_test-y_pred) plt.show() # In[70]: plt.scatter(y_test, y_pred, alpha = 0.5) plt.xlabel("y_test") plt.ylabel("y_pred") plt.show() # In[71]: from sklearn import metrics # In[72]: print('MAE:', metrics.mean_absolute_error(y_test, y_pred)) print('MSE:', metrics.mean_squared_error(y_test, y_pred)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) # In[73]: # RMSE/(max(DV)-min(DV)) 2090.5509/(max(y)-min(y)) # In[74]: metrics.r2_score(y_test, y_pred) # ### Hyperparameter Tuning # Choose following method for hyperparameter tuning # # RandomizedSearchCV --> Fast # # GridSearchCV # # Assign hyperparameters in form of dictionary # # Fit the model # # Check best paramters and best score # In[75]: from sklearn.model_selection import RandomizedSearchCV # In[76]: #Randomized Search CV # Number of trees in random forest n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)] # Number of features to consider at every split max_features = ['auto', 'sqrt'] # Maximum number of levels in tree max_depth = [int(x) for x in np.linspace(5, 30, num = 6)] # Minimum number of samples required to split a node min_samples_split = [2, 5, 10, 15, 100] # Minimum number of samples required at each leaf node min_samples_leaf = [1, 2, 5, 10] # In[77]: # Create the random grid random_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf} # In[78]: # Random search of parameters, using 5 fold cross validation, # search across 100 different combinations rf_random = RandomizedSearchCV(estimator = reg_rf, param_distributions = random_grid,scoring='neg_mean_squared_error', n_iter = 10, cv = 5, verbose=2, random_state=42, 
n_jobs = 1) # In[79]: rf_random.fit(X_train,y_train) # In[80]: rf_random.best_params_ # In[81]: prediction = rf_random.predict(X_test) # In[82]: plt.figure(figsize = (8,8)) sns.distplot(y_test-prediction) plt.show() # In[83]: plt.figure(figsize = (8,8)) plt.scatter(y_test, prediction, alpha = 0.5) plt.xlabel("y_test") plt.ylabel("y_pred") plt.show() # In[84]: print('MAE:', metrics.mean_absolute_error(y_test, prediction)) print('MSE:', metrics.mean_squared_error(y_test, prediction)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, prediction))) # ### Save the model to reuse it again # # In[94]: import pickle # open a file, where you ant to store the data file = open('flight_rf.pkl', 'wb') # dump information to that file pickle.dump(rf_random, file) # In[95]: model = open('flight_rf.pkl','rb') forest = pickle.load(model) # In[96]: y_prediction = forest.predict(X_test) # In[97]: metrics.r2_score(y_test, y_prediction)
[ "matplotlib.pyplot.ylabel", "sklearn.ensemble.ExtraTreesRegressor", "pandas.read_excel", "sklearn.metrics.r2_score", "pandas.to_datetime", "seaborn.set", "sklearn.ensemble.RandomForestRegressor", "seaborn.distplot", "matplotlib.pyplot.xlabel", "pandas.set_option", "numpy.linspace", "matplotlib...
[((144, 153), 'seaborn.set', 'sns.set', ([], {}), '()\n', (151, 153), True, 'import seaborn as sns\n'), ((755, 859), 'pandas.read_excel', 'pd.read_excel', (['"""/Users/dhanyashreegowda/Desktop/github/Flight-Fare-Prediction/Data_Train.xlsx"""'], {}), "(\n '/Users/dhanyashreegowda/Desktop/github/Flight-Fare-Prediction/Data_Train.xlsx'\n )\n", (768, 859), True, 'import pandas as pd\n'), ((864, 906), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (877, 906), True, 'import pandas as pd\n'), ((4753, 4763), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4761, 4763), True, 'import matplotlib.pyplot as plt\n'), ((4896, 4936), 'pandas.get_dummies', 'pd.get_dummies', (['Airline'], {'drop_first': '(True)'}), '(Airline, drop_first=True)\n', (4910, 4936), True, 'import pandas as pd\n'), ((5174, 5184), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5182, 5184), True, 'import matplotlib.pyplot as plt\n'), ((5313, 5352), 'pandas.get_dummies', 'pd.get_dummies', (['Source'], {'drop_first': '(True)'}), '(Source, drop_first=True)\n', (5327, 5352), True, 'import pandas as pd\n'), ((5572, 5616), 'pandas.get_dummies', 'pd.get_dummies', (['Destination'], {'drop_first': '(True)'}), '(Destination, drop_first=True)\n', (5586, 5616), True, 'import pandas as pd\n'), ((6318, 6379), 'pandas.concat', 'pd.concat', (['[train_data, Airline, Source, Destination]'], {'axis': '(1)'}), '([train_data, Airline, Source, Destination], axis=1)\n', (6327, 6379), True, 'import pandas as pd\n'), ((6615, 6717), 'pandas.read_excel', 'pd.read_excel', (['"""/Users/dhanyashreegowda/Desktop/github/Flight-Fare-Prediction/Test_set.xlsx"""'], {}), "(\n '/Users/dhanyashreegowda/Desktop/github/Flight-Fare-Prediction/Test_set.xlsx'\n )\n", (6628, 6717), True, 'import pandas as pd\n'), ((8595, 8648), 'pandas.get_dummies', 'pd.get_dummies', (["test_data['Airline']"], {'drop_first': '(True)'}), "(test_data['Airline'], 
drop_first=True)\n", (8609, 8648), True, 'import pandas as pd\n'), ((8741, 8793), 'pandas.get_dummies', 'pd.get_dummies', (["test_data['Source']"], {'drop_first': '(True)'}), "(test_data['Source'], drop_first=True)\n", (8755, 8793), True, 'import pandas as pd\n'), ((8901, 8958), 'pandas.get_dummies', 'pd.get_dummies', (["test_data['Destination']"], {'drop_first': '(True)'}), "(test_data['Destination'], drop_first=True)\n", (8915, 8958), True, 'import pandas as pd\n'), ((9344, 9404), 'pandas.concat', 'pd.concat', (['[test_data, Airline, Source, Destination]'], {'axis': '(1)'}), '([test_data, Airline, Source, Destination], axis=1)\n', (9353, 9404), True, 'import pandas as pd\n'), ((10736, 10764), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 18)'}), '(figsize=(18, 18))\n', (10746, 10764), True, 'import matplotlib.pyplot as plt\n'), ((10829, 10839), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10837, 10839), True, 'import matplotlib.pyplot as plt\n'), ((10962, 10983), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (10981, 10983), False, 'from sklearn.ensemble import ExtraTreesRegressor\n'), ((11131, 11158), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (11141, 11158), True, 'import matplotlib.pyplot as plt\n'), ((11179, 11237), 'pandas.Series', 'pd.Series', (['selection.feature_importances_'], {'index': 'X.columns'}), '(selection.feature_importances_, index=X.columns)\n', (11188, 11237), True, 'import pandas as pd\n'), ((11286, 11296), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11294, 11296), True, 'import matplotlib.pyplot as plt\n'), ((11724, 11778), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (11740, 11778), False, 'from sklearn.model_selection import train_test_split\n'), ((11857, 11880), 
'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (11878, 11880), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((12058, 12087), 'seaborn.distplot', 'sns.distplot', (['(y_test - y_pred)'], {}), '(y_test - y_pred)\n', (12070, 12087), True, 'import seaborn as sns\n'), ((12086, 12096), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12094, 12096), True, 'import matplotlib.pyplot as plt\n'), ((12111, 12149), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_test', 'y_pred'], {'alpha': '(0.5)'}), '(y_test, y_pred, alpha=0.5)\n', (12122, 12149), True, 'import matplotlib.pyplot as plt\n'), ((12152, 12172), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""y_test"""'], {}), "('y_test')\n", (12162, 12172), True, 'import matplotlib.pyplot as plt\n'), ((12173, 12193), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y_pred"""'], {}), "('y_pred')\n", (12183, 12193), True, 'import matplotlib.pyplot as plt\n'), ((12194, 12204), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12202, 12204), True, 'import matplotlib.pyplot as plt\n'), ((12526, 12558), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (12542, 12558), False, 'from sklearn import metrics\n'), ((13786, 13952), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', ([], {'estimator': 'reg_rf', 'param_distributions': 'random_grid', 'scoring': '"""neg_mean_squared_error"""', 'n_iter': '(10)', 'cv': '(5)', 'verbose': '(2)', 'random_state': '(42)', 'n_jobs': '(1)'}), "(estimator=reg_rf, param_distributions=random_grid,\n scoring='neg_mean_squared_error', n_iter=10, cv=5, verbose=2,\n random_state=42, n_jobs=1)\n", (13804, 13952), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((14103, 14129), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (14113, 14129), True, 'import matplotlib.pyplot as plt\n'), ((14131, 14164), 
'seaborn.distplot', 'sns.distplot', (['(y_test - prediction)'], {}), '(y_test - prediction)\n', (14143, 14164), True, 'import seaborn as sns\n'), ((14163, 14173), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14171, 14173), True, 'import matplotlib.pyplot as plt\n'), ((14188, 14214), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (14198, 14214), True, 'import matplotlib.pyplot as plt\n'), ((14216, 14258), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_test', 'prediction'], {'alpha': '(0.5)'}), '(y_test, prediction, alpha=0.5)\n', (14227, 14258), True, 'import matplotlib.pyplot as plt\n'), ((14261, 14281), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""y_test"""'], {}), "('y_test')\n", (14271, 14281), True, 'import matplotlib.pyplot as plt\n'), ((14282, 14302), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y_pred"""'], {}), "('y_pred')\n", (14292, 14302), True, 'import matplotlib.pyplot as plt\n'), ((14303, 14313), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14311, 14313), True, 'import matplotlib.pyplot as plt\n'), ((14711, 14739), 'pickle.dump', 'pickle.dump', (['rf_random', 'file'], {}), '(rf_random, file)\n', (14722, 14739), False, 'import pickle\n'), ((14798, 14816), 'pickle.load', 'pickle.load', (['model'], {}), '(model)\n', (14809, 14816), False, 'import pickle\n'), ((14883, 14921), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y_test', 'y_prediction'], {}), '(y_test, y_prediction)\n', (14899, 14921), False, 'from sklearn import metrics\n'), ((12275, 12318), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (12302, 12318), False, 'from sklearn import metrics\n'), ((12334, 12376), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (12360, 12376), False, 'from sklearn import metrics\n'), ((14342, 14389), 'sklearn.metrics.mean_absolute_error', 
'metrics.mean_absolute_error', (['y_test', 'prediction'], {}), '(y_test, prediction)\n', (14369, 14389), False, 'from sklearn import metrics\n'), ((14405, 14451), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_test', 'prediction'], {}), '(y_test, prediction)\n', (14431, 14451), False, 'from sklearn import metrics\n'), ((1622, 1683), 'pandas.to_datetime', 'pd.to_datetime', (['train_data.Date_of_Journey'], {'format': '"""%d/%m/%Y"""'}), "(train_data.Date_of_Journey, format='%d/%m/%Y')\n", (1636, 1683), True, 'import pandas as pd\n'), ((1735, 1799), 'pandas.to_datetime', 'pd.to_datetime', (["train_data['Date_of_Journey']"], {'format': '"""%d/%m/%Y"""'}), "(train_data['Date_of_Journey'], format='%d/%m/%Y')\n", (1749, 1799), True, 'import pandas as pd\n'), ((2196, 2234), 'pandas.to_datetime', 'pd.to_datetime', (["train_data['Dep_Time']"], {}), "(train_data['Dep_Time'])\n", (2210, 2234), True, 'import pandas as pd\n'), ((2289, 2327), 'pandas.to_datetime', 'pd.to_datetime', (["train_data['Dep_Time']"], {}), "(train_data['Dep_Time'])\n", (2303, 2327), True, 'import pandas as pd\n'), ((2660, 2699), 'pandas.to_datetime', 'pd.to_datetime', (['train_data.Arrival_Time'], {}), '(train_data.Arrival_Time)\n', (2674, 2699), True, 'import pandas as pd\n'), ((2758, 2797), 'pandas.to_datetime', 'pd.to_datetime', (['train_data.Arrival_Time'], {}), '(train_data.Arrival_Time)\n', (2772, 2797), True, 'import pandas as pd\n'), ((7006, 7066), 'pandas.to_datetime', 'pd.to_datetime', (['test_data.Date_of_Journey'], {'format': '"""%d/%m/%Y"""'}), "(test_data.Date_of_Journey, format='%d/%m/%Y')\n", (7020, 7066), True, 'import pandas as pd\n'), ((7103, 7166), 'pandas.to_datetime', 'pd.to_datetime', (["test_data['Date_of_Journey']"], {'format': '"""%d/%m/%Y"""'}), "(test_data['Date_of_Journey'], format='%d/%m/%Y')\n", (7117, 7166), True, 'import pandas as pd\n'), ((7276, 7313), 'pandas.to_datetime', 'pd.to_datetime', (["test_data['Dep_Time']"], {}), 
"(test_data['Dep_Time'])\n", (7290, 7313), True, 'import pandas as pd\n'), ((7345, 7382), 'pandas.to_datetime', 'pd.to_datetime', (["test_data['Dep_Time']"], {}), "(test_data['Dep_Time'])\n", (7359, 7382), True, 'import pandas as pd\n'), ((7492, 7530), 'pandas.to_datetime', 'pd.to_datetime', (['test_data.Arrival_Time'], {}), '(test_data.Arrival_Time)\n', (7506, 7530), True, 'import pandas as pd\n'), ((7566, 7604), 'pandas.to_datetime', 'pd.to_datetime', (['test_data.Arrival_Time'], {}), '(test_data.Arrival_Time)\n', (7580, 7604), True, 'import pandas as pd\n'), ((12401, 12443), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (12427, 12443), False, 'from sklearn import metrics\n'), ((12974, 13015), 'numpy.linspace', 'np.linspace', ([], {'start': '(100)', 'stop': '(1200)', 'num': '(12)'}), '(start=100, stop=1200, num=12)\n', (12985, 13015), True, 'import numpy as np\n'), ((13167, 13192), 'numpy.linspace', 'np.linspace', (['(5)', '(30)'], {'num': '(6)'}), '(5, 30, num=6)\n', (13178, 13192), True, 'import numpy as np\n'), ((14476, 14522), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_test', 'prediction'], {}), '(y_test, prediction)\n', (14502, 14522), False, 'from sklearn import metrics\n')]
r""" Definition ---------- This model provides the form factor for an elliptical cylinder with a core-shell scattering length density profile. Thus this is a variation of the core-shell bicelle model, but with an elliptical cylinder for the core. Outer shells on the rims and flat ends may be of different thicknesses and scattering length densities. The form factor is normalized by the total particle volume. .. figure:: img/core_shell_bicelle_geometry.png Schematic cross-section of bicelle. Note however that the model here calculates for rectangular, not curved, rims as shown below. .. figure:: img/core_shell_bicelle_parameters.png Cross section of model used here. Users will have to decide how to distribute "heads" and "tails" between the rim, face and core regions in order to estimate appropriate starting parameters. Given the scattering length densities (sld) $\rho_c$, the core sld, $\rho_f$, the face sld, $\rho_r$, the rim sld and $\rho_s$ the solvent sld, the scattering length density variation along the bicelle axis is: .. math:: \rho(r) = \begin{cases} &\rho_c \text{ for } 0 \lt r \lt R; -L \lt z\lt L \\[1.5ex] &\rho_f \text{ for } 0 \lt r \lt R; -(L+2t) \lt z\lt -L; L \lt z\lt (L+2t) \\[1.5ex] &\rho_r\text{ for } 0 \lt r \lt R; -(L+2t) \lt z\lt -L; L \lt z\lt (L+2t) \end{cases} The form factor for the bicelle is calculated in cylindrical coordinates, where $\alpha$ is the angle between the $Q$ vector and the cylinder axis, and $\psi$ is the angle for the ellipsoidal cross section core, to give: .. math:: I(Q,\alpha,\psi) = \frac{\text{scale}}{V_t} \cdot F(Q,\alpha, \psi)^2.sin(\alpha) + \text{background} where a numerical integration of $F(Q,\alpha, \psi)^2.sin(\alpha)$ is carried out over \alpha and \psi for: .. 
math:: \begin{align} F(Q,\alpha,\psi) = &\bigg[ (\rho_c - \rho_f) V_c \frac{2J_1(QR'sin \alpha)}{QR'sin\alpha}\frac{sin(QLcos\alpha/2)}{Q(L/2)cos\alpha} \\ &+(\rho_f - \rho_r) V_{c+f} \frac{2J_1(QR'sin\alpha)}{QR'sin\alpha}\frac{sin(Q(L/2+t_f)cos\alpha)}{Q(L/2+t_f)cos\alpha} \\ &+(\rho_r - \rho_s) V_t \frac{2J_1(Q(R'+t_r)sin\alpha)}{Q(R'+t_r)sin\alpha}\frac{sin(Q(L/2+t_f)cos\alpha)}{Q(L/2+t_f)cos\alpha} \bigg] \end{align} where .. math:: R'=\frac{R}{\sqrt{2}}\sqrt{(1+X_{core}^{2}) + (1-X_{core}^{2})cos(\psi)} and $V_t = \pi.(R+t_r)(Xcore.R+t_r)^2.(L+2.t_f)$ is the total volume of the bicelle, $V_c = \pi.Xcore.R^2.L$ the volume of the core, $V_{c+f} = \pi.Xcore.R^2.(L+2.t_f)$ the volume of the core plus the volume of the faces, $R$ is the radius of the core, $Xcore$ is the axial ratio of the core, $L$ the length of the core, $t_f$ the thickness of the face, $t_r$ the thickness of the rim and $J_1$ the usual first order bessel function. The core has radii $R$ and $Xcore.R$ so is circular, as for the core_shell_bicelle model, for $Xcore$ =1. Note that you may need to limit the range of $Xcore$, especially if using the Monte-Carlo algorithm, as setting radius to $R/Xcore$ and axial ratio to $1/Xcore$ gives an equivalent solution! The output of the 1D scattering intensity function for randomly oriented bicelles is then given by integrating over all possible $\alpha$ and $\psi$. For oriented bicelles the *theta*, *phi* and *psi* orientation parameters will appear when fitting 2D data, see the :ref:`elliptical-cylinder` model for further information. .. figure:: img/elliptical_cylinder_angle_definition.png Definition of the angles for the oriented core_shell_bicelle_elliptical particles. References ---------- .. 
[#] Authorship and Verification ---------------------------- * **Author:** <NAME> **Date:** December 14, 2016 * **Last Modified by:** <NAME> **Date:** December 14, 2016 * **Last Reviewed by:** <NAME> BEWARE 2d data yet to be checked **Date:** December 14, 2016 """ from numpy import inf, sin, cos, pi name = "core_shell_bicelle_elliptical" title = "Elliptical cylinder with a core-shell scattering length density profile.." description = """ core_shell_bicelle_elliptical Elliptical cylinder core, optional shell on the two flat faces, and shell of uniform thickness on its rim (extending around the end faces). Please see full documentation for equations and further details. Involves a double numerical integral around the ellipsoid diameter and the angle of the cylinder axis to Q. Compare also the core_shell_bicelle and elliptical_cylinder models. """ category = "shape:cylinder" # pylint: disable=bad-whitespace, line-too-long # ["name", "units", default, [lower, upper], "type", "description"], parameters = [ ["radius", "Ang", 30, [0, inf], "volume", "Cylinder core radius"], ["x_core", "None", 3, [0, inf], "volume", "axial ratio of core, X = r_polar/r_equatorial"], ["thick_rim", "Ang", 8, [0, inf], "volume", "Rim shell thickness"], ["thick_face", "Ang", 14, [0, inf], "volume", "Cylinder face thickness"], ["length", "Ang", 50, [0, inf], "volume", "Cylinder length"], ["sld_core", "1e-6/Ang^2", 4, [-inf, inf], "sld", "Cylinder core scattering length density"], ["sld_face", "1e-6/Ang^2", 7, [-inf, inf], "sld", "Cylinder face scattering length density"], ["sld_rim", "1e-6/Ang^2", 1, [-inf, inf], "sld", "Cylinder rim scattering length density"], ["sld_solvent", "1e-6/Ang^2", 6, [-inf, inf], "sld", "Solvent scattering length density"], ["theta", "degrees", 90.0, [-360, 360], "orientation", "cylinder axis to beam angle"], ["phi", "degrees", 0, [-360, 360], "orientation", "rotation about beam"], ["psi", "degrees", 0, [-360, 360], "orientation", "rotation about cylinder axis"] ] # 
pylint: enable=bad-whitespace, line-too-long source = ["lib/sas_Si.c", "lib/polevl.c", "lib/sas_J1.c", "lib/gauss76.c", "core_shell_bicelle_elliptical.c"] demo = dict(scale=1, background=0, radius=30.0, x_core=3.0, thick_rim=8.0, thick_face=14.0, length=50.0, sld_core=4.0, sld_face=7.0, sld_rim=1.0, sld_solvent=6.0, theta=90, phi=0, psi=0) q = 0.1 # april 6 2017, rkh added a 2d unit test, NOT READY YET pull #890 branch assume correct! qx = q*cos(pi/6.0) qy = q*sin(pi/6.0) tests = [ [{'radius': 30.0, 'x_core': 3.0, 'thick_rim':8.0, 'thick_face':14.0, 'length':50.0}, 'ER', 1], [{'radius': 30.0, 'x_core': 3.0, 'thick_rim':8.0, 'thick_face':14.0, 'length':50.0}, 'VR', 1], [{'radius': 30.0, 'x_core': 3.0, 'thick_rim':8.0, 'thick_face':14.0, 'length':50.0, 'sld_core':4.0, 'sld_face':7.0, 'sld_rim':1.0, 'sld_solvent':6.0, 'background':0.0}, 0.015, 286.540286], # [{'theta':80., 'phi':10.}, (qx, qy), 7.88866563001 ], ] del qx, qy # not necessary to delete, but cleaner
[ "numpy.sin", "numpy.cos" ]
[((6560, 6573), 'numpy.cos', 'cos', (['(pi / 6.0)'], {}), '(pi / 6.0)\n', (6563, 6573), False, 'from numpy import inf, sin, cos, pi\n'), ((6579, 6592), 'numpy.sin', 'sin', (['(pi / 6.0)'], {}), '(pi / 6.0)\n', (6582, 6592), False, 'from numpy import inf, sin, cos, pi\n')]
# Copyright 2016-2020 The <NAME> at the California Institute of # Technology (Caltech), with support from the Paul Allen Family Foundation, # Google, & National Institutes of Health (NIH) under Grant U24CA224309-01. # All rights reserved. # # Licensed under a modified Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.github.com/vanvalenlab/caliban-toolbox/LICENSE # # The Work provided may be used for non-commercial academic purposes only. # For any other use of the Work, including commercial use, please contact: # <EMAIL> # # Neither the name of Caltech nor the names of its contributors may be used # to endorse or promote products derived from this software without specific # prior written permission. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== from __future__ import absolute_import from __future__ import print_function from __future__ import division import numpy as np import os import json from itertools import product def save_npzs_for_caliban(X_data, y_data, original_data, log_data, save_dir, blank_labels='include', save_format='npz', verbose=True): """Take an array of processed image data and save as NPZ for caliban Args: X_data: 7D tensor of cropped and sliced raw images y_data: 7D tensor of cropped and sliced labeled images original_data: the original unmodified images log_data: data used to reconstruct images save_dir: path to save the npz and JSON files blank_labels: whether to include NPZs with blank labels (poor predictions) or skip (no cells) save_format: format to save the data (currently only NPZ) verbose: flag to control print statements """ if not os.path.isdir(save_dir): os.makedirs(save_dir) # if these are present, it means data was cropped/sliced. 
Otherwise, default to 1 num_crops = log_data.get('num_crops', 1) num_slices = log_data.get('num_slices', 1) fov_names = original_data.fovs.values fov_len = len(fov_names) if blank_labels not in ['skip', 'include', 'separate']: raise ValueError('blank_labels must be one of ' '[skip, include, separate], got {}'.format(blank_labels)) if blank_labels == 'separate': os.makedirs(os.path.join(save_dir, 'separate')) # for each fov, loop through 2D crops and 3D slices for fov, crop, slice in product(range(fov_len), range(num_crops), range(num_slices)): # generate identifier for crop npz_id = 'fov_{}_crop_{}_slice_{}'.format(fov_names[fov], crop, slice) # get working batch labels = y_data[fov, :, crop, slice, ...].values channels = X_data[fov, :, crop, slice, ...].values # determine if labels are blank, and if so what to do with npz if np.sum(labels) == 0: # blank labels get saved to separate folder if blank_labels == 'separate': if verbose: print('{} is blank, saving to separate folder'.format(npz_id)) save_path = os.path.join(save_dir, blank_labels, npz_id) # save images as either npz or xarray if save_format == 'npz': np.savez_compressed(save_path + '.npz', X=channels, y=labels) elif save_format == 'xr': raise NotImplementedError() # blank labels don't get saved, empty area of tissue elif blank_labels == 'skip': if verbose: print('{} is blank, skipping saving'.format(npz_id)) # blank labels get saved along with other crops elif blank_labels == 'include': if verbose: print('{} is blank, saving to folder'.format(npz_id)) save_path = os.path.join(save_dir, npz_id) # save images as either npz or xarray if save_format == 'npz': np.savez_compressed(save_path + '.npz', X=channels, y=labels) elif save_format == 'xr': raise NotImplementedError() else: # crop is not blank, save based on file_format save_path = os.path.join(save_dir, npz_id) # save images as either npz or xarray if save_format == 'npz': np.savez_compressed(save_path + '.npz', X=channels, y=labels) elif save_format 
== 'xr': raise NotImplementedError() log_data['fov_names'] = fov_names.tolist() log_data['label_name'] = str(y_data.coords[y_data.dims[-1]][0].values) log_data['original_shape'] = original_data.shape log_data['slice_stack_len'] = X_data.shape[1] log_data['save_format'] = save_format log_data['label_dtype'] = str(y_data.dtype) log_path = os.path.join(save_dir, 'log_data.json') with open(log_path, 'w') as write_file: json.dump(log_data, write_file) def get_saved_file_path(dir_list, fov_name, crop, slice, file_ext='.npz'): """Helper function to identify correct file path for an npz file Args: dir_list: list of files in directory fov_name: string of the current fov_name crop: int of current crop slice: int of current slice file_ext: extension file was saved with Returns: string: formatted file name Raises: ValueError: If multiple file path matches were found """ base_string = 'fov_{}_crop_{}_slice_{}'.format(fov_name, crop, slice) string_matches = [string for string in dir_list if base_string + '_save_version' in string] if len(string_matches) == 0: full_string = base_string + file_ext elif len(string_matches) == 1: full_string = string_matches[0] else: raise ValueError('Multiple save versions found: ' 'please select only a single save version. {}'.format(string_matches)) return full_string def load_npzs(crop_dir, log_data, verbose=True): """Reads all of the cropped images from a directory, and aggregates them into a single stack Args: crop_dir: path to directory with cropped npz or xarray files log_data: dictionary of parameters generated during data saving verbose: flag to control print statements Returns: numpy.array: 7D tensor of labeled crops """ fov_names = log_data['fov_names'] fov_len, stack_len, _, _, row_size, col_size, _ = log_data['original_shape'] save_format = log_data['save_format'] label_dtype = log_data['label_dtype'] # if cropped/sliced, get size of dimensions. 
Otherwise, use size in original data row_crop_size = log_data.get('row_crop_size', row_size) col_crop_size = log_data.get('col_crop_size', col_size) slice_stack_len = log_data.get('slice_stack_len', stack_len) # if cropped/sliced, get number of crops/slices num_crops, num_slices = log_data.get('num_crops', 1), log_data.get('num_slices', 1) stack = np.zeros((fov_len, slice_stack_len, num_crops, num_slices, row_crop_size, col_crop_size, 1), dtype=label_dtype) saved_files = os.listdir(crop_dir) # for each fov, loop over each 2D crop and 3D slice for fov, crop, slice in product(range(fov_len), range(num_crops), range(num_slices)): # load NPZs if save_format == 'npz': npz_path = os.path.join(crop_dir, get_saved_file_path(saved_files, fov_names[fov], crop, slice)) if os.path.exists(npz_path): temp_npz = np.load(npz_path) # determine how labels were named labels_key = 'y' if 'y' in temp_npz else 'annotated' # last slice may be truncated, modify index if slice == num_slices - 1: current_stack_len = temp_npz[labels_key].shape[1] else: current_stack_len = slice_stack_len stack[fov, :current_stack_len, crop, slice, ...] = temp_npz[labels_key] else: # npz not generated, did not contain any labels, keep blank if verbose: print('could not find npz {}, skipping'.format(npz_path)) # load xarray elif save_format == 'xr': raise NotImplementedError() # xr_path = os.path.join(crop_dir, get_saved_file_path(saved_files, fov_names[fov], # crop, slice)) # if os.path.exists(xr_path): # temp_xr = xr.open_dataarray(xr_path) # # # last slice may be truncated, modify index # if slice == num_slices - 1: # current_stack_len = temp_xr.shape[1] # else: # current_stack_len = stack_len # # stack[fov, :current_stack_len, crop, slice, ...] = temp_xr[..., -1:] # else: # # npz not generated, did not contain any labels, keep blank # print('could not find xr {}, skipping'.format(xr_path)) return stack
[ "os.path.exists", "os.listdir", "os.makedirs", "os.path.join", "numpy.sum", "numpy.zeros", "os.path.isdir", "numpy.savez_compressed", "numpy.load", "json.dump" ]
[((5320, 5359), 'os.path.join', 'os.path.join', (['save_dir', '"""log_data.json"""'], {}), "(save_dir, 'log_data.json')\n", (5332, 5359), False, 'import os\n'), ((7507, 7622), 'numpy.zeros', 'np.zeros', (['(fov_len, slice_stack_len, num_crops, num_slices, row_crop_size,\n col_crop_size, 1)'], {'dtype': 'label_dtype'}), '((fov_len, slice_stack_len, num_crops, num_slices, row_crop_size,\n col_crop_size, 1), dtype=label_dtype)\n', (7515, 7622), True, 'import numpy as np\n'), ((7659, 7679), 'os.listdir', 'os.listdir', (['crop_dir'], {}), '(crop_dir)\n', (7669, 7679), False, 'import os\n'), ((2202, 2225), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (2215, 2225), False, 'import os\n'), ((2235, 2256), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (2246, 2256), False, 'import os\n'), ((5412, 5443), 'json.dump', 'json.dump', (['log_data', 'write_file'], {}), '(log_data, write_file)\n', (5421, 5443), False, 'import json\n'), ((2764, 2798), 'os.path.join', 'os.path.join', (['save_dir', '"""separate"""'], {}), "(save_dir, 'separate')\n", (2776, 2798), False, 'import os\n'), ((3293, 3307), 'numpy.sum', 'np.sum', (['labels'], {}), '(labels)\n', (3299, 3307), True, 'import numpy as np\n'), ((4708, 4738), 'os.path.join', 'os.path.join', (['save_dir', 'npz_id'], {}), '(save_dir, npz_id)\n', (4720, 4738), False, 'import os\n'), ((8136, 8160), 'os.path.exists', 'os.path.exists', (['npz_path'], {}), '(npz_path)\n', (8150, 8160), False, 'import os\n'), ((3553, 3597), 'os.path.join', 'os.path.join', (['save_dir', 'blank_labels', 'npz_id'], {}), '(save_dir, blank_labels, npz_id)\n', (3565, 3597), False, 'import os\n'), ((4843, 4904), 'numpy.savez_compressed', 'np.savez_compressed', (["(save_path + '.npz')"], {'X': 'channels', 'y': 'labels'}), "(save_path + '.npz', X=channels, y=labels)\n", (4862, 4904), True, 'import numpy as np\n'), ((8189, 8206), 'numpy.load', 'np.load', (['npz_path'], {}), '(npz_path)\n', (8196, 8206), True, 'import numpy 
as np\n'), ((3714, 3775), 'numpy.savez_compressed', 'np.savez_compressed', (["(save_path + '.npz')"], {'X': 'channels', 'y': 'labels'}), "(save_path + '.npz', X=channels, y=labels)\n", (3733, 3775), True, 'import numpy as np\n'), ((4310, 4340), 'os.path.join', 'os.path.join', (['save_dir', 'npz_id'], {}), '(save_dir, npz_id)\n', (4322, 4340), False, 'import os\n'), ((4457, 4518), 'numpy.savez_compressed', 'np.savez_compressed', (["(save_path + '.npz')"], {'X': 'channels', 'y': 'labels'}), "(save_path + '.npz', X=channels, y=labels)\n", (4476, 4518), True, 'import numpy as np\n')]
""" Uses generator functions to supply train/test with data. Image renderings and text are created on the fly each time. """ from itertools import groupby from tensorflow.keras.preprocessing.sequence import pad_sequences import handwritten_text_recognition.data.preproc as pp import h5py import numpy as np import unicodedata class DataGenerator(): """Generator class with data streaming""" def __init__(self, source, batch_size, charset, max_text_length, predict=False): self.tokenizer = Tokenizer(charset, max_text_length) self.batch_size = batch_size self.partitions = ['test'] if predict else ['train', 'valid', 'test'] self.size = dict() self.steps = dict() self.index = dict() self.dataset = dict() with h5py.File(source, "r") as f: for pt in self.partitions: self.dataset[pt] = dict() self.dataset[pt]['dt'] = f[pt]['dt'][:] self.dataset[pt]['gt'] = f[pt]['gt'][:] for pt in self.partitions: # decode sentences from byte self.dataset[pt]['gt'] = [x.decode() for x in self.dataset[pt]['gt']] # set size and setps self.size[pt] = len(self.dataset[pt]['gt']) self.steps[pt] = int(np.ceil(self.size[pt] / self.batch_size)) self.index[pt] = 0 def next_train_batch(self): """Get the next batch from train partition (yield)""" while True: if self.index['train'] >= self.size['train']: self.index['train'] = 0 index = self.index['train'] until = self.index['train'] + self.batch_size self.index['train'] = until x_train = self.dataset['train']['dt'][index:until] y_train = self.dataset['train']['gt'][index:until] x_train = pp.augmentation(x_train, rotation_range=1.5, scale_range=0.05, height_shift_range=0.025, width_shift_range=0.05, erode_range=5, dilate_range=3) x_train = pp.normalization(x_train) y_train = [self.tokenizer.encode(y) for y in y_train] y_train = pad_sequences(y_train, maxlen=self.tokenizer.maxlen, padding="post") yield (x_train, y_train, []) def next_valid_batch(self): """Get the next batch from validation partition (yield)""" while True: if self.index['valid'] >= 
self.size['valid']: self.index['valid'] = 0 index = self.index['valid'] until = self.index['valid'] + self.batch_size self.index['valid'] = until x_valid = self.dataset['valid']['dt'][index:until] y_valid = self.dataset['valid']['gt'][index:until] x_valid = pp.normalization(x_valid) y_valid = [self.tokenizer.encode(y) for y in y_valid] y_valid = pad_sequences(y_valid, maxlen=self.tokenizer.maxlen, padding="post") yield (x_valid, y_valid, []) def next_test_batch(self): """Return model predict parameters""" while True: if self.index['test'] >= self.size['test']: self.index['test'] = 0 break index = self.index['test'] until = self.index['test'] + self.batch_size self.index['test'] = until x_test = self.dataset['test']['dt'][index:until] x_test = pp.normalization(x_test) yield x_test class Tokenizer(): """Manager tokens functions and charset/dictionary properties""" def __init__(self, chars, max_text_length=128): self.PAD_TK, self.UNK_TK = "¶", "¤" self.chars = (self.PAD_TK + self.UNK_TK + chars) self.PAD = self.chars.find(self.PAD_TK) self.UNK = self.chars.find(self.UNK_TK) self.vocab_size = len(self.chars) self.maxlen = max_text_length def encode(self, text): """Encode text to vector""" text = unicodedata.normalize("NFKD", text).encode("ASCII", "ignore").decode("ASCII") text = " ".join(text.split()) groups = ["".join(group) for _, group in groupby(text)] text = "".join([self.UNK_TK.join(list(x)) if len(x) > 1 else x for x in groups]) encoded = [] for item in text: index = self.chars.find(item) index = self.UNK if index == -1 else index encoded.append(index) return np.asarray(encoded) def decode(self, text): """Decode vector to text""" decoded = "".join([self.chars[int(x)] for x in text if x > -1]) decoded = self.remove_tokens(decoded) decoded = pp.text_standardize(decoded) return decoded def remove_tokens(self, text): """Remove tokens (PAD) from text""" return text.replace(self.PAD_TK, "").replace(self.UNK_TK, "")
[ "handwritten_text_recognition.data.preproc.text_standardize", "numpy.ceil", "itertools.groupby", "tensorflow.keras.preprocessing.sequence.pad_sequences", "numpy.asarray", "h5py.File", "unicodedata.normalize", "handwritten_text_recognition.data.preproc.normalization", "handwritten_text_recognition.da...
[((4678, 4697), 'numpy.asarray', 'np.asarray', (['encoded'], {}), '(encoded)\n', (4688, 4697), True, 'import numpy as np\n'), ((4900, 4928), 'handwritten_text_recognition.data.preproc.text_standardize', 'pp.text_standardize', (['decoded'], {}), '(decoded)\n', (4919, 4928), True, 'import handwritten_text_recognition.data.preproc as pp\n'), ((789, 811), 'h5py.File', 'h5py.File', (['source', '"""r"""'], {}), "(source, 'r')\n", (798, 811), False, 'import h5py\n'), ((1869, 2020), 'handwritten_text_recognition.data.preproc.augmentation', 'pp.augmentation', (['x_train'], {'rotation_range': '(1.5)', 'scale_range': '(0.05)', 'height_shift_range': '(0.025)', 'width_shift_range': '(0.05)', 'erode_range': '(5)', 'dilate_range': '(3)'}), '(x_train, rotation_range=1.5, scale_range=0.05,\n height_shift_range=0.025, width_shift_range=0.05, erode_range=5,\n dilate_range=3)\n', (1884, 2020), True, 'import handwritten_text_recognition.data.preproc as pp\n'), ((2264, 2289), 'handwritten_text_recognition.data.preproc.normalization', 'pp.normalization', (['x_train'], {}), '(x_train)\n', (2280, 2289), True, 'import handwritten_text_recognition.data.preproc as pp\n'), ((2379, 2447), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['y_train'], {'maxlen': 'self.tokenizer.maxlen', 'padding': '"""post"""'}), "(y_train, maxlen=self.tokenizer.maxlen, padding='post')\n", (2392, 2447), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((2998, 3023), 'handwritten_text_recognition.data.preproc.normalization', 'pp.normalization', (['x_valid'], {}), '(x_valid)\n', (3014, 3023), True, 'import handwritten_text_recognition.data.preproc as pp\n'), ((3113, 3181), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['y_valid'], {'maxlen': 'self.tokenizer.maxlen', 'padding': '"""post"""'}), "(y_valid, maxlen=self.tokenizer.maxlen, padding='post')\n", (3126, 3181), False, 'from tensorflow.keras.preprocessing.sequence import 
pad_sequences\n'), ((3659, 3683), 'handwritten_text_recognition.data.preproc.normalization', 'pp.normalization', (['x_test'], {}), '(x_test)\n', (3675, 3683), True, 'import handwritten_text_recognition.data.preproc as pp\n'), ((1293, 1333), 'numpy.ceil', 'np.ceil', (['(self.size[pt] / self.batch_size)'], {}), '(self.size[pt] / self.batch_size)\n', (1300, 1333), True, 'import numpy as np\n'), ((4379, 4392), 'itertools.groupby', 'groupby', (['text'], {}), '(text)\n', (4386, 4392), False, 'from itertools import groupby\n'), ((4213, 4248), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKD"""', 'text'], {}), "('NFKD', text)\n", (4234, 4248), False, 'import unicodedata\n')]
import numpy as np import infotheory class bcolors: HEADER = "\033[95m" OKBLUE = "\033[94m" OKGREEN = "\033[92m" TEST_HEADER = "\033[93m" FAIL = "\033[91m" ENDC = "\033[0m" BOLD = "\033[1m" UNDERLINE = "\033[4m" SUCCESS = bcolors.OKGREEN + "SUCCESS" + bcolors.ENDC FAILED = bcolors.FAIL + "FAILED" + bcolors.ENDC def _except(e): print("\n" + FAILED) print(e) exit(1) def do_matching(base_str, result, target, name, decimals=5): result = np.round(result, decimals=decimals) target = np.round(target, decimals=decimals) if result == target: print(base_str, name, result, target, SUCCESS) else: raise Exception( "{} not equal to expected value. Expected = {}, Actual = {}".format( name, target, result ) ) def decomposition_equivalence_4D(dims, nreps, nbins, data_ranges, data): try: # creating the object and adding data it_par = infotheory.InfoTools(dims, nreps) it_par.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1]) it_par.add_data(data) # PID-ing total_mi = it_par.mutual_info([1, 1, 1, 0]) redundant_info = it_par.redundant_info([1, 2, 3, 0]) unique_1 = it_par.unique_info([1, 2, 3, 0]) unique_2 = it_par.unique_info([2, 1, 3, 0]) unique_3 = it_par.unique_info([2, 3, 1, 0]) synergy = it_par.synergy([1, 2, 3, 0]) targets = [total_mi, redundant_info, unique_1, unique_2, unique_3, synergy] # Alternate PID-ing total_mi = it_par.mutual_info([1, 1, 1, 0]) redundant_info = it_par.redundant_info([2, 1, 3, 0]) unique_1 = it_par.unique_info([1, 3, 2, 0]) unique_2 = it_par.unique_info([3, 1, 2, 0]) unique_3 = it_par.unique_info([3, 2, 1, 0]) synergy = it_par.synergy([2, 1, 3, 0]) base_str = "Decomposition equivalence | " do_matching(base_str, total_mi, targets[0], "Total MI") do_matching(base_str, redundant_info, targets[1], "Redundant info | ") do_matching(base_str, unique_1, targets[2], "Unique source 1 info | ") do_matching(base_str, unique_2, targets[3], "Unique source 2 info | ") do_matching(base_str, unique_3, targets[4], "Unique source 3 info | ") 
do_matching(base_str, synergy, targets[5], "Synergistic info | ") except Exception as e: _except(e) def decomposition_test_4D(dims, nreps, nbins, data_ranges, data, targets): """ testing if 4D PID matches expected values """ try: # creating the object and adding data it_par = infotheory.InfoTools(dims, nreps) it_par.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1]) it_par.add_data(data) # PID-ing total_mi = it_par.mutual_info([1, 1, 1, 0]) redundant_info = it_par.redundant_info([1, 2, 3, 0]) unique_1 = it_par.unique_info([1, 2, 3, 0]) unique_2 = it_par.unique_info([2, 1, 3, 0]) unique_3 = it_par.unique_info([2, 3, 1, 0]) synergy = it_par.synergy([1, 2, 3, 0]) results = [total_mi, redundant_info, unique_1, unique_2, unique_3, synergy] base_str = "Decomposition test | " do_matching(base_str, total_mi, targets[0], "Total MI") do_matching(base_str, redundant_info, targets[1], "Redundant info | ") do_matching(base_str, unique_1, targets[2], "Unique source 1 info | ") do_matching(base_str, unique_2, targets[3], "Unique source 2 info | ") do_matching(base_str, unique_3, targets[4], "Unique source 3 info | ") do_matching(base_str, synergy, targets[5], "Synergistic info | ") except Exception as e: _except(e) def pid_test_3D(dims, nreps, nbins, data_ranges, data): """ testing sum of pid == total_mi """ try: # creating the object it = infotheory.InfoTools(dims, nreps) it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1]) # adding points it.add_data(data) # estimating mutual information mi = it.mutual_info([1, 1, 0]) redundant_info = it.redundant_info([1, 2, 0]) unique_1 = it.unique_info([1, 2, 0]) unique_2 = it.unique_info([2, 1, 0]) synergy = it.synergy([1, 2, 0]) # total_pid total_pid = np.sum( np.round([redundant_info, unique_1, unique_2, synergy], decimals=6) ) # mi total_mi = np.round(mi, decimals=6) if (total_pid - total_mi) < 1e-5: print(total_pid, total_mi, SUCCESS) else: raise Exception( "Total PID does not equal MI: total_mi = {}; 
total_pid = {}".format( total_pid, total_mi ) ) except Exception as e: _except(e) def decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data): try: # creating the object it = infotheory.InfoTools(dims, nreps) it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1]) # adding points it.add_data(data) # estimating mutual information redundant_info_1 = it.redundant_info([1, 2, 0]) synergy_1 = it.synergy([1, 2, 0]) redundant_info_2 = it.redundant_info([2, 1, 0]) synergy_2 = it.synergy([2, 1, 0]) base_str = "Decomposition equivalence | " do_matching(base_str, redundant_info_1, redundant_info_2, "Redundant info | ") do_matching(base_str, synergy_1, synergy_2, "Synergy | ") except Exception as e: _except(e) def decomposition_test_3D(dims, nreps, nbins, data_ranges, data, results): try: # creating the object it = infotheory.InfoTools(dims, nreps) it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1]) # adding points it.add_data(data) # estimating mutual information redundant_info = it.redundant_info([1, 2, 0]) unique_1 = it.unique_info([1, 2, 0]) unique_2 = it.unique_info([2, 1, 0]) synergy = it.synergy([1, 2, 0]) if all( np.round([redundant_info, unique_1, unique_2, synergy], decimals=2) == results ): print(synergy, SUCCESS) else: raise Exception("PID computation error") except Exception as e: _except(e) def uniform_random_mi_test(dims, nreps, nbins, data_ranges, num_samples=1000): print( "Testing mutual info with uniform random variables. 
MI = ", end="", flush=True ) try: # creating the object it = infotheory.InfoTools(dims, nreps) it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1]) # adding points it.add_data(np.random.rand(num_samples, dims)) # ...alternatively, # for _ in range(num_samples): # it.add_data_point(np.random.rand(dims)) # estimating mutual information mi = it.mutual_info([0, 1]) / ((1 / dims) * np.log2(np.prod(nbins))) print(mi, SUCCESS) except Exception as e: print(e) _except(e) def identical_random_mi_test( dims, nreps, nbins, data_ranges, add_noise=False, num_samples=1000 ): print("Testing mutual info with identical random variables", end="", flush=True) if add_noise: print(" with noise. MI = ", end="", flush=True) else: print(". MI = ", end="", flush=True) try: # creating the object if dims % 2 != 0: dims += 1 it = infotheory.InfoTools(dims, nreps) it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1]) p_dims = int(dims / 2) # adding points for _ in range(num_samples): point1 = np.random.rand(p_dims) if add_noise: point2 = point1 + (np.random.rand(p_dims) / 30) else: point2 = point1 it.add_data_point(np.concatenate((point1, point2))) # computing mutual information mi = it.mutual_info([0, 1]) / ((1 / dims) * np.log2(np.prod(nbins))) print(mi, SUCCESS) except Exception as e: _except(e) def entropy_test(dims, nreps, nbins, data_ranges, data_sampler, num_samples=1000): try: # creating the object it = infotheory.InfoTools(dims, nreps) it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1]) # adding points for _ in range(num_samples): it.add_data_point([data_sampler()]) # estimate entropy print(it.entropy([0]), SUCCESS) except Exception as e: _except(e) def test_pid_4D(): """ Testing 3D PI-decomposition 1. sanity for each PI measure 2. 
known PIDs for even parity """ print("\n" + bcolors.TEST_HEADER + "PID-4D" + bcolors.ENDC) ## Testing PID by value dims = 4 nreps = 0 nbins = [2] * dims data_ranges = [[0] * dims, [1] * dims] # Even parity check data = [ [0, 0, 0, 0], [0, 0, 1, 1], [0, 1, 0, 1], [0, 1, 1, 0], [1, 0, 0, 1], [1, 0, 1, 0], [1, 1, 0, 0], [1, 1, 1, 1], ] targets = [1.0, 0.0, 0.0, 0.0, 0.0, 1.0] print("Testing PID with even parity checker") decomposition_test_4D(dims, nreps, nbins, data_ranges, data, targets) # random data print("Testing PID with uniform random data") dims = 4 neps = 0 nbins = [50] * dims data_ranges = [[0] * dims, [1] * dims] data = np.random.rand(5000, dims) decomposition_equivalence_4D(dims, nreps, nbins, data_ranges, data) def test_pid_3D(): """ Testing 1. sum(PID) == mi 2. known PIDs for logic gates 3. synergy([0,1,2]) == synergy([0,2,1])? """ print("\n" + bcolors.TEST_HEADER + "PID-3D" + bcolors.ENDC) ## Testing PID by value dims = 3 neps = 0 nbins = [2] * dims data_ranges = [[0] * dims, [1] * dims] # AND gate data = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]] print("Testing total PID with total mi | AND gate = ", end="", flush=True) pid_test_3D(dims, nreps, nbins, data_ranges, data) # XOR gate data = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]] print("Testing total PID with total mi | XOR gate = ", end="", flush=True) pid_test_3D(dims, nreps, nbins, data_ranges, data) # random data dims = 3 neps = 0 nbins = [50] * 3 data_ranges = [[0] * 3, [1] * 3] data = np.random.rand(500, dims) print("Testing total PID with total mi | random data = ", end="", flush=True) pid_test_3D(dims, nreps, nbins, data_ranges, data) ## Testing PI decomposition dims = 3 neps = 0 nbins = [2] * 3 data_ranges = [[0] * 3, [1] * 3] # AND gate data = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]] print("Testing decomposition with AND gate = ", end="", flush=True) decomposition_test_3D(dims, nreps, nbins, data_ranges, data, [0.31, 0.0, 0.0, 0.5]) # XOR gate data = [[0, 0, 0], [0, 1, 1], [1, 0, 1], 
[1, 1, 0]] print("Testing decomposition with XOR gate = ", end="", flush=True) decomposition_test_3D(dims, nreps, nbins, data_ranges, data, [0.0, 0.0, 0.0, 1.0]) ## Testing decomposition equivalence dims = 3 neps = 0 nbins = [2] * 3 data_ranges = [[0] * 3, [1] * 3] # AND gate data = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]] print("Testing redundant and synergistic equivalence | AND gate") decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data) # XOR gate data = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]] print("Testing redundant and synergistic equivalence | XOR gate") decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data) # random data dims = 3 neps = 0 nbins = [50] * 3 data_ranges = [[0] * 3, [1] * 3] data = np.random.rand(500, dims) print("Testing redundant and synergistic equivalence | random data") decomposition_equivalence_3D(dims, nreps, nbins, data_ranges, data) def test_mutual_info(dims, nreps, nbins, data_ranges): """ Testing mutual information under three conditions 1. two uniform random variables (low MI) 2. two identical random variables (high MI) 3. one ranom variable and a noisy version of the same (medium MI) """ print("\n" + bcolors.TEST_HEADER + "MUTUAL INFORMATION" + bcolors.ENDC) uniform_random_mi_test(dims, nreps, nbins, data_ranges) identical_random_mi_test(dims, nreps, nbins, data_ranges, add_noise=False) identical_random_mi_test(dims, nreps, nbins, data_ranges, add_noise=True) def test_entropy(dims, nreps, nbins, data_ranges): """ Testing entropy under two conditions 1. A uniform random variable (high entropy) 2. A gaussian with low std. dev. 
(low entropy) """ print("\n" + bcolors.TEST_HEADER + "ENTROPY" + bcolors.ENDC) print("Testing entropy with uniform distribution = ", end="", flush=True) entropy_test(dims, nreps, nbins, data_ranges, lambda: np.random.uniform()) print("Testing entropy with normal distribution = ", end="", flush=True) entropy_test( dims, nreps, nbins, data_ranges, lambda: np.random.normal(loc=0.5, scale=0.01) ) def test_binning(dims, nreps, nbins, data_ranges): """ Test execution of both types of binning 1. Equal interval 2. Manual specification """ print("\n" + bcolors.TEST_HEADER + "BINNING" + bcolors.ENDC) mi_eq = mi_mb = None # resetting for this test dims = 2 # generating a commong set of datapoints datapoints = [] for _ in range(1000): point1 = np.random.rand() point2 = point1 + (np.random.rand() / 30) datapoints.append([point1, point2]) # Equal interval binning try: print("Estimating MI using equal interval binning = ", end="", flush=True) it = infotheory.InfoTools(dims, nreps) # set bin boundaries it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1]) # adding points it.add_data(datapoints) # computing mutual information mi_eq = it.mutual_info([0, 1]) print(mi_eq, SUCCESS) except Exception as e: _except(e) # Manual binning try: print("Estimating MI using manually specified binning = ", end="", flush=True) it = infotheory.InfoTools(dims, nreps) # set bin boundaries it.set_bin_boundaries([[0.3333, 0.6666], [0.3333, 0.6666]]) # adding points it.add_data(datapoints) # computing mutual information mi_mb = it.mutual_info([0, 1]) print(mi_mb, SUCCESS) except Exception as e: _except(e) # mi_eq == mi_mb? print( "Tested both binning methods. Difference in result = {}".format(mi_eq - mi_mb), SUCCESS, ) def test_creation(dims, nreps, nbins, data_ranges): print("Testing creating an object. 
", end="", flush=True) try: # creating object it = infotheory.InfoTools(dims, nreps) it.set_equal_interval_binning(nbins, data_ranges[0], data_ranges[1]) print(bcolors.OKGREEN + "SUCCESS" + bcolors.ENDC) except Exception as e: _except(e) def run_tests(dims, nreps, nbins, data_ranges): """ runs all tests """ print(bcolors.HEADER + "************ Starting tests ************" + bcolors.ENDC) test_creation(dims, nreps, nbins, data_ranges) test_binning(dims, nreps, [3, 3], data_ranges) test_entropy(1, nreps, [50], [[0], [1]]) test_mutual_info(dims, nreps, nbins, data_ranges) test_pid_3D() test_pid_4D() print( "\n" + bcolors.HEADER + "************ Tests completed ************" + bcolors.ENDC ) def manual_test(m, n): it = infotheory.InfoTools(2, 1, [2, 2], [0, 0], [1, 1]) it.add_data([[0, 0]] * m + [[1, 1]] * n) print("m = ", m, " n = ", n, " MI = ", it.mutual_info([0, 1])) if __name__ == "__main__": dims = 2 nreps = 0 nbins = [50] * dims data_ranges = [[0] * dims, [1] * dims] # for m,n in zip([1,2,2,3,500,499,200],[1,1,2,2,500,500,500]): # manual_test(m,n) run_tests(dims, nreps, nbins, data_ranges)
[ "numpy.random.normal", "numpy.prod", "numpy.random.rand", "numpy.concatenate", "numpy.random.uniform", "infotheory.InfoTools", "numpy.round" ]
[((493, 528), 'numpy.round', 'np.round', (['result'], {'decimals': 'decimals'}), '(result, decimals=decimals)\n', (501, 528), True, 'import numpy as np\n'), ((542, 577), 'numpy.round', 'np.round', (['target'], {'decimals': 'decimals'}), '(target, decimals=decimals)\n', (550, 577), True, 'import numpy as np\n'), ((9607, 9633), 'numpy.random.rand', 'np.random.rand', (['(5000)', 'dims'], {}), '(5000, dims)\n', (9621, 9633), True, 'import numpy as np\n'), ((10563, 10588), 'numpy.random.rand', 'np.random.rand', (['(500)', 'dims'], {}), '(500, dims)\n', (10577, 10588), True, 'import numpy as np\n'), ((11971, 11996), 'numpy.random.rand', 'np.random.rand', (['(500)', 'dims'], {}), '(500, dims)\n', (11985, 11996), True, 'import numpy as np\n'), ((15900, 15950), 'infotheory.InfoTools', 'infotheory.InfoTools', (['(2)', '(1)', '[2, 2]', '[0, 0]', '[1, 1]'], {}), '(2, 1, [2, 2], [0, 0], [1, 1])\n', (15920, 15950), False, 'import infotheory\n'), ((982, 1015), 'infotheory.InfoTools', 'infotheory.InfoTools', (['dims', 'nreps'], {}), '(dims, nreps)\n', (1002, 1015), False, 'import infotheory\n'), ((2646, 2679), 'infotheory.InfoTools', 'infotheory.InfoTools', (['dims', 'nreps'], {}), '(dims, nreps)\n', (2666, 2679), False, 'import infotheory\n'), ((3908, 3941), 'infotheory.InfoTools', 'infotheory.InfoTools', (['dims', 'nreps'], {}), '(dims, nreps)\n', (3928, 3941), False, 'import infotheory\n'), ((4503, 4527), 'numpy.round', 'np.round', (['mi'], {'decimals': '(6)'}), '(mi, decimals=6)\n', (4511, 4527), True, 'import numpy as np\n'), ((4992, 5025), 'infotheory.InfoTools', 'infotheory.InfoTools', (['dims', 'nreps'], {}), '(dims, nreps)\n', (5012, 5025), False, 'import infotheory\n'), ((5767, 5800), 'infotheory.InfoTools', 'infotheory.InfoTools', (['dims', 'nreps'], {}), '(dims, nreps)\n', (5787, 5800), False, 'import infotheory\n'), ((6668, 6701), 'infotheory.InfoTools', 'infotheory.InfoTools', (['dims', 'nreps'], {}), '(dims, nreps)\n', (6688, 6701), False, 'import infotheory\n'), 
((7609, 7642), 'infotheory.InfoTools', 'infotheory.InfoTools', (['dims', 'nreps'], {}), '(dims, nreps)\n', (7629, 7642), False, 'import infotheory\n'), ((8389, 8422), 'infotheory.InfoTools', 'infotheory.InfoTools', (['dims', 'nreps'], {}), '(dims, nreps)\n', (8409, 8422), False, 'import infotheory\n'), ((13736, 13752), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13750, 13752), True, 'import numpy as np\n'), ((13982, 14015), 'infotheory.InfoTools', 'infotheory.InfoTools', (['dims', 'nreps'], {}), '(dims, nreps)\n', (14002, 14015), False, 'import infotheory\n'), ((14466, 14499), 'infotheory.InfoTools', 'infotheory.InfoTools', (['dims', 'nreps'], {}), '(dims, nreps)\n', (14486, 14499), False, 'import infotheory\n'), ((15119, 15152), 'infotheory.InfoTools', 'infotheory.InfoTools', (['dims', 'nreps'], {}), '(dims, nreps)\n', (15139, 15152), False, 'import infotheory\n'), ((4393, 4460), 'numpy.round', 'np.round', (['[redundant_info, unique_1, unique_2, synergy]'], {'decimals': '(6)'}), '([redundant_info, unique_1, unique_2, synergy], decimals=6)\n', (4401, 4460), True, 'import numpy as np\n'), ((6824, 6857), 'numpy.random.rand', 'np.random.rand', (['num_samples', 'dims'], {}), '(num_samples, dims)\n', (6838, 6857), True, 'import numpy as np\n'), ((7834, 7856), 'numpy.random.rand', 'np.random.rand', (['p_dims'], {}), '(p_dims)\n', (7848, 7856), True, 'import numpy as np\n'), ((13127, 13146), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (13144, 13146), True, 'import numpy as np\n'), ((13292, 13329), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.5)', 'scale': '(0.01)'}), '(loc=0.5, scale=0.01)\n', (13308, 13329), True, 'import numpy as np\n'), ((6180, 6247), 'numpy.round', 'np.round', (['[redundant_info, unique_1, unique_2, synergy]'], {'decimals': '(2)'}), '([redundant_info, unique_1, unique_2, synergy], decimals=2)\n', (6188, 6247), True, 'import numpy as np\n'), ((8027, 8059), 'numpy.concatenate', 'np.concatenate', 
(['(point1, point2)'], {}), '((point1, point2))\n', (8041, 8059), True, 'import numpy as np\n'), ((13780, 13796), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13794, 13796), True, 'import numpy as np\n'), ((7080, 7094), 'numpy.prod', 'np.prod', (['nbins'], {}), '(nbins)\n', (7087, 7094), True, 'import numpy as np\n'), ((8161, 8175), 'numpy.prod', 'np.prod', (['nbins'], {}), '(nbins)\n', (8168, 8175), True, 'import numpy as np\n'), ((7918, 7940), 'numpy.random.rand', 'np.random.rand', (['p_dims'], {}), '(p_dims)\n', (7932, 7940), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*- """ @author: clausmichele """ import time import tensorflow as tf import cv2 import numpy as np from tqdm import tqdm def SpatialCNN(input, is_training=False, output_channels=3, reuse=tf.AUTO_REUSE): with tf.variable_scope('block1',reuse=reuse): output = tf.layers.conv2d(input, 128, 3, padding='same', activation=tf.nn.relu) for layers in range(2, 20): with tf.variable_scope('block%d' % layers,reuse=reuse): output = tf.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False) output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training)) with tf.variable_scope('block20', reuse=reuse): output = tf.layers.conv2d(output, output_channels, 3, padding='same', use_bias=False) return input - output def Temp3CNN(input, is_training=False, output_channels=3, reuse=tf.AUTO_REUSE): input_middle = input[:,:,:,3:6] with tf.variable_scope('temp-block1',reuse=reuse): output = tf.layers.conv2d(input, 128, 3, padding='same', activation=tf.nn.leaky_relu) for layers in range(2, 20): with tf.variable_scope('temp-block%d' % layers,reuse=reuse): output = tf.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False) output = tf.nn.leaky_relu(output) with tf.variable_scope('temp-block20', reuse=reuse): output = tf.layers.conv2d(output, output_channels, 3, padding='same', use_bias=False) return input_middle - output class ViDeNN(object): def __init__(self, sess): self.sess = sess # build model self.Y_ = tf.placeholder(tf.float32, [None, None, None, 3],name='clean_image') self.X = tf.placeholder(tf.float32, [None, None, None, 3],name='noisy_image') self.Y = SpatialCNN(self.X) self.Y_frames = tf.placeholder(tf.float32, [None, None, None, 9],name='clean_frames') self.Xframes = tf.placeholder(tf.float32, [None, None, None, 9],name='noisy_frames') self.Yframes = Temp3CNN(self.Xframes) init = tf.global_variables_initializer() self.sess.run(init) print("[*] Initialize model successfully...") def 
denoise(self, eval_files, eval_files_noisy, print_psnr, ckpt_dir, save_dir): # init variables tf.global_variables_initializer().run() assert len(eval_files) != 0, '[!] No testing data!' if ckpt_dir is None: full_path = tf.train.latest_checkpoint('./Temp3-CNN/ckpt') if(full_path is None): print('[!] No Temp3-CNN checkpoint!') quit() vars_to_restore_temp3CNN = {} for i in range(len(tf.global_variables())): if tf.global_variables()[i].name[0] != 'b': a = tf.global_variables()[i].name.split(':')[0] vars_to_restore_temp3CNN[a] = tf.global_variables()[i] saver_t = tf.train.Saver(var_list=vars_to_restore_temp3CNN) saver_t.restore(self.sess, full_path) full_path = tf.train.latest_checkpoint('./Spatial-CNN/ckpt_awgn') if(full_path is None): print('[!] No Spatial-CNN checkpoint!') quit() vars_to_restore_spatialCNN = {} for i in range(len(tf.global_variables())): if tf.global_variables()[i].name[0] != 't': a = tf.global_variables()[i].name.split(':')[0] vars_to_restore_spatialCNN[a] = tf.global_variables()[i] saver_s = tf.train.Saver(var_list=vars_to_restore_spatialCNN) saver_s.restore(self.sess, full_path) else: load_model_status, _ = self.load(ckpt_dir) print("[*] Model restore successfully!") # psnr_sum = 0 start = time.time() for idx in tqdm(range(len(eval_files)-1)): if idx==0: test = cv2.imread(eval_files[idx]) test1 = cv2.imread(eval_files[idx+1]) test2 = cv2.imread(eval_files[idx+2]) noisy = cv2.imread(eval_files_noisy[idx]) noisy1 = cv2.imread(eval_files_noisy[idx+1]) noisy2 = cv2.imread(eval_files_noisy[idx+2]) test = test.astype(np.float32) / 255.0 test1 = test1.astype(np.float32) / 255.0 test2 = test2.astype(np.float32) / 255.0 noisy = noisy.astype(np.float32) / 255.0 noisy1 = noisy1.astype(np.float32) / 255.0 noisy2 = noisy2.astype(np.float32) / 255.0 noisyin2 = np.zeros((1,test.shape[0],test.shape[1],9)) current = np.zeros((test.shape[0],test.shape[1],3)) previous = np.zeros((test.shape[0],test.shape[1],3)) noisyin = np.zeros((3,test.shape[0],test.shape[1],3)) 
noisyin[0] = noisy noisyin[1] = noisy1 noisyin[2] = noisy2 out = self.sess.run([self.Y],feed_dict={self.X:noisyin}) out = np.asarray(out) noisyin2[0,:,:,0:3] = out[0,0] noisyin2[0,:,:,3:6] = out[0,0] noisyin2[0,:,:,6:] = out[0,1] temp_clean_image= self.sess.run([self.Yframes],feed_dict={self.Xframes:noisyin2}) temp_clean_image = np.asarray(temp_clean_image) cv2.imwrite(save_dir + '/%04d.png'%idx,temp_clean_image[0,0]*255) psnr = psnr_scaled(test,temp_clean_image[0,0]) psnr1 = psnr_scaled(test,out[0,0]) psnr_sum += psnr if print_psnr: print(" frame %d denoised, PSNR: %.2f" % (idx, psnr)) else: print(" frame %d denoised" % (idx)) noisyin2[0,:,:,0:3] = out[0,0] noisyin2[0,:,:,3:6] = out[0,1] noisyin2[0,:,:,6:] = out[0,2] current[:,:,:] = out[0,2,:,:,:] previous[:,:,:] = out[0,1,:,:,:] else: if idx<(len(eval_files)-2): test3 = cv2.imread(eval_files[idx+2]) test3 = test3.astype(np.float32) / 255.0 noisy3 = cv2.imread(eval_files_noisy[idx+2]) noisy3 = noisy3.astype(np.float32) / 255.0 out2 = self.sess.run([self.Y],feed_dict={self.X:np.expand_dims(noisy3,0)}) out2 = np.asarray(out2) noisyin2[0,:,:,0:3] = previous noisyin2[0,:,:,3:6] = current noisyin2[0,:,:,6:] = out2[0,0] previous = current current = out2[0,0] else: try: out2 except NameError: out2 = np.zeros((out.shape)) out2=out out2[0,0]=out[0,2] noisyin2[0,:,:,0:3] = current noisyin2[0,:,:,3:6] = out2[0,0] noisyin2[0,:,:,6:] = out2[0,0] temp_clean_image= self.sess.run([self.Yframes],feed_dict={self.Xframes:noisyin2}) temp_clean_image = np.asarray(temp_clean_image) cv2.imwrite(save_dir+ '/%04d.png'%(idx+1),temp_clean_image[0,0]*255) # calculate PSNR if idx==0: psnr1 = psnr_scaled(test1,out[0,1]) psnr = psnr_scaled(test1, temp_clean_image[0,0]) else: psnr1 = psnr_scaled(test2,previous) psnr = psnr_scaled(test2, temp_clean_image[0,0]) try: test3 except NameError: test3=test2 test2=test3 if print_psnr: print(" frame %d denoised, PSNR: %.2f" % (idx+1, psnr)) else: print(" frame %d denoised" % (idx+1)) psnr_sum += psnr 
avg_psnr = psnr_sum / len(eval_files) if print_psnr: print("--- Average PSNR %.2f ---" % avg_psnr) print("--- Elapsed time: %.4fs" %(time.time()-start)) def load(self, checkpoint_dir): print("[*] Reading checkpoint...") saver = tf.train.Saver() ckpt = tf.train.get_checkpoint_state(checkpoint_dir) if ckpt and ckpt.model_checkpoint_path: full_path = tf.train.latest_checkpoint(checkpoint_dir) global_step = int(full_path.split('/')[-1].split('-')[-1]) saver.restore(self.sess, full_path) return True, global_step else: return False, 0 def psnr_scaled(im1, im2): # PSNR function for 0-1 values mse = ((im1 - im2) ** 2).mean() mse = mse * (255 ** 2) psnr = 10 * np.log10(255 **2 / mse) return psnr
[ "cv2.imwrite", "numpy.log10", "tensorflow.variable_scope", "tensorflow.placeholder", "tensorflow.train.Saver", "numpy.asarray", "tensorflow.nn.leaky_relu", "tensorflow.global_variables", "tensorflow.global_variables_initializer", "tensorflow.layers.conv2d", "tensorflow.train.get_checkpoint_state...
[((232, 272), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""block1"""'], {'reuse': 'reuse'}), "('block1', reuse=reuse)\n", (249, 272), True, 'import tensorflow as tf\n'), ((284, 354), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['input', '(128)', '(3)'], {'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(input, 128, 3, padding='same', activation=tf.nn.relu)\n", (300, 354), True, 'import tensorflow as tf\n'), ((632, 673), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""block20"""'], {'reuse': 'reuse'}), "('block20', reuse=reuse)\n", (649, 673), True, 'import tensorflow as tf\n'), ((686, 762), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['output', 'output_channels', '(3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(output, output_channels, 3, padding='same', use_bias=False)\n", (702, 762), True, 'import tensorflow as tf\n'), ((906, 951), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""temp-block1"""'], {'reuse': 'reuse'}), "('temp-block1', reuse=reuse)\n", (923, 951), True, 'import tensorflow as tf\n'), ((963, 1039), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['input', '(128)', '(3)'], {'padding': '"""same"""', 'activation': 'tf.nn.leaky_relu'}), "(input, 128, 3, padding='same', activation=tf.nn.leaky_relu)\n", (979, 1039), True, 'import tensorflow as tf\n'), ((1275, 1321), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""temp-block20"""'], {'reuse': 'reuse'}), "('temp-block20', reuse=reuse)\n", (1292, 1321), True, 'import tensorflow as tf\n'), ((1334, 1410), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['output', 'output_channels', '(3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(output, output_channels, 3, padding='same', use_bias=False)\n", (1350, 1410), True, 'import tensorflow as tf\n'), ((1538, 1607), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, None, 3]'], {'name': '"""clean_image"""'}), "(tf.float32, [None, None, None, 3], name='clean_image')\n", 
(1552, 1607), True, 'import tensorflow as tf\n'), ((1618, 1687), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, None, 3]'], {'name': '"""noisy_image"""'}), "(tf.float32, [None, None, None, 3], name='noisy_image')\n", (1632, 1687), True, 'import tensorflow as tf\n'), ((1735, 1805), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, None, 9]'], {'name': '"""clean_frames"""'}), "(tf.float32, [None, None, None, 9], name='clean_frames')\n", (1749, 1805), True, 'import tensorflow as tf\n'), ((1822, 1892), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, None, 9]'], {'name': '"""noisy_frames"""'}), "(tf.float32, [None, None, None, 9], name='noisy_frames')\n", (1836, 1892), True, 'import tensorflow as tf\n'), ((1941, 1974), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1972, 1974), True, 'import tensorflow as tf\n'), ((3379, 3390), 'time.time', 'time.time', ([], {}), '()\n', (3388, 3390), False, 'import time\n'), ((6744, 6760), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6758, 6760), True, 'import tensorflow as tf\n'), ((6770, 6815), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (6799, 6815), True, 'import tensorflow as tf\n'), ((7201, 7225), 'numpy.log10', 'np.log10', (['(255 ** 2 / mse)'], {}), '(255 ** 2 / mse)\n', (7209, 7225), True, 'import numpy as np\n'), ((391, 441), 'tensorflow.variable_scope', 'tf.variable_scope', (["('block%d' % layers)"], {'reuse': 'reuse'}), "('block%d' % layers, reuse=reuse)\n", (408, 441), True, 'import tensorflow as tf\n'), ((454, 545), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['output', '(64)', '(3)'], {'padding': '"""same"""', 'name': "('conv%d' % layers)", 'use_bias': '(False)'}), "(output, 64, 3, padding='same', name='conv%d' % layers,\n use_bias=False)\n", (470, 545), True, 'import tensorflow as tf\n'), ((1076, 
1131), 'tensorflow.variable_scope', 'tf.variable_scope', (["('temp-block%d' % layers)"], {'reuse': 'reuse'}), "('temp-block%d' % layers, reuse=reuse)\n", (1093, 1131), True, 'import tensorflow as tf\n'), ((1144, 1235), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['output', '(64)', '(3)'], {'padding': '"""same"""', 'name': "('conv%d' % layers)", 'use_bias': '(False)'}), "(output, 64, 3, padding='same', name='conv%d' % layers,\n use_bias=False)\n", (1160, 1235), True, 'import tensorflow as tf\n'), ((1244, 1268), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['output'], {}), '(output)\n', (1260, 1268), True, 'import tensorflow as tf\n'), ((2281, 2327), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['"""./Temp3-CNN/ckpt"""'], {}), "('./Temp3-CNN/ckpt')\n", (2307, 2327), True, 'import tensorflow as tf\n'), ((2661, 2710), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'vars_to_restore_temp3CNN'}), '(var_list=vars_to_restore_temp3CNN)\n', (2675, 2710), True, 'import tensorflow as tf\n'), ((2769, 2822), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['"""./Spatial-CNN/ckpt_awgn"""'], {}), "('./Spatial-CNN/ckpt_awgn')\n", (2795, 2822), True, 'import tensorflow as tf\n'), ((3162, 3213), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'vars_to_restore_spatialCNN'}), '(var_list=vars_to_restore_spatialCNN)\n', (3176, 3213), True, 'import tensorflow as tf\n'), ((5957, 5985), 'numpy.asarray', 'np.asarray', (['temp_clean_image'], {}), '(temp_clean_image)\n', (5967, 5985), True, 'import numpy as np\n'), ((5989, 6066), 'cv2.imwrite', 'cv2.imwrite', (["(save_dir + '/%04d.png' % (idx + 1))", '(temp_clean_image[0, 0] * 255)'], {}), "(save_dir + '/%04d.png' % (idx + 1), temp_clean_image[0, 0] * 255)\n", (6000, 6066), False, 'import cv2\n'), ((6873, 6915), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (6899, 6915), True, 'import 
tensorflow as tf\n'), ((565, 624), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['output'], {'training': 'is_training'}), '(output, training=is_training)\n', (594, 624), True, 'import tensorflow as tf\n'), ((2149, 2182), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2180, 2182), True, 'import tensorflow as tf\n'), ((3461, 3488), 'cv2.imread', 'cv2.imread', (['eval_files[idx]'], {}), '(eval_files[idx])\n', (3471, 3488), False, 'import cv2\n'), ((3501, 3532), 'cv2.imread', 'cv2.imread', (['eval_files[idx + 1]'], {}), '(eval_files[idx + 1])\n', (3511, 3532), False, 'import cv2\n'), ((3543, 3574), 'cv2.imread', 'cv2.imread', (['eval_files[idx + 2]'], {}), '(eval_files[idx + 2])\n', (3553, 3574), False, 'import cv2\n'), ((3585, 3618), 'cv2.imread', 'cv2.imread', (['eval_files_noisy[idx]'], {}), '(eval_files_noisy[idx])\n', (3595, 3618), False, 'import cv2\n'), ((3632, 3669), 'cv2.imread', 'cv2.imread', (['eval_files_noisy[idx + 1]'], {}), '(eval_files_noisy[idx + 1])\n', (3642, 3669), False, 'import cv2\n'), ((3681, 3718), 'cv2.imread', 'cv2.imread', (['eval_files_noisy[idx + 2]'], {}), '(eval_files_noisy[idx + 2])\n', (3691, 3718), False, 'import cv2\n'), ((4014, 4060), 'numpy.zeros', 'np.zeros', (['(1, test.shape[0], test.shape[1], 9)'], {}), '((1, test.shape[0], test.shape[1], 9))\n', (4022, 4060), True, 'import numpy as np\n'), ((4073, 4116), 'numpy.zeros', 'np.zeros', (['(test.shape[0], test.shape[1], 3)'], {}), '((test.shape[0], test.shape[1], 3))\n', (4081, 4116), True, 'import numpy as np\n'), ((4131, 4174), 'numpy.zeros', 'np.zeros', (['(test.shape[0], test.shape[1], 3)'], {}), '((test.shape[0], test.shape[1], 3))\n', (4139, 4174), True, 'import numpy as np\n'), ((4193, 4239), 'numpy.zeros', 'np.zeros', (['(3, test.shape[0], test.shape[1], 3)'], {}), '((3, test.shape[0], test.shape[1], 3))\n', (4201, 4239), True, 'import numpy as np\n'), ((4380, 4395), 'numpy.asarray', 
'np.asarray', (['out'], {}), '(out)\n', (4390, 4395), True, 'import numpy as np\n'), ((4610, 4638), 'numpy.asarray', 'np.asarray', (['temp_clean_image'], {}), '(temp_clean_image)\n', (4620, 4638), True, 'import numpy as np\n'), ((4643, 4714), 'cv2.imwrite', 'cv2.imwrite', (["(save_dir + '/%04d.png' % idx)", '(temp_clean_image[0, 0] * 255)'], {}), "(save_dir + '/%04d.png' % idx, temp_clean_image[0, 0] * 255)\n", (4654, 4714), False, 'import cv2\n'), ((2462, 2483), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2481, 2483), True, 'import tensorflow as tf\n'), ((2961, 2982), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2980, 2982), True, 'import tensorflow as tf\n'), ((5171, 5202), 'cv2.imread', 'cv2.imread', (['eval_files[idx + 2]'], {}), '(eval_files[idx + 2])\n', (5181, 5202), False, 'import cv2\n'), ((5261, 5298), 'cv2.imread', 'cv2.imread', (['eval_files_noisy[idx + 2]'], {}), '(eval_files_noisy[idx + 2])\n', (5271, 5298), False, 'import cv2\n'), ((5439, 5455), 'numpy.asarray', 'np.asarray', (['out2'], {}), '(out2)\n', (5449, 5455), True, 'import numpy as np\n'), ((6643, 6654), 'time.time', 'time.time', ([], {}), '()\n', (6652, 6654), False, 'import time\n'), ((2623, 2644), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2642, 2644), True, 'import tensorflow as tf\n'), ((3124, 3145), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (3143, 3145), True, 'import tensorflow as tf\n'), ((5679, 5698), 'numpy.zeros', 'np.zeros', (['out.shape'], {}), '(out.shape)\n', (5687, 5698), True, 'import numpy as np\n'), ((2494, 2515), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2513, 2515), True, 'import tensorflow as tf\n'), ((2993, 3014), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (3012, 3014), True, 'import tensorflow as tf\n'), ((5400, 5425), 'numpy.expand_dims', 'np.expand_dims', (['noisy3', '(0)'], {}), '(noisy3, 
0)\n', (5414, 5425), True, 'import numpy as np\n'), ((2544, 2565), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2563, 2565), True, 'import tensorflow as tf\n'), ((3043, 3064), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (3062, 3064), True, 'import tensorflow as tf\n')]
import igraph as ig import numpy as np import random import time from collections import defaultdict, Counter, deque import itertools MAX_LENGTH = 1000 # maximum random walk length (hyperparameter) class RandomWalkSingleAttribute(object): def __init__(self, p_diff, p_same, jump, out, gpre, attr_name='single_attr', debug=True): self.p_diff = p_diff self.p_same = p_same self.jump = jump self.out = out self.gpre = gpre self.attr_name = attr_name self.directed = True self.g = ig.Graph(directed=self.directed) self.attributed = attr_name in gpre.vs.attributes() self.debug = debug self.setup() def summary(self): return self.g.summary() def flip(self, p): return random.random() < p def setup(self): self.seed_same = self.p_same/(self.p_same+self.p_diff) self.seed_diff = 1.-self.seed_same self.n0 = len(self.gpre.vs) self.total_edges = len(self.gpre.es) self.next_nid = self.n0 self.chunk_nid = self.next_nid-1 self.chunk_size = 1 self.nbors = defaultdict(list) self.out_nbors = defaultdict(list) self.in_nbors = defaultdict(list) self.nid_chunk_map = {} self.nid_attr_map = {} self.attr_nid_map = defaultdict(list) for nid, nbor_nids in enumerate(self.gpre.get_adjlist(mode='ALL')): self.nbors[nid] = nbor_nids for nid, nbor_nids in enumerate(self.gpre.get_adjlist(mode='OUT')): self.out_nbors[nid] = nbor_nids for nid, nbor_nids in enumerate(self.gpre.get_adjlist(mode='IN')): self.in_nbors[nid] = nbor_nids for node in self.gpre.vs: self.nid_attr_map[node.index] = node[self.attr_name] if self.attributed else None for nid, attr in self.nid_attr_map.items(): self.attr_nid_map[attr].append(nid) for nid in self.gpre.vs.indices: self.nid_chunk_map[nid] = 0 def add_nodes(self, chunk_seq, mean_seq, chunk_attr_sampler=None): if self.attributed: assert chunk_attr_sampler num_chunks = len(chunk_seq) chunk_debug = num_chunks//10 if (self.debug): print ("Total chunks: {}".format(num_chunks)) for idx, (chunk_size, m) in enumerate(zip(chunk_seq, mean_seq)): if self.debug and (idx + 1) % chunk_debug 
== 0: print (idx, end=' ') self.chunk_size = chunk_size self.m = m self.add_chunk(idx, attr_sampler=chunk_attr_sampler[idx][:] if self.attributed else None) self.chunk_nid = self.next_nid-1 self.build_graph() def add_chunk(self, chunk_id, attr_sampler=None): if self.attributed: assert attr_sampler marked = defaultdict(frozenset) for _ in range(self.chunk_size): new_nid = self.next_nid; self.next_nid += 1 self.nid_chunk_map[new_nid] = chunk_id attrs = attr_sampler.pop() if self.attributed else None marked[new_nid] = self.add_node(new_nid, attrs=attrs) self.update_node(new_nid, marked[new_nid]) def update_node(self, nid, marked): for nbor_nid in marked: self.out_nbors[nid].append(nbor_nid) self.in_nbors[nbor_nid].append(nid) def build_graph(self): self.edges = edges = set() all_nbors = self.out_nbors for node, nbors in all_nbors.items(): for nbor in nbors: edges.add((node, nbor)) self.g.add_vertices(self.next_nid) self.g.add_edges(list(edges)) self.g.simplify() self.g.vs['chunk_id'] = [self.nid_chunk_map[n] for n in self.g.vs.indices] if self.attributed: self.g.vs[self.attr_name] = [self.nid_attr_map[n] for n in self.g.vs.indices] if self.debug: print ("\n{}".format(self.g.summary())) def link(self, cur_nid, attrs=None): if not self.attributed: return random.random() < self.p_diff else: cur_attrs = self.nid_attr_map[cur_nid] p = self.p_same if cur_attrs == attrs else self.p_diff return random.random() < p def get_seed_nid(self, new_nid, attrs=None): if not self.attributed: return random.randint(0, new_nid-1) if random.random() < self.seed_diff: return random.randint(0, new_nid-1) same_nids = self.attr_nid_map[attrs] if same_nids: return random.choice(same_nids) return np.random.randint(0, new_nid-1) def add_node(self, new_nid, attrs=None): marked = set() m = int(round(self.m if self.flip(0.5) else self.m+0.5)) cur_nid = seed_nid = self.get_seed_nid(new_nid, attrs=attrs) num_marked, length, max_length = 0, 0, MAX_LENGTH/max(self.p_same, self.p_diff) while num_marked < 
m: length += 1 if length > max_length: break if cur_nid not in marked and self.link(cur_nid, attrs): num_marked += 1 marked.add(cur_nid) if random.random() < self.jump: cur_nid = seed_nid else: use_out = random.random() < self.out nbors = self.out_nbors[cur_nid] if use_out else self.in_nbors[cur_nid] if not nbors: nbors = self.in_nbors[cur_nid] if use_out else self.out_nbors[cur_nid] if nbors: cur_nid = random.choice(nbors) else: cur_nid = seed_nid = self.get_seed_nid(new_nid, attrs=attrs) if self.attributed: self.nid_attr_map[new_nid] = attrs self.attr_nid_map[attrs].append(new_nid) return marked
[ "random.choice", "numpy.random.randint", "collections.defaultdict", "random.random", "random.randint", "igraph.Graph" ]
[((549, 581), 'igraph.Graph', 'ig.Graph', ([], {'directed': 'self.directed'}), '(directed=self.directed)\n', (557, 581), True, 'import igraph as ig\n'), ((1121, 1138), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1132, 1138), False, 'from collections import defaultdict, Counter, deque\n'), ((1164, 1181), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1175, 1181), False, 'from collections import defaultdict, Counter, deque\n'), ((1206, 1223), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1217, 1223), False, 'from collections import defaultdict, Counter, deque\n'), ((1315, 1332), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1326, 1332), False, 'from collections import defaultdict, Counter, deque\n'), ((2723, 2745), 'collections.defaultdict', 'defaultdict', (['frozenset'], {}), '(frozenset)\n', (2734, 2745), False, 'from collections import defaultdict, Counter, deque\n'), ((4434, 4467), 'numpy.random.randint', 'np.random.randint', (['(0)', '(new_nid - 1)'], {}), '(0, new_nid - 1)\n', (4451, 4467), True, 'import numpy as np\n'), ((769, 784), 'random.random', 'random.random', ([], {}), '()\n', (782, 784), False, 'import random\n'), ((4210, 4240), 'random.randint', 'random.randint', (['(0)', '(new_nid - 1)'], {}), '(0, new_nid - 1)\n', (4224, 4240), False, 'import random\n'), ((4250, 4265), 'random.random', 'random.random', ([], {}), '()\n', (4263, 4265), False, 'import random\n'), ((4291, 4321), 'random.randint', 'random.randint', (['(0)', '(new_nid - 1)'], {}), '(0, new_nid - 1)\n', (4305, 4321), False, 'import random\n'), ((4394, 4418), 'random.choice', 'random.choice', (['same_nids'], {}), '(same_nids)\n', (4407, 4418), False, 'import random\n'), ((3919, 3934), 'random.random', 'random.random', ([], {}), '()\n', (3932, 3934), False, 'import random\n'), ((4101, 4116), 'random.random', 'random.random', ([], {}), '()\n', (4114, 4116), False, 'import random\n'), ((5024, 
5039), 'random.random', 'random.random', ([], {}), '()\n', (5037, 5039), False, 'import random\n'), ((5132, 5147), 'random.random', 'random.random', ([], {}), '()\n', (5145, 5147), False, 'import random\n'), ((5384, 5404), 'random.choice', 'random.choice', (['nbors'], {}), '(nbors)\n', (5397, 5404), False, 'import random\n')]
import numpy as np from skimage.io import imread # import pdb def add_patch(img,trigger): flag=False if img.max()>1.: img=img/255. flag=True if trigger.max()>1.: trigger=trigger/255. # x,y=np.random.randint(10,20,size=(2,)) x,y = np.random.choice([3, 28]), np.random.choice([3, 28]) m,n,_=trigger.shape #img[x-int(m/2):x+m-int(m/2),y-int(n/2):y+n-int(n/2),:]=img[x-int(m/2):x+m-int(m/2), # y-int(n/2):y+n-int(n/2),:]*(1-trigger)+trigger img[x-int(m/2):x+m-int(m/2),y-int(n/2):y+n-int(n/2),:]=trigger # opaque trigger if flag: img=(img*255).astype('uint8') return img def generate_poisoned_data(X_train,Y_train,source,target, trigger): ind=np.argwhere(Y_train==source) Y_poisoned=target*np.ones((ind.shape[0])).astype(int) # k=np.random.randint(6,11) # trigger=imread('Data/Masks_Test_5/mask%1d.bmp'%(k)) # pdb.set_trace() X_poisoned=np.stack([add_patch(X_train[i,...],trigger) for i in ind.squeeze()],0) return X_poisoned,Y_poisoned,trigger,ind.squeeze()
[ "numpy.random.choice", "numpy.argwhere", "numpy.ones" ]
[((795, 825), 'numpy.argwhere', 'np.argwhere', (['(Y_train == source)'], {}), '(Y_train == source)\n', (806, 825), True, 'import numpy as np\n'), ((276, 301), 'numpy.random.choice', 'np.random.choice', (['[3, 28]'], {}), '([3, 28])\n', (292, 301), True, 'import numpy as np\n'), ((303, 328), 'numpy.random.choice', 'np.random.choice', (['[3, 28]'], {}), '([3, 28])\n', (319, 328), True, 'import numpy as np\n'), ((846, 867), 'numpy.ones', 'np.ones', (['ind.shape[0]'], {}), '(ind.shape[0])\n', (853, 867), True, 'import numpy as np\n')]
import torch import torch.nn as nn from torch import optim import numpy as np import nltk class TreeRecursiveEduNN(nn.Module): def __init__(self, embed_dict, glove, embed_size, glove_size, hidden_size, use_relations=True): super(TreeRecursiveEduNN, self).__init__() self.glove = glove self.embed_dict = embed_dict self.embed_size = embed_size self.hidden_size = hidden_size self.glove_size = glove_size self.embeddings = self.init_embeddings() self.use_relations = use_relations self.Wforget = nn.Linear(embed_size, hidden_size, bias=True) self.Uforget_l_l = nn.Linear(hidden_size, hidden_size, bias=False) self.Uforget_l_r = nn.Linear(hidden_size, hidden_size, bias=False) self.Uforget_r_l = nn.Linear(hidden_size, hidden_size, bias=False) self.Uforget_r_r = nn.Linear(hidden_size, hidden_size, bias=False) self.Winput = nn.Linear(embed_size, hidden_size, bias=True) self.Uinput_l = nn.Linear(hidden_size, hidden_size, bias=False) self.Uinput_r = nn.Linear(hidden_size, hidden_size, bias=False) self.Woutput = nn.Linear(embed_size, hidden_size, bias=True) self.Uoutput_l = nn.Linear(hidden_size, hidden_size, bias=False) self.Uoutput_r = nn.Linear(hidden_size, hidden_size, bias=False) self.Wupdate = nn.Linear(embed_size, hidden_size, bias=True) self.Uupdate_l = nn.Linear(hidden_size, hidden_size, bias=False) self.Uupdate_r = nn.Linear(hidden_size, hidden_size, bias=False) self.tree2scores = nn.Linear(hidden_size * 2, 3, bias=True) self.edu_lstm = nn.LSTM(glove_size, hidden_size, num_layers=1, batch_first=True) def forward(self, input_tree): root_hidden_output = self.forward_recurse(input_tree) return self.tree2scores(root_hidden_output)[0], root_hidden_output def forward_recurse(self, input_tree): if (input_tree.left_child is None): return self.compute_edu_embeddings(input_tree) else: l_child_hidden_state, l_child_cell = self.forward_recurse(input_tree.left_child) r_child_hidden_state, r_child_cell = self.forward_recurse(input_tree.right_child) # Embedding for the current discourse 
role (node) mononuclear = ["Joint", "Contrast", "TextualOrganization", "Same-Unit"] if (input_tree.role == 'Root'): return torch.cat((l_child_hidden_state, r_child_hidden_state), 2) elif (input_tree.rel_type in mononuclear): if self.use_relations: root_embedding = self.embeddings(self.embed_dict[input_tree.rel_type]) else: root_embedding = self.embeddings(self.embed_dict['Nucleus']) else: if self.use_relations: root_embedding = self.embeddings(self.embed_dict[input_tree.rel_type + "_" + input_tree.role]) else: root_embedding = self.embeddings(self.embed_dict[input_tree.role]) # RNN gates forget_gate_left = torch.sigmoid(self.Wforget(root_embedding) + self.Uforget_l_l(l_child_hidden_state) + self.Uforget_l_r(r_child_hidden_state)) forget_gate_right = torch.sigmoid(self.Wforget(root_embedding) + self.Uforget_r_l(l_child_hidden_state) + self.Uforget_r_r(r_child_hidden_state)) input_gate = torch.sigmoid(self.Winput(root_embedding) + self.Uinput_l(l_child_hidden_state) + self.Uinput_r(r_child_hidden_state)) output_gate = torch.sigmoid(self.Woutput(root_embedding) + self.Uoutput_l(l_child_hidden_state) + self.Uoutput_r(r_child_hidden_state)) update_gate = torch.tanh(self.Wupdate(root_embedding) + self.Uupdate_l(l_child_hidden_state) + self.Uupdate_r(r_child_hidden_state)) cell = input_gate * update_gate + forget_gate_left * l_child_cell + forget_gate_right * r_child_cell hidden = output_gate * torch.tanh(cell) return hidden, cell def init_embeddings(self): return nn.Embedding(len(self.embed_dict), self.embed_size) def compute_edu_embeddings(self, tree): _, edu_hid_cell_tuple = self.edu_lstm(self.construct_edu_embeddings(tree.edu_text)) return edu_hid_cell_tuple def construct_edu_embeddings(self, edu_text): edu_words = nltk.word_tokenize(edu_text) matrix_len = len(edu_words) weights_matrix = np.zeros((matrix_len, self.glove_size)) for i, word in enumerate(edu_words): try: weights_matrix[i] = self.glove[word.lower()] except KeyError: weights_matrix[i] = 
np.zeros(self.glove_size) return torch.FloatTensor([weights_matrix])
[ "torch.tanh", "nltk.word_tokenize", "torch.nn.LSTM", "numpy.zeros", "torch.nn.Linear", "torch.FloatTensor", "torch.cat" ]
[((573, 618), 'torch.nn.Linear', 'nn.Linear', (['embed_size', 'hidden_size'], {'bias': '(True)'}), '(embed_size, hidden_size, bias=True)\n', (582, 618), True, 'import torch.nn as nn\n'), ((646, 693), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {'bias': '(False)'}), '(hidden_size, hidden_size, bias=False)\n', (655, 693), True, 'import torch.nn as nn\n'), ((721, 768), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {'bias': '(False)'}), '(hidden_size, hidden_size, bias=False)\n', (730, 768), True, 'import torch.nn as nn\n'), ((796, 843), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {'bias': '(False)'}), '(hidden_size, hidden_size, bias=False)\n', (805, 843), True, 'import torch.nn as nn\n'), ((871, 918), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {'bias': '(False)'}), '(hidden_size, hidden_size, bias=False)\n', (880, 918), True, 'import torch.nn as nn\n'), ((942, 987), 'torch.nn.Linear', 'nn.Linear', (['embed_size', 'hidden_size'], {'bias': '(True)'}), '(embed_size, hidden_size, bias=True)\n', (951, 987), True, 'import torch.nn as nn\n'), ((1012, 1059), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {'bias': '(False)'}), '(hidden_size, hidden_size, bias=False)\n', (1021, 1059), True, 'import torch.nn as nn\n'), ((1084, 1131), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {'bias': '(False)'}), '(hidden_size, hidden_size, bias=False)\n', (1093, 1131), True, 'import torch.nn as nn\n'), ((1156, 1201), 'torch.nn.Linear', 'nn.Linear', (['embed_size', 'hidden_size'], {'bias': '(True)'}), '(embed_size, hidden_size, bias=True)\n', (1165, 1201), True, 'import torch.nn as nn\n'), ((1227, 1274), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {'bias': '(False)'}), '(hidden_size, hidden_size, bias=False)\n', (1236, 1274), True, 'import torch.nn as nn\n'), ((1300, 1347), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {'bias': 
'(False)'}), '(hidden_size, hidden_size, bias=False)\n', (1309, 1347), True, 'import torch.nn as nn\n'), ((1372, 1417), 'torch.nn.Linear', 'nn.Linear', (['embed_size', 'hidden_size'], {'bias': '(True)'}), '(embed_size, hidden_size, bias=True)\n', (1381, 1417), True, 'import torch.nn as nn\n'), ((1443, 1490), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {'bias': '(False)'}), '(hidden_size, hidden_size, bias=False)\n', (1452, 1490), True, 'import torch.nn as nn\n'), ((1516, 1563), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {'bias': '(False)'}), '(hidden_size, hidden_size, bias=False)\n', (1525, 1563), True, 'import torch.nn as nn\n'), ((1592, 1632), 'torch.nn.Linear', 'nn.Linear', (['(hidden_size * 2)', '(3)'], {'bias': '(True)'}), '(hidden_size * 2, 3, bias=True)\n', (1601, 1632), True, 'import torch.nn as nn\n'), ((1657, 1721), 'torch.nn.LSTM', 'nn.LSTM', (['glove_size', 'hidden_size'], {'num_layers': '(1)', 'batch_first': '(True)'}), '(glove_size, hidden_size, num_layers=1, batch_first=True)\n', (1664, 1721), True, 'import torch.nn as nn\n'), ((4440, 4468), 'nltk.word_tokenize', 'nltk.word_tokenize', (['edu_text'], {}), '(edu_text)\n', (4458, 4468), False, 'import nltk\n'), ((4531, 4570), 'numpy.zeros', 'np.zeros', (['(matrix_len, self.glove_size)'], {}), '((matrix_len, self.glove_size))\n', (4539, 4570), True, 'import numpy as np\n'), ((4801, 4836), 'torch.FloatTensor', 'torch.FloatTensor', (['[weights_matrix]'], {}), '([weights_matrix])\n', (4818, 4836), False, 'import torch\n'), ((2441, 2499), 'torch.cat', 'torch.cat', (['(l_child_hidden_state, r_child_hidden_state)', '(2)'], {}), '((l_child_hidden_state, r_child_hidden_state), 2)\n', (2450, 2499), False, 'import torch\n'), ((4054, 4070), 'torch.tanh', 'torch.tanh', (['cell'], {}), '(cell)\n', (4064, 4070), False, 'import torch\n'), ((4760, 4785), 'numpy.zeros', 'np.zeros', (['self.glove_size'], {}), '(self.glove_size)\n', (4768, 4785), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*- import os, sys import numpy as np import itertools import matplotlib.pyplot as plt import torch import torch.nn.functional as F from pydaily import filesystem def get_slide_filenames(slides_dir): slide_list = [] svs_file_list = filesystem.find_ext_files(slides_dir, "svs") slide_list.extend([os.path.basename(ele) for ele in svs_file_list]) SVS_file_list = filesystem.find_ext_files(slides_dir, "SVS") slide_list.extend([os.path.basename(ele) for ele in SVS_file_list]) slide_filenames = [os.path.splitext(ele)[0] for ele in slide_list] slide_filenames.sort() return slide_filenames def mask2color(mask): # colors = np.asarray([(201, 58, 64), (242, 207, 1), (0, 152, 75), (101, 172, 228),(56, 34, 132), (160, 194, 56), # (0, 0, 117), (128, 128, 0), (191, 239, 69), (145, 30, 180)]) color_img = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8) color_img[mask==255] = (191, 239, 69) return color_img def gen_patch_pred(inputs, masks, preds): imgs = inputs.permute(0, 2, 3, 1).data.cpu().numpy() masks = torch.squeeze(masks, dim=1).data.cpu().numpy() preds = torch.squeeze(F.sigmoid(preds), dim=1).data.cpu().numpy() imgs = (imgs * 255).astype(np.uint8) masks = ((masks > 0.5) * 255).astype(np.uint8) preds = ((preds > 0.5) * 255).astype(np.uint8) img_num, img_size = imgs.shape[0], imgs.shape[1] result_img = np.zeros((img_num*img_size, img_size*3, imgs.shape[3]), dtype=np.uint8) for ind in np.arange(img_num): result_img[ind*img_size:(ind+1)*img_size, :img_size] = imgs[ind] result_img[ind*img_size:(ind+1)*img_size, img_size:img_size*2] = mask2color(masks[ind]) result_img[ind*img_size:(ind+1)*img_size, img_size*2:img_size*3] = mask2color(preds[ind]) return result_img def gen_patch_mask_wmap(slide_img, mask_img, coors_arr, plen): patch_list, mask_list = [], [] wmap = np.zeros((slide_img.shape[0], slide_img.shape[1]), dtype=np.int32) for coor in coors_arr: ph, pw = coor[0], coor[1] patch_list.append(slide_img[ph:ph+plen, pw:pw+plen] / 255.0) 
mask_list.append(mask_img[ph:ph+plen, pw:pw+plen]) wmap[ph:ph+plen, pw:pw+plen] += 1 patch_arr = np.asarray(patch_list).astype(np.float32) mask_arr = np.asarray(mask_list).astype(np.float32) return patch_arr, mask_arr, wmap def gen_patch_wmap(slide_img, coors_arr, plen): patch_list = [] wmap = np.zeros((slide_img.shape[0], slide_img.shape[1]), dtype=np.int32) for coor in coors_arr: ph, pw = coor[0], coor[1] patch_list.append(slide_img[ph:ph+plen, pw:pw+plen] / 255.0) wmap[ph:ph+plen, pw:pw+plen] += 1 patch_arr = np.asarray(patch_list).astype(np.float32) return patch_arr, wmap def wsi_stride_splitting(wsi_h, wsi_w, patch_len, stride_len): """ Spltting whole slide image to patches by stride. Parameters ------- wsi_h: int height of whole slide image wsi_w: int width of whole slide image patch_len: int length of the patch image stride_len: int length of the stride Returns ------- coors_arr: list list of starting coordinates of patches ([0]-h, [1]-w) """ coors_arr = [] def stride_split(ttl_len, patch_len, stride_len): p_sets = [] if patch_len > ttl_len: raise AssertionError("patch length larger than total length") elif patch_len == ttl_len: p_sets.append(0) else: stride_num = int(np.ceil((ttl_len - patch_len) * 1.0 / stride_len)) for ind in range(stride_num+1): cur_pos = int(((ttl_len - patch_len) * 1.0 / stride_num) * ind) p_sets.append(cur_pos) return p_sets h_sets = stride_split(wsi_h, patch_len, stride_len) w_sets = stride_split(wsi_w, patch_len, stride_len) # combine points in both w and h direction if len(w_sets) > 0 and len(h_sets) > 0: coors_arr = list(itertools.product(h_sets, w_sets)) return coors_arr class LambdaLR(): def __init__(self, n_epochs, offset, decay_start_epoch): assert ((n_epochs - decay_start_epoch) > 0), "Decay must start before the training session ends!" 
self.n_epochs = n_epochs self.offset = offset self.decay_start_epoch = decay_start_epoch def step(self, epoch): return 1.0 - max(0, epoch + self.offset - self.decay_start_epoch)/(self.n_epochs - self.decay_start_epoch)
[ "numpy.ceil", "itertools.product", "os.path.splitext", "numpy.asarray", "torch.nn.functional.sigmoid", "numpy.zeros", "os.path.basename", "torch.squeeze", "pydaily.filesystem.find_ext_files", "numpy.arange" ]
[((263, 307), 'pydaily.filesystem.find_ext_files', 'filesystem.find_ext_files', (['slides_dir', '"""svs"""'], {}), "(slides_dir, 'svs')\n", (288, 307), False, 'from pydaily import filesystem\n'), ((400, 444), 'pydaily.filesystem.find_ext_files', 'filesystem.find_ext_files', (['slides_dir', '"""SVS"""'], {}), "(slides_dir, 'SVS')\n", (425, 444), False, 'from pydaily import filesystem\n'), ((890, 949), 'numpy.zeros', 'np.zeros', (['(mask.shape[0], mask.shape[1], 3)'], {'dtype': 'np.uint8'}), '((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)\n', (898, 949), True, 'import numpy as np\n'), ((1458, 1533), 'numpy.zeros', 'np.zeros', (['(img_num * img_size, img_size * 3, imgs.shape[3])'], {'dtype': 'np.uint8'}), '((img_num * img_size, img_size * 3, imgs.shape[3]), dtype=np.uint8)\n', (1466, 1533), True, 'import numpy as np\n'), ((1545, 1563), 'numpy.arange', 'np.arange', (['img_num'], {}), '(img_num)\n', (1554, 1563), True, 'import numpy as np\n'), ((1966, 2032), 'numpy.zeros', 'np.zeros', (['(slide_img.shape[0], slide_img.shape[1])'], {'dtype': 'np.int32'}), '((slide_img.shape[0], slide_img.shape[1]), dtype=np.int32)\n', (1974, 2032), True, 'import numpy as np\n'), ((2497, 2563), 'numpy.zeros', 'np.zeros', (['(slide_img.shape[0], slide_img.shape[1])'], {'dtype': 'np.int32'}), '((slide_img.shape[0], slide_img.shape[1]), dtype=np.int32)\n', (2505, 2563), True, 'import numpy as np\n'), ((331, 352), 'os.path.basename', 'os.path.basename', (['ele'], {}), '(ele)\n', (347, 352), False, 'import os, sys\n'), ((468, 489), 'os.path.basename', 'os.path.basename', (['ele'], {}), '(ele)\n', (484, 489), False, 'import os, sys\n'), ((540, 561), 'os.path.splitext', 'os.path.splitext', (['ele'], {}), '(ele)\n', (556, 561), False, 'import os, sys\n'), ((2280, 2302), 'numpy.asarray', 'np.asarray', (['patch_list'], {}), '(patch_list)\n', (2290, 2302), True, 'import numpy as np\n'), ((2337, 2358), 'numpy.asarray', 'np.asarray', (['mask_list'], {}), '(mask_list)\n', (2347, 2358), True, 
'import numpy as np\n'), ((2752, 2774), 'numpy.asarray', 'np.asarray', (['patch_list'], {}), '(patch_list)\n', (2762, 2774), True, 'import numpy as np\n'), ((4066, 4099), 'itertools.product', 'itertools.product', (['h_sets', 'w_sets'], {}), '(h_sets, w_sets)\n', (4083, 4099), False, 'import itertools\n'), ((3599, 3648), 'numpy.ceil', 'np.ceil', (['((ttl_len - patch_len) * 1.0 / stride_len)'], {}), '((ttl_len - patch_len) * 1.0 / stride_len)\n', (3606, 3648), True, 'import numpy as np\n'), ((1127, 1154), 'torch.squeeze', 'torch.squeeze', (['masks'], {'dim': '(1)'}), '(masks, dim=1)\n', (1140, 1154), False, 'import torch\n'), ((1200, 1216), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['preds'], {}), '(preds)\n', (1209, 1216), True, 'import torch.nn.functional as F\n')]
import os import pandas as pd import matplotlib.pyplot as plt import numpy as np datapath = '../util/stock_dfs/' def get_ticker(x): return x.split('/')[-1].split('.')[0] def ret(x, y): return np.log(y/x) def get_zscore(x): return (x -x.mean())/x.std() def make_inputs(filepath): D = pd.read_csv(filepath).set_index('Date') #D.index = pd.to_datetime(D.index,format='%Y-%m-%d') # Set the indix to a datetime Res = pd.DataFrame() ticker = get_ticker(filepath) Res['c_2_o'] = get_zscore(ret(D.Open,D.Close)) Res['h_2_o'] = get_zscore(ret(D.Open,D.High)) Res['l_2_o'] = get_zscore(ret(D.Open,D.Low)) Res['c_2_h'] = get_zscore(ret(D.High,D.Close)) Res['h_2_l'] = get_zscore(ret(D.High,D.Low)) Res['c1_c0'] = ret(D.Close,D.Close.shift(-1)).fillna(0) #Tommorows return Res['vol'] = get_zscore(D.Volume) Res['ticker'] = ticker return Res def merge_all_data(datapath): all = pd.DataFrame() for f in os.listdir(datapath): filepath = os.path.join(datapath,f) if filepath.endswith('.csv'): print(filepath) Res = make_inputs(filepath) all = all.append(Res) return all def embed(df, str): "str: choice of return, class, multi_class" pivot_columns = df.columns[:-1] P = df.pivot_table(index=df.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data mi = P.columns.tolist() new_ind = pd.Index(e[1] + '_' + e[0] for e in mi) P.columns = new_ind clean_and_flat = P.dropna(axis=1) target_cols = list(filter(lambda x: 'c1_c0' in x, clean_and_flat.columns.values)) input_cols = list(filter(lambda x: 'c1_c0' not in x, clean_and_flat.columns.values)) inputDF = clean_and_flat[input_cols] targetDF = clean_and_flat[target_cols] TotalReturn = ((1 - np.exp(targetDF)).sum(axis=1)) / len(targetDF.columns) # If i put one dollar in each stock at the close, this is how much I'd get back Labeled = pd.DataFrame() Labeled['return'] = TotalReturn Labeled['class'] = TotalReturn.apply(labeler, 1) Labeled['multi_class'] = pd.qcut(TotalReturn, 11, labels=range(11)) pd.qcut(TotalReturn, 5).unique() return 
inputDF, Labeled[str] def labeler(x): if x>0.0029: return 1 if x<-0.00462: return -1 else: return 0 ''' if __name__ == "__main__": all = merge_all_data(datapath) inputdf, targetdf = embed(all) labeled = process_target(targetdf) print(inputdf.head()) print(labeled.head()) '''
[ "os.listdir", "pandas.read_csv", "pandas.qcut", "numpy.log", "os.path.join", "pandas.Index", "numpy.exp", "pandas.DataFrame" ]
[((207, 220), 'numpy.log', 'np.log', (['(y / x)'], {}), '(y / x)\n', (213, 220), True, 'import numpy as np\n'), ((447, 461), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (459, 461), True, 'import pandas as pd\n'), ((947, 961), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (959, 961), True, 'import pandas as pd\n'), ((975, 995), 'os.listdir', 'os.listdir', (['datapath'], {}), '(datapath)\n', (985, 995), False, 'import os\n'), ((1460, 1499), 'pandas.Index', 'pd.Index', (["(e[1] + '_' + e[0] for e in mi)"], {}), "(e[1] + '_' + e[0] for e in mi)\n", (1468, 1499), True, 'import pandas as pd\n'), ((1997, 2011), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2009, 2011), True, 'import pandas as pd\n'), ((1016, 1041), 'os.path.join', 'os.path.join', (['datapath', 'f'], {}), '(datapath, f)\n', (1028, 1041), False, 'import os\n'), ((310, 331), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath)\n', (321, 331), True, 'import pandas as pd\n'), ((2177, 2200), 'pandas.qcut', 'pd.qcut', (['TotalReturn', '(5)'], {}), '(TotalReturn, 5)\n', (2184, 2200), True, 'import pandas as pd\n'), ((1846, 1862), 'numpy.exp', 'np.exp', (['targetDF'], {}), '(targetDF)\n', (1852, 1862), True, 'import numpy as np\n')]
#! /usr/bin/env python # SPDX-FileCopyrightText: Copyright 2022, <NAME> <<EMAIL>> # SPDX-License-Identifier: BSD-3-Clause # SPDX-FileType: SOURCE # # This program is free software: you can redistribute it and/or modify it under # the terms of the license found in the LICENSE.txt file in the root directory # of this source tree. # ======= # Imports # ======= import numpy import numpy.linalg from detkit import loggdet, orthogonalize # ============ # test loggdet # ============ def test_loggdet(): """ Test for `loggdet` function. """ def pr(A, pr=5): print(numpy.around(A, pr)) n = 100 m = 5 A = numpy.random.rand(n, n) X = numpy.random.rand(n, m) # Make A a PSD matrix, and make X orthogonal A = A.T @ A sym_pos = True X_orth = True if X_orth: orthogonalize(X) C = X.T @ numpy.linalg.inv(A) @ X sign_00, logdet_00 = numpy.linalg.slogdet(A) sign_01, logdet_01 = numpy.linalg.slogdet(C) sign_0 = sign_00 logdet_0 = logdet_00 + logdet_01 XtX = X.T @ X XtXinv = numpy.linalg.inv(XtX) P = X @ XtXinv @ X.T N = A + P - A @ P logdet_5 = numpy.linalg.slogdet(XtX)[1] + numpy.linalg.slogdet(N)[1] print('%16.8f' % logdet_5) logdet_1, sign_1, flops_1 = loggdet(A, X, method='legacy', sym_pos=sym_pos, X_orth=False, flops=True) logdet_2, sign_2, flops_2 = loggdet(A, X, method='proj', sym_pos=True, X_orth=X_orth, flops=True) logdet_3, sign_3 = loggdet(A, X, method='legacy', sym_pos=sym_pos, X_orth=False, use_scipy=True) logdet_4, sign_4 = loggdet(A, X, method='proj', sym_pos=True, X_orth=X_orth, use_scipy=True) print('%16.8f, %+d' % (logdet_0, sign_0)) print('%16.8f, %+d, %ld' % (logdet_1, sign_1, flops_1)) print('%16.8f, %+d, %ld' % (logdet_2, sign_2, flops_2)) print('%16.8f, %+d' % (logdet_3, sign_3)) print('%16.8f, %+d' % (logdet_4, sign_4)) # =========== # Script main # =========== if __name__ == "__main__": test_loggdet()
[ "numpy.random.rand", "detkit.orthogonalize", "numpy.linalg.slogdet", "numpy.linalg.inv", "numpy.around", "detkit.loggdet" ]
[((644, 667), 'numpy.random.rand', 'numpy.random.rand', (['n', 'n'], {}), '(n, n)\n', (661, 667), False, 'import numpy\n'), ((676, 699), 'numpy.random.rand', 'numpy.random.rand', (['n', 'm'], {}), '(n, m)\n', (693, 699), False, 'import numpy\n'), ((909, 932), 'numpy.linalg.slogdet', 'numpy.linalg.slogdet', (['A'], {}), '(A)\n', (929, 932), False, 'import numpy\n'), ((958, 981), 'numpy.linalg.slogdet', 'numpy.linalg.slogdet', (['C'], {}), '(C)\n', (978, 981), False, 'import numpy\n'), ((1072, 1093), 'numpy.linalg.inv', 'numpy.linalg.inv', (['XtX'], {}), '(XtX)\n', (1088, 1093), False, 'import numpy\n'), ((1278, 1351), 'detkit.loggdet', 'loggdet', (['A', 'X'], {'method': '"""legacy"""', 'sym_pos': 'sym_pos', 'X_orth': '(False)', 'flops': '(True)'}), "(A, X, method='legacy', sym_pos=sym_pos, X_orth=False, flops=True)\n", (1285, 1351), False, 'from detkit import loggdet, orthogonalize\n'), ((1424, 1493), 'detkit.loggdet', 'loggdet', (['A', 'X'], {'method': '"""proj"""', 'sym_pos': '(True)', 'X_orth': 'X_orth', 'flops': '(True)'}), "(A, X, method='proj', sym_pos=True, X_orth=X_orth, flops=True)\n", (1431, 1493), False, 'from detkit import loggdet, orthogonalize\n'), ((1557, 1634), 'detkit.loggdet', 'loggdet', (['A', 'X'], {'method': '"""legacy"""', 'sym_pos': 'sym_pos', 'X_orth': '(False)', 'use_scipy': '(True)'}), "(A, X, method='legacy', sym_pos=sym_pos, X_orth=False, use_scipy=True)\n", (1564, 1634), False, 'from detkit import loggdet, orthogonalize\n'), ((1689, 1762), 'detkit.loggdet', 'loggdet', (['A', 'X'], {'method': '"""proj"""', 'sym_pos': '(True)', 'X_orth': 'X_orth', 'use_scipy': '(True)'}), "(A, X, method='proj', sym_pos=True, X_orth=X_orth, use_scipy=True)\n", (1696, 1762), False, 'from detkit import loggdet, orthogonalize\n'), ((828, 844), 'detkit.orthogonalize', 'orthogonalize', (['X'], {}), '(X)\n', (841, 844), False, 'from detkit import loggdet, orthogonalize\n'), ((592, 611), 'numpy.around', 'numpy.around', (['A', 'pr'], {}), '(A, pr)\n', (604, 611), 
False, 'import numpy\n'), ((860, 879), 'numpy.linalg.inv', 'numpy.linalg.inv', (['A'], {}), '(A)\n', (876, 879), False, 'import numpy\n'), ((1156, 1181), 'numpy.linalg.slogdet', 'numpy.linalg.slogdet', (['XtX'], {}), '(XtX)\n', (1176, 1181), False, 'import numpy\n'), ((1187, 1210), 'numpy.linalg.slogdet', 'numpy.linalg.slogdet', (['N'], {}), '(N)\n', (1207, 1210), False, 'import numpy\n')]