code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Data access functions
---------------------
"""
from __future__ import absolute_import
from os.path import join as pjoin, basename, dirname
import subprocess
import tempfile
import logging
import numpy as np
import h5py
import rasterio
from rasterio.crs import CRS
from rasterio.warp import reproject
from rasterio.enums import Resampling
from wagl.geobox import GriddedGeoBox
from wagl.tiling import generate_tiles
def get_pixel(filename, lonlat, band=1):
    """Return the pixel value of `filename` at the map location `lonlat`
    (a (longitude, latitude) tuple). `band` may be a single band number
    (returns a scalar) or a list of band numbers (returns a 1D array)."""
    with rasterio.open(filename) as src:
        # invert the affine transform to go from map to image coordinates
        col, row = (int(v) for v in ~src.transform * lonlat)
        window = ((row, row + 1), (col, col + 1))
        if isinstance(band, list):
            return src.read(band, window=window).ravel()
        return src.read(band, window=window).flat[0]
def select_acquisitions(acqs_list, fn=(lambda acq: True)):
    """
    Filter `acqs_list`, keeping only the acquisitions for which the
    supplied predicate `fn` returns True. With the default predicate
    every acquisition is kept.
    """
    return list(filter(fn, acqs_list))
def stack_data(acqs_list, fn=(lambda acq: True), window=None, masked=False):
    """
    Read the data of every acquisition in `acqs_list` into a single 3D
    NumPy array (first axis indexes the acquisition). If `window` is
    given, only the subset ((ystart, yend), (xstart, xend)) is read,
    and the returned GriddedGeoBox describes that subset.
    :param acqs_list:
        The list of acquisitions from which to generate a stack of data.
    :param window:
        Defines a subset ((ystart, yend), (xstart, xend)) in array
        co-ordinates. Default is None.
    :param masked:
        Indicates whether or not to return a masked array. Default is False.
    :return:
        A 2-tuple containing:

            * 1. A 3D numpy array (or None) containing the corresponding
                 acquisition data. (None if no data).
            * 2. A GriddedGeoBox instance specifying the spatial context
                 of the 3D numpy array. Note: All Acquisitions share the
                 same GriddedGeoBox.
    """
    # the first acquisition determines dtype, 2D shape and the geobox
    first, geo_box = acqs_list[0].data_and_box(window=window, masked=masked)
    stack = np.empty((len(acqs_list), first.shape[0], first.shape[1]),
                     first.dtype)
    stack[0] = first
    del first
    # read the remaining acquisitions into the stack; an exception is
    # raised if the user tries to stack irregular acquisitions
    for idx, acq in enumerate(acqs_list[1:], start=1):
        stack[idx] = acq.data(window=window, masked=masked)
    return stack, geo_box
def write_img(array, filename, driver='GTiff', geobox=None, nodata=None,
              tags=None, options=None, cogtif=False, levels=None,
              resampling=Resampling.nearest):
    """
    Writes a 2D/3D image to disk using rasterio.
    :param array:
        A 2D/3D NumPy array.
    :param filename:
        A string containing the output file name.
    :param driver:
        A string containing a GDAL compliant image driver. Default is
        'GTiff'.
    :param geobox:
        An instance of a GriddedGeoBox object.
    :param nodata:
        A value representing the no data value for the array.
    :param tags:
        A dictionary of dataset-level metadata.
    :param options:
        A dictionary containing other dataset creation options.
        See creation options for the respective GDAL formats.
    :param cogtif:
        If set to True, override the `driver` keyword with `GTiff`
        and create a Cloud Optimised GeoTiff. Default is False.
        See:
        https://trac.osgeo.org/gdal/wiki/CloudOptimizedGeoTIFF
    :param levels:
        If cogtif is set to True, build overviews/pyramids
        according to levels. Default levels are [2, 4, 8, 16, 32].
    :param resampling:
        If cogtif is set to True, build overviews/pyramids using
        a resampling method from `rasterio.enums.Resampling`.
        Default is `Resampling.nearest`.
    :raises TypeError:
        If the array dtype is int64, int8 or uint64 (unsupported).
    :raises IndexError:
        If the array is not 2 or 3 dimensional.
    :notes:
        If array is an instance of a `h5py.Dataset`, then the output
        file will include blocksizes based on the `h5py.Dataset's`
        chunks. To override the blocksizes, specify them using the
        `options` keyword. Eg {'blockxsize': 512, 'blockysize': 512}.
        If `cogtif` is set to True, the default blocksizes will be
        256x256. To override this behaviour, specify them using the
        `options` keyword. Eg {'blockxsize': 512, 'blockysize': 512}.
    """
    # Get the datatype of the array
    dtype = array.dtype.name

    # Check for excluded datatypes
    excluded_dtypes = ['int64', 'int8', 'uint64']
    if dtype in excluded_dtypes:
        msg = "Datatype not supported: {dt}".format(dt=dtype)
        raise TypeError(msg)

    # convert any bools to uint8
    if dtype == 'bool':
        array = np.uint8(array)
        dtype = 'uint8'

    ndims = array.ndim
    dims = array.shape

    # Get the (z, y, x) dimensions (assuming BSQ interleave)
    if ndims == 2:
        samples = dims[1]
        lines = dims[0]
        bands = 1
    elif ndims == 3:
        samples = dims[2]
        lines = dims[1]
        bands = dims[0]
    else:
        logging.error('Input array is not of 2 or 3 dimensions!!!')
        err = 'Array dimensions: {dims}'.format(dims=ndims)
        raise IndexError(err)

    # If we have a geobox, then retrieve the geotransform and projection
    if geobox is not None:
        transform = geobox.transform
        projection = geobox.crs.ExportToWkt()
    else:
        transform = None
        projection = None

    # override the driver if we are creating a cogtif
    if cogtif:
        driver = 'GTiff'

    # compression predictor choices (2: horizontal differencing for ints,
    # 3: floating point predictor)
    predictor = {'int8': 2,
                 'uint8': 2,
                 'int16': 2,
                 'uint16': 2,
                 'int32': 2,
                 'uint32': 2,
                 'int64': 2,
                 'uint64': 2,
                 'float32': 3,
                 'float64': 3}

    kwargs = {'count': bands,
              'width': samples,
              'height': lines,
              'crs': projection,
              'transform': transform,
              'dtype': dtype,
              'driver': driver,
              'nodata': nodata,
              'predictor': predictor[dtype]}

    if isinstance(array, h5py.Dataset):
        # TODO: if array is 3D get x & y chunks
        if array.chunks[1] == array.shape[1]:
            # GDAL doesn't like tiled or blocksize options to be set
            # the same length as the columns (probably true for rows as well)
            array = array[:]
        else:
            y_tile, x_tile = array.chunks
            tiles = generate_tiles(samples, lines, x_tile, y_tile)

            # add blocksizes to the creation keywords
            kwargs['tiled'] = 'yes'
            kwargs['blockxsize'] = x_tile
            kwargs['blockysize'] = y_tile

    # the user can override any derived blocksizes by supplying `options`
    if options is not None:
        for key in options:
            kwargs[key] = options[key]

    with tempfile.TemporaryDirectory() as tmpdir:
        # when creating a cogtif, write to a temporary file first and
        # translate it into its final location afterwards
        out_fname = pjoin(tmpdir, basename(filename)) if cogtif else filename
        with rasterio.open(out_fname, 'w', **kwargs) as outds:
            if bands == 1:
                if isinstance(array, h5py.Dataset):
                    # write tile-by-tile to keep memory usage low
                    for tile in tiles:
                        idx = (slice(tile[0][0], tile[0][1]),
                               slice(tile[1][0], tile[1][1]))
                        outds.write(array[idx], 1, window=tile)
                else:
                    outds.write(array, 1)
            else:
                if isinstance(array, h5py.Dataset):
                    for tile in tiles:
                        idx = (slice(tile[0][0], tile[0][1]),
                               slice(tile[1][0], tile[1][1]))
                        subs = array[:, idx[0], idx[1]]
                        for i in range(bands):
                            outds.write(subs[i], i + 1, window=tile)
                else:
                    for i in range(bands):
                        outds.write(array[i], i + 1)
            if tags is not None:
                outds.update_tags(**tags)

            # overviews/pyramids
            if cogtif:
                if levels is None:
                    levels = [2, 4, 8, 16, 32]
                outds.build_overviews(levels, resampling)

        if cogtif:
            cmd = ['gdal_translate',
                   '-co',
                   'TILED=YES',
                   '-co',
                   'COPY_SRC_OVERVIEWS=YES',
                   '-co',
                   '{}={}'.format('PREDICTOR', predictor[dtype])]
            # BUGFIX: `options` may be None; previously this unconditionally
            # called options.items() and raised AttributeError for a cogtif
            # written without explicit creation options
            if options is not None:
                for key, value in options.items():
                    cmd.extend(['-co', '{}={}'.format(key, value)])
            cmd.extend([out_fname, filename])
            subprocess.check_call(cmd, cwd=dirname(filename))
def read_subset(fname, ul_xy, ur_xy, lr_xy, ll_xy, bands=1):
    """
    Return a 2D or 3D NumPy array subsetted to the given bounding
    extents.
    :param fname:
        A string containing the full file pathname to an image on
        disk, or an opened `h5py.Dataset` with a 'crs_wkt' attribute.
    :param ul_xy:
        A tuple containing the Upper Left (x,y) co-ordinate pair
        in real world (map) co-ordinates. Co-ordinate pairs can be
        (longitude, latitude) or (eastings, northings), but they must
        be of the same reference as the image of interest.
    :param ur_xy:
        A tuple containing the Upper Right (x,y) co-ordinate pair
        in real world (map) co-ordinates. Co-ordinate pairs can be
        (longitude, latitude) or (eastings, northings), but they must
        be of the same reference as the image of interest.
    :param lr_xy:
        A tuple containing the Lower Right (x,y) co-ordinate pair
        in real world (map) co-ordinates. Co-ordinate pairs can be
        (longitude, latitude) or (eastings, northings), but they must
        be of the same reference as the image of interest.
    :param ll_xy:
        A tuple containing the Lower Left (x,y) co-ordinate pair
        in real world (map) co-ordinates. Co-ordinate pairs can be
        (longitude, latitude) or (eastings, northings), but they must
        be of the same reference as the image of interest.
    :param bands:
        Can be an integer of list of integers representing the band(s)
        to be read from disk. If bands is a list, then the returned
        subset will be 3D, otherwise the subset will be strictly 2D.
    :return:
        A tuple of 2 elements:

            * 1. 2D or 3D NumPy array containing the image subset.
            * 2. A GriddedGeoBox instance describing the spatial
                 context (origin, pixelsize, crs) of that subset.
    :raises IndexError:
        If the requested subset lies (partially) outside the image.
    :additional notes:
        The ending array co-ordinates are increased by +1,
        i.e. xend = 270 + 1
        to account for Python's [inclusive, exclusive) index notation.
    """
    if isinstance(fname, h5py.Dataset):
        geobox = GriddedGeoBox.from_dataset(fname)
        prj = fname.attrs['crs_wkt']
    else:
        # Open the file
        with rasterio.open(fname) as src:
            # Get the inverse transform of the affine co-ordinate reference
            geobox = GriddedGeoBox.from_dataset(src)
            prj = src.crs.wkt  # rasterio returns a unicode
    # inverse affine: maps map co-ordinates -> image (col, row) co-ordinates
    inv = ~geobox.transform
    rows, cols = geobox.shape
    # Convert each map co-ordinate to image/array co-ordinates
    img_ul_x, img_ul_y = [int(v) for v in inv * ul_xy]
    img_ur_x, img_ur_y = [int(v) for v in inv * ur_xy]
    img_lr_x, img_lr_y = [int(v) for v in inv * lr_xy]
    img_ll_x, img_ll_y = [int(v) for v in inv * ll_xy]
    # Calculate the min and max array extents
    # The ending array extents have +1 to account for Python's
    # [inclusive, exclusive) index notation.
    xstart = min(img_ul_x, img_ll_x)
    ystart = min(img_ul_y, img_ur_y)
    xend = max(img_ur_x, img_lr_x) + 1
    yend = max(img_ll_y, img_lr_y) + 1
    # Check for out of bounds
    if (((xstart < 0) or (ystart < 0)) or
            ((xend -1 > cols) or (yend -1 > rows))):
        msg = ("Error! Attempt to read a subset that is outside of the"
               "image domain. Index: ({ys}, {ye}), ({xs}, {xe}))")
        msg = msg.format(ys=ystart, ye=yend, xs=xstart, xe=xend)
        raise IndexError(msg)
    if isinstance(fname, h5py.Dataset):
        subs = fname[ystart:yend, xstart:xend]
    else:
        # re-open the file; the geobox read above only needed metadata
        with rasterio.open(fname) as src:
            subs = src.read(bands, window=((ystart, yend), (xstart, xend)))
    # Get the new UL co-ordinates of the array
    ul_x, ul_y = geobox.transform * (xstart, ystart)
    geobox_subs = GriddedGeoBox(shape=subs.shape, origin=(ul_x, ul_y),
                                pixelsize=geobox.pixelsize, crs=prj)
    return (subs, geobox_subs)
def reproject_file_to_array(src_filename, src_band=1, dst_geobox=None,
                            resampling=Resampling.nearest):
    """
    Reproject a single band of an image on disk onto the grid and
    co-ordinate reference system described by `dst_geobox`.
    :param src_filename:
        A string containing the full file path name to the source
        image on disk.
    :param src_band:
        An integer representing the band number to be reprojected.
        Default is 1, the 1st band.
    :param dst_geobox:
        An instance of a GriddedGeoBox object containing the
        destination parameters such as origin, affine, projection,
        and array dimensions.
    :param resampling:
        A resampling method from `rasterio.enums.Resampling`.
        Default is nearest neighbour resampling.
    :return:
        A NumPy array containing the reprojected result.
    :raises TypeError:
        If `dst_geobox` is not a GriddedGeoBox instance.
    """
    if not isinstance(dst_geobox, GriddedGeoBox):
        msg = 'dst_geobox must be an instance of a GriddedGeoBox! Type: {}'
        msg = msg.format(type(dst_geobox))
        raise TypeError(msg)

    with rasterio.open(src_filename) as src:
        # wrap the source band for rasterio's warper
        source_band = rasterio.band(src, src_band)
        # output buffer shares the source's datatype
        warped = np.zeros(dst_geobox.shape, dtype=src.dtypes[0])
        # destination CRS expressed via its proj4 string
        dst_crs = CRS.from_string(dst_geobox.crs.ExportToProj4())
        reproject(source_band, warped, dst_transform=dst_geobox.transform,
                  dst_crs=dst_crs, resampling=resampling)
    return warped
def reproject_img_to_img(src_img, src_geobox, dst_geobox,
                         resampling=Resampling.nearest):
    """
    Reproject an in-memory image/array from the grid described by
    `src_geobox` onto the grid described by `dst_geobox`.
    :param src_img:
        A NumPy array containing the source image.
    :param src_geobox:
        An instance of a GriddedGeoBox object containing the
        source parameters such as origin, affine, projection.
    :param dst_geobox:
        An instance of a GriddedGeoBox object containing the
        destination parameters such as origin, affine, projection,
        and array dimensions.
    :param resampling:
        A resampling method from `rasterio.enums.Resampling`.
        Default is nearest neighbour resampling.
    :return:
        A NumPy array containing the reprojected result.
    :raises TypeError:
        If either geobox argument is not a GriddedGeoBox instance.
    """
    # validate both geoboxes (destination first, matching caller
    # expectations on which error is raised when both are wrong)
    for name, box in (('dst_geobox', dst_geobox), ('src_geobox', src_geobox)):
        if not isinstance(box, GriddedGeoBox):
            msg = '{} must be an instance of a GriddedGeoBox! Type: {}'.format(
                name, type(box))
            raise TypeError(msg)

    # output buffer shares the source's datatype
    out = np.zeros(dst_geobox.shape, dtype=src_img.dtype)
    reproject(src_img, out,
              src_transform=src_geobox.transform,
              src_crs=CRS.from_string(src_geobox.crs.ExportToProj4()),
              dst_transform=dst_geobox.transform,
              dst_crs=CRS.from_string(dst_geobox.crs.ExportToProj4()),
              resampling=resampling)
    return out
def as_array(array, dtype, transpose=False):
    """
    Return `array` converted to `dtype`; the conversion only happens
    when `array.dtype != dtype` (the original object is returned
    unchanged otherwise). If `transpose` is True the result is
    transposed before returning.
    :param array:
        A NumPy array.
    :param dtype:
        The type to return the array as.
    :type dtype:
        A NumPy data type (e.g. ``numpy.float32``).
    :param transpose:
        If set then array will be transposed before returning.
        Useful for passing arrays into Fortran routiines. Default is
        False.
    :type transpose:
        Bool.
    :return:
        A :py:class:`numpy.ndarry` of type ``dtype`` with the same
        dimensions as array.
    """
    result = array if array.dtype == dtype else array.astype(dtype)
    return result.transpose() if transpose else result
| [
"numpy.uint8",
"tempfile.TemporaryDirectory",
"rasterio.open",
"rasterio.band",
"rasterio.warp.reproject",
"wagl.geobox.GriddedGeoBox",
"os.path.dirname",
"numpy.zeros",
"wagl.geobox.GriddedGeoBox.from_dataset",
"numpy.empty",
"wagl.tiling.generate_tiles",
"os.path.basename",
"logging.error"... | [((2564, 2594), 'numpy.empty', 'np.empty', (['stack_shape', 'a.dtype'], {}), '(stack_shape, a.dtype)\n', (2572, 2594), True, 'import numpy as np\n'), ((13165, 13259), 'wagl.geobox.GriddedGeoBox', 'GriddedGeoBox', ([], {'shape': 'subs.shape', 'origin': '(ul_x, ul_y)', 'pixelsize': 'geobox.pixelsize', 'crs': 'prj'}), '(shape=subs.shape, origin=(ul_x, ul_y), pixelsize=geobox.\n pixelsize, crs=prj)\n', (13178, 13259), False, 'from wagl.geobox import GriddedGeoBox\n'), ((16580, 16627), 'numpy.zeros', 'np.zeros', (['dst_geobox.shape'], {'dtype': 'src_img.dtype'}), '(dst_geobox.shape, dtype=src_img.dtype)\n', (16588, 16627), True, 'import numpy as np\n'), ((16633, 16771), 'rasterio.warp.reproject', 'reproject', (['src_img', 'dst_arr'], {'src_transform': 'src_trans', 'src_crs': 'src_prj', 'dst_transform': 'dst_trans', 'dst_crs': 'dst_prj', 'resampling': 'resampling'}), '(src_img, dst_arr, src_transform=src_trans, src_crs=src_prj,\n dst_transform=dst_trans, dst_crs=dst_prj, resampling=resampling)\n', (16642, 16771), False, 'from rasterio.warp import reproject\n'), ((619, 642), 'rasterio.open', 'rasterio.open', (['filename'], {}), '(filename)\n', (632, 642), False, 'import rasterio\n'), ((5218, 5233), 'numpy.uint8', 'np.uint8', (['array'], {}), '(array)\n', (5226, 5233), True, 'import numpy as np\n'), ((7483, 7512), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (7510, 7512), False, 'import tempfile\n'), ((11481, 11514), 'wagl.geobox.GriddedGeoBox.from_dataset', 'GriddedGeoBox.from_dataset', (['fname'], {}), '(fname)\n', (11507, 11514), False, 'from wagl.geobox import GriddedGeoBox\n'), ((14454, 14481), 'rasterio.open', 'rasterio.open', (['src_filename'], {}), '(src_filename)\n', (14467, 14481), False, 'import rasterio\n'), ((14542, 14570), 'rasterio.band', 'rasterio.band', (['src', 'src_band'], {}), '(src, src_band)\n', (14555, 14570), False, 'import rasterio\n'), ((14630, 14677), 'numpy.zeros', 'np.zeros', 
(['dst_geobox.shape'], {'dtype': 'src.dtypes[0]'}), '(dst_geobox.shape, dtype=src.dtypes[0])\n', (14638, 14677), True, 'import numpy as np\n'), ((14795, 14900), 'rasterio.warp.reproject', 'reproject', (['rio_band', 'dst_arr'], {'dst_transform': 'dst_geobox.transform', 'dst_crs': 'prj', 'resampling': 'resampling'}), '(rio_band, dst_arr, dst_transform=dst_geobox.transform, dst_crs=\n prj, resampling=resampling)\n', (14804, 14900), False, 'from rasterio.warp import reproject\n'), ((5567, 5626), 'logging.error', 'logging.error', (['"""Input array is not of 2 or 3 dimensions!!!"""'], {}), "('Input array is not of 2 or 3 dimensions!!!')\n", (5580, 5626), False, 'import logging\n'), ((7081, 7127), 'wagl.tiling.generate_tiles', 'generate_tiles', (['samples', 'lines', 'x_tile', 'y_tile'], {}), '(samples, lines, x_tile, y_tile)\n', (7095, 7127), False, 'from wagl.tiling import generate_tiles\n'), ((7616, 7655), 'rasterio.open', 'rasterio.open', (['out_fname', '"""w"""'], {}), "(out_fname, 'w', **kwargs)\n", (7629, 7655), False, 'import rasterio\n'), ((11599, 11619), 'rasterio.open', 'rasterio.open', (['fname'], {}), '(fname)\n', (11612, 11619), False, 'import rasterio\n'), ((11726, 11757), 'wagl.geobox.GriddedGeoBox.from_dataset', 'GriddedGeoBox.from_dataset', (['src'], {}), '(src)\n', (11752, 11757), False, 'from wagl.geobox import GriddedGeoBox\n'), ((12940, 12960), 'rasterio.open', 'rasterio.open', (['fname'], {}), '(fname)\n', (12953, 12960), False, 'import rasterio\n'), ((7558, 7576), 'os.path.basename', 'basename', (['filename'], {}), '(filename)\n', (7566, 7576), False, 'from os.path import join as pjoin, basename, dirname\n'), ((9312, 9329), 'os.path.dirname', 'dirname', (['filename'], {}), '(filename)\n', (9319, 9329), False, 'from os.path import join as pjoin, basename, dirname\n')] |
from fluxrgnn import dataloader, utils
from fluxrgnn.models import *
import torch
from torch.utils.data import random_split, Subset
from torch.optim import lr_scheduler
from torch_geometric.data import DataLoader, DataListLoader
from torch_geometric.utils import to_dense_adj
from omegaconf import DictConfig, OmegaConf
import pickle
import os.path as osp
import os
import numpy as np
import ruamel.yaml
import pandas as pd
# map model name to implementation
# registry of model classes; keys must match cfg.model.name
# (run/training/testing assert membership before instantiating)
MODEL_MAPPING = {'LocalMLP': LocalMLP,
                 'LocalLSTM': LocalLSTM,
                 'FluxRGNN': FluxRGNN}
def run(cfg: DictConfig, output_dir: str, log):
    """
    Run training and/or testing for neural network model.

    Which stages execute is determined by substrings of cfg.task.name:
    'search' triggers cross-validation, 'train' triggers training and
    'eval' triggers testing (with and without a fixed t0).

    :param cfg: DictConfig specifying model, data and training/testing details
    :param output_dir: directory to which all outputs are written to
    :param log: log file
    """
    task_name = cfg.task.name

    if 'search' in task_name:
        cross_validation(cfg, output_dir, log)
    if 'train' in task_name:
        training(cfg, output_dir, log)
    if 'eval' not in task_name:
        return

    if hasattr(cfg, 'importance_sampling'):
        cfg.importance_sampling = False

    # evaluate twice: once with a fixed starting time, once without
    cfg['fixed_t0'] = True
    testing(cfg, output_dir, log, ext='_fixedT0')
    cfg['fixed_t0'] = False
    testing(cfg, output_dir, log)

    if cfg.get('test_train_data', False):
        # evaluate performance on training data
        training_years = set(cfg.datasource.years) - {cfg.datasource.test_year}
        cfg.model.test_horizon = cfg.model.horizon
        for year in training_years:
            cfg.datasource.test_year = year
            testing(cfg, output_dir, log, ext=f'_training_year_{year}')
def count_parameters(model):
    """Return the total number of trainable (requires_grad) parameters of `model`."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def training(cfg: DictConfig, output_dir: str, log):
    """
    Run training of a neural network model.

    Trains the model selected by cfg.model.name on all training years,
    tracks training/validation curves, optionally stops early once the
    windowed validation loss converges, and writes the model states,
    curves and the resolved config to `output_dir`.

    :param cfg: DictConfig specifying model, data and training details
    :param output_dir: directory to which the final model and logs are written to
    :param log: log file
    """
    assert cfg.model.name in MODEL_MAPPING
    if cfg.debugging: torch.autograd.set_detect_anomaly(True)
    Model = MODEL_MAPPING[cfg.model.name]
    device = 'cuda' if (cfg.device.cuda and torch.cuda.is_available()) else 'cpu'
    # offset the seed by the (cluster) job id so parallel jobs differ
    seed = cfg.seed + cfg.get('job_id', 0)
    data = dataloader.load_dataset(cfg, output_dir, training=True)[0]
    data = torch.utils.data.ConcatDataset(data)
    n_data = len(data)
    print('done with setup', file=log)
    log.flush()
    # split data into training and validation set
    n_val = max(1, int(cfg.datasource.val_train_split * n_data))
    n_train = n_data - n_val
    if cfg.verbose:
        print('------------------------------------------------------', file=log)
        print('-------------------- data sets -----------------------', file=log)
        print(f'total number of sequences = {n_data}', file=log)
        print(f'number of training sequences = {n_train}', file=log)
        print(f'number of validation sequences = {n_val}', file=log)
    # seeded generator makes the train/val split reproducible
    train_data, val_data = random_split(data, (n_train, n_val), generator=torch.Generator().manual_seed(cfg.seed))
    train_loader = DataLoader(train_data, batch_size=cfg.model.batch_size, shuffle=True)
    val_loader = DataLoader(val_data, batch_size=1, shuffle=True)
    # voronoi tessellations carry one extra edge attribute
    if cfg.model.edge_type == 'voronoi':
        n_edge_attr = 5
    else:
        n_edge_attr = 4
    if cfg.model.get('root_transformed_loss', False):
        loss_func = utils.MSE_root_transformed
    elif cfg.model.get('weighted_loss', False):
        loss_func = utils.MSE_weighted
    else:
        loss_func = utils.MSE
    if cfg.verbose:
        print('------------------ model settings --------------------', file=log)
        print(cfg.model, file=log)
        print('------------------------------------------------------', file=log)
        log.flush()
    best_val_loss = np.inf
    # curves are pre-filled with NaN; entries past an early stop stay NaN
    training_curve = np.ones((1, cfg.model.epochs)) * np.nan
    val_curve = np.ones((1, cfg.model.epochs)) * np.nan
    model = Model(n_env=len(cfg.datasource.env_vars), coord_dim=2, n_edge_attr=n_edge_attr,
                  seed=seed, **cfg.model)
    n_params = count_parameters(model)
    if cfg.verbose:
        print('initialized model', file=log)
        print(f'number of model parameters: {n_params}', file=log)
        print(f'environmental variables: {cfg.datasource.env_vars}')
        log.flush()
    ext = ''
    # optionally resume from a previously saved model in output_dir
    if cfg.get('use_pretrained_model', False):
        states_path = osp.join(output_dir, 'model.pkl')
        if osp.isfile(states_path):
            model.load_state_dict(torch.load(states_path))
            if cfg.verbose: print('successfully loaded pretrained model')
            ext = '_pretrained'
    model = model.to(device)
    params = model.parameters()
    optimizer = torch.optim.Adam(params, lr=cfg.model.lr)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=cfg.model.lr_decay, gamma=cfg.model.get('lr_gamma', 0.1))
    # clip gradients element-wise to [-1, 1] via backward hooks
    for p in model.parameters():
        p.register_hook(lambda grad: torch.clamp(grad, -1.0, 1.0))
    # teacher forcing ratio, decayed each epoch by teacher_forcing_gamma
    tf = cfg.model.get('teacher_forcing_init', 1.0)
    all_tf = np.zeros(cfg.model.epochs)
    all_lr = np.zeros(cfg.model.epochs)
    avg_loss = np.inf
    saved = False
    for epoch in range(cfg.model.epochs):
        all_tf[epoch] = tf
        all_lr[epoch] = optimizer.param_groups[0]["lr"]
        loss = train(model, train_loader, optimizer, loss_func, device, teacher_forcing=tf, **cfg.model)
        training_curve[0, epoch] = loss / n_train
        # average the per-sequence validation losses, ignoring NaN/inf entries
        val_loss = test(model, val_loader, loss_func, device, **cfg.model).cpu()
        val_loss = val_loss[torch.isfinite(val_loss)].mean()
        val_curve[0, epoch] = val_loss
        if cfg.verbose:
            print(f'epoch {epoch + 1}: loss = {training_curve[0, epoch]}', file=log)
            print(f'epoch {epoch + 1}: val loss = {val_loss}', file=log)
            log.flush()
        if val_loss <= best_val_loss:
            if cfg.verbose: print('best model so far; save to disk ...')
            torch.save(model.state_dict(), osp.join(output_dir, f'best_model{ext}.pkl'))
            best_val_loss = val_loss
        if cfg.model.early_stopping and (epoch + 1) % cfg.model.avg_window == 0:
            # every X epochs, check for convergence of validation loss
            if epoch == 0:
                l = val_curve[0, 0]
            else:
                l = val_curve[0, (epoch - (cfg.model.avg_window - 1)) : (epoch + 1)].mean()
            if (avg_loss - l) > cfg.model.stopping_criterion:
                # loss decayed significantly, continue training
                avg_loss = l
                torch.save(model.state_dict(), osp.join(output_dir, f'model{ext}.pkl'))
                saved = True
            else:
                # loss converged sufficiently, stop training
                break
        tf = tf * cfg.model.get('teacher_forcing_gamma', 0)
        scheduler.step()
    # make sure the final model states are on disk even without early stopping
    if not cfg.model.early_stopping or not saved:
        torch.save(model.state_dict(), osp.join(output_dir, f'model{ext}.pkl'))
    if cfg.verbose:
        print(f'validation loss = {best_val_loss}', file=log)
        log.flush()
    # save training and validation curves
    np.save(osp.join(output_dir, f'training_curves{ext}.npy'), training_curve)
    np.save(osp.join(output_dir, f'validation_curves{ext}.npy'), val_curve)
    np.save(osp.join(output_dir, f'learning_rates{ext}.npy'), all_lr)
    np.save(osp.join(output_dir, f'teacher_forcing{ext}.npy'), all_tf)
    # plotting
    utils.plot_training_curves(training_curve, val_curve, output_dir, log=True)
    utils.plot_training_curves(training_curve, val_curve, output_dir, log=False)
    # persist the resolved config alongside the model for reproducibility
    with open(osp.join(output_dir, f'config.yaml'), 'w') as f:
        OmegaConf.save(config=cfg, f=f)
    log.flush()
def cross_validation(cfg: DictConfig, output_dir: str, log):
    """
    Run cross-validation for neural network model.
    The training data is split into N subsets, and N models are trained where for each model a different subset
    is left for validation.
    :param cfg: DictConfig specifying the model, data and training details, including the number of folds to use
    :param output_dir: directory to which all N models and logs are written to
    :param log: log file
    """
    assert cfg.model.name in MODEL_MAPPING
    if cfg.debugging: torch.autograd.set_detect_anomaly(True)
    Model = MODEL_MAPPING[cfg.model.name]
    device = 'cuda' if (cfg.device.cuda and torch.cuda.is_available()) else 'cpu'
    epochs = cfg.model.epochs
    n_folds = cfg.task.n_folds
    # offset the seed by the (cluster) job id so parallel jobs differ
    seed = cfg.seed + cfg.get('job_id', 0)
    data = dataloader.load_dataset(cfg, output_dir, training=True)[0]
    data = torch.utils.data.ConcatDataset(data)
    n_data = len(data)
    # voronoi tessellations carry one extra edge attribute
    if cfg.model.edge_type == 'voronoi':
        n_edge_attr = 5
    else:
        n_edge_attr = 4
    if cfg.model.get('root_transformed_loss', False):
        loss_func = utils.MSE_root_transformed
    elif cfg.model.get('weighted_loss', False):
        loss_func = utils.MSE_weighted
    else:
        loss_func = utils.MSE
    if cfg.verbose:
        print('------------------ model settings --------------------')
        print(cfg.model)
        print(f'environmental variables: {cfg.datasource.env_vars}')
    # contiguous index split of the dataset into n_folds validation folds
    cv_folds = np.array_split(np.arange(n_data), n_folds)
    if cfg.verbose: print(f'--- run cross-validation with {n_folds} folds ---')
    # one curve per fold, pre-filled with NaN
    training_curves = np.ones((n_folds, epochs)) * np.nan
    val_curves = np.ones((n_folds, epochs)) * np.nan
    best_val_losses = np.ones(n_folds) * np.nan
    best_epochs = np.zeros(n_folds)
    for f in range(n_folds):
        if cfg.verbose: print(f'------------------- fold = {f} ----------------------')
        subdir = osp.join(output_dir, f'cv_fold_{f}')
        os.makedirs(subdir, exist_ok=True)
        # split into training and validation set
        val_data = Subset(data, cv_folds[f].tolist())
        train_idx = np.concatenate([cv_folds[i] for i in range(n_folds) if i!=f]).tolist()
        n_train = len(train_idx)
        train_data = Subset(data, train_idx) # everything else
        train_loader = DataLoader(train_data, batch_size=cfg.model.batch_size, shuffle=True)
        val_loader = DataLoader(val_data, batch_size=1, shuffle=True)
        # a fresh model per fold (same seed for comparability across folds)
        model = Model(n_env=len(cfg.datasource.env_vars), coord_dim=2, n_edge_attr=n_edge_attr,
                      seed=seed, **cfg.model)
        states_path = cfg.model.get('load_states_from', '')
        if osp.isfile(states_path):
            model.load_state_dict(torch.load(states_path))
        model = model.to(device)
        params = model.parameters()
        optimizer = torch.optim.Adam(params, lr=cfg.model.lr)
        scheduler = lr_scheduler.StepLR(optimizer, step_size=cfg.model.lr_decay, gamma=cfg.model.get('lr_gamma', 0.1))
        best_val_loss = np.inf
        avg_loss = np.inf
        # clip gradients element-wise to [-1, 1] via backward hooks
        for p in model.parameters():
            p.register_hook(lambda grad: torch.clamp(grad, -1.0, 1.0))
        # teacher forcing ratio, decayed each epoch by teacher_forcing_gamma
        tf = cfg.model.get('teacher_forcing_init', 1.0)
        all_tf = np.zeros(epochs)
        all_lr = np.zeros(epochs)
        for epoch in range(epochs):
            all_tf[epoch] = tf
            all_lr[epoch] = optimizer.param_groups[0]["lr"]
            loss = train(model, train_loader, optimizer, loss_func, device, teacher_forcing=tf, **cfg.model)
            training_curves[f, epoch] = loss / n_train
            # average the per-sequence validation losses, ignoring NaN/inf entries
            val_loss = test(model, val_loader, loss_func, device, **cfg.model).cpu()
            val_loss = val_loss[torch.isfinite(val_loss)].mean()
            val_curves[f, epoch] = val_loss
            if cfg.verbose:
                print(f'epoch {epoch + 1}: loss = {training_curves[f, epoch]}')
                print(f'epoch {epoch + 1}: val loss = {val_loss}')
            if val_loss <= best_val_loss:
                if cfg.verbose: print('best model so far; save to disk ...')
                torch.save(model.state_dict(), osp.join(subdir, f'best_model.pkl'))
                best_val_loss = val_loss
                best_epochs[f] = epoch
            if cfg.model.early_stopping and (epoch % cfg.model.avg_window) == 0:
                # every X epochs, check for convergence of validation loss
                if epoch == 0:
                    l = val_curves[f, 0]
                else:
                    l = val_curves[f, (epoch - cfg.model.avg_window): epoch].mean()
                if (avg_loss - l) > cfg.model.stopping_criterion:
                    # loss decayed significantly, continue training
                    avg_loss = l
                    torch.save(model.state_dict(), osp.join(subdir, 'model.pkl'))
                else:
                    # loss converged sufficiently, stop training
                    val_curves[f, epoch:] = l
                    break
            tf = tf * cfg.model.get('teacher_forcing_gamma', 0)
            scheduler.step()
        if not cfg.model.early_stopping:
            torch.save(model.state_dict(), osp.join(subdir, 'model.pkl'))
        if cfg.verbose:
            print(f'fold {f}: final validation loss = {val_curves[f, -1]}', file=log)
        best_val_losses[f] = best_val_loss
        log.flush()
        # update training and validation curves
        np.save(osp.join(subdir, 'training_curves.npy'), training_curves)
        np.save(osp.join(subdir, 'validation_curves.npy'), val_curves)
        np.save(osp.join(subdir, 'learning_rates.npy'), all_lr)
        np.save(osp.join(subdir, 'teacher_forcing.npy'), all_tf)
        # plotting
        utils.plot_training_curves(training_curves, val_curves, subdir, log=True)
        utils.plot_training_curves(training_curves, val_curves, subdir, log=False)
    if cfg.verbose:
        print(f'average validation loss = {val_curves[:, -1].mean()}', file=log)
    # per-fold summary of final/best validation losses and best epochs
    summary = pd.DataFrame({'fold': range(n_folds),
                            'final_val_loss': val_curves[:, -cfg.model.avg_window:].mean(1),
                            'best_val_loss': best_val_losses,
                            'best_epoch': best_epochs})
    summary.to_csv(osp.join(output_dir, 'summary.csv'))
    # persist the resolved config for reproducibility
    # (NOTE: `f` here shadows the fold index used in the loop above)
    with open(osp.join(output_dir, f'config.yaml'), 'w') as f:
        OmegaConf.save(config=cfg, f=f)
    log.flush()
def testing(cfg: DictConfig, output_dir: str, log, ext=''):
    """
    Test neural network model on unseen test data.

    Rebuilds the model from the saved checkpoint, runs it over the test
    sequences and writes per-radar results (and, for flux models, the
    edge fluxes) to ``output_dir``.

    :param cfg: DictConfig specifying model, data and testing details
    :param output_dir: directory to which test results are written to
    :param log: open writable stream used for verbose progress output
    :param ext: optional suffix appended to output file names
    """
    assert cfg.model.name in MODEL_MAPPING
    Model = MODEL_MAPPING[cfg.model.name]
    device = 'cuda' if (cfg.device.cuda and torch.cuda.is_available()) else 'cpu'

    # number of edge attributes depends on how the graph was constructed
    if cfg.model.edge_type == 'voronoi':
        n_edge_attr = 5
    else:
        n_edge_attr = 4

    # optionally evaluate the pretrained checkpoint ('model_pretrained.pkl')
    if cfg.get('use_pretrained_model', False):
        model_ext = '_pretrained'
    else:
        model_ext = ''
    ext = f'{ext}{model_ext}'
    model_dir = cfg.get('model_dir', output_dir)

    # reuse the normalization constant the model was trained with so that
    # predictions are rescaled consistently
    model_cfg = utils.load_model_cfg(model_dir)
    cfg.datasource.bird_scale = float(model_cfg['datasource']['bird_scale'])

    # load test data
    test_data, input_col, context, seq_len = dataloader.load_dataset(cfg, output_dir, training=False)
    test_data = test_data[0]
    test_loader = DataLoader(test_data, batch_size=1, shuffle=False)

    # load additional data
    # NOTE(review): `time` shadows the stdlib module name; here it is the
    # array of timepoints attached to the dataset
    time = test_data.info['timepoints']
    radars = test_data.info['radars']
    areas = test_data.info['areas']
    # per-radar conversion factors to densities per km^2
    # (identity when the input column is already a density)
    to_km2 = np.ones(len(radars)) if input_col == 'birds_km2' else test_data.info['areas']
    radar_index = {idx: name for idx, name in enumerate(radars)}

    # load models and predict
    results = dict(gt_km2=[], prediction_km2=[], night=[], radar=[], area=[], seqID=[],
                   tidx=[], datetime=[], horizon=[], missing=[], trial=[])
    # LSTM/RGNN models additionally expose per-node source/sink terms
    if cfg.model.name in ['LocalLSTM', 'FluxRGNN']:
        results['source_km2'] = []
        results['sink_km2'] = []
    # flux models additionally expose per-node in-/out-fluxes
    if cfg.model.name == 'FluxRGNN':
        results['influx_km2'] = []
        results['outflux_km2'] = []

    # rebuild the model with its training-time hyperparameters and weights
    model = Model(n_env=len(cfg.datasource.env_vars), coord_dim=2, n_edge_attr=n_edge_attr,
                  seed=model_cfg['seed'], **model_cfg['model'])
    model.load_state_dict(torch.load(osp.join(model_dir, f'model{model_ext}.pkl')))

    # adjust model settings for testing
    model.horizon = cfg.model.test_horizon
    if cfg.model.get('fixed_boundary', 0):
        model.fixed_boundary = True

    model.to(device)
    model.eval()

    edge_fluxes = {}
    radar_fluxes = {}

    for nidx, data in enumerate(test_loader):
        # load ground truth and predicted densities
        data = data.to(device)
        y_hat = model(data).cpu().detach() * cfg.datasource.bird_scale
        y = data.y.cpu() * cfg.datasource.bird_scale

        # undo the root transform applied during preprocessing
        if cfg.root_transform > 0:
            # transform back
            y = torch.pow(y, cfg.root_transform)
            y_hat = torch.pow(y_hat, cfg.root_transform)

        _tidx = data.tidx.cpu()
        local_night = data.local_night.cpu()
        missing = data.missing.cpu()

        if 'Flux' in cfg.model.name:
            # fluxes along edges
            adj = to_dense_adj(data.edge_index, edge_attr=model.edge_fluxes)
            edge_fluxes[nidx] = adj.view(
                data.num_nodes, data.num_nodes, -1).detach().cpu() * cfg.datasource.bird_scale

            # absolute fluxes across Voronoi faces
            if input_col == 'birds_km2':
                edge_fluxes[nidx] *= areas.max()

            # net fluxes per node
            influxes = edge_fluxes[nidx].sum(1)
            outfluxes = edge_fluxes[nidx].permute(1, 0, 2).sum(1)

            # NOTE(review): radar_fluxes is filled here but never written
            # out below — confirm whether it is still needed
            radar_fluxes[nidx] = to_dense_adj(data.edge_index, edge_attr=data.fluxes).view(
                data.num_nodes, data.num_nodes, -1).detach().cpu()

        if 'LSTM' in cfg.model.name or 'RGNN' in cfg.model.name:
            node_source = model.node_source.detach().cpu() * cfg.datasource.bird_scale
            node_sink = model.node_sink.detach().cpu() * cfg.datasource.bird_scale

        # fill prediction columns with nans for context timesteps
        fill_context = torch.ones(context) * float('nan')

        # collect per-radar time series for this test sequence
        for ridx, name in radar_index.items():
            results['gt_km2'].append(y[ridx, :] / to_km2[ridx])
            results['prediction_km2'].append(torch.cat([fill_context, y_hat[ridx, :] / to_km2[ridx]]))
            results['night'].append(local_night[ridx, :])
            results['radar'].append([name] * y.shape[1])
            results['area'].append([areas[ridx]] * y.shape[1])
            results['seqID'].append([nidx] * y.shape[1])
            results['tidx'].append(_tidx)
            results['datetime'].append(time[_tidx])
            results['trial'].append([cfg.get('job_id', 0)] * y.shape[1])
            # negative horizons correspond to the context window
            results['horizon'].append(np.arange(-(cfg.model.context-1), cfg.model.test_horizon+1))
            results['missing'].append(missing[ridx, :])

            if 'LSTM' in cfg.model.name or 'RGNN' in cfg.model.name:
                results['source_km2'].append(torch.cat([fill_context, node_source[ridx].view(-1) / to_km2[ridx]]))
                results['sink_km2'].append(torch.cat([fill_context, node_sink[ridx].view(-1) / to_km2[ridx]]))
            if 'Flux' in cfg.model.name:
                results['influx_km2'].append(torch.cat([fill_context, influxes[ridx].view(-1)]) / to_km2[ridx])
                results['outflux_km2'].append(torch.cat([fill_context, outfluxes[ridx].view(-1)]) / to_km2[ridx])

    # write the results table and the radar index mapping to disk
    utils.finalize_results(results, output_dir, ext)
    with open(osp.join(output_dir, f'radar_index.pickle'), 'wb') as f:
        pickle.dump(radar_index, f, pickle.HIGHEST_PROTOCOL)

    if 'Flux' in cfg.model.name:
        with open(osp.join(output_dir, f'model_fluxes{ext}.pickle'), 'wb') as f:
            pickle.dump(edge_fluxes, f, pickle.HIGHEST_PROTOCOL)

    if cfg.verbose:
        print(f'successfully saved results to {osp.join(output_dir, f"results{ext}.csv")}', file=log)
    log.flush()
| [
"torch.utils.data.ConcatDataset",
"torch.pow",
"torch.cuda.is_available",
"fluxrgnn.dataloader.load_dataset",
"numpy.arange",
"fluxrgnn.utils.finalize_results",
"fluxrgnn.utils.plot_training_curves",
"torch.autograd.set_detect_anomaly",
"numpy.ones",
"torch_geometric.utils.to_dense_adj",
"os.pat... | [((2464, 2500), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['data'], {}), '(data)\n', (2494, 2500), False, 'import torch\n'), ((3248, 3317), 'torch_geometric.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'cfg.model.batch_size', 'shuffle': '(True)'}), '(train_data, batch_size=cfg.model.batch_size, shuffle=True)\n', (3258, 3317), False, 'from torch_geometric.data import DataLoader, DataListLoader\n'), ((3335, 3383), 'torch_geometric.data.DataLoader', 'DataLoader', (['val_data'], {'batch_size': '(1)', 'shuffle': '(True)'}), '(val_data, batch_size=1, shuffle=True)\n', (3345, 3383), False, 'from torch_geometric.data import DataLoader, DataListLoader\n'), ((4885, 4926), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'cfg.model.lr'}), '(params, lr=cfg.model.lr)\n', (4901, 4926), False, 'import torch\n'), ((5210, 5236), 'numpy.zeros', 'np.zeros', (['cfg.model.epochs'], {}), '(cfg.model.epochs)\n', (5218, 5236), True, 'import numpy as np\n'), ((5250, 5276), 'numpy.zeros', 'np.zeros', (['cfg.model.epochs'], {}), '(cfg.model.epochs)\n', (5258, 5276), True, 'import numpy as np\n'), ((7604, 7679), 'fluxrgnn.utils.plot_training_curves', 'utils.plot_training_curves', (['training_curve', 'val_curve', 'output_dir'], {'log': '(True)'}), '(training_curve, val_curve, output_dir, log=True)\n', (7630, 7679), False, 'from fluxrgnn import dataloader, utils\n'), ((7684, 7760), 'fluxrgnn.utils.plot_training_curves', 'utils.plot_training_curves', (['training_curve', 'val_curve', 'output_dir'], {'log': '(False)'}), '(training_curve, val_curve, output_dir, log=False)\n', (7710, 7760), False, 'from fluxrgnn import dataloader, utils\n'), ((8788, 8824), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['data'], {}), '(data)\n', (8818, 8824), False, 'import torch\n'), ((9682, 9699), 'numpy.zeros', 'np.zeros', (['n_folds'], {}), '(n_folds)\n', (9690, 9699), True, 'import numpy as np\n'), ((15089, 15120), 
'fluxrgnn.utils.load_model_cfg', 'utils.load_model_cfg', (['model_dir'], {}), '(model_dir)\n', (15109, 15120), False, 'from fluxrgnn import dataloader, utils\n'), ((15265, 15321), 'fluxrgnn.dataloader.load_dataset', 'dataloader.load_dataset', (['cfg', 'output_dir'], {'training': '(False)'}), '(cfg, output_dir, training=False)\n', (15288, 15321), False, 'from fluxrgnn import dataloader, utils\n'), ((15369, 15419), 'torch_geometric.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(test_data, batch_size=1, shuffle=False)\n', (15379, 15419), False, 'from torch_geometric.data import DataLoader, DataListLoader\n'), ((19622, 19670), 'fluxrgnn.utils.finalize_results', 'utils.finalize_results', (['results', 'output_dir', 'ext'], {}), '(results, output_dir, ext)\n', (19644, 19670), False, 'from fluxrgnn import dataloader, utils\n'), ((2173, 2212), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (2206, 2212), False, 'import torch\n'), ((2394, 2449), 'fluxrgnn.dataloader.load_dataset', 'dataloader.load_dataset', (['cfg', 'output_dir'], {'training': '(True)'}), '(cfg, output_dir, training=True)\n', (2417, 2449), False, 'from fluxrgnn import dataloader, utils\n'), ((3999, 4029), 'numpy.ones', 'np.ones', (['(1, cfg.model.epochs)'], {}), '((1, cfg.model.epochs))\n', (4006, 4029), True, 'import numpy as np\n'), ((4055, 4085), 'numpy.ones', 'np.ones', (['(1, cfg.model.epochs)'], {}), '((1, cfg.model.epochs))\n', (4062, 4085), True, 'import numpy as np\n'), ((4572, 4605), 'os.path.join', 'osp.join', (['output_dir', '"""model.pkl"""'], {}), "(output_dir, 'model.pkl')\n", (4580, 4605), True, 'import os.path as osp\n'), ((4617, 4640), 'os.path.isfile', 'osp.isfile', (['states_path'], {}), '(states_path)\n', (4627, 4640), True, 'import os.path as osp\n'), ((7300, 7349), 'os.path.join', 'osp.join', (['output_dir', 'f"""training_curves{ext}.npy"""'], {}), "(output_dir, 
f'training_curves{ext}.npy')\n", (7308, 7349), True, 'import os.path as osp\n'), ((7379, 7430), 'os.path.join', 'osp.join', (['output_dir', 'f"""validation_curves{ext}.npy"""'], {}), "(output_dir, f'validation_curves{ext}.npy')\n", (7387, 7430), True, 'import os.path as osp\n'), ((7455, 7503), 'os.path.join', 'osp.join', (['output_dir', 'f"""learning_rates{ext}.npy"""'], {}), "(output_dir, f'learning_rates{ext}.npy')\n", (7463, 7503), True, 'import os.path as osp\n'), ((7525, 7574), 'os.path.join', 'osp.join', (['output_dir', 'f"""teacher_forcing{ext}.npy"""'], {}), "(output_dir, f'teacher_forcing{ext}.npy')\n", (7533, 7574), True, 'import os.path as osp\n'), ((7833, 7864), 'omegaconf.OmegaConf.save', 'OmegaConf.save', ([], {'config': 'cfg', 'f': 'f'}), '(config=cfg, f=f)\n', (7847, 7864), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((8436, 8475), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (8469, 8475), False, 'import torch\n'), ((8718, 8773), 'fluxrgnn.dataloader.load_dataset', 'dataloader.load_dataset', (['cfg', 'output_dir'], {'training': '(True)'}), '(cfg, output_dir, training=True)\n', (8741, 8773), False, 'from fluxrgnn import dataloader, utils\n'), ((9395, 9412), 'numpy.arange', 'np.arange', (['n_data'], {}), '(n_data)\n', (9404, 9412), True, 'import numpy as np\n'), ((9527, 9553), 'numpy.ones', 'np.ones', (['(n_folds, epochs)'], {}), '((n_folds, epochs))\n', (9534, 9553), True, 'import numpy as np\n'), ((9580, 9606), 'numpy.ones', 'np.ones', (['(n_folds, epochs)'], {}), '((n_folds, epochs))\n', (9587, 9606), True, 'import numpy as np\n'), ((9638, 9654), 'numpy.ones', 'np.ones', (['n_folds'], {}), '(n_folds)\n', (9645, 9654), True, 'import numpy as np\n'), ((9836, 9872), 'os.path.join', 'osp.join', (['output_dir', 'f"""cv_fold_{f}"""'], {}), "(output_dir, f'cv_fold_{f}')\n", (9844, 9872), True, 'import os.path as osp\n'), ((9881, 9915), 'os.makedirs', 'os.makedirs', (['subdir'], 
{'exist_ok': '(True)'}), '(subdir, exist_ok=True)\n', (9892, 9915), False, 'import os\n'), ((10165, 10188), 'torch.utils.data.Subset', 'Subset', (['data', 'train_idx'], {}), '(data, train_idx)\n', (10171, 10188), False, 'from torch.utils.data import random_split, Subset\n'), ((10230, 10299), 'torch_geometric.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'cfg.model.batch_size', 'shuffle': '(True)'}), '(train_data, batch_size=cfg.model.batch_size, shuffle=True)\n', (10240, 10299), False, 'from torch_geometric.data import DataLoader, DataListLoader\n'), ((10321, 10369), 'torch_geometric.data.DataLoader', 'DataLoader', (['val_data'], {'batch_size': '(1)', 'shuffle': '(True)'}), '(val_data, batch_size=1, shuffle=True)\n', (10331, 10369), False, 'from torch_geometric.data import DataLoader, DataListLoader\n'), ((10585, 10608), 'os.path.isfile', 'osp.isfile', (['states_path'], {}), '(states_path)\n', (10595, 10608), True, 'import os.path as osp\n'), ((10759, 10800), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'cfg.model.lr'}), '(params, lr=cfg.model.lr)\n', (10775, 10800), False, 'import torch\n'), ((11160, 11176), 'numpy.zeros', 'np.zeros', (['epochs'], {}), '(epochs)\n', (11168, 11176), True, 'import numpy as np\n'), ((11194, 11210), 'numpy.zeros', 'np.zeros', (['epochs'], {}), '(epochs)\n', (11202, 11210), True, 'import numpy as np\n'), ((13637, 13710), 'fluxrgnn.utils.plot_training_curves', 'utils.plot_training_curves', (['training_curves', 'val_curves', 'subdir'], {'log': '(True)'}), '(training_curves, val_curves, subdir, log=True)\n', (13663, 13710), False, 'from fluxrgnn import dataloader, utils\n'), ((13719, 13793), 'fluxrgnn.utils.plot_training_curves', 'utils.plot_training_curves', (['training_curves', 'val_curves', 'subdir'], {'log': '(False)'}), '(training_curves, val_curves, subdir, log=False)\n', (13745, 13793), False, 'from fluxrgnn import dataloader, utils\n'), ((14179, 14214), 'os.path.join', 'osp.join', (['output_dir', 
'"""summary.csv"""'], {}), "(output_dir, 'summary.csv')\n", (14187, 14214), True, 'import os.path as osp\n'), ((14289, 14320), 'omegaconf.OmegaConf.save', 'OmegaConf.save', ([], {'config': 'cfg', 'f': 'f'}), '(config=cfg, f=f)\n', (14303, 14320), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((19751, 19803), 'pickle.dump', 'pickle.dump', (['radar_index', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(radar_index, f, pickle.HIGHEST_PROTOCOL)\n', (19762, 19803), False, 'import pickle\n'), ((2301, 2326), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2324, 2326), False, 'import torch\n'), ((7101, 7140), 'os.path.join', 'osp.join', (['output_dir', 'f"""model{ext}.pkl"""'], {}), "(output_dir, f'model{ext}.pkl')\n", (7109, 7140), True, 'import os.path as osp\n'), ((7776, 7812), 'os.path.join', 'osp.join', (['output_dir', 'f"""config.yaml"""'], {}), "(output_dir, f'config.yaml')\n", (7784, 7812), True, 'import os.path as osp\n'), ((8564, 8589), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8587, 8589), False, 'import torch\n'), ((13351, 13390), 'os.path.join', 'osp.join', (['subdir', '"""training_curves.npy"""'], {}), "(subdir, 'training_curves.npy')\n", (13359, 13390), True, 'import os.path as osp\n'), ((13425, 13466), 'os.path.join', 'osp.join', (['subdir', '"""validation_curves.npy"""'], {}), "(subdir, 'validation_curves.npy')\n", (13433, 13466), True, 'import os.path as osp\n'), ((13496, 13534), 'os.path.join', 'osp.join', (['subdir', '"""learning_rates.npy"""'], {}), "(subdir, 'learning_rates.npy')\n", (13504, 13534), True, 'import os.path as osp\n'), ((13560, 13599), 'os.path.join', 'osp.join', (['subdir', '"""teacher_forcing.npy"""'], {}), "(subdir, 'teacher_forcing.npy')\n", (13568, 13599), True, 'import os.path as osp\n'), ((14232, 14268), 'os.path.join', 'osp.join', (['output_dir', 'f"""config.yaml"""'], {}), "(output_dir, f'config.yaml')\n", (14240, 14268), True, 'import os.path as osp\n'), ((14740, 
14765), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14763, 14765), False, 'import torch\n'), ((16335, 16379), 'os.path.join', 'osp.join', (['model_dir', 'f"""model{model_ext}.pkl"""'], {}), "(model_dir, f'model{model_ext}.pkl')\n", (16343, 16379), True, 'import os.path as osp\n'), ((16963, 16995), 'torch.pow', 'torch.pow', (['y', 'cfg.root_transform'], {}), '(y, cfg.root_transform)\n', (16972, 16995), False, 'import torch\n'), ((17016, 17052), 'torch.pow', 'torch.pow', (['y_hat', 'cfg.root_transform'], {}), '(y_hat, cfg.root_transform)\n', (17025, 17052), False, 'import torch\n'), ((17257, 17315), 'torch_geometric.utils.to_dense_adj', 'to_dense_adj', (['data.edge_index'], {'edge_attr': 'model.edge_fluxes'}), '(data.edge_index, edge_attr=model.edge_fluxes)\n', (17269, 17315), False, 'from torch_geometric.utils import to_dense_adj\n'), ((18247, 18266), 'torch.ones', 'torch.ones', (['context'], {}), '(context)\n', (18257, 18266), False, 'import torch\n'), ((19686, 19729), 'os.path.join', 'osp.join', (['output_dir', 'f"""radar_index.pickle"""'], {}), "(output_dir, f'radar_index.pickle')\n", (19694, 19729), True, 'import os.path as osp\n'), ((19931, 19983), 'pickle.dump', 'pickle.dump', (['edge_fluxes', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(edge_fluxes, f, pickle.HIGHEST_PROTOCOL)\n', (19942, 19983), False, 'import pickle\n'), ((4676, 4699), 'torch.load', 'torch.load', (['states_path'], {}), '(states_path)\n', (4686, 4699), False, 'import torch\n'), ((5113, 5141), 'torch.clamp', 'torch.clamp', (['grad', '(-1.0)', '(1.0)'], {}), '(grad, -1.0, 1.0)\n', (5124, 5141), False, 'import torch\n'), ((6143, 6187), 'os.path.join', 'osp.join', (['output_dir', 'f"""best_model{ext}.pkl"""'], {}), "(output_dir, f'best_model{ext}.pkl')\n", (6151, 6187), True, 'import os.path as osp\n'), ((10644, 10667), 'torch.load', 'torch.load', (['states_path'], {}), '(states_path)\n', (10654, 10667), False, 'import torch\n'), ((13080, 13109), 'os.path.join', 
'osp.join', (['subdir', '"""model.pkl"""'], {}), "(subdir, 'model.pkl')\n", (13088, 13109), True, 'import os.path as osp\n'), ((18439, 18495), 'torch.cat', 'torch.cat', (['[fill_context, y_hat[ridx, :] / to_km2[ridx]]'], {}), '([fill_context, y_hat[ridx, :] / to_km2[ridx]])\n', (18448, 18495), False, 'import torch\n'), ((18937, 19000), 'numpy.arange', 'np.arange', (['(-(cfg.model.context - 1))', '(cfg.model.test_horizon + 1)'], {}), '(-(cfg.model.context - 1), cfg.model.test_horizon + 1)\n', (18946, 19000), True, 'import numpy as np\n'), ((19856, 19905), 'os.path.join', 'osp.join', (['output_dir', 'f"""model_fluxes{ext}.pickle"""'], {}), "(output_dir, f'model_fluxes{ext}.pickle')\n", (19864, 19905), True, 'import os.path as osp\n'), ((3188, 3205), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (3203, 3205), False, 'import torch\n'), ((5709, 5733), 'torch.isfinite', 'torch.isfinite', (['val_loss'], {}), '(val_loss)\n', (5723, 5733), False, 'import torch\n'), ((6754, 6793), 'os.path.join', 'osp.join', (['output_dir', 'f"""model{ext}.pkl"""'], {}), "(output_dir, f'model{ext}.pkl')\n", (6762, 6793), True, 'import os.path as osp\n'), ((11056, 11084), 'torch.clamp', 'torch.clamp', (['grad', '(-1.0)', '(1.0)'], {}), '(grad, -1.0, 1.0)\n', (11067, 11084), False, 'import torch\n'), ((12041, 12076), 'os.path.join', 'osp.join', (['subdir', 'f"""best_model.pkl"""'], {}), "(subdir, f'best_model.pkl')\n", (12049, 12076), True, 'import os.path as osp\n'), ((20052, 20093), 'os.path.join', 'osp.join', (['output_dir', 'f"""results{ext}.csv"""'], {}), "(output_dir, f'results{ext}.csv')\n", (20060, 20093), True, 'import os.path as osp\n'), ((11621, 11645), 'torch.isfinite', 'torch.isfinite', (['val_loss'], {}), '(val_loss)\n', (11635, 11645), False, 'import torch\n'), ((12711, 12740), 'os.path.join', 'osp.join', (['subdir', '"""model.pkl"""'], {}), "(subdir, 'model.pkl')\n", (12719, 12740), True, 'import os.path as osp\n'), ((17794, 17846), 
'torch_geometric.utils.to_dense_adj', 'to_dense_adj', (['data.edge_index'], {'edge_attr': 'data.fluxes'}), '(data.edge_index, edge_attr=data.fluxes)\n', (17806, 17846), False, 'from torch_geometric.utils import to_dense_adj\n')] |
import numpy as np
from typing import Callable
from .base_score import BaseScore
class BleiLaffertyScore(BaseScore):
    """
    This score implements the method described in the 2009 paper
    Blei and Lafferty. "Topic models." Text Mining.
    Chapman and Hall/CRC, 2009. 101-124.

    At the core this score helps to discover tokens that are most likely
    to describe a given topic. Summing up that score helps to estimate how
    well the model distinguishes between topics. The higher this score - better.
    """
    def __init__(
            self,
            name: str = None,
            num_top_tokens: int = 30,
            should_compute: Callable[[int], bool] = None):
        """
        Parameters
        ----------
        name : str
            name of the score
        num_top_tokens : int
            how many top-scoring tokens are summed up per topic
        should_compute : callable
            passed through to BaseScore; decides when the score is computed
        """
        super().__init__(name=name, should_compute=should_compute)

        self.num_top_tokens = num_top_tokens

    def __repr__(self):
        return f'{self.__class__.__name__}(num_top_tokens={self.num_top_tokens})'

    def _compute_blei_scores(self, phi):
        """
        Computes Blei score
            phi[wt] * [log(phi[wt]) - 1/T sum_k log(phi[wk])]

        Parameters
        ----------
        phi : pd.Dataframe
            phi matrix of the model

        Returns
        -------
        score : pd.Dataframe
            weighted phi matrix
        """
        topic_number = phi.shape[1]
        blei_eps = 1e-42  # guards against log(0) for zero probabilities
        log_phi = np.log(phi + blei_eps)
        numerator = np.sum(log_phi, axis=1)
        numerator = numerator[:, np.newaxis]

        # phi may be either a pandas DataFrame or a plain ndarray
        if hasattr(log_phi, "values"):
            multiplier = log_phi.values - numerator / topic_number
        else:
            multiplier = log_phi - numerator / topic_number

        scores = phi * multiplier
        return scores

    def call(self, model, **kwargs):
        modalities = list(model.class_ids.keys())

        score = 0
        for modality in modalities:
            phi = model.get_phi(class_ids=modality)
            # NOTE(review): np.sort sorts along the last axis; confirm the
            # intended axis before changing — kept as-is here
            modality_scores = np.sort(self._compute_blei_scores(phi).values)
            score += np.sum(modality_scores[-self.num_top_tokens:, :])

        # Fallback when the model defines no modalities: score the full phi.
        # (The previous check `modalities is None` was dead code, because
        # list() never returns None, so the fallback could never trigger.)
        if not modalities:
            phi = model.get_phi()
            modality_scores = np.sort(self._compute_blei_scores(phi).values)
            score = np.sum(modality_scores[-self.num_top_tokens:, :])
        return score
| [
"numpy.sum",
"numpy.log"
] | [((1537, 1559), 'numpy.log', 'np.log', (['(phi + blei_eps)'], {}), '(phi + blei_eps)\n', (1543, 1559), True, 'import numpy as np\n'), ((1580, 1603), 'numpy.sum', 'np.sum', (['log_phi'], {'axis': '(1)'}), '(log_phi, axis=1)\n', (1586, 1603), True, 'import numpy as np\n'), ((2180, 2229), 'numpy.sum', 'np.sum', (['modality_scores[-self.num_top_tokens:, :]'], {}), '(modality_scores[-self.num_top_tokens:, :])\n', (2186, 2229), True, 'import numpy as np\n'), ((2392, 2441), 'numpy.sum', 'np.sum', (['modality_scores[-self.num_top_tokens:, :]'], {}), '(modality_scores[-self.num_top_tokens:, :])\n', (2398, 2441), True, 'import numpy as np\n')] |
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import numpy as np
from astropy.io import fits
# ------------------------------------------------------------
# Input
# Input: first CLI argument is the directory that holds the opacity FITS files
# (join with "" guarantees a trailing path separator).
opacity = os.path.join(sys.argv[1], "")

# Count the dust-opacity files (everything except "gas_opacity_*") so that
# each of them gets its own color from the rainbow colormap.
n_dust = sum(1 for fname in os.listdir(opacity)
             if fname.endswith('.fits') and "gas_opacity_" not in fname[:-5])
colors = iter(plt.cm.rainbow(np.linspace(0, 1, n_dust)))

n_plotted = 0
for fname in os.listdir(opacity):
    if not fname.endswith('.fits'):
        continue

    fitsfile = opacity + fname
    hdulist = fits.open(fitsfile)
    hdu0 = hdulist[0]
    hdu1 = hdulist[1]  # unused, but kept: the file must have a second HDU
    hdulist.close()

    # Table layout: row 0 = wavelength, row 2 = absorption, row 3 = scattering
    naxis = hdu0.header['NAXIS1']
    data = fits.getdata(fitsfile, 0)
    wavelength = np.array(data[0, :naxis], dtype=float)
    absorption = np.array(data[2, :naxis], dtype=float)
    scattering = np.array(data[3, :naxis], dtype=float)

    if "gas_opacity_" not in fname[:-5]:
        # Dust opacities: colored curves with a legend label
        # (dashed/circles = scattering, solid/triangles = absorption).
        color = next(colors)
        if np.size(wavelength) == 1:
            plt.plot(wavelength, scattering, color=color, marker='o', ms=5, mew=0, label=fname[:-5])
            plt.plot(wavelength, absorption, color=color, marker='^', ms=5, mew=0)
        else:
            plt.plot(wavelength, scattering, color=color, ls='--', label=fname[:-5])
            plt.plot(wavelength, absorption, color=color, ls='-')
        n_plotted += 1
    else:
        # Gas opacities: thin gray curves without a legend entry.
        if np.size(wavelength) == 1:
            plt.plot(wavelength, scattering, color='gray', marker='o', ms=3, mew=0)
            plt.plot(wavelength, absorption, color='gray', marker='^', ms=3, mew=0)
        else:
            plt.plot(wavelength, scattering, color='gray', ls='--', lw=0.6)
            plt.plot(wavelength, absorption, color='gray', ls='-', lw=0.6)

plt.xlabel('Wavelength [um]')
plt.ylabel('Opacity [cm$^2$/g]')
plt.xscale('log')
plt.yscale('log')

# NOTE(review): after the loops n_plotted always equals n_dust, so this
# condition never holds and no legend is ever drawn — presumably the
# comparison was meant the other way around; confirm intent before changing.
if n_dust != n_plotted:
    plt.legend(loc='upper left')

plt.savefig(os.path.join(opacity, 'opacity.pdf'), bbox_inches='tight')
| [
"os.listdir",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"numpy.size",
"os.path.join",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.zeros",
"astropy.io.fits.getdata",
"astropy.io.fits.open",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.xscale"... | [((222, 251), 'os.path.join', 'os.path.join', (['sys.argv[1]', '""""""'], {}), "(sys.argv[1], '')\n", (234, 251), False, 'import os\n'), ((344, 363), 'os.listdir', 'os.listdir', (['opacity'], {}), '(opacity)\n', (354, 363), False, 'import os\n'), ((533, 552), 'os.listdir', 'os.listdir', (['opacity'], {}), '(opacity)\n', (543, 552), False, 'import os\n'), ((1998, 2027), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength [um]"""'], {}), "('Wavelength [um]')\n", (2008, 2027), True, 'import matplotlib.pyplot as plt\n'), ((2028, 2060), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Opacity [cm$^2$/g]"""'], {}), "('Opacity [cm$^2$/g]')\n", (2038, 2060), True, 'import matplotlib.pyplot as plt\n'), ((2062, 2079), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2072, 2079), True, 'import matplotlib.pyplot as plt\n'), ((2080, 2097), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2090, 2097), True, 'import matplotlib.pyplot as plt\n'), ((2118, 2146), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (2128, 2146), True, 'import matplotlib.pyplot as plt\n'), ((2160, 2196), 'os.path.join', 'os.path.join', (['opacity', '"""opacity.pdf"""'], {}), "(opacity, 'opacity.pdf')\n", (2172, 2196), False, 'import os\n'), ((488, 512), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'count'], {}), '(0, 1, count)\n', (499, 512), True, 'import numpy as np\n'), ((645, 664), 'astropy.io.fits.open', 'fits.open', (['fitsfile'], {}), '(fitsfile)\n', (654, 664), False, 'from astropy.io import fits\n'), ((802, 817), 'numpy.zeros', 'np.zeros', (['naxis'], {}), '(naxis)\n', (810, 817), True, 'import numpy as np\n'), ((839, 854), 'numpy.zeros', 'np.zeros', (['naxis'], {}), '(naxis)\n', (847, 854), True, 'import numpy as np\n'), ((876, 891), 'numpy.zeros', 'np.zeros', (['naxis'], {}), '(naxis)\n', (884, 891), True, 'import numpy as np\n'), ((908, 933), 
'astropy.io.fits.getdata', 'fits.getdata', (['fitsfile', '(0)'], {}), '(fitsfile, 0)\n', (920, 933), False, 'from astropy.io import fits\n'), ((1170, 1189), 'numpy.size', 'np.size', (['wavelength'], {}), '(wavelength)\n', (1177, 1189), True, 'import numpy as np\n'), ((1212, 1300), 'matplotlib.pyplot.plot', 'plt.plot', (['wavelength', 'scattering'], {'color': 'c', 'marker': '"""o"""', 'ms': '(5)', 'mew': '(0)', 'label': 'file[:-5]'}), "(wavelength, scattering, color=c, marker='o', ms=5, mew=0, label=\n file[:-5])\n", (1220, 1300), True, 'import matplotlib.pyplot as plt\n'), ((1312, 1378), 'matplotlib.pyplot.plot', 'plt.plot', (['wavelength', 'absorption'], {'color': 'c', 'marker': '"""^"""', 'ms': '(5)', 'mew': '(0)'}), "(wavelength, absorption, color=c, marker='^', ms=5, mew=0)\n", (1320, 1378), True, 'import matplotlib.pyplot as plt\n'), ((1413, 1480), 'matplotlib.pyplot.plot', 'plt.plot', (['wavelength', 'scattering'], {'color': 'c', 'ls': '"""--"""', 'label': 'file[:-5]'}), "(wavelength, scattering, color=c, ls='--', label=file[:-5])\n", (1421, 1480), True, 'import matplotlib.pyplot as plt\n'), ((1497, 1546), 'matplotlib.pyplot.plot', 'plt.plot', (['wavelength', 'absorption'], {'color': 'c', 'ls': '"""-"""'}), "(wavelength, absorption, color=c, ls='-')\n", (1505, 1546), True, 'import matplotlib.pyplot as plt\n'), ((1618, 1637), 'numpy.size', 'np.size', (['wavelength'], {}), '(wavelength)\n', (1625, 1637), True, 'import numpy as np\n'), ((1660, 1731), 'matplotlib.pyplot.plot', 'plt.plot', (['wavelength', 'scattering'], {'color': '"""gray"""', 'marker': '"""o"""', 'ms': '(3)', 'mew': '(0)'}), "(wavelength, scattering, color='gray', marker='o', ms=3, mew=0)\n", (1668, 1731), True, 'import matplotlib.pyplot as plt\n'), ((1748, 1819), 'matplotlib.pyplot.plot', 'plt.plot', (['wavelength', 'absorption'], {'color': '"""gray"""', 'marker': '"""^"""', 'ms': '(3)', 'mew': '(0)'}), "(wavelength, absorption, color='gray', marker='^', ms=3, mew=0)\n", (1756, 1819), True, 
'import matplotlib.pyplot as plt\n'), ((1854, 1917), 'matplotlib.pyplot.plot', 'plt.plot', (['wavelength', 'scattering'], {'color': '"""gray"""', 'ls': '"""--"""', 'lw': '(0.6)'}), "(wavelength, scattering, color='gray', ls='--', lw=0.6)\n", (1862, 1917), True, 'import matplotlib.pyplot as plt\n'), ((1934, 1996), 'matplotlib.pyplot.plot', 'plt.plot', (['wavelength', 'absorption'], {'color': '"""gray"""', 'ls': '"""-"""', 'lw': '(0.6)'}), "(wavelength, absorption, color='gray', ls='-', lw=0.6)\n", (1942, 1996), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import pandas as pd
def sra(real, synth):
    """
    Synthetic Ranking Agreement (SRA).

    SRA can be thought of as the (empirical) probability of a
    comparison on the synthetic data being "correct" (i.e. the same as
    the comparison would be on the real data). Ties (a zero difference)
    count as agreement, matching the original implementation.

    From "Measuring the quality of Synthetic data for use in competitions"
    https://arxiv.org/pdf/1806.11345.pdf

    (NOTE: SRA requires at least 2 accuracies per list to work)

    :param real: list of accuracies on models of real data
    :type real: list of floats
    :param synth: list of accuracies on models of synthetic data
    :type synth: list of floats
    :return: sra score
    :rtype: float
    """
    # Vectorized rewrite of the original O(k^2) Python loop, which also
    # shadowed `k = len(real)` with the comprehension loop variable.
    real = np.asarray(real, dtype=float)
    synth = np.asarray(synth, dtype=float)
    k = len(real)

    # Pairwise differences: entry (i, j) is value[i] - value[j].
    real_diff = real[:, np.newaxis] - real[np.newaxis, :]
    synth_diff = synth[:, np.newaxis] - synth[np.newaxis, :]

    # A comparison agrees when both differences have the same sign
    # (product >= 0, so ties count as agreement).
    agreement = (real_diff * synth_diff) >= 0

    # Exclude the trivial i == j comparisons on the diagonal.
    off_diagonal = ~np.eye(k, dtype=bool)

    # Fraction of agreeing ordered pairs among all k*(k-1) of them.
    return np.sum(agreement[off_diagonal]) / (k * (k - 1))
"numpy.sum",
"numpy.array"
] | [((1060, 1093), 'numpy.sum', 'np.sum', (['(1 / (k * (k - 1)) * sum_I)'], {}), '(1 / (k * (k - 1)) * sum_I)\n', (1066, 1093), True, 'import numpy as np\n'), ((935, 949), 'numpy.array', 'np.array', (['None'], {}), '(None)\n', (943, 949), True, 'import numpy as np\n'), ((970, 984), 'numpy.array', 'np.array', (['None'], {}), '(None)\n', (978, 984), True, 'import numpy as np\n')] |
""" Code for fitting circles, ellipses, planes, etc.
"""
import numpy as np
from numpy.linalg import eig, inv
from stentseg.utils.new_pointset import PointSet
def fit_circle(pp, warnIfIllDefined=True):
    """ Fit a circle to the given 2D points.

    Returns a tuple (x, y, r). If the points are collinear the system is
    singular and (0, 0, 0) is returned; a warning is printed unless
    ``warnIfIllDefined`` is False.

    The solution is a closed-form least-squares fit, the Modified Least
    Squares (MLS) method of:

    Umbach and Jones, 2000,
    A Few Methods for Fitting Circles to Data,
    IEEE Transactions on Instrumentation and Measurement
    """
    # Validate the input array
    if pp.ndim != 2:
        raise ValueError('Circle fit needs an Nx2 array.')
    if pp.shape[1] != 2:
        raise ValueError('Circle fit needs 2D points.')
    if pp.shape[0] < 2:
        raise ValueError('Circle fit needs at least two points.')

    def sample_cov(u, v):
        # Sample covariance of two equal-length 1D arrays.
        m = len(u)
        mean_u = u.sum() / m
        mean_v = v.sum() / m
        return ((u - mean_u) * (v - mean_v)).sum() / (m - 1)

    # Work relative to the centroid for numerical stability
    xs = pp[:, 0]
    ys = pp[:, 1]
    xoffset = xs.mean()
    yoffset = ys.mean()
    xs = xs - xoffset
    ys = ys - yoffset

    # Closed-form MLS coefficients (the paper's n*(n-1) factors cancel
    # out in the divisions below)
    A = sample_cov(xs, xs)
    B = sample_cov(xs, ys)
    C = sample_cov(ys, ys)
    D = 0.5 * (sample_cov(xs, ys ** 2) + sample_cov(xs, xs ** 2))
    E = 0.5 * (sample_cov(ys, xs ** 2) + sample_cov(ys, ys ** 2))

    # Singular system <=> the points are collinear
    # NOTE(review): exact float comparison; near-collinear input may still
    # produce a poorly conditioned fit
    denom = A * C - B * B
    if denom == 0:
        if warnIfIllDefined:
            print("Warning: can not fit a circle to the given points.")
        return 0, 0, 0

    # Circle center, translated back to the original coordinate frame
    cx = (D * C - B * E) / denom + xoffset
    cy = (A * E - B * D) / denom + yoffset

    # Radius: mean distance from the center to the sample points
    center = PointSet([cx, cy])
    radius = center.distance(pp).sum() / len(pp)

    return cx, cy, radius
def fit_ellipse(pp):
""" Fit an ellipse to the given 2D points
Returns a tuple (x, y, r1, r2, phi).
Algorithm derived from:
From http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html.
Based on approach suggested by <NAME> al., Direct least squares
fitting of ellipsees, 1996.
"""
# Check
if pp.ndim != 2:
raise ValueError('Ellipse fit needs an Nx2 array.')
if pp.shape[1] != 2:
raise ValueError('Ellipse fit needs 2D points.')
if pp.shape[0] < 3:
raise ValueError('Ellipse fit needs at least three points.')
# Get x and y and subtract offset to avoid inaccuracied during
# eigenvalue decomposition.
x = pp[:,0]
y = pp[:,1]
xoffset = x.mean()
yoffset = y.mean()
x = x - xoffset
y = y - yoffset
# Do the math
x = x[:,np.newaxis]
y = y[:,np.newaxis]
D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
S = np.dot(D.T,D)
C = np.zeros([6,6])
C[0,2] = C[2,0] = 2; C[1,1] = -1
E, V = eig(np.dot(inv(S), C))
n = np.argmax(np.abs(E))
a = V[:,n]
b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
# Calculate position
num = b*b-a*c
x0 = (c*d-b*f)/num + xoffset
y0 = (a*f-b*d)/num + yoffset
# Calculate radii
up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)
down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
res1 = np.sqrt(up/down1)
res2 = np.sqrt(up/down2)
# Calculate direction vector
phi = 0.5*np.arctan(2*b/(a-c))
# Ensure that first radius is the largers
if res1 < res2:
res2, res1 = res1, res2
phi += 0.5 * np.pi
# Ensure that phi is between 0 and pi
while phi < 0:
phi += np.pi
while phi > np.pi:
phi -= np.pi
return x0, y0, res1, res2, phi
def area(circle_or_ellipse):
    """ Calculate the area of the given circle or ellipse.

    Accepts a 3-element circle (x, y, r) or a 5-element ellipse
    (x, y, r1, r2, phi). Raises ValueError otherwise.
    """
    n = len(circle_or_ellipse)
    if n == 3:
        # Circle: both radii equal
        r1 = r2 = circle_or_ellipse[2]
    elif n == 5:
        r1, r2 = circle_or_ellipse[2], circle_or_ellipse[3]
    else:
        raise ValueError('Input of area() is not a circle nor an ellipse.')
    return np.pi * r1 * r2
def sample_circle(c, N=32):
    """ Sample points on a circle c.

    Returns a 2D PointSet with N+1 points; the extra sample repeats the
    first point so that the loop is closed.
    """
    assert len(c) == 3
    x, y, r = c
    # Angles; one extra sample closes the loop
    angles = np.linspace(0, 2 * np.pi, N + 1)
    # Convert polar coordinates to cartesian
    pp = np.empty((len(angles), 2), dtype=np.float32)
    pp[:, 0] = np.cos(angles) * r + x
    pp[:, 1] = np.sin(angles) * r + y
    return PointSet(pp)
def sample_ellipse(e, N=32):
    """ Sample points on an ellipse e.

    Returns a 2D PointSet with N+1 points; the extra sample repeats the
    first point so that the loop is closed.
    """
    assert len(e) == 5
    x, y, r1, r2, phi = e
    # Angles; one extra sample closes the loop
    angles = np.linspace(0, 2 * np.pi, N + 1)
    pp = np.empty((len(angles), 2), dtype=np.float32)
    # Polar coordinates, rotated over phi
    cos_a, sin_a = np.cos(angles), np.sin(angles)
    pp[:, 0] = x + r1 * cos_a * np.cos(phi) - r2 * sin_a * np.sin(phi)
    pp[:, 1] = y + r1 * cos_a * np.sin(phi) + r2 * sin_a * np.cos(phi)
    return PointSet(pp)
def fit_plane(pp):
    """ Fit a plane through a set of 3D points.

    Returns a tuple (a, b, c, d) representing the plane as
    ``a*x + b*y + c*z + d = 0``. The normal (a, b, c) has unit length,
    and the sign convention is such that d <= 0.

    Uses singular value decomposition; this is the SVD method published
    at http://stackoverflow.com/questions/15959411
    """
    # Validate the input array
    if pp.ndim != 2:
        raise ValueError('Plane fit needs an Nx3 array.')
    if pp.shape[1] != 3:
        raise ValueError('Plane fit needs 3D points.')
    if pp.shape[0] < 3:
        raise ValueError('Plane fit needs at least three points.')

    # Set up the homogeneous system AB = 0 with B the plane coefficients
    # in the form b1*X + b2*Y + b3*Z + b4 = 0. The least-squares solution
    # is the last row of v (right singular vector of the smallest
    # singular value).
    n_points = pp.shape[0]
    AB = np.hstack([pp, np.ones((n_points, 1))])
    u, d, v = np.linalg.svd(AB, 0)
    B = v[3, :]
    # Normalize so that (a, b, c) is a unit normal
    B = B / np.linalg.norm(B[0:3])
    # Flip the whole plane if needed so it points up (d <= 0)
    if B[3] > 0:
        B = [-coef for coef in B]
    return B[0], B[1], B[2], B[3]
def project_to_plane(pp, plane):
    """ Project given 3D points to a plane to make them 2D.

    Returns a 2D PointSet carrying a ``plane`` attribute so that the
    operation can be reversed with project_from_plane(). We assume that
    the plane represents a grid that is aligned with the world grid, but
    rotated over the x and y axis.
    """
    # Validate the input array
    if pp.ndim != 2:
        raise ValueError('project_to_plane needs an Nx3 array.')
    if pp.shape[1] != 3:
        raise ValueError('project_to_plane needs 3D points.')
    a, b, c, d = plane
    # Plane equation value per point, scaled by the squared normal length
    norm = a ** 2 + b ** 2 + c ** 2
    common = (a * pp[:, 0] + b * pp[:, 1] + c * pp[:, 2] + d) / norm
    # Rotation angles of the plane around the x and y axis
    phix = np.arctan(a / c)
    phiy = np.arctan(b / c)
    # Move each point (still in world coordinates) to its closest point
    # on the plane.
    pp3 = pp.copy()
    pp3[:, 0] = pp[:, 0] - a * common
    pp3[:, 1] = pp[:, 1] - b * common
    pp3[:, 2] = pp[:, 2] - c * common
    # Rotate the points into the plane's 2D grid
    pp2 = PointSet(pp3[:, :2])
    pp2[:, 0] = pp3[:, 0] / np.cos(phix)
    pp2[:, 1] = pp3[:, 1] / np.cos(phiy)
    # Store the plane so the projection can be reconstructed
    pp2.plane = a, b, c, d
    return pp2
def signed_distance_to_plane(pp, plane):
    """ Find the signed distances of the given 3D points to the given plane.

    Note that the distances are signed, and can thus be negative:
    positive on the side the normal (a, b, c) points to.
    """
    a, b, c, d = plane
    # Evaluate the plane equation per point, then divide by the length
    # of the normal to get true distances.
    values = a * pp[:, 0] + b * pp[:, 1] + c * pp[:, 2] + d
    return values / np.sqrt(a ** 2 + b ** 2 + c ** 2)
def project_from_plane(pp, plane):
    """ Project 2D points on a plane to the original 3D coordinate frame.

    The inverse of project_to_plane(); returns a 3D PointSet.
    """
    # Validate the input array
    if pp.ndim != 2:
        raise ValueError('project_from_plane needs an Nx2 array.')
    if pp.shape[1] != 2:
        raise ValueError('project_from_plane needs 2D points.')
    a, b, c, d = plane
    # Rotation angles of the plane around the x and y axis
    phix = np.arctan(a / c)
    phiy = np.arctan(b / c)
    # Undo the rotation that project_to_plane applied
    pp3 = PointSet(np.zeros((pp.shape[0], 3), 'float32'))
    pp3[:, 0] = pp[:, 0] * np.cos(phix)
    pp3[:, 1] = pp[:, 1] * np.cos(phiy)
    # Solve the plane equation for z
    pp3[:, 2] = -(pp3[:, 0] * a + pp3[:, 1] * b + d) / c
    return pp3
def convex_hull(points):
    """Computes the convex hull of a set of 2D points.

    Input: an iterable sequence of (x, y) pairs representing the points.
    Output: a list of vertices of the convex hull in counter-clockwise
    order, starting from the vertex with the lexicographically smallest
    coordinates.

    Implements Andrew's monotone chain algorithm, O(n log n). Tuples may
    carry additional elements beyond (x, y); these happily move along,
    but only the first two elements are used for the computation.
    """
    # Sort lexicographically on the (x, y) part of each tuple
    points = sorted(points, key=lambda p: p[:2])
    # Boring case: no points or a single point (possibly repeated)
    if len(points) <= 1:
        return points

    def _cross(o, a, b):
        # z-component of the 3D cross product of OA and OB: positive for
        # a counter-clockwise turn, negative for clockwise, zero when
        # the three points are collinear.
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

    def _half_hull(pts):
        # Build one monotone chain, popping points that would make a
        # clockwise (or straight) turn.
        chain = []
        for p in pts:
            while len(chain) >= 2 and _cross(chain[-2], chain[-1], p) <= 0:
                chain.pop()
            chain.append(p)
        return chain

    lower = _half_hull(points)
    upper = _half_hull(reversed(points))
    # Concatenate both chains; the last point of each is dropped because
    # it is repeated at the start of the other.
    return lower[:-1] + upper[:-1]
if __name__ == '__main__':
    # Demo / visual sanity check for the fitting functions above.
    # Requires the visvis package for plotting.
    from stentseg.utils.new_pointset import PointSet
    # Create some data, 2D and 3D: points on a circle-ish curve that, in
    # 3D, lies on a tilted plane. np.linspace with no num gives 50 samples.
    pp2 = PointSet(2)
    pp3 = PointSet(3)
    for r in np.linspace(0, 2*np.pi):
        x = np.sin(r) + 10
        y = np.cos(r) * 1.33 + 20
        z = 0.17*x + 0.79*y + 30
        pp2.append(x, y)
        pp3.append(x, y, z)
    # With noise
    pp2 += np.random.normal(0, 0.15, size=pp2.shape)
    pp3 += np.random.normal(0, 0.15, size=pp3.shape)
    # Fit 2D
    c2 = fit_circle(pp2)
    e2 = fit_ellipse(pp2)
    print('area circle 2D: % 1.2f' % area(c2))
    print('area ellipse 2D: % 1.2f' % area(e2))
    # Fit 3D. We first fit a plane, then project the points onto that
    # plane to make the points 2D, and then we fit the ellipse.
    # Further down, we sample the ellipse and project them to 3D again
    # to be able to visualize the result.
    plane = fit_plane(pp3)
    pp3_2 = project_to_plane(pp3, plane)
    c3 = fit_circle(pp3_2)
    e3 = fit_ellipse(pp3_2)
    print('area circle 3D: % 1.2f' % area(c3))
    print('area ellipse 3D: % 1.2f' % area(e3))
    # For visualization, calculate 4 points on rectangle that lies on the plane
    # (each corner's z follows from solving the plane equation for z).
    x1, x2 = pp3.min(0)[0]-0.3, pp3.max(0)[0]+0.3
    y1, y2 = pp3.min(0)[1]-0.3, pp3.max(0)[1]+0.3
    p1 = x1, y1, -(x1*plane[0] + y1*plane[1] + plane[3]) / plane[2]
    p2 = x2, y1, -(x2*plane[0] + y1*plane[1] + plane[3]) / plane[2]
    p3 = x2, y2, -(x2*plane[0] + y2*plane[1] + plane[3]) / plane[2]
    p4 = x1, y2, -(x1*plane[0] + y2*plane[1] + plane[3]) / plane[2]
    # Init visualization
    import visvis as vv
    fig = vv.clf()
    fig.position = 300, 300, 1000, 600
    # 2D vis
    a = vv.subplot(121)
    a.daspectAuto = False
    a.axis.showGrid = True
    vv.title('2D fitting')
    vv.xlabel('x'); vv.ylabel('y')
    # Plot
    vv.plot(pp2, ls='', ms='.', mc='k')
    # vv.plot(sample_circle(c2), lc='r', lw=2)
    vv.plot(sample_ellipse(e2), lc='b', lw=2)
    # vv.legend('2D points', 'Circle fit', 'Ellipse fit')
    vv.legend('2D points', 'Ellipse fit')
    # 3D vis
    a = vv.subplot(122)
    a.daspectAuto = False
    a.axis.showGrid = True
    vv.title('3D fitting')
    vv.xlabel('x'); vv.ylabel('y'); vv.zlabel('z')
    # Plot
    vv.plot(pp3, ls='', ms='.', mc='k')
    vv.plot(project_from_plane(pp3_2, plane), lc='r', ls='', ms='.', mc='r', mw=4)
    # vv.plot(project_from_plane(sample_circle(c3), plane), lc='r', lw=2)
    vv.plot(project_from_plane(sample_ellipse(e3), plane), lc='b', lw=2)
    vv.plot(np.array([p1, p2, p3, p4, p1]), lc='g', lw=2)
    # vv.legend('3D points', 'Projected points', 'Circle fit', 'Ellipse fit', 'Plane fit')
    vv.legend('3D points', 'Projected points', 'Ellipse fit', 'Plane fit')
| [
"numpy.sqrt",
"visvis.xlabel",
"stentseg.utils.new_pointset.PointSet",
"numpy.hstack",
"visvis.ylabel",
"visvis.plot",
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"visvis.title",
"visvis.clf",
"numpy.dot",
"numpy.linspace",
"numpy.arctan",
"numpy.random.normal",
"visvis.subplot",
... | [((1926, 1942), 'stentseg.utils.new_pointset.PointSet', 'PointSet', (['[x, y]'], {}), '([x, y])\n', (1934, 1942), False, 'from stentseg.utils.new_pointset import PointSet\n'), ((3000, 3014), 'numpy.dot', 'np.dot', (['D.T', 'D'], {}), '(D.T, D)\n', (3006, 3014), True, 'import numpy as np\n'), ((3022, 3038), 'numpy.zeros', 'np.zeros', (['[6, 6]'], {}), '([6, 6])\n', (3030, 3038), True, 'import numpy as np\n'), ((3542, 3561), 'numpy.sqrt', 'np.sqrt', (['(up / down1)'], {}), '(up / down1)\n', (3549, 3561), True, 'import numpy as np\n'), ((3571, 3590), 'numpy.sqrt', 'np.sqrt', (['(up / down2)'], {}), '(up / down2)\n', (3578, 3590), True, 'import numpy as np\n'), ((4616, 4648), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(N + 1)'], {}), '(0, 2 * np.pi, N + 1)\n', (4627, 4648), True, 'import numpy as np\n'), ((4860, 4872), 'stentseg.utils.new_pointset.PointSet', 'PointSet', (['pp'], {}), '(pp)\n', (4868, 4872), False, 'from stentseg.utils.new_pointset import PointSet\n'), ((5151, 5183), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(N + 1)'], {}), '(0, 2 * np.pi, N + 1)\n', (5162, 5183), True, 'import numpy as np\n'), ((5488, 5500), 'stentseg.utils.new_pointset.PointSet', 'PointSet', (['pp'], {}), '(pp)\n', (5496, 5500), False, 'from stentseg.utils.new_pointset import PointSet\n'), ((6305, 6323), 'numpy.ones', 'np.ones', (['(rows, 1)'], {}), '((rows, 1))\n', (6312, 6323), True, 'import numpy as np\n'), ((6333, 6351), 'numpy.hstack', 'np.hstack', (['[pp, p]'], {}), '([pp, p])\n', (6342, 6351), True, 'import numpy as np\n'), ((6368, 6388), 'numpy.linalg.svd', 'np.linalg.svd', (['AB', '(0)'], {}), '(AB, 0)\n', (6381, 6388), True, 'import numpy as np\n'), ((6463, 6485), 'numpy.linalg.norm', 'np.linalg.norm', (['B[0:3]'], {}), '(B[0:3])\n', (6477, 6485), True, 'import numpy as np\n'), ((7260, 7276), 'numpy.arctan', 'np.arctan', (['(a / c)'], {}), '(a / c)\n', (7269, 7276), True, 'import numpy as np\n'), ((7286, 7302), 'numpy.arctan', 
'np.arctan', (['(b / c)'], {}), '(b / c)\n', (7295, 7302), True, 'import numpy as np\n'), ((7684, 7704), 'stentseg.utils.new_pointset.PointSet', 'PointSet', (['pp3[:, :2]'], {}), '(pp3[:, :2])\n', (7692, 7704), False, 'from stentseg.utils.new_pointset import PointSet\n'), ((8640, 8656), 'numpy.arctan', 'np.arctan', (['(a / c)'], {}), '(a / c)\n', (8649, 8656), True, 'import numpy as np\n'), ((8666, 8682), 'numpy.arctan', 'np.arctan', (['(b / c)'], {}), '(b / c)\n', (8675, 8682), True, 'import numpy as np\n'), ((10919, 10930), 'stentseg.utils.new_pointset.PointSet', 'PointSet', (['(2)'], {}), '(2)\n', (10927, 10930), False, 'from stentseg.utils.new_pointset import PointSet\n'), ((10941, 10952), 'stentseg.utils.new_pointset.PointSet', 'PointSet', (['(3)'], {}), '(3)\n', (10949, 10952), False, 'from stentseg.utils.new_pointset import PointSet\n'), ((10966, 10991), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (10977, 10991), True, 'import numpy as np\n'), ((11166, 11207), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.15)'], {'size': 'pp2.shape'}), '(0, 0.15, size=pp2.shape)\n', (11182, 11207), True, 'import numpy as np\n'), ((11219, 11260), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.15)'], {'size': 'pp3.shape'}), '(0, 0.15, size=pp3.shape)\n', (11235, 11260), True, 'import numpy as np\n'), ((12418, 12426), 'visvis.clf', 'vv.clf', ([], {}), '()\n', (12424, 12426), True, 'import visvis as vv\n'), ((12492, 12507), 'visvis.subplot', 'vv.subplot', (['(121)'], {}), '(121)\n', (12502, 12507), True, 'import visvis as vv\n'), ((12565, 12587), 'visvis.title', 'vv.title', (['"""2D fitting"""'], {}), "('2D fitting')\n", (12573, 12587), True, 'import visvis as vv\n'), ((12592, 12606), 'visvis.xlabel', 'vv.xlabel', (['"""x"""'], {}), "('x')\n", (12601, 12606), True, 'import visvis as vv\n'), ((12608, 12622), 'visvis.ylabel', 'vv.ylabel', (['"""y"""'], {}), "('y')\n", (12617, 12622), True, 'import visvis as vv\n'), ((12638, 
12673), 'visvis.plot', 'vv.plot', (['pp2'], {'ls': '""""""', 'ms': '"""."""', 'mc': '"""k"""'}), "(pp2, ls='', ms='.', mc='k')\n", (12645, 12673), True, 'import visvis as vv\n'), ((12829, 12866), 'visvis.legend', 'vv.legend', (['"""2D points"""', '"""Ellipse fit"""'], {}), "('2D points', 'Ellipse fit')\n", (12838, 12866), True, 'import visvis as vv\n'), ((12893, 12908), 'visvis.subplot', 'vv.subplot', (['(122)'], {}), '(122)\n', (12903, 12908), True, 'import visvis as vv\n'), ((12966, 12988), 'visvis.title', 'vv.title', (['"""3D fitting"""'], {}), "('3D fitting')\n", (12974, 12988), True, 'import visvis as vv\n'), ((12993, 13007), 'visvis.xlabel', 'vv.xlabel', (['"""x"""'], {}), "('x')\n", (13002, 13007), True, 'import visvis as vv\n'), ((13009, 13023), 'visvis.ylabel', 'vv.ylabel', (['"""y"""'], {}), "('y')\n", (13018, 13023), True, 'import visvis as vv\n'), ((13025, 13039), 'visvis.zlabel', 'vv.zlabel', (['"""z"""'], {}), "('z')\n", (13034, 13039), True, 'import visvis as vv\n'), ((13055, 13090), 'visvis.plot', 'vv.plot', (['pp3'], {'ls': '""""""', 'ms': '"""."""', 'mc': '"""k"""'}), "(pp3, ls='', ms='.', mc='k')\n", (13062, 13090), True, 'import visvis as vv\n'), ((13474, 13544), 'visvis.legend', 'vv.legend', (['"""3D points"""', '"""Projected points"""', '"""Ellipse fit"""', '"""Plane fit"""'], {}), "('3D points', 'Projected points', 'Ellipse fit', 'Plane fit')\n", (13483, 13544), True, 'import visvis as vv\n'), ((3128, 3137), 'numpy.abs', 'np.abs', (['E'], {}), '(E)\n', (3134, 3137), True, 'import numpy as np\n'), ((3641, 3667), 'numpy.arctan', 'np.arctan', (['(2 * b / (a - c))'], {}), '(2 * b / (a - c))\n', (3650, 3667), True, 'import numpy as np\n'), ((7730, 7742), 'numpy.cos', 'np.cos', (['phix'], {}), '(phix)\n', (7736, 7742), True, 'import numpy as np\n'), ((7769, 7781), 'numpy.cos', 'np.cos', (['phiy'], {}), '(phiy)\n', (7775, 7781), True, 'import numpy as np\n'), ((8726, 8764), 'numpy.zeros', 'np.zeros', (['(pp2.shape[0], 3)', '"""float32"""'], {}), 
"((pp2.shape[0], 3), 'float32')\n", (8734, 8764), True, 'import numpy as np\n'), ((8821, 8833), 'numpy.cos', 'np.cos', (['phix'], {}), '(phix)\n', (8827, 8833), True, 'import numpy as np\n'), ((8860, 8872), 'numpy.cos', 'np.cos', (['phiy'], {}), '(phiy)\n', (8866, 8872), True, 'import numpy as np\n'), ((13333, 13363), 'numpy.array', 'np.array', (['[p1, p2, p3, p4, p1]'], {}), '([p1, p2, p3, p4, p1])\n', (13341, 13363), True, 'import numpy as np\n'), ((2974, 2989), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (2986, 2989), True, 'import numpy as np\n'), ((3098, 3104), 'numpy.linalg.inv', 'inv', (['S'], {}), '(S)\n', (3101, 3104), False, 'from numpy.linalg import eig, inv\n'), ((4767, 4776), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (4773, 4776), True, 'import numpy as np\n'), ((4799, 4808), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (4805, 4808), True, 'import numpy as np\n'), ((5355, 5366), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (5361, 5366), True, 'import numpy as np\n'), ((5433, 5444), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (5439, 5444), True, 'import numpy as np\n'), ((11003, 11012), 'numpy.sin', 'np.sin', (['r'], {}), '(r)\n', (11009, 11012), True, 'import numpy as np\n'), ((3427, 3471), 'numpy.sqrt', 'np.sqrt', (['(1 + 4 * b * b / ((a - c) * (a - c)))'], {}), '(1 + 4 * b * b / ((a - c) * (a - c)))\n', (3434, 3471), True, 'import numpy as np\n'), ((3493, 3537), 'numpy.sqrt', 'np.sqrt', (['(1 + 4 * b * b / ((a - c) * (a - c)))'], {}), '(1 + 4 * b * b / ((a - c) * (a - c)))\n', (3500, 3537), True, 'import numpy as np\n'), ((5324, 5335), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (5330, 5335), True, 'import numpy as np\n'), ((5343, 5352), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (5349, 5352), True, 'import numpy as np\n'), ((5402, 5413), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (5408, 5413), True, 'import numpy as np\n'), ((5421, 5430), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (5427, 5430), True, 'import 
numpy as np\n'), ((11030, 11039), 'numpy.cos', 'np.cos', (['r'], {}), '(r)\n', (11036, 11039), True, 'import numpy as np\n'), ((5312, 5321), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (5318, 5321), True, 'import numpy as np\n'), ((5390, 5399), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (5396, 5399), True, 'import numpy as np\n')] |
import numpy as np
from keras.utils import to_categorical
import copy
from common.utils import default_config, make_env, eligibility_traces, discount_rewards
from common.ppo_independant import PPOPolicyNetwork, ValueNetwork
# Multi-agent PPO training loop (independent actor/critic per agent).
# Runs at import time: there is no main() guard.
render = False
normalize_inputs = True
# Environment and hyperparameters come from the project config
config = default_config()
env = make_env(config, normalize_inputs)
LAMBDA = float(config['agent']['lambda'])
lr_actor = float(config['agent']['lr_actor'])
n_agent = env.n_agent
T = env.T  # update period (in environment steps)
GAMMA = env.GAMMA
n_episode = env.n_episode
max_steps = env.max_steps
n_actions = env.n_actions
i_episode = 0
# One policy network and one value network per agent
Pi = []
V = []
for i in range(n_agent):
    Pi.append(PPOPolicyNetwork(num_features=env.input_size, num_actions=n_actions, layer_size=256, epsilon=0.1, learning_rate=lr_actor))
    V.append(ValueNetwork(num_features=env.input_size, hidden_size=256, learning_rate=0.001))
while i_episode < n_episode:
    i_episode += 1
    avg = [0] * n_agent  # NOTE(review): never read after initialization
    # Per-agent rollout buffers, cleared every T steps after an update
    ep_actions = [[] for _ in range(n_agent)]
    ep_rewards = [[] for _ in range(n_agent)]
    ep_states = [[] for _ in range(n_agent)]
    score = 0
    steps = 0
    su = [0.] * n_agent
    su = np.array(su)  # cumulative reward per agent this episode
    obs = env.reset()
    done = False
    while steps < max_steps and not done:
        steps += 1
        action = []
        for i in range(n_agent):
            # Sample an action from agent i's current policy
            h = copy.deepcopy(obs[i])
            p = Pi[i].get_dist(np.array([h]))[0]
            action.append(np.random.choice(range(n_actions), p=p))
            ep_states[i].append(h)
            ep_actions[i].append(to_categorical(action[i], n_actions))
        obs, rewards, done = env.step(action)
        su += np.array(rewards)
        score += sum(rewards)
        for i in range(n_agent):
            ep_rewards[i].append(rewards[i])
        if steps % T == 0:
            # Every T steps: compute targets/advantages and update nets
            for i in range(n_agent):
                ep_actions[i] = np.array(ep_actions[i])
                ep_rewards[i] = np.array(ep_rewards[i], dtype=np.float_)
                ep_states[i] = np.array(ep_states[i])
                if LAMBDA < -0.1:
                    # Negative lambda selects plain Monte-Carlo returns
                    targets = discount_rewards(ep_rewards[i], GAMMA)
                    V[i].update(ep_states[i], targets)
                    vs = V[i].get(ep_states[i])
                else:
                    # Otherwise use eligibility traces (TD(lambda)-style),
                    # bootstrapping from the value of the latest observation
                    vs = V[i].get(ep_states[i])
                    targets = eligibility_traces(ep_rewards[i], vs, V[i].get(copy.deepcopy([obs[i]])), GAMMA, LAMBDA)
                    V[i].update(ep_states[i], targets)
                # Normalized advantages (epsilon avoids division by zero)
                ep_advantages = targets - vs
                ep_advantages = (ep_advantages - np.mean(ep_advantages)) / (np.std(ep_advantages) + 0.0000000001)
                Pi[i].update(ep_states[i], ep_actions[i], ep_advantages)
            # Clear the rollout buffers for the next window
            ep_actions = [[] for _ in range(n_agent)]
            ep_rewards = [[] for _ in range(n_agent)]
            ep_states = [[] for _ in range(n_agent)]
        if render:
            env.render()
    # Episode diagnostics
    print(i_episode)
    print(score / max_steps, steps)
    print(su)
    print(env.rinfo.flatten())
    env.end_episode()
| [
"numpy.mean",
"copy.deepcopy",
"numpy.std",
"keras.utils.to_categorical",
"numpy.array",
"common.ppo_independant.ValueNetwork",
"common.utils.discount_rewards",
"common.ppo_independant.PPOPolicyNetwork",
"common.utils.default_config",
"common.utils.make_env"
] | [((274, 290), 'common.utils.default_config', 'default_config', ([], {}), '()\n', (288, 290), False, 'from common.utils import default_config, make_env, eligibility_traces, discount_rewards\n'), ((297, 331), 'common.utils.make_env', 'make_env', (['config', 'normalize_inputs'], {}), '(config, normalize_inputs)\n', (305, 331), False, 'from common.utils import default_config, make_env, eligibility_traces, discount_rewards\n'), ((1109, 1121), 'numpy.array', 'np.array', (['su'], {}), '(su)\n', (1117, 1121), True, 'import numpy as np\n'), ((618, 743), 'common.ppo_independant.PPOPolicyNetwork', 'PPOPolicyNetwork', ([], {'num_features': 'env.input_size', 'num_actions': 'n_actions', 'layer_size': '(256)', 'epsilon': '(0.1)', 'learning_rate': 'lr_actor'}), '(num_features=env.input_size, num_actions=n_actions,\n layer_size=256, epsilon=0.1, learning_rate=lr_actor)\n', (634, 743), False, 'from common.ppo_independant import PPOPolicyNetwork, ValueNetwork\n'), ((754, 833), 'common.ppo_independant.ValueNetwork', 'ValueNetwork', ([], {'num_features': 'env.input_size', 'hidden_size': '(256)', 'learning_rate': '(0.001)'}), '(num_features=env.input_size, hidden_size=256, learning_rate=0.001)\n', (766, 833), False, 'from common.ppo_independant import PPOPolicyNetwork, ValueNetwork\n'), ((1599, 1616), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (1607, 1616), True, 'import numpy as np\n'), ((1293, 1314), 'copy.deepcopy', 'copy.deepcopy', (['obs[i]'], {}), '(obs[i])\n', (1306, 1314), False, 'import copy\n'), ((1499, 1535), 'keras.utils.to_categorical', 'to_categorical', (['action[i]', 'n_actions'], {}), '(action[i], n_actions)\n', (1513, 1535), False, 'from keras.utils import to_categorical\n'), ((1823, 1846), 'numpy.array', 'np.array', (['ep_actions[i]'], {}), '(ep_actions[i])\n', (1831, 1846), True, 'import numpy as np\n'), ((1879, 1919), 'numpy.array', 'np.array', (['ep_rewards[i]'], {'dtype': 'np.float_'}), '(ep_rewards[i], dtype=np.float_)\n', (1887, 1919), True, 
'import numpy as np\n'), ((1951, 1973), 'numpy.array', 'np.array', (['ep_states[i]'], {}), '(ep_states[i])\n', (1959, 1973), True, 'import numpy as np\n'), ((1346, 1359), 'numpy.array', 'np.array', (['[h]'], {}), '([h])\n', (1354, 1359), True, 'import numpy as np\n'), ((2038, 2076), 'common.utils.discount_rewards', 'discount_rewards', (['ep_rewards[i]', 'GAMMA'], {}), '(ep_rewards[i], GAMMA)\n', (2054, 2076), False, 'from common.utils import default_config, make_env, eligibility_traces, discount_rewards\n'), ((2517, 2539), 'numpy.mean', 'np.mean', (['ep_advantages'], {}), '(ep_advantages)\n', (2524, 2539), True, 'import numpy as np\n'), ((2544, 2565), 'numpy.std', 'np.std', (['ep_advantages'], {}), '(ep_advantages)\n', (2550, 2565), True, 'import numpy as np\n'), ((2327, 2350), 'copy.deepcopy', 'copy.deepcopy', (['[obs[i]]'], {}), '([obs[i]])\n', (2340, 2350), False, 'import copy\n')] |
import argparse
import baselineUtils
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import os
import time
from transformer.batch import subsequent_mask
from torch.optim import Adam,SGD,RMSprop,Adagrad
from transformer.noam_opt import NoamOpt
import numpy as np
import scipy.io
import json
import pickle
from torch.utils.tensorboard import SummaryWriter
def main():
parser=argparse.ArgumentParser(description='Train the individual Transformer model')
parser.add_argument('--dataset_folder',type=str,default='datasets')
parser.add_argument('--dataset_name',type=str,default='zara1')
parser.add_argument('--obs',type=int,default=8)
parser.add_argument('--preds',type=int,default=12)
parser.add_argument('--emb_size',type=int,default=512)
parser.add_argument('--heads',type=int, default=8)
parser.add_argument('--layers',type=int,default=6)
parser.add_argument('--dropout',type=float,default=0.1)
parser.add_argument('--cpu',action='store_true')
parser.add_argument('--output_folder',type=str,default='Output')
parser.add_argument('--val_size',type=int, default=0)
parser.add_argument('--gpu_device',type=str, default="0")
parser.add_argument('--verbose',action='store_true')
parser.add_argument('--max_epoch',type=int, default=100)
parser.add_argument('--batch_size',type=int,default=100)
parser.add_argument('--validation_epoch_start', type=int, default=30)
parser.add_argument('--resume_train',action='store_true')
parser.add_argument('--delim',type=str,default='\t')
parser.add_argument('--name', type=str, default="zara1")
parser.add_argument('--factor', type=float, default=1.)
parser.add_argument('--evaluate',type=bool,default=True)
parser.add_argument('--save_step', type=int, default=1)
args=parser.parse_args()
model_name=args.name
try:
os.mkdir('models')
except:
pass
try:
os.mkdir('output')
except:
pass
try:
os.mkdir('output/QuantizedTF')
except:
pass
try:
os.mkdir(f'models/QuantizedTF')
except:
pass
try:
os.mkdir(f'output/QuantizedTF/{args.name}')
except:
pass
try:
os.mkdir(f'models/QuantizedTF/{args.name}')
except:
pass
log=SummaryWriter('logs/%s'%model_name)
#os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_device
device=torch.device("cuda")
if args.cpu or not torch.cuda.is_available():
device=torch.device("cpu")
args.verbose=True
## creation of the dataloaders for train and validation
if args.val_size==0:
train_dataset,_ = baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=True,verbose=args.verbose)
val_dataset, _ = baselineUtils.create_dataset(args.dataset_folder, args.dataset_name, 0, args.obs,
args.preds, delim=args.delim, train=False,
verbose=args.verbose)
else:
train_dataset, val_dataset = baselineUtils.create_dataset(args.dataset_folder, args.dataset_name, args.val_size, args.obs,
args.preds, delim=args.delim, train=True,
verbose=args.verbose)
test_dataset,_ = baselineUtils.create_dataset(args.dataset_folder,args.dataset_name,0,args.obs,args.preds,delim=args.delim,train=False,eval=True,verbose=args.verbose)
mat = scipy.io.loadmat(os.path.join(args.dataset_folder, args.dataset_name, "clusters.mat"))
clusters=mat['centroids']
import quantized_TF
model=quantized_TF.QuantizedTF(clusters.shape[0], clusters.shape[0]+1, clusters.shape[0], N=args.layers,
d_model=args.emb_size, d_ff=1024, h=args.heads, dropout=args.dropout).to(device)
tr_dl=torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)
val_dl = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0)
test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0)
#optim = SGD(list(a.parameters())+list(model.parameters())+list(generator.parameters()),lr=0.01)
#sched=torch.optim.lr_scheduler.StepLR(optim,0.0005)
optim = NoamOpt(args.emb_size, args.factor, len(tr_dl)*5,
torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
#optim=Adagrad(list(a.parameters())+list(model.parameters())+list(generator.parameters()),lr=0.01,lr_decay=0.001)
epoch=0
while epoch<args.max_epoch:
epoch_loss=0
model.train()
for id_b,batch in enumerate(tr_dl):
optim.optimizer.zero_grad()
scale=np.random.uniform(0.5,4)
#rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch=batch['src'].shape[0]
speeds_inp=batch['src'][:,1:,2:4]*scale
inp=torch.tensor(scipy.spatial.distance.cdist(speeds_inp.reshape(-1,2),clusters).argmin(axis=1).reshape(n_in_batch,-1)).to(device)
speeds_trg = batch['trg'][:,:,2:4]*scale
target = torch.tensor(
scipy.spatial.distance.cdist(speeds_trg.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch, -1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att=subsequent_mask(target.shape[1]).repeat(n_in_batch,1,1).to(device)
start_of_seq=torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
dec_inp=torch.cat((start_of_seq,target[:,:-1]),1)
out=model(inp, dec_inp, src_att, trg_att)
loss = F.cross_entropy(out.view(-1,out.shape[-1]),target.view(-1),reduction='mean')
loss.backward()
optim.step()
print("epoch %03i/%03i frame %04i / %04i loss: %7.4f" % (epoch, args.max_epoch, id_b, len(tr_dl), loss.item()))
epoch_loss += loss.item()
#sched.step()
log.add_scalar('Loss/train', epoch_loss / len(tr_dl), epoch)
with torch.no_grad():
model.eval()
gt=[]
pr=[]
val_loss=0
step=0
for batch in val_dl:
# rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.contiguous().reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
speeds_trg = batch['trg'][:, :, 2:4]
target = torch.tensor(
scipy.spatial.distance.cdist(speeds_trg.contiguous().reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att = subsequent_mask(target.shape[1]).repeat(n_in_batch, 1, 1).to(device)
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
dec_inp = torch.cat((start_of_seq, target[:, :-1]), 1)
out = model(inp, dec_inp, src_att, trg_att)
loss = F.cross_entropy(out.contiguous().view(-1, out.shape[-1]), target.contiguous().view(-1), reduction='mean')
print("val epoch %03i/%03i frame %04i / %04i loss: %7.4f" % (
epoch, args.max_epoch, step, len(val_dl), loss.item()))
val_loss+=loss.item()
step+=1
log.add_scalar('validation/loss', val_loss / len(val_dl), epoch)
if args.evaluate:
# DETERMINISTIC MODE
model.eval()
model.eval()
gt = []
pr = []
inp_ = []
peds = []
frames = []
dt = []
for batch in test_dl:
inp_.append(batch['src'][:,:,0:2])
gt.append(batch['trg'][:, :, 0:2])
frames.append(batch['frames'])
peds.append(batch['peds'])
dt.append(batch['dataset'])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
gt_b = batch['trg'][:, :, 0:2]
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att = subsequent_mask(target.shape[1]).repeat(n_in_batch, 1, 1).to(device)
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
dec_inp = start_of_seq
for i in range(args.preds):
trg_att = subsequent_mask(dec_inp.shape[1]).repeat(n_in_batch, 1, 1).to(device)
out = model(inp, dec_inp, src_att, trg_att)
dec_inp=torch.cat((dec_inp,out[:,-1:].argmax(dim=2)),1)
preds_tr_b=clusters[dec_inp[:,1:].cpu().numpy()].cumsum(1)+batch['src'][:,-1:,0:2].cpu().numpy()
pr.append(preds_tr_b)
peds = np.concatenate(peds, 0)
frames = np.concatenate(frames, 0)
dt = np.concatenate(dt, 0)
gt = np.concatenate(gt, 0)
dt_names = test_dataset.data['dataset_name']
pr = np.concatenate(pr, 0)
mad,fad,errs=baselineUtils.distance_metrics(gt,pr)
log.add_scalar('eval/DET_mad', mad, epoch)
log.add_scalar('eval/DET_fad', fad, epoch)
scipy.io.savemat(f"output/QuantizedTF/{args.name}/{epoch:05d}.mat",
{'input': inp, 'gt': gt, 'pr': pr, 'peds': peds, 'frames': frames, 'dt': dt,
'dt_names': dt_names})
# MULTI MODALITY
if False:
num_samples=20
model.eval()
gt=[]
pr_all={}
for sam in range(num_samples):
pr_all[sam]=[]
for batch in test_dl:
# rot_mat = np.array([[np.cos(r), np.sin(r)], [-np.sin(r), np.cos(r)]])
n_in_batch = batch['src'].shape[0]
speeds_inp = batch['src'][:, 1:, 2:4]
gt_b = batch['trg'][:, :, 0:2]
gt.append(gt_b)
inp = torch.tensor(
scipy.spatial.distance.cdist(speeds_inp.reshape(-1, 2), clusters).argmin(axis=1).reshape(n_in_batch,
-1)).to(
device)
src_att = torch.ones((inp.shape[0], 1,inp.shape[1])).to(device)
trg_att = subsequent_mask(target.shape[1]).repeat(n_in_batch, 1, 1).to(device)
start_of_seq = torch.tensor([clusters.shape[0]]).repeat(n_in_batch).unsqueeze(1).to(device)
for sam in range(num_samples):
dec_inp = start_of_seq
for i in range(args.preds):
trg_att = subsequent_mask(dec_inp.shape[1]).repeat(n_in_batch, 1, 1).to(device)
out = model.predict(inp, dec_inp, src_att, trg_att)
h=out[:,-1]
dec_inp=torch.cat((dec_inp,torch.multinomial(h,1)),1)
preds_tr_b=clusters[dec_inp[:,1:].cpu().numpy()].cumsum(1)+batch['src'][:,-1:,0:2].cpu().numpy()
pr_all[sam].append(preds_tr_b)
gt=np.concatenate(gt,0)
#pr=np.concatenate(pr,0)
samp = {}
for k in pr_all.keys():
samp[k] = {}
samp[k]['pr'] = np.concatenate(pr_all[k], 0)
samp[k]['mad'], samp[k]['fad'], samp[k]['err'] = baselineUtils.distance_metrics(gt, samp[k]['pr'])
ev = [samp[i]['err'] for i in range(num_samples)]
e20 = np.stack(ev, -1)
mad_samp=e20.mean(1).min(-1).mean()
fad_samp=e20[:,-1].min(-1).mean()
#mad,fad,errs=baselineUtils.distance_metrics(gt,pr)
log.add_scalar('eval/MM_mad', mad_samp, epoch)
log.add_scalar('eval/MM_fad', fad_samp, epoch)
if epoch % args.save_step == 0:
torch.save(model.state_dict(), f'models/QuantizedTF/{args.name}/{epoch:05d}.pth')
epoch+=1
ab=1
# Standard script entry point: run the training/evaluation driver only when
# this file is executed directly (not when imported as a module).
if __name__=='__main__':
    main()
| [
"torch.utils.data.DataLoader",
"baselineUtils.create_dataset",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter",
"argparse.ArgumentParser",
"numpy.stack",
"os.mkdir",
"numpy.concatenate",
"baselineUtils.distance_metrics",
"torch.cat",
"torch.device",
"quantized_TF.QuantizedTF... | [((425, 502), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train the individual Transformer model"""'}), "(description='Train the individual Transformer model')\n", (448, 502), False, 'import argparse\n'), ((2344, 2381), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (["('logs/%s' % model_name)"], {}), "('logs/%s' % model_name)\n", (2357, 2381), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2450, 2470), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2462, 2470), False, 'import torch\n'), ((3492, 3658), 'baselineUtils.create_dataset', 'baselineUtils.create_dataset', (['args.dataset_folder', 'args.dataset_name', '(0)', 'args.obs', 'args.preds'], {'delim': 'args.delim', 'train': '(False)', 'eval': '(True)', 'verbose': 'args.verbose'}), '(args.dataset_folder, args.dataset_name, 0,\n args.obs, args.preds, delim=args.delim, train=False, eval=True, verbose\n =args.verbose)\n', (3520, 3658), False, 'import baselineUtils\n'), ((4017, 4120), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(0)'}), '(train_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=0)\n', (4044, 4120), False, 'import torch\n'), ((4130, 4231), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(0)'}), '(val_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=0)\n', (4157, 4231), False, 'import torch\n'), ((4242, 4345), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(test_dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=0)\n', (4269, 4345), False, 'import torch\n'), ((1909, 1927), 'os.mkdir', 'os.mkdir', (['"""models"""'], {}), 
"('models')\n", (1917, 1927), False, 'import os\n'), ((1970, 1988), 'os.mkdir', 'os.mkdir', (['"""output"""'], {}), "('output')\n", (1978, 1988), False, 'import os\n'), ((2031, 2061), 'os.mkdir', 'os.mkdir', (['"""output/QuantizedTF"""'], {}), "('output/QuantizedTF')\n", (2039, 2061), False, 'import os\n'), ((2104, 2135), 'os.mkdir', 'os.mkdir', (['f"""models/QuantizedTF"""'], {}), "(f'models/QuantizedTF')\n", (2112, 2135), False, 'import os\n'), ((2179, 2222), 'os.mkdir', 'os.mkdir', (['f"""output/QuantizedTF/{args.name}"""'], {}), "(f'output/QuantizedTF/{args.name}')\n", (2187, 2222), False, 'import os\n'), ((2266, 2309), 'os.mkdir', 'os.mkdir', (['f"""models/QuantizedTF/{args.name}"""'], {}), "(f'models/QuantizedTF/{args.name}')\n", (2274, 2309), False, 'import os\n'), ((2537, 2556), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2549, 2556), False, 'import torch\n'), ((2693, 2842), 'baselineUtils.create_dataset', 'baselineUtils.create_dataset', (['args.dataset_folder', 'args.dataset_name', '(0)', 'args.obs', 'args.preds'], {'delim': 'args.delim', 'train': '(True)', 'verbose': 'args.verbose'}), '(args.dataset_folder, args.dataset_name, 0,\n args.obs, args.preds, delim=args.delim, train=True, verbose=args.verbose)\n', (2721, 2842), False, 'import baselineUtils\n'), ((2857, 3007), 'baselineUtils.create_dataset', 'baselineUtils.create_dataset', (['args.dataset_folder', 'args.dataset_name', '(0)', 'args.obs', 'args.preds'], {'delim': 'args.delim', 'train': '(False)', 'verbose': 'args.verbose'}), '(args.dataset_folder, args.dataset_name, 0,\n args.obs, args.preds, delim=args.delim, train=False, verbose=args.verbose)\n', (2885, 3007), False, 'import baselineUtils\n'), ((3187, 3354), 'baselineUtils.create_dataset', 'baselineUtils.create_dataset', (['args.dataset_folder', 'args.dataset_name', 'args.val_size', 'args.obs', 'args.preds'], {'delim': 'args.delim', 'train': '(True)', 'verbose': 'args.verbose'}), '(args.dataset_folder, args.dataset_name, 
args.\n val_size, args.obs, args.preds, delim=args.delim, train=True, verbose=\n args.verbose)\n', (3215, 3354), False, 'import baselineUtils\n'), ((3670, 3738), 'os.path.join', 'os.path.join', (['args.dataset_folder', 'args.dataset_name', '"""clusters.mat"""'], {}), "(args.dataset_folder, args.dataset_name, 'clusters.mat')\n", (3682, 3738), False, 'import os\n'), ((2495, 2520), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2518, 2520), False, 'import torch\n'), ((3805, 3985), 'quantized_TF.QuantizedTF', 'quantized_TF.QuantizedTF', (['clusters.shape[0]', '(clusters.shape[0] + 1)', 'clusters.shape[0]'], {'N': 'args.layers', 'd_model': 'args.emb_size', 'd_ff': '(1024)', 'h': 'args.heads', 'dropout': 'args.dropout'}), '(clusters.shape[0], clusters.shape[0] + 1, clusters\n .shape[0], N=args.layers, d_model=args.emb_size, d_ff=1024, h=args.\n heads, dropout=args.dropout)\n', (3829, 3985), False, 'import quantized_TF\n'), ((4972, 4997), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(4)'], {}), '(0.5, 4)\n', (4989, 4997), True, 'import numpy as np\n'), ((5843, 5887), 'torch.cat', 'torch.cat', (['(start_of_seq, target[:, :-1])', '(1)'], {}), '((start_of_seq, target[:, :-1]), 1)\n', (5852, 5887), False, 'import torch\n'), ((6359, 6374), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6372, 6374), False, 'import torch\n'), ((7703, 7747), 'torch.cat', 'torch.cat', (['(start_of_seq, target[:, :-1])', '(1)'], {}), '((start_of_seq, target[:, :-1]), 1)\n', (7712, 7747), False, 'import torch\n'), ((10113, 10136), 'numpy.concatenate', 'np.concatenate', (['peds', '(0)'], {}), '(peds, 0)\n', (10127, 10136), True, 'import numpy as np\n'), ((10162, 10187), 'numpy.concatenate', 'np.concatenate', (['frames', '(0)'], {}), '(frames, 0)\n', (10176, 10187), True, 'import numpy as np\n'), ((10209, 10230), 'numpy.concatenate', 'np.concatenate', (['dt', '(0)'], {}), '(dt, 0)\n', (10223, 10230), True, 'import numpy as np\n'), ((10252, 10273), 
'numpy.concatenate', 'np.concatenate', (['gt', '(0)'], {}), '(gt, 0)\n', (10266, 10273), True, 'import numpy as np\n'), ((10356, 10377), 'numpy.concatenate', 'np.concatenate', (['pr', '(0)'], {}), '(pr, 0)\n', (10370, 10377), True, 'import numpy as np\n'), ((10407, 10445), 'baselineUtils.distance_metrics', 'baselineUtils.distance_metrics', (['gt', 'pr'], {}), '(gt, pr)\n', (10437, 10445), False, 'import baselineUtils\n'), ((5580, 5623), 'torch.ones', 'torch.ones', (['(inp.shape[0], 1, inp.shape[1])'], {}), '((inp.shape[0], 1, inp.shape[1]))\n', (5590, 5623), False, 'import torch\n'), ((12790, 12811), 'numpy.concatenate', 'np.concatenate', (['gt', '(0)'], {}), '(gt, 0)\n', (12804, 12811), True, 'import numpy as np\n'), ((13256, 13272), 'numpy.stack', 'np.stack', (['ev', '(-1)'], {}), '(ev, -1)\n', (13264, 13272), True, 'import numpy as np\n'), ((7420, 7463), 'torch.ones', 'torch.ones', (['(inp.shape[0], 1, inp.shape[1])'], {}), '((inp.shape[0], 1, inp.shape[1]))\n', (7430, 7463), False, 'import torch\n'), ((13007, 13035), 'numpy.concatenate', 'np.concatenate', (['pr_all[k]', '(0)'], {}), '(pr_all[k], 0)\n', (13021, 13035), True, 'import numpy as np\n'), ((13109, 13158), 'baselineUtils.distance_metrics', 'baselineUtils.distance_metrics', (['gt', "samp[k]['pr']"], {}), "(gt, samp[k]['pr'])\n", (13139, 13158), False, 'import baselineUtils\n'), ((5654, 5686), 'transformer.batch.subsequent_mask', 'subsequent_mask', (['target.shape[1]'], {}), '(target.shape[1])\n', (5669, 5686), False, 'from transformer.batch import subsequent_mask\n'), ((9319, 9362), 'torch.ones', 'torch.ones', (['(inp.shape[0], 1, inp.shape[1])'], {}), '((inp.shape[0], 1, inp.shape[1]))\n', (9329, 9362), False, 'import torch\n'), ((7500, 7532), 'transformer.batch.subsequent_mask', 'subsequent_mask', (['target.shape[1]'], {}), '(target.shape[1])\n', (7515, 7532), False, 'from transformer.batch import subsequent_mask\n'), ((11816, 11859), 'torch.ones', 'torch.ones', (['(inp.shape[0], 1, inp.shape[1])'], 
{}), '((inp.shape[0], 1, inp.shape[1]))\n', (11826, 11859), False, 'import torch\n'), ((5746, 5779), 'torch.tensor', 'torch.tensor', (['[clusters.shape[0]]'], {}), '([clusters.shape[0]])\n', (5758, 5779), False, 'import torch\n'), ((9403, 9435), 'transformer.batch.subsequent_mask', 'subsequent_mask', (['target.shape[1]'], {}), '(target.shape[1])\n', (9418, 9435), False, 'from transformer.batch import subsequent_mask\n'), ((7600, 7633), 'torch.tensor', 'torch.tensor', (['[clusters.shape[0]]'], {}), '([clusters.shape[0]])\n', (7612, 7633), False, 'import torch\n'), ((9710, 9743), 'transformer.batch.subsequent_mask', 'subsequent_mask', (['dec_inp.shape[1]'], {}), '(dec_inp.shape[1])\n', (9725, 9743), False, 'from transformer.batch import subsequent_mask\n'), ((11904, 11936), 'transformer.batch.subsequent_mask', 'subsequent_mask', (['target.shape[1]'], {}), '(target.shape[1])\n', (11919, 11936), False, 'from transformer.batch import subsequent_mask\n'), ((12552, 12575), 'torch.multinomial', 'torch.multinomial', (['h', '(1)'], {}), '(h, 1)\n', (12569, 12575), False, 'import torch\n'), ((9507, 9540), 'torch.tensor', 'torch.tensor', (['[clusters.shape[0]]'], {}), '([clusters.shape[0]])\n', (9519, 9540), False, 'import torch\n'), ((12012, 12045), 'torch.tensor', 'torch.tensor', (['[clusters.shape[0]]'], {}), '([clusters.shape[0]])\n', (12024, 12045), False, 'import torch\n'), ((12295, 12328), 'transformer.batch.subsequent_mask', 'subsequent_mask', (['dec_inp.shape[1]'], {}), '(dec_inp.shape[1])\n', (12310, 12328), False, 'from transformer.batch import subsequent_mask\n')] |
## This file is adopted from DVE's github repo: https://github.com/jamt9000/DVE
import torch.nn.functional as F
import torch.nn as nn
import time
import torch
from PIL import Image
import numpy as np
from pointcloud_utils import pointcloud_vis
import pointnet3.sinkhorn_approximate as sinkFunc
def save_data_as_image(filename, data):
    """Rescale a numeric array to 0-255 and save it as a palette ('P') image.

    Args:
        filename: destination path for the image file.
        data: 2-D numpy array of arbitrary numeric range.
    """
    min_val = np.amin(data)
    max_val = np.amax(data)
    # RESCALING THE DATA to 0 255.  Guard against a constant array, which
    # previously caused a ZeroDivisionError (max_val == min_val).
    span = max_val - min_val
    if span == 0:
        img = np.zeros_like(data)
    else:
        img = (data - min_val) / span * 255
    img = img.astype(np.uint8)
    img = Image.fromarray(img, 'P')
    img.save(filename)
class DVE_loss(nn.Module):
    """Cycle-consistency (DVE-style) loss for point-cloud feature learning.

    For each cloud in the batch, soft correspondences are built from the
    source features to an auxiliary cloud and then to the deformed copy;
    correspondences that map points to geometrically distant locations are
    penalised, and a Gumbel-Sinkhorn term regularises the correspondence
    matrix towards a permutation.
    """
    # Informational list of loss variants; not read anywhere in this class.
    loss_type = ['reconstruction', 'cycle', 'cyle_new']
    def __init__(
            self,
            pow=0.5, fold_corr=False, normalize_vectors=True,
            temperature=1.0,
            sink_tau=[0.3,0.3], sink_iters=[30,30],
            lambda_lc=0.0, lambda_qap=0.0, lambda_ln=0.0,
            local_args=None
    ):
        """Store the loss hyper-parameters.

        Args:
            pow: exponent applied to pairwise point distances (sharpens them).
            fold_corr: stored; not used within this class.
            normalize_vectors: if True, L2-normalise features (scaled by 20).
            temperature: softmax temperature for the correlation matrices.
            sink_tau: Gumbel-Sinkhorn temperatures; only index 1 is used here.
            sink_iters: Gumbel-Sinkhorn iterations; only index 1 is used here.
            lambda_lc: weight of the Sinkhorn permutation regulariser.
            lambda_qap: stored; not used within this class.
            lambda_ln: stored; not used within this class.
            local_args: unused.
        """
        super(DVE_loss, self).__init__()
        self.pow=pow
        self.fold_corr = fold_corr
        self.normalize_vectors=normalize_vectors
        self.temperature = temperature
        self.sink_tau = sink_tau
        self.sink_iters = sink_iters
        self.lambda_lc = lambda_lc
        self.lambda_qap = lambda_qap
        self.lambda_ln = lambda_ln
        print(f"temperature {temperature} sink_tau {sink_tau}, sink_iters {sink_iters}, pow {pow}")
    def forward(self, feats, meta, epoch, step=0, fname_vis=None):
        """Compute the loss for one batch.

        Args:
            feats: (2B, N, C) feature tensor; rows 0,2,4,... are source
                clouds, rows 1,3,5,... their deformed copies.
            meta: dict holding 'pc0', the (B, N, 3+) source coordinates
                (columns beyond the first three are split off and unused).
            epoch: unused here (kept for the trainer's calling convention).
            step: unused here.
            fname_vis: optional path prefix; when set, correspondence
                matrices of randomly chosen batch items are saved as images.

        Returns:
            Tuple of (output_loss, output_info) dictionaries of scalars.
        """
        device = feats.device
        X1 = meta['pc0'].to(device)
        if X1.shape[2] > 3:
            N1 = X1[:,:,3:]  # extra per-point channels; not used below
            X1 = X1[:,:,:3]
        feats1 = feats[0::2]
        feats2 = feats[1::2] # deformed
        B, N, C = feats1.shape
        num_vis = 1 if B > 3 else B
        if fname_vis is not None:
            vis_idx = np.random.choice(B, num_vis, replace=False)
        # parameters
        loss = 0.
        correct_match = 0.
        diff_via_recon = 0.  # accumulator kept for reference; never updated
        Lc = 0.0
        perm_to_I = 0.0
        for b in range(B):
            # C X N
            f1 = feats1[b].permute(1,0) # source to B, C, N
            f2 = feats2[b].permute(1,0) # target
            fa = feats1[(b+1)%B].permute(1,0) # auxiliary
            if self.normalize_vectors:
                f1 = F.normalize(f1, p=2, dim=0) * 20
                f2 = F.normalize(f2, p=2, dim=0) * 20
                fa = F.normalize(fa, p=2, dim=0) * 20
            ## f1 && fa correlation
            corr_1a = torch.matmul(f1.t(), fa)/self.temperature ## [C, M]T X [C, N] = [M, N]
            smcorr_1a = F.softmax(corr_1a, dim=1)
            ## f1 reconstructed by fa
            f1_via_fa_t = torch.sum( smcorr_1a[:, None, :]*fa[None, :, :], dim=-1 ) ## [M, 1, N] X [1, C, N] --> [M, C, N] --> [M, C]
            corr_1a2 = torch.matmul(f1_via_fa_t, f2)/self.temperature ## [M, C] X [C, K] = [M, K]
            smcorr_1a2 = F.softmax(corr_1a2, dim=1)
            # Sinkhorn-normalised (near-permutation) version of corr_1a2,
            # computed without gradients; used as regularisation target and
            # diagnostic only.
            with torch.no_grad():
                smcorr_1a2_sink, _ = sinkFunc.gumbel_sinkhorn(
                    corr_1a2, temp=self.sink_tau[1], n_iters=self.sink_iters[1]); del corr_1a2
                if smcorr_1a2_sink.shape[0]==1:
                    smcorr_1a2_sink = smcorr_1a2_sink.squeeze(0)
                else:
                    smcorr_1a2_sink = torch.mean(smcorr_1a2_sink, dim=0)
            # Pairwise Euclidean distances between the points of cloud b.
            diff = X1[b, :, None, :] - X1[b, None, :, :]
            dist = (diff * diff).sum(2).sqrt()
            dist = dist.pow(self.pow) # make distance more sharper
            # C1*C2
            L = dist * smcorr_1a2
            ## rotational invariance
            corr_12 = torch.matmul(f1.t(), f2)
            smcorr_12 = F.softmax(corr_12/self.temperature, dim=1); del corr_12
            L12 = dist * smcorr_12
            ## for reference
            perm_to_I += 3.0*F.l1_loss(torch.eye(N).to(device), smcorr_1a2_sink, reduction='sum')/N
            ## Sinkhorn regularization
            ## ablation
            ## 1) constraint to permutation
            constraint_to_perm = "1a2_perm"
            # constraint_to_perm = "1a_perm"
            if constraint_to_perm == "1a2_perm":
                Lc_b = F.l1_loss(smcorr_1a2_sink, smcorr_1a2, reduction='sum')/N
            ## 2) constraint to identity
            elif constraint_to_perm == "1a2_identity":
                Lc_b = F.l1_loss(torch.eye(N).to(device), smcorr_1a2, reduction='sum')/N
                print("constraint smcorr_1a2_sink to identity")
            ## 3) constraint on 1-a correspondence
            # NOTE(review): this branch references `smcorr_1a_sink`, which is
            # never computed anywhere in this method, so selecting it would
            # raise NameError; it is unreachable while constraint_to_perm is
            # hard-coded above.
            elif constraint_to_perm == "1a_perm":
                Lc_b = F.l1_loss(smcorr_1a_sink, smcorr_1a, reduction='sum')/N; del smcorr_1a
                print("constraint smcorr_1a_sink to perm")
            Lc += 3.0*Lc_b
            ## finall loss
            L += 1.0*L12
            loss += (L.sum()/N)
            print(f"Loss: {L.sum():.6f}, Loss 12: {L12.sum():.6f}, smcorr1a2 to perm: {Lc_b:.6f}")
            ## record & check
            with torch.no_grad():
                # ## f1 fa correlation
                # Count points whose strongest correspondence is themselves.
                max_idx = torch.argmax(smcorr_1a2, dim=1)
                count = 0.0
                for i, max_id in enumerate(max_idx):
                    if max_id == i: count += 1
                correct_match += count
                if fname_vis is not None and np.sum(vis_idx==b) == 1:
                    txt_fname = fname_vis+str(b) + "smcorr_1a2_sink.png"
                    npdata = smcorr_1a2_sink.cpu().detach().numpy()
                    save_data_as_image(txt_fname, npdata)
                    txt_fname = fname_vis+str(b) + "smcorr_1a2.png"
                    npdata = smcorr_1a2.cpu().detach().numpy()
                    save_data_as_image(txt_fname, npdata)
                    print("saved files")
            del diff
        print("--------LOSS with DVE: {}--------".format(loss/B))
        total_loss = loss + self.lambda_lc*Lc
        output_loss = {
            'total_loss': total_loss/B,
            'cycle_loss': loss/B,
            'perm_loss': Lc/B,
        }
        output_info = {
            'correct_match': correct_match/B,
            'smcorr_to_I': perm_to_I/B,
        }
        return output_loss, output_info
| [
"pointnet3.sinkhorn_approximate.gumbel_sinkhorn",
"PIL.Image.fromarray",
"torch.nn.functional.l1_loss",
"numpy.amin",
"numpy.random.choice",
"torch.mean",
"torch.eye",
"torch.nn.functional.normalize",
"numpy.sum",
"torch.sum",
"torch.matmul",
"torch.no_grad",
"numpy.amax",
"torch.nn.functi... | [((350, 363), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (357, 363), True, 'import numpy as np\n'), ((378, 391), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (385, 391), True, 'import numpy as np\n'), ((520, 545), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""P"""'], {}), "(img, 'P')\n", (535, 545), False, 'from PIL import Image\n'), ((1790, 1833), 'numpy.random.choice', 'np.random.choice', (['B', 'num_vis'], {'replace': '(False)'}), '(B, num_vis, replace=False)\n', (1806, 1833), True, 'import numpy as np\n'), ((2543, 2568), 'torch.nn.functional.softmax', 'F.softmax', (['corr_1a'], {'dim': '(1)'}), '(corr_1a, dim=1)\n', (2552, 2568), True, 'import torch.nn.functional as F\n'), ((2633, 2690), 'torch.sum', 'torch.sum', (['(smcorr_1a[:, None, :] * fa[None, :, :])'], {'dim': '(-1)'}), '(smcorr_1a[:, None, :] * fa[None, :, :], dim=-1)\n', (2642, 2690), False, 'import torch\n'), ((2866, 2892), 'torch.nn.functional.softmax', 'F.softmax', (['corr_1a2'], {'dim': '(1)'}), '(corr_1a2, dim=1)\n', (2875, 2892), True, 'import torch.nn.functional as F\n'), ((3655, 3699), 'torch.nn.functional.softmax', 'F.softmax', (['(corr_12 / self.temperature)'], {'dim': '(1)'}), '(corr_12 / self.temperature, dim=1)\n', (3664, 3699), True, 'import torch.nn.functional as F\n'), ((2766, 2795), 'torch.matmul', 'torch.matmul', (['f1_via_fa_t', 'f2'], {}), '(f1_via_fa_t, f2)\n', (2778, 2795), False, 'import torch\n'), ((2911, 2926), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2924, 2926), False, 'import torch\n'), ((2965, 3055), 'pointnet3.sinkhorn_approximate.gumbel_sinkhorn', 'sinkFunc.gumbel_sinkhorn', (['corr_1a2'], {'temp': 'self.sink_tau[1]', 'n_iters': 'self.sink_iters[1]'}), '(corr_1a2, temp=self.sink_tau[1], n_iters=self.\n sink_iters[1])\n', (2989, 3055), True, 'import pointnet3.sinkhorn_approximate as sinkFunc\n'), ((5003, 5018), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5016, 5018), False, 'import torch\n'), ((5085, 
5116), 'torch.argmax', 'torch.argmax', (['smcorr_1a2'], {'dim': '(1)'}), '(smcorr_1a2, dim=1)\n', (5097, 5116), False, 'import torch\n'), ((2248, 2275), 'torch.nn.functional.normalize', 'F.normalize', (['f1'], {'p': '(2)', 'dim': '(0)'}), '(f1, p=2, dim=0)\n', (2259, 2275), True, 'import torch.nn.functional as F\n'), ((2302, 2329), 'torch.nn.functional.normalize', 'F.normalize', (['f2'], {'p': '(2)', 'dim': '(0)'}), '(f2, p=2, dim=0)\n', (2313, 2329), True, 'import torch.nn.functional as F\n'), ((2356, 2383), 'torch.nn.functional.normalize', 'F.normalize', (['fa'], {'p': '(2)', 'dim': '(0)'}), '(fa, p=2, dim=0)\n', (2367, 2383), True, 'import torch.nn.functional as F\n'), ((3260, 3294), 'torch.mean', 'torch.mean', (['smcorr_1a2_sink'], {'dim': '(0)'}), '(smcorr_1a2_sink, dim=0)\n', (3270, 3294), False, 'import torch\n'), ((4145, 4200), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['smcorr_1a2_sink', 'smcorr_1a2'], {'reduction': '"""sum"""'}), "(smcorr_1a2_sink, smcorr_1a2, reduction='sum')\n", (4154, 4200), True, 'import torch.nn.functional as F\n'), ((5330, 5350), 'numpy.sum', 'np.sum', (['(vis_idx == b)'], {}), '(vis_idx == b)\n', (5336, 5350), True, 'import numpy as np\n'), ((4576, 4629), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['smcorr_1a_sink', 'smcorr_1a'], {'reduction': '"""sum"""'}), "(smcorr_1a_sink, smcorr_1a, reduction='sum')\n", (4585, 4629), True, 'import torch.nn.functional as F\n'), ((3815, 3827), 'torch.eye', 'torch.eye', (['N'], {}), '(N)\n', (3824, 3827), False, 'import torch\n'), ((4332, 4344), 'torch.eye', 'torch.eye', (['N'], {}), '(N)\n', (4341, 4344), False, 'import torch\n')] |
import numpy as np
import unittest
from chainer import testing
from chainercv.experimental.links.model.pspnet import convolution_crop
class TestConvolutionCrop(unittest.TestCase):

    def test_convolution_crop(self):
        """Tiling a (3, 16, 12) image with 8x6 crops yields a 2x2 grid."""
        crop_h, crop_w = 8, 6
        n_channel = 3
        img = np.random.uniform(size=(n_channel, 16, 12)).astype(np.float32)

        crop_imgs, param = convolution_crop(
            img, (crop_h, crop_w), (crop_h, crop_w), return_param=True)

        self.assertEqual(crop_imgs.shape, (4, n_channel, crop_h, crop_w))
        self.assertEqual(crop_imgs.dtype, np.float32)
        # Output slices: crops are ordered row-major over a 2x2 grid.
        for idx in range(4):
            row, col = divmod(idx, 2)
            y_sl = param['y_slices'][idx]
            x_sl = param['x_slices'][idx]
            self.assertEqual((y_sl.start, y_sl.stop),
                             (crop_h * row, crop_h * (row + 1)))
            self.assertEqual((x_sl.start, x_sl.stop),
                             (crop_w * col, crop_w * (col + 1)))
        # Every crop is used in full (no trimmed borders).
        for idx in range(4):
            cy_sl = param['crop_y_slices'][idx]
            cx_sl = param['crop_x_slices'][idx]
            self.assertEqual((cy_sl.start, cy_sl.stop), (0, crop_h))
            self.assertEqual((cx_sl.start, cx_sl.stop), (0, crop_w))
testing.run_module(__name__, __file__)
| [
"chainercv.experimental.links.model.pspnet.convolution_crop",
"chainer.testing.run_module",
"numpy.random.uniform"
] | [((1282, 1320), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (1300, 1320), False, 'from chainer import testing\n'), ((393, 447), 'chainercv.experimental.links.model.pspnet.convolution_crop', 'convolution_crop', (['img', 'size', 'stride'], {'return_param': '(True)'}), '(img, size, stride, return_param=True)\n', (409, 447), False, 'from chainercv.experimental.links.model.pspnet import convolution_crop\n'), ((303, 346), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n_channel, 16, 12)'}), '(size=(n_channel, 16, 12))\n', (320, 346), True, 'import numpy as np\n')] |
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
"""Extensions of the priors specified in GPy"""
from typing import Sequence, Union
import GPy.core.parameterization.priors as priors
import numpy as np
from psbutils.arrayshapes import Shapes
class InverseGamma(priors.Gamma): # pragma: no cover
    """
    Implementation of the inverse-Gamma probability function, coupled with random variables.
    This is a fix for the GPy.priors.InverseGamma implementation, which doesn't work since 2016:
    https://github.com/SheffieldML/GPy/issues/502
    :param a: shape parameter
    :param b: rate parameter (warning: it's the *inverse* of the scale)
    .. Note:: Bishop 2006 notation is used throughout the code
    """
    domain = priors._POSITIVE
    def __init__(self, a, b):
        # Stash the parameters in the underscore fields; reads below go
        # through ``self.a``/``self.b``, which presumably are accessors the
        # GPy ``Gamma`` parent provides over ``_a``/``_b`` — TODO(review):
        # confirm against the installed GPy version.
        self._a = float(a)
        self._b = float(b)
        # Log-normalisation constant of the density: a*log(b) - log(Gamma(a)).
        self.constant = -priors.gammaln(self.a) + a * np.log(b)
    def __str__(self):
        """Return a string description of the prior."""
        return "iGa({:.2g}, {:.2g})".format(self.a, self.b)
    def lnpdf(self, x: np.ndarray) -> np.ndarray:
        """Return the log probability density function evaluated at x."""
        # log p(x) = constant - (a + 1)*log(x) - b/x; the Shapes wrapper
        # appears to tag/check the 1-D shape "X" (see psbutils.arrayshapes).
        return Shapes(x, "X")(self.constant - (self.a + 1) * np.log(x) - self.b / x, "X")[-1]  # type: ignore # auto
    def lnpdf_grad(self, x: np.ndarray) -> np.ndarray:
        """Return the gradient of the log probability density function evaluated at x."""
        # d/dx log p(x) = -(a + 1)/x + b/x^2
        return Shapes(x, "X")(-(self.a + 1.0) / x + self.b / x ** 2, "X")[-1]  # type: ignore # auto
    def rvs(self, n: Union[int, Sequence[int], np.ndarray]) -> np.ndarray:
        """Return samples from this prior of shape n."""
        # An inverse-gamma draw is the reciprocal of a Gamma(shape=a,
        # scale=1/b) draw.
        result = Shapes(1.0 / np.random.gamma(scale=1.0 / self.b, shape=self.a, size=n), f"{n}")[-1]
        return result  # type: ignore # auto
| [
"numpy.random.gamma",
"GPy.core.parameterization.priors.gammaln",
"numpy.log",
"psbutils.arrayshapes.Shapes"
] | [((1155, 1177), 'GPy.core.parameterization.priors.gammaln', 'priors.gammaln', (['self.a'], {}), '(self.a)\n', (1169, 1177), True, 'import GPy.core.parameterization.priors as priors\n'), ((1184, 1193), 'numpy.log', 'np.log', (['b'], {}), '(b)\n', (1190, 1193), True, 'import numpy as np\n'), ((1474, 1488), 'psbutils.arrayshapes.Shapes', 'Shapes', (['x', '"""X"""'], {}), "(x, 'X')\n", (1480, 1488), False, 'from psbutils.arrayshapes import Shapes\n'), ((1737, 1751), 'psbutils.arrayshapes.Shapes', 'Shapes', (['x', '"""X"""'], {}), "(x, 'X')\n", (1743, 1751), False, 'from psbutils.arrayshapes import Shapes\n'), ((1986, 2043), 'numpy.random.gamma', 'np.random.gamma', ([], {'scale': '(1.0 / self.b)', 'shape': 'self.a', 'size': 'n'}), '(scale=1.0 / self.b, shape=self.a, size=n)\n', (2001, 2043), True, 'import numpy as np\n'), ((1520, 1529), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (1526, 1529), True, 'import numpy as np\n')] |
import numpy as np
from ..Delboeuf.delboeuf_parameters import _delboeuf_parameters_sizeinner, _delboeuf_parameters_sizeouter
def _ebbinghaus_parameters(illusion_strength=0, difference=0, size_min=0.25, distance=1, distance_auto=False):
    """Compute the geometry of an Ebbinghaus illusion stimulus.

    Inner (target) circle sizes come from the Delboeuf helpers; outer
    (context) circle sizes are then adjusted by the illusion strength, and a
    ring of outer circles is placed around each inner circle.

    Args:
        illusion_strength: magnitude/direction of the illusion (> 0 is
            labelled "Incongruent").
        difference: objective size difference between the two inner circles.
        size_min: size of the smaller inner circle and the base outer size.
        distance: separation of the two stimuli; center-to-center when
            ``distance_auto`` is False, edge-to-edge otherwise.
        distance_auto: selects how ``distance`` is interpreted (see above).

    Returns:
        dict of stimulus parameters (sizes, positions, distances).
    """
    # Size inner circles
    parameters = _delboeuf_parameters_sizeinner(difference=difference, size_min=size_min)
    inner_size_left = parameters["Size_Inner_Left"]
    inner_size_right = parameters["Size_Inner_Right"]
    # Position (provisional; recomputed from the distance below)
    position_left = -0.5
    position_right = 0.5
    # Base size outer circles
    outer_size_left = size_min
    outer_size_right = size_min
    # Actual outer size based on illusion
    outer_size_left, outer_size_right = _delboeuf_parameters_sizeouter(outer_size_left,
                                                                      outer_size_right,
                                                                      difference=difference,
                                                                      illusion_strength=illusion_strength,
                                                                      both_sizes=True)
    # Location outer circles
    # NOTE(review): the outer-circle centers are computed at the provisional
    # +/-0.5 positions before position_left/right are recomputed below, so
    # the stored Position_Outer_x_* values do not track the final inner
    # positions — confirm this is intended by downstream drawing code.
    l_outer_x, l_outer_y, l_distance_edges = _ebbinghaus_parameters_outercircles(x=position_left,
                                                                                y=0,
                                                                                size_inner=inner_size_left,
                                                                                size_outer=outer_size_left,
                                                                                n="auto")
    r_outer_x, r_outer_y, r_distance_edges = _ebbinghaus_parameters_outercircles(x=position_right,
                                                                                y=0,
                                                                                size_inner=inner_size_right,
                                                                                size_outer=outer_size_right,
                                                                                n="auto")
    # Get location and distances
    if distance_auto is False:
        # ``distance`` is the center-to-center separation; derive edge gaps.
        distance_reference = 'Between Centers'
        distance_centers = distance
        position_left, position_right = -(distance_centers / 2), (distance_centers / 2)
        distance_edges_inner = distance_centers - (inner_size_left/2 + inner_size_right/2)
        distance_edges_outer = distance_centers - l_distance_edges - (outer_size_left/2) - r_distance_edges - (outer_size_right/2)
    else:
        # ``distance`` is the gap between the outer rings; derive centers.
        distance_reference = 'Between Edges'
        distance_edges_outer = distance
        distance_centers = distance_edges_outer + l_distance_edges + (outer_size_left/2) + r_distance_edges + (outer_size_right/2)
        distance_edges_inner = distance_centers - (outer_size_left/2 + outer_size_right/2)
        position_left, position_right = -(distance_centers / 2), (distance_centers / 2)
    parameters.update({
        "Illusion": "Ebbinghaus",
        "Illusion_Strength": illusion_strength,
        "Illusion_Type": "Incongruent" if illusion_strength > 0 else "Congruent",
        "Size_Outer_Left": outer_size_left,
        "Size_Outer_Right": outer_size_right,
        "Distance": distance_centers,
        "Distance_Reference": distance_reference,
        "Distance_Edges_Inner": distance_edges_inner,
        "Distance_Edges_Outer": distance_edges_outer,
        "Size_Inner_Smaller": np.min([inner_size_left, inner_size_right]),
        "Size_Inner_Larger": np.max([inner_size_left, inner_size_right]),
        "Size_Outer_Smaller": np.min([outer_size_left, outer_size_right]),
        "Size_Outer_Larger": np.max([outer_size_left, outer_size_right]),
        "Position_Outer_x_Left": l_outer_x,
        "Position_Outer_y_Left": l_outer_y,
        "Position_Outer_x_Right": r_outer_x,
        "Position_Outer_y_Right": r_outer_y,
        "Position_Left": position_left,
        "Position_Right": position_right
    })
    return parameters
def _ebbinghaus_parameters_outercircles(x=0, y=0, size_inner=0.25, size_outer=0.3, n="auto"):
# Find distance between center of inner circle and centers of outer circles
distance = (size_inner / 2) + (size_outer / 2) + 0.01
# Find n
if n == "auto":
perimeter = 2 * np.pi * distance
n = int(perimeter / size_outer)
# Get position of outer circles
angle = np.deg2rad(np.linspace(0, 360, num=n, endpoint=False))
circle_x = x + (np.cos(angle) * distance)
circle_y = y + (np.sin(angle) * distance)
return circle_x, circle_y, distance
| [
"numpy.max",
"numpy.linspace",
"numpy.cos",
"numpy.min",
"numpy.sin"
] | [((4327, 4369), 'numpy.linspace', 'np.linspace', (['(0)', '(360)'], {'num': 'n', 'endpoint': '(False)'}), '(0, 360, num=n, endpoint=False)\n', (4338, 4369), True, 'import numpy as np\n'), ((3354, 3397), 'numpy.min', 'np.min', (['[inner_size_left, inner_size_right]'], {}), '([inner_size_left, inner_size_right])\n', (3360, 3397), True, 'import numpy as np\n'), ((3428, 3471), 'numpy.max', 'np.max', (['[inner_size_left, inner_size_right]'], {}), '([inner_size_left, inner_size_right])\n', (3434, 3471), True, 'import numpy as np\n'), ((3503, 3546), 'numpy.min', 'np.min', (['[outer_size_left, outer_size_right]'], {}), '([outer_size_left, outer_size_right])\n', (3509, 3546), True, 'import numpy as np\n'), ((3577, 3620), 'numpy.max', 'np.max', (['[outer_size_left, outer_size_right]'], {}), '([outer_size_left, outer_size_right])\n', (3583, 3620), True, 'import numpy as np\n'), ((4391, 4404), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4397, 4404), True, 'import numpy as np\n'), ((4437, 4450), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (4443, 4450), True, 'import numpy as np\n')] |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Inception Score (IS) from the paper
"Improved techniques for training GANs"."""
import pickle
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from metrics import metric_base
# ----------------------------------------------------------------------------
class IS(metric_base.MetricBase):
    """Inception Score (IS) metric.

    Samples images from the generator, feeds them through a pre-trained
    Inception-v3 softmax network, and reports the mean and standard
    deviation of the score over ``num_splits`` splits.
    """
    def __init__(self, num_images, num_splits, minibatch_per_gpu, **kwargs):
        # num_images: total number of generated images to score.
        # num_splits: number of equal splits the score is averaged over.
        # minibatch_per_gpu: per-GPU generation batch size.
        super().__init__(**kwargs)
        self.num_images = num_images
        self.num_splits = num_splits
        self.minibatch_per_gpu = minibatch_per_gpu
    def _evaluate(self, Gs, G_kwargs, num_gpus, **_kwargs): # pylint: disable=arguments-differ
        """Run the metric; reports '_mean' and '_std' results via the base class."""
        minibatch_size = num_gpus * self.minibatch_per_gpu
        # Download the pre-trained Inception-v3 softmax classifier.
        with dnnlib.util.open_url(
                'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/inception_v3_softmax.pkl') as f:
            inception = pickle.load(f)
        activations = np.empty([self.num_images, inception.output_shape[1]], dtype=np.float32)
        # Construct TensorFlow graph: one generator + classifier clone per GPU.
        result_expr = []
        for gpu_idx in range(num_gpus):
            with tf.device(f'/gpu:{gpu_idx}'):
                Gs_clone = Gs.clone()
                inception_clone = inception.clone()
                latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
                labels = self._get_random_labels_tf(self.minibatch_per_gpu)
                images = Gs_clone.get_output_for(latents, labels, **G_kwargs)
                # Single-channel generators: replicate the channel to RGB.
                if images.shape[1] == 1: images = tf.tile(images, [1, 3, 1, 1])
                images = tflib.convert_images_to_uint8(images)
                result_expr.append(inception_clone.get_output_for(images))
        # Calculate activations for fakes.
        for begin in range(0, self.num_images, minibatch_size):
            self._report_progress(begin, self.num_images)
            end = min(begin + minibatch_size, self.num_images)
            activations[begin:end] = np.concatenate(tflib.run(result_expr), axis=0)[:end - begin]
        # Calculate IS: exp(mean KL(p(y|x) || p(y))) per split.
        scores = []
        for i in range(self.num_splits):
            part = activations[i * self.num_images // self.num_splits: (i + 1) * self.num_images // self.num_splits]
            kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
            kl = np.mean(np.sum(kl, 1))
            scores.append(np.exp(kl))
        self._report_result(np.mean(scores), suffix='_mean')
        self._report_result(np.std(scores), suffix='_std')
# ----------------------------------------------------------------------------
| [
"numpy.mean",
"tensorflow.device",
"tensorflow.tile",
"tensorflow.random_normal",
"dnnlib.util.open_url",
"numpy.log",
"pickle.load",
"dnnlib.tflib.convert_images_to_uint8",
"numpy.exp",
"numpy.sum",
"numpy.empty",
"numpy.std",
"dnnlib.tflib.run"
] | [((1319, 1391), 'numpy.empty', 'np.empty', (['[self.num_images, inception.output_shape[1]]'], {'dtype': 'np.float32'}), '([self.num_images, inception.output_shape[1]], dtype=np.float32)\n', (1327, 1391), True, 'import numpy as np\n'), ((1138, 1262), 'dnnlib.util.open_url', 'dnnlib.util.open_url', (['"""https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/inception_v3_softmax.pkl"""'], {}), "(\n 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metrics/inception_v3_softmax.pkl'\n )\n", (1158, 1262), False, 'import dnnlib\n'), ((1286, 1300), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1297, 1300), False, 'import pickle\n'), ((2674, 2689), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (2681, 2689), True, 'import numpy as np\n'), ((2731, 2745), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (2737, 2745), True, 'import numpy as np\n'), ((1495, 1523), 'tensorflow.device', 'tf.device', (['f"""/gpu:{gpu_idx}"""'], {}), "(f'/gpu:{gpu_idx}')\n", (1504, 1523), True, 'import tensorflow as tf\n'), ((1617, 1686), 'tensorflow.random_normal', 'tf.random_normal', (['([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])'], {}), '([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])\n', (1633, 1686), True, 'import tensorflow as tf\n'), ((1914, 1951), 'dnnlib.tflib.convert_images_to_uint8', 'tflib.convert_images_to_uint8', (['images'], {}), '(images)\n', (1943, 1951), True, 'import dnnlib.tflib as tflib\n'), ((2603, 2616), 'numpy.sum', 'np.sum', (['kl', '(1)'], {}), '(kl, 1)\n', (2609, 2616), True, 'import numpy as np\n'), ((2638, 2648), 'numpy.exp', 'np.exp', (['kl'], {}), '(kl)\n', (2644, 2648), True, 'import numpy as np\n'), ((1867, 1896), 'tensorflow.tile', 'tf.tile', (['images', '[1, 3, 1, 1]'], {}), '(images, [1, 3, 1, 1])\n', (1874, 1896), True, 'import tensorflow as tf\n'), ((2274, 2296), 'dnnlib.tflib.run', 'tflib.run', (['result_expr'], {}), '(result_expr)\n', (2283, 2296), True, 'import dnnlib.tflib as tflib\n'), ((2524, 
2536), 'numpy.log', 'np.log', (['part'], {}), '(part)\n', (2530, 2536), True, 'import numpy as np\n'), ((2561, 2577), 'numpy.mean', 'np.mean', (['part', '(0)'], {}), '(part, 0)\n', (2568, 2577), True, 'import numpy as np\n')] |
import numpy as np
from keras import Model
from keras.layers import Dense , GlobalAveragePooling2D
from PIL import Image, ImageDraw
from keras.applications import resnet
import numpy as np
def create_model(trainable=False):
#model = vgg16.VGG16(include_top=False, weights='imagenet',input_shape=(IMAGE_SIZE_H,IMAGE_SIZE_W , 3), pooling='None')
model = resnet.ResNet50(include_top=False, weights='imagenet' , input_shape = (360,640,3) )
for layer in model.layers:
layer.trainable = False
model.layers[-1].trainable = True
model.layers[-2].trainable = True
model.layers[-3].trainable = True
model.layers[-4].trainable = True
model.layers[-5].trainable = True
model.layers[-6].trainable = True
model.layers[-7].trainable = True
model.layers[-8].trainable = True
model.layers[-9].trainable = True
model.layers[-10].trainable = True
model.layers[-11].trainable = True
out = model.layers[-1].output
x = GlobalAveragePooling2D()(out)
x = Dense(4 , activation = 'linear')(x)
return Model(inputs=model.input, outputs=x)
net = create_model()
net.load_weights('C:\\users\\ateeb\\desktop\\localization\\weights\\train_resnet_fullsize.hdf5')
images = np.load('C:\\users\\ateeb\\desktop\\localization\\X_cow_half_size.npy')
count = 1
for i in images:
roi = net.predict(i.reshape(1,360,640,3))
roi = roi[0]
img = Image.fromarray(i)
draw = ImageDraw.Draw(img)
draw.rectangle( ((roi[0],roi[1]) , (roi[2],roi[3])) , outline = 'black')
img.save('C:\\users\\ateeb\\desktop\\localization\\predictions'+'\\'+str(count)+'.jpg')
count += 1
print(count)
| [
"PIL.Image.fromarray",
"keras.Model",
"PIL.ImageDraw.Draw",
"keras.applications.resnet.ResNet50",
"keras.layers.Dense",
"keras.layers.GlobalAveragePooling2D",
"numpy.load"
] | [((1233, 1304), 'numpy.load', 'np.load', (['"""C:\\\\users\\\\ateeb\\\\desktop\\\\localization\\\\X_cow_half_size.npy"""'], {}), "('C:\\\\users\\\\ateeb\\\\desktop\\\\localization\\\\X_cow_half_size.npy')\n", (1240, 1304), True, 'import numpy as np\n'), ((362, 448), 'keras.applications.resnet.ResNet50', 'resnet.ResNet50', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': '(360, 640, 3)'}), "(include_top=False, weights='imagenet', input_shape=(360, \n 640, 3))\n", (377, 448), False, 'from keras.applications import resnet\n'), ((1065, 1101), 'keras.Model', 'Model', ([], {'inputs': 'model.input', 'outputs': 'x'}), '(inputs=model.input, outputs=x)\n', (1070, 1101), False, 'from keras import Model\n'), ((1397, 1415), 'PIL.Image.fromarray', 'Image.fromarray', (['i'], {}), '(i)\n', (1412, 1415), False, 'from PIL import Image, ImageDraw\n'), ((1424, 1443), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (1438, 1443), False, 'from PIL import Image, ImageDraw\n'), ((979, 1003), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (1001, 1003), False, 'from keras.layers import Dense, GlobalAveragePooling2D\n'), ((1017, 1046), 'keras.layers.Dense', 'Dense', (['(4)'], {'activation': '"""linear"""'}), "(4, activation='linear')\n", (1022, 1046), False, 'from keras.layers import Dense, GlobalAveragePooling2D\n')] |
import numpy as np
from numpy.testing import (assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_equal, assert_)
from modules.scipy.special import VeroneseMap, VeroneseMapWithIdentity
def test_veronese_map():
x = np.random.randn(10)
n, m = len(x), 784
V = VeroneseMap(shape=(n, m), shuffle=True)
z = V(x)
x_ = V(z, inverse=True)
assert_array_almost_equal(x, x_)
def test_VeroneseMapWithIdentity():
samples, dim_x, dim_z = 2, 10, 784
x = np.random.randn(samples, dim_x)
V = VeroneseMapWithIdentity(shape=(dim_x, dim_z))
z = V(x)
x_ = V(z, inverse=True)
assert_array_almost_equal(x, x_)
assert_almost_equal(VeroneseMapWithIdentity.l_compare(V.l(x), V.l(x_)), 0.0) | [
"numpy.testing.assert_array_almost_equal",
"numpy.random.randn",
"modules.scipy.special.VeroneseMap",
"modules.scipy.special.VeroneseMapWithIdentity"
] | [((272, 291), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (287, 291), True, 'import numpy as np\n'), ((323, 362), 'modules.scipy.special.VeroneseMap', 'VeroneseMap', ([], {'shape': '(n, m)', 'shuffle': '(True)'}), '(shape=(n, m), shuffle=True)\n', (334, 362), False, 'from modules.scipy.special import VeroneseMap, VeroneseMapWithIdentity\n'), ((408, 440), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['x', 'x_'], {}), '(x, x_)\n', (433, 440), False, 'from numpy.testing import assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_equal, assert_\n'), ((525, 556), 'numpy.random.randn', 'np.random.randn', (['samples', 'dim_x'], {}), '(samples, dim_x)\n', (540, 556), True, 'import numpy as np\n'), ((565, 610), 'modules.scipy.special.VeroneseMapWithIdentity', 'VeroneseMapWithIdentity', ([], {'shape': '(dim_x, dim_z)'}), '(shape=(dim_x, dim_z))\n', (588, 610), False, 'from modules.scipy.special import VeroneseMap, VeroneseMapWithIdentity\n'), ((656, 688), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['x', 'x_'], {}), '(x, x_)\n', (681, 688), False, 'from numpy.testing import assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_equal, assert_\n')] |
#!/usr/bin/env python
####################################################################
### This is the PYTHON version of program 3.4 from page 87 of #
### "Modeling Infectious Disease in humans and animals" #
### by Keeling & Rohani. #
### #
### It is the SEIR model with four different age-groups and #
### yearly "movements" between the groups mimicking the school year#
####################################################################
###################################
### Written by <NAME> #
### <EMAIL> (work) #
### <EMAIL> #
###################################
import scipy.integrate as spi
import numpy as np
import pylab as pl
from matplotlib.font_manager import FontProperties
m = 4
mu = np.array([0.0, 0.0, 0.0, 1.0 / (55 * 365)])
nu = np.array([1.0 / (55 * 365), 0.0, 0.0, 0.0])
n = np.array([6.0, 4.0, 10.0, 55.0]) / 75.0
S0 = np.array([0.05, 0.01, 0.01, 0.008])
E0 = np.array([0.0001, 0.0001, 0.0001, 0.0001])
I0 = np.array([0.0001, 0.0001, 0.0001, 0.0001])
R0 = np.array([0.0298, 0.04313333, 0.12313333, 0.72513333])
ND = MaxTime = 365.0
beta = np.array(
(
[2.089, 2.089, 2.086, 2.037],
[2.089, 9.336, 2.086, 2.037],
[2.086, 2.086, 2.086, 2.037],
[2.037, 2.037, 2.037, 2.037],
)
)
gamma = 1 / 5.0
sigma = 1 / 8.0
TS = 1.0
INPUT = np.hstack((S0, E0, I0, R0))
def diff_eqs(INP, t):
"""The main set of equations"""
Y = np.zeros((16))
V = INP
for i in range(m):
Inf = np.dot(beta[i], V[list(np.array(range(m)) + 2 * m)]) * V[i]
Y[i] = nu[i] * n[3] - Inf - mu[i] * V[i]
Y[(m + i)] = Inf - mu[i] * V[(m + i)] - sigma * V[(m + i)]
Y[(2 * m + i)] = sigma * V[(m + i)] - gamma * V[(2 * m + i)] - mu[i] * V[(2 * m + i)]
Y[(3 * m + i)] = gamma * V[(2 * m + i)] - mu[i] * V[(3 * m + i)]
return Y # For odeint
t_start = 0.0
t_end = ND
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = np.zeros((16))
k = 1
while k <= 100:
RES = spi.odeint(diff_eqs, INPUT, t_range)
INPUT = RES[-1]
INPUT[15] = INPUT[15] + INPUT[14] / 10
INPUT[14] = INPUT[14] + INPUT[13] / 4 - INPUT[14] / 10
INPUT[13] = INPUT[13] + INPUT[12] / 6 - INPUT[13] / 4
INPUT[12] = INPUT[12] - INPUT[12] / 6
INPUT[11] = INPUT[11] + INPUT[10] / 10
INPUT[10] = INPUT[10] + INPUT[9] / 4 - INPUT[10] / 10
INPUT[9] = INPUT[9] + INPUT[8] / 6 - INPUT[9] / 4
INPUT[8] = INPUT[8] - INPUT[8] / 6
INPUT[7] = INPUT[7] + INPUT[6] / 10
INPUT[6] = INPUT[6] + INPUT[5] / 4 - INPUT[6] / 10
INPUT[5] = INPUT[5] + INPUT[4] / 6 - INPUT[5] / 4
INPUT[4] = INPUT[4] - INPUT[4] / 6
INPUT[3] = INPUT[3] + INPUT[2] / 10
INPUT[2] = INPUT[2] + INPUT[1] / 4 - INPUT[2] / 10
INPUT[1] = INPUT[1] + INPUT[0] / 6 - INPUT[1] / 4
INPUT[0] = INPUT[0] - INPUT[0] / 6
RES2 = np.vstack((RES2, RES))
k = k + 1
RES = RES2[
1:,
]
print(RES)
Time = np.arange(100 * (ND + 1)) / (ND + 1)
##Ploting
pl.subplot(311)
pl.plot(Time, RES[:, 0], "c", label="0-6")
pl.plot(Time, RES[:, 1], "b", label="6-10")
pl.plot(Time, RES[:, 2], "g", label="10-20")
pl.plot(Time, RES[:, 3], "r", label="20+")
pl.ylabel("Susceptibles")
pl.xlabel("Time (years)")
pl.legend(loc=1, prop=FontProperties(size="smaller"))
pl.subplot(312)
pl.semilogy(Time, RES[:, 0 + 2 * m], "c", label="0-6")
pl.semilogy(Time, RES[:, 1 + 2 * m], "b", label="6-10")
pl.semilogy(Time, RES[:, 2 + 2 * m], "g", label="10-20")
pl.semilogy(Time, RES[:, 3 + 2 * m], "r", label="20+")
pl.ylabel("Infectious")
pl.xlabel("Time (years)")
pl.legend(loc=1, prop=FontProperties(size="smaller"))
R = np.zeros(4)
pl.subplot(313)
mm = pl.find(Time > (ND - 365.0))
for i in range(4):
R[i] = 1.0 - np.mean(RES[mm, i]) / n[i]
pl.fill(
np.array([0, 0, 6, 6, 6, 6, 10, 10, 10, 10, 20, 20, 20, 20, 75, 75]),
np.array([0, R[0], R[0], 0, 0, R[1], R[1], 0, 0, R[2], R[2], 0, 0, R[3], R[3], 0]),
"r",
)
pl.xlabel("Age-group")
pl.ylabel("Proportion Sero-positive")
pl.xlim((0, 25))
pl.ylim((0, 1))
pl.show()
| [
"numpy.mean",
"pylab.ylim",
"pylab.subplot",
"numpy.hstack",
"pylab.plot",
"pylab.find",
"scipy.integrate.odeint",
"pylab.xlabel",
"matplotlib.font_manager.FontProperties",
"numpy.array",
"numpy.zeros",
"pylab.semilogy",
"numpy.vstack",
"pylab.xlim",
"pylab.ylabel",
"numpy.arange",
"... | [((766, 809), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0 / (55 * 365)]'], {}), '([0.0, 0.0, 0.0, 1.0 / (55 * 365)])\n', (774, 809), True, 'import numpy as np\n'), ((815, 858), 'numpy.array', 'np.array', (['[1.0 / (55 * 365), 0.0, 0.0, 0.0]'], {}), '([1.0 / (55 * 365), 0.0, 0.0, 0.0])\n', (823, 858), True, 'import numpy as np\n'), ((908, 943), 'numpy.array', 'np.array', (['[0.05, 0.01, 0.01, 0.008]'], {}), '([0.05, 0.01, 0.01, 0.008])\n', (916, 943), True, 'import numpy as np\n'), ((949, 991), 'numpy.array', 'np.array', (['[0.0001, 0.0001, 0.0001, 0.0001]'], {}), '([0.0001, 0.0001, 0.0001, 0.0001])\n', (957, 991), True, 'import numpy as np\n'), ((997, 1039), 'numpy.array', 'np.array', (['[0.0001, 0.0001, 0.0001, 0.0001]'], {}), '([0.0001, 0.0001, 0.0001, 0.0001])\n', (1005, 1039), True, 'import numpy as np\n'), ((1045, 1099), 'numpy.array', 'np.array', (['[0.0298, 0.04313333, 0.12313333, 0.72513333]'], {}), '([0.0298, 0.04313333, 0.12313333, 0.72513333])\n', (1053, 1099), True, 'import numpy as np\n'), ((1128, 1263), 'numpy.array', 'np.array', (['([2.089, 2.089, 2.086, 2.037], [2.089, 9.336, 2.086, 2.037], [2.086, 2.086,\n 2.086, 2.037], [2.037, 2.037, 2.037, 2.037])'], {}), '(([2.089, 2.089, 2.086, 2.037], [2.089, 9.336, 2.086, 2.037], [\n 2.086, 2.086, 2.086, 2.037], [2.037, 2.037, 2.037, 2.037]))\n', (1136, 1263), True, 'import numpy as np\n'), ((1355, 1382), 'numpy.hstack', 'np.hstack', (['(S0, E0, I0, R0)'], {}), '((S0, E0, I0, R0))\n', (1364, 1382), True, 'import numpy as np\n'), ((1933, 1973), 'numpy.arange', 'np.arange', (['t_start', '(t_end + t_inc)', 't_inc'], {}), '(t_start, t_end + t_inc, t_inc)\n', (1942, 1973), True, 'import numpy as np\n'), ((1982, 1994), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (1990, 1994), True, 'import numpy as np\n'), ((2999, 3014), 'pylab.subplot', 'pl.subplot', (['(311)'], {}), '(311)\n', (3009, 3014), True, 'import pylab as pl\n'), ((3015, 3057), 'pylab.plot', 'pl.plot', (['Time', 'RES[:, 0]', 
'"""c"""'], {'label': '"""0-6"""'}), "(Time, RES[:, 0], 'c', label='0-6')\n", (3022, 3057), True, 'import pylab as pl\n'), ((3058, 3101), 'pylab.plot', 'pl.plot', (['Time', 'RES[:, 1]', '"""b"""'], {'label': '"""6-10"""'}), "(Time, RES[:, 1], 'b', label='6-10')\n", (3065, 3101), True, 'import pylab as pl\n'), ((3102, 3146), 'pylab.plot', 'pl.plot', (['Time', 'RES[:, 2]', '"""g"""'], {'label': '"""10-20"""'}), "(Time, RES[:, 2], 'g', label='10-20')\n", (3109, 3146), True, 'import pylab as pl\n'), ((3147, 3189), 'pylab.plot', 'pl.plot', (['Time', 'RES[:, 3]', '"""r"""'], {'label': '"""20+"""'}), "(Time, RES[:, 3], 'r', label='20+')\n", (3154, 3189), True, 'import pylab as pl\n'), ((3190, 3215), 'pylab.ylabel', 'pl.ylabel', (['"""Susceptibles"""'], {}), "('Susceptibles')\n", (3199, 3215), True, 'import pylab as pl\n'), ((3216, 3241), 'pylab.xlabel', 'pl.xlabel', (['"""Time (years)"""'], {}), "('Time (years)')\n", (3225, 3241), True, 'import pylab as pl\n'), ((3298, 3313), 'pylab.subplot', 'pl.subplot', (['(312)'], {}), '(312)\n', (3308, 3313), True, 'import pylab as pl\n'), ((3314, 3368), 'pylab.semilogy', 'pl.semilogy', (['Time', 'RES[:, 0 + 2 * m]', '"""c"""'], {'label': '"""0-6"""'}), "(Time, RES[:, 0 + 2 * m], 'c', label='0-6')\n", (3325, 3368), True, 'import pylab as pl\n'), ((3369, 3424), 'pylab.semilogy', 'pl.semilogy', (['Time', 'RES[:, 1 + 2 * m]', '"""b"""'], {'label': '"""6-10"""'}), "(Time, RES[:, 1 + 2 * m], 'b', label='6-10')\n", (3380, 3424), True, 'import pylab as pl\n'), ((3425, 3481), 'pylab.semilogy', 'pl.semilogy', (['Time', 'RES[:, 2 + 2 * m]', '"""g"""'], {'label': '"""10-20"""'}), "(Time, RES[:, 2 + 2 * m], 'g', label='10-20')\n", (3436, 3481), True, 'import pylab as pl\n'), ((3482, 3536), 'pylab.semilogy', 'pl.semilogy', (['Time', 'RES[:, 3 + 2 * m]', '"""r"""'], {'label': '"""20+"""'}), "(Time, RES[:, 3 + 2 * m], 'r', label='20+')\n", (3493, 3536), True, 'import pylab as pl\n'), ((3537, 3560), 'pylab.ylabel', 'pl.ylabel', 
(['"""Infectious"""'], {}), "('Infectious')\n", (3546, 3560), True, 'import pylab as pl\n'), ((3561, 3586), 'pylab.xlabel', 'pl.xlabel', (['"""Time (years)"""'], {}), "('Time (years)')\n", (3570, 3586), True, 'import pylab as pl\n'), ((3646, 3657), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (3654, 3657), True, 'import numpy as np\n'), ((3658, 3673), 'pylab.subplot', 'pl.subplot', (['(313)'], {}), '(313)\n', (3668, 3673), True, 'import pylab as pl\n'), ((3679, 3705), 'pylab.find', 'pl.find', (['(Time > ND - 365.0)'], {}), '(Time > ND - 365.0)\n', (3686, 3705), True, 'import pylab as pl\n'), ((3954, 3976), 'pylab.xlabel', 'pl.xlabel', (['"""Age-group"""'], {}), "('Age-group')\n", (3963, 3976), True, 'import pylab as pl\n'), ((3977, 4014), 'pylab.ylabel', 'pl.ylabel', (['"""Proportion Sero-positive"""'], {}), "('Proportion Sero-positive')\n", (3986, 4014), True, 'import pylab as pl\n'), ((4015, 4031), 'pylab.xlim', 'pl.xlim', (['(0, 25)'], {}), '((0, 25))\n', (4022, 4031), True, 'import pylab as pl\n'), ((4032, 4047), 'pylab.ylim', 'pl.ylim', (['(0, 1)'], {}), '((0, 1))\n', (4039, 4047), True, 'import pylab as pl\n'), ((4048, 4057), 'pylab.show', 'pl.show', ([], {}), '()\n', (4055, 4057), True, 'import pylab as pl\n'), ((863, 895), 'numpy.array', 'np.array', (['[6.0, 4.0, 10.0, 55.0]'], {}), '([6.0, 4.0, 10.0, 55.0])\n', (871, 895), True, 'import numpy as np\n'), ((1451, 1463), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (1459, 1463), True, 'import numpy as np\n'), ((2029, 2065), 'scipy.integrate.odeint', 'spi.odeint', (['diff_eqs', 'INPUT', 't_range'], {}), '(diff_eqs, INPUT, t_range)\n', (2039, 2065), True, 'import scipy.integrate as spi\n'), ((2872, 2894), 'numpy.vstack', 'np.vstack', (['(RES2, RES)'], {}), '((RES2, RES))\n', (2881, 2894), True, 'import numpy as np\n'), ((2951, 2976), 'numpy.arange', 'np.arange', (['(100 * (ND + 1))'], {}), '(100 * (ND + 1))\n', (2960, 2976), True, 'import numpy as np\n'), ((3785, 3853), 'numpy.array', 'np.array', 
(['[0, 0, 6, 6, 6, 6, 10, 10, 10, 10, 20, 20, 20, 20, 75, 75]'], {}), '([0, 0, 6, 6, 6, 6, 10, 10, 10, 10, 20, 20, 20, 20, 75, 75])\n', (3793, 3853), True, 'import numpy as np\n'), ((3859, 3946), 'numpy.array', 'np.array', (['[0, R[0], R[0], 0, 0, R[1], R[1], 0, 0, R[2], R[2], 0, 0, R[3], R[3], 0]'], {}), '([0, R[0], R[0], 0, 0, R[1], R[1], 0, 0, R[2], R[2], 0, 0, R[3], R[\n 3], 0])\n', (3867, 3946), True, 'import numpy as np\n'), ((3264, 3294), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'size': '"""smaller"""'}), "(size='smaller')\n", (3278, 3294), False, 'from matplotlib.font_manager import FontProperties\n'), ((3609, 3639), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'size': '"""smaller"""'}), "(size='smaller')\n", (3623, 3639), False, 'from matplotlib.font_manager import FontProperties\n'), ((3744, 3763), 'numpy.mean', 'np.mean', (['RES[mm, i]'], {}), '(RES[mm, i])\n', (3751, 3763), True, 'import numpy as np\n')] |
import numpy as np
def moving_average(a, n=3) :
"""
perform moving average, return a vector of same length as input
"""
a=a.ravel()
a = np.concatenate(([a[0]]*(n-1),a)) # repeating first values
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
ret=ret[n - 1:] / n
return ret
| [
"numpy.cumsum",
"numpy.concatenate"
] | [((158, 195), 'numpy.concatenate', 'np.concatenate', (['([a[0]] * (n - 1), a)'], {}), '(([a[0]] * (n - 1), a))\n', (172, 195), True, 'import numpy as np\n'), ((226, 251), 'numpy.cumsum', 'np.cumsum', (['a'], {'dtype': 'float'}), '(a, dtype=float)\n', (235, 251), True, 'import numpy as np\n')] |
import click
from train_anomaly_detection import main_func
import numpy as np
import os
# Define base parameters.
dataset_name = 'selfsupervised'
net_name = 'StackConvNet'
xp_path_base = 'log'
data_path = 'data/full'
train_folder = 'train'
val_pos_folder = 'val/wangen_sun_3_pos'
val_neg_folder = 'val/wangen_sun_3_neg'
load_config = None
load_model = None
nu = 0.1
device = 'cuda'
seed = -1
optimizer_name = 'adam'
lr = 0.0001
n_epochs = 150
lr_milestone = (100,)
batch_size = 200
weight_decay = 0.5e-6
ae_optimizer_name = 'adam'
ae_lr = 0.0001
ae_n_epochs = 350
ae_lr_milestone = (250,)
ae_batch_size = 200
ae_weight_decay = 0.5e-6
n_jobs_dataloader = 0
normal_class = 1
batchnorm = False
dropout = False
augment = False
objectives = [
{'objective': 'real-nvp', 'pretrain': True, 'fix_encoder': True}, # 0
{'objective': 'soft-boundary', 'pretrain': True, 'fix_encoder': False}, # 1
{'objective': 'one-class', 'pretrain': True, 'fix_encoder': False}, # 2
{'objective': 'real-nvp', 'pretrain': False, 'fix_encoder': False}, # 3
{'objective': 'real-nvp', 'pretrain': True, 'fix_encoder': False}, # 4
{'objective': 'one-class', 'pretrain': False, 'fix_encoder': False}, # 5
{'objective': 'soft-boundary', 'pretrain': False, 'fix_encoder': False} # 6
]
modalities = [
{'rgb': True , 'ir': False, 'depth': False, 'depth_3d': True , 'normals': False, 'normal_angle': True },
{'rgb': True , 'ir': False, 'depth': False, 'depth_3d': False, 'normals': False, 'normal_angle': False},
{'rgb': False, 'ir': True , 'depth': False, 'depth_3d': False, 'normals': False, 'normal_angle': False},
{'rgb': False, 'ir': False, 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': False},
{'rgb': True , 'ir': False, 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': False},
{'rgb': False, 'ir': True , 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': False},
{'rgb': True , 'ir': True , 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': False},
{'rgb': True , 'ir': True , 'depth': True , 'depth_3d': False, 'normals': True , 'normal_angle': False},
{'rgb': True , 'ir': True , 'depth': False, 'depth_3d': True , 'normals': False, 'normal_angle': True },
{'rgb': True , 'ir': False, 'depth': True , 'depth_3d': False, 'normals': True , 'normal_angle': False},
{'rgb': True , 'ir': False, 'depth': False, 'depth_3d': False, 'normals': True , 'normal_angle': False},
{'rgb': True , 'ir': False, 'depth': False, 'depth_3d': True , 'normals': False, 'normal_angle': False},
{'rgb': True , 'ir': False, 'depth': False, 'depth_3d': False, 'normals': False, 'normal_angle': True },
{'rgb': False, 'ir': False, 'depth': True , 'depth_3d': False, 'normals': True , 'normal_angle': False},
{'rgb': False, 'ir': False, 'depth': False, 'depth_3d': True , 'normals': False, 'normal_angle': True },
{'rgb': True , 'ir': False, 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': True },
{'rgb': True , 'ir': False, 'depth': False, 'depth_3d': True , 'normals': True , 'normal_angle': False},
{'rgb': False, 'ir': False, 'depth': True , 'depth_3d': False, 'normals': False, 'normal_angle': True }
]
N_ITER = 10
auc_mat = np.zeros((N_ITER, len(objectives)+1, len(modalities))) # +1 for Autoencoder
for it in range(N_ITER):
xp_path = os.path.join(xp_path_base, str(it))
for i, obj in enumerate(objectives):
for j, mod in enumerate(modalities):
train_obj = main_func(dataset_name, net_name, xp_path, data_path, train_folder,
val_pos_folder, val_neg_folder, load_config, load_model, obj['objective'], nu,
device, seed, optimizer_name, lr, n_epochs, lr_milestone, batch_size,
weight_decay, obj['pretrain'], ae_optimizer_name, ae_lr, ae_n_epochs,
ae_lr_milestone, ae_batch_size, ae_weight_decay, n_jobs_dataloader, normal_class,
mod['rgb'], mod['ir'], mod['depth'], mod['depth_3d'], mod['normals'],
mod['normal_angle'], batchnorm, dropout, augment, obj['fix_encoder'])
auc = train_obj.results['test_auc']
auc_ae = train_obj.results['test_auc_ae']
auc_mat[it, i,j] = auc
if auc_ae is not None:
auc_mat[it, -1,j] = auc_ae
np.save(os.path.join(xp_path, 'auc.npy'), auc_mat)
np.save(os.path.join(xp_path_base, 'auc.npy'), auc_mat)
print('avg')
print(np.mean(auc_mat, axis=0))
print('std')
print(np.std(auc_mat, axis=0)) | [
"numpy.mean",
"train_anomaly_detection.main_func",
"os.path.join",
"numpy.std"
] | [((4318, 4355), 'os.path.join', 'os.path.join', (['xp_path_base', '"""auc.npy"""'], {}), "(xp_path_base, 'auc.npy')\n", (4330, 4355), False, 'import os\n'), ((4385, 4409), 'numpy.mean', 'np.mean', (['auc_mat'], {'axis': '(0)'}), '(auc_mat, axis=0)\n', (4392, 4409), True, 'import numpy as np\n'), ((4430, 4453), 'numpy.std', 'np.std', (['auc_mat'], {'axis': '(0)'}), '(auc_mat, axis=0)\n', (4436, 4453), True, 'import numpy as np\n'), ((4266, 4298), 'os.path.join', 'os.path.join', (['xp_path', '"""auc.npy"""'], {}), "(xp_path, 'auc.npy')\n", (4278, 4298), False, 'import os\n'), ((3491, 4028), 'train_anomaly_detection.main_func', 'main_func', (['dataset_name', 'net_name', 'xp_path', 'data_path', 'train_folder', 'val_pos_folder', 'val_neg_folder', 'load_config', 'load_model', "obj['objective']", 'nu', 'device', 'seed', 'optimizer_name', 'lr', 'n_epochs', 'lr_milestone', 'batch_size', 'weight_decay', "obj['pretrain']", 'ae_optimizer_name', 'ae_lr', 'ae_n_epochs', 'ae_lr_milestone', 'ae_batch_size', 'ae_weight_decay', 'n_jobs_dataloader', 'normal_class', "mod['rgb']", "mod['ir']", "mod['depth']", "mod['depth_3d']", "mod['normals']", "mod['normal_angle']", 'batchnorm', 'dropout', 'augment', "obj['fix_encoder']"], {}), "(dataset_name, net_name, xp_path, data_path, train_folder,\n val_pos_folder, val_neg_folder, load_config, load_model, obj[\n 'objective'], nu, device, seed, optimizer_name, lr, n_epochs,\n lr_milestone, batch_size, weight_decay, obj['pretrain'],\n ae_optimizer_name, ae_lr, ae_n_epochs, ae_lr_milestone, ae_batch_size,\n ae_weight_decay, n_jobs_dataloader, normal_class, mod['rgb'], mod['ir'],\n mod['depth'], mod['depth_3d'], mod['normals'], mod['normal_angle'],\n batchnorm, dropout, augment, obj['fix_encoder'])\n", (3500, 4028), False, 'from train_anomaly_detection import main_func\n')] |
from src.environments.slippery_grid import SlipperyGrid
import numpy as np
# A modified version of OpenAI Gym FrozenLake
# only the labelling function needs to be specified
sinks = []
for i in range(12, 16):
for j in range(15, 19):
sinks.append([i, j])
# create a SlipperyGrid object
FrozenLake = SlipperyGrid(shape=[20, 20],
initial_state=[0, 10],
slip_probability=0.1,
sink_states=sinks
)
# define the labellings
labels = np.empty([FrozenLake.shape[0], FrozenLake.shape[1]], dtype=object)
labels[0:20, 0:20] = 'safe'
labels[4:8, 9:13] = 'unsafe'
labels[12:16, 15:19] = 'goal1'
labels[15:19, 15:19] = 'goal2'
labels[9:13, 9:13] = 'goal3'
labels[0:4, 15:19] = 'goal4'
# override the labels
FrozenLake.labels = labels
# FrozenLake doesn't have the action "stay"
FrozenLake.action_space = [
"right",
"up",
"left",
"down",
]
| [
"src.environments.slippery_grid.SlipperyGrid",
"numpy.empty"
] | [((311, 407), 'src.environments.slippery_grid.SlipperyGrid', 'SlipperyGrid', ([], {'shape': '[20, 20]', 'initial_state': '[0, 10]', 'slip_probability': '(0.1)', 'sink_states': 'sinks'}), '(shape=[20, 20], initial_state=[0, 10], slip_probability=0.1,\n sink_states=sinks)\n', (323, 407), False, 'from src.environments.slippery_grid import SlipperyGrid\n'), ((543, 609), 'numpy.empty', 'np.empty', (['[FrozenLake.shape[0], FrozenLake.shape[1]]'], {'dtype': 'object'}), '([FrozenLake.shape[0], FrozenLake.shape[1]], dtype=object)\n', (551, 609), True, 'import numpy as np\n')] |
"""
Loads the BVH files that make up the databases and processes them into the format required by our training algorithm.
It does NOT subdivide the clips into overlapping windows and does NOT split the data set into training and validation.
For this, use the script `extract_data_splits.py`.
This code is mostly copied from Holden et al. and tweaked to our purposes where necessary.
"""
import os
import numpy as np
import scipy.ndimage.filters as filters
import BVH as BVH
import Animation as Animation
from Quaternions import Quaternions
from Pivots import Pivots
def softmax(x, **kw):
softness = kw.pop('softness', 1.0)
maxi, mini = np.max(x, **kw), np.min(x, **kw)
return maxi + np.log(softness + np.exp(mini - maxi))
def softmin(x, **kw):
return -softmax(-x, **kw)
def process_file(filename, window=240, window_step=120):
anim, names, frametime = BVH.load(filename)
""" Convert to 60 fps """
anim = anim[::2]
""" Do FK """
global_positions = Animation.positions_global(anim)
""" Remove Uneeded Joints """
positions = global_positions[:, np.array([
0,
2, 3, 4, 5,
7, 8, 9, 10,
12, 13, 15, 16,
18, 19, 20, 22,
25, 26, 27, 29])]
""" Put on Floor """
# positions is (seq_length, n_joints, 3)
fid_l, fid_r = np.array([4, 5]), np.array([8, 9])
foot_heights = np.minimum(positions[:, fid_l, 1], positions[:, fid_r, 1]).min(axis=1)
floor_height = softmin(foot_heights, softness=0.5, axis=0)
positions[:, :, 1] -= floor_height
""" Add Reference Joint """
trajectory_filterwidth = 3
reference = positions[:, 0] * np.array([1, 0, 1])
reference = filters.gaussian_filter1d(reference, trajectory_filterwidth, axis=0, mode='nearest')
positions = np.concatenate([reference[:, np.newaxis], positions], axis=1)
""" Get Foot Contacts """
velfactor, heightfactor = np.array([0.05, 0.05]), np.array([3.0, 2.0])
feet_l_x = (positions[1:, fid_l, 0] - positions[:-1, fid_l, 0]) ** 2
feet_l_y = (positions[1:, fid_l, 1] - positions[:-1, fid_l, 1]) ** 2
feet_l_z = (positions[1:, fid_l, 2] - positions[:-1, fid_l, 2]) ** 2
feet_l_h = positions[:-1, fid_l, 1]
feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor) & (feet_l_h < heightfactor)).astype(np.float)
feet_r_x = (positions[1:, fid_r, 0] - positions[:-1, fid_r, 0]) ** 2
feet_r_y = (positions[1:, fid_r, 1] - positions[:-1, fid_r, 1]) ** 2
feet_r_z = (positions[1:, fid_r, 2] - positions[:-1, fid_r, 2]) ** 2
feet_r_h = positions[:-1, fid_r, 1]
feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor) & (feet_r_h < heightfactor)).astype(np.float)
""" Get Root Velocity """
velocity = (positions[1:, 0:1] - positions[:-1, 0:1]).copy()
""" Remove Translation """
positions[:, :, 0] = positions[:, :, 0] - positions[:, 0:1, 0]
positions[:, :, 2] = positions[:, :, 2] - positions[:, 0:1, 2]
""" Get Forward Direction """
sdr_l, sdr_r, hip_l, hip_r = 14, 18, 2, 6
across1 = positions[:, hip_l] - positions[:, hip_r]
across0 = positions[:, sdr_l] - positions[:, sdr_r]
across = across0 + across1
across = across / np.sqrt((across ** 2).sum(axis=-1))[..., np.newaxis]
direction_filterwidth = 20
forward = np.cross(across, np.array([[0, 1, 0]]))
forward = filters.gaussian_filter1d(forward, direction_filterwidth, axis=0, mode='nearest')
forward = forward / np.sqrt((forward ** 2).sum(axis=-1))[..., np.newaxis]
""" Remove Y Rotation """
target = np.array([[0, 0, 1]]).repeat(len(forward), axis=0)
rotation = Quaternions.between(forward, target)[:, np.newaxis]
positions = rotation * positions
""" Get Root Rotation """
velocity = rotation[1:] * velocity
rvelocity = Pivots.from_quaternions(rotation[1:] * -rotation[:-1]).ps
""" Add Velocity, RVelocity, Foot Contacts to vector """
positions = positions[:-1]
positions = positions.reshape(len(positions), -1)
positions = np.concatenate([positions, velocity[:, :, 0]], axis=-1)
positions = np.concatenate([positions, velocity[:, :, 2]], axis=-1)
positions = np.concatenate([positions, rvelocity], axis=-1)
positions = np.concatenate([positions, feet_l, feet_r], axis=-1)
return positions
def get_files(directory):
return [os.path.join(directory, f) for f in sorted(list(os.listdir(directory)))
if os.path.isfile(os.path.join(directory, f))
and f.endswith('.bvh') and f != 'rest.bvh']
def export_db(input_path, output_path):
print('\nprocessing db {} ...'.format(input_path.split('/')[-1]))
all_files = get_files(input_path)
all_clips = []
lengths = []
for i, item in enumerate(all_files):
print('\r\tprocessing {} of {} ({})'.format(i, len(all_files), item), end='')
clips = process_file(item)
all_clips.append(clips)
lengths.append(clips.shape[0])
data_clips = np.array(all_clips)
mean_length = np.mean(lengths)
std_length = np.std(lengths)
min_length = np.amin(lengths)
max_length = np.amax(lengths)
print('\ngathered {} clips of mean length {} (+/- {}) max length {} min length {}'.format(
data_clips.shape[0], mean_length, std_length, max_length, min_length))
np.savez_compressed(output_path, clips=data_clips)
if __name__ == '__main__':
data_base_path = '/path_to_data_from_holden/motionsynth_data/data/processed/'
output_path = '../data_preprocessed/raw/'
dbs = [(os.path.join(data_base_path, 'cmu'), os.path.join(output_path, 'data_cmu.npz')),
(os.path.join(data_base_path, 'hdm05'), os.path.join(output_path, 'data_hdm05.npz')),
(os.path.join(data_base_path, 'edin_locomotion'), os.path.join(output_path, 'data_edin_locomotion.npz')),
(os.path.join(data_base_path, 'edin_xsens'), os.path.join(output_path, 'data_edin_xsens.npz')),
(os.path.join(data_base_path, 'edin_kinect'), os.path.join(output_path, 'data_edin_kinect.npz')),
(os.path.join(data_base_path, 'edin_misc'), os.path.join(output_path, 'data_edin_misc.npz')),
(os.path.join(data_base_path, 'mhad'), os.path.join(output_path, 'data_mhad.npz')),
(os.path.join(data_base_path, 'edin_punching'), os.path.join(output_path, 'data_edin_punching.npz')),
(os.path.join(data_base_path, 'edin_terrain'), os.path.join(output_path, 'data_edin_terrain.npz'))]
for (db_path, out_path) in dbs:
export_db(db_path, out_path)
| [
"numpy.mean",
"os.listdir",
"numpy.amin",
"numpy.minimum",
"Animation.positions_global",
"os.path.join",
"numpy.min",
"Quaternions.Quaternions.between",
"numpy.max",
"numpy.exp",
"numpy.array",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.concatenate",
"numpy.std",
"Pivots.Pivots.fr... | [((880, 898), 'BVH.load', 'BVH.load', (['filename'], {}), '(filename)\n', (888, 898), True, 'import BVH as BVH\n'), ((993, 1025), 'Animation.positions_global', 'Animation.positions_global', (['anim'], {}), '(anim)\n', (1019, 1025), True, 'import Animation as Animation\n'), ((1686, 1775), 'scipy.ndimage.filters.gaussian_filter1d', 'filters.gaussian_filter1d', (['reference', 'trajectory_filterwidth'], {'axis': '(0)', 'mode': '"""nearest"""'}), "(reference, trajectory_filterwidth, axis=0, mode=\n 'nearest')\n", (1711, 1775), True, 'import scipy.ndimage.filters as filters\n'), ((1787, 1848), 'numpy.concatenate', 'np.concatenate', (['[reference[:, np.newaxis], positions]'], {'axis': '(1)'}), '([reference[:, np.newaxis], positions], axis=1)\n', (1801, 1848), True, 'import numpy as np\n'), ((3350, 3436), 'scipy.ndimage.filters.gaussian_filter1d', 'filters.gaussian_filter1d', (['forward', 'direction_filterwidth'], {'axis': '(0)', 'mode': '"""nearest"""'}), "(forward, direction_filterwidth, axis=0, mode=\n 'nearest')\n", (3375, 3436), True, 'import scipy.ndimage.filters as filters\n'), ((4016, 4071), 'numpy.concatenate', 'np.concatenate', (['[positions, velocity[:, :, 0]]'], {'axis': '(-1)'}), '([positions, velocity[:, :, 0]], axis=-1)\n', (4030, 4071), True, 'import numpy as np\n'), ((4088, 4143), 'numpy.concatenate', 'np.concatenate', (['[positions, velocity[:, :, 2]]'], {'axis': '(-1)'}), '([positions, velocity[:, :, 2]], axis=-1)\n', (4102, 4143), True, 'import numpy as np\n'), ((4160, 4207), 'numpy.concatenate', 'np.concatenate', (['[positions, rvelocity]'], {'axis': '(-1)'}), '([positions, rvelocity], axis=-1)\n', (4174, 4207), True, 'import numpy as np\n'), ((4224, 4276), 'numpy.concatenate', 'np.concatenate', (['[positions, feet_l, feet_r]'], {'axis': '(-1)'}), '([positions, feet_l, feet_r], axis=-1)\n', (4238, 4276), True, 'import numpy as np\n'), ((4961, 4980), 'numpy.array', 'np.array', (['all_clips'], {}), '(all_clips)\n', (4969, 4980), 
True, 'import numpy as np\n'), ((5000, 5016), 'numpy.mean', 'np.mean', (['lengths'], {}), '(lengths)\n', (5007, 5016), True, 'import numpy as np\n'), ((5034, 5049), 'numpy.std', 'np.std', (['lengths'], {}), '(lengths)\n', (5040, 5049), True, 'import numpy as np\n'), ((5067, 5083), 'numpy.amin', 'np.amin', (['lengths'], {}), '(lengths)\n', (5074, 5083), True, 'import numpy as np\n'), ((5101, 5117), 'numpy.amax', 'np.amax', (['lengths'], {}), '(lengths)\n', (5108, 5117), True, 'import numpy as np\n'), ((5297, 5347), 'numpy.savez_compressed', 'np.savez_compressed', (['output_path'], {'clips': 'data_clips'}), '(output_path, clips=data_clips)\n', (5316, 5347), True, 'import numpy as np\n'), ((648, 663), 'numpy.max', 'np.max', (['x'], {}), '(x, **kw)\n', (654, 663), True, 'import numpy as np\n'), ((665, 680), 'numpy.min', 'np.min', (['x'], {}), '(x, **kw)\n', (671, 680), True, 'import numpy as np\n'), ((1324, 1340), 'numpy.array', 'np.array', (['[4, 5]'], {}), '([4, 5])\n', (1332, 1340), True, 'import numpy as np\n'), ((1342, 1358), 'numpy.array', 'np.array', (['[8, 9]'], {}), '([8, 9])\n', (1350, 1358), True, 'import numpy as np\n'), ((1650, 1669), 'numpy.array', 'np.array', (['[1, 0, 1]'], {}), '([1, 0, 1])\n', (1658, 1669), True, 'import numpy as np\n'), ((1910, 1932), 'numpy.array', 'np.array', (['[0.05, 0.05]'], {}), '([0.05, 0.05])\n', (1918, 1932), True, 'import numpy as np\n'), ((1934, 1954), 'numpy.array', 'np.array', (['[3.0, 2.0]'], {}), '([3.0, 2.0])\n', (1942, 1954), True, 'import numpy as np\n'), ((3313, 3334), 'numpy.array', 'np.array', (['[[0, 1, 0]]'], {}), '([[0, 1, 0]])\n', (3321, 3334), True, 'import numpy as np\n'), ((3620, 3656), 'Quaternions.Quaternions.between', 'Quaternions.between', (['forward', 'target'], {}), '(forward, target)\n', (3639, 3656), False, 'from Quaternions import Quaternions\n'), ((3795, 3849), 'Pivots.Pivots.from_quaternions', 'Pivots.from_quaternions', (['(rotation[1:] * -rotation[:-1])'], {}), '(rotation[1:] * 
-rotation[:-1])\n', (3818, 3849), False, 'from Pivots import Pivots\n'), ((4339, 4365), 'os.path.join', 'os.path.join', (['directory', 'f'], {}), '(directory, f)\n', (4351, 4365), False, 'import os\n'), ((1097, 1188), 'numpy.array', 'np.array', (['[0, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 16, 18, 19, 20, 22, 25, 26, 27, 29]'], {}), '([0, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 16, 18, 19, 20, 22, 25, \n 26, 27, 29])\n', (1105, 1188), True, 'import numpy as np\n'), ((1378, 1436), 'numpy.minimum', 'np.minimum', (['positions[:, fid_l, 1]', 'positions[:, fid_r, 1]'], {}), '(positions[:, fid_l, 1], positions[:, fid_r, 1])\n', (1388, 1436), True, 'import numpy as np\n'), ((3554, 3575), 'numpy.array', 'np.array', (['[[0, 0, 1]]'], {}), '([[0, 0, 1]])\n', (3562, 3575), True, 'import numpy as np\n'), ((5518, 5553), 'os.path.join', 'os.path.join', (['data_base_path', '"""cmu"""'], {}), "(data_base_path, 'cmu')\n", (5530, 5553), False, 'import os\n'), ((5555, 5596), 'os.path.join', 'os.path.join', (['output_path', '"""data_cmu.npz"""'], {}), "(output_path, 'data_cmu.npz')\n", (5567, 5596), False, 'import os\n'), ((5611, 5648), 'os.path.join', 'os.path.join', (['data_base_path', '"""hdm05"""'], {}), "(data_base_path, 'hdm05')\n", (5623, 5648), False, 'import os\n'), ((5650, 5693), 'os.path.join', 'os.path.join', (['output_path', '"""data_hdm05.npz"""'], {}), "(output_path, 'data_hdm05.npz')\n", (5662, 5693), False, 'import os\n'), ((5708, 5755), 'os.path.join', 'os.path.join', (['data_base_path', '"""edin_locomotion"""'], {}), "(data_base_path, 'edin_locomotion')\n", (5720, 5755), False, 'import os\n'), ((5757, 5810), 'os.path.join', 'os.path.join', (['output_path', '"""data_edin_locomotion.npz"""'], {}), "(output_path, 'data_edin_locomotion.npz')\n", (5769, 5810), False, 'import os\n'), ((5825, 5867), 'os.path.join', 'os.path.join', (['data_base_path', '"""edin_xsens"""'], {}), "(data_base_path, 'edin_xsens')\n", (5837, 5867), False, 'import os\n'), ((5869, 5917), 'os.path.join', 
'os.path.join', (['output_path', '"""data_edin_xsens.npz"""'], {}), "(output_path, 'data_edin_xsens.npz')\n", (5881, 5917), False, 'import os\n'), ((5932, 5975), 'os.path.join', 'os.path.join', (['data_base_path', '"""edin_kinect"""'], {}), "(data_base_path, 'edin_kinect')\n", (5944, 5975), False, 'import os\n'), ((5977, 6026), 'os.path.join', 'os.path.join', (['output_path', '"""data_edin_kinect.npz"""'], {}), "(output_path, 'data_edin_kinect.npz')\n", (5989, 6026), False, 'import os\n'), ((6041, 6082), 'os.path.join', 'os.path.join', (['data_base_path', '"""edin_misc"""'], {}), "(data_base_path, 'edin_misc')\n", (6053, 6082), False, 'import os\n'), ((6084, 6131), 'os.path.join', 'os.path.join', (['output_path', '"""data_edin_misc.npz"""'], {}), "(output_path, 'data_edin_misc.npz')\n", (6096, 6131), False, 'import os\n'), ((6146, 6182), 'os.path.join', 'os.path.join', (['data_base_path', '"""mhad"""'], {}), "(data_base_path, 'mhad')\n", (6158, 6182), False, 'import os\n'), ((6184, 6226), 'os.path.join', 'os.path.join', (['output_path', '"""data_mhad.npz"""'], {}), "(output_path, 'data_mhad.npz')\n", (6196, 6226), False, 'import os\n'), ((6241, 6286), 'os.path.join', 'os.path.join', (['data_base_path', '"""edin_punching"""'], {}), "(data_base_path, 'edin_punching')\n", (6253, 6286), False, 'import os\n'), ((6288, 6339), 'os.path.join', 'os.path.join', (['output_path', '"""data_edin_punching.npz"""'], {}), "(output_path, 'data_edin_punching.npz')\n", (6300, 6339), False, 'import os\n'), ((6354, 6398), 'os.path.join', 'os.path.join', (['data_base_path', '"""edin_terrain"""'], {}), "(data_base_path, 'edin_terrain')\n", (6366, 6398), False, 'import os\n'), ((6400, 6450), 'os.path.join', 'os.path.join', (['output_path', '"""data_edin_terrain.npz"""'], {}), "(output_path, 'data_edin_terrain.npz')\n", (6412, 6450), False, 'import os\n'), ((717, 736), 'numpy.exp', 'np.exp', (['(mini - maxi)'], {}), '(mini - maxi)\n', (723, 736), True, 'import numpy as np\n'), ((4387, 
4408), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (4397, 4408), False, 'import os\n'), ((4441, 4467), 'os.path.join', 'os.path.join', (['directory', 'f'], {}), '(directory, f)\n', (4453, 4467), False, 'import os\n')] |
#!/usr/bin/env python
# coding: utf-8
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
from mrcnn.config import Config
# import utils
from mrcnn import model as modellib, utils
from mrcnn import visualize
import yaml
from mrcnn.model import log
from PIL import Image
# Root directory of the project
# Root of the Mask R-CNN checkout, two levels above this script.
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library
print("ROOT_DIR= ", ROOT_DIR)
# Directory to save logs and trained model checkpoints.
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "samples/coco/weights/mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)
class ShapesConfig(Config):
    """Training configuration for the leakage-segmentation dataset.

    Derives from the base Config class of Mask R-CNN; the attributes
    below override the defaults for this dataset.  (Originally copied
    from the toy "shapes" sample, hence the NAME.)
    """
    # Give the configuration a recognizable name
    NAME = "shapes"
    # Train on 3 GPUs with 1 image each, so the effective batch size is
    # 3 (GPU_COUNT * IMAGES_PER_GPU).
    GPU_COUNT = 3
    IMAGES_PER_GPU = 1
    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # background + 1 class ("leakage")
    # Use small images for faster training. Set the limits of the small side
    # the large side, and that determines the image shape.
    IMAGE_MIN_DIM = 800
    IMAGE_MAX_DIM = 1280
    # Anchor side lengths in pixels, scaled up 6x from the toy-shapes
    # defaults (8, 16, 32, 64, 128) for these larger images/objects.
    RPN_ANCHOR_SCALES = (8 * 6, 16 * 6, 32 * 6, 64 * 6, 128 * 6)  # anchor side in pixels
    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 32
    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100
    # use small validation steps since the epoch is small
    VALIDATION_STEPS = 5
# Instantiate the training configuration and print it to the console/log.
config = ShapesConfig()
config.display()
# Module-level counter declared `global` in DrugDataset.load_mask; it is
# never incremented there — appears to be a leftover, candidate for removal.
iter_num = 0
def get_ax(rows=1, cols=1, size=8):
    """Create and return a Matplotlib Axes grid for visualizations.

    Provides one central place to control figure sizes in the notebook:
    each cell of the grid is `size` x `size` inches, so changing the
    default `size` rescales every rendered image.
    """
    figsize = (size * cols, size * rows)
    fig, axes = plt.subplots(rows, cols, figsize=figsize)
    return axes
class DrugDataset(utils.Dataset):
    """Leakage dataset built from a labelme export.

    Expected layout under ``dataset_root_path``:
      pic/                        RGB training images
      cv2_mask/<name>.png         8-bit mask; pixel value k marks instance k
      labelme_json/<name>_json/   labelme export (img.png, info.yaml)
    """

    # Count how many instances (objects) are present in a mask image.
    def get_obj_index(self, image):
        """Return the number of instances encoded in ``image``.

        Instances are drawn with pixel values 1..N, so the maximum
        pixel value equals the instance count.
        """
        n = np.max(image)
        return n

    # Parse the yaml file produced by labelme to get the label that
    # corresponds to each mask layer.
    def from_yaml_get_class(self, image_id):
        """Return the list of instance labels for ``image_id``.

        Reads the ``info.yaml`` written by labelme and drops the leading
        background entry, so index i matches mask channel i.
        """
        info = self.image_info[image_id]
        with open(info['yaml_path']) as f:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted files and warns/errors on PyYAML >= 5.1; consider
            # yaml.safe_load for these plain labelme files.
            temp = yaml.load(f.read())
        labels = temp['label_names']
        del labels[0]
        return labels

    def draw_mask(self, num_obj, mask, image, image_id=None):
        """Fill per-instance binary masks from a labelme mask image.

        Pixel value ``index + 1`` in ``image`` marks instance ``index``;
        the matching channel of ``mask`` is set to 1 there.

        Args:
            num_obj: Number of instances (mask channels) to fill.
            mask: uint8 array [height, width, num_obj]; modified in place.
            image: PIL image whose pixel values encode instance ids.
            image_id: Index into ``self.image_info`` used to look up the
                image width/height.  Fix: the original body read a
                module-level ``image_id`` global that only existed once the
                training script's display loop ran; callers must now pass
                it explicitly.

        Returns:
            The filled ``mask`` array.
        """
        info = self.image_info[image_id]
        for index in range(num_obj):
            for i in range(info['width']):
                for j in range(info['height']):
                    at_pixel = image.getpixel((i, j))
                    if at_pixel == index + 1:
                        mask[j, i, index] = 1
        return mask

    def load_shapes(self, count, height, width, img_floder, mask_floder, imglist, dataset_root_path):
        """Register ``count`` images from the labelme export with the dataset.

        Args:
            count: Number of entries from ``imglist`` to register.
            height, width: Nominal image size; the actual size is read back
                from each exported img.png and overrides these values.
            img_floder: Directory holding the RGB images.
            mask_floder: Directory holding the instance-mask PNGs.
            imglist: File names inside ``img_floder``.
            dataset_root_path: Root of the labelme export tree.
        """
        # Register the single foreground class besides the background.
        self.add_class("shapes", 1, "leakage")
        for i in range(count):
            filestr = imglist[i].split(".")[0]
            mask_path = mask_floder + "/" + filestr + ".png"
            yaml_path = dataset_root_path + "labelme_json/" + filestr + "_json/info.yaml"
            print(dataset_root_path + "labelme_json/" + filestr + "_json/img.png")
            # Read the exported image only to recover its true dimensions.
            cv_img = cv2.imread(dataset_root_path + "labelme_json/" + filestr + "_json/img.png")
            self.add_image("shapes", image_id=i, path=img_floder + "/" + imglist[i],
                           width=cv_img.shape[1], height=cv_img.shape[0], mask_path=mask_path, yaml_path=yaml_path)

    def load_mask(self, image_id):
        """Generate instance masks and class ids for the given image ID.

        Returns:
            mask: uint8 array [height, width, num_instances].
            class_ids: int32 array of class indices, one per instance.
        """
        global iter_num
        print("image_id", image_id)
        info = self.image_info[image_id]
        count = 1
        img = Image.open(info['mask_path'])
        num_obj = self.get_obj_index(img)
        mask = np.zeros([info['height'], info['width'], num_obj], dtype=np.uint8)
        # Fix: pass image_id explicitly; draw_mask previously relied on a
        # module-level global of the same name.
        mask = self.draw_mask(num_obj, mask, img, image_id)
        # Resolve overlaps so each pixel belongs to at most one instance.
        # NOTE: with count = 1 the range below is empty, so the loop is
        # currently a no-op; kept for parity with the Mask R-CNN template.
        occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)
        for i in range(count - 2, -1, -1):
            mask[:, :, i] = mask[:, :, i] * occlusion
            occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))
        labels = []
        labels = self.from_yaml_get_class(image_id)
        labels_form = []
        for i in range(len(labels)):
            if labels[i].find("leakage") != -1:
                labels_form.append("leakage")
        class_ids = np.array([self.class_names.index(s) for s in labels_form])
        return mask, class_ids.astype(np.int32)
# Basic settings: directory layout of the labelme-exported training data.
dataset_root_path = os.path.join(ROOT_DIR, "train_data/")
img_floder = dataset_root_path + "pic"
mask_floder = dataset_root_path + "cv2_mask"
# yaml_floder = dataset_root_path
# Every file in pic/ becomes a training sample.
imglist = os.listdir(img_floder)
count = len(imglist)
# Prepare the train and val datasets.
# NOTE(review): the validation set is the first 7 images of the SAME list
# used for training, so train and val overlap — confirm this is intended.
dataset_train = DrugDataset()
dataset_train.load_shapes(count,800,1280, img_floder, mask_floder, imglist,dataset_root_path)
dataset_train.prepare()
dataset_val = DrugDataset()
dataset_val.load_shapes(7, 800,1280,img_floder, mask_floder, imglist,dataset_root_path)
dataset_val.prepare()
# Create the model in training mode.
model = modellib.MaskRCNN(mode="training", config=config,
                  model_dir=MODEL_DIR)
# Which weights to start with?
init_with = "coco"  # imagenet, coco, or last
if init_with == "imagenet":
    model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
    # Load weights trained on MS COCO, but skip layers that
    # are different due to the different number of classes
    # See README for instructions to download the COCO weights
    model.load_weights(COCO_MODEL_PATH, by_name=True,
                       exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                "mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
    # Load the last model you trained and continue training
    model.load_weights(model.find_last()[1], by_name=True)
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE,
            epochs=10,
            layers='heads')
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE / 10,
            epochs=30,
            layers="all")
# Load and display random samples
image_ids = np.random.choice(dataset_train.image_ids, 4)
for image_id in image_ids:
    image = dataset_train.load_image(image_id)
    mask, class_ids = dataset_train.load_mask(image_id)
    visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
| [
"mrcnn.model.MaskRCNN",
"os.path.exists",
"os.listdir",
"PIL.Image.open",
"mrcnn.utils.download_trained_weights",
"numpy.random.choice",
"mrcnn.visualize.display_top_masks",
"numpy.logical_not",
"os.path.join",
"numpy.max",
"numpy.zeros",
"os.path.abspath",
"sys.path.append",
"matplotlib.p... | [((436, 461), 'os.path.abspath', 'os.path.abspath', (['"""../../"""'], {}), "('../../')\n", (451, 461), False, 'import os\n'), ((482, 507), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (497, 507), False, 'import sys\n'), ((592, 622), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (604, 622), False, 'import os\n'), ((679, 743), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""samples/coco/weights/mask_rcnn_coco.h5"""'], {}), "(ROOT_DIR, 'samples/coco/weights/mask_rcnn_coco.h5')\n", (691, 743), False, 'import os\n'), ((5803, 5840), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""train_data/"""'], {}), "(ROOT_DIR, 'train_data/')\n", (5815, 5840), False, 'import os\n'), ((5969, 5991), 'os.listdir', 'os.listdir', (['img_floder'], {}), '(img_floder)\n', (5979, 5991), False, 'import os\n'), ((6327, 6397), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""training"""', 'config': 'config', 'model_dir': 'MODEL_DIR'}), "(mode='training', config=config, model_dir=MODEL_DIR)\n", (6344, 6397), True, 'from mrcnn import model as modellib, utils\n'), ((7810, 7854), 'numpy.random.choice', 'np.random.choice', (['dataset_train.image_ids', '(4)'], {}), '(dataset_train.image_ids, 4)\n', (7826, 7854), True, 'import numpy as np\n'), ((807, 838), 'os.path.exists', 'os.path.exists', (['COCO_MODEL_PATH'], {}), '(COCO_MODEL_PATH)\n', (821, 838), False, 'import os\n'), ((844, 891), 'mrcnn.utils.download_trained_weights', 'utils.download_trained_weights', (['COCO_MODEL_PATH'], {}), '(COCO_MODEL_PATH)\n', (874, 891), False, 'from mrcnn import model as modellib, utils\n'), ((2476, 2536), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'figsize': '(size * cols, size * rows)'}), '(rows, cols, figsize=(size * cols, size * rows))\n', (2488, 2536), True, 'import matplotlib.pyplot as plt\n'), ((7989, 8067), 'mrcnn.visualize.display_top_masks', 'visualize.display_top_masks', 
(['image', 'mask', 'class_ids', 'dataset_train.class_names'], {}), '(image, mask, class_ids, dataset_train.class_names)\n', (8016, 8067), False, 'from mrcnn import visualize\n'), ((2657, 2670), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (2663, 2670), True, 'import numpy as np\n'), ((4969, 4998), 'PIL.Image.open', 'Image.open', (["info['mask_path']"], {}), "(info['mask_path'])\n", (4979, 4998), False, 'from PIL import Image\n'), ((5056, 5122), 'numpy.zeros', 'np.zeros', (["[info['height'], info['width'], num_obj]"], {'dtype': 'np.uint8'}), "([info['height'], info['width'], num_obj], dtype=np.uint8)\n", (5064, 5122), True, 'import numpy as np\n'), ((4255, 4330), 'cv2.imread', 'cv2.imread', (["(dataset_root_path + 'labelme_json/' + filestr + '_json/img.png')"], {}), "(dataset_root_path + 'labelme_json/' + filestr + '_json/img.png')\n", (4265, 4330), False, 'import cv2\n'), ((5193, 5223), 'numpy.logical_not', 'np.logical_not', (['mask[:, :, -1]'], {}), '(mask[:, :, -1])\n', (5207, 5223), True, 'import numpy as np\n'), ((5388, 5417), 'numpy.logical_not', 'np.logical_not', (['mask[:, :, i]'], {}), '(mask[:, :, i])\n', (5402, 5417), True, 'import numpy as np\n')] |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tf_agents.specs import tensor_spec
from tf_agents.policies import tf_policy
from typing import Any, Callable, Iterable, Optional, Sequence, Text, Tuple, Union
import dice_rl.data.dataset as dataset_lib
import dice_rl.utils.common as common_lib
import dice_rl.estimators.estimator as estimator_lib
class TabularBayesDice(object):
  """Robust (Bayesian) policy evaluation via a tabular DICE solver.

  Maintains a mean-field Gaussian posterior over the tabular nu variables
  and trains it with a variational objective; the stationary-distribution
  corrections (zeta) are recovered from posterior samples of nu.
  """

  def __init__(self,
               dataset_spec,
               gamma: Union[float, tf.Tensor],
               reward_fn: Callable = None,
               solve_for_state_action_ratio: bool = True,
               nu_learning_rate: Union[float, tf.Tensor] = 0.1,
               zeta_learning_rate: Union[float, tf.Tensor] = 0.1,
               kl_regularizer: Union[float, tf.Tensor] = 1.,
               eps_std: Union[float, tf.Tensor] = 1):
    """Initializes the solver.

    Args:
      dataset_spec: The spec of the dataset that will be given.
      gamma: The discount factor to use.
      reward_fn: A function that takes in an EnvStep and returns the reward for
        that step. If not specified, defaults to just EnvStep.reward.
      solve_for_state_action_ratio: Whether to solve for state-action density
        ratio. Defaults to True. When solving an environment with a large
        state/action space (taxi), better to set this to False to avoid OOM
        issues.
      nu_learning_rate: Learning rate for nu.
      zeta_learning_rate: Learning rate for zeta.
      kl_regularizer: Regularization constant for D_kl(q || p).
      eps_std: epsilon standard deviation for sampling from the posterior.
    """
    self._dataset_spec = dataset_spec
    self._gamma = gamma
    if reward_fn is None:
      reward_fn = lambda env_step: env_step.reward
    self._reward_fn = reward_fn
    self._kl_regularizer = kl_regularizer
    self._eps_std = eps_std
    self._solve_for_state_action_ratio = solve_for_state_action_ratio
    # Solving for the state ratio requires behavior log-probabilities to
    # correct for the action distribution mismatch.
    if (not self._solve_for_state_action_ratio and
        not self._dataset_spec.has_log_probability()):
      raise ValueError('Dataset must contain log-probability when '
                       'solve_for_state_action_ratio is False.')
    # Get number of states/actions.
    observation_spec = self._dataset_spec.observation
    action_spec = self._dataset_spec.action
    if not common_lib.is_categorical_spec(observation_spec):
      raise ValueError('Observation spec must be discrete and bounded.')
    self._num_states = observation_spec.maximum + 1
    if not common_lib.is_categorical_spec(action_spec):
      raise ValueError('Action spec must be discrete and bounded.')
    self._num_actions = action_spec.maximum + 1
    # One tabular entry per (state, action) pair, or per state only.
    self._dimension = (
        self._num_states * self._num_actions
        if self._solve_for_state_action_ratio else self._num_states)
    # Accumulators filled by prepare_dataset (numpy until the final casts).
    self._td_residuals = np.zeros([self._dimension, self._dimension])
    self._total_weights = np.zeros([self._dimension])
    self._initial_weights = np.zeros([self._dimension])
    self._nu_optimizer = tf.keras.optimizers.Adam(nu_learning_rate)
    self._zeta_optimizer = tf.keras.optimizers.Adam(zeta_learning_rate)
    # Initialize variational Bayes parameters: posterior q(nu) and prior
    # p(nu), both diagonal Gaussians parameterized by mean/log-variance.
    self._nu_mu = tf.Variable(tf.zeros([self._dimension]))
    self._nu_log_sigma = tf.Variable(tf.zeros([self._dimension]))
    self._prior_mu = tf.Variable(tf.zeros([self._dimension]), trainable=True)
    # NOTE(review): prior log-sigma is frozen (trainable=False) while the
    # prior mean is learnable — confirm this asymmetry is intentional.
    self._prior_log_sigma = tf.Variable(
        tf.zeros([self._dimension]), trainable=False)

  def _get_index(self, state, action):
    """Map a (state, action) pair to its flat tabular index.

    When solving for the state-only ratio, the action is ignored.
    """
    if self._solve_for_state_action_ratio:
      return state * self._num_actions + action
    else:
      return state

  def prepare_dataset(self, dataset: dataset_lib.OffpolicyDataset,
                      target_policy: tf_policy.TFPolicy):
    """Accumulate TD-residual and weight statistics from the dataset.

    Walks every valid step of every episode, filling self._td_residuals,
    self._total_weights and self._initial_weights, then normalizes the
    residual matrix and casts the accumulators to float32 tensors.
    Must be called before train_step / estimate_average_reward.
    """
    episodes, valid_steps = dataset.get_all_episodes()
    tfagents_episodes = dataset_lib.convert_to_tfagents_timestep(episodes)
    for episode_num in range(tf.shape(valid_steps)[0]):
      # Precompute probabilities for this episode.
      this_episode = tf.nest.map_structure(lambda t: t[episode_num], episodes)
      first_step = tf.nest.map_structure(lambda t: t[0], this_episode)
      this_tfagents_episode = dataset_lib.convert_to_tfagents_timestep(
          this_episode)
      episode_target_log_probabilities = target_policy.distribution(
          this_tfagents_episode).action.log_prob(this_episode.action)
      episode_target_probs = target_policy.distribution(
          this_tfagents_episode).action.probs_parameter()
      for step_num in range(tf.shape(valid_steps)[1] - 1):
        this_step = tf.nest.map_structure(lambda t: t[episode_num, step_num],
                                          episodes)
        next_step = tf.nest.map_structure(
            lambda t: t[episode_num, step_num + 1], episodes)
        if this_step.is_last() or not valid_steps[episode_num, step_num]:
          continue
        weight = 1.0
        nu_index = self._get_index(this_step.observation, this_step.action)
        # Diagonal term of (B nu - nu): the -nu part at this index.
        self._td_residuals[nu_index, nu_index] += -weight
        self._total_weights[nu_index] += weight
        policy_ratio = 1.0
        if not self._solve_for_state_action_ratio:
          # Importance weight target/behavior for the state-only ratio.
          policy_ratio = tf.exp(episode_target_log_probabilities[step_num] -
                                this_step.get_log_probability())
        # Need to weight next nu by importance weight.
        next_weight = (
            weight if self._solve_for_state_action_ratio else policy_ratio *
            weight)
        # Off-diagonal terms: gamma-discounted expectation over the
        # target policy's next-action distribution (the B nu part).
        next_probs = episode_target_probs[step_num + 1]
        for next_action, next_prob in enumerate(next_probs):
          next_nu_index = self._get_index(next_step.observation, next_action)
          self._td_residuals[next_nu_index, nu_index] += (
              next_prob * self._gamma * next_weight)
      # Accumulate the initial-state distribution, spread over the target
      # policy's action probabilities at the first step.
      initial_probs = episode_target_probs[0]
      for initial_action, initial_prob in enumerate(initial_probs):
        initial_nu_index = self._get_index(first_step.observation,
                                           initial_action)
        self._initial_weights[initial_nu_index] += weight * initial_prob
    self._initial_weights = tf.cast(self._initial_weights, tf.float32)
    self._total_weights = tf.cast(self._total_weights, tf.float32)
    # Normalize each residual column by sqrt of its visitation count
    # (epsilon-guarded against unvisited entries).
    self._td_residuals = self._td_residuals / np.sqrt(
        1e-8 + self._total_weights)[None, :]
    self._td_errors = tf.cast(
        np.dot(self._td_residuals, self._td_residuals.T), tf.float32)
    self._td_residuals = tf.cast(self._td_residuals, tf.float32)

  @tf.function
  def train_step(self, regularizer: float = 1e-6):
    # NOTE(review): the `regularizer` argument is currently unused.
    # Solve primal form min (1-g) * E[nu0] + E[(B nu - nu)^2].
    with tf.GradientTape() as tape:
      # Reparameterization trick: sample nu ~ q(nu) = N(mu, sigma^2).
      nu_sigma = tf.sqrt(tf.exp(self._nu_log_sigma))
      eps = tf.random.normal(tf.shape(nu_sigma), 0, self._eps_std)
      nu = self._nu_mu + nu_sigma * eps
      init_nu_loss = tf.einsum('m,m', (1 - self._gamma) * self._initial_weights,
                              nu)
      residuals = tf.einsum('n,nm->m', nu, self._td_residuals)
      bellman_loss = 0.5 * tf.einsum('m,m', residuals, residuals)
      prior_sigma = tf.sqrt(tf.exp(self._prior_log_sigma))
      prior_var = tf.square(prior_sigma)
      # NOTE(review): the line below overwrites the learned prior variance
      # with a fixed 1. — presumably a deliberate simplification/ablation;
      # confirm before removing either line.
      prior_var = 1.
      # Elementwise negative KL(q || p) between the diagonal Gaussians.
      neg_kl = (0.5 * (1. - 2. * tf.math.log(prior_sigma / nu_sigma + 1e-8) -
                       (self._nu_mu - self._prior_mu)**2 / prior_var -
                       nu_sigma**2 / prior_var))
      loss = init_nu_loss + bellman_loss - self._kl_regularizer * neg_kl
    grads = tape.gradient(loss, [
        self._nu_mu, self._nu_log_sigma, self._prior_mu, self._prior_log_sigma
    ])
    self._nu_optimizer.apply_gradients(
        zip(grads, [
            self._nu_mu, self._nu_log_sigma, self._prior_mu,
            self._prior_log_sigma
        ]))
    return loss

  def estimate_average_reward(self,
                              dataset: dataset_lib.OffpolicyDataset,
                              target_policy: tf_policy.TFPolicy,
                              num_samples=100):
    """Estimates value (average per-step reward) of policy.

    The estimation is based on zeta values derived from posterior samples
    of nu, so one should run train_step repeatedly (after
    prepare_dataset) before calling this function.

    Args:
      dataset: The dataset to sample experience from.
      target_policy: The policy whose value we want to estimate.
      num_samples: number of posterior samples.

    Returns:
      A tensor with num_samples samples of estimated average per-step reward
      of the target policy.
    """
    # Draw num_samples posterior samples of nu via reparameterization.
    nu_sigma = tf.sqrt(tf.exp(self._nu_log_sigma))
    eps = tf.random.normal(
        tf.concat([[num_samples], tf.shape(nu_sigma)], axis=-1), 0,
        self._eps_std)
    nu = self._nu_mu + nu_sigma * eps
    # Convert each nu sample into zeta (stationary density corrections),
    # undoing the sqrt-count normalization applied in prepare_dataset.
    self._zeta = (
        tf.einsum('bn,nm->bm', nu, self._td_residuals) /
        tf.math.sqrt(1e-8 + self._total_weights))

    def weight_fn(env_step):
      # Look up the zeta correction for each step, per posterior sample.
      index = self._get_index(env_step.observation, env_step.action)
      zeta = tf.gather(
          self._zeta, tf.tile(index[None, :], [num_samples, 1]), batch_dims=1)
      policy_ratio = 1.0
      if not self._solve_for_state_action_ratio:
        tfagents_timestep = dataset_lib.convert_to_tfagents_timestep(env_step)
        target_log_probabilities = target_policy.distribution(
            tfagents_timestep).action.log_prob(env_step.action)
        policy_ratio = tf.exp(target_log_probabilities -
                              env_step.get_log_probability())
      return tf.cast(zeta * policy_ratio, tf.float32)

    return estimator_lib.get_fullbatch_average(
        dataset,
        limit=None,
        by_steps=True,
        reward_fn=self._reward_fn,
        weight_fn=weight_fn)
| [
"numpy.sqrt",
"tensorflow.compat.v2.keras.optimizers.Adam",
"tensorflow.compat.v2.zeros",
"dice_rl.utils.common.is_categorical_spec",
"dice_rl.data.dataset.convert_to_tfagents_timestep",
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.square",
"tensorflow.compat.v2.einsum",
"tensorf... | [((3547, 3591), 'numpy.zeros', 'np.zeros', (['[self._dimension, self._dimension]'], {}), '([self._dimension, self._dimension])\n', (3555, 3591), True, 'import numpy as np\n'), ((3618, 3645), 'numpy.zeros', 'np.zeros', (['[self._dimension]'], {}), '([self._dimension])\n', (3626, 3645), True, 'import numpy as np\n'), ((3674, 3701), 'numpy.zeros', 'np.zeros', (['[self._dimension]'], {}), '([self._dimension])\n', (3682, 3701), True, 'import numpy as np\n'), ((3728, 3770), 'tensorflow.compat.v2.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['nu_learning_rate'], {}), '(nu_learning_rate)\n', (3752, 3770), True, 'import tensorflow.compat.v2 as tf\n'), ((3798, 3842), 'tensorflow.compat.v2.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['zeta_learning_rate'], {}), '(zeta_learning_rate)\n', (3822, 3842), True, 'import tensorflow.compat.v2 as tf\n'), ((4553, 4603), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['episodes'], {}), '(episodes)\n', (4593, 4603), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((6861, 6903), 'tensorflow.compat.v2.cast', 'tf.cast', (['self._initial_weights', 'tf.float32'], {}), '(self._initial_weights, tf.float32)\n', (6868, 6903), True, 'import tensorflow.compat.v2 as tf\n'), ((6930, 6970), 'tensorflow.compat.v2.cast', 'tf.cast', (['self._total_weights', 'tf.float32'], {}), '(self._total_weights, tf.float32)\n', (6937, 6970), True, 'import tensorflow.compat.v2 as tf\n'), ((7197, 7236), 'tensorflow.compat.v2.cast', 'tf.cast', (['self._td_residuals', 'tf.float32'], {}), '(self._td_residuals, tf.float32)\n', (7204, 7236), True, 'import tensorflow.compat.v2 as tf\n'), ((10208, 10331), 'dice_rl.estimators.estimator.get_fullbatch_average', 'estimator_lib.get_fullbatch_average', (['dataset'], {'limit': 'None', 'by_steps': '(True)', 'reward_fn': 'self._reward_fn', 'weight_fn': 'weight_fn'}), '(dataset, limit=None, by_steps=True,\n reward_fn=self._reward_fn, 
weight_fn=weight_fn)\n', (10243, 10331), True, 'import dice_rl.estimators.estimator as estimator_lib\n'), ((3035, 3083), 'dice_rl.utils.common.is_categorical_spec', 'common_lib.is_categorical_spec', (['observation_spec'], {}), '(observation_spec)\n', (3065, 3083), True, 'import dice_rl.utils.common as common_lib\n'), ((3222, 3265), 'dice_rl.utils.common.is_categorical_spec', 'common_lib.is_categorical_spec', (['action_spec'], {}), '(action_spec)\n', (3252, 3265), True, 'import dice_rl.utils.common as common_lib\n'), ((3920, 3947), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['[self._dimension]'], {}), '([self._dimension])\n', (3928, 3947), True, 'import tensorflow.compat.v2 as tf\n'), ((3986, 4013), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['[self._dimension]'], {}), '([self._dimension])\n', (3994, 4013), True, 'import tensorflow.compat.v2 as tf\n'), ((4048, 4075), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['[self._dimension]'], {}), '([self._dimension])\n', (4056, 4075), True, 'import tensorflow.compat.v2 as tf\n'), ((4142, 4169), 'tensorflow.compat.v2.zeros', 'tf.zeros', (['[self._dimension]'], {}), '([self._dimension])\n', (4150, 4169), True, 'import tensorflow.compat.v2 as tf\n'), ((4732, 4789), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[episode_num])', 'episodes'], {}), '(lambda t: t[episode_num], episodes)\n', (4753, 4789), True, 'import tensorflow.compat.v2 as tf\n'), ((4809, 4860), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[0])', 'this_episode'], {}), '(lambda t: t[0], this_episode)\n', (4830, 4860), True, 'import tensorflow.compat.v2 as tf\n'), ((4891, 4945), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['this_episode'], {}), '(this_episode)\n', (4931, 4945), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((7110, 7158), 'numpy.dot', 'np.dot', (['self._td_residuals', 'self._td_residuals.T'], {}), 
'(self._td_residuals, self._td_residuals.T)\n', (7116, 7158), True, 'import numpy as np\n'), ((7376, 7393), 'tensorflow.compat.v2.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7391, 7393), True, 'import tensorflow.compat.v2 as tf\n'), ((7584, 7647), 'tensorflow.compat.v2.einsum', 'tf.einsum', (['"""m,m"""', '((1 - self._gamma) * self._initial_weights)', 'nu'], {}), "('m,m', (1 - self._gamma) * self._initial_weights, nu)\n", (7593, 7647), True, 'import tensorflow.compat.v2 as tf\n'), ((7697, 7741), 'tensorflow.compat.v2.einsum', 'tf.einsum', (['"""n,nm->m"""', 'nu', 'self._td_residuals'], {}), "('n,nm->m', nu, self._td_residuals)\n", (7706, 7741), True, 'import tensorflow.compat.v2 as tf\n'), ((7886, 7908), 'tensorflow.compat.v2.square', 'tf.square', (['prior_sigma'], {}), '(prior_sigma)\n', (7895, 7908), True, 'import tensorflow.compat.v2 as tf\n'), ((9229, 9255), 'tensorflow.compat.v2.exp', 'tf.exp', (['self._nu_log_sigma'], {}), '(self._nu_log_sigma)\n', (9235, 9255), True, 'import tensorflow.compat.v2 as tf\n'), ((9441, 9487), 'tensorflow.compat.v2.einsum', 'tf.einsum', (['"""bn,nm->bm"""', 'nu', 'self._td_residuals'], {}), "('bn,nm->bm', nu, self._td_residuals)\n", (9450, 9487), True, 'import tensorflow.compat.v2 as tf\n'), ((9498, 9539), 'tensorflow.compat.v2.math.sqrt', 'tf.math.sqrt', (['(1e-08 + self._total_weights)'], {}), '(1e-08 + self._total_weights)\n', (9510, 9539), True, 'import tensorflow.compat.v2 as tf\n'), ((10155, 10195), 'tensorflow.compat.v2.cast', 'tf.cast', (['(zeta * policy_ratio)', 'tf.float32'], {}), '(zeta * policy_ratio, tf.float32)\n', (10162, 10195), True, 'import tensorflow.compat.v2 as tf\n'), ((4634, 4655), 'tensorflow.compat.v2.shape', 'tf.shape', (['valid_steps'], {}), '(valid_steps)\n', (4642, 4655), True, 'import tensorflow.compat.v2 as tf\n'), ((5291, 5358), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[episode_num, step_num])', 'episodes'], {}), '(lambda t: t[episode_num, 
step_num], episodes)\n', (5312, 5358), True, 'import tensorflow.compat.v2 as tf\n'), ((5421, 5492), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['(lambda t: t[episode_num, step_num + 1])', 'episodes'], {}), '(lambda t: t[episode_num, step_num + 1], episodes)\n', (5442, 5492), True, 'import tensorflow.compat.v2 as tf\n'), ((7017, 7053), 'numpy.sqrt', 'np.sqrt', (['(1e-08 + self._total_weights)'], {}), '(1e-08 + self._total_weights)\n', (7024, 7053), True, 'import numpy as np\n'), ((7428, 7454), 'tensorflow.compat.v2.exp', 'tf.exp', (['self._nu_log_sigma'], {}), '(self._nu_log_sigma)\n', (7434, 7454), True, 'import tensorflow.compat.v2 as tf\n'), ((7485, 7503), 'tensorflow.compat.v2.shape', 'tf.shape', (['nu_sigma'], {}), '(nu_sigma)\n', (7493, 7503), True, 'import tensorflow.compat.v2 as tf\n'), ((7769, 7807), 'tensorflow.compat.v2.einsum', 'tf.einsum', (['"""m,m"""', 'residuals', 'residuals'], {}), "('m,m', residuals, residuals)\n", (7778, 7807), True, 'import tensorflow.compat.v2 as tf\n'), ((7837, 7866), 'tensorflow.compat.v2.exp', 'tf.exp', (['self._prior_log_sigma'], {}), '(self._prior_log_sigma)\n', (7843, 7866), True, 'import tensorflow.compat.v2 as tf\n'), ((9685, 9726), 'tensorflow.compat.v2.tile', 'tf.tile', (['index[None, :]', '[num_samples, 1]'], {}), '(index[None, :], [num_samples, 1])\n', (9692, 9726), True, 'import tensorflow.compat.v2 as tf\n'), ((9844, 9894), 'dice_rl.data.dataset.convert_to_tfagents_timestep', 'dataset_lib.convert_to_tfagents_timestep', (['env_step'], {}), '(env_step)\n', (9884, 9894), True, 'import dice_rl.data.dataset as dataset_lib\n'), ((9319, 9337), 'tensorflow.compat.v2.shape', 'tf.shape', (['nu_sigma'], {}), '(nu_sigma)\n', (9327, 9337), True, 'import tensorflow.compat.v2 as tf\n'), ((5240, 5261), 'tensorflow.compat.v2.shape', 'tf.shape', (['valid_steps'], {}), '(valid_steps)\n', (5248, 5261), True, 'import tensorflow.compat.v2 as tf\n'), ((7963, 8006), 'tensorflow.compat.v2.math.log', 'tf.math.log', 
(['(prior_sigma / nu_sigma + 1e-08)'], {}), '(prior_sigma / nu_sigma + 1e-08)\n', (7974, 8006), True, 'import tensorflow.compat.v2 as tf\n')] |
import pandas as pd
import numpy as np
from collections import defaultdict
from itertools import combinations
# Alpha interval of .95
# corrected for multiple comparisons
Z_MULT = 2.98
def calculate_significance(array):
    """Build a confidence interval for ``array`` and test it for significance."""
    bounds = interval_from_values(array)
    return is_sig(bounds[0], bounds[1])
def interval_from_values(array):
    """Return the module-wide (Z_MULT) confidence interval around the sample mean."""
    sample_mean = array.mean()
    sample_std = array.std()
    return interval(sample_mean, sample_std, Z_MULT)
def interval(mean, std, z):
    """Return the (lower, upper) confidence bounds ``mean ± z * std``."""
    half_width = std * z
    return mean - half_width, mean + half_width
def is_sig(upper, lower):
    """Return True when 0 lies outside the open interval spanned by the two bounds.

    Note: despite the parameter names, the check is symmetric in its arguments —
    it only asks whether 0 falls strictly between them.
    """
    zero_inside = upper < 0 < lower or upper > 0 > lower
    return not zero_inside
if __name__ == "__main__":
    df = pd.read_csv("/Users/stephantulkens/Google Drive/code/r/lrec/experiment_3_eng-uk_words.csv")
    values = defaultdict(list)
    for x in range(10000):
        # DataFrame.as_matrix() was removed in pandas 1.0; .to_numpy() is the
        # supported replacement with identical semantics here.
        for y in df[df.iter == x].to_numpy():
            # key each measurement series by the first two columns of the row
            name = "{}-{}".format(y[0], y[1])
            values[name].append(y[-2])
    values = {k: np.array(v) for k, v in values.items()}
    # per-series summary stats; kept for interactive inspection (unused below)
    values_stat = {k: (v.mean(), v.std()) for k, v in values.items()}
    # pairwise significance test on the difference of every pair of series
    for (k1, v1), (k2, v2) in combinations(values.items(), 2):
        print(k1, k2, calculate_significance(v1 - v2))
| [
"numpy.array",
"collections.defaultdict",
"pandas.read_csv"
] | [((776, 877), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/stephantulkens/Google Drive/code/r/lrec/experiment_3_eng-uk_words.csv"""'], {}), "(\n '/Users/stephantulkens/Google Drive/code/r/lrec/experiment_3_eng-uk_words.csv'\n )\n", (787, 877), True, 'import pandas as pd\n'), ((882, 899), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (893, 899), False, 'from collections import defaultdict\n'), ((1078, 1089), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (1086, 1089), True, 'import numpy as np\n')] |
"""<NAME>., 2019 - 2020. All rights reserved.
This file process the IO for the Text similarity """
import math
import os
import datetime
import shutil
import time
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import similarity.similarity_logging as cl
LOG = cl.get_logger()
def is_nan(value):
    """ True iff value converts to a float NaN (used to spot empty spreadsheet cells);
    non-numeric text is not NaN """
    try:
        as_float = float(value)
    except ValueError:
        # ordinary text (e.g. a real ID string) cannot be NaN
        return False
    return math.isnan(as_float)
class SimilarityIO:
    """ This class is used for IO Processing the text similarity tool.
    User input file is fetched here, also intermediate file as well as
    the final recommendation creating are tasks for this class """

    def __init__(self, file_path, uniq_id, col_int, filter_range="60,100", num_html_row=100, is_new_text=False,
                 new_text=None, report_row_filter=500000):
        """ Constructor for Similarity input output processor, which initializes the input variables needed for IO
        processing.

        file_path: input xlsx/csv file (or the directory used for results)
        uniq_id: column index holding the unique ID
        col_int: comma-separated column indices of interest
        filter_range: "low,high" similarity percentage window kept in the report
        num_html_row: number of rows shown in the brief html report
        is_new_text: when truthy, new_text is prepended as an extra row
        report_row_filter: max rows per generated xlsx chunk """
        LOG.info("\nSimilarity_UI \nValues passed:\n") # pragma: no mutate
        self.file_path = file_path
        LOG.info("Path:%s", str(self.file_path)) # pragma: no mutate
        self.uniq_id = uniq_id
        LOG.info("\nUnique ID Column:%s", str(self.uniq_id)) # pragma: no mutate
        self.col_int = col_int
        LOG.info("\nColumns of Interest:%s", str(self.col_int)) # pragma: no mutate
        self.filter_range = str(filter_range)
        LOG.info("\nfilter_range value:%s", str(self.filter_range)) # pragma: no mutate
        self.num_html_row = num_html_row
        LOG.info("\nnumber of html row:%s", str(self.num_html_row)) # pragma: no mutate
        self.report_row_filter = report_row_filter
        LOG.info("\nnumber of html row split filter:%s", str(self.report_row_filter)) # pragma: no mutate
        self.is_new_text = is_new_text
        self.new_text = new_text
        LOG.info("\nNew_text:%s", str(self.new_text)) # pragma: no mutate
        # populated later by __read_to_panda_df / __set_uniq_header
        self.data_frame = None
        self.uniq_header = None

    def __get_file_path(self):
        """ Function used for getting the file path where the results can be stored /
        from where input is provided"""
        if os.path.isfile(self.file_path):
            return str(os.path.dirname(self.file_path))
        # the path already is a directory
        return self.file_path

    def __get_file_name(self):
        """ Function used for getting the input file name which can be further used for naming
        the result """
        if os.path.isfile(self.file_path):
            file_path = self.file_path.split("/")
            # file name without extension
            return os.path.splitext(file_path[-1])[0]
        return "similarity"

    def __get_header(self):
        """ Function to fetch the header from the input file read in the dataframe """
        return list(self.data_frame.columns.values)

    def __set_uniq_header(self):
        """ Function to fetch the unique ID header """
        sheet_headers = self.__get_header()
        self.uniq_header = sheet_headers[int(self.uniq_id)]

    def __get_duplicate_id(self):
        """ Function which identifies if any duplicate ID present in the input file """
        # forward-fill merged/empty cells, then blank out consecutive repeats so
        # only genuinely re-occurring IDs are flagged by .duplicated()
        unique_id_frame = pd.Series(self.data_frame[self.uniq_header]).fillna(method='ffill')
        unique_id_frame = unique_id_frame.mask((unique_id_frame.shift(1) == unique_id_frame))
        __duplicated_list = list(unique_id_frame.duplicated())
        __du_list = []
        # Remove the 'NaN' in case of empty cell and filter only IDs
        for key, item in enumerate(__duplicated_list):
            if item:
                __du_list.append(unique_id_frame[key])
        du_list = list(map(lambda x: 0 if is_nan(x) else x, __du_list))
        __data = {"Duplicate ID": [nonzero for nonzero in du_list if nonzero != 0]}
        # Create DataFrame and write
        self.__write_xlsx(pd.DataFrame(__data), "Duplicate_ID")

    def __get_ip_file_type(self):
        """ Function to return the file extension type (upper-cased, e.g. XLSX/CSV)"""
        file_type = self.file_path.split(".")[-1]
        return file_type.upper()

    def __read_to_panda_df(self):
        """ Function which read the input data/xlsx to a pandas Data frame.
        Returns True on success, False on an invalid path or empty input. """
        if not os.path.exists(self.file_path):
            LOG.error("\nFile path is invalid") # pragma: no mutate
            return False
        # dispatch on the file extension
        function_dict = {
            "XLSX": lambda x: pd.read_excel(self.file_path),
            "CSV": lambda x: pd.read_csv(self.file_path)
        }
        self.data_frame = function_dict[self.__get_ip_file_type()](self.file_path)
        if self.data_frame.empty:
            LOG.error("\nInput data is incorrect/ file is invalid/"
                      "It has more than one sheet") # pragma: no mutate
            return False
        return True

    def __get_needed_df_header(self, uniq_id_header, sheet_headers):
        """ Function to fetch only the Unique ID + column of interest as per user input """
        self.col_int = list(self.col_int.split(','))
        __column_of_interest_header = [sheet_headers[int(i)] for i in self.col_int]
        __all_col_int = ",".join(str(potion) for potion in __column_of_interest_header)
        # list of header names: unique-ID header first, then the columns of interest
        return (uniq_id_header + "," + __all_col_int).split(",")

    def __refine_df(self):
        """ Create/Modify data frame with only needed contents as per user input """
        sheet_headers = self.__get_header()
        # forward-fill the unique-ID column (merged cells), then keep only the needed columns
        self.data_frame[sheet_headers[int(self.uniq_id)]] = self.data_frame[
            sheet_headers[int(self.uniq_id)]].ffill()
        self.data_frame = self.data_frame[self.__get_needed_df_header(
            sheet_headers[int(self.uniq_id)], sheet_headers)]

    def create_merged_df(self):
        """ Merge the text so as to form two column one with unique ID , other with merged
        content in steps """
        self.data_frame = (self.data_frame.set_index([self.uniq_header])
                           .apply(lambda x: " ".join(x.dropna()), axis=1)
                           .reset_index(name="Steps"))
        # collapse multiple rows with the same ID into one space-joined "Steps" string
        self.data_frame = self.data_frame.groupby(self.uniq_header)["Steps"] \
            .apply(' '.join).reset_index()

    def __create_mergrd_file(self):
        """ Create a copy of the merged content so that user can analyse """
        self.__write_xlsx(self.data_frame, "merged_steps")

    def __write_xlsx(self, data_f, name):
        """ Function which write the dataframe to xlsx """
        data_f.reset_index(inplace=True, drop=True)
        # split very large frames into chunks of report_row_filter rows, one workbook each
        list_of_dfs = [data_f.loc[i:i + self.report_row_filter - 1, :]
                       for i in range(0, data_f.shape[0], self.report_row_filter)]
        for report_df_index, df_content in enumerate(list_of_dfs):
            # output name: <input>_<name>_<chunk-index>_<timestamp>.xlsx
            file_path = os.path.join(self.__get_file_path(), self.__get_file_name() + "_" + name + "_"
                                     + str(report_df_index) + "_" +
                                     str(datetime.datetime.fromtimestamp(time.time()).strftime( # pragma: no mutate
                                         '%H-%M-%S_%d_%m_%Y')))
            writer = pd.ExcelWriter("%s.xlsx" % file_path, engine="xlsxwriter")
            df_content.to_excel(writer, sheet_name=name)
            writer.save()

    def __new_text_df(self):
        """ Function which is created to form the new dataframe to include new text if
        entered in UI """
        # the new text gets a placeholder ID and is prepended to the frame
        __new_df = pd.DataFrame({self.uniq_header: ["New/ID_TBD"], "Steps": [self.new_text]})
        self.data_frame = __new_df.append(self.data_frame, ignore_index=True)

    @staticmethod
    def set_column_width(df_column):
        """
        Function to split the long line in the steps column
        df_column: data frame column value
        Returns: data frame column value after splitting
        """
        specifier_column = []
        spe_data = ""
        line_length = 200
        for i in range(len(df_column)):
            for line in str(df_column.iat[i, 0]).splitlines():
                if len(line) > line_length:
                    # hard-wrap over-long lines every line_length characters
                    spe_data = spe_data + "\r\n".join(line[i:i + line_length] for i in range(0, len(line), line_length))
                else:
                    spe_data = spe_data + line + "\r\n"
            specifier_column.append(spe_data)
            spe_data = ""
        return specifier_column

    def __write_html(self, html_data_frame):
        """ Function which is used to report out the top similarity match defaulted to 10 rows """
        html_file_path = os.path.join(self.__get_file_path(), self.__get_file_name() + "_" + "brief_report_" +
                                      str(datetime.datetime.fromtimestamp(time.time()).strftime( # pragma: no mutate
                                          '%H-%M-%S_%d_%m_%Y')) + ".html")
        # wrap long ID strings so the table stays readable
        html_data_frame['UNIQ ID'] = html_data_frame['UNIQ ID'].apply(str).str.wrap(80)
        html_data_frame['POTENTIAL MATCH'] = html_data_frame['POTENTIAL MATCH'].apply(str).str.wrap(80)
        html_data_frame.sort_values('SIMILARITY', ascending=False, inplace=True)
        pd.set_option('colheader_justify', 'center')
        html_string = '''
        <html>
          <head><title>HTML Pandas Dataframe with CSS</title></head>
          <link rel="stylesheet" type="text/css" href="df_style.css"/>
          <h1 style="font-size:50px;">Brief Report on Similarity Analysis</h1>
          <h2 style="font-size:20px;font-style:italic;">Note: This is a brief report. For details
           please refer 'csv/xlsx' in same folder</h2>
          <body>
            {table}
          </body>
        </html>
        '''
        with open(html_file_path, 'w', encoding='utf-8') as html_file:
            html_file.write(html_string.format(table=html_data_frame.to_html(classes='mystyle')).
                            replace(r'\r\n', "<br>").replace(r'\n', "<br>").replace(r'\r', "<br>"))
        # ship the stylesheet next to the generated report
        shutil.copy(os.path.join(os.path.dirname(__file__), "df_style.css"), os.path.join(self.__get_file_path(),
                                                                                         "df_style.css"))

    def report(self, brief_report):
        """ Function which report the highest similarity match in html and xlsx output based on input argument (
        defaulted to 100 rows #no in html """
        if not brief_report.empty:
            # join the merged steps back onto both sides of each matched pair
            html_df = self.data_frame.rename(columns={self.uniq_header: 'UNIQ ID', "Steps": "Steps"})
            html_df['Steps'] = self.set_column_width(html_df[['Steps']])
            temp_data_frame1 = (pd.merge(html_df.drop(['Potential Match'], axis=1), brief_report, on=['UNIQ ID'],
                                         how='inner'))
            html_df.rename(columns={'UNIQ ID': 'POTENTIAL MATCH', "Steps": "Steps"}, inplace=True)
            temp_data_frame2 = ((pd.merge(html_df.drop(['Potential Match'], axis=1), temp_data_frame1,
                                          on=['POTENTIAL MATCH'],
                                          how='inner')))
            self.__write_html(temp_data_frame2.sort_values('SIMILARITY', ascending=False).iloc[:int(self.num_html_row)])
            self.__write_xlsx(temp_data_frame2.sort_values('SIMILARITY', ascending=False), "recommendation")
        else:
            LOG.error("\nNothing to write to html file") # pragma: no mutate
            print("\nNothing to write to html file") # pragma: no mutate

    def process_cos_match(self):
        """ Function which process the data frame for matching/finding similarity index """
        self.filter_range = [int(i) for i in self.filter_range.split(',')]
        self.filter_range.sort()
        # bag-of-words counts per merged "Steps" string, then pairwise cosine similarity (%)
        count_vect = CountVectorizer()
        word_count_vector = count_vect.fit_transform(self.data_frame["Steps"].astype(str).to_numpy())
        c_sim = 100 * (cosine_similarity(word_count_vector))
        self.data_frame["Potential Match"] = self.data_frame[self.uniq_header]
        dataframe = pd.DataFrame(c_sim, columns=self.data_frame["Potential Match"],
                                 index=self.data_frame[self.uniq_header])
        row, col = dataframe.shape
        # keep only the strict upper triangle: each pair reported once, self-matches dropped
        dataframe[:] = np.where(np.arange(row)[:, None] >= np.arange(col), np.nan, dataframe)
        report_df = dataframe.stack().reset_index()
        report_df.columns = ["UNIQ ID", "POTENTIAL MATCH", "SIMILARITY"]
        report_df = (report_df[(self.filter_range[0] <= report_df['SIMILARITY']) &
                               (report_df['SIMILARITY'] <= self.filter_range[1])]) # pragma: no mutate
        return report_df

    def __validate_range(self):
        """ Function which validate the input lower and upper range """
        __ret_val = True
        filter_range = list(map(int, self.filter_range.split(',')))
        # both bounds must be percentages in [0, 100]
        if any(range_val > 100 or range_val < 0 for range_val in filter_range):
            __ret_val = False
            LOG.error("\nEither of range value is wrong") # pragma: no mutate
        return __ret_val

    def __validate_input(self):
        """ Function to validate the input parameters """
        __ret_val = True
        try:
            rows, columns = self.data_frame.shape
            LOG.info("\n#Row:%s #Col:%s" % (str(rows), str(columns))) # pragma: no mutate
            input_list = self.col_int.split(',')
            test_list = [int(i) for i in input_list]
            test_list.append(int(self.uniq_id))
            # every requested column index must exist in the frame
            list_check = list(map(lambda item: True if item <= columns - 1 else False, test_list))
            if False in list_check:
                __ret_val = False
                LOG.error("\nEither or both unique id and col of interest out of range, or range value is wrong") #
                # pragma: no mutate
            return __ret_val
        except ValueError:
            LOG.error("\nInput data is not an integer") # pragma: no mutate
            return False

    def orchestrate_similarity(self):
        """Function which orchestrate the entire sequence of cosine similarity matching
        from IO layer"""
        start = datetime.datetime.now().timestamp()
        if self.__read_to_panda_df() and self.__validate_input() and self.__validate_range():
            self.__set_uniq_header()
            self.__get_duplicate_id()
            self.__refine_df()
            self.create_merged_df()
            if self.is_new_text == 1:
                self.__new_text_df()
            self.__create_mergrd_file()
            report_df = self.process_cos_match()
            self.report(report_df)
        end = datetime.datetime.now().timestamp()
        print("Execution time %s" % (end - start)) # pragma: no mutate
| [
"pandas.Series",
"os.path.exists",
"sklearn.metrics.pairwise.cosine_similarity",
"pandas.read_csv",
"numpy.arange",
"sklearn.feature_extraction.text.CountVectorizer",
"os.path.splitext",
"pandas.set_option",
"os.path.isfile",
"os.path.dirname",
"datetime.datetime.now",
"pandas.read_excel",
"... | [((368, 383), 'similarity.similarity_logging.get_logger', 'cl.get_logger', ([], {}), '()\n', (381, 383), True, 'import similarity.similarity_logging as cl\n'), ((2315, 2345), 'os.path.isfile', 'os.path.isfile', (['self.file_path'], {}), '(self.file_path)\n', (2329, 2345), False, 'import os\n'), ((2594, 2624), 'os.path.isfile', 'os.path.isfile', (['self.file_path'], {}), '(self.file_path)\n', (2608, 2624), False, 'import os\n'), ((7437, 7511), 'pandas.DataFrame', 'pd.DataFrame', (["{self.uniq_header: ['New/ID_TBD'], 'Steps': [self.new_text]}"], {}), "({self.uniq_header: ['New/ID_TBD'], 'Steps': [self.new_text]})\n", (7449, 7511), True, 'import pandas as pd\n'), ((9094, 9138), 'pandas.set_option', 'pd.set_option', (['"""colheader_justify"""', '"""center"""'], {}), "('colheader_justify', 'center')\n", (9107, 9138), True, 'import pandas as pd\n'), ((11676, 11693), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (11691, 11693), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((11956, 12065), 'pandas.DataFrame', 'pd.DataFrame', (['c_sim'], {'columns': "self.data_frame['Potential Match']", 'index': 'self.data_frame[self.uniq_header]'}), "(c_sim, columns=self.data_frame['Potential Match'], index=self.\n data_frame[self.uniq_header])\n", (11968, 12065), True, 'import pandas as pd\n'), ((3935, 3955), 'pandas.DataFrame', 'pd.DataFrame', (['__data'], {}), '(__data)\n', (3947, 3955), True, 'import pandas as pd\n'), ((4278, 4308), 'os.path.exists', 'os.path.exists', (['self.file_path'], {}), '(self.file_path)\n', (4292, 4308), False, 'import os\n'), ((7133, 7191), 'pandas.ExcelWriter', 'pd.ExcelWriter', (["('%s.xlsx' % file_path)"], {'engine': '"""xlsxwriter"""'}), "('%s.xlsx' % file_path, engine='xlsxwriter')\n", (7147, 7191), True, 'import pandas as pd\n'), ((11819, 11855), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['word_count_vector'], {}), '(word_count_vector)\n', (11836, 
11855), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((2370, 2401), 'os.path.dirname', 'os.path.dirname', (['self.file_path'], {}), '(self.file_path)\n', (2385, 2401), False, 'import os\n'), ((2695, 2726), 'os.path.splitext', 'os.path.splitext', (['file_path[-1]'], {}), '(file_path[-1])\n', (2711, 2726), False, 'import os\n'), ((3268, 3312), 'pandas.Series', 'pd.Series', (['self.data_frame[self.uniq_header]'], {}), '(self.data_frame[self.uniq_header])\n', (3277, 3312), True, 'import pandas as pd\n'), ((4460, 4489), 'pandas.read_excel', 'pd.read_excel', (['self.file_path'], {}), '(self.file_path)\n', (4473, 4489), True, 'import pandas as pd\n'), ((4520, 4547), 'pandas.read_csv', 'pd.read_csv', (['self.file_path'], {}), '(self.file_path)\n', (4531, 4547), True, 'import pandas as pd\n'), ((9937, 9962), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9952, 9962), False, 'import os\n'), ((12188, 12202), 'numpy.arange', 'np.arange', (['col'], {}), '(col)\n', (12197, 12202), True, 'import numpy as np\n'), ((14041, 14064), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14062, 14064), False, 'import datetime\n'), ((14526, 14549), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14547, 14549), False, 'import datetime\n'), ((12161, 12175), 'numpy.arange', 'np.arange', (['row'], {}), '(row)\n', (12170, 12175), True, 'import numpy as np\n'), ((7004, 7015), 'time.time', 'time.time', ([], {}), '()\n', (7013, 7015), False, 'import time\n'), ((8694, 8705), 'time.time', 'time.time', ([], {}), '()\n', (8703, 8705), False, 'import time\n')] |
"""
MesoNet
Authors: <NAME> and <NAME>, <NAME>
https://github.com/bf777/MesoNet
Licensed under the Creative Commons Attribution 4.0 International License (see LICENSE for details)
This file has been adapted from data.py in https://github.com/zhixuhao/unet
"""
from __future__ import print_function
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
def adjustData(img, mask, flag_multi_class, num_class):
    """Normalize an (image, mask) pair for training.

    Parameters
    ----------
    img : ndarray
        Image batch; rescaled to [0, 1] when its values exceed 1.
    mask : ndarray
        Segmentation mask. In the multi-class case it is one-hot encoded and
        the spatial axes are flattened; otherwise it is rescaled and binarized
        at a 0.7 threshold.
    flag_multi_class : bool
        Whether the mask holds integer class labels rather than a binary map.
    num_class : int
        Number of classes used for one-hot encoding (multi-class only).

    Returns
    -------
    tuple of ndarray
        The adjusted (img, mask) pair.
    """
    if flag_multi_class:
        img = img / 255
        # drop the channel axis: batched masks are 4-D, single masks 3-D
        mask = mask[:, :, :, 0] if (len(mask.shape) == 4) else mask[:, :, 0]
        new_mask = np.zeros(mask.shape + (num_class,))
        for i in range(num_class):
            new_mask[mask == i, i] = 1
        # Flatten the spatial axes into one. The original code keyed this
        # choice on flag_multi_class (always true inside this branch), so the
        # 3-D arm was unreachable and un-batched masks crashed with an
        # IndexError; dispatch on the actual rank instead.
        if new_mask.ndim == 4:
            mask = np.reshape(
                new_mask,
                (
                    new_mask.shape[0],
                    new_mask.shape[1] * new_mask.shape[2],
                    new_mask.shape[3],
                ),
            )
        else:
            mask = np.reshape(
                new_mask, (new_mask.shape[0] * new_mask.shape[1], new_mask.shape[2])
            )
    elif np.max(img) > 1:
        img = img / 255
        mask = mask / 255
        # binarize: anything above 0.7 is foreground
        mask[mask > 0.7] = 1
        mask[mask <= 0.7] = 0
    return img, mask
def trainGenerator(
    batch_size,
    train_path,
    image_folder,
    mask_folder,
    aug_dict,
    image_color_mode="grayscale",
    mask_color_mode="grayscale",
    image_save_prefix="image",
    mask_save_prefix="mask",
    flag_multi_class=False,
    num_class=2,
    save_to_dir=None,
    target_size=(512, 512),
    seed=1,
):
    """
    can generate image and mask at the same time
    use the same seed for image_datagen and mask_datagen to ensure the transformation for image and mask is the same
    if you want to visualize the results of generator, set save_to_dir = "your path"

    Yields (img, mask) batches, each normalized by adjustData.
    """
    # identical aug_dict on both generators — only the shared seed keeps the
    # random image and mask augmentations in lockstep
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    # class_mode=None: the generators yield raw arrays, no labels
    image_generator = image_datagen.flow_from_directory(
        train_path,
        classes=[image_folder],
        class_mode=None,
        color_mode=image_color_mode,
        target_size=target_size,
        batch_size=batch_size,
        save_to_dir=save_to_dir,
        save_prefix=image_save_prefix,
        seed=seed,
    )
    mask_generator = mask_datagen.flow_from_directory(
        train_path,
        classes=[mask_folder],
        class_mode=None,
        color_mode=mask_color_mode,
        target_size=target_size,
        batch_size=batch_size,
        save_to_dir=save_to_dir,
        save_prefix=mask_save_prefix,
        seed=seed,
    )
    # pair each image batch with its corresponding mask batch
    train_generator = zip(image_generator, mask_generator)
    for (img, mask) in train_generator:
        img, mask = adjustData(img, mask, flag_multi_class, num_class)
        yield (img, mask)
def testGenerator(
    test_path,
    num_image=60,
    target_size=(512, 512),
    flag_multi_class=False,
    as_gray=True,
):
    """Yield test images one at a time, preprocessed like the training data.

    Images are expected to be named 0.png, 1.png, ... inside *test_path*.
    Each is rescaled to [0, 1], resized to *target_size*, given a channel axis
    in the single-class case, and finally given a leading batch axis.
    """
    for index in range(num_image):
        image = io.imread(os.path.join(test_path, "%d.png" % index), as_gray=as_gray)
        image = image / 255
        image = trans.resize(image, target_size)
        if not flag_multi_class:
            # add the trailing channel axis expected by the network
            image = np.reshape(image, image.shape + (1,))
        # add the leading batch axis
        image = np.reshape(image, (1,) + image.shape)
        yield image
| [
"numpy.reshape",
"os.path.join",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.max",
"numpy.zeros",
"skimage.transform.resize"
] | [((2002, 2032), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**aug_dict)\n', (2020, 2032), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2052, 2082), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**aug_dict)\n', (2070, 2082), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((656, 691), 'numpy.zeros', 'np.zeros', (['(mask.shape + (num_class,))'], {}), '(mask.shape + (num_class,))\n', (664, 691), True, 'import numpy as np\n'), ((3218, 3248), 'skimage.transform.resize', 'trans.resize', (['img', 'target_size'], {}), '(img, target_size)\n', (3230, 3248), True, 'import skimage.transform as trans\n'), ((3346, 3379), 'numpy.reshape', 'np.reshape', (['img', '((1,) + img.shape)'], {}), '(img, (1,) + img.shape)\n', (3356, 3379), True, 'import numpy as np\n'), ((799, 903), 'numpy.reshape', 'np.reshape', (['new_mask', '(new_mask.shape[0], new_mask.shape[1] * new_mask.shape[2], new_mask.shape[3])'], {}), '(new_mask, (new_mask.shape[0], new_mask.shape[1] * new_mask.shape\n [2], new_mask.shape[3]))\n', (809, 903), True, 'import numpy as np\n'), ((1074, 1159), 'numpy.reshape', 'np.reshape', (['new_mask', '(new_mask.shape[0] * new_mask.shape[1], new_mask.shape[2])'], {}), '(new_mask, (new_mask.shape[0] * new_mask.shape[1], new_mask.shape[2])\n )\n', (1084, 1159), True, 'import numpy as np\n'), ((1228, 1239), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1234, 1239), True, 'import numpy as np\n'), ((3124, 3161), 'os.path.join', 'os.path.join', (['test_path', "('%d.png' % i)"], {}), "(test_path, '%d.png' % i)\n", (3136, 3161), False, 'import os\n'), ((3263, 3296), 'numpy.reshape', 'np.reshape', (['img', '(img.shape + (1,))'], {}), '(img, img.shape + (1,))\n', (3273, 3296), True, 'import numpy as np\n')] |
'''
Description: 构造一个数据集类,继承官方的torch.utils.data.Dataset
Author: HCQ
Company(School): UCAS
Email: <EMAIL>
Date: 2021-06-05 11:19:36
LastEditTime: 2021-06-10 10:40:49
FilePath: /pointnet-simple/framework/dataset.py
'''
import torch
import os
import json
from torch.utils.data import Dataset # 官方
from torch.utils.data import DataLoader # 官方
import numpy as np
# Read a pcd (txt) point-cloud file
def read_pcd_from_file(file):
  """Load a comma-separated point-cloud text file (one point per line) and
  return only the x, y, z columns as an (N, 3) numpy array."""
  points = []
  with open(file, 'r') as handle:
    for raw_line in handle:
      # each line looks like 'x,y,z,nx,ny,nz\n'; strip the newline, keep xyz
      fields = [float(v) for v in raw_line[:-1].split(',')]
      points.append(fields[:3])
  return np.array(points)
# Read a listing file and collect the name on each of its lines
def read_file_names_from_file(file):
  """Return the lines of *file* as a list of names (trailing newline removed)."""
  with open(file, 'r') as handle:
    # keep everything before the newline on each line
    return [raw_line.split('\n')[0] for raw_line in handle]
class PointNetDataset(Dataset): # subclass of the official torch Dataset
  def __init__(self, root_dir, train):
    """root_dir: the modelnet40_normal_resampled directory; train: 0 = training split, 1 = test split."""
    super(PointNetDataset, self).__init__() # run the parent constructor so its attributes are usable
    self._train = train # 0 selects the training file list, 1 the test file list
    self._classes = []
    # features and labels
    self._features = []
    self._labels = []
    self.load(root_dir) # after loading, everything lives in self._features / self._labels
  def classes(self):
    return self._classes
  def __len__(self): # number of samples
    return len(self._features)
  def __getitem__(self, idx): # given an index, return the augmented feature and its one-hot label
    feature, label = self._features[idx], self._labels[idx]
    # data augmentation (normalization, rotation, Gaussian jitter)
    # normalize feature
    center = np.expand_dims(np.mean(feature,axis=0), 0)
    feature = feature-center
    dist = np.max(np.sqrt(np.sum(feature ** 2, axis = 1)),0)
    feature = feature / dist #scale
    # rotation to feature
    theta = np.random.uniform(0, np.pi*2)
    rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) # rotation matrix
    feature[:,[0,2]] = feature[:,[0,2]].dot(rotation_matrix)
    # jitter noise
    feature += np.random.normal(0, 0.02, size=feature.shape) # np.random.normal(loc=mean, scale=std, size=None)
    # transpose so the 1-D convolutions later get channels first
    feature = torch.Tensor(feature.T) # transpose 10000x3 into 3x10000
    # the label must go from a class name to a one-hot vector
    l_lable = [0 for _ in range(len(self._classes))]
    l_lable[self._classes.index(label)] = 1 # 1 at the class index
    label = torch.Tensor(l_lable)
    return feature, label # return feature and label
  def load(self, root_dir): # called from __init__; loads the whole split into memory
    things = os.listdir(root_dir) # names of the files and directories in root_dir
    files = []
    for f in things:
      if self._train == 0:
        if f == 'modelnet40_train.txt':
          files = read_file_names_from_file(root_dir + '/' + f) # list of sample file names
      elif self._train == 1:
        if f == 'modelnet40_test.txt':
          files = read_file_names_from_file(root_dir + '/' + f)
      if f == "modelnet40_shape_names.txt":
        self._classes = read_file_names_from_file(root_dir + '/' + f) # the 40 class names
    # end of the directory scan
    tmp_classes = []
    for file in files: # iterate over the sample file names
      num = file.split("_")[-1] # file: e.g. airplane_0001
      kind = file.split("_" + num)[0] # label (class name)
      if kind not in tmp_classes:
        tmp_classes.append(kind) # collect the class
      pcd_file = root_dir + '/' + kind + '/' + file + '.txt' # full path of the sample txt file
      np_pts = read_pcd_from_file(pcd_file) # read the txt file into a numpy array
      # print(np_pts.shape) # (10000, 3)
      self._features.append(np_pts) # store sample features and labels on the instance
      self._labels.append(kind)
    if self._train == 0: # training split
      print("There are " + str(len(self._labels)) + " trian files.") # There are 9843 trian files.
    elif self._train == 1: # test split
      print("There are " + str(len(self._labels)) + " test files.")
if __name__ == "__main__":
  # smoke test: iterate a few batches from the training split
  train_data = PointNetDataset("/home/hcq/data/modelnet40/modelnet40_normal_resampled", train=0) # txt files, 9843 samples
  train_loader = DataLoader(train_data, batch_size=2, shuffle=True) # official DataLoader; 4922 batches
  cnt = 0
  print(" len(train_loader:", len(train_loader))# 4922
  for pts, label in train_loader: # Iteration = num_samples / batch_size
    print("pts.shape", pts.shape) # torch.Size([2, 3, 10000])
    print("label.shape", label.shape) # torch.Size([2, 40])
    cnt += 1
    if cnt > 3:
break | [
"numpy.random.normal",
"numpy.mean",
"os.listdir",
"torch.utils.data.DataLoader",
"torch.Tensor",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin"
] | [((417, 428), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (425, 428), True, 'import numpy as np\n'), ((3954, 4004), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': '(2)', 'shuffle': '(True)'}), '(train_data, batch_size=2, shuffle=True)\n', (3964, 4004), False, 'from torch.utils.data import DataLoader\n'), ((701, 714), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (709, 714), True, 'import numpy as np\n'), ((1835, 1866), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(np.pi * 2)'], {}), '(0, np.pi * 2)\n', (1852, 1866), True, 'import numpy as np\n'), ((2062, 2107), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.02)'], {'size': 'feature.shape'}), '(0, 0.02, size=feature.shape)\n', (2078, 2107), True, 'import numpy as np\n'), ((2228, 2251), 'torch.Tensor', 'torch.Tensor', (['feature.T'], {}), '(feature.T)\n', (2240, 2251), False, 'import torch\n'), ((2460, 2481), 'torch.Tensor', 'torch.Tensor', (['l_lable'], {}), '(l_lable)\n', (2472, 2481), False, 'import torch\n'), ((2577, 2597), 'os.listdir', 'os.listdir', (['root_dir'], {}), '(root_dir)\n', (2587, 2597), False, 'import os\n'), ((1642, 1666), 'numpy.mean', 'np.mean', (['feature'], {'axis': '(0)'}), '(feature, axis=0)\n', (1649, 1666), True, 'import numpy as np\n'), ((1725, 1753), 'numpy.sum', 'np.sum', (['(feature ** 2)'], {'axis': '(1)'}), '(feature ** 2, axis=1)\n', (1731, 1753), True, 'import numpy as np\n'), ((1898, 1911), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1904, 1911), True, 'import numpy as np\n'), ((1931, 1944), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1937, 1944), True, 'import numpy as np\n'), ((1946, 1959), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1952, 1959), True, 'import numpy as np\n'), ((1914, 1927), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1920, 1927), True, 'import numpy as np\n')] |
import numpy as np
import numpy.linalg as LA
import quaternion
from . import wind
from .launcher import Launcher
from .air import Air
class Enviroment:
    def __init__(
        self,
        latitude,
        longitude,
        altitude=0
    ):
        """Launch-site environment.

        :param latitude: launch-point latitude [deg]
        :param longitude: launch-point longitude [deg]
        :param altitude: launch-point altitude [m] (default 0)
        """
        self.latitude = latitude
        self.longitude = longitude
        self.alt_launcher = altitude
        # GRS80 Earth ellipsoid model (original comment said "GPS80")
        # a: semi-major axis [m], b: semi-minor (polar) axis [m]
        self.earth_a = 6378137
        self.earth_b = 6356752
        # Earth rotation rate [rad/s]
        self.omega_earth = 0.000072722052166
        # ellipsoid radius at the launch-point latitude [m]
        # NOTE(review): this is a simple cos/sin blend of a and b, not the exact
        # geodetic radius formula — confirm this approximation is intended
        self.earth_r = self.earth_a * np.cos(np.deg2rad(self.latitude)) +\
                       self.earth_b * np.sin(np.deg2rad(self.latitude))
        # transformation matrix Tel: launch-point-fixed frame -> Earth-centered rotating frame
        sinlat = np.sin(np.deg2rad(self.latitude))
        coslat = np.cos(np.deg2rad(self.latitude))
        sinlon = np.sin(np.deg2rad(self.longitude))
        coslon = np.cos(np.deg2rad(self.longitude))
        self.Tel = np.array([
            [ -sinlon, -sinlat*coslon, coslat*coslon],
            [  coslon, -sinlat*sinlon, coslat*sinlon],
            [     0.0,         coslat,         sinlat]
        ])
        # Earth-spin angular-velocity vector expressed in the launch-point-fixed
        # frame, obtained by transforming the spin vector from the rotating frame
        self.omega_earth_local = np.dot(LA.inv(self.Tel), np.array([0., 0., self.omega_earth]))
def g(self, h):
# TODO: 重力を高度hの関数にする。
# 緯度経度から標高を算出する必要がある
return np.array([0.0, 0.0, -9.81])
    def Coriolis(self, v_body, Tbl, mass=1.0):
        # =======================================
        # INPUT:  v_body = velocity in body coord.
        #         Tbl = transformation matrix from local coord. to body coord.
        # OUTPUT: Fcor = Coriolis force vector in body coord.
        #         (with the default mass=1.0 this is an acceleration)
        # =======================================
        # F = -2 m (omega x v); self.omega_earth_local is given in the local
        # frame, so rotate it into the body frame with Tbl before the cross product.
        Fcor = -2.0*mass*np.cross(np.dot(Tbl, self.omega_earth_local), v_body)
return Fcor | [
"numpy.dot",
"numpy.array",
"numpy.deg2rad",
"numpy.linalg.inv"
] | [((1043, 1171), 'numpy.array', 'np.array', (['[[-sinlon, -sinlat * coslon, coslat * coslon], [coslon, -sinlat * sinlon, \n coslat * sinlon], [0.0, coslat, sinlat]]'], {}), '([[-sinlon, -sinlat * coslon, coslat * coslon], [coslon, -sinlat *\n sinlon, coslat * sinlon], [0.0, coslat, sinlat]])\n', (1051, 1171), True, 'import numpy as np\n'), ((1519, 1546), 'numpy.array', 'np.array', (['[0.0, 0.0, -9.81]'], {}), '([0.0, 0.0, -9.81])\n', (1527, 1546), True, 'import numpy as np\n'), ((838, 863), 'numpy.deg2rad', 'np.deg2rad', (['self.latitude'], {}), '(self.latitude)\n', (848, 863), True, 'import numpy as np\n'), ((890, 915), 'numpy.deg2rad', 'np.deg2rad', (['self.latitude'], {}), '(self.latitude)\n', (900, 915), True, 'import numpy as np\n'), ((942, 968), 'numpy.deg2rad', 'np.deg2rad', (['self.longitude'], {}), '(self.longitude)\n', (952, 968), True, 'import numpy as np\n'), ((995, 1021), 'numpy.deg2rad', 'np.deg2rad', (['self.longitude'], {}), '(self.longitude)\n', (1005, 1021), True, 'import numpy as np\n'), ((1359, 1375), 'numpy.linalg.inv', 'LA.inv', (['self.Tel'], {}), '(self.Tel)\n', (1365, 1375), True, 'import numpy.linalg as LA\n'), ((1377, 1415), 'numpy.array', 'np.array', (['[0.0, 0.0, self.omega_earth]'], {}), '([0.0, 0.0, self.omega_earth])\n', (1385, 1415), True, 'import numpy as np\n'), ((2050, 2085), 'numpy.dot', 'np.dot', (['Tbl', 'self.omega_earth_local'], {}), '(Tbl, self.omega_earth_local)\n', (2056, 2085), True, 'import numpy as np\n'), ((669, 694), 'numpy.deg2rad', 'np.deg2rad', (['self.latitude'], {}), '(self.latitude)\n', (679, 694), True, 'import numpy as np\n'), ((746, 771), 'numpy.deg2rad', 'np.deg2rad', (['self.latitude'], {}), '(self.latitude)\n', (756, 771), True, 'import numpy as np\n')] |
__author__ = 'sibirrer'
import numpy as np
from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase
from lenstronomy.Util import derivative_util as calc_util
__all__ = ['CoredDensity']
class CoredDensity(LensProfileBase):
    """
    class for a uniform cored density dropping steep in the outskirts
    This profile is e.g. featured in Blum et al. 2020 https://arxiv.org/abs/2001.07182v1
    3d rho(r) = 2/pi * Sigma_crit R_c**3 * (R_c**2 + r**2)**(-2)
    """
    _s = 0.000001  # numerical limit for minimal radius (avoids division by zero at the center)
    param_names = ['sigma0', 'r_core', 'center_x', 'center_y']
    lower_limit_default = {'sigma0': -1, 'r_core': 0, 'center_x': -100, 'center_y': -100}
    upper_limit_default = {'sigma0': 10, 'r_core': 100, 'center_x': 100, 'center_y': 100}
    def function(self, x, y, sigma0, r_core, center_x=0, center_y=0):
        """
        potential of cored density profile
        :param x: x-coordinate in angular units
        :param y: y-coordinate in angular units
        :param sigma0: convergence in the core
        :param r_core: core radius
        :param center_x: center of the profile
        :param center_y: center of the profile
        :return: lensing potential at (x, y)
        """
        x_ = x - center_x
        y_ = y - center_y
        r = np.sqrt(x_ ** 2 + y_ ** 2)
        r = np.maximum(r, self._s)  # clamp radius to the numerical floor self._s
        return 2 * sigma0 * r_core ** 2 * (2 * np.log(r) - np.log(np.sqrt(r**2 + r_core**2) - r_core))
    def derivatives(self, x, y, sigma0, r_core, center_x=0, center_y=0):
        """
        deflection angle of cored density profile
        :param x: x-coordinate in angular units
        :param y: y-coordinate in angular units
        :param sigma0: convergence in the core
        :param r_core: core radius
        :param center_x: center of the profile
        :param center_y: center of the profile
        :return: alpha_x, alpha_y at (x, y)
        """
        x_ = x - center_x
        y_ = y - center_y
        r = np.sqrt(x_**2 + y_**2)
        r = np.maximum(r, self._s)  # clamp radius to the numerical floor self._s
        alpha_r = self.alpha_r(r, sigma0, r_core)
        # project the radial deflection onto the x/y axes
        f_x = alpha_r * x_ / r
        f_y = alpha_r * y_ / r
        return f_x, f_y
    def hessian(self, x, y, sigma0, r_core, center_x=0, center_y=0):
        """
        :param x: x-coordinate in angular units
        :param y: y-coordinate in angular units
        :param sigma0: convergence in the core
        :param r_core: core radius
        :param center_x: center of the profile
        :param center_y: center of the profile
        :return: Hessian df/dxdx, df/dxdy, df/dydx, df/dydy at position (x, y)
        """
        x_ = x - center_x
        y_ = y - center_y
        r = np.sqrt(x_ ** 2 + y_ ** 2)
        r = np.maximum(r, self._s)  # clamp radius to the numerical floor self._s
        d_alpha_dr = self.d_alpha_dr(r, sigma0, r_core)
        alpha = self.alpha_r(r, sigma0, r_core)
        # chain rule: second derivatives of the potential built from the
        # radial derivative of alpha and the geometric derivative helpers
        dr_dx = calc_util.d_r_dx(x_, y_)
        dr_dy = calc_util.d_r_dy(x_, y_)
        f_xx = d_alpha_dr * dr_dx * x_ / r + alpha * calc_util.d_x_diffr_dx(x_, y_)
        f_yy = d_alpha_dr * dr_dy * y_ / r + alpha * calc_util.d_y_diffr_dy(x_, y_)
        f_xy = d_alpha_dr * dr_dy * x_ / r + alpha * calc_util.d_x_diffr_dy(x_, y_)
        # Hessian is symmetric, so f_yx = f_xy
        return f_xx, f_xy, f_xy, f_yy
    @staticmethod
    def alpha_r(r, sigma0, r_core):
        """
        radial deflection angle of the cored density profile
        :param r: radius (angular scale)
        :param sigma0: convergence in the core
        :param r_core: core radius
        :return: deflection angle
        """
        return 2 * sigma0 * r_core ** 2 / r * (1 - (1 + (r/r_core)**2) ** (-1./2))
    @staticmethod
    def d_alpha_dr(r, sigma0, r_core):
        """
        radial derivatives of the radial deflection angle
        :param r: radius (angular scale)
        :param sigma0: convergence in the core
        :param r_core: core radius
        :return: dalpha/dr
        """
        return 2 * sigma0 * (((1 + (r/r_core) ** 2) ** (-3./2)) - (r_core/r) ** 2 * (1 - (1+(r/r_core)**2) ** (-1./2)))
    @staticmethod
    def kappa_r(r, sigma0, r_core):
        """
        convergence of the cored density profile. This routine is also for testing
        :param r: radius (angular scale)
        :param sigma0: convergence in the core
        :param r_core: core radius
        :return: convergence at r
        """
        return sigma0 * (1 + (r/r_core)**2) ** (-3./2)
    @staticmethod
    def density(r, sigma0, r_core):
        """
        rho(r) = 2/pi * Sigma_crit R_c**3 * (R_c**2 + r**2)**(-2)
        :param r: radius (angular scale)
        :param sigma0: convergence in the core
        :param r_core: core radius
        :return: density at radius r
        """
        return 2/np.pi * sigma0 * r_core**3 * (r_core**2 + r**2) ** (-2)
    def density_lens(self, r, sigma0, r_core):
        """
        computes the density at 3d radius r given lens model parameterization.
        The integral in the LOS projection of this quantity results in the convergence quantity.
        :param r: radius (angular scale)
        :param sigma0: convergence in the core
        :param r_core: core radius
        :return: density at radius r
        """
        return self.density(r, sigma0, r_core)
    def density_2d(self, x, y, sigma0, r_core, center_x=0, center_y=0):
        """
        projected density at projected radius r
        :param x: x-coordinate in angular units
        :param y: y-coordinate in angular units
        :param sigma0: convergence in the core
        :param r_core: core radius
        :param center_x: center of the profile
        :param center_y: center of the profile
        :return: projected density
        """
        x_ = x - center_x
        y_ = y - center_y
        r = np.sqrt(x_ ** 2 + y_ ** 2)
        r = np.maximum(r, self._s)  # clamp radius to the numerical floor self._s
        return self.kappa_r(r, sigma0, r_core)
    def mass_2d(self, r, sigma0, r_core):
        """
        mass enclosed in cylinder of radius r
        :param r: radius (angular scale)
        :param sigma0: convergence in the core
        :param r_core: core radius
        :return: mass enclosed in cylinder of radius r
        """
        return self.alpha_r(r, sigma0, r_core) * np.pi * r
    @staticmethod
    def mass_3d(r, sigma0, r_core):
        """
        mass enclosed 3d radius
        :param r: radius (angular scale)
        :param sigma0: convergence in the core
        :param r_core: core radius
        :return: mass enclosed 3d radius
        """
        return 8 * sigma0 * r_core**3 * (np.arctan(r/r_core)/(2*r_core) - r / (2 * (r**2 + r_core**2)))
    def mass_3d_lens(self, r, sigma0, r_core):
        """
        mass enclosed a 3d sphere or radius r given a lens parameterization with angular units
        For this profile those are identical.
        :param r: radius (angular scale)
        :param sigma0: convergence in the core
        :param r_core: core radius
        :return: mass enclosed 3d radius
        """
        return self.mass_3d(r, sigma0, r_core)
| [
"lenstronomy.Util.derivative_util.d_x_diffr_dx",
"numpy.sqrt",
"lenstronomy.Util.derivative_util.d_r_dy",
"lenstronomy.Util.derivative_util.d_x_diffr_dy",
"numpy.log",
"lenstronomy.Util.derivative_util.d_r_dx",
"numpy.maximum",
"lenstronomy.Util.derivative_util.d_y_diffr_dy",
"numpy.arctan"
] | [((1301, 1327), 'numpy.sqrt', 'np.sqrt', (['(x_ ** 2 + y_ ** 2)'], {}), '(x_ ** 2 + y_ ** 2)\n', (1308, 1327), True, 'import numpy as np\n'), ((1340, 1362), 'numpy.maximum', 'np.maximum', (['r', 'self._s'], {}), '(r, self._s)\n', (1350, 1362), True, 'import numpy as np\n'), ((1996, 2022), 'numpy.sqrt', 'np.sqrt', (['(x_ ** 2 + y_ ** 2)'], {}), '(x_ ** 2 + y_ ** 2)\n', (2003, 2022), True, 'import numpy as np\n'), ((2031, 2053), 'numpy.maximum', 'np.maximum', (['r', 'self._s'], {}), '(r, self._s)\n', (2041, 2053), True, 'import numpy as np\n'), ((2700, 2726), 'numpy.sqrt', 'np.sqrt', (['(x_ ** 2 + y_ ** 2)'], {}), '(x_ ** 2 + y_ ** 2)\n', (2707, 2726), True, 'import numpy as np\n'), ((2739, 2761), 'numpy.maximum', 'np.maximum', (['r', 'self._s'], {}), '(r, self._s)\n', (2749, 2761), True, 'import numpy as np\n'), ((2882, 2906), 'lenstronomy.Util.derivative_util.d_r_dx', 'calc_util.d_r_dx', (['x_', 'y_'], {}), '(x_, y_)\n', (2898, 2906), True, 'from lenstronomy.Util import derivative_util as calc_util\n'), ((2923, 2947), 'lenstronomy.Util.derivative_util.d_r_dy', 'calc_util.d_r_dy', (['x_', 'y_'], {}), '(x_, y_)\n', (2939, 2947), True, 'from lenstronomy.Util import derivative_util as calc_util\n'), ((5758, 5784), 'numpy.sqrt', 'np.sqrt', (['(x_ ** 2 + y_ ** 2)'], {}), '(x_ ** 2 + y_ ** 2)\n', (5765, 5784), True, 'import numpy as np\n'), ((5797, 5819), 'numpy.maximum', 'np.maximum', (['r', 'self._s'], {}), '(r, self._s)\n', (5807, 5819), True, 'import numpy as np\n'), ((3001, 3031), 'lenstronomy.Util.derivative_util.d_x_diffr_dx', 'calc_util.d_x_diffr_dx', (['x_', 'y_'], {}), '(x_, y_)\n', (3023, 3031), True, 'from lenstronomy.Util import derivative_util as calc_util\n'), ((3085, 3115), 'lenstronomy.Util.derivative_util.d_y_diffr_dy', 'calc_util.d_y_diffr_dy', (['x_', 'y_'], {}), '(x_, y_)\n', (3107, 3115), True, 'from lenstronomy.Util import derivative_util as calc_util\n'), ((3169, 3199), 'lenstronomy.Util.derivative_util.d_x_diffr_dy', 'calc_util.d_x_diffr_dy', 
(['x_', 'y_'], {}), '(x_, y_)\n', (3191, 3199), True, 'from lenstronomy.Util import derivative_util as calc_util\n'), ((1410, 1419), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (1416, 1419), True, 'import numpy as np\n'), ((6535, 6556), 'numpy.arctan', 'np.arctan', (['(r / r_core)'], {}), '(r / r_core)\n', (6544, 6556), True, 'import numpy as np\n'), ((1429, 1458), 'numpy.sqrt', 'np.sqrt', (['(r ** 2 + r_core ** 2)'], {}), '(r ** 2 + r_core ** 2)\n', (1436, 1458), True, 'import numpy as np\n')] |
"""
============================================================================
Comparing anomaly detection algorithms for outlier detection on toy datasets
============================================================================
This example shows characteristics of different anomaly detection algorithms
on 2D datasets. Datasets contain one or two modes (regions of high density)
to illustrate the ability of algorithms to cope with multimodal data.
For each dataset, 15% of samples are generated as random uniform noise. This
proportion is the value given to the nu parameter of the OneClassSVM and the
contamination parameter of the other outlier detection algorithms.
Decision boundaries between inliers and outliers are displayed in black
except for Local Outlier Factor (LOF) as it has no predict method to be applied
on new data when it is used for outlier detection.
The :class:`sklearn.svm.OneClassSVM` is known to be sensitive to outliers and
thus does not perform very well for outlier detection. This estimator is best
suited for novelty detection when the training set is not contaminated by
outliers. That said, outlier detection in high-dimension, or without any
assumptions on the distribution of the inlying data is very challenging, and a
One-class SVM might give useful results in these situations depending on the
value of its hyperparameters.
:class:`sklearn.covariance.EllipticEnvelope` assumes the data is Gaussian and
learns an ellipse. It thus degrades when the data is not unimodal. Notice
however that this estimator is robust to outliers.
:class:`sklearn.ensemble.IsolationForest` and
:class:`sklearn.neighbors.LocalOutlierFactor` seem to perform reasonably well
for multi-modal data sets. The advantage of
:class:`sklearn.neighbors.LocalOutlierFactor` over the other estimators is
shown for the third data set, where the two modes have different densities.
This advantage is explained by the local aspect of LOF, meaning that it only
compares the score of abnormality of one sample with the scores of its
neighbors.
Finally, for the last data set, it is hard to say that one sample is more
abnormal than another sample as they are uniformly distributed in a
hypercube. Except for the :class:`sklearn.svm.OneClassSVM` which overfits a
little, all estimators present decent solutions for this situation. In such a
case, it would be wise to look more closely at the scores of abnormality of
the samples as a good estimator should assign similar scores to all the
samples.
While these examples give some intuition about the algorithms, this
intuition might not apply to very high dimensional data.
Finally, note that parameters of the models have been here handpicked but
that in practice they need to be adjusted. In the absence of labelled data,
the problem is completely unsupervised so model selection can be a challenge.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.datasets import make_moons, make_blobs
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
# Example settings
n_samples = 300
outliers_fraction = 0.15
n_outliers = int(outliers_fraction * n_samples)
n_inliers = n_samples - n_outliers
# define outlier/anomaly detection methods to be compared
# (nu / contamination are all set to the same expected outlier fraction)
anomaly_algorithms = [
    ("Robust covariance", EllipticEnvelope(contamination=outliers_fraction)),
    ("One-Class SVM", svm.OneClassSVM(nu=outliers_fraction, kernel="rbf",
                                      gamma=0.1)),
    ("Isolation Forest", IsolationForest(contamination=outliers_fraction,
                                         random_state=42)),
    ("Local Outlier Factor", LocalOutlierFactor(
        n_neighbors=35, contamination=outliers_fraction))]
# Define datasets: two unimodal blobs, two bimodal blob sets (equal and
# unequal densities), a moons shape, and a uniform square.
blobs_params = dict(random_state=0, n_samples=n_inliers, n_features=2)
datasets = [
    make_blobs(centers=[[0, 0], [0, 0]], cluster_std=0.5,
               **blobs_params)[0],
    make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[0.5, 0.5],
               **blobs_params)[0],
    make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[1.5, .3],
               **blobs_params)[0],
    4. * (make_moons(n_samples=n_samples, noise=.05, random_state=0)[0] -
          np.array([0.5, 0.25])),
    14. * (np.random.RandomState(42).rand(n_samples, 2) - 0.5)]
# Compare given classifiers under given settings
# (the grid is used to draw the decision boundary contours)
xx, yy = np.meshgrid(np.linspace(-7, 7, 150),
                     np.linspace(-7, 7, 150))
plt.figure(figsize=(len(anomaly_algorithms) * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
                    hspace=.01)
plot_num = 1
rng = np.random.RandomState(42)
for i_dataset, X in enumerate(datasets):
    # Add outliers
    X = np.concatenate([X, rng.uniform(low=-6, high=6,
                       size=(n_outliers, 2))], axis=0)
    for name, algorithm in anomaly_algorithms:
        # time the fit so each subplot can report its cost
        t0 = time.time()
        algorithm.fit(X)
        t1 = time.time()
        plt.subplot(len(datasets), len(anomaly_algorithms), plot_num)
        if i_dataset == 0:
            plt.title(name, size=18)
        # fit the data and tag outliers
        if name == "Local Outlier Factor":
            y_pred = algorithm.fit_predict(X)
        else:
            y_pred = algorithm.fit(X).predict(X)
        # plot the levels lines and the points
        if name != "Local Outlier Factor":  # LOF does not implement predict
            Z = algorithm.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='black')
        # y_pred is -1 for outliers, +1 for inliers -> map to color index 0/1
        colors = np.array(['#377eb8', '#ff7f00'])
        plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[(y_pred + 1) // 2])
        plt.xlim(-7, 7)
        plt.ylim(-7, 7)
        plt.xticks(())
        plt.yticks(())
        plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
                 transform=plt.gca().transAxes, size=15,
                 horizontalalignment='right')
        plot_num += 1
plt.show()
| [
"numpy.array",
"numpy.random.RandomState",
"sklearn.datasets.make_blobs",
"matplotlib.pyplot.contour",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"sklearn.neighbors.LocalOutlierFactor",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"sklearn.svm.OneClassSVM",
"sklearn.covariance.Ellip... | [((4773, 4870), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.02)', 'right': '(0.98)', 'bottom': '(0.001)', 'top': '(0.96)', 'wspace': '(0.05)', 'hspace': '(0.01)'}), '(left=0.02, right=0.98, bottom=0.001, top=0.96, wspace=\n 0.05, hspace=0.01)\n', (4792, 4870), True, 'import matplotlib.pyplot as plt\n'), ((4900, 4925), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (4921, 4925), True, 'import numpy as np\n'), ((6261, 6271), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6269, 6271), True, 'import matplotlib.pyplot as plt\n'), ((4641, 4664), 'numpy.linspace', 'np.linspace', (['(-7)', '(7)', '(150)'], {}), '(-7, 7, 150)\n', (4652, 4664), True, 'import numpy as np\n'), ((4687, 4710), 'numpy.linspace', 'np.linspace', (['(-7)', '(7)', '(150)'], {}), '(-7, 7, 150)\n', (4698, 4710), True, 'import numpy as np\n'), ((3580, 3629), 'sklearn.covariance.EllipticEnvelope', 'EllipticEnvelope', ([], {'contamination': 'outliers_fraction'}), '(contamination=outliers_fraction)\n', (3596, 3629), False, 'from sklearn.covariance import EllipticEnvelope\n'), ((3654, 3716), 'sklearn.svm.OneClassSVM', 'svm.OneClassSVM', ([], {'nu': 'outliers_fraction', 'kernel': '"""rbf"""', 'gamma': '(0.1)'}), "(nu=outliers_fraction, kernel='rbf', gamma=0.1)\n", (3669, 3716), False, 'from sklearn import svm\n'), ((3782, 3847), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {'contamination': 'outliers_fraction', 'random_state': '(42)'}), '(contamination=outliers_fraction, random_state=42)\n', (3797, 3847), False, 'from sklearn.ensemble import IsolationForest\n'), ((3920, 3987), 'sklearn.neighbors.LocalOutlierFactor', 'LocalOutlierFactor', ([], {'n_neighbors': '(35)', 'contamination': 'outliers_fraction'}), '(n_neighbors=35, contamination=outliers_fraction)\n', (3938, 3987), False, 'from sklearn.neighbors import LocalOutlierFactor\n'), ((4106, 4175), 'sklearn.datasets.make_blobs', 
'make_blobs', ([], {'centers': '[[0, 0], [0, 0]]', 'cluster_std': '(0.5)'}), '(centers=[[0, 0], [0, 0]], cluster_std=0.5, **blobs_params)\n', (4116, 4175), False, 'from sklearn.datasets import make_moons, make_blobs\n'), ((4199, 4277), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'centers': '[[2, 2], [-2, -2]]', 'cluster_std': '[0.5, 0.5]'}), '(centers=[[2, 2], [-2, -2]], cluster_std=[0.5, 0.5], **blobs_params)\n', (4209, 4277), False, 'from sklearn.datasets import make_moons, make_blobs\n'), ((4301, 4379), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'centers': '[[2, 2], [-2, -2]]', 'cluster_std': '[1.5, 0.3]'}), '(centers=[[2, 2], [-2, -2]], cluster_std=[1.5, 0.3], **blobs_params)\n', (4311, 4379), False, 'from sklearn.datasets import make_moons, make_blobs\n'), ((5158, 5169), 'time.time', 'time.time', ([], {}), '()\n', (5167, 5169), False, 'import time\n'), ((5208, 5219), 'time.time', 'time.time', ([], {}), '()\n', (5217, 5219), False, 'import time\n'), ((5868, 5900), 'numpy.array', 'np.array', (["['#377eb8', '#ff7f00']"], {}), "(['#377eb8', '#ff7f00'])\n", (5876, 5900), True, 'import numpy as np\n'), ((5909, 5977), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'s': '(10)', 'color': 'colors[(y_pred + 1) // 2]'}), '(X[:, 0], X[:, 1], s=10, color=colors[(y_pred + 1) // 2])\n', (5920, 5977), True, 'import matplotlib.pyplot as plt\n'), ((5987, 6002), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-7)', '(7)'], {}), '(-7, 7)\n', (5995, 6002), True, 'import matplotlib.pyplot as plt\n'), ((6011, 6026), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-7)', '(7)'], {}), '(-7, 7)\n', (6019, 6026), True, 'import matplotlib.pyplot as plt\n'), ((6035, 6049), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (6045, 6049), True, 'import matplotlib.pyplot as plt\n'), ((6058, 6072), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (6068, 6072), True, 'import matplotlib.pyplot as plt\n'), ((4482, 4503), 'numpy.array', 
'np.array', (['[0.5, 0.25]'], {}), '([0.5, 0.25])\n', (4490, 4503), True, 'import numpy as np\n'), ((5329, 5353), 'matplotlib.pyplot.title', 'plt.title', (['name'], {'size': '(18)'}), '(name, size=18)\n', (5338, 5353), True, 'import matplotlib.pyplot as plt\n'), ((5785, 5849), 'matplotlib.pyplot.contour', 'plt.contour', (['xx', 'yy', 'Z'], {'levels': '[0]', 'linewidths': '(2)', 'colors': '"""black"""'}), "(xx, yy, Z, levels=[0], linewidths=2, colors='black')\n", (5796, 5849), True, 'import matplotlib.pyplot as plt\n'), ((4408, 4467), 'sklearn.datasets.make_moons', 'make_moons', ([], {'n_samples': 'n_samples', 'noise': '(0.05)', 'random_state': '(0)'}), '(n_samples=n_samples, noise=0.05, random_state=0)\n', (4418, 4467), False, 'from sklearn.datasets import make_moons, make_blobs\n'), ((4517, 4542), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (4538, 4542), True, 'import numpy as np\n'), ((6162, 6171), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6169, 6171), True, 'import matplotlib.pyplot as plt\n')] |
from numbers import Number
from typing import List
from typing import Union
import numpy as np
from error_propagation.core import Complex
def npv(
    cash: Union[List[Number], List[Complex]],
    discount_rate: Union[Number, Complex],  # noqa
) -> Complex:
    """Compute the net present value (NPV) of a series of cash flows.

    NPV accounts for the time value of money and can be used to compare
    similar investment alternatives. It relies on a discount rate that
    may be derived from the cost of the capital required to invest; any
    project or investment with a negative NPV should be avoided. One
    important drawback of NPV analysis is that it makes assumptions about
    future events that may not be reliable.

    For more information on NPV, see
    https://www.investopedia.com/terms/n/npv.asp

    Args:
        cash: Net cash inflow-outflows during a single period t
        discount_rate: Discount rate or return that could be earned in
            alternative investments

    Returns:
        present value of an investment's future cash flows above the
        investment's initial cost
    """
    # Discount factor (1 + r)**t for each period t = 1 .. len(cash),
    # computed with Complex arithmetic so uncertainty propagates.
    periods = np.arange(1, len(cash) + 1)
    factors = (Complex(1, 0) + discount_rate) ** periods
    # Promote plain numbers to Complex(value, 0) so all flows share a type.
    flows = [c if isinstance(c, Complex) else Complex(c, 0) for c in cash]
    return np.sum(np.array(flows) / factors)
| [
"error_propagation.core.Complex",
"numpy.array"
] | [((1090, 1103), 'error_propagation.core.Complex', 'Complex', (['(1)', '(0)'], {}), '(1, 0)\n', (1097, 1103), False, 'from error_propagation.core import Complex\n'), ((1255, 1269), 'numpy.array', 'np.array', (['cash'], {}), '(cash)\n', (1263, 1269), True, 'import numpy as np\n'), ((1215, 1228), 'error_propagation.core.Complex', 'Complex', (['x', '(0)'], {}), '(x, 0)\n', (1222, 1228), False, 'from error_propagation.core import Complex\n')] |
import numpy as np
import cPickle
import os
import pdb
import cv2
def unpickle(file):
    """Load and return the pickled object stored at path *file*.

    Uses a context manager so the file handle is closed even if
    ``cPickle.load`` raises (the original left the handle open on error,
    and also shadowed the builtin name ``dict``).
    """
    with open(file, 'rb') as fo:
        return cPickle.load(fo)
def load_data(train_path,order,nb_groups, nb_cl, nb_val,SubMean = False):
    """Load CIFAR-100 and split it into per-group train/valid/test lists.

    :param train_path: directory containing the 'cifar-100-python' folder
    :param order: class-label ordering; group i uses labels
        order[nb_cl*i : nb_cl*(i+1)]
    :param nb_groups: number of incremental groups
    :param nb_cl: number of classes per group
    :param nb_val: number of validation samples held out per class
        (out of the 500 training samples each CIFAR-100 class has)
    :param SubMean: if True, subtract the per-pixel mean of the train split
    :return: (files_train, train_labels, files_valid, valid_labels,
        files_test, test_labels), each a list with one entry per group
    """
    xs = []
    ys = []
    for j in range(1):
        d = unpickle(train_path+'cifar-100-python/train')
        x = d['data']
        y = d['fine_labels']
        xs.append(x)
        ys.append(y)
    # test batch is appended after the 50000 training samples
    d = unpickle(train_path + 'cifar-100-python/test')
    xs.append(d['data'])
    ys.append(d['fine_labels'])
    # scale pixel values to [0, 1]
    x = np.concatenate(xs)/np.float32(255)
    y = np.concatenate(ys)
    #x = np.dstack((x[:, :1024], x[:, 1024:2048], x[:, 2048:]))
    # reshape flat rows to NHWC images: (N, 32, 32, 3)
    x = x.reshape((x.shape[0], 3,32, 32)).transpose(0,2,3,1)
    #x = np.transpose(x,(0,2,3,1))
    #pdb.set_trace()
    #cv2.imwrite("1.jpg",cv2.cvtColor(x[3,:,:,:]*255, cv2.COLOR_RGB2BGR))
    #.transpose(0,3,1,2)
    # subtract per-pixel mean
    pixel_mean = np.mean(x[0:50000],axis=0)
    #np.save('cifar_mean.npy',pixel_mean)
    #pdb.set_trace()
    if SubMean == True:
        x -= pixel_mean
    #pdb.set_trace()
    # Create Train/Validation set: per class, shuffle the 500 training
    # samples and keep eff_samples_cl for training, nb_val for validation.
    eff_samples_cl = 500-nb_val
    X_train = np.zeros((eff_samples_cl*100,32, 32,3))
    Y_train = np.zeros(eff_samples_cl*100)
    X_valid = np.zeros((nb_val*100,32, 32,3))
    Y_valid = np.zeros(nb_val*100)
    for i in range(100):
        index_y=np.where(y[0:50000]==i)[0]
        np.random.shuffle(index_y)
        X_train[i*eff_samples_cl:(i+1)*eff_samples_cl] = x[index_y[0:eff_samples_cl],:,:,:]
        Y_train[i*eff_samples_cl:(i+1)*eff_samples_cl] = y[index_y[0:eff_samples_cl]]
        X_valid[i*nb_val:(i+1)*nb_val] = x[index_y[eff_samples_cl:500],:,:,:]
        Y_valid[i*nb_val:(i+1)*nb_val] = y[index_y[eff_samples_cl:500]]
    X_test = x[50000:,:,:,:]
    Y_test = y[50000:]
    files_train = []
    train_labels = []
    files_valid = []
    valid_labels = []
    files_test = []
    test_labels = []
    for _ in range(nb_groups):
        files_train.append([])
        train_labels.append([])
        files_valid.append([])
        valid_labels.append([])
        files_test.append([])
        test_labels.append([])
    # Distribute samples into groups following the class order given by
    # `order`; within each class the samples are shuffled again.
    for i in range(nb_groups):
        for i2 in range(nb_cl):
            labels_old = Y_train
            #pdb.set_trace()
            tmp_ind=np.where(labels_old == order[nb_cl*i+i2])[0]
            np.random.shuffle(tmp_ind)
            files_train[i].extend(X_train[tmp_ind[0:len(tmp_ind)]])
            train_labels[i].extend(Y_train[tmp_ind[0:len(tmp_ind)]])
            labels_old = Y_valid
            tmp_ind=np.where(labels_old == order[nb_cl*i+i2])[0]
            np.random.shuffle(tmp_ind)
            files_valid[i].extend(X_valid[tmp_ind[0:len(tmp_ind)]])
            valid_labels[i].extend(Y_valid[tmp_ind[0:len(tmp_ind)]])
            labels_old = Y_test
            tmp_ind=np.where(labels_old == order[nb_cl*i+i2])[0]
            np.random.shuffle(tmp_ind)
            files_test[i].extend(X_test[tmp_ind[0:len(tmp_ind)]])
            test_labels[i].extend(Y_test[tmp_ind[0:len(tmp_ind)]])
    #pdb.set_trace()
    return files_train,train_labels,files_valid,valid_labels,files_test,test_labels
def aug(batch):
    """Randomly crop a batch of 32x32 images for data augmentation.

    As in the ResNet paper: pad each image with 4 zero pixels on every
    side, then take a random 32x32 crop per image.

    :param batch: array of shape (N, 32, 32, C)
    :return: float32 array of the same shape containing the random crops
    """
    padded = np.pad(batch, ((0, 0), (4, 4), (4, 4), (0, 0)), mode='constant')
    random_cropped = np.zeros(batch.shape, dtype=np.float32)
    # Crop offsets in [0, 8] inclusive. np.random.random_integers is
    # deprecated (removed in modern NumPy); np.random.randint with an
    # exclusive upper bound of 9 covers the same range.
    crops = np.random.randint(0, 9, size=(batch.shape[0], 2))
    for r in range(batch.shape[0]):
        r0, c0 = crops[r, 0], crops[r, 1]
        random_cropped[r, :, :, :] = padded[r, r0:r0 + 32, c0:c0 + 32, :]
    return random_cropped
def balanced_subsample(x, y, subsample_size=1.0):
    """Draw a class-balanced subsample of (x, y).

    Every class contributes the same number of samples: the size of the
    smallest class, optionally scaled down by *subsample_size*. Classes
    larger than that are shuffled before truncation.

    :param x: array of samples, indexable by a boolean mask over y
    :param y: array of class labels, one per sample
    :param subsample_size: fraction (<= 1.0) of the smallest class to keep
    :return: tuple (xs, ys) of concatenated per-class samples and their
        (float) labels
    """
    class_xs = []
    min_elems = None
    for yi in np.unique(y):
        elems = x[(y == yi)]
        class_xs.append((yi, elems))
        # fix: compare with `is None`, not `== None` (the latter can
        # trigger element-wise comparison on array-like values)
        if min_elems is None or elems.shape[0] < min_elems:
            min_elems = elems.shape[0]
    use_elems = min_elems
    if subsample_size < 1:
        use_elems = int(min_elems * subsample_size)
    xs = []
    ys = []
    for ci, this_xs in class_xs:
        if len(this_xs) > use_elems:
            np.random.shuffle(this_xs)
        x_ = this_xs[:use_elems]
        y_ = np.empty(use_elems)
        y_.fill(ci)
        xs.append(x_)
        ys.append(y_)
    return np.concatenate(xs), np.concatenate(ys)
| [
"numpy.mean",
"numpy.unique",
"numpy.random.random_integers",
"numpy.where",
"numpy.zeros",
"numpy.empty",
"numpy.concatenate",
"numpy.pad",
"cPickle.load",
"numpy.float32",
"numpy.random.shuffle"
] | [((124, 140), 'cPickle.load', 'cPickle.load', (['fo'], {}), '(fo)\n', (136, 140), False, 'import cPickle\n'), ((608, 626), 'numpy.concatenate', 'np.concatenate', (['ys'], {}), '(ys)\n', (622, 626), True, 'import numpy as np\n'), ((954, 981), 'numpy.mean', 'np.mean', (['x[0:50000]'], {'axis': '(0)'}), '(x[0:50000], axis=0)\n', (961, 981), True, 'import numpy as np\n'), ((1193, 1236), 'numpy.zeros', 'np.zeros', (['(eff_samples_cl * 100, 32, 32, 3)'], {}), '((eff_samples_cl * 100, 32, 32, 3))\n', (1201, 1236), True, 'import numpy as np\n'), ((1247, 1277), 'numpy.zeros', 'np.zeros', (['(eff_samples_cl * 100)'], {}), '(eff_samples_cl * 100)\n', (1255, 1277), True, 'import numpy as np\n'), ((1290, 1325), 'numpy.zeros', 'np.zeros', (['(nb_val * 100, 32, 32, 3)'], {}), '((nb_val * 100, 32, 32, 3))\n', (1298, 1325), True, 'import numpy as np\n'), ((1336, 1358), 'numpy.zeros', 'np.zeros', (['(nb_val * 100)'], {}), '(nb_val * 100)\n', (1344, 1358), True, 'import numpy as np\n'), ((3315, 3379), 'numpy.pad', 'np.pad', (['batch', '((0, 0), (4, 4), (4, 4), (0, 0))'], {'mode': '"""constant"""'}), "(batch, ((0, 0), (4, 4), (4, 4), (0, 0)), mode='constant')\n", (3321, 3379), True, 'import numpy as np\n'), ((3392, 3431), 'numpy.zeros', 'np.zeros', (['batch.shape'], {'dtype': 'np.float32'}), '(batch.shape, dtype=np.float32)\n', (3400, 3431), True, 'import numpy as np\n'), ((3444, 3506), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)'], {'high': '(8)', 'size': '(batch.shape[0], 2)'}), '(0, high=8, size=(batch.shape[0], 2))\n', (3469, 3506), True, 'import numpy as np\n'), ((4041, 4053), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (4050, 4053), True, 'import numpy as np\n'), ((4620, 4638), 'numpy.concatenate', 'np.concatenate', (['xs'], {}), '(xs)\n', (4634, 4638), True, 'import numpy as np\n'), ((4648, 4666), 'numpy.concatenate', 'np.concatenate', (['ys'], {}), '(ys)\n', (4662, 4666), True, 'import numpy as np\n'), ((565, 583), 'numpy.concatenate', 
'np.concatenate', (['xs'], {}), '(xs)\n', (579, 583), True, 'import numpy as np\n'), ((584, 599), 'numpy.float32', 'np.float32', (['(255)'], {}), '(255)\n', (594, 599), True, 'import numpy as np\n'), ((1433, 1459), 'numpy.random.shuffle', 'np.random.shuffle', (['index_y'], {}), '(index_y)\n', (1450, 1459), True, 'import numpy as np\n'), ((4525, 4544), 'numpy.empty', 'np.empty', (['use_elems'], {}), '(use_elems)\n', (4533, 4544), True, 'import numpy as np\n'), ((1398, 1423), 'numpy.where', 'np.where', (['(y[0:50000] == i)'], {}), '(y[0:50000] == i)\n', (1406, 1423), True, 'import numpy as np\n'), ((2379, 2405), 'numpy.random.shuffle', 'np.random.shuffle', (['tmp_ind'], {}), '(tmp_ind)\n', (2396, 2405), True, 'import numpy as np\n'), ((2634, 2660), 'numpy.random.shuffle', 'np.random.shuffle', (['tmp_ind'], {}), '(tmp_ind)\n', (2651, 2660), True, 'import numpy as np\n'), ((2896, 2922), 'numpy.random.shuffle', 'np.random.shuffle', (['tmp_ind'], {}), '(tmp_ind)\n', (2913, 2922), True, 'import numpy as np\n'), ((4451, 4477), 'numpy.random.shuffle', 'np.random.shuffle', (['this_xs'], {}), '(this_xs)\n', (4468, 4477), True, 'import numpy as np\n'), ((2326, 2371), 'numpy.where', 'np.where', (['(labels_old == order[nb_cl * i + i2])'], {}), '(labels_old == order[nb_cl * i + i2])\n', (2334, 2371), True, 'import numpy as np\n'), ((2581, 2626), 'numpy.where', 'np.where', (['(labels_old == order[nb_cl * i + i2])'], {}), '(labels_old == order[nb_cl * i + i2])\n', (2589, 2626), True, 'import numpy as np\n'), ((2843, 2888), 'numpy.where', 'np.where', (['(labels_old == order[nb_cl * i + i2])'], {}), '(labels_old == order[nb_cl * i + i2])\n', (2851, 2888), True, 'import numpy as np\n')] |
import gym
import numpy as np
import pytest
from gym.spaces.discrete import Discrete
from gym.utils import seeding
from tianshou.data import Batch, Collector, ReplayBuffer
from tianshou.env import DummyVectorEnv, SubprocVectorEnv
from tianshou.policy import BasePolicy
class SimpleEnv(gym.Env):
    """Minimal self-defined environment used to profile the collector.

    Observations are constant arrays; each episode lasts a random number
    of steps drawn once at reset time.
    """

    def __init__(self):
        self.action_space = Discrete(200)
        self._fake_data = np.ones((10, 10, 1))
        self.seed(0)
        self.reset()

    def reset(self):
        self._index = 0
        # episode length drawn per episode, in [3, 200)
        self.done = np.random.randint(3, high=200)
        return {'observable': np.zeros((10, 10, 1)), 'hidden': self._index}

    def step(self, action):
        if self._index == self.done:
            raise ValueError('step after done !!!')
        self._index += 1
        obs = {'observable': self._fake_data, 'hidden': self._index}
        finished = self._index == self.done
        return obs, -1, finished, {}

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
class SimplePolicy(BasePolicy):
    """Trivial policy that always emits action 30, used to minimize
    per-step overhead while profiling data collection."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def forward(self, batch, state=None, **kwargs):
        acts = np.array([30] * len(batch))
        return Batch(act=acts, state=None, logits=None)

    def learn(self, batch, **kwargs):
        return super().learn(batch, **kwargs)
@pytest.fixture(scope="module")
def data():
    """Build the shared envs, policy, buffer and collectors once per module.

    NOTE: the order of the seeding calls matters — each
    ``np.random.randint`` draw below consumes the same global RNG stream
    seeded by ``np.random.seed(0)``.
    """
    np.random.seed(0)
    env = SimpleEnv()
    env.seed(0)
    env_vec = DummyVectorEnv([lambda: SimpleEnv() for _ in range(100)])
    env_vec.seed(np.random.randint(1000, size=100).tolist())
    env_subproc = SubprocVectorEnv([lambda: SimpleEnv() for _ in range(8)])
    env_subproc.seed(np.random.randint(1000, size=100).tolist())
    env_subproc_init = SubprocVectorEnv(
        [lambda: SimpleEnv() for _ in range(8)])
    env_subproc_init.seed(np.random.randint(1000, size=100).tolist())
    buffer = ReplayBuffer(50000)
    policy = SimplePolicy()
    # each collector gets its own fresh buffer
    collector = Collector(policy, env, ReplayBuffer(50000))
    collector_vec = Collector(policy, env_vec, ReplayBuffer(50000))
    collector_subproc = Collector(policy, env_subproc, ReplayBuffer(50000))
    return {
        "env": env,
        "env_vec": env_vec,
        "env_subproc": env_subproc,
        "env_subproc_init": env_subproc_init,
        "policy": policy,
        "buffer": buffer,
        "collector": collector,
        "collector_vec": collector_vec,
        "collector_subproc": collector_subproc,
    }
# --- profiling cases: single (non-vectorized) environment ---
def test_init(data):
    """Profile repeated Collector construction on a single env."""
    for _ in range(5000):
        Collector(data["policy"], data["env"], data["buffer"])
def test_reset(data):
    """Profile repeated collector resets on a single env."""
    for _ in range(5000):
        data["collector"].reset()
def test_collect_st(data):
    """Profile step-based collection on a single env."""
    for _ in range(50):
        data["collector"].collect(n_step=1000)
def test_collect_ep(data):
    """Profile episode-based collection on a single env."""
    for _ in range(50):
        data["collector"].collect(n_episode=10)
# --- profiling cases: DummyVectorEnv ---
def test_init_vec_env(data):
    """Profile repeated Collector construction on a vectorized env."""
    for _ in range(5000):
        Collector(data["policy"], data["env_vec"], data["buffer"])
def test_reset_vec_env(data):
    """Profile repeated collector resets on a vectorized env."""
    for _ in range(5000):
        data["collector_vec"].reset()
def test_collect_vec_env_st(data):
    """Profile step-based collection on a vectorized env."""
    for _ in range(50):
        data["collector_vec"].collect(n_step=1000)
def test_collect_vec_env_ep(data):
    """Profile episode-based collection on a vectorized env."""
    for _ in range(50):
        data["collector_vec"].collect(n_episode=10)
# --- profiling cases: SubprocVectorEnv ---
def test_init_subproc_env(data):
    """Profile repeated Collector construction on a subprocess env."""
    for _ in range(5000):
        Collector(data["policy"], data["env_subproc_init"], data["buffer"])
def test_reset_subproc_env(data):
    """Profile repeated collector resets on a subprocess env."""
    for _ in range(5000):
        data["collector_subproc"].reset()
def test_collect_subproc_env_st(data):
    """Profile step-based collection on a subprocess env."""
    for _ in range(50):
        data["collector_subproc"].collect(n_step=1000)
def test_collect_subproc_env_ep(data):
    """Profile episode-based collection on a subprocess env."""
    for _ in range(50):
        data["collector_subproc"].collect(n_episode=10)
if __name__ == '__main__':
pytest.main(["-s", "-k collector_profile", "--durations=0", "-v"])
| [
"numpy.ones",
"tianshou.data.ReplayBuffer",
"tianshou.data.Collector",
"gym.spaces.discrete.Discrete",
"pytest.main",
"numpy.random.randint",
"numpy.zeros",
"numpy.random.seed",
"pytest.fixture",
"gym.utils.seeding.np_random"
] | [((1526, 1556), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1540, 1556), False, 'import pytest\n'), ((1573, 1590), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1587, 1590), True, 'import numpy as np\n'), ((2076, 2095), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['(50000)'], {}), '(50000)\n', (2088, 2095), False, 'from tianshou.data import Batch, Collector, ReplayBuffer\n'), ((4006, 4072), 'pytest.main', 'pytest.main', (["['-s', '-k collector_profile', '--durations=0', '-v']"], {}), "(['-s', '-k collector_profile', '--durations=0', '-v'])\n", (4017, 4072), False, 'import pytest\n'), ((463, 476), 'gym.spaces.discrete.Discrete', 'Discrete', (['(200)'], {}), '(200)\n', (471, 476), False, 'from gym.spaces.discrete import Discrete\n'), ((503, 523), 'numpy.ones', 'np.ones', (['(10, 10, 1)'], {}), '((10, 10, 1))\n', (510, 523), True, 'import numpy as np\n'), ((632, 662), 'numpy.random.randint', 'np.random.randint', (['(3)'], {'high': '(200)'}), '(3, high=200)\n', (649, 662), True, 'import numpy as np\n'), ((1063, 1086), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (1080, 1086), False, 'from gym.utils import seeding\n'), ((2163, 2182), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['(50000)'], {}), '(50000)\n', (2175, 2182), False, 'from tianshou.data import Batch, Collector, ReplayBuffer\n'), ((2231, 2250), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['(50000)'], {}), '(50000)\n', (2243, 2250), False, 'from tianshou.data import Batch, Collector, ReplayBuffer\n'), ((2307, 2326), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', (['(50000)'], {}), '(50000)\n', (2319, 2326), False, 'from tianshou.data import Batch, Collector, ReplayBuffer\n'), ((2706, 2760), 'tianshou.data.Collector', 'Collector', (["data['policy']", "data['env']", "data['buffer']"], {}), "(data['policy'], data['env'], data['buffer'])\n", (2715, 2760), False, 'from tianshou.data import Batch, 
Collector, ReplayBuffer\n'), ((3111, 3169), 'tianshou.data.Collector', 'Collector', (["data['policy']", "data['env_vec']", "data['buffer']"], {}), "(data['policy'], data['env_vec'], data['buffer'])\n", (3120, 3169), False, 'from tianshou.data import Batch, Collector, ReplayBuffer\n'), ((3560, 3627), 'tianshou.data.Collector', 'Collector', (["data['policy']", "data['env_subproc_init']", "data['buffer']"], {}), "(data['policy'], data['env_subproc_init'], data['buffer'])\n", (3569, 3627), False, 'from tianshou.data import Batch, Collector, ReplayBuffer\n'), ((693, 714), 'numpy.zeros', 'np.zeros', (['(10, 10, 1)'], {}), '((10, 10, 1))\n', (701, 714), True, 'import numpy as np\n'), ((1718, 1751), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {'size': '(100)'}), '(1000, size=100)\n', (1735, 1751), True, 'import numpy as np\n'), ((1859, 1892), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {'size': '(100)'}), '(1000, size=100)\n', (1876, 1892), True, 'import numpy as np\n'), ((2019, 2052), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {'size': '(100)'}), '(1000, size=100)\n', (2036, 2052), True, 'import numpy as np\n')] |
import os, sys
sys.path.insert(0, os.path.join(os.pardir, 'src'))
def sympy_solution():
from sympy import symbols, Rational, solve
C1, C3, C4 = symbols('C1 C3 C4')
s = solve([C1 - 1 - C3,
C1 - Rational(1,2) - C3 - C4,
2 + 2*C3 + C4], [C1,C3,C4])
return s
import numpy as np
import matplotlib.pyplot as plt
def plot_exact_solution():
x = np.linspace(0, 2, 101)
u = exact_solution(x)
plt.plot(x, u)
plt.xlabel('$x$'); plt.ylabel('$u$')
ax = plt.gca(); ax.set_aspect('equal')
plt.savefig('tmp.png'); plt.savefig('tmp.pdf')
def exact_solution(x):
if isinstance(x, np.ndarray):
return np.where(x < 1, -1./4*x, 0.5*x**2 - 5./4*x + 0.5)
else:
return -1./4*x if x < 1 else 0.5*x**2 - 5./4*x + 0.5
def sine_solution(x, N):
from numpy import pi, sin
s = 0
u = [] # u[i] is the solution for N=i
for i in range(N+1):
if i % 4 == 0:
cos_min_cos = -1
elif (i-1) % 4 == 0:
cos_min_cos = 2
elif (i-2) % 4 == 0:
cos_min_cos = -1
elif (i-1) % 4 == 0:
cos_min_cos = 0
b_i = 2/(pi*(i+1))*cos_min_cos
A_ii = (i+1)**2*pi**2/4
c_i = b_i/A_ii
s += c_i*sin((i+1)*x*pi/2)
u.append(s.copy())
return u
def plot_sine_solution():
x = np.linspace(0, 2, 101)
u = sine_solution(x, N=20)
plt.figure()
x = np.linspace(0, 2, 101)
plt.plot(x, exact_solution(x), '--')
N_values = 0, 1, 5
for N in 0, 1, 5, 10:
plt.plot(x, u[N])
plt.legend(['exact'] + ['N=%d' % N for N in N_values])
plt.savefig('tmp2.png'); plt.savefig('tmp2.pdf')
def P1_solution():
plt.figure()
from fe1D import mesh_uniform, u_glob
N_e_values = [2, 4, 8]
d = 1
legends = []
for N_e in N_e_values:
vertices, cells, dof_map = mesh_uniform(
N_e=N_e, d=d, Omega=[0,2], symbolic=False)
h = vertices[1] - vertices[0]
Ae = 1./h*np.array(
[[1, -1],
[-1, 1]])
N = N_e + 1
A = np.zeros((N, N))
b = np.zeros(N)
for e in range(N_e):
if vertices[e] >= 1:
be = -h/2.*np.array(
[1, 1])
else:
be = h/2.*np.array(
[0, 0])
for r in range(d+1):
for s in range(d+1):
A[dof_map[e][r], dof_map[e][s]] += Ae[r,s]
b[dof_map[e][r]] += be[r]
# Enforce boundary conditions
A[0,:] = 0; A[0,0] = 1; b[0] = 0
A[-1,:] = 0; A[-1,-1] = 1; b[-1] = 0
c = np.linalg.solve(A, b)
# Plot solution
print('c:', c)
print('vertices:', vertices)
print('cells:', cells)
print('len(cells):', len(cells))
print('dof_map:', dof_map)
xc, u, nodes = u_glob(c, vertices, cells, dof_map)
plt.plot(xc, u)
legends.append('$N_e=%d$' % N_e)
plt.plot(xc, exact_solution(xc), '--')
legends.append('exact')
plt.legend(legends, loc='lower left')
plt.savefig('tmp3.png'); plt.savefig('tmp3.pdf')
if __name__ == '__main__':
print(sympy_solution())
plot_sine_solution()
P1_solution()
plt.show()
| [
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.sin",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"sympy.symbols",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.linalg.solve",
... | [((34, 64), 'os.path.join', 'os.path.join', (['os.pardir', '"""src"""'], {}), "(os.pardir, 'src')\n", (46, 64), False, 'import os, sys\n'), ((153, 172), 'sympy.symbols', 'symbols', (['"""C1 C3 C4"""'], {}), "('C1 C3 C4')\n", (160, 172), False, 'from sympy import symbols, Rational, solve\n'), ((375, 397), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(101)'], {}), '(0, 2, 101)\n', (386, 397), True, 'import numpy as np\n'), ((428, 442), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'u'], {}), '(x, u)\n', (436, 442), True, 'import matplotlib.pyplot as plt\n'), ((447, 464), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (457, 464), True, 'import matplotlib.pyplot as plt\n'), ((466, 483), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$u$"""'], {}), "('$u$')\n", (476, 483), True, 'import matplotlib.pyplot as plt\n'), ((493, 502), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (500, 502), True, 'import matplotlib.pyplot as plt\n'), ((531, 553), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""tmp.png"""'], {}), "('tmp.png')\n", (542, 553), True, 'import matplotlib.pyplot as plt\n'), ((556, 578), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""tmp.pdf"""'], {}), "('tmp.pdf')\n", (567, 578), True, 'import matplotlib.pyplot as plt\n'), ((1335, 1357), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(101)'], {}), '(0, 2, 101)\n', (1346, 1357), True, 'import numpy as np\n'), ((1393, 1405), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1403, 1405), True, 'import matplotlib.pyplot as plt\n'), ((1414, 1436), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(101)'], {}), '(0, 2, 101)\n', (1425, 1436), True, 'import numpy as np\n'), ((1557, 1613), 'matplotlib.pyplot.legend', 'plt.legend', (["(['exact'] + [('N=%d' % N) for N in N_values])"], {}), "(['exact'] + [('N=%d' % N) for N in N_values])\n", (1567, 1613), True, 'import matplotlib.pyplot as plt\n'), ((1616, 1639), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""tmp2.png"""'], {}), "('tmp2.png')\n", (1627, 1639), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1665), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""tmp2.pdf"""'], {}), "('tmp2.pdf')\n", (1653, 1665), True, 'import matplotlib.pyplot as plt\n'), ((1690, 1702), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1700, 1702), True, 'import matplotlib.pyplot as plt\n'), ((3047, 3084), 'matplotlib.pyplot.legend', 'plt.legend', (['legends'], {'loc': '"""lower left"""'}), "(legends, loc='lower left')\n", (3057, 3084), True, 'import matplotlib.pyplot as plt\n'), ((3089, 3112), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""tmp3.png"""'], {}), "('tmp3.png')\n", (3100, 3112), True, 'import matplotlib.pyplot as plt\n'), ((3114, 3137), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""tmp3.pdf"""'], {}), "('tmp3.pdf')\n", (3125, 3137), True, 'import matplotlib.pyplot as plt\n'), ((3241, 3251), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3249, 3251), True, 'import matplotlib.pyplot as plt\n'), ((652, 715), 'numpy.where', 'np.where', (['(x < 1)', '(-1.0 / 4 * x)', '(0.5 * x ** 2 - 5.0 / 4 * x + 0.5)'], {}), '(x < 1, -1.0 / 4 * x, 0.5 * x ** 2 - 5.0 / 4 * x + 0.5)\n', (660, 715), True, 'import numpy as np\n'), ((1535, 1552), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'u[N]'], {}), '(x, u[N])\n', (1543, 1552), True, 'import matplotlib.pyplot as plt\n'), ((1861, 1917), 'fe1D.mesh_uniform', 'mesh_uniform', ([], {'N_e': 'N_e', 'd': 'd', 'Omega': '[0, 2]', 'symbolic': '(False)'}), '(N_e=N_e, d=d, Omega=[0, 2], symbolic=False)\n', (1873, 1917), False, 'from fe1D import mesh_uniform, u_glob\n'), ((2073, 2089), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (2081, 2089), True, 'import numpy as np\n'), ((2102, 2113), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2110, 2113), True, 'import numpy as np\n'), ((2634, 2655), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (2649, 2655), True, 'import numpy as 
np\n'), ((2871, 2906), 'fe1D.u_glob', 'u_glob', (['c', 'vertices', 'cells', 'dof_map'], {}), '(c, vertices, cells, dof_map)\n', (2877, 2906), False, 'from fe1D import mesh_uniform, u_glob\n'), ((2915, 2930), 'matplotlib.pyplot.plot', 'plt.plot', (['xc', 'u'], {}), '(xc, u)\n', (2923, 2930), True, 'import matplotlib.pyplot as plt\n'), ((1242, 1267), 'numpy.sin', 'sin', (['((i + 1) * x * pi / 2)'], {}), '((i + 1) * x * pi / 2)\n', (1245, 1267), False, 'from numpy import pi, sin\n'), ((1986, 2014), 'numpy.array', 'np.array', (['[[1, -1], [-1, 1]]'], {}), '([[1, -1], [-1, 1]])\n', (1994, 2014), True, 'import numpy as np\n'), ((2203, 2219), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2211, 2219), True, 'import numpy as np\n'), ((2285, 2301), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2293, 2301), True, 'import numpy as np\n'), ((217, 231), 'sympy.Rational', 'Rational', (['(1)', '(2)'], {}), '(1, 2)\n', (225, 231), False, 'from sympy import symbols, Rational, solve\n')] |
"""A script testing the extraction pipeline of RHEA
Steps
1) Initialise Format, Extractor and RadialVelocity
2) Define file paths for science, flat and dark frames
3) Extract/import spectra
4) Create/import reference spectra
5) Calculate radial velocities
6) Plot radial velocities
"""
import numpy as np
try:
import pyfits
except:
import astropy.io.fits as pyfits
import pymfe
import glob
from astropy.coordinates import SkyCoord
from astropy import units as u
#===============================================================================
# Parameters/Constants/Variables/Initialisation
#===============================================================================
# Constants/Variables
do_bcor = False
med_cut = 0.6
# Specified header parameters
xbin = 2
ybin = 1
exptime = 120
badpixel_mask= pyfits.getdata('/priv/mulga1/jbento/rhea2_data/badpix.fits')
badpix=np.where(badpixel_mask==1)
# Initialise objects
rhea2_format = pymfe.rhea.Format()
rhea2_extract = pymfe.Extractor(rhea2_format, transpose_data=False,
badpixmask=badpix)
xx, wave, blaze = rhea2_format.spectral_format()
rv = pymfe.rv.RadialVelocity()
#===============================================================================
# File paths (Observations, Flats and Darks, save/load directories)
#===============================================================================
# Science Frames
star = "thar"
base_path = "/priv/mulga1/jbento/rhea2_data/gammaCrucis/"
# Find all Gacrux ThAr files and sort by observation date in MJD
all_files = np.array(glob.glob(base_path + "2015*/*" + star + "_*.fit*"))
sorted = np.argsort([pyfits.getheader(e)['JD'] for e in all_files])
all_files = all_files[sorted]
files = []
# Only consider files that have the same exposure time and correct binning
for f in all_files:
fits = pyfits.open(f)
header = fits[0].header
x_head = header["XBINNING"]
y_head = header["YBINNING"]
exp_head = header["EXPTIME"]
if x_head == xbin and y_head == ybin and exp_head == exptime:
files.append(f)
fits.close()
# Flats and Darks
dark_path = base_path + "Dark frames/Masterdark_thar.fit"
star_dark = pyfits.getdata(dark_path)
flat_dark = pyfits.getdata(base_path + "Dark frames/Masterdark_flat.fit")
# Note: this particular flat was chosen as it has an exposure time of 3 seconds,
# the same length as the flat dark that will be used to correct it
flat_path = base_path + "20150527/20150527_Masterflat.fit"
flat_files = [flat_path]*len(files)
# Extracted spectra output
out_path = "/priv/mulga1/arains/Gacrux_Extracted_ThAr/"
#extracted_files = np.array(glob.glob(out_path + "*" + star + "*extracted.fits"))
# Sort to account for files not being labelled with MJD
#sorted = np.argsort([pyfits.getheader(e)['JD'] for e in extracted_files])
#extracted_files = extracted_files[sorted]
# RV csv output
base_rv_path = out_path + star
#===============================================================================
# Extract and save spectra/load previously extracted spectra
#===============================================================================
# Extract spectra ("wave" removed)
# OPTION 1: Extract and save spectra
fluxes, vars, bcors, mjds = rv.extract_spectra(files, rhea2_extract,
star_dark=star_dark,
flat_files=flat_files,
flat_dark=flat_dark,
do_bcor=do_bcor)
# Save spectra (Make sure to save "wave" generated from rhea2_format)
rv.save_fluxes(files, fluxes, vars, bcors, wave, mjds, out_path)
# OPTION 2: Load previously extracted spectra
#fluxes, vars, wave, bcors, mjds = rv.load_fluxes(extracted_files)
extracted_files = np.array(glob.glob(out_path + "*" + star + "*extracted.fits"))
# Sort to account for files not being labelled with MJD
sorted = np.argsort([pyfits.getheader(e)['JD'] for e in extracted_files])
extracted_files = extracted_files[sorted]
#===============================================================================
# Create and save/import reference spectrum
#===============================================================================
# Number of frames to use for reference spectrum
# Load the first 10 observations to use as a reference
fluxes, vars, wave, bcors, mjds = rv.load_fluxes(extracted_files[:10])
wave_ref, ref_spect = rv.create_ref_spect(wave, fluxes, vars, bcors,
med_cut=med_cut)
rv.save_ref_spect(extracted_files[:10], ref_spect, vars, wave_ref, bcors, mjds,
out_path, star)
# OPTION 2: Import a pre-existing reference spectrum
#ref_spect, vars_ref, wave_ref, bcors_ref, mjds_ref = rv.load_ref_spect(ref_path)
#===============================================================================
# Barycentrically correct based on the sun's location from moment to moment
#===============================================================================
# This loop is messy and there is probably a nicer way to do this...but it works
# The Linux servers are not happy with opening much more than 100 files,
# crashing and displaying a too many files warning. This is despite each .fits
# file being closed when the data have been loaded from it. A similar issue does
# not occur when initially extracting the files (975 were extracted in one go
# with no issues).
# Parameters to process files in batches of "increment"
num_files = len(extracted_files)
num_rvs_extracted = 0
increment = 100
low = 0
high = increment
all_rvs_calculated = False
# Will be concatenated at end to give final arrays
rv_list = []
rv_sig_list = []
bcors_list = []
mjds_list = []
# Obviously cannot open more files than exist
if high > num_files:
high = num_files
while not all_rvs_calculated:
num_rvs_extracted += high - low
# Load in a segment of files
fluxes, vars, wave, bcors, mjds = rv.load_fluxes(extracted_files[low:high])
nf = fluxes.shape[0]
nm = fluxes.shape[1]
ny = fluxes.shape[2]
# Calculate the RVs
rvs, rv_sigs = rv.calculate_rv_shift(wave_ref, ref_spect, fluxes, vars,
bcors, wave)
rv_list.append(rvs)
rv_sig_list.append(rv_sigs)
bcors_list.append(bcors)
mjds_list.append(mjds)
# Move to next segment
low += increment
high += increment
if high > num_files:
high = num_files
if num_rvs_extracted == num_files:
all_rvs_calculated = True
# Done, join together and save
all_rvs = np.concatenate(rv_list)
all_rv_sigs = np.concatenate(rv_sig_list)
all_bcors = np.concatenate(bcors_list)
all_mjds = np.concatenate(mjds_list)
#===============================================================================
# Save the extracted radial velocities
#===============================================================================
# Save RVs
bcor_rvs = all_rvs + all_bcors.repeat(nm).reshape( (num_files,nm) )
rv.save_rvs(all_rvs, all_rv_sigs, all_bcors, all_mjds, bcor_rvs, base_rv_path)
| [
"astropy.io.fits.getheader",
"numpy.where",
"pymfe.rhea.Format",
"pymfe.Extractor",
"astropy.io.fits.getdata",
"numpy.concatenate",
"astropy.io.fits.open",
"pymfe.rv.RadialVelocity",
"glob.glob"
] | [((850, 910), 'astropy.io.fits.getdata', 'pyfits.getdata', (['"""/priv/mulga1/jbento/rhea2_data/badpix.fits"""'], {}), "('/priv/mulga1/jbento/rhea2_data/badpix.fits')\n", (864, 910), True, 'import astropy.io.fits as pyfits\n'), ((919, 947), 'numpy.where', 'np.where', (['(badpixel_mask == 1)'], {}), '(badpixel_mask == 1)\n', (927, 947), True, 'import numpy as np\n'), ((986, 1005), 'pymfe.rhea.Format', 'pymfe.rhea.Format', ([], {}), '()\n', (1003, 1005), False, 'import pymfe\n'), ((1023, 1093), 'pymfe.Extractor', 'pymfe.Extractor', (['rhea2_format'], {'transpose_data': '(False)', 'badpixmask': 'badpix'}), '(rhea2_format, transpose_data=False, badpixmask=badpix)\n', (1038, 1093), False, 'import pymfe\n'), ((1184, 1209), 'pymfe.rv.RadialVelocity', 'pymfe.rv.RadialVelocity', ([], {}), '()\n', (1207, 1209), False, 'import pymfe\n'), ((2277, 2302), 'astropy.io.fits.getdata', 'pyfits.getdata', (['dark_path'], {}), '(dark_path)\n', (2291, 2302), True, 'import astropy.io.fits as pyfits\n'), ((2316, 2377), 'astropy.io.fits.getdata', 'pyfits.getdata', (["(base_path + 'Dark frames/Masterdark_flat.fit')"], {}), "(base_path + 'Dark frames/Masterdark_flat.fit')\n", (2330, 2377), True, 'import astropy.io.fits as pyfits\n'), ((7158, 7181), 'numpy.concatenate', 'np.concatenate', (['rv_list'], {}), '(rv_list)\n', (7172, 7181), True, 'import numpy as np\n'), ((7197, 7224), 'numpy.concatenate', 'np.concatenate', (['rv_sig_list'], {}), '(rv_sig_list)\n', (7211, 7224), True, 'import numpy as np\n'), ((7238, 7264), 'numpy.concatenate', 'np.concatenate', (['bcors_list'], {}), '(bcors_list)\n', (7252, 7264), True, 'import numpy as np\n'), ((7277, 7302), 'numpy.concatenate', 'np.concatenate', (['mjds_list'], {}), '(mjds_list)\n', (7291, 7302), True, 'import numpy as np\n'), ((1628, 1679), 'glob.glob', 'glob.glob', (["(base_path + '2015*/*' + star + '_*.fit*')"], {}), "(base_path + '2015*/*' + star + '_*.fit*')\n", (1637, 1679), False, 'import glob\n'), ((1904, 1918), 
'astropy.io.fits.open', 'pyfits.open', (['f'], {}), '(f)\n', (1915, 1918), True, 'import astropy.io.fits as pyfits\n'), ((4085, 4137), 'glob.glob', 'glob.glob', (["(out_path + '*' + star + '*extracted.fits')"], {}), "(out_path + '*' + star + '*extracted.fits')\n", (4094, 4137), False, 'import glob\n'), ((1703, 1722), 'astropy.io.fits.getheader', 'pyfits.getheader', (['e'], {}), '(e)\n', (1719, 1722), True, 'import astropy.io.fits as pyfits\n'), ((4220, 4239), 'astropy.io.fits.getheader', 'pyfits.getheader', (['e'], {}), '(e)\n', (4236, 4239), True, 'import astropy.io.fits as pyfits\n')] |
# %%
import numpy as np
import pathlib as pth
import matplotlib.pyplot as plt
import re
try:
from pileupplots_utils import *
except:
from .pileupplots_utils import *
# %%
if __name__ == "__main__":
parser = argparser()
args = parser.parse_args()
data_path = pth.Path(args.vial_fld)
fig_path = pth.Path(args.fig_fld)
fig_path.mkdir(exist_ok=True)
# override show and save functions
show = show_function(args.show)
savefig = savefig_function(fig_path)
# %%
# data_path = pth.Path("../results/2022-02-08_RT_test/vial_04/")
# savefig = lambda x: None
# show = lambda: plt.show()
# get vial number
vial = re.search("vial_(\d+)/?$", str(data_path)).groups()[0]
print(f"preparing coverage plots for vial {vial}")
# load pileups and reference genome
pileups = load_pileups(data_path)
# evaluate coverages
coverages = {k: coverage(pp) for k, pp in pileups.items()}
# assign colors to timepoints
colors = color_dict(pileups)
# %%
# coverage plot
fig, axs = plt.subplot_mosaic("ABBBB", figsize=(20, 4))
# histogram of coverages
ax = axs["A"]
maxcov = max([max(cov) for cov in coverages.values()])
bins = np.arange(maxcov)
cumulative_histograms(
coverages, ax, colors, plotmeans=True, bins=bins, cumulative=True
)
ax.legend(loc="upper left", title="time")
ax.set_xlabel("coverage per site")
ax.set_ylabel("n. sites (cumulative)")
# coverage along the genome (mean of every kbp)
ax = axs["B"]
ax.set_title(f"vial {vial}")
step = 1000
for tp, cov in coverages.items():
cov_m = cov / cov.mean()
x, subcov = average_every_step(cov_m, step)
ax.plot(x, subcov, label=tp, color=colors[tp], alpha=0.8)
# guidelines (mean and 2 std)
cov_mat = np.vstack(list(coverages.values()))
cov_rescaled = (cov_mat.T / cov_mat.mean(axis=1)).T
cov_mean = cov_rescaled.mean(axis=0)
cov_std = cov_rescaled.std(axis=0)
step = 50000
xm, cov_mean = average_every_step(cov_mean, step)
xs, cov_std = average_every_step(cov_std, step)
ax.plot(xm, cov_mean, "--", color="gray")
ax.plot(xm, cov_mean - 2 * cov_std, ":", color="gray")
ax.plot(xm, cov_mean + 2 * cov_std, ":", color="gray")
ax.set_xlabel("position on the genome (bp)")
ax.set_ylabel(f"coverage ({step} bp mean) / mean coverage")
plt.tight_layout()
savefig("coverage.pdf")
show()
# %%
| [
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot_mosaic",
"numpy.arange",
"pathlib.Path"
] | [((284, 307), 'pathlib.Path', 'pth.Path', (['args.vial_fld'], {}), '(args.vial_fld)\n', (292, 307), True, 'import pathlib as pth\n'), ((323, 345), 'pathlib.Path', 'pth.Path', (['args.fig_fld'], {}), '(args.fig_fld)\n', (331, 345), True, 'import pathlib as pth\n'), ((1065, 1109), 'matplotlib.pyplot.subplot_mosaic', 'plt.subplot_mosaic', (['"""ABBBB"""'], {'figsize': '(20, 4)'}), "('ABBBB', figsize=(20, 4))\n", (1083, 1109), True, 'import matplotlib.pyplot as plt\n'), ((1228, 1245), 'numpy.arange', 'np.arange', (['maxcov'], {}), '(maxcov)\n', (1237, 1245), True, 'import numpy as np\n'), ((2416, 2434), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2432, 2434), True, 'import matplotlib.pyplot as plt\n')] |
from typing import Union
import numpy as np
import talib
from jesse.helpers import get_candle_source
def efi(candles: np.ndarray, period: int = 13, source_type: str = "close", sequential: bool = False) -> Union[
float, np.ndarray]:
"""
EFI - Elders Force Index
:param candles: np.ndarray
:param period: int - default: 13
:param source_type: str - default: "close"
:param sequential: bool - default=False
:return: float | np.ndarray
"""
if not sequential and len(candles) > 240:
candles = candles[-240:]
source = get_candle_source(candles, source_type=source_type)
dif = np.zeros(len(source) - 1)
for i in range(1, len(source)):
dif[i - 1] = (source[i] - source[i - 1]) * candles[:, 5][i]
res = talib.EMA(dif, timeperiod=period)
res_with_nan = np.concatenate((np.full((candles.shape[0] - res.shape[0]), np.nan), res))
return res_with_nan if sequential else res_with_nan[-1]
| [
"numpy.full",
"talib.EMA",
"jesse.helpers.get_candle_source"
] | [((571, 622), 'jesse.helpers.get_candle_source', 'get_candle_source', (['candles'], {'source_type': 'source_type'}), '(candles, source_type=source_type)\n', (588, 622), False, 'from jesse.helpers import get_candle_source\n'), ((775, 808), 'talib.EMA', 'talib.EMA', (['dif'], {'timeperiod': 'period'}), '(dif, timeperiod=period)\n', (784, 808), False, 'import talib\n'), ((844, 892), 'numpy.full', 'np.full', (['(candles.shape[0] - res.shape[0])', 'np.nan'], {}), '(candles.shape[0] - res.shape[0], np.nan)\n', (851, 892), True, 'import numpy as np\n')] |
from numpy import column_stack, savetxt
import os
lf = os.linesep#determing the linefeed for the operating system ('\n' for Linux or '\r\n' for Windows)
def _dump_matrix(f, matrix, fmt='%0.10g', delim='\t'):
savetxt(f, matrix, fmt=fmt, delimiter=delim)
return f
def _dump_vectors(f, vectorlist, fmt='%0.10g', delim='\t'):
mymatrix = column_stack(vectorlist)#create a matrix with each vector as a column
_dump_matrix(f, mymatrix, fmt=fmt, delim=delim)
def _open_file(filename):
f = open(filename, 'w')#open a file for writing. Caution: this will overwrite an existing file
return f
def _save_labels(f, labels, delim='\t', comment_labels=False):
if comment_labels:
first_label = labels[0]
if first_label[0] != '#':
labels[0] = '#' + first_label
firstrow = delim.join(labels)#create a delimited first row of the labels
f.write(firstrow+'\n')#write the first row and a line feed to the file
return f
def _save_comments(f, comments):
if comments:
if type(comments) == str:
comments = [comments]
for line in comments:
if line[0] != '#':
line = '#' + line
f.write(line + '\n')
return f
def dump_vectors(filename, vectorlist, labels, fmt='%0.10g', delim='\t', \
comments=None, comment_labels=False):
"""Dump a list of vectors to a text file where each vector is a
columnm in a spreadsheet style text file.
filename is a string containing the filename or path to the file
to be saved. It will be overwritten if it exists.
labels is a list of strings containing the labels for the columns
in the file.
fmt is the format for converting numbers to strings when creating
the text file.
delim is the delimiter to use between columns of the file. The
default is a tab."""
f = _open_file(filename)
_save_comments(f,comments)
_save_labels(f, labels, delim=delim, comment_labels=comment_labels)
_dump_vectors(f, vectorlist, fmt=fmt, delim=delim)
f.close()#close the file
def dump_matrix(filename, matrix, labels, fmt='%0.10g', delim='\t', \
comments=None, comment_labels=False):
"""Similar to dump_vectors, but the data is already a matrix that
needs to be dumped to a file with a lable row."""
f = _open_file(filename)
_save_comments(f,comments)
_save_labels(f, labels, delim=delim, comment_labels=comment_labels)
_dump_matrix(f, matrix, fmt=fmt, delim=delim)
f.close()#close the file
| [
"numpy.column_stack",
"numpy.savetxt"
] | [((215, 259), 'numpy.savetxt', 'savetxt', (['f', 'matrix'], {'fmt': 'fmt', 'delimiter': 'delim'}), '(f, matrix, fmt=fmt, delimiter=delim)\n', (222, 259), False, 'from numpy import column_stack, savetxt\n'), ((353, 377), 'numpy.column_stack', 'column_stack', (['vectorlist'], {}), '(vectorlist)\n', (365, 377), False, 'from numpy import column_stack, savetxt\n')] |
import numpy as np
import torch
from scipy.stats import entropy as sc_entropy
class MultipredictionEntropy:
def __int__(self):
"""
Computes the entropy on multiple predictions of the same batch.
"""
super(MultipredictionEntropy, self).__init__()
def __call__(self, y, device='cpu'):
entr = []
for y in torch.argmax(y, dim=-1).transpose(dim0=0, dim1=1):
entr += [sc_entropy((np.unique(y, return_counts=True)[1] / y.shape[-1]), base=2)]
return torch.tensor(entr)
if __name__ == '__main__':
y = torch.tensor(
[
[ # pred 1
[.7, .3, .1],
[.7, .3, .1],
[.7, .3, .2]
],
[ # pred 2
[.4, .6, .3],
[.4, .6, .4],
[.6, .4, .3]
],
[ # pred 3
[.4, .6, .2],
[.6, .4, .8],
[.6, .4, .7]
],
[ # pred 4
[.1, .9, .3],
[.1, .9, .3],
[.1, .9, .3]
]
]
)
entropy_estimation = MultipredictionEntropy()
print(entropy_estimation(y)) | [
"torch.tensor",
"numpy.unique",
"torch.argmax"
] | [((575, 816), 'torch.tensor', 'torch.tensor', (['[[[0.7, 0.3, 0.1], [0.7, 0.3, 0.1], [0.7, 0.3, 0.2]], [[0.4, 0.6, 0.3], [\n 0.4, 0.6, 0.4], [0.6, 0.4, 0.3]], [[0.4, 0.6, 0.2], [0.6, 0.4, 0.8], [\n 0.6, 0.4, 0.7]], [[0.1, 0.9, 0.3], [0.1, 0.9, 0.3], [0.1, 0.9, 0.3]]]'], {}), '([[[0.7, 0.3, 0.1], [0.7, 0.3, 0.1], [0.7, 0.3, 0.2]], [[0.4, \n 0.6, 0.3], [0.4, 0.6, 0.4], [0.6, 0.4, 0.3]], [[0.4, 0.6, 0.2], [0.6, \n 0.4, 0.8], [0.6, 0.4, 0.7]], [[0.1, 0.9, 0.3], [0.1, 0.9, 0.3], [0.1, \n 0.9, 0.3]]])\n', (587, 816), False, 'import torch\n'), ((519, 537), 'torch.tensor', 'torch.tensor', (['entr'], {}), '(entr)\n', (531, 537), False, 'import torch\n'), ((362, 385), 'torch.argmax', 'torch.argmax', (['y'], {'dim': '(-1)'}), '(y, dim=-1)\n', (374, 385), False, 'import torch\n'), ((443, 475), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (452, 475), True, 'import numpy as np\n')] |
import time, math, cmath
import numpy as np
from functools import reduce
from qiskit import *
from qiskit.quantum_info import Statevector
from circuit_builder import CircuitBuilder
from agent import Agent
class QRPS_Agent(Agent):
    """Quantum Reflective Projective Simulation (RPS) agent.

    Action probabilities are encoded into a quantum state and amplified
    towards the currently "flagged" (promising) actions with Grover-style
    reflections before sampling.  ``rank_one`` amplifies a stationary
    distribution directly; ``rank_two`` is a Szegedy-walk style variant
    driven by a full transition matrix.
    """

    def __init__(self, backend):
        self.backend = backend
        # state -> (weights, flags, glow); see update_values for semantics
        self.memory = {}
        self.gamma = 0.0  # forgetting rate for the h-values
        self.n = 0.05     # glow decay rate

    def act(self, env):
        """Sample an action for the current env state, execute it and learn."""
        env_state = env.state()
        env_actions = env.actions()
        # If only one action is available, there's nothing to learn here
        if len(env_actions) == 1:
            env.step(env_actions[0])
            return
        # Add to memory.  Weights are float so fractional reward updates are
        # not truncated, and glow starts as one value per action (a scalar
        # here would break update_values' enumerate()).
        if env_state not in self.memory:
            self.memory[env_state] = (np.ones(len(env_actions)),
                                      np.arange(len(env_actions)),
                                      np.zeros(len(env_actions)))
        weights, flags, glow = self.memory[env_state]
        sum_weights = np.sum(weights)
        prob = np.array([h / sum_weights for h in weights])
        print('Pr:', prob)
        print('Flags:', flags)
        # Quantum deliberation: re-sample until a flagged action comes up
        max_tries = 3
        action_index = None
        for _ in range(max_tries):
            action_index = self.rank_one(prob, flags, debug=False)
            if action_index in flags:
                break
        reward = env.step(env_actions[action_index])
        print("Action:", env_actions[action_index], reward)
        self.update_values(env_state, env_actions, action_index, reward)

    def update_values(self, state, actions, action_index, reward):
        """Update the weights, flags and glow values of `state` according
        to the received reward."""
        weights, flags, glows = self.memory[state]
        # Glow: reset to 1 for the chosen action, decay all the others.
        glows = np.array([1.0 if action_index == i else (1 - self.n) * g
                          for i, g in enumerate(glows)])
        weights[action_index] = (weights[action_index]
                                 - self.gamma * (weights[action_index] - 1)
                                 + glows[action_index] * reward)
        # A negative reward removes the chosen action from the flagged set.
        # Remove by *value*: np.delete(flags, action_index) removes by
        # position and misbehaves (or raises) once flags has shrunk.
        flags = flags[flags != action_index] if reward < 0.0 else flags
        if len(flags) == 0:
            # Everything was un-flagged: re-flag every action except the one
            # just punished.  (Use != -- `is not` on ints is unreliable.)
            flags = np.array([i for i in range(len(actions)) if i != action_index])
        self.memory[state] = weights, flags, glows

    def prob_to_angles(self, prob, previous=1):
        """Calculate the rotation angles that encode `prob` (whose length
        must be a power of two) by recursively halving the distribution."""
        def calc_angle(x):
            return 2 * math.acos(math.sqrt(x))
        if len(prob) == 2:
            # A zero-mass parent branch contributes an arbitrary angle (0)
            return [calc_angle(prob[0] / previous)] if previous != 0 else [0]
        lhs, rhs = np.split(prob, 2)
        angles = np.array([calc_angle(np.sum(lhs) / previous)])
        angles = np.append(angles, self.prob_to_angles(lhs, previous=np.sum(lhs)))
        angles = np.append(angles, self.prob_to_angles(rhs, previous=np.sum(rhs)))
        return angles

    def rank_one(self, prob, flags, debug=False):
        "Rank-one implementation of Reflective Projective Simulation"
        num_qubits = math.ceil(math.log(len(prob), 2))
        # Pad the distribution to length 2**num_qubits
        if len(prob) != 2**num_qubits:
            prob = np.append(prob, [0] * (2**num_qubits - len(prob)))
        # Epsilon (total probability of the flagged actions)
        epsilon = reduce(lambda e, i: e + prob[i], flags, 0.0)
        # State preparation
        U = CircuitBuilder().get_U(num_qubits, self.prob_to_angles(prob)).to_instruction()
        # Quantum circuit
        qreg = QuantumRegister(num_qubits, name='q')
        circ = QuantumCircuit(qreg)
        # Encode stationary distribution
        circ.append(U, qreg)
        # Grover iteration count.  NOTE(review): epsilon == 0 divides by
        # zero here -- callers must pass flags carrying some probability.
        k = math.floor(math.pi / (4 * math.sqrt(epsilon)))
        for _ in range(k):
            # Reflection around the flagged actions
            circ.diagonal([-1 if i in flags else 1 for i in range(2**num_qubits)], qreg)
            # Reflection around the stationary distribution
            circ.append(U.inverse(), qreg)
            circ.x(qreg)
            if num_qubits == 1:
                circ.z(qreg)
            else:
                # multi-controlled Z built as H . MCX . H on the last qubit
                circ.h(qreg[-1])
                circ.mcx(qreg[:-1], qreg[-1])
                circ.h(qreg[-1])
            circ.x(qreg)
            circ.append(U, qreg)
        if debug:
            print(circ.draw(fold=140))
            circ.snapshot('sv')
        # Sample from the amplified distribution
        circ.measure_all()
        result = execute(circ, backend=self.backend, shots=1).result()
        if debug:
            resulting_sv = result.data()['snapshots']['statevector']['sv'][0]
            print(Statevector(resulting_sv).probabilities_dict())
        counts = result.get_counts(circ)
        action_index = max(counts, key=counts.get)
        return int(action_index, 2)

    def rank_two(self, transitions, flags, debug=False):
        """Rank-two (Szegedy-walk style) deliberation driven by a full
        transition matrix.  Experimental."""
        eigvals = np.linalg.eigvals(transitions)
        eigvals.sort()
        num_qubits = int(math.log2(len(transitions)))
        num_ancilla = math.ceil(math.log2(1 / math.sqrt(1 - abs(eigvals[-2])))) + 1
        # Stationary distribution = eigenvector for eigenvalue 1
        S, U = np.linalg.eig(transitions)
        stat_distr = np.array(U[:, np.where(np.abs(S - 1.) < 1e-8)[0][0]].flat)
        stat_distr = stat_distr / np.sum(stat_distr)
        print(stat_distr)
        # Epsilon (total probability of the flagged actions)
        epsilon = reduce(lambda e, i: e + stat_distr[i], flags, 0.0)
        # Reverse (time-reversed) transition matrix
        rev_transitions = transitions * np.array(stat_distr)
        rev_transitions = rev_transitions.transpose() / np.array(stat_distr)
        # Angles
        stat_angles = self.prob_to_angles(stat_distr)
        angles = np.concatenate([self.prob_to_angles(transitions[:,i]) for i in range(2**num_qubits)])
        rev_angles = np.concatenate([self.prob_to_angles(rev_transitions[:,i]) for i in range(2**num_qubits)])
        # Quantum circuit
        anc = AncillaRegister(num_ancilla, 'anc')
        qreg1 = QuantumRegister(num_qubits, 'reg1')
        qreg2 = QuantumRegister(num_qubits, 'reg2')
        creg = ClassicalRegister(num_qubits, 'creg')
        circ = QuantumCircuit(anc, qreg1, qreg2, creg)
        # Encode stationary distribution
        U = CircuitBuilder().get_U(num_qubits, stat_angles)
        circ.append(U.to_instruction(), qreg1)
        Up = CircuitBuilder().get_Up(num_qubits, angles)
        circ.append(Up.to_instruction(), qreg1[:] + qreg2[:])
        ARO = CircuitBuilder().get_ARO(num_qubits, num_ancilla)
        k = math.floor(math.pi / (4 * math.sqrt(epsilon)))
        for _ in range(k):
            circ.diagonal([-1 if i in flags else 1 for i in range(2**num_qubits)], qreg1)
            circ.append(ARO, anc[:] + qreg1[:] + qreg2[:])
        print(circ.draw(fold=240))
        circ.measure(qreg1, creg)
        # Bind transition angles
        parameters = CircuitBuilder().get_parameters(num_qubits)
        binds = dict(zip(parameters, np.concatenate([angles, rev_angles])))
        start = time.time()
        result = execute(circ, backend=self.backend, shots=2048, parameter_binds=[binds]).result()
        end = time.time()
        if debug:
            resulting_sv = result.data()['snapshots']['statevector']['sv'][0]
            print(Statevector(resulting_sv).probabilities_dict())
        print("RUN took:", end - start)
        print(result.get_counts(circ))
| [
"circuit_builder.CircuitBuilder",
"numpy.abs",
"numpy.linalg.eig",
"qiskit.quantum_info.Statevector",
"functools.reduce",
"numpy.delete",
"math.sqrt",
"numpy.sum",
"numpy.array",
"numpy.linalg.eigvals",
"numpy.split",
"numpy.concatenate",
"time.time"
] | [((1171, 1186), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (1177, 1186), True, 'import numpy as np\n'), ((1202, 1248), 'numpy.array', 'np.array', (['[(h / sum_weights) for h in weights]'], {}), '([(h / sum_weights) for h in weights])\n', (1210, 1248), True, 'import numpy as np\n'), ((2751, 2768), 'numpy.split', 'np.split', (['prob', '(2)'], {}), '(prob, 2)\n', (2759, 2768), True, 'import numpy as np\n'), ((3437, 3481), 'functools.reduce', 'reduce', (['(lambda e, i: e + prob[i])', 'flags', '(0.0)'], {}), '(lambda e, i: e + prob[i], flags, 0.0)\n', (3443, 3481), False, 'from functools import reduce\n'), ((5020, 5050), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['transitions'], {}), '(transitions)\n', (5037, 5050), True, 'import numpy as np\n'), ((5263, 5289), 'numpy.linalg.eig', 'np.linalg.eig', (['transitions'], {}), '(transitions)\n', (5276, 5289), True, 'import numpy as np\n'), ((5520, 5570), 'functools.reduce', 'reduce', (['(lambda e, i: e + stat_distr[i])', 'flags', '(0.0)'], {}), '(lambda e, i: e + stat_distr[i], flags, 0.0)\n', (5526, 5570), False, 'from functools import reduce\n'), ((7197, 7208), 'time.time', 'time.time', ([], {}), '()\n', (7206, 7208), False, 'import time, math, cmath\n'), ((7324, 7335), 'time.time', 'time.time', ([], {}), '()\n', (7333, 7335), False, 'import time, math, cmath\n'), ((2201, 2231), 'numpy.delete', 'np.delete', (['flags', 'action_index'], {}), '(flags, action_index)\n', (2210, 2231), True, 'import numpy as np\n'), ((5404, 5422), 'numpy.sum', 'np.sum', (['stat_distr'], {}), '(stat_distr)\n', (5410, 5422), True, 'import numpy as np\n'), ((5648, 5668), 'numpy.array', 'np.array', (['stat_distr'], {}), '(stat_distr)\n', (5656, 5668), True, 'import numpy as np\n'), ((5725, 5745), 'numpy.array', 'np.array', (['stat_distr'], {}), '(stat_distr)\n', (5733, 5745), True, 'import numpy as np\n'), ((6379, 6395), 'circuit_builder.CircuitBuilder', 'CircuitBuilder', ([], {}), '()\n', (6393, 6395), False, 'from 
circuit_builder import CircuitBuilder\n'), ((6488, 6504), 'circuit_builder.CircuitBuilder', 'CircuitBuilder', ([], {}), '()\n', (6502, 6504), False, 'from circuit_builder import CircuitBuilder\n'), ((6609, 6625), 'circuit_builder.CircuitBuilder', 'CircuitBuilder', ([], {}), '()\n', (6623, 6625), False, 'from circuit_builder import CircuitBuilder\n'), ((7060, 7076), 'circuit_builder.CircuitBuilder', 'CircuitBuilder', ([], {}), '()\n', (7074, 7076), False, 'from circuit_builder import CircuitBuilder\n'), ((7141, 7177), 'numpy.concatenate', 'np.concatenate', (['[angles, rev_angles]'], {}), '([angles, rev_angles])\n', (7155, 7177), True, 'import numpy as np\n'), ((2603, 2615), 'math.sqrt', 'math.sqrt', (['x'], {}), '(x)\n', (2612, 2615), False, 'import time, math, cmath\n'), ((2902, 2913), 'numpy.sum', 'np.sum', (['lhs'], {}), '(lhs)\n', (2908, 2913), True, 'import numpy as np\n'), ((2985, 2996), 'numpy.sum', 'np.sum', (['rhs'], {}), '(rhs)\n', (2991, 2996), True, 'import numpy as np\n'), ((3828, 3846), 'math.sqrt', 'math.sqrt', (['epsilon'], {}), '(epsilon)\n', (3837, 3846), False, 'import time, math, cmath\n'), ((6698, 6716), 'math.sqrt', 'math.sqrt', (['epsilon'], {}), '(epsilon)\n', (6707, 6716), False, 'import time, math, cmath\n'), ((2807, 2818), 'numpy.sum', 'np.sum', (['lhs'], {}), '(lhs)\n', (2813, 2818), True, 'import numpy as np\n'), ((3523, 3539), 'circuit_builder.CircuitBuilder', 'CircuitBuilder', ([], {}), '()\n', (3537, 3539), False, 'from circuit_builder import CircuitBuilder\n'), ((4766, 4791), 'qiskit.quantum_info.Statevector', 'Statevector', (['resulting_sv'], {}), '(resulting_sv)\n', (4777, 4791), False, 'from qiskit.quantum_info import Statevector\n'), ((7451, 7476), 'qiskit.quantum_info.Statevector', 'Statevector', (['resulting_sv'], {}), '(resulting_sv)\n', (7462, 7476), False, 'from qiskit.quantum_info import Statevector\n'), ((5334, 5349), 'numpy.abs', 'np.abs', (['(S - 1.0)'], {}), '(S - 1.0)\n', (5340, 5349), True, 'import numpy as np\n')] |
import logging
import os
import numpy as np
import tensorflow as tf
import sys
def create_log(name):
    """Create a DEBUG-level logger writing both to the file *name* and to
    standard output.

    ``logging.getLogger(name)`` returns the same logger on every call, so
    stale handlers are removed first -- otherwise calling ``create_log``
    twice would emit every record multiple times.  Handlers are closed
    before the old log file is deleted so the file is not still held open.

    :param name: path of the log file (also used as the logger name)
    :return: the configured ``logging.Logger``
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # Drop handlers left over from a previous call to avoid duplicates.
    for old_handler in list(logger.handlers):
        logger.removeHandler(old_handler)
        old_handler.close()
    # Start from a fresh log file.
    if os.path.exists(name):
        os.remove(name)
    formatter = logging.Formatter("H1, %(asctime)s %(levelname)8s %(message)s")
    # handler for logger file
    handler1 = logging.FileHandler(name)
    handler1.setFormatter(formatter)
    # handler for standard output
    handler2 = logging.StreamHandler()
    handler2.setFormatter(formatter)
    logger.addHandler(handler1)
    logger.addHandler(handler2)
    return logger
def mnist_loader():
    """Fetch the one-hot encoded MNIST dataset (downloading it into
    ``MNIST_data`` if necessary) and return it together with the number
    of training examples."""
    from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
    dataset = read_data_sets('MNIST_data', one_hot=True)
    return dataset, dataset.train.num_examples
def shape_2d(_x, batch_size, side=28):
    """Reshape a flat batch of square images into NHWC layout with one channel.

    :param _x: array holding ``batch_size * side * side`` values
    :param batch_size: number of images in the batch
    :param side: image height/width in pixels (default 28, i.e. MNIST)
    :return: array of shape ``(batch_size, side, side, 1)``
    """
    _x = _x.reshape(batch_size, side, side)
    # append the trailing channel axis expected by conv layers
    return np.expand_dims(_x, 3)
def mnist_train(model, epoch, save_path="./", mode="supervised", input_image=True):
    """Train `model` on MNIST with mini-batches and save checkpoints.

    :param model: object exposing a TF session (``sess``), graph tensors
        (``x``, ``y``, ``loss``, ``train``, ...), a ``saver``, a ``writer``,
        ``batch_size``, ``learning_rate`` and ``max_grad_norm``
        -- assumed interface, TODO confirm against the model class.
    :param epoch: number of training epochs
    :param save_path: directory for logs, checkpoints and npz metrics
    :param mode: one of "conditional", "supervised", "unsupervised"
    :param input_image: if True, reshape flat inputs to 2D images (CNN front end)
    :return: None (results are written to disk under `save_path`)
    """
    # load mnist
    data, n = mnist_loader()
    n_iter = int(n / model.batch_size)
    # logger; NOTE(review): string concatenation, not os.path.join, so
    # save_path must end with a separator for the log to land inside it.
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    logger = create_log(save_path+"log")
    logger.info("train: data size(%i), batch num(%i), batch size(%i)" % (n, n_iter, model.batch_size))
    result = []
    # Initializing the tensor flow variables
    model.sess.run(tf.global_variables_initializer())
    for _e in range(epoch):
        _result = []
        for _b in range(n_iter):
            # fetch one mini-batch; reshape to NHWC if the model is a CNN
            _x, _y = data.train.next_batch(model.batch_size)
            _x = shape_2d(_x, model.batch_size) if input_image else _x
            if mode in ["conditional", "unsupervised"]:  # VAE-style models: track reconstruction + latent loss
                feed_val = [model.summary, model.loss, model.re_loss, model.latent_loss, model.train]
                feed_dict = {model.x: _x, model.y: _y} if mode == "conditional" else {model.x: _x}
                summary, loss, re_loss, latent_loss, _ = model.sess.run(feed_val, feed_dict=feed_dict)
                __result = [loss, re_loss, latent_loss]
            elif mode == "supervised":  # classifier: track loss + accuracy
                feed_val = [model.summary, model.loss, model.accuracy, model.train]
                feed_dict = {model.x: _x, model.y: _y, model.is_training: True}
                summary, loss, acc, _ = model.sess.run(feed_val, feed_dict=feed_dict)
                __result = [loss, acc]
            else:
                sys.exit("unknown mode !")
            _result.append(__result)
            model.writer.add_summary(summary, int(_b + _e * model.batch_size))
        # end-of-epoch validation (supervised mode evaluates the full test set)
        if mode == "supervised":  # supervised model
            _x = shape_2d(data.test.images, data.test.num_examples) if input_image else data.test.image
            _y = data.test.labels
            feed_dict = {model.x: _x, model.y: _y, model.is_training: False}
            loss, acc = model.sess.run([model.loss, model.accuracy], feed_dict=feed_dict)
            # epoch record = mean train metrics followed by test [loss, acc]
            _result = np.append(np.mean(_result, 0), [loss, acc])
            logger.info("epoch %i: acc %0.3f, loss %0.3f, train acc %0.3f, train loss %0.3f"
                        % (_e, acc, loss, _result[1], _result[0]))
        else:
            _result = np.mean(_result, 0)
            logger.info("epoch %i: loss %0.3f, re loss %0.3f, latent loss %0.3f"
                        % (_e, _result[0], _result[1], _result[2]))
        result.append(_result)
        # periodic checkpoint + metrics dump every 50 epochs (incl. epoch 0)
        if _e % 50 == 0:
            model.saver.save(model.sess, "%s/progress-%i-model.ckpt" % (save_path, _e))
            np.savez("%s/progress-%i-acc.npz" % (save_path, _e), loss=np.array(result),
                     learning_rate=model.learning_rate, epoch=epoch, batch_size=model.batch_size,
                     clip=model.max_grad_norm)
    # final checkpoint and metric history
    model.saver.save(model.sess, "%s/model.ckpt" % save_path)
    np.savez("%s/acc.npz" % save_path, loss=np.array(result), learning_rate=model.learning_rate, epoch=epoch,
             batch_size=model.batch_size, clip=model.max_grad_norm)
| [
"logging.getLogger",
"os.path.exists",
"tensorflow.contrib.learn.python.learn.datasets.mnist.read_data_sets",
"logging.StreamHandler",
"numpy.mean",
"logging.Formatter",
"tensorflow.global_variables_initializer",
"numpy.array",
"logging.FileHandler",
"os.mkdir",
"numpy.expand_dims",
"sys.exit"... | [((129, 149), 'os.path.exists', 'os.path.exists', (['name'], {}), '(name)\n', (143, 149), False, 'import os\n'), ((188, 211), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (205, 211), False, 'import logging\n'), ((292, 317), 'logging.FileHandler', 'logging.FileHandler', (['name'], {}), '(name)\n', (311, 317), False, 'import logging\n'), ((458, 481), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (479, 481), False, 'import logging\n'), ((773, 815), 'tensorflow.contrib.learn.python.learn.datasets.mnist.read_data_sets', 'read_data_sets', (['"""MNIST_data"""'], {'one_hot': '(True)'}), "('MNIST_data', one_hot=True)\n", (787, 815), False, 'from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets\n'), ((966, 987), 'numpy.expand_dims', 'np.expand_dims', (['_x', '(3)'], {}), '(_x, 3)\n', (980, 987), True, 'import numpy as np\n'), ((159, 174), 'os.remove', 'os.remove', (['name'], {}), '(name)\n', (168, 174), False, 'import os\n'), ((344, 407), 'logging.Formatter', 'logging.Formatter', (['"""H1, %(asctime)s %(levelname)8s %(message)s"""'], {}), "('H1, %(asctime)s %(levelname)8s %(message)s')\n", (361, 407), False, 'import logging\n'), ((508, 571), 'logging.Formatter', 'logging.Formatter', (['"""H1, %(asctime)s %(levelname)8s %(message)s"""'], {}), "('H1, %(asctime)s %(levelname)8s %(message)s')\n", (525, 571), False, 'import logging\n'), ((1434, 1459), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (1448, 1459), False, 'import os\n'), ((1469, 1488), 'os.mkdir', 'os.mkdir', (['save_path'], {}), '(save_path)\n', (1477, 1488), False, 'import os\n'), ((1713, 1746), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1744, 1746), True, 'import tensorflow as tf\n'), ((3600, 3619), 'numpy.mean', 'np.mean', (['_result', '(0)'], {}), '(_result, 0)\n', (3607, 3619), True, 'import numpy as np\n'), ((4253, 4269), 'numpy.array', 
'np.array', (['result'], {}), '(result)\n', (4261, 4269), True, 'import numpy as np\n'), ((3370, 3389), 'numpy.mean', 'np.mean', (['_result', '(0)'], {}), '(_result, 0)\n', (3377, 3389), True, 'import numpy as np\n'), ((2815, 2841), 'sys.exit', 'sys.exit', (['"""unknown mode !"""'], {}), "('unknown mode !')\n", (2823, 2841), False, 'import sys\n'), ((3984, 4000), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (3992, 4000), True, 'import numpy as np\n')] |
import os
import random
import time
import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import scipy.io.wavfile as wavfile
import matplotlib
from mir_eval.separation import bss_eval_sources
from arguments import ArgParser
from dataset import MUSICMixDataset
from models import ModelBuilder, activate
from utils import AverageMeter, \
recover_rgb, magnitude2heatmap,\
istft_reconstruction, warpgrid, \
combine_video_audio, save_video, makedirs
from viz import plot_loss_loc_sep_acc_metrics
import matplotlib.pyplot as plt
import soundfile
import cv2
# Network wrapper, defines forward pass
class NetWrapper1(torch.nn.Module):
    """Wraps the sound network: warps spectrograms to a log-frequency
    grid (optional), builds ground-truth masks and loss weights, and runs
    the sound net on the log-magnitude mixture."""
    def __init__(self, nets):
        super(NetWrapper1, self).__init__()
        self.net_sound = nets
    def forward(self, mags, mag_mix, args):
        """Return the sound feature map plus a dict with the (possibly
        warped) mixture, component magnitudes, GT masks and loss weights.

        :param mags: list of per-source magnitude spectrograms (len == args.num_mix)
        :param mag_mix: magnitude spectrogram of the mixture, shape (B, 1, F, T)
        :param args: run configuration (num_mix, log_freq, weighted_loss, ...)
        """
        # epsilon keeps the later log() and mask division well defined
        mag_mix = mag_mix + 1e-10
        N = args.num_mix
        B = mag_mix.size(0)
        T = mag_mix.size(3)
        # warp the spectrogram onto a 256-bin log-frequency grid
        if args.log_freq:
            grid_warp = torch.from_numpy(
                warpgrid(B, 256, T, warp=True)).to(args.device)
            mag_mix = F.grid_sample(mag_mix, grid_warp)
            for n in range(N):
                mags[n] = F.grid_sample(mags[n], grid_warp)
        # loss weighting coefficient: log-compressed magnitude of the mixture
        if args.weighted_loss:
            weight = torch.log1p(mag_mix)
            weight = torch.clamp(weight, 1e-3, 10)
        else:
            weight = torch.ones_like(mag_mix)
        # ground truth masks must be computed AFTER warping (same grid)
        gt_masks = [None for n in range(N)]
        for n in range(N):
            if args.binary_mask:
                # binary mask: source dominates when mag_N > 0.5 * mag_mix
                gt_masks[n] = (mags[n] > 0.5 * mag_mix).float()
            else:
                # ratio mask
                gt_masks[n] = mags[n] / mag_mix
                # clamp to avoid large numbers in ratio masks
                gt_masks[n].clamp_(0., 5.)
        # sound net consumes the LOG magnitude; detach so no gradient
        # flows into the input preprocessing
        log_mag_mix = torch.log(mag_mix).detach()
        # forward net_sound
        feat_sound = self.net_sound(log_mag_mix)
        feat_sound = activate(feat_sound, args.sound_activation)
        return feat_sound, \
            {'gt_masks': gt_masks, 'mag_mix': mag_mix, 'mags': mags, 'weight': weight}
class NetWrapper2(torch.nn.Module):
    """Wraps the frame (appearance) network: extracts per-stream visual
    feature maps together with pooled, activated appearance embeddings."""

    def __init__(self, nets):
        super(NetWrapper2, self).__init__()
        self.net_frame = nets

    def forward(self, frame, args):
        """Return ``(feat_frames, emb_frames)`` -- one feature map and one
        activated embedding per mixture stream."""
        feat_frames = []
        emb_frames = []
        for stream in frame[:args.num_mix]:
            feat, emb = self.net_frame.forward_multiframe_feat_emb(stream, pool=True)
            emb_frames.append(activate(emb, args.img_activation))
            feat_frames.append(feat)
        return feat_frames, emb_frames
class NetWrapper3(torch.nn.Module):
    """Wraps the audio-visual localization net: produces one activated
    appearance-attention mask per mixture stream."""

    def __init__(self, nets):
        super(NetWrapper3, self).__init__()
        self.net_avol = nets

    def forward(self, feat_frame, feat_sound, args):
        """Return a list of ``args.num_mix`` attention masks, one per
        visual feature map, each passed through the output activation."""
        return [
            activate(self.net_avol(feat, feat_sound), args.output_activation)
            for feat in feat_frame[:args.num_mix]
        ]
# Calculate metrics
def calc_metrics(batch_data, pred_masks_, args):
    """Compute separation metrics (SDR of the raw mixture, SDR/SIR/SAR of
    the separated sources) for one batch.

    :param batch_data: dict with 'mag_mix', 'phase_mix' and 'audios'
    :param pred_masks_: list of predicted masks, one per source, possibly
        on the warped log-frequency grid
    :param args: run configuration (num_mix, log_freq, stft params, ...)
    :return: list ``[sdr_mix, sdr, sir, sar]`` of batch averages
    """
    # meters
    sdr_mix_meter = AverageMeter()
    sdr_meter = AverageMeter()
    sir_meter = AverageMeter()
    sar_meter = AverageMeter()
    # fetch data and predictions
    mag_mix = batch_data['mag_mix']
    phase_mix = batch_data['phase_mix']
    audios = batch_data['audios']
    # unwarp the predicted masks back to the linear frequency scale
    N = args.num_mix
    B = mag_mix.size(0)
    pred_masks_linear = [None for n in range(N)]
    for n in range(N):
        if args.log_freq:
            grid_unwarp = torch.from_numpy(
                warpgrid(B, args.stft_frame//2+1, pred_masks_[0].size(3), warp=False)).to(args.device)
            pred_masks_linear[n] = F.grid_sample(pred_masks_[n], grid_unwarp)
        else:
            pred_masks_linear[n] = pred_masks_[n]
    # convert into numpy
    mag_mix = mag_mix.numpy()
    phase_mix = phase_mix.numpy()
    for n in range(N):
        pred_masks_linear[n] = pred_masks_linear[n].detach().cpu().numpy()
        # threshold if binary mask
        if args.binary_mask:
            pred_masks_linear[n] = (pred_masks_linear[n] > args.mask_thres).astype(np.float32)
    # loop over each sample
    for j in range(B):
        # reconstruct the mixture waveform (ISTFT of mixture magnitude+phase)
        mix_wav = istft_reconstruction(mag_mix[j, 0], phase_mix[j, 0], hop_length=args.stft_hop)
        # reconstruct each separated component
        preds_wav = [None for n in range(N)]
        for n in range(N):
            # predicted magnitude = mixture magnitude * predicted mask,
            # with the mixture phase reused for the inverse STFT
            pred_mag = mag_mix[j, 0] * pred_masks_linear[n][j, 0]
            preds_wav[n] = istft_reconstruction(pred_mag, phase_mix[j, 0], hop_length=args.stft_hop)
        # separation performance: skip (near-)silent references/predictions,
        # bss_eval is undefined on silence
        L = preds_wav[0].shape[0]
        gts_wav = [None for n in range(N)]
        valid = True
        for n in range(N):
            gts_wav[n] = audios[n][j, 0:L].numpy()
            valid *= np.sum(np.abs(gts_wav[n])) > 1e-5
            valid *= np.sum(np.abs(preds_wav[n])) > 1e-5
        if valid:
            sdr, sir, sar, _ = bss_eval_sources(
                np.asarray(gts_wav),
                np.asarray(preds_wav),
                False)
            # baseline: metric of the unprocessed mixture against each source
            sdr_mix, _, _, _ = bss_eval_sources(
                np.asarray(gts_wav),
                np.asarray([mix_wav[0:L] for n in range(N)]),
                False)
            sdr_mix_meter.update(sdr_mix.mean())
            sdr_meter.update(sdr.mean())
            sir_meter.update(sir.mean())
            sar_meter.update(sar.mean())
    return [sdr_mix_meter.average(),
            sdr_meter.average(),
            sir_meter.average(),
            sar_meter.average()]
# Visualize predictions
def output_visuals_PosNeg(vis_rows, batch_data, masks_pos, masks_neg, idx_pos, idx_neg, pred_masks_, gt_masks_, mag_mix_, weight_, args):
    """Dump qualitative results for one batch under ``args.vis``: mixture
    and per-source audio/spectrograms/masks, plus localization heatmaps and
    overlays for the positive and negative audio-visual pairs.

    :param vis_rows: list collecting HTML-table row descriptors (mutated)
    :param masks_pos / masks_neg: attention masks of the positive / negative pair
    :param idx_pos / idx_neg: 1-element tensors with the stream indices
    :param pred_masks_ / gt_masks_ / mag_mix_ / weight_: warped-grid tensors
    """
    mag_mix = batch_data['mag_mix']
    phase_mix = batch_data['phase_mix']
    frames = batch_data['frames']
    infos = batch_data['infos']
    # attention masks to cpu numpy, channel dim squeezed out
    masks_pos = torch.squeeze(masks_pos, dim=1)
    masks_pos = masks_pos.cpu().float().numpy()
    masks_neg = torch.squeeze(masks_neg, dim=1)
    masks_neg = masks_neg.cpu().float().numpy()
    N = args.num_mix
    B = mag_mix.size(0)
    # unwarp masks back to the linear frequency scale for audio recovery
    pred_masks_linear = [None for n in range(N)]
    gt_masks_linear = [None for n in range(N)]
    for n in range(N):
        if args.log_freq:
            grid_unwarp = torch.from_numpy(
                warpgrid(B, args.stft_frame//2+1, gt_masks_[0].size(3), warp=False)).to(args.device)
            pred_masks_linear[n] = F.grid_sample(pred_masks_[n], grid_unwarp)
            gt_masks_linear[n] = F.grid_sample(gt_masks_[n], grid_unwarp)
        else:
            pred_masks_linear[n] = pred_masks_[n]
            gt_masks_linear[n] = gt_masks_[n]
    # convert into numpy
    mag_mix = mag_mix.numpy()
    mag_mix_ = mag_mix_.detach().cpu().numpy()
    phase_mix = phase_mix.numpy()
    weight_ = weight_.detach().cpu().numpy()
    idx_pos = int(idx_pos.detach().cpu().numpy())
    idx_neg = int(idx_neg.detach().cpu().numpy())
    for n in range(N):
        pred_masks_[n] = pred_masks_[n].detach().cpu().numpy()
        pred_masks_linear[n] = pred_masks_linear[n].detach().cpu().numpy()
        gt_masks_[n] = gt_masks_[n].detach().cpu().numpy()
        gt_masks_linear[n] = gt_masks_linear[n].detach().cpu().numpy()
        # threshold if binary mask
        if args.binary_mask:
            pred_masks_[n] = (pred_masks_[n] > args.mask_thres).astype(np.float32)
            pred_masks_linear[n] = (pred_masks_linear[n] > args.mask_thres).astype(np.float32)
    # attention values below this are zeroed in the heatmaps
    threshold = 0.5
    # loop over each sample
    for j in range(B):
        row_elements = []
        # output directory name derived from the mixed video names
        prefix = []
        for n in range(N):
            prefix.append('-'.join(infos[n][0][j].split('/')[-2:]).split('.')[0])
        prefix = '+'.join(prefix)
        makedirs(os.path.join(args.vis, prefix))
        # save mixture waveform, magnitude image and loss-weight image
        mix_wav = istft_reconstruction(mag_mix[j, 0], phase_mix[j, 0], hop_length=args.stft_hop)
        mix_amp = magnitude2heatmap(mag_mix_[j, 0])
        weight = magnitude2heatmap(weight_[j, 0], log=False, scale=100.)
        filename_mixwav = os.path.join(prefix, 'mix.wav')
        filename_mixmag = os.path.join(prefix, 'mix.jpg')
        filename_weight = os.path.join(prefix, 'weight.jpg')
        # [::-1] flips the frequency axis so low frequencies are at the bottom
        matplotlib.image.imsave(os.path.join(args.vis, filename_mixmag), mix_amp[::-1, :, :])
        matplotlib.image.imsave(os.path.join(args.vis, filename_weight), weight[::-1, :])
        wavfile.write(os.path.join(args.vis, filename_mixwav), args.audRate, mix_wav)
        row_elements += [{'text': prefix}, {'image': filename_mixmag, 'audio': filename_mixwav}]
        # save each separated component
        preds_wav = [None for n in range(N)]
        for n in range(N):
            # GT and predicted audio recovery (mixture phase reused)
            gt_mag = mag_mix[j, 0] * gt_masks_linear[n][j, 0]
            gt_mag_ = mag_mix_[j, 0] * gt_masks_[n][j, 0]
            gt_wav = istft_reconstruction(gt_mag, phase_mix[j, 0], hop_length=args.stft_hop)
            pred_mag = mag_mix[j, 0] * pred_masks_linear[n][j, 0]
            pred_mag_ = mag_mix_[j, 0] * pred_masks_[n][j, 0]
            preds_wav[n] = istft_reconstruction(pred_mag, phase_mix[j, 0], hop_length=args.stft_hop)
            # output masks as grayscale images
            filename_gtmask = os.path.join(prefix, 'gtmask{}.jpg'.format(n+1))
            filename_predmask = os.path.join(prefix, 'predmask{}.jpg'.format(n+1))
            gt_mask = (np.clip(gt_masks_[n][j, 0], 0, 1) * 255).astype(np.uint8)
            pred_mask = (np.clip(pred_masks_[n][j, 0], 0, 1) * 255).astype(np.uint8)
            matplotlib.image.imsave(os.path.join(args.vis, filename_gtmask), gt_mask[::-1, :])
            matplotlib.image.imsave(os.path.join(args.vis, filename_predmask), pred_mask[::-1, :])
            # output spectrograms (log of magnitude, heatmap colormap)
            filename_gtmag = os.path.join(prefix, 'gtamp{}.jpg'.format(n+1))
            filename_predmag = os.path.join(prefix, 'predamp{}.jpg'.format(n+1))
            gt_mag = magnitude2heatmap(gt_mag_)
            pred_mag = magnitude2heatmap(pred_mag_)
            matplotlib.image.imsave(os.path.join(args.vis, filename_gtmag), gt_mag[::-1, :, :])
            matplotlib.image.imsave(os.path.join(args.vis, filename_predmag), pred_mag[::-1, :, :])
            # output audio
            filename_gtwav = os.path.join(prefix, 'gt{}.wav'.format(n+1))
            filename_predwav = os.path.join(prefix, 'pred{}.wav'.format(n+1))
            wavfile.write(os.path.join(args.vis, filename_gtwav), args.audRate, gt_wav)
            wavfile.write(os.path.join(args.vis, filename_predwav), args.audRate, preds_wav[n])
        # save the center frame of the positive stream
        frames_tensor = recover_rgb(frames[idx_pos][j,:,int(args.num_frames//2)])
        frames_tensor = np.asarray(frames_tensor)
        filename_frame = os.path.join(prefix, 'frame{}.png'.format(idx_pos+1))
        matplotlib.image.imsave(os.path.join(args.vis, filename_frame), frames_tensor)
        frame = frames_tensor.copy()
        # heatmap + overlay for the positive pair; each attention cell is
        # upscaled to a 16x16 pixel tile
        height, width = masks_pos.shape[-2:]
        heatmap = np.zeros((height*16, width*16))
        for i in range(height):
            for k in range(width):
                mask_pos = masks_pos[j]
                value = mask_pos[i,k]
                value = 0 if value < threshold else value
                ii = i * 16
                jj = k * 16
                heatmap[ii:ii + 16, jj:jj + 16] = value
        heatmap = (heatmap * 255).astype(np.uint8)
        filename_heatmap = os.path.join(prefix, 'heatmap_{}_{}.jpg'.format(idx_pos+1, idx_pos+1))
        plt.imsave(os.path.join(args.vis, filename_heatmap), heatmap, cmap='hot')
        heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
        fin = cv2.addWeighted(heatmap, 0.5, frame, 0.5, 0, dtype = cv2.CV_32F)
        path_overlay = os.path.join(args.vis, prefix, 'overlay_{}_{}.jpg'.format(idx_pos+1, idx_pos+1))
        cv2.imwrite(path_overlay, fin)
        # save the center frame of the negative stream
        frames_tensor = recover_rgb(frames[idx_neg][j,:,int(args.num_frames//2)])
        frames_tensor = np.asarray(frames_tensor)
        filename_frame = os.path.join(prefix, 'frame{}.png'.format(idx_neg+1))
        matplotlib.image.imsave(os.path.join(args.vis, filename_frame), frames_tensor)
        frame = frames_tensor.copy()
        # heatmap + overlay for the negative pair (audio from idx_pos,
        # frame from idx_neg)
        height, width = masks_neg.shape[-2:]
        heatmap = np.zeros((height*16, width*16))
        for i in range(height):
            for k in range(width):
                mask_neg = masks_neg[j]
                value = mask_neg[i,k]
                value = 0 if value < threshold else value
                ii = i * 16
                jj = k * 16
                heatmap[ii:ii + 16, jj:jj + 16] = value
        heatmap = (heatmap * 255).astype(np.uint8)
        filename_heatmap = os.path.join(prefix, 'heatmap_{}_{}.jpg'.format(idx_pos+1, idx_neg+1))
        plt.imsave(os.path.join(args.vis, filename_heatmap), heatmap, cmap='hot')
        heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
        fin = cv2.addWeighted(heatmap, 0.5, frame, 0.5, 0, dtype = cv2.CV_32F)
        path_overlay = os.path.join(args.vis, prefix, 'overlay_{}_{}.jpg'.format(idx_pos+1, idx_neg+1))
        cv2.imwrite(path_overlay, fin)
    # NOTE(review): this append is OUTSIDE the per-sample loop, so only the
    # last sample's row reaches vis_rows -- possibly meant to be inside; confirm.
    vis_rows.append(row_elements)
def evaluate(crit_loc, crit_sep, netWrapper1, netWrapper2, netWrapper3, loader, history, epoch, args):
    """Run one validation pass: localization + separation losses, accuracy
    of positive/negative pair detection, BSS metrics, and visualizations.

    :param crit_loc: localization criterion (binary, e.g. BCE) - assumed, TODO confirm
    :param crit_sep: separation criterion over masks
    :param netWrapper1/2/3: sound, frame and localization wrappers
    :param loader: validation data loader
    :param history: dict of running metric lists (mutated in place)
    :param epoch: current epoch number (for logging/plots)
    """
    print('Evaluating at {} epochs...'.format(epoch))
    torch.set_grad_enabled(False)
    # ensure the viz directory exists (remove=False keeps previous results)
    makedirs(args.vis, remove=False)
    # switch to eval mode
    netWrapper1.eval()
    netWrapper2.eval()
    netWrapper3.eval()
    # initialize meters
    loss_meter = AverageMeter()
    loss_acc_meter = AverageMeter()
    loss_sep_meter = AverageMeter()
    loss_loc_meter = AverageMeter()
    sdr_mix_meter = AverageMeter()
    sdr_meter = AverageMeter()
    sir_meter = AverageMeter()
    sar_meter = AverageMeter()
    vis_rows = []
    for i, batch_data in enumerate(loader):
        mag_mix = batch_data['mag_mix']
        mags = batch_data['mags']
        frames = batch_data['frames']
        N = args.num_mix
        B = mag_mix.shape[0]
        for n in range(N):
            frames[n] = torch.autograd.Variable(frames[n]).to(args.device)
            mags[n] = torch.autograd.Variable(mags[n]).to(args.device)
        mag_mix = torch.autograd.Variable(mag_mix).to(args.device)
        # forward pass
        # sound features + GT masks / weights from the sound wrapper
        feat_sound, outputs = netWrapper1.forward(mags, mag_mix, args)
        gt_masks = outputs['gt_masks']
        mag_mix_ = outputs['mag_mix']
        weight_ = outputs['weight']
        # appearance feature maps and embeddings from the frame wrapper
        feat_frame, emb_frame = netWrapper2.forward(frames, args)
        # randomly pick one stream as the positive pair; with N streams the
        # complementary index N-1-idx serves as the negative
        idx_pos = torch.randint(0,N, (1,))
        idx_neg = N -1 -idx_pos
        # appearance attention masks for the chosen audio embedding
        masks = netWrapper3.forward(feat_frame, emb_frame[idx_pos], args)
        mask_pos = masks[idx_pos]
        mask_neg = masks[idx_neg]
        # spatial max pooling collapses each mask to one detection score
        pred_pos = F.adaptive_max_pool2d(mask_pos, 1)
        pred_pos = pred_pos.view(mask_pos.shape[0])
        pred_neg = F.adaptive_max_pool2d(mask_neg, 1)
        pred_neg = pred_neg.view(mask_neg.shape[0])
        # ground truth for the positive/negative pairs
        y1 = torch.ones(B,device=args.device).detach()
        y0 = torch.zeros(B, device=args.device).detach()
        # localization loss
        loss_loc_pos = crit_loc(pred_pos, y1).reshape(1)
        loss_loc_neg = crit_loc(pred_neg, y0).reshape(1)
        loss_loc = args.lamda * (loss_loc_pos + loss_loc_neg)/N
        # validation accuracy of pair classification after thresholding
        pred_pos = (pred_pos > args.mask_thres)
        pred_neg = (pred_neg > args.mask_thres)
        valacc = 0
        for j in range(B):
            if pred_pos[j].item() == y1[j].item():
                valacc += 1.0
            if pred_neg[j].item() == y0[j].item():
                valacc += 1.0
        valacc = valacc/N/B
        # separate sounds: mask n = inner product of embedding n with the
        # sound feature map at every time-frequency location
        sound_size = feat_sound.size()
        B, C = sound_size[0], sound_size[1]
        pred_masks = [None for n in range(N)]
        for n in range(N):
            feat_img = emb_frame[n]
            feat_img = feat_img.view(B, 1, C)
            pred_masks[n] = torch.bmm(feat_img, feat_sound.view(B, C, -1)) \
                .view(B, 1, *sound_size[2:])
            pred_masks[n] = activate(pred_masks[n], args.output_activation)
        # separation loss
        loss_sep = crit_sep(pred_masks, gt_masks, weight_).reshape(1)
        # total loss
        loss = loss_loc + loss_sep
        loss_meter.update(loss.item())
        loss_acc_meter.update(valacc)
        loss_sep_meter.update(loss_sep.item())
        loss_loc_meter.update(loss_loc.item())
        print('[Eval] iter {}, loss: {:.4f}, loss_loc: {:.4f}, loss_sep: {:.4f}, acc: {:.4f} '.format(i, loss.item(), loss_loc.item(), loss_sep.item(), valacc))
        # calculate BSS separation metrics
        sdr_mix, sdr, sir, sar = calc_metrics(batch_data, pred_masks, args)
        sdr_mix_meter.update(sdr_mix)
        sdr_meter.update(sdr)
        sir_meter.update(sir)
        sar_meter.update(sar)
        # output visualization for the first args.num_vis rows only
        if len(vis_rows) < args.num_vis:
            output_visuals_PosNeg(vis_rows, batch_data, mask_pos, mask_neg, idx_pos, idx_neg, pred_masks, gt_masks, mag_mix_, weight_, args)
    print('[Eval Summary] Epoch: {}, Loss: {:.4f}, Loss_loc: {:.4f}, Loss_sep: {:.4f}, acc: {:.4f}, sdr_mix: {:.4f}, sdr: {:.4f}, sir: {:.4f}, sar: {:.4f}, '
          .format(epoch, loss_meter.average(), loss_loc_meter.average(), loss_sep_meter.average(), loss_acc_meter.average(), sdr_mix_meter.average(), sdr_meter.average(), sir_meter.average(), sar_meter.average()))
    # record running history for plotting
    history['val']['epoch'].append(epoch)
    history['val']['err'].append(loss_meter.average())
    history['val']['err_loc'].append(loss_loc_meter.average())
    history['val']['err_sep'].append(loss_sep_meter.average())
    history['val']['acc'].append(loss_acc_meter.average())
    history['val']['sdr'].append(sdr_meter.average())
    history['val']['sir'].append(sir_meter.average())
    history['val']['sar'].append(sar_meter.average())
    # Plot figure
    if epoch > 0:
        print('Plotting figures...')
        plot_loss_loc_sep_acc_metrics(args.ckpt, history)
    print('this evaluation round is done!')
# train one epoch
def train(crit_loc, crit_sep, netWrapper1, netWrapper2, netWrapper3, loader, optimizer, history, epoch, args):
    """Run one training epoch of the joint localization + separation model.

    Args:
        crit_loc: localization criterion (BCE on max-pooled attention maps).
        crit_sep: separation criterion applied to the predicted masks.
        netWrapper1: sound net -> (feat_sound, dict with 'gt_masks'/'mag_mix'/'weight').
        netWrapper2: frame net -> (feat_frame, emb_frame).
        netWrapper3: audio-visual localization net -> attention masks.
        loader: training DataLoader yielding dicts with 'mag_mix', 'mags', 'frames'.
        optimizer: optimizer over all three sub-networks.
        history: dict of running metric lists; history['train'][...] is appended to.
        epoch: 1-based epoch index (used for logging and fractional epoch).
        args: experiment configuration namespace.

    Side effects: updates model weights, appends metrics to `history`,
    prints progress every args.disp_iter iterations.
    """
    print('Training at {} epochs...'.format(epoch))
    torch.set_grad_enabled(True)
    batch_time = AverageMeter()
    # NOTE(review): data_time is created and printed below but never
    # updated, so the reported data-loading time stays at its initial
    # value — confirm whether an update call is missing.
    data_time = AverageMeter()
    # switch to train mode
    netWrapper1.train()
    netWrapper2.train()
    netWrapper3.train()
    # main loop
    torch.cuda.synchronize()
    tic = time.perf_counter()
    for i, batch_data in enumerate(loader):
        mag_mix = batch_data['mag_mix']
        mags = batch_data['mags']
        frames = batch_data['frames']
        N = args.num_mix  # number of mixed sources per sample
        B = mag_mix.shape[0]  # batch size
        # Move every input tensor onto the training device.
        for n in range(N):
            frames[n] = torch.autograd.Variable(frames[n]).to(args.device)
            mags[n] = torch.autograd.Variable(mags[n]).to(args.device)
        mag_mix = torch.autograd.Variable(mag_mix).to(args.device)
        # forward pass
        optimizer.zero_grad()
        # Sound network: per-source features + ground-truth masks/weights.
        feat_sound, outputs = netWrapper1.forward(mags, mag_mix, args)
        gt_masks = outputs['gt_masks']
        mag_mix_ = outputs['mag_mix']
        weight_ = outputs['weight']
        # Frame network: spatial features and per-source embeddings.
        feat_frame, emb_frame = netWrapper2.forward(frames, args)
        # Randomly pick one source as the positive pair; the mirrored
        # index serves as the negative. NOTE(review): idx_neg = N-1-idx_pos
        # is a true complement only for N == 2 — confirm for larger N.
        idx_pos = torch.randint(0,N, (1,))
        idx_neg = N -1 -idx_pos
        # appearance attention
        masks = netWrapper3.forward(feat_frame, emb_frame[idx_pos], args)
        mask_pos = masks[idx_pos]
        mask_neg = masks[idx_neg]
        # Global max pooling reduces each attention map to one score.
        pred_pos = F.adaptive_max_pool2d(mask_pos, 1)
        pred_pos = pred_pos.view(mask_pos.shape[0])
        pred_neg = F.adaptive_max_pool2d(mask_neg, 1)
        pred_neg = pred_neg.view(mask_neg.shape[0])
        # ground truth for the positive/negative pairs
        y1 = torch.ones(B,device=args.device).detach()
        y0 = torch.zeros(B, device=args.device).detach()
        # localization loss and acc
        loss_loc_pos = crit_loc(pred_pos, y1).reshape(1)
        loss_loc_neg = crit_loc(pred_neg, y0).reshape(1)
        loss_loc = args.lamda * (loss_loc_pos + loss_loc_neg)/N
        # Binarize the pooled scores to count correct pos/neg decisions.
        pred_pos = (pred_pos > args.mask_thres)
        pred_neg = (pred_neg > args.mask_thres)
        valacc = 0
        for j in range(B):
            if pred_pos[j].item() == y1[j].item():
                valacc += 1.0
            if pred_neg[j].item() == y0[j].item():
                valacc += 1.0
        # NOTE(review): 2*B comparisons divided by N*B — this equals the
        # plain accuracy only when N == 2; confirm for other mixes.
        valacc = valacc/N/B
        # sepatate sounds (for simplicity, we don't use the alpha and beta)
        sound_size = feat_sound.size()
        B, C = sound_size[0], sound_size[1]
        pred_masks = [None for n in range(N)]
        # Predict one separation mask per source: inner product between the
        # source's frame embedding and every sound-feature pixel.
        for n in range(N):
            feat_img = emb_frame[n]
            feat_img = feat_img.view(B, 1, C)
            pred_masks[n] = torch.bmm(feat_img, feat_sound.view(B, C, -1)) \
                .view(B, 1, *sound_size[2:])
            pred_masks[n] = activate(pred_masks[n], args.output_activation)
        # separation loss
        loss_sep = crit_sep(pred_masks, gt_masks, weight_).reshape(1)
        # total loss
        loss = loss_loc + loss_sep
        loss.backward()
        optimizer.step()
        # measure total time
        torch.cuda.synchronize()
        batch_time.update(time.perf_counter() - tic)
        tic = time.perf_counter()
        # display
        if i % args.disp_iter == 0:
            print('Epoch: [{}][{}/{}], Time: {:.2f}, Data: {:.2f}, '
                  'lr_sound: {}, lr_frame: {}, lr_avol: {}, '
                  'loss: {:.5f}, loss_loc: {:.5f}, loss_sep: {:.5f}, acc: {:.5f} '
                  .format(epoch, i, args.epoch_iters,
                          batch_time.average(), data_time.average(),
                          args.lr_sound, args.lr_frame, args.lr_avol,
                          loss.item(), loss_loc.item(), loss_sep.item(),
                          valacc))
        # Record metrics against a fractional epoch position for plotting.
        fractional_epoch = epoch - 1 + 1. * i / args.epoch_iters
        history['train']['epoch'].append(fractional_epoch)
        history['train']['err'].append(loss.item())
        history['train']['err_loc'].append(loss_loc.item())
        history['train']['err_sep'].append(loss_sep.item())
        history['train']['acc'].append(valacc)
def checkpoint(net_sound, net_frame, net_avol, optimizer, history, epoch, args):
    """Persist the full training state to disk.

    Always writes `<args.ckpt>/checkpoint_latest.pth`; additionally writes
    `<args.ckpt>/checkpoint_best.pth` (and updates args.best_err) whenever
    the most recent validation error is a new best.
    """
    print('Saving checkpoints at {} epochs.'.format(epoch))
    snapshot = {
        'epoch': epoch,
        'state_dict_net_sound': net_sound.state_dict(),
        'state_dict_net_frame': net_frame.state_dict(),
        'state_dict_net_avol': net_avol.state_dict(),
        'optimizer': optimizer.state_dict(),
        'history': history,
    }
    torch.save(snapshot, '{}/checkpoint_{}'.format(args.ckpt, 'latest.pth'))
    # Track the best model by the last recorded validation error.
    latest_err = history['val']['err'][-1]
    if latest_err <= args.best_err:
        args.best_err = latest_err
        torch.save(snapshot, '{}/checkpoint_{}'.format(args.ckpt, 'best.pth'))
def load_checkpoint(net_sound, net_frame, net_avol, optimizer, history, filename):
    """Resume training state from `filename`, if it exists.

    Restores the three sub-network weights, the optimizer state (moving any
    tensors back onto the GPU) and the metric history, and returns the epoch
    to resume from (saved epoch + 1). When the file is missing, everything is
    returned unchanged with start_epoch == 0.
    """
    start_epoch = 0
    if not os.path.isfile(filename):
        print("=> no checkpoint found at '{}'".format(filename))
        return net_sound, net_frame, net_avol, optimizer, start_epoch, history
    print("=> loading checkpoint '{}'".format(filename))
    snapshot = torch.load(filename)
    start_epoch = snapshot['epoch'] + 1
    net_sound.load_state_dict(snapshot['state_dict_net_sound'])
    net_frame.load_state_dict(snapshot['state_dict_net_frame'])
    net_avol.load_state_dict(snapshot['state_dict_net_avol'])
    optimizer.load_state_dict(snapshot['optimizer'])
    # torch.load leaves optimizer state on CPU; push tensors back to GPU.
    for param_state in optimizer.state.values():
        for key, value in param_state.items():
            if isinstance(value, torch.Tensor):
                param_state[key] = value.cuda()
    history = snapshot['history']
    print("=> loaded checkpoint '{}' (epoch {})".format(filename, snapshot['epoch']))
    return net_sound, net_frame, net_avol, optimizer, start_epoch, history
def load_checkpoint_from_train(net_sound, net_frame, net_avol, filename):
    """Load only the three sub-network weights from a training checkpoint.

    Unlike load_checkpoint, this touches no optimizer state or history —
    it is used when the networks are needed for evaluation only. Missing
    files are reported and the networks returned unchanged.
    """
    if not os.path.isfile(filename):
        print("=> no checkpoint found at '{}'".format(filename))
        return net_sound, net_frame, net_avol
    print("=> loading checkpoint '{}'".format(filename))
    snapshot = torch.load(filename)
    print('epoch: ', snapshot['epoch'])
    net_sound.load_state_dict(snapshot['state_dict_net_sound'])
    net_frame.load_state_dict(snapshot['state_dict_net_frame'])
    net_avol.load_state_dict(snapshot['state_dict_net_avol'])
    return net_sound, net_frame, net_avol
def load_sep(net_sound, net_frame, filename):
    """Warm-start the sound and frame networks from a separation checkpoint.

    Only the two separation sub-networks are loaded (no avol net, no
    optimizer). Missing files are reported and the networks returned
    unchanged.
    """
    if not os.path.isfile(filename):
        print("=> no checkpoint found at '{}'".format(filename))
        return net_sound, net_frame
    print("=> loading checkpoint '{}'".format(filename))
    snapshot = torch.load(filename)
    print('epoch: ', snapshot['epoch'])
    net_sound.load_state_dict(snapshot['state_dict_net_sound'])
    net_frame.load_state_dict(snapshot['state_dict_net_frame'])
    return net_sound, net_frame
def create_optimizer(net_sound, net_frame, net_avol, args):
    """Build an SGD optimizer with one parameter group per sub-network.

    Each sub-network gets its own learning rate (args.lr_sound /
    args.lr_frame / args.lr_avol); momentum and weight decay are shared.

    BUG FIX: the sound and frame learning rates were swapped — the sound
    net was assigned args.lr_frame and the frame net args.lr_sound. The
    names (and the lr_sound/lr_frame logging in train()) indicate each
    net should use its own rate.
    """
    param_groups = [{'params': net_sound.parameters(), 'lr': args.lr_sound},
                    {'params': net_frame.parameters(), 'lr': args.lr_frame},
                    {'params': net_avol.parameters(), 'lr': args.lr_avol}]
    return torch.optim.SGD(param_groups, momentum=args.beta1, weight_decay=args.weight_decay)
def adjust_learning_rate(optimizer, args):
    """Decay every learning rate (bookkeeping and optimizer) by 10x.

    Updates the args.lr_* fields (used for logging) and every parameter
    group inside the optimizer, keeping the two in sync.
    """
    decay = 0.1
    args.lr_sound *= decay
    args.lr_frame *= decay
    args.lr_avol *= decay
    for group in optimizer.param_groups:
        group['lr'] *= decay
def main(args):
    """Build networks, datasets and optimizer, then train or evaluate.

    Depending on args.mode:
      * 'eval'  -> load weights from a checkpoint and run one evaluation pass.
      * 'train' -> optionally resume/warm-start, then run the training loop,
                   evaluating every args.eval_epoch epochs and checkpointing
                   after each epoch.
    """
    # Network Builders
    # Fix all RNG seeds so model init and data order are reproducible.
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)
    np.random.seed(0)
    random.seed(0)
    builder = ModelBuilder()
    # Audio (sound) analysis network.
    net_sound = builder.build_sound(
        arch=args.arch_sound,
        input_channel=1,
        output_channel=args.num_channels,
        fc_dim=args.num_channels,
        weights=args.weights_sound)
    # Visual (frame) analysis network.
    net_frame = builder.build_frame(
        arch=args.arch_frame,
        fc_dim=args.num_channels,
        pool_type=args.img_pool,
        weights=args.weights_frame)
    # Audio-visual localization network.
    # NOTE(review): this passes args.weights_frame rather than a dedicated
    # avol weights argument — confirm that is intentional.
    net_avol = builder.build_avol(
        arch=args.arch_avol,
        fc_dim=args.num_channels,
        weights=args.weights_frame)
    # Losses: BCE for localization, configurable criterion for separation.
    crit_loc = nn.BCELoss()
    crit_sep = builder.build_criterion(arch=args.loss)
    # Dataset and Loader
    dataset_train = MUSICMixDataset(
        args.list_train, args, split='train')
    dataset_val = MUSICMixDataset(
        args.list_val, args, max_sample=args.num_val, split='val')
    loader_train = torch.utils.data.DataLoader(
        dataset_train,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=int(args.workers),
        drop_last=True)
    loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=int(args.workers),
        drop_last=False)
    args.epoch_iters = len(dataset_train) // args.batch_size
    print('1 Epoch = {} iters'.format(args.epoch_iters))
    # Set up optimizer
    optimizer = create_optimizer(net_sound, net_frame, net_avol, args)
    # History of performance metrics, appended to by train()/evaluate().
    history = {
        'train': {'epoch': [], 'err': [], 'err_loc': [], 'err_sep': [], 'acc': []},
        'val': {'epoch': [], 'err': [], 'err_loc': [], 'err_sep': [], 'acc': [], 'sdr': [], 'sir': [], 'sar': []}}
    # Training loop
    # Load from pretrained models
    start_epoch = 1
    model_name = args.ckpt + '/checkpoint.pth'
    if os.path.exists(model_name):
        if args.mode == 'eval':
            # Evaluation only needs the network weights.
            net_sound, net_frame, net_avol = load_checkpoint_from_train(net_sound, net_frame, net_avol, model_name)
        elif args.mode == 'train':
            # Resume training: restore weights, optimizer, epoch and history.
            model_name = args.ckpt + '/checkpoint_latest.pth'
            net_sound, net_frame, net_avol, optimizer, start_epoch, history = load_checkpoint(net_sound, net_frame, net_avol, optimizer, history, model_name)
            print("Loading from previous checkpoint.")
    else:
        # Fresh run: optionally warm-start sound/frame nets from a
        # separation-only checkpoint.
        if args.mode == 'train' and start_epoch==1 and os.path.exists(args.weights_model):
            net_sound, net_frame = load_sep(net_sound, net_frame, args.weights_model)
            print("Loading from appearance + sound checkpoint.")
    # Wrap networks for multi-GPU data-parallel execution.
    netWrapper1 = NetWrapper1(net_sound)
    netWrapper1 = torch.nn.DataParallel(netWrapper1, device_ids=range(args.num_gpus)).cuda()
    netWrapper1.to(args.device)
    netWrapper2 = NetWrapper2(net_frame)
    netWrapper2 = torch.nn.DataParallel(netWrapper2, device_ids=range(args.num_gpus)).cuda()
    netWrapper2.to(args.device)
    netWrapper3 = NetWrapper3(net_avol)
    netWrapper3 = torch.nn.DataParallel(netWrapper3, device_ids=range(args.num_gpus)).cuda()
    netWrapper3.to(args.device)
    # Eval mode
    #evaluate(crit_loc, crit_sep, netWrapper1, netWrapper2, netWrapper3, loader_val, history, 0, args)
    if args.mode == 'eval':
        evaluate(crit_loc, crit_sep, netWrapper1, netWrapper2, netWrapper3, loader_val, history, 0, args)
        print('Evaluation Done!')
        return
    for epoch in range(start_epoch, args.num_epoch + 1):
        train(crit_loc, crit_sep, netWrapper1, netWrapper2, netWrapper3, loader_train, optimizer, history, epoch, args)
        # drop learning rate
        if epoch in args.lr_steps:
            adjust_learning_rate(optimizer, args)
        ## Evaluation and visualization
        if epoch % args.eval_epoch == 0:
            evaluate(crit_loc, crit_sep, netWrapper1, netWrapper2, netWrapper3, loader_val, history, epoch, args)
        # checkpointing
        checkpoint(net_sound, net_frame, net_avol, optimizer, history, epoch, args)
    print('Training Done!')
if __name__ == '__main__':
    # arguments
    parser = ArgParser()
    args = parser.parse_train_arguments()
    # Effective batch size scales with the number of GPUs.
    args.batch_size = args.num_gpus * args.batch_size_per_gpu
    args.device = torch.device("cuda")
    # experiment name: encode the key hyper-parameters into args.id so each
    # configuration gets its own checkpoint/visualization directory.
    if args.mode == 'train':
        args.id += '-{}mix'.format(args.num_mix)
        if args.log_freq:
            args.id += '-LogFreq'
        args.id += '-{}-{}-{}'.format(
            args.arch_frame, args.arch_sound, args.arch_avol)
        args.id += '-frames{}stride{}'.format(args.num_frames, args.stride_frames)
        args.id += '-{}'.format(args.img_pool)
        if args.binary_mask:
            assert args.loss == 'bce', 'Binary Mask should go with BCE loss'
            args.id += '-binary'
        else:
            args.id += '-ratio'
        if args.weighted_loss:
            args.id += '-weightedLoss'
        args.id += '-channels{}'.format(args.num_channels)
        args.id += '-epoch{}'.format(args.num_epoch)
        args.id += '-step' + '_'.join([str(x) for x in args.lr_steps])
    print('Model ID: {}'.format(args.id))
    # paths to save/load output
    args.ckpt = os.path.join(args.ckpt, args.id)
    if args.mode == 'train':
        # NOTE(review): warm-start checkpoint path is hard-coded here,
        # overriding any CLI value — confirm this is intended.
        args.weights_model = 'ckpt_res50_DV3P_MUSIC_N2_f1_binary_bs10_TrainS335_D65_ValValS100_ValTestS130_dup100_f8fps_11k/MUSIC-2mix-LogFreq-resnet18dilated_50-deeplabV3Plus_mobilenetv2-frames1stride24-maxpool-binary-weightedLoss-channels11-epoch100-step40_80/checkpoint.pth'
        args.vis = os.path.join(args.ckpt, 'visualization_train/')
        makedirs(args.ckpt, remove=False)
    elif args.mode == 'eval':
        args.vis = os.path.join(args.ckpt, 'visualization_val/')
    elif args.mode == 'test':
        args.vis = os.path.join(args.ckpt, 'visualization_test/')
    # initialize best error with a big number
    args.best_err = float("inf")
    # Re-seed with the user-chosen seed (main() seeds again with 0).
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    main(args)
| [
"numpy.clip",
"dataset.MUSICMixDataset",
"viz.plot_loss_loc_sep_acc_metrics",
"torch.cuda.synchronize",
"models.activate",
"torch.squeeze",
"os.path.exists",
"torch.nn.functional.grid_sample",
"utils.istft_reconstruction",
"models.ModelBuilder",
"numpy.asarray",
"time.perf_counter",
"torch.n... | [((3521, 3535), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3533, 3535), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((3552, 3566), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3564, 3566), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((3583, 3597), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3595, 3597), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((3614, 3628), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3626, 3628), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((6378, 6409), 'torch.squeeze', 'torch.squeeze', (['masks_pos'], {'dim': '(1)'}), '(masks_pos, dim=1)\n', (6391, 6409), False, 'import torch\n'), ((6474, 6505), 'torch.squeeze', 'torch.squeeze', (['masks_neg'], {'dim': '(1)'}), '(masks_neg, dim=1)\n', (6487, 6505), False, 'import torch\n'), ((13988, 14017), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (14010, 14017), False, 'import torch\n'), ((14057, 14089), 'utils.makedirs', 'makedirs', (['args.vis'], {'remove': '(False)'}), '(args.vis, remove=False)\n', (14065, 14089), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((14228, 14242), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (14240, 14242), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((14264, 14278), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (14276, 
14278), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((14300, 14314), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (14312, 14314), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((14336, 14350), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (14348, 14350), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((14371, 14385), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (14383, 14385), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((14402, 14416), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (14414, 14416), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((14433, 14447), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (14445, 14447), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((14464, 14478), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (14476, 14478), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((19186, 19214), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (19208, 19214), False, 'import torch\n'), ((19232, 19246), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (19244, 19246), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((19263, 19277), 
'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (19275, 19277), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((19398, 19422), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (19420, 19422), False, 'import torch\n'), ((19433, 19452), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19450, 19452), False, 'import time\n'), ((24227, 24251), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (24241, 24251), False, 'import os\n'), ((25250, 25274), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (25264, 25274), False, 'import os\n'), ((25806, 25830), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (25820, 25830), False, 'import os\n'), ((26531, 26618), 'torch.optim.SGD', 'torch.optim.SGD', (['param_groups'], {'momentum': 'args.beta1', 'weight_decay': 'args.weight_decay'}), '(param_groups, momentum=args.beta1, weight_decay=args.\n weight_decay)\n', (26546, 26618), False, 'import torch\n'), ((26858, 26878), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (26875, 26878), False, 'import torch\n'), ((26883, 26908), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(0)'], {}), '(0)\n', (26905, 26908), False, 'import torch\n'), ((26913, 26930), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (26927, 26930), True, 'import numpy as np\n'), ((26935, 26949), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (26946, 26949), False, 'import random\n'), ((26964, 26978), 'models.ModelBuilder', 'ModelBuilder', ([], {}), '()\n', (26976, 26978), False, 'from models import ModelBuilder, activate\n'), ((27503, 27515), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (27513, 27515), True, 'import torch.nn as nn\n'), ((27617, 27670), 'dataset.MUSICMixDataset', 'MUSICMixDataset', (['args.list_train', 'args'], {'split': '"""train"""'}), 
"(args.list_train, args, split='train')\n", (27632, 27670), False, 'from dataset import MUSICMixDataset\n'), ((27698, 27772), 'dataset.MUSICMixDataset', 'MUSICMixDataset', (['args.list_val', 'args'], {'max_sample': 'args.num_val', 'split': '"""val"""'}), "(args.list_val, args, max_sample=args.num_val, split='val')\n", (27713, 27772), False, 'from dataset import MUSICMixDataset\n'), ((28755, 28781), 'os.path.exists', 'os.path.exists', (['model_name'], {}), '(model_name)\n', (28769, 28781), False, 'import os\n'), ((31031, 31042), 'arguments.ArgParser', 'ArgParser', ([], {}), '()\n', (31040, 31042), False, 'from arguments import ArgParser\n'), ((31165, 31185), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (31177, 31185), False, 'import torch\n'), ((32108, 32140), 'os.path.join', 'os.path.join', (['args.ckpt', 'args.id'], {}), '(args.ckpt, args.id)\n', (32120, 32140), False, 'import os\n'), ((32833, 32855), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (32844, 32855), False, 'import random\n'), ((32860, 32888), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (32877, 32888), False, 'import torch\n'), ((2139, 2182), 'models.activate', 'activate', (['feat_sound', 'args.sound_activation'], {}), '(feat_sound, args.sound_activation)\n', (2147, 2182), False, 'from models import ModelBuilder, activate\n'), ((4665, 4743), 'utils.istft_reconstruction', 'istft_reconstruction', (['mag_mix[j, 0]', 'phase_mix[j, 0]'], {'hop_length': 'args.stft_hop'}), '(mag_mix[j, 0], phase_mix[j, 0], hop_length=args.stft_hop)\n', (4685, 4743), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((8342, 8420), 'utils.istft_reconstruction', 'istft_reconstruction', (['mag_mix[j, 0]', 'phase_mix[j, 0]'], {'hop_length': 'args.stft_hop'}), '(mag_mix[j, 0], phase_mix[j, 0], hop_length=args.stft_hop)\n', (8362, 8420), False, 'from 
utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((8439, 8472), 'utils.magnitude2heatmap', 'magnitude2heatmap', (['mag_mix_[j, 0]'], {}), '(mag_mix_[j, 0])\n', (8456, 8472), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((8490, 8546), 'utils.magnitude2heatmap', 'magnitude2heatmap', (['weight_[j, 0]'], {'log': '(False)', 'scale': '(100.0)'}), '(weight_[j, 0], log=False, scale=100.0)\n', (8507, 8546), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((8572, 8603), 'os.path.join', 'os.path.join', (['prefix', '"""mix.wav"""'], {}), "(prefix, 'mix.wav')\n", (8584, 8603), False, 'import os\n'), ((8630, 8661), 'os.path.join', 'os.path.join', (['prefix', '"""mix.jpg"""'], {}), "(prefix, 'mix.jpg')\n", (8642, 8661), False, 'import os\n'), ((8688, 8722), 'os.path.join', 'os.path.join', (['prefix', '"""weight.jpg"""'], {}), "(prefix, 'weight.jpg')\n", (8700, 8722), False, 'import os\n'), ((11246, 11271), 'numpy.asarray', 'np.asarray', (['frames_tensor'], {}), '(frames_tensor)\n', (11256, 11271), True, 'import numpy as np\n'), ((11589, 11624), 'numpy.zeros', 'np.zeros', (['(height * 16, width * 16)'], {}), '((height * 16, width * 16))\n', (11597, 11624), True, 'import numpy as np\n'), ((12185, 12229), 'cv2.applyColorMap', 'cv2.applyColorMap', (['heatmap', 'cv2.COLORMAP_JET'], {}), '(heatmap, cv2.COLORMAP_JET)\n', (12202, 12229), False, 'import cv2\n'), ((12244, 12306), 'cv2.addWeighted', 'cv2.addWeighted', (['heatmap', '(0.5)', 'frame', '(0.5)', '(0)'], {'dtype': 'cv2.CV_32F'}), '(heatmap, 0.5, frame, 0.5, 0, dtype=cv2.CV_32F)\n', (12259, 12306), False, 'import cv2\n'), ((12421, 12451), 'cv2.imwrite', 'cv2.imwrite', (['path_overlay', 'fin'], {}), '(path_overlay, fin)\n', (12432, 12451), 
False, 'import cv2\n'), ((12580, 12605), 'numpy.asarray', 'np.asarray', (['frames_tensor'], {}), '(frames_tensor)\n', (12590, 12605), True, 'import numpy as np\n'), ((12923, 12958), 'numpy.zeros', 'np.zeros', (['(height * 16, width * 16)'], {}), '((height * 16, width * 16))\n', (12931, 12958), True, 'import numpy as np\n'), ((13519, 13563), 'cv2.applyColorMap', 'cv2.applyColorMap', (['heatmap', 'cv2.COLORMAP_JET'], {}), '(heatmap, cv2.COLORMAP_JET)\n', (13536, 13563), False, 'import cv2\n'), ((13578, 13640), 'cv2.addWeighted', 'cv2.addWeighted', (['heatmap', '(0.5)', 'frame', '(0.5)', '(0)'], {'dtype': 'cv2.CV_32F'}), '(heatmap, 0.5, frame, 0.5, 0, dtype=cv2.CV_32F)\n', (13593, 13640), False, 'import cv2\n'), ((13755, 13785), 'cv2.imwrite', 'cv2.imwrite', (['path_overlay', 'fin'], {}), '(path_overlay, fin)\n', (13766, 13785), False, 'import cv2\n'), ((15395, 15420), 'torch.randint', 'torch.randint', (['(0)', 'N', '(1,)'], {}), '(0, N, (1,))\n', (15408, 15420), False, 'import torch\n'), ((15668, 15702), 'torch.nn.functional.adaptive_max_pool2d', 'F.adaptive_max_pool2d', (['mask_pos', '(1)'], {}), '(mask_pos, 1)\n', (15689, 15702), True, 'import torch.nn.functional as F\n'), ((15774, 15808), 'torch.nn.functional.adaptive_max_pool2d', 'F.adaptive_max_pool2d', (['mask_neg', '(1)'], {}), '(mask_neg, 1)\n', (15795, 15808), True, 'import torch.nn.functional as F\n'), ((18905, 18954), 'viz.plot_loss_loc_sep_acc_metrics', 'plot_loss_loc_sep_acc_metrics', (['args.ckpt', 'history'], {}), '(args.ckpt, history)\n', (18934, 18954), False, 'from viz import plot_loss_loc_sep_acc_metrics\n'), ((20355, 20380), 'torch.randint', 'torch.randint', (['(0)', 'N', '(1,)'], {}), '(0, N, (1,))\n', (20368, 20380), False, 'import torch\n'), ((20627, 20661), 'torch.nn.functional.adaptive_max_pool2d', 'F.adaptive_max_pool2d', (['mask_pos', '(1)'], {}), '(mask_pos, 1)\n', (20648, 20661), True, 'import torch.nn.functional as F\n'), ((20733, 20767), 'torch.nn.functional.adaptive_max_pool2d', 
'F.adaptive_max_pool2d', (['mask_neg', '(1)'], {}), '(mask_neg, 1)\n', (20754, 20767), True, 'import torch.nn.functional as F\n'), ((22297, 22321), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (22319, 22321), False, 'import torch\n'), ((22389, 22408), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (22406, 22408), False, 'import time\n'), ((24335, 24355), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (24345, 24355), False, 'import torch\n'), ((25358, 25378), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (25368, 25378), False, 'import torch\n'), ((25914, 25934), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (25924, 25934), False, 'import torch\n'), ((32467, 32514), 'os.path.join', 'os.path.join', (['args.ckpt', '"""visualization_train/"""'], {}), "(args.ckpt, 'visualization_train/')\n", (32479, 32514), False, 'import os\n'), ((32523, 32556), 'utils.makedirs', 'makedirs', (['args.ckpt'], {'remove': '(False)'}), '(args.ckpt, remove=False)\n', (32531, 32556), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((1126, 1159), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['mag_mix', 'grid_warp'], {}), '(mag_mix, grid_warp)\n', (1139, 1159), True, 'import torch.nn.functional as F\n'), ((1379, 1399), 'torch.log1p', 'torch.log1p', (['mag_mix'], {}), '(mag_mix)\n', (1390, 1399), False, 'import torch\n'), ((1421, 1451), 'torch.clamp', 'torch.clamp', (['weight', '(0.001)', '(10)'], {}), '(weight, 0.001, 10)\n', (1432, 1451), False, 'import torch\n'), ((1486, 1510), 'torch.ones_like', 'torch.ones_like', (['mag_mix'], {}), '(mag_mix)\n', (1501, 1510), False, 'import torch\n'), ((2832, 2876), 'models.activate', 'activate', (['emb_frames[n]', 'args.img_activation'], {}), '(emb_frames[n], args.img_activation)\n', (2840, 2876), False, 'from models import ModelBuilder, activate\n'), 
((3344, 3390), 'models.activate', 'activate', (['pred_mask[n]', 'args.output_activation'], {}), '(pred_mask[n], args.output_activation)\n', (3352, 3390), False, 'from models import ModelBuilder, activate\n'), ((4120, 4162), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['pred_masks_[n]', 'grid_unwarp'], {}), '(pred_masks_[n], grid_unwarp)\n', (4133, 4162), True, 'import torch.nn.functional as F\n'), ((4978, 5051), 'utils.istft_reconstruction', 'istft_reconstruction', (['pred_mag', 'phase_mix[j, 0]'], {'hop_length': 'args.stft_hop'}), '(pred_mag, phase_mix[j, 0], hop_length=args.stft_hop)\n', (4998, 5051), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((6926, 6968), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['pred_masks_[n]', 'grid_unwarp'], {}), '(pred_masks_[n], grid_unwarp)\n', (6939, 6968), True, 'import torch.nn.functional as F\n'), ((7002, 7042), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['gt_masks_[n]', 'grid_unwarp'], {}), '(gt_masks_[n], grid_unwarp)\n', (7015, 7042), True, 'import torch.nn.functional as F\n'), ((8268, 8298), 'os.path.join', 'os.path.join', (['args.vis', 'prefix'], {}), '(args.vis, prefix)\n', (8280, 8298), False, 'import os\n'), ((8755, 8794), 'os.path.join', 'os.path.join', (['args.vis', 'filename_mixmag'], {}), '(args.vis, filename_mixmag)\n', (8767, 8794), False, 'import os\n'), ((8849, 8888), 'os.path.join', 'os.path.join', (['args.vis', 'filename_weight'], {}), '(args.vis, filename_weight)\n', (8861, 8888), False, 'import os\n'), ((8929, 8968), 'os.path.join', 'os.path.join', (['args.vis', 'filename_mixwav'], {}), '(args.vis, filename_mixwav)\n', (8941, 8968), False, 'import os\n'), ((9380, 9451), 'utils.istft_reconstruction', 'istft_reconstruction', (['gt_mag', 'phase_mix[j, 0]'], {'hop_length': 'args.stft_hop'}), '(gt_mag, phase_mix[j, 0], hop_length=args.stft_hop)\n', (9400, 9451), False, 'from utils 
import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((9607, 9680), 'utils.istft_reconstruction', 'istft_reconstruction', (['pred_mag', 'phase_mix[j, 0]'], {'hop_length': 'args.stft_hop'}), '(pred_mag, phase_mix[j, 0], hop_length=args.stft_hop)\n', (9627, 9680), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((10478, 10504), 'utils.magnitude2heatmap', 'magnitude2heatmap', (['gt_mag_'], {}), '(gt_mag_)\n', (10495, 10504), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((10528, 10556), 'utils.magnitude2heatmap', 'magnitude2heatmap', (['pred_mag_'], {}), '(pred_mag_)\n', (10545, 10556), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((11383, 11421), 'os.path.join', 'os.path.join', (['args.vis', 'filename_frame'], {}), '(args.vis, filename_frame)\n', (11395, 11421), False, 'import os\n'), ((12104, 12144), 'os.path.join', 'os.path.join', (['args.vis', 'filename_heatmap'], {}), '(args.vis, filename_heatmap)\n', (12116, 12144), False, 'import os\n'), ((12717, 12755), 'os.path.join', 'os.path.join', (['args.vis', 'filename_frame'], {}), '(args.vis, filename_frame)\n', (12729, 12755), False, 'import os\n'), ((13438, 13478), 'os.path.join', 'os.path.join', (['args.vis', 'filename_heatmap'], {}), '(args.vis, filename_heatmap)\n', (13450, 13478), False, 'import os\n'), ((17018, 17065), 'models.activate', 'activate', (['pred_masks[n]', 'args.output_activation'], {}), '(pred_masks[n], args.output_activation)\n', (17026, 17065), False, 'from models import ModelBuilder, activate\n'), ((22000, 22047), 'models.activate', 'activate', (['pred_masks[n]', 'args.output_activation'], {}), '(pred_masks[n], 
args.output_activation)\n', (22008, 22047), False, 'from models import ModelBuilder, activate\n'), ((29311, 29345), 'os.path.exists', 'os.path.exists', (['args.weights_model'], {}), '(args.weights_model)\n', (29325, 29345), False, 'import os\n'), ((32606, 32651), 'os.path.join', 'os.path.join', (['args.ckpt', '"""visualization_val/"""'], {}), "(args.ckpt, 'visualization_val/')\n", (32618, 32651), False, 'import os\n'), ((1217, 1250), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['mags[n]', 'grid_warp'], {}), '(mags[n], grid_warp)\n', (1230, 1250), True, 'import torch.nn.functional as F\n'), ((2012, 2030), 'torch.log', 'torch.log', (['mag_mix'], {}), '(mag_mix)\n', (2021, 2030), False, 'import torch\n'), ((5465, 5484), 'numpy.asarray', 'np.asarray', (['gts_wav'], {}), '(gts_wav)\n', (5475, 5484), True, 'import numpy as np\n'), ((5502, 5523), 'numpy.asarray', 'np.asarray', (['preds_wav'], {}), '(preds_wav)\n', (5512, 5523), True, 'import numpy as np\n'), ((5613, 5632), 'numpy.asarray', 'np.asarray', (['gts_wav'], {}), '(gts_wav)\n', (5623, 5632), True, 'import numpy as np\n'), ((10074, 10113), 'os.path.join', 'os.path.join', (['args.vis', 'filename_gtmask'], {}), '(args.vis, filename_gtmask)\n', (10086, 10113), False, 'import os\n'), ((10169, 10210), 'os.path.join', 'os.path.join', (['args.vis', 'filename_predmask'], {}), '(args.vis, filename_predmask)\n', (10181, 10210), False, 'import os\n'), ((10594, 10632), 'os.path.join', 'os.path.join', (['args.vis', 'filename_gtmag'], {}), '(args.vis, filename_gtmag)\n', (10606, 10632), False, 'import os\n'), ((10690, 10730), 'os.path.join', 'os.path.join', (['args.vis', 'filename_predmag'], {}), '(args.vis, filename_predmag)\n', (10702, 10730), False, 'import os\n'), ((10960, 10998), 'os.path.join', 'os.path.join', (['args.vis', 'filename_gtwav'], {}), '(args.vis, filename_gtwav)\n', (10972, 10998), False, 'import os\n'), ((11048, 11088), 'os.path.join', 'os.path.join', (['args.vis', 'filename_predwav'], {}), 
'(args.vis, filename_predwav)\n', (11060, 11088), False, 'import os\n'), ((14913, 14945), 'torch.autograd.Variable', 'torch.autograd.Variable', (['mag_mix'], {}), '(mag_mix)\n', (14936, 14945), False, 'import torch\n'), ((15930, 15963), 'torch.ones', 'torch.ones', (['B'], {'device': 'args.device'}), '(B, device=args.device)\n', (15940, 15963), False, 'import torch\n'), ((15985, 16019), 'torch.zeros', 'torch.zeros', (['B'], {'device': 'args.device'}), '(B, device=args.device)\n', (15996, 16019), False, 'import torch\n'), ((19855, 19887), 'torch.autograd.Variable', 'torch.autograd.Variable', (['mag_mix'], {}), '(mag_mix)\n', (19878, 19887), False, 'import torch\n'), ((20889, 20922), 'torch.ones', 'torch.ones', (['B'], {'device': 'args.device'}), '(B, device=args.device)\n', (20899, 20922), False, 'import torch\n'), ((20944, 20978), 'torch.zeros', 'torch.zeros', (['B'], {'device': 'args.device'}), '(B, device=args.device)\n', (20955, 20978), False, 'import torch\n'), ((22348, 22367), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (22365, 22367), False, 'import time\n'), ((32701, 32747), 'os.path.join', 'os.path.join', (['args.ckpt', '"""visualization_test/"""'], {}), "(args.ckpt, 'visualization_test/')\n", (32713, 32747), False, 'import os\n'), ((5298, 5316), 'numpy.abs', 'np.abs', (['gts_wav[n]'], {}), '(gts_wav[n])\n', (5304, 5316), True, 'import numpy as np\n'), ((5353, 5373), 'numpy.abs', 'np.abs', (['preds_wav[n]'], {}), '(preds_wav[n])\n', (5359, 5373), True, 'import numpy as np\n'), ((14773, 14807), 'torch.autograd.Variable', 'torch.autograd.Variable', (['frames[n]'], {}), '(frames[n])\n', (14796, 14807), False, 'import torch\n'), ((14846, 14878), 'torch.autograd.Variable', 'torch.autograd.Variable', (['mags[n]'], {}), '(mags[n])\n', (14869, 14878), False, 'import torch\n'), ((19715, 19749), 'torch.autograd.Variable', 'torch.autograd.Variable', (['frames[n]'], {}), '(frames[n])\n', (19738, 19749), False, 'import torch\n'), ((19788, 19820), 
'torch.autograd.Variable', 'torch.autograd.Variable', (['mags[n]'], {}), '(mags[n])\n', (19811, 19820), False, 'import torch\n'), ((1056, 1086), 'utils.warpgrid', 'warpgrid', (['B', '(256)', 'T'], {'warp': '(True)'}), '(B, 256, T, warp=True)\n', (1064, 1086), False, 'from utils import AverageMeter, recover_rgb, magnitude2heatmap, istft_reconstruction, warpgrid, combine_video_audio, save_video, makedirs\n'), ((9894, 9927), 'numpy.clip', 'np.clip', (['gt_masks_[n][j, 0]', '(0)', '(1)'], {}), '(gt_masks_[n][j, 0], 0, 1)\n', (9901, 9927), True, 'import numpy as np\n'), ((9977, 10012), 'numpy.clip', 'np.clip', (['pred_masks_[n][j, 0]', '(0)', '(1)'], {}), '(pred_masks_[n][j, 0], 0, 1)\n', (9984, 10012), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
from random import *
def centroid_histogram(clt):
numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
(hist, _) = np.histogram(clt.labels_, bins=numLabels)
hist = hist.astype("float")
hist /= hist.sum()
# Olusturulan histogrami donduruyor
return hist
def plot_colors(hist, centroids):
    """Render a dominant-color bar and a random pixel pattern built from it.

    Parameters
    ----------
    hist : sequence of float
        Relative frequency of each dominant color (see centroid_histogram).
    centroids : sequence of array-like
        One color (3 channels) per entry of ``hist``.

    Returns
    -------
    numpy.ndarray
        A 300x300x3 uint8 bar image showing the dominant colors side by side.

    Side effects: shows the generated pattern with matplotlib and saves it
    to ``images/pattern.png``.
    """
    # Dimensions of the generated pattern.
    newPatternWidth = 1500
    newPatternHeight = 1500
    # Empty canvases: one for the dominant-color bar, one for the pattern.
    bar = np.zeros((300, 300, 3), dtype="uint8")
    img2 = np.zeros((newPatternWidth, newPatternHeight, 3), dtype="uint8")
    startX = 0
    ColorBalanceArray = []
    # Draw one bar segment per dominant color, sized by its frequency.
    for (percent, color) in zip(hist, centroids):
        endX = startX + (percent * 300)
        cv2.rectangle(bar, (int(startX), 0), (int(endX), 300),
                      color.astype("uint8").tolist(), -1)
        # Fill the sampling pool proportionally to the color's frequency,
        # skipping near-white colors (all three channels >= 250).
        # BUG FIX: the original tested channel [0] three times instead of
        # channels 0, 1 and 2, so near-white colors with a dark red channel
        # slipped through while others were judged on red alone.
        channels = color.astype(np.uint8)
        if channels[0] < 250 or channels[1] < 250 or channels[2] < 250:
            for _ in range(int(endX) - int(startX)):
                ColorBalanceArray.append(color.astype("uint8").tolist())
        startX = endX
    # Fill every cell of the pattern with a random dominant color.
    # NOTE(review): if every centroid is near-white the pool is empty and
    # choice() raises IndexError -- same as the original behavior.
    for a in range(newPatternHeight):
        for i in range(newPatternWidth):
            cv2.rectangle(img2, (int(i), a), (int(i + 1), a + 1),
                          choice(ColorBalanceArray), -1)
    plt.figure("Pattern")
    plt.axis("off")
    plt.imshow(img2)
    # Persist the generated pattern.
    plt.savefig('images/pattern.png')
    # return the bar chart
    return bar
| [
"matplotlib.pyplot.imshow",
"numpy.histogram",
"matplotlib.pyplot.savefig",
"numpy.unique",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.axis"
] | [((199, 240), 'numpy.histogram', 'np.histogram', (['clt.labels_'], {'bins': 'numLabels'}), '(clt.labels_, bins=numLabels)\n', (211, 240), True, 'import numpy as np\n'), ((604, 642), 'numpy.zeros', 'np.zeros', (['(300, 300, 3)'], {'dtype': '"""uint8"""'}), "((300, 300, 3), dtype='uint8')\n", (612, 642), True, 'import numpy as np\n'), ((655, 718), 'numpy.zeros', 'np.zeros', (['(newPatternWidth, newPatternHeight, 3)'], {'dtype': '"""uint8"""'}), "((newPatternWidth, newPatternHeight, 3), dtype='uint8')\n", (663, 718), True, 'import numpy as np\n'), ((1672, 1693), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Pattern"""'], {}), "('Pattern')\n", (1682, 1693), True, 'import matplotlib.pyplot as plt\n'), ((1698, 1713), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1706, 1713), True, 'import matplotlib.pyplot as plt\n'), ((1718, 1734), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img2'], {}), '(img2)\n', (1728, 1734), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1797), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/pattern.png"""'], {}), "('images/pattern.png')\n", (1775, 1797), True, 'import matplotlib.pyplot as plt\n'), ((154, 176), 'numpy.unique', 'np.unique', (['clt.labels_'], {}), '(clt.labels_)\n', (163, 176), True, 'import numpy as np\n')] |
import numpy as np
from .parse import parse_xtekct_file
class Config(object):
    def __init__(self):
        """ Configuration object which contains all settings neccessary for the forward projection
        and tomographic reconstruction using the axitom algorithm.

        Only the primary settings (voxel counts, object and detector sizes,
        acquisition geometry, angular increment) get their defaults here; all
        derived quantities are computed by update(), which must be called
        again whenever a primary setting is changed.
        """
        # Reconstruction volume resolution.
        self.n_voxels_x = 2000
        self.n_voxels_y = 2000
        self.n_voxels_z = 2000

        # Physical extent of the reconstructed object.
        self.object_size_x = 27.229675726
        self.object_size_y = 27.229675726
        self.object_size_z = 27.229675726

        # Detector resolution and physical size.
        self.n_pixels_u = 2000
        self.n_pixels_v = 2000
        self.detector_size_u = 400.
        self.detector_size_v = 400.

        # Acquisition geometry.
        self.source_to_detector_dist = 797.8693
        self.source_to_object_dist = 95.1665735244751
        self.angular_inc = 1.
        self.pixel_offset_u = 0
        self.pixel_offset_v = 0
        self.center_of_rot_y = 0

        # Derived quantities are computed in exactly one place (update) so
        # that construction and later reconfiguration can never drift apart.
        # (Previously the same formulas were duplicated here verbatim.)
        self.update()

    def update(self):
        """Recompute all derived quantities from the primary settings."""
        # Projection angles covering a full revolution.
        self.projection_angs = np.arange(0., 360, self.angular_inc)
        self.n_projections = len(self.projection_angs)

        # Voxel and pixel sizes follow from extent / resolution.
        self.voxel_size_x = self.object_size_x / self.n_voxels_x
        self.voxel_size_y = self.object_size_y / self.n_voxels_y
        self.voxel_size_z = self.object_size_z / self.n_voxels_z
        self.pixel_size_u = self.detector_size_u / self.n_pixels_u
        self.pixel_size_v = self.detector_size_v / self.n_pixels_v

        # Voxel coordinate axes, centred on the origin.
        self.object_xs = (np.arange(self.n_voxels_x, dtype=np.float32) - self.n_voxels_x / 2.) * self.voxel_size_x
        self.object_ys = (np.arange(self.n_voxels_y, dtype=np.float32) - self.n_voxels_y / 2.) * self.voxel_size_y
        self.object_zs = (np.arange(self.n_voxels_z, dtype=np.float32) - self.n_voxels_z / 2.) * self.voxel_size_z

        # Detector pixel coordinates, centred and shifted by the pixel offsets.
        self.detector_us = (np.arange(self.n_pixels_u,
                                      dtype=np.float32) - self.n_pixels_u / 2.) * self.pixel_size_u + self.pixel_offset_u * self.pixel_size_u
        self.detector_vs = (np.arange(self.n_pixels_v,
                                      dtype=np.float32) - self.n_pixels_v / 2.) * self.pixel_size_v + self.pixel_offset_v * self.pixel_size_v
def config_from_xtekct(file_path):
    """ Make config object from a Nikon X-tek CT input file

    The .xtekct file is parsed and a config file containing all relevant settings is returned.

    Parameters
    ----------
    file_path : str
        The path to the .xtekct file

    Returns
    -------
    obj
        Config object

    Raises
    ------
    IOError
        If a required key is missing from the parsed .xtekct file.
    """
    inputfile = parse_xtekct_file(file_path)

    conf = Config()
    try:
        # Reconstruction volume resolution and physical extent.
        conf.n_voxels_x = inputfile["VoxelsX"]
        conf.n_voxels_y = inputfile["VoxelsY"]
        conf.n_voxels_z = inputfile["VoxelsZ"]

        conf.object_size_x = inputfile["VoxelSizeX"] * conf.n_voxels_x
        conf.object_size_y = inputfile["VoxelSizeY"] * conf.n_voxels_y
        conf.object_size_z = inputfile["VoxelSizeZ"] * conf.n_voxels_z

        # Detector resolution and physical size.
        conf.n_pixels_u = inputfile["DetectorPixelsX"]
        conf.n_pixels_v = inputfile["DetectorPixelsY"]

        conf.detector_size_u = inputfile["DetectorPixelSizeX"] * conf.n_pixels_u
        conf.detector_size_v = inputfile["DetectorPixelSizeY"] * conf.n_pixels_v

        # Acquisition geometry.
        conf.source_to_detector_dist = inputfile["SrcToDetector"]
        conf.source_to_object_dist = inputfile["SrcToObject"]

    except Exception as e:
        raise IOError("Parsing of X-tec file failed with key: ", e)

    # BUG FIX: recompute the derived quantities (voxel/pixel sizes,
    # projection angles, coordinate axes). Previously they kept the default
    # values computed in Config.__init__, so the returned config was
    # inconsistent with the settings just read from the file.
    conf.update()
    return conf
| [
"numpy.arange"
] | [((908, 945), 'numpy.arange', 'np.arange', (['(0.0)', '(360)', 'self.angular_inc'], {}), '(0.0, 360, self.angular_inc)\n', (917, 945), True, 'import numpy as np\n'), ((2126, 2163), 'numpy.arange', 'np.arange', (['(0.0)', '(360)', 'self.angular_inc'], {}), '(0.0, 360, self.angular_inc)\n', (2135, 2163), True, 'import numpy as np\n'), ((1358, 1402), 'numpy.arange', 'np.arange', (['self.n_voxels_x'], {'dtype': 'np.float32'}), '(self.n_voxels_x, dtype=np.float32)\n', (1367, 1402), True, 'import numpy as np\n'), ((1473, 1517), 'numpy.arange', 'np.arange', (['self.n_voxels_y'], {'dtype': 'np.float32'}), '(self.n_voxels_y, dtype=np.float32)\n', (1482, 1517), True, 'import numpy as np\n'), ((1588, 1632), 'numpy.arange', 'np.arange', (['self.n_voxels_z'], {'dtype': 'np.float32'}), '(self.n_voxels_z, dtype=np.float32)\n', (1597, 1632), True, 'import numpy as np\n'), ((2576, 2620), 'numpy.arange', 'np.arange', (['self.n_voxels_x'], {'dtype': 'np.float32'}), '(self.n_voxels_x, dtype=np.float32)\n', (2585, 2620), True, 'import numpy as np\n'), ((2691, 2735), 'numpy.arange', 'np.arange', (['self.n_voxels_y'], {'dtype': 'np.float32'}), '(self.n_voxels_y, dtype=np.float32)\n', (2700, 2735), True, 'import numpy as np\n'), ((2806, 2850), 'numpy.arange', 'np.arange', (['self.n_voxels_z'], {'dtype': 'np.float32'}), '(self.n_voxels_z, dtype=np.float32)\n', (2815, 2850), True, 'import numpy as np\n'), ((1706, 1750), 'numpy.arange', 'np.arange', (['self.n_pixels_u'], {'dtype': 'np.float32'}), '(self.n_pixels_u, dtype=np.float32)\n', (1715, 1750), True, 'import numpy as np\n'), ((1903, 1947), 'numpy.arange', 'np.arange', (['self.n_pixels_v'], {'dtype': 'np.float32'}), '(self.n_pixels_v, dtype=np.float32)\n', (1912, 1947), True, 'import numpy as np\n'), ((2924, 2968), 'numpy.arange', 'np.arange', (['self.n_pixels_u'], {'dtype': 'np.float32'}), '(self.n_pixels_u, dtype=np.float32)\n', (2933, 2968), True, 'import numpy as np\n'), ((3121, 3165), 'numpy.arange', 'np.arange', 
(['self.n_pixels_v'], {'dtype': 'np.float32'}), '(self.n_pixels_v, dtype=np.float32)\n', (3130, 3165), True, 'import numpy as np\n')] |
from mod_copeland_yateesh import sample_complexity

# Configuration passed to sample_complexity(); the alternative heuristics are
# kept as commented-out lines for quick switching between experiment runs.
args = {}
# args['heuristic'] = 'random'
args['heuristic'] = 'greedy'
# args['heuristic'] = 'mod_dcb'
args['n_voters'] = 4639  # presumably the voter count of the data set -- confirm in mod_copeland_yateesh
args['alpha'] = 0.05  # confidence parameter passed through to sample_complexity
args['seed'] = 42  # overwritten per run in the (commented-out) loops below
args['ques_limit'] = 5
args['gamma'] = 0.5
args['probs'] = [0.05, 0.1, 0.2, 0.4]
# Question limits swept over; becomes the x-axis of the final plot.
q_limits = [1, 2, 3, 5, 8, 10, 13, 15, 20, 25, 30]
# q_limits = [1]
# gammas = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
# gammas = [0.0]
greedy_itrs = []
random_itrs = []
seeds = [0, 1, 2, 3, 4]
# seeds = [0]
# The loops below were used to (re)generate the result lists; they stay
# commented out because the hard-coded results further down are plotted instead.
# for seed in seeds:
#     args['seed'] = seed
#     itr, winner = sample_complexity(args)
#     print("seed", seed, "itr", itr, "winner", winner)
#     random_itrs.append(itr)
#
# print(random_itrs)
# print(sum(random_itrs)/6)
# for seed in seeds:
#     seed_vals = []
#     args['seed'] = seed
#     for q_limit in q_limits:
#         args['ques_limit'] = q_limit
#         print("Que. limit ", q_limit, "started")
#         itr, winner = sample_complexity(args)
#         print("seed", seed, "itr", itr, "winner", winner)
#         seed_vals.append(itr)
#     greedy_itrs.append(seed_vals)
# ###
# print(greedy_itrs)
# print(sample_complexity(args))
# Pre-computed sample-complexity results: one inner list per seed in `seeds`,
# one entry per question limit in `q_limits`.
greedy_itrs = [[283, 199, 167, 167, 167, 167, 167, 167, 167, 167, 167], [209, 160, 109, 93, 93, 93, 93, 93, 93, 93, 93], [216, 169, 112, 104, 104, 110, 104, 104, 104, 104, 104], [228, 124, 116, 116, 116, 116, 116, 116, 116, 116, 116], [479, 362, 363, 363, 362, 362, 363, 363, 363, 363, 363]]
dcb_itrs = [[499, 373, 343, 170, 180, 179, 180, 180, 180, 180, 180], [893, 714, 660, 546, 547, 468, 340, 79, 79, 79, 79], [672, 298, 231, 201, 207, 180, 166, 169, 169, 169, 169], [940, 432, 310, 310, 116, 175, 194, 198, 198, 198, 198], [481, 523, 357, 352, 446, 365, 346, 345, 345, 345, 345]]
dcb_mod_itrs = [[589, 385, 183, 157, 168, 168, 175, 179, 178, 175, 174], [711, 558, 553, 469, 427, 299, 291, 57, 117, 119, 118], [454, 349, 267, 214, 168, 168, 165, 153, 103, 72, 103], [648, 440, 302, 310, 117, 120, 181, 180, 198, 197, 28], [564, 364, 448, 361, 345, 447, 363, 56, 345, 345, 345]]
import numpy as np
import matplotlib.pyplot as plt
def convert(lst, n_runs=5):
    """Average a list of per-seed result lists element-wise.

    Parameters
    ----------
    lst : list of list of number
        One list of sample-complexity values per random seed; all inner
        lists must have the same length.
    n_runs : int, optional
        Number of runs to average over. Defaults to 5, the number of seeds
        used in the experiments above, so existing callers are unchanged.
        (Previously this was a hard-coded constant.)

    Returns
    -------
    list of float
        The element-wise mean across the ``n_runs`` runs.
    """
    # Element-wise sum via numpy broadcasting, then divide by the run count.
    totals = sum(np.array(run) for run in lst)
    return [value / n_runs for value in totals]
# Print the averaged per-question-limit sample complexities for each heuristic.
print(convert(greedy_itrs))
print(convert(dcb_itrs))
print(convert(dcb_mod_itrs))
import seaborn as sns
# Apply seaborn's default theme to the matplotlib figure below.
sns.set_theme()
plt.plot(q_limits, convert(greedy_itrs), label="Greedy (ours)")
# plt.plot(q_limits, convert(dcb_itrs), label="DCB")
plt.plot(q_limits, convert(dcb_mod_itrs), label="DCB Extended")
plt.xlabel("Num of questions asked")
plt.ylabel("Avg sample complexity")
plt.title("US election 2012 data (16 candidates)")
plt.legend()
# Save before show: some backends clear the figure on show(), which would
# leave an empty image file.
plt.savefig("us_comp.png")
plt.show()
# greedy_itrs = [[257, 307, 377, 424, 297, 453], [252, 303, 377, 424, 297, 453], [251, 307, 377, 424, 297, 453], [254, 307, 377, 424, 297, 453], [254, 307, 377, 424, 297, 453], [254, 307, 377, 424, 297, 453]]
#
# print([sum(i)/6 for i in greedy_itrs])
#
# import matplotlib.pyplot as plt
#
# plt.plot(gammas, [sum(i)/6 for i in greedy_itrs])
# plt.xlabel("Gamma")
# plt.ylabel("Average sample complexity")
# plt.show()
# res = [[649, 496, 496, 496, 496, 496], [565, 496, 496, 496, 496, 496], [524, 747, 782, 526, 526, 526]]
#
# def suml(l1, l2):
# return [(l1[i] + l2[i]) for i in range(len(l1))]
#
# resf = suml(suml(res[0], res[1]), res[2])
# resf = [i/3 for i in resf]
#
# import matplotlib.pyplot as plt
#
# plt.plot(gammas, resf)
# plt.xlabel("Gamma Values")
# plt.ylabel("Sample complexity averaged over 3 seeds")
# plt.show() | [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"seaborn.set_theme",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((2313, 2328), 'seaborn.set_theme', 'sns.set_theme', ([], {}), '()\n', (2326, 2328), True, 'import seaborn as sns\n'), ((2512, 2548), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Num of questions asked"""'], {}), "('Num of questions asked')\n", (2522, 2548), True, 'import matplotlib.pyplot as plt\n'), ((2549, 2584), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Avg sample complexity"""'], {}), "('Avg sample complexity')\n", (2559, 2584), True, 'import matplotlib.pyplot as plt\n'), ((2585, 2635), 'matplotlib.pyplot.title', 'plt.title', (['"""US election 2012 data (16 candidates)"""'], {}), "('US election 2012 data (16 candidates)')\n", (2594, 2635), True, 'import matplotlib.pyplot as plt\n'), ((2636, 2648), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2646, 2648), True, 'import matplotlib.pyplot as plt\n'), ((2649, 2675), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""us_comp.png"""'], {}), "('us_comp.png')\n", (2660, 2675), True, 'import matplotlib.pyplot as plt\n'), ((2676, 2686), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2684, 2686), True, 'import matplotlib.pyplot as plt\n'), ((2119, 2130), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (2127, 2130), True, 'import numpy as np\n')] |
import numpy as np
import unittest
from convolution import conv2d, add_padding
class TestConvolution(unittest.TestCase):
    """Unit tests for conv2d and add_padding.

    The hand-computed expected outputs in the *_difficult tests follow the
    naming scheme answer_<stride hw>_<dilation hw>_<padding hw>, e.g.
    answer_23_13_25 means stride=(2, 3), dilation=(1, 3), padding=(2, 5).
    """
    def test_paddings_shape(self, N: int = 1000):
        """Padding a (h, w) matrix by (rows, cols) must give (h + 2*rows, w + 2*cols)."""
        for _ in range(N):
            m_h = np.random.randint(3, 100)
            m_w = np.random.randint(3, 100)
            random_matrix = np.random.rand(m_h, m_w)
            rows, cols = np.random.randint(0, 100, 2)
            random_matrix_with_padding = add_padding(random_matrix, (rows, cols))
            self.assertEqual(random_matrix_with_padding.shape, (m_h + rows*2, m_w + cols*2))
    def test_random_case(self, N: int = 1000):
        """Fuzz conv2d: invalid parameter combinations must raise AssertionError,
        valid ones must produce the analytically expected output shape."""
        for _ in range(N):
            d = np.random.randint(1, 100, 2)
            k = np.random.choice([1, 3, 5, 7, 9, 10], 2)  # `10` is to check oddness assertion
            random_matrix = np.random.rand(*d)
            random_kernel = np.random.rand(*k)
            for __ in range(N):
                stride = np.random.randint(0, 5, 2)  # `0` is to check parameters assertion
                dilation = np.random.randint(0, 5, 2)  # `0` is to check parameters assertion
                padding = np.random.randint(-1, 5, 2)  # `-1` is to check parameters assertion
                try:  # `try` in case of division by zero when stride[0] or stride[1] equal to zero
                    # Standard convolution output-size formula:
                    # floor((d + 2p - dilation*(k-1) - 1) / stride) + 1.
                    h_out = np.floor((d[0] + 2 * padding[0] - k[0] - (k[0] - 1) * (dilation[0] - 1)) / stride[0]).astype(int) + 1
                    w_out = np.floor((d[1] + 2 * padding[1] - k[1] - (k[1] - 1) * (dilation[1] - 1)) / stride[1]).astype(int) + 1
                except:
                    h_out, w_out = None, None
                # print(f'Matr: {d} | Kern: {k} | Stri: {stride} | Dila: {dilation} | Padd: {padding} | OutD: {h_out, w_out}') # for debugging
                # NOTE(review): np.random.randint yields numpy integers, and
                # isinstance(numpy_int, int) is typically False, so the
                # `not isinstance(..., int)` terms look always-true here --
                # confirm conv2d's assertions are really meant to fire on
                # every iteration of this loop.
                if (stride[0] < 1 or stride[1] < 1 or dilation[0] < 1 or dilation[1] < 1 or padding[0] < 0 or padding[1] < 0 or
                        not isinstance(stride[0], int) or not isinstance(stride[1], int) or not isinstance(dilation[0], int) or
                        not isinstance(dilation[1], int) or not isinstance(padding[0], int) or not isinstance(padding[1], int)):
                    with self.assertRaises(AssertionError):
                        matrix_conved = conv2d(random_matrix, random_kernel, stride=stride, dilation=dilation, padding=padding)
                elif k[0] % 2 != 1 or k[1] % 2 != 1:
                    with self.assertRaises(AssertionError):
                        matrix_conved = conv2d(random_matrix, random_kernel, stride=stride, dilation=dilation, padding=padding)
                elif d[0] < k[0] or d[1] < k[1]:
                    with self.assertRaises(AssertionError):
                        matrix_conved = conv2d(random_matrix, random_kernel, stride=stride, dilation=dilation, padding=padding)
                elif h_out <= 0 or w_out <= 0:
                    with self.assertRaises(AssertionError):
                        matrix_conved = conv2d(random_matrix, random_kernel, stride=stride, dilation=dilation, padding=padding)
                else:
                    matrix_conved = conv2d(random_matrix, random_kernel, stride=stride, dilation=dilation, padding=padding)
                    self.assertEqual(matrix_conved.shape, (h_out, w_out))
    def test_kernel_3x3_easy(self):
        """Compare conv2d against hand-computed results for a 3x3 kernel."""
        matrix = np.array([[0, 4, 3, 2, 0, 1, 0],
                           [4, 3, 0, 1, 0, 1, 0],
                           [1, 3, 4, 2, 0, 1, 0],
                           [3, 4, 2, 2, 0, 1, 0],
                           [0, 0, 0, 0, 0, 1, 0]])
        kernel = np.array([[1, 1, 3],
                           [0, 2, 3],
                           [3, 3, 3]])
        # stride = 1, dilation = 1, padding = 0
        result_110 = conv2d(matrix, kernel)
        answer_110 = np.array([[43, 43, 25, 17, 6],
                               [52, 44, 17, 16, 6],
                               [30, 23, 10, 11, 6]])
        # stride = 1, dilation = 1, padding = 1
        result_111 = conv2d(matrix, kernel, padding=(1, 1))
        answer_111 = np.array([[33, 38, 24, 7, 9, 5, 3],
                               [41, 43, 43, 25, 17, 6, 4],
                               [45, 52, 44, 17, 16, 6, 4],
                               [28, 30, 23, 10, 11, 6, 4],
                               [15, 13, 12, 4, 8, 3, 1]])
        # stride = 1, dilation = 2, padding = 0
        result_120 = conv2d(matrix, kernel, dilation=(2, 2))
        answer_120 = np.array([[11, 19, 3]])
        # stride = 1, dilation = 2, padding = 1
        result_121 = conv2d(matrix, kernel, dilation=(2, 2), padding=(1, 1))
        answer_121 = np.array([[27, 15, 26, 6, 11],
                               [22, 11, 19, 3, 8],
                               [20, 8, 14, 0, 4]])
        # stride = 2, dilation = 1, padding = 0
        result_210 = conv2d(matrix, kernel, stride=(2, 2))
        answer_210 = np.array([[43, 25, 6],
                               [30, 10, 6]])
        # stride = 2, dilation = 1, padding = 1
        result_211 = conv2d(matrix, kernel, stride=(2, 2), padding=(1, 1))
        answer_211 = np.array([[33, 24, 9, 3],
                               [45, 44, 16, 4],
                               [15, 12, 8, 1]])
        # stride = 2, dilation = 2, padding = 0
        result_220 = conv2d(matrix, kernel, stride=(2, 2), dilation=(2, 2))
        answer_220 = np.array([[11, 3]])
        # stride = 2, dilation = 2, padding = 1
        result_221 = conv2d(matrix, kernel, stride=(2, 2), dilation=(2, 2), padding=(1, 1))
        answer_221 = np.array([[27, 26, 11],
                               [20, 14, 4]])
        self.assertEqual(result_110.tolist(), answer_110.tolist())
        self.assertEqual(result_111.tolist(), answer_111.tolist())
        self.assertEqual(result_120.tolist(), answer_120.tolist())
        self.assertEqual(result_121.tolist(), answer_121.tolist())
        self.assertEqual(result_210.tolist(), answer_210.tolist())
        self.assertEqual(result_211.tolist(), answer_211.tolist())
        self.assertEqual(result_220.tolist(), answer_220.tolist())
        self.assertEqual(result_221.tolist(), answer_221.tolist())
    def test_kernel_5x5_difficult(self):
        """Compare conv2d against hand-computed results for a 5x5 kernel over
        many stride/dilation/padding combinations."""
        matrix = np.array([[1, 4, 4, 2, 1, 0, 0, 1, 0, 0, 3, 3, 3, 4],
                           [0, 2, 0, 2, 0, 3, 4, 4, 2, 1, 1, 3, 0, 4],
                           [1, 1, 0, 0, 3, 4, 2, 4, 4, 2, 3, 0, 0, 4],
                           [4, 0, 1, 2, 0, 2, 0, 3, 3, 3, 0, 4, 1, 0],
                           [3, 0, 0, 3, 3, 3, 2, 0, 2, 1, 1, 0, 4, 2],
                           [2, 4, 3, 1, 1, 0, 2, 1, 3, 4, 4, 0, 2, 3],
                           [2, 4, 3, 3, 2, 1, 4, 0, 3, 4, 1, 2, 0, 0],
                           [2, 1, 0, 1, 1, 2, 2, 3, 0, 0, 1, 2, 4, 2],
                           [3, 3, 1, 1, 1, 1, 4, 4, 2, 3, 2, 2, 2, 3]])
        kernel = np.array([[2, 0, 2, 2, 2],
                           [2, 3, 1, 1, 3],
                           [3, 1, 1, 3, 1],
                           [2, 2, 3, 1, 1],
                           [0, 0, 1, 0, 0]])
        # default params
        result_11_11_00 = conv2d(matrix, kernel)
        answer_11_11_00 = np.array([[44., 58., 59., 62., 70., 80., 75., 92., 64., 72.],
                                   [52., 52., 59., 87., 92., 83., 77., 74., 71., 67.],
                                   [66., 63., 60., 64., 76., 79., 75., 82., 77., 64.],
                                   [75., 69., 64., 64., 69., 75., 70., 71., 75., 74.],
                                   [74., 71., 63., 66., 61., 75., 79., 47., 73., 76.]])
        # only stride: (1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (4, 6)
        result_12_11_00 = conv2d(matrix, kernel, stride=(1, 2))
        answer_12_11_00 = np.array([[44., 59., 70., 75., 64.],
                                   [52., 59., 92., 77., 71.],
                                   [66., 60., 76., 75., 77.],
                                   [75., 64., 69., 70., 75.],
                                   [74., 63., 61., 79., 73.]])
        result_13_11_00 = conv2d(matrix, kernel, stride=(1, 3))
        answer_13_11_00 = np.array([[44., 62., 75., 72.],
                                   [52., 87., 77., 67.],
                                   [66., 64., 75., 64.],
                                   [75., 64., 70., 74.],
                                   [74., 66., 79., 76.]])
        result_21_11_00 = conv2d(matrix, kernel, stride=(2, 1))
        answer_21_11_00 = np.array([[44., 58., 59., 62., 70., 80., 75., 92., 64., 72.],
                                   [66., 63., 60., 64., 76., 79., 75., 82., 77., 64.],
                                   [74., 71., 63., 66., 61., 75., 79., 47., 73., 76.]])
        result_22_11_00 = conv2d(matrix, kernel, stride=(2, 2))
        answer_22_11_00 = np.array([[44., 59., 70., 75., 64],
                                   [66., 60., 76., 75., 77],
                                   [74., 63., 61., 79., 73]])
        result_23_11_00 = conv2d(matrix, kernel, stride=(2, 3))
        answer_23_11_00 = np.array([[44., 62., 75., 72.],
                                   [66., 64., 75., 64.],
                                   [74., 66., 79., 76.]])
        result_31_11_00 = conv2d(matrix, kernel, stride=(3, 1))
        answer_31_11_00 = np.array([[44., 58., 59., 62., 70., 80., 75., 92., 64., 72.],
                                   [75., 69., 64., 64., 69., 75., 70., 71., 75., 74.]])
        result_32_11_00 = conv2d(matrix, kernel, stride=(3, 2))
        answer_32_11_00 = np.array([[44., 59., 70., 75., 64.],
                                   [75., 64., 69., 70., 75.]])
        result_46_11_00 = conv2d(matrix, kernel, stride=(4, 6))
        answer_46_11_00 = np.array([[44., 75.],
                                   [74., 79.]])
        # only dilation: (1, 2), (1, 3), (2, 1), (2, 2), (2, 3)
        result_11_12_00 = conv2d(matrix, kernel, dilation=(1, 2))
        answer_11_12_00 = np.array([[46., 70., 50., 77., 65., 94.],
                                   [67., 68., 67., 76., 53., 95.],
                                   [80., 65., 60., 64., 70., 73.],
                                   [74., 74., 77., 73., 79., 55.],
                                   [81., 66., 74., 60., 70., 58.]])
        result_11_13_00 = conv2d(matrix, kernel, dilation=(1, 3))
        answer_11_13_00 = np.array([[48., 77.],
                                   [65., 65.],
                                   [73., 55.],
                                   [97., 67.],
                                   [84., 68.]])
        result_11_21_00 = conv2d(matrix, kernel, dilation=(2, 1))
        answer_11_21_00 = np.array([[78., 73., 64., 72., 81., 69., 73., 69., 68., 81.]])
        result_11_22_00 = conv2d(matrix, kernel, dilation=(2, 2))
        answer_11_22_00 = np.array([[67., 55., 80., 63., 77., 79.]])
        result_11_23_00 = conv2d(matrix, kernel, dilation=(2, 3))
        answer_11_23_00 = np.array([[65., 79.]])
        # only paddings: (0, 1), (1, 0), (1, 1)
        result_11_11_01 = conv2d(matrix, kernel, padding=(0, 1))
        answer_11_11_01 = np.array([[41., 44., 58., 59., 62., 70., 80., 75., 92., 64., 72., 71.],
                                   [34., 52., 52., 59., 87., 92., 83., 77., 74., 71., 67., 43.],
                                   [51., 66., 63., 60., 64., 76., 79., 75., 82., 77., 64., 57.],
                                   [63., 75., 69., 64., 64., 69., 75., 70., 71., 75., 74., 43.],
                                   [51., 74., 71., 63., 66., 61., 75., 79., 47., 73., 76., 54.]])
        result_11_11_10 = conv2d(matrix, kernel, padding=(1, 0))
        answer_11_11_10 = np.array([[39., 45., 45., 61., 52., 58., 66., 63., 53., 56.],
                                   [44., 58., 59., 62., 70., 80., 75., 92., 64., 72.],
                                   [52., 52., 59., 87., 92., 83., 77., 74., 71., 67.],
                                   [66., 63., 60., 64., 76., 79., 75., 82., 77., 64.],
                                   [75., 69., 64., 64., 69., 75., 70., 71., 75., 74.],
                                   [74., 71., 63., 66., 61., 75., 79., 47., 73., 76.],
                                   [70., 59., 64., 55., 72., 83., 81., 77., 70., 69.]])
        result_11_11_11 = conv2d(matrix, kernel, padding=(1, 1))
        answer_11_11_11 = np.array([[26., 39., 45., 45., 61., 52., 58., 66., 63., 53., 56., 51.],
                                   [41., 44., 58., 59., 62., 70., 80., 75., 92., 64., 72., 71.],
                                   [34., 52., 52., 59., 87., 92., 83., 77., 74., 71., 67., 43.],
                                   [51., 66., 63., 60., 64., 76., 79., 75., 82., 77., 64., 57.],
                                   [63., 75., 69., 64., 64., 69., 75., 70., 71., 75., 74., 43.],
                                   [51., 74., 71., 63., 66., 61., 75., 79., 47., 73., 76., 54.],
                                   [59., 70., 59., 64., 55., 72., 83., 81., 77., 70., 69., 58.]])
        # different sets of parameters
        result_21_13_00 = conv2d(matrix, kernel, stride=(2, 1), dilation=(1, 3), padding=(0, 0))
        answer_21_13_00 = np.array([[48., 77.],
                                   [73., 55.],
                                   [84., 68.]])
        result_23_13_13 = conv2d(matrix, kernel, stride=(2, 3), dilation=(1, 3), padding=(1, 3))
        answer_23_13_13 = np.array([[28., 36., 31.],
                                   [53., 65., 47.],
                                   [62., 97., 70.],
                                   [64., 79., 74.]])
        result_32_23_22 = conv2d(matrix, kernel, stride=(3, 2), dilation=(2, 3), padding=(2, 2))
        answer_32_23_22 = np.array([[54., 55., 34.],
                                   [34., 69., 43.]])
        # default params
        self.assertEqual(result_11_11_00.tolist(), answer_11_11_00.tolist())
        # only stride: (1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (4, 6)
        self.assertEqual(result_12_11_00.tolist(), answer_12_11_00.tolist())
        self.assertEqual(result_13_11_00.tolist(), answer_13_11_00.tolist())
        self.assertEqual(result_21_11_00.tolist(), answer_21_11_00.tolist())
        self.assertEqual(result_22_11_00.tolist(), answer_22_11_00.tolist())
        self.assertEqual(result_23_11_00.tolist(), answer_23_11_00.tolist())
        self.assertEqual(result_31_11_00.tolist(), answer_31_11_00.tolist())
        self.assertEqual(result_32_11_00.tolist(), answer_32_11_00.tolist())
        self.assertEqual(result_46_11_00.tolist(), answer_46_11_00.tolist())
        # only dilation: (1, 2), (1, 3), (2, 1), (2, 2), (2, 3)
        self.assertEqual(result_11_12_00.tolist(), answer_11_12_00.tolist())
        self.assertEqual(result_11_13_00.tolist(), answer_11_13_00.tolist())
        self.assertEqual(result_11_21_00.tolist(), answer_11_21_00.tolist())
        self.assertEqual(result_11_22_00.tolist(), answer_11_22_00.tolist())
        self.assertEqual(result_11_23_00.tolist(), answer_11_23_00.tolist())
        # only paddings: (0, 1), (1, 0), (1, 1)
        self.assertEqual(result_11_11_01.tolist(), answer_11_11_01.tolist())
        self.assertEqual(result_11_11_10.tolist(), answer_11_11_10.tolist())
        self.assertEqual(result_11_11_11.tolist(), answer_11_11_11.tolist())
        # different sets of parameters
        self.assertEqual(result_21_13_00.tolist(), answer_21_13_00.tolist())
        self.assertEqual(result_23_13_13.tolist(), answer_23_13_13.tolist())
        self.assertEqual(result_32_23_22.tolist(), answer_32_23_22.tolist())
    def test_kernel_5x3_difficult(self):
        """Compare conv2d against hand-computed results for a non-square 5x3 kernel."""
        matrix = np.array([[0, 4, 3, 2, 0, 1, 0],
                           [4, 3, 0, 1, 0, 1, 0],
                           [1, 3, 4, 2, 0, 1, 0],
                           [3, 4, 2, 2, 0, 1, 0],
                           [0, 0, 0, 0, 0, 1, 0]])
        kernel = np.array([[1, 1, 3],
                           [0, 2, 3],
                           [3, 3, 3],
                           [0, 2, 1],
                           [3, 3, 0]])
        # default params
        result_11_11_00 = conv2d(matrix, kernel, stride=(1, 1), dilation=(1, 1), padding=(0, 0))
        answer_11_11_00 = np.array([[53., 49., 29., 18., 11.]])
        # different sets of parameters
        result_21_13_00 = conv2d(matrix, kernel, stride=(2, 1), dilation=(1, 3), padding=(0, 0))
        answer_21_13_00 = np.array([[17.]])
        result_23_13_13 = conv2d(matrix, kernel, stride=(2, 3), dilation=(1, 3), padding=(1, 3))
        answer_23_13_13 = np.array([[34., 38., 9.],
                                   [30., 24., 7.]])
        result_32_23_42 = conv2d(matrix, kernel, stride=(3, 2), dilation=(2, 3), padding=(4, 2))
        answer_32_23_42 = np.array([[18., 10., 17.],
                                   [18., 17., 11.]])
        result_21_12_04 = conv2d(matrix, kernel, stride=(2, 1), dilation=(1, 2), padding=(0, 4))
        answer_21_12_04 = np.array([[18., 34., 40., 44., 22., 37., 15., 19., 0., 7., 0.]])
        result_22_12_04 = conv2d(matrix, kernel, stride=(2, 2), dilation=(1, 2), padding=(0, 4))
        answer_22_12_04 = np.array([[18., 40., 22., 15., 0., 0.]])
        result_23_13_25 = conv2d(matrix, kernel, stride=(2, 3), dilation=(1, 3), padding=(2, 5))
        answer_23_13_25 = np.array([[15., 27., 21., 0.],
                                   [34., 27., 13., 0.],
                                   [21., 11., 3., 0.]])
        result_11_11_33 = conv2d(matrix, kernel, stride=(1, 1), dilation=(1, 1), padding=(3, 3))
        answer_11_11_33 = np.array([[ 0., 0., 16., 32., 17., 7., 4., 5., 3., 0., 0.],
                                   [ 0., 4., 26., 39., 49., 35., 16., 8., 6., 0., 0.],
                                   [ 0., 13., 47., 69., 52., 23., 16., 10., 6., 0., 0.],
                                   [ 0., 18., 51., 53., 49., 29., 18., 11., 7., 0., 0.],
                                   [ 0., 24., 45., 52., 44., 17., 17., 8., 4., 0., 0.],
                                   [ 0., 12., 28., 30., 23., 10., 11., 6., 4., 0., 0.],
                                   [ 0., 9., 15., 13., 12., 4., 8., 3., 1., 0., 0.]])
        # default params
        self.assertEqual(result_11_11_00.tolist(), answer_11_11_00.tolist())
        # different sets of parameters
        self.assertEqual(result_21_13_00.tolist(), answer_21_13_00.tolist())
        self.assertEqual(result_23_13_13.tolist(), answer_23_13_13.tolist())
        self.assertEqual(result_32_23_42.tolist(), answer_32_23_42.tolist())
        self.assertEqual(result_21_12_04.tolist(), answer_21_12_04.tolist())
        self.assertEqual(result_22_12_04.tolist(), answer_22_12_04.tolist())
        self.assertEqual(result_23_13_25.tolist(), answer_23_13_25.tolist())
        self.assertEqual(result_11_11_33.tolist(), answer_11_11_33.tolist())
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.random.rand",
"numpy.random.choice",
"numpy.floor",
"numpy.array",
"numpy.random.randint",
"convolution.conv2d",
"unittest.main",
"convolution.add_padding"
] | [((19117, 19132), 'unittest.main', 'unittest.main', ([], {}), '()\n', (19130, 19132), False, 'import unittest\n'), ((3348, 3478), 'numpy.array', 'np.array', (['[[0, 4, 3, 2, 0, 1, 0], [4, 3, 0, 1, 0, 1, 0], [1, 3, 4, 2, 0, 1, 0], [3, 4,\n 2, 2, 0, 1, 0], [0, 0, 0, 0, 0, 1, 0]]'], {}), '([[0, 4, 3, 2, 0, 1, 0], [4, 3, 0, 1, 0, 1, 0], [1, 3, 4, 2, 0, 1, \n 0], [3, 4, 2, 2, 0, 1, 0], [0, 0, 0, 0, 0, 1, 0]])\n', (3356, 3478), True, 'import numpy as np\n'), ((3593, 3636), 'numpy.array', 'np.array', (['[[1, 1, 3], [0, 2, 3], [3, 3, 3]]'], {}), '([[1, 1, 3], [0, 2, 3], [3, 3, 3]])\n', (3601, 3636), True, 'import numpy as np\n'), ((3762, 3784), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {}), '(matrix, kernel)\n', (3768, 3784), False, 'from convolution import conv2d, add_padding\n'), ((3806, 3879), 'numpy.array', 'np.array', (['[[43, 43, 25, 17, 6], [52, 44, 17, 16, 6], [30, 23, 10, 11, 6]]'], {}), '([[43, 43, 25, 17, 6], [52, 44, 17, 16, 6], [30, 23, 10, 11, 6]])\n', (3814, 3879), True, 'import numpy as np\n'), ((4021, 4059), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'padding': '(1, 1)'}), '(matrix, kernel, padding=(1, 1))\n', (4027, 4059), False, 'from convolution import conv2d, add_padding\n'), ((4081, 4231), 'numpy.array', 'np.array', (['[[33, 38, 24, 7, 9, 5, 3], [41, 43, 43, 25, 17, 6, 4], [45, 52, 44, 17, 16,\n 6, 4], [28, 30, 23, 10, 11, 6, 4], [15, 13, 12, 4, 8, 3, 1]]'], {}), '([[33, 38, 24, 7, 9, 5, 3], [41, 43, 43, 25, 17, 6, 4], [45, 52, 44,\n 17, 16, 6, 4], [28, 30, 23, 10, 11, 6, 4], [15, 13, 12, 4, 8, 3, 1]])\n', (4089, 4231), True, 'import numpy as np\n'), ((4444, 4483), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'dilation': '(2, 2)'}), '(matrix, kernel, dilation=(2, 2))\n', (4450, 4483), False, 'from convolution import conv2d, add_padding\n'), ((4505, 4528), 'numpy.array', 'np.array', (['[[11, 19, 3]]'], {}), '([[11, 19, 3]])\n', (4513, 4528), True, 'import numpy as np\n'), ((4599, 4654), 'convolution.conv2d', 
'conv2d', (['matrix', 'kernel'], {'dilation': '(2, 2)', 'padding': '(1, 1)'}), '(matrix, kernel, dilation=(2, 2), padding=(1, 1))\n', (4605, 4654), False, 'from convolution import conv2d, add_padding\n'), ((4676, 4746), 'numpy.array', 'np.array', (['[[27, 15, 26, 6, 11], [22, 11, 19, 3, 8], [20, 8, 14, 0, 4]]'], {}), '([[27, 15, 26, 6, 11], [22, 11, 19, 3, 8], [20, 8, 14, 0, 4]])\n', (4684, 4746), True, 'import numpy as np\n'), ((4893, 4930), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 2)'}), '(matrix, kernel, stride=(2, 2))\n', (4899, 4930), False, 'from convolution import conv2d, add_padding\n'), ((4952, 4988), 'numpy.array', 'np.array', (['[[43, 25, 6], [30, 10, 6]]'], {}), '([[43, 25, 6], [30, 10, 6]])\n', (4960, 4988), True, 'import numpy as np\n'), ((5099, 5152), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 2)', 'padding': '(1, 1)'}), '(matrix, kernel, stride=(2, 2), padding=(1, 1))\n', (5105, 5152), False, 'from convolution import conv2d, add_padding\n'), ((5174, 5233), 'numpy.array', 'np.array', (['[[33, 24, 9, 3], [45, 44, 16, 4], [15, 12, 8, 1]]'], {}), '([[33, 24, 9, 3], [45, 44, 16, 4], [15, 12, 8, 1]])\n', (5182, 5233), True, 'import numpy as np\n'), ((5379, 5433), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 2)', 'dilation': '(2, 2)'}), '(matrix, kernel, stride=(2, 2), dilation=(2, 2))\n', (5385, 5433), False, 'from convolution import conv2d, add_padding\n'), ((5455, 5474), 'numpy.array', 'np.array', (['[[11, 3]]'], {}), '([[11, 3]])\n', (5463, 5474), True, 'import numpy as np\n'), ((5545, 5615), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 2)', 'dilation': '(2, 2)', 'padding': '(1, 1)'}), '(matrix, kernel, stride=(2, 2), dilation=(2, 2), padding=(1, 1))\n', (5551, 5615), False, 'from convolution import conv2d, add_padding\n'), ((5637, 5674), 'numpy.array', 'np.array', (['[[27, 26, 11], [20, 14, 4]]'], {}), '([[27, 26, 11], [20, 14, 4]])\n', (5645, 
5674), True, 'import numpy as np\n'), ((6305, 6732), 'numpy.array', 'np.array', (['[[1, 4, 4, 2, 1, 0, 0, 1, 0, 0, 3, 3, 3, 4], [0, 2, 0, 2, 0, 3, 4, 4, 2, 1,\n 1, 3, 0, 4], [1, 1, 0, 0, 3, 4, 2, 4, 4, 2, 3, 0, 0, 4], [4, 0, 1, 2, 0,\n 2, 0, 3, 3, 3, 0, 4, 1, 0], [3, 0, 0, 3, 3, 3, 2, 0, 2, 1, 1, 0, 4, 2],\n [2, 4, 3, 1, 1, 0, 2, 1, 3, 4, 4, 0, 2, 3], [2, 4, 3, 3, 2, 1, 4, 0, 3,\n 4, 1, 2, 0, 0], [2, 1, 0, 1, 1, 2, 2, 3, 0, 0, 1, 2, 4, 2], [3, 3, 1, 1,\n 1, 1, 4, 4, 2, 3, 2, 2, 2, 3]]'], {}), '([[1, 4, 4, 2, 1, 0, 0, 1, 0, 0, 3, 3, 3, 4], [0, 2, 0, 2, 0, 3, 4,\n 4, 2, 1, 1, 3, 0, 4], [1, 1, 0, 0, 3, 4, 2, 4, 4, 2, 3, 0, 0, 4], [4, 0,\n 1, 2, 0, 2, 0, 3, 3, 3, 0, 4, 1, 0], [3, 0, 0, 3, 3, 3, 2, 0, 2, 1, 1, \n 0, 4, 2], [2, 4, 3, 1, 1, 0, 2, 1, 3, 4, 4, 0, 2, 3], [2, 4, 3, 3, 2, 1,\n 4, 0, 3, 4, 1, 2, 0, 0], [2, 1, 0, 1, 1, 2, 2, 3, 0, 0, 1, 2, 4, 2], [3,\n 3, 1, 1, 1, 1, 4, 4, 2, 3, 2, 2, 2, 3]])\n', (6313, 6732), True, 'import numpy as np\n'), ((6947, 7047), 'numpy.array', 'np.array', (['[[2, 0, 2, 2, 2], [2, 3, 1, 1, 3], [3, 1, 1, 3, 1], [2, 2, 3, 1, 1], [0, 0,\n 1, 0, 0]]'], {}), '([[2, 0, 2, 2, 2], [2, 3, 1, 1, 3], [3, 1, 1, 3, 1], [2, 2, 3, 1, 1\n ], [0, 0, 1, 0, 0]])\n', (6955, 7047), True, 'import numpy as np\n'), ((7206, 7228), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {}), '(matrix, kernel)\n', (7212, 7228), False, 'from convolution import conv2d, add_padding\n'), ((7255, 7595), 'numpy.array', 'np.array', (['[[44.0, 58.0, 59.0, 62.0, 70.0, 80.0, 75.0, 92.0, 64.0, 72.0], [52.0, 52.0,\n 59.0, 87.0, 92.0, 83.0, 77.0, 74.0, 71.0, 67.0], [66.0, 63.0, 60.0, \n 64.0, 76.0, 79.0, 75.0, 82.0, 77.0, 64.0], [75.0, 69.0, 64.0, 64.0, \n 69.0, 75.0, 70.0, 71.0, 75.0, 74.0], [74.0, 71.0, 63.0, 66.0, 61.0, \n 75.0, 79.0, 47.0, 73.0, 76.0]]'], {}), '([[44.0, 58.0, 59.0, 62.0, 70.0, 80.0, 75.0, 92.0, 64.0, 72.0], [\n 52.0, 52.0, 59.0, 87.0, 92.0, 83.0, 77.0, 74.0, 71.0, 67.0], [66.0, \n 63.0, 60.0, 64.0, 76.0, 79.0, 75.0, 82.0, 77.0, 64.0], [75.0, 69.0, \n 64.0, 
64.0, 69.0, 75.0, 70.0, 71.0, 75.0, 74.0], [74.0, 71.0, 63.0, \n 66.0, 61.0, 75.0, 79.0, 47.0, 73.0, 76.0]])\n', (7263, 7595), True, 'import numpy as np\n'), ((7783, 7820), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(1, 2)'}), '(matrix, kernel, stride=(1, 2))\n', (7789, 7820), False, 'from convolution import conv2d, add_padding\n'), ((7847, 8027), 'numpy.array', 'np.array', (['[[44.0, 59.0, 70.0, 75.0, 64.0], [52.0, 59.0, 92.0, 77.0, 71.0], [66.0, \n 60.0, 76.0, 75.0, 77.0], [75.0, 64.0, 69.0, 70.0, 75.0], [74.0, 63.0, \n 61.0, 79.0, 73.0]]'], {}), '([[44.0, 59.0, 70.0, 75.0, 64.0], [52.0, 59.0, 92.0, 77.0, 71.0], [\n 66.0, 60.0, 76.0, 75.0, 77.0], [75.0, 64.0, 69.0, 70.0, 75.0], [74.0, \n 63.0, 61.0, 79.0, 73.0]])\n', (7855, 8027), True, 'import numpy as np\n'), ((8164, 8201), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(1, 3)'}), '(matrix, kernel, stride=(1, 3))\n', (8170, 8201), False, 'from convolution import conv2d, add_padding\n'), ((8228, 8373), 'numpy.array', 'np.array', (['[[44.0, 62.0, 75.0, 72.0], [52.0, 87.0, 77.0, 67.0], [66.0, 64.0, 75.0, \n 64.0], [75.0, 64.0, 70.0, 74.0], [74.0, 66.0, 79.0, 76.0]]'], {}), '([[44.0, 62.0, 75.0, 72.0], [52.0, 87.0, 77.0, 67.0], [66.0, 64.0, \n 75.0, 64.0], [75.0, 64.0, 70.0, 74.0], [74.0, 66.0, 79.0, 76.0]])\n', (8236, 8373), True, 'import numpy as np\n'), ((8520, 8557), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 1)'}), '(matrix, kernel, stride=(2, 1))\n', (8526, 8557), False, 'from convolution import conv2d, add_padding\n'), ((8584, 8790), 'numpy.array', 'np.array', (['[[44.0, 58.0, 59.0, 62.0, 70.0, 80.0, 75.0, 92.0, 64.0, 72.0], [66.0, 63.0,\n 60.0, 64.0, 76.0, 79.0, 75.0, 82.0, 77.0, 64.0], [74.0, 71.0, 63.0, \n 66.0, 61.0, 75.0, 79.0, 47.0, 73.0, 76.0]]'], {}), '([[44.0, 58.0, 59.0, 62.0, 70.0, 80.0, 75.0, 92.0, 64.0, 72.0], [\n 66.0, 63.0, 60.0, 64.0, 76.0, 79.0, 75.0, 82.0, 77.0, 64.0], [74.0, \n 71.0, 63.0, 66.0, 61.0, 75.0, 79.0, 47.0, 
73.0, 76.0]])\n', (8592, 8790), True, 'import numpy as np\n'), ((8850, 8887), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 2)'}), '(matrix, kernel, stride=(2, 2))\n', (8856, 8887), False, 'from convolution import conv2d, add_padding\n'), ((8914, 9018), 'numpy.array', 'np.array', (['[[44.0, 59.0, 70.0, 75.0, 64], [66.0, 60.0, 76.0, 75.0, 77], [74.0, 63.0, \n 61.0, 79.0, 73]]'], {}), '([[44.0, 59.0, 70.0, 75.0, 64], [66.0, 60.0, 76.0, 75.0, 77], [74.0,\n 63.0, 61.0, 79.0, 73]])\n', (8922, 9018), True, 'import numpy as np\n'), ((9102, 9139), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 3)'}), '(matrix, kernel, stride=(2, 3))\n', (9108, 9139), False, 'from convolution import conv2d, add_padding\n'), ((9166, 9259), 'numpy.array', 'np.array', (['[[44.0, 62.0, 75.0, 72.0], [66.0, 64.0, 75.0, 64.0], [74.0, 66.0, 79.0, 76.0]]'], {}), '([[44.0, 62.0, 75.0, 72.0], [66.0, 64.0, 75.0, 64.0], [74.0, 66.0, \n 79.0, 76.0]])\n', (9174, 9259), True, 'import numpy as np\n'), ((9342, 9379), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(3, 1)'}), '(matrix, kernel, stride=(3, 1))\n', (9348, 9379), False, 'from convolution import conv2d, add_padding\n'), ((9406, 9545), 'numpy.array', 'np.array', (['[[44.0, 58.0, 59.0, 62.0, 70.0, 80.0, 75.0, 92.0, 64.0, 72.0], [75.0, 69.0,\n 64.0, 64.0, 69.0, 75.0, 70.0, 71.0, 75.0, 74.0]]'], {}), '([[44.0, 58.0, 59.0, 62.0, 70.0, 80.0, 75.0, 92.0, 64.0, 72.0], [\n 75.0, 69.0, 64.0, 64.0, 69.0, 75.0, 70.0, 71.0, 75.0, 74.0]])\n', (9414, 9545), True, 'import numpy as np\n'), ((9584, 9621), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(3, 2)'}), '(matrix, kernel, stride=(3, 2))\n', (9590, 9621), False, 'from convolution import conv2d, add_padding\n'), ((9648, 9722), 'numpy.array', 'np.array', (['[[44.0, 59.0, 70.0, 75.0, 64.0], [75.0, 64.0, 69.0, 70.0, 75.0]]'], {}), '([[44.0, 59.0, 70.0, 75.0, 64.0], [75.0, 64.0, 69.0, 70.0, 75.0]])\n', (9656, 9722), True, 
'import numpy as np\n'), ((9776, 9813), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(4, 6)'}), '(matrix, kernel, stride=(4, 6))\n', (9782, 9813), False, 'from convolution import conv2d, add_padding\n'), ((9840, 9878), 'numpy.array', 'np.array', (['[[44.0, 75.0], [74.0, 79.0]]'], {}), '([[44.0, 75.0], [74.0, 79.0]])\n', (9848, 9878), True, 'import numpy as np\n'), ((10002, 10041), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'dilation': '(1, 2)'}), '(matrix, kernel, dilation=(1, 2))\n', (10008, 10041), False, 'from convolution import conv2d, add_padding\n'), ((10068, 10278), 'numpy.array', 'np.array', (['[[46.0, 70.0, 50.0, 77.0, 65.0, 94.0], [67.0, 68.0, 67.0, 76.0, 53.0, 95.0],\n [80.0, 65.0, 60.0, 64.0, 70.0, 73.0], [74.0, 74.0, 77.0, 73.0, 79.0, \n 55.0], [81.0, 66.0, 74.0, 60.0, 70.0, 58.0]]'], {}), '([[46.0, 70.0, 50.0, 77.0, 65.0, 94.0], [67.0, 68.0, 67.0, 76.0, \n 53.0, 95.0], [80.0, 65.0, 60.0, 64.0, 70.0, 73.0], [74.0, 74.0, 77.0, \n 73.0, 79.0, 55.0], [81.0, 66.0, 74.0, 60.0, 70.0, 58.0]])\n', (10076, 10278), True, 'import numpy as np\n'), ((10410, 10449), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'dilation': '(1, 3)'}), '(matrix, kernel, dilation=(1, 3))\n', (10416, 10449), False, 'from convolution import conv2d, add_padding\n'), ((10476, 10561), 'numpy.array', 'np.array', (['[[48.0, 77.0], [65.0, 65.0], [73.0, 55.0], [97.0, 67.0], [84.0, 68.0]]'], {}), '([[48.0, 77.0], [65.0, 65.0], [73.0, 55.0], [97.0, 67.0], [84.0, 68.0]]\n )\n', (10484, 10561), True, 'import numpy as np\n'), ((10717, 10756), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'dilation': '(2, 1)'}), '(matrix, kernel, dilation=(2, 1))\n', (10723, 10756), False, 'from convolution import conv2d, add_padding\n'), ((10783, 10855), 'numpy.array', 'np.array', (['[[78.0, 73.0, 64.0, 72.0, 81.0, 69.0, 73.0, 69.0, 68.0, 81.0]]'], {}), '([[78.0, 73.0, 64.0, 72.0, 81.0, 69.0, 73.0, 69.0, 68.0, 81.0]])\n', (10791, 10855), True, 'import numpy as 
np\n'), ((10873, 10912), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'dilation': '(2, 2)'}), '(matrix, kernel, dilation=(2, 2))\n', (10879, 10912), False, 'from convolution import conv2d, add_padding\n'), ((10939, 10987), 'numpy.array', 'np.array', (['[[67.0, 55.0, 80.0, 63.0, 77.0, 79.0]]'], {}), '([[67.0, 55.0, 80.0, 63.0, 77.0, 79.0]])\n', (10947, 10987), True, 'import numpy as np\n'), ((11009, 11048), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'dilation': '(2, 3)'}), '(matrix, kernel, dilation=(2, 3))\n', (11015, 11048), False, 'from convolution import conv2d, add_padding\n'), ((11075, 11099), 'numpy.array', 'np.array', (['[[65.0, 79.0]]'], {}), '([[65.0, 79.0]])\n', (11083, 11099), True, 'import numpy as np\n'), ((11173, 11211), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'padding': '(0, 1)'}), '(matrix, kernel, padding=(0, 1))\n', (11179, 11211), False, 'from convolution import conv2d, add_padding\n'), ((11238, 11642), 'numpy.array', 'np.array', (['[[41.0, 44.0, 58.0, 59.0, 62.0, 70.0, 80.0, 75.0, 92.0, 64.0, 72.0, 71.0],\n [34.0, 52.0, 52.0, 59.0, 87.0, 92.0, 83.0, 77.0, 74.0, 71.0, 67.0, 43.0\n ], [51.0, 66.0, 63.0, 60.0, 64.0, 76.0, 79.0, 75.0, 82.0, 77.0, 64.0, \n 57.0], [63.0, 75.0, 69.0, 64.0, 64.0, 69.0, 75.0, 70.0, 71.0, 75.0, \n 74.0, 43.0], [51.0, 74.0, 71.0, 63.0, 66.0, 61.0, 75.0, 79.0, 47.0, \n 73.0, 76.0, 54.0]]'], {}), '([[41.0, 44.0, 58.0, 59.0, 62.0, 70.0, 80.0, 75.0, 92.0, 64.0, 72.0,\n 71.0], [34.0, 52.0, 52.0, 59.0, 87.0, 92.0, 83.0, 77.0, 74.0, 71.0, \n 67.0, 43.0], [51.0, 66.0, 63.0, 60.0, 64.0, 76.0, 79.0, 75.0, 82.0, \n 77.0, 64.0, 57.0], [63.0, 75.0, 69.0, 64.0, 64.0, 69.0, 75.0, 70.0, \n 71.0, 75.0, 74.0, 43.0], [51.0, 74.0, 71.0, 63.0, 66.0, 61.0, 75.0, \n 79.0, 47.0, 73.0, 76.0, 54.0]])\n', (11246, 11642), True, 'import numpy as np\n'), ((11730, 11768), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'padding': '(1, 0)'}), '(matrix, kernel, padding=(1, 0))\n', (11736, 11768), False, 
'from convolution import conv2d, add_padding\n'), ((11795, 12269), 'numpy.array', 'np.array', (['[[39.0, 45.0, 45.0, 61.0, 52.0, 58.0, 66.0, 63.0, 53.0, 56.0], [44.0, 58.0,\n 59.0, 62.0, 70.0, 80.0, 75.0, 92.0, 64.0, 72.0], [52.0, 52.0, 59.0, \n 87.0, 92.0, 83.0, 77.0, 74.0, 71.0, 67.0], [66.0, 63.0, 60.0, 64.0, \n 76.0, 79.0, 75.0, 82.0, 77.0, 64.0], [75.0, 69.0, 64.0, 64.0, 69.0, \n 75.0, 70.0, 71.0, 75.0, 74.0], [74.0, 71.0, 63.0, 66.0, 61.0, 75.0, \n 79.0, 47.0, 73.0, 76.0], [70.0, 59.0, 64.0, 55.0, 72.0, 83.0, 81.0, \n 77.0, 70.0, 69.0]]'], {}), '([[39.0, 45.0, 45.0, 61.0, 52.0, 58.0, 66.0, 63.0, 53.0, 56.0], [\n 44.0, 58.0, 59.0, 62.0, 70.0, 80.0, 75.0, 92.0, 64.0, 72.0], [52.0, \n 52.0, 59.0, 87.0, 92.0, 83.0, 77.0, 74.0, 71.0, 67.0], [66.0, 63.0, \n 60.0, 64.0, 76.0, 79.0, 75.0, 82.0, 77.0, 64.0], [75.0, 69.0, 64.0, \n 64.0, 69.0, 75.0, 70.0, 71.0, 75.0, 74.0], [74.0, 71.0, 63.0, 66.0, \n 61.0, 75.0, 79.0, 47.0, 73.0, 76.0], [70.0, 59.0, 64.0, 55.0, 72.0, \n 83.0, 81.0, 77.0, 70.0, 69.0]])\n', (11803, 12269), True, 'import numpy as np\n'), ((12413, 12451), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'padding': '(1, 1)'}), '(matrix, kernel, padding=(1, 1))\n', (12419, 12451), False, 'from convolution import conv2d, add_padding\n'), ((12478, 13040), 'numpy.array', 'np.array', (['[[26.0, 39.0, 45.0, 45.0, 61.0, 52.0, 58.0, 66.0, 63.0, 53.0, 56.0, 51.0],\n [41.0, 44.0, 58.0, 59.0, 62.0, 70.0, 80.0, 75.0, 92.0, 64.0, 72.0, 71.0\n ], [34.0, 52.0, 52.0, 59.0, 87.0, 92.0, 83.0, 77.0, 74.0, 71.0, 67.0, \n 43.0], [51.0, 66.0, 63.0, 60.0, 64.0, 76.0, 79.0, 75.0, 82.0, 77.0, \n 64.0, 57.0], [63.0, 75.0, 69.0, 64.0, 64.0, 69.0, 75.0, 70.0, 71.0, \n 75.0, 74.0, 43.0], [51.0, 74.0, 71.0, 63.0, 66.0, 61.0, 75.0, 79.0, \n 47.0, 73.0, 76.0, 54.0], [59.0, 70.0, 59.0, 64.0, 55.0, 72.0, 83.0, \n 81.0, 77.0, 70.0, 69.0, 58.0]]'], {}), '([[26.0, 39.0, 45.0, 45.0, 61.0, 52.0, 58.0, 66.0, 63.0, 53.0, 56.0,\n 51.0], [41.0, 44.0, 58.0, 59.0, 62.0, 70.0, 80.0, 75.0, 92.0, 
64.0, \n 72.0, 71.0], [34.0, 52.0, 52.0, 59.0, 87.0, 92.0, 83.0, 77.0, 74.0, \n 71.0, 67.0, 43.0], [51.0, 66.0, 63.0, 60.0, 64.0, 76.0, 79.0, 75.0, \n 82.0, 77.0, 64.0, 57.0], [63.0, 75.0, 69.0, 64.0, 64.0, 69.0, 75.0, \n 70.0, 71.0, 75.0, 74.0, 43.0], [51.0, 74.0, 71.0, 63.0, 66.0, 61.0, \n 75.0, 79.0, 47.0, 73.0, 76.0, 54.0], [59.0, 70.0, 59.0, 64.0, 55.0, \n 72.0, 83.0, 81.0, 77.0, 70.0, 69.0, 58.0]])\n', (12486, 13040), True, 'import numpy as np\n'), ((13205, 13275), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 1)', 'dilation': '(1, 3)', 'padding': '(0, 0)'}), '(matrix, kernel, stride=(2, 1), dilation=(1, 3), padding=(0, 0))\n', (13211, 13275), False, 'from convolution import conv2d, add_padding\n'), ((13302, 13354), 'numpy.array', 'np.array', (['[[48.0, 77.0], [73.0, 55.0], [84.0, 68.0]]'], {}), '([[48.0, 77.0], [73.0, 55.0], [84.0, 68.0]])\n', (13310, 13354), True, 'import numpy as np\n'), ((13448, 13518), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 3)', 'dilation': '(1, 3)', 'padding': '(1, 3)'}), '(matrix, kernel, stride=(2, 3), dilation=(1, 3), padding=(1, 3))\n', (13454, 13518), False, 'from convolution import conv2d, add_padding\n'), ((13545, 13639), 'numpy.array', 'np.array', (['[[28.0, 36.0, 31.0], [53.0, 65.0, 47.0], [62.0, 97.0, 70.0], [64.0, 79.0, 74.0]\n ]'], {}), '([[28.0, 36.0, 31.0], [53.0, 65.0, 47.0], [62.0, 97.0, 70.0], [64.0,\n 79.0, 74.0]])\n', (13553, 13639), True, 'import numpy as np\n'), ((13759, 13829), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(3, 2)', 'dilation': '(2, 3)', 'padding': '(2, 2)'}), '(matrix, kernel, stride=(3, 2), dilation=(2, 3), padding=(2, 2))\n', (13765, 13829), False, 'from convolution import conv2d, add_padding\n'), ((13856, 13906), 'numpy.array', 'np.array', (['[[54.0, 55.0, 34.0], [34.0, 69.0, 43.0]]'], {}), '([[54.0, 55.0, 34.0], [34.0, 69.0, 43.0]])\n', (13864, 13906), True, 'import numpy as np\n'), ((15808, 15938), 'numpy.array', 
'np.array', (['[[0, 4, 3, 2, 0, 1, 0], [4, 3, 0, 1, 0, 1, 0], [1, 3, 4, 2, 0, 1, 0], [3, 4,\n 2, 2, 0, 1, 0], [0, 0, 0, 0, 0, 1, 0]]'], {}), '([[0, 4, 3, 2, 0, 1, 0], [4, 3, 0, 1, 0, 1, 0], [1, 3, 4, 2, 0, 1, \n 0], [3, 4, 2, 2, 0, 1, 0], [0, 0, 0, 0, 0, 1, 0]])\n', (15816, 15938), True, 'import numpy as np\n'), ((16061, 16126), 'numpy.array', 'np.array', (['[[1, 1, 3], [0, 2, 3], [3, 3, 3], [0, 2, 1], [3, 3, 0]]'], {}), '([[1, 1, 3], [0, 2, 3], [3, 3, 3], [0, 2, 1], [3, 3, 0]])\n', (16069, 16126), True, 'import numpy as np\n'), ((16296, 16366), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(1, 1)', 'dilation': '(1, 1)', 'padding': '(0, 0)'}), '(matrix, kernel, stride=(1, 1), dilation=(1, 1), padding=(0, 0))\n', (16302, 16366), False, 'from convolution import conv2d, add_padding\n'), ((16393, 16435), 'numpy.array', 'np.array', (['[[53.0, 49.0, 29.0, 18.0, 11.0]]'], {}), '([[53.0, 49.0, 29.0, 18.0, 11.0]])\n', (16401, 16435), True, 'import numpy as np\n'), ((16497, 16567), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 1)', 'dilation': '(1, 3)', 'padding': '(0, 0)'}), '(matrix, kernel, stride=(2, 1), dilation=(1, 3), padding=(0, 0))\n', (16503, 16567), False, 'from convolution import conv2d, add_padding\n'), ((16594, 16612), 'numpy.array', 'np.array', (['[[17.0]]'], {}), '([[17.0]])\n', (16602, 16612), True, 'import numpy as np\n'), ((16639, 16709), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 3)', 'dilation': '(1, 3)', 'padding': '(1, 3)'}), '(matrix, kernel, stride=(2, 3), dilation=(1, 3), padding=(1, 3))\n', (16645, 16709), False, 'from convolution import conv2d, add_padding\n'), ((16736, 16784), 'numpy.array', 'np.array', (['[[34.0, 38.0, 9.0], [30.0, 24.0, 7.0]]'], {}), '([[34.0, 38.0, 9.0], [30.0, 24.0, 7.0]])\n', (16744, 16784), True, 'import numpy as np\n'), ((16844, 16914), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(3, 2)', 'dilation': '(2, 3)', 'padding': '(4, 
2)'}), '(matrix, kernel, stride=(3, 2), dilation=(2, 3), padding=(4, 2))\n', (16850, 16914), False, 'from convolution import conv2d, add_padding\n'), ((16941, 16991), 'numpy.array', 'np.array', (['[[18.0, 10.0, 17.0], [18.0, 17.0, 11.0]]'], {}), '([[18.0, 10.0, 17.0], [18.0, 17.0, 11.0]])\n', (16949, 16991), True, 'import numpy as np\n'), ((17049, 17119), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 1)', 'dilation': '(1, 2)', 'padding': '(0, 4)'}), '(matrix, kernel, stride=(2, 1), dilation=(1, 2), padding=(0, 4))\n', (17055, 17119), False, 'from convolution import conv2d, add_padding\n'), ((17146, 17221), 'numpy.array', 'np.array', (['[[18.0, 34.0, 40.0, 44.0, 22.0, 37.0, 15.0, 19.0, 0.0, 7.0, 0.0]]'], {}), '([[18.0, 34.0, 40.0, 44.0, 22.0, 37.0, 15.0, 19.0, 0.0, 7.0, 0.0]])\n', (17154, 17221), True, 'import numpy as np\n'), ((17241, 17311), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 2)', 'dilation': '(1, 2)', 'padding': '(0, 4)'}), '(matrix, kernel, stride=(2, 2), dilation=(1, 2), padding=(0, 4))\n', (17247, 17311), False, 'from convolution import conv2d, add_padding\n'), ((17338, 17384), 'numpy.array', 'np.array', (['[[18.0, 40.0, 22.0, 15.0, 0.0, 0.0]]'], {}), '([[18.0, 40.0, 22.0, 15.0, 0.0, 0.0]])\n', (17346, 17384), True, 'import numpy as np\n'), ((17408, 17478), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(2, 3)', 'dilation': '(1, 3)', 'padding': '(2, 5)'}), '(matrix, kernel, stride=(2, 3), dilation=(1, 3), padding=(2, 5))\n', (17414, 17478), False, 'from convolution import conv2d, add_padding\n'), ((17505, 17594), 'numpy.array', 'np.array', (['[[15.0, 27.0, 21.0, 0.0], [34.0, 27.0, 13.0, 0.0], [21.0, 11.0, 3.0, 0.0]]'], {}), '([[15.0, 27.0, 21.0, 0.0], [34.0, 27.0, 13.0, 0.0], [21.0, 11.0, \n 3.0, 0.0]])\n', (17513, 17594), True, 'import numpy as np\n'), ((17681, 17751), 'convolution.conv2d', 'conv2d', (['matrix', 'kernel'], {'stride': '(1, 1)', 'dilation': '(1, 1)', 'padding': 
'(3, 3)'}), '(matrix, kernel, stride=(1, 1), dilation=(1, 1), padding=(3, 3))\n', (17687, 17751), False, 'from convolution import conv2d, add_padding\n'), ((17778, 18252), 'numpy.array', 'np.array', (['[[0.0, 0.0, 16.0, 32.0, 17.0, 7.0, 4.0, 5.0, 3.0, 0.0, 0.0], [0.0, 4.0, \n 26.0, 39.0, 49.0, 35.0, 16.0, 8.0, 6.0, 0.0, 0.0], [0.0, 13.0, 47.0, \n 69.0, 52.0, 23.0, 16.0, 10.0, 6.0, 0.0, 0.0], [0.0, 18.0, 51.0, 53.0, \n 49.0, 29.0, 18.0, 11.0, 7.0, 0.0, 0.0], [0.0, 24.0, 45.0, 52.0, 44.0, \n 17.0, 17.0, 8.0, 4.0, 0.0, 0.0], [0.0, 12.0, 28.0, 30.0, 23.0, 10.0, \n 11.0, 6.0, 4.0, 0.0, 0.0], [0.0, 9.0, 15.0, 13.0, 12.0, 4.0, 8.0, 3.0, \n 1.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 16.0, 32.0, 17.0, 7.0, 4.0, 5.0, 3.0, 0.0, 0.0], [0.0,\n 4.0, 26.0, 39.0, 49.0, 35.0, 16.0, 8.0, 6.0, 0.0, 0.0], [0.0, 13.0, \n 47.0, 69.0, 52.0, 23.0, 16.0, 10.0, 6.0, 0.0, 0.0], [0.0, 18.0, 51.0, \n 53.0, 49.0, 29.0, 18.0, 11.0, 7.0, 0.0, 0.0], [0.0, 24.0, 45.0, 52.0, \n 44.0, 17.0, 17.0, 8.0, 4.0, 0.0, 0.0], [0.0, 12.0, 28.0, 30.0, 23.0, \n 10.0, 11.0, 6.0, 4.0, 0.0, 0.0], [0.0, 9.0, 15.0, 13.0, 12.0, 4.0, 8.0,\n 3.0, 1.0, 0.0, 0.0]])\n', (17786, 18252), True, 'import numpy as np\n'), ((219, 244), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (236, 244), True, 'import numpy as np\n'), ((263, 288), 'numpy.random.randint', 'np.random.randint', (['(3)', '(100)'], {}), '(3, 100)\n', (280, 288), True, 'import numpy as np\n'), ((317, 341), 'numpy.random.rand', 'np.random.rand', (['m_h', 'm_w'], {}), '(m_h, m_w)\n', (331, 341), True, 'import numpy as np\n'), ((368, 396), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(2)'], {}), '(0, 100, 2)\n', (385, 396), True, 'import numpy as np\n'), ((438, 478), 'convolution.add_padding', 'add_padding', (['random_matrix', '(rows, cols)'], {}), '(random_matrix, (rows, cols))\n', (449, 478), False, 'from convolution import conv2d, add_padding\n'), ((678, 706), 'numpy.random.randint', 'np.random.randint', (['(1)', 
'(100)', '(2)'], {}), '(1, 100, 2)\n', (695, 706), True, 'import numpy as np\n'), ((723, 763), 'numpy.random.choice', 'np.random.choice', (['[1, 3, 5, 7, 9, 10]', '(2)'], {}), '([1, 3, 5, 7, 9, 10], 2)\n', (739, 763), True, 'import numpy as np\n'), ((829, 847), 'numpy.random.rand', 'np.random.rand', (['*d'], {}), '(*d)\n', (843, 847), True, 'import numpy as np\n'), ((876, 894), 'numpy.random.rand', 'np.random.rand', (['*k'], {}), '(*k)\n', (890, 894), True, 'import numpy as np\n'), ((952, 978), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(2)'], {}), '(0, 5, 2)\n', (969, 978), True, 'import numpy as np\n'), ((1045, 1071), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(2)'], {}), '(0, 5, 2)\n', (1062, 1071), True, 'import numpy as np\n'), ((1137, 1164), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(5)', '(2)'], {}), '(-1, 5, 2)\n', (1154, 1164), True, 'import numpy as np\n'), ((2272, 2363), 'convolution.conv2d', 'conv2d', (['random_matrix', 'random_kernel'], {'stride': 'stride', 'dilation': 'dilation', 'padding': 'padding'}), '(random_matrix, random_kernel, stride=stride, dilation=dilation,\n padding=padding)\n', (2278, 2363), False, 'from convolution import conv2d, add_padding\n'), ((2513, 2604), 'convolution.conv2d', 'conv2d', (['random_matrix', 'random_kernel'], {'stride': 'stride', 'dilation': 'dilation', 'padding': 'padding'}), '(random_matrix, random_kernel, stride=stride, dilation=dilation,\n padding=padding)\n', (2519, 2604), False, 'from convolution import conv2d, add_padding\n'), ((1332, 1421), 'numpy.floor', 'np.floor', (['((d[0] + 2 * padding[0] - k[0] - (k[0] - 1) * (dilation[0] - 1)) / stride[0])'], {}), '((d[0] + 2 * padding[0] - k[0] - (k[0] - 1) * (dilation[0] - 1)) /\n stride[0])\n', (1340, 1421), True, 'import numpy as np\n'), ((1462, 1551), 'numpy.floor', 'np.floor', (['((d[1] + 2 * padding[1] - k[1] - (k[1] - 1) * (dilation[1] - 1)) / stride[1])'], {}), '((d[1] + 2 * padding[1] - k[1] - (k[1] - 1) * 
(dilation[1] - 1)) /\n stride[1])\n', (1470, 1551), True, 'import numpy as np\n'), ((2750, 2841), 'convolution.conv2d', 'conv2d', (['random_matrix', 'random_kernel'], {'stride': 'stride', 'dilation': 'dilation', 'padding': 'padding'}), '(random_matrix, random_kernel, stride=stride, dilation=dilation,\n padding=padding)\n', (2756, 2841), False, 'from convolution import conv2d, add_padding\n'), ((3131, 3222), 'convolution.conv2d', 'conv2d', (['random_matrix', 'random_kernel'], {'stride': 'stride', 'dilation': 'dilation', 'padding': 'padding'}), '(random_matrix, random_kernel, stride=stride, dilation=dilation,\n padding=padding)\n', (3137, 3222), False, 'from convolution import conv2d, add_padding\n'), ((2985, 3076), 'convolution.conv2d', 'conv2d', (['random_matrix', 'random_kernel'], {'stride': 'stride', 'dilation': 'dilation', 'padding': 'padding'}), '(random_matrix, random_kernel, stride=stride, dilation=dilation,\n padding=padding)\n', (2991, 3076), False, 'from convolution import conv2d, add_padding\n')] |
#! /usr/bin/env python3
# coding=utf-8
"""Plot flexpart particle-position maps for one station/date and build gif animations.

Reads the partposit files for a 10-day backward window ending at the given
date, plots one map per timestep and release level, then stitches the frames
into forward and reverse gif animations with ImageMagick's ``convert``.

Author: <EMAIL>
"""
import datetime
import argparse
import sys
import os
import gc
import subprocess
import traceback

import numpy as np
import toml

sys.path.append("..")
import trace_source


parser = argparse.ArgumentParser()
parser.add_argument('--station', help='station name like limassol, barbados or mcmurdo')
parser.add_argument('--datetime', help='date in the format YYYYMMDD-HH')
parser.add_argument('--levels', nargs='+', type=int)
parser.add_argument('--dynamics', default='false', help='add the isobars/isoterms from the grib files')
#parser.add_argument('--daterange', help='date range in the format YYYYMMDD-YYYMMDD')
args = parser.parse_args()

config_file = 'config_{}.toml'.format(args.station)
with open(config_file) as f:
    config = toml.loads(f.read())

end = datetime.datetime.strptime(args.datetime, '%Y%m%d-%H')
savepath = '{}/{}_maps'.format(config['plot_dir'], end.strftime('%Y%m%d_%H'))
print("savepath ", savepath)
folder = config['partposit_dir'] + '{}/'.format(end.strftime('%Y%m%d_%H'))
print('partposit_dir', folder)

# 10-day backward window ending at the requested timestamp.
dt_range = [end-datetime.timedelta(days=10), end]
files = os.listdir(folder)
files = sorted([f for f in files if 'partposit' in f])

ls = trace_source.land_sfc.land_sfc()

if args.levels is not None:
    levels = args.levels
else:
    # get levels from config file
    raise ValueError('--levels is required (reading levels from the config file is not implemented)')
print('levels ', args.levels)

if args.dynamics == 'false':
    add_dyn = False
elif args.dynamics == 'true':
    add_dyn = True
else:
    raise ValueError("--dynamics must be 'true' or 'false'")

level_to_heights = {}
for f in files[:]:
    # The partposit data and trajectory metadata do not depend on the release
    # level, so read them once per timestep instead of once per level.
    dt = datetime.datetime.strptime(f[10:], '%Y%m%d%H%M%S')
    part_pos = trace_source.flexpart.read_partpositions(folder + f, 1, ctable=False)
    traj = trace_source.flexpart.read_flexpart_traj_meta(folder + "trajectories.txt")
    for i in levels:
        level_to_heights[i] = np.mean(traj['releases_meta'][i]['heights'])
        trace_source.flexpart.plot_part_loc_map(part_pos, i, dt, traj, savepath, ls=ls,
                                                config=config,
                                                add_dyn=add_dyn,
                                                )
                                                #add_fire='M6_7452')
    gc.collect()


# for a nicer animation also include the last timestep
#
for i in levels:
    dt = end
    traj = trace_source.flexpart.read_flexpart_traj_meta(folder + "trajectories.txt")
    meta = traj['releases_meta'][i]
    # Synthesize a single "particle" at the release point for the final frame.
    part_pos = [[i, meta['lat_lon_bounds'][0], meta['lat_lon_bounds'][1], np.mean(meta['heights'])]]
    trace_source.flexpart.plot_part_loc_map(part_pos, i, dt, traj, savepath, ls=ls,
                                            config=config,
                                            add_dyn=add_dyn,
                                            )

os.chdir(savepath)
print(os.getcwd())
for i in levels:
    # Reverse-sorted frames: animation runs backward in time.
    fname_animation = "{}_{:.0f}_r{:0>2}_{}.gif".format(end.strftime('%Y%m%d_%H'), level_to_heights[i], i, args.station)
    command = "convert -scale 70% -coalesce -layers Optimize -delay 20 -loop 0 `ls r{:0>2}*.png | sort -r` {}".format(i, fname_animation)
    print('run: ', command)
    try:
        process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
    except Exception:
        # Best effort: convert may be missing or fail on one level; log and continue.
        traceback.print_exc()

    # Forward-sorted frames: animation runs forward in time ("_f" suffix).
    fname_animation = "{}_{:.0f}_r{:0>2}_{}_f.gif".format(end.strftime('%Y%m%d_%H'), level_to_heights[i], i, args.station)
    command = "convert -scale 70% -coalesce -layers Optimize -delay 20 -loop 0 `ls r{:0>2}*.png | sort ` {}".format(i, fname_animation)
    print('run: ', command)
    try:
        process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
    except Exception:
        # Best effort: log the failure and move on to the next level.
        traceback.print_exc()


# from flexpart module
# convert -scale 70% -coalesce -layers Optimize -delay 20 -loop 0 `ls r11*.png | sort -r` r11.gif

# from notebook
# convert -delay 20 -loop 0 `ls r2*.png | sort -r` r2.gif
# convert -resize 1500x1000 -delay 20 -loop 0 `ls r4*.png | sort -r` r4.gif
# convert -scale 50% -coalesce -layers Optimize -delay 20 -loop 0 `ls r11*.png | sort -r` r11.gif
# convert -scale 70% -coalesce -layers Optimize -delay 20 -loop 0 `ls r11*.png | sort -r` r11.gif
| [
"numpy.mean",
"os.listdir",
"argparse.ArgumentParser",
"trace_source.flexpart.plot_part_loc_map",
"datetime.datetime.strptime",
"trace_source.land_sfc.land_sfc",
"trace_source.flexpart.read_partpositions",
"subprocess.run",
"trace_source.flexpart.read_flexpart_traj_meta",
"os.getcwd",
"os.chdir"... | [((197, 218), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (212, 218), False, 'import sys, os\n'), ((250, 275), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (273, 275), False, 'import argparse\n'), ((836, 890), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['args.datetime', '"""%Y%m%d-%H"""'], {}), "(args.datetime, '%Y%m%d-%H')\n", (862, 890), False, 'import datetime\n'), ((1165, 1183), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1175, 1183), False, 'import sys, os\n'), ((1244, 1276), 'trace_source.land_sfc.land_sfc', 'trace_source.land_sfc.land_sfc', ([], {}), '()\n', (1274, 1276), False, 'import trace_source\n'), ((2852, 2870), 'os.chdir', 'os.chdir', (['savepath'], {}), '(savepath)\n', (2860, 2870), False, 'import sys, os\n'), ((2273, 2285), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2283, 2285), False, 'import gc\n'), ((2386, 2460), 'trace_source.flexpart.read_flexpart_traj_meta', 'trace_source.flexpart.read_flexpart_traj_meta', (["(folder + 'trajectories.txt')"], {}), "(folder + 'trajectories.txt')\n", (2431, 2460), False, 'import trace_source\n'), ((2603, 2719), 'trace_source.flexpart.plot_part_loc_map', 'trace_source.flexpart.plot_part_loc_map', (['part_pos', 'i', 'dt', 'traj', 'savepath'], {'ls': 'ls', 'config': 'config', 'add_dyn': 'add_dyn'}), '(part_pos, i, dt, traj, savepath, ls\n =ls, config=config, add_dyn=add_dyn)\n', (2642, 2719), False, 'import trace_source\n'), ((2877, 2888), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2886, 2888), False, 'import sys, os\n'), ((1122, 1149), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (1140, 1149), False, 'import datetime\n'), ((1626, 1676), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['f[10:]', '"""%Y%m%d%H%M%S"""'], {}), "(f[10:], '%Y%m%d%H%M%S')\n", (1652, 1676), False, 'import datetime\n'), ((1696, 1765), 'trace_source.flexpart.read_partpositions', 
'trace_source.flexpart.read_partpositions', (['(folder + f)', '(1)'], {'ctable': '(False)'}), '(folder + f, 1, ctable=False)\n', (1736, 1765), False, 'import trace_source\n'), ((1782, 1856), 'trace_source.flexpart.read_flexpart_traj_meta', 'trace_source.flexpart.read_flexpart_traj_meta', (["(folder + 'trajectories.txt')"], {}), "(folder + 'trajectories.txt')\n", (1827, 1856), False, 'import trace_source\n'), ((1887, 1931), 'numpy.mean', 'np.mean', (["traj['releases_meta'][i]['heights']"], {}), "(traj['releases_meta'][i]['heights'])\n", (1894, 1931), True, 'import numpy as np\n'), ((1940, 2056), 'trace_source.flexpart.plot_part_loc_map', 'trace_source.flexpart.plot_part_loc_map', (['part_pos', 'i', 'dt', 'traj', 'savepath'], {'ls': 'ls', 'config': 'config', 'add_dyn': 'add_dyn'}), '(part_pos, i, dt, traj, savepath, ls\n =ls, config=config, add_dyn=add_dyn)\n', (1979, 2056), False, 'import trace_source\n'), ((3223, 3323), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'check': '(True)', 'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)'}), '(command, shell=True, check=True, stdout=subprocess.PIPE,\n universal_newlines=True)\n', (3237, 3323), False, 'import subprocess\n'), ((3677, 3777), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'check': '(True)', 'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)'}), '(command, shell=True, check=True, stdout=subprocess.PIPE,\n universal_newlines=True)\n', (3691, 3777), False, 'import subprocess\n'), ((2571, 2595), 'numpy.mean', 'np.mean', (["meta['heights']"], {}), "(meta['heights'])\n", (2578, 2595), True, 'import numpy as np\n'), ((3340, 3361), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3359, 3361), False, 'import traceback\n'), ((3794, 3815), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3813, 3815), False, 'import traceback\n')] |
# Copyright 2020 DeepLearningResearch
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file has been modified by DeepLearningResearch for the development of DEAL.
"""Variation ratio AL method for Softmax.
Samples in batches based on variation ratio scores.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pickle
from scipy.stats import mode
from sampling_methods.sampling_def import SamplingMethod
class VarRatio(SamplingMethod):
  """Variation ratio AL method for softmax models.

  Runs several stochastic (test-time dropout) forward passes and scores each
  pool point by the variation ratio 1 - f_mode / T, where f_mode is the count
  of the most frequent predicted class over the T dropout iterations.
  """

  def __init__(self, X, y, seed):
    self.X = X
    self.y = y
    self.name = 'VarRatio'
    # T: number of stochastic forward passes used to estimate the mode.
    self.dropout_iterations = 25
    self.test_time_dropout = True

  def select_batch_(self, model, already_selected, N, **kwargs):
    """Returns batch of datapoints with highest uncertainty.

    Args:
      model: model exposing `decision_function` (preferably with test-time
        dropout support); `predict_proba` is used as a fallback.
      already_selected: index of datapoints already selected
      N: batch size

    Returns:
      indices of points selected to add using variation ratio active learner
    """
    # The dataset tag written at training time decides the pool size.
    with open('./trained_models/All_Dropout_Classes_dataset', 'rb') as fp:
      All_Dropout_Classes_dataset = pickle.load(fp)
    if All_Dropout_Classes_dataset == 'mnist_keras':
      X_Pool_Dropout = self.X
    if All_Dropout_Classes_dataset == 'cifar10_keras':
      X_Pool_Dropout = self.X
    if All_Dropout_Classes_dataset == 'svhn':
      # NOTE(review): predictions below are computed on all of self.X while the
      # pool here is truncated to 87000 — confirm self.X matches this size.
      X_Pool_Dropout = self.X[:87000]

    # Column 0 is a zero placeholder; class votes land in columns 1..T.
    All_Dropout_Classes = np.zeros(shape=(X_Pool_Dropout.shape[0], 1))
    print('Use trained model for test time dropout')
    for d in range(self.dropout_iterations):
      print('Dropout Iteration', d)
      try:
        pred = model.decision_function(self.X, self.test_time_dropout)
      except Exception:
        # Fallback for models without a test-time-dropout decision_function.
        pred = model.predict_proba(self.X)
      dropout_classes = np.argmax(pred, axis=1)
      print(dropout_classes.shape)
      dropout_classes = np.array([dropout_classes]).T
      All_Dropout_Classes = np.append(All_Dropout_Classes, dropout_classes, axis=1)

    Variation = np.zeros(shape=(X_Pool_Dropout.shape[0]))
    for t in range(X_Pool_Dropout.shape[0]):
      # mode() returns (most frequent value, its count); only the count matters
      # for the variation ratio.
      _, mode_count = mode(All_Dropout_Classes[t, 1:])
      Variation[t] = 1 - mode_count / float(self.dropout_iterations)

    # Rank pool indices by decreasing variation ratio, drop already-selected
    # ones, and take the top N.
    rank_ind = Variation.flatten().argsort()[::-1]
    rank_ind = [i for i in rank_ind if i not in already_selected]
    active_samples = rank_ind[0:N]
    return active_samples
| [
"scipy.stats.mode",
"pickle.load",
"numpy.argmax",
"numpy.append",
"numpy.array",
"numpy.zeros"
] | [((2019, 2063), 'numpy.zeros', 'np.zeros', ([], {'shape': '(X_Pool_Dropout.shape[0], 1)'}), '(shape=(X_Pool_Dropout.shape[0], 1))\n', (2027, 2063), True, 'import numpy as np\n'), ((2582, 2621), 'numpy.zeros', 'np.zeros', ([], {'shape': 'X_Pool_Dropout.shape[0]'}), '(shape=X_Pool_Dropout.shape[0])\n', (2590, 2621), True, 'import numpy as np\n'), ((1722, 1737), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1733, 1737), False, 'import pickle\n'), ((2367, 2390), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (2376, 2390), True, 'import numpy as np\n'), ((2509, 2564), 'numpy.append', 'np.append', (['All_Dropout_Classes', 'dropout_classes'], {'axis': '(1)'}), '(All_Dropout_Classes, dropout_classes, axis=1)\n', (2518, 2564), True, 'import numpy as np\n'), ((2680, 2693), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2688, 2693), True, 'import numpy as np\n'), ((2837, 2848), 'scipy.stats.mode', 'mode', (['L[1:]'], {}), '(L[1:])\n', (2841, 2848), False, 'from scipy.stats import mode\n'), ((2450, 2477), 'numpy.array', 'np.array', (['[dropout_classes]'], {}), '([dropout_classes])\n', (2458, 2477), True, 'import numpy as np\n'), ((2758, 2806), 'numpy.append', 'np.append', (['L', 'All_Dropout_Classes[t, d_iter + 1]'], {}), '(L, All_Dropout_Classes[t, d_iter + 1])\n', (2767, 2806), True, 'import numpy as np\n')] |
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent))
import unittest
import nanopq
import numpy as np
class TestSuite(unittest.TestCase):
def setUp(self):
np.random.seed(123)
def test_property(self):
opq = nanopq.OPQ(M=4, Ks=256)
self.assertEqual(
(opq.M, opq.Ks, opq.verbose, opq.code_dtype),
(opq.pq.M, opq.pq.Ks, opq.pq.verbose, opq.pq.code_dtype),
)
def test_fit(self):
N, D, M, Ks = 100, 12, 4, 10
X = np.random.random((N, D)).astype(np.float32)
opq = nanopq.OPQ(M=M, Ks=Ks)
opq.fit(X)
self.assertEqual(opq.Ds, D / M)
self.assertEqual(opq.codewords.shape, (M, Ks, D / M))
self.assertEqual(opq.R.shape, (D, D))
opq2 = nanopq.OPQ(M=M, Ks=Ks).fit(X) # Can be called as a chain
self.assertTrue(np.allclose(opq.codewords, opq2.codewords))
def test_eq(self):
import copy
N, D, M, Ks = 100, 12, 4, 10
X = np.random.random((N, D)).astype(np.float32)
opq1 = nanopq.OPQ(M=M, Ks=Ks)
opq2 = nanopq.OPQ(M=M, Ks=Ks)
opq3 = copy.deepcopy(opq1)
opq4 = nanopq.OPQ(M=M, Ks=2 * Ks)
self.assertTrue(opq1 == opq1)
self.assertTrue(opq1 == opq2)
self.assertTrue(opq1 == opq3)
self.assertTrue(opq1 != opq4)
opq1.fit(X)
opq2.fit(X)
opq3 = copy.deepcopy(opq1)
opq4.fit(X)
self.assertTrue(opq1 == opq1)
self.assertTrue(opq1 == opq2)
self.assertTrue(opq1 == opq3)
self.assertTrue(opq1 != opq4)
def test_rotate(self):
N, D, M, Ks = 100, 12, 4, 10
X = np.random.random((N, D)).astype(np.float32)
opq = nanopq.OPQ(M=M, Ks=Ks)
opq.fit(X)
rotated_vec = opq.rotate(X[0])
rotated_vecs = opq.rotate(X[:3])
self.assertEqual(rotated_vec.shape, (D,))
self.assertEqual(rotated_vecs.shape, (3, D))
# Because R is a rotation matrix (R^t * R = I), R^t should be R^(-1)
self.assertAlmostEqual(
np.linalg.norm(opq.R.T - np.linalg.inv(opq.R)), 0.0, places=3
)
if __name__ == "__main__":
unittest.main()
| [
"numpy.allclose",
"pathlib.Path",
"numpy.random.random",
"numpy.linalg.inv",
"numpy.random.seed",
"copy.deepcopy",
"unittest.main",
"nanopq.OPQ"
] | [((2202, 2217), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2215, 2217), False, 'import unittest\n'), ((208, 227), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (222, 227), True, 'import numpy as np\n'), ((272, 295), 'nanopq.OPQ', 'nanopq.OPQ', ([], {'M': '(4)', 'Ks': '(256)'}), '(M=4, Ks=256)\n', (282, 295), False, 'import nanopq\n'), ((592, 614), 'nanopq.OPQ', 'nanopq.OPQ', ([], {'M': 'M', 'Ks': 'Ks'}), '(M=M, Ks=Ks)\n', (602, 614), False, 'import nanopq\n'), ((1077, 1099), 'nanopq.OPQ', 'nanopq.OPQ', ([], {'M': 'M', 'Ks': 'Ks'}), '(M=M, Ks=Ks)\n', (1087, 1099), False, 'import nanopq\n'), ((1115, 1137), 'nanopq.OPQ', 'nanopq.OPQ', ([], {'M': 'M', 'Ks': 'Ks'}), '(M=M, Ks=Ks)\n', (1125, 1137), False, 'import nanopq\n'), ((1153, 1172), 'copy.deepcopy', 'copy.deepcopy', (['opq1'], {}), '(opq1)\n', (1166, 1172), False, 'import copy\n'), ((1188, 1214), 'nanopq.OPQ', 'nanopq.OPQ', ([], {'M': 'M', 'Ks': '(2 * Ks)'}), '(M=M, Ks=2 * Ks)\n', (1198, 1214), False, 'import nanopq\n'), ((1423, 1442), 'copy.deepcopy', 'copy.deepcopy', (['opq1'], {}), '(opq1)\n', (1436, 1442), False, 'import copy\n'), ((1750, 1772), 'nanopq.OPQ', 'nanopq.OPQ', ([], {'M': 'M', 'Ks': 'Ks'}), '(M=M, Ks=Ks)\n', (1760, 1772), False, 'import nanopq\n'), ((880, 922), 'numpy.allclose', 'np.allclose', (['opq.codewords', 'opq2.codewords'], {}), '(opq.codewords, opq2.codewords)\n', (891, 922), True, 'import numpy as np\n'), ((534, 558), 'numpy.random.random', 'np.random.random', (['(N, D)'], {}), '((N, D))\n', (550, 558), True, 'import numpy as np\n'), ((798, 820), 'nanopq.OPQ', 'nanopq.OPQ', ([], {'M': 'M', 'Ks': 'Ks'}), '(M=M, Ks=Ks)\n', (808, 820), False, 'import nanopq\n'), ((1018, 1042), 'numpy.random.random', 'np.random.random', (['(N, D)'], {}), '((N, D))\n', (1034, 1042), True, 'import numpy as np\n'), ((1692, 1716), 'numpy.random.random', 'np.random.random', (['(N, D)'], {}), '((N, D))\n', (1708, 1716), True, 'import numpy as np\n'), ((57, 71), 'pathlib.Path', 
'Path', (['__file__'], {}), '(__file__)\n', (61, 71), False, 'from pathlib import Path\n'), ((2122, 2142), 'numpy.linalg.inv', 'np.linalg.inv', (['opq.R'], {}), '(opq.R)\n', (2135, 2142), True, 'import numpy as np\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeRaw op from parsing_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
class DecodeRawOpTest(test.TestCase):
def testToUint8(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[2])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.uint8)
self.assertEqual([2, None], decode.get_shape().as_list())
result = decode.eval(feed_dict={in_bytes: ["A", "a"]})
self.assertAllEqual([[ord("A")], [ord("a")]], result)
result = decode.eval(feed_dict={in_bytes: ["wer", "XYZ"]})
self.assertAllEqual([[ord("w"), ord("e"), ord("r")],
[ord("X"), ord("Y"), ord("Z")]], result)
with self.assertRaisesOpError(
"DecodeRaw requires input strings to all be the same size, but "
"element 1 has size 5 != 6"):
decode.eval(feed_dict={in_bytes: ["short", "longer"]})
def testToInt16(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.int16)
self.assertEqual([None, None], decode.get_shape().as_list())
result = decode.eval(feed_dict={in_bytes: ["AaBC"]})
self.assertAllEqual(
[[ord("A") + ord("a") * 256, ord("B") + ord("C") * 256]], result)
with self.assertRaisesOpError(
"Input to DecodeRaw has length 3 that is not a multiple of 2, the "
"size of int16"):
decode.eval(feed_dict={in_bytes: ["123", "456"]})
def testEndianness(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode_le = parsing_ops.decode_raw(
in_bytes, out_type=dtypes.int32, little_endian=True)
decode_be = parsing_ops.decode_raw(
in_bytes, out_type=dtypes.int32, little_endian=False)
result = decode_le.eval(feed_dict={in_bytes: ["\x01\x02\x03\x04"]})
self.assertAllEqual([[0x04030201]], result)
result = decode_be.eval(feed_dict={in_bytes: ["\x01\x02\x03\x04"]})
self.assertAllEqual([[0x01020304]], result)
def testToFloat16(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.float16)
self.assertEqual([None, None], decode.get_shape().as_list())
expected_result = np.matrix([[1, -2, -3, 4]], dtype=np.float16)
result = decode.eval(feed_dict={in_bytes: [expected_result.tostring()]})
self.assertAllEqual(expected_result, result)
def testEmptyStringInput(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.float16)
for num_inputs in range(3):
result = decode.eval(feed_dict={in_bytes: [""] * num_inputs})
self.assertEqual((num_inputs, 0), result.shape)
def testToUInt16(self):
with self.cached_session():
in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.uint16)
self.assertEqual([None, None], decode.get_shape().as_list())
# Use FF/EE/DD/CC so that decoded value is higher than 32768 for uint16
result = decode.eval(feed_dict={in_bytes: [b"\xFF\xEE\xDD\xCC"]})
self.assertAllEqual(
[[0xFF + 0xEE * 256, 0xDD + 0xCC * 256]], result)
with self.assertRaisesOpError(
"Input to DecodeRaw has length 3 that is not a multiple of 2, the "
"size of uint16"):
decode.eval(feed_dict={in_bytes: ["123", "456"]})
if __name__ == "__main__":
test.main()
| [
"tensorflow.python.ops.array_ops.placeholder",
"numpy.matrix",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.parsing_ops.decode_raw"
] | [((4671, 4682), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (4680, 4682), False, 'from tensorflow.python.platform import test\n'), ((1163, 1210), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.string'], {'shape': '[2]'}), '(dtypes.string, shape=[2])\n', (1184, 1210), False, 'from tensorflow.python.ops import array_ops\n'), ((1226, 1281), 'tensorflow.python.ops.parsing_ops.decode_raw', 'parsing_ops.decode_raw', (['in_bytes'], {'out_type': 'dtypes.uint8'}), '(in_bytes, out_type=dtypes.uint8)\n', (1248, 1281), False, 'from tensorflow.python.ops import parsing_ops\n'), ((1952, 2002), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.string'], {'shape': '[None]'}), '(dtypes.string, shape=[None])\n', (1973, 2002), False, 'from tensorflow.python.ops import array_ops\n'), ((2018, 2073), 'tensorflow.python.ops.parsing_ops.decode_raw', 'parsing_ops.decode_raw', (['in_bytes'], {'out_type': 'dtypes.int16'}), '(in_bytes, out_type=dtypes.int16)\n', (2040, 2073), False, 'from tensorflow.python.ops import parsing_ops\n'), ((2584, 2634), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.string'], {'shape': '[None]'}), '(dtypes.string, shape=[None])\n', (2605, 2634), False, 'from tensorflow.python.ops import array_ops\n'), ((2653, 2728), 'tensorflow.python.ops.parsing_ops.decode_raw', 'parsing_ops.decode_raw', (['in_bytes'], {'out_type': 'dtypes.int32', 'little_endian': '(True)'}), '(in_bytes, out_type=dtypes.int32, little_endian=True)\n', (2675, 2728), False, 'from tensorflow.python.ops import parsing_ops\n'), ((2758, 2834), 'tensorflow.python.ops.parsing_ops.decode_raw', 'parsing_ops.decode_raw', (['in_bytes'], {'out_type': 'dtypes.int32', 'little_endian': '(False)'}), '(in_bytes, out_type=dtypes.int32, little_endian=False)\n', (2780, 2834), False, 'from tensorflow.python.ops import parsing_ops\n'), ((3171, 3221), 
'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.string'], {'shape': '[None]'}), '(dtypes.string, shape=[None])\n', (3192, 3221), False, 'from tensorflow.python.ops import array_ops\n'), ((3237, 3294), 'tensorflow.python.ops.parsing_ops.decode_raw', 'parsing_ops.decode_raw', (['in_bytes'], {'out_type': 'dtypes.float16'}), '(in_bytes, out_type=dtypes.float16)\n', (3259, 3294), False, 'from tensorflow.python.ops import parsing_ops\n'), ((3387, 3432), 'numpy.matrix', 'np.matrix', (['[[1, -2, -3, 4]]'], {'dtype': 'np.float16'}), '([[1, -2, -3, 4]], dtype=np.float16)\n', (3396, 3432), True, 'import numpy as np\n'), ((3648, 3698), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.string'], {'shape': '[None]'}), '(dtypes.string, shape=[None])\n', (3669, 3698), False, 'from tensorflow.python.ops import array_ops\n'), ((3714, 3771), 'tensorflow.python.ops.parsing_ops.decode_raw', 'parsing_ops.decode_raw', (['in_bytes'], {'out_type': 'dtypes.float16'}), '(in_bytes, out_type=dtypes.float16)\n', (3736, 3771), False, 'from tensorflow.python.ops import parsing_ops\n'), ((4009, 4059), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.string'], {'shape': '[None]'}), '(dtypes.string, shape=[None])\n', (4030, 4059), False, 'from tensorflow.python.ops import array_ops\n'), ((4075, 4131), 'tensorflow.python.ops.parsing_ops.decode_raw', 'parsing_ops.decode_raw', (['in_bytes'], {'out_type': 'dtypes.uint16'}), '(in_bytes, out_type=dtypes.uint16)\n', (4097, 4131), False, 'from tensorflow.python.ops import parsing_ops\n')] |
"""This module contains the process that generates our regression test battery."""
import os
import json
import argparse
import numpy as np
from soepy.python.simulate.simulate_python import simulate
from soepy.python.soepy_config import TEST_RESOURCES_DIR
from soepy.test.random_init import random_init
from soepy.test.random_init import print_dict
from soepy.test.auxiliary import cleanup
def process_arguments(parser):
"""This function parses the input arguments."""
args = parser.parse_args()
# Distribute input arguments
request = args.request
num_test = args.num_test
seed = args.seed
# Test validity of input arguments
assert request in ["check", "create"]
if num_test is None:
num_test = 100
if seed is None:
seed = 123456
return request, num_test, seed
def create_vault(num_test=100, seed=123456):
"""This function creates our regression vault."""
np.random.seed(seed)
seeds = np.random.randint(0, 1000, size=num_test)
file_dir = os.path.join(TEST_RESOURCES_DIR, "regression_vault.soepy.json")
tests = []
for counter, seed in enumerate(seeds):
np.random.seed(seed)
init_dict = random_init()
df = simulate("test.soepy.yml")
stat = np.sum(df.sum())
tests += [(stat, init_dict)]
cleanup("regression")
json.dump(tests, open(file_dir, "w"))
def check_vault():
"""This function runs another simulation for each init file in our regression vault.
"""
file_dir = os.path.join(TEST_RESOURCES_DIR, "regression_vault.soepy.json")
tests = json.load(open(file_dir, "r"))
for test in tests:
stat, init_dict = test
print_dict(init_dict)
df = simulate("test.soepy.yml")
stat_new = np.sum(df.sum())
np.testing.assert_array_almost_equal(stat, stat_new)
cleanup("regression")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Work with regression tests for package."
)
parser.add_argument(
"--request",
action="store",
dest="request",
required=True,
choices=["check", "create"],
help="request",
)
parser.add_argument(
"--num", action="store", dest="num_test", type=int, help="number of init files"
)
parser.add_argument(
"--seed", action="store", dest="seed", type=int, help="seed value"
)
request, num_test, seed = process_arguments(parser)
if request == "check":
check_vault()
elif request == "create":
create_vault(num_test, seed)
| [
"soepy.test.auxiliary.cleanup",
"numpy.testing.assert_array_almost_equal",
"soepy.test.random_init.random_init",
"argparse.ArgumentParser",
"soepy.test.random_init.print_dict",
"os.path.join",
"soepy.python.simulate.simulate_python.simulate",
"numpy.random.randint",
"numpy.random.seed"
] | [((934, 954), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (948, 954), True, 'import numpy as np\n'), ((967, 1008), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {'size': 'num_test'}), '(0, 1000, size=num_test)\n', (984, 1008), True, 'import numpy as np\n'), ((1024, 1087), 'os.path.join', 'os.path.join', (['TEST_RESOURCES_DIR', '"""regression_vault.soepy.json"""'], {}), "(TEST_RESOURCES_DIR, 'regression_vault.soepy.json')\n", (1036, 1087), False, 'import os\n'), ((1328, 1349), 'soepy.test.auxiliary.cleanup', 'cleanup', (['"""regression"""'], {}), "('regression')\n", (1335, 1349), False, 'from soepy.test.auxiliary import cleanup\n'), ((1526, 1589), 'os.path.join', 'os.path.join', (['TEST_RESOURCES_DIR', '"""regression_vault.soepy.json"""'], {}), "(TEST_RESOURCES_DIR, 'regression_vault.soepy.json')\n", (1538, 1589), False, 'import os\n'), ((1865, 1886), 'soepy.test.auxiliary.cleanup', 'cleanup', (['"""regression"""'], {}), "('regression')\n", (1872, 1886), False, 'from soepy.test.auxiliary import cleanup\n'), ((1930, 2008), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Work with regression tests for package."""'}), "(description='Work with regression tests for package.')\n", (1953, 2008), False, 'import argparse\n'), ((1156, 1176), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1170, 1176), True, 'import numpy as np\n'), ((1198, 1211), 'soepy.test.random_init.random_init', 'random_init', ([], {}), '()\n', (1209, 1211), False, 'from soepy.test.random_init import random_init\n'), ((1226, 1252), 'soepy.python.simulate.simulate_python.simulate', 'simulate', (['"""test.soepy.yml"""'], {}), "('test.soepy.yml')\n", (1234, 1252), False, 'from soepy.python.simulate.simulate_python import simulate\n'), ((1698, 1719), 'soepy.test.random_init.print_dict', 'print_dict', (['init_dict'], {}), '(init_dict)\n', (1708, 1719), False, 'from soepy.test.random_init import print_dict\n'), 
((1734, 1760), 'soepy.python.simulate.simulate_python.simulate', 'simulate', (['"""test.soepy.yml"""'], {}), "('test.soepy.yml')\n", (1742, 1760), False, 'from soepy.python.simulate.simulate_python import simulate\n'), ((1807, 1859), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['stat', 'stat_new'], {}), '(stat, stat_new)\n', (1843, 1859), True, 'import numpy as np\n')] |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import unittest
from generator import generator, generate
from extensions.ops.Cast import Cast
from mo.middle.passes.convert_data_type import packed_U4, packed_I4
from mo.middle.passes.infer import partial_infer
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import valued_const_with_data, regular_op_with_empty_data, \
result, build_graph, connect
nodes = lambda value, dst_type: {
**valued_const_with_data('value', np.array(value)),
**regular_op_with_empty_data('convert', {'dst_type': dst_type, 'infer': Cast.infer}),
**result(),
}
@generator
class CastTest(unittest.TestCase):
"""
Example of checking:
7 == 0111, padded to 0111 0000, results in 112
7 == 0111, 8 == 1000 packed to 0111 1000, results in 120
-8 == 1000, padded to 1000 0000, results in 128
"""
@generate(*[
([0], [0], packed_U4),
([1], [16], packed_U4),
([2], [32], packed_U4),
([3], [48], packed_U4),
([4], [64], packed_U4),
([5], [80], packed_U4),
([6], [96], packed_U4),
([7], [112], packed_U4),
([8], [128], packed_U4),
([9], [144], packed_U4),
([10], [160], packed_U4),
([11], [176], packed_U4),
([12], [192], packed_U4),
([13], [208], packed_U4),
([14], [224], packed_U4),
([15], [240], packed_U4),
([0, 15], [15], packed_U4),
([1, 14], [30], packed_U4),
([2, 13], [45], packed_U4),
([3, 12], [60], packed_U4),
([4, 11], [75], packed_U4),
([5, 10], [90], packed_U4),
([6, 9], [105], packed_U4),
([7, 8], [120], packed_U4),
([8, 7], [135], packed_U4),
([9, 6], [150], packed_U4),
([10, 5], [165], packed_U4),
([11, 4], [180], packed_U4),
([12, 3], [195], packed_U4),
([13, 2], [210], packed_U4),
([14, 1], [225], packed_U4),
([15, 0], [240], packed_U4),
([-8], [128], packed_I4),
([-7], [144], packed_I4),
([-6], [160], packed_I4),
([-5], [176], packed_I4),
([-4], [192], packed_I4),
([-3], [208], packed_I4),
([-2], [224], packed_I4),
([-1], [240], packed_I4),
([0], [0], packed_I4),
([1], [16], packed_I4),
([2], [32], packed_I4),
([3], [48], packed_I4),
([4], [64], packed_I4),
([5], [80], packed_I4),
([6], [96], packed_I4),
([7], [112], packed_I4),
([-8, 7], [135], packed_I4),
([-7, 6], [150], packed_I4),
([-6, 5], [165], packed_I4),
([-5, 4], [180], packed_I4),
([-4, 3], [195], packed_I4),
([-3, 2], [210], packed_I4),
([-2, 1], [225], packed_I4),
([-1, 0], [240], packed_I4),
([0, -1], [15], packed_I4),
([1, -2], [30], packed_I4),
([2, -3], [45], packed_I4),
([3, -4], [60], packed_I4),
([4, -5], [75], packed_I4),
([5, -6], [90], packed_I4),
([6, -7], [105], packed_I4),
([7, -8], [120], packed_I4),
])
def test_custom_value_propagation(self, value, expected, custom_dtype):
graph = build_graph(nodes(value, custom_dtype), [
*connect('value', 'convert'), *connect('convert', 'output'),
])
partial_infer(graph)
graph_ref = build_graph(nodes(value, custom_dtype), [
*connect('value', 'convert'), *connect('convert', 'output')],
{'convert_d': {'force_type': custom_dtype, 'force_shape': np.array(value).shape,
'value': expected}})
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
| [
"mo.utils.unittest.graph.connect",
"generator.generate",
"mo.utils.unittest.graph.regular_op_with_empty_data",
"mo.utils.ir_engine.compare_graphs.compare_graphs",
"mo.middle.passes.infer.partial_infer",
"mo.utils.unittest.graph.result",
"numpy.array"
] | [((989, 2814), 'generator.generate', 'generate', (['*[([0], [0], packed_U4), ([1], [16], packed_U4), ([2], [32], packed_U4), ([\n 3], [48], packed_U4), ([4], [64], packed_U4), ([5], [80], packed_U4), (\n [6], [96], packed_U4), ([7], [112], packed_U4), ([8], [128], packed_U4),\n ([9], [144], packed_U4), ([10], [160], packed_U4), ([11], [176],\n packed_U4), ([12], [192], packed_U4), ([13], [208], packed_U4), ([14],\n [224], packed_U4), ([15], [240], packed_U4), ([0, 15], [15], packed_U4),\n ([1, 14], [30], packed_U4), ([2, 13], [45], packed_U4), ([3, 12], [60],\n packed_U4), ([4, 11], [75], packed_U4), ([5, 10], [90], packed_U4), ([6,\n 9], [105], packed_U4), ([7, 8], [120], packed_U4), ([8, 7], [135],\n packed_U4), ([9, 6], [150], packed_U4), ([10, 5], [165], packed_U4), ([\n 11, 4], [180], packed_U4), ([12, 3], [195], packed_U4), ([13, 2], [210],\n packed_U4), ([14, 1], [225], packed_U4), ([15, 0], [240], packed_U4), (\n [-8], [128], packed_I4), ([-7], [144], packed_I4), ([-6], [160],\n packed_I4), ([-5], [176], packed_I4), ([-4], [192], packed_I4), ([-3],\n [208], packed_I4), ([-2], [224], packed_I4), ([-1], [240], packed_I4),\n ([0], [0], packed_I4), ([1], [16], packed_I4), ([2], [32], packed_I4),\n ([3], [48], packed_I4), ([4], [64], packed_I4), ([5], [80], packed_I4),\n ([6], [96], packed_I4), ([7], [112], packed_I4), ([-8, 7], [135],\n packed_I4), ([-7, 6], [150], packed_I4), ([-6, 5], [165], packed_I4), (\n [-5, 4], [180], packed_I4), ([-4, 3], [195], packed_I4), ([-3, 2], [210\n ], packed_I4), ([-2, 1], [225], packed_I4), ([-1, 0], [240], packed_I4),\n ([0, -1], [15], packed_I4), ([1, -2], [30], packed_I4), ([2, -3], [45],\n packed_I4), ([3, -4], [60], packed_I4), ([4, -5], [75], packed_I4), ([5,\n -6], [90], packed_I4), ([6, -7], [105], packed_I4), ([7, -8], [120],\n packed_I4)]'], {}), '(*[([0], [0], packed_U4), ([1], [16], packed_U4), ([2], [32],\n packed_U4), ([3], [48], packed_U4), ([4], [64], packed_U4), ([5], [80],\n packed_U4), ([6], [96], 
packed_U4), ([7], [112], packed_U4), ([8], [128\n ], packed_U4), ([9], [144], packed_U4), ([10], [160], packed_U4), ([11],\n [176], packed_U4), ([12], [192], packed_U4), ([13], [208], packed_U4),\n ([14], [224], packed_U4), ([15], [240], packed_U4), ([0, 15], [15],\n packed_U4), ([1, 14], [30], packed_U4), ([2, 13], [45], packed_U4), ([3,\n 12], [60], packed_U4), ([4, 11], [75], packed_U4), ([5, 10], [90],\n packed_U4), ([6, 9], [105], packed_U4), ([7, 8], [120], packed_U4), ([8,\n 7], [135], packed_U4), ([9, 6], [150], packed_U4), ([10, 5], [165],\n packed_U4), ([11, 4], [180], packed_U4), ([12, 3], [195], packed_U4), (\n [13, 2], [210], packed_U4), ([14, 1], [225], packed_U4), ([15, 0], [240\n ], packed_U4), ([-8], [128], packed_I4), ([-7], [144], packed_I4), ([-6\n ], [160], packed_I4), ([-5], [176], packed_I4), ([-4], [192], packed_I4\n ), ([-3], [208], packed_I4), ([-2], [224], packed_I4), ([-1], [240],\n packed_I4), ([0], [0], packed_I4), ([1], [16], packed_I4), ([2], [32],\n packed_I4), ([3], [48], packed_I4), ([4], [64], packed_I4), ([5], [80],\n packed_I4), ([6], [96], packed_I4), ([7], [112], packed_I4), ([-8, 7],\n [135], packed_I4), ([-7, 6], [150], packed_I4), ([-6, 5], [165],\n packed_I4), ([-5, 4], [180], packed_I4), ([-4, 3], [195], packed_I4), (\n [-3, 2], [210], packed_I4), ([-2, 1], [225], packed_I4), ([-1, 0], [240\n ], packed_I4), ([0, -1], [15], packed_I4), ([1, -2], [30], packed_I4),\n ([2, -3], [45], packed_I4), ([3, -4], [60], packed_I4), ([4, -5], [75],\n packed_I4), ([5, -6], [90], packed_I4), ([6, -7], [105], packed_I4), ([\n 7, -8], [120], packed_I4)])\n', (997, 2814), False, 'from generator import generator, generate\n'), ((596, 683), 'mo.utils.unittest.graph.regular_op_with_empty_data', 'regular_op_with_empty_data', (['"""convert"""', "{'dst_type': dst_type, 'infer': Cast.infer}"], {}), "('convert', {'dst_type': dst_type, 'infer': Cast.\n infer})\n", (622, 683), False, 'from mo.utils.unittest.graph import valued_const_with_data, 
regular_op_with_empty_data, result, build_graph, connect\n'), ((686, 694), 'mo.utils.unittest.graph.result', 'result', ([], {}), '()\n', (692, 694), False, 'from mo.utils.unittest.graph import valued_const_with_data, regular_op_with_empty_data, result, build_graph, connect\n'), ((3459, 3479), 'mo.middle.passes.infer.partial_infer', 'partial_infer', (['graph'], {}), '(graph)\n', (3472, 3479), False, 'from mo.middle.passes.infer import partial_infer\n'), ((3822, 3885), 'mo.utils.ir_engine.compare_graphs.compare_graphs', 'compare_graphs', (['graph', 'graph_ref', '"""output"""'], {'check_op_attrs': '(True)'}), "(graph, graph_ref, 'output', check_op_attrs=True)\n", (3836, 3885), False, 'from mo.utils.ir_engine.compare_graphs import compare_graphs\n'), ((572, 587), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (580, 587), True, 'import numpy as np\n'), ((3380, 3407), 'mo.utils.unittest.graph.connect', 'connect', (['"""value"""', '"""convert"""'], {}), "('value', 'convert')\n", (3387, 3407), False, 'from mo.utils.unittest.graph import valued_const_with_data, regular_op_with_empty_data, result, build_graph, connect\n'), ((3410, 3438), 'mo.utils.unittest.graph.connect', 'connect', (['"""convert"""', '"""output"""'], {}), "('convert', 'output')\n", (3417, 3438), False, 'from mo.utils.unittest.graph import valued_const_with_data, regular_op_with_empty_data, result, build_graph, connect\n'), ((3556, 3583), 'mo.utils.unittest.graph.connect', 'connect', (['"""value"""', '"""convert"""'], {}), "('value', 'convert')\n", (3563, 3583), False, 'from mo.utils.unittest.graph import valued_const_with_data, regular_op_with_empty_data, result, build_graph, connect\n'), ((3586, 3614), 'mo.utils.unittest.graph.connect', 'connect', (['"""convert"""', '"""output"""'], {}), "('convert', 'output')\n", (3593, 3614), False, 'from mo.utils.unittest.graph import valued_const_with_data, regular_op_with_empty_data, result, build_graph, connect\n'), ((3707, 3722), 'numpy.array', 'np.array', 
(['value'], {}), '(value)\n', (3715, 3722), True, 'import numpy as np\n')] |
import numpy as np
x = np.arange(18).reshape(6,3)
print(x)
y = np.array_split(x, 3)
y = np.delete(y, 1, axis=0).reshape(-1,3)
print(y)
print(x)
| [
"numpy.array_split",
"numpy.delete",
"numpy.arange"
] | [((65, 85), 'numpy.array_split', 'np.array_split', (['x', '(3)'], {}), '(x, 3)\n', (79, 85), True, 'import numpy as np\n'), ((24, 37), 'numpy.arange', 'np.arange', (['(18)'], {}), '(18)\n', (33, 37), True, 'import numpy as np\n'), ((90, 113), 'numpy.delete', 'np.delete', (['y', '(1)'], {'axis': '(0)'}), '(y, 1, axis=0)\n', (99, 113), True, 'import numpy as np\n')] |
"""
Module with reading functionalities for calibration spectra.
"""
import os
import configparser
from typing import Optional, Dict, Tuple
import h5py
import spectres
import numpy as np
from typeguard import typechecked
from scipy.optimize import curve_fit
from species.analysis import photometry
from species.core import box
from species.read import read_filter
from species.util import read_util
class ReadCalibration:
"""
Class for reading a calibration spectrum from the database.
"""
@typechecked
def __init__(self,
tag: str,
filter_name: Optional[str] = None) -> None:
"""
Parameters
----------
tag : str
Database tag of the calibration spectrum.
filter_name : str, None
Filter name that is used for the wavelength range. Full spectrum is used if set to
``None``.
Returns
-------
NoneType
None
"""
self.tag = tag
self.filter_name = filter_name
if filter_name is None:
self.wavel_range = None
else:
transmission = read_filter.ReadFilter(filter_name)
self.wavel_range = transmission.wavelength_range()
config_file = os.path.join(os.getcwd(), 'species_config.ini')
config = configparser.ConfigParser()
config.read_file(open(config_file))
self.database = config['species']['database']
@typechecked
def resample_spectrum(self,
wavel_points: np.ndarray,
model_param: Optional[Dict[str, float]] = None,
apply_mask: bool = False) -> box.SpectrumBox:
"""
Function for resampling of a spectrum and uncertainties onto a new wavelength grid.
Parameters
----------
wavel_points : np.ndarray
Wavelength points (um).
model_param : dict, None
Model parameters. Should contain the 'scaling' value. Not used if set to ``None``.
apply_mask : bool
Exclude negative values and NaN values.
Returns
-------
species.core.box.SpectrumBox
Box with the resampled spectrum.
"""
calibbox = self.get_spectrum()
if apply_mask:
indices = np.where(calibbox.flux > 0.)[0]
calibbox.wavelength = calibbox.wavelength[indices]
calibbox.flux = calibbox.flux[indices]
calibbox.error = calibbox.error[indices]
flux_new, error_new = spectres.spectres(wavel_points,
calibbox.wavelength,
calibbox.flux,
spec_errs=calibbox.error,
fill=0.,
verbose=False)
if model_param is not None:
flux_new = model_param['scaling']*flux_new
error_new = model_param['scaling']*error_new
return box.create_box(boxtype='spectrum',
spectrum='calibration',
wavelength=wavel_points,
flux=flux_new,
error=error_new,
name=self.tag,
simbad=None,
sptype=None,
distance=None)
@typechecked
def get_spectrum(self,
                 model_param: Optional[Dict[str, float]] = None,
                 apply_mask: bool = False,
                 spec_res: Optional[float] = None,
                 extrapolate: bool = False,
                 min_wavelength: Optional[float] = None) -> box.SpectrumBox:
    """
    Read the calibration spectrum from the database and optionally scale,
    mask, resample, and/or extrapolate it.

    Parameters
    ----------
    model_param : dict, None
        Model parameters. Should contain the 'scaling' value. Not used if set to ``None``.
    apply_mask : bool
        Exclude negative values and NaN values.
    spec_res : float, None
        Spectral resolution. Original wavelength points are used if set to ``None``.
    extrapolate : bool
        Extrapolate to 6 um by fitting a power law function.
    min_wavelength : float, None
        Minimum wavelength used for fitting the power law function. All data is used if set
        to ``None``.

    Returns
    -------
    species.core.box.SpectrumBox
        Box with the spectrum.
    """
    # Read the (3, n_wavelengths) array with wavelength, flux, and error.
    with h5py.File(self.database, 'r') as h5_file:
        spec_data = np.asarray(h5_file[f'spectra/calibration/{self.tag}'])

    wavelength = np.asarray(spec_data[0])
    flux = np.asarray(spec_data[1])
    error = np.asarray(spec_data[2])

    if apply_mask:
        # Keep strictly positive fluxes (NaN > 0. is False, so NaNs drop too).
        positive = np.where(flux > 0.)[0]

        wavelength = wavelength[positive]
        flux = flux[positive]
        error = error[positive]

    if model_param is not None:
        flux = model_param['scaling']*flux
        error = model_param['scaling']*error

    # Select the requested wavelength range; when a range is set, also
    # require positive fluxes.
    if self.wavel_range is None:
        selection = np.ones(wavelength.size, dtype=bool)
    else:
        selection = ((flux > 0.)
                     & (wavelength > self.wavel_range[0])
                     & (wavelength < self.wavel_range[1]))

    if np.count_nonzero(selection) > 0:
        selected = np.where(selection)[0]

        # Include one extra point on each side of the selected range.
        if selected[0] > 0:
            selection[selected[0] - 1] = True

        if selected[-1] < len(selection) - 1:
            selection[selected[-1] + 1] = True

        wavelength = wavelength[selection]
        flux = flux[selection]
        error = error[selection]

    if extrapolate:
        def _power_law(wavelength, offset, scaling, power_index):
            return offset + scaling*wavelength**power_index

        if min_wavelength:
            fit_select = np.where(wavelength > min_wavelength)[0]
        else:
            fit_select = np.arange(0, wavelength.size, 1)

        popt, pcov = curve_fit(f=_power_law,
                               xdata=wavelength[fit_select],
                               ydata=flux[fit_select],
                               p0=(0., np.mean(flux[fit_select]), -1.),
                               sigma=error[fit_select])

        sigma = np.sqrt(np.diag(pcov))

        print('Fit result for f(x) = a + b*x^c:')
        print(f'a = {popt[0]} +/- {sigma[0]}')
        print(f'b = {popt[1]} +/- {sigma[1]}')
        print(f'c = {popt[2]} +/- {sigma[2]}')

        # Append power-law points in 0.1% wavelength steps until 6 um;
        # the extrapolated points get zero uncertainty.
        while wavelength[-1] <= 6.:
            wl_add = wavelength[-1] + wavelength[-1]/1000.

            wavelength = np.append(wavelength, wl_add)
            flux = np.append(flux, _power_law(wl_add, *popt))
            error = np.append(error, 0.)

    if spec_res is not None:
        # Resample the spectrum at the requested spectral resolution.
        wavel_resample = read_util.create_wavelengths(
            (wavelength[0], wavelength[-1]), spec_res)

        flux, error = spectres.spectres(wavel_resample,
                                        wavelength,
                                        flux,
                                        spec_errs=error,
                                        fill=0.,
                                        verbose=True)

        wavelength = wavel_resample

    return box.create_box(boxtype='spectrum',
                          spectrum='calibration',
                          wavelength=wavelength,
                          flux=flux,
                          error=error,
                          name=self.tag,
                          simbad=None,
                          sptype=None,
                          distance=None)
@typechecked
def get_flux(self,
             model_param: Optional[Dict[str, float]] = None) -> Tuple[float, float]:
    """
    Function for calculating the average flux for the ``filter_name``.

    Parameters
    ----------
    model_param : dict, None
        Model parameters. Should contain the 'scaling' value. Not used if set to ``None``.

    Returns
    -------
    tuple(float, float)
        Average flux and uncertainty (W m-2 um-1).
    """
    specbox = self.get_spectrum(model_param=model_param)

    synphot = photometry.SyntheticPhotometry(self.filter_name)

    # Bug fix: the flux array was previously passed as the ``error`` argument
    # (error=specbox.flux), so the returned uncertainty was unrelated to the
    # measured errors. Pass the error array instead, consistent with
    # get_magnitude which uses specbox.error.
    return synphot.spectrum_to_flux(specbox.wavelength, specbox.flux,
                                    error=specbox.error)
@typechecked
def get_magnitude(self,
                  model_param: Optional[Dict[str, float]] = None,
                  distance: Optional[Tuple[float, float]] = None) -> Tuple[
                  Tuple[float, Optional[float]], Tuple[Optional[float], Optional[float]]]:
    """
    Function for calculating the apparent magnitude for the ``filter_name``.

    Parameters
    ----------
    model_param : dict, None
        Model parameters. Should contain the 'scaling' value. Not used if set to ``None``.
    distance : tuple(float, float), None
        Distance and uncertainty to the calibration object (pc). Not used if set to ``None``,
        in which case the returned absolute magnitude is ``(None, None)``.

    Returns
    -------
    tuple(float, float)
        Apparent magnitude and uncertainty.
    tuple(float, float), tuple(None, None)
        Absolute magnitude and uncertainty.
    """
    specbox = self.get_spectrum(model_param=model_param)

    # Only propagate uncertainties when the spectrum has non-zero errors.
    error = None if np.count_nonzero(specbox.error) == 0 else specbox.error

    synphot = photometry.SyntheticPhotometry(self.filter_name)

    return synphot.spectrum_to_magnitude(specbox.wavelength,
                                         specbox.flux,
                                         error=error,
                                         distance=distance)
| [
"numpy.mean",
"configparser.ConfigParser",
"numpy.ones",
"numpy.where",
"numpy.asarray",
"species.util.read_util.create_wavelengths",
"species.core.box.create_box",
"numpy.count_nonzero",
"spectres.spectres",
"os.getcwd",
"h5py.File",
"numpy.diag",
"numpy.append",
"species.analysis.photome... | [((1350, 1377), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1375, 1377), False, 'import configparser\n'), ((2588, 2710), 'spectres.spectres', 'spectres.spectres', (['wavel_points', 'calibbox.wavelength', 'calibbox.flux'], {'spec_errs': 'calibbox.error', 'fill': '(0.0)', 'verbose': '(False)'}), '(wavel_points, calibbox.wavelength, calibbox.flux,\n spec_errs=calibbox.error, fill=0.0, verbose=False)\n', (2605, 2710), False, 'import spectres\n'), ((3111, 3292), 'species.core.box.create_box', 'box.create_box', ([], {'boxtype': '"""spectrum"""', 'spectrum': '"""calibration"""', 'wavelength': 'wavel_points', 'flux': 'flux_new', 'error': 'error_new', 'name': 'self.tag', 'simbad': 'None', 'sptype': 'None', 'distance': 'None'}), "(boxtype='spectrum', spectrum='calibration', wavelength=\n wavel_points, flux=flux_new, error=error_new, name=self.tag, simbad=\n None, sptype=None, distance=None)\n", (3125, 3292), False, 'from species.core import box\n'), ((5509, 5535), 'numpy.count_nonzero', 'np.count_nonzero', (['wl_index'], {}), '(wl_index)\n', (5525, 5535), True, 'import numpy as np\n'), ((7761, 7932), 'species.core.box.create_box', 'box.create_box', ([], {'boxtype': '"""spectrum"""', 'spectrum': '"""calibration"""', 'wavelength': 'wavelength', 'flux': 'flux', 'error': 'error', 'name': 'self.tag', 'simbad': 'None', 'sptype': 'None', 'distance': 'None'}), "(boxtype='spectrum', spectrum='calibration', wavelength=\n wavelength, flux=flux, error=error, name=self.tag, simbad=None, sptype=\n None, distance=None)\n", (7775, 7932), False, 'from species.core import box\n'), ((8756, 8804), 'species.analysis.photometry.SyntheticPhotometry', 'photometry.SyntheticPhotometry', (['self.filter_name'], {}), '(self.filter_name)\n', (8786, 8804), False, 'from species.analysis import photometry\n'), ((10097, 10145), 'species.analysis.photometry.SyntheticPhotometry', 'photometry.SyntheticPhotometry', (['self.filter_name'], {}), 
'(self.filter_name)\n', (10127, 10145), False, 'from species.analysis import photometry\n'), ((1162, 1197), 'species.read.read_filter.ReadFilter', 'read_filter.ReadFilter', (['filter_name'], {}), '(filter_name)\n', (1184, 1197), False, 'from species.read import read_filter\n'), ((1297, 1308), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1306, 1308), False, 'import os\n'), ((4683, 4712), 'h5py.File', 'h5py.File', (['self.database', '"""r"""'], {}), "(self.database, 'r')\n", (4692, 4712), False, 'import h5py\n'), ((4744, 4798), 'numpy.asarray', 'np.asarray', (["h5_file[f'spectra/calibration/{self.tag}']"], {}), "(h5_file[f'spectra/calibration/{self.tag}'])\n", (4754, 4798), True, 'import numpy as np\n'), ((4825, 4845), 'numpy.asarray', 'np.asarray', (['data[0,]'], {}), '(data[0,])\n', (4835, 4845), True, 'import numpy as np\n'), ((4866, 4886), 'numpy.asarray', 'np.asarray', (['data[1,]'], {}), '(data[1,])\n', (4876, 4886), True, 'import numpy as np\n'), ((4908, 4928), 'numpy.asarray', 'np.asarray', (['data[2,]'], {}), '(data[2,])\n', (4918, 4928), True, 'import numpy as np\n'), ((5307, 5343), 'numpy.ones', 'np.ones', (['wavelength.size'], {'dtype': 'bool'}), '(wavelength.size, dtype=bool)\n', (5314, 5343), True, 'import numpy as np\n'), ((7129, 7200), 'species.util.read_util.create_wavelengths', 'read_util.create_wavelengths', (['(wavelength[0], wavelength[-1])', 'spec_res'], {}), '((wavelength[0], wavelength[-1]), spec_res)\n', (7157, 7200), False, 'from species.util import read_util\n'), ((7294, 7391), 'spectres.spectres', 'spectres.spectres', (['wavelength_new', 'wavelength', 'flux'], {'spec_errs': 'error', 'fill': '(0.0)', 'verbose': '(True)'}), '(wavelength_new, wavelength, flux, spec_errs=error, fill=\n 0.0, verbose=True)\n', (7311, 7391), False, 'import spectres\n'), ((9967, 9998), 'numpy.count_nonzero', 'np.count_nonzero', (['specbox.error'], {}), '(specbox.error)\n', (9983, 9998), True, 'import numpy as np\n'), ((2357, 2386), 'numpy.where', 'np.where', 
(['(calibbox.flux > 0.0)'], {}), '(calibbox.flux > 0.0)\n', (2365, 2386), True, 'import numpy as np\n'), ((4976, 4996), 'numpy.where', 'np.where', (['(flux > 0.0)'], {}), '(flux > 0.0)\n', (4984, 4996), True, 'import numpy as np\n'), ((5579, 5597), 'numpy.where', 'np.where', (['wl_index'], {}), '(wl_index)\n', (5587, 5597), True, 'import numpy as np\n'), ((6188, 6220), 'numpy.arange', 'np.arange', (['(0)', 'wavelength.size', '(1)'], {}), '(0, wavelength.size, 1)\n', (6197, 6220), True, 'import numpy as np\n'), ((6548, 6561), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (6555, 6561), True, 'import numpy as np\n'), ((6905, 6934), 'numpy.append', 'np.append', (['wavelength', 'wl_add'], {}), '(wavelength, wl_add)\n', (6914, 6934), True, 'import numpy as np\n'), ((7045, 7066), 'numpy.append', 'np.append', (['error', '(0.0)'], {}), '(error, 0.0)\n', (7054, 7066), True, 'import numpy as np\n'), ((6103, 6140), 'numpy.where', 'np.where', (['(wavelength > min_wavelength)'], {}), '(wavelength > min_wavelength)\n', (6111, 6140), True, 'import numpy as np\n'), ((6432, 6454), 'numpy.mean', 'np.mean', (['flux[indices]'], {}), '(flux[indices])\n', (6439, 6454), True, 'import numpy as np\n')] |
# Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flax.linen."""
from absl.testing import absltest
import jax
from jax import random
from jax import lax
from jax.nn import initializers
import jax.numpy as jnp
import numpy as onp
from typing import Any, Tuple, Iterable, Callable
from flax import linen as nn
from flax.linen import compact
from flax.core import Scope, freeze
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
# Require JAX omnistaging mode.
# NOTE(review): enable_omnistaging() was removed in later JAX releases once
# omnistaging became the default — confirm against the pinned JAX version.
jax.config.enable_omnistaging()
class DummyModule(nn.Module):
  """Minimal test module: adds a learnable 'bias' (initialized to ones,
  same shape as the input) to its input."""

  @compact
  def __call__(self, x):
    ones_bias = self.param('bias', initializers.ones, x.shape)
    return x + ones_bias
class Dense(nn.Module):
  """Bias-free linear layer with ``features`` output units."""
  features: int

  @compact
  def __call__(self, x):
    weight = self.param('kernel',
                        initializers.lecun_normal(),
                        (x.shape[-1], self.features))
    return jnp.dot(x, weight)
class ModuleTest(absltest.TestCase):
  """Unit tests for the flax.linen Module system.

  Covers parameter creation in compact methods and in setup(), module
  reuse and nesting, name-collision error reporting, dataclass attribute
  inheritance, hashing, repr tracing, and calls on unbound modules.
  """

  def test_init_module(self):
    # A compact module creates its 'bias' param on first call; a rewound
    # scope must reproduce the same output.
    rngkey = jax.random.PRNGKey(0)
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = DummyModule(parent=scope)(x)
    params = scope.variables()['params']
    y2 = DummyModule(parent=scope.rewound())(x)
    onp.testing.assert_allclose(y, y2)
    onp.testing.assert_allclose(y, jnp.array([2.]))
    self.assertEqual(params, {'bias': jnp.array([1.])})

  def test_arg_module(self):
    rngkey = jax.random.PRNGKey(0)
    x = jnp.ones((10,))
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = Dense(3, parent=scope)(x)
    params = scope.variables()['params']
    y2 = Dense(3, parent=scope.rewound())(x)
    onp.testing.assert_allclose(y, y2)
    self.assertEqual(params['kernel'].shape, (10, 3))

  def test_util_fun(self):
    # Submodules created inside a helper method get auto-numbered names.
    rngkey = jax.random.PRNGKey(0)
    class MLP(nn.Module):
      @compact
      def __call__(self, x):
        x = self._mydense(x)
        x = self._mydense(x)
        return x
      def _mydense(self, x):
        return Dense(3)(x)
    x = jnp.ones((10,))
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = MLP(parent=scope)(x)
    params = scope.variables()['params']
    y2 = MLP(parent=scope.rewound())(x)
    onp.testing.assert_allclose(y, y2)
    param_shape = jax.tree_map(jnp.shape, params)
    self.assertEqual(param_shape,
                     {'Dense_0': {'kernel': (10, 3)},
                      'Dense_1': {'kernel': (3, 3)}})

  def test_nested_module_reuse(self):
    # Calling the same MLP instance twice must reuse (not duplicate) params.
    rngkey = jax.random.PRNGKey(0)
    class MLP(nn.Module):
      @compact
      def __call__(self, x):
        x = self._mydense(x)
        x = self._mydense(x)
        return x
      def _mydense(self, x):
        return Dense(3)(x)
    class Top(nn.Module):
      @compact
      def __call__(self, x):
        mlp = MLP()
        y = mlp(x)
        z = mlp(x)
        return y + z
    x = jnp.ones((10,))
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = Top(parent=scope)(x)
    params = scope.variables()['params']
    y2 = Top(parent=scope.rewound())(x)
    onp.testing.assert_allclose(y, y2)
    param_shape = jax.tree_map(jnp.shape, params)
    self.assertEqual(param_shape,
                     {'MLP_0':
                        {'Dense_0': {'kernel': (10, 3)},
                        'Dense_1': {'kernel': (3, 3)}}})

  def test_setup_dict_assignment(self):
    # Dict/list attributes assigned in setup() expand to suffixed names.
    rngkey = jax.random.PRNGKey(0)
    class MLP(nn.Module):
      def setup(self):
        self.lyrs1 = {'a': Dense(3), 'b': Dense(3),}
        self.lyrs2 = [Dense(3), Dense(3)]
      def __call__(self, x):
        y = self.lyrs1['a'](x)
        z = self.lyrs1['b'](y)
        #w = self.lyrs2[0](x)
        return z
    x = jnp.ones((10,))
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = MLP(parent=scope)(x)
    params = scope.variables()['params']
    y2 = MLP(parent=scope.rewound())(x)
    onp.testing.assert_allclose(y, y2)
    param_shape = jax.tree_map(jnp.shape, params)
    self.assertEqual(param_shape,
                     {'lyrs1_a': {'kernel': (10, 3)},
                      'lyrs1_b': {'kernel': (3, 3)}})

  def test_setup_cloning(self):
    class MLP(nn.Module):
      def setup(self):
        self.dense = Dense(3)
    scope = Scope({})
    MLPclone = MLP(parent=scope).clone()

  def test_submodule_attr(self):
    rngkey = jax.random.PRNGKey(0)
    class Inner(nn.Module):
      @compact
      def __call__(self):
        self.param('x', lambda rng: 40)
    class Outer(nn.Module):
      inner: nn.Module
      def __call__(self):
        return self.inner()
    class Wrapper(nn.Module):
      def setup(self):
        self.inner = Inner()
        self.outer = Outer(self.inner)
      def __call__(self):
        return self.outer()
    scope = Scope({'params': {}}, rngs={'params': rngkey}, mutable=['params'])
    # Make sure this doesn't raise "Can't attach to remote parent"
    wrapper = Wrapper(parent=scope)
    wrapper()
    # Make sure that variables are registered at the level of the
    # Wrapper submodule, not the Outer submodule.
    self.assertEqual(40, scope.variables()['params']['inner']['x'])

  def test_param_in_setup(self):
    rngkey = jax.random.PRNGKey(0)
    class DummyModule(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = self.param('bias', initializers.ones, self.xshape)
      def __call__(self, x):
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = DummyModule(x.shape, parent=scope)(x)
    params = scope.variables()['params']
    y2 = DummyModule(x.shape, parent=scope.rewound())(x)
    onp.testing.assert_allclose(y, y2)
    onp.testing.assert_allclose(y, jnp.array([2.]))
    self.assertEqual(params, {'bias': jnp.array([1.])})

  def test_init_outside_setup_without_compact(self):
    # Params may only be created in setup() or a @compact method.
    rngkey = jax.random.PRNGKey(0)
    class DummyModule(nn.Module):
      def __call__(self, x):
        bias = self.param('bias', initializers.ones, x.shape)
        return x + bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'must be initialized.*setup'):
      y = DummyModule(parent=scope)(x)

  def test_init_outside_call(self):
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      @compact
      def __call__(self, x):
        bias = self.param('bias', initializers.ones, x.shape)
        return x + bias
      def foo(self, x):
        bias = self.param('bias', initializers.ones, x.shape)
        return x + bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'must be initialized.*setup'):
      y = Dummy(parent=scope).foo(x)

  def test_setup_call_var_collision(self):
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = self.param('bias', initializers.ones, self.xshape)
      @compact
      def __call__(self, x):
        bias = self.param('bias', initializers.ones, x.shape)
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'bias already in use'):
      y = Dummy(x.shape, parent=scope)(x)

  def test_setup_var_collision(self):
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = self.param('bias', initializers.ones, self.xshape)
        self.bias = self.param('bias', initializers.ones, self.xshape)
      def __call__(self, x):
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'bias already in use'):
      y = Dummy(x.shape, parent=scope)(x)

  def test_call_var_collision(self):
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      @compact
      def __call__(self, x):
        bias = self.param('bias', initializers.ones, self.xshape)
        bias = self.param('bias', initializers.ones, self.xshape)
        return x + bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'bias already in use'):
      y = Dummy(x.shape, parent=scope)(x)

  def test_setattr_name_var_disagreement(self):
    # The attribute name and the param name must agree in setup().
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = self.param('notbias', initializers.ones, self.xshape)
      def __call__(self, x):
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'notbias.*must equal.*bias'):
      y = Dummy(x.shape, parent=scope)(x)

  def test_submodule_var_collision(self):
    # Three collision variants: param-then-submodule, submodule named like a
    # param, param named like a submodule.
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = self.param('bias', initializers.ones, self.xshape)
        self.bias = DummyModule()
      def __call__(self, x):
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'name bias exists already'):
      y = Dummy(x.shape, parent=scope)(x)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = self.param('bias', initializers.ones, self.xshape)
      @compact
      def __call__(self, x):
        bias = DummyModule(name='bias')
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'name bias exists already'):
      y = Dummy(x.shape, parent=scope)(x)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = DummyModule()
      @compact
      def __call__(self, x):
        bias = self.param('bias', initializers.ones, self.xshape)
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'bias already'):
      y = Dummy(x.shape, parent=scope)(x)

  def test_setattr_name_submodule_redundant(self):
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = DummyModule(name='bias')
      def __call__(self, x):
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'In setup, assign names of Modules '
                                'via self.<name> and not using keyword argument name="<name>"'):
      y = Dummy(x.shape, parent=scope)(x)

  def test_attr_param_name_collision(self):
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      bias: bool
      def setup(self):
        self.bias = self.param('bias', initializers.ones, (3, 3))
      def __call__(self, x):
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'Name bias already in use'):
      y = Dummy(x.shape, parent=scope)(x)

  def test_attr_submodule_name_collision(self):
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      bias: bool
      def setup(self):
        self.bias = DummyModule(name='bias')
      def __call__(self, x):
        return self.bias(x)
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'bias exists already'):
      y = Dummy(x.shape, parent=scope)(x)

  def test_only_one_compact_method(self):
    # Defining two @compact methods on one Module must fail at class creation.
    with self.assertRaisesRegex(RuntimeError, '@compact'):
      class Dummy(nn.Module):
        @compact
        def call1(self):
          pass
        @compact
        def call2(self):
          pass

  def test_only_one_compact_method_subclass(self):
    class Dummy(nn.Module):
      @nn.compact
      def __call__(self):
        pass
    class SubDummy(Dummy):
      @nn.compact
      def __call__(self):
        super().__call__()
    scope = Scope(variables={})
    subdummy = SubDummy(parent=scope)
    # Make sure the @compact annotation is valid on both base class and subclass, as long
    # as its on the same method.
    subdummy()

  def test_forgotten_compact_annotation(self):
    class Bar(nn.Module):
      # user forgot to add @compact
      def __call__(self, x):
        return nn.Dense(1)(x)
    class Foo(nn.Module):
      @nn.compact
      def __call__(self, x):
        bar = Bar()
        x = bar(x)
        x = bar(x)
        return x
    with self.assertRaisesRegex(ValueError, '@compact'):
      Foo().init(random.PRNGKey(0), jnp.ones((1, 3)))

  def test_forgotten_compact_annotation_with_explicit_parent(self):
    class Bar(nn.Module):
      def __call__(self, x):
        return nn.Dense(1, parent=self)(x)
    class Foo(nn.Module):
      @nn.compact
      def __call__(self, x):
        bar = Bar()
        x = bar(x)
        x = bar(x)
        return x
    with self.assertRaisesRegex(ValueError, '@compact'):
      Foo().init(random.PRNGKey(0), jnp.ones((1, 3)))

  def test_numpy_array_shape_class_args(self):
    # Module attributes may hold numpy arrays (e.g. layer widths).
    class MLP(nn.Module):
      widths: Iterable
      @nn.compact
      def __call__(self, x):
        for width in self.widths[:-1]:
          x = nn.relu(nn.Dense(width)(x))
        return nn.Dense(self.widths[-1])(x)
    test = MLP(onp.array([3, 3], onp.int32))
    params = test.init({'params': random.PRNGKey(42)}, jnp.ones((3, 3)))
    _ = test.apply(params, jnp.ones((3, 3)))

  def test_get_local_methods(self):
    # _get_local_method_names must only report plain methods defined on the
    # class itself (not staticmethods, classmethods, or inherited ones).
    class Base:
      @staticmethod
      def bar(x):
        return x
      @classmethod
      def baz(cls, x):
        return x
      def bleep(self, x):
        return x
    class Derived1(Base):
      @staticmethod
      def bar2(x):
        return x
      @classmethod
      def baz2(cls, x):
        return x
      def bloop(self, x):
        return x
    class Derived2(Derived1):
      pass
    self.assertEqual(nn.module._get_local_method_names(Base), ('bleep',))
    self.assertEqual(nn.module._get_local_method_names(Derived1), ('bloop',))
    self.assertEqual(
        nn.module._get_local_method_names(Derived1, exclude=('bloop',)), ())
    self.assertEqual(nn.module._get_local_method_names(Derived2), ())

  def test_inheritance_dataclass_attribs(self):
    class Test(nn.Module):
      bar: int
      def __call__(self, x):
        return x
    class Test2(Test):
      baz: int
      def __call__(self, x):
        return x
    class Test3(Test):
      baz: int
      def __call__(self, x):
        return x
    key = random.PRNGKey(0)
    x = jnp.ones((5,))
    test1 = Test(bar=4)
    test2 = Test2(bar=4, baz=2)
    test3 = Test3(bar=4, baz=2)
    self.assertEqual(test1.init_with_output(key, x), (x, freeze({})))
    self.assertEqual(test2.init_with_output(key, x), (x, freeze({})))
    self.assertEqual(test3.init_with_output(key, x), (x, freeze({})))
    self.assertTrue(hasattr(test1, 'bar'))
    self.assertTrue(hasattr(test1, 'name'))
    self.assertTrue(hasattr(test1, 'parent'))
    self.assertTrue(hasattr(test2, 'bar'))
    self.assertTrue(hasattr(test2, 'baz'))
    self.assertTrue(hasattr(test2, 'name'))
    self.assertTrue(hasattr(test2, 'parent'))
    self.assertTrue(hasattr(test3, 'bar'))
    self.assertTrue(hasattr(test3, 'baz'))
    self.assertTrue(hasattr(test3, 'name'))
    self.assertTrue(hasattr(test3, 'parent'))
    self.assertEqual(
        list(Test.__dataclass_fields__.keys()),
        ['bar', 'parent', 'name'])
    self.assertEqual(
        list(Test2.__dataclass_fields__.keys()),
        ['bar', 'baz', 'parent', 'name'])
    self.assertEqual(
        list(Test3.__dataclass_fields__.keys()),
        ['bar', 'baz', 'parent', 'name'])

  def test_get_suffix_value_pairs(self):
    for x in [(), [], {}, None, 0, set()]:
      self.assertEqual(
          nn.module._get_suffix_value_pairs(x), [('', x)])
    self.assertEqual(
        nn.module._get_suffix_value_pairs(
            {'a': 1, 'b': 2}), [('_a', 1), ('_b', 2)])
    self.assertEqual(
        nn.module._get_suffix_value_pairs(
            [1, 2, 3]), [('_0', 1), ('_1', 2), ('_2', 3)])
    x1 = [nn.Dense(10), nn.relu, nn.Dense(10)]
    y1 = nn.module._get_suffix_value_pairs(x1)
    self.assertEqual(y1, [('_0', x1[0]), ('_1', x1[1]), ('_2', x1[2])])
    x2 = {'a': 1, 'b': {'c': nn.Dense(10), 'd': nn.relu}}
    y2 = nn.module._get_suffix_value_pairs(x2)
    self.assertEqual(y2,
        [('_a', 1), ('_b_c', x2['b']['c']), ('_b_d', x2['b']['d'])])

  def test_mixed_list_assignment_in_setup(self):
    class Test(nn.Module):
      def setup(self):
        self.layers = [nn.Dense(10), nn.relu, nn.Dense(10)]
      def __call__(self, x):
        for lyr in self.layers:
          x = lyr(x)
        return x
    x = random.uniform(random.PRNGKey(0), (5,5))
    variables = Test().init(random.PRNGKey(0), jnp.ones((5,5)))
    y = Test().apply(variables, x)
    m0 = variables['params']['layers_0']['kernel']
    m1 = variables['params']['layers_2']['kernel']
    self.assertTrue(jnp.all(y == jnp.dot(nn.relu(jnp.dot(x, m0)), m1)))

  def test_module_is_hashable(self):
    # Modules without variables hash by their dataclass fields.
    module_a = nn.Dense(10)
    module_a_2 = nn.Dense(10)
    module_b = nn.Dense(5)
    self.assertEqual(hash(module_a), hash(module_a_2))
    self.assertNotEqual(hash(module_a), hash(module_b))

  def test_module_with_scope_is_not_hashable(self):
    module_a = nn.Dense(10, parent=Scope({}))
    with self.assertRaisesWithLiteralMatch(ValueError, 'Can\'t call __hash__ on modules that hold variables.'):
      hash(module_a)

  def test_module_trace(self):
    # repr() inside __call__ must render the full attribute/children trace.
    class MLP(nn.Module):
      act: Callable = nn.relu
      sizes: Iterable[int] = (3, 2)
      @nn.compact
      def __call__(self, x):
        for size in self.sizes:
          x = nn.Dense(size)(x)
          x = self.act(x)
        return repr(self)
    mlp = MLP()
    expected_trace = (
"""MLP(
    # attributes
    act = relu
    sizes = (3, 2)
    # children
    Dense_0 = Dense(
        # attributes
        features = 3
        use_bias = True
        dtype = float32
        precision = None
        kernel_init = init
        bias_init = zeros
    )
    Dense_1 = Dense(
        # attributes
        features = 2
        use_bias = True
        dtype = float32
        precision = None
        kernel_init = init
        bias_init = zeros
    )
)""")
    x = jnp.ones((1, 2))
    trace, variables = mlp.init_with_output(random.PRNGKey(0), x)
    self.assertEqual(trace, expected_trace)
    trace = mlp.apply(variables, x)
    self.assertEqual(trace, expected_trace)

  def test_call_unbound_compact_module_methods(self):
    dense = Dense(3)
    with self.assertRaisesRegex(ValueError, "compact.*unbound module"):
      dense(jnp.ones((1, )))

  def test_call_unbound_has_variable(self):
    class EmptyModule(nn.Module):
      def foo(self):
        self.has_variable('bar', 'baz')
    empty = EmptyModule()
    with self.assertRaisesRegex(ValueError, "variable.*unbound module"):
      empty.foo()

  def test_call_unbound_make_rng(self):
    class EmptyModule(nn.Module):
      def foo(self):
        self.make_rng('bar')
    empty = EmptyModule()
    with self.assertRaisesRegex(ValueError, "RNGs.*unbound module"):
      empty.foo()

  def test_call_unbound_variables(self):
    class EmptyModule(nn.Module):
      def foo(self):
        self.variables
    empty = EmptyModule()
    with self.assertRaisesRegex(ValueError, "variables.*unbound module"):
      empty.foo()

  def test_call_unbound_noncompact_module_methods(self):
    class EmptyModule(nn.Module):
      foo: int = 3
      def bar(self):
        return self.foo
    empty = EmptyModule()
    # It's fine to call methods of unbound methods that don't depend on
    # attributes defined during `setup`
    self.assertEqual(empty.bar(), 3)

  # NOTE(review): this method name duplicates the one above; the second
  # definition shadows the first, so only one copy actually runs.
  def test_call_unbound_noncompact_module_methods(self):
    class EmptyModule(nn.Module):
      foo: int = 3
      def bar(self):
        return self.foo
    empty = EmptyModule()
    # It's fine to call methods of unbound methods that don't depend on
    # attributes defined during `setup`
    self.assertEqual(empty.bar(), 3)

  def test_call_unbound_noncompact_module_method_without_setup(self):
    class EmptyModule(nn.Module):
      def setup(self):
        self.setup_called = True
      def bar(self):
        return self.setup_called
    empty = EmptyModule()
    # `empty.setup()` hasn't been called yet because it doesn't have a scope.
    # it's fine to call methods but they won't have access to attributes defined
    # in `setup()`
    with self.assertRaisesRegex(AttributeError, "has no attribute 'setup_called'"):
      empty.bar()
# Run the absltest test runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
| [
"jax.random.PRNGKey",
"flax.linen.Dense",
"jax.config.parse_flags_with_absl",
"flax.linen.module._get_suffix_value_pairs",
"flax.core.Scope",
"numpy.testing.assert_allclose",
"absl.testing.absltest.main",
"jax.numpy.array",
"jax.tree_map",
"numpy.array",
"flax.core.freeze",
"flax.linen.module.... | [((973, 1007), 'jax.config.parse_flags_with_absl', 'jax.config.parse_flags_with_absl', ([], {}), '()\n', (1005, 1007), False, 'import jax\n'), ((1040, 1071), 'jax.config.enable_omnistaging', 'jax.config.enable_omnistaging', ([], {}), '()\n', (1069, 1071), False, 'import jax\n'), ((21403, 21418), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (21416, 21418), False, 'from absl.testing import absltest\n'), ((1444, 1462), 'jax.numpy.dot', 'jnp.dot', (['x', 'kernel'], {}), '(x, kernel)\n', (1451, 1462), True, 'import jax.numpy as jnp\n'), ((1559, 1580), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (1577, 1580), False, 'import jax\n'), ((1589, 1605), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (1598, 1605), True, 'import jax.numpy as jnp\n'), ((1617, 1666), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (1622, 1666), False, 'from flax.core import Scope, freeze\n'), ((1797, 1831), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['y', 'y2'], {}), '(y, y2)\n', (1824, 1831), True, 'import numpy as onp\n'), ((1983, 2004), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (2001, 2004), False, 'import jax\n'), ((2013, 2028), 'jax.numpy.ones', 'jnp.ones', (['(10,)'], {}), '((10,))\n', (2021, 2028), True, 'import jax.numpy as jnp\n'), ((2041, 2090), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (2046, 2090), False, 'from flax.core import Scope, freeze\n'), ((2215, 2249), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['y', 'y2'], {}), '(y, y2)\n', (2242, 2249), True, 'import numpy as onp\n'), ((2345, 2366), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (2363, 2366), False, 'import jax\n'), ((2576, 2591), 'jax.numpy.ones', 'jnp.ones', (['(10,)'], 
{}), '((10,))\n', (2584, 2591), True, 'import jax.numpy as jnp\n'), ((2604, 2653), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (2609, 2653), False, 'from flax.core import Scope, freeze\n'), ((2768, 2802), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['y', 'y2'], {}), '(y, y2)\n', (2795, 2802), True, 'import numpy as onp\n'), ((2821, 2852), 'jax.tree_map', 'jax.tree_map', (['jnp.shape', 'params'], {}), '(jnp.shape, params)\n', (2833, 2852), False, 'import jax\n'), ((3017, 3038), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (3035, 3038), False, 'import jax\n'), ((3397, 3412), 'jax.numpy.ones', 'jnp.ones', (['(10,)'], {}), '((10,))\n', (3405, 3412), True, 'import jax.numpy as jnp\n'), ((3425, 3474), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (3430, 3474), False, 'from flax.core import Scope, freeze\n'), ((3589, 3623), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['y', 'y2'], {}), '(y, y2)\n', (3616, 3623), True, 'import numpy as onp\n'), ((3642, 3673), 'jax.tree_map', 'jax.tree_map', (['jnp.shape', 'params'], {}), '(jnp.shape, params)\n', (3654, 3673), False, 'import jax\n'), ((3860, 3881), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (3878, 3881), False, 'import jax\n'), ((4172, 4187), 'jax.numpy.ones', 'jnp.ones', (['(10,)'], {}), '((10,))\n', (4180, 4187), True, 'import jax.numpy as jnp\n'), ((4200, 4249), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (4205, 4249), False, 'from flax.core import Scope, freeze\n'), ((4364, 4398), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['y', 'y2'], {}), '(y, y2)\n', (4391, 4398), True, 'import numpy as onp\n'), ((4417, 4448), 'jax.tree_map', 
'jax.tree_map', (['jnp.shape', 'params'], {}), '(jnp.shape, params)\n', (4429, 4448), False, 'import jax\n'), ((4684, 4693), 'flax.core.Scope', 'Scope', (['{}'], {}), '({})\n', (4689, 4693), False, 'from flax.core import Scope, freeze\n'), ((4782, 4803), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (4800, 4803), False, 'import jax\n'), ((5210, 5276), 'flax.core.Scope', 'Scope', (["{'params': {}}"], {'rngs': "{'params': rngkey}", 'mutable': "['params']"}), "({'params': {}}, rngs={'params': rngkey}, mutable=['params'])\n", (5215, 5276), False, 'from flax.core import Scope, freeze\n'), ((5626, 5647), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (5644, 5647), False, 'import jax\n'), ((5867, 5883), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (5876, 5883), True, 'import jax.numpy as jnp\n'), ((5895, 5944), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (5900, 5944), False, 'from flax.core import Scope, freeze\n'), ((6093, 6127), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['y', 'y2'], {}), '(y, y2)\n', (6120, 6127), True, 'import numpy as onp\n'), ((6303, 6324), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (6321, 6324), False, 'import jax\n'), ((6482, 6498), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (6491, 6498), True, 'import jax.numpy as jnp\n'), ((6510, 6559), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (6515, 6559), False, 'from flax.core import Scope, freeze\n'), ((6724, 6745), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (6742, 6745), False, 'import jax\n'), ((7022, 7038), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (7031, 7038), True, 'import jax.numpy as jnp\n'), ((7050, 7099), 'flax.core.Scope', 'Scope', (['{}', 
"{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (7055, 7099), False, 'from flax.core import Scope, freeze\n'), ((7269, 7290), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (7287, 7290), False, 'import jax\n'), ((7581, 7597), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (7590, 7597), True, 'import jax.numpy as jnp\n'), ((7609, 7658), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (7614, 7658), False, 'from flax.core import Scope, freeze\n'), ((7821, 7842), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (7839, 7842), False, 'import jax\n'), ((8127, 8143), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (8136, 8143), True, 'import jax.numpy as jnp\n'), ((8155, 8204), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (8160, 8204), False, 'from flax.core import Scope, freeze\n'), ((8366, 8387), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (8384, 8387), False, 'import jax\n'), ((8649, 8665), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (8658, 8665), True, 'import jax.numpy as jnp\n'), ((8677, 8726), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (8682, 8726), False, 'from flax.core import Scope, freeze\n'), ((8899, 8920), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (8917, 8920), False, 'import jax\n'), ((9137, 9153), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (9146, 9153), True, 'import jax.numpy as jnp\n'), ((9165, 9214), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (9170, 9214), False, 'from flax.core import 
Scope, freeze\n'), ((9387, 9408), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (9405, 9408), False, 'import jax\n'), ((9656, 9672), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (9665, 9672), True, 'import jax.numpy as jnp\n'), ((9684, 9733), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (9689, 9733), False, 'from flax.core import Scope, freeze\n'), ((10117, 10133), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (10126, 10133), True, 'import jax.numpy as jnp\n'), ((10145, 10194), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (10150, 10194), False, 'from flax.core import Scope, freeze\n'), ((10567, 10583), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (10576, 10583), True, 'import jax.numpy as jnp\n'), ((10595, 10644), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (10600, 10644), False, 'from flax.core import Scope, freeze\n'), ((10813, 10834), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (10831, 10834), False, 'import jax\n'), ((11022, 11038), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (11031, 11038), True, 'import jax.numpy as jnp\n'), ((11050, 11099), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (11055, 11099), False, 'from flax.core import Scope, freeze\n'), ((11354, 11375), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (11372, 11375), False, 'import jax\n'), ((11576, 11592), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (11585, 11592), True, 'import jax.numpy as jnp\n'), ((11604, 11653), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], 
{'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (11609, 11653), False, 'from flax.core import Scope, freeze\n'), ((11831, 11852), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (11849, 11852), False, 'import jax\n'), ((12031, 12047), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (12040, 12047), True, 'import jax.numpy as jnp\n'), ((12059, 12108), 'flax.core.Scope', 'Scope', (['{}', "{'params': rngkey}"], {'mutable': "['params']"}), "({}, {'params': rngkey}, mutable=['params'])\n", (12064, 12108), False, 'from flax.core import Scope, freeze\n'), ((12713, 12732), 'flax.core.Scope', 'Scope', ([], {'variables': '{}'}), '(variables={})\n', (12718, 12732), False, 'from flax.core import Scope, freeze\n'), ((15272, 15289), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (15286, 15289), False, 'from jax import random\n'), ((15298, 15312), 'jax.numpy.ones', 'jnp.ones', (['(5,)'], {}), '((5,))\n', (15306, 15312), True, 'import jax.numpy as jnp\n'), ((16895, 16932), 'flax.linen.module._get_suffix_value_pairs', 'nn.module._get_suffix_value_pairs', (['x1'], {}), '(x1)\n', (16928, 16932), True, 'from flax import linen as nn\n'), ((17072, 17109), 'flax.linen.module._get_suffix_value_pairs', 'nn.module._get_suffix_value_pairs', (['x2'], {}), '(x2)\n', (17105, 17109), True, 'from flax import linen as nn\n'), ((17838, 17850), 'flax.linen.Dense', 'nn.Dense', (['(10)'], {}), '(10)\n', (17846, 17850), True, 'from flax import linen as nn\n'), ((17868, 17880), 'flax.linen.Dense', 'nn.Dense', (['(10)'], {}), '(10)\n', (17876, 17880), True, 'from flax import linen as nn\n'), ((17896, 17907), 'flax.linen.Dense', 'nn.Dense', (['(5)'], {}), '(5)\n', (17904, 17907), True, 'from flax import linen as nn\n'), ((19056, 19072), 'jax.numpy.ones', 'jnp.ones', (['(1, 2)'], {}), '((1, 2))\n', (19064, 19072), True, 'import jax.numpy as jnp\n'), ((1353, 1380), 'jax.nn.initializers.lecun_normal', 'initializers.lecun_normal', 
([], {}), '()\n', (1378, 1380), False, 'from jax.nn import initializers\n'), ((1867, 1883), 'jax.numpy.array', 'jnp.array', (['[2.0]'], {}), '([2.0])\n', (1876, 1883), True, 'import jax.numpy as jnp\n'), ((6163, 6179), 'jax.numpy.array', 'jnp.array', (['[2.0]'], {}), '([2.0])\n', (6172, 6179), True, 'import jax.numpy as jnp\n'), ((14050, 14078), 'numpy.array', 'onp.array', (['[3, 3]', 'onp.int32'], {}), '([3, 3], onp.int32)\n', (14059, 14078), True, 'import numpy as onp\n'), ((14135, 14151), 'jax.numpy.ones', 'jnp.ones', (['(3, 3)'], {}), '((3, 3))\n', (14143, 14151), True, 'import jax.numpy as jnp\n'), ((14180, 14196), 'jax.numpy.ones', 'jnp.ones', (['(3, 3)'], {}), '((3, 3))\n', (14188, 14196), True, 'import jax.numpy as jnp\n'), ((14656, 14695), 'flax.linen.module._get_local_method_names', 'nn.module._get_local_method_names', (['Base'], {}), '(Base)\n', (14689, 14695), True, 'from flax import linen as nn\n'), ((14730, 14773), 'flax.linen.module._get_local_method_names', 'nn.module._get_local_method_names', (['Derived1'], {}), '(Derived1)\n', (14763, 14773), True, 'from flax import linen as nn\n'), ((14817, 14880), 'flax.linen.module._get_local_method_names', 'nn.module._get_local_method_names', (['Derived1'], {'exclude': "('bloop',)"}), "(Derived1, exclude=('bloop',))\n", (14850, 14880), True, 'from flax import linen as nn\n'), ((14907, 14950), 'flax.linen.module._get_local_method_names', 'nn.module._get_local_method_names', (['Derived2'], {}), '(Derived2)\n', (14940, 14950), True, 'from flax import linen as nn\n'), ((16625, 16676), 'flax.linen.module._get_suffix_value_pairs', 'nn.module._get_suffix_value_pairs', (["{'a': 1, 'b': 2}"], {}), "({'a': 1, 'b': 2})\n", (16658, 16676), True, 'from flax import linen as nn\n'), ((16745, 16789), 'flax.linen.module._get_suffix_value_pairs', 'nn.module._get_suffix_value_pairs', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (16778, 16789), True, 'from flax import linen as nn\n'), ((16849, 16861), 'flax.linen.Dense', 'nn.Dense', 
(['(10)'], {}), '(10)\n', (16857, 16861), True, 'from flax import linen as nn\n'), ((16872, 16884), 'flax.linen.Dense', 'nn.Dense', (['(10)'], {}), '(10)\n', (16880, 16884), True, 'from flax import linen as nn\n'), ((17486, 17503), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (17500, 17503), False, 'from jax import random\n'), ((17540, 17557), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (17554, 17557), False, 'from jax import random\n'), ((17559, 17575), 'jax.numpy.ones', 'jnp.ones', (['(5, 5)'], {}), '((5, 5))\n', (17567, 17575), True, 'import jax.numpy as jnp\n'), ((19117, 19134), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (19131, 19134), False, 'from jax import random\n'), ((1922, 1938), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (1931, 1938), True, 'import jax.numpy as jnp\n'), ((6218, 6234), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (6227, 6234), True, 'import jax.numpy as jnp\n'), ((13301, 13318), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (13315, 13318), False, 'from jax import random\n'), ((13320, 13336), 'jax.numpy.ones', 'jnp.ones', (['(1, 3)'], {}), '((1, 3))\n', (13328, 13336), True, 'import jax.numpy as jnp\n'), ((13729, 13746), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (13743, 13746), False, 'from jax import random\n'), ((13748, 13764), 'jax.numpy.ones', 'jnp.ones', (['(1, 3)'], {}), '((1, 3))\n', (13756, 13764), True, 'import jax.numpy as jnp\n'), ((14114, 14132), 'jax.random.PRNGKey', 'random.PRNGKey', (['(42)'], {}), '(42)\n', (14128, 14132), False, 'from jax import random\n'), ((15458, 15468), 'flax.core.freeze', 'freeze', (['{}'], {}), '({})\n', (15464, 15468), False, 'from flax.core import Scope, freeze\n'), ((15528, 15538), 'flax.core.freeze', 'freeze', (['{}'], {}), '({})\n', (15534, 15538), False, 'from flax.core import Scope, freeze\n'), ((15598, 15608), 'flax.core.freeze', 'freeze', (['{}'], {}), 
'({})\n', (15604, 15608), False, 'from flax.core import Scope, freeze\n'), ((16546, 16582), 'flax.linen.module._get_suffix_value_pairs', 'nn.module._get_suffix_value_pairs', (['x'], {}), '(x)\n', (16579, 16582), True, 'from flax import linen as nn\n'), ((17034, 17046), 'flax.linen.Dense', 'nn.Dense', (['(10)'], {}), '(10)\n', (17042, 17046), True, 'from flax import linen as nn\n'), ((18107, 18116), 'flax.core.Scope', 'Scope', (['{}'], {}), '({})\n', (18112, 18116), False, 'from flax.core import Scope, freeze\n'), ((19424, 19438), 'jax.numpy.ones', 'jnp.ones', (['(1,)'], {}), '((1,))\n', (19432, 19438), True, 'import jax.numpy as jnp\n'), ((13064, 13075), 'flax.linen.Dense', 'nn.Dense', (['(1)'], {}), '(1)\n', (13072, 13075), True, 'from flax import linen as nn\n'), ((13477, 13501), 'flax.linen.Dense', 'nn.Dense', (['(1)'], {'parent': 'self'}), '(1, parent=self)\n', (13485, 13501), True, 'from flax import linen as nn\n'), ((14006, 14031), 'flax.linen.Dense', 'nn.Dense', (['self.widths[-1]'], {}), '(self.widths[-1])\n', (14014, 14031), True, 'from flax import linen as nn\n'), ((17327, 17339), 'flax.linen.Dense', 'nn.Dense', (['(10)'], {}), '(10)\n', (17335, 17339), True, 'from flax import linen as nn\n'), ((17350, 17362), 'flax.linen.Dense', 'nn.Dense', (['(10)'], {}), '(10)\n', (17358, 17362), True, 'from flax import linen as nn\n'), ((18469, 18483), 'flax.linen.Dense', 'nn.Dense', (['size'], {}), '(size)\n', (18477, 18483), True, 'from flax import linen as nn\n'), ((13971, 13986), 'flax.linen.Dense', 'nn.Dense', (['width'], {}), '(width)\n', (13979, 13986), True, 'from flax import linen as nn\n'), ((17762, 17776), 'jax.numpy.dot', 'jnp.dot', (['x', 'm0'], {}), '(x, m0)\n', (17769, 17776), True, 'import jax.numpy as jnp\n')] |
import numpy as np
import argparse
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from torch.utils.tensorboard import SummaryWriter
import time
import torch
import random
import os
from transport import *
from models import *
import torch.nn.functional as F
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import json
# TODO: move these hard-coded run settings into a config file
torch.set_num_threads(8)  # cap torch intra-op CPU parallelism at 8 threads
def seed_torch(seed):
    """Make runs reproducible: seed Python, NumPy and all torch RNGs.

    Also forces cuDNN into deterministic mode (disabling autotuning),
    trading a little speed for bitwise-repeatable results.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers the multi-GPU case
    # Deterministic kernels only; benchmark mode would pick fastest (nondeterministic) ones.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def load_data(root):
    """Load a preprocessed dataset from directory ``root``.

    Expects ``X.npy`` (features), ``Y.npy`` (labels), ``U.npy`` (time values)
    and ``indices.json`` (per-domain sample index lists) inside ``root``.

    Returns:
        Tuple ``(X, U, Y, indices)`` of the three arrays and the decoded JSON.
    """
    X = np.load("{}/X.npy".format(root))
    Y = np.load("{}/Y.npy".format(root))
    U = np.load("{}/U.npy".format(root))
    # Use a context manager so the JSON file handle is closed promptly;
    # the original `json.load(open(...))` leaked it to the GC.
    with open("{}/indices.json".format(root)) as fp:
        indices = json.load(fp)
    return X, U, Y, indices
def init_weights(model):
    """Initialise Linear layers: Kaiming-normal weights, bias filled with 0.01.

    Intended for use via ``module.apply(init_weights)``; non-Linear modules
    are left untouched.
    """
    # isinstance is the idiomatic, subclass-friendly check
    # (original used the exact-type comparison `type(model) == nn.Linear`).
    if isinstance(model, nn.Linear):
        nn.init.kaiming_normal_(model.weight)
        model.bias.data.fill_(0.01)
def plot_decision_boundary(c, u, X, Y, name):
    """Plot classifier ``c``'s decision regions for domain ``u`` and save a PDF.

    Args:
        c: time-conditioned binary classifier called as ``c(features, times)``
           and returning a logit per sample.
        u: domain index; also reused as the time value, normalised by 11.
        X: per-domain feature arrays; ``X[u]`` is scattered over the contour.
        Y: per-domain one-hot labels.
        name: tag used in the plot title and the output file name.

    Side effects: prints the labels and writes ``final_plots/<name>_<u>.pdf``.
    """
    y = np.argmax(Y[u], -1)
    print(y)
    # Set min and max values and give it some padding
    x_min, x_max = -2.5, 2.0
    y_min, y_max = -2.0, 2.0
    h = 0.005
    # Generate a grid of points with distance h between them
    xx,yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid; each grid point is paired
    # with the normalised time u/11.  NOTE(review): 900*800 is hard-coded to the
    # grid size (4.5/0.005 by 4.0/0.005) — must be updated if the ranges change.
    Z = torch.round(F.sigmoid(c(torch.FloatTensor(np.c_[xx.ravel(), yy.ravel()]), torch.tensor([[u/11]]*900*800)).detach())).numpy()
    Z = Z.reshape(xx.shape)
    #Z = np.zeros_like(Z)
    # Plot the contour and training examples
    #sns.heatmap(Z)
    #plt.show()
    plt.title('%dth domain - %s' %(u, name))
    plt.contourf(xx, yy, Z, cmap=plt.cm.Blues, vmin=-1, vmax=2)
    plt.scatter(X[u][:, 0], X[u][:, 1], c=y, cmap=plt.cm.binary)
    plt.savefig('final_plots/%s_%f.pdf' %(name, u))
def plot_overlapping_boundary(c_1, c_2, u_1, u_2, X, Y, name):
    """Overlay the decision boundaries of two classifiers on two domains.

    Draws the zero-level contour of ``c_2`` evaluated at time ``u_1`` and of
    ``c_1`` evaluated at time ``u_2`` on the same axes, scatters both domains'
    samples, and saves ``final_plots/<name>_<u_1>_<u_2>.pdf``.

    NOTE(review): classifiers and times are cross-paired (Z1 = c_2 at u_1,
    Z2 = c_1 at u_2) — confirm this is intentional and not a swap.
    """
    # LaTeX text rendering / publication-style fonts for the figure.
    matplotlib.rcParams['text.usetex'] = True
    plt.rc('font', family='serif', size=24, weight='bold')
    plt.rc('xtick', labelsize=20)
    plt.rc('ytick', labelsize=20)
    matplotlib.rc('text', usetex=True)
    # NOTE(review): each assignment below OVERWRITES the LaTeX preamble — only
    # the last one (\boldmath) survives; the amsmath/bm packages are never loaded.
    matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath,amsfonts}"]
    matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{bm}"]
    plt.rc('axes', linewidth=1)
    plt.rc('font', weight='bold')
    matplotlib.rcParams['text.latex.preamble'] = [r'\boldmath']
    Y2 = np.argmax(Y[u_2], -1)
    Y1 = np.argmax(Y[u_1], -1)
    x_min, x_max = -2.5, 2.0
    y_min, y_max = -2.0, 2.0
    h = 0.005
    xx,yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Evaluate both classifiers on the full grid; time is normalised by 11.
    # NOTE(review): 900*900 is hard-coded to the grid size — verify against h/ranges.
    Z1 = c_2(torch.FloatTensor(np.c_[xx.ravel(), yy.ravel()]), torch.tensor([[u_1/11]]*900*900)).detach().numpy()
    Z1 = Z1.reshape(xx.shape)
    Z2 = c_1(torch.FloatTensor(np.c_[xx.ravel(), yy.ravel()]), torch.tensor([[u_2/11]]*900*900)).detach().numpy()
    Z2 = Z2.reshape(xx.shape)
    # Plot the contour and training examples
    #sns.heatmap(Z)
    #plt.show()
    #print(Z)
    #Z = (Z1 + 2*Z2)/3.0
    '''
    y1 = []
    y2 = []
    for i, x in enumerate(xx[0]):
        y = Z1[:,i]
        idx = np.where(y == 1.0)[0]
        y1.append(yy[:,0][int(np.min(idx))])
        y = Z2[:,i]
        idx = np.where(y == 1.0)[0]
        y2.append(yy[:,0][int(np.min(idx))])
    '''
    plt.xlabel(r'\textbf{feature} $x_1$')
    plt.ylabel(r'\textbf{feature} $x_2$')
    plt.xlim(-2.5, 2.0)
    plt.ylim(-2.0, 2.5)
    #plt.plot(xx[0], y1, 'c--', linewidth=3.0)
    #plt.plot(xx[0], y2, color='#00004c', linewidth=3.0)
    # Zero-level sets of the two classifiers drawn as contour lines.
    plt.contour(xx, yy, Z1, levels=[0], cmap=plt.cm.bwr, vmin=-1.0, vmax=2.0)
    plt.contour(xx, yy, Z2, levels=[0], cmap=plt.cm.seismic)
    prev = plt.scatter(X[u_2][:, 0], X[u_2][:, 1], s=25, c=Y2, cmap=plt.cm.seismic, alpha=0.7)
    cur = plt.scatter(X[u_1][:, 0], X[u_1][:, 1], s=25, c=Y1, cmap=plt.cm.bwr, vmin=-1.0, vmax=2.0, alpha=0.7)
    plt.gcf().subplots_adjust(left=0.15, bottom=0.15)
    plt.savefig('final_plots/%s_%f_%f.pdf' %(name, u_1, u_2))
    plt.clf()
class PredictionModelNN(nn.Module):
    """Feed-forward prediction network with optional time conditioning.

    Builds an MLP ``input_shape -> hidden_shapes... -> output_shape``.  When
    ``time_conditioning`` is set, the timestamp is concatenated to the input
    in :meth:`forward` and hidden activations are ``TimeReLU`` units that
    receive the (optionally Time2Vec-embedded) time; otherwise plain
    (Leaky)ReLU activations are used.

    Keyword args (all optional, default False):
        time_conditioning: condition activations on the time input.
        leaky: use leaky activations.
        use_time2vec: embed the scalar time with a Time2Vec layer (dim 8).
        task: ``'regression'`` sets the ``regress`` flag (kept for
            compatibility; not consulted in :meth:`forward`).
    """

    def __init__(self, input_shape, hidden_shapes, output_shape, **kwargs):
        super(PredictionModelNN, self).__init__()
        # Robust kwarg access: the original did kwargs['leaky'] unconditionally,
        # which raised KeyError whenever 'leaky' was not supplied.
        self.time_conditioning = bool(kwargs.get('time_conditioning', False))
        self.leaky = bool(kwargs.get('leaky', False))
        use_time2vec = bool(kwargs.get('use_time2vec', False))
        self.regress = kwargs.get('task') == 'regression'

        # Time enters either as the raw scalar (shape 1) or embedded (shape 8).
        if use_time2vec:
            self.time_shape = 8
            self.time2vec = Time2Vec(1, 8)
        else:
            self.time_shape = 1
            self.time2vec = None

        self.layers = nn.ModuleList()
        self.relus = nn.ModuleList()
        self.input_shape = input_shape
        self.hidden_shapes = hidden_shapes
        self.output_shape = output_shape

        if len(self.hidden_shapes) == 0:
            # Single-layer network: no TimeReLU, plain LeakyReLU regardless of flags.
            self.layers.append(nn.Linear(input_shape, output_shape))
            self.relus.append(nn.LeakyReLU())
        else:
            self.layers.append(nn.Linear(self.input_shape, self.hidden_shapes[0]))
            self.relus.append(self._make_activation(self.hidden_shapes[0]))
            for i in range(len(self.hidden_shapes) - 1):
                self.layers.append(nn.Linear(self.hidden_shapes[i], self.hidden_shapes[i + 1]))
                self.relus.append(self._make_activation(self.hidden_shapes[i + 1]))
            self.layers.append(nn.Linear(self.hidden_shapes[-1], self.output_shape))

        self.apply(init_weights)
        # (Removed the debug loop that printed the first layer's parameters.)

    def _make_activation(self, data_shape):
        """Return the activation for a hidden layer of width ``data_shape``."""
        if self.time_conditioning:
            return TimeReLU(data_shape=data_shape, time_shape=self.time_shape, leaky=self.leaky)
        return nn.LeakyReLU() if self.leaky else nn.ReLU()

    def forward(self, X, times=None, logits=False, reps=False):
        """Forward pass; returns raw scores (no final softmax/sigmoid).

        Args:
            X: input features.
            times: per-sample time values; required when time-conditioned
                (appended to X as an extra feature and fed to each TimeReLU).
            logits, reps: accepted for interface compatibility; not used.
        """
        if self.time_conditioning:
            # Time is appended as an extra input feature, so callers must size
            # input_shape to include it.
            X = torch.cat([X, times], dim=-1)
        if self.time2vec is not None:
            times = self.time2vec(times)
        for i in range(0, len(self.layers) - 1):
            X = self.layers[i](X)
            if self.time_conditioning:
                X = self.relus[i](X, times)
            else:
                X = self.relus[i](X)
        X = self.layers[-1](X)
        return X
"""
Method to train a classifier with a minibatch of examples
Arguments:
X: Training features
Y: Training labels
classifier: Model
classifier_optimizer: Optimizer
Returns:
prediction loss
"""
def train_classifier(X, Y, classifier, classifier_optimizer, binary):
classifier_optimizer.zero_grad()
if binary:
Y_pred = torch.sigmoid(classifier(X))
Y_true = torch.argmax(Y, 1).view(-1,1).float()
pred_loss = -torch.mean(Y_true * torch.log(Y_pred + 1e-15) + (1 - Y_true) * torch.log(1 - Y_pred + 1e-15))
else:
Y_pred = classifier(X)
Y_pred = torch.softmax(Y_pred, -1)
pred_loss = -torch.mean(Y * torch.log(Y_pred + 1e-15))
pred_loss.backward()
classifier_optimizer.step()
return pred_loss
def train(X_data, Y_data, U_data, source_indices, target_indices, args, binary):
    """Train a classifier on OT-transported source domains; evaluate on the target.

    The first source domain (plus auxiliaries) is aligned with the target via
    ``transform_samples_reg_otda``, the transported samples are pooled, a
    dataset-specific classifier is trained on them, and target-domain accuracy,
    confusion matrix and classification report are appended to ``cdot_<data>``.

    Args:
        X_data, Y_data, U_data: per-domain features, one-hot labels and times.
        source_indices: training-domain indices; the first is the primary
            source, the rest are auxiliary.
        target_indices: evaluation-domain indices; only the first is used.
        args: parsed CLI namespace (uses ``data``, ``bs``, ``epoch``).
        binary: True for binary tasks (sigmoid head), False for multi-class.
    """
    # ---- Assemble source / auxiliary / target splits -----------------------
    X_source = X_data[source_indices[0]]
    if args.data == "mnist":
        X_source = X_source.reshape(-1, 784)  # OT runs on flat vectors
    Y_source = np.argmax(Y_data[source_indices[0]], -1)
    X_aux = list(X_data[source_indices[1:]])
    Y_aux = list(Y_data[source_indices[1:]])
    if args.data == "mnist":
        X_aux = [x.reshape(-1, 784) for x in X_aux]
    Y_aux = [np.argmax(y, -1) for y in Y_aux]
    X_target = X_data[target_indices[0]]
    if args.data == "mnist":
        X_target = X_target.reshape(-1, 784)
    Y_target = np.argmax(Y_data[target_indices[0]], -1)

    # ---- Optimal-transport alignment of source(s) onto the target ----------
    X_source, X_aux, X_target = transform_samples_reg_otda(X_source, Y_source, X_aux, Y_aux, X_target, Y_target)
    if args.data == "mnist":
        # Back to image tensors for the convolutional classifier.
        X_source = X_source.reshape(-1, 1, 28, 28)
        X_target = X_target.reshape(-1, 1, 28, 28)
        X_aux = [x.reshape(-1, 1, 28, 28) for x in X_aux]

    # Pool primary + auxiliary sources, then re-encode labels as one-hot.
    X_source = np.vstack([X_source] + X_aux)
    Y_source = np.hstack([Y_source] + Y_aux)
    num_classes = np.max(Y_source) + 1
    Y_source = np.eye(num_classes)[Y_source]
    Y_target = np.eye(num_classes)[Y_target]

    BATCH_SIZE = args.bs
    EPOCH = args.epoch

    # ---- Dataset-specific classifier / optimiser ---------------------------
    if args.data == "moons":
        classifier = PredictionModelNN(2, [50, 50], 1, leaky=True)
        classifier_optimizer = torch.optim.Adam(classifier.parameters(), 5e-3)
    elif args.data == "mnist":
        model_kwargs = {"block": ResidualBlock,
                        "layers": [2, 2, 2, 2],
                        "time_conditioning": False,
                        "leaky": False,
                        "append_time": False,
                        "use_time2vec": False
                        }
        classifier = ResNet(**model_kwargs)
        classifier_optimizer = torch.optim.Adam(classifier.parameters(), 1e-4)
    elif args.data == "onp":
        classifier = PredictionModelNN(58, [200], 1, leaky=True)
        classifier_optimizer = torch.optim.Adam(classifier.parameters(), 1e-3)
    elif args.data == "elec":
        classifier = PredictionModelNN(8, [128, 128], 1, leaky=True)
        classifier_optimizer = torch.optim.Adam(classifier.parameters(), 5e-3)

    # Kept for its tensorboard-directory side effect (nothing is logged to it here).
    writer = SummaryWriter(comment='{}'.format(time.time()))
    past_data = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(torch.tensor(X_source).float(), torch.tensor(Y_source).float()), BATCH_SIZE, False)

    print('------------------------------------------------------------------------------------------')
    print('TRAINING')
    print('------------------------------------------------------------------------------------------')
    for epoch in range(EPOCH):
        loss = 0
        for batch_X, batch_Y in past_data:
            loss += train_classifier(batch_X, batch_Y, classifier, classifier_optimizer, binary)
        # Log every epoch (the original's `epoch % 1 == 0` was always true).
        print('Epoch %d - %f' % (epoch, loss.detach().cpu().numpy()))

    print('------------------------------------------------------------------------------------------')
    print('TESTING')
    print('------------------------------------------------------------------------------------------')
    target_dataset = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(torch.tensor(X_target).float(), torch.tensor(Y_target).float()), BATCH_SIZE, False)
    Y_pred = []
    for batch_X, batch_Y in target_dataset:
        batch_Y_pred = classifier(batch_X)
        if binary:
            batch_Y_pred = torch.sigmoid(batch_Y_pred).detach().cpu().numpy()
        else:
            batch_Y_pred = torch.softmax(batch_Y_pred, -1).detach().cpu().numpy()
        Y_pred.append(batch_Y_pred)
    Y_pred = np.vstack(Y_pred)
    Y_true = np.argmax(Y_target, -1)
    if binary:
        Y_pred = np.array([0 if y < 0.5 else 1 for y in Y_pred])
    else:
        Y_pred = np.argmax(Y_pred, -1)
    # Open the log lazily and close it deterministically (the original opened
    # it at function entry and never closed the handle).
    with open('cdot_%s' % (args.data), 'a+') as log_file:
        print(accuracy_score(Y_true, Y_pred), file=log_file)
        print(confusion_matrix(Y_true, Y_pred), file=log_file)
        print(classification_report(Y_true, Y_pred), file=log_file)
def main(args):
    """Entry point: seed RNGs, load the requested dataset and run CDOT training.

    Args:
        args: parsed CLI namespace with ``data``, ``seed``, ``use_cuda``
            (plus ``bs``/``epoch`` consumed downstream by ``train``).

    The four per-dataset branches of the original were copy-paste identical
    except for constants, so they are collapsed into one config table.
    """
    seed_torch(args.seed)
    args.device = "cuda:0" if args.use_cuda else "cpu"
    # data -> (root dir, #classes, source domain indices, target domain indices, binary task?)
    configs = {
        "moons": ('../../data/Moons/processed', 2, list(range(0, 9)), [9], True),
        "mnist": ('../../data/MNIST/processed', 10, list(range(0, 4)), [4], False),
        "onp": ('../../data/ONP/processed', 2, list(range(0, 5)), [5], True),
        "elec": ('../../data/Elec2', 2, list(range(20, 29)), [29], True),
    }
    if args.data not in configs:
        return  # original silently did nothing for unknown datasets
    root, num_classes, source_indices, target_indices, binary = configs[args.data]
    X_data, U_data, Y_data, indices = load_data(root)
    Y_data = np.eye(num_classes)[Y_data]  # one-hot encode labels
    # Regroup flat arrays into per-domain arrays using the index lists.
    X_data = np.array([X_data[ids] for ids in indices])
    Y_data = np.array([Y_data[ids] for ids in indices])
    U_data = np.array([U_data[ids] for ids in indices])
    train(X_data, Y_data, U_data, source_indices, target_indices, args, binary)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data', help="String, needs to be one of mnist, sleep, moons, cars")
parser.add_argument('--epoch', default=5, help="Needs to be int, number of epochs for classifier",type=int)
parser.add_argument('--bs', default=100, help="Batch size",type=int)
parser.add_argument('--use_cuda', action='store_true', help="Should we use a GPU")
parser.add_argument('--seed', default=0, type=int)
args = parser.parse_args()
main(args) | [
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"sklearn.metrics.classification_report",
"torch.softmax",
"numpy.array",
"matplotlib.rc",
"numpy.arange",
"matplotlib.pyplot.contourf",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"torch.set_num_threads",
"numpy.max",
"matplotlib.pyplot.... | [((462, 486), 'torch.set_num_threads', 'torch.set_num_threads', (['(8)'], {}), '(8)\n', (483, 486), False, 'import torch\n'), ((514, 531), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (525, 531), False, 'import random\n'), ((582, 602), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (596, 602), True, 'import numpy as np\n'), ((607, 630), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (624, 630), False, 'import torch\n'), ((635, 663), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (657, 663), False, 'import torch\n'), ((668, 700), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (694, 700), False, 'import torch\n'), ((1265, 1284), 'numpy.argmax', 'np.argmax', (['Y[u]', '(-1)'], {}), '(Y[u], -1)\n', (1274, 1284), True, 'import numpy as np\n'), ((1894, 1935), 'matplotlib.pyplot.title', 'plt.title', (["('%dth domain - %s' % (u, name))"], {}), "('%dth domain - %s' % (u, name))\n", (1903, 1935), True, 'import matplotlib.pyplot as plt\n'), ((1939, 1998), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'Z'], {'cmap': 'plt.cm.Blues', 'vmin': '(-1)', 'vmax': '(2)'}), '(xx, yy, Z, cmap=plt.cm.Blues, vmin=-1, vmax=2)\n', (1951, 1998), True, 'import matplotlib.pyplot as plt\n'), ((2003, 2063), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[u][:, 0]', 'X[u][:, 1]'], {'c': 'y', 'cmap': 'plt.cm.binary'}), '(X[u][:, 0], X[u][:, 1], c=y, cmap=plt.cm.binary)\n', (2014, 2063), True, 'import matplotlib.pyplot as plt\n'), ((2068, 2116), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('final_plots/%s_%f.pdf' % (name, u))"], {}), "('final_plots/%s_%f.pdf' % (name, u))\n", (2079, 2116), True, 'import matplotlib.pyplot as plt\n'), ((2239, 2293), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""', 'size': '(24)', 'weight': '"""bold"""'}), "('font', family='serif', size=24, weight='bold')\n", (2245, 
2293), True, 'import matplotlib.pyplot as plt\n'), ((2298, 2327), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(20)'}), "('xtick', labelsize=20)\n", (2304, 2327), True, 'import matplotlib.pyplot as plt\n'), ((2332, 2361), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(20)'}), "('ytick', labelsize=20)\n", (2338, 2361), True, 'import matplotlib.pyplot as plt\n'), ((2366, 2400), 'matplotlib.rc', 'matplotlib.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (2379, 2400), False, 'import matplotlib\n'), ((2555, 2582), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'linewidth': '(1)'}), "('axes', linewidth=1)\n", (2561, 2582), True, 'import matplotlib.pyplot as plt\n'), ((2587, 2616), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'weight': '"""bold"""'}), "('font', weight='bold')\n", (2593, 2616), True, 'import matplotlib.pyplot as plt\n'), ((2691, 2712), 'numpy.argmax', 'np.argmax', (['Y[u_2]', '(-1)'], {}), '(Y[u_2], -1)\n', (2700, 2712), True, 'import numpy as np\n'), ((2722, 2743), 'numpy.argmax', 'np.argmax', (['Y[u_1]', '(-1)'], {}), '(Y[u_1], -1)\n', (2731, 2743), True, 'import numpy as np\n'), ((3593, 3630), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\textbf{feature} $x_1$"""'], {}), "('\\\\textbf{feature} $x_1$')\n", (3603, 3630), True, 'import matplotlib.pyplot as plt\n'), ((3635, 3672), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""\\\\textbf{feature} $x_2$"""'], {}), "('\\\\textbf{feature} $x_2$')\n", (3645, 3672), True, 'import matplotlib.pyplot as plt\n'), ((3677, 3696), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-2.5)', '(2.0)'], {}), '(-2.5, 2.0)\n', (3685, 3696), True, 'import matplotlib.pyplot as plt\n'), ((3701, 3720), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2.0)', '(2.5)'], {}), '(-2.0, 2.5)\n', (3709, 3720), True, 'import matplotlib.pyplot as plt\n'), ((3838, 3911), 'matplotlib.pyplot.contour', 'plt.contour', (['xx', 'yy', 'Z1'], {'levels': '[0]', 'cmap': 
'plt.cm.bwr', 'vmin': '(-1.0)', 'vmax': '(2.0)'}), '(xx, yy, Z1, levels=[0], cmap=plt.cm.bwr, vmin=-1.0, vmax=2.0)\n', (3849, 3911), True, 'import matplotlib.pyplot as plt\n'), ((3916, 3972), 'matplotlib.pyplot.contour', 'plt.contour', (['xx', 'yy', 'Z2'], {'levels': '[0]', 'cmap': 'plt.cm.seismic'}), '(xx, yy, Z2, levels=[0], cmap=plt.cm.seismic)\n', (3927, 3972), True, 'import matplotlib.pyplot as plt\n'), ((3984, 4071), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[u_2][:, 0]', 'X[u_2][:, 1]'], {'s': '(25)', 'c': 'Y2', 'cmap': 'plt.cm.seismic', 'alpha': '(0.7)'}), '(X[u_2][:, 0], X[u_2][:, 1], s=25, c=Y2, cmap=plt.cm.seismic,\n alpha=0.7)\n', (3995, 4071), True, 'import matplotlib.pyplot as plt\n'), ((4078, 4183), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[u_1][:, 0]', 'X[u_1][:, 1]'], {'s': '(25)', 'c': 'Y1', 'cmap': 'plt.cm.bwr', 'vmin': '(-1.0)', 'vmax': '(2.0)', 'alpha': '(0.7)'}), '(X[u_1][:, 0], X[u_1][:, 1], s=25, c=Y1, cmap=plt.cm.bwr, vmin=-\n 1.0, vmax=2.0, alpha=0.7)\n', (4089, 4183), True, 'import matplotlib.pyplot as plt\n'), ((4237, 4295), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('final_plots/%s_%f_%f.pdf' % (name, u_1, u_2))"], {}), "('final_plots/%s_%f_%f.pdf' % (name, u_1, u_2))\n", (4248, 4295), True, 'import matplotlib.pyplot as plt\n'), ((4299, 4308), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4306, 4308), True, 'import matplotlib.pyplot as plt\n'), ((8767, 8790), 'numpy.argmax', 'np.argmax', (['Y_source', '(-1)'], {}), '(Y_source, -1)\n', (8776, 8790), True, 'import numpy as np\n'), ((9253, 9276), 'numpy.argmax', 'np.argmax', (['Y_target', '(-1)'], {}), '(Y_target, -1)\n', (9262, 9276), True, 'import numpy as np\n'), ((9598, 9627), 'numpy.vstack', 'np.vstack', (['([X_source] + X_aux)'], {}), '([X_source] + X_aux)\n', (9607, 9627), True, 'import numpy as np\n'), ((9643, 9672), 'numpy.hstack', 'np.hstack', (['([Y_source] + Y_aux)'], {}), '([Y_source] + Y_aux)\n', (9652, 9672), True, 'import numpy as np\n'), ((12315, 
12332), 'numpy.vstack', 'np.vstack', (['Y_pred'], {}), '(Y_pred)\n', (12324, 12332), True, 'import numpy as np\n'), ((12366, 12389), 'numpy.argmax', 'np.argmax', (['Y_target', '(-1)'], {}), '(Y_target, -1)\n', (12375, 12389), True, 'import numpy as np\n'), ((14562, 14587), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14585, 14587), False, 'import argparse\n'), ((1510, 1536), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (1519, 1536), True, 'import numpy as np\n'), ((1538, 1564), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (1547, 1564), True, 'import numpy as np\n'), ((2842, 2868), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (2851, 2868), True, 'import numpy as np\n'), ((2870, 2896), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (2879, 2896), True, 'import numpy as np\n'), ((8285, 8310), 'torch.softmax', 'torch.softmax', (['Y_pred', '(-1)'], {}), '(Y_pred, -1)\n', (8298, 8310), False, 'import torch\n'), ((9691, 9707), 'numpy.max', 'np.max', (['Y_source'], {}), '(Y_source)\n', (9697, 9707), True, 'import numpy as np\n'), ((9727, 9746), 'numpy.eye', 'np.eye', (['num_classes'], {}), '(num_classes)\n', (9733, 9746), True, 'import numpy as np\n'), ((9772, 9791), 'numpy.eye', 'np.eye', (['num_classes'], {}), '(num_classes)\n', (9778, 9791), True, 'import numpy as np\n'), ((12422, 12471), 'numpy.array', 'np.array', (['[(0 if y < 0.5 else 1) for y in Y_pred]'], {}), '([(0 if y < 0.5 else 1) for y in Y_pred])\n', (12430, 12471), True, 'import numpy as np\n'), ((12498, 12519), 'numpy.argmax', 'np.argmax', (['Y_pred', '(-1)'], {}), '(Y_pred, -1)\n', (12507, 12519), True, 'import numpy as np\n'), ((12531, 12561), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_true', 'Y_pred'], {}), '(Y_true, Y_pred)\n', (12545, 12561), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, 
classification_report\n'), ((12588, 12620), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y_true', 'Y_pred'], {}), '(Y_true, Y_pred)\n', (12604, 12620), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n'), ((12647, 12684), 'sklearn.metrics.classification_report', 'classification_report', (['Y_true', 'Y_pred'], {}), '(Y_true, Y_pred)\n', (12668, 12684), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n'), ((13024, 13066), 'numpy.array', 'np.array', (['[X_data[ids] for ids in indices]'], {}), '([X_data[ids] for ids in indices])\n', (13032, 13066), True, 'import numpy as np\n'), ((13084, 13126), 'numpy.array', 'np.array', (['[Y_data[ids] for ids in indices]'], {}), '([Y_data[ids] for ids in indices])\n', (13092, 13126), True, 'import numpy as np\n'), ((13144, 13186), 'numpy.array', 'np.array', (['[U_data[ids] for ids in indices]'], {}), '([U_data[ids] for ids in indices])\n', (13152, 13186), True, 'import numpy as np\n'), ((4183, 4192), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4190, 4192), True, 'import matplotlib.pyplot as plt\n'), ((6820, 6849), 'torch.cat', 'torch.cat', (['[X, times]'], {'dim': '(-1)'}), '([X, times], dim=-1)\n', (6829, 6849), False, 'import torch\n'), ((9034, 9057), 'numpy.argmax', 'np.argmax', (['Y_aux[i]', '(-1)'], {}), '(Y_aux[i], -1)\n', (9043, 9057), True, 'import numpy as np\n'), ((12988, 12997), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (12994, 12997), True, 'import numpy as np\n'), ((13445, 13487), 'numpy.array', 'np.array', (['[X_data[ids] for ids in indices]'], {}), '([X_data[ids] for ids in indices])\n', (13453, 13487), True, 'import numpy as np\n'), ((13505, 13547), 'numpy.array', 'np.array', (['[Y_data[ids] for ids in indices]'], {}), '([Y_data[ids] for ids in indices])\n', (13513, 13547), True, 'import numpy as np\n'), ((13565, 13607), 'numpy.array', 'np.array', (['[U_data[ids] for ids in indices]'], {}), 
'([U_data[ids] for ids in indices])\n', (13573, 13607), True, 'import numpy as np\n'), ((10876, 10887), 'time.time', 'time.time', ([], {}), '()\n', (10885, 10887), False, 'import time\n'), ((13408, 13418), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (13414, 13418), True, 'import numpy as np\n'), ((13863, 13905), 'numpy.array', 'np.array', (['[X_data[ids] for ids in indices]'], {}), '([X_data[ids] for ids in indices])\n', (13871, 13905), True, 'import numpy as np\n'), ((13923, 13965), 'numpy.array', 'np.array', (['[Y_data[ids] for ids in indices]'], {}), '([Y_data[ids] for ids in indices])\n', (13931, 13965), True, 'import numpy as np\n'), ((13983, 14025), 'numpy.array', 'np.array', (['[U_data[ids] for ids in indices]'], {}), '([U_data[ids] for ids in indices])\n', (13991, 14025), True, 'import numpy as np\n'), ((8347, 8372), 'torch.log', 'torch.log', (['(Y_pred + 1e-15)'], {}), '(Y_pred + 1e-15)\n', (8356, 8372), False, 'import torch\n'), ((10966, 10988), 'torch.tensor', 'torch.tensor', (['X_source'], {}), '(X_source)\n', (10978, 10988), False, 'import torch\n'), ((10998, 11020), 'torch.tensor', 'torch.tensor', (['Y_source'], {}), '(Y_source)\n', (11010, 11020), False, 'import torch\n'), ((11880, 11902), 'torch.tensor', 'torch.tensor', (['X_target'], {}), '(X_target)\n', (11892, 11902), False, 'import torch\n'), ((11912, 11934), 'torch.tensor', 'torch.tensor', (['Y_target'], {}), '(Y_target)\n', (11924, 11934), False, 'import torch\n'), ((13827, 13836), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (13833, 13836), True, 'import numpy as np\n'), ((14272, 14314), 'numpy.array', 'np.array', (['[X_data[ids] for ids in indices]'], {}), '([X_data[ids] for ids in indices])\n', (14280, 14314), True, 'import numpy as np\n'), ((14332, 14374), 'numpy.array', 'np.array', (['[Y_data[ids] for ids in indices]'], {}), '([Y_data[ids] for ids in indices])\n', (14340, 14374), True, 'import numpy as np\n'), ((14392, 14434), 'numpy.array', 'np.array', (['[U_data[ids] for ids in 
indices]'], {}), '([U_data[ids] for ids in indices])\n', (14400, 14434), True, 'import numpy as np\n'), ((2962, 3000), 'torch.tensor', 'torch.tensor', (['([[u_1 / 11]] * 900 * 900)'], {}), '([[u_1 / 11]] * 900 * 900)\n', (2974, 3000), False, 'import torch\n'), ((3106, 3144), 'torch.tensor', 'torch.tensor', (['([[u_2 / 11]] * 900 * 900)'], {}), '([[u_2 / 11]] * 900 * 900)\n', (3118, 3144), False, 'import torch\n'), ((8072, 8090), 'torch.argmax', 'torch.argmax', (['Y', '(1)'], {}), '(Y, 1)\n', (8084, 8090), False, 'import torch\n'), ((8151, 8176), 'torch.log', 'torch.log', (['(Y_pred + 1e-15)'], {}), '(Y_pred + 1e-15)\n', (8160, 8176), False, 'import torch\n'), ((8194, 8223), 'torch.log', 'torch.log', (['(1 - Y_pred + 1e-15)'], {}), '(1 - Y_pred + 1e-15)\n', (8203, 8223), False, 'import torch\n'), ((14236, 14245), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (14242, 14245), True, 'import numpy as np\n'), ((1699, 1735), 'torch.tensor', 'torch.tensor', (['([[u / 11]] * 900 * 800)'], {}), '([[u / 11]] * 900 * 800)\n', (1711, 1735), False, 'import torch\n'), ((12111, 12138), 'torch.sigmoid', 'torch.sigmoid', (['batch_Y_pred'], {}), '(batch_Y_pred)\n', (12124, 12138), False, 'import torch\n'), ((12203, 12234), 'torch.softmax', 'torch.softmax', (['batch_Y_pred', '(-1)'], {}), '(batch_Y_pred, -1)\n', (12216, 12234), False, 'import torch\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 28 08:32:18 2018
@author: avanetten
Implement SP Metric
https://www.cv-foundation.org/openaccess/content_cvpr_2013/papers/Wegner_A_Higher-Order_CRF_2013_CVPR_paper.pdf
"""
import apls_utils
import apls
import os
import sys
import time
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
# import osmnx as ox
path_apls_src = os.path.dirname(os.path.realpath(__file__))
path_apls = os.path.dirname(path_apls_src)
sys.path.append(path_apls_src)
import osmnx_funcs
###############################################################################
def compute_single_sp(G_gt_, G_prop_, kd_idx_dic_prop, kdtree_prop,
                      x_coord='x', y_coord='y',
                      weight='length', query_radius=5,
                      length_buffer=0.05, make_plots=False, verbose=False):
    '''Single SP metric for one randomly sampled route.

    Picks a random (source, target) node pair in the ground truth graph
    ``G_gt_``, computes the shortest path length in both the ground truth
    and the proposal graph ``G_prop_`` (matching proposal nodes by spatial
    proximity via the supplied k-d tree), and compares the two lengths.

    Parameters
    ----------
    G_gt_ : networkx graph
        Ground truth graph (node attributes ``x_coord``/``y_coord`` required).
    G_prop_ : networkx graph
        Proposal graph.
    kd_idx_dic_prop, kdtree_prop :
        Index dict and k-d tree over proposal node positions, as produced
        by ``apls_utils.G_to_kdtree``.
    x_coord, y_coord : str
        Names of the node position attributes.
    weight : str
        Edge attribute used as the path-length weight.
    query_radius : float
        Search radius (in position units) used to match a ground truth
        node to a proposal node.
    length_buffer : float
        Allowed fractional difference between the two path lengths.
    make_plots : bool
        If True, plot both routes with matplotlib.
    verbose : bool
        Print diagnostics.

    Returns
    -------
    match : int
        1 if the proposal path length is within ``length_buffer`` of the
        ground truth length;
        0 if the path is outside the buffer or exists in only one graph;
        -1 if the path between the randomly chosen nodes exists in
        neither graph (callers should exclude such samples from scoring).
    '''
    # choose random ground truth source and target nodes
    [source_gt, target_gt] = np.random.choice(
        G_gt_.nodes(), size=2, replace=False)
    if verbose:
        print("source_gt:", source_gt, "target_gt:", target_gt)
    # source_gt, target_gt = 10002, 10039
    # NOTE(review): G_.node[...] is the NetworkX 1.x attribute API;
    # NetworkX 2.x renamed it to G_.nodes[...] — confirm pinned version.
    x_s_gt, y_s_gt = G_gt_.node[source_gt][x_coord], G_gt_.node[source_gt][y_coord]
    x_t_gt, y_t_gt = G_gt_.node[target_gt][x_coord], G_gt_.node[target_gt][y_coord]
    # if verbose:
    #    print ("x_s_gt:", x_s_gt)
    #    print ("y_s_gt:", y_s_gt)
    # get route in the ground truth graph; if it does not exist, set len = -1
    if not nx.has_path(G_gt_, source_gt, target_gt):
        len_gt = -1
    else:
        len_gt = nx.dijkstra_path_length(
            G_gt_, source_gt, target_gt, weight=weight)
    # get nodes in prop graph
    # see if source, target node exists in proposal (within query_radius)
    source_p_l, _ = apls_utils.nodes_near_point(x_s_gt, y_s_gt,
                                                kdtree_prop, kd_idx_dic_prop,
                                                x_coord=x_coord, y_coord=y_coord,
                                                radius_m=query_radius)
    target_p_l, _ = apls_utils.nodes_near_point(x_t_gt, y_t_gt,
                                                kdtree_prop, kd_idx_dic_prop,
                                                x_coord=x_coord, y_coord=y_coord,
                                                radius_m=query_radius)
    # if either source or target does not exist, set prop_len as -1
    if (len(source_p_l) == 0) or (len(target_p_l) == 0):
        len_prop = -1
    else:
        # take the nearest proposal node for each endpoint
        source_p, target_p = source_p_l[0], target_p_l[0]
        x_s_p, y_s_p = G_prop_.node[source_p][x_coord], G_prop_.node[source_p][y_coord]
        x_t_p, y_t_p = G_prop_.node[target_p][x_coord], G_prop_.node[target_p][y_coord]
        # get route
        if not nx.has_path(G_prop_, source_p, target_p):
            len_prop = -1
        else:
            len_prop = nx.dijkstra_path_length(
                G_prop_, source_p, target_p, weight=weight)
    # path length difference, as a fraction of the ground truth length
    perc_diff = np.abs((len_gt - len_prop) / len_gt)
    # check path lengths
    # if both paths do not exist, skip (caller should drop -1 samples)
    if (len_gt == -1) and (len_prop == -1):
        match = -1
    # if one is positive and one negative, return 0
    elif (np.sign(len_gt) != np.sign(len_prop)):
        match = 0
    # else, campare lengths
    elif perc_diff > length_buffer:
        match = 0
    else:
        match = 1
    if verbose:
        # print ("source_gt:", source_gt, "target_gt:", target_gt)
        print("len_gt:", len_gt)
        print("len_prop:", len_prop)
        print("perc_diff:", perc_diff)
    if make_plots:
        # plot G_gt_init
        plt.close('all')
        # plot initial (ground truth) graph, with the route if one exists
        if len_gt != -1:
            fig, ax = osmnx_funcs.plot_graph_route(G_gt_, nx.shortest_path(
                G_gt_, source=source_gt, target=target_gt, weight=weight))
        else:
            fig, ax = osmnx_funcs.plot_graph(G_gt_, axis_off=True)
        ax.set_title("Ground Truth, L = " + str(np.round(len_gt, 2)))
        # draw a circle (this doesn't work unless it's a PatchCollection!)
        patches = [Circle((x_s_gt, y_s_gt), query_radius, alpha=0.3),
                   Circle((x_t_gt, y_t_gt), query_radius, alpha=0.3)]
        p = PatchCollection(patches, alpha=0.4, color='orange')
        ax.add_collection(p)
        # also a simple point at each endpoint
        ax.scatter([x_s_gt], [y_s_gt], c='green', s=6)
        ax.scatter([x_t_gt], [y_t_gt], c='red', s=6)
        # plot proposal graph
        if len_prop != -1:
            fig, ax1 = osmnx_funcs.plot_graph_route(G_prop_, nx.shortest_path(
                G_prop_, source=source_p, target=target_p, weight=weight))
        else:
            fig, ax1 = osmnx_funcs.plot_graph(G_prop_, axis_off=True)
        ax1.set_title("Proposal, L = " + str(np.round(len_prop, 2)))
        # draw patches from ground truth!
        patches = [Circle((x_s_gt, y_s_gt), query_radius, alpha=0.3),
                   Circle((x_t_gt, y_t_gt), query_radius, alpha=0.3)]
        p = PatchCollection(patches, alpha=0.4, color='orange')
        ax1.add_collection(p)
        if len_prop != -1:
            # also a simple point at each (matched) proposal endpoint
            ax1.scatter([x_s_p], [y_s_p], c='green', s=6)
            ax1.scatter([x_t_p], [y_t_p], c='red', s=6)
    return match
###############################################################################
def compute_sp(G_gt_, G_prop_,
               x_coord='x', y_coord='y',
               weight='length', query_radius=5,
               length_buffer=0.05, n_routes=10, verbose=False,
               make_plots=True):
    '''Compute the SP metric over n_routes randomly sampled routes.

    Samples random (source, target) pairs in the ground truth graph
    ``G_gt_`` and checks, via :func:`compute_single_sp`, whether the
    proposal graph ``G_prop_`` contains a shortest path of comparable
    length (within ``length_buffer``, fractional).

    Parameters mirror :func:`compute_single_sp`; ``n_routes`` is the
    number of random routes to sample.

    Returns
    -------
    match_l : list
        Per-route match values (0 or 1).  Routes that exist in neither
        graph (match value -1) are excluded.
    sp_tot : float
        Fraction of valid routes that match; 0 if the proposal graph is
        empty or no valid routes were sampled.
    '''
    t0 = time.time()
    # An empty proposal graph cannot match any route.
    if len(G_prop_.nodes()) == 0:
        return [], 0
    # Build a k-d tree over proposal node positions once, up front.
    kd_idx_dic_p, kdtree_p, pos_arr_p = apls_utils.G_to_kdtree(G_prop_)
    match_l = []
    for i in range(n_routes):
        # Only plot the first route (if plotting is enabled at all).
        make_plots_tmp = bool(i == 0 and make_plots)
        if (i % 100) == 0:
            print((i, "/", n_routes))
        match_val = compute_single_sp(G_gt_, G_prop_, kd_idx_dic_p, kdtree_p,
                                      x_coord=x_coord, y_coord=y_coord,
                                      weight=weight, query_radius=query_radius,
                                      length_buffer=length_buffer,
                                      make_plots=make_plots_tmp,
                                      verbose=verbose)
        # -1 means the route exists in neither graph; exclude it.
        if match_val != -1:
            match_l.append(match_val)
    # Total score is the fraction of valid routes that match.
    # BUGFIX: guard against ZeroDivisionError when every sampled route was
    # missing from both graphs (match_l empty).
    if len(match_l) == 0:
        sp_tot = 0
    else:
        sp_tot = 1.0 * np.sum(match_l) / len(match_l)
    if verbose:
        print(("match_arr:", np.array(match_l)))
        # print ("  sp_tot:", sp_tot)
    print("sp metric:")
    print(("  total time elapsed to compute sp:",
           time.time() - t0, "seconds"))
    return match_l, sp_tot
###############################################################################
###############################################################################
###############################################################################
if __name__ == "__main__":
    # Test driver: score one random SpaceNet Vegas tile, then (optionally)
    # the whole folder.
    ##########################
    n_measurement_nodes = 10
    x_coord = 'x'
    y_coord = 'y'
    weight = 'length'
    query_radius = 5
    length_buffer = 0.05
    n_routes = 500
    verbose = False  # True
    run_all = True
    #pick_random_start_node = True
    truth_dir = '/raid/cosmiq/spacenet/data/spacenetv2/AOI_2_Vegas_Test/400m/gt_graph_pkls'
    # NOTE(review): this path is relative (no leading '/'); it is probably
    # meant to be '/raid/cosmiq/...' like truth_dir above — confirm.
    prop_dir = 'raid/cosmiq/basiss/inference_mod_new/results/rgb_test_sn_vegas/graphs'
    ##########################
    # Pick one random ground truth pickle to score first.
    name_list = os.listdir(truth_dir)
    f = name_list[np.random.randint(len(name_list))]
    #f = 'AOI_2_Vegas_img150.pkl'
    print(("f:", f))
    t0 = time.time()
    # get original graph
    outroot = f.split('.')[0]
    print("\noutroot:", outroot)
    gt_file = os.path.join(truth_dir, f)
    prop_file = os.path.join(prop_dir, outroot + '.gpickle')
    # ground truth graph: simplify, project, then normalize edge geometries
    G_gt_init = nx.read_gpickle(gt_file)
    G_gt_init1 = osmnx_funcs.simplify_graph(G_gt_init.to_directed()).to_undirected()
    G_gt_init = osmnx_funcs.project_graph(G_gt_init1)
    G_gt_init = apls.create_edge_linestrings(
        G_gt_init, remove_redundant=True, verbose=False)
    print(("G_gt_init.nodes():", G_gt_init.nodes()))
    # NOTE(review): G.edges()[0] and G.edge[u][v] are NetworkX 1.x APIs.
    (u, v) = G_gt_init.edges()[0]
    print(("random edge props:", G_gt_init.edge[u][v]))
    # proposal graph (loaded as-is; simplification is commented out here)
    G_p_init = nx.read_gpickle(prop_file)
    #G_p_init0 = nx.read_gpickle(prop_file)
    #G_p_init1 = osmnx_funcs.simplify_graph(G_p_init0.to_directed()).to_undirected()
    #G_p_init = osmnx_funcs.project_graph(G_p_init1)
    G_p_init = apls.create_edge_linestrings(
        G_p_init, remove_redundant=True, verbose=False)
    t0 = time.time()
    print("\nComputing score...")
    match_list, score = compute_sp(G_gt_init, G_p_init,
                                   x_coord=x_coord, y_coord=y_coord,
                                   weight=weight, query_radius=query_radius,
                                   length_buffer=length_buffer, n_routes=n_routes,
                                   make_plots=True,
                                   verbose=verbose)
    print(("score:", score))
    print(("Time to compute score:", time.time() - t0, "seconds"))
    ############
    # also compute total topo metric for entire folder
    if run_all:
        t0 = time.time()
        plt.close('all')
        score_list = []
        match_list = []
        for i, f in enumerate(name_list):
            # Only plot the first file.
            if i == 0:
                make_plots = True
            else:
                make_plots = False
            # get original graph
            outroot = f.split('.')[0]
            print("\n", i, "/", len(name_list), "outroot:", outroot)
            #print ("\n", i, "/", len(name_list), "outroot:", outroot)
            gt_file = os.path.join(truth_dir, f)
            # ground truth graph
            G_gt_init = nx.read_gpickle(gt_file)
            #G_gt_init1 = osmnx_funcs.simplify_graph(G_gt_init0.to_directed()).to_undirected()
            #G_gt_init = osmnx_funcs.project_graph(G_gt_init1)
            G_gt_init = apls.create_edge_linestrings(
                G_gt_init, remove_redundant=True, verbose=False)
            if len(G_gt_init.nodes()) == 0:
                continue
            # proposal graph; a missing proposal scores 0 for this tile
            prop_file = os.path.join(prop_dir, outroot + '.gpickle')
            if not os.path.exists(prop_file):
                score_list.append(0)
                continue
            G_p_init0 = nx.read_gpickle(prop_file)
            G_p_init1 = osmnx_funcs.simplify_graph(
                G_p_init0.to_directed()).to_undirected()
            G_p_init = osmnx_funcs.project_graph(G_p_init1)
            G_p_init = apls.create_edge_linestrings(
                G_p_init, remove_redundant=True, verbose=False)
            match_list_tmp, score = compute_sp(G_gt_init, G_p_init,
                                               x_coord=x_coord, y_coord=y_coord,
                                               weight=weight, query_radius=query_radius,
                                               length_buffer=length_buffer, n_routes=n_routes,
                                               make_plots=make_plots,
                                               verbose=verbose)
            score_list.append(score)
            match_list.extend(match_list_tmp)
        # compute total score
        # total score is fraction of routes that match, pooled over files
        sp_tot = 1.0 * np.sum(match_list) / len(match_list)
        #score_tot = np.sum(score_list)
        print(("Total sp metric for", len(name_list), "files:"))
        print(("  query_radius:", query_radius, "length_buffer:", length_buffer))
        print(("  n_measurement_nodes:", n_measurement_nodes, "n_routes:", n_routes))
        print(("  total time elapsed to compute sp and make plots:",
               time.time() - t0, "seconds"))
        print(("  total sp:", sp_tot))
| [
"apls.create_edge_linestrings",
"osmnx_funcs.plot_graph",
"numpy.array",
"networkx.shortest_path",
"sys.path.append",
"networkx.has_path",
"os.path.exists",
"apls_utils.nodes_near_point",
"os.listdir",
"matplotlib.pyplot.close",
"matplotlib.patches.Circle",
"numpy.round",
"numpy.abs",
"os.... | [((565, 595), 'os.path.dirname', 'os.path.dirname', (['path_apls_src'], {}), '(path_apls_src)\n', (580, 595), False, 'import os\n'), ((596, 626), 'sys.path.append', 'sys.path.append', (['path_apls_src'], {}), '(path_apls_src)\n', (611, 626), False, 'import sys\n'), ((525, 551), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (541, 551), False, 'import os\n'), ((2054, 2188), 'apls_utils.nodes_near_point', 'apls_utils.nodes_near_point', (['x_s_gt', 'y_s_gt', 'kdtree_prop', 'kd_idx_dic_prop'], {'x_coord': 'x_coord', 'y_coord': 'y_coord', 'radius_m': 'query_radius'}), '(x_s_gt, y_s_gt, kdtree_prop, kd_idx_dic_prop,\n x_coord=x_coord, y_coord=y_coord, radius_m=query_radius)\n', (2081, 2188), False, 'import apls_utils\n'), ((2349, 2483), 'apls_utils.nodes_near_point', 'apls_utils.nodes_near_point', (['x_t_gt', 'y_t_gt', 'kdtree_prop', 'kd_idx_dic_prop'], {'x_coord': 'x_coord', 'y_coord': 'y_coord', 'radius_m': 'query_radius'}), '(x_t_gt, y_t_gt, kdtree_prop, kd_idx_dic_prop,\n x_coord=x_coord, y_coord=y_coord, radius_m=query_radius)\n', (2376, 2483), False, 'import apls_utils\n'), ((3307, 3343), 'numpy.abs', 'np.abs', (['((len_gt - len_prop) / len_gt)'], {}), '((len_gt - len_prop) / len_gt)\n', (3313, 3343), True, 'import numpy as np\n'), ((5936, 5947), 'time.time', 'time.time', ([], {}), '()\n', (5945, 5947), False, 'import time\n'), ((6044, 6075), 'apls_utils.G_to_kdtree', 'apls_utils.G_to_kdtree', (['G_prop_'], {}), '(G_prop_)\n', (6066, 6075), False, 'import apls_utils\n'), ((7877, 7898), 'os.listdir', 'os.listdir', (['truth_dir'], {}), '(truth_dir)\n', (7887, 7898), False, 'import os\n'), ((8016, 8027), 'time.time', 'time.time', ([], {}), '()\n', (8025, 8027), False, 'import time\n'), ((8131, 8157), 'os.path.join', 'os.path.join', (['truth_dir', 'f'], {}), '(truth_dir, f)\n', (8143, 8157), False, 'import os\n'), ((8174, 8218), 'os.path.join', 'os.path.join', (['prop_dir', "(outroot + '.gpickle')"], {}), "(prop_dir, outroot + 
'.gpickle')\n", (8186, 8218), False, 'import os\n'), ((8261, 8285), 'networkx.read_gpickle', 'nx.read_gpickle', (['gt_file'], {}), '(gt_file)\n', (8276, 8285), True, 'import networkx as nx\n'), ((8387, 8424), 'osmnx_funcs.project_graph', 'osmnx_funcs.project_graph', (['G_gt_init1'], {}), '(G_gt_init1)\n', (8412, 8424), False, 'import osmnx_funcs\n'), ((8441, 8518), 'apls.create_edge_linestrings', 'apls.create_edge_linestrings', (['G_gt_init'], {'remove_redundant': '(True)', 'verbose': '(False)'}), '(G_gt_init, remove_redundant=True, verbose=False)\n', (8469, 8518), False, 'import apls\n'), ((8709, 8735), 'networkx.read_gpickle', 'nx.read_gpickle', (['prop_file'], {}), '(prop_file)\n', (8724, 8735), True, 'import networkx as nx\n'), ((8933, 9009), 'apls.create_edge_linestrings', 'apls.create_edge_linestrings', (['G_p_init'], {'remove_redundant': '(True)', 'verbose': '(False)'}), '(G_p_init, remove_redundant=True, verbose=False)\n', (8961, 9009), False, 'import apls\n'), ((9029, 9040), 'time.time', 'time.time', ([], {}), '()\n', (9038, 9040), False, 'import time\n'), ((1781, 1821), 'networkx.has_path', 'nx.has_path', (['G_gt_', 'source_gt', 'target_gt'], {}), '(G_gt_, source_gt, target_gt)\n', (1792, 1821), True, 'import networkx as nx\n'), ((1870, 1937), 'networkx.dijkstra_path_length', 'nx.dijkstra_path_length', (['G_gt_', 'source_gt', 'target_gt'], {'weight': 'weight'}), '(G_gt_, source_gt, target_gt, weight=weight)\n', (1893, 1937), True, 'import networkx as nx\n'), ((3947, 3963), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3956, 3963), True, 'import matplotlib.pyplot as plt\n'), ((4547, 4598), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches'], {'alpha': '(0.4)', 'color': '"""orange"""'}), "(patches, alpha=0.4, color='orange')\n", (4562, 4598), False, 'from matplotlib.collections import PatchCollection\n'), ((5325, 5376), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches'], {'alpha': 
'(0.4)', 'color': '"""orange"""'}), "(patches, alpha=0.4, color='orange')\n", (5340, 5376), False, 'from matplotlib.collections import PatchCollection\n'), ((9662, 9673), 'time.time', 'time.time', ([], {}), '()\n', (9671, 9673), False, 'import time\n'), ((9682, 9698), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9691, 9698), True, 'import matplotlib.pyplot as plt\n'), ((3054, 3094), 'networkx.has_path', 'nx.has_path', (['G_prop_', 'source_p', 'target_p'], {}), '(G_prop_, source_p, target_p)\n', (3065, 3094), True, 'import networkx as nx\n'), ((3159, 3226), 'networkx.dijkstra_path_length', 'nx.dijkstra_path_length', (['G_prop_', 'source_p', 'target_p'], {'weight': 'weight'}), '(G_prop_, source_p, target_p, weight=weight)\n', (3182, 3226), True, 'import networkx as nx\n'), ((3533, 3548), 'numpy.sign', 'np.sign', (['len_gt'], {}), '(len_gt)\n', (3540, 3548), True, 'import numpy as np\n'), ((3552, 3569), 'numpy.sign', 'np.sign', (['len_prop'], {}), '(len_prop)\n', (3559, 3569), True, 'import numpy as np\n'), ((4205, 4249), 'osmnx_funcs.plot_graph', 'osmnx_funcs.plot_graph', (['G_gt_'], {'axis_off': '(True)'}), '(G_gt_, axis_off=True)\n', (4227, 4249), False, 'import osmnx_funcs\n'), ((4414, 4463), 'matplotlib.patches.Circle', 'Circle', (['(x_s_gt, y_s_gt)', 'query_radius'], {'alpha': '(0.3)'}), '((x_s_gt, y_s_gt), query_radius, alpha=0.3)\n', (4420, 4463), False, 'from matplotlib.patches import Circle\n'), ((4484, 4533), 'matplotlib.patches.Circle', 'Circle', (['(x_t_gt, y_t_gt)', 'query_radius'], {'alpha': '(0.3)'}), '((x_t_gt, y_t_gt), query_radius, alpha=0.3)\n', (4490, 4533), False, 'from matplotlib.patches import Circle\n'), ((5015, 5061), 'osmnx_funcs.plot_graph', 'osmnx_funcs.plot_graph', (['G_prop_'], {'axis_off': '(True)'}), '(G_prop_, axis_off=True)\n', (5037, 5061), False, 'import osmnx_funcs\n'), ((5192, 5241), 'matplotlib.patches.Circle', 'Circle', (['(x_s_gt, y_s_gt)', 'query_radius'], {'alpha': '(0.3)'}), '((x_s_gt, y_s_gt), 
query_radius, alpha=0.3)\n', (5198, 5241), False, 'from matplotlib.patches import Circle\n'), ((5262, 5311), 'matplotlib.patches.Circle', 'Circle', (['(x_t_gt, y_t_gt)', 'query_radius'], {'alpha': '(0.3)'}), '((x_t_gt, y_t_gt), query_radius, alpha=0.3)\n', (5268, 5311), False, 'from matplotlib.patches import Circle\n'), ((6824, 6839), 'numpy.sum', 'np.sum', (['match_l'], {}), '(match_l)\n', (6830, 6839), True, 'import numpy as np\n'), ((10134, 10160), 'os.path.join', 'os.path.join', (['truth_dir', 'f'], {}), '(truth_dir, f)\n', (10146, 10160), False, 'import os\n'), ((10219, 10243), 'networkx.read_gpickle', 'nx.read_gpickle', (['gt_file'], {}), '(gt_file)\n', (10234, 10243), True, 'import networkx as nx\n'), ((10426, 10503), 'apls.create_edge_linestrings', 'apls.create_edge_linestrings', (['G_gt_init'], {'remove_redundant': '(True)', 'verbose': '(False)'}), '(G_gt_init, remove_redundant=True, verbose=False)\n', (10454, 10503), False, 'import apls\n'), ((10644, 10688), 'os.path.join', 'os.path.join', (['prop_dir', "(outroot + '.gpickle')"], {}), "(prop_dir, outroot + '.gpickle')\n", (10656, 10688), False, 'import os\n'), ((10822, 10848), 'networkx.read_gpickle', 'nx.read_gpickle', (['prop_file'], {}), '(prop_file)\n', (10837, 10848), True, 'import networkx as nx\n'), ((10981, 11017), 'osmnx_funcs.project_graph', 'osmnx_funcs.project_graph', (['G_p_init1'], {}), '(G_p_init1)\n', (11006, 11017), False, 'import osmnx_funcs\n'), ((11041, 11117), 'apls.create_edge_linestrings', 'apls.create_edge_linestrings', (['G_p_init'], {'remove_redundant': '(True)', 'verbose': '(False)'}), '(G_p_init, remove_redundant=True, verbose=False)\n', (11069, 11117), False, 'import apls\n'), ((4076, 4150), 'networkx.shortest_path', 'nx.shortest_path', (['G_gt_'], {'source': 'source_gt', 'target': 'target_gt', 'weight': 'weight'}), '(G_gt_, source=source_gt, target=target_gt, weight=weight)\n', (4092, 4150), True, 'import networkx as nx\n'), ((4885, 4959), 'networkx.shortest_path', 
'nx.shortest_path', (['G_prop_'], {'source': 'source_p', 'target': 'target_p', 'weight': 'weight'}), '(G_prop_, source=source_p, target=target_p, weight=weight)\n', (4901, 4959), True, 'import networkx as nx\n'), ((6901, 6918), 'numpy.array', 'np.array', (['match_l'], {}), '(match_l)\n', (6909, 6918), True, 'import numpy as np\n'), ((7045, 7056), 'time.time', 'time.time', ([], {}), '()\n', (7054, 7056), False, 'import time\n'), ((9530, 9541), 'time.time', 'time.time', ([], {}), '()\n', (9539, 9541), False, 'import time\n'), ((10708, 10733), 'os.path.exists', 'os.path.exists', (['prop_file'], {}), '(prop_file)\n', (10722, 10733), False, 'import os\n'), ((11795, 11813), 'numpy.sum', 'np.sum', (['match_list'], {}), '(match_list)\n', (11801, 11813), True, 'import numpy as np\n'), ((4298, 4317), 'numpy.round', 'np.round', (['len_gt', '(2)'], {}), '(len_gt, 2)\n', (4306, 4317), True, 'import numpy as np\n'), ((5107, 5128), 'numpy.round', 'np.round', (['len_prop', '(2)'], {}), '(len_prop, 2)\n', (5115, 5128), True, 'import numpy as np\n'), ((12190, 12201), 'time.time', 'time.time', ([], {}), '()\n', (12199, 12201), False, 'import time\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 18 17:57:38 2020
Copyright 2020 by <NAME>.
"""
# Standard library imports:
from math import exp, sqrt, pi
import numpy as np
# Learnpy imports:
from .RandVar import RandVar
from .RandVar2 import RandVar2
def mle(X, model):
    """Maximum-likelihood fit of a probability model to data.

    Parameters
    ----------
    X : numpy.ndarray
        The data, an n x d matrix for n points in dimension d (or a
        length-n vector for 1-d data).
    model : str
        The distribution family.  Only 'normal' is supported.

    Returns
    -------
    A RandVar for 1-d data, a RandVar2 for 2-d data, or a plain pdf
    callable for d > 2.

    Example
    -------
    import numpy as np
    from learnpy.supervised import mle
    data = np.array([[170, 80], [172, 90], [180, 68], [169, 77]])
    output = mle(data, 'normal')
    output.plot()
    output.display()
    """
    # Number of samples and data dimension, inferred from the array shape.
    n = len(X)
    d = 1 if len(X.shape) == 1 else len(X[0])
    # To IMPROVE: add more probability models.
    if d == 1:
        # Univariate normal: sample mean and (MLE, 1/n) variance.
        if model == 'normal':
            mean = 1 / n * sum(X)
            var = 1 / n * sum((x - mean) ** 2 for x in X)

            def pdf(x):
                return 1 / sqrt(2 * pi * var) * exp(-1 / (2 * var) * (x - mean) ** 2)

            # Plot/integration window: mean +/- ~3 standard deviations
            # around the observed data range.
            lo = min(X) - 3 * sqrt(var)
            hi = max(X) + 3 * sqrt(var)
            return RandVar(pdf, np.array([lo, hi]))
    else:
        # Multivariate normal in dimension d > 1.
        if model == 'normal':
            mean = 1 / n * sum(X)
            # MLE covariance (normalized by n, not n-1).
            covar = np.zeros([d, d])
            for row in X:
                covar += 1 / n * np.outer(row - mean, row - mean)
            covar_inv = np.linalg.inv(covar)
            # Normalization constant of the multivariate normal density.
            scl = (2 * pi) ** (-d / 2) * np.linalg.det(covar) ** (-1 / 2)

            def pdf(x):
                return scl * exp(-1 / 2 * ((x - mean).T @ covar_inv @ (x - mean)))

            # In 2d, wrap the density in a RandVar2 over a data-driven box.
            if d == 2:
                def pdf_2d(x, y):
                    return pdf(np.array([x, y]))

                sx = sqrt(covar[0, 0])
                sy = sqrt(covar[1, 1])
                domain = np.array([min(X[:, 0]) - 3 * sx, max(X[:, 0]) + 3 * sx,
                                   min(X[:, 1]) - 3 * sy, max(X[:, 1]) + 3 * sy])
                return RandVar2(pdf_2d, domain)
            # For d > 2, return the bare probability density function.
            else:
                return pdf
"math.sqrt",
"numpy.linalg.det",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.outer",
"math.exp"
] | [((1551, 1586), 'numpy.array', 'np.array', (['[left_bound, right_bound]'], {}), '([left_bound, right_bound])\n', (1559, 1586), True, 'import numpy as np\n'), ((1818, 1834), 'numpy.zeros', 'np.zeros', (['[d, d]'], {}), '([d, d])\n', (1826, 1834), True, 'import numpy as np\n'), ((1960, 1980), 'numpy.linalg.inv', 'np.linalg.inv', (['covar'], {}), '(covar)\n', (1973, 1980), True, 'import numpy as np\n'), ((1999, 2019), 'numpy.linalg.det', 'np.linalg.det', (['covar'], {}), '(covar)\n', (2012, 2019), True, 'import numpy as np\n'), ((2600, 2668), 'numpy.array', 'np.array', (['[left_x_bound, right_x_bound, left_y_bound, right_y_bound]'], {}), '([left_x_bound, right_x_bound, left_y_bound, right_y_bound])\n', (2608, 2668), True, 'import numpy as np\n'), ((1409, 1446), 'math.exp', 'exp', (['(-1 / (2 * var) * (x - mean) ** 2)'], {}), '(-1 / (2 * var) * (x - mean) ** 2)\n', (1412, 1446), False, 'from math import exp, sqrt, pi\n'), ((1473, 1482), 'math.sqrt', 'sqrt', (['var'], {}), '(var)\n', (1477, 1482), False, 'from math import exp, sqrt, pi\n'), ((1520, 1529), 'math.sqrt', 'sqrt', (['var'], {}), '(var)\n', (1524, 1529), False, 'from math import exp, sqrt, pi\n'), ((1895, 1939), 'numpy.outer', 'np.outer', (['(X[i, :] - mean)', '(X[i, :] - mean).T'], {}), '(X[i, :] - mean, (X[i, :] - mean).T)\n', (1903, 1939), True, 'import numpy as np\n'), ((1394, 1412), 'math.sqrt', 'sqrt', (['(2 * pi * var)'], {}), '(2 * pi * var)\n', (1398, 1412), False, 'from math import exp, sqrt, pi\n'), ((2299, 2315), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (2307, 2315), True, 'import numpy as np\n'), ((2364, 2381), 'math.sqrt', 'sqrt', (['covar[0, 0]'], {}), '(covar[0, 0])\n', (2368, 2381), False, 'from math import exp, sqrt, pi\n'), ((2429, 2446), 'math.sqrt', 'sqrt', (['covar[0, 0]'], {}), '(covar[0, 0])\n', (2433, 2446), False, 'from math import exp, sqrt, pi\n'), ((2493, 2510), 'math.sqrt', 'sqrt', (['covar[1, 1]'], {}), '(covar[1, 1])\n', (2497, 2510), False, 'from math 
import exp, sqrt, pi\n'), ((2558, 2575), 'math.sqrt', 'sqrt', (['covar[1, 1]'], {}), '(covar[1, 1])\n', (2562, 2575), False, 'from math import exp, sqrt, pi\n')] |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six import with_metaclass
import numpy as np
import itertools
from slicerator import Slicerator, propagate_attr, index_attr
from .frame import Frame
from abc import ABCMeta, abstractmethod, abstractproperty
from warnings import warn
class FramesStream(with_metaclass(ABCMeta, object)):
    """
    A base class for wrapping input data which knows how to
    advance to the next frame, but does not have random access.

    The length does not need to be finite.

    Does not support slicing.
    """
    # Legacy Python 2 metaclass declaration; redundant with with_metaclass
    # above (and ignored on Python 3), kept for the 2/3-compatible style.
    __metaclass__ = ABCMeta

    @abstractmethod
    def __iter__(self):
        pass

    @abstractproperty
    def pixel_type(self):
        """Returns a numpy.dtype for the data type of the pixel values"""
        pass

    @abstractproperty
    def frame_shape(self):
        """Returns the shape of a single frame as a tuple ex (10, 12)"""
        pass

    @classmethod
    def class_exts(cls):
        """
        Return a set of the file extensions that this reader can deal with.
        Sub-classes should over-ride this function to list what extensions
        they deal with.

        The default interpretation of the returned set is 'file
        extensions including but not exclusively'.
        """
        return set()

    @property
    def exts(self):
        """
        Property to get the extensions of a FramesStream class.

        Calls relevant classmethod.
        """
        return type(self).class_exts()

    def close(self):
        """
        A method to clean up anything that need to be cleaned up.

        Sub-classes should use super to call up the MRO stack and then
        do any class-specific clean up
        """
        pass

    def _validate_process_func(self, process_func):
        # Normalize process_func: default to the identity so subclasses can
        # always call self.process_func unconditionally.
        if process_func is None:
            process_func = lambda x: x
        if not callable(process_func):
            raise ValueError("process_func must be a function, or None")
        self.process_func = process_func

    def _as_grey(self, as_grey, process_func):
        # See skimage.color.colorconv in the scikit-image project.
        # As noted there, the weights used in this conversion are calibrated
        # for contemporary CRT phosphors. Any alpha channel is ignored.
        if as_grey:
            # as_grey and process_func are mutually exclusive ways of
            # post-processing frames.
            if process_func is not None:
                raise ValueError("The as_grey option cannot be used when "
                                 "process_func is specified. Incorpate "
                                 "greyscale conversion in the function "
                                 "passed to process_func.")
            shape = self.frame_shape
            ndim = len(shape)
            # Look for dimensions that look like color channels.
            rgb_like = shape.count(3) == 1
            rgba_like = shape.count(4) == 1
            if ndim == 2:
                # The image is already greyscale.
                process_func = None
            elif ndim == 3 and (rgb_like or rgba_like):
                reduced_shape = list(shape)
                if rgb_like:
                    color_axis_size = 3
                    calibration = [0.2125, 0.7154, 0.0721]
                else:
                    # RGBA: zero weight on the alpha channel.
                    color_axis_size = 4
                    calibration = [0.2125, 0.7154, 0.0721, 0]
                reduced_shape.remove(color_axis_size)
                # NOTE(review): presumably subclasses report _im_sz as the
                # frame shape after conversion — confirm against readers.
                self._im_sz = tuple(reduced_shape)

                def convert_to_grey(img):
                    # Move the color axis to the end, then apply the
                    # luma weights and sum over it.
                    color_axis = img.shape.index(color_axis_size)
                    img = np.rollaxis(img, color_axis, 3)
                    grey = (img * calibration).sum(2)
                    return grey.astype(img.dtype)  # coerce to original dtype
                self.process_func = convert_to_grey
            else:
                raise NotImplementedError("I don't know how to convert an "
                                          "image of shaped {0} to greyscale. "
                                          "Write you own function and pass "
                                          "it using the process_func "
                                          "keyword argument.".format(shape))

    # magic functions to make all sub-classes usable as context managers
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def __repr__(self):
        # May be overwritten by subclasses
        return """<Frames>
Frame Shape: {frame_shape!r}
Pixel Datatype: {dtype}""".format(frame_shape=self.frame_shape,
                                  dtype=self.pixel_type)
@Slicerator.from_class
class FramesSequence(FramesStream):
"""Baseclass for wrapping data buckets that have random access.
Support random access.
Supports standard slicing and fancy slicing and returns a resliceable
Slicerator object.
Must be finite length.
"""
propagate_attrs = ['frame_shape', 'pixel_type']
def __getitem__(self, key):
"""__getitem__ is handled by Slicerator. In all pims readers, the data
returning function is get_frame."""
return self.get_frame(key)
def __iter__(self):
return iter(self[:])
@abstractmethod
def __len__(self):
"""
It is obligatory that sub-classes define a length.
"""
pass
@abstractmethod
def get_frame(self, ind):
"""
Sub classes must over-ride this function for how to get a given
frame out of the file. Any data-type specific internal-state
nonsense should be dealt with in this function.
"""
pass
def __repr__(self):
# May be overwritten by subclasses
return """<Frames>
Length: {count} frames
Frame Shape: {w} x {h}
Pixel Datatype: {dtype}""".format(w=self.frame_shape[0],
h=self.frame_shape[1],
count=len(self),
dtype=self.pixel_type)
class FrameRewindableStream(FramesStream):
"""
A base class for holding the common code for
wrapping data sources that do not rewind easily.
"""
@abstractmethod
def rewind(self, j=0):
"""
Resets the stream to frame j
j : int
Frame to rewind the stream to
"""
pass
@abstractmethod
def skip_forward(self, j):
"""
Skip the stream forward by j frames.
j : int
Number of frames to skip
"""
pass
@abstractmethod
def next(self):
"""
return the next frame in the stream
"""
pass
@abstractmethod
def __len__(self):
pass
@abstractproperty
def current(self):
"""
The current location in the stream.
Can be an int if in stream or None if out the end.
"""
pass
def __iter__(self):
self.rewind(0)
return self
def __getitem__(self, arg):
"""
Returns a generator which yields frames
"""
if isinstance(arg, slice):
# get value from slice
start, stop, step = arg.start, arg.stop, arg.step
# sanitize step
if step is None:
step = 1
if step < 1:
raise ValueError("step must be positive")
# make sure the stream is in the right place to start
if start is None:
start = 0
if start < self.current:
self.rewind(start)
if start > self.current:
self.skip_forward(start - self.current)
# sanity check
if stop is not None and stop < start:
raise ValueError("start must be less than stop")
# special case, we can't just return self, because __iter__ rewinds
if step == 1 and stop is None:
# keep going until exhausted
return (self.next() for _ in itertools.repeat(True))
return self._step_gen(step, stop)
elif isinstance(arg, int):
self.rewind(arg)
return self.next()
else:
raise ValueError("Invalid argument, use either a `slice` or " +
"or an `int`. not {t}".format(t=str(type(arg))))
def _step_gen(self, step, stop):
"""
Wraps up the logic of stepping forward by step > 1
"""
while stop is None or self.current < stop:
yield self.next()
self.skip_forward(step - 1)
else:
raise StopIteration
def __repr__(self):
# May be overwritten by subclasses
return """<Frames>
Length: {count} frames
Frame Shape: {w} x {h}
Pixel Datatype: {dtype}""".format(w=self.frame_shape[0],
h=self.frame_shape[1],
count=len(self),
dtype=self.pixel_type)
def _iter_attr(obj):
try:
for ns in [obj] + obj.__class__.mro():
for attr in ns.__dict__:
yield ns.__dict__[attr]
except AttributeError:
raise StopIteration # obj has no __dict__
def _transpose(get_frame, expected_axes, desired_axes):
if list(expected_axes) == list(desired_axes):
return get_frame
else:
transposition = [expected_axes.index(a) for a in desired_axes]
def get_frame_T(**ind):
return get_frame(**ind).transpose(transposition)
return get_frame_T
def _bundle(get_frame, expected_axes, to_iter, sizes, dtype):
bundled_axes = to_iter + expected_axes
shape = [sizes[a] for a in bundled_axes]
iter_shape = shape[:len(to_iter)]
def get_frame_bundled(**ind):
result = np.empty(shape, dtype=dtype)
md_list = []
for indices in itertools.product(*[range(s) for s in iter_shape]):
ind.update({n: i for n, i in zip(to_iter, indices)})
frame = get_frame(**ind)
result[indices] = frame
if hasattr(frame, 'metadata'):
if frame.metadata is not None:
md_list.append(frame.metadata)
# propagate metadata
if len(md_list) == np.prod(iter_shape):
metadata = dict()
keys = md_list[0].keys()
for k in keys:
try:
metadata[k] = [row[k] for row in md_list]
except KeyError:
# if a field is not present in every frame, ignore it
warn('metadata field {} is not propagated')
else:
# if all values are equal, only return one value
if metadata[k][1:] == metadata[k][:-1]:
metadata[k] = metadata[k][0]
else: # cast into ndarray
metadata[k] = np.array(metadata[k])
metadata[k].shape = iter_shape
else:
metadata = None
return Frame(result, metadata=metadata)
return get_frame_bundled, bundled_axes
def _drop(get_frame, expected_axes, to_drop):
# sort axes in descending order for correct function of np.take
to_drop_inds = [list(expected_axes).index(a) for a in to_drop]
indices = np.argsort(to_drop_inds)
axes = [to_drop_inds[i] for i in reversed(indices)]
to_drop = [to_drop[i] for i in reversed(indices)]
result_axes = [a for a in expected_axes if a not in to_drop]
def get_frame_dropped(**ind):
result = get_frame(**ind)
for (ax, name) in zip(axes, to_drop):
result = np.take(result, ind[name], axis=ax)
return result
return get_frame_dropped, result_axes
def _make_get_frame(result_axes, get_frame_dict, sizes, dtype):
methods = list(get_frame_dict.keys())
result_axes = [a for a in result_axes]
result_axes_set = set(result_axes)
# search for get_frame methods that return the right axes
for axes in methods:
if len(set(axes) ^ result_axes_set) == 0:
# _transpose does nothing when axes == result_axes
return _transpose(get_frame_dict[axes], axes, result_axes)
# we need either to drop axes or to iterate over axes:
# collect some numbers to decide what to do
arr = [None] * len(methods)
for i, method in enumerate(methods):
axes_set = set(method)
to_iter_set = result_axes_set - axes_set
to_iter = [x for x in result_axes if x in to_iter_set] # fix the order
n_iter = int(np.prod([sizes[ax] for ax in to_iter]))
to_drop = list(axes_set - result_axes_set)
n_drop = int(np.prod([sizes[ax] for ax in to_drop]))
arr[i] = [method, axes_set, to_iter, n_iter, to_drop, n_drop]
# try to read as less data as possible: try n_drop == 0
# sort in increasing number of iterations
arr.sort(key=lambda x: x[3])
for method, axes_set, to_iter, n_iter, to_drop, n_drop in arr:
if n_drop > 0:
continue
bundled_axes = to_iter + list(method)
get_frame, after_bundle = _bundle(get_frame_dict[method], method,
to_iter, sizes, dtype)
return _transpose(get_frame, bundled_axes, result_axes)
# try to iterate without dropping axes
# sort in increasing number of dropped frames
# TODO: sometimes dropping some data is better than having many iterations
arr.sort(key=lambda x: x[5])
for method, axes_set, to_iter, n_iter, to_drop, n_drop in arr:
if n_iter > 0:
continue
get_frame, after_drop = _drop(get_frame_dict[method], method, to_drop)
return _transpose(get_frame, after_drop, result_axes)
# worst case: all methods have both too many axes and require iteration
# take lowest number of dropped frames
# if indecisive, take lowest number of iterations
arr.sort(key=lambda x: (x[3], x[5]))
method, axes_set, to_iter, n_iter, to_drop, n_drop = arr[0]
get_frame, after_drop = _drop(get_frame_dict[method], method, to_drop)
get_frame, after_bundle = _bundle(get_frame, after_drop, to_iter,
sizes, dtype)
return _transpose(get_frame, after_bundle, result_axes)
class FramesSequenceND(FramesSequence):
""" A base class defining a FramesSequence with an arbitrary number of
axes. In the context of this reader base class, dimensions like 'x', 'y',
't' and 'z' will be called axes. Indices along these axes will be called
coordinates.
The properties `bundle_axes`, `iter_axes`, and `default_coords` define
to which coordinates each index points. See below for a description of
each attribute.
Subclassed readers only need to define `pixel_type` and `__init__`. At least
one reader method needs to be registered as such using
`self._register_get_frame(method, <list of axes>)`.
In the `__init__`, axes need to be initialized using `_init_axis(name, size)`.
It is recommended to set default values to `bundle_axes` and `iter_axes`.
The attributes `__len__`, `get_frame`, and the attributes below are defined
by this base_class; these should not be changed by derived classes.
Attributes
----------
axes : list of strings
List of all available axes
ndim : int
Number of image axes
sizes : dict of int
Dictionary with all axis sizes
frame_shape : tuple of int
Shape of frames that will be returned by get_frame
iter_axes : iterable of strings
This determines which axes will be iterated over by the FramesSequence.
The last element in will iterate fastest. Default [].
bundle_axes : iterable of strings
This determines which axes will be bundled into one Frame. The axes in
the ndarray that is returned by get_frame have the same order as the
order in this list. Default ['y', 'x'].
default_coords: dict of int
When an axis is not present in both iter_axes and bundle_axes, the
coordinate contained in this dictionary will be used. Default 0 for each.
Examples
--------
>>> class DummyReaderND(FramesSequenceND):
... @property
... def pixel_type(self):
... return 'uint8'
... def __init__(self, shape, **axes):
... super(DummyReaderND, self).__init__() # properly initialize
... self._init_axis('y', shape[0])
... self._init_axis('x', shape[1])
... for name in axes:
... self._init_axis(name, axes[name])
... self._register_get_frame(self.get_frame_2D, 'yx')
... self.bundle_axes = 'yx' # set default value
... if 't' in axes:
... self.iter_axes = 't' # set default value
... def get_frame_2D(self, **ind):
... return np.zeros((self.sizes['y'], self.sizes['x']),
... dtype=self.pixel_type)
>>> frames = MDummy((64, 64), t=80, c=2, z=10, m=5)
>>> frames.bundle_axes = 'czyx'
>>> frames.iter_axes = 't'
>>> frames.default_coords['m'] = 3
>>> frames[5] # returns Frame at T=5, M=3 with shape (2, 10, 64, 64)
"""
def __init__(self):
self._clear_axes()
self._get_frame_dict = dict()
def _register_get_frame(self, method, axes):
axes = tuple([a for a in axes])
if not hasattr(self, '_get_frame_dict'):
warn("Please call FramesSequenceND.__init__() at the start of the"
"the reader initialization.")
self._get_frame_dict = dict()
self._get_frame_dict[axes] = method
def _clear_axes(self):
self._sizes = {}
self._default_coords = {}
self._iter_axes = []
self._bundle_axes = ['y', 'x']
self._get_frame_wrapped = None
def _init_axis(self, name, size, default=0):
# check if the axes have been initialized, if not, do it here
if not hasattr(self, '_sizes'):
warn("Please call FramesSequenceND.__init__() at the start of the"
"the reader initialization.")
self._clear_axes()
self._get_frame_dict = dict()
if name in self._sizes:
raise ValueError("axis '{}' already exists".format(name))
self._sizes[name] = int(size)
self.default_coords[name] = int(default)
def __len__(self):
return int(np.prod([self._sizes[d] for d in self._iter_axes]))
@property
def frame_shape(self):
""" Returns the shape of the frame as returned by get_frame. """
return tuple([self._sizes[d] for d in self._bundle_axes])
@property
def axes(self):
""" Returns a list of all axes. """
return [k for k in self._sizes]
@property
def ndim(self):
""" Returns the number of axes. """
return len(self._sizes)
@property
def sizes(self):
""" Returns a dict of all axis sizes. """
return self._sizes
@property
def bundle_axes(self):
""" This determines which axes will be bundled into one Frame.
The ndarray that is returned by get_frame has the same axis order
as the order of `bundle_axes`.
"""
return self._bundle_axes
@bundle_axes.setter
def bundle_axes(self, value):
value = list(value)
invalid = [k for k in value if k not in self._sizes]
if invalid:
raise ValueError("axes %r do not exist" % invalid)
for k in value:
if k in self._iter_axes:
del self._iter_axes[self._iter_axes.index(k)]
self._bundle_axes = value
if not hasattr(self, '_get_frame_dict'):
warn("Please call FramesSequenceND.__init__() at the start of the"
"the reader initialization.")
self._get_frame_dict = dict()
if len(self._get_frame_dict) == 0:
if hasattr(self, 'get_frame_2D'):
# include get_frame_2D for backwards compatibility
self._register_get_frame(self.get_frame_2D, 'yx')
else:
raise RuntimeError('No reader methods found. Register a reader '
'method with _register_get_frame')
# update the get_frame method
get_frame = _make_get_frame(self._bundle_axes, self._get_frame_dict,
self.sizes, self.pixel_type)
self._get_frame_wrapped = get_frame
@property
def iter_axes(self):
""" This determines which axes will be iterated over by the
FramesSequence. The last element will iterate fastest. """
return self._iter_axes
@iter_axes.setter
def iter_axes(self, value):
value = list(value)
invalid = [k for k in value if k not in self._sizes]
if invalid:
raise ValueError("axes %r do not exist" % invalid)
for k in value:
if k in self._bundle_axes:
del self._bundle_axes[self._bundle_axes.index(k)]
self._iter_axes = value
@property
def default_coords(self):
""" When a axis is not present in both iter_axes and bundle_axes, the
coordinate contained in this dictionary will be used. """
return self._default_coords
@default_coords.setter
def default_coords(self, value):
invalid = [k for k in value if k not in self._sizes]
if invalid:
raise ValueError("axes %r do not exist" % invalid)
self._default_coords.update(**value)
def get_frame(self, i):
""" Returns a Frame of shape determined by bundle_axes. The index value
is interpreted according to the iter_axes property. Coordinates not
present in both iter_axes and bundle_axes will be set to their default
value (see default_coords). """
if i > len(self):
raise IndexError('index out of range')
if self._get_frame_wrapped is None:
self.bundle_axes = tuple(self.bundle_axes) # kick bundle_axes
# start with the default coordinates
coords = self.default_coords.copy()
# list sizes of iteration axes
iter_sizes = [self._sizes[k] for k in self.iter_axes]
# list how much i has to increase to get an increase of coordinate n
iter_cumsizes = np.append(np.cumprod(iter_sizes[::-1])[-2::-1], 1)
# calculate the coordinates and update the coords dictionary
iter_coords = (i // iter_cumsizes) % iter_sizes
coords.update(**{k: v for k, v in zip(self.iter_axes, iter_coords)})
result = self._get_frame_wrapped(**coords)
if hasattr(result, 'metadata'):
metadata = result.metadata
else:
metadata = dict()
metadata_axes = set(self.axes) - set(self.bundle_axes)
metadata_coords = {ax: coords[ax] for ax in metadata_axes}
metadata.update(dict(axes=self.bundle_axes, coords=metadata_coords))
return Frame(result, frame_no=i, metadata=metadata)
def __repr__(self):
s = "<FramesSequenceND>\nAxes: {0}\n".format(self.ndim)
for dim in self._sizes:
s += "Axis '{0}' size: {1}\n".format(dim, self._sizes[dim])
s += """Pixel Datatype: {dtype}""".format(dtype=self.pixel_type)
return s
| [
"numpy.prod",
"numpy.rollaxis",
"numpy.argsort",
"numpy.take",
"numpy.array",
"numpy.empty",
"six.with_metaclass",
"warnings.warn",
"numpy.cumprod",
"itertools.repeat"
] | [((379, 410), 'six.with_metaclass', 'with_metaclass', (['ABCMeta', 'object'], {}), '(ABCMeta, object)\n', (393, 410), False, 'from six import with_metaclass\n'), ((11374, 11398), 'numpy.argsort', 'np.argsort', (['to_drop_inds'], {}), '(to_drop_inds)\n', (11384, 11398), True, 'import numpy as np\n'), ((9848, 9876), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (9856, 9876), True, 'import numpy as np\n'), ((10309, 10328), 'numpy.prod', 'np.prod', (['iter_shape'], {}), '(iter_shape)\n', (10316, 10328), True, 'import numpy as np\n'), ((11710, 11745), 'numpy.take', 'np.take', (['result', 'ind[name]'], {'axis': 'ax'}), '(result, ind[name], axis=ax)\n', (11717, 11745), True, 'import numpy as np\n'), ((12634, 12672), 'numpy.prod', 'np.prod', (['[sizes[ax] for ax in to_iter]'], {}), '([sizes[ax] for ax in to_iter])\n', (12641, 12672), True, 'import numpy as np\n'), ((12746, 12784), 'numpy.prod', 'np.prod', (['[sizes[ax] for ax in to_drop]'], {}), '([sizes[ax] for ax in to_drop])\n', (12753, 12784), True, 'import numpy as np\n'), ((17544, 17647), 'warnings.warn', 'warn', (['"""Please call FramesSequenceND.__init__() at the start of thethe reader initialization."""'], {}), "(\n 'Please call FramesSequenceND.__init__() at the start of thethe reader initialization.'\n )\n", (17548, 17647), False, 'from warnings import warn\n'), ((18110, 18213), 'warnings.warn', 'warn', (['"""Please call FramesSequenceND.__init__() at the start of thethe reader initialization."""'], {}), "(\n 'Please call FramesSequenceND.__init__() at the start of thethe reader initialization.'\n )\n", (18114, 18213), False, 'from warnings import warn\n'), ((18529, 18579), 'numpy.prod', 'np.prod', (['[self._sizes[d] for d in self._iter_axes]'], {}), '([self._sizes[d] for d in self._iter_axes])\n', (18536, 18579), True, 'import numpy as np\n'), ((19827, 19930), 'warnings.warn', 'warn', (['"""Please call FramesSequenceND.__init__() at the start of thethe reader 
initialization."""'], {}), "(\n 'Please call FramesSequenceND.__init__() at the start of thethe reader initialization.'\n )\n", (19831, 19930), False, 'from warnings import warn\n'), ((22477, 22505), 'numpy.cumprod', 'np.cumprod', (['iter_sizes[::-1]'], {}), '(iter_sizes[::-1])\n', (22487, 22505), True, 'import numpy as np\n'), ((3611, 3642), 'numpy.rollaxis', 'np.rollaxis', (['img', 'color_axis', '(3)'], {}), '(img, color_axis, 3)\n', (3622, 3642), True, 'import numpy as np\n'), ((8054, 8076), 'itertools.repeat', 'itertools.repeat', (['(True)'], {}), '(True)\n', (8070, 8076), False, 'import itertools\n'), ((10634, 10677), 'warnings.warn', 'warn', (['"""metadata field {} is not propagated"""'], {}), "('metadata field {} is not propagated')\n", (10638, 10677), False, 'from warnings import warn\n'), ((10967, 10988), 'numpy.array', 'np.array', (['metadata[k]'], {}), '(metadata[k])\n', (10975, 10988), True, 'import numpy as np\n')] |
from functools import wraps
import sys, time, os
import numpy as np
# Atomic weight
data = { "xx" : 1.00794, "H" : 1.00794, "He" : 4.00260, "Li" : 6.941, "Be" : 9.012187, "B" : 10.811,
"C" : 12.0107, "N" : 14.00674, "O" : 15.9994, "F" : 18.99840, "Ne" : 20.1797, "Na" : 22.98977,
"Mg" : 24.3050, "Al" : 26.98152, "Si" : 28.0855, "P" : 30.97376, "S" : 32.066, "Cl" : 35.4527,
"Ar" : 39.948, "K" : 39.0983, "Ca" : 40.078, "Sc" : 44.95591, "Ti" : 47.867, "V" : 50.9415,
"Cr" : 51.9961, "Mn" : 54.93805, "Fe" : 55.845, "Co" : 58.93320, "Ni" : 58.6934, "Cu" : 63.546,
"Zn" : 65.39, "Ga" : 69.723, "Ge" : 72.61, "As" : 74.92160, "Se" : 78.96, "Br" : 79.904,
"Kr" : 83.80, "Rb" : 85.4678, "Sr" : 87.62, "Y" : 88.90585, "Zr" : 91.224, "Nb" : 92.90638,
"Mo" : 95.94, "Tc" : 98.0, "Ru" : 101.07, "Rh" : 102.90550, "Pd" : 106.42, "Ag" : 107.8682,
"Cd" : 112.411, "In" : 114.818, "Sn" : 118.710, "Sb" : 121.760, "Te" : 127.60, "I" : 126.90477,
"Xe" : 131.29, "Cs" : 132.90545, "Ba" : 137.327, "La" : 138.9055, "Ce" : 140.116, "Pr" : 140.90765,
"Nd" : 144.24, "Pm" : 145.0, "Sm" : 150.36, "Eu" : 151.964, "Gd" : 157.24, "Tb" : 158.92534,
"Dy" : 162.50, "Ho" : 164.93032, "Er" : 167.26, "Tm" : 168.93421, "Yb" : 173.04, "Lu" : 174.967,
"Hf" : 178.49, "Ta" : 180.9479, "W" : 183.84, "Re" : 186.207, "Os" : 190.23, "Ir" : 192.217,
"Pt" : 195.078, "Au" : 196.96655, "Hg" : 200.59, "Tl" : 204.3833, "Pb" : 207.2, "Bi" :208.98038,
"Po" : 209.0, "At" : 210.0, "Rn" : 222.0, "Fr" :223.0, "Ra" : 226.0, "Ac" : 227.0,
"Th" : 232.0381, "Pa" : 231.03588, "U" : 238.0289, "Np" : 237.0, "Pu" : 244.0, "Am" : 243.0,
"Cm" : 247.0, "Bk" : 247.0, "Cf" : 251.0, "Es" : 252.0, "Fm" : 257.0, "Md" : 258.0,
"No" : 259.0, "Lr" : 262.0, "Rf" : 261.0, "Db" : 262.0, "Sg" : 263.0, "Bh" : 264.0,
"Hs" : 265.0, "Mt" : 268.0, "Ds" : 271.0, "Rg" : 272.0, "Uub" : 285.0, "Uut" : 284.0,
"Uuq" : 289.0, "Uup" : 288.0, "Uuh" : 292.0}
# Conversion units
amu_to_au = 1822.888486192
au_to_A = 0.529177249
A_to_au = 1 / au_to_A
au_to_fs = 0.02418884344
fs_to_au = 1 / au_to_fs # = 41.34137304
au_to_K = 3.15774646E+5
au_to_eV = 27.2113961
eV_to_au = 1 / au_to_eV # = 0.03674931
au_to_kcalmol = 627.503
kcalmol_to_au = 1 / au_to_kcalmol # = 0.00159362
# Speed of light in atomic unit
c = 137.035999108108
# Frequency unit
cm_to_au = 1.0E-8 * au_to_A * c
eps = 1.0E-12
data.update({n : amu_to_au * data[n] for n in data.keys()})
def elapsed_time(func):
@wraps(func)
def check(*args, **kwargs):
tbegin = time.time()
func(*args, **kwargs)
tend = time.time()
print (f"{func.__name__} : Elapsed time = {tend - tbegin} seconds", flush=True)
return check
def call_name():
return sys._getframe(1).f_code.co_name
def typewriter(string, dir_name, filename, mode):
""" Function to open/write any string in dir_name/filename
:param string string: Text string for output file
:param string dir_name: Directory of output file
:param string filename: Filename of output file
:param string mode: Fileopen mode
"""
tmp_name = os.path.join(dir_name, filename)
with open(tmp_name, mode) as f:
f.write(string + "\n")
def gaussian1d(x, const, sigma, x0):
if (sigma < 0.0):
return -1
else:
res = const / (sigma * np.sqrt(2. * np.pi)) * np.exp(- (x - x0) ** 2 / (2. * sigma ** 2))
return res
| [
"numpy.sqrt",
"os.path.join",
"sys._getframe",
"functools.wraps",
"numpy.exp",
"time.time"
] | [((2569, 2580), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2574, 2580), False, 'from functools import wraps\n'), ((3216, 3248), 'os.path.join', 'os.path.join', (['dir_name', 'filename'], {}), '(dir_name, filename)\n', (3228, 3248), False, 'import sys, time, os\n'), ((2630, 2641), 'time.time', 'time.time', ([], {}), '()\n', (2639, 2641), False, 'import sys, time, os\n'), ((2687, 2698), 'time.time', 'time.time', ([], {}), '()\n', (2696, 2698), False, 'import sys, time, os\n'), ((2833, 2849), 'sys._getframe', 'sys._getframe', (['(1)'], {}), '(1)\n', (2846, 2849), False, 'import sys, time, os\n'), ((3458, 3501), 'numpy.exp', 'np.exp', (['(-(x - x0) ** 2 / (2.0 * sigma ** 2))'], {}), '(-(x - x0) ** 2 / (2.0 * sigma ** 2))\n', (3464, 3501), True, 'import numpy as np\n'), ((3435, 3455), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (3442, 3455), True, 'import numpy as np\n')] |
import json
import os
from os.path import join as pjoin
import nibabel as nib
import numpy as np
import torch
from deep_hilbert_inverse_3chan_sparse import DeepHilbertInverse, MyDataset
from spectral_blending import blend_method
from utils import HilbertPlane, NORMALIZATION
def dataset_prediction(plane: HilbertPlane):
with open('train_valid.json', 'r') as json_file:
json_dict = json.load(json_file)
test_files = json_dict['test_files']
pre_dir = 'valid_hilbert_inverse_3chan_sparse'
pre_path = [
x for x in sorted(os.listdir(pre_dir))
if x.endswith('.ckpt') and x.startswith('epoch')
][-1]
print(f'loading: {pjoin(pre_dir, pre_path)}')
model = DeepHilbertInverse.load_from_checkpoint(pjoin(pre_dir, pre_path))
model = model.cuda()
model.eval()
dataset = MyDataset(
filename=test_files[0],
plane=plane,
transform=model.trafo_valid,
)
reco = np.zeros((512, 512, 512))
for idx in range(512):
print(f'{idx+1}/512')
sample = dataset[idx]
hilbert, sparse = sample['hil'].cuda(), sample['sparse'].cuda()
hilbert = torch.cat([hilbert, sparse], dim=0)[None, ...]
pred = model(hilbert)
pred = pred.detach().cpu().numpy()[0, 0]
pred *= NORMALIZATION['images_99']
pred[pred < 0] = 0
if plane is HilbertPlane.CORONAL:
reco[:, idx] = pred
else:
reco[idx] = pred
image = nib.Nifti1Image(reco, np.eye(4))
nib.save(image, pjoin('testing', f'inv_sp3_{plane.name.lower()}.nii.gz'))
def main():
dataset_prediction(HilbertPlane.CORONAL)
dataset_prediction(HilbertPlane.SAGITTAL)
blend_method('inv_sp3')
if __name__ == "__main__":
main()
| [
"numpy.eye",
"os.listdir",
"os.path.join",
"numpy.zeros",
"json.load",
"spectral_blending.blend_method",
"deep_hilbert_inverse_3chan_sparse.MyDataset",
"torch.cat"
] | [((831, 906), 'deep_hilbert_inverse_3chan_sparse.MyDataset', 'MyDataset', ([], {'filename': 'test_files[0]', 'plane': 'plane', 'transform': 'model.trafo_valid'}), '(filename=test_files[0], plane=plane, transform=model.trafo_valid)\n', (840, 906), False, 'from deep_hilbert_inverse_3chan_sparse import DeepHilbertInverse, MyDataset\n'), ((949, 974), 'numpy.zeros', 'np.zeros', (['(512, 512, 512)'], {}), '((512, 512, 512))\n', (957, 974), True, 'import numpy as np\n'), ((1699, 1722), 'spectral_blending.blend_method', 'blend_method', (['"""inv_sp3"""'], {}), "('inv_sp3')\n", (1711, 1722), False, 'from spectral_blending import blend_method\n'), ((397, 417), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (406, 417), False, 'import json\n'), ((748, 772), 'os.path.join', 'pjoin', (['pre_dir', 'pre_path'], {}), '(pre_dir, pre_path)\n', (753, 772), True, 'from os.path import join as pjoin\n'), ((1501, 1510), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1507, 1510), True, 'import numpy as np\n'), ((1153, 1188), 'torch.cat', 'torch.cat', (['[hilbert, sparse]'], {'dim': '(0)'}), '([hilbert, sparse], dim=0)\n', (1162, 1188), False, 'import torch\n'), ((668, 692), 'os.path.join', 'pjoin', (['pre_dir', 'pre_path'], {}), '(pre_dir, pre_path)\n', (673, 692), True, 'from os.path import join as pjoin\n'), ((558, 577), 'os.listdir', 'os.listdir', (['pre_dir'], {}), '(pre_dir)\n', (568, 577), False, 'import os\n')] |
import stp.play as play
import stp.tactic as tactic
from rj_gameplay.tactic import clear_tactic, nmark_tactic, goalie_tactic
import stp.skill as skill
import stp.role as role
from stp.role.assignment.naive import NaiveRoleAssignment
import stp.rc as rc
import numpy as np
from typing import Dict, List, Tuple
class DefensiveClear(play.IPlay):
def __init__(self):
self.goalie = goalie_tactic.GoalieTactic()
self.two_mark = nmark_tactic.NMarkTactic(2)
self.clear = clear_tactic.Clear(np.array([0.0, 10.0]))
self.role_assigner = NaiveRoleAssignment()
def compute_props(self, prev_props):
pass
def tick(
self,
world_state: rc.WorldState,
prev_results: role.assignment.FlatRoleResults,
props,
) -> Tuple[Dict[tactic.SkillEntry, List[role.RoleResult]], List[tactic.SkillEntry]]:
# Get role requests from all tactics and put them into a dictionary
role_requests: play.RoleRequests = {}
role_requests[self.two_mark] = self.two_mark.get_requests(world_state, None)
role_requests[self.clear] = self.clear.get_requests(world_state, None)
role_requests[self.goalie] = self.goalie.get_requests(world_state, None)
# Flatten requests and use role assigner on them
flat_requests = play.flatten_requests(role_requests)
flat_results = self.role_assigner.assign_roles(
flat_requests, world_state, prev_results
)
role_results = play.unflatten_results(flat_results)
# Get list of all skills with assigned roles from tactics
skills: List[tactic.SkillEntry] = []
skills += self.two_mark.tick(world_state, role_results[self.two_mark])
skills += self.clear.tick(world_state, role_results[self.clear])
skills += self.goalie.tick(world_state, role_results[self.goalie])
skill_dict = {}
skill_dict.update(role_results[self.two_mark])
skill_dict.update(role_results[self.clear])
skill_dict.update(role_results[self.goalie])
return (skill_dict, skills)
def is_done(self, world_state):
return self.clear.is_done(world_state)
| [
"stp.play.flatten_requests",
"rj_gameplay.tactic.goalie_tactic.GoalieTactic",
"stp.play.unflatten_results",
"rj_gameplay.tactic.nmark_tactic.NMarkTactic",
"numpy.array",
"stp.role.assignment.naive.NaiveRoleAssignment"
] | [((393, 421), 'rj_gameplay.tactic.goalie_tactic.GoalieTactic', 'goalie_tactic.GoalieTactic', ([], {}), '()\n', (419, 421), False, 'from rj_gameplay.tactic import clear_tactic, nmark_tactic, goalie_tactic\n'), ((446, 473), 'rj_gameplay.tactic.nmark_tactic.NMarkTactic', 'nmark_tactic.NMarkTactic', (['(2)'], {}), '(2)\n', (470, 473), False, 'from rj_gameplay.tactic import clear_tactic, nmark_tactic, goalie_tactic\n'), ((566, 587), 'stp.role.assignment.naive.NaiveRoleAssignment', 'NaiveRoleAssignment', ([], {}), '()\n', (585, 587), False, 'from stp.role.assignment.naive import NaiveRoleAssignment\n'), ((1317, 1353), 'stp.play.flatten_requests', 'play.flatten_requests', (['role_requests'], {}), '(role_requests)\n', (1338, 1353), True, 'import stp.play as play\n'), ((1496, 1532), 'stp.play.unflatten_results', 'play.unflatten_results', (['flat_results'], {}), '(flat_results)\n', (1518, 1532), True, 'import stp.play as play\n'), ((514, 535), 'numpy.array', 'np.array', (['[0.0, 10.0]'], {}), '([0.0, 10.0])\n', (522, 535), True, 'import numpy as np\n')] |
import numpy as np
from pandas import (
TimedeltaIndex,
timedelta_range,
)
import pandas._testing as tm
class TestRepeat:
def test_repeat(self):
index = timedelta_range("1 days", periods=2, freq="D")
exp = TimedeltaIndex(["1 days", "1 days", "2 days", "2 days"])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = TimedeltaIndex(["1 days", "NaT", "3 days"])
exp = TimedeltaIndex(
[
"1 days",
"1 days",
"1 days",
"NaT",
"NaT",
"NaT",
"3 days",
"3 days",
"3 days",
]
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
| [
"pandas._testing.assert_index_equal",
"pandas.TimedeltaIndex",
"numpy.repeat",
"pandas.timedelta_range"
] | [((187, 233), 'pandas.timedelta_range', 'timedelta_range', (['"""1 days"""'], {'periods': '(2)', 'freq': '"""D"""'}), "('1 days', periods=2, freq='D')\n", (202, 233), False, 'from pandas import TimedeltaIndex, timedelta_range\n'), ((249, 305), 'pandas.TimedeltaIndex', 'TimedeltaIndex', (["['1 days', '1 days', '2 days', '2 days']"], {}), "(['1 days', '1 days', '2 days', '2 days'])\n", (263, 305), False, 'from pandas import TimedeltaIndex, timedelta_range\n'), ((467, 510), 'pandas.TimedeltaIndex', 'TimedeltaIndex', (["['1 days', 'NaT', '3 days']"], {}), "(['1 days', 'NaT', '3 days'])\n", (481, 510), False, 'from pandas import TimedeltaIndex, timedelta_range\n'), ((526, 627), 'pandas.TimedeltaIndex', 'TimedeltaIndex', (["['1 days', '1 days', '1 days', 'NaT', 'NaT', 'NaT', '3 days', '3 days',\n '3 days']"], {}), "(['1 days', '1 days', '1 days', 'NaT', 'NaT', 'NaT', '3 days',\n '3 days', '3 days'])\n", (540, 627), False, 'from pandas import TimedeltaIndex, timedelta_range\n'), ((344, 363), 'numpy.repeat', 'np.repeat', (['index', '(2)'], {}), '(index, 2)\n', (353, 363), True, 'import numpy as np\n'), ((379, 410), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['res', 'exp'], {}), '(res, exp)\n', (400, 410), True, 'import pandas._testing as tm\n'), ((855, 874), 'numpy.repeat', 'np.repeat', (['index', '(3)'], {}), '(index, 3)\n', (864, 874), True, 'import numpy as np\n'), ((890, 921), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['res', 'exp'], {}), '(res, exp)\n', (911, 921), True, 'import pandas._testing as tm\n')] |
"""
Copyright (c) 2021, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import argparse
import glob
import logging
import os
import random
import sys
import timeit
from functools import partial
from os.path import join
from collections import OrderedDict
import numpy as np
from numpy.lib.shape_base import expand_dims
import torch
from torch.utils import data
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoTokenizer,
get_linear_schedule_with_warmup,
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from components.config import set_seed, to_list, register_args, validate_args, load_untrained_model, get_model_class
from components.dataset_utils import ListDataset
from components.disamb_dataset import (
read_disamb_instances_from_entity_candidates,
extract_disamb_features_from_examples,
disamb_collate_fn,
coverage_evaluation
)
from components.utils import mkdir_p, dump_json
logger = logging.getLogger(__name__)
def train(args, train_dataset, model, tokenizer):
    """Fine-tune `model` on `train_dataset` with the standard transformers loop.

    Features: AdamW + linear warmup/decay schedule, gradient accumulation,
    optional multi-GPU (DataParallel) and distributed (DDP) training,
    periodic tensorboard logging / evaluation / checkpointing, and resumption
    from a checkpoint directory named ``...checkpoint-<global_step>``.

    Returns:
        (global_step, tr_loss / global_step): total optimizer steps taken and
        the average training loss per step.
    """
    if args.local_rank in [-1, 0]:
        # Only the main process writes tensorboard logs and creates the output dir.
        tb_writer = SummaryWriter()
        mkdir_p(args.output_dir)
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    # Random sampling for single-process training; DistributedSampler shards the data under DDP.
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_collate_fn = partial(disamb_collate_fn, tokenizer=tokenizer)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=train_collate_fn)
    # t_total = total number of optimizer update steps (after gradient accumulation).
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay).
    # Weight decay is disabled for biases and LayerNorm weights, per usual BERT practice.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        # Warmup length: whichever is larger, an absolute step count or a ratio of t_total.
        num_warmup_steps=int(max(args.warmup_steps, t_total * args.warmup_ratio)),
        num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist (resuming from a checkpoint dir).
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    # multi-gpu training
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Warmup steps = %d", int(max(args.warmup_steps, t_total * args.warmup_ratio)))
    logger.info("  Total optimization steps = %d", t_total)
    global_step = 1
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        try:
            # set global_step to global_step of last saved checkpoint from model path
            # (checkpoint dirs are named "...checkpoint-<step>")
            checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
            logger.info("  Continuing training from checkpoint, will skip to saved global_step")
            logger.info("  Continuing training from epoch %d", epochs_trained)
            logger.info("  Continuing training from global step %d", global_step)
            logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
        except ValueError:
            # Path does not end in a step number: treat as a fresh model.
            logger.info("  Starting fine-tuning.")
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    # Added here for reproducibility
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "token_type_ids": batch[1],
                "attention_mask": batch[2],
                "sample_mask": batch[3],
                "labels": batch[4],
            }
            # These architectures do not use token type ids.
            if args.model_type in ["roberta", "distilbert", "camembert", "bart"]:
                del inputs["token_type_ids"]
            outputs = model(**inputs)
            # model outputs are always tuple in transformers (see doc)
            loss = outputs[0]
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel (not distributed) training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer every gradient_accumulation_steps micro-batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                # Log infomation
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logs = {}
                    # Fractional epoch progress; `_` is the current epoch index from train_iterator.
                    logs['epoch'] = _ + (step + 1) / len(epoch_iterator)
                    logs['learning_rate'] = scheduler.get_last_lr()[0]
                    logs['loss'] = (tr_loss - logging_loss) / args.logging_steps
                    logs['step'] = global_step
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logger.info("Training logs: {}".format(logs))
                    logging_loss = tr_loss
                # Log metrics
                if args.local_rank in [-1, 0] and args.eval_steps > 0 and global_step % args.eval_steps == 0:
                    # Only evaluate when single GPU otherwise metrics may not average well
                    if args.local_rank == -1 and args.evaluate_during_training:
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                        logger.info("Eval results: {}".format(dict(results)))
                # Save model checkpoint
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    # Take care of distributed/parallel training
                    model_to_save = model.module if hasattr(model, "module") else model
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    # Also persist optimizer/scheduler so training can resume exactly.
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, output_prediction=False):
    """Evaluate `model` on the prediction split and report accuracy/coverage.

    Loads (or builds) the eval features, runs argmax prediction over the
    disambiguation logits, and computes:
      - 'num problem': number of evaluated instances,
      - 'acc': fraction where the predicted index matches the label,
      - 'cov': coverage as computed by `coverage_evaluation`.

    If `output_prediction` is True, also writes per-instance predictions to
    ``<output_dir>/predictions.json`` keyed by feature pid.
    """
    # load examples
    dataset, examples = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(dataset)
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=partial(disamb_collate_fn, tokenizer=tokenizer))
    # multi-gpu evaluate
    if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation *****")
    logger.info("  Num examples = %d", len(dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    start_time = timeit.default_timer()
    all_pred_indexes = []
    all_labels = []
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "token_type_ids": batch[1],
                "attention_mask": batch[2],
                "sample_mask": batch[3],
                "labels": batch[4],
            }
            # These architectures do not use token type ids.
            if args.model_type in ["xlm", "roberta", "distilbert", "camembert", "bart"]:
                del inputs["token_type_ids"]
            logits = model(**inputs)[1]
            # Predicted candidate index = argmax over the logits dimension.
            pred_indexes = torch.argmax(logits, 1).detach().cpu()
        all_pred_indexes.append(pred_indexes)
        all_labels.append(batch[4].cpu())
    all_pred_indexes = torch.cat(all_pred_indexes).numpy()
    all_labels = torch.cat(all_labels).numpy()
    acc = np.sum(all_pred_indexes == all_labels) / len(all_pred_indexes)
    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))
    coverage = coverage_evaluation(examples, dataset, all_pred_indexes)
    results = {'num problem': len(all_pred_indexes), 'acc': acc, 'cov': coverage}
    # NOTE(review): `saving` is never used -- the same OrderedDict is rebuilt below
    # inside the dump_json call; this line looks like leftover debug code.
    saving = OrderedDict([(feat.pid, pred) for feat, pred in zip(dataset, all_pred_indexes.tolist())])
    # print(saving)
    if output_prediction:
        dump_json(OrderedDict([(feat.pid, pred) for feat, pred in zip(dataset, all_pred_indexes.tolist())]),
                  join(args.output_dir, 'predictions.json'))
    return results
def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
    """Load disambiguation examples/features, building and caching them if needed.

    The feature cache key encodes dataset id, split id, model type, and max
    sequence length. Under distributed training, only rank 0 builds the cache
    while other ranks wait at a barrier and then read it.

    Returns:
        ListDataset of features, plus the raw examples when
        `output_examples` is True.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        # Make sure only the first process in distributed training process the dataset, and the others will use the cache
        torch.distributed.barrier()
    # Load data features from cache or dataset file
    input_dir = args.data_dir if args.data_dir else "."
    split_file = args.predict_file if evaluate else args.train_file
    # File names are expected to look like "<dataset>_<split>_...".
    dataset_id = os.path.basename(split_file).split('_')[0]
    split_id = os.path.basename(split_file).split('_')[1]
    # split_file = '_'.(join(os.path.basename(split_file).split('_')[:2])
    cached_features_file = os.path.join('feature_cache',"disamb_{}_{}_{}_{}".format(dataset_id, split_id,args.model_type,args.max_seq_length))
    # Init features and dataset from cache if it exists
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        # cache exists
        logger.info("Loading features from cached file %s", cached_features_file)
        # NOTE(review): this local `data` shadows the module-level
        # `from torch.utils import data` import inside this function.
        data = torch.load(cached_features_file)
        examples = data['examples']
        features = data['features']
    else:
        # cache not exists, create it
        logger.info("Creating features from dataset file at %s", input_dir)
        candidate_file = args.predict_file if evaluate else args.train_file
        # TODO: hard coded for now
        example_cache = join('feature_cache', f'{dataset_id}_{split_id}_disamb_examples.bin')
        if os.path.exists(example_cache) and not args.overwrite_cache:
            examples = torch.load(example_cache)
        else:
            orig_split = split_id
            # Hard-coded GrailQA dataset file path (see TODO above).
            dataset_file = join('outputs', f'grailqa_v1.0_{orig_split}.json')
            examples = read_disamb_instances_from_entity_candidates(dataset_file, candidate_file)
            torch.save(examples, example_cache)
        features = extract_disamb_features_from_examples(args, tokenizer, examples, do_predict=args.do_predict)
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save({'examples': examples, 'features': features}, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        # Make sure only the first process in distributed training process the dataset, and the others will use the cache
        torch.distributed.barrier()
    if output_examples:
        return ListDataset(features), examples
    else:
        return ListDataset(features)
def main():
    """Entry point: parse CLI args, set up devices/logging, then train and/or evaluate.

    Orchestrates:
      1. Argument parsing and output-dir overwrite safety check.
      2. Optional remote (ptvsd) debugging, *opt-in only* (see fix note below).
      3. CUDA / distributed (NCCL) device setup and logging configuration.
      4. Model/tokenizer loading, training, checkpoint saving, and evaluation
         over one or all saved checkpoints.

    Returns:
        dict of evaluation results keyed by metric (suffixed by checkpoint step
        when evaluating multiple checkpoints).
    """
    # parse args
    parser = argparse.ArgumentParser()
    register_args(parser)
    args = parser.parse_args()
    # check output dir
    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )
    # FIX: server_ip/server_port used to be unconditionally hard-coded to
    # '0.0.0.0'/'12345', which forced EVERY run to block in
    # ptvsd.wait_for_attach() until a debugger connected. Remote debugging is
    # now opt-in: it only activates when the args actually carry an address.
    args.server_ip = getattr(args, 'server_ip', '')
    args.server_port = getattr(args, 'server_port', '')
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging (non-main ranks log warnings only, to avoid duplicate output)
    logging.basicConfig(
        stream=sys.stdout,
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
    )
    # Set seed
    set_seed(args)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()
    args.model_type = args.model_type.lower()
    # load model for training
    config, tokenizer, model = load_untrained_model(args)
    if args.local_rank == 0:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Save the trained model and the tokenizer (main process only)
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        # Take care of distributed/parallel training
        model_to_save = model.module if hasattr(model, "module") else model
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
        # Load a trained model and vocabulary that you have fine-tuned
        model = get_model_class(args).from_pretrained(args.output_dir)  # , force_download=True)
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        model.to(args.device)
    # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        if args.do_train:
            logger.info("Loading checkpoints saved during training for evaluation")
            checkpoints = [args.output_dir]
            if args.eval_all_checkpoints:
                checkpoints = list(
                    os.path.dirname(c)
                    for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
                )
        else:
            logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path)
            checkpoints = [args.model_name_or_path]
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            # Reload the model
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            model = get_model_class(args).from_pretrained(checkpoint)  # , force_download=True)
            model.to(args.device)
            # Evaluate
            result = evaluate(args, model, tokenizer, output_prediction=True)
            # Suffix metric names with the checkpoint step so results don't collide.
            result = dict((k + ("_{}".format(global_step) if global_step else ""), v) for k, v in result.items())
            results.update(result)
    logger.info("Results: {}".format(results))
    return results
if __name__ == "__main__":
main() | [
"logging.getLogger",
"components.utils.mkdir_p",
"components.config.set_seed",
"torch.cuda.device_count",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.is_available",
"transformers.AutoTokenizer.from_pretrained",
"torch.distributed.get_rank",
"torch.distributed.barrier",
"os.path.... | [((1353, 1380), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1370, 1380), False, 'import logging\n'), ((1780, 1827), 'functools.partial', 'partial', (['disamb_collate_fn'], {'tokenizer': 'tokenizer'}), '(disamb_collate_fn, tokenizer=tokenizer)\n', (1787, 1827), False, 'from functools import partial\n'), ((1851, 1967), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'sampler': 'train_sampler', 'batch_size': 'args.train_batch_size', 'collate_fn': 'train_collate_fn'}), '(train_dataset, sampler=train_sampler, batch_size=args.\n train_batch_size, collate_fn=train_collate_fn)\n', (1861, 1967), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((2704, 2790), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'eps': 'args.adam_epsilon'}), '(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.\n adam_epsilon)\n', (2709, 2790), False, 'from transformers import WEIGHTS_NAME, AdamW, AutoConfig, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((4700, 4739), 'os.path.exists', 'os.path.exists', (['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (4714, 4739), False, 'import os\n'), ((5884, 5898), 'components.config.set_seed', 'set_seed', (['args'], {}), '(args)\n', (5892, 5898), False, 'from components.config import set_seed, to_list, register_args, validate_args, load_untrained_model, get_model_class\n'), ((10683, 10709), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['dataset'], {}), '(dataset)\n', (10700, 10709), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((11203, 11225), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (11223, 11225), False, 'import timeit\n'), ((11290, 11330), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""'}), "(eval_dataloader, desc='Evaluating')\n", (11294, 
11330), False, 'from tqdm import tqdm, trange\n'), ((12364, 12420), 'components.disamb_dataset.coverage_evaluation', 'coverage_evaluation', (['examples', 'dataset', 'all_pred_indexes'], {}), '(examples, dataset, all_pred_indexes)\n', (12383, 12420), False, 'from components.disamb_dataset import read_disamb_instances_from_entity_candidates, extract_disamb_features_from_examples, disamb_collate_fn, coverage_evaluation\n'), ((15420, 15445), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15443, 15445), False, 'import argparse\n'), ((15450, 15471), 'components.config.register_args', 'register_args', (['parser'], {}), '(parser)\n', (15463, 15471), False, 'from components.config import set_seed, to_list, register_args, validate_args, load_untrained_model, get_model_class\n'), ((16898, 17112), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': '(logging.INFO if args.local_rank in [-1, 0] else logging.WARN)'}), "(stream=sys.stdout, format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else\n logging.WARN)\n", (16917, 17112), False, 'import logging\n'), ((17359, 17373), 'components.config.set_seed', 'set_seed', (['args'], {}), '(args)\n', (17367, 17373), False, 'from components.config import set_seed, to_list, register_args, validate_args, load_untrained_model, get_model_class\n'), ((17696, 17722), 'components.config.load_untrained_model', 'load_untrained_model', (['args'], {}), '(args)\n', (17716, 17722), False, 'from components.config import set_seed, to_list, register_args, validate_args, load_untrained_model, get_model_class\n'), ((1515, 1530), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (1528, 1530), False, 'from tensorboardX import SummaryWriter\n'), ((1539, 1563), 
'components.utils.mkdir_p', 'mkdir_p', (['args.output_dir'], {}), '(args.output_dir)\n', (1546, 1563), False, 'from components.utils import mkdir_p, dump_json\n'), ((1664, 1692), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (1677, 1692), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((1723, 1756), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_dataset'], {}), '(train_dataset)\n', (1741, 1756), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((3518, 3546), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (3539, 3546), False, 'import torch\n'), ((3621, 3764), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[args.local_rank]', 'output_device': 'args.local_rank', 'find_unused_parameters': '(True)'}), '(model, device_ids=[args.\n local_rank], output_device=args.local_rank, find_unused_parameters=True)\n', (3662, 3764), False, 'import torch\n'), ((5954, 6039), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iteration"""', 'disable': '(args.local_rank not in [-1, 0])'}), "(train_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0]\n )\n", (5958, 6039), False, 'from tqdm import tqdm, trange\n'), ((10504, 10532), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (10515, 10532), False, 'import os\n'), ((10981, 11009), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (11002, 11009), False, 'import torch\n'), ((12125, 12163), 'numpy.sum', 'np.sum', (['(all_pred_indexes == all_labels)'], {}), '(all_pred_indexes == all_labels)\n', (12131, 12163), True, 'import numpy as np\n'), ((12203, 12225), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (12223, 12225), False, 'import timeit\n'), ((13108, 13135), 
'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (13133, 13135), False, 'import torch\n'), ((13712, 13748), 'os.path.exists', 'os.path.exists', (['cached_features_file'], {}), '(cached_features_file)\n', (13726, 13748), False, 'import os\n'), ((13899, 13931), 'torch.load', 'torch.load', (['cached_features_file'], {}), '(cached_features_file)\n', (13909, 13931), False, 'import torch\n'), ((14263, 14332), 'os.path.join', 'join', (['"""feature_cache"""', 'f"""{dataset_id}_{split_id}_disamb_examples.bin"""'], {}), "('feature_cache', f'{dataset_id}_{split_id}_disamb_examples.bin')\n", (14267, 14332), False, 'from os.path import join\n'), ((14744, 14841), 'components.disamb_dataset.extract_disamb_features_from_examples', 'extract_disamb_features_from_examples', (['args', 'tokenizer', 'examples'], {'do_predict': 'args.do_predict'}), '(args, tokenizer, examples, do_predict\n =args.do_predict)\n', (14781, 14841), False, 'from components.disamb_dataset import read_disamb_instances_from_entity_candidates, extract_disamb_features_from_examples, disamb_collate_fn, coverage_evaluation\n'), ((15230, 15257), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (15255, 15257), False, 'import torch\n'), ((15354, 15375), 'components.dataset_utils.ListDataset', 'ListDataset', (['features'], {}), '(features)\n', (15365, 15375), False, 'from components.dataset_utils import ListDataset\n'), ((15535, 15566), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (15549, 15566), False, 'import os\n'), ((15571, 15598), 'os.listdir', 'os.listdir', (['args.output_dir'], {}), '(args.output_dir)\n', (15581, 15598), False, 'import os\n'), ((16186, 16275), 'ptvsd.enable_attach', 'ptvsd.enable_attach', ([], {'address': '(args.server_ip, args.server_port)', 'redirect_output': '(True)'}), '(address=(args.server_ip, args.server_port),\n redirect_output=True)\n', (16205, 16275), False, 'import ptvsd\n'), ((16280, 16303), 
'ptvsd.wait_for_attach', 'ptvsd.wait_for_attach', ([], {}), '()\n', (16301, 16303), False, 'import ptvsd\n'), ((16670, 16708), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (16691, 16708), False, 'import torch\n'), ((16726, 16763), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (16738, 16763), False, 'import torch\n'), ((16772, 16824), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (16808, 16824), False, 'import torch\n'), ((17559, 17586), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (17584, 17586), False, 'import torch\n'), ((17856, 17883), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (17881, 17883), False, 'import torch\n'), ((19213, 19298), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.output_dir'], {'do_lower_case': 'args.do_lower_case'}), '(args.output_dir, do_lower_case=args.do_lower_case\n )\n', (19242, 19298), False, 'from transformers import WEIGHTS_NAME, AdamW, AutoConfig, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((3058, 3111), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""optimizer.pt"""'], {}), "(args.model_name_or_path, 'optimizer.pt')\n", (3070, 3111), False, 'import os\n'), ((3141, 3194), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""scheduler.pt"""'], {}), "(args.model_name_or_path, 'scheduler.pt')\n", (3153, 3194), False, 'import os\n'), ((10432, 10463), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (10446, 10463), False, 'import os\n'), ((10818, 10865), 'functools.partial', 'partial', (['disamb_collate_fn'], {'tokenizer': 'tokenizer'}), '(disamb_collate_fn, tokenizer=tokenizer)\n', (10825, 10865), False, 'from functools import partial\n'), ((11422, 11437), 
'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11435, 11437), False, 'import torch\n'), ((12031, 12058), 'torch.cat', 'torch.cat', (['all_pred_indexes'], {}), '(all_pred_indexes)\n', (12040, 12058), False, 'import torch\n'), ((12084, 12105), 'torch.cat', 'torch.cat', (['all_labels'], {}), '(all_labels)\n', (12093, 12105), False, 'import torch\n'), ((12774, 12815), 'os.path.join', 'join', (['args.output_dir', '"""predictions.json"""'], {}), "(args.output_dir, 'predictions.json')\n", (12778, 12815), False, 'from os.path import join\n'), ((14344, 14373), 'os.path.exists', 'os.path.exists', (['example_cache'], {}), '(example_cache)\n', (14358, 14373), False, 'import os\n'), ((14427, 14452), 'torch.load', 'torch.load', (['example_cache'], {}), '(example_cache)\n', (14437, 14452), False, 'import torch\n'), ((14528, 14578), 'os.path.join', 'join', (['"""outputs"""', 'f"""grailqa_v1.0_{orig_split}.json"""'], {}), "('outputs', f'grailqa_v1.0_{orig_split}.json')\n", (14532, 14578), False, 'from os.path import join\n'), ((14602, 14676), 'components.disamb_dataset.read_disamb_instances_from_entity_candidates', 'read_disamb_instances_from_entity_candidates', (['dataset_file', 'candidate_file'], {}), '(dataset_file, candidate_file)\n', (14646, 14676), False, 'from components.disamb_dataset import read_disamb_instances_from_entity_candidates, extract_disamb_features_from_examples, disamb_collate_fn, coverage_evaluation\n'), ((14689, 14724), 'torch.save', 'torch.save', (['examples', 'example_cache'], {}), '(examples, example_cache)\n', (14699, 14724), False, 'import torch\n'), ((14974, 15052), 'torch.save', 'torch.save', (["{'examples': examples, 'features': features}", 'cached_features_file'], {}), "({'examples': examples, 'features': features}, cached_features_file)\n", (14984, 15052), False, 'import torch\n'), ((15297, 15318), 'components.dataset_utils.ListDataset', 'ListDataset', (['features'], {}), '(features)\n', (15308, 15318), False, 'from components.dataset_utils 
import ListDataset\n'), ((16539, 16564), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (16562, 16564), False, 'import torch\n'), ((18972, 19022), 'os.path.join', 'os.path.join', (['args.output_dir', '"""training_args.bin"""'], {}), "(args.output_dir, 'training_args.bin')\n", (18984, 19022), False, 'import os\n'), ((3296, 3349), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""optimizer.pt"""'], {}), "(args.model_name_or_path, 'optimizer.pt')\n", (3308, 3349), False, 'import os\n'), ((3397, 3450), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""scheduler.pt"""'], {}), "(args.model_name_or_path, 'scheduler.pt')\n", (3409, 3450), False, 'import os\n'), ((4237, 4271), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (4269, 4271), False, 'import torch\n'), ((13330, 13358), 'os.path.basename', 'os.path.basename', (['split_file'], {}), '(split_file)\n', (13346, 13358), False, 'import os\n'), ((13388, 13416), 'os.path.basename', 'os.path.basename', (['split_file'], {}), '(split_file)\n', (13404, 13416), False, 'import os\n'), ((18370, 18398), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (18396, 18398), False, 'import torch\n'), ((19112, 19133), 'components.config.get_model_class', 'get_model_class', (['args'], {}), '(args)\n', (19127, 19133), False, 'from components.config import set_seed, to_list, register_args, validate_args, load_untrained_model, get_model_class\n'), ((16436, 16461), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16459, 16461), False, 'import torch\n'), ((20289, 20310), 'components.config.get_model_class', 'get_model_class', (['args'], {}), '(args)\n', (20304, 20310), False, 'from components.config import set_seed, to_list, register_args, validate_args, load_untrained_model, get_model_class\n'), ((9467, 9512), 'os.path.join', 'os.path.join', (['output_dir', '"""training_args.bin"""'], {}), 
"(output_dir, 'training_args.bin')\n", (9479, 9512), False, 'import os\n'), ((9647, 9687), 'os.path.join', 'os.path.join', (['output_dir', '"""optimizer.pt"""'], {}), "(output_dir, 'optimizer.pt')\n", (9659, 9687), False, 'import os\n'), ((9744, 9784), 'os.path.join', 'os.path.join', (['output_dir', '"""scheduler.pt"""'], {}), "(output_dir, 'scheduler.pt')\n", (9756, 9784), False, 'import os\n'), ((19741, 19759), 'os.path.dirname', 'os.path.dirname', (['c'], {}), '(c)\n', (19756, 19759), False, 'import os\n'), ((11881, 11904), 'torch.argmax', 'torch.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (11893, 11904), False, 'import torch\n'), ((19796, 19862), 'glob.glob', 'glob.glob', (["(args.output_dir + '/**/' + WEIGHTS_NAME)"], {'recursive': '(True)'}), "(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)\n", (19805, 19862), False, 'import glob\n')] |
# Copyright 2016-2019 The <NAME> at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the retinanet layers"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.platform import test
from deepcell.utils import testing_utils
from deepcell import layers
class TestAnchors(test.TestCase):
    """Tests for the ``layers.Anchors`` anchor-generation layer."""

    @tf_test_util.run_in_graph_and_eager_modes()
    def test_anchors_2d(self):
        """Smoke-test the layer with static, undefined, and channels_first
        input shapes using the generic ``layer_test`` helper."""
        with self.test_session(use_gpu=True):
            testing_utils.layer_test(
                layers.Anchors,
                kwargs={'size': 1, 'stride': 1,
                        'data_format': 'channels_last'},
                custom_objects={'Anchors': layers.Anchors},
                input_shape=(3, 5, 6, 4))
            testing_utils.layer_test(
                layers.Anchors,
                kwargs={'size': 1, 'stride': 1,
                        'data_format': 'channels_last'},
                custom_objects={'Anchors': layers.Anchors},
                input_shape=(3, None, None, None))
            testing_utils.layer_test(
                layers.Anchors,
                kwargs={'size': 1, 'stride': 1,
                        'data_format': 'channels_first'},
                custom_objects={'Anchors': layers.Anchors},
                input_shape=(3, 5, 6, 4))

    @tf_test_util.run_in_graph_and_eager_modes()
    def test_simple(self):
        """Check the exact anchor boxes generated for a 2x2 feature map."""
        with self.test_session():
            # create simple Anchors layer
            anchors_layer = layers.Anchors(
                size=32,
                stride=8,
                # dtype passed by keyword for consistency with
                # test_mini_batch (was positional, silently relying on
                # np.array's second positional argument being dtype)
                ratios=np.array([1], dtype=K.floatx()),
                scales=np.array([1], dtype=K.floatx()),
            )
            # create fake features input (only shape is used anyway)
            features = np.zeros((1, 2, 2, 1024), dtype=K.floatx())
            features = K.variable(features)
            # call the Anchors layer
            anchors = anchors_layer.call(features)
            anchors = K.get_value(anchors)
            # expected anchor values
            expected = np.array([[
                [-12, -12, 20, 20],
                [-4, -12, 28, 20],
                [-12, -4, 20, 28],
                [-4, -4, 28, 28],
            ]], dtype=K.floatx())
            # test anchor values
            self.assertAllEqual(anchors, expected)

    @tf_test_util.run_in_graph_and_eager_modes()
    def test_mini_batch(self):
        """Anchors are tiled across the batch dimension (batch_size=2)."""
        with self.test_session():
            # create simple Anchors layer
            anchors_layer = layers.Anchors(
                size=32,
                stride=8,
                ratios=np.array([1], dtype=K.floatx()),
                scales=np.array([1], dtype=K.floatx()),
            )
            # create fake features input with batch_size=2
            features = np.zeros((2, 2, 2, 1024), dtype=K.floatx())
            features = K.variable(features)
            # call the Anchors layer
            anchors = anchors_layer.call(features)
            anchors = K.get_value(anchors)
            # expected anchor values are the single-batch anchors...
            expected = np.array([[
                [-12, -12, 20, 20],
                [-4, -12, 28, 20],
                [-12, -4, 20, 28],
                [-4, -4, 28, 28],
            ]], dtype=K.floatx())
            # ...tiled once per batch element
            expected = np.tile(expected, (2, 1, 1))
            # test anchor values
            self.assertAllEqual(anchors, expected)
class TestRegressBoxes(test.TestCase):
    """Tests for the ``layers.RegressBoxes`` layer, which applies
    regression deltas to anchor boxes."""
    @tf_test_util.run_in_graph_and_eager_modes()
    def test_simple(self):
        """A single batch of anchors shifted by regression deltas
        (default mean/std: deltas of 0.1 move/scale boxes by 1 unit)."""
        with self.test_session():
            # create simple RegressBoxes layer
            layer = layers.RegressBoxes()
            # create input
            anchors = np.array([[
                [0, 0, 10, 10],
                [50, 50, 100, 100],
                [20, 20, 40, 40],
            ]], dtype=K.floatx())
            anchors = K.variable(anchors)
            regression = np.array([[
                [0, 0, 0, 0],
                [0.1, 0.1, 0, 0],
                [0, 0, 0.1, 0.1],
            ]], dtype=K.floatx())
            regression = K.variable(regression)
            # compute output
            computed_shape = layer.compute_output_shape(
                [anchors.shape, regression.shape])
            actual = layer.call([anchors, regression])
            actual = K.get_value(actual)
            # output shape must agree with compute_output_shape
            self.assertEqual(actual.shape, computed_shape)
            # compute expected output
            expected = np.array([[
                [0, 0, 10, 10],
                [51, 51, 100, 100],
                [20, 20, 40.4, 40.4],
            ]], dtype=K.floatx())
            self.assertAllClose(actual, expected)
    @tf_test_util.run_in_graph_and_eager_modes()
    def test_mini_batch(self):
        """Two batches with permuted rows: each batch element is
        regressed independently, using custom mean/std."""
        with self.test_session():
            mean = [0, 0, 0, 0]
            std = [0.2, 0.2, 0.2, 0.2]
            # create simple RegressBoxes layer
            layer = layers.RegressBoxes(mean=mean, std=std)
            # create input
            anchors = np.array([
                [
                    [0, 0, 10, 10],  # 1
                    [50, 50, 100, 100],  # 2
                    [20, 20, 40, 40],  # 3
                ],
                [
                    [20, 20, 40, 40],  # 3
                    [0, 0, 10, 10],  # 1
                    [50, 50, 100, 100],  # 2
                ],
            ], dtype=K.floatx())
            anchors = K.variable(anchors)
            regression = np.array([
                [
                    [0, 0, 0, 0],  # 1
                    [0.1, 0.1, 0, 0],  # 2
                    [0, 0, 0.1, 0.1],  # 3
                ],
                [
                    [0, 0, 0.1, 0.1],  # 3
                    [0, 0, 0, 0],  # 1
                    [0.1, 0.1, 0, 0],  # 2
                ],
            ], dtype=K.floatx())
            regression = K.variable(regression)
            # compute output
            actual = layer.call([anchors, regression])
            actual = K.get_value(actual)
            # compute expected output (numbered rows match the inputs)
            expected = np.array([
                [
                    [0, 0, 10, 10],  # 1
                    [51, 51, 100, 100],  # 2
                    [20, 20, 40.4, 40.4],  # 3
                ],
                [
                    [20, 20, 40.4, 40.4],  # 3
                    [0, 0, 10, 10],  # 1
                    [51, 51, 100, 100],  # 2
                ],
            ], dtype=K.floatx())
            self.assertAllClose(actual, expected)
    @tf_test_util.run_in_graph_and_eager_modes()
    def test_invalid_input(self):
        """Non-array mean/std arguments must raise ValueError."""
        bad_mean = 'invalid_data_type'
        bad_std = 'invalid_data_type'
        with self.assertRaises(ValueError):
            layers.RegressBoxes(mean=bad_mean, std=None)
        with self.assertRaises(ValueError):
            layers.RegressBoxes(mean=None, std=bad_std)
class ClipBoxesTest(test.TestCase):
    """Tests for the ``layers.ClipBoxes`` layer."""

    @tf_test_util.run_in_graph_and_eager_modes()
    def test_simple(self):
        """Boxes are clamped to the image extent for both data formats."""
        h, w = np.random.randint(2, 5), np.random.randint(5, 9)
        raw_boxes = K.variable(np.array([[
            [9, 9, 9, 9],
            [-1, -1, -1, -1],
            [0, 0, w, h],
            [0, 0, w + 1, h + 1],
            [0, 0, w - 1, h - 1],
        ]], dtype='int'))
        # every coordinate clamped into [0, w] x [0, h]
        clipped_ref = np.array([[
            [w, h, w, h],
            [0, 0, 0, 0],
            [0, 0, w, h],
            [0, 0, w, h],
            [0, 0, w - 1, h - 1],
        ]], dtype=K.floatx())
        # exercise channels_last, then channels_first
        cases = (('channels_last', (1, h, w, 3)),
                 ('channels_first', (1, 6, h, w)))
        for data_format, image_shape in cases:
            with self.test_session():
                # create input image and the layer under test
                image = K.variable(np.random.random(image_shape))
                layer = layers.ClipBoxes(data_format=data_format)
                # compute output and its declared shape
                out_shape = layer.compute_output_shape(
                    [image.shape, raw_boxes.shape])
                result = K.get_value(layer.call([image, raw_boxes]))
                self.assertEqual(result.shape, tuple(out_shape))
                self.assertAllClose(result, clipped_ref)
| [
"numpy.tile",
"tensorflow.python.keras.backend.get_value",
"tensorflow.python.framework.test_util.run_in_graph_and_eager_modes",
"deepcell.layers.RegressBoxes",
"deepcell.layers.ClipBoxes",
"numpy.random.random",
"numpy.array",
"numpy.random.randint",
"tensorflow.python.keras.backend.floatx",
"dee... | [((1666, 1709), 'tensorflow.python.framework.test_util.run_in_graph_and_eager_modes', 'tf_test_util.run_in_graph_and_eager_modes', ([], {}), '()\n', (1707, 1709), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), ((2634, 2677), 'tensorflow.python.framework.test_util.run_in_graph_and_eager_modes', 'tf_test_util.run_in_graph_and_eager_modes', ([], {}), '()\n', (2675, 2677), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), ((3641, 3684), 'tensorflow.python.framework.test_util.run_in_graph_and_eager_modes', 'tf_test_util.run_in_graph_and_eager_modes', ([], {}), '()\n', (3682, 3684), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), ((4747, 4790), 'tensorflow.python.framework.test_util.run_in_graph_and_eager_modes', 'tf_test_util.run_in_graph_and_eager_modes', ([], {}), '()\n', (4788, 4790), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), ((5964, 6007), 'tensorflow.python.framework.test_util.run_in_graph_and_eager_modes', 'tf_test_util.run_in_graph_and_eager_modes', ([], {}), '()\n', (6005, 6007), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), ((7791, 7834), 'tensorflow.python.framework.test_util.run_in_graph_and_eager_modes', 'tf_test_util.run_in_graph_and_eager_modes', ([], {}), '()\n', (7832, 7834), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), ((8192, 8235), 'tensorflow.python.framework.test_util.run_in_graph_and_eager_modes', 'tf_test_util.run_in_graph_and_eager_modes', ([], {}), '()\n', (8233, 8235), True, 'from tensorflow.python.framework import test_util as tf_test_util\n'), ((8352, 8496), 'numpy.array', 'np.array', (['[[[9, 9, 9, 9], [-1, -1, -1, -1], [0, 0, img_w, img_h], [0, 0, img_w + 1, \n img_h + 1], [0, 0, img_w - 1, img_h - 1]]]'], {'dtype': '"""int"""'}), "([[[9, 9, 9, 9], [-1, -1, -1, -1], [0, 0, img_w, img_h], [0, 0, \n img_w + 1, img_h + 1], [0, 0, img_w - 1, img_h 
- 1]]], dtype='int')\n", (8360, 8496), True, 'import numpy as np\n'), ((8579, 8596), 'tensorflow.python.keras.backend.variable', 'K.variable', (['boxes'], {}), '(boxes)\n', (8589, 8596), True, 'from tensorflow.python.keras import backend as K\n'), ((1799, 1983), 'deepcell.utils.testing_utils.layer_test', 'testing_utils.layer_test', (['layers.Anchors'], {'kwargs': "{'size': 1, 'stride': 1, 'data_format': 'channels_last'}", 'custom_objects': "{'Anchors': layers.Anchors}", 'input_shape': '(3, 5, 6, 4)'}), "(layers.Anchors, kwargs={'size': 1, 'stride': 1,\n 'data_format': 'channels_last'}, custom_objects={'Anchors': layers.\n Anchors}, input_shape=(3, 5, 6, 4))\n", (1823, 1983), False, 'from deepcell.utils import testing_utils\n'), ((2076, 2269), 'deepcell.utils.testing_utils.layer_test', 'testing_utils.layer_test', (['layers.Anchors'], {'kwargs': "{'size': 1, 'stride': 1, 'data_format': 'channels_last'}", 'custom_objects': "{'Anchors': layers.Anchors}", 'input_shape': '(3, None, None, None)'}), "(layers.Anchors, kwargs={'size': 1, 'stride': 1,\n 'data_format': 'channels_last'}, custom_objects={'Anchors': layers.\n Anchors}, input_shape=(3, None, None, None))\n", (2100, 2269), False, 'from deepcell.utils import testing_utils\n'), ((2362, 2547), 'deepcell.utils.testing_utils.layer_test', 'testing_utils.layer_test', (['layers.Anchors'], {'kwargs': "{'size': 1, 'stride': 1, 'data_format': 'channels_first'}", 'custom_objects': "{'Anchors': layers.Anchors}", 'input_shape': '(3, 5, 6, 4)'}), "(layers.Anchors, kwargs={'size': 1, 'stride': 1,\n 'data_format': 'channels_first'}, custom_objects={'Anchors': layers.\n Anchors}, input_shape=(3, 5, 6, 4))\n", (2386, 2547), False, 'from deepcell.utils import testing_utils\n'), ((3150, 3170), 'tensorflow.python.keras.backend.variable', 'K.variable', (['features'], {}), '(features)\n', (3160, 3170), True, 'from tensorflow.python.keras import backend as K\n'), ((3282, 3302), 'tensorflow.python.keras.backend.get_value', 'K.get_value', 
(['anchors'], {}), '(anchors)\n', (3293, 3302), True, 'from tensorflow.python.keras import backend as K\n'), ((4163, 4183), 'tensorflow.python.keras.backend.variable', 'K.variable', (['features'], {}), '(features)\n', (4173, 4183), True, 'from tensorflow.python.keras import backend as K\n'), ((4295, 4315), 'tensorflow.python.keras.backend.get_value', 'K.get_value', (['anchors'], {}), '(anchors)\n', (4306, 4315), True, 'from tensorflow.python.keras import backend as K\n'), ((4586, 4614), 'numpy.tile', 'np.tile', (['expected', '(2, 1, 1)'], {}), '(expected, (2, 1, 1))\n', (4593, 4614), True, 'import numpy as np\n'), ((4919, 4940), 'deepcell.layers.RegressBoxes', 'layers.RegressBoxes', ([], {}), '()\n', (4938, 4940), False, 'from deepcell import layers\n'), ((5161, 5180), 'tensorflow.python.keras.backend.variable', 'K.variable', (['anchors'], {}), '(anchors)\n', (5171, 5180), True, 'from tensorflow.python.keras import backend as K\n'), ((5376, 5398), 'tensorflow.python.keras.backend.variable', 'K.variable', (['regression'], {}), '(regression)\n', (5386, 5398), True, 'from tensorflow.python.keras import backend as K\n'), ((5613, 5632), 'tensorflow.python.keras.backend.get_value', 'K.get_value', (['actual'], {}), '(actual)\n', (5624, 5632), True, 'from tensorflow.python.keras import backend as K\n'), ((6212, 6251), 'deepcell.layers.RegressBoxes', 'layers.RegressBoxes', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (6231, 6251), False, 'from deepcell import layers\n'), ((6700, 6719), 'tensorflow.python.keras.backend.variable', 'K.variable', (['anchors'], {}), '(anchors)\n', (6710, 6719), True, 'from tensorflow.python.keras import backend as K\n'), ((7139, 7161), 'tensorflow.python.keras.backend.variable', 'K.variable', (['regression'], {}), '(regression)\n', (7149, 7161), True, 'from tensorflow.python.keras import backend as K\n'), ((7268, 7287), 'tensorflow.python.keras.backend.get_value', 'K.get_value', (['actual'], {}), '(actual)\n', (7279, 7287), 
True, 'from tensorflow.python.keras import backend as K\n'), ((8003, 8047), 'deepcell.layers.RegressBoxes', 'layers.RegressBoxes', ([], {'mean': 'bad_mean', 'std': 'None'}), '(mean=bad_mean, std=None)\n', (8022, 8047), False, 'from deepcell import layers\n'), ((8104, 8147), 'deepcell.layers.RegressBoxes', 'layers.RegressBoxes', ([], {'mean': 'None', 'std': 'bad_std'}), '(mean=None, std=bad_std)\n', (8123, 8147), False, 'from deepcell import layers\n'), ((8286, 8309), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (8303, 8309), True, 'import numpy as np\n'), ((8311, 8334), 'numpy.random.randint', 'np.random.randint', (['(5)', '(9)'], {}), '(5, 9)\n', (8328, 8334), True, 'import numpy as np\n'), ((9098, 9143), 'deepcell.layers.ClipBoxes', 'layers.ClipBoxes', ([], {'data_format': '"""channels_last"""'}), "(data_format='channels_last')\n", (9114, 9143), False, 'from deepcell import layers\n'), ((9344, 9363), 'tensorflow.python.keras.backend.get_value', 'K.get_value', (['actual'], {}), '(actual)\n', (9355, 9363), True, 'from tensorflow.python.keras import backend as K\n'), ((9709, 9755), 'deepcell.layers.ClipBoxes', 'layers.ClipBoxes', ([], {'data_format': '"""channels_first"""'}), "(data_format='channels_first')\n", (9725, 9755), False, 'from deepcell import layers\n'), ((9956, 9975), 'tensorflow.python.keras.backend.get_value', 'K.get_value', (['actual'], {}), '(actual)\n', (9967, 9975), True, 'from tensorflow.python.keras import backend as K\n'), ((8859, 8869), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (8867, 8869), True, 'from tensorflow.python.keras import backend as K\n'), ((8993, 9031), 'numpy.random.random', 'np.random.random', (['(1, img_h, img_w, 3)'], {}), '((1, img_h, img_w, 3))\n', (9009, 9031), True, 'import numpy as np\n'), ((9604, 9642), 'numpy.random.random', 'np.random.random', (['(1, 6, img_h, img_w)'], {}), '((1, 6, img_h, img_w))\n', (9620, 9642), True, 'import numpy as np\n'), ((3115, 
3125), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3123, 3125), True, 'from tensorflow.python.keras import backend as K\n'), ((3538, 3548), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3546, 3548), True, 'from tensorflow.python.keras import backend as K\n'), ((4128, 4138), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (4136, 4138), True, 'from tensorflow.python.keras import backend as K\n'), ((4551, 4561), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (4559, 4561), True, 'from tensorflow.python.keras import backend as K\n'), ((5127, 5137), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (5135, 5137), True, 'from tensorflow.python.keras import backend as K\n'), ((5339, 5349), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (5347, 5349), True, 'from tensorflow.python.keras import backend as K\n'), ((5895, 5905), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (5903, 5905), True, 'from tensorflow.python.keras import backend as K\n'), ((6666, 6676), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (6674, 6676), True, 'from tensorflow.python.keras import backend as K\n'), ((7102, 7112), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (7110, 7112), True, 'from tensorflow.python.keras import backend as K\n'), ((7722, 7732), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (7730, 7732), True, 'from tensorflow.python.keras import backend as K\n'), ((2913, 2923), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (2921, 2923), True, 'from tensorflow.python.keras import backend as K\n'), ((2963, 2973), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (2971, 2973), True, 'from tensorflow.python.keras import backend as K\n'), ((3930, 3940), 'tensorflow.python.keras.backend.floatx', 
'K.floatx', ([], {}), '()\n', (3938, 3940), True, 'from tensorflow.python.keras import backend as K\n'), ((3986, 3996), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3994, 3996), True, 'from tensorflow.python.keras import backend as K\n')] |
#-*- coding: utf8 -*-
from __future__ import division
import numpy as n, pylab as p, networkx as x, random as r, collections as c, string
__doc__="""Este arquivo possui a classe Sistem, base para todas as animações
G=x.read_gml("1-400cpp.gml") # digrafo com peso
S=Sistem(G)
S.draw("grafo1.png")
S.add_msgs([msg1,msg2...])
S.rm_msgs([msg1,msg2...])
S.order()
S.draw("grafo1.png")
# nadds[i] tem as msgs adicionadas para o frame i+1.
nadds=[[msgs..,..,],[msgs..],[]]
# nrms[i] tem as msgs removidas para o frame i+1.
nrms=[[msgs..,..,],[msgs..],[]]
i=0
for a,r in zip(nadds,nrms):
S.add_msgs(a)
S.rm_msgs(b)
S.order()
S.draw("grafo%i.png"%(i,))
i+=1
0) fazer classe com parametros minimos. Fazer com que plote pelo pygraphviz sem layout.
1) fazer classe com parametros minimos. Fazer com que plote pelo pygraphviz com layout pre-determinado.
"""
class Sistem:
    """Render a weighted directed graph with pygraphviz.

    Nodes are ranked (by degree or alphabetically), positioned along a
    sinusoid (or at random), sized by their in/out strength and coloured
    by clustering coefficient.  ``draw()`` writes a PNG frame.
    """

    def __init__(self, G="lad3100_3200.gml", positionMethod="sinusoid",
                 rankCriteria="degree", fixedSize=40):
        """G: a networkx DiGraph or a path to a GML file.
        positionMethod: "random" or "sinusoid".
        rankCriteria: "alphabetic" or "degree".
        fixedSize: number of nodes placed directly on the sinusoid."""
        self.rank = None
        self.positions = None
        self.ecolors = []
        if isinstance(G, str):
            G = x.read_gml(G)  # a path was given: load the DiGraph
        self.g = G
        self.positionMethod = positionMethod
        self.rankCriteria = rankCriteria
        self.fixedSize = fixedSize
        # rank nodes and compute an (x, y) position for each ranked node;
        # both cache their result on self (self.rank / self.positions)
        self.rankNodes()
        self.positionRanks()
        self.computeNoise()

    def update(self):
        """Refresh rank and positions after the graph changed.

        NOTE(review): rankNodes/positionRanks cache their results, so
        without force=True these calls are no-ops after the first run --
        force=True may have been intended; behavior kept as-is.
        """
        self.rankNodes()
        self.positionRanks()

    def addNode(self, nodeName):
        """Add a node with unit weight and refresh rank/positions."""
        self.g.add_node(nodeName, weight=1.0)
        self.update()

    def addEdge(self, pre, pos):
        """Add a directed edge pre -> pos with unit weight and refresh."""
        self.g.add_edge(pre, pos, weight=1.0)
        self.update()

    def rankNodes(self, force=False):
        """Order nodes for visualisation; result cached in self.rank.

        "alphabetic" sorts node names; "degree" sorts by decreasing
        degree and also builds self.siglas ("G<degree>" labels).
        NOTE(review): the "alphabetic" branch does not set self.siglas,
        which draw() requires -- confirm draw() is only used with
        rankCriteria="degree".
        """
        # `is None` instead of `== None` (robust, idiomatic)
        if self.rank is None or force:
            print("realizando ordenacao por %s" % (self.rankCriteria,))
            if self.rankCriteria == "alphabetic":
                self.rank = list(self.g.nodes())
                self.rank.sort()
            elif self.rankCriteria == "degree":
                ordenacao = c.OrderedDict(
                    sorted(self.g.degree().items(), key=lambda kv: -kv[1]))
                self.ordenacao = ordenacao
                # list() so .index() also works on py3 dict views
                self.rank = list(ordenacao.keys())
                self.siglas = ["G%i" % (i,) for i in ordenacao.values()]
        else:
            print(u"ordenação encontrada. Consulte Sistem.rank.")
        return self.rank

    def positionRanks(self, force=False):
        """Compute an (x, y) position for each ranked node.

        self.positionMethod selects the layout:
          "random"   -- uniform random points in the unit square
          "sinusoid" -- the first fixedSize nodes along one sine period,
                        any leftover nodes stacked along two diagonals
        Result is cached in self.positions (rows of (x, y)).
        """
        # `is not None`: `!= None` on a numpy array is elementwise and
        # made this guard misbehave once positions were computed
        if self.positions is not None and not force:
            print("posicoes encontradas, não atualizando posições")
        else:
            print("posicoes nao encontradas, realizando posicionamento para o grafo considerado")
            if self.positionMethod == "random":
                nn = self.g.number_of_nodes()
                loc = n.random.random(nn * 2)
                locxy = n.reshape(loc, (nn, 2))
            elif self.positionMethod == "sinusoid":
                if self.fixedSize:
                    nn = self.fixedSize
                else:
                    nn = self.g.number_of_nodes()
                # denser sampling on the first quarter of the period;
                # int() matches old numpy's truncation of float counts
                parte1 = int(nn / 4)
                parte2 = nn - parte1
                xx = n.hstack((n.linspace(0.0, 0.5, parte1, endpoint=False),
                               n.linspace(0.5, 1, parte2)))
                yy = n.sin(xx * 2 * n.pi)  # one full sine period
                locxy = n.vstack((xx * 4, yy)).T
                # NOTE(review): locxy.shape[1] is always 2 here, so sobra
                # is len(rank)-2; shape[0] (== nn) may have been intended.
                # Kept as-is to preserve the rendered layout.
                sobra = len(self.rank) - locxy.shape[1]
                if sobra < 0:
                    pass
                else:
                    # leftover nodes stacked along two diagonal segments
                    poss = n.zeros((2, sobra))
                    parte1 = int(sobra / 4)
                    parte2 = sobra - parte1
                    poss[0] += n.hstack((3.5 - n.linspace(0, 1, parte1),
                                         3.5 - n.linspace(1, 2, parte2)))
                    poss[1] += n.hstack((.2 + n.linspace(0, 1, parte1),
                                         .2 + n.linspace(1, 2, parte2)))
                    locxy = n.hstack((locxy.T, poss)).T
            self.positions = locxy
        return self.positions

    def util(self, which="plotpos"):
        """Small debug helpers; "plotpos" scatter-plots self.positions."""
        # was `which is "plotpos"`: string identity is unreliable
        if which == "plotpos":
            # was SSi.positions (an undefined module global) -- use self
            p.plot(self.positions[:, 0], self.positions[:, 1], "bo")
            p.show()

    def draw(self, nome="sistema.png", numero_serie=0):
        """Render the graph to the PNG file *nome* via pygraphviz/neato.

        numero_serie appears in the figure label; node name labels are
        only drawn when numero_serie % 100 < 20 (2 of every 10 frames).
        """
        p.clf()
        A = x.to_agraph(self.g)
        A.node_attr['style'] = 'filled'
        A.graph_attr["bgcolor"] = "black"
        A.graph_attr["pad"] = .1
        A.graph_attr["size"] = "9.5,12"
        TTABELACORES = 2 ** 10  # colour-table size
        cm = p.cm.Reds(range(TTABELACORES))  # colour table
        self.cm = cm
        nodes = A.nodes()
        colors = []
        loopWeights = []
        loopnodes = [i[0] for i in self.g.selfloop_edges()]
        self.loopnodes = loopnodes
        # per-node measures used for width/height (in/out strength)...
        ind = self.g.in_degree(weight='weight'); mind = max(ind.values()) / 3 + 0.1
        oud = self.g.out_degree(weight='weight'); moud = max(oud.values()) / 3 + .1
        miod = max(mind, moud)
        # ...and for colouring (clustering coefficient of the
        # undirected projection)
        cc = x.clustering(self.g.to_undirected())
        self.cc = cc
        ii = 0
        for node in nodes:
            n_ = A.get_node(node)
            # agraph node names are strings; recover the original key
            if node.isdigit():
                foo = int(node)
            else:
                foo = node
            ifoo = self.rank.index(foo)
            n_.attr['fillcolor'] = '#%02x%02x%02x' % tuple(
                [255 * i for i in cm[int(cc[foo] * 255)][:-1]])
            # self-loop weight is subtracted from the node's size
            if node in loopnodes:
                loopW = self.g[node][node]["weight"]
                loopWeights.append(loopW)
            else:
                loopW = 0
            n_.attr['fixedsize'] = True
            n_.attr['width'] = abs(.07 * ((ind[foo] - loopW) / miod + 0.5))
            n_.attr['height'] = abs(.07 * ((oud[foo] - loopW) / miod + 0.5))
            # pin the node at its precomputed position
            n_.attr["pos"] = "%f,%f" % tuple(self.positions[ifoo]); ii += 1
            n_.attr["pin"] = True
            n_.attr["fontsize"] = 15
            n_.attr["fontcolor"] = "white"
            if numero_serie % 100 < 20:  # label 2 of every 10 frames
                n_.attr['label'] = "%s" % (self.siglas[ifoo],)
            else:
                n_.attr['label'] = ""
            colors.append('#%02x%02x%02x' % tuple(
                [255 * i for i in cm[int(cc[foo] * 255)][:-1]]))
        edges = A.edges()
        # was `S.g` (a module-level global) -- use self.g; the
        # comprehension variable also shadowed a local, so it is renamed
        pesos = [dat["weight"] for _, _, dat in self.g.edges(data=True)]
        self.pesos = pesos
        self.pesos_ = []
        pesosMax = max(pesos)
        self.pesosMax = pesosMax
        for e in edges:
            factor = float(e.attr['weight'])
            self.pesos_.append(factor)
            e.attr['penwidth'] = .2 * factor
            e.attr["arrowsize"] = .5
            e.attr["arrowhead"] = "lteeoldiamond"
            w = factor / pesosMax  # normalised weight in [0, 1]
            # colour by normalised weight (an earlier Reds lookup was
            # immediately overridden by Spectral; the dead call is gone)
            cor = p.cm.Spectral(int(w * 255))
            self.cor = cor
            cor256 = 255 * n.array(cor[:-1])  # RGB floats in [0, 255]
            # equivalent to the old per-nibble hex() construction
            corRGB = '#%02x%02x%02x' % tuple(int(v) for v in cor256)
            e.attr["color"] = corRGB
            self.ecolors.append(e.attr["color"])
        label = "imagem: %i, |g|= %i, |e|= %i" % (
            numero_serie, A.number_of_nodes(), A.number_of_edges())
        A.graph_attr["label"] = label
        # auxiliary "ruler" nodes marking the rank positions
        rank = 1
        for pos in self.positions:
            A.add_node(rank)
            n_ = A.get_node(rank)
            n_.attr['fixedsize'] = True
            n_.attr['width'] = .05
            n_.attr['height'] = .05
            if rank < 41:  # sinusoid part: markers on a baseline
                n_.attr["pos"] = "%f,%f" % (pos[0], -1.2)
            else:          # leftover part: offset from the node
                n_.attr["pos"] = "%f,%f" % (pos[0] + .2, pos[1] + .2)
            n_.attr["pin"] = True
            n_.attr["fontsize"] = 8.700
            n_.attr["fontcolor"] = "white"
            if rank < 41:
                if rank % 5 == 0:  # label every 5th baseline marker
                    n_.attr['label'] = str(rank)
                else:
                    n_.attr['label'] = ""
            else:
                n_.attr['width'] = .03
                n_.attr['height'] = .02
                if rank % 20 == 0:  # label every 20th leftover marker
                    n_.attr['label'] = str(rank)
                    n_.attr['fontsize'] = 8
                else:
                    n_.attr['label'] = ""
            rank += 1
        A.graph_attr["fontcolor"] = "white"
        A.draw('%s' % (nome,), prog="neato")  # neato honours pinned pos
        # (typo fixed: message previously read "scrita figura")
        print('escrita figura: %s' % (nome,))
        self.A = A

    def computeNoise(self):
        """Count empty messages, empty addressess, swapped messages in time..

        Not implemented yet; kept as a hook called from __init__."""
        pass
#
# Module-level smoke test: build the default system (reads the default
# GML file from disk) and render it to "sistema.png".
S=Sistem()
S.draw()
print("escrita figura teste")
#####################
## Roda no ipython
# : run sistema.py
# : S.draw() # cria figura sistema.png
# : run fazRedeInteracao.py # cria g, mm, aa, ids, etc
# : SS=Sistem(g)
# : SS.draw("sistema2.png") # salva no sistema2.png
# : g_=x.DiGraph()
# : SS_=Sistem(g_)
# : SS_.addMsgs([msg1,msg2...])
# : SS_.draw("sistema_.png") # salva no sistema2.png
# : SS_.addMsgs([msg1,msg2...])
# : SS_.rmMsgs([msg1,msg2...])
# : SS_.draw("sistema_2.png") # salva no sistema2.png
#######################################3
######################################3
from dateutil import parser
import mailbox, pytz
utc=pytz.UTC
figs=1  # toggle: pylab is only needed when figures are produced
#figs=False
if figs:
    import pylab as p
# path to the directory holding one mbox file per message
#caminho="/home/rfabbri/repos/FIM/python/cppStdLib/"
#caminho="/home/rfabbri/repos/FIM/python/lau/"
caminho="/home/rfabbri/repos/FIM/python/lad/"
#caminho="/home/rfabbri/repos/FIM/python/metarec/"
mm={} # message-id -> (author, replied-to id, date)
ids=[] # message ids in the order they appeared
vz=[] # indices of empty messages, for verification
aa={} # author -> list of (message-id, replied-to id, date)
ids_r={} # replied-to message-id -> list of (author, message-id, date)
for i in xrange(1,5001): # NOTE(review): original comment said "only 500 msgs" but the range covers 5000
    mbox = mailbox.mbox(caminho+str(i))
    if mbox.keys(): # skip empty messages
        m=mbox[0]
        # normalise the From: header down to a bare address/login
        au=m['from']
        au=au.replace('"','')
        au=au.split("<")[-1][:-1]
        if " " in au:
            au=au.split(" ")[0]
        if au not in aa:
            aa[au]=[]
        # massage nonstandard timezone spellings so dateutil can parse
        date=m['date']
        date=date.replace("METDST","MEST")
        date=date.replace("MET DST","MEST")
        #date=date.replace(" CST"," (CST)")
        date=date.replace("(GMT Standard Time)","")
        date=date.replace(" CDT"," (CDT)")
        date=date.replace(" GMT","")
        date=date.replace("(WET DST)","")
        date=date.replace("-0600 CST","-0600")
        #print date
        # drop a trailing "GMT-..." zone fragment, keeping the ")"
        if "GMT-" in date:
            index=date[::-1].index("-")
            date=date[:-index-1]+")"
        if 'added' in date: date = date.split(" (")[0]
        # the last token of References: is the directly replied-to id
        if m['references']:
            id_ant=m['references'].split('\t')[-1]
            id_ant=id_ant.split(' ')[-1]
        else:
            id_ant=None
        if id_ant not in ids_r.keys():
            ids_r[id_ant]=[]
        date=parser.parse(date)
        try: # attach a UTC locale to naive dates so all are comparable
            date=utc.localize(date)
        except:
            pass
        ids_r[id_ant].append( (au,m["message-id"],date) )
        mm[m["message-id"]]=(au,id_ant,date)
        aa[au].append( (m["message-id"], id_ant, date) )
        ids.append(m['message-id'])
    else:
        vz.append(i)
print("criados aa, mm, vz, ids")
ends=aa.keys()
g=x.DiGraph()
resposta_perdida=[] # para os ids das msgs cuja resposta está perdida
respondido_antes=[]
imgi=0
for i in ids:
m=mm[i]
if m[0] in g.nodes():
if "weight" in g.node[m[0]].keys():
g.node[m[0]]["weight"]+=1
else:
g.add_node(m[0],weight=1.)
respondido_antes.append(i)
else:
g.add_node(m[0],weight=1.)
if m[1]:
if m[1] in mm.keys():
m0=mm[m[1]]
if g.has_edge(m0[0],m[0]):
g[m0[0]][m[0]]["weight"]+=1
else:
g.add_edge(m0[0], m[0], weight=1.)
else:
resposta_perdida.append(i)
print("criado digrafo: g com todas as mensagens")
print("obtendo lista de vertices e suas siglas")
d=g.degree()
# Vertices ordenados do maior grau para o menor
sequencia=sorted(d, key=d.get, reverse=True)
siglas=["%s" % (d[s],) for s in sequencia]
Si=Sistem(g)
Si.rank=sequencia
Si.siglas=siglas
Si.positionRanks(True)
#Si.positionRanks()
Si.draw("este.png")
G=x.copy.deepcopy(g)
##############################################
# Sliding-window animation: replay the messages in order, keeping only the
# most recent JANELA of them in the graph, rendering one frame per message.
print("iniciando animacao")
gg=x.DiGraph()
SSi=Sistem(gg)
SSi.rank=sequencia # keep the overall ordering and labels
SSi.siglas=siglas
SSi.positionRanks(True)
JANELA=100
resposta_perdida=[] # ids of msgs whose replied-to message is lost
respondido_antes=[]
imgi=0
j=0
m_passadas=[]
for i in ids:
 m=mm[i] ; m_passadas.append(m)
 if m[0] in gg.nodes():
  if "weight" in gg.node[m[0]].keys():
   gg.node[m[0]]["weight"]+=1
  else:
   gg.add_node(m[0],weight=1.)
  respondido_antes.append(i)
 else:
  gg.add_node(m[0],weight=1.)
 if m[1]:
  if m[1] in mm.keys():
   m0=mm[m[1]]
   if gg.has_edge(m0[0],m[0]):
    gg[m0[0]][m[0]]["weight"]+=1
   else:
    gg.add_edge(m0[0], m[0], weight=1.)
  else:
   resposta_perdida.append(i)
 if j>=JANELA:
  # drop the oldest message from the window
  m=m_passadas.pop(0)
  if m[0] in gg.nodes():
   if "weight" in gg.node[m[0]].keys():
    if gg.node[m[0]]["weight"]>1:
     gg.node[m[0]]["weight"]-=1.
    else:
     if gg.degree()[m[0]]>0:
      print("deixando vertice permanecer devido aa aresta")
      gg.node[m[0]]["weight"]-=1.
     else:
      print("removendo vertice")
      gg.remove_node(m[0])
   else:
    print("vertice sem peso, iniciando com peso 0. Msg removida: %i Vertice reinicializado: %s"%(j-JANELA,m[0]))
    gg.node[m[0]]["weight"]=0.
  else:
   print(u"vértice não existente quando procurado para diminuicao de peso: %s"%(m[0],))
  if m[1]: # if it is a reply to someone
   if m[1] in mm.keys():
    m0=mm[m[1]] # original message
    if gg[m0[0]][m[0]]["weight"]>1:
     gg[m0[0]][m[0]]["weight"]-=1
    else:
     gg.remove_edge(m0[0], m[0])
     if gg.degree(m0[0])==0:
      gg.remove_node(m0[0])
   else:
    resposta_perdida.append(i)
  print("andando com a janela")
 else:
  print("formando janela")
 j+=1
 SSi.update()
 SSi.draw("./v1lad/%05d.png"%(imgi,),imgi); imgi+=1
print("criado digrafo: gg mensagens")
| [
"dateutil.parser.parse",
"numpy.reshape",
"networkx.to_agraph",
"numpy.hstack",
"numpy.random.random",
"pylab.plot",
"networkx.DiGraph",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.vstack",
"networkx.copy.deepcopy",
"numpy.sin",
"networkx.read_gml",
"pylab.clf",
"pylab.show"... | [((16102, 16113), 'networkx.DiGraph', 'x.DiGraph', ([], {}), '()\n', (16111, 16113), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((17122, 17140), 'networkx.copy.deepcopy', 'x.copy.deepcopy', (['g'], {}), '(g)\n', (17137, 17140), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((17219, 17230), 'networkx.DiGraph', 'x.DiGraph', ([], {}), '()\n', (17228, 17230), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((5277, 5284), 'pylab.clf', 'p.clf', ([], {}), '()\n', (5282, 5284), True, 'import pylab as p\n'), ((5295, 5314), 'networkx.to_agraph', 'x.to_agraph', (['self.g'], {}), '(self.g)\n', (5306, 5314), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((15659, 15677), 'dateutil.parser.parse', 'parser.parse', (['date'], {}), '(date)\n', (15671, 15677), False, 'from dateutil import parser\n'), ((1269, 1282), 'networkx.read_gml', 'x.read_gml', (['G'], {}), '(G)\n', (1279, 1282), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((5142, 5196), 'pylab.plot', 'p.plot', (['SSi.positions[:, 0]', 'SSi.positions[:, 1]', '"""bo"""'], {}), "(SSi.positions[:, 0], SSi.positions[:, 1], 'bo')\n", (5148, 5196), True, 'import pylab as p\n'), ((5205, 5213), 'pylab.show', 'p.show', ([], {}), '()\n', (5211, 5213), True, 'import pylab as p\n'), ((3336, 3359), 'numpy.random.random', 'n.random.random', (['(nn * 2)'], {}), '(nn * 2)\n', (3351, 3359), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((3380, 3403), 'numpy.reshape', 'n.reshape', (['loc', '(nn, 2)'], {}), '(loc, (nn, 2))\n', (3389, 3403), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((10119, 10136), 'numpy.array', 'n.array', (['cor[:-1]'], {}), '(cor[:-1])\n', (10126, 10136), True, 
'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((3614, 3636), 'numpy.linspace', 'n.linspace', (['(0.0)', '(1)', 'nn'], {}), '(0.0, 1, nn)\n', (3624, 3636), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((3879, 3899), 'numpy.sin', 'n.sin', (['(xx * 2 * n.pi)'], {}), '(xx * 2 * n.pi)\n', (3884, 3899), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((3945, 3967), 'numpy.vstack', 'n.vstack', (['(xx * 4, yy)'], {}), '((xx * 4, yy))\n', (3953, 3967), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((4164, 4183), 'numpy.zeros', 'n.zeros', (['(2, sobra)'], {}), '((2, sobra))\n', (4171, 4183), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((3758, 3802), 'numpy.linspace', 'n.linspace', (['(0.0)', '(0.5)', 'parte1'], {'endpoint': '(False)'}), '(0.0, 0.5, parte1, endpoint=False)\n', (3768, 3802), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((3800, 3826), 'numpy.linspace', 'n.linspace', (['(0.5)', '(1)', 'parte2'], {}), '(0.5, 1, parte2)\n', (3810, 3826), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((4593, 4618), 'numpy.hstack', 'n.hstack', (['(locxy.T, poss)'], {}), '((locxy.T, poss))\n', (4601, 4618), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((4418, 4442), 'numpy.linspace', 'n.linspace', (['(0)', '(1)', 'parte1'], {}), '(0, 1, parte1)\n', (4428, 4442), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((4446, 4470), 'numpy.linspace', 'n.linspace', (['(1)', '(2)', 'parte2'], {}), '(1, 2, parte2)\n', (4456, 4470), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((4515, 4539), 'numpy.linspace', 
'n.linspace', (['(0)', '(1)', 'parte1'], {}), '(0, 1, parte1)\n', (4525, 4539), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n'), ((4541, 4565), 'numpy.linspace', 'n.linspace', (['(1)', '(2)', 'parte2'], {}), '(1, 2, parte2)\n', (4551, 4565), True, 'import numpy as n, pylab as p, networkx as x, random as r, collections as c, string\n')] |
import h5py
import numpy as np
from keras.datasets import mnist
from keras.utils import to_categorical

# MNIST image geometry (pixels).
n_rows, n_cols = 28, 28

# The data, already shuffled and split into train/test partitions by Keras.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Add a trailing channel axis; half precision keeps the output file small.
x_train = x_train.reshape(x_train.shape[0], n_rows, n_cols, 1).astype('float16')
x_test = x_test.reshape(x_test.shape[0], n_rows, n_cols, 1).astype('float16')
input_shape = (n_rows, n_cols, 1)

# Merge both partitions; scale pixel values into [0, 1].
inputs = np.concatenate((x_train, x_test)) / 255
labels = np.concatenate((y_train, y_test))  # integer class ids
###########################################
# fix mis-labeled image(s) in Keras dataset
labels[10994] = 9
###########################################
# One-hot targets, then keep the raw labels as variable-length strings.
targets = to_categorical(labels).astype("uint8")
str_dtype = h5py.special_dtype(vlen=str)
labels = np.array([str(label) for label in labels], dtype=str_dtype)

print("creating h5...")
with h5py.File("mnist.h5", "w") as h5:
    # Each array is wrapped in a singleton list, i.e. stored with a leading
    # axis of length 1, preserving the original on-disk layout.
    for ds_name, ds_data in (("inputs", inputs), ("targets", targets), ("labels", labels)):
        h5.create_dataset(ds_name, data=[ds_data], compression='gzip', compression_opts=9)
print("done!")
| [
"keras.datasets.mnist.load_data",
"h5py.File",
"keras.utils.to_categorical",
"numpy.concatenate",
"h5py.special_dtype"
] | [((255, 272), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (270, 272), False, 'from keras.datasets import mnist\n'), ((569, 602), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (583, 602), True, 'import numpy as np\n'), ((826, 854), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'str'}), '(vlen=str)\n', (844, 854), False, 'import h5py\n'), ((521, 554), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (535, 554), True, 'import numpy as np\n'), ((951, 977), 'h5py.File', 'h5py.File', (['"""mnist.h5"""', '"""w"""'], {}), "('mnist.h5', 'w')\n", (960, 977), False, 'import h5py\n'), ((778, 800), 'keras.utils.to_categorical', 'to_categorical', (['labels'], {}), '(labels)\n', (792, 800), False, 'from keras.utils import to_categorical\n')] |
from __future__ import division
import cv2
import numpy as np
from opensfm import transformations
def rotation_from_angle_axis(angle_axis):
    """Convert an angle-axis (Rodrigues) vector into a 3x3 rotation matrix."""
    rvec = np.asarray(angle_axis)
    matrix, _jacobian = cv2.Rodrigues(rvec)
    return matrix
def rotation_from_ptr(pan, tilt, roll):
    """Camera rotation matrix from pan, tilt and roll angles (radians)."""
    r_roll = rotation_from_angle_axis([0.0, 0.0, roll])
    r_tilt = rotation_from_angle_axis([tilt + np.pi / 2, 0.0, 0.0])
    r_pan = rotation_from_angle_axis([0.0, 0.0, pan])
    # Compose roll, then tilt, then pan (same order as the original chain).
    return r_roll.dot(r_tilt).dot(r_pan)
def ptr_from_rotation(rotation_matrix):
    """Pan, tilt and roll angles from a camera rotation matrix."""
    return (pan_from_rotation(rotation_matrix),
            tilt_from_rotation(rotation_matrix),
            roll_from_rotation(rotation_matrix))
def pan_from_rotation(rotation_matrix):
    """Pan angle of the camera viewing axis, measured in the world XY plane."""
    view_dir = rotation_matrix.T.dot([0, 0, 1])
    return np.arctan2(view_dir[0], view_dir[1])
def tilt_from_rotation(rotation_matrix):
    """Tilt angle of the camera viewing axis relative to the world XY plane."""
    view_dir = rotation_matrix.T.dot([0, 0, 1])
    horizontal = np.linalg.norm(view_dir[:2])
    return np.arctan2(-view_dir[2], horizontal)
def roll_from_rotation(rotation_matrix):
    """Roll angle: rotation of the camera x axis around the viewing axis."""
    cam_x = rotation_matrix.T.dot([1, 0, 0])
    view_dir = rotation_matrix.T.dot([0, 0, 1])
    # Unit horizon direction, perpendicular to both the view axis and world z.
    horizon = np.cross(view_dir, [0, 0, 1])
    horizon /= np.linalg.norm(horizon)
    up_ref = np.cross(cam_x, horizon)
    return np.arcsin(np.dot(view_dir, up_ref))
def rotation_from_ptr_v2(pan, tilt, roll):
    """Camera rotation matrix from pan, tilt and roll.

    This is the implementation used in the Single Image Calibration code.
    """
    # Euler 'szxz' convention with the tilt offset by +pi/2.
    full = transformations.euler_matrix(pan, tilt + np.pi / 2, roll, 'szxz')
    return full[:3, :3]
def ptr_from_rotation_v2(rotation_matrix):
    """Pan, tilt and roll from a camera rotation matrix.

    This is the implementation used in the Single Image Calibration code.
    """
    homogeneous = np.identity(4)
    homogeneous[:3, :3] = rotation_matrix
    pan, tilt, roll = transformations.euler_from_matrix(homogeneous, 'szxz')
    return pan, tilt - np.pi / 2, roll
| [
"numpy.identity",
"numpy.cross",
"numpy.asarray",
"opensfm.transformations.euler_from_matrix",
"numpy.dot",
"numpy.arctan2",
"numpy.linalg.norm",
"opensfm.transformations.euler_matrix"
] | [((809, 845), 'numpy.dot', 'np.dot', (['rotation_matrix.T', '[0, 0, 1]'], {}), '(rotation_matrix.T, [0, 0, 1])\n', (815, 845), True, 'import numpy as np\n'), ((857, 887), 'numpy.arctan2', 'np.arctan2', (['Rt_ez[0]', 'Rt_ez[1]'], {}), '(Rt_ez[0], Rt_ez[1])\n', (867, 887), True, 'import numpy as np\n'), ((943, 979), 'numpy.dot', 'np.dot', (['rotation_matrix.T', '[0, 0, 1]'], {}), '(rotation_matrix.T, [0, 0, 1])\n', (949, 979), True, 'import numpy as np\n'), ((988, 1013), 'numpy.linalg.norm', 'np.linalg.norm', (['Rt_ez[:2]'], {}), '(Rt_ez[:2])\n', (1002, 1013), True, 'import numpy as np\n'), ((1025, 1049), 'numpy.arctan2', 'np.arctan2', (['(-Rt_ez[2])', 'l'], {}), '(-Rt_ez[2], l)\n', (1035, 1049), True, 'import numpy as np\n'), ((1105, 1141), 'numpy.dot', 'np.dot', (['rotation_matrix.T', '[1, 0, 0]'], {}), '(rotation_matrix.T, [1, 0, 0])\n', (1111, 1141), True, 'import numpy as np\n'), ((1154, 1190), 'numpy.dot', 'np.dot', (['rotation_matrix.T', '[0, 0, 1]'], {}), '(rotation_matrix.T, [0, 0, 1])\n', (1160, 1190), True, 'import numpy as np\n'), ((1199, 1225), 'numpy.cross', 'np.cross', (['Rt_ez', '[0, 0, 1]'], {}), '(Rt_ez, [0, 0, 1])\n', (1207, 1225), True, 'import numpy as np\n'), ((1235, 1252), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (1249, 1252), True, 'import numpy as np\n'), ((1261, 1279), 'numpy.cross', 'np.cross', (['Rt_ex', 'a'], {}), '(Rt_ex, a)\n', (1269, 1279), True, 'import numpy as np\n'), ((1791, 1805), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (1802, 1805), True, 'import numpy as np\n'), ((1860, 1904), 'opensfm.transformations.euler_from_matrix', 'transformations.euler_from_matrix', (['T', '"""szxz"""'], {}), "(T, 'szxz')\n", (1893, 1904), False, 'from opensfm import transformations\n'), ((1301, 1317), 'numpy.dot', 'np.dot', (['Rt_ez', 'b'], {}), '(Rt_ez, b)\n', (1307, 1317), True, 'import numpy as np\n'), ((1539, 1592), 'opensfm.transformations.euler_matrix', 'transformations.euler_matrix', (['pan', 'tilt', 
'roll', '"""szxz"""'], {}), "(pan, tilt, roll, 'szxz')\n", (1567, 1592), False, 'from opensfm import transformations\n'), ((169, 191), 'numpy.asarray', 'np.asarray', (['angle_axis'], {}), '(angle_axis)\n', (179, 191), True, 'import numpy as np\n')] |
import os
import nibabel as nib
import numpy.ma as ma
import settings_dist
import numpy as np
from tqdm import tqdm
import argparse
# Command-line interface: where to read the scans from, which sample suffix to
# process, the train/test split fraction, the target crop size, and where to
# write the accumulated .npy files.
parser = argparse.ArgumentParser()
parser.add_argument("root_dir")
parser.add_argument("sample")
parser.add_argument("train_test_split")
parser.add_argument("resize")
parser.add_argument("save_path")
args = parser.parse_args()
root_dir = args.root_dir
sample = args.sample
resize = int(args.resize) # Final dimension (square), set resize = 0 if no resizing is desired
rotate = 3 # Number of counter-clockwise, 90 degree rotations
save_path = args.save_path
train_test_split = float(args.train_test_split)
save_interval = 10  # flush accumulated data to disk every N scans
def parse_segments(seg):
    """Split a segmentation volume into per-class binary masks.

    Each output channel keeps one label (0 background, 1 necrotic,
    2 edema, 4 GD/enhancing); every labeled voxel becomes 1.
    Returns an array of shape (n_slices, H, W, 4).
    """
    def isolate(plane, label):
        # Keep only voxels equal to `label`, zero everywhere else.
        return ma.masked_not_equal(plane, label).filled(fill_value=0)

    per_slice = [
        np.dstack([isolate(seg[:, :, idx], label) for label in (0, 1, 2, 4)])
        for idx in range(seg.shape[-1])
    ]
    stacked = np.asarray(per_slice)
    stacked[stacked > 0] = 1  # binarize all tumor labels
    return stacked
def parse_images(img):
    """Reorder a (H, W, n_slices) volume into an (n_slices, H, W) array."""
    return np.asarray([img[:, :, idx] for idx in range(img.shape[-1])])
def stack_img_slices(mode_track, stack_order):
    """Stack imaging modes channel-wise per slice and z-score normalize.

    `mode_track` maps mode name -> (n_slices, H, W) array; the output channels
    follow `stack_order`.  The whole stack is normalized to zero mean and unit
    std (inference will not work if this is not performed).
    """
    n_slices = len(mode_track['t1'])
    stacked = np.asarray([
        np.dstack([mode_track[mode][idx, :, :] for mode in stack_order])
        for idx in range(n_slices)
    ])
    return (stacked - np.mean(stacked)) / np.std(stacked)
def resize_data(dataset, new_size):
    """Center-crop each slice to (new_size, new_size) and apply the
    module-level `rotate` count of 90-degree rotations in the slice plane.

    Parameters
    ----------
    dataset : ndarray of shape (n_slices, H, W, ...)
        Stack of square slices (H == W assumed); test/train images must be
        the same size.
    new_size : int
        Target side length of the crop.

    Returns
    -------
    ndarray with the spatial axes cropped (and rotated when `rotate` != 0).
    """
    # Use floor division: on Python 3 `/` yields a float, and float slice
    # indices raise TypeError.
    start_index = (dataset.shape[1] - new_size) // 2
    end_index = dataset.shape[1] - start_index
    cropped = dataset[:, start_index:end_index, start_index:end_index]
    if rotate != 0:
        # Rotate only in the spatial plane (axes 1 and 2).
        return np.rot90(cropped, rotate, axes=(1, 2))
    return cropped
def save_data(imgs_all, msks_all, split, save_path):
    """Append the accumulated images/masks to the train/test .npy files.

    The first `split` fraction of this batch goes to the train files, the
    remainder to the test files.  Existing files are loaded, concatenated
    with the new data and rewritten; otherwise fresh files are created.
    """
    imgs_all = np.asarray(imgs_all)
    msks_all = np.asarray(msks_all)
    # Split this batch into its train/test portions.
    train_size = int(msks_all.shape[0] * split)
    batches = {
        "imgs_train": imgs_all[0:train_size, :, :, :],
        "msks_train": msks_all[0:train_size, :, :, :],
        "imgs_test": imgs_all[train_size:, :, :, :],
        "msks_test": msks_all[train_size:, :, :, :],
    }
    names = ("imgs_train", "msks_train", "imgs_test", "msks_test")
    if os.path.isfile("{}imgs_train.npy".format(save_path)):
        # Handle one file at a time (these will be large) and release the
        # buffer immediately after the concatenate/save.
        for name in names:
            fname = "{}{}.npy".format(save_path, name)
            existing = np.load(fname)
            np.save(fname, np.concatenate((existing, batches[name]), axis=0))
            existing = None
    else:
        for name in names:
            np.save("{}{}.npy".format(save_path, name), batches[name])
# Walk the data tree; every directory that holds all four imaging modes for
# the requested sample is converted, and results are flushed to disk in
# batches of `save_interval` scans.
imgs_all = []
msks_all = []
scan_count = 0
# NOTE(review): loop variable `dir` shadows the builtin; rename when safe.
for subdir, dir, files in tqdm(os.walk(root_dir)):
    # Ensure all necessary files are present
    file_root = subdir.split('/')[-1] + "_"
    extension = ".nii.gz"
    img_modes = ["t1","t2","flair","t1ce"]
    need_file = [file_root + mode + extension for mode in img_modes]
    all_there = [(reqd in files) for reqd in need_file]
    if all(all_there) and subdir.endswith(sample):
        mode_track = {mode:[] for mode in img_modes}
        for file in files:
            # Segmentation volume -> per-class binary masks.
            if file.endswith('seg.nii.gz'):
                path = os.path.join(subdir,file)
                msk = np.array(nib.load(path).dataobj)
                parsed = resize_data(parse_segments(msk), resize)
                msks_all.extend(parsed)
            # Each imaging mode is cropped/rotated and stored per mode.
            if file.endswith('t1.nii.gz'):
                path = os.path.join(subdir,file)
                img = np.array(nib.load(path).dataobj)
                mode_track['t1'] = resize_data(parse_images(img), resize)
            if file.endswith('t2.nii.gz'):
                path = os.path.join(subdir,file)
                img = np.array(nib.load(path).dataobj)
                mode_track['t2'] = resize_data(parse_images(img), resize)
            if file.endswith('t1ce.nii.gz'):
                path = os.path.join(subdir,file)
                img = np.array(nib.load(path).dataobj)
                mode_track['t1ce'] = resize_data(parse_images(img), resize)
            if file.endswith('flair.nii.gz'):
                path = os.path.join(subdir,file)
                img = np.array(nib.load(path).dataobj)
                mode_track['flair'] = resize_data(parse_images(img), resize)
        scan_count += 1
        imgs_all.extend(np.asarray(stack_img_slices(mode_track,img_modes)))
        # Flush to disk periodically to bound memory use.  NOTE(review): `&`
        # is a bitwise AND on bools here; works, but `and` would be clearer.
        if (scan_count%save_interval == 0) & (scan_count != 0):
            print("Total scans processed: {}".format(scan_count))
            save_data(imgs_all, msks_all, train_test_split, save_path)
            imgs_all = []
            msks_all = []
# Save any leftover files - may miss a few at the end if the dataset size changes, this will catch those
if len(imgs_all) > 0:
    save_data(imgs_all, msks_all, train_test_split, save_path)
print("Total scans processed: {}\nDone.".format(scan_count))
| [
"numpy.dstack",
"numpy.mean",
"argparse.ArgumentParser",
"nibabel.load",
"numpy.asarray",
"os.path.join",
"numpy.ma.masked_not_equal",
"numpy.rot90",
"numpy.std",
"numpy.concatenate",
"os.walk"
] | [((142, 167), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (165, 167), False, 'import argparse\n'), ((1231, 1254), 'numpy.asarray', 'np.asarray', (['msks_parsed'], {}), '(msks_parsed)\n', (1241, 1254), True, 'import numpy as np\n'), ((1420, 1438), 'numpy.asarray', 'np.asarray', (['slices'], {}), '(slices)\n', (1430, 1438), True, 'import numpy as np\n'), ((1852, 1874), 'numpy.asarray', 'np.asarray', (['full_brain'], {}), '(full_brain)\n', (1862, 1874), True, 'import numpy as np\n'), ((2395, 2415), 'numpy.asarray', 'np.asarray', (['imgs_all'], {}), '(imgs_all)\n', (2405, 2415), True, 'import numpy as np\n'), ((2428, 2448), 'numpy.asarray', 'np.asarray', (['msks_all'], {}), '(msks_all)\n', (2438, 2448), True, 'import numpy as np\n'), ((3951, 3968), 'os.walk', 'os.walk', (['root_dir'], {}), '(root_dir)\n', (3958, 3968), False, 'import os\n'), ((1910, 1923), 'numpy.std', 'np.std', (['stack'], {}), '(stack)\n', (1916, 1923), True, 'import numpy as np\n'), ((2143, 2234), 'numpy.rot90', 'np.rot90', (['dataset[:, start_index:end_index, start_index:end_index]', 'rotate'], {'axes': '(1, 2)'}), '(dataset[:, start_index:end_index, start_index:end_index], rotate,\n axes=(1, 2))\n', (2151, 2234), True, 'import numpy as np\n'), ((1114, 1152), 'numpy.dstack', 'np.dstack', (['(none, necrotic, edema, GD)'], {}), '((none, necrotic, edema, GD))\n', (1123, 1152), True, 'import numpy as np\n'), ((1736, 1760), 'numpy.dstack', 'np.dstack', (['current_slice'], {}), '(current_slice)\n', (1745, 1760), True, 'import numpy as np\n'), ((1893, 1907), 'numpy.mean', 'np.mean', (['stack'], {}), '(stack)\n', (1900, 1907), True, 'import numpy as np\n'), ((2996, 3048), 'numpy.concatenate', 'np.concatenate', (['(imgs_train, new_imgs_train)'], {'axis': '(0)'}), '((imgs_train, new_imgs_train), axis=0)\n', (3010, 3048), True, 'import numpy as np\n'), ((3181, 3233), 'numpy.concatenate', 'np.concatenate', (['(msks_train, new_msks_train)'], {'axis': '(0)'}), '((msks_train, 
new_msks_train), axis=0)\n', (3195, 3233), True, 'import numpy as np\n'), ((3363, 3413), 'numpy.concatenate', 'np.concatenate', (['(imgs_test, new_imgs_test)'], {'axis': '(0)'}), '((imgs_test, new_imgs_test), axis=0)\n', (3377, 3413), True, 'import numpy as np\n'), ((3542, 3592), 'numpy.concatenate', 'np.concatenate', (['(msks_test, new_msks_test)'], {'axis': '(0)'}), '((msks_test, new_msks_test), axis=0)\n', (3556, 3592), True, 'import numpy as np\n'), ((864, 892), 'numpy.ma.masked_not_equal', 'ma.masked_not_equal', (['curr', '(4)'], {}), '(curr, 4)\n', (883, 892), True, 'import numpy.ma as ma\n'), ((923, 951), 'numpy.ma.masked_not_equal', 'ma.masked_not_equal', (['curr', '(2)'], {}), '(curr, 2)\n', (942, 951), True, 'import numpy.ma as ma\n'), ((985, 1013), 'numpy.ma.masked_not_equal', 'ma.masked_not_equal', (['curr', '(1)'], {}), '(curr, 1)\n', (1004, 1013), True, 'import numpy.ma as ma\n'), ((1043, 1071), 'numpy.ma.masked_not_equal', 'ma.masked_not_equal', (['curr', '(0)'], {}), '(curr, 0)\n', (1062, 1071), True, 'import numpy.ma as ma\n'), ((4404, 4430), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (4416, 4430), False, 'import os\n'), ((4601, 4627), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (4613, 4627), False, 'import os\n'), ((4778, 4804), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (4790, 4804), False, 'import os\n'), ((4957, 4983), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (4969, 4983), False, 'import os\n'), ((5139, 5165), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (5151, 5165), False, 'import os\n'), ((4449, 4463), 'nibabel.load', 'nib.load', (['path'], {}), '(path)\n', (4457, 4463), True, 'import nibabel as nib\n'), ((4646, 4660), 'nibabel.load', 'nib.load', (['path'], {}), '(path)\n', (4654, 4660), True, 'import nibabel as nib\n'), ((4823, 4837), 'nibabel.load', 'nib.load', 
(['path'], {}), '(path)\n', (4831, 4837), True, 'import nibabel as nib\n'), ((5002, 5016), 'nibabel.load', 'nib.load', (['path'], {}), '(path)\n', (5010, 5016), True, 'import nibabel as nib\n'), ((5184, 5198), 'nibabel.load', 'nib.load', (['path'], {}), '(path)\n', (5192, 5198), True, 'import nibabel as nib\n')] |
import numpy as np
import torch
import itertools
from torch.autograd import Variable
def getGridMask(frame, dimensions, num_person, neighborhood_size, grid_size, is_occupancy = False):
    """Compute the binary mask describing each pedestrian's presence in the
    others' neighborhood grids.

    params:
    frame : MNP x 3 matrix (torch tensor), each row [x, y, ...] for one pedestrian
    dimensions : [width, height] of the scene
    neighborhood_size : scalar size of the neighborhood around each pedestrian
    grid_size : number of cells per grid side
    num_person : number of people present in the frame
    is_occupancy : if True, return a per-pedestrian occupancy map of shape
                   (MNP, grid_size**2) instead of the pairwise mask of shape
                   (MNP, MNP, grid_size**2)
    """
    width, height = dimensions[0], dimensions[1]
    if is_occupancy:
        frame_mask = np.zeros((num_person, grid_size**2))
    else:
        frame_mask = np.zeros((num_person, num_person, grid_size**2))
    coords = frame.data.numpy()
    # Neighborhood extents, normalized by the scene dimensions.
    w_bound = (neighborhood_size / (width * 1.0)) * 2
    h_bound = (neighborhood_size / (height * 1.0)) * 2
    # Iterate over all ordered pairs (ego, other); ~2x faster than two
    # nested loops with an explicit self-pair check.
    for ego, other in itertools.permutations(range(num_person), 2):
        ego_x, ego_y = coords[ego, 0], coords[ego, 1]
        x_lo, x_hi = ego_x - w_bound / 2, ego_x + w_bound / 2
        y_lo, y_hi = ego_y - h_bound / 2, ego_y + h_bound / 2
        other_x, other_y = coords[other, 0], coords[other, 1]
        # Skip pedestrians outside the ego's neighborhood.
        if other_x >= x_hi or other_x < x_lo or other_y >= y_hi or other_y < y_lo:
            continue
        # Grid cell of the other pedestrian within the ego's neighborhood.
        cell_x = int(np.floor(((other_x - x_lo) / w_bound) * grid_size))
        cell_y = int(np.floor(((other_y - y_lo) / h_bound) * grid_size))
        if not (0 <= cell_x < grid_size and 0 <= cell_y < grid_size):
            continue
        if is_occupancy:
            frame_mask[ego, cell_x + cell_y * grid_size] = 1
        else:
            # The other pedestrian occupies this cell of the ego's grid.
            frame_mask[ego, other, cell_x + cell_y * grid_size] = 1
    return frame_mask
def getSequenceGridMask(sequence, dimensions, pedlist_seq, neighborhood_size, grid_size, using_cuda, is_occupancy=False):
    """Build the grid masks for every frame of a sequence.

    params:
    sequence : numpy matrix of shape SL x MNP x 3
    dimensions : [width, height] of the scene
    pedlist_seq : per-frame list of pedestrians present
    neighborhood_size : scalar size of the neighborhood considered
    grid_size : number of cells per grid side
    using_cuda : move each mask to the GPU when True
    is_occupancy : forwarded to getGridMask
    """
    sequence_mask = []
    for frame_idx in range(len(sequence)):
        raw = getGridMask(sequence[frame_idx], dimensions,
                          len(pedlist_seq[frame_idx]),
                          neighborhood_size, grid_size, is_occupancy)
        mask = Variable(torch.from_numpy(raw).float())
        if using_cuda:
            mask = mask.cuda()
        sequence_mask.append(mask)
    return sequence_mask
| [
"itertools.permutations",
"numpy.floor",
"numpy.zeros"
] | [((1441, 1480), 'itertools.permutations', 'itertools.permutations', (['list_indices', '(2)'], {}), '(list_indices, 2)\n', (1463, 1480), False, 'import itertools\n'), ((850, 881), 'numpy.zeros', 'np.zeros', (['(mnp, grid_size ** 2)'], {}), '((mnp, grid_size ** 2))\n', (858, 881), True, 'import numpy as np\n'), ((911, 947), 'numpy.zeros', 'np.zeros', (['(mnp, mnp, grid_size ** 2)'], {}), '((mnp, mnp, grid_size ** 2))\n', (919, 947), True, 'import numpy as np\n'), ((2327, 2384), 'numpy.floor', 'np.floor', (['((other_x - width_low) / width_bound * grid_size)'], {}), '((other_x - width_low) / width_bound * grid_size)\n', (2335, 2384), True, 'import numpy as np\n'), ((2407, 2466), 'numpy.floor', 'np.floor', (['((other_y - height_low) / height_bound * grid_size)'], {}), '((other_y - height_low) / height_bound * grid_size)\n', (2415, 2466), True, 'import numpy as np\n')] |
import numpy as np
from envs.focal_point_task_us_env import FocalPointTaskUsEnv
from envs.plane_task_us_env import PlaneTaskUsEnv
from envs.phantom import (
    ScatterersPhantom,
    Ball,
    Teddy
)
from envs.imaging import ImagingSystem, Probe
from envs.generator import (
    ConstPhantomGenerator,
    ConstProbeGenerator,
    RandomProbeGenerator)
import envs.logger
import sys
# Episode length and worker-pool size shared by all environment factories.
N_STEPS_PER_EPISODE = 32
N_WORKERS = 4
# Ultrasound imaging pipeline shared by all environments.
# NOTE(review): c=1540 is presumably the speed of sound [m/s] and the
# /1000 lengths below are meters — confirm against ImagingSystem.
IMAGING_SYSTEM = ImagingSystem(
    c=1540,
    fs=100e6,
    image_width=40 / 1000,
    image_height=90 / 1000,
    image_resolution=(40, 90),  # [pixels]
    median_filter_size=5,
    dr_threshold=-200,
    dec=1,
    no_lines=64
)
# Default phantom: a single "Teddy" scatterer object 50 mm deep, centered.
DEFAULT_PHANTOM = ScatterersPhantom(
    objects=[
        Teddy(
            belly_pos=np.array([0 / 1000, 0, 50 / 1000]),  # X, Y, Z
            scale=12 / 1000,
            head_offset=.9
        )
    ],
    x_border=(-40 / 1000, 40 / 1000),
    y_border=(-40 / 1000, 40 / 1000),
    z_border=(0, 90 / 1000),
    n_scatterers=int(1e4),
    n_bck_scatterers=int(1e3),
    seed=42,
)
# Generator that always yields the same default phantom.
DEFAULT_PHANTOM_GENERATOR = ConstPhantomGenerator(DEFAULT_PHANTOM)
def focal_point_env_fn(trajectory_logger, probe_generator,
phantom_generator=None,
probe_dislocation_prob=None,
dislocation_seed=None,
max_probe_dislocation=None,
step_size=10/1000):
if not phantom_generator:
phantom_generator = DEFAULT_PHANTOM_GENERATOR
imaging = IMAGING_SYSTEM
env = FocalPointTaskUsEnv(
dx_reward_coeff=2,
dz_reward_coeff=1,
imaging=imaging,
phantom_generator=phantom_generator,
probe_generator=probe_generator,
max_steps=N_STEPS_PER_EPISODE,
no_workers=N_WORKERS,
use_cache=True,
trajectory_logger=trajectory_logger,
probe_dislocation_prob=probe_dislocation_prob,
dislocation_seed=dislocation_seed,
max_probe_dislocation=max_probe_dislocation,
step_size=step_size
)
return env
def plane_task_env_fn(trajectory_logger, probe_generator,
phantom_generator=None,
probe_dislocation_prob=None,
dislocation_seed=None,
max_probe_disloc=None,
max_probe_disrot=None,
step_size=5/1000,
rot_deg=20):
if not phantom_generator:
phantom_generator = DEFAULT_PHANTOM_GENERATOR
imaging = IMAGING_SYSTEM
return PlaneTaskUsEnv(
dx_reward_coeff=1,
angle_reward_coeff=1,
imaging=imaging,
phantom_generator=phantom_generator,
probe_generator=probe_generator,
max_steps=N_STEPS_PER_EPISODE,
no_workers=N_WORKERS,
use_cache=True,
trajectory_logger=trajectory_logger,
step_size=step_size,
rot_deg=rot_deg,
probe_dislocation_prob=probe_dislocation_prob,
max_probe_disloc=max_probe_disloc,
max_probe_disrot=max_probe_disrot,
dislocation_seed=dislocation_seed
)
def test_reset():
"""Test created to check a single observation/env state visualization."""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=10,
log_state_csv_freq=10,
log_state_render_freq=10
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=30 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
def test_moving_probe_works():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=10,
log_state_csv_freq=10,
log_state_render_freq=10
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=30 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
env.step(1) # left
env.step(1) # left
env.step(2) # right (should come from cache)
env.step(2) # right (should come from cache)
env.step(2) # right
env.step(4) # down
env.step(3) # up (cached)
env.step(3) # up
def test_rewards():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=10,
log_state_csv_freq=10,
log_state_render_freq=10
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=30 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
env.step(1) # left
env.step(2) # right
env.step(4) # down
env.step(3) # up
def test_nop():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=10,
log_state_csv_freq=10,
log_state_render_freq=10
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=30 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
env.step(0) # NOP
env.step(2) # right
env.step(0) # NOP
def test_cannot_move_probe_outside_phantom_area():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=10,
log_state_csv_freq=10,
log_state_render_freq=10
)
probe = Probe(
pos=np.array([-20 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=10 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
env.step(1) # left - BUMP
env.step(2) # right # -10
env.step(2) # right # 0
env.step(2) # right # 10
env.step(2) # right # 20
env.step(2) # right # 20 - BUMP
env.step(3) # up # 0
env.step(3) # up # 0 - BUMP
env.step(4) # down # 10
env.step(4) # down # 20
env.step(4) # down # 30
env.step(4) # down # 40
env.step(4) # down # 50
env.step(4) # down # 60
env.step(4) # down # 70
env.step(4) # down # 80
env.step(4) # down # 90
env.step(4) # down # 90 - BUMP
def test_caching_works():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=10,
log_state_csv_freq=10,
log_state_render_freq=10
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=30 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
env.step(1) # left
env.step(2) # right (should come from cache)
def test_random_probe_generator():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=30 / 1000
)
teddy = Teddy(
belly_pos=np.array([0 / 1000, 0, 50 / 1000]), # X, Y, Z
scale=12 / 1000,
head_offset=.9
)
phantom = ScatterersPhantom(
objects=[teddy],
x_border=(-40 / 1000, 40 / 1000),
y_border=(-40 / 1000, 40 / 1000),
z_border=(0, 90 / 1000),
n_scatterers=int(1e4),
n_bck_scatterers=int(1e3),
seed=42,
)
phantom_generator = ConstPhantomGenerator(phantom)
probe_generator = RandomProbeGenerator(
ref_probe=probe,
object_to_align=teddy,
seed=42,
# x_pos default
# focal_pos default
)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator,
phantom_generator=phantom_generator)
env.reset()
env.step(1) # left
env.reset()
env.step(2)
env.reset()
env.step(3)
env.reset()
env.step(3)
env.reset()
env.step(1)
env.reset()
env.step(1)
def test_deep_focus():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=0 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(trajactory_logger, probe_generator=probe_generator)
env.reset()
env.step(4) # down - 10
env.step(4) # 20
env.step(4) # 30
env.step(4) # 40
env.step(4) # 50
env.step(4) # 60
env.step(4) # 70
env.step(4) # 80
env.step(4) # 90
# probe random dislocations (focal point env)
def test_random_dislocation_1():
"""
Just check if dislocation are drawn for this env.
"""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=50 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(
trajactory_logger,
probe_generator=probe_generator,
probe_dislocation_prob=.5,
dislocation_seed=42,
max_probe_dislocation=2
)
env.reset()
env.step(0)
env.step(0)
env.step(0)
env.step(0)
env.step(0)
env.step(0)
env.step(0)
env.step(0)
env.step(0)
def test_random_dislocation_2():
"""
Check if dislocations are drawn, and are properly applicated (
should not impact the last reward, should be observable in next state).
"""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=50 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(
trajactory_logger,
probe_generator=probe_generator,
probe_dislocation_prob=.5,
dislocation_seed=42,
max_probe_dislocation=2,
step_size=5/1000
)
env.reset()
env.step(1)
env.step(1)
env.step(2)
env.step(2)
env.step(1)
env.step(1)
env.step(2)
env.step(2)
def test_random_no_dislocation_2():
"""
Check if dislocations are drawn, and are properly applicated (
should not impact the last reward, should be observable in next state).
"""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=50 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = focal_point_env_fn(
trajactory_logger,
probe_generator=probe_generator,
probe_dislocation_prob=.5,
dislocation_seed=None,
max_probe_dislocation=2,
step_size=5/1000
)
env.reset()
env.step(1)
env.step(1)
env.step(2)
env.step(2)
env.step(1)
env.step(1)
env.step(2)
env.step(2)
def test_rotate_1():
"""
rotate in the center of the object 540 degree,
in one direction, in the other direction
"""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=50 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = plane_task_env_fn(trajactory_logger, probe_generator=probe_generator,
rot_deg=45)
env.reset()
env.step(3) # 45
env.step(3) # 90
env.step(3) # 135
env.step(3) # 180
env.step(3) # 225
env.step(3) # 270
env.step(3) # 315
env.step(3) # 360
env.step(3) # 45
env.step(4) # should use cache
env.step(4)
env.step(4)
env.step(4)
env.step(4)
env.step(4)
def test_rotate_2():
"""
left, left, rotate, rotate, right, right, right, rotate, rotate
"""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=50 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = plane_task_env_fn(trajactory_logger,
probe_generator=probe_generator,
rot_deg=10,
step_size=5/1000)
env.reset()
env.step(1)
env.step(1)
env.step(4)
env.step(4)
env.step(2)
env.step(2)
env.step(2)
env.step(3)
env.step(3)
def test_rotate_3():
"""
right, 9xrotate
"""
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=50 / 1000
)
probe_generator = ConstProbeGenerator(probe)
env = plane_task_env_fn(trajactory_logger,
probe_generator=probe_generator,
rot_deg=20,
step_size=5/1000)
env.reset()
env.step(2)
for _ in range(9):
env.step(3)
def test_random_probe_generator_with_angle():
trajactory_logger = envs.logger.TrajectoryLogger(
log_dir=sys.argv[1],
log_action_csv_freq=1,
log_state_csv_freq=1,
log_state_render_freq=1
)
probe = Probe(
pos=np.array([0 / 1000, 0, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=50 / 1000
)
teddy = Teddy(
belly_pos=np.array([0 / 1000, 0, 50 / 1000]), # X, Y, Z
scale=12 / 1000,
head_offset=.9
)
phantom = ScatterersPhantom(
objects=[teddy],
x_border=(-40 / 1000, 40 / 1000),
y_border=(-40 / 1000, 40 / 1000),
z_border=(0, 90 / 1000),
n_scatterers=int(1e4),
n_bck_scatterers=int(1e3),
seed=42,
)
phantom_generator = ConstPhantomGenerator(phantom)
probe_generator = RandomProbeGenerator(
ref_probe=probe,
object_to_align=teddy,
seed=42,
# x_pos default
# focal_pos default
angle=[340, 350, 0, 10, 20]
)
env = plane_task_env_fn(
trajactory_logger,
probe_generator=probe_generator,
phantom_generator=phantom_generator,
rot_deg=10,
)
env.reset()
env.step(0) # left
env.reset()
env.step(0)
env.reset()
env.step(4)
env.reset()
env.step(1)
env.reset()
env.step(2)
env.reset()
env.step(3)
if __name__ == "__main__":
globals()[sys.argv[1]]()
| [
"envs.imaging.ImagingSystem",
"envs.generator.ConstProbeGenerator",
"envs.focal_point_task_us_env.FocalPointTaskUsEnv",
"envs.plane_task_us_env.PlaneTaskUsEnv",
"envs.generator.RandomProbeGenerator",
"numpy.array",
"envs.generator.ConstPhantomGenerator"
] | [((443, 624), 'envs.imaging.ImagingSystem', 'ImagingSystem', ([], {'c': '(1540)', 'fs': '(100000000.0)', 'image_width': '(40 / 1000)', 'image_height': '(90 / 1000)', 'image_resolution': '(40, 90)', 'median_filter_size': '(5)', 'dr_threshold': '(-200)', 'dec': '(1)', 'no_lines': '(64)'}), '(c=1540, fs=100000000.0, image_width=40 / 1000, image_height=\n 90 / 1000, image_resolution=(40, 90), median_filter_size=5,\n dr_threshold=-200, dec=1, no_lines=64)\n', (456, 624), False, 'from envs.imaging import ImagingSystem, Probe\n'), ((1225, 1263), 'envs.generator.ConstPhantomGenerator', 'ConstPhantomGenerator', (['DEFAULT_PHANTOM'], {}), '(DEFAULT_PHANTOM)\n', (1246, 1263), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((1687, 2106), 'envs.focal_point_task_us_env.FocalPointTaskUsEnv', 'FocalPointTaskUsEnv', ([], {'dx_reward_coeff': '(2)', 'dz_reward_coeff': '(1)', 'imaging': 'imaging', 'phantom_generator': 'phantom_generator', 'probe_generator': 'probe_generator', 'max_steps': 'N_STEPS_PER_EPISODE', 'no_workers': 'N_WORKERS', 'use_cache': '(True)', 'trajectory_logger': 'trajectory_logger', 'probe_dislocation_prob': 'probe_dislocation_prob', 'dislocation_seed': 'dislocation_seed', 'max_probe_dislocation': 'max_probe_dislocation', 'step_size': 'step_size'}), '(dx_reward_coeff=2, dz_reward_coeff=1, imaging=imaging,\n phantom_generator=phantom_generator, probe_generator=probe_generator,\n max_steps=N_STEPS_PER_EPISODE, no_workers=N_WORKERS, use_cache=True,\n trajectory_logger=trajectory_logger, probe_dislocation_prob=\n probe_dislocation_prob, dislocation_seed=dislocation_seed,\n max_probe_dislocation=max_probe_dislocation, step_size=step_size)\n', (1706, 2106), False, 'from envs.focal_point_task_us_env import FocalPointTaskUsEnv\n'), ((2702, 3165), 'envs.plane_task_us_env.PlaneTaskUsEnv', 'PlaneTaskUsEnv', ([], {'dx_reward_coeff': '(1)', 'angle_reward_coeff': '(1)', 'imaging': 'imaging', 'phantom_generator': 
'phantom_generator', 'probe_generator': 'probe_generator', 'max_steps': 'N_STEPS_PER_EPISODE', 'no_workers': 'N_WORKERS', 'use_cache': '(True)', 'trajectory_logger': 'trajectory_logger', 'step_size': 'step_size', 'rot_deg': 'rot_deg', 'probe_dislocation_prob': 'probe_dislocation_prob', 'max_probe_disloc': 'max_probe_disloc', 'max_probe_disrot': 'max_probe_disrot', 'dislocation_seed': 'dislocation_seed'}), '(dx_reward_coeff=1, angle_reward_coeff=1, imaging=imaging,\n phantom_generator=phantom_generator, probe_generator=probe_generator,\n max_steps=N_STEPS_PER_EPISODE, no_workers=N_WORKERS, use_cache=True,\n trajectory_logger=trajectory_logger, step_size=step_size, rot_deg=\n rot_deg, probe_dislocation_prob=probe_dislocation_prob,\n max_probe_disloc=max_probe_disloc, max_probe_disrot=max_probe_disrot,\n dislocation_seed=dislocation_seed)\n', (2716, 3165), False, 'from envs.plane_task_us_env import PlaneTaskUsEnv\n'), ((3751, 3777), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (3770, 3777), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((4293, 4319), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (4312, 4319), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((5067, 5093), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (5086, 5093), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((5686, 5712), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (5705, 5712), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((6319, 6345), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (6338, 6345), False, 'from envs.generator import 
ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((7383, 7409), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (7402, 7409), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((8435, 8465), 'envs.generator.ConstPhantomGenerator', 'ConstPhantomGenerator', (['phantom'], {}), '(phantom)\n', (8456, 8465), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((8489, 8558), 'envs.generator.RandomProbeGenerator', 'RandomProbeGenerator', ([], {'ref_probe': 'probe', 'object_to_align': 'teddy', 'seed': '(42)'}), '(ref_probe=probe, object_to_align=teddy, seed=42)\n', (8509, 8558), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((9418, 9444), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (9437, 9444), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((10281, 10307), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (10300, 10307), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((11245, 11271), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (11264, 11271), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((12222, 12248), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (12241, 12248), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((13139, 13165), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (13158, 13165), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), 
((14112, 14138), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (14131, 14138), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((14935, 14961), 'envs.generator.ConstProbeGenerator', 'ConstProbeGenerator', (['probe'], {}), '(probe)\n', (14954, 14961), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((16098, 16128), 'envs.generator.ConstPhantomGenerator', 'ConstPhantomGenerator', (['phantom'], {}), '(phantom)\n', (16119, 16128), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((16152, 16255), 'envs.generator.RandomProbeGenerator', 'RandomProbeGenerator', ([], {'ref_probe': 'probe', 'object_to_align': 'teddy', 'seed': '(42)', 'angle': '[340, 350, 0, 10, 20]'}), '(ref_probe=probe, object_to_align=teddy, seed=42, angle\n =[340, 350, 0, 10, 20])\n', (16172, 16255), False, 'from envs.generator import ConstPhantomGenerator, ConstProbeGenerator, RandomProbeGenerator\n'), ((3582, 3608), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (3590, 3608), True, 'import numpy as np\n'), ((4124, 4150), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (4132, 4150), True, 'import numpy as np\n'), ((4898, 4924), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (4906, 4924), True, 'import numpy as np\n'), ((5517, 5543), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (5525, 5543), True, 'import numpy as np\n'), ((6148, 6176), 'numpy.array', 'np.array', (['[-20 / 1000, 0, 0]'], {}), '([-20 / 1000, 0, 0])\n', (6156, 6176), True, 'import numpy as np\n'), ((7214, 7240), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (7222, 7240), True, 'import numpy as np\n'), ((7831, 7857), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), 
'([0 / 1000, 0, 0])\n', (7839, 7857), True, 'import numpy as np\n'), ((8015, 8049), 'numpy.array', 'np.array', (['[0 / 1000, 0, 50 / 1000]'], {}), '([0 / 1000, 0, 50 / 1000])\n', (8023, 8049), True, 'import numpy as np\n'), ((9250, 9276), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (9258, 9276), True, 'import numpy as np\n'), ((10112, 10138), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (10120, 10138), True, 'import numpy as np\n'), ((11076, 11102), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (11084, 11102), True, 'import numpy as np\n'), ((12053, 12079), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (12061, 12079), True, 'import numpy as np\n'), ((12970, 12996), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (12978, 12996), True, 'import numpy as np\n'), ((13943, 13969), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (13951, 13969), True, 'import numpy as np\n'), ((14766, 14792), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (14774, 14792), True, 'import numpy as np\n'), ((15494, 15520), 'numpy.array', 'np.array', (['[0 / 1000, 0, 0]'], {}), '([0 / 1000, 0, 0])\n', (15502, 15520), True, 'import numpy as np\n'), ((15678, 15712), 'numpy.array', 'np.array', (['[0 / 1000, 0, 50 / 1000]'], {}), '([0 / 1000, 0, 50 / 1000])\n', (15686, 15712), True, 'import numpy as np\n'), ((812, 846), 'numpy.array', 'np.array', (['[0 / 1000, 0, 50 / 1000]'], {}), '([0 / 1000, 0, 50 / 1000])\n', (820, 846), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.neighbors import BallTree
from scipy.spatial.qhull import QhullError
from infomap import Infomap
from scipy.spatial import ConvexHull
from tqdm import tqdm
def pass_func(input, **kwargs):
return input
def query_neighbors(coords, r2, distance_metric='haversine', weighted=False):
"""Build a network from a set of points and a threshold distance.
Parameters
----------
coords : array-like (N, 2)
r2 : float
Threshold distance.
distance_metric : str
Either 'haversine' or None.
Returns
-------
nodes : list of ints
Correspond to the list of nodes
edges : list of tuples
An edge between two nodes exist if they are closer than r2.
singleton nodes : list of ints
Nodes that have no connections, e.g. have been visited once.
"""
# If the metric is haversine update points (to radians) and r2 accordingly.
if distance_metric == 'haversine':
coords = np.radians(coords)
r2 = r2 / 6371000
# Init tree
tree = BallTree(coords, metric=distance_metric)
# Query
return tree.query_radius(coords, r=r2, return_distance=weighted)
def infomap_communities(node_idx_neighbors, node_idx_distances, counts, weight_exponent, distance_metric, verbose):
"""Two-level partition of single-layer network with Infomap.
Parameters
----------
node_index_neighbors : array of arrays
Example: `array([array([0]), array([1]), array([2]), ..., array([9997]),
array([9998]), array([9999])], dtype=object)`.
Returns
-------
out : dict (node-community hash map).
"""
# Tracking
if verbose: progress = tqdm
else: progress = pass_func
# Initiate two-level Infomap
network = Infomap("--two-level")
# Add nodes (and reindex nodes because Infomap wants ranked indices)
if verbose: print(" ... adding nodes:")
name_map, name_map_inverse = {}, {}
singleton_nodes = []
infomap_idx = 0
for n, neighbors in progress(enumerate(node_idx_neighbors), total=len(node_idx_neighbors)):
if len(neighbors) > 1:
network.addNode(infomap_idx)
name_map_inverse[infomap_idx] = n
name_map[n] = infomap_idx
infomap_idx += 1
else:
singleton_nodes.append(n)
if verbose:
print(f" --> added {len(name_map)} nodes (found {len(singleton_nodes)} singleton nodes)")
# Add links
if verbose:
n_edges = 0
print(" ... adding edges")
if node_idx_distances is None:
for node, neighbors in progress(enumerate(node_idx_neighbors), total=len(node_idx_neighbors)):
for neighbor in neighbors[neighbors > node]:
network.addLink(name_map[node], name_map[neighbor], max(counts[node], counts[neighbor]))
if verbose: n_edges += 1
else:
for node, (neighbors, distances) in progress(enumerate(zip(node_idx_neighbors, node_idx_distances)), total=len(node_idx_neighbors)):
for neighbor, distance in zip(neighbors[neighbors > node], distances[neighbors > node]):
if distance_metric == "haversine":
distance *= 6371000
network.addLink(name_map[node], name_map[neighbor], max(counts[node], counts[neighbor]) * distance**(-weight_exponent))
if verbose: n_edges += 1
if verbose:
print(f" --> added {n_edges} edges")
# Run infomap
if verbose: print(" ... running Infomap...", end=" ")
if len(name_map) > 0:
network.run()
# Convert to node-community dict format
partition = dict([
(name_map_inverse[infomap_idx], module)
for infomap_idx, module in network.modules
])
if verbose: print("done")
else:
partition = {}
if verbose:
print(f"Found {len(set(partition.values()))-1} stop locations")
return partition, singleton_nodes
def label_network(node_idx_neighbors, node_idx_distances, counts, weight_exponent, label_singleton, distance_metric, verbose):
"""Infer infomap clusters from distance matrix and link distance threshold.
Parameters
----------
nodes: array
Nodes in the network.
edges: array
Edges in the network (two nodes are connected if distance<r2).
singleton_nodes: array
Non connected nodes.
label_singleton: bool
If True, give stationary locations that was only visited once their own
label. If False, label them as outliers (-1).
Returns
-------
out : array-like (N, )
Array of labels matching input in length. Detected stop locations are labeled from 0
and up, and typically locations with more observations have lower indices. If
`label_singleton=False`, coordinates with no neighbors within distance `r2` are
labeled -1.
"""
# Infer the partition with infomap. Partiton looks like `{node: community, ...}`
partition, singleton_nodes = infomap_communities(node_idx_neighbors, node_idx_distances, counts, weight_exponent, distance_metric, verbose)
# Add new labels to each singleton point (stop that was further than r2 from
# any other point and thus was not represented in the network)
if label_singleton:
max_label = max(partition.values(), default=-1)
partition.update(dict(zip(
singleton_nodes,
range(max_label+1, max_label+1+len(singleton_nodes))
)))
# Cast the partition as a vector of labels like `[0, 1, 0, 3, 0, 0, 2, ...]`
return np.array([
partition[n] if n in partition else -1
for n in range(len(node_idx_neighbors))
])
def max_pdist(points):
"""
Calculate the distance bewteen each pair in a set of points given a distance function.
Author: <NAME>
Source: https://github.com/sapiezynski/haversinevec
Input
-----
points : array-like (shape=(N, 2))
(lat, lon) in degree or radians (default is degree)
Output
------
result : array-like (shape=(N*(N-1)//2, ))
"""
def _l2(points_a, points_b):
return np.linalg.norm((points_a - points_b).reshape(-1,2),axis = 1)
c = points.shape[0]
result = np.zeros((c*(c-1)//2,), dtype=np.float64)
vec_idx = 0
for idx in range(0, c-1):
ref = points[idx]
temp = _l2(points[idx+1:c, :], ref)
#to be taken care of
result[vec_idx:vec_idx+temp.shape[0]] = temp
vec_idx += temp.shape[0]
return max(result)
def convex_hull(points, to_return='points'):
"""Return the convex hull of a collection of points."""
try:
hull = ConvexHull(points)
return points[hull.vertices, :]
except QhullError:
c = points.mean(0)
if points.shape[0] == 1:
l = 5e-5
else:
l = max_pdist(points)
return np.vstack([
c + np.array([-l/2, -l/2]), # bottom left
c + np.array([l/2, -l/2]), # bottom right
c + np.array([l/2, l/2]), # top right
c + np.array([-l/2, l/2]), # top right
])
| [
"numpy.radians",
"infomap.Infomap",
"scipy.spatial.ConvexHull",
"numpy.array",
"numpy.zeros",
"sklearn.neighbors.BallTree"
] | [((1117, 1157), 'sklearn.neighbors.BallTree', 'BallTree', (['coords'], {'metric': 'distance_metric'}), '(coords, metric=distance_metric)\n', (1125, 1157), False, 'from sklearn.neighbors import BallTree\n'), ((1857, 1879), 'infomap.Infomap', 'Infomap', (['"""--two-level"""'], {}), "('--two-level')\n", (1864, 1879), False, 'from infomap import Infomap\n'), ((6465, 6512), 'numpy.zeros', 'np.zeros', (['(c * (c - 1) // 2,)'], {'dtype': 'np.float64'}), '((c * (c - 1) // 2,), dtype=np.float64)\n', (6473, 6512), True, 'import numpy as np\n'), ((1044, 1062), 'numpy.radians', 'np.radians', (['coords'], {}), '(coords)\n', (1054, 1062), True, 'import numpy as np\n'), ((6891, 6909), 'scipy.spatial.ConvexHull', 'ConvexHull', (['points'], {}), '(points)\n', (6901, 6909), False, 'from scipy.spatial import ConvexHull\n'), ((7145, 7171), 'numpy.array', 'np.array', (['[-l / 2, -l / 2]'], {}), '([-l / 2, -l / 2])\n', (7153, 7171), True, 'import numpy as np\n'), ((7200, 7225), 'numpy.array', 'np.array', (['[l / 2, -l / 2]'], {}), '([l / 2, -l / 2])\n', (7208, 7225), True, 'import numpy as np\n'), ((7256, 7280), 'numpy.array', 'np.array', (['[l / 2, l / 2]'], {}), '([l / 2, l / 2])\n', (7264, 7280), True, 'import numpy as np\n'), ((7309, 7334), 'numpy.array', 'np.array', (['[-l / 2, l / 2]'], {}), '([-l / 2, l / 2])\n', (7317, 7334), True, 'import numpy as np\n')] |
"""
Derived module from filehandler.py to handle OpenFOAM files.
"""
import numpy as np
import pygem.filehandler as fh
class OpenFoamHandler(fh.FileHandler):
"""
OpenFOAM mesh file handler class.
:cvar string infile: name of the input file to be processed.
:cvar string outfile: name of the output file where to write in.
:cvar list extensions: extensions of the input/output files. It
is equal to [''] since openFOAM files do not have extension.
"""
def __init__(self):
super(OpenFoamHandler, self).__init__()
self.extensions = ['']
def parse(self, filename):
"""
Method to parse the `filename`. It returns a matrix with all
the coordinates.
:param string filename: name of the input file.
:return: mesh_points: it is a `n_points`-by-3 matrix containing
the coordinates of the points of the mesh
:rtype: numpy.ndarray
.. todo::
- specify when it works
"""
self._check_filename_type(filename)
self._check_extension(filename)
self.infile = filename
nrow = 0
i = 0
with open(self.infile, 'r') as input_file:
for line in input_file:
nrow += 1
if nrow == 19:
n_points = int(line)
mesh_points = np.zeros(shape=(n_points, 3))
if 20 < nrow < 21 + n_points:
line = line[line.index("(") + 1:line.rindex(")")]
j = 0
for number in line.split():
mesh_points[i][j] = float(number)
j += 1
i += 1
return mesh_points
def write(self, mesh_points, filename):
"""
Writes a openFOAM file, called filename, copying all the
lines from self.filename but the coordinates. mesh_points
is a matrix that contains the new coordinates to write in
the openFOAM file.
:param numpy.ndarray mesh_points: it is a `n_points`-by-3
matrix containing the coordinates of the points of the mesh.
:param string filename: name of the output file.
.. todo:: DOCS
"""
self._check_filename_type(filename)
self._check_extension(filename)
self._check_infile_instantiation()
self.outfile = filename
n_points = mesh_points.shape[0]
nrow = 0
i = 0
with open(self.infile, 'r') as input_file, open(self.outfile,
'w') as output_file:
for line in input_file:
nrow += 1
if 20 < nrow < 21 + n_points:
output_file.write('(' + str(mesh_points[i][0]) + ' ' + str(
mesh_points[i][1]) + ' ' + str(mesh_points[i][2]) + ')')
output_file.write('\n')
i += 1
else:
output_file.write(line)
| [
"numpy.zeros"
] | [((1385, 1414), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_points, 3)'}), '(shape=(n_points, 3))\n', (1393, 1414), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""This script is a simple example of how to add your own statistic to a
:class:`~silx.gui.plot.statsWidget.StatsWidget` from customs
:class:`~silx.gui.plot.stats.Stats` and display it.
On this example we will:
- show sum of values for each type
- compute curve integrals (only for 'curve').
- compute center of mass for all possible items
.. note:: for now the possible types manged by the Stats are ('curve', 'image',
'scatter' and 'histogram')
"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "06/06/2018"
from silx.gui import qt
from silx.gui.plot import Plot1D
from silx.gui.plot.stats.stats import StatBase
import numpy
class Integral(StatBase):
"""
Simple calculation of the line integral
"""
def __init__(self):
StatBase.__init__(self, name='integral', compatibleKinds=('curve',))
def calculate(self, context):
xData, yData = context.data
return numpy.trapz(x=xData, y=yData)
class COM(StatBase):
"""
Compute data center of mass
"""
def __init__(self):
StatBase.__init__(self, name='COM', description="Center of mass")
def calculate(self, context):
if context.kind in ('curve', 'histogram'):
xData, yData = context.data
com = numpy.sum(xData * yData).astype(numpy.float32) / numpy.sum(
yData).astype(numpy.float32)
return com
elif context.kind == 'scatter':
xData = context.data[0]
values = context.values
com = numpy.sum(xData * values).astype(numpy.float32) / numpy.sum(
values).astype(numpy.float32)
return com
def main():
app = qt.QApplication([])
plot = Plot1D()
x = numpy.arange(21)
y = numpy.arange(21)
plot.addCurve(x=x, y=y, legend='myCurve')
plot.addCurve(x=x, y=(y + 5), legend='myCurve2')
plot.setActiveCurve('myCurve')
plot.addScatter(x=[0, 2, 5, 5, 12, 20],
y=[2, 3, 4, 20, 15, 6],
value=[5, 6, 7, 10, 90, 20],
legend='myScatter')
stats = [
('sum', numpy.sum),
Integral(),
(COM(), '{0:.2f}'),
]
plot.getStatsWidget().setStats(stats)
plot.getStatsWidget().parent().setVisible(True)
# Update the checkedbox cause we arre playing with the visibility
plot.getStatsAction().setChecked(True)
plot.show()
app.exec_()
if __name__ == '__main__':
main()
| [
"numpy.trapz",
"silx.gui.qt.QApplication",
"silx.gui.plot.Plot1D",
"numpy.sum",
"silx.gui.plot.stats.stats.StatBase.__init__",
"numpy.arange"
] | [((3016, 3035), 'silx.gui.qt.QApplication', 'qt.QApplication', (['[]'], {}), '([])\n', (3031, 3035), False, 'from silx.gui import qt\n'), ((3048, 3056), 'silx.gui.plot.Plot1D', 'Plot1D', ([], {}), '()\n', (3054, 3056), False, 'from silx.gui.plot import Plot1D\n'), ((3066, 3082), 'numpy.arange', 'numpy.arange', (['(21)'], {}), '(21)\n', (3078, 3082), False, 'import numpy\n'), ((3091, 3107), 'numpy.arange', 'numpy.arange', (['(21)'], {}), '(21)\n', (3103, 3107), False, 'import numpy\n'), ((2106, 2174), 'silx.gui.plot.stats.stats.StatBase.__init__', 'StatBase.__init__', (['self'], {'name': '"""integral"""', 'compatibleKinds': "('curve',)"}), "(self, name='integral', compatibleKinds=('curve',))\n", (2123, 2174), False, 'from silx.gui.plot.stats.stats import StatBase\n'), ((2261, 2290), 'numpy.trapz', 'numpy.trapz', ([], {'x': 'xData', 'y': 'yData'}), '(x=xData, y=yData)\n', (2272, 2290), False, 'import numpy\n'), ((2394, 2459), 'silx.gui.plot.stats.stats.StatBase.__init__', 'StatBase.__init__', (['self'], {'name': '"""COM"""', 'description': '"""Center of mass"""'}), "(self, name='COM', description='Center of mass')\n", (2411, 2459), False, 'from silx.gui.plot.stats.stats import StatBase\n'), ((2604, 2628), 'numpy.sum', 'numpy.sum', (['(xData * yData)'], {}), '(xData * yData)\n', (2613, 2628), False, 'import numpy\n'), ((2653, 2669), 'numpy.sum', 'numpy.sum', (['yData'], {}), '(yData)\n', (2662, 2669), False, 'import numpy\n'), ((2862, 2887), 'numpy.sum', 'numpy.sum', (['(xData * values)'], {}), '(xData * values)\n', (2871, 2887), False, 'import numpy\n'), ((2912, 2929), 'numpy.sum', 'numpy.sum', (['values'], {}), '(values)\n', (2921, 2929), False, 'import numpy\n')] |
from PairedNeurons import PairedNeurons
from matplotlib import pyplot as plt
import os
import numpy as np
import cv2
from xlwt import Workbook
from skimage.segmentation import clear_border
SMOOTH = 1e-6
def iou_numpy(outputs: np.array, labels: np.array):
# outputs = outputs.squeeze(2)
intersection = (outputs & labels).sum((0, 1))
union = (outputs | labels).sum((0, 1))
iou = (intersection + SMOOTH) / (union + SMOOTH)
# thresholded = np.ceil(np.clip(20 * (iou - 0.5), 0, 10)) / 10
return iou # Or thresholded.mean()
img_dir = "/Users/mac/Desktop/Rice-COMP576/sartorius-cell-instance-segmentation/train"
csv_dir = "/Users/mac/Desktop/Rice-COMP576/sartorius-cell-instance-segmentation/train.csv"
pn = PairedNeurons(img_dir, csv_dir, 256, is_train=False)
sum1,sum2,sum3,sum4,sum5,sum6=0,0,0,0,0,0
# Workbook is created
wb = Workbook()
# add_sheet is used to create sheet.
sheet1 = wb.add_sheet('Sheet 1')
sheet1.write(0,0,"Image Name")
sheet1.write(0,1,"IOU for Binary+OSTU")
sheet1.write(0,2,"segmented further using watershed")
sheet1.write(0,3,"Using distance transform and thresholding")
sheet1.write(0,4,"threshold the dist transform at 1/2 its max value.")
for i in range(len(pn)):
x, y, l = pn.__getitem__(i)
sheet1.write(i+1, 0, l)
fig, axs = plt.subplots(2, 3, figsize=(16, 8))
# # print(fig.shape)
# # print(y.shape)
# # fig.colorbar(im)
# plt.savefig(os.path.join("./save", l))
# plt.close()
# plt.subplot(2, 3, i + 1)
###1
x=np.uint8(x*255)
axs[0,0].imshow(y, cmap="gray")
axs[0,0].axis("off")
axs[0,0].title.set_text("Grouth truth seg")
ret, th1 = cv2.threshold(x, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(th1,cv2.MORPH_OPEN,kernel, iterations = 1)
opening = clear_border(opening) #Remove edge touching grains
# print(iou_numpy(x.astype(int),np.uint8(opening).astype(int)))
sum1+=iou_numpy(x,opening.astype(int))
sheet1.write(i+1,1,sum1)
axs[0,1].imshow(opening, cmap="gray")
axs[0,1].axis("off")
axs[0,1].title.set_text("Threshold image to binary using OTSU")
###2
sure_bg = cv2.dilate(opening,kernel,iterations=1)
axs[0,2].imshow(sure_bg, cmap="gray")
axs[0,2].axis("off")
axs[0,2].title.set_text("segmented further using watershed")
sum2+=iou_numpy(np.uint8(y*255),sure_bg.astype(int))
sheet1.write(i+1,2,sum2)
###
###3
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,3)
axs[1,0].imshow(dist_transform, cmap="gray")
axs[1,0].axis("off")
axs[1,0].title.set_text("Using distance transform and thresholding")
sum3+=iou_numpy(np.uint8(y*255),dist_transform.astype(int))
sheet1.write(i+1,3,sum3)
###4
ret2, sure_fg = cv2.threshold(dist_transform,0.5*dist_transform.max(),255,0)
axs[1,1].imshow(sure_bg, cmap="gray")
axs[1,1].axis("off")
axs[1,1].title.set_text("threshold the dist transform at 1/2 its max value.")
sum4+=iou_numpy(np.uint8(y*255),sure_bg.astype(int))
sheet1.write(i+1,4,sum4)
####
###5 Unknown ambiguous region is nothing but bkground - foreground
sure_fg = np.uint8(sure_fg) #Convert to uint8 from float
unknown = cv2.subtract(sure_bg,sure_fg)
sum4+=iou_numpy(np.uint8(y*255),sure_bg.astype(int))
axs[1,2].imshow(unknown, cmap="gray")
axs[1,2].axis("off")
axs[1,2].title.set_text("Unknown ambiguous region is nothing but bkground ")
sheet1.write(i+1,5,sum5)
fig.tight_layout()
# print(iou_numpy((x*255).astype(int),(th1*255).astype(int)))
plt.savefig(os.path.join("./save", l))
plt.close()
# plt.title(l)
# plt.subplot(2, 3, i + 1)
# plt.imshow(opening, 'gray')
# plt.show()
wb.save('result.xls')
# print(sum/len(pn)) | [
"numpy.uint8",
"numpy.ones",
"cv2.threshold",
"os.path.join",
"skimage.segmentation.clear_border",
"matplotlib.pyplot.close",
"cv2.morphologyEx",
"PairedNeurons.PairedNeurons",
"cv2.distanceTransform",
"cv2.dilate",
"cv2.subtract",
"xlwt.Workbook",
"matplotlib.pyplot.subplots"
] | [((748, 800), 'PairedNeurons.PairedNeurons', 'PairedNeurons', (['img_dir', 'csv_dir', '(256)'], {'is_train': '(False)'}), '(img_dir, csv_dir, 256, is_train=False)\n', (761, 800), False, 'from PairedNeurons import PairedNeurons\n'), ((870, 880), 'xlwt.Workbook', 'Workbook', ([], {}), '()\n', (878, 880), False, 'from xlwt import Workbook\n'), ((1319, 1354), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(16, 8)'}), '(2, 3, figsize=(16, 8))\n', (1331, 1354), True, 'from matplotlib import pyplot as plt\n'), ((1538, 1555), 'numpy.uint8', 'np.uint8', (['(x * 255)'], {}), '(x * 255)\n', (1546, 1555), True, 'import numpy as np\n'), ((1683, 1744), 'cv2.threshold', 'cv2.threshold', (['x', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(x, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (1696, 1744), False, 'import cv2\n'), ((1756, 1781), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (1763, 1781), True, 'import numpy as np\n'), ((1794, 1853), 'cv2.morphologyEx', 'cv2.morphologyEx', (['th1', 'cv2.MORPH_OPEN', 'kernel'], {'iterations': '(1)'}), '(th1, cv2.MORPH_OPEN, kernel, iterations=1)\n', (1810, 1853), False, 'import cv2\n'), ((1868, 1889), 'skimage.segmentation.clear_border', 'clear_border', (['opening'], {}), '(opening)\n', (1880, 1889), False, 'from skimage.segmentation import clear_border\n'), ((2227, 2268), 'cv2.dilate', 'cv2.dilate', (['opening', 'kernel'], {'iterations': '(1)'}), '(opening, kernel, iterations=1)\n', (2237, 2268), False, 'import cv2\n'), ((2523, 2569), 'cv2.distanceTransform', 'cv2.distanceTransform', (['opening', 'cv2.DIST_L2', '(3)'], {}), '(opening, cv2.DIST_L2, 3)\n', (2544, 2569), False, 'import cv2\n'), ((3237, 3254), 'numpy.uint8', 'np.uint8', (['sure_fg'], {}), '(sure_fg)\n', (3245, 3254), True, 'import numpy as np\n'), ((3299, 3329), 'cv2.subtract', 'cv2.subtract', (['sure_bg', 'sure_fg'], {}), '(sure_bg, sure_fg)\n', (3311, 3329), False, 'import cv2\n'), 
((3709, 3720), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3718, 3720), True, 'from matplotlib import pyplot as plt\n'), ((2419, 2436), 'numpy.uint8', 'np.uint8', (['(y * 255)'], {}), '(y * 255)\n', (2427, 2436), True, 'import numpy as np\n'), ((2735, 2752), 'numpy.uint8', 'np.uint8', (['(y * 255)'], {}), '(y * 255)\n', (2743, 2752), True, 'import numpy as np\n'), ((3067, 3084), 'numpy.uint8', 'np.uint8', (['(y * 255)'], {}), '(y * 255)\n', (3075, 3084), True, 'import numpy as np\n'), ((3349, 3366), 'numpy.uint8', 'np.uint8', (['(y * 255)'], {}), '(y * 255)\n', (3357, 3366), True, 'import numpy as np\n'), ((3678, 3703), 'os.path.join', 'os.path.join', (['"""./save"""', 'l'], {}), "('./save', l)\n", (3690, 3703), False, 'import os\n')] |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""An efficient implementation of searchlight for M1NN.
"""
__docformat__ = "restructuredtext"
import numpy as np
from mvpa2.base.dochelpers import borrowkwargs, _repr_attrs
from mvpa2.misc.neighborhood import IndexQueryEngine, Sphere
from mvpa2.clfs.distance import squared_euclidean_distance, one_minus_correlation
from mvpa2.measures.adhocsearchlightbase import SimpleStatBaseSearchlight, _STATS
if __debug__:
from mvpa2.base import debug
import time as time
__all__ = ["M1NNSearchlight", "sphere_m1nnsearchlight"]
class M1NNSearchlight(SimpleStatBaseSearchlight):
"""Efficient implementation of Mean-Nearest-Neighbor `Searchlight`."""
@borrowkwargs(SimpleStatBaseSearchlight, "__init__")
def __init__(self, knn, generator, qe, **kwargs):
"""Initialize a M1NNSearchlight
TODO -- example? or just kill altogether
rethink providing knn sample vs specifying all parameters
explicitly
Parameters
----------
knn : `kNN`
Used to fetch space and dfx settings. TODO
"""
# verify that desired features are supported
if knn.dfx == squared_euclidean_distance:
self._distance = "euclidean"
elif knn.dfx == one_minus_correlation:
self._distance = "correlation"
# we rely on having simple indexes for ROI members ATM
if "indexsum" in kwargs and kwargs["indexsum"] != "fancy":
raise ValueError(
"Can only use indexsum='fancy' with correlation distance."
)
kwargs["indexsum"] = "fancy"
else:
raise ValueError(
"%s distance function is not yet supported by M1NNSearchlight"
% (knn.dfx,)
)
# init base class first
SimpleStatBaseSearchlight.__init__(self, generator, qe, **kwargs)
self._knn = knn
self.__pl_train = self.__pl_test = None
def __repr__(self, prefixes=None):
if prefixes is None:
prefixes = []
return super(M1NNSearchlight, self).__repr__(
prefixes=prefixes + _repr_attrs(self, ["knn"])
)
def _get_space(self):
return self.knn.get_space()
def _untrain(self):
super(M1NNSearchlight, self)._untrain()
self.__pl_train = self.__pl_test = None
def _reserve_pl_stats_space(self, shape):
# per each label: to be (re)computed within each loop split
# Let's try to reuse the memory though
self.__pl_train = _STATS()
self.__pl_test = _STATS()
for pl in (self.__pl_train, self.__pl_test):
pl.sums = np.zeros(shape)
pl.means = np.zeros(shape)
# means of squares for stddev computation
pl.sums2 = np.zeros(shape)
pl.variances = np.zeros(shape)
# degenerate dimension are added for easy broadcasting later on
pl.nsamples = np.zeros(shape[:1] + (1,) * (len(shape) - 1))
def _sl_call_on_a_split(
self,
split,
X,
training_sis,
testing_sis,
nroi_fids,
roi_fids,
indexsum_fx,
labels_numeric,
):
"""Call to M1NNSearchlight"""
# Local bindings
knn = self.knn
params = knn.params
pl_train = self.__pl_train
pl_test = self.__pl_test
training_nsamples, training_non0labels = self._compute_pl_stats(
training_sis, pl_train
)
testing_nsamples, testing_non0labels = self._compute_pl_stats(
testing_sis, pl_test
)
nlabels = len(pl_train.nsamples)
assert len(np.unique(labels_numeric)) == nlabels
assert training_non0labels == slice(
None
) # not sure/tested if we can handle this one
assert testing_non0labels == slice(
None
) # not sure/tested if we can handle this one
# squared distances between the means...
# hm, but we need for each combination of labels
# so we keep 0th dimension corresponding to test "samples/labels"
if self._distance == "euclidean":
diff_pl_pl = pl_test.means[:, None] - pl_train.means[None, :]
diff_pl_pl2 = np.square(diff_pl_pl)
# XXX OPT: is it worth may be reserving the space beforehand?
dist_pl_pl2_sl = np.zeros(diff_pl_pl2.shape[:-1] + (nroi_fids,))
indexsum_fx(diff_pl_pl2, roi_fids, out=dist_pl_pl2_sl)
elif self._distance == "correlation":
roi_nfids = np.array(list(map(len, roi_fids))) # # voxels in each ROI
# estimate the means of each of the searchlight within each condition
# indexsum, divide by # of elements
shape_ = pl_test.means.shape[:-1] + (nroi_fids,)
def get_means_per_roi(pl):
roi_means = np.empty(shape_)
indexsum_fx(pl.means, roi_fids, out=roi_means)
roi_means /= roi_nfids
return roi_means
roi_means_train = get_means_per_roi(pl_train)
roi_means_test = get_means_per_roi(pl_test)
# de-mean within each searchlight
# problema since within each SL will be a different demean, and different
# ROIs have different number of features so we can't just go into 3rd dimension
# (well we probably could but not sure if it would benefit us)
# we can't easily do that without going per each ROI I am afraid!
# So let's stop being (way too) smart and just do per each ROI for now
dist_pl_pl2_sl = np.ones((nlabels, nlabels, nroi_fids))
for i, (fids, nfids, mean_train, mean_test) in enumerate(
zip(roi_fids, roi_nfids, roi_means_train.T, roi_means_test.T)
):
# Select those means from train and test
# OPT: I could have avoided computing demeaned, but oh well -- will leave it for someone
# to investigate on how much speed up it would get
roi_train_demeaned = pl_train.means[:, fids] - mean_train[:, None]
# estimate stddevs of each of the searchlight
# take demeaned, square them, sum within each searchlight, divide by # of elements
roi_train_std = np.sqrt(
np.sum(roi_train_demeaned * roi_train_demeaned, axis=1) / nfids
)
roi_test_demeaned = pl_test.means[:, fids] - mean_test[:, None]
# estimate stddevs of each of the searchlight
# take demeaned, square them, sum within each searchlight, divide by # of elements
roi_test_std = np.sqrt(
np.sum(np.square(roi_test_demeaned), axis=1) / nfids
)
# estimate dot-products between each label pair of training/testing
# product, sum, divide by # of elements in each searchlight
dot_pl_pl = (
roi_test_demeaned[:, None] * roi_train_demeaned[None, :]
).mean(axis=-1)
# correlations, and subtract them from 1 so we get a distance
# divide by sttdevs of each pair of training/testing
dist_pl_pl2_sl[:, :, i] -= dot_pl_pl / (
roi_test_std[:, None] * roi_train_std[None, :]
)
else:
raise RuntimeError("Must have not got here")
# predictions are just the labels with minimal distance
predictions = np.argmin(dist_pl_pl2_sl, axis=1)
return np.asanyarray(self._ulabels_numeric), predictions
knn = property(fget=lambda self: self._knn)
@borrowkwargs(M1NNSearchlight, "__init__", exclude=["roi_ids", "queryengine"])
def sphere_m1nnsearchlight(
knn, generator, radius=1, center_ids=None, space="voxel_indices", *args, **kwargs
):
"""Creates a `M1NNSearchlight` to assess :term:`cross-validation`
classification performance of M1NN on all possible spheres of a
certain size within a dataset.
The idea of taking advantage of naiveness of M1NN for the sake of
quick searchlight-ing stems from <NAME> (paper under
review).
Parameters
----------
radius : float
All features within this radius around the center will be part
of a sphere.
center_ids : list of int
List of feature ids (not coordinates) the shall serve as sphere
centers. By default all features will be used (it is passed
roi_ids argument for Searchlight).
space : str
Name of a feature attribute of the input dataset that defines the spatial
coordinates of all features.
**kwargs
In addition this class supports all keyword arguments of
:class:`~mvpa2.measures.nnsearchlight.M1NNSearchlight`.
Notes
-----
If any `BaseSearchlight` is used as `SensitivityAnalyzer` one has to make
sure that the specified scalar `Measure` returns large
(absolute) values for high sensitivities and small (absolute) values
for low sensitivities. Especially when using error functions usually
low values imply high performance and therefore high sensitivity.
This would in turn result in sensitivity maps that have low
(absolute) values indicating high sensitivities and this conflicts
with the intended behavior of a `SensitivityAnalyzer`.
"""
# build a matching query engine from the arguments
kwa = {space: Sphere(radius)}
qe = IndexQueryEngine(**kwa)
# init the searchlight with the queryengine
return M1NNSearchlight(knn, generator, qe, roi_ids=center_ids, *args, **kwargs)
| [
"mvpa2.base.dochelpers.borrowkwargs",
"mvpa2.measures.adhocsearchlightbase._STATS",
"numpy.unique",
"numpy.ones",
"mvpa2.misc.neighborhood.IndexQueryEngine",
"numpy.square",
"numpy.asanyarray",
"numpy.sum",
"numpy.zeros",
"numpy.empty",
"mvpa2.base.dochelpers._repr_attrs",
"numpy.argmin",
"m... | [((8166, 8243), 'mvpa2.base.dochelpers.borrowkwargs', 'borrowkwargs', (['M1NNSearchlight', '"""__init__"""'], {'exclude': "['roi_ids', 'queryengine']"}), "(M1NNSearchlight, '__init__', exclude=['roi_ids', 'queryengine'])\n", (8178, 8243), False, 'from mvpa2.base.dochelpers import borrowkwargs, _repr_attrs\n'), ((1046, 1097), 'mvpa2.base.dochelpers.borrowkwargs', 'borrowkwargs', (['SimpleStatBaseSearchlight', '"""__init__"""'], {}), "(SimpleStatBaseSearchlight, '__init__')\n", (1058, 1097), False, 'from mvpa2.base.dochelpers import borrowkwargs, _repr_attrs\n'), ((9962, 9985), 'mvpa2.misc.neighborhood.IndexQueryEngine', 'IndexQueryEngine', ([], {}), '(**kwa)\n', (9978, 9985), False, 'from mvpa2.misc.neighborhood import IndexQueryEngine, Sphere\n'), ((2216, 2281), 'mvpa2.measures.adhocsearchlightbase.SimpleStatBaseSearchlight.__init__', 'SimpleStatBaseSearchlight.__init__', (['self', 'generator', 'qe'], {}), '(self, generator, qe, **kwargs)\n', (2250, 2281), False, 'from mvpa2.measures.adhocsearchlightbase import SimpleStatBaseSearchlight, _STATS\n'), ((2945, 2953), 'mvpa2.measures.adhocsearchlightbase._STATS', '_STATS', ([], {}), '()\n', (2951, 2953), False, 'from mvpa2.measures.adhocsearchlightbase import SimpleStatBaseSearchlight, _STATS\n'), ((2979, 2987), 'mvpa2.measures.adhocsearchlightbase._STATS', '_STATS', ([], {}), '()\n', (2985, 2987), False, 'from mvpa2.measures.adhocsearchlightbase import SimpleStatBaseSearchlight, _STATS\n'), ((8014, 8047), 'numpy.argmin', 'np.argmin', (['dist_pl_pl2_sl'], {'axis': '(1)'}), '(dist_pl_pl2_sl, axis=1)\n', (8023, 8047), True, 'import numpy as np\n'), ((9937, 9951), 'mvpa2.misc.neighborhood.Sphere', 'Sphere', (['radius'], {}), '(radius)\n', (9943, 9951), False, 'from mvpa2.misc.neighborhood import IndexQueryEngine, Sphere\n'), ((3063, 3078), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (3071, 3078), True, 'import numpy as np\n'), ((3102, 3117), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', 
(3110, 3117), True, 'import numpy as np\n'), ((3195, 3210), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (3203, 3210), True, 'import numpy as np\n'), ((3238, 3253), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (3246, 3253), True, 'import numpy as np\n'), ((4678, 4699), 'numpy.square', 'np.square', (['diff_pl_pl'], {}), '(diff_pl_pl)\n', (4687, 4699), True, 'import numpy as np\n'), ((4804, 4851), 'numpy.zeros', 'np.zeros', (['(diff_pl_pl2.shape[:-1] + (nroi_fids,))'], {}), '(diff_pl_pl2.shape[:-1] + (nroi_fids,))\n', (4812, 4851), True, 'import numpy as np\n'), ((8064, 8100), 'numpy.asanyarray', 'np.asanyarray', (['self._ulabels_numeric'], {}), '(self._ulabels_numeric)\n', (8077, 8100), True, 'import numpy as np\n'), ((4083, 4108), 'numpy.unique', 'np.unique', (['labels_numeric'], {}), '(labels_numeric)\n', (4092, 4108), True, 'import numpy as np\n'), ((6068, 6106), 'numpy.ones', 'np.ones', (['(nlabels, nlabels, nroi_fids)'], {}), '((nlabels, nlabels, nroi_fids))\n', (6075, 6106), True, 'import numpy as np\n'), ((2536, 2562), 'mvpa2.base.dochelpers._repr_attrs', '_repr_attrs', (['self', "['knn']"], {}), "(self, ['knn'])\n", (2547, 2562), False, 'from mvpa2.base.dochelpers import borrowkwargs, _repr_attrs\n'), ((5311, 5327), 'numpy.empty', 'np.empty', (['shape_'], {}), '(shape_)\n', (5319, 5327), True, 'import numpy as np\n'), ((6806, 6861), 'numpy.sum', 'np.sum', (['(roi_train_demeaned * roi_train_demeaned)'], {'axis': '(1)'}), '(roi_train_demeaned * roi_train_demeaned, axis=1)\n', (6812, 6861), True, 'import numpy as np\n'), ((7199, 7227), 'numpy.square', 'np.square', (['roi_test_demeaned'], {}), '(roi_test_demeaned)\n', (7208, 7227), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
# from IPython.display import Image,display
import matplotlib.pyplot as plt
data = []
labels = []
alldata = []
# XORdata=np.array([[0,0,0],[0,1,1],[1,0,1],[1,1,0]])
# X=XORdata[:,0:2]
# y=XORdata[:,-1]
def print_network(net):
for i,layer in enumerate(net,1):
print("Layer {} ".format(i))
for j,neuron in enumerate(layer,1):
print("neuron {} :".format(j),neuron)
def rocForIndex(predictedx,expectedx,rnk,labelno):
newexpected2=[]
for index in range(len(expectedx)):
newexpected1=[]
for id,i in enumerate(expectedx[index].tolist()):
if i==1:
newexpected1.append(1)
else:
newexpected1.append(0)
newexpected2.append(newexpected1)
newpredicted2=[]
for index in range(len(predictedx)):
newpredicted1=[]
for id,i in enumerate(predictedx[index]):
if i==1:
newpredicted1.append(1)
else:
newpredicted1.append(0)
newpredicted2.append(newpredicted1)
return np.array(newexpected2) , np.array(newpredicted2)
def decodeBinaryToBinaryPred(expectedrows):
declist_expected=[]
for row in expectedrows:
expectedx=np.argmax(row[0])+1
if(expectedx==1):
expectedx=[1,0,0]
if(expectedx==2):
expectedx=[0,1,0]
if(expectedx==3):
expectedx=[0,0,1]
declist_expected.append(expectedx)
return declist_expected
def decodeBinaryToInt(expectedrows):
declist_expected=[]
for row in expectedrows:
expectedx=np.argmax(row)+1
declist_expected.append(expectedx)
return declist_expected
def drawROC(testY,probs,nolabels):
# probs=[i.tolist()[0] for i in probs]
plt.figure()
fpr = dict()
tpr = dict()
roc_auc = dict()
##################################
testY1= decodeBinaryToInt(testY)
probs1= decodeBinaryToBinaryPred(probs)
testY2, probs2=rocForIndex(probs1,testY, 1,3)
for i in range(3):
fpr[i], tpr[i], _ = roc_curve(testY2[:, i], probs2[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
fpr["micro"], tpr["micro"], _ = roc_curve(testY2.ravel(), probs2.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
plt.plot(fpr["micro"], tpr["micro"],label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]))
for i in range(3):
plt.plot(fpr[i], tpr[i], label='ROC curve of Rank1 for label {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
#################################################
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
def initialize_network():
input_neurons=len(X[0])
hidden_neurons=input_neurons+1
output_neurons=3
n_hidden_layers=1
net=list()
for h in range(n_hidden_layers):
if h!=0:
input_neurons=len(net[-1])
hidden_layer = [ { 'weights': np.random.uniform(low=-0.5, high=0.5,size=input_neurons)} for i in range(hidden_neurons) ]
net.append(hidden_layer)
output_layer = [ { 'weights': np.random.uniform(low=-0.5, high=0.5,size=hidden_neurons)} for i in range(output_neurons)]
net.append(output_layer)
return net
def activate_sigmoidactivat (sum):
return (1/(1+np.exp(-sum)))
def forward_propagationforward (net,input):
row=input
for layer in net:
prev_input=np.array([])
for neuron in layer:
sum=neuron['weights'].T.dot(row)
result=activate_sigmoid(sum)
neuron['result']=result
prev_input=np.append(prev_input,[result])
row =prev_input
return row
def sigmoidDerivative(output):
return output*(1.0-output)
def back_propagation(net,row,expected):
for i in reversed(range(len(net))):
layer=net[i]
errors=np.array([])
if i==len(net)-1:
results=[neuron['result'] for neuron in layer]
errors = expected-np.array(results)
else:
for j in range(len(layer)):
herror=0
nextlayer=net[i+1]
for neuron in nextlayer:
herror+=(neuron['weights'][j]*neuron['delta'])
errors=np.append(errors,[herror])
for j in range(len(layer)):
neuron=layer[j]
neuron['delta']=errors[j]*sigmoidDerivative(neuron['result'])
return net
def updateWeights(net,input,lrate):
for i in range(len(net)):
inputs = input
if i!=0:
inputs=[neuron['result'] for neuron in net[i-1]]
for neuron in net[i]:
for j in range(len(inputs)):
neuron['weights'][j]+=lrate*neuron['delta']*inputs[j]
return net
def training(net, epochs,lrate,n_outputs):
errors=[]
for epoch in range(epochs):
sum_error=0
for i,row in enumerate(X):
outputs,net=forward_propagation(net,row)
expected=[0.0 for i in range(n_outputs)]
sum_error+=sum([(expected[j]-outputs[j])**2 for j in range(len(expected))])
net=back_propagation(net,row,expected)
net=updateWeights(net,row,0.05)
if epoch%10 ==0:
print('>epoch=%d,error=%.3f'%(epoch,sum_error))
errors.append(sum_error)
return errors ,net
# Make a prediction with a network# Make a
def predict(network, rows):
totalvals=[]
for row in rows:
outputs = forward_propagation(network, row)
totalvals.append(outputs)
return totalvals
def forward_propagation(net,input):
row=input
for layer in net:
prev_input=np.array([])
for neuron in layer:
sum=neuron['weights'].T.dot(row)
result=activate_sigmoid(sum)
neuron['result']=result
prev_input=np.append(prev_input,[result])
row =prev_input
return row ,net
def activate_sigmoid(sum):
return (1/(1+np.exp(-sum)))
def numericlabels(data1):
integer_list=( [list( map(int,i) ) for i in data1] )
return integer_list
def numericdataandllabels(data):
numericdatavalues = list()
temprownumeric = list()
for i in range(0, (len(data))):
row = data[i]
temprow = list()
if(len(row) == 0):
del data[i]
continue
for j in range(len(row)-3, len(row)):
temp = row[j]
row[j] = temp[1:]
floatvalues = [float(item) for item in row]
temprownumeric.append(floatvalues)
return temprownumeric
def calcConfusion( predictedList,y_test_original):
acc_sum = 0
sens_sum = 0
spec_sum = 0
# for i in range(len(y_test_original)):
for i in range(len(y_test_original)):
ss = y_test_original# self.calculate_rank(y_test[i])
a=ss.tolist()[i]
b=predictedList[i]
bb=np.argmax(b[0])+1
if(bb==1):
aa=[1,0,0]
if(bb==2):
aa=[0,1,0]
if(bb==3):
aa=[0,0,1]
cm1 = confusion_matrix(a, aa,normalize='all')
# print('Confusion Matrix : \n', cm1)
cm1=np.nan_to_num(cm1)
#####from confusion matrix calculate accuracy
accuracy1 = (cm1[0, 0] + cm1[1, 1]) / (cm1[0, 0] + cm1[0, 1]+cm1[1, 0] + cm1[1, 1])
acc_sum += accuracy1
sensitivity1 = cm1[0, 0] / (cm1[0, 0] + cm1[0, 1])
if(np.isnan(sensitivity1)):
sensitivity1=0
sens_sum += sensitivity1
specificity1 = cm1[1, 1] / (cm1[1, 0] + cm1[1, 1])
if(np.isnan(specificity1)):
specificity1=0
spec_sum += specificity1
print('Accuracy : ', acc_sum / len(y_test_original))
print('Sensitivity : ', sens_sum / len(y_test_original))
print('Specificity : ', spec_sum / len(y_test_original))
def binaryToDecimal(binary):
binary1 = binary
decimal, i, n = 0, 0, 0
while(binary != 0):
dec = binary % 10
decimal = decimal + dec * pow(2, i)
binary = binary//10
i += 1
print(decimal)
return decimal
def numericdata(data):
    """Convert each non-empty row of strings into a row of floats.

    The original deleted empty rows from ``data`` while indexing over the
    list's original length, which shifted later rows and could raise
    IndexError; filtering in a comprehension avoids that bug (and no longer
    mutates the caller's list).  Unused locals were removed.
    """
    return [[float(item) for item in row] for row in data if row]
###############################################################################################################################
###############################################################################################################################
##################################################################################################################################
from sklearn.model_selection import train_test_split
import time
import csv
# ---------------------------------------------------------------------------
# Script entry: load the ranked iris data set, train the hand-rolled network
# defined above, and report confusion-matrix / ROC metrics.
# NOTE(review): `data`, `labels` and `alldata` are appended to below but are
# not defined in this section of the file -- presumably initialised earlier;
# confirm, otherwise this raises NameError.
# ---------------------------------------------------------------------------
start = time.time()
# Hard-coded Windows path to the ranked iris data set.
filename = 'C:\\Ayman\\PhDThesis\\iris_rank.txt'
# Set up input and output variables for the script
gpsTrack = open(filename, "r")  # NOTE(review): this handle is never closed
# Set up CSV reader and process the header
csvReader = csv.reader(gpsTrack)
# header = next(csvReader)
# Loop through the lines in the file and get each coordinate
for row in csvReader:
    data.append(row[0:4])    # first four columns: feature values
    labels.append(row[4:7])  # remaining columns: one-hot rank label
# NOTE(review): this rebinding shadows the numericlabels() function above.
numericlabels = numericlabels(labels)
numericdata_list = numericdata(data)
numericAlldata_list =numericdataandllabels(alldata)
enc=preprocessing.OneHotEncoder()  # NOTE(review): `enc` is never used
y=np.array(numericlabels)
X=np.array(numericdata_list)
numericAlldata_array=np.array(numericAlldata_list)
# 80/20 train/test split with a fixed seed for reproducibility.
X,X_test,y,y_test=train_test_split(X,y,test_size=0.2,random_state=0)
#######################################################################
# Train for 500 epochs at learning rate 0.07 with 3 output classes,
# then evaluate on the held-out split.
net=initialize_network()
errors,net=training(net,500, 0.07,3)
pred=predict(net,X_test)
calcConfusion(pred,y_test)
drawROC(y_test, pred, 1)
# # output=np.argmax(pred)
print("end")
# print_network(net)
| [
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"numpy.array",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"matplotlib.pyplot.ylim",
"csv.reader",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.train_test_split",
"numpy.argma... | [((9996, 10007), 'time.time', 'time.time', ([], {}), '()\n', (10005, 10007), False, 'import time\n'), ((10194, 10214), 'csv.reader', 'csv.reader', (['gpsTrack'], {}), '(gpsTrack)\n', (10204, 10214), False, 'import csv\n'), ((10513, 10542), 'sklearn.preprocessing.OneHotEncoder', 'preprocessing.OneHotEncoder', ([], {}), '()\n', (10540, 10542), False, 'from sklearn import preprocessing\n'), ((10547, 10570), 'numpy.array', 'np.array', (['numericlabels'], {}), '(numericlabels)\n', (10555, 10570), True, 'import numpy as np\n'), ((10573, 10599), 'numpy.array', 'np.array', (['numericdata_list'], {}), '(numericdata_list)\n', (10581, 10599), True, 'import numpy as np\n'), ((10621, 10650), 'numpy.array', 'np.array', (['numericAlldata_list'], {}), '(numericAlldata_list)\n', (10629, 10650), True, 'import numpy as np\n'), ((10670, 10723), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(X, y, test_size=0.2, random_state=0)\n', (10686, 10723), False, 'from sklearn.model_selection import train_test_split\n'), ((2162, 2174), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2172, 2174), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2711), 'sklearn.metrics.auc', 'auc', (["fpr['micro']", "tpr['micro']"], {}), "(fpr['micro'], tpr['micro'])\n", (2683, 2711), False, 'from sklearn.metrics import roc_curve, auc\n'), ((3122, 3153), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {}), "([0, 1], [0, 1], 'k--')\n", (3130, 3153), True, 'import matplotlib.pyplot as plt\n'), ((3162, 3182), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (3170, 3182), True, 'import matplotlib.pyplot as plt\n'), ((3191, 3212), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (3199, 3212), True, 'import matplotlib.pyplot as plt\n'), ((3221, 3254), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive 
Rate"""'], {}), "('False Positive Rate')\n", (3231, 3254), True, 'import matplotlib.pyplot as plt\n'), ((3263, 3295), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (3273, 3295), True, 'import matplotlib.pyplot as plt\n'), ((3304, 3383), 'matplotlib.pyplot.title', 'plt.title', (['"""Some extension of Receiver operating characteristic to multi-class"""'], {}), "('Some extension of Receiver operating characteristic to multi-class')\n", (3313, 3383), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3421), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3402, 3421), True, 'import matplotlib.pyplot as plt\n'), ((3430, 3440), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3438, 3440), True, 'import matplotlib.pyplot as plt\n'), ((1436, 1458), 'numpy.array', 'np.array', (['newexpected2'], {}), '(newexpected2)\n', (1444, 1458), True, 'import numpy as np\n'), ((1461, 1484), 'numpy.array', 'np.array', (['newpredicted2'], {}), '(newpredicted2)\n', (1469, 1484), True, 'import numpy as np\n'), ((2488, 2525), 'sklearn.metrics.roc_curve', 'roc_curve', (['testY2[:, i]', 'probs2[:, i]'], {}), '(testY2[:, i], probs2[:, i])\n', (2497, 2525), False, 'from sklearn.metrics import roc_curve, auc\n'), ((2551, 2570), 'sklearn.metrics.auc', 'auc', (['fpr[i]', 'tpr[i]'], {}), '(fpr[i], tpr[i])\n', (2554, 2570), False, 'from sklearn.metrics import roc_curve, auc\n'), ((4229, 4241), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4237, 4241), True, 'import numpy as np\n'), ((4709, 4721), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4717, 4721), True, 'import numpy as np\n'), ((6587, 6599), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6595, 6599), True, 'import numpy as np\n'), ((8052, 8092), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['a', 'aa'], {'normalize': '"""all"""'}), "(a, aa, normalize='all')\n", (8068, 8092), False, 'from 
sklearn.metrics import confusion_matrix\n'), ((8158, 8176), 'numpy.nan_to_num', 'np.nan_to_num', (['cm1'], {}), '(cm1)\n', (8171, 8176), True, 'import numpy as np\n'), ((8444, 8466), 'numpy.isnan', 'np.isnan', (['sensitivity1'], {}), '(sensitivity1)\n', (8452, 8466), True, 'import numpy as np\n'), ((8616, 8638), 'numpy.isnan', 'np.isnan', (['specificity1'], {}), '(specificity1)\n', (8624, 8638), True, 'import numpy as np\n'), ((1603, 1620), 'numpy.argmax', 'np.argmax', (['row[0]'], {}), '(row[0])\n', (1612, 1620), True, 'import numpy as np\n'), ((1976, 1990), 'numpy.argmax', 'np.argmax', (['row'], {}), '(row)\n', (1985, 1990), True, 'import numpy as np\n'), ((3916, 3974), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.5)', 'high': '(0.5)', 'size': 'hidden_neurons'}), '(low=-0.5, high=0.5, size=hidden_neurons)\n', (3933, 3974), True, 'import numpy as np\n'), ((4112, 4124), 'numpy.exp', 'np.exp', (['(-sum)'], {}), '(-sum)\n', (4118, 4124), True, 'import numpy as np\n'), ((4442, 4473), 'numpy.append', 'np.append', (['prev_input', '[result]'], {}), '(prev_input, [result])\n', (4451, 4473), True, 'import numpy as np\n'), ((6800, 6831), 'numpy.append', 'np.append', (['prev_input', '[result]'], {}), '(prev_input, [result])\n', (6809, 6831), True, 'import numpy as np\n'), ((6926, 6938), 'numpy.exp', 'np.exp', (['(-sum)'], {}), '(-sum)\n', (6932, 6938), True, 'import numpy as np\n'), ((7870, 7885), 'numpy.argmax', 'np.argmax', (['b[0]'], {}), '(b[0])\n', (7879, 7885), True, 'import numpy as np\n'), ((3753, 3810), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.5)', 'high': '(0.5)', 'size': 'input_neurons'}), '(low=-0.5, high=0.5, size=input_neurons)\n', (3770, 3810), True, 'import numpy as np\n'), ((4849, 4866), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (4857, 4866), True, 'import numpy as np\n'), ((5141, 5168), 'numpy.append', 'np.append', (['errors', '[herror]'], {}), '(errors, [herror])\n', (5150, 5168), True, 'import 
numpy as np\n')] |
#!/usr/bin/env python3
"""Split PDFS by QR code and move images and PDFs to correct folder."""
import os
import traceback
import numpy
from . import write_to_log as logger
from . import submitty_ocr as scanner
# try importing required modules
try:
from PyPDF2 import PdfFileReader, PdfFileWriter
from pdf2image import convert_from_bytes
import pyzbar.pyzbar as pyzbar
from pyzbar.pyzbar import ZBarSymbol
import cv2
except ImportError:
traceback.print_exc()
raise ImportError("One or more required python modules not installed correctly")
def main(args):
    """Split a scanned exam PDF into per-student PDFs and page images.

    args (positional list):
        [0] filename       -- path of the PDF to split
        [1] split_path     -- working directory; all outputs are written here
        [2] qr_prefix      -- expected prefix stripped from decoded QR text
        [3] qr_suffix      -- expected suffix stripped from decoded QR text
        [4] log_file_path  -- destination for the one-line progress log
        [5] use_ocr        -- when truthy, read the student ID via OCR instead
                              of using the raw QR payload

    Each page is rendered to an image; a QR code marks the first page of a
    new student's submission.  Per-file metadata (id, page_count, OCR
    confidences) is accumulated in ``output`` and flushed to
    ``decoded.json``.
    """
    filename = args[0]
    split_path = args[1]
    qr_prefix = args[2]
    qr_suffix = args[3]
    log_file_path = args[4]
    use_ocr = args[5]
    # Single log line buffered across the whole run, tagged with the PID.
    buff = "Process " + str(os.getpid()) + ": "
    try:
        os.chdir(split_path)
        pdfPages = PdfFileReader(filename)
        pdf_writer = PdfFileWriter()
        i = id_index = 0  # i: current page index; id_index: files started
        page_count = 1
        prev_file = data = "BLANK"
        output = {"filename": filename, "is_qr": True, "use_ocr": use_ocr}
        json_file = os.path.join(split_path, "decoded.json")
        for page_number in range(pdfPages.numPages):
            # convert pdf to series of images for scanning
            # NOTE(review): the whole file is re-read and re-rendered for
            # every page -- O(n^2) I/O; confirm whether caching is worthwhile.
            page = convert_from_bytes(
                open(filename, 'rb').read(),
                first_page=page_number+1, last_page=page_number+2)[0]
            # increase contrast of image for better QR decoding
            cv_img = numpy.array(page)
            img_grey = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
            ret2, thresh = cv2.threshold(img_grey, 0, 255,
                                          cv2.THRESH_BINARY+cv2.THRESH_OTSU)
            # decode img - only look for QR codes
            val = pyzbar.decode(thresh, symbols=[ZBarSymbol.QRCODE])
            if val != []:
                # found a new qr code, split here
                # convert byte literal to string
                data = val[0][0].decode("utf-8")
                if not use_ocr:
                    buff += "Found a QR code with value \'" + data + "\' on"
                    buff += " page " + str(page_number) + ", "
                    if data == "none":  # blank exam with 'none' qr code
                        data = "BLANK EXAM"
                    else:
                        # strip the configured prefix/suffix from the payload
                        pre = data[0:len(qr_prefix)]
                        suf = data[(len(data)-len(qr_suffix)):len(data)]
                        if qr_prefix != '' and pre == qr_prefix:
                            data = data[len(qr_prefix):]
                        if qr_suffix != '' and suf == qr_suffix:
                            data = data[:-len(qr_suffix)]
                # since QR splitting doesn't know the max page assume length of 3
                prepended_index = str(i).zfill(3)
                cover_filename = '{}_{}_cover.pdf'.format(filename[:-4],
                                                          prepended_index)
                output_filename = '{}_{}.pdf'.format(filename[:-4], prepended_index)
                output[output_filename] = {}
                # if we're looking for a student's ID, use that as the value instead
                if use_ocr:
                    data, confidences = scanner.getDigits(thresh, val)
                    buff += "Found student ID number of \'" + data + "\' on"
                    buff += " page " + str(page_number) + ", "
                    buff += "Confidences: " + str(confidences) + " "
                    output[output_filename]["confidences"] = str(confidences)
                output[output_filename]['id'] = data
                # save pdf accumulated for the previous student
                if i != 0 and prev_file != '':
                    output[prev_file]['page_count'] = page_count
                    # update json file
                    logger.write_to_json(json_file, output)
                    with open(prev_file, 'wb') as out:
                        pdf_writer.write(out)
                if id_index == 1:
                    # correct first pdf's page count and print file
                    output[prev_file]['page_count'] = page_count
                    with open(prev_file, 'wb') as out:
                        pdf_writer.write(out)
                # start a new pdf and grab the cover
                cover_writer = PdfFileWriter()
                pdf_writer = PdfFileWriter()
                cover_writer.addPage(pdfPages.getPage(i))
                pdf_writer.addPage(pdfPages.getPage(i))
                # save cover
                with open(cover_filename, 'wb') as out:
                    cover_writer.write(out)
                # save cover image
                page.save('{}.jpg'.format(cover_filename[:-4]), "JPEG", quality=100)
                id_index += 1
                page_count = 1
                prev_file = output_filename
                # save page as image, start indexing at 1
                page.save(prev_file[:-4] + '_' + str(page_count).zfill(3) + '.jpg',
                          "JPEG", quality=100)
            else:
                # the first pdf page doesn't have a qr code
                if i == 0:
                    # start an anonymous file so the leading pages aren't lost
                    prepended_index = str(i).zfill(3)
                    output_filename = '{}_{}.pdf'.format(filename[:-4], prepended_index)
                    cover_filename = '{}_{}_cover.pdf'.format(filename[:-4],
                                                              prepended_index)
                    output[output_filename] = {}
                    # set the value as blank so a human can check what happened
                    output[output_filename]['id'] = "BLANK"
                    prev_file = output_filename
                    id_index += 1
                    cover_writer = PdfFileWriter()
                    # save cover
                    cover_writer.addPage(pdfPages.getPage(i))
                    with open(cover_filename, 'wb') as out:
                        cover_writer.write(out)
                    # save cover image
                    page.save('{}.jpg'.format(cover_filename[:-4]), "JPEG", quality=100)
                # add pages to current split_pdf
                page_count += 1
                pdf_writer.addPage(pdfPages.getPage(i))
                # save page as image, start indexing at 1
                page.save(prev_file[:-4] + '_' + str(page_count).zfill(3) + '.jpg',
                          "JPEG", quality=100)
            i += 1
        buff += "Finished splitting into {} files\n".format(id_index)
        # save whatever is left
        prepended_index = str(i).zfill(3)
        output_filename = '{}_{}.pdf'.format(filename[:-4], prepended_index)
        output[prev_file]['id'] = data
        output[prev_file]['page_count'] = page_count
        if use_ocr:
            # NOTE(review): `confidences` is unbound if no QR page was ever
            # seen while use_ocr is set; the except block below would catch
            # the resulting NameError -- confirm that is acceptable.
            output[prev_file]['confidences'] = str(confidences)
        logger.write_to_json(json_file, output)
        with open(prev_file, 'wb') as out:
            pdf_writer.write(out)
        # write the buffer to the log file, so everything is on one line
        logger.write_to_log(log_file_path, buff)
    except Exception:
        msg = "Failed when splitting pdf " + filename
        print(msg)
        traceback.print_exc()
        # print everything in the buffer just in case it didn't write
        logger.write_to_log(log_file_path, buff)
        logger.write_to_log(log_file_path, msg + "\n" + traceback.format_exc())
if __name__ == "__main__":
    # NOTE(review): main() requires a 6-element args list (filename,
    # split_path, qr_prefix, qr_suffix, log_file_path, use_ocr); calling it
    # without arguments raises TypeError -- confirm the intended CLI entry.
    main()
| [
"traceback.format_exc",
"cv2.threshold",
"os.path.join",
"os.chdir",
"numpy.array",
"pyzbar.pyzbar.decode",
"cv2.cvtColor",
"os.getpid",
"PyPDF2.PdfFileWriter",
"traceback.print_exc",
"PyPDF2.PdfFileReader"
] | [((463, 484), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (482, 484), False, 'import traceback\n'), ((854, 874), 'os.chdir', 'os.chdir', (['split_path'], {}), '(split_path)\n', (862, 874), False, 'import os\n'), ((894, 917), 'PyPDF2.PdfFileReader', 'PdfFileReader', (['filename'], {}), '(filename)\n', (907, 917), False, 'from PyPDF2 import PdfFileReader, PdfFileWriter\n'), ((939, 954), 'PyPDF2.PdfFileWriter', 'PdfFileWriter', ([], {}), '()\n', (952, 954), False, 'from PyPDF2 import PdfFileReader, PdfFileWriter\n'), ((1133, 1173), 'os.path.join', 'os.path.join', (['split_path', '"""decoded.json"""'], {}), "(split_path, 'decoded.json')\n", (1145, 1173), False, 'import os\n'), ((1527, 1544), 'numpy.array', 'numpy.array', (['page'], {}), '(page)\n', (1538, 1544), False, 'import numpy\n'), ((1569, 1609), 'cv2.cvtColor', 'cv2.cvtColor', (['cv_img', 'cv2.COLOR_BGR2GRAY'], {}), '(cv_img, cv2.COLOR_BGR2GRAY)\n', (1581, 1609), False, 'import cv2\n'), ((1637, 1705), 'cv2.threshold', 'cv2.threshold', (['img_grey', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(img_grey, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (1650, 1705), False, 'import cv2\n'), ((1814, 1864), 'pyzbar.pyzbar.decode', 'pyzbar.decode', (['thresh'], {'symbols': '[ZBarSymbol.QRCODE]'}), '(thresh, symbols=[ZBarSymbol.QRCODE])\n', (1827, 1864), True, 'import pyzbar.pyzbar as pyzbar\n'), ((7203, 7224), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7222, 7224), False, 'import traceback\n'), ((816, 827), 'os.getpid', 'os.getpid', ([], {}), '()\n', (825, 827), False, 'import os\n'), ((4324, 4339), 'PyPDF2.PdfFileWriter', 'PdfFileWriter', ([], {}), '()\n', (4337, 4339), False, 'from PyPDF2 import PdfFileReader, PdfFileWriter\n'), ((4369, 4384), 'PyPDF2.PdfFileWriter', 'PdfFileWriter', ([], {}), '()\n', (4382, 4384), False, 'from PyPDF2 import PdfFileReader, PdfFileWriter\n'), ((5757, 5772), 'PyPDF2.PdfFileWriter', 'PdfFileWriter', ([], {}), '()\n', 
(5770, 5772), False, 'from PyPDF2 import PdfFileReader, PdfFileWriter\n'), ((7400, 7422), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7420, 7422), False, 'import traceback\n')] |
#coding=utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import getpass
import os
import socket
import numpy as np
from PIL import Image, ImageFilter
import argparse
import time
import sys
#from utils import AverageMeter, calculate_accuracy
import pdb
import math
from dataset.dataset import *
from dataset.preprocess_data import *
from models.model import generate_model
from opts import parse_opts
from utils import *
import pdb
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.learning_rate_scheduler import ReduceLROnPlateau
def resume_params(model, optimizer, opt):
    """Restore model and optimizer state from the newest Flow checkpoint.

    Scans ``opt.Flow_resume_path`` for files named ``model_Flow_<epoch>_saved``,
    loads the highest-epoch one into ``model``/``optimizer`` and advances
    ``opt.begin_epoch`` past it.  No-op when resuming is disabled or the
    checkpoint directory does not exist.
    """
    if not (opt.continue_train and os.path.exists(opt.Flow_resume_path)):
        return
    print("you now read checkpoint!!!")
    # Highest epoch number among the saved Flow checkpoints (0 if none).
    epoch_numbers = [int(name.split('_')[2])
                     for name in os.listdir(opt.Flow_resume_path)
                     if 'model_Flow_' in name]
    max_epoch = max(epoch_numbers, default=0)
    if max_epoch > 0:
        ckpt = os.path.join(opt.Flow_resume_path,
                            'model_Flow_' + str(max_epoch) + '_saved')
        para_dict, opti_dict = fluid.dygraph.load_dygraph(ckpt)
        model.set_dict(para_dict)        # restore network weights
        optimizer.set_dict(opti_dict)    # restore optimizer state
        opt.begin_epoch = max_epoch + 1  # resume training after this epoch
def train():
    """Train (and validate each epoch) the optical-flow action model.

    Configuration comes from ``parse_opts()``; data loaders are looked up by
    name from the module globals (``'<dataset>_test'``) and the model from
    ``generate_model``.  Checkpoints are written to ``opt.Flow_resume_path``
    every ``opt.checkpoint`` epochs.
    """
    # Read the configuration.
    opt = parse_opts()
    print(opt)
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    #
    with fluid.dygraph.guard(place = fluid.CUDAPlace(0)):
        # Training data loader.
        print("Preprocessing train data ...")
        train_data = globals()['{}_test'.format(opt.dataset)](split = opt.split, train = 1, opt = opt)
        train_dataloader = paddle.batch(train_data, batch_size=opt.batch_size, drop_last=True)
        # Validation data loader.
        print("Preprocessing validation data ...")
        val_data = globals()['{}_test'.format(opt.dataset)](split = opt.split, train = 2, opt = opt)
        val_dataloader = paddle.batch(val_data, batch_size=opt.batch_size, drop_last=True)
        # Optical-flow input has two channels.
        opt.input_channels = 2
        # Build the network.
        print("Loading Flow model... ", opt.model, opt.model_depth)
        model,parameters = generate_model(opt)
        print("Initializing the optimizer ...")
        if opt.Flow_premodel_path:
            # Fine-tuning from a pretrained Flow model: fixed lr / decay.
            opt.weight_decay = 1e-5
            opt.learning_rate = 0.001
        print("lr = {} \t momentum = {}, \t nesterov = {} \t LR patience = {} "
                .format(opt.learning_rate, opt.momentum, opt.nesterov, opt.lr_patience))
        # Build the optimizer.
        optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=opt.learning_rate,
                        momentum=opt.momentum, parameter_list=parameters,
                        use_nesterov=opt.nesterov)
        scheduler = ReduceLROnPlateau(opt.learning_rate, mode='min', patience=opt.lr_patience)
        if opt.continue_train and opt.Flow_resume_path != '':
            resume_params(model, optimizer, opt)
        print('run')
        # NOTE(review): np.float is removed in numpy >= 1.24; np.float64
        # (or plain float) would be the safe spelling here.
        losses_avg=np.zeros((1,),dtype=np.float)
        for epoch in range(opt.begin_epoch, opt.n_epochs+1):
            # Training mode: parameters are updated.
            model.train()
            batch_time = AverageMeter()
            data_time = AverageMeter()
            losses = AverageMeter()
            accuracies = AverageMeter()
            end_time = time.time()
            for i, data in enumerate(train_dataloader()):
                # Input video frames / optical flow.
                inputs = np.array([x[0] for x in data]).astype('float32')
                # Corresponding class labels.
                targets = np.array([x[1] for x in data]).astype('int')
                inputs = fluid.dygraph.base.to_variable(inputs)
                targets = fluid.dygraph.base.to_variable(targets)
                targets.stop_gradient = True
                data_time.update(time.time() - end_time)
                # Forward pass.
                outputs = model(inputs)
                # Cross-entropy loss between outputs and labels.
                loss = fluid.layers.cross_entropy(outputs, targets)
                avg_loss = fluid.layers.mean(loss)
                # Prediction accuracy.
                acc = calculate_accuracy(outputs, targets)
                losses.update(avg_loss.numpy()[0], inputs.shape[0])
                accuracies.update(acc[0], inputs.shape[0])
                # Backpropagate gradients.
                optimizer.clear_gradients()
                avg_loss.backward()
                # Minimize the loss to optimize the network weights.
                #print(avg_loss)
                #pdb.set_trace()
                optimizer.minimize(avg_loss)
                batch_time.update(time.time() - end_time)
                end_time = time.time()
            print('Epoch: [{0}][{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss val:{loss.val:.4f} (avg:{loss.avg:.4f})\t'
                  'Acc val:{acc.val:.3f} (avg:{acc.avg:.3f})'.format(
                      epoch,
                      i + 1,
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      acc=accuracies))
            losses_avg[0]=losses.avg
            scheduler.step(losses_avg)
            if epoch % opt.checkpoint == 0 and epoch != 0:
                fluid.dygraph.save_dygraph(model.state_dict(),os.path.join(opt.Flow_resume_path,'model_Flow_'+str(epoch)+'_saved'))
                fluid.dygraph.save_dygraph(optimizer.state_dict(), os.path.join(opt.Flow_resume_path,'model_Flow_'+str(epoch)+'_saved'))
            # Evaluation mode: run the validation set.
            model.eval()
            batch_time = AverageMeter()
            data_time = AverageMeter()
            losses = AverageMeter()
            accuracies = AverageMeter()
            end_time = time.time()
            for i, data in enumerate(val_dataloader()):
                data_time.update(time.time() - end_time)
                inputs = np.array([x[0] for x in data]).astype('float32')
                targets = np.array([x[1] for x in data]).astype('int')
                inputs = fluid.dygraph.base.to_variable(inputs)
                targets = fluid.dygraph.base.to_variable(targets)
                targets.stop_gradient = True
                outputs = model(inputs)
                loss = fluid.layers.cross_entropy(outputs, targets)
                avg_loss = fluid.layers.mean(loss)
                acc = calculate_accuracy(outputs, targets)
                losses.update(avg_loss.numpy()[0], inputs.shape[0])
                accuracies.update(acc[0], inputs.shape[0])
                batch_time.update(time.time() - end_time)
                end_time = time.time()
            print('Val_Epoch: [{0}][{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(
                      epoch,
                      i + 1,
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      acc=accuracies))
if __name__=="__main__":
    # Script entry point: run the Flow-stream training loop.
    train()
| [
"os.path.exists",
"paddle.fluid.dygraph.learning_rate_scheduler.ReduceLROnPlateau",
"os.listdir",
"paddle.fluid.dygraph.base.to_variable",
"paddle.fluid.layers.cross_entropy",
"paddle.fluid.layers.mean",
"opts.parse_opts",
"numpy.array",
"numpy.zeros",
"paddle.fluid.CUDAPlace",
"models.model.gen... | [((2083, 2095), 'opts.parse_opts', 'parse_opts', ([], {}), '()\n', (2093, 2095), False, 'from opts import parse_opts\n'), ((1326, 1362), 'os.path.exists', 'os.path.exists', (['opt.Flow_resume_path'], {}), '(opt.Flow_resume_path)\n', (1340, 1362), False, 'import os\n'), ((1432, 1464), 'os.listdir', 'os.listdir', (['opt.Flow_resume_path'], {}), '(opt.Flow_resume_path)\n', (1442, 1464), False, 'import os\n'), ((2428, 2495), 'paddle.batch', 'paddle.batch', (['train_data'], {'batch_size': 'opt.batch_size', 'drop_last': '(True)'}), '(train_data, batch_size=opt.batch_size, drop_last=True)\n', (2440, 2495), False, 'import paddle\n'), ((2692, 2757), 'paddle.batch', 'paddle.batch', (['val_data'], {'batch_size': 'opt.batch_size', 'drop_last': '(True)'}), '(val_data, batch_size=opt.batch_size, drop_last=True)\n', (2704, 2757), False, 'import paddle\n'), ((2951, 2970), 'models.model.generate_model', 'generate_model', (['opt'], {}), '(opt)\n', (2965, 2970), False, 'from models.model import generate_model\n'), ((3351, 3499), 'paddle.fluid.optimizer.MomentumOptimizer', 'fluid.optimizer.MomentumOptimizer', ([], {'learning_rate': 'opt.learning_rate', 'momentum': 'opt.momentum', 'parameter_list': 'parameters', 'use_nesterov': 'opt.nesterov'}), '(learning_rate=opt.learning_rate, momentum\n =opt.momentum, parameter_list=parameters, use_nesterov=opt.nesterov)\n', (3384, 3499), True, 'import paddle.fluid as fluid\n'), ((3557, 3631), 'paddle.fluid.dygraph.learning_rate_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['opt.learning_rate'], {'mode': '"""min"""', 'patience': 'opt.lr_patience'}), "(opt.learning_rate, mode='min', patience=opt.lr_patience)\n", (3574, 3631), False, 'from paddle.fluid.dygraph.learning_rate_scheduler import ReduceLROnPlateau\n'), ((3784, 3814), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.float'}), '((1,), dtype=np.float)\n', (3792, 3814), True, 'import numpy as np\n'), ((4125, 4136), 'time.time', 'time.time', ([], {}), '()\n', 
(4134, 4136), False, 'import time\n'), ((6608, 6619), 'time.time', 'time.time', ([], {}), '()\n', (6617, 6619), False, 'import time\n'), ((2212, 2230), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (2227, 2230), True, 'import paddle.fluid as fluid\n'), ((4428, 4466), 'paddle.fluid.dygraph.base.to_variable', 'fluid.dygraph.base.to_variable', (['inputs'], {}), '(inputs)\n', (4458, 4466), True, 'import paddle.fluid as fluid\n'), ((4493, 4532), 'paddle.fluid.dygraph.base.to_variable', 'fluid.dygraph.base.to_variable', (['targets'], {}), '(targets)\n', (4523, 4532), True, 'import paddle.fluid as fluid\n'), ((4757, 4801), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', (['outputs', 'targets'], {}), '(outputs, targets)\n', (4783, 4801), True, 'import paddle.fluid as fluid\n'), ((4829, 4852), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['loss'], {}), '(loss)\n', (4846, 4852), True, 'import paddle.fluid as fluid\n'), ((5397, 5408), 'time.time', 'time.time', ([], {}), '()\n', (5406, 5408), False, 'import time\n'), ((6914, 6952), 'paddle.fluid.dygraph.base.to_variable', 'fluid.dygraph.base.to_variable', (['inputs'], {}), '(inputs)\n', (6944, 6952), True, 'import paddle.fluid as fluid\n'), ((6979, 7018), 'paddle.fluid.dygraph.base.to_variable', 'fluid.dygraph.base.to_variable', (['targets'], {}), '(targets)\n', (7009, 7018), True, 'import paddle.fluid as fluid\n'), ((7149, 7193), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', (['outputs', 'targets'], {}), '(outputs, targets)\n', (7175, 7193), True, 'import paddle.fluid as fluid\n'), ((7221, 7244), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['loss'], {}), '(loss)\n', (7238, 7244), True, 'import paddle.fluid as fluid\n'), ((7522, 7533), 'time.time', 'time.time', ([], {}), '()\n', (7531, 7533), False, 'import time\n'), ((4248, 4278), 'numpy.array', 'np.array', (['[x[0] for x in data]'], {}), '([x[0] for x in data])\n', (4256, 4278), True, 'import numpy as 
np\n'), ((4358, 4388), 'numpy.array', 'np.array', (['[x[1] for x in data]'], {}), '([x[1] for x in data])\n', (4366, 4388), True, 'import numpy as np\n'), ((4611, 4622), 'time.time', 'time.time', ([], {}), '()\n', (4620, 4622), False, 'import time\n'), ((5346, 5357), 'time.time', 'time.time', ([], {}), '()\n', (5355, 5357), False, 'import time\n'), ((6709, 6720), 'time.time', 'time.time', ([], {}), '()\n', (6718, 6720), False, 'import time\n'), ((6758, 6788), 'numpy.array', 'np.array', (['[x[0] for x in data]'], {}), '([x[0] for x in data])\n', (6766, 6788), True, 'import numpy as np\n'), ((6844, 6874), 'numpy.array', 'np.array', (['[x[1] for x in data]'], {}), '([x[1] for x in data])\n', (6852, 6874), True, 'import numpy as np\n'), ((7471, 7482), 'time.time', 'time.time', ([], {}), '()\n', (7480, 7482), False, 'import time\n')] |
from math import gamma
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from atcenv.MASAC.buffer import ReplayBuffer
from atcenv.MASAC.mactor_critic import Actor, CriticQ, CriticV
from torch.nn.utils.clip_grad import clip_grad_norm_
GAMMMA = 0.99                 # discount factor (name typo kept: referenced as GAMMMA below)
TAU =5e-3                     # soft-update rate for the target value network
INITIAL_RANDOM_STEPS = 100    # steps with uniform-random actions before using the actor
POLICY_UPDATE_FREQUENCE = 2   # actor/target update every N calls to update_model
NUM_AGENTS = 10               # number of agents sharing the networks
BUFFER_SIZE = 1000000         # replay buffer capacity
BATCH_SIZE = 256              # replay mini-batch size
ACTION_DIM = 2                # action vector length per agent
STATE_DIM = 14                # observation vector length per agent
NUMBER_INTRUDERS_STATE = 2    # closest intruders encoded in the state
MEANS = [57000,57000,0,0,0,0,0,0]           # per-feature shifts used by MaSacAgent.normalizeState
STDS = [31500,31500,100000,100000,1,1,1,1]  # per-feature scales used by MaSacAgent.normalizeState
class MaSacAgent:
def __init__(self):
self.memory = ReplayBuffer(STATE_DIM,ACTION_DIM, BUFFER_SIZE, BATCH_SIZE)
try:
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('DEVICE USED: ', torch.cuda.device(torch.cuda.current_device()), torch.cuda.get_device_name(0))
except:
# Cuda isn't available
self.device = torch.device("cpu")
print('DEVICE USED: CPU')
self.target_alpha = -np.prod((ACTION_DIM,)).item()
self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
self.alpha_optimizer = optim.Adam([self.log_alpha], lr=3e-4)
self.actor = Actor(STATE_DIM, ACTION_DIM).to(self.device)
self.vf = CriticV(STATE_DIM).to(self.device)
self.vf_target = CriticV(STATE_DIM).to(self.device)
self.vf_target.load_state_dict(self.vf.state_dict())
self.qf1 = CriticQ(STATE_DIM + ACTION_DIM).to(self.device)
self.qf2 = CriticQ(STATE_DIM + ACTION_DIM).to(self.device)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=3e-4)
self.vf_optimizer = optim.Adam(self.vf.parameters(), lr=3e-4)
self.qf1_optimizer = optim.Adam(self.qf1.parameters(), lr=3e-4)
self.qf2_optimizer = optim.Adam(self.qf2.parameters(), lr=3e-4)
self.transition = [[] for i in range(NUM_AGENTS)]
self.total_step = 0
self.is_test = False
def do_step(self, state, max_speed, min_speed, test = False, batch = False):
if not test and self.total_step < INITIAL_RANDOM_STEPS and not self.is_test:
selected_action = np.random.uniform(-1, 1, (len(state), ACTION_DIM))
else:
selected_action = []
for i in range(len(state)):
action = self.actor(torch.FloatTensor(state[i]).to(self.device))[0].detach().cpu().numpy()
selected_action.append(action)
selected_action = np.array(selected_action)
selected_action = np.clip(selected_action, -1, 1)
self.total_step += 1
return selected_action.tolist()
def setResult(self,episode_name, state, new_state, reward, action, done):
if not self.is_test:
for i in range(len(state)):
self.transition[i] = [state[i], action[i], reward, new_state[i], done]
self.memory.store(*self.transition[i])
if (len(self.memory) > BATCH_SIZE and self.total_step > INITIAL_RANDOM_STEPS):
self.update_model()
def update_model(self):
device = self.device
samples = self.memory.sample_batch()
state = torch.FloatTensor(samples["obs"]).to(device)
next_state = torch.FloatTensor(samples["next_obs"]).to(device)
action = torch.FloatTensor(samples["acts"].reshape(-1, ACTION_DIM)).to(device)
reward = torch.FloatTensor(samples["rews"].reshape(-1,1)).to(device)
done = torch.FloatTensor(samples["done"].reshape(-1, 1)).to(device)
new_action, log_prob = self.actor(state)
alpha_loss = ( -self.log_alpha.exp() * (log_prob + self.target_alpha).detach()).mean()
self.alpha_optimizer.zero_grad()
alpha_loss.backward()
self.alpha_optimizer.step()
alpha = self.log_alpha.exp()
mask = 1 - done
q1_pred = self.qf1(state, action)
q2_pred = self.qf2(state, action)
vf_target = self.vf_target(next_state)
q_target = reward + GAMMMA * vf_target * mask
qf1_loss = F.mse_loss(q_target.detach(), q1_pred)
qf2_loss = F.mse_loss(q_target.detach(), q2_pred)
v_pred = self.vf(state)
q_pred = torch.min(
self.qf1(state, new_action), self.qf2(state, new_action)
)
v_target = q_pred - alpha * log_prob
v_loss = F.mse_loss(v_pred, v_target.detach())
if self.total_step % POLICY_UPDATE_FREQUENCE== 0:
advantage = q_pred - v_pred.detach()
actor_loss = (alpha * log_prob - advantage).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self._target_soft_update()
else:
actor_loss = torch.zeros(1)
self.qf1_optimizer.zero_grad()
qf1_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.zero_grad()
qf2_loss.backward()
self.qf2_optimizer.step()
qf_loss = qf1_loss + qf2_loss
self.vf_optimizer.zero_grad()
v_loss.backward()
self.vf_optimizer.step()
return actor_loss.data, qf_loss.data, v_loss.data, alpha_loss.data
def save_models(self):
torch.save(self.actor.state_dict(), "results/mactor.pt")
torch.save(self.qf1.state_dict(), "results/mqf1.pt")
torch.save(self.qf2.state_dict(), "results/mqf2.pt")
torch.save(self.vf.state_dict(), "results/mvf.pt")
def load_models(self):
# The models were trained on a CUDA device
# If you are running on a CPU-only machine, use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.
self.actor.load_state_dict(torch.load("results/mactor.pt", map_location=torch.device('cpu')))
self.qf1.load_state_dict(torch.load("results/mqf1.pt", map_location=torch.device('cpu')))
self.qf2.load_state_dict(torch.load("results/mqf2.pt", map_location=torch.device('cpu')))
self.vf.load_state_dict(torch.load("results/mvf.pt", map_location=torch.device('cpu')))
def _target_soft_update(self):
    """Polyak-average the online value net into the target value net."""
    parameter_pairs = zip(self.vf_target.parameters(), self.vf.parameters())
    for target_p, online_p in parameter_pairs:
        # new_target = TAU * online + (1 - TAU) * old_target
        blended = TAU * online_p.data + (1.0 - TAU) * target_p.data
        target_p.data.copy_(blended)
def normalizeState(self, s_t, max_speed, min_speed):
    """Normalize a raw state vector in place and return it.

    The first five groups of NUMBER_INTRUDERS_STATE entries describe the
    closest intruders (distance, relative bearing, and three further
    features); the trailing entries hold own-ship quantities.
    """
    n = NUMBER_INTRUDERS_STATE
    # Groups 0-3: z-score each intruder feature with (mean, 2*std).
    for group in range(4):
        mean, std = MEANS[group], STDS[group]
        for idx in range(group * n, (group + 1) * n):
            s_t[idx] = (s_t[idx] - mean) / (std * 2)
    # Group 4: angles scaled by pi to roughly [-1, 1].
    for idx in range(4 * n, 5 * n):
        s_t[idx] = (s_t[idx]) / (3.1415)
    # Current and optimal speed: map [min_speed, max_speed] onto [-1, 1].
    speed_span = max_speed - min_speed
    s_t[5 * n] = ((s_t[5 * n] - min_speed) / speed_span) * 2 - 1
    s_t[5 * n + 1] = ((s_t[5 * n + 1] - min_speed) / speed_span) * 2 - 1
    # The final two entries are deliberately passed through unchanged
    # (earlier revisions normalized them by MAX_DISTANCE; see commented
    # history in upstream source).
    s_t[5 * n + 2] = s_t[5 * n + 2]
    s_t[5 * n + 3] = s_t[5 * n + 3]
    return s_t
"numpy.clip",
"torch.optim.Adam",
"torch.cuda.get_device_name",
"atcenv.MASAC.mactor_critic.Actor",
"numpy.prod",
"atcenv.MASAC.buffer.ReplayBuffer",
"torch.FloatTensor",
"numpy.array",
"atcenv.MASAC.mactor_critic.CriticV",
"torch.cuda.is_available",
"atcenv.MASAC.mactor_critic.CriticQ",
"torc... | [((700, 760), 'atcenv.MASAC.buffer.ReplayBuffer', 'ReplayBuffer', (['STATE_DIM', 'ACTION_DIM', 'BUFFER_SIZE', 'BATCH_SIZE'], {}), '(STATE_DIM, ACTION_DIM, BUFFER_SIZE, BATCH_SIZE)\n', (712, 760), False, 'from atcenv.MASAC.buffer import ReplayBuffer\n'), ((1207, 1261), 'torch.zeros', 'torch.zeros', (['(1)'], {'requires_grad': '(True)', 'device': 'self.device'}), '(1, requires_grad=True, device=self.device)\n', (1218, 1261), False, 'import torch\n'), ((1293, 1332), 'torch.optim.Adam', 'optim.Adam', (['[self.log_alpha]'], {'lr': '(0.0003)'}), '([self.log_alpha], lr=0.0003)\n', (1303, 1332), True, 'import torch.optim as optim\n'), ((2645, 2670), 'numpy.array', 'np.array', (['selected_action'], {}), '(selected_action)\n', (2653, 2670), True, 'import numpy as np\n'), ((2701, 2732), 'numpy.clip', 'np.clip', (['selected_action', '(-1)', '(1)'], {}), '(selected_action, -1, 1)\n', (2708, 2732), True, 'import numpy as np\n'), ((4947, 4961), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (4958, 4961), False, 'import torch\n'), ((943, 972), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['(0)'], {}), '(0)\n', (969, 972), False, 'import torch\n'), ((1056, 1075), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1068, 1075), False, 'import torch\n'), ((1353, 1381), 'atcenv.MASAC.mactor_critic.Actor', 'Actor', (['STATE_DIM', 'ACTION_DIM'], {}), '(STATE_DIM, ACTION_DIM)\n', (1358, 1381), False, 'from atcenv.MASAC.mactor_critic import Actor, CriticQ, CriticV\n'), ((1417, 1435), 'atcenv.MASAC.mactor_critic.CriticV', 'CriticV', (['STATE_DIM'], {}), '(STATE_DIM)\n', (1424, 1435), False, 'from atcenv.MASAC.mactor_critic import Actor, CriticQ, CriticV\n'), ((1477, 1495), 'atcenv.MASAC.mactor_critic.CriticV', 'CriticV', (['STATE_DIM'], {}), '(STATE_DIM)\n', (1484, 1495), False, 'from atcenv.MASAC.mactor_critic import Actor, CriticQ, CriticV\n'), ((1593, 1624), 'atcenv.MASAC.mactor_critic.CriticQ', 'CriticQ', (['(STATE_DIM + ACTION_DIM)'], 
{}), '(STATE_DIM + ACTION_DIM)\n', (1600, 1624), False, 'from atcenv.MASAC.mactor_critic import Actor, CriticQ, CriticV\n'), ((1660, 1691), 'atcenv.MASAC.mactor_critic.CriticQ', 'CriticQ', (['(STATE_DIM + ACTION_DIM)'], {}), '(STATE_DIM + ACTION_DIM)\n', (1667, 1691), False, 'from atcenv.MASAC.mactor_critic import Actor, CriticQ, CriticV\n'), ((3364, 3397), 'torch.FloatTensor', 'torch.FloatTensor', (["samples['obs']"], {}), "(samples['obs'])\n", (3381, 3397), False, 'import torch\n'), ((3430, 3468), 'torch.FloatTensor', 'torch.FloatTensor', (["samples['next_obs']"], {}), "(samples['next_obs'])\n", (3447, 3468), False, 'import torch\n'), ((822, 847), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (845, 847), False, 'import torch\n'), ((913, 940), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (938, 940), False, 'import torch\n'), ((1152, 1174), 'numpy.prod', 'np.prod', (['(ACTION_DIM,)'], {}), '((ACTION_DIM,))\n', (1159, 1174), True, 'import numpy as np\n'), ((5968, 5987), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5980, 5987), False, 'import torch\n'), ((6066, 6085), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6078, 6085), False, 'import torch\n'), ((6164, 6183), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6176, 6183), False, 'import torch\n'), ((6260, 6279), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6272, 6279), False, 'import torch\n'), ((2497, 2524), 'torch.FloatTensor', 'torch.FloatTensor', (['state[i]'], {}), '(state[i])\n', (2514, 2524), False, 'import torch\n')] |
from matplotlib import pyplot as plt
import numpy as np
def generate_and_save_images(model, epoch, test_input):
    """Sample the generator and write a 4x4 image grid to reports/.

    ``training=False`` keeps layers such as batchnorm in inference mode.
    """
    samples = model(test_input, training=False)
    fig = plt.figure(figsize=(10, 10))
    num_samples = samples.shape[0]
    for cell in range(num_samples):
        plt.subplot(4, 4, cell + 1)
        # Undo the [-1, 1] generator scaling back to 8-bit grayscale range.
        plt.imshow(samples[cell, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')
    plt.savefig('reports/image_at_epoch_{:04d}.png'.format(epoch))
    plt.close()
def plot_loss(gl, dl, epoch):
    """Plot per-step generator/discriminator losses for one epoch and save
    the figure under reports/."""
    plt.figure(figsize=(16, 2))
    gen_steps = np.arange(len(gl))
    disc_steps = np.arange(len(dl))
    plt.plot(gen_steps, gl, label='Gen_loss')
    plt.plot(disc_steps, dl, label='Disc_loss')
    plt.legend()
    plt.title('Epoch ' + str(epoch) + ' Loss')
    ymax = plt.ylim()[1]  # query upper y-limit (kept from original; unused here)
    plt.savefig('reports/loss_{:04d}.png'.format(epoch))
    plt.close()
def plot_all_time_lostt(all_gl, all_dl):
    """Plot generator/discriminator loss over all epochs.

    Bug fix: the original referenced ``ymax``, which is a *local* variable
    of ``plot_loss`` and therefore undefined here — calling this function
    raised ``NameError``. We now query the upper y-limit of the freshly
    drawn axes instead, mirroring what ``plot_loss`` computes.
    """
    plt.figure(figsize=(16, 2))
    plt.plot(np.arange(len(all_gl)), all_gl, label='Gen_loss')
    plt.plot(np.arange(len(all_dl)), all_dl, label='Disc_loss')
    plt.legend()
    ymax = plt.ylim()[1]  # upper y-limit of the curves just drawn
    # Cap the view at the smaller of 1.1x the peak generator loss or 2x ymax.
    plt.ylim((0, np.min([1.1 * np.max(all_gl), 2 * ymax])))
    plt.title('All Time Loss')
    plt.close()
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.close",
"numpy.max",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend"
] | [((276, 304), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (286, 304), True, 'from matplotlib import pyplot as plt\n'), ((547, 558), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (556, 558), True, 'from matplotlib import pyplot as plt\n'), ((594, 621), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 2)'}), '(figsize=(16, 2))\n', (604, 621), True, 'from matplotlib import pyplot as plt\n'), ((732, 744), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (742, 744), True, 'from matplotlib import pyplot as plt\n'), ((874, 885), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (883, 885), True, 'from matplotlib import pyplot as plt\n'), ((933, 960), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 2)'}), '(figsize=(16, 2))\n', (943, 960), True, 'from matplotlib import pyplot as plt\n'), ((1087, 1099), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1097, 1099), True, 'from matplotlib import pyplot as plt\n'), ((1158, 1184), 'matplotlib.pyplot.title', 'plt.title', (['"""All Time Loss"""'], {}), "('All Time Loss')\n", (1167, 1184), True, 'from matplotlib import pyplot as plt\n'), ((1189, 1200), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1198, 1200), True, 'from matplotlib import pyplot as plt\n'), ((355, 379), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(4)', '(i + 1)'], {}), '(4, 4, i + 1)\n', (366, 379), True, 'from matplotlib import pyplot as plt\n'), ((386, 450), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(predictions[i, :, :, 0] * 127.5 + 127.5)'], {'cmap': '"""gray"""'}), "(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n", (396, 450), True, 'from matplotlib import pyplot as plt\n'), ((459, 474), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (467, 474), True, 'from matplotlib import pyplot as plt\n'), ((799, 809), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (807, 
809), True, 'from matplotlib import pyplot as plt\n'), ((1128, 1142), 'numpy.max', 'np.max', (['all_gl'], {}), '(all_gl)\n', (1134, 1142), True, 'import numpy as np\n')] |
import numpy as np
import scipy.io as scio
import scipy.sparse as scsp
import h5py as hp
from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, \
load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset
from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss
from models.DeepCCAModels import DeepCCA, cca_loss, gcca, cca
from models.DBN import mv_DBN
from torch.utils.data import DataLoader
from torch.nn import MSELoss, CrossEntropyLoss
from torch import optim
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch
import random
from img_config import epochs_set, layer_size_set, \
batch_size_set_mvae, lr_set_mvae, wd_set_mvae, \
batch_size_set_dcca, lr_set_dcca, wd_set_dcca, alpha_set_dcca, \
batch_size_set_dsvdd, lr_set_dsvdd, wd_set_dsvdd, \
batch_size_set_fused, lr_set_fused, wd_set_fused, pt_epochs_set, \
batch_size_set_split, lr_set_split, wd_set_split, \
batch_size_set_tf, lr_set_tf, wd_set_tf, r_set_tf, \
batch_size_set_corr, lr_set_corr, wd_set_corr, alpha_set_corr, \
batch_size_set_sim, lr_set_sim, wd_set_sim, alpha_set_sim, m_set_sim, \
batch_size_set_dgcca, lr_set_dgcca, wd_set_dgcca, alpha_set_dgcca, \
batch_size_set_dbn, lr_set_dbn, wd_set_dbn
import os
from transformations import get_rp_num, trans_mv_data_new
def get_radius(dist: torch.Tensor, nu: float):
    """Optimally solve for radius R via the (1-nu)-quantile of distances.

    ``dist`` holds squared distances; the quantile is taken over their
    square roots on a detached CPU copy.
    """
    squared_distances = dist.clone().data.cpu().numpy()
    return np.quantile(np.sqrt(squared_distances), 1 - nu)
# Baselines:
def simple_mvae(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
    """Per-view multi-view autoencoder anomaly-detection baseline.

    For each repeat and each qualified class: builds an AD split, trains one
    autoencoder per view on the training data, scores the test split per
    view, and records both the average-fused metrics and the metrics of the
    single best view.

    Fixes vs. the original: string comparisons use ``==`` instead of ``is``
    (identity of str literals is an interning accident and triggers a
    SyntaxWarning); ``single_best_tnr`` is initialized before the view scan
    so it can never be referenced unbound; the view-scan loop no longer
    shadows the class-loop index ``i``.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source — nesting of ``cout``/``set_postfix`` inside the batch loop is
    the plausible reading; confirm against upstream.
    """
    torch.set_default_tensor_type(torch.DoubleTensor)
    roc_results = [[] for _ in range(repeat_times)]
    pr_anom_results = [[] for _ in range(repeat_times)]
    pr_norm_results = [[] for _ in range(repeat_times)]
    tnr_results = [[] for _ in range(repeat_times)]
    single_best_roc_results = [[] for _ in range(repeat_times)]
    single_best_pr_anom_results = [[] for _ in range(repeat_times)]
    single_best_pr_norm_results = [[] for _ in range(repeat_times)]
    single_best_tnr_results = [[] for _ in range(repeat_times)]
    # training config
    layer_size = layer_size_set[dataset_name]
    batch_size = batch_size_set_mvae[dataset_name]
    lr = lr_set_mvae[dataset_name]
    wd = wd_set_mvae[dataset_name]
    print('layer_size: {}, batch size: {}, lr: {}, wd:{}'.format(layer_size, batch_size, lr, wd))
    loss_func = MSELoss()
    input_size_set = [x.shape[0] for x in X]
    for t in range(repeat_times):
        print('The {}-th run begins!'.format(t))
        for i in range(min([10, len(qualified_Y)])):
            partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
            X_train, X_test = process_ad_dataset(X, partition)
            # -----------------------------------------model training
            seed = 0
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(seed)
            torch.manual_seed(seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = True
            model = mvae_ad(input_size_set=input_size_set, layer_sizes=layer_size).to(device).double()
            trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
            # specific training procedure
            epochs = epochs_set[dataset_name]
            print(max(epochs))
            losses_set = [AverageMeter() for _ in range(len(X_train))]
            optimizer = [optim.Adam(model.ae_set[v].parameters(), lr=lr, weight_decay=wd) for v in range(len(X_train))]
            model.train()
            cout = 0
            for epoch in range(max(epochs)):
                trainloader = tqdm(trainloader)
                if ADdataset_mode == 'majority':  # value comparison, not identity
                    trainloader.set_description('mvae, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                else:
                    trainloader.set_description('mvae, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                for idx, batch in enumerate(trainloader):
                    postfix = ''
                    if batch[0].size(0) > 1:  # for bn, batchsize > 1
                        batch = [b.to(device) for b in batch]
                        outputs_set = model(batch)
                        for view in range(len(X_train)):
                            if epoch < epochs[view]:  # per-view epoch budget
                                y = batch[view]
                                loss = loss_func(outputs_set[view], y)
                                optimizer[view].zero_grad()
                                loss.backward()
                                optimizer[view].step()
                                losses_set[view].update(loss.item(), batch[0].size(0))
                                cur_postfix = ' view{}, rec_loss: {:.4f} '.format(view, losses_set[view].avg)
                                postfix += cur_postfix
                    cout += 1
                    trainloader.set_postfix(log=postfix)
            # # ----------------------------------------model testing
            testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
            scores = [[] for _ in range(len(X_test))]
            with torch.no_grad():
                model.eval()
                for idx, batch in enumerate(tqdm(testloader)):
                    batch = [b.to(device) for b in batch]
                    cur_batch_scores = model.get_ad_scores(batch)
                    for ss in range(len(X_test)):
                        scores[ss].append(cur_batch_scores[ss])
            scores = [torch.cat(scores[ss], dim=0) for ss in range(len(X_test))]
            # To find a single-best view
            scores_set = torch.cat(scores, dim=-1)
            single_best_roc = 0
            single_best_pr_norm = 0
            single_best_pr_anom = 0
            single_best_tnr = 0  # initialized: never unbound even if no view beats 0
            for view_idx in range(scores_set.shape[-1]):
                cur_view_scores = scores_set[:, view_idx].cpu().detach().numpy()
                cur_roc, cur_pr_anom, cur_pr_norm, cur_tnr = save_roc_pr_curve_data(scores=cur_view_scores, labels=Y_test, file_path=None, verbose=False)
                if cur_roc > single_best_roc:
                    single_best_roc = cur_roc
                    single_best_pr_anom = cur_pr_anom
                    single_best_pr_norm = cur_pr_norm
                    single_best_tnr = cur_tnr
            single_best_roc_results[t].append(single_best_roc)
            single_best_pr_norm_results[t].append(single_best_pr_norm)
            single_best_pr_anom_results[t].append(single_best_pr_anom)
            single_best_tnr_results[t].append(single_best_tnr)
            scores = late_fusion(scores, merge='avg')
            # ----------------------------------------model eval
            roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
            roc_results[t].append(roc)
            pr_anom_results[t].append(pr_anom)
            pr_norm_results[t].append(pr_norm)
            tnr_results[t].append(tnr)
            del model
    # save the results
    file_name = results_path + dataset_name + '_mvae'
    np.savez(file=file_name, ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
    load_print_results(file_name)
    file_name = results_path + dataset_name + '_mvae_single_best'
    np.savez(file=file_name, ROC=single_best_roc_results, PR_norm=single_best_pr_norm_results, PR_anom=single_best_pr_anom_results, tnr=single_best_tnr_results)
    load_print_results(file_name)
    print('dataset: {}, layer_size: {}, batch size: {}, lr: {}, wd:{}, eppchs: {}'.format(dataset_name, layer_size, batch_size, lr, wd, max(epochs)))
def deepCCA(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
    """Correlation-regularized multi-view AE baseline (DCCA-style).

    Trains a shared mv_corrAE with a combined objective
    ``(1 - alpha) * reconstruction + alpha * pairwise CCA`` and evaluates
    average-fused anomaly scores. When ``results_path`` is the default
    './results/' a single tuned ``alpha`` is used, otherwise an alpha sweep.

    Fixes vs. the original: ``is 'majority'`` identity comparison replaced
    with ``==``; bare ``except:`` narrowed to ``except Exception:`` so
    KeyboardInterrupt/SystemExit are not swallowed.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; the pairwise CCA loop is placed per-view outside the per-view
    epoch gate, and the results are saved once after the alpha loop (the
    alpha-sweep filename then uses the last alpha) — confirm upstream.
    """
    torch.set_default_tensor_type(torch.DoubleTensor)
    roc_results = [[] for _ in range(repeat_times)]
    pr_anom_results = [[] for _ in range(repeat_times)]
    pr_norm_results = [[] for _ in range(repeat_times)]
    tnr_results = [[] for _ in range(repeat_times)]
    # training config
    batch_size = batch_size_set_dcca[dataset_name]
    layer_size = layer_size_set[dataset_name]
    lr = lr_set_dcca[dataset_name]
    wd = wd_set_dcca[dataset_name]
    loss_func = MSELoss()
    cca_loss_func = cca(outdim_size=layer_size[-1])
    input_size_set = [x.shape[0] for x in X]
    if results_path in ['./results/']:
        alpha_set = [alpha_set_dcca[dataset_name]]
    else:
        alpha_set = [0.01, 0.1, 0.5, 0.9, 0.99]
    for alpha in alpha_set:
        for t in range(repeat_times):
            print('The {}-th run begins!'.format(t))
            for i in range(min([10, len(qualified_Y)])):
                partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
                X_train, X_test = process_ad_dataset(X, partition)
                # -----------------------------------------model training
                seed = 0
                if torch.cuda.is_available():
                    torch.cuda.manual_seed_all(seed)
                torch.manual_seed(seed)
                torch.backends.cudnn.deterministic = True
                torch.backends.cudnn.benchmark = True
                model = mv_corrAE(input_size_set=input_size_set, layer_sizes=layer_size).to(device).double()
                trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
                # specific training procedure
                epochs = epochs_set[dataset_name]
                cca_losses = AverageMeter()
                rec_losses = AverageMeter()
                optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wd)
                model.train()
                cout = 0
                for epoch in range(max(epochs)):
                    trainloader = tqdm(trainloader)
                    if ADdataset_mode == 'majority':  # value comparison, not identity
                        trainloader.set_description('dcca, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                    else:
                        trainloader.set_description('dcca, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                    for idx, batch in enumerate(trainloader):
                        try:
                            if batch[0].size(0) > 1:  # for bn, batchsize > 1
                                batch = [b.to(device) for b in batch]
                                latent_set, outputs_set = model(batch)
                                recon_loss = torch.zeros(1).to(device)
                                cca_loss_ = torch.zeros(1).to(device)
                                for view in range(len(X_train)):
                                    if epoch < epochs[view]:
                                        y = batch[view]
                                        loss = loss_func(outputs_set[view], y)
                                        recon_loss += loss
                                    for v_idx in range(view + 1, len(X_train)):
                                        try:
                                            cur_cca_loss = cca_loss_func.loss(latent_set[view], latent_set[v_idx])
                                        except Exception:
                                            # CCA can fail numerically (e.g. SVD); fall back to zero
                                            cur_cca_loss = torch.zeros(1).to(device)
                                        cca_loss_ += cur_cca_loss
                                rec_losses.update(recon_loss.item(), batch[0].size(0))
                                cca_losses.update(cca_loss_.item(), batch[0].size(0))
                                tot_loss = (1 - alpha) * recon_loss + alpha * cca_loss_
                                postfix = ' rec_loss: {:.4f}, cca_loss: {:.4f} '.format(rec_losses.avg, cca_losses.avg)
                                optimizer.zero_grad()
                                tot_loss.backward()
                                filter_nan_grad(optimizer)  # if grad contains NaN, the batch is not used to update the parameters
                                optimizer.step()
                                cout += 1
                                trainloader.set_postfix(log=postfix)
                        except Exception:
                            print('The error idx is {}'.format(idx))
                # # ----------------------------------------model testing
                testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
                with torch.no_grad():
                    model.eval()
                    scores = [[] for _ in range(len(X_test))]
                    for idx, batch in enumerate(tqdm(testloader)):
                        batch = [b.to(device) for b in batch]
                        cur_batch_scores = model.get_ad_scores(batch)
                        for ss in range(len(X_test)):
                            scores[ss].append(cur_batch_scores[ss])
                scores = [torch.cat(scores[ss], dim=0) for ss in range(len(X_test))]
                scores = late_fusion(scores, merge='avg')
                # ----------------------------------------model eval
                roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
                roc_results[t].append(roc)
                pr_anom_results[t].append(pr_anom)
                pr_norm_results[t].append(pr_norm)
                tnr_results[t].append(tnr)
                del model
    # save the results
    if results_path in ['./results/']:
        np.savez(file=results_path + dataset_name + '_dcca', ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
        load_print_results(results_path + dataset_name + '_dcca')
    else:
        np.savez(file=results_path + dataset_name + '_dcca_alpha_{}'.format(alpha), ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
        load_print_results(results_path + dataset_name + '_dcca_alpha_{}'.format(alpha))
def dgcca(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
    """Generalized-CCA-regularized multi-view AE baseline (DGCCA-style).

    Same training loop as :func:`deepCCA` but with a single generalized CCA
    term over all view latents instead of pairwise CCA:
    ``(1 - alpha) * reconstruction + alpha * GCCA``.

    Fixes vs. the original: ``is 'majority'`` identity comparison replaced
    with ``==``; bare ``except:`` narrowed to ``except Exception:``.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; results are saved once after the alpha loop (sweep filename
    uses the last alpha) — confirm upstream.
    """
    torch.set_default_tensor_type(torch.DoubleTensor)
    roc_results = [[] for _ in range(repeat_times)]
    pr_anom_results = [[] for _ in range(repeat_times)]
    pr_norm_results = [[] for _ in range(repeat_times)]
    tnr_results = [[] for _ in range(repeat_times)]
    # training config
    batch_size = batch_size_set_dgcca[dataset_name]
    lr = lr_set_dgcca[dataset_name]
    wd = wd_set_dgcca[dataset_name]
    layer_size = layer_size_set[dataset_name]
    loss_func = MSELoss()
    gcca_loss_func = gcca(outdim_size=layer_size[-1])
    input_size_set = [x.shape[0] for x in X]
    if results_path in ['./results/']:
        alpha_set = [alpha_set_dgcca[dataset_name]]
    else:
        alpha_set = [0.01, 0.1, 0.5, 0.9, 0.99]
    for alpha in alpha_set:
        for t in range(repeat_times):
            print('The {}-th run begins!'.format(t))
            for i in range(min([10, len(qualified_Y)])):
                partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
                X_train, X_test = process_ad_dataset(X, partition)
                # -----------------------------------------model training
                seed = 0
                if torch.cuda.is_available():
                    torch.cuda.manual_seed_all(seed)
                torch.manual_seed(seed)
                torch.backends.cudnn.deterministic = True
                torch.backends.cudnn.benchmark = True
                model = mv_corrAE(input_size_set=input_size_set, layer_sizes=layer_size).to(device).double()
                trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
                # specific training procedure
                epochs = epochs_set[dataset_name]
                gcca_losses = AverageMeter()
                rec_losses = AverageMeter()
                optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wd)
                model.train()
                cout = 0
                for epoch in range(max(epochs)):
                    trainloader = tqdm(trainloader)
                    if ADdataset_mode == 'majority':  # value comparison, not identity
                        trainloader.set_description('dgcca, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                    else:
                        trainloader.set_description('dgcca, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                    for idx, batch in enumerate(trainloader):
                        if batch[0].size(0) > 1:  # for bn, batchsize > 1
                            batch = [b.to(device) for b in batch]
                            latent_set, outputs_set = model(batch)
                            recon_loss = torch.zeros(1).to(device)
                            try:
                                gcca_loss_ = gcca_loss_func.loss(latent_set)
                            except Exception:
                                # GCCA can fail numerically; fall back to zero
                                gcca_loss_ = torch.zeros(1).to(device)
                            for view in range(len(X_train)):
                                if epoch < epochs[view]:
                                    y = batch[view]
                                    loss = loss_func(outputs_set[view], y)
                                    recon_loss += loss
                            rec_losses.update(recon_loss.item(), batch[0].size(0))
                            gcca_losses.update(gcca_loss_.item(), batch[0].size(0))
                            tot_loss = (1 - alpha) * recon_loss + alpha * gcca_loss_
                            postfix = ' rec_loss: {:.4f}, gcca_loss: {:.4f} '.format(rec_losses.avg, gcca_losses.avg)
                            optimizer.zero_grad()
                            tot_loss.backward()
                            filter_nan_grad(optimizer)  # if grad contains NaN, the batch is not used to update the parameters
                            optimizer.step()
                            cout += 1
                            trainloader.set_postfix(log=postfix)
                # # ----------------------------------------model testing
                testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
                with torch.no_grad():
                    model.eval()
                    scores = [[] for _ in range(len(X_test))]
                    for idx, batch in enumerate(tqdm(testloader)):
                        batch = [b.to(device) for b in batch]
                        cur_batch_scores = model.get_ad_scores(batch)
                        for ss in range(len(X_test)):
                            scores[ss].append(cur_batch_scores[ss])
                scores = [torch.cat(scores[ss], dim=0) for ss in range(len(X_test))]
                scores = late_fusion(scores, merge='avg')
                # ----------------------------------------model eval
                roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
                roc_results[t].append(roc)
                pr_anom_results[t].append(pr_anom)
                pr_norm_results[t].append(pr_norm)
                tnr_results[t].append(tnr)
                del model
    # save the results
    if results_path in ['./results/']:
        np.savez(file=results_path + dataset_name + '_dgcca', ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
        load_print_results(results_path + dataset_name + '_dgcca')
    else:
        np.savez(file=results_path + dataset_name + '_dgcca_alpha_{}'.format(alpha), ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
        load_print_results(results_path + dataset_name + '_dgcca_alpha_{}'.format(alpha))
def simple_mvDSVDD(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
    """Multi-view Deep SVDD baseline.

    Per repeat and qualified class: (optionally) pre-trains a per-view AE,
    transfers its encoder weights into a multi-view encoder, fixes one
    hypersphere center C per view (mean latent, nudged away from zero),
    minimizes the distance-to-center objective ('one_class') or the
    soft-boundary objective with radius R ('soft_bound'), and scores test
    points by negative squared distance to C.

    Fixes vs. the original: ``is 'soft_bound'`` / ``is 'majority'`` identity
    comparisons replaced with ``==``; the AE→DSVDD weight transfer and the
    final ``del model`` are guarded by ``pretrain`` (previously a
    ``NameError`` when ``pretrain`` was False).

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source — confirm loop nesting against upstream.
    """
    torch.set_default_tensor_type(torch.DoubleTensor)
    roc_results = [[] for _ in range(repeat_times)]
    pr_anom_results = [[] for _ in range(repeat_times)]
    pr_norm_results = [[] for _ in range(repeat_times)]
    tnr_results = [[] for _ in range(repeat_times)]
    # training config
    batch_size = batch_size_set_dsvdd[dataset_name]
    layer_size = layer_size_set[dataset_name]
    lr = lr_set_dsvdd[dataset_name]
    wd = wd_set_dsvdd[dataset_name]
    loss_func = MSELoss()
    input_size_set = [x.shape[0] for x in X]
    eps = 1e-10
    pretrain = True  # indicate whether the DSVDD is pre-trained like AE
    mode = 'one_class'
    assert mode in ['one_class', 'soft_bound']
    if mode == 'soft_bound':
        nu = 0.1
        warm_epochs = 5
        assert warm_epochs <= min(epochs_set[dataset_name])
    for t in range(repeat_times):
        print('The {}-th run begins!'.format(t))
        for i in range(min([10, len(qualified_Y)])):
            partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
            X_train, X_test = process_ad_dataset(X, partition)
            # -----------------------------------------build dataloader under current config.
            trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
            testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
            seed = 0
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(seed)
            torch.manual_seed(seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = True
            # -----------------------------------------pre-training procedure
            if pretrain:
                model = mvae_ad(input_size_set=input_size_set, layer_sizes=layer_size).to(device)
                epochs = epochs_set[dataset_name]
                losses_set = [AverageMeter() for _ in range(len(X_train))]
                optimizer = [optim.Adam(model.ae_set[v].parameters(), lr=lr, weight_decay=wd) for v in range(len(X_train))]
                model.train()
                cout = 0
                for epoch in range(max(epochs)):
                    trainloader = tqdm(trainloader)
                    if ADdataset_mode == 'majority':  # value comparison, not identity
                        trainloader.set_description('dsvdd, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                    else:
                        trainloader.set_description('dsvdd, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                    for idx, batch in enumerate(trainloader):
                        postfix = ''
                        if batch[0].size(0) > 1:  # for bn, batchsize > 1
                            batch = [b.to(device) for b in batch]
                            outputs_set = model(batch)
                            for view in range(len(X_train)):
                                if epoch < epochs[view]:
                                    y = batch[view]
                                    loss = loss_func(outputs_set[view], y)
                                    optimizer[view].zero_grad()
                                    loss.backward()
                                    optimizer[view].step()
                                    losses_set[view].update(loss.item(), batch[0].size(0))
                                    cur_postfix = ' view{}, rec_loss: {:.4f} '.format(view, losses_set[view].avg)
                                    postfix += cur_postfix
                        cout += 1
                        trainloader.set_postfix(log=postfix)
                del trainloader
            # # ----------------------------------------set C for multi-view DSVDD
            trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
            mvDSVDD = mvenc(input_size_set=input_size_set, layer_sizes=layer_size).to(device)
            if pretrain:
                # transfer the pre-trained AE encoder weights into the DSVDD encoder
                ae_dict = model.state_dict()
                dsvdd_dict = mvDSVDD.state_dict()
                ae_dict = {k: v for k, v in ae_dict.items() if k in dsvdd_dict}
                dsvdd_dict.update(ae_dict)
                mvDSVDD.load_state_dict(dsvdd_dict)
            mvDSVDD.eval()
            C_set = [[] for _ in range(len(X_train))]
            R_set = [torch.zeros(1).to(device) for _ in range(len(X_train))]
            with torch.no_grad():
                for idx, batch in enumerate(tqdm(trainloader)):
                    batch = [b.to(device) for b in batch]
                    cur_batch_latent = mvDSVDD(batch)
                    for ss in range(len(X_train)):
                        C_set[ss].append(cur_batch_latent[ss].detach())
            C_set = [torch.cat(C_set[cc], dim=0) for cc in range(len(X_train))]
            for cc in range(len(C_set)):
                tmp = torch.mean(C_set[cc], dim=0)
                # nudge near-zero center coordinates away from 0 to avoid a trivial sphere
                tmp[(abs(tmp) < eps) & (tmp < 0)] = -eps
                tmp[(abs(tmp) < eps) & (tmp > 0)] = eps
                C_set[cc] = tmp
            # -------------------------------------train the DSVDD
            epochs = epochs_set[dataset_name]
            losses_set = [AverageMeter() for _ in range(len(X_train))]
            optimizer = [optim.Adam(mvDSVDD.ae_set[v].parameters(), lr=lr, weight_decay=wd) for v in range(len(X_train))]
            mvDSVDD.train()
            cout = 0
            for epoch in range(max(epochs)):
                trainloader = tqdm(trainloader)
                if ADdataset_mode == 'majority':
                    trainloader.set_description('dsvdd, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                else:
                    trainloader.set_description('dsvdd, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                for idx, batch in enumerate(trainloader):
                    postfix = ''
                    if batch[0].size(0) > 1:  # for bn, batchsize > 1
                        batch = [b.to(device) for b in batch]
                        outputs_set = mvDSVDD(batch)
                        for view in range(len(X_train)):
                            if epoch < epochs[view]:
                                cur_c = C_set[view].to(device)
                                cur_output = outputs_set[view]
                                dist = torch.sum((cur_output - cur_c) ** 2, dim=1)
                                if mode == 'soft_bound':
                                    scores = dist - R_set[view] ** 2
                                    loss = R_set[view] ** 2 + (1 / nu) * torch.mean(torch.max(torch.zeros_like(scores), scores))
                                else:
                                    loss = torch.mean(dist)
                                optimizer[view].zero_grad()
                                loss.backward()
                                optimizer[view].step()
                                if mode == 'soft_bound' and epoch >= warm_epochs:
                                    # update radius only after the warm-up epochs
                                    R_set[view].data = torch.tensor(get_radius(dist, nu), device=device)
                                losses_set[view].update(loss.item(), batch[0].size(0))
                                if mode == 'soft_bound':
                                    cur_postfix = ' view{}, dd_loss: {:.4f} R: {:.4f} '.format(view, losses_set[view].avg, R_set[view].item())
                                else:
                                    cur_postfix = ' view{}, dd_loss: {:.4f} '.format(view, losses_set[view].avg)
                                postfix += cur_postfix
                    cout += 1
                    trainloader.set_postfix(log=postfix)
            # ----------------------------------------model inferrence
            mvDSVDD.eval()
            scores_set = [[] for _ in range(len(X_test))]
            with torch.no_grad():
                for idx, batch in enumerate(tqdm(testloader)):
                    batch = [b.to(device) for b in batch]
                    cur_batch_outputs = mvDSVDD(batch)
                    for view in range(len(X_train)):
                        # negative squared distance to the center: larger = more normal
                        scores = -1. * torch.sum((cur_batch_outputs[view] - C_set[view]) ** 2, dim=1, keepdim=True)
                        scores_set[view].append(scores.detach())
            scores_set = [torch.cat(scores_set[ss], dim=0) for ss in range(len(X_train))]
            scores = late_fusion(scores_set, merge='avg')
            # ----------------------------------------model eval
            roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
            roc_results[t].append(roc)
            pr_anom_results[t].append(pr_anom)
            pr_norm_results[t].append(pr_norm)
            tnr_results[t].append(tnr)
            if pretrain:
                del model  # only defined when the pre-training branch ran
    # save the results
    np.savez(file=results_path + dataset_name + '_dsvdd', ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
    load_print_results(results_path + dataset_name + '_dsvdd')
def mv_corrae(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
    """Train and evaluate the multi-view correlation autoencoder (corrAE) baseline.

    For each alpha (trade-off between per-view reconstruction loss and
    cross-view latent correlation loss), repeats `repeat_times` runs over up to
    10 anomaly-detection splits built from `qualified_Y`, trains mv_corrAE,
    scores the test set per view, fuses the scores by averaging, and saves
    ROC / PR / TNR results as an .npz file under `results_path`.

    :param dataset_name: key into the global hyper-parameter tables.
    :param X: list of per-view data matrices; x.shape[0] is the view's input size.
    :param Y: labels used to build the AD partitions.
    :param repeat_times: number of repeated runs.
    :param normal_train_ratio: unused here; kept for a uniform driver signature.
    :param results_path: output directory prefix for the result files.
    :param device: torch device for training and inference.
    :param qualified_Y: class ids eligible to act as the normal/abnormal class.
    :param ADdataset_mode: 'minority' or 'majority' split-construction mode.
    """
    torch.set_default_tensor_type(torch.DoubleTensor)
    roc_results = [[] for i in range(repeat_times)]
    pr_anom_results = [[] for i in range(repeat_times)]
    pr_norm_results = [[] for i in range(repeat_times)]
    tnr_results = [[] for i in range(repeat_times)]
    # training config
    batch_size = batch_size_set_corr[dataset_name]
    lr = lr_set_corr[dataset_name]
    wd = wd_set_corr[dataset_name]
    layer_size = layer_size_set[dataset_name]
    loss_func = MSELoss()
    input_size_set = [x.shape[0] for x in X]
    # default run uses the tuned alpha; any other results_path sweeps a grid
    if results_path in ['./results/']:
        alpha_set = [alpha_set_corr[dataset_name]]
    else:
        alpha_set = [0.01, 0.1, 0.5, 0.9, 0.99]
    for alpha in alpha_set:
        for t in range(repeat_times):
            print('The {}-th run begins!'.format(t))
            for i in range(min([10, len(qualified_Y)])):
                partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
                X_train, X_test = process_ad_dataset(X, partition)
                # -----------------------------------------model training
                seed = 0
                if torch.cuda.is_available():
                    torch.cuda.manual_seed_all(seed)
                torch.manual_seed(seed)
                torch.backends.cudnn.deterministic = True
                torch.backends.cudnn.benchmark = True
                model = mv_corrAE(input_size_set=input_size_set, layer_sizes=layer_size).to(device).double()
                trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
                # specific training procedure
                epochs = epochs_set[dataset_name]
                rec_losses = AverageMeter()
                corr_losses = AverageMeter()
                optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wd)
                model.train()
                cout = 0
                for epoch in range(max(epochs)):
                    trainloader = tqdm(trainloader)
                    # bugfix: compare strings by value ('==') rather than identity ('is')
                    if ADdataset_mode == 'majority':
                        trainloader.set_description('corr, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                    else:
                        trainloader.set_description('corr, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                    for idx, batch in enumerate(trainloader):
                        if batch[0].size(0) > 1:  # for bn, batchsize > 1
                            batch = [batch[i].to(device) for i in range(len(batch))]
                            latent_set, outputs_set = model(batch)
                            recon_loss = torch.zeros(1).to(device)
                            corr_loss = torch.zeros(1).to(device)
                            for view in range(len(X_train)):
                                if epoch < epochs[view]:
                                    y = batch[view]
                                    loss = loss_func(outputs_set[view], y)
                                    recon_loss += loss
                                    # couple this view's latent with every later view's latent
                                    for v_idx in range(view + 1, len(X_train)):
                                        cur_corr_loss = loss_func(latent_set[view], latent_set[v_idx])
                                        corr_loss += cur_corr_loss
                            rec_losses.update(recon_loss.item(), batch[0].size(0))
                            corr_losses.update(corr_loss.item(), batch[0].size(0))
                            postfix = ' rec_loss: {:.4f}, corr_loss: {:.4f} '.format(rec_losses.avg, corr_losses.avg)
                            tot_loss = (1 - alpha) * recon_loss + alpha * corr_loss
                            optimizer.zero_grad()
                            tot_loss.backward()
                            optimizer.step()
                            cout += 1
                            trainloader.set_postfix(log=postfix)
                # # ----------------------------------------model testing
                testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
                with torch.no_grad():
                    model.eval()
                    scores = [[] for ss in range(len(X_test))]
                    for idx, batch in enumerate(tqdm(testloader)):
                        batch = [batch[i].to(device) for i in range(len(batch))]
                        cur_batch_scores = model.get_ad_scores(batch)
                        for ss in range(len(X_test)):
                            scores[ss].append(cur_batch_scores[ss])
                    scores = [torch.cat(scores[ss], dim=0) for ss in range(len(X_test))]
                    scores = late_fusion(scores, merge='avg')
                # ----------------------------------------model eval
                roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
                roc_results[t].append(roc)
                pr_anom_results[t].append(pr_anom)
                pr_norm_results[t].append(pr_norm)
                tnr_results[t].append(tnr)
                del model
        # save the results (per-alpha file when sweeping)
        if results_path in ['./results/']:
            np.savez(file=results_path + dataset_name + '_corrAE', ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
            load_print_results(results_path + dataset_name + '_corrAE')
        else:
            np.savez(file=results_path + dataset_name + '_corrAE_alpha_{}'.format(alpha), ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
            load_print_results(results_path + dataset_name + '_corrAE_alpha_{}'.format(alpha))
def mv_sim(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
    """Train and evaluate the multi-view AE with a cross-view similarity hinge loss.

    Same driver structure as mv_corrae, but the cross-view coupling term is
    similarity_hinge_loss(latent_i, latent_j, m) with margin m instead of an
    MSE correlation loss.  Sweeps margin m and trade-off alpha (single tuned
    values when results_path is './results/'), repeats `repeat_times` runs over
    up to 10 AD splits, fuses per-view test scores by averaging, and saves
    ROC / PR / TNR results under `results_path`.

    :param normal_train_ratio: unused here; kept for a uniform driver signature.
    """
    torch.set_default_tensor_type(torch.DoubleTensor)
    roc_results = [[] for i in range(repeat_times)]
    pr_anom_results = [[] for i in range(repeat_times)]
    pr_norm_results = [[] for i in range(repeat_times)]
    tnr_results = [[] for i in range(repeat_times)]
    # training config
    batch_size = batch_size_set_sim[dataset_name]
    lr = lr_set_sim[dataset_name]
    wd = wd_set_sim[dataset_name]
    layer_size = layer_size_set[dataset_name]
    loss_func = MSELoss()
    input_size_set = [x.shape[0] for x in X]
    # default run uses the tuned hyper-parameters; otherwise sweep a grid
    if results_path in ['./results/']:
        alpha_set = [alpha_set_sim[dataset_name]]
    else:
        alpha_set = [0.01, 0.1, 0.5, 0.9, 0.99]
    if results_path in ['./results/']:
        m_set = [m_set_sim[dataset_name]]
    else:
        # m_set = [0, 1, 3, 5, 7]
        m_set = [0]
    for m in m_set:
        for alpha in alpha_set:
            for t in range(repeat_times):
                print('The {}-th run begins!'.format(t))
                for i in range(min([10, len(qualified_Y)])):
                    partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
                    X_train, X_test = process_ad_dataset(X, partition)
                    # -----------------------------------------model training
                    seed = 0
                    if torch.cuda.is_available():
                        torch.cuda.manual_seed_all(seed)
                    torch.manual_seed(seed)
                    torch.backends.cudnn.deterministic = True
                    torch.backends.cudnn.benchmark = True
                    model = mv_corrAE(input_size_set=input_size_set, layer_sizes=layer_size).to(device).double()
                    trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
                    # specific training procedure
                    epochs = epochs_set[dataset_name]
                    rec_losses = AverageMeter()
                    sim_losses = AverageMeter()
                    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wd)
                    model.train()
                    cout = 0
                    for epoch in range(max(epochs)):
                        trainloader = tqdm(trainloader)
                        # bugfix: compare strings by value ('==') rather than identity ('is')
                        if ADdataset_mode == 'majority':
                            trainloader.set_description('sim, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                        else:
                            trainloader.set_description('sim, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                        for idx, batch in enumerate(trainloader):
                            if batch[0].size(0) > 1:  # for bn, batchsize > 1
                                batch = [batch[i].to(device) for i in range(len(batch))]
                                latent_set, outputs_set = model(batch)
                                recon_loss = torch.zeros(1).to(device)
                                sim_loss = torch.zeros(1).to(device)
                                for view in range(len(X_train)):
                                    if epoch < epochs[view]:
                                        y = batch[view]
                                        loss = loss_func(outputs_set[view], y)
                                        recon_loss += loss
                                        # couple this view's latent with every later view's latent
                                        for v_idx in range(view + 1, len(X_train)):
                                            cur_sim_loss = similarity_hinge_loss(latent_set[view], latent_set[v_idx], m)
                                            sim_loss += cur_sim_loss
                                rec_losses.update(recon_loss.item(), batch[0].size(0))
                                sim_losses.update(sim_loss.item(), batch[0].size(0))
                                postfix = ' rec_loss: {:.4f}, sim_loss: {:.4f} '.format(rec_losses.avg, sim_losses.avg)
                                tot_loss = (1 - alpha) * recon_loss + alpha * sim_loss
                                optimizer.zero_grad()
                                tot_loss.backward()
                                optimizer.step()
                                cout += 1
                                trainloader.set_postfix(log=postfix)
                    # # ----------------------------------------model testing
                    testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
                    with torch.no_grad():
                        model.eval()
                        scores = [[] for ss in range(len(X_test))]
                        for idx, batch in enumerate(tqdm(testloader)):
                            batch = [batch[i].to(device) for i in range(len(batch))]
                            cur_batch_scores = model.get_ad_scores(batch)
                            for ss in range(len(X_test)):
                                scores[ss].append(cur_batch_scores[ss])
                        scores = [torch.cat(scores[ss], dim=0) for ss in range(len(X_test))]
                        scores = late_fusion(scores, merge='avg')
                    # ----------------------------------------model eval
                    roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
                    roc_results[t].append(roc)
                    pr_anom_results[t].append(pr_anom)
                    pr_norm_results[t].append(pr_norm)
                    tnr_results[t].append(tnr)
                    del model
            # save the results (per-setting file when sweeping)
            if results_path in ['./results/']:
                np.savez(file=results_path + dataset_name + '_sim', ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
                load_print_results(results_path + dataset_name + '_sim')
            else:
                np.savez(file=results_path + dataset_name + '_sim_alpha_{}_m_{}'.format(alpha, m), ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
                load_print_results(results_path + dataset_name + '_sim_alpha_{}_m_{}'.format(alpha, m))
def mvae_fuse_latent(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
    """Two-stage multi-view AE baseline with a fused latent space.

    Stage 1 pre-trains independent per-view autoencoders (mvae_ad); stage 2
    copies the matching weights into mvae_fused and fine-tunes the fused model
    end-to-end on the summed per-view reconstruction loss.  Per-view test
    scores are fused by averaging and ROC / PR / TNR results are saved under
    `results_path`.

    :param normal_train_ratio: unused here; kept for a uniform driver signature.
    """
    torch.set_default_tensor_type(torch.DoubleTensor)
    roc_results = [[] for i in range(repeat_times)]
    pr_anom_results = [[] for i in range(repeat_times)]
    pr_norm_results = [[] for i in range(repeat_times)]
    tnr_results = [[] for i in range(repeat_times)]
    # training config
    batch_size = batch_size_set_fused[dataset_name]
    layer_size = layer_size_set[dataset_name]
    lr = lr_set_fused[dataset_name]
    wd = wd_set_fused[dataset_name]
    fuse_dim = layer_size[-1]
    loss_func = MSELoss()
    input_size_set = [x.shape[0] for x in X]
    for t in range(repeat_times):
        print('The {}-th run begins!'.format(t))
        for i in range(min([10, len(qualified_Y)])):
            partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
            X_train, X_test = process_ad_dataset(X, partition)
            # -----------------------------------------model training
            seed = 0
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(seed)
            torch.manual_seed(seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = True
            model = mvae_ad(input_size_set=input_size_set, layer_sizes=layer_size).to(device).double()
            trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
            # specific training procedure
            epochs = epochs_set[dataset_name]
            pt_epochs = pt_epochs_set[dataset_name]
            assert max(epochs) > max(pt_epochs)
            losses_set = [AverageMeter() for i in range(len(X_train))]
            optimizer = [optim.Adam(model.ae_set[i].parameters(), lr=lr, weight_decay=wd) for i in range(len(X_train))]
            # -----------------------------------------pretrain the MVAE
            model.train()
            cout = 0
            for epoch in range(max(pt_epochs)):
                trainloader = tqdm(trainloader)
                # bugfix: compare strings by value ('==') rather than identity ('is')
                if ADdataset_mode == 'majority':
                    trainloader.set_description('fuse, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                else:
                    trainloader.set_description('fuse, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                for idx, batch in enumerate(trainloader):
                    postfix = ''
                    if batch[0].size(0) > 1:  # for bn, batchsize > 1
                        batch = [batch[i].to(device) for i in range(len(batch))]
                        outputs_set = model(batch)
                        for view in range(len(X_train)):
                            # NOTE(review): loop runs max(pt_epochs) epochs but this
                            # bound uses epochs[view]; pt_epochs[view] may have been
                            # intended — confirm before changing.
                            if epoch < epochs[view]:
                                y = batch[view]
                                loss = loss_func(outputs_set[view], y)
                                optimizer[view].zero_grad()
                                loss.backward()
                                optimizer[view].step()
                                losses_set[view].update(loss.item(), batch[0].size(0))
                                cur_postfix = ' view{}, rec_loss: {:.4f} '.format(view, losses_set[view].avg)
                                postfix += cur_postfix
                        cout += 1
                        trainloader.set_postfix(log=postfix)
            del trainloader
            # -------------------------------load pretrained weights to MVAE_fused
            fused_model = mvae_fused(input_size_set=input_size_set, layer_sizes=layer_size, fuse_dim=fuse_dim).to(device).double()
            ae_dict = model.state_dict()
            ae_fused_dict = fused_model.state_dict()
            # keep only the pretrained weights whose names exist in the fused model
            ae_dict = {k: v for k, v in ae_dict.items() if k in ae_fused_dict}
            ae_fused_dict.update(ae_dict)
            fused_model.load_state_dict(ae_fused_dict)
            fused_model.train()
            trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
            epochs = epochs_set[dataset_name]
            losses = AverageMeter()
            optimizer = optim.Adam(fused_model.parameters(), lr=lr, weight_decay=wd)
            cout = 0
            # fine-tune for the remaining epoch budget
            for epoch in range(max(epochs) - max(pt_epochs)):
                trainloader = tqdm(trainloader)
                if ADdataset_mode == 'majority':
                    trainloader.set_description('fuse, Epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                else:
                    trainloader.set_description('fuse, Epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                for idx, batch in enumerate(trainloader):
                    if batch[0].size(0) > 1:  # for bn, batchsize > 1
                        batch = [batch[i].to(device) for i in range(len(batch))]
                        outputs_set = fused_model(batch)
                        loss = torch.zeros(1).to(device)
                        for ll in range(len(batch)):
                            loss += loss_func(outputs_set[ll], batch[ll])
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()
                        losses.update(loss.item(), batch[0].size(0))
                        postfix = ' fused rec_loss: {:.4f} '.format(losses.avg)
                        cout += 1
                        trainloader.set_postfix(log=postfix)
            # # ----------------------------------------model testing
            testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
            with torch.no_grad():
                fused_model.eval()
                scores = [[] for ss in range(len(X_test))]
                for idx, batch in enumerate(tqdm(testloader)):
                    batch = [batch[i].to(device) for i in range(len(batch))]
                    cur_batch_scores = fused_model.get_ad_scores(batch)
                    for ss in range(len(X_test)):
                        scores[ss].append(cur_batch_scores[ss])
                scores = [torch.cat(scores[ss], dim=0) for ss in range(len(X_test))]
                scores = late_fusion(scores, merge='avg')
            # ----------------------------------------model eval
            roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
            roc_results[t].append(roc)
            pr_anom_results[t].append(pr_anom)
            pr_norm_results[t].append(pr_norm)
            tnr_results[t].append(tnr)
            del model
            del fused_model  # also release the fine-tuned model between splits
    # save the results
    np.savez(file=results_path + dataset_name + '_mvae_fused', ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
    load_print_results(results_path + dataset_name + '_mvae_fused')
def mv_dbn(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
    """Run the multi-view DBN anomaly-detection baseline.

    Repeats the experiment `repeat_times` times over up to 10 AD splits derived
    from `qualified_Y`; each run trains an mv_DBN on the (0-1 normalized)
    training views, scores the test views, fuses the per-view scores by
    averaging, records ROC / PR / TNR metrics, and finally saves them as an
    .npz file under `results_path`.

    :param normal_train_ratio: unused here; kept for a uniform driver signature.
    """
    torch.set_default_tensor_type(torch.DoubleTensor)
    # one result bucket per repetition
    roc_results = [[] for _ in range(repeat_times)]
    pr_anom_results = [[] for _ in range(repeat_times)]
    pr_norm_results = [[] for _ in range(repeat_times)]
    tnr_results = [[] for _ in range(repeat_times)]
    # hyper-parameters looked up from the global config tables
    layer_size = layer_size_set[dataset_name]
    batch_size = batch_size_set_dbn[dataset_name]
    lr = lr_set_dbn[dataset_name]
    wd = wd_set_dbn[dataset_name]
    print('layer_size: {}, batch size: {}, lr: {}, wd:{}'.format(layer_size, batch_size, lr, wd))
    input_size_set = [view.shape[0] for view in X]
    fuse_dim = layer_size[-1]
    k = 3  # forwarded to mv_DBN (presumably Gibbs/CD steps) — confirm in mv_DBN
    epochs = epochs_set[dataset_name]
    for run_idx in range(repeat_times):
        print('The {}-th run begins!'.format(run_idx))
        n_splits = min([10, len(qualified_Y)])
        for cls_idx in range(n_splits):
            partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[cls_idx], mode=ADdataset_mode)
            X_train, X_test = process_ad_dataset(X, partition, normalization_range='01')
            # -----------------------------------------model training
            seed = 0
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(seed)
            torch.manual_seed(seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = True
            dbn = mv_DBN(input_sizes=input_size_set, layers=layer_size, fuse_dim=fuse_dim, k=k)
            train_tensors = [torch.from_numpy(arr) for arr in X_train]
            dbn.train_mv_DBN(train_tensors, epochs=max(epochs), lr=lr, batch_size=batch_size)
            # # ----------------------------------------model testing
            test_tensors = [torch.from_numpy(arr) for arr in X_test]
            fused_scores = late_fusion(dbn.get_ad_scores(test_tensors), merge='avg')
            # ----------------------------------------model eval
            roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=fused_scores, labels=Y_test, file_path=None, verbose=False)
            roc_results[run_idx].append(roc)
            pr_anom_results[run_idx].append(pr_anom)
            pr_norm_results[run_idx].append(pr_norm)
            tnr_results[run_idx].append(tnr)
            del dbn
    # persist and display the aggregated results
    file_name = results_path + dataset_name + '_dbn'
    np.savez(file=file_name, ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
    load_print_results(file_name)
def mv_splitae(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
    """Train and evaluate the split autoencoder (splitAE) baseline.

    Each encoder's latent is decoded into a reconstruction of every view
    (cross-reconstruction); the total loss sums reconstruction errors over all
    encoder/view pairs.  Repeats `repeat_times` runs over up to 10 AD splits,
    fuses per-view test scores by averaging, and saves ROC / PR / TNR results
    under `results_path`.

    :param normal_train_ratio: unused here; kept for a uniform driver signature.
    """
    torch.set_default_tensor_type(torch.DoubleTensor)
    roc_results = [[] for i in range(repeat_times)]
    pr_anom_results = [[] for i in range(repeat_times)]
    pr_norm_results = [[] for i in range(repeat_times)]
    tnr_results = [[] for i in range(repeat_times)]
    # training config
    batch_size = batch_size_set_split[dataset_name]
    lr = lr_set_split[dataset_name]
    wd = wd_set_split[dataset_name]
    layer_size = layer_size_set[dataset_name]
    loss_func = MSELoss()
    input_size_set = [x.shape[0] for x in X]
    dec_mode = 'fixed'
    for t in range(repeat_times):
        print('The {}-th run begins!'.format(t))
        for i in range(min([10, len(qualified_Y)])):
            partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
            X_train, X_test = process_ad_dataset(X, partition)
            # -----------------------------------------model training
            seed = 0
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(seed)
            torch.manual_seed(seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = True
            model = splitAE(input_size_set=input_size_set, layer_sizes=layer_size, dec_mode=dec_mode).to(device)
            trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
            # specific training procedure
            epochs = epochs_set[dataset_name]
            losses_set = [AverageMeter() for i in range(len(X_train))]
            optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wd)
            model.train()
            cout = 0
            for epoch in range(max(epochs)):
                trainloader = tqdm(trainloader)
                # bugfix: compare strings by value ('==') rather than identity ('is')
                if ADdataset_mode == 'majority':
                    trainloader.set_description('split_fix, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                else:
                    trainloader.set_description('split_fix, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                for idx, batch in enumerate(trainloader):
                    postfix = ''
                    if batch[0].size(0) > 1:  # for bn, batchsize > 1
                        batch = [batch[i].to(device) for i in range(len(batch))]
                        outputs_set = model(batch)
                        loss = torch.zeros(1).to(device)
                        cur_view_losses = [0. for cc in range(len(X_train))]
                        for enc_ind in range(len(outputs_set)):
                            cur_enc_recon = outputs_set[enc_ind]
                            for view in range(len(X_train)):
                                y = batch[view]
                                cur_enc_view_loss = loss_func(cur_enc_recon[view], y)
                                loss += cur_enc_view_loss
                                # bugfix: accumulate this pair's loss, not the
                                # running total `loss` (which inflated the
                                # per-view bookkeeping)
                                cur_view_losses[view] += cur_enc_view_loss.item()
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()
                        for view in range(len(X_train)):
                            losses_set[view].update(cur_view_losses[view], batch[0].size(0))
                            cur_postfix = ' view{}, rec_loss: {:.4f} '.format(view, losses_set[view].avg)
                            postfix += cur_postfix
                        cout += 1
                        trainloader.set_postfix(log=postfix)
            # # ----------------------------------------model testing
            testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
            with torch.no_grad():
                model.eval()
                scores = [[] for ss in range(len(X_test))]
                for idx, batch in enumerate(tqdm(testloader)):
                    batch = [batch[i].to(device) for i in range(len(batch))]
                    cur_batch_scores = model.get_ad_scores(batch)
                    for ss in range(len(X_test)):
                        scores[ss].append(cur_batch_scores[ss])
                scores = [torch.cat(scores[ss], dim=0) for ss in range(len(X_test))]
                scores = late_fusion(scores, merge='avg')
            # ----------------------------------------model eval
            roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
            roc_results[t].append(roc)
            pr_anom_results[t].append(pr_anom)
            pr_norm_results[t].append(pr_norm)
            tnr_results[t].append(tnr)
            del model
    # save the results
    np.savez(file=results_path + dataset_name + '_splitAE_{}'.format(dec_mode), ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
    load_print_results(results_path + dataset_name + '_splitAE_{}'.format(dec_mode))
def mv_ss(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority', mode='fuse', param_mode='fixed'):
    """Train and evaluate the shared/specific multi-view AE variants (mvae_ss).

    `mode` selects the architecture variant ('fuse', 'pred' or 'split') and
    `param_mode` its parameterization ('nn'/'sum'/'max' for 'fuse';
    'fixed'/'non-fixed' otherwise).  Repeats `repeat_times` runs over up to 10
    AD splits, fuses per-view test scores by averaging, and saves ROC / PR /
    TNR results under `results_path`.

    :param normal_train_ratio: unused here; kept for a uniform driver signature.
    :raises NotImplementedError: on an invalid mode / param_mode combination.
    """
    torch.set_default_tensor_type(torch.DoubleTensor)
    roc_results = [[] for i in range(repeat_times)]
    pr_anom_results = [[] for i in range(repeat_times)]
    pr_norm_results = [[] for i in range(repeat_times)]
    tnr_results = [[] for i in range(repeat_times)]
    # training config
    batch_size = batch_size_set_split[dataset_name]
    lr = lr_set_split[dataset_name]
    wd = wd_set_split[dataset_name]
    layer_size = layer_size_set[dataset_name]
    loss_func = MSELoss()
    input_size_set = [x.shape[0] for x in X]
    fuse_dim = layer_size[-1]
    # mode is one of fuse/pred/split
    # bugfix: compare strings by value ('==') rather than identity ('is')
    if mode == 'fuse':
        if param_mode not in ['nn', 'sum', 'max']:
            raise NotImplementedError
    else:
        if param_mode not in ['fixed', 'non-fixed']:
            raise NotImplementedError
    for t in range(repeat_times):
        print('The {}-th run begins!'.format(t))
        for i in range(min([10, len(qualified_Y)])):
            partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
            X_train, X_test = process_ad_dataset(X, partition)
            # -----------------------------------------model training
            seed = 0
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(seed)
            torch.manual_seed(seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = True
            model = mvae_ss(input_size_set=input_size_set, layer_sizes=layer_size, fuse_dim=fuse_dim, mode=mode, param_mode=param_mode).to(device)
            trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
            # specific training procedure
            epochs = epochs_set[dataset_name]
            losses_set = [AverageMeter() for i in range(len(X_train))]
            optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wd)
            model.train()
            for epoch in range(max(epochs)):
                trainloader = tqdm(trainloader)
                if ADdataset_mode == 'majority':
                    trainloader.set_description('ss_{}_{}, epoch{}, abnormal class{}'.format(mode, param_mode, epoch, qualified_Y[i]))
                else:
                    trainloader.set_description('ss_{}_{}, epoch{}, normal class{}'.format(mode, param_mode, epoch, qualified_Y[i]))
                for idx, batch in enumerate(trainloader):
                    postfix = ''
                    if batch[0].size(0) > 1:  # for bn, batchsize > 1
                        batch = [batch[i].to(device) for i in range(len(batch))]
                        outputs_set = model(batch)
                        if mode == 'fuse' or mode == 'pred':
                            loss = torch.zeros(1).to(device)
                            for ll in range(len(batch)):
                                cur_view_loss = loss_func(outputs_set[ll], batch[ll])
                                # bugfix: pass a float to the meter (consistent with
                                # the other drivers), not a tensor
                                losses_set[ll].update(cur_view_loss.item(), batch[0].size(0))
                                cur_postfix = ' view{}, rec_loss: {:.4f} '.format(ll, losses_set[ll].avg)
                                postfix += cur_postfix
                                loss += cur_view_loss
                            optimizer.zero_grad()
                            loss.backward()
                            optimizer.step()
                        elif mode == 'split':
                            loss = torch.zeros(1).to(device)
                            cur_view_losses = [0. for cc in range(len(X_train))]
                            for enc_ind in range(len(outputs_set)):
                                cur_enc_recon = outputs_set[enc_ind]
                                for view in range(len(X_train)):
                                    y = batch[view]
                                    cur_enc_view_loss = loss_func(cur_enc_recon[view], y)
                                    loss += cur_enc_view_loss
                                    # bugfix: accumulate this pair's loss, not the
                                    # running total `loss`
                                    cur_view_losses[view] += cur_enc_view_loss.item()
                            optimizer.zero_grad()
                            loss.backward()
                            optimizer.step()
                            for view in range(len(X_train)):
                                losses_set[view].update(cur_view_losses[view], batch[0].size(0))
                                cur_postfix = ' view{}, rec_loss: {:.4f} '.format(view, losses_set[view].avg)
                                postfix += cur_postfix
                        else:
                            raise NotImplementedError
                        trainloader.set_postfix(log=postfix)
            # # ----------------------------------------model testing
            testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
            with torch.no_grad():
                model.eval()
                scores = [[] for ss in range(len(X_test))]
                for idx, batch in enumerate(tqdm(testloader)):
                    batch = [batch[i].to(device) for i in range(len(batch))]
                    cur_batch_scores = model.get_ad_scores(batch)
                    for ss in range(len(X_test)):
                        scores[ss].append(cur_batch_scores[ss])
                scores = [torch.cat(scores[ss], dim=0) for ss in range(len(X_test))]
                scores = late_fusion(scores, merge='avg')
            # ----------------------------------------model eval
            roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
            roc_results[t].append(roc)
            pr_anom_results[t].append(pr_anom)
            pr_norm_results[t].append(pr_norm)
            tnr_results[t].append(tnr)
            del model
    # save the results
    np.savez(file=results_path + dataset_name + '_ss_{}_{}'.format(mode, param_mode), ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
    load_print_results(results_path + dataset_name + '_ss_{}_{}'.format(mode, param_mode))
def mvDSVDD_fused(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
    """Deep SVDD on a fused multi-view latent space.

    Optionally pre-trains per-view autoencoders (mvae_ad), copies the matching
    encoder weights into the fused encoder (mvenc_fuse), fixes the hypersphere
    center C as the mean training latent, then trains the encoder with the
    one-class (or soft-boundary) Deep SVDD objective.  Test scores are negative
    squared distances to C; ROC / PR / TNR results are saved under
    `results_path`.

    :param normal_train_ratio: unused here; kept for a uniform driver signature.
    """
    torch.set_default_tensor_type(torch.DoubleTensor)
    roc_results = [[] for i in range(repeat_times)]
    pr_anom_results = [[] for i in range(repeat_times)]
    pr_norm_results = [[] for i in range(repeat_times)]
    tnr_results = [[] for i in range(repeat_times)]
    # training config
    batch_size = batch_size_set_dsvdd[dataset_name]
    layer_size = layer_size_set[dataset_name]
    lr = lr_set_dsvdd[dataset_name]
    wd = wd_set_dsvdd[dataset_name]
    loss_func = MSELoss()
    input_size_set = [x.shape[0] for x in X]
    eps = 1e-10  # keeps center coordinates away from exactly zero
    pretrain = True  # indicate whether the DSVDD is pre-trained like AE
    # NOTE(review): pretrain=False would leave `model` undefined below — only
    # the pre-trained path is currently usable; confirm before toggling.
    mode = 'one_class'
    assert mode in ['one_class', 'soft_bound']
    # bugfix: compare strings by value ('==') rather than identity ('is')
    if mode == 'soft_bound':
        nu = 0.1
        warm_epochs = 5  # epochs before the radius R starts being updated
        assert warm_epochs <= min(epochs_set[dataset_name])
    for t in range(repeat_times):
        print('The {}-th run begins!'.format(t))
        for i in range(min([10, len(qualified_Y)])):
            partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
            X_train, X_test = process_ad_dataset(X, partition)
            # -----------------------------------------build dataloader under current config.
            trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
            testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
            seed = 0
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(seed)
            torch.manual_seed(seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = True
            # -----------------------------------------pre-training procedure
            if pretrain:
                model = mvae_ad(input_size_set=input_size_set, layer_sizes=layer_size).to(device)
                epochs = epochs_set[dataset_name]
                losses_set = [AverageMeter() for i in range(len(X_train))]
                optimizer = [optim.Adam(model.ae_set[i].parameters(), lr=lr, weight_decay=wd) for i in range(len(X_train))]
                model.train()
                cout = 0
                for epoch in range(max(epochs)):
                    trainloader = tqdm(trainloader)
                    if ADdataset_mode == 'majority':
                        trainloader.set_description('dsvdd_fuse, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                    else:
                        trainloader.set_description('dsvdd_fuse, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                    for idx, batch in enumerate(trainloader):
                        postfix = ''
                        if batch[0].size(0) > 1:  # for bn, batchsize > 1
                            batch = [batch[i].to(device) for i in range(len(batch))]
                            outputs_set = model(batch)
                            for view in range(len(X_train)):
                                if epoch < epochs[view]:
                                    y = batch[view]
                                    loss = loss_func(outputs_set[view], y)
                                    optimizer[view].zero_grad()
                                    loss.backward()
                                    optimizer[view].step()
                                    losses_set[view].update(loss.item(), batch[0].size(0))
                                    cur_postfix = ' view{}, rec_loss: {:.4f} '.format(view, losses_set[view].avg)
                                    postfix += cur_postfix
                            cout += 1
                            trainloader.set_postfix(log=postfix)
                del trainloader
            # # ----------------------------------------set C for multi-view DSVDD
            trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
            mvDSVDD = mvenc_fuse(input_size_set=input_size_set, layer_sizes=layer_size).to(device)
            ae_dict = model.state_dict()
            dsvdd_dict = mvDSVDD.state_dict()
            # keep only the pretrained weights whose names exist in the fused encoder
            ae_dict = {k: v for k, v in ae_dict.items() if k in dsvdd_dict}
            dsvdd_dict.update(ae_dict)
            mvDSVDD.load_state_dict(dsvdd_dict)
            mvDSVDD.eval()
            C_set = []
            R = torch.zeros(1).to(device)
            with torch.no_grad():
                for idx, batch in enumerate(tqdm(trainloader)):
                    batch = [batch[i].to(device) for i in range(len(batch))]
                    cur_batch_latent = mvDSVDD(batch)
                    C_set.append(cur_batch_latent.detach())
                C_set = torch.cat(C_set, dim=0)
                tmp = torch.mean(C_set, dim=0)
                # push near-zero center coordinates to +/-eps so the distance
                # objective cannot trivially collapse
                tmp[(abs(tmp) < eps) & (tmp < 0)] = -eps
                tmp[(abs(tmp) < eps) & (tmp > 0)] = eps
                C = tmp
            # -------------------------------------train the DSVDD
            epochs = epochs_set[dataset_name]
            losses = AverageMeter()
            optimizer = optim.Adam(mvDSVDD.parameters(), lr=lr, weight_decay=wd)
            mvDSVDD.train()
            cout = 0
            for epoch in range(max(epochs)):
                trainloader = tqdm(trainloader)
                if ADdataset_mode == 'majority':
                    trainloader.set_description('dsvdd_fuse, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                else:
                    trainloader.set_description('dsvdd_fuse, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                for idx, batch in enumerate(trainloader):
                    postfix = ''
                    if batch[0].size(0) > 1:  # for bn, batchsize > 1
                        batch = [batch[i].to(device) for i in range(len(batch))]
                        outputs = mvDSVDD(batch)
                        C = C.to(device)
                        dist = torch.sum((outputs - C) ** 2, dim=1)
                        if mode == 'soft_bound':
                            scores = dist - R ** 2
                            loss = R ** 2 + (1 / nu) * torch.mean(torch.max(torch.zeros_like(scores), scores))
                        else:
                            loss = torch.mean(dist)
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()
                        losses.update(loss.item(), batch[0].size(0))
                        if mode == 'soft_bound':
                            postfix += ' dd_loss: {:.4f} R: {:.4f} '.format(losses.avg, R.item())
                        else:
                            postfix += ' dd_loss: {:.4f} '.format(losses.avg)
                        if mode == 'soft_bound' and epoch >= warm_epochs:
                            R.data = torch.tensor(get_radius(dist, nu), device=device)
                        cout += 1
                        trainloader.set_postfix(log=postfix)
            # ----------------------------------------model inferrence
            mvDSVDD.eval()
            scores_set = []
            with torch.no_grad():
                for idx, batch in enumerate(tqdm(testloader)):
                    batch = [batch[i].to(device) for i in range(len(batch))]
                    cur_batch_outputs = mvDSVDD(batch)
                    # higher score = more normal: negative squared distance to C
                    scores = -1. * torch.sum((cur_batch_outputs - C) ** 2, dim=1)
                    scores_set.append(scores.detach())
                scores_set = torch.cat(scores_set, dim=0)
            scores = scores_set.cpu().detach().numpy()
            # ----------------------------------------model eval
            roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
            roc_results[t].append(roc)
            pr_anom_results[t].append(pr_anom)
            pr_norm_results[t].append(pr_norm)
            tnr_results[t].append(tnr)
            del model
    # save the results
    np.savez(file=results_path + dataset_name + '_dsvdd_fused', ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
    load_print_results(results_path + dataset_name + '_dsvdd_fused')
def mv_tf(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path, device, qualified_Y, ADdataset_mode='minority'):
    """Tensor-fusion (TF) multi-view autoencoder baseline for anomaly detection.

    For every fusion rank in the selected sweep and every repeat, one
    ``mvae_tf`` model is trained per qualified class (one-vs-rest split built
    by ``build_img_dataset``), scored on the held-out split, and the
    ROC / PR / TNR metrics are accumulated and dumped with ``np.savez``.

    :param dataset_name: key into the global hyper-parameter tables
        (``batch_size_set_tf``, ``lr_set_tf``, ``wd_set_tf``, ``epochs_set``,
        ``layer_size_set``, ``r_set_tf``).
    :param X: list of per-view feature matrices, one entry per view.
    :param Y: labels for the whole dataset.
    :param repeat_times: number of repeated runs per configuration.
    :param normal_train_ratio: unused here; kept for signature symmetry with
        the sibling baseline runners.
    :param results_path: output directory; ``'./results/'`` runs only the
        tuned rank, any other path sweeps ranks ``[4, 8, 16, 32, 64]``.
    :param device: torch device that the model and batches are moved to.
    :param qualified_Y: class ids with enough samples to form a split.
    :param ADdataset_mode: ``'minority'`` or ``'majority'``; forwarded to
        ``build_img_dataset`` and used for progress-bar wording.
    """
    torch.set_default_tensor_type(torch.DoubleTensor)
    roc_results = [[] for _ in range(repeat_times)]
    pr_anom_results = [[] for _ in range(repeat_times)]
    pr_norm_results = [[] for _ in range(repeat_times)]
    tnr_results = [[] for _ in range(repeat_times)]
    # training config
    batch_size = batch_size_set_tf[dataset_name]
    lr = lr_set_tf[dataset_name]
    wd = wd_set_tf[dataset_name]
    # BUG FIX: was `results_path in ['./results/']` -- a single-value
    # membership test; plain equality is the idiomatic form.
    if results_path == './results/':
        r_set = [r_set_tf[dataset_name]]  # tuned rank only
    else:
        r_set = [4, 8, 16, 32, 64]  # hyper-parameter sweep over the rank
    layer_size = layer_size_set[dataset_name]
    loss_func = MSELoss()
    input_size_set = [x.shape[0] for x in X]
    # NOTE(review): the result lists are shared across ranks, so in sweep mode
    # each per-rank file also contains the metrics of the ranks before it --
    # confirm this accumulation is intended.
    for r in r_set:
        for t in range(repeat_times):
            print('The {}-th run begins!'.format(t))
            # at most 10 one-vs-rest splits per dataset
            for i in range(min([10, len(qualified_Y)])):
                partition, Y_test = build_img_dataset(dataset_name, Y, neg_class_id=qualified_Y[i], mode=ADdataset_mode)
                X_train, X_test = process_ad_dataset(X, partition)
                # -----------------------------------------model training
                seed = 0
                if torch.cuda.is_available():
                    torch.cuda.manual_seed_all(seed)
                torch.manual_seed(seed)
                torch.backends.cudnn.deterministic = True
                # NOTE(review): benchmark=True lets cuDNN pick algorithms
                # non-deterministically, which can defeat the line above --
                # confirm this combination is intended.
                torch.backends.cudnn.benchmark = True
                model = mvae_tf(input_size_set=input_size_set, layer_sizes=layer_size, rank=r).to(device)
                trainloader = DataLoader(dataset=mv_dataset(X_train), batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=mv_tabular_collate)
                # specific training procedure
                epochs = epochs_set[dataset_name]
                losses_set = [AverageMeter() for _ in range(len(X_train))]
                optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wd)
                model.train()
                for epoch in range(max(epochs)):
                    trainloader = tqdm(trainloader)
                    # BUG FIX: was `ADdataset_mode is 'majority'` -- identity
                    # comparison of string literals is fragile and raises a
                    # SyntaxWarning on CPython >= 3.8; use equality.
                    if ADdataset_mode == 'majority':
                        trainloader.set_description('mvtf, epoch{}, abnormal class{}'.format(epoch, qualified_Y[i]))
                    else:
                        trainloader.set_description('mvtf, epoch{}, normal class{}'.format(epoch, qualified_Y[i]))
                    for batch in trainloader:
                        postfix = ''
                        if batch[0].size(0) > 1:  # for bn, batchsize > 1
                            batch = [view.to(device) for view in batch]
                            outputs_set = model(batch)
                            loss = torch.zeros(1).to(device)
                            for ll in range(len(batch)):
                                cur_view_loss = loss_func(outputs_set[ll], batch[ll])
                                # BUG FIX: record the Python scalar, not the live
                                # tensor -- storing the tensor keeps the whole
                                # autograd graph alive inside the meter and leaks
                                # memory (the DSVDD loop already uses .item()).
                                losses_set[ll].update(cur_view_loss.item(), batch[0].size(0))
                                postfix += ' view{}, rec_loss: {:.4f} '.format(ll, losses_set[ll].avg)
                                loss += cur_view_loss
                            optimizer.zero_grad()
                            loss.backward()
                            optimizer.step()
                        trainloader.set_postfix(log=postfix)
                # # ----------------------------------------model testing
                testloader = DataLoader(dataset=mv_dataset(X_test), batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=mv_tabular_collate)
                with torch.no_grad():
                    model.eval()
                    scores = [[] for _ in range(len(X_test))]
                    for batch in tqdm(testloader):
                        batch = [view.to(device) for view in batch]
                        cur_batch_scores = model.get_ad_scores(batch)
                        for ss in range(len(X_test)):
                            scores[ss].append(cur_batch_scores[ss])
                    scores = [torch.cat(scores[ss], dim=0) for ss in range(len(X_test))]
                    # average the per-view anomaly scores into a single score
                    scores = late_fusion(scores, merge='avg')
                # ----------------------------------------model eval
                roc, pr_anom, pr_norm, tnr = save_roc_pr_curve_data(scores=scores, labels=Y_test, file_path=None, verbose=False)
                roc_results[t].append(roc)
                pr_anom_results[t].append(pr_anom)
                pr_norm_results[t].append(pr_norm)
                tnr_results[t].append(tnr)
                del model
        # save the results
        if results_path == './results/':
            np.savez(file=results_path + dataset_name + '_tf', ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
            load_print_results(results_path + dataset_name + '_tf')
        else:
            np.savez(file=results_path + dataset_name + '_tf_r_{}'.format(r), ROC=roc_results, PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)
            load_print_results(results_path + dataset_name + '_tf_r_{}'.format(r))
if __name__ == '__main__':
    # Script entry point: run every multi-view anomaly-detection baseline on
    # each dataset in `dataset_set` and store the metrics under ./results/.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # ----------------------------------------prepare the dataset
    # Alternative benchmark suites (enable as needed):
    # dataset_set = ['oct', 'breast', 'retina', 'axial', 'coronal', 'sagittal', 'path', 'derma', 'pneumonia']
    # dataset_set = ['mvtec_bottle',
    #                'mvtec_cable', 'mvtec_capsule', 'mvtec_carpet', 'mvtec_grid', 'mvtec_hazelnut', 'mvtec_leather', 'mvtec_metal_nut', 'mvtec_pill',
    #                'mvtec_screw', 'mvtec_tile', 'mvtec_toothbrush', 'mvtec_transistor', 'mvtec_wood', 'mvtec_zipper']
    # dataset_set = ['cifar10', 'cifar100', 'mnist', 'fmnist', 'svhn']
    dataset_set = ['ytface']
    # -----------------------------------------methods
    for dataset_name in dataset_set:
        X, Y = read_dataset('./data/', dataset_name)
        # for i in range(len(X)):
        #     cur_X = X[i]
        #     idx = np.isnan(cur_X).sum()
        #     if idx > 0:
        #         print('dataset: {}, view: {} has NaN!'.format(dataset_name, i))
        min_class_num = 300  # a class must have at least this many samples to qualify
        normal_train_ratio = 0.7
        # BUG FIX: was `dataset_name[:5] in ['mvtec']` -- single-value
        # membership test replaced with plain equality.
        if dataset_name[:5] == 'mvtec':
            qualified_Y = get_large_class(Y, min_class_num=min_class_num, semantics=True)
        else:
            qualified_Y = get_large_class(Y, min_class_num=min_class_num)
        num_qualified_class = len(qualified_Y)
        repeat_times = 1  # train-test split is fixed
        ADdataset_mode = 'minority'
        if num_qualified_class > 0:
            print('Training dataset name: {}, qualified class number: {}/{}'.format(dataset_name, num_qualified_class, len(get_all_labels(Y))))
            # --------------------------------Baselines-----------------------------------
            simple_mvae(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            # --------------------------------Multi-view fusion based methods-------------
            # simple fusion:
            mv_ss(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode, mode='fuse', param_mode='max')
            mv_ss(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode, mode='fuse', param_mode='sum')
            mv_ss(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode, mode='fuse', param_mode='nn')
            # pre-trained AE based fusion
            mvae_fuse_latent(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            # DBN based
            try:
                mv_dbn(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            except Exception as exc:
                # BUG FIX: bare `except:` also swallowed SystemExit and
                # KeyboardInterrupt; narrow to Exception and surface the error
                # while keeping the best-effort (continue on failure) behavior.
                print('dataset: {} dbn error'.format(dataset_name))
                print(exc)
            # Tensor fusion network based methods
            mv_tf(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            # --------------------------------Multi-view alignment based methods----------
            mv_corrae(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            mv_sim(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            try:
                deepCCA(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            except Exception as exc:
                # BUG FIX: bare `except:` narrowed to Exception (see above).
                print('dataset: {} dcca error'.format(dataset_name))
                print(exc)
            try:
                dgcca(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            except Exception as exc:
                # BUG FIX: bare `except:` narrowed to Exception (see above).
                print('dataset: {} dgcca error'.format(dataset_name))
                print(exc)
            # -------------------------------Deep one-class learning based methods tailored for multi-view case-----
            simple_mvDSVDD(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            mvDSVDD_fused(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            # -------------------------------Self-supervision based methods---------------------------------------------
            mv_ss(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode, mode='pred',
                  param_mode='fixed')
            mv_ss(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./results/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode, mode='split',
                  param_mode='fixed')
            # # -------------------------For hyperparameter analysis-------------------------------
            #
            # mv_sim(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./analysis/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            #
            # mv_corrae(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./analysis/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            #
            # try:
            #     deepCCA(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./analysis/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            # except:
            #     print('dataset: {} dcca error'.format(dataset_name))
            #     pass
            #
            # try:
            #     dgcca(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./analysis/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
            # except:
            #     print('dataset: {} dgcca error'.format(dataset_name))
            #     pass
            #
            # mv_tf(dataset_name, X, Y, repeat_times, normal_train_ratio, results_path='./analysis/', device=device, qualified_Y=qualified_Y, ADdataset_mode=ADdataset_mode)
        # else:
        #     print('No qualified class in this dataset!')
| [
"models.encoder_decoder.mvae_ad",
"models.encoder_decoder.mvenc",
"torch.from_numpy",
"models.encoder_decoder.mvae_tf",
"torch.nn.MSELoss",
"models.encoder_decoder.mvae_ss",
"torch.cuda.is_available",
"torch.sum",
"util.process_ad_dataset",
"util.filter_nan_grad",
"models.DeepCCAModels.cca",
"... | [((1981, 2030), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (2010, 2030), False, 'import torch\n'), ((2818, 2827), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (2825, 2827), False, 'from torch.nn import MSELoss, CrossEntropyLoss\n'), ((8108, 8221), 'numpy.savez', 'np.savez', ([], {'file': 'file_name', 'ROC': 'roc_results', 'PR_norm': 'pr_norm_results', 'PR_anom': 'pr_anom_results', 'tnr': 'tnr_results'}), '(file=file_name, ROC=roc_results, PR_norm=pr_norm_results, PR_anom=\n pr_anom_results, tnr=tnr_results)\n', (8116, 8221), True, 'import numpy as np\n'), ((8222, 8251), 'util.load_print_results', 'load_print_results', (['file_name'], {}), '(file_name)\n', (8240, 8251), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((8323, 8489), 'numpy.savez', 'np.savez', ([], {'file': 'file_name', 'ROC': 'single_best_roc_results', 'PR_norm': 'single_best_pr_norm_results', 'PR_anom': 'single_best_pr_anom_results', 'tnr': 'single_best_tnr_results'}), '(file=file_name, ROC=single_best_roc_results, PR_norm=\n single_best_pr_norm_results, PR_anom=single_best_pr_anom_results, tnr=\n single_best_tnr_results)\n', (8331, 8489), True, 'import numpy as np\n'), ((8485, 8514), 'util.load_print_results', 'load_print_results', (['file_name'], {}), '(file_name)\n', (8503, 8514), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((8801, 8850), 
'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (8830, 8850), False, 'import torch\n'), ((9275, 9284), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (9282, 9284), False, 'from torch.nn import MSELoss, CrossEntropyLoss\n'), ((9305, 9336), 'models.DeepCCAModels.cca', 'cca', ([], {'outdim_size': 'layer_size[-1]'}), '(outdim_size=layer_size[-1])\n', (9308, 9336), False, 'from models.DeepCCAModels import DeepCCA, cca_loss, gcca, cca\n'), ((15354, 15403), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (15383, 15403), False, 'import torch\n'), ((15831, 15840), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (15838, 15840), False, 'from torch.nn import MSELoss, CrossEntropyLoss\n'), ((15862, 15894), 'models.DeepCCAModels.gcca', 'gcca', ([], {'outdim_size': 'layer_size[-1]'}), '(outdim_size=layer_size[-1])\n', (15866, 15894), False, 'from models.DeepCCAModels import DeepCCA, cca_loss, gcca, cca\n'), ((21442, 21491), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (21471, 21491), False, 'import torch\n'), ((21919, 21928), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (21926, 21928), False, 'from torch.nn import MSELoss, CrossEntropyLoss\n'), ((30556, 30697), 'numpy.savez', 'np.savez', ([], {'file': "(results_path + dataset_name + '_dsvdd')", 'ROC': 'roc_results', 'PR_norm': 'pr_norm_results', 'PR_anom': 'pr_anom_results', 'tnr': 'tnr_results'}), "(file=results_path + dataset_name + '_dsvdd', ROC=roc_results,\n PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)\n", (30564, 30697), True, 'import numpy as np\n'), ((30699, 30757), 'util.load_print_results', 'load_print_results', (["(results_path + dataset_name + '_dsvdd')"], {}), "(results_path + dataset_name + '_dsvdd')\n", (30717, 30757), False, 'from util import read_mymat73, 
read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((30895, 30944), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (30924, 30944), False, 'import torch\n'), ((31369, 31378), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (31376, 31378), False, 'from torch.nn import MSELoss, CrossEntropyLoss\n'), ((36857, 36906), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (36886, 36906), False, 'import torch\n'), ((37328, 37337), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (37335, 37337), False, 'from torch.nn import MSELoss, CrossEntropyLoss\n'), ((43305, 43354), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (43334, 43354), False, 'import torch\n'), ((43812, 43821), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (43819, 43821), False, 'from torch.nn import MSELoss, CrossEntropyLoss\n'), ((50027, 50173), 'numpy.savez', 'np.savez', ([], {'file': "(results_path + dataset_name + '_mvae_fused')", 'ROC': 'roc_results', 'PR_norm': 'pr_norm_results', 'PR_anom': 'pr_anom_results', 'tnr': 'tnr_results'}), "(file=results_path + dataset_name + '_mvae_fused', ROC=roc_results,\n PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)\n", (50035, 50173), True, 'import numpy as np\n'), ((50175, 50238), 'util.load_print_results', 'load_print_results', (["(results_path + dataset_name + '_mvae_fused')"], {}), "(results_path + dataset_name + '_mvae_fused')\n", (50193, 50238), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, 
save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((50374, 50423), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (50403, 50423), False, 'import torch\n'), ((52809, 52922), 'numpy.savez', 'np.savez', ([], {'file': 'file_name', 'ROC': 'roc_results', 'PR_norm': 'pr_norm_results', 'PR_anom': 'pr_anom_results', 'tnr': 'tnr_results'}), '(file=file_name, ROC=roc_results, PR_norm=pr_norm_results, PR_anom=\n pr_anom_results, tnr=tnr_results)\n', (52817, 52922), True, 'import numpy as np\n'), ((52923, 52952), 'util.load_print_results', 'load_print_results', (['file_name'], {}), '(file_name)\n', (52941, 52952), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((53091, 53140), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (53120, 53140), False, 'import torch\n'), ((53568, 53577), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (53575, 53577), False, 'from torch.nn import MSELoss, CrossEntropyLoss\n'), ((58272, 58321), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (58301, 58321), False, 'import torch\n'), ((58748, 58757), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (58755, 58757), False, 'from torch.nn import MSELoss, CrossEntropyLoss\n'), ((64608, 64657), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (64637, 64657), False, 'import torch\n'), ((65085, 
65094), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (65092, 65094), False, 'from torch.nn import MSELoss, CrossEntropyLoss\n'), ((72760, 72907), 'numpy.savez', 'np.savez', ([], {'file': "(results_path + dataset_name + '_dsvdd_fused')", 'ROC': 'roc_results', 'PR_norm': 'pr_norm_results', 'PR_anom': 'pr_anom_results', 'tnr': 'tnr_results'}), "(file=results_path + dataset_name + '_dsvdd_fused', ROC=roc_results,\n PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)\n", (72768, 72907), True, 'import numpy as np\n'), ((72909, 72973), 'util.load_print_results', 'load_print_results', (["(results_path + dataset_name + '_dsvdd_fused')"], {}), "(results_path + dataset_name + '_dsvdd_fused')\n", (72927, 72973), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((73107, 73156), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (73136, 73156), False, 'import torch\n'), ((73700, 73709), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (73707, 73709), False, 'from torch.nn import MSELoss, CrossEntropyLoss\n'), ((79141, 79178), 'util.read_dataset', 'read_dataset', (['"""./data/"""', 'dataset_name'], {}), "('./data/', dataset_name)\n", (79153, 79178), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((3042, 3131), 'util.build_img_dataset', 'build_img_dataset', (['dataset_name', 'Y'], {'neg_class_id': 'qualified_Y[i]', 'mode': 
'ADdataset_mode'}), '(dataset_name, Y, neg_class_id=qualified_Y[i], mode=\n ADdataset_mode)\n', (3059, 3131), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((3157, 3189), 'util.process_ad_dataset', 'process_ad_dataset', (['X', 'partition'], {}), '(X, partition)\n', (3175, 3189), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((3297, 3322), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3320, 3322), False, 'import torch\n'), ((3385, 3408), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3402, 3408), False, 'import torch\n'), ((7747, 7834), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), '(scores=scores, labels=Y_test, file_path=None,\n verbose=False)\n', (7769, 7834), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((14733, 14873), 'numpy.savez', 'np.savez', ([], {'file': "(results_path + dataset_name + '_dcca')", 'ROC': 'roc_results', 'PR_norm': 'pr_norm_results', 'PR_anom': 'pr_anom_results', 'tnr': 'tnr_results'}), "(file=results_path + dataset_name + '_dcca', 
ROC=roc_results,\n PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)\n", (14741, 14873), True, 'import numpy as np\n'), ((14883, 14940), 'util.load_print_results', 'load_print_results', (["(results_path + dataset_name + '_dcca')"], {}), "(results_path + dataset_name + '_dcca')\n", (14901, 14940), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((20808, 20949), 'numpy.savez', 'np.savez', ([], {'file': "(results_path + dataset_name + '_dgcca')", 'ROC': 'roc_results', 'PR_norm': 'pr_norm_results', 'PR_anom': 'pr_anom_results', 'tnr': 'tnr_results'}), "(file=results_path + dataset_name + '_dgcca', ROC=roc_results,\n PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)\n", (20816, 20949), True, 'import numpy as np\n'), ((20959, 21017), 'util.load_print_results', 'load_print_results', (["(results_path + dataset_name + '_dgcca')"], {}), "(results_path + dataset_name + '_dgcca')\n", (20977, 21017), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((22433, 22522), 'util.build_img_dataset', 'build_img_dataset', (['dataset_name', 'Y'], {'neg_class_id': 'qualified_Y[i]', 'mode': 'ADdataset_mode'}), '(dataset_name, Y, neg_class_id=qualified_Y[i], mode=\n ADdataset_mode)\n', (22450, 22522), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, 
filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((22548, 22580), 'util.process_ad_dataset', 'process_ad_dataset', (['X', 'partition'], {}), '(X, partition)\n', (22566, 22580), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((23010, 23035), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (23033, 23035), False, 'import torch\n'), ((23098, 23121), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (23115, 23121), False, 'import torch\n'), ((30104, 30140), 'models.encoder_decoder.late_fusion', 'late_fusion', (['scores_set'], {'merge': '"""avg"""'}), "(scores_set, merge='avg')\n", (30115, 30140), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((30248, 30335), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), '(scores=scores, labels=Y_test, file_path=None,\n verbose=False)\n', (30270, 30335), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((36227, 36369), 'numpy.savez', 'np.savez', ([], {'file': "(results_path + dataset_name + '_corrAE')", 'ROC': 'roc_results', 'PR_norm': 'pr_norm_results', 'PR_anom': 'pr_anom_results', 'tnr': 'tnr_results'}), 
"(file=results_path + dataset_name + '_corrAE', ROC=roc_results,\n PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)\n", (36235, 36369), True, 'import numpy as np\n'), ((36379, 36438), 'util.load_print_results', 'load_print_results', (["(results_path + dataset_name + '_corrAE')"], {}), "(results_path + dataset_name + '_corrAE')\n", (36397, 36438), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((44036, 44125), 'util.build_img_dataset', 'build_img_dataset', (['dataset_name', 'Y'], {'neg_class_id': 'qualified_Y[i]', 'mode': 'ADdataset_mode'}), '(dataset_name, Y, neg_class_id=qualified_Y[i], mode=\n ADdataset_mode)\n', (44053, 44125), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((44151, 44183), 'util.process_ad_dataset', 'process_ad_dataset', (['X', 'partition'], {}), '(X, partition)\n', (44169, 44183), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((44291, 44316), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (44314, 44316), False, 'import torch\n'), ((44379, 44402), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (44396, 44402), False, 'import torch\n'), 
((47448, 47462), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (47460, 47462), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((49719, 49806), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), '(scores=scores, labels=Y_test, file_path=None,\n verbose=False)\n', (49741, 49806), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((51219, 51308), 'util.build_img_dataset', 'build_img_dataset', (['dataset_name', 'Y'], {'neg_class_id': 'qualified_Y[i]', 'mode': 'ADdataset_mode'}), '(dataset_name, Y, neg_class_id=qualified_Y[i], mode=\n ADdataset_mode)\n', (51236, 51308), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((51334, 51392), 'util.process_ad_dataset', 'process_ad_dataset', (['X', 'partition'], {'normalization_range': '"""01"""'}), "(X, partition, normalization_range='01')\n", (51352, 51392), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, 
build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((51500, 51525), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (51523, 51525), False, 'import torch\n'), ((51588, 51611), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (51605, 51611), False, 'import torch\n'), ((51842, 51919), 'models.DBN.mv_DBN', 'mv_DBN', ([], {'input_sizes': 'input_size_set', 'layers': 'layer_size', 'fuse_dim': 'fuse_dim', 'k': 'k'}), '(input_sizes=input_size_set, layers=layer_size, fuse_dim=fuse_dim, k=k)\n', (51848, 51919), False, 'from models.DBN import mv_DBN\n'), ((52312, 52344), 'models.encoder_decoder.late_fusion', 'late_fusion', (['scores'], {'merge': '"""avg"""'}), "(scores, merge='avg')\n", (52323, 52344), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((52452, 52539), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), '(scores=scores, labels=Y_test, file_path=None,\n verbose=False)\n', (52474, 52539), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((53815, 53904), 'util.build_img_dataset', 'build_img_dataset', (['dataset_name', 'Y'], {'neg_class_id': 'qualified_Y[i]', 'mode': 'ADdataset_mode'}), '(dataset_name, Y, neg_class_id=qualified_Y[i], mode=\n ADdataset_mode)\n', (53832, 53904), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, 
load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((53930, 53962), 'util.process_ad_dataset', 'process_ad_dataset', (['X', 'partition'], {}), '(X, partition)\n', (53948, 53962), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((54070, 54095), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (54093, 54095), False, 'import torch\n'), ((54158, 54181), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (54175, 54181), False, 'import torch\n'), ((57552, 57639), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), '(scores=scores, labels=Y_test, file_path=None,\n verbose=False)\n', (57574, 57639), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((59250, 59339), 'util.build_img_dataset', 'build_img_dataset', (['dataset_name', 'Y'], {'neg_class_id': 'qualified_Y[i]', 'mode': 'ADdataset_mode'}), '(dataset_name, Y, neg_class_id=qualified_Y[i], mode=\n ADdataset_mode)\n', (59267, 59339), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, 
get_large_class, ss_goad_dataset\n'), ((59365, 59397), 'util.process_ad_dataset', 'process_ad_dataset', (['X', 'partition'], {}), '(X, partition)\n', (59383, 59397), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((59505, 59530), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (59528, 59530), False, 'import torch\n'), ((59593, 59616), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (59610, 59616), False, 'import torch\n'), ((63901, 63988), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), '(scores=scores, labels=Y_test, file_path=None,\n verbose=False)\n', (63923, 63988), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((65599, 65688), 'util.build_img_dataset', 'build_img_dataset', (['dataset_name', 'Y'], {'neg_class_id': 'qualified_Y[i]', 'mode': 'ADdataset_mode'}), '(dataset_name, Y, neg_class_id=qualified_Y[i], mode=\n ADdataset_mode)\n', (65616, 65688), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((65714, 65746), 'util.process_ad_dataset', 'process_ad_dataset', (['X', 
'partition'], {}), '(X, partition)\n', (65732, 65746), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((66176, 66201), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (66199, 66201), False, 'import torch\n'), ((66264, 66287), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (66281, 66287), False, 'import torch\n'), ((69788, 69802), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (69800, 69802), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((72452, 72539), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), '(scores=scores, labels=Y_test, file_path=None,\n verbose=False)\n', (72474, 72539), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((77794, 77933), 'numpy.savez', 'np.savez', ([], {'file': "(results_path + dataset_name + '_tf')", 'ROC': 'roc_results', 'PR_norm': 'pr_norm_results', 'PR_anom': 'pr_anom_results', 'tnr': 'tnr_results'}), "(file=results_path + dataset_name + '_tf', ROC=roc_results, PR_norm\n =pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)\n", (77802, 
77933), True, 'import numpy as np\n'), ((77942, 77997), 'util.load_print_results', 'load_print_results', (["(results_path + dataset_name + '_tf')"], {}), "(results_path + dataset_name + '_tf')\n", (77960, 77997), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((78369, 78394), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (78392, 78394), False, 'import torch\n'), ((79521, 79584), 'util.get_large_class', 'get_large_class', (['Y'], {'min_class_num': 'min_class_num', 'semantics': '(True)'}), '(Y, min_class_num=min_class_num, semantics=True)\n', (79536, 79584), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((79625, 79672), 'util.get_large_class', 'get_large_class', (['Y'], {'min_class_num': 'min_class_num'}), '(Y, min_class_num=min_class_num)\n', (79640, 79672), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((3340, 3372), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (3366, 3372), False, 'import torch\n'), ((3912, 3926), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3924, 3926), False, 'from util import read_mymat73, read_mymat, build_img_dataset, 
process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((4239, 4256), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (4243, 4256), False, 'from tqdm import tqdm\n'), ((6071, 6086), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6084, 6086), False, 'import torch\n'), ((6598, 6623), 'torch.cat', 'torch.cat', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (6607, 6623), False, 'import torch\n'), ((7607, 7639), 'models.encoder_decoder.late_fusion', 'late_fusion', (['scores'], {'merge': '"""avg"""'}), "(scores, merge='avg')\n", (7618, 7639), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((9744, 9833), 'util.build_img_dataset', 'build_img_dataset', (['dataset_name', 'Y'], {'neg_class_id': 'qualified_Y[i]', 'mode': 'ADdataset_mode'}), '(dataset_name, Y, neg_class_id=qualified_Y[i], mode=\n ADdataset_mode)\n', (9761, 9833), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((9863, 9895), 'util.process_ad_dataset', 'process_ad_dataset', (['X', 'partition'], {}), '(X, partition)\n', (9881, 9895), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), 
((10015, 10040), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10038, 10040), False, 'import torch\n'), ((10111, 10134), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (10128, 10134), False, 'import torch\n'), ((10636, 10650), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (10648, 10650), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((10680, 10694), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (10692, 10694), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((14350, 14437), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), '(scores=scores, labels=Y_test, file_path=None,\n verbose=False)\n', (14372, 14437), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((16303, 16392), 'util.build_img_dataset', 'build_img_dataset', (['dataset_name', 'Y'], {'neg_class_id': 'qualified_Y[i]', 'mode': 'ADdataset_mode'}), '(dataset_name, Y, neg_class_id=qualified_Y[i], mode=\n ADdataset_mode)\n', (16320, 16392), False, 'from util import read_mymat73, read_mymat, build_img_dataset, 
process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((16422, 16454), 'util.process_ad_dataset', 'process_ad_dataset', (['X', 'partition'], {}), '(X, partition)\n', (16440, 16454), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((16574, 16599), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16597, 16599), False, 'import torch\n'), ((16670, 16693), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (16687, 16693), False, 'import torch\n'), ((17196, 17210), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (17208, 17210), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((17240, 17254), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (17252, 17254), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((20425, 20512), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), 
'(scores=scores, labels=Y_test, file_path=None,\n verbose=False)\n', (20447, 20512), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((23053, 23085), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (23079, 23085), False, 'import torch\n'), ((26035, 26050), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (26048, 26050), False, 'import torch\n'), ((26852, 26866), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (26864, 26866), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((27145, 27162), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (27149, 27162), False, 'from tqdm import tqdm\n'), ((29543, 29558), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (29556, 29558), False, 'import torch\n'), ((31785, 31874), 'util.build_img_dataset', 'build_img_dataset', (['dataset_name', 'Y'], {'neg_class_id': 'qualified_Y[i]', 'mode': 'ADdataset_mode'}), '(dataset_name, Y, neg_class_id=qualified_Y[i], mode=\n ADdataset_mode)\n', (31802, 31874), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((31904, 31936), 'util.process_ad_dataset', 'process_ad_dataset', (['X', 
'partition'], {}), '(X, partition)\n', (31922, 31936), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((32056, 32081), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (32079, 32081), False, 'import torch\n'), ((32152, 32175), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (32169, 32175), False, 'import torch\n'), ((32677, 32691), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (32689, 32691), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((32722, 32736), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (32734, 32736), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((35844, 35931), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), '(scores=scores, labels=Y_test, file_path=None,\n verbose=False)\n', (35866, 35931), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, 
ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((42645, 42784), 'numpy.savez', 'np.savez', ([], {'file': "(results_path + dataset_name + '_sim')", 'ROC': 'roc_results', 'PR_norm': 'pr_norm_results', 'PR_anom': 'pr_anom_results', 'tnr': 'tnr_results'}), "(file=results_path + dataset_name + '_sim', ROC=roc_results,\n PR_norm=pr_norm_results, PR_anom=pr_anom_results, tnr=tnr_results)\n", (42653, 42784), True, 'import numpy as np\n'), ((42798, 42854), 'util.load_print_results', 'load_print_results', (["(results_path + dataset_name + '_sim')"], {}), "(results_path + dataset_name + '_sim')\n", (42816, 42854), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((44334, 44366), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (44360, 44366), False, 'import torch\n'), ((44975, 44989), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (44987, 44989), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((45340, 45357), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (45344, 45357), False, 'from tqdm import tqdm\n'), ((47661, 47678), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (47665, 47678), False, 'from tqdm import tqdm\n'), ((49032, 49047), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (49045, 49047), False, 'import torch\n'), ((49579, 49611), 'models.encoder_decoder.late_fusion', 'late_fusion', (['scores'], 
{'merge': '"""avg"""'}), "(scores, merge='avg')\n", (49590, 49611), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((51543, 51575), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (51569, 51575), False, 'import torch\n'), ((51943, 51971), 'torch.from_numpy', 'torch.from_numpy', (['X_train[_]'], {}), '(X_train[_])\n', (51959, 51971), False, 'import torch\n'), ((52185, 52212), 'torch.from_numpy', 'torch.from_numpy', (['X_test[_]'], {}), '(X_test[_])\n', (52201, 52212), False, 'import torch\n'), ((54113, 54145), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (54139, 54145), False, 'import torch\n'), ((54664, 54678), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (54676, 54678), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((54911, 54928), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (54915, 54928), False, 'from tqdm import tqdm\n'), ((56877, 56892), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (56890, 56892), False, 'import torch\n'), ((57412, 57444), 'models.encoder_decoder.late_fusion', 'late_fusion', (['scores'], {'merge': '"""avg"""'}), "(scores, merge='avg')\n", (57423, 57444), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((59548, 59580), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (59574, 59580), False, 'import torch\n'), ((60133, 60147), 
'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (60145, 60147), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((60359, 60376), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (60363, 60376), False, 'from tqdm import tqdm\n'), ((63226, 63241), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (63239, 63241), False, 'import torch\n'), ((63761, 63793), 'models.encoder_decoder.late_fusion', 'late_fusion', (['scores'], {'merge': '"""avg"""'}), "(scores, merge='avg')\n", (63772, 63793), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((66219, 66251), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (66245, 66251), False, 'import torch\n'), ((69148, 69163), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (69161, 69163), False, 'import torch\n'), ((69444, 69467), 'torch.cat', 'torch.cat', (['C_set'], {'dim': '(0)'}), '(C_set, dim=0)\n', (69453, 69467), False, 'import torch\n'), ((69490, 69514), 'torch.mean', 'torch.mean', (['C_set'], {'dim': '(0)'}), '(C_set, dim=0)\n', (69500, 69514), False, 'import torch\n'), ((70010, 70027), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (70014, 70027), False, 'from tqdm import tqdm\n'), ((71882, 71897), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (71895, 71897), False, 'import torch\n'), ((72261, 72289), 'torch.cat', 'torch.cat', (['scores_set'], {'dim': '(0)'}), '(scores_set, dim=0)\n', (72270, 72289), False, 'import torch\n'), ((73960, 74049), 'util.build_img_dataset', 'build_img_dataset', (['dataset_name', 'Y'], 
{'neg_class_id': 'qualified_Y[i]', 'mode': 'ADdataset_mode'}), '(dataset_name, Y, neg_class_id=qualified_Y[i], mode=\n ADdataset_mode)\n', (73977, 74049), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((74079, 74111), 'util.process_ad_dataset', 'process_ad_dataset', (['X', 'partition'], {}), '(X, partition)\n', (74097, 74111), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((74231, 74256), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (74254, 74256), False, 'import torch\n'), ((74327, 74350), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (74344, 74350), False, 'import torch\n'), ((77410, 77497), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), '(scores=scores, labels=Y_test, file_path=None,\n verbose=False)\n', (77432, 77497), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((3662, 3681), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (3672, 3681), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, 
mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((5895, 5913), 'util.mv_dataset', 'mv_dataset', (['X_test'], {}), '(X_test)\n', (5905, 5913), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((6161, 6177), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (6165, 6177), False, 'from tqdm import tqdm\n'), ((6463, 6491), 'torch.cat', 'torch.cat', (['scores[ss]'], {'dim': '(0)'}), '(scores[ss], dim=0)\n', (6472, 6491), False, 'import torch\n'), ((6937, 7034), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'cur_view_scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), '(scores=cur_view_scores, labels=Y_test, file_path=\n None, verbose=False)\n', (6959, 7034), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((10062, 10094), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (10088, 10094), False, 'import torch\n'), ((10917, 10934), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (10921, 10934), False, 'from tqdm import tqdm\n'), ((13631, 13646), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13644, 13646), False, 'import torch\n'), ((14202, 14234), 'models.encoder_decoder.late_fusion', 'late_fusion', (['scores'], 
{'merge': '"""avg"""'}), "(scores, merge='avg')\n", (14213, 14234), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((16621, 16653), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (16647, 16653), False, 'import torch\n'), ((17477, 17494), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (17481, 17494), False, 'from tqdm import tqdm\n'), ((19706, 19721), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19719, 19721), False, 'import torch\n'), ((20277, 20309), 'models.encoder_decoder.late_fusion', 'late_fusion', (['scores'], {'merge': '"""avg"""'}), "(scores, merge='avg')\n", (20288, 20309), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((22721, 22740), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (22731, 22740), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((22869, 22887), 'util.mv_dataset', 'mv_dataset', (['X_test'], {}), '(X_test)\n', (22879, 22887), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((23508, 23522), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (23520, 23522), False, 'from util import read_mymat73, read_mymat, 
build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((23816, 23833), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (23820, 23833), False, 'from tqdm import tqdm\n'), ((25409, 25428), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (25419, 25428), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((25535, 25595), 'models.encoder_decoder.mvenc', 'mvenc', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size'}), '(input_size_set=input_size_set, layer_sizes=layer_size)\n', (25540, 25595), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((26096, 26113), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (26100, 26113), False, 'from tqdm import tqdm\n'), ((26395, 26422), 'torch.cat', 'torch.cat', (['C_set[cc]'], {'dim': '(0)'}), '(C_set[cc], dim=0)\n', (26404, 26422), False, 'import torch\n'), ((26525, 26553), 'torch.mean', 'torch.mean', (['C_set[cc]'], {'dim': '(0)'}), '(C_set[cc], dim=0)\n', (26535, 26553), False, 'import torch\n'), ((29604, 29620), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (29608, 29620), False, 'from tqdm import tqdm\n'), ((30019, 30051), 'torch.cat', 'torch.cat', (['scores_set[ss]'], {'dim': '(0)'}), '(scores_set[ss], dim=0)\n', (30028, 30051), False, 'import torch\n'), ((32103, 32135), 'torch.cuda.manual_seed_all', 
'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (32129, 32135), False, 'import torch\n'), ((32960, 32977), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (32964, 32977), False, 'from tqdm import tqdm\n'), ((35125, 35140), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (35138, 35140), False, 'import torch\n'), ((35696, 35728), 'models.encoder_decoder.late_fusion', 'late_fusion', (['scores'], {'merge': '"""avg"""'}), "(scores, merge='avg')\n", (35707, 35728), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((37928, 38017), 'util.build_img_dataset', 'build_img_dataset', (['dataset_name', 'Y'], {'neg_class_id': 'qualified_Y[i]', 'mode': 'ADdataset_mode'}), '(dataset_name, Y, neg_class_id=qualified_Y[i], mode=\n ADdataset_mode)\n', (37945, 38017), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((38051, 38083), 'util.process_ad_dataset', 'process_ad_dataset', (['X', 'partition'], {}), '(X, partition)\n', (38069, 38083), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((38215, 38240), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (38238, 38240), False, 'import torch\n'), ((38319, 38342), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (38336, 38342), False, 'import torch\n'), ((38872, 38886), 
'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (38884, 38886), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((38920, 38934), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (38932, 38934), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((42230, 42317), 'util.save_roc_pr_curve_data', 'save_roc_pr_curve_data', ([], {'scores': 'scores', 'labels': 'Y_test', 'file_path': 'None', 'verbose': '(False)'}), '(scores=scores, labels=Y_test, file_path=None,\n verbose=False)\n', (42252, 42317), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((44656, 44675), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (44666, 44675), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((47277, 47296), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (47287, 47296), False, 'from util import read_mymat73, read_mymat, 
build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((48911, 48929), 'util.mv_dataset', 'mv_dataset', (['X_test'], {}), '(X_test)\n', (48921, 48929), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((49187, 49203), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (49191, 49203), False, 'from tqdm import tqdm\n'), ((49495, 49523), 'torch.cat', 'torch.cat', (['scores[ss]'], {'dim': '(0)'}), '(scores[ss], dim=0)\n', (49504, 49523), False, 'import torch\n'), ((54307, 54393), 'models.encoder_decoder.splitAE', 'splitAE', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size', 'dec_mode': 'dec_mode'}), '(input_size_set=input_size_set, layer_sizes=layer_size, dec_mode=\n dec_mode)\n', (54314, 54393), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((54445, 54464), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (54455, 54464), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((56756, 56774), 'util.mv_dataset', 'mv_dataset', (['X_test'], {}), '(X_test)\n', (56766, 56774), False, 'from util import 
read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((57026, 57042), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (57030, 57042), False, 'from tqdm import tqdm\n'), ((57328, 57356), 'torch.cat', 'torch.cat', (['scores[ss]'], {'dim': '(0)'}), '(scores[ss], dim=0)\n', (57337, 57356), False, 'import torch\n'), ((59742, 59862), 'models.encoder_decoder.mvae_ss', 'mvae_ss', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size', 'fuse_dim': 'fuse_dim', 'mode': 'mode', 'param_mode': 'param_mode'}), '(input_size_set=input_size_set, layer_sizes=layer_size, fuse_dim=\n fuse_dim, mode=mode, param_mode=param_mode)\n', (59749, 59862), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((59914, 59933), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (59924, 59933), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((63105, 63123), 'util.mv_dataset', 'mv_dataset', (['X_test'], {}), '(X_test)\n', (63115, 63123), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((63375, 63391), 'tqdm.tqdm', 
'tqdm', (['testloader'], {}), '(testloader)\n', (63379, 63391), False, 'from tqdm import tqdm\n'), ((63677, 63705), 'torch.cat', 'torch.cat', (['scores[ss]'], {'dim': '(0)'}), '(scores[ss], dim=0)\n', (63686, 63705), False, 'import torch\n'), ((65887, 65906), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (65897, 65906), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((66035, 66053), 'util.mv_dataset', 'mv_dataset', (['X_test'], {}), '(X_test)\n', (66045, 66053), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((66674, 66688), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (66686, 66688), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((66982, 66999), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (66986, 66999), False, 'from tqdm import tqdm\n'), ((68585, 68604), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (68595, 68604), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, 
build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((68711, 68776), 'models.encoder_decoder.mvenc_fuse', 'mvenc_fuse', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size'}), '(input_size_set=input_size_set, layer_sizes=layer_size)\n', (68721, 68776), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((69105, 69119), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (69116, 69119), False, 'import torch\n'), ((69209, 69226), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (69213, 69226), False, 'from tqdm import tqdm\n'), ((71943, 71959), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (71947, 71959), False, 'from tqdm import tqdm\n'), ((74278, 74310), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (74304, 74310), False, 'import torch\n'), ((74850, 74864), 'util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (74862, 74864), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((75092, 75109), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (75096, 75109), False, 'from tqdm import tqdm\n'), ((76691, 76706), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (76704, 76706), False, 'import torch\n'), ((77262, 77294), 'models.encoder_decoder.late_fusion', 'late_fusion', (['scores'], {'merge': '"""avg"""'}), "(scores, merge='avg')\n", (77273, 77294), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, 
mvae_tf, similarity_hinge_loss\n'), ((10406, 10425), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (10416, 10425), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((13506, 13524), 'util.mv_dataset', 'mv_dataset', (['X_test'], {}), '(X_test)\n', (13516, 13524), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((13792, 13808), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (13796, 13808), False, 'from tqdm import tqdm\n'), ((14114, 14142), 'torch.cat', 'torch.cat', (['scores[ss]'], {'dim': '(0)'}), '(scores[ss], dim=0)\n', (14123, 14142), False, 'import torch\n'), ((16965, 16984), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (16975, 16984), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((19581, 19599), 'util.mv_dataset', 'mv_dataset', (['X_test'], {}), '(X_test)\n', (19591, 19599), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, 
get_large_class, ss_goad_dataset\n'), ((19867, 19883), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (19871, 19883), False, 'from tqdm import tqdm\n'), ((20189, 20217), 'torch.cat', 'torch.cat', (['scores[ss]'], {'dim': '(0)'}), '(scores[ss], dim=0)\n', (20198, 20217), False, 'import torch\n'), ((23354, 23416), 'models.encoder_decoder.mvae_ad', 'mvae_ad', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size'}), '(input_size_set=input_size_set, layer_sizes=layer_size)\n', (23361, 23416), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((25961, 25975), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (25972, 25975), False, 'import torch\n'), ((32447, 32466), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (32457, 32466), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((35000, 35018), 'util.mv_dataset', 'mv_dataset', (['X_test'], {}), '(X_test)\n', (35010, 35018), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((35286, 35302), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (35290, 35302), False, 'from tqdm import tqdm\n'), ((35608, 35636), 'torch.cat', 'torch.cat', (['scores[ss]'], {'dim': '(0)'}), '(scores[ss], dim=0)\n', (35617, 35636), False, 'import torch\n'), ((38266, 38298), 
'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (38292, 38298), False, 'import torch\n'), ((39178, 39195), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (39182, 39195), False, 'from tqdm import tqdm\n'), ((41467, 41482), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (41480, 41482), False, 'import torch\n'), ((42074, 42106), 'models.encoder_decoder.late_fusion', 'late_fusion', (['scores'], {'merge': '"""avg"""'}), "(scores, merge='avg')\n", (42085, 42106), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((66520, 66582), 'models.encoder_decoder.mvae_ad', 'mvae_ad', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size'}), '(input_size_set=input_size_set, layer_sizes=layer_size)\n', (66527, 66582), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((70700, 70736), 'torch.sum', 'torch.sum', (['((outputs - C) ** 2)'], {'dim': '(1)'}), '((outputs - C) ** 2, dim=1)\n', (70709, 70736), False, 'import torch\n'), ((72129, 72175), 'torch.sum', 'torch.sum', (['((cur_batch_outputs - C) ** 2)'], {'dim': '(1)'}), '((cur_batch_outputs - C) ** 2, dim=1)\n', (72138, 72175), False, 'import torch\n'), ((74488, 74558), 'models.encoder_decoder.mvae_tf', 'mvae_tf', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size', 'rank': 'r'}), '(input_size_set=input_size_set, layer_sizes=layer_size, rank=r)\n', (74495, 74558), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((74619, 74638), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (74629, 74638), False, 'from util import 
read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((76566, 76584), 'util.mv_dataset', 'mv_dataset', (['X_test'], {}), '(X_test)\n', (76576, 76584), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((76852, 76868), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (76856, 76868), False, 'from tqdm import tqdm\n'), ((77174, 77202), 'torch.cat', 'torch.cat', (['scores[ss]'], {'dim': '(0)'}), '(scores[ss], dim=0)\n', (77183, 77202), False, 'import torch\n'), ((79970, 79987), 'util.get_all_labels', 'get_all_labels', (['Y'], {}), '(Y)\n', (79984, 79987), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((3534, 3596), 'models.encoder_decoder.mvae_ad', 'mvae_ad', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size'}), '(input_size_set=input_size_set, layer_sizes=layer_size)\n', (3541, 3596), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((19212, 19238), 'util.filter_nan_grad', 'filter_nan_grad', (['optimizer'], {}), '(optimizer)\n', (19227, 19238), False, 'from util import read_mymat73, 
read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((29847, 29923), 'torch.sum', 'torch.sum', (['((cur_batch_outputs[view] - C_set[view]) ** 2)'], {'dim': '(1)', 'keepdim': '(True)'}), '((cur_batch_outputs[view] - C_set[view]) ** 2, dim=1, keepdim=True)\n', (29856, 29923), False, 'import torch\n'), ((38630, 38649), 'util.mv_dataset', 'mv_dataset', (['X_train'], {}), '(X_train)\n', (38640, 38649), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((41338, 41356), 'util.mv_dataset', 'mv_dataset', (['X_test'], {}), '(X_test)\n', (41348, 41356), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((41640, 41656), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (41644, 41656), False, 'from tqdm import tqdm\n'), ((41982, 42010), 'torch.cat', 'torch.cat', (['scores[ss]'], {'dim': '(0)'}), '(scores[ss], dim=0)\n', (41991, 42010), False, 'import torch\n'), ((44528, 44590), 'models.encoder_decoder.mvae_ad', 'mvae_ad', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size'}), '(input_size_set=input_size_set, layer_sizes=layer_size)\n', (44535, 44590), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, 
classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((46824, 46913), 'models.encoder_decoder.mvae_fused', 'mvae_fused', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size', 'fuse_dim': 'fuse_dim'}), '(input_size_set=input_size_set, layer_sizes=layer_size, fuse_dim=\n fuse_dim)\n', (46834, 46913), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((71013, 71029), 'torch.mean', 'torch.mean', (['dist'], {}), '(dist)\n', (71023, 71029), False, 'import torch\n'), ((10272, 10336), 'models.encoder_decoder.mv_corrAE', 'mv_corrAE', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size'}), '(input_size_set=input_size_set, layer_sizes=layer_size)\n', (10281, 10336), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((13024, 13050), 'util.filter_nan_grad', 'filter_nan_grad', (['optimizer'], {}), '(optimizer)\n', (13039, 13050), False, 'from util import read_mymat73, read_mymat, build_img_dataset, process_ad_dataset, mv_dataset, mv_tabular_collate, AverageMeter, save_roc_pr_curve_data, get_all_labels, load_print_results, filter_nan_grad, read_dataset, build_vad_dataset, ss_dataset, ss_tabular_collate, simple_accuracy, get_large_class, ss_goad_dataset\n'), ((16831, 16895), 'models.encoder_decoder.mv_corrAE', 'mv_corrAE', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size'}), '(input_size_set=input_size_set, layer_sizes=layer_size)\n', (16840, 16895), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((28032, 28075), 'torch.sum', 'torch.sum', (['((cur_output - cur_c) ** 2)'], {'dim': '(1)'}), 
'((cur_output - cur_c) ** 2, dim=1)\n', (28041, 28075), False, 'import torch\n'), ((32313, 32377), 'models.encoder_decoder.mv_corrAE', 'mv_corrAE', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size'}), '(input_size_set=input_size_set, layer_sizes=layer_size)\n', (32322, 32377), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((48272, 48286), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (48283, 48286), False, 'import torch\n'), ((55559, 55573), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (55570, 55573), False, 'import torch\n'), ((18169, 18183), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (18180, 18183), False, 'import torch\n'), ((28412, 28428), 'torch.mean', 'torch.mean', (['dist'], {}), '(dist)\n', (28422, 28428), False, 'import torch\n'), ((33619, 33633), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (33630, 33633), False, 'import torch\n'), ((33685, 33699), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (33696, 33699), False, 'import torch\n'), ((38492, 38556), 'models.encoder_decoder.mv_corrAE', 'mv_corrAE', ([], {'input_size_set': 'input_size_set', 'layer_sizes': 'layer_size'}), '(input_size_set=input_size_set, layer_sizes=layer_size)\n', (38501, 38556), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((61106, 61120), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (61117, 61120), False, 'import torch\n'), ((75770, 75784), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (75781, 75784), False, 'import torch\n'), ((11621, 11635), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (11632, 11635), False, 'import torch\n'), ((11691, 11705), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (11702, 11705), 
False, 'import torch\n'), ((39871, 39885), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (39882, 39885), False, 'import torch\n'), ((39940, 39954), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (39951, 39954), False, 'import torch\n'), ((61798, 61812), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (61809, 61812), False, 'import torch\n'), ((18386, 18400), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (18397, 18400), False, 'import torch\n'), ((40430, 40491), 'models.encoder_decoder.similarity_hinge_loss', 'similarity_hinge_loss', (['latent_set[view]', 'latent_set[v_idx]', 'm'], {}), '(latent_set[view], latent_set[v_idx], m)\n', (40451, 40491), False, 'from models.encoder_decoder import mvae_ad, late_fusion, mvenc, mvae_fused, splitAE, mv_corrAE, classifier, tc_loss_func, mvae_ss, mvenc_fuse, mvae_tf, similarity_hinge_loss\n'), ((70913, 70937), 'torch.zeros_like', 'torch.zeros_like', (['scores'], {}), '(scores)\n', (70929, 70937), False, 'import torch\n'), ((28296, 28320), 'torch.zeros_like', 'torch.zeros_like', (['scores'], {}), '(scores)\n', (28312, 28320), False, 'import torch\n'), ((12405, 12419), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (12416, 12419), False, 'import torch\n')] |
# coding: utf-8
#########################################################################
# Name:
#
# Calcurate equivalent potential temperature.
#
# Usage:
# example:
#
# Author: <NAME>
# Date: 2021/08/13
#########################################################################
import argparse
#from datetime import datetime, timedelta
import math
#import metpy
from netCDF4 import Dataset
import numpy as np
import os
from os.path import join, abspath
import re
def parse_args():
    """Parse command-line arguments into a plain dict.

    Returns
    -------
    dict
        ``{"dir": <directory name, used as the initial time>}``
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-d", "--dir", help="set directory name.\
        This is the initial time.", type=str)
    options = arg_parser.parse_args()
    return {"dir": options.dir}
def mk_file_list(data_root_dir):
    """Return sorted absolute paths of the troposphere files in *data_root_dir*.

    Only directory entries whose name contains ``"troposphere-"`` are kept.
    """
    entries = os.listdir(data_root_dir)
    matched = [abspath(join(data_root_dir, name))
               for name in entries
               if "troposphere-" in name]
    return sorted(matched)
class CalcPhysics:
    """Compute derived physical quantities from a NetCDF troposphere file.

    Coordinates are restricted to the region 20 <= lat <= 60 and
    110 <= lon <= 180; out-of-region positions are kept as the sentinel
    value -99 so the original grid indices remain usable for masking.
    """

    def __init__(self, ncfile):
        self.ncfile = Dataset(ncfile)
        # Replace out-of-region coordinates with -99 (sentinel) instead of
        # dropping them, so list length matches the file's grid.
        self.lat = [i if 20 <= i <= 60 else -
                    99 for i in self.ncfile.variables['latitude']]
        self.lon = [i if 110 <= i <= 180 else -
                    99 for i in self.ncfile.variables['longitude']]

    def get_lat_lon(self):
        """Return the in-region latitudes and longitudes as 1-D numpy arrays."""
        lat = np.array([i for i in self.lat if i != -99])
        lon = np.array([i for i in self.lon if i != -99])
        return lat, lon

    def get_parameter(self, params, lat, lon):
        """Extract variable *params* restricted to the in-region grid.

        Parameters
        ----------
        params : str
            Name of the NetCDF variable (e.g. ``'t'`` or ``'r'``).
        lat, lon : numpy.ndarray
            In-region coordinates as returned by :meth:`get_lat_lon`.

        Returns
        -------
        numpy.ndarray
            3-D array with shape ``(levels, len(lat), len(lon))``.
        """
        data = []
        # Only the first (time) index of the variable is used.
        data_raw = self.ncfile.variables[params][0]
        for data_2d in data_raw:
            data_1d = [data_2d[i][j] for i in range(len(
                self.lat)) if self.lat[i] != -99 for j in range(len(self.lon)) if self.lon[j] != -99]
            # convert to 2D.
            data.append(np.array(data_1d).reshape(len(lat), len(lon)))
        return np.array(data)

    def to_celesius(self, kelvin):
        """Convert temperature from kelvin to degrees Celsius."""
        celesius = kelvin - 273.15
        return celesius

    def cal_saturated_water_vaport_pressure(self, temperature_c):
        """Saturation water-vapour pressure [hPa] via the Tetens formula.

        *temperature_c* is in degrees Celsius; the result is cached on
        ``self.swv_pressure_pha`` for use by :meth:`cal_vapor_pressure`.
        """
        self.swv_pressure_pha = 6.1078 * (10 ** (7.5 * temperature_c / (temperature_c + 237.3)))
        return self.swv_pressure_pha

    def cal_vapor_pressure(self, RH):
        """Actual vapour pressure [hPa] from relative humidity [%].

        Requires :meth:`cal_saturated_water_vaport_pressure` to have been
        called first; the result is cached on ``self.vapor_pressure_hpa``.
        """
        self.vapor_pressure_hpa = self.swv_pressure_pha * RH / 100
        return self.vapor_pressure_hpa

    def cal_dew_point(self, lat, lon):
        """Dew-point temperature [deg C] by inverting the Tetens formula.

        Requires :meth:`cal_vapor_pressure` to have been called first.
        Returns a 3-D array shaped like ``self.vapor_pressure_hpa``.
        """
        self.dew_point = []
        for v_p in self.vapor_pressure_hpa:
            # BUGFIX: the original tested "v_p[i][j] is np.nan", an identity
            # check that only matches the np.nan singleton object and misses
            # any NaN produced by computation.  Use np.isnan() instead.
            dew_point_1d = [np.nan if np.isnan(v_p[i][j]) else 237.3 * math.log((6.1078 / v_p[i][j]), 10) / (math.log((v_p[i][j] / 6.1078), 10) - 7.5) for i in range(len(lat)) for j in range(len(lon))]
            self.dew_point.append(np.array(dew_point_1d).reshape(len(lat), len(lon)))
        self.dew_point = np.array(self.dew_point)
        return self.dew_point
def display_test(value):
    """Print every element of a 2-D or 3-D array, one CSV-style line each.

    3-D input prints ``level,i,j,value`` lines; 2-D input prints
    ``i,j,value,`` lines.  Arrays with any other rank print nothing.

    The original implementation hard-coded the grid size (81 x 141) and
    raised IndexError for any other shape; iterate over the array's actual
    shape instead, which is backward compatible for 81 x 141 input.
    """
    if len(value.shape) == 3:
        for index, plane in enumerate(value):
            rows, cols = plane.shape
            for i in range(rows):
                for j in range(cols):
                    print("{},{},{},{}".format(index, i, j, plane[i][j]))
    elif len(value.shape) == 2:
        rows, cols = value.shape
        for i in range(rows):
            for j in range(cols):
                print("{},{},{},".format(i, j, value[i][j]))
def main():
    """Compute physical quantities for the first troposphere file found.

    The directory passed via ``-d`` names the initial time; its 10-digit
    timestamp is extracted but currently unused beyond assignment.
    """
    options = parse_args()
    root = join(abspath(options["dir"]))
    outname = re.search('[0-9]{10}', root).group()
    for nc_path in mk_file_list(root):
        physics = CalcPhysics(nc_path)
        lat, lon = physics.get_lat_lon()
        kelvin = physics.get_parameter('t', lat, lon)
        celsius = physics.to_celesius(kelvin)
        RH = physics.get_parameter('r', lat, lon)
        display_test(celsius)
        physics.cal_saturated_water_vaport_pressure(celsius)
        # NOTE: vapour-pressure / dew-point steps are currently disabled:
        #   physics.cal_vapor_pressure(RH)
        #   dew_point = physics.cal_dew_point(lat, lon)
        #   display_test(dew_point)
        break


if __name__ == "__main__":
    main()
| [
"os.listdir",
"argparse.ArgumentParser",
"netCDF4.Dataset",
"os.path.join",
"math.log",
"numpy.array",
"os.path.abspath",
"re.search"
] | [((498, 523), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (521, 523), False, 'import argparse\n'), ((1132, 1147), 'netCDF4.Dataset', 'Dataset', (['ncfile'], {}), '(ncfile)\n', (1139, 1147), False, 'from netCDF4 import Dataset\n'), ((1419, 1462), 'numpy.array', 'np.array', (['[i for i in self.lat if i != -99]'], {}), '([i for i in self.lat if i != -99])\n', (1427, 1462), True, 'import numpy as np\n'), ((1477, 1520), 'numpy.array', 'np.array', (['[i for i in self.lon if i != -99]'], {}), '([i for i in self.lon if i != -99])\n', (1485, 1520), True, 'import numpy as np\n'), ((1971, 1985), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1979, 1985), True, 'import numpy as np\n'), ((2837, 2861), 'numpy.array', 'np.array', (['self.dew_point'], {}), '(self.dew_point)\n', (2845, 2861), True, 'import numpy as np\n'), ((3283, 3303), 'os.path.abspath', 'abspath', (["args['dir']"], {}), "(args['dir'])\n", (3290, 3303), False, 'from os.path import join, abspath\n'), ((826, 846), 'os.path.join', 'join', (['par_dir', 'child'], {}), '(par_dir, child)\n', (830, 846), False, 'from os.path import join, abspath\n'), ((3319, 3356), 're.search', 're.search', (['"""[0-9]{10}"""', 'data_root_dir'], {}), "('[0-9]{10}', data_root_dir)\n", (3328, 3356), False, 'import re\n'), ((978, 1003), 'os.listdir', 'os.listdir', (['data_root_dir'], {}), '(data_root_dir)\n', (988, 1003), False, 'import os\n'), ((1909, 1926), 'numpy.array', 'np.array', (['data_1d'], {}), '(data_1d)\n', (1917, 1926), True, 'import numpy as np\n'), ((2760, 2782), 'numpy.array', 'np.array', (['dew_point_1d'], {}), '(dew_point_1d)\n', (2768, 2782), True, 'import numpy as np\n'), ((2599, 2631), 'math.log', 'math.log', (['(6.1078 / v_p[i][j])', '(10)'], {}), '(6.1078 / v_p[i][j], 10)\n', (2607, 2631), False, 'import math\n'), ((2635, 2667), 'math.log', 'math.log', (['(v_p[i][j] / 6.1078)', '(10)'], {}), '(v_p[i][j] / 6.1078, 10)\n', (2643, 2667), False, 'import math\n')] |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from numpy import asarray
from scipy.spatial import Voronoi
from scipy.spatial import Delaunay
__all__ = [
'delaunay_from_points_numpy',
'voronoi_from_points_numpy',
]
def delaunay_from_points_numpy(points):
    """Computes the delaunay triangulation for a list of points using Numpy.

    Parameters
    ----------
    points : sequence of tuple
        XYZ coordinates of the original points.

    Returns
    -------
    list
        The faces of the triangulation.
        Each face is a triplet of indices referring to the list of point coordinates.

    Notes
    -----
    Only the XY coordinates are used: the triangulation is computed in the
    plane and Z is ignored.  (The previous docstring documented ``boundary``
    and ``holes`` parameters that do not exist in the signature; they have
    been removed.)

    Examples
    --------
    >>>
    """
    xyz = asarray(points)
    d = Delaunay(xyz[:, 0:2])
    return d.simplices
def voronoi_from_points_numpy(points):
    """Generate a voronoi diagram from a set of points.

    Parameters
    ----------
    points : list of list of float
        XYZ coordinates of the voronoi sites.

    Returns
    -------
    Examples
    --------
    >>>
    """
    sites = asarray(points)
    return Voronoi(sites)
| [
"numpy.asarray",
"scipy.spatial.Voronoi",
"scipy.spatial.Delaunay"
] | [((955, 970), 'numpy.asarray', 'asarray', (['points'], {}), '(points)\n', (962, 970), False, 'from numpy import asarray\n'), ((979, 1000), 'scipy.spatial.Delaunay', 'Delaunay', (['xyz[:, 0:2]'], {}), '(xyz[:, 0:2])\n', (987, 1000), False, 'from scipy.spatial import Delaunay\n'), ((1315, 1330), 'numpy.asarray', 'asarray', (['points'], {}), '(points)\n', (1322, 1330), False, 'from numpy import asarray\n'), ((1345, 1360), 'scipy.spatial.Voronoi', 'Voronoi', (['points'], {}), '(points)\n', (1352, 1360), False, 'from scipy.spatial import Voronoi\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
def _test_gather_nd(test_case, device):
    """Check that flow.gather_nd picks whole rows selected by 1-element indices."""
    source = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    row_picks = np.array([[0], [2]])
    expected = np.array([[1, 2, 3], [7, 8, 9]])
    result = flow.gather_nd(
        flow.Tensor(source, dtype=flow.float, device=flow.device(device)),
        flow.Tensor(row_picks, dtype=flow.int, device=flow.device(device)),
    )
    test_case.assertTrue(np.array_equal(result.numpy(), expected))
def _test_gather_nd_t(test_case, device):
    """Check flow.gather_nd with full (row, col) indices: picks single elements."""
    source = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    coords = np.array([[0, 2], [2, 1]])
    expected = np.array([3, 8])
    result = flow.gather_nd(
        flow.Tensor(source, dtype=flow.float, device=flow.device(device)),
        flow.Tensor(coords, dtype=flow.int, device=flow.device(device)),
    )
    test_case.assertTrue(np.array_equal(result.numpy(), expected))
def _test_gather_nd_backward(test_case, device):
    """Backward of row-gather: gathered rows get gradient 1, others 0."""
    source = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    row_picks = np.array([[0], [2]])
    expected = np.array([[1, 2, 3], [7, 8, 9]])
    expected_grad = np.array([[1, 1, 1], [0, 0, 0], [1, 1, 1]])
    tracked_input = flow.Tensor(
        source, requires_grad=True, dtype=flow.float, device=flow.device(device)
    )
    result = flow.gather_nd(
        tracked_input, flow.Tensor(row_picks, dtype=flow.int, device=flow.device(device))
    )
    total = result.sum()
    total.backward()
    test_case.assertTrue(np.array_equal(result.numpy(), expected))
    test_case.assertTrue(np.array_equal(tracked_input.grad.numpy(), expected_grad))
def _test_gather_nd_backward_t(test_case, device):
    """Backward of element-gather: each picked element gets gradient 1."""
    source = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    coords = np.array([[0, 2], [2, 1]])
    expected = np.array([3, 8])
    expected_grad = np.array([[0, 0, 1], [0, 0, 0], [0, 1, 0]])
    tracked_input = flow.Tensor(
        source, requires_grad=True, dtype=flow.float, device=flow.device(device)
    )
    result = flow.gather_nd(
        tracked_input, flow.Tensor(coords, dtype=flow.int, device=flow.device(device))
    )
    total = result.sum()
    total.backward()
    test_case.assertTrue(np.array_equal(result.numpy(), expected))
    test_case.assertTrue(np.array_equal(tracked_input.grad.numpy(), expected_grad))
@flow.unittest.skip_unless_1n1d()
class TestGather_nd(flow.unittest.TestCase):
    def test_gather_nd(test_case):
        """Run every gather_nd scenario on both cpu and cuda devices."""
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_gather_nd,
            _test_gather_nd_t,
            _test_gather_nd_backward,
            _test_gather_nd_backward_t,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for test_fun, device in GenArgList(arg_dict):
            test_fun(test_case, device)
if __name__ == "__main__":
    # Discover and run the test cases above when executed as a script.
    unittest.main()
| [
"collections.OrderedDict",
"oneflow.experimental.unittest.skip_unless_1n1d",
"numpy.array",
"test_util.GenArgList",
"unittest.main",
"oneflow.experimental.device"
] | [((2909, 2941), 'oneflow.experimental.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2939, 2941), True, 'import oneflow.experimental as flow\n'), ((786, 829), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (794, 829), True, 'import numpy as np\n'), ((844, 864), 'numpy.array', 'np.array', (['[[0], [2]]'], {}), '([[0], [2]])\n', (852, 864), True, 'import numpy as np\n'), ((878, 910), 'numpy.array', 'np.array', (['[[1, 2, 3], [7, 8, 9]]'], {}), '([[1, 2, 3], [7, 8, 9]])\n', (886, 910), True, 'import numpy as np\n'), ((1215, 1258), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (1223, 1258), True, 'import numpy as np\n'), ((1273, 1299), 'numpy.array', 'np.array', (['[[0, 2], [2, 1]]'], {}), '([[0, 2], [2, 1]])\n', (1281, 1299), True, 'import numpy as np\n'), ((1313, 1329), 'numpy.array', 'np.array', (['[3, 8]'], {}), '([3, 8])\n', (1321, 1329), True, 'import numpy as np\n'), ((1641, 1684), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (1649, 1684), True, 'import numpy as np\n'), ((1699, 1719), 'numpy.array', 'np.array', (['[[0], [2]]'], {}), '([[0], [2]])\n', (1707, 1719), True, 'import numpy as np\n'), ((1733, 1765), 'numpy.array', 'np.array', (['[[1, 2, 3], [7, 8, 9]]'], {}), '([[1, 2, 3], [7, 8, 9]])\n', (1741, 1765), True, 'import numpy as np\n'), ((1781, 1824), 'numpy.array', 'np.array', (['[[1, 1, 1], [0, 0, 0], [1, 1, 1]]'], {}), '([[1, 1, 1], [0, 0, 0], [1, 1, 1]])\n', (1789, 1824), True, 'import numpy as np\n'), ((2311, 2354), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (2319, 2354), True, 'import numpy as np\n'), ((2369, 2395), 'numpy.array', 'np.array', (['[[0, 2], [2, 1]]'], {}), '([[0, 2], [2, 1]])\n', (2377, 2395), True, 'import numpy as np\n'), ((2409, 
2425), 'numpy.array', 'np.array', (['[3, 8]'], {}), '([3, 8])\n', (2417, 2425), True, 'import numpy as np\n'), ((2441, 2484), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 0, 0], [0, 1, 0]]'], {}), '([[0, 0, 1], [0, 0, 0], [0, 1, 0]])\n', (2449, 2484), True, 'import numpy as np\n'), ((3395, 3410), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3408, 3410), False, 'import unittest\n'), ((3041, 3054), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3052, 3054), False, 'from collections import OrderedDict\n'), ((3300, 3320), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3310, 3320), False, 'from test_util import GenArgList\n'), ((1913, 1932), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1924, 1932), True, 'import oneflow.experimental as flow\n'), ((2573, 2592), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2584, 2592), True, 'import oneflow.experimental as flow\n'), ((992, 1011), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1003, 1011), True, 'import oneflow.experimental as flow\n'), ((1066, 1085), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1077, 1085), True, 'import oneflow.experimental as flow\n'), ((1411, 1430), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1422, 1430), True, 'import oneflow.experimental as flow\n'), ((1485, 1504), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1496, 1504), True, 'import oneflow.experimental as flow\n'), ((2030, 2049), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2041, 2049), True, 'import oneflow.experimental as flow\n'), ((2690, 2709), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2701, 2709), True, 'import oneflow.experimental as flow\n')] |
from builtins import *
import argparse
import numpy as np
import os
from bnpy.ioutil.DataReader import loadDataFromSavedTask, loadLPKwargsFromDisk
from bnpy.ioutil.DataReader import loadKwargsFromDisk
from bnpy.ioutil.ModelReader import loadModelForLap
from bnpy.util import StateSeqUtil
from bnpy.birthmove.BCreateOneProposal import \
makeSummaryForBirthProposal_HTMLWrapper
import bnpy.birthmove.BLogger as BLogger
# Default settings for birth-move proposals.  tryBirthForTask overlays these
# with the task's saved args-birth.txt and then with any non-None keyword
# overrides supplied by the caller or the command line.
DefaultBirthArgs = dict(
    Kmax=100,
    b_nStuckBeforeQuit=10,
    b_creationProposalName='bregmankmeans',
    b_Kfresh=10,
    b_nRefineSteps=10,
    b_NiterForBregmanKMeans=1,
    b_minRespForEachTargetAtom=0.1,
    b_minNumAtomsInEachTargetDoc=50,
    b_minNumAtomsForNewComp=1,
    b_minNumAtomsForTargetComp=2,
    b_minPercChangeInNumAtomsToReactivate=0.01,
    b_cleanupWithMerge=0,
    b_cleanupMaxNumMergeIters=10,
    b_cleanupMaxNumAcceptPerIter=1,
    b_debugOutputDir='/tmp/',
    b_debugWriteHTML=1,
    b_method_xPi='normalized_counts',
    b_method_initCoordAscent='fromprevious',
    b_method_doInitCompleteLP=1,
    b_localStepSingleDoc='fast',
    )
def tryBirthForTask(
        taskoutpath=None,
        lap=None, lapFrac=0,
        targetUID=0,
        batchID=None,
        **kwargs):
    '''
    Attempt one birth-move proposal against a saved bnpy task.

    Parameters
    ----------
    taskoutpath : str
        Directory of a saved bnpy training run to load the model from.
    lap : float, optional
        Lap of the snapshot to load; when given it overrides lapFrac.
    lapFrac : float
        Fractional lap of the snapshot to load (default 0).
    targetUID : int
        UID of the cluster to target; a negative value asks
        findCompInModelWithLargestMisalignment to choose one.
    batchID : int, optional
        Specific data batch to load (None loads the default dataset).
    **kwargs
        Birth-argument overrides; only non-None values are applied.

    Post Condition
    --------------
    * Logging messages are printed.
    * HTML report is saved.
    '''
    if lap is not None:
        lapFrac = lap
    curModel, lapFrac = loadModelForLap(taskoutpath, lapFrac)
    Data = loadDataFromSavedTask(taskoutpath, batchID=batchID)
    LPkwargs = loadLPKwargsFromDisk(taskoutpath)
    SavedBirthKwargs = loadKwargsFromDisk(taskoutpath, 'args-birth.txt')
    if targetUID < 0:
        targetUID = findCompInModelWithLargestMisalignment(curModel, Data)
    # Precedence: defaults < saved task args < explicit non-None overrides.
    BirthArgs = dict(**DefaultBirthArgs)
    BirthArgs.update(SavedBirthKwargs)
    for key, val in list(kwargs.items()):
        if val is not None:
            BirthArgs[key] = val
            print('%s: %s' % (key, str(val)))
    curLP = curModel.calc_local_params(Data, **LPkwargs)
    curSS = curModel.get_global_suff_stats(
        Data, curLP,
        trackDocUsage=1, doPrecompEntropy=1, trackTruncationGrowth=1)
    curLscore = curModel.calc_evidence(SS=curSS)
    print("Target UID: %d" % (targetUID))
    print("Current count: %.2f" % (curSS.getCountForUID(targetUID)))
    # Build (and HTML-report) the proposal summary for the targeted cluster.
    xSS = makeSummaryForBirthProposal_HTMLWrapper(
        Data, curModel, curLP,
        curSSwhole=curSS,
        targetUID=int(targetUID),
        newUIDs=list(range(curSS.K, curSS.K + int(BirthArgs['b_Kfresh']))),
        LPkwargs=LPkwargs,
        lapFrac=lapFrac,
        dataName=Data.name,
        **BirthArgs)
    '''
    propModel, propSS = createBirthProposal(curModel, SS, xSS)
    didAccept, AcceptInfo = evaluateBirthProposal(
        curModel=curModel, curSS=curSS, propModel=propModel, propSS=propSS)
    '''
def findCompInModelWithLargestMisalignment(model, Data, Zref=None):
    ''' Finds cluster in model that is best candidate for a birth move.

    The candidate is the aligned cluster whose atoms disagree most (as a
    fraction) with the reference labeling Zref.

    Returns
    -------
    korig : int
        Index of the chosen cluster in the model's original ordering.

    Post Condition
    --------------
    Prints useful info to stdout.
    '''
    if Zref is None:
        Zref = Data.TrueParams['Z']
    LP = model.calc_local_params(Data)
    Z = LP['resp'].argmax(axis=1)
    AZ, AlignInfo = StateSeqUtil.alignEstimatedStateSeqToTruth(
        Z, Zref, returnInfo=1)
    maxK = AZ.max()
    # NOTE(review): aligned labels span 0..maxK inclusive, but the loop below
    # covers only 0..maxK-1, so the highest-numbered cluster is never scored.
    # Confirm whether that is intentional.
    dist = np.zeros(maxK)
    for k in range(maxK):
        mask = AZ == k
        nDisagree = np.sum(Zref[mask] != k)
        nTotal = mask.sum()
        # Small epsilon guards against division by zero for empty clusters.
        dist[k] = float(nDisagree) / (float(nTotal) + 1e-10)
        print(k, dist[k])
    ktarget = np.argmax(dist)
    korig = AlignInfo['AlignedToOrigMap'][ktarget]
    print('ktarget %d: %s' % (ktarget, chr(65+ktarget)))
    print('korig %d' % (korig))
    # Determine what is hiding inside of it that shouldnt be
    mask = AZ == ktarget
    nTarget = np.sum(mask)
    print('%d total atoms assigned to ktarget...' % (nTarget))
    trueLabels = np.asarray(np.unique(Zref[mask]), np.int32)
    for ll in trueLabels:
        nTrue = np.sum(Zref[mask] == ll)
        print('%d/%d should have true label %d: %s' % (
            nTrue, nTarget, ll, chr(65+ll)))
    return korig
if __name__ == '__main__':
    # Command-line entry point: load a saved bnpy task and try a birth move.
    parser = argparse.ArgumentParser()
    parser.add_argument('taskoutpath', type=str)
    parser.add_argument('--lap', type=float, default=None)
    parser.add_argument('--lapFrac', type=float, default=None)
    parser.add_argument('--outputdir', type=str, default='/tmp/')
    parser.add_argument('--targetUID', type=int, default=0)
    parser.add_argument('--batchID', type=int, default=None)
    # Every default birth argument is also exposed as an optional flag;
    # None means "keep the saved/default value".
    for key, val in list(DefaultBirthArgs.items()):
        parser.add_argument('--' + key, type=type(val), default=None)
    args = parser.parse_args()
    BLogger.configure(args.outputdir,
        doSaveToDisk=0,
        doWriteStdOut=1,
        stdoutLevel=0)
    tryBirthForTask(**args.__dict__)
| [
"bnpy.ioutil.ModelReader.loadModelForLap",
"bnpy.ioutil.DataReader.loadDataFromSavedTask",
"numpy.unique",
"argparse.ArgumentParser",
"bnpy.ioutil.DataReader.loadKwargsFromDisk",
"bnpy.util.StateSeqUtil.alignEstimatedStateSeqToTruth",
"bnpy.birthmove.BLogger.configure",
"numpy.argmax",
"numpy.sum",
... | [((1424, 1461), 'bnpy.ioutil.ModelReader.loadModelForLap', 'loadModelForLap', (['taskoutpath', 'lapFrac'], {}), '(taskoutpath, lapFrac)\n', (1439, 1461), False, 'from bnpy.ioutil.ModelReader import loadModelForLap\n'), ((1473, 1524), 'bnpy.ioutil.DataReader.loadDataFromSavedTask', 'loadDataFromSavedTask', (['taskoutpath'], {'batchID': 'batchID'}), '(taskoutpath, batchID=batchID)\n', (1494, 1524), False, 'from bnpy.ioutil.DataReader import loadDataFromSavedTask, loadLPKwargsFromDisk\n'), ((1541, 1574), 'bnpy.ioutil.DataReader.loadLPKwargsFromDisk', 'loadLPKwargsFromDisk', (['taskoutpath'], {}), '(taskoutpath)\n', (1561, 1574), False, 'from bnpy.ioutil.DataReader import loadDataFromSavedTask, loadLPKwargsFromDisk\n'), ((1598, 1647), 'bnpy.ioutil.DataReader.loadKwargsFromDisk', 'loadKwargsFromDisk', (['taskoutpath', '"""args-birth.txt"""'], {}), "(taskoutpath, 'args-birth.txt')\n", (1616, 1647), False, 'from bnpy.ioutil.DataReader import loadKwargsFromDisk\n'), ((3229, 3294), 'bnpy.util.StateSeqUtil.alignEstimatedStateSeqToTruth', 'StateSeqUtil.alignEstimatedStateSeqToTruth', (['Z', 'Zref'], {'returnInfo': '(1)'}), '(Z, Zref, returnInfo=1)\n', (3271, 3294), False, 'from bnpy.util import StateSeqUtil\n'), ((3335, 3349), 'numpy.zeros', 'np.zeros', (['maxK'], {}), '(maxK)\n', (3343, 3349), True, 'import numpy as np\n'), ((3572, 3587), 'numpy.argmax', 'np.argmax', (['dist'], {}), '(dist)\n', (3581, 3587), True, 'import numpy as np\n'), ((3828, 3840), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (3834, 3840), True, 'import numpy as np\n'), ((4191, 4216), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4214, 4216), False, 'import argparse\n'), ((4733, 4818), 'bnpy.birthmove.BLogger.configure', 'BLogger.configure', (['args.outputdir'], {'doSaveToDisk': '(0)', 'doWriteStdOut': '(1)', 'stdoutLevel': '(0)'}), '(args.outputdir, doSaveToDisk=0, doWriteStdOut=1,\n stdoutLevel=0)\n', (4750, 4818), True, 'import bnpy.birthmove.BLogger as 
BLogger\n'), ((3419, 3442), 'numpy.sum', 'np.sum', (['(Zref[mask] != k)'], {}), '(Zref[mask] != k)\n', (3425, 3442), True, 'import numpy as np\n'), ((3932, 3953), 'numpy.unique', 'np.unique', (['Zref[mask]'], {}), '(Zref[mask])\n', (3941, 3953), True, 'import numpy as np\n'), ((4007, 4031), 'numpy.sum', 'np.sum', (['(Zref[mask] == ll)'], {}), '(Zref[mask] == ll)\n', (4013, 4031), True, 'import numpy as np\n')] |
import numpy as np
import random
from FuncionAptitud import fitness
lista = [0, 1, 2, 3, 4, 5, 6, 7] # the values (board columns) a queen may occupy
poblacion = np.empty((50,8))  # population: 50 individuals of 8 queen positions each
# Seed every individual with a random permutation of the column values.
for i in range(50):
    random.shuffle(lista)
    for j in range(8):
        poblacion[i, j] = lista[j]
def padres(conjunto):
    """Pick two parents from the population by roulette-wheel selection.

    fitness counts queen attacks, so lower is better: scores are shifted
    by the worst (maximum) score, normalized into a probability
    distribution, accumulated, and two individuals are drawn using two
    uniform random numbers.

    Parameters
    ----------
    conjunto : numpy.ndarray
        Population matrix of shape (50, 8); each row is an individual.

    Returns
    -------
    tuple
        The two selected parent rows.
    """
    r1 = random.random()
    r2 = random.random()
    attack_counts = []
    for idx in range(50):
        attack_counts.append(fitness(conjunto[idx, :]))
    worst = max(attack_counts)
    # Shift so the worst individual scores 0; everything else is negative,
    # and dividing by the (negative) total yields positive probabilities.
    shifted = [score - worst for score in attack_counts]
    total = sum(shifted)
    cumulative = [value / total for value in shifted]
    for idx in range(1, 50):
        cumulative[idx] = cumulative[idx] + cumulative[idx - 1]
    # First individual whose cumulative probability reaches each draw wins.
    first_candidates = [conjunto[idx] for idx in range(50) if cumulative[idx] >= r1]
    papa1 = first_candidates[0]
    second_candidates = [conjunto[idx] for idx in range(50) if cumulative[idx] >= r2]
    papa2 = second_candidates[0]
    return papa1, papa2
def selec_padres(poblacion):
    """Build a pool of 100 parents by sampling 50 pairs with `padres`.

    Parameters
    ----------
    poblacion : numpy.ndarray
        Population matrix of shape (50, 8).

    Returns
    -------
    numpy.ndarray
        Matrix of shape (100, 8); consecutive rows are selected pairs.
    """
    parent_pool = np.empty((100, 8))
    for slot in range(0, 100, 2):
        first_parent, second_parent = padres(poblacion)
        parent_pool[slot] = first_parent
        parent_pool[slot + 1] = second_parent
    return parent_pool
| [
"FuncionAptitud.fitness",
"random.random",
"numpy.empty",
"random.shuffle"
] | [((165, 182), 'numpy.empty', 'np.empty', (['(50, 8)'], {}), '((50, 8))\n', (173, 182), True, 'import numpy as np\n'), ((206, 227), 'random.shuffle', 'random.shuffle', (['lista'], {}), '(lista)\n', (220, 227), False, 'import random\n'), ((318, 333), 'random.random', 'random.random', ([], {}), '()\n', (331, 333), False, 'import random\n'), ((343, 358), 'random.random', 'random.random', ([], {}), '()\n', (356, 358), False, 'import random\n'), ((1360, 1378), 'numpy.empty', 'np.empty', (['(100, 8)'], {}), '((100, 8))\n', (1368, 1378), True, 'import numpy as np\n'), ((519, 542), 'FuncionAptitud.fitness', 'fitness', (['conjunto[i, :]'], {}), '(conjunto[i, :])\n', (526, 542), False, 'from FuncionAptitud import fitness\n')] |
from __future__ import absolute_import
from __future__ import print_function
from pysnptools.util.mapreduce1.runner import *
import logging
import fastlmm.pyplink.plink as plink
import pysnptools.util as pstutil
import pysnptools.util.pheno as pstpheno
import numpy as np
from fastlmm.inference import LMM
import scipy.stats as stats
from pysnptools.snpreader import Bed
from fastlmm.util.pickle_io import load, save
import time
import pandas as pd
from six.moves import range
def epistasis(test_snps,pheno,G0, G1=None, mixing=0.0, covar=None,output_file_name=None,sid_list_0=None,sid_list_1=None,
                 log_delta=None, min_log_delta=-5, max_log_delta=10,
                 cache_file = None,
                 runner=None, count_A1=None):
    """
    Function performing epistasis GWAS with ML (never REML). See http://www.nature.com/srep/2013/130122/srep01099/full/srep01099.html.

    :param test_snps: SNPs from which to test pairs. If you give a string, it should be the base name of a set of PLINK Bed-formatted files.
    :type test_snps: a `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string

    :param pheno: A single phenotype: A 'pheno dictionary' contains an ndarray on the 'vals' key and a iid list on the 'iid' key.
      If you give a string, it should be the file name of a PLINK phenotype-formatted file.
    :type pheno: a 'pheno dictionary' or a string

    :param G0: SNPs from which to construct a similarity matrix.
          If you give a string, it should be the base name of a set of PLINK Bed-formatted files.
    :type G0: a `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string

    :param G1: SNPs from which to construct a second similarity kernel, optional. Also, see 'mixing').
          If you give a string, it should be the base name of a set of PLINK Bed-formatted files.
    :type G1: a `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string

    :param mixing: Weight between 0.0 (inclusive, default) and 1.1 (inclusive) given to G1 relative to G0.
            If you give no mixing number, G0 will get all the weight and G1 will be ignored.
    :type mixing: number

    :param covar: covariate information, optional: A 'pheno dictionary' contains an ndarray on the 'vals' key and a iid list on the 'iid' key.
      If you give a string, it should be the file name of a PLINK phenotype-formatted file.
    :type covar: a 'pheno dictionary' or a string

    :param sid_list_0: list of sids, optional:
            All unique pairs from sid_list_0 x sid_list_1 will be evaluated.
            If you give no sid_list_0, all sids in test_snps will be used.
    :type sid_list_0: list of strings

    :param sid_list_1: list of sids, optional:
            All unique pairs from sid_list_0 x sid_list_1 will be evaluated.
            If you give no sid_list_1, all sids in test_snps will be used.
    :type sid_list_1: list of strings

    :param output_file_name: Name of file to write results to, optional. If not given, no output file will be created. The output format is tab-delimited text.
    :type output_file_name: file name

    :param log_delta: A parameter to LMM learning, optional
            If not given will search for best value.
    :type log_delta: number

    :param min_log_delta: (default:-5)
            When searching for log_delta, the lower bounds of the search.
    :type min_log_delta: number

    :param max_log_delta: (default:10)
            When searching for log_delta, the upper bounds of the search.
    :type max_log_delta: number

    :param cache_file: Name of file to read or write cached precomputation values to, optional.
                If not given, no cache file will be used.
                If given and file does not exists, will write precomputation values to file.
                If given and file does exists, will read precomputation values from file.
                The file contains the U and S matrix from the decomposition of the training matrix. It is in Python's np.savez (\*.npz) format.
                Calls using the same cache file should have the same 'G0' and 'G1'
    :type cache_file: file name

    :param runner: a `Runner <http://fastlmm.github.io/PySnpTools/#util-mapreduce1-runner-runner>`__, optional: Tells how to run locally, multi-processor, or on a cluster.
        If not given, the function is run locally.
    :type runner: `Runner <http://fastlmm.github.io/PySnpTools/#util-mapreduce1-runner-runner>`__

    :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1
         alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.
    :type count_A1: bool

    :rtype: Pandas dataframe with one row per SNP pair. Columns include "PValue"

    :Example:

    >>> from __future__ import print_function #Python 2 & 3 compatibility
    >>> import logging
    >>> from pysnptools.snpreader import Bed
    >>> from fastlmm.association import epistasis
    >>> logging.basicConfig(level=logging.INFO)
    >>> test_snps = Bed('../../tests/datasets/all_chr.maf0.001.N300',count_A1=True)
    >>> pheno = '../../tests/datasets/phenSynthFrom22.23.N300.randcidorder.txt'
    >>> covar = '../../tests/datasets/all_chr.maf0.001.covariates.N300.txt'
    >>> results_dataframe = epistasis(test_snps, pheno, G0=test_snps, covar=covar,
    ...                                 sid_list_0=test_snps.sid[:10], #first 10 snps
    ...                                 sid_list_1=test_snps.sid[5:15], #Skip 5 snps, use next 10
    ...                                 count_A1=False)
    >>> print(results_dataframe.iloc[0].SNP0, results_dataframe.iloc[0].SNP1,round(results_dataframe.iloc[0].PValue,5),len(results_dataframe))
    1_12 1_9 0.07779 85

    """
    if runner is None:
        runner = Local()  # default: run in-process on this machine
    # NOTE: this local name shadows the module-level `epistasis` function for
    # the remainder of this call; it holds the distributable work object.
    epistasis = _Epistasis(test_snps, pheno, G0, G1, mixing, covar, sid_list_0, sid_list_1, log_delta, min_log_delta, max_log_delta, output_file_name, cache_file, count_A1=count_A1)
    logging.info("# of pairs is {0}".format(epistasis.pair_count))
    epistasis.fill_in_cache_file()
    result = runner.run(epistasis)
    return result
def write(sid0_list, sid1_list, pvalue_list, output_file):
    """
    Given three arrays of the same length [as per the output of epistasis(...)], writes a header and the values to the given output file.

    :param sid0_list: first sid of each tested pair
    :param sid1_list: second sid of each tested pair
    :param pvalue_list: p-value of each tested pair
    :param output_file: path of the tab-delimited text file to create
    """
    with open(output_file, "w") as out_fp:
        out_fp.write("{0}\t{1}\t{2}\n".format("sid0", "sid1", "pvalue"))
        # zip iterates the three parallel lists in lockstep instead of
        # indexing by position.
        for sid0, sid1, pvalue in zip(sid0_list, sid1_list, pvalue_list):
            out_fp.write("{0}\t{1}\t{2}\n".format(sid0, sid1, pvalue))
# could this be written without the inside-out of IDistributable?
class _Epistasis(object) : #implements IDistributable
    def __init__(self, test_snps, pheno, G0, G1=None, mixing=0.0, covar=None,sid_list_0=None,sid_list_1=None,
                 log_delta=None, min_log_delta=-5, max_log_delta=10, output_file=None, cache_file=None, count_A1=None):
        """Store all inputs; heavy loading is deferred to _run_once.

        Inputs may be file names (loaded lazily) or already-loaded objects.
        """
        self._ran_once = False
        self.test_snps = test_snps
        self.pheno = pheno
        self.output_file_or_none = output_file
        self.cache_file = cache_file
        self.count_A1 = count_A1
        self.covar = covar
        # Normalize sid lists to string arrays; None means "use all sids".
        self.sid_list_0 = np.array(sid_list_0,dtype='str') if sid_list_0 is not None else None
        self.sid_list_1 = np.array(sid_list_1,dtype='str') if sid_list_1 is not None else None
        self.G0=G0
        self.G1_or_none=G1
        self.mixing=mixing
        self.external_log_delta=log_delta
        self.min_log_delta = min_log_delta
        self.max_log_delta = max_log_delta
        # Human-readable description, also used as the cluster job name.
        self._str = "{0}({1},{2},G0={6},G1={7},mixing={8},covar={3},output_file={12},sid_list_0={4},sid_list_1{5},log_delta={9},min_log_delta={10},max_log_delta={11},cache_file={13})".format(
            self.__class__.__name__, self.test_snps,self.pheno,self.covar,self.sid_list_0,self.sid_list_1,
            self.G0, self.G1_or_none, self.mixing, self.external_log_delta, self.min_log_delta, self.max_log_delta, output_file, cache_file)
        self.block_size = 1000
def order_by_test_snps(self, sid_sequence):
return self.test_snps.sid[sorted(self.test_snps.sid_to_index(sid_sequence))]
    def set_sid_sets(self):
        """Partition the two sid lists into disjoint pieces and count pairs.

        Defines (all in test_snps order):
          intersect   -- sids present in both lists
          just_sid_0  -- sids only in sid_list_0
          just_sid_1  -- sids only in sid_list_1
          _pair_count -- total number of distinct pairs to test
        Also intersects all data sources down to their shared iids.
        """
        sid_set_0 = set(self.sid_list_0)
        self.intersect = self.order_by_test_snps(sid_set_0.intersection(self.sid_list_1))
        self.just_sid_0 = self.order_by_test_snps(sid_set_0.difference(self.intersect))
        self.just_sid_1 = self.order_by_test_snps(set(self.intersect).symmetric_difference(self.sid_list_1))
        # Pairs = just0 x intersect + just0 x just1 + intersect x just1
        #         + unordered pairs within intersect itself.
        self._pair_count = len(self.just_sid_0)*len(self.intersect) + len(self.just_sid_0)*len(self.just_sid_1) + len(self.intersect)*len(self.just_sid_1) + len(self.intersect) * (len(self.intersect)-1)//2
        self.test_snps, self.pheno, self.covar, self.G0, self.G1_or_none = pstutil.intersect_apply([self.test_snps, self.pheno, self.covar, self.G0, self.G1_or_none]) #should put G0 and G1 first
    def _run_once(self):
        """Lazily load all inputs from disk and derive working state (idempotent).

        Strings are treated as file names and replaced by loaded objects;
        covariates get a constant column appended for the bias term.
        """
        if self._ran_once:
            return
        # None marks "in progress" so is still falsy if re-entered.
        self._ran_once = None
        if isinstance(self.test_snps, str):
            self.test_snps = Bed(self.test_snps,count_A1=self.count_A1)
        if isinstance(self.G0, str):
            self.G0 = Bed(self.G0,count_A1=self.count_A1)
        if isinstance(self.pheno, str):
            self.pheno = pstpheno.loadOnePhen(self.pheno,vectorize=True,missing='NaN')
        if self.covar is not None and isinstance(self.covar, str):
            self.covar = pstpheno.loadPhen(self.covar,missing='NaN')
        if self.G1_or_none is not None and isinstance(self.G1_or_none, str):
            self.G1_or_none = Bed(self.G1_or_none,count_A1=self.count_A1)
        # Default: test all sids against all sids.
        if self.sid_list_0 is None:
            self.sid_list_0 = self.test_snps.sid
        if self.sid_list_1 is None:
            self.sid_list_1 = self.test_snps.sid
        self.set_sid_sets()
        #!!Should fix up to add only of no constant columns - will need to add a test case for this
        if self.covar is None:
            self.covar = np.ones((self.test_snps.iid_count, 1))
        else:
            self.covar = np.hstack((self.covar['vals'],np.ones((self.test_snps.iid_count, 1))))
        self.n_cov = self.covar.shape[1]
        if self.output_file_or_none is None:
            self.__tempdirectory = ".working"
        else:
            self.__tempdirectory = self.output_file_or_none + ".working"
        self._ran_once = True
#start of IDistributable interface--------------------------------------
@property
def work_count(self):
self._run_once()
block_count = self.div_ceil(self._pair_count, self.block_size)
return block_count
def work_sequence(self):
self._run_once()
return self.work_sequence_range(0,self.work_count)
    def work_sequence_range(self, start, end):
        """Yield a zero-argument callable (one work item) per block in [start, end).

        The LMM eigen-decomposition is loaded once from the cache file and
        shared by every yielded item.
        """
        self._run_once()
        lmm = self.lmm_from_cache_file()
        lmm.sety(self.pheno['vals'])
        for sid0_list, sid1_list in self.pair_block_sequence_range(start,end):
            yield lambda lmm=lmm,sid0_list=sid0_list,sid1_list=sid1_list : self.do_work(lmm,sid0_list,sid1_list) # default args freeze the current loop values; a plain closure would late-bind them
    def reduce(self, result_sequence):
        """Concatenate per-block result frames, sort by PValue, optionally save.

        Does not require _run_once; operates purely on the given frames.
        """
        #doesn't need "run_once()"
        frame = pd.concat(result_sequence)
        frame.sort_values(by="PValue", inplace=True)
        frame.index = np.arange(len(frame))
        if self.output_file_or_none is not None:
            frame.to_csv(self.output_file_or_none, sep="\t", index=False)
        return frame

    #!!Find a place to output info like this near the end of the run
    #logging.info("PhenotypeName\t{0}".format(pheno['header']))
    #logging.info("SampleSize\t{0}".format(test_snps.iid_count))
    #logging.info("SNPCount\t{0}".format(test_snps.sid_count))
    #logging.info("Runtime\t{0}".format(time.time()-t0))
    @property
    def tempdirectory(self):
        """Working directory for intermediate files (set in _run_once)."""
        self._run_once()
        return self.__tempdirectory
#optional override -- the str name of the instance is used by the cluster as the job name
def __str__(self):
#Doesn't need run_once
return self._str
def copyinputs(self, copier):
self._run_once()
if isinstance(self.test_snps, str):
copier.input(self.test_snps + ".bed")
copier.input(self.test_snps + ".bim")
copier.input(self.test_snps + ".fam")
else:
copier.input(self.test_snps)
copier.input(self.pheno)
copier.input(self.covar)
if isinstance(self.G0, str):
copier.input(self.G0 + ".bed")
copier.input(self.G0 + ".bim")
copier.input(self.G0 + ".fam")
else:
copier.input(self.G0)
copier.input(self.G1_or_none)
copier.input(self.cache_file)
def copyoutputs(self,copier):
#Doesn't need run_once
copier.output(self.output_file_or_none)
#end of IDistributable interface---------------------------------------
@staticmethod
def div_ceil(num, den): #!!move to utils?
return -(-num//den) #The -/- trick makes it do ceiling instead of floor. "//" will do integer division even in the future and on floats.
    def pair_block_sequence_range(self,block_start,block_end):
        """Yield (sid0_list, sid1_list) batches for blocks [block_start, block_end).

        The full pair sequence is divided into work_count nearly-equal blocks;
        each yielded batch contains the pairs of exactly one block.
        """
        self._run_once()
        assert 0 <= block_start and block_start <= block_end and block_end <= self.work_count, "real assert"

        block_index = block_start
        # Pair offsets delimiting the current block within the full sequence.
        start = block_index * self.pair_count // self.work_count
        next_start = (block_index+1) * self.pair_count // self.work_count
        size_goal = next_start - start
        end = block_end * self.pair_count // self.work_count

        sid0_list = []
        sid1_list = []
        for sid0, sid1 in self.pair_sequence_range(start,end):
            sid0_list.append(sid0)
            sid1_list.append(sid1)
            if len(sid0_list) == size_goal:
                # Block complete: emit it and set up the next one.
                yield sid0_list, sid1_list
                block_index += 1
                if block_index == block_end:
                    return
                sid0_list = []
                sid1_list = []
                start = next_start
                next_start = (block_index+1) * self.pair_count // self.work_count
                size_goal = next_start - start
        assert len(sid0_list) == 0, "real assert"
        #If start == end, then returns without yielding anything
    def pair_sequence_range(self,start,end):
        """Yield exactly the pairs with positions [start, end) in the full sequence."""
        self._run_once()
        assert 0 <= start and start <= end and end <= self._pair_count, "real assert"

        i = start
        for sid0, sid1 in self.pair_sequence_with_start(start):
            yield sid0, sid1
            i = i + 1
            if i == end:
                break
        assert i == end, "Not enough items found. Didn't get to the end"
    def pair_sequence_with_start(self,start):
        """Yield all remaining pairs after skipping the first `start` pairs.

        The full sequence is the concatenation of four sub-sequences:
        just0 x intersect, just0 x just1, intersect x just1, and the
        unordered pairs within intersect.  skip_ref is a one-element list
        each combo_* generator decrements as it consumes the skip budget.
        """
        self._run_once()

        skip_ref = [start]
        just_sid_0_list = list(self.just_sid_0)
        just_sid_1_list = list(self.just_sid_1)
        intersect_list = list(self.intersect)
        for sid0, sid1 in self.combo_distinct(just_sid_0_list, intersect_list, skip_ref):
            yield sid0, sid1
        for sid0, sid1 in self.combo_distinct(just_sid_0_list, just_sid_1_list, skip_ref):
            yield sid0, sid1
        for sid0, sid1 in self.combo_distinct(intersect_list, just_sid_1_list, skip_ref):
            yield sid0, sid1
        for sid0, sid1 in self.combo_same(intersect_list, skip_ref):
            yield sid0, sid1
        assert skip_ref[0] == 0, "real assert"
def combo_distinct(self, distinct__list0, distinct__list1, skip_ref):
row_count = len(distinct__list0)
col_count = len(distinct__list1)
if skip_ref[0] >= row_count * col_count:
skip_ref[0] = skip_ref[0] - row_count * col_count
assert skip_ref[0] >=0, "real assert"
return
row_start = skip_ref[0] // col_count
skip_ref[0] = skip_ref[0] - row_start * col_count
assert skip_ref[0] >=0, "real assert"
for row_index in range(row_start, row_count):
sid0 = distinct__list0[row_index]
if row_index == row_start:
col_start = skip_ref[0]
skip_ref[0] = 0
else:
col_start = 0
for col_index in range(col_start, col_count):
sid1 = distinct__list1[col_index]
yield sid0, sid1
    def combo_same(self, list, skip_ref):
        """Yield every unordered pair (sid0, sid1) from one sid list, skipping
        the first skip_ref[0] pairs.

        skip_ref is a one-element list used as a mutable skip budget, as in
        combo_distinct.  NOTE(review): the parameter name shadows the builtin
        `list`; also `full_size` is count*(count+1)//2 while the generator
        yields only count*(count-1)//2 pairs -- confirm whether the early-skip
        accounting is intentional (in current callers combo_same runs last, so
        the mismatch appears unobservable).
        """
        count = len(list)
        full_size = count * (count + 1) // 2
        if skip_ref[0] >= full_size:
            skip_ref[0] = skip_ref[0] - full_size
            assert skip_ref[0] >=0, "real assert"
            return
        # Invert the triangular-number pair count to find the first row to
        # visit after skipping, then reduce the budget to a column offset.
        row_start = int((-1 + 2*count - np.sqrt(1 - 4*count + 4*count**2 - 8*skip_ref[0]))//2)
        skip_ref[0] = skip_ref[0] - (count*row_start - (row_start*(1 + row_start))//2)
        assert skip_ref[0] >=0, "real assert"
        for row_index in range(row_start, count):
            sid0 = list[row_index]
            if row_index == row_start:
                col_start = skip_ref[0]
                skip_ref[0] = 0
            else:
                col_start = 0
            for col_index in range(col_start + 1 + row_index, count):
                sid1 = list[col_index]
                assert sid0 is not sid1, "real assert"
                yield sid0, sid1
    @property
    def pair_count(self):
        """Total number of sid pairs that will be tested (set in set_sid_sets)."""
        self._run_once()
        return self._pair_count
    def lmm_from_cache_file(self):
        """Build an LMM whose eigen-decomposition (U, S) is read from cache_file.

        fill_in_cache_file must have written the cache beforehand.
        """
        logging.info("Loading precomputation from {0}".format(self.cache_file))
        lmm = LMM()
        with np.load(self.cache_file) as data:
            lmm.U = data['arr_0']
            lmm.S = data['arr_1']
        return lmm
    def fill_in_cache_file(self):
        """Ensure the eigen-decomposition cache exists and log_delta is set.

        Computes and saves (U, S) to cache_file if missing, and searches for
        the best log_delta when none was supplied by the caller.
        """
        self._run_once()

        logging.info("filling in the cache_file and log_delta, as needed")

        if self.G1_or_none is None:
            self.G1val_or_none = None
        else:
            self.G1val_or_none = self.G1_or_none.read().val

        # The S and U are always cached, in case they are needed for the cluster or for multi-threaded runs
        if self.cache_file is None:
            self.cache_file = os.path.join(self.__tempdirectory, "cache_file.npz")
            if os.path.exists(self.cache_file): # If there is already a cache file in the temp directory, it must be removed because it might be out-of-date
                os.remove(self.cache_file)

        lmm = None
        if not os.path.exists(self.cache_file):
            logging.info("Precomputing eigen")
            lmm = LMM()
            G0_standardized = self.G0.read().standardize()
            lmm.setG(G0_standardized.val, self.G1val_or_none, a2=self.mixing)
            logging.info("Saving precomputation to {0}".format(self.cache_file))
            pstutil.create_directory_if_necessary(self.cache_file)
            np.savez(self.cache_file, lmm.U,lmm.S) #using np.savez instead of pickle because it seems to be faster to read and write

        if self.external_log_delta is None:
            if lmm is None:
                lmm = self.lmm_from_cache_file()

            logging.info("searching for internal delta")
            lmm.setX(self.covar)
            lmm.sety(self.pheno['vals'])
            #log delta is used here. Might be better to use findH2, but if so will need to normalized G so that its K's diagonal would sum to iid_count
            result = lmm.find_log_delta(REML=False, sid_count=self.G0.sid_count, min_log_delta=self.min_log_delta, max_log_delta=self.max_log_delta ) #!!what about findA2H2? minH2=0.00001
            self.external_log_delta = result['log_delta']

        self.internal_delta = np.exp(self.external_log_delta) * self.G0.sid_count
        logging.info("internal_delta={0}".format(self.internal_delta))
        logging.info("external_log_delta={0}".format(self.external_log_delta))
do_pair_count = 0
do_pair_time = time.time()
def do_work(self, lmm, sid0_list, sid1_list):
dataframe = pd.DataFrame(
index=np.arange(len(sid0_list)),
columns=('SNP0', 'Chr0', 'GenDist0', 'ChrPos0', 'SNP1', 'Chr1', 'GenDist1', 'ChrPos1', 'PValue', 'NullLogLike', 'AltLogLike')
)
#!!Is this the only way to set types in a dataframe?
dataframe['Chr0'] = dataframe['Chr0'].astype(np.float)
dataframe['GenDist0'] = dataframe['GenDist0'].astype(np.float)
dataframe['ChrPos0'] = dataframe['ChrPos0'].astype(np.float)
dataframe['Chr1'] = dataframe['Chr1'].astype(np.float)
dataframe['GenDist1'] = dataframe['GenDist1'].astype(np.float)
dataframe['ChrPos1'] = dataframe['ChrPos1'].astype(np.float)
dataframe['PValue'] = dataframe['PValue'].astype(np.float)
dataframe['NullLogLike'] = dataframe['NullLogLike'].astype(np.float)
dataframe['AltLogLike'] = dataframe['AltLogLike'].astype(np.float)
#This is some of the code for a different way that reads and dot-products 50% more, but does less copying. Seems about the same speed
#sid0_index_list = self.test_snps.sid_to_index(sid0_list)
#sid1_index_list = self.test_snps.sid_to_index(sid1_list)
#sid_index_union_dict = {}
#sid0_index_index_list = self.create_index_index(sid_index_union_dict, sid0_index_list)
#sid1_index_index_list = self.create_index_index(sid_index_union_dict, sid1_index_list)
#snps0_read = self.test_snps[:,sid0_index_list].read().standardize()
#snps1_read = self.test_snps[:,sid1_index_list].read().standardize()
sid_union = set(sid0_list).union(sid1_list)
sid_union_index_list = sorted(self.test_snps.sid_to_index(sid_union))
snps_read = self.test_snps[:,sid_union_index_list].read().standardize()
sid0_index_list = snps_read.sid_to_index(sid0_list)
sid1_index_list = snps_read.sid_to_index(sid1_list)
products = snps_read.val[:,sid0_index_list] * snps_read.val[:,sid1_index_list] # in the products matrix, each column i is the elementwise product of sid i in each list
X = np.hstack((self.covar, snps_read.val, products))
UX = lmm.U.T.dot(X)
k = lmm.S.shape[0]
N = X.shape[0]
if (k<N):
UUX = X - lmm.U.dot(UX)
else:
UUX = None
for pair_index, sid0 in enumerate(sid0_list):
sid1 = sid1_list[pair_index]
sid0_index = sid0_index_list[pair_index]
sid1_index = sid1_index_list[pair_index]
index_list = np.array([pair_index]) #index to product
index_list = index_list + len(sid_union_index_list) #Shift by the number of snps in the union
index_list = np.hstack((np.array([sid0_index,sid1_index]),index_list)) # index to sid0 and sid1
index_list = index_list + self.covar.shape[1] #Shift by the number of values in the covar
index_list = np.hstack((np.arange(self.covar.shape[1]),index_list)) #indexes of the covar
index_list_less_product = index_list[:-1] #index to everything but the product
#Null -- the two additive SNPs
lmm.X = X[:,index_list_less_product]
lmm.UX = UX[:,index_list_less_product]
if (k<N):
lmm.UUX = UUX[:,index_list_less_product]
else:
lmm.UUX = None
res_null = lmm.nLLeval(delta=self.internal_delta, REML=False)
ll_null = -res_null["nLL"]
#Alt -- now with the product feature
lmm.X = X[:,index_list]
lmm.UX = UX[:,index_list]
if (k<N):
lmm.UUX = UUX[:,index_list]
else:
lmm.UUX = None
res_alt = lmm.nLLeval(delta=self.internal_delta, REML=False)
ll_alt = -res_alt["nLL"]
test_statistic = ll_alt - ll_null
degrees_of_freedom = 1
pvalue = stats.chi2.sf(2.0 * test_statistic, degrees_of_freedom)
logging.debug("<{0},{1}>, null={2}, alt={3}, pvalue={4}".format(sid0,sid1,ll_null,ll_alt,pvalue))
dataframe.iloc[pair_index] = [
sid0, snps_read.pos[sid0_index,0], snps_read.pos[sid0_index,1], snps_read.pos[sid0_index,2],
sid1, snps_read.pos[sid1_index,0], snps_read.pos[sid1_index,1], snps_read.pos[sid1_index,2],
pvalue, ll_null, ll_alt]
self.do_pair_count += 1
if self.do_pair_count % 100 == 0:
start = self.do_pair_time
self.do_pair_time = time.time()
logging.info("do_pair_count={0}, time={1}".format(self.do_pair_count,self.do_pair_time-start))
return dataframe
if __name__ == "__main__":
import doctest
doctest.testmod()
print("done")
| [
"numpy.sqrt",
"pysnptools.util.create_directory_if_necessary",
"numpy.hstack",
"numpy.array",
"logging.info",
"pysnptools.util.pheno.loadOnePhen",
"numpy.arange",
"pysnptools.util.pheno.loadPhen",
"numpy.savez",
"numpy.exp",
"pysnptools.util.intersect_apply",
"doctest.testmod",
"numpy.ones",... | [((20159, 20170), 'time.time', 'time.time', ([], {}), '()\n', (20168, 20170), False, 'import time\n'), ((24987, 25004), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (25002, 25004), False, 'import doctest\n'), ((8978, 9073), 'pysnptools.util.intersect_apply', 'pstutil.intersect_apply', (['[self.test_snps, self.pheno, self.covar, self.G0, self.G1_or_none]'], {}), '([self.test_snps, self.pheno, self.covar, self.G0,\n self.G1_or_none])\n', (9001, 9073), True, 'import pysnptools.util as pstutil\n'), ((11458, 11484), 'pandas.concat', 'pd.concat', (['result_sequence'], {}), '(result_sequence)\n', (11467, 11484), True, 'import pandas as pd\n'), ((16274, 16301), 'six.moves.range', 'range', (['row_start', 'row_count'], {}), '(row_start, row_count)\n', (16279, 16301), False, 'from six.moves import range\n'), ((17174, 17197), 'six.moves.range', 'range', (['row_start', 'count'], {}), '(row_start, count)\n', (17179, 17197), False, 'from six.moves import range\n'), ((17820, 17825), 'fastlmm.inference.LMM', 'LMM', ([], {}), '()\n', (17823, 17825), False, 'from fastlmm.inference import LMM\n'), ((18029, 18095), 'logging.info', 'logging.info', (['"""filling in the cache_file and log_delta, as needed"""'], {}), "('filling in the cache_file and log_delta, as needed')\n", (18041, 18095), False, 'import logging\n'), ((22317, 22365), 'numpy.hstack', 'np.hstack', (['(self.covar, snps_read.val, products)'], {}), '((self.covar, snps_read.val, products))\n', (22326, 22365), True, 'import numpy as np\n'), ((7365, 7398), 'numpy.array', 'np.array', (['sid_list_0'], {'dtype': '"""str"""'}), "(sid_list_0, dtype='str')\n", (7373, 7398), True, 'import numpy as np\n'), ((7460, 7493), 'numpy.array', 'np.array', (['sid_list_1'], {'dtype': '"""str"""'}), "(sid_list_1, dtype='str')\n", (7468, 7493), True, 'import numpy as np\n'), ((9274, 9317), 'pysnptools.snpreader.Bed', 'Bed', (['self.test_snps'], {'count_A1': 'self.count_A1'}), '(self.test_snps, 
count_A1=self.count_A1)\n', (9277, 9317), False, 'from pysnptools.snpreader import Bed\n'), ((9377, 9413), 'pysnptools.snpreader.Bed', 'Bed', (['self.G0'], {'count_A1': 'self.count_A1'}), '(self.G0, count_A1=self.count_A1)\n', (9380, 9413), False, 'from pysnptools.snpreader import Bed\n'), ((9479, 9542), 'pysnptools.util.pheno.loadOnePhen', 'pstpheno.loadOnePhen', (['self.pheno'], {'vectorize': '(True)', 'missing': '"""NaN"""'}), "(self.pheno, vectorize=True, missing='NaN')\n", (9499, 9542), True, 'import pysnptools.util.pheno as pstpheno\n'), ((9634, 9678), 'pysnptools.util.pheno.loadPhen', 'pstpheno.loadPhen', (['self.covar'], {'missing': '"""NaN"""'}), "(self.covar, missing='NaN')\n", (9651, 9678), True, 'import pysnptools.util.pheno as pstpheno\n'), ((9786, 9830), 'pysnptools.snpreader.Bed', 'Bed', (['self.G1_or_none'], {'count_A1': 'self.count_A1'}), '(self.G1_or_none, count_A1=self.count_A1)\n', (9789, 9830), False, 'from pysnptools.snpreader import Bed\n'), ((10188, 10226), 'numpy.ones', 'np.ones', (['(self.test_snps.iid_count, 1)'], {}), '((self.test_snps.iid_count, 1))\n', (10195, 10226), True, 'import numpy as np\n'), ((16537, 16564), 'six.moves.range', 'range', (['col_start', 'col_count'], {}), '(col_start, col_count)\n', (16542, 16564), False, 'from six.moves import range\n'), ((17422, 17461), 'six.moves.range', 'range', (['(col_start + 1 + row_index)', 'count'], {}), '(col_start + 1 + row_index, count)\n', (17427, 17461), False, 'from six.moves import range\n'), ((17839, 17863), 'numpy.load', 'np.load', (['self.cache_file'], {}), '(self.cache_file)\n', (17846, 17863), True, 'import numpy as np\n'), ((18753, 18787), 'logging.info', 'logging.info', (['"""Precomputing eigen"""'], {}), "('Precomputing eigen')\n", (18765, 18787), False, 'import logging\n'), ((18806, 18811), 'fastlmm.inference.LMM', 'LMM', ([], {}), '()\n', (18809, 18811), False, 'from fastlmm.inference import LMM\n'), ((19042, 19096), 'pysnptools.util.create_directory_if_necessary', 
'pstutil.create_directory_if_necessary', (['self.cache_file'], {}), '(self.cache_file)\n', (19079, 19096), True, 'import pysnptools.util as pstutil\n'), ((19109, 19148), 'numpy.savez', 'np.savez', (['self.cache_file', 'lmm.U', 'lmm.S'], {}), '(self.cache_file, lmm.U, lmm.S)\n', (19117, 19148), True, 'import numpy as np\n'), ((19365, 19409), 'logging.info', 'logging.info', (['"""searching for internal delta"""'], {}), "('searching for internal delta')\n", (19377, 19409), False, 'import logging\n'), ((19914, 19945), 'numpy.exp', 'np.exp', (['self.external_log_delta'], {}), '(self.external_log_delta)\n', (19920, 19945), True, 'import numpy as np\n'), ((22763, 22785), 'numpy.array', 'np.array', (['[pair_index]'], {}), '([pair_index])\n', (22771, 22785), True, 'import numpy as np\n'), ((24151, 24206), 'scipy.stats.chi2.sf', 'stats.chi2.sf', (['(2.0 * test_statistic)', 'degrees_of_freedom'], {}), '(2.0 * test_statistic, degrees_of_freedom)\n', (24164, 24206), True, 'import scipy.stats as stats\n'), ((24786, 24797), 'time.time', 'time.time', ([], {}), '()\n', (24795, 24797), False, 'import time\n'), ((10296, 10334), 'numpy.ones', 'np.ones', (['(self.test_snps.iid_count, 1)'], {}), '((self.test_snps.iid_count, 1))\n', (10303, 10334), True, 'import numpy as np\n'), ((16960, 17017), 'numpy.sqrt', 'np.sqrt', (['(1 - 4 * count + 4 * count ** 2 - 8 * skip_ref[0])'], {}), '(1 - 4 * count + 4 * count ** 2 - 8 * skip_ref[0])\n', (16967, 17017), True, 'import numpy as np\n'), ((22946, 22980), 'numpy.array', 'np.array', (['[sid0_index, sid1_index]'], {}), '([sid0_index, sid1_index])\n', (22954, 22980), True, 'import numpy as np\n'), ((23156, 23186), 'numpy.arange', 'np.arange', (['self.covar.shape[1]'], {}), '(self.covar.shape[1])\n', (23165, 23186), True, 'import numpy as np\n')] |
from dataset import CovidImageDataset
from argparse import ArgumentParser
import torch
import torch.nn as nn
from model import VGG
import numpy as np
import os
from pytorch_lightning.utilities.seed import seed_everything
import random
def seed_worker(worker_id):
'''
https://pytorch.org/docs/stable/notes/randomness.html#dataloader
to fix https://tanelp.github.io/posts/a-bug-that-plagues-thousands-of-open-source-ml-projects/
ensures different random numbers each batch with each worker every epoch while keeping reproducibility
'''
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
train_loss = 0
correct = 0
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = nn.CrossEntropyLoss()(output, target)
loss.backward()
train_loss += loss
_, predicted = torch.max(output.data, 1)
correct += (predicted == target).sum().item()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def val(model, device, val_loader):
model.eval()
val_loss = 0
correct = 0
with torch.no_grad():
for data, target in val_loader:
data, target = data.to(device), target.to(device)
output = model(data)
val_loss += nn.CrossEntropyLoss()(output, target)
_, predicted = torch.max(output.data, 1)
correct += (predicted == target).sum().item()
print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
val_loss / len(val_loader), correct, len(val_loader.dataset),
100. * correct / len(val_loader.dataset)))
def main():
parser = ArgumentParser()
parser.add_argument("--data_dir", type=str, default='/hkfs/work/workspace/scratch/im9193-health_challenge/data')
parser.add_argument("--num_epochs", type=int, default=250)
parser.add_argument("--augment", type=str, default='resize_rotate_crop')
parser.add_argument("--seed", type=int, default=42)
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save_model', action='store_true', help='saves the trained model')
parser.add_argument('--model_name', type=str, help='model file name', default='vgg_baseline')
args = parser.parse_args()
device = torch.device("cuda")
# the following 3 lines are only needed to make the training fully reproducible, you can remove them
seed_everything(args.seed)
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':16:8'
torch.use_deterministic_algorithms(True)
data_base = args.data_dir
trainset = CovidImageDataset(
os.path.join(data_base, 'train.csv'),
os.path.join(data_base, 'imgs'),
transform=args.augment)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, num_workers=128,
worker_init_fn=seed_worker)
valset = CovidImageDataset(
os.path.join(data_base, 'valid.csv'),
os.path.join(data_base, 'imgs'),
transform=None)
valloader = torch.utils.data.DataLoader(valset, batch_size=64, shuffle=False, num_workers=128,
worker_init_fn=seed_worker)
model = VGG('VGG19').cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, nesterov=True, momentum=0.9)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.num_epochs)
print('CUDA available:', torch.cuda.is_available())
for epoch in range(1, args.num_epochs + 1):
train(args, model, device, trainloader, optimizer, epoch)
val(model, device, valloader)
scheduler.step()
if args.save_model:
model_dir = '/hkfs/work/workspace/scratch/im9193-health_challenge_baseline/saved_models' # TODO adapt to your group workspace
os.makedirs(model_dir, exist_ok=True)
torch.save(model.state_dict(), os.path.join(model_dir, "{}.pt".format(args.model_name)))
if __name__ == '__main__':
main()
| [
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.initial_seed",
"os.makedirs",
"torch.max",
"os.path.join",
"random.seed",
"model.VGG",
"torch.cuda.is_available",
"numpy.random.seed",
"torch.use_determ... | [((609, 636), 'numpy.random.seed', 'np.random.seed', (['worker_seed'], {}), '(worker_seed)\n', (623, 636), True, 'import numpy as np\n'), ((641, 665), 'random.seed', 'random.seed', (['worker_seed'], {}), '(worker_seed)\n', (652, 665), False, 'import random\n'), ((2124, 2140), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2138, 2140), False, 'from argparse import ArgumentParser\n'), ((2857, 2877), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2869, 2877), False, 'import torch\n'), ((2988, 3014), 'pytorch_lightning.utilities.seed.seed_everything', 'seed_everything', (['args.seed'], {}), '(args.seed)\n', (3003, 3014), False, 'from pytorch_lightning.utilities.seed import seed_everything\n'), ((3071, 3111), 'torch.use_deterministic_algorithms', 'torch.use_deterministic_algorithms', (['(True)'], {}), '(True)\n', (3105, 3111), False, 'import torch\n'), ((3315, 3430), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(64)', 'shuffle': '(True)', 'num_workers': '(128)', 'worker_init_fn': 'seed_worker'}), '(trainset, batch_size=64, shuffle=True,\n num_workers=128, worker_init_fn=seed_worker)\n', (3342, 3430), False, 'import torch\n'), ((3632, 3746), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valset'], {'batch_size': '(64)', 'shuffle': '(False)', 'num_workers': '(128)', 'worker_init_fn': 'seed_worker'}), '(valset, batch_size=64, shuffle=False,\n num_workers=128, worker_init_fn=seed_worker)\n', (3659, 3746), False, 'import torch\n'), ((3925, 4001), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer'], {'T_max': 'args.num_epochs'}), '(optimizer, T_max=args.num_epochs)\n', (3967, 4001), False, 'import torch\n'), ((574, 594), 'torch.initial_seed', 'torch.initial_seed', ([], {}), '()\n', (592, 594), False, 'import torch\n'), ((1094, 1119), 'torch.max', 'torch.max', (['output.data', '(1)'], {}), 
'(output.data, 1)\n', (1103, 1119), False, 'import torch\n'), ((1562, 1577), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1575, 1577), False, 'import torch\n'), ((3186, 3222), 'os.path.join', 'os.path.join', (['data_base', '"""train.csv"""'], {}), "(data_base, 'train.csv')\n", (3198, 3222), False, 'import os\n'), ((3232, 3263), 'os.path.join', 'os.path.join', (['data_base', '"""imgs"""'], {}), "(data_base, 'imgs')\n", (3244, 3263), False, 'import os\n'), ((3513, 3549), 'os.path.join', 'os.path.join', (['data_base', '"""valid.csv"""'], {}), "(data_base, 'valid.csv')\n", (3525, 3549), False, 'import os\n'), ((3559, 3590), 'os.path.join', 'os.path.join', (['data_base', '"""imgs"""'], {}), "(data_base, 'imgs')\n", (3571, 3590), False, 'import os\n'), ((4032, 4057), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4055, 4057), False, 'import torch\n'), ((4405, 4442), 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), '(model_dir, exist_ok=True)\n', (4416, 4442), False, 'import os\n'), ((981, 1002), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1000, 1002), True, 'import torch.nn as nn\n'), ((1803, 1828), 'torch.max', 'torch.max', (['output.data', '(1)'], {}), '(output.data, 1)\n', (1812, 1828), False, 'import torch\n'), ((3800, 3812), 'model.VGG', 'VGG', (['"""VGG19"""'], {}), "('VGG19')\n", (3803, 3812), False, 'from model import VGG\n'), ((1738, 1759), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1757, 1759), True, 'import torch.nn as nn\n')] |
import math
#######
import random
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorpack.dataflow.imgaug.geometry import RotationAndCropValid
def crop_meta_image(image,annos,mask):
_target_height=368
_target_width =368
if len(np.shape(image))==2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
height,width,_=np.shape(image)
# print("the size of original img is:", height, width)
if height<=width:
ratio=_target_height/height
new_width=int(ratio*width)
if height==width:
new_width=_target_height
image,annos,mask=_resize_image(image,annos,mask,new_width,_target_height)
for i in annos:
if len(i) is not 19:
print('Joints of person is not 19 ERROR FROM RESIZE')
if new_width>_target_width:
crop_range_x=np.random.randint(0, new_width-_target_width)
else:
crop_range_x=0
image= image[:, crop_range_x:crop_range_x + 368,:]
mask = mask[:, crop_range_x:crop_range_x + 368]
# joint_list= []
new_joints = []
#annos-pepople-joints (must be 19 or [])
for people in annos:
# print("number of keypoints is", np.shape(people))
new_keypoints = []
for keypoints in people:
if keypoints[0] < -10 or keypoints[1] < -10:
new_keypoints.append((-1000, -1000))
continue
top=crop_range_x+367
if keypoints[0]>=crop_range_x and keypoints[0]<= top:
# pts = (keypoints[0]-crop_range_x, keypoints[1])
pts = (int(keypoints[0] - crop_range_x),int(keypoints[1]))
else:
pts= (-1000,-1000)
new_keypoints.append(pts)
new_joints.append(new_keypoints)
if len(new_keypoints) != 19:
print('1:The Length of joints list should be 0 or 19 but actually:', len(new_keypoints))
annos = new_joints
if height>width:
ratio = _target_width / width
new_height = int(ratio * height)
image,annos,mask = _resize_image(image,annos,mask,_target_width, new_height)
for i in annos:
if len(i) is not 19:
print('Joints of person is not 19 ERROR')
if new_height > _target_height:
crop_range_y = np.random.randint(0, new_height - _target_height)
else:
crop_range_y = 0
image = image[crop_range_y:crop_range_y + 368, :, :]
mask = mask[crop_range_y:crop_range_y + 368, :]
new_joints = []
for people in annos:
new_keypoints = []
for keypoints in people:
# case orginal points are not usable
if keypoints[0] < 0 or keypoints[1] < 0:
new_keypoints.append((-1000, -1000))
continue
# y axis coordinate change
bot = crop_range_y + 367
if keypoints[1] >= crop_range_y and keypoints[1] <= bot:
# pts = (keypoints[0], keypoints[1]-crop_range_y)
pts = (int(keypoints[0]), int (keypoints[1] - crop_range_y))
# if pts[0]>367 or pts[1]>367:
# print('Error2')
else:
pts =(-1000,-1000)
new_keypoints.append(pts)
new_joints.append(new_keypoints)
if len(new_keypoints) != 19:
print('2:The Length of joints list should be 0 or 19 but actually:', len(new_keypoints))
annos = new_joints
# mask = cv2.resize(mask, (46, 46), interpolation=cv2.INTER_AREA)
return image,annos,mask
def _resize_image(image,annos,mask,_target_width,_target_height):
# _target_height=368
# _target_width =368
#original image
y,x,_=np.shape(image)
ratio_y= _target_height/y
ratio_x= _target_width/x
new_joints=[]
# update meta
# meta.height=_target_height
# meta.width =_target_width
for people in annos:
new_keypoints=[]
for keypoints in people:
if keypoints[0]<0 or keypoints[1]<0:
new_keypoints.append((-1000, -1000))
continue
pts = (int(keypoints[0] * ratio_x+0.5), int(keypoints[1] * ratio_y+0.5))
if pts[0] > _target_width-1 or pts[1] > _target_height-1:
new_keypoints.append((-1000, -1000))
continue
new_keypoints.append(pts)
new_joints.append(new_keypoints)
annos=new_joints
new_image = cv2.resize(image, (_target_width, _target_height), interpolation=cv2.INTER_AREA)
new_mask = cv2.resize(mask, (_target_width, _target_height), interpolation=cv2.INTER_AREA)
return new_image,annos,new_mask
def _rotate_coord(shape, newxy, point, angle):
angle = -1 * angle / 180.0 * math.pi
ox, oy = shape
px, py = point
ox /= 2
oy /= 2
qx = math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
new_x, new_y = newxy
qx += ox - new_x
qy += oy - new_y
return int(qx + 0.5), int(qy + 0.5)
def pose_rotation(image,annos,mask):
img_shape=np.shape(image)
height = img_shape[0]
width = img_shape[1]
deg = random.uniform(-15.0, 15.0)
img = image
center = (img.shape[1] * 0.5, img.shape[0] * 0.5) # x, y
rot_m = cv2.getRotationMatrix2D((int(center[0]), int(center[1])), deg, 1)
ret = cv2.warpAffine(img, rot_m, img.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
neww, newh = RotationAndCropValid.largest_rotated_rect(ret.shape[1], ret.shape[0], deg)
neww = min(neww, ret.shape[1])
newh = min(newh, ret.shape[0])
newx = int(center[0] - neww * 0.5)
newy = int(center[1] - newh * 0.5)
# print(ret.shape, deg, newx, newy, neww, newh)
img = ret[newy:newy + newh, newx:newx + neww]
# adjust meta data
adjust_joint_list = []
for joint in annos:
adjust_joint = []
for point in joint:
if point[0] < -100 or point[1] < -100:
adjust_joint.append((-1000, -1000))
continue
# if point[0] <= 0 or point[1] <= 0:
# adjust_joint.append((-1, -1))
# continue
x, y = _rotate_coord((width, height), (newx, newy), point, deg)
# if x > neww or y > newh:
# adjust_joint.append((-1000, -1000))
# continue
if x>neww-1 or y>newh-1:
adjust_joint.append((-1000, -1000))
continue
if x < 0 or y < 0:
adjust_joint.append((-1000, -1000))
continue
adjust_joint.append((x, y))
adjust_joint_list.append(adjust_joint)
joint_list = adjust_joint_list
msk = mask
center = (msk.shape[1] * 0.5, msk.shape[0] * 0.5) # x, y
rot_m = cv2.getRotationMatrix2D((int(center[0]), int(center[1])), deg, 1)
ret = cv2.warpAffine(msk, rot_m, msk.shape[1::-1], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
if msk.ndim == 3 and msk.ndim == 2:
ret = ret[:, :, np.newaxis]
neww, newh = RotationAndCropValid.largest_rotated_rect(ret.shape[1], ret.shape[0], deg)
neww = min(neww, ret.shape[1])
newh = min(newh, ret.shape[0])
newx = int(center[0] - neww * 0.5)
newy = int(center[1] - newh * 0.5)
# print(ret.shape, deg, newx, newy, neww, newh)
msk = ret[newy:newy + newh, newx:newx + neww]
return img, joint_list, msk
def random_flip(image,annos,mask_miss):
flip_list=[0, 1, 5, 6, 7, 2, 3, 4, 11, 12, 13, 8, 9, 10, 15, 14, 17, 16, 18]
prob = random.uniform(0, 1.0)
if prob > 0.5:
return image,annos,mask_miss
_, width, _ = np.shape(image)
image = cv2.flip(image, 1)
mask_miss=cv2.flip(mask_miss,1)
new_joints = []
for people in annos:
new_keypoints = []
for k in flip_list:
point=people[k]
if point[0] < 0 or point[1] < 0:
new_keypoints.append((-1000, -1000))
continue
if point[0]>image.shape[1]-1 or point[1]>image.shape[0]-1:
new_keypoints.append((-1000, -1000))
continue
if (width - point[0])>image.shape[1]-1:
new_keypoints.append((-1000, -1000))
continue
new_keypoints.append((width - point[0], point[1]))
new_joints.append(new_keypoints)
annos=new_joints
return image, annos, mask_miss
def pose_random_scale(image,annos,mask_miss):
height=image.shape[0]
width =image.shape[1]
scalew = np.random.uniform(0.8, 1.2)
scaleh = np.random.uniform(0.8, 1.2)
# scalew =scaleh=np.random.uniform(0.5, 1.1)
# scaleh=0.8934042054560039
# scalew=1.0860957314059887
neww = int(width * scalew)
newh = int(height * scaleh)
dst = cv2.resize(image, (neww, newh), interpolation=cv2.INTER_AREA)
mask_miss=cv2.resize(mask_miss, (neww, newh), interpolation=cv2.INTER_AREA)
# adjust meta data
adjust_joint_list = []
for joint in annos:
adjust_joint = []
for point in joint:
if point[0] < -100 or point[1] < -100:
adjust_joint.append((-1000, -1000))
continue
# if point[0] <= 0 or point[1] <= 0 or int(point[0] * scalew + 0.5) > neww or int(
# point[1] * scaleh + 0.5) > newh:
# adjust_joint.append((-1, -1))
# continue
adjust_joint.append((int(point[0] * scalew + 0.5), int(point[1] * scaleh + 0.5)))
adjust_joint_list.append(adjust_joint)
return dst,adjust_joint_list,mask_miss
def pose_resize_shortestedge_random(image,annos, mask):
_target_height = 368
_target_width = 368
if len(np.shape(image))==2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
height,width,_=np.shape(image)
ratio_w = _target_width / width
ratio_h = _target_height / height
ratio = min(ratio_w, ratio_h)
target_size = int(min(width * ratio + 0.5, height * ratio + 0.5))
random_target=random.uniform(0.95, 1.6)
# random_target=1.1318003767113862
target_size = int(target_size * random_target)
# target_size = int(min(_network_w, _network_h) * random.uniform(0.7, 1.5))
return pose_resize_shortestedge(image, annos, mask, target_size)
def pose_resize_shortestedge(image, annos,mask,target_size):
_target_height = 368
_target_width = 368
img=image
height, width, _ = np.shape(image)
# adjust image
scale = target_size / min(height, width)
if height < width:
newh, neww = target_size, int(scale * width + 0.5)
else:
newh, neww = int(scale * height + 0.5), target_size
dst = cv2.resize(img, (neww, newh), interpolation=cv2.INTER_AREA)
mask = cv2.resize(mask, (neww, newh), interpolation=cv2.INTER_AREA)
pw = ph = 0
if neww < _target_width or newh < _target_height:
pw = max(0, (_target_width - neww) // 2)
ph = max(0, (_target_height - newh) // 2)
mw = (_target_width - neww) % 2
mh = (_target_height - newh) % 2
color = np.random.uniform(0.0, 1.0)
dst = cv2.copyMakeBorder(dst, ph, ph + mh, pw, pw + mw, cv2.BORDER_CONSTANT, value=(0,0 ,color))
mask = cv2.copyMakeBorder(mask, ph, ph + mh, pw, pw + mw, cv2.BORDER_CONSTANT, value=1)
# adjust meta data
adjust_joint_list = []
for joint in annos:
adjust_joint = []
for point in joint:
if point[0] < -100 or point[1] < -100:
adjust_joint.append((-1000, -1000))
continue
# if point[0] <= 0 or point[1] <= 0 or int(point[0]*scale+0.5) > neww or int(point[1]*scale+0.5) > newh:
# adjust_joint.append((-1, -1))
# continue
adjust_joint.append((int(point[0] * scale + 0.5) + pw, int(point[1] * scale + 0.5) + ph))
adjust_joint_list.append(adjust_joint)
return dst,adjust_joint_list,mask
def pose_crop_random(image,annos,mask):
_target_height = 368
_target_width = 368
target_size = (_target_width, _target_height)
if len(np.shape(image))==2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
height,width,_=np.shape(image)
for _ in range(50):
x = random.randrange(0, width - target_size[0]) if width > target_size[0] else 0
y = random.randrange(0, height - target_size[1]) if height > target_size[1] else 0
# check whether any face is inside the box to generate a reasonably-balanced datasets
for joint in annos:
if x <= joint[0][0] < x + target_size[0] and y <= joint[0][1] < y + target_size[1]:
break
return pose_crop(image,annos,mask, x, y, target_size[0], target_size[1])
def pose_crop(image,annos,mask, x, y, w, h):
# adjust image
target_size = (w, h)
img = image
resized = img[y:y+target_size[1], x:x+target_size[0], :]
resized_mask = mask[y:y + target_size[1], x:x + target_size[0]]
# adjust meta data
adjust_joint_list = []
for joint in annos:
adjust_joint = []
for point in joint:
if point[0] < -10 or point[1] < -10:
adjust_joint.append((-1000, -1000))
continue
# if point[0] <= 0 or point[1] <= 0:
# adjust_joint.append((-1000, -1000))
# continue
new_x, new_y = point[0] - x, point[1] - y
# if new_x <= 0 or new_y <= 0 or new_x > target_size[0] or new_y > target_size[1]:
# adjust_joint.append((-1, -1))
# continue
if new_x > 367 or new_y > 367:
adjust_joint.append((-1000, -1000))
continue
adjust_joint.append((new_x, new_y))
adjust_joint_list.append(adjust_joint)
return resized,adjust_joint_list,resized_mask
def drawing(image, annos):
plt.imshow(image)
for j in annos:
for i in j:
if i[0]>0 and i[1]>0:
plt.scatter(i[0],i[1])
plt.savefig('fig/'+str(i)+'.jpg', dpi=100)
plt.show()
| [
"matplotlib.pyplot.imshow",
"random.uniform",
"cv2.warpAffine",
"cv2.resize",
"cv2.flip",
"random.randrange",
"cv2.copyMakeBorder",
"math.cos",
"numpy.random.randint",
"cv2.cvtColor",
"numpy.random.uniform",
"tensorpack.dataflow.imgaug.geometry.RotationAndCropValid.largest_rotated_rect",
"ma... | [((358, 373), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (366, 373), True, 'import numpy as np\n'), ((3920, 3935), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (3928, 3935), True, 'import numpy as np\n'), ((4659, 4744), 'cv2.resize', 'cv2.resize', (['image', '(_target_width, _target_height)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (_target_width, _target_height), interpolation=cv2.INTER_AREA\n )\n', (4669, 4744), False, 'import cv2\n'), ((4755, 4834), 'cv2.resize', 'cv2.resize', (['mask', '(_target_width, _target_height)'], {'interpolation': 'cv2.INTER_AREA'}), '(mask, (_target_width, _target_height), interpolation=cv2.INTER_AREA)\n', (4765, 4834), False, 'import cv2\n'), ((5321, 5336), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5329, 5336), True, 'import numpy as np\n'), ((5401, 5428), 'random.uniform', 'random.uniform', (['(-15.0)', '(15.0)'], {}), '(-15.0, 15.0)\n', (5415, 5428), False, 'import random\n'), ((5601, 5703), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'rot_m', 'img.shape[1::-1]'], {'flags': 'cv2.INTER_AREA', 'borderMode': 'cv2.BORDER_CONSTANT'}), '(img, rot_m, img.shape[1::-1], flags=cv2.INTER_AREA,\n borderMode=cv2.BORDER_CONSTANT)\n', (5615, 5703), False, 'import cv2\n'), ((5793, 5867), 'tensorpack.dataflow.imgaug.geometry.RotationAndCropValid.largest_rotated_rect', 'RotationAndCropValid.largest_rotated_rect', (['ret.shape[1]', 'ret.shape[0]', 'deg'], {}), '(ret.shape[1], ret.shape[0], deg)\n', (5834, 5867), False, 'from tensorpack.dataflow.imgaug.geometry import RotationAndCropValid\n'), ((7207, 7309), 'cv2.warpAffine', 'cv2.warpAffine', (['msk', 'rot_m', 'msk.shape[1::-1]'], {'flags': 'cv2.INTER_AREA', 'borderMode': 'cv2.BORDER_CONSTANT'}), '(msk, rot_m, msk.shape[1::-1], flags=cv2.INTER_AREA,\n borderMode=cv2.BORDER_CONSTANT)\n', (7221, 7309), False, 'import cv2\n'), ((7399, 7473), 'tensorpack.dataflow.imgaug.geometry.RotationAndCropValid.largest_rotated_rect', 
'RotationAndCropValid.largest_rotated_rect', (['ret.shape[1]', 'ret.shape[0]', 'deg'], {}), '(ret.shape[1], ret.shape[0], deg)\n', (7440, 7473), False, 'from tensorpack.dataflow.imgaug.geometry import RotationAndCropValid\n'), ((7888, 7910), 'random.uniform', 'random.uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (7902, 7910), False, 'import random\n'), ((7986, 8001), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (7994, 8001), True, 'import numpy as np\n'), ((8014, 8032), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (8022, 8032), False, 'import cv2\n'), ((8047, 8069), 'cv2.flip', 'cv2.flip', (['mask_miss', '(1)'], {}), '(mask_miss, 1)\n', (8055, 8069), False, 'import cv2\n'), ((8872, 8899), 'numpy.random.uniform', 'np.random.uniform', (['(0.8)', '(1.2)'], {}), '(0.8, 1.2)\n', (8889, 8899), True, 'import numpy as np\n'), ((8913, 8940), 'numpy.random.uniform', 'np.random.uniform', (['(0.8)', '(1.2)'], {}), '(0.8, 1.2)\n', (8930, 8940), True, 'import numpy as np\n'), ((9129, 9190), 'cv2.resize', 'cv2.resize', (['image', '(neww, newh)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (neww, newh), interpolation=cv2.INTER_AREA)\n', (9139, 9190), False, 'import cv2\n'), ((9205, 9270), 'cv2.resize', 'cv2.resize', (['mask_miss', '(neww, newh)'], {'interpolation': 'cv2.INTER_AREA'}), '(mask_miss, (neww, newh), interpolation=cv2.INTER_AREA)\n', (9215, 9270), False, 'import cv2\n'), ((10166, 10181), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (10174, 10181), True, 'import numpy as np\n'), ((10380, 10405), 'random.uniform', 'random.uniform', (['(0.95)', '(1.6)'], {}), '(0.95, 1.6)\n', (10394, 10405), False, 'import random\n'), ((10795, 10810), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (10803, 10810), True, 'import numpy as np\n'), ((11039, 11098), 'cv2.resize', 'cv2.resize', (['img', '(neww, newh)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (neww, newh), interpolation=cv2.INTER_AREA)\n', (11049, 11098), False, 
'import cv2\n'), ((11110, 11170), 'cv2.resize', 'cv2.resize', (['mask', '(neww, newh)'], {'interpolation': 'cv2.INTER_AREA'}), '(mask, (neww, newh), interpolation=cv2.INTER_AREA)\n', (11120, 11170), False, 'import cv2\n'), ((12550, 12565), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (12558, 12565), True, 'import numpy as np\n'), ((14232, 14249), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (14242, 14249), True, 'import matplotlib.pyplot as plt\n'), ((14414, 14424), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14422, 14424), True, 'import matplotlib.pyplot as plt\n'), ((299, 338), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2RGB'], {}), '(image, cv2.COLOR_GRAY2RGB)\n', (311, 338), False, 'import cv2\n'), ((10107, 10146), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2RGB'], {}), '(image, cv2.COLOR_GRAY2RGB)\n', (10119, 10146), False, 'import cv2\n'), ((11437, 11464), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (11454, 11464), True, 'import numpy as np\n'), ((11479, 11574), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['dst', 'ph', '(ph + mh)', 'pw', '(pw + mw)', 'cv2.BORDER_CONSTANT'], {'value': '(0, 0, color)'}), '(dst, ph, ph + mh, pw, pw + mw, cv2.BORDER_CONSTANT,\n value=(0, 0, color))\n', (11497, 11574), False, 'import cv2\n'), ((11585, 11670), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['mask', 'ph', '(ph + mh)', 'pw', '(pw + mw)', 'cv2.BORDER_CONSTANT'], {'value': '(1)'}), '(mask, ph, ph + mh, pw, pw + mw, cv2.BORDER_CONSTANT, value=1\n )\n', (11603, 11670), False, 'import cv2\n'), ((12491, 12530), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2RGB'], {}), '(image, cv2.COLOR_GRAY2RGB)\n', (12503, 12530), False, 'import cv2\n'), ((262, 277), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (270, 277), True, 'import numpy as np\n'), ((862, 909), 'numpy.random.randint', 'np.random.randint', (['(0)', '(new_width - 
_target_width)'], {}), '(0, new_width - _target_width)\n', (879, 909), True, 'import numpy as np\n'), ((2418, 2467), 'numpy.random.randint', 'np.random.randint', (['(0)', '(new_height - _target_height)'], {}), '(0, new_height - _target_height)\n', (2435, 2467), True, 'import numpy as np\n'), ((5034, 5049), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (5042, 5049), False, 'import math\n'), ((5064, 5079), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (5072, 5079), False, 'import math\n'), ((5101, 5116), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (5109, 5116), False, 'import math\n'), ((5131, 5146), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (5139, 5146), False, 'import math\n'), ((10070, 10085), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (10078, 10085), True, 'import numpy as np\n'), ((12454, 12469), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (12462, 12469), True, 'import numpy as np\n'), ((12603, 12646), 'random.randrange', 'random.randrange', (['(0)', '(width - target_size[0])'], {}), '(0, width - target_size[0])\n', (12619, 12646), False, 'import random\n'), ((12692, 12736), 'random.randrange', 'random.randrange', (['(0)', '(height - target_size[1])'], {}), '(0, height - target_size[1])\n', (12708, 12736), False, 'import random\n'), ((14340, 14363), 'matplotlib.pyplot.scatter', 'plt.scatter', (['i[0]', 'i[1]'], {}), '(i[0], i[1])\n', (14351, 14363), True, 'import matplotlib.pyplot as plt\n')] |
from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
from keras import backend as K
from keras.optimizers import Adam, SGD, RMSprop
from keras.layers import Input
from keras.models import Model
from frcnn import config, data_generators
from frcnn import losses as losses
import frcnn.roi_helpers as roi_helpers
from log import logger
from keras.utils import generic_utils
import json
from keras.utils import plot_model
import os.path as osp
# ---- Command-line interface ----
# All training knobs (dataset location, augmentation flags, backbone choice,
# epoch/step counts, output paths) are exposed as optparse options.
parser = OptionParser()
# DATASET_DIR = '/home/adam/.keras/datasets/VOCdevkit'
parser.add_option("-d", "--dataset", dest="dataset_dir", help="Path to training dataset.")
parser.add_option("-o", "--parser", dest="parser", help="Parser to use. One of 'simple' or 'pascal_voc'",
                  default="pascal_voc")
parser.add_option("-n", "--num_rois", type="int", dest="num_rois", help="Number of RoIs to process at once.",
                  default=32)
parser.add_option("--network", dest="network", help="Base network to use. Supports vgg and resnet50.",
                  default='resnet50')
parser.add_option("--hf", dest="horizontal_flips", help="Augment with horizontal flips in training.",
                  action="store_true", default=False)
parser.add_option("--vf", dest="vertical_flips", help="Augment with vertical flips in training.",
                  action="store_true", default=False)
parser.add_option("--rot", "--rotate", dest="rotate",
                  help="Augment with 90,180,270 degree rotations in training.",
                  action="store_true", default=False)
parser.add_option("--image_min_size", type="int", dest="image_min_size", help="Min side of image to resize.",
                  default=600)
parser.add_option("--num_epochs", type="int", dest="num_epochs", help="Number of epochs.", default=1000)
parser.add_option("--num_steps", type="int", dest="num_steps", help="Number of steps per epoch.", default=11540)
parser.add_option("--config_output_path", dest="config_output_path",
                  help="Location to store all the metadata related to the training (to be used when testing).",
                  default="config.pickle")
parser.add_option("--model_weight_path", dest="model_weight_path", help="Output path for model weights.")
parser.add_option("--base_net_weight_path", dest="base_net_weight_path",
                  help="Path for base network weights. If not specified, will try to load default weights provided by keras.")
(options, args) = parser.parse_args()
if not options.dataset_dir:  # if dataset_dir is not specified
    parser.error('Path to training dataset must be specified. Pass -d or --dataset to command line')
# The chosen annotation parser module is imported lazily, based on the
# --parser option.  parser.error() exits the process on an invalid choice.
if options.parser == 'pascal_voc':
    from frcnn.pascal_voc_parser import get_annotation_data
elif options.parser == 'simple':
    from frcnn.simple_parser import get_data
else:
    parser.error("Option parser must be one of 'pascal_voc' or 'simple'")
# pass the settings from the command line, and persist them in the config object
C = config.Config()
C.use_horizontal_flips = options.horizontal_flips
C.use_vertical_flips = options.vertical_flips
C.rotate = options.rotate
C.num_rois = options.num_rois
C.image_min_size = options.image_min_size
# Select the backbone module; `nn` is used uniformly below for both choices.
if options.network == 'vgg':
    C.network = 'vgg'
    from frcnn import vgg as nn
elif options.network == 'resnet50':
    from frcnn import resnet as nn
    C.network = 'resnet50'
else:
    parser.error("Option network must be one of 'vgg' or 'resnet50'")
# check if output weight path was passed via command line
if options.model_weight_path:
    C.model_weight_path = options.model_weight_path
else:
    # Default weight filename template: epoch index plus the four loss terms
    # and the accuracy are filled in at save time (see the training loop).
    C.model_weight_path = 'frcnn_{}_{{}}_{{:.4f}}_{{:.4f}}_{{:.4f}}_{{:.4f}}_{{:.4f}}.hdf5'.format(C.network)
# check if base weight path was passed via command line
if options.base_net_weight_path:
    C.base_net_weights_path = options.base_net_weight_path
else:
    # set the path to weights based on backend and model
    C.base_net_weights_path = nn.get_weight_path()
# Annotations are read from pre-generated JSON caches in the working
# directory instead of re-parsing the dataset (see the commented-out call).
# all_annotation_data, classes_count, class_name_idx_mapping = get_annotation_data(DATASET_DIR)
all_annotations = json.load(open('annotation_data.json'))
classes_count = json.load(open('classes_count.json'))
class_name_idx_mapping = json.load(open('class_name_idx_mapping.json'))
# Ensure the background class 'bg' exists; it is appended with the next
# free class index.
if 'bg' not in classes_count:
    classes_count['bg'] = 0
    class_name_idx_mapping['bg'] = len(class_name_idx_mapping)
C.class_name_idx_mapping = class_name_idx_mapping
logger.debug('class_count={}'.format(classes_count))
logger.info('Num of classes (including bg) = {}'.format(len(classes_count)))
logger.debug('class_name_idx_mapping={}'.format(class_name_idx_mapping))
config_output_path = options.config_output_path
# Persist the config so that testing can reproduce the training settings.
with open(config_output_path, 'wb') as config_f:
    pickle.dump(C, config_f)
logger.info('Config has been written to {}, and can be loaded when testing to ensure correct results'.format(
    config_output_path))
# ---- Data split and model construction ----
random.shuffle(all_annotations)
train_annotations = [annotation for annotation in all_annotations if annotation['imageset'] == 'train']
val_annotations = [annotation for annotation in all_annotations if annotation['imageset'] == 'val']
logger.info('Num of samples {}'.format(len(all_annotations)))
logger.info('Num of train samples {}'.format(len(train_annotations)))
logger.info('Num of val samples {}'.format(len(val_annotations)))
# Generators yield (image batch, RPN targets, augmented annotation) tuples.
train_data_gen = data_generators.get_anchor_gt(train_annotations, classes_count, C, nn.get_feature_map_size,
                                              mode='train')
val_data_gen = data_generators.get_anchor_gt(val_annotations, classes_count, C, nn.get_feature_map_size, mode='val')
input_shape = (None, None, 3)
image_input = Input(shape=input_shape)
rois_input = Input(shape=(None, 4))
# define the base network (resnet here, can be VGG, Inception, etc)
base_net_output = nn.base_net(image_input, trainable=True)
# define the RPN, built on the base net
num_anchors = len(C.anchor_scales) * len(C.anchor_ratios)
rpn_output = nn.rpn(base_net_output, num_anchors)
# [(batch_size=1, num_rois, num_classes),(batch_size=1, num_rois, 4 * (num_classes -1))
# The second element is the regr (bounding-box regression) output; it does
# not include the 'bg' class.
rcnn_output = nn.rcnn(base_net_output, rois_input, C.num_rois, num_classes=len(classes_count))
model_rpn = Model(image_input, rpn_output)
model_rcnn = Model([image_input, rois_input], rcnn_output)
# this is a model that holds both the RPN and the RCNN, used to load/save weights for the models
model = Model([image_input, rois_input], rpn_output + rcnn_output)
if not osp.exists('model.jpg'):
    plot_model(model, to_file='model.jpg')
try:
    logger.info('loading weights from {}'.format(C.base_net_weights_path))
    model_rpn.load_weights(C.base_net_weights_path, by_name=True)
    model_rcnn.load_weights(C.base_net_weights_path, by_name=True)
except:
    logger.exception('Could not load pretrained model weights of base net. '
                     'Weights can be found in https://github.com/fchollet/deep-learning-models/releases')
# ---- Compilation and training bookkeeping ----
optimizer = Adam(lr=1e-5)
optimizer_classifier = Adam(lr=1e-5)
model_rpn.compile(optimizer=optimizer, loss=[losses.rpn_class_loss(num_anchors), losses.rpn_regr_loss(num_anchors)])
model_rcnn.compile(optimizer=optimizer_classifier,
                   loss=[losses.rcnn_class_loss, losses.rcnn_regr_loss(len(classes_count) - 1)],
                   metrics={'rcnn_class': 'accuracy'})
# The combined model is only used for saving/loading weights; the optimizer
# and loss here are placeholders and are never trained directly.
model.compile(optimizer='sgd', loss='mae')
num_epochs = int(options.num_epochs)
# Every 1000 epochs, output detailed logs and save the model.
num_steps = int(options.num_steps)
step_idx = 0
# Number of positive rois in each epoch.
num_pos_rois_per_epoch = []
# NOTE(review): this ndarray shadows the `losses` module imported above; this
# is safe only because the module is not referenced again after compilation.
# Columns: rpn class loss, rpn regr loss, rcnn class loss, rcnn regr loss, acc.
losses = np.zeros((num_steps, 5))
start_time = time.time()
best_loss = np.Inf
logger.info('Training starts...')
# ---- Alternating RPN / RCNN training loop ----
# Each epoch runs `num_steps` successful steps; a step trains the RPN on one
# image, converts its predictions to rois, then trains the RCNN on a sampled
# subset of those rois.
for epoch_idx in range(num_epochs):
    progbar = generic_utils.Progbar(num_steps)
    logger.info('Epoch {}/{}'.format(epoch_idx + 1, num_epochs))
    while True:
        try:
            X1, Y1, augmented_annotation = next(train_data_gen)
            # loss_rpn = [loss,rpn_out_class_loss,rpn_out_regress_loss]; the
            # entries are named after the final layer's name + '_loss'.
            # Note also that the labels' shape may differ from the model
            # output's shape -- this depends on the loss function.
            rpn_loss = model_rpn.train_on_batch(X1, Y1)
            # [(1,m,n,9),(1,m,n,36)]
            rpn_prediction = model_rpn.predict_on_batch(X1)
            # rois has shape (None,4) in (x1,y1,x2,y2) format.
            rois = roi_helpers.rpn_to_roi(rpn_prediction[0], rpn_prediction[1], C, overlap_thresh=0.7, max_rois=300)
            # NOTE: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
            # X2: x_roi Y21: y_class Y22: y_regr
            X2, Y21, Y22, IoUs = roi_helpers.calc_iou(rois, augmented_annotation, C, class_name_idx_mapping)
            if X2 is None:
                num_pos_rois_per_epoch.append(0)
                continue
            # e.g. if Y21 is np.array([[[0,0,0,1],[0,0,0,0]]]), then
            # np.where(Y21[0,:,-1]==1) returns (array([0]),).
            # Y21[0,:,-1] is the value for the 'bg' class: 1 means the roi is
            # negative (background), 0 means it is positive.
            neg_roi_idxs = np.where(Y21[0, :, -1] == 1)[0]
            pos_roi_idxs = np.where(Y21[0, :, -1] == 0)[0]
            num_pos_rois_per_epoch.append((len(pos_roi_idxs)))
            if C.num_rois > 1:
                # If there are fewer than num_rois//2 positive samples, send
                # them all to training.
                if len(pos_roi_idxs) < C.num_rois // 2:
                    selected_pos_idxs = pos_roi_idxs.tolist()
                # If there are at least num_rois//2 positive samples, randomly
                # pick num_rois//2 of them for training.
                else:
                    # replace=False means no duplicates (without replacement).
                    selected_pos_idxs = np.random.choice(pos_roi_idxs, C.num_rois // 2, replace=False).tolist()
                # NOTE(review): the bare except falls back to sampling with
                # replacement when there are too few negatives to fill the
                # batch without duplicates.
                try:
                    selected_neg_idxs = np.random.choice(neg_roi_idxs, C.num_rois - len(selected_pos_idxs),
                                                         replace=False).tolist()
                except:
                    selected_neg_idxs = np.random.choice(neg_roi_idxs, C.num_rois - len(selected_pos_idxs),
                                                         replace=True).tolist()
                selected_idxs = selected_pos_idxs + selected_neg_idxs
            else:
                # in the extreme case where num_rois = 1, we pick a random pos or neg sample
                selected_pos_idxs = pos_roi_idxs.tolist()
                selected_neg_idxs = neg_roi_idxs.tolist()
                if np.random.randint(0, 2):
                    selected_idxs = random.choice(neg_roi_idxs)
                else:
                    selected_idxs = random.choice(pos_roi_idxs)
            rcnn_loss = model_rcnn.train_on_batch([X1, X2[:, selected_idxs, :]],
                                                  [Y21[:, selected_idxs, :], Y22[:, selected_idxs, :]])
            losses[step_idx, 0] = rpn_loss[1]
            losses[step_idx, 1] = rpn_loss[2]
            losses[step_idx, 2] = rcnn_loss[1]
            losses[step_idx, 3] = rcnn_loss[2]
            # accuracy
            losses[step_idx, 4] = rcnn_loss[3]
            step_idx += 1
            progbar.update(step_idx,
                           [('rpn_class_loss', np.mean(losses[:step_idx, 0])),
                            ('rpn_regr_loss', np.mean(losses[:step_idx, 1])),
                            ('rcnn_class_loss', np.mean(losses[:step_idx, 2])),
                            ('rcnn_regr_loss', np.mean(losses[:step_idx, 3]))])
            # End of epoch: log mean losses and save weights on improvement.
            if step_idx == num_steps:
                rpn_class_loss = np.mean(losses[:, 0])
                rpn_regr_loss = np.mean(losses[:, 1])
                rcnn_class_loss = np.mean(losses[:, 2])
                rcnn_regr_loss = np.mean(losses[:, 3])
                rcnn_class_acc = np.mean(losses[:, 4])
                mean_num_pos_rois = float(sum(num_pos_rois_per_epoch)) / len(num_pos_rois_per_epoch)
                num_pos_rois_per_epoch = []
                curr_loss = rpn_class_loss + rpn_regr_loss + rcnn_class_loss + rcnn_regr_loss
                if C.verbose:
                    logger.debug('Mean number of positive rois: {}'.format(
                        mean_num_pos_rois))
                    if mean_num_pos_rois == 0:
                        logger.warning(
                            'RPN is not producing positive rois. Check settings or keep training.')
                    logger.debug('RPN Classification Loss: {}'.format(rpn_class_loss))
                    logger.debug('RPN Regression Loss : {}'.format(rpn_regr_loss))
                    logger.debug('RCNN Classification Loss: {}'.format(rcnn_class_loss))
                    logger.debug('RCNN Regression Loss: {}'.format(rcnn_regr_loss))
                    logger.debug('Total Loss: {}'.format(curr_loss))
                    logger.debug('RCNN Classification Accuracy: {}'.format(rcnn_class_acc))
                    logger.debug('Elapsed time: {}'.format(time.time() - start_time))
                step_idx = 0
                start_time = time.time()
                if curr_loss < best_loss:
                    if C.verbose:
                        logger.debug('Total loss decreased from {} to {}, saving weights'.format(best_loss, curr_loss))
                    best_loss = curr_loss
                    model.save_weights(
                        C.model_weight_path.format(epoch_idx, rpn_class_loss, rpn_regr_loss, rcnn_class_loss, rcnn_regr_loss,
                                                   rcnn_class_acc))
                break
        # Any failure in a step is logged and the step is retried with the
        # next sample; step_idx is not advanced for failed steps.
        except Exception as e:
            logger.exception('{}'.format(e))
            continue
logger.info('Training complete, exiting.')
| [
"log.logger.exception",
"frcnn.resnet.base_net",
"frcnn.data_generators.get_anchor_gt",
"frcnn.roi_helpers.calc_iou",
"os.path.exists",
"numpy.mean",
"numpy.where",
"frcnn.resnet.rpn",
"keras.utils.plot_model",
"frcnn.losses.rpn_class_loss",
"frcnn.losses.rpn_regr_loss",
"keras.models.Model",
... | [((548, 562), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (560, 562), False, 'from optparse import OptionParser\n'), ((3065, 3080), 'frcnn.config.Config', 'config.Config', ([], {}), '()\n', (3078, 3080), False, 'from frcnn import config, data_generators\n'), ((4984, 5015), 'random.shuffle', 'random.shuffle', (['all_annotations'], {}), '(all_annotations)\n', (4998, 5015), False, 'import random\n'), ((5438, 5548), 'frcnn.data_generators.get_anchor_gt', 'data_generators.get_anchor_gt', (['train_annotations', 'classes_count', 'C', 'nn.get_feature_map_size'], {'mode': '"""train"""'}), "(train_annotations, classes_count, C, nn.\n get_feature_map_size, mode='train')\n", (5467, 5548), False, 'from frcnn import config, data_generators\n'), ((5606, 5712), 'frcnn.data_generators.get_anchor_gt', 'data_generators.get_anchor_gt', (['val_annotations', 'classes_count', 'C', 'nn.get_feature_map_size'], {'mode': '"""val"""'}), "(val_annotations, classes_count, C, nn.\n get_feature_map_size, mode='val')\n", (5635, 5712), False, 'from frcnn import config, data_generators\n'), ((5753, 5777), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (5758, 5777), False, 'from keras.layers import Input\n'), ((5791, 5813), 'keras.layers.Input', 'Input', ([], {'shape': '(None, 4)'}), '(shape=(None, 4))\n', (5796, 5813), False, 'from keras.layers import Input\n'), ((5900, 5940), 'frcnn.resnet.base_net', 'nn.base_net', (['image_input'], {'trainable': '(True)'}), '(image_input, trainable=True)\n', (5911, 5940), True, 'from frcnn import resnet as nn\n'), ((6052, 6088), 'frcnn.resnet.rpn', 'nn.rpn', (['base_net_output', 'num_anchors'], {}), '(base_net_output, num_anchors)\n', (6058, 6088), True, 'from frcnn import resnet as nn\n'), ((6315, 6345), 'keras.models.Model', 'Model', (['image_input', 'rpn_output'], {}), '(image_input, rpn_output)\n', (6320, 6345), False, 'from keras.models import Model\n'), ((6359, 6404), 'keras.models.Model', 
'Model', (['[image_input, rois_input]', 'rcnn_output'], {}), '([image_input, rois_input], rcnn_output)\n', (6364, 6404), False, 'from keras.models import Model\n'), ((6510, 6568), 'keras.models.Model', 'Model', (['[image_input, rois_input]', '(rpn_output + rcnn_output)'], {}), '([image_input, rois_input], rpn_output + rcnn_output)\n', (6515, 6568), False, 'from keras.models import Model\n'), ((7061, 7075), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(1e-05)'}), '(lr=1e-05)\n', (7065, 7075), False, 'from keras.optimizers import Adam, SGD, RMSprop\n'), ((7098, 7112), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(1e-05)'}), '(lr=1e-05)\n', (7102, 7112), False, 'from keras.optimizers import Adam, SGD, RMSprop\n'), ((7656, 7680), 'numpy.zeros', 'np.zeros', (['(num_steps, 5)'], {}), '((num_steps, 5))\n', (7664, 7680), True, 'import numpy as np\n'), ((7694, 7705), 'time.time', 'time.time', ([], {}), '()\n', (7703, 7705), False, 'import time\n'), ((7726, 7759), 'log.logger.info', 'logger.info', (['"""Training starts..."""'], {}), "('Training starts...')\n", (7737, 7759), False, 'from log import logger\n'), ((13539, 13581), 'log.logger.info', 'logger.info', (['"""Training complete, exiting."""'], {}), "('Training complete, exiting.')\n", (13550, 13581), False, 'from log import logger\n'), ((4033, 4053), 'frcnn.resnet.get_weight_path', 'nn.get_weight_path', ([], {}), '()\n', (4051, 4053), True, 'from frcnn import resnet as nn\n'), ((4815, 4839), 'pickle.dump', 'pickle.dump', (['C', 'config_f'], {}), '(C, config_f)\n', (4826, 4839), False, 'import pickle\n'), ((6576, 6599), 'os.path.exists', 'osp.exists', (['"""model.jpg"""'], {}), "('model.jpg')\n", (6586, 6599), True, 'import os.path as osp\n'), ((6605, 6643), 'keras.utils.plot_model', 'plot_model', (['model'], {'to_file': '"""model.jpg"""'}), "(model, to_file='model.jpg')\n", (6615, 6643), False, 'from keras.utils import plot_model\n'), ((7810, 7842), 'keras.utils.generic_utils.Progbar', 'generic_utils.Progbar', 
(['num_steps'], {}), '(num_steps)\n', (7831, 7842), False, 'from keras.utils import generic_utils\n'), ((6869, 7033), 'log.logger.exception', 'logger.exception', (['"""Could not load pretrained model weights of base net. Weights can be found in https://github.com/fchollet/deep-learning-models/releases"""'], {}), "(\n 'Could not load pretrained model weights of base net. Weights can be found in https://github.com/fchollet/deep-learning-models/releases'\n )\n", (6885, 7033), False, 'from log import logger\n'), ((7157, 7191), 'frcnn.losses.rpn_class_loss', 'losses.rpn_class_loss', (['num_anchors'], {}), '(num_anchors)\n', (7178, 7191), True, 'from frcnn import losses as losses\n'), ((7193, 7226), 'frcnn.losses.rpn_regr_loss', 'losses.rpn_regr_loss', (['num_anchors'], {}), '(num_anchors)\n', (7213, 7226), True, 'from frcnn import losses as losses\n'), ((8398, 8499), 'frcnn.roi_helpers.rpn_to_roi', 'roi_helpers.rpn_to_roi', (['rpn_prediction[0]', 'rpn_prediction[1]', 'C'], {'overlap_thresh': '(0.7)', 'max_rois': '(300)'}), '(rpn_prediction[0], rpn_prediction[1], C,\n overlap_thresh=0.7, max_rois=300)\n', (8420, 8499), True, 'import frcnn.roi_helpers as roi_helpers\n'), ((8655, 8730), 'frcnn.roi_helpers.calc_iou', 'roi_helpers.calc_iou', (['rois', 'augmented_annotation', 'C', 'class_name_idx_mapping'], {}), '(rois, augmented_annotation, C, class_name_idx_mapping)\n', (8675, 8730), True, 'import frcnn.roi_helpers as roi_helpers\n'), ((9040, 9068), 'numpy.where', 'np.where', (['(Y21[0, :, -1] == 1)'], {}), '(Y21[0, :, -1] == 1)\n', (9048, 9068), True, 'import numpy as np\n'), ((9099, 9127), 'numpy.where', 'np.where', (['(Y21[0, :, -1] == 0)'], {}), '(Y21[0, :, -1] == 0)\n', (9107, 9127), True, 'import numpy as np\n'), ((10393, 10416), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (10410, 10416), True, 'import numpy as np\n'), ((11465, 11486), 'numpy.mean', 'np.mean', (['losses[:, 0]'], {}), '(losses[:, 0])\n', (11472, 11486), True, 'import 
numpy as np\n'), ((11519, 11540), 'numpy.mean', 'np.mean', (['losses[:, 1]'], {}), '(losses[:, 1])\n', (11526, 11540), True, 'import numpy as np\n'), ((11575, 11596), 'numpy.mean', 'np.mean', (['losses[:, 2]'], {}), '(losses[:, 2])\n', (11582, 11596), True, 'import numpy as np\n'), ((11630, 11651), 'numpy.mean', 'np.mean', (['losses[:, 3]'], {}), '(losses[:, 3])\n', (11637, 11651), True, 'import numpy as np\n'), ((11685, 11706), 'numpy.mean', 'np.mean', (['losses[:, 4]'], {}), '(losses[:, 4])\n', (11692, 11706), True, 'import numpy as np\n'), ((12933, 12944), 'time.time', 'time.time', ([], {}), '()\n', (12942, 12944), False, 'import time\n'), ((10454, 10481), 'random.choice', 'random.choice', (['neg_roi_idxs'], {}), '(neg_roi_idxs)\n', (10467, 10481), False, 'import random\n'), ((10540, 10567), 'random.choice', 'random.choice', (['pos_roi_idxs'], {}), '(pos_roi_idxs)\n', (10553, 10567), False, 'import random\n'), ((11123, 11152), 'numpy.mean', 'np.mean', (['losses[:step_idx, 0]'], {}), '(losses[:step_idx, 0])\n', (11130, 11152), True, 'import numpy as np\n'), ((11201, 11230), 'numpy.mean', 'np.mean', (['losses[:step_idx, 1]'], {}), '(losses[:step_idx, 1])\n', (11208, 11230), True, 'import numpy as np\n'), ((11281, 11310), 'numpy.mean', 'np.mean', (['losses[:step_idx, 2]'], {}), '(losses[:step_idx, 2])\n', (11288, 11310), True, 'import numpy as np\n'), ((11360, 11389), 'numpy.mean', 'np.mean', (['losses[:step_idx, 3]'], {}), '(losses[:step_idx, 3])\n', (11367, 11389), True, 'import numpy as np\n'), ((12168, 12259), 'log.logger.warning', 'logger.warning', (['"""RPN is not producing positive rois. Check settings or keep training."""'], {}), "(\n 'RPN is not producing positive rois. 
Check settings or keep training.')\n", (12182, 12259), False, 'from log import logger\n'), ((9582, 9644), 'numpy.random.choice', 'np.random.choice', (['pos_roi_idxs', '(C.num_rois // 2)'], {'replace': '(False)'}), '(pos_roi_idxs, C.num_rois // 2, replace=False)\n', (9598, 9644), True, 'import numpy as np\n'), ((12847, 12858), 'time.time', 'time.time', ([], {}), '()\n', (12856, 12858), False, 'import time\n')] |
"""
cobyladriver.py - Contains a driver that wraps the cobyla
optimizer as used in pyOpt:
Minimize a function using the Constrained Optimization BY Linear
Approximation (COBYLA) method.
COBYLA is gradient-free and can handle inequality constraints.
"""
from math import isnan
from numpy import zeros, array, hstack
from cobyla.cobyla import cobyla, closeunit
from openmdao.main.datatypes.api import Enum, Float, Int, Str
from openmdao.main.driver import Driver
from openmdao.main.hasparameters import HasParameters
from openmdao.main.hasconstraints import HasIneqConstraints
from openmdao.main.hasobjective import HasObjective
from openmdao.main.interfaces import IHasParameters, IHasIneqConstraints, \
IHasObjective, implements, IOptimizer
from openmdao.util.decorators import add_delegate
@add_delegate(HasParameters, HasIneqConstraints, HasObjective)
class COBYLAdriver(Driver):
    """Minimize a function using the Constrained Optimization BY Linear
    Approximation (COBYLA) method.

    COBYLA is gradient-free and can handle inequality constraints.

    Note: Constraints should be added using the OpenMDAO convention
    (positive = violated).
    """

    implements(IHasParameters, IHasIneqConstraints, IHasObjective, IOptimizer)

    # pylint: disable-msg=E1101
    rhobeg = Float(1.0, iotype='in',
                   desc='Reasonable initial changes to the variables.')

    rhoend = Float(1e-4, iotype='in',
                   desc='Final accuracy in the optimization'
                        ' (not precisely guaranteed).')

    iprint = Enum(1, [0, 1, 2, 3], iotype='in',
                  desc='Controls the frequency of output: 0 (no output),1,2,3.')

    maxfun = Int(1000, iotype='in',
                 desc='Maximum number of function evaluations.')

    iout = Int(6, iotype='in',
               desc='Fortran output unit. Leave this at 6 for STDOUT.')

    output_filename = Str('cobyla.out', iotype='in',
                          desc='Name of output file (if iout not 6).')

    error_code = Int(0, iotype='out',
                     desc='Error code returned from COBYLA.')

    def __init__(self):
        super(COBYLAdriver, self).__init__()

        # Human-readable messages keyed on COBYLA's nonzero return codes.
        self.error_messages = {
            1: 'Max. number of function evaluations reached',
            2: 'Rounding errors are becoming damaging'
        }

        self.x = zeros(0, 'd')            # current parameter vector
        self.work_vector = zeros(0, 'd')  # Fortran workspace for COBYLA
        self.gg = zeros(0, 'd')           # constraint values returned to COBYLA
        self.iact = zeros(0, 'd')         # integer workspace (active constraints)
        self.g = zeros(0, 'd')
        self.ff = 0
        self.nfvals = 0
        self.nparam = 0
        self.ncon = 0
        self.upper = None
        self.lower = None
        self._continue = None

    def start_iteration(self):
        """Perform initial setup before iteration loop begins."""

        # Inital run to make sure the workflow executes
        super(COBYLAdriver, self).run_iteration()

        self.nparam = self.total_parameters()
        self.ncon = self.total_ineq_constraints()
        # Each parameter adds two side constraints (lower and upper bound),
        # which are appended to the constraint vector in _func.
        self.ncon += 2*self.nparam
        self.g = zeros(self.ncon, 'd')

        # get the initial values of the parameters
        self.x = self.eval_parameters(self.parent)
        self.upper = self.get_upper_bounds()
        self.lower = self.get_lower_bounds()

        n = self.nparam
        m = self.ncon
        # Workspace sizes required by the Fortran COBYLA routine.
        # (The redundant zeros(self.ncon) pre-allocation of work_vector that
        # used to precede this was a dead store and has been removed.)
        self.work_vector = zeros([n*(3*n+2*m+11)+4*m+6], 'd')
        self.iact = zeros([m+1], 'i')
        self.gg = zeros([m], 'd')

        self._continue = True

    def run_iteration(self):
        """ Note: cobyla controls the looping; it calls back into `_func`
        once per function evaluation until convergence or an error."""

        try:
            self.iact, self.error_code, self.nfvals = \
                cobyla(self._func, self.nparam, self.ncon, self.x,
                       self.rhobeg, self.rhoend, self.iprint, self.maxfun,
                       self.work_vector, self.iact, self.error_code, self.nfvals,
                       self.iout, self.output_filename, self.ff, self.gg)

        except Exception as err:
            self._logger.error(str(err))
            raise

        if self.iprint > 0:
            # Flush/close the Fortran output unit opened by COBYLA.
            closeunit(self.iout)

        # Log any errors
        if self.error_code != 0:
            self._logger.warning(self.error_messages[self.error_code])

        # Iteration is complete
        self._continue = False

    def _func(self, n, m, xnew, f, g):
        """ Return ndarrays containing the function and constraint
        evaluations at the point `xnew`.

        Note: n, m, f, and g are unused inputs; they are part of COBYLA's
        callback signature but fresh values are computed and returned."""

        self.set_parameters(xnew)
        super(COBYLAdriver, self).run_iteration()
        f = self.eval_objective()

        if isnan(f):
            msg = "Numerical overflow in the objective"
            self.raise_exception(msg, RuntimeError)

        # Constraints (COBYLA defines positive as satisfied, while OpenMDAO
        # uses positive = violated, hence the sign flip)
        cons = -1. * array(self.eval_ineq_constraints())

        # Side Constraints derived from the parameter bounds.
        vals = self.eval_parameters(self.parent)
        g = hstack([cons, (vals - self.lower), (self.upper - vals)])

        return f, g
| [
"openmdao.main.datatypes.api.Float",
"numpy.hstack",
"openmdao.main.datatypes.api.Enum",
"openmdao.main.interfaces.implements",
"cobyla.cobyla.cobyla",
"openmdao.main.datatypes.api.Int",
"numpy.zeros",
"cobyla.cobyla.closeunit",
"openmdao.util.decorators.add_delegate",
"openmdao.main.datatypes.api... | [((837, 898), 'openmdao.util.decorators.add_delegate', 'add_delegate', (['HasParameters', 'HasIneqConstraints', 'HasObjective'], {}), '(HasParameters, HasIneqConstraints, HasObjective)\n', (849, 898), False, 'from openmdao.util.decorators import add_delegate\n'), ((1211, 1285), 'openmdao.main.interfaces.implements', 'implements', (['IHasParameters', 'IHasIneqConstraints', 'IHasObjective', 'IOptimizer'], {}), '(IHasParameters, IHasIneqConstraints, IHasObjective, IOptimizer)\n', (1221, 1285), False, 'from openmdao.main.interfaces import IHasParameters, IHasIneqConstraints, IHasObjective, implements, IOptimizer\n'), ((1332, 1408), 'openmdao.main.datatypes.api.Float', 'Float', (['(1.0)'], {'iotype': '"""in"""', 'desc': '"""Reasonable initial changes to the variables."""'}), "(1.0, iotype='in', desc='Reasonable initial changes to the variables.')\n", (1337, 1408), False, 'from openmdao.main.datatypes.api import Enum, Float, Int, Str\n'), ((1442, 1544), 'openmdao.main.datatypes.api.Float', 'Float', (['(0.0001)'], {'iotype': '"""in"""', 'desc': '"""Final accuracy in the optimization (not precisely guaranteed)."""'}), "(0.0001, iotype='in', desc=\n 'Final accuracy in the optimization (not precisely guaranteed).')\n", (1447, 1544), False, 'from openmdao.main.datatypes.api import Enum, Float, Int, Str\n'), ((1598, 1700), 'openmdao.main.datatypes.api.Enum', 'Enum', (['(1)', '[0, 1, 2, 3]'], {'iotype': '"""in"""', 'desc': '"""Controls the frequency of output: 0 (no output),1,2,3."""'}), "(1, [0, 1, 2, 3], iotype='in', desc=\n 'Controls the frequency of output: 0 (no output),1,2,3.')\n", (1602, 1700), False, 'from openmdao.main.datatypes.api import Enum, Float, Int, Str\n'), ((1728, 1798), 'openmdao.main.datatypes.api.Int', 'Int', (['(1000)'], {'iotype': '"""in"""', 'desc': '"""Maximum number of function evaluations."""'}), "(1000, iotype='in', desc='Maximum number of function evaluations.')\n", (1731, 1798), False, 'from 
openmdao.main.datatypes.api import Enum, Float, Int, Str\n'), ((1828, 1904), 'openmdao.main.datatypes.api.Int', 'Int', (['(6)'], {'iotype': '"""in"""', 'desc': '"""Fortran output unit. Leave this at 6 for STDOUT."""'}), "(6, iotype='in', desc='Fortran output unit. Leave this at 6 for STDOUT.')\n", (1831, 1904), False, 'from openmdao.main.datatypes.api import Enum, Float, Int, Str\n'), ((1943, 2018), 'openmdao.main.datatypes.api.Str', 'Str', (['"""cobyla.out"""'], {'iotype': '"""in"""', 'desc': '"""Name of output file (if iout not 6)."""'}), "('cobyla.out', iotype='in', desc='Name of output file (if iout not 6).')\n", (1946, 2018), False, 'from openmdao.main.datatypes.api import Enum, Float, Int, Str\n'), ((2063, 2124), 'openmdao.main.datatypes.api.Int', 'Int', (['(0)'], {'iotype': '"""out"""', 'desc': '"""Error code returned from COBYLA."""'}), "(0, iotype='out', desc='Error code returned from COBYLA.')\n", (2066, 2124), False, 'from openmdao.main.datatypes.api import Enum, Float, Int, Str\n'), ((2398, 2411), 'numpy.zeros', 'zeros', (['(0)', '"""d"""'], {}), "(0, 'd')\n", (2403, 2411), False, 'from numpy import zeros, array, hstack\n'), ((2439, 2452), 'numpy.zeros', 'zeros', (['(0)', '"""d"""'], {}), "(0, 'd')\n", (2444, 2452), False, 'from numpy import zeros, array, hstack\n'), ((2471, 2484), 'numpy.zeros', 'zeros', (['(0)', '"""d"""'], {}), "(0, 'd')\n", (2476, 2484), False, 'from numpy import zeros, array, hstack\n'), ((2505, 2518), 'numpy.zeros', 'zeros', (['(0)', '"""d"""'], {}), "(0, 'd')\n", (2510, 2518), False, 'from numpy import zeros, array, hstack\n'), ((2536, 2549), 'numpy.zeros', 'zeros', (['(0)', '"""d"""'], {}), "(0, 'd')\n", (2541, 2549), False, 'from numpy import zeros, array, hstack\n'), ((3078, 3099), 'numpy.zeros', 'zeros', (['self.ncon', '"""d"""'], {}), "(self.ncon, 'd')\n", (3083, 3099), False, 'from numpy import zeros, array, hstack\n'), ((3127, 3148), 'numpy.zeros', 'zeros', (['self.ncon', '"""d"""'], {}), "(self.ncon, 'd')\n", (3132, 
3148), False, 'from numpy import zeros, array, hstack\n'), ((3417, 3467), 'numpy.zeros', 'zeros', (['[n * (3 * n + 2 * m + 11) + 4 * m + 6]', '"""d"""'], {}), "([n * (3 * n + 2 * m + 11) + 4 * m + 6], 'd')\n", (3422, 3467), False, 'from numpy import zeros, array, hstack\n'), ((3472, 3491), 'numpy.zeros', 'zeros', (['[m + 1]', '"""i"""'], {}), "([m + 1], 'i')\n", (3477, 3491), False, 'from numpy import zeros, array, hstack\n'), ((3508, 3523), 'numpy.zeros', 'zeros', (['[m]', '"""d"""'], {}), "([m], 'd')\n", (3513, 3523), False, 'from numpy import zeros, array, hstack\n'), ((4649, 4657), 'math.isnan', 'isnan', (['f'], {}), '(f)\n', (4654, 4657), False, 'from math import isnan\n'), ((4975, 5027), 'numpy.hstack', 'hstack', (['[cons, vals - self.lower, self.upper - vals]'], {}), '([cons, vals - self.lower, self.upper - vals])\n', (4981, 5027), False, 'from numpy import zeros, array, hstack\n'), ((3719, 3939), 'cobyla.cobyla.cobyla', 'cobyla', (['self._func', 'self.nparam', 'self.ncon', 'self.x', 'self.rhobeg', 'self.rhoend', 'self.iprint', 'self.maxfun', 'self.work_vector', 'self.iact', 'self.error_code', 'self.nfvals', 'self.iout', 'self.output_filename', 'self.ff', 'self.gg'], {}), '(self._func, self.nparam, self.ncon, self.x, self.rhobeg, self.rhoend,\n self.iprint, self.maxfun, self.work_vector, self.iact, self.error_code,\n self.nfvals, self.iout, self.output_filename, self.ff, self.gg)\n', (3725, 3939), False, 'from cobyla.cobyla import cobyla, closeunit\n'), ((4123, 4143), 'cobyla.cobyla.closeunit', 'closeunit', (['self.iout'], {}), '(self.iout)\n', (4132, 4143), False, 'from cobyla.cobyla import cobyla, closeunit\n')] |
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.weightstats import DescrStatsW
from scipy.stats import norm
from zepid.causal.utils import (propensity_score, plot_kde, plot_love,
standardized_mean_differences, positivity, _bounding_,
plot_kde_accuracy, outcome_accuracy)
class AIPTW:
r"""Augmented inverse probability of treatment weight estimator. This implementation calculates AIPTW for a
time-fixed exposure and a single time-point outcome. `AIPTW` supports correcting for informative censoring (missing
outcome data) through inverse probability of censoring/missingness weights.
AIPTW is a doubly robust estimator, with a desirable property. Both of the the g-formula and IPTW require that
our parametric regression models are correctly specified. Instead, AIPTW allows us to have two 'chances' at getting
the model correct. If either our outcome-model or treatment-model is correctly specified, then our estimate
will be unbiased. This property does not hold for the variance (i.e. the variance will not be doubly robust)
The augment-inverse probability weight estimator is calculated from the following formula
.. math::
\widehat{DR}(a) = \frac{YA}{\widehat{\Pr}(A=a|L)} - \frac{\hat{Y}^a*(A-\widehat{\Pr}(A=a|L)}{
\widehat{\Pr}(A=a|L)}
The risk difference and risk ratio are calculated using the following formulas, respectively
.. math::
\widehat{RD} = \widehat{DR}(a=1) - \widehat{DR}(a=0)
.. math::
\widehat{RR} = \frac{\widehat{DR}(a=1)}{\widehat{DR}(a=0)}
Confidence intervals for the risk difference come from the influence curve. Confidence intervals for the risk ratio
are less straight-forward. To get confidence intervals for the risk ratio, a bootstrap procedure should be used.
Parameters
----------
df : DataFrame
Pandas DataFrame object containing all variables of interest
exposure : str
Column name of the exposure variable. Currently only binary is supported
outcome : str
Column name of the outcome variable. Currently only binary is supported
weights : str, optional
Column name of weights. Weights allow for items like sampling weights to be used to estimate effects
alpha : float, optional
Alpha for confidence interval level. Default is 0.05, returning the 95% CL
Examples
--------
Set up the environment and the data set
>>> from zepid import load_sample_data, spline
>>> from zepid.causal.doublyrobust import AIPTW
>>> df = load_sample_data(timevary=False).drop(columns=['cd4_wk45'])
>>> df[['cd4_rs1','cd4_rs2']] = spline(df,'cd40',n_knots=3,term=2,restricted=True)
>>> df[['age_rs1','age_rs2']] = spline(df,'age0',n_knots=3,term=2,restricted=True)
Estimate the base AIPTW model
>>> aipw = AIPTW(df, exposure='art', outcome='dead')
>>> aipw.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
>>> aipw.outcome_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
>>> aipw.fit()
>>> aipw.summary()
Estimate AIPTW accounting for missing outcome data
>>> aipw = AIPTW(df, exposure='art', outcome='dead')
>>> aipw.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
>>> aipw.missing_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
>>> aipw.outcome_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
>>> aipw.fit()
>>> aipw.summary()
AIPTW for continuous outcomes
>>> df = load_sample_data(timevary=False).drop(columns=['dead'])
>>> df[['cd4_rs1','cd4_rs2']] = spline(df,'cd40',n_knots=3,term=2,restricted=True)
>>> df[['age_rs1','age_rs2']] = spline(df,'age0',n_knots=3,term=2,restricted=True)
>>> aipw = AIPTW(df, exposure='art', outcome='cd4_wk45')
>>> aipw.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
>>> aipw.missing_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
>>> aipw.outcome_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
>>> aipw.fit()
>>> aipw.summary()
>>> aipw = AIPTW(df, exposure='art', outcome='cd4_wk45')
>>> ymodel = 'art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0'
>>> aipw.exposure_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0')
>>> aipw.missing_model(ymodel)
>>> aipw.outcome_model(ymodel, continuous_distribution='poisson')
>>> aipw.fit()
>>> aipw.summary()
References
----------
    Funk MJ, Westreich D, Wiesen C, Stürmer T, Brookhart MA, & Davidian M. (2011). Doubly robust
    estimation of causal effects. American Journal of Epidemiology, 173(7), 761-767.
    Lunceford JK, Davidian M. (2004). Stratification and weighting via the propensity score in estimation of causal
    treatment effects: a comparative study. Statistics in medicine, 23(19), 2937-2960.
"""
    def __init__(self, df, exposure, outcome, weights=None, alpha=0.05):
        """Initialize the AIPTW estimator.

        Rows with missing data in any column *other than* the outcome are
        dropped (with a warning); missing outcome data is retained and flagged
        so it can later be handled via `missing_model()`.

        Parameters
        ----------
        df : DataFrame
            Pandas DataFrame containing all variables of interest
        exposure : str
            Column name of the (binary) exposure variable
        outcome : str
            Column name of the outcome variable
        weights : str, optional
            Column name of sampling weights. Default is None
        alpha : float, optional
            Alpha for confidence interval level. Default is 0.05
        """
        # Drop observations with missing covariate/exposure data (but keep missing outcomes)
        if df.dropna(subset=[d for d in df.columns if d != outcome]).shape[0] != df.shape[0]:
            warnings.warn("There is missing data that is not the outcome in the data set. AIPTW will drop "
                          "all missing data that is not missing outcome data. AIPTW will fit "
                          + str(df.dropna(subset=[d for d in df.columns if d != outcome]).shape[0]) +
                          ' of ' + str(df.shape[0]) + ' observations', UserWarning)
            self.df = df.copy().dropna(subset=[d for d in df.columns if d != outcome]).reset_index()
        else:
            self.df = df.copy().reset_index()
        # Checking to see if missing outcome data occurs
        self._missing_indicator = '__missing_indicator__'
        if self.df.dropna(subset=[outcome]).shape[0] != self.df.shape[0]:
            self._miss_flag = True
            # 1 = outcome observed, 0 = outcome missing
            self.df[self._missing_indicator] = np.where(self.df[outcome].isna(), 0, 1)
        else:
            self._miss_flag = False
            self.df[self._missing_indicator] = 1
        self.exposure = exposure
        self.outcome = outcome
        # Outcome treated as binary only when all observed values are in {0, 1}
        if df[outcome].dropna().value_counts().index.isin([0, 1]).all():
            self._continuous_outcome = False
        else:
            self._continuous_outcome = True
        self._weight_ = weights
        self.alpha = alpha
        # Result attributes, populated by fit()
        self.risk_difference = None
        self.risk_ratio = None
        self.risk_difference_ci = None
        self.risk_ratio_ci = None
        self.risk_difference_se = None
        self.average_treatment_effect = None
        self.average_treatment_effect_ci = None
        self.average_treatment_effect_se = None
        # Model-fitting state flags
        self._fit_exposure_ = False
        self._fit_outcome_ = False
        self._fit_missing_ = False
        self._exp_model = None
        self._out_model = None
def exposure_model(self, model, bound=False, print_results=True):
r"""Specify the propensity score / inverse probability weight model. Model used to predict the exposure via a
logistic regression model. This model estimates
.. math::
\widehat{\Pr}(A=1|L) = logit^{-1}(\widehat{\beta_0} + \widehat{\beta} L)
Parameters
----------
model : str
Independent variables to predict the exposure. For example, 'var1 + var2 + var3'
bound : float, list, optional
Value between 0,1 to truncate predicted probabilities. Helps to avoid near positivity violations.
Specifying this argument can improve finite sample performance for random positivity violations. However,
truncating weights leads to additional confounding. Default is False, meaning no truncation of
predicted probabilities occurs. Providing a single float assumes symmetric trunctation, where values below
or above the threshold are set to the threshold value. Alternatively a list of floats can be provided for
asymmetric trunctation, with the first value being the lower bound and the second being the upper bound
print_results : bool, optional
Whether to print the fitted model results. Default is True (prints results)
"""
self.__mweight = model
self._exp_model = self.exposure + ' ~ ' + model
fitmodel = propensity_score(self.df, self._exp_model, weights=self._weight_, print_results=print_results)
ps = fitmodel.predict(self.df)
self.df['_g1_'] = ps
self.df['_g0_'] = 1 - ps
# If bounds are requested
if bound:
self.df['_g1_'] = _bounding_(self.df['_g1_'], bounds=bound)
self.df['_g0_'] = _bounding_(self.df['_g0_'], bounds=bound)
self._fit_exposure_ = True
def missing_model(self, model, bound=False, print_results=True):
r"""Estimation of Pr(M=0|A,L), which is the missing data mechanism for the outcome. Predicted probabilities are
used to create inverse probability of censoring weights to account for informative missing data on the outcome.
Missing weights take the following form
.. math::
\frac{1}{\Pr(C=0|A=a, L)}
Weights are calculated for both A=1 and A=0
Note
----
The treatment variable should be included in the model
Parameters
----------
model : str
Independent variables to predict the exposure. Example) 'var1 + var2 + var3'. The treatment must be
included for the missing data model
bound : float, list, optional
Value between 0,1 to truncate predicted probabilities. Helps to avoid near positivity violations.
Specifying this argument can improve finite sample performance for random positivity violations. However,
truncating weights leads to additional confounding. Default is False, meaning no truncation of
predicted probabilities occurs. Providing a single float assumes symmetric trunctation, where values below
or above the threshold are set to the threshold value. Alternatively a list of floats can be provided for
asymmetric trunctation, with the first value being the lower bound and the second being the upper bound
print_results : bool, optional
Whether to print the fitted model results. Default is True (prints results)
"""
# Error if no missing outcome data
if not self._miss_flag:
raise ValueError("No missing outcome data is present in the data set")
# Warning if exposure is not included in the missingness of outcome model
if self.exposure not in model:
warnings.warn("For the specified missing outcome model, the exposure variable should be included in the "
"model", UserWarning)
# Warning if exposure is not included in the missingness of outcome model
if self.exposure not in model:
warnings.warn("For the specified missing outcome model, the exposure variable should be included in the "
"model", UserWarning)
self._miss_model = self._missing_indicator + ' ~ ' + model
fitmodel = propensity_score(self.df, self._miss_model, print_results=print_results)
dfx = self.df.copy()
dfx[self.exposure] = 1
self.df['_ipmw_a1_'] = np.where(self.df[self._missing_indicator] == 1,
fitmodel.predict(dfx), np.nan)
dfx = self.df.copy()
dfx[self.exposure] = 0
self.df['_ipmw_a0_'] = np.where(self.df[self._missing_indicator] == 1,
fitmodel.predict(dfx), np.nan)
# If bounds are requested
if bound:
self.df['_ipmw_a1_'] = _bounding_(self.df['_ipmw_a1_'], bounds=bound)
self.df['_ipmw_a0_'] = _bounding_(self.df['_ipmw_a0_'], bounds=bound)
self._fit_missing_ = True
def outcome_model(self, model, continuous_distribution='gaussian', print_results=True):
r"""Specify the outcome model. Model used to predict the outcome via a regression model. For binary outcome
data, a logistic regression model is used. For continuous outcomes, either linear or Poisson regression are
available.
.. math::
\widehat{\Pr}(Y|A,L) = logit^{-1}(\widehat{\beta_0} + \widehat{\beta_1} A + \widehat{\beta} L)
Parameters
----------
model : str
Independent variables to predict the outcome. For example, 'var1 + var2 + var3 + var4'
continuous_distribution : str, optional
Distribution to use for continuous outcomes. Options are 'gaussian' for normal distributions and 'poisson'
for Poisson distributions
print_results : bool, optional
Whether to print the fitted model results. Default is True (prints results)
"""
if self.exposure not in model:
warnings.warn("It looks like the exposure variable is missing from the outcome model", UserWarning)
self._out_model = self.outcome + ' ~ ' + model
if self._continuous_outcome:
self._continuous_type = continuous_distribution
if (continuous_distribution == 'gaussian') or (continuous_distribution == 'normal'):
f = sm.families.family.Gaussian()
elif continuous_distribution == 'poisson':
f = sm.families.family.Poisson()
else:
raise ValueError("Only 'gaussian' and 'poisson' distributions are supported")
else:
f = sm.families.family.Binomial()
if self._weight_ is None:
log = smf.glm(self._out_model, self.df, family=f).fit()
else:
log = smf.glm(self._out_model, self.df, freq_weights=self.df[self._weight_], family=f).fit()
if print_results:
print('\n----------------------------------------------------------------')
print('MODEL: ' + self._out_model)
print('-----------------------------------------------------------------')
print(log.summary())
# Generating predictions for observed variables
self._predicted_y_ = log.predict(self.df)
# Predicting under treatment strategies
dfx = self.df.copy()
dfx[self.exposure] = 1
self.df['_pY1_'] = log.predict(dfx)
dfx = self.df.copy()
dfx[self.exposure] = 0
self.df['_pY0_'] = log.predict(dfx)
self._fit_outcome_ = True
def fit(self):
"""Calculate the augmented inverse probability weights and effect measures from the predicted exposure
probabilities and predicted outcome values.
Note
----
Exposure and outcome models must be specified prior to `fit()`
Returns
-------
For binary outcomes, gains `risk_difference`, `risk_difference_ci`, and `risk_ratio` attributes. For continuous
outcomes, gains `average_treatment_effect` and `average_treatment_effect_ci` attributes
"""
if (self._fit_exposure_ is False) or (self._fit_outcome_ is False):
raise ValueError('The exposure and outcome models must be specified before the doubly robust estimate can '
'be generated')
if self._miss_flag and not self._fit_missing_:
warnings.warn("All missing outcome data is assumed to be missing completely at random. To relax this "
"assumption to outcome data is missing at random please use the `missing_model()` "
"function", UserWarning)
# Doubly robust estimator under all treated
a_obs = self.df[self.exposure]
y_obs = self.df[self.outcome]
py_a1 = self.df['_pY1_']
py_a0 = self.df['_pY0_']
if self._fit_missing_:
ps_g1 = self.df['_g1_'] * self.df['_ipmw_a1_']
ps_g0 = self.df['_g0_'] * self.df['_ipmw_a0_']
else:
ps_g1 = self.df['_g1_']
ps_g0 = self.df['_g0_']
# Doubly robust estimator under all treated
dr_a1 = np.where(a_obs == 1,
(y_obs / ps_g1) - ((py_a1 * ps_g0) / ps_g1),
py_a1)
# Doubly robust estimator under all untreated
dr_a0 = np.where(a_obs == 1,
py_a0,
(y_obs / ps_g0 - ((py_a0 * ps_g1) / ps_g0)))
# Generating estimates for the risk difference and risk ratio
zalpha = norm.ppf(1 - self.alpha / 2, loc=0, scale=1)
if self._weight_ is None:
if self._continuous_outcome:
self.average_treatment_effect = np.nanmean(dr_a1) - np.nanmean(dr_a0)
var_ic = np.nanvar((dr_a1 - dr_a0) - self.average_treatment_effect, ddof=1) / self.df.shape[0]
self.average_treatment_effect_se = np.sqrt(var_ic)
self.average_treatment_effect_ci = [self.average_treatment_effect - zalpha * np.sqrt(var_ic),
self.average_treatment_effect + zalpha * np.sqrt(var_ic)]
else:
self.risk_difference = np.nanmean(dr_a1) - np.nanmean(dr_a0)
self.risk_ratio = np.nanmean(dr_a1) / np.nanmean(dr_a0)
var_ic = np.nanvar((dr_a1 - dr_a0) - self.risk_difference, ddof=1) / self.df.shape[0]
self.risk_difference_se = np.sqrt(var_ic)
self.risk_difference_ci = [self.risk_difference - zalpha * np.sqrt(var_ic),
self.risk_difference + zalpha * np.sqrt(var_ic)]
else:
dr_m1 = DescrStatsW(dr_a1, weights=self.df[self._weight_]).mean
dr_m0 = DescrStatsW(dr_a0, weights=self.df[self._weight_]).mean
if self._continuous_outcome:
self.average_treatment_effect = dr_m1 - dr_m0
else:
self.risk_difference = dr_m1 - dr_m0
self.risk_ratio = dr_m1 / dr_m0
def summary(self, decimal=3):
"""Prints a summary of the results for the doubly robust estimator. Confidence intervals are only available for
the risk difference currently. For risk ratio confidence intervals, the user will need to manually conduct a
boostrapping procedure.
Parameters
----------
decimal : int, optional
Number of decimal places to display in the result
"""
if (self._fit_exposure_ is False) or (self._fit_exposure_ is False):
raise ValueError('The exposure and outcome models must be specified before the double robust estimate can '
'be generated')
print('======================================================================')
print(' Augmented Inverse Probability of Treatment Weights ')
print('======================================================================')
fmt = 'Treatment: {:<15} No. Observations: {:<20}'
print(fmt.format(self.exposure, self.df.shape[0]))
fmt = 'Outcome: {:<15} No. Missing Outcome: {:<20}'
print(fmt.format(self.outcome, np.sum(self.df[self.outcome].isnull())))
fmt = 'g-Model: {:<15} Q-model: {:<20}'
e = 'Logistic'
if self._continuous_outcome:
y = self._continuous_type
else:
y = 'Logistic'
print(fmt.format(e, y))
fmt = 'Missing model: {:<15}'
if self._fit_missing_:
m = 'Logistic'
else:
m = 'None'
print(fmt.format(m))
print('======================================================================')
if self._continuous_outcome:
print('Average Treatment Effect: ', round(float(self.average_treatment_effect), decimal))
if self._weight_ is None:
print(str(round(100 * (1 - self.alpha), 1)) + '% two-sided CI: (' +
str(round(self.average_treatment_effect_ci[0], decimal)), ',',
str(round(self.average_treatment_effect_ci[1], decimal)) + ')')
else:
print(str(round(100 * (1 - self.alpha), 1)) + '% two-sided CI: -')
else:
print('Risk Difference: ', round(float(self.risk_difference), decimal))
if self._weight_ is None:
print(str(round(100 * (1 - self.alpha), 1)) + '% two-sided CI: (' +
str(round(self.risk_difference_ci[0], decimal)), ',',
str(round(self.risk_difference_ci[1], decimal)) + ')')
else:
print(str(round(100 * (1 - self.alpha), 1)) + '% two-sided CI: -')
print('----------------------------------------------------------------------')
print('Risk Ratio: ', round(float(self.risk_ratio), decimal))
print(str(round(100 * (1 - self.alpha), 1)) + '% two-sided CI: -')
print('======================================================================')
    def run_diagnostics(self, decimal=3):
        """Run all currently implemented diagnostics for the exposure and outcome models. Each
        `run_diagnostics` provides results for all implemented diagnostics for ease of the user. For publication
        quality presentations, I recommend calling each diagnostic function individually and utilizing the optional
        parameters

        Note
        ----
        The plot presented cannot be edited. To edit the plots, call `plot_kde` or `plot_love` directly. Those
        functions return an axes object

        Parameters
        ----------
        decimal : int, optional
            Number of decimal places to display. Default is 3

        Returns
        -------
        None
        """
        if not self._fit_outcome_ or not self._fit_exposure_:
            raise ValueError("The exposure_model and outcome_model function must be ran before any diagnostics")
        # Weight diagnostics
        print('\tExposure Model Diagnostics')
        self.positivity(decimal=decimal)
        print('\n======================================================================')
        print('          Standardized Mean Differences')
        print('======================================================================')
        print(self.standardized_mean_differences().set_index(keys='labels'))
        print('======================================================================\n')
        # Outcome accuracy diagnostics
        print('\tOutcome Model Diagnostics')
        # Residuals: predicted minus observed outcome (NaN where outcome missing)
        v = self._predicted_y_ - self.df[self.outcome]
        outcome_accuracy(true=self.df[self.outcome], predicted=self._predicted_y_, decimal=decimal)
        # Inverse probability of treatment weights from the fitted propensity scores
        df = self.df.copy()
        df['_ipw_'] = np.where(df[self.exposure] == 1, 1 / df['_g1_'], 1 / (1 - df['_g1_']))
        # 2x2 figure: Love plot, propensity density, and accuracy density
        plt.figure(figsize=[8, 6])
        plt.subplot(221)
        plot_love(df=df, treatment=self.exposure, weight='_ipw_', formula=self.__mweight)
        plt.title("Love Plot")
        plt.subplot(223)
        plot_kde(df=df, treatment=self.exposure, probability='_g1_')
        plt.title("Kernel Density of Propensity Scores")
        plt.subplot(222)
        plot_kde_accuracy(values=v.dropna(), color='green')
        plt.title("Kernel Density of Accuracy")
        plt.tight_layout()
        plt.show()
    def positivity(self, decimal=3):
        """Use this to assess whether positivity is a valid assumption for the exposure model / calculated IPTW. If
        there are extreme outliers, this may indicate problems with the calculated weights. To reduce extreme weights,
        the `bound` argument can be specified in `exposure_model()`

        Parameters
        ----------
        decimal : int, optional
            Number of decimal places to display. Default is three

        Returns
        -------
        None
            Prints the positivity results to the console but does not return any objects
        """
        # Inverse probability of treatment weights from the fitted propensity scores
        df = self.df.copy()
        df['_ipw_'] = np.where(df[self.exposure] == 1, 1 / df['_g1_'], 1 / (1 - df['_g1_']))
        # pos is (mean, std, min, max) of the weights
        pos = positivity(df=df, weights='_ipw_')
        print('======================================================================')
        print('          Weight Positivity Diagnostics')
        print('======================================================================')
        print('If the mean of the weights is far from either the min or max, this may\n '
              'indicate the model is incorrect or positivity is violated')
        print('Average weight should be 2')
        print('----------------------------------------------------------------------')
        print('Mean weight:           ', round(pos[0], decimal))
        print('Standard Deviation:    ', round(pos[1], decimal))
        print('Minimum weight:        ', round(pos[2], decimal))
        print('Maximum weight:        ', round(pos[3], decimal))
        print('======================================================================\n')
def standardized_mean_differences(self):
"""Calculates the standardized mean differences for all variables based on the inverse probability weights.
Returns
-------
DataFrame
Returns pandas DataFrame of calculated standardized mean differences. Columns are labels (variables labels),
smd_u (unweighted standardized difference), and smd_w (weighted standardized difference)
"""
df = self.df.copy()
df['_ipw_'] = np.where(df[self.exposure] == 1, 1 / df['_g1_'], 1 / (1 - df['_g1_']))
s = standardized_mean_differences(df=df, treatment=self.exposure,
weight='_ipw_', formula=self.__mweight)
return s
def plot_kde(self, to_plot, bw_method='scott', fill=True, color='g', color_e='b', color_u='r'):
"""Generates density plots that can be used to check predictions qualitatively. Density plots can be generated
for assess either positivity violations of the exposure model or the accuracy in predicting the outcome for
the outcome model. The kernel density used is SciPy's Gaussian kernel. Either Scott's Rule or
Silverman's Rule can be implemented.
Parameters
------------
to_plot : str, optional
The plot to generate. Specifying 'exposure' returns only the density plot for treatment probabilities,
and 'outcome' returns only the density plot for the outcome accuracy
bw_method : str, optional
Method used to estimate the bandwidth. Following SciPy, either 'scott' or 'silverman' are valid options
fill : bool, optional
Whether to color the area under the density curves. Default is true
color : str, optional
Color of the line/area for predicted outcomes minus observed outcomes. Default is Green
color_e : str, optional
Color of the line/area for the treated group. Default is Blue
color_u : str, optional
Color of the line/area for the treated group. Default is Red
Returns
---------------
matplotlib axes
"""
if to_plot == 'exposure':
ax = plot_kde(df=self.df, treatment=self.exposure, probability='_g1_',
bw_method=bw_method, fill=fill, color_e=color_e, color_u=color_u)
ax.set_title("Kernel Density of Propensity Scores")
elif to_plot == 'outcome':
v = self._predicted_y_ - self.df[self.outcome]
ax = plot_kde_accuracy(values=v.dropna(), bw_method=bw_method, fill=fill, color=color)
ax.set_title("Kernel Density of Accuracy")
else:
raise ValueError("Please use one of the following options for `to_plot`; 'treatment', 'outcome'")
return ax
def plot_love(self, color_unweighted='r', color_weighted='b', shape_unweighted='o', shape_weighted='o'):
"""Generates a Love-plot to detail covariate balance based on the IPTW weights. Further details on the usage of
this plot are available in Austin PC & Stuart EA 2015 https://onlinelibrary.wiley.com/doi/full/10.1002/sim.6607
The Love plot generates a dashed line at standardized mean difference of 0.10. Ideally, weighted SMD are below
this level. Below 0.20 may also be sufficient. Variables above this level may be unbalanced despite the
weighting procedure. Different functional forms (or approaches like machine learning) may be worth considering
Parameters
----------
color_unweighted : str, optional
Color for the unweighted standardized mean differences. Default is red
color_weighted : str, optional
Color for the weighted standardized mean differences. Default is blue
shape_unweighted : str, optional
Shape of points for the unweighted standardized mean differences. Default is circles
shape_weighted:
Shape of points for the weighted standardized mean differences. Default is circles
Returns
-------
axes
Matplotlib axes of the Love plot
"""
df = self.df.copy()
df['_ipw_'] = np.where(df[self.exposure] == 1, 1 / df['_g1_'], 1 / (1 - df['_g1_']))
ax = plot_love(df=df, treatment=self.exposure, weight='_ipw_', formula=self.__mweight,
color_unweighted=color_unweighted, color_weighted=color_weighted,
shape_unweighted=shape_unweighted, shape_weighted=shape_weighted)
return ax
| [
"numpy.sqrt",
"zepid.causal.utils.plot_love",
"zepid.causal.utils.plot_kde",
"numpy.nanmean",
"statsmodels.api.families.family.Binomial",
"zepid.causal.utils.positivity",
"statsmodels.api.families.family.Gaussian",
"statsmodels.formula.api.glm",
"zepid.causal.utils.propensity_score",
"numpy.where"... | [((8650, 8748), 'zepid.causal.utils.propensity_score', 'propensity_score', (['self.df', 'self._exp_model'], {'weights': 'self._weight_', 'print_results': 'print_results'}), '(self.df, self._exp_model, weights=self._weight_,\n print_results=print_results)\n', (8666, 8748), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((11543, 11615), 'zepid.causal.utils.propensity_score', 'propensity_score', (['self.df', 'self._miss_model'], {'print_results': 'print_results'}), '(self.df, self._miss_model, print_results=print_results)\n', (11559, 11615), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((16508, 16574), 'numpy.where', 'np.where', (['(a_obs == 1)', '(y_obs / ps_g1 - py_a1 * ps_g0 / ps_g1)', 'py_a1'], {}), '(a_obs == 1, y_obs / ps_g1 - py_a1 * ps_g0 / ps_g1, py_a1)\n', (16516, 16574), True, 'import numpy as np\n'), ((16702, 16768), 'numpy.where', 'np.where', (['(a_obs == 1)', 'py_a0', '(y_obs / ps_g0 - py_a0 * ps_g1 / ps_g0)'], {}), '(a_obs == 1, py_a0, y_obs / ps_g0 - py_a0 * ps_g1 / ps_g0)\n', (16710, 16768), True, 'import numpy as np\n'), ((16913, 16957), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1 - self.alpha / 2)'], {'loc': '(0)', 'scale': '(1)'}), '(1 - self.alpha / 2, loc=0, scale=1)\n', (16921, 16957), False, 'from scipy.stats import norm\n'), ((23114, 23209), 'zepid.causal.utils.outcome_accuracy', 'outcome_accuracy', ([], {'true': 'self.df[self.outcome]', 'predicted': 'self._predicted_y_', 'decimal': 'decimal'}), '(true=self.df[self.outcome], predicted=self._predicted_y_,\n decimal=decimal)\n', (23130, 23209), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((23257, 
23327), 'numpy.where', 'np.where', (['(df[self.exposure] == 1)', "(1 / df['_g1_'])", "(1 / (1 - df['_g1_']))"], {}), "(df[self.exposure] == 1, 1 / df['_g1_'], 1 / (1 - df['_g1_']))\n", (23265, 23327), True, 'import numpy as np\n'), ((23337, 23363), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 6]'}), '(figsize=[8, 6])\n', (23347, 23363), True, 'import matplotlib.pyplot as plt\n'), ((23372, 23388), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (23383, 23388), True, 'import matplotlib.pyplot as plt\n'), ((23397, 23483), 'zepid.causal.utils.plot_love', 'plot_love', ([], {'df': 'df', 'treatment': 'self.exposure', 'weight': '"""_ipw_"""', 'formula': 'self.__mweight'}), "(df=df, treatment=self.exposure, weight='_ipw_', formula=self.\n __mweight)\n", (23406, 23483), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((23487, 23509), 'matplotlib.pyplot.title', 'plt.title', (['"""Love Plot"""'], {}), "('Love Plot')\n", (23496, 23509), True, 'import matplotlib.pyplot as plt\n'), ((23519, 23535), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (23530, 23535), True, 'import matplotlib.pyplot as plt\n'), ((23544, 23604), 'zepid.causal.utils.plot_kde', 'plot_kde', ([], {'df': 'df', 'treatment': 'self.exposure', 'probability': '"""_g1_"""'}), "(df=df, treatment=self.exposure, probability='_g1_')\n", (23552, 23604), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((23613, 23661), 'matplotlib.pyplot.title', 'plt.title', (['"""Kernel Density of Propensity Scores"""'], {}), "('Kernel Density of Propensity Scores')\n", (23622, 23661), True, 'import matplotlib.pyplot as plt\n'), ((23671, 23687), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (23682, 
23687), True, 'import matplotlib.pyplot as plt\n'), ((23756, 23795), 'matplotlib.pyplot.title', 'plt.title', (['"""Kernel Density of Accuracy"""'], {}), "('Kernel Density of Accuracy')\n", (23765, 23795), True, 'import matplotlib.pyplot as plt\n'), ((23804, 23822), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23820, 23822), True, 'import matplotlib.pyplot as plt\n'), ((23831, 23841), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23839, 23841), True, 'import matplotlib.pyplot as plt\n'), ((24517, 24587), 'numpy.where', 'np.where', (['(df[self.exposure] == 1)', "(1 / df['_g1_'])", "(1 / (1 - df['_g1_']))"], {}), "(df[self.exposure] == 1, 1 / df['_g1_'], 1 / (1 - df['_g1_']))\n", (24525, 24587), True, 'import numpy as np\n'), ((24603, 24637), 'zepid.causal.utils.positivity', 'positivity', ([], {'df': 'df', 'weights': '"""_ipw_"""'}), "(df=df, weights='_ipw_')\n", (24613, 24637), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((26027, 26097), 'numpy.where', 'np.where', (['(df[self.exposure] == 1)', "(1 / df['_g1_'])", "(1 / (1 - df['_g1_']))"], {}), "(df[self.exposure] == 1, 1 / df['_g1_'], 1 / (1 - df['_g1_']))\n", (26035, 26097), True, 'import numpy as np\n'), ((26110, 26216), 'zepid.causal.utils.standardized_mean_differences', 'standardized_mean_differences', ([], {'df': 'df', 'treatment': 'self.exposure', 'weight': '"""_ipw_"""', 'formula': 'self.__mweight'}), "(df=df, treatment=self.exposure, weight=\n '_ipw_', formula=self.__mweight)\n", (26139, 26216), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((29762, 29832), 'numpy.where', 'np.where', (['(df[self.exposure] == 1)', "(1 / df['_g1_'])", "(1 / (1 - df['_g1_']))"], {}), "(df[self.exposure] == 1, 1 / df['_g1_'], 1 / (1 - 
df['_g1_']))\n", (29770, 29832), True, 'import numpy as np\n'), ((29847, 30075), 'zepid.causal.utils.plot_love', 'plot_love', ([], {'df': 'df', 'treatment': 'self.exposure', 'weight': '"""_ipw_"""', 'formula': 'self.__mweight', 'color_unweighted': 'color_unweighted', 'color_weighted': 'color_weighted', 'shape_unweighted': 'shape_unweighted', 'shape_weighted': 'shape_weighted'}), "(df=df, treatment=self.exposure, weight='_ipw_', formula=self.\n __mweight, color_unweighted=color_unweighted, color_weighted=\n color_weighted, shape_unweighted=shape_unweighted, shape_weighted=\n shape_weighted)\n", (29856, 30075), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((8929, 8970), 'zepid.causal.utils._bounding_', '_bounding_', (["self.df['_g1_']"], {'bounds': 'bound'}), "(self.df['_g1_'], bounds=bound)\n", (8939, 8970), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((9001, 9042), 'zepid.causal.utils._bounding_', '_bounding_', (["self.df['_g0_']"], {'bounds': 'bound'}), "(self.df['_g0_'], bounds=bound)\n", (9011, 9042), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((11014, 11148), 'warnings.warn', 'warnings.warn', (['"""For the specified missing outcome model, the exposure variable should be included in the model"""', 'UserWarning'], {}), "(\n 'For the specified missing outcome model, the exposure variable should be included in the model'\n , UserWarning)\n", (11027, 11148), False, 'import warnings\n'), ((11302, 11436), 'warnings.warn', 'warnings.warn', (['"""For the specified missing outcome model, the exposure variable should be included in the model"""', 'UserWarning'], {}), "(\n 'For the 
specified missing outcome model, the exposure variable should be included in the model'\n , UserWarning)\n", (11315, 11436), False, 'import warnings\n'), ((12125, 12171), 'zepid.causal.utils._bounding_', '_bounding_', (["self.df['_ipmw_a1_']"], {'bounds': 'bound'}), "(self.df['_ipmw_a1_'], bounds=bound)\n", (12135, 12171), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((12207, 12253), 'zepid.causal.utils._bounding_', '_bounding_', (["self.df['_ipmw_a0_']"], {'bounds': 'bound'}), "(self.df['_ipmw_a0_'], bounds=bound)\n", (12217, 12253), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((13313, 13421), 'warnings.warn', 'warnings.warn', (['"""It looks like the exposure variable is missing from the outcome model"""', 'UserWarning'], {}), "(\n 'It looks like the exposure variable is missing from the outcome model',\n UserWarning)\n", (13326, 13421), False, 'import warnings\n'), ((13960, 13989), 'statsmodels.api.families.family.Binomial', 'sm.families.family.Binomial', ([], {}), '()\n', (13987, 13989), True, 'import statsmodels.api as sm\n'), ((15743, 15958), 'warnings.warn', 'warnings.warn', (['"""All missing outcome data is assumed to be missing completely at random. To relax this assumption to outcome data is missing at random please use the `missing_model()` function"""', 'UserWarning'], {}), "(\n 'All missing outcome data is assumed to be missing completely at random. 
To relax this assumption to outcome data is missing at random please use the `missing_model()` function'\n , UserWarning)\n", (15756, 15958), False, 'import warnings\n'), ((27752, 27888), 'zepid.causal.utils.plot_kde', 'plot_kde', ([], {'df': 'self.df', 'treatment': 'self.exposure', 'probability': '"""_g1_"""', 'bw_method': 'bw_method', 'fill': 'fill', 'color_e': 'color_e', 'color_u': 'color_u'}), "(df=self.df, treatment=self.exposure, probability='_g1_', bw_method\n =bw_method, fill=fill, color_e=color_e, color_u=color_u)\n", (27760, 27888), False, 'from zepid.causal.utils import propensity_score, plot_kde, plot_love, standardized_mean_differences, positivity, _bounding_, plot_kde_accuracy, outcome_accuracy\n'), ((13684, 13713), 'statsmodels.api.families.family.Gaussian', 'sm.families.family.Gaussian', ([], {}), '()\n', (13711, 13713), True, 'import statsmodels.api as sm\n'), ((17282, 17297), 'numpy.sqrt', 'np.sqrt', (['var_ic'], {}), '(var_ic)\n', (17289, 17297), True, 'import numpy as np\n'), ((17830, 17845), 'numpy.sqrt', 'np.sqrt', (['var_ic'], {}), '(var_ic)\n', (17837, 17845), True, 'import numpy as np\n'), ((18064, 18114), 'statsmodels.stats.weightstats.DescrStatsW', 'DescrStatsW', (['dr_a1'], {'weights': 'self.df[self._weight_]'}), '(dr_a1, weights=self.df[self._weight_])\n', (18075, 18114), False, 'from statsmodels.stats.weightstats import DescrStatsW\n'), ((18140, 18190), 'statsmodels.stats.weightstats.DescrStatsW', 'DescrStatsW', (['dr_a0'], {'weights': 'self.df[self._weight_]'}), '(dr_a0, weights=self.df[self._weight_])\n', (18151, 18190), False, 'from statsmodels.stats.weightstats import DescrStatsW\n'), ((13789, 13817), 'statsmodels.api.families.family.Poisson', 'sm.families.family.Poisson', ([], {}), '()\n', (13815, 13817), True, 'import statsmodels.api as sm\n'), ((14043, 14086), 'statsmodels.formula.api.glm', 'smf.glm', (['self._out_model', 'self.df'], {'family': 'f'}), '(self._out_model, self.df, family=f)\n', (14050, 14086), True, 'import 
statsmodels.formula.api as smf\n'), ((14125, 14210), 'statsmodels.formula.api.glm', 'smf.glm', (['self._out_model', 'self.df'], {'freq_weights': 'self.df[self._weight_]', 'family': 'f'}), '(self._out_model, self.df, freq_weights=self.df[self._weight_], family=f\n )\n', (14132, 14210), True, 'import statsmodels.formula.api as smf\n'), ((17082, 17099), 'numpy.nanmean', 'np.nanmean', (['dr_a1'], {}), '(dr_a1)\n', (17092, 17099), True, 'import numpy as np\n'), ((17102, 17119), 'numpy.nanmean', 'np.nanmean', (['dr_a0'], {}), '(dr_a0)\n', (17112, 17119), True, 'import numpy as np\n'), ((17145, 17209), 'numpy.nanvar', 'np.nanvar', (['(dr_a1 - dr_a0 - self.average_treatment_effect)'], {'ddof': '(1)'}), '(dr_a1 - dr_a0 - self.average_treatment_effect, ddof=1)\n', (17154, 17209), True, 'import numpy as np\n'), ((17576, 17593), 'numpy.nanmean', 'np.nanmean', (['dr_a1'], {}), '(dr_a1)\n', (17586, 17593), True, 'import numpy as np\n'), ((17596, 17613), 'numpy.nanmean', 'np.nanmean', (['dr_a0'], {}), '(dr_a0)\n', (17606, 17613), True, 'import numpy as np\n'), ((17648, 17665), 'numpy.nanmean', 'np.nanmean', (['dr_a1'], {}), '(dr_a1)\n', (17658, 17665), True, 'import numpy as np\n'), ((17668, 17685), 'numpy.nanmean', 'np.nanmean', (['dr_a0'], {}), '(dr_a0)\n', (17678, 17685), True, 'import numpy as np\n'), ((17711, 17766), 'numpy.nanvar', 'np.nanvar', (['(dr_a1 - dr_a0 - self.risk_difference)'], {'ddof': '(1)'}), '(dr_a1 - dr_a0 - self.risk_difference, ddof=1)\n', (17720, 17766), True, 'import numpy as np\n'), ((17391, 17406), 'numpy.sqrt', 'np.sqrt', (['var_ic'], {}), '(var_ic)\n', (17398, 17406), True, 'import numpy as np\n'), ((17501, 17516), 'numpy.sqrt', 'np.sqrt', (['var_ic'], {}), '(var_ic)\n', (17508, 17516), True, 'import numpy as np\n'), ((17921, 17936), 'numpy.sqrt', 'np.sqrt', (['var_ic'], {}), '(var_ic)\n', (17928, 17936), True, 'import numpy as np\n'), ((18013, 18028), 'numpy.sqrt', 'np.sqrt', (['var_ic'], {}), '(var_ic)\n', (18020, 18028), True, 'import numpy as 
np\n')] |
import argparse
import time
import cv2
import numpy as np
from estimator import TfPoseEstimator
from loguru import logger
from alfred.utils.log import init_logger
init_logger()
fps_time = 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tf-pose-estimation Video')
parser.add_argument('--video', type=str, default='./medias/dance.mp4')
parser.add_argument('--resolution', type=str, default='432x368', help='network input resolution. default=432x368')
parser.add_argument('--model', type=str, default='mobilenet_v2_1.4', help='mobilenet_v2_1.4 cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
parser.add_argument('--show-process', type=bool, default=True,
help='for debug purpose, if enabled, speed for inference is dropped.')
parser.add_argument('--showBG', type=bool, default=True, help='False to show skeleton only.')
args = parser.parse_args()
w, h = 432, 368
e = TfPoseEstimator('graph/{}/graph_freeze.pb'.format(args.model), target_size=(w, h))
cap = cv2.VideoCapture(args.video)
if cap.isOpened() is False:
print("Error opening video stream or file")
while cap.isOpened():
ret_val, image = cap.read()
tic = time.time()
humans = e.inference(image, resize_to_default=True, upsample_size=4.0)
if not args.showBG:
image = np.zeros(image.shape)
res = TfPoseEstimator.draw_humans(image, humans, imgcopy=True)
cv2.putText(res, "FPS: %f" % (1.0 / (time.time() - tic)), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.imshow('rr', res)
toc = time.time()
logger.info('inference %.4f seconds.' % (toc-tic))
if cv2.waitKey(1) == 27:
break
cv2.destroyAllWindows()
logger.debug('finished+')
| [
"argparse.ArgumentParser",
"loguru.logger.debug",
"loguru.logger.info",
"estimator.TfPoseEstimator.draw_humans",
"cv2.imshow",
"numpy.zeros",
"alfred.utils.log.init_logger",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.waitKey",
"time.time"
] | [((166, 179), 'alfred.utils.log.init_logger', 'init_logger', ([], {}), '()\n', (177, 179), False, 'from alfred.utils.log import init_logger\n'), ((1813, 1838), 'loguru.logger.debug', 'logger.debug', (['"""finished+"""'], {}), "('finished+')\n", (1825, 1838), False, 'from loguru import logger\n'), ((236, 299), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""tf-pose-estimation Video"""'}), "(description='tf-pose-estimation Video')\n", (259, 299), False, 'import argparse\n'), ((1072, 1100), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.video'], {}), '(args.video)\n', (1088, 1100), False, 'import cv2\n'), ((1789, 1812), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1810, 1812), False, 'import cv2\n'), ((1262, 1273), 'time.time', 'time.time', ([], {}), '()\n', (1271, 1273), False, 'import time\n'), ((1437, 1493), 'estimator.TfPoseEstimator.draw_humans', 'TfPoseEstimator.draw_humans', (['image', 'humans'], {'imgcopy': '(True)'}), '(image, humans, imgcopy=True)\n', (1464, 1493), False, 'from estimator import TfPoseEstimator\n'), ((1626, 1647), 'cv2.imshow', 'cv2.imshow', (['"""rr"""', 'res'], {}), "('rr', res)\n", (1636, 1647), False, 'import cv2\n'), ((1662, 1673), 'time.time', 'time.time', ([], {}), '()\n', (1671, 1673), False, 'import time\n'), ((1682, 1734), 'loguru.logger.info', 'logger.info', (["('inference %.4f seconds.' % (toc - tic))"], {}), "('inference %.4f seconds.' % (toc - tic))\n", (1693, 1734), False, 'from loguru import logger\n'), ((1401, 1422), 'numpy.zeros', 'np.zeros', (['image.shape'], {}), '(image.shape)\n', (1409, 1422), True, 'import numpy as np\n'), ((1745, 1759), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1756, 1759), False, 'import cv2\n'), ((1539, 1550), 'time.time', 'time.time', ([], {}), '()\n', (1548, 1550), False, 'import time\n')] |
import numpy as np
import nanonet.tb as tb
from test.test_hamiltonian_module import expected_bulk_silicon_band_structure
def test_simple_atomic_chain():
""" """
site_energy = -1.0
coupling = -1.0
l_const = 1.0
a = tb.Orbitals('A')
a.add_orbital(title='s', energy=-1, )
xyz_file = """1
H cell
A 0.0000000000 0.0000000000 0.0000000000
"""
tb.set_tb_params(PARAMS_A_A={'ss_sigma': -1.0})
h = tb.HamiltonianSp(xyz=xyz_file, nn_distance=1.1)
h.initialize()
PRIMITIVE_CELL = [[0, 0, l_const]]
h.set_periodic_bc(PRIMITIVE_CELL)
num_points = 10
kk = np.linspace(0, 3.14 / l_const, num_points, endpoint=True)
band_structure = []
for jj in range(num_points):
vals, _ = h.diagonalize_periodic_bc([0.0, 0.0, kk[jj]])
band_structure.append(vals)
band_structure = np.array(band_structure)
desired_value = site_energy + 2 * coupling * np.cos(l_const * kk)
np.testing.assert_allclose(band_structure, desired_value[:, np.newaxis], atol=1e-9)
def test_atomic_chain_two_kinds_of_atoms():
""" """
site_energy1 = -1.0
site_energy2 = -2.0
coupling = -1.0
l_const = 2.0
a = tb.Orbitals('A')
a.add_orbital(title='s', energy=site_energy1, )
b = tb.Orbitals('B')
b.add_orbital(title='s', energy=site_energy2, )
xyz_file = """2
H cell
A 0.0000000000 0.0000000000 0.0000000000
B 0.0000000000 0.0000000000 1.0000000000
"""
tb.set_tb_params(PARAMS_A_B={'ss_sigma': coupling})
h = tb.HamiltonianSp(xyz=xyz_file, nn_distance=1.1)
h.initialize()
PRIMITIVE_CELL = [[0, 0, l_const]]
h.set_periodic_bc(PRIMITIVE_CELL)
num_points = 10
kk = np.linspace(0, 3.14 / 2, num_points, endpoint=True)
band_structure = []
for jj in range(num_points):
vals, _ = h.diagonalize_periodic_bc([0.0, 0.0, kk[jj]])
band_structure.append(vals)
band_structure = np.array(band_structure)
desired_value = np.zeros(band_structure.shape)
b = site_energy1 + site_energy2
c = site_energy1 * site_energy2 - (2.0 * coupling * np.cos(0.5 * kk * l_const)) ** 2
desired_value[:, 0] = 0.5 * (b - np.sqrt(b ** 2 - 4.0 * c))
desired_value[:, 1] = 0.5 * (b + np.sqrt(b ** 2 - 4.0 * c))
np.testing.assert_allclose(band_structure, desired_value, atol=1e-9)
def test_bulk_silicon():
""" """
a_si = 5.50
PRIMITIVE_CELL = [[0, 0.5 * a_si, 0.5 * a_si],
[0.5 * a_si, 0, 0.5 * a_si],
[0.5 * a_si, 0.5 * a_si, 0]]
tb.Orbitals.orbital_sets = {'Si': 'SiliconSP3D5S'}
xyz_file = """2
Si2 cell
Si1 0.0000000000 0.0000000000 0.0000000000
Si2 1.3750000000 1.3750000000 1.3750000000
"""
h = tb.HamiltonianSp(xyz=xyz_file, nn_distance=2.5)
h.initialize()
h.set_periodic_bc(PRIMITIVE_CELL)
sym_points = ['L', 'GAMMA', 'X']
num_points = [10, 25]
k = tb.get_k_coords(sym_points, num_points, 'Si')
band_sructure = []
vals = np.zeros((sum(num_points), h.num_eigs), dtype=complex)
for jj, item in enumerate(k):
vals[jj, :], _ = h.diagonalize_periodic_bc(item)
band_structure = np.real(np.array(vals))
np.testing.assert_allclose(band_structure, expected_bulk_silicon_band_structure()[:,:h.num_eigs], atol=1e-4)
if __name__ == '__main__':
# test_simple_atomic_chain()
# test_atomic_chain_two_kinds_of_atoms()
test_bulk_silicon()
| [
"nanonet.tb.HamiltonianSp",
"numpy.sqrt",
"nanonet.tb.get_k_coords",
"nanonet.tb.set_tb_params",
"nanonet.tb.Orbitals",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"test.test_hamiltonian_module.expected_bulk_silicon_band_structure",
"numpy.cos"
] | [((237, 253), 'nanonet.tb.Orbitals', 'tb.Orbitals', (['"""A"""'], {}), "('A')\n", (248, 253), True, 'import nanonet.tb as tb\n'), ((397, 444), 'nanonet.tb.set_tb_params', 'tb.set_tb_params', ([], {'PARAMS_A_A': "{'ss_sigma': -1.0}"}), "(PARAMS_A_A={'ss_sigma': -1.0})\n", (413, 444), True, 'import nanonet.tb as tb\n'), ((453, 500), 'nanonet.tb.HamiltonianSp', 'tb.HamiltonianSp', ([], {'xyz': 'xyz_file', 'nn_distance': '(1.1)'}), '(xyz=xyz_file, nn_distance=1.1)\n', (469, 500), True, 'import nanonet.tb as tb\n'), ((628, 685), 'numpy.linspace', 'np.linspace', (['(0)', '(3.14 / l_const)', 'num_points'], {'endpoint': '(True)'}), '(0, 3.14 / l_const, num_points, endpoint=True)\n', (639, 685), True, 'import numpy as np\n'), ((867, 891), 'numpy.array', 'np.array', (['band_structure'], {}), '(band_structure)\n', (875, 891), True, 'import numpy as np\n'), ((967, 1055), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['band_structure', 'desired_value[:, np.newaxis]'], {'atol': '(1e-09)'}), '(band_structure, desired_value[:, np.newaxis],\n atol=1e-09)\n', (993, 1055), True, 'import numpy as np\n'), ((1204, 1220), 'nanonet.tb.Orbitals', 'tb.Orbitals', (['"""A"""'], {}), "('A')\n", (1215, 1220), True, 'import nanonet.tb as tb\n'), ((1281, 1297), 'nanonet.tb.Orbitals', 'tb.Orbitals', (['"""B"""'], {}), "('B')\n", (1292, 1297), True, 'import nanonet.tb as tb\n'), ((1508, 1559), 'nanonet.tb.set_tb_params', 'tb.set_tb_params', ([], {'PARAMS_A_B': "{'ss_sigma': coupling}"}), "(PARAMS_A_B={'ss_sigma': coupling})\n", (1524, 1559), True, 'import nanonet.tb as tb\n'), ((1568, 1615), 'nanonet.tb.HamiltonianSp', 'tb.HamiltonianSp', ([], {'xyz': 'xyz_file', 'nn_distance': '(1.1)'}), '(xyz=xyz_file, nn_distance=1.1)\n', (1584, 1615), True, 'import nanonet.tb as tb\n'), ((1743, 1794), 'numpy.linspace', 'np.linspace', (['(0)', '(3.14 / 2)', 'num_points'], {'endpoint': '(True)'}), '(0, 3.14 / 2, num_points, endpoint=True)\n', (1754, 1794), True, 'import numpy as np\n'), 
((1976, 2000), 'numpy.array', 'np.array', (['band_structure'], {}), '(band_structure)\n', (1984, 2000), True, 'import numpy as np\n'), ((2021, 2051), 'numpy.zeros', 'np.zeros', (['band_structure.shape'], {}), '(band_structure.shape)\n', (2029, 2051), True, 'import numpy as np\n'), ((2311, 2380), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['band_structure', 'desired_value'], {'atol': '(1e-09)'}), '(band_structure, desired_value, atol=1e-09)\n', (2337, 2380), True, 'import numpy as np\n'), ((2813, 2860), 'nanonet.tb.HamiltonianSp', 'tb.HamiltonianSp', ([], {'xyz': 'xyz_file', 'nn_distance': '(2.5)'}), '(xyz=xyz_file, nn_distance=2.5)\n', (2829, 2860), True, 'import nanonet.tb as tb\n'), ((2990, 3035), 'nanonet.tb.get_k_coords', 'tb.get_k_coords', (['sym_points', 'num_points', '"""Si"""'], {}), "(sym_points, num_points, 'Si')\n", (3005, 3035), True, 'import nanonet.tb as tb\n'), ((3248, 3262), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (3256, 3262), True, 'import numpy as np\n'), ((942, 962), 'numpy.cos', 'np.cos', (['(l_const * kk)'], {}), '(l_const * kk)\n', (948, 962), True, 'import numpy as np\n'), ((2215, 2240), 'numpy.sqrt', 'np.sqrt', (['(b ** 2 - 4.0 * c)'], {}), '(b ** 2 - 4.0 * c)\n', (2222, 2240), True, 'import numpy as np\n'), ((2279, 2304), 'numpy.sqrt', 'np.sqrt', (['(b ** 2 - 4.0 * c)'], {}), '(b ** 2 - 4.0 * c)\n', (2286, 2304), True, 'import numpy as np\n'), ((3311, 3349), 'test.test_hamiltonian_module.expected_bulk_silicon_band_structure', 'expected_bulk_silicon_band_structure', ([], {}), '()\n', (3347, 3349), False, 'from test.test_hamiltonian_module import expected_bulk_silicon_band_structure\n'), ((2145, 2171), 'numpy.cos', 'np.cos', (['(0.5 * kk * l_const)'], {}), '(0.5 * kk * l_const)\n', (2151, 2171), True, 'import numpy as np\n')] |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging as log
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value
from openvino.tools.mo.front.tf.common import tf_data_type_decode
from openvino.tools.mo.utils.error import Error
from openvino.tools.mo.utils.utils import refer_to_faq_msg
def tf_tensor_shape(pb):
return shape_array([dim.size if dim.size >= 0 else dynamic_dimension_value for dim in pb.dim])
def tf_int_list(pb):
return np.array(pb.i, dtype=np.int64)
def tf_dtype_extractor(pb_dtype, default=None):
return tf_data_type_decode[pb_dtype][0] if pb_dtype in tf_data_type_decode else default
def tf_data_format_spatial(pb):
if b"DHW" in pb.s:
return [pb.s.index(c) for c in b"DHW"]
return [pb.s.index(c) for c in b"HW"]
def tf_data_format_channel(pb):
return [pb.s.index(b'C')]
def tf_data_format_batch(pb):
return [pb.s.index(b'N')]
def get_tf_node_port(tensor):
delim = ':'
# tensor should have form 'name:port' or just 'name'
name_parts = tensor.split(delim)
if len(name_parts) == 1:
# just 'name', then port is 0 by default
return name_parts[0], 0
else:
# 'name:port', note name can contain ':' also but port is the last part
# TODO Is 'name' that contains other ':'s considered valid by TF?
return delim.join(name_parts[:-1]), int(name_parts[-1])
def tf_tensor_content(tf_dtype, shape, pb_tensor):
type_helper = tf_data_type_decode[tf_dtype] if tf_dtype in tf_data_type_decode else None
if type_helper is None:
raise Error("Data type is unsupported: {}. " +
refer_to_faq_msg(50), tf_dtype)
decode_err_msg = 'Failed to parse a tensor with Unicode characters. Note that Inference Engine does not support ' \
'string literals, so the string constant should be eliminated from the graph.'
if pb_tensor.tensor_content:
value = np.array(np.frombuffer(pb_tensor.tensor_content, type_helper[0]))
else:
# load typed value
if type_helper[0] != np.str:
value = np.array(type_helper[1](pb_tensor), dtype=type_helper[0])
else:
try:
value = np.array(type_helper[1](pb_tensor), dtype=type_helper[0])
except UnicodeDecodeError:
log.error(decode_err_msg, extra={'is_warning': True})
value = np.array(type_helper[1](pb_tensor))
# Ignore an empty value, if len(shape) > 1
# For example, value = [] and shape = [1, 1, 0]
# This is needed to reshape this value later and to return reshaped value = [[[]]]
# Otherwise there can be failures during partial inference, because we are storing an empty value with incorrect
# shape
if len(shape) == 0 or (len(shape) == 1 and shape.prod() == 0):
try:
value_length = len(value)
except TypeError:
# case, when value is a scalar
value_length = 0
if value_length == 1:
# return scalar if shape is [] otherwise broadcast according to shape
try:
return np.array(value[0], dtype=type_helper[0])
except UnicodeDecodeError:
log.error(decode_err_msg, extra={'is_warning': True})
return np.array(value[0])
else:
# no shape, return value as is
return value
if len(value) != shape.prod():
log.warning("Shape and content size of tensor don't match, shape: {} content size: {}".
format(shape, len(value)))
# broadcast semantics according to TensorFlow v1.5 documentation:
# The argument value can be a constant value, or a list of values of type dtype. If value is a list,
# then the length of the list must be less than or equal to the number of elements implied by the shape
# argument (if specified). In the case where the list length is less than the number of elements specified
# by shape, the last element in the list will be used to fill the remaining entries.
value_flatten = value.flatten()
add_value = value_flatten[-1]
add_length = shape.prod() - len(value_flatten)
value = np.concatenate([value_flatten, np.full([add_length], add_value)])
return value.reshape(shape)
def check_attr_type(a):
"""
Check type of attribute from TF prototxt message
param: a - attribute from TF prototxt message
return: type of attribute
"""
if a.s:
return 's'
if a.i:
return 'i'
if a.f:
return 'f'
if a.b:
return 'b'
if a.type:
return 'type'
if a.shape and a.shape.dim:
return 'shape'
if a.list:
return 'list'
def collect_tf_attrs(attrs):
"""
Function generates map for attributes and parsing functions
param: attrs - TF proto message with attributes
return: mapping attributes and parsing functions ready for use in update_node_stat function
"""
ret_attrs = {}
type_parsers = {
's': lambda x: x.s,
'i': lambda x: x.i,
'f': lambda x: x.f,
'b': lambda x: x.b,
'type': lambda x: tf_dtype_extractor(x.type),
'shape': lambda x: tf_tensor_shape(x.shape),
'list': lambda x: x.list
}
for a in attrs:
t = check_attr_type(attrs[a])
a_l = attrs[a]
while t == 'list':
a_l = type_parsers[t](attrs[a])
t = check_attr_type(a_l)
ret_attrs[a] = type_parsers[t](a_l)
return ret_attrs
| [
"openvino.tools.mo.front.common.partial_infer.utils.shape_array",
"numpy.full",
"numpy.array",
"openvino.tools.mo.utils.utils.refer_to_faq_msg",
"numpy.frombuffer",
"logging.error"
] | [((437, 530), 'openvino.tools.mo.front.common.partial_infer.utils.shape_array', 'shape_array', (['[(dim.size if dim.size >= 0 else dynamic_dimension_value) for dim in pb.dim]'], {}), '([(dim.size if dim.size >= 0 else dynamic_dimension_value) for\n dim in pb.dim])\n', (448, 530), False, 'from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value\n'), ((559, 589), 'numpy.array', 'np.array', (['pb.i'], {'dtype': 'np.int64'}), '(pb.i, dtype=np.int64)\n', (567, 589), True, 'import numpy as np\n'), ((2044, 2099), 'numpy.frombuffer', 'np.frombuffer', (['pb_tensor.tensor_content', 'type_helper[0]'], {}), '(pb_tensor.tensor_content, type_helper[0])\n', (2057, 2099), True, 'import numpy as np\n'), ((1733, 1753), 'openvino.tools.mo.utils.utils.refer_to_faq_msg', 'refer_to_faq_msg', (['(50)'], {}), '(50)\n', (1749, 1753), False, 'from openvino.tools.mo.utils.utils import refer_to_faq_msg\n'), ((3219, 3259), 'numpy.array', 'np.array', (['value[0]'], {'dtype': 'type_helper[0]'}), '(value[0], dtype=type_helper[0])\n', (3227, 3259), True, 'import numpy as np\n'), ((4355, 4387), 'numpy.full', 'np.full', (['[add_length]', 'add_value'], {}), '([add_length], add_value)\n', (4362, 4387), True, 'import numpy as np\n'), ((2421, 2474), 'logging.error', 'log.error', (['decode_err_msg'], {'extra': "{'is_warning': True}"}), "(decode_err_msg, extra={'is_warning': True})\n", (2430, 2474), True, 'import logging as log\n'), ((3315, 3368), 'logging.error', 'log.error', (['decode_err_msg'], {'extra': "{'is_warning': True}"}), "(decode_err_msg, extra={'is_warning': True})\n", (3324, 3368), True, 'import logging as log\n'), ((3392, 3410), 'numpy.array', 'np.array', (['value[0]'], {}), '(value[0])\n', (3400, 3410), True, 'import numpy as np\n')] |
import os
import pandas as pd
import numpy as np
import datetime as dt
import sys
from datetime import datetime
import rasterio
import geopandas as gpd
pkg_dir = os.path.join(os.path.dirname(__file__),'..')
sys.path.insert(0, pkg_dir)
from ela.textproc import *
from ela.spatial import *
from ela.classification import *
from ela.io import GeotiffExporter
from ela.utils import flip
from shapely.geometry import Point
def test_create_meshgrid():
xx, yy = create_meshgrid_cartesian(x_min=0.0, x_max=1.1, y_min=1.0, y_max=1.51, grid_res = 0.5)
assert xx.shape[0] == 3
assert xx.shape[1] == 2
assert yy.shape[0] == 3
assert yy.shape[1] == 2
class MockSlicePredictor:
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
def f(self, x, y):
return self.a * x + self.b * y + self.c
def predict_one_sample(self, sample):
x = sample[0]
y = sample[1]
return self.f(x, y)
def predict(self, X):
z = [self.predict_one_sample(x) for x in X]
return np.array(z)
def test_interpolate_slice():
m = create_meshgrid_cartesian(x_min=0.0, x_max=1.1, y_min=1.0, y_max=1.51, grid_res = 0.5)
xx, yy = m
a = 1.0
b = 0.1
c = 0.01
p = MockSlicePredictor(a, b, c)
def z_func(xi, yi):
return p.f(xx[xi, yi], yy[xi, yi])
predicted = interpolate_over_meshgrid(p, m)
assert predicted.shape[0] == 3
assert predicted.shape[1] == 2
assert predicted[0,0] == z_func(0, 0)
assert predicted[1,0] == z_func(1, 0)
assert predicted[2,0] == z_func(2, 0)
assert predicted[0,1] == z_func(0, 1)
assert predicted[1,1] == z_func(1, 1)
assert predicted[2,1] == z_func(2, 1)
# work around scikit behavior:
predicted = interpolate_over_meshgrid(None, m)
assert predicted.shape[0] == 3
assert predicted.shape[1] == 2
assert np.isnan(predicted[1,1])
def test_height_coordinate_functor():
z_index_for_ahd = z_index_for_ahd_functor(b=+100)
assert z_index_for_ahd(-100) == 0
assert z_index_for_ahd(-99) == 1
assert z_index_for_ahd(0) == 100
assert z_index_for_ahd(+50) == 150
def test_burn_volume():
dims = (3,4,5)
dim_x,dim_y,dim_z = dims
x = np.arange(0.0, dim_x*dim_y*dim_z, 1.0)
test_vol = np.reshape(x, dims)
z_index_for_ahd = z_index_for_ahd_functor(b=+1) # z = 0 is datum height -1, z = 4 is datum height 3
xx, yy = create_meshgrid_cartesian(x_min=0.0, x_max=0.51, y_min=0.0, y_max=0.76, grid_res = 0.25)
dem = xx + yy
assert dem[0,0] == 0.0
assert dem[1,1] == 0.5
assert dem[2,2] == 1.0
burnt = test_vol.copy()
burn_volume(burnt, dem, z_index_for_ahd, below=False, inclusive=False)
assert not np.isnan(burnt[0,0,0])
assert not np.isnan(burnt[0,0,1])
assert np.isnan(burnt[0,0,2])
assert not np.isnan(burnt[2,2,0])
assert not np.isnan(burnt[2,2,1])
assert not np.isnan(burnt[2,2,2])
assert np.isnan(burnt[2,2,3])
burnt = test_vol.copy()
burn_volume(burnt, dem, z_index_for_ahd, below=False, inclusive=True)
assert not np.isnan(burnt[0,0,0])
assert np.isnan(burnt[0,0,1])
assert np.isnan(burnt[0,0,2])
assert not np.isnan(burnt[2,2,0])
assert not np.isnan(burnt[2,2,1])
assert np.isnan(burnt[2,2,2])
assert np.isnan(burnt[2,2,3])
def test_slice_volume():
dims = (3,4,5)
dim_x,dim_y,dim_z = dims
x = np.arange(0.0, dim_x*dim_y*dim_z, 1.0)
test_vol = np.reshape(x, dims)
dem = np.empty((3, 4))
z_index_for_ahd = z_index_for_ahd_functor(b=+1) # z = 0 is datum height -1, z = 4 is datum height 3
dem[0,0] = -2.0
dem[0,1] = +5.0
dem[0,2] = -1.0
dem[0,3] = -1.0
dem[1,0] = -1.0
dem[1,1] = -1.0
dem[1,2] = -1.0
dem[1,3] = -1.0
dem[2,0] = -1.0
dem[2,1] = -1.0
dem[2,2] = np.nan
dem[2,3] = -1.0
# TODO: I do not really like using volume_value_at. Make sure this is unit tested itself.
def f(x, y):
return volume_value_at(test_vol, dem, z_index_for_ahd, x, y)
assert np.isnan(f(0,0))
assert np.isnan(f(0,1))
assert f(0,2) == test_vol[0,2,0]
assert f(0,3) == test_vol[0,3,0]
assert f(1,0) == test_vol[1,0,0]
assert f(1,1) == test_vol[1,1,0]
assert f(1,2) == test_vol[1,2,0]
assert f(1,3) == test_vol[1,3,0]
assert f(2,0) == test_vol[2,0,0]
assert f(2,1) == test_vol[2,1,0]
assert np.isnan(f(2,2))
assert f(2,3) == test_vol[2,3,0]
s = slice_volume(test_vol, dem, z_index_for_ahd)
assert np.isnan(s[0,0])
assert np.isnan(s[0,1])
assert f(0,2) == s[0,2]
assert f(0,3) == s[0,3]
assert f(1,0) == s[1,0]
assert f(1,1) == s[1,1]
assert f(1,2) == s[1,2]
assert f(1,3) == s[1,3]
assert f(2,0) == s[2,0]
assert f(2,1) == s[2,1]
assert np.isnan(s[2,2])
assert f(2,3) == s[2,3]
sops = SliceOperation(dem, z_index_for_ahd)
test_slices = sops.from_ahd_to_depth_below_ground_level(test_vol, from_depth=-1, to_depth=+1)
s = test_slices
assert s.shape[0] == dim_x
assert s.shape[1] == dim_y
assert s.shape[2] == 3
index_ground_lvl = 1 # the top level is for depth=0 (dem), but it is at index 1 in the resulting volume s. one metre below ground level is what is at index 0 for the third dimension.
assert np.isnan( s[0,0,index_ground_lvl])
assert np.isnan( s[0,1,index_ground_lvl])
assert f(0,2) == s[0,2,index_ground_lvl]
assert f(0,3) == s[0,3,index_ground_lvl]
assert f(1,0) == s[1,0,index_ground_lvl]
assert f(1,1) == s[1,1,index_ground_lvl]
assert f(1,2) == s[1,2,index_ground_lvl]
assert f(1,3) == s[1,3,index_ground_lvl]
assert f(2,0) == s[2,0,index_ground_lvl]
assert f(2,1) == s[2,1,index_ground_lvl]
assert np.isnan( s[2,2,index_ground_lvl])
assert f(2,3) == s[2,3,index_ground_lvl]
averaged_slices = sops.reduce_slices_at_depths(test_vol, from_depth=-1, to_depth=0, reduce_func=SliceOperation.arithmetic_average)
s = averaged_slices
assert np.isnan( s[0,0])
assert np.isnan( s[0,1])
# test_vol was constructed such that Z values increase by one at a given X/Y location, so the slicing/averaging result is like offsetting by 1/2:
assert f(0,2) + 0.5 == s[0,2]
assert f(0,3) + 0.5 == s[0,3]
assert f(1,0) + 0.5 == s[1,0]
assert f(1,1) + 0.5 == s[1,1]
assert f(1,2) + 0.5 == s[1,2]
assert f(1,3) + 0.5 == s[1,3]
assert f(2,0) + 0.5 == s[2,0]
assert f(2,1) + 0.5 == s[2,1]
assert np.isnan( s[2,2])
assert f(2,3) + 0.5 == s[2,3]
def get_test_bore_df():
x_min = 383200
y_max = 6422275
return pd.DataFrame({
EASTING_COL:np.array([x_min-.5,x_min+.5,x_min+1.1,x_min+1.1]),
NORTHING_COL:np.array([y_max-0.1,y_max-0.1,y_max-0.9,y_max-1.1]),
'fake_obs': np.array([.1, .2, .3, .4]),
DEPTH_FROM_COL: np.array([1.11, 2.22, 3.33, 4.44]),
DEPTH_TO_COL: np.array( [2.22, 3.33, 4.44, 5.55])
})
def create_test_slice(ni = 3, nj = 2, start=0.0, incr_1 = 1.0):
return np.array([
[(start + incr_1 * (i + ni * j)) for i in range(ni) ] for j in range(nj)
])
def get_slices_stack(n = 2, ni = 3, nj = 2, start=0.0, incr_1 = 1.0, incr_2 = 0.1):
x = create_test_slice(ni, nj, start, incr_1)
return [x + incr_2 * k for k in range(n)]
def create_test_raster(x_min = 383200, y_max = 6422275, grid_res = 1 , ni = 2, nj = 2, start=1.0, incr_1 = 1.0, output_file='c:/tmp/test_raster_drill.tif'):
crs = rasterio.crs.CRS({'proj': 'utm', 'zone': 50, 'south': True, 'ellps': 'GRS80', 'units': 'm', 'no_defs': True})
# Upper left hand corner is at (x_min, y_max), and in raster terms this is the origin.
from rasterio.transform import from_origin
transform = from_origin(x_min, y_max, grid_res, grid_res)
ge = GeotiffExporter(crs, transform)
# x = np.array([[1.0, 2.0],[3.0, 4.0]])
x = create_test_slice(ni, nj, start, incr_1)
ge.export_geotiff(x, output_file, None)
def test_raster_drill():
# create_test_raster(x_min = 383200, y_max = 6422275, grid_res = 1 ,output_file='c:/tmp/test_raster_drill.tif'):
x_min = 383200
y_max = 6422275
df = get_test_bore_df()
dem = rasterio.open(os.path.join(pkg_dir, 'tests', 'data', 'test_raster_drill.tif'))
cd = HeightDatumConverter(dem)
heights = cd.raster_drill_df(df)
assert np.isnan(heights[0])
assert heights[1] == 1.0
assert heights[2] == 2.0
assert heights[3] == 4.0
def test_add_ahd():
df = get_test_bore_df()
dem = rasterio.open(os.path.join(pkg_dir, 'tests', 'data', 'test_raster_drill.tif'))
cd = HeightDatumConverter(dem)
df_ahd = cd.add_height(df)
from_ahd = df_ahd[DEPTH_FROM_AHD_COL]
to_ahd = df_ahd[DEPTH_TO_AHD_COL]
assert np.isnan(from_ahd[0])
assert from_ahd[1] == 1.0 - 2.22
assert from_ahd[2] == 2.0 - 3.33
assert from_ahd[3] == 4.0 - 4.44
assert np.isnan(to_ahd[0])
assert to_ahd[1] == 1.0 - 3.33
assert to_ahd[2] == 2.0 - 4.44
assert to_ahd[3] == 4.0 - 5.55
def test_average_slices():
slices = get_slices_stack()
avg = average_slices(slices)
incr_2 = 0.1
assert avg[0,0] == incr_2 / 2
assert avg[0,1] == (incr_2 / 2) + 1.0
assert avg[1,0] == (incr_2 / 2) + 3.0
# create_test_raster(x_min = 383200, y_max = 6422275, grid_res = 25 , ni = 10, nj = 10, start=0.0, incr_1 = 1.0, output_file=os.path.join(pkg_dir, 'tests', 'data', 'test_raster_25m.tif'))
def test_surface_array():
    """Sampling the 25m test DEM on a coarser 100m grid picks the expected cells."""
    dem = rasterio.open(os.path.join(pkg_dir, 'tests', 'data', 'test_raster_25m.tif'))
    grid_res = 100
    # 5m offsets keep the sample points inside the first row/column of the DEM.
    x_min = 383200 + 5
    y_max = 6422275 - 5
    # 200m spans at 25m DEM resolution land in the 9th row/column from the edge.
    y_min = y_max - 2 * grid_res
    x_max = x_min + 2 * grid_res
    surf_dem = surface_array(dem, x_min, y_min, x_max, y_max, grid_res)
    assert surf_dem.shape[0] == 2
    assert surf_dem.shape[1] == 2
    expected = {
        (0, 0): 80 + 0.0,
        (0, 1): 80 + -4 * 10.0,
        (1, 0): 80 + 4.0,
        (1, 1): 80 + -4 * 10.0 + 4.0,
    }
    for cell, value in expected.items():
        assert surf_dem[cell] == value
def test_flip():
    """Flipping along each axis mirrors the single marked voxel on that axis only."""
    marker = 3.14
    volume = np.zeros([2, 3, 4])
    volume[1, 2, 3] = marker
    # Axis k index i maps to (dim_k - 1 - i); the other axes are untouched.
    assert flip(volume, 0)[0, 2, 3] == marker
    assert flip(volume, 1)[1, 0, 3] == marker
    assert flip(volume, 2)[1, 2, 0] == marker
def test_get_coords_from_gpd_shape():
    """Duplicate easting/northing pairs collapse to unique coordinates that survive a GeoDataFrame round-trip."""
    easting_values = np.array([.1, .2, .3, .3])
    northing_values = np.array([.12, .22, .32, .32])
    coords = get_unique_coordinates(easting_values, northing_values)
    # The last pair duplicates the third, leaving three unique (x, y) points.
    assert coords.shape[0] == 3
    assert coords.shape[1] == 2
    points_df = pd.DataFrame({'Coordinates': list(zip(coords[:, 0], coords[:, 1]))})
    points_df['Coordinates'] = points_df['Coordinates'].apply(Point)
    gdf = gpd.GeoDataFrame(points_df, geometry='Coordinates')
    gdf.crs = "+proj=utm +zone=56 +ellps=GRS80 +south +units=m +no_defs"
    geoloc = get_coords_from_gpd_shape(gdf, colname='Coordinates', out_colnames=['xx', 'yy'])
    xs = geoloc.xx
    ys = geoloc.yy
    assert len(xs) == 3
    for i, (exp_x, exp_y) in enumerate([(.1, .12), (.2, .22), (.3, .32)]):
        assert xs[i] == exp_x
        assert ys[i] == exp_y
def test_rounding_depths():
    """Rounding interval depths to whole metres merges sub-metre intervals."""
    n = 6
    df = pd.DataFrame({
        EASTING_COL: np.full(n, 1.1),
        NORTHING_COL: np.full(n, 2.2),
        'fake_obs': np.array([.1, .2, .3, .4, .5, .6]),
        DEPTH_FROM_COL: np.array([1.11, 1.16, 2.22, 3.33, 3.38, 4.44]),
        DEPTH_TO_COL: np.array([1.16, 2.22, 3.33, 3.38, 4.44, 5.55]),
    })
    rounder = DepthsRounding(DEPTH_FROM_COL, DEPTH_TO_COL)
    # Two of the six intervals collapse to zero thickness once rounded.
    assert rounder.assess_num_collapsed(df) == 2
    rounded = rounder.round_to_metre_depths(df, np.round, True)
    depth_from = rounded[DEPTH_FROM_COL].values
    depth_to = rounded[DEPTH_TO_COL].values
    for i, expected in enumerate([1.0, 2.0, 3.0, 4.0]):
        assert depth_from[i] == expected
    for i, expected in enumerate([2.0, 3.0, 4.0, 6.0]):
        assert depth_to[i] == expected
# test_add_ahd()
# test_flip()
# test_surface_array()
# test_average_slices()
# test_slice_volume()
# test_interpolate_slice()
# test_burn_volume()
# test_height_coordinate_functor()
# # test_make_training_set()
# test_raster_drill()
# test_get_coords_from_gpd_shape()
# test_rounding_depths()
| [
"rasterio.crs.CRS",
"sys.path.insert",
"numpy.reshape",
"ela.utils.flip",
"rasterio.transform.from_origin",
"ela.io.GeotiffExporter",
"os.path.join",
"os.path.dirname",
"numpy.zeros",
"numpy.array",
"numpy.isnan",
"numpy.empty",
"numpy.full",
"geopandas.GeoDataFrame",
"numpy.arange"
] | [((209, 236), 'sys.path.insert', 'sys.path.insert', (['(0)', 'pkg_dir'], {}), '(0, pkg_dir)\n', (224, 236), False, 'import sys\n'), ((176, 201), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (191, 201), False, 'import os\n'), ((1893, 1918), 'numpy.isnan', 'np.isnan', (['predicted[1, 1]'], {}), '(predicted[1, 1])\n', (1901, 1918), True, 'import numpy as np\n'), ((2243, 2285), 'numpy.arange', 'np.arange', (['(0.0)', '(dim_x * dim_y * dim_z)', '(1.0)'], {}), '(0.0, dim_x * dim_y * dim_z, 1.0)\n', (2252, 2285), True, 'import numpy as np\n'), ((2297, 2316), 'numpy.reshape', 'np.reshape', (['x', 'dims'], {}), '(x, dims)\n', (2307, 2316), True, 'import numpy as np\n'), ((2812, 2836), 'numpy.isnan', 'np.isnan', (['burnt[0, 0, 2]'], {}), '(burnt[0, 0, 2])\n', (2820, 2836), True, 'import numpy as np\n'), ((2961, 2985), 'numpy.isnan', 'np.isnan', (['burnt[2, 2, 3]'], {}), '(burnt[2, 2, 3])\n', (2969, 2985), True, 'import numpy as np\n'), ((3136, 3160), 'numpy.isnan', 'np.isnan', (['burnt[0, 0, 1]'], {}), '(burnt[0, 0, 1])\n', (3144, 3160), True, 'import numpy as np\n'), ((3170, 3194), 'numpy.isnan', 'np.isnan', (['burnt[0, 0, 2]'], {}), '(burnt[0, 0, 2])\n', (3178, 3194), True, 'import numpy as np\n'), ((3281, 3305), 'numpy.isnan', 'np.isnan', (['burnt[2, 2, 2]'], {}), '(burnt[2, 2, 2])\n', (3289, 3305), True, 'import numpy as np\n'), ((3315, 3339), 'numpy.isnan', 'np.isnan', (['burnt[2, 2, 3]'], {}), '(burnt[2, 2, 3])\n', (3323, 3339), True, 'import numpy as np\n'), ((3420, 3462), 'numpy.arange', 'np.arange', (['(0.0)', '(dim_x * dim_y * dim_z)', '(1.0)'], {}), '(0.0, dim_x * dim_y * dim_z, 1.0)\n', (3429, 3462), True, 'import numpy as np\n'), ((3474, 3493), 'numpy.reshape', 'np.reshape', (['x', 'dims'], {}), '(x, dims)\n', (3484, 3493), True, 'import numpy as np\n'), ((3504, 3520), 'numpy.empty', 'np.empty', (['(3, 4)'], {}), '((3, 4))\n', (3512, 3520), True, 'import numpy as np\n'), ((4531, 4548), 'numpy.isnan', 'np.isnan', (['s[0, 0]'], 
{}), '(s[0, 0])\n', (4539, 4548), True, 'import numpy as np\n'), ((4559, 4576), 'numpy.isnan', 'np.isnan', (['s[0, 1]'], {}), '(s[0, 1])\n', (4567, 4576), True, 'import numpy as np\n'), ((4811, 4828), 'numpy.isnan', 'np.isnan', (['s[2, 2]'], {}), '(s[2, 2])\n', (4819, 4828), True, 'import numpy as np\n'), ((5310, 5345), 'numpy.isnan', 'np.isnan', (['s[0, 0, index_ground_lvl]'], {}), '(s[0, 0, index_ground_lvl])\n', (5318, 5345), True, 'import numpy as np\n'), ((5356, 5391), 'numpy.isnan', 'np.isnan', (['s[0, 1, index_ground_lvl]'], {}), '(s[0, 1, index_ground_lvl])\n', (5364, 5391), True, 'import numpy as np\n'), ((5762, 5797), 'numpy.isnan', 'np.isnan', (['s[2, 2, index_ground_lvl]'], {}), '(s[2, 2, index_ground_lvl])\n', (5770, 5797), True, 'import numpy as np\n'), ((6012, 6029), 'numpy.isnan', 'np.isnan', (['s[0, 0]'], {}), '(s[0, 0])\n', (6020, 6029), True, 'import numpy as np\n'), ((6041, 6058), 'numpy.isnan', 'np.isnan', (['s[0, 1]'], {}), '(s[0, 1])\n', (6049, 6058), True, 'import numpy as np\n'), ((6492, 6509), 'numpy.isnan', 'np.isnan', (['s[2, 2]'], {}), '(s[2, 2])\n', (6500, 6509), True, 'import numpy as np\n'), ((7481, 7594), 'rasterio.crs.CRS', 'rasterio.crs.CRS', (["{'proj': 'utm', 'zone': 50, 'south': True, 'ellps': 'GRS80', 'units': 'm',\n 'no_defs': True}"], {}), "({'proj': 'utm', 'zone': 50, 'south': True, 'ellps':\n 'GRS80', 'units': 'm', 'no_defs': True})\n", (7497, 7594), False, 'import rasterio\n'), ((7745, 7790), 'rasterio.transform.from_origin', 'from_origin', (['x_min', 'y_max', 'grid_res', 'grid_res'], {}), '(x_min, y_max, grid_res, grid_res)\n', (7756, 7790), False, 'from rasterio.transform import from_origin\n'), ((7800, 7831), 'ela.io.GeotiffExporter', 'GeotiffExporter', (['crs', 'transform'], {}), '(crs, transform)\n', (7815, 7831), False, 'from ela.io import GeotiffExporter\n'), ((8351, 8371), 'numpy.isnan', 'np.isnan', (['heights[0]'], {}), '(heights[0])\n', (8359, 8371), True, 'import numpy as np\n'), ((8754, 8775), 'numpy.isnan', 
'np.isnan', (['from_ahd[0]'], {}), '(from_ahd[0])\n', (8762, 8775), True, 'import numpy as np\n'), ((8898, 8917), 'numpy.isnan', 'np.isnan', (['to_ahd[0]'], {}), '(to_ahd[0])\n', (8906, 8917), True, 'import numpy as np\n'), ((10250, 10269), 'numpy.zeros', 'np.zeros', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (10258, 10269), True, 'import numpy as np\n'), ((10460, 10490), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 0.3]'], {}), '([0.1, 0.2, 0.3, 0.3])\n', (10468, 10490), True, 'import numpy as np\n'), ((10510, 10544), 'numpy.array', 'np.array', (['[0.12, 0.22, 0.32, 0.32]'], {}), '([0.12, 0.22, 0.32, 0.32])\n', (10518, 10544), True, 'import numpy as np\n'), ((10828, 10875), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['ptsdf'], {'geometry': '"""Coordinates"""'}), "(ptsdf, geometry='Coordinates')\n", (10844, 10875), True, 'import geopandas as gpd\n'), ((1061, 1072), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (1069, 1072), True, 'import numpy as np\n'), ((2740, 2764), 'numpy.isnan', 'np.isnan', (['burnt[0, 0, 0]'], {}), '(burnt[0, 0, 0])\n', (2748, 2764), True, 'import numpy as np\n'), ((2778, 2802), 'numpy.isnan', 'np.isnan', (['burnt[0, 0, 1]'], {}), '(burnt[0, 0, 1])\n', (2786, 2802), True, 'import numpy as np\n'), ((2851, 2875), 'numpy.isnan', 'np.isnan', (['burnt[2, 2, 0]'], {}), '(burnt[2, 2, 0])\n', (2859, 2875), True, 'import numpy as np\n'), ((2889, 2913), 'numpy.isnan', 'np.isnan', (['burnt[2, 2, 1]'], {}), '(burnt[2, 2, 1])\n', (2897, 2913), True, 'import numpy as np\n'), ((2927, 2951), 'numpy.isnan', 'np.isnan', (['burnt[2, 2, 2]'], {}), '(burnt[2, 2, 2])\n', (2935, 2951), True, 'import numpy as np\n'), ((3102, 3126), 'numpy.isnan', 'np.isnan', (['burnt[0, 0, 0]'], {}), '(burnt[0, 0, 0])\n', (3110, 3126), True, 'import numpy as np\n'), ((3209, 3233), 'numpy.isnan', 'np.isnan', (['burnt[2, 2, 0]'], {}), '(burnt[2, 2, 0])\n', (3217, 3233), True, 'import numpy as np\n'), ((3247, 3271), 'numpy.isnan', 'np.isnan', (['burnt[2, 2, 1]'], {}), '(burnt[2, 2, 
1])\n', (3255, 3271), True, 'import numpy as np\n'), ((8203, 8266), 'os.path.join', 'os.path.join', (['pkg_dir', '"""tests"""', '"""data"""', '"""test_raster_drill.tif"""'], {}), "(pkg_dir, 'tests', 'data', 'test_raster_drill.tif')\n", (8215, 8266), False, 'import os\n'), ((8532, 8595), 'os.path.join', 'os.path.join', (['pkg_dir', '"""tests"""', '"""data"""', '"""test_raster_drill.tif"""'], {}), "(pkg_dir, 'tests', 'data', 'test_raster_drill.tif')\n", (8544, 8595), False, 'import os\n'), ((9491, 9552), 'os.path.join', 'os.path.join', (['pkg_dir', '"""tests"""', '"""data"""', '"""test_raster_25m.tif"""'], {}), "(pkg_dir, 'tests', 'data', 'test_raster_25m.tif')\n", (9503, 9552), False, 'import os\n'), ((6654, 6716), 'numpy.array', 'np.array', (['[x_min - 0.5, x_min + 0.5, x_min + 1.1, x_min + 1.1]'], {}), '([x_min - 0.5, x_min + 0.5, x_min + 1.1, x_min + 1.1])\n', (6662, 6716), True, 'import numpy as np\n'), ((6726, 6788), 'numpy.array', 'np.array', (['[y_max - 0.1, y_max - 0.1, y_max - 0.9, y_max - 1.1]'], {}), '([y_max - 0.1, y_max - 0.1, y_max - 0.9, y_max - 1.1])\n', (6734, 6788), True, 'import numpy as np\n'), ((6799, 6829), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 0.4]'], {}), '([0.1, 0.2, 0.3, 0.4])\n', (6807, 6829), True, 'import numpy as np\n'), ((6851, 6885), 'numpy.array', 'np.array', (['[1.11, 2.22, 3.33, 4.44]'], {}), '([1.11, 2.22, 3.33, 4.44])\n', (6859, 6885), True, 'import numpy as np\n'), ((6909, 6943), 'numpy.array', 'np.array', (['[2.22, 3.33, 4.44, 5.55]'], {}), '([2.22, 3.33, 4.44, 5.55])\n', (6917, 6943), True, 'import numpy as np\n'), ((10299, 10309), 'ela.utils.flip', 'flip', (['m', '(0)'], {}), '(m, 0)\n', (10303, 10309), False, 'from ela.utils import flip\n'), ((10336, 10346), 'ela.utils.flip', 'flip', (['m', '(1)'], {}), '(m, 1)\n', (10340, 10346), False, 'from ela.utils import flip\n'), ((10373, 10383), 'ela.utils.flip', 'flip', (['m', '(2)'], {}), '(m, 2)\n', (10377, 10383), False, 'from ela.utils import flip\n'), ((11319, 11334), 
'numpy.full', 'np.full', (['n', '(1.1)'], {}), '(n, 1.1)\n', (11326, 11334), True, 'import numpy as np\n'), ((11357, 11372), 'numpy.full', 'np.full', (['n', '(2.2)'], {}), '(n, 2.2)\n', (11364, 11372), True, 'import numpy as np\n'), ((11394, 11434), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]'], {}), '([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])\n', (11402, 11434), True, 'import numpy as np\n'), ((11454, 11500), 'numpy.array', 'np.array', (['[1.11, 1.16, 2.22, 3.33, 3.38, 4.44]'], {}), '([1.11, 1.16, 2.22, 3.33, 3.38, 4.44])\n', (11462, 11500), True, 'import numpy as np\n'), ((11524, 11570), 'numpy.array', 'np.array', (['[1.16, 2.22, 3.33, 3.38, 4.44, 5.55]'], {}), '([1.16, 2.22, 3.33, 3.38, 4.44, 5.55])\n', (11532, 11570), True, 'import numpy as np\n')] |
from hybrid_astar_planner.HybridAStar.hybrid_astar_wrapper \
import apply_hybrid_astar
import numpy as np
from pylot.planning.planner import Planner
class HybridAStarPlanner(Planner):
    """Wrapper around the Hybrid A* planner.
    Note:
        Details can be found at `Hybrid A* Planner`_.
    Args:
        world: (:py:class:`~pylot.planning.world.World`): A reference to the
            planning world.
        flags (absl.flags): Object to be used to access absl flags.
    .. _Hybrid A* Planner:
       https://github.com/erdos-project/hybrid_astar_planner
    """
    def __init__(self, world, flags, logger):
        super().__init__(world, flags, logger)
        # Snapshot of all Hybrid A*-related flags, passed verbatim to the
        # planner backend on every run.
        self._hyperparameters = {
            "step_size": flags.step_size_hybrid_astar,
            "max_iterations": flags.max_iterations_hybrid_astar,
            "completion_threshold": flags.completion_threshold,
            "angle_completion_threshold": flags.angle_completion_threshold,
            "rad_step": flags.rad_step,
            "rad_upper_range": flags.rad_upper_range,
            "rad_lower_range": flags.rad_lower_range,
            "obstacle_clearance": flags.obstacle_clearance_hybrid_astar,
            "lane_width": flags.lane_width_hybrid_astar,
            "radius": flags.radius,
            "car_length": flags.car_length,
            "car_width": flags.car_width,
        }

    def run(self, timestamp, ttd=None):
        """Runs the planner.
        Note:
            The planner assumes that the world is up-to-date.
        Returns:
            :py:class:`~pylot.planning.waypoints.Waypoints`: Waypoints of the
            planned trajectory.
        """
        obstacle_list = self._world.get_obstacle_list()
        if len(obstacle_list) == 0:
            # Do not use Hybrid A* if there are no obstacles.
            output_wps = self._world.follow_waypoints(self._flags.target_speed)
        else:
            # Hybrid a* does not take into account the driveable region.
            # It constructs search space as a top down, minimum bounding
            # rectangle with padding in each dimension.
            self._logger.debug("@{}: Hyperparameters: {}".format(
                timestamp, self._hyperparameters))
            initial_conditions = self._compute_initial_conditions(
                obstacle_list)
            self._logger.debug("@{}: Initial conditions: {}".format(
                timestamp, initial_conditions))
            # The backend returns the path as separate x/y arrays plus a
            # success flag; the third value (yaw) is unused here.
            path_x, path_y, _, success = apply_hybrid_astar(
                initial_conditions, self._hyperparameters)
            if success:
                self._logger.debug(
                    "@{}: Hybrid A* succeeded".format(timestamp))
                # Assign the flag-configured target speed to every waypoint.
                speeds = [self._flags.target_speed] * len(path_x)
                self._logger.debug("@{}: Hybrid A* Path X: {}".format(
                    timestamp, path_x.tolist()))
                self._logger.debug("@{}: Hybrid A* Path Y: {}".format(
                    timestamp, path_y.tolist()))
                self._logger.debug("@{}: Hybrid A* Speeds: {}".format(
                    timestamp, speeds))
                output_wps = self.build_output_waypoints(
                    path_x, path_y, speeds)
            else:
                # Planner failed: command an emergency stop by following the
                # current waypoints at zero speed.
                self._logger.error("@{}: Hybrid A* failed. "
                                   "Sending emergency stop.".format(timestamp))
                output_wps = self._world.follow_waypoints(0)
        return output_wps

    def _compute_initial_conditions(self, obstacles):
        """Build the (start, end, obs) dict the Hybrid A* backend expects.

        Start is the ego pose; end is the waypoint num_waypoints_ahead ahead
        (or the ego pose itself when no waypoints remain). Poses are
        (x, y, yaw-in-radians) arrays.
        """
        ego_transform = self._world.ego_transform
        start = np.array([
            ego_transform.location.x,
            ego_transform.location.y,
            np.deg2rad(ego_transform.rotation.yaw),
        ])
        # Drop waypoints already passed before picking the goal.
        self._world.waypoints.remove_completed(ego_transform.location)
        end_index = min(self._flags.num_waypoints_ahead,
                        len(self._world.waypoints.waypoints) - 1)
        if end_index < 0:
            # If no more waypoints left. Then our location is our end wp.
            self._logger.debug("@{}: No more waypoints left")
            end_wp = ego_transform
        else:
            end_wp = self._world.waypoints.waypoints[end_index]
        # NOTE(review): the goal yaw reuses the *ego* yaw, not the goal
        # waypoint's yaw — presumably intentional; confirm with the backend.
        end = np.array([
            end_wp.location.x, end_wp.location.y,
            np.deg2rad(ego_transform.rotation.yaw)
        ])
        initial_conditions = {
            "start": start,
            "end": end,
            "obs": obstacles,
        }
        return initial_conditions
| [
"hybrid_astar_planner.HybridAStar.hybrid_astar_wrapper.apply_hybrid_astar",
"numpy.deg2rad"
] | [((2491, 2552), 'hybrid_astar_planner.HybridAStar.hybrid_astar_wrapper.apply_hybrid_astar', 'apply_hybrid_astar', (['initial_conditions', 'self._hyperparameters'], {}), '(initial_conditions, self._hyperparameters)\n', (2509, 2552), False, 'from hybrid_astar_planner.HybridAStar.hybrid_astar_wrapper import apply_hybrid_astar\n'), ((3681, 3719), 'numpy.deg2rad', 'np.deg2rad', (['ego_transform.rotation.yaw'], {}), '(ego_transform.rotation.yaw)\n', (3691, 3719), True, 'import numpy as np\n'), ((4288, 4326), 'numpy.deg2rad', 'np.deg2rad', (['ego_transform.rotation.yaw'], {}), '(ego_transform.rotation.yaw)\n', (4298, 4326), True, 'import numpy as np\n')] |
import sys, os
import time
import numpy as np
import torch
import torch.nn as nn
from torch.utils import data
from parsers import parse_a3m, read_templates
from RoseTTAFoldModel import RoseTTAFoldModule_e2e
import util
from collections import namedtuple
from ffindex import *
from kinematics import xyz_to_c6d, c6d_to_bins2, xyz_to_t2d
from trFold import TRFold
# Repository root: one directory above the directory holding this script.
script_dir = '/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])

# Number of bins in each of the four distogram output heads
# (used to size the accumulated probability arrays in Predictor.predict).
NBIN = [37, 37, 37, 19]

# Hyper-parameters for the e2e RoseTTAFold network.
MODEL_PARAM = {
    "n_module": 8,
    "n_module_str": 4,
    "n_module_ref": 4,
    "n_layer": 1,
    "d_msa": 384,
    "d_pair": 288,
    "d_templ": 64,
    "n_head_msa": 12,
    "n_head_pair": 8,
    "n_head_templ": 4,
    "d_hidden": 64,
    "r_ff": 4,
    "n_resblock": 1,
    "p_drop": 0.1,
    "use_templ": True,
    "performer_N_opts": {"nb_features": 64},
    "performer_L_opts": {"nb_features": 64}
}

# SE(3)-transformer settings for the structure module.
SE3_param = {
    "num_layers": 2,
    "num_channels": 16,
    "num_degrees": 2,
    "l0_in_features": 32,
    "l0_out_features": 8,
    "l1_in_features": 3,
    "l1_out_features": 3,
    "num_edge_features": 32,
    "div": 2,
    "n_heads": 4
}

# SE(3)-transformer settings for the refinement module.
REF_param = {
    "num_layers": 3,
    "num_channels": 32,
    "num_degrees": 3,
    "l0_in_features": 32,
    "l0_out_features": 8,
    "l1_in_features": 3,
    "l1_out_features": 3,
    "num_edge_features": 32,
    "div": 4,
    "n_heads": 4
}
MODEL_PARAM['SE3_param'] = SE3_param
MODEL_PARAM['REF_param'] = REF_param

# params for the folding protocol (TRFold)
fold_params = {
    # Savitzky-Golay smoothing kernels (window 7 and 9).
    "SG7": np.array([[[-2, 3, 6, 7, 6, 3, -2]]]) / 21,
    "SG9": np.array([[[-21, 14, 39, 54, 59, 54, 39, 14, -21]]]) / 231,
    "DCUT": 19.5,
    "ALPHA": 1.57,
    # TODO: add Cb to the motif
    # Idealized backbone N, CA, C coordinates for one residue.
    "NCAC": np.array([[-0.676, -1.294, 0.],
                      [0., 0., 0.],
                      [1.5, -0.174, 0.]], dtype=np.float32),
    "CLASH": 2.0,
    "PCUT": 0.5,
    "DSTEP": 0.5,
    "ASTEP": np.deg2rad(10.0),
    "XYZRAD": 7.5,
    "WANG": 0.1,
    "WCST": 0.1
}
fold_params["SG"] = fold_params["SG9"]
class Predictor():
    """End-to-end RoseTTAFold predictor for protein complexes.

    Loads the pre-trained e2e weights, predicts inter-residue geometry
    (distograms) and 3D coordinates, using cropped sliding-window inference
    when the sequence is longer than twice the crop window, and writes the
    results as a .npz (distograms) and a .pdb (coordinates).
    """

    def __init__(self, model_dir=None, use_cpu=False):
        """Build the network and load pre-trained weights.

        Args:
            model_dir: directory holding the weight files; defaults to
                ``<script_dir>/weights``.
            use_cpu: force CPU inference even if CUDA is available.
        """
        # fix: use identity comparison with None (was `model_dir == None`)
        if model_dir is None:
            self.model_dir = "%s/weights"%(script_dir)
        else:
            self.model_dir = model_dir
        #
        # define model name
        self.model_name = "RoseTTAFold"
        if torch.cuda.is_available() and (not use_cpu):
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")
        self.active_fn = nn.Softmax(dim=1)

        # define model & load model
        self.model = RoseTTAFoldModule_e2e(**MODEL_PARAM).to(self.device)
        could_load = self.load_model(self.model_name)
        if not could_load:
            print ("ERROR: failed to load model")
            sys.exit()

    def load_model(self, model_name, suffix='e2e'):
        """Load the checkpoint ``<model_dir>/<model_name>_<suffix>.pt``.

        Returns:
            True if the checkpoint file exists and was loaded, else False.
        """
        chk_fn = "%s/%s_%s.pt"%(self.model_dir, model_name, suffix)
        if not os.path.exists(chk_fn):
            return False
        checkpoint = torch.load(chk_fn, map_location=self.device)
        self.model.load_state_dict(checkpoint['model_state_dict'], strict=True)
        return True

    def predict(self, a3m_fn, out_prefix, Ls, templ_npz=None, window=1000, shift=100):
        """Predict geometry and structure for a (multi-chain) sequence.

        Args:
            a3m_fn: input multiple sequence alignment (a3m format).
            out_prefix: output prefix; writes <out_prefix>.npz and .pdb.
            Ls: per-chain lengths; chain breaks get a +500 residue-index jump.
            templ_npz: optional npz with template features (xyz_t, t1d, t0d);
                NaN/zero placeholders are used when not provided.
            window: crop size for sliding-window inference over long inputs.
            shift: stride between successive crop windows.
        """
        msa = parse_a3m(a3m_fn)
        N, L = msa.shape
        #
        # fix: identity comparison with None (was `templ_npz != None`)
        if templ_npz is not None:
            templ = np.load(templ_npz)
            xyz_t = torch.from_numpy(templ["xyz_t"])
            t1d = torch.from_numpy(templ["t1d"])
            t0d = torch.from_numpy(templ["t0d"])
        else:
            # No templates: NaN coordinates and zero 1D/0D features.
            xyz_t = torch.full((1, L, 3, 3), np.nan).float()
            t1d = torch.zeros((1, L, 3)).float()
            t0d = torch.zeros((1,3)).float()

        self.model.eval()
        with torch.no_grad():
            #
            msa = torch.tensor(msa).long().view(1, -1, L)
            idx_pdb_orig = torch.arange(L).long().view(1, L)
            idx_pdb = torch.arange(L).long().view(1, L)
            L_prev = 0
            for L_i in Ls[:-1]:
                # Large residue-index jump signals a chain break to the network.
                idx_pdb[:,L_prev+L_i:] += 500 # it was 200 originally.
                L_prev += L_i
            seq = msa[:,0]
            #
            # template features
            xyz_t = xyz_t.float().unsqueeze(0)
            t1d = t1d.float().unsqueeze(0)
            t0d = t0d.float().unsqueeze(0)
            t2d = xyz_to_t2d(xyz_t, t0d)
            #
            # do cropped prediction
            if L > window*2:
                # Accumulators for averaging overlapping crop predictions.
                prob_s = [np.zeros((L,L,NBIN[i]), dtype=np.float32) for i in range(4)]
                count_1d = np.zeros((L,), dtype=np.float32)
                count_2d = np.zeros((L,L), dtype=np.float32)
                node_s = np.zeros((L,MODEL_PARAM['d_msa']), dtype=np.float32)
                #
                grids = np.arange(0, L-window+shift, shift)
                ngrids = grids.shape[0]
                print("ngrid: ", ngrids)
                print("grids: ", grids)
                print("windows: ", window)
                # Run the network on every pair of crop windows (i <= j).
                for i in range(ngrids):
                    for j in range(i, ngrids):
                        start_1 = grids[i]
                        end_1 = min(grids[i]+window, L)
                        start_2 = grids[j]
                        end_2 = min(grids[j]+window, L)
                        # fix: np.bool alias removed in NumPy >= 1.24; use builtin bool
                        sel = np.zeros((L)).astype(bool)
                        sel[start_1:end_1] = True
                        sel[start_2:end_2] = True
                        input_msa = msa[:,:,sel]
                        mask = torch.sum(input_msa==20, dim=-1) < 0.5*sel.sum() # remove too gappy sequences
                        input_msa = input_msa[mask].unsqueeze(0)
                        input_msa = input_msa[:,:1000].to(self.device)
                        input_idx = idx_pdb[:,sel].to(self.device)
                        input_idx_orig = idx_pdb_orig[:,sel]
                        input_seq = input_msa[:,0].to(self.device)
                        #
                        # Select template
                        input_t1d = t1d[:,:,sel].to(self.device) # (B, T, L, 3)
                        input_t2d = t2d[:,:,sel][:,:,:,sel].to(self.device)
                        #
                        print ("running crop: %d-%d/%d-%d"%(start_1, end_1, start_2, end_2), input_msa.shape)
                        with torch.cuda.amp.autocast():
                            logit_s, node, init_crds, pred_lddt = self.model(input_msa, input_seq, input_idx, t1d=input_t1d, t2d=input_t2d, return_raw=True)
                        #
                        # Not sure How can we merge init_crds.....
                        pred_lddt = torch.clamp(pred_lddt, 0.0, 1.0)
                        sub_idx = input_idx_orig[0]
                        sub_idx_2d = np.ix_(sub_idx, sub_idx)
                        count_2d[sub_idx_2d] += 1.0
                        count_1d[sub_idx] += 1.0
                        node_s[sub_idx] += node[0].cpu().numpy()
                        for i_logit, logit in enumerate(logit_s):
                            prob = self.active_fn(logit.float()) # calculate distogram
                            prob = prob.squeeze(0).permute(1,2,0).cpu().numpy()
                            prob_s[i_logit][sub_idx_2d] += prob
                        del logit_s, node
                #
                # Average the accumulated crop predictions.
                for i in range(4):
                    prob_s[i] = prob_s[i] / count_2d[:,:,None]
                prob_in = np.concatenate(prob_s, axis=-1)
                node_s = node_s / count_1d[:, None]
                #
                # Final full-length refinement pass on the merged features.
                node_s = torch.tensor(node_s).to(self.device).unsqueeze(0)
                seq = msa[:,0].to(self.device)
                idx_pdb = idx_pdb.to(self.device)
                prob_in = torch.tensor(prob_in).to(self.device).unsqueeze(0)
                with torch.cuda.amp.autocast():
                    xyz, lddt = self.model(node_s, seq, idx_pdb, prob_s=prob_in, refine_only=True)
                print (lddt.mean())
            else:
                # Short sequence: single full-length forward pass.
                msa = msa[:,:1000].to(self.device)
                seq = msa[:,0]
                idx_pdb = idx_pdb.to(self.device)
                t1d = t1d[:,:10].to(self.device)
                t2d = t2d[:,:10].to(self.device)
                with torch.cuda.amp.autocast():
                    logit_s, _, xyz, lddt = self.model(msa, seq, idx_pdb, t1d=t1d, t2d=t2d)
                print (lddt.mean())
                prob_s = list()
                for logit in logit_s:
                    prob = self.active_fn(logit.float()) # distogram
                    prob = prob.reshape(-1, L, L).permute(1,2,0).cpu().numpy()
                    prob_s.append(prob)
            # Save the four distogram heads in half precision.
            np.savez_compressed("%s.npz"%out_prefix, dist=prob_s[0].astype(np.float16), \
                                omega=prob_s[1].astype(np.float16),\
                                theta=prob_s[2].astype(np.float16),\
                                phi=prob_s[3].astype(np.float16))
            # run TRFold: normalize distograms into proper probabilities first.
            prob_trF = list()
            for prob in prob_s:
                prob = torch.tensor(prob).permute(2,0,1).to(self.device)
                prob += 1e-8
                prob = prob / torch.sum(prob, dim=0)[None]
                prob_trF.append(prob)
            xyz = xyz[0, :, 1]
            TRF = TRFold(prob_trF, fold_params)
            xyz = TRF.fold(xyz, batch=15, lr=0.1, nsteps=200)
            print (xyz.shape, lddt[0].shape, seq[0].shape)
            self.write_pdb(seq[0], xyz, Ls, Bfacts=lddt[0], prefix=out_prefix)

    def write_pdb(self, seq, atoms, Ls, Bfacts=None, prefix=None):
        """Write predicted coordinates to ``<prefix>.pdb``, one chain per entry in Ls.

        Args:
            seq: integer-encoded amino-acid sequence of length L.
            atoms: coordinates — (L, 3) CA-only, or (L, 3, 3) N/CA/C backbone.
            Ls: per-chain lengths, used to assign chain IDs and residue numbers.
            Bfacts: optional per-residue confidence written to the B-factor
                column (clamped to [0, 1]); zeros when not provided.
            prefix: output file prefix.
        """
        chainIDs = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        L = len(seq)
        filename = "%s.pdb"%prefix
        ctr = 1
        with open(filename, 'wt') as f:
            # fix: `Bfacts == None` relied on tensor.__eq__ returning
            # NotImplemented; use an explicit identity test.
            if Bfacts is None:
                Bfacts = np.zeros(L)
            else:
                Bfacts = torch.clamp( Bfacts, 0, 1)
            for i,s in enumerate(seq):
                if (len(atoms.shape)==2):
                    # CA-only trace: one ATOM record per residue.
                    resNo = i+1
                    chain = "A"
                    # Walk chains from the last: first chain whose cumulative
                    # length is exceeded owns this residue.
                    for i_chain in range(len(Ls)-1,0,-1):
                        tot_res = sum(Ls[:i_chain])
                        if i+1 > tot_res:
                            chain = chainIDs[i_chain]
                            resNo = i+1 - tot_res
                            break
                    f.write ("%-6s%5s %4s %3s %s%4d %8.3f%8.3f%8.3f%6.2f%6.2f\n"%(
                        "ATOM", ctr, " CA ", util.num2aa[s],
                        chain, resNo, atoms[i,0], atoms[i,1], atoms[i,2],
                        1.0, Bfacts[i] ) )
                    ctr += 1
                elif atoms.shape[1]==3:
                    # Full backbone: N, CA, C records per residue.
                    resNo = i+1
                    chain = "A"
                    for i_chain in range(len(Ls)-1,0,-1):
                        tot_res = sum(Ls[:i_chain])
                        if i+1 > tot_res:
                            chain = chainIDs[i_chain]
                            resNo = i+1 - tot_res
                            break
                    for j,atm_j in enumerate((" N "," CA "," C ")):
                        f.write ("%-6s%5s %4s %3s %s%4d %8.3f%8.3f%8.3f%6.2f%6.2f\n"%(
                            "ATOM", ctr, atm_j, util.num2aa[s],
                            chain, resNo, atoms[i,j,0], atoms[i,j,1], atoms[i,j,2],
                            1.0, Bfacts[i] ) )
                        ctr += 1
def get_args():
    """Parse and return the command-line arguments for complex prediction."""
    import argparse
    arg_parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    # Optional weights directory (defaults to the bundled weights folder).
    arg_parser.add_argument("-m", dest="model_dir", default="%s/weights"%(script_dir),
                            help="Path to pre-trained network weights [%s/weights]"%script_dir)
    # Required input/output and per-chain lengths.
    arg_parser.add_argument("-i", dest="a3m_fn", required=True,
                            help="Input multiple sequence alignments (in a3m format)")
    arg_parser.add_argument("-o", dest="out_prefix", required=True,
                            help="Prefix for output file. The output files will be [out_prefix].npz and [out_prefix].pdb")
    arg_parser.add_argument("-Ls", dest="Ls", required=True, nargs="+", type=int,
                            help="The length of the each subunit (e.g. 220 400)")
    # Optional complex template features; zeros are substituted when absent.
    arg_parser.add_argument("--templ_npz", default=None,
                            help='''npz file containing complex template information (xyz_t, t1d, t0d). If not provided, zero matrices will be given as templates
- xyz_t: N, CA, C coordinates of complex templates (T, L, 3, 3) For the unaligned region, it should be NaN
- t1d: 1-D features from HHsearch results (score, SS, probab column from atab file) (T, L, 3). For the unaligned region, it should be zeros
- t0d: 0-D features from HHsearch (Probability/100.0, Ideintities/100.0, Similarity fro hhr file) (T, 3)''')
    arg_parser.add_argument("--cpu", dest='use_cpu', default=False, action='store_true')
    return arg_parser.parse_args()
if __name__ == "__main__":
    args = get_args()
    # Skip the (expensive) prediction when the output distograms already
    # exist, so interrupted batch runs can be restarted cheaply.
    if not os.path.exists("%s.npz"%args.out_prefix):
        pred = Predictor(model_dir=args.model_dir, use_cpu=args.use_cpu)
        pred.predict(args.a3m_fn, args.out_prefix, args.Ls, templ_npz=args.templ_npz)
| [
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"sys.exit",
"numpy.arange",
"torch.arange",
"os.path.exists",
"argparse.ArgumentParser",
"RoseTTAFoldModel.RoseTTAFoldModule_e2e",
"numpy.ix_",
"torch.cuda.amp.autocast",
"numpy.concatenate",
"parsers.parse_a3m",
... | [((1975, 2067), 'numpy.array', 'np.array', (['[[-0.676, -1.294, 0.0], [0.0, 0.0, 0.0], [1.5, -0.174, 0.0]]'], {'dtype': 'np.float32'}), '([[-0.676, -1.294, 0.0], [0.0, 0.0, 0.0], [1.5, -0.174, 0.0]],\n dtype=np.float32)\n', (1983, 2067), True, 'import numpy as np\n'), ((2214, 2230), 'numpy.deg2rad', 'np.deg2rad', (['(10.0)'], {}), '(10.0)\n', (2224, 2230), True, 'import numpy as np\n'), ((11758, 11828), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.RawTextHelpFormatter'}), '(formatter_class=argparse.RawTextHelpFormatter)\n', (11781, 11828), False, 'import argparse\n'), ((1776, 1813), 'numpy.array', 'np.array', (['[[[-2, 3, 6, 7, 6, 3, -2]]]'], {}), '([[[-2, 3, 6, 7, 6, 3, -2]]])\n', (1784, 1813), True, 'import numpy as np\n'), ((1828, 1880), 'numpy.array', 'np.array', (['[[[-21, 14, 39, 54, 59, 54, 39, 14, -21]]]'], {}), '([[[-21, 14, 39, 54, 59, 54, 39, 14, -21]]])\n', (1836, 1880), True, 'import numpy as np\n'), ((2815, 2832), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (2825, 2832), True, 'import torch.nn as nn\n'), ((3304, 3348), 'torch.load', 'torch.load', (['chk_fn'], {'map_location': 'self.device'}), '(chk_fn, map_location=self.device)\n', (3314, 3348), False, 'import torch\n'), ((3555, 3572), 'parsers.parse_a3m', 'parse_a3m', (['a3m_fn'], {}), '(a3m_fn)\n', (3564, 3572), False, 'from parsers import parse_a3m, read_templates\n'), ((9537, 9566), 'trFold.TRFold', 'TRFold', (['prob_trF', 'fold_params'], {}), '(prob_trF, fold_params)\n', (9543, 9566), False, 'from trFold import TRFold\n'), ((13278, 13320), 'os.path.exists', 'os.path.exists', (["('%s.npz' % args.out_prefix)"], {}), "('%s.npz' % args.out_prefix)\n", (13292, 13320), False, 'import sys, os\n'), ((2638, 2663), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2661, 2663), False, 'import torch\n'), ((2709, 2729), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2721, 2729), False, 
'import torch\n'), ((2770, 2789), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2782, 2789), False, 'import torch\n'), ((3087, 3097), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3095, 3097), False, 'import sys, os\n'), ((3234, 3256), 'os.path.exists', 'os.path.exists', (['chk_fn'], {}), '(chk_fn)\n', (3248, 3256), False, 'import sys, os\n'), ((3658, 3676), 'numpy.load', 'np.load', (['templ_npz'], {}), '(templ_npz)\n', (3665, 3676), True, 'import numpy as np\n'), ((3697, 3729), 'torch.from_numpy', 'torch.from_numpy', (["templ['xyz_t']"], {}), "(templ['xyz_t'])\n", (3713, 3729), False, 'import torch\n'), ((3748, 3778), 'torch.from_numpy', 'torch.from_numpy', (["templ['t1d']"], {}), "(templ['t1d'])\n", (3764, 3778), False, 'import torch\n'), ((3797, 3827), 'torch.from_numpy', 'torch.from_numpy', (["templ['t0d']"], {}), "(templ['t0d'])\n", (3813, 3827), False, 'import torch\n'), ((4044, 4059), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4057, 4059), False, 'import torch\n'), ((4631, 4653), 'kinematics.xyz_to_t2d', 'xyz_to_t2d', (['xyz_t', 't0d'], {}), '(xyz_t, t0d)\n', (4641, 4653), False, 'from kinematics import xyz_to_c6d, c6d_to_bins2, xyz_to_t2d\n'), ((2891, 2927), 'RoseTTAFoldModel.RoseTTAFoldModule_e2e', 'RoseTTAFoldModule_e2e', ([], {}), '(**MODEL_PARAM)\n', (2912, 2927), False, 'from RoseTTAFoldModel import RoseTTAFoldModule_e2e\n'), ((4848, 4880), 'numpy.zeros', 'np.zeros', (['(L,)'], {'dtype': 'np.float32'}), '((L,), dtype=np.float32)\n', (4856, 4880), True, 'import numpy as np\n'), ((4908, 4942), 'numpy.zeros', 'np.zeros', (['(L, L)'], {'dtype': 'np.float32'}), '((L, L), dtype=np.float32)\n', (4916, 4942), True, 'import numpy as np\n'), ((4967, 5020), 'numpy.zeros', 'np.zeros', (["(L, MODEL_PARAM['d_msa'])"], {'dtype': 'np.float32'}), "((L, MODEL_PARAM['d_msa']), dtype=np.float32)\n", (4975, 5020), True, 'import numpy as np\n'), ((5062, 5101), 'numpy.arange', 'np.arange', (['(0)', '(L - window + shift)', 'shift'], {}), '(0, L - 
window + shift, shift)\n', (5071, 5101), True, 'import numpy as np\n'), ((7727, 7758), 'numpy.concatenate', 'np.concatenate', (['prob_s'], {'axis': '(-1)'}), '(prob_s, axis=-1)\n', (7741, 7758), True, 'import numpy as np\n'), ((10059, 10070), 'numpy.zeros', 'np.zeros', (['L'], {}), '(L)\n', (10067, 10070), True, 'import numpy as np\n'), ((10114, 10139), 'torch.clamp', 'torch.clamp', (['Bfacts', '(0)', '(1)'], {}), '(Bfacts, 0, 1)\n', (10125, 10139), False, 'import torch\n'), ((402, 428), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (418, 428), False, 'import sys, os\n'), ((3862, 3894), 'torch.full', 'torch.full', (['(1, L, 3, 3)', 'np.nan'], {}), '((1, L, 3, 3), np.nan)\n', (3872, 3894), False, 'import torch\n'), ((3921, 3943), 'torch.zeros', 'torch.zeros', (['(1, L, 3)'], {}), '((1, L, 3))\n', (3932, 3943), False, 'import torch\n'), ((3970, 3989), 'torch.zeros', 'torch.zeros', (['(1, 3)'], {}), '((1, 3))\n', (3981, 3989), False, 'import torch\n'), ((4759, 4802), 'numpy.zeros', 'np.zeros', (['(L, L, NBIN[i])'], {'dtype': 'np.float32'}), '((L, L, NBIN[i]), dtype=np.float32)\n', (4767, 4802), True, 'import numpy as np\n'), ((8099, 8124), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {}), '()\n', (8122, 8124), False, 'import torch\n'), ((8530, 8555), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {}), '()\n', (8553, 8555), False, 'import torch\n'), ((9433, 9455), 'torch.sum', 'torch.sum', (['prob'], {'dim': '(0)'}), '(prob, dim=0)\n', (9442, 9455), False, 'import torch\n'), ((6933, 6965), 'torch.clamp', 'torch.clamp', (['pred_lddt', '(0.0)', '(1.0)'], {}), '(pred_lddt, 0.0, 1.0)\n', (6944, 6965), False, 'import torch\n'), ((7055, 7079), 'numpy.ix_', 'np.ix_', (['sub_idx', 'sub_idx'], {}), '(sub_idx, sub_idx)\n', (7061, 7079), True, 'import numpy as np\n'), ((4093, 4110), 'torch.tensor', 'torch.tensor', (['msa'], {}), '(msa)\n', (4105, 4110), False, 'import torch\n'), ((4160, 4175), 'torch.arange', 'torch.arange', 
(['L'], {}), '(L)\n', (4172, 4175), False, 'import torch\n'), ((4216, 4231), 'torch.arange', 'torch.arange', (['L'], {}), '(L)\n', (4228, 4231), False, 'import torch\n'), ((5822, 5856), 'torch.sum', 'torch.sum', (['(input_msa == 20)'], {'dim': '(-1)'}), '(input_msa == 20, dim=-1)\n', (5831, 5856), False, 'import torch\n'), ((6620, 6645), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {}), '()\n', (6643, 6645), False, 'import torch\n'), ((9332, 9350), 'torch.tensor', 'torch.tensor', (['prob'], {}), '(prob)\n', (9344, 9350), False, 'import torch\n'), ((5588, 5599), 'numpy.zeros', 'np.zeros', (['L'], {}), '(L)\n', (5596, 5599), True, 'import numpy as np\n'), ((7854, 7874), 'torch.tensor', 'torch.tensor', (['node_s'], {}), '(node_s)\n', (7866, 7874), False, 'import torch\n'), ((8027, 8048), 'torch.tensor', 'torch.tensor', (['prob_in'], {}), '(prob_in)\n', (8039, 8048), False, 'import torch\n')] |
import numpy as np
import pytest
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
)
import pandas._testing as tm
# Datetime-like dtypes: indexes of any two *different* entries here should
# never find matches in get_indexer_non_unique.
dtlike_dtypes = [
    np.dtype("timedelta64[ns]"),
    np.dtype("datetime64[ns]"),
    pd.DatetimeTZDtype("ns", "Asia/Tokyo"),
    pd.PeriodDtype("ns"),
]


@pytest.mark.parametrize("ldtype", dtlike_dtypes)
@pytest.mark.parametrize("rdtype", dtlike_dtypes)
def test_get_indexer_non_unique_wrong_dtype(ldtype, rdtype):
    """get_indexer_non_unique across mismatched datetimelike dtypes matches nothing."""
    # Two copies of three hourly timestamps, expressed as integer nanoseconds.
    raw = np.tile(3600 * 10 ** 9 * np.arange(3), 2)

    def make_index(dtype):
        if dtype is dtlike_dtypes[-1]:
            # PeriodArray will try to cast ints to strings, so build a
            # DatetimeIndex first and convert.
            return DatetimeIndex(raw).astype(dtype)
        return Index(raw, dtype=dtype)

    left = make_index(ldtype)
    right = make_index(rdtype)

    indexer, missing = left.get_indexer_non_unique(right)

    if ldtype is rdtype:
        # Identical dtypes: each target hits both duplicate positions.
        expected_indexer = np.array([0, 3, 1, 4, 2, 5] * 2, dtype=np.intp)
        expected_missing = np.array([], dtype=np.intp)
    else:
        # Different datetimelike dtypes never compare equal.
        expected_indexer = np.array([-1] * 6, dtype=np.intp)
        expected_missing = np.arange(6, dtype=np.intp)
    tm.assert_numpy_array_equal(indexer, expected_indexer)
    tm.assert_numpy_array_equal(missing, expected_missing)
| [
"pandas.DatetimeIndex",
"pandas.Index",
"pytest.mark.parametrize",
"numpy.array",
"pandas._testing.assert_numpy_array_equal",
"pandas.PeriodDtype",
"numpy.dtype",
"pandas.DatetimeTZDtype",
"numpy.arange"
] | [((313, 361), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ldtype"""', 'dtlike_dtypes'], {}), "('ldtype', dtlike_dtypes)\n", (336, 361), False, 'import pytest\n'), ((364, 412), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""rdtype"""', 'dtlike_dtypes'], {}), "('rdtype', dtlike_dtypes)\n", (387, 412), False, 'import pytest\n'), ((170, 197), 'numpy.dtype', 'np.dtype', (['"""timedelta64[ns]"""'], {}), "('timedelta64[ns]')\n", (178, 197), True, 'import numpy as np\n'), ((204, 230), 'numpy.dtype', 'np.dtype', (['"""datetime64[ns]"""'], {}), "('datetime64[ns]')\n", (212, 230), True, 'import numpy as np\n'), ((237, 275), 'pandas.DatetimeTZDtype', 'pd.DatetimeTZDtype', (['"""ns"""', '"""Asia/Tokyo"""'], {}), "('ns', 'Asia/Tokyo')\n", (255, 275), True, 'import pandas as pd\n'), ((282, 302), 'pandas.PeriodDtype', 'pd.PeriodDtype', (['"""ns"""'], {}), "('ns')\n", (296, 302), True, 'import pandas as pd\n'), ((730, 754), 'pandas.Index', 'Index', (['vals'], {'dtype': 'dtype'}), '(vals, dtype=dtype)\n', (735, 754), False, 'from pandas import DatetimeIndex, Index\n'), ((912, 959), 'numpy.array', 'np.array', (['([0, 3, 1, 4, 2, 5] * 2)'], {'dtype': 'np.intp'}), '([0, 3, 1, 4, 2, 5] * 2, dtype=np.intp)\n', (920, 959), True, 'import numpy as np\n'), ((975, 1002), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.intp'}), '([], dtype=np.intp)\n', (983, 1002), True, 'import numpy as np\n'), ((1012, 1055), 'pandas._testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['result[0]', 'ex1'], {}), '(result[0], ex1)\n', (1039, 1055), True, 'import pandas._testing as tm\n'), ((1065, 1108), 'pandas._testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['result[1]', 'ex2'], {}), '(result[1], ex2)\n', (1092, 1108), True, 'import pandas._testing as tm\n'), ((1144, 1177), 'numpy.array', 'np.array', (['([-1] * 6)'], {'dtype': 'np.intp'}), '([-1] * 6, dtype=np.intp)\n', (1152, 1177), True, 'import numpy as np\n'), ((1197, 1224), 
'numpy.arange', 'np.arange', (['(6)'], {'dtype': 'np.intp'}), '(6, dtype=np.intp)\n', (1206, 1224), True, 'import numpy as np\n'), ((1234, 1284), 'pandas._testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['result[0]', 'no_matches'], {}), '(result[0], no_matches)\n', (1261, 1284), True, 'import pandas._testing as tm\n'), ((1294, 1341), 'pandas._testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['result[1]', 'missing'], {}), '(result[1], missing)\n', (1321, 1341), True, 'import pandas._testing as tm\n'), ((514, 526), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (523, 526), True, 'import numpy as np\n'), ((680, 699), 'pandas.DatetimeIndex', 'DatetimeIndex', (['vals'], {}), '(vals)\n', (693, 699), False, 'from pandas import DatetimeIndex, Index\n')] |
import numpy as np
from src.dqn.replay.memory import Memory
from src.dqn.replay.sum_tree import SumTree
class PERMemory(Memory):
    """Prioritized experience replay buffer.

    Transitions are sampled with probability proportional to a priority
    derived from their TD-error (stored in a sum tree), and each sampled
    transition carries an importance-sampling weight that corrects the
    bias that prioritized sampling introduces.
    """

    def __init__(self, size, state_size, alpha, beta, epsilon, beta_grow):
        super().__init__(size, state_size)
        self.alpha = alpha          # priority exponent
        self.beta = beta            # importance-sampling exponent
        self.epsilon = epsilon      # keeps priorities strictly positive
        self.beta_grow = beta_grow  # callable(beta, step) -> annealed beta
        self.max_priority = 1.0
        self.sum_tree = SumTree(self.size)
        self.train_step = 0

    def add_sample(self, sample):
        """Add a new transition at maximal priority.

        A fresh transition has no known TD-error yet, so it is stored with
        the largest priority seen so far to guarantee it gets sampled and
        its priority updated.
        """
        self.sum_tree.add(self.max_priority ** self.alpha, sample)
        self.added_samples += 1

    def sample_batch(self, batch_size):
        """Draw a stratified, priority-proportional batch.

        Returns (states, actions, rewards, next_states, ends, is_weights,
        node_indices); node_indices let the caller feed updated TD-errors
        back via update_priorities.
        """
        col_shape = (batch_size, 1)
        states = np.empty((batch_size, self.state_size))
        actions = np.empty(col_shape, dtype=np.int32)
        rewards = np.empty(col_shape, dtype=np.float32)
        next_states = np.empty((batch_size, self.state_size), dtype=np.float32)
        ends = np.empty(col_shape, dtype=np.float32)
        is_weights = np.empty(col_shape, dtype=np.float32)
        node_indices = np.empty((batch_size,), dtype=np.int32)

        total_mass = self.sum_tree.get_total_sum()
        segment = total_mass / batch_size
        # Largest possible IS weight (from the smallest priority); dividing
        # by it normalises all weights into (0, 1].
        w_max = np.power(self.size * (self.sum_tree.min_value / total_mass), -self.beta)

        for k in range(batch_size):
            # Stratified sampling: one uniform draw per segment of tree mass.
            target = np.random.uniform(segment * k, segment * (k + 1))
            node_index, priority, transition = self.sum_tree.get(target)
            states[k], actions[k], rewards[k], next_states[k], ends[k] = transition[:5]
            is_weights[k] = np.power(self.size * (priority / total_mass), -self.beta) / w_max
            node_indices[k] = node_index

        self.train_step += 1
        self.beta = min(1, self.beta_grow(self.beta, self.train_step))
        return states, actions, rewards, next_states, ends, is_weights, node_indices

    def update_priorities(self, node_indices, td_errors):
        """Write back clipped |TD-error|-based priorities after training."""
        priorities = np.power(
            np.minimum(np.abs(td_errors) + self.epsilon, self.max_priority),
            self.alpha,
        )
        for node_index, priority in zip(node_indices, priorities):
            self.sum_tree.update(node_index, priority)
| [
"numpy.abs",
"numpy.power",
"numpy.empty",
"numpy.random.uniform",
"src.dqn.replay.sum_tree.SumTree"
] | [((424, 442), 'src.dqn.replay.sum_tree.SumTree', 'SumTree', (['self.size'], {}), '(self.size)\n', (431, 442), False, 'from src.dqn.replay.sum_tree import SumTree\n'), ((840, 879), 'numpy.empty', 'np.empty', (['(batch_size, self.state_size)'], {}), '((batch_size, self.state_size))\n', (848, 879), True, 'import numpy as np\n'), ((898, 939), 'numpy.empty', 'np.empty', (['(batch_size, 1)'], {'dtype': 'np.int32'}), '((batch_size, 1), dtype=np.int32)\n', (906, 939), True, 'import numpy as np\n'), ((958, 1001), 'numpy.empty', 'np.empty', (['(batch_size, 1)'], {'dtype': 'np.float32'}), '((batch_size, 1), dtype=np.float32)\n', (966, 1001), True, 'import numpy as np\n'), ((1024, 1081), 'numpy.empty', 'np.empty', (['(batch_size, self.state_size)'], {'dtype': 'np.float32'}), '((batch_size, self.state_size), dtype=np.float32)\n', (1032, 1081), True, 'import numpy as np\n'), ((1097, 1140), 'numpy.empty', 'np.empty', (['(batch_size, 1)'], {'dtype': 'np.float32'}), '((batch_size, 1), dtype=np.float32)\n', (1105, 1140), True, 'import numpy as np\n'), ((1162, 1205), 'numpy.empty', 'np.empty', (['(batch_size, 1)'], {'dtype': 'np.float32'}), '((batch_size, 1), dtype=np.float32)\n', (1170, 1205), True, 'import numpy as np\n'), ((1229, 1268), 'numpy.empty', 'np.empty', (['(batch_size,)'], {'dtype': 'np.int32'}), '((batch_size,), dtype=np.int32)\n', (1237, 1268), True, 'import numpy as np\n'), ((1398, 1474), 'numpy.power', 'np.power', (['(self.size * (self.sum_tree.min_value / tree_total_sum))', '(-self.beta)'], {}), '(self.size * (self.sum_tree.min_value / tree_total_sum), -self.beta)\n', (1406, 1474), True, 'import numpy as np\n'), ((1539, 1594), 'numpy.random.uniform', 'np.random.uniform', (['(range_size * i)', '(range_size * (i + 1))'], {}), '(range_size * i, range_size * (i + 1))\n', (1556, 1594), True, 'import numpy as np\n'), ((1860, 1918), 'numpy.power', 'np.power', (['(self.size * (value / tree_total_sum))', '(-self.beta)'], {}), '(self.size * (value / tree_total_sum), 
-self.beta)\n', (1868, 1918), True, 'import numpy as np\n'), ((2272, 2289), 'numpy.abs', 'np.abs', (['td_errors'], {}), '(td_errors)\n', (2278, 2289), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.