code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Utility functions for PypeIt parameter sets
"""
import os
import time
import glob
import warnings
import textwrap
from IPython import embed
import numpy as np
from astropy.table import Table
from configobj import ConfigObj
from pypeit import msgs
#-----------------------------------------------------------------------
# Parameter utility functions
#-----------------------------------------------------------------------
def _eval_ignore():
"""Provides a list of strings that should not be evaluated."""
return [ 'open', 'file', 'dict', 'list', 'tuple' ]
def recursive_dict_evaluate(d):
    """
    Recursively run :func:`eval` on each element of the provided
    dictionary.

    A raw read of a configuration file with `ConfigObj` results in a
    dictionary that contains strings or lists of strings. However, when
    assigning the values for the various ParSets, the `from_dict`
    methods expect the dictionary values to have the appropriate type.
    E.g., the ConfigObj will have something like d['foo'] = '1', when
    the `from_dict` method expects the value to be an integer (d['foo']
    = 1).

    This function tries to evaluate *all* dictionary values, except for
    those listed by :func:`_eval_ignore`.  Any value in that list, or
    any value for which ``eval`` raises, is kept as the original string.

    .. warning::
        This uses :func:`eval` on configuration-file content; do not
        feed it untrusted input.

    Args:
        d (dict):
            Dictionary of values to evaluate.  Modified in place.

    Returns:
        dict: Identical to input dictionary, but with all string values
        replaced with the result of `eval(d[k])` for all `k` in
        `d.keys()`.
    """
    ignore = _eval_ignore()
    for k in d.keys():
        if isinstance(d[k], dict):
            # Recurse into sub-sections
            d[k] = recursive_dict_evaluate(d[k])
        elif isinstance(d[k], list):
            # Evaluate each list element independently
            replacement = []
            for v in d[k]:
                if v in ignore:
                    replacement += [v]
                else:
                    try:
                        replacement += [eval(v)]
                    # Only Exception: a bare except would also swallow
                    # KeyboardInterrupt/SystemExit.
                    except Exception:
                        replacement += [v]
            d[k] = replacement
        else:
            try:
                d[k] = eval(d[k]) if d[k] not in ignore else d[k]
            except Exception:
                # Not evaluatable (e.g. a plain string); keep as is.
                pass
    return d
def get_parset_list(cfg, pk, parsetclass):
    """
    Create a list of ParSets based on a root keyword for a set of
    defined groups in the configuration file.

    For example, the :class:`InstrumentPar` group allows for a list of
    detectors (:class:`DetectorPar`) with keywords like `detector1`,
    `detector2`, etc. This function parses the provided configuration
    object (`cfg`) to find any sections with `detector` (`pk`) as its
    root. The remainder of the section name must be able to be
    converted to an integer and the section itself must be able to setup
    an instance of `parsetclass`. The sections must be numbered
    sequentially from 1..N; e.g., `detector1` and `detector3` without a
    `detector2` is an error.

    Args:
        cfg (:class:`ConfigObj`, :obj:`dict`):
            The top-level configuration that defines a list of
            sub-ParSets.
        pk (str):
            The root of the keywords used to set a list of sub-ParSets.
        parsetclass (:class:`pypeit.par.parset.ParSet`):
            The class used to construct each element in the list of
            parameter subsets.  The class **must** have a `from_dict`
            method that instantiates the ParSet from the relevant
            subsection of cfg.

    Returns:
        list: A list of instances of `parsetclass` parsed from the
        provided configuration data, or None if no matching sections
        were found.

    Raises:
        ValueError:
            Raised if the indices of the subsections are not sequential
            and 1-indexed.
    """
    # Find all sections whose name is the root keyword plus an integer
    # suffix (e.g., 'detector1'), recording the suffix as the sort order.
    par = []
    order = []
    for _k in cfg.keys():
        if _k == pk and cfg[_k] is None:
            # Bare keyword with no content defines nothing
            continue
        if pk in _k:
            try:
                # Parse the index and build the ParSet *before* appending,
                # so a failure cannot leave `order` and `par` out of sync.
                indx = int(_k.replace(pk, ''))
                inst = parsetclass.from_dict(cfg[_k])
            except Exception:
                # Not an indexed section of this group, or it failed to
                # instantiate; skip it.
                continue
            order += [indx]
            par += [inst]
    if len(par) == 0:
        # No such subsets were defined, so return a null result
        return None
    # Make sure the instances are correctly sorted and sequential.
    # Compare the sorted indices against exactly 1..N; the previous
    # shape-mismatched comparison could error (or silently pass) for
    # non-sequential indices instead of raising the intended ValueError.
    srt = np.argsort(order)
    if not np.array_equal(np.array(order)[srt], np.arange(len(order)) + 1):
        raise ValueError('Parameter set series must be sequential and 1-indexed.')
    # Return the sorted instances
    return [par[i] for i in srt]
def parset_to_dict(par):
    """
    Convert the provided parset into a dictionary.

    Args:
        par (ParSet): Parameter set to convert.

    Returns:
        dict: Dictionary of the ParSet keywords with values evaluated by
        :func:`recursive_dict_evaluate`.
    """
    try:
        # Wrap the output in a named section so ConfigObj returns a
        # single nested dictionary we can pull out.
        d = dict(ConfigObj(par.to_config(section_name='tmp'))['tmp'])
    # Only Exception: a bare except would also swallow
    # KeyboardInterrupt/SystemExit.
    except Exception:
        # Fall back for ParSets whose to_config() does not accept a
        # section_name argument.
        d = dict(ConfigObj(par.to_config()))
    return recursive_dict_evaluate(d)
#-----------------------------------------------------------------------
# Functions for parsing the input pypeit file
# TODO: Should these go into a different module? PypitSetup?
#-----------------------------------------------------------------------
def _read_pypeit_file_lines(ifile):
"""
General parser for a pypeit file.
- Checks that the file exists.
- Reads all the lines in the file
- Removes comments, empty lines, and replaces special characters.
Applies to settings, setup, and user-level reduction files.
Args:
ifile (str): Name of the file to parse.
Returns:
:obj:`numpy.ndarray`: Returns a list of the valid lines in the
files.
"""
# Check the files
if not os.path.isfile(ifile):
msgs.error('The filename does not exist -' + msgs.newline() + ifile)
# Read the input lines and replace special characters
with open(ifile, 'r') as f:
lines = np.array([l.replace('\t', ' ').replace('\n', ' ').strip() \
for l in f.readlines()])
# Remove empty or fully commented lines
lines = lines[np.array([ len(l) > 0 and l[0] != '#' for l in lines ])]
# Remove appended comments and return
return np.array([ l.split('#')[0] for l in lines ])
def _find_pypeit_block(lines, group):
"""
Find the PypeIt group block
Args:
lines (:obj:`list`):
List of file lines
group (:obj:`str`):
Name of group to parse
Returns:
int, int: Starting,ending line of the block; -1 if not present
"""
start = -1
end = -1
for i, l in enumerate(lines):
entries = l.split()
if start < 0 and entries[0] == group and entries[1] == 'read':
start = i+1
continue
if entries[0] == group and entries[1] == 'end':
end = i
continue
if start >= 0 and end >= 0:
break
return start, end
def _parse_data_file_name(inp, current_path):
"""
Expand the data file name as necessary and
then search for all data files
Args:
inp (str): Path
current_path (str or None):
Returns:
list: Glob list of files in the generated a path
"""
out = os.path.expanduser(inp) if inp[0] == '~' else inp
if current_path is not None:
out = os.path.join(current_path, out)
return glob.glob(out)
def _read_data_file_names(lines, file_check=True):
    """
    Read the raw data file format.

    Args:
        lines (list): Lines from the data block of the pypeit file.
        file_check (bool, optional): Require every collected file to exist.

    Returns:
        list: List of data file names
    """
    # Walk the lines, tracking the active path and collecting the files
    # to skip and the files to read (both may contain wildcards).
    current_path = None
    skip_inp = []
    read_inp = []
    for line in lines:
        first_word = line.split(' ')[0]
        if first_word == 'skip':
            # Everything after the first space is the file pattern
            pattern = line[line.index(" ") + 1:]
            skip_inp += _parse_data_file_name(pattern, current_path)
        elif first_word == 'path':
            current_path = line[line.index(" ") + 1:]
        else:
            read_inp += _parse_data_file_name(line, current_path)
    # Warn about and collapse any duplicated entries
    if skip_inp and len(set(skip_inp)) < len(skip_inp):
        msgs.warn('There are duplicated files to skip.')
        skip_inp = list(set(skip_inp))
    if read_inp and len(set(read_inp)) < len(read_inp):
        msgs.warn('There are duplicated files to read.')
        read_inp = list(set(read_inp))
    # Remove any files flagged to be skipped
    for fname in skip_inp:
        if fname in read_inp:
            read_inp.remove(fname)
    # Optionally check that the remaining files exist
    if file_check:
        for fname in read_inp:
            if not os.path.isfile(fname):
                raise FileNotFoundError('{0} does not exist!'.format(fname))
    return read_inp
def _determine_data_format(lines):
"""
Determine the format of the data block in the .pypeit file.
The test used in this function is pretty basic. A table format is
assumed if the first character in *any* line is `|`.
Args:
lines (:obj:`list`):
The list of lines read from the data block of the pypeit
file.
Returns:
str: The syntax of the data files to read::
'raw': A (list of) file roots to be read or found using
`glob`.
'table': ASCII output of an astropy.table.Table
"""
for l in lines:
if l[0] == '|':
return 'table'
return 'raw'
def _read_data_file_table(lines, file_check=True):
    """
    Read the file table format.

    Args:
        lines (:obj:`list`):
            List of lines *within the data* block read from the pypeit
            file.
        file_check (:obj:`bool`, optional):
            Check if the specified data files exist.

    Returns:
        list, dict, Table: Returns the list of data file names, a
        dictionary with the frame types of each file where the key of
        the dictionary is the file name, and a Table with the data
        provided in the pypeit file. Note that the files listed in the
        first object contain the full path, whereas the file names in
        the frame type dictionary and the data table do not include the
        full path to the file.
    Raise:
        PypeItError:
            Raised if `file_check=True` and any of the specified files
            to not exist, or if the table does not have a 'filename' or
            'frametype' column.
    """
    # Allow for multiple paths; all 'path' lines must precede the table
    # header line, since the first non-'path' line ends this loop.
    paths = []
    for l in lines:
        space_ind = l.index(" ")
        if l[:space_ind].strip() != 'path':
            break
        paths += [ l[space_ind+1:] ]
    npaths = len(paths)
    # The header is the first line after the paths; column names are
    # '|'-delimited with the empty leading/trailing fields dropped.
    header = [ l.strip() for l in lines[npaths].split('|') ][1:-1]
    # Minimum columns required
    if 'filename' not in header:
        msgs.error('Table format failure: No \'filename\' column.')
    if 'frametype' not in header:
        msgs.error('Table format failure: No \'frametype\' column.')
    # Build the table: one row per remaining line, object dtype so all
    # cells stay as strings until the caller interprets them.
    nfiles = len(lines) - npaths - 1
    tbl = np.empty((nfiles, len(header)), dtype=object)
    for i in range(nfiles):
        row = np.array([ l.strip() for l in lines[i+npaths+1].split('|') ])[1:-1]
        if len(row) != tbl.shape[1]:
            raise ValueError('Data and header lines have mismatched columns!')
        tbl[i,:] = row
    data = {}
    for i,key in enumerate(header):
        data[key] = tbl[:,i]
    tbl = Table(data)
    # Build full paths to file and set frame types.  The first path in
    # which the file exists wins; if none match, the last path searched
    # is kept (and flagged below if file_check is on).
    # NOTE(review): if no 'path' lines were provided, `filename` is
    # undefined on the first iteration and this raises NameError --
    # looks like a latent bug; confirm callers always supply a path.
    frametype = {}
    data_files = []
    for i in range(nfiles):
        frametype[tbl['filename'][i]] = tbl['frametype'][i]
        for p in paths:
            filename = os.path.join(p, tbl['filename'][i])
            if os.path.isfile(filename):
                break
        data_files.append(filename)
        if not os.path.isfile(filename) and file_check:
            msgs.error('File does not exist: {0}'.format(filename))
    return data_files, frametype, tbl
def _parse_setup_lines(lines):
"""Return a list of the setup names"""
setups = []
for l in lines:
if 'Setup' in l:
tsetup = l.split()[1].strip()
# Remove any lingering colon
if tsetup[-1] == ':':
setup = tsetup[:-1]
else:
setup = tsetup
setups.append(setup)
#
return setups
def parse_pypeit_file(ifile, file_check=True, runtime=False):
    """
    Parse the user-provided .pypeit reduction file.

    Args:
        ifile (:obj:`str`):
            Name of pypeit file
        file_check (:obj:`bool`, optional):
            Check that the files in the pypeit configuration data file
            exist, and fault if they do not.
        runtime (:obj:`bool`, optional):
            Perform additional checks if called to run PypeIt
    Returns:
        5-element tuple containing
          - list: List of configuration lines,
          - list: List of datafiles to read,
          - list: List of frametypes for each file
          - :obj:`astropy.table.Table`: Table of user supplied info on data files
          - list: List of setup lines.
    """
    # Read in the pypeit reduction file
    msgs.info('Loading the reduction file')
    lines = _read_pypeit_file_lines(ifile)
    # Used to select the configuration lines: Anything that isn't part
    # of the data or setup blocks is assumed to be part of the
    # configuration
    is_config = np.ones(len(lines), dtype=bool)
    # Parse data block
    s, e = _find_pypeit_block(lines, 'data')
    if s >= 0 and e < 0:
        msgs.error("Missing 'data end' in {0}".format(ifile))
    if s < 0:
        msgs.error("You haven't specified any data!")
    data_format = _determine_data_format(lines[s:e])
    if data_format == 'raw':
        # Raw format lists file names/globs only: no frame types, no table
        frametype = None
        usrtbl = None
        data_files = _read_data_file_names(lines[s:e], file_check=file_check)
    elif data_format == 'table':
        data_files, frametype, usrtbl = _read_data_file_table(lines[s:e], file_check=file_check)
    # Flag the data block, including its 'data read'/'data end'
    # delimiter lines, as not part of the configuration
    is_config[s-1:e+1] = False
    if len(data_files) == 0 and file_check:
        msgs.error('There are no raw data frames' + msgs.newline() +
                   'Perhaps the path to the data is incorrect?')
    else:
        msgs.info('Found {0:d} raw data frames'.format(len(data_files)))
    # Parse the setup block (optional; absent setup block means no setups)
    s, e = _find_pypeit_block(lines, 'setup')
    if s >= 0 and e < 0:
        msgs.error("Missing 'setup end' in {0}".format(ifile))
    if s < 0:
        setups = []
    else:
        setups = _parse_setup_lines(lines[s:e])
        is_config[s-1:e+1] = False
    # TODO: This should be moved to the PypeIt class
    # Running PypeIt?
    if runtime:
        # NOTE(review): with the 'raw' data format usrtbl is None, so
        # usrtbl.keys() below would raise AttributeError -- presumably
        # runtime=True is only used with the table format; confirm.
        for key in ['filename', 'frametype']:
            if key not in usrtbl.keys():
                msgs.error("Add {:s} to your PypeIt file before using run_pypeit".format(key))
        # Setup
        if len(setups) != 1:
            msgs.error("Add setup info to your PypeIt file in the setup block!")
    msgs.info('Input file loaded successfully')
    return list(lines[is_config]), data_files, frametype, usrtbl, setups
def pypeit_config_lines(ifile):
    """
    Return the config lines from a PypeIt file.

    Args:
        ifile (str): Name of PypeIt file

    Returns:
        list: List of configuration lines; will be used for ConfigObj
    """
    lines = _read_pypeit_file_lines(ifile)
    # Configuration is everything *except* the data and setup blocks
    is_config = np.ones(len(lines), dtype=bool)
    start, end = _find_pypeit_block(lines, 'data')
    if start >= 0 and end < 0:
        msgs.error("Missing 'data end' in {0}".format(ifile))
    if start >= 0:
        # Flag the block, including its delimiter lines
        is_config[start-1:end+1] = False
    start, end = _find_pypeit_block(lines, 'setup')
    if start >= 0 and end < 0:
        msgs.error("Missing 'setup end' in {0}".format(ifile))
    if start >= 0:
        is_config[start-1:end+1] = False
    return list(lines[is_config])
def make_pypeit_file(pypeit_file, spectrograph, data_files, cfg_lines=None, setup_mode=False,
                     setup_lines=None, sorted_files=None, paths=None):
    """
    Generate a default PypeIt file.

    Args:
        pypeit_file (str): Name of PYPIT file to be generated
        spectrograph (str): Name of spectrograph
        data_files (list): List of data files -- essentially Deprecated
        cfg_lines (list, optional): List of configuration lines for parameters
        setup_mode (bool, optional): If True, 0 out required files for
            everything except Arc.  Currently unused in the body.
        setup_lines (list, optional): Lines written into the setup block
        sorted_files (list, optional): Pre-sorted file lines for the data block
        paths (list, optional): List of paths for slurping data files

    Raises:
        IOError: If `data_files` is not a list.
    """
    # Error checking
    if not isinstance(data_files, list):
        raise IOError("data_files needs to be a list")
    # Default configuration only names the spectrograph
    if cfg_lines is None:
        _cfg_lines = ['[rdx]', ' spectrograph = {0}'.format(spectrograph)]
    else:
        _cfg_lines = list(cfg_lines)
    # TODO: Bring back checks for the appropriate number of calibration
    # frames?
    # TODO: Clean up and check validity of _cfg_lines by reading it into
    # a ConfigObj?
    with open(pypeit_file, 'w') as f:
        # Header with the generation timestamp
        f.write('# Auto-generated PypeIt file\n')
        f.write('# {0}\n'.format(time.strftime("%a %d %b %Y %H:%M:%S", time.localtime())))
        f.write("\n")
        # Parameter block
        f.write("# User-defined execution parameters\n")
        f.write('\n'.join(_cfg_lines))
        f.write('\n\n')
        # Setup block, only if setup lines were provided
        if setup_lines is not None:
            f.write("# Setup\n")
            f.write("setup read\n")
            for setup_line in setup_lines:
                f.write(' ' + setup_line + '\n')
            f.write("setup end\n\n")
        # Data block: explicit files, then paths, then pre-sorted lines
        f.write("# Read in the data\n")
        f.write("data read\n")
        for data_file in data_files:
            f.write(' ' + data_file + '\n')
        if paths is not None:
            for path in paths:
                f.write(' path ' + path + '\n')
        if sorted_files is not None:
            f.write('\n'.join(sorted_files))
            f.write('\n')
        f.write("data end\n\n")
    msgs.info('PypeIt file written to: {0}'.format(pypeit_file))
| [
"os.path.expanduser",
"pypeit.msgs.error",
"astropy.table.Table",
"numpy.arange",
"pypeit.msgs.info",
"os.path.join",
"pypeit.msgs.newline",
"numpy.argsort",
"os.path.isfile",
"numpy.array",
"pypeit.msgs.warn",
"time.localtime",
"glob.glob"
] | [((8023, 8037), 'glob.glob', 'glob.glob', (['out'], {}), '(out)\n', (8032, 8037), False, 'import glob\n'), ((12311, 12322), 'astropy.table.Table', 'Table', (['data'], {}), '(data)\n', (12316, 12322), False, 'from astropy.table import Table\n'), ((14062, 14101), 'pypeit.msgs.info', 'msgs.info', (['"""Loading the reduction file"""'], {}), "('Loading the reduction file')\n", (14071, 14101), False, 'from pypeit import msgs\n'), ((15896, 15939), 'pypeit.msgs.info', 'msgs.info', (['"""Input file loaded successfully"""'], {}), "('Input file loaded successfully')\n", (15905, 15939), False, 'from pypeit import msgs\n'), ((4929, 4946), 'numpy.argsort', 'np.argsort', (['order'], {}), '(order)\n', (4939, 4946), True, 'import numpy as np\n'), ((6355, 6376), 'os.path.isfile', 'os.path.isfile', (['ifile'], {}), '(ifile)\n', (6369, 6376), False, 'import os\n'), ((7883, 7906), 'os.path.expanduser', 'os.path.expanduser', (['inp'], {}), '(inp)\n', (7901, 7906), False, 'import os\n'), ((7980, 8011), 'os.path.join', 'os.path.join', (['current_path', 'out'], {}), '(current_path, out)\n', (7992, 8011), False, 'import os\n'), ((9054, 9102), 'pypeit.msgs.warn', 'msgs.warn', (['"""There are duplicated files to skip."""'], {}), "('There are duplicated files to skip.')\n", (9063, 9102), False, 'from pypeit import msgs\n'), ((9216, 9264), 'pypeit.msgs.warn', 'msgs.warn', (['"""There are duplicated files to read."""'], {}), "('There are duplicated files to read.')\n", (9225, 9264), False, 'from pypeit import msgs\n'), ((11693, 11750), 'pypeit.msgs.error', 'msgs.error', (['"""Table format failure: No \'filename\' column."""'], {}), '("Table format failure: No \'filename\' column.")\n', (11703, 11750), False, 'from pypeit import msgs\n'), ((11795, 11853), 'pypeit.msgs.error', 'msgs.error', (['"""Table format failure: No \'frametype\' column."""'], {}), '("Table format failure: No \'frametype\' column.")\n', (11805, 11853), False, 'from pypeit import msgs\n'), ((14526, 14571), 
'pypeit.msgs.error', 'msgs.error', (['"""You haven\'t specified any data!"""'], {}), '("You haven\'t specified any data!")\n', (14536, 14571), False, 'from pypeit import msgs\n'), ((12549, 12584), 'os.path.join', 'os.path.join', (['p', "tbl['filename'][i]"], {}), "(p, tbl['filename'][i])\n", (12561, 12584), False, 'import os\n'), ((12600, 12624), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (12614, 12624), False, 'import os\n'), ((15822, 15890), 'pypeit.msgs.error', 'msgs.error', (['"""Add setup info to your PypeIt file in the setup block!"""'], {}), "('Add setup info to your PypeIt file in the setup block!')\n", (15832, 15890), False, 'from pypeit import msgs\n'), ((4991, 5016), 'numpy.arange', 'np.arange', (['order[srt[-1]]'], {}), '(order[srt[-1]])\n', (5000, 5016), True, 'import numpy as np\n'), ((9527, 9544), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (9541, 9544), False, 'import os\n'), ((12699, 12723), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (12713, 12723), False, 'import os\n'), ((6431, 6445), 'pypeit.msgs.newline', 'msgs.newline', ([], {}), '()\n', (6443, 6445), False, 'from pypeit import msgs\n'), ((15036, 15050), 'pypeit.msgs.newline', 'msgs.newline', ([], {}), '()\n', (15048, 15050), False, 'from pypeit import msgs\n'), ((18263, 18279), 'time.localtime', 'time.localtime', ([], {}), '()\n', (18277, 18279), False, 'import time\n'), ((4965, 4980), 'numpy.array', 'np.array', (['order'], {}), '(order)\n', (4973, 4980), True, 'import numpy as np\n')] |
import numpy as np
from tqdm.autonotebook import tqdm
import gc
import warnings
import sklearn.utils
_remove_cache = {}
def remove_retrain(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is retrained for each test sample with the important features set to a constant.

    If you want to know how important a set of features is you can ask how the model would be
    different if those features had never existed. To determine this we can mask those features
    across the entire training and test datasets, then retrain the model. If we apply compare the
    output of this retrained model to the original model we can see the effect produced by knowning
    the features we masked. Since for individualized explanation methods each test sample has a
    different set of most important features we need to retrain the model for every test sample
    to get the change in model performance when a specified fraction of the most important features
    are withheld.
    """
    warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
    # see if we match the last cached call: the data arguments must be the
    # *same objects* (identity) and the attributions exactly equal, so the
    # per-sample predictions cached below can be reused.
    global _remove_cache
    args = (X_train, y_train, X_test, y_test, model_generator, metric)
    cache_match = False
    if "args" in _remove_cache:
        if all(a is b for a,b in zip(_remove_cache["args"], args)) and np.all(_remove_cache["attr_test"] == attr_test):
            cache_match = True
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # this is the model we will retrain many times
    model_masked = model_generator()
    # mask nmask top features and re-train the model for each test explanation
    X_train_tmp = np.zeros(X_train.shape)
    X_test_tmp = np.zeros(X_test.shape)
    yp_masked_test = np.zeros(y_test.shape)
    # Small deterministic noise breaks ties in the attribution ordering.
    # NOTE(review): unlike remove_mask/remove_impute, const_rand is called
    # here without random_state -- confirm the noise is meant to be
    # seed-independent in the retrain variant.
    tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
    last_nmask = _remove_cache.get("nmask", None)
    last_yp_masked_test = _remove_cache.get("yp_masked_test", None)
    for i in tqdm(range(len(y_test)), "Retraining for the 'remove' metric"):
        if cache_match and last_nmask[i] == nmask[i]:
            # Same sample with the same mask count as the cached call: reuse.
            yp_masked_test[i] = last_yp_masked_test[i]
        elif nmask[i] == 0:
            # Nothing masked: the already-trained model's prediction applies.
            yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0]
        else:
            # mask out the most important features for this test instance
            X_train_tmp[:] = X_train
            X_test_tmp[:] = X_test
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            X_train_tmp[:,ordering[:nmask[i]]] = X_train[:,ordering[:nmask[i]]].mean()
            X_test_tmp[i,ordering[:nmask[i]]] = X_train[:,ordering[:nmask[i]]].mean()
            # retrain the model and make a prediction
            model_masked.fit(X_train_tmp, y_train)
            yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0]
    # save our results so the next call to us can be faster when there is redundancy
    _remove_cache["nmask"] = nmask
    _remove_cache["yp_masked_test"] = yp_masked_test
    _remove_cache["attr_test"] = attr_test
    _remove_cache["args"] = args
    return metric(y_test, yp_masked_test)
def remove_mask(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ Each test sample is masked by setting the important features to a constant.
    """
    X_train, X_test = to_array(X_train, X_test)
    # the train and test feature counts must agree
    assert X_train.shape[1] == X_test.shape[1]
    # Small deterministic noise breaks ties in the attribution ordering
    noise = const_rand(X_train.shape[1], random_state) * 1e-6
    feature_means = X_train.mean(0)
    # Replace the nmask[i] most important features of each test sample
    # with the training-set mean of those features
    X_masked = X_test.copy()
    for i in range(len(y_test)):
        if nmask[i] > 0:
            ranked = np.argsort(-attr_test[i,:] + noise)
            top = ranked[:nmask[i]]
            X_masked[i, top] = feature_means[top]
    return metric(y_test, trained_model.predict(X_masked))
def remove_impute(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is revaluated for each test sample with the important features set to an imputed value.

    Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
    being able to estimate the full data covariance matrix (and inverse) accuractly. So X_train.shape[0] should
    be significantly bigger than X_train.shape[1].
    """
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # keep nkeep top features for each test explanation
    C = np.cov(X_train.T)
    # Regularize the covariance so the sub-matrix inversions below are stable
    C += np.eye(C.shape[0]) * 1e-6
    X_test_tmp = X_test.copy()
    yp_masked_test = np.zeros(y_test.shape)
    # Small deterministic noise breaks ties in the attribution ordering
    tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
    mean_vals = X_train.mean(0)
    for i in range(len(y_test)):
        if nmask[i] > 0:
            # Features sorted from most to least important for this sample;
            # the top nmask[i] get imputed, the rest stay observed
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            observe_inds = ordering[nmask[i]:]
            impute_inds = ordering[:nmask[i]]
            # impute missing data assuming it follows a multivariate normal
            # distribution: conditional mean of the imputed block given the
            # observed block
            Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds])
            Cio = C[impute_inds,:][:,observe_inds]
            impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds])
            X_test_tmp[i, impute_inds] = impute
    yp_masked_test = trained_model.predict(X_test_tmp)
    return metric(y_test, yp_masked_test)
def remove_resample(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is revaluated for each test sample with the important features set to resample background values.
    """
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # how many samples to take
    nsamples = 100
    # keep nkeep top features for each test explanation
    N,M = X_test.shape
    # Repeat each test row nsamples times so its masked features can be
    # filled with nsamples different background draws
    X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M)
    # Small deterministic noise breaks ties in the attribution ordering
    tie_breaking_noise = const_rand(M) * 1e-6
    # Background rows drawn (with replacement) from the training set
    inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples, random_state=random_state)
    for i in range(N):
        if nmask[i] > 0:
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            X_test_tmp[i*nsamples:(i+1)*nsamples, ordering[:nmask[i]]] = X_train[inds, :][:, ordering[:nmask[i]]]
    yp_masked_test = trained_model.predict(X_test_tmp)
    yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1) # take the mean output over all samples
    return metric(y_test, yp_masked_test)
def batch_remove_retrain(nmask_train, nmask_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric):
    """ An approximation of holdout that only retraines the model once.

    This is alse called ROAR (RemOve And Retrain) in work by Google. It is much more computationally
    efficient that the holdout method because it masks the most important features in every sample
    and then retrains the model once, instead of retraining the model for every test sample like
    the holdout metric.
    """
    warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
    X_train, X_test = to_array(X_train, X_test)
    # the train and test feature counts must agree
    assert X_train.shape[1] == X_test.shape[1]
    noise = const_rand(X_train.shape[1]) * 1e-6
    feature_means = X_train.mean(0)

    def _mask_top_features(X, counts, attr, nrows):
        # Replace each row's counts[i] most important features with the
        # training-set mean of those features.
        X_masked = X.copy()
        for i in range(nrows):
            if counts[i] > 0:
                ranked = np.argsort(-attr[i, :] + noise)
                top = ranked[:counts[i]]
                X_masked[i, top] = feature_means[top]
        return X_masked

    X_train_masked = _mask_top_features(X_train, nmask_train, attr_train, len(y_train))
    X_test_masked = _mask_top_features(X_test, nmask_test, attr_test, len(y_test))
    # Retrain once on the masked training data, then score the masked test data
    retrained = model_generator()
    retrained.fit(X_train_masked, y_train)
    return metric(y_test, retrained.predict(X_test_masked))
_keep_cache = {}
def keep_retrain(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is retrained for each test sample with the non-important features set to a constant.

    If you want to know how important a set of features is you can ask how the model would be
    different if only those features had existed. To determine this we can mask the other features
    across the entire training and test datasets, then retrain the model. If we apply compare the
    output of this retrained model to the original model we can see the effect produced by only
    knowning the important features. Since for individualized explanation methods each test sample
    has a different set of most important features we need to retrain the model for every test sample
    to get the change in model performance when a specified fraction of the most important features
    are retained.
    """
    warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
    # see if we match the last cached call: the data arguments must be the
    # *same objects* (identity) and the attributions exactly equal, so the
    # per-sample predictions cached below can be reused.
    global _keep_cache
    args = (X_train, y_train, X_test, y_test, model_generator, metric)
    cache_match = False
    if "args" in _keep_cache:
        if all(a is b for a,b in zip(_keep_cache["args"], args)) and np.all(_keep_cache["attr_test"] == attr_test):
            cache_match = True
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # this is the model we will retrain many times
    model_masked = model_generator()
    # keep nkeep top features and re-train the model for each test explanation
    X_train_tmp = np.zeros(X_train.shape)
    X_test_tmp = np.zeros(X_test.shape)
    yp_masked_test = np.zeros(y_test.shape)
    # Small deterministic noise breaks ties in the attribution ordering.
    # NOTE(review): const_rand is called without random_state here --
    # confirm the noise is meant to be seed-independent in this variant.
    tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
    last_nkeep = _keep_cache.get("nkeep", None)
    last_yp_masked_test = _keep_cache.get("yp_masked_test", None)
    for i in tqdm(range(len(y_test)), "Retraining for the 'keep' metric"):
        if cache_match and last_nkeep[i] == nkeep[i]:
            # Same sample with the same keep count as the cached call: reuse.
            yp_masked_test[i] = last_yp_masked_test[i]
        elif nkeep[i] == attr_test.shape[1]:
            # All features kept: the already-trained model's prediction applies.
            yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0]
        else:
            # mask out the least important features for this test instance
            X_train_tmp[:] = X_train
            X_test_tmp[:] = X_test
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            X_train_tmp[:,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
            X_test_tmp[i,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
            # retrain the model and make a prediction
            model_masked.fit(X_train_tmp, y_train)
            yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0]
    # save our results so the next call to us can be faster when there is redundancy
    _keep_cache["nkeep"] = nkeep
    _keep_cache["yp_masked_test"] = yp_masked_test
    _keep_cache["attr_test"] = attr_test
    _keep_cache["args"] = args
    return metric(y_test, yp_masked_test)
def keep_mask(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is revaluated for each test sample with the non-important features set to their mean.

    For each test sample the nkeep[i] most important features (per
    attr_test) are kept and all remaining features are replaced by their
    training-set mean before re-evaluating the already-trained model.

    Args:
        nkeep: Per-sample count of top features to keep.
        X_train, y_train: Training data (only the feature means are used here).
        X_test, y_test: Test data to mask and score.
        attr_test: Per-sample feature attributions for the test set.
        model_generator: Unused; kept for a uniform metric signature.
        metric: Callable scored as metric(y_true, y_pred).
        trained_model: Already-fit model used for prediction.
        random_state: Seed for the deterministic tie-breaking noise.

    Returns:
        The metric value on the masked test set.
    """
    X_train, X_test = to_array(X_train, X_test)
    # the train and test feature counts must agree
    assert X_train.shape[1] == X_test.shape[1]
    # keep nkeep top features for each test explanation
    X_test_tmp = X_test.copy()
    # Small deterministic noise breaks ties in the attribution ordering
    tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
    mean_vals = X_train.mean(0)
    for i in range(len(y_test)):
        if nkeep[i] < X_test.shape[1]:
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            X_test_tmp[i,ordering[nkeep[i]:]] = mean_vals[ordering[nkeep[i]:]]
    # (Removed a dead `yp_masked_test = np.zeros(...)` that was always
    # overwritten by the prediction below.)
    yp_masked_test = trained_model.predict(X_test_tmp)
    return metric(y_test, yp_masked_test)
def keep_impute(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is revaluated for each test sample with the non-important features set to an imputed value.

    Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
    being able to estimate the full data covariance matrix (and inverse) accuractly. So X_train.shape[0] should
    be significantly bigger than X_train.shape[1].
    """
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # keep nkeep top features for each test explanation
    C = np.cov(X_train.T)
    # Regularize the covariance so the sub-matrix inversions below are stable
    C += np.eye(C.shape[0]) * 1e-6
    X_test_tmp = X_test.copy()
    yp_masked_test = np.zeros(y_test.shape)
    # Small deterministic noise breaks ties in the attribution ordering
    tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
    mean_vals = X_train.mean(0)
    for i in range(len(y_test)):
        if nkeep[i] < X_test.shape[1]:
            # Features sorted from most to least important for this sample;
            # the top nkeep[i] stay observed, the rest get imputed
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            observe_inds = ordering[:nkeep[i]]
            impute_inds = ordering[nkeep[i]:]
            # impute missing data assuming it follows a multivariate normal
            # distribution: conditional mean of the imputed block given the
            # observed block
            Coo_inv = np.linalg.inv(C[observe_inds,:][:,observe_inds])
            Cio = C[impute_inds,:][:,observe_inds]
            impute = mean_vals[impute_inds] + Cio @ Coo_inv @ (X_test[i, observe_inds] - mean_vals[observe_inds])
            X_test_tmp[i, impute_inds] = impute
    yp_masked_test = trained_model.predict(X_test_tmp)
    return metric(y_test, yp_masked_test)
def keep_resample(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state, nsamples=100):
    """ The model is re-evaluated for each test sample with the non-important features set to resampled background values.

    Each test row is replicated `nsamples` times; in every replica the features
    outside the nkeep most important ones are overwritten with values drawn
    (with replacement) from the training set, and the model predictions over
    the replicas are averaged.

    nsamples was previously hard-coded to 100; it is now a parameter with the
    same default, so existing callers are unaffected.
    """
    X_train, X_test = to_array(X_train, X_test)
    # train and test must share the same feature space
    assert X_train.shape[1] == X_test.shape[1]
    # keep nkeep top features for each test explanation
    N,M = X_test.shape
    # replicate each test row `nsamples` times: rows i*nsamples:(i+1)*nsamples are copies of row i
    X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M)
    # tiny deterministic noise so argsort breaks attribution ties consistently
    tie_breaking_noise = const_rand(M) * 1e-6
    # background rows drawn with replacement
    # NOTE(review): indices are drawn from range(N) (the number of TEST rows) but
    # used to index X_train below — verify X_train has at least N rows, or whether
    # np.arange(X_train.shape[0]) was intended.
    inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples, random_state=random_state)
    for i in range(N):
        if nkeep[i] < M:
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            X_test_tmp[i*nsamples:(i+1)*nsamples, ordering[nkeep[i]:]] = X_train[inds, :][:, ordering[nkeep[i]:]]
    yp_masked_test = trained_model.predict(X_test_tmp)
    yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1) # take the mean output over all samples
    return metric(y_test, yp_masked_test)
def batch_keep_retrain(nkeep_train, nkeep_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric):
    """ An approximation of the keep metric that only retrains the model once.

    Also called KAR (Keep And Retrain) in work by Google. It is much more
    computationally efficient than the keep method: the unimportant features
    are masked in every sample up front and the model is retrained a single
    time, instead of once per test sample.
    """
    warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
    X_train, X_test = to_array(X_train, X_test)
    # train and test must agree on the number of features
    assert X_train.shape[1] == X_test.shape[1]
    X_train_mean = X_train.mean(0)
    tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6

    def _mask_unimportant(X, nkeep, attr, n_rows):
        # Replace everything except the nkeep highest-attribution features of
        # each row with the training-set mean of that feature.
        X_masked = X.copy()
        for row in range(n_rows):
            if nkeep[row] < X.shape[1]:
                order = np.argsort(-attr[row, :] + tie_breaking_noise)
                X_masked[row, order[nkeep[row]:]] = X_train_mean[order[nkeep[row]:]]
        return X_masked

    X_train_tmp = _mask_unimportant(X_train, nkeep_train, attr_train, len(y_train))
    X_test_tmp = _mask_unimportant(X_test, nkeep_test, attr_test, len(y_test))
    # retrain once on the masked training data and score the masked test data
    model_masked = model_generator()
    model_masked.fit(X_train_tmp, y_train)
    yp_test_masked = model_masked.predict(X_test_tmp)
    return metric(y_test, yp_test_masked)
def local_accuracy(X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model):
    """ How well the feature attributions (plus a constant base rate) sum up to the model output. """
    X_train, X_test = to_array(X_train, X_test)
    # train and test must agree on the number of features
    assert X_train.shape[1] == X_test.shape[1]
    # compare the model's raw predictions with the per-row attribution totals
    yp_test = trained_model.predict(X_test)
    attr_totals = strip_list(attr_test).sum(1)
    return metric(yp_test, attr_totals)
def to_array(*args):
    """ Convert any pandas DataFrames among the arguments to raw numpy arrays; pass everything else through. """
    def _coerce(a):
        # string-based check avoids importing pandas when it is not needed
        if str(type(a)).endswith("'pandas.core.frame.DataFrame'>"):
            return a.values
        return a
    return [_coerce(a) for a in args]
def const_rand(size, seed=23980):
    """ Generate a deterministic uniform random array of the given size.

    A dedicated RandomState is used so the global numpy RNG is left untouched.
    (The previous implementation tried to save/restore the global state via
    `old_seed = np.random.seed()`, but np.random.seed() returns None and
    re-seeds the generator, so the global state was actually being destroyed
    and then re-seeded from entropy.)

    Args:
        size: number of samples to draw.
        seed: RNG seed; the default matches the original behavior.

    Returns:
        np.ndarray of shape (size,) with uniform [0, 1) samples.
    """
    # RandomState(seed).rand(...) yields exactly the same stream as the legacy
    # np.random.seed(seed); np.random.rand(...) sequence.
    return np.random.RandomState(seed).rand(size)
def const_shuffle(arr, seed=23980):
    """ Shuffle an array in-place deterministically with a fixed seed.

    A dedicated RandomState is used so the global numpy RNG is left untouched.
    (The previous implementation tried to save/restore the global state via
    `old_seed = np.random.seed()`, but np.random.seed() returns None and
    re-seeds the generator, so the global state was actually being destroyed
    and then re-seeded from entropy.)

    Args:
        arr: array-like to shuffle in place.
        seed: RNG seed; the default matches the original behavior.
    """
    # RandomState(seed).shuffle(...) produces exactly the same permutation as
    # the legacy np.random.seed(seed); np.random.shuffle(...) sequence.
    np.random.RandomState(seed).shuffle(arr)
def strip_list(attrs):
    """ If attributions come as a per-class list, keep only entry 1 (the '1' class); otherwise return as-is. """
    return attrs[1] if isinstance(attrs, list) else attrs
| [
"numpy.tile",
"numpy.eye",
"numpy.all",
"numpy.reshape",
"numpy.random.rand",
"numpy.argsort",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.random.seed",
"warnings.warn",
"numpy.cov",
"numpy.arange",
"numpy.random.shuffle"
] | [((1059, 1159), 'warnings.warn', 'warnings.warn', (['"""The retrain based measures can incorrectly evaluate models in some cases!"""'], {}), "(\n 'The retrain based measures can incorrectly evaluate models in some cases!'\n )\n", (1072, 1159), False, 'import warnings\n'), ((1813, 1836), 'numpy.zeros', 'np.zeros', (['X_train.shape'], {}), '(X_train.shape)\n', (1821, 1836), True, 'import numpy as np\n'), ((1854, 1876), 'numpy.zeros', 'np.zeros', (['X_test.shape'], {}), '(X_test.shape)\n', (1862, 1876), True, 'import numpy as np\n'), ((1898, 1920), 'numpy.zeros', 'np.zeros', (['y_test.shape'], {}), '(y_test.shape)\n', (1906, 1920), True, 'import numpy as np\n'), ((4826, 4843), 'numpy.cov', 'np.cov', (['X_train.T'], {}), '(X_train.T)\n', (4832, 4843), True, 'import numpy as np\n'), ((4931, 4953), 'numpy.zeros', 'np.zeros', (['y_test.shape'], {}), '(y_test.shape)\n', (4939, 4953), True, 'import numpy as np\n'), ((7493, 7593), 'warnings.warn', 'warnings.warn', (['"""The retrain based measures can incorrectly evaluate models in some cases!"""'], {}), "(\n 'The retrain based measures can incorrectly evaluate models in some cases!'\n )\n", (7506, 7593), False, 'import warnings\n'), ((9589, 9689), 'warnings.warn', 'warnings.warn', (['"""The retrain based measures can incorrectly evaluate models in some cases!"""'], {}), "(\n 'The retrain based measures can incorrectly evaluate models in some cases!'\n )\n", (9602, 9689), False, 'import warnings\n'), ((10335, 10358), 'numpy.zeros', 'np.zeros', (['X_train.shape'], {}), '(X_train.shape)\n', (10343, 10358), True, 'import numpy as np\n'), ((10376, 10398), 'numpy.zeros', 'np.zeros', (['X_test.shape'], {}), '(X_test.shape)\n', (10384, 10398), True, 'import numpy as np\n'), ((10420, 10442), 'numpy.zeros', 'np.zeros', (['y_test.shape'], {}), '(y_test.shape)\n', (10428, 10442), True, 'import numpy as np\n'), ((12263, 12285), 'numpy.zeros', 'np.zeros', (['y_test.shape'], {}), '(y_test.shape)\n', (12271, 12285), True, 'import numpy 
as np\n'), ((13426, 13443), 'numpy.cov', 'np.cov', (['X_train.T'], {}), '(X_train.T)\n', (13432, 13443), True, 'import numpy as np\n'), ((13531, 13553), 'numpy.zeros', 'np.zeros', (['y_test.shape'], {}), '(y_test.shape)\n', (13539, 13553), True, 'import numpy as np\n'), ((16119, 16219), 'warnings.warn', 'warnings.warn', (['"""The retrain based measures can incorrectly evaluate models in some cases!"""'], {}), "(\n 'The retrain based measures can incorrectly evaluate models in some cases!'\n )\n", (16132, 16219), False, 'import warnings\n'), ((18032, 18048), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (18046, 18048), True, 'import numpy as np\n'), ((18053, 18073), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (18067, 18073), True, 'import numpy as np\n'), ((18084, 18104), 'numpy.random.rand', 'np.random.rand', (['size'], {}), '(size)\n', (18098, 18104), True, 'import numpy as np\n'), ((18109, 18133), 'numpy.random.seed', 'np.random.seed', (['old_seed'], {}), '(old_seed)\n', (18123, 18133), True, 'import numpy as np\n'), ((18262, 18278), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (18276, 18278), True, 'import numpy as np\n'), ((18283, 18303), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (18297, 18303), True, 'import numpy as np\n'), ((18308, 18330), 'numpy.random.shuffle', 'np.random.shuffle', (['arr'], {}), '(arr)\n', (18325, 18330), True, 'import numpy as np\n'), ((18335, 18359), 'numpy.random.seed', 'np.random.seed', (['old_seed'], {}), '(old_seed)\n', (18349, 18359), True, 'import numpy as np\n'), ((4853, 4871), 'numpy.eye', 'np.eye', (['C.shape[0]'], {}), '(C.shape[0])\n', (4859, 4871), True, 'import numpy as np\n'), ((6447, 6459), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (6456, 6459), True, 'import numpy as np\n'), ((13453, 13471), 'numpy.eye', 'np.eye', (['C.shape[0]'], {}), '(C.shape[0])\n', (13459, 13471), True, 'import numpy as np\n'), ((15090, 15102), 'numpy.arange', 
'np.arange', (['N'], {}), '(N)\n', (15099, 15102), True, 'import numpy as np\n'), ((1417, 1464), 'numpy.all', 'np.all', (["(_remove_cache['attr_test'] == attr_test)"], {}), "(_remove_cache['attr_test'] == attr_test)\n", (1423, 1464), True, 'import numpy as np\n'), ((3886, 3935), 'numpy.argsort', 'np.argsort', (['(-attr_test[i, :] + tie_breaking_noise)'], {}), '(-attr_test[i, :] + tie_breaking_noise)\n', (3896, 3935), True, 'import numpy as np\n'), ((5142, 5191), 'numpy.argsort', 'np.argsort', (['(-attr_test[i, :] + tie_breaking_noise)'], {}), '(-attr_test[i, :] + tie_breaking_noise)\n', (5152, 5191), True, 'import numpy as np\n'), ((5408, 5458), 'numpy.linalg.inv', 'np.linalg.inv', (['C[observe_inds, :][:, observe_inds]'], {}), '(C[observe_inds, :][:, observe_inds])\n', (5421, 5458), True, 'import numpy as np\n'), ((6311, 6341), 'numpy.tile', 'np.tile', (['X_test', '[1, nsamples]'], {}), '(X_test, [1, nsamples])\n', (6318, 6341), True, 'import numpy as np\n'), ((6579, 6628), 'numpy.argsort', 'np.argsort', (['(-attr_test[i, :] + tie_breaking_noise)'], {}), '(-attr_test[i, :] + tie_breaking_noise)\n', (6589, 6628), True, 'import numpy as np\n'), ((6819, 6860), 'numpy.reshape', 'np.reshape', (['yp_masked_test', '(N, nsamples)'], {}), '(yp_masked_test, (N, nsamples))\n', (6829, 6860), True, 'import numpy as np\n'), ((7982, 8032), 'numpy.argsort', 'np.argsort', (['(-attr_train[i, :] + tie_breaking_noise)'], {}), '(-attr_train[i, :] + tie_breaking_noise)\n', (7992, 8032), True, 'import numpy as np\n'), ((8246, 8295), 'numpy.argsort', 'np.argsort', (['(-attr_test[i, :] + tie_breaking_noise)'], {}), '(-attr_test[i, :] + tie_breaking_noise)\n', (8256, 8295), True, 'import numpy as np\n'), ((9941, 9986), 'numpy.all', 'np.all', (["(_keep_cache['attr_test'] == attr_test)"], {}), "(_keep_cache['attr_test'] == attr_test)\n", (9947, 9986), True, 'import numpy as np\n'), ((12488, 12537), 'numpy.argsort', 'np.argsort', (['(-attr_test[i, :] + tie_breaking_noise)'], {}), 
'(-attr_test[i, :] + tie_breaking_noise)\n', (12498, 12537), True, 'import numpy as np\n'), ((13756, 13805), 'numpy.argsort', 'np.argsort', (['(-attr_test[i, :] + tie_breaking_noise)'], {}), '(-attr_test[i, :] + tie_breaking_noise)\n', (13766, 13805), True, 'import numpy as np\n'), ((14022, 14072), 'numpy.linalg.inv', 'np.linalg.inv', (['C[observe_inds, :][:, observe_inds]'], {}), '(C[observe_inds, :][:, observe_inds])\n', (14035, 14072), True, 'import numpy as np\n'), ((14954, 14984), 'numpy.tile', 'np.tile', (['X_test', '[1, nsamples]'], {}), '(X_test, [1, nsamples])\n', (14961, 14984), True, 'import numpy as np\n'), ((15222, 15271), 'numpy.argsort', 'np.argsort', (['(-attr_test[i, :] + tie_breaking_noise)'], {}), '(-attr_test[i, :] + tie_breaking_noise)\n', (15232, 15271), True, 'import numpy as np\n'), ((15462, 15503), 'numpy.reshape', 'np.reshape', (['yp_masked_test', '(N, nsamples)'], {}), '(yp_masked_test, (N, nsamples))\n', (15472, 15503), True, 'import numpy as np\n'), ((16623, 16673), 'numpy.argsort', 'np.argsort', (['(-attr_train[i, :] + tie_breaking_noise)'], {}), '(-attr_train[i, :] + tie_breaking_noise)\n', (16633, 16673), True, 'import numpy as np\n'), ((16901, 16950), 'numpy.argsort', 'np.argsort', (['(-attr_test[i, :] + tie_breaking_noise)'], {}), '(-attr_test[i, :] + tie_breaking_noise)\n', (16911, 16950), True, 'import numpy as np\n'), ((2569, 2618), 'numpy.argsort', 'np.argsort', (['(-attr_test[i, :] + tie_breaking_noise)'], {}), '(-attr_test[i, :] + tie_breaking_noise)\n', (2579, 2618), True, 'import numpy as np\n'), ((11103, 11152), 'numpy.argsort', 'np.argsort', (['(-attr_test[i, :] + tie_breaking_noise)'], {}), '(-attr_test[i, :] + tie_breaking_noise)\n', (11113, 11152), True, 'import numpy as np\n')] |
import warnings
import pickle as pkl
import sys, os
import scipy.sparse as sp
import networkx as nx
import torch
import numpy as np
from sklearn import datasets
from sklearn.preprocessing import LabelBinarizer, scale
from sklearn.model_selection import train_test_split
from ogb.nodeproppred import DglNodePropPredDataset
import copy
from utils import sparse_mx_to_torch_sparse_tensor, dgl_graph_to_torch_sparse
# Globally silence all warnings emitted while loading and converting datasets.
warnings.simplefilter("ignore")
def parse_index_file(filename):
    """Parse a test-index file containing one integer per line.

    Args:
        filename: path to the index file.

    Returns:
        list of int indices, in file order.
    """
    # Use a context manager so the file handle is closed promptly;
    # the original left the handle open and relied on garbage collection.
    with open(filename) as f:
        return [int(line.strip()) for line in f]
def sample_mask(idx, l):
    """Create a boolean mask of length l that is True at the given indices.

    Args:
        idx: indices to set True.
        l: total mask length.

    Returns:
        np.ndarray of bools, shape (l,).
    """
    mask = np.zeros(l)
    mask[idx] = 1
    # np.bool was removed in NumPy 1.24; the builtin bool is the supported dtype
    return np.array(mask, dtype=bool)
def load_citation_network(dataset_str, sparse=None):
    """Load a Planetoid-style citation dataset from ./data.

    Reads the pickled "data/ind.<dataset>.*" files, rebuilds the full feature
    and label matrices in canonical node order, and converts everything to
    torch tensors plus boolean train/val/test masks.

    Args:
        dataset_str: dataset name used to locate "data/ind.<name>.*" files.
        sparse: if truthy, return the adjacency as a torch sparse tensor;
            otherwise as a dense float32 numpy array.

    Returns:
        (features, nfeats, labels, nclasses, train_mask, val_mask, test_mask, adj)
    """
    # the standard Planetoid pickle pieces: labeled train (x, y), test (tx, ty),
    # all-but-test (allx, ally), and the adjacency dict-of-lists (graph)
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    # the test indices are stored shuffled; the sorted copy gives canonical order
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range - min(test_idx_range), :] = ty
        ty = ty_extended
    # stack features and move the test rows back to their canonical positions
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    if not sparse:
        adj = np.array(adj.todense(),dtype='float32')
    else:
        adj = sparse_mx_to_torch_sparse_tensor(adj)
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    # standard Planetoid split: train = labeled nodes, val = next 500, test = held-out range
    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y) + 500)
    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])
    features = torch.FloatTensor(features.todense())
    labels = torch.LongTensor(labels)
    train_mask = torch.BoolTensor(train_mask)
    val_mask = torch.BoolTensor(val_mask)
    test_mask = torch.BoolTensor(test_mask)
    nfeats = features.shape[1]
    # nodes with no valid one-hot label row get an arbitrary default class
    # NOTE(review): the hard-coded 6-entry default assumes exactly 6 classes — verify per dataset
    for i in range(labels.shape[0]):
        sum_ = torch.sum(labels[i])
        if sum_ != 1:
            labels[i] = torch.tensor([1, 0, 0, 0, 0, 0])
    # convert one-hot rows to integer class ids
    labels = (labels == 1).nonzero()[:, 1]
    nclasses = torch.max(labels).item() + 1
    return features, nfeats, labels, nclasses, train_mask, val_mask, test_mask, adj
def load_data(args):
    # Thin dispatcher: load the citation network named by args.dataset,
    # keeping the adjacency sparse when args.sparse is truthy.
    return load_citation_network(args.dataset, args.sparse) | [
"networkx.from_dict_of_lists",
"utils.sparse_mx_to_torch_sparse_tensor",
"torch.LongTensor",
"numpy.sort",
"torch.max",
"pickle.load",
"numpy.array",
"numpy.zeros",
"torch.tensor",
"torch.sum",
"numpy.vstack",
"warnings.simplefilter",
"scipy.sparse.vstack",
"torch.BoolTensor"
] | [((416, 447), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (437, 447), False, 'import warnings\n'), ((675, 686), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (683, 686), True, 'import numpy as np\n'), ((716, 745), 'numpy.array', 'np.array', (['mask'], {'dtype': 'np.bool'}), '(mask, dtype=np.bool)\n', (724, 745), True, 'import numpy as np\n'), ((1316, 1341), 'numpy.sort', 'np.sort', (['test_idx_reorder'], {}), '(test_idx_reorder)\n', (1323, 1341), True, 'import numpy as np\n'), ((2265, 2286), 'numpy.vstack', 'np.vstack', (['(ally, ty)'], {}), '((ally, ty))\n', (2274, 2286), True, 'import numpy as np\n'), ((2691, 2715), 'torch.LongTensor', 'torch.LongTensor', (['labels'], {}), '(labels)\n', (2707, 2715), False, 'import torch\n'), ((2733, 2761), 'torch.BoolTensor', 'torch.BoolTensor', (['train_mask'], {}), '(train_mask)\n', (2749, 2761), False, 'import torch\n'), ((2777, 2803), 'torch.BoolTensor', 'torch.BoolTensor', (['val_mask'], {}), '(val_mask)\n', (2793, 2803), False, 'import torch\n'), ((2820, 2847), 'torch.BoolTensor', 'torch.BoolTensor', (['test_mask'], {}), '(test_mask)\n', (2836, 2847), False, 'import torch\n'), ((2086, 2114), 'networkx.from_dict_of_lists', 'nx.from_dict_of_lists', (['graph'], {}), '(graph)\n', (2107, 2114), True, 'import networkx as nx\n'), ((2213, 2250), 'utils.sparse_mx_to_torch_sparse_tensor', 'sparse_mx_to_torch_sparse_tensor', (['adj'], {}), '(adj)\n', (2245, 2250), False, 'from utils import sparse_mx_to_torch_sparse_tensor, dgl_graph_to_torch_sparse\n'), ((2932, 2952), 'torch.sum', 'torch.sum', (['labels[i]'], {}), '(labels[i])\n', (2941, 2952), False, 'import torch\n'), ((1961, 1982), 'scipy.sparse.vstack', 'sp.vstack', (['(allx, tx)'], {}), '((allx, tx))\n', (1970, 1982), True, 'import scipy.sparse as sp\n'), ((2999, 3031), 'torch.tensor', 'torch.tensor', (['[1, 0, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0])\n', (3011, 3031), False, 'import torch\n'), ((3090, 3107), 'torch.max', 
'torch.max', (['labels'], {}), '(labels)\n', (3099, 3107), False, 'import torch\n'), ((1061, 1091), 'pickle.load', 'pkl.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (1069, 1091), True, 'import pickle as pkl\n'), ((1142, 1153), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (1150, 1153), True, 'import pickle as pkl\n')] |
import os
import numpy as np
import pydub as pd
from src.SBS.messenger import Messenger as messenger
from abc import ABC, abstractmethod
class ImageMessage(messenger, ABC):
    """ Abstract messenger base class for extracting hidden messages from media files. """

    def __init__(self, images_path=None):
        super().__init__(images_path)

    @abstractmethod
    def __add__(self, other):
        """ Add two messages

            Args:
                other (ImageMessage): message to be added
        """
        pass

    @abstractmethod
    def __repr__(self):
        """ Return string representation of message
        """
        pass

    def _getMessageFromFile(self, file) -> np.ndarray:
        """ Return the (left, right) channel sample arrays for an mp3 file.

            Args:
                file (str): path of mp3 file

            Returns:
                tuple of np.ndarray: left and right channel samples. Currently
                empty placeholder arrays, because the pydub decoding below is
                disabled.
        """
        # The real decoding is stubbed out; when re-enabled, left/right become
        # pydub mono segments:
        # mp3 = pd.AudioSegment.from_mp3(f"{file}")
        # left, right = mp3.split_to_mono()[0], mp3.split_to_mono()[1]
        left = np.array([])
        right = np.array([])
        # BUGFIX: the previous return called left.get_array_of_samples(), a
        # pydub-segment method that does not exist on numpy arrays, so this
        # method always raised AttributeError while the decoding above was
        # commented out. Return the placeholder arrays directly instead.
        return left, right
| [
"numpy.array"
] | [((996, 1008), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1004, 1008), True, 'import numpy as np\n'), ((1025, 1037), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1033, 1037), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
# Reduce TF log noise; see https://stackoverflow.com/questions/35911252/disable-tensorflow-debugging-information
tf.logging.set_verbosity(tf.logging.INFO)
# tensorflow_probability is not available for TensorFlow 1.10, so alias
# tf.contrib (which provides a compatible `distributions` module) in its place.
# import tensorflow_probability as tfp
tfp = tf.contrib
from ex_utils import build_mlp
class Density_Model(object):
    """ Common interface for state-density estimators used for exploration bonuses. """

    def __init__(self):
        super(Density_Model, self).__init__()

    def receive_tf_sess(self, sess):
        """ Store a TensorFlow session for models that need one. """
        self.sess = sess

    def get_prob(self, state):
        """ Return the estimated probability/density of `state`; subclasses must override. """
        raise NotImplementedError
class Histogram(Density_Model):
    """ Count-based density model over a fixed set of discrete bins.

    `preprocessor` maps a raw state to its bin key; visit counts are kept per
    bin and normalized by the grand total to produce probabilities.
    """

    def __init__(self, nbins, preprocessor):
        super(Histogram, self).__init__()
        self.nbins = nbins
        self.total = 0.
        # one counter per bin, all starting at zero
        self.hist = {b: 0 for b in range(int(self.nbins))}
        self.preprocessor = preprocessor

    def update_count(self, state, increment):
        """ Add `increment` visits to the bin containing `state`.

        Args:
            state: numpy array, binned via self.preprocessor.
            increment: int number of visits to add.
        """
        self.hist[self.preprocessor(state)] += increment
        self.total += increment

    def get_count(self, states):
        """ Per-state visit counts.

        Args:
            states: numpy array (bsize, ob_dim)

        Returns:
            numpy array (bsize) of bin counts.
        """
        return np.asarray([self.hist[self.preprocessor(s)] for s in states])

    def get_prob(self, states):
        """ Empirical visit probabilities, i.e. counts normalized by the grand total.

        Args:
            states: numpy array (bsize, ob_dim)

        Returns:
            numpy array (bsize) of probabilities.
        """
        return self.get_count(states) / float(self.total)
class RBF(Density_Model):
    """ Kernel density estimator built from RBF kernels centered on replay states.

    https://en.wikipedia.org/wiki/Radial_basis_function_kernel
    https://en.wikipedia.org/wiki/Kernel_density_estimation
    """

    def __init__(self, sigma):
        super(RBF, self).__init__()
        self.sigma = sigma
        self.means = None

    def fit_data(self, data):
        """ Memorize the data points themselves as the kernel centers.

        Args:
            data: list of B states, each of shape (ob_dim).
        After fitting, self.means is a np array of shape (B, ob_dim).
        """
        B, ob_dim = len(data), len(data[0])
        self.means = np.asarray(data)
        assert self.means.shape == (B, ob_dim)

    def get_prob(self, states):
        """ Average (unnormalized) kernel response of each query state over all centers.

        Args:
            states: np array (b, ob_dim) of query states.

        Returns:
            np array (b,) of density estimates; uniform 1/b when no data has
            been fit yet.
        """
        b, ob_dim = states.shape
        if self.means is None:
            # no replay data yet: fall back to a uniform distribution
            return (1.0/len(states))*np.ones(len(states))
        B, replay_dim = self.means.shape
        assert states.ndim == self.means.ndim and ob_dim == replay_dim
        # pairwise differences via broadcasting: deltas[i, j, :] = states[i] - means[j]
        deltas = states[:, None, :] - self.means[None, :, :]
        assert deltas.shape == (b, B, ob_dim)
        # per-dimension unnormalized Gaussian responses summed over dimensions
        euc_dists = np.exp(-deltas ** 2 / (2 * self.sigma)).sum(axis=-1)
        assert euc_dists.shape == (b, B)
        # normalize by the number of dimensions so values do not scale with ob_dim
        gaussians = euc_dists / ob_dim
        assert gaussians.shape == (b, B)
        # average the kernel responses over all centers
        densities = gaussians.mean(axis=-1)
        assert densities.shape == (b,)
        return densities
class Exemplar(Density_Model):
    """ Amortized exemplar-based density model (TF1 graph style).

    Two encoders map a pair of states to latent Gaussians, a standard-normal
    prior regularizes them through a KL penalty weighted by kl_weight, and a
    Bernoulli discriminator on the concatenated latent samples scores whether
    the two input states match. get_prob() converts the discriminator output
    D(x, x) into a density estimate via (1 - D) / D.
    """
    def __init__(self, ob_dim, hid_dim, learning_rate, kl_weight):
        super(Exemplar, self).__init__()
        self.ob_dim = ob_dim
        self.hid_dim = hid_dim
        self.learning_rate = learning_rate
        self.kl_weight = kl_weight
    def build_computation_graph(self):
        """
        ### PROBLEM 3
        ### YOUR CODE HERE
        ### Equation refactoring:
        ### e1: p(y|z) = D(z)^{y} * (1 − D(z))^{1−y}
        ### e2: log(p(y|z)) = y * log(D(z)) + (1-y) * log(1-D(z))
        ### e3: D_{KL}(q(z|x)||p(z)) = D_{KL}(q(z1|x)||p(z1)) + D_{KL}(q(z2|x)||p(z2))
        TODO:
            1. self.log_likelihood. shape: (batch_size)
                - use tf.squeeze
                - use the discriminator to get the log prob of the discrim_target
            2. self.likelihood. shape: (batch_size)
                - use tf.squeeze
                - use the discriminator to get the prob of the discrim_target
            3. self.kl. shape: (batch_size)
                - simply add the kl divergence between self.encoder1 and
                  the prior and the kl divergence between self.encoder2
                  and the prior. Do not average.
            4. self.elbo:
                - subtract the kl (weighted by self.kl_weight) from the
                  log_likelihood, and average over the batch
            5. self.update_op: use the AdamOptimizer with self.learning_rate
                to minimize the -self.elbo (Note the negative sign!)
        Hint:
            https://www.tensorflow.org/probability/api_docs/python/tfp/distributions
        """
        self.state1, self.state2 = self.define_placeholders()
        self.encoder1, self.encoder2, self.prior, self.discriminator = self.forward_pass(self.state1, self.state2)
        # binary label: 1 when the two states are a positive (matching) pair
        self.discrim_target = tf.placeholder(shape=[None, 1], name="discrim_target", dtype=tf.float32)
        ## equation 2
        self.log_likelihood = tf.squeeze(self.discriminator.log_prob(self.discrim_target), axis=-1)
        ## equation 1, can directly from self.log_likelihood
        self.likelihood = tf.squeeze(self.discriminator.prob(self.discrim_target), axis=-1)
        ## equation 3: per-sample KL of both encoders against the prior
        self.kl = tf.distributions.kl_divergence(self.encoder1, self.prior) + tf.distributions.kl_divergence(self.encoder2, self.prior)
        assert len(self.log_likelihood.shape) == len(self.likelihood.shape) == len(self.kl.shape) == 1
        self.elbo = tf.reduce_mean(self.log_likelihood - self.kl_weight * self.kl)
        ## minimizing -elbo is equivalent to maximizing the elbo
        self.update_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(-self.elbo)
    def define_placeholders(self):
        # placeholders for the two states of each comparison pair
        state1 = tf.placeholder(shape=[None, self.ob_dim], name="s1", dtype=tf.float32)
        state2 = tf.placeholder(shape=[None, self.ob_dim], name="s2", dtype=tf.float32)
        return state1, state2
    def make_encoder(self, state, z_size, scope, n_layers, hid_size):
        """
        ### PROBLEM 3
        ### YOUR CODE HERE
        args:
            state: tf variable
            z_size: output dimension of the encoder network
            scope: scope name
            n_layers: number of layers of the encoder network
            hid_size: hidden dimension of encoder network
        TODO:
            1. z_mean: the output of a neural network that takes the state as input,
                has output dimension z_size, n_layers layers, and hidden
                dimension hid_size
            2. z_logstd: a trainable variable, initialized to 0
                shape (z_size,)
        Hint: use build_mlp
        """
        ## NOTE(review): tf.nn.relu as the mean's output activation clamps
        ## z_mean to be non-negative — confirm this is intended.
        z_mean = build_mlp(state, z_size, scope, n_layers, hid_size, output_activation=tf.nn.relu)
        z_logstd = tf.get_variable(shape=[z_size,], dtype=tf.float32, name="z_logstd", initializer=tf.zeros_initializer())
        return tfp.distributions.MultivariateNormalDiag(loc=z_mean, scale_diag=tf.exp(z_logstd))
    def make_prior(self, z_size):
        """
        ### PROBLEM 3
        ### YOUR CODE HERE
        args:
            z_size: output dimension of the encoder network
        TODO:
            prior_mean and prior_logstd are for a standard normal distribution
            both have dimension z_size
        """
        ## mean 0 and exp(0) = 1 std give a standard normal; the prior is a
        ## fixed regularizer, so non-trainable tf.constant is sufficient
        prior_mean = tf.constant(0., shape=[z_size,], dtype=tf.float32, name="prior_mean")
        prior_logstd = tf.constant(0., shape=[z_size,], dtype=tf.float32, name="prior_logstd")
        return tfp.distributions.MultivariateNormalDiag(loc=prior_mean, scale_diag=tf.exp(prior_logstd))
    def make_discriminator(self, z, output_size, scope, n_layers, hid_size):
        """
        ### PROBLEM 3
        ### YOUR CODE HERE
        args:
            z: input to to discriminator network
            output_size: output dimension of discriminator network
            scope: scope name
            n_layers: number of layers of discriminator network
            hid_size: hidden dimension of discriminator network
        TODO:
            1. logit: the output of a neural network that takes z as input,
                has output size output_size, n_layers layers, and hidden
                dimension hid_size
        Hint: use build_mlp
        """
        ## NOTE(review): relu keeps the logit in [0, +inf), i.e. the Bernoulli
        ## probability never drops below 0.5 — confirm this is intended.
        logit = build_mlp(z, output_size, scope, n_layers, hid_size, output_activation=tf.nn.relu)
        return tfp.distributions.Bernoulli(logit)
    def forward_pass(self, state1, state2):
        """
        ### PROBLEM 3
        ### YOUR CODE HERE
        args:
            state1: tf variable
            state2: tf variable
            encoder1: tfp.distributions.MultivariateNormalDiag distribution
            encoder2: tfp.distributions.MultivariateNormalDiag distribution
            prior: tfp.distributions.MultivariateNormalDiag distribution
            discriminator: tfp.distributions.Bernoulli distribution
        TODO:
            1. z1: sample from encoder1
            2. z2: sample from encoder2
            3. z: concatenate z1 and z2
        Hint:
            https://www.tensorflow.org/probability/api_docs/python/tfp/distributions
        """
        # Reuse: templates share variables across repeated calls
        make_encoder1 = tf.make_template('encoder1', self.make_encoder)
        make_encoder2 = tf.make_template('encoder2', self.make_encoder)
        make_discriminator = tf.make_template('decoder', self.make_discriminator)
        # Encoder: each state gets half of the latent dimensions
        encoder1 = make_encoder1(state1, self.hid_dim/2, 'z1', n_layers=2, hid_size=self.hid_dim)
        encoder2 = make_encoder2(state2, self.hid_dim/2, 'z2', n_layers=2, hid_size=self.hid_dim)
        # Prior
        prior = self.make_prior(self.hid_dim/2)
        # Sampled Latent
        z1 = encoder1.sample()
        z2 = encoder2.sample()
        # concatenate z1, z2 into shape [batch, self.hid_dim] for the discriminator
        z = tf.concat([z1, z2], axis=-1)
        # Discriminator
        discriminator = make_discriminator(z, 1, 'discriminator', n_layers=2, hid_size=self.hid_dim)
        return encoder1, encoder2, prior, discriminator
    def update(self, state1, state2, target):
        """
        ### PROBLEM 3
        ### YOUR CODE HERE
        args:
            state1: np array (batch_size, ob_dim)
            state2: np array (batch_size, ob_dim)
            target: np array (batch_size, 1)
        TODO:
            train the density model and return
                ll: log_likelihood
                kl: kl divergence
                elbo: elbo
        """
        assert state1.ndim == state2.ndim == target.ndim
        assert state1.shape[1] == state2.shape[1] == self.ob_dim
        assert state1.shape[0] == state2.shape[0] == target.shape[0]
        ## one gradient step that maximizes (log_likelihood - kl_weight * kl)
        _, ll, kl, elbo = self.sess.run([self.update_op, self.log_likelihood, self.kl, self.elbo], feed_dict={
            self.state1: state1,
            self.state2: state2,
            self.discrim_target: target
            })
        return ll, kl, elbo
    def get_likelihood(self, state1, state2):
        """
        ### PROBLEM 3
        ### YOUR CODE HERE
        args:
            state1: np array (batch_size, ob_dim)
            state2: np array (batch_size, ob_dim)
        TODO:
            likelihood of state1 == state2
        Hint:
            what should be the value of self.discrim_target?
        """
        assert state1.ndim == state2.ndim
        assert state1.shape[1] == state2.shape[1] == self.ob_dim
        assert state1.shape[0] == state2.shape[0]
        ## evaluate the Bernoulli density at target 0.5 for every pair
        likelihood = self.sess.run(self.likelihood, feed_dict={
            self.state1: state1,
            self.state2: state2,
            self.discrim_target: np.asarray([0.5] * state1.shape[0]).reshape(-1, 1)
            })
        return likelihood
    def get_prob(self, state):
        """
        ### PROBLEM 3
        ### YOUR CODE HERE
        args:
            state: np array (batch_size, ob_dim)
        TODO:
            likelihood:
                evaluate the discriminator D(x,x) on the same input
            prob:
                compute the probability density of x from the discriminator
                likelihood (see homework doc)
        """
        likelihood = self.get_likelihood(state, state)
        # clip to avoid division by zero below
        likelihood = np.clip(np.squeeze(likelihood), 1e-5, 1-1e-5)
        prob = (1 - likelihood) / likelihood
        return prob
| [
"numpy.mean",
"numpy.tile",
"ex_utils.build_mlp",
"tensorflow.placeholder",
"numpy.asarray",
"tensorflow.logging.set_verbosity",
"numpy.squeeze",
"numpy.exp",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.zeros_initializer",
"tensorflow.reduce_mean",
"tensorflow.train.AdamOptimizer... | [((173, 214), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (197, 214), True, 'import tensorflow as tf\n'), ((2120, 2138), 'numpy.asarray', 'np.asarray', (['counts'], {}), '(counts)\n', (2130, 2138), True, 'import numpy as np\n'), ((3379, 3395), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (3389, 3395), True, 'import numpy as np\n'), ((7858, 7930), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, 1]', 'name': '"""discrim_target"""', 'dtype': 'tf.float32'}), "(shape=[None, 1], name='discrim_target', dtype=tf.float32)\n", (7872, 7930), True, 'import tensorflow as tf\n'), ((8539, 8601), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(self.log_likelihood - self.kl_weight * self.kl)'], {}), '(self.log_likelihood - self.kl_weight * self.kl)\n', (8553, 8601), True, 'import tensorflow as tf\n'), ((8812, 8882), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, self.ob_dim]', 'name': '"""s1"""', 'dtype': 'tf.float32'}), "(shape=[None, self.ob_dim], name='s1', dtype=tf.float32)\n", (8826, 8882), True, 'import tensorflow as tf\n'), ((8900, 8970), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, self.ob_dim]', 'name': '"""s2"""', 'dtype': 'tf.float32'}), "(shape=[None, self.ob_dim], name='s2', dtype=tf.float32)\n", (8914, 8970), True, 'import tensorflow as tf\n'), ((9924, 10010), 'ex_utils.build_mlp', 'build_mlp', (['state', 'z_size', 'scope', 'n_layers', 'hid_size'], {'output_activation': 'tf.nn.relu'}), '(state, z_size, scope, n_layers, hid_size, output_activation=tf.nn\n .relu)\n', (9933, 10010), False, 'from ex_utils import build_mlp\n'), ((10909, 10978), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': '[z_size]', 'dtype': 'tf.float32', 'name': '"""prior_mean"""'}), "(0.0, shape=[z_size], dtype=tf.float32, name='prior_mean')\n", (10920, 10978), True, 'import tensorflow as tf\n'), ((11002, 
11073), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': '[z_size]', 'dtype': 'tf.float32', 'name': '"""prior_logstd"""'}), "(0.0, shape=[z_size], dtype=tf.float32, name='prior_logstd')\n", (11013, 11073), True, 'import tensorflow as tf\n'), ((12049, 12136), 'ex_utils.build_mlp', 'build_mlp', (['z', 'output_size', 'scope', 'n_layers', 'hid_size'], {'output_activation': 'tf.nn.relu'}), '(z, output_size, scope, n_layers, hid_size, output_activation=tf.\n nn.relu)\n', (12058, 12136), False, 'from ex_utils import build_mlp\n'), ((13005, 13052), 'tensorflow.make_template', 'tf.make_template', (['"""encoder1"""', 'self.make_encoder'], {}), "('encoder1', self.make_encoder)\n", (13021, 13052), True, 'import tensorflow as tf\n'), ((13077, 13124), 'tensorflow.make_template', 'tf.make_template', (['"""encoder2"""', 'self.make_encoder'], {}), "('encoder2', self.make_encoder)\n", (13093, 13124), True, 'import tensorflow as tf\n'), ((13154, 13206), 'tensorflow.make_template', 'tf.make_template', (['"""decoder"""', 'self.make_discriminator'], {}), "('decoder', self.make_discriminator)\n", (13170, 13206), True, 'import tensorflow as tf\n'), ((13988, 14016), 'tensorflow.concat', 'tf.concat', (['[z1, z2]'], {'axis': '(-1)'}), '([z1, z2], axis=-1)\n', (13997, 14016), True, 'import tensorflow as tf\n'), ((5805, 5832), 'numpy.mean', 'np.mean', (['gaussians'], {'axis': '(-1)'}), '(gaussians, axis=-1)\n', (5812, 5832), True, 'import numpy as np\n'), ((8261, 8318), 'tensorflow.distributions.kl_divergence', 'tf.distributions.kl_divergence', (['self.encoder1', 'self.prior'], {}), '(self.encoder1, self.prior)\n', (8291, 8318), True, 'import tensorflow as tf\n'), ((8321, 8378), 'tensorflow.distributions.kl_divergence', 'tf.distributions.kl_divergence', (['self.encoder2', 'self.prior'], {}), '(self.encoder2, self.prior)\n', (8351, 8378), True, 'import tensorflow as tf\n'), ((16757, 16779), 'numpy.squeeze', 'np.squeeze', (['likelihood'], {}), '(likelihood)\n', (16767, 16779), True, 
'import numpy as np\n'), ((5362, 5401), 'numpy.exp', 'np.exp', (['(-deltas ** 2 / (2 * self.sigma))'], {}), '(-deltas ** 2 / (2 * self.sigma))\n', (5368, 5401), True, 'import numpy as np\n'), ((8681, 8737), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (8703, 8737), True, 'import tensorflow as tf\n'), ((10105, 10127), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (10125, 10127), True, 'import tensorflow as tf\n'), ((10208, 10224), 'tensorflow.exp', 'tf.exp', (['z_logstd'], {}), '(z_logstd)\n', (10214, 10224), True, 'import tensorflow as tf\n'), ((11157, 11177), 'tensorflow.exp', 'tf.exp', (['prior_logstd'], {}), '(prior_logstd)\n', (11163, 11177), True, 'import tensorflow as tf\n'), ((5112, 5130), 'numpy.tile', 'np.tile', (['states', 'B'], {}), '(states, B)\n', (5119, 5130), True, 'import numpy as np\n'), ((5155, 5182), 'numpy.tile', 'np.tile', (['self.means', '(b, 1)'], {}), '(self.means, (b, 1))\n', (5162, 5182), True, 'import numpy as np\n'), ((16045, 16080), 'numpy.asarray', 'np.asarray', (['([0.5] * state1.shape[0])'], {}), '([0.5] * state1.shape[0])\n', (16055, 16080), True, 'import numpy as np\n')] |
# import the necessary packages
import numpy as np
import argparse
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help = "path to the image")
args = vars(ap.parse_args())
# load the image
# NOTE: cv2.imread returns pixels in BGR channel order, so the boundary
# triples below are (B, G, R), not RGB.
image = cv2.imread(args["image"])
# define the list of boundaries
# Each entry is a (lower, upper) inclusive BGR range selecting one color band
# (presumably red, blue, yellow, gray — TODO confirm against the tutorial).
boundaries = [
	([17, 15, 100], [50, 56, 200]),
	([86, 31, 4], [220, 88, 50]),
	([25, 146, 190], [62, 174, 250]),
	([103, 86, 65], [145, 133, 128])
]
# loop over the boundaries
for (lower, upper) in boundaries:
	# create NumPy arrays from the boundaries
	lower = np.array(lower, dtype = "uint8")
	upper = np.array(upper, dtype = "uint8")
	# find the colors within the specified boundaries and apply
	# the mask
	mask = cv2.inRange(image, lower, upper)
	output = cv2.bitwise_and(image, image, mask = mask)
	# show the images
	# np.hstack places the original and the masked result side by side.
	cv2.imshow("images", np.hstack([image, output]))
cv2.waitKey(0) | [
"argparse.ArgumentParser",
"numpy.hstack",
"cv2.inRange",
"cv2.bitwise_and",
"numpy.array",
"cv2.waitKey",
"cv2.imread"
] | [((139, 164), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (162, 164), False, 'import argparse\n'), ((281, 306), 'cv2.imread', 'cv2.imread', (["args['image']"], {}), "(args['image'])\n", (291, 306), False, 'import cv2\n'), ((604, 634), 'numpy.array', 'np.array', (['lower'], {'dtype': '"""uint8"""'}), "(lower, dtype='uint8')\n", (612, 634), True, 'import numpy as np\n'), ((646, 676), 'numpy.array', 'np.array', (['upper'], {'dtype': '"""uint8"""'}), "(upper, dtype='uint8')\n", (654, 676), True, 'import numpy as np\n'), ((761, 793), 'cv2.inRange', 'cv2.inRange', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', (772, 793), False, 'import cv2\n'), ((804, 844), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask'}), '(image, image, mask=mask)\n', (819, 844), False, 'import cv2\n'), ((918, 932), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (929, 932), False, 'import cv2\n'), ((889, 915), 'numpy.hstack', 'np.hstack', (['[image, output]'], {}), '([image, output])\n', (898, 915), True, 'import numpy as np\n')] |
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
from tensorflow import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from tensorflow.keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from tensorflow.keras.applications.resnet50 import ResNet50#, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import json
from pathlib import Path
import signal
# Resolve paths relative to the working directory; `user` is the part of the
# path before any 'GIT' component (unused below — presumably kept for other
# scripts in this project; TODO confirm).
cwd = os.getcwd()
user = cwd.split('GIT')[0]
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('-b', '--batch_size', default=512, type=int,
                    metavar='N',
                    help='mini-batch size (default: 512), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--model', metavar='MODEL', type=str, help='specific model name')
parser.add_argument('--lr', metavar='LEARNING_RATE', type=float, help='learning rate')
parser.add_argument('--gpu_type', metavar='GPU', type=str, help='specific model name')
args = parser.parse_args()
# gpu_type is interpolated into the log-file names written by the callbacks.
gpu_type = args.gpu_type
# Training parameters
batch_size = args.batch_size # orig paper trained all networks with batch_size=128
# NOTE(review): `epochs` is set here but the model.fit call at the bottom of
# the script hard-codes epochs=500, so this value is effectively unused.
epochs = 5
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
    # Per-pixel mean over the training set; the same mean is subtracted from
    # the test set so both share the training-set statistics.
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
def lr_schedule(epoch):
    """Return the learning rate for the given epoch.

    The schedule is flat: the rate always comes from the ``--lr`` command-line
    argument.  The ``epoch`` parameter is accepted so this function can be
    plugged into Keras' ``LearningRateScheduler`` callback, but it is ignored.

    # Arguments
        epoch (int): epoch index (unused)
    # Returns
        lr (float32): learning rate taken from ``args.lr``
    """
    rate = args.lr
    print('Learning rate: ', rate)
    return rate
model = models.Sequential()
# Select the ResNet backbone by depth.  Only ResNet50 is imported at module
# level (the ResNet101/ResNet152 names in the top-of-file import are commented
# out), so the deeper variants are imported lazily here; previously those
# branches raised NameError as soon as --model contained '101' or '152'.
if '50' in args.model:
    base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args.model:
    from tensorflow.keras.applications import ResNet101
    base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args.model:
    from tensorflow.keras.applications import ResNet152
    base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
model.add(base_model)
# Flatten the backbone's feature map and classify into the 10 CIFAR-10 classes.
model.add(layers.Flatten())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=lr_schedule(0)),
              metrics=['accuracy'])
#model.fit(x_train, y_train, epochs=1, batch_size=20, validation_data=(x_test, y_test))
#def get_flops(model):
#    run_meta = tf.RunMetadata()
#    opts = tf.profiler.ProfileOptionBuilder.float_operation()
#
#    # We use the Keras session graph in the call to the profiler.
#    flops = tf.profiler.profile(graph=K.get_session().graph,
#                                run_meta=run_meta, cmd='op', options=opts)
#
#    return flops.total_float_ops  # Prints the "flops" of the model.
#
#pdb.set_trace()
#model.summary()
#print(get_flops(model))
model.summary()
print(model_type)
# Prepare model saving directory.
# Checkpoints go to ./saved_models; the '{epoch:03d}' placeholder is filled in
# by Keras' ModelCheckpoint at save time.
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'my_test_model.{epoch:03d}.h5'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
# Prepare callbacks for model saving and for learning rate adjustment.
# Saves the model after each epoch. Saved to the filepath, the latest best
# model according to the quantity monitored will not be overwritten
# NOTE(review): 'val_acc' is the TF1-era metric name; newer TF2 versions
# report 'val_accuracy' — confirm against the installed TF version.
checkpoint = ModelCheckpoint(filepath=filepath,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True)
lr_scheduler = LearningRateScheduler(lr_schedule)
# Halve-ish (x sqrt(0.1)) the LR when validation stalls for 5 epochs.
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=5,
                               min_lr=0.5e-6)
class RecordBatch(keras.callbacks.Callback):
    """Keras callback that records per-batch wall-clock durations and the
    start timestamp of every epoch, dumping both to JSON when training ends.
    """

    def __init__(self):
        super(RecordBatch, self).__init__()
        # epoch index -> list of batch durations in seconds (4 decimal places)
        self.batch_time = {}
        # epoch index -> string timestamp of when the epoch started
        self.epoch_time = {}
        self.batch_begin = 0
        self.curr_epoch = 0

    def on_epoch_begin(self, epoch, logs=None):
        self.curr_epoch = epoch
        self.batch_time[epoch] = []
        self.epoch_time[epoch] = str(datetime.now())

    def on_train_batch_begin(self, batch, logs=None):
        self.batch_begin = time.time()

    def on_train_batch_end(self, batch, logs=None):
        elapsed = time.time() - self.batch_begin
        self.batch_time[self.curr_epoch].append(round(elapsed, 4))

    def on_train_end(self, logs=None):
        # Persist the collected timings; file names embed the GPU type so runs
        # on different hardware do not clobber each other.
        with open(f'logs/{gpu_type}_duration_resnet_train.json', 'w') as f:
            json.dump(self.batch_time, f, indent=4)
        with open(f'logs/{gpu_type}_timestamp_resnet_train.json', 'w') as f:
            json.dump(self.epoch_time, f, indent=4)
my_callback = RecordBatch()
callbacks = [my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGINT handler: flush whatever timing data has been gathered so far to
    the log files (so an interrupted run is not lost) and exit the process."""
    outputs = (
        (f'logs/{gpu_type}_duration_resnet_train.json', my_callback.batch_time),
        (f'logs/{gpu_type}_timestamp_resnet_train.json', my_callback.epoch_time),
    )
    for path, payload in outputs:
        with open(path, 'w') as f:
            json.dump(payload, f, indent=4)
    sys.exit()
# Install the SIGINT handler so Ctrl-C still writes the timing logs.
signal.signal(signal.SIGINT, terminateProcess)
#################################################################################
# Run training
# NOTE(review): epochs is hard-coded to 500 here, overriding the `epochs = 5`
# variable defined near the top of the script.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=500,
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=callbacks)
# Score trained model.
#scores = model.evaluate(x_test, y_test, verbose=1)
#print('Test loss:', scores[0])
#print('Test accuracy:', scores[1])
| [
"numpy.sqrt",
"sys.exit",
"keras.layers.Dense",
"numpy.mean",
"argparse.ArgumentParser",
"os.path.isdir",
"tensorflow.keras.utils.to_categorical",
"keras.callbacks.LearningRateScheduler",
"keras.layers.Flatten",
"keras.datasets.cifar10.load_data",
"keras.models.Sequential",
"time.time",
"ten... | [((926, 937), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (935, 937), False, 'import os\n'), ((975, 1041), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tensorflow Cifar10 Training"""'}), "(description='Tensorflow Cifar10 Training')\n", (998, 1041), False, 'import argparse\n'), ((2156, 2175), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (2173, 2175), False, 'from keras.datasets import cifar10\n'), ((2657, 2705), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (2683, 2705), False, 'from tensorflow import keras\n'), ((2715, 2762), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (2741, 2762), False, 'from tensorflow import keras\n'), ((3169, 3188), 'keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (3186, 3188), False, 'from keras import models, layers, optimizers\n'), ((5055, 5089), 'os.path.join', 'os.path.join', (['save_dir', 'model_name'], {}), '(save_dir, model_name)\n', (5067, 5089), False, 'import os\n'), ((5320, 5409), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'filepath', 'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath=filepath, monitor='val_acc', verbose=1,\n save_best_only=True)\n", (5335, 5409), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler\n'), ((5509, 5543), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['lr_schedule'], {}), '(lr_schedule)\n', (5530, 5543), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler\n'), ((7262, 7308), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'terminateProcess'], {}), '(signal.SIGINT, terminateProcess)\n', (7275, 7308), False, 'import signal\n'), ((2357, 2381), 'numpy.mean', 'np.mean', (['x_train'], {'axis': '(0)'}), '(x_train, 
axis=0)\n', (2364, 2381), True, 'import numpy as np\n'), ((3230, 3315), 'tensorflow.keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': 'None', 'include_top': '(False)', 'input_shape': '(32, 32, 3)', 'pooling': 'None'}), '(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None\n )\n', (3238, 3315), False, 'from tensorflow.keras.applications.resnet50 import ResNet50\n'), ((3752, 3768), 'keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (3766, 3768), False, 'from keras import models, layers, optimizers\n'), ((4061, 4099), 'keras.layers.Dense', 'layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (4073, 4099), False, 'from keras import models, layers, optimizers\n'), ((4913, 4924), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4922, 4924), False, 'import os\n'), ((4993, 5016), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (5006, 5016), False, 'import os\n'), ((5022, 5043), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (5033, 5043), False, 'import os\n'), ((7250, 7260), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7258, 7260), False, 'import sys\n'), ((5583, 5595), 'numpy.sqrt', 'np.sqrt', (['(0.1)'], {}), '(0.1)\n', (5590, 5595), True, 'import numpy as np\n'), ((6088, 6102), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6100, 6102), False, 'from datetime import datetime\n'), ((6226, 6237), 'time.time', 'time.time', ([], {}), '()\n', (6235, 6237), False, 'import time\n'), ((7067, 7113), 'json.dump', 'json.dump', (['my_callback.batch_time', 'f'], {'indent': '(4)'}), '(my_callback.batch_time, f, indent=4)\n', (7076, 7113), False, 'import json\n'), ((7195, 7241), 'json.dump', 'json.dump', (['my_callback.epoch_time', 'f'], {'indent': '(4)'}), '(my_callback.epoch_time, f, indent=4)\n', (7204, 7241), False, 'import json\n'), ((6531, 6570), 'json.dump', 'json.dump', (['self.batch_time', 'f'], {'indent': '(4)'}), '(self.batch_time, f, 
indent=4)\n', (6540, 6570), False, 'import json\n'), ((6660, 6699), 'json.dump', 'json.dump', (['self.epoch_time', 'f'], {'indent': '(4)'}), '(self.epoch_time, f, indent=4)\n', (6669, 6699), False, 'import json\n'), ((6344, 6355), 'time.time', 'time.time', ([], {}), '()\n', (6353, 6355), False, 'import time\n')] |
import pandas as pd
import numpy as np

# Load the survey schema (question key -> question text); the first file row
# is a header, hence skiprows=[0].
data_schema = pd.read_csv('developer_survey_2020/survey_results_schema.csv', names=['key', 'text'], skiprows=[0])
df_schema = pd.DataFrame(data_schema, columns=['key', 'text'])
survey_columns = df_schema['key'].to_numpy()
# Load the responses, using the schema keys as column names.
data_survey = pd.read_csv('developer_survey_2020/survey_results_public.csv', names=survey_columns, skiprows=[0])
df_survey = pd.DataFrame(data_survey, columns=survey_columns)
# Drop the leading 'Respondent' id column from the answer columns.
columns = np.delete(survey_columns, 0)
# Unpivot: one [respondent, question, answer] row per individual answer.
answers = []
for i in range(len(df_survey)):
    row = df_survey.iloc[i]  # fetch the row once instead of per column
    print(f'Processing respondent ({row[0]})')
    respondent = int(row['Respondent'])
    for column in columns:
        raw_answer = row[column]
        # pd.isna is the proper missing-value test (was: str(x) != 'nan',
        # which also misclassified any literal 'nan' string answer).
        if pd.isna(raw_answer):
            continue
        if isinstance(raw_answer, str):
            # Multi-select questions are ';'-separated: emit one row per choice.
            for answer in raw_answer.split(';'):
                answers.append([respondent, column, answer])
        else:
            answers.append([respondent, column, raw_answer])
df_answers = pd.DataFrame(answers)
df_answers.to_csv('answers.csv', index=False) | [
"pandas.DataFrame",
"numpy.delete",
"pandas.read_csv"
] | [((54, 157), 'pandas.read_csv', 'pd.read_csv', (['"""developer_survey_2020/survey_results_schema.csv"""'], {'names': "['key', 'text']", 'skiprows': '[0]'}), "('developer_survey_2020/survey_results_schema.csv', names=['key',\n 'text'], skiprows=[0])\n", (65, 157), True, 'import pandas as pd\n'), ((165, 215), 'pandas.DataFrame', 'pd.DataFrame', (['data_schema'], {'columns': "['key', 'text']"}), "(data_schema, columns=['key', 'text'])\n", (177, 215), True, 'import pandas as pd\n'), ((276, 379), 'pandas.read_csv', 'pd.read_csv', (['"""developer_survey_2020/survey_results_public.csv"""'], {'names': 'survey_columns', 'skiprows': '[0]'}), "('developer_survey_2020/survey_results_public.csv', names=\n survey_columns, skiprows=[0])\n", (287, 379), True, 'import pandas as pd\n'), ((387, 436), 'pandas.DataFrame', 'pd.DataFrame', (['data_survey'], {'columns': 'survey_columns'}), '(data_survey, columns=survey_columns)\n', (399, 436), True, 'import pandas as pd\n'), ((468, 496), 'numpy.delete', 'np.delete', (['survey_columns', '(0)'], {}), '(survey_columns, 0)\n', (477, 496), True, 'import numpy as np\n'), ((1079, 1100), 'pandas.DataFrame', 'pd.DataFrame', (['answers'], {}), '(answers)\n', (1091, 1100), True, 'import pandas as pd\n')] |
import json
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pykeen.models import models
# Paths: HPO results live in ./results next to this module; collated
# summaries are written to ./summary (created on import).
HERE = os.path.dirname(__file__)
RESULTS = os.path.join(HERE, 'results')
SUMMARY_DIRECTORY = os.path.join(HERE, 'summary')
os.makedirs(SUMMARY_DIRECTORY, exist_ok=True)
logger = logging.getLogger(__name__)
# Metric extractors: map a metric key to a function pulling its value out of
# a replicate's results.json 'metrics' dict.
GETTERS = {
    'hits@10': lambda metrics: metrics['hits_at_k']['avg']['10']
}
# Columns the collated summary table is sorted by.
ABLATION_HEADERS = [
    'dataset',
    'model',
    'loss',
    'optimizer',
    'training_loop',
]
# Abbreviations used when plotting (long pykeen names -> short labels).
MODEL = {
    'unstructuredmodel': 'um',
    'structuredembedding': 'se',
}
LOSS = {
    'marginranking': 'mr',
    'crossentropy': 'ce',
}
def collate(key: str):
    """Collect every replicated HPO study into one sorted TSV summary file.

    :param key: metric name to aggregate (must be a key of GETTERS)
    """
    rows = []
    for _model, _dataset, searcher, study in iterate_studies(key=key):
        rows.append(dict(searcher=searcher, **study))
    df = pd.DataFrame(rows)
    df = df.sort_values(ABLATION_HEADERS)
    out_path = os.path.join(SUMMARY_DIRECTORY, 'results_test_mehdi.tsv')
    df.to_csv(out_path, sep='\t', index=False)
def iterate_studies(key: str):
    """Yield (model, dataset, searcher, study) for every completed HPO run.

    Walks results/<model>/<dataset>/<searcher>/<ablation>/<hpo>/, skipping
    anything that is not a directory, and emits the aggregated study dict
    produced by get_results_from_hpo_directory (runs without one are skipped).
    """
    for model, dataset, dataset_dir in _iter_dataset_dirs():
        for searcher in os.listdir(dataset_dir):
            searcher_dir = os.path.join(dataset_dir, searcher)
            if not os.path.isdir(searcher_dir):
                continue
            for ablation in os.listdir(searcher_dir):
                ablation_dir = os.path.join(searcher_dir, ablation)
                if not os.path.isdir(ablation_dir):
                    continue
                for hpo in os.listdir(ablation_dir):
                    hpo_dir = os.path.join(ablation_dir, hpo)
                    if not os.path.isdir(hpo_dir):
                        continue
                    study = get_results_from_hpo_directory(hpo_dir, key=key)
                    if study is not None:
                        yield model, dataset, searcher, study
def get_results_from_hpo_directory(hpo_experiment_directory: str, key: str):
    """Aggregate one HPO run: study metadata plus mean/std of the chosen metric.

    :param hpo_experiment_directory: directory containing study.json and
        best_pipeline/replicates/<replicate>/results.json
    :param key: metric name; must be a key of GETTERS (e.g. 'hits@10')
    :return: the study dict extended with ``key`` (mean over replicates) and
        ``f'{key}_std'``, or None when study.json is missing
    :raises FileNotFoundError: when the replicates directory is absent
    """
    study_path = os.path.join(hpo_experiment_directory, 'study.json')
    if not os.path.exists(study_path):
        # Lazy %-formatting so the message is only built if the record is emitted.
        logger.warning('missing study path: %s', hpo_experiment_directory)
        return None
    with open(study_path) as file:
        study = json.load(file)
    # These fields are noise in the collated summary table.
    for _delete_key in ['metric', 'pykeen_git_hash', 'pykeen_version']:
        del study[_delete_key]
    metrics = []
    replicates_directory = os.path.join(hpo_experiment_directory, 'best_pipeline', 'replicates')
    if not os.path.exists(replicates_directory):
        # Include the path in the exception (was a bare FileNotFoundError).
        raise FileNotFoundError(replicates_directory)
    for replicate in os.listdir(replicates_directory):
        replicate_results_path = os.path.join(replicates_directory, replicate, 'results.json')
        with open(replicate_results_path) as file:
            replicate_results = json.load(file)
        metrics.append(GETTERS[key](replicate_results['metrics']))
    study[key] = np.mean(metrics).round(5)
    study[f'{key}_std'] = np.std(metrics).round(5)
    return study
def _iter_model_dirs():
    """Yield (model_name, model_results_dir) for each model under RESULTS.

    Entries starting with '_', ending in '.py', or that are not directories
    (e.g. this script itself) are skipped; unknown model names raise KeyError.
    """
    for model in os.listdir(RESULTS):
        model_dir = os.path.join(RESULTS, model)
        skip = model.startswith('_') or model.endswith('.py') or not os.path.isdir(model_dir)
        if skip:
            continue
        if model not in models:
            raise KeyError(f'invalid model name: {model}')
        # Redundant with the check above; kept from the original as a guard.
        assert model in models, f'{model} not found'
        yield model, model_dir
def _iter_dataset_dirs():
    """Yield (model, dataset, dataset_dir) for every dataset directory of
    every model under RESULTS."""
    for model, model_dir in _iter_model_dirs():
        for dataset in os.listdir(model_dir):
            candidate = os.path.join(model_dir, dataset)
            if os.path.isdir(candidate):
                yield model, dataset, candidate
def add_inverse_triples_info():
    """Backfill 'create_inverse_triples' into every study.json.

    For each HPO run directory under results/, read the flag from the run's
    hpo_config.json and write it into the corresponding study.json so the
    collated summary can report it.

    :raises Exception: when an hpo_config.json lacks the
        pipeline.dataset_kwargs.create_inverse_triples entry
    """
    for model, dataset, d in _iter_dataset_dirs():
        for searcher in os.listdir(d):
            searcher_d = os.path.join(d, searcher)
            if not os.path.isdir(searcher_d):
                continue
            for ablation in os.listdir(searcher_d):
                ablation_d = os.path.join(searcher_d, ablation)
                if not os.path.isdir(ablation_d):
                    continue
                for hpo in os.listdir(ablation_d):
                    hpo_d = os.path.join(ablation_d, hpo)
                    if not os.path.isdir(hpo_d):
                        continue
                    study_path = os.path.join(hpo_d, 'study.json')
                    if not os.path.exists(study_path):
                        logger.warning('missing study path: %s', hpo_d)
                        continue
                    with open(study_path) as file:
                        study = json.load(file)
                    hpo_config_path = os.path.join(hpo_d, 'hpo_config.json')
                    if not os.path.exists(hpo_config_path):
                        logger.warning('missing hpo config path: %s', hpo_config_path)
                        continue
                    with open(hpo_config_path) as file:
                        hpo_config = json.load(file)
                    try:
                        add_inverse_triples = \
                            hpo_config['pipeline']['dataset_kwargs']['create_inverse_triples']
                    # Was a bare `except:` that masked unrelated errors.
                    except KeyError as err:
                        raise Exception(
                            f'create_inverse_triples not in hpo config {hpo_config_path}'
                        ) from err
                    study['create_inverse_triples'] = add_inverse_triples
                    # (The original wrapped study_path in a pointless
                    # single-argument os.path.join here.)
                    with open(study_path, 'w') as file:
                        json.dump(study, file, indent=2)
def create_plots(target_header):
    """For each dataset in the collated summary, plot per-model error bars of
    ``target_header`` (mean ± std) and save one PNG per dataset.

    :param target_header: column of the summary TSV to plot (e.g. 'hits@10');
        a matching ``f'{target_header}_std'`` column must also exist.
    """
    df = pd.read_csv(os.path.join(SUMMARY_DIRECTORY, 'results_test_mehdi.tsv'), sep='\t')
    # Shorten model/loss names for plot labels; unknown names pass through.
    df['model'] = df['model'].map(lambda l: MODEL.get(l, l))
    df['loss'] = df['loss'].map(lambda l: LOSS.get(l, l))
    slice_dir = os.path.join(SUMMARY_DIRECTORY, '1D-slices_mehdi')
    os.makedirs(slice_dir, exist_ok=True)
    # NOTE(review): this `models` is immediately shadowed per dataset below.
    models = df['model'].unique()
    datasets = df['dataset'].unique()
    for dataset in datasets:
        # Get subframe for dataset
        subframe = df[df['dataset'] == dataset]
        models = subframe['model'].unique()
        num_models = len(models)
        # ceil(num_models / 2) rows of a 2-column subplot grid.
        num_rows = (num_models // 2)
        dif = num_models - (2 * num_rows)
        num_rows += dif
        i = 0
        fig, axes = plt.subplots(num_rows, 2, figsize=(14, 10))
        axes = axes.ravel()
        # One subplot per model group; x is just the row index within the group.
        for model in subframe.groupby(['model']):
            y = model[1][target_header]
            x = np.arange(0, len(y))
            error = model[1][f'{target_header}_std']
            axes[i].errorbar(x, y, error, linestyle='None', marker='^')
            i +=1
        plt.savefig(os.path.join(slice_dir, f'{dataset}_all_models.png'))
        plt.close(fig)
def main():
    """Entry point: build the per-dataset error-bar plots for hits@10.

    The collate step is commented out — it is assumed to have been run already
    so the summary TSV exists.
    """
    key = 'hits@10'
    # collate(key=key)
    create_plots(target_header=key)
if __name__ == '__main__':
    main()
| [
"logging.getLogger",
"os.path.exists",
"numpy.mean",
"os.listdir",
"os.makedirs",
"os.path.join",
"matplotlib.pyplot.close",
"os.path.dirname",
"os.path.isdir",
"numpy.std",
"json.load",
"matplotlib.pyplot.subplots",
"json.dump"
] | [((150, 175), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (165, 175), False, 'import os\n'), ((186, 215), 'os.path.join', 'os.path.join', (['HERE', '"""results"""'], {}), "(HERE, 'results')\n", (198, 215), False, 'import os\n'), ((236, 265), 'os.path.join', 'os.path.join', (['HERE', '"""summary"""'], {}), "(HERE, 'summary')\n", (248, 265), False, 'import os\n'), ((266, 311), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIRECTORY'], {'exist_ok': '(True)'}), '(SUMMARY_DIRECTORY, exist_ok=True)\n', (277, 311), False, 'import os\n'), ((322, 349), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (339, 349), False, 'import logging\n'), ((1912, 1964), 'os.path.join', 'os.path.join', (['hpo_experiment_directory', '"""study.json"""'], {}), "(hpo_experiment_directory, 'study.json')\n", (1924, 1964), False, 'import os\n'), ((2310, 2379), 'os.path.join', 'os.path.join', (['hpo_experiment_directory', '"""best_pipeline"""', '"""replicates"""'], {}), "(hpo_experiment_directory, 'best_pipeline', 'replicates')\n", (2322, 2379), False, 'import os\n'), ((2482, 2514), 'os.listdir', 'os.listdir', (['replicates_directory'], {}), '(replicates_directory)\n', (2492, 2514), False, 'import os\n'), ((2933, 2952), 'os.listdir', 'os.listdir', (['RESULTS'], {}), '(RESULTS)\n', (2943, 2952), False, 'import os\n'), ((5605, 5655), 'os.path.join', 'os.path.join', (['SUMMARY_DIRECTORY', '"""1D-slices_mehdi"""'], {}), "(SUMMARY_DIRECTORY, '1D-slices_mehdi')\n", (5617, 5655), False, 'import os\n'), ((5660, 5697), 'os.makedirs', 'os.makedirs', (['slice_dir'], {'exist_ok': '(True)'}), '(slice_dir, exist_ok=True)\n', (5671, 5697), False, 'import os\n'), ((899, 956), 'os.path.join', 'os.path.join', (['SUMMARY_DIRECTORY', '"""results_test_mehdi.tsv"""'], {}), "(SUMMARY_DIRECTORY, 'results_test_mehdi.tsv')\n", (911, 956), False, 'import os\n'), ((1089, 1102), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (1099, 1102), False, 'import os\n'), 
((1976, 2002), 'os.path.exists', 'os.path.exists', (['study_path'], {}), '(study_path)\n', (1990, 2002), False, 'import os\n'), ((2145, 2160), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2154, 2160), False, 'import json\n'), ((2391, 2427), 'os.path.exists', 'os.path.exists', (['replicates_directory'], {}), '(replicates_directory)\n', (2405, 2427), False, 'import os\n'), ((2549, 2610), 'os.path.join', 'os.path.join', (['replicates_directory', 'replicate', '"""results.json"""'], {}), "(replicates_directory, replicate, 'results.json')\n", (2561, 2610), False, 'import os\n'), ((3370, 3383), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (3380, 3383), False, 'import os\n'), ((3657, 3670), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (3667, 3670), False, 'import os\n'), ((5401, 5458), 'os.path.join', 'os.path.join', (['SUMMARY_DIRECTORY', '"""results_test_mehdi.tsv"""'], {}), "(SUMMARY_DIRECTORY, 'results_test_mehdi.tsv')\n", (5413, 5458), False, 'import os\n'), ((6098, 6141), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num_rows', '(2)'], {'figsize': '(14, 10)'}), '(num_rows, 2, figsize=(14, 10))\n', (6110, 6141), True, 'import matplotlib.pyplot as plt\n'), ((6523, 6537), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6532, 6537), True, 'import matplotlib.pyplot as plt\n'), ((1129, 1154), 'os.path.join', 'os.path.join', (['d', 'searcher'], {}), '(d, searcher)\n', (1141, 1154), False, 'import os\n'), ((1254, 1276), 'os.listdir', 'os.listdir', (['searcher_d'], {}), '(searcher_d)\n', (1264, 1276), False, 'import os\n'), ((2694, 2709), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2703, 2709), False, 'import json\n'), ((2795, 2811), 'numpy.mean', 'np.mean', (['metrics'], {}), '(metrics)\n', (2802, 2811), True, 'import numpy as np\n'), ((2847, 2862), 'numpy.std', 'np.std', (['metrics'], {}), '(metrics)\n', (2853, 2862), True, 'import numpy as np\n'), ((3409, 3433), 'os.path.join', 'os.path.join', (['d', 'dataset'], {}), 
'(d, dataset)\n', (3421, 3433), False, 'import os\n'), ((3697, 3722), 'os.path.join', 'os.path.join', (['d', 'searcher'], {}), '(d, searcher)\n', (3709, 3722), False, 'import os\n'), ((3822, 3844), 'os.listdir', 'os.listdir', (['searcher_d'], {}), '(searcher_d)\n', (3832, 3844), False, 'import os\n'), ((6461, 6513), 'os.path.join', 'os.path.join', (['slice_dir', 'f"""{dataset}_all_models.png"""'], {}), "(slice_dir, f'{dataset}_all_models.png')\n", (6473, 6513), False, 'import os\n'), ((1174, 1199), 'os.path.isdir', 'os.path.isdir', (['searcher_d'], {}), '(searcher_d)\n', (1187, 1199), False, 'import os\n'), ((1307, 1341), 'os.path.join', 'os.path.join', (['searcher_d', 'ablation'], {}), '(searcher_d, ablation)\n', (1319, 1341), False, 'import os\n'), ((1448, 1470), 'os.listdir', 'os.listdir', (['ablation_d'], {}), '(ablation_d)\n', (1458, 1470), False, 'import os\n'), ((3250, 3278), 'os.path.join', 'os.path.join', (['RESULTS', 'model'], {}), '(RESULTS, model)\n', (3262, 3278), False, 'import os\n'), ((3453, 3477), 'os.path.isdir', 'os.path.isdir', (['dataset_d'], {}), '(dataset_d)\n', (3466, 3477), False, 'import os\n'), ((3742, 3767), 'os.path.isdir', 'os.path.isdir', (['searcher_d'], {}), '(searcher_d)\n', (3755, 3767), False, 'import os\n'), ((3875, 3909), 'os.path.join', 'os.path.join', (['searcher_d', 'ablation'], {}), '(searcher_d, ablation)\n', (3887, 3909), False, 'import os\n'), ((4016, 4038), 'os.listdir', 'os.listdir', (['ablation_d'], {}), '(ablation_d)\n', (4026, 4038), False, 'import os\n'), ((1365, 1390), 'os.path.isdir', 'os.path.isdir', (['ablation_d'], {}), '(ablation_d)\n', (1378, 1390), False, 'import os\n'), ((1500, 1529), 'os.path.join', 'os.path.join', (['ablation_d', 'hpo'], {}), '(ablation_d, hpo)\n', (1512, 1529), False, 'import os\n'), ((3033, 3061), 'os.path.join', 'os.path.join', (['RESULTS', 'model'], {}), '(RESULTS, model)\n', (3045, 3061), False, 'import os\n'), ((3933, 3958), 'os.path.isdir', 'os.path.isdir', (['ablation_d'], {}), 
'(ablation_d)\n', (3946, 3958), False, 'import os\n'), ((4068, 4097), 'os.path.join', 'os.path.join', (['ablation_d', 'hpo'], {}), '(ablation_d, hpo)\n', (4080, 4097), False, 'import os\n'), ((4214, 4247), 'os.path.join', 'os.path.join', (['hpo_d', '"""study.json"""'], {}), "(hpo_d, 'study.json')\n", (4226, 4247), False, 'import os\n'), ((4546, 4584), 'os.path.join', 'os.path.join', (['hpo_d', '"""hpo_config.json"""'], {}), "(hpo_d, 'hpo_config.json')\n", (4558, 4584), False, 'import os\n'), ((1557, 1577), 'os.path.isdir', 'os.path.isdir', (['hpo_d'], {}), '(hpo_d)\n', (1570, 1577), False, 'import os\n'), ((4125, 4145), 'os.path.isdir', 'os.path.isdir', (['hpo_d'], {}), '(hpo_d)\n', (4138, 4145), False, 'import os\n'), ((4275, 4301), 'os.path.exists', 'os.path.exists', (['study_path'], {}), '(study_path)\n', (4289, 4301), False, 'import os\n'), ((4491, 4506), 'json.load', 'json.load', (['file'], {}), '(file)\n', (4500, 4506), False, 'import json\n'), ((4612, 4643), 'os.path.exists', 'os.path.exists', (['hpo_config_path'], {}), '(hpo_config_path)\n', (4626, 4643), False, 'import os\n'), ((4858, 4873), 'json.load', 'json.load', (['file'], {}), '(file)\n', (4867, 4873), False, 'import json\n'), ((5312, 5344), 'json.dump', 'json.dump', (['study', 'file'], {'indent': '(2)'}), '(study, file, indent=2)\n', (5321, 5344), False, 'import json\n'), ((5248, 5272), 'os.path.join', 'os.path.join', (['study_path'], {}), '(study_path)\n', (5260, 5272), False, 'import os\n')] |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Bijectors Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.math.psd_kernels.internal import util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
def _numpy_exp_quad(amplitude, length_scale, x, y, feature_ndims):
  """Pure-NumPy reference for the exponentiated-quadratic (RBF) kernel.

  The squared distance is reduced over the trailing `feature_ndims` axes,
  so batch dimensions of `x` and `y` broadcast through untouched.
  """
  feature_axes = tuple(range(-feature_ndims, 0))
  sq_dist = np.sum(np.square(x - y), axis=feature_axes)
  return amplitude ** 2 * np.exp(-0.5 * sq_dist / length_scale ** 2)
def _numpy_exp_quad_matrix(amplitude, length_scale, x, feature_ndims):
  """Pure-NumPy reference for the full pairwise kernel matrix over `x`.

  Inserts two singleton axes just before the feature dimensions so that
  broadcasting forms all row/column point pairs.
  """
  row_points = np.expand_dims(x, -feature_ndims - 2)
  col_points = np.expand_dims(x, -feature_ndims - 1)
  return _numpy_exp_quad(amplitude, length_scale, row_points, col_points,
                         feature_ndims)
@test_util.run_all_in_graph_and_eager_modes
class _FeatureTransformedTest(parameterized.TestCase):
  """Shared checks for `tfp.math.psd_kernels.FeatureTransformed`.

  Kernel values computed through `FeatureTransformed` are compared against
  the pure-NumPy references `_numpy_exp_quad` / `_numpy_exp_quad_matrix`
  defined above. Concrete subclasses provide the `dtype` class attribute.
  """
  @parameterized.parameters(
      {'feature_ndims': 1, 'dims': 3},
      {'feature_ndims': 1, 'dims': 4},
      {'feature_ndims': 2, 'dims': 2},
      {'feature_ndims': 2, 'dims': 3},
      {'feature_ndims': 3, 'dims': 2},
      {'feature_ndims': 3, 'dims': 3})
  def testValuesAreCorrectIdentity(self, feature_ndims, dims):
    """An identity transformation must reproduce the base kernel exactly."""
    amplitude = self.dtype(5.)
    length_scale = self.dtype(0.2)
    kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(
        amplitude, length_scale, feature_ndims)
    input_shape = [dims] * feature_ndims
    # transformation_fn receives (x, feature_ndims, param_expansion_ndims)
    # and here returns x unchanged.
    identity_transformed_kernel = tfp.math.psd_kernels.FeatureTransformed(
        kernel,
        transformation_fn=lambda x, feature_ndims, param_expansion_ndims: x)
    x = np.random.uniform(-1, 1, size=input_shape).astype(self.dtype)
    y = np.random.uniform(-1, 1, size=input_shape).astype(self.dtype)
    self.assertAllClose(
        _numpy_exp_quad(
            amplitude, length_scale, x, y, feature_ndims=feature_ndims),
        self.evaluate(identity_transformed_kernel.apply(x, y)))
  @parameterized.parameters(
      {'feature_ndims': 1, 'dims': 3},
      {'feature_ndims': 1, 'dims': 4},
      {'feature_ndims': 2, 'dims': 2},
      {'feature_ndims': 2, 'dims': 3},
      {'feature_ndims': 3, 'dims': 2},
      {'feature_ndims': 3, 'dims': 3})
  def testValuesAreCorrectScalarTransform(self, feature_ndims, dims):
    """Scaling all features by 2 must match the NumPy kernel on 2*x, 2*y."""
    amplitude = self.dtype(5.)
    length_scale = self.dtype(0.2)
    kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(
        amplitude, length_scale, feature_ndims)
    input_shape = [dims] * feature_ndims
    bij = tfp.bijectors.AffineScalar(self.dtype(0.), self.dtype(2.))
    # Flat multiplication by 2.
    def scale_transform(x, feature_ndims, param_expansion_ndims):
      del feature_ndims, param_expansion_ndims
      return bij.forward(x)
    scale_transformed_kernel = tfp.math.psd_kernels.FeatureTransformed(
        kernel, transformation_fn=scale_transform)
    x = np.random.uniform(-1, 1, size=input_shape).astype(self.dtype)
    y = np.random.uniform(-1, 1, size=input_shape).astype(self.dtype)
    self.assertAllClose(
        _numpy_exp_quad
        (amplitude, length_scale, 2. * x, 2. * y, feature_ndims=feature_ndims),
        self.evaluate(scale_transformed_kernel.apply(x, y)))
  @parameterized.parameters(
      {'feature_ndims': 1, 'dims': 3},
      {'feature_ndims': 1, 'dims': 4},
      {'feature_ndims': 2, 'dims': 2},
      {'feature_ndims': 2, 'dims': 3},
      {'feature_ndims': 3, 'dims': 2},
      {'feature_ndims': 3, 'dims': 3})
  def testValuesAreCorrectVectorTransform(self, feature_ndims, dims):
    """Diagonal per-dimension scaling must match NumPy on scale_diag * x."""
    amplitude = self.dtype(5.)
    length_scale = self.dtype(0.2)
    kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(
        amplitude, length_scale, feature_ndims)
    input_shape = [dims] * feature_ndims
    scale_diag = np.random.uniform(-1, 1, size=(dims,)).astype(self.dtype)
    bij = tfp.bijectors.Affine(scale_diag=scale_diag)
    # Scaling the last dimension.
    def vector_transform(x, feature_ndims, param_expansion_ndims):
      del feature_ndims, param_expansion_ndims
      return bij.forward(x)
    vector_transformed_kernel = tfp.math.psd_kernels.FeatureTransformed(
        kernel, transformation_fn=vector_transform)
    x = np.random.uniform(-1, 1, size=input_shape).astype(self.dtype)
    y = np.random.uniform(-1, 1, size=input_shape).astype(self.dtype)
    self.assertAllClose(
        _numpy_exp_quad(
            amplitude,
            length_scale,
            scale_diag * x,
            scale_diag * y,
            feature_ndims=feature_ndims),
        self.evaluate(vector_transformed_kernel.apply(x, y)))
  @parameterized.parameters(
      {'feature_ndims': 1, 'dims': 3},
      {'feature_ndims': 1, 'dims': 4},
      {'feature_ndims': 2, 'dims': 2},
      {'feature_ndims': 2, 'dims': 3},
      {'feature_ndims': 3, 'dims': 2},
      {'feature_ndims': 3, 'dims': 3})
  def testKernelParametersBroadcast(self, feature_ndims, dims):
    """Batched kernel params and batched scale_diag must broadcast together."""
    # Batch shape [10, 2]
    amplitude = np.random.uniform(
        low=1., high=10., size=[10, 2]).astype(self.dtype)
    length_scale = np.random.uniform(
        low=1., high=10., size=[1, 2]).astype(self.dtype)
    kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(
        amplitude, length_scale, feature_ndims)
    input_shape = [dims] * feature_ndims
    # Batch shape [3, 1, 2].
    scale_diag = np.random.uniform(
        -1, 1, size=(3, 1, 2, dims,)).astype(self.dtype)
    # Scaling the last dimension.
    def vector_transform(x, feature_ndims, param_expansion_ndims):
      # Insert singleton dims so scale_diag lines up with x's batch and
      # feature dims before the broadcast elementwise product.
      diag = util.pad_shape_with_ones(
          scale_diag,
          param_expansion_ndims + feature_ndims - 1,
          start=-2)
      return diag * x
    vector_transformed_kernel = tfp.math.psd_kernels.FeatureTransformed(
        kernel, transformation_fn=vector_transform)
    x = np.random.uniform(-1, 1, size=input_shape).astype(self.dtype)
    y = np.random.uniform(-1, 1, size=input_shape).astype(self.dtype)
    # Pad for each feature dimension.
    expanded_scale_diag = scale_diag
    for _ in range(feature_ndims - 1):
      expanded_scale_diag = expanded_scale_diag[..., None, :]
    self.assertAllClose(
        _numpy_exp_quad(
            amplitude,
            length_scale,
            expanded_scale_diag * x,
            expanded_scale_diag * y,
            feature_ndims=feature_ndims
        ),
        self.evaluate(vector_transformed_kernel.apply(x, y)))
    z = np.random.uniform(-1, 1, size=[10] + input_shape).astype(self.dtype)
    self.assertAllClose(
        _numpy_exp_quad_matrix(
            # We need to take into account the event dimension.
            amplitude[..., None, None],
            length_scale[..., None, None],
            # Extra dimension for the event dimension.
            expanded_scale_diag[..., None, :] * z,
            feature_ndims=feature_ndims
        ),
        self.evaluate(vector_transformed_kernel.matrix(z, z)))
class FeatureTransformedFloat32Test(_FeatureTransformedTest, tf.test.TestCase):
  """Runs the shared FeatureTransformed checks in float32."""
  dtype = np.float32
class FeatureTransformedFloat64Test(_FeatureTransformedTest, tf.test.TestCase):
  """Runs the shared FeatureTransformed checks in float64."""
  dtype = np.float64
# Remove the parameterized base class from the module namespace so the test
# runner does not collect it directly (it defines no `dtype` itself).
del _FeatureTransformedTest
if __name__ == '__main__':
  tf.test.main()
| [
"tensorflow_probability.python.math.psd_kernels.internal.util.pad_shape_with_ones",
"tensorflow_probability.bijectors.Affine",
"absl.testing.parameterized.parameters",
"tensorflow_probability.math.psd_kernels.FeatureTransformed",
"numpy.sum",
"tensorflow.compat.v2.test.main",
"tensorflow_probability.mat... | [((1717, 1951), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'feature_ndims': 1, 'dims': 3}", "{'feature_ndims': 1, 'dims': 4}", "{'feature_ndims': 2, 'dims': 2}", "{'feature_ndims': 2, 'dims': 3}", "{'feature_ndims': 3, 'dims': 2}", "{'feature_ndims': 3, 'dims': 3}"], {}), "({'feature_ndims': 1, 'dims': 3}, {'feature_ndims':\n 1, 'dims': 4}, {'feature_ndims': 2, 'dims': 2}, {'feature_ndims': 2,\n 'dims': 3}, {'feature_ndims': 3, 'dims': 2}, {'feature_ndims': 3,\n 'dims': 3})\n", (1741, 1951), False, 'from absl.testing import parameterized\n'), ((2752, 2986), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'feature_ndims': 1, 'dims': 3}", "{'feature_ndims': 1, 'dims': 4}", "{'feature_ndims': 2, 'dims': 2}", "{'feature_ndims': 2, 'dims': 3}", "{'feature_ndims': 3, 'dims': 2}", "{'feature_ndims': 3, 'dims': 3}"], {}), "({'feature_ndims': 1, 'dims': 3}, {'feature_ndims':\n 1, 'dims': 4}, {'feature_ndims': 2, 'dims': 2}, {'feature_ndims': 2,\n 'dims': 3}, {'feature_ndims': 3, 'dims': 2}, {'feature_ndims': 3,\n 'dims': 3})\n", (2776, 2986), False, 'from absl.testing import parameterized\n'), ((3997, 4231), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'feature_ndims': 1, 'dims': 3}", "{'feature_ndims': 1, 'dims': 4}", "{'feature_ndims': 2, 'dims': 2}", "{'feature_ndims': 2, 'dims': 3}", "{'feature_ndims': 3, 'dims': 2}", "{'feature_ndims': 3, 'dims': 3}"], {}), "({'feature_ndims': 1, 'dims': 3}, {'feature_ndims':\n 1, 'dims': 4}, {'feature_ndims': 2, 'dims': 2}, {'feature_ndims': 2,\n 'dims': 3}, {'feature_ndims': 3, 'dims': 2}, {'feature_ndims': 3,\n 'dims': 3})\n", (4021, 4231), False, 'from absl.testing import parameterized\n'), ((5377, 5611), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'feature_ndims': 1, 'dims': 3}", "{'feature_ndims': 1, 'dims': 4}", "{'feature_ndims': 2, 'dims': 2}", "{'feature_ndims': 2, 'dims': 3}", 
"{'feature_ndims': 3, 'dims': 2}", "{'feature_ndims': 3, 'dims': 3}"], {}), "({'feature_ndims': 1, 'dims': 3}, {'feature_ndims':\n 1, 'dims': 4}, {'feature_ndims': 2, 'dims': 2}, {'feature_ndims': 2,\n 'dims': 3}, {'feature_ndims': 3, 'dims': 2}, {'feature_ndims': 3,\n 'dims': 3})\n", (5401, 5611), False, 'from absl.testing import parameterized\n'), ((7947, 7961), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (7959, 7961), True, 'import tensorflow.compat.v2 as tf\n'), ((1507, 1544), 'numpy.expand_dims', 'np.expand_dims', (['x', '(-feature_ndims - 2)'], {}), '(x, -feature_ndims - 2)\n', (1521, 1544), True, 'import numpy as np\n'), ((1552, 1589), 'numpy.expand_dims', 'np.expand_dims', (['x', '(-feature_ndims - 1)'], {}), '(x, -feature_ndims - 1)\n', (1566, 1589), True, 'import numpy as np\n'), ((2119, 2206), 'tensorflow_probability.math.psd_kernels.ExponentiatedQuadratic', 'tfp.math.psd_kernels.ExponentiatedQuadratic', (['amplitude', 'length_scale', 'feature_ndims'], {}), '(amplitude, length_scale,\n feature_ndims)\n', (2162, 2206), True, 'import tensorflow_probability as tfp\n'), ((2287, 2407), 'tensorflow_probability.math.psd_kernels.FeatureTransformed', 'tfp.math.psd_kernels.FeatureTransformed', (['kernel'], {'transformation_fn': '(lambda x, feature_ndims, param_expansion_ndims: x)'}), '(kernel, transformation_fn=lambda x,\n feature_ndims, param_expansion_ndims: x)\n', (2326, 2407), True, 'import tensorflow_probability as tfp\n'), ((3161, 3248), 'tensorflow_probability.math.psd_kernels.ExponentiatedQuadratic', 'tfp.math.psd_kernels.ExponentiatedQuadratic', (['amplitude', 'length_scale', 'feature_ndims'], {}), '(amplitude, length_scale,\n feature_ndims)\n', (3204, 3248), True, 'import tensorflow_probability as tfp\n'), ((3570, 3657), 'tensorflow_probability.math.psd_kernels.FeatureTransformed', 'tfp.math.psd_kernels.FeatureTransformed', (['kernel'], {'transformation_fn': 'scale_transform'}), '(kernel, transformation_fn=\n scale_transform)\n', 
(3609, 3657), True, 'import tensorflow_probability as tfp\n'), ((4406, 4493), 'tensorflow_probability.math.psd_kernels.ExponentiatedQuadratic', 'tfp.math.psd_kernels.ExponentiatedQuadratic', (['amplitude', 'length_scale', 'feature_ndims'], {}), '(amplitude, length_scale,\n feature_ndims)\n', (4449, 4493), True, 'import tensorflow_probability as tfp\n'), ((4626, 4669), 'tensorflow_probability.bijectors.Affine', 'tfp.bijectors.Affine', ([], {'scale_diag': 'scale_diag'}), '(scale_diag=scale_diag)\n', (4646, 4669), True, 'import tensorflow_probability as tfp\n'), ((4880, 4968), 'tensorflow_probability.math.psd_kernels.FeatureTransformed', 'tfp.math.psd_kernels.FeatureTransformed', (['kernel'], {'transformation_fn': 'vector_transform'}), '(kernel, transformation_fn=\n vector_transform)\n', (4919, 4968), True, 'import tensorflow_probability as tfp\n'), ((5930, 6017), 'tensorflow_probability.math.psd_kernels.ExponentiatedQuadratic', 'tfp.math.psd_kernels.ExponentiatedQuadratic', (['amplitude', 'length_scale', 'feature_ndims'], {}), '(amplitude, length_scale,\n feature_ndims)\n', (5973, 6017), True, 'import tensorflow_probability as tfp\n'), ((6478, 6566), 'tensorflow_probability.math.psd_kernels.FeatureTransformed', 'tfp.math.psd_kernels.FeatureTransformed', (['kernel'], {'transformation_fn': 'vector_transform'}), '(kernel, transformation_fn=\n vector_transform)\n', (6517, 6566), True, 'import tensorflow_probability as tfp\n'), ((6302, 6395), 'tensorflow_probability.python.math.psd_kernels.internal.util.pad_shape_with_ones', 'util.pad_shape_with_ones', (['scale_diag', '(param_expansion_ndims + feature_ndims - 1)'], {'start': '(-2)'}), '(scale_diag, param_expansion_ndims + feature_ndims -\n 1, start=-2)\n', (6326, 6395), False, 'from tensorflow_probability.python.math.psd_kernels.internal import util\n'), ((2429, 2471), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'input_shape'}), '(-1, 1, size=input_shape)\n', (2446, 2471), True, 'import numpy 
as np\n'), ((2499, 2541), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'input_shape'}), '(-1, 1, size=input_shape)\n', (2516, 2541), True, 'import numpy as np\n'), ((3671, 3713), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'input_shape'}), '(-1, 1, size=input_shape)\n', (3688, 3713), True, 'import numpy as np\n'), ((3741, 3783), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'input_shape'}), '(-1, 1, size=input_shape)\n', (3758, 3783), True, 'import numpy as np\n'), ((4558, 4596), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(dims,)'}), '(-1, 1, size=(dims,))\n', (4575, 4596), True, 'import numpy as np\n'), ((4982, 5024), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'input_shape'}), '(-1, 1, size=input_shape)\n', (4999, 5024), True, 'import numpy as np\n'), ((5052, 5094), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'input_shape'}), '(-1, 1, size=input_shape)\n', (5069, 5094), True, 'import numpy as np\n'), ((5743, 5794), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1.0)', 'high': '(10.0)', 'size': '[10, 2]'}), '(low=1.0, high=10.0, size=[10, 2])\n', (5760, 5794), True, 'import numpy as np\n'), ((5840, 5890), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1.0)', 'high': '(10.0)', 'size': '[1, 2]'}), '(low=1.0, high=10.0, size=[1, 2])\n', (5857, 5890), True, 'import numpy as np\n'), ((6111, 6157), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(3, 1, 2, dims)'}), '(-1, 1, size=(3, 1, 2, dims))\n', (6128, 6157), True, 'import numpy as np\n'), ((6580, 6622), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'input_shape'}), '(-1, 1, size=input_shape)\n', (6597, 6622), True, 'import numpy as np\n'), ((6650, 6692), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'input_shape'}), '(-1, 1, size=input_shape)\n', 
(6667, 6692), True, 'import numpy as np\n'), ((7185, 7234), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '([10] + input_shape)'}), '(-1, 1, size=[10] + input_shape)\n', (7202, 7234), True, 'import numpy as np\n'), ((1312, 1343), 'numpy.sum', 'np.sum', (['((x - y) ** 2)'], {'axis': 'dims'}), '((x - y) ** 2, axis=dims)\n', (1318, 1343), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from ansys.dpf import core as dpf
from ansys.dpf.core import examples
@pytest.fixture()
def local_server():
    """Return a DPF server distinct from the global one.

    Reuses an already-running cached server when one responds; otherwise
    starts a fresh local server that is not registered as the global server.
    """
    try:
        for server in dpf._server_instances:
            if server() != dpf.SERVER:
                server().info  # check that the server is responsive
                return server()
        return dpf.start_local_server(as_global=False)
    except Exception:
        # Any failure while probing cached servers (dead process, closed
        # channel, ...) falls back to starting a brand-new local server.
        # The original bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; ``except Exception`` is the safe form.
        return dpf.start_local_server(as_global=False)
@pytest.fixture()
def static_models(local_server):
    """Open the static example result on the default and on a second server."""
    remote_file = dpf.upload_file_in_tmp_folder(examples.static_rst, server=local_server)
    local_model = dpf.Model(examples.static_rst)
    remote_model = dpf.Model(remote_file, server=local_server)
    return (local_model, remote_model)
@pytest.fixture()
def transient_models(local_server):
    """Open the transient example result on the default and on a second server."""
    remote_file = dpf.upload_file_in_tmp_folder(examples.msup_transient, server=local_server)
    local_model = dpf.Model(examples.msup_transient)
    remote_model = dpf.Model(remote_file, server=local_server)
    return (local_model, remote_model)
@pytest.fixture()
def cyc_models(local_server):
    """Open the cyclic example result on the default and on a second server."""
    remote_file = dpf.upload_file_in_tmp_folder(examples.simple_cyclic, server=local_server)
    local_model = dpf.Model(examples.simple_cyclic)
    remote_model = dpf.Model(remote_file, server=local_server)
    return (local_model, remote_model)
@pytest.fixture()
def all_kind_of_complexity_models(local_server):
    """Open the 'all kinds of complexity' result on two distinct servers.

    The example is downloaded once and the local path reused for both the
    upload and the local model; the original implementation triggered the
    download helper twice.
    """
    local_file = examples.download_all_kinds_of_complexity()
    remote_file = dpf.upload_file_in_tmp_folder(local_file, server=local_server)
    return (dpf.Model(local_file), dpf.Model(remote_file, server=local_server))
def test_different_multi_server(static_models):
    """The two models must live on distinct server instances."""
    model_a, model_b = static_models
    assert model_a._server != model_b._server
    assert not model_a._server == model_b._server
    assert model_a._server.port != model_b._server.port
    files_a = model_a.metadata.data_sources.result_files
    files_b = model_b.metadata.data_sources.result_files
    assert files_a[0] != files_b[0]
def test_model_time_freq_multi_server(static_models):
    """Time/frequency support must be identical on both servers."""
    support_a = static_models[0].metadata.time_freq_support
    support_b = static_models[1].metadata.time_freq_support
    freqs_a = support_a.time_frequencies
    freqs_b = support_b.time_frequencies
    assert freqs_a.shape == freqs_b.shape
    assert freqs_a.size == freqs_b.size
    assert np.allclose(freqs_a.data, freqs_b.data)
    assert np.allclose(freqs_a.scoping.ids, freqs_b.scoping.ids)
    assert support_a.n_sets == support_b.n_sets
    assert support_a.get_frequency(0, 0) == support_b.get_frequency(0, 0)
    assert support_a.get_cumulative_index(0, 0) == support_b.get_cumulative_index(0, 0)
def test_different_multi_server2(static_models):
    """After a first local server was started, the two models must still be
    served by two different server instances."""
    first, second = static_models
    assert first._server != second._server
    assert not first._server == second._server
    assert first._server.port != second._server.port
    assert (first.metadata.data_sources.result_files[0]
            != second.metadata.data_sources.result_files[0])
def test_model_mesh_multi_server(static_models):
    """Meshed regions read from both servers must match."""
    mesh_a = static_models[0].metadata.meshed_region
    mesh_b = static_models[1].metadata.meshed_region
    assert mesh_a.unit == mesh_b.unit
    assert mesh_a.available_named_selections == mesh_b.available_named_selections
    assert np.allclose(
        mesh_a.named_selection(mesh_a.available_named_selections[0]).ids,
        mesh_b.named_selection(mesh_b.available_named_selections[0]).ids)
    elems_a = mesh_a.elements
    elems_b = mesh_b.elements
    assert np.allclose(elems_a.scoping.ids, elems_b.scoping.ids)
    assert np.allclose(elems_a.element_types_field.data, elems_b.element_types_field.data)
    assert np.allclose(elems_a.connectivities_field.data, elems_b.connectivities_field.data)
    assert np.allclose(elems_a.materials_field.data, elems_b.materials_field.data)
    # Scalar element properties compared attribute by attribute.
    for attr in ("n_elements", "has_shell_elements", "has_solid_elements",
                 "has_beam_elements", "has_point_elements"):
        assert getattr(elems_a, attr) == getattr(elems_b, attr)
    assert np.allclose(mesh_a.nodes.scoping.ids, mesh_b.nodes.scoping.ids)
def test_model_result_info_multi_server(static_models):
    """Result metadata must agree between the two server instances."""
    info_a = static_models[0].metadata.result_info
    info_b = static_models[1].metadata.result_info
    for attr in ("analysis_type", "physics_type", "unit_system",
                 "cyclic_symmetry_type", "has_cyclic"):
        assert getattr(info_a, attr) == getattr(info_b, attr)
    results_a = info_a.available_results
    results_b = info_b.available_results
    for i, res_a in enumerate(results_a):
        res_b = results_b[i]
        for attr in ("name", "n_components", "dimensionality", "homogeneity",
                     "unit", "operator_name", "sub_results"):
            assert getattr(res_a, attr) == getattr(res_b, attr)
def test_model_cyc_support_multi_server(cyc_models):
    """Cyclic symmetry support must be identical on both servers."""
    info_a = cyc_models[0].metadata.result_info
    info_b = cyc_models[1].metadata.result_info
    assert info_a.has_cyclic == info_b.has_cyclic
    assert info_a.cyclic_symmetry_type == info_b.cyclic_symmetry_type
    support_a = info_a.cyclic_support
    support_b = info_b.cyclic_support
    assert support_a.num_stages == support_b.num_stages
    assert support_a.num_sectors() == support_b.num_sectors()
    assert support_a.base_nodes_scoping().ids == support_b.base_nodes_scoping().ids
    assert support_a.base_elements_scoping().ids == support_b.base_elements_scoping().ids
    assert support_a.sectors_set_for_expansion().ids == support_b.sectors_set_for_expansion().ids
    assert support_a.expand_node_id(1).ids == support_b.expand_node_id(1).ids
    assert support_a.expand_element_id(1).ids == support_b.expand_element_id(1).ids
    sectors_a = support_a.sectors_set_for_expansion()
    sectors_b = support_b.sectors_set_for_expansion()
    assert support_a.expand_node_id(1, sectors_a).ids == support_b.expand_node_id(1, sectors_b).ids
    assert support_a.expand_element_id(1, sectors_a).ids == support_b.expand_element_id(1, sectors_b).ids
def test_model_displacement_multi_server(transient_models):
    """Displacement fields evaluated on both servers must match field by field."""
    support = transient_models[0].metadata.time_freq_support
    time_scoping = range(1, len(support.time_frequencies) + 1)
    op_a = transient_models[0].results.displacement()
    op_a.inputs.time_scoping(time_scoping)
    op_b = transient_models[1].results.displacement()
    op_b.inputs.time_scoping(time_scoping)
    fc = op_a.outputs.fields_container()
    fc2 = op_b.outputs.fields_container()
    for i, field in enumerate(fc):
        assert fc.get_label_space(i) == fc2.get_label_space(i)
        # Bring the remote field over so identical_fields can compare locally.
        remote_copy = fc2[i].deep_copy()
        iden = dpf.operators.logic.identical_fields(field, remote_copy)
        assert iden.outputs.boolean()
        assert np.allclose(field.data, fc2[i].data)
        assert np.allclose(field.scoping.ids, fc2[i].scoping.ids)
        assert np.allclose(field.data, remote_copy.data)
        assert np.allclose(field.scoping.ids, remote_copy.scoping.ids)
def check_fc(fc, fc2):
    """Assert two fields containers (possibly on different servers) are identical."""
    for i, field in enumerate(fc):
        assert fc.get_label_space(i) == fc2.get_label_space(i)
        local_copy = fc2[i].deep_copy()
        iden = dpf.operators.logic.identical_fields(field, local_copy)
        assert iden.outputs.boolean()
        assert np.allclose(field.data, fc2[i].data)
        assert np.allclose(field.scoping.ids, fc2[i].scoping.ids)
        assert np.allclose(field.data, local_copy.data)
        assert np.allclose(field.scoping.ids, local_copy.scoping.ids)
    # Compare the whole containers at once as a final cross-check.
    idenfc = dpf.operators.logic.identical_fc(fc, fc2.deep_copy())
    assert idenfc.outputs.boolean()
def test_model_stress_multi_server(transient_models):
    """Stress results from both servers must be identical."""
    support = transient_models[0].metadata.time_freq_support
    time_scoping = range(1, len(support.time_frequencies) + 1)
    op_a = transient_models[0].results.stress()
    op_a.inputs.time_scoping(time_scoping)
    op_b = transient_models[1].results.stress()
    op_b.inputs.time_scoping(time_scoping)
    fc = op_a.outputs.fields_container()
    fc2 = op_b.outputs.fields_container()
    check_fc(fc, fc2)
    # Also compare on the second server after copying the first container over.
    idenfc = dpf.operators.logic.identical_fc(fc.deep_copy(fc2._server), fc2, server=fc2._server)
    assert idenfc.outputs.boolean()
def test_model_different_results_big_multi_server(all_kind_of_complexity_models):
    """Several result types must match between the two servers.

    The original implementation repeated the same four-line stanza for each
    result type; the loop below performs the identical sequence of
    operations for each name, in the same order.
    """
    support = all_kind_of_complexity_models[0].metadata.time_freq_support
    time_scoping = len(support.time_frequencies)
    results = all_kind_of_complexity_models[0].results
    results2 = all_kind_of_complexity_models[1].results
    for result_name in ("displacement", "stress", "elastic_strain",
                        "elemental_volume"):
        op = getattr(results, result_name)()
        op.inputs.time_scoping(time_scoping)
        op2 = getattr(results2, result_name)()
        op2.inputs.time_scoping(time_scoping)
        fc = op.outputs.fields_container()
        fc2 = op2.outputs.fields_container()
        check_fc(fc, fc2)
| [
"ansys.dpf.core.Model",
"numpy.allclose",
"ansys.dpf.core.examples.download_all_kinds_of_complexity",
"ansys.dpf.core.start_local_server",
"pytest.fixture",
"ansys.dpf.core.upload_file_in_tmp_folder",
"ansys.dpf.core.operators.logic.identical_fields"
] | [((106, 122), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (120, 122), False, 'import pytest\n'), ((474, 490), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (488, 490), False, 'import pytest\n'), ((701, 717), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (715, 717), False, 'import pytest\n'), ((939, 955), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (953, 955), False, 'import pytest\n'), ((1169, 1185), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1183, 1185), False, 'import pytest\n'), ((540, 611), 'ansys.dpf.core.upload_file_in_tmp_folder', 'dpf.upload_file_in_tmp_folder', (['examples.static_rst'], {'server': 'local_server'}), '(examples.static_rst, server=local_server)\n', (569, 611), True, 'from ansys.dpf import core as dpf\n'), ((770, 845), 'ansys.dpf.core.upload_file_in_tmp_folder', 'dpf.upload_file_in_tmp_folder', (['examples.msup_transient'], {'server': 'local_server'}), '(examples.msup_transient, server=local_server)\n', (799, 845), True, 'from ansys.dpf import core as dpf\n'), ((1002, 1076), 'ansys.dpf.core.upload_file_in_tmp_folder', 'dpf.upload_file_in_tmp_folder', (['examples.simple_cyclic'], {'server': 'local_server'}), '(examples.simple_cyclic, server=local_server)\n', (1031, 1076), True, 'from ansys.dpf import core as dpf\n'), ((2142, 2206), 'numpy.allclose', 'np.allclose', (['tf.time_frequencies.data', 'tf2.time_frequencies.data'], {}), '(tf.time_frequencies.data, tf2.time_frequencies.data)\n', (2153, 2206), True, 'import numpy as np\n'), ((2217, 2295), 'numpy.allclose', 'np.allclose', (['tf.time_frequencies.scoping.ids', 'tf2.time_frequencies.scoping.ids'], {}), '(tf.time_frequencies.scoping.ids, tf2.time_frequencies.scoping.ids)\n', (2228, 2295), True, 'import numpy as np\n'), ((3435, 3491), 'numpy.allclose', 'np.allclose', (['elements.scoping.ids', 'elements2.scoping.ids'], {}), '(elements.scoping.ids, elements2.scoping.ids)\n', (3446, 3491), True, 'import numpy as np\n'), ((3503, 3590), 
'numpy.allclose', 'np.allclose', (['elements.element_types_field.data', 'elements2.element_types_field.data'], {}), '(elements.element_types_field.data, elements2.\n element_types_field.data)\n', (3514, 3590), True, 'import numpy as np\n'), ((3597, 3686), 'numpy.allclose', 'np.allclose', (['elements.connectivities_field.data', 'elements2.connectivities_field.data'], {}), '(elements.connectivities_field.data, elements2.\n connectivities_field.data)\n', (3608, 3686), True, 'import numpy as np\n'), ((3693, 3767), 'numpy.allclose', 'np.allclose', (['elements.materials_field.data', 'elements2.materials_field.data'], {}), '(elements.materials_field.data, elements2.materials_field.data)\n', (3704, 3767), True, 'import numpy as np\n'), ((4169, 4219), 'numpy.allclose', 'np.allclose', (['nodes.scoping.ids', 'nodes2.scoping.ids'], {}), '(nodes.scoping.ids, nodes2.scoping.ids)\n', (4180, 4219), True, 'import numpy as np\n'), ((360, 399), 'ansys.dpf.core.start_local_server', 'dpf.start_local_server', ([], {'as_global': '(False)'}), '(as_global=False)\n', (382, 399), True, 'from ansys.dpf import core as dpf\n'), ((624, 654), 'ansys.dpf.core.Model', 'dpf.Model', (['examples.static_rst'], {}), '(examples.static_rst)\n', (633, 654), True, 'from ansys.dpf import core as dpf\n'), ((656, 697), 'ansys.dpf.core.Model', 'dpf.Model', (['otherfile'], {'server': 'local_server'}), '(otherfile, server=local_server)\n', (665, 697), True, 'from ansys.dpf import core as dpf\n'), ((858, 892), 'ansys.dpf.core.Model', 'dpf.Model', (['examples.msup_transient'], {}), '(examples.msup_transient)\n', (867, 892), True, 'from ansys.dpf import core as dpf\n'), ((894, 935), 'ansys.dpf.core.Model', 'dpf.Model', (['otherfile'], {'server': 'local_server'}), '(otherfile, server=local_server)\n', (903, 935), True, 'from ansys.dpf import core as dpf\n'), ((1089, 1122), 'ansys.dpf.core.Model', 'dpf.Model', (['examples.simple_cyclic'], {}), '(examples.simple_cyclic)\n', (1098, 1122), True, 'from ansys.dpf import 
core as dpf\n'), ((1124, 1165), 'ansys.dpf.core.Model', 'dpf.Model', (['otherfile'], {'server': 'local_server'}), '(otherfile, server=local_server)\n', (1133, 1165), True, 'from ansys.dpf import core as dpf\n'), ((1281, 1324), 'ansys.dpf.core.examples.download_all_kinds_of_complexity', 'examples.download_all_kinds_of_complexity', ([], {}), '()\n', (1322, 1324), False, 'from ansys.dpf.core import examples\n'), ((1415, 1456), 'ansys.dpf.core.Model', 'dpf.Model', (['otherfile'], {'server': 'local_server'}), '(otherfile, server=local_server)\n', (1424, 1456), True, 'from ansys.dpf import core as dpf\n'), ((7294, 7343), 'ansys.dpf.core.operators.logic.identical_fields', 'dpf.operators.logic.identical_fields', (['f', 'ftocheck'], {}), '(f, ftocheck)\n', (7330, 7343), True, 'from ansys.dpf import core as dpf\n'), ((7396, 7428), 'numpy.allclose', 'np.allclose', (['f.data', 'fc2[i].data'], {}), '(f.data, fc2[i].data)\n', (7407, 7428), True, 'import numpy as np\n'), ((7444, 7490), 'numpy.allclose', 'np.allclose', (['f.scoping.ids', 'fc2[i].scoping.ids'], {}), '(f.scoping.ids, fc2[i].scoping.ids)\n', (7455, 7490), True, 'import numpy as np\n'), ((7506, 7540), 'numpy.allclose', 'np.allclose', (['f.data', 'ftocheck.data'], {}), '(f.data, ftocheck.data)\n', (7517, 7540), True, 'import numpy as np\n'), ((7556, 7604), 'numpy.allclose', 'np.allclose', (['f.scoping.ids', 'ftocheck.scoping.ids'], {}), '(f.scoping.ids, ftocheck.scoping.ids)\n', (7567, 7604), True, 'import numpy as np\n'), ((7784, 7833), 'ansys.dpf.core.operators.logic.identical_fields', 'dpf.operators.logic.identical_fields', (['f', 'ftocheck'], {}), '(f, ftocheck)\n', (7820, 7833), True, 'from ansys.dpf import core as dpf\n'), ((7886, 7918), 'numpy.allclose', 'np.allclose', (['f.data', 'fc2[i].data'], {}), '(f.data, fc2[i].data)\n', (7897, 7918), True, 'import numpy as np\n'), ((7934, 7980), 'numpy.allclose', 'np.allclose', (['f.scoping.ids', 'fc2[i].scoping.ids'], {}), '(f.scoping.ids, fc2[i].scoping.ids)\n', (7945, 
7980), True, 'import numpy as np\n'), ((7996, 8030), 'numpy.allclose', 'np.allclose', (['f.data', 'ftocheck.data'], {}), '(f.data, ftocheck.data)\n', (8007, 8030), True, 'import numpy as np\n'), ((8046, 8094), 'numpy.allclose', 'np.allclose', (['f.scoping.ids', 'ftocheck.scoping.ids'], {}), '(f.scoping.ids, ftocheck.scoping.ids)\n', (8057, 8094), True, 'import numpy as np\n'), ((429, 468), 'ansys.dpf.core.start_local_server', 'dpf.start_local_server', ([], {'as_global': '(False)'}), '(as_global=False)\n', (451, 468), True, 'from ansys.dpf import core as dpf\n'), ((1369, 1412), 'ansys.dpf.core.examples.download_all_kinds_of_complexity', 'examples.download_all_kinds_of_complexity', ([], {}), '()\n', (1410, 1412), False, 'from ansys.dpf.core import examples\n')] |
"""
Created on Thu Jan 26 17:04:11 2017
@author: <NAME>, <EMAIL>
Fit unet style nodule identifier on Luna dataset using 8-grid scheme
Physical resolution 2x2x2mm
Data aggregated, shuffled; wrap augmentation used (swrap)
"""
import numpy as np
from keras.models import load_model,Model
from keras.layers import MaxPooling3D
from keras.layers import Convolution3D
from keras.layers import Input, merge, UpSampling3D
from keras.optimizers import Adam
from keras import backend as K
#from keras.preprocessing.image import ImageDataGenerator # Keras original
from image_as_mod3d_2dmask import ImageDataGenerator # our modified version
# Configure the Keras backend for Theano-style, channels-first tensors.
K.set_image_dim_ordering('th')
smooth = 1.
def dice_coef(y_true, y_pred):
    """Soft dice coefficient on Keras tensors, smoothed to avoid 0/0."""
    true_flat = K.flatten(y_true)
    pred_flat = K.flatten(y_pred)
    overlap = K.sum(true_flat * pred_flat)
    denom = K.sum(true_flat) + K.sum(pred_flat) + smooth
    return (2. * overlap + smooth) / denom
DICE_LOW_LIMIT = 0 ## was 0.001
def dice_coef_np(y_true, y_pred):
    """Numpy dice coefficient; predictions within DICE_LOW_LIMIT of 0 or 1
    are snapped to exactly 0 / 1 before computing the overlap."""
    truth = y_true.flatten()
    pred = y_pred.flatten()
    pred[pred < DICE_LOW_LIMIT] = 0.
    pred[pred > 1 - DICE_LOW_LIMIT] = 1.
    overlap = np.sum(truth * pred)
    return (2. * overlap + smooth) / (np.sum(truth) + np.sum(pred) + smooth)
def dice_coef_pos_np(y_true, y_pred, pos = 0):
    """Numpy dice coefficient restricted to column `pos` of the inputs."""
    truth = y_true[:, pos].flatten()
    pred = y_pred[:, pos].flatten()
    overlap = np.sum(truth * pred)
    return (2. * overlap + smooth) / (np.sum(truth) + np.sum(pred) + smooth)
def dice_coef_loss(y_true, y_pred):
    """Training loss: negated dice coefficient (maximize overlap)."""
    return -1.0 * dice_coef(y_true, y_pred)
def unet_model_xd3_2_6l_grid(nb_filter=48, dim=5, clen=3 , img_rows=224, img_cols=224 ): # NOTE that this procedure is/should be used with img_rows & img_cols as None
    """Build the 3D U-Net style nodule identifier.

    Architecture follows http://cs231n.stanford.edu/reports2016/317_Report.pdf:
    an encoder of CONV-RELU-POOL stages, a bottom double-conv stage, a decoder
    with UPSCALE-CONV-RELU stages, followed by extra z-axis pooling stages that
    collapse the depth dimension, and a final 1x1 CONV-SIGMOID producing
    pixel-level predictions.  Uses the old Keras 1 layer API
    (Convolution3D / merge / border_mode).
    """
    zconv = clen

    def _double_conv(x, n_filters, kdepth, ksize):
        # Two stacked 3D conv layers with identical filter configuration.
        x = Convolution3D(n_filters, kdepth, ksize, ksize, activation='relu', border_mode='same')(x)
        return Convolution3D(n_filters, kdepth, ksize, ksize, activation='relu', border_mode='same')(x)

    inputs = Input((1, dim, img_rows, img_cols))
    # Encoder: two down-sampling stages plus the bottom stage.
    enc1 = _double_conv(inputs, nb_filter, zconv, clen)
    down1 = MaxPooling3D(pool_size=(2, 2, 2))(enc1)
    enc2 = _double_conv(down1, 2*nb_filter, zconv, clen)
    down2 = MaxPooling3D(pool_size=(2, 2, 2))(enc2)
    bottom = _double_conv(down2, 4*nb_filter, zconv, clen)
    # Decoder: upsample and concatenate with the matching encoder output.
    up_a = merge([UpSampling3D(size=(2, 2, 2))(bottom), enc2], mode='concat', concat_axis=1)
    dec2 = _double_conv(up_a, 2*nb_filter, zconv, clen)
    up_b = merge([UpSampling3D(size=(2, 2, 2))(dec2), enc1], mode='concat', concat_axis=1) # original - only works for even dim
    dec1 = _double_conv(up_b, nb_filter, zconv, clen)
    # Collapse the depth (z) axis step by step with (2,1,1) pooling.
    z1 = MaxPooling3D(pool_size=(2, 1, 1))(dec1)
    z1 = _double_conv(z1, 2*nb_filter, zconv, 1)
    z2 = MaxPooling3D(pool_size=(2, 1, 1))(z1)
    z2 = _double_conv(z2, 2*nb_filter, zconv, 1)
    z3 = MaxPooling3D(pool_size=(2, 1, 1))(z2)
    if (dim < 16):
        out = Convolution3D(1, 1, 1, 1, activation='sigmoid')(z3)
    else: # need one extra layer to get to 1D x 2D mask ...
        z4 = _double_conv(z3, 2*nb_filter, zconv, 1)
        z4 = MaxPooling3D(pool_size=(2, 1, 1))(z4)
        out = Convolution3D(1, 1, 1, 1, activation='sigmoid')(z4)
    model = Model(input=inputs, output=out)
    model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss, metrics=[dice_coef])
    return model
def grid_data(source, grid=32, crop=16, expand=12):
    """Cut a 5D batch (N, C, D, H, W) into overlapping grid tiles.

    Each tile covers one grid cell of size `grid`, extended by `expand`
    pixels on every side; `crop` pixels at the image border are skipped.
    Returns (cells, gridwidth, gridheight) with the tiles stacked along
    axis 0 in row-major (row, then column) order.
    """
    n_rows = (source.shape[3] - 2 * crop) // grid  # should be 6 for 224x224 data
    n_cols = (source.shape[4] - 2 * crop) // grid
    tiles = []
    for row in range(n_rows):
        y0 = crop + row * grid - expand
        y1 = crop + (row + 1) * grid + expand
        for col in range(n_cols):
            x0 = crop + col * grid - expand
            x1 = crop + (col + 1) * grid + expand
            tiles.append(source[:, :, :, y0:y1, x0:x1])
    return np.vstack(tiles), n_cols, n_rows
def data_from_grid(cells, gridwidth, gridheight, grid=32):
    """Reassemble grid tiles (stacked along axis 0) back into full images.

    Inverse of the tiling scheme: the expanded borders are cropped away,
    then the gridheight x gridwidth tiles are stitched into one
    (N, C, D, gridheight*grid, gridwidth*grid) array.
    """
    margin = (cells.shape[4] - grid) // 2  # same crop assumed in x & y
    if margin > 0:  # do NOT crop by 0 -- that would produce empty slices
        cells = cells[:, :, :, margin:-margin, margin:-margin]
    per_slot = cells.shape[0] // (gridwidth * gridheight)
    # Separate the grid-slot axis from the batch axis ...
    cells = cells.reshape((gridwidth * gridheight, per_slot) + cells.shape[1:])
    # ... move it next to the spatial axes ...
    cells = np.moveaxis(cells, 0, -3)
    s = cells.shape
    # ... split it into (row, col), interleave with (y, x), and merge.
    cells = cells.reshape(s[:3] + (gridheight, gridwidth) + s[4:])
    cells = cells.swapaxes(-2, -3)
    s = cells.shape
    return cells.reshape(s[:3] + (s[-4] * s[-3], s[-2] * s[-1]))
def data_from_grid_by_proximity(cells, gridwidth, gridheight, grid=32):
    """Reorder sequential tiles into slot-major order, then reassemble.

    The incoming tiles are grouped batch-first; data_from_grid expects them
    grid-slot-first, so the first two (virtual) axes are swapped before
    delegating.
    """
    shape = cells.shape
    per_slot = shape[0] // (gridwidth * gridheight)
    # NOTE: the split order below is inverted relative to data_from_grid's
    # own reshape -- this is what produces the "proximity" ordering.
    regrouped = cells.reshape((per_slot, gridwidth * gridheight) + shape[1:])
    regrouped = regrouped.swapaxes(0, 1).reshape(shape)
    return data_from_grid(regrouped, gridwidth, gridheight, grid)
def load_aggregate_masks_scans (masks_mnames, grids, upgrid_multis):
    """Load mask/scan .npz archives per grid level and aggregate them.

    masks_mnames: list (one entry per grid level) of lists of file-name stems
        (without ".npz"); the matching scan file is derived by replacing
        "masks_" with "scans_" in the stem.
    grids, upgrid_multis: per-level grid size and up-grid multiplier.

    Returns (masks, scans) as numpy arrays reassembled by
    data_from_grid_by_proximity.

    NOTE(review): each grid level folds the previous level's result back in
    via np.vstack, so shapes must be compatible across levels -- confirm with
    callers.  Uses the global numpy RNG (np.random.shuffle), so output order
    depends on np.random.seed set by the caller.
    """
    scans = []
    masks = []
    igrid = 0   # index into grids / upgrid_multis
    for masks_names in masks_mnames:
        if (len(masks_names) > 0):
            grid = grids[igrid]
            upgrid_multi = upgrid_multis[igrid]
            upgcount = upgrid_multi * upgrid_multi   # tiles per up-grid block
            scans1 = []
            masks1 = []
            for masks_name in masks_names:
                print ("Loading: ", masks_name)
                # Arrays are stored as the single unnamed member 'arr_0'
                masks0 = np.load(''.join((masks_name, ".npz")))['arr_0']
                scans0 = np.load(''.join((masks_name.replace("masks_", "scans_", 1), ".npz")))['arr_0']
                masks1.append(masks0)
                scans1.append(scans0)
            scans1 = np.vstack(scans1)
            masks1 = np.vstack(masks1)
            # Fold in the aggregate from the previous grid level (if any)
            if len(masks) > 0:
                scans1 = np.vstack([scans1, scans])
                masks1 = np.vstack([masks1, masks])
            lm = len(masks1) // upgcount * upgcount
            scans1 = scans1[0:lm]     # cut to multiples of upgcount
            masks1 = masks1[0:lm]
            # Shuffle masks and scans with the SAME permutation to keep pairing
            index_shuf = np.arange(lm)
            np.random.shuffle(index_shuf)
            scans1 = scans1[index_shuf]
            masks1 = masks1[index_shuf]
            scans = data_from_grid_by_proximity(scans1, upgrid_multi, upgrid_multi, grid=grid)
            masks = data_from_grid_by_proximity(masks1, upgrid_multi, upgrid_multi, grid=grid)
        igrid += 1
    return masks, scans
def load_aggregate_masks_scans_downsample2 (masks_mnames, grids, upgrid_multis, down_base):
    """Like load_aggregate_masks_scans, but with per-file down-sampling.

    down_base: base down-sampling factor; the effective factor grows as
        down_base * 4**igrid with the grid level.  Files larger than
        down_size (50000 samples) are shuffled and then strided by the
        factor, keeping masks and scans paired.

    Returns (masks, scans) as numpy arrays reassembled by
    data_from_grid_by_proximity.

    NOTE(review): duplicates most of load_aggregate_masks_scans; kept
    separate here to preserve the exact RNG call order of both functions.
    Uses the global numpy RNG, so output depends on np.random.seed.
    """
    scans = []
    masks = []
    down_size = 50000   # only files larger than this are down-sampled
    igrid = 0
    for masks_names in masks_mnames:
        if (len(masks_names) > 0):
            grid = grids[igrid]
            upgrid_multi = upgrid_multis[igrid]
            upgcount = upgrid_multi * upgrid_multi   # tiles per up-grid block
            scans1 = []
            masks1 = []
            for masks_name in masks_names:
                print ("Loading: ", masks_name)
                # Arrays are stored as the single unnamed member 'arr_0'
                masks0 = np.load(''.join((masks_name, ".npz")))['arr_0']
                scans0 = np.load(''.join((masks_name.replace("masks_", "scans_", 1), ".npz")))['arr_0']
                if igrid >= 0:
                    down = down_base * (4 ** igrid)  # dynamic
                    if len(masks0) > down_size and down > 1:
                        print("Down-sampling masks0/scans0 by: ", masks_name, down)
                        lm = len(masks0)
                        # Shuffle before striding so the kept subset is random
                        index_shuf = np.arange(lm)
                        np.random.shuffle(index_shuf)
                        scans0 = scans0[index_shuf]
                        masks0 = masks0[index_shuf]
                        masks0 = masks0[0:len(masks0):down]
                        scans0 = scans0[0:len(scans0):down]
                masks1.append(masks0)
                scans1.append(scans0)
            scans1 = np.vstack(scans1)
            masks1 = np.vstack(masks1)
            # Fold in the aggregate from the previous grid level (if any)
            if len(masks) > 0:
                scans1 = np.vstack([scans1, scans])
                masks1 = np.vstack([masks1, masks])
            lm = len(masks1) // upgcount * upgcount
            scans1 = scans1[0:lm]     # cut to multiples of upgcount
            masks1 = masks1[0:lm]
            # Shuffle masks and scans with the SAME permutation to keep pairing
            index_shuf = np.arange(lm)
            np.random.shuffle(index_shuf)
            scans1 = scans1[index_shuf]
            masks1 = masks1[index_shuf]
            scans = data_from_grid_by_proximity(scans1, upgrid_multi, upgrid_multi, grid=grid)
            masks = data_from_grid_by_proximity(masks1, upgrid_multi, upgrid_multi, grid=grid)
        igrid += 1
    return masks, scans
if __name__ == '__main__':
    # ---- Key initial parameters ----
    dim = 8                        # number of z-slices fed to the model
    start_from_scratch = True      # build a new model instead of loading one
    load_initial_weights = False   # optionally seed a fresh model with saved weights
    if start_from_scratch and load_initial_weights:
        model_weights_name_to_start_from = "../luna/models/d8_2x2x2_best_weights.h5" # only used when start_from_scratch is True and load_initial_weights is True
    ### KEY running parameteres
    nb_epoch = 25
    model_load_name = 'UNUSED../luna/models/d8g4a_model_25.h5'
    model_save_name = '../luna/models/d8g4a_model_25.h5' ### MUST include "_model" string as we use this for a substituion for weights file
    seed = 1000 # should be varied by steps/stages
    downsample = 20                # base down-sampling factor for training data
    set_lr_value = False
    new_lr_value = 1e-5 # only used when set_lr_value is True
    use_large_validation = True
    grids = [20, 40]
    upgrid_multis = [2, 2] # we modify only the last one if/as needed
    batch_size = 7 * int((8 // upgrid_multis[1])**2) # calculated for a 12GB graphics card (such as Tesla K80/AWS P2 system)
    # Training data: per-grid lists of mask file stems (commented entries excluded)
    masks_mnames = [
        [
        #"../luna/models/masks_d8g1x20ba4a_2x2x2_nodules_0_3_6860",
        "../luna/models/masks_d8g1x20ba4a_2x2x2_nodules_4_8_8178",
        #"../luna/models/masks_d8g1x20ba4a_2x2x2_blanks_0_3_68442",
        "../luna/models/masks_d8g1x20ba4a_2x2x2_blanks_4_8_97406"
        ],
        [
        "../luna/models/masks_d8g1x40ba4a_2x2x2_nodules_0_3_5940",
        #"../luna/models/masks_d8g1x40ba4a_2x2x2_nodules_4_8_6925",
        #"../luna/models/masks_d8g1x40ba4a_2x2x2_blanks_0_3_52367", ## unblock this one
        #"../luna/models/masks_d8g1x40ba4a_2x2x2_blanks_4_8_74880"
        ]]
    # Small validation set (nodules only)
    masks_val_mnames = [
        [
        "../luna/models/masks_d8g1x20ba4a_2x2x2_nodules_9_9_1442"
        ],
        [
        "../luna/models/masks_d8g1x40ba4a_2x2x2_nodules_9_9_1101"
        ]]
    # Large validation set (nodules + blanks)
    masks_val_large_mnames = [
        [
        "../luna/models/masks_d8g1x20ba4a_2x2x2_nodules_9_9_1442",
        "../luna/models/masks_d8g1x20ba4a_2x2x2_blanks_9_9_19861"
        ],
        [
        "../luna/models/masks_d8g1x40ba4a_2x2x2_nodules_9_9_1101",
        #"../luna/models/masks_d8g1x40ba4a_2x2x2_blanks_9_9_15122"
        ]]
    # ---- Load and prepare training / validation data ----
    np.random.seed(seed)
    masks, scans = load_aggregate_masks_scans_downsample2 (masks_mnames, grids, upgrid_multis, downsample)
    print ("Masks and Scans shapes: ", masks.shape, scans.shape)
    masks[masks < 0] = 0 # just in case (eliminate the blanks's marking)
    if masks.shape[2] > 1:
        # Reduce 3D masks to a 2D mask (keep the channel axis of size 1)
        masks = masks[:,:,masks.shape[2] // 2] ## select the central value as this one contains still all data
        masks = masks[:, np.newaxis]
        print ("Masks shape after 2D mapping: ", masks.shape)
    masks_val, scans_val = load_aggregate_masks_scans (masks_val_mnames, grids, upgrid_multis)
    print ("Val Masks and Scans shapes: ", masks_val.shape, scans_val.shape)
    masks_val[masks_val < 0] = 0
    if masks_val.shape[2] > 1:
        masks_val = masks_val[:,:,masks_val.shape[2] // 2] ## select the central value as this one contains still all data
        masks_val = masks_val[:, np.newaxis]
        print ("Masks_val shape after 2D mapping: ", masks_val.shape)
    masks_val_large, scans_val_large = load_aggregate_masks_scans (masks_val_large_mnames, grids, upgrid_multis)
    print ("Large Val Masks and Scans shapes: ", masks_val_large.shape, scans_val_large.shape)
    masks_val_large[masks_val_large < 0] = 0
    if masks_val_large.shape[2] > 1:
        masks_val_large = masks_val_large[:,:,masks_val_large.shape[2] // 2] ## select the central value as this one contains still all data
        masks_val_large = masks_val_large[:, np.newaxis]
        print ("Large Val Masks shape after 2D mapping: ", masks_val_large.shape)
    # ---- Build a new model or load a previously saved one ----
    if start_from_scratch:
        # img_rows/img_cols as None -> fully convolutional, variable input size
        model = unet_model_xd3_2_6l_grid(nb_filter=20, dim=dim, clen=3, img_rows=None , img_cols=None )
        print(model.summary())
        if load_initial_weights:
            model_weights_name = model_weights_name_to_start_from ### could potentially load best weights
            model.load_weights(model_weights_name)
            print("Weights and output models: ", model_weights_name, model_save_name)
        else:
            print("Start from scratch (no weights),output models: ", model_save_name)
    else:
        ## load_previous_model
        print ("Loading model: ", model_load_name)
        # Custom loss/metric must be registered for Keras deserialization
        model = load_model(model_load_name, #3
                           custom_objects={'dice_coef_loss': dice_coef_loss,
                                           'dice_coef': dice_coef
                                           }
                           )
        #print(model.summary())
        print("Load and output models: ", model_load_name, model_save_name)
    ## set the data ...
    masks = masks.astype(np.int16)
    # Optionally fold the validation data into training for final iterations
    final_couple_of_iterations = False
    if final_couple_of_iterations:
        masks = np.concatenate((masks, masks_val))
        scans = np.concatenate((scans, scans_val))
    # Augmentation: small shifts with "wrap" fill and horizontal flips only
    data_gen_args = dict(featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=0,
        width_shift_range=0.055/2,
        height_shift_range=0.055/2,
        horizontal_flip=True,
        vertical_flip=False,
        fill_mode= "wrap",
        zoom_range=0
        )
    # ---- Augmented generators, training, saving and evaluation ----
    # Identical generator config + identical seed keeps scans and masks aligned
    image_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)
    # Provide the same seed and keyword arguments to the fit and flow methods
    shuffle = True # default
    image_datagen.fit(scans, augment=True, seed=seed)
    mask_datagen.fit(masks, augment=True, seed=seed)
    image_generator = image_datagen.flow(scans,
        batch_size = batch_size,
        #shuffle = shuffle,
        seed=seed)
    mask_generator = mask_datagen.flow(masks,
        batch_size = batch_size,
        #shuffle = shuffle,
        seed=seed)
    # combine generators into one which yields image and masks
    train_generator = zip(image_generator, mask_generator)
    if set_lr_value:
        print("Model learning rate (old): ", model.optimizer.lr.get_value()) # was 1e-4
        model.optimizer.lr.set_value(new_lr_value)
        print("Model learning rate(new): ", model.optimizer.lr.get_value())
    samples_per_epoch = masks.shape[0]
    model.fit_generator(
        train_generator,
        samples_per_epoch= samples_per_epoch,
        nb_epoch = nb_epoch,
        validation_data = ( scans_val, masks_val),
        verbose=1)
    # Persist both the full model and a separate weights file
    model.save(model_save_name)
    model.save_weights(model_save_name.replace("_model", "_weights", 1))
    # Evaluate dice on the small (and optionally the large) validation set
    masks_pred = model.predict(scans_val, verbose=1)
    dice_check = dice_coef_np(masks_val, masks_pred)
    print ("dice_check: ", dice_check)
    if use_large_validation:
        masks_pred_large = model.predict(scans_val_large, batch_size =1, verbose=1)
        dice_check = dice_coef_np(masks_val_large, masks_pred_large)
        print ("Full dice_check: ", dice_check)
    print("Model learning rate: ", model.optimizer.lr.get_value())
| [
"keras.backend.sum",
"keras.backend.flatten",
"numpy.moveaxis",
"keras.layers.UpSampling3D",
"image_as_mod3d_2dmask.ImageDataGenerator",
"numpy.arange",
"numpy.reshape",
"keras.models.Model",
"numpy.vstack",
"numpy.random.seed",
"numpy.concatenate",
"keras.optimizers.Adam",
"keras.layers.Con... | [((645, 675), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""th"""'], {}), "('th')\n", (669, 675), True, 'from keras import backend as K\n'), ((738, 755), 'keras.backend.flatten', 'K.flatten', (['y_true'], {}), '(y_true)\n', (747, 755), True, 'from keras import backend as K\n'), ((771, 788), 'keras.backend.flatten', 'K.flatten', (['y_pred'], {}), '(y_pred)\n', (780, 788), True, 'from keras import backend as K\n'), ((809, 835), 'keras.backend.sum', 'K.sum', (['(y_true_f * y_pred_f)'], {}), '(y_true_f * y_pred_f)\n', (814, 835), True, 'from keras import backend as K\n'), ((1168, 1195), 'numpy.sum', 'np.sum', (['(y_true_f * y_pred_f)'], {}), '(y_true_f * y_pred_f)\n', (1174, 1195), True, 'import numpy as np\n'), ((1434, 1461), 'numpy.sum', 'np.sum', (['(y_true_f * y_pred_f)'], {}), '(y_true_f * y_pred_f)\n', (1440, 1461), True, 'import numpy as np\n'), ((2425, 2460), 'keras.layers.Input', 'Input', (['(1, dim, img_rows, img_cols)'], {}), '((1, dim, img_rows, img_cols))\n', (2430, 2460), False, 'from keras.layers import Input, merge, UpSampling3D\n'), ((4932, 4965), 'keras.models.Model', 'Model', ([], {'input': 'inputs', 'output': 'conv8'}), '(input=inputs, output=conv8)\n', (4937, 4965), False, 'from keras.models import load_model, Model\n'), ((5762, 5778), 'numpy.vstack', 'np.vstack', (['cells'], {}), '(cells)\n', (5771, 5778), True, 'import numpy as np\n'), ((6418, 6446), 'numpy.reshape', 'np.reshape', (['cells', 'new_shape'], {}), '(cells, new_shape)\n', (6428, 6446), True, 'import numpy as np\n'), ((6461, 6486), 'numpy.moveaxis', 'np.moveaxis', (['cells', '(0)', '(-3)'], {}), '(cells, 0, -3)\n', (6472, 6486), True, 'import numpy as np\n'), ((6635, 6664), 'numpy.reshape', 'np.reshape', (['cells', 'new_shape2'], {}), '(cells, new_shape2)\n', (6645, 6664), True, 'import numpy as np\n'), ((6832, 6864), 'numpy.reshape', 'np.reshape', (['cells', 'combine_shape'], {}), '(cells, combine_shape)\n', (6842, 6864), True, 
'import numpy as np\n'), ((7577, 7605), 'numpy.reshape', 'np.reshape', (['cells', 'new_shape'], {}), '(cells, new_shape)\n', (7587, 7605), True, 'import numpy as np\n'), ((7652, 7676), 'numpy.reshape', 'np.reshape', (['cells', 'shape'], {}), '(cells, shape)\n', (7662, 7676), True, 'import numpy as np\n'), ((13935, 13955), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (13949, 13955), True, 'import numpy as np\n'), ((17286, 17321), 'image_as_mod3d_2dmask.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**data_gen_args)\n', (17304, 17321), False, 'from image_as_mod3d_2dmask import ImageDataGenerator\n'), ((17341, 17376), 'image_as_mod3d_2dmask.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**data_gen_args)\n', (17359, 17376), False, 'from image_as_mod3d_2dmask import ImageDataGenerator\n'), ((2473, 2560), 'keras.layers.Convolution3D', 'Convolution3D', (['nb_filter', 'zconv', 'clen', 'clen'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(nb_filter, zconv, clen, clen, activation='relu', border_mode=\n 'same')\n", (2486, 2560), False, 'from keras.layers import Convolution3D\n'), ((2576, 2663), 'keras.layers.Convolution3D', 'Convolution3D', (['nb_filter', 'zconv', 'clen', 'clen'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(nb_filter, zconv, clen, clen, activation='relu', border_mode=\n 'same')\n", (2589, 2663), False, 'from keras.layers import Convolution3D\n'), ((2678, 2711), 'keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (2690, 2711), False, 'from keras.layers import MaxPooling3D\n'), ((2732, 2822), 'keras.layers.Convolution3D', 'Convolution3D', (['(2 * nb_filter)', 'zconv', 'clen', 'clen'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(2 * nb_filter, zconv, clen, clen, activation='relu',\n border_mode='same')\n", (2745, 2822), False, 'from keras.layers import Convolution3D\n'), ((2836, 2926), 'keras.layers.Convolution3D', 
'Convolution3D', (['(2 * nb_filter)', 'zconv', 'clen', 'clen'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(2 * nb_filter, zconv, clen, clen, activation='relu',\n border_mode='same')\n", (2849, 2926), False, 'from keras.layers import Convolution3D\n'), ((2940, 2973), 'keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (2952, 2973), False, 'from keras.layers import MaxPooling3D\n'), ((2995, 3085), 'keras.layers.Convolution3D', 'Convolution3D', (['(4 * nb_filter)', 'zconv', 'clen', 'clen'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(4 * nb_filter, zconv, clen, clen, activation='relu',\n border_mode='same')\n", (3008, 3085), False, 'from keras.layers import Convolution3D\n'), ((3099, 3189), 'keras.layers.Convolution3D', 'Convolution3D', (['(4 * nb_filter)', 'zconv', 'clen', 'clen'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(4 * nb_filter, zconv, clen, clen, activation='relu',\n border_mode='same')\n", (3112, 3189), False, 'from keras.layers import Convolution3D\n'), ((3297, 3387), 'keras.layers.Convolution3D', 'Convolution3D', (['(2 * nb_filter)', 'zconv', 'clen', 'clen'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(2 * nb_filter, zconv, clen, clen, activation='relu',\n border_mode='same')\n", (3310, 3387), False, 'from keras.layers import Convolution3D\n'), ((3399, 3489), 'keras.layers.Convolution3D', 'Convolution3D', (['(2 * nb_filter)', 'zconv', 'clen', 'clen'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(2 * nb_filter, zconv, clen, clen, activation='relu',\n border_mode='same')\n", (3412, 3489), False, 'from keras.layers import Convolution3D\n'), ((3644, 3731), 'keras.layers.Convolution3D', 'Convolution3D', (['nb_filter', 'zconv', 'clen', 'clen'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(nb_filter, zconv, clen, clen, activation='relu', border_mode=\n 'same')\n", (3657, 3731), False, 'from 
keras.layers import Convolution3D\n'), ((3744, 3831), 'keras.layers.Convolution3D', 'Convolution3D', (['nb_filter', 'zconv', 'clen', 'clen'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(nb_filter, zconv, clen, clen, activation='relu', border_mode=\n 'same')\n", (3757, 3831), False, 'from keras.layers import Convolution3D\n'), ((3849, 3882), 'keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 1, 1)'}), '(pool_size=(2, 1, 1))\n', (3861, 3882), False, 'from keras.layers import MaxPooling3D\n'), ((3904, 3989), 'keras.layers.Convolution3D', 'Convolution3D', (['(2 * nb_filter)', 'zconv', '(1)', '(1)'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(2 * nb_filter, zconv, 1, 1, activation='relu', border_mode='same'\n )\n", (3917, 3989), False, 'from keras.layers import Convolution3D\n'), ((4004, 4089), 'keras.layers.Convolution3D', 'Convolution3D', (['(2 * nb_filter)', 'zconv', '(1)', '(1)'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(2 * nb_filter, zconv, 1, 1, activation='relu', border_mode='same'\n )\n", (4017, 4089), False, 'from keras.layers import Convolution3D\n'), ((4104, 4137), 'keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 1, 1)'}), '(pool_size=(2, 1, 1))\n', (4116, 4137), False, 'from keras.layers import MaxPooling3D\n'), ((4160, 4245), 'keras.layers.Convolution3D', 'Convolution3D', (['(2 * nb_filter)', 'zconv', '(1)', '(1)'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(2 * nb_filter, zconv, 1, 1, activation='relu', border_mode='same'\n )\n", (4173, 4245), False, 'from keras.layers import Convolution3D\n'), ((4260, 4345), 'keras.layers.Convolution3D', 'Convolution3D', (['(2 * nb_filter)', 'zconv', '(1)', '(1)'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(2 * nb_filter, zconv, 1, 1, activation='relu', border_mode='same'\n )\n", (4273, 4345), False, 'from keras.layers import Convolution3D\n'), ((4360, 4393), 'keras.layers.MaxPooling3D', 
'MaxPooling3D', ([], {'pool_size': '(2, 1, 1)'}), '(pool_size=(2, 1, 1))\n', (4372, 4393), False, 'from keras.layers import MaxPooling3D\n'), ((16198, 16304), 'keras.models.load_model', 'load_model', (['model_load_name'], {'custom_objects': "{'dice_coef_loss': dice_coef_loss, 'dice_coef': dice_coef}"}), "(model_load_name, custom_objects={'dice_coef_loss':\n dice_coef_loss, 'dice_coef': dice_coef})\n", (16208, 16304), False, 'from keras.models import load_model, Model\n'), ((16738, 16772), 'numpy.concatenate', 'np.concatenate', (['(masks, masks_val)'], {}), '((masks, masks_val))\n', (16752, 16772), True, 'import numpy as np\n'), ((16789, 16823), 'numpy.concatenate', 'np.concatenate', (['(scans, scans_val)'], {}), '((scans, scans_val))\n', (16803, 16823), True, 'import numpy as np\n'), ((4438, 4485), 'keras.layers.Convolution3D', 'Convolution3D', (['(1)', '(1)', '(1)', '(1)'], {'activation': '"""sigmoid"""'}), "(1, 1, 1, 1, activation='sigmoid')\n", (4451, 4485), False, 'from keras.layers import Convolution3D\n'), ((4577, 4662), 'keras.layers.Convolution3D', 'Convolution3D', (['(2 * nb_filter)', 'zconv', '(1)', '(1)'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(2 * nb_filter, zconv, 1, 1, activation='relu', border_mode='same'\n )\n", (4590, 4662), False, 'from keras.layers import Convolution3D\n'), ((4685, 4770), 'keras.layers.Convolution3D', 'Convolution3D', (['(2 * nb_filter)', 'zconv', '(1)', '(1)'], {'activation': '"""relu"""', 'border_mode': '"""same"""'}), "(2 * nb_filter, zconv, 1, 1, activation='relu', border_mode='same'\n )\n", (4698, 4770), False, 'from keras.layers import Convolution3D\n'), ((4793, 4826), 'keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 1, 1)'}), '(pool_size=(2, 1, 1))\n', (4805, 4826), False, 'from keras.layers import MaxPooling3D\n'), ((4855, 4902), 'keras.layers.Convolution3D', 'Convolution3D', (['(1)', '(1)', '(1)', '(1)'], {'activation': '"""sigmoid"""'}), "(1, 1, 1, 1, activation='sigmoid')\n", 
(4868, 4902), False, 'from keras.layers import Convolution3D\n'), ((4996, 5011), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (5000, 5011), False, 'from keras.optimizers import Adam\n'), ((8541, 8558), 'numpy.vstack', 'np.vstack', (['scans1'], {}), '(scans1)\n', (8550, 8558), True, 'import numpy as np\n'), ((8580, 8597), 'numpy.vstack', 'np.vstack', (['masks1'], {}), '(masks1)\n', (8589, 8597), True, 'import numpy as np\n'), ((8924, 8937), 'numpy.arange', 'np.arange', (['lm'], {}), '(lm)\n', (8933, 8937), True, 'import numpy as np\n'), ((8950, 8979), 'numpy.random.shuffle', 'np.random.shuffle', (['index_shuf'], {}), '(index_shuf)\n', (8967, 8979), True, 'import numpy as np\n'), ((10742, 10759), 'numpy.vstack', 'np.vstack', (['scans1'], {}), '(scans1)\n', (10751, 10759), True, 'import numpy as np\n'), ((10781, 10798), 'numpy.vstack', 'np.vstack', (['masks1'], {}), '(masks1)\n', (10790, 10798), True, 'import numpy as np\n'), ((11111, 11124), 'numpy.arange', 'np.arange', (['lm'], {}), '(lm)\n', (11120, 11124), True, 'import numpy as np\n'), ((11137, 11166), 'numpy.random.shuffle', 'np.random.shuffle', (['index_shuf'], {}), '(index_shuf)\n', (11154, 11166), True, 'import numpy as np\n'), ((879, 894), 'keras.backend.sum', 'K.sum', (['y_true_f'], {}), '(y_true_f)\n', (884, 894), True, 'from keras import backend as K\n'), ((897, 912), 'keras.backend.sum', 'K.sum', (['y_pred_f'], {}), '(y_pred_f)\n', (902, 912), True, 'from keras import backend as K\n'), ((1239, 1255), 'numpy.sum', 'np.sum', (['y_true_f'], {}), '(y_true_f)\n', (1245, 1255), True, 'import numpy as np\n'), ((1258, 1274), 'numpy.sum', 'np.sum', (['y_pred_f'], {}), '(y_pred_f)\n', (1264, 1274), True, 'import numpy as np\n'), ((1505, 1521), 'numpy.sum', 'np.sum', (['y_true_f'], {}), '(y_true_f)\n', (1511, 1521), True, 'import numpy as np\n'), ((1524, 1540), 'numpy.sum', 'np.sum', (['y_pred_f'], {}), '(y_pred_f)\n', (1530, 1540), True, 'import numpy as np\n'), ((3210, 3238), 
'keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(2, 2, 2)'}), '(size=(2, 2, 2))\n', (3222, 3238), False, 'from keras.layers import Input, merge, UpSampling3D\n'), ((3518, 3546), 'keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(2, 2, 2)'}), '(size=(2, 2, 2))\n', (3530, 3546), False, 'from keras.layers import Input, merge, UpSampling3D\n'), ((8654, 8680), 'numpy.vstack', 'np.vstack', (['[scans1, scans]'], {}), '([scans1, scans])\n', (8663, 8680), True, 'import numpy as np\n'), ((8706, 8732), 'numpy.vstack', 'np.vstack', (['[masks1, masks]'], {}), '([masks1, masks])\n', (8715, 8732), True, 'import numpy as np\n'), ((10855, 10881), 'numpy.vstack', 'np.vstack', (['[scans1, scans]'], {}), '([scans1, scans])\n', (10864, 10881), True, 'import numpy as np\n'), ((10907, 10933), 'numpy.vstack', 'np.vstack', (['[masks1, masks]'], {}), '([masks1, masks])\n', (10916, 10933), True, 'import numpy as np\n'), ((10351, 10364), 'numpy.arange', 'np.arange', (['lm'], {}), '(lm)\n', (10360, 10364), True, 'import numpy as np\n'), ((10389, 10418), 'numpy.random.shuffle', 'np.random.shuffle', (['index_shuf'], {}), '(index_shuf)\n', (10406, 10418), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ResNet-based Multi-Label Classification for Quantitative Precipitation Estimation.
- ref: https://www.kaggle.com/kumar1541/resnet-in-keras
- Read in QPESUMS data and precipitation data
- Initialize the CNN model
- Train and test
- Output the model
This version is based on TensorFlow 2.0
"""
import os, csv, logging, argparse, glob, h5py, pickle
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix as cfm
import tensorflow as tf
from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, Activation
from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Add
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
from tensorflow.keras import initializers
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, DataQualia Lab Co. Ltd."
__credits__ = ["<NAME>"]
__license__ = "Apache License 2.0"
__version__ = "0.3.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
__date__ = '2019-10-27'
# Parameters
nLayer = 6                      # 6 10-min dbz for an hour
nY = 275                        # y-dimension of dbz data
nX = 162                        # x-dimension of dbz data
batchSize = 128                 # Batch size for training / testing
prec_bins=[0, 0.5, 10, 15, 30, 1000]    # hourly-precipitation class boundaries (mm)
yseg_stat = [0.5, 8, 13, 29]    # 40-year statistics of hourly precipitation of trace, 90%, 95%, and 99% percentile
yseg = [0.5, 10, 15, 30]        # Actual segmentation for precipitation
CHANNEL_AXIS = 3                # channels-last axis index for BatchNormalization
stride = 1                      # default conv stride
#-----------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------
# Load input/output data for model
def loadIOTab(srcx, srcy, dropna=False):
    """Pair QPESUMS input files with precipitation records.

    srcx: directory scanned recursively for .npy files; the date key is the
        file name without its extension.
    srcy: CSV file with at least a 'date' column.
    dropna: drop rows containing NAs after merging.

    Returns a DataFrame with one row per date present in BOTH sources,
    sorted by date, including the 'xuri' path of each input file.
    """
    import pandas as pd
    # Collect all .npy files under srcx; date is taken from the file name
    print("Reading input X from: " + srcx)
    records = []
    for root, dirs, files in os.walk(srcx):
        for fname in files:
            if fname.endswith('.npy'):
                records.append({'date': fname.replace('.npy', ''), 'xuri': os.path.join(root, fname)})
    xfiles = pd.DataFrame(records)
    print("... read input size: " + str(xfiles.shape))
    print("Reading output Y from: " + srcy)
    yraw = pd.read_csv(srcy, encoding='utf-8')
    yraw['date'] = yraw['date'].apply(str)  # normalize date key to string
    print("... read output size: " + str(yraw.shape))
    # Inner-join on date so only complete X-Y pairs remain
    print("Pairing X-Y and splitting training/testing data.")
    iotab = pd.merge(yraw, xfiles, on='date', sort=True)
    print("... data size after merging: " + str(iotab.shape))
    if dropna:
        print('Dropping records with NA')
        iotab = iotab.dropna()
        print("... data size after dropping-NAs: " + str(iotab.shape))
    return iotab
def generate_equal_samples(iotab, prec_bins, ylab='y', shuffle=True):
    '''Create equal sampling list:
    repeat sample rare categories to match the frequency of the majority case.

    Fixes vs. the original:
    - 'prec_cat' is computed via np.digitize when absent (the original
      referenced the column but the line creating it was commented out,
      raising KeyError).
    - Empty bins get a repeat count of 0 (the original divided by zero,
      yielding an infinite repeat count).
    - pandas.DataFrame.append (removed in pandas 2.0) replaced by pd.concat.
    '''
    # Histogram of precipitation over the requested bins
    counts = np.histogram(iotab[ylab], bins=prec_bins)[0]
    maxc = np.max(counts)                       # count of the most frequent category
    # Multiples required to reach max-count; 0 for empty categories
    nrep = np.where(counts > 0, np.round(maxc / np.maximum(counts, 1)), 0).astype(int)
    # Categorize precipitation by the specified bins if the caller did not
    if 'prec_cat' not in iotab.columns:
        iotab = iotab.copy()
        iotab['prec_cat'] = np.digitize(iotab[ylab], bins=prec_bins[1:-1])
    logging.debug('Sample histogram before weighted sampling:')
    logging.debug(iotab['prec_cat'].value_counts())
    # Repeat sampling so every category reaches (approximately) maxc rows
    for icat in range(0, len(prec_bins) - 1):
        repeat_n = nrep[icat]
        tmp = iotab.loc[iotab['prec_cat'] == icat, :]
        print('Append data category: '+str(icat)+' for '+ str(repeat_n) +' times with size '+str(tmp.shape))
        if repeat_n > 1 and tmp.shape[0] > 0:
            iotab = pd.concat([iotab] + [tmp] * (int(repeat_n) - 1), ignore_index=True)
    logging.debug('Sample histogram after weighted sampling:')
    logging.debug(iotab['prec_cat'].value_counts().sort_index())
    # Shuffle new dataset if specified
    if shuffle:
        iotab = iotab.sample(frac=1)  # index intentionally not reset
    return iotab
def loadDBZ(flist):
    ''' Load a list a dbz files (in npy format) into one numpy array. '''
    # Stack each per-date array along a new leading axis, cast to float32
    arrays = [np.load(fpath) for fpath in flist]
    return(np.array(arrays, dtype=np.float32))
# Function to give report
def report_evaluation(y_true, y_pred, verbose=0):
    '''Compute and optionally print basic regression evaluation measures.

    :param y_true: ground-truth values (numpy array).
    :param y_pred: predicted values (numpy array, same length).
    :param verbose: 0 = silent, 1 = print RMSE/corr, >1 = also print moments.
    :return: dict with mean/variance of both vectors plus 'rmse' and 'corr'.
    '''
    # Calculate measures. RMSE is computed directly with numpy, which is
    # mathematically identical to sklearn's mean_squared_error and removes
    # the hard sklearn dependency from this helper.
    results = {}
    results['y_true_mean'] = y_true.mean()
    results['y_true_var'] = y_true.var()
    results['y_pred_mean'] = y_pred.mean()
    results['y_pred_var'] = y_pred.var()
    results['rmse'] = np.sqrt(np.mean((np.asarray(y_true) - np.asarray(y_pred))**2))
    # A (near-)constant prediction has no meaningful correlation coefficient
    if y_pred.var()<=10e-8:
        results['corr'] = 0
    else:
        results['corr'] = np.corrcoef(y_true,y_pred)[0,1]
    # Print results if verbose > 0
    if verbose>0:
        if verbose>1:
            print('Mean of y_true: ' + str(results['y_true_mean']))
            print('Variance of y_true: ' + str(results['y_true_var']))
            print('Mean of y_pred: ' + str(results['y_pred_mean']))
            print('Variance of y_pred: ' + str(results['y_pred_var']))
        print('RMSE: ' + str(results['rmse']))
        print('Corr: ' + str(results['corr']))
    # Return results
    return(results)
# Create cross validation splits
def create_CV_splits(iotable, k=5, ysegs=None, ylab='y', shuffle=False):
    '''Build k-fold train/test index lists, stratified on `ylab` when `ysegs` is given.'''
    from sklearn.model_selection import StratifiedKFold, KFold
    # Accumulators for the per-fold index arrays
    cvidx_train, cvidx_test = [], []
    # Choose plain k-fold or stratified k-fold on the discretized target
    if ysegs is None:
        splitter = KFold(n_splits=k, random_state=None, shuffle=shuffle)
        folds = splitter.split(iotable['xuri'])
    else:
        splitter = StratifiedKFold(n_splits=k, random_state=None, shuffle=shuffle)
        folds = splitter.split(iotable['xuri'], np.digitize(iotable[ylab], ysegs))
    for idx_train, idx_test in folds:
        cvidx_train.append(idx_train)
        cvidx_test.append(idx_test)
    return((cvidx_train, cvidx_test))
def to_onehot(y, nclasses=5):
    ''' Represent the given y vector into one-hot encoding of 5 classes (0,1,2,3,4). '''
    # Cumulative ("ordinal") encoding: for class c, positions 0..c are set to 1
    yoh = np.zeros((len(y), nclasses), dtype=np.float32)
    for row, cat in enumerate(y):
        yoh[row, 0:cat] = 1   # all classes strictly below cat
        yoh[row, cat] = 1     # the class itself
    return(yoh)
def onehot_to_category(y):
    '''Convert one-hot encoding back to category'''
    # Under the cumulative encoding, the category is the highest set index
    cats = [np.max(np.where(row==1)) for row in y]
    return(np.array(cats))
def data_generator_mlc(iotab, batch_size, ylab='y'):
    ''' Data generator for batched processing. '''
    nSample = len(iotab)
    y = np.array(iotab[ylab])
    # Loop forever: Keras-style generators are expected to be infinite
    while True:
        head = 0
        while head < nSample:
            tail = min(head + batch_size, nSample)
            # Load the dbz arrays for this batch and encode the labels
            X = loadDBZ(iotab['xuri'][head:tail])
            Y = to_onehot(y[head:tail])
            yield (X,Y) #a tuple with two numpy arrays with batch_size samples
            head = tail
    # End of generator
# CNN
def init_resnet_mlc(input_shape, num_filters=32, num_res_blocks=2):
    """
    :Return:
        Newly initialized (model, encoder) pair based on the ResNet
        architechture for multi-label classification; `encoder` shares
        weights with `model` and outputs the last hidden representation.
    :param
        int input_shape: The shape of the input tensor (rows, cols, channels).

    NOTE(review): num_filters and num_res_blocks are currently unused;
    the filter counts of the residual sections below are hard-coded.
    """
    # Residual block: two 3x3 convs on `temp`, merged with a convolved shortcut of `x`
    def res_block(x, temp, filters=32, pooling=False, dropout=0.0):
        temp = Conv2D(filters, (3,3), strides=stride, padding='same')(temp)
        temp = BatchNormalization(axis=CHANNEL_AXIS)(temp)
        temp = Activation('relu')(temp)
        temp = Conv2D(filters, (3,3), strides=stride, padding="same")(temp)
        x = Add()([temp, Conv2D(filters, (3,3), strides=stride, padding='same')(x)])
        if pooling:
            x = MaxPooling2D((2,2))(x)
        if dropout != 0.0:
            x = Dropout(dropout)(x)
        # NOTE(review): both of the next two lines take `x` as input, so the
        # BatchNormalization output is discarded -- possibly unintended.
        temp = BatchNormalization(axis=CHANNEL_AXIS)(x)
        temp = Activation('relu')(x)
        return(x, temp)
    # Input layer
    inputs = Input(shape=input_shape)
    # Initial block: CONV -> BN -> relu -> MaxPooling
    x = Conv2D(16, (3, 3), strides=stride, name='conv1')(inputs)
    x = BatchNormalization(axis=CHANNEL_AXIS, name='bn_conv1')(x)
    x = Activation('relu')(x)
    temp = x
    # Residual sections:
    #x,temp = res_block(x, temp, 32,dropout = 0.2)
    #x,temp = res_block(x, temp, 32,dropout = 0.3)
    x,temp = res_block(x, temp, 32,dropout = 0.4,pooling = True)
    #x,temp = res_block(x, temp, 64,dropout = 0.2)
    x,temp = res_block(x, temp, 64,dropout = 0.2,pooling = True)
    x,temp = res_block(x, temp, 256,dropout = 0.4)
    # Output section: Flatten -> Dropout -> Dense stack -> 5-way sigmoid output
    # (sigmoid + binary_crossentropy matches the cumulative multi-label encoding)
    x = temp
    x = Flatten()(x)
    x = Dropout(0.4)(x)
    #x = Dense(256, activation = "relu")(x)
    #x = Dropout(0.23)(x)
    #x = Dense(128, activation = "relu")(x)
    #x = Dropout(0.3)(x)
    x = Dense(64, activation = "relu")(x)
    x = Dropout(0.2)(x)
    x = Dense(32, activation = "relu")(x)
    x = Dropout(0.2)(x)
    out = Dense(5, activation='sigmoid', name='main_output')(x)
    # Initialize model
    model = Model(inputs = inputs, outputs = out, name='ResNet')
    # Define compile parameters
    adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
    # The encoder exposes the last hidden layer (same weights as `model`)
    encoder = Model(inputs = inputs, outputs = x)
    return((model, encoder))
#-----------------------------------------------------------------------
def main():
    """Command-line entry point: train and evaluate the multi-label ResNet
    with k-fold cross validation, then write predictions/history/report CSVs."""
    # Configure Argument Parser
    parser = argparse.ArgumentParser(description='Retrieve DBZ data for further processing.')
    parser.add_argument('--rawx', '-x', help='the directory containing preprocessed DBZ data.')
    parser.add_argument('--rawy', '-y', help='the file containing the precipitation data.')
    parser.add_argument('--output', '-o', help='the file to store training history.')
    parser.add_argument('--samplesize', '-s', default=2, type=int, help='Weighted sampling size (multiple the original size).')
    # NOTE(review): the help text of --batch_size looks copy-pasted from --epochs
    parser.add_argument('--batch_size', '-b', default=16, type=int, help='number of epochs.')
    parser.add_argument('--epochs', '-e', default=1, type=int, help='number of epochs.')
    parser.add_argument('--kfold', '-k', default=2, type=int, help='number of folds for cross validation.')
    parser.add_argument('--logfile', '-l', default='reg.log', help='the log file.')
    parser.add_argument('--random_seed', '-r', default=None, type=int, help='the random seed.')
    args = parser.parse_args()
    # Set up logging
    if not args.logfile is None:
        logging.basicConfig(level=logging.DEBUG, filename=args.logfile, filemode='w')
    else:
        logging.basicConfig(level=logging.DEBUG)
    logging.debug(args)
    #-------------------------------
    # IO data generation
    #-------------------------------
    iotab = loadIOTab(args.rawx, args.rawy, dropna=True)
    # Categorize y: discretize the hourly precipitation into the prec_bins classes
    iotab['prec_cat'] = np.digitize(iotab['t1hr'], bins=prec_bins[1:-1])
    #-------------------------------
    # Set random seed if specified
    #-------------------------------
    if not args.random_seed is None:
        tf.random.set_seed(args.random_seed)
    #-------------------------------
    # Create Cross Validation splits
    #-------------------------------
    idx_trains, idx_tests = create_CV_splits(iotab, k=args.kfold, ysegs=prec_bins[1:-1], ylab='t1hr', shuffle=False)
    #-------------------------------
    # Run through CV
    #-------------------------------
    ys = []
    hists = []
    cv_report = []
    for i in range(len(idx_trains)):
        # Create weighted sampling for training data
        iotrain = iotab.iloc[idx_trains[i],:]
        iotrain = generate_equal_samples(iotrain, prec_bins=prec_bins, ylab='t1hr', shuffle=True)
        # Initialize model
        # NOTE(review): nY and nLayer are defined elsewhere in the file (not in this view)
        model = init_resnet_mlc((nY, nX, nLayer))
        # Debug info
        if i==0:
            logging.debug(model[0].summary())
        logging.info("Training data samples: "+str(iotrain.shape[0]))
        steps_train = np.ceil(iotrain.shape[0]/args.batch_size)
        logging.debug("Training data steps: " + str(steps_train))
        logging.info("Testing data samples: "+ str(len(idx_tests[i])))
        steps_test = np.ceil(len(idx_tests[i])/args.batch_size)
        logging.debug("Testing data steps: " + str(steps_test))
        # Fitting model
        # NOTE(review): steps_train/steps_test are numpy floats from np.ceil;
        # Keras generally expects ints here -- confirm on the target TF version.
        hist = model[0].fit_generator(data_generator_mlc(iotrain, args.batch_size, ylab='prec_cat'), steps_per_epoch=steps_train, epochs=args.epochs, max_queue_size=args.batch_size, verbose=0)
        # Prediction
        y_pred = model[0].predict_generator(data_generator_mlc(iotab.iloc[idx_tests[i],:], args.batch_size, ylab='prec_cat'), steps=steps_test, verbose=0)
        # Prepare output: decode sigmoid outputs (>0.5) back to a category
        yt = np.array(iotab['prec_cat'])[idx_tests[i]]
        yp = onehot_to_category((y_pred>0.5)*1)
        ys.append(pd.DataFrame({'date': iotab.date.iloc[idx_tests[i]], 'y': yt, 'y_pred': yp,
                                'y0':y_pred[:,0], 'y1':y_pred[:,1], 'y2':y_pred[:,2], 'y3':y_pred[:,3], 'y4':y_pred[:,4]}))
        hists.append(pd.DataFrame(hist.history))
        cv_report.append(report_evaluation(yt, yp))
        # Debug info
        logging.debug('Confusion Matrix: ')
        logging.debug(cfm(yt, yp))
    # Output results: per-date predictions, training history, and fold report
    pd.concat(ys).to_csv(args.output+'_mlc_ys.csv', index=False)
    pd.concat(hists).to_csv(args.output+'_mlc_hist.csv')
    pd.DataFrame(cv_report).to_csv(args.output+'_mlc_report.csv')
    # done
    return(0)
#==========
# Script
#==========
if __name__=="__main__":
    # Run the CLI entry point only when executed as a script
    main()
| [
"logging.debug",
"pandas.read_csv",
"tensorflow.keras.layers.BatchNormalization",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.KFold",
"os.walk",
"tensorflow.keras.layers.Input",
"numpy.histogram",
"tensorflow.keras.layers.Co... | [((2055, 2068), 'os.walk', 'os.walk', (['srcx'], {}), '(srcx)\n', (2062, 2068), False, 'import os, csv, logging, argparse, glob, h5py, pickle\n'), ((2241, 2261), 'pandas.DataFrame', 'pd.DataFrame', (['xfiles'], {}), '(xfiles)\n', (2253, 2261), True, 'import pandas as pd\n'), ((2420, 2455), 'pandas.read_csv', 'pd.read_csv', (['srcy'], {'encoding': '"""utf-8"""'}), "(srcy, encoding='utf-8')\n", (2431, 2455), True, 'import pandas as pd\n'), ((2655, 2699), 'pandas.merge', 'pd.merge', (['yraw', 'xfiles'], {'on': '"""date"""', 'sort': '(True)'}), "(yraw, xfiles, on='date', sort=True)\n", (2663, 2699), True, 'import pandas as pd\n'), ((3254, 3295), 'numpy.histogram', 'np.histogram', (['iotab[ylab]'], {'bins': 'prec_bins'}), '(iotab[ylab], bins=prec_bins)\n', (3266, 3295), True, 'import numpy as np\n'), ((3307, 3327), 'numpy.max', 'np.max', (['prec_hist[0]'], {}), '(prec_hist[0])\n', (3313, 3327), True, 'import numpy as np\n'), ((3600, 3659), 'logging.debug', 'logging.debug', (['"""Sample histogram before weighted sampling:"""'], {}), "('Sample histogram before weighted sampling:')\n", (3613, 3659), False, 'import os, csv, logging, argparse, glob, h5py, pickle\n'), ((4074, 4132), 'logging.debug', 'logging.debug', (['"""Sample histogram after weighted sampling:"""'], {}), "('Sample histogram after weighted sampling:')\n", (4087, 4132), False, 'import os, csv, logging, argparse, glob, h5py, pickle\n'), ((4527, 4560), 'numpy.array', 'np.array', (['xdata'], {'dtype': 'np.float32'}), '(xdata, dtype=np.float32)\n', (4535, 4560), True, 'import numpy as np\n'), ((6627, 6668), 'numpy.zeros', 'np.zeros', (['(L, nclasses)'], {'dtype': 'np.float32'}), '((L, nclasses), dtype=np.float32)\n', (6635, 6668), True, 'import numpy as np\n'), ((7092, 7103), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (7100, 7103), True, 'import numpy as np\n'), ((7243, 7264), 'numpy.array', 'np.array', (['iotab[ylab]'], {}), '(iotab[ylab])\n', (7251, 7264), True, 'import 
numpy as np\n'), ((8794, 8818), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (8799, 8818), False, 'from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, Activation\n'), ((9918, 9966), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'out', 'name': '"""ResNet"""'}), "(inputs=inputs, outputs=out, name='ResNet')\n", (9923, 9966), False, 'from tensorflow.keras.models import Model, load_model\n'), ((10014, 10079), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.01)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)', 'decay': '(0.0)'}), '(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n', (10018, 10079), False, 'from tensorflow.keras.optimizers import Adam\n'), ((10178, 10209), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), '(inputs=inputs, outputs=x)\n', (10183, 10209), False, 'from tensorflow.keras.models import Model, load_model\n'), ((10374, 10459), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Retrieve DBZ data for further processing."""'}), "(description='Retrieve DBZ data for further processing.'\n )\n", (10397, 10459), False, 'import os, csv, logging, argparse, glob, h5py, pickle\n'), ((11562, 11581), 'logging.debug', 'logging.debug', (['args'], {}), '(args)\n', (11575, 11581), False, 'import os, csv, logging, argparse, glob, h5py, pickle\n'), ((11781, 11829), 'numpy.digitize', 'np.digitize', (["iotab['t1hr']"], {'bins': 'prec_bins[1:-1]'}), "(iotab['t1hr'], bins=prec_bins[1:-1])\n", (11792, 11829), True, 'import numpy as np\n'), ((4482, 4492), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (4489, 4492), True, 'import numpy as np\n'), ((4930, 4972), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (4956, 4972), True, 'import sklearn.metrics as metrics\n'), ((5918, 5971), 
'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'k', 'random_state': 'None', 'shuffle': 'shuffle'}), '(n_splits=k, random_state=None, shuffle=shuffle)\n', (5923, 5971), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((6139, 6202), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'k', 'random_state': 'None', 'shuffle': 'shuffle'}), '(n_splits=k, random_state=None, shuffle=shuffle)\n', (6154, 6202), False, 'from sklearn.model_selection import StratifiedKFold, KFold\n'), ((8881, 8929), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'strides': 'stride', 'name': '"""conv1"""'}), "(16, (3, 3), strides=stride, name='conv1')\n", (8887, 8929), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Add\n'), ((8946, 9000), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'CHANNEL_AXIS', 'name': '"""bn_conv1"""'}), "(axis=CHANNEL_AXIS, name='bn_conv1')\n", (8964, 9000), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Add\n'), ((9012, 9030), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9022, 9030), False, 'from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, Activation\n'), ((9511, 9520), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (9518, 9520), False, 'from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, Activation\n'), ((9532, 9544), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (9539, 9544), False, 'from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, Activation\n'), ((9695, 9723), 'tensorflow.keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (9700, 9723), False, 'from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, Activation\n'), ((9737, 9749), 'tensorflow.keras.layers.Dropout', 
'Dropout', (['(0.2)'], {}), '(0.2)\n', (9744, 9749), False, 'from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, Activation\n'), ((9761, 9789), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (9766, 9789), False, 'from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, Activation\n'), ((9803, 9815), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (9810, 9815), False, 'from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, Activation\n'), ((9829, 9879), 'tensorflow.keras.layers.Dense', 'Dense', (['(5)'], {'activation': '"""sigmoid"""', 'name': '"""main_output"""'}), "(5, activation='sigmoid', name='main_output')\n", (9834, 9879), False, 'from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, Activation\n'), ((11421, 11498), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'filename': 'args.logfile', 'filemode': '"""w"""'}), "(level=logging.DEBUG, filename=args.logfile, filemode='w')\n", (11440, 11498), False, 'import os, csv, logging, argparse, glob, h5py, pickle\n'), ((11517, 11557), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (11536, 11557), False, 'import os, csv, logging, argparse, glob, h5py, pickle\n'), ((11984, 12020), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (12002, 12020), True, 'import tensorflow as tf\n'), ((12877, 12920), 'numpy.ceil', 'np.ceil', (['(iotrain.shape[0] / args.batch_size)'], {}), '(iotrain.shape[0] / args.batch_size)\n', (12884, 12920), True, 'import numpy as np\n'), ((14035, 14070), 'logging.debug', 'logging.debug', (['"""Confusion Matrix: """'], {}), "('Confusion Matrix: ')\n", (14048, 14070), False, 'import os, csv, logging, argparse, glob, h5py, pickle\n'), ((3394, 3423), 'numpy.round', 'np.round', (['(maxc / prec_hist[0])'], {}), '(maxc / 
prec_hist[0])\n', (3402, 3423), True, 'import numpy as np\n'), ((5065, 5092), 'numpy.corrcoef', 'np.corrcoef', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (5076, 5092), True, 'import numpy as np\n'), ((6264, 6297), 'numpy.digitize', 'np.digitize', (['iotable[ylab]', 'ysegs'], {}), '(iotable[ylab], ysegs)\n', (6275, 6297), True, 'import numpy as np\n'), ((8202, 8257), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters', '(3, 3)'], {'strides': 'stride', 'padding': '"""same"""'}), "(filters, (3, 3), strides=stride, padding='same')\n", (8208, 8257), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Add\n'), ((8278, 8315), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'CHANNEL_AXIS'}), '(axis=CHANNEL_AXIS)\n', (8296, 8315), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Add\n'), ((8337, 8355), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8347, 8355), False, 'from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, Activation\n'), ((8377, 8432), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters', '(3, 3)'], {'strides': 'stride', 'padding': '"""same"""'}), "(filters, (3, 3), strides=stride, padding='same')\n", (8383, 8432), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Add\n'), ((8450, 8455), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (8453, 8455), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Add\n'), ((8660, 8697), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'CHANNEL_AXIS'}), '(axis=CHANNEL_AXIS)\n', (8678, 8697), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Add\n'), ((8716, 8734), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8726, 8734), False, 'from tensorflow.keras.layers 
import Input, Dropout, Dense, Flatten, Activation\n'), ((13615, 13642), 'numpy.array', 'np.array', (["iotab['prec_cat']"], {}), "(iotab['prec_cat'])\n", (13623, 13642), True, 'import numpy as np\n'), ((13723, 13907), 'pandas.DataFrame', 'pd.DataFrame', (["{'date': iotab.date.iloc[idx_tests[i]], 'y': yt, 'y_pred': yp, 'y0': y_pred\n [:, 0], 'y1': y_pred[:, 1], 'y2': y_pred[:, 2], 'y3': y_pred[:, 3],\n 'y4': y_pred[:, 4]}"], {}), "({'date': iotab.date.iloc[idx_tests[i]], 'y': yt, 'y_pred': yp,\n 'y0': y_pred[:, 0], 'y1': y_pred[:, 1], 'y2': y_pred[:, 2], 'y3':\n y_pred[:, 3], 'y4': y_pred[:, 4]})\n", (13735, 13907), True, 'import pandas as pd\n'), ((13925, 13951), 'pandas.DataFrame', 'pd.DataFrame', (['hist.history'], {}), '(hist.history)\n', (13937, 13951), True, 'import pandas as pd\n'), ((14093, 14104), 'sklearn.metrics.confusion_matrix', 'cfm', (['yt', 'yp'], {}), '(yt, yp)\n', (14096, 14104), True, 'from sklearn.metrics import confusion_matrix as cfm\n'), ((14131, 14144), 'pandas.concat', 'pd.concat', (['ys'], {}), '(ys)\n', (14140, 14144), True, 'import pandas as pd\n'), ((14196, 14212), 'pandas.concat', 'pd.concat', (['hists'], {}), '(hists)\n', (14205, 14212), True, 'import pandas as pd\n'), ((14253, 14276), 'pandas.DataFrame', 'pd.DataFrame', (['cv_report'], {}), '(cv_report)\n', (14265, 14276), True, 'import pandas as pd\n'), ((7064, 7080), 'numpy.where', 'np.where', (['(r == 1)'], {}), '(r == 1)\n', (7072, 7080), True, 'import numpy as np\n'), ((8559, 8579), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (8571, 8579), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Add\n'), ((8625, 8641), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (8632, 8641), False, 'from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, Activation\n'), ((8463, 8518), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters', '(3, 3)'], {'strides': 'stride', 
'padding': '"""same"""'}), "(filters, (3, 3), strides=stride, padding='same')\n", (8469, 8518), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Add\n'), ((2203, 2225), 'os.path.join', 'os.path.join', (['root', 'fn'], {}), '(root, fn)\n', (2215, 2225), False, 'import os, csv, logging, argparse, glob, h5py, pickle\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Trains and tests a loudness-threshold classifier for every given .pkl file.
For usage information, call without any parameters.
Author: <NAME>
"""
from __future__ import print_function
import sys
import os
from argparse import ArgumentParser
import numpy as np
import scipy.optimize
def opts_parser():
    """Create and return the command-line ArgumentParser for this script."""
    descr = ("Trains and tests a loudness-threshold classifier for every given "
            ".pkl file.")
    parser = ArgumentParser(description=descr)
    parser.add_argument('targets',
            type=str,
            help='a pickle file with ground truth targets')
    parser.add_argument('inputs', nargs='+',
            type=str,
            help='a pickle file with loudnesses (summarized magnitudes)')
    parser.add_argument('--plot',
            action='store_true',
            help='If given, saves a histogram plot next to each input file.')
    parser.add_argument('--plot-data',
            action='store_true',
            help='If given along with --plot, saves the histogram values next '
                 'to each plot.')
    parser.add_argument('--smooth-width', metavar='WIDTH',
            type=int, default=0,
            # FIX: was '(default)s', which argparse renders literally;
            # '%(default)s' is the correct substitution syntax.
            help='Optionally apply temporal median smoothing over WIDTH frames '
                 '(default: %(default)s)')
    return parser
def error(thr, inputs, targets):
    """Classification error of predicting `inputs >= thr` against boolean `targets`."""
    predictions = (inputs >= thr)
    accuracy = np.mean(predictions == targets)
    return 1. - accuracy
def main():
    """Entry point: fit a loudness threshold per input file and report errors
    on the train/valid/test splits; optionally plot the loudness histograms."""
    # parse command line
    parser = opts_parser()
    options = parser.parse_args()
    targetfile = options.targets
    inputfiles = options.inputs
    # load targets
    # NOTE(review): np.load on a .pkl file relies on pickle support; recent
    # numpy requires allow_pickle=True for this -- confirm the numpy version.
    targetdata = np.load(targetfile)
    targets = targetdata['labels']
    splits = targetdata['splits']
    targets_train = np.concatenate(targets[splits['train']])
    # print baseline: error of the constant classifiers (all 0 / all 1)
    for t in 0, 1:
        print("baseline (all %d)" % t)
        for part in 'train', 'valid', 'test':
            print("%s err: %.4g" %
                  (part,
                   np.abs(t - np.mean(np.concatenate(targets[splits[part]])))))
    # iterate over the inputs
    for inputfile in inputfiles:
        print(inputfile)
        inp = np.load(inputfile)['sums']
        if options.smooth_width:
            from scipy.ndimage.filters import median_filter
            # temporal median smoothing, applied per excerpt
            inp = [median_filter(p, options.smooth_width, mode='nearest')
                   for p in inp]
        inp_train = np.concatenate(inp[splits['train']])
        # start at the midpoint, then refine with a simplex search on the error
        thr = .5 * (inp_train.min() + inp_train.max())
        thr = scipy.optimize.fmin(error, thr, args=(inp_train, targets_train),
                disp=False)[0]
        print("threshold: %f" % thr)
        for part in 'train', 'valid', 'test':
            print("%s err: %.4g" %
                  (part, error(thr, np.concatenate(inp[splits[part]]),
                               np.concatenate(targets[splits[part]]))))
        if options.plot:
            import matplotlib.pyplot as plt
            if not 'log' in inputfile:
                low = inp_train.max() / 10**(50. / 20)  # cutoff at -50 dB FS
            else:
                k = int(len(inp_train) * .02)  # cutoff at 2-percentile
                low = np.partition(inp_train, k)[k]
            np.maximum(low, inp_train, inp_train)  # apply cutoff (in-place clip)
            if not 'log' in inputfile:
                # convert linear magnitudes (and the threshold) to dB for plotting
                inp_train = 20 * np.log10(inp_train)
                low = inp_train.min()
                thr = 20 * np.log10(thr)
            high = inp_train.max()
            plt.figure()
            # overlay positive/negative loudness histograms on shared bins
            posvals, bins, _ = plt.hist(inp_train[targets_train], 50,
                                        range=(low, high), alpha=.8)
            negvals, _, _ = plt.hist(inp_train[~targets_train], bins, alpha=.8)
            plt.axvline(thr, linestyle=':', color='r')
            plt.savefig(inputfile[:-3] + 'png')
            if options.plot_data:
                np.savez(inputfile[:-3] + 'hist.npz', bins=bins, thr=thr,
                         posvals=posvals, negvals=negvals)
if __name__=="__main__":
    # Run the CLI entry point only when executed as a script
    main()
| [
"numpy.mean",
"scipy.ndimage.filters.median_filter",
"numpy.savez",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"numpy.log10",
"numpy.partition",
"matplotlib.pyplot.figure",
"numpy.concatenate",
"numpy.maximum",
"numpy.load",
"matplotlib.pyplot.axvline"
... | [((481, 514), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': 'descr'}), '(description=descr)\n', (495, 514), False, 'from argparse import ArgumentParser\n'), ((1626, 1645), 'numpy.load', 'np.load', (['targetfile'], {}), '(targetfile)\n', (1633, 1645), True, 'import numpy as np\n'), ((1735, 1775), 'numpy.concatenate', 'np.concatenate', (["targets[splits['train']]"], {}), "(targets[splits['train']])\n", (1749, 1775), True, 'import numpy as np\n'), ((1388, 1423), 'numpy.mean', 'np.mean', (['((inputs >= thr) == targets)'], {}), '((inputs >= thr) == targets)\n', (1395, 1423), True, 'import numpy as np\n'), ((2396, 2432), 'numpy.concatenate', 'np.concatenate', (["inp[splits['train']]"], {}), "(inp[splits['train']])\n", (2410, 2432), True, 'import numpy as np\n'), ((2149, 2167), 'numpy.load', 'np.load', (['inputfile'], {}), '(inputfile)\n', (2156, 2167), True, 'import numpy as np\n'), ((3218, 3255), 'numpy.maximum', 'np.maximum', (['low', 'inp_train', 'inp_train'], {}), '(low, inp_train, inp_train)\n', (3228, 3255), True, 'import numpy as np\n'), ((3490, 3502), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3500, 3502), True, 'import matplotlib.pyplot as plt\n'), ((3534, 3602), 'matplotlib.pyplot.hist', 'plt.hist', (['inp_train[targets_train]', '(50)'], {'range': '(low, high)', 'alpha': '(0.8)'}), '(inp_train[targets_train], 50, range=(low, high), alpha=0.8)\n', (3542, 3602), True, 'import matplotlib.pyplot as plt\n'), ((3670, 3722), 'matplotlib.pyplot.hist', 'plt.hist', (['inp_train[~targets_train]', 'bins'], {'alpha': '(0.8)'}), '(inp_train[~targets_train], bins, alpha=0.8)\n', (3678, 3722), True, 'import matplotlib.pyplot as plt\n'), ((3734, 3776), 'matplotlib.pyplot.axvline', 'plt.axvline', (['thr'], {'linestyle': '""":"""', 'color': '"""r"""'}), "(thr, linestyle=':', color='r')\n", (3745, 3776), True, 'import matplotlib.pyplot as plt\n'), ((3789, 3824), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(inputfile[:-3] + 'png')"], 
{}), "(inputfile[:-3] + 'png')\n", (3800, 3824), True, 'import matplotlib.pyplot as plt\n'), ((2288, 2342), 'scipy.ndimage.filters.median_filter', 'median_filter', (['p', 'options.smooth_width'], {'mode': '"""nearest"""'}), "(p, options.smooth_width, mode='nearest')\n", (2301, 2342), False, 'from scipy.ndimage.filters import median_filter\n'), ((3875, 3970), 'numpy.savez', 'np.savez', (["(inputfile[:-3] + 'hist.npz')"], {'bins': 'bins', 'thr': 'thr', 'posvals': 'posvals', 'negvals': 'negvals'}), "(inputfile[:-3] + 'hist.npz', bins=bins, thr=thr, posvals=posvals,\n negvals=negvals)\n", (3883, 3970), True, 'import numpy as np\n'), ((3176, 3202), 'numpy.partition', 'np.partition', (['inp_train', 'k'], {}), '(inp_train, k)\n', (3188, 3202), True, 'import numpy as np\n'), ((3344, 3363), 'numpy.log10', 'np.log10', (['inp_train'], {}), '(inp_train)\n', (3352, 3363), True, 'import numpy as np\n'), ((3429, 3442), 'numpy.log10', 'np.log10', (['thr'], {}), '(thr)\n', (3437, 3442), True, 'import numpy as np\n'), ((2770, 2803), 'numpy.concatenate', 'np.concatenate', (['inp[splits[part]]'], {}), '(inp[splits[part]])\n', (2784, 2803), True, 'import numpy as np\n'), ((2836, 2873), 'numpy.concatenate', 'np.concatenate', (['targets[splits[part]]'], {}), '(targets[splits[part]])\n', (2850, 2873), True, 'import numpy as np\n'), ((2000, 2037), 'numpy.concatenate', 'np.concatenate', (['targets[splits[part]]'], {}), '(targets[splits[part]])\n', (2014, 2037), True, 'import numpy as np\n')] |
"""
Derived module from dmdbase.py for dmd with control.
Reference:
- <NAME>., <NAME>. and <NAME>., 2016. Dynamic mode decomposition
with control. SIAM Journal on Applied Dynamical Systems, 15(1), pp.142-161.
"""
from .dmdbase import DMDBase
from past.utils import old_div
import numpy as np
class DMDc(DMDBase):
"""
Dynamic Mode Decomposition with control.
This version does not allow to manipulate the temporal window within the
system is reconstructed.
:param svd_rank: the rank for the truncation; If 0, the method computes the
optimal rank and uses it for truncation; if positive interger, the
method uses the argument for the truncation; if float between 0 and 1,
the rank is the number of the biggest singular values that are needed
to reach the 'energy' specified by `svd_rank`; if -1, the method does
not compute truncation.
:type svd_rank: int or float
:param int tlsq_rank: rank truncation computing Total Least Square. Default
is 0, that means no truncation.
:param bool opt: flag to compute optimal amplitudes. See :class:`DMDBase`.
Default is False.
"""
    def __init__(self, svd_rank=0, tlsq_rank=0, opt=False):
        """
        Store the configuration; all operator attributes below are filled in
        later by :meth:`fit`.
        """
        self.svd_rank = svd_rank
        self.tlsq_rank = tlsq_rank
        self.opt = opt
        self.original_time = None        # time dict of the fitted snapshots
        self._eigs = None                # eigenvalues of the reduced operator
        self._Atilde = None              # reduced state operator A-tilde
        self._B = None                   # input-to-state operator B
        self._modes = None  # Phi (DMD modes)
        self._b = None  # amplitudes
        self._snapshots = None           # column-major snapshot matrix
        self._snapshots_shape = None     # original shape of a single snapshot
        self._controlin = None           # column-major control-input matrix
        self._controlin_shape = None     # original shape of a single control input
        self._basis = None               # projection basis (see `basis` property)
    @property
    def B(self):
        """
        Get the input-to-state operator B, identified (or provided) in `fit`.

        :return: the operator B.
        :rtype: numpy.ndarray
        """
        return self._B
    @property
    def basis(self):
        """
        Get the basis used to reduce the linear operator to the low dimensional
        space.

        :return: the matrix whose columns are the basis vectors.
        :rtype: numpy.ndarray
        """
        return self._basis
def reconstructed_data(self, control_input=None):
"""
Return the reconstructed data, computed using the `control_input`
argument. If the `control_input` is not passed, the original input (in
the `fit` method) is used. The input dimension has to be consistent
with the dynamics.
:param numpy.ndarray control_input: the input control matrix.
:return: the matrix that contains the reconstructed snapshots.
:rtype: numpy.ndarray
"""
if control_input is None:
controlin, controlin_shape = self._controlin, self._controlin_shape
else:
controlin, controlin_shape = self._col_major_2darray(control_input)
if controlin.shape[1] != self.dynamics.shape[1]-1:
raise RuntimeError(
'The number of control inputs and the number of snapshots to reconstruct has to be the same')
if np.isnan(self.modes).any():
print('Modes contain nans; aborting without error')
return np.nan * np.zeros_like(self._snapshots)
omega = old_div(np.log(self.eigs), self.original_time['dt'])
eigs = np.exp(omega * self.dmd_time['dt'])
A = self.modes.dot(np.diag(eigs)).dot(np.linalg.pinv(self.modes))
data = [self._snapshots[:, 0]]
for i, u in enumerate(controlin.T):
data.append(A.dot(data[i]) + self._B.dot(u))
data = np.array(data).T
return data
def _fit_B_known(self, X, Y, B):
"""
Private method that performs the dynamic mode decomposition algorithm
with control when the matrix `B` is provided.
:param numpy.ndarray X: the first matrix of original snapshots.
:param numpy.ndarray Y: the second matrix of original snapshots.
:param numpy.ndarray I: the input control matrix.
:param numpy.ndarray B: the matrib B.
"""
X, Y = self._compute_tlsq(X, Y, self.tlsq_rank)
U, s, V = self._compute_svd(X, self.svd_rank)
self._Atilde = U.T.conj().dot(Y - B.dot(self._controlin)).dot(V).dot(
np.diag(np.reciprocal(s)))
self._eigs, self._modes = self._eig_from_lowrank_op(
self._Atilde, (Y - B.dot(self._controlin)), U, s, V, True)
self._b = self._compute_amplitudes(self._modes, self._snapshots,
self._eigs, self.opt)
self._B = B
self._basis = U
return self
def _fit_B_unknown(self, X, Y):
"""
Private method that performs the dynamic mode decomposition algorithm
with control when the matrix `B` is not provided.
:param numpy.ndarray X: the first matrix of original snapshots.
:param numpy.ndarray Y: the second matrix of original snapshots.
:param numpy.ndarray I: the input control matrix.
"""
omega = np.vstack([X, self._controlin])
Up, sp, Vp = self._compute_svd(omega, self.svd_rank)
Up1 = Up[:self._snapshots.shape[0], :]
Up2 = Up[self._snapshots.shape[0]:, :]
# TODO: a second svd_rank?
Ur, sr, Vr = self._compute_svd(Y, -1)
self._basis = Ur
reg = 1e-10 # Prevents nan in the reciprocal
self._Atilde = Ur.T.conj().dot(Y).dot(Vp).dot(
np.diag(np.reciprocal(sp+reg))).dot(Up1.T.conj()).dot(Ur)
self._Btilde = Ur.T.conj().dot(Y).dot(Vp).dot(
np.diag(np.reciprocal(sp+reg))).dot(Up2.T.conj())
self._B = Ur.dot(self._Btilde)
self._eigs, modes = np.linalg.eig(self._Atilde)
self._modes = Y.dot(Vp).dot(np.diag(np.reciprocal(sp))).dot(
Up1.T.conj()).dot(Ur).dot(modes)
self._b = self._compute_amplitudes(self._modes, X, self._eigs, self.opt)
def fit(self, X, I, B=None):
"""
Compute the Dynamic Modes Decomposition with control given the original
snapshots and the control input data. The matrix `B` that controls how
the control input influences the system evolution can be provided by
the user; otherwise, it is computed by the algorithm.
:param X: the input snapshots.
:type X: numpy.ndarray or iterable
:param I: the control input.
:type I: numpy.ndarray or iterable
:param numpy.ndarray B:
"""
self._snapshots, self._snapshots_shape = self._col_major_2darray(X)
self._controlin, self._controlin_shape = self._col_major_2darray(I)
n_samples = self._snapshots.shape[1]
X = self._snapshots[:, :-1]
Y = self._snapshots[:, 1:]
self.original_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}
self.dmd_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}
if B is None:
self._fit_B_unknown(X, Y)
else:
self._fit_B_known(X, Y, B)
return self
@property
def label_for_plots(self):
""" Defines a name to be used in plotting"""
name = 'DMDc'
if self.tlsq_rank > 0.0:
name = 'tls-' + name
return name | [
"numpy.linalg.eig",
"numpy.linalg.pinv",
"numpy.reciprocal",
"numpy.log",
"numpy.diag",
"numpy.exp",
"numpy.array",
"numpy.isnan",
"numpy.vstack",
"numpy.zeros_like"
] | [((3279, 3314), 'numpy.exp', 'np.exp', (["(omega * self.dmd_time['dt'])"], {}), "(omega * self.dmd_time['dt'])\n", (3285, 3314), True, 'import numpy as np\n'), ((5011, 5042), 'numpy.vstack', 'np.vstack', (['[X, self._controlin]'], {}), '([X, self._controlin])\n', (5020, 5042), True, 'import numpy as np\n'), ((5671, 5698), 'numpy.linalg.eig', 'np.linalg.eig', (['self._Atilde'], {}), '(self._Atilde)\n', (5684, 5698), True, 'import numpy as np\n'), ((3219, 3236), 'numpy.log', 'np.log', (['self.eigs'], {}), '(self.eigs)\n', (3225, 3236), True, 'import numpy as np\n'), ((3361, 3387), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self.modes'], {}), '(self.modes)\n', (3375, 3387), True, 'import numpy as np\n'), ((3547, 3561), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3555, 3561), True, 'import numpy as np\n'), ((3043, 3063), 'numpy.isnan', 'np.isnan', (['self.modes'], {}), '(self.modes)\n', (3051, 3063), True, 'import numpy as np\n'), ((3163, 3193), 'numpy.zeros_like', 'np.zeros_like', (['self._snapshots'], {}), '(self._snapshots)\n', (3176, 3193), True, 'import numpy as np\n'), ((4239, 4255), 'numpy.reciprocal', 'np.reciprocal', (['s'], {}), '(s)\n', (4252, 4255), True, 'import numpy as np\n'), ((3342, 3355), 'numpy.diag', 'np.diag', (['eigs'], {}), '(eigs)\n', (3349, 3355), True, 'import numpy as np\n'), ((5561, 5584), 'numpy.reciprocal', 'np.reciprocal', (['(sp + reg)'], {}), '(sp + reg)\n', (5574, 5584), True, 'import numpy as np\n'), ((5436, 5459), 'numpy.reciprocal', 'np.reciprocal', (['(sp + reg)'], {}), '(sp + reg)\n', (5449, 5459), True, 'import numpy as np\n'), ((5743, 5760), 'numpy.reciprocal', 'np.reciprocal', (['sp'], {}), '(sp)\n', (5756, 5760), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import numpy
from pyscf import lib
from pyscf.fci import cistring
from pyscf.fci.addons import _unpack_nelec
librdm = lib.load_library('libfci')
######################################################
# Spin squared operator
######################################################
# S^2 = (S+ * S- + S- * S+)/2 + Sz * Sz
# S+ = \sum_i S_i+ ~ effective for all beta occupied orbitals.
# S- = \sum_i S_i- ~ effective for all alpha occupied orbitals.
# There are two cases for S+*S-
# 1) same electron \sum_i s_i+*s_i-, <CI|s_i+*s_i-|CI> gives
# <p|s+s-|q> \gammalpha_qp = trace(\gammalpha) = neleca
# 2) different electrons for \sum s_i+*s_j- (i\neq j, n*(n-1) terms)
# As a two-particle operator S+*S-
# = <ij|s+s-|kl>Gamma_{ik,jl} = <iajb|s+s-|kbla>Gamma_{iakb,jbla}
# = <ia|s+|kb><jb|s-|la>Gamma_{iakb,jbla}
# <CI|S+*S-|CI> = neleca + <ia|s+|kb><jb|s-|la>Gamma_{iakb,jbla}
#
# There are two cases for S-*S+
# 1) same electron \sum_i s_i-*s_i+
# <p|s+s-|q> \gammabeta_qp = trace(\gammabeta) = nelecb
# 2) different electrons
# = <ij|s-s+|kl>Gamma_{ik,jl} = <ibja|s-s+|kalb>Gamma_{ibka,jalb}
# = <ib|s-|ka><ja|s+|lb>Gamma_{ibka,jalb}
# <CI|S-*S+|CI> = nelecb + <ib|s-|ka><ja|s+|lb>Gamma_{ibka,jalb}
#
# Sz*Sz = Msz^2 = (neleca-nelecb)^2
# 1) same electron
# <p|ss|q>\gamma_qp = <p|q>\gamma_qp = (neleca+nelecb)/4
# 2) different electrons
# <ij|2s1s2|kl>Gamma_{ik,jl}/2
# =(<ia|ka><ja|la>Gamma_{iaka,jala} - <ia|ka><jb|lb>Gamma_{iaka,jblb}
# - <ib|kb><ja|la>Gamma_{ibkb,jala} + <ib|kb><jb|lb>Gamma_{ibkb,jblb})/4
# set aolst for local spin expectation value, which is defined as
# <CI|ao><ao|S^2|CI>
# For a complete list of AOs, I = \sum |ao><ao|, it becomes <CI|S^2|CI>
def spin_square_general(dm1a, dm1b, dm2aa, dm2ab, dm2bb, mo_coeff, ovlp=1):
    r'''General spin square operator.

    ... math::

        <CI|S_+*S_-|CI> &= n_\alpha + \delta_{ik}\delta_{jl}Gamma_{i\alpha k\beta ,j\beta l\alpha } \\
        <CI|S_-*S_+|CI> &= n_\beta + \delta_{ik}\delta_{jl}Gamma_{i\beta k\alpha ,j\alpha l\beta } \\
        <CI|S_z*S_z|CI> &= \delta_{ik}\delta_{jl}(Gamma_{i\alpha k\alpha ,j\alpha l\alpha }
                         - Gamma_{i\alpha k\alpha ,j\beta l\beta }
                         - Gamma_{i\beta k\beta ,j\alpha l\alpha}
                         + Gamma_{i\beta k\beta ,j\beta l\beta})
                         + (n_\alpha+n_\beta)/4

    Given the overlap betwen non-degenerate alpha and beta orbitals, this
    function can compute the expectation value spin square operator for
    UHF-FCI wavefunction
    '''
    if isinstance(mo_coeff, numpy.ndarray) and mo_coeff.ndim == 2:
        mo_coeff = (mo_coeff, mo_coeff)
    moa, mob = mo_coeff[0], mo_coeff[1]

    # Projected overlap matrix elements used for the partial trace.
    if isinstance(ovlp, numpy.ndarray):
        ovlpaa = reduce(numpy.dot, (moa.T, ovlp, moa))
        ovlpbb = reduce(numpy.dot, (mob.T, ovlp, mob))
        ovlpab = reduce(numpy.dot, (moa.T, ovlp, mob))
        ovlpba = reduce(numpy.dot, (mob.T, ovlp, moa))
    else:
        ovlpaa = numpy.dot(moa.T, moa)
        ovlpbb = numpy.dot(mob.T, mob)
        ovlpab = numpy.dot(moa.T, mob)
        ovlpba = numpy.dot(mob.T, moa)

    # Two-particle and one-particle contractions entering Sz*Sz.
    e_aa = numpy.einsum('ijkl,ij,kl->', dm2aa, ovlpaa, ovlpaa)
    e_ab = numpy.einsum('ijkl,ij,kl->', dm2ab, ovlpaa, ovlpbb)
    e_bb = numpy.einsum('ijkl,ij,kl->', dm2bb, ovlpbb, ovlpbb)
    tr_a = numpy.einsum('ji,ij->', dm1a, ovlpaa)
    tr_b = numpy.einsum('ji,ij->', dm1b, ovlpbb)
    # if ovlp=1, ssz = (neleca-nelecb)**2 * .25
    ssz = (e_aa - e_ab + e_bb - e_ab) * .25 + (tr_a + tr_b) * .25

    # Spin-flip 2-RDMs obtained by reordering the mixed-spin 2-RDM.
    dm2abba = -dm2ab.transpose(0, 3, 2, 1)  # alpha^+ beta^+ alpha beta
    dm2baab = -dm2ab.transpose(2, 1, 0, 3)  # beta^+ alpha^+ beta alpha
    ssxy = (numpy.einsum('ijkl,ij,kl->', dm2baab, ovlpba, ovlpab)
            + numpy.einsum('ijkl,ij,kl->', dm2abba, ovlpab, ovlpba)
            + tr_a + tr_b) * .5

    ss = ssxy + ssz
    s = numpy.sqrt(ss + .25) - .5
    return ss, s * 2 + 1
@lib.with_doc(spin_square_general.__doc__)
def spin_square(fcivec, norb, nelec, mo_coeff=None, ovlp=1):
    from pyscf.fci import direct_spin1
    if mo_coeff is None:
        # Identity MO coefficients for both spins when none are given.
        eye = numpy.eye(norb)
        mo_coeff = (eye, eye)
    dms = direct_spin1.make_rdm12s(fcivec, norb, nelec)
    (dm1a, dm1b), (dm2aa, dm2ab, dm2bb) = dms
    return spin_square_general(dm1a, dm1b, dm2aa, dm2ab, dm2bb, mo_coeff, ovlp)
def spin_square0(fcivec, norb, nelec):
    '''Spin square for RHF-FCI CI wfn only (obtained from spin-degenerated
    Hamiltonian)'''
    ci1 = contract_ss(fcivec, norb, nelec)
    # <CI|S^2|CI>: elementwise dot of the CI vector with S^2|CI>.
    ss = numpy.einsum('ij,ij->', fcivec.reshape(ci1.shape), ci1)
    s = numpy.sqrt(ss + .25) - .5
    return ss, 2 * s + 1
def local_spin(fcivec, norb, nelec, mo_coeff=None, ovlp=1, aolst=[]):
    r'''Local spin expectation value, which is defined as

    <CI|(local S^2)|CI>

    The local S^2 operator only couples the orbitals specified in aolst. The
    cross term which involves the interaction between the local part (in aolst)
    and non-local part (not in aolst) is not included. As a result, the value
    of local_spin is not additive. In other words, if local_spin is computed
    twice with the complementary aolst in the two runs, the summation does not
    equal to the S^2 of the entire system.

    For a complete list of AOs, the value of local_spin is equivalent to <CI|S^2|CI>
    '''
    if isinstance(ovlp, numpy.ndarray):
        # Zero out the rows/columns that are outside aolst in a copy of the
        # AO overlap; an empty aolst keeps the full overlap.
        s = ovlp.copy()
        if len(aolst) > 0:
            excluded = [i for i in range(ovlp.shape[0]) if i not in aolst]
            s[excluded] = 0
            s[:, excluded] = 0
    else:
        # No overlap matrix given: use a diagonal projector on aolst
        # (the whole orbital space when aolst is empty).
        s = numpy.zeros((norb, norb))
        idx = aolst if len(aolst) > 0 else numpy.arange(norb)
        s[idx, idx] = 1
    return spin_square(fcivec, norb, nelec, mo_coeff, s)
# for S+*S-
# dm(pq,rs) * [p(beta)^+ q(alpha) r(alpha)^+ s(beta)]
# size of intermediate determinants (norb,neleca+1;norb,nelecb-1)
def make_rdm2_baab(fcivec, norb, nelec):
    """Mixed-spin 2-RDM in the beta^+ alpha^+ beta alpha operator order."""
    from pyscf.fci import direct_spin1
    _, (dm2aa, dm2ab, dm2bb) = direct_spin1.make_rdm12s(fcivec, norb, nelec)
    return -dm2ab.transpose(2, 1, 0, 3)
# for S-*S+
# dm(pq,rs) * [q(alpha)^+ p(beta) s(beta)^+ r(alpha)]
# size of intermediate determinants (norb,neleca-1;norb,nelecb+1)
def make_rdm2_abba(fcivec, norb, nelec):
    """Mixed-spin 2-RDM in the alpha^+ beta^+ alpha beta operator order."""
    from pyscf.fci import direct_spin1
    _, (dm2aa, dm2ab, dm2bb) = direct_spin1.make_rdm12s(fcivec, norb, nelec)
    return -dm2ab.transpose(0, 3, 2, 1)
def contract_ss(fcivec, norb, nelec):
    '''Contract spin square operator with FCI wavefunction :math:`S^2 |CI>`
    '''
    neleca, nelecb = _unpack_nelec(nelec)
    na = cistring.num_strings(norb, neleca)
    nb = cistring.num_strings(norb, nelecb)
    fcivec = fcivec.reshape(na,nb)
    def gen_map(fstr_index, nelec, des=True):
        # Build a dense (nstrings, norb, 2) lookup table: for each determinant
        # string and orbital, [target string address, sign] of the single
        # creation/destruction; entries stay 0 when the operation vanishes.
        a_index = fstr_index(range(norb), nelec)
        amap = numpy.zeros((a_index.shape[0],norb,2), dtype=numpy.int32)
        if des:
            for k, tab in enumerate(a_index):
                amap[k,tab[:,1]] = tab[:,2:]
        else:
            for k, tab in enumerate(a_index):
                amap[k,tab[:,0]] = tab[:,2:]
        return amap
    # Destruction/creation maps exist only when the operation is possible
    # (cannot destroy from 0 electrons nor create into a full shell).
    if neleca > 0:
        ades = gen_map(cistring.gen_des_str_index, neleca)
    else:
        ades = None
    if nelecb > 0:
        bdes = gen_map(cistring.gen_des_str_index, nelecb)
    else:
        bdes = None
    if neleca < norb:
        acre = gen_map(cistring.gen_cre_str_index, neleca, False)
    else:
        acre = None
    if nelecb < norb:
        bcre = gen_map(cistring.gen_cre_str_index, nelecb, False)
    else:
        bcre = None
    def trans(ci1, aindex, bindex, nea, neb):
        # Accumulate one ladder-operator product (S+S- or S-S+) into ci1 by
        # scattering into the intermediate (nea, neb) determinant space (t1)
        # and gathering back.  No-op when either map is unavailable.
        if aindex is None or bindex is None:
            return None
        t1 = numpy.zeros((cistring.num_strings(norb,nea),
                          cistring.num_strings(norb,neb)))
        for i in range(norb):
            # Rows/columns with non-zero sign are the determinants on which
            # the operator acting on orbital i does not vanish.
            signa = aindex[:,i,1]
            signb = bindex[:,i,1]
            maska = numpy.where(signa!=0)[0]
            maskb = numpy.where(signb!=0)[0]
            addra = aindex[maska,i,0]
            addrb = bindex[maskb,i,0]
            citmp = lib.take_2d(fcivec, maska, maskb)
            citmp *= signa[maska].reshape(-1,1)
            citmp *= signb[maskb]
            #: t1[addra.reshape(-1,1),addrb] += citmp
            lib.takebak_2d(t1, citmp, addra, addrb)
        for i in range(norb):
            # Second half of the operator product: map the intermediate back
            # to the original (na, nb) determinant space.
            signa = aindex[:,i,1]
            signb = bindex[:,i,1]
            maska = numpy.where(signa!=0)[0]
            maskb = numpy.where(signb!=0)[0]
            addra = aindex[maska,i,0]
            addrb = bindex[maskb,i,0]
            citmp = lib.take_2d(t1, addra, addrb)
            citmp *= signa[maska].reshape(-1,1)
            citmp *= signb[maskb]
            #: ci1[maska.reshape(-1,1), maskb] += citmp
            lib.takebak_2d(ci1, citmp, maska, maskb)
    ci1 = numpy.zeros((na,nb))
    trans(ci1, ades, bcre, neleca-1, nelecb+1) # S+*S-
    trans(ci1, acre, bdes, neleca+1, nelecb-1) # S-*S+
    # S^2 = (S+S- + S-S+)/2 + Sz*Sz; the Sz*Sz part is diagonal with
    # eigenvalue Msz^2 = ((neleca-nelecb)/2)^2 (see module header comments).
    ci1 *= .5
    ci1 += (neleca-nelecb)**2*.25*fcivec
    return ci1
if __name__ == '__main__':
    from pyscf import gto
    from pyscf import scf
    from pyscf import ao2mo
    from pyscf import fci
    # Small self-test: 10 hydrogen atoms in a minimal basis.
    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None
    mol.atom = [
        ['H', ( 1.,-1. , 0. )],
        ['H', ( 0.,-1. ,-1. )],
        ['H', ( 1.,-0.5 ,-1. )],
        ['H', ( 0.,-0.5 ,-1. )],
        ['H', ( 0.,-0.5 ,-0. )],
        ['H', ( 0.,-0. ,-1. )],
        ['H', ( 1.,-0.5 , 0. )],
        ['H', ( 0., 1. , 1. )],
        ['H', ( 0.,-1. ,-2. )],
        ['H', ( 1.,-1.5 , 1. )],
    ]
    mol.basis = {'H': 'sto-3g'}
    mol.build()
    # RHF reference followed by an FCI calculation in the MO basis.
    m = scf.RHF(mol)
    ehf = m.scf()
    cis = fci.solver(mol)
    cis.verbose = 5
    norb = m.mo_coeff.shape[1]
    nelec = mol.nelectron
    h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
    eri = ao2mo.incore.full(m._eri, m.mo_coeff)
    e, ci0 = cis.kernel(h1e, eri, norb, nelec)
    # Compare the two S^2 implementations on the converged CI vector.
    ss = spin_square(ci0, norb, nelec)
    print(ss)
    ss = spin_square0(ci0, norb, nelec)
    print(ss)
    # Local spin restricted to the first five AOs.
    ss = local_spin(ci0, norb, nelec, m.mo_coeff, m.get_ovlp(), range(5))
    print('local spin for H1..H5 = 0.999', ss[0])
    # Sanity check on a trivial (3 alpha, 1 beta) CI vector.
    ci1 = numpy.zeros((4,4))
    ci1[0,0] = 1
    print(spin_square (ci1, 4, (3,1)))
    print(spin_square0(ci1, 4, (3,1)))
    print(numpy.einsum('ij,ij->', ci1, contract_ss(ci1, 4, (3,1))),
          spin_square(ci1, 4, (3,1))[0])
| [
"pyscf.lib.takebak_2d",
"pyscf.fci.solver",
"numpy.sqrt",
"pyscf.lib.take_2d",
"numpy.einsum",
"pyscf.lib.with_doc",
"pyscf.lib.load_library",
"numpy.arange",
"numpy.where",
"numpy.dot",
"pyscf.fci.direct_spin1.make_rdm12s",
"numpy.eye",
"functools.reduce",
"pyscf.gto.Mole",
"pyscf.fci.a... | [((782, 808), 'pyscf.lib.load_library', 'lib.load_library', (['"""libfci"""'], {}), "('libfci')\n", (798, 808), False, 'from pyscf import lib\n'), ((4868, 4909), 'pyscf.lib.with_doc', 'lib.with_doc', (['spin_square_general.__doc__'], {}), '(spin_square_general.__doc__)\n', (4880, 4909), False, 'from pyscf import lib\n'), ((5134, 5179), 'pyscf.fci.direct_spin1.make_rdm12s', 'direct_spin1.make_rdm12s', (['fcivec', 'norb', 'nelec'], {}), '(fcivec, norb, nelec)\n', (5158, 5179), False, 'from pyscf.fci import direct_spin1\n'), ((7564, 7584), 'pyscf.fci.addons._unpack_nelec', '_unpack_nelec', (['nelec'], {}), '(nelec)\n', (7577, 7584), False, 'from pyscf.fci.addons import _unpack_nelec\n'), ((7594, 7628), 'pyscf.fci.cistring.num_strings', 'cistring.num_strings', (['norb', 'neleca'], {}), '(norb, neleca)\n', (7614, 7628), False, 'from pyscf.fci import cistring\n'), ((7638, 7672), 'pyscf.fci.cistring.num_strings', 'cistring.num_strings', (['norb', 'nelecb'], {}), '(norb, nelecb)\n', (7658, 7672), False, 'from pyscf.fci import cistring\n'), ((9821, 9842), 'numpy.zeros', 'numpy.zeros', (['(na, nb)'], {}), '((na, nb))\n', (9832, 9842), False, 'import numpy\n'), ((10168, 10178), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (10176, 10178), False, 'from pyscf import gto\n'), ((10672, 10684), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (10679, 10684), False, 'from pyscf import scf\n'), ((10714, 10729), 'pyscf.fci.solver', 'fci.solver', (['mol'], {}), '(mol)\n', (10724, 10729), False, 'from pyscf import fci\n'), ((10888, 10925), 'pyscf.ao2mo.incore.full', 'ao2mo.incore.full', (['m._eri', 'm.mo_coeff'], {}), '(m._eri, m.mo_coeff)\n', (10905, 10925), False, 'from pyscf import ao2mo\n'), ((11214, 11233), 'numpy.zeros', 'numpy.zeros', (['(4, 4)'], {}), '((4, 4))\n', (11225, 11233), False, 'import numpy\n'), ((3474, 3527), 'functools.reduce', 'reduce', (['numpy.dot', '(mo_coeff[0].T, ovlp, mo_coeff[0])'], {}), '(numpy.dot, (mo_coeff[0].T, ovlp, 
mo_coeff[0]))\n', (3480, 3527), False, 'from functools import reduce\n'), ((3545, 3598), 'functools.reduce', 'reduce', (['numpy.dot', '(mo_coeff[1].T, ovlp, mo_coeff[1])'], {}), '(numpy.dot, (mo_coeff[1].T, ovlp, mo_coeff[1]))\n', (3551, 3598), False, 'from functools import reduce\n'), ((3616, 3669), 'functools.reduce', 'reduce', (['numpy.dot', '(mo_coeff[0].T, ovlp, mo_coeff[1])'], {}), '(numpy.dot, (mo_coeff[0].T, ovlp, mo_coeff[1]))\n', (3622, 3669), False, 'from functools import reduce\n'), ((3687, 3740), 'functools.reduce', 'reduce', (['numpy.dot', '(mo_coeff[1].T, ovlp, mo_coeff[0])'], {}), '(numpy.dot, (mo_coeff[1].T, ovlp, mo_coeff[0]))\n', (3693, 3740), False, 'from functools import reduce\n'), ((3768, 3805), 'numpy.dot', 'numpy.dot', (['mo_coeff[0].T', 'mo_coeff[0]'], {}), '(mo_coeff[0].T, mo_coeff[0])\n', (3777, 3805), False, 'import numpy\n'), ((3823, 3860), 'numpy.dot', 'numpy.dot', (['mo_coeff[1].T', 'mo_coeff[1]'], {}), '(mo_coeff[1].T, mo_coeff[1])\n', (3832, 3860), False, 'import numpy\n'), ((3878, 3915), 'numpy.dot', 'numpy.dot', (['mo_coeff[0].T', 'mo_coeff[1]'], {}), '(mo_coeff[0].T, mo_coeff[1])\n', (3887, 3915), False, 'import numpy\n'), ((3933, 3970), 'numpy.dot', 'numpy.dot', (['mo_coeff[1].T', 'mo_coeff[0]'], {}), '(mo_coeff[1].T, mo_coeff[0])\n', (3942, 3970), False, 'import numpy\n'), ((4801, 4822), 'numpy.sqrt', 'numpy.sqrt', (['(ss + 0.25)'], {}), '(ss + 0.25)\n', (4811, 4822), False, 'import numpy\n'), ((5512, 5533), 'numpy.sqrt', 'numpy.sqrt', (['(ss + 0.25)'], {}), '(ss + 0.25)\n', (5522, 5533), False, 'import numpy\n'), ((6616, 6641), 'numpy.zeros', 'numpy.zeros', (['(norb, norb)'], {}), '((norb, norb))\n', (6627, 6641), False, 'import numpy\n'), ((6964, 7009), 'pyscf.fci.direct_spin1.make_rdm12s', 'direct_spin1.make_rdm12s', (['fcivec', 'norb', 'nelec'], {}), '(fcivec, norb, nelec)\n', (6988, 7009), False, 'from pyscf.fci import direct_spin1\n'), ((7311, 7356), 'pyscf.fci.direct_spin1.make_rdm12s', 'direct_spin1.make_rdm12s', 
(['fcivec', 'norb', 'nelec'], {}), '(fcivec, norb, nelec)\n', (7335, 7356), False, 'from pyscf.fci import direct_spin1\n'), ((7819, 7878), 'numpy.zeros', 'numpy.zeros', (['(a_index.shape[0], norb, 2)'], {'dtype': 'numpy.int32'}), '((a_index.shape[0], norb, 2), dtype=numpy.int32)\n', (7830, 7878), False, 'import numpy\n'), ((4226, 4277), 'numpy.einsum', 'numpy.einsum', (['"""ijkl,ij,kl->"""', 'dm2ab', 'ovlpaa', 'ovlpbb'], {}), "('ijkl,ij,kl->', dm2ab, ovlpaa, ovlpbb)\n", (4238, 4277), False, 'import numpy\n'), ((4297, 4334), 'numpy.einsum', 'numpy.einsum', (['"""ji,ij->"""', 'dm1a', 'ovlpaa'], {}), "('ji,ij->', dm1a, ovlpaa)\n", (4309, 4334), False, 'import numpy\n'), ((4349, 4386), 'numpy.einsum', 'numpy.einsum', (['"""ji,ij->"""', 'dm1b', 'ovlpbb'], {}), "('ji,ij->', dm1b, ovlpbb)\n", (4361, 4386), False, 'import numpy\n'), ((4728, 4765), 'numpy.einsum', 'numpy.einsum', (['"""ji,ij->"""', 'dm1b', 'ovlpbb'], {}), "('ji,ij->', dm1b, ovlpbb)\n", (4740, 4765), False, 'import numpy\n'), ((6585, 6603), 'numpy.arange', 'numpy.arange', (['norb'], {}), '(norb)\n', (6597, 6603), False, 'import numpy\n'), ((9083, 9116), 'pyscf.lib.take_2d', 'lib.take_2d', (['fcivec', 'maska', 'maskb'], {}), '(fcivec, maska, maskb)\n', (9094, 9116), False, 'from pyscf import lib\n'), ((9265, 9304), 'pyscf.lib.takebak_2d', 'lib.takebak_2d', (['t1', 'citmp', 'addra', 'addrb'], {}), '(t1, citmp, addra, addrb)\n', (9279, 9304), False, 'from pyscf import lib\n'), ((9589, 9618), 'pyscf.lib.take_2d', 'lib.take_2d', (['t1', 'addra', 'addrb'], {}), '(t1, addra, addrb)\n', (9600, 9618), False, 'from pyscf import lib\n'), ((9769, 9809), 'pyscf.lib.takebak_2d', 'lib.takebak_2d', (['ci1', 'citmp', 'maska', 'maskb'], {}), '(ci1, citmp, maska, maskb)\n', (9783, 9809), False, 'from pyscf import lib\n'), ((4161, 4212), 'numpy.einsum', 'numpy.einsum', (['"""ijkl,ij,kl->"""', 'dm2bb', 'ovlpbb', 'ovlpbb'], {}), "('ijkl,ij,kl->', dm2bb, ovlpbb, ovlpbb)\n", (4173, 4212), False, 'import numpy\n'), ((4677, 4714), 
'numpy.einsum', 'numpy.einsum', (['"""ji,ij->"""', 'dm1a', 'ovlpaa'], {}), "('ji,ij->', dm1a, ovlpaa)\n", (4689, 4714), False, 'import numpy\n'), ((5055, 5070), 'numpy.eye', 'numpy.eye', (['norb'], {}), '(norb)\n', (5064, 5070), False, 'import numpy\n'), ((8708, 8739), 'pyscf.fci.cistring.num_strings', 'cistring.num_strings', (['norb', 'nea'], {}), '(norb, nea)\n', (8728, 8739), False, 'from pyscf.fci import cistring\n'), ((8766, 8797), 'pyscf.fci.cistring.num_strings', 'cistring.num_strings', (['norb', 'neb'], {}), '(norb, neb)\n', (8786, 8797), False, 'from pyscf.fci import cistring\n'), ((8917, 8940), 'numpy.where', 'numpy.where', (['(signa != 0)'], {}), '(signa != 0)\n', (8928, 8940), False, 'import numpy\n'), ((8962, 8985), 'numpy.where', 'numpy.where', (['(signb != 0)'], {}), '(signb != 0)\n', (8973, 8985), False, 'import numpy\n'), ((9423, 9446), 'numpy.where', 'numpy.where', (['(signa != 0)'], {}), '(signa != 0)\n', (9434, 9446), False, 'import numpy\n'), ((9468, 9491), 'numpy.where', 'numpy.where', (['(signb != 0)'], {}), '(signb != 0)\n', (9479, 9491), False, 'import numpy\n'), ((4031, 4082), 'numpy.einsum', 'numpy.einsum', (['"""ijkl,ij,kl->"""', 'dm2aa', 'ovlpaa', 'ovlpaa'], {}), "('ijkl,ij,kl->', dm2aa, ovlpaa, ovlpaa)\n", (4043, 4082), False, 'import numpy\n'), ((4096, 4147), 'numpy.einsum', 'numpy.einsum', (['"""ijkl,ij,kl->"""', 'dm2ab', 'ovlpaa', 'ovlpbb'], {}), "('ijkl,ij,kl->', dm2ab, ovlpaa, ovlpbb)\n", (4108, 4147), False, 'import numpy\n'), ((4543, 4596), 'numpy.einsum', 'numpy.einsum', (['"""ijkl,ij,kl->"""', 'dm2baab', 'ovlpba', 'ovlpab'], {}), "('ijkl,ij,kl->', dm2baab, ovlpba, ovlpab)\n", (4555, 4596), False, 'import numpy\n'), ((4610, 4663), 'numpy.einsum', 'numpy.einsum', (['"""ijkl,ij,kl->"""', 'dm2abba', 'ovlpab', 'ovlpba'], {}), "('ijkl,ij,kl->', dm2abba, ovlpab, ovlpba)\n", (4622, 4663), False, 'import numpy\n')] |
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements STRIP: A Defence Against Trojan Attacks on Deep Neural Networks.
| Paper link: https://arxiv.org/abs/1902.06531
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import Callable, Optional
import numpy as np
from scipy.stats import entropy, norm
from tqdm.auto import tqdm
from art.estimators.certification.abstain import AbstainPredictorMixin
logger = logging.getLogger(__name__)
class STRIPMixin(AbstainPredictorMixin):
    """
    Implementation of STRIP: A Defence Against Trojan Attacks on Deep Neural Networks (Gao et. al. 2020)

    | Paper link: https://arxiv.org/abs/1902.06531
    """

    def __init__(
        self,
        predict_fn: Callable[[np.ndarray], np.ndarray],
        num_samples: int = 20,
        false_acceptance_rate: float = 0.01,
        **kwargs
    ) -> None:  # pragma: no cover
        """
        Create a STRIP defense

        :param predict_fn: The predict function of the original classifier
        :param num_samples: The number of samples to use to test entropy at inference time
        :param false_acceptance_rate: The percentage of acceptable false acceptance
        """
        super().__init__(**kwargs)
        self.predict_fn = predict_fn
        self.num_samples = num_samples
        self.false_acceptance_rate = false_acceptance_rate
        self.entropy_threshold: Optional[float] = None
        self.validation_data: Optional[np.ndarray] = None

    def _normalized_entropy(self, img: np.ndarray, x_val: np.ndarray) -> float:
        """
        Blend `img` with randomly chosen samples from `x_val` and return the
        normalized entropy of the classifier predictions on the blends.

        Extracted so that `predict` and `mitigate` share one implementation
        (this logic was previously duplicated in both methods).

        :param img: A single input sample.
        :param x_val: Pool of samples to blend with.
        :return: Summed prediction entropy divided by `num_samples`.
        """
        selected_indices = np.random.choice(np.arange(len(x_val)), self.num_samples)
        # Perturb the image by superimposing it on each selected sample
        perturbed_images = np.array([combine_images(img, x_val[idx]) for idx in selected_indices])
        perturbed_predictions = self.predict_fn(perturbed_images)
        return np.sum(entropy(perturbed_predictions, base=2, axis=0)) / float(self.num_samples)

    def predict(self, x: np.ndarray) -> np.ndarray:
        """
        Perform prediction of the given classifier for a batch of inputs, potentially filtering suspicious input

        :param x: Input samples.
        :return: Array of predictions of shape `(nb_inputs, nb_classes)`.
        """
        raw_predictions = self.predict_fn(x)

        if self.entropy_threshold is None or self.validation_data is None:  # pragma: no cover
            logger.warning("Mitigation has not been performed. Predictions may be unsafe.")
            return raw_predictions

        x_val = self.validation_data
        final_predictions = []

        for i, img in enumerate(x):
            normalized_entropy = self._normalized_entropy(img, x_val)
            # Low entropy means the prediction is insensitive to perturbation —
            # the STRIP signature of a trojaned input — so abstain.
            if normalized_entropy <= self.entropy_threshold:
                final_predictions.append(self.abstain())
            else:
                final_predictions.append(raw_predictions[i])

        return np.array(final_predictions)

    def mitigate(self, x_val: np.ndarray) -> None:
        """
        Mitigates the effect of poison on a classifier

        :param x_val: Validation data to use to mitigate the effect of poison.
        """
        self.validation_data = x_val
        # Build the distribution of normalized entropies over clean data.
        entropies = [self._normalized_entropy(img, x_val) for img in tqdm(x_val)]

        # Fit a normal distribution and set the abstention threshold at the
        # false-acceptance-rate percentile.
        mean_entropy, std_entropy = norm.fit(entropies)
        self.entropy_threshold = norm.ppf(self.false_acceptance_rate, loc=mean_entropy, scale=std_entropy)

        if self.entropy_threshold is not None and self.entropy_threshold < 0:  # pragma: no cover
            logger.warning("Entropy value is negative. Increase FAR for reasonable performance.")
def combine_images(img1: np.ndarray, img2: np.ndarray, alpha=0.5) -> np.ndarray:
    """
    Blend two same-shaped Numpy arrays into a weighted average.

    :param img1: a Numpy array
    :param img2: a Numpy array
    :param alpha: weight given to `img1` (the remainder goes to `img2`)
    :return: The combined image
    """
    weight_img2 = 1 - alpha
    return img1 * alpha + img2 * weight_img2
| [
"logging.getLogger",
"scipy.stats.entropy",
"scipy.stats.norm.ppf",
"scipy.stats.norm.fit",
"numpy.array",
"tqdm.auto.tqdm"
] | [((1583, 1610), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1600, 1610), False, 'import logging\n'), ((4142, 4169), 'numpy.array', 'np.array', (['final_predictions'], {}), '(final_predictions)\n', (4150, 4169), True, 'import numpy as np\n'), ((4993, 5012), 'scipy.stats.norm.fit', 'norm.fit', (['entropies'], {}), '(entropies)\n', (5001, 5012), False, 'from scipy.stats import entropy, norm\n'), ((5089, 5162), 'scipy.stats.norm.ppf', 'norm.ppf', (['self.false_acceptance_rate'], {'loc': 'mean_entropy', 'scale': 'std_entropy'}), '(self.false_acceptance_rate, loc=mean_entropy, scale=std_entropy)\n', (5097, 5162), False, 'from scipy.stats import entropy, norm\n'), ((4517, 4528), 'tqdm.auto.tqdm', 'tqdm', (['x_val'], {}), '(x_val)\n', (4521, 4528), False, 'from tqdm.auto import tqdm\n'), ((3802, 3848), 'scipy.stats.entropy', 'entropy', (['perturbed_predictions'], {'base': '(2)', 'axis': '(0)'}), '(perturbed_predictions, base=2, axis=0)\n', (3809, 3848), False, 'from scipy.stats import entropy, norm\n'), ((4833, 4879), 'scipy.stats.entropy', 'entropy', (['perturbed_predictions'], {'base': '(2)', 'axis': '(0)'}), '(perturbed_predictions, base=2, axis=0)\n', (4840, 4879), False, 'from scipy.stats import entropy, norm\n')] |
#!/usr/bin/env python
#++++++++++++++++++++++++++++++++++++++++
# LAPART 1 Test +++++++++++++++++++++++++
# +++++++++++++++++++++++++++++++++++++++
# Copyright C. 2017, <NAME> ++++++
#++++++++++++++++++++++++++++++++++++++++
import time
import math
import numpy as np
import pandas as pd
from .art import ART
def norm(data, ma, mi):
    """
    Min-max normalize `data` column-wise to [0, 1].

    :param data: 2-D array of raw values, shape (n_samples, n_features)
    :param ma: per-feature maxima, shape (n_features, 1) (or flattenable)
    :param mi: per-feature minima, same shape as `ma`
    :return: float array of the same shape as `data`, each column scaled
             by (x - min) / (max - min)
    """
    data = np.asarray(data, dtype=float)
    # Flatten the (n_features, 1) column vectors so they broadcast across
    # columns; this replaces the original O(n*m) Python double loop with a
    # single vectorized operation.
    ma = np.asarray(ma, dtype=float).reshape(-1)
    mi = np.asarray(mi, dtype=float).reshape(-1)
    return (data - mi) / (ma - mi)
def dnorm(data, ma, mi):
    """
    Invert `norm`: map [0, 1]-scaled values back to the original range.

    :param data: 2-D array of normalized values, shape (n_samples, n_features)
    :param ma: per-feature maxima, shape (n_features, 1) (or flattenable)
    :param mi: per-feature minima, same shape as `ma`
    :return: float array of the same shape as `data`, each column rescaled
             by x * (max - min) + min
    """
    data = np.asarray(data, dtype=float)
    # Vectorized broadcast over columns; replaces the original O(n*m)
    # Python double loop.
    ma = np.asarray(ma, dtype=float).reshape(-1)
    mi = np.asarray(mi, dtype=float).reshape(-1)
    return data * (ma - mi) + mi
class test:
    # Runs a trained LAPART-1 network in inference mode: memory (templates,
    # min/max scaling, inter-ART map) is loaded from CSV files written at
    # training time.
    def __init__(self,xA,TA,TB,L,rhoA,rhoB,beta,alpha,nep,memory_folder):
        # Parameters
        self.rhoA = rhoA
        self.rhoB = rhoB
        self.beta = beta
        self.alpha = alpha
        self.nep = nep
        self.folder = memory_folder
        ''' Min and Max of Input Data '''
        # Merge the stored training min/max with the current batch's min/max
        # so normalization covers both ranges.
        maxminA = pd.read_csv('%s/maxminA.csv'%self.folder).values
        past_maxA,past_minA = maxminA[:,1:2],maxminA[:,2:3]
        if len(xA) > 1:
            current_maxA,current_minA = np.array([xA.max(axis=0)]).T,np.array([xA.min(axis=0)]).T
        else:
            current_maxA,current_minA = xA.T,xA.T
        mmA = np.hstack([past_maxA,current_maxA,past_minA,current_minA]).T
        self.maxA,self.minA = np.array([mmA.max(axis=0)]).T,np.array([mmA.min(axis=0)]).T
        maxminB = pd.read_csv('%s/maxminB.csv'%self.folder).values
        self.maxB,self.minB = maxminB[:,1:2],maxminB[:,2:3]
        ''' Normalize Input Data '''
        self.xAn = norm(xA,self.maxA,self.minA)
        ''' Complement Code Data '''
        # Complement coding doubles the feature dimension: [x, 1-x].
        self.IA = np.transpose(np.hstack([self.xAn, 1-self.xAn]))
        ''' Number of Inputs '''
        self.nAB = len(self.IA[0])
        self.TA = np.transpose(TA)
        self.TB = np.transpose(TB)
        self.L = L
        self.ncA = len(TA)
        # Working buffers for the ART search and the output accumulators;
        # sized generously and truncated to len(xA) on return.
        self.minAm = np.ones((len(xA[0])*2,1))
        self.chAm = np.zeros((len(TA)*10,1))
        self.mA = np.zeros((len(TA)*10,1))
        self.CA = np.zeros((len(xA)*2,1))
        self.CB = np.zeros((len(xA)*2,1))
        self.TBt = np.zeros((len(xA)*2,len(self.TB)))
    def lapart_test(self,xA):
        # For each input: find the resonating A-side template, then follow
        # the inter-ART map L to the associated B-side template.
        for ep in range(self.nep):
            for j in range(0,self.nAB):
                ''' Present Inputs to A Side Templates '''
                cmax,ch = ART(self.IA,self.TA,self.mA,self.chAm,self.ncA,self.minAm,self.rhoA,self.beta,j)
                if cmax == -1:
                    ''' A Templates do not resonate '''
                    self.CA[j] = -1
                else:
                    ''' A Templates do resonate '''
                    self.CA[j] = ch
                    for i in range(len(self.L[0])):
                        if self.L[ch,i] == 1:
                            if cmax == -1:
                                self.CB[j] = cmax
                                self.TBt[j] = self.TB[:,i] #np.ones((1,len(xA[0])))
                            else:
                                self.CB[j] = i
                                self.TBt[j] = self.TB[:,i]
                            break
        TB = self.TBt[:len(xA)]
        # De-normalize the predicted B-side templates; complement-coded
        # columns are inverted (1-x) before de-normalizing.
        TBnl,TBnr = np.zeros((len(TB),1)),np.zeros((len(TB),1))
        for i in range(len(TB[0])):
            # NOTE(review): `i == len(TB[0])/2` only matches the single column
            # exactly at the midpoint (float compare in Py3) — presumably all
            # columns >= the midpoint are complements; confirm against trainer.
            if i == len(TB[0])/2:
                TBn = dnorm(1-np.array([TB[:,i]]).T,self.maxB,self.minB)
                TBnr = np.append(TBnr,TBn,1)
            else:
                TBn = dnorm(np.array([TB[:,i]]).T,self.maxB,self.minB)
                TBnl = np.append(TBnl,TBn,1)
        TBn = np.hstack([TBnl[:,1:],TBnr[:,1:]])
        return self.CA[:len(xA),:],self.CB[:len(xA),:],TB,TBn,self.IA[:len(xA[0]),:].T
def lapArt_test(xA,rhoA=0.9,rhoB=0.9,beta=0.000001,alpha=1.0,nep=1,memory_folder=''):
    """
    Test LAPART Algorithm
    :param xA: A-Side Input Matrix (float)
    :param rhoA: A-Side free parameter (float)
    :param rhoB: B-Side free parameter (float)
    :param beta: Learning rate free parameter (float)
    :param alpha: Choice Parameter (float)
    :param nep: Number of epochs (integer)
    :param memory_folder: Folder to store memory (string)
    :return CB: B-Side categories vector (float)
    :return TB: B-Side template matrix (float)
    :return TBn: B-Side normalized template matrix (float)
    :return df: Pandas dataframe - A & B categories, A-Side inputs
    :return elapsed_time: Seconds to complete training (float)
    """
    t0 = time.time()
    # Load the learned templates and coupling matrix, dropping the index
    # column written by pandas on save.
    TA = pd.read_csv('%s/TA.csv' % memory_folder).values[:, 1:]
    TB = pd.read_csv('%s/TB.csv' % memory_folder).values[:, 1:]
    L = pd.read_csv('%s/L.csv' % memory_folder).values[:, 1:]
    # Build the test network and run it over the inputs.
    ann = test(xA, TA, TB, L, rhoA, rhoB, beta, alpha, nep, memory_folder)
    CA, CB, TB, TBn, IA = ann.lapart_test(xA)
    # Collect categories and inputs in a dataframe for inspection.
    df = pd.DataFrame(np.hstack([CA, CB, IA]))
    df = df.rename(columns={0: 'CA', 1: 'CB'})
    return CB, TB, TBn, df, time.time() - t0
"pandas.read_csv",
"numpy.hstack",
"numpy.append",
"numpy.array",
"numpy.transpose",
"time.time"
] | [((3946, 3957), 'time.time', 'time.time', ([], {}), '()\n', (3955, 3957), False, 'import time\n'), ((1754, 1770), 'numpy.transpose', 'np.transpose', (['TA'], {}), '(TA)\n', (1766, 1770), True, 'import numpy as np\n'), ((1783, 1799), 'numpy.transpose', 'np.transpose', (['TB'], {}), '(TB)\n', (1795, 1799), True, 'import numpy as np\n'), ((3099, 3136), 'numpy.hstack', 'np.hstack', (['[TBnl[:, 1:], TBnr[:, 1:]]'], {}), '([TBnl[:, 1:], TBnr[:, 1:]])\n', (3108, 3136), True, 'import numpy as np\n'), ((4065, 4104), 'pandas.read_csv', 'pd.read_csv', (["('%s/L.csv' % memory_folder)"], {}), "('%s/L.csv' % memory_folder)\n", (4076, 4104), True, 'import pandas as pd\n'), ((4269, 4292), 'numpy.hstack', 'np.hstack', (['[CA, CB, IA]'], {}), '([CA, CB, IA])\n', (4278, 4292), True, 'import numpy as np\n'), ((4354, 4365), 'time.time', 'time.time', ([], {}), '()\n', (4363, 4365), False, 'import time\n'), ((971, 1014), 'pandas.read_csv', 'pd.read_csv', (["('%s/maxminA.csv' % self.folder)"], {}), "('%s/maxminA.csv' % self.folder)\n", (982, 1014), True, 'import pandas as pd\n'), ((1244, 1305), 'numpy.hstack', 'np.hstack', (['[past_maxA, current_maxA, past_minA, current_minA]'], {}), '([past_maxA, current_maxA, past_minA, current_minA])\n', (1253, 1305), True, 'import numpy as np\n'), ((1405, 1448), 'pandas.read_csv', 'pd.read_csv', (["('%s/maxminB.csv' % self.folder)"], {}), "('%s/maxminB.csv' % self.folder)\n", (1416, 1448), True, 'import pandas as pd\n'), ((1643, 1678), 'numpy.hstack', 'np.hstack', (['[self.xAn, 1 - self.xAn]'], {}), '([self.xAn, 1 - self.xAn])\n', (1652, 1678), True, 'import numpy as np\n'), ((3968, 4008), 'pandas.read_csv', 'pd.read_csv', (["('%s/TA.csv' % memory_folder)"], {}), "('%s/TA.csv' % memory_folder)\n", (3979, 4008), True, 'import pandas as pd\n'), ((4014, 4054), 'pandas.read_csv', 'pd.read_csv', (["('%s/TB.csv' % memory_folder)"], {}), "('%s/TB.csv' % memory_folder)\n", (4025, 4054), True, 'import pandas as pd\n'), ((2965, 2988), 'numpy.append', 
'np.append', (['TBnr', 'TBn', '(1)'], {}), '(TBnr, TBn, 1)\n', (2974, 2988), True, 'import numpy as np\n'), ((3068, 3091), 'numpy.append', 'np.append', (['TBnl', 'TBn', '(1)'], {}), '(TBnl, TBn, 1)\n', (3077, 3091), True, 'import numpy as np\n'), ((3013, 3033), 'numpy.array', 'np.array', (['[TB[:, i]]'], {}), '([TB[:, i]])\n', (3021, 3033), True, 'import numpy as np\n'), ((2911, 2931), 'numpy.array', 'np.array', (['[TB[:, i]]'], {}), '([TB[:, i]])\n', (2919, 2931), True, 'import numpy as np\n')] |
"""
This module is a rework of
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/preprocessing/_discretization.py
However, it is purely written in pandas instead of numpy because it is more
intuitive
Also, some custom modifications were included to align it with our
Python Predictions methodology
Authors:
- <NAME> (methodology)
- <NAME> (implementation)
"""
# standard lib imports
from copy import deepcopy
from typing import List
import numbers
import logging
log = logging.getLogger(__name__)
# third party imports
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
#from sklearn.cluster import KMeans
class KBinsDiscretizer(BaseEstimator):
    """Bin continuous data into intervals of predefined size. It provides a
    way to partition continuous data into discrete values, i.e. transform
    continuous data into nominal data. This can make a linear model more
    expressive as it introduces nonlinearity to the model, while maintaining
    the interpretability of the model afterwards.

    Attributes
    ----------
    auto_adapt_bins : bool
        reduces the number of bins (starting from n_bins) as a function of
        the number of missings
    change_endpoint_format : bool
        Whether or not to change the format of the lower and upper bins into
        ``<= x`` and ``> y`` resp.
    closed : str
        Whether to close the bins (intervals) from the left or right
    label_format : str
        format string to display the bin labels
        e.g. ``min - max``, ``(min, max]``, ...
    n_bins : int
        Number of bins to produce. Raises ValueError if ``n_bins < 2``.
    starting_precision : int
        Initial precision for the bin edges to start from,
        can also be negative. Given a list of bin edges, the class will
        automatically choose the minimal precision required to have proper bins
        e.g. ``[5.5555, 5.5744, ...]`` will be rounded to
        ``[5.56, 5.57, ...]``. In case of a negative number, an attempt will be
        made to round up the numbers of the bin edges e.g. ``5.55 -> 10``,
        ``146 -> 100``, ...
    strategy : str
        Binning strategy. Currently only `uniform` and `quantile`
        e.g. equifrequency is supported
    """

    valid_strategies = ("uniform", "quantile")
    valid_keys = ["n_bins", "strategy", "closed", "auto_adapt_bins",
                  "starting_precision", "label_format",
                  "change_endpoint_format"]

    def __init__(self, n_bins: int=10, strategy: str="quantile",
                 closed: str="right",
                 auto_adapt_bins: bool=False,
                 starting_precision: int=0,
                 label_format: str="{} - {}",
                 change_endpoint_format: bool=False):
        # fail fast on an invalid number of bins
        self._validate_n_bins(n_bins)

        self.n_bins = n_bins
        self.strategy = strategy.lower()
        self.closed = closed.lower()
        self.auto_adapt_bins = auto_adapt_bins
        self.starting_precision = starting_precision
        self.label_format = label_format
        self.change_endpoint_format = change_endpoint_format

        # dict to store fitted output in
        self._bins_by_column = {}

    def _validate_n_bins(self, n_bins: int):
        """Check if ``n_bins`` is of the proper type and if it is bigger
        than two

        Parameters
        ----------
        n_bins : int
            Number of bins KBinsDiscretizer has to produce for each variable

        Raises
        ------
        ValueError
            in case ``n_bins`` is not an integer or if ``n_bins < 2``
        """
        if not isinstance(n_bins, numbers.Integral):
            raise ValueError("{} received an invalid n_bins type. "
                             "Received {}, expected int."
                             .format(KBinsDiscretizer.__name__,
                                     type(n_bins).__name__))
        if n_bins < 2:
            raise ValueError("{} received an invalid number "
                             "of bins. Received {}, expected at least 2."
                             .format(KBinsDiscretizer.__name__, n_bins))

    def attributes_to_dict(self) -> dict:
        """Return the attributes of KBinsDiscretizer in a dictionary

        Returns
        -------
        dict
            Contains the attributes of KBinsDiscretizer instance with the
            names as keys
        """
        params = self.get_params()

        # tuples are JSON-unfriendly, so serialize bins as lists of lists
        params["_bins_by_column"] = {
            key: [list(tup) for tup in value] if value else None
            for key, value in self._bins_by_column.items()
        }

        return params

    def set_attributes_from_dict(self, params: dict):
        """Set instance attributes from a dictionary of values with key the
        name of the attribute.

        Parameters
        ----------
        params : dict
            Contains the attributes of KBinsDiscretizer with their
            names as key.

        Raises
        ------
        ValueError
            In case `_bins_by_column` is not of type dict
        """
        _bins_by_column = params.pop("_bins_by_column", {})

        # isinstance instead of an exact type comparison, so dict subclasses
        # (e.g. OrderedDict) are accepted as well
        if not isinstance(_bins_by_column, dict):
            raise ValueError("_bins_by_column is expected to be a dict "
                             "but is of type {} instead"
                             .format(type(_bins_by_column)))

        # Clean out params dictionary to remove unknown keys (for safety!)
        params = {key: params[key] for key in params if key in self.valid_keys}

        # We cannot turn this method into a classmethod as we want to make use
        # of the following method from BaseEstimator:
        self.set_params(**params)

        self._bins_by_column = {
            key: ([tuple(l) for l in value] if value else None)
            for key, value in _bins_by_column.items()
        }

        return self

    def fit(self, data: pd.DataFrame, column_names: list):
        """Fits the estimator

        Parameters
        ----------
        data : pd.DataFrame
            Data to be discretized
        column_names : list
            Names of the columns of the DataFrame to discretize
        """
        if self.strategy not in self.valid_strategies:
            raise ValueError("{}: valid options for 'strategy' are {}. "
                             "Got strategy={!r} instead."
                             .format(KBinsDiscretizer.__name__,
                                     self.valid_strategies, self.strategy))

        for column_name in column_names:
            if column_name not in data.columns:
                log.warning("DataFrame has no column '{}', so it will be "
                            "skipped in fitting" .format(column_name))
                continue

            bins = self._fit_column(data, column_name)

            # Add to bins_by_column for later use
            self._bins_by_column[column_name] = bins

    def _fit_column(self, data: pd.DataFrame,
                    column_name: str) -> List[tuple]:
        """Compute bins for a specific column in data

        Parameters
        ----------
        data : pd.DataFrame
            Data to be discretized
        column_name : str
            Name of the column of the DataFrame to discretize

        Returns
        -------
        List[tuple]
            list of bins as tuples, or None for a constant column
        """
        col_min, col_max = data[column_name].min(), data[column_name].max()

        if col_min == col_max:
            log.warning("Predictor '{}' is constant and "
                        "will be ignored in computation".format(column_name))
            return None

        n_bins = self.n_bins
        if self.auto_adapt_bins:
            # shrink the number of bins proportionally to the share of
            # missing values, but never below 2
            size = len(data.index)
            missing_pct = data[column_name].isnull().sum()/size
            n_bins = int(max(round((1 - missing_pct) * n_bins), 2))

        bin_edges = self._compute_bin_edges(data, column_name, n_bins,
                                            col_min, col_max)

        if len(bin_edges) < 3:
            log.warning("Only 1 bin was found for predictor '{}' so it will "
                        "be ignored in computation".format(column_name))
            return None

        if len(bin_edges) < n_bins + 1:
            log.warning("The number of actual bins for predictor '{}' is {} "
                        "which is smaller than the requested number of bins "
                        "{}".format(column_name, len(bin_edges) - 1, n_bins))

        return self._compute_bins_from_edges(bin_edges)

    def transform(self, data: pd.DataFrame,
                  column_names: list) -> pd.DataFrame:
        """Discretizes the data in the given list of columns by mapping each
        number to the appropriate bin computed by the fit method

        Parameters
        ----------
        data : pd.DataFrame
            Data to be discretized
        column_names : list
            Names of the columns of the DataFrame to discretize

        Returns
        -------
        pd.DataFrame
            data with additional discretized variables

        Raises
        ------
        NotFittedError
            when called before ``fit``
        """
        if len(self._bins_by_column) == 0:
            msg = ("{} instance is not fitted yet. Call 'fit' with "
                   "appropriate arguments before using this method.")

            raise NotFittedError(msg.format(self.__class__.__name__))

        for column_name in column_names:
            if column_name not in self._bins_by_column:
                log.warning("Column '{}' is not in fitted output "
                            "and will be skipped".format(column_name))
                continue

            # can be None for a column with a constant value!
            bins = self._bins_by_column[column_name]
            if bins is not None:
                data = self._transform_column(data, column_name, bins)

        return data

    def _transform_column(self, data: pd.DataFrame,
                          column_name: str,
                          bins: List[tuple]) -> pd.DataFrame:
        """Given a DataFrame, a column name and a list of bins,
        create an additional column which determines the bin in which the
        value of column_name lies in.

        Parameters
        ----------
        data : pd.DataFrame
            Original data to be discretized
        column_name : str
            name of the column to discretize
        bins : List[tuple]
            bins to discretize the data into

        Returns
        -------
        pd.DataFrame
            original DataFrame with an added binned column
        """
        interval_idx = KBinsDiscretizer._create_index(bins, self.closed)

        column_name_bin = column_name + "_bin"

        # use pd.cut to compute bins
        data.loc[:, column_name_bin] = pd.cut(x=data[column_name],
                                              bins=interval_idx)

        # Rename bins so that the output has a proper format
        bin_labels = self._create_bin_labels(bins)

        data.loc[:, column_name_bin] = (data[column_name_bin]
                                        .cat.rename_categories(bin_labels))

        if data[column_name_bin].isnull().sum() > 0:
            # Add an additional bin for missing values and replace NULL with
            # "Missing" (otherwise these rows would be ignored in groupby).
            # NOTE: ``inplace=True`` on the categorical accessor and on
            # categorical fillna is deprecated and removed in pandas >= 2.0,
            # so reassign the column instead.
            data[column_name_bin] = (data[column_name_bin]
                                     .cat.add_categories(["Missing"]))
            data[column_name_bin] = data[column_name_bin].fillna("Missing")

        return data

    def fit_transform(self, data: pd.DataFrame,
                      column_names: list) -> pd.DataFrame:
        """Fits to data, then transform it

        Parameters
        ----------
        data : pd.DataFrame
            Data to be discretized
        column_names : list
            Names of the columns of the DataFrame to discretize

        Returns
        -------
        pd.DataFrame
            data with additional discretized variables
        """
        self.fit(data, column_names)
        return self.transform(data, column_names)

    def _compute_bin_edges(self, data: pd.DataFrame, column_name: str,
                           n_bins: int, col_min: float,
                           col_max: float) -> list:
        """Compute the bin edges for a given column, a DataFrame and the
        number of required bins

        Parameters
        ----------
        data : pd.DataFrame
            Data to be discretized
        column_name : str
            name of the column to discretize
        n_bins : int
            Number of bins to produce.
        col_min : float
            min value of the variable
        col_max : float
            max value of the variable

        Returns
        -------
        list
            list of bin edges from which to compute the bins
        """
        bin_edges = []
        if self.strategy == "quantile":
            # equifrequency binning: edges at evenly spaced quantiles
            bin_edges = list(data[column_name]
                             .quantile(np.linspace(0, 1, n_bins + 1),
                                       interpolation='linear'))
        elif self.strategy == "uniform":
            # equiwidth binning: edges evenly spaced between min and max
            bin_edges = list(np.linspace(col_min, col_max, n_bins + 1))

        # Make sure the bin_edges are unique and sorted; duplicates occur
        # e.g. when a quantile is repeated in skewed data
        return sorted(set(bin_edges))

    def _compute_minimal_precision_of_bin_edges(self, bin_edges: list) -> int:
        """Compute the minimal precision of a list of bin_edges so that we end
        up with a strictly ascending sequence of numbers.
        The starting_precision attribute will be used as the initial precision.
        In case of a negative starting_precision, the bin edges will be rounded
        to the nearest 10, 100, ... (e.g. 5.55 -> 10, 246 -> 200, ...)

        Parameters
        ----------
        bin_edges : list
            The bin edges for binning a continuous variable

        Returns
        -------
        int
            minimal precision for the bin edges
        """
        precision = self.starting_precision
        while True:
            cont = False
            for a, b in zip(bin_edges, bin_edges[1:]):
                if a != b and round(a, precision) == round(b, precision):
                    # precision is not high enough, so increase
                    precision += 1
                    cont = True  # set cont to True to keep looping
                    break  # break out of the for loop
            if not cont:
                # if minimal precision was found,
                # return to break out of while loop
                return precision

    def _compute_bins_from_edges(self, bin_edges: list) -> List[tuple]:
        """Given a list of bin edges, compute the minimal precision for which
        we can make meaningful bins and make those bins

        Parameters
        ----------
        bin_edges : list
            The bin edges for binning a continuous variable

        Returns
        -------
        List[tuple]
            A (sorted) list of bins as tuples
        """
        # compute the minimal precision of the bin_edges
        # this can be a negative number, which then
        # rounds numbers to the nearest 10, 100, ...
        precision = self._compute_minimal_precision_of_bin_edges(bin_edges)

        bins = []
        for a, b in zip(bin_edges, bin_edges[1:]):
            fmt_a = round(a, precision)
            fmt_b = round(b, precision)
            bins.append((fmt_a, fmt_b))

        return bins

    @staticmethod
    def _create_index(intervals: List[tuple],
                      closed: str="right") -> pd.IntervalIndex:
        """Create an pd.IntervalIndex based on a list of tuples.
        This is basically a wrapper around pd.IntervalIndex.from_tuples
        However, the lower bound of the first entry in the list (the lower
        bin) is replaced by -np.inf. Similarly, the upper bound of the last
        entry in the list (upper bin) is replaced by np.inf.

        Parameters
        ----------
        intervals : List[tuple]
            a list of tuples describing the intervals
        closed : str, optional
            Whether the intervals should be closed on the left-side,
            right-side, both or neither.

        Returns
        -------
        pd.IntervalIndex
            IntervalIndex spanning (-inf, inf) built from ``intervals``
        """
        # check if closed is of the proper form
        if closed not in ["left", "right"]:
            # fixed error message: this check concerns 'closed', not
            # 'strategy'
            raise ValueError("{}: valid options for 'closed' are {}. "
                             "Got closed={!r} instead."
                             .format(KBinsDiscretizer.__name__,
                                     ["left", "right"], closed))

        # deepcopy variable because we do not want to modify the content
        # of intervals (which is still used outside of this function)
        _intervals = deepcopy(intervals)
        # Replace min and max with -np.inf and np.inf resp. so that these
        # values are guaranteed to be included when transforming the data
        _intervals[0] = (-np.inf, _intervals[0][1])
        _intervals[-1] = (_intervals[-1][0], np.inf)

        return pd.IntervalIndex.from_tuples(_intervals, closed=closed)

    def _create_bin_labels(self, bins: List[tuple]) -> list:
        """Given a list of bins, create a list of string containing the bins
        as a string with a specific format (e.g. bin labels)

        Parameters
        ----------
        bins : List[tuple]
            list of tuple containing for each bin the upper and lower bound

        Returns
        -------
        list
            list of (formatted) bin labels
        """
        bin_labels = []
        for interval in bins:
            bin_labels.append(self.label_format.format(interval[0],
                                                       interval[1]))

        # Format first and last bin as < x and > y resp.
        if self.change_endpoint_format:
            if self.closed == "left":
                bin_labels[0] = "< {}".format(bins[0][1])
                bin_labels[-1] = ">= {}".format(bins[-1][0])
            else:
                bin_labels[0] = "<= {}".format(bins[0][1])
                bin_labels[-1] = "> {}".format(bins[-1][0])

        return bin_labels
| [
"logging.getLogger",
"pandas.cut",
"pandas.IntervalIndex.from_tuples",
"numpy.linspace",
"copy.deepcopy"
] | [((489, 516), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (506, 516), False, 'import logging\n'), ((10846, 10892), 'pandas.cut', 'pd.cut', ([], {'x': 'data[column_name]', 'bins': 'interval_idx'}), '(x=data[column_name], bins=interval_idx)\n', (10852, 10892), True, 'import pandas as pd\n'), ((17910, 17929), 'copy.deepcopy', 'deepcopy', (['intervals'], {}), '(intervals)\n', (17918, 17929), False, 'from copy import deepcopy\n'), ((18199, 18247), 'pandas.IntervalIndex.from_tuples', 'pd.IntervalIndex.from_tuples', (['_intervals', 'closed'], {}), '(_intervals, closed)\n', (18227, 18247), True, 'import pandas as pd\n'), ((13033, 13062), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(n_bins + 1)'], {}), '(0, 1, n_bins + 1)\n', (13044, 13062), True, 'import numpy as np\n'), ((13198, 13239), 'numpy.linspace', 'np.linspace', (['col_min', 'col_max', '(n_bins + 1)'], {}), '(col_min, col_max, n_bins + 1)\n', (13209, 13239), True, 'import numpy as np\n')] |
import collections
import copy
import tensorflow as tf
import numpy as np
import os
import multiprocessing
import functools
from abc import ABC, abstractmethod
from rl.tools.utils.misc_utils import unflatten, flatten, cprint
tf_float = tf.float32  # default float dtype used when building graphs in this module
tf_int = tf.int32  # default integer dtype used when building graphs in this module
"""
For compatibility with stop_gradient
"""
def tf_flatten(variables):
    """Concatenate a list of tensors into one flat 1-D tensor."""
    flat_parts = [tf.reshape(var, [-1]) for var in variables]
    return tf.concat(flat_parts, axis=0)
def gradients(tensor, var_list):
    """Gradients of `tensor` w.r.t. `var_list`; disconnected variables get
    zero tensors instead of None (needed for stop_gradient compatibility)."""
    raw_grads = tf.gradients(tensor, var_list)
    return [grad if grad is not None else tf.zeros_like(var)
            for var, grad in zip(var_list, raw_grads)]
"""
Wrapper of tensorflow graphs
"""
class tfObjectManager(object):
    """
    An object manager that makes sure each one has an unique name.

    Keeps a per-base-name counter and a registry of names handed out, so
    repeated requests for the same base name yield ``name``, ``name_1``,
    ``name_2``, ...
    """

    def __init__(self, default_name='tfObject'):
        self._name = default_name
        # base name -> next suffix to try (None means never seen)
        self._dict = collections.defaultdict(lambda: None)
        # name -> whether it has already been handed out
        self._table = collections.defaultdict(lambda: False)

    def get_name(self, name=None):
        """ automatically get a unique name for the constructing a tfObject instance """
        if name is None:
            name = self._name
        name = str(name)
        while True:
            # propose a new candidate name based on the counter
            count = self._dict[name]
            if count is None:
                candidate = str(name)
                self._dict[name] = 1
            else:
                candidate = str(name) + '_' + str(count)
                self._dict[name] += 1
            # accept the candidate only if it was never handed out before
            if not self._table[candidate]:
                self._table[candidate] = True
                break
        if name != candidate:
            cprint('An tfObject under {} already exists. A new name {} is created by tfObjectManager'.format(name, candidate))
        return candidate
# This makes sure every tfObject instance has an unique name
_tfOBJECT_MANAGER = tfObjectManager()  # module-level singleton consulted by tfObject.__init__
class tfObject(ABC):
    """
    A helper class for defining custom classes based on tensorflow.

    It makes sure that each instance of tfObject has an unique name (realized
    as tf.variable_scope) and supports basic functionalities, like
    copy.deepcopy, assign, save, and restore.

    Usage guidelines:
        The user needs to define _build_graph and use tfObject.save_init_args
        to decorate its __init__. Note stateful non-tensorflow attributes
        (which change during the use of the instance, like a counter) should
        NOT be created inside _build_graph. The decorator
        tfObject.save_init_args is used to record input arguments to __init__
        for deepcopying. It should be used to decorate a child class's
        __init__ when its signature or default value changes.

        In order to maintain desired deepcopy behaviors during inheritance,
        the user should modify _pre_deepcopy_list and _post_deepcopy_list to
        add the names of attributes that should be copied during deepcopy.
        By default, a tfObject instance does not deepcopy any attribute,
        except for those provided by the user. This convention is chosen for
        robust behaviors in case of potential future behavior changes of
        tensorflow. When copy.deepcopy is called, tfObject calls the __init__
        function defined by the custom class, in which before _build_graph is
        called (through __init__ of tfObject) the attributes in
        _pre_deepcopy_list will be deepcopied, and then deepcopies the
        attributes in the _post_deepcopy_list. As a rule of thumb,
        _pre_deepcopy_list should contain stateful attributes that pertain to
        the tensorflow graph creation (i.e. those created before calling
        __init__); _post_deepcopy_list contains other stateful attributes.
        Note when defining _pre_deepcopy_list and _post_deepcopy_list, make
        sure they contain the contents from the parent class.

    Note on naming: double-underscore attributes below (self.__name etc.) are
    name-mangled by Python to self._tfObject__name; the code uses the mangled
    spelling explicitly where it must bypass mangling in subclasses.

    Public attributes:
        ts_vars, ts_allvars

    Public methods:
        copy, __deepcopy__, assign, save, restore
    """
    def __init__(self, name='tfObject', max_to_keep=None, bg_kwargs=None):
        """
        The tensorflow graph constructor.

        Args:
            name: the name of the object
            max_to_keep: the maximal number of copies to save
            bg_kwargs: the additional kwargs of _build_graph.
        """
        assert hasattr(self, '_tfObject__args') and hasattr(self, '_tfObject__kwargs'), \
            'Must use save_init_args decorator on __init__'
        if bg_kwargs is None:
            bg_kwargs = {}
        # pre-processing
        # __pre_init_fun is only set by __deepcopy to restore attributes
        # before the graph is rebuilt
        if hasattr(self, '_tfObject__pre_init_fun'):
            self._tfObject__pre_init_fun()
        if hasattr(self, '_tfObject__default_name'):
            name = self._tfObject__default_name # force using a default name
        # create the tensorflow graph
        self.__name = _tfOBJECT_MANAGER.get_name(name) # get a unique name
        with tf.variable_scope(self._tfObject__name):
            self.__scope = tf.get_variable_scope().name # for later tensors retrieval
            self._build_graph(**bg_kwargs)
        # build getters and setters (np.ndarray)
        if len(self.ts_vars) > 0:
            self.__get_vars = build_get(self.ts_vars)
            self.__set_vars = build_set(self.ts_vars)
        if len(self.ts_allvars) > 0:
            self.__get_allvars = build_get(self.ts_allvars)
            self.__set_allvars = build_set(self.ts_allvars)
        if len(self.ts_allvars) > 0:
            self.__saver = tf.train.Saver(self.ts_allvars, max_to_keep=max_to_keep)
        # for deepcopy
        self.__pre_deepcopy_list = [] # attributes should be deep copied
        self.__pre_deepcopy_list.extend(self._pre_deepcopy_list)
        self.__post_deepcopy_list = ['_scope'] # attributes should be deep copied
        self.__post_deepcopy_list.extend(self._post_deepcopy_list)
    # Some functions for the user to define
    @abstractmethod
    def _build_graph(self, **kwargs):
        """ Build the tensorflow graph """
    @property
    def _pre_deepcopy_list(self):
        """ Return a list of strings of attribute names that should be deep
        copied before calling tfObject.__init__ """
        return []
    @property
    def _post_deepcopy_list(self):
        """ Return a list of strings of attribute names that should be deep
        copied before calling self.__init__ """
        return []
    # Functions for correct deepcopy
    @staticmethod
    def save_init_args(deepcopy_args=False):
        """ A decorator for child class's __init__, which saves the input
        arguments for performing deepcopying"""
        if deepcopy_args: # whether to deepcopy the input arguments
            def safe_copy(val):
                try:
                    return copy.deepcopy(val)
                except:
                    # deliberate best-effort: fall back to a shallow copy for
                    # non-deepcopyable arguments (e.g. tf handles)
                    return copy.copy(val)
        else:
            def safe_copy(val): return val
        def decorator(fun):
            @functools.wraps(fun)
            def wrapper(self, *args, **kwargs):
                if hasattr(self, '_tfObject__args_saved'):
                    if self._tfObject__args_saved: # make sure it's only called once
                        return fun(self, *args, **kwargs)
                # save the input arguments
                # (self.__args mangles to self._tfObject__args because this
                # code is textually inside the tfObject class body)
                self.__args, self.__kwargs = [], {}
                self.__args = [safe_copy(arg) for arg in args]
                self.__kwargs = {k: safe_copy(v) for k, v in kwargs.items()}
                self.__args_saved = True
                return fun(self, *args, **kwargs)
            return wrapper
        return decorator
    def copy(self, new_name):
        """ Like copy.deepcopy but with a new custom name """
        return self.__deepcopy(name=new_name, memo={})
    def __deepcopy__(self, memo):
        # we need to overload this because of tensorflow graph
        return self._tfObject__deepcopy(memo=memo)
    def __deepcopy(self, memo, name=None):
        """Create a functional copy: re-run __init__ with the saved args,
        restoring selected attributes before/after, then copy variable
        values."""
        # create new instance
        tfobj = type(self).__new__(type(self), *self._tfObject__args, **self._tfObject__kwargs)
        memo[id(self)] = tfobj # prevent forming a loop
        # customize the behavior of tfObject.__init__
        if name is not None:
            tfobj.__default_name = name # use a new name
        def _pre_deepcopy(): # deepcopy attributes before _build_graph
            tfobj._tfObject__update_attrs(self, self._tfObject__pre_deepcopy_list, memo)
        tfobj.__pre_init_fun = _pre_deepcopy
        # initialize the instance as usual
        tfobj.__init__(*self._tfObject__args, **self._tfObject__kwargs)
        # post deepcopying
        tfobj._tfObject__update_attrs(self, self._tfObject__post_deepcopy_list, memo)
        # update tf.Variables
        tfobj.assign(self)
        return tfobj
    def __update_attrs(self, src, attrs, memo):
        # try to deepcopy attrs from src to self
        # (only attributes that actually exist on src are copied)
        for k in list(set(attrs) & set(src.__dict__.keys())):
            setattr(self, k, copy.deepcopy(getattr(src, k), memo))
    # Miscellaneous functions
    def assign(self, other):
        """Set the tf.Variables of self as that of the other """
        assert type(self) == type(other)
        if len(self.ts_allvars) > 0:
            self._tfObject__set_allvars(*other._tfObject__get_allvars()) # update tf.Variables
    @property
    def ts_vars(self): # list of trainable tf.Variables
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._tfObject__scope)
    @ts_vars.setter
    def ts_vars(self, vals): # list of values to set to trainable tf.Variables
        self._tfObject__set_vars(*vals)
    @property
    def ts_allvars(self): # list of all tf.Variables, including non-trainable ones
        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self._tfObject__scope)
    @ts_allvars.setter
    def ts_allvars(self, vals): # list of all tf.Variables, including non-trainable ones
        self._tfObject__set_allvars(*vals)
    # TODO make this object pickle-compatible?
    def save(self, path):
        """ Save the ts_allvars to path """
        if len(self.ts_allvars) > 0:
            path = self._tfObject__saver.save(tf.get_default_session(), path)
        return path
    def restore(self, path, saved_name=None):
        """Recover ts_allvars from path saved with saved_name"""
        if len(self.ts_allvars) > 0:
            if saved_name is None:
                saved_name = self._tfObject__name
            ts_dict = {}
            for ts in self.ts_allvars:
                splits = ts.name.split('/')
                # XXX only restore the vars match the saved_name!!!!
                if splits[0] != saved_name:
                    continue
                # NOTE(review): this assignment is a no-op — splits[0] already
                # equals saved_name after the check above; confirm intent.
                splits[0] = saved_name
                saved_ts_name = '/'.join(splits)
                # variable names end in ':0' (first output slot); strip it to
                # get the checkpoint key
                assert saved_ts_name.split(':')[1] == '0'
                saved_ts_name = saved_ts_name.split(':')[0]
                ts_dict[saved_ts_name] = ts
            saver = tf.train.Saver(ts_dict)
            saver.restore(tf.get_default_session(), path)
"""
Session management.
"""
def make_session(num_cpu=None, make_default=False):
    """Returns a session that will use <num_cpu> CPU's only"""
    if num_cpu is None:
        # fall back to the environment override, then the machine's CPU count
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    config = tf.ConfigProto(
        inter_op_parallelism_threads=num_cpu,
        intra_op_parallelism_threads=num_cpu)
    config.gpu_options.allocator_type = 'BFC'
    config.gpu_options.allow_growth = True
    if make_default:
        return tf.InteractiveSession(config=config)
    return tf.Session(config=config)
def single_threaded_session():
    """Returns a session which will only use a single CPU"""
    session = make_session(num_cpu=1)
    return session
"""
Placeholder cache. Create if necessary.
"""
_PLACEHOLDER_CACHE = {}  # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype=None, shape=None):
    """Return the placeholder cached under `name`, creating it on first use.

    On a cache hit, dtype and shape must be omitted (they were fixed at
    creation time).
    """
    cached = _PLACEHOLDER_CACHE.get(name)
    if cached is not None:
        assert dtype is None
        assert shape is None
        return cached[0]
    placeholder = tf.placeholder(dtype=dtype, shape=shape, name=name)
    _PLACEHOLDER_CACHE[name] = (placeholder, dtype, shape)
    return placeholder
"""
Simple functions that construct tensors from tensors.
"""
def squared_sum(sy_x, axis=None):
    """Sum of elementwise squares of `sy_x`, optionally along `axis`."""
    return tf.reduce_sum(tf.square(sy_x), axis=axis)
def switch(condition, then_expression, else_expression):
    """Switches between two operations depending on a scalar value (int or bool).
    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    Args:
        condition: scalar tensor.
        then_expression: TensorFlow operation.
        else_expression: TensorFlow operation.
    """
    out_shape = copy.copy(then_expression.get_shape())
    result = tf.cond(tf.cast(condition, 'bool'),
                     lambda: then_expression,
                     lambda: else_expression)
    # tf.cond can lose static shape information, so restore it explicitly
    result.set_shape(out_shape)
    return result
def build_multilayer_perceptron(
        scope,
        sy_input,
        output_size,
        n_layers=2,
        size=64,
        activation=tf.tanh,
        hid_layer_std=1.0,
        output_activation=None,
        output_init_std=1.0,
        reuse=False,
):
    """Build an MLP under variable scope `scope`: `n_layers` dense layers of
    width `size` (normc-initialized with std `hid_layer_std`) followed by a
    dense output layer of width `output_size` (std `output_init_std`).
    Returns the output tensor."""
    with tf.variable_scope(scope, reuse=reuse):
        hidden = sy_input
        for _ in range(n_layers):
            hidden = tf.layers.dense(
                hidden, size, activation=activation,
                kernel_initializer=normc_initializer(hid_layer_std))
        output = tf.layers.dense(
            hidden, output_size, activation=output_activation,
            kernel_initializer=normc_initializer(output_init_std))
    return output
def normc_initializer(std=1.0):
    """Initializer drawing normal samples rescaled so each column (axis 0)
    has norm `std`."""
    def _initializer(shape, dtype=None, partition_info=None):  # pylint: disable=W0613
        sample = np.random.randn(*shape).astype(np.float32)
        col_norm = np.sqrt(np.square(sample).sum(axis=0, keepdims=True))
        sample *= std / col_norm
        return tf.constant(sample)
    return _initializer
def build_get(tensors):
    """Compile a zero-argument callable that fetches the current values of
    `tensors`.

    NOTE(review): relies on a module-level `function` helper not visible in
    this chunk -- presumably a tf function compiler; confirm where it is
    defined/imported.
    """
    getter = function([], tensors)
    return getter
def build_set(tensors):
    """Compile a callable that assigns the given numpy values to `tensors`.

    NOTE(review): relies on a module-level `function` helper not visible in
    this chunk; confirm where it is defined/imported.
    """
    def make_placeholders(x, name=None):
        # one placeholder per tensor, mirroring shape and dtype
        if type(x) is not list:
            return [tf.placeholder(shape=x.shape, dtype=x.dtype)]
        return [tf.placeholder(shape=v.shape, dtype=v.dtype) for v in x]
    placeholders = make_placeholders(tensors)
    assign_ops = [tf.assign(t, p) for (t, p) in zip(tensors, placeholders)]
    return function(placeholders, [], assign_ops)
"""
Convert from flat tensors to list of tensors and back.
"""
"""
Shape related.
"""
def get_shape(x):
    """Return the static shape of tensor `x` as a Python list."""
    static_shape = x.get_shape()
    return static_shape.as_list()
def intprod(shape):
    """Return the product of the entries of `shape` as a plain int."""
    product = np.prod(shape)
    return int(product)
def get_size(x):
    """Total number of elements of tensor `x`; requires a fully-known
    static shape."""
    dims = get_shape(x)
    assert all(isinstance(a, int) for a in dims), "shape function assumes that shape is fully known"
    return intprod(dims)
def get_tensor_by_name(name):
    """Look up a tensor in the default graph by op name (output slot 0)."""
    graph = tf.get_default_graph()
    return graph.get_tensor_by_name('{}:0'.format(name))
class SetFromFlat(object):
    """Callable that writes a flat vector back into a list of tf.Variables.

    Builds a single placeholder of size sum of all variable sizes, plus a
    grouped assign op that slices consecutive segments of the placeholder
    into the corresponding variables (contiguous layout, in var_list order).
    """
    def __init__(self, var_list, dtype=tf.float32):
        assigns = []
        shapes = list(map(get_shape, var_list))
        total_size = np.sum([intprod(shape) for shape in shapes])
        # one flat placeholder holding all variables' values back to back
        self.theta = theta = tf.placeholder(dtype, [total_size])
        start = 0
        assigns = []  # NOTE(review): redundant re-initialization (already [] above)
        for (shape, v) in zip(shapes, var_list):
            size = intprod(shape)
            # slice this variable's segment out of theta and reshape it back
            assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
            start += size
        self.op = tf.group(*assigns)
    def __call__(self, theta):
        # run all assigns in the default session, feeding the flat vector
        tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
    """Op that concatenates a list of variables into one flat vector."""
    def __init__(self, var_list):
        flats = [tf.reshape(v, [get_size(v)]) for v in var_list]
        self.op = tf.concat(axis=0, values=flats)

    def __call__(self):
        session = tf.get_default_session()
        return session.run(self.op)
class Shaper(object):
    """
    Wraps a list of tf.Tensors so the list can be converted to and from a
    single flat vector (a contiguous-memory view of all tensors).
    TensorFlow operators are created lazily, only when first needed.
    """

    def __init__(self, tensors):
        # tensors: the list of tf.Tensor objects being wrapped.
        self._tensors = tensors

    @property
    def _tensor_shapes(self):
        # Static shapes of the wrapped tensors, as plain Python lists.
        return [tensor.shape.as_list() for tensor in self._tensors]

    # for np.ndarray
    def unflatten(self, val):
        """Split flat array *val* into arrays shaped like the wrapped tensors."""
        return unflatten(val, shapes=self._tensor_shapes)

    def flatten(self, vs):
        """Concatenate the list of arrays *vs* into a single flat array."""
        return flatten(vs)

    def build_flat_ph(self):
        """Create a single placeholder with as many elements as all wrapped
        tensors combined. Returns the placeholder and a list of tf.Tensor
        views of it, one per wrapped tensor, in matching order."""
        shapes = self._tensor_shapes
        total_size = sum(intprod(shape) for shape in shapes)
        ph = tf.placeholder(dtype=tf.float32, shape=[total_size])
        views = []
        offset = 0
        for shape in shapes:
            size = intprod(shape)
            views.append(tf.reshape(ph[offset:offset + size], shape))
            offset += size
        return ph, views

    @property
    def variables(self):
        # Build the getter op lazily, the first time values are read.
        if not hasattr(self, '_get_variables'):
            self._get_variables = build_get(self._tensors)
        return self._get_variables()

    @variables.setter
    def variables(self, vals):
        # Build the setter op lazily, the first time values are written.
        if not hasattr(self, '_set_variables'):
            self._set_variables = build_set(self._tensors)
        self._set_variables(*vals)

    @property
    def variable(self):
        return self.flatten(self.variables)

    @variable.setter
    def variable(self, val):
        self.variables = self.unflatten(val)
"""
Create callable functions from tensors.
"""
def function(inputs, outputs, updates=None, givens=None):
    """Theano-style function factory for TensorFlow.

    Given placeholder-like *inputs* and expressions *outputs* computed from
    them, returns a plain Python callable f such that f(values...) feeds the
    values into the placeholders and evaluates the outputs. Values may be
    positional (same order as *inputs*) or keyword arguments named after the
    placeholders (via placeholder.op.name).

    Example:
        x = tf.placeholder(tf.int32, (), name="x")
        y = tf.placeholder(tf.int32, (), name="y")
        z = 3 * x + 2 * y
        lin = function([x, y], z, givens={y: 0})
        with single_threaded_session():
            initialize()
            assert lin(2) == 6
            assert lin(x=3) == 9
            assert lin(2, 2) == 10
            assert lin(x=2, y=3) == 12

    Parameters
    ----------
    inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
        list of input arguments
    outputs: [tf.Variable] or tf.Variable
        list of outputs or a single output to be returned from function. Returned
        value will also have the same shape.
    """
    if isinstance(outputs, list):
        return _Function(inputs, outputs, updates, givens=givens)
    if isinstance(outputs, (dict, collections.OrderedDict)):
        fun = _Function(inputs, outputs.values(), updates, givens=givens)

        def _dict_wrapper(*args, **kwargs):
            # Re-pack results into the caller's mapping type, original keys.
            return type(outputs)(zip(outputs.keys(), fun(*args, **kwargs)))
        return _dict_wrapper

    fun = _Function(inputs, [outputs], updates, givens=givens)

    def _single_wrapper(*args, **kwargs):
        # Single-output case: unwrap the one-element result list.
        return fun(*args, **kwargs)[0]
    return _single_wrapper
class _Function(object):
    """Callable wrapping session.run over fixed inputs, outputs and updates."""

    def __init__(self, inputs, outputs, updates, givens):
        for inpt in inputs:
            acceptable = hasattr(inpt, 'make_feed_dict') or \
                (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0)
            assert acceptable, "inputs should all be placeholders, constants, or have a make_feed_dict method"
        self.inputs = inputs
        self.update_group = tf.group(*(updates or []))
        # Append the update group so it runs with every call; its result is
        # stripped off again in __call__.
        self.outputs_update = list(outputs) + [self.update_group]
        self.givens = givens if givens is not None else {}

    def _feed_input(self, feed_dict, inpt, value):
        if hasattr(inpt, 'make_feed_dict'):
            feed_dict.update(inpt.make_feed_dict(value))
            return
        feed_dict[inpt] = value

    def __call__(self, *args):
        assert len(args) <= len(self.inputs), "Too many arguments provided"
        feed_dict = {}
        # Update the args
        for inpt, value in zip(self.inputs, args):
            self._feed_input(feed_dict, inpt, value)
        # Fill in defaults from givens without overriding explicit args.
        for inpt in self.givens:
            feed_dict.setdefault(inpt, self.givens[inpt])
        session = tf.get_default_session()
        results = session.run(self.outputs_update, feed_dict=feed_dict)[:-1]
        return results
| [
"numpy.prod",
"tensorflow.reduce_sum",
"tensorflow.get_default_session",
"multiprocessing.cpu_count",
"tensorflow.gradients",
"tensorflow.group",
"tensorflow.get_variable_scope",
"copy.deepcopy",
"copy.copy",
"tensorflow.cast",
"tensorflow.Session",
"tensorflow.placeholder",
"functools.wraps... | [((464, 494), 'tensorflow.gradients', 'tf.gradients', (['tensor', 'var_list'], {}), '(tensor, var_list)\n', (476, 494), True, 'import tensorflow as tf\n'), ((11442, 11536), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'inter_op_parallelism_threads': 'num_cpu', 'intra_op_parallelism_threads': 'num_cpu'}), '(inter_op_parallelism_threads=num_cpu,\n intra_op_parallelism_threads=num_cpu)\n', (11456, 11536), True, 'import tensorflow as tf\n'), ((12471, 12486), 'tensorflow.square', 'tf.square', (['sy_x'], {}), '(sy_x)\n', (12480, 12486), True, 'import tensorflow as tf\n'), ((12498, 12532), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['sy_x_sqr'], {'axis': 'axis'}), '(sy_x_sqr, axis=axis)\n', (12511, 12532), True, 'import tensorflow as tf\n'), ((867, 905), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (890, 905), False, 'import collections\n'), ((927, 966), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : False)'], {}), '(lambda : False)\n', (950, 966), False, 'import collections\n'), ((9528, 9602), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', 'self._tfObject__scope'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES, self._tfObject__scope)\n', (9545, 9602), True, 'import tensorflow as tf\n'), ((9858, 9929), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES', 'self._tfObject__scope'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES, self._tfObject__scope)\n', (9875, 9929), True, 'import tensorflow as tf\n'), ((11681, 11720), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (11702, 11720), True, 'import tensorflow as tf\n'), ((11746, 11774), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (11756, 11774), True, 'import tensorflow as tf\n'), ((12230, 12281), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 
'dtype', 'shape': 'shape', 'name': 'name'}), '(dtype=dtype, shape=shape, name=name)\n', (12244, 12281), True, 'import tensorflow as tf\n'), ((13001, 13027), 'tensorflow.cast', 'tf.cast', (['condition', '"""bool"""'], {}), "(condition, 'bool')\n", (13008, 13027), True, 'import tensorflow as tf\n'), ((13440, 13477), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (13457, 13477), True, 'import tensorflow as tf\n'), ((14142, 14158), 'tensorflow.constant', 'tf.constant', (['out'], {}), '(out)\n', (14153, 14158), True, 'import tensorflow as tf\n'), ((14531, 14546), 'tensorflow.assign', 'tf.assign', (['t', 'p'], {}), '(t, p)\n', (14540, 14546), True, 'import tensorflow as tf\n'), ((14824, 14838), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (14831, 14838), True, 'import numpy as np\n'), ((15364, 15399), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[total_size]'], {}), '(dtype, [total_size])\n', (15378, 15399), True, 'import tensorflow as tf\n'), ((15653, 15671), 'tensorflow.group', 'tf.group', (['*assigns'], {}), '(*assigns)\n', (15661, 15671), True, 'import tensorflow as tf\n'), ((16562, 16604), 'rl.tools.utils.misc_utils.unflatten', 'unflatten', (['val'], {'shapes': 'self._tensor_shapes'}), '(val, shapes=self._tensor_shapes)\n', (16571, 16604), False, 'from rl.tools.utils.misc_utils import unflatten, flatten, cprint\n'), ((16648, 16659), 'rl.tools.utils.misc_utils.flatten', 'flatten', (['vs'], {}), '(vs)\n', (16655, 16659), False, 'from rl.tools.utils.misc_utils import unflatten, flatten, cprint\n'), ((17042, 17094), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[total_size]'}), '(dtype=tf.float32, shape=[total_size])\n', (17056, 17094), True, 'import tensorflow as tf\n'), ((20061, 20079), 'tensorflow.group', 'tf.group', (['*updates'], {}), '(*updates)\n', (20069, 20079), True, 'import tensorflow as tf\n'), ((364, 385), 'tensorflow.reshape', 
'tf.reshape', (['var', '[-1]'], {}), '(var, [-1])\n', (374, 385), True, 'import tensorflow as tf\n'), ((537, 555), 'tensorflow.zeros_like', 'tf.zeros_like', (['var'], {}), '(var)\n', (550, 555), True, 'import tensorflow as tf\n'), ((5014, 5053), 'tensorflow.variable_scope', 'tf.variable_scope', (['self._tfObject__name'], {}), '(self._tfObject__name)\n', (5031, 5053), True, 'import tensorflow as tf\n'), ((5625, 5681), 'tensorflow.train.Saver', 'tf.train.Saver', (['self.ts_allvars'], {'max_to_keep': 'max_to_keep'}), '(self.ts_allvars, max_to_keep=max_to_keep)\n', (5639, 5681), True, 'import tensorflow as tf\n'), ((7084, 7104), 'functools.wraps', 'functools.wraps', (['fun'], {}), '(fun)\n', (7099, 7104), False, 'import functools\n'), ((11094, 11117), 'tensorflow.train.Saver', 'tf.train.Saver', (['ts_dict'], {}), '(ts_dict)\n', (11108, 11117), True, 'import tensorflow as tf\n'), ((15055, 15077), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (15075, 15077), True, 'import tensorflow as tf\n'), ((5082, 5105), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (5103, 5105), True, 'import tensorflow as tf\n'), ((10288, 10312), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (10310, 10312), True, 'import tensorflow as tf\n'), ((11144, 11168), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (11166, 11168), True, 'import tensorflow as tf\n'), ((11396, 11423), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (11421, 11423), False, 'import multiprocessing\n'), ((14012, 14035), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (14027, 14035), True, 'import numpy as np\n'), ((14350, 14394), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': 'x.shape', 'dtype': 'x.dtype'}), '(shape=x.shape, dtype=x.dtype)\n', (14364, 14394), True, 'import tensorflow as tf\n'), ((14430, 14474), 'tensorflow.placeholder', 
'tf.placeholder', ([], {'shape': 'v.shape', 'dtype': 'v.dtype'}), '(shape=v.shape, dtype=v.dtype)\n', (14444, 14474), True, 'import tensorflow as tf\n'), ((15712, 15736), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (15734, 15736), True, 'import tensorflow as tf\n'), ((15973, 15997), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (15995, 15997), True, 'import tensorflow as tf\n'), ((17225, 17262), 'tensorflow.reshape', 'tf.reshape', (['ph[idx:idx + size]', 'shape'], {}), '(ph[idx:idx + size], shape)\n', (17235, 17262), True, 'import tensorflow as tf\n'), ((6900, 6918), 'copy.deepcopy', 'copy.deepcopy', (['val'], {}), '(val)\n', (6913, 6918), False, 'import copy\n'), ((15562, 15606), 'tensorflow.reshape', 'tf.reshape', (['theta[start:start + size]', 'shape'], {}), '(theta[start:start + size], shape)\n', (15572, 15606), True, 'import tensorflow as tf\n'), ((20825, 20849), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (20847, 20849), True, 'import tensorflow as tf\n'), ((6970, 6984), 'copy.copy', 'copy.copy', (['val'], {}), '(val)\n', (6979, 6984), False, 'import copy\n'), ((14084, 14098), 'numpy.square', 'np.square', (['out'], {}), '(out)\n', (14093, 14098), True, 'import numpy as np\n')] |
import json
# from collections import OrderedDict
from bokeh.io import show, save, output_file
from bokeh.plotting import figure
from bokeh.models import HoverTool
from bokeh.palettes import Set1_6
import numpy as np
from PIL import Image
import os
Image.MAX_IMAGE_PIXELS = None
if __name__ == '__main__':
    # Bokeh toolbar configuration; adjacent string literals are implicitly
    # concatenated, so this is one comma-separated list ("hover" disabled).
    tools_list = "pan," \
                 "box_select," \
                 "lasso_select," \
                 "box_zoom, " \
                 "wheel_zoom," \
                 "reset," \
                 "save," \
                 "help," \
    # "hover"
    # Folder containing one JSON annotation file per slide.
    folder = './ar_annotations'
    file_names = os.listdir(folder)
    # Annotation coordinates are divided by this factor to match the
    # downscaled background image (the "_registered_8" variant).
    rescale_index = 8
    # Extra scale factor applied to the output plot dimensions.
    size_index = 1.0
    types = ['PAS', 'AF_preIMS']
    image_type = types[0]
    # Per-slide PIL Image.AFFINE 6-tuples used to nudge the background image
    # into alignment with the annotation coordinates.
    shift_dict = {
        "VAN0009-LK-102-7-PAS": (1, 0, -13, 0, 1, - 4),
        "VAN0010-LK-155-40-PAS": (1, 0, -13, 0, 1, - 4),
        "VAN0014-LK-203-108-PAS": (1.015, -0.010, 0, 0, 1, -2),
    }
    # Known region labels; a label's index in this list selects its Set1_6 color.
    ALL_TYPE_LIST = ['Medulla', 'Inner medulla', 'Cortex', 'Outer Medulla', 'Outer Stripe']
    for file_name in file_names[:]:
        # file_name = './annotations/VAN0006-LK-2-85-AF_preIMS_registered_glomerulus_detections.json'
        output_name = file_name.split('PAS')[0]
        # Prefer the registered 8x-downscaled image; fall back to the FFPE scan.
        image_name = f'result/images/{image_type}/{output_name}{image_type}_registered_8.jpg'
        if not os.path.isfile(image_name):
            image_name = f'result/images/{image_type}/{output_name}{image_type}_FFPE.ome.jpg'
        if not os.path.isfile(image_name):
            print('image file does not exist')
            continue
        html_name = f"{output_name}{image_type}"
        output_file(f"result/{html_name}.html")
        with open(os.path.join(folder, file_name)) as data_file:
            data = json.load(data_file)
        # Gather polygon coordinates, class names and colors per annotation.
        coor_list = []
        type_list = []
        color_list = []
        color_list_2 = []
        for item in data:
            coor_list.extend(item["geometry"]["coordinates"])
            type_list.append(item["properties"]["classification"]["name"])
            # colorRGB arrives as a (possibly negative) integer; format as hex.
            color_list.append(
                f'{"#{:06x}".format(abs(int(item["properties"]["classification"]["colorRGB"])))}')
            # NOTE(review): Set1_6 has 6 colors; a label outside the initial
            # five in ALL_TYPE_LIST could index past the palette — confirm.
            color_list_2.append(Set1_6[ALL_TYPE_LIST.index(type_list[-1])])
        ALL_TYPE_LIST.extend(type_list)
        # df = json_normalize(data, max_level=1)
        # geo = df['geometry']
        # coor = pd.read_json(geo.values) # geo['coordinates']
        # print(file_name, len(coor_list))
        # Downscale polygon vertices to the background image resolution.
        x_list = [[xy[0] // rescale_index for xy in coor] for coor in coor_list]
        y_list = [[xy[1] // rescale_index for xy in coor] for coor in coor_list]
        background_img = Image.open(image_name).convert('RGBA')
        if html_name in shift_dict:
            background_img = background_img.transform(background_img.size, Image.AFFINE, shift_dict[html_name])
        # background_img = np.roll(background_img,(10,0,0))
        xdim, ydim = background_img.size
        # Pack the RGBA image into the uint32-per-pixel layout expected by
        # image_rgba; the uint8 view shares the same buffer, so the fill
        # below writes directly into a_layer.
        a_layer = np.empty((ydim, xdim), dtype=np.uint32)
        view = a_layer.view(dtype=np.uint8).reshape((ydim, xdim, 4))
        # view[:, :, :] = np.flipud(np.asarray(lena_img))
        view[:, :, :] = np.asarray(background_img)
        p = figure(match_aspect=True,
               plot_width=int(xdim * size_index), plot_height=int(ydim * size_index),
               tools=tools_list,
               # title='nuclei/vessel distance',
               )
        # p.image_url(url=[image_name], x=0, y=0, anchor="bottom_left")
        p.image_rgba(image=[a_layer], x=0, y=0, dw=xdim, dh=ydim)
        # Hide axes and grid so only the image and the overlays are visible.
        p.xgrid.visible = False
        p.ygrid.visible = False
        p.axis.visible = False
        p.background_fill_alpha = 0.0
        p.outline_line_color = None
        p.add_tools(HoverTool(show_arrow=False,
                              line_policy='nearest',
                              tooltips=None))
        # VIS_source = ColumnDataSource(data={
        #     'x_list': x_list,
        #     'y_list': y_list,
        #     'color': color_list,
        #     'title': type_list
        # })
        # Draw each annotation polygon; hovering fades it, and clicking a
        # legend entry mutes that class (click_policy = "mute" below).
        for xs, ys, color, legend_title in zip(x_list, y_list, color_list_2, type_list):
            p.patch(xs, ys,
                    fill_alpha=0.5, line_alpha=0.6, color=color, line_width=3,
                    hover_line_alpha=0.05,
                    hover_fill_alpha=0.05,
                    muted_alpha=0,
                    muted=False,
                    legend_label=legend_title, )
            # source=VIS_source)
        p.legend.location = "top_left"
        p.legend.click_policy = "mute"
        # Toggle: open the plot in a browser vs. save the HTML silently.
        show_image = False
        if show_image:
            show(p)
        else:
            save(p)
    # print(set(ALL_TYPE_LIST))
| [
"bokeh.io.output_file",
"bokeh.io.show",
"os.listdir",
"bokeh.io.save",
"PIL.Image.open",
"numpy.asarray",
"os.path.join",
"os.path.isfile",
"numpy.empty",
"json.load",
"bokeh.models.HoverTool"
] | [((617, 635), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (627, 635), False, 'import os\n'), ((1621, 1660), 'bokeh.io.output_file', 'output_file', (['f"""result/{html_name}.html"""'], {}), "(f'result/{html_name}.html')\n", (1632, 1660), False, 'from bokeh.io import show, save, output_file\n'), ((2958, 2997), 'numpy.empty', 'np.empty', (['(ydim, xdim)'], {'dtype': 'np.uint32'}), '((ydim, xdim), dtype=np.uint32)\n', (2966, 2997), True, 'import numpy as np\n'), ((3149, 3175), 'numpy.asarray', 'np.asarray', (['background_img'], {}), '(background_img)\n', (3159, 3175), True, 'import numpy as np\n'), ((1331, 1357), 'os.path.isfile', 'os.path.isfile', (['image_name'], {}), '(image_name)\n', (1345, 1357), False, 'import os\n'), ((1468, 1494), 'os.path.isfile', 'os.path.isfile', (['image_name'], {}), '(image_name)\n', (1482, 1494), False, 'import os\n'), ((1745, 1765), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (1754, 1765), False, 'import json\n'), ((3745, 3810), 'bokeh.models.HoverTool', 'HoverTool', ([], {'show_arrow': '(False)', 'line_policy': '"""nearest"""', 'tooltips': 'None'}), "(show_arrow=False, line_policy='nearest', tooltips=None)\n", (3754, 3810), False, 'from bokeh.models import HoverTool\n'), ((4639, 4646), 'bokeh.io.show', 'show', (['p'], {}), '(p)\n', (4643, 4646), False, 'from bokeh.io import show, save, output_file\n'), ((4673, 4680), 'bokeh.io.save', 'save', (['p'], {}), '(p)\n', (4677, 4680), False, 'from bokeh.io import show, save, output_file\n'), ((1679, 1710), 'os.path.join', 'os.path.join', (['folder', 'file_name'], {}), '(folder, file_name)\n', (1691, 1710), False, 'import os\n'), ((2652, 2674), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (2662, 2674), False, 'from PIL import Image\n')] |
import pickle
from collections import defaultdict, namedtuple
import numpy as np
import argparse
import os
import model.config as config
import preprocessing.util as util
from termcolor import colored
import tensorflow as tf
class VocabularyCounter(object):
    """counts the frequency of each word and each character in the corpus. With each
    file that it processes it increases the counters. So one frequency vocab for all the files
    that it processes."""
    def __init__(self, lowercase_emb=False):
        # Word2Vec is loaded only for membership checks: words that have no
        # pretrained embedding are excluded from the frequency vocabulary.
        import gensim
        self.model = gensim.models.KeyedVectors.load_word2vec_format(
            config.base_folder+"data/basic_data/wordEmbeddings/Word2Vec/GoogleNews-vectors-negative300.bin", binary=True)
        """lowercase_emb=False if True then we lowercase the word for counting of
        frequencies and hence for finding the pretrained embedding."""
        self.word_freq = defaultdict(int)  # word -> occurrence count
        self.char_freq = defaultdict(int) # how many times each character is encountered
        self.lowercase_emb = lowercase_emb
        self.not_in_word2vec_cnt = 0  # word tokens skipped (no embedding)
        self.all_words_cnt = 0  # every word token seen, counted or skipped
    def add(self, filepath):
        """the file must be in the new dataset format."""
        with open(filepath) as fin:
            for line in fin:
                # Structural markers (document/mention boundaries, newline
                # tokens) carry no vocabulary content.
                if line.startswith("DOCSTART_") or line.startswith("DOCEND") or\
                        line.startswith("MMSTART_") or line.startswith("MMEND") or \
                        line.startswith("*NL*"):
                    continue
                line = line.rstrip() # omit the '\n' character
                word = line.lower() if self.lowercase_emb else line
                self.all_words_cnt += 1
                if word not in self.model:
                    self.not_in_word2vec_cnt += 1
                else:
                    self.word_freq[word] += 1
                # Characters are counted from the original, non-lowercased line.
                for c in line:
                    self.char_freq[c] += 1
    def print_statistics(self, word_edges=None,
                         char_edges=None):
        """Print some statistics about word and char frequency."""
        # Default histogram bin edges; bins are right-open, last is unbounded.
        if word_edges is None:
            word_edges = [1, 2, 3, 6, 11, 21, 31, 51, 76, 101, 201, np.inf]
        if char_edges is None:
            char_edges = [1, 6, 11, 21, 51, 101, 201, 501, 1001, 2001, np.inf]
        print("not_in_word2vec_cnt = ", self.not_in_word2vec_cnt)
        print("all_words_cnt = ", self.all_words_cnt)
        print("some frequency statistics. The bins are [...) ")
        for d, name, edges in zip([self.word_freq, self.char_freq], ["word", "character"], [word_edges, char_edges]):
            hist_values, _ = np.histogram(list(d.values()), edges)
            # Cumulate right-to-left: how many items occur at least N times.
            cum_sum = np.cumsum(hist_values[::-1])
            print(name, " frequency histogram, edges: ", edges)
            print("absolute values: ", hist_values)
            print("absolute cumulative (right to left): ", cum_sum[::-1])
            print("probabilites cumulative (right to left):", (cum_sum / np.sum(hist_values))[::-1])
    def serialize(self, folder=None, name="vocab_freq.pickle"):
        """Pickle the (word_freq, char_freq) pair under *folder*/*name*."""
        if folder is None:
            folder = config.base_folder+"data/vocabulary/"
        if not os.path.exists(folder):
            os.makedirs(folder)
        with open(folder+name, 'wb') as handle:
            pickle.dump((self.word_freq, self.char_freq), handle)
    def count_datasets_vocabulary(self):
        """Accumulate counts over every dataset file, then print and serialize."""
        # **YD** change the directory location
        new_dataset_folder = config.base_folder+"data/new_datasets/"
        # new_dataset_folder = config.base_folder + "data/yd_datasets/"
        """
        datasets = ['aida_train.txt', 'aida_dev.txt', 'aida_test.txt', 'ace2004.txt',
                    'aquaint.txt', 'clueweb.txt', 'msnbc.txt', 'wikipedia.txt']
        """
        for dataset in util.get_immediate_files(new_dataset_folder):
            dataset = os.path.basename(os.path.normpath(dataset))
            print("Processing dataset: ", dataset)
            self.add(new_dataset_folder+dataset)
        self.print_statistics()
        self.serialize(folder=config.base_folder+"data/vocabulary/",
                       name="vocab_freq.pickle")
def build_word_char_maps():
    """Build word<->id and char<->id maps from the serialized frequency vocab.

    Keeps only words/chars whose corpus frequency reaches args.word_freq_thr /
    args.char_freq_thr, reserves id 0 for the unknown word "<wunk>" and the
    unknown character "<u>", pickles the maps, and saves the matching Word2Vec
    embedding matrix (row i = embedding of the word with id i, row 0 = zeros).
    Returns (word2id, char2id).
    """
    output_folder = config.base_folder+"data/tfrecords/"+args.experiment_name+"/"
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    with open(config.base_folder+"data/vocabulary/vocab_freq.pickle", 'rb') as handle:
        word_freq, char_freq = pickle.load(handle)
    word2id = dict()
    id2word = dict()
    char2id = dict()
    id2char = dict()
    wcnt = 0 # unknown word
    word2id["<wunk>"] = wcnt
    id2word[wcnt] = "<wunk>"
    wcnt += 1
    ccnt = 0 # unknown character
    char2id["<u>"] = ccnt
    id2char[ccnt] = "<u>"
    ccnt += 1
    # for every word in the corpus (we have already filtered out the words that are not in word2vec)
    for word in word_freq:
        if word_freq[word] >= args.word_freq_thr:
            word2id[word] = wcnt
            id2word[wcnt] = word
            wcnt += 1
    for c in char_freq:
        if char_freq[c] >= args.char_freq_thr:
            char2id[c] = ccnt
            id2char[ccnt] = c
            ccnt += 1
    assert(len(word2id) == wcnt)
    assert(len(char2id) == ccnt)
    print("words in vocabulary: ", wcnt)
    print("characters in vocabulary: ", ccnt)
    # Thresholds are stored alongside the maps so a restore can verify them.
    with open(output_folder+"word_char_maps.pickle", 'wb') as handle:
        pickle.dump((word2id, id2word, char2id, id2char, args.word_freq_thr,
                     args.char_freq_thr), handle)
    import gensim
    model = gensim.models.KeyedVectors.load_word2vec_format(
        config.base_folder+"data/basic_data/wordEmbeddings/Word2Vec/GoogleNews-vectors-negative300.bin", binary=True)
    # Infer the embedding dimensionality from an arbitrary in-vocab word.
    embedding_dim = len(model['queen'])
    embeddings_array = np.empty((wcnt, embedding_dim)) # id2emb
    embeddings_array[0] = np.zeros(embedding_dim)  # id 0 = <wunk> -> zero vector
    for i in range(1, wcnt):
        embeddings_array[i] = model[id2word[i]]
    np.save(output_folder+'embeddings_array.npy', embeddings_array)
    return word2id, char2id
def build_word_char_maps_restore():
    """Reload the previously serialized word/char maps for this experiment.

    Returns (word2id, char2id); the id->token maps and thresholds stored in
    the same pickle are discarded.
    """
    output_folder = config.base_folder+"data/tfrecords/"+args.experiment_name+"/"
    with open(output_folder+"word_char_maps.pickle", 'rb') as handle:
        stored = pickle.load(handle)
    word2id, _, char2id, _, _, _ = stored
    return word2id, char2id
class Chunker(object):
    """Splits a dataset file into chunks (documents, paragraphs or sentences,
    depending on args.chunking) and yields, per chunk, the word list together
    with the gold-mention boundaries and their ground-truth entity ids."""
    def __init__(self):
        self.separator = args.chunking
        # Tokens that terminate a chunk; DOCEND always does, finer
        # granularities additionally end on '*NL*' and (per sentence) '.'.
        self.chunk_ending = {'DOCEND'}
        if self.separator == "per_paragraph":
            self.chunk_ending.add('*NL*')
        if self.separator == "per_sentence":
            self.chunk_ending.add('.')
            self.chunk_ending.add('*NL*')
        self.parsing_errors = 0
    def new_chunk(self):
        """Reset the per-chunk accumulators."""
        self.chunk_words = []
        self.begin_gm = [] # the starting positions of gold mentions
        self.end_gm = [] # the end positions of gold mentions
        self.ground_truth = [] # list with the correct entity ids
    def compute_result(self, docid):
        """Finalize the current chunk: build its id, sanity-check the mention
        spans, reset state, and return the chunk tuple or None on error."""
        chunk_id = docid
        if self.separator == "per_paragraph":
            chunk_id = chunk_id + "&*" + str(self.par_cnt)
        if self.separator == "per_sentence":
            chunk_id = chunk_id + "&*" + str(self.par_cnt) + "&*" + str(self.sent_cnt)
        result = (chunk_id, self.chunk_words, self.begin_gm, self.end_gm, self.ground_truth)
        # correctness checks. not necessary
        no_errors_flag = True
        if len(self.begin_gm) != len(self.end_gm) or \
                len(self.begin_gm) != len(self.ground_truth):
            no_errors_flag = False
        for b, e in zip(self.begin_gm, self.end_gm):
            # every mention span must be non-empty and lie inside the chunk
            if e <= b or b >= len(self.chunk_words) or e > len(self.chunk_words):
                no_errors_flag = False
        self.new_chunk()
        if no_errors_flag == False:
            self.parsing_errors += 1
            print("chunker parse error: ", result)
            return None
        else:
            return result
    def process(self, filepath):
        """Generator over chunks of *filepath* (new dataset format)."""
        with open(filepath) as fin:
            self.new_chunk()
            docid = ""
            # paragraph and sentence counter are not actually useful. only for debugging purposes.
            self.par_cnt = 0 # paragraph counter (useful if we work per paragraph)
            self.sent_cnt = 0 # sentence counter (useful if we work per sentence)
            for line in fin:
                line = line.rstrip() # omit the '\n' character
                if line in self.chunk_ending:
                    if len(self.chunk_words) > 0: # if we have continues *NL* *NL* do not return empty chunks
                        temp = self.compute_result(docid)
                        if temp is not None:
                            yield temp
                    # do not add the chunk separator, no use
                    if line == '*NL*':
                        self.par_cnt += 1
                        self.sent_cnt = 0
                    if line == '.':
                        self.sent_cnt += 1
                elif line == '*NL*':
                    self.par_cnt += 1
                    self.sent_cnt = 0
                    # do not add this in our words list
                elif line == '.':
                    # fullstop stays in the word list here; it only ends a
                    # chunk in per_sentence mode (handled by the branch above)
                    self.sent_cnt += 1
                    self.chunk_words.append(line)
                elif line.startswith('MMSTART_'):
                    ent_id = line[8:] # assert that ent_id in wiki_name_id_map
                    self.ground_truth.append(ent_id)
                    self.begin_gm.append(len(self.chunk_words))
                elif line == 'MMEND':
                    self.end_gm.append(len(self.chunk_words))
                elif line.startswith('DOCSTART_'):
                    docid = line[9:]
                    self.par_cnt = 0
                    self.sent_cnt = 0
                else:
                    self.chunk_words.append(line)
        print(filepath, " chunker parsing errors: ", self.parsing_errors)
        self.parsing_errors = 0
# Sample carrying candidate entities for the gold mentions (gm) only.
GmonlySample = namedtuple("GmonlySample",
                          ["chunk_id", "chunk_words", 'begin_gm', "end_gm",
                           "ground_truth", "cand_entities", "cand_entities_scores"])
# Sample carrying candidate entities for every considered span;
# begin_gm/end_gm retain the gold mentions so recall can still be evaluated.
AllspansSample = namedtuple("AllspansSample",
                            ["chunk_id", "chunk_words", "begin_spans", "end_spans",
                             "ground_truth", "cand_entities", "cand_entities_scores",
                             "begin_gm", "end_gm"])
class SamplesGenerator(object):
    """Wraps a Chunker and, for each chunk, attaches candidate entities:
    either for the gold mentions only ("gmonly" mode) or for every text span
    up to args.max_mention_width ("allspans" mode)."""
    def __init__(self, mode="allspans"):
        self.mode = mode
        self._generator = Chunker()
        self.fetchFilteredCoreferencedCandEntities = util.FetchFilteredCoreferencedCandEntities(args)
        # Aggregate recall statistics accumulated over all processed datasets.
        self.all_gm_misses = 0
        self.all_gt_misses = 0
        self.all_gm = 0 # all the gm encountered in all the datasets
    def set_gmonly_mode(self):
        self.mode = "gmonly"
    def set_allspans_mode(self):
        self.mode = "allspans"
    def is_gmonly_mode(self):
        return True if self.mode == "gmonly" else False
    def is_allspans_mode(self):
        return True if self.mode == "allspans" else False
    def process(self, filepath):
        """Dispatch to the generator matching the current mode."""
        if self.is_allspans_mode():
            return self._process_allspans(filepath)
        else:
            return self._process_gmonly(filepath)
    def _process_allspans(self, filepath):
        """Yield an AllspansSample per chunk: every span that has candidates."""
        gt_misses = 0
        gm_misses = 0
        gm_this_file = 0 # how many gold mentions are in this document - dataset. so we can find percentage for misses
        max_mention_width_violations = 0
        for chunk in self._generator.process(filepath):
            self.fetchFilteredCoreferencedCandEntities.init_coref(el_mode=True)
            begin_spans = []
            end_spans = []
            cand_entities = [] # list of lists candidate entities
            cand_entities_scores = []
            chunk_id, chunk_words, begin_gm, end_gm, ground_truth = chunk
            gm_this_file += len(begin_gm)
            # Keep only the spans for which candidates were found.
            for left, right in self.all_spans(chunk_words):
                cand_ent, scores = self.fetchFilteredCoreferencedCandEntities.process(left, right, chunk_words)
                if cand_ent is not None:
                    begin_spans.append(left)
                    end_spans.append(right)
                    cand_entities.append(cand_ent)
                    cand_entities_scores.append(scores)
            if args.calculate_stats:
                # check if gold mentions are inside the candidate spans and if yes check if ground truth is in cand ent.
                gm_spans = list(zip(begin_gm, end_gm)) # [(3, 5), (10, 11), (15, 18)]
                all_spans = list(zip(begin_spans, end_spans))
                for i, gm_span in enumerate(gm_spans):
                    if gm_span not in all_spans:
                        gm_misses += 1
                        #print("gm not in spans\t\t\t", colored(' '.join(chunk_words[gm_span[0]:gm_span[1]]), 'red'))
                    elif ground_truth[i] not in cand_entities[all_spans.index(gm_span)]:
                        gt_misses += 1
                        #print("gt not in cand ent", colored(' '.join(chunk_words[gm_span[0]:gm_span[1]]), 'green'))
                        #print("gt: ", ground_truth[i], "cand_ent: ", cand_entities[all_spans.index(gm_span)])
                for b, e in zip(begin_gm, end_gm):
                    if e - b > args.max_mention_width:
                        max_mention_width_violations += 1
            if begin_spans: # there are candidate spans in the processed text
                yield AllspansSample(chunk_id, chunk_words, begin_spans, end_spans,
                                     ground_truth, cand_entities, cand_entities_scores,
                                     begin_gm, end_gm)
        if args.calculate_stats:
            print("max_mention_width_violations :", max_mention_width_violations)
            print("gt_misses", gt_misses)
            print("gm_misses", gm_misses)
            print("gm_this_file: ", gm_this_file)
            print("recall % : ", (1 - (gm_misses+gt_misses)/gm_this_file)*100, " %")
            self.all_gt_misses += gt_misses
            self.all_gm_misses += gm_misses
            self.all_gm += gm_this_file
    @staticmethod
    def all_spans(chunk_words):
        # this function produces all possible text spans that do not include spans separators (fullstops).
        # divide the list of words to lists of lists based on spans_separator.
        # e.g. if chunk_words is for the whole document divide it to sentences (a list of
        # sentences) since no span extend above a fullstop.
        separation_indexes = []
        spans_separator = set(config.spans_separators)
        for idx, word in enumerate(chunk_words):
            if word in spans_separator:
                separation_indexes.append(idx)
        separation_indexes.append(len(chunk_words))
        def all_spans_aux(begin_idx, end_idx):
            # all spans of width 1..max_mention_width inside [begin_idx, end_idx)
            for left_idx in range(begin_idx, end_idx):
                for length in range(1, args.max_mention_width + 1):
                    if left_idx + length > end_idx:
                        break
                    yield left_idx, left_idx + length
        begin_idx = 0
        for end_idx in separation_indexes:
            for left, right in all_spans_aux(begin_idx, end_idx):
                # print(left, right, chunk_words[left:right])
                # print(left, right, ' '.join(chunk_words[left:right])
                yield left, right
            begin_idx = end_idx + 1
    def _process_gmonly(self, filepath):
        """Yield a GmonlySample per chunk: candidates for gold mentions only."""
        gt_misses = 0
        gm_misses = 0
        gm_this_file = 0
        max_mention_width_violations = 0
        for chunk in self._generator.process(filepath):
            self.fetchFilteredCoreferencedCandEntities.init_coref(el_mode=False)
            cand_entities = [] # list of lists candidate entities
            cand_entities_scores = []
            chunk_id, chunk_words, begin_gm, end_gm, ground_truth = chunk
            gm_this_file += len(begin_gm)
            for left, right, gt in zip(begin_gm, end_gm, ground_truth):
                cand_ent, scores = self.fetchFilteredCoreferencedCandEntities.process(left, right, chunk_words)
                if cand_ent is None:
                    # no candidates for this mention: keep it with empty lists
                    gm_misses += 1
                    cand_ent, scores = [], []
                    #print("gm not in p_e_m\t\t\t", colored(' '.join(chunk_words[left:right]), 'red'))
                elif args.calculate_stats and gt not in cand_ent:
                    gt_misses += 1
                    #print("gt not in cand ent", colored(' '.join(chunk_words[left:right]), 'green'))
                    #print("gt: ", gt, "cand_ent: ", cand_ent)
                if right - left > args.max_mention_width:
                    max_mention_width_violations += 1
                    #print(' '.join(chunk_words[left:right])
                #print(cand_ent, scores)
                cand_entities.append(cand_ent)
                cand_entities_scores.append(scores)
            if begin_gm: #not emtpy
                yield GmonlySample(chunk_id, chunk_words, begin_gm, end_gm, ground_truth,
                                   cand_entities, cand_entities_scores)
        if args.calculate_stats:
            print("max_mention_width_violations :", max_mention_width_violations)
            print("gt_misses", gt_misses)
            print("gm_misses", gm_misses)
            print("gm_this_file", gm_this_file)
            print("recall % : ", (1 - (gm_misses+gt_misses)/gm_this_file)*100, " %")
            self.all_gt_misses += gt_misses
            self.all_gm_misses += gm_misses
            self.all_gm += gm_this_file
# Fully numeric sample (ids instead of strings), ready for tfrecord export.
SampleEncoded = namedtuple("SampleEncoded",
                           ["chunk_id",
                            "words", 'words_len',  # list, scalar
                            'chars', 'chars_len',  # list of lists, list
                            'begin_spans', "end_spans", 'spans_len',  # the first 2 are lists, last is scalar
                            "cand_entities", "cand_entities_scores", 'cand_entities_labels',  # lists of lists
                            'cand_entities_len',  # list
                            "ground_truth", "ground_truth_len",
                            'begin_gm', 'end_gm'])  # list
class EncoderGenerator(object):
    """receives samples Train or Test samples and encodes everything to numbers ready to
    be transformed to tfrecords. Also filters out candidate entities that are not in the
    entity universe."""

    def __init__(self):
        self._generator = SamplesGenerator()
        # word/char -> integer id maps built from the dataset vocabularies
        self._word2id, self._char2id = build_word_char_maps()
        #self._word2id, self._char2id = build_word_char_maps_restore() # alternative
        # wiki entity id -> neural-network id for the entity universe
        self._wikiid2nnid = util.load_wikiid2nnid(args.entity_extension)

    def set_gmonly_mode(self):
        self._generator.set_gmonly_mode()

    def set_allspans_mode(self):
        self._generator.set_allspans_mode()

    def is_gmonly_mode(self):
        return self._generator.is_gmonly_mode()

    def is_allspans_mode(self):
        return self._generator.is_allspans_mode()

    def process(self, filepath):
        """Yield a SampleEncoded for every sample produced from *filepath*.

        Words, chars and entities are mapped to integer ids ("<wunk>"/"<u>"
        stand in for out-of-vocabulary items); samples whose span/ground-truth
        lengths disagree are skipped and counted.
        """
        ground_truth_errors_cnt = 0
        cand_entities_not_in_universe_cnt = 0
        samples_with_errors = 0
        for sample in self._generator.process(filepath):
            words = []
            chars = []
            for word in sample.chunk_words:
                words.append(self._word2id[word] if word in self._word2id
                             else self._word2id["<wunk>"])
                chars.append([self._char2id[c] if c in self._char2id else self._char2id["<u>"]
                              for c in word])
            chars_len = [len(word) for word in chars]
            ground_truth_enc = [self._wikiid2nnid[gt] if gt in self._wikiid2nnid else self._wikiid2nnid["<u>"]
                                for gt in sample.ground_truth]
            ground_truth_errors_cnt += ground_truth_enc.count(self._wikiid2nnid["<u>"]) # it is always zero
            #print(colored("New sample", 'red'))
            #print(sample)
            if len(sample.begin_gm) != len(sample.end_gm) or \
                    len(sample.begin_gm) != len(ground_truth_enc):
                samples_with_errors += 1
                continue
            if isinstance(sample, GmonlySample):
                cand_entities, cand_entities_scores, cand_entities_labels, not_in_universe_cnt = \
                    self._encode_cand_entities_and_labels(
                        sample.cand_entities, sample.cand_entities_scores, sample.ground_truth)
                yield SampleEncoded(chunk_id=sample.chunk_id,
                                    words=words, words_len=len(words),
                                    chars=chars, chars_len=chars_len,
                                    begin_spans=sample.begin_gm, end_spans=sample.end_gm, spans_len=len(sample.begin_gm),
                                    cand_entities=cand_entities, cand_entities_scores=cand_entities_scores,
                                    cand_entities_labels=cand_entities_labels,
                                    cand_entities_len=[len(t) for t in cand_entities],
                                    ground_truth=ground_truth_enc, ground_truth_len=len(sample.ground_truth),
                                    begin_gm=[], end_gm=[])
            elif isinstance(sample, AllspansSample):
                if len(sample.begin_spans) != len(sample.end_spans):
                    samples_with_errors += 1
                    continue
                # for each span i have the gt or the value -1 if this span is not a gm
                # and then i work in the same way as above
                span_ground_truth = []
                gm_spans = list(zip(sample.begin_gm, sample.end_gm)) # [(3, 5), (10, 11), (15, 18)]
                for left, right in zip(sample.begin_spans, sample.end_spans):
                    if (left, right) in gm_spans:
                        span_ground_truth.append(sample.ground_truth[gm_spans.index((left, right))])
                    else:
                        span_ground_truth.append(-1) # this span is not a gm
                cand_entities, cand_entities_scores, cand_entities_labels, not_in_universe_cnt = \
                    self._encode_cand_entities_and_labels(
                        sample.cand_entities, sample.cand_entities_scores, span_ground_truth)
                yield SampleEncoded(chunk_id=sample.chunk_id,
                                    words=words, words_len=len(words),
                                    chars=chars, chars_len=chars_len,
                                    begin_spans=sample.begin_spans, end_spans=sample.end_spans, spans_len=len(sample.begin_spans),
                                    cand_entities=cand_entities, cand_entities_scores=cand_entities_scores,
                                    cand_entities_labels=cand_entities_labels,
                                    cand_entities_len=[len(t) for t in cand_entities],
                                    ground_truth=ground_truth_enc, ground_truth_len=len(sample.ground_truth),
                                    begin_gm=sample.begin_gm, end_gm=sample.end_gm)
            cand_entities_not_in_universe_cnt += not_in_universe_cnt
        print("ground_truth_errors_cnt =", ground_truth_errors_cnt)
        print("cand_entities_not_in_universe_cnt =", cand_entities_not_in_universe_cnt)
        print("encoder samples_with_errors =", samples_with_errors)

    def _encode_cand_entities_and_labels(self, cand_entities_p, cand_entities_scores_p,
                                         ground_truth_p):
        """receives cand_entities (list of lists), and ground_truth (list) and does the following:
        1) removes cand ent that are not in our universe
        2) creates a label 0, 1 if this candidate is correct or not (i.e. if the span is indeed a
        gold mention (row of candidate entities array) and this specific candidate entity (column
        of candidate entities array) is correct. Returns the filtered cand_entities
        and the corresponding label (they have the same shape)"""
        cand_entities = []
        cand_entities_scores = []
        cand_entities_labels = []
        not_in_universe_cnt = 0
        for cand_ent_l, cand_scores_l, gt in zip(cand_entities_p, cand_entities_scores_p, ground_truth_p):
            ent_l = []
            score_l = []
            label_l = []
            for cand_ent, score in zip(cand_ent_l, cand_scores_l):
                if cand_ent in self._wikiid2nnid: # else continue, this entity not in our universe
                    ent_l.append(self._wikiid2nnid[cand_ent])
                    score_l.append(score)
                    label_l.append(1 if cand_ent == gt else 0)
                else:
                    not_in_universe_cnt += 1
            cand_entities.append(ent_l)
            cand_entities_scores.append(score_l)
            cand_entities_labels.append(label_l)
        return cand_entities, cand_entities_scores, cand_entities_labels, not_in_universe_cnt
class TFRecordsGenerator(object):
    """Serializes encoded samples to tf.train.SequenceExample protos and writes
    one tfrecords file per dataset file."""

    def __init__(self):
        self._generator = EncoderGenerator()

    def set_gmonly_mode(self):
        self._generator.set_gmonly_mode()

    def set_allspans_mode(self):
        self._generator.set_allspans_mode()

    def is_gmonly_mode(self):
        return self._generator.is_gmonly_mode()

    def is_allspans_mode(self):
        return self._generator.is_allspans_mode()

    @staticmethod
    def _to_sequence_example(sample):
        """Convert one SampleEncoded to a tf.train.SequenceExample.

        Scalars go into the context; per-token / per-span data go into
        feature lists (variable-length lists of features).
        """
        def _bytes_feature(value):
            return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
        # Those two create a simple feature. The first a simple feature with one integer, whereas the second a simple
        # list of integers as one feature.
        def _int64_feature(value):
            """value is a simple integer."""
            return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
        def _int64list_feature(value):
            """value is a list of integers."""
            return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
        def _int64_feature_list(values):
            """ values is a list of integers like the words (words = [2,4,6,8,10])
            a feature list where each feature has only one number (a list with fixed
            number of elements, specifically only one)"""
            return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
        def _int64list_feature_list(values):
            """ like the chars = [[1,2,3], [4,5], [6], [7,8], [9,10,11,12]] a feature list where each feature can have variable
            number of ements"""
            return tf.train.FeatureList(feature=[_int64list_feature(v) for v in values])
        def _floatlist_feature_list(values):
            """ like the chars = [[0.1,0.2,0.3], [0.4,0.5]] a feature list where each feature can have variable
            number of ements"""
            def _floatlist_feature(value):
                """value is a list of integers."""
                return tf.train.Feature(float_list=tf.train.FloatList(value=value))
            return tf.train.FeatureList(feature=[_floatlist_feature(v) for v in values])
        context = tf.train.Features(feature={
            "chunk_id": _bytes_feature(sample.chunk_id.encode('utf-8')),
            "words_len": _int64_feature(sample.words_len),
            "spans_len": _int64_feature(sample.spans_len),
            "ground_truth_len": _int64_feature(sample.ground_truth_len)
        })
        feature_list = {
            "words": _int64_feature_list(sample.words),
            "chars": _int64list_feature_list(sample.chars),
            "chars_len": _int64_feature_list(sample.chars_len),
            "begin_span": _int64_feature_list(sample.begin_spans),
            "end_span": _int64_feature_list(sample.end_spans),
            "cand_entities": _int64list_feature_list(sample.cand_entities),
            "cand_entities_scores": _floatlist_feature_list(sample.cand_entities_scores),
            "cand_entities_labels": _int64list_feature_list(sample.cand_entities_labels),
            "cand_entities_len": _int64_feature_list(sample.cand_entities_len),
            "ground_truth": _int64_feature_list(sample.ground_truth)
        }
        if isinstance(sample, SampleEncoded):
            feature_list["begin_gm"] = _int64_feature_list(sample.begin_gm)
            feature_list["end_gm"] = _int64_feature_list(sample.end_gm)
        feature_lists = tf.train.FeatureLists(feature_list=feature_list)
        sequence_example = tf.train.SequenceExample(context=context, feature_lists=feature_lists)
        return sequence_example

    def process(self, filepath):
        """Encode *filepath* and write the resulting records under
        data/tfrecords/<experiment_name>/<gmonly|allspans>/<dataset-name>."""
        print("processing file: ", filepath)
        #the name of the dataset. just extract the last part of path
        filename = os.path.basename(os.path.normpath(filepath))[:-4] # omit the '.txt'
        output_folder = config.base_folder+"data/tfrecords/"+args.experiment_name+"/"
        output_folder += "gmonly/" if self.is_gmonly_mode() else "allspans/"
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        writer = tf.python_io.TFRecordWriter(output_folder+filename)
        records_cnt = 0
        for sample in self._generator.process(filepath):
            #print(sample)
            sequence_example = self._to_sequence_example(sample)
            # write it to file
            if sequence_example is not None:
                writer.write(sequence_example.SerializeToString())
                records_cnt += 1
        writer.close()
        print("records_cnt = ", records_cnt)
def create_tfrecords():
    """Build tfrecords for every dataset file, first in gmonly mode and then
    in allspans mode."""
    #new_dataset_folder = config.base_folder+"data/new_datasets/"
    # **YD**
    folder = config.base_folder + "data/yd_datasets/"
    names = []
    for path in util.get_immediate_files(folder):
        names.append(os.path.basename(os.path.normpath(path)))
    print("datasets: ", names)
    generator = TFRecordsGenerator()
    # run both modes over the same list of dataset files
    for switch_mode in (generator.set_gmonly_mode, generator.set_allspans_mode):
        switch_mode()
        for name in names:
            generator.process(filepath=folder + name)
class PrintSamples(object):
    """Pretty-prints samples with colored diagnostics for mention / gold-entity
    misses, translating entity ids to readable names."""

    def __init__(self, only_misses=True):
        # only_misses: when True, print only the mentions whose gold entity was missed
        _, self.wiki_id_name_map = util.load_wiki_name_id_map()
        self.only_misses = only_misses

    def print_candidates(self, ent_ids_list):
        """takes as input a list of ent_id and returns a string. This string has each ent_id
        together with the corresponding name (in the name withspaces are replaced by underscore)
        and candidates are separated with a single space. e.g. ent_id,Barack_Obama ent_id2,US_President"""
        acc = []
        for ent_id in ent_ids_list:
            acc.append(ent_id + "," + self.wiki_id_name_map[ent_id].replace(' ', '_'))
        return ' '.join(acc)

    def print_sample(self, sample):
        """Print one sample; the color encodes the diagnosis (yellow: gold
        entity missing from candidates, magenta: gold mention span not
        proposed, green: hit, white: ordinary span)."""
        chunk_words, begin_gm, end_gm, ground_truth, cand_entities = \
            sample.chunk_words, sample.begin_gm, sample.end_gm, sample.ground_truth, sample.cand_entities
        if isinstance(sample, GmonlySample):
            misses_idx = []
            for i, (gt, cand_ent) in enumerate(zip(ground_truth, cand_entities)):
                if gt not in cand_ent:
                    misses_idx.append(i) # miss detected
            if self.only_misses and misses_idx:
                print(colored("New sample", 'red'))
                print(' '.join(chunk_words))
                for i in misses_idx:
                    message = ' '.join(chunk_words[begin_gm[i]:end_gm[i]]) + "\tgt=" + \
                              self.print_candidates([ground_truth[i]]) + \
                              "\tCandidates: " + self.print_candidates(cand_entities[i])
                    print(colored(message, 'yellow'))
            if self.only_misses == False:
                # print every mention, highlighting the missed ones
                print(colored("New sample", 'red'))
                print(' '.join(chunk_words))
                for i in range(len(begin_gm)):
                    message = ' '.join(chunk_words[begin_gm[i]:end_gm[i]]) + "\tgt=" + \
                              self.print_candidates([ground_truth[i]]) + \
                              "\tCandidates: " + self.print_candidates(cand_entities[i])
                    print(colored(message, 'yellow' if i in misses_idx else 'white'))
        elif isinstance(sample, AllspansSample):
            begin_spans, end_spans = sample.begin_spans, sample.end_spans
            gm_spans = list(zip(begin_gm, end_gm)) # [(3, 5), (10, 11), (15, 18)]
            all_spans = list(zip(begin_spans, end_spans))
            print(colored("New sample", 'red'))
            print(' '.join(chunk_words))
            for i, gm_span in enumerate(gm_spans):
                if gm_span not in all_spans:
                    # the gold mention span was not even proposed
                    message = ' '.join(chunk_words[begin_gm[i]:end_gm[i]]) + "\tgt=" + \
                              self.print_candidates([ground_truth[i]]) + "\tgm_miss"
                    print(colored(message, 'magenta'))
                elif ground_truth[i] not in cand_entities[all_spans.index(gm_span)]:
                    # span proposed, but its candidate list lacks the gold entity
                    message = ' '.join(chunk_words[begin_gm[i]:end_gm[i]]) + "\tgt=" + \
                              self.print_candidates([ground_truth[i]]) + "\tgt_miss Candidates: " + \
                              self.print_candidates(cand_entities[all_spans.index(gm_span)])
                    print(colored(message, 'yellow'))
            if self.only_misses == False:
                # then also print all the spans and their candidate entities
                for left, right, cand_ent in zip(begin_spans, end_spans, cand_entities):
                    # if span is a mention and includes gt then green color, otherwise white
                    if (left, right) in gm_spans and ground_truth[gm_spans.index((left, right))] in cand_ent:
                        message = ' '.join(chunk_words[left:right]) + "\tgt=" + \
                                  self.print_candidates([ground_truth[gm_spans.index((left, right))]]) + \
                                  "\tgm_gt_hit Candidates: " + \
                                  self.print_candidates(cand_ent)
                        print(colored(message, 'green'))
                    else:
                        message = ' '.join(chunk_words[left:right]) + \
                                  "\t not a mention Candidates: " + \
                                  self.print_candidates(cand_ent)
                        print(colored(message, 'white'))
def create_entity_universe(gmonly_files=None, allspans_files=None, printSamples=None):
    """Collect every candidate and ground-truth entity id occurring in the
    given dataset files and dump them (id<TAB>name) to
    data/entities/entities_universe.txt.

    Returns the set of all collected entity ids.  printSamples, when given,
    is a PrintSamples instance used to print each processed sample.
    """
    new_dataset_folder = config.base_folder+"data/new_datasets/"
    if gmonly_files is None:
        gmonly_files = []
    if allspans_files is None:
        allspans_files = ['aida_train.txt', 'aida_dev.txt', 'aida_test.txt', 'ace2004.txt',
                          'aquaint.txt', 'clueweb.txt', 'msnbc.txt', 'wikipedia.txt']
    print("gmonly_files: ", gmonly_files)
    print("allspans_files: ", allspans_files)

    def create_entity_universe_aux(generator, datasets):
        # accumulate ids from both the candidate lists and the gold labels
        entities_universe = set()
        for dataset in datasets:
            print("Processing dataset: ", dataset)
            for sample in generator.process(filepath=new_dataset_folder+dataset):
                entities_universe.update(*sample.cand_entities)
                entities_universe.update(sample.ground_truth)
                if printSamples:
                    printSamples.print_sample(sample)
        print("Overall statistics: ")
        print("all_gm_misses: ", generator.all_gm_misses)
        print("all_gt_misses: ", generator.all_gt_misses)
        print("all_gm: ", generator.all_gm)
        print("recall % : ", (1 - (generator.all_gm_misses+generator.all_gt_misses)/generator.all_gm)*100, " %")
        print("len(entities_universe):\t\t\t", colored(len(entities_universe), 'red'))
        return entities_universe

    gmonly_entities, allspans_entities = set(), set()
    samplesGenerator = SamplesGenerator()
    if gmonly_files:
        print("gmonly files statistics: ")
        samplesGenerator.set_gmonly_mode()
        gmonly_entities = create_entity_universe_aux(samplesGenerator, gmonly_files)
    if allspans_files:
        print("Test files statistics: ")
        samplesGenerator.set_allspans_mode()
        allspans_entities = create_entity_universe_aux(samplesGenerator, allspans_files)
    all_entities = gmonly_entities | allspans_entities
    print("len(all_entities) = ", len(all_entities))
    # print the entities of our universe to a file together with the name
    with open(config.base_folder+"data/entities/entities_universe.txt", "w") as fout:
        _, wiki_id_name_map = util.load_wiki_name_id_map()
        for ent_id in all_entities:
            fout.write(ent_id + "\t" + wiki_id_name_map[ent_id].replace(' ', '_') + "\n")
    return all_entities
def create_necessary_folders():
    """Ensure the tfrecords output directory exists.

    Uses makedirs(..., exist_ok=True) instead of the original
    exists()+makedirs() pair, which is racy (another process could create
    the directory between the check and the call and crash makedirs).
    """
    os.makedirs(config.base_folder + "data/tfrecords/", exist_ok=True)
def _parse_args():
    """Parse the preprocessing command line arguments.

    Bug fix: the original used ``type=bool`` for flag-like options, but
    ``bool(s)`` is True for ANY non-empty string — so ``--calculate_stats
    False`` silently enabled the flag.  ``_str2bool`` parses textual booleans
    correctly; all option names and defaults are unchanged.
    """
    def _str2bool(v):
        # accept the usual textual spellings of booleans
        if isinstance(v, bool):
            return v
        if v.lower() in ("yes", "true", "t", "1"):
            return True
        if v.lower() in ("no", "false", "f", "0"):
            return False
        raise argparse.ArgumentTypeError("boolean value expected, got %r" % v)

    parser = argparse.ArgumentParser()
    parser.add_argument("--chunking", default="per_document",
                        help="per_sentence or per_paragraph or per_article"
                             "per_document: each document is processed as one example"
                             "per_paragraph: each paragraph is processed as a separate example")
    parser.add_argument("--p_e_m_choice", default="yago",
                        help="'wiki' p(e|m) constructed only from wikipedia dump (prob_wikipedia_p_e_m.txt file),\
 'crosswiki' constructed from wikipedia dump + crosswiki (prob_crosswikis_wikipedia_p_e_m.txt),\
 'yago' (prob_yago_crosswikis_wikipedia_p_e_m.txt)")
    parser.add_argument("--cand_ent_num", type=int, default=30,
                        help="how many candidate entities to keep for each mention")
    parser.add_argument("--lowercase_p_e_m", type=_str2bool, default=False)
    parser.add_argument("--lowercase_spans", type=_str2bool, default=False)
    parser.add_argument("--calculate_stats", type=_str2bool, default=True)
    parser.add_argument("--experiment_name", default="corefmerge",
                        help="under folder data/tfrecords/")
    parser.add_argument("--include_wikidumpRLTD", type=_str2bool, default=False)
    parser.add_argument("--word_freq_thr", type=int, default=1,
                        help="words that have freq less than this are not included in our"
                             "vocabulary.")
    parser.add_argument("--char_freq_thr", type=int, default=1)
    parser.add_argument("--max_mention_width", type=int, default=10,
                        help="in allspans mode consider all spans with"
                             "length <= to this value as candidate entities to be linked")
    parser.add_argument("--entity_extension", default=None,
                        help="extension_entities or extension_entities_all etc")
    parser.add_argument("--persons_coreference", type=_str2bool, default=True)
    parser.add_argument("--persons_coreference_merge", type=_str2bool, default=True)
    parser.add_argument("--create_entity_universe", type=_str2bool, default=False)
    return parser.parse_args()
def log_args(folderpath):
    """Persist the global preprocessing *args* under *folderpath*: a human
    readable prepro_args.txt and a prepro_args.pickle for programmatic reuse.

    Relies on the module-level ``args`` set in the ``__main__`` block.
    """
    # race-free replacement for the exists()+makedirs() check-then-create pair
    os.makedirs(folderpath, exist_ok=True)
    attrs = vars(args)
    with open(folderpath + "prepro_args.txt", "w") as fout:
        fout.write('\n'.join("%s: %s" % item for item in attrs.items()))
    with open(folderpath + "prepro_args.pickle", 'wb') as handle:
        pickle.dump(args, handle)
if __name__ == "__main__":
    args = _parse_args()
    print(args)
    create_necessary_folders()
    # record the exact preprocessing arguments next to the produced tfrecords
    log_args(config.base_folder+"data/tfrecords/"+args.experiment_name+"/")
    # build the word/char frequency vocabularies over all datasets first
    vocabularyCounter = VocabularyCounter()
    vocabularyCounter.count_datasets_vocabulary()
    if args.create_entity_universe:
        # NOTE: the inline "# ])" looks like a leftover of a shorter list variant
        create_entity_universe(gmonly_files=[], allspans_files=['aida_train.txt', 'aida_dev.txt', 'aida_test.txt' # ])
                               , 'ace2004.txt', 'aquaint.txt', 'msnbc.txt'])
    else:
        create_tfrecords()
| [
"preprocessing.util.load_wiki_name_id_map",
"tensorflow.train.Int64List",
"tensorflow.train.SequenceExample",
"numpy.save",
"os.path.exists",
"argparse.ArgumentParser",
"os.path.normpath",
"numpy.empty",
"tensorflow.train.FloatList",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.Featu... | [((10066, 10204), 'collections.namedtuple', 'namedtuple', (['"""GmonlySample"""', "['chunk_id', 'chunk_words', 'begin_gm', 'end_gm', 'ground_truth',\n 'cand_entities', 'cand_entities_scores']"], {}), "('GmonlySample', ['chunk_id', 'chunk_words', 'begin_gm', 'end_gm',\n 'ground_truth', 'cand_entities', 'cand_entities_scores'])\n", (10076, 10204), False, 'from collections import defaultdict, namedtuple\n'), ((10270, 10442), 'collections.namedtuple', 'namedtuple', (['"""AllspansSample"""', "['chunk_id', 'chunk_words', 'begin_spans', 'end_spans', 'ground_truth',\n 'cand_entities', 'cand_entities_scores', 'begin_gm', 'end_gm']"], {}), "('AllspansSample', ['chunk_id', 'chunk_words', 'begin_spans',\n 'end_spans', 'ground_truth', 'cand_entities', 'cand_entities_scores',\n 'begin_gm', 'end_gm'])\n", (10280, 10442), False, 'from collections import defaultdict, namedtuple\n'), ((17800, 18082), 'collections.namedtuple', 'namedtuple', (['"""SampleEncoded"""', "['chunk_id', 'words', 'words_len', 'chars', 'chars_len', 'begin_spans',\n 'end_spans', 'spans_len', 'cand_entities', 'cand_entities_scores',\n 'cand_entities_labels', 'cand_entities_len', 'ground_truth',\n 'ground_truth_len', 'begin_gm', 'end_gm']"], {}), "('SampleEncoded', ['chunk_id', 'words', 'words_len', 'chars',\n 'chars_len', 'begin_spans', 'end_spans', 'spans_len', 'cand_entities',\n 'cand_entities_scores', 'cand_entities_labels', 'cand_entities_len',\n 'ground_truth', 'ground_truth_len', 'begin_gm', 'end_gm'])\n", (17810, 18082), False, 'from collections import defaultdict, namedtuple\n'), ((5584, 5752), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (["(config.base_folder +\n 'data/basic_data/wordEmbeddings/Word2Vec/GoogleNews-vectors-negative300.bin'\n )"], {'binary': '(True)'}), "(config.base_folder +\n 'data/basic_data/wordEmbeddings/Word2Vec/GoogleNews-vectors-negative300.bin'\n , binary=True)\n", (5631, 5752), False, 'import 
gensim\n'), ((5814, 5845), 'numpy.empty', 'np.empty', (['(wcnt, embedding_dim)'], {}), '((wcnt, embedding_dim))\n', (5822, 5845), True, 'import numpy as np\n'), ((5883, 5906), 'numpy.zeros', 'np.zeros', (['embedding_dim'], {}), '(embedding_dim)\n', (5891, 5906), True, 'import numpy as np\n'), ((5988, 6053), 'numpy.save', 'np.save', (["(output_folder + 'embeddings_array.npy')", 'embeddings_array'], {}), "(output_folder + 'embeddings_array.npy', embeddings_array)\n", (5995, 6053), True, 'import numpy as np\n'), ((37540, 37565), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (37563, 37565), False, 'import argparse\n'), ((555, 723), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (["(config.base_folder +\n 'data/basic_data/wordEmbeddings/Word2Vec/GoogleNews-vectors-negative300.bin'\n )"], {'binary': '(True)'}), "(config.base_folder +\n 'data/basic_data/wordEmbeddings/Word2Vec/GoogleNews-vectors-negative300.bin'\n , binary=True)\n", (602, 723), False, 'import gensim\n'), ((900, 916), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (911, 916), False, 'from collections import defaultdict, namedtuple\n'), ((942, 958), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (953, 958), False, 'from collections import defaultdict, namedtuple\n'), ((3805, 3849), 'preprocessing.util.get_immediate_files', 'util.get_immediate_files', (['new_dataset_folder'], {}), '(new_dataset_folder)\n', (3829, 3849), True, 'import preprocessing.util as util\n'), ((4290, 4319), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (4304, 4319), False, 'import os\n'), ((4329, 4355), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (4340, 4355), False, 'import os\n'), ((4474, 4493), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (4485, 4493), False, 'import pickle\n'), ((5434, 5536), 'pickle.dump', 'pickle.dump', 
(['(word2id, id2word, char2id, id2char, args.word_freq_thr, args.char_freq_thr)', 'handle'], {}), '((word2id, id2word, char2id, id2char, args.word_freq_thr, args.\n char_freq_thr), handle)\n', (5445, 5536), False, 'import pickle\n'), ((6309, 6328), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (6320, 6328), False, 'import pickle\n'), ((10702, 10750), 'preprocessing.util.FetchFilteredCoreferencedCandEntities', 'util.FetchFilteredCoreferencedCandEntities', (['args'], {}), '(args)\n', (10744, 10750), True, 'import preprocessing.util as util\n'), ((18928, 18972), 'preprocessing.util.load_wikiid2nnid', 'util.load_wikiid2nnid', (['args.entity_extension'], {}), '(args.entity_extension)\n', (18949, 18972), True, 'import preprocessing.util as util\n'), ((28825, 28873), 'tensorflow.train.FeatureLists', 'tf.train.FeatureLists', ([], {'feature_list': 'feature_list'}), '(feature_list=feature_list)\n', (28846, 28873), True, 'import tensorflow as tf\n'), ((28902, 28972), 'tensorflow.train.SequenceExample', 'tf.train.SequenceExample', ([], {'context': 'context', 'feature_lists': 'feature_lists'}), '(context=context, feature_lists=feature_lists)\n', (28926, 28972), True, 'import tensorflow as tf\n'), ((29507, 29560), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['(output_folder + filename)'], {}), '(output_folder + filename)\n', (29534, 29560), True, 'import tensorflow as tf\n'), ((30724, 30752), 'preprocessing.util.load_wiki_name_id_map', 'util.load_wiki_name_id_map', ([], {}), '()\n', (30750, 30752), True, 'import preprocessing.util as util\n'), ((37169, 37197), 'preprocessing.util.load_wiki_name_id_map', 'util.load_wiki_name_id_map', ([], {}), '()\n', (37195, 37197), True, 'import preprocessing.util as util\n'), ((37394, 37448), 'os.path.exists', 'os.path.exists', (["(config.base_folder + 'data/tfrecords/')"], {}), "(config.base_folder + 'data/tfrecords/')\n", (37408, 37448), False, 'import os\n'), ((37456, 37507), 'os.makedirs', 
'os.makedirs', (["(config.base_folder + 'data/tfrecords/')"], {}), "(config.base_folder + 'data/tfrecords/')\n", (37467, 37507), False, 'import os\n'), ((39744, 39770), 'os.path.exists', 'os.path.exists', (['folderpath'], {}), '(folderpath)\n', (39758, 39770), False, 'import os\n'), ((39780, 39803), 'os.makedirs', 'os.makedirs', (['folderpath'], {}), '(folderpath)\n', (39791, 39803), False, 'import os\n'), ((40034, 40059), 'pickle.dump', 'pickle.dump', (['args', 'handle'], {}), '(args, handle)\n', (40045, 40059), False, 'import pickle\n'), ((2696, 2724), 'numpy.cumsum', 'np.cumsum', (['hist_values[::-1]'], {}), '(hist_values[::-1])\n', (2705, 2724), True, 'import numpy as np\n'), ((3192, 3214), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (3206, 3214), False, 'import os\n'), ((3228, 3247), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (3239, 3247), False, 'import os\n'), ((3308, 3361), 'pickle.dump', 'pickle.dump', (['(self.word_freq, self.char_freq)', 'handle'], {}), '((self.word_freq, self.char_freq), handle)\n', (3319, 3361), False, 'import pickle\n'), ((29420, 29449), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (29434, 29449), False, 'import os\n'), ((29463, 29489), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (29474, 29489), False, 'import os\n'), ((30181, 30200), 'os.path.normpath', 'os.path.normpath', (['d'], {}), '(d)\n', (30197, 30200), False, 'import os\n'), ((30211, 30255), 'preprocessing.util.get_immediate_files', 'util.get_immediate_files', (['new_dataset_folder'], {}), '(new_dataset_folder)\n', (30235, 30255), True, 'import preprocessing.util as util\n'), ((3890, 3915), 'os.path.normpath', 'os.path.normpath', (['dataset'], {}), '(dataset)\n', (3906, 3915), False, 'import os\n'), ((29190, 29216), 'os.path.normpath', 'os.path.normpath', (['filepath'], {}), '(filepath)\n', (29206, 29216), False, 'import os\n'), ((25850, 25883), 
'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (25868, 25883), True, 'import tensorflow as tf\n'), ((26173, 26206), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (26191, 26206), True, 'import tensorflow as tf\n'), ((26342, 26373), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'value'}), '(value=value)\n', (26360, 26373), True, 'import tensorflow as tf\n'), ((31843, 31871), 'termcolor.colored', 'colored', (['"""New sample"""', '"""red"""'], {}), "('New sample', 'red')\n", (31850, 31871), False, 'from termcolor import colored\n'), ((32326, 32354), 'termcolor.colored', 'colored', (['"""New sample"""', '"""red"""'], {}), "('New sample', 'red')\n", (32333, 32354), False, 'from termcolor import colored\n'), ((33070, 33098), 'termcolor.colored', 'colored', (['"""New sample"""', '"""red"""'], {}), "('New sample', 'red')\n", (33077, 33098), False, 'from termcolor import colored\n'), ((2998, 3017), 'numpy.sum', 'np.sum', (['hist_values'], {}), '(hist_values)\n', (3004, 3017), True, 'import numpy as np\n'), ((27358, 27389), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'value'}), '(value=value)\n', (27376, 27389), True, 'import tensorflow as tf\n'), ((32234, 32260), 'termcolor.colored', 'colored', (['message', '"""yellow"""'], {}), "(message, 'yellow')\n", (32241, 32260), False, 'from termcolor import colored\n'), ((32727, 32785), 'termcolor.colored', 'colored', (['message', "('yellow' if i in misses_idx else 'white')"], {}), "(message, 'yellow' if i in misses_idx else 'white')\n", (32734, 32785), False, 'from termcolor import colored\n'), ((33437, 33464), 'termcolor.colored', 'colored', (['message', '"""magenta"""'], {}), "(message, 'magenta')\n", (33444, 33464), False, 'from termcolor import colored\n'), ((33861, 33887), 'termcolor.colored', 'colored', (['message', '"""yellow"""'], {}), "(message, 'yellow')\n", (33868, 
33887), False, 'from termcolor import colored\n'), ((34651, 34676), 'termcolor.colored', 'colored', (['message', '"""green"""'], {}), "(message, 'green')\n", (34658, 34676), False, 'from termcolor import colored\n'), ((34942, 34967), 'termcolor.colored', 'colored', (['message', '"""white"""'], {}), "(message, 'white')\n", (34949, 34967), False, 'from termcolor import colored\n')] |
import numpy as np
import skimage.morphology as io
from skimage.morphology import disk
import matplotlib.pyplot as plt
import matplotlib.tri as tri
from matplotlib.path import Path
import scipy.ndimage as ndi
import math
import pymorph as pm
from PIL import Image
from PIL import ImageFilter
import os
class ImgMesh():
    """Loads a micrograph, extracts its black-and-white mask and builds a
    triangular node mesh over the object shown in the image."""

    def __init__(self, name="img_test.jpg"):
        """Read image *name*, rotate it 270 degrees and keep a grayscale
        numpy copy in self.img."""
        Image.MAX_IMAGE_PIXELS = None  # lift PIL's decompression-bomb limit (very large micrographs)
        # NOTE(review): hard-coded working directory; consider making it a parameter.
        os.chdir('/home/hatef/Desktop/PhD/Phase_01_imageProcessing/ImagePython')
        img = Image.open(name)
        img = img.rotate(270)
        self.gray = img.convert('L')
        self.img = np.array(self.gray.getdata(), np.uint8).reshape(self.gray.size[1], self.gray.size[0])
        print("Reading image done.\n")
        self.bwimg = None

    ### Smear Elimination near pores using pore dilation
    ### Input: grayscale image, 'pc{desired pore criteria}', 'dr{desired disk radius}', 'fr{desired filter radius}'
    ### Example: PoreDilate(img,'pc30','dr10','fr30')
    def PoreDilate(self, *args):
        """Detect pores (pixels <= poreCrit), median-filter and dilate them,
        then blank the dilated pore regions out of the image (self.imgAlt)."""
        poreCrit = None
        diskRad = None
        filtRad = None
        for arg in args:
            if arg[:2] == 'pc':
                poreCrit = int(arg[2:])
            elif arg[:2] == 'dr':
                diskRad = int(arg[2:])
            elif arg[:2] == 'fr':
                filtRad = int(arg[2:])
        # defaults when the corresponding option string was not supplied
        poreCrit = 30 if poreCrit is None else poreCrit
        diskRad = 10 if diskRad is None else diskRad
        filtRad = 30 if filtRad is None else filtRad
        pores = (self.img <= poreCrit) * 1
        pores_med = ndi.median_filter(pores, size=filtRad)
        DilPore = io.dilation(pores_med, disk(diskRad))
        self.imgAlt = (1 - DilPore) * self.img

    ### Extracts the black and white image from a grayscale image
    ### Input: grayscale image
    ### Example: BWextractor(img)
    def BWextractor(self):
        """Threshold the grayscale image at 20 and dilate twice; result stored
        as a uint8 array in self.bwimg."""
        self.bwimg = self.gray.point(lambda x: 0 if x < 20 else 255, '1')
        self.bwimg = np.array(self.bwimg.getdata(), np.uint8).reshape(self.bwimg.size[1], self.bwimg.size[0])
        # renamed from 'disk' to stop shadowing skimage.morphology.disk
        selem = pm.sedisk(14, 2)
        self.bwimg = pm.dilate(self.bwimg, selem)
        self.bwimg = pm.dilate(self.bwimg, selem)
        print("Converted to BWs.\n")

    ### Mesh nodes calculator
    ### Input: black and white image (img) and the desired mesh size (h)
    ### Example: imgmesh(img,100)
    def Mesh(self, h):
        """Build a staggered (every other row shifted by h/2) node grid of
        spacing *h* over the white region of self.bwimg, triangulate it and
        keep only triangles whose centroid lies on the object (self.mesh)."""
        # xx_out,yy_out=imgOutline(img,7)
        # BUG FIX: 'self.bwimg == None' compares element-wise once bwimg is a
        # numpy array (ambiguous truth value); use an identity check.
        if self.bwimg is None:
            self.BWextractor()
            print("Converted to BW.\n")
        x, y = np.shape(self.bwimg)
        xpop = range(0, x, h)
        ypop = range(0, y, int(h * math.sqrt(3) / 2.0))
        xv, yv = np.meshgrid(xpop, ypop, sparse=False, indexing='ij')
        xv[:, 1::2] = xv[:, 1::2] + (h / 2.0)
        xv = xv[0:-1, :]
        yv = yv[0:-1, :]
        X = xv.flatten()
        Y = yv.flatten()
        self.xx = np.array([])
        self.yy = np.array([])
        for i in range(np.size(X)):
            # keep only nodes on the (white) object
            if self.bwimg[X[i], Y[i]] > 250:
                self.xx = np.append(self.xx, X[i])
                self.yy = np.append(self.yy, Y[i])
        self.triang = tri.Triangulation(self.xx, self.yy)
        # BUG FIX: np.round returns floats, which are not valid numpy indices;
        # cast the centroid coordinates to int before indexing the image.
        xmid = np.round(self.xx[self.triang.triangles].mean(axis=1)).astype(int)
        ymid = np.round(self.yy[self.triang.triangles].mean(axis=1)).astype(int)
        self.mesh = np.array([[0, 0, 0]])
        for i in range(np.size(xmid)):
            if self.bwimg[xmid[i], ymid[i]] == 255:
                self.mesh = np.concatenate((self.mesh, [self.triang.triangles[i, :]]), axis=0)
        self.mesh = np.delete(self.mesh, 0, 0)  # drop the [0,0,0] placeholder row
        print("Mesh created. \n")

    ### Extracts the edge of the object in a black and white image
    ### Input: black and white image (img) and the desired mesh size (h)
    ### Example: imgOutline(img,20)
    def imgOutline(self, h):
        """Sample the object's blurred edge on a grid of spacing *h*; the edge
        point coordinates end up in self.xxo / self.yyo (every 14th kept)."""
        outline = Image.fromarray(self.img)
        outline = outline.filter(ImageFilter.BLUR)
        outline = np.array(outline.getdata(), np.uint8).reshape(outline.size[1], outline.size[0])
        # mid-gray pixels (40..120) are treated as edge; map edge->0, rest->255
        outline = (outline < 120) * (outline > 40)
        outline = (1 - (outline * 1)) * 255
        x, y = np.shape(outline)
        xv, yv = np.meshgrid(range(0, x, h), range(0, y, h), sparse=False, indexing='ij')
        X = xv.flatten()
        Y = yv.flatten()
        self.xxo = np.array([])
        self.yyo = np.array([])
        for i in range(np.size(X)):
            if outline[X[i], Y[i]] < 100:
                # BUG FIX: was 'self.xxBWextractoro', a typo that left self.xxo
                # empty and out of sync with self.yyo.
                self.xxo = np.append(self.xxo, X[i])
                self.yyo = np.append(self.yyo, Y[i])
        self.xxo = self.xxo[0:-1:14]
        self.yyo = self.yyo[0:-1:14]

    def plotMesh(self):
        """Plot the mesh nodes; raises AttributeError when Mesh() has not been
        called yet."""
        try:
            plt.plot(self.xx[self.mesh], self.yy[self.mesh], 'ko')
            plt.show()
        except AttributeError:
            raise AttributeError("Mesh is not defined.")
class ContOp(ImgMesh):
def __init__(self, meshParam, thresh):
    """Capture the image, mesh and grayscale thresholds from *meshParam*
    (an ImgMesh) and precompute the per-triangle vertex coordinates."""
    self.img = meshParam.img
    self.mesh = meshParam.mesh
    self.thresh = thresh
    self.eutArray = []
    # vertex coordinates per triangle, shaped (n_triangles, 3)
    triangles = self.mesh
    self.XT = meshParam.xx[triangles]
    self.YT = meshParam.yy[triangles]
### Finds points in a triangular segment using barycentric method
### Input: coordinates of the vertices in the form of (X,Y)
### Example: ptsinmesh(np.array([1,2,3]),np.array([1,2,3]))
def PtsinMesh(self):
minx=int(np.min(self.xs))
maxx=int(np.max(self.xs))
miny=int(np.min(self.ys))
maxy=int(np.max(self.ys))
p = Path([[self.xs[0], self.ys[0]],[self.xs[1],self.ys[1]],[self.xs[2],self.ys[2]]])
self.pts=np.empty((0,2),int)
xv,yv=np.meshgrid(range(minx,maxx),range(miny,maxy))
xv=xv.flatten()
yv=yv.flatten()
self.pts=np.transpose(np.concatenate(([tuple(xv)],[tuple(yv)]),axis=0))
tmp= p.contains_points(tuple(self.pts))
self.pts=self.pts[tmp]
### Finds the eutectic area fraction in a micrograph segment
### Input: image (img), points in the desired segment (pts) in the format of a Nx2 matrix, threshold values (thresh) in the format of a 1x2 array
### Example: EutoverTot(img, np.array([[1,2],[3,4],[5,6]]), [60,153])
def EutoverTot(self):
# NumPts_Pore=np.sum((img[pts[:,0],pts[:,1]]>=0.0)*(img[pts[:,0],pts[:,1]]<thresh[0])*1.0)
NumPts_Eut=np.sum((self.img[self.pts[:,0],self.pts[:,1]]>=self.thresh[0])*(self.img[self.pts[:,0],self.pts[:,1]]<self.thresh[1])*1.0)
NumPts_Pri=np.sum((self.img[self.pts[:,0],self.pts[:,1]]>=self.thresh[1])*(self.img[self.pts[:,0],self.pts[:,1]]<250)*1.0)
try:
np.seterr(divide='ignore', invalid='ignore')
self.eutectic=NumPts_Eut / (NumPts_Pri+NumPts_Eut)
if math.isnan(self.eutectic):
self.eutectic=0
except:
self.eutectic=0
### Writes a Tecplot input file to construct a contour plot
### Input: coordinates of the nodes of each element(XT,YT), eutectic value of each element(eutectic)
### Example: toTecplot(X[mesh],Y[mesh],eutectic)
def toTecplot(self,fileID="test.dat"):
n=np.size(self.XT,axis=0)
nodes=np.zeros((n+2,2))
xtcat=np.concatenate((self.XT[:,0],self.XT[:,1],self.XT[:,2]))
ytcat=np.concatenate((self.YT[:,0],self.YT[:,1],self.YT[:,2]))
facecell=[]
eut=[]
i=0
while np.size(xtcat) is not 0:
nodes[i,0]=xtcat[0]
nodes[i,1]=ytcat[0]
logi=(self.XT==xtcat[0]) * (self.YT==ytcat[0])
facecell.append(logi)
temp2=logi[:,1]+logi[:,2]+logi[:,0]
eut.append(np.sum(self.eutArray[temp2]) / sum(temp2*1))
temp3=(xtcat==xtcat[0])*(ytcat==ytcat[0])
temp3=(temp3==False)
xtcat=xtcat[temp3]
ytcat=ytcat[temp3]
i+=1
faces=np.zeros((n,3))
for m in range(i):
faces+=facecell[m]*(m+1)
tecplot=open(fileID,"w+")
tecplot.write('''variables = "X", "Y", "Silicon Area Fraction"\n''')
tecplot.write('''zone N=%2.0d, E=%2.0d, F=FEPOINT, ET=TRIANGLE\n\n'''%(np.size(eut),n))
for i in range(np.size(eut)):
tecplot.write('''%4.0f\t%4.0f\t%4.4f\n'''%(nodes[i,0],nodes[i,1],eut[i]))
tecplot.write('''\n''')
for i in range(n):
tecplot.write('''%4.0d\t%4.0d\t%4.0d\n'''%(faces[i,0],faces[i,1],faces[i,2]))
tecplot.close()
def Iterate(self):
n = np.size(self.mesh,axis=0)
for i in range(n):
self.xs = self.XT[i,:]
self.ys = self.YT[i,:]
self.PtsinMesh()
self.EutoverTot()
self.eutArray=np.append(self.eutArray,self.eutectic)
self.toTecplot()
#img = ImgMesh()
#img.Mesh(30)
#img.plotMesh()
#cont = ContOp(img,[40,120])
#cont.Iterate()
| [
"matplotlib.tri.Triangulation",
"math.sqrt",
"numpy.array",
"pymorph.dilate",
"matplotlib.path.Path",
"numpy.delete",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.empty",
"numpy.concatenate",
"numpy.min",
"numpy.meshgrid",
"numpy.size",
"pymorph.sedisk",
"numpy.shape",
"skimage.morpho... | [((398, 470), 'os.chdir', 'os.chdir', (['"""/home/hatef/Desktop/PhD/Phase_01_imageProcessing/ImagePython"""'], {}), "('/home/hatef/Desktop/PhD/Phase_01_imageProcessing/ImagePython')\n", (406, 470), False, 'import os\n'), ((478, 494), 'PIL.Image.open', 'Image.open', (['name'], {}), '(name)\n', (488, 494), False, 'from PIL import Image\n'), ((1349, 1387), 'scipy.ndimage.median_filter', 'ndi.median_filter', (['pores'], {'size': 'filtRad'}), '(pores, size=filtRad)\n', (1366, 1387), True, 'import scipy.ndimage as ndi\n'), ((1820, 1836), 'pymorph.sedisk', 'pm.sedisk', (['(14)', '(2)'], {}), '(14, 2)\n', (1829, 1836), True, 'import pymorph as pm\n'), ((1851, 1878), 'pymorph.dilate', 'pm.dilate', (['self.bwimg', 'disk'], {}), '(self.bwimg, disk)\n', (1860, 1878), True, 'import pymorph as pm\n'), ((1893, 1920), 'pymorph.dilate', 'pm.dilate', (['self.bwimg', 'disk'], {}), '(self.bwimg, disk)\n', (1902, 1920), True, 'import pymorph as pm\n'), ((2236, 2256), 'numpy.shape', 'np.shape', (['self.bwimg'], {}), '(self.bwimg)\n', (2244, 2256), True, 'import numpy as np\n'), ((2327, 2379), 'numpy.meshgrid', 'np.meshgrid', (['xpop', 'ypop'], {'sparse': '(False)', 'indexing': '"""ij"""'}), "(xpop, ypop, sparse=False, indexing='ij')\n", (2338, 2379), True, 'import numpy as np\n'), ((2486, 2498), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2494, 2498), True, 'import numpy as np\n'), ((2509, 2521), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2517, 2521), True, 'import numpy as np\n'), ((2673, 2708), 'matplotlib.tri.Triangulation', 'tri.Triangulation', (['self.xx', 'self.yy'], {}), '(self.xx, self.yy)\n', (2690, 2708), True, 'import matplotlib.tri as tri\n'), ((2847, 2868), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (2855, 2868), True, 'import numpy as np\n'), ((3030, 3056), 'numpy.delete', 'np.delete', (['self.mesh', '(0)', '(0)'], {}), '(self.mesh, 0, 0)\n', (3039, 3056), True, 'import numpy as np\n'), ((3310, 3335), 
'PIL.Image.fromarray', 'Image.fromarray', (['self.img'], {}), '(self.img)\n', (3325, 3335), False, 'from PIL import Image\n'), ((3547, 3564), 'numpy.shape', 'np.shape', (['outline'], {}), '(outline)\n', (3555, 3564), True, 'import numpy as np\n'), ((3685, 3697), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3693, 3697), True, 'import numpy as np\n'), ((3709, 3721), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3717, 3721), True, 'import numpy as np\n'), ((4717, 4806), 'matplotlib.path.Path', 'Path', (['[[self.xs[0], self.ys[0]], [self.xs[1], self.ys[1]], [self.xs[2], self.ys[2]]]'], {}), '([[self.xs[0], self.ys[0]], [self.xs[1], self.ys[1]], [self.xs[2], self\n .ys[2]]])\n', (4721, 4806), False, 'from matplotlib.path import Path\n'), ((4809, 4830), 'numpy.empty', 'np.empty', (['(0, 2)', 'int'], {}), '((0, 2), int)\n', (4817, 4830), True, 'import numpy as np\n'), ((5482, 5623), 'numpy.sum', 'np.sum', (['((self.img[self.pts[:, 0], self.pts[:, 1]] >= self.thresh[0]) * (self.img[\n self.pts[:, 0], self.pts[:, 1]] < self.thresh[1]) * 1.0)'], {}), '((self.img[self.pts[:, 0], self.pts[:, 1]] >= self.thresh[0]) * (self\n .img[self.pts[:, 0], self.pts[:, 1]] < self.thresh[1]) * 1.0)\n', (5488, 5623), True, 'import numpy as np\n'), ((5618, 5748), 'numpy.sum', 'np.sum', (['((self.img[self.pts[:, 0], self.pts[:, 1]] >= self.thresh[1]) * (self.img[\n self.pts[:, 0], self.pts[:, 1]] < 250) * 1.0)'], {}), '((self.img[self.pts[:, 0], self.pts[:, 1]] >= self.thresh[1]) * (self\n .img[self.pts[:, 0], self.pts[:, 1]] < 250) * 1.0)\n', (5624, 5748), True, 'import numpy as np\n'), ((6190, 6214), 'numpy.size', 'np.size', (['self.XT'], {'axis': '(0)'}), '(self.XT, axis=0)\n', (6197, 6214), True, 'import numpy as np\n'), ((6222, 6242), 'numpy.zeros', 'np.zeros', (['(n + 2, 2)'], {}), '((n + 2, 2))\n', (6230, 6242), True, 'import numpy as np\n'), ((6248, 6309), 'numpy.concatenate', 'np.concatenate', (['(self.XT[:, 0], self.XT[:, 1], self.XT[:, 2])'], {}), '((self.XT[:, 0], 
self.XT[:, 1], self.XT[:, 2]))\n', (6262, 6309), True, 'import numpy as np\n'), ((6313, 6374), 'numpy.concatenate', 'np.concatenate', (['(self.YT[:, 0], self.YT[:, 1], self.YT[:, 2])'], {}), '((self.YT[:, 0], self.YT[:, 1], self.YT[:, 2]))\n', (6327, 6374), True, 'import numpy as np\n'), ((6813, 6829), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {}), '((n, 3))\n', (6821, 6829), True, 'import numpy as np\n'), ((7364, 7390), 'numpy.size', 'np.size', (['self.mesh'], {'axis': '(0)'}), '(self.mesh, axis=0)\n', (7371, 7390), True, 'import numpy as np\n'), ((1420, 1433), 'skimage.morphology.disk', 'disk', (['diskRad'], {}), '(diskRad)\n', (1424, 1433), False, 'from skimage.morphology import disk\n'), ((2539, 2549), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (2546, 2549), True, 'import numpy as np\n'), ((2884, 2897), 'numpy.size', 'np.size', (['xmid'], {}), '(xmid)\n', (2891, 2897), True, 'import numpy as np\n'), ((3739, 3749), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (3746, 3749), True, 'import numpy as np\n'), ((3979, 4033), 'matplotlib.pyplot.plot', 'plt.plot', (['self.xx[self.mesh]', 'self.yy[self.mesh]', '"""ko"""'], {}), "(self.xx[self.mesh], self.yy[self.mesh], 'ko')\n", (3987, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4035, 4045), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4043, 4045), True, 'import matplotlib.pyplot as plt\n'), ((4607, 4622), 'numpy.min', 'np.min', (['self.xs'], {}), '(self.xs)\n', (4613, 4622), True, 'import numpy as np\n'), ((4635, 4650), 'numpy.max', 'np.max', (['self.xs'], {}), '(self.xs)\n', (4641, 4650), True, 'import numpy as np\n'), ((4663, 4678), 'numpy.min', 'np.min', (['self.ys'], {}), '(self.ys)\n', (4669, 4678), True, 'import numpy as np\n'), ((4691, 4706), 'numpy.max', 'np.max', (['self.ys'], {}), '(self.ys)\n', (4697, 4706), True, 'import numpy as np\n'), ((5740, 5784), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", 
(5749, 5784), True, 'import numpy as np\n'), ((5845, 5870), 'math.isnan', 'math.isnan', (['self.eutectic'], {}), '(self.eutectic)\n', (5855, 5870), False, 'import math\n'), ((6407, 6421), 'numpy.size', 'np.size', (['xtcat'], {}), '(xtcat)\n', (6414, 6421), True, 'import numpy as np\n'), ((7087, 7099), 'numpy.size', 'np.size', (['eut'], {}), '(eut)\n', (7094, 7099), True, 'import numpy as np\n'), ((7521, 7560), 'numpy.append', 'np.append', (['self.eutArray', 'self.eutectic'], {}), '(self.eutArray, self.eutectic)\n', (7530, 7560), True, 'import numpy as np\n'), ((2597, 2621), 'numpy.append', 'np.append', (['self.xx', 'X[i]'], {}), '(self.xx, X[i])\n', (2606, 2621), True, 'import numpy as np\n'), ((2633, 2657), 'numpy.append', 'np.append', (['self.yy', 'Y[i]'], {}), '(self.yy, Y[i])\n', (2642, 2657), True, 'import numpy as np\n'), ((2954, 3020), 'numpy.concatenate', 'np.concatenate', (['(self.mesh, [self.triang.triangles[i, :]])'], {'axis': '(0)'}), '((self.mesh, [self.triang.triangles[i, :]]), axis=0)\n', (2968, 3020), True, 'import numpy as np\n'), ((3815, 3840), 'numpy.append', 'np.append', (['self.xxo', 'X[i]'], {}), '(self.xxo, X[i])\n', (3824, 3840), True, 'import numpy as np\n'), ((3859, 3884), 'numpy.append', 'np.append', (['self.yyo', 'Y[i]'], {}), '(self.yyo, Y[i])\n', (3868, 3884), True, 'import numpy as np\n'), ((6624, 6652), 'numpy.sum', 'np.sum', (['self.eutArray[temp2]'], {}), '(self.eutArray[temp2])\n', (6630, 6652), True, 'import numpy as np\n'), ((7053, 7065), 'numpy.size', 'np.size', (['eut'], {}), '(eut)\n', (7060, 7065), True, 'import numpy as np\n'), ((2300, 2312), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (2309, 2312), False, 'import math\n')] |
#%%
import cv2;
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
# not used in this stub but often useful for finding various files
#project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
from scipy import ndimage
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib import colors
import seaborn as sns
img = cv2.imread("C:\\Users\\Helldragger\\Documents\\projects\\MetaWatch\\MetaWatch\\src\\features\\original.jpg");
# we cut the image to keep only the interesting part: the overlay.
#%%
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
newmask = cv.imread('C:\\Users\\Helldragger\\Documents\\projects\\MetaWatch\\MetaWatch\\src\\features\\overlaymaskheroes.png',0)
# wherever it is marked white (sure foreground), change mask=1
# wherever it is marked black (sure background), change mask=0
mask[newmask == 0] = 0
mask[newmask == 255] = 1
mask, bgdModel, fgdModel = cv.grabCut(img,mask,None,bgdModel,fgdModel,5,cv.GC_INIT_WITH_MASK)
mask = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img2 = img*mask[:,:,np.newaxis]
#%%
plt.imshow(img),plt.colorbar(),plt.show()
plt.imshow(img2),plt.colorbar(),plt.show()
cv2.imshow("cut", img2);
cv2.imshow("original", img);
cv2.waitKey(0)
croppedImg = img2[125:210, 35:1892];
plt.imshow(croppedImg),plt.colorbar(),plt.show()
cv2.imshow("cropped", croppedImg);
cv2.waitKey(0)
#%%
# read data about a single hero
src1_mask = cv2.imread('C:\\Users\\Helldragger\\Documents\\projects\\MetaWatch\\MetaWatch\\src\\features\\maskheroA1.png',0);
src1_mask = cv2.cvtColor(src1_mask,cv2.COLOR_GRAY2RGB)
masked_image = cv2.bitwise_and(img, src1_mask)
cv2.imshow("hero maked", masked_image);
cv2.waitKey(0)
#%%
#cropping the hhero image
y,x = masked_image[:,:,1].nonzero() # get the nonzero alpha coordinates
minx = np.min(x)
miny = np.min(y)
maxx = np.max(x)
maxy = np.max(y)
cropImg = masked_image[miny:maxy, minx:maxx]
#cv2.imwrite("cropped.png", cropImg)
cv2.imshow("cropped", cropImg)
cv2.waitKey(0)
#%%
# here we load various health bars and try to quantify their content: health, shield, armor, death symbol.
imgs = {};
srcs = [
"frame377_hero_A1_health",
"frame377_hero_A2_health",
"frame377_hero_A3_health",
"frame377_hero_A4_health",
"frame377_hero_A5_health",
"frame377_hero_A6_health",
"frame377_hero_B1_health",
"frame377_hero_B2_health",
"frame377_hero_B3_health",
"frame377_hero_B4_health",
"frame377_hero_B5_health",
"frame377_hero_B6_health",
]
for src in srcs:
imgs[src] = cv2.imread('C:\\Users\\Helldragger\\Documents\\projects\\MetaWatch\\MetaWatch\\src\\data\\tests\\'+src+'.jpg',-1);
#src1_mask = cv2.imread('C:\\Users\\Helldragger\\Documents\\projects\\MetaWatch\\MetaWatch\\src\\features\\maskheroA1.png',0);
#%%
# here we create an histogram for each image
# idea: counting the amount of health using the percentage of specific colours in the health zone.
img = imgs["frame377_hero_B1_health"];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS )
channels = ('H','L','S')
colors = ("B", 'G', 'R')
for i,chann in enumerate(channels):
histr = cv2.calcHist(img, [i], None, [256], [1, 256], True, False)
histr /= max(histr)
plt.plot(histr,color = colors[i])
plt.xlim([0,256])
plt.show()
plt.imshow(img),plt.colorbar(),plt.show()
#for img in imgs.values():
#%%
# Reducing the amount of colors to 5 colors:
reducedimg = imgs["frame377_hero_B1_health"];
#cv2.cvtColor(reducedimg, cv2.COLOR_HLS2BGR )
plt.imshow(reducedimg),plt.colorbar(),plt.show()
reducedimg = reducedimg // 64
reducedimg = reducedimg * 64
plt.imshow(reducedimg),plt.colorbar(),plt.show()
#%%
img = imgs["frame377_hero_B1_health"];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
pixel_colors = img.reshape((np.shape(img)[0]*np.shape(img)[1], 3))
norm = colors.Normalize(vmin=-1.,vmax=1.)
norm.autoscale(pixel_colors)
pixel_colors = norm(pixel_colors).tolist()
hlsImg = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
h, l, s = cv2.split(hlsImg);
#plt.imshow(img),plt.colorbar()
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1, projection="3d")
ax1.scatter(h.flatten(), l.flatten(), s.flatten(), facecolors=pixel_colors, marker=".")
ax1.set_xlabel("Hue")
ax1.set_ylabel("Luminosity")
ax1.set_zlabel("Saturation")
ax2 = fig.add_subplot(2, 1, 2)
ax2.imshow(img)
plt.show()
#%%
# trying to filter out noise colors:
img = imgs["frame377_hero_B1_health"];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
hlsImg = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
h, l, s = cv2.split(hlsImg);
fig = plt.figure()
ax = fig.add_subplot(4, 1, 1)
ax.imshow(img)
ax.set_label("pre filtering");
h, l, s = cv2.split(hlsImg);
ax = fig.add_subplot(4, 1, 2)
ax.imshow(h)
ax.set_label("Hue");
ax = fig.add_subplot(4, 1, 3)
ax.imshow(l)
ax.set_label("Luminance");
ax = fig.add_subplot(4, 1, 4)
ax.imshow(s)
ax.set_label("Saturation");
plt.show()
#%%
def getRGBImg(name):
return cv2.cvtColor(imgs[name], cv2.COLOR_BGR2RGB)
def showHealthDetected(img):
hlsImg = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
hlsImg[hlsImg[:,:,1] < 100] = 0
#hlsImg[:,:,0] = 126
h, l, s = cv2.split(hlsImg);
fig = plt.figure()
ax = fig.add_subplot(5, 1, 1)
ax.imshow(img)
ax = fig.add_subplot(5, 1, 2)
ax.imshow(h)
ax.set_label("Hue");
ax = fig.add_subplot(5, 1, 3)
ax.imshow(l)
ax.set_label("Luminance");
ax = fig.add_subplot(5, 1, 4)
ax.imshow(s)
ax.set_label("Saturation");
resultimg = cv2.cvtColor(hlsImg, cv2.COLOR_HLS2RGB)
ax = fig.add_subplot(5, 1, 5)
ax.imshow(resultimg)
plt.show()
return resultimg
analyzedImgs = {}
for imgSrc in imgs.keys():
analyzedImgs[imgSrc] = showHealthDetected(getRGBImg(imgSrc))
#%%
# recenter every img.
# calculate the histogram of pixels > 0 per line.
def showLineHistograms(img):
img[img > 0] = 1
Ysize = len(img[:,0,0])
hist = img.sum(axis=1)
fig = plt.figure()
plt.plot(hist)
plt.show()
hist = np.zeros((22));
observations = np.array([]);
for imgSrc in imgs.keys():
img = cv.cvtColor(analyzedImgs[imgSrc], cv2.COLOR_RGB2GRAY)
imgSum = img.sum(axis=1);
hist[: len(imgSum)] += imgSum
observations = np.concatenate((observations, np.where(imgSum > 0)[0]) );
hist /= max(hist)
fig = plt.figure()
plt.plot(hist)
plt.show()
sns.distplot(observations)
plt.show()
#%%
# here we try to detect the correct amount of notches on any image
# the image is passed through a gradient filter to show only the variations.
# this gradient filter is then
def detectLabels(img):
return ndimage.measurements.label(imgGrad)
def calculateLabellingErrorRate(gradImg, expected):
return detectLabels(gradImg)[1] - expected;
notchesExpected = {
"frame377_hero_A1_health":23,
"frame377_hero_A2_health":24,
"frame377_hero_A3_health":8,
"frame377_hero_A4_health":10,
"frame377_hero_A5_health":2,
"frame377_hero_A6_health":6,
"frame377_hero_B1_health":24,
"frame377_hero_B2_health":8,
"frame377_hero_B3_health":10,
"frame377_hero_B4_health":8,
"frame377_hero_B5_health":1,
"frame377_hero_B6_health":8,
}
gradientThreshold = 30
bin_amount = abs(min(256, 256))
gradientThresholds = np.linspace(0,256,bin_amount) // 1
#%%
errors = np.zeros(shape=(len(notchesExpected), bin_amount))
j = 0
for lowGradientThreshold in gradientThresholds:
i = 0
for src in imgs.keys():
expected = notchesExpected[src];
img = imgs[src];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
imgGrad = ndimage.morphology.morphological_gradient(img[:,:,0], size=(1,2))
imgGrad[imgGrad<lowGradientThreshold] = 0
labels, count = ndimage.measurements.label(imgGrad)
errors[i, j] = abs(calculateLabellingErrorRate(img, expected))
i += 1
j+=1
sns.heatmap(errors)
plt.plot()
plt.figure()
errorsSummed = errors.sum(axis=0);
errorsSummed
sns.lineplot(data=errorsSummed)
plt.plot()
#plot.figure()
bestThreshold = np.where(errorsSummed == min(errorsSummed))[0][-1];
print("best high pass gradient threshold: ",bestThreshold, "\n\terror count:", errorsSummed[bestThreshold])
# low pass gradient gave a best score of 60 errors, not enough. combining both systems gave a best score of 38. we will keep the simple high pass.
#%%
# best system, simple high filter, with a threshold of 52 or around 50
#%%
img = imgs["frame377_hero_B1_health"];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
plt.imshow(img),plt.colorbar(),plt.show()
imgGrad = ndimage.morphology.morphological_gradient(img[:,:,0], size=(1,2))
plt.imshow(imgGrad),plt.colorbar(),plt.show()
imgGrad[imgGrad<30] = 0
plt.imshow(imgGrad),plt.colorbar(),plt.show()
label, num_features = ndimage.measurements.label(imgGrad)
print("###RGB mode on gradient")
print(num_features, " features detected / 24.")
HLSImg = cv.cvtColor(analyzedImgs[imgSrc], cv.COLOR_RGB2HLS)
label, num_features = ndimage.measurements.label(HLSImg[:,:,0])
print("###HLSImg using Hue")
print(num_features, " features detected / 24.")
label, num_features = ndimage.measurements.label(HLSImg[:,:,1])
print("###HLSImg using Luminosity")
print(num_features, " features detected / 24.")
label, num_features = ndimage.measurements.label(HLSImg[:,:,2])
print("###HLSImg using Saturation")
print(num_features, " features detected / 24.")
#%%
# here we try to differentiate notches types.
notchesTypesExpected = {
"frame377_hero_A1_health":{"white":20,"yellow":3, "blue":0, "red":0},
"frame377_hero_A2_health":{"white":17,"yellow":7, "blue":0, "red":0},
"frame377_hero_A3_health":{"white":8,"yellow":0, "blue":0, "red":0},
"frame377_hero_A4_health":{"white":8,"yellow":2, "blue":0, "red":0},
"frame377_hero_A5_health":{"white":2,"yellow":0, "blue":0, "red":0},
"frame377_hero_A6_health":{"white":2,"yellow":0, "blue":4, "red":0},
"frame377_hero_B1_health":{"white":20,"yellow":4, "blue":0, "red":0},
"frame377_hero_B2_health":{"white":8,"yellow":0, "blue":0, "red":0},
"frame377_hero_B3_health":{"white":8,"yellow":2, "blue":0, "red":0},
"frame377_hero_B4_health":{"white":8,"yellow":0, "blue":0, "red":0},
"frame377_hero_B5_health":{"white":0,"yellow":0, "blue":0, "red":1},
"frame377_hero_B6_health":{"white":8,"yellow":0, "blue":0, "red":0},
}
#%%
img = imgs["frame377_hero_A6_health"];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
originalimg = img
#plt.imshow(img),plt.colorbar(),plt.show()
imgHLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS )
#plt.imshow(imgHLS[:,:,1]),plt.colorbar(),plt.show()
#imgHLS[:,:,1] = ndimage.grey_erosion(imgHLS[:,:,1], size=(1,2));
imgGrad = ndimage.morphology.morphological_gradient(imgHLS[:,:,1], size=(1,4))
#plt.imshow(imgGrad),plt.colorbar(),plt.show()
imgGrad = ndimage.grey_erosion(imgGrad, size=(2,2));
#plt.imshow(imgGrad),plt.colorbar(),plt.show()
imgGrad = ndimage.grey_dilation(imgGrad, size=(2,2));
#plt.imshow(imgGrad),plt.colorbar(),plt.show()
imgGrad[imgGrad<52] = 0
#plt.imshow(imgGrad),plt.colorbar(),plt.show()
imgHLS[:,:,1] = imgGrad;
imgHLS[imgHLS[:,:,1] == 0] = 0
img = cv2.cvtColor(imgHLS, cv2.COLOR_HLS2RGB )
#plt.imshow(img),plt.colorbar(),plt.show()
labels, count = ndimage.label(imgGrad)
#plt.imshow(labels),plt.colorbar(),plt.show()
#detect colors
#plt.imshow(imgHLS[:,:,0]),plt.colorbar(),plt.show()
#plt.imshow(imgHLS[:,:,1]),plt.colorbar(),plt.show()
#plt.imshow(imgHLS[:,:,2]),plt.colorbar(),plt.show()
colors = {"white":0,"yellow":0, "blue":0, "red":0}
errors = np.zeros((len(notchesTypesExpected.keys()), 4))
i = 0
for imgKey in notchesTypesExpected.keys():
colors = {"white":0,"yellow":0, "blue":0, "red":0}
expectedColors = notchesTypesExpected[imgKey]
img = imgs[imgKey];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
imgGrad = ndimage.morphology.morphological_gradient(img[:,:,0], size=(1,2))
imgGrad[imgGrad<52] = 0
labels, count = ndimage.measurements.label(imgGrad)
for label in range(1, count+1):
labelMeanRGB = np.array(img[labels==label, :].mean(axis=0))
best_dist = -1
best_color = ""
for color in COLOR_VALUES.keys():
curr_dist = np.sqrt(np.sum((COLOR_VALUES[color] - labelMeanRGB) ** 2))
if best_dist == -1 or curr_dist < best_dist:
best_dist = curr_dist
best_color = color
print(i,": ",labelMeanRGB," => ", best_color)
colors[best_color] += 1
# error detection
j=0
for color in colors.keys():
errors[i, j] += abs(expectedColors[color] - colors[color])
print(j,"=",color)
j+=1
i+=1
sns.heatmap(data=errors)
print("total errors:", errors.sum(axis=0))
#%%
#objective: find the best color codes to diminish errors in their case.
#%%
for i in range(1, count+1):
labelRGB = originalimg[labels==i, :]
labelMeanRGB = np.array(labelRGB.mean(axis=0))
best_dist = -1
best_color = ""
print("cluster ",i)
for color in COLOR_VALUES.keys():
curr_dist = np.sqrt(np.sum((COLOR_VALUES[color] - labelMeanRGB) ** 2))
print(color," => ",curr_dist)
if best_dist == -1 or curr_dist < best_dist:
best_dist = curr_dist
best_color = color
colors[best_color] += 1
print(colors)
#detectedimg = np.zeros(originalimg.shape)
#detectedimg = originalimg[labels != 0]
#plt.imshow(detectedimg),plt.colorbar(),plt.show()
#showHealthDetected(img)
#%%
labelHLS = imgHLS[labels==1, :]
labelMeanHLS = np.array(labelHLS.mean(axis=0))
labelMeanHLS[1] = labelMeanHLS[1]/256
plt.imshow(labelHLS),plt.colorbar(),plt.show()
plt.imshow([labelMeanHLS]),plt.colorbar(),plt.show()
distToRed = abs(labelMeanHLS[0] - RED_HUE)
distToYellow = abs(labelMeanHLS[0] - ORANGE_HUE)
distToBlue = abs(labelMeanHLS[0] - BLUE_HUE)
distToWhite = abs(labelMeanHLS[1] - 100)
#%%
labelRGB = originalimg[labels==1]
#plt.imshow(labelRGB),plt.colorbar(),plt.show()
labelMeanRGB = np.array([labelRGB.mean(axis=0)])
labelMeanRGB = labelMeanRGB / 255
#plt.imshow(labelMeanRGB),plt.colorbar(),plt.show()
plt.imshow([labelMeanRGB]),plt.colorbar(),plt.show()
#%%
# # reading the current results.
results = pd.read_csv("C:\\Users\\Helldragger\\Documents\\projects\\MetaWatch\\MetaWatch\\data\\temp\\2.csv")
# barlett, kinda clean (4-6 anomalies)
# bohman, blackmanharris, nuttall = same
# rolling window:
# [2.s] 120 clean( 1 anomaly)
# [1.5] 90 semi clean (3 anomalies).
# [1.s] 60 semi clean ( 4 - 6 death anomalies)
# [.5s] 30 ugly (10+)
res2 = results.groupby(['team', "hero"])["health", "armor", "shield", "death"].rolling(120).mean().reset_index()#.unstack(['team', "hero"])
res2["frame"] = res2["level_2"] // 12
res2.loc[res2.death > 0, "death"] = 1
res2 = res2.drop("level_2", axis=1)
res3 = pd.melt(res2, ['team', "hero", "frame", "death"])
#sns.relplot(x="frame", y="value", hue='variable', col="team", kind="line", data=res3, row="hero")
plt.style.use("seaborn-colorblind")
fig, axes = plt.subplots(6,2, figsize=(1920,1080), dpi=400)
i = 0
for team in res2.team.unique():
j = 0
for hero in res2.hero.unique():
frames = res2.loc[(res2.team==team) & (res2.hero == hero), "frame"]
health = res2.loc[(res2.team==team) & (res2.hero == hero), "health"]
shield = res2.loc[(res2.team==team) & (res2.hero == hero), "shield"]
armor = res2.loc[(res2.team==team) & (res2.hero == hero), "armor"]
axes[j,i].stackplot(frames,
health,
armor,
shield,cmap=plt.get_cmap("Accent"))
j+=1
i+=1
#plt.title('Recorded Game Statistics')
plt.show()
fig, axes = plt.subplots(6,2)
i = 0
for team in res2.team.unique():
j = 0
for hero in res2.hero.unique():
current_data = res2.loc[(res2.team==team) & (res2.hero == hero)]
frames = current_data.frame
daed_frames = (current_data.health < 25) & (current_data.armor < 25) & (current_data.death == 1)
axes[j,i].stackplot(frames,daed_frames, cmap=plt.get_cmap("Accent"))
j+=1
i+=1
#%%
# merging
class DeathInterval:
def __init__(self, start:int, previous=None, next=None):
self.end = self.start = start
self.previous = previous
self.next = next
def span(self):
return self.end - self.start
fig, axes = plt.subplots(6,2)
i = 0
for team in res2.team.unique():
j = 0
for hero in res2.hero.unique():
current_data = res2.loc[(res2.team==team) & (res2.hero == hero)]
frames = current_data.frame
# daed_frames = (current_data.health < 25) & (current_data.armor < 25) & (current_data.death == 1)
spawned_frames = (current_data.health >= 25)
axes[j,i].stackplot(frames,spawned_frames)
for frame in daed_frames:
pass # TODO merge small intervals with little distance together, in order to clean the death reports.
j+=1
i+=1
#%%
# reading ultimate values
import pytesseract
def ocr_core(filename):
"""
This function will handle the core OCR processing of images.
"""
text = pytesseract.image_to_string(cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2GRAY ), lang="Koverwatch") # We'll use Pillow's Image class to open the image and pytesseract to detect the string in the image
return text
#%%
import pytesseract
path = "C:/Users/Helldragger/Documents/projects/MetaWatch/MetaWatch/src/features/OCRTEST.png"
img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY )
plt.imshow(img)
plt.show()
srcTri = np.array( [[0, 0], [img.shape[1] - 1, 0], [0, img.shape[0] - 1]] ).astype(np.float32)
dstTri = np.array( [[0, 0], [img.shape[1] - 1, 0], [img.shape[1]*0.08, img.shape[0] - 1]] ).astype(np.float32)
warp_mat = cv.getAffineTransform(srcTri, dstTri)
img = cv.warpAffine(img, warp_mat, (img.shape[1], img.shape[0]))
plt.imshow(img)
plt.show()
thresh = 200
maxValue = 255
img = cv2.threshold(img, thresh, maxValue, cv2.THRESH_BINARY )[1]
plt.imshow(img)
plt.show()
text = pytesseract.image_to_string(img, config="digits", lang="Koverwatch")
print("img text: '{}'".format(text))
#%%
import pytesseract
import numpy as np
import cv2
from matplotlib import pyplot as plt
import seaborn as sns
def convertUltimateImgToText(img):
plt.imshow(img);
plt.show();
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY )
plt.imshow(img);
plt.show();
srcTri = np.array( [[0, 0], [img.shape[1] - 1, 0], [0, img.shape[0] - 1]] ).astype(np.float32)
dstTri = np.array( [[0, 0], [img.shape[1] - 1, 0], [img.shape[1]*0.19, img.shape[0] - 1]] ).astype(np.float32)
warp_mat = cv2.getAffineTransform(srcTri, dstTri)
img = cv2.warpAffine(img, warp_mat, (img.shape[1], img.shape[0]))
plt.imshow(img);
plt.show();
thresh = 200
maxValue = 255
img = cv2.threshold(img, thresh, maxValue, cv2.THRESH_BINARY )[1]
plt.imshow(img);
plt.show();
text = pytesseract.image_to_string(img, config="digits", lang="Koverwatch")
print("img ult text: '{}'".format(text))
return text
def test_UltimateParsing(testSources):
# test format: COLOR_expectedValue.png
expected = {}
testedColors = set();
testedValues = set();
colorToIndex = {}
valueToIndex = {}
colorLabels = []
valueLabels = []
for src in testSources:
color, value = src.split("_");
expected[src] = {"value":value, "color":color};
if color not in testedColors:
colorToIndex[color] = len(testedColors)
testedColors.add(color)
colorLabels.append(color)
if value not in testedValues:
valueToIndex[value] = len(testedValues)
testedValues.add(value)
valueLabels.append(value)
imgs = {}
errors = np.zeros((len(testedValues), len(testedColors)))
i = 0
legendPrinted = False
for src in expected.keys():
ultimateImg = cv2.imread('src/data/tests/Ultimate/'+src+'.png',-1);
ultimateImg = cv2.cvtColor(ultimateImg, cv2.COLOR_BGR2RGB )
assert ultimateImg is not None
value = convertUltimateImgToText(ultimateImg)
if value != expected[src]["value"]:
errors[valueToIndex[expected[src]["value"]], colorToIndex[expected[src]["color"]]] += 1
sns.heatmap(data=errors, xticklabels=colorLabels, yticklabels=valueLabels)#.get_figure()#.savefig("src/data/tests/Ultimate/Tests_ErrorHeatmap.png")
plt.show()
totalErrors = errors.sum(axis=0).sum()
print("total color errors:", errors.sum(axis=0))
print("total values errors:", errors.sum(axis=1))
print("total errors:", errors.sum(axis=0).sum())
ultimateAcc = 1 - (totalErrors / len(testSources))
print("Ultimate detection accuracy: ", ultimateAcc)
#assert ultimateAcc >= 0.90 # 10% d'erreur
FULL_TEST_SOURCES = [
"BLUE_0",
"BLUE_1",
"BLUE_14",
"BLUE_15",
"BLUE_16",
"BLUE_24",
"BLUE_25",
"GREEN_13",
"GREEN_16",
"GREEN_21",
"GREEN_22",
"GREEN_39",
"GREEN_42",
"GREY_0",
"GREY_11",
"GREY_13",
"GREY_34",
"ORANGE_0",
"RED_10",
"RED_26",
"WHITE_0",
"WHITE_1",
"WHITE_8",
"WHITE_21",
"WHITE_24",
"WHITE_34"
]
ACTUAL_TEST_SOURCES = [
"BLUE_0",
"BLUE_1",
"BLUE_14",
"BLUE_15",
"BLUE_16",
"BLUE_24",
"BLUE_25",
]
test_UltimateParsing(ACTUAL_TEST_SOURCES)
#%%
from os import listdir
from os.path import isfile, join
imgDir = "src/data/tests/Ultimate/"
onlyfiles = [f for f in listdir(imgDir) if isfile(join(imgDir, f))]
imgHeatmap = None;
lastImg = None;
for fileSrc in onlyfiles:
img = cv.imread(imgDir+"/"+fileSrc)
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY);
if imgHeatmap is None:
imgHeatmap=np.zeros(img.shape)
lastImg = img
if img.shape != imgHeatmap.shape:
continue
else:
imgHeatmap = imgHeatmap + img
sns.heatmap(imgHeatmap);
plt.show();
thresholedHeatmap = imgHeatmap
thresholedHeatmap[thresholedHeatmap> 110000] = 0
sns.heatmap(thresholedHeatmap);
plt.show();
#%%
plt.show();
cv.imshow("heatmap",imgHeatmap)
cv.waitKey(0) | [
"pandas.read_csv",
"scipy.ndimage.measurements.label",
"cv2.imshow",
"numpy.array",
"matplotlib.colors.keys",
"matplotlib.pyplot.imshow",
"cv2.calcHist",
"os.listdir",
"seaborn.distplot",
"cv2.threshold",
"numpy.where",
"matplotlib.pyplot.plot",
"scipy.ndimage.label",
"matplotlib.pyplot.st... | [((629, 747), 'cv2.imread', 'cv2.imread', (['"""C:\\\\Users\\\\Helldragger\\\\Documents\\\\projects\\\\MetaWatch\\\\MetaWatch\\\\src\\\\features\\\\original.jpg"""'], {}), "(\n 'C:\\\\Users\\\\Helldragger\\\\Documents\\\\projects\\\\MetaWatch\\\\MetaWatch\\\\src\\\\features\\\\original.jpg'\n )\n", (639, 747), False, 'import cv2\n'), ((824, 853), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (832, 853), True, 'import numpy as np\n'), ((863, 892), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (871, 892), True, 'import numpy as np\n'), ((902, 1031), 'cv2.imread', 'cv.imread', (['"""C:\\\\Users\\\\Helldragger\\\\Documents\\\\projects\\\\MetaWatch\\\\MetaWatch\\\\src\\\\features\\\\overlaymaskheroes.png"""', '(0)'], {}), "(\n 'C:\\\\Users\\\\Helldragger\\\\Documents\\\\projects\\\\MetaWatch\\\\MetaWatch\\\\src\\\\features\\\\overlaymaskheroes.png'\n , 0)\n", (911, 1031), True, 'import cv2 as cv\n'), ((1222, 1294), 'cv2.grabCut', 'cv.grabCut', (['img', 'mask', 'None', 'bgdModel', 'fgdModel', '(5)', 'cv.GC_INIT_WITH_MASK'], {}), '(img, mask, None, bgdModel, fgdModel, 5, cv.GC_INIT_WITH_MASK)\n', (1232, 1294), True, 'import cv2 as cv\n'), ((1468, 1491), 'cv2.imshow', 'cv2.imshow', (['"""cut"""', 'img2'], {}), "('cut', img2)\n", (1478, 1491), False, 'import cv2\n'), ((1493, 1520), 'cv2.imshow', 'cv2.imshow', (['"""original"""', 'img'], {}), "('original', img)\n", (1503, 1520), False, 'import cv2\n'), ((1522, 1536), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1533, 1536), False, 'import cv2\n'), ((1626, 1659), 'cv2.imshow', 'cv2.imshow', (['"""cropped"""', 'croppedImg'], {}), "('cropped', croppedImg)\n", (1636, 1659), False, 'import cv2\n'), ((1661, 1675), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1672, 1675), False, 'import cv2\n'), ((1725, 1848), 'cv2.imread', 'cv2.imread', 
(['"""C:\\\\Users\\\\Helldragger\\\\Documents\\\\projects\\\\MetaWatch\\\\MetaWatch\\\\src\\\\features\\\\maskheroA1.png"""', '(0)'], {}), "(\n 'C:\\\\Users\\\\Helldragger\\\\Documents\\\\projects\\\\MetaWatch\\\\MetaWatch\\\\src\\\\features\\\\maskheroA1.png'\n , 0)\n", (1735, 1848), False, 'import cv2\n'), ((1851, 1894), 'cv2.cvtColor', 'cv2.cvtColor', (['src1_mask', 'cv2.COLOR_GRAY2RGB'], {}), '(src1_mask, cv2.COLOR_GRAY2RGB)\n', (1863, 1894), False, 'import cv2\n'), ((1913, 1944), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'src1_mask'], {}), '(img, src1_mask)\n', (1928, 1944), False, 'import cv2\n'), ((1945, 1983), 'cv2.imshow', 'cv2.imshow', (['"""hero maked"""', 'masked_image'], {}), "('hero maked', masked_image)\n", (1955, 1983), False, 'import cv2\n'), ((1985, 1999), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1996, 1999), False, 'import cv2\n'), ((2111, 2120), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (2117, 2120), True, 'import numpy as np\n'), ((2128, 2137), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (2134, 2137), True, 'import numpy as np\n'), ((2145, 2154), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (2151, 2154), True, 'import numpy as np\n'), ((2162, 2171), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (2168, 2171), True, 'import numpy as np\n'), ((2257, 2287), 'cv2.imshow', 'cv2.imshow', (['"""cropped"""', 'cropImg'], {}), "('cropped', cropImg)\n", (2267, 2287), False, 'import cv2\n'), ((2288, 2302), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2299, 2302), False, 'import cv2\n'), ((3292, 3328), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3304, 3328), False, 'import cv2\n'), ((3336, 3372), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (3348, 3372), False, 'import cv2\n'), ((3617, 3627), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3625, 3627), True, 'from matplotlib import pyplot as plt\n'), ((4046, 
4082), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (4058, 4082), False, 'import cv2\n'), ((4159, 4196), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(-1.0)', 'vmax': '(1.0)'}), '(vmin=-1.0, vmax=1.0)\n', (4175, 4196), False, 'from matplotlib import colors\n'), ((4276, 4312), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (4288, 4312), False, 'import cv2\n'), ((4323, 4340), 'cv2.split', 'cv2.split', (['hlsImg'], {}), '(hlsImg)\n', (4332, 4340), False, 'import cv2\n'), ((4383, 4395), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4393, 4395), True, 'from matplotlib import pyplot as plt\n'), ((4660, 4670), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4668, 4670), True, 'from matplotlib import pyplot as plt\n'), ((4759, 4795), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (4771, 4795), False, 'import cv2\n'), ((4807, 4843), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (4819, 4843), False, 'import cv2\n'), ((4854, 4871), 'cv2.split', 'cv2.split', (['hlsImg'], {}), '(hlsImg)\n', (4863, 4871), False, 'import cv2\n'), ((4880, 4892), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4890, 4892), True, 'from matplotlib import pyplot as plt\n'), ((4981, 4998), 'cv2.split', 'cv2.split', (['hlsImg'], {}), '(hlsImg)\n', (4990, 4998), False, 'import cv2\n'), ((5209, 5219), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5217, 5219), True, 'from matplotlib import pyplot as plt\n'), ((6315, 6327), 'numpy.zeros', 'np.zeros', (['(22)'], {}), '(22)\n', (6323, 6327), True, 'import numpy as np\n'), ((6346, 6358), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6354, 6358), True, 'import numpy as np\n'), ((6619, 6631), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6629, 6631), True, 'from 
matplotlib import pyplot as plt\n'), ((6632, 6646), 'matplotlib.pyplot.plot', 'plt.plot', (['hist'], {}), '(hist)\n', (6640, 6646), True, 'from matplotlib import pyplot as plt\n'), ((6647, 6657), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6655, 6657), True, 'from matplotlib import pyplot as plt\n'), ((6659, 6685), 'seaborn.distplot', 'sns.distplot', (['observations'], {}), '(observations)\n', (6671, 6685), True, 'import seaborn as sns\n'), ((6689, 6699), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6697, 6699), True, 'from matplotlib import pyplot as plt\n'), ((8157, 8176), 'seaborn.heatmap', 'sns.heatmap', (['errors'], {}), '(errors)\n', (8168, 8176), True, 'import seaborn as sns\n'), ((8177, 8187), 'matplotlib.pyplot.plot', 'plt.plot', ([], {}), '()\n', (8185, 8187), True, 'from matplotlib import pyplot as plt\n'), ((8188, 8200), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8198, 8200), True, 'from matplotlib import pyplot as plt\n'), ((8249, 8280), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'errorsSummed'}), '(data=errorsSummed)\n', (8261, 8280), True, 'import seaborn as sns\n'), ((8281, 8291), 'matplotlib.pyplot.plot', 'plt.plot', ([], {}), '()\n', (8289, 8291), True, 'from matplotlib import pyplot as plt\n'), ((8761, 8797), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (8773, 8797), False, 'import cv2\n'), ((8852, 8920), 'scipy.ndimage.morphology.morphological_gradient', 'ndimage.morphology.morphological_gradient', (['img[:, :, 0]'], {'size': '(1, 2)'}), '(img[:, :, 0], size=(1, 2))\n', (8893, 8920), False, 'from scipy import ndimage\n'), ((9058, 9093), 'scipy.ndimage.measurements.label', 'ndimage.measurements.label', (['imgGrad'], {}), '(imgGrad)\n', (9084, 9093), False, 'from scipy import ndimage\n'), ((9186, 9237), 'cv2.cvtColor', 'cv.cvtColor', (['analyzedImgs[imgSrc]', 'cv.COLOR_RGB2HLS'], {}), '(analyzedImgs[imgSrc], cv.COLOR_RGB2HLS)\n', (9197, 9237), 
True, 'import cv2 as cv\n'), ((9263, 9306), 'scipy.ndimage.measurements.label', 'ndimage.measurements.label', (['HLSImg[:, :, 0]'], {}), '(HLSImg[:, :, 0])\n', (9289, 9306), False, 'from scipy import ndimage\n'), ((9404, 9447), 'scipy.ndimage.measurements.label', 'ndimage.measurements.label', (['HLSImg[:, :, 1]'], {}), '(HLSImg[:, :, 1])\n', (9430, 9447), False, 'from scipy import ndimage\n'), ((9552, 9595), 'scipy.ndimage.measurements.label', 'ndimage.measurements.label', (['HLSImg[:, :, 2]'], {}), '(HLSImg[:, :, 2])\n', (9578, 9595), False, 'from scipy import ndimage\n'), ((10691, 10727), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (10703, 10727), False, 'import cv2\n'), ((10801, 10837), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (10813, 10837), False, 'import cv2\n'), ((10969, 11040), 'scipy.ndimage.morphology.morphological_gradient', 'ndimage.morphology.morphological_gradient', (['imgHLS[:, :, 1]'], {'size': '(1, 4)'}), '(imgHLS[:, :, 1], size=(1, 4))\n', (11010, 11040), False, 'from scipy import ndimage\n'), ((11095, 11137), 'scipy.ndimage.grey_erosion', 'ndimage.grey_erosion', (['imgGrad'], {'size': '(2, 2)'}), '(imgGrad, size=(2, 2))\n', (11115, 11137), False, 'from scipy import ndimage\n'), ((11196, 11239), 'scipy.ndimage.grey_dilation', 'ndimage.grey_dilation', (['imgGrad'], {'size': '(2, 2)'}), '(imgGrad, size=(2, 2))\n', (11217, 11239), False, 'from scipy import ndimage\n'), ((11422, 11461), 'cv2.cvtColor', 'cv2.cvtColor', (['imgHLS', 'cv2.COLOR_HLS2RGB'], {}), '(imgHLS, cv2.COLOR_HLS2RGB)\n', (11434, 11461), False, 'import cv2\n'), ((11524, 11546), 'scipy.ndimage.label', 'ndimage.label', (['imgGrad'], {}), '(imgGrad)\n', (11537, 11546), False, 'from scipy import ndimage\n'), ((12943, 12967), 'seaborn.heatmap', 'sns.heatmap', ([], {'data': 'errors'}), '(data=errors)\n', (12954, 12967), True, 'import seaborn as sns\n'), ((14481, 14590), 
'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Helldragger\\\\Documents\\\\projects\\\\MetaWatch\\\\MetaWatch\\\\data\\\\temp\\\\2.csv"""'], {}), "(\n 'C:\\\\Users\\\\Helldragger\\\\Documents\\\\projects\\\\MetaWatch\\\\MetaWatch\\\\data\\\\temp\\\\2.csv'\n )\n", (14492, 14590), True, 'import pandas as pd\n'), ((15078, 15127), 'pandas.melt', 'pd.melt', (['res2', "['team', 'hero', 'frame', 'death']"], {}), "(res2, ['team', 'hero', 'frame', 'death'])\n", (15085, 15127), True, 'import pandas as pd\n'), ((15228, 15263), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-colorblind"""'], {}), "('seaborn-colorblind')\n", (15241, 15263), True, 'from matplotlib import pyplot as plt\n'), ((15277, 15326), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(6)', '(2)'], {'figsize': '(1920, 1080)', 'dpi': '(400)'}), '(6, 2, figsize=(1920, 1080), dpi=400)\n', (15289, 15326), True, 'from matplotlib import pyplot as plt\n'), ((15948, 15958), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15956, 15958), True, 'from matplotlib import pyplot as plt\n'), ((15974, 15992), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(6)', '(2)'], {}), '(6, 2)\n', (15986, 15992), True, 'from matplotlib import pyplot as plt\n'), ((16657, 16675), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(6)', '(2)'], {}), '(6, 2)\n', (16669, 16675), True, 'from matplotlib import pyplot as plt\n'), ((17824, 17839), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (17834, 17839), True, 'from matplotlib import pyplot as plt\n'), ((17840, 17850), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17848, 17850), True, 'from matplotlib import pyplot as plt\n'), ((18071, 18108), 'cv2.getAffineTransform', 'cv.getAffineTransform', (['srcTri', 'dstTri'], {}), '(srcTri, dstTri)\n', (18092, 18108), True, 'import cv2 as cv\n'), ((18115, 18173), 'cv2.warpAffine', 'cv.warpAffine', (['img', 'warp_mat', '(img.shape[1], img.shape[0])'], {}), '(img, warp_mat, (img.shape[1], 
img.shape[0]))\n', (18128, 18173), True, 'import cv2 as cv\n'), ((18175, 18190), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (18185, 18190), True, 'from matplotlib import pyplot as plt\n'), ((18191, 18201), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18199, 18201), True, 'from matplotlib import pyplot as plt\n'), ((18298, 18313), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (18308, 18313), True, 'from matplotlib import pyplot as plt\n'), ((18314, 18324), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18322, 18324), True, 'from matplotlib import pyplot as plt\n'), ((18332, 18400), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['img'], {'config': '"""digits"""', 'lang': '"""Koverwatch"""'}), "(img, config='digits', lang='Koverwatch')\n", (18359, 18400), False, 'import pytesseract\n'), ((22399, 22422), 'seaborn.heatmap', 'sns.heatmap', (['imgHeatmap'], {}), '(imgHeatmap)\n', (22410, 22422), True, 'import seaborn as sns\n'), ((22424, 22434), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22432, 22434), True, 'from matplotlib import pyplot as plt\n'), ((22516, 22546), 'seaborn.heatmap', 'sns.heatmap', (['thresholedHeatmap'], {}), '(thresholedHeatmap)\n', (22527, 22546), True, 'import seaborn as sns\n'), ((22549, 22559), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22557, 22559), True, 'from matplotlib import pyplot as plt\n'), ((22565, 22575), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22573, 22575), True, 'from matplotlib import pyplot as plt\n'), ((22577, 22609), 'cv2.imshow', 'cv.imshow', (['"""heatmap"""', 'imgHeatmap'], {}), "('heatmap', imgHeatmap)\n", (22586, 22609), True, 'import cv2 as cv\n'), ((22609, 22622), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (22619, 22622), True, 'import cv2 as cv\n'), ((347, 360), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (358, 360), False, 'from dotenv import find_dotenv, load_dotenv\n'), 
((1382, 1397), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1392, 1397), True, 'from matplotlib import pyplot as plt\n'), ((1398, 1412), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1410, 1412), True, 'from matplotlib import pyplot as plt\n'), ((1413, 1423), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1421, 1423), True, 'from matplotlib import pyplot as plt\n'), ((1424, 1440), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img2'], {}), '(img2)\n', (1434, 1440), True, 'from matplotlib import pyplot as plt\n'), ((1441, 1455), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1453, 1455), True, 'from matplotlib import pyplot as plt\n'), ((1456, 1466), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1464, 1466), True, 'from matplotlib import pyplot as plt\n'), ((1576, 1598), 'matplotlib.pyplot.imshow', 'plt.imshow', (['croppedImg'], {}), '(croppedImg)\n', (1586, 1598), True, 'from matplotlib import pyplot as plt\n'), ((1599, 1613), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1611, 1613), True, 'from matplotlib import pyplot as plt\n'), ((1614, 1624), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1622, 1624), True, 'from matplotlib import pyplot as plt\n'), ((2854, 2982), 'cv2.imread', 'cv2.imread', (["(\n 'C:\\\\Users\\\\Helldragger\\\\Documents\\\\projects\\\\MetaWatch\\\\MetaWatch\\\\src\\\\data\\\\tests\\\\'\n + src + '.jpg')", '(-1)'], {}), "(\n 'C:\\\\Users\\\\Helldragger\\\\Documents\\\\projects\\\\MetaWatch\\\\MetaWatch\\\\src\\\\data\\\\tests\\\\'\n + src + '.jpg', -1)\n", (2864, 2982), False, 'import cv2\n'), ((3473, 3531), 'cv2.calcHist', 'cv2.calcHist', (['img', '[i]', 'None', '[256]', '[1, 256]', '(True)', '(False)'], {}), '(img, [i], None, [256], [1, 256], True, False)\n', (3485, 3531), False, 'import cv2\n'), ((3560, 3592), 'matplotlib.pyplot.plot', 'plt.plot', (['histr'], {'color': 'colors[i]'}), '(histr, color=colors[i])\n', (3568, 3592), True, 
'from matplotlib import pyplot as plt\n'), ((3598, 3616), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 256]'], {}), '([0, 256])\n', (3606, 3616), True, 'from matplotlib import pyplot as plt\n'), ((3628, 3643), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (3638, 3643), True, 'from matplotlib import pyplot as plt\n'), ((3644, 3658), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3656, 3658), True, 'from matplotlib import pyplot as plt\n'), ((3659, 3669), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3667, 3669), True, 'from matplotlib import pyplot as plt\n'), ((3839, 3861), 'matplotlib.pyplot.imshow', 'plt.imshow', (['reducedimg'], {}), '(reducedimg)\n', (3849, 3861), True, 'from matplotlib import pyplot as plt\n'), ((3862, 3876), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3874, 3876), True, 'from matplotlib import pyplot as plt\n'), ((3877, 3887), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3885, 3887), True, 'from matplotlib import pyplot as plt\n'), ((3947, 3969), 'matplotlib.pyplot.imshow', 'plt.imshow', (['reducedimg'], {}), '(reducedimg)\n', (3957, 3969), True, 'from matplotlib import pyplot as plt\n'), ((3970, 3984), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3982, 3984), True, 'from matplotlib import pyplot as plt\n'), ((3985, 3995), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3993, 3995), True, 'from matplotlib import pyplot as plt\n'), ((5258, 5301), 'cv2.cvtColor', 'cv2.cvtColor', (['imgs[name]', 'cv2.COLOR_BGR2RGB'], {}), '(imgs[name], cv2.COLOR_BGR2RGB)\n', (5270, 5301), False, 'import cv2\n'), ((5345, 5381), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (5357, 5381), False, 'import cv2\n'), ((5457, 5474), 'cv2.split', 'cv2.split', (['hlsImg'], {}), '(hlsImg)\n', (5466, 5474), False, 'import cv2\n'), ((5488, 5500), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5498, 
5500), True, 'from matplotlib import pyplot as plt\n'), ((5816, 5855), 'cv2.cvtColor', 'cv2.cvtColor', (['hlsImg', 'cv2.COLOR_HLS2RGB'], {}), '(hlsImg, cv2.COLOR_HLS2RGB)\n', (5828, 5855), False, 'import cv2\n'), ((5920, 5930), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5928, 5930), True, 'from matplotlib import pyplot as plt\n'), ((6257, 6269), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6267, 6269), True, 'from matplotlib import pyplot as plt\n'), ((6274, 6288), 'matplotlib.pyplot.plot', 'plt.plot', (['hist'], {}), '(hist)\n', (6282, 6288), True, 'from matplotlib import pyplot as plt\n'), ((6293, 6303), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6301, 6303), True, 'from matplotlib import pyplot as plt\n'), ((6397, 6450), 'cv2.cvtColor', 'cv.cvtColor', (['analyzedImgs[imgSrc]', 'cv2.COLOR_RGB2GRAY'], {}), '(analyzedImgs[imgSrc], cv2.COLOR_RGB2GRAY)\n', (6408, 6450), True, 'import cv2 as cv\n'), ((6915, 6950), 'scipy.ndimage.measurements.label', 'ndimage.measurements.label', (['imgGrad'], {}), '(imgGrad)\n', (6941, 6950), False, 'from scipy import ndimage\n'), ((7556, 7587), 'numpy.linspace', 'np.linspace', (['(0)', '(256)', 'bin_amount'], {}), '(0, 256, bin_amount)\n', (7567, 7587), True, 'import numpy as np\n'), ((8800, 8815), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (8810, 8815), True, 'from matplotlib import pyplot as plt\n'), ((8816, 8830), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (8828, 8830), True, 'from matplotlib import pyplot as plt\n'), ((8831, 8841), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8839, 8841), True, 'from matplotlib import pyplot as plt\n'), ((8918, 8937), 'matplotlib.pyplot.imshow', 'plt.imshow', (['imgGrad'], {}), '(imgGrad)\n', (8928, 8937), True, 'from matplotlib import pyplot as plt\n'), ((8938, 8952), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (8950, 8952), True, 'from matplotlib import pyplot as plt\n'), 
((8953, 8963), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8961, 8963), True, 'from matplotlib import pyplot as plt\n'), ((8989, 9008), 'matplotlib.pyplot.imshow', 'plt.imshow', (['imgGrad'], {}), '(imgGrad)\n', (8999, 9008), True, 'from matplotlib import pyplot as plt\n'), ((9009, 9023), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (9021, 9023), True, 'from matplotlib import pyplot as plt\n'), ((9024, 9034), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9032, 9034), True, 'from matplotlib import pyplot as plt\n'), ((12070, 12106), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (12082, 12106), False, 'import cv2\n'), ((12122, 12190), 'scipy.ndimage.morphology.morphological_gradient', 'ndimage.morphology.morphological_gradient', (['img[:, :, 0]'], {'size': '(1, 2)'}), '(img[:, :, 0], size=(1, 2))\n', (12163, 12190), False, 'from scipy import ndimage\n'), ((12236, 12271), 'scipy.ndimage.measurements.label', 'ndimage.measurements.label', (['imgGrad'], {}), '(imgGrad)\n', (12262, 12271), False, 'from scipy import ndimage\n'), ((12811, 12824), 'matplotlib.colors.keys', 'colors.keys', ([], {}), '()\n', (12822, 12824), False, 'from matplotlib import colors\n'), ((13876, 13896), 'matplotlib.pyplot.imshow', 'plt.imshow', (['labelHLS'], {}), '(labelHLS)\n', (13886, 13896), True, 'from matplotlib import pyplot as plt\n'), ((13897, 13911), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (13909, 13911), True, 'from matplotlib import pyplot as plt\n'), ((13912, 13922), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13920, 13922), True, 'from matplotlib import pyplot as plt\n'), ((13923, 13949), 'matplotlib.pyplot.imshow', 'plt.imshow', (['[labelMeanHLS]'], {}), '([labelMeanHLS])\n', (13933, 13949), True, 'from matplotlib import pyplot as plt\n'), ((13950, 13964), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (13962, 13964), True, 'from 
matplotlib import pyplot as plt\n'), ((13965, 13975), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13973, 13975), True, 'from matplotlib import pyplot as plt\n'), ((14378, 14404), 'matplotlib.pyplot.imshow', 'plt.imshow', (['[labelMeanRGB]'], {}), '([labelMeanRGB])\n', (14388, 14404), True, 'from matplotlib import pyplot as plt\n'), ((14405, 14419), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (14417, 14419), True, 'from matplotlib import pyplot as plt\n'), ((14420, 14430), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14428, 14430), True, 'from matplotlib import pyplot as plt\n'), ((17785, 17801), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (17795, 17801), False, 'import cv2\n'), ((18238, 18293), 'cv2.threshold', 'cv2.threshold', (['img', 'thresh', 'maxValue', 'cv2.THRESH_BINARY'], {}), '(img, thresh, maxValue, cv2.THRESH_BINARY)\n', (18251, 18293), False, 'import cv2\n'), ((18594, 18609), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (18604, 18609), True, 'from matplotlib import pyplot as plt\n'), ((18615, 18625), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18623, 18625), True, 'from matplotlib import pyplot as plt\n'), ((18637, 18674), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (18649, 18674), False, 'import cv2\n'), ((18685, 18700), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (18695, 18700), True, 'from matplotlib import pyplot as plt\n'), ((18706, 18716), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18714, 18716), True, 'from matplotlib import pyplot as plt\n'), ((18952, 18990), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['srcTri', 'dstTri'], {}), '(srcTri, dstTri)\n', (18974, 18990), False, 'import cv2\n'), ((19001, 19060), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'warp_mat', '(img.shape[1], img.shape[0])'], {}), '(img, warp_mat, (img.shape[1], img.shape[0]))\n', 
(19015, 19060), False, 'import cv2\n'), ((19065, 19080), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (19075, 19080), True, 'from matplotlib import pyplot as plt\n'), ((19086, 19096), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19094, 19096), True, 'from matplotlib import pyplot as plt\n'), ((19213, 19228), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (19223, 19228), True, 'from matplotlib import pyplot as plt\n'), ((19234, 19244), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19242, 19244), True, 'from matplotlib import pyplot as plt\n'), ((19257, 19325), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['img'], {'config': '"""digits"""', 'lang': '"""Koverwatch"""'}), "(img, config='digits', lang='Koverwatch')\n", (19284, 19325), False, 'import pytesseract\n'), ((20661, 20735), 'seaborn.heatmap', 'sns.heatmap', ([], {'data': 'errors', 'xticklabels': 'colorLabels', 'yticklabels': 'valueLabels'}), '(data=errors, xticklabels=colorLabels, yticklabels=valueLabels)\n', (20672, 20735), True, 'import seaborn as sns\n'), ((20813, 20823), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20821, 20823), True, 'from matplotlib import pyplot as plt\n'), ((22125, 22158), 'cv2.imread', 'cv.imread', (["(imgDir + '/' + fileSrc)"], {}), "(imgDir + '/' + fileSrc)\n", (22134, 22158), True, 'import cv2 as cv\n'), ((22165, 22200), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (22176, 22200), True, 'import cv2 as cv\n'), ((1296, 1337), 'numpy.where', 'np.where', (['((mask == 2) | (mask == 0))', '(0)', '(1)'], {}), '((mask == 2) | (mask == 0), 0, 1)\n', (1304, 1337), True, 'import numpy as np\n'), ((7829, 7865), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (7841, 7865), False, 'import cv2\n'), ((7885, 7953), 'scipy.ndimage.morphology.morphological_gradient', 
'ndimage.morphology.morphological_gradient', (['img[:, :, 0]'], {'size': '(1, 2)'}), '(img[:, :, 0], size=(1, 2))\n', (7926, 7953), False, 'from scipy import ndimage\n'), ((8025, 8060), 'scipy.ndimage.measurements.label', 'ndimage.measurements.label', (['imgGrad'], {}), '(imgGrad)\n', (8051, 8060), False, 'from scipy import ndimage\n'), ((17863, 17927), 'numpy.array', 'np.array', (['[[0, 0], [img.shape[1] - 1, 0], [0, img.shape[0] - 1]]'], {}), '([[0, 0], [img.shape[1] - 1, 0], [0, img.shape[0] - 1]])\n', (17871, 17927), True, 'import numpy as np\n'), ((17958, 18044), 'numpy.array', 'np.array', (['[[0, 0], [img.shape[1] - 1, 0], [img.shape[1] * 0.08, img.shape[0] - 1]]'], {}), '([[0, 0], [img.shape[1] - 1, 0], [img.shape[1] * 0.08, img.shape[0] -\n 1]])\n', (17966, 18044), True, 'import numpy as np\n'), ((19149, 19204), 'cv2.threshold', 'cv2.threshold', (['img', 'thresh', 'maxValue', 'cv2.THRESH_BINARY'], {}), '(img, thresh, maxValue, cv2.THRESH_BINARY)\n', (19162, 19204), False, 'import cv2\n'), ((20263, 20320), 'cv2.imread', 'cv2.imread', (["('src/data/tests/Ultimate/' + src + '.png')", '(-1)'], {}), "('src/data/tests/Ultimate/' + src + '.png', -1)\n", (20273, 20320), False, 'import cv2\n'), ((20339, 20383), 'cv2.cvtColor', 'cv2.cvtColor', (['ultimateImg', 'cv2.COLOR_BGR2RGB'], {}), '(ultimateImg, cv2.COLOR_BGR2RGB)\n', (20351, 20383), False, 'import cv2\n'), ((22008, 22023), 'os.listdir', 'listdir', (['imgDir'], {}), '(imgDir)\n', (22015, 22023), False, 'from os import listdir\n'), ((22248, 22267), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (22256, 22267), True, 'import numpy as np\n'), ((13342, 13391), 'numpy.sum', 'np.sum', (['((COLOR_VALUES[color] - labelMeanRGB) ** 2)'], {}), '((COLOR_VALUES[color] - labelMeanRGB) ** 2)\n', (13348, 13391), True, 'import numpy as np\n'), ((17466, 17486), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (17476, 17486), False, 'import cv2\n'), ((18736, 18800), 'numpy.array', 'np.array', 
(['[[0, 0], [img.shape[1] - 1, 0], [0, img.shape[0] - 1]]'], {}), '([[0, 0], [img.shape[1] - 1, 0], [0, img.shape[0] - 1]])\n', (18744, 18800), True, 'import numpy as np\n'), ((18835, 18921), 'numpy.array', 'np.array', (['[[0, 0], [img.shape[1] - 1, 0], [img.shape[1] * 0.19, img.shape[0] - 1]]'], {}), '([[0, 0], [img.shape[1] - 1, 0], [img.shape[1] * 0.19, img.shape[0] -\n 1]])\n', (18843, 18921), True, 'import numpy as np\n'), ((22034, 22049), 'os.path.join', 'join', (['imgDir', 'f'], {}), '(imgDir, f)\n', (22038, 22049), False, 'from os.path import isfile, join\n'), ((4113, 4126), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (4121, 4126), True, 'import numpy as np\n'), ((4130, 4143), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (4138, 4143), True, 'import numpy as np\n'), ((6565, 6585), 'numpy.where', 'np.where', (['(imgSum > 0)'], {}), '(imgSum > 0)\n', (6573, 6585), True, 'import numpy as np\n'), ((12497, 12546), 'numpy.sum', 'np.sum', (['((COLOR_VALUES[color] - labelMeanRGB) ** 2)'], {}), '((COLOR_VALUES[color] - labelMeanRGB) ** 2)\n', (12503, 12546), True, 'import numpy as np\n'), ((15853, 15875), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Accent"""'], {}), "('Accent')\n", (15865, 15875), True, 'from matplotlib import pyplot as plt\n'), ((16343, 16365), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Accent"""'], {}), "('Accent')\n", (16355, 16365), True, 'from matplotlib import pyplot as plt\n')] |
import numpy as np
from scipy.ndimage import affine_transform
def apply_offset(matrix, x, y, z):
    """Re-center an affine transform on the middle of the voxelgrid.

    Sandwiches ``matrix`` between a translation to the grid center and
    the inverse translation, so that rotations pivot around the center
    of the grid instead of its origin corner.

    Parameters
    ----------
    matrix : (4, 4) ndarray
        Homogeneous affine transform.
    x, y, z : uint
        Dimensions of the voxelgrid.

    Returns
    -------
    (4, 4) ndarray
        The centered transform.
    """
    # Center coordinate along each axis (the +0.5 lands on voxel centers).
    center = [float(dim) / 2 + 0.5 for dim in (x, y, z)]
    to_center = np.array([[1, 0, 0, center[0]],
                          [0, 1, 0, center[1]],
                          [0, 0, 1, center[2]],
                          [0, 0, 0, 1]])
    back_again = np.array([[1, 0, 0, -center[0]],
                           [0, 1, 0, -center[1]],
                           [0, 0, 1, -center[2]],
                           [0, 0, 0, 1]])
    return np.dot(np.dot(to_center, matrix), back_again)
def apply_transform(x, transform_matrix, channel_axis, fill_mode="constant", cval=0.):
    """Apply a homogeneous affine transform to each channel of ``x``.

    The 4x4 ``transform_matrix`` is split into its 3x3 linear part and
    its translation column, and every channel is resampled with
    nearest-neighbour interpolation (``order=0``).

    Parameters
    ----------
    x : ndarray
        4-D array with one channel axis and three spatial axes.
    transform_matrix : (4, 4) ndarray
        Homogeneous affine transform to apply.
    channel_axis : int
        Index of the channel axis in ``x``.
    fill_mode : str
        How to fill points mapped outside the input (passed to scipy).
    cval : float
        Fill value used when ``fill_mode`` is ``"constant"``.

    Returns
    -------
    ndarray
        Transformed array with the same layout as ``x``.
    """
    channels_first = np.rollaxis(x, channel_axis, 0)
    linear_part = transform_matrix[:3, :3]
    translation = transform_matrix[:3, 3]
    warped = [affine_transform(channel,
                              linear_part,
                              translation,
                              order=0,
                              mode=fill_mode,
                              cval=cval) for channel in channels_first]
    stacked = np.stack(warped, axis=0)
    return np.rollaxis(stacked, 0, channel_axis + 1)
def combine_transforms(transforms):
    """Collapse a sequence of affine transforms into one matrix.

    Transforms are composed left-to-right with matrix products, so the
    result is ``transforms[0] @ transforms[1] @ ...``.
    """
    combined = transforms[0]
    for extra in transforms[1:]:
        combined = np.dot(combined, extra)
    return combined
def Rx(angle, degrees=True):
    """Homogeneous 4x4 rotation matrix about the x axis.

    Parameters
    ----------
    angle : float
        Rotation angle, in degrees when ``degrees`` is True, otherwise
        in radians.
    """
    theta = np.deg2rad(angle) if degrees else angle
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[1, 0, 0, 0],
                     [0, c, s, 0],
                     [0, -s, c, 0],
                     [0, 0, 0, 1]])
def Ry(angle, degrees=True):
    """Homogeneous 4x4 rotation matrix about the y axis.

    Parameters
    ----------
    angle : float
        Rotation angle, in degrees when ``degrees`` is True, otherwise
        in radians.
    """
    theta = np.deg2rad(angle) if degrees else angle
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, 0, -s, 0],
                     [0, 1, 0, 0],
                     [s, 0, c, 0],
                     [0, 0, 0, 1]])
def Rz(angle, degrees=True):
    """Homogeneous 4x4 rotation matrix about the z axis.

    Parameters
    ----------
    angle : float
        Rotation angle, in degrees when ``degrees`` is True, otherwise
        in radians.
    """
    theta = np.deg2rad(angle) if degrees else angle
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, s, 0, 0],
                     [-s, c, 0, 0],
                     [0, 0, 1, 0],
                     [0, 0, 0, 1]])
def shift_voxels(tx, ty, tz):
    """Homogeneous 4x4 translation matrix for voxel offsets (tx, ty, tz)."""
    rows = [[1, 0, 0, tx],
            [0, 1, 0, ty],
            [0, 0, 1, tz],
            [0, 0, 0, 1]]
    return np.array(rows)
def flip_axis(x, axis):
    """Return ``x`` with the entry order reversed along ``axis``."""
    # Bring the target axis to the front, reverse it, then restore layout.
    front = np.asarray(x).swapaxes(axis, 0)
    reversed_front = front[::-1, ...]
    return reversed_front.swapaxes(0, axis)
def random_channel_shift(x, intensity, channel_axis=0):
    """Shift each channel of ``x`` by an independent random offset.

    Every channel gets its own offset drawn uniformly from
    ``[-intensity, intensity]``; results are clipped to the original
    global value range of ``x`` so no new extremes appear.
    """
    channels_first = np.rollaxis(x, channel_axis, 0)
    lo, hi = np.min(x), np.max(x)
    shifted = [np.clip(channel + np.random.uniform(-intensity, intensity), lo, hi)
               for channel in channels_first]
    stacked = np.stack(shifted, axis=0)
    return np.rollaxis(stacked, 0, channel_axis + 1)
| [
"scipy.ndimage.affine_transform",
"numpy.rollaxis",
"numpy.asarray",
"numpy.max",
"numpy.array",
"numpy.dot",
"numpy.stack",
"numpy.deg2rad",
"numpy.cos",
"numpy.random.uniform",
"numpy.min",
"numpy.sin"
] | [((394, 466), 'numpy.array', 'np.array', (['[[1, 0, 0, o_x], [0, 1, 0, o_y], [0, 0, 1, o_z], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, o_x], [0, 1, 0, o_y], [0, 0, 1, o_z], [0, 0, 0, 1]])\n', (402, 466), True, 'import numpy as np\n'), ((577, 652), 'numpy.array', 'np.array', (['[[1, 0, 0, -o_x], [0, 1, 0, -o_y], [0, 0, 1, -o_z], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, -o_x], [0, 1, 0, -o_y], [0, 0, 1, -o_z], [0, 0, 0, 1]])\n', (585, 652), True, 'import numpy as np\n'), ((942, 973), 'numpy.rollaxis', 'np.rollaxis', (['x', 'channel_axis', '(0)'], {}), '(x, channel_axis, 0)\n', (953, 973), True, 'import numpy as np\n'), ((1412, 1444), 'numpy.stack', 'np.stack', (['channel_images'], {'axis': '(0)'}), '(channel_images, axis=0)\n', (1420, 1444), True, 'import numpy as np\n'), ((1453, 1488), 'numpy.rollaxis', 'np.rollaxis', (['x', '(0)', '(channel_axis + 1)'], {}), '(x, 0, channel_axis + 1)\n', (1464, 1488), True, 'import numpy as np\n'), ((1913, 1984), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, cx, sx, 0], [0, -sx, cx, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, cx, sx, 0], [0, -sx, cx, 0], [0, 0, 0, 1]])\n', (1921, 1984), True, 'import numpy as np\n'), ((2248, 2319), 'numpy.array', 'np.array', (['[[cy, 0, -sy, 0], [0, 1, 0, 0], [sy, 0, cy, 0], [0, 0, 0, 1]]'], {}), '([[cy, 0, -sy, 0], [0, 1, 0, 0], [sy, 0, cy, 0], [0, 0, 0, 1]])\n', (2256, 2319), True, 'import numpy as np\n'), ((2583, 2654), 'numpy.array', 'np.array', (['[[cz, sz, 0, 0], [-sz, cz, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[cz, sz, 0, 0], [-sz, cz, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (2591, 2654), True, 'import numpy as np\n'), ((2761, 2830), 'numpy.array', 'np.array', (['[[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])\n', (2769, 2830), True, 'import numpy as np\n'), ((3088, 3119), 'numpy.rollaxis', 'np.rollaxis', (['x', 'channel_axis', '(0)'], {}), '(x, channel_axis, 0)\n', (3099, 3119), True, 'import numpy as 
np\n'), ((3308, 3340), 'numpy.stack', 'np.stack', (['channel_images'], {'axis': '(0)'}), '(channel_images, axis=0)\n', (3316, 3340), True, 'import numpy as np\n'), ((3349, 3384), 'numpy.rollaxis', 'np.rollaxis', (['x', '(0)', '(channel_axis + 1)'], {}), '(x, 0, channel_axis + 1)\n', (3360, 3384), True, 'import numpy as np\n'), ((771, 800), 'numpy.dot', 'np.dot', (['offset_matrix', 'matrix'], {}), '(offset_matrix, matrix)\n', (777, 800), True, 'import numpy as np\n'), ((1090, 1192), 'scipy.ndimage.affine_transform', 'affine_transform', (['x_channel', 'final_affine_matrix', 'final_offset'], {'order': '(0)', 'mode': 'fill_mode', 'cval': 'cval'}), '(x_channel, final_affine_matrix, final_offset, order=0,\n mode=fill_mode, cval=cval)\n', (1106, 1192), False, 'from scipy.ndimage import affine_transform\n'), ((1675, 1699), 'numpy.dot', 'np.dot', (['t', 'transforms[i]'], {}), '(t, transforms[i])\n', (1681, 1699), True, 'import numpy as np\n'), ((1861, 1874), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1867, 1874), True, 'import numpy as np\n'), ((1888, 1901), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1894, 1901), True, 'import numpy as np\n'), ((2196, 2209), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2202, 2209), True, 'import numpy as np\n'), ((2223, 2236), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2229, 2236), True, 'import numpy as np\n'), ((2531, 2544), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2537, 2544), True, 'import numpy as np\n'), ((2558, 2571), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2564, 2571), True, 'import numpy as np\n'), ((3139, 3148), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (3145, 3148), True, 'import numpy as np\n'), ((3150, 3159), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (3156, 3159), True, 'import numpy as np\n'), ((1780, 1797), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (1790, 1797), True, 'import numpy as np\n'), ((1819, 1836), 'numpy.deg2rad', 
'np.deg2rad', (['angle'], {}), '(angle)\n', (1829, 1836), True, 'import numpy as np\n'), ((2115, 2132), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (2125, 2132), True, 'import numpy as np\n'), ((2154, 2171), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (2164, 2171), True, 'import numpy as np\n'), ((2450, 2467), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (2460, 2467), True, 'import numpy as np\n'), ((2489, 2506), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (2499, 2506), True, 'import numpy as np\n'), ((2928, 2941), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2938, 2941), True, 'import numpy as np\n'), ((3202, 3242), 'numpy.random.uniform', 'np.random.uniform', (['(-intensity)', 'intensity'], {}), '(-intensity, intensity)\n', (3219, 3242), True, 'import numpy as np\n')] |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Experimental simulator plugin based on tensor network contractions,
using the TensorFlow backend for Jacobian computations.
"""
import copy
import numpy as np
try:
import tensorflow as tf
if tf.__version__[0] == "1":
raise ImportError("expt.tensornet.tf device requires TensorFlow>=2.0")
except ImportError as e:
raise ImportError("expt.tensornet.tf device requires TensorFlow>=2.0")
from pennylane.variable import Variable
from pennylane.beta.plugins.expt_tensornet import TensorNetwork, I, X, Y, Z
# tolerance for numerical errors
tolerance = 1e-10
C_DTYPE = tf.complex128
R_DTYPE = tf.float64
I = tf.constant(I, dtype=C_DTYPE)
X = tf.constant(X, dtype=C_DTYPE)
II = tf.eye(4, dtype=C_DTYPE)
ZZ = tf.constant(np.kron(Z, Z), dtype=C_DTYPE)
IX = tf.constant(np.kron(I, X), dtype=C_DTYPE)
IY = tf.constant(np.kron(I, Y), dtype=C_DTYPE)
IZ = tf.constant(np.kron(I, Z), dtype=C_DTYPE)
ZI = tf.constant(np.kron(Z, I), dtype=C_DTYPE)
ZX = tf.constant(np.kron(Z, X), dtype=C_DTYPE)
ZY = tf.constant(np.kron(Z, Y), dtype=C_DTYPE)
def Rphi(phi):
r"""One-qubit phase shift.
Args:
phi (float): phase shift angle
Returns:
array: unitary 2x2 phase shift matrix
"""
phi = tf.cast(phi, dtype=C_DTYPE)
return ((1 + tf.exp(1j * phi)) * I + (1 - tf.exp(1j * phi)) * Z) / 2
def Rotx(theta):
r"""One-qubit rotation about the x axis.
Args:
theta (float): rotation angle
Returns:
array: unitary 2x2 rotation matrix :math:`e^{-i \sigma_x \theta/2}`
"""
theta = tf.cast(theta, dtype=C_DTYPE)
return tf.cos(theta / 2) * I + 1j * tf.sin(-theta / 2) * X
def Roty(theta):
r"""One-qubit rotation about the y axis.
Args:
theta (float): rotation angle
Returns:
array: unitary 2x2 rotation matrix :math:`e^{-i \sigma_y \theta/2}`
"""
theta = tf.cast(theta, dtype=C_DTYPE)
return tf.cos(theta / 2) * I + 1j * tf.sin(-theta / 2) * Y
def Rotz(theta):
r"""One-qubit rotation about the z axis.
Args:
theta (float): rotation angle
Returns:
array: unitary 2x2 rotation matrix :math:`e^{-i \sigma_z \theta/2}`
"""
theta = tf.cast(theta, dtype=C_DTYPE)
return tf.cos(theta / 2) * I + 1j * tf.sin(-theta / 2) * Z
def Rot3(a, b, c):
r"""Arbitrary one-qubit rotation using three Euler angles.
Args:
a,b,c (float): rotation angles
Returns:
array: unitary 2x2 rotation matrix ``rz(c) @ ry(b) @ rz(a)``
"""
return Rotz(c) @ Roty(b) @ Rotz(a)
def CRotx(theta):
r"""Two-qubit controlled rotation about the x axis.
Args:
theta (float): rotation angle
Returns:
array: unitary 4x4 rotation matrix
:math:`|0\rangle\langle 0|\otimes \mathbb{I}+|1\rangle\langle 1|\otimes R_x(\theta)`
"""
theta = tf.cast(theta, dtype=C_DTYPE)
return (
tf.cos(theta / 4) ** 2 * II
- 1j * tf.sin(theta / 2) / 2 * IX
+ tf.sin(theta / 4) ** 2 * ZI
+ 1j * tf.sin(theta / 2) / 2 * ZX
)
def CRoty(theta):
r"""Two-qubit controlled rotation about the y axis.
Args:
theta (float): rotation angle
Returns:
array: unitary 4x4 rotation matrix :math:`|0\rangle\langle 0|\otimes \mathbb{I}+|1\rangle\langle 1|\otimes R_y(\theta)`
"""
theta = tf.cast(theta, dtype=C_DTYPE)
return (
tf.cos(theta / 4) ** 2 * II
- 1j * tf.sin(theta / 2) / 2 * IY
+ tf.sin(theta / 4) ** 2 * ZI
+ 1j * tf.sin(theta / 2) / 2 * ZY
)
def CRotz(theta):
r"""Two-qubit controlled rotation about the z axis.
Args:
theta (float): rotation angle
Returns:
array: unitary 4x4 rotation matrix
:math:`|0\rangle\langle 0|\otimes \mathbb{I}+|1\rangle\langle 1|\otimes R_z(\theta)`
"""
theta = tf.cast(theta, dtype=C_DTYPE)
return (
tf.cos(theta / 4) ** 2 * II
- 1j * tf.sin(theta / 2) / 2 * IZ
+ tf.sin(theta / 4) ** 2 * ZI
+ 1j * tf.sin(theta / 2) / 2 * ZZ
)
def CRot3(a, b, c):
r"""Arbitrary two-qubit controlled rotation using three Euler angles.
Args:
a,b,c (float): rotation angles
Returns:
array: unitary 4x4 rotation matrix
:math:`|0\rangle\langle 0|\otimes \mathbb{I}+|1\rangle\langle 1|\otimes R(a,b,c)`
"""
return CRotz(c) @ (CRoty(b) @ CRotz(a))
class TensorNetworkTF(TensorNetwork):
"""Experimental TensorFlow Tensor Network simulator device for PennyLane.
**Short name:** ``expt.tensornet.tf``
This experimental device extends ``expt.tensornet`` by making use of
the TensorFlow backend of TensorNetwork. As a result, it supports
classical backpropagation as a means to compute the Jacobian. This can
be faster than the parameter-shift rule for analytic quantum gradients
when the number of parameters to be optimized is large.
To use this device, you will need to install TensorFlow and TensorNetwork:
.. code-block:: bash
pip install tensornetwork>=0.2 tensorflow>=2.0
**Example:**
>>> dev = qml.device("expt.tensornet.tf", wires=1)
>>> @qml.qnode(dev, interface="autograd", diff_method="best")
>>> def circuit(x):
... qml.RX(x[1], wires=0)
... qml.Rot(x[0], x[1], x[2], wires=0)
... return qml.expval(qml.PauliZ(0))
>>> grad_fn = qml.grad(circuit, argnum=[0])
>>> print(grad_fn([0.2, 0.5, 0.1]))
([array(-0.22526717), array(-1.00864546), array(6.9388939e-18)],)
.. note::
TensorFlow is used as the device backend, and is independent
of the chosen QNode interface. In the example above, we combine
``expt.tensornet.tf`` with the ``autograd`` interface.
It can also be used with the ``torch`` and the ``tf`` interface.
Args:
wires (int): the number of modes to initialize the device in
shots (int): the number of shots used for returning samples
"""
# pylint: disable=too-many-instance-attributes
name = "PennyLane TensorNetwork (TensorFlow) simulator plugin"
short_name = "expt.tensornet.tf"
_capabilities = {"model": "qubit", "tensor_observables": True, "provides_jacobian": True}
_operation_map = copy.copy(TensorNetwork._operation_map)
_operation_map.update(
{
"PhaseShift": Rphi,
"RX": Rotx,
"RY": Roty,
"RZ": Rotz,
"Rot": Rot3,
"CRX": CRotx,
"CRY": CRoty,
"CRZ": CRotz,
"CRot": CRot3,
}
)
backend = "tensorflow"
_reshape = staticmethod(tf.reshape)
_array = staticmethod(tf.constant)
_asarray = staticmethod(tf.convert_to_tensor)
_real = staticmethod(tf.math.real)
_imag = staticmethod(tf.math.imag)
_abs = staticmethod(tf.abs)
C_DTYPE = C_DTYPE
R_DTYPE = R_DTYPE
def __init__(self, wires, shots=1000):
self.variables = []
"""List[tf.Variable]: Free parameters, cast to TensorFlow variables,
for this circuit."""
self.res = None
"""tf.tensor[tf.float64]: result from the last circuit execution"""
self.op_params = {}
"""dict[Operation, List[Any, tf.Variable]]: A mapping from each operation
in the queue, to the corresponding list of parameter values. These
values can be Python numeric types, NumPy arrays, or TensorFlow variables."""
self.tape = None
"""tf.GradientTape: the gradient tape under which all tensor network
modifications must be made"""
super().__init__(wires, shots=shots, analytic=True)
def reset(self):
self.res = None
self.variables = []
super().reset()
def execution_context(self):
self.tape = tf.GradientTape(persistent=True)
return self.tape
def pre_apply(self):
super().pre_apply()
self.op_params = {}
for operation in self.op_queue:
# Copy the operation parameters to the op_params dictionary.
# Note that these are the unwrapped parameters, so PennyLane
# free parameters will be represented as Variable instances.
self.op_params[operation] = operation.params[:]
# Loop through the free parameter reference dictionary
for _, par_dep_list in self.parameters.items():
if not par_dep_list:
# parameter is not used within circuit
v = tf.Variable(0, dtype=self.R_DTYPE)
self.variables.append(v)
continue
# get the first parameter dependency for each free parameter
first = par_dep_list[0]
# For the above parameter dependency, get the corresponding
# operation parameter variable, and get the numeric value.
# Convert the resulting value to a TensorFlow tensor.
val = first.op.params[first.par_idx].val
mult = first.op.params[first.par_idx].mult
v = tf.Variable(val / mult, dtype=self.R_DTYPE)
# Mark the variable to be watched by the gradient tape,
# and append it to the variable list.
self.variables.append(v)
for p in par_dep_list:
# Replace the existing Variable free parameter in the op_params dictionary
# with the corresponding tf.Variable parameter.
# Note that the free parameter might be scaled by the
# variable.mult scaling factor.
mult = p.op.params[p.par_idx].mult
self.op_params[p.op][p.par_idx] = v * mult
# check that no Variables remain in the op_params dictionary
values = [item for sublist in self.op_params.values() for item in sublist]
assert not any(
isinstance(v, Variable) for v in values
), "A pennylane.Variable instance was not correctly converted to a tf.Variable"
# flatten the variables list in case of nesting
self.variables = tf.nest.flatten(self.variables)
self.tape.watch(self.variables)
for operation in self.op_queue:
# Apply each operation, but instead of passing operation.parameters
# (which contains the evaluated numeric parameter values),
# pass op_params[operation], which contains numeric values
# for fixed parameters, and tf.Variable objects for free parameters.
super().apply(operation.name, operation.wires, self.op_params[operation])
def apply(self, operation, wires, par):
# individual operations are already applied inside self.pre_apply()
pass
def execute(self, queue, observables, parameters=None):
# pylint: disable=bad-super-call
results = super(TensorNetwork, self).execute(queue, observables, parameters=parameters)
with self.tape:
# convert the results list into a single tensor
self.res = tf.stack(results)
# return the results as a NumPy array
return self.res.numpy()
def jacobian(self, queue, observables, parameters):
"""Calculates the Jacobian of the device circuit using TensorFlow
backpropagation.
Args:
queue (list[Operation]): operations to be applied to the device
observables (list[Observable]): observables to be measured
parameters (dict[int, ParameterDependency]): reference dictionary
mapping free parameter values to the operations that
depend on them
Returns:
array[float]: Jacobian matrix of size (``num_params``, ``num_wires``)
"""
self.execute(queue, observables, parameters=parameters)
jac = self.tape.jacobian(self.res, self.variables, experimental_use_pfor=False)
jac = [i if i is not None else tf.zeros(self.res.shape, dtype=tf.float64) for i in jac]
jac = tf.stack(jac)
return jac.numpy().T
| [
"tensorflow.eye",
"tensorflow.Variable",
"tensorflow.sin",
"numpy.kron",
"tensorflow.GradientTape",
"tensorflow.exp",
"tensorflow.constant",
"tensorflow.nest.flatten",
"copy.copy",
"tensorflow.cast",
"tensorflow.zeros",
"tensorflow.cos",
"tensorflow.stack"
] | [((1228, 1257), 'tensorflow.constant', 'tf.constant', (['I'], {'dtype': 'C_DTYPE'}), '(I, dtype=C_DTYPE)\n', (1239, 1257), True, 'import tensorflow as tf\n'), ((1262, 1291), 'tensorflow.constant', 'tf.constant', (['X'], {'dtype': 'C_DTYPE'}), '(X, dtype=C_DTYPE)\n', (1273, 1291), True, 'import tensorflow as tf\n'), ((1298, 1322), 'tensorflow.eye', 'tf.eye', (['(4)'], {'dtype': 'C_DTYPE'}), '(4, dtype=C_DTYPE)\n', (1304, 1322), True, 'import tensorflow as tf\n'), ((1340, 1353), 'numpy.kron', 'np.kron', (['Z', 'Z'], {}), '(Z, Z)\n', (1347, 1353), True, 'import numpy as np\n'), ((1388, 1401), 'numpy.kron', 'np.kron', (['I', 'X'], {}), '(I, X)\n', (1395, 1401), True, 'import numpy as np\n'), ((1435, 1448), 'numpy.kron', 'np.kron', (['I', 'Y'], {}), '(I, Y)\n', (1442, 1448), True, 'import numpy as np\n'), ((1482, 1495), 'numpy.kron', 'np.kron', (['I', 'Z'], {}), '(I, Z)\n', (1489, 1495), True, 'import numpy as np\n'), ((1530, 1543), 'numpy.kron', 'np.kron', (['Z', 'I'], {}), '(Z, I)\n', (1537, 1543), True, 'import numpy as np\n'), ((1577, 1590), 'numpy.kron', 'np.kron', (['Z', 'X'], {}), '(Z, X)\n', (1584, 1590), True, 'import numpy as np\n'), ((1624, 1637), 'numpy.kron', 'np.kron', (['Z', 'Y'], {}), '(Z, Y)\n', (1631, 1637), True, 'import numpy as np\n'), ((1830, 1857), 'tensorflow.cast', 'tf.cast', (['phi'], {'dtype': 'C_DTYPE'}), '(phi, dtype=C_DTYPE)\n', (1837, 1857), True, 'import tensorflow as tf\n'), ((2154, 2183), 'tensorflow.cast', 'tf.cast', (['theta'], {'dtype': 'C_DTYPE'}), '(theta, dtype=C_DTYPE)\n', (2161, 2183), True, 'import tensorflow as tf\n'), ((2470, 2499), 'tensorflow.cast', 'tf.cast', (['theta'], {'dtype': 'C_DTYPE'}), '(theta, dtype=C_DTYPE)\n', (2477, 2499), True, 'import tensorflow as tf\n'), ((2786, 2815), 'tensorflow.cast', 'tf.cast', (['theta'], {'dtype': 'C_DTYPE'}), '(theta, dtype=C_DTYPE)\n', (2793, 2815), True, 'import tensorflow as tf\n'), ((3438, 3467), 'tensorflow.cast', 'tf.cast', (['theta'], {'dtype': 'C_DTYPE'}), '(theta, 
dtype=C_DTYPE)\n', (3445, 3467), True, 'import tensorflow as tf\n'), ((3931, 3960), 'tensorflow.cast', 'tf.cast', (['theta'], {'dtype': 'C_DTYPE'}), '(theta, dtype=C_DTYPE)\n', (3938, 3960), True, 'import tensorflow as tf\n'), ((4432, 4461), 'tensorflow.cast', 'tf.cast', (['theta'], {'dtype': 'C_DTYPE'}), '(theta, dtype=C_DTYPE)\n', (4439, 4461), True, 'import tensorflow as tf\n'), ((6829, 6868), 'copy.copy', 'copy.copy', (['TensorNetwork._operation_map'], {}), '(TensorNetwork._operation_map)\n', (6838, 6868), False, 'import copy\n'), ((8373, 8405), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (8388, 8405), True, 'import tensorflow as tf\n'), ((10625, 10656), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['self.variables'], {}), '(self.variables)\n', (10640, 10656), True, 'import tensorflow as tf\n'), ((12534, 12547), 'tensorflow.stack', 'tf.stack', (['jac'], {}), '(jac)\n', (12542, 12547), True, 'import tensorflow as tf\n'), ((2195, 2212), 'tensorflow.cos', 'tf.cos', (['(theta / 2)'], {}), '(theta / 2)\n', (2201, 2212), True, 'import tensorflow as tf\n'), ((2511, 2528), 'tensorflow.cos', 'tf.cos', (['(theta / 2)'], {}), '(theta / 2)\n', (2517, 2528), True, 'import tensorflow as tf\n'), ((2827, 2844), 'tensorflow.cos', 'tf.cos', (['(theta / 2)'], {}), '(theta / 2)\n', (2833, 2844), True, 'import tensorflow as tf\n'), ((9607, 9650), 'tensorflow.Variable', 'tf.Variable', (['(val / mult)'], {'dtype': 'self.R_DTYPE'}), '(val / mult, dtype=self.R_DTYPE)\n', (9618, 9650), True, 'import tensorflow as tf\n'), ((11567, 11584), 'tensorflow.stack', 'tf.stack', (['results'], {}), '(results)\n', (11575, 11584), True, 'import tensorflow as tf\n'), ((2224, 2242), 'tensorflow.sin', 'tf.sin', (['(-theta / 2)'], {}), '(-theta / 2)\n', (2230, 2242), True, 'import tensorflow as tf\n'), ((2540, 2558), 'tensorflow.sin', 'tf.sin', (['(-theta / 2)'], {}), '(-theta / 2)\n', (2546, 2558), True, 'import tensorflow as tf\n'), ((2856, 
2874), 'tensorflow.sin', 'tf.sin', (['(-theta / 2)'], {}), '(-theta / 2)\n', (2862, 2874), True, 'import tensorflow as tf\n'), ((9062, 9096), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'dtype': 'self.R_DTYPE'}), '(0, dtype=self.R_DTYPE)\n', (9073, 9096), True, 'import tensorflow as tf\n'), ((12463, 12505), 'tensorflow.zeros', 'tf.zeros', (['self.res.shape'], {'dtype': 'tf.float64'}), '(self.res.shape, dtype=tf.float64)\n', (12471, 12505), True, 'import tensorflow as tf\n'), ((1875, 1893), 'tensorflow.exp', 'tf.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (1881, 1893), True, 'import tensorflow as tf\n'), ((1904, 1922), 'tensorflow.exp', 'tf.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (1910, 1922), True, 'import tensorflow as tf\n'), ((3569, 3586), 'tensorflow.sin', 'tf.sin', (['(theta / 4)'], {}), '(theta / 4)\n', (3575, 3586), True, 'import tensorflow as tf\n'), ((3612, 3629), 'tensorflow.sin', 'tf.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (3618, 3629), True, 'import tensorflow as tf\n'), ((4062, 4079), 'tensorflow.sin', 'tf.sin', (['(theta / 4)'], {}), '(theta / 4)\n', (4068, 4079), True, 'import tensorflow as tf\n'), ((4105, 4122), 'tensorflow.sin', 'tf.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (4111, 4122), True, 'import tensorflow as tf\n'), ((4563, 4580), 'tensorflow.sin', 'tf.sin', (['(theta / 4)'], {}), '(theta / 4)\n', (4569, 4580), True, 'import tensorflow as tf\n'), ((4606, 4623), 'tensorflow.sin', 'tf.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (4612, 4623), True, 'import tensorflow as tf\n'), ((3489, 3506), 'tensorflow.cos', 'tf.cos', (['(theta / 4)'], {}), '(theta / 4)\n', (3495, 3506), True, 'import tensorflow as tf\n'), ((3982, 3999), 'tensorflow.cos', 'tf.cos', (['(theta / 4)'], {}), '(theta / 4)\n', (3988, 3999), True, 'import tensorflow as tf\n'), ((4483, 4500), 'tensorflow.cos', 'tf.cos', (['(theta / 4)'], {}), '(theta / 4)\n', (4489, 4500), True, 'import tensorflow as tf\n'), ((3532, 3549), 'tensorflow.sin', 'tf.sin', 
(['(theta / 2)'], {}), '(theta / 2)\n', (3538, 3549), True, 'import tensorflow as tf\n'), ((4025, 4042), 'tensorflow.sin', 'tf.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (4031, 4042), True, 'import tensorflow as tf\n'), ((4526, 4543), 'tensorflow.sin', 'tf.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (4532, 4543), True, 'import tensorflow as tf\n')] |
from fractions import Fraction
import operator
import pandas as pd
import numpy as np
from abc import ABCMeta, abstractmethod
from probability.concept.random_variable import RandomVariable, SetOfRandomVariable
from typing import Callable
def joint_distribution(data):
from probability.new.joint_distribution import JointDistribution
if isinstance(data, pd.DataFrame):
return JointDistribution.from_dataframe(data)
else:
return JointDistribution.from_series(data)
class ProbabilityDistribution(metaclass=ABCMeta):
@property
def variables(self) -> SetOfRandomVariable:
index = self.series.index
names = [index.name] if type(index) == pd.Index else index.names
return SetOfRandomVariable(tuple(RandomVariable(column) for column in names))
@property
@abstractmethod
def series(self) -> pd.Series:
return None
def argmax(self, *variables: RandomVariable):
"""
Arguments of the maxima (or argmax) returns the assignments that causes the maximum
value in probability distribution.
:return:
"""
if not variables:
variables = self.variables
method = lambda assignment: assignment in variables
maximum = self.series.argmax()
argmax = tuple(variable == value for variable, value in zip(self.variables, maximum))
return tuple(filter(method, argmax))
def sum(self):
return self.series.sum()
def __eq__(self, other) -> bool:
if self.variables != other.variables:
return False
#common_variables = set(self.variables) & set(other.variables)
#common_variables = tuple(common_variables)
#this = self(*common_variables)
#other = other(*common_variables)
# Sort columns
variables = self.variables
if len(variables.to_tuple()) > 1:
series = other.series.reorder_levels(self.variables.names)
series.sort_index(inplace=True)
other = joint_distribution(series)
return np.isclose(self.series, other.series).all()
def __mul__(self, other: 'ProbabilityDistribution') -> 'ProbabilityDistribution':
name = self.series.name + ' ' + other.series.name
operation = operator.mul
return self._calcule(other, operation, name)
def __truediv__(self, other: 'ProbabilityDistribution') -> 'ProbabilityDistribution':
name = self.series.name + ' / ' + other.series.name
operation = operator.truediv
return self._calcule(other, operation, name)
def _calcule(self, other: 'ProbabilityDistribution', operation: Callable, new_name: str) -> 'ProbabilityDistribution':
"""
Based in: https://github.com/pandas-dev/pandas/issues/9368
"""
X = self.series
Y = other.series
on = tuple(set(X.index.names) & set(Y.index.names))
result = pd.merge(X.reset_index(), Y.reset_index(), on=on, how='outer')
result[new_name] = operation(result[X.name], result[Y.name])
result = result.drop([X.name, Y.name], axis=1)
variables = set(self.variables.names) | set(other.variables.names)
result.set_index(list(variables))
return joint_distribution(result)
def __repr__(self):
return self.series.map(lambda x: Fraction(x).limit_denominator()).__repr__()
| [
"numpy.isclose",
"fractions.Fraction",
"probability.concept.random_variable.RandomVariable",
"probability.new.joint_distribution.JointDistribution.from_dataframe",
"probability.new.joint_distribution.JointDistribution.from_series"
] | [((396, 434), 'probability.new.joint_distribution.JointDistribution.from_dataframe', 'JointDistribution.from_dataframe', (['data'], {}), '(data)\n', (428, 434), False, 'from probability.new.joint_distribution import JointDistribution\n'), ((460, 495), 'probability.new.joint_distribution.JointDistribution.from_series', 'JointDistribution.from_series', (['data'], {}), '(data)\n', (489, 495), False, 'from probability.new.joint_distribution import JointDistribution\n'), ((2093, 2130), 'numpy.isclose', 'np.isclose', (['self.series', 'other.series'], {}), '(self.series, other.series)\n', (2103, 2130), True, 'import numpy as np\n'), ((760, 782), 'probability.concept.random_variable.RandomVariable', 'RandomVariable', (['column'], {}), '(column)\n', (774, 782), False, 'from probability.concept.random_variable import RandomVariable, SetOfRandomVariable\n'), ((3368, 3379), 'fractions.Fraction', 'Fraction', (['x'], {}), '(x)\n', (3376, 3379), False, 'from fractions import Fraction\n')] |
# Trained net for region specific Classification
# 1. If not already set, download and set coco API and data set (See instruction)
# 1. Set Train image folder path in: TrainImageDir
# 2. Set the path to the coco Train annotation json file in: TrainAnnotationFile
# 3. Run the script
# 4. The trained net weight will appear in the folder defined in: logs_dir
# 5. For other training parameters see Input section in train.py script
#------------------------------------------------------------------------------------------------------------------------
##########################################################################################################################################################################
import numpy as np
#import Resnet50Attention as Net
import Resnet50AttentionFullAttentionAndBiasAllLayers as Net
import OpenSurfaceReader as Reader
import os
import scipy.misc as misc
import torch
#...........................................Input Parameters.................................................
TrainImageDir="/media/sagi/9be0bc81-09a7-43be-856a-45a5ab241d90/Data_zoo/OpenSurface/OpenSurfaceMaterialsSmall/Images/"
AnnotationDir="/media/sagi/9be0bc81-09a7-43be-856a-45a5ab241d90/Data_zoo/OpenSurface/OpenSurfaceMaterialsSmall/TrainLabels/"
UseCuda=True
MinSize=160 # min width/hight of image
MaxSize=1300 # max width/hight of image
MaxBatchSize=20 # Maximoum number of images per batch h*w*bsize (reduce to prevent oom problems)
MaxPixels=800*800*3.#800*800*3. # Maximoum number of pixel per batch h*w*bsize (reduce to prevent oom problems)
logs_dir= "logs/"# "path to logs directory where trained model and information will be stored"
if not os.path.exists(logs_dir): os.makedirs(logs_dir)
Trained_model_path="" # If you want to training start from pretrained model other wise set to =""
Learning_Rate=1e-5 #Learning rate for Adam Optimizer
learning_rate_decay=0.999999#
#-----------------------------Other Paramters------------------------------------------------------------------------
TrainLossTxtFile=logs_dir+"TrainLoss.txt" #Where train losses will be writen
ValidLossTxtFile=logs_dir+"ValidationLoss.txt"# Where validation losses will be writen
Weight_Decay=1e-5# Weight for the weight decay loss function
MAX_ITERATION = int(1000010) # Max number of training iteration
#----------------------------------------Create reader for data set--------------------------------------------------------------------------------------------------------------
Reader=Reader.Reader(ImageDir=TrainImageDir,AnnotationDir=AnnotationDir, MaxBatchSize=MaxBatchSize,MinSize=MinSize,MaxSize=MaxSize,MaxPixels=MaxPixels, AnnotationFileType="png", ImageFileType="jpg",BackgroundClass=0)
#Reader.Reader(TrainImageDir,TrainAnnotationFile, MaxBatchSize,MinSize,MaxSize,MaxPixels)
NumClasses = Reader.NumClass
#---------------------Initiate neural net------------------------------------------------------------------------------------
Net=Net.Net(NumClasses=NumClasses+1,UseGPU=UseCuda)
Net.AddAttententionLayer()
if Trained_model_path!="":
Net.load_state_dict(torch.load(Trained_model_path))
if UseCuda: Net.cuda()
#optimizer=torch.optim.SGD(params=Net.parameters(),lr=Learning_Rate,weight_decay=Weight_Decay,momentum=0.5)
optimizer=torch.optim.Adam(params=Net.parameters(),lr=Learning_Rate,weight_decay=Weight_Decay)
#--------------------------- Create files for saving loss----------------------------------------------------------------------------------------------------------
f = open(TrainLossTxtFile, "a")
f.write("Iteration\tloss\t Learning Rate="+str(Learning_Rate))
f.close()
AVGLoss=0
#..............Start Training loop: Main Training....................................................................
for itr in range(1,MAX_ITERATION):
# if np.random.randint(2)==1:
Images, SegmentMask, Labels, LabelsOneHot = Reader.ReadNextBatchRandom(EqualClassProbabilty=False)
# else:
# Images, SegmentMask, Labels, LabelsOneHot = Reader.ReadNextBatchRandom(EqualClassProbabilty=True,MinClassExample=np.random.randint(2,200))
#****************************************************************
# Images[:,:,:,1]*=SegmentMask
# for ii in range(Labels.shape[0]):
# print(Reader.CatNames[Labels[ii]])
# misc.imshow(Images[ii])
#**************************Run Trainin cycle***************************************************************************************
Prob, Lb=Net.forward(Images,ROI=SegmentMask) # Run net inference and get prediction
Net.zero_grad()
OneHotLabels=torch.autograd.Variable(torch.from_numpy(LabelsOneHot).cuda(), requires_grad=False)
Loss = -torch.mean((OneHotLabels * torch.log(Prob + 0.0000001))) # Calculate cross entropy loss
if AVGLoss==0: AVGLoss=float(np.array(Loss.data)) #Caclculate average loss for display
else: AVGLoss=AVGLoss*0.999+0.001*float(np.array(Loss.data))
Loss.backward() # Backpropogate loss
optimizer.step() # Apply gradient decend change weight
torch.cuda.empty_cache()
# --------------Save trained model------------------------------------------------------------------------------------------------------------------------------------------
if itr % 30000 == 0 and itr>0:
print("Saving Model to file in "+logs_dir)
torch.save(Net.state_dict(), logs_dir+ "/" + str(itr) + ".torch")
print("model saved")
#......................Write and display train loss..........................................................................
if itr % 10==0: # Display train loss
print("Step "+str(itr)+" Train Loss="+str(float(np.array(Loss.data)))+" Runnig Average Loss="+str(AVGLoss))
#Write train loss to file
with open(TrainLossTxtFile, "a") as f:
f.write("\n"+str(itr)+"\t"+str(float(np.array(Loss.data)))+"\t"+str(AVGLoss))
f.close()
##################################################################################################################################################
| [
"os.path.exists",
"torch.log",
"os.makedirs",
"Resnet50AttentionFullAttentionAndBiasAllLayers.zero_grad",
"torch.load",
"Resnet50AttentionFullAttentionAndBiasAllLayers.cuda",
"torch.from_numpy",
"numpy.array",
"Resnet50AttentionFullAttentionAndBiasAllLayers.AddAttententionLayer",
"Resnet50Attentio... | [((2512, 2740), 'OpenSurfaceReader.Reader', 'Reader.Reader', ([], {'ImageDir': 'TrainImageDir', 'AnnotationDir': 'AnnotationDir', 'MaxBatchSize': 'MaxBatchSize', 'MinSize': 'MinSize', 'MaxSize': 'MaxSize', 'MaxPixels': 'MaxPixels', 'AnnotationFileType': '"""png"""', 'ImageFileType': '"""jpg"""', 'BackgroundClass': '(0)'}), "(ImageDir=TrainImageDir, AnnotationDir=AnnotationDir,\n MaxBatchSize=MaxBatchSize, MinSize=MinSize, MaxSize=MaxSize, MaxPixels=\n MaxPixels, AnnotationFileType='png', ImageFileType='jpg', BackgroundClass=0\n )\n", (2525, 2740), True, 'import OpenSurfaceReader as Reader\n'), ((2976, 3026), 'Resnet50AttentionFullAttentionAndBiasAllLayers.Net', 'Net.Net', ([], {'NumClasses': '(NumClasses + 1)', 'UseGPU': 'UseCuda'}), '(NumClasses=NumClasses + 1, UseGPU=UseCuda)\n', (2983, 3026), True, 'import Resnet50AttentionFullAttentionAndBiasAllLayers as Net\n'), ((3024, 3050), 'Resnet50AttentionFullAttentionAndBiasAllLayers.AddAttententionLayer', 'Net.AddAttententionLayer', ([], {}), '()\n', (3048, 3050), True, 'import Resnet50AttentionFullAttentionAndBiasAllLayers as Net\n'), ((1683, 1707), 'os.path.exists', 'os.path.exists', (['logs_dir'], {}), '(logs_dir)\n', (1697, 1707), False, 'import os\n'), ((1709, 1730), 'os.makedirs', 'os.makedirs', (['logs_dir'], {}), '(logs_dir)\n', (1720, 1730), False, 'import os\n'), ((3146, 3156), 'Resnet50AttentionFullAttentionAndBiasAllLayers.cuda', 'Net.cuda', ([], {}), '()\n', (3154, 3156), True, 'import Resnet50AttentionFullAttentionAndBiasAllLayers as Net\n'), ((3874, 3928), 'OpenSurfaceReader.ReadNextBatchRandom', 'Reader.ReadNextBatchRandom', ([], {'EqualClassProbabilty': '(False)'}), '(EqualClassProbabilty=False)\n', (3900, 3928), True, 'import OpenSurfaceReader as Reader\n'), ((4453, 4489), 'Resnet50AttentionFullAttentionAndBiasAllLayers.forward', 'Net.forward', (['Images'], {'ROI': 'SegmentMask'}), '(Images, ROI=SegmentMask)\n', (4464, 4489), True, 'import 
Resnet50AttentionFullAttentionAndBiasAllLayers as Net\n'), ((4532, 4547), 'Resnet50AttentionFullAttentionAndBiasAllLayers.zero_grad', 'Net.zero_grad', ([], {}), '()\n', (4545, 4547), True, 'import Resnet50AttentionFullAttentionAndBiasAllLayers as Net\n'), ((5011, 5035), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (5033, 5035), False, 'import torch\n'), ((3102, 3132), 'torch.load', 'torch.load', (['Trained_model_path'], {}), '(Trained_model_path)\n', (3112, 3132), False, 'import torch\n'), ((3299, 3315), 'Resnet50AttentionFullAttentionAndBiasAllLayers.parameters', 'Net.parameters', ([], {}), '()\n', (3313, 3315), True, 'import Resnet50AttentionFullAttentionAndBiasAllLayers as Net\n'), ((4784, 4803), 'numpy.array', 'np.array', (['Loss.data'], {}), '(Loss.data)\n', (4792, 4803), True, 'import numpy as np\n'), ((5314, 5330), 'Resnet50AttentionFullAttentionAndBiasAllLayers.state_dict', 'Net.state_dict', ([], {}), '()\n', (5328, 5330), True, 'import Resnet50AttentionFullAttentionAndBiasAllLayers as Net\n'), ((4589, 4619), 'torch.from_numpy', 'torch.from_numpy', (['LabelsOneHot'], {}), '(LabelsOneHot)\n', (4605, 4619), False, 'import torch\n'), ((4688, 4711), 'torch.log', 'torch.log', (['(Prob + 1e-07)'], {}), '(Prob + 1e-07)\n', (4697, 4711), False, 'import torch\n'), ((4886, 4905), 'numpy.array', 'np.array', (['Loss.data'], {}), '(Loss.data)\n', (4894, 4905), True, 'import numpy as np\n'), ((5621, 5640), 'numpy.array', 'np.array', (['Loss.data'], {}), '(Loss.data)\n', (5629, 5640), True, 'import numpy as np\n'), ((5811, 5830), 'numpy.array', 'np.array', (['Loss.data'], {}), '(Loss.data)\n', (5819, 5830), True, 'import numpy as np\n')] |
import itertools
import numpy as np
def get_ways_to_split_nitems_to_kbins(
nitems, kbins, item_separator="┃", item_symbol="🧍"
):
items = [item_symbol] * nitems
# E.g., 3 bins = 2item separators
separators = [item_separator] * (kbins - 1)
symbols = items + separators
ways = set(itertools.permutations(symbols))
return ways
def get_ways_to_split_list_in_kbins(
items, kbins, item_separator="┃", item_symbol="🧍"
):
separation_ways = get_ways_to_split_nitems_to_kbins(
len(items),
kbins,
item_separator=item_separator,
item_symbol=item_symbol,
)
ways_pos_separators = [
[i for i in range(len(w)) if w[i] == item_separator]
for w in separation_ways
]
ways = []
for pos_separators in ways_pos_separators:
aux = list(items)
for p in pos_separators:
aux.insert(p, item_separator)
ways.append(aux)
return ways
def enumerate_ways(ways):
for i, way in enumerate(sorted(["".join(map(str, w)) for w in ways])):
print(f"{i+1:>3} {way}")
def get_solution_space(
customers, depot=0, n_vehicles=1, distinguish_vehicles=True
):
item_sep = "|"
# Only accept single-digit customer ids
if np.max(customers) > 9:
return None
routes = set()
if n_vehicles > 1:
symbol_permutations = itertools.permutations(
[item_sep] * (n_vehicles - 1) + customers
)
for s in symbol_permutations:
fleet_routes = ()
i = 0
for j, p in enumerate(s):
if p == item_sep:
if i == j:
vehicle_route = (depot,) + (depot,)
else:
vehicle_route = (depot,) + s[i:j] + (depot,)
i = j + 1
fleet_routes += (vehicle_route,)
## Add last vehicle route
vehicle_route = (depot,) + s[i:] + (depot,)
fleet_routes += (vehicle_route,)
routes.add(tuple(fleet_routes))
if not distinguish_vehicles:
routes = set([(tuple(sorted(r))) for r in routes])
else:
routes = {
((depot,) + tuple(p) + (depot,),)
for p in itertools.permutations(customers)
}
return routes
| [
"itertools.permutations",
"numpy.max"
] | [((307, 338), 'itertools.permutations', 'itertools.permutations', (['symbols'], {}), '(symbols)\n', (329, 338), False, 'import itertools\n'), ((1259, 1276), 'numpy.max', 'np.max', (['customers'], {}), '(customers)\n', (1265, 1276), True, 'import numpy as np\n'), ((1377, 1442), 'itertools.permutations', 'itertools.permutations', (['([item_sep] * (n_vehicles - 1) + customers)'], {}), '([item_sep] * (n_vehicles - 1) + customers)\n', (1399, 1442), False, 'import itertools\n'), ((2276, 2309), 'itertools.permutations', 'itertools.permutations', (['customers'], {}), '(customers)\n', (2298, 2309), False, 'import itertools\n')] |
from argparse import ArgumentParser
import json
import os
import cv2
import numpy as np
from modules.input_reader import VideoReader, ImageReader
from modules.draw import Plotter3d, draw_poses
from modules.parse_poses import parse_poses
def rotate_poses(poses_3d, R, t):
    """
    Rotate 3D poses from camera space into world space, in place.

    Each row of poses_3d is a flattened pose of (x, y, z, score) joints. The
    xyz coordinates are transformed by inv(R) @ (xyz - t); scores are kept.
    Returns the (mutated) poses_3d array.
    """
    R_inv = np.linalg.inv(R)
    for idx in range(len(poses_3d)):
        joints = poses_3d[idx].reshape((-1, 4)).transpose()
        # Only the first three rows are coordinates; row 3 is the joint score
        joints[0:3, :] = np.dot(R_inv, joints[0:3, :] - t)
        poses_3d[idx] = joints.transpose().reshape(-1)
    return poses_3d
# Usage examples:
#   python demo.py --model human-pose-estimation-3d.xml --device CPU --use-openvino --video 0
#   python demo.py --model human-pose-estimation-3d.pth --video 0
if __name__ == '__main__':
    parser = ArgumentParser(description='Lightweight 3D human pose estimation demo. '
                                       'Press esc to exit, "p" to (un)pause video or process next image.')
    parser.add_argument('-m', '--model',
                        help='Required. Path to checkpoint with a trained model '
                             '(or an .xml file in case of OpenVINO inference).',
                        type=str, required=True)
    parser.add_argument('--video', help='Optional. Path to video file or camera id.', type=str, default='')
    parser.add_argument('--video_width', help='Optional. camera width.', type=str, default='')
    parser.add_argument('--video_height', help='Optional. camera height.', type=str, default='')
    parser.add_argument('-d', '--device',
                        help='Optional. Specify the target device to infer on: CPU or GPU. '
                             'The demo will look for a suitable plugin for device specified '
                             '(by default, it is GPU).',
                        type=str, default='GPU')
    parser.add_argument('--use-openvino',
                        help='Optional. Run network with OpenVINO as inference engine. '
                             'CPU, GPU, FPGA, HDDL or MYRIAD devices are supported.',
                        action='store_true')
    parser.add_argument('--use-tensorrt', help='Optional. Run network with TensorRT as inference engine.',
                        action='store_true')
    parser.add_argument('--images', help='Optional. Path to input image(s).', nargs='+', default='')
    parser.add_argument('--height-size', help='Optional. Network input layer height size.', type=int, default=256)
    parser.add_argument('--extrinsics-path',
                        help='Optional. Path to file with camera extrinsics.',
                        type=str, default=None)
    parser.add_argument('--fx', type=np.float32, default=-1, help='Optional. Camera focal length.')
    args = parser.parse_args()

    # At least one input source is required
    if args.video == '' and args.images == '':
        raise ValueError('Either --video or --image has to be provided')

    stride = 8
    # Select the inference backend: OpenVINO or PyTorch (optionally with TensorRT)
    if args.use_openvino:
        from modules.inference_engine_openvino import InferenceEngineOpenVINO
        net = InferenceEngineOpenVINO(args.model, args.device)
    else:
        from modules.inference_engine_pytorch import InferenceEnginePyTorch
        net = InferenceEnginePyTorch(args.model, args.device, use_tensorrt=args.use_tensorrt)

    # 3D canvas for plotting skeletons; the mouse callback rotates the view
    canvas_3d = np.zeros((720, 1280, 3), dtype=np.uint8)
    plotter = Plotter3d(canvas_3d.shape[:2])
    canvas_3d_window_name = 'Canvas 3D'
    cv2.namedWindow(canvas_3d_window_name)
    cv2.setMouseCallback(canvas_3d_window_name, Plotter3d.mouse_callback)

    # Load camera extrinsics (R, t) used to rotate poses into world space
    file_path = args.extrinsics_path
    if file_path is None:
        file_path = os.path.join('data', 'extrinsics.json')
    with open(file_path, 'r') as f:
        extrinsics = json.load(f)
    R = np.array(extrinsics['R'], dtype=np.float32)
    t = np.array(extrinsics['t'], dtype=np.float32)

    # BUG FIX: was `args.iamges`, which raised AttributeError on every run
    # (argparse only defines `args.images`)
    if args.images != '':
        frame_provider = ImageReader(args.images)
        is_video = False
    elif args.video != '':
        frame_provider = VideoReader(args.video, args.video_width, args.video_height)
        is_video = True
    base_height = args.height_size
    fx = args.fx

    delay = 1           # waitKey delay in ms; 0 pauses on the current frame
    esc_code = 27
    p_code = 112
    space_code = 32
    mean_time = 0       # exponentially-smoothed per-frame processing time
    for frame in frame_provider:
        current_time = cv2.getTickCount()
        if frame is None:
            break
        # Scale the frame so its height matches the network input height
        input_scale = base_height / frame.shape[0]
        scaled_img = cv2.resize(frame, dsize=None, fx=input_scale, fy=input_scale)
        scaled_img = scaled_img[:, 0:scaled_img.shape[1] - (scaled_img.shape[1] % stride)]  # better to pad, but cut out for demo
        if fx < 0:  # Focal length is unknown
            fx = np.float32(0.8 * frame.shape[1])

        inference_result = net.infer(scaled_img)
        poses_3d, poses_2d = parse_poses(inference_result, input_scale, stride, fx, is_video)
        edges = []
        if len(poses_3d):
            # Move poses to world space and remap axes for the 3D plotter
            poses_3d = rotate_poses(poses_3d, R, t)
            poses_3d_copy = poses_3d.copy()
            x = poses_3d_copy[:, 0::4]
            y = poses_3d_copy[:, 1::4]
            z = poses_3d_copy[:, 2::4]
            poses_3d[:, 0::4], poses_3d[:, 1::4], poses_3d[:, 2::4] = -z, x, -y

            poses_3d = poses_3d.reshape(poses_3d.shape[0], 19, -1)[:, :, 0:3]
            # Offset skeleton edges so each pose indexes its own 19 joints
            edges = (Plotter3d.SKELETON_EDGES + 19 * np.arange(poses_3d.shape[0]).reshape((-1, 1, 1))).reshape((-1, 2))
        plotter.plot(canvas_3d, poses_3d, edges)
        cv2.imshow(canvas_3d_window_name, canvas_3d)

        draw_poses(frame, poses_2d)
        current_time = (cv2.getTickCount() - current_time) / cv2.getTickFrequency()
        if mean_time == 0:
            mean_time = current_time
        else:
            mean_time = mean_time * 0.95 + current_time * 0.05
        cv2.putText(frame, 'FPS: {}'.format(int(1 / mean_time * 10) / 10),
                    (40, 80), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255))
        cv2.imshow('ICV 3D Human Pose Estimation', frame)

        key = cv2.waitKey(delay)
        if key == esc_code:
            break
        if key == p_code:  # toggle pause
            if delay == 1:
                delay = 0
            else:
                delay = 1
        if delay == 0 or not is_video:  # allow to rotate 3D canvas while on pause
            key = 0
            while (key != p_code
                   and key != esc_code
                   and key != space_code):
                plotter.plot(canvas_3d, poses_3d, edges)
                cv2.imshow(canvas_3d_window_name, canvas_3d)
                key = cv2.waitKey(33)
            if key == esc_code:
                break
            else:
                delay = 1
| [
"modules.parse_poses.parse_poses",
"cv2.imshow",
"numpy.array",
"numpy.arange",
"cv2.setMouseCallback",
"argparse.ArgumentParser",
"modules.input_reader.VideoReader",
"modules.draw.Plotter3d",
"modules.inference_engine_openvino.InferenceEngineOpenVINO",
"numpy.dot",
"cv2.waitKey",
"cv2.getTick... | [((285, 301), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (298, 301), True, 'import numpy as np\n'), ((745, 892), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Lightweight 3D human pose estimation demo. Press esc to exit, "p" to (un)pause video or process next image."""'}), '(description=\n \'Lightweight 3D human pose estimation demo. Press esc to exit, "p" to (un)pause video or process next image.\'\n )\n', (759, 892), False, 'from argparse import ArgumentParser\n'), ((3252, 3292), 'numpy.zeros', 'np.zeros', (['(720, 1280, 3)'], {'dtype': 'np.uint8'}), '((720, 1280, 3), dtype=np.uint8)\n', (3260, 3292), True, 'import numpy as np\n'), ((3307, 3337), 'modules.draw.Plotter3d', 'Plotter3d', (['canvas_3d.shape[:2]'], {}), '(canvas_3d.shape[:2])\n', (3316, 3337), False, 'from modules.draw import Plotter3d, draw_poses\n'), ((3382, 3420), 'cv2.namedWindow', 'cv2.namedWindow', (['canvas_3d_window_name'], {}), '(canvas_3d_window_name)\n', (3397, 3420), False, 'import cv2\n'), ((3425, 3494), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['canvas_3d_window_name', 'Plotter3d.mouse_callback'], {}), '(canvas_3d_window_name, Plotter3d.mouse_callback)\n', (3445, 3494), False, 'import cv2\n'), ((3697, 3740), 'numpy.array', 'np.array', (["extrinsics['R']"], {'dtype': 'np.float32'}), "(extrinsics['R'], dtype=np.float32)\n", (3705, 3740), True, 'import numpy as np\n'), ((3749, 3792), 'numpy.array', 'np.array', (["extrinsics['t']"], {'dtype': 'np.float32'}), "(extrinsics['t'], dtype=np.float32)\n", (3757, 3792), True, 'import numpy as np\n'), ((434, 468), 'numpy.dot', 'np.dot', (['R_inv', '(pose_3d[0:3, :] - t)'], {}), '(R_inv, pose_3d[0:3, :] - t)\n', (440, 468), True, 'import numpy as np\n'), ((3006, 3054), 'modules.inference_engine_openvino.InferenceEngineOpenVINO', 'InferenceEngineOpenVINO', (['args.model', 'args.device'], {}), '(args.model, args.device)\n', (3029, 3054), False, 'from modules.inference_engine_openvino 
import InferenceEngineOpenVINO\n'), ((3155, 3234), 'modules.inference_engine_pytorch.InferenceEnginePyTorch', 'InferenceEnginePyTorch', (['args.model', 'args.device'], {'use_tensorrt': 'args.use_tensorrt'}), '(args.model, args.device, use_tensorrt=args.use_tensorrt)\n', (3177, 3234), False, 'from modules.inference_engine_pytorch import InferenceEnginePyTorch\n'), ((3579, 3618), 'os.path.join', 'os.path.join', (['"""data"""', '"""extrinsics.json"""'], {}), "('data', 'extrinsics.json')\n", (3591, 3618), False, 'import os\n'), ((3676, 3688), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3685, 3688), False, 'import json\n'), ((3845, 3869), 'modules.input_reader.ImageReader', 'ImageReader', (['args.images'], {}), '(args.images)\n', (3856, 3869), False, 'from modules.input_reader import VideoReader, ImageReader\n'), ((4228, 4246), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (4244, 4246), False, 'import cv2\n'), ((4363, 4424), 'cv2.resize', 'cv2.resize', (['frame'], {'dsize': 'None', 'fx': 'input_scale', 'fy': 'input_scale'}), '(frame, dsize=None, fx=input_scale, fy=input_scale)\n', (4373, 4424), False, 'import cv2\n'), ((4730, 4794), 'modules.parse_poses.parse_poses', 'parse_poses', (['inference_result', 'input_scale', 'stride', 'fx', 'is_video'], {}), '(inference_result, input_scale, stride, fx, is_video)\n', (4741, 4794), False, 'from modules.parse_poses import parse_poses\n'), ((5389, 5433), 'cv2.imshow', 'cv2.imshow', (['canvas_3d_window_name', 'canvas_3d'], {}), '(canvas_3d_window_name, canvas_3d)\n', (5399, 5433), False, 'import cv2\n'), ((5443, 5470), 'modules.draw.draw_poses', 'draw_poses', (['frame', 'poses_2d'], {}), '(frame, poses_2d)\n', (5453, 5470), False, 'from modules.draw import Plotter3d, draw_poses\n'), ((5851, 5900), 'cv2.imshow', 'cv2.imshow', (['"""ICV 3D Human Pose Estimation"""', 'frame'], {}), "('ICV 3D Human Pose Estimation', frame)\n", (5861, 5900), False, 'import cv2\n'), ((5916, 5934), 'cv2.waitKey', 'cv2.waitKey', 
(['delay'], {}), '(delay)\n', (5927, 5934), False, 'import cv2\n'), ((3947, 4007), 'modules.input_reader.VideoReader', 'VideoReader', (['args.video', 'args.video_width', 'args.video_height'], {}), '(args.video, args.video_width, args.video_height)\n', (3958, 4007), False, 'from modules.input_reader import VideoReader, ImageReader\n'), ((4618, 4650), 'numpy.float32', 'np.float32', (['(0.8 * frame.shape[1])'], {}), '(0.8 * frame.shape[1])\n', (4628, 4650), True, 'import numpy as np\n'), ((5532, 5554), 'cv2.getTickFrequency', 'cv2.getTickFrequency', ([], {}), '()\n', (5552, 5554), False, 'import cv2\n'), ((5495, 5513), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (5511, 5513), False, 'import cv2\n'), ((6395, 6439), 'cv2.imshow', 'cv2.imshow', (['canvas_3d_window_name', 'canvas_3d'], {}), '(canvas_3d_window_name, canvas_3d)\n', (6405, 6439), False, 'import cv2\n'), ((6462, 6477), 'cv2.waitKey', 'cv2.waitKey', (['(33)'], {}), '(33)\n', (6473, 6477), False, 'import cv2\n'), ((5265, 5293), 'numpy.arange', 'np.arange', (['poses_3d.shape[0]'], {}), '(poses_3d.shape[0])\n', (5274, 5293), True, 'import numpy as np\n')] |
"""
Module for calculating window related data. Windows can be indexed relative to
two starting indices.
- Local window index
- Window index relative to the TimeData is called "local_win"
- Local window indices always start at 0
- Global window index
- The global window index is relative to the project reference time
- The 0 index window begins at the reference time
- This window indexing is to synchronise data across sites
The global window index is considered the default and sometimes referred to as
the window. Local windows should be explicitly referred to as local_win in
all cases.
The window module includes functionality to do the following:
- Windowing utility functions to calculate window and overlap sizes
- Functions to map windows to samples in TimeData
- Converting a global index array to datetime
Usually with windowing, there is a window size and windows overlap with each
other for a set number of samples. As an illustrative examples, consider a
signal sampled at 10 Hz (dt=0.1 seconds) with 24 samples. This will be windowed
using a window size of 8 samples per window and a 2 sample overlap.
.. plot::
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 10
>>> n_samples = 24
>>> win_size = 8
>>> olap_size = 2
>>> times = np.arange(0, n_samples) * (1/fs)
The first window
>>> start_win1 = 0
>>> end_win1 = win_size
>>> win1_times = times[start_win1:end_win1]
The second window
>>> start_win2 = end_win1 - olap_size
>>> end_win2 = start_win2 + win_size
>>> win2_times = times[start_win2:end_win2]
The third window
>>> start_win3 = end_win2 - olap_size
>>> end_win3 = start_win3 + win_size
>>> win3_times = times[start_win3:end_win3]
The fourth window
>>> start_win4= end_win3 - olap_size
>>> end_win4 = start_win4 + win_size
>>> win4_times = times[start_win4:end_win4]
Let's look at the actual window times for each window
>>> win1_times
array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7])
>>> win2_times
array([0.6, 0.7, 0.8, 0.9, 1. , 1.1, 1.2, 1.3])
>>> win3_times
array([1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9])
>>> win4_times
array([1.8, 1.9, 2. , 2.1, 2.2, 2.3])
The duration and increments of windows can be calculated using provided
methods
>>> from resistics.window import win_duration, inc_duration
>>> print(win_duration(win_size, fs))
0:00:00.7
>>> print(inc_duration(win_size, olap_size, fs))
0:00:00.6
Plot the windows to give an illustration of how it works
>>> plt.plot(win1_times, np.ones_like(win1_times), "bo", label="window1") # doctest: +SKIP
>>> plt.plot(win2_times, np.ones_like(win2_times)*2, "ro", label="window2") # doctest: +SKIP
>>> plt.plot(win3_times, np.ones_like(win3_times)*3, "go", label="window3") # doctest: +SKIP
>>> plt.plot(win4_times, np.ones_like(win4_times)*4, "co", label="window4") # doctest: +SKIP
>>> plt.xlabel("Time [s]") # doctest: +SKIP
>>> plt.legend() # doctest: +SKIP
>>> plt.grid() # doctest: +SKIP
>>> plt.tight_layout() # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
from loguru import logger
from pathlib import Path
from typing import Optional, List, Tuple, Dict, Union, Any
from pydantic import PositiveInt
import numpy as np
import pandas as pd
from resistics.errors import ProcessRunError
from resistics.common import History, ResisticsModel, ResisticsData, ResisticsProcess
from resistics.common import ResisticsWriter, Metadata, WriteableMetadata
from resistics.sampling import RSDateTime, RSTimeDelta, HighResDateTime
from resistics.time import ChanMetadata
from resistics.decimate import DecimatedLevelMetadata, DecimatedData
def win_duration(win_size: int, fs: float) -> RSTimeDelta:
    """
    Calculate the time duration of a window

    The duration spans the first to the last sample of the window, which is
    win_size - 1 sampling periods.

    Parameters
    ----------
    win_size : int
        Window size in samples
    fs : float
        Sampling frequency Hz

    Returns
    -------
    RSTimeDelta
        Duration

    Examples
    --------
    A few examples with different sampling frequencies and window sizes

    >>> from resistics.window import win_duration
    >>> duration = win_duration(512, 512)
    >>> print(duration)
    0:00:00.998046875
    >>> duration = win_duration(520, 512)
    >>> print(duration)
    0:00:01.013671875
    >>> duration = win_duration(4096, 16_384)
    >>> print(duration)
    0:00:00.24993896484375
    >>> duration = win_duration(200, 0.05)
    >>> print(duration)
    1:06:20
    """
    from resistics.sampling import to_timedelta

    sample_period = to_timedelta(1 / fs)
    return sample_period * float(win_size - 1)
def inc_duration(win_size: int, olap_size: int, fs: float) -> RSTimeDelta:
    """
    Calculate the time increment between window start times

    With no overlap the increment equals the window duration; otherwise the
    start of each window advances by win_size - olap_size samples.

    Parameters
    ----------
    win_size : int
        The window size in samples
    olap_size : int
        The overlap size in samples
    fs : float
        The sample frequency Hz

    Returns
    -------
    RSTimeDelta
        The duration of the window

    Examples
    --------
    >>> from resistics.window import inc_duration
    >>> increment = inc_duration(128, 32, 128)
    >>> print(increment)
    0:00:00.75
    >>> increment = inc_duration(128*3600, 128*60, 128)
    >>> print(increment)
    0:59:00
    """
    from resistics.sampling import to_timedelta

    sample_period = to_timedelta(1 / fs)
    advance = win_size - olap_size
    return sample_period * float(advance)
def win_to_datetime(
    ref_time: RSDateTime, global_win: int, increment: RSTimeDelta
) -> RSDateTime:
    """
    Convert a global window index to the start time of the window

    The start time is simply the reference time plus the window index
    multiplied by the increment between window starts.

    Parameters
    ----------
    ref_time : RSDateTime
        Reference time
    global_win : int
        Window index relative to reference time
    increment : RSTimeDelta
        The increment duration

    Returns
    -------
    RSDateTime
        Start time of window

    Examples
    --------
    An example with sampling at 1 Hz, a window size of 100 and an overlap size
    of 25.

    >>> from resistics.sampling import to_datetime
    >>> from resistics.window import inc_duration, win_to_datetime
    >>> ref_time = to_datetime("2021-01-01 00:00:00")
    >>> fs = 1
    >>> win_size = 60
    >>> olap_size = 15
    >>> increment = inc_duration(win_size, olap_size, fs)
    >>> print(increment)
    0:00:45
    >>> print(win_to_datetime(ref_time, 0, increment))
    2021-01-01 00:00:00
    >>> print(win_to_datetime(ref_time, 1, increment))
    2021-01-01 00:00:45
    >>> print(win_to_datetime(ref_time, 2, increment))
    2021-01-01 00:01:30
    >>> print(win_to_datetime(ref_time, 3, increment))
    2021-01-01 00:02:15
    """
    offset = global_win * increment
    return ref_time + offset
def datetime_to_win(
    ref_time: RSDateTime,
    time: RSDateTime,
    increment: RSTimeDelta,
    method: str = "round",
) -> int:
    """
    Convert a datetime to a global window index

    The index is the number of increments between the reference time and the
    given time. When this is not a whole number, the method parameter decides
    how to resolve it ("floor", "ceil" or the default "round").

    Parameters
    ----------
    ref_time : RSDateTime
        Reference time
    time : RSDateTime
        Datetime to convert
    increment : RSTimeDelta
        The increment duration
    method : str, optional
        Method for dealing with float results, by default "round"

    Returns
    -------
    int
        The global window index i.e. the window index relative to the reference
        time

    Raises
    ------
    ValueError
        If time < ref_time

    Examples
    --------
    A simple example to show the logic

    >>> from resistics.sampling import to_datetime, to_timedelta
    >>> from resistics.window import datetime_to_win, win_to_datetime, inc_duration
    >>> ref_time = to_datetime("2021-01-01 00:00:00")
    >>> time = to_datetime("2021-01-01 00:01:00")
    >>> increment = to_timedelta(60)
    >>> global_win = datetime_to_win(ref_time, time, increment)
    >>> global_win
    1
    >>> print(win_to_datetime(ref_time, global_win, increment))
    2021-01-01 00:01:00

    A more complex logic with window sizes, overlap sizes and sampling
    frequencies

    >>> fs = 128
    >>> win_size = 256
    >>> olap_size = 64
    >>> ref_time = to_datetime("2021-03-15 00:00:00")
    >>> time = to_datetime("2021-04-17 18:00:00")
    >>> increment = inc_duration(win_size, olap_size, fs)
    >>> print(increment)
    0:00:01.5
    >>> global_win = datetime_to_win(ref_time, time, increment)
    >>> global_win
    1944000
    >>> print(win_to_datetime(ref_time, global_win, increment))
    2021-04-17 18:00:00

    In this scenario, explore the use of rounding

    >>> time = to_datetime("2021-04-17 18:00:00.50")
    >>> global_win = datetime_to_win(ref_time, time, increment, method = "floor")
    >>> global_win
    1944000
    >>> print(win_to_datetime(ref_time, global_win, increment))
    2021-04-17 18:00:00
    >>> global_win = datetime_to_win(ref_time, time, increment, method = "ceil")
    >>> global_win
    1944001
    >>> print(win_to_datetime(ref_time, global_win, increment))
    2021-04-17 18:00:01.5
    >>> global_win = datetime_to_win(ref_time, time, increment, method = "round")
    >>> global_win
    1944000
    >>> print(win_to_datetime(ref_time, global_win, increment))
    2021-04-17 18:00:00

    Another example with a window duration of greater than a day

    >>> fs = 4.8828125e-05
    >>> win_size = 64
    >>> olap_size = 16
    >>> ref_time = to_datetime("1985-07-18 01:00:20")
    >>> time = to_datetime("1985-09-22 23:00:00")
    >>> increment = inc_duration(win_size, olap_size, fs)
    >>> print(increment)
    11 days, 9:04:00
    >>> global_win = datetime_to_win(ref_time, time, increment)
    >>> global_win
    6
    >>> print(win_to_datetime(ref_time, global_win, increment))
    1985-09-24 07:24:20

    This time is greater than the time that was transformed to global window,
    1985-09-22 23:00:00. Try again, this time with the floor option.

    >>> global_win = datetime_to_win(ref_time, time, increment, method="floor")
    >>> global_win
    5
    >>> print(win_to_datetime(ref_time, global_win, increment))
    1985-09-12 22:20:20
    """
    from math import floor, ceil

    from resistics.sampling import to_seconds

    if time < ref_time:
        raise ValueError(f"Time {str(time)} < reference time {str(ref_time)}")
    # to_seconds splits a timedelta into whole-days seconds and the remainder
    inc_days, inc_remainder = to_seconds(increment)
    inc_total = inc_days + inc_remainder
    delta_days, delta_remainder = to_seconds(time - ref_time)
    n_increments = (delta_days + delta_remainder) / inc_total
    if n_increments.is_integer():
        return int(n_increments)
    if method == "floor":
        return int(floor(n_increments))
    if method == "ceil":
        return int(ceil(n_increments))
    # Any other method value falls back to rounding
    return int(round(n_increments))
def get_first_and_last_win(
    ref_time: RSDateTime,
    metadata: DecimatedLevelMetadata,
    win_size: int,
    olap_size: int,
) -> Tuple[int, int]:
    """
    Get the first and last global window for a decimated data level

    .. note::

        On initial calculation, the last window may extend one or at most two
        windows beyond the last sample (two can occur when the last sample
        falls in the overlap of the final two windows). The last window is
        walked back here until it fits within the data.

    Parameters
    ----------
    ref_time : RSDateTime
        The reference time
    metadata : DecimatedLevelMetadata
        Metadata for the decimation level
    win_size : int
        Window size in samples
    olap_size : int
        Overlap size in samples

    Returns
    -------
    Tuple[int, int]
        First and last global windows. This is window indices relative to the
        reference time

    Raises
    ------
    ValueError
        If unable to calculate the last window correctly as this will result in
        an incorrect number of windows

    Examples
    --------
    Get the first and last window for the first decimation level in a decimated
    data instance.

    >>> from resistics.testing import decimated_data_random
    >>> from resistics.sampling import to_datetime
    >>> from resistics.window import get_first_and_last_win, win_to_datetime
    >>> from resistics.window import win_duration, inc_duration
    >>> ref_time = to_datetime("2021-01-01 00:00:00")
    >>> dec_data = decimated_data_random(fs=0.1, first_time="2021-01-01 00:05:10", n_samples=100, factor=10)

    Get the metadata for decimation level 0

    >>> level_metadata = dec_data.metadata.levels_metadata[0]
    >>> level_metadata.summary()
    {
        'fs': 10.0,
        'n_samples': 10000,
        'first_time': '2021-01-01 00:05:10.000000_000000_000000_000000',
        'last_time': '2021-01-01 00:21:49.899999_999999_977300_000000'
    }

    .. note::

        As a point of interest, note how the last time is actually slightly
        incorrect. This is due to machine precision issues described in more
        detail here https://docs.python.org/3/tutorial/floatingpoint.html.
        Whilst there is value in using the high resolution datetime format for
        high sampling rates, there is a tradeoff. Such are the perils of
        floating point arithmetic.

    The next step is to calculate the first and last window, relative to the
    reference time

    >>> win_size = 100
    >>> olap_size = 25
    >>> first_win, last_win = get_first_and_last_win(ref_time, level_metadata, win_size, olap_size)
    >>> print(first_win, last_win)
    42 173

    These window indices can be converted to start times of the windows. The
    last window is checked to make sure it does not extend past the end of the
    time data. First get the window duration and increments.

    >>> duration = win_duration(win_size, level_metadata.fs)
    >>> print(duration)
    0:00:09.9
    >>> increment = inc_duration(win_size, olap_size, level_metadata.fs)
    >>> print(increment)
    0:00:07.5

    Now calculate the times of the windows

    >>> first_win_start_time = win_to_datetime(ref_time, 42, increment)
    >>> last_win_start_time = win_to_datetime(ref_time, 173, increment)
    >>> print(first_win_start_time, last_win_start_time)
    2021-01-01 00:05:15 2021-01-01 00:21:37.5
    >>> print(last_win_start_time + duration)
    2021-01-01 00:21:47.4
    >>> print(level_metadata.last_time)
    2021-01-01 00:21:49.8999999999999773
    >>> level_metadata.last_time > last_win_start_time + increment
    True
    """
    duration = win_duration(win_size, metadata.fs)
    increment = inc_duration(win_size, olap_size, metadata.fs)
    # Round the first window up and the last window down so both candidates
    # start within the data
    first_win = datetime_to_win(ref_time, metadata.first_time, increment, method="ceil")
    last_win = datetime_to_win(ref_time, metadata.last_time, increment, method="floor")
    last_win_time = win_to_datetime(ref_time, last_win, increment)
    # Walk the last window back (at most twice) until it fits inside the data
    attempt = 0
    while metadata.last_time < last_win_time + duration:
        if attempt >= 2:
            raise ValueError("Unable to correctly get the last window")
        logger.debug(f"Adjusting last window attempt {attempt + 1}")
        last_win -= 1
        last_win_time = win_to_datetime(ref_time, last_win, increment)
        attempt += 1
    return first_win, last_win
def get_win_starts(
    ref_time: RSDateTime,
    win_size: int,
    olap_size: int,
    fs: float,
    n_wins: int,
    index_offset: int,
) -> pd.DatetimeIndex:
    """
    Get the start times for a set of windows

    Useful for attaching timestamps to the windows in a dataset.

    Parameters
    ----------
    ref_time : RSDateTime
        The reference time
    win_size : int
        The window size
    olap_size : int
        The overlap size
    fs : float
        The sampling frequency
    n_wins : int
        The number of windows
    index_offset : int
        The index offset from the reference time

    Returns
    -------
    pd.DatetimeIndex
        The start times of the windows

    Examples
    --------
    >>> import pandas as pd
    >>> from resistics.sampling import to_datetime
    >>> from resistics.window import get_win_starts
    >>> ref_time = to_datetime("2021-01-01 00:00:00")
    >>> win_size = 100
    >>> olap_size = 25
    >>> fs = 10
    >>> n_wins = 3
    >>> index_offset = 480
    >>> starts = get_win_starts(ref_time, win_size, olap_size, fs, n_wins, index_offset)
    >>> pd.Series(starts)
    0   2021-01-01 01:00:00.000
    1   2021-01-01 01:00:07.500
    2   2021-01-01 01:00:15.000
    dtype: datetime64[ns]
    """
    from resistics.sampling import datetime_array_estimate

    increment = inc_duration(win_size, olap_size, fs)
    # Start time of the first window in the set
    first_start = win_to_datetime(ref_time, index_offset, increment)
    advance = win_size - olap_size
    return datetime_array_estimate(first_start, fs / advance, n_wins)
def get_win_ends(
    starts: pd.DatetimeIndex,
    win_size: int,
    fs: float,
) -> pd.DatetimeIndex:
    """
    Get the end times for a set of windows

    Each window covers win_size samples, so its end is win_size - 1 sampling
    periods after its start.

    Parameters
    ----------
    starts : RSDateTime
        The start times of the windows
    win_size : int
        The window size
    fs : float
        The sampling frequency

    Returns
    -------
    pd.DatetimeIndex
        The end times of the windows

    Examples
    --------
    >>> import pandas as pd
    >>> from resistics.sampling import to_datetime
    >>> from resistics.window import get_win_starts, get_win_ends
    >>> ref_time = to_datetime("2021-01-01 00:00:00")
    >>> win_size = 100
    >>> olap_size = 25
    >>> fs = 10
    >>> n_wins = 3
    >>> index_offset = 480
    >>> starts = get_win_starts(ref_time, win_size, olap_size, fs, n_wins, index_offset)
    >>> pd.Series(starts)
    0   2021-01-01 01:00:00.000
    1   2021-01-01 01:00:07.500
    2   2021-01-01 01:00:15.000
    dtype: datetime64[ns]
    >>> ends = get_win_ends(starts, win_size, fs)
    >>> pd.Series(ends)
    0   2021-01-01 01:00:09.900
    1   2021-01-01 01:00:17.400
    2   2021-01-01 01:00:24.900
    dtype: datetime64[ns]
    """
    win_length = pd.Timedelta((win_size - 1) * (1 / fs), "s")
    return starts + win_length
def get_win_table(
    ref_time: RSDateTime,
    metadata: DecimatedLevelMetadata,
    win_size: int,
    olap_size: int,
) -> pd.DataFrame:
    """
    Build a table describing every window in a decimation level

    Parameters
    ----------
    ref_time : RSDateTime
        Reference
    metadata : DecimatedLevelMetadata
        Metadata for the decimation level
    win_size : int
        The window size
    olap_size : int
        The overlap size

    Returns
    -------
    pd.DataFrame
        A pandas DataFrame with details about each window

    Examples
    --------
    .. plot::
        :width: 90%

        >>> import matplotlib.pyplot as plt
        >>> from resistics.decimate import DecimatedLevelMetadata
        >>> from resistics.sampling import to_datetime, to_timedelta
        >>> from resistics.window import get_win_table
        >>> ref_time = to_datetime("2021-01-01 00:00:00")
        >>> fs = 10
        >>> n_samples = 1000
        >>> first_time = to_datetime("2021-01-01 01:00:00")
        >>> last_time = first_time + to_timedelta((n_samples-1)/fs)
        >>> metadata = DecimatedLevelMetadata(fs=10, n_samples=1000, first_time=first_time, last_time=last_time)
        >>> print(metadata.fs, metadata.first_time, metadata.last_time)
        10.0 2021-01-01 01:00:00 2021-01-01 01:01:39.9
        >>> win_size = 100
        >>> olap_size = 25
        >>> df = get_win_table(ref_time, metadata, win_size, olap_size)
        >>> print(df.to_string())
            global  local  from_sample  to_sample               win_start                 win_end
        0      480      0            0         99 2021-01-01 01:00:00.000 2021-01-01 01:00:09.900
        1      481      1           75        174 2021-01-01 01:00:07.500 2021-01-01 01:00:17.400
        2      482      2          150        249 2021-01-01 01:00:15.000 2021-01-01 01:00:24.900
        3      483      3          225        324 2021-01-01 01:00:22.500 2021-01-01 01:00:32.400
        4      484      4          300        399 2021-01-01 01:00:30.000 2021-01-01 01:00:39.900
        5      485      5          375        474 2021-01-01 01:00:37.500 2021-01-01 01:00:47.400
        6      486      6          450        549 2021-01-01 01:00:45.000 2021-01-01 01:00:54.900
        7      487      7          525        624 2021-01-01 01:00:52.500 2021-01-01 01:01:02.400
        8      488      8          600        699 2021-01-01 01:01:00.000 2021-01-01 01:01:09.900
        9      489      9          675        774 2021-01-01 01:01:07.500 2021-01-01 01:01:17.400
        10     490     10          750        849 2021-01-01 01:01:15.000 2021-01-01 01:01:24.900
        11     491     11          825        924 2021-01-01 01:01:22.500 2021-01-01 01:01:32.400
        12     492     12          900        999 2021-01-01 01:01:30.000 2021-01-01 01:01:39.900

        Plot six windows to illustrate the overlap

        >>> plt.figure(figsize=(8, 3)) # doctest: +SKIP
        >>> for idx, row in df.iterrows():
        ...     color = "red" if idx%2 == 0 else "blue"
        ...     plt.axvspan(row.loc["win_start"], row.loc["win_end"], alpha=0.5, color=color) # doctest: +SKIP
        ...     if idx > 5:
        ...         break
        >>> plt.tight_layout() # doctest: +SKIP
        >>> plt.show() # doctest: +SKIP
    """
    from resistics.sampling import to_n_samples, datetime_array_estimate

    fs = metadata.fs
    advance = win_size - olap_size
    increment = inc_duration(win_size, olap_size, fs)
    first_win, last_win = get_first_and_last_win(
        ref_time, metadata, win_size, olap_size
    )
    first_win_time = win_to_datetime(ref_time, first_win, increment)
    n_wins = last_win - first_win + 1
    local_wins = np.arange(n_wins).astype(int)
    # Use floor to avoid rounding up and ending with insufficient samples
    first_sample = to_n_samples(first_win_time - metadata.first_time, fs, method="floor") - 1
    starts = datetime_array_estimate(first_win_time, fs / advance, n_wins)
    from_samples = first_sample + local_wins * advance
    return pd.DataFrame(
        data={
            "global": np.arange(first_win, last_win + 1),
            "local": local_wins,
            "from_sample": from_samples,
            "to_sample": from_samples + win_size - 1,
            "win_start": starts,
            "win_end": get_win_ends(starts, win_size, fs),
        }
    )
class WindowParameters(ResisticsModel):
    """
    Windowing parameters per decimation level

    Windowing parameters are the window and overlap size for each decimation
    level.

    Parameters
    ----------
    n_levels : int
        The number of decimation levels
    min_n_wins : int
        Minimum number of windows
    win_sizes : List[int]
        The window sizes per decimation level
    olap_sizes : List[int]
        The overlap sizes per decimation level

    Examples
    --------
    Generate decimation and windowing parameters for data sampled at 4096 Hz.
    Note that requesting window sizes or overlap sizes for decimation levels
    that do not exist will raise a ValueError.

    >>> from resistics.decimate import DecimationSetup
    >>> from resistics.window import WindowSetup
    >>> dec_setup = DecimationSetup(n_levels=3, per_level=3)
    >>> dec_params = dec_setup.run(4096)
    >>> dec_params.summary()
    {
        'fs': 4096.0,
        'n_levels': 3,
        'per_level': 3,
        'min_samples': 256,
        'eval_freqs': [
            1024.0,
            724.0773439350246,
            512.0,
            362.0386719675123,
            256.0,
            181.01933598375615,
            128.0,
            90.50966799187808,
            64.0
        ],
        'dec_factors': [1, 2, 8],
        'dec_increments': [1, 2, 4],
        'dec_fs': [4096.0, 2048.0, 512.0]
    }
    >>> win_params = WindowSetup().run(dec_params.n_levels, dec_params.dec_fs)
    >>> win_params.summary()
    {
        'n_levels': 3,
        'min_n_wins': 5,
        'win_sizes': [1024, 512, 128],
        'olap_sizes': [256, 128, 32]
    }
    >>> win_params.get_win_size(0)
    1024
    >>> win_params.get_olap_size(0)
    256
    >>> win_params.get_olap_size(3)
    Traceback (most recent call last):
    ...
    ValueError: Level 3 must be 0 <= level < 3
    """

    n_levels: int
    min_n_wins: int
    win_sizes: List[int]
    olap_sizes: List[int]

    def check_level(self, level: int):
        """Raise a ValueError if the decimation level is out of range"""
        if not 0 <= level < self.n_levels:
            raise ValueError(f"Level {level} must be 0 <= level < {self.n_levels}")

    def get_win_size(self, level: int) -> int:
        """Return the window size for a decimation level"""
        self.check_level(level)
        return self.win_sizes[level]

    def get_olap_size(self, level: int) -> int:
        """Return the overlap size for a decimation level"""
        self.check_level(level)
        return self.olap_sizes[level]
class WindowSetup(ResisticsProcess):
    """
    Setup WindowParameters
    WindowSetup outputs the WindowParameters to use for windowing decimated
    time data.
    Window parameters are simply the window and overlap sizes for each
    decimation level.
    Parameters
    ----------
    min_size : int, optional
        Minimum window size, by default 128
    min_olap : int, optional
        Minimum overlap size, by default 32
    win_factor : int, optional
        Window factor, by default 4. Window sizes are calculated by sampling
        frequency / 4 to ensure sufficient frequency resolution. If the
        sampling frequency is small, window size will be adjusted to
        min_size
    olap_proportion : float, optional
        The proportion of the window size to use as the overlap, by default
        0.25. For example, for a window size of 128, the overlap would be
        0.25 * 128 = 32
    min_n_wins : int, optional
        The minimum number of windows needed in a decimation level, by
        default 5
    win_sizes : Optional[List[int]], optional
        Explicitly define window sizes, by default None. Must have the same
        length as number of decimation levels
    olap_sizes : Optional[List[int]], optional
        Explicitly define overlap sizes, by default None. Must have the same
        length as number of decimation levels
    Examples
    --------
    Generate decimation and windowing parameters for data sampled at 0.05 Hz or
    20 seconds sampling period
    >>> from resistics.decimate import DecimationSetup
    >>> from resistics.window import WindowSetup
    >>> dec_params = DecimationSetup(n_levels=3, per_level=3).run(0.05)
    >>> dec_params.summary()
    {
        'fs': 0.05,
        'n_levels': 3,
        'per_level': 3,
        'min_samples': 256,
        'eval_freqs': [
            0.0125,
            0.008838834764831844,
            0.00625,
            0.004419417382415922,
            0.003125,
            0.002209708691207961,
            0.0015625,
            0.0011048543456039805,
            0.00078125
        ],
        'dec_factors': [1, 2, 8],
        'dec_increments': [1, 2, 4],
        'dec_fs': [0.05, 0.025, 0.00625]
    }
    >>> win_params = WindowSetup().run(dec_params.n_levels, dec_params.dec_fs)
    >>> win_params.summary()
    {
        'n_levels': 3,
        'min_n_wins': 5,
        'win_sizes': [128, 128, 128],
        'olap_sizes': [32, 32, 32]
    }
    Window parameters can also be explicitly defined
    >>> from resistics.decimate import DecimationSetup
    >>> from resistics.window import WindowSetup
    >>> dec_setup = DecimationSetup(n_levels=3, per_level=3)
    >>> dec_params = dec_setup.run(0.05)
    >>> win_setup = WindowSetup(win_sizes=[1000, 578, 104])
    >>> win_params = win_setup.run(dec_params.n_levels, dec_params.dec_fs)
    >>> win_params.summary()
    {
        'n_levels': 3,
        'min_n_wins': 5,
        'win_sizes': [1000, 578, 104],
        'olap_sizes': [250, 144, 32]
    }
    """

    min_size: int = 128
    min_olap: int = 32
    win_factor: int = 4
    olap_proportion: float = 0.25
    min_n_wins: int = 5
    win_sizes: Optional[List[int]] = None
    olap_sizes: Optional[List[int]] = None

    def run(self, n_levels: int, dec_fs: List[float]) -> WindowParameters:
        """
        Calculate window and overlap sizes for each decimation level based on
        decimation level sampling frequency and minimum allowable parameters

        The window and overlap sizes (number of samples) are calculated based in
        the following way:

        - window size = frequency at decimation level / window factor
        - overlap size = window size * overlap proportion

        This is to ensure good frequency resolution at high frequencies. At low
        sampling frequencies, this would result in very small window sizes,
        therefore, there are minimum allowable sizes for both windows and overlap
        defined by min_size and min_olap in the initialiser. If window sizes
        or overlap sizes are calculated below these respectively, they will be
        set to the minimum values.

        Parameters
        ----------
        n_levels : int
            The number of decimation levels
        dec_fs : List[float]
            The sampling frequencies for each decimation level

        Returns
        -------
        WindowParameters
            The window parameters, the window sizes and overlaps for each
            decimation level

        Raises
        ------
        ValueError
            If the number of windows does not match the number of levels
        ValueError
            If the number of overlaps does not match the number of levels
        """
        if self.win_sizes is None:
            win_sizes = self._get_win_sizes(n_levels, dec_fs)
        else:
            win_sizes = list(self.win_sizes)
        if len(win_sizes) < n_levels:
            raise ValueError(f"Num. windows {len(win_sizes)} < n_levels {n_levels}")
        if len(win_sizes) > n_levels:
            # this may happen with user input windows
            # but decimated data has fewer levels
            win_sizes = win_sizes[:n_levels]
        if self.olap_sizes is None:
            olap_sizes = self._get_olap_sizes(win_sizes)
        else:
            # copy so the returned WindowParameters never aliases (and cannot
            # mutate) this instance's list; matches the win_sizes handling above
            olap_sizes = list(self.olap_sizes)
        if len(olap_sizes) < n_levels:
            raise ValueError(f"Num. overlaps {len(olap_sizes)} < n_levels {n_levels}")
        if len(olap_sizes) > n_levels:
            # this may happen with user input windows
            # but decimated data has fewer levels
            olap_sizes = olap_sizes[:n_levels]
        return WindowParameters(
            n_levels=n_levels,
            min_n_wins=self.min_n_wins,
            win_sizes=win_sizes,
            olap_sizes=olap_sizes,
        )

    def _get_win_sizes(self, n_levels: int, dec_fs: List[float]) -> List[int]:
        """
        Get the window sizes

        Parameters
        ----------
        n_levels : int
            The number of decimation levels
        dec_fs : List[float]
            The sampling frequencies for each decimation level

        Returns
        -------
        List[int]
            Window sizes
        """
        win_sizes = []
        for ilevel in range(n_levels):
            # window size = level sampling frequency / window factor,
            # clamped from below by min_size
            win_size = dec_fs[ilevel] // self.win_factor
            if win_size < self.min_size:
                win_size = self.min_size
            win_sizes.append(int(win_size))
        return win_sizes

    def _get_olap_sizes(self, win_sizes: List[int]) -> List[int]:
        """
        Get overlap sizes

        Parameters
        ----------
        win_sizes : List[int]
            The window sizes

        Returns
        -------
        List[int]
            The overlap sizes
        """
        olap_sizes = []
        for win_size in win_sizes:
            # overlap = window size * overlap proportion, clamped from below
            # by min_olap
            olap_size = int(win_size * self.olap_proportion)
            if olap_size < self.min_olap:
                olap_size = self.min_olap
            olap_sizes.append(olap_size)
        return olap_sizes
class WindowedLevelMetadata(Metadata):
    """Metadata for a windowed level"""
    fs: float
    """The sampling frequency for the decimation level"""
    n_wins: int
    """The number of windows"""
    win_size: PositiveInt
    """The window size in samples"""
    olap_size: PositiveInt
    """The overlap size in samples"""
    index_offset: int
    """The global window offset for local window 0"""
    @property
    def dt(self):
        """The sampling period in seconds (1 / fs) for the decimation level"""
        return 1 / self.fs
class WindowedMetadata(WriteableMetadata):
    """Metadata for windowed data"""
    # Sampling frequency per decimation level
    fs: List[float]
    chans: List[str]
    n_chans: Optional[int] = None
    n_levels: int
    first_time: HighResDateTime
    last_time: HighResDateTime
    system: str = ""
    serial: str = ""
    # -999.0 defaults presumably mark an unknown position/elevation — confirm
    # this convention against the rest of the package
    wgs84_latitude: float = -999.0
    wgs84_longitude: float = -999.0
    easting: float = -999.0
    northing: float = -999.0
    elevation: float = -999.0
    chans_metadata: Dict[str, ChanMetadata]
    # One WindowedLevelMetadata per decimation level that was windowed
    levels_metadata: List[WindowedLevelMetadata]
    # The reference time that windows are aligned to
    ref_time: HighResDateTime
    # NOTE(review): mutable default — assumes the pydantic base copies field
    # defaults per instance rather than sharing one History object; confirm
    history: History = History()
    class Config:
        extra = "ignore"
class WindowedData(ResisticsData):
    """
    Windows of a DecimatedData object
    The windowed data is stored in a dictionary attribute named data. This is
    a dictionary with an entry for each decimation level. The shape for a single
    decimation level is as follows:
    n_wins x n_chans x n_samples
    """

    def __init__(
        self,
        metadata: WindowedMetadata,
        data: Dict[int, np.ndarray],
    ):
        """
        Initialise the WindowedData

        Parameters
        ----------
        metadata : WindowedMetadata
            The metadata for the windowed data
        data : Dict[int, np.ndarray]
            The windowed data, one array per decimation level
        """
        logger.debug(f"Creating WindowedData with data type {data[0].dtype}")
        self.metadata = metadata
        self.data = data

    def get_level(self, level: int) -> np.ndarray:
        """
        Get windows for a decimation level

        Parameters
        ----------
        level : int
            The decimation level

        Returns
        -------
        np.ndarray
            The window array

        Raises
        ------
        ValueError
            If decimation level is not within range
        """
        # Negative levels previously fell through to a KeyError from the data
        # dictionary; reject them explicitly to honour the documented ValueError
        if level < 0:
            raise ValueError(f"Level {level} must not be negative")
        if level >= self.metadata.n_levels:
            raise ValueError(f"Level {level} not <= max {self.metadata.n_levels - 1}")
        return self.data[level]

    def get_local(self, level: int, local_win: int) -> np.ndarray:
        """
        Get window using local index

        Parameters
        ----------
        level : int
            The decimation level
        local_win : int
            Local window index

        Returns
        -------
        np.ndarray
            Window data

        Raises
        ------
        ValueError
            If local window index is out of range
        """
        n_wins = self.metadata.levels_metadata[level].n_wins
        if local_win < 0 or local_win >= n_wins:
            raise ValueError(f"Local window {local_win} not 0 <= local_win < {n_wins}")
        return self.get_level(level)[local_win]

    def get_global(self, level: int, global_win: int) -> np.ndarray:
        """
        Get window using global index

        Parameters
        ----------
        level : int
            The decimation level
        global_win : int
            Global window index

        Returns
        -------
        np.ndarray
            Window data
        """
        # Global indices are aligned to the reference time; translate to a
        # local index with the level's offset and reuse get_local's checks
        index_offset = self.metadata.levels_metadata[level].index_offset
        return self.get_local(level, global_win + index_offset)

    def get_chan(self, level: int, chan: str) -> np.ndarray:
        """
        Get all the windows for a channel

        Parameters
        ----------
        level : int
            The decimation level
        chan : str
            The channel

        Returns
        -------
        np.ndarray
            The data for the channels

        Raises
        ------
        ChannelNotFoundError
            If the channel is not found in the data
        """
        from resistics.errors import ChannelNotFoundError

        if chan not in self.metadata.chans:
            raise ChannelNotFoundError(chan, self.metadata.chans)
        idx = self.metadata.chans.index(chan)
        return self.get_level(level)[..., idx, :]

    def to_string(self) -> str:
        """Class information as a string"""
        return self.metadata.to_string()
class Windower(ResisticsProcess):
    """
    Windows DecimatedData
    This is the primary window making process for resistics and should be used
    when alignment of windows with a site or across sites is required.
    This method uses numpy striding to produce window views into the decimated
    data.
    See Also
    --------
    WindowerTarget : A windower to make a target number of windows
    Examples
    --------
    The Windower windows a DecimatedData object given a reference time and some
    window parameters.
    There's quite a few imports needed for this example. Begin by doing the
    imports, defining a reference time and generating random decimated data.
    >>> from resistics.sampling import to_datetime
    >>> from resistics.testing import decimated_data_linear
    >>> from resistics.window import WindowSetup, Windower
    >>> dec_data = decimated_data_linear(fs=128)
    >>> ref_time = dec_data.metadata.first_time
    >>> print(dec_data.to_string())
    <class 'resistics.decimate.DecimatedData'>
    fs dt n_samples first_time last_time
    level
    0 2048.0 0.000488 16384 2021-01-01 00:00:00 2021-01-01 00:00:07.99951171875
    1 512.0 0.001953 4096 2021-01-01 00:00:00 2021-01-01 00:00:07.998046875
    2 128.0 0.007812 1024 2021-01-01 00:00:00 2021-01-01 00:00:07.9921875
    Next, initialise the window parameters. For this example, use small windows,
    which will make inspecting them easier.
    >>> win_params = WindowSetup(win_sizes=[16,16,16], min_olap=4).run(dec_data.metadata.n_levels, dec_data.metadata.fs)
    >>> win_params.summary()
    {
        'n_levels': 3,
        'min_n_wins': 5,
        'win_sizes': [16, 16, 16],
        'olap_sizes': [4, 4, 4]
    }
    Perform the windowing. This actually creates views into the decimated data
    using the numpy.lib.stride_tricks.sliding_window_view function. The shape
    for a data array at a decimation level is: n_wins x n_chans x win_size. The
    information about each level is also in the levels_metadata attribute of
    WindowedMetadata.
    >>> win_data = Windower().run(ref_time, win_params, dec_data)
    >>> win_data.data[0].shape
    (1365, 2, 16)
    >>> for level_metadata in win_data.metadata.levels_metadata:
    ...     level_metadata.summary()
    {
        'fs': 2048.0,
        'n_wins': 1365,
        'win_size': 16,
        'olap_size': 4,
        'index_offset': 0
    }
    {
        'fs': 512.0,
        'n_wins': 341,
        'win_size': 16,
        'olap_size': 4,
        'index_offset': 0
    }
    {
        'fs': 128.0,
        'n_wins': 85,
        'win_size': 16,
        'olap_size': 4,
        'index_offset': 0
    }
    Let's look at an example of data from the first decimation level for the
    first channel. This is simply a linear set of data ranging from 0...16_383.
    >>> dec_data.data[0][0]
    array([    0,     1,     2, ..., 16381, 16382, 16383])
    Inspecting the first few windows shows they are as expected including the
    overlap.
    >>> win_data.data[0][0, 0]
    array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15])
    >>> win_data.data[0][1, 0]
    array([12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27])
    >>> win_data.data[0][2, 0]
    array([24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39])
    """
    def run(
        self,
        ref_time: RSDateTime,
        win_params: WindowParameters,
        dec_data: DecimatedData,
    ) -> WindowedData:
        """
        Perform windowing of DecimatedData

        Parameters
        ----------
        ref_time : RSDateTime
            The reference time
        win_params : WindowParameters
            The window parameters
        dec_data : DecimatedData
            The decimated data

        Returns
        -------
        WindowedData
            Windows for decimated data

        Raises
        ------
        ProcessRunError
            If the number of windows calculated in the window table does not
            match the size of the array views
        """
        metadata_dict = dec_data.metadata.dict()
        data = {}
        win_levels_metadata = []
        messages = []
        # Window each decimation level in turn; stop at the first level that
        # cannot supply the minimum number of windows
        for ilevel in range(0, dec_data.metadata.n_levels):
            logger.info(f"Windowing decimation level {ilevel}")
            win_size = win_params.get_win_size(ilevel)
            olap_size = win_params.get_olap_size(ilevel)
            level_metadata = dec_data.metadata.levels_metadata[ilevel]
            # The window table maps local window indices to global (reference
            # time aligned) indices and sample ranges
            win_table = get_win_table(ref_time, level_metadata, win_size, olap_size)
            n_wins = len(win_table.index)
            logger.info(f"{n_wins} windows, size {win_size}, overlap {olap_size}")
            messages.append(f"Level {ilevel}, generated {n_wins} windows")
            messages.append(f"Window size {win_size}, olap_size {olap_size}")
            if n_wins < win_params.min_n_wins:
                # too few windows at this level; do not window deeper levels
                logger.debug(f"Number windows {n_wins} < min. {win_params.min_n_wins}")
                messages.append(f"Num. windows {n_wins} < min. {win_params.min_n_wins}")
                messages.append(f"Level {ilevel} incomplete, terminating windowing")
                break
            win_level_data = self._get_level_data(
                dec_data.get_level(ilevel),
                win_table,
                dec_data.metadata.n_chans,
                win_size,
                olap_size,
            )
            # Sanity check: the strided view must agree with the window table
            if win_level_data.shape[0] != n_wins:
                raise ProcessRunError(
                    self.name,
                    f"Num. windows mismatch {win_level_data.shape[0]} != {n_wins}",
                )
            win_level_metadata = self._get_level_metadata(
                level_metadata,
                win_table,
                win_size,
                olap_size,
            )
            data[ilevel] = win_level_data
            win_levels_metadata.append(win_level_metadata)
        metadata_dict["ref_time"] = ref_time
        metadata = self._get_metadata(metadata_dict, win_levels_metadata)
        metadata.history.add_record(self._get_record(messages))
        logger.info("Windowing completed")
        return WindowedData(metadata, data)
    def _get_level_data(
        self,
        data: np.ndarray,
        win_table: pd.DataFrame,
        n_chans: int,
        win_size: int,
        olap_size: int,
    ) -> np.ndarray:
        """
        Get window data for a decimation level

        Parameters
        ----------
        data : np.ndarray
            The decimated time data for the level
        win_table : pd.DataFrame
            The window table
        n_chans : int
            The number of channels
        win_size : int
            The window size
        olap_size : int
            The overlap size

        Returns
        -------
        np.ndarray
            Sliding window views in an array for the decimation level
        """
        from numpy.lib.stride_tricks import sliding_window_view
        # to_sample is the starting sample of the last window, so slicing up
        # to to_sample + 1 includes the final window's view
        from_sample = win_table.loc[0, "from_sample"]
        to_sample = win_table.loc[win_table.index[-1], "from_sample"]
        increment_size = win_size - olap_size
        # NOTE(review): writeable=True yields overlapping writable views into
        # the decimated data, so writing to the windows mutates it (and
        # neighbouring windows) — confirm downstream code relies on this
        view = np.squeeze(
            sliding_window_view(data, window_shape=(n_chans, win_size), writeable=True)
        )
        # Keep every increment_size-th window start between the first and last
        return view[from_sample : to_sample + 1 : increment_size]
    def _get_level_metadata(
        self,
        level_metadata: DecimatedLevelMetadata,
        win_table: pd.DataFrame,
        win_size: int,
        olap_size: int,
    ) -> WindowedLevelMetadata:
        """Get the windowed metadata for a decimation level"""
        # The local-to-global offset must be constant for the whole level
        offset = (win_table["global"] - win_table["local"]).unique()
        if len(offset) != 1:
            raise ValueError("Malformed window table, varying local to global offset")
        return WindowedLevelMetadata(
            fs=level_metadata.fs,
            n_wins=len(win_table.index),
            win_size=win_size,
            olap_size=olap_size,
            index_offset=offset[0],
        )
    def _get_metadata(
        self,
        metadata_dict: Dict[str, Any],
        levels_metadata: List[WindowedLevelMetadata],
    ) -> WindowedMetadata:
        """Get the metadata for the windowed data"""
        # file_info belongs to the source data on disk, not the new windows
        metadata_dict.pop("file_info")
        metadata_dict["n_levels"] = len(levels_metadata)
        metadata_dict["levels_metadata"] = levels_metadata
        return WindowedMetadata(**metadata_dict)
class WindowerTarget(Windower):
    """
    Windower that selects window sizes to meet a target number of windows
    The minimum window size in window parameters will be respected even if the
    generated number of windows is below the target. This is to avoid situations
    where excessively small windows sizes are selected.
    .. warning::
        This process is primarily useful for quick processing of a single
        measurement and should not be used when any alignment of windows is
        required within a site or across sites.
    Parameters
    ----------
    target : int
        The target number of windows for each decimation level
    min_size : int
        The minimum allowable window size, by default 64
    olap_proportion : float
        The overlap proportion of the window size
    See Also
    --------
    Windower : The window making process to use when alignment is required
    """
    target: int = 1000
    min_size: int = 64
    olap_proportion: float = 0.25
    def run(
        self,
        ref_time: RSDateTime,
        win_params: WindowParameters,
        dec_data: DecimatedData,
    ) -> WindowedData:
        """
        Perform windowing of DecimatedData with data-dependent window sizes

        Window sizes are chosen per level to give close to the target number
        of windows; win_params is consulted only for min_n_wins.
        """
        metadata_dict = dec_data.metadata.dict()
        data = {}
        win_levels_metadata = []
        messages = []
        for ilevel in range(0, dec_data.metadata.n_levels):
            logger.info(f"Windowing decimation level {ilevel}")
            level_metadata = dec_data.metadata.levels_metadata[ilevel]
            # window size is derived from the level's sample count, not from
            # win_params, to approach the target number of windows
            win_size = self._get_win_size(level_metadata)
            olap_size = int(np.floor(self.olap_proportion * win_size))
            win_table = get_win_table(ref_time, level_metadata, win_size, olap_size)
            n_wins = len(win_table.index)
            logger.info(f"{n_wins} windows, size {win_size}, overlap {olap_size}")
            messages.append(f"Level {ilevel}, generated {n_wins} windows")
            messages.append(f"Window size {win_size}, olap_size {olap_size}")
            if n_wins < win_params.min_n_wins:
                logger.debug(f"Number windows {n_wins} < min. {win_params.min_n_wins}")
                messages.append(f"Num. windows {n_wins} < min. {win_params.min_n_wins}")
                messages.append(f"Level {ilevel} incomplete, terminating windowing")
                break
            win_level_data = self._get_level_data(
                dec_data.get_level(ilevel),
                win_table,
                dec_data.metadata.n_chans,
                win_size,
                olap_size,
            )
            # NOTE(review): no n_wins vs view-shape consistency check here,
            # unlike Windower.run — confirm this difference is intended
            win_level_metadata = self._get_level_metadata(
                level_metadata,
                win_table,
                win_size,
                olap_size,
            )
            data[ilevel] = win_level_data
            win_levels_metadata.append(win_level_metadata)
        # NOTE(review): unlike Windower, the stored ref_time is the data's
        # first_time rather than the ref_time argument — presumably because
        # windows here are never aligned across measurements; confirm intended
        metadata_dict["ref_time"] = metadata_dict["first_time"]
        metadata = self._get_metadata(metadata_dict, win_levels_metadata)
        metadata.history.add_record(self._get_record(messages))
        logger.info("Windowing completed")
        return WindowedData(metadata, data)
    def _get_win_size(self, level_metadata: DecimatedLevelMetadata) -> int:
        r"""
        Get window size that gives close to the target number of windows

        Windows increment by (window size - overlap size), therefore the
        following equation is solved,

        .. math::
            n_{samples} / ((1 - olap\_proportion) \cdot win\_size) = target

        Rearranging, get,

        .. math::
            win\_size = n_{samples} / ((1 - olap\_proportion) \cdot target)

        Parameters
        ----------
        level_metadata : DecimatedLevelMetadata
            The metadata for the decimation level

        Returns
        -------
        int
            The window size
        """
        win_size = level_metadata.n_samples / ((1 - self.olap_proportion) * self.target)
        win_size = int(np.floor(win_size))
        # respect the minimum window size even if fewer windows result
        if win_size < self.min_size:
            return self.min_size
        return win_size
class WindowedDataWriter(ResisticsWriter):
    """Writer of resistics windowed data"""

    def run(self, dir_path: Path, win_data: WindowedData) -> None:
        """
        Write out WindowedData

        The per-level arrays are saved to a compressed numpy archive and the
        metadata, with an updated history record, to a JSON file.

        Parameters
        ----------
        dir_path : Path
            The directory path to write to
        win_data : WindowedData
            Windowed data to write out

        Raises
        ------
        WriteError
            If unable to write to the directory
        """
        from resistics.errors import WriteError

        if not self._check_dir(dir_path):
            raise WriteError(dir_path, "Unable to write to directory, check logs")
        logger.info(f"Writing windowed data to {dir_path}")
        # One array per decimation level, keyed by the level number
        arrays_by_level = {str(level): array for level, array in win_data.data.items()}
        np.savez_compressed(dir_path / "data", **arrays_by_level)
        # Record this write in a copy of the metadata, then save it alongside
        updated_metadata = win_data.metadata.copy()
        updated_metadata.history.add_record(self._get_record(dir_path, type(win_data)))
        updated_metadata.write(dir_path / "metadata.json")
class WindowedDataReader(ResisticsProcess):
    """Reader of resistics windowed data"""

    def run(
        self, dir_path: Path, metadata_only: bool = False
    ) -> Union[WindowedMetadata, WindowedData]:
        """
        Read WindowedData

        Parameters
        ----------
        dir_path : Path
            The directory path to read from
        metadata_only : bool, optional
            Flag for getting metadata only, by default False

        Returns
        -------
        Union[WindowedMetadata, WindowedData]
            The WindowedData or WindowedMetadata if metadata_only is True

        Raises
        ------
        ReadError
            If the directory does not exist
        """
        from resistics.errors import ReadError

        if not dir_path.exists():
            raise ReadError(dir_path, "Directory does not exist")
        logger.info(f"Reading windowed data from {dir_path}")
        metadata = WindowedMetadata.parse_file(dir_path / "metadata.json")
        if metadata_only:
            return metadata
        # Load the per-level arrays, converting archive keys back to int levels
        archive = np.load(dir_path / "data.npz")
        level_arrays = {int(name): archive[name] for name in archive.files}
        messages = [f"Windowed data read from {dir_path}"]
        metadata.history.add_record(self._get_record(messages))
        return WindowedData(metadata, level_arrays)
| [
"resistics.sampling.to_n_samples",
"math.floor",
"resistics.common.History",
"numpy.lib.stride_tricks.sliding_window_view",
"numpy.arange",
"resistics.errors.WriteError",
"pandas.DataFrame",
"resistics.errors.ChannelNotFoundError",
"numpy.floor",
"resistics.errors.ProcessRunError",
"resistics.sa... | [((10612, 10633), 'resistics.sampling.to_seconds', 'to_seconds', (['increment'], {}), '(increment)\n', (10622, 10633), False, 'from resistics.sampling import to_seconds\n'), ((10774, 10801), 'resistics.sampling.to_seconds', 'to_seconds', (['(time - ref_time)'], {}), '(time - ref_time)\n', (10784, 10801), False, 'from resistics.sampling import to_seconds\n'), ((17272, 17340), 'resistics.sampling.datetime_array_estimate', 'datetime_array_estimate', (['first_win_time', '(fs / increment_size)', 'n_wins'], {}), '(first_win_time, fs / increment_size, n_wins)\n', (17295, 17340), False, 'from resistics.sampling import to_n_samples, datetime_array_estimate\n'), ((22597, 22665), 'resistics.sampling.datetime_array_estimate', 'datetime_array_estimate', (['first_win_time', '(fs / increment_size)', 'n_wins'], {}), '(first_win_time, fs / increment_size, n_wins)\n', (22620, 22665), False, 'from resistics.sampling import to_n_samples, datetime_array_estimate\n'), ((23033, 23059), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'df_dict'}), '(data=df_dict)\n', (23045, 23059), True, 'import pandas as pd\n'), ((33757, 33766), 'resistics.common.History', 'History', ([], {}), '()\n', (33764, 33766), False, 'from resistics.common import History, ResisticsModel, ResisticsData, ResisticsProcess\n'), ((4628, 4648), 'resistics.sampling.to_timedelta', 'to_timedelta', (['(1 / fs)'], {}), '(1 / fs)\n', (4640, 4648), False, 'from resistics.sampling import to_timedelta\n'), ((5618, 5638), 'resistics.sampling.to_timedelta', 'to_timedelta', (['(1 / fs)'], {}), '(1 / fs)\n', (5630, 5638), False, 'from resistics.sampling import to_timedelta\n'), ((15461, 15521), 'loguru.logger.debug', 'logger.debug', (['f"""Adjusting last window attempt {attempt + 1}"""'], {}), "(f'Adjusting last window attempt {attempt + 1}')\n", (15473, 15521), False, 'from loguru import logger\n'), ((18542, 18586), 'pandas.Timedelta', 'pd.Timedelta', (['((win_size - 1) * (1 / fs))', '"""s"""'], {}), 
"((win_size - 1) * (1 / fs), 's')\n", (18554, 18586), True, 'import pandas as pd\n'), ((22518, 22579), 'resistics.sampling.to_n_samples', 'to_n_samples', (['(first_win_time - first_time)', 'fs'], {'method': '"""floor"""'}), "(first_win_time - first_time, fs, method='floor')\n", (22530, 22579), False, 'from resistics.sampling import to_n_samples, datetime_array_estimate\n'), ((22746, 22780), 'numpy.arange', 'np.arange', (['first_win', '(last_win + 1)'], {}), '(first_win, last_win + 1)\n', (22755, 22780), True, 'import numpy as np\n'), ((34513, 34582), 'loguru.logger.debug', 'logger.debug', (['f"""Creating WindowedData with data type {data[0].dtype}"""'], {}), "(f'Creating WindowedData with data type {data[0].dtype}')\n", (34525, 34582), False, 'from loguru import logger\n'), ((43460, 43494), 'loguru.logger.info', 'logger.info', (['"""Windowing completed"""'], {}), "('Windowing completed')\n", (43471, 43494), False, 'from loguru import logger\n'), ((48727, 48761), 'loguru.logger.info', 'logger.info', (['"""Windowing completed"""'], {}), "('Windowing completed')\n", (48738, 48761), False, 'from loguru import logger\n'), ((50397, 50448), 'loguru.logger.info', 'logger.info', (['f"""Writing windowed data to {dir_path}"""'], {}), "(f'Writing windowed data to {dir_path}')\n", (50408, 50448), False, 'from loguru import logger\n'), ((51657, 51710), 'loguru.logger.info', 'logger.info', (['f"""Reading windowed data from {dir_path}"""'], {}), "(f'Reading windowed data from {dir_path}')\n", (51668, 51710), False, 'from loguru import logger\n'), ((51939, 51957), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (51946, 51957), True, 'import numpy as np\n'), ((22344, 22361), 'numpy.arange', 'np.arange', (['n_wins'], {}), '(n_wins)\n', (22353, 22361), True, 'import numpy as np\n'), ((36976, 37023), 'resistics.errors.ChannelNotFoundError', 'ChannelNotFoundError', (['chan', 'self.metadata.chans'], {}), '(chan, self.metadata.chans)\n', (36996, 37023), False, 'from 
resistics.errors import ChannelNotFoundError\n'), ((41598, 41649), 'loguru.logger.info', 'logger.info', (['f"""Windowing decimation level {ilevel}"""'], {}), "(f'Windowing decimation level {ilevel}')\n", (41609, 41649), False, 'from loguru import logger\n'), ((41972, 42042), 'loguru.logger.info', 'logger.info', (['f"""{n_wins} windows, size {win_size}, overlap {olap_size}"""'], {}), "(f'{n_wins} windows, size {win_size}, overlap {olap_size}')\n", (41983, 42042), False, 'from loguru import logger\n'), ((44533, 44608), 'numpy.lib.stride_tricks.sliding_window_view', 'sliding_window_view', (['data'], {'window_shape': '(n_chans, win_size)', 'writeable': '(True)'}), '(data, window_shape=(n_chans, win_size), writeable=True)\n', (44552, 44608), False, 'from numpy.lib.stride_tricks import sliding_window_view\n'), ((47051, 47102), 'loguru.logger.info', 'logger.info', (['f"""Windowing decimation level {ilevel}"""'], {}), "(f'Windowing decimation level {ilevel}')\n", (47062, 47102), False, 'from loguru import logger\n'), ((47442, 47512), 'loguru.logger.info', 'logger.info', (['f"""{n_wins} windows, size {win_size}, overlap {olap_size}"""'], {}), "(f'{n_wins} windows, size {win_size}, overlap {olap_size}')\n", (47453, 47512), False, 'from loguru import logger\n'), ((49614, 49632), 'numpy.floor', 'np.floor', (['win_size'], {}), '(win_size)\n', (49622, 49632), True, 'import numpy as np\n'), ((50324, 50388), 'resistics.errors.WriteError', 'WriteError', (['dir_path', '"""Unable to write to directory, check logs"""'], {}), "(dir_path, 'Unable to write to directory, check logs')\n", (50334, 50388), False, 'from resistics.errors import WriteError\n'), ((51601, 51648), 'resistics.errors.ReadError', 'ReadError', (['dir_path', '"""Directory does not exist"""'], {}), "(dir_path, 'Directory does not exist')\n", (51610, 51648), False, 'from resistics.errors import ReadError\n'), ((11075, 11094), 'math.floor', 'floor', (['n_increments'], {}), '(n_increments)\n', (11080, 11094), False, 'from 
math import floor, ceil\n'), ((42260, 42331), 'loguru.logger.debug', 'logger.debug', (['f"""Number windows {n_wins} < min. {win_params.min_n_wins}"""'], {}), "(f'Number windows {n_wins} < min. {win_params.min_n_wins}')\n", (42272, 42331), False, 'from loguru import logger\n'), ((42833, 42927), 'resistics.errors.ProcessRunError', 'ProcessRunError', (['self.name', 'f"""Num. windows mismatch {win_level_data.shape[0]} != {n_wins}"""'], {}), "(self.name,\n f'Num. windows mismatch {win_level_data.shape[0]} != {n_wins}')\n", (42848, 42927), False, 'from resistics.errors import ProcessRunError\n'), ((47260, 47301), 'numpy.floor', 'np.floor', (['(self.olap_proportion * win_size)'], {}), '(self.olap_proportion * win_size)\n', (47268, 47301), True, 'import numpy as np\n'), ((47730, 47801), 'loguru.logger.debug', 'logger.debug', (['f"""Number windows {n_wins} < min. {win_params.min_n_wins}"""'], {}), "(f'Number windows {n_wins} < min. {win_params.min_n_wins}')\n", (47742, 47801), False, 'from loguru import logger\n'), ((11150, 11168), 'math.ceil', 'ceil', (['n_increments'], {}), '(n_increments)\n', (11154, 11168), False, 'from math import floor, ceil\n')] |
import rps.robotarium as robotarium
from rps.utilities import *
from rps.utilities.barrier_certificates import *
from rps.utilities.controllers import *
from rps.utilities.transformations import *
from reachGoal import reachGoal
from matplotlib import patches
import numpy as np
import time
N = 1
# Single robot starting at x=-1, y=0.2 with heading 0
initial_conditions = np.transpose(np.array([[-1, 0.2, 0]]))
r = robotarium.Robotarium(number_of_robots=N, show_figure=True, initial_conditions=initial_conditions, sim_in_real_time=False)
# Mappings between single-integrator and unicycle models
si_to_uni_dyn = create_si_to_uni_dynamics_with_backwards_motion()
_, uni_to_si_states = create_si_to_uni_mapping()
si_barrier_cert = create_single_integrator_barrier_certificate_with_boundary()
# hg is updated by reachGoal each iteration; the loop runs until hg > 0 —
# presumably hg > 0 indicates the goal set has been reached (confirm in reachGoal)
hg = -1
## Visualize goals and obstacles
r.axes.add_patch(patches.Circle((1, 0), 0.2, fill=False, zorder=10)) # Goal region
r.axes.add_patch(patches.Ellipse((0, 0), 0.4, 1.0, 0, fill=False)) # Obstacle
while (hg <= 0):
    x = r.get_poses()
    # NOTE(review): x_si is unused — reachGoal takes the full pose; confirm
    # whether the single-integrator state is needed here
    x_si = uni_to_si_states(x)
    u, hg = reachGoal(x)
    # Create safe control inputs (i.e., no collisions)
    u = si_barrier_cert(u, x[0:2])
    # Transform single integrator velocity commands to unicycle
    dxu = si_to_uni_dyn(u, x)
    # Set the velocities by mapping the single-integrator inputs to unicycle inputs
    r.set_velocities(np.arange(N), dxu)
    # Iterate the simulation
    r.step()
r.call_at_scripts_end()
"reachGoal.reachGoal",
"rps.robotarium.Robotarium",
"numpy.array",
"matplotlib.patches.Ellipse",
"matplotlib.patches.Circle",
"numpy.arange"
] | [((363, 489), 'rps.robotarium.Robotarium', 'robotarium.Robotarium', ([], {'number_of_robots': 'N', 'show_figure': '(True)', 'initial_conditions': 'initial_conditions', 'sim_in_real_time': '(False)'}), '(number_of_robots=N, show_figure=True,\n initial_conditions=initial_conditions, sim_in_real_time=False)\n', (384, 489), True, 'import rps.robotarium as robotarium\n'), ((332, 356), 'numpy.array', 'np.array', (['[[-1, 0.2, 0]]'], {}), '([[-1, 0.2, 0]])\n', (340, 356), True, 'import numpy as np\n'), ((740, 790), 'matplotlib.patches.Circle', 'patches.Circle', (['(1, 0)', '(0.2)'], {'fill': '(False)', 'zorder': '(10)'}), '((1, 0), 0.2, fill=False, zorder=10)\n', (754, 790), False, 'from matplotlib import patches\n'), ((834, 882), 'matplotlib.patches.Ellipse', 'patches.Ellipse', (['(0, 0)', '(0.4)', '(1.0)', '(0)'], {'fill': '(False)'}), '((0, 0), 0.4, 1.0, 0, fill=False)\n', (849, 882), False, 'from matplotlib import patches\n'), ((1002, 1014), 'reachGoal.reachGoal', 'reachGoal', (['x'], {}), '(x)\n', (1011, 1014), False, 'from reachGoal import reachGoal\n'), ((1315, 1327), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1324, 1327), True, 'import numpy as np\n')] |
from time import time
from numpy import arange, mean
def set_time(times, sample_rate, samples):
    """
    Build a uniformly spaced time axis from FIFO time stamps.

    Parameters
    ----------
    times : list
        Time stamps by fifo in each payload. NOTE: this list is mutated —
        the estimated end time (times[0] + samples * delta) is appended.
    sample_rate : float
        Nominal sample rate; only used when a single time stamp is given.
    samples : int
        Number of data samples.

    Returns
    -------
    list
        Uniformly spaced time stamps stepping by the estimated sample
        period delta, from times[0] - (samples / fifos) * delta up to the
        appended end time.
    """
    fifos = len(times)
    if fifos > 1:
        # Estimate the sample period from consecutive FIFO time stamps.
        # (zip pairing avoids shadowing the imported `time` function, which
        # the original loop variable did.)
        differences = [later - earlier for earlier, later in zip(times, times[1:])]
        delta = mean(differences) / (samples / fifos)
    else:
        # Single time stamp: fall back to the nominal sample rate
        delta = 1 / sample_rate
    # NOTE: mutates the caller's list by appending the estimated end time
    times.append(times[0] + samples * delta)
    return arange(times[0] - (samples / fifos) * delta, times[-1] + delta, delta).tolist()
def get_current_timestamp():
    """Return the current Unix time in seconds, as given by time.time()."""
    return time()
| [
"numpy.mean",
"time.time",
"numpy.arange"
] | [((682, 688), 'time.time', 'time', ([], {}), '()\n', (686, 688), False, 'from time import time\n'), ((417, 434), 'numpy.mean', 'mean', (['differences'], {}), '(differences)\n', (421, 434), False, 'from numpy import arange, mean\n'), ((558, 626), 'numpy.arange', 'arange', (['(times[0] - samples / fifos * delta)', '(times[-1] + delta)', 'delta'], {}), '(times[0] - samples / fifos * delta, times[-1] + delta, delta)\n', (564, 626), False, 'from numpy import arange, mean\n')] |
import numpy as np
from ..util.log import *
# Returns the (epsilon, delta) values of the adaptive concentration inequality
# for the given parameters.
#
# n: int (number of samples)
# delta: float (parameter delta)
# return: epsilon (parameter epsilon)
def get_type(n, delta):
    """Return epsilon of the adaptive concentration inequality.

    n: int (number of samples)
    delta: float (parameter delta)
    return: float (epsilon half-width of the confidence interval after
        n samples; shrinks roughly as sqrt(log(log(n)) / n))
    """
    # np.float was a deprecated alias for the builtin float and was removed
    # in NumPy 1.24; the builtin is equivalent and keeps division floating.
    n = float(n)
    b = -np.log(delta / 24.0) / 1.8
    epsilon = np.sqrt((0.6 * np.log(np.log(n) / np.log(1.1) + 1) + b) / n)
    return epsilon
# Run type inference to get the type judgement for the fairness criterion.
#
# c: float (minimum probability ratio to be fair)
# Delta: float (threshold on inequalities)
# n: int (number of samples)
# delta: float (parameter delta)
# E_A: float (estimate of p(offer | female))
# E_B: float (estimate of p(offer | male))
# is_causal: bool (whether to use the causal specification)
# is_log: bool (whether to print logging information)
# return: int * int | None (None - continue sampling, or (fairness, ambiguous) with 0 - not fair, 1 - fair, 0 - not ambiguous, 1 - ambiguous)
def get_fairness_type(c, Delta, n, delta, E_A, E_B, is_causal, is_log):
    """Compute the type judgement for the fairness criterion.

    c: float (minimum probability ratio to be fair)
    Delta: float (threshold on inequalities for the ambiguous cases)
    n: int (number of samples drawn so far)
    delta: float (parameter delta of the concentration inequality)
    E_A: float (estimate of p(offer | female))
    E_B: float (estimate of p(offer | male))
    is_causal: bool (True: difference spec E_A - E_B + c; False: ratio spec)
    is_log: bool (whether to print logging information)
    return: None to continue sampling, or (fairness, ambiguous) where
        fairness is 1/0 (fair / not fair) and ambiguous is 1/0.
    """
    # Step 1: Get (epsilon, delta) from the adaptive concentration inequality
    # for the current number of samples.
    epsilon = get_type(n, delta)
    # Step 2: Require |E_B| > epsilon, otherwise the ratio denominator (and
    # the judgement) is not yet trustworthy -- keep sampling.
    if np.abs(E_B) <= epsilon:
        return None
    # logging
    if is_log:
        log('INFO: n = {}, E_A = {}, E_B = {}'.format(n, E_A, E_B), INFO)
    # Step 3: Build the fairness statistic and its error bound.
    if is_causal:
        E_fair = E_A - E_B + c
        epsilon_fair = 2.0 * epsilon
        delta_fair = 2.0 * delta
    else:
        E_fair = E_A / E_B - (1 - c)
        # Presumably first-order error propagation for the ratio E_A / E_B
        # plus a remainder term -- see the accompanying paper's derivation.
        epsilon_fair = epsilon / np.abs(E_B) + epsilon * (epsilon + np.abs(E_A)) / (np.abs(E_B) * (np.abs(E_B) - epsilon))
        delta_fair = 2.0 * delta
    # logging
    if is_log:
        log('INFO: n = {}, E_fair = {}, epsilon_fair = {}, delta_fair = {}'.format(n, E_fair, epsilon_fair, delta_fair), INFO)
    # Step 4: Fairness holds (confidence interval entirely above zero).
    if E_fair - epsilon_fair > 0:
        return 1, 0
    # Step 5: Fairness does not hold (interval entirely below zero).
    if E_fair + epsilon_fair < 0:
        return 0, 0
    # Step 6: Fairness holds, ambiguously (interval within Delta of zero).
    if E_fair - epsilon_fair >= -Delta and epsilon_fair <= Delta:
        return 1, 1
    # Step 7: Fairness does not hold, ambiguously.
    if E_fair + epsilon_fair <= Delta and epsilon_fair <= Delta:
        return 0, 1
    # Step 8: Undetermined -- continue sampling.
    return None
# model_A: int -> np.array([int]) (the model p(offer | female), takes number of samples as a parameter, output is {0, 1})
# model_B: int -> np.array([int]) (the model p(offer | male), takes number of samples as a parameter, output is {0, 1})
# c: float (minimum probability ratio to be fair)
# Delta: float (threshold on inequalities)
# delta: float (parameter delta)
# n_samples: int (number of samples per iteration)
# n_max: int (maximum number of iterations)
# is_causal: bool (whether to use the causal specification)
# int: log_iters
# return: (bool, int) (is fair, number of samples)
def verify(model_A, model_B, c, Delta, delta, n_samples, n_max, is_causal, log_iters):
    """Sample both models until the fairness type judgement converges.

    model_A: sampler for p(offer | female); .sample(k) returns k draws in {0, 1}
    model_B: sampler for p(offer | male); same interface
    c: float (minimum probability ratio to be fair)
    Delta: float (threshold on inequalities)
    delta: float (parameter delta)
    n_samples: int (draws taken from each model per iteration)
    n_max: int (maximum number of iterations)
    is_causal: bool (whether to use the causal specification)
    log_iters: int or None (log every log_iters iterations; None disables)
    return: (fairness, ambiguous, total_samples, ratio) once the judgement
        converges (total_samples counts draws from both models combined),
        or None if n_max iterations pass without convergence.
    """
    # Running sums of positive outcomes per model.
    sum_A = 0.0
    sum_B = 0.0
    for iteration in range(n_max):
        # Draw a fresh batch from each model and fold it into the sums.
        sum_A += np.sum(model_A.sample(n_samples))
        sum_B += np.sum(model_B.sample(n_samples))
        # Normalise by the draws taken so far from each model.
        drawn = (iteration + 1) * n_samples
        est_A = sum_A / drawn
        est_B = sum_B / drawn
        # Emit progress every log_iters iterations when logging is enabled.
        should_log = log_iters is not None and iteration % log_iters == 0
        judgement = get_fairness_type(c, Delta, drawn, delta, est_A, est_B,
                                      is_causal, should_log)
        if judgement is not None:
            # 2 * drawn: the total sample count across both models.
            return judgement + (2 * drawn, est_A / est_B)
    # Sample budget exhausted without a judgement.
    return None
| [
"numpy.abs",
"numpy.log",
"numpy.float"
] | [((286, 297), 'numpy.float', 'np.float', (['n'], {}), '(n)\n', (294, 297), True, 'import numpy as np\n'), ((1281, 1292), 'numpy.abs', 'np.abs', (['E_B'], {}), '(E_B)\n', (1287, 1292), True, 'import numpy as np\n'), ((307, 327), 'numpy.log', 'np.log', (['(delta / 24.0)'], {}), '(delta / 24.0)\n', (313, 327), True, 'import numpy as np\n'), ((1720, 1731), 'numpy.abs', 'np.abs', (['E_B'], {}), '(E_B)\n', (1726, 1731), True, 'import numpy as np\n'), ((1771, 1782), 'numpy.abs', 'np.abs', (['E_B'], {}), '(E_B)\n', (1777, 1782), True, 'import numpy as np\n'), ((1755, 1766), 'numpy.abs', 'np.abs', (['E_A'], {}), '(E_A)\n', (1761, 1766), True, 'import numpy as np\n'), ((1786, 1797), 'numpy.abs', 'np.abs', (['E_B'], {}), '(E_B)\n', (1792, 1797), True, 'import numpy as np\n'), ((370, 379), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (376, 379), True, 'import numpy as np\n'), ((380, 391), 'numpy.log', 'np.log', (['(1.1)'], {}), '(1.1)\n', (386, 391), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import numpy as np
import tensorflow as tf
from niftynet.io.image_reader import ImageReader
from niftynet.io.image_sets_partitioner import ImageSetsPartitioner
from niftynet.layer.discrete_label_normalisation import \
DiscreteLabelNormalisationLayer
from niftynet.layer.pad import PadLayer
from niftynet.utilities.util_common import ParserNamespace
from tests.reader_modular_test import generate_2d_images, SEG_THRESHOLD
from tests.niftynet_testcase import NiftyNetTestCase
# Create the synthetic 2-D PNG fixtures used by the 2-D reader tests below.
generate_2d_images()
# test multiple modalities: two input sections ('T1', 'FLAIR') merged into
# a single 'image' source by MULTI_MOD_TASK
MULTI_MOD_DATA = {
    'T1': ParserNamespace(
        csv_file=os.path.join('testing_data', 'T1reader.csv'),
        path_to_search='testing_data',
        filename_contains=('_o_T1_time',),
        filename_not_contains=('Parcellation',),
        interp_order=3,
        pixdim=None,
        axcodes=None,
        loader=None
    ),
    'FLAIR': ParserNamespace(
        csv_file=os.path.join('testing_data', 'FLAIRreader.csv'),
        path_to_search='testing_data',
        filename_contains=('FLAIR_',),
        filename_not_contains=('Parcellation',),
        interp_order=3,
        pixdim=None,
        axcodes=None,
        loader=None
    )
}
MULTI_MOD_TASK = ParserNamespace(image=('T1', 'FLAIR'))
# test single modalities
SINGLE_MOD_DATA = {
    'lesion': ParserNamespace(
        csv_file=os.path.join('testing_data', 'lesion.csv'),
        path_to_search='testing_data',
        filename_contains=('Lesion',),
        filename_not_contains=('Parcellation',),
        interp_order=3,
        pixdim=None,
        axcodes=None,
        loader=None
    )
}
SINGLE_MOD_TASK = ParserNamespace(image=('lesion',))
# section with a csv_file only (no search path): exercises reading an
# already-built csv -- see test_existing_csv
EXISTING_DATA = {
    'lesion': ParserNamespace(
        csv_file=os.path.join('testing_data', 'lesion.csv'),
        interp_order=3,
        pixdim=None,
        axcodes=None,
        loader=None
    )
}
# test labels (nearest-neighbour interpolation, explicit pixdim resampling)
LABEL_DATA = {
    'parcellation': ParserNamespace(
        csv_file=os.path.join('testing_data', 'labels.csv'),
        path_to_search='testing_data',
        filename_contains=('Parcellation',),
        filename_not_contains=('Lesion',),
        interp_order=0,
        pixdim=(3, 3.9, 3),
        axcodes=None,
        loader=None
    )
}
LABEL_TASK = ParserNamespace(label=('parcellation',))
# deliberately malformed section used by test_errors
BAD_DATA = {
    'lesion': ParserNamespace(
        csv_file=os.path.join('testing_data', 'lesion.csv'),
        path_to_search='testing_data',
        filename_contains=('Lesion',),
        filename_not_contains=('Parcellation',),
        pixdim=None,
        axcodes=None,
        loader=None
        # missing interp_order
    )
}
# task referencing a section name that does not exist in the data spec
BAD_TASK = ParserNamespace(image=('test',))
# 2-D fixtures: colour PNGs, grayscale PNGs and binary segmentation masks
IMAGE_2D_DATA = {
    'color_images': ParserNamespace(
        csv_file=os.path.join('testing_data', 'images_2d_u.csv'),
        path_to_search=os.path.join('testing_data', 'images2d'),
        filename_contains=('_u.png',),
        interp_order=1,
        pixdim=None,
        axcodes=None,
        loader=None
    ),
    'gray_images': ParserNamespace(
        csv_file=os.path.join('testing_data', 'images_2d_g.csv'),
        path_to_search=os.path.join('testing_data', 'images2d'),
        filename_contains=('_g.png',),
        interp_order=1,
        pixdim=None,
        axcodes=None,
        loader=None
    ),
    'seg_masks': ParserNamespace(
        csv_file=os.path.join('testing_data', 'images_2d_m.csv'),
        path_to_search=os.path.join('testing_data', 'images2d'),
        filename_contains=('_m.png',),
        interp_order=0,
        pixdim=None,
        axcodes=None,
        loader=None
    )
}
IMAGE_2D_TASK_COLOR = ParserNamespace(image=('color_images',))
IMAGE_2D_TASK_GRAY = ParserNamespace(image=('gray_images',))
IMAGE_2D_TASK_MASK = ParserNamespace(image=('seg_masks',))
# default data_partitioner: build one file list per data specification above
data_partitioner = ImageSetsPartitioner()
multi_mod_list = data_partitioner.initialise(MULTI_MOD_DATA).get_file_list()
single_mod_list = data_partitioner.initialise(SINGLE_MOD_DATA).get_file_list()
existing_list = data_partitioner.initialise(EXISTING_DATA).get_file_list()
label_list = data_partitioner.initialise(LABEL_DATA).get_file_list()
bad_data_list = data_partitioner.initialise(BAD_DATA).get_file_list()
image2d_data_list = data_partitioner.initialise(IMAGE_2D_DATA).get_file_list()
class ImageReaderTest(NiftyNetTestCase):
    """Unit tests for ImageReader: construction, properties, preprocessing
    layers, error handling and 2-D image loading."""

    def test_initialisation(self):
        # An output name absent from the task spec must raise at initialise.
        with self.assertRaisesRegexp(ValueError, ''):
            reader = ImageReader(['test'])
            reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
        reader = ImageReader(None)
        # reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
        # Valid multi-modal and single-modal setups each yield 4 subjects.
        reader = ImageReader(['image'])
        reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
        self.assertEqual(len(reader.output_list), 4)
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        self.assertEqual(len(reader.output_list), 4)
        # An empty file list must raise.
        reader = ImageReader(['image'])
        with self.assertRaisesRegexp(ValueError, ''):
            reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, [])

    def test_properties(self):
        # Check the reader's metadata after a single-modality initialise.
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        self.assertEqual(len(reader.output_list), 4)
        self.assertDictEqual(reader.spatial_ranks, {'image': 3})
        self.assertDictEqual(reader.shapes,
                             {'image': (256, 168, 256, 1, 1)})
        self.assertDictEqual(reader.tf_dtypes, {'image': tf.float32})
        self.assertEqual(reader.names, ('image',))
        self.assertDictEqual(reader.input_sources,
                             {'image': ('lesion',)})
        self.assertEqual(reader.get_subject_id(1)[:4], 'Fin_')
        self.assertTrue(isinstance(reader.get_subject(1), dict))

    def test_existing_csv(self):
        # First run writes the csv; a csv-only spec must then reproduce
        # exactly the same reader metadata.
        reader_for_csv = ImageReader(['image'])
        reader_for_csv.initialise(
            SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        reader = ImageReader(['image'])
        reader.initialise(EXISTING_DATA, SINGLE_MOD_TASK, existing_list)
        self.assertEqual(len(reader.output_list), 4)
        self.assertDictEqual(reader.spatial_ranks, {'image': 3})
        self.assertDictEqual(reader.shapes,
                             {'image': (256, 168, 256, 1, 1)})
        self.assertDictEqual(reader.tf_dtypes, {'image': tf.float32})
        self.assertEqual(reader.names, ('image',))
        self.assertDictEqual(reader.input_sources,
                             {'image': ('lesion',)})
        self.assertEqual(reader.get_subject_id(1)[:4], 'Fin_')
        self.assertTrue(isinstance(reader.get_subject(1), dict))

    def test_operations(self):
        # Calling the reader yields (index, data dict, interp-order dict).
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        idx, data, interp_order = reader()
        self.assertEqual(
            SINGLE_MOD_DATA['lesion'].interp_order, interp_order['image'][0])
        self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))

    def test_preprocessing(self):
        # A PadLayer with border (10, 5, 5) grows each spatial dim by 2x.
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        idx, data, interp_order = reader()
        self.assertEqual(SINGLE_MOD_DATA['lesion'].interp_order,
                         interp_order['image'][0])
        self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))
        reader.add_preprocessing_layers(
            [PadLayer(image_name=['image'], border=(10, 5, 5))])
        idx, data, interp_order = reader(idx=2)
        self.assertEqual(idx, 2)
        self.assertAllClose(data['image'].shape, (276, 178, 266, 1, 1))

    def test_preprocessing_zero_padding(self):
        # A zero border must leave the image shape untouched.
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        idx, data, interp_order = reader()
        self.assertEqual(SINGLE_MOD_DATA['lesion'].interp_order,
                         interp_order['image'][0])
        self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))
        reader.add_preprocessing_layers(
            [PadLayer(image_name=['image'], border=(0, 0, 0))])
        idx, data, interp_order = reader(idx=2)
        self.assertEqual(idx, 2)
        self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))

    def test_trainable_preprocessing(self):
        # Start from a clean slate: remove any previously trained mapping.
        label_file = os.path.join('testing_data', 'label_reader.txt')
        if os.path.exists(label_file):
            os.remove(label_file)
        label_normaliser = DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(LABEL_TASK).get('label'),
            model_filename=os.path.join('testing_data', 'label_reader.txt'))
        reader = ImageReader(['label'])
        # Adding a trainable layer before initialise must fail.
        with self.assertRaisesRegexp(AssertionError, ''):
            reader.add_preprocessing_layers(label_normaliser)
        reader.initialise(LABEL_DATA, LABEL_TASK, label_list)
        reader.add_preprocessing_layers(label_normaliser)
        reader.add_preprocessing_layers(
            [PadLayer(image_name=['label'], border=(10, 5, 5))])
        idx, data, interp_order = reader(idx=0)
        unique_data = np.unique(data['label'])
        # Two acceptable label mappings (v2 lacks label 19. -- the test data
        # may or may not contain that label depending on the fixture).
        expected_v1 = np.array(
            [0., 1., 2., 3., 4., 5., 6., 7., 8.,
             9., 10., 11., 12., 13., 14., 15., 16., 17.,
             18., 19., 20., 21., 22., 23., 24., 25., 26., 27.,
             28., 29., 30., 31., 32., 33., 34., 35., 36.,
             37., 38., 39., 40., 41., 42., 43., 44., 45.,
             46., 47., 48., 49., 50., 51., 52., 53., 54.,
             55., 56., 57., 58., 59., 60., 61., 62., 63.,
             64., 65., 66., 67., 68., 69., 70., 71., 72.,
             73., 74., 75., 76., 77., 78., 79., 80., 81.,
             82., 83., 84., 85., 86., 87., 88., 89., 90.,
             91., 92., 93., 94., 95., 96., 97., 98., 99.,
             100., 101., 102., 103., 104., 105., 106., 107., 108.,
             109., 110., 111., 112., 113., 114., 115., 116., 117.,
             118., 119., 120., 121., 122., 123., 124., 125., 126.,
             127., 128., 129., 130., 131., 132., 133., 134., 135.,
             136., 137., 138., 139., 140., 141., 142., 143., 144.,
             145., 146., 147., 148., 149., 150., 151., 152., 153.,
             154., 155., 156., 157.], dtype=np.float32)
        expected_v2 = np.array(
            [0., 1., 2., 3., 4., 5., 6., 7., 8.,
             9., 10., 11., 12., 13., 14., 15., 16., 17.,
             18., 20., 21., 22., 23., 24., 25., 26., 27.,
             28., 29., 30., 31., 32., 33., 34., 35., 36.,
             37., 38., 39., 40., 41., 42., 43., 44., 45.,
             46., 47., 48., 49., 50., 51., 52., 53., 54.,
             55., 56., 57., 58., 59., 60., 61., 62., 63.,
             64., 65., 66., 67., 68., 69., 70., 71., 72.,
             73., 74., 75., 76., 77., 78., 79., 80., 81.,
             82., 83., 84., 85., 86., 87., 88., 89., 90.,
             91., 92., 93., 94., 95., 96., 97., 98., 99.,
             100., 101., 102., 103., 104., 105., 106., 107., 108.,
             109., 110., 111., 112., 113., 114., 115., 116., 117.,
             118., 119., 120., 121., 122., 123., 124., 125., 126.,
             127., 128., 129., 130., 131., 132., 133., 134., 135.,
             136., 137., 138., 139., 140., 141., 142., 143., 144.,
             145., 146., 147., 148., 149., 150., 151., 152., 153.,
             154., 155., 156., 157.], dtype=np.float32)
        compatible_assert = \
            np.all(unique_data == expected_v1) or \
            np.all(unique_data == expected_v2)
        self.assertTrue(compatible_assert)
        self.assertAllClose(data['label'].shape, (103, 74, 93, 1, 1))

    def test_errors(self):
        reader = ImageReader(['image'])
        reader.initialise(BAD_DATA, SINGLE_MOD_TASK, bad_data_list)
        # A task naming an unknown data section must raise.
        with self.assertRaisesRegexp(ValueError, ''):
            reader = ImageReader(['image'])
            reader.initialise(SINGLE_MOD_DATA, BAD_TASK, single_mod_list)
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        # An out-of-range index yields (-1, None, ...) rather than raising.
        idx, data, interp_order = reader(idx=100)
        self.assertEqual(idx, -1)
        self.assertEqual(data, None)
        idx, data, interp_order = reader(shuffle=True)
        self.assertEqual(data['image'].shape, (256, 168, 256, 1, 1))

    def test_images2d(self):
        reader = ImageReader(['image'])
        # COLOR IMAGES
        reader.initialise(IMAGE_2D_DATA, IMAGE_2D_TASK_COLOR,
                          image2d_data_list)
        idx, data, interp_order = reader()
        image = data['image']
        # Check index
        self.assertGreaterEqual(idx, 0)
        self.assertLess(idx, 10)
        # Check data type
        self.assertGreaterEqual(image.min(), 0)
        self.assertLessEqual(image.max(), 255)
        self.assertEqual(image.dtype, np.float32)
        # Check shape (last axis carries the 3 colour channels)
        self.assertEqual(image.ndim, 5)
        self.assertAllEqual(image.shape, (100, 100, 1, 1, 3))
        self.assertEqual(interp_order['image'], (1,))
        # GRAY IMAGES
        reader.initialise(IMAGE_2D_DATA, IMAGE_2D_TASK_GRAY,
                          image2d_data_list)
        idx, data, interp_order = reader()
        image = data['image']
        # Check index
        self.assertGreaterEqual(idx, 0)
        self.assertLess(idx, 10)
        # Check data type
        self.assertGreaterEqual(image.min(), 0)
        self.assertLessEqual(image.max(), 255)
        self.assertEqual(image.dtype, np.float32)
        # Check shape (single channel)
        self.assertEqual(image.ndim, 5)
        self.assertAllEqual(image.shape, (100, 100, 1, 1, 1))
        self.assertEqual(interp_order['image'], (1,))
        gray_idx, gray_data, gray_order = reader(idx=5)
        # SEGMENTATION MASKS
        reader.initialise(IMAGE_2D_DATA, IMAGE_2D_TASK_MASK,
                          image2d_data_list)
        idx, data, interp_order = reader()
        image = data['image']
        # Check index
        self.assertGreaterEqual(idx, 0)
        self.assertLess(idx, 10)
        # Check data type (binary mask: exactly two distinct values)
        self.assertGreaterEqual(image.min(), 0)
        self.assertLessEqual(image.max(), 255)
        self.assertEqual(image.dtype, np.float32)
        self.assertEqual(np.unique(image).size, 2)
        # Check shape
        self.assertEqual(image.ndim, 5)
        self.assertAllEqual(image.shape, (100, 100, 1, 1, 1))
        self.assertEqual(interp_order['image'], (0,))
        # Compare segmentation masks to thresholding original image
        mask_idx, mask_data, mask_order = reader(idx=5)
        gray_data = gray_data['image']
        mask_data = mask_data['image']
        self.assertEqual(gray_idx, mask_idx)
        self.assertEqual(gray_order['image'], (1,))
        self.assertEqual(mask_order['image'], (0,))
        self.assertAllEqual((gray_data > SEG_THRESHOLD) * 255, mask_data)
if __name__ == "__main__":
    # Run all test cases in this module via the TensorFlow test runner.
    tf.test.main()
| [
"os.path.exists",
"tests.reader_modular_test.generate_2d_images",
"numpy.unique",
"niftynet.io.image_sets_partitioner.ImageSetsPartitioner",
"os.path.join",
"tensorflow.test.main",
"numpy.array",
"niftynet.layer.pad.PadLayer",
"niftynet.io.image_reader.ImageReader",
"numpy.all",
"niftynet.utilit... | [((571, 591), 'tests.reader_modular_test.generate_2d_images', 'generate_2d_images', ([], {}), '()\n', (589, 591), False, 'from tests.reader_modular_test import generate_2d_images, SEG_THRESHOLD\n'), ((1288, 1326), 'niftynet.utilities.util_common.ParserNamespace', 'ParserNamespace', ([], {'image': "('T1', 'FLAIR')"}), "(image=('T1', 'FLAIR'))\n", (1303, 1326), False, 'from niftynet.utilities.util_common import ParserNamespace\n'), ((1705, 1739), 'niftynet.utilities.util_common.ParserNamespace', 'ParserNamespace', ([], {'image': "('lesion',)"}), "(image=('lesion',))\n", (1720, 1739), False, 'from niftynet.utilities.util_common import ParserNamespace\n'), ((2316, 2356), 'niftynet.utilities.util_common.ParserNamespace', 'ParserNamespace', ([], {'label': "('parcellation',)"}), "(label=('parcellation',))\n", (2331, 2356), False, 'from niftynet.utilities.util_common import ParserNamespace\n'), ((2703, 2735), 'niftynet.utilities.util_common.ParserNamespace', 'ParserNamespace', ([], {'image': "('test',)"}), "(image=('test',))\n", (2718, 2735), False, 'from niftynet.utilities.util_common import ParserNamespace\n'), ((3678, 3718), 'niftynet.utilities.util_common.ParserNamespace', 'ParserNamespace', ([], {'image': "('color_images',)"}), "(image=('color_images',))\n", (3693, 3718), False, 'from niftynet.utilities.util_common import ParserNamespace\n'), ((3740, 3779), 'niftynet.utilities.util_common.ParserNamespace', 'ParserNamespace', ([], {'image': "('gray_images',)"}), "(image=('gray_images',))\n", (3755, 3779), False, 'from niftynet.utilities.util_common import ParserNamespace\n'), ((3801, 3838), 'niftynet.utilities.util_common.ParserNamespace', 'ParserNamespace', ([], {'image': "('seg_masks',)"}), "(image=('seg_masks',))\n", (3816, 3838), False, 'from niftynet.utilities.util_common import ParserNamespace\n'), ((3886, 3908), 'niftynet.io.image_sets_partitioner.ImageSetsPartitioner', 'ImageSetsPartitioner', ([], {}), '()\n', (3906, 3908), False, 'from 
niftynet.io.image_sets_partitioner import ImageSetsPartitioner\n'), ((15225, 15239), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (15237, 15239), True, 'import tensorflow as tf\n'), ((4628, 4645), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (['None'], {}), '(None)\n', (4639, 4645), False, 'from niftynet.io.image_reader import ImageReader\n'), ((4740, 4762), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (4751, 4762), False, 'from niftynet.io.image_reader import ImageReader\n'), ((4908, 4930), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (4919, 4930), False, 'from niftynet.io.image_reader import ImageReader\n'), ((5079, 5101), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (5090, 5101), False, 'from niftynet.io.image_reader import ImageReader\n'), ((5273, 5295), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (5284, 5295), False, 'from niftynet.io.image_reader import ImageReader\n'), ((6010, 6032), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (6021, 6032), False, 'from niftynet.io.image_reader import ImageReader\n'), ((6148, 6170), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (6159, 6170), False, 'from niftynet.io.image_reader import ImageReader\n'), ((6871, 6893), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (6882, 6893), False, 'from niftynet.io.image_reader import ImageReader\n'), ((7242, 7264), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (7253, 7264), False, 'from niftynet.io.image_reader import ImageReader\n'), ((7897, 7919), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (7908, 7919), False, 'from 
niftynet.io.image_reader import ImageReader\n'), ((8552, 8600), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""label_reader.txt"""'], {}), "('testing_data', 'label_reader.txt')\n", (8564, 8600), False, 'import os\n'), ((8612, 8638), 'os.path.exists', 'os.path.exists', (['label_file'], {}), '(label_file)\n', (8626, 8638), False, 'import os\n'), ((8914, 8936), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['label']"], {}), "(['label'])\n", (8925, 8936), False, 'from niftynet.io.image_reader import ImageReader\n'), ((9353, 9377), 'numpy.unique', 'np.unique', (["data['label']"], {}), "(data['label'])\n", (9362, 9377), True, 'import numpy as np\n'), ((9400, 10486), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, \n 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0,\n 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0,\n 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0,\n 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0,\n 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0,\n 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0,\n 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0,\n 98.0, 99.0, 100.0, 101.0, 102.0, 103.0, 104.0, 105.0, 106.0, 107.0, \n 108.0, 109.0, 110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, 117.0, \n 118.0, 119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 125.0, 126.0, 127.0, \n 128.0, 129.0, 130.0, 131.0, 132.0, 133.0, 134.0, 135.0, 136.0, 137.0, \n 138.0, 139.0, 140.0, 141.0, 142.0, 143.0, 144.0, 145.0, 146.0, 147.0, \n 148.0, 149.0, 150.0, 151.0, 152.0, 153.0, 154.0, 155.0, 156.0, 157.0]'], {'dtype': 'np.float32'}), '([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, \n 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0,\n 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0,\n 
36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0,\n 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0,\n 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0,\n 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0,\n 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0,\n 96.0, 97.0, 98.0, 99.0, 100.0, 101.0, 102.0, 103.0, 104.0, 105.0, 106.0,\n 107.0, 108.0, 109.0, 110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, \n 117.0, 118.0, 119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 125.0, 126.0, \n 127.0, 128.0, 129.0, 130.0, 131.0, 132.0, 133.0, 134.0, 135.0, 136.0, \n 137.0, 138.0, 139.0, 140.0, 141.0, 142.0, 143.0, 144.0, 145.0, 146.0, \n 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 153.0, 154.0, 155.0, 156.0, \n 157.0], dtype=np.float32)\n', (9408, 10486), True, 'import numpy as np\n'), ((10574, 11655), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, \n 14.0, 15.0, 16.0, 17.0, 18.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0,\n 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0,\n 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0,\n 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0,\n 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0, 74.0,\n 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0,\n 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0,\n 99.0, 100.0, 101.0, 102.0, 103.0, 104.0, 105.0, 106.0, 107.0, 108.0, \n 109.0, 110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.0, \n 119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 125.0, 126.0, 127.0, 128.0, \n 129.0, 130.0, 131.0, 132.0, 133.0, 134.0, 135.0, 136.0, 137.0, 138.0, \n 139.0, 140.0, 141.0, 142.0, 143.0, 144.0, 145.0, 146.0, 147.0, 148.0, \n 149.0, 150.0, 151.0, 152.0, 153.0, 154.0, 155.0, 156.0, 157.0]'], {'dtype': 'np.float32'}), '([0.0, 1.0, 
2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, \n 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 20.0, 21.0, 22.0, 23.0, 24.0,\n 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0,\n 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0,\n 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0,\n 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0,\n 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0,\n 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0,\n 97.0, 98.0, 99.0, 100.0, 101.0, 102.0, 103.0, 104.0, 105.0, 106.0, \n 107.0, 108.0, 109.0, 110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, \n 117.0, 118.0, 119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 125.0, 126.0, \n 127.0, 128.0, 129.0, 130.0, 131.0, 132.0, 133.0, 134.0, 135.0, 136.0, \n 137.0, 138.0, 139.0, 140.0, 141.0, 142.0, 143.0, 144.0, 145.0, 146.0, \n 147.0, 148.0, 149.0, 150.0, 151.0, 152.0, 153.0, 154.0, 155.0, 156.0, \n 157.0], dtype=np.float32)\n', (10582, 11655), True, 'import numpy as np\n'), ((12008, 12030), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (12019, 12030), False, 'from niftynet.io.image_reader import ImageReader\n'), ((12289, 12311), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (12300, 12311), False, 'from niftynet.io.image_reader import ImageReader\n'), ((12681, 12703), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (12692, 12703), False, 'from niftynet.io.image_reader import ImageReader\n'), ((682, 726), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""T1reader.csv"""'], {}), "('testing_data', 'T1reader.csv')\n", (694, 726), False, 'import os\n'), ((1000, 1047), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""FLAIRreader.csv"""'], {}), "('testing_data', 'FLAIRreader.csv')\n", (1012, 1047), False, 'import os\n'), 
((1421, 1463), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""lesion.csv"""'], {}), "('testing_data', 'lesion.csv')\n", (1433, 1463), False, 'import os\n'), ((1807, 1849), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""lesion.csv"""'], {}), "('testing_data', 'lesion.csv')\n", (1819, 1849), False, 'import os\n'), ((2030, 2072), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""labels.csv"""'], {}), "('testing_data', 'labels.csv')\n", (2042, 2072), False, 'import os\n'), ((2419, 2461), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""lesion.csv"""'], {}), "('testing_data', 'lesion.csv')\n", (2431, 2461), False, 'import os\n'), ((2809, 2856), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""images_2d_u.csv"""'], {}), "('testing_data', 'images_2d_u.csv')\n", (2821, 2856), False, 'import os\n'), ((2881, 2921), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""images2d"""'], {}), "('testing_data', 'images2d')\n", (2893, 2921), False, 'import os\n'), ((3109, 3156), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""images_2d_g.csv"""'], {}), "('testing_data', 'images_2d_g.csv')\n", (3121, 3156), False, 'import os\n'), ((3181, 3221), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""images2d"""'], {}), "('testing_data', 'images2d')\n", (3193, 3221), False, 'import os\n'), ((3407, 3454), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""images_2d_m.csv"""'], {}), "('testing_data', 'images_2d_m.csv')\n", (3419, 3454), False, 'import os\n'), ((3479, 3519), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""images2d"""'], {}), "('testing_data', 'images2d')\n", (3491, 3519), False, 'import os\n'), ((4511, 4532), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['test']"], {}), "(['test'])\n", (4522, 4532), False, 'from niftynet.io.image_reader import ImageReader\n'), ((8652, 8673), 'os.remove', 'os.remove', (['label_file'], {}), '(label_file)\n', (8661, 8673), 
False, 'import os\n'), ((11763, 11797), 'numpy.all', 'np.all', (['(unique_data == expected_v1)'], {}), '(unique_data == expected_v1)\n', (11769, 11797), True, 'import numpy as np\n'), ((11815, 11849), 'numpy.all', 'np.all', (['(unique_data == expected_v2)'], {}), '(unique_data == expected_v2)\n', (11821, 11849), True, 'import numpy as np\n'), ((12174, 12196), 'niftynet.io.image_reader.ImageReader', 'ImageReader', (["['image']"], {}), "(['image'])\n", (12185, 12196), False, 'from niftynet.io.image_reader import ImageReader\n'), ((7627, 7676), 'niftynet.layer.pad.PadLayer', 'PadLayer', ([], {'image_name': "['image']", 'border': '(10, 5, 5)'}), "(image_name=['image'], border=(10, 5, 5))\n", (7635, 7676), False, 'from niftynet.layer.pad import PadLayer\n'), ((8282, 8330), 'niftynet.layer.pad.PadLayer', 'PadLayer', ([], {'image_name': "['image']", 'border': '(0, 0, 0)'}), "(image_name=['image'], border=(0, 0, 0))\n", (8290, 8330), False, 'from niftynet.layer.pad import PadLayer\n'), ((8847, 8895), 'os.path.join', 'os.path.join', (['"""testing_data"""', '"""label_reader.txt"""'], {}), "('testing_data', 'label_reader.txt')\n", (8859, 8895), False, 'import os\n'), ((9231, 9280), 'niftynet.layer.pad.PadLayer', 'PadLayer', ([], {'image_name': "['label']", 'border': '(10, 5, 5)'}), "(image_name=['label'], border=(10, 5, 5))\n", (9239, 9280), False, 'from niftynet.layer.pad import PadLayer\n'), ((14560, 14576), 'numpy.unique', 'np.unique', (['image'], {}), '(image)\n', (14569, 14576), True, 'import numpy as np\n')] |
from __future__ import division
import os
import numpy as np
import pprint
import tensorflow as tf
import tensorflow.contrib.slim as slim
import pickle, csv
from utils import *
from model import UNet3D, SurvivalVAE
# Command-line configuration (TensorFlow 1.x flag API). Each DEFINE_* call
# registers one flag; the bracketed value in the help string is the default.
flags = tf.app.flags
# Training schedule and data locations.
flags.DEFINE_integer("epoch", 4, "Epoch to train [4]")
flags.DEFINE_string("train_patch_dir", "patches", "Directory of the training data [patches]")
flags.DEFINE_bool("split_train", False, "Whether to split the train data into train and val [False]")
flags.DEFINE_string("train_data_dir", "../BraTS17TrainingData", "Directory of the train data [../BraTS17TrainingData]")
flags.DEFINE_string("deploy_data_dir", "../BraTS17ValidationData", "Directory of the test data [../BraTS17ValidationData]")
flags.DEFINE_string("deploy_output_dir", "output_validation", "Directory name of the output data [output]")
flags.DEFINE_string("train_csv", "../BraTS17TrainingData/survival_data.csv", "CSV path of the training data")
flags.DEFINE_string("deploy_csv", "../BraTS17ValidationData/survival_evaluation.csv", "CSV path of the validation data")
# Network architecture hyper-parameters.
flags.DEFINE_integer("batch_size", 1, "Batch size [1]")
flags.DEFINE_integer("seg_features_root", 48, "Number of features in the first filter in the seg net [48]")
flags.DEFINE_integer("survival_features", 16, "Number of features in the survival net [16]")
flags.DEFINE_integer("conv_size", 3, "Convolution kernel size in encoding and decoding paths [3]")
flags.DEFINE_integer("layers", 3, "Encoding and deconding layers [3]")
flags.DEFINE_string("loss_type", "cross_entropy", "Loss type in the model [cross_entropy]")
flags.DEFINE_float("dropout", 0.5, "Drop out ratio [0.5]")
# Output directories and run mode switches.
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("log_dir", "logs", "Directory name to save logs [logs]")
flags.DEFINE_boolean("train", False, "True for training, False for deploying [False]")
flags.DEFINE_boolean("run_seg", True, "True if run segmentation [True]")
flags.DEFINE_boolean("run_survival", False, "True if run survival prediction [True]")
FLAGS = flags.FLAGS
def main(_):
    """Entry point: build the train/validation splits, then run the
    segmentation network and/or the survival-prediction network depending
    on the command-line flags.

    Args:
        _: unused positional argument supplied by ``tf.app.run()``.
    """
    pp = pprint.PrettyPrinter()
    pp.pprint(flags.FLAGS.__flags)

    # Collect every BraTS case directory found under the training root.
    all_train_paths = []
    for dirpath, dirnames, files in os.walk(FLAGS.train_data_dir):
        if os.path.basename(dirpath)[0:7] == 'Brats17':
            all_train_paths.append(dirpath)

    if FLAGS.split_train:
        split_log = os.path.join(FLAGS.train_patch_dir, 'files.log')
        if os.path.exists(split_log):
            # Reuse a previously saved split. Bug fix: pickle requires a
            # binary-mode file handle ('rb'), text mode breaks on Python 3.
            with open(split_log, 'rb') as f:
                training_paths, testing_paths = pickle.load(f)
        else:
            # Bug fix: the patch files live in train_patch_dir, but the
            # original listed train_data_dir here; also skip '.log' files,
            # matching the non-split branch below.
            all_paths = [os.path.join(FLAGS.train_patch_dir, p)
                         for p in sorted(os.listdir(FLAGS.train_patch_dir))
                         if '.log' not in p]
            np.random.shuffle(all_paths)
            n_training = int(len(all_paths) * 4 / 5)
            training_paths = all_paths[:n_training]
            testing_paths = all_paths[n_training:]
            # Save the split next to the patches so the branch above finds
            # it on the next run (bug fix: it was written to train_data_dir
            # but read from train_patch_dir), and use binary mode for pickle.
            with open(split_log, 'wb') as f:
                pickle.dump([training_paths, testing_paths], f)
        training_ids = [os.path.basename(i) for i in training_paths]
        testing_ids = [os.path.basename(i) for i in testing_paths]
        # Partition the survival CSV rows according to the patch split.
        training_survival_data = {}
        testing_survival_data = {}
        with open(FLAGS.train_csv, 'r') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                if row[0] in training_ids:
                    training_survival_data[row[0]] = (row[1], row[2])
                elif row[0] in testing_ids:
                    testing_survival_data[row[0]] = (row[1], row[2])
        training_survival_paths = [p for p in all_train_paths
                                   if os.path.basename(p) in training_survival_data.keys()]
        testing_survival_paths = [p for p in all_train_paths
                                  if os.path.basename(p) in testing_survival_data.keys()]
    else:
        # No held-out split: train on every patch.
        training_paths = [os.path.join(FLAGS.train_patch_dir, name)
                          for name in os.listdir(FLAGS.train_patch_dir)
                          if '.log' not in name]
        testing_paths = None
        training_ids = [os.path.basename(i) for i in training_paths]
        training_survival_paths = []
        testing_survival_paths = None
        training_survival_data = {}
        testing_survival_data = None
        with open(FLAGS.train_csv, 'r') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                if row[0] in training_ids:
                    training_survival_data[row[0]] = (row[1], row[2])
        training_survival_paths = [p for p in all_train_paths
                                   if os.path.basename(p) in training_survival_data.keys()]

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.log_dir):
        os.makedirs(FLAGS.log_dir)

    # Segmentation net
    if FLAGS.run_seg:
        run_config = tf.ConfigProto()
        with tf.Session(config=run_config) as sess:
            unet = UNet3D(sess, checkpoint_dir=FLAGS.checkpoint_dir, log_dir=FLAGS.log_dir,
                          training_paths=training_paths, testing_paths=testing_paths,
                          batch_size=FLAGS.batch_size, layers=FLAGS.layers,
                          features_root=FLAGS.seg_features_root, conv_size=FLAGS.conv_size,
                          dropout=FLAGS.dropout, loss_type=FLAGS.loss_type)
            if FLAGS.train:
                model_vars = tf.trainable_variables()
                slim.model_analyzer.analyze_vars(model_vars, print_info=True)
                train_config = {'epoch': FLAGS.epoch}
                unet.train(train_config)
            else:
                # Deploy
                if not os.path.exists(FLAGS.deploy_output_dir):
                    os.makedirs(FLAGS.deploy_output_dir)
                unet.deploy(FLAGS.deploy_data_dir, FLAGS.deploy_output_dir)
        # Release the segmentation graph before building the survival net.
        tf.reset_default_graph()

    # Survival net
    if FLAGS.run_survival:
        run_config = tf.ConfigProto()
        with tf.Session(config=run_config) as sess:
            survivalvae = SurvivalVAE(sess, checkpoint_dir=FLAGS.checkpoint_dir, log_dir=FLAGS.log_dir,
                                      training_paths=training_survival_paths,
                                      testing_paths=testing_survival_paths,
                                      training_survival_data=training_survival_data,
                                      testing_survival_data=testing_survival_data)
            if FLAGS.train:
                model_vars = tf.trainable_variables()
                slim.model_analyzer.analyze_vars(model_vars, print_info=True)
                train_config = {'epoch': FLAGS.epoch * 100}
                survivalvae.train(train_config)
            else:
                # Collect the validation case directories and survival CSV.
                all_deploy_paths = []
                for dirpath, dirnames, files in os.walk(FLAGS.deploy_data_dir):
                    if os.path.basename(dirpath)[0:7] == 'Brats17':
                        all_deploy_paths.append(dirpath)
                deploy_survival_data = {}
                with open(FLAGS.deploy_csv, 'r') as csvfile:
                    reader = csv.reader(csvfile)
                    for row in reader:
                        if row[0] != 'Brats17ID':
                            deploy_survival_data[row[0]] = row[1]
                deploy_survival_paths = [p for p in all_deploy_paths
                                         if os.path.basename(p) in deploy_survival_data.keys()]
                # Bug fix: the original called the undefined name `survivalnet`
                # with non-existent FLAGS attributes; use the local results.
                survivalvae.deploy(deploy_survival_paths, deploy_survival_data)
if __name__ == '__main__':
tf.app.run() | [
"os.walk",
"tensorflow.app.run",
"model.SurvivalVAE",
"os.path.exists",
"os.listdir",
"tensorflow.Session",
"pprint.PrettyPrinter",
"tensorflow.ConfigProto",
"tensorflow.trainable_variables",
"csv.reader",
"pickle.load",
"model.UNet3D",
"tensorflow.reset_default_graph",
"pickle.dump",
"o... | [((2123, 2145), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (2143, 2145), False, 'import pprint\n'), ((2259, 2288), 'os.walk', 'os.walk', (['FLAGS.train_data_dir'], {}), '(FLAGS.train_data_dir)\n', (2266, 2288), False, 'import os\n'), ((7677, 7689), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (7687, 7689), True, 'import tensorflow as tf\n'), ((4761, 4797), 'os.path.exists', 'os.path.exists', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (4775, 4797), False, 'import os\n'), ((4807, 4840), 'os.makedirs', 'os.makedirs', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (4818, 4840), False, 'import os\n'), ((4857, 4886), 'os.path.exists', 'os.path.exists', (['FLAGS.log_dir'], {}), '(FLAGS.log_dir)\n', (4871, 4886), False, 'import os\n'), ((4896, 4922), 'os.makedirs', 'os.makedirs', (['FLAGS.log_dir'], {}), '(FLAGS.log_dir)\n', (4907, 4922), False, 'import os\n'), ((4994, 5010), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (5008, 5010), True, 'import tensorflow as tf\n'), ((5998, 6022), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (6020, 6022), True, 'import tensorflow as tf\n'), ((6092, 6108), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (6106, 6108), True, 'import tensorflow as tf\n'), ((2443, 2491), 'os.path.join', 'os.path.join', (['FLAGS.train_patch_dir', '"""files.log"""'], {}), "(FLAGS.train_patch_dir, 'files.log')\n", (2455, 2491), False, 'import os\n'), ((2781, 2809), 'numpy.random.shuffle', 'np.random.shuffle', (['all_paths'], {}), '(all_paths)\n', (2798, 2809), True, 'import numpy as np\n'), ((3193, 3212), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (3209, 3212), False, 'import os\n'), ((3261, 3280), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (3277, 3280), False, 'import os\n'), ((3450, 3469), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (3460, 3469), False, 'import pickle, 
csv\n'), ((4000, 4041), 'os.path.join', 'os.path.join', (['FLAGS.train_patch_dir', 'name'], {}), '(FLAGS.train_patch_dir, name)\n', (4012, 4041), False, 'import os\n'), ((4191, 4210), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (4207, 4210), False, 'import os\n'), ((4458, 4477), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (4468, 4477), False, 'import pickle, csv\n'), ((5024, 5053), 'tensorflow.Session', 'tf.Session', ([], {'config': 'run_config'}), '(config=run_config)\n', (5034, 5053), True, 'import tensorflow as tf\n'), ((5082, 5398), 'model.UNet3D', 'UNet3D', (['sess'], {'checkpoint_dir': 'FLAGS.checkpoint_dir', 'log_dir': 'FLAGS.log_dir', 'training_paths': 'training_paths', 'testing_paths': 'testing_paths', 'batch_size': 'FLAGS.batch_size', 'layers': 'FLAGS.layers', 'features_root': 'FLAGS.seg_features_root', 'conv_size': 'FLAGS.conv_size', 'dropout': 'FLAGS.dropout', 'loss_type': 'FLAGS.loss_type'}), '(sess, checkpoint_dir=FLAGS.checkpoint_dir, log_dir=FLAGS.log_dir,\n training_paths=training_paths, testing_paths=testing_paths, batch_size=\n FLAGS.batch_size, layers=FLAGS.layers, features_root=FLAGS.\n seg_features_root, conv_size=FLAGS.conv_size, dropout=FLAGS.dropout,\n loss_type=FLAGS.loss_type)\n', (5088, 5398), False, 'from model import UNet3D, SurvivalVAE\n'), ((6122, 6151), 'tensorflow.Session', 'tf.Session', ([], {'config': 'run_config'}), '(config=run_config)\n', (6132, 6151), True, 'import tensorflow as tf\n'), ((6187, 6448), 'model.SurvivalVAE', 'SurvivalVAE', (['sess'], {'checkpoint_dir': 'FLAGS.checkpoint_dir', 'log_dir': 'FLAGS.log_dir', 'training_paths': 'training_survival_paths', 'testing_paths': 'testing_survival_paths', 'training_survival_data': 'training_survival_data', 'testing_survival_data': 'testing_survival_data'}), '(sess, checkpoint_dir=FLAGS.checkpoint_dir, log_dir=FLAGS.\n log_dir, training_paths=training_survival_paths, testing_paths=\n testing_survival_paths, 
training_survival_data=training_survival_data,\n testing_survival_data=testing_survival_data)\n', (6198, 6448), False, 'from model import UNet3D, SurvivalVAE\n'), ((2301, 2326), 'os.path.basename', 'os.path.basename', (['dirpath'], {}), '(dirpath)\n', (2317, 2326), False, 'import os\n'), ((2625, 2639), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2636, 2639), False, 'import pickle, csv\n'), ((2679, 2717), 'os.path.join', 'os.path.join', (['FLAGS.train_patch_dir', 'p'], {}), '(FLAGS.train_patch_dir, p)\n', (2691, 2717), False, 'import os\n'), ((3120, 3167), 'pickle.dump', 'pickle.dump', (['[training_paths, testing_paths]', 'f'], {}), '([training_paths, testing_paths], f)\n', (3131, 3167), False, 'import pickle, csv\n'), ((4054, 4087), 'os.listdir', 'os.listdir', (['FLAGS.train_patch_dir'], {}), '(FLAGS.train_patch_dir)\n', (4064, 4087), False, 'import os\n'), ((5517, 5541), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (5539, 5541), True, 'import tensorflow as tf\n'), ((5558, 5619), 'tensorflow.contrib.slim.model_analyzer.analyze_vars', 'slim.model_analyzer.analyze_vars', (['model_vars'], {'print_info': '(True)'}), '(model_vars, print_info=True)\n', (5590, 5619), True, 'import tensorflow.contrib.slim as slim\n'), ((6608, 6632), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (6630, 6632), True, 'import tensorflow as tf\n'), ((6649, 6710), 'tensorflow.contrib.slim.model_analyzer.analyze_vars', 'slim.model_analyzer.analyze_vars', (['model_vars'], {'print_info': '(True)'}), '(model_vars, print_info=True)\n', (6681, 6710), True, 'import tensorflow.contrib.slim as slim\n'), ((6957, 6987), 'os.walk', 'os.walk', (['FLAGS.deploy_data_dir'], {}), '(FLAGS.deploy_data_dir)\n', (6964, 6987), False, 'import os\n'), ((2516, 2564), 'os.path.join', 'os.path.join', (['FLAGS.train_patch_dir', '"""files.log"""'], {}), "(FLAGS.train_patch_dir, 'files.log')\n", (2528, 2564), False, 'import os\n'), ((3044, 3091), 
'os.path.join', 'os.path.join', (['FLAGS.train_data_dir', '"""files.log"""'], {}), "(FLAGS.train_data_dir, 'files.log')\n", (3056, 3091), False, 'import os\n'), ((3793, 3812), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (3809, 3812), False, 'import os\n'), ((3911, 3930), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (3927, 3930), False, 'import os\n'), ((4687, 4706), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (4703, 4706), False, 'import os\n'), ((5815, 5854), 'os.path.exists', 'os.path.exists', (['FLAGS.deploy_output_dir'], {}), '(FLAGS.deploy_output_dir)\n', (5829, 5854), False, 'import os\n'), ((5876, 5912), 'os.makedirs', 'os.makedirs', (['FLAGS.deploy_output_dir'], {}), '(FLAGS.deploy_output_dir)\n', (5887, 5912), False, 'import os\n'), ((7246, 7265), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (7256, 7265), False, 'import pickle, csv\n'), ((2734, 2766), 'os.listdir', 'os.listdir', (['FLAGS.train_data_dir'], {}), '(FLAGS.train_data_dir)\n', (2744, 2766), False, 'import os\n'), ((7012, 7037), 'os.path.basename', 'os.path.basename', (['dirpath'], {}), '(dirpath)\n', (7028, 7037), False, 'import os\n'), ((7493, 7512), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (7509, 7512), False, 'import os\n')] |
import matplotlib.pyplot as plt
import numpy
import seaborn
import tensorflow as tf
import muzero.models as models
from muzero.self_play import MCTS, Node, SelfPlay
class DiagnoseModel:
    """
    Tools to understand the learned model.

    Args:
        checkpoint: checkpoint dict holding the "weights" to diagnose.
        config: configuration class instance related to the weights.
    """

    def __init__(self, checkpoint, config):
        self.config = config

        # Initialize the network with the stored weights.
        self.model = models.MuZeroNetwork(self.config)
        self.model.set_weights(checkpoint["weights"])
        # NOTE(review): eval() is a torch-style call; confirm MuZeroNetwork
        # exposes it in this TensorFlow-based implementation.
        self.model.eval()

    def get_virtual_trajectory_from_obs(
        self, observation, horizon, plot=True, to_play=0
    ):
        """
        MuZero plays a game but uses its model instead of using the environment.
        We still do an MCTS at each step.

        Returns the Trajectoryinfo collected over `horizon` virtual steps.
        """
        trajectory_info = Trajectoryinfo("Virtual trajectory", self.config)
        root, mcts_info = MCTS(self.config).run(
            self.model, observation, self.config.action_space, to_play, True
        )
        # Bug fix: numpy.NaN alias was removed in NumPy 2.0; numpy.nan marks
        # "no action / no reward" for the initial step.
        trajectory_info.store_info(root, mcts_info, None, numpy.nan)

        virtual_to_play = to_play
        for i in range(horizon):
            action = SelfPlay.select_action(root, 0)

            # Players play turn by turn
            if virtual_to_play + 1 < len(self.config.players):
                virtual_to_play = self.config.players[virtual_to_play + 1]
            else:
                virtual_to_play = self.config.players[0]

            # Generate new root: unroll the learned dynamics one step.
            value, reward, policy_logits, hidden_state = self.model.recurrent_inference(
                root.hidden_state,
                tf.identity([[action]]),
            )
            value = models.support_to_scalar(value, self.config.support_size).item()
            reward = models.support_to_scalar(reward, self.config.support_size).item()
            root = Node(0)
            root.expand(
                self.config.action_space,
                virtual_to_play,
                reward,
                policy_logits,
                hidden_state,
            )

            root, mcts_info = MCTS(self.config).run(
                self.model, None, self.config.action_space, virtual_to_play, True, root
            )
            trajectory_info.store_info(
                root, mcts_info, action, reward, new_prior_root_value=value
            )

        if plot:
            trajectory_info.plot_trajectory()

        return trajectory_info

    def compare_virtual_with_real_trajectories(
        self, first_obs, game, horizon, plot=True
    ):
        """
        First, MuZero plays a game but uses its model instead of using the environment.
        Then, MuZero plays the optimal trajectory according precedent trajectory but performs it in the
        real environment until arriving at an action impossible in the real environment.
        It does an MCTS too, but doesn't take it into account.
        All information during the two trajectories are recorded and displayed.

        Returns (virtual_trajectory_info, real_trajectory_info,
        trajectory_divergence_index).
        """
        virtual_trajectory_info = self.get_virtual_trajectory_from_obs(
            first_obs, horizon, False
        )
        real_trajectory_info = Trajectoryinfo("Real trajectory", self.config)
        trajectory_divergence_index = None
        real_trajectory_end_reason = "Reached horizon"

        # Illegal moves are masked at the root
        root, mcts_info = MCTS(self.config).run(
            self.model,
            first_obs,
            game.legal_actions(),
            game.to_play(),
            True,
        )
        self.plot_mcts(root, plot)
        # Bug fix: numpy.nan instead of the removed numpy.NaN alias.
        real_trajectory_info.store_info(root, mcts_info, None, numpy.nan)
        for i, action in enumerate(virtual_trajectory_info.action_history):
            # Follow virtual trajectory until it reaches an illegal move in the real env
            if action not in game.legal_actions():
                break  # Comment to keep playing after trajectory divergence
            # NOTE(review): this overrides the virtual action with the real
            # root's best action — preserved as in the original.
            action = SelfPlay.select_action(root, 0)
            if trajectory_divergence_index is None:
                trajectory_divergence_index = i
                real_trajectory_end_reason = f"Virtual trajectory reached an illegal move at timestep {trajectory_divergence_index}."

            observation, reward, done = game.step(action)
            root, mcts_info = MCTS(self.config).run(
                self.model,
                observation,
                game.legal_actions(),
                game.to_play(),
                True,
            )
            real_trajectory_info.store_info(root, mcts_info, action, reward)
            if done:
                real_trajectory_end_reason = "Real trajectory reached Done"
                break

        if plot:
            virtual_trajectory_info.plot_trajectory()
            real_trajectory_info.plot_trajectory()
            print(real_trajectory_end_reason)

        return (
            virtual_trajectory_info,
            real_trajectory_info,
            trajectory_divergence_index,
        )

    def close_all(self):
        """Close every open matplotlib figure."""
        plt.close("all")

    def plot_mcts(self, root, plot=True):
        """
        Plot the MCTS, pdf file is saved in the current directory.

        Returns the graphviz.Digraph, or None if graphviz is unavailable.
        """
        try:
            from graphviz import Digraph
        except ModuleNotFoundError:
            print("Please install graphviz to get the MCTS plot.")
            return None

        graph = Digraph(comment="MCTS", engine="neato")
        graph.attr("graph", rankdir="LR", splines="true", overlap="false")

        # Node ids are assigned in visit order by the closure below.
        id = 0

        def traverse(node, action, parent_id, best):
            nonlocal id
            node_id = id
            graph.node(
                str(node_id),
                label=f"Action: {action}\nValue: {node.value():.2f}\nVisit count: {node.visit_count}\nPrior: {node.prior:.2f}\nReward: {node.reward:.2f}",
                color="orange" if best else "black",
            )
            id += 1
            if parent_id is not None:
                graph.edge(str(parent_id), str(node_id), constraint="false")

            if len(node.children) != 0:
                best_visit_count = max(
                    [child.visit_count for child in node.children.values()]
                )
            else:
                best_visit_count = False
            for action, child in node.children.items():
                if child.visit_count != 0:
                    traverse(
                        child,
                        action,
                        node_id,
                        True
                        if best_visit_count and child.visit_count == best_visit_count
                        else False,
                    )

        traverse(root, None, None, True)
        graph.node(str(0), color="red")
        # print(graph.source)
        graph.render("mcts", view=plot, cleanup=True, format="pdf")
        return graph
class Trajectoryinfo:
    """
    Store the information about a trajectory (rewards, search information for
    every step, ...).
    """

    def __init__(self, title, config):
        self.title = title + ": "
        self.config = config
        self.action_history = []
        self.reward_history = []
        self.prior_policies = []
        self.policies_after_planning = []
        # Not implemented, need to store them in every nodes of the mcts
        self.prior_values = []
        # Bug fix: numpy.NaN alias was removed in NumPy 2.0 -- use numpy.nan
        # as the "no data" placeholder.
        self.values_after_planning = [[numpy.nan] * len(self.config.action_space)]
        self.prior_root_value = []
        self.root_value_after_planning = []
        self.prior_rewards = [[numpy.nan] * len(self.config.action_space)]
        self.mcts_depth = []

    def store_info(self, root, mcts_info, action, reward, new_prior_root_value=None):
        """Record one search step.

        Args:
            root: expanded MCTS root node after planning.
            mcts_info: dict with "root_predicted_value" and "max_tree_depth".
            action: action that led to this state (None for the first step).
            reward: reward received (None to skip recording).
            new_prior_root_value: if given, overrides the MCTS prior value.
        """
        if action is not None:
            self.action_history.append(action)
        if reward is not None:
            self.reward_history.append(reward)
        # Per-action prior probability at the root (nan for unexpanded actions).
        self.prior_policies.append(
            [
                root.children[action].prior
                if action in root.children.keys()
                else numpy.nan
                for action in self.config.action_space
            ]
        )
        # Visit-count distribution normalized by the simulation budget.
        self.policies_after_planning.append(
            [
                root.children[action].visit_count / self.config.num_simulations
                if action in root.children.keys()
                else numpy.nan
                for action in self.config.action_space
            ]
        )
        self.values_after_planning.append(
            [
                root.children[action].value()
                if action in root.children.keys()
                else numpy.nan
                for action in self.config.action_space
            ]
        )
        # Bug fix: test against None explicitly -- a prior root value of 0 is
        # falsy and was previously discarded in favor of the MCTS prediction.
        self.prior_root_value.append(
            mcts_info["root_predicted_value"]
            if new_prior_root_value is None
            else new_prior_root_value
        )
        self.root_value_after_planning.append(root.value())
        self.prior_rewards.append(
            [
                root.children[action].reward
                if action in root.children.keys()
                else numpy.nan
                for action in self.config.action_space
            ]
        )
        self.mcts_depth.append(mcts_info["max_tree_depth"])

    def plot_trajectory(self):
        """Print and plot (seaborn heatmaps) every recorded statistic."""
        name = "Prior policies"
        print(name, self.prior_policies, "\n")
        plt.figure(self.title + name)
        ax = seaborn.heatmap(
            self.prior_policies,
            mask=numpy.isnan(self.prior_policies),
            annot=True,
        )
        ax.set(xlabel="Action", ylabel="Timestep")
        ax.set_title(name)

        name = "Policies after planning"
        print(name, self.policies_after_planning, "\n")
        plt.figure(self.title + name)
        ax = seaborn.heatmap(
            self.policies_after_planning,
            mask=numpy.isnan(self.policies_after_planning),
            annot=True,
        )
        ax.set(xlabel="Action", ylabel="Timestep")
        ax.set_title(name)

        if 0 < len(self.action_history):
            name = "Action history"
            print(name, self.action_history, "\n")
            plt.figure(self.title + name)
            # ax = seaborn.lineplot(x=list(range(len(self.action_history))), y=self.action_history)
            ax = seaborn.heatmap(
                numpy.transpose([self.action_history]),
                mask=numpy.isnan(numpy.transpose([self.action_history])),
                xticklabels=False,
                annot=True,
            )
            ax.set(ylabel="Timestep")
            ax.set_title(name)

        name = "Values after planning"
        print(name, self.values_after_planning, "\n")
        plt.figure(self.title + name)
        ax = seaborn.heatmap(
            self.values_after_planning,
            mask=numpy.isnan(self.values_after_planning),
            annot=True,
        )
        ax.set(xlabel="Action", ylabel="Timestep")
        ax.set_title(name)

        name = "Prior root value"
        print(name, self.prior_root_value, "\n")
        plt.figure(self.title + name)
        # ax = seaborn.lineplot(x=list(range(len(self.prior_root_value))), y=self.prior_root_value)
        ax = seaborn.heatmap(
            numpy.transpose([self.prior_root_value]),
            mask=numpy.isnan(numpy.transpose([self.prior_root_value])),
            xticklabels=False,
            annot=True,
        )
        ax.set(ylabel="Timestep")
        ax.set_title(name)

        name = "Root value after planning"
        print(name, self.root_value_after_planning, "\n")
        plt.figure(self.title + name)
        # ax = seaborn.lineplot(x=list(range(len(self.root_value_after_planning))), y=self.root_value_after_planning)
        ax = seaborn.heatmap(
            numpy.transpose([self.root_value_after_planning]),
            mask=numpy.isnan(numpy.transpose([self.root_value_after_planning])),
            xticklabels=False,
            annot=True,
        )
        ax.set(ylabel="Timestep")
        ax.set_title(name)

        name = "Prior rewards"
        print(name, self.prior_rewards, "\n")
        plt.figure(self.title + name)
        ax = seaborn.heatmap(
            self.prior_rewards, mask=numpy.isnan(self.prior_rewards), annot=True
        )
        ax.set(xlabel="Action", ylabel="Timestep")
        ax.set_title(name)

        if 0 < len(self.reward_history):
            name = "Reward history"
            print(name, self.reward_history, "\n")
            plt.figure(self.title + name)
            # ax = seaborn.lineplot(x=list(range(len(self.reward_history))), y=self.reward_history)
            ax = seaborn.heatmap(
                numpy.transpose([self.reward_history]),
                mask=numpy.isnan(numpy.transpose([self.reward_history])),
                xticklabels=False,
                annot=True,
            )
            ax.set(ylabel="Timestep")
            ax.set_title(name)

        name = "MCTS depth"
        print(name, self.mcts_depth, "\n")
        plt.figure(self.title + name)
        # ax = seaborn.lineplot(x=list(range(len(self.mcts_depth))), y=self.mcts_depth)
        ax = seaborn.heatmap(
            numpy.transpose([self.mcts_depth]),
            mask=numpy.isnan(numpy.transpose([self.mcts_depth])),
            xticklabels=False,
            annot=True,
        )
        ax.set(ylabel="Timestep")
        ax.set_title(name)

        plt.show(block=False)
| [
"muzero.self_play.SelfPlay.select_action",
"muzero.self_play.MCTS",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.isnan",
"muzero.models.support_to_scalar",
"graphviz.Digraph",
"tensorflow.identity",
"muzero.models.MuZeroNetwork",
"numpy.transpose",
"muzero.self_play.Node",
"ma... | [((510, 543), 'muzero.models.MuZeroNetwork', 'models.MuZeroNetwork', (['self.config'], {}), '(self.config)\n', (530, 543), True, 'import muzero.models as models\n'), ((5128, 5144), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5137, 5144), True, 'import matplotlib.pyplot as plt\n'), ((5477, 5516), 'graphviz.Digraph', 'Digraph', ([], {'comment': '"""MCTS"""', 'engine': '"""neato"""'}), "(comment='MCTS', engine='neato')\n", (5484, 5516), False, 'from graphviz import Digraph\n'), ((9428, 9457), 'matplotlib.pyplot.figure', 'plt.figure', (['(self.title + name)'], {}), '(self.title + name)\n', (9438, 9457), True, 'import matplotlib.pyplot as plt\n'), ((9790, 9819), 'matplotlib.pyplot.figure', 'plt.figure', (['(self.title + name)'], {}), '(self.title + name)\n', (9800, 9819), True, 'import matplotlib.pyplot as plt\n'), ((10747, 10776), 'matplotlib.pyplot.figure', 'plt.figure', (['(self.title + name)'], {}), '(self.title + name)\n', (10757, 10776), True, 'import matplotlib.pyplot as plt\n'), ((11109, 11138), 'matplotlib.pyplot.figure', 'plt.figure', (['(self.title + name)'], {}), '(self.title + name)\n', (11119, 11138), True, 'import matplotlib.pyplot as plt\n'), ((11631, 11660), 'matplotlib.pyplot.figure', 'plt.figure', (['(self.title + name)'], {}), '(self.title + name)\n', (11641, 11660), True, 'import matplotlib.pyplot as plt\n'), ((12165, 12194), 'matplotlib.pyplot.figure', 'plt.figure', (['(self.title + name)'], {}), '(self.title + name)\n', (12175, 12194), True, 'import matplotlib.pyplot as plt\n'), ((13055, 13084), 'matplotlib.pyplot.figure', 'plt.figure', (['(self.title + name)'], {}), '(self.title + name)\n', (13065, 13084), True, 'import matplotlib.pyplot as plt\n'), ((13452, 13473), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (13460, 13473), True, 'import matplotlib.pyplot as plt\n'), ((1247, 1278), 'muzero.self_play.SelfPlay.select_action', 'SelfPlay.select_action', (['root', 
'(0)'], {}), '(root, 0)\n', (1269, 1278), False, 'from muzero.self_play import MCTS, Node, SelfPlay\n'), ((1936, 1943), 'muzero.self_play.Node', 'Node', (['(0)'], {}), '(0)\n', (1940, 1943), False, 'from muzero.self_play import MCTS, Node, SelfPlay\n'), ((10205, 10234), 'matplotlib.pyplot.figure', 'plt.figure', (['(self.title + name)'], {}), '(self.title + name)\n', (10215, 10234), True, 'import matplotlib.pyplot as plt\n'), ((11281, 11321), 'numpy.transpose', 'numpy.transpose', (['[self.prior_root_value]'], {}), '([self.prior_root_value])\n', (11296, 11321), False, 'import numpy\n'), ((11821, 11870), 'numpy.transpose', 'numpy.transpose', (['[self.root_value_after_planning]'], {}), '([self.root_value_after_planning])\n', (11836, 11870), False, 'import numpy\n'), ((12535, 12564), 'matplotlib.pyplot.figure', 'plt.figure', (['(self.title + name)'], {}), '(self.title + name)\n', (12545, 12564), True, 'import matplotlib.pyplot as plt\n'), ((13215, 13249), 'numpy.transpose', 'numpy.transpose', (['[self.mcts_depth]'], {}), '([self.mcts_depth])\n', (13230, 13249), False, 'import numpy\n'), ((979, 996), 'muzero.self_play.MCTS', 'MCTS', (['self.config'], {}), '(self.config)\n', (983, 996), False, 'from muzero.self_play import MCTS, Node, SelfPlay\n'), ((1706, 1729), 'tensorflow.identity', 'tf.identity', (['[[action]]'], {}), '([[action]])\n', (1717, 1729), True, 'import tensorflow as tf\n'), ((3449, 3466), 'muzero.self_play.MCTS', 'MCTS', (['self.config'], {}), '(self.config)\n', (3453, 3466), False, 'from muzero.self_play import MCTS, Node, SelfPlay\n'), ((4036, 4067), 'muzero.self_play.SelfPlay.select_action', 'SelfPlay.select_action', (['root', '(0)'], {}), '(root, 0)\n', (4058, 4067), False, 'from muzero.self_play import MCTS, Node, SelfPlay\n'), ((9538, 9570), 'numpy.isnan', 'numpy.isnan', (['self.prior_policies'], {}), '(self.prior_policies)\n', (9549, 9570), False, 'import numpy\n'), ((9909, 9950), 'numpy.isnan', 'numpy.isnan', (['self.policies_after_planning'], {}), 
'(self.policies_after_planning)\n', (9920, 9950), False, 'import numpy\n'), ((10385, 10423), 'numpy.transpose', 'numpy.transpose', (['[self.action_history]'], {}), '([self.action_history])\n', (10400, 10423), False, 'import numpy\n'), ((10864, 10903), 'numpy.isnan', 'numpy.isnan', (['self.values_after_planning'], {}), '(self.values_after_planning)\n', (10875, 10903), False, 'import numpy\n'), ((12262, 12293), 'numpy.isnan', 'numpy.isnan', (['self.prior_rewards'], {}), '(self.prior_rewards)\n', (12273, 12293), False, 'import numpy\n'), ((12715, 12753), 'numpy.transpose', 'numpy.transpose', (['[self.reward_history]'], {}), '([self.reward_history])\n', (12730, 12753), False, 'import numpy\n'), ((1765, 1822), 'muzero.models.support_to_scalar', 'models.support_to_scalar', (['value', 'self.config.support_size'], {}), '(value, self.config.support_size)\n', (1789, 1822), True, 'import muzero.models as models\n'), ((1851, 1909), 'muzero.models.support_to_scalar', 'models.support_to_scalar', (['reward', 'self.config.support_size'], {}), '(reward, self.config.support_size)\n', (1875, 1909), True, 'import muzero.models as models\n'), ((2174, 2191), 'muzero.self_play.MCTS', 'MCTS', (['self.config'], {}), '(self.config)\n', (2178, 2191), False, 'from muzero.self_play import MCTS, Node, SelfPlay\n'), ((4403, 4420), 'muzero.self_play.MCTS', 'MCTS', (['self.config'], {}), '(self.config)\n', (4407, 4420), False, 'from muzero.self_play import MCTS, Node, SelfPlay\n'), ((11352, 11392), 'numpy.transpose', 'numpy.transpose', (['[self.prior_root_value]'], {}), '([self.prior_root_value])\n', (11367, 11392), False, 'import numpy\n'), ((11901, 11950), 'numpy.transpose', 'numpy.transpose', (['[self.root_value_after_planning]'], {}), '([self.root_value_after_planning])\n', (11916, 11950), False, 'import numpy\n'), ((13280, 13314), 'numpy.transpose', 'numpy.transpose', (['[self.mcts_depth]'], {}), '([self.mcts_depth])\n', (13295, 13314), False, 'import numpy\n'), ((10458, 10496), 
'numpy.transpose', 'numpy.transpose', (['[self.action_history]'], {}), '([self.action_history])\n', (10473, 10496), False, 'import numpy\n'), ((12788, 12826), 'numpy.transpose', 'numpy.transpose', (['[self.reward_history]'], {}), '([self.reward_history])\n', (12803, 12826), False, 'import numpy\n')] |
#
# Call Option Pricing with Circular Convolution (General)
# 06_fou/call_convolution_general.py
#
# (c) Dr. <NAME>
# Derivatives Analytics with Python
#
import numpy as np
from convolution import revnp, convolution
from parameters import *

# Parameter Adjustments
M = 3  # number of time steps
dt, df, u, d, q = get_binomial_parameters(M)

# Array Generation for Stock Prices: S[i, t] = S0 * u**(t - i) * d**i
mu = np.arange(M + 1)
mu = np.resize(mu, (M + 1, M + 1))
md = np.transpose(mu)
mu = u ** (mu - md)
md = d ** md
S = S0 * mu * md

# Valuation by backward induction via circular convolution
V = np.maximum(S - K, 0)  # inner (payoff) values at maturity
# Risk-neutral probability vector. Bug fix: the np.float alias was removed
# in NumPy 1.24 -- use the builtin float instead.
qv = np.zeros(M + 1, dtype=float)
qv[0] = q
qv[1] = 1 - q
for t in range(M - 1, -1, -1):
    # Discounted expectation of next-step values under (q, 1 - q).
    V[:, t] = convolution(V[:, t + 1], revnp(qv)) * df

print("Value of the Call Option %8.3f" % V[0, 0])
| [
"convolution.revnp",
"numpy.resize",
"numpy.zeros",
"numpy.maximum",
"numpy.transpose",
"numpy.arange"
] | [((382, 398), 'numpy.arange', 'np.arange', (['(M + 1)'], {}), '(M + 1)\n', (391, 398), True, 'import numpy as np\n'), ((404, 433), 'numpy.resize', 'np.resize', (['mu', '(M + 1, M + 1)'], {}), '(mu, (M + 1, M + 1))\n', (413, 433), True, 'import numpy as np\n'), ((439, 455), 'numpy.transpose', 'np.transpose', (['mu'], {}), '(mu)\n', (451, 455), True, 'import numpy as np\n'), ((523, 543), 'numpy.maximum', 'np.maximum', (['(S - K)', '(0)'], {}), '(S - K, 0)\n', (533, 543), True, 'import numpy as np\n'), ((549, 580), 'numpy.zeros', 'np.zeros', (['(M + 1)'], {'dtype': 'np.float'}), '(M + 1, dtype=np.float)\n', (557, 580), True, 'import numpy as np\n'), ((677, 686), 'convolution.revnp', 'revnp', (['qv'], {}), '(qv)\n', (682, 686), False, 'from convolution import revnp, convolution\n')] |
import os
from gym import spaces
import numpy as np
import pybullet as p
from .env import AssistiveEnv
class BedBathingEnv(AssistiveEnv):
def __init__(self, robot_type='pr2', human_control=False):
super(BedBathingEnv, self).__init__(robot_type=robot_type, task='bed_bathing', human_control=human_control, frame_skip=5, time_step=0.02, action_robot_len=7, action_human_len=(10 if human_control else 0), obs_robot_len=24, obs_human_len=(28 if human_control else 0))
def step(self, action):
self.take_step(action, robot_arm='left', gains=self.config('robot_gains'), forces=self.config('robot_forces'), human_gains=0.05)
total_force, tool_force, tool_force_on_human, total_force_on_human, new_contact_points = self.get_total_force()
end_effector_velocity = np.linalg.norm(p.getLinkState(self.tool, 1, computeForwardKinematics=True, computeLinkVelocity=True, physicsClientId=self.id)[6])
obs = self._get_obs([tool_force], [total_force_on_human, tool_force_on_human])
# Get human preferences
preferences_score = self.human_preferences(end_effector_velocity=end_effector_velocity, total_force_on_human=total_force_on_human, tool_force_at_target=tool_force_on_human)
reward_distance = -min([c[8] for c in p.getClosestPoints(self.tool, self.human, distance=4.0, physicsClientId=self.id)])
reward_action = -np.sum(np.square(action)) # Penalize actions
reward_new_contact_points = new_contact_points # Reward new contact points on a person
reward = self.config('distance_weight')*reward_distance + self.config('action_weight')*reward_action + self.config('wiping_reward_weight')*reward_new_contact_points + preferences_score
if self.gui and tool_force_on_human > 0:
print('Task success:', self.task_success, 'Force at tool on human:', tool_force_on_human, reward_new_contact_points)
info = {'total_force_on_human': total_force_on_human, 'task_success': int(self.task_success >= (self.total_target_count*self.config('task_success_threshold'))), 'action_robot_len': self.action_robot_len, 'action_human_len': self.action_human_len, 'obs_robot_len': self.obs_robot_len, 'obs_human_len': self.obs_human_len}
done = False
return obs, reward, done, info
def get_total_force(self):
total_force = 0
tool_force = 0
tool_force_on_human = 0
total_force_on_human = 0
new_contact_points = 0
for c in p.getContactPoints(bodyA=self.tool, physicsClientId=self.id):
total_force += c[9]
tool_force += c[9]
for c in p.getContactPoints(bodyA=self.robot, physicsClientId=self.id):
bodyB = c[2]
if bodyB != self.tool:
total_force += c[9]
for c in p.getContactPoints(bodyA=self.robot, bodyB=self.human, physicsClientId=self.id):
total_force_on_human += c[9]
for c in p.getContactPoints(bodyA=self.tool, bodyB=self.human, physicsClientId=self.id):
linkA = c[3]
linkB = c[4]
contact_position = np.array(c[6])
total_force_on_human += c[9]
if linkA in [1]:
tool_force_on_human += c[9]
# Contact with human upperarm, forearm, hand
if linkB < 0 or linkB > p.getNumJoints(self.human, physicsClientId=self.id):
continue
indices_to_delete = []
for i, (target_pos_world, target) in enumerate(zip(self.targets_pos_upperarm_world, self.targets_upperarm)):
if np.linalg.norm(contact_position - target_pos_world) < 0.025:
# The robot made contact with a point on the person's arm
new_contact_points += 1
self.task_success += 1
p.resetBasePositionAndOrientation(target, [1000, 1000, 1000], [0, 0, 0, 1], physicsClientId=self.id)
indices_to_delete.append(i)
self.targets_pos_on_upperarm = [t for i, t in enumerate(self.targets_pos_on_upperarm) if i not in indices_to_delete]
self.targets_upperarm = [t for i, t in enumerate(self.targets_upperarm) if i not in indices_to_delete]
self.targets_pos_upperarm_world = [t for i, t in enumerate(self.targets_pos_upperarm_world) if i not in indices_to_delete]
indices_to_delete = []
for i, (target_pos_world, target) in enumerate(zip(self.targets_pos_forearm_world, self.targets_forearm)):
if np.linalg.norm(contact_position - target_pos_world) < 0.025:
# The robot made contact with a point on the person's arm
new_contact_points += 1
self.task_success += 1
p.resetBasePositionAndOrientation(target, [1000, 1000, 1000], [0, 0, 0, 1], physicsClientId=self.id)
indices_to_delete.append(i)
self.targets_pos_on_forearm = [t for i, t in enumerate(self.targets_pos_on_forearm) if i not in indices_to_delete]
self.targets_forearm = [t for i, t in enumerate(self.targets_forearm) if i not in indices_to_delete]
self.targets_pos_forearm_world = [t for i, t in enumerate(self.targets_pos_forearm_world) if i not in indices_to_delete]
return total_force, tool_force, tool_force_on_human, total_force_on_human, new_contact_points
def _get_obs(self, forces, forces_human):
torso_pos = np.array(p.getLinkState(self.robot, 15 if self.robot_type == 'pr2' else 0, computeForwardKinematics=True, physicsClientId=self.id)[0])
state = p.getLinkState(self.tool, 1, computeForwardKinematics=True, physicsClientId=self.id)
tool_pos = np.array(state[0])
tool_orient = np.array(state[1]) # Quaternions
robot_joint_states = p.getJointStates(self.robot, jointIndices=self.robot_left_arm_joint_indices, physicsClientId=self.id)
robot_joint_positions = np.array([x[0] for x in robot_joint_states])
robot_pos, robot_orient = p.getBasePositionAndOrientation(self.robot, physicsClientId=self.id)
if self.human_control:
human_pos = np.array(p.getBasePositionAndOrientation(self.human, physicsClientId=self.id)[0])
human_joint_states = p.getJointStates(self.human, jointIndices=self.human_controllable_joint_indices, physicsClientId=self.id)
human_joint_positions = np.array([x[0] for x in human_joint_states])
# Human shoulder, elbow, and wrist joint locations
shoulder_pos, shoulder_orient = p.getLinkState(self.human, 5, computeForwardKinematics=True, physicsClientId=self.id)[:2]
elbow_pos, elbow_orient = p.getLinkState(self.human, 7, computeForwardKinematics=True, physicsClientId=self.id)[:2]
wrist_pos, wrist_orient = p.getLinkState(self.human, 9, computeForwardKinematics=True, physicsClientId=self.id)[:2]
robot_obs = np.concatenate([tool_pos-torso_pos, tool_orient, robot_joint_positions, shoulder_pos-torso_pos, elbow_pos-torso_pos, wrist_pos-torso_pos, forces]).ravel()
if self.human_control:
human_obs = np.concatenate([tool_pos-human_pos, tool_orient, human_joint_positions, shoulder_pos-human_pos, elbow_pos-human_pos, wrist_pos-human_pos, forces_human]).ravel()
else:
human_obs = []
return np.concatenate([robot_obs, human_obs]).ravel()
def reset(self):
self.setup_timing()
self.task_success = 0
self.contact_points_on_arm = {}
self.human, self.bed, self.robot, self.robot_lower_limits, self.robot_upper_limits, self.human_lower_limits, self.human_upper_limits, self.robot_right_arm_joint_indices, self.robot_left_arm_joint_indices, self.gender = self.world_creation.create_new_world(furniture_type='bed', static_human_base=False, human_impairment='random', print_joints=False, gender='random')
self.robot_lower_limits = self.robot_lower_limits[self.robot_left_arm_joint_indices]
self.robot_upper_limits = self.robot_upper_limits[self.robot_left_arm_joint_indices]
self.reset_robot_joints()
friction = 5
p.changeDynamics(self.bed, -1, lateralFriction=friction, spinningFriction=friction, rollingFriction=friction, physicsClientId=self.id)
# Setup human in the air and let them settle into a resting pose on the bed
joints_positions = [(3, np.deg2rad(30))]
controllable_joints = []
self.world_creation.setup_human_joints(self.human, joints_positions, controllable_joints, use_static_joints=False, human_reactive_force=None)
p.resetBasePositionAndOrientation(self.human, [-0.15, 0.2, 0.95], p.getQuaternionFromEuler([-np.pi/2.0, 0, 0], physicsClientId=self.id), physicsClientId=self.id)
p.setGravity(0, 0, -1, physicsClientId=self.id)
# Add small variation in human joint positions
for j in range(p.getNumJoints(self.human, physicsClientId=self.id)):
if p.getJointInfo(self.human, j, physicsClientId=self.id)[2] != p.JOINT_FIXED:
p.resetJointState(self.human, jointIndex=j, targetValue=self.np_random.uniform(-0.1, 0.1), targetVelocity=0, physicsClientId=self.id)
# Let the person settle on the bed
for _ in range(100):
p.stepSimulation(physicsClientId=self.id)
# Lock human joints and set velocities to 0
joints_positions = []
self.human_controllable_joint_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] if self.human_control else []
self.world_creation.setup_human_joints(self.human, joints_positions, self.human_controllable_joint_indices, use_static_joints=True, human_reactive_force=None, human_reactive_gain=0.01)
self.target_human_joint_positions = []
if self.human_control:
human_joint_states = p.getJointStates(self.human, jointIndices=self.human_controllable_joint_indices, physicsClientId=self.id)
self.target_human_joint_positions = np.array([x[0] for x in human_joint_states])
self.human_lower_limits = self.human_lower_limits[self.human_controllable_joint_indices]
self.human_upper_limits = self.human_upper_limits[self.human_controllable_joint_indices]
p.changeDynamics(self.human, -1, mass=0, physicsClientId=self.id)
p.resetBaseVelocity(self.human, linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0], physicsClientId=self.id)
p.setGravity(0, 0, 0, physicsClientId=self.id)
p.setGravity(0, 0, -1, body=self.human, physicsClientId=self.id)
# Find the base position and joint positions for a static person in bed
# print(p.getBasePositionAndOrientation(self.human, physicsClientId=self.id))
# joint_states = p.getJointStates(self.human, jointIndices=list(range(p.getNumJoints(self.human, physicsClientId=self.id))), physicsClientId=self.id)
# joint_positions = np.array([x[0] for x in joint_states])
# joint_string = '['
# for i, jp in enumerate(joint_positions):
# joint_string += '(%d, %.4f), ' % (i, jp)
# print(joint_string + ']')
# exit()
shoulder_pos, shoulder_orient = p.getLinkState(self.human, 5, computeForwardKinematics=True, physicsClientId=self.id)[:2]
elbow_pos, elbow_orient = p.getLinkState(self.human, 7, computeForwardKinematics=True, physicsClientId=self.id)[:2]
wrist_pos, wrist_orient = p.getLinkState(self.human, 9, computeForwardKinematics=True, physicsClientId=self.id)[:2]
target_pos = np.array([-0.6, 0.2, 1]) + self.np_random.uniform(-0.05, 0.05, size=3)
if self.robot_type == 'pr2':
target_orient = np.array(p.getQuaternionFromEuler(np.array([0, 0, 0]), physicsClientId=self.id))
self.position_robot_toc(self.robot, 76, [(target_pos, target_orient)], [(shoulder_pos, None), (elbow_pos, None), (wrist_pos, None)], self.robot_left_arm_joint_indices, self.robot_lower_limits, self.robot_upper_limits, ik_indices=range(29, 29+7), pos_offset=np.array([-0.1, 0, 0]), max_ik_iterations=200, step_sim=True, check_env_collisions=False, human_joint_indices=self.human_controllable_joint_indices, human_joint_positions=self.target_human_joint_positions)
self.world_creation.set_gripper_open_position(self.robot, position=0.2, left=True, set_instantly=True)
self.tool = self.world_creation.init_tool(self.robot, mesh_scale=[1]*3, pos_offset=[0, 0, 0], orient_offset=p.getQuaternionFromEuler([0, 0, 0], physicsClientId=self.id), maximal=False)
elif self.robot_type == 'jaco':
target_orient = p.getQuaternionFromEuler(np.array([0, np.pi/2.0, 0]), physicsClientId=self.id)
base_position, base_orientation, _ = self.position_robot_toc(self.robot, 8, [(target_pos, target_orient)], [(shoulder_pos, None), (elbow_pos, None), (wrist_pos, None)], self.robot_left_arm_joint_indices, self.robot_lower_limits, self.robot_upper_limits, ik_indices=[0, 1, 2, 3, 4, 5, 6], pos_offset=np.array([-0.05, 1.05, 0.6]), max_ik_iterations=200, step_sim=True, random_position=0.1, check_env_collisions=False, human_joint_indices=self.human_controllable_joint_indices, human_joint_positions=self.target_human_joint_positions)
self.world_creation.set_gripper_open_position(self.robot, position=1.1, left=True, set_instantly=True)
self.tool = self.world_creation.init_tool(self.robot, mesh_scale=[1]*3, pos_offset=[-0.01, 0, 0.03], orient_offset=p.getQuaternionFromEuler([0, -np.pi/2.0, 0], physicsClientId=self.id), maximal=False)
# Load a nightstand in the environment for the jaco arm
self.nightstand_scale = 0.275
visual_filename = os.path.join(self.world_creation.directory, 'nightstand', 'nightstand.obj')
collision_filename = os.path.join(self.world_creation.directory, 'nightstand', 'nightstand.obj')
nightstand_visual = p.createVisualShape(shapeType=p.GEOM_MESH, fileName=visual_filename, meshScale=[self.nightstand_scale]*3, rgbaColor=[0.5, 0.5, 0.5, 1.0], physicsClientId=self.id)
nightstand_collision = p.createCollisionShape(shapeType=p.GEOM_MESH, fileName=collision_filename, meshScale=[self.nightstand_scale]*3, physicsClientId=self.id)
nightstand_pos = np.array([-0.9, 0.7, 0]) + base_position
nightstand_orient = p.getQuaternionFromEuler(np.array([np.pi/2.0, 0, 0]), physicsClientId=self.id)
self.nightstand = p.createMultiBody(baseMass=0, baseCollisionShapeIndex=nightstand_collision, baseVisualShapeIndex=nightstand_visual, basePosition=nightstand_pos, baseOrientation=nightstand_orient, baseInertialFramePosition=[0, 0, 0], useMaximalCoordinates=False, physicsClientId=self.id)
else:
target_orient = p.getQuaternionFromEuler(np.array([0, np.pi/2.0, 0]), physicsClientId=self.id)
if self.robot_type == 'baxter':
self.position_robot_toc(self.robot, 48, [(target_pos, target_orient)], [(shoulder_pos, None), (elbow_pos, None), (wrist_pos, None)], self.robot_left_arm_joint_indices, self.robot_lower_limits, self.robot_upper_limits, ik_indices=range(10, 17), pos_offset=np.array([-0.2, 0, 0.975]), max_ik_iterations=200, step_sim=True, check_env_collisions=False, human_joint_indices=self.human_controllable_joint_indices, human_joint_positions=self.target_human_joint_positions)
else:
self.position_robot_toc(self.robot, 19, [(target_pos, target_orient)], [(shoulder_pos, None), (elbow_pos, None), (wrist_pos, None)], self.robot_left_arm_joint_indices, self.robot_lower_limits, self.robot_upper_limits, ik_indices=[0, 2, 3, 4, 5, 6, 7], pos_offset=np.array([-0.2, 0, 0.975]), max_ik_iterations=200, step_sim=True, check_env_collisions=False, human_joint_indices=self.human_controllable_joint_indices, human_joint_positions=self.target_human_joint_positions)
self.world_creation.set_gripper_open_position(self.robot, position=0.0125, left=True, set_instantly=True)
self.tool = self.world_creation.init_tool(self.robot, mesh_scale=[0.001]*3, pos_offset=[0, 0.1175, 0], orient_offset=p.getQuaternionFromEuler([np.pi/2.0, 0, np.pi/2.0], physicsClientId=self.id), maximal=False)
self.generate_targets()
# Enable rendering
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)
return self._get_obs([0], [0, 0])
def generate_targets(self):
self.target_indices_to_ignore = []
if self.gender == 'male':
self.upperarm, self.upperarm_length, self.upperarm_radius = 5, 0.279, 0.043
self.forearm, self.forearm_length, self.forearm_radius = 7, 0.257, 0.033
else:
self.upperarm, self.upperarm_length, self.upperarm_radius = 5, 0.264, 0.0355
self.forearm, self.forearm_length, self.forearm_radius = 7, 0.234, 0.027
self.targets_pos_on_upperarm = self.util.capsule_points(p1=np.array([0, 0, 0]), p2=np.array([0, 0, -self.upperarm_length]), radius=self.upperarm_radius, distance_between_points=0.03)
self.targets_pos_on_forearm = self.util.capsule_points(p1=np.array([0, 0, 0]), p2=np.array([0, 0, -self.forearm_length]), radius=self.forearm_radius, distance_between_points=0.03)
sphere_collision = -1
sphere_visual = p.createVisualShape(shapeType=p.GEOM_SPHERE, radius=0.01, rgbaColor=[0, 1, 1, 1], physicsClientId=self.id)
self.targets_upperarm = []
self.targets_forearm = []
for _ in range(len(self.targets_pos_on_upperarm)):
self.targets_upperarm.append(p.createMultiBody(baseMass=0.0, baseCollisionShapeIndex=sphere_collision, baseVisualShapeIndex=sphere_visual, basePosition=[0, 0, 0], useMaximalCoordinates=False, physicsClientId=self.id))
for _ in range(len(self.targets_pos_on_forearm)):
self.targets_forearm.append(p.createMultiBody(baseMass=0.0, baseCollisionShapeIndex=sphere_collision, baseVisualShapeIndex=sphere_visual, basePosition=[0, 0, 0], useMaximalCoordinates=False, physicsClientId=self.id))
self.total_target_count = len(self.targets_upperarm) + len(self.targets_forearm)
self.update_targets()
def update_targets(self):
upperarm_pos, upperarm_orient = p.getLinkState(self.human, self.upperarm, computeForwardKinematics=True, physicsClientId=self.id)[:2]
self.targets_pos_upperarm_world = []
for target_pos_on_arm, target in zip(self.targets_pos_on_upperarm, self.targets_upperarm):
target_pos = np.array(p.multiplyTransforms(upperarm_pos, upperarm_orient, target_pos_on_arm, [0, 0, 0, 1], physicsClientId=self.id)[0])
self.targets_pos_upperarm_world.append(target_pos)
p.resetBasePositionAndOrientation(target, target_pos, [0, 0, 0, 1], physicsClientId=self.id)
forearm_pos, forearm_orient = p.getLinkState(self.human, self.forearm, computeForwardKinematics=True, physicsClientId=self.id)[:2]
self.targets_pos_forearm_world = []
for target_pos_on_arm, target in zip(self.targets_pos_on_forearm, self.targets_forearm):
target_pos = np.array(p.multiplyTransforms(forearm_pos, forearm_orient, target_pos_on_arm, [0, 0, 0, 1], physicsClientId=self.id)[0])
self.targets_pos_forearm_world.append(target_pos)
p.resetBasePositionAndOrientation(target, target_pos, [0, 0, 0, 1], physicsClientId=self.id)
| [
"pybullet.setGravity",
"numpy.array",
"pybullet.resetBaseVelocity",
"numpy.linalg.norm",
"pybullet.createCollisionShape",
"pybullet.getNumJoints",
"pybullet.getQuaternionFromEuler",
"pybullet.createVisualShape",
"numpy.concatenate",
"pybullet.resetBasePositionAndOrientation",
"pybullet.getJointS... | [((2479, 2539), 'pybullet.getContactPoints', 'p.getContactPoints', ([], {'bodyA': 'self.tool', 'physicsClientId': 'self.id'}), '(bodyA=self.tool, physicsClientId=self.id)\n', (2497, 2539), True, 'import pybullet as p\n'), ((2621, 2682), 'pybullet.getContactPoints', 'p.getContactPoints', ([], {'bodyA': 'self.robot', 'physicsClientId': 'self.id'}), '(bodyA=self.robot, physicsClientId=self.id)\n', (2639, 2682), True, 'import pybullet as p\n'), ((2797, 2876), 'pybullet.getContactPoints', 'p.getContactPoints', ([], {'bodyA': 'self.robot', 'bodyB': 'self.human', 'physicsClientId': 'self.id'}), '(bodyA=self.robot, bodyB=self.human, physicsClientId=self.id)\n', (2815, 2876), True, 'import pybullet as p\n'), ((2936, 3014), 'pybullet.getContactPoints', 'p.getContactPoints', ([], {'bodyA': 'self.tool', 'bodyB': 'self.human', 'physicsClientId': 'self.id'}), '(bodyA=self.tool, bodyB=self.human, physicsClientId=self.id)\n', (2954, 3014), True, 'import pybullet as p\n'), ((5710, 5799), 'pybullet.getLinkState', 'p.getLinkState', (['self.tool', '(1)'], {'computeForwardKinematics': '(True)', 'physicsClientId': 'self.id'}), '(self.tool, 1, computeForwardKinematics=True, physicsClientId\n =self.id)\n', (5724, 5799), True, 'import pybullet as p\n'), ((5814, 5832), 'numpy.array', 'np.array', (['state[0]'], {}), '(state[0])\n', (5822, 5832), True, 'import numpy as np\n'), ((5855, 5873), 'numpy.array', 'np.array', (['state[1]'], {}), '(state[1])\n', (5863, 5873), True, 'import numpy as np\n'), ((5917, 6022), 'pybullet.getJointStates', 'p.getJointStates', (['self.robot'], {'jointIndices': 'self.robot_left_arm_joint_indices', 'physicsClientId': 'self.id'}), '(self.robot, jointIndices=self.robot_left_arm_joint_indices,\n physicsClientId=self.id)\n', (5933, 6022), True, 'import pybullet as p\n'), ((6051, 6095), 'numpy.array', 'np.array', (['[x[0] for x in robot_joint_states]'], {}), '([x[0] for x in robot_joint_states])\n', (6059, 6095), True, 'import numpy as np\n'), 
((6130, 6198), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.robot'], {'physicsClientId': 'self.id'}), '(self.robot, physicsClientId=self.id)\n', (6161, 6198), True, 'import pybullet as p\n'), ((8235, 8374), 'pybullet.changeDynamics', 'p.changeDynamics', (['self.bed', '(-1)'], {'lateralFriction': 'friction', 'spinningFriction': 'friction', 'rollingFriction': 'friction', 'physicsClientId': 'self.id'}), '(self.bed, -1, lateralFriction=friction, spinningFriction=\n friction, rollingFriction=friction, physicsClientId=self.id)\n', (8251, 8374), True, 'import pybullet as p\n'), ((8866, 8913), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-1)'], {'physicsClientId': 'self.id'}), '(0, 0, -1, physicsClientId=self.id)\n', (8878, 8913), True, 'import pybullet as p\n'), ((10312, 10377), 'pybullet.changeDynamics', 'p.changeDynamics', (['self.human', '(-1)'], {'mass': '(0)', 'physicsClientId': 'self.id'}), '(self.human, -1, mass=0, physicsClientId=self.id)\n', (10328, 10377), True, 'import pybullet as p\n'), ((10386, 10500), 'pybullet.resetBaseVelocity', 'p.resetBaseVelocity', (['self.human'], {'linearVelocity': '[0, 0, 0]', 'angularVelocity': '[0, 0, 0]', 'physicsClientId': 'self.id'}), '(self.human, linearVelocity=[0, 0, 0], angularVelocity=[\n 0, 0, 0], physicsClientId=self.id)\n', (10405, 10500), True, 'import pybullet as p\n'), ((10505, 10551), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(0)'], {'physicsClientId': 'self.id'}), '(0, 0, 0, physicsClientId=self.id)\n', (10517, 10551), True, 'import pybullet as p\n'), ((10560, 10624), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-1)'], {'body': 'self.human', 'physicsClientId': 'self.id'}), '(0, 0, -1, body=self.human, physicsClientId=self.id)\n', (10572, 10624), True, 'import pybullet as p\n'), ((16365, 16443), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_RENDERING', '(1)'], {'physicsClientId': 'self.id'}), 
'(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)\n', (16391, 16443), True, 'import pybullet as p\n'), ((17393, 17504), 'pybullet.createVisualShape', 'p.createVisualShape', ([], {'shapeType': 'p.GEOM_SPHERE', 'radius': '(0.01)', 'rgbaColor': '[0, 1, 1, 1]', 'physicsClientId': 'self.id'}), '(shapeType=p.GEOM_SPHERE, radius=0.01, rgbaColor=[0, 1, \n 1, 1], physicsClientId=self.id)\n', (17412, 17504), True, 'import pybullet as p\n'), ((3097, 3111), 'numpy.array', 'np.array', (['c[6]'], {}), '(c[6])\n', (3105, 3111), True, 'import numpy as np\n'), ((6369, 6479), 'pybullet.getJointStates', 'p.getJointStates', (['self.human'], {'jointIndices': 'self.human_controllable_joint_indices', 'physicsClientId': 'self.id'}), '(self.human, jointIndices=self.\n human_controllable_joint_indices, physicsClientId=self.id)\n', (6385, 6479), True, 'import pybullet as p\n'), ((6511, 6555), 'numpy.array', 'np.array', (['[x[0] for x in human_joint_states]'], {}), '([x[0] for x in human_joint_states])\n', (6519, 6555), True, 'import numpy as np\n'), ((6656, 6745), 'pybullet.getLinkState', 'p.getLinkState', (['self.human', '(5)'], {'computeForwardKinematics': '(True)', 'physicsClientId': 'self.id'}), '(self.human, 5, computeForwardKinematics=True,\n physicsClientId=self.id)\n', (6670, 6745), True, 'import pybullet as p\n'), ((6780, 6869), 'pybullet.getLinkState', 'p.getLinkState', (['self.human', '(7)'], {'computeForwardKinematics': '(True)', 'physicsClientId': 'self.id'}), '(self.human, 7, computeForwardKinematics=True,\n physicsClientId=self.id)\n', (6794, 6869), True, 'import pybullet as p\n'), ((6904, 6993), 'pybullet.getLinkState', 'p.getLinkState', (['self.human', '(9)'], {'computeForwardKinematics': '(True)', 'physicsClientId': 'self.id'}), '(self.human, 9, computeForwardKinematics=True,\n physicsClientId=self.id)\n', (6918, 6993), True, 'import pybullet as p\n'), ((8761, 8832), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[-np.pi / 2.0, 0, 0]'], 
{'physicsClientId': 'self.id'}), '([-np.pi / 2.0, 0, 0], physicsClientId=self.id)\n', (8785, 8832), True, 'import pybullet as p\n'), ((8993, 9044), 'pybullet.getNumJoints', 'p.getNumJoints', (['self.human'], {'physicsClientId': 'self.id'}), '(self.human, physicsClientId=self.id)\n', (9007, 9044), True, 'import pybullet as p\n'), ((9373, 9414), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {'physicsClientId': 'self.id'}), '(physicsClientId=self.id)\n', (9389, 9414), True, 'import pybullet as p\n'), ((9911, 10021), 'pybullet.getJointStates', 'p.getJointStates', (['self.human'], {'jointIndices': 'self.human_controllable_joint_indices', 'physicsClientId': 'self.id'}), '(self.human, jointIndices=self.\n human_controllable_joint_indices, physicsClientId=self.id)\n', (9927, 10021), True, 'import pybullet as p\n'), ((10065, 10109), 'numpy.array', 'np.array', (['[x[0] for x in human_joint_states]'], {}), '([x[0] for x in human_joint_states])\n', (10073, 10109), True, 'import numpy as np\n'), ((11246, 11335), 'pybullet.getLinkState', 'p.getLinkState', (['self.human', '(5)'], {'computeForwardKinematics': '(True)', 'physicsClientId': 'self.id'}), '(self.human, 5, computeForwardKinematics=True,\n physicsClientId=self.id)\n', (11260, 11335), True, 'import pybullet as p\n'), ((11370, 11459), 'pybullet.getLinkState', 'p.getLinkState', (['self.human', '(7)'], {'computeForwardKinematics': '(True)', 'physicsClientId': 'self.id'}), '(self.human, 7, computeForwardKinematics=True,\n physicsClientId=self.id)\n', (11384, 11459), True, 'import pybullet as p\n'), ((11494, 11583), 'pybullet.getLinkState', 'p.getLinkState', (['self.human', '(9)'], {'computeForwardKinematics': '(True)', 'physicsClientId': 'self.id'}), '(self.human, 9, computeForwardKinematics=True,\n physicsClientId=self.id)\n', (11508, 11583), True, 'import pybullet as p\n'), ((11606, 11630), 'numpy.array', 'np.array', (['[-0.6, 0.2, 1]'], {}), '([-0.6, 0.2, 1])\n', (11614, 11630), True, 'import numpy as np\n'), 
((18335, 18436), 'pybullet.getLinkState', 'p.getLinkState', (['self.human', 'self.upperarm'], {'computeForwardKinematics': '(True)', 'physicsClientId': 'self.id'}), '(self.human, self.upperarm, computeForwardKinematics=True,\n physicsClientId=self.id)\n', (18349, 18436), True, 'import pybullet as p\n'), ((18804, 18900), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['target', 'target_pos', '[0, 0, 0, 1]'], {'physicsClientId': 'self.id'}), '(target, target_pos, [0, 0, 0, 1],\n physicsClientId=self.id)\n', (18837, 18900), True, 'import pybullet as p\n'), ((18936, 19036), 'pybullet.getLinkState', 'p.getLinkState', (['self.human', 'self.forearm'], {'computeForwardKinematics': '(True)', 'physicsClientId': 'self.id'}), '(self.human, self.forearm, computeForwardKinematics=True,\n physicsClientId=self.id)\n', (18950, 19036), True, 'import pybullet as p\n'), ((19398, 19494), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['target', 'target_pos', '[0, 0, 0, 1]'], {'physicsClientId': 'self.id'}), '(target, target_pos, [0, 0, 0, 1],\n physicsClientId=self.id)\n', (19431, 19494), True, 'import pybullet as p\n'), ((812, 926), 'pybullet.getLinkState', 'p.getLinkState', (['self.tool', '(1)'], {'computeForwardKinematics': '(True)', 'computeLinkVelocity': '(True)', 'physicsClientId': 'self.id'}), '(self.tool, 1, computeForwardKinematics=True,\n computeLinkVelocity=True, physicsClientId=self.id)\n', (826, 926), True, 'import pybullet as p\n'), ((1390, 1407), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (1399, 1407), True, 'import numpy as np\n'), ((5568, 5693), 'pybullet.getLinkState', 'p.getLinkState', (['self.robot', "(15 if self.robot_type == 'pr2' else 0)"], {'computeForwardKinematics': '(True)', 'physicsClientId': 'self.id'}), "(self.robot, 15 if self.robot_type == 'pr2' else 0,\n computeForwardKinematics=True, physicsClientId=self.id)\n", (5582, 5693), True, 'import pybullet as p\n'), ((7015, 
7178), 'numpy.concatenate', 'np.concatenate', (['[tool_pos - torso_pos, tool_orient, robot_joint_positions, shoulder_pos -\n torso_pos, elbow_pos - torso_pos, wrist_pos - torso_pos, forces]'], {}), '([tool_pos - torso_pos, tool_orient, robot_joint_positions, \n shoulder_pos - torso_pos, elbow_pos - torso_pos, wrist_pos - torso_pos,\n forces])\n', (7029, 7178), True, 'import numpy as np\n'), ((7443, 7481), 'numpy.concatenate', 'np.concatenate', (['[robot_obs, human_obs]'], {}), '([robot_obs, human_obs])\n', (7457, 7481), True, 'import numpy as np\n'), ((8487, 8501), 'numpy.deg2rad', 'np.deg2rad', (['(30)'], {}), '(30)\n', (8497, 8501), True, 'import numpy as np\n'), ((13769, 13844), 'os.path.join', 'os.path.join', (['self.world_creation.directory', '"""nightstand"""', '"""nightstand.obj"""'], {}), "(self.world_creation.directory, 'nightstand', 'nightstand.obj')\n", (13781, 13844), False, 'import os\n'), ((13878, 13953), 'os.path.join', 'os.path.join', (['self.world_creation.directory', '"""nightstand"""', '"""nightstand.obj"""'], {}), "(self.world_creation.directory, 'nightstand', 'nightstand.obj')\n", (13890, 13953), False, 'import os\n'), ((13986, 14158), 'pybullet.createVisualShape', 'p.createVisualShape', ([], {'shapeType': 'p.GEOM_MESH', 'fileName': 'visual_filename', 'meshScale': '([self.nightstand_scale] * 3)', 'rgbaColor': '[0.5, 0.5, 0.5, 1.0]', 'physicsClientId': 'self.id'}), '(shapeType=p.GEOM_MESH, fileName=visual_filename,\n meshScale=[self.nightstand_scale] * 3, rgbaColor=[0.5, 0.5, 0.5, 1.0],\n physicsClientId=self.id)\n', (14005, 14158), True, 'import pybullet as p\n'), ((14184, 14326), 'pybullet.createCollisionShape', 'p.createCollisionShape', ([], {'shapeType': 'p.GEOM_MESH', 'fileName': 'collision_filename', 'meshScale': '([self.nightstand_scale] * 3)', 'physicsClientId': 'self.id'}), '(shapeType=p.GEOM_MESH, fileName=collision_filename,\n meshScale=[self.nightstand_scale] * 3, physicsClientId=self.id)\n', (14206, 14326), True, 'import pybullet as 
p\n'), ((14532, 14814), 'pybullet.createMultiBody', 'p.createMultiBody', ([], {'baseMass': '(0)', 'baseCollisionShapeIndex': 'nightstand_collision', 'baseVisualShapeIndex': 'nightstand_visual', 'basePosition': 'nightstand_pos', 'baseOrientation': 'nightstand_orient', 'baseInertialFramePosition': '[0, 0, 0]', 'useMaximalCoordinates': '(False)', 'physicsClientId': 'self.id'}), '(baseMass=0, baseCollisionShapeIndex=nightstand_collision,\n baseVisualShapeIndex=nightstand_visual, basePosition=nightstand_pos,\n baseOrientation=nightstand_orient, baseInertialFramePosition=[0, 0, 0],\n useMaximalCoordinates=False, physicsClientId=self.id)\n', (14549, 14814), True, 'import pybullet as p\n'), ((17026, 17045), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (17034, 17045), True, 'import numpy as np\n'), ((17050, 17089), 'numpy.array', 'np.array', (['[0, 0, -self.upperarm_length]'], {}), '([0, 0, -self.upperarm_length])\n', (17058, 17089), True, 'import numpy as np\n'), ((17216, 17235), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (17224, 17235), True, 'import numpy as np\n'), ((17240, 17278), 'numpy.array', 'np.array', (['[0, 0, -self.forearm_length]'], {}), '([0, 0, -self.forearm_length])\n', (17248, 17278), True, 'import numpy as np\n'), ((17669, 17864), 'pybullet.createMultiBody', 'p.createMultiBody', ([], {'baseMass': '(0.0)', 'baseCollisionShapeIndex': 'sphere_collision', 'baseVisualShapeIndex': 'sphere_visual', 'basePosition': '[0, 0, 0]', 'useMaximalCoordinates': '(False)', 'physicsClientId': 'self.id'}), '(baseMass=0.0, baseCollisionShapeIndex=sphere_collision,\n baseVisualShapeIndex=sphere_visual, basePosition=[0, 0, 0],\n useMaximalCoordinates=False, physicsClientId=self.id)\n', (17686, 17864), True, 'import pybullet as p\n'), ((17956, 18151), 'pybullet.createMultiBody', 'p.createMultiBody', ([], {'baseMass': '(0.0)', 'baseCollisionShapeIndex': 'sphere_collision', 'baseVisualShapeIndex': 'sphere_visual', 'basePosition': '[0, 0, 
0]', 'useMaximalCoordinates': '(False)', 'physicsClientId': 'self.id'}), '(baseMass=0.0, baseCollisionShapeIndex=sphere_collision,\n baseVisualShapeIndex=sphere_visual, basePosition=[0, 0, 0],\n useMaximalCoordinates=False, physicsClientId=self.id)\n', (17973, 18151), True, 'import pybullet as p\n'), ((6263, 6331), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.human'], {'physicsClientId': 'self.id'}), '(self.human, physicsClientId=self.id)\n', (6294, 6331), True, 'import pybullet as p\n'), ((7225, 7394), 'numpy.concatenate', 'np.concatenate', (['[tool_pos - human_pos, tool_orient, human_joint_positions, shoulder_pos -\n human_pos, elbow_pos - human_pos, wrist_pos - human_pos, forces_human]'], {}), '([tool_pos - human_pos, tool_orient, human_joint_positions, \n shoulder_pos - human_pos, elbow_pos - human_pos, wrist_pos - human_pos,\n forces_human])\n', (7239, 7394), True, 'import numpy as np\n'), ((9062, 9116), 'pybullet.getJointInfo', 'p.getJointInfo', (['self.human', 'j'], {'physicsClientId': 'self.id'}), '(self.human, j, physicsClientId=self.id)\n', (9076, 9116), True, 'import pybullet as p\n'), ((11776, 11795), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (11784, 11795), True, 'import numpy as np\n'), ((12092, 12114), 'numpy.array', 'np.array', (['[-0.1, 0, 0]'], {}), '([-0.1, 0, 0])\n', (12100, 12114), True, 'import numpy as np\n'), ((12533, 12593), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, 0, 0]'], {'physicsClientId': 'self.id'}), '([0, 0, 0], physicsClientId=self.id)\n', (12557, 12593), True, 'import pybullet as p\n'), ((12703, 12732), 'numpy.array', 'np.array', (['[0, np.pi / 2.0, 0]'], {}), '([0, np.pi / 2.0, 0])\n', (12711, 12732), True, 'import numpy as np\n'), ((14350, 14374), 'numpy.array', 'np.array', (['[-0.9, 0.7, 0]'], {}), '([-0.9, 0.7, 0])\n', (14358, 14374), True, 'import numpy as np\n'), ((14448, 14477), 'numpy.array', 'np.array', (['[np.pi / 2.0, 0, 0]'], 
{}), '([np.pi / 2.0, 0, 0])\n', (14456, 14477), True, 'import numpy as np\n'), ((14870, 14899), 'numpy.array', 'np.array', (['[0, np.pi / 2.0, 0]'], {}), '([0, np.pi / 2.0, 0])\n', (14878, 14899), True, 'import numpy as np\n'), ((18615, 18729), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['upperarm_pos', 'upperarm_orient', 'target_pos_on_arm', '[0, 0, 0, 1]'], {'physicsClientId': 'self.id'}), '(upperarm_pos, upperarm_orient, target_pos_on_arm, [0, \n 0, 0, 1], physicsClientId=self.id)\n', (18635, 18729), True, 'import pybullet as p\n'), ((19212, 19323), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['forearm_pos', 'forearm_orient', 'target_pos_on_arm', '[0, 0, 0, 1]'], {'physicsClientId': 'self.id'}), '(forearm_pos, forearm_orient, target_pos_on_arm, [0, 0,\n 0, 1], physicsClientId=self.id)\n', (19232, 19323), True, 'import pybullet as p\n'), ((1275, 1360), 'pybullet.getClosestPoints', 'p.getClosestPoints', (['self.tool', 'self.human'], {'distance': '(4.0)', 'physicsClientId': 'self.id'}), '(self.tool, self.human, distance=4.0, physicsClientId=self.id\n )\n', (1293, 1360), True, 'import pybullet as p\n'), ((3327, 3378), 'pybullet.getNumJoints', 'p.getNumJoints', (['self.human'], {'physicsClientId': 'self.id'}), '(self.human, physicsClientId=self.id)\n', (3341, 3378), True, 'import pybullet as p\n'), ((3597, 3648), 'numpy.linalg.norm', 'np.linalg.norm', (['(contact_position - target_pos_world)'], {}), '(contact_position - target_pos_world)\n', (3611, 3648), True, 'import numpy as np\n'), ((3859, 3963), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['target', '[1000, 1000, 1000]', '[0, 0, 0, 1]'], {'physicsClientId': 'self.id'}), '(target, [1000, 1000, 1000], [0, 0, 0, 1],\n physicsClientId=self.id)\n', (3892, 3963), True, 'import pybullet as p\n'), ((4589, 4640), 'numpy.linalg.norm', 'np.linalg.norm', (['(contact_position - target_pos_world)'], {}), '(contact_position - target_pos_world)\n', (4603, 4640), 
True, 'import numpy as np\n'), ((4851, 4955), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['target', '[1000, 1000, 1000]', '[0, 0, 0, 1]'], {'physicsClientId': 'self.id'}), '(target, [1000, 1000, 1000], [0, 0, 0, 1],\n physicsClientId=self.id)\n', (4884, 4955), True, 'import pybullet as p\n'), ((13068, 13096), 'numpy.array', 'np.array', (['[-0.05, 1.05, 0.6]'], {}), '([-0.05, 1.05, 0.6])\n', (13076, 13096), True, 'import numpy as np\n'), ((13543, 13614), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, -np.pi / 2.0, 0]'], {'physicsClientId': 'self.id'}), '([0, -np.pi / 2.0, 0], physicsClientId=self.id)\n', (13567, 13614), True, 'import pybullet as p\n'), ((16203, 16288), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[np.pi / 2.0, 0, np.pi / 2.0]'], {'physicsClientId': 'self.id'}), '([np.pi / 2.0, 0, np.pi / 2.0], physicsClientId=self.id\n )\n', (16227, 16288), True, 'import pybullet as p\n'), ((15239, 15265), 'numpy.array', 'np.array', (['[-0.2, 0, 0.975]'], {}), '([-0.2, 0, 0.975])\n', (15247, 15265), True, 'import numpy as np\n'), ((15746, 15772), 'numpy.array', 'np.array', (['[-0.2, 0, 0.975]'], {}), '([-0.2, 0, 0.975])\n', (15754, 15772), True, 'import numpy as np\n')] |
import os
# Pin CUDA device enumeration to PCI bus order and restrict this process
# to physical GPUs 2 and 3; must be set before torch initializes CUDA.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"
import torch
from torch.utils import data
from torch.autograd import Variable
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from albumentations import (
HorizontalFlip,
VerticalFlip,
Compose,
Transpose,
RandomRotate90,
OneOf,
CLAHE,
RandomGamma,
HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise,
RandomBrightnessContrast,
IAASharpen, IAAEmboss
)
from models.unet import UNet
from models.discriminator import FCDiscriminator
from dataset.refuge import REFUGE
from pytorch_utils import (adjust_learning_rate, adjust_learning_rate_D,
calc_mse_loss, Weighted_Jaccard_loss, dice_loss)
from models import optim_weight_ema
from arguments import get_arguments
import tensorboard_logger as tb_logger
import numpy as np
# Augmentation pipeline for images fed to the *student* network:
# geometric transforms (transpose/flip/rotate), additive noise,
# sharpening/contrast enhancement, and colour jitter. Each OneOf group
# fires with probability 0.2 and then picks one of its members.
aug_student = Compose([
    OneOf([
        Transpose(p=0.5),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        RandomRotate90(p=0.5)], p=0.2),
    OneOf([
        IAAAdditiveGaussianNoise(p=0.5),
        GaussNoise(p=0.5),
    ], p=0.2),
    OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(p=0.5),
        IAAEmboss(p=0.5),
        RandomBrightnessContrast(p=0.5),
    ], p=0.2),
    HueSaturationValue(p=0.2),
    RandomGamma(p=0.2)])
# Augmentation pipeline for images fed to the *teacher* network: the same
# photometric transforms as the student but WITHOUT the geometric group,
# so student and teacher predictions stay spatially aligned for the
# consistency loss.
aug_teacher = Compose([
    OneOf([
        IAAAdditiveGaussianNoise(p=0.5),
        GaussNoise(p=0.5),
    ], p=0.2),
    OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(p=0.5),
        IAAEmboss(p=0.5),
        RandomBrightnessContrast(p=0.5),
    ], p=0.2),
    HueSaturationValue(p=0.2),
    RandomGamma(p=0.2)])
def main():
    """Train a student/teacher UNet pair with adversarial domain adaptation.

    The student UNet is optimized with three loss terms:
      * a supervised (weighted Jaccard) segmentation loss on source-domain
        images,
      * a mean-teacher consistency (MSE) loss between student and teacher
        predictions on two augmented views of the same target-domain image,
      * adversarial losses from five per-output discriminators that push
        target-domain predictions to look like source-domain predictions.
    The teacher network is an exponential moving average (EMA) of the
    student and is the model that gets checkpointed.
    """
    args = get_arguments()
    cudnn.enabled = True
    # One discriminator per side-output of the UNet.
    n_discriminators = 5
    logger = tb_logger.Logger(logdir=args.tensorboard_dir, flush_secs=2)

    # Create student & teacher: same architecture, but the teacher is never
    # updated by backprop, only by EMA of the student weights.
    student_net = UNet(3, n_classes=args.num_classes)
    teacher_net = UNet(3, n_classes=args.num_classes)
    student_params = list(student_net.parameters())
    teacher_params = list(teacher_net.parameters())
    for param in teacher_params:
        param.requires_grad = False
    student_net.train()
    student_net.cuda(args.gpu)
    teacher_net.train()
    teacher_net.cuda(args.gpu)
    cudnn.benchmark = True

    # Per-output weights for the consistency, adversarial and MSE terms.
    unsup_weights = [args.unsup_weight6, args.unsup_weight7, args.unsup_weight8,
                     args.unsup_weight9, args.unsup_weight10]
    lambda_adv_tgts = [args.lambda_adv_tgt6, args.lambda_adv_tgt7,
                       args.lambda_adv_tgt8, args.lambda_adv_tgt9,
                       args.lambda_adv_tgt10]
    mse_weights = [args.mse_weight6, args.mse_weight7, args.mse_weight8,
                   args.mse_weight9, args.mse_weight10]

    # One discriminator per UNet output scale.
    discriminators = []
    for dis_idx in range(n_discriminators):
        discriminators.append(FCDiscriminator(num_classes=args.num_classes))
        discriminators[dis_idx].train()
        discriminators[dis_idx].cuda(args.gpu)

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    # Source (labelled) and target (unlabelled) loaders; each sample yields
    # two differently augmented views for the student and the teacher.
    max_iters = args.num_steps * args.iter_size * args.batch_size
    src_set = REFUGE(True, domain='REFUGE_SRC', is_transform=True,
                 augmentations=aug_student, aug_for_target=aug_teacher, max_iters=max_iters)
    src_loader = data.DataLoader(src_set,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=args.num_workers,
                                 pin_memory=True)
    src_loader_iter = enumerate(src_loader)
    tgt_set = REFUGE(True, domain='REFUGE_DST', is_transform=True,
                 augmentations=aug_student, aug_for_target=aug_teacher,
                 max_iters=max_iters)
    tgt_loader = data.DataLoader(tgt_set,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 num_workers=args.num_workers,
                                 pin_memory=True)
    tgt_loader_iter = enumerate(tgt_loader)

    # Student is trained by SGD; the teacher tracks it via weight EMA.
    student_optimizer = optim.SGD(student_params,
                                  lr=args.learning_rate,
                                  momentum=args.momentum,
                                  weight_decay=args.weight_decay)
    teacher_optimizer = optim_weight_ema.WeightEMA(
        teacher_params, student_params, alpha=args.teacher_alpha)
    d_optimizers = []
    for idx in range(n_discriminators):
        optimizer = optim.Adam(discriminators[idx].parameters(),
                               lr=args.learning_rate_D,
                               betas=(0.9, 0.99))
        d_optimizers.append(optimizer)
    calc_bce_loss = torch.nn.BCEWithLogitsLoss()

    # Domain labels for adversarial training.
    source_label, tgt_label = 0, 1

    for i_iter in range(args.num_steps):
        total_seg_loss = 0
        seg_loss_vals = [0] * n_discriminators
        adv_tgt_loss_vals = [0] * n_discriminators
        d_loss_vals = [0] * n_discriminators
        unsup_loss_vals = [0] * n_discriminators

        for d_optimizer in d_optimizers:
            d_optimizer.zero_grad()
            adjust_learning_rate_D(d_optimizer, i_iter, args)
        student_optimizer.zero_grad()
        adjust_learning_rate(student_optimizer, i_iter, args)

        for sub_i in range(args.iter_size):
            # ******** Optimize the student network ********
            # Freeze the discriminators so only the student accumulates
            # gradients from the adversarial term.
            for discriminator in discriminators:
                for param in discriminator.parameters():
                    param.requires_grad = False

            # Supervised segmentation loss on the source domain.
            _, src_batch = src_loader_iter.__next__()
            _, _, src_images, src_labels, _ = src_batch
            src_images = Variable(src_images).cuda(args.gpu)
            sup_preds = list(student_net(src_images))
            seg_losses, total_seg_loss = [], 0
            for idx, sup_pred in enumerate(sup_preds):
                sup_interp_pred = sup_pred
                # dice_loss(src_labels, sup_interp_pred) is a valid alternative.
                seg_loss = Weighted_Jaccard_loss(src_labels, sup_interp_pred,
                                                 args.class_weights, args.gpu)
                seg_losses.append(seg_loss)
                total_seg_loss += seg_loss * unsup_weights[idx]
                seg_loss_vals[idx] += seg_loss.item() / args.iter_size

            # Mean-teacher consistency loss on two augmented views of the
            # same target-domain batch.
            _, tgt_batch = tgt_loader_iter.__next__()
            tgt_images0, tgt_lbl0, tgt_images1, tgt_lbl1, _ = tgt_batch
            tgt_images0 = Variable(tgt_images0).cuda(args.gpu)
            tgt_images1 = Variable(tgt_images1).cuda(args.gpu)
            stu_unsup_preds = list(student_net(tgt_images1))
            tea_unsup_preds = teacher_net(tgt_images0)
            total_mse_loss = 0
            for idx in range(n_discriminators):
                stu_unsup_probs = F.softmax(stu_unsup_preds[idx], dim=-1)
                tea_unsup_probs = F.softmax(tea_unsup_preds[idx], dim=-1)
                unsup_loss = calc_mse_loss(stu_unsup_probs, tea_unsup_probs,
                                          args.batch_size)
                unsup_loss_vals[idx] += unsup_loss.item() / args.iter_size
                total_mse_loss += unsup_loss * mse_weights[idx]
            total_mse_loss = total_mse_loss / args.iter_size

            # Adversarial loss: with frozen discriminators, gradients reach
            # only the student, pushing target-domain outputs towards the
            # "source" decision of each discriminator.
            stu_unsup_preds = list(student_net(tgt_images0))
            d_outs, total_adv_loss = [], 0
            for idx in range(n_discriminators):
                stu_unsup_interp_pred = stu_unsup_preds[idx]
                d_outs.append(discriminators[idx](stu_unsup_interp_pred))
                label_size = d_outs[idx].data.size()
                labels = torch.FloatTensor(label_size).fill_(source_label)
                labels = Variable(labels).cuda(args.gpu)
                adv_tgt_loss = calc_bce_loss(d_outs[idx], labels)
                total_adv_loss += lambda_adv_tgts[idx] * adv_tgt_loss
                adv_tgt_loss_vals[idx] += adv_tgt_loss.item() / args.iter_size
            total_adv_loss = total_adv_loss / args.iter_size

            # ******** Optimize the discriminators ********
            # Unfreeze them and train on *detached* student predictions so
            # only the discriminators accumulate gradients here.
            d_losses = []
            for idx in range(n_discriminators):
                discriminator = discriminators[idx]
                for param in discriminator.parameters():
                    param.requires_grad = True
                # Source-domain predictions should be classified "source".
                sup_preds[idx] = sup_preds[idx].detach()
                d_outs[idx] = discriminators[idx](sup_preds[idx])
                label_size = d_outs[idx].data.size()
                labels = torch.FloatTensor(label_size).fill_(source_label)
                labels = Variable(labels).cuda(args.gpu)
                d_losses.append(calc_bce_loss(d_outs[idx], labels))
                d_losses[idx] = d_losses[idx] / args.iter_size / 2
                d_losses[idx].backward()
                d_loss_vals[idx] += d_losses[idx].item()
            for idx in range(n_discriminators):
                # Target-domain predictions should be classified "target".
                stu_unsup_preds[idx] = stu_unsup_preds[idx].detach()
                d_outs[idx] = discriminators[idx](stu_unsup_preds[idx])
                label_size = d_outs[idx].data.size()
                labels = torch.FloatTensor(label_size).fill_(tgt_label)
                labels = Variable(labels).cuda(args.gpu)
                d_losses[idx] = calc_bce_loss(d_outs[idx], labels)
                d_losses[idx] = d_losses[idx] / args.iter_size / 2
                d_losses[idx].backward()
                d_loss_vals[idx] += d_losses[idx].item()
            for d_optimizer in d_optimizers:
                d_optimizer.step()

        total_loss = total_seg_loss + total_adv_loss + total_mse_loss
        logger.log_value('total_seg_loss', total_seg_loss, i_iter)
        logger.log_value('adv_loss', total_adv_loss, i_iter)
        logger.log_value('mse_loss', total_mse_loss, i_iter)
        logger.log_value('target_loss', total_adv_loss + total_mse_loss, i_iter)
        logger.log_value('d_loss_0,', d_loss_vals[0], i_iter)
        logger.log_value('d_loss_1,', d_loss_vals[1], i_iter)
        logger.log_value('d_loss_2,', d_loss_vals[2], i_iter)
        logger.log_value('d_loss_3,', d_loss_vals[3], i_iter)
        logger.log_value('d_loss_4,', d_loss_vals[4], i_iter)
        # BUG FIX: the original applied np.mean() to the *sum* of the five
        # scalars (a no-op), so "d_loss" logged the sum. Average the list.
        logger.log_value('d_loss', np.mean(d_loss_vals), i_iter)
        total_loss.backward()
        student_optimizer.step()
        teacher_optimizer.step()

        # Console progress report.
        log_str = 'iter = {0:7d}/{1:7d}'.format(i_iter, args.num_steps)
        log_str += ', total_seg_loss = {0:.3f} '.format(total_seg_loss)
        templ = 'seg_losses = [' + ', '.join(['%.2f'] * len(seg_loss_vals))
        log_str += templ % tuple(seg_loss_vals) + '] '
        templ = 'ens_losses = [' + ', '.join(['%.5f'] * len(unsup_loss_vals))
        log_str += templ % tuple(unsup_loss_vals) + '] '
        templ = 'adv_losses = [' + ', '.join(['%.2f'] * len(adv_tgt_loss_vals))
        log_str += templ % tuple(adv_tgt_loss_vals) + '] '
        templ = 'd_losses = [' + ', '.join(['%.2f'] * len(d_loss_vals))
        log_str += templ % tuple(d_loss_vals) + '] '
        print(log_str)

        # Checkpoint the teacher (EMA) network, not the student.
        if i_iter >= args.num_steps_stop - 1:
            print('save model ...')
            filename = 'UNet' + str(args.num_steps_stop) + '_v18_weightedclass.pth'
            torch.save(teacher_net.cpu().state_dict(),
                       os.path.join(args.snapshot_dir, filename))
            break
        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            filename = 'UNet' + str(i_iter) + '_v18_weightedclass.pth'
            torch.save(teacher_net.cpu().state_dict(),
                       os.path.join(args.snapshot_dir, filename))
            # Move the teacher back to the GPU after the CPU-side save.
            teacher_net.cuda(args.gpu)
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
"albumentations.RandomBrightnessContrast",
"albumentations.RandomGamma",
"albumentations.HueSaturationValue",
"torch.nn.functional.softmax",
"os.path.exists",
"arguments.get_arguments",
"numpy.mean",
"models.unet.UNet",
"albumentations.GaussNoise",
"pytorch_utils.calc_mse_loss",
"torch.autograd.... | [((1817, 1832), 'arguments.get_arguments', 'get_arguments', ([], {}), '()\n', (1830, 1832), False, 'from arguments import get_arguments\n'), ((1897, 1956), 'tensorboard_logger.Logger', 'tb_logger.Logger', ([], {'logdir': 'args.tensorboard_dir', 'flush_secs': '(2)'}), '(logdir=args.tensorboard_dir, flush_secs=2)\n', (1913, 1956), True, 'import tensorboard_logger as tb_logger\n'), ((2007, 2042), 'models.unet.UNet', 'UNet', (['(3)'], {'n_classes': 'args.num_classes'}), '(3, n_classes=args.num_classes)\n', (2011, 2042), False, 'from models.unet import UNet\n'), ((2239, 2274), 'models.unet.UNet', 'UNet', (['(3)'], {'n_classes': 'args.num_classes'}), '(3, n_classes=args.num_classes)\n', (2243, 2274), False, 'from models.unet import UNet\n'), ((3656, 3789), 'dataset.refuge.REFUGE', 'REFUGE', (['(True)'], {'domain': '"""REFUGE_SRC"""', 'is_transform': '(True)', 'augmentations': 'aug_student', 'aug_for_target': 'aug_teacher', 'max_iters': 'max_iters'}), "(True, domain='REFUGE_SRC', is_transform=True, augmentations=\n aug_student, aug_for_target=aug_teacher, max_iters=max_iters)\n", (3662, 3789), False, 'from dataset.refuge import REFUGE\n'), ((3823, 3940), 'torch.utils.data.DataLoader', 'data.DataLoader', (['src_set'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(src_set, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True)\n', (3838, 3940), False, 'from torch.utils import data\n'), ((4128, 4261), 'dataset.refuge.REFUGE', 'REFUGE', (['(True)'], {'domain': '"""REFUGE_DST"""', 'is_transform': '(True)', 'augmentations': 'aug_student', 'aug_for_target': 'aug_teacher', 'max_iters': 'max_iters'}), "(True, domain='REFUGE_DST', is_transform=True, augmentations=\n aug_student, aug_for_target=aug_teacher, max_iters=max_iters)\n", (4134, 4261), False, 'from dataset.refuge import REFUGE\n'), ((4316, 4433), 'torch.utils.data.DataLoader', 'data.DataLoader', 
(['tgt_set'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(tgt_set, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True)\n', (4331, 4433), False, 'from torch.utils import data\n'), ((4631, 4739), 'torch.optim.SGD', 'optim.SGD', (['student_params'], {'lr': 'args.learning_rate', 'momentum': 'args.momentum', 'weight_decay': 'args.weight_decay'}), '(student_params, lr=args.learning_rate, momentum=args.momentum,\n weight_decay=args.weight_decay)\n', (4640, 4739), True, 'import torch.optim as optim\n'), ((4862, 4951), 'models.optim_weight_ema.WeightEMA', 'optim_weight_ema.WeightEMA', (['teacher_params', 'student_params'], {'alpha': 'args.teacher_alpha'}), '(teacher_params, student_params, alpha=args.\n teacher_alpha)\n', (4888, 4951), False, 'from models import optim_weight_ema\n'), ((5250, 5278), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {}), '()\n', (5276, 5278), False, 'import torch\n'), ((1360, 1385), 'albumentations.HueSaturationValue', 'HueSaturationValue', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1378, 1385), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1391, 1409), 'albumentations.RandomGamma', 'RandomGamma', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1402, 1409), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1690, 1715), 'albumentations.HueSaturationValue', 'HueSaturationValue', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1708, 1715), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, 
HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1721, 1739), 'albumentations.RandomGamma', 'RandomGamma', ([], {'p': '(0.2)'}), '(p=0.2)\n', (1732, 1739), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((3501, 3534), 'os.path.exists', 'os.path.exists', (['args.snapshot_dir'], {}), '(args.snapshot_dir)\n', (3515, 3534), False, 'import os\n'), ((3544, 3574), 'os.makedirs', 'os.makedirs', (['args.snapshot_dir'], {}), '(args.snapshot_dir)\n', (3555, 3574), False, 'import os\n'), ((5801, 5854), 'pytorch_utils.adjust_learning_rate', 'adjust_learning_rate', (['student_optimizer', 'i_iter', 'args'], {}), '(student_optimizer, i_iter, args)\n', (5821, 5854), False, 'from pytorch_utils import adjust_learning_rate, adjust_learning_rate_D, calc_mse_loss, Weighted_Jaccard_loss, dice_loss\n'), ((3355, 3400), 'models.discriminator.FCDiscriminator', 'FCDiscriminator', ([], {'num_classes': 'args.num_classes'}), '(num_classes=args.num_classes)\n', (3370, 3400), False, 'from models.discriminator import FCDiscriminator\n'), ((5704, 5753), 'pytorch_utils.adjust_learning_rate_D', 'adjust_learning_rate_D', (['d_optimizer', 'i_iter', 'args'], {}), '(d_optimizer, i_iter, args)\n', (5726, 5753), False, 'from pytorch_utils import adjust_learning_rate, adjust_learning_rate_D, calc_mse_loss, Weighted_Jaccard_loss, dice_loss\n'), ((11346, 11441), 'numpy.mean', 'np.mean', (['(d_loss_vals[0] + d_loss_vals[1] + d_loss_vals[2] + d_loss_vals[3] +\n d_loss_vals[4])'], {}), '(d_loss_vals[0] + d_loss_vals[1] + d_loss_vals[2] + d_loss_vals[3] +\n d_loss_vals[4])\n', (11353, 11441), True, 'import numpy as np\n'), ((991, 1007), 'albumentations.Transpose', 'Transpose', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1000, 1007), False, 'from albumentations import 
HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1017, 1038), 'albumentations.HorizontalFlip', 'HorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1031, 1038), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1048, 1067), 'albumentations.VerticalFlip', 'VerticalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1060, 1067), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1077, 1098), 'albumentations.RandomRotate90', 'RandomRotate90', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1091, 1098), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1130, 1161), 'albumentations.IAAAdditiveGaussianNoise', 'IAAAdditiveGaussianNoise', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1154, 1161), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1171, 1188), 'albumentations.GaussNoise', 'GaussNoise', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1181, 1188), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1226, 1245), 
'albumentations.CLAHE', 'CLAHE', ([], {'clip_limit': '(2)'}), '(clip_limit=2)\n', (1231, 1245), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1255, 1272), 'albumentations.IAASharpen', 'IAASharpen', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1265, 1272), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1282, 1298), 'albumentations.IAAEmboss', 'IAAEmboss', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1291, 1298), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1308, 1339), 'albumentations.RandomBrightnessContrast', 'RandomBrightnessContrast', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1332, 1339), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1460, 1491), 'albumentations.IAAAdditiveGaussianNoise', 'IAAAdditiveGaussianNoise', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1484, 1491), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1501, 1518), 'albumentations.GaussNoise', 'GaussNoise', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1511, 1518), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, 
RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1556, 1575), 'albumentations.CLAHE', 'CLAHE', ([], {'clip_limit': '(2)'}), '(clip_limit=2)\n', (1561, 1575), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1585, 1602), 'albumentations.IAASharpen', 'IAASharpen', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1595, 1602), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1612, 1628), 'albumentations.IAAEmboss', 'IAAEmboss', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1621, 1628), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((1638, 1669), 'albumentations.RandomBrightnessContrast', 'RandomBrightnessContrast', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1662, 1669), False, 'from albumentations import HorizontalFlip, VerticalFlip, Compose, Transpose, RandomRotate90, OneOf, CLAHE, RandomGamma, HueSaturationValue, IAAAdditiveGaussianNoise, GaussNoise, RandomBrightnessContrast, IAASharpen, IAAEmboss\n'), ((6753, 6838), 'pytorch_utils.Weighted_Jaccard_loss', 'Weighted_Jaccard_loss', (['src_labels', 'sup_interp_pred', 'args.class_weights', 'args.gpu'], {}), '(src_labels, sup_interp_pred, args.class_weights, args.gpu\n )\n', (6774, 6838), False, 'from pytorch_utils import adjust_learning_rate, adjust_learning_rate_D, calc_mse_loss, Weighted_Jaccard_loss, dice_loss\n'), ((7538, 7577), 'torch.nn.functional.softmax', 'F.softmax', (['stu_unsup_preds[idx]'], 
{'dim': '(-1)'}), '(stu_unsup_preds[idx], dim=-1)\n', (7547, 7577), True, 'import torch.nn.functional as F\n'), ((7612, 7651), 'torch.nn.functional.softmax', 'F.softmax', (['tea_unsup_preds[idx]'], {'dim': '(-1)'}), '(tea_unsup_preds[idx], dim=-1)\n', (7621, 7651), True, 'import torch.nn.functional as F\n'), ((7682, 7746), 'pytorch_utils.calc_mse_loss', 'calc_mse_loss', (['stu_unsup_probs', 'tea_unsup_probs', 'args.batch_size'], {}), '(stu_unsup_probs, tea_unsup_probs, args.batch_size)\n', (7695, 7746), False, 'from pytorch_utils import adjust_learning_rate, adjust_learning_rate_D, calc_mse_loss, Weighted_Jaccard_loss, dice_loss\n'), ((12488, 12529), 'os.path.join', 'os.path.join', (['args.snapshot_dir', 'filename'], {}), '(args.snapshot_dir, filename)\n', (12500, 12529), False, 'import os\n'), ((12803, 12844), 'os.path.join', 'os.path.join', (['args.snapshot_dir', 'filename'], {}), '(args.snapshot_dir, filename)\n', (12815, 12844), False, 'import os\n'), ((6350, 6370), 'torch.autograd.Variable', 'Variable', (['src_images'], {}), '(src_images)\n', (6358, 6370), False, 'from torch.autograd import Variable\n'), ((7166, 7187), 'torch.autograd.Variable', 'Variable', (['tgt_images0'], {}), '(tgt_images0)\n', (7174, 7187), False, 'from torch.autograd import Variable\n'), ((7229, 7250), 'torch.autograd.Variable', 'Variable', (['tgt_images1'], {}), '(tgt_images1)\n', (7237, 7250), False, 'from torch.autograd import Variable\n'), ((8615, 8644), 'torch.FloatTensor', 'torch.FloatTensor', (['label_size'], {}), '(label_size)\n', (8632, 8644), False, 'import torch\n'), ((8690, 8706), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (8698, 8706), False, 'from torch.autograd import Variable\n'), ((9638, 9667), 'torch.FloatTensor', 'torch.FloatTensor', (['label_size'], {}), '(label_size)\n', (9655, 9667), False, 'import torch\n'), ((9713, 9729), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (9721, 9729), False, 'from torch.autograd 
import Variable\n'), ((10248, 10277), 'torch.FloatTensor', 'torch.FloatTensor', (['label_size'], {}), '(label_size)\n', (10265, 10277), False, 'import torch\n'), ((10320, 10336), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (10328, 10336), False, 'from torch.autograd import Variable\n')] |
from env_common import get_screen
from common import select_action_policy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import RMSprop
from itertools import count
import numpy as np
import gym
import visdom
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyperparameters
printout_freq = 1
num_episodes = 300
batch_size = 5  # number of episodes collected per policy update
learning_rate_policy = 0.001
learning_rate_value = 0.001
gamma = 0.99  # reward discount factor
lam = 0.99 # lambda for GAE-lambda advantage estimation
train_v_iters = 10  # value-network gradient steps per update
train_pi_iters = 10  # policy-network gradient steps per update
clip_ratio=0.1 # how far can new policy deviate from old policy (PPO clip)
# Initialize visdom windows for live loss / episode-length plots.
viz = visdom.Visdom()
loss_window = viz.line(
    Y=torch.zeros((1)),
    X=torch.zeros((1)),
    opts=dict(xlabel='step', ylabel='Loss', title='Training loss'))
episode_length_window = viz.line(
    Y=torch.zeros((1)),
    X=torch.zeros((1)),
    opts=dict(xlabel='step', ylabel='Episode length', title='Episode length'))
# Initialize environment; screen dimensions drive the CNN input size.
env = gym.make("CartPole-v0")
env.reset()
init_screen = get_screen(env)
_, _, screen_height, screen_width = init_screen.shape
num_actions = env.action_space.n
class Buffer:
    """On-policy trajectory buffer for policy-gradient training.

    Stores (state, action, log-prob, value, reward) tuples; after
    :meth:`finalize` is called it exposes GAE(lambda) advantages and
    discounted rewards-to-go. A reward of 0 is used by the caller to mark
    an episode boundary.
    """

    def __init__(self, gamma, lam):
        self.buffer = []
        self.advantages = []
        self.discounted_rewards = []
        self.gamma = gamma
        self.lam = lam

    def add(self, state, action, logp, value, reward):
        """Append one transition to the buffer."""
        self.buffer.append((state, action, logp, value, reward))

    def get(self, i):
        """Return state, action, log probability of action, discounted advantage and discounted reward at i.

        Requires that finalize() has been called previously to calculate
        discounted rewards; returns None for an out-of-range index.
        """
        if i >= len(self.buffer) or i >= len(self.advantages) or i >= len(self.discounted_rewards):
            return None
        state, action, logp, _, _ = self.buffer[i]
        reward = self.discounted_rewards[i]
        advantage = self.advantages[i]
        return state, torch.FloatTensor([action]).to(device), logp, advantage, reward

    def finalize(self):
        """Call at end of sample collection to calculate advantages and discounted rewards."""
        if not self.buffer:  # nothing collected; avoid zip(*[]) blowing up
            return
        _, _, _, values, rewards = zip(*self.buffer)

        # One-step TD residuals; a zero reward marks an episode end and
        # contributes no advantage.
        self.advantages = [0] * len(self.buffer)
        for i in range(len(self.advantages) - 1):
            if rewards[i] != 0:
                delta = rewards[i] + self.gamma * values[i + 1] - values[i]
                self.advantages[i] = delta.item()

        # Discount the residuals (GAE-lambda), resetting at episode ends.
        running_add = 0
        for i in reversed(range(len(self.advantages))):
            if self.advantages[i] == 0:
                running_add = 0
            else:
                running_add = running_add * self.gamma * self.lam + self.advantages[i]
                self.advantages[i] = running_add

        # Normalize advantages to zero mean / unit std.
        # BUG FIX: iterate over the buffer length instead of the module-level
        # global `steps`, which diverges from len(self.buffer) once episodes
        # accumulate or the buffer is emptied (IndexError / partial update).
        adv_mean = np.mean(self.advantages)
        adv_std = np.std(self.advantages)
        if adv_std == 0:  # all-equal advantages: skip scaling, avoid /0
            adv_std = 1.0
        for i in range(len(self.advantages)):
            self.advantages[i] = (self.advantages[i] - adv_mean) / adv_std

        # Discounted rewards-to-go, reset at episode boundaries.
        self.discounted_rewards = [0] * len(self.buffer)
        running_add = 0
        for i in reversed(range(len(self.discounted_rewards))):
            if rewards[i] == 0:
                running_add = 0
            else:
                running_add = running_add * self.gamma + rewards[i]
            self.discounted_rewards[i] = running_add

    def empty(self):
        """Discard all stored samples and computed statistics."""
        self.buffer = []
        self.advantages = []
        self.discounted_rewards = []
buffer = Buffer(gamma, lam)
class PolicyNet(nn.Module):
    """Three conv+batchnorm layers followed by a linear head.

    Maps a (N, 3, h, w) screen tensor to `outputs` per-action scores; the
    same architecture is reused with a single output as the value network.
    """

    def __init__(self, h, w, outputs):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(32)

        # Spatial extent after one conv layer (kernel 5, stride 2, no padding).
        def shrink(size, kernel_size=5, stride=2):
            return (size - (kernel_size - 1) - 1) // stride + 1

        out_w, out_h = w, h
        for _ in range(3):  # three conv layers shrink the feature map thrice
            out_w = shrink(out_w)
            out_h = shrink(out_h)
        self.head = nn.Linear(out_w * out_h * 32, outputs)

    def forward(self, x):
        for conv, bn in ((self.conv1, self.bn1),
                         (self.conv2, self.bn2),
                         (self.conv3, self.bn3)):
            x = F.relu(bn(conv(x)))
        return self.head(x.flatten(start_dim=1))
# Policy and value nets share the same CNN architecture (value has 1 output).
policy_network = PolicyNet(screen_height, screen_width, num_actions).to(device)
value_network = PolicyNet(screen_height, screen_width, 1).to(device)
optimizer_policy = RMSprop(policy_network.parameters(), lr=learning_rate_policy)
optimizer_value = RMSprop(value_network.parameters(), lr=learning_rate_value)
# Store duration of episodes to test performance
episode_durations = []
# Training loop
steps = 0          # samples currently held in the buffer
training_step = 0  # number of PPO updates performed so far
for episode in range(num_episodes):
    env.reset()
    # The state is the difference between two consecutive rendered screens.
    last_screen = get_screen(env)
    current_screen = get_screen(env)
    state = current_screen - last_screen
    state = state.to(device)
    for t in count():
        action, logp, val = select_action_policy(state, policy_network, value_network)
        _, reward, done, _ = env.step(action)
        # Move to next state
        last_screen = current_screen
        current_screen = get_screen(env)
        next_state = current_screen - last_screen
        next_state = next_state.to(device)
        # A zero reward marks the boundary between episodes (used by finalize)
        if done:
            reward = 0
        buffer.add(state, float(action), logp, val, reward)
        state = next_state
        steps += 1
        if done:
            episode_durations.append(t + 1)
            viz.line(X=torch.ones((1, 1))*episode, Y=torch.ones((1, 1)) * episode_durations[-1],
                     win=episode_length_window, update='append', name='Episode durations')
            # Plot 50 episode averages
            if len(episode_durations) >= 50:
                mean = np.mean(episode_durations[-50:])
                viz.line(X=torch.ones((1, 1))*episode, Y=torch.ones((1, 1)) * mean,
                         win=episode_length_window, update='append', name='Mean episode durations')
            break
    # Update policy every `batch_size` episodes
    if episode > 0 and episode % batch_size == 0:
        # Compute discounted rewards and advantages for the collected batch
        buffer.finalize()
        # Policy function learning
        for i in range(train_pi_iters):
            optimizer_policy.zero_grad()
            for i in range(steps):
                state, action, logp_old, advantage, _ = buffer.get(i)
                probs = policy_network(state).squeeze(0)
                m = torch.distributions.Categorical(logits=probs)
                logp = m.log_prob(action)  # new log probability
                # PPO clipped surrogate loss (gradients accumulate over the batch)
                ratio = torch.exp(logp - logp_old)
                clip_adv = torch.clamp(ratio, 1-clip_ratio, 1+clip_ratio) * advantage
                policy_loss = -(torch.min(ratio * advantage, clip_adv))
                policy_loss.backward()
            optimizer_policy.step()
        # Value function learning (squared error against discounted rewards)
        for i in range(train_v_iters):
            optimizer_value.zero_grad()
            for i in range(steps):
                state, action, _, _, reward = buffer.get(i)
                value_loss = ((value_network(state).squeeze(0) - reward)**2).mean()
                value_loss.backward()
            optimizer_value.step()
        if training_step % printout_freq == 0:
            viz.line(X=torch.ones((1, 1)) * training_step, Y=torch.ones((1, 1)) * policy_loss.item(),
                     win=loss_window, update='append', name='Policy loss')
        training_step = training_step + 1
        buffer.empty()
steps = 0 | [
"numpy.mean",
"torch.nn.BatchNorm2d",
"torch.ones",
"torch.distributions.Categorical",
"torch.FloatTensor",
"torch.exp",
"env_common.get_screen",
"torch.nn.Conv2d",
"torch.min",
"torch.cuda.is_available",
"itertools.count",
"common.select_action_policy",
"torch.nn.Linear",
"numpy.std",
"... | [((631, 646), 'visdom.Visdom', 'visdom.Visdom', ([], {}), '()\n', (644, 646), False, 'import visdom\n'), ((1000, 1023), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (1008, 1023), False, 'import gym\n'), ((1051, 1066), 'env_common.get_screen', 'get_screen', (['env'], {}), '(env)\n', (1061, 1066), False, 'from env_common import get_screen\n'), ((5182, 5197), 'env_common.get_screen', 'get_screen', (['env'], {}), '(env)\n', (5192, 5197), False, 'from env_common import get_screen\n'), ((5219, 5234), 'env_common.get_screen', 'get_screen', (['env'], {}), '(env)\n', (5229, 5234), False, 'from env_common import get_screen\n'), ((5319, 5326), 'itertools.count', 'count', ([], {}), '()\n', (5324, 5326), False, 'from itertools import count\n'), ((278, 303), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (301, 303), False, 'import torch\n'), ((677, 691), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (688, 691), False, 'import torch\n'), ((701, 715), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (712, 715), False, 'import torch\n'), ((828, 842), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (839, 842), False, 'import torch\n'), ((852, 866), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (863, 866), False, 'import torch\n'), ((2990, 3014), 'numpy.mean', 'np.mean', (['self.advantages'], {}), '(self.advantages)\n', (2997, 3014), True, 'import numpy as np\n'), ((3033, 3056), 'numpy.std', 'np.std', (['self.advantages'], {}), '(self.advantages)\n', (3039, 3056), True, 'import numpy as np\n'), ((3818, 3859), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)'], {'kernel_size': '(5)', 'stride': '(2)'}), '(3, 16, kernel_size=5, stride=2)\n', (3827, 3859), True, 'import torch.nn as nn\n'), ((3879, 3897), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (3893, 3897), True, 'import torch.nn as nn\n'), ((3919, 3961), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)'], {'kernel_size': 
'(5)', 'stride': '(2)'}), '(16, 32, kernel_size=5, stride=2)\n', (3928, 3961), True, 'import torch.nn as nn\n'), ((3981, 3999), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (3995, 3999), True, 'import torch.nn as nn\n'), ((4021, 4063), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(32)'], {'kernel_size': '(5)', 'stride': '(2)'}), '(32, 32, kernel_size=5, stride=2)\n', (4030, 4063), True, 'import torch.nn as nn\n'), ((4083, 4101), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (4097, 4101), True, 'import torch.nn as nn\n'), ((4437, 4474), 'torch.nn.Linear', 'nn.Linear', (['linear_input_size', 'outputs'], {}), '(linear_input_size, outputs)\n', (4446, 4474), True, 'import torch.nn as nn\n'), ((5356, 5414), 'common.select_action_policy', 'select_action_policy', (['state', 'policy_network', 'value_network'], {}), '(state, policy_network, value_network)\n', (5376, 5414), False, 'from common import select_action_policy\n'), ((5553, 5568), 'env_common.get_screen', 'get_screen', (['env'], {}), '(env)\n', (5563, 5568), False, 'from env_common import get_screen\n'), ((6210, 6242), 'numpy.mean', 'np.mean', (['episode_durations[-50:]'], {}), '(episode_durations[-50:])\n', (6217, 6242), True, 'import numpy as np\n'), ((6875, 6920), 'torch.distributions.Categorical', 'torch.distributions.Categorical', ([], {'logits': 'probs'}), '(logits=probs)\n', (6906, 6920), False, 'import torch\n'), ((7037, 7063), 'torch.exp', 'torch.exp', (['(logp - logp_old)'], {}), '(logp - logp_old)\n', (7046, 7063), False, 'import torch\n'), ((7091, 7141), 'torch.clamp', 'torch.clamp', (['ratio', '(1 - clip_ratio)', '(1 + clip_ratio)'], {}), '(ratio, 1 - clip_ratio, 1 + clip_ratio)\n', (7102, 7141), False, 'import torch\n'), ((7182, 7220), 'torch.min', 'torch.min', (['(ratio * advantage)', 'clip_adv'], {}), '(ratio * advantage, clip_adv)\n', (7191, 7220), False, 'import torch\n'), ((2029, 2056), 'torch.FloatTensor', 'torch.FloatTensor', (['[action]'], {}), 
'([action])\n', (2046, 2056), False, 'import torch\n'), ((5942, 5960), 'torch.ones', 'torch.ones', (['(1, 1)'], {}), '((1, 1))\n', (5952, 5960), False, 'import torch\n'), ((5972, 5990), 'torch.ones', 'torch.ones', (['(1, 1)'], {}), '((1, 1))\n', (5982, 5990), False, 'import torch\n'), ((7735, 7753), 'torch.ones', 'torch.ones', (['(1, 1)'], {}), '((1, 1))\n', (7745, 7753), False, 'import torch\n'), ((7773, 7791), 'torch.ones', 'torch.ones', (['(1, 1)'], {}), '((1, 1))\n', (7783, 7791), False, 'import torch\n'), ((6270, 6288), 'torch.ones', 'torch.ones', (['(1, 1)'], {}), '((1, 1))\n', (6280, 6288), False, 'import torch\n'), ((6300, 6318), 'torch.ones', 'torch.ones', (['(1, 1)'], {}), '((1, 1))\n', (6310, 6318), False, 'import torch\n')] |
"""
Brief
=====
This module is not part of the public API. It contains encoding and decoding
functionality which is exclusively used by `io.write` and `io.read`. It enables
storing and transmitting Pyfar- and Numpy-objects without using the unsafe
pickle protocol.
Design and Function
===================
The `_encode` and `_decode` functions are entry points for an algorithm
that recursively processes data structures of varying depths. The result is
stored in a zipfile or similar structure.
Data structures are decomposed into one of the following
three basic encoding/decoding types:
(1) Builtins types are directly written in a JSON-format.
(2) Types that can be trivially derived from builtins are first
cast into their builtin-counterparts. The result and a type-hint
are put into a list-type pair of type hint and object and written into
the same JSON-string. The pair looks as follows:
[str, builtin] e.g. ['$int64', 42]
(3) Types that cannot be easily derived from builtins, such as
numpy.ndarrays, are encoded separately with a dedicated function like
`_encode_ndarray`. The result is written to a dedicated path in the
zip-archive. This zip-path is stored as a reference together with a
type-hint as a pair into the JSON-form
[str, str] e.g. ['$ndarray', '/my_obj/_signal']
Numpy-types can be stored directly in the zipfile. In this case type hints,
such as `$ndarray`, become the name of the file in the archive.
Class-Level
===========
Actually pyfar_obj.__dict__ could be passed to `_encode` or `_decode`
respectively. Some objects however need special treatment at the highest level
of encoding and decoding. Therefore `PyfarClass._encode` and
`PyfarClass._decode` must be implemented in each class. E.g.:
class MyClass:
def _encode(self):
return self.copy().__dict__
@classmethod
def _decode(cls, obj_dict):
obj = cls()
obj.__dict__.update(obj_dict)
return obj
Data Inspection
===============
Once data is written to disk you can rename the file-extension to .zip, open
and inspect the archive. The JSON-file in the archive must reflect the
data structure of an object, e.g. like this:
JSON
----
{
"_n": 42,
"_comment": "My String",
"_subobj": {
"signal": [
"$ndarray",
"my_obj/_subobj/signal"
],
"_m": 49
}
"_list": [
1,
[
"$dtype",
"int32"
],
]
}
----
Names, Type Hints and Zippath
=============================
(1) Object names are the keys of **objs in `io.write` and only
exist at the first hierarchical layer of zip_paths ([0])
(2) Object hints are e.g. `$Coordinates` for Pyfar-objects, `$ndarray` etc.
and exist only at the second layer of zip_paths ([1])
E.g. in a single zip_path: `MyMicrophonePositions/$Coordinates`
- `MyMicrophonePositions` is the name of the object and
- `$Coordinates` is the type hint.
"""
import io
import sys
import json
import numpy as np
def _decode(obj, zipfile):
"""
This function is exclusively used by `io.read` and enables recursive
decoding for objects of varying depth.
Parameters
----------
obj : PyFar-object.
zipfile: zipfile-object.
The zipfile object is looped in the recursive structure
e.g. to decode ndarrays when they occur.
"""
if isinstance(obj, dict):
for key in obj.keys():
_inner_decode(obj, key, zipfile)
elif any([isinstance(obj, x) for x in [list, tuple, set, frozenset]]):
for i in range(0, len(obj)):
_inner_decode(obj, i, zipfile)
return obj
def _inner_decode(obj, key, zipfile):
    """
    Cast a single container entry back into its original type using the
    stored type hint (e.g. '$ndarray', '$tuple'). Entries without a type
    hint are handled by re-entering `_decode` one level deeper.

    Exclusively used by `_codec._decode`.

    Parameters
    ----------
    obj : PyFar-object.
    key : str or int
        The key provided by the dict or list over which currently is being
        iterated.
    zipfile : zipfile
    """
    entry = obj[key]
    if not _is_type_hint(entry):
        _decode(entry, zipfile)
        return

    hint, value = entry[0][1:], entry[1]
    if _is_pyfar_type(hint):
        PyfarType = _str_to_type(hint)
        obj[key] = PyfarType._decode(value)
        _decode(obj[key].__dict__, zipfile)
    elif hint == 'dtype':
        obj[key] = getattr(np, value)
    elif hint == 'ndarray':
        obj[key] = _decode_ndarray(value, zipfile)
    elif hint == 'complex':
        obj[key] = complex(value[0], value[1])
    elif hint == 'tuple':
        obj[key] = tuple(value)
    elif hint == 'set':
        obj[key] = set(value)
    elif hint == 'frozenset':
        obj[key] = frozenset(value)
    else:
        _decode_numpy_scalar(obj, key)
def _decode_numpy_scalar(obj, key):
""" This function is exclusively used by `io._inner_decode` and
decodes numpy scalars e.g. of type `numpy.int32`.
"""
try:
numpy_scalar = getattr(np, obj[key][0][1:])
except AttributeError:
pass
else:
obj[key] = numpy_scalar(obj[key][1])
def _decode_ndarray(obj, zipfile):
""" This function is exclusively used by `io._inner_decode` and
decodes `numpy.ndarrays` from a memfile.
"""
# Numpy.load is applied on a memory file instead of a physical file
memfile = io.BytesIO()
nd_bytes = zipfile.read(obj)
memfile.write(nd_bytes)
memfile.seek(0)
return np.load(memfile, allow_pickle=False)
def _decode_object_json_aided(name, type_hint, zipfile):
    """
    Decodes composed objects with the help of JSON.

    Parameters
    ----------
    name : str
        The object name, usually keys from **objs, see `io.write`.
    type_hint : str
        The object's type hint, starts with '$'.
    zipfile : zipfile
        The zipfile from where we'd like to read data.

    Raises
    ------
    NotImplementedError
        If the target class does not provide a `_decode` classmethod.
    """
    json_str = zipfile.read(f'{name}/{type_hint}').decode('UTF-8')
    obj_dict_encoded = json.loads(json_str)
    obj_dict = _decode(obj_dict_encoded, zipfile)
    ObjType = _str_to_type(type_hint[1:])
    try:
        return ObjType._decode(obj_dict)
    except AttributeError:
        # Bug fix: the original message interpolated the builtin `type`
        # instead of the class that is missing the method.
        raise NotImplementedError(
            f'You must implement `{type_hint[1:]}._decode` first.')
def _encode(obj, zip_path, zipfile):
"""
Chooses the right encoding depending on the object type.
Parameters
----------
obj : PyFar-object.
zip_path: str.
zipfile acceps a path-like-string to know where to write
special types e.g. ndarrays into the archive.
zipfile: zipfile-object.
The zipfile object is looped in the recursive structure
e.g. to encode ndarrays when they occur.
Returns
-------
obj : dict
A dict derived from the original object that must be JSON-serializable
and encodes all not-JSON-serializable objects as:
(1) A pair of type-hint and value:
[str, JSON-serializable] e.g.
['$numpy.int32', 42], ['$tuple', [1, 2, 3]]
or
(2) A pair of ndarray-hint and reference/zip_path:
[str, str] e.g. ['ndarray', 'my_coordinates/_points']
"""
if isinstance(obj, dict):
for key in obj.keys():
_inner_encode(obj, key, f'{zip_path}/{key}', zipfile)
elif isinstance(obj, (list, tuple, set, frozenset)):
for i in range(0, len(obj)):
_inner_encode(obj, i, f'{zip_path}/{i}', zipfile)
return obj
def _inner_encode(obj, key, zip_path, zipfile):
    """
    Replace a single container entry that is not JSON-serializable by a
    [type-hint, value] pair (e.g. '$tuple') or, for ndarrays, by a
    [type-hint, zip-path] reference with the data written to the archive.
    Nested containers are handled by re-entering `_encode`.

    Exclusively used by `_codec._encode`.

    Parameters
    ----------
    obj : PyFar-object.
    key : str or int
        The key provided by the dict or list over which currently is being
        iterated.
    zip_path : str
        The potential zip path looped through all recursions.
    zipfile : zipfile
    """
    value = obj[key]
    if _is_dtype(value):
        obj[key] = ['$dtype', value.__name__]
    elif isinstance(value, np.ndarray):
        zipfile.writestr(zip_path, _encode_ndarray(value))
        obj[key] = ['$ndarray', zip_path]
    elif _is_pyfar_type(value):
        obj[key] = [f'${type(value).__name__}', value._encode()]
        _encode(obj[key][1], zip_path, zipfile)
    elif _is_numpy_scalar(value):
        obj[key] = [f'${type(value).__name__}', value.item()]
    elif isinstance(value, complex):
        obj[key] = ['$complex', [value.real, value.imag]]
    elif isinstance(value, (tuple, set, frozenset)):
        obj[key] = [f'${type(value).__name__}', list(value)]
    else:
        _encode(value, zip_path, zipfile)
def _encode_ndarray(ndarray):
"""
The encoding of objects that are composed of primitive and numpy types
utilizes `obj.__dict__()` and numpy encoding methods.
Parameters
----------
ndarray: numpy.array.
Returns
-------
bytes.
They bytes that where written by `numpy.save` into a memfile.
Note
----
* Do not allow pickling. It is not safe!
"""
# `Numpy.save` is applied on a memory file instead of a physical file
memfile = io.BytesIO()
np.save(memfile, ndarray, allow_pickle=False)
memfile.seek(0)
return memfile.read()
def _encode_object_json_aided(obj, name, zipfile):
"""
Encodes composed objects with the help of JSON.
Parameters
----------
obj: PyFar-type
The object, usually values from **objs, see `io.write`.
name: str
The object's name, usually keys from **objs, see `io.write`.
zipfile: zipfile
The zipfile where we'd like to write data.
"""
try:
obj_dict = _encode(obj._encode(), name, zipfile)
type_hint = f'${type(obj).__name__}'
zipfile.writestr(
f'{name}/{type_hint}',
json.dumps(obj_dict))
except AttributeError:
raise NotImplementedError(
f'You must implement `{type}._encode` first.')
def _is_pyfar_type(obj):
""" True if object is a Pyfar-type.
"""
type_str = obj if isinstance(obj, str) else type(obj).__name__
return type_str in [
'Orientations',
'Coordinates',
'Signal',
'Filter',
'FilterFIR',
'FilterIIR',
'FilterSOS',
'SphericalVoronoi',
'TimeData',
'FrequencyData']
def _is_numpy_type(obj):
""" True if object is a Numpy-type.
"""
return type(obj).__module__ == np.__name__
def _is_dtype(obj):
""" True if object is `numpy.dtype`.
"""
return isinstance(obj, type) and (
obj.__module__ == 'numpy' or obj == complex)
def _is_numpy_scalar(obj):
""" True if object is any numpy.dtype scalar e.g. `numpy.int32`.
"""
return type(obj).__module__ == 'numpy'
def _is_type_hint(obj):
""" Check if object is stored along with its type in the typical format:
[str, str] => [typehint, value] e.g. ['$complex', (3 + 4j)]
"""
return isinstance(obj, list) \
and len(obj) == 2 \
and isinstance(obj[0], str) \
and obj[0][0] == '$'
def _str_to_type(type_as_string, module='pyfar'):
"""
Recursively find a PyfarType by passing in a valid type as a string.
Parameters
----------
type_as_string: string.
A valid PyfarType.
module: string.
Either 'pyfar' or a submodule of pyfar, e.g. 'pyfar.spatial'
The default is 'pyfar'.
Returns
----------
PyfarType: type.
A valid PyfarType.
"""
try:
return getattr(sys.modules[module], type_as_string)
except AttributeError:
submodules = [
attrib for attrib in dir(sys.modules[module])
if not attrib.startswith('__') and attrib.islower()]
except KeyError:
return
for submodule in submodules:
PyfarType = _str_to_type(
type_as_string, module=f'{module}.{submodule}')
if PyfarType:
return PyfarType
| [
"json.loads",
"json.dumps",
"io.BytesIO",
"numpy.load",
"numpy.save"
] | [((5696, 5708), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5706, 5708), False, 'import io\n'), ((5801, 5837), 'numpy.load', 'np.load', (['memfile'], {'allow_pickle': '(False)'}), '(memfile, allow_pickle=False)\n', (5808, 5837), True, 'import numpy as np\n'), ((6311, 6331), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (6321, 6331), False, 'import json\n'), ((9807, 9819), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (9817, 9819), False, 'import io\n'), ((9824, 9869), 'numpy.save', 'np.save', (['memfile', 'ndarray'], {'allow_pickle': '(False)'}), '(memfile, ndarray, allow_pickle=False)\n', (9831, 9869), True, 'import numpy as np\n'), ((10491, 10511), 'json.dumps', 'json.dumps', (['obj_dict'], {}), '(obj_dict)\n', (10501, 10511), False, 'import json\n')] |
import numpy as np
from AnalyticGeometryFunctions import computeVectorNorm, computeAngleBetweenVectors
import pygame as pg
import os
# init variables
# Discrete action set: the 8 unit steps (axis-aligned and diagonal moves).
actionSpace = [[0, 1], [1, 0], [-1, 0], [0, -1], [1, 1], [-1, -1], [1, -1], [-1, 1]]
# Square arena limits in x and y.
xBoundary = [0, 180]
yBoundary = [0, 180]
# Agent speed (step length per transition).
vel = 1
class OptimalPolicy:
    """Greedy policy that heads straight for the target.

    Picks the action from `actionSpace` whose direction deviates least
    (by angle) from the agent-to-target vector.
    """

    def __init__(self, actionSpace):
        self.actionSpace = actionSpace

    def __call__(self, state):
        # state layout: [agent_x, agent_y, target_x, target_y]
        agentPosition = np.array(state[0:2])
        targetPosition = np.array(state[2:4])
        relativeVector = targetPosition - agentPosition
        angleToAction = {computeAngleBetweenVectors(relativeVector, candidate): candidate
                         for candidate in np.array(self.actionSpace)}
        return angleToAction[min(angleToAction)]
def checkBound(state, xBoundary, yBoundary):
    """Return True when the position lies strictly inside both intervals.

    Positions on or beyond either boundary count as out of bounds.
    """
    xMin, xMax = xBoundary
    yMin, yMax = yBoundary
    xPos, yPos = state
    return bool(xMin < xPos < xMax and yMin < yPos < yMax)
def getEachState(state):
    """Split the flat state into (agent position, target position)."""
    agentState = state[:2]
    targetPosition = state[2:]
    return agentState, targetPosition
class TransitionFunction():
    """Deterministic environment step: move the agent at a fixed speed.

    The action vector is rescaled to length `velocity`; a move that would
    leave the arena is rejected and the agent stays in place. The target
    position never changes.
    """

    def __init__(self, xBoundary, yBoundary, velocity):
        self.xBoundary = xBoundary
        self.yBoundary = yBoundary
        self.velocity = velocity

    def __call__(self, state, action):
        agentState, targetPosition = getEachState(state)
        direction = np.array(action)
        step = direction * self.velocity / computeVectorNorm(direction)
        candidatePosition = np.array(agentState) + step
        if checkBound(candidatePosition, self.xBoundary, self.yBoundary):
            return np.concatenate([candidatePosition, targetPosition])
        return np.concatenate([agentState, targetPosition])
class IsTerminal():
    """The episode ends once the agent is within `minDistance` of the target."""

    def __init__(self, minDistance):
        self.minDistance = minDistance

    def __call__(self, state):
        agentState, targetPosition = getEachState(state)
        separation = computeVectorNorm(np.array(agentState) - np.array(targetPosition))
        return bool(separation <= self.minDistance)
class Reset():
    """Sample a random initial state with agent and target inside the arena.

    Draws uniform positions and rejects any draw where either point lies
    on or outside the boundary.
    """

    def __init__(self, xBoundary, yBoundary):
        self.xBoundary = xBoundary
        self.yBoundary = yBoundary

    def _samplePoint(self):
        # One uniform draw for x, then one for y (order matters for RNG state).
        xMin, xMax = self.xBoundary
        yMin, yMax = self.yBoundary
        return np.array([np.random.uniform(xMin, xMax),
                         np.random.uniform(yMin, yMax)])

    def __call__(self):
        agentState = self._samplePoint()
        targetPosition = self._samplePoint()
        while not (checkBound(agentState, self.xBoundary, self.yBoundary)
                   and checkBound(targetPosition, self.xBoundary, self.yBoundary)):
            agentState = self._samplePoint()
            targetPosition = self._samplePoint()
        return np.concatenate([agentState, targetPosition])
class FixedReset():
    """Sample a random initial state, additionally requiring agent and
    target to start at least 20 length units apart (both inside the arena)."""

    def __init__(self, xBoundary, yBoundary):
        self.xBoundary = xBoundary
        self.yBoundary = yBoundary

    def _samplePoint(self):
        # One uniform draw for x, then one for y (order matters for RNG state).
        xMin, xMax = self.xBoundary
        yMin, yMax = self.yBoundary
        return np.array([np.random.uniform(xMin, xMax),
                         np.random.uniform(yMin, yMax)])

    def __call__(self):
        agentState = self._samplePoint()
        targetPosition = self._samplePoint()
        separation = computeVectorNorm(targetPosition - agentState)
        while not (checkBound(agentState, self.xBoundary, self.yBoundary)
                   and checkBound(targetPosition, self.xBoundary, self.yBoundary)
                   and separation >= 20):
            agentState = self._samplePoint()
            targetPosition = self._samplePoint()
            separation = computeVectorNorm(targetPosition - agentState)
        return np.concatenate([agentState, targetPosition])
class Render():
    """Draw every agent as a circle with pygame and optionally save frames.

    `state` passed to `__call__` is the flat concatenation of per-agent
    state vectors of length `numOneAgentState`; `positionIndex` selects
    the (x, y) slice within each vector.
    """

    def __init__(self, numAgent, numOneAgentState, positionIndex, screen, screenColor, circleColorList, circleSize,
                 saveImage, saveImagePath):
        self.numAgent = numAgent
        self.numOneAgentState = numOneAgentState
        self.positionIndex = positionIndex
        self.screen = screen
        self.screenColor = screenColor
        self.circleColorList = circleColorList
        self.circleSize = circleSize
        self.saveImage = saveImage
        self.saveImagePath = saveImagePath

    def __call__(self, state):
        # Pump the event queue so the window stays responsive.
        for event in pg.event.get():
            if event.type == pg.QUIT:
                pg.quit()
        self.screen.fill(self.screenColor)
        for i in range(self.numAgent):
            oneAgentState = state[self.numOneAgentState * i: self.numOneAgentState * (i + 1)]
            oneAgentPosition = oneAgentState[min(self.positionIndex): max(self.positionIndex) + 1]
            # Bug fix: `np.int` was removed in NumPy 1.24 — use builtin int.
            pg.draw.circle(self.screen, self.circleColorList[i],
                           [int(oneAgentPosition[0]), int(oneAgentPosition[1])], self.circleSize)
        pg.display.flip()
        if self.saveImage:
            # Name frames by the number of files already in the folder.
            filenameList = os.listdir(self.saveImagePath)
            pg.image.save(self.screen, self.saveImagePath + '/' + str(len(filenameList)) + '.png')
        pg.time.wait(1)
| [
"os.listdir",
"pygame.quit",
"pygame.event.get",
"pygame.time.wait",
"pygame.display.flip",
"numpy.array",
"numpy.concatenate",
"numpy.random.uniform",
"AnalyticGeometryFunctions.computeAngleBetweenVectors",
"AnalyticGeometryFunctions.computeVectorNorm",
"numpy.int"
] | [((1762, 1806), 'numpy.concatenate', 'np.concatenate', (['[agentState, targetPosition]'], {}), '([agentState, targetPosition])\n', (1776, 1806), True, 'import numpy as np\n'), ((2109, 2142), 'AnalyticGeometryFunctions.computeVectorNorm', 'computeVectorNorm', (['relativeVector'], {}), '(relativeVector)\n', (2126, 2142), False, 'from AnalyticGeometryFunctions import computeVectorNorm, computeAngleBetweenVectors\n'), ((3226, 3277), 'numpy.concatenate', 'np.concatenate', (['[initialAgentState, targetPosition]'], {}), '([initialAgentState, targetPosition])\n', (3240, 3277), True, 'import numpy as np\n'), ((3738, 3791), 'AnalyticGeometryFunctions.computeVectorNorm', 'computeVectorNorm', (['(targetPosition - initialAgentState)'], {}), '(targetPosition - initialAgentState)\n', (3755, 3791), False, 'from AnalyticGeometryFunctions import computeVectorNorm, computeAngleBetweenVectors\n'), ((4462, 4513), 'numpy.concatenate', 'np.concatenate', (['[initialAgentState, targetPosition]'], {}), '([initialAgentState, targetPosition])\n', (4476, 4513), True, 'import numpy as np\n'), ((507, 528), 'numpy.array', 'np.array', (['targetState'], {}), '(targetState)\n', (515, 528), True, 'import numpy as np\n'), ((531, 551), 'numpy.array', 'np.array', (['agentState'], {}), '(agentState)\n', (539, 551), True, 'import numpy as np\n'), ((583, 633), 'AnalyticGeometryFunctions.computeAngleBetweenVectors', 'computeAngleBetweenVectors', (['relativeVector', 'action'], {}), '(relativeVector, action)\n', (609, 633), False, 'from AnalyticGeometryFunctions import computeVectorNorm, computeAngleBetweenVectors\n'), ((1454, 1470), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (1462, 1470), True, 'import numpy as np\n'), ((1572, 1592), 'numpy.array', 'np.array', (['agentState'], {}), '(agentState)\n', (1580, 1592), True, 'import numpy as np\n'), ((1699, 1746), 'numpy.concatenate', 'np.concatenate', (['[newAgentState, targetPosition]'], {}), '([newAgentState, targetPosition])\n', (1713, 
1746), True, 'import numpy as np\n'), ((2034, 2054), 'numpy.array', 'np.array', (['agentState'], {}), '(agentState)\n', (2042, 2054), True, 'import numpy as np\n'), ((2057, 2081), 'numpy.array', 'np.array', (['targetPosition'], {}), '(targetPosition)\n', (2065, 2081), True, 'import numpy as np\n'), ((4393, 4446), 'AnalyticGeometryFunctions.computeVectorNorm', 'computeVectorNorm', (['(targetPosition - initialAgentState)'], {}), '(targetPosition - initialAgentState)\n', (4410, 4446), False, 'from AnalyticGeometryFunctions import computeVectorNorm, computeAngleBetweenVectors\n'), ((5131, 5145), 'pygame.event.get', 'pg.event.get', ([], {}), '()\n', (5143, 5145), True, 'import pygame as pg\n'), ((5699, 5716), 'pygame.display.flip', 'pg.display.flip', ([], {}), '()\n', (5714, 5716), True, 'import pygame as pg\n'), ((5933, 5948), 'pygame.time.wait', 'pg.time.wait', (['(1)'], {}), '(1)\n', (5945, 5948), True, 'import pygame as pg\n'), ((687, 713), 'numpy.array', 'np.array', (['self.actionSpace'], {}), '(self.actionSpace)\n', (695, 713), True, 'import numpy as np\n'), ((1497, 1513), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (1505, 1513), True, 'import numpy as np\n'), ((2505, 2534), 'numpy.random.uniform', 'np.random.uniform', (['xMin', 'xMax'], {}), '(xMin, xMax)\n', (2522, 2534), True, 'import numpy as np\n'), ((2536, 2565), 'numpy.random.uniform', 'np.random.uniform', (['yMin', 'yMax'], {}), '(yMin, yMax)\n', (2553, 2565), True, 'import numpy as np\n'), ((2603, 2632), 'numpy.random.uniform', 'np.random.uniform', (['xMin', 'xMax'], {}), '(xMin, xMax)\n', (2620, 2632), True, 'import numpy as np\n'), ((2634, 2663), 'numpy.random.uniform', 'np.random.uniform', (['yMin', 'yMax'], {}), '(yMin, yMax)\n', (2651, 2663), True, 'import numpy as np\n'), ((3551, 3580), 'numpy.random.uniform', 'np.random.uniform', (['xMin', 'xMax'], {}), '(xMin, xMax)\n', (3568, 3580), True, 'import numpy as np\n'), ((3582, 3611), 'numpy.random.uniform', 'np.random.uniform', (['yMin', 
'yMax'], {}), '(yMin, yMax)\n', (3599, 3611), True, 'import numpy as np\n'), ((3649, 3678), 'numpy.random.uniform', 'np.random.uniform', (['xMin', 'xMax'], {}), '(xMin, xMax)\n', (3666, 3678), True, 'import numpy as np\n'), ((3680, 3709), 'numpy.random.uniform', 'np.random.uniform', (['yMin', 'yMax'], {}), '(yMin, yMax)\n', (3697, 3709), True, 'import numpy as np\n'), ((5787, 5817), 'os.listdir', 'os.listdir', (['self.saveImagePath'], {}), '(self.saveImagePath)\n', (5797, 5817), False, 'import os\n'), ((3046, 3075), 'numpy.random.uniform', 'np.random.uniform', (['xMin', 'xMax'], {}), '(xMin, xMax)\n', (3063, 3075), True, 'import numpy as np\n'), ((3077, 3106), 'numpy.random.uniform', 'np.random.uniform', (['yMin', 'yMax'], {}), '(yMin, yMax)\n', (3094, 3106), True, 'import numpy as np\n'), ((3148, 3177), 'numpy.random.uniform', 'np.random.uniform', (['xMin', 'xMax'], {}), '(xMin, xMax)\n', (3165, 3177), True, 'import numpy as np\n'), ((3179, 3208), 'numpy.random.uniform', 'np.random.uniform', (['yMin', 'yMax'], {}), '(yMin, yMax)\n', (3196, 3208), True, 'import numpy as np\n'), ((4198, 4227), 'numpy.random.uniform', 'np.random.uniform', (['xMin', 'xMax'], {}), '(xMin, xMax)\n', (4215, 4227), True, 'import numpy as np\n'), ((4229, 4258), 'numpy.random.uniform', 'np.random.uniform', (['yMin', 'yMax'], {}), '(yMin, yMax)\n', (4246, 4258), True, 'import numpy as np\n'), ((4300, 4329), 'numpy.random.uniform', 'np.random.uniform', (['xMin', 'xMax'], {}), '(xMin, xMax)\n', (4317, 4329), True, 'import numpy as np\n'), ((4331, 4360), 'numpy.random.uniform', 'np.random.uniform', (['yMin', 'yMax'], {}), '(yMin, yMax)\n', (4348, 4360), True, 'import numpy as np\n'), ((5209, 5218), 'pygame.quit', 'pg.quit', ([], {}), '()\n', (5216, 5218), True, 'import pygame as pg\n'), ((5611, 5638), 'numpy.int', 'np.int', (['oneAgentPosition[0]'], {}), '(oneAgentPosition[0])\n', (5617, 5638), True, 'import numpy as np\n'), ((5640, 5667), 'numpy.int', 'np.int', (['oneAgentPosition[1]'], {}), 
'(oneAgentPosition[1])\n', (5646, 5667), True, 'import numpy as np\n')] |
import pdb
import copy
import numpy as np
from importlib_resources import files
import matplotlib.pyplot as plt
from matplotlib import font_manager
from matplotlib.figure import Figure
from .utilities import *
from .process_model import *
# Define font settings
fontsize = 12
# Register the fonts bundled with the package so Matplotlib can use them.
font_files = font_manager.findSystemFonts(fontpaths=[files('hes_off.core').joinpath("fonts")])
for font_file in font_files:
    font_manager.fontManager.addfont(font_file)
plt.rcParams['font.family'] = 'Arial'
class IntegratedModel:
    """
    The IntegratedModel object defines and stores parameters of the HES-OFF concept.

    The model is initialized from a dictionary of input variables (``IN``),
    validates them, and exposes methods to evaluate the process model, to
    estimate hydrogen-storage depletion/refill times, and to plot the results.
    """

    # Labels for the three life-cycle periods of the installation
    stage_labels = ["Peak years", "Midlife years", "Tail years"]

    def __init__(self, IN):
        """Store and validate the concept specifications given in ``IN``."""
        # Keep a private copy so later mutation of the caller's dict has no effect
        self.IN = copy.deepcopy(IN)

        # Process specifications (MW -> W)
        self.HEAT_DEMAND = np.asarray(self.IN["HEAT_DEMAND"])*1e6
        self.POWER_DEMAND = np.asarray(self.IN["POWER_DEMAND"])*1e6
        self.STAGE_LENGTH = np.asarray(self.IN["STAGE_LENGTH"])

        # Gas turbine specifications (percentage -> fraction)
        self.GT_MODEL = self.IN["GT_MODEL"]
        self.GT_UNITS = self.IN["GT_UNITS"]
        self.GT_MAX_H2 = self.IN["GT_MAX_H2"]/100

        # Wind farm specifications (MW -> W)
        self.WT_MODEL = self.IN["WT_MODEL"]
        self.WT_REF_HEIGHT = IN["WT_REF_HEIGHT"]
        self.WT_HUB_HEIGHT = IN["WT_HUB_HEIGHT"]
        self.WT_RATED_POWER = self.IN["WT_RATED_POWER"]*1e6

        # Electrolizer system specifications (MW -> W)
        self.EL_MODEL = self.IN["EL_MODEL"]
        self.EL_RATED_POWER = self.IN["EL_RATED_POWER"]*1e6
        try:
            self.EL_EFFICIENCY = np.asarray(self.IN["EL_EFFICIENCY"])
            if self.EL_EFFICIENCY[0] is None:
                self.EL_EFFICIENCY = np.zeros((1,))
        except (KeyError, IndexError, TypeError):
            # Missing or malformed efficiency coefficients default to zero
            self.EL_EFFICIENCY = np.zeros((1,))

        # Fuel cell system specifications (MW -> W)
        self.FC_MODEL = self.IN["FC_MODEL"]
        self.FC_RATED_POWER = self.IN["FC_RATED_POWER"]*1e6
        try:
            self.FC_EFFICIENCY = np.asarray(self.IN["FC_EFFICIENCY"])
            if self.FC_EFFICIENCY[0] is None:
                self.FC_EFFICIENCY = np.zeros((1,))
        except (KeyError, IndexError, TypeError):
            # Missing or malformed efficiency coefficients default to zero
            self.FC_EFFICIENCY = np.zeros((1,))

        # Hydrogen storage specifications (percentages -> fractions)
        self.H2_CAPACITY = self.IN["H2_CAPACITY"]
        self.H2_INITIAL_LEVEL = self.IN["H2_INITIAL_LEVEL"] / 100
        self.H2_RECHARGE_THRESHOLD = self.IN["H2_RECHARGE_THRESHOLD"]/100
        self.H2_COFIRE_THRESHOLD = self.IN["H2_COFIRE_THRESHOLD"]/100

        # Wind data specifications
        self.WIND_FILENAME = self.IN["WIND_FILENAME"]
        self.WIND_DATA = read_wind_data(IN["WIND_FILENAME"])
        self.WIND_SPEED = self.WIND_DATA["speed"]
        self.WIND_TIME = self.WIND_DATA["time"]

        # Check the input variable values
        self.check_input_variables()

    def __str__(self):
        """Return a human-readable listing of the input specifications."""
        class_info = '\nHES-OFF concept specifications\n'
        for key, value in self.IN.items():
            class_info += "{:<24}{}\n".format(key, value)
        return class_info

    def check_input_variables(self):
        """ Check that the values of the input variables are feasible """
        if not (self.HEAT_DEMAND.size == self.POWER_DEMAND.size == self.STAGE_LENGTH.size):
            raise Exception("The number of elements of POWER_DEMAND, HEAT_DEMAND and STAGE_LENGTH must be the same")
        if np.any(self.HEAT_DEMAND < 0.0):
            raise Exception("HEAT_DEMAND values must be positive")
        if np.any(self.POWER_DEMAND < 0.0):
            raise Exception("POWER_DEMAND values must be positive")
        if np.any(self.STAGE_LENGTH < 0.0):
            raise Exception("STAGE_LENGTH values must be positive")
        if self.GT_MAX_H2 < 0.0 or self.GT_MAX_H2 > 1.00:
            raise Exception("GT_MAX_H2 must be between zero and one")
        if self.GT_UNITS < 0 or not self.GT_UNITS.is_integer():
            raise Exception("GT_UNITS must be a positive integer")
        if self.WT_RATED_POWER < 0.0:
            raise Exception("WT_RATED_POWER must be positive or zero")
        if self.WT_HUB_HEIGHT < 0.0:
            raise Exception("WT_HUB_HEIGHT must be positive")
        if self.WT_REF_HEIGHT < 0.0:
            raise Exception("WT_REF_HEIGHT must be positive")
        if self.WT_HUB_HEIGHT < self.WT_REF_HEIGHT:
            raise Exception("WT_HUB_HEIGHT must be larger than WT_REF_HEIGHT")
        if self.EL_RATED_POWER < 0.0:
            raise Exception("EL_RATED_POWER must be positive or zero")
        if self.FC_RATED_POWER < 0.0:
            raise Exception("FC_RATED_POWER must be positive or zero")
        if self.H2_CAPACITY < 0.0:
            raise Exception("H2_CAPACITY must be positive or zero")
        if self.H2_INITIAL_LEVEL < 0.0 or self.H2_INITIAL_LEVEL > 1.00:
            raise Exception("H2_INITIAL_LEVEL must be between zero and one")
        if self.H2_RECHARGE_THRESHOLD < 0.0 or self.H2_RECHARGE_THRESHOLD > 1.00:
            raise Exception("H2_RECHARGE_THRESHOLD must be between zero and one")
        if self.H2_COFIRE_THRESHOLD < 0.0 or self.H2_COFIRE_THRESHOLD > 1.00:
            raise Exception("H2_COFIRE_THRESHOLD must be between zero and one")
        if self.H2_COFIRE_THRESHOLD <= self.H2_RECHARGE_THRESHOLD:
            raise Exception("H2_RECHARGE_THRESHOLD must be lower than H2_COFIRE_THRESHOLD")
        return True

    def evaluate_process_model(self):
        """Run the process model and aggregate its hourly output per stage."""
        # NOTE: this calls the module-level evaluate_process_model() imported
        # from process_model (the unqualified name resolves to the global,
        # not to this method).
        self.process_output = evaluate_process_model(self.HEAT_DEMAND, self.POWER_DEMAND,
                                                      self.GT_MODEL, self.GT_UNITS, self.GT_MAX_H2,
                                                      self.WT_MODEL, self.WT_RATED_POWER,
                                                      self.WT_REF_HEIGHT, self.WT_HUB_HEIGHT,
                                                      self.EL_MODEL, self.EL_RATED_POWER, self.EL_EFFICIENCY,
                                                      self.FC_MODEL, self.FC_RATED_POWER, self.FC_EFFICIENCY,
                                                      self.H2_CAPACITY, self.H2_INITIAL_LEVEL,
                                                      self.H2_RECHARGE_THRESHOLD, self.H2_COFIRE_THRESHOLD,
                                                      self.WIND_SPEED, self.WIND_TIME,
                                                      natural_gas, hydrogen)
        # Per-stage aggregates; the last element of each array is the lifetime total
        self.GT_energy = self.create_entry("GT_power")
        self.WT_energy = self.create_entry("WT_power")
        self.FC_energy = self.create_entry("FC_power")
        self.EL_energy = self.create_entry("EL_power")
        self.H2_utilized = self.create_entry("H2_utilized")
        self.NG_utilized = self.create_entry("NG_utilized")
        self.CO2_emissions = self.create_entry("CO2_emissions")
        self.energy_deficit = self.create_entry("power_deficit")
        self.WT_energy_loss = self.create_entry("WT_energy_loss")
        self.WT_energy_available = self.create_entry("WT_energy_available")

    def create_entry(self, name):
        """Sum the hourly output ``name`` over each stage, weighted by stage
        length, and append the lifetime total as the last array element."""
        value = np.sum(self.process_output[name], axis=1)*self.STAGE_LENGTH
        value = np.concatenate((value, np.sum(value)[np.newaxis]))
        return value

    def get_H2_depletion_time_GT(self):
        """Return the time (h) to deplete the H2 storage co-firing the gas turbines."""
        GT_fuel = create_fluid_mixture(fluids=(natural_gas, hydrogen), fractions=(1.00 - self.GT_MAX_H2, self.GT_MAX_H2), fraction_type="molar")
        GT_power_max = compute_GT_maximum_power(model=self.GT_MODEL, number_of_units=self.GT_UNITS)
        GT_efficiency = compute_GT_efficiency(model=self.GT_MODEL, number_of_units=self.GT_UNITS, power_output=GT_power_max)
        GT_fuel_flow = compute_GT_fuel_consumption(power_output=GT_power_max, conversion_efficiency=GT_efficiency, fuel=GT_fuel)[0]
        H2_mass_flow = GT_fuel_flow * GT_fuel["y"][-1]  # Hydrogen is the last component
        depletion_time = self.H2_CAPACITY / H2_mass_flow / 3600
        return depletion_time

    def get_H2_depletion_time_FC(self):
        """Return the time (h) to deplete the H2 storage running the fuel cells at rated power."""
        # Compute mass flow rate of hydrogen fed to the fuel cell system (kg/s)
        H2_mass_flow = compute_FC_hydrogen_consumption(model=self.FC_MODEL,
                                                        efficiency_coefficients=self.FC_EFFICIENCY,
                                                        rated_power=self.FC_RATED_POWER,
                                                        power_output=self.FC_RATED_POWER)[0]
        # Compute the time required to deplete the entire hydrogen storage
        depletion_time = self.H2_CAPACITY / H2_mass_flow / 3600
        return depletion_time  # Units (h)

    def get_H2_refilling_time_EL(self):
        """Return the time (h) to fill the H2 storage running the electrolyzers at rated power."""
        # Compute mass flow rate of hydrogen produced by the electrolyzer system (kg/s)
        H2_mass_flow = compute_EL_hydrogen_production(model=self.EL_MODEL,
                                                       efficiency_coefficients=self.EL_EFFICIENCY,
                                                       rated_power=self.EL_RATED_POWER,
                                                       power_input=self.EL_RATED_POWER)[0]
        # Compute the time required to fill the entire hydrogen storage
        refill_time = self.H2_CAPACITY / H2_mass_flow / 3600
        return refill_time  # Units (h)

    # ------------------------------------------------------------------------------------------------------------------ ##
    # Plot results
    # ------------------------------------------------------------------------------------------------------------------ ##
    def plot_wind_timeseries(self, is_sorted=False):
        """Plot the wind speed profile as a function of time.

        Parameters
        ----------
        is_sorted : bool
            If True, plot the duration curve (speeds sorted in descending order).
        """
        fig = Figure(figsize=(6.0, 4.0))
        ax = fig.subplots(1)
        ax.set_xlabel('Time (hours)', fontsize=fontsize, color='k', labelpad=fontsize)
        ax.set_ylabel('Wind speed (m/s)', fontsize=fontsize, color='k', labelpad=fontsize)
        for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
        for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
        if is_sorted:
            ax.plot(self.WIND_TIME, np.sort(self.WIND_SPEED)[::-1], linewidth=0.75, linestyle='-', color='k',
                    marker=' ', markersize=4.5, markeredgewidth=1.25, markeredgecolor='k', markerfacecolor='w',
                    label=None)
        else:
            ax.plot(self.WIND_TIME, self.WIND_SPEED, linewidth=0.75, linestyle='-', color='k',
                    marker=' ', markersize=4.5, markeredgewidth=1.25, markeredgecolor='k', markerfacecolor='w',
                    label=None)
        fig.tight_layout()
        return fig

    def plot_carbon_dioxide_emissions(self):
        """ Plot the carbon dioxide emissions over time """
        fig = Figure(figsize=(6.0, 6.5))
        fig.suptitle('CO$_2$ emissions accumulated over a year (kton)', fontsize=fontsize+1, fontweight='normal', color='k')
        axes = fig.subplots(len(self.stage_labels))
        for index, ax in enumerate(axes):
            # Cumulative emissions converted from kg to kton
            x, y = self.process_output["times"][index, :] / 24, np.cumsum(self.process_output["CO2_emissions"][index, :]) / 1e3 / 1e3
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            ax.plot(x, y, linewidth=0.75, linestyle='-', color='k', label="Period "+str(index+1), marker="")
            ax.set_ylabel(self.stage_labels[index], fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == len(self.stage_labels):
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            # Pad the y-limits by 20% of the data range
            dy = np.max(y)
            ax.set_ylim([-dy/5, np.max(y)+dy/5])
        fig.tight_layout()
        return fig

    def plot_power_deficit(self):
        """ Plot the energy deficit over time """
        fig = Figure(figsize=(6.0, 6.5))
        fig.suptitle('Power deficit over a year (MW)', fontsize=fontsize + 1, fontweight='normal', color='k')
        axes = fig.subplots(len(self.stage_labels))
        for index, ax in enumerate(axes):
            x, y = self.process_output["times"][index, :] / 24, self.process_output["power_deficit"][index, :] / 1e6
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            ax.plot(x, y, linewidth=0.75, linestyle='-', color='k', label=self.stage_labels[index], marker="")
            ax.set_ylabel(self.stage_labels[index], fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == len(self.stage_labels):
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            # Keep a sensible axis range even when the deficit is identically zero
            dy = max(1, np.max(y))
            ax.set_ylim([-dy / 5, np.max(y) + dy / 5])
        fig.tight_layout()
        return fig

    def plot_hydrogen_level(self):
        """ Plot hydrogen storage level over time """
        fig = Figure(figsize=(6.0, 6.5))
        fig.suptitle('Hydrogen storage level over a year (kg)', fontsize=fontsize+1, fontweight='normal', color='k')
        axes = fig.subplots(len(self.stage_labels))
        for index, ax in enumerate(axes):
            x, y = self.process_output["times"][index, :] / 24, self.process_output["H2_level"][index, :]
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            ax.plot([0.0], [0.0], linestyle="", marker="", label="Period " + str(index + 1))
            ax.plot(x, y, linewidth=0.75, linestyle='-', color='k', label="", marker="")
            ax.set_ylabel(self.stage_labels[index], fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == len(self.stage_labels):
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            # Keep a sensible axis range even when the level is identically zero
            dy = np.max(y)
            dy = np.maximum(dy, 1.00)
            ax.set_ylim([-dy/5, np.max(y)+dy/5])
        fig.tight_layout()
        return fig

    def plot_power_balance(self):
        """Plot the stacked power balance (GT + WT + FC generation vs EL consumption)."""
        fig = Figure(figsize=(6.0, 6.5))
        fig.suptitle('Power balance (MW)', fontsize=fontsize + 1, fontweight='normal', color='k')
        axes = fig.subplots(len(self.stage_labels))
        for index, ax in enumerate(axes):
            x = self.process_output["times"][index, :] / 24
            y1 = self.process_output["GT_power"][index, :] / 1e6
            y2 = self.process_output["WT_power"][index, :] / 1e6
            y3 = self.process_output["FC_power"][index, :] / 1e6
            y4 = self.process_output["EL_power"][index, :] / 1e6
            y5 = y1 + y2 + y3 - y4
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            # Generation stacked above zero, electrolyzer consumption below
            ax.stackplot(x, y1, y2, y3, labels=["GT", "WT", "FC"], colors=["orangered", "forestgreen", "black"])
            ax.stackplot(x, -y4, labels=["EL"], colors=["royalblue"])
            ax.set_ylabel(self.stage_labels[index], fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == len(self.stage_labels):
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            ax.legend(ncol=1,bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=fontsize - 1, edgecolor='k', framealpha=1.0)
            ax.set_xlim([0, x[-1]])
        fig.tight_layout()
        return fig

    def plot_hydrogen_balance(self):
        """ Plot the hydrogen balance over time """
        fig = Figure(figsize=(6.0, 6.5))
        fig.suptitle('Hydrogen balance over a year (kg/s)', fontsize=fontsize+1, fontweight='normal', color='k')
        axes = fig.subplots(len(self.stage_labels))
        for index, ax in enumerate(axes):
            x = self.process_output["times"][index, :] / 24
            y1 = self.process_output["H2_cofired"][index, :]
            y2 = self.process_output["H2_utilized"][index, :]
            y3 = self.process_output["H2_produced"][index, :]
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            # Production stacked above zero, consumption (GT co-firing + FC) below
            ax.stackplot(x, y3, labels=["EL"], colors=["royalblue"])
            ax.stackplot(x, -y1, -y2, labels=["GT", "FC"], colors=["orangered", "black"])
            ax.set_ylabel(self.stage_labels[index], fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == len(self.stage_labels):
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            ax.legend(ncol=1, title="Period " + str(index + 1), bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=fontsize - 1, edgecolor='k', framealpha=1.0)
            # Symmetric y-limits with a floor to avoid a degenerate axis
            dy = np.max([np.max(y1+y2), np.max(y3), 0.002])
            ax.set_ylim([-1*dy, 1*dy])
        fig.tight_layout()
        return fig

    def plot_flag(self):
        """ Plot the operation flag over time """
        n_axes = self.process_output["times"].shape[0]
        fig = plt.figure(figsize=(6.0, 5.5))
        fig.suptitle('Process flag over a year', fontsize=fontsize+1, fontweight='normal', color='k')
        axes = fig.subplots(n_axes)
        for index, ax in enumerate(axes):
            x, y = self.process_output["times"][index, :] / 24, self.process_output["flag"][index, :]
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            ax.plot(x, y, linewidth=0.75, linestyle='', color='k', label="Period "+str(index+1), marker="o",
                    markerfacecolor="w", markeredgecolor="k", markersize=3.0, markeredgewidth=0.75)
            ax.set_ylabel('Flag', fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == n_axes:
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            ax.legend(ncol=1, loc='lower right', fontsize=fontsize-1, edgecolor='k', framealpha=1.0, handlelength=0.0)
            ax.set_ylim([-0.5, 6.5])
        fig.tight_layout()
        return fig

    def plot_sensitivity_analysis(self, variable, low_value, high_value):
        """Sweep ``variable`` over [low_value, high_value] and plot CO2 emissions
        and energy deficit of the re-evaluated concept at each point."""
        # Create a copy of the input dictionary
        IN = copy.deepcopy(self.IN)
        # Compute the performance for a range of input variables
        values = np.linspace(low_value, high_value, 21)
        CO2_emissions, energy_deficit = [], []
        print("Starting", variable, "sensitivity analysis...")
        for i, value in enumerate(values):
            print_progress(i, len(values))
            IN[variable] = value
            EnergySystem = IntegratedModel(IN)
            EnergySystem.evaluate_process_model()
            # The last element of each aggregate array is the lifetime total
            # (the attributes *_total do not exist; see create_entry)
            CO2_emissions.append(EnergySystem.CO2_emissions[-1])
            energy_deficit.append(EnergySystem.energy_deficit[-1])
        # Convert list to Numpy arrays
        CO2_emissions = np.asarray(CO2_emissions)
        energy_deficit = np.asarray(energy_deficit)
        # Plot the CO2 emissions results
        fig = plt.figure(figsize=(6, 4))
        ax = fig.add_subplot(111)
        ax.set_xlabel(variable, fontsize=fontsize, color='k', labelpad=fontsize)
        ax.set_ylabel('CO$_2$ emissions (Mt)', fontsize=fontsize, color='k', labelpad=fontsize)
        for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
        for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
        ax.plot(values, CO2_emissions/1e6, linewidth=0.75, linestyle='-', color='k',
                marker=' ', markersize=4.5, markeredgewidth=1.25, markeredgecolor='k', markerfacecolor='w', label=None)
        fig.tight_layout()
        # Plot the energy deficit results (this figure is the one returned)
        fig = plt.figure(figsize=(6, 4))
        ax = fig.add_subplot(111)
        ax.set_xlabel(variable, fontsize=fontsize, color='k', labelpad=fontsize)
        ax.set_ylabel('Energy deficit (MWh)', fontsize=fontsize, color='k', labelpad=fontsize)
        for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
        for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
        ax.plot(values, energy_deficit, linewidth=0.75, linestyle='-', color='k',
                marker=' ', markersize=4.5, markeredgewidth=1.25, markeredgecolor='k', markerfacecolor='w', label=None)
        fig.tight_layout()
        return fig

    def plot_wt_power_available(self):
        """ Plot available wind power over time """
        fig = Figure(figsize=(6.0, 6.5))
        fig.suptitle('Available wind power over a year (MW)', fontsize=fontsize+1, fontweight='normal', color='k')
        axes = fig.subplots(len(self.stage_labels))
        for index, ax in enumerate(axes):
            x, y = self.process_output["times"][index, :] / 24, self.process_output["WT_energy_available"][index, :] /1e6
            for t in ax.xaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            for t in ax.yaxis.get_major_ticks(): t.label1.set_fontsize(fontsize)
            ax.plot([0.0], [0.0], linestyle="", marker="", label="Period " + str(index + 1))
            # Subsample the time series (every 70th point) to keep the plot light
            ax.plot(x[::70], y[::70], linewidth=0.75, linestyle='-', color='k', label="", marker="")
            ax.set_ylabel(self.stage_labels[index], fontsize=fontsize, color='k', labelpad=fontsize)
            if index + 1 == len(self.stage_labels):
                ax.set_xlabel('Time (days)', fontsize=fontsize, color='k', labelpad=fontsize)
            # Keep a sensible axis range even when the available power is zero
            dy = np.max(y)
            dy = np.maximum(dy, 1.00)
            ax.set_ylim([-dy/5, np.max(y)+dy/5])
        fig.tight_layout()
        return fig
| [
"matplotlib.figure.Figure",
"numpy.sort",
"numpy.asarray",
"numpy.any",
"numpy.max",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.zeros",
"importlib_resources.files",
"copy.deepcopy",
"numpy.cumsum",
"numpy.maximum",
"matplotlib.font_manager.fontManager.addfont"
] | [((408, 451), 'matplotlib.font_manager.fontManager.addfont', 'font_manager.fontManager.addfont', (['font_file'], {}), '(font_file)\n', (440, 451), False, 'from matplotlib import font_manager\n'), ((784, 801), 'copy.deepcopy', 'copy.deepcopy', (['IN'], {}), '(IN)\n', (797, 801), False, 'import copy\n'), ((998, 1033), 'numpy.asarray', 'np.asarray', (["self.IN['STAGE_LENGTH']"], {}), "(self.IN['STAGE_LENGTH'])\n", (1008, 1033), True, 'import numpy as np\n'), ((3412, 3442), 'numpy.any', 'np.any', (['(self.HEAT_DEMAND < 0.0)'], {}), '(self.HEAT_DEMAND < 0.0)\n', (3418, 3442), True, 'import numpy as np\n'), ((3523, 3554), 'numpy.any', 'np.any', (['(self.POWER_DEMAND < 0.0)'], {}), '(self.POWER_DEMAND < 0.0)\n', (3529, 3554), True, 'import numpy as np\n'), ((3636, 3667), 'numpy.any', 'np.any', (['(self.STAGE_LENGTH < 0.0)'], {}), '(self.STAGE_LENGTH < 0.0)\n', (3642, 3667), True, 'import numpy as np\n'), ((9764, 9790), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(6.0, 4.0)'}), '(figsize=(6.0, 4.0))\n', (9770, 9790), False, 'from matplotlib.figure import Figure\n'), ((11004, 11030), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(6.0, 6.5)'}), '(figsize=(6.0, 6.5))\n', (11010, 11030), False, 'from matplotlib.figure import Figure\n'), ((12347, 12373), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(6.0, 6.5)'}), '(figsize=(6.0, 6.5))\n', (12353, 12373), False, 'from matplotlib.figure import Figure\n'), ((13672, 13698), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(6.0, 6.5)'}), '(figsize=(6.0, 6.5))\n', (13678, 13698), False, 'from matplotlib.figure import Figure\n'), ((15089, 15115), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(6.0, 6.5)'}), '(figsize=(6.0, 6.5))\n', (15095, 15115), False, 'from matplotlib.figure import Figure\n'), ((16786, 16812), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(6.0, 6.5)'}), '(figsize=(6.0, 6.5))\n', (16792, 16812), False, 'from matplotlib.figure import Figure\n'), ((18383, 
18413), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.0, 5.5)'}), '(figsize=(6.0, 5.5))\n', (18393, 18413), True, 'import matplotlib.pyplot as plt\n'), ((19620, 19642), 'copy.deepcopy', 'copy.deepcopy', (['self.IN'], {}), '(self.IN)\n', (19633, 19642), False, 'import copy\n'), ((19726, 19764), 'numpy.linspace', 'np.linspace', (['low_value', 'high_value', '(21)'], {}), '(low_value, high_value, 21)\n', (19737, 19764), True, 'import numpy as np\n'), ((20291, 20316), 'numpy.asarray', 'np.asarray', (['CO2_emissions'], {}), '(CO2_emissions)\n', (20301, 20316), True, 'import numpy as np\n'), ((20342, 20368), 'numpy.asarray', 'np.asarray', (['energy_deficit'], {}), '(energy_deficit)\n', (20352, 20368), True, 'import numpy as np\n'), ((20411, 20437), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (20421, 20437), True, 'import matplotlib.pyplot as plt\n'), ((21233, 21259), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (21243, 21259), True, 'import matplotlib.pyplot as plt\n'), ((22137, 22163), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(6.0, 6.5)'}), '(figsize=(6.0, 6.5))\n', (22143, 22163), False, 'from matplotlib.figure import Figure\n'), ((863, 897), 'numpy.asarray', 'np.asarray', (["self.IN['HEAT_DEMAND']"], {}), "(self.IN['HEAT_DEMAND'])\n", (873, 897), True, 'import numpy as np\n'), ((930, 965), 'numpy.asarray', 'np.asarray', (["self.IN['POWER_DEMAND']"], {}), "(self.IN['POWER_DEMAND'])\n", (940, 965), True, 'import numpy as np\n'), ((1644, 1680), 'numpy.asarray', 'np.asarray', (["self.IN['EL_EFFICIENCY']"], {}), "(self.IN['EL_EFFICIENCY'])\n", (1654, 1680), True, 'import numpy as np\n'), ((2036, 2072), 'numpy.asarray', 'np.asarray', (["self.IN['FC_EFFICIENCY']"], {}), "(self.IN['FC_EFFICIENCY'])\n", (2046, 2072), True, 'import numpy as np\n'), ((7156, 7197), 'numpy.sum', 'np.sum', (['self.process_output[name]'], {'axis': '(1)'}), 
'(self.process_output[name], axis=1)\n', (7162, 7197), True, 'import numpy as np\n'), ((12142, 12151), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (12148, 12151), True, 'import numpy as np\n'), ((14842, 14851), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (14848, 14851), True, 'import numpy as np\n'), ((14869, 14888), 'numpy.maximum', 'np.maximum', (['dy', '(1.0)'], {}), '(dy, 1.0)\n', (14879, 14888), True, 'import numpy as np\n'), ((23327, 23336), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (23333, 23336), True, 'import numpy as np\n'), ((23354, 23373), 'numpy.maximum', 'np.maximum', (['dy', '(1.0)'], {}), '(dy, 1.0)\n', (23364, 23373), True, 'import numpy as np\n'), ((1764, 1778), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (1772, 1778), True, 'import numpy as np\n'), ((1828, 1842), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (1836, 1842), True, 'import numpy as np\n'), ((2156, 2170), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (2164, 2170), True, 'import numpy as np\n'), ((2220, 2234), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (2228, 2234), True, 'import numpy as np\n'), ((13455, 13464), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (13461, 13464), True, 'import numpy as np\n'), ((333, 354), 'importlib_resources.files', 'files', (['"""hes_off.core"""'], {}), "('hes_off.core')\n", (338, 354), False, 'from importlib_resources import files\n'), ((7269, 7282), 'numpy.sum', 'np.sum', (['value'], {}), '(value)\n', (7275, 7282), True, 'import numpy as np\n'), ((10366, 10390), 'numpy.sort', 'np.sort', (['self.WIND_SPEED'], {}), '(self.WIND_SPEED)\n', (10373, 10390), True, 'import numpy as np\n'), ((18117, 18132), 'numpy.max', 'np.max', (['(y1 + y2)'], {}), '(y1 + y2)\n', (18123, 18132), True, 'import numpy as np\n'), ((18132, 18142), 'numpy.max', 'np.max', (['y3'], {}), '(y3)\n', (18138, 18142), True, 'import numpy as np\n'), ((11314, 11371), 'numpy.cumsum', 'np.cumsum', (["self.process_output['CO2_emissions'][index, :]"], 
{}), "(self.process_output['CO2_emissions'][index, :])\n", (11323, 11371), True, 'import numpy as np\n'), ((12184, 12193), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (12190, 12193), True, 'import numpy as np\n'), ((13500, 13509), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (13506, 13509), True, 'import numpy as np\n'), ((14922, 14931), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (14928, 14931), True, 'import numpy as np\n'), ((23407, 23416), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (23413, 23416), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
from classification import Classification, _preprocess_input
from utils.utils import letterbox_image
class top1_Classification(Classification):
    """Classifier variant that returns only the index of the top-scoring class."""

    def detect_image(self, image):
        """Return the argmax class index predicted for a single PIL image."""
        in_h, in_w = self.input_shape[0], self.input_shape[1]
        # Resize with preserved aspect ratio (letterbox padding)
        resized = letterbox_image(image, [in_h, in_w])
        batch = np.array(resized, dtype=np.float32)
        # Normalize the pixels and prepend the batch dimension expected by the model
        batch = np.reshape(_preprocess_input(batch), [1, in_h, in_w, self.input_shape[2]])
        scores = self.model.predict(batch)[0]
        return np.argmax(scores)
def evaluteTop1(classfication, lines):
    """Compute top-1 accuracy of *classfication* over the annotation lines.

    Each line has the form "<label>;<image_path>\n". Returns the fraction of
    images whose predicted class index equals the annotated label. An empty
    list returns 0.0 instead of raising ZeroDivisionError.
    """
    total = len(lines)
    if total == 0:  # guard: avoid ZeroDivisionError on an empty annotation file
        return 0.0
    correct = 0
    for index, line in enumerate(lines):
        # Line format: "<label>;<image_path>" (strip the trailing newline)
        annotation_path = line.split(';')[1].split('\n')[0]
        x = Image.open(annotation_path)
        y = int(line.split(';')[0])
        pred = classfication.detect_image(x)
        correct += int(pred == y)
        if index % 100 == 0:  # progress report every 100 images
            print("[%d/%d]" % (index, total))
    return correct / total
# Build the classifier and evaluate its top-1 accuracy on the test list
classfication = top1_Classification()
# Each line of cls_test.txt has the form "<label>;<image_path>"
with open(r"./cls_test.txt","r") as f:
    lines = f.readlines()
top1 = evaluteTop1(classfication, lines)
print("top-1 accuracy = %.2f%%" % (top1*100))
| [
"utils.utils.letterbox_image",
"PIL.Image.open",
"numpy.argmax",
"numpy.array",
"classification._preprocess_input"
] | [((242, 308), 'utils.utils.letterbox_image', 'letterbox_image', (['image', '[self.input_shape[0], self.input_shape[1]]'], {}), '(image, [self.input_shape[0], self.input_shape[1]])\n', (257, 308), False, 'from utils.utils import letterbox_image\n'), ((324, 360), 'numpy.array', 'np.array', (['crop_img'], {'dtype': 'np.float32'}), '(crop_img, dtype=np.float32)\n', (332, 360), True, 'import numpy as np\n'), ((565, 581), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (574, 581), True, 'import numpy as np\n'), ((798, 825), 'PIL.Image.open', 'Image.open', (['annotation_path'], {}), '(annotation_path)\n', (808, 825), False, 'from PIL import Image\n'), ((410, 434), 'classification._preprocess_input', '_preprocess_input', (['photo'], {}), '(photo)\n', (427, 434), False, 'from classification import Classification, _preprocess_input\n')] |
from os.path import dirname, join, realpath
import pytest
from numpy import array, dtype, nan
from numpy.testing import assert_array_equal, assert_equal
from pandas_plink import example_file_prefix, read_plink, read_plink1_bin
def test_read_plink():
    """Read the bundled sample dataset and verify its metadata and genotypes."""
    data_dir = join(dirname(realpath(__file__)), "data_files")
    prefix = join(data_dir, "data")
    bim, fam, bed = read_plink(prefix, verbose=False)
    assert_equal(bed.dtype, dtype("float32"))
    assert_array_equal(bim.query("chrom=='1' and pos==72515")["snp"], ["rs4030300"])
    assert_array_equal(bim.query("chrom=='1'").shape, [10, 7])
    trait = fam.query("fid=='Sample_2' and iid=='Sample_2'")["trait"]
    assert_array_equal(trait, ["-9"])
    expected = array(
        [
            [2, 2, 1],
            [2, 1, 2],
            [nan, nan, nan],
            [nan, nan, 1],
            [2, 2, 2],
            [2, 2, 2],
            [2, 1, 0],
            [2, 2, 2],
            [1, 2, 2],
            [2, 1, 2],
        ]
    )
    assert_array_equal(bed, expected)
def test_read_plink_prefix_dot():
    """A dotted prefix pointing to no files must raise IOError."""
    missing_prefix = "/home/joao/84757.genotypes.norm.renamed"
    with pytest.raises(IOError):
        read_plink(missing_prefix)
def test_read_plink_wildcard():
    """Wildcard prefixes concatenate chromosomes with a continuous index."""
    data_dir = join(dirname(realpath(__file__)), "data_files")
    bim, fam, bed = read_plink(join(data_dir, "chr*"), verbose=False)
    for chrom, first_indices in (("11", [0, 1]), ("12", [779, 780])):
        observed = bim[bim["chrom"] == chrom]["i"].values[:2]
        assert_array_equal(observed, first_indices)
def test_read_plink1_bin():
    """read_plink1_bin with explicit bed/bim/fam paths and the default ref allele."""
    data_dir = join(dirname(realpath(__file__)), "data_files")
    prefix = join(data_dir, "data")
    G = read_plink1_bin(prefix + ".bed", prefix + ".bim", prefix + ".fam", verbose=False)
    assert_equal(G.data.dtype, dtype("float32"))
    snp = G.where((G.chrom == "1") & (G.pos == 72515), drop=True)["snp"].values
    assert_array_equal(snp, ["rs4030300"])
    assert_array_equal(G.where(G.chrom == "1", drop=True).shape, [3, 10])
    assert_array_equal(G.where(G.chrom == "2", drop=True).shape, [3, 0])
    sample = G.where((G.fid == "Sample_2") & (G.iid == "Sample_2"), drop=True)
    assert_array_equal(sample["trait"].values, ["-9"])
    expected = [
        [2.0, 2.0, nan, nan, 2.0, 2.0, 2.0, 2.0, 1.0, 2.0],
        [2.0, 1.0, nan, nan, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0],
        [1.0, 2.0, nan, 1.0, 2.0, 2.0, 0.0, 2.0, 2.0, 2.0],
    ]
    assert_array_equal(G, expected)
def test_read_plink1_bin_a0():
    """read_plink1_bin counting the a0 allele flips the dosage of each genotype."""
    data_dir = join(dirname(realpath(__file__)), "data_files")
    prefix = join(data_dir, "data")
    G = read_plink1_bin(prefix + ".bed", prefix + ".bim", prefix + ".fam",
                        verbose=False, ref="a0")
    assert_equal(G.data.dtype, dtype("float32"))
    snp = G.where((G.chrom == "1") & (G.pos == 72515), drop=True)["snp"].values
    assert_array_equal(snp, ["rs4030300"])
    assert_array_equal(G.where(G.chrom == "1", drop=True).shape, [3, 10])
    assert_array_equal(G.where(G.chrom == "2", drop=True).shape, [3, 0])
    sample = G.where((G.fid == "Sample_2") & (G.iid == "Sample_2"), drop=True)
    assert_array_equal(sample["trait"].values, ["-9"])
    expected = [
        [0.0, 0.0, nan, nan, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
        [0.0, 1.0, nan, nan, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
        [1.0, 0.0, nan, 1.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0],
    ]
    assert_array_equal(G, expected)
def test_read_plink1_bin_wildcard_not_found():
    """Mismatched wildcard patterns raise ValueError; ambiguous fam globs only warn."""
    data_dir = join(dirname(realpath(__file__)), "data_files")
    # bed glob that matches nothing
    with pytest.raises(ValueError):
        read_plink1_bin(join(data_dir, "chrr*.bed"), verbose=False)
    # bed glob combined with a single bim file
    with pytest.raises(ValueError):
        read_plink1_bin(join(data_dir, "chr*.bed"), "chr11.bim", verbose=False)
    # bed glob combined with a bim glob that matches nothing
    with pytest.raises(ValueError):
        read_plink1_bin(join(data_dir, "chr*.bed"), join(data_dir, "chrr*.bim"), verbose=False)
    # multiple fam candidates: accepted, but a warning is emitted
    with pytest.warns(UserWarning):
        read_plink1_bin(
            join(data_dir, "chr*.bed"),
            join(data_dir, "chr*.bim"),
            join(data_dir, "chr*.fam"),
            verbose=True,
        )
def test_read_plink1_bin_wildcard():
    """Wildcard bed files are concatenated along the variant axis."""
    data_dir = join(dirname(realpath(__file__)), "data_files")
    G = read_plink1_bin(join(data_dir, "chr*.bed"), verbose=False)
    G.where(G.chrom == "11", drop=True).values
    assert_equal(G.where(G.chrom == "11", drop=True).shape, (14, 779))
    assert_equal(G.where(G.chrom == "12", drop=True).shape, (14, 473))
    expected_corner = [[0.00, 0.00], [0.00, 1.00]]
    assert_equal(G.where(G.chrom == "11", drop=True).values[:2, :2], expected_corner)
def test_example_file_prefix():
    """The deprecated helper must still work, but has to emit a warning."""
    with pytest.warns(DeprecationWarning):
        example_file_prefix()
| [
"os.path.join",
"pytest.warns",
"os.path.realpath",
"pandas_plink.read_plink",
"numpy.array",
"pandas_plink.read_plink1_bin",
"pytest.raises",
"numpy.dtype",
"numpy.testing.assert_array_equal",
"pandas_plink.example_file_prefix"
] | [((337, 360), 'os.path.join', 'join', (['datafiles', '"""data"""'], {}), "(datafiles, 'data')\n", (341, 360), False, 'from os.path import dirname, join, realpath\n'), ((384, 422), 'pandas_plink.read_plink', 'read_plink', (['file_prefix'], {'verbose': '(False)'}), '(file_prefix, verbose=False)\n', (394, 422), False, 'from pandas_plink import example_file_prefix, read_plink, read_plink1_bin\n'), ((1347, 1370), 'os.path.join', 'join', (['datafiles', '"""chr*"""'], {}), "(datafiles, 'chr*')\n", (1351, 1370), False, 'from os.path import dirname, join, realpath\n'), ((1394, 1432), 'pandas_plink.read_plink', 'read_plink', (['file_prefix'], {'verbose': '(False)'}), '(file_prefix, verbose=False)\n', (1404, 1432), False, 'from pandas_plink import example_file_prefix, read_plink, read_plink1_bin\n'), ((1437, 1506), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["bim[bim['chrom'] == '11']['i'].values[:2]", '[0, 1]'], {}), "(bim[bim['chrom'] == '11']['i'].values[:2], [0, 1])\n", (1455, 1506), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((1511, 1584), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["bim[bim['chrom'] == '12']['i'].values[:2]", '[779, 780]'], {}), "(bim[bim['chrom'] == '12']['i'].values[:2], [779, 780])\n", (1529, 1584), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((1698, 1721), 'os.path.join', 'join', (['datafiles', '"""data"""'], {}), "(datafiles, 'data')\n", (1702, 1721), False, 'from os.path import dirname, join, realpath\n'), ((1824, 1869), 'pandas_plink.read_plink1_bin', 'read_plink1_bin', (['bed', 'bim', 'fam'], {'verbose': '(False)'}), '(bed, bim, fam, verbose=False)\n', (1839, 1869), False, 'from pandas_plink import example_file_prefix, read_plink, read_plink1_bin\n'), ((2004, 2042), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['snp', "['rs4030300']"], {}), "(snp, ['rs4030300'])\n", (2022, 2042), False, 'from numpy.testing import assert_array_equal, 
assert_equal\n'), ((2101, 2135), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['shape', '[3, 10]'], {}), '(shape, [3, 10])\n', (2119, 2135), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((2194, 2227), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['shape', '[3, 0]'], {}), '(shape, [3, 0])\n', (2212, 2227), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((2307, 2352), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["g['trait'].values", "['-9']"], {}), "(g['trait'].values, ['-9'])\n", (2325, 2352), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((2556, 2582), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['G', 'arr'], {}), '(G, arr)\n', (2574, 2582), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((2699, 2722), 'os.path.join', 'join', (['datafiles', '"""data"""'], {}), "(datafiles, 'data')\n", (2703, 2722), False, 'from os.path import dirname, join, realpath\n'), ((2825, 2880), 'pandas_plink.read_plink1_bin', 'read_plink1_bin', (['bed', 'bim', 'fam'], {'verbose': '(False)', 'ref': '"""a0"""'}), "(bed, bim, fam, verbose=False, ref='a0')\n", (2840, 2880), False, 'from pandas_plink import example_file_prefix, read_plink, read_plink1_bin\n'), ((3015, 3053), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['snp', "['rs4030300']"], {}), "(snp, ['rs4030300'])\n", (3033, 3053), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((3112, 3146), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['shape', '[3, 10]'], {}), '(shape, [3, 10])\n', (3130, 3146), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((3205, 3238), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['shape', '[3, 0]'], {}), '(shape, [3, 0])\n', (3223, 3238), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((3318, 3363), 'numpy.testing.assert_array_equal', 
'assert_array_equal', (["g['trait'].values", "['-9']"], {}), "(g['trait'].values, ['-9'])\n", (3336, 3363), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((3567, 3593), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['G', 'arr'], {}), '(G, arr)\n', (3585, 3593), False, 'from numpy.testing import assert_array_equal, assert_equal\n'), ((3723, 3751), 'os.path.join', 'join', (['datafiles', '"""chrr*.bed"""'], {}), "(datafiles, 'chrr*.bed')\n", (3727, 3751), False, 'from os.path import dirname, join, realpath\n'), ((3856, 3883), 'os.path.join', 'join', (['datafiles', '"""chr*.bed"""'], {}), "(datafiles, 'chr*.bed')\n", (3860, 3883), False, 'from os.path import dirname, join, realpath\n'), ((4000, 4027), 'os.path.join', 'join', (['datafiles', '"""chr*.bed"""'], {}), "(datafiles, 'chr*.bed')\n", (4004, 4027), False, 'from os.path import dirname, join, realpath\n'), ((4044, 4072), 'os.path.join', 'join', (['datafiles', '"""chrr*.bim"""'], {}), "(datafiles, 'chrr*.bim')\n", (4048, 4072), False, 'from os.path import dirname, join, realpath\n'), ((4187, 4214), 'os.path.join', 'join', (['datafiles', '"""chr*.bed"""'], {}), "(datafiles, 'chr*.bed')\n", (4191, 4214), False, 'from os.path import dirname, join, realpath\n'), ((4231, 4258), 'os.path.join', 'join', (['datafiles', '"""chr*.bim"""'], {}), "(datafiles, 'chr*.bim')\n", (4235, 4258), False, 'from os.path import dirname, join, realpath\n'), ((4275, 4302), 'os.path.join', 'join', (['datafiles', '"""chr*.fam"""'], {}), "(datafiles, 'chr*.fam')\n", (4279, 4302), False, 'from os.path import dirname, join, realpath\n'), ((4529, 4556), 'os.path.join', 'join', (['datafiles', '"""chr*.bed"""'], {}), "(datafiles, 'chr*.bed')\n", (4533, 4556), False, 'from os.path import dirname, join, realpath\n'), ((4566, 4607), 'pandas_plink.read_plink1_bin', 'read_plink1_bin', (['bed_files'], {'verbose': '(False)'}), '(bed_files, verbose=False)\n', (4581, 4607), False, 'from pandas_plink import 
example_file_prefix, read_plink, read_plink1_bin\n'), ((451, 467), 'numpy.dtype', 'dtype', (['"""float32"""'], {}), "('float32')\n", (456, 467), False, 'from numpy import array, dtype, nan\n'), ((768, 899), 'numpy.array', 'array', (['[[2, 2, 1], [2, 1, 2], [nan, nan, nan], [nan, nan, 1], [2, 2, 2], [2, 2, 2],\n [2, 1, 0], [2, 2, 2], [1, 2, 2], [2, 1, 2]]'], {}), '([[2, 2, 1], [2, 1, 2], [nan, nan, nan], [nan, nan, 1], [2, 2, 2], [2,\n 2, 2], [2, 1, 0], [2, 2, 2], [1, 2, 2], [2, 1, 2]])\n', (773, 899), False, 'from numpy import array, dtype, nan\n'), ((1145, 1167), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (1158, 1167), False, 'import pytest\n'), ((1177, 1230), 'pandas_plink.read_plink', 'read_plink', (['"""/home/joao/84757.genotypes.norm.renamed"""'], {}), "('/home/joao/84757.genotypes.norm.renamed')\n", (1187, 1230), False, 'from pandas_plink import example_file_prefix, read_plink, read_plink1_bin\n'), ((1901, 1917), 'numpy.dtype', 'dtype', (['"""float32"""'], {}), "('float32')\n", (1906, 1917), False, 'from numpy import array, dtype, nan\n'), ((2912, 2928), 'numpy.dtype', 'dtype', (['"""float32"""'], {}), "('float32')\n", (2917, 2928), False, 'from numpy import array, dtype, nan\n'), ((3762, 3787), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3775, 3787), False, 'import pytest\n'), ((3797, 3838), 'pandas_plink.read_plink1_bin', 'read_plink1_bin', (['bed_files'], {'verbose': '(False)'}), '(bed_files, verbose=False)\n', (3812, 3838), False, 'from pandas_plink import example_file_prefix, read_plink, read_plink1_bin\n'), ((3893, 3918), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3906, 3918), False, 'import pytest\n'), ((3928, 3982), 'pandas_plink.read_plink1_bin', 'read_plink1_bin', (['bed_files', '"""chr11.bim"""'], {'verbose': '(False)'}), "(bed_files, 'chr11.bim', verbose=False)\n", (3943, 3982), False, 'from pandas_plink import example_file_prefix, read_plink, 
read_plink1_bin\n'), ((4082, 4107), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4095, 4107), False, 'import pytest\n'), ((4117, 4169), 'pandas_plink.read_plink1_bin', 'read_plink1_bin', (['bed_files', 'bim_files'], {'verbose': '(False)'}), '(bed_files, bim_files, verbose=False)\n', (4132, 4169), False, 'from pandas_plink import example_file_prefix, read_plink, read_plink1_bin\n'), ((4312, 4337), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (4324, 4337), False, 'import pytest\n'), ((4347, 4409), 'pandas_plink.read_plink1_bin', 'read_plink1_bin', (['bed_files', 'bim_files', 'fam_files'], {'verbose': '(True)'}), '(bed_files, bim_files, fam_files, verbose=True)\n', (4362, 4409), False, 'from pandas_plink import example_file_prefix, read_plink, read_plink1_bin\n'), ((4949, 4981), 'pytest.warns', 'pytest.warns', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (4961, 4981), False, 'import pytest\n'), ((4991, 5012), 'pandas_plink.example_file_prefix', 'example_file_prefix', ([], {}), '()\n', (5010, 5012), False, 'from pandas_plink import example_file_prefix, read_plink, read_plink1_bin\n'), ((284, 302), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (292, 302), False, 'from os.path import dirname, join, realpath\n'), ((1294, 1312), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (1302, 1312), False, 'from os.path import dirname, join, realpath\n'), ((1645, 1663), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (1653, 1663), False, 'from os.path import dirname, join, realpath\n'), ((2646, 2664), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (2654, 2664), False, 'from os.path import dirname, join, realpath\n'), ((3672, 3690), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (3680, 3690), False, 'from os.path import dirname, join, realpath\n'), ((4478, 4496), 'os.path.realpath', 'realpath', (['__file__'], 
{}), '(__file__)\n', (4486, 4496), False, 'from os.path import dirname, join, realpath\n')] |
#
# Copyright 2019 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import sys
# Make the shared pendulum model (and its generated code helpers) importable.
sys.path.insert(0, '../getting_started/common')
from acados_template import *
from export_pendulum_ode_model import export_pendulum_ode_model
import numpy as np
import scipy.linalg
import argparse
# set to 'True' to generate test data
GENERATE_DATA = False
# Set to 'True' to bypass command-line parsing and use hard-coded settings.
LOCAL_TEST = False
# NLP solver tolerance; the regression check at the bottom allows 50x this.
TEST_TOL = 1e-8
# ---------------------------------------------------------------------------
# Test configuration: either a hard-coded local setup (LOCAL_TEST) or values
# parsed from the command line.  Every value is validated against the set of
# options acados actually supports.
# ---------------------------------------------------------------------------
if LOCAL_TEST is True:
    COST_MODULE = 'LS'
    COST_MODULE_N = 'LS'
    SOLVER_TYPE = 'SQP_RTI'
    QP_SOLVER = 'FULL_CONDENSING_QPOASES'
    INTEGRATOR_TYPE = 'IRK'
    HESS_APPROX = 'GAUSS_NEWTON'
    # BUGFIX: REGULARIZATION was never defined on this branch, so the print
    # below (and the solver setup further down) raised NameError whenever
    # LOCAL_TEST was enabled.
    REGULARIZATION = 'NO_REGULARIZE'
else:
    parser = argparse.ArgumentParser(description='test Python interface on pendulum example.')
    parser.add_argument('--COST_MODULE', dest='COST_MODULE',
                        default='LS',
                        help='COST_MODULE: linear least-squares (LS) or nonlinear \
                            least-squares (NLS) (default: LS), external (EXTERNAL)')
    parser.add_argument('--COST_MODULE_N', dest='COST_MODULE_N',
                        default='LS',
                        help='COST_MODULE_N: linear least-squares (LS) or nonlinear \
                            least-squares (NLS) (default: LS), external (EXTERNAL)')
    # BUGFIX: the help text listed FULL_CONDENSING_HPIPM twice and omitted
    # FULL_CONDENSING_QPOASES, which the validation below accepts.
    parser.add_argument('--QP_SOLVER', dest='QP_SOLVER',
                        default='PARTIAL_CONDENSING_HPIPM',
                        help='QP_SOLVER: PARTIAL_CONDENSING_HPIPM, FULL_CONDENSING_HPIPM, ' \
                            'FULL_CONDENSING_QPOASES (default: PARTIAL_CONDENSING_HPIPM)')
    parser.add_argument('--INTEGRATOR_TYPE', dest='INTEGRATOR_TYPE',
                        default='ERK',
                        help='INTEGRATOR_TYPE: explicit (ERK) or implicit (IRK) or GNSF-IRK (GNSF) ' \
                            ' Runge-Kutta (default: ERK)')
    parser.add_argument('--SOLVER_TYPE', dest='SOLVER_TYPE',
                        default='SQP_RTI',
                        help='SOLVER_TYPE: (full step) sequential quadratic programming (SQP) or ' \
                            ' real-time iteration (SQP_RTI) (default: SQP_RTI)')
    parser.add_argument('--HESS_APPROX', dest='HESS_APPROX',
                        default='GAUSS_NEWTON',
                        help='HESS_APPROX: GAUSS_NEWTON or ' \
                            ' EXACT (default: GAUSS_NEWTON)')
    parser.add_argument('--REGULARIZATION', dest='REGULARIZATION',
                        default='NO_REGULARIZE',
                        help='REGULARIZATION: NO_REGULARIZE or MIRROR or PROJECT or CONVEXIFY' \
                            ' or PROJECT_REDUC_HESS (default: NO_REGULARIZE)')
    args = parser.parse_args()

    def _check_option(name, value, allowed):
        """Raise if *value* is not one of the *allowed* strings for *name*."""
        # Fail fast instead of letting acados error out later with a less
        # helpful message.
        if value not in allowed:
            raise Exception('Invalid unit test value {} for parameter {}. Possible values are' \
                ' {}. Exiting.'.format(value, name, allowed))

    # BUGFIX: the checks for INTEGRATOR_TYPE, SOLVER_TYPE, HESS_APPROX and
    # REGULARIZATION originally tested `VALUE not in VALUE` (substring
    # membership of a string in itself), which is always False, so invalid
    # command-line values were silently accepted.
    COST_MODULE = args.COST_MODULE
    _check_option('COST_MODULE', COST_MODULE, ['LS', 'NLS', 'EXTERNAL'])

    COST_MODULE_N = args.COST_MODULE_N
    _check_option('COST_MODULE_N', COST_MODULE_N, ['LS', 'NLS', 'EXTERNAL'])

    QP_SOLVER = args.QP_SOLVER
    _check_option('QP_SOLVER', QP_SOLVER,
                  ['PARTIAL_CONDENSING_HPIPM', 'FULL_CONDENSING_HPIPM',
                   'FULL_CONDENSING_QPOASES'])

    INTEGRATOR_TYPE = args.INTEGRATOR_TYPE
    _check_option('INTEGRATOR_TYPE', INTEGRATOR_TYPE, ['ERK', 'IRK', 'GNSF'])

    SOLVER_TYPE = args.SOLVER_TYPE
    # BUGFIX: the allowed list said 'SQP-RTI' although both the default above
    # and the acados option name use 'SQP_RTI'; the broken membership test
    # used to hide the mismatch.
    _check_option('SOLVER_TYPE', SOLVER_TYPE, ['SQP', 'SQP_RTI'])

    HESS_APPROX = args.HESS_APPROX
    _check_option('HESS_APPROX', HESS_APPROX, ['GAUSS_NEWTON', 'EXACT'])

    REGULARIZATION = args.REGULARIZATION
    _check_option('REGULARIZATION', REGULARIZATION,
                  ['NO_REGULARIZE', 'MIRROR', 'PROJECT', 'PROJECT_REDUC_HESS',
                   'CONVEXIFY'])

# print test setting
print("Running test with:", \
    "\n\tcost module terminal:", COST_MODULE_N,\
    "\n\tcost module:", COST_MODULE, \
    "\n\thessian approximation:", HESS_APPROX, \
    "\n\tintergrator:", INTEGRATOR_TYPE, \
    "\n\tqp solver:", QP_SOLVER,\
    "\n\tregularization:", REGULARIZATION, \
    "\n\tsolver:", SOLVER_TYPE)
# ---------------------------------------------------------------------------
# Build the optimal control problem (OCP) formulation.
# ---------------------------------------------------------------------------
# create ocp object to formulate the OCP
ocp = AcadosOcp()
# set model: the pendulum-on-cart ODE exported by the common helper
model = export_pendulum_ode_model()
ocp.model = model
Tf = 1.0                # final time (used for solver_options.tf below)
nx = model.x.size()[0]  # number of states
nu = model.u.size()[0]  # number of controls
ny = nx + nu            # stage residual size (states + controls)
ny_e = nx               # terminal residual size (states only)
N = 20                  # number of shooting intervals
# set dimensions
ocp.dims.N = N
# set cost: weights shared by all cost-module variants below
Q = 2*np.diag([1e3, 1e3, 1e-2, 1e-2])
R = 2*np.diag([1e-2])
x = ocp.model.x
u = ocp.model.u
# Stage cost: the same quadratic cost expressed three equivalent ways,
# selected by COST_MODULE.
if COST_MODULE == 'LS':
    ocp.cost.cost_type = 'LINEAR_LS'
    # residual y = Vx @ x + Vu @ u: identity on the states ...
    ocp.cost.Vx = np.zeros((ny, nx))
    ocp.cost.Vx[:nx,:nx] = np.eye(nx)
    Vu = np.zeros((ny, nu))
    # ... and the last residual row selects the control
    # (row index 4 == nx; assumes the 4-state pendulum model).
    Vu[4,0] = 1.0
    ocp.cost.Vu = Vu
    ocp.cost.W = scipy.linalg.block_diag(Q, R)
    ocp.cost.yref = np.zeros((ny, ))
elif COST_MODULE == 'NLS':
    ocp.cost.cost_type = 'NONLINEAR_LS'
    # nonlinear residual that happens to be linear: y = (x, u)
    ocp.model.cost_y_expr = vertcat(x, u)
    ocp.cost.W = scipy.linalg.block_diag(Q, R)
    ocp.cost.yref = np.zeros((ny, ))
elif COST_MODULE == 'EXTERNAL':
    ocp.cost.cost_type = 'EXTERNAL'
    # explicit scalar cost 0.5 * (x,u)' blkdiag(Q,R) (x,u)
    ocp.model.cost_expr_ext_cost = 0.5 * vertcat(x, u).T @ scipy.linalg.block_diag(Q, R) @ vertcat(x, u)
else:
    raise Exception('Unknown COST_MODULE. Possible values are \'LS\', \'NLS\', \'EXTERNAL\'.')
# Terminal cost: same pattern, selected by COST_MODULE_N.
if COST_MODULE_N == 'LS':
    ocp.cost.cost_type_e = 'LINEAR_LS'
    ocp.cost.Vx_e = np.eye(nx)
    ocp.cost.W_e = Q
    ocp.cost.yref_e = np.zeros((ny_e, ))
elif COST_MODULE_N == 'NLS':
    ocp.cost.cost_type_e = 'NONLINEAR_LS'
    ocp.model.cost_y_expr_e = x
    ocp.cost.W_e = Q
    ocp.cost.yref_e = np.zeros((ny_e, ))
elif COST_MODULE_N == 'EXTERNAL':
    ocp.cost.cost_type_e = 'EXTERNAL'
    ocp.model.cost_expr_ext_cost_e = 0.5 * x.T @ Q @ x
else:
    raise Exception('Unknown COST_MODULE_N. Possible values are \'LS\', \'NLS\', \'EXTERNAL\'.')
# set constraints: bound on the actuation force and the initial state
Fmax = 80
ocp.constraints.constr_type = 'BGH'
ocp.constraints.lbu = np.array([-Fmax])
ocp.constraints.ubu = np.array([+Fmax])
# initial state: cart at rest with the second state (angle) at pi
ocp.constraints.x0 = np.array([0.0, np.pi, 0.0, 0.0])
ocp.constraints.idxbu = np.array([0])
# set options
ocp.solver_options.qp_solver = QP_SOLVER
ocp.solver_options.hessian_approx = HESS_APPROX
# Request exact Hessian contributions from constraints, cost and dynamics
# (presumably only effective when HESS_APPROX == 'EXACT' — see acados docs).
ocp.solver_options.exact_hess_constr = 1
ocp.solver_options.exact_hess_cost = 1
ocp.solver_options.exact_hess_dyn = 1
ocp.solver_options.regularize_method = REGULARIZATION
ocp.solver_options.integrator_type = INTEGRATOR_TYPE
# integrator settings
ocp.solver_options.sim_method_num_stages = 2
ocp.solver_options.sim_method_num_steps = 5
ocp.solver_options.sim_method_newton_iter = 3
# Tight NLP tolerances so the regression comparison below is meaningful.
ocp.solver_options.nlp_solver_tol_stat = TEST_TOL
ocp.solver_options.nlp_solver_tol_eq = TEST_TOL
ocp.solver_options.nlp_solver_tol_ineq = TEST_TOL
ocp.solver_options.nlp_solver_tol_comp = TEST_TOL
ocp.solver_options.qp_solver_cond_N = int(N/2)
ocp.solver_options.nlp_solver_max_iter = 200
ocp.solver_options.qp_solver_iter_max = 50
ocp.solver_options.print_level = 0
# set prediction horizon
ocp.solver_options.tf = Tf
ocp.solver_options.nlp_solver_type = SOLVER_TYPE
# The GNSF integrator needs the pre-generated structure-detection file.
# NOTE(review): `json` is not imported explicitly here — it appears to come
# from the `acados_template` star import; confirm.
if ocp.solver_options.integrator_type == 'GNSF':
    with open('../getting_started/common/' + model.name + '_gnsf_functions.json', 'r') as f:
        gnsf_dict = json.load(f)
    ocp.gnsf_model = gnsf_dict
# ---------------------------------------------------------------------------
# Create the solver, initialize the primal/dual trajectories, solve, and
# extract the solution.
# ---------------------------------------------------------------------------
ocp_solver = AcadosOcpSolver(ocp, json_file = 'acados_ocp.json')

# Initial state guess: the second state (the angle, starting at pi per x0)
# is swept linearly toward 0; all other states start at zero.  np.arange
# with stop -pi/N and step -pi/N yields exactly N+1 samples.
x_traj_init = np.transpose(np.vstack([np.zeros((N+1,)),
                                      np.arange(pi, -pi/N, -pi/N),
                                      np.zeros((N+1,)),
                                      np.zeros((N+1,))]))
for i in range(N+1):
    ocp_solver.set(i, "x", x_traj_init[i])

# Initial guesses for the dual variables (pi) and the controls.
pi_init = np.ones((N, nx))
for i in range(N):
    ocp_solver.set(i, "pi", pi_init[i])
u_init = np.zeros((N, nu))
for i in range(N):
    ocp_solver.set(i, "u", u_init[i])

# solve ocp
# np.zeros instead of np.ndarray: no uninitialized memory if the fill
# loops below ever change (every entry is overwritten either way).
simX = np.zeros((N+1, nx))
simU = np.zeros((N, nu))
status = ocp_solver.solve()
ocp_solver.print_statistics()
if status != 0:
    raise Exception('acados returned status {}. Exiting.'.format(status))
sqp_iter = ocp_solver.get_stats('sqp_iter')
if SOLVER_TYPE in {'SQP'}:
    print("Problem solved: SQP iterations ", sqp_iter, "\n")

# get solution
for i in range(N):
    simX[i,:] = ocp_solver.get(i, "x")
    simU[i,:] = ocp_solver.get(i, "u")
simX[N,:] = ocp_solver.get(N, "x")

# BUGFIX: this update was guarded by
# `COST_MODULE in {'LINEAR_LS', 'NONLINEAR_LS'}`, but COST_MODULE only ever
# takes the values 'LS', 'NLS' or 'EXTERNAL' (validated above), so the
# branch was dead code.  Stage references are keyed on the stage cost
# module, the terminal reference on the terminal one; 'EXTERNAL' costs
# have no yref, hence the guards.
if COST_MODULE in {'LS', 'NLS'}:
    # update stage references
    for j in range(N):
        ocp_solver.cost_set(j, "yref", np.array([0, 0, 0, 0, 0]))
if COST_MODULE_N in {'LS', 'NLS'}:
    ocp_solver.cost_set(N, "yref", np.array([0, 0, 0, 0]))
# dump result to JSON file for unit testing
# The reference filename encodes the full test configuration so every
# combination of options has its own reference trajectory.
test_file_name = 'test_data/pendulum_ocp_formulations/test_ocp_' + COST_MODULE + \
                '_' + COST_MODULE_N + '_' + QP_SOLVER + \
                '_' + INTEGRATOR_TYPE + \
                '_' + SOLVER_TYPE + \
                '_' + HESS_APPROX + \
                '_' + REGULARIZATION + \
                '.json'
if GENERATE_DATA:
    # Regenerate the reference data instead of checking against it.
    with open(test_file_name, 'w') as f:
        json.dump({"simX": simX.tolist(), "simU": simU.tolist()}, f, indent=4, sort_keys=True)
else:
    # Compare the computed trajectories against the stored reference.
    with open(test_file_name, 'r') as f:
        test_data = json.load(f)
    simX_error = np.linalg.norm(test_data['simX'] - simX)
    simU_error = np.linalg.norm(test_data['simU'] - simU)
    # Allow 50x the solver tolerance for the trajectory mismatch.
    CHECK_TOL = 50 * TEST_TOL
    if simX_error > CHECK_TOL or simU_error > CHECK_TOL:
        raise Exception("Python acados test failure with accuracies" +
                        " {:.2E} and {:.2E} ({:.2E} required)".format(simX_error, simU_error, CHECK_TOL) +
                        " on pendulum example! Exiting.\n")
    else:
        print('Python test passed with accuracy {:.2E}'.format(max(simU_error, simX_error)))
| [
"numpy.eye",
"sys.path.insert",
"numpy.ones",
"argparse.ArgumentParser",
"export_pendulum_ode_model.export_pendulum_ode_model",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numpy.ndarray",
"numpy.linalg.norm",
"numpy.arange"
] | [((1506, 1553), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../getting_started/common"""'], {}), "(0, '../getting_started/common')\n", (1521, 1553), False, 'import sys\n'), ((6810, 6837), 'export_pendulum_ode_model.export_pendulum_ode_model', 'export_pendulum_ode_model', ([], {}), '()\n', (6835, 6837), False, 'from export_pendulum_ode_model import export_pendulum_ode_model\n'), ((8491, 8508), 'numpy.array', 'np.array', (['[-Fmax]'], {}), '([-Fmax])\n', (8499, 8508), True, 'import numpy as np\n'), ((8531, 8548), 'numpy.array', 'np.array', (['[+Fmax]'], {}), '([+Fmax])\n', (8539, 8548), True, 'import numpy as np\n'), ((8570, 8602), 'numpy.array', 'np.array', (['[0.0, np.pi, 0.0, 0.0]'], {}), '([0.0, np.pi, 0.0, 0.0])\n', (8578, 8602), True, 'import numpy as np\n'), ((8627, 8640), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (8635, 8640), True, 'import numpy as np\n'), ((10081, 10097), 'numpy.ones', 'np.ones', (['(N, nx)'], {}), '((N, nx))\n', (10088, 10097), True, 'import numpy as np\n'), ((10167, 10184), 'numpy.zeros', 'np.zeros', (['(N, nu)'], {}), '((N, nu))\n', (10175, 10184), True, 'import numpy as np\n'), ((10262, 10285), 'numpy.ndarray', 'np.ndarray', (['(N + 1, nx)'], {}), '((N + 1, nx))\n', (10272, 10285), True, 'import numpy as np\n'), ((10291, 10310), 'numpy.ndarray', 'np.ndarray', (['(N, nu)'], {}), '((N, nu))\n', (10301, 10310), True, 'import numpy as np\n'), ((2023, 2109), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""test Python interface on pendulum example."""'}), "(description=\n 'test Python interface on pendulum example.')\n", (2046, 2109), False, 'import argparse\n'), ((6993, 7030), 'numpy.diag', 'np.diag', (['[1000.0, 1000.0, 0.01, 0.01]'], {}), '([1000.0, 1000.0, 0.01, 0.01])\n', (7000, 7030), True, 'import numpy as np\n'), ((7031, 7046), 'numpy.diag', 'np.diag', (['[0.01]'], {}), '([0.01])\n', (7038, 7046), True, 'import numpy as np\n'), ((7161, 7179), 'numpy.zeros', 'np.zeros', (['(ny, 
nx)'], {}), '((ny, nx))\n', (7169, 7179), True, 'import numpy as np\n'), ((7207, 7217), 'numpy.eye', 'np.eye', (['nx'], {}), '(nx)\n', (7213, 7217), True, 'import numpy as np\n'), ((7228, 7246), 'numpy.zeros', 'np.zeros', (['(ny, nu)'], {}), '((ny, nu))\n', (7236, 7246), True, 'import numpy as np\n'), ((7355, 7370), 'numpy.zeros', 'np.zeros', (['(ny,)'], {}), '((ny,))\n', (7363, 7370), True, 'import numpy as np\n'), ((7931, 7941), 'numpy.eye', 'np.eye', (['nx'], {}), '(nx)\n', (7937, 7941), True, 'import numpy as np\n'), ((7985, 8002), 'numpy.zeros', 'np.zeros', (['(ny_e,)'], {}), '((ny_e,))\n', (7993, 8002), True, 'import numpy as np\n'), ((11643, 11683), 'numpy.linalg.norm', 'np.linalg.norm', (["(test_data['simX'] - simX)"], {}), "(test_data['simX'] - simX)\n", (11657, 11683), True, 'import numpy as np\n'), ((11701, 11741), 'numpy.linalg.norm', 'np.linalg.norm', (["(test_data['simU'] - simU)"], {}), "(test_data['simU'] - simU)\n", (11715, 11741), True, 'import numpy as np\n'), ((7551, 7566), 'numpy.zeros', 'np.zeros', (['(ny,)'], {}), '((ny,))\n', (7559, 7566), True, 'import numpy as np\n'), ((8151, 8168), 'numpy.zeros', 'np.zeros', (['(ny_e,)'], {}), '((ny_e,))\n', (8159, 8168), True, 'import numpy as np\n'), ((10974, 10996), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (10982, 10996), True, 'import numpy as np\n'), ((9913, 9931), 'numpy.zeros', 'np.zeros', (['(N + 1,)'], {}), '((N + 1,))\n', (9921, 9931), True, 'import numpy as np\n'), ((9938, 9969), 'numpy.arange', 'np.arange', (['pi', '(-pi / N)', '(-pi / N)'], {}), '(pi, -pi / N, -pi / N)\n', (9947, 9969), True, 'import numpy as np\n'), ((9967, 9985), 'numpy.zeros', 'np.zeros', (['(N + 1,)'], {}), '((N + 1,))\n', (9975, 9985), True, 'import numpy as np\n'), ((9985, 10003), 'numpy.zeros', 'np.zeros', (['(N + 1,)'], {}), '((N + 1,))\n', (9993, 10003), True, 'import numpy as np\n'), ((10912, 10937), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (10920, 
10937), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
def _enforce_ratio(goal_ratio, supx, infx, supy, infy):
"""
Computes the right value of `supx,infx,supy,infy` to obtain the desired
ratio in :func:`plot_eigs`. Ratio is defined as
::
dx = supx - infx
dy = supy - infy
max(dx,dy) / min(dx,dy)
:param float goal_ratio: the desired ratio.
:param float supx: the old value of `supx`, to be adjusted.
:param float infx: the old value of `infx`, to be adjusted.
:param float supy: the old value of `supy`, to be adjusted.
:param float infy: the old value of `infy`, to be adjusted.
:return tuple: a tuple which contains the updated values of
`supx,infx,supy,infy` in this order.
"""
dx = supx - infx
if dx == 0:
dx = 1.0e-16
dy = supy - infy
if dy == 0:
dy = 1.0e-16
ratio = max(dx, dy) / min(dx, dy)
if ratio >= goal_ratio:
if dx < dy:
goal_size = dy / goal_ratio
supx += (goal_size - dx) / 2
infx -= (goal_size - dx) / 2
elif dy < dx:
goal_size = dx / goal_ratio
supy += (goal_size - dy) / 2
infy -= (goal_size - dy) / 2
return (supx, infx, supy, infy)
def _plot_limits(numbers, narrow_view):
if narrow_view:
supx = max(numbers.real) + 0.05
infx = min(numbers.real) - 0.05
supy = max(numbers.imag) + 0.05
infy = min(numbers.imag) - 0.05
return _enforce_ratio(8, supx, infx, supy, infy)
else:
return np.max(np.ceil(np.abs(numbers)))
def plot(
    numbers,
    show_axes=True,
    show_unit_circle=True,
    figsize=(8, 8),
    title="",
    narrow_view=False,
    dpi=None,
    filename=None,
):
    """
    Plot complex numbers (for instance, eigenvalues) on the complex plane.

    :param np.ndarray numbers: Array of complex numbers to be plotted.
    :param bool show_axes: if True, the cartesian axes will be showed in the
        plot. Default is True.
    :param bool show_unit_circle: if True, the circle with unitary radius
        and center in the origin will be showed. Default is True.
    :param tuple(int,int) figsize: Tuple defining the figure size in inches.
        Default is (8, 8).
    :param str title: Title of the plot.
    :param bool narrow_view: if True, the plot will show only the smallest
        rectangular area which contains all the eigenvalues, with a padding
        of 0.05. Not compatible with `show_axes=True`. Default is False.
    :param int dpi: If not None, the given value is passed to
        ``plt.figure`` in order to specify the DPI of the resulting figure.
    :param str filename: If specified, the plot is saved at `filename`
        instead of being shown interactively.

    Example:

    >>> import numpy as np; from smithers.plot import plot
    >>> complex_numbers = np.random.rand(10) + np.random.rand(10) * 1j
    >>> plot(complex_numbers, title="Random complex numbers", narrow_view=True)
    """
    if dpi is not None:
        plt.figure(figsize=figsize, dpi=dpi)
    else:
        plt.figure(figsize=figsize)
    plt.title(title)
    # NOTE(review): the return value of plt.gcf() is discarded — this call
    # looks redundant; confirm before removing.
    plt.gcf()
    ax = plt.gca()
    # Draw the numbers as blue circular markers ("bo"); the handle is unused.
    points = ax.plot(
        numbers.real, numbers.imag, "bo"
    )
    if narrow_view:
        # Tight rectangular limits around the data (see _plot_limits).
        supx, infx, supy, infy = _plot_limits(numbers, narrow_view)
        # set limits for axis
        ax.set_xlim((infx, supx))
        ax.set_ylim((infy, supy))
        # x and y axes, drawn as arrow annotations clipped to [-1, 1];
        # the arrowhead is shown only when the axis reaches 1.0 exactly.
        if show_axes:
            endx = np.min([supx, 1.0])
            ax.annotate(
                "",
                xy=(endx, 0.0),
                xytext=(np.max([infx, -1.0]), 0.0),
                arrowprops=dict(arrowstyle=("->" if endx == 1.0 else "-")),
            )
            endy = np.min([supy, 1.0])
            ax.annotate(
                "",
                xy=(0.0, endy),
                xytext=(0.0, np.max([infy, -1.0])),
                arrowprops=dict(arrowstyle=("->" if endy == 1.0 else "-")),
            )
    else:
        # Symmetric square limits large enough for every |z|.
        # set limits for axis
        limit = _plot_limits(numbers, narrow_view)
        ax.set_xlim((-limit, limit))
        ax.set_ylim((-limit, limit))
        # x and y axes spanning 80% of the view (at least the unit interval)
        if show_axes:
            ax.annotate(
                "",
                xy=(np.max([limit * 0.8, 1.0]), 0.0),
                xytext=(np.min([-limit * 0.8, -1.0]), 0.0),
                arrowprops=dict(arrowstyle="->"),
            )
            ax.annotate(
                "",
                xy=(0.0, np.max([limit * 0.8, 1.0])),
                xytext=(0.0, np.min([-limit * 0.8, -1.0])),
                arrowprops=dict(arrowstyle="->"),
            )
    plt.ylabel("Imaginary")
    plt.xlabel("Real")
    if show_unit_circle:
        unit_circle = plt.Circle(
            (0.0, 0.0),
            1.0,
            color="green",
            fill=False,
            linestyle="--",
        )
        ax.add_artist(unit_circle)
    # Dashed grid
    gridlines = ax.get_xgridlines() + ax.get_ygridlines()
    for line in gridlines:
        line.set_linestyle("-.")
    ax.grid(True)
    ax.set_aspect("equal")
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
| [
"numpy.abs",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"numpy.max",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((3235, 3251), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3244, 3251), True, 'import matplotlib.pyplot as plt\n'), ((3260, 3269), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3267, 3269), True, 'import matplotlib.pyplot as plt\n'), ((3283, 3292), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3290, 3292), True, 'import matplotlib.pyplot as plt\n'), ((4953, 4976), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Imaginary"""'], {}), "('Imaginary')\n", (4963, 4976), True, 'import matplotlib.pyplot as plt\n'), ((4985, 5003), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Real"""'], {}), "('Real')\n", (4995, 5003), True, 'import matplotlib.pyplot as plt\n'), ((3135, 3171), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (3145, 3171), True, 'import matplotlib.pyplot as plt\n'), ((3198, 3225), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3208, 3225), True, 'import matplotlib.pyplot as plt\n'), ((5060, 5130), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0.0, 0.0)', '(1.0)'], {'color': '"""green"""', 'fill': '(False)', 'linestyle': '"""--"""'}), "((0.0, 0.0), 1.0, color='green', fill=False, linestyle='--')\n", (5070, 5130), True, 'import matplotlib.pyplot as plt\n'), ((5506, 5527), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (5517, 5527), True, 'import matplotlib.pyplot as plt\n'), ((5554, 5564), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5562, 5564), True, 'import matplotlib.pyplot as plt\n'), ((3660, 3679), 'numpy.min', 'np.min', (['[supx, 1.0]'], {}), '([supx, 1.0])\n', (3666, 3679), True, 'import numpy as np\n'), ((3947, 3966), 'numpy.min', 'np.min', (['[supy, 1.0]'], {}), '([supy, 1.0])\n', (3953, 3966), True, 'import numpy as np\n'), ((1584, 1599), 'numpy.abs', 'np.abs', (['numbers'], {}), '(numbers)\n', (1590, 1599), True, 'import numpy as np\n'), 
((3797, 3817), 'numpy.max', 'np.max', (['[infx, -1.0]'], {}), '([infx, -1.0])\n', (3803, 3817), True, 'import numpy as np\n'), ((4089, 4109), 'numpy.max', 'np.max', (['[infy, -1.0]'], {}), '([infy, -1.0])\n', (4095, 4109), True, 'import numpy as np\n'), ((4527, 4553), 'numpy.max', 'np.max', (['[limit * 0.8, 1.0]'], {}), '([limit * 0.8, 1.0])\n', (4533, 4553), True, 'import numpy as np\n'), ((4589, 4617), 'numpy.min', 'np.min', (['[-limit * 0.8, -1.0]'], {}), '([-limit * 0.8, -1.0])\n', (4595, 4617), True, 'import numpy as np\n'), ((4779, 4805), 'numpy.max', 'np.max', (['[limit * 0.8, 1.0]'], {}), '([limit * 0.8, 1.0])\n', (4785, 4805), True, 'import numpy as np\n'), ((4841, 4869), 'numpy.min', 'np.min', (['[-limit * 0.8, -1.0]'], {}), '([-limit * 0.8, -1.0])\n', (4847, 4869), True, 'import numpy as np\n')] |
import numpy as np
import unittest
import os
from openmdao.api import Problem, Group
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
from pycycle.elements.shaft import Shaft
# Directory containing this test module; used to locate regression data.
fpath = os.path.dirname(os.path.realpath(__file__))
# Reference regression data: one row per case, columns as listed in `header`.
ref_data = np.loadtxt(fpath + "/reg_data/shaft.csv",
                      delimiter=",", skiprows=1)
header = [
    'trqLoad1',
    'trqLoad2',
    'trqLoad3',
    'Nmech',
    'HPX',
    'fracLoss',
    'trqIn',
    'trqOut',
    'trqNet',
    'pwrIn',
    'pwrOut',
    'pwrNet']
# Map column name -> column index into each `ref_data` row.
# (dict comprehension instead of dict() over a generator of pairs)
h_map = {v_name: i for i, v_name in enumerate(header)}
class ShaftTestCase(unittest.TestCase):
    """Regression-test the Shaft element against reference CSV data."""
    def setUp(self):
        # Build a model with a 3-port shaft; all inputs get placeholder defaults,
        # real values are injected per-case in test_case1.
        prob = Problem()
        prob.model = Group()
        prob.model.add_subsystem("shaft", Shaft(num_ports=3), promotes=["*"])
        for port in range(3):
            prob.model.set_input_defaults('trq_%d' % port, 17., units='ft*lbf')
        prob.model.set_input_defaults('Nmech', 17., units='rpm')
        prob.model.set_input_defaults('HPX', 17., units='hp')
        prob.model.set_input_defaults('fracLoss', 17.)
        prob.setup(check=False, force_alloc_complex=True)
        self.prob = prob
    def test_case1(self):
        tol = 1.0e-4
        # one reference row per case (6 cases)
        for row in ref_data:
            # input torques
            for port in range(3):
                self.prob['trq_%d' % port] = row[h_map['trqLoad%d' % (port + 1)]]
            # shaft inputs
            for name in ('Nmech', 'HPX', 'fracLoss'):
                self.prob[name] = row[h_map[name]]
            self.prob.run_model()
            # check outputs against the reference columns
            for var, ref in (('trq_in', 'trqIn'),
                             ('trq_out', 'trqOut'),
                             ('trq_net', 'trqNet'),
                             ('pwr_in', 'pwrIn'),
                             ('pwr_out', 'pwrOut'),
                             ('pwr_net', 'pwrNet')):
                assert_near_equal(self.prob[var], row[h_map[ref]], tol)
            partial_data = self.prob.check_partials(out_stream=None, method='cs')
            assert_check_partials(partial_data, atol=1e-8, rtol=1e-8)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| [
"pycycle.elements.shaft.Shaft",
"openmdao.utils.assert_utils.assert_check_partials",
"openmdao.api.Group",
"os.path.realpath",
"openmdao.utils.assert_utils.assert_near_equal",
"unittest.main",
"numpy.loadtxt",
"openmdao.api.Problem"
] | [((273, 341), 'numpy.loadtxt', 'np.loadtxt', (["(fpath + '/reg_data/shaft.csv')"], {'delimiter': '""","""', 'skiprows': '(1)'}), "(fpath + '/reg_data/shaft.csv', delimiter=',', skiprows=1)\n", (283, 341), True, 'import numpy as np\n'), ((234, 260), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (250, 260), False, 'import os\n'), ((2990, 3005), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3003, 3005), False, 'import unittest\n'), ((693, 702), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (700, 702), False, 'from openmdao.api import Problem, Group\n'), ((729, 736), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (734, 736), False, 'from openmdao.api import Problem, Group\n'), ((784, 802), 'pycycle.elements.shaft.Shaft', 'Shaft', ([], {'num_ports': '(3)'}), '(num_ports=3)\n', (789, 802), False, 'from pycycle.elements.shaft import Shaft\n'), ((2485, 2526), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['trqIn_comp', 'trqIn', 'tol'], {}), '(trqIn_comp, trqIn, tol)\n', (2502, 2526), False, 'from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials\n'), ((2539, 2582), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['trqOut_comp', 'trqOut', 'tol'], {}), '(trqOut_comp, trqOut, tol)\n', (2556, 2582), False, 'from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials\n'), ((2595, 2638), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['trqNet_comp', 'trqNet', 'tol'], {}), '(trqNet_comp, trqNet, tol)\n', (2612, 2638), False, 'from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials\n'), ((2651, 2692), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['pwrIn_comp', 'pwrIn', 'tol'], {}), '(pwrIn_comp, pwrIn, tol)\n', (2668, 2692), False, 'from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials\n'), ((2705, 2748), 
'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['pwrOut_comp', 'pwrOut', 'tol'], {}), '(pwrOut_comp, pwrOut, tol)\n', (2722, 2748), False, 'from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials\n'), ((2761, 2804), 'openmdao.utils.assert_utils.assert_near_equal', 'assert_near_equal', (['pwrNet_comp', 'pwrNet', 'tol'], {}), '(pwrNet_comp, pwrNet, tol)\n', (2778, 2804), False, 'from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials\n'), ((2900, 2959), 'openmdao.utils.assert_utils.assert_check_partials', 'assert_check_partials', (['partial_data'], {'atol': '(1e-08)', 'rtol': '(1e-08)'}), '(partial_data, atol=1e-08, rtol=1e-08)\n', (2921, 2959), False, 'from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials\n')] |
""" Demo script modified
Generates forward and backward flow for cityscapes sequence.
T0 = 19
T1 = 19+[1..10]
Uses two GPUs, one for fwd and the other for bwd
"""
import sys
sys.path.append('core')
import argparse
import os
import cv2
import glob
import numpy as np
import torch
from PIL import Image
from pathlib import Path
from raft import RAFT
from utils import flow_viz
from utils.utils import InputPadder
from tqdm import tqdm
DEVICE = 'cuda'
def load_image(imfile):
    """Load an image file into a float CUDA tensor of shape (1, C, H, W)."""
    pixels = np.array(Image.open(imfile), dtype=np.uint8)
    chw = torch.from_numpy(pixels).permute(2, 0, 1).float()
    return chw.unsqueeze(0).cuda()
# def norm_flow(flo, img_size):
def viz(img, img2, flo_fwd, flo_bwd, idx):
    """Save a vertical stack of both frames and the fwd/bwd flow renderings to flow_img/."""
    def as_hwc(tensor):
        # drop batch dim, move channels last, bring back to host memory
        return tensor[0].permute(1, 2, 0).cpu().numpy()
    frame_a = as_hwc(img)
    frame_b = as_hwc(img2)
    # map flow fields to RGB visualizations
    fwd_rgb = flow_viz.flow_to_image(as_hwc(flo_fwd))
    bwd_rgb = flow_viz.flow_to_image(as_hwc(flo_bwd))
    stacked = np.concatenate([frame_a, frame_b, fwd_rgb, bwd_rgb], axis=0)
    # OpenCV expects BGR channel order, hence the [2, 1, 0] reindexing
    cv2.imwrite(f"flow_img/{idx}.png", stacked[:, :, [2, 1, 0]])
    print(f"flow_img/{idx}.png")
def save_flow(flo, filename):
    """Encode a flow tensor as a 16-bit PNG: channels 0/1 hold flow * 64 + 2**15.

    Parameters
    ----------
    flo : torch.Tensor
        Flow of shape (1, 2, H, W); moved to CPU before encoding.
    filename : str
        Output path; parent directories are created as needed.

    Raises
    ------
    Exception
        If cv2.imwrite reports failure.
    """
    flo = flo[0].permute(1, 2, 0).cpu().numpy()
    # shift/scale into the uint16 range (KITTI-style flow encoding)
    flo = flo * 64.0 + 32768.0
    img = np.zeros((flo.shape[0], flo.shape[1], 3), dtype=np.uint16)
    img[:, :, :2] = flo
    # fix: the f-strings below had lost their {filename} placeholders
    out_dir = os.path.dirname(filename)
    if not os.path.exists(out_dir):
        print(f"make dir: {out_dir}")
        os.makedirs(out_dir)
    if os.path.exists(filename):
        print(f"Warning: file {filename} exists! Will overwrite.")
    if not cv2.imwrite(filename, img):
        raise Exception(f"Can't write to {filename}!")
    print(f"Written {filename}.")
def demo(args):
    """Compute forward and backward optical flow for Cityscapes sequences.

    For every sequence-head frame listed in ``seqheads.txt`` (T0), RAFT is run
    against each of the next 10 frames (T1 = T0+1..T0+10). The forward and
    backward pairs are batched together and split across two GPUs by
    DataParallel (device_ids=[0, 1]); results are saved via save_flow().
    """
    model = torch.nn.DataParallel(RAFT(args), device_ids=[0, 1], output_device=1).cuda()
    model.load_state_dict(torch.load(args.model))
    # model = model.module
    # model.to(DEVICE)
    model.eval()
    with torch.no_grad():
        # images = glob.glob(os.path.join(args.path, '*.png')) + \
        #     glob.glob(os.path.join(args.path, '*.jpg'))
        with open('seqheads.txt') as fs:
            image_seqheads = fs.read().splitlines()
        fileidx = 0
        for img1 in tqdm(image_seqheads):
            # Cityscapes naming: <city>_<seq>_<frame>_<suffix>
            city, n1, n2, suf = img1.split('/', maxsplit=1)[-1].split("_")
            n2 = int(n2)
            for idx in range(1, 11):
                img2 = f"leftImg8bit_sequence/{city}_{n1}_{n2+idx:06d}_{suf}"
                savepath = f"flow_sequence/{city}_{n1}_{n2+idx:06d}"
                print(f"{img1}\n{img2}")
                image1 = load_image(os.path.join(args.path, img1))
                image2 = load_image(os.path.join(args.path, img2))
                savepath = os.path.join(args.path, savepath)
                # pad both frames to dimensions the network accepts
                padder = InputPadder(image1.shape)
                image1, image2 = padder.pad(image1, image2)
                # batch the fwd (1->2) and bwd (2->1) pairs together so
                # DataParallel can split them across the two GPUs
                image1_b = torch.cat([image1, image2], 0)
                image2_b = torch.cat([image2, image1], 0)
                flow_low, flow_up = model(image1_b, image2_b, iters=20, test_mode=True)
                flow_up_fwd = flow_up[0].unsqueeze(0)
                flow_up_bwd = flow_up[1].unsqueeze(0)
                # viz(image1, image2, flow_up_fwd, flow_up_bwd, fileidx)
                save_flow(flow_up_fwd, f"{savepath}_flow_fwd.png")
                save_flow(flow_up_bwd, f"{savepath}_flow_bwd.png")
                fileidx += 1
# Command-line entry point; model/path select the checkpoint and dataset root.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help="restore checkpoint")
    parser.add_argument('--path', help="dataset for evaluation")
    parser.add_argument('--small', action='store_true', help='use small model')
    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation')
    args = parser.parse_args()
    demo(args)
| [
"cv2.imwrite",
"os.path.exists",
"PIL.Image.open",
"argparse.ArgumentParser",
"torch.load",
"tqdm.tqdm",
"os.path.join",
"utils.utils.InputPadder",
"torch.from_numpy",
"raft.RAFT",
"os.path.dirname",
"utils.flow_viz.flow_to_image",
"numpy.zeros",
"numpy.concatenate",
"torch.no_grad",
"... | [((190, 213), 'sys.path.append', 'sys.path.append', (['"""core"""'], {}), "('core')\n", (205, 213), False, 'import sys\n'), ((960, 991), 'utils.flow_viz.flow_to_image', 'flow_viz.flow_to_image', (['flo_fwd'], {}), '(flo_fwd)\n', (982, 991), False, 'from utils import flow_viz\n'), ((1006, 1037), 'utils.flow_viz.flow_to_image', 'flow_viz.flow_to_image', (['flo_bwd'], {}), '(flo_bwd)\n', (1028, 1037), False, 'from utils import flow_viz\n'), ((1052, 1105), 'numpy.concatenate', 'np.concatenate', (['[img, img2, flo_fwd, flo_bwd]'], {'axis': '(0)'}), '([img, img2, flo_fwd, flo_bwd], axis=0)\n', (1066, 1105), True, 'import numpy as np\n'), ((1201, 1261), 'cv2.imwrite', 'cv2.imwrite', (['f"""flow_img/{idx}.png"""', 'img_flo[:, :, [2, 1, 0]]'], {}), "(f'flow_img/{idx}.png', img_flo[:, :, [2, 1, 0]])\n", (1212, 1261), False, 'import cv2\n'), ((1412, 1470), 'numpy.zeros', 'np.zeros', (['(flo.shape[0], flo.shape[1], 3)'], {'dtype': 'np.uint16'}), '((flo.shape[0], flo.shape[1], 3), dtype=np.uint16)\n', (1420, 1470), True, 'import numpy as np\n'), ((1661, 1685), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1675, 1685), False, 'import os\n'), ((3649, 3674), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3672, 3674), False, 'import argparse\n'), ((1766, 1792), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'img'], {}), '(filename, img)\n', (1777, 1792), False, 'import cv2\n'), ((2017, 2039), 'torch.load', 'torch.load', (['args.model'], {}), '(args.model)\n', (2027, 2039), False, 'import torch\n'), ((2119, 2134), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2132, 2134), False, 'import torch\n'), ((2400, 2420), 'tqdm.tqdm', 'tqdm', (['image_seqheads'], {}), '(image_seqheads)\n', (2404, 2420), False, 'from tqdm import tqdm\n'), ((1522, 1547), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (1537, 1547), False, 'import os\n'), ((1626, 1651), 'os.path.dirname', 'os.path.dirname', (['filename'], 
{}), '(filename)\n', (1641, 1651), False, 'import os\n'), ((515, 533), 'PIL.Image.open', 'Image.open', (['imfile'], {}), '(imfile)\n', (525, 533), False, 'from PIL import Image\n'), ((1936, 1946), 'raft.RAFT', 'RAFT', (['args'], {}), '(args)\n', (1940, 1946), False, 'from raft import RAFT\n'), ((2910, 2943), 'os.path.join', 'os.path.join', (['args.path', 'savepath'], {}), '(args.path, savepath)\n', (2922, 2943), False, 'import os\n'), ((2970, 2995), 'utils.utils.InputPadder', 'InputPadder', (['image1.shape'], {}), '(image1.shape)\n', (2981, 2995), False, 'from utils.utils import InputPadder\n'), ((3084, 3114), 'torch.cat', 'torch.cat', (['[image1, image2]', '(0)'], {}), '([image1, image2], 0)\n', (3093, 3114), False, 'import torch\n'), ((3142, 3172), 'torch.cat', 'torch.cat', (['[image2, image1]', '(0)'], {}), '([image2, image1], 0)\n', (3151, 3172), False, 'import torch\n'), ((562, 583), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (578, 583), False, 'import torch\n'), ((1577, 1602), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (1592, 1602), False, 'import os\n'), ((2785, 2814), 'os.path.join', 'os.path.join', (['args.path', 'img1'], {}), '(args.path, img1)\n', (2797, 2814), False, 'import os\n'), ((2852, 2881), 'os.path.join', 'os.path.join', (['args.path', 'img2'], {}), '(args.path, img2)\n', (2864, 2881), False, 'import os\n')] |
"""
Module containing logic related to eager DataFrames
"""
import os
import sys
import warnings
from io import BytesIO, IOBase, StringIO
from pathlib import Path
from typing import (
Any,
BinaryIO,
Callable,
Dict,
Generic,
Iterable,
Iterator,
List,
Mapping,
Optional,
Sequence,
TextIO,
Tuple,
Type,
TypeVar,
Union,
overload,
)
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal # pragma: no cover
import numpy as np
try:
import pyarrow as pa
import pyarrow.compute
import pyarrow.parquet
_PYARROW_AVAILABLE = True
except ImportError: # pragma: no cover
_PYARROW_AVAILABLE = False
from polars import internals as pli
from polars.internals.construction import (
ColumnsType,
arrow_to_pydf,
dict_to_pydf,
numpy_to_pydf,
pandas_to_pydf,
sequence_to_pydf,
series_to_pydf,
)
from .lazy_frame import LazyFrame, wrap_ldf # noqa: F401
try:
from polars.polars import PyDataFrame, PySeries
_DOCUMENTING = False
except ImportError: # pragma: no cover
_DOCUMENTING = True
from polars._html import NotebookFormatter
from polars.datatypes import Boolean, DataType, UInt32, Utf8, py_type_to_dtype
from polars.utils import (
_prepare_row_count_args,
_process_null_values,
format_path,
handle_projection_columns,
is_int_sequence,
is_str_sequence,
range_to_slice,
)
try:
import pandas as pd
_PANDAS_AVAILABLE = True
except ImportError: # pragma: no cover
_PANDAS_AVAILABLE = False
# A type variable used to refer to a polars.DataFrame or any subclass of it.
# Used to annotate DataFrame methods which returns the same type as self.
DF = TypeVar("DF", bound="DataFrame")
def wrap_df(df: "PyDataFrame") -> "DataFrame":
    """Wrap a low-level FFI ``PyDataFrame`` in a Python-level ``DataFrame``."""
    return DataFrame._from_pydf(df)
def _prepare_other_arg(other: Any) -> "pli.Series":
    """Coerce ``other`` into a Series suitable for broadcasting.

    A Series passes through untouched. Scalars (including strings) are wrapped
    in an unnamed singleton Series so they broadcast against a full column;
    non-string sequences are rejected.
    """
    if isinstance(other, pli.Series):
        return other
    if not isinstance(other, str) and isinstance(other, Sequence):
        raise ValueError("Operation not supported.")
    return pli.Series("", [other])
class DataFrameMetaClass(type):
    """
    Custom metaclass for the DataFrame class.

    This metaclass constructs the two-way relationship between a DataFrame
    (sub)class and its LazyFrame counterpart:

        DataFrame <-> LazyFrame

    represented by two pointers:

    - ``cls._lazyframe_class``: a pointer on the DataFrame (sub)class to a
      LazyFrame (sub)class, used by DataFrame methods to construct new lazy
      dataframes.
    - ``cls._lazyframe_class._dataframe_class``: a pointer on the LazyFrame
      (sub)class back to the original DataFrame (sub)class, used by LazyFrame
      methods to construct new non-lazy dataframes of the correct type. It is
      always set to ``cls`` so that the following holds:
      ``type(cls) is type(cls.lazy().collect())``.

    If an end user subclasses DataFrame like so:

    >>> class MyDataFrame(pl.DataFrame):
    ...     pass
    ...

    then the metaclass dynamically creates the following class and stores it
    on the class variable ``MyDataFrame._lazyframe_class``:

    >>> class LazyMyDataFrame(pl.DataFrame):
    ...     _dataframe_class = MyDataFrame
    ...

    To extend both `DataFrame` and `LazyFrame`, wire the two manually:

    >>> class MyLazyFrame(pl.LazyFrame):
    ...     @classmethod
    ...     @property
    ...     def _dataframe_class(cls):
    ...         return MyDataFrame
    ...
    >>> class MyDataFrame(pl.DataFrame):
    ...     _lazyframe_class = MyLazyFrame
    ...
    """
    def __init__(cls, name: str, bases: tuple, clsdict: dict) -> None:
        """Construct a new DataFrame (sub)class, wiring up its LazyFrame twin.

        Parameters
        ----------
        name
            Name of the class being created.
        bases
            Base classes; empty for ``DataFrame`` itself.
        clsdict
            Namespace (attribute dict) of the class being created.
        """
        if not bases:
            # This is not a subclass of DataFrame and we can simply hard-link to
            # LazyFrame instead of dynamically defining a new subclass of LazyFrame.
            cls._lazyframe_class = LazyFrame
        elif cls._lazyframe_class is LazyFrame:
            # This is a subclass of DataFrame which has *not* specified a custom
            # LazyFrame subclass by setting `cls._lazyframe_class`. We must therefore
            # dynamically create a subclass of LazyFrame with `_dataframe_class` set
            # to `cls` in order to preserve types after `.lazy().collect()` roundtrips.
            cls._lazyframe_class = type(  # type: ignore
                f"Lazy{name}",
                (LazyFrame,),
                {"_dataframe_class": cls},
            )
        super().__init__(name, bases, clsdict)
class DataFrame(metaclass=DataFrameMetaClass):
"""
A DataFrame is a two-dimensional data structure that represents data as a table
with rows and columns.
Parameters
----------
data : dict, Sequence, ndarray, Series, or pandas.DataFrame
Two-dimensional data in various forms. dict must contain Sequences.
Sequence may contain Series or other Sequences.
columns : Sequence of str or (str,DataType) pairs, default None
Column labels to use for resulting DataFrame. If specified, overrides any
labels already present in the data. Must match data dimensions.
orient : {'col', 'row'}, default None
Whether to interpret two-dimensional data as columns or as rows. If None,
the orientation is inferred by matching the columns and data dimensions. If
this does not yield conclusive results, column orientation is used.
Examples
--------
Constructing a DataFrame from a dictionary:
>>> data = {"a": [1, 2], "b": [3, 4]}
>>> df = pl.DataFrame(data)
>>> df
shape: (2, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 4 │
└─────┴─────┘
Notice that the dtype is automatically inferred as a polars Int64:
>>> df.dtypes
[<class 'polars.datatypes.Int64'>, <class 'polars.datatypes.Int64'>]
In order to specify dtypes for your columns, initialize the DataFrame with a list
of typed Series, or set the columns parameter with a list of (name,dtype) pairs:
>>> data = [
... pl.Series("col1", [1, 2], dtype=pl.Float32),
... pl.Series("col2", [3, 4], dtype=pl.Int64),
... ]
>>> df2 = pl.DataFrame(data)
>>> df2
shape: (2, 2)
┌──────┬──────┐
│ col1 ┆ col2 │
│ --- ┆ --- │
│ f32 ┆ i64 │
╞══════╪══════╡
│ 1.0 ┆ 3 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2.0 ┆ 4 │
└──────┴──────┘
# or, equivalent... (and also compatible with all of the other valid data parameter types):
>>> df3 = pl.DataFrame(data, columns=[("col1", pl.Float32), ("col2", pl.Int64)])
>>> df3
┌──────┬──────┐
│ col1 ┆ col2 │
│ --- ┆ --- │
│ f32 ┆ i64 │
╞══════╪══════╡
│ 1.0 ┆ 3 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2.0 ┆ 4 │
└──────┴──────┘
Constructing a DataFrame from a numpy ndarray, specifying column names:
>>> import numpy as np
>>> data = np.array([(1, 2), (3, 4)], dtype=np.int64)
>>> df4 = pl.DataFrame(data, columns=["a", "b"], orient="col")
>>> df4
shape: (2, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 4 │
└─────┴─────┘
Constructing a DataFrame from a list of lists, row orientation inferred:
>>> data = [[1, 2, 3], [4, 5, 6]]
>>> df4 = pl.DataFrame(data, columns=["a", "b", "c"])
>>> df4
shape: (2, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 5 ┆ 6 │
└─────┴─────┴─────┘
"""
    def __init__(
        self,
        data: Optional[
            Union[
                Dict[str, Sequence[Any]],
                Sequence[Any],
                np.ndarray,
                "pa.Table",
                "pd.DataFrame",
                "pli.Series",
            ]
        ] = None,
        columns: Optional[ColumnsType] = None,
        orient: Optional[str] = None,
    ):
        # Dispatch on the concrete type of `data`, most specific checks first.
        # Each branch builds the underlying FFI PyDataFrame via the matching
        # constructor helper from polars.internals.construction.
        if data is None:
            self._df = dict_to_pydf({}, columns=columns)
        elif isinstance(data, dict):
            self._df = dict_to_pydf(data, columns=columns)
        elif isinstance(data, np.ndarray):
            self._df = numpy_to_pydf(data, columns=columns, orient=orient)
        elif _PYARROW_AVAILABLE and isinstance(data, pa.Table):
            self._df = arrow_to_pydf(data, columns=columns)
        elif isinstance(data, Sequence) and not isinstance(data, str):
            # str is itself a Sequence; exclude it so a bare string falls
            # through to the ValueError below
            self._df = sequence_to_pydf(data, columns=columns, orient=orient)
        elif isinstance(data, pli.Series):
            self._df = series_to_pydf(data, columns=columns)
        elif _PANDAS_AVAILABLE and isinstance(data, pd.DataFrame):
            # pandas conversion goes through Arrow, so pyarrow is mandatory here
            if not _PYARROW_AVAILABLE:
                raise ImportError(  # pragma: no cover
                    "'pyarrow' is required for converting a pandas DataFrame to a polars DataFrame."
                )
            self._df = pandas_to_pydf(data, columns=columns)
        else:
            raise ValueError("DataFrame constructor not called properly.")
    def estimated_size(self) -> int:
        """
        Return an estimation of the total (heap) allocated size of the `DataFrame` in bytes.

        This estimation is the sum of the sizes of its buffers, validity included, and
        nested arrays. Multiple arrays may share buffers and bitmaps, so the size of two
        arrays is not necessarily the sum of the sizes computed from this function. In
        particular, a [`StructArray`]'s size is an upper bound.

        When an array is sliced, its allocated size remains constant because the buffer
        is unchanged. However, this function will yield a smaller number, because it
        returns the visible size of the buffer, not its total capacity.

        FFI buffers are included in this estimation.
        """
        return self._df.estimated_size()
@classmethod
def _from_pydf(cls: Type[DF], py_df: "PyDataFrame") -> DF:
"""
Construct Polars DataFrame from FFI PyDataFrame object.
"""
df = cls.__new__(cls)
df._df = py_df
return df
    @classmethod
    def _from_dicts(
        cls: Type[DF],
        data: Sequence[Dict[str, Any]],
    ) -> DF:
        """Construct a DataFrame from a sequence of row dictionaries (see ``pl.from_dicts``)."""
        pydf = PyDataFrame.read_dicts(data)
        return cls._from_pydf(pydf)
@classmethod
def _from_dict(
cls: Type[DF],
data: Dict[str, Sequence[Any]],
columns: Optional[Sequence[str]] = None,
) -> DF:
"""
Construct a DataFrame from a dictionary of sequences.
Parameters
----------
data : dict of sequences
Two-dimensional data represented as a dictionary. dict must contain
Sequences.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. If specified, overrides any
labels already present in the data. Must match data dimensions.
Returns
-------
DataFrame
"""
return cls._from_pydf(dict_to_pydf(data, columns=columns))
@classmethod
def _from_records(
cls: Type[DF],
data: Union[np.ndarray, Sequence[Sequence[Any]]],
columns: Optional[Sequence[str]] = None,
orient: Optional[str] = None,
) -> DF:
"""
Construct a DataFrame from a numpy ndarray or sequence of sequences.
Parameters
----------
data : numpy ndarray or Sequence of sequences
Two-dimensional data represented as numpy ndarray or sequence of sequences.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. Must match data dimensions.
If not specified, columns will be named `column_0`, `column_1`, etc.
orient : {'col', 'row'}, default None
Whether to interpret two-dimensional data as columns or as rows. If None,
the orientation is inferred by matching the columns and data dimensions. If
this does not yield conclusive results, column orientation is used.
Returns
-------
DataFrame
"""
if isinstance(data, np.ndarray):
pydf = numpy_to_pydf(data, columns=columns, orient=orient)
else:
pydf = sequence_to_pydf(data, columns=columns, orient=orient)
return cls._from_pydf(pydf)
@classmethod
def _from_arrow(
cls: Type[DF],
data: "pa.Table",
columns: Optional[Sequence[str]] = None,
rechunk: bool = True,
) -> DF:
"""
Construct a DataFrame from an Arrow table.
This operation will be zero copy for the most part. Types that are not
supported by Polars may be cast to the closest supported type.
Parameters
----------
data : numpy ndarray or Sequence of sequences
Two-dimensional data represented as Arrow table.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. Must match data dimensions.
If not specified, existing Array table columns are used, with missing names
named as `column_0`, `column_1`, etc.
rechunk : bool, default True
Make sure that all data is contiguous.
Returns
-------
DataFrame
"""
return cls._from_pydf(arrow_to_pydf(data, columns=columns, rechunk=rechunk))
@classmethod
def _from_pandas(
cls: Type[DF],
data: "pd.DataFrame",
columns: Optional[Sequence[str]] = None,
rechunk: bool = True,
nan_to_none: bool = True,
) -> DF:
"""
Construct a Polars DataFrame from a pandas DataFrame.
Parameters
----------
data : pandas DataFrame
Two-dimensional data represented as a pandas DataFrame.
columns : Sequence of str, default None
Column labels to use for resulting DataFrame. If specified, overrides any
labels already present in the data. Must match data dimensions.
rechunk : bool, default True
Make sure that all data is contiguous.
nan_to_none : bool, default True
If data contains NaN values PyArrow will convert the NaN to None
Returns
-------
DataFrame
"""
# path for table without rows that keeps datatype
if data.shape[0] == 0:
series = []
for name in data.columns:
pd_series = data[name]
if pd_series.dtype == np.dtype("O"):
series.append(pli.Series(name, [], dtype=Utf8))
else:
col = pli.Series(name, pd_series)
series.append(pli.Series(name, col))
return cls(series)
return cls._from_pydf(
pandas_to_pydf(
data, columns=columns, rechunk=rechunk, nan_to_none=nan_to_none
)
)
    @classmethod
    def _read_csv(
        cls: Type[DF],
        file: Union[str, Path, BinaryIO, bytes],
        has_header: bool = True,
        columns: Optional[Union[List[int], List[str]]] = None,
        sep: str = ",",
        comment_char: Optional[str] = None,
        quote_char: Optional[str] = r'"',
        skip_rows: int = 0,
        dtypes: Optional[
            Union[Mapping[str, Type[DataType]], List[Type[DataType]]]
        ] = None,
        null_values: Optional[Union[str, List[str], Dict[str, str]]] = None,
        ignore_errors: bool = False,
        parse_dates: bool = False,
        n_threads: Optional[int] = None,
        infer_schema_length: Optional[int] = 100,
        batch_size: int = 8192,
        n_rows: Optional[int] = None,
        encoding: str = "utf8",
        low_memory: bool = False,
        rechunk: bool = True,
        skip_rows_after_header: int = 0,
        row_count_name: Optional[str] = None,
        row_count_offset: int = 0,
        sample_size: int = 1024,
    ) -> DF:
        """
        Read a CSV into a DataFrame; see the public ``pl.read_csv`` for full
        parameter documentation.
        """
        self = cls.__new__(cls)
        path: Optional[str]
        if isinstance(file, (str, Path)):
            path = format_path(file)
        else:
            path = None
        # In-memory buffers are handed to the native reader as raw bytes.
        if isinstance(file, BytesIO):
            file = file.getvalue()
        if isinstance(file, StringIO):
            file = file.getvalue().encode()
        # `dtypes` may be given per-name (dict -> dtype_list) or positionally
        # (list -> dtype_slice); at most one of the two ends up non-None.
        dtype_list: Optional[List[Tuple[str, Type[DataType]]]] = None
        dtype_slice: Optional[List[Type[DataType]]] = None
        if dtypes is not None:
            if isinstance(dtypes, dict):
                dtype_list = []
                for k, v in dtypes.items():
                    dtype_list.append((k, py_type_to_dtype(v)))
            elif isinstance(dtypes, list):
                dtype_slice = dtypes
            else:
                raise ValueError("dtype arg should be list or dict")
        processed_null_values = _process_null_values(null_values)
        if isinstance(file, str) and "*" in file:
            # Glob pattern: delegate to the lazy engine, which expands the
            # pattern and concatenates all matching files.
            dtypes_dict = None
            if dtype_list is not None:
                dtypes_dict = {name: dt for (name, dt) in dtype_list}
            if dtype_slice is not None:
                raise ValueError(
                    "cannot use glob patterns and unnamed dtypes as `dtypes` argument; Use dtypes: Mapping[str, Type[DataType]"
                )
            from polars import scan_csv
            scan = scan_csv(
                file,
                has_header=has_header,
                sep=sep,
                comment_char=comment_char,
                quote_char=quote_char,
                skip_rows=skip_rows,
                dtypes=dtypes_dict,
                null_values=null_values,
                ignore_errors=ignore_errors,
                infer_schema_length=infer_schema_length,
                n_rows=n_rows,
                low_memory=low_memory,
                rechunk=rechunk,
                skip_rows_after_header=skip_rows_after_header,
                row_count_name=row_count_name,
                row_count_offset=row_count_offset,
            )
            if columns is None:
                return self._from_pydf(scan.collect()._df)
            elif is_str_sequence(columns, False):
                return self._from_pydf(scan.select(columns).collect()._df)
            else:
                raise ValueError(
                    "cannot use glob patterns and integer based projection as `columns` argument; Use columns: List[str]"
                )
        projection, columns = handle_projection_columns(columns)
        # Positional FFI call: the argument order here must match the native
        # PyDataFrame.read_csv signature exactly.
        self._df = PyDataFrame.read_csv(
            file,
            infer_schema_length,
            batch_size,
            has_header,
            ignore_errors,
            n_rows,
            skip_rows,
            projection,
            sep,
            rechunk,
            columns,
            encoding,
            n_threads,
            path,
            dtype_list,
            dtype_slice,
            low_memory,
            comment_char,
            quote_char,
            processed_null_values,
            parse_dates,
            skip_rows_after_header,
            _prepare_row_count_args(row_count_name, row_count_offset),
            sample_size=sample_size,
        )
        return self
    @classmethod
    def _read_parquet(
        cls: Type[DF],
        file: Union[str, Path, BinaryIO],
        columns: Optional[Union[List[int], List[str]]] = None,
        n_rows: Optional[int] = None,
        parallel: bool = True,
        row_count_name: Optional[str] = None,
        row_count_offset: int = 0,
    ) -> DF:
        """
        Read into a DataFrame from a parquet file.

        Parameters
        ----------
        file
            Path to a file or a file-like object. Any valid filepath can be used.
        columns
            Columns to select. Accepts a list of column indices (starting at zero) or a list of column names.
        n_rows
            Stop reading from parquet file after reading ``n_rows``.
        parallel
            Read the parquet file in parallel. The single threaded reader consumes less memory.
        row_count_name
            If not None, add a row count column with this name as the first column.
        row_count_offset
            Offset at which the row count starts.
        """
        if isinstance(file, (str, Path)):
            file = format_path(file)
        if isinstance(file, str) and "*" in file:
            # Glob pattern: delegate to the lazy engine, which expands the
            # pattern and concatenates all matching files.
            from polars import scan_parquet
            scan = scan_parquet(
                file,
                n_rows=n_rows,
                rechunk=True,
                parallel=parallel,
                row_count_name=row_count_name,
                row_count_offset=row_count_offset,
            )
            if columns is None:
                return cls._from_pydf(scan.collect()._df)
            elif is_str_sequence(columns, False):
                return cls._from_pydf(scan.select(columns).collect()._df)
            else:
                raise ValueError(
                    "cannot use glob patterns and integer based projection as `columns` argument; Use columns: List[str]"
                )
        projection, columns = handle_projection_columns(columns)
        self = cls.__new__(cls)
        self._df = PyDataFrame.read_parquet(
            file,
            columns,
            projection,
            n_rows,
            parallel,
            _prepare_row_count_args(row_count_name, row_count_offset),
        )
        return self
    @classmethod
    def _read_avro(
        cls: Type[DF],
        file: Union[str, Path, BinaryIO],
        columns: Optional[Union[List[int], List[str]]] = None,
        n_rows: Optional[int] = None,
    ) -> DF:
        """
        Read into a DataFrame from Apache Avro format.

        Parameters
        ----------
        file
            Path to a file or a file-like object.
        columns
            Columns to select. Accepts a list of column indices (starting at zero) or a list of column names.
        n_rows
            Stop reading from Apache Avro file after reading ``n_rows``.

        Returns
        -------
        DataFrame
        """
        if isinstance(file, (str, Path)):
            file = format_path(file)
        # Split `columns` into the integer projection / name list the native
        # reader expects.
        projection, columns = handle_projection_columns(columns)
        self = cls.__new__(cls)
        self._df = PyDataFrame.read_avro(file, columns, projection, n_rows)
        return self
@classmethod
def _read_ipc(
cls: Type[DF],
file: Union[str, Path, BinaryIO],
columns: Optional[Union[List[int], List[str]]] = None,
n_rows: Optional[int] = None,
row_count_name: Optional[str] = None,
row_count_offset: int = 0,
rechunk: bool = True,
) -> DF:
"""
Read into a DataFrame from Arrow IPC stream format. This is also called the Feather (v2) format.
Parameters
----------
file
Path to a file or a file-like object.
columns
Columns to select. Accepts a list of column indices (starting at zero) or a list of column names.
n_rows
Stop reading from IPC file after reading ``n_rows``.
rechunk
Make sure that all data is contiguous.
Returns
-------
DataFrame
"""
if isinstance(file, (str, Path)):
file = format_path(file)
if isinstance(file, str) and "*" in file:
from polars import scan_ipc
scan = scan_ipc(
file,
n_rows=n_rows,
rechunk=rechunk,
row_count_name=row_count_name,
row_count_offset=row_count_offset,
)
if columns is None:
scan.collect()
elif is_str_sequence(columns, False):
scan.select(columns).collect()
else:
raise ValueError(
"cannot use glob patterns and integer based projection as `columns` argument; Use columns: List[str]"
)
projection, columns = handle_projection_columns(columns)
self = cls.__new__(cls)
self._df = PyDataFrame.read_ipc(
file,
columns,
projection,
n_rows,
_prepare_row_count_args(row_count_name, row_count_offset),
)
return self
@classmethod
def _read_json(
cls: Type[DF],
file: Union[str, Path, IOBase],
json_lines: bool = False,
) -> DF:
"""
See Also pl.read_json
"""
if isinstance(file, StringIO):
file = BytesIO(file.getvalue().encode())
elif isinstance(file, (str, Path)):
file = format_path(file)
self = cls.__new__(cls)
self._df = PyDataFrame.read_json(file, json_lines)
return self
def to_arrow(self) -> "pa.Table":
"""
Collect the underlying arrow arrays in an Arrow Table.
This operation is mostly zero copy.
Data types that do copy:
- CategoricalType
"""
if not _PYARROW_AVAILABLE:
raise ImportError( # pragma: no cover
"'pyarrow' is required for converting a polars DataFrame to an Arrow Table."
)
record_batches = self._df.to_arrow()
return pa.Table.from_batches(record_batches)
@overload
def to_dict(self, as_series: Literal[True] = ...) -> Dict[str, "pli.Series"]:
...
@overload
def to_dict(self, as_series: Literal[False]) -> Dict[str, List[Any]]:
...
@overload
def to_dict(
self, as_series: bool = True
) -> Union[Dict[str, "pli.Series"], Dict[str, List[Any]]]:
...
def to_dict(
self, as_series: bool = True
) -> Union[Dict[str, "pli.Series"], Dict[str, List[Any]]]:
"""
Convert DataFrame to a dictionary mapping column name to values.
Parameters
----------
as_series
True -> Values are series
False -> Values are List[Any]
Examples
--------
>>> df = pl.DataFrame(
... {
... "A": [1, 2, 3, 4, 5],
... "fruits": ["banana", "banana", "apple", "apple", "banana"],
... "B": [5, 4, 3, 2, 1],
... "cars": ["beetle", "audi", "beetle", "beetle", "beetle"],
... "optional": [28, 300, None, 2, -30],
... }
... )
>>> df
shape: (5, 5)
┌─────┬────────┬─────┬────────┬──────────┐
│ A ┆ fruits ┆ B ┆ cars ┆ optional │
│ --- ┆ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ i64 ┆ str ┆ i64 │
╞═════╪════════╪═════╪════════╪══════════╡
│ 1 ┆ banana ┆ 5 ┆ beetle ┆ 28 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ banana ┆ 4 ┆ audi ┆ 300 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 3 ┆ apple ┆ 3 ┆ beetle ┆ null │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 4 ┆ apple ┆ 2 ┆ beetle ┆ 2 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 5 ┆ banana ┆ 1 ┆ beetle ┆ -30 │
└─────┴────────┴─────┴────────┴──────────┘
>>> df.to_dict(as_series=False)
{'A': [1, 2, 3, 4, 5],
'fruits': ['banana', 'banana', 'apple', 'apple', 'banana'],
'B': [5, 4, 3, 2, 1],
'cars': ['beetle', 'audi', 'beetle', 'beetle', 'beetle'],
'optional': [28, 300, None, 2, -30]}
>>> df.to_dict(as_series=True)
{'A': shape: (5,)
Series: 'A' [i64]
[
1
2
3
4
5
], 'fruits': shape: (5,)
Series: 'fruits' [str]
[
"banana"
"banana"
"apple"
"apple"
"banana"
], 'B': shape: (5,)
Series: 'B' [i64]
[
5
4
3
2
1
], 'cars': shape: (5,)
Series: 'cars' [str]
[
"beetle"
"audi"
"beetle"
"beetle"
"beetle"
], 'optional': shape: (5,)
Series: 'optional' [i64]
[
28
300
null
2
-30
]}
"""
if as_series:
return {s.name: s for s in self}
else:
return {s.name: s.to_list() for s in self}
@overload
def to_json(
self,
file: Optional[Union[IOBase, str, Path]] = ...,
pretty: bool = ...,
row_oriented: bool = ...,
json_lines: bool = ...,
*,
to_string: Literal[True],
) -> str:
...
@overload
def to_json(
self,
file: Optional[Union[IOBase, str, Path]] = ...,
pretty: bool = ...,
row_oriented: bool = ...,
json_lines: bool = ...,
*,
to_string: Literal[False] = ...,
) -> None:
...
@overload
def to_json(
self,
file: Optional[Union[IOBase, str, Path]] = ...,
pretty: bool = ...,
row_oriented: bool = ...,
json_lines: bool = ...,
*,
to_string: bool = ...,
) -> Optional[str]:
...
def to_json(
self,
file: Optional[Union[IOBase, str, Path]] = None,
pretty: bool = False,
row_oriented: bool = False,
json_lines: bool = False,
*,
to_string: bool = False,
) -> Optional[str]: # pragma: no cover
"""
.. deprecated:: 0.13.12
Please use `write_json`
"""
warnings.warn(
"'to_json' is deprecated. please use 'write_json'", DeprecationWarning
)
return self.write_json(
file, pretty, row_oriented, json_lines, to_string=to_string
)
@overload
def write_json(
self,
file: Optional[Union[IOBase, str, Path]] = ...,
pretty: bool = ...,
row_oriented: bool = ...,
json_lines: bool = ...,
*,
to_string: Literal[True],
) -> str:
...
@overload
def write_json(
self,
file: Optional[Union[IOBase, str, Path]] = ...,
pretty: bool = ...,
row_oriented: bool = ...,
json_lines: bool = ...,
*,
to_string: Literal[False] = ...,
) -> None:
...
@overload
def write_json(
self,
file: Optional[Union[IOBase, str, Path]] = ...,
pretty: bool = ...,
row_oriented: bool = ...,
json_lines: bool = ...,
*,
to_string: bool = ...,
) -> Optional[str]:
...
def write_json(
self,
file: Optional[Union[IOBase, str, Path]] = None,
pretty: bool = False,
row_oriented: bool = False,
json_lines: bool = False,
*,
to_string: bool = False,
) -> Optional[str]:
"""
Serialize to JSON representation.
Parameters
----------
file
Write to this file instead of returning a string.
pretty
Pretty serialize json.
row_oriented
Write to row oriented json. This is slower, but more common.
json_lines
Write to Json Lines format
to_string
Ignore file argument and return a string.
"""
if isinstance(file, (str, Path)):
file = format_path(file)
to_string_io = (file is not None) and isinstance(file, StringIO)
if to_string or file is None or to_string_io:
with BytesIO() as buf:
self._df.to_json(buf, pretty, row_oriented, json_lines)
json_bytes = buf.getvalue()
json_str = json_bytes.decode("utf8")
if to_string_io:
file.write(json_str) # type: ignore[union-attr]
else:
return json_str
else:
self._df.to_json(file, pretty, row_oriented, json_lines)
return None
def to_pandas(
self, *args: Any, date_as_object: bool = False, **kwargs: Any
) -> "pd.DataFrame": # noqa: F821
"""
Cast to a pandas DataFrame.
This requires that pandas and pyarrow are installed.
This operation clones data.
Parameters
----------
args
Arguments will be sent to pyarrow.Table.to_pandas.
date_as_object
Cast dates to objects. If False, convert to datetime64[ns] dtype.
kwargs
Arguments will be sent to pyarrow.Table.to_pandas.
Examples
--------
>>> import pandas
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> pandas_df = df.to_pandas()
>>> type(pandas_df)
<class 'pandas.core.frame.DataFrame'>
"""
if not _PYARROW_AVAILABLE:
raise ImportError( # pragma: no cover
"'pyarrow' is required when using to_pandas()."
)
record_batches = self._df.to_pandas()
tbl = pa.Table.from_batches(record_batches)
return tbl.to_pandas(*args, date_as_object=date_as_object, **kwargs)
def write_csv(
self,
file: Optional[Union[TextIO, BytesIO, str, Path]] = None,
has_header: bool = True,
sep: str = ",",
quote: str = '"',
) -> Optional[str]:
"""
Write Dataframe to comma-separated values file (csv).
Parameters
----------
file
File path to which the file should be written.
has_header
Whether to include header in the CSV output.
sep
Separate CSV fields with this symbol.
quote
byte to use as quoting character
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3, 4, 5],
... "bar": [6, 7, 8, 9, 10],
... "ham": ["a", "b", "c", "d", "e"],
... }
... )
>>> df.write_csv("new_file.csv", sep=",")
"""
if len(sep) > 1:
raise ValueError("only single byte separator is allowed")
if len(quote) > 1:
raise ValueError("only single byte quote char is allowed")
if file is None:
buffer = BytesIO()
self._df.to_csv(buffer, has_header, ord(sep), ord(quote))
return str(buffer.getvalue(), encoding="utf-8")
if isinstance(file, (str, Path)):
file = format_path(file)
self._df.to_csv(file, has_header, ord(sep), ord(quote))
return None
def to_csv(
self,
file: Optional[Union[TextIO, BytesIO, str, Path]] = None,
has_header: bool = True,
sep: str = ",",
) -> Optional[str]: # pragma: no cover
"""
.. deprecated:: 0.13.12
Please use `write_csv`
"""
warnings.warn(
"'to_csv' is deprecated. please use 'write_csv'", DeprecationWarning
)
return self.write_csv(file, has_header, sep)
def write_avro(
self,
file: Union[BinaryIO, BytesIO, str, Path],
compression: Literal["uncompressed", "snappy", "deflate"] = "uncompressed",
) -> None:
"""
Write to Apache Avro file.
Parameters
----------
file
File path to which the file should be written.
compression
Compression method. Choose one of:
- "uncompressed"
- "snappy"
- "deflate"
"""
if isinstance(file, (str, Path)):
file = format_path(file)
self._df.to_avro(file, compression)
def to_avro(
self,
file: Union[BinaryIO, BytesIO, str, Path],
compression: Literal["uncompressed", "snappy", "deflate"] = "uncompressed",
) -> None: # pragma: no cover
"""
.. deprecated:: 0.13.12
Please use `write_avro`
"""
warnings.warn(
"'to_avro' is deprecated. please use 'write_avro'", DeprecationWarning
)
return self.write_avro(file, compression)
def write_ipc(
self,
file: Union[BinaryIO, BytesIO, str, Path],
compression: Optional[Literal["uncompressed", "lz4", "zstd"]] = "uncompressed",
) -> None:
"""
Write to Arrow IPC binary stream, or a feather file.
Parameters
----------
file
File path to which the file should be written.
compression
Compression method. Choose one of:
- "uncompressed"
- "lz4"
- "zstd"
"""
if compression is None:
compression = "uncompressed"
if isinstance(file, (str, Path)):
file = format_path(file)
self._df.to_ipc(file, compression)
def to_ipc(
self,
file: Union[BinaryIO, BytesIO, str, Path],
compression: Optional[Literal["uncompressed", "lz4", "zstd"]] = "uncompressed",
) -> None: # pragma: no cover
"""
.. deprecated:: 0.13.12
Please use `write_ipc`
"""
warnings.warn(
"'to_ipc' is deprecated. please use 'write_ipc'", DeprecationWarning
)
return self.write_ipc(file, compression)
def to_dicts(self) -> List[Dict[str, Any]]:
"""
Convert every row to a dictionary.
Note that this is slow.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]})
>>> df.to_dicts()
[{'foo': 1, 'bar': 4}, {'foo': 2, 'bar': 5}, {'foo': 3, 'bar': 6}]
"""
pydf = self._df
names = self.columns
return [
{k: v for k, v in zip(names, pydf.row_tuple(i))}
for i in range(0, self.height)
]
def transpose(
self: DF,
include_header: bool = False,
header_name: str = "column",
column_names: Optional[Union[Iterator[str], Sequence[str]]] = None,
) -> DF:
"""
Transpose a DataFrame over the diagonal.
Parameters
----------
include_header:
If set, the column names will be added as first column.
header_name:
If `include_header` is set, this determines the name of the column that will be inserted
column_names:
Optional generator/iterator that yields column names. Will be used to replace the columns in the DataFrame.
Notes
-----
This is a very expensive operation. Perhaps you can do it differently.
Returns
-------
DataFrame
Examples
--------
>>> df = pl.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})
>>> df.transpose(include_header=True)
shape: (2, 4)
┌────────┬──────────┬──────────┬──────────┐
│ column ┆ column_0 ┆ column_1 ┆ column_2 │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 ┆ i64 │
╞════════╪══════════╪══════════╪══════════╡
│ a ┆ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ b ┆ 1 ┆ 2 ┆ 3 │
└────────┴──────────┴──────────┴──────────┘
# replace the auto generated column names with a list
>>> df.transpose(include_header=False, column_names=["a", "b", "c"])
shape: (2, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 1 ┆ 2 ┆ 3 │
└─────┴─────┴─────┘
Include the header as a separate column
>>> df.transpose(
... include_header=True, header_name="foo", column_names=["a", "b", "c"]
... )
shape: (2, 4)
┌─────┬─────┬─────┬─────┐
│ foo ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╪═════╡
│ a ┆ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 1 ┆ 2 ┆ 3 │
└─────┴─────┴─────┴─────┘
Replace the auto generated column with column names from a generator function
>>> def name_generator():
... base_name = "my_column_"
... count = 0
... while True:
... yield f"{base_name}{count}"
... count += 1
...
>>> df.transpose(include_header=False, column_names=name_generator())
shape: (2, 3)
┌─────────────┬─────────────┬─────────────┐
│ my_column_0 ┆ my_column_1 ┆ my_column_2 │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════════════╪═════════════╪═════════════╡
│ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 1 ┆ 2 ┆ 3 │
└─────────────┴─────────────┴─────────────┘
"""
df = self._from_pydf(self._df.transpose(include_header, header_name))
if column_names is not None:
names = []
n = df.width
if include_header:
names.append(header_name)
n -= 1
column_names = iter(column_names)
for _ in range(n):
names.append(next(column_names))
df.columns = names
return df
def write_parquet(
self,
file: Union[str, Path, BytesIO],
compression: Optional[
Union[
Literal[
"uncompressed", "snappy", "gzip", "lzo", "brotli", "lz4", "zstd"
],
str,
]
] = "lz4",
statistics: bool = False,
use_pyarrow: bool = False,
**kwargs: Any,
) -> None:
"""
Write the DataFrame disk in parquet format.
Parameters
----------
file
File path to which the file should be written.
compression
Compression method. Choose one of:
- "uncompressed" (not supported by pyarrow)
- "snappy"
- "gzip"
- "lzo"
- "brotli"
- "lz4"
- "zstd"
statistics
Write statistics to the parquet headers. This requires extra compute.
use_pyarrow
Use C++ parquet implementation vs rust parquet implementation.
At the moment C++ supports more features.
**kwargs are passed to pyarrow.parquet.write_table
"""
if compression is None:
compression = "uncompressed"
if isinstance(file, (str, Path)):
file = format_path(file)
if use_pyarrow:
if not _PYARROW_AVAILABLE:
raise ImportError( # pragma: no cover
"'pyarrow' is required when using 'write_parquet(..., use_pyarrow=True)'."
)
tbl = self.to_arrow()
data = {}
for i, column in enumerate(tbl):
# extract the name before casting
if column._name is None:
name = f"column_{i}"
else:
name = column._name
data[name] = column
tbl = pa.table(data)
pa.parquet.write_table(
table=tbl,
where=file,
compression=compression,
write_statistics=statistics,
**kwargs,
)
else:
self._df.to_parquet(file, compression, statistics)
def to_parquet(
self,
file: Union[str, Path, BytesIO],
compression: Optional[
Union[
Literal[
"uncompressed", "snappy", "gzip", "lzo", "brotli", "lz4", "zstd"
],
str,
]
] = "snappy",
statistics: bool = False,
use_pyarrow: bool = False,
**kwargs: Any,
) -> None: # pragma: no cover
"""
.. deprecated:: 0.13.12
Please use `write_parquet`
"""
warnings.warn(
"'to_parquet' is deprecated. please use 'write_parquet'", DeprecationWarning
)
return self.write_parquet(file, compression, statistics, use_pyarrow, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
Convert DataFrame to a 2d numpy array.
This operation clones data.
Notes
-----
If you're attempting to convert Utf8 to an array you'll need to install `pyarrow`.
Examples
--------
>>> df = pl.DataFrame(
... {"foo": [1, 2, 3], "bar": [6, 7, 8], "ham": ["a", "b", "c"]}
... )
>>> numpy_array = df.to_numpy()
>>> type(numpy_array)
<class 'numpy.ndarray'>
"""
out = self._df.to_numpy()
if out is None:
return np.vstack(
[self.to_series(i).to_numpy() for i in range(self.width)]
).T
else:
return out
    def __getstate__(self):  # type: ignore
        # Pickle support: serialize the frame as its list of column Series.
        return self.get_columns()
    def __setstate__(self, state):  # type: ignore
        # Pickle support: ``state`` is the column list produced by
        # ``__getstate__``; rebuild the underlying PyDataFrame from it.
        self._df = DataFrame(state)._df
def __mul__(
self: DF, other: Union["DataFrame", "pli.Series", int, float, bool]
) -> DF:
if isinstance(other, DataFrame):
return self._from_pydf(self._df.mul_df(other._df))
other = _prepare_other_arg(other)
return self._from_pydf(self._df.mul(other._s))
def __truediv__(
self: DF, other: Union["DataFrame", "pli.Series", int, float, bool]
) -> DF:
if isinstance(other, DataFrame):
return self._from_pydf(self._df.div_df(other._df))
other = _prepare_other_arg(other)
return self._from_pydf(self._df.div(other._s))
def __add__(
self: DF,
other: Union["DataFrame", "pli.Series", int, float, bool, str],
) -> DF:
if isinstance(other, DataFrame):
return self._from_pydf(self._df.add_df(other._df))
other = _prepare_other_arg(other)
return self._from_pydf(self._df.add(other._s))
def __sub__(
self: DF, other: Union["DataFrame", "pli.Series", int, float, bool]
) -> DF:
if isinstance(other, DataFrame):
return self._from_pydf(self._df.sub_df(other._df))
other = _prepare_other_arg(other)
return self._from_pydf(self._df.sub(other._s))
def __mod__(
self: DF, other: Union["DataFrame", "pli.Series", int, float, bool]
) -> DF:
if isinstance(other, DataFrame):
return self._from_pydf(self._df.rem_df(other._df))
other = _prepare_other_arg(other)
return self._from_pydf(self._df.rem(other._s))
    def __str__(self) -> str:
        # Table rendering is produced on the Rust side.
        return self._df.as_str()
    def __repr__(self) -> str:
        # repr() uses the same table rendering as str().
        return self.__str__()
    def __getattr__(self, item: Any) -> "PySeries":
        """
        Access columns as attribute (deprecated; emits DeprecationWarning).
        Any lookup failure is converted to AttributeError — see the note below.
        """
        # it is important that we return an AttributeError here
        # this is used by ipython to check some private
        # `_ipython_canary_method_should_not_exist_`
        # if we return any other error than AttributeError pretty printing
        # will not work in notebooks.
        # See: https://github.com/jupyter/notebook/issues/2014
        if item.startswith("_"):
            raise AttributeError(item)
        try:  # pragma: no cover
            warnings.warn(
                "accessing series as Attribute of a DataFrame is deprecated",
                DeprecationWarning,
            )
            return pli.wrap_s(self._df.column(item))
        except Exception:
            # Missing column (or any other failure) must surface as AttributeError.
            raise AttributeError(item)
    def __iter__(self) -> Iterator[Any]:
        # Iterating a DataFrame yields its columns as Series.
        return self.get_columns().__iter__()
    def find_idx_by_name(self, name: str) -> int:
        """
        Find the index of a column by name.
        Parameters
        ----------
        name
            Name of the column to find.
        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {"foo": [1, 2, 3], "bar": [6, 7, 8], "ham": ["a", "b", "c"]}
        ... )
        >>> df.find_idx_by_name("ham")
        2
        """
        # Delegates the lookup to the Rust-side implementation.
        return self._df.find_idx_by_name(name)
def _pos_idx(self, idx: int, dim: int) -> int:
if idx >= 0:
return idx
else:
return self.shape[dim] + idx
    # __getitem__() mostly returns a dataframe. The major exception is when a string is passed in. Note that there are
    # more subtle cases possible where a non-string value leads to a Series.
    @overload
    def __getitem__(self, item: str) -> "pli.Series":
        ...
    @overload
    def __getitem__(
        self: DF,
        item: Union[
            int, range, slice, np.ndarray, "pli.Expr", "pli.Series", List, tuple
        ],
    ) -> DF:
        ...
    def __getitem__(
        self: DF,
        item: Union[
            str, int, range, slice, np.ndarray, "pli.Expr", "pli.Series", List, tuple
        ],
    ) -> Union[DF, "pli.Series"]:
        """
        Select rows and/or columns by a wide variety of index types.
        Dispatches on the type of ``item``: a 2-tuple is a (row, column)
        selection; a string selects a single column as a Series; ints, ranges,
        slices, numpy arrays, Series and sequences select rows/columns as a
        DataFrame. Read the comments.
        """
        if isinstance(item, pli.Expr):  # pragma: no cover
            warnings.warn(
                "'using expressions in []' is deprecated. please use 'select'",
                DeprecationWarning,
            )
            return self.select(item)
        # select rows and columns at once
        # every 2d selection, i.e. tuple is row column order, just like numpy
        if isinstance(item, tuple) and len(item) == 2:
            row_selection, col_selection = item
            # df[:, unknown]
            if isinstance(row_selection, slice):
                # multiple slices
                # df[:, :]
                if isinstance(col_selection, slice):
                    # slice can be
                    # by index
                    #   [1:8]
                    # or by column name
                    #   ["foo":"bar"]
                    # first we make sure that the slice is by index
                    start = col_selection.start
                    stop = col_selection.stop
                    if isinstance(col_selection.start, str):
                        start = self.find_idx_by_name(col_selection.start)
                    if isinstance(col_selection.stop, str):
                        # stop is inclusive when given by name, hence the +1
                        stop = self.find_idx_by_name(col_selection.stop) + 1
                    col_selection = slice(start, stop, col_selection.step)
                    df = self.__getitem__(self.columns[col_selection])
                    return df[row_selection]
                # slice and boolean mask
                # df[:2, [True, False, True]]
                if isinstance(col_selection, (Sequence, pli.Series)):
                    if (
                        isinstance(col_selection[0], bool)
                        or isinstance(col_selection, pli.Series)
                        and col_selection.dtype() == Boolean
                    ):
                        df = self.__getitem__(row_selection)
                        select = []
                        for col, valid in zip(df.columns, col_selection):
                            if valid:
                                select.append(col)
                        return df.select(select)
                # single slice
                # df[:, unknown]
                # NOTE(review): the result of the next two expressions is
                # discarded — there is no ``return``. Control falls through to
                # the int/str/list branches below, which handle those
                # selections; adding a plain ``return`` here would change
                # behavior (e.g. for df[:, 0]). Likely leftover dead code —
                # confirm and remove.
                series = self.__getitem__(col_selection)
                # s[:]
                pli.wrap_s(series[row_selection])
            # df[2, :] (select row as df)
            if isinstance(row_selection, int):
                if isinstance(col_selection, (slice, list, np.ndarray)):
                    df = self[:, col_selection]
                    return df.slice(row_selection, 1)
                # df[2, "a"]
                if isinstance(col_selection, str):
                    return self[col_selection][row_selection]
            # column selection can be "a" and ["a", "b"]
            if isinstance(col_selection, str):
                col_selection = [col_selection]
            # df[:, 1]
            if isinstance(col_selection, int):
                series = self.to_series(col_selection)
                return series[row_selection]
            if isinstance(col_selection, list):
                # df[:, [1, 2]]
                # select by column indexes
                if isinstance(col_selection[0], int):
                    series_list = [self.to_series(i) for i in col_selection]
                    df = self.__class__(series_list)
                    return df[row_selection]
            df = self.__getitem__(col_selection)
            return df.__getitem__(row_selection)
        # select single column
        # df["foo"]
        if isinstance(item, str):
            return pli.wrap_s(self._df.column(item))
        # df[idx]
        if isinstance(item, int):
            return self.slice(self._pos_idx(item, dim=0), 1)
        # df[range(n)]
        if isinstance(item, range):
            return self[range_to_slice(item)]
        # df[:]
        if isinstance(item, slice):
            # special case df[::-1]
            if item.start is None and item.stop is None and item.step == -1:
                return self.select(pli.col("*").reverse())
            # NOTE(review): ``slice`` objects have no ``end`` attribute, so this
            # getattr always returns False and the ValueError below is
            # unreachable; step handling happens via ``take_every`` further
            # down. Confirm intent.
            if getattr(item, "end", False):
                raise ValueError("A slice with steps larger than 1 is not supported.")
            if item.start is None:
                start = 0
            else:
                start = item.start
            if item.stop is None:
                stop = self.height
            else:
                stop = item.stop
            length = stop - start
            if item.step is None:
                # df[start:stop]
                return self.slice(start, length)
            else:
                # df[start:stop:step]
                return self.select(
                    pli.col("*").slice(start, length).take_every(item.step)
                )
        # select rows by numpy mask or index
        # df[[1, 2, 3]]
        # df[[true, false, true]]
        if isinstance(item, np.ndarray):
            if item.dtype == int:
                return self._from_pydf(self._df.take(item))
            if isinstance(item[0], str):
                return self._from_pydf(self._df.select(item))
            if item.dtype == bool:
                return self._from_pydf(self._df.filter(pli.Series("", item).inner()))
        if isinstance(item, Sequence):
            if isinstance(item[0], str):
                # select multiple columns
                # df[["foo", "bar"]]
                return self._from_pydf(self._df.select(item))
            elif isinstance(item[0], pli.Expr):
                return self.select(item)
            elif type(item[0]) == bool:
                item = pli.Series("", item)  # fall through to next if isinstance
            elif is_int_sequence(item):
                return self._from_pydf(
                    self._df.take([self._pos_idx(i, dim=0) for i in item])
                )
        if isinstance(item, pli.Series):
            dtype = item.dtype
            if dtype == Boolean:
                return self._from_pydf(self._df.filter(item.inner()))
            if dtype == UInt32:
                return self._from_pydf(self._df.take_with_series(item.inner()))
        # if no data has been returned, the operation is not supported
        raise NotImplementedError
    def __setitem__(
        self, key: Union[str, List, Tuple[Any, Union[str, int]]], value: Any
    ) -> None:  # pragma: no cover
        """
        Set a column or cells by indexing (deprecated; use ``with_column``).
        """
        warnings.warn(
            "setting a DataFrame by indexing is deprecated; Consider using DataFrame.with_column",
            DeprecationWarning,
        )
        # df["foo"] = series
        if isinstance(key, str):
            try:
                self.replace(key, pli.Series(key, value))
            except Exception:
                # Column did not exist (or replace failed): append instead.
                self.hstack([pli.Series(key, value)], in_place=True)
        # df[["C", "D"]]
        elif isinstance(key, list):
            value = np.array(value)
            if len(value.shape) != 2:
                raise ValueError("can only set multiple columns with 2D matrix")
            if value.shape[1] != len(key):
                raise ValueError(
                    "matrix columns should be equal to list use to determine column names"
                )
            # Assign one matrix column per name (recurses into the str branch).
            for (i, name) in enumerate(key):
                self[name] = value[:, i]
        # df[a, b]
        elif isinstance(key, tuple):
            row_selection, col_selection = key
            # get series column selection
            if isinstance(col_selection, str):
                s = self.__getitem__(col_selection)
            elif isinstance(col_selection, int):
                s = self[:, col_selection]  # type: ignore
            else:
                raise ValueError(f"column selection not understood: {col_selection}")
            # dispatch to __setitem__ of Series to do modification
            s[row_selection] = value
            # now find the location to place series
            # df[idx]
            if isinstance(col_selection, int):
                self.replace_at_idx(col_selection, s)
            # df["foo"]
            elif isinstance(col_selection, str):
                self.replace(col_selection, s)
        else:
            raise NotImplementedError
    def __len__(self) -> int:
        # len(df) is the number of rows.
        return self.height
def _repr_html_(self) -> str:
"""
Used by jupyter notebooks to get a html table.
Output rows and columns can be modified by setting the following ENVIRONMENT variables:
* POLARS_FMT_MAX_COLS: set the number of columns
* POLARS_FMT_MAX_ROWS: set the number of rows
"""
max_cols = int(os.environ.get("POLARS_FMT_MAX_COLS", default=75))
max_rows = int(os.environ.get("POLARS_FMT_MAX_ROWS", default=25))
return "\n".join(NotebookFormatter(self, max_cols, max_rows).render())
def to_series(self, index: int = 0) -> "pli.Series":
"""
Select column as Series at index location.
Parameters
----------
index
Location of selection.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.to_series(1)
shape: (3,)
Series: 'bar' [i64]
[
6
7
8
]
"""
return pli.wrap_s(self._df.select_at_idx(index))
def rename(self: DF, mapping: Dict[str, str]) -> DF:
"""
Rename column names.
Parameters
----------
mapping
Key value pairs that map from old name to new name.
Examples
--------
>>> df = pl.DataFrame(
... {"foo": [1, 2, 3], "bar": [6, 7, 8], "ham": ["a", "b", "c"]}
... )
>>> df.rename({"foo": "apple"})
shape: (3, 3)
┌───────┬─────┬─────┐
│ apple ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═══════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└───────┴─────┴─────┘
"""
return self.lazy().rename(mapping).collect(no_optimization=True)
def insert_at_idx(self, index: int, series: "pli.Series") -> None:
"""
Insert a Series at a certain column index. This operation is in place.
Parameters
----------
index
Column to insert the new `Series` column.
series
`Series` to insert.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]})
>>> s = pl.Series("baz", [97, 98, 99])
>>> df.insert_at_idx(1, s) # returns None
>>> df
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ baz ┆ bar │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ 1 ┆ 97 ┆ 4 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 98 ┆ 5 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 99 ┆ 6 │
└─────┴─────┴─────┘
"""
self._df.insert_at_idx(index, series._s)
def filter(self: DF, predicate: "pli.Expr") -> DF:
"""
Filter the rows in the DataFrame based on a predicate expression.
Parameters
----------
predicate
Expression that evaluates to a boolean Series.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
Filter on one condition:
>>> df.filter(pl.col("foo") < 3)
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└─────┴─────┴─────┘
Filter on multiple conditions:
>>> df.filter((pl.col("foo") < 3) & (pl.col("ham") == "a"))
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
└─────┴─────┴─────┘
"""
return (
self.lazy()
.filter(predicate)
.collect(no_optimization=True, string_cache=False)
)
@property
def shape(self) -> Tuple[int, int]:
"""
Get the shape of the DataFrame.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5]})
>>> df.shape
(5, 1)
"""
return self._df.shape()
@property
def height(self) -> int:
"""
Get the height of the DataFrame.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5]})
>>> df.height
5
"""
return self._df.height()
@property
def width(self) -> int:
"""
Get the width of the DataFrame.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3, 4, 5]})
>>> df.width
1
"""
return self._df.width()
@property
def columns(self) -> List[str]:
"""
Get or set column names.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.columns
['foo', 'bar', 'ham']
Set column names:
>>> df.columns = ["apple", "banana", "orange"]
>>> df
shape: (3, 3)
┌───────┬────────┬────────┐
│ apple ┆ banana ┆ orange │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═══════╪════════╪════════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└───────┴────────┴────────┘
"""
return self._df.columns()
@columns.setter
def columns(self, columns: Sequence[str]) -> None:
"""
Change the column names of the `DataFrame`.
Parameters
----------
columns
A list with new names for the `DataFrame`.
The length of the list should be equal to the width of the `DataFrame`.
"""
self._df.set_column_names(columns)
@property
def dtypes(self) -> List[Type[DataType]]:
"""
Get dtypes of columns in DataFrame. Dtypes can also be found in column headers when printing the DataFrame.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.dtypes
[<class 'polars.datatypes.Int64'>, <class 'polars.datatypes.Float64'>, <class 'polars.datatypes.Utf8'>]
>>> df
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└─────┴─────┴─────┘
See Also
--------
schema : Return a dict of [column name, dtype]
"""
return self._df.dtypes()
@property
def schema(self) -> Dict[str, Type[DataType]]:
"""
Get a dict[column name, DataType]
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.schema
{'foo': <class 'polars.datatypes.Int64'>, 'bar': <class 'polars.datatypes.Float64'>, 'ham': <class 'polars.datatypes.Utf8'>}
"""
return dict(zip(self.columns, self.dtypes))
def describe(self: DF) -> DF:
"""
Summary statistics for a DataFrame. Only summarizes numeric datatypes at the moment and returns nulls for non numeric datatypes.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1.0, 2.8, 3.0],
... "b": [4, 5, 6],
... "c": [True, False, True],
... }
... )
>>> df.describe()
shape: (5, 4)
┌──────────┬────────────────────┬─────┬──────┐
│ describe ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 ┆ f64 │
╞══════════╪════════════════════╪═════╪══════╡
│ mean ┆ 2.2666666666666666 ┆ 5 ┆ null │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ std ┆ 1.1015141094572205 ┆ 1 ┆ null │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ min ┆ 1 ┆ 4 ┆ 0.0 │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ max ┆ 3 ┆ 6 ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ median ┆ 2.8 ┆ 5 ┆ null │
└──────────┴────────────────────┴─────┴──────┘
"""
def describe_cast(self: DF) -> DF:
columns = []
for s in self:
if s.is_numeric() or s.is_boolean():
columns.append(s.cast(float))
else:
columns.append(s)
return self.__class__(columns)
summary = self._from_pydf(
pli.concat(
[
describe_cast(self.mean()),
describe_cast(self.std()),
describe_cast(self.min()),
describe_cast(self.max()),
describe_cast(self.median()),
]
)._df
)
summary.insert_at_idx(
0, pli.Series("describe", ["mean", "std", "min", "max", "median"])
)
return summary
def replace_at_idx(self, index: int, series: "pli.Series") -> None:
"""
Replace a column at an index location.
Parameters
----------
index
Column index.
series
Series that will replace the column.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> x = pl.Series("apple", [10, 20, 30])
>>> df.replace_at_idx(0, x)
>>> df
shape: (3, 3)
┌───────┬─────┬─────┐
│ apple ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═══════╪═════╪═════╡
│ 10 ┆ 6 ┆ a │
├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 20 ┆ 7 ┆ b │
├╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 30 ┆ 8 ┆ c │
└───────┴─────┴─────┘
"""
self._df.replace_at_idx(index, series._s)
    @overload
    def sort(
        self: DF,
        by: Union[str, "pli.Expr", List[str], List["pli.Expr"]],
        reverse: Union[bool, List[bool]] = ...,
        nulls_last: bool = ...,
        *,
        in_place: Literal[False] = ...,
    ) -> DF:
        # Overload: default (not in-place) call returns a new DataFrame.
        ...
    @overload
    def sort(
        self,
        by: Union[str, "pli.Expr", List[str], List["pli.Expr"]],
        reverse: Union[bool, List[bool]] = ...,
        nulls_last: bool = ...,
        *,
        in_place: Literal[True],
    ) -> None:
        # Overload: explicit in_place=True returns None (mutates self).
        # NOTE(review): the implementation actually returns `self` on the
        # expression path even when in_place=True — see comment below.
        ...
    @overload
    def sort(
        self: DF,
        by: Union[str, "pli.Expr", List[str], List["pli.Expr"]],
        reverse: Union[bool, List[bool]] = ...,
        nulls_last: bool = ...,
        *,
        in_place: bool,
    ) -> Optional[DF]:
        # Overload: in_place only known at runtime -> Optional result.
        ...
    def sort(
        self: DF,
        by: Union[str, "pli.Expr", List[str], List["pli.Expr"]],
        reverse: Union[bool, List[bool]] = False,
        nulls_last: bool = False,
        *,
        in_place: bool = False,
    ) -> Optional[DF]:
        """
        Sort the DataFrame by column.
        Parameters
        ----------
        by
            Column(s) or expression(s) to sort by.
        reverse
            Reverse/descending sort.
        nulls_last
            Place null values last. Can only be used if sorted by a single column.
        in_place
            Perform operation in-place. Deprecated; emits a DeprecationWarning.
        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "foo": [1, 2, 3],
        ...         "bar": [6.0, 7.0, 8.0],
        ...         "ham": ["a", "b", "c"],
        ...     }
        ... )
        >>> df.sort("foo", reverse=True)
        shape: (3, 3)
        ┌─────┬─────┬─────┐
        │ foo ┆ bar ┆ ham │
        │ --- ┆ --- ┆ --- │
        │ i64 ┆ f64 ┆ str │
        ╞═════╪═════╪═════╡
        │ 3   ┆ 8   ┆ c   │
        ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
        │ 2   ┆ 7   ┆ b   │
        ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
        │ 1   ┆ 6   ┆ a   │
        └─────┴─────┴─────┘
        **Sort by multiple columns.**
        For multiple columns we can also use expression syntax.
        >>> df.sort(
        ...     [pl.col("foo"), pl.col("bar") ** 2],
        ...     reverse=[True, False],
        ... )
        """
        # A list of keys or an expression cannot be handled by the eager
        # engine directly; route through the lazy engine instead.
        if type(by) is list or isinstance(by, pli.Expr):
            df = (
                self.lazy()
                .sort(by, reverse, nulls_last)
                .collect(no_optimization=True, string_cache=False)
            )
            if in_place:  # pragma: no cover
                warnings.warn(
                    "in-place sorting is deprecated; please use default sorting",
                    DeprecationWarning,
                )
                # Swap the underlying Rust frame so `self` is mutated, then
                # return self (not None) for chaining on this legacy path.
                self._df = df._df
                return self
            return df
        # Single string key: eager path.
        if in_place:  # pragma: no cover
            warnings.warn(
                "in-place sorting is deprecated; please use default sorting",
                DeprecationWarning,
            )
            # NOTE: nulls_last is not forwarded on the in-place path.
            self._df.sort_in_place(by, reverse)
            return None
        else:
            return self._from_pydf(self._df.sort(by, reverse, nulls_last))
def frame_equal(self, other: "DataFrame", null_equal: bool = True) -> bool:
"""
Check if DataFrame is equal to other.
Parameters
----------
other
DataFrame to compare with.
null_equal
Consider null values as equal.
Examples
--------
>>> df1 = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> df2 = pl.DataFrame(
... {
... "foo": [3, 2, 1],
... "bar": [8.0, 7.0, 6.0],
... "ham": ["c", "b", "a"],
... }
... )
>>> df1.frame_equal(df1)
True
>>> df1.frame_equal(df2)
False
"""
return self._df.frame_equal(other._df, null_equal)
def replace(self, column: str, new_col: "pli.Series") -> None:
"""
Replace a column by a new Series.
Parameters
----------
column
Column to replace.
new_col
New column to insert.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]})
>>> s = pl.Series([10, 20, 30])
>>> df.replace("foo", s) # works in-place!
>>> df
shape: (3, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 10 ┆ 4 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 20 ┆ 5 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 30 ┆ 6 │
└─────┴─────┘
"""
self._df.replace(column, new_col.inner())
def slice(self: DF, offset: int, length: int) -> DF:
"""
Slice this DataFrame over the rows direction.
Parameters
----------
offset
Offset index.
length
Length of the slice.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.slice(1, 2)
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ str │
╞═════╪═════╪═════╡
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└─────┴─────┴─────┘
"""
if length < 0:
length = self.height - offset + length
return self._from_pydf(self._df.slice(offset, length))
def limit(self: DF, length: int = 5) -> DF:
"""
Get first N rows as DataFrame.
See Also `DataFrame.head`
Parameters
----------
length
Amount of rows to take.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.limit(2)
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└─────┴─────┴─────┘
"""
return self.head(length)
def head(self: DF, length: int = 5) -> DF:
"""
Get first N rows as DataFrame.
Parameters
----------
length
Length of the head.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3, 4, 5],
... "bar": [6, 7, 8, 9, 10],
... "ham": ["a", "b", "c", "d", "e"],
... }
... )
>>> df.head(3)
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└─────┴─────┴─────┘
"""
return self._from_pydf(self._df.head(length))
def tail(self: DF, length: int = 5) -> DF:
"""
Get last N rows as DataFrame.
Parameters
----------
length
Length of the tail.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3, 4, 5],
... "bar": [6, 7, 8, 9, 10],
... "ham": ["a", "b", "c", "d", "e"],
... }
... )
>>> df.tail(3)
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 3 ┆ 8 ┆ c │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 9 ┆ d │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 5 ┆ 10 ┆ e │
└─────┴─────┴─────┘
"""
return self._from_pydf(self._df.tail(length))
def drop_nulls(self: DF, subset: Optional[Union[str, List[str]]] = None) -> DF:
"""
Return a new DataFrame where the null values are dropped.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, None, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.drop_nulls()
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
└─────┴─────┴─────┘
This method only drops nulls row-wise if any single value of the row is null.
Below are some example snippets that show how you could drop null values based on other
conditions
>>> df = pl.DataFrame(
... {
... "a": [None, None, None, None],
... "b": [1, 2, None, 1],
... "c": [1, None, None, 1],
... }
... )
>>> df
shape: (4, 3)
┌──────┬──────┬──────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ f64 ┆ i64 ┆ i64 │
╞══════╪══════╪══════╡
│ null ┆ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ null ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 1 ┆ 1 │
└──────┴──────┴──────┘
Drop a row only if all values are null:
>>> df.filter(
... ~pl.fold(
... acc=True,
... f=lambda acc, s: acc & s.is_null(),
... exprs=pl.all(),
... )
... )
shape: (3, 3)
┌──────┬─────┬──────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ f64 ┆ i64 ┆ i64 │
╞══════╪═════╪══════╡
│ null ┆ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 1 ┆ 1 │
└──────┴─────┴──────┘
Drop a column if all values are null:
>>> df[:, [not (s.null_count() == df.height) for s in df]]
shape: (4, 2)
┌──────┬──────┐
│ b ┆ c │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞══════╪══════╡
│ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 1 ┆ 1 │
└──────┴──────┘
"""
if isinstance(subset, str):
subset = [subset]
return self._from_pydf(self._df.drop_nulls(subset))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
"""
Apply a function on Self.
Parameters
----------
func
Callable.
args
Arguments.
kwargs
Keyword arguments.
Examples
--------
>>> def cast_str_to_int(data, col_name):
... return data.with_column(pl.col(col_name).cast(pl.Int64))
...
>>> df = pl.DataFrame({"a": [1, 2, 3, 4], "b": ["10", "20", "30", "40"]})
>>> df.pipe(cast_str_to_int, col_name="b")
shape: (4, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 10 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 20 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 30 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 40 │
└─────┴─────┘
"""
return func(self, *args, **kwargs)
def with_row_count(self: DF, name: str = "row_nr", offset: int = 0) -> DF:
"""
Add a column at index 0 that counts the rows.
Parameters
----------
name
Name of the column to add.
offset
Start the row count at this offset. Default = 0
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 3, 5],
... "b": [2, 4, 6],
... }
... )
>>> df.with_row_count()
shape: (3, 3)
┌────────┬─────┬─────┐
│ row_nr ┆ a ┆ b │
│ --- ┆ --- ┆ --- │
│ u32 ┆ i64 ┆ i64 │
╞════════╪═════╪═════╡
│ 0 ┆ 1 ┆ 2 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 1 ┆ 3 ┆ 4 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 5 ┆ 6 │
└────────┴─────┴─────┘
"""
return self._from_pydf(self._df.with_row_count(name, offset))
def groupby(
self: DF,
by: Union[str, "pli.Expr", Sequence[str], Sequence["pli.Expr"]],
maintain_order: bool = False,
) -> "GroupBy[DF]":
"""
Start a groupby operation.
Parameters
----------
by
Column(s) to group by.
maintain_order
Make sure that the order of the groups remain consistent. This is more expensive than a default groupby.
Note that this only works in expression aggregations.
Examples
--------
Below we group by column `"a"`, and we sum column `"b"`.
>>> df = pl.DataFrame(
... {
... "a": ["a", "b", "a", "b", "b", "c"],
... "b": [1, 2, 3, 4, 5, 6],
... "c": [6, 5, 4, 3, 2, 1],
... }
... )
>>> df.groupby("a")["b"].sum().sort(by="a")
shape: (3, 2)
┌─────┬───────┐
│ a ┆ b_sum │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═══════╡
│ a ┆ 4 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ b ┆ 11 │
├╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ c ┆ 6 │
└─────┴───────┘
We can also loop over the grouped `DataFrame`
>>> for sub_df in df.groupby("a"):
... print(sub_df) # doctest: +IGNORE_RESULT
...
shape: (3, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ b ┆ 2 ┆ 5 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 4 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 5 ┆ 2 │
└─────┴─────┴─────┘
shape: (1, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ c ┆ 6 ┆ 1 │
└─────┴─────┴─────┘
"""
if isinstance(by, str):
by = [by]
return GroupBy(
self._df,
by, # type: ignore
dataframe_class=self.__class__,
maintain_order=maintain_order,
)
def groupby_rolling(
self: DF,
index_column: str,
period: str,
offset: Optional[str] = None,
closed: str = "right",
by: Optional[Union[str, List[str], "pli.Expr", List["pli.Expr"]]] = None,
) -> "RollingGroupBy[DF]":
"""
Create rolling groups based on a time column (or index value of type Int32, Int64).
Different from a rolling groupby the windows are now determined by the individual values and are not of constant
intervals. For constant intervals use *groupby_dynamic*
.. seealso::
groupby_dynamic
The `period` and `offset` arguments are created with
the following string language:
- 1ns (1 nanosecond)
- 1us (1 microsecond)
- 1ms (1 millisecond)
- 1s (1 second)
- 1m (1 minute)
- 1h (1 hour)
- 1d (1 day)
- 1w (1 week)
- 1mo (1 calendar month)
- 1y (1 calendar year)
- 1i (1 index count)
Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
In case of a groupby_rolling on an integer column, the windows are defined by:
- **"1i" # length 1**
- **"10i" # length 10**
Parameters
----------
index_column
Column used to group based on the time window.
Often to type Date/Datetime
This column must be sorted in ascending order. If not the output will not make sense.
In case of a rolling groupby on indices, dtype needs to be one of {Int32, Int64}. Note that
Int32 gets temporarily cast to Int64, so if performance matters use an Int64 column.
period
length of the window
offset
offset of the window. Default is -period
closed
Defines if the window interval is closed or not.
Any of {"left", "right", "both" "none"}
by
Also group by this column/these columns
Examples
--------
>>> dates = [
... "2020-01-01 13:45:48",
... "2020-01-01 16:42:13",
... "2020-01-01 16:45:09",
... "2020-01-02 18:12:48",
... "2020-01-03 19:45:32",
... "2020-01-08 23:16:43",
... ]
>>> df = pl.DataFrame({"dt": dates, "a": [3, 7, 5, 9, 2, 1]}).with_column(
... pl.col("dt").str.strptime(pl.Datetime)
... )
>>> out = df.groupby_rolling(index_column="dt", period="2d").agg(
... [
... pl.sum("a").alias("sum_a"),
... pl.min("a").alias("min_a"),
... pl.max("a").alias("max_a"),
... ]
... )
>>> assert out["sum_a"].to_list() == [3, 10, 15, 24, 11, 1]
>>> assert out["max_a"].to_list() == [3, 7, 7, 9, 9, 1]
>>> assert out["min_a"].to_list() == [3, 3, 3, 3, 2, 1]
>>> out
shape: (6, 4)
┌─────────────────────┬───────┬───────┬───────┐
│ dt ┆ a_sum ┆ a_max ┆ a_min │
│ --- ┆ --- ┆ --- ┆ --- │
│ datetime[ms] ┆ i64 ┆ i64 ┆ i64 │
╞═════════════════════╪═══════╪═══════╪═══════╡
│ 2020-01-01 13:45:48 ┆ 3 ┆ 3 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-01 16:42:13 ┆ 10 ┆ 7 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-01 16:45:09 ┆ 15 ┆ 7 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-02 18:12:48 ┆ 24 ┆ 9 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-03 19:45:32 ┆ 11 ┆ 9 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2020-01-08 23:16:43 ┆ 1 ┆ 1 ┆ 1 │
└─────────────────────┴───────┴───────┴───────┘
"""
return RollingGroupBy(self, index_column, period, offset, closed, by)
def groupby_dynamic(
self,
index_column: str,
every: str,
period: Optional[str] = None,
offset: Optional[str] = None,
truncate: bool = True,
include_boundaries: bool = False,
closed: str = "right",
by: Optional[Union[str, List[str], "pli.Expr", List["pli.Expr"]]] = None,
) -> "DynamicGroupBy":
"""
Groups based on a time value (or index value of type Int32, Int64). Time windows are calculated and rows are assigned to windows.
Different from a normal groupby is that a row can be member of multiple groups. The time/index window could
be seen as a rolling window, with a window size determined by dates/times/values instead of slots in the DataFrame.
A window is defined by:
- every: interval of the window
- period: length of the window
- offset: offset of the window
The `every`, `period` and `offset` arguments are created with
the following string language:
- 1ns (1 nanosecond)
- 1us (1 microsecond)
- 1ms (1 millisecond)
- 1s (1 second)
- 1m (1 minute)
- 1h (1 hour)
- 1d (1 day)
- 1w (1 week)
- 1mo (1 calendar month)
- 1y (1 calendar year)
- 1i (1 index count)
Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
In case of a groupby_dynamic on an integer column, the windows are defined by:
- "1i" # length 1
- "10i" # length 10
Parameters
----------
index_column
Column used to group based on the time window.
Often to type Date/Datetime
This column must be sorted in ascending order. If not the output will not make sense.
In case of a dynamic groupby on indices, dtype needs to be one of {Int32, Int64}. Note that
Int32 gets temporarily cast to Int64, so if performance matters use an Int64 column.
every
interval of the window
period
length of the window, if None it is equal to 'every'
offset
offset of the window if None and period is None it will be equal to negative `every`
truncate
truncate the time value to the window lower bound
include_boundaries
add the lower and upper bound of the window to the "_lower_bound" and "_upper_bound" columns.
this will impact performance because it's harder to parallelize
closed
Defines if the window interval is closed or not.
Any of {"left", "right", "both" "none"}
by
Also group by this column/these columns
Examples
--------
>>> from datetime import datetime
>>> # create an example dataframe
>>> df = pl.DataFrame(
... {
... "time": pl.date_range(
... low=datetime(2021, 12, 16),
... high=datetime(2021, 12, 16, 3),
... interval="30m",
... ),
... "n": range(7),
... }
... )
>>> df
shape: (7, 2)
┌─────────────────────┬─────┐
│ time ┆ n │
│ --- ┆ --- │
│ datetime[ns] ┆ i64 │
╞═════════════════════╪═════╡
│ 2021-12-16 00:00:00 ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 00:30:00 ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 01:30:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 4 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 02:30:00 ┆ 5 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 2021-12-16 03:00:00 ┆ 6 │
└─────────────────────┴─────┘
Group by windows of 1 hour starting at 2021-12-16 00:00:00.
>>> (
... df.groupby_dynamic("time", every="1h").agg(
... [
... pl.col("time").min().alias("time_min"),
... pl.col("time").max().alias("time_max"),
... ]
... )
... )
shape: (3, 3)
┌─────────────────────┬─────────────────────┬─────────────────────┐
│ time ┆ time_min ┆ time_max │
│ --- ┆ --- ┆ --- │
│ datetime[ns] ┆ datetime[ns] ┆ datetime[ns] │
╞═════════════════════╪═════════════════════╪═════════════════════╡
│ 2021-12-16 00:00:00 ┆ 2021-12-16 00:30:00 ┆ 2021-12-16 01:00:00 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 2021-12-16 01:30:00 ┆ 2021-12-16 02:00:00 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 2021-12-16 02:30:00 ┆ 2021-12-16 03:00:00 │
└─────────────────────┴─────────────────────┴─────────────────────┘
The window boundaries can also be added to the aggregation result
>>> (
... df.groupby_dynamic("time", every="1h", include_boundaries=True).agg(
... [pl.col("time").count().alias("time_count")]
... )
... )
shape: (3, 4)
┌─────────────────────┬─────────────────────┬─────────────────────┬────────────┐
│ _lower_boundary ┆ _upper_boundary ┆ time ┆ time_count │
│ --- ┆ --- ┆ --- ┆ --- │
│ datetime[ns] ┆ datetime[ns] ┆ datetime[ns] ┆ u32 │
╞═════════════════════╪═════════════════════╪═════════════════════╪════════════╡
│ 2021-12-16 00:00:00 ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 00:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 2 │
└─────────────────────┴─────────────────────┴─────────────────────┴────────────┘
When closed="left", should not include right end of interval [lower_bound, upper_bound)
>>> (
... df.groupby_dynamic("time", every="1h", closed="left").agg(
... [
... pl.col("time").count().alias("time_count"),
... pl.col("time").list().alias("time_agg_list"),
... ]
... )
... )
shape: (4, 3)
┌─────────────────────┬────────────┬─────────────────────────────────────┐
│ time ┆ time_count ┆ time_agg_list │
│ --- ┆ --- ┆ --- │
│ datetime[ns] ┆ u32 ┆ list [datetime[ns]] │
╞═════════════════════╪════════════╪═════════════════════════════════════╡
│ 2021-12-16 00:00:00 ┆ 2 ┆ [2021-12-16 00:00:00, 2021-12-16... │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 2 ┆ [2021-12-16 01:00:00, 2021-12-16... │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 2 ┆ [2021-12-16 02:00:00, 2021-12-16... │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 03:00:00 ┆ 1 ┆ [2021-12-16 03:00:00] │
└─────────────────────┴────────────┴─────────────────────────────────────┘
When closed="both" the time values at the window boundaries belong to 2 groups.
>>> (
... df.groupby_dynamic("time", every="1h", closed="both").agg(
... [pl.col("time").count().alias("time_count")]
... )
... )
shape: (4, 2)
┌─────────────────────┬────────────┐
│ time ┆ time_count │
│ --- ┆ --- │
│ datetime[ns] ┆ u32 │
╞═════════════════════╪════════════╡
│ 2021-12-16 00:00:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ 3 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2021-12-16 03:00:00 ┆ 1 │
└─────────────────────┴────────────┘
Dynamic groupbys can also be combined with grouping on normal keys
>>> df = pl.DataFrame(
... {
... "time": pl.date_range(
... low=datetime(2021, 12, 16),
... high=datetime(2021, 12, 16, 3),
... interval="30m",
... ),
... "groups": ["a", "a", "a", "b", "b", "a", "a"],
... }
... )
>>> df
shape: (7, 2)
┌─────────────────────┬────────┐
│ time ┆ groups │
│ --- ┆ --- │
│ datetime[ns] ┆ str │
╞═════════════════════╪════════╡
│ 2021-12-16 00:00:00 ┆ a │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 00:30:00 ┆ a │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:00:00 ┆ a │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 01:30:00 ┆ b │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:00:00 ┆ b │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 02:30:00 ┆ a │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-12-16 03:00:00 ┆ a │
└─────────────────────┴────────┘
>>> (
... df.groupby_dynamic(
... "time",
... every="1h",
... closed="both",
... by="groups",
... include_boundaries=True,
... ).agg([pl.col("time").count().alias("time_count")])
... )
shape: (6, 5)
┌────────┬─────────────────────┬─────────────────────┬─────────────────────┬────────────┐
│ groups ┆ _lower_boundary ┆ _upper_boundary ┆ time ┆ time_count │
│ --- ┆ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ datetime[ns] ┆ datetime[ns] ┆ datetime[ns] ┆ u32 │
╞════════╪═════════════════════╪═════════════════════╪═════════════════════╪════════════╡
│ a ┆ 2021-12-16 00:00:00 ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 00:00:00 ┆ 3 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 1 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 04:00:00 ┆ 2021-12-16 03:00:00 ┆ 1 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ b ┆ 2021-12-16 01:00:00 ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 01:00:00 ┆ 2 │
├╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ b ┆ 2021-12-16 02:00:00 ┆ 2021-12-16 03:00:00 ┆ 2021-12-16 02:00:00 ┆ 1 │
└────────┴─────────────────────┴─────────────────────┴─────────────────────┴────────────┘
Dynamic groupby on an index column
>>> df = pl.DataFrame(
... {
... "idx": np.arange(6),
... "A": ["A", "A", "B", "B", "B", "C"],
... }
... )
>>> (
... df.groupby_dynamic(
... "idx",
... every="2i",
... period="3i",
... include_boundaries=True,
... ).agg(pl.col("A").list().alias("A_agg_list"))
... )
shape: (3, 4)
┌─────────────────┬─────────────────┬─────┬─────────────────┐
│ _lower_boundary ┆ _upper_boundary ┆ idx ┆ A_agg_list │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 ┆ list [str] │
╞═════════════════╪═════════════════╪═════╪═════════════════╡
│ 0 ┆ 3 ┆ 0 ┆ ["A", "B", "B"] │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ 5 ┆ 2 ┆ ["B", "B", "C"] │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 4 ┆ 7 ┆ 4 ┆ ["C"] │
└─────────────────┴─────────────────┴─────┴─────────────────┘
"""
return DynamicGroupBy(
self,
index_column,
every,
period,
offset,
truncate,
include_boundaries,
closed,
by,
)
def upsample(
self: DF,
time_column: str,
every: str,
offset: Optional[str] = None,
by: Optional[Union[str, Sequence[str]]] = None,
maintain_order: bool = False,
) -> DF:
"""
Upsample a DataFrame at a regular frequency.
Parameters
----------
time_column
time column will be used to determine a date_range.
Note that this column has to be sorted for the output to make sense.
every
interval will start 'every' duration
offset
change the start of the date_range by this offset.
by
First group by these columns and then upsample for every group
maintain_order
Keep the ordering predictable. This is slower.
The `period` and `offset` arguments are created with
the following string language:
- 1ns (1 nanosecond)
- 1us (1 microsecond)
- 1ms (1 millisecond)
- 1s (1 second)
- 1m (1 minute)
- 1h (1 hour)
- 1d (1 day)
- 1w (1 week)
- 1mo (1 calendar month)
- 1y (1 calendar year)
- 1i (1 index count)
Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
Examples
--------
Upsample a DataFrame by a certain interval.
>>> from datetime import datetime
>>> df = pl.DataFrame(
... {
... "time": [
... datetime(2021, 2, 1),
... datetime(2021, 4, 1),
... datetime(2021, 5, 1),
... datetime(2021, 6, 1),
... ],
... "groups": ["A", "B", "A", "B"],
... "values": [0, 1, 2, 3],
... }
... )
>>> (
... df.upsample(
... time_column="time", every="1mo", by="groups", maintain_order=True
... ).select(pl.all().forward_fill())
... )
shape: (7, 3)
┌─────────────────────┬────────┬────────┐
│ time ┆ groups ┆ values │
│ --- ┆ --- ┆ --- │
│ datetime[ns] ┆ str ┆ i64 │
╞═════════════════════╪════════╪════════╡
│ 2021-02-01 00:00:00 ┆ A ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-03-01 00:00:00 ┆ A ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-04-01 00:00:00 ┆ A ┆ 0 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-05-01 00:00:00 ┆ A ┆ 2 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-04-01 00:00:00 ┆ B ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-05-01 00:00:00 ┆ B ┆ 1 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤
│ 2021-06-01 00:00:00 ┆ B ┆ 3 │
└─────────────────────┴────────┴────────┘
"""
if by is None:
by = []
if isinstance(by, str):
by = [by]
if offset is None:
offset = "0ns"
return self._from_pydf(
self._df.upsample(by, time_column, every, offset, maintain_order)
)
def join_asof(
self: DF,
df: "DataFrame",
left_on: Optional[str] = None,
right_on: Optional[str] = None,
on: Optional[str] = None,
by_left: Optional[Union[str, List[str]]] = None,
by_right: Optional[Union[str, List[str]]] = None,
by: Optional[Union[str, List[str]]] = None,
strategy: str = "backward",
suffix: str = "_right",
tolerance: Optional[Union[str, int, float]] = None,
allow_parallel: bool = True,
force_parallel: bool = False,
) -> DF:
"""
Perform an asof join. This is similar to a left-join except that we
match on nearest key rather than equal keys.
Both DataFrames must be sorted by the asof_join key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
The default is "backward".
Parameters
----------
ldf
Lazy DataFrame to join with.
left_on
Join column of the left DataFrame.
right_on
Join column of the right DataFrame.
on
Join column of both DataFrames. If set, `left_on` and `right_on` should be None.
by
join on these columns before doing asof join
by_left
join on these columns before doing asof join
by_right
join on these columns before doing asof join
strategy
One of {'forward', 'backward'}
suffix
Suffix to append to columns with a duplicate name.
tolerance
Numeric tolerance. By setting this the join will only be done if the near keys are within this distance.
If an asof join is done on columns of dtype "Date", "Datetime", "Duration" or "Time" you
use the following string language:
- 1ns (1 nanosecond)
- 1us (1 microsecond)
- 1ms (1 millisecond)
- 1s (1 second)
- 1m (1 minute)
- 1h (1 hour)
- 1d (1 day)
- 1w (1 week)
- 1mo (1 calendar month)
- 1y (1 calendar year)
- 1i (1 index count)
Or combine them:
"3d12h4m25s" # 3 days, 12 hours, 4 minutes, and 25 seconds
allow_parallel
Allow the physical plan to optionally evaluate the computation of both DataFrames up to the join in parallel.
force_parallel
Force the physical plan to evaluate the computation of both DataFrames up to the join in parallel.
Examples
--------
>>> from datetime import datetime
>>> gdp = pl.DataFrame(
... {
... "date": [
... datetime(2016, 1, 1),
... datetime(2017, 1, 1),
... datetime(2018, 1, 1),
... datetime(2019, 1, 1),
... ], # note record date: Jan 1st (sorted!)
... "gdp": [4164, 4411, 4566, 4696],
... }
... )
>>> population = pl.DataFrame(
... {
... "date": [
... datetime(2016, 5, 12),
... datetime(2017, 5, 12),
... datetime(2018, 5, 12),
... datetime(2019, 5, 12),
... ], # note record date: May 12th (sorted!)
... "population": [82.19, 82.66, 83.12, 83.52],
... }
... )
>>> population.join_asof(
... gdp, left_on="date", right_on="date", strategy="backward"
... )
shape: (4, 3)
┌─────────────────────┬────────────┬──────┐
│ date ┆ population ┆ gdp │
│ --- ┆ --- ┆ --- │
│ datetime[μs] ┆ f64 ┆ i64 │
╞═════════════════════╪════════════╪══════╡
│ 2016-05-12 00:00:00 ┆ 82.19 ┆ 4164 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2017-05-12 00:00:00 ┆ 82.66 ┆ 4411 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2018-05-12 00:00:00 ┆ 83.12 ┆ 4566 │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2019-05-12 00:00:00 ┆ 83.52 ┆ 4696 │
└─────────────────────┴────────────┴──────┘
"""
return (
self.lazy()
.join_asof(
df.lazy(),
left_on=left_on,
right_on=right_on,
on=on,
by_left=by_left,
by_right=by_right,
by=by,
strategy=strategy,
suffix=suffix,
tolerance=tolerance,
allow_parallel=allow_parallel,
force_parallel=force_parallel,
)
.collect(no_optimization=True)
)
    def join(
        self: DF,
        df: "DataFrame",
        left_on: Optional[Union[str, "pli.Expr", List[Union[str, "pli.Expr"]]]] = None,
        right_on: Optional[Union[str, "pli.Expr", List[Union[str, "pli.Expr"]]]] = None,
        on: Optional[Union[str, "pli.Expr", List[Union[str, "pli.Expr"]]]] = None,
        how: str = "inner",
        suffix: str = "_right",
        asof_by: Optional[Union[str, List[str]]] = None,
        asof_by_left: Optional[Union[str, List[str]]] = None,
        asof_by_right: Optional[Union[str, List[str]]] = None,
    ) -> DF:
        """
        SQL like joins.
        Parameters
        ----------
        df
            DataFrame to join with.
        left_on
            Name(s) of the left join column(s).
        right_on
            Name(s) of the right join column(s).
        on
            Name(s) of the join columns in both DataFrames.
        how
            Join strategy
             - "inner"
             - "left"
             - "outer"
             - "asof"
             - "cross"
             - "semi"
             - "anti"
        suffix
            Suffix to append to columns with a duplicate name.
        asof_by
            join on these columns before doing asof join
        asof_by_left
            join on these columns before doing asof join
        asof_by_right
            join on these columns before doing asof join
        Returns
        -------
        Joined DataFrame
        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "foo": [1, 2, 3],
        ...         "bar": [6.0, 7.0, 8.0],
        ...         "ham": ["a", "b", "c"],
        ...     }
        ... )
        >>> other_df = pl.DataFrame(
        ...     {
        ...         "apple": ["x", "y", "z"],
        ...         "ham": ["a", "b", "d"],
        ...     }
        ... )
        >>> df.join(other_df, on="ham")
        shape: (2, 4)
        ┌─────┬─────┬─────┬───────┐
        │ foo ┆ bar ┆ ham ┆ apple │
        │ --- ┆ --- ┆ --- ┆ ---   │
        │ i64 ┆ f64 ┆ str ┆ str   │
        ╞═════╪═════╪═════╪═══════╡
        │ 1   ┆ 6   ┆ a   ┆ x     │
        ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
        │ 2   ┆ 7   ┆ b   ┆ y     │
        └─────┴─────┴─────┴───────┘
        >>> df.join(other_df, on="ham", how="outer")  # doctest: +IGNORE_RESULT
        **Asof join**
        This is similar to a left-join except that we match on near keys rather than equal keys.
        The direction is backward. The keys must be sorted to perform an asof join.
        Asof joins via ``DataFrame.join`` are deprecated;
        use :func:`DataFrame.join_asof` instead.
        """
        # Deprecated path: warn but still perform the asof join below.
        if how == "asof":  # pragma: no cover
            warnings.warn(
                "using asof join via DataFrame.join is deprecated, please use DataFrame.join_asof",
                DeprecationWarning,
            )
        # Cross joins have no keys; short-circuit on the eager engine.
        if how == "cross":
            return self._from_pydf(self._df.join(df._df, [], [], how, suffix))
        # Normalize left_on/right_on to lists; `on` (if given) overrides both.
        left_on_: Optional[List[Union[str, pli.Expr]]]
        if isinstance(left_on, (str, pli.Expr)):
            left_on_ = [left_on]
        else:
            left_on_ = left_on
        right_on_: Optional[List[Union[str, pli.Expr]]]
        if isinstance(right_on, (str, pli.Expr)):
            right_on_ = [right_on]
        else:
            right_on_ = right_on
        if isinstance(on, str):
            left_on_ = [on]
            right_on_ = [on]
        elif isinstance(on, list):
            left_on_ = on
            right_on_ = on
        if left_on_ is None or right_on_ is None:
            raise ValueError("You should pass the column to join on as an argument.")
        # Expression keys and asof_by* options are only supported by the
        # lazy engine; note the lazy path forwards the ORIGINAL
        # left_on/right_on (not the normalized lists) plus `on`.
        if (
            isinstance(left_on_[0], pli.Expr)
            or isinstance(right_on_[0], pli.Expr)
            or asof_by_left is not None
            or asof_by_right is not None
            or asof_by is not None
        ):
            return (
                self.lazy()
                .join(
                    df.lazy(),
                    left_on,
                    right_on,
                    on=on,
                    how=how,
                    suffix=suffix,
                    asof_by_right=asof_by_right,
                    asof_by_left=asof_by_left,
                    asof_by=asof_by,
                )
                .collect(no_optimization=True)
            )
        else:
            # Plain string keys: eager engine with the normalized key lists.
            return self._from_pydf(
                self._df.join(df._df, left_on_, right_on_, how, suffix)
            )
def apply(
self: DF,
f: Callable[[Tuple[Any, ...]], Any],
return_dtype: Optional[Type[DataType]] = None,
inference_size: int = 256,
) -> DF:
"""
Apply a custom function over the rows of the DataFrame. The rows are passed as tuple.
Beware, this is slow.
Parameters
----------
f
Custom function/ lambda function.
return_dtype
Output type of the operation. If none given, Polars tries to infer the type.
inference_size
Only used in the case when the custom function returns rows.
This uses the first `n` rows to determine the output schema
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3], "bar": [-1, 5, 8]})
# return rows
>>> df.apply(lambda t: (t[0] * 2, t[1] * 3))
shape: (3, 2)
┌──────────┬──────────┐
│ column_0 ┆ column_1 │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞══════════╪══════════╡
│ 2 ┆ -3 │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 4 ┆ 15 │
├╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┤
│ 6 ┆ 24 │
└──────────┴──────────┘
# return scalar
>>> df.apply(lambda t: (t[0] * 2 + t[1]))
shape: (3, 1)
┌───────┐
│ apply │
│ --- │
│ i64 │
╞═══════╡
│ 1 │
├╌╌╌╌╌╌╌┤
│ 9 │
├╌╌╌╌╌╌╌┤
│ 14 │
└───────┘
"""
out, is_df = self._df.apply(f, return_dtype, inference_size)
if is_df:
return self._from_pydf(out)
else:
return self._from_pydf(pli.wrap_s(out).to_frame()._df)
def with_column(self: DF, column: Union["pli.Series", "pli.Expr"]) -> DF:
"""
Return a new DataFrame with the column added or replaced.
Parameters
----------
column
Series, where the name of the Series refers to the column in the DataFrame.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 3, 5],
... "b": [2, 4, 6],
... }
... )
>>> df.with_column((pl.col("b") ** 2).alias("b_squared")) # added
shape: (3, 3)
┌─────┬─────┬───────────┐
│ a ┆ b ┆ b_squared │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ f64 │
╞═════╪═════╪═══════════╡
│ 1 ┆ 2 ┆ 4.0 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┤
│ 3 ┆ 4 ┆ 16.0 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┤
│ 5 ┆ 6 ┆ 36.0 │
└─────┴─────┴───────────┘
>>> df.with_column(pl.col("a") ** 2) # replaced
shape: (3, 2)
┌──────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ f64 ┆ i64 │
╞══════╪═════╡
│ 1.0 ┆ 2 │
├╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 9.0 ┆ 4 │
├╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 25.0 ┆ 6 │
└──────┴─────┘
"""
if isinstance(column, list):
raise ValueError(
"`with_column` expects a single expression, not a list. Consider using `with_columns`"
)
if isinstance(column, pli.Expr):
return self.with_columns([column])
else:
return self._from_pydf(self._df.with_column(column._s))
def hstack(
self: DF,
columns: Union[List["pli.Series"], "DataFrame"],
in_place: bool = False,
) -> Optional[DF]:
"""
Return a new DataFrame grown horizontally by stacking multiple Series to it.
Parameters
----------
columns
Series to stack.
in_place
Modify in place.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> x = pl.Series("apple", [10, 20, 30])
>>> df.hstack([x])
shape: (3, 4)
┌─────┬─────┬─────┬───────┐
│ foo ┆ bar ┆ ham ┆ apple │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str ┆ i64 │
╞═════╪═════╪═════╪═══════╡
│ 1 ┆ 6 ┆ a ┆ 10 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b ┆ 20 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c ┆ 30 │
└─────┴─────┴─────┴───────┘
"""
if not isinstance(columns, list):
columns = columns.get_columns()
if in_place:
self._df.hstack_mut([s.inner() for s in columns])
return None
else:
return self._from_pydf(self._df.hstack([s.inner() for s in columns]))
@overload
def vstack(self, df: "DataFrame", in_place: Literal[True]) -> None:
...
@overload
def vstack(self: DF, df: "DataFrame", in_place: Literal[False] = ...) -> DF:
...
@overload
def vstack(self: DF, df: "DataFrame", in_place: bool) -> Optional[DF]:
...
def vstack(self: DF, df: "DataFrame", in_place: bool = False) -> Optional[DF]:
"""
Grow this DataFrame vertically by stacking a DataFrame to it.
Parameters
----------
df
DataFrame to stack.
in_place
Modify in place
Examples
--------
>>> df1 = pl.DataFrame(
... {
... "foo": [1, 2],
... "bar": [6, 7],
... "ham": ["a", "b"],
... }
... )
>>> df2 = pl.DataFrame(
... {
... "foo": [3, 4],
... "bar": [8, 9],
... "ham": ["c", "d"],
... }
... )
>>> df1.vstack(df2)
shape: (4, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 4 ┆ 9 ┆ d │
└─────┴─────┴─────┘
"""
if in_place:
self._df.vstack_mut(df._df)
return None
else:
return self._from_pydf(self._df.vstack(df._df))
    def extend(self, other: "DataFrame") -> None:
        """
        Extend the memory backed by this `DataFrame` with the values from `other`.

        Different from `vstack` which adds the chunks from `other` to the chunks of this `DataFrame`,
        `extend` appends the data from `other` to the underlying memory locations and thus may cause a reallocation.

        If this does not cause a reallocation, the resulting data structure will not have any extra chunks
        and thus will yield faster queries.

        Prefer `extend` over `vstack` when you want to do a query after a single append. For instance during
        online operations where you add `n` rows and rerun a query.

        Prefer `vstack` over `extend` when you want to append many times before doing a query. For instance
        when you read in multiple files and want to store them in a single `DataFrame`.
        In the latter case, finish the sequence of `vstack` operations with a `rechunk`.

        Parameters
        ----------
        other
            DataFrame to vertically add.

        Examples
        --------
        >>> df1 = pl.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]})
        >>> df2 = pl.DataFrame({"foo": [10, 20, 30], "bar": [40, 50, 60]})
        >>> df1.extend(df2)  # returns None
        >>> df1
        shape: (6, 2)
        ┌─────┬─────┐
        │ foo ┆ bar │
        │ --- ┆ --- │
        │ i64 ┆ i64 │
        ╞═════╪═════╡
        │ 1 ┆ 4 │
        ├╌╌╌╌╌┼╌╌╌╌╌┤
        │ 2 ┆ 5 │
        ├╌╌╌╌╌┼╌╌╌╌╌┤
        │ 3 ┆ 6 │
        ├╌╌╌╌╌┼╌╌╌╌╌┤
        │ 10 ┆ 40 │
        ├╌╌╌╌╌┼╌╌╌╌╌┤
        │ 20 ┆ 50 │
        ├╌╌╌╌╌┼╌╌╌╌╌┤
        │ 30 ┆ 60 │
        └─────┴─────┘
        """
        # In-place mutation of the native frame; nothing is returned.
        self._df.extend(other._df)
def drop(self: DF, name: Union[str, List[str]]) -> DF:
"""
Remove column from DataFrame and return as new.
Parameters
----------
name
Column(s) to drop.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6.0, 7.0, 8.0],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.drop("ham")
shape: (3, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 1 ┆ 6 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 │
└─────┴─────┘
"""
if isinstance(name, list):
df = self.clone()
for name in name:
df._df.drop_in_place(name)
return df
return self._from_pydf(self._df.drop(name))
def drop_in_place(self, name: str) -> "pli.Series":
"""
Drop in place.
Parameters
----------
name
Column to drop.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.drop_in_place("ham")
shape: (3,)
Series: 'ham' [str]
[
"a"
"b"
"c"
]
"""
return pli.wrap_s(self._df.drop_in_place(name))
def select_at_idx(self, idx: int) -> "pli.Series":
"""
Select column at index location.
Parameters
----------
idx
Location of selection.
.. deprecated:: 0.10.20
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.select_at_idx(1)
shape: (3,)
Series: 'bar' [i64]
[
6
7
8
]
"""
return pli.wrap_s(self._df.select_at_idx(idx))
def clone(self: DF) -> DF:
"""
Very cheap deep clone.
"""
return self._from_pydf(self._df.clone())
def __copy__(self: DF) -> DF:
return self.clone()
def __deepcopy__(self: DF, memodict={}) -> DF: # type: ignore
return self.clone()
def get_columns(self) -> List["pli.Series"]:
"""
Get the DataFrame as a List of Series.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]})
>>> df.get_columns()
[shape: (3,)
Series: 'foo' [i64]
[
1
2
3
], shape: (3,)
Series: 'bar' [i64]
[
4
5
6
]]
"""
return list(map(lambda s: pli.wrap_s(s), self._df.get_columns()))
def get_column(self, name: str) -> "pli.Series":
"""
Get a single column as Series by name.
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]})
>>> df.get_column("foo")
shape: (3,)
Series: 'foo' [i64]
[
1
2
3
]
"""
return self[name]
    def fill_null(self: DF, strategy: Union[str, "pli.Expr", Any]) -> DF:
        """
        Fill null values using a filling strategy, literal, or Expr.

        .. seealso::
            fill_nan

        Parameters
        ----------
        strategy
            One of:
            - "backward"
            - "forward"
            - "mean"
            - "min"
            - "max"
            - "zero"
            - "one"
            Or an expression.

        Returns
        -------
        DataFrame with None values replaced by the filling strategy.
        """
        # Expressions are evaluated through the lazy engine.
        if isinstance(strategy, pli.Expr):
            return self.lazy().fill_null(strategy).collect(no_optimization=True)
        # Any non-string value is treated as a literal and re-dispatched as an Expr.
        if not isinstance(strategy, str):
            return self.fill_null(pli.lit(strategy))
        # A plain string selects one of the named strategies listed above.
        return self._from_pydf(self._df.fill_null(strategy))
def fill_nan(self: DF, fill_value: Union["pli.Expr", int, float]) -> DF:
"""
Fill floating point NaN values by an Expression evaluation.
.. seealso::
fill_null
Warnings
--------
NOTE that floating point NaNs (Not a Number) are not missing values!
to replace missing values, use `fill_null`.
Parameters
----------
fill_value
value to fill NaN with
Returns
-------
DataFrame with NaN replaced with fill_value
"""
return self.lazy().fill_nan(fill_value).collect(no_optimization=True)
def explode(
self: DF,
columns: Union[str, List[str], "pli.Expr", List["pli.Expr"]],
) -> DF:
"""
Explode `DataFrame` to long format by exploding a column with Lists.
Parameters
----------
columns
Column of LargeList type.
Returns
-------
DataFrame
Examples
--------
>>> df = pl.DataFrame(
... {
... "letters": ["c", "c", "a", "c", "a", "b"],
... "nrs": [[1, 2], [1, 3], [4, 3], [5, 5, 5], [6], [2, 1, 2]],
... }
... )
>>> df
shape: (6, 2)
┌─────────┬────────────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ list [i64] │
╞═════════╪════════════╡
│ c ┆ [1, 2] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ c ┆ [1, 3] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ [4, 3] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ c ┆ [5, 5, 5] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ a ┆ [6] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ b ┆ [2, 1, 2] │
└─────────┴────────────┘
>>> df.explode("nrs")
shape: (13, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ c ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ c ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ ... ┆ ... │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ a ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ b ┆ 2 │
└─────────┴─────┘
"""
return self.lazy().explode(columns).collect(no_optimization=True)
def pivot(
self: DF,
values: Union[List[str], str],
index: Union[List[str], str],
columns: Union[List[str], str],
aggregate_fn: str = "first",
maintain_order: bool = True,
sort_columns: bool = False,
) -> DF:
"""
Create a spreadsheet-style pivot table as a DataFrame.
Parameters
----------
values
Column values to aggregate. Can be multiple columns if the *columns* arguments contains multiple columns as well
index
One or multiple keys to group by
columns
Columns whose values will be used as the header of the output DataFrame
aggregate_fn
Any of:
- "sum"
- "max"
- "min"
- "mean"
- "median"
- "first"
- "last"
- "count"
maintain_order
Sort the grouped keys so that the output order is predictable.
sort_columns
Sort the transposed columns by name. Default is by order of discovery.
Returns
-------
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": ["one", "one", "one", "two", "two", "two"],
... "bar": ["A", "B", "C", "A", "B", "C"],
... "baz": [1, 2, 3, 4, 5, 6],
... }
... )
>>> df.pivot(values="baz", index="foo", columns="bar")
shape: (2, 4)
┌─────┬─────┬─────┬─────┐
│ foo ┆ A ┆ B ┆ C │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╪═════╡
│ one ┆ 1 ┆ 2 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ two ┆ 4 ┆ 5 ┆ 6 │
└─────┴─────┴─────┴─────┘
"""
if isinstance(values, str):
values = [values]
if isinstance(index, str):
index = [index]
if isinstance(columns, str):
columns = [columns]
return self._from_pydf(
self._df.pivot2(
values, index, columns, aggregate_fn, maintain_order, sort_columns
)
)
    def melt(
        self: DF,
        id_vars: Optional[Union[List[str], str]] = None,
        value_vars: Optional[Union[List[str], str]] = None,
        variable_name: Optional[str] = None,
        value_name: Optional[str] = None,
    ) -> DF:
        """
        Unpivot a DataFrame from wide to long format, optionally leaving identifiers set.

        This function is useful to massage a DataFrame into a format where one or more columns are identifier variables
        (id_vars), while all other columns, considered measured variables (value_vars), are “unpivoted” to the row axis,
        leaving just two non-identifier columns, ‘variable’ and ‘value’.

        Parameters
        ----------
        id_vars
            Columns to use as identifier variables.
        value_vars
            Values to use as identifier variables.
            If `value_vars` is empty all columns that are not in `id_vars` will be used.
        variable_name
            Name to give to the `variable` column. Defaults to "variable"
        value_name
            Name to give to the `value` column. Defaults to "value"

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "a": ["x", "y", "z"],
        ...         "b": [1, 3, 5],
        ...         "c": [2, 4, 6],
        ...     }
        ... )
        >>> df.melt(id_vars="a", value_vars=["b", "c"])
        shape: (6, 3)
        ┌─────┬──────────┬───────┐
        │ a ┆ variable ┆ value │
        │ --- ┆ --- ┆ --- │
        │ str ┆ str ┆ i64 │
        ╞═════╪══════════╪═══════╡
        │ x ┆ b ┆ 1 │
        ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
        │ y ┆ b ┆ 3 │
        ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
        │ z ┆ b ┆ 5 │
        ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
        │ x ┆ c ┆ 2 │
        ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
        │ y ┆ c ┆ 4 │
        ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
        │ z ┆ c ┆ 6 │
        └─────┴──────────┴───────┘
        """
        # Normalize scalar arguments to lists; None becomes an empty list.
        if isinstance(value_vars, str):
            value_vars = [value_vars]
        if isinstance(id_vars, str):
            id_vars = [id_vars]
        if value_vars is None:
            value_vars = []
        if id_vars is None:
            id_vars = []
        # NOTE(review): the native binding is called as
        # (id_vars, value_vars, value_name, variable_name) — value_name precedes
        # variable_name; confirm this matches PyDataFrame.melt's parameter order.
        return self._from_pydf(
            self._df.melt(id_vars, value_vars, value_name, variable_name)
        )
@overload
def partition_by(
self: DF,
groups: Union[str, List[str]],
maintain_order: bool,
*,
as_dict: Literal[False] = ...,
) -> List[DF]:
...
@overload
def partition_by(
self: DF,
groups: Union[str, List[str]],
maintain_order: bool,
*,
as_dict: Literal[True],
) -> Dict[Any, DF]:
...
@overload
def partition_by(
self: DF,
groups: Union[str, List[str]],
maintain_order: bool,
*,
as_dict: bool,
) -> Union[List[DF], Dict[Any, DF]]:
...
def partition_by(
self: DF,
groups: Union[str, List[str]],
maintain_order: bool = True,
*,
as_dict: bool = False,
) -> Union[List[DF], Dict[Any, DF]]:
"""
Split into multiple DataFrames partitioned by groups.
Parameters
----------
groups
Groups to partition by
maintain_order
Keep predictable output order. This is slower as it requires and extra sort operation.
as_dict
Return as dictionary
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": ["A", "A", "B", "B", "C"],
... "N": [1, 2, 2, 4, 2],
... "bar": ["k", "l", "m", "m", "l"],
... }
... )
>>> df.partition_by(groups="foo", maintain_order=True)
[shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ N ┆ bar │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ A ┆ 1 ┆ k │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ A ┆ 2 ┆ l │
└─────┴─────┴─────┘,
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ N ┆ bar │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ B ┆ 2 ┆ m │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ B ┆ 4 ┆ m │
└─────┴─────┴─────┘,
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ N ┆ bar │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ C ┆ 2 ┆ l │
└─────┴─────┴─────┘]
"""
if isinstance(groups, str):
groups = [groups]
if as_dict:
out: Dict[Any, DF] = dict()
if len(groups) == 1:
for _df in self._df.partition_by(groups, maintain_order):
df = self._from_pydf(_df)
out[df[groups][0, 0]] = df
else:
for _df in self._df.partition_by(groups, maintain_order):
df = self._from_pydf(_df)
out[df[groups].row(0)] = df
return out
else:
return [
self._from_pydf(_df)
for _df in self._df.partition_by(groups, maintain_order)
]
def shift(self: DF, periods: int) -> DF:
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with `Nones`.
Parameters
----------
periods
Number of places to shift (may be negative).
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.shift(periods=1)
shape: (3, 3)
┌──────┬──────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞══════╪══════╪══════╡
│ null ┆ null ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└──────┴──────┴──────┘
>>> df.shift(periods=-1)
shape: (3, 3)
┌──────┬──────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞══════╪══════╪══════╡
│ 2 ┆ 7 ┆ b │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ 3 ┆ 8 ┆ c │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ null ┆ null │
└──────┴──────┴──────┘
"""
return self._from_pydf(self._df.shift(periods))
def shift_and_fill(
self: DF, periods: int, fill_value: Union[int, str, float]
) -> DF:
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with the result of the `fill_value` expression.
Parameters
----------
periods
Number of places to shift (may be negative).
fill_value
fill None values with this value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.shift_and_fill(periods=1, fill_value=0)
shape: (3, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 0 ┆ 0 ┆ 0 │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 1 ┆ 6 ┆ a │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└─────┴─────┴─────┘
"""
return (
self.lazy()
.shift_and_fill(periods, fill_value)
.collect(no_optimization=True, string_cache=False)
)
def is_duplicated(self) -> "pli.Series":
"""
Get a mask of all duplicated rows in this DataFrame.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3, 1],
... "b": ["x", "y", "z", "x"],
... }
... )
>>> df.is_duplicated()
shape: (4,)
Series: '' [bool]
[
true
false
false
true
]
"""
return pli.wrap_s(self._df.is_duplicated())
def is_unique(self) -> "pli.Series":
"""
Get a mask of all unique rows in this DataFrame.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3, 1],
... "b": ["x", "y", "z", "x"],
... }
... )
>>> df.is_unique()
shape: (4,)
Series: '' [bool]
[
false
true
true
false
]
"""
return pli.wrap_s(self._df.is_unique())
def lazy(self: DF) -> "pli.LazyFrame[DF]":
"""
Start a lazy query from this point. This returns a `LazyFrame` object.
Operations on a `LazyFrame` are not executed until this is requested by either calling:
* `.fetch()` (run on a small number of rows)
* `.collect()` (run on all data)
* `.describe_plan()` (print unoptimized query plan)
* `.describe_optimized_plan()` (print optimized query plan)
* `.show_graph()` (show (un)optimized query plan) as graphviz graph)
Lazy operations are advised because they allow for query optimization and more parallelization.
"""
return self._lazyframe_class._from_pyldf(self._df.lazy())
def select(
self: DF,
exprs: Union[
str,
"pli.Expr",
Sequence[Union[str, "pli.Expr", bool, int, float, "pli.Series"]],
"pli.Series",
],
) -> DF:
"""
Select columns from this DataFrame.
Parameters
----------
exprs
Column or columns to select.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.select("foo")
shape: (3, 1)
┌─────┐
│ foo │
│ --- │
│ i64 │
╞═════╡
│ 1 │
├╌╌╌╌╌┤
│ 2 │
├╌╌╌╌╌┤
│ 3 │
└─────┘
"""
return (
self.lazy()
.select(exprs) # type: ignore
.collect(no_optimization=True, string_cache=False)
)
def with_columns(self: DF, exprs: Union["pli.Expr", List["pli.Expr"]]) -> DF:
"""
Add or overwrite multiple columns in a DataFrame.
Parameters
----------
exprs
List of Expressions that evaluate to columns.
"""
if not isinstance(exprs, list):
exprs = [exprs]
return (
self.lazy()
.with_columns(exprs)
.collect(no_optimization=True, string_cache=False)
)
def n_chunks(self) -> int:
"""
Get number of chunks used by the ChunkedArrays of this DataFrame.
"""
return self._df.n_chunks()
@overload
def max(self: DF, axis: Literal[0] = ...) -> DF:
...
@overload
def max(self, axis: Literal[1]) -> "pli.Series":
...
@overload
def max(self: DF, axis: int = 0) -> Union[DF, "pli.Series"]:
...
def max(self: DF, axis: int = 0) -> Union[DF, "pli.Series"]:
"""
Aggregate the columns of this DataFrame to their maximum value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.max()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 3 ┆ 8 ┆ null │
└─────┴─────┴──────┘
"""
if axis == 0:
return self._from_pydf(self._df.max())
if axis == 1:
return pli.wrap_s(self._df.hmax())
raise ValueError("Axis should be 0 or 1.") # pragma: no cover
@overload
def min(self: DF, axis: Literal[0] = ...) -> DF:
...
@overload
def min(self, axis: Literal[1]) -> "pli.Series":
...
@overload
def min(self: DF, axis: int = 0) -> Union[DF, "pli.Series"]:
...
def min(self: DF, axis: int = 0) -> Union[DF, "pli.Series"]:
"""
Aggregate the columns of this DataFrame to their minimum value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.min()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 1 ┆ 6 ┆ null │
└─────┴─────┴──────┘
"""
if axis == 0:
return self._from_pydf(self._df.min())
if axis == 1:
return pli.wrap_s(self._df.hmin())
raise ValueError("Axis should be 0 or 1.") # pragma: no cover
@overload
def sum(self: DF, *, axis: Literal[0] = ..., null_strategy: str = "ignore") -> DF:
...
@overload
def sum(self, *, axis: Literal[1], null_strategy: str = "ignore") -> "pli.Series":
...
@overload
def sum(
self: DF, *, axis: int = 0, null_strategy: str = "ignore"
) -> Union[DF, "pli.Series"]:
...
def sum(
self: DF, *, axis: int = 0, null_strategy: str = "ignore"
) -> Union[DF, "pli.Series"]:
"""
Aggregate the columns of this DataFrame to their sum value.
Parameters
----------
axis
either 0 or 1
null_strategy
{'ignore', 'propagate'}
this argument is only used if axis == 1
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.sum()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 6 ┆ 21 ┆ null │
└─────┴─────┴──────┘
"""
if axis == 0:
return self._from_pydf(self._df.sum())
if axis == 1:
return pli.wrap_s(self._df.hsum(null_strategy))
raise ValueError("Axis should be 0 or 1.") # pragma: no cover
@overload
def mean(self: DF, *, axis: Literal[0] = ..., null_strategy: str = "ignore") -> DF:
...
@overload
def mean(self, *, axis: Literal[1], null_strategy: str = "ignore") -> "pli.Series":
...
@overload
def mean(
self: DF, *, axis: int = 0, null_strategy: str = "ignore"
) -> Union[DF, "pli.Series"]:
...
def mean(
self: DF, axis: int = 0, null_strategy: str = "ignore"
) -> Union[DF, "pli.Series"]:
"""
Aggregate the columns of this DataFrame to their mean value.
Parameters
----------
axis
either 0 or 1
null_strategy
{'ignore', 'propagate'}
this argument is only used if axis == 1
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.mean()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ str │
╞═════╪═════╪══════╡
│ 2 ┆ 7 ┆ null │
└─────┴─────┴──────┘
Note: the mean of booleans evaluates to null.
>>> df = pl.DataFrame(
... {
... "a": [True, True, False],
... "b": [True, True, True],
... }
... )
# mean evaluates to null
>>> df.mean()
shape: (1, 2)
┌──────┬──────┐
│ a ┆ b │
│ --- ┆ --- │
│ bool ┆ bool │
╞══════╪══════╡
│ null ┆ null │
└──────┴──────┘
# instead, cast to numeric type
>>> df.select(pl.all().cast(pl.UInt8)).mean()
shape: (1, 2)
┌──────────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ f64 ┆ f64 │
╞══════════╪═════╡
│ 0.666667 ┆ 1.0 │
└──────────┴─────┘
"""
if axis == 0:
return self._from_pydf(self._df.mean())
if axis == 1:
return pli.wrap_s(self._df.hmean(null_strategy))
raise ValueError("Axis should be 0 or 1.") # pragma: no cover
def std(self: DF) -> DF:
"""
Aggregate the columns of this DataFrame to their standard deviation value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.std()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ str │
╞═════╪═════╪══════╡
│ 1 ┆ 1 ┆ null │
└─────┴─────┴──────┘
"""
return self._from_pydf(self._df.std())
def var(self: DF) -> DF:
"""
Aggregate the columns of this DataFrame to their variance value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.var()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ str │
╞═════╪═════╪══════╡
│ 1 ┆ 1 ┆ null │
└─────┴─────┴──────┘
"""
return self._from_pydf(self._df.var())
def median(self: DF) -> DF:
"""
Aggregate the columns of this DataFrame to their median value.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.median()
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ f64 ┆ f64 ┆ str │
╞═════╪═════╪══════╡
│ 2 ┆ 7 ┆ null │
└─────┴─────┴──────┘
"""
return self._from_pydf(self._df.median())
def product(self: DF) -> DF:
"""
Aggregate the columns of this DataFrame to their product values
Examples
--------
>>> df = pl.DataFrame({"factors": [1.0, 0.99, 1.3, 0.87]})
>>> df.product()
shape: (1, 1)
┌─────────┐
│ factors │
│ --- │
│ f64 │
╞═════════╡
│ 1.1196 │
└─────────┘
"""
return self.select(pli.all().product())
def quantile(self: DF, quantile: float, interpolation: str = "nearest") -> DF:
"""
Aggregate the columns of this DataFrame to their quantile value.
Parameters
----------
quantile
quantile between 0.0 and 1.0
interpolation
interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.quantile(0.5, "nearest")
shape: (1, 3)
┌─────┬─────┬──────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪══════╡
│ 2 ┆ 7 ┆ null │
└─────┴─────┴──────┘
"""
return self._from_pydf(self._df.quantile(quantile, interpolation))
def to_dummies(self: DF) -> DF:
"""
Get one hot encoded dummy variables.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.to_dummies()
shape: (3, 9)
┌───────┬───────┬───────┬───────┬─────┬───────┬───────┬───────┬───────┐
│ foo_1 ┆ foo_2 ┆ foo_3 ┆ bar_6 ┆ ... ┆ bar_8 ┆ ham_a ┆ ham_b ┆ ham_c │
│ --- ┆ --- ┆ --- ┆ --- ┆ ┆ --- ┆ --- ┆ --- ┆ --- │
│ u8 ┆ u8 ┆ u8 ┆ u8 ┆ ┆ u8 ┆ u8 ┆ u8 ┆ u8 │
╞═══════╪═══════╪═══════╪═══════╪═════╪═══════╪═══════╪═══════╪═══════╡
│ 1 ┆ 0 ┆ 0 ┆ 1 ┆ ... ┆ 0 ┆ 1 ┆ 0 ┆ 0 │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 0 ┆ 1 ┆ 0 ┆ 0 ┆ ... ┆ 0 ┆ 0 ┆ 1 ┆ 0 │
├╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤
│ 0 ┆ 0 ┆ 1 ┆ 0 ┆ ... ┆ 1 ┆ 0 ┆ 0 ┆ 1 │
└───────┴───────┴───────┴───────┴─────┴───────┴───────┴───────┴───────┘
"""
return self._from_pydf(self._df.to_dummies())
def distinct(
self: DF,
maintain_order: bool = True,
subset: Optional[Union[str, List[str]]] = None,
keep: str = "first",
) -> DF:
"""
.. deprecated:: 0.13.13
Please use `unique`
"""
return self.unique(maintain_order, subset, keep)
def unique(
self: DF,
maintain_order: bool = True,
subset: Optional[Union[str, List[str]]] = None,
keep: str = "first",
) -> DF:
"""
Drop duplicate rows from this DataFrame.
Warnings
--------
Note that this fails if there is a column of type `List` in the DataFrame or subset.
Parameters
----------
maintain_order
Keep the same order as the original DataFrame. This requires more work to compute.
subset
Subset to use to compare rows
keep
any of {"first", "last"}
Returns
-------
DataFrame with unique rows
Examples
--------
>>> df = pl.DataFrame({"x": [1, 2, 3, 2, 1], "y": [3, 2, 1, 2, 3]})
>>> df.unique()
shape: (3, 2)
┌─────┬─────┐
│ x ┆ y │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 2 │
├╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 1 │
└─────┴─────┘
"""
if subset is not None and not isinstance(subset, list):
subset = [subset]
return self._from_pydf(self._df.unique(maintain_order, subset, keep))
def rechunk(self: DF) -> DF:
"""
Rechunk the data in this DataFrame to a contiguous allocation.
This will make sure all subsequent operations have optimal and predictable performance.
"""
return self._from_pydf(self._df.rechunk())
def null_count(self: DF) -> DF:
"""
Create a new DataFrame that shows the null counts per column.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, None, 3],
... "bar": [6, 7, None],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.null_count()
shape: (1, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ u32 ┆ u32 ┆ u32 │
╞═════╪═════╪═════╡
│ 1 ┆ 1 ┆ 0 │
└─────┴─────┴─────┘
"""
return self._from_pydf(self._df.null_count())
def sample(
self: DF,
n: Optional[int] = None,
frac: Optional[float] = None,
with_replacement: bool = False,
shuffle: bool = False,
seed: Optional[int] = None,
) -> DF:
"""
Sample from this DataFrame by setting either `n` or `frac`.
Parameters
----------
n
Number of samples < self.len() .
frac
Fraction between 0.0 and 1.0 .
with_replacement
Sample with replacement.
shuffle
Shuffle the order of sampled data points.
seed
Initialization seed. If None is given a random seed is used.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.sample(n=2, seed=0) # doctest: +IGNORE_RESULT
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 3 ┆ 8 ┆ c │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 2 ┆ 7 ┆ b │
└─────┴─────┴─────┘
"""
if n is not None and frac is not None:
raise ValueError("n and frac were both supplied")
if n is None and frac is not None:
return self._from_pydf(
self._df.sample_frac(frac, with_replacement, shuffle, seed)
)
if n is None:
n = 1
return self._from_pydf(self._df.sample_n(n, with_replacement, shuffle, seed))
def fold(
self, operation: Callable[["pli.Series", "pli.Series"], "pli.Series"]
) -> "pli.Series":
"""
Apply a horizontal reduction on a DataFrame. This can be used to effectively
determine aggregations on a row level, and can be applied to any DataType that
can be supercasted (casted to a similar parent type).
An example of the supercast rules when applying an arithmetic operation on two DataTypes are for instance:
Int8 + Utf8 = Utf8
Float32 + Int64 = Float32
Float32 + Float64 = Float64
Examples
--------
A horizontal sum operation:
>>> df = pl.DataFrame(
... {
... "a": [2, 1, 3],
... "b": [1, 2, 3],
... "c": [1.0, 2.0, 3.0],
... }
... )
>>> df.fold(lambda s1, s2: s1 + s2)
shape: (3,)
Series: 'a' [f64]
[
4
5
9
]
A horizontal minimum operation:
>>> df = pl.DataFrame({"a": [2, 1, 3], "b": [1, 2, 3], "c": [1.0, 2.0, 3.0]})
>>> df.fold(lambda s1, s2: s1.zip_with(s1 < s2, s2))
shape: (3,)
Series: 'a' [f64]
[
1
1
3
]
A horizontal string concatenation:
>>> df = pl.DataFrame(
... {
... "a": ["foo", "bar", 2],
... "b": [1, 2, 3],
... "c": [1.0, 2.0, 3.0],
... }
... )
>>> df.fold(lambda s1, s2: s1 + s2)
shape: (3,)
Series: 'a' [str]
[
"foo11.0"
"bar22.0"
null
]
A horizontal boolean or, similar to a row-wise .any():
>>> df = pl.DataFrame(
... {
... "a": [False, False, True],
... "b": [False, True, False],
... }
... )
>>> df.fold(lambda s1, s2: s1 | s2)
shape: (3,)
Series: 'a' [bool]
[
false
true
true
]
Parameters
----------
operation
function that takes two `Series` and returns a `Series`.
"""
acc = self.to_series(0)
for i in range(1, self.width):
acc = operation(acc, self.to_series(i))
return acc
def row(self, index: int) -> Tuple[Any]:
"""
Get a row as tuple.
Parameters
----------
index
Row index.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.row(2)
(3, 8, 'c')
"""
return self._df.row_tuple(index)
def rows(self) -> List[Tuple]:
"""
Convert columnar data to rows as python tuples.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 3, 5],
... "b": [2, 4, 6],
... }
... )
>>> df.rows()
[(1, 2), (3, 4), (5, 6)]
"""
return self._df.row_tuples()
@overload
def shrink_to_fit(self: DF, in_place: Literal[False] = ...) -> DF:
...
@overload
def shrink_to_fit(self, in_place: Literal[True]) -> None:
...
@overload
def shrink_to_fit(self: DF, in_place: bool) -> Optional[DF]:
...
def shrink_to_fit(self: DF, in_place: bool = False) -> Optional[DF]:
"""
Shrink memory usage of this DataFrame to fit the exact capacity needed to hold the data.
"""
if in_place:
self._df.shrink_to_fit()
return None
else:
df = self.clone()
df._df.shrink_to_fit()
return df
def hash_rows(
self, k0: int = 0, k1: int = 1, k2: int = 2, k3: int = 3
) -> "pli.Series":
"""
Hash and combine the rows in this DataFrame.
Hash value is UInt64.
Parameters
----------
k0
seed parameter
k1
seed parameter
k2
seed parameter
k3
seed parameter
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, 2, 3],
... "bar": [6, 7, 8],
... "ham": ["a", "b", "c"],
... }
... )
>>> df.hash(k0=42) # doctest: +SKIP
shape: (3,)
Series: '' [u64]
[
1208206736888326229
8040480609798856146
18282897888575762835
]
"""
return pli.wrap_s(self._df.hash_rows(k0, k1, k2, k3))
def interpolate(self: DF) -> DF:
"""
Interpolate intermediate values. The interpolation method is linear.
Examples
--------
>>> df = pl.DataFrame(
... {
... "foo": [1, None, 9, 10],
... "bar": [6, 7, 9, None],
... "baz": [1, None, None, 9],
... }
... )
>>> df.interpolate()
shape: (4, 3)
┌─────┬──────┬─────┐
│ foo ┆ bar ┆ baz │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞═════╪══════╪═════╡
│ 1 ┆ 6 ┆ 1 │
├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 5 ┆ 7 ┆ 3 │
├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 9 ┆ 9 ┆ 6 │
├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌┤
│ 10 ┆ null ┆ 9 │
└─────┴──────┴─────┘
"""
return self.select(pli.col("*").interpolate())
def is_empty(self) -> bool:
"""
Check if the dataframe is empty
Examples
--------
>>> df = pl.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]})
>>> df.filter(pl.col("foo") > 99).is_empty()
True
"""
return self.height == 0
def to_struct(self, name: str) -> "pli.Series":
"""
Convert a ``DataFrame`` to a ``Series`` of type ``Struct``
Parameters
----------
name
Name for the struct Series
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3, 4, 5],
... "b": ["one", "two", "three", "four", "five"],
... }
... )
>>> df.to_struct("nums")
shape: (5,)
Series: 'nums' [struct[2]{'a': i64, 'b': str}]
[
{1,"one"}
{2,"two"}
{3,"three"}
{4,"four"}
{5,"five"}
]
"""
return pli.wrap_s(self._df.to_struct(name))
def unnest(self: DF, names: Union[str, List[str]]) -> DF:
"""
Decompose a struct into its fields. The fields will be inserted in to the `DataFrame` on the
location of the `struct` type.
Parameters
----------
names
Names of the struct columns that will be decomposed by its fields
Examples
--------
>>> df = (
... pl.DataFrame(
... {
... "int": [1, 2],
... "str": ["a", "b"],
... "bool": [True, None],
... "list": [[1, 2], [3]],
... }
... )
... .to_struct("my_struct")
... .to_frame()
... )
>>> df
shape: (2, 1)
┌─────────────────────────────┐
│ my_struct │
│ --- │
│ struct[4]{'int',...,'list'} │
╞═════════════════════════════╡
│ {1,"a",true,[1, 2]} │
├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
│ {2,"b",null,[3]} │
└─────────────────────────────┘
>>> df.unnest("my_struct")
shape: (2, 4)
┌─────┬─────┬──────┬────────────┐
│ int ┆ str ┆ bool ┆ list │
│ --- ┆ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ bool ┆ list [i64] │
╞═════╪═════╪══════╪════════════╡
│ 1 ┆ a ┆ true ┆ [1, 2] │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ 2 ┆ b ┆ null ┆ [3] │
└─────┴─────┴──────┴────────────┘
"""
if isinstance(names, str):
names = [names]
return self._from_pydf(self._df.unnest(names))
class RollingGroupBy(Generic[DF]):
    """
    A rolling grouper. This has an `.agg` method which will allow you to run all polars
    expressions in a groupby context.
    """

    def __init__(
        self,
        df: DF,
        index_column: str,
        period: str,
        offset: Optional[str],
        closed: str = "none",
        by: Optional[Union[str, List[str], "pli.Expr", List["pli.Expr"]]] = None,
    ):
        # Only record the grouping parameters; the actual work is deferred
        # until ``agg`` is called.
        self.df = df
        self.time_column = index_column
        self.period = period
        self.offset = offset
        self.closed = closed
        self.by = by

    def agg(
        self,
        column_to_agg: Union[
            List[Tuple[str, List[str]]],
            Dict[str, Union[str, List[str]]],
            List["pli.Expr"],
            "pli.Expr",
        ],
    ) -> DF:
        # Build the rolling groupby lazily, aggregate, then collect eagerly
        # with optimizations disabled so behavior matches the eager API.
        rolling = self.df.lazy().groupby_rolling(
            self.time_column, self.period, self.offset, self.closed, self.by
        )
        aggregated = rolling.agg(column_to_agg)  # type: ignore[arg-type]
        return aggregated.collect(no_optimization=True, string_cache=False)
class DynamicGroupBy(Generic[DF]):
    """
    A dynamic grouper. This has an `.agg` method which will allow you to run all polars
    expressions in a groupby context.
    """

    def __init__(
        self,
        df: DF,
        index_column: str,
        every: str,
        period: Optional[str],
        offset: Optional[str],
        truncate: bool = True,
        include_boundaries: bool = True,
        closed: str = "none",
        by: Optional[Union[str, List[str], "pli.Expr", List["pli.Expr"]]] = None,
    ):
        # Only record the grouping parameters; the actual work is deferred
        # until ``agg`` is called.
        self.df = df
        self.time_column = index_column
        self.every = every
        self.period = period
        self.offset = offset
        self.truncate = truncate
        self.include_boundaries = include_boundaries
        self.closed = closed
        self.by = by

    def agg(
        self,
        column_to_agg: Union[
            List[Tuple[str, List[str]]],
            Dict[str, Union[str, List[str]]],
            List["pli.Expr"],
            "pli.Expr",
        ],
    ) -> DF:
        # Build the dynamic groupby lazily, aggregate, then collect eagerly
        # with optimizations disabled so behavior matches the eager API.
        dynamic = self.df.lazy().groupby_dynamic(
            self.time_column,
            self.every,
            self.period,
            self.offset,
            self.truncate,
            self.include_boundaries,
            self.closed,
            self.by,
        )
        aggregated = dynamic.agg(column_to_agg)  # type: ignore[arg-type]
        return aggregated.collect(no_optimization=True, string_cache=False)
class GroupBy(Generic[DF]):
    """
    Starts a new GroupBy operation.

    You can also loop over this Object to loop over `DataFrames` with unique groups.

    Examples
    --------
    >>> df = pl.DataFrame({"foo": ["a", "a", "b"], "bar": [1, 2, 3]})
    >>> for group in df.groupby("foo"):
    ...     print(group)
    ... # doctest: +IGNORE_RESULT
    ...
    shape: (2, 2)
    ┌─────┬─────┐
    │ foo ┆ bar │
    │ --- ┆ --- │
    │ str ┆ i64 │
    ╞═════╪═════╡
    │ a   ┆ 1   │
    ├╌╌╌╌╌┼╌╌╌╌╌┤
    │ a   ┆ 2   │
    └─────┴─────┘
    shape: (1, 2)
    ┌─────┬─────┐
    │ foo ┆ bar │
    │ --- ┆ --- │
    │ str ┆ i64 │
    ╞═════╪═════╡
    │ b   ┆ 3   │
    └─────┴─────┘
    """

    def __init__(
        self,
        df: "PyDataFrame",
        by: Union[str, List[str]],
        dataframe_class: Type[DF],
        maintain_order: bool = False,
    ):
        """
        Construct class representing a group by operation over the given dataframe.

        Parameters
        ----------
        df
            PyDataFrame to perform operation over.
        by
            Column(s) to group by.
        dataframe_class
            The class used to wrap around the given dataframe. Used to construct new
            dataframes returned from the group by operation.
        maintain_order
            Make sure that the order of the groups remain consistent. This is more
            expensive than a default groupby. Note that this only works in expression
            aggregations.
        """
        self._df = df
        self._dataframe_class = dataframe_class
        self.by = by
        self.maintain_order = maintain_order

    def __getitem__(self, item: Any) -> "GBSelection[DF]":
        # Deprecated access path; ``_select`` below also emits a
        # DeprecationWarning for the same reason.
        print(
            "accessing GroupBy by index is deprecated, consider using the `.agg` method"
        )
        return self._select(item)

    def _select(
        self, columns: Union[str, List[str]]
    ) -> "GBSelection[DF]":  # pragma: no cover
        """
        Select the columns that will be aggregated.

        Parameters
        ----------
        columns
            One or multiple columns.
        """
        warnings.warn(
            "accessing GroupBy by index is deprecated, consider using the `.agg` method",
            DeprecationWarning,
        )
        if isinstance(columns, str):
            columns = [columns]
        return GBSelection(
            self._df,
            self.by,
            columns,
            dataframe_class=self._dataframe_class,
        )

    def __iter__(self) -> Iterable[Any]:
        # Each entry of the "groups" column holds the row indices of one group;
        # yield the corresponding sub-DataFrame for each.
        groups_df = self.groups()
        groups = groups_df["groups"]
        df = self._dataframe_class._from_pydf(self._df)
        for i in range(groups_df.height):
            yield df[groups[i]]

    def get_group(self, group_value: Union[Any, Tuple[Any]]) -> DF:
        """
        Select a single group as a new DataFrame.

        .. deprecated:: 0.13.32
            Please use `partition_by`

        Parameters
        ----------
        group_value
            Group to select.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "foo": ["one", "one", "one", "two", "two", "two"],
        ...         "bar": ["A", "B", "C", "A", "B", "C"],
        ...         "baz": [1, 2, 3, 4, 5, 6],
        ...     }
        ... )
        >>> df.groupby("foo").get_group("one")
        shape: (3, 3)
        ┌─────┬─────┬─────┐
        │ foo ┆ bar ┆ baz │
        │ --- ┆ --- ┆ --- │
        │ str ┆ str ┆ i64 │
        ╞═════╪═════╪═════╡
        │ one ┆ A   ┆ 1   │
        ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
        │ one ┆ B   ┆ 2   │
        ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
        │ one ┆ C   ┆ 3   │
        └─────┴─────┴─────┘
        """
        groups_df = self.groups()
        groups = groups_df["groups"]
        if not isinstance(group_value, list):
            group_value = [group_value]
        by = self.by
        if not isinstance(by, list):
            by = [by]
        # Build a boolean mask that matches every group-key column against the
        # requested value, AND-ing the per-column masks together.
        mask = None
        for column, group_val in zip(by, group_value):
            local_mask = groups_df[column] == group_val
            if mask is None:
                mask = local_mask
            else:
                mask = mask & local_mask
        # should be only one match
        try:
            groups_idx = groups[mask][0]  # type: ignore
        except IndexError:
            raise ValueError(f"no group: {group_value} found")
        df = self._dataframe_class._from_pydf(self._df)
        return df[groups_idx]

    def groups(self) -> DF:  # pragma: no cover
        """
        Return a `DataFrame` with:

        * the groupby keys
        * the group indexes aggregated as lists
        """
        warnings.warn(
            "accessing GroupBy by index is deprecated, consider using the `.agg` method",
            DeprecationWarning,
        )
        return self._dataframe_class._from_pydf(
            self._df.groupby(self.by, None, "groups")
        )

    def apply(self, f: Callable[[DataFrame], DataFrame]) -> DF:
        """
        Apply a function over the groups as a sub-DataFrame.

        Beware, this is slow.

        Parameters
        ----------
        f
            Custom function.

        Returns
        -------
        DataFrame
        """
        return self._dataframe_class._from_pydf(self._df.groupby_apply(self.by, f))

    def agg(
        self,
        column_to_agg: Union[
            List[Tuple[str, List[str]]],
            Dict[str, Union[str, List[str]]],
            List["pli.Expr"],
            "pli.Expr",
        ],
    ) -> DF:
        """
        Use multiple aggregations on columns. This can be combined with complete lazy API
        and is considered idiomatic polars.

        Parameters
        ----------
        column_to_agg
            map column to aggregation functions.

        Returns
        -------
        Result of groupby split apply operations.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {"foo": ["one", "two", "two", "one", "two"], "bar": [5, 3, 2, 4, 1]}
        ... )
        >>> df.groupby("foo").agg(
        ...     [
        ...         pl.sum("bar").suffix("_sum"),
        ...         pl.col("bar").sort().tail(2).sum().suffix("_tail_sum"),
        ...     ]
        ... )
        shape: (2, 3)
        ┌─────┬─────────┬──────────────┐
        │ foo ┆ bar_sum ┆ bar_tail_sum │
        │ --- ┆ ---     ┆ ---          │
        │ str ┆ i64     ┆ i64          │
        ╞═════╪═════════╪══════════════╡
        │ one ┆ 9       ┆ 9            │
        ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤
        │ two ┆ 6       ┆ 5            │
        └─────┴─────────┴──────────────┘
        """

        # a single list comprehension would be cleaner, but mypy complains on different
        # lines for py3.7 vs py3.10 about typing errors, so this is the same logic,
        # but broken down into two small functions
        def _str_to_list(y: Any) -> Any:
            return [y] if isinstance(y, str) else y

        def _wrangle(x: Any) -> list:
            return [(xi[0], _str_to_list(xi[1])) for xi in x]

        if isinstance(column_to_agg, pli.Expr):
            column_to_agg = [column_to_agg]
        if isinstance(column_to_agg, dict):
            column_to_agg = _wrangle(column_to_agg.items())
        elif isinstance(column_to_agg, list):
            if isinstance(column_to_agg[0], tuple):
                column_to_agg = _wrangle(column_to_agg)
            elif isinstance(column_to_agg[0], pli.Expr):
                # Expression aggregations are routed through the lazy engine and
                # collected immediately.
                # (fix: removed an unreachable `pass` that followed this return)
                return (
                    self._dataframe_class._from_pydf(self._df)
                    .lazy()
                    .groupby(self.by, maintain_order=self.maintain_order)
                    .agg(column_to_agg)  # type: ignore[arg-type]
                    .collect(no_optimization=True, string_cache=False)
                )
            else:
                raise ValueError(
                    f"argument: {column_to_agg} not understood, have you passed a list of expressions?"
                )
        else:
            raise ValueError(
                f"argument: {column_to_agg} not understood, have you passed a list of expressions?"
            )

        # (name, [agg names]) pairs go through the eager native groupby.
        return self._dataframe_class._from_pydf(
            self._df.groupby_agg(self.by, column_to_agg)
        )

    def head(self, n: int = 5) -> DF:
        """
        Return first n rows of each group.

        Parameters
        ----------
        n
            Number of values of the group to select

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "letters": ["c", "c", "a", "c", "a", "b"],
        ...         "nrs": [1, 2, 3, 4, 5, 6],
        ...     }
        ... )
        >>> df.groupby("letters").head(2).sort("letters")
        shape: (5, 2)
        ┌─────────┬─────┐
        │ letters ┆ nrs │
        │ ---     ┆ --- │
        │ str     ┆ i64 │
        ╞═════════╪═════╡
        │ a       ┆ 3   │
        ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
        │ a       ┆ 5   │
        ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
        │ b       ┆ 6   │
        ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
        │ c       ┆ 1   │
        ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
        │ c       ┆ 2   │
        └─────────┴─────┘
        """
        return (
            self._dataframe_class._from_pydf(self._df)
            .lazy()
            .groupby(self.by, self.maintain_order)
            .head(n)
            .collect(no_optimization=True, string_cache=False)
        )

    def tail(self, n: int = 5) -> DF:
        """
        Return last n rows of each group.

        Parameters
        ----------
        n
            Number of values of the group to select

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "letters": ["c", "c", "a", "c", "a", "b"],
        ...         "nrs": [1, 2, 3, 4, 5, 6],
        ...     }
        ... )
        >>> (df.groupby("letters").tail(2).sort("letters"))
        shape: (5, 2)
        ┌─────────┬─────┐
        │ letters ┆ nrs │
        │ ---     ┆ --- │
        │ str     ┆ i64 │
        ╞═════════╪═════╡
        │ a       ┆ 3   │
        ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
        │ a       ┆ 5   │
        ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
        │ b       ┆ 6   │
        ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
        │ c       ┆ 2   │
        ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
        │ c       ┆ 4   │
        └─────────┴─────┘
        """
        return (
            self._dataframe_class._from_pydf(self._df)
            .lazy()
            .groupby(self.by, self.maintain_order)
            .tail(n)
            .collect(no_optimization=True, string_cache=False)
        )

    def _select_all(self) -> "GBSelection[DF]":
        """
        Select all columns for aggregation.
        """
        return GBSelection(
            self._df,
            self.by,
            None,
            dataframe_class=self._dataframe_class,
        )

    def pivot(
        self, pivot_column: Union[str, List[str]], values_column: Union[str, List[str]]
    ) -> "PivotOps[DF]":
        """
        Do a pivot operation based on the group key, a pivot column and an aggregation
        function on the values column.

        .. note::
            Polars'/arrow memory is not ideal for transposing operations like pivots.
            If you have a relatively large table, consider using a groupby over a pivot.

        Parameters
        ----------
        pivot_column
            Column to pivot.
        values_column
            Column that will be aggregated.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "foo": ["one", "one", "one", "two", "two", "two"],
        ...         "bar": ["A", "B", "C", "A", "B", "C"],
        ...         "baz": [1, 2, 3, 4, 5, 6],
        ...     }
        ... )
        >>> df.groupby("foo").pivot(pivot_column="bar", values_column="baz").first()
        shape: (2, 4)
        ┌─────┬─────┬─────┬─────┐
        │ foo ┆ A   ┆ B   ┆ C   │
        │ --- ┆ --- ┆ --- ┆ --- │
        │ str ┆ i64 ┆ i64 ┆ i64 │
        ╞═════╪═════╪═════╪═════╡
        │ one ┆ 1   ┆ 2   ┆ 3   │
        ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
        │ two ┆ 4   ┆ 5   ┆ 6   │
        └─────┴─────┴─────┴─────┘
        """
        # Normalize single column names to one-element lists.
        if isinstance(pivot_column, str):
            pivot_column = [pivot_column]
        if isinstance(values_column, str):
            values_column = [values_column]
        return PivotOps(
            self._df,
            self.by,
            pivot_column,
            values_column,
            dataframe_class=self._dataframe_class,
        )

    def first(self) -> DF:
        """
        Aggregate the first values in the group.
        """
        return self.agg(pli.all().first())

    def last(self) -> DF:
        """
        Aggregate the last values in the group.
        """
        return self.agg(pli.all().last())

    def sum(self) -> DF:
        """
        Reduce the groups to the sum.
        """
        return self.agg(pli.all().sum())

    def min(self) -> DF:
        """
        Reduce the groups to the minimal value.
        """
        return self.agg(pli.all().min())

    def max(self) -> DF:
        """
        Reduce the groups to the maximal value.
        """
        return self.agg(pli.all().max())

    def count(self) -> DF:
        """
        Count the number of values in each group.
        """
        return self.agg(pli.lazy_functions.count())

    def mean(self) -> DF:
        """
        Reduce the groups to the mean values.
        """
        return self.agg(pli.all().mean())

    def n_unique(self) -> DF:
        """
        Count the unique values per group.
        """
        return self.agg(pli.all().n_unique())

    def quantile(self, quantile: float, interpolation: str = "nearest") -> DF:
        """
        Compute the quantile per group.

        Parameters
        ----------
        quantile
            quantile between 0.0 and 1.0
        interpolation
            interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']
        """
        return self.agg(pli.all().quantile(quantile, interpolation))

    def median(self) -> DF:
        """
        Return the median per group.
        """
        return self.agg(pli.all().median())

    def agg_list(self) -> DF:
        """
        Aggregate the groups into Series.

        Examples
        --------
        >>> df = pl.DataFrame({"a": ["one", "two", "one", "two"], "b": [1, 2, 3, 4]})
        >>> df.groupby("a").agg_list()
        shape: (2, 2)
        ┌─────┬────────────┐
        │ a   ┆ b          │
        │ --- ┆ ---        │
        │ str ┆ list [i64] │
        ╞═════╪════════════╡
        │ one ┆ [1, 3]     │
        ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
        │ two ┆ [2, 4]     │
        └─────┴────────────┘
        """
        return self.agg(pli.all().list())
class PivotOps(Generic[DF]):
    """
    Utility class returned in a pivot operation.
    """

    def __init__(
        self,
        df: DataFrame,
        by: Union[str, List[str]],
        pivot_column: Union[str, List[str]],
        values_column: Union[str, List[str]],
        dataframe_class: Type[DF],
    ):
        self._df = df
        self.by = by
        self.pivot_column = pivot_column
        self.values_column = values_column
        self._dataframe_class = dataframe_class

    def _pivot(self, aggregate_fn: str) -> DF:
        # Shared driver: run the native pivot with the requested aggregation
        # and wrap the result in the caller-supplied DataFrame subclass.
        return self._dataframe_class._from_pydf(
            self._df.pivot(self.by, self.pivot_column, self.values_column, aggregate_fn)
        )

    def first(self) -> DF:
        """
        Get the first value per group.
        """
        return self._pivot("first")

    def sum(self) -> DF:
        """
        Get the sum per group.
        """
        return self._pivot("sum")

    def min(self) -> DF:
        """
        Get the minimal value per group.
        """
        return self._pivot("min")

    def max(self) -> DF:
        """
        Get the maximal value per group.
        """
        return self._pivot("max")

    def mean(self) -> DF:
        """
        Get the mean value per group.
        """
        return self._pivot("mean")

    def count(self) -> DF:
        """
        Count the values per group.
        """
        return self._pivot("count")

    def median(self) -> DF:
        """
        Get the median value per group.
        """
        return self._pivot("median")

    def last(self) -> DF:
        """
        Get the last value per group.
        """
        return self._pivot("last")
class GBSelection(Generic[DF]):
    """
    Utility class returned in a groupby operation.
    """

    def __init__(
        self,
        df: "PyDataFrame",
        by: Union[str, List[str]],
        selection: Optional[List[str]],
        dataframe_class: Type[DF],
    ):
        self._df = df
        self.by = by
        self.selection = selection
        self._dataframe_class = dataframe_class

    def _groupby(self, agg_fn: str) -> DF:
        # Shared driver for the simple named aggregations: run the native
        # groupby and wrap the result in the caller-supplied DataFrame class.
        return self._dataframe_class._from_pydf(
            self._df.groupby(self.by, self.selection, agg_fn)
        )

    def first(self) -> DF:
        """
        Aggregate the first values in the group.
        """
        return self._groupby("first")

    def last(self) -> DF:
        """
        Aggregate the last values in the group.
        """
        return self._groupby("last")

    def sum(self) -> DF:
        """
        Reduce the groups to the sum.
        """
        return self._groupby("sum")

    def min(self) -> DF:
        """
        Reduce the groups to the minimal value.
        """
        return self._groupby("min")

    def max(self) -> DF:
        """
        Reduce the groups to the maximal value.
        """
        return self._groupby("max")

    def count(self) -> DF:
        """
        Count the number of values in each group.

        Examples
        --------
        >>> df = pl.DataFrame(
        ...     {
        ...         "foo": [1, None, 3, 4],
        ...         "bar": ["a", "b", "c", "a"],
        ...     }
        ... )
        >>> df.groupby("bar").count()  # counts nulls
        shape: (3, 2)
        ┌─────┬───────┐
        │ bar ┆ count │
        │ --- ┆ ---   │
        │ str ┆ u32   │
        ╞═════╪═══════╡
        │ c   ┆ 1     │
        ├╌╌╌╌╌┼╌╌╌╌╌╌╌┤
        │ a   ┆ 2     │
        ├╌╌╌╌╌┼╌╌╌╌╌╌╌┤
        │ b   ┆ 1     │
        └─────┴───────┘
        """
        return self._groupby("count")

    def mean(self) -> DF:
        """
        Reduce the groups to the mean values.
        """
        return self._groupby("mean")

    def n_unique(self) -> DF:
        """
        Count the unique values per group.
        """
        return self._groupby("n_unique")

    def quantile(self, quantile: float, interpolation: str = "nearest") -> DF:
        """
        Compute the quantile per group.

        Parameters
        ----------
        quantile
            quantile between 0.0 and 1.0
        interpolation
            interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']
        """
        # Quantile has its own native entry point (extra parameters), so it
        # does not go through the shared ``_groupby`` driver.
        return self._dataframe_class._from_pydf(
            self._df.groupby_quantile(self.by, self.selection, quantile, interpolation)
        )

    def median(self) -> DF:
        """
        Return the median per group.
        """
        return self._groupby("median")

    def agg_list(self) -> DF:
        """
        Aggregate the groups into Series.
        """
        return self._groupby("agg_list")

    def apply(
        self,
        func: Callable[[Any], Any],
        return_dtype: Optional[Type[DataType]] = None,
    ) -> DF:
        """
        Apply a function over the groups.
        """
        # Aggregate first (matching the original evaluation order), then map
        # ``func`` over each aggregated list-column and splice the results back.
        aggregated = self.agg_list()
        if self.selection is None:
            raise TypeError(
                "apply not available for Groupby.select_all(). Use select() instead."
            )
        for column_name in self.selection:
            series = aggregated.drop_in_place(column_name + "_agg_list").apply(
                func, return_dtype
            )
            series.rename(column_name, in_place=True)
            aggregated[column_name] = series
        return aggregated
| [
"polars.utils._process_null_values",
"polars.utils.format_path",
"polars.internals.construction.arrow_to_pydf",
"io.BytesIO",
"polars.polars.PyDataFrame.read_json",
"polars.internals.wrap_s",
"polars.internals.construction.pandas_to_pydf",
"numpy.array",
"polars.internals.lit",
"polars.internals.c... | [((1764, 1796), 'typing.TypeVar', 'TypeVar', (['"""DF"""'], {'bound': '"""DataFrame"""'}), "('DF', bound='DataFrame')\n", (1771, 1796), False, 'from typing import Any, BinaryIO, Callable, Dict, Generic, Iterable, Iterator, List, Mapping, Optional, Sequence, TextIO, Tuple, Type, TypeVar, Union, overload\n'), ((2220, 2243), 'polars.internals.Series', 'pli.Series', (['""""""', '[other]'], {}), "('', [other])\n", (2230, 2243), True, 'from polars import internals as pli\n'), ((10819, 10847), 'polars.polars.PyDataFrame.read_dicts', 'PyDataFrame.read_dicts', (['data'], {}), '(data)\n', (10841, 10847), False, 'from polars.polars import PyDataFrame, PySeries\n'), ((17519, 17552), 'polars.utils._process_null_values', '_process_null_values', (['null_values'], {}), '(null_values)\n', (17539, 17552), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((19135, 19169), 'polars.utils.handle_projection_columns', 'handle_projection_columns', (['columns'], {}), '(columns)\n', (19160, 19169), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((21623, 21657), 'polars.utils.handle_projection_columns', 'handle_projection_columns', (['columns'], {}), '(columns)\n', (21648, 21657), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((22587, 22621), 'polars.utils.handle_projection_columns', 'handle_projection_columns', (['columns'], {}), '(columns)\n', (22612, 22621), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((22673, 22729), 'polars.polars.PyDataFrame.read_avro', 
'PyDataFrame.read_avro', (['file', 'columns', 'projection', 'n_rows'], {}), '(file, columns, projection, n_rows)\n', (22694, 22729), False, 'from polars.polars import PyDataFrame, PySeries\n'), ((24413, 24447), 'polars.utils.handle_projection_columns', 'handle_projection_columns', (['columns'], {}), '(columns)\n', (24438, 24447), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((25132, 25171), 'polars.polars.PyDataFrame.read_json', 'PyDataFrame.read_json', (['file', 'json_lines'], {}), '(file, json_lines)\n', (25153, 25171), False, 'from polars.polars import PyDataFrame, PySeries\n'), ((25679, 25716), 'pyarrow.Table.from_batches', 'pa.Table.from_batches', (['record_batches'], {}), '(record_batches)\n', (25700, 25716), True, 'import pyarrow as pa\n'), ((30073, 30162), 'warnings.warn', 'warnings.warn', (['"""\'to_json\' is deprecated. please use \'write_json\'"""', 'DeprecationWarning'], {}), '("\'to_json\' is deprecated. please use \'write_json\'",\n DeprecationWarning)\n', (30086, 30162), False, 'import warnings\n'), ((33669, 33706), 'pyarrow.Table.from_batches', 'pa.Table.from_batches', (['record_batches'], {}), '(record_batches)\n', (33690, 33706), True, 'import pyarrow as pa\n'), ((35537, 35624), 'warnings.warn', 'warnings.warn', (['"""\'to_csv\' is deprecated. please use \'write_csv\'"""', 'DeprecationWarning'], {}), '("\'to_csv\' is deprecated. please use \'write_csv\'",\n DeprecationWarning)\n', (35550, 35624), False, 'import warnings\n'), ((36632, 36721), 'warnings.warn', 'warnings.warn', (['"""\'to_avro\' is deprecated. please use \'write_avro\'"""', 'DeprecationWarning'], {}), '("\'to_avro\' is deprecated. please use \'write_avro\'",\n DeprecationWarning)\n', (36645, 36721), False, 'import warnings\n'), ((37823, 37910), 'warnings.warn', 'warnings.warn', (['"""\'to_ipc\' is deprecated. 
please use \'write_ipc\'"""', 'DeprecationWarning'], {}), '("\'to_ipc\' is deprecated. please use \'write_ipc\'",\n DeprecationWarning)\n', (37836, 37910), False, 'import warnings\n'), ((44998, 45093), 'warnings.warn', 'warnings.warn', (['"""\'to_parquet\' is deprecated. please use \'write_parquet\'"""', 'DeprecationWarning'], {}), '("\'to_parquet\' is deprecated. please use \'write_parquet\'",\n DeprecationWarning)\n', (45011, 45093), False, 'import warnings\n'), ((56514, 56644), 'warnings.warn', 'warnings.warn', (['"""setting a DataFrame by indexing is deprecated; Consider using DataFrame.with_column"""', 'DeprecationWarning'], {}), "(\n 'setting a DataFrame by indexing is deprecated; Consider using DataFrame.with_column'\n , DeprecationWarning)\n", (56527, 56644), False, 'import warnings\n'), ((173372, 173493), 'warnings.warn', 'warnings.warn', (['"""accessing GroupBy by index is deprecated, consider using the `.agg` method"""', 'DeprecationWarning'], {}), "(\n 'accessing GroupBy by index is deprecated, consider using the `.agg` method'\n , DeprecationWarning)\n", (173385, 173493), False, 'import warnings\n'), ((175926, 176047), 'warnings.warn', 'warnings.warn', (['"""accessing GroupBy by index is deprecated, consider using the `.agg` method"""', 'DeprecationWarning'], {}), "(\n 'accessing GroupBy by index is deprecated, consider using the `.agg` method'\n , DeprecationWarning)\n", (175939, 176047), False, 'import warnings\n'), ((8566, 8599), 'polars.internals.construction.dict_to_pydf', 'dict_to_pydf', (['{}'], {'columns': 'columns'}), '({}, columns=columns)\n', (8578, 8599), False, 'from polars.internals.construction import ColumnsType, arrow_to_pydf, dict_to_pydf, numpy_to_pydf, pandas_to_pydf, sequence_to_pydf, series_to_pydf\n'), ((11599, 11634), 'polars.internals.construction.dict_to_pydf', 'dict_to_pydf', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (11611, 11634), False, 'from polars.internals.construction import ColumnsType, 
arrow_to_pydf, dict_to_pydf, numpy_to_pydf, pandas_to_pydf, sequence_to_pydf, series_to_pydf\n'), ((12766, 12817), 'polars.internals.construction.numpy_to_pydf', 'numpy_to_pydf', (['data'], {'columns': 'columns', 'orient': 'orient'}), '(data, columns=columns, orient=orient)\n', (12779, 12817), False, 'from polars.internals.construction import ColumnsType, arrow_to_pydf, dict_to_pydf, numpy_to_pydf, pandas_to_pydf, sequence_to_pydf, series_to_pydf\n'), ((12851, 12905), 'polars.internals.construction.sequence_to_pydf', 'sequence_to_pydf', (['data'], {'columns': 'columns', 'orient': 'orient'}), '(data, columns=columns, orient=orient)\n', (12867, 12905), False, 'from polars.internals.construction import ColumnsType, arrow_to_pydf, dict_to_pydf, numpy_to_pydf, pandas_to_pydf, sequence_to_pydf, series_to_pydf\n'), ((13943, 13996), 'polars.internals.construction.arrow_to_pydf', 'arrow_to_pydf', (['data'], {'columns': 'columns', 'rechunk': 'rechunk'}), '(data, columns=columns, rechunk=rechunk)\n', (13956, 13996), False, 'from polars.internals.construction import ColumnsType, arrow_to_pydf, dict_to_pydf, numpy_to_pydf, pandas_to_pydf, sequence_to_pydf, series_to_pydf\n'), ((15428, 15507), 'polars.internals.construction.pandas_to_pydf', 'pandas_to_pydf', (['data'], {'columns': 'columns', 'rechunk': 'rechunk', 'nan_to_none': 'nan_to_none'}), '(data, columns=columns, rechunk=rechunk, nan_to_none=nan_to_none)\n', (15442, 15507), False, 'from polars.internals.construction import ColumnsType, arrow_to_pydf, dict_to_pydf, numpy_to_pydf, pandas_to_pydf, sequence_to_pydf, series_to_pydf\n'), ((16749, 16766), 'polars.utils.format_path', 'format_path', (['file'], {}), '(file)\n', (16760, 16766), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((18024, 18451), 'polars.scan_csv', 'scan_csv', (['file'], {'has_header': 'has_header', 'sep': 'sep', 'comment_char': 
'comment_char', 'quote_char': 'quote_char', 'skip_rows': 'skip_rows', 'dtypes': 'dtypes_dict', 'null_values': 'null_values', 'ignore_errors': 'ignore_errors', 'infer_schema_length': 'infer_schema_length', 'n_rows': 'n_rows', 'low_memory': 'low_memory', 'rechunk': 'rechunk', 'skip_rows_after_header': 'skip_rows_after_header', 'row_count_name': 'row_count_name', 'row_count_offset': 'row_count_offset'}), '(file, has_header=has_header, sep=sep, comment_char=comment_char,\n quote_char=quote_char, skip_rows=skip_rows, dtypes=dtypes_dict,\n null_values=null_values, ignore_errors=ignore_errors,\n infer_schema_length=infer_schema_length, n_rows=n_rows, low_memory=\n low_memory, rechunk=rechunk, skip_rows_after_header=\n skip_rows_after_header, row_count_name=row_count_name, row_count_offset\n =row_count_offset)\n', (18032, 18451), False, 'from polars import scan_csv\n'), ((19758, 19815), 'polars.utils._prepare_row_count_args', '_prepare_row_count_args', (['row_count_name', 'row_count_offset'], {}), '(row_count_name, row_count_offset)\n', (19781, 19815), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((20809, 20826), 'polars.utils.format_path', 'format_path', (['file'], {}), '(file)\n', (20820, 20826), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((20941, 21077), 'polars.scan_parquet', 'scan_parquet', (['file'], {'n_rows': 'n_rows', 'rechunk': '(True)', 'parallel': 'parallel', 'row_count_name': 'row_count_name', 'row_count_offset': 'row_count_offset'}), '(file, n_rows=n_rows, rechunk=True, parallel=parallel,\n row_count_name=row_count_name, row_count_offset=row_count_offset)\n', (20953, 21077), False, 'from polars import scan_parquet\n'), ((21852, 21909), 'polars.utils._prepare_row_count_args', '_prepare_row_count_args', 
(['row_count_name', 'row_count_offset'], {}), '(row_count_name, row_count_offset)\n', (21875, 21909), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((22539, 22556), 'polars.utils.format_path', 'format_path', (['file'], {}), '(file)\n', (22550, 22556), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((23694, 23711), 'polars.utils.format_path', 'format_path', (['file'], {}), '(file)\n', (23705, 23711), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((23822, 23939), 'polars.scan_ipc', 'scan_ipc', (['file'], {'n_rows': 'n_rows', 'rechunk': 'rechunk', 'row_count_name': 'row_count_name', 'row_count_offset': 'row_count_offset'}), '(file, n_rows=n_rows, rechunk=rechunk, row_count_name=\n row_count_name, row_count_offset=row_count_offset)\n', (23830, 23939), False, 'from polars import scan_ipc\n'), ((24616, 24673), 'polars.utils._prepare_row_count_args', '_prepare_row_count_args', (['row_count_name', 'row_count_offset'], {}), '(row_count_name, row_count_offset)\n', (24639, 24673), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((31895, 31912), 'polars.utils.format_path', 'format_path', (['file'], {}), '(file)\n', (31906, 31912), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((34935, 34944), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (34942, 34944), False, 'from io import BytesIO, IOBase, StringIO\n'), ((35137, 35154), 'polars.utils.format_path', 
'format_path', (['file'], {}), '(file)\n', (35148, 35154), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((36267, 36284), 'polars.utils.format_path', 'format_path', (['file'], {}), '(file)\n', (36278, 36284), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((37457, 37474), 'polars.utils.format_path', 'format_path', (['file'], {}), '(file)\n', (37468, 37474), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((43543, 43560), 'polars.utils.format_path', 'format_path', (['file'], {}), '(file)\n', (43554, 43560), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((44146, 44160), 'pyarrow.table', 'pa.table', (['data'], {}), '(data)\n', (44154, 44160), True, 'import pyarrow as pa\n'), ((44174, 44287), 'pyarrow.parquet.write_table', 'pa.parquet.write_table', ([], {'table': 'tbl', 'where': 'file', 'compression': 'compression', 'write_statistics': 'statistics'}), '(table=tbl, where=file, compression=compression,\n write_statistics=statistics, **kwargs)\n', (44196, 44287), True, 'import pyarrow as pa\n'), ((48379, 48478), 'warnings.warn', 'warnings.warn', (['"""accessing series as Attribute of a DataFrame is deprecated"""', 'DeprecationWarning'], {}), "('accessing series as Attribute of a DataFrame is deprecated',\n DeprecationWarning)\n", (48392, 48478), False, 'import warnings\n'), ((50144, 50245), 'warnings.warn', 'warnings.warn', (['"""\'using expressions in []\' is deprecated. please use \'select\'"""', 'DeprecationWarning'], {}), '("\'using expressions in []\' is deprecated. 
please use \'select\'",\n DeprecationWarning)\n', (50157, 50245), False, 'import warnings\n'), ((58709, 58758), 'os.environ.get', 'os.environ.get', (['"""POLARS_FMT_MAX_COLS"""'], {'default': '(75)'}), "('POLARS_FMT_MAX_COLS', default=75)\n", (58723, 58758), False, 'import os\n'), ((58783, 58832), 'os.environ.get', 'os.environ.get', (['"""POLARS_FMT_MAX_ROWS"""'], {'default': '(25)'}), "('POLARS_FMT_MAX_ROWS', default=25)\n", (58797, 58832), False, 'import os\n'), ((68483, 68546), 'polars.internals.Series', 'pli.Series', (['"""describe"""', "['mean', 'std', 'min', 'max', 'median']"], {}), "('describe', ['mean', 'std', 'min', 'max', 'median'])\n", (68493, 68546), True, 'from polars import internals as pli\n'), ((72786, 72885), 'warnings.warn', 'warnings.warn', (['"""in-place sorting is deprecated; please use default sorting"""', 'DeprecationWarning'], {}), "('in-place sorting is deprecated; please use default sorting',\n DeprecationWarning)\n", (72799, 72885), False, 'import warnings\n'), ((114436, 114563), 'warnings.warn', 'warnings.warn', (['"""using asof join via DataFrame.join is deprecated, please use DataFrame.join_asof"""', 'DeprecationWarning'], {}), "(\n 'using asof join via DataFrame.join is deprecated, please use DataFrame.join_asof'\n , DeprecationWarning)\n", (114449, 114563), False, 'import warnings\n'), ((185577, 185603), 'polars.internals.lazy_functions.count', 'pli.lazy_functions.count', ([], {}), '()\n', (185601, 185603), True, 'from polars import internals as pli\n'), ((8661, 8696), 'polars.internals.construction.dict_to_pydf', 'dict_to_pydf', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (8673, 8696), False, 'from polars.internals.construction import ColumnsType, arrow_to_pydf, dict_to_pydf, numpy_to_pydf, pandas_to_pydf, sequence_to_pydf, series_to_pydf\n'), ((18804, 18835), 'polars.utils.is_str_sequence', 'is_str_sequence', (['columns', '(False)'], {}), '(columns, False)\n', (18819, 18835), False, 'from polars.utils import 
_prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((21293, 21324), 'polars.utils.is_str_sequence', 'is_str_sequence', (['columns', '(False)'], {}), '(columns, False)\n', (21308, 21324), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((24110, 24141), 'polars.utils.is_str_sequence', 'is_str_sequence', (['columns', '(False)'], {}), '(columns, False)\n', (24125, 24141), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((25062, 25079), 'polars.utils.format_path', 'format_path', (['file'], {}), '(file)\n', (25073, 25079), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((32057, 32066), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (32064, 32066), False, 'from io import BytesIO, IOBase, StringIO\n'), ((52406, 52439), 'polars.internals.wrap_s', 'pli.wrap_s', (['series[row_selection]'], {}), '(series[row_selection])\n', (52416, 52439), True, 'from polars import internals as pli\n'), ((53959, 53979), 'polars.utils.range_to_slice', 'range_to_slice', (['item'], {}), '(item)\n', (53973, 53979), False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((56987, 57002), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (56995, 57002), True, 'import numpy as np\n'), ((72494, 72593), 'warnings.warn', 'warnings.warn', (['"""in-place sorting is deprecated; please use default sorting"""', 'DeprecationWarning'], {}), "('in-place sorting is deprecated; please use default sorting',\n DeprecationWarning)\n", 
(72507, 72593), False, 'import warnings\n'), ((128758, 128775), 'polars.internals.lit', 'pli.lit', (['strategy'], {}), '(strategy)\n', (128765, 128775), True, 'from polars import internals as pli\n'), ((8764, 8815), 'polars.internals.construction.numpy_to_pydf', 'numpy_to_pydf', (['data'], {'columns': 'columns', 'orient': 'orient'}), '(data, columns=columns, orient=orient)\n', (8777, 8815), False, 'from polars.internals.construction import ColumnsType, arrow_to_pydf, dict_to_pydf, numpy_to_pydf, pandas_to_pydf, sequence_to_pydf, series_to_pydf\n'), ((15137, 15150), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (15145, 15150), True, 'import numpy as np\n'), ((15268, 15295), 'polars.internals.Series', 'pli.Series', (['name', 'pd_series'], {}), '(name, pd_series)\n', (15278, 15295), True, 'from polars import internals as pli\n'), ((56783, 56805), 'polars.internals.Series', 'pli.Series', (['key', 'value'], {}), '(key, value)\n', (56793, 56805), True, 'from polars import internals as pli\n'), ((58859, 58902), 'polars._html.NotebookFormatter', 'NotebookFormatter', (['self', 'max_cols', 'max_rows'], {}), '(self, max_cols, max_rows)\n', (58876, 58902), False, 'from polars._html import NotebookFormatter\n'), ((127532, 127545), 'polars.internals.wrap_s', 'pli.wrap_s', (['s'], {}), '(s)\n', (127542, 127545), True, 'from polars import internals as pli\n'), ((153548, 153557), 'polars.internals.all', 'pli.all', ([], {}), '()\n', (153555, 153557), True, 'from polars import internals as pli\n'), ((165794, 165806), 'polars.internals.col', 'pli.col', (['"""*"""'], {}), "('*')\n", (165801, 165806), True, 'from polars import internals as pli\n'), ((184884, 184893), 'polars.internals.all', 'pli.all', ([], {}), '()\n', (184891, 184893), True, 'from polars import internals as pli\n'), ((185026, 185035), 'polars.internals.all', 'pli.all', ([], {}), '()\n', (185033, 185035), True, 'from polars import internals as pli\n'), ((185156, 185165), 'polars.internals.all', 'pli.all', 
([], {}), '()\n', (185163, 185165), True, 'from polars import internals as pli\n'), ((185295, 185304), 'polars.internals.all', 'pli.all', ([], {}), '()\n', (185302, 185304), True, 'from polars import internals as pli\n'), ((185434, 185443), 'polars.internals.all', 'pli.all', ([], {}), '()\n', (185441, 185443), True, 'from polars import internals as pli\n'), ((185726, 185735), 'polars.internals.all', 'pli.all', ([], {}), '()\n', (185733, 185735), True, 'from polars import internals as pli\n'), ((185866, 185875), 'polars.internals.all', 'pli.all', ([], {}), '()\n', (185873, 185875), True, 'from polars import internals as pli\n'), ((186271, 186280), 'polars.internals.all', 'pli.all', ([], {}), '()\n', (186278, 186280), True, 'from polars import internals as pli\n'), ((186430, 186439), 'polars.internals.all', 'pli.all', ([], {}), '()\n', (186437, 186439), True, 'from polars import internals as pli\n'), ((187016, 187025), 'polars.internals.all', 'pli.all', ([], {}), '()\n', (187023, 187025), True, 'from polars import internals as pli\n'), ((8904, 8940), 'polars.internals.construction.arrow_to_pydf', 'arrow_to_pydf', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (8917, 8940), False, 'from polars.internals.construction import ColumnsType, arrow_to_pydf, dict_to_pydf, numpy_to_pydf, pandas_to_pydf, sequence_to_pydf, series_to_pydf\n'), ((15186, 15218), 'polars.internals.Series', 'pli.Series', (['name', '[]'], {'dtype': 'Utf8'}), '(name, [], dtype=Utf8)\n', (15196, 15218), True, 'from polars import internals as pli\n'), ((15330, 15351), 'polars.internals.Series', 'pli.Series', (['name', 'col'], {}), '(name, col)\n', (15340, 15351), True, 'from polars import internals as pli\n'), ((55746, 55766), 'polars.internals.Series', 'pli.Series', (['""""""', 'item'], {}), "('', item)\n", (55756, 55766), True, 'from polars import internals as pli\n'), ((55822, 55843), 'polars.utils.is_int_sequence', 'is_int_sequence', (['item'], {}), '(item)\n', (55837, 55843), 
False, 'from polars.utils import _prepare_row_count_args, _process_null_values, format_path, handle_projection_columns, is_int_sequence, is_str_sequence, range_to_slice\n'), ((9036, 9090), 'polars.internals.construction.sequence_to_pydf', 'sequence_to_pydf', (['data'], {'columns': 'columns', 'orient': 'orient'}), '(data, columns=columns, orient=orient)\n', (9052, 9090), False, 'from polars.internals.construction import ColumnsType, arrow_to_pydf, dict_to_pydf, numpy_to_pydf, pandas_to_pydf, sequence_to_pydf, series_to_pydf\n'), ((17297, 17316), 'polars.datatypes.py_type_to_dtype', 'py_type_to_dtype', (['v'], {}), '(v)\n', (17313, 17316), False, 'from polars.datatypes import Boolean, DataType, UInt32, Utf8, py_type_to_dtype\n'), ((54182, 54194), 'polars.internals.col', 'pli.col', (['"""*"""'], {}), "('*')\n", (54189, 54194), True, 'from polars import internals as pli\n'), ((56866, 56888), 'polars.internals.Series', 'pli.Series', (['key', 'value'], {}), '(key, value)\n', (56876, 56888), True, 'from polars import internals as pli\n'), ((117961, 117976), 'polars.internals.wrap_s', 'pli.wrap_s', (['out'], {}), '(out)\n', (117971, 117976), True, 'from polars import internals as pli\n'), ((9158, 9195), 'polars.internals.construction.series_to_pydf', 'series_to_pydf', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (9172, 9195), False, 'from polars.internals.construction import ColumnsType, arrow_to_pydf, dict_to_pydf, numpy_to_pydf, pandas_to_pydf, sequence_to_pydf, series_to_pydf\n'), ((55341, 55361), 'polars.internals.Series', 'pli.Series', (['""""""', 'item'], {}), "('', item)\n", (55351, 55361), True, 'from polars import internals as pli\n'), ((9500, 9537), 'polars.internals.construction.pandas_to_pydf', 'pandas_to_pydf', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (9514, 9537), False, 'from polars.internals.construction import ColumnsType, arrow_to_pydf, dict_to_pydf, numpy_to_pydf, pandas_to_pydf, sequence_to_pydf, 
series_to_pydf\n'), ((54835, 54847), 'polars.internals.col', 'pli.col', (['"""*"""'], {}), "('*')\n", (54842, 54847), True, 'from polars import internals as pli\n')] |
# coding:utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# Emit INFO-level log messages (e.g. LoggingTensorHook output) during training.
tf.logging.set_verbosity(tf.logging.INFO)
# Our application logic will be here
def cnn_model_fn(features, labels, mode):
    """Model function for a CNN MNIST classifier (TF 1.x Estimator API).

    Args:
        features: dict with key 'x' holding flattened 28x28 grayscale images.
        labels: integer class labels, shape [batch_size].
        mode: a tf.estimator.ModeKeys value (TRAIN / EVAL / PREDICT).

    Returns:
        tf.estimator.EstimatorSpec configured for the given mode.
    """
    # Input layer: reshape flat pixels to NHWC; one channel (grayscale).
    input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
    # Conv block 1: 32 filters of 5x5, 'same' padding, then 2x2 max-pool.
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Conv block 2: 64 filters of 5x5, then 2x2 max-pool -> [batch, 7, 7, 64].
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    # Dropout is only active in TRAIN mode.
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Final logits layer: one unit per digit class.
    logits = tf.layers.dense(inputs=dropout, units=10)
    predictions = {
        # BUGFIX: key renamed "class" -> "classes" to match the lookup in
        # eval_metric_ops below (was a KeyError during evaluation).
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Loss for TRAIN and EVAL: cross-entropy between one-hot labels and
    # logits.  (A redundant duplicate one-hot computation was removed.)
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    loss = tf.losses.softmax_cross_entropy(
        onehot_labels=onehot_labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # EVAL mode: report classification accuracy.
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    # BUGFIX: was `loss=loess`, an undefined name (NameError in EVAL mode).
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
    """Train the MNIST CNN Estimator, then evaluate it and print metrics."""
    # Load training and eval data.
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    eval_data = mnist.test.images
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
    # Create the Estimator (checkpoints go to model_dir).
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/mnist_convent_model")
    # Log softmax probabilities every 50 training steps.
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)
    # Train the model.
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True)
    mnist_classifier.train(
        input_fn=train_input_fn,
        steps=20000,
        hooks=[logging_hook])
    # Evaluate the model and print the result.
    # BUGFIX: the variable was defined as `eval_input_tf` but referenced
    # as `eval_input_fn` below, which raised a NameError at evaluation.
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=False)
    eval_result = mnist_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_result)
if __name__ == '__main__':
    # tf.app.run() parses command-line flags, then invokes main(argv).
    tf.app.run()
| [
"tensorflow.contrib.learn.datasets.load_dataset",
"tensorflow.logging.set_verbosity",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.estimator.inputs.numpy_input_fn",
"tensorflow.nn.softmax",
"tensorflow.cast",
"tensorflow.app.run",
"tensorflow.estimator.Estimator",
"numpy.asarray",
"tensorflow... | [((170, 211), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (194, 211), True, 'import tensorflow as tf\n'), ((482, 524), 'tensorflow.reshape', 'tf.reshape', (["features['x']", '[-1, 28, 28, 1]'], {}), "(features['x'], [-1, 28, 28, 1])\n", (492, 524), True, 'import tensorflow as tf\n'), ((534, 645), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'input_layer', 'filters': '(32)', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=input_layer, filters=32, kernel_size=[5, 5],\n padding='same', activation=tf.nn.relu)\n", (550, 645), True, 'import tensorflow as tf\n'), ((718, 784), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv1', 'pool_size': '[2, 2]', 'strides': '(2)'}), '(inputs=conv1, pool_size=[2, 2], strides=2)\n', (741, 784), True, 'import tensorflow as tf\n'), ((796, 902), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'pool1', 'filters': '(64)', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=pool1, filters=64, kernel_size=[5, 5], padding=\n 'same', activation=tf.nn.relu)\n", (812, 902), True, 'import tensorflow as tf\n'), ((971, 1037), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv2', 'pool_size': '[2, 2]', 'strides': '(2)'}), '(inputs=conv2, pool_size=[2, 2], strides=2)\n', (994, 1037), True, 'import tensorflow as tf\n'), ((1054, 1089), 'tensorflow.reshape', 'tf.reshape', (['pool2', '[-1, 7 * 7 * 64]'], {}), '(pool2, [-1, 7 * 7 * 64])\n', (1064, 1089), True, 'import tensorflow as tf\n'), ((1102, 1171), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'pool2_flat', 'units': '(1024)', 'activation': 'tf.nn.relu'}), '(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n', (1117, 1171), True, 'import tensorflow as tf\n'), ((1252, 1344), 'tensorflow.layers.dropout', 
'tf.layers.dropout', ([], {'inputs': 'dense', 'rate': '(0.4)', 'training': '(mode == tf.estimator.ModeKeys.TRAIN)'}), '(inputs=dense, rate=0.4, training=mode == tf.estimator.\n ModeKeys.TRAIN)\n', (1269, 1344), True, 'import tensorflow as tf\n'), ((1442, 1483), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'dropout', 'units': '(10)'}), '(inputs=dropout, units=10)\n', (1457, 1483), True, 'import tensorflow as tf\n'), ((1935, 2010), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', ([], {'onehot_labels': 'onehot_labels', 'logits': 'logits'}), '(onehot_labels=onehot_labels, logits=logits)\n', (1966, 2010), True, 'import tensorflow as tf\n'), ((2579, 2666), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loess', 'eval_metric_ops': 'eval_metric_ops'}), '(mode=mode, loss=loess, eval_metric_ops=\n eval_metric_ops)\n', (2605, 2666), True, 'import tensorflow as tf\n'), ((2746, 2793), 'tensorflow.contrib.learn.datasets.load_dataset', 'tf.contrib.learn.datasets.load_dataset', (['"""mnist"""'], {}), "('mnist')\n", (2784, 2793), True, 'import tensorflow as tf\n'), ((2849, 2895), 'numpy.asarray', 'np.asarray', (['mnist.train.labels'], {'dtype': 'np.int32'}), '(mnist.train.labels, dtype=np.int32)\n', (2859, 2895), True, 'import numpy as np\n'), ((2948, 2993), 'numpy.asarray', 'np.asarray', (['mnist.test.labels'], {'dtype': 'np.int32'}), '(mnist.test.labels, dtype=np.int32)\n', (2958, 2993), True, 'import numpy as np\n'), ((3046, 3134), 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'model_fn': 'cnn_model_fn', 'model_dir': '"""/tmp/mnist_convent_model"""'}), "(model_fn=cnn_model_fn, model_dir=\n '/tmp/mnist_convent_model')\n", (3068, 3134), True, 'import tensorflow as tf\n'), ((3252, 3319), 'tensorflow.train.LoggingTensorHook', 'tf.train.LoggingTensorHook', ([], {'tensors': 'tensors_to_log', 'every_n_iter': '(50)'}), '(tensors=tensors_to_log, every_n_iter=50)\n', 
(3278, 3319), True, 'import tensorflow as tf\n'), ((3382, 3504), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': train_data}", 'y': 'train_labels', 'batch_size': '(100)', 'num_epochs': 'None', 'shuffle': '(True)'}), "(x={'x': train_data}, y=train_labels,\n batch_size=100, num_epochs=None, shuffle=True)\n", (3416, 3504), True, 'import tensorflow as tf\n'), ((3754, 3856), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': eval_data}", 'y': 'eval_labels', 'num_epochs': '(1)', 'shuffle': '(False)'}), "(x={'x': eval_data}, y=eval_labels,\n num_epochs=1, shuffle=False)\n", (3788, 3856), True, 'import tensorflow as tf\n'), ((4025, 4037), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (4035, 4037), True, 'import tensorflow as tf\n'), ((1614, 1645), 'tensorflow.argmax', 'tf.argmax', ([], {'input': 'logits', 'axis': '(1)'}), '(input=logits, axis=1)\n', (1623, 1645), True, 'import tensorflow as tf\n'), ((1671, 1715), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'name': '"""softmax_tensor"""'}), "(logits, name='softmax_tensor')\n", (1684, 1715), True, 'import tensorflow as tf\n'), ((1784, 1846), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions'}), '(mode=mode, predictions=predictions)\n', (1810, 1846), True, 'import tensorflow as tf\n'), ((2169, 2223), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (2202, 2223), True, 'import tensorflow as tf\n'), ((2363, 2430), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'train_op': 'train_op'}), '(mode=mode, loss=loss, train_op=train_op)\n', (2389, 2430), True, 'import tensorflow as tf\n'), ((2479, 2549), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'labels', 
'predictions': "predictions['classes']"}), "(labels=labels, predictions=predictions['classes'])\n", (2498, 2549), True, 'import tensorflow as tf\n'), ((1887, 1912), 'tensorflow.cast', 'tf.cast', (['labels', 'tf.int32'], {}), '(labels, tf.int32)\n', (1894, 1912), True, 'import tensorflow as tf\n'), ((2064, 2089), 'tensorflow.cast', 'tf.cast', (['labels', 'tf.int32'], {}), '(labels, tf.int32)\n', (2071, 2089), True, 'import tensorflow as tf\n'), ((2320, 2346), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (2344, 2346), True, 'import tensorflow as tf\n')] |
from typing import Tuple, Callable, Optional
from time import time
from numbers import Number
import numpy as np
from torch import optim, Tensor
import mrphy
from mrphy.mobjs import SpinCube, Pulse
def arctanLBFGS(
        target: dict, cube: SpinCube, pulse: Pulse,
        fn_err: Callable[[Tensor, Tensor, Optional[Tensor]], Tensor],
        fn_pen: Callable[[Tensor], Tensor],
        niter: int = 8, niter_gr: int = 2, niter_rf: int = 2,
        eta: Number = 4.,
        b1Map_: Optional[Tensor] = None, b1Map: Optional[Tensor] = None,
        doQuiet: bool = False, doRelax: bool = True
) -> Tuple[Pulse, dict]:
    r"""Joint RF/GR optimization via direct arctan trick

    Usage:
        ``arctanLBFGS(target, cube, pulse, fn_err, fn_pen; eta=eta)``
    Inputs:
        - ``target``: dict, with fields:
            ``d_``: `(1, nM, xy)`, desired excitation;
            ``weight_``: `(1, nM)`.
        - ``cube``: mrphy.mobjs.SpinCube.
        - ``pulse``: mrphy.mobjs.Pulse.
        - ``fn_err``: error metric function. See :mod:`~adpulses.metrics`.
        - ``fn_pen``: penalty function. See :mod:`~adpulses.penalties`.
    Optionals:
        - ``niter``: int, number of iterations.
        - ``niter_gr``: int, number of LBFGS iters for updating *gradients*.
        - ``niter_rf``: int, number of LBFGS iters for updating *RF*.
        - ``eta``: `(1,)`, penalization term weighting coefficient.
        - ``b1Map_``: `(1, nM, xy,(nCoils))`, a.u., transmit sensitivity.
        - ``doQuiet``: [T/f], suppress per-step logging when True.
        - ``doRelax``: [T/f], whether accounting relaxation effects in simu.
    Outputs:
        - ``pulse``: mrphy.mojbs.Pulse, optimized pulse.
        - ``optInfos``: dict, optimization informations.
    """
    rfmax, smax = pulse.rfmax, pulse.smax
    eta *= pulse.dt*1e6/4  # normalize eta by dt

    # At most one of (b1Map_, b1Map) may be supplied; both-None is allowed.
    assert ((b1Map_ is None) or (b1Map is None))
    b1Map_ = (b1Map_ if b1Map is None else cube.extract(b1Map))
    # BUGFIX: only inspect `.shape` when a sensitivity map was provided; the
    # previous unconditional `len(b1Map_.shape)` raised AttributeError when
    # both `b1Map_` and `b1Map` were None, despite both being Optional.
    if (b1Map_ is not None) and (len(b1Map_.shape) == 3):
        b1Map_ = b1Map_[..., None]  # append a singleton coil dimension
    # nc = (1 if b1Map_ is None else b1Map_.shape[3])
    # eta /= nc

    # Set up: Interior mapping.  RF and slew-rate are re-parameterized via
    # arctan so the box constraints (rfmax, smax) are enforced implicitly.
    tρ, θ = mrphy.utils.rf2tρθ(pulse.rf, rfmax)
    tsl = mrphy.utils.s2ts(mrphy.utils.g2s(pulse.gr, pulse.dt), smax)

    # enforce contiguousness of optimization variables, o.w. LBFGS may fail
    tρ, θ, tsl = tρ.contiguous(), θ.contiguous(), tsl.contiguous()

    opt_rf = optim.LBFGS([tρ, θ], lr=3., max_iter=10, history_size=30,
                         tolerance_change=1e-4,
                         line_search_fn='strong_wolfe')
    opt_sl = optim.LBFGS([tsl], lr=3., max_iter=40, history_size=60,
                         tolerance_change=1e-6,
                         line_search_fn='strong_wolfe')
    tρ.requires_grad = θ.requires_grad = tsl.requires_grad = True

    # Set up: history buffers; index 0 holds the pre-optimization state.
    length = 1+niter*(niter_gr+niter_rf)
    time_hist = np.full((length,), np.nan)
    loss_hist = np.full((length,), np.nan)
    err_hist = np.full((length,), np.nan)
    pen_hist = np.full((length,), np.nan)

    Md_, w_ = target['d_'], target['weight_'].sqrt()  # (1, nM, xy), (1, nM)
    nM = w_.numel()

    def fn_loss(cube, pulse):
        # Simulate the pulse, then score excitation error and RF penalty.
        Mr_ = cube.applypulse(pulse, b1Map_=b1Map_, doRelax=doRelax)
        loss_err, loss_pen = fn_err(Mr_, Md_, w_=w_), fn_pen(pulse.rf)
        return loss_err, loss_pen

    log_col = ('\n#iter\t ‖ elapsed time\t ‖ error\t ‖ penalty\t ‖'
               ' total loss\t ‖ avg loss')

    def logger(i, t0, loss, err, pen):
        # Print one progress row; returns `loss` unchanged for chaining.
        e, p, lo = err.item(), pen.item(), loss.item()
        msg = (f'{i}\t | {time()-t0:.3f}\t | {e:.3e}\t | {p:.3e}\t | '
               f'{lo:.3e}\t | {lo/nM:.3e}')
        print(msg)
        return loss

    loss_err, loss_pen = fn_loss(cube, pulse)
    loss = loss_err + eta*loss_pen
    logger(0, time(), loss, loss_err, loss_pen)
    time_hist[0], loss_hist[0], err_hist[0], pen_hist[0] = (
        0.0, loss.item(), loss_err.item(), loss_pen.item())

    # Optimization
    t0 = time()
    for i in range(niter):
        if not (i % 5):
            print(log_col)
        log_ind = 0

        def closure():
            # LBFGS closure: map interior variables back to physical rf/gr,
            # re-simulate, and back-propagate the total loss.
            opt_rf.zero_grad()
            opt_sl.zero_grad()
            pulse.rf = mrphy.utils.tρθ2rf(tρ, θ, rfmax)
            pulse.gr = mrphy.utils.s2g(mrphy.utils.ts2s(tsl, smax), pulse.dt)
            loss_err, loss_pen = fn_loss(cube, pulse)
            loss = loss_err + eta*loss_pen
            loss.backward()
            return loss

        print('rf-loop: ', niter_rf)
        for _ in range(niter_rf):
            opt_rf.step(closure)
            loss_err, loss_pen = fn_loss(cube, pulse)
            loss = loss_err + eta*loss_pen
            if not doQuiet:
                logger(i+1, t0, loss, loss_err, loss_pen)
            ind = i*(niter_gr+niter_rf)+log_ind+1
            time_hist[ind], loss_hist[ind], err_hist[ind], pen_hist[ind] = (
                time()-t0, loss.item(), loss_err.item(), loss_pen.item())
            log_ind += 1

        print('gr-loop: ', niter_gr)
        for _ in range(niter_gr):
            opt_sl.step(closure)
            loss_err, loss_pen = fn_loss(cube, pulse)
            loss = loss_err + eta*loss_pen
            if not doQuiet:
                logger(i+1, t0, loss, loss_err, loss_pen)
            ind = i*(niter_gr+niter_rf)+log_ind+1
            time_hist[ind], loss_hist[ind], err_hist[ind], pen_hist[ind] = (
                time()-t0, loss.item(), loss_err.item(), loss_pen.item())
            log_ind += 1

    print('\n== Results: ==')
    print(log_col)
    loss = loss_err + eta*loss_pen
    logger(i+1, t0, loss, loss_err, loss_pen)

    # Detach so the returned pulse no longer carries autograd history.
    pulse.rf.detach_()
    pulse.gr.detach_()
    optInfos = {'time_hist': time_hist, 'loss_hist': loss_hist,
                'err_hist': err_hist, 'pen_hist': pen_hist}
    return pulse, optInfos
| [
"mrphy.utils.g2s",
"mrphy.utils.tρθ2rf",
"mrphy.utils.ts2s",
"torch.optim.LBFGS",
"mrphy.utils.rf2tρθ",
"numpy.full",
"time.time"
] | [((2060, 2095), 'mrphy.utils.rf2tρθ', 'mrphy.utils.rf2tρθ', (['pulse.rf', 'rfmax'], {}), '(pulse.rf, rfmax)\n', (2078, 2095), False, 'import mrphy\n'), ((2322, 2441), 'torch.optim.LBFGS', 'optim.LBFGS', (['[tρ, θ]'], {'lr': '(3.0)', 'max_iter': '(10)', 'history_size': '(30)', 'tolerance_change': '(0.0001)', 'line_search_fn': '"""strong_wolfe"""'}), "([tρ, θ], lr=3.0, max_iter=10, history_size=30, tolerance_change\n =0.0001, line_search_fn='strong_wolfe')\n", (2333, 2441), False, 'from torch import optim, Tensor\n'), ((2498, 2614), 'torch.optim.LBFGS', 'optim.LBFGS', (['[tsl]'], {'lr': '(3.0)', 'max_iter': '(40)', 'history_size': '(60)', 'tolerance_change': '(1e-06)', 'line_search_fn': '"""strong_wolfe"""'}), "([tsl], lr=3.0, max_iter=40, history_size=60, tolerance_change=\n 1e-06, line_search_fn='strong_wolfe')\n", (2509, 2614), False, 'from torch import optim, Tensor\n'), ((2807, 2833), 'numpy.full', 'np.full', (['(length,)', 'np.nan'], {}), '((length,), np.nan)\n', (2814, 2833), True, 'import numpy as np\n'), ((2850, 2876), 'numpy.full', 'np.full', (['(length,)', 'np.nan'], {}), '((length,), np.nan)\n', (2857, 2876), True, 'import numpy as np\n'), ((2892, 2918), 'numpy.full', 'np.full', (['(length,)', 'np.nan'], {}), '((length,), np.nan)\n', (2899, 2918), True, 'import numpy as np\n'), ((2934, 2960), 'numpy.full', 'np.full', (['(length,)', 'np.nan'], {}), '((length,), np.nan)\n', (2941, 2960), True, 'import numpy as np\n'), ((3906, 3912), 'time.time', 'time', ([], {}), '()\n', (3910, 3912), False, 'from time import time\n'), ((2121, 2156), 'mrphy.utils.g2s', 'mrphy.utils.g2s', (['pulse.gr', 'pulse.dt'], {}), '(pulse.gr, pulse.dt)\n', (2136, 2156), False, 'import mrphy\n'), ((3722, 3728), 'time.time', 'time', ([], {}), '()\n', (3726, 3728), False, 'from time import time\n'), ((4123, 4155), 'mrphy.utils.tρθ2rf', 'mrphy.utils.tρθ2rf', (['tρ', 'θ', 'rfmax'], {}), '(tρ, θ, rfmax)\n', (4141, 4155), False, 'import mrphy\n'), ((4195, 4222), 'mrphy.utils.ts2s', 
'mrphy.utils.ts2s', (['tsl', 'smax'], {}), '(tsl, smax)\n', (4211, 4222), False, 'import mrphy\n'), ((3497, 3503), 'time.time', 'time', ([], {}), '()\n', (3501, 3503), False, 'from time import time\n'), ((4818, 4824), 'time.time', 'time', ([], {}), '()\n', (4822, 4824), False, 'from time import time\n'), ((5336, 5342), 'time.time', 'time', ([], {}), '()\n', (5340, 5342), False, 'from time import time\n')] |
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
from torch_model_base import TorchModelBase
from sklearn.model_selection import train_test_split
import utils
from utils import START_SYMBOL, END_SYMBOL, UNK_SYMBOL
import time
__author__ = "<NAME>"
__version__ = "CS224u, Stanford, Spring 2020"
class ColorDataset(torch.utils.data.Dataset):
    """PyTorch dataset for contextual color describers.

    Organizes raw (color-context, utterance) pairs into batches of
    Tensors of the appropriate shape and type. When this dataset is
    used with `torch.utils.data.DataLoader`, the `collate_fn` method
    must be supplied as the `DataLoader.collate_fn` argument.

    Parameters
    ----------
    color_seqs : list of lists of lists of floats, or np.array
        Dimension (m, n, p): m examples, n colors per context, p the
        length of each color representation.
    word_seqs : list of list of int
        Dimension m; individual sequence lengths may vary.
    ex_lengths : list of int
        Dimension m; the length of each sequence in `word_seqs`.
    """
    def __init__(self, color_seqs, word_seqs, ex_lengths):
        assert len(color_seqs) == len(ex_lengths)
        assert len(color_seqs) == len(word_seqs)
        self.color_seqs = color_seqs
        self.word_seqs = word_seqs
        self.ex_lengths = ex_lengths

    @staticmethod
    def collate_fn(batch):
        """Batch-construction function.

        Parameters
        ----------
        batch : iterable of (color_seq, word_seq, ex_length) triples

        Returns
        -------
        color_seqs : torch.FloatTensor
            Dimension (m, n, p).
        word_seqs : torch.LongTensor
            Padded to (m, k), k being the longest sequence in the batch.
        ex_lengths : torch.LongTensor
        targets : torch.LongTensor
            Padded to (m, k-1): `word_seqs` with the initial START_SYMBOL
            dropped, so that each timestep t is trained to predict the
            symbol at t+1.
        """
        colors, words, lengths = zip(*batch)
        color_seqs = torch.FloatTensor(colors)
        seq_tensors = [torch.LongTensor(seq) for seq in words]
        ex_lengths = torch.LongTensor(lengths)
        # Next-token targets: everything after the START_SYMBOL.
        shifted = [seq[1:] for seq in seq_tensors]
        padded_words = torch.nn.utils.rnn.pad_sequence(
            seq_tensors, batch_first=True)
        padded_targets = torch.nn.utils.rnn.pad_sequence(
            shifted, batch_first=True)
        return color_seqs, padded_words, ex_lengths, padded_targets

    def __len__(self):
        return len(self.color_seqs)

    def __getitem__(self, idx):
        return (self.color_seqs[idx], self.word_seqs[idx], self.ex_lengths[idx])
class QuadraticForm(torch.autograd.Function):
    """
    This is a custom function that, given two parameters mew and sigma, implements quadratic form.
    This function takes a representation of a color in vector space and returns a unnormalized score attributed to that color swab.
    """
    @staticmethod
    def forward(ctx, mew, co_var, color):
        """
        Compute -(color - mew)^T * co_var * (color - mew) for each color.

        mew : FloatTensor
            m x k matrix, where m is the number of examples, where k is the length of the representation of the color
        co_var: FloatTensor
            m x k x k matrix, sigma in the quadratic form.
        color: FloatTensor
            m x p x k matrix, where each example has a p vectors of a single color representations of length k
        ------
        outputs:
            m x p matrix of scores.
        """
        # Save inputs for the analytic gradients in backward().
        ctx.save_for_backward(mew, co_var, color)
        # (m, p, k): each color shifted by its example's mean.
        shifted_color = color - mew.unsqueeze(1)
        # (m, p, 1, k) @ (m, 1, k, k) -> (m, p, 1, k); the negation makes
        # larger Mahalanobis-style distances yield smaller scores.
        # NOTE(review): .squeeze(1) here is a no-op whenever p > 1; the
        # singleton dim is removed by .squeeze(2) below — verify for p == 1.
        vec_mat_mult = -torch.matmul(shifted_color.unsqueeze(2), co_var.unsqueeze(1)).squeeze(1)
        # Row-wise dot product with the shifted colors, summed over k -> (m, p).
        output = (vec_mat_mult.squeeze(2) * shifted_color).sum(2)
        #output = (torch.matmul(shifted_color.unsqueeze(1), co_var).squeeze(1) * shifted_color).sum(1)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        """
        The derivative of the quadratic form.
        input : tuple of FloatTensors
        mew : FloatTensor
            m x k matrix, where m is the number of examples, where k is the length of the representation of the color
        co_var: FloatTensor
            m x k x k matrix, sigma in the quadratic form
        color: FloatTensor
            m x k matrix, where each example has a vector of a single color representation of length k
        output : FloatTensor
            The float tensor for the gradients of a quadratic function

        Note: grad_color is always None — `color` is treated as a
        non-differentiable input (see quadratic_grad_check, which sets
        requires_grad=False on it).
        """
        mew, co_var, color = ctx.saved_tensors
        grad_mean = grad_co_var = grad_color = None
        shifted_color = color - mew.unsqueeze(1)
        if ctx.needs_input_grad[0]:
            # Calculated using chain rule df/dmew = (df/dx)*(dx/dmew)
            # d/dmew of -(x-mew)^T S (x-mew) = (S + S^T)(x-mew).
            grad_mean = torch.matmul(shifted_color.unsqueeze(2), (co_var + co_var.permute(0,2,1)).unsqueeze(1)).squeeze(2)
            grad_mean = grad_mean * grad_output.unsqueeze(2)
            #print(grad_mean.shape)
            #print(grad_mean.norm(p=2,dim=2))
            # Sum over all gradients
            grad_mean = grad_mean.sum(1)
        if ctx.needs_input_grad[1]:
            # Derivative of the matrix is the outer product of the shifted_color
            grad_co_var = -torch.einsum('bki,bkj->bkij', (shifted_color, shifted_color))
            grad_co_var = grad_co_var * grad_output.unsqueeze(2).unsqueeze(3)
            #print(grad_co_var.shape)
            #print(grad_co_var.norm(p='fro',dim=(2,3)))
            # Sum contributions across the p colors in each context.
            grad_co_var = grad_co_var.sum(1)
        return grad_mean, grad_co_var, grad_color
'''
Models
'''
class Encoder(nn.Module):
    """Simple encoder built on an LSTM.

    Runs the color-context sequences through the LSTM and returns its
    final hidden state, which seeds the decoder.

    Parameters
    ----------
    color_dim : int
        Dimensionality of each color representation.
    hidden_dim : int
        Dimensionality of the LSTM hidden state.
    force_cpu : bool
        If True, stay on CPU even when CUDA is available.
    """
    def __init__(self, color_dim, hidden_dim, force_cpu=False):
        super(Encoder, self).__init__()
        use_cuda = torch.cuda.is_available() and not force_cpu
        self.device = "cuda" if use_cuda else "cpu"
        print(self.__class__.__name__, self.device)
        self.color_dim = color_dim
        self.hidden_dim = hidden_dim
        lstm = nn.LSTM(
            input_size=color_dim,
            hidden_size=hidden_dim,
            batch_first=True)
        self.rnn = lstm.to(self.device)

    def forward(self, color_seqs):
        # Only the final hidden state (h_n, c_n) is needed downstream.
        _, hidden = self.rnn(color_seqs)
        return hidden
class Decoder(nn.Module):
    """Simple Decoder model based on an LSTM. The hidden
    representations of the LSTM are passed through a dense linear
    layer, and those logits are used to train the language model
    according to a softmax objective in `ContextualColorDescriber`.
    Parameters
    ----------
    vocab_size : int
    embed_dim : int
    hidden_dim : int
    embedding : np.array or None
        If `None`, a random embedding is created. If `np.array`, this
        value becomes the embedding.
    force_cpu : bool
        If True, stay on CPU even when CUDA is available.
    """
    def __init__(self, vocab_size, embed_dim, hidden_dim, embedding=None, force_cpu=False):
        super(Decoder, self).__init__()
        self.device = "cuda" if torch.cuda.is_available() and not force_cpu else "cpu"
        print(self.__class__.__name__,self.device)
        self.vocab_size = vocab_size
        self.embedding = self._define_embedding(embedding, vocab_size, embed_dim).to(self.device)
        # Take embed_dim from the (possibly pretrained) embedding so the
        # two stay consistent even if the caller's argument disagrees.
        self.embed_dim = self.embedding.embedding_dim
        self.hidden_dim = hidden_dim
        self.rnn = nn.LSTM(
            input_size=self.embed_dim,
            hidden_size=self.hidden_dim,
            batch_first=True).to(self.device)
        self.output_layer = nn.Linear(self.hidden_dim, self.vocab_size).to(self.device)
    def forward(self, word_seqs, seq_lengths=None, hidden=None, target_colors=None):
        """Run the decoder over `word_seqs`.

        In training mode, sequences are packed/unpacked around the LSTM
        and the logits are reshaped for `nn.CrossEntropyLoss` (classes
        on dim 1). In eval mode, raw per-step logits are returned.
        """
        embs = self.get_embeddings(word_seqs, target_colors=target_colors)
        if self.training:
            # Packed sequence for performance:
            embs = torch.nn.utils.rnn.pack_padded_sequence(
                embs, batch_first=True, lengths=seq_lengths, enforce_sorted=False)
            # RNN forward:
            output, hidden = self.rnn(embs, hidden)
            # Unpack:
            output, seq_lengths = torch.nn.utils.rnn.pad_packed_sequence(
                output, batch_first=True)
            # Output dense layer to get logits:
            output = self.output_layer(output)
            # Drop the final element:
            output = output[: , : -1, :]
            # Reshape for the sake of the loss function:
            output = output.transpose(1, 2)
            return output, hidden
        else:
            output, hidden = self.rnn(embs, hidden)
            output = self.output_layer(output)
            return output, hidden
    def get_embeddings(self, word_seqs, target_colors=None):
        """Gets the input token representations. At present, these are
        just taken directly from `self.embedding`, but `target_colors`
        can be made available in case the user wants to subclass this
        function to append these representations to each input token.
        Parameters
        ----------
        word_seqs : torch.LongTensor
            This is a padded sequence, dimension (m, k), where k is
            the length of the longest sequence in the batch.
        target_colors : torch.FloatTensor
            Dimension (m, c), where m is the number of exampkes and
            c is the dimensionality of the color representations.
        """
        if isinstance(word_seqs, nn.utils.rnn.PackedSequence):
            word_seqs, _ = torch.nn.utils.rnn.pad_packed_sequence(
                word_seqs, batch_first=True)
        return self.embedding(word_seqs).to(self.device)
    @staticmethod
    def _define_embedding(embedding, vocab_size, embed_dim):
        # Random embedding when none is supplied; otherwise wrap the
        # provided array as a frozen pretrained embedding.
        if embedding is None:
            return nn.Embedding(vocab_size, embed_dim)
        else:
            embedding = torch.FloatTensor(embedding)
            return nn.Embedding.from_pretrained(embedding)
class EncoderDecoder(nn.Module):
    """Knits an `Encoder` and a `Decoder` into the single model used by
    `ContextualColorDescriber`. This is largely a convenience: it lets
    `ContextualColorDescriber` pass around one `model` object and
    localizes the core computation in `forward`.

    Parameters
    ----------
    encoder : `Encoder`
    decoder : `Decoder`
    force_cpu : bool
        If True, stay on CPU even when CUDA is available.
    """
    def __init__(self, encoder, decoder, force_cpu=False):
        super(EncoderDecoder, self).__init__()
        on_gpu = torch.cuda.is_available() and not force_cpu
        self.device = "cuda" if on_gpu else "cpu"
        print(self.__class__.__name__, self.device)
        self.encoder = encoder
        self.decoder = decoder

    def forward(self,
            color_seqs,
            word_seqs,
            seq_lengths=None,
            hidden=None,
            targets=None):
        """Encode the color context (unless `hidden` is supplied) and
        decode the word sequences against it.

        Parameters
        ----------
        color_seqs : torch.FloatTensor
            Dimension (m, n, p): m examples, n colors, p color dims.
        word_seqs : torch.LongTensor
            Dimension (m, k), padded sequences.
        seq_lengths : torch.LongTensor or None
            True lengths of `word_seqs`; None when generating new
            sequences step by step.
        hidden : torch.FloatTensor or None
            Prior hidden state; if None it is computed from
            `color_seqs` via the encoder.
        targets : torch.LongTensor
            Passed through untouched so subclasses could use it (e.g.
            for non-teacher-forcing training).

        Returns
        -------
        output, hidden, targets — the decoder logits, the decoder
        hidden state, and `targets` exactly as passed in.
        """
        # Only encode when the caller has not already supplied a state;
        # during step-wise generation the state is threaded through.
        if hidden is None:
            hidden = self.encoder(color_seqs)
        output, hidden = self.decoder(
            word_seqs, seq_lengths=seq_lengths, hidden=hidden)
        return output, hidden, targets
class ContextualColorDescriber(TorchModelBase):
    """The primary interface to modeling contextual colors datasets.
    Parameters
    ----------
    vocab : list of str
        This should be the vocabulary. It needs to be aligned with
        `embedding` in the sense that the ith element of vocab
        should be represented by the ith row of `embedding`.
    embedding : np.array or None
        Each row represents a word in `vocab`, as described above.
    embed_dim : int
        Dimensionality for the initial embeddings. This is ignored
        if `embedding` is not None, as a specified value there
        determines this value.
    hidden_dim : int
        Dimensionality of the hidden layer.
    max_iter : int
        Maximum number of training epochs.
    eta : float
        Learning rate.
    optimizer : PyTorch optimizer
        Default is `torch.optim.Adam`.
    l2_strength : float
        L2 regularization strength. Default 0 is no regularization.
    warm_start : bool
        If True, calling `fit` will resume training with previously
        defined trainable parameters. If False, calling `fit` will
        reinitialize all trainable parameters. Default: False.
    device : 'cpu' or 'cuda'
        The default is to use 'cuda' iff available
    force_cpu : bool
        If True, stay on CPU even when CUDA is available.
    lr_rate : float
        Decay factor available to subclasses that use a scheduler.
    dropout_prob : float
        Dropout probability available to subclasses.
    """
    def __init__(self,
            vocab,
            embedding=None,
            embed_dim=50,
            hidden_dim=50,
            force_cpu=False,
            lr_rate=1.,
            dropout_prob=0.,
            **kwargs):
        super(ContextualColorDescriber, self).__init__(
            hidden_dim=hidden_dim, **kwargs)
        self.vocab = vocab
        self.embedding = embedding
        self.vocab_size = len(vocab)
        self.word2index = dict(zip(self.vocab, range(self.vocab_size)))
        self.index2word = dict(zip(range(self.vocab_size), self.vocab))
        self.embed_dim = embed_dim
        self.output_dim = self.vocab_size
        self.start_index = self.vocab.index(START_SYMBOL)
        self.end_index = self.vocab.index(END_SYMBOL)
        self.unk_index = self.vocab.index(UNK_SYMBOL)
        self.params += ['embed_dim', 'embedding']
        # The base class has this attribute, but this model doesn't,
        # so we remove it to avoid misleading people:
        delattr(self, 'hidden_activation')
        self.params.remove('hidden_activation')
        self.force_cpu = force_cpu
        self.lr_rate = lr_rate
        self.cur_epoch = 0
        self.dropout_prob = dropout_prob

    def fit(self, color_seqs, word_seqs):
        """Standard `fit` method where `color_seqs` are the inputs and
        `word_seqs` are the sequences to predict.
        Parameters
        ----------
        color_seqs : list of lists of lists of floats, or np.array
            Dimension (m, n, p) where m is the number of examples, n is
            the number of colors in each context, and p is the length
            of the color representations.
        word_seqs : list of list of int
            Dimension m, the number of examples. The length of each
            sequence can vary.
        Returns
        -------
        self
        """
        self.color_dim = len(color_seqs[0][0])
        if not self.warm_start or not hasattr(self, "model"):
            self.model = self.build_graph()
        self.opt = self.optimizer(
            self.model.parameters(),
            lr=self.eta,
            weight_decay=self.l2_strength)
        # Make sure that these attributes are aligned -- important
        # where a supplied pretrained embedding has determined
        # a `embed_dim` that might be different from the user's
        # argument.
        self.embed_dim = self.model.decoder.embed_dim
        self.model.to(self.device)
        self.model.train()
        dataset = self.build_dataset(color_seqs, word_seqs)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=True,
            drop_last=False,
            pin_memory=True,
            collate_fn=dataset.collate_fn)
        loss = nn.CrossEntropyLoss()
        for iteration in range(1, self.max_iter+1):
            epoch_error = 0.0
            start = time.time()
            for batch_colors, batch_words, batch_lens, targets in dataloader:
                batch_colors = batch_colors.to(self.device, non_blocking=True)
                batch_words = torch.nn.utils.rnn.pack_padded_sequence(
                    batch_words, batch_first=True, lengths=batch_lens, enforce_sorted=False)
                batch_words = batch_words.to(self.device, non_blocking=True)
                # Targets were already shifted by one in the collate_fn,
                # hence the `batch_lens - 1` pack lengths.
                targets = torch.nn.utils.rnn.pack_padded_sequence(
                    targets, batch_first=True,
                    lengths=batch_lens-1, enforce_sorted=False)
                targets = targets.to(self.device, non_blocking=True)
                batch_lens = batch_lens.to(self.device, non_blocking=True)
                output, _, targets = self.model(
                    color_seqs=batch_colors,
                    word_seqs=batch_words,
                    seq_lengths=batch_lens,
                    targets=targets)
                targets, _ = torch.nn.utils.rnn.pad_packed_sequence(
                    targets, batch_first=True)
                err = loss(output, targets)
                epoch_error += err.item()
                self.opt.zero_grad()
                err.backward()
                # BUG FIX: the optimizer step was missing, so the model's
                # parameters were never updated during training.
                self.opt.step()
            print("Epoch {}; train err = {}; time = {}".format(iteration, epoch_error, time.time() - start))
        return self

    def build_dataset(self, color_seqs, word_seqs):
        """Map word sequences to vocab indices (unknowns -> UNK) and
        wrap everything in a `ColorDataset`."""
        word_seqs = [[self.word2index.get(w, self.unk_index) for w in seq]
                     for seq in word_seqs]
        ex_lengths = [len(seq) for seq in word_seqs]
        return ColorDataset(color_seqs, word_seqs, ex_lengths)

    def build_graph(self):
        """Construct the `EncoderDecoder` used as `self.model`."""
        encoder = Encoder(
            color_dim=self.color_dim,
            hidden_dim=self.hidden_dim,
            force_cpu=self.force_cpu)
        decoder = Decoder(
            vocab_size=self.vocab_size,
            embed_dim=self.embed_dim,
            embedding=self.embedding,
            hidden_dim=self.hidden_dim,
            force_cpu=self.force_cpu)
        return EncoderDecoder(encoder, decoder, self.force_cpu)

    def predict(self, color_seqs, max_length=20):
        """Predict new sequences based on the color contexts in
        `color_seqs`.
        Parameters
        ----------
        color_seqs : list of lists of lists of floats, or np.array
            Dimension (m, n, p) where m is the number of examples, n is
            the number of colors in each context, and p is the length
            of the color representations.
        max_length : int
            Length of the longest sequences to create.
        Returns
        -------
        list of str
        """
        color_seqs = torch.FloatTensor(color_seqs).to(self.device)
        self.model.to(self.device)
        self.model.eval()
        preds = []
        with torch.no_grad():
            # Get the hidden representations from the color contexts:
            hidden = self.model.encoder(color_seqs)
            # Start with START_SYMBOL for all examples:
            decoder_input = [[self.start_index]] * len(color_seqs)
            decoder_input = torch.LongTensor(decoder_input).to(self.device)
            preds.append(decoder_input)
            # Now move through the remaining timesteps using the
            # previous timestep to predict the next one:
            for i in range(1, max_length):
                output, hidden, _ = self.model(
                    color_seqs=color_seqs,
                    word_seqs=decoder_input,
                    seq_lengths=None,
                    hidden=hidden)
                # Always take the highest probability token to
                # be the prediction:
                p = output.argmax(2)
                preds.append(p)
                decoder_input = p
        # Convert all the predictions from indices to elements of
        # `self.vocab`:
        preds = torch.cat(preds, axis=1)
        preds = [self._convert_predictions(p) for p in preds]
        return preds

    def _convert_predictions(self, pred):
        """Translate a tensor of vocab indices back to tokens,
        truncating after the first END_SYMBOL."""
        rep = []
        for i in pred:
            i = i.item()
            rep.append(self.index2word[i])
            if i == self.end_index:
                return rep
        return rep

    def predict_proba(self, color_seqs, word_seqs):
        """Calculate the predicted probabilties of the sequences in
        `word_seqs` given the color contexts in `color_seqs`.
        Parameters
        ----------
        color_seqs : list of lists of lists of floats, or np.array
            Dimension (m, n, p) where m is the number of examples, n is
            the number of colors in each context, and p is the length
            of the color representations.
        word_seqs : list of list of int
            Dimension m, the number of examples. The length of each
            sequence can vary.
        Returns
        -------
        list of lists of predicted probabilities. In other words,
        for each example, at each timestep, there is a probability
        distribution over the entire vocabulary.
        """
        dataset = self.build_dataset(color_seqs, word_seqs)
        self.model.to(self.device)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=False,
            drop_last=False,
            pin_memory=True,
            collate_fn=dataset.collate_fn)
        self.model.eval()
        softmax = nn.Softmax(dim=2)
        # The first timestep is always START_SYMBOL with probability 1:
        start_probs = np.zeros(self.vocab_size)
        start_probs[self.start_index] = 1.0
        all_probs = []
        with torch.no_grad():
            for batch_colors, batch_words, batch_lens, targets in dataloader:
                batch_colors = batch_colors.to(self.device)
                batch_words = torch.nn.utils.rnn.pack_padded_sequence(
                    batch_words, batch_first=True, lengths=batch_lens, enforce_sorted=False)
                batch_words = batch_words.to(self.device)
                batch_lens = batch_lens.to(self.device)
                output, _, _ = self.model(
                    color_seqs=batch_colors,
                    word_seqs=batch_words,
                    seq_lengths=batch_lens)
                probs = softmax(output)
                probs = probs.cpu().numpy()
                probs = np.insert(probs, 0, start_probs, axis=1)
                # Trim each example's rows to its true sequence length:
                all_probs += [p[: n] for p, n in zip(probs, batch_lens)]
                utils.progress_bar("all_probs {}".format(len(all_probs)))
        return all_probs

    def perplexities(self, color_seqs, word_seqs):
        """Compute the perplexity of each sequence in `word_seqs`
        given `color_seqs`. For a sequence of conditional probabilities
        p1, p2, ..., pN, the perplexity is calculated as
        (p1 * p2 * ... * pN)**(-1/N)
        Parameters
        ----------
        color_seqs : list of lists of floats, or np.array
            Dimension (m, n, p) where m is the number of examples, n is
            the number of colors in each context, and p is the length
            of the color representations.
        word_seqs : list of list of int
            Dimension m, the number of examples, and the length of
            each sequence can vary.
        Returns
        -------
        list of float
        """
        probs = self.predict_proba(color_seqs, word_seqs)
        scores = []
        for pred, seq in zip(probs, word_seqs):
            # pred is n by |V|
            # Get the probabilities corresponding to the path `seq`:
            s = np.array([t[self.word2index.get(w, self.unk_index)] for t, w in zip(pred, seq)])
            scores.append(s)
        perp = [np.prod(s)**(-1/len(s)) for s in scores]
        return perp

    def listener_predict_one(self, context, seq):
        """Choose the color in `context` that minimizes the perplexity
        of `seq` over all orderings of the context; the chosen color is
        the one in the privileged final position of the best ordering."""
        context = np.array(context)
        n_colors = len(context)
        # Get all possible context orders:
        indices = list(range(n_colors))
        orders = [list(x) for x in itertools.product(indices, repeat=n_colors)]
        # All contexts as color sequences:
        contexts = [context[x] for x in orders]
        # Repeat the single utterance the needed number of times:
        seqs = [seq] * len(contexts)
        # All perplexities:
        perps = self.perplexities(contexts, seqs)
        # Ranking, using `order_indices` rather than colors and
        # index sequences to avoid sorting errors from some versions
        # of Python:
        order_indices = range(len(orders))
        ranking = sorted(zip(perps, order_indices))
        # Return the minimum perplexity, the chosen color, and the
        # index of the chosen color in the original context:
        min_perp, order_index = ranking[0]
        pred_color = contexts[order_index][-1]
        pred_index = orders[order_index][-1]
        return min_perp, pred_color, pred_index

    def listener_accuracy(self, color_seqs, word_seqs):
        """Compute the "listener accuracy" of the model for each example.
        For the ith example, this is defined as
        prediction = max_{c in C_i} P(word_seq[i] | c)
        where C_i is every possible permutation of the three colors in
        color_seqs[i]. We take the model's prediction to be correct
        if it chooses a c in which the target is in the privileged final
        position in the color sequence. (There are two such c's, since
        the distractors can be in two orders; we give full credit if one
        of these two c's is chosen.)
        Parameters
        ----------
        color_seqs : list of lists of list of floats, or np.array
            Dimension (m, n, p) where m is the number of examples, n is
            the number of colors in each context, and p is the length
            of the color representations.
        word_seqs : list of list of int
            Dimension m, the number of examples, and the length of
            each sequence can vary.
        Returns
        -------
        list of float
        """
        correct = 0
        for color_seq, word_seq in zip(color_seqs, word_seqs):
            # The target is always the final color in the context:
            target_index = len(color_seq) - 1
            min_perp, pred, pred_index = self.listener_predict_one(
                color_seq, word_seq)
            correct += int(target_index == pred_index)
        return correct / len(color_seqs)

    def score(self, color_seqs, word_seqs):
        """Alias for `listener_accuracy`. This method is included to
        make it easier to use sklearn cross-validators, which expect
        a method called `score`.
        """
        return self.listener_accuracy(color_seqs, word_seqs)
class ColorizedNeuralListenerEncoder(Decoder):
    '''
    Simple encoder model for the neural literal/pragmatic listener.

    Reuses `Decoder`'s embedding + LSTM over the utterance, then maps
    the final LSTM output through two linear heads: one producing a
    mean vector `mew` (length color_dim) and one producing a matrix
    `sigma` (color_dim x color_dim) for the quadratic-form scorer.
    '''
    def __init__(self, color_dim, dropout_prob=0., *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.color_dim = color_dim
        # Head producing the mean vector of the quadratic form.
        self.mew_layer = nn.Linear(#self.hidden_dim*2 + self.embed_dim,
            self.embed_dim,
            color_dim).to(self.device)
        # Head producing the (flattened) matrix of the quadratic form.
        self.sigma_layer = nn.Linear(#self.hidden_dim*2 + self.embed_dim,
            self.embed_dim,
            color_dim * color_dim).to(self.device)
        self.mew_dropout = nn.Dropout(p=dropout_prob)
        self.sigma_dropout = nn.Dropout(p=dropout_prob)
    def forward(self, word_seqs, seq_lengths=None):
        """Encode `word_seqs` and return (output, mew, sigma).

        output : final-timestep LSTM output per example
        mew    : (m, color_dim) mean vectors
        sigma  : (m, color_dim, color_dim) matrices
        """
        embs = self.get_embeddings(word_seqs)
        # Packed sequence for performance:
        embs = torch.nn.utils.rnn.pack_padded_sequence(
            embs, batch_first=True, lengths=seq_lengths, enforce_sorted=False)
        # RNN forward:
        output, hidden = self.rnn(embs)
        # Unpack:
        output, seq_lengths = torch.nn.utils.rnn.pad_packed_sequence(
            output, batch_first=True)
        # Select each example's output at its final real timestep
        # (seq_lengths - 1), skipping any padding positions.
        output = output[[i for i in range(output.shape[0])],seq_lengths-1]
        # Combine all hidden states and feed into linear layer
        #hidden = [hidden[0]]
        #hidden_state = torch.cat(hidden, dim=2).squeeze(0)
        #hidden_state = torch.cat([hidden_state, output], dim=1)
        hidden_state = output
        #print(hidden_state.shape)
        self.mew_hidden = self.mew_layer(hidden_state)
        mew = self.mew_dropout(self.mew_hidden)
        self.sigma_hidden = self.sigma_layer(hidden_state)
        sigma = self.sigma_dropout(self.sigma_hidden)
        # Reshape the flat sigma into a square matrix per example.
        sigma = sigma.view(-1, self.color_dim, self.color_dim)
        return output, mew, sigma
class ColorizedNeuralListenerDecoder(nn.Module):
    '''
    Simple decoder model for the neural literal/pragmatic listener.

    Given the two statistical parameters mew and sigma produced by the
    encoder, scores every color in the context with `QuadraticForm` and
    normalizes the scores with a softmax over the colors.
    '''
    def __init__(self, force_cpu, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.force_cpu = force_cpu
        gpu_ok = torch.cuda.is_available() and not force_cpu
        self.device = "cuda" if gpu_ok else "cpu"
        self.transform_func = QuadraticForm.apply
        self.hidden_activation = nn.Softmax(dim=1)

    def forward(self, color_seqs, mew, sigma):
        '''
        color_seqs : FloatTensor
            A m x k x n tensor where m is the number of examples, k is
            the number of colors in the context, and n is the size of
            the color dimension after transform.

        Returns the softmax-normalized score for each color.
        '''
        raw_scores = self.transform_func(mew, sigma, color_seqs)
        return self.hidden_activation(raw_scores)
class ColorizedNeuralListenerEncoderDecoder(EncoderDecoder):
    """Glue module for the neural listener: encodes the utterance into
    (mew, sigma) unless they are supplied, then scores the colors."""
    def forward(self,
            color_seqs,
            word_seqs,
            seq_lengths=None,
            mew=None,
            sigma=None):
        # Encode only when the caller has not already provided both
        # quadratic-form parameters.
        if mew is None or sigma is None:
            _, mew, sigma = self.encoder(word_seqs, seq_lengths)
        return self.decoder(color_seqs, mew=mew, sigma=sigma)
class ColorizedNeuralListener(ContextualColorDescriber):
    """Neural listener: given a color context and an utterance, produces
    a softmax distribution over the colors in the context. Trained so
    that the color at index 2 (the privileged final position of a
    three-color context) receives the highest score.
    """
    def build_graph(self):
        """Construct the listener's encoder/decoder model."""
        encoder = ColorizedNeuralListenerEncoder(
            vocab_size=self.vocab_size,
            embed_dim=self.embed_dim,
            embedding=self.embedding,
            hidden_dim=self.hidden_dim,
            color_dim=self.color_dim,
            dropout_prob=self.dropout_prob,
            force_cpu=self.force_cpu)
        decoder = ColorizedNeuralListenerDecoder(
            force_cpu=self.force_cpu)
        return ColorizedNeuralListenerEncoderDecoder(encoder, decoder, self.force_cpu)
    def fit(self, color_seqs, word_seqs):
        """Standard `fit` method where `word_seqs` are the inputs and
        `color_seqs` are the sequences to predict.
        Parameters
        ----------
        color_seqs : list of lists of lists of floats, or np.array
            Dimension (m, n, p) where m is the number of examples, n is
            the number of colors in each context, and p is the length
            of the color representations.
        word_seqs : list of list of int
            Dimension m, the number of examples. The length of each
            sequence can vary.
        Returns
        -------
        self
        """
        # NOTE(review): the validation split below is computed but never
        # used in this method — confirm whether it was intended.
        color_seqs_train, color_seqs_validate, word_seqs_train, word_seqs_validate = \
            train_test_split(color_seqs, word_seqs)
        self.color_dim = len(color_seqs[0][0])
        if not self.warm_start or not hasattr(self, "model"):
            self.model = self.build_graph()
        self.opt = self.optimizer(
            self.model.parameters(),
            lr=self.eta,
            weight_decay=self.l2_strength)
        # Exponential LR decay, stepped every 15 epochs below.
        self.lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(self.opt, gamma=self.lr_rate)
        self.cur_epoch=0
        # Make sure that these attributes are aligned -- important
        # where a supplied pretrained embedding has determined
        # a `embed_dim` that might be different from the user's
        # argument.
        self.embed_dim = self.model.encoder.embed_dim
        self.model.to(self.device)
        self.model.train()
        dataset = self.build_dataset(color_seqs, word_seqs)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=True,
            drop_last=False,
            pin_memory=True,
            collate_fn=dataset.collate_fn)
        loss = nn.CrossEntropyLoss()
        for iteration in range(self.cur_epoch + 1, self.cur_epoch + self.max_iter+1):
            epoch_error = 0.0
            start = time.time()
            for batch_colors, batch_words, batch_lens, _ in dataloader:
                batch_colors = batch_colors.to(self.device, non_blocking=True)
                batch_words = torch.nn.utils.rnn.pack_padded_sequence(
                    batch_words, batch_first=True, lengths=batch_lens, enforce_sorted=False)
                batch_words = batch_words.to(self.device, non_blocking=True)
                batch_lens = batch_lens.to(self.device, non_blocking=True)
                output = self.model(
                    color_seqs=batch_colors,
                    word_seqs=batch_words,
                    seq_lengths=batch_lens)
                # The target color is always index 2 — assumes
                # three-color contexts with the target last; verify.
                color_targets = torch.ones(output.shape[0], dtype=torch.long) * 2
                color_targets = color_targets.to(self.device)
                err = loss(output, color_targets)
                epoch_error += err.item()
                self.opt.zero_grad()
                #self.model.encoder.rnn.weight_ih_l0.register_hook(lambda grad: print(grad))
                err.backward()
                self.opt.step()
            # Decay the learning rate every 15 epochs.
            if iteration % 15 == 0:
                self.lr_scheduler.step()
                for param_group in self.opt.param_groups:
                    print(param_group["lr"])
                print(output.mean(dim=0), err.item())
            #print("output:", output.argmax(1).float())
            print("Train: Epoch {}; err = {}; time = {}".format(iteration, epoch_error, time.time() - start))
        # NOTE(review): advances by 1 even though the loop above runs
        # `max_iter` epochs — looks like it should add `max_iter`; confirm.
        self.cur_epoch = self.cur_epoch + 1
        return self
    def predict(self, color_seqs, word_seqs, probabilities=False, verbose=False, max_length=20):
        """Score the colors for each (context, utterance) pair.

        Returns per-example argmax color indices, or the raw score
        rows when `probabilities=True`.
        """
        self.model.to(self.device)
        self.model.eval()
        dataset = self.build_dataset(color_seqs, word_seqs)
        # One big batch: batch_size is the full input size.
        dataloader = torch.utils.data.DataLoader(
            dataset,
            batch_size=len(color_seqs),
            shuffle=False,
            drop_last=False,
            pin_memory=True,
            collate_fn=dataset.collate_fn)
        loss = nn.CrossEntropyLoss()
        preds = []
        start = time.time()
        with torch.no_grad():
            for batch_colors, batch_words, batch_lens, _ in dataloader:
                batch_colors = batch_colors.to(self.device, non_blocking=True)
                batch_words = torch.nn.utils.rnn.pack_padded_sequence(
                    batch_words, batch_first=True, lengths=batch_lens, enforce_sorted=False)
                batch_words = batch_words.to(self.device, non_blocking=True)
                batch_lens = batch_lens.to(self.device, non_blocking=True)
                output = self.model(
                    color_seqs=batch_colors,
                    word_seqs=batch_words,
                    seq_lengths=batch_lens)
                # Loss computed only for the optional verbose report.
                color_targets = torch.ones(output.shape[0], dtype=torch.long) * 2
                color_targets = color_targets.to(self.device)
                err = loss(output, color_targets)
                #print(output.mean(dim=0), err.item())
                if verbose:
                    print("Testing err = {}; time = {}".format(err.item(), time.time() - start))
                if not probabilities:
                    output = output.argmax(1)
                p = output.cpu().detach().numpy()
                preds.extend(list(p))
        #print("new preds:", preds)
        return preds
def create_example_dataset(group_size=100, vec_dim=2):
    """Creates simple datasets in which the inputs are three-vector
    sequences and the outputs are simple character sequences, with
    the range of values in the final vector in the input determining
    the output sequence. For example, a single input/output pair
    will look like this:
    [[0.44, 0.51], [0.87, 0.89], [0.1, 0.2]], ['<s>', 'A', '</s>']
    The sequences are meaningless, as are their lengths (which were
    chosen only to be different from each other).

    Parameters
    ----------
    group_size : int
        Number of examples generated per value group.
    vec_dim : int
        Dimensionality of each color vector.

    Returns
    -------
    (color_seqs, word_seqs, vocab)
    """
    import random
    groups = ((0.0, 0.2), (0.4, 0.6), (0.8, 1.0))
    vocab = ['<s>', '</s>', 'A', 'B', '$UNK']
    seqs = [
        ['<s>', 'A', '</s>'],
        ['<s>', 'A', 'B', '</s>'],
        ['<s>', 'B', 'A', 'B', 'A', '</s>']]
    color_seqs = []
    word_seqs = []
    for i, ((l, u), seq) in enumerate(zip(groups, seqs)):
        # The two non-target groups provide the distractor ranges.
        dis_indices = list(range(len(groups)))
        dis_indices.remove(i)
        random.shuffle(dis_indices)
        disl1, disu1 = groups[dis_indices[0]]
        # FIX: was `dis2 = disl2, disu2 = ...` — a confusing chained
        # assignment whose `dis2` binding was dead (immediately shadowed
        # in the loop below). Only the range unpacking is needed.
        disl2, disu2 = groups[dis_indices[1]]
        for _ in range(group_size):
            target = utils.randvec(vec_dim, l, u)
            dis1 = utils.randvec(vec_dim, disl1, disu1)
            dis2 = utils.randvec(vec_dim, disl2, disu2)
            # Target color is always the final element of the context.
            context = [dis1, dis2, target]
            color_seqs.append(context)
        word_seqs += [seq for _ in range(group_size)]
    return color_seqs, word_seqs, vocab
def simple_example(group_size=100, vec_dim=2, initial_embedding=False):
    """Train and evaluate a `ContextualColorDescriber` on the toy
    dataset from `create_example_dataset`, printing the exact-sequence
    match count and returning the listener accuracy on the test split.
    """
    from sklearn.model_selection import train_test_split
    color_seqs, word_seqs, vocab = create_example_dataset(
        group_size=group_size, vec_dim=vec_dim)
    if initial_embedding:
        import numpy as np
        # Random pretrained-style embedding with 11 dimensions.
        embedding = np.random.uniform(
            low=-0.5, high=0.5, size=(len(vocab), 11))
    else:
        embedding = None
    X_train, X_test, y_train, y_test = train_test_split(
        color_seqs, word_seqs)
    mod = ContextualColorDescriber(
        vocab,
        embed_dim=10,
        hidden_dim=10,
        max_iter=100,
        embedding=embedding)
    mod.fit(X_train, y_train)
    preds = mod.predict(X_test)
    correct = sum(1 for gold, pred in zip(y_test, preds) if gold == pred)
    print("\nExact sequence: {} of {} correct".format(correct, len(y_test)))
    lis_acc = mod.listener_accuracy(X_test, y_test)
    print("\nListener accuracy {}".format(lis_acc))
    return lis_acc
def simple_neural_listener_example(group_size=100, vec_dim=2, initial_embedding=False):
    """Train a `ColorizedNeuralListener` on the toy dataset and report
    how often it picks the target color (the final color in each
    context).

    Returns
    -------
    int
        The number of correctly predicted examples on the train split.
    """
    from sklearn.model_selection import train_test_split
    color_seqs, word_seqs, vocab = create_example_dataset(
        group_size=group_size, vec_dim=vec_dim)
    if initial_embedding:
        import numpy as np
        embedding = np.random.normal(
            loc=0, scale=0.01, size=(len(vocab), 11))
    else:
        embedding = None
    X_train, X_test, y_train, y_test = train_test_split(
        color_seqs, word_seqs)
    mod = ColorizedNeuralListener(
        vocab,
        embed_dim=100,
        hidden_dim=100,
        max_iter=50,
        embedding=embedding)
    mod.fit(X_train, y_train)
    # BUG FIX: this called the nonexistent `mod.predict2`; the class
    # defines `predict(color_seqs, word_seqs)`.
    pred_indices = mod.predict(X_train, y_train)
    correct = 0
    # BUG FIX: the accuracy loop iterated over the word sequences
    # (`y_train`) and used `len(color_seq[0]) - 1` (the length of a
    # token string) as the target index. The target is the final color
    # of each *context*, so compare against `len(context) - 1`.
    for context, pred_index in zip(X_train, pred_indices):
        target_index = len(context) - 1
        correct += int(target_index == pred_index)
    acc = correct / len(pred_indices)
    print("\nExact sequence: {} of {} correct. Accuracy: {}".format(correct, len(pred_indices), acc))
    return correct
def quadratic_grad_check():
    """Numerically verify the analytic gradient of ``QuadraticForm.apply``.

    ``torch.autograd.gradcheck`` takes a tuple of tensors as input, checks
    that the gradient evaluated with those tensors is close enough to its
    finite-difference approximation, and returns True if every input passes.

    Returns:
        bool: True if the gradient check passed, False otherwise.
    """
    # Double precision is required for gradcheck's numerical tolerances.
    mew = torch.randn(128, 14, dtype=torch.double, requires_grad=True)
    sigma = torch.randn(128, 14, 14, dtype=torch.double, requires_grad=True)
    color = torch.randn(128, 3, 14, dtype=torch.double, requires_grad=False)
    # Renamed from `input` to avoid shadowing the builtin.
    inputs = (mew, sigma, color)
    passed = torch.autograd.gradcheck(QuadraticForm.apply, inputs, eps=1e-6, atol=1e-4)
    print(passed)
    # Returning the result makes the check usable programmatically; callers
    # that ignored the previous None return are unaffected.
    return passed
# Script entry point: runs the neural listener demo by default; the other
# demos are left commented out for manual experimentation.
if __name__ == '__main__':
    #simple_example()
    simple_neural_listener_example()
    #quadratic_test()
    #quadratic_grad_check() | [
"numpy.prod",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.LongTensor",
"utils.randvec",
"torch.nn.utils.rnn.pad_sequence",
"numpy.array",
"torch.cuda.is_available",
"torch.nn.LSTM",
"itertools.product",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.autograd.gradcheck",
"torch.... | [((40612, 40651), 'sklearn.model_selection.train_test_split', 'train_test_split', (['color_seqs', 'word_seqs'], {}), '(color_seqs, word_seqs)\n', (40628, 40651), False, 'from sklearn.model_selection import train_test_split\n'), ((41649, 41688), 'sklearn.model_selection.train_test_split', 'train_test_split', (['color_seqs', 'word_seqs'], {}), '(color_seqs, word_seqs)\n', (41665, 41688), False, 'from sklearn.model_selection import train_test_split\n'), ((42522, 42582), 'torch.randn', 'torch.randn', (['(128)', '(14)'], {'dtype': 'torch.double', 'requires_grad': '(True)'}), '(128, 14, dtype=torch.double, requires_grad=True)\n', (42533, 42582), False, 'import torch\n'), ((42592, 42656), 'torch.randn', 'torch.randn', (['(128)', '(14)', '(14)'], {'dtype': 'torch.double', 'requires_grad': '(True)'}), '(128, 14, 14, dtype=torch.double, requires_grad=True)\n', (42603, 42656), False, 'import torch\n'), ((42665, 42729), 'torch.randn', 'torch.randn', (['(128)', '(3)', '(14)'], {'dtype': 'torch.double', 'requires_grad': '(False)'}), '(128, 3, 14, dtype=torch.double, requires_grad=False)\n', (42676, 42729), False, 'import torch\n'), ((42769, 42845), 'torch.autograd.gradcheck', 'torch.autograd.gradcheck', (['QuadraticForm.apply', 'input'], {'eps': '(1e-06)', 'atol': '(0.0001)'}), '(QuadraticForm.apply, input, eps=1e-06, atol=0.0001)\n', (42793, 42845), False, 'import torch\n'), ((2867, 2896), 'torch.FloatTensor', 'torch.FloatTensor', (['color_seqs'], {}), '(color_seqs)\n', (2884, 2896), False, 'import torch\n'), ((2983, 3011), 'torch.LongTensor', 'torch.LongTensor', (['ex_lengths'], {}), '(ex_lengths)\n', (2999, 3011), False, 'import torch\n'), ((3142, 3202), 'torch.nn.utils.rnn.pad_sequence', 'torch.nn.utils.rnn.pad_sequence', (['word_seqs'], {'batch_first': '(True)'}), '(word_seqs, batch_first=True)\n', (3173, 3202), False, 'import torch\n'), ((3234, 3292), 'torch.nn.utils.rnn.pad_sequence', 'torch.nn.utils.rnn.pad_sequence', (['targets'], {'batch_first': 
'(True)'}), '(targets, batch_first=True)\n', (3265, 3292), False, 'import torch\n'), ((17975, 18123), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'drop_last': '(False)', 'pin_memory': '(True)', 'collate_fn': 'dataset.collate_fn'}), '(dataset, batch_size=self.batch_size, shuffle=\n True, drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)\n', (18002, 18123), False, 'import torch\n'), ((18208, 18229), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (18227, 18229), True, 'import torch.nn as nn\n'), ((22321, 22345), 'torch.cat', 'torch.cat', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (22330, 22345), False, 'import torch\n'), ((23629, 23778), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'drop_last': '(False)', 'pin_memory': '(True)', 'collate_fn': 'dataset.collate_fn'}), '(dataset, batch_size=self.batch_size, shuffle=\n False, drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)\n', (23656, 23778), False, 'import torch\n'), ((23893, 23910), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (23903, 23910), True, 'import torch.nn as nn\n'), ((23934, 23959), 'numpy.zeros', 'np.zeros', (['self.vocab_size'], {}), '(self.vocab_size)\n', (23942, 23959), True, 'import numpy as np\n'), ((26259, 26276), 'numpy.array', 'np.array', (['context'], {}), '(context)\n', (26267, 26276), True, 'import numpy as np\n'), ((29766, 29792), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_prob'}), '(p=dropout_prob)\n', (29776, 29792), True, 'import torch.nn as nn\n'), ((29822, 29848), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_prob'}), '(p=dropout_prob)\n', (29832, 29848), True, 'import torch.nn as nn\n'), ((30016, 30127), 'torch.nn.utils.rnn.pack_padded_sequence', 'torch.nn.utils.rnn.pack_padded_sequence', (['embs'], {'batch_first': 
'(True)', 'lengths': 'seq_lengths', 'enforce_sorted': '(False)'}), '(embs, batch_first=True, lengths=\n seq_lengths, enforce_sorted=False)\n', (30055, 30127), False, 'import torch\n'), ((30247, 30311), 'torch.nn.utils.rnn.pad_packed_sequence', 'torch.nn.utils.rnn.pad_packed_sequence', (['output'], {'batch_first': '(True)'}), '(output, batch_first=True)\n', (30285, 30311), False, 'import torch\n'), ((31633, 31650), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (31643, 31650), True, 'import torch.nn as nn\n'), ((33881, 33920), 'sklearn.model_selection.train_test_split', 'train_test_split', (['color_seqs', 'word_seqs'], {}), '(color_seqs, word_seqs)\n', (33897, 33920), False, 'from sklearn.model_selection import train_test_split\n'), ((34794, 34942), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'drop_last': '(False)', 'pin_memory': '(True)', 'collate_fn': 'dataset.collate_fn'}), '(dataset, batch_size=self.batch_size, shuffle=\n True, drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)\n', (34821, 34942), False, 'import torch\n'), ((35027, 35048), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (35046, 35048), True, 'import torch.nn as nn\n'), ((37371, 37392), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (37390, 37392), True, 'import torch.nn as nn\n'), ((37429, 37440), 'time.time', 'time.time', ([], {}), '()\n', (37438, 37440), False, 'import time\n'), ((39646, 39673), 'random.shuffle', 'random.shuffle', (['dis_indices'], {}), '(dis_indices)\n', (39660, 39673), False, 'import random\n'), ((2918, 2939), 'torch.LongTensor', 'torch.LongTensor', (['seq'], {}), '(seq)\n', (2934, 2939), False, 'import torch\n'), ((8799, 8910), 'torch.nn.utils.rnn.pack_padded_sequence', 'torch.nn.utils.rnn.pack_padded_sequence', (['embs'], {'batch_first': '(True)', 'lengths': 'seq_lengths', 'enforce_sorted': '(False)'}), 
'(embs, batch_first=True, lengths=\n seq_lengths, enforce_sorted=False)\n', (8838, 8910), False, 'import torch\n'), ((9058, 9122), 'torch.nn.utils.rnn.pad_packed_sequence', 'torch.nn.utils.rnn.pad_packed_sequence', (['output'], {'batch_first': '(True)'}), '(output, batch_first=True)\n', (9096, 9122), False, 'import torch\n'), ((10424, 10491), 'torch.nn.utils.rnn.pad_packed_sequence', 'torch.nn.utils.rnn.pad_packed_sequence', (['word_seqs'], {'batch_first': '(True)'}), '(word_seqs, batch_first=True)\n', (10462, 10491), False, 'import torch\n'), ((10695, 10730), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'embed_dim'], {}), '(vocab_size, embed_dim)\n', (10707, 10730), True, 'import torch.nn as nn\n'), ((10769, 10797), 'torch.FloatTensor', 'torch.FloatTensor', (['embedding'], {}), '(embedding)\n', (10786, 10797), False, 'import torch\n'), ((10817, 10856), 'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (['embedding'], {}), '(embedding)\n', (10845, 10856), True, 'import torch.nn as nn\n'), ((18333, 18344), 'time.time', 'time.time', ([], {}), '()\n', (18342, 18344), False, 'import time\n'), ((21254, 21269), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21267, 21269), False, 'import torch\n'), ((24042, 24057), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (24055, 24057), False, 'import torch\n'), ((34280, 34348), 'torch.optim.lr_scheduler.ExponentialLR', 'torch.optim.lr_scheduler.ExponentialLR', (['self.opt'], {'gamma': 'self.lr_rate'}), '(self.opt, gamma=self.lr_rate)\n', (34318, 34348), False, 'import torch\n'), ((35186, 35197), 'time.time', 'time.time', ([], {}), '()\n', (35195, 35197), False, 'import time\n'), ((37454, 37469), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (37467, 37469), False, 'import torch\n'), ((39832, 39860), 'utils.randvec', 'utils.randvec', (['vec_dim', 'l', 'u'], {}), '(vec_dim, l, u)\n', (39845, 39860), False, 'import utils\n'), ((39880, 39916), 'utils.randvec', 'utils.randvec', (['vec_dim', 
'disl1', 'disu1'], {}), '(vec_dim, disl1, disu1)\n', (39893, 39916), False, 'import utils\n'), ((39936, 39972), 'utils.randvec', 'utils.randvec', (['vec_dim', 'disl2', 'disu2'], {}), '(vec_dim, disl2, disu2)\n', (39949, 39972), False, 'import utils\n'), ((6198, 6259), 'torch.einsum', 'torch.einsum', (['"""bki,bkj->bkij"""', '(shifted_color, shifted_color)'], {}), "('bki,bkj->bkij', (shifted_color, shifted_color))\n", (6210, 6259), False, 'import torch\n'), ((6841, 6866), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6864, 6866), False, 'import torch\n'), ((7039, 7125), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'self.color_dim', 'hidden_size': 'self.hidden_dim', 'batch_first': '(True)'}), '(input_size=self.color_dim, hidden_size=self.hidden_dim, batch_first\n =True)\n', (7046, 7125), True, 'import torch.nn as nn\n'), ((7970, 7995), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7993, 7995), False, 'import torch\n'), ((8321, 8407), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'self.embed_dim', 'hidden_size': 'self.hidden_dim', 'batch_first': '(True)'}), '(input_size=self.embed_dim, hidden_size=self.hidden_dim, batch_first\n =True)\n', (8328, 8407), True, 'import torch.nn as nn\n'), ((8484, 8527), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_dim', 'self.vocab_size'], {}), '(self.hidden_dim, self.vocab_size)\n', (8493, 8527), True, 'import torch.nn as nn\n'), ((11460, 11485), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11483, 11485), False, 'import torch\n'), ((18566, 18682), 'torch.nn.utils.rnn.pack_padded_sequence', 'torch.nn.utils.rnn.pack_padded_sequence', (['batch_words'], {'batch_first': '(True)', 'lengths': 'batch_lens', 'enforce_sorted': '(False)'}), '(batch_words, batch_first=True,\n lengths=batch_lens, enforce_sorted=False)\n', (18605, 18682), False, 'import torch\n'), ((18820, 18937), 'torch.nn.utils.rnn.pack_padded_sequence', 
'torch.nn.utils.rnn.pack_padded_sequence', (['targets'], {'batch_first': '(True)', 'lengths': '(batch_lens - 1)', 'enforce_sorted': '(False)'}), '(targets, batch_first=True, lengths=\n batch_lens - 1, enforce_sorted=False)\n', (18859, 18937), False, 'import torch\n'), ((19403, 19468), 'torch.nn.utils.rnn.pad_packed_sequence', 'torch.nn.utils.rnn.pad_packed_sequence', (['targets'], {'batch_first': '(True)'}), '(targets, batch_first=True)\n', (19441, 19468), False, 'import torch\n'), ((21115, 21144), 'torch.FloatTensor', 'torch.FloatTensor', (['color_seqs'], {}), '(color_seqs)\n', (21132, 21144), False, 'import torch\n'), ((24229, 24345), 'torch.nn.utils.rnn.pack_padded_sequence', 'torch.nn.utils.rnn.pack_padded_sequence', (['batch_words'], {'batch_first': '(True)', 'lengths': 'batch_lens', 'enforce_sorted': '(False)'}), '(batch_words, batch_first=True,\n lengths=batch_lens, enforce_sorted=False)\n', (24268, 24345), False, 'import torch\n'), ((24762, 24802), 'numpy.insert', 'np.insert', (['probs', '(0)', 'start_probs'], {'axis': '(1)'}), '(probs, 0, start_probs, axis=1)\n', (24771, 24802), True, 'import numpy as np\n'), ((26129, 26139), 'numpy.prod', 'np.prod', (['s'], {}), '(s)\n', (26136, 26139), True, 'import numpy as np\n'), ((26428, 26471), 'itertools.product', 'itertools.product', (['indices'], {'repeat': 'n_colors'}), '(indices, repeat=n_colors)\n', (26445, 26471), False, 'import itertools\n'), ((29366, 29402), 'torch.nn.Linear', 'nn.Linear', (['self.embed_dim', 'color_dim'], {}), '(self.embed_dim, color_dim)\n', (29375, 29402), True, 'import torch.nn as nn\n'), ((29560, 29608), 'torch.nn.Linear', 'nn.Linear', (['self.embed_dim', '(color_dim * color_dim)'], {}), '(self.embed_dim, color_dim * color_dim)\n', (29569, 29608), True, 'import torch.nn as nn\n'), ((31495, 31520), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (31518, 31520), False, 'import torch\n'), ((35413, 35529), 'torch.nn.utils.rnn.pack_padded_sequence', 
'torch.nn.utils.rnn.pack_padded_sequence', (['batch_words'], {'batch_first': '(True)', 'lengths': 'batch_lens', 'enforce_sorted': '(False)'}), '(batch_words, batch_first=True,\n lengths=batch_lens, enforce_sorted=False)\n', (35452, 35529), False, 'import torch\n'), ((37654, 37770), 'torch.nn.utils.rnn.pack_padded_sequence', 'torch.nn.utils.rnn.pack_padded_sequence', (['batch_words'], {'batch_first': '(True)', 'lengths': 'batch_lens', 'enforce_sorted': '(False)'}), '(batch_words, batch_first=True,\n lengths=batch_lens, enforce_sorted=False)\n', (37693, 37770), False, 'import torch\n'), ((21546, 21577), 'torch.LongTensor', 'torch.LongTensor', (['decoder_input'], {}), '(decoder_input)\n', (21562, 21577), False, 'import torch\n'), ((35935, 35980), 'torch.ones', 'torch.ones', (['output.shape[0]'], {'dtype': 'torch.long'}), '(output.shape[0], dtype=torch.long)\n', (35945, 35980), False, 'import torch\n'), ((38144, 38189), 'torch.ones', 'torch.ones', (['output.shape[0]'], {'dtype': 'torch.long'}), '(output.shape[0], dtype=torch.long)\n', (38154, 38189), False, 'import torch\n'), ((19733, 19744), 'time.time', 'time.time', ([], {}), '()\n', (19742, 19744), False, 'import time\n'), ((36782, 36793), 'time.time', 'time.time', ([], {}), '()\n', (36791, 36793), False, 'import time\n'), ((38457, 38468), 'time.time', 'time.time', ([], {}), '()\n', (38466, 38468), False, 'import time\n')] |
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
"""Tests for tensorflow_datasets.core.features.feature."""
import pathlib
import sys
import textwrap
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_datasets import testing
from tensorflow_datasets.core import features as features_lib
# Run every test in this module under TF2 (eager) semantics.
tf.enable_v2_behavior()
class AnInputConnector(features_lib.FeatureConnector):
  """Test connector whose on-disk layout differs from its exposed dtype.

  On disk each example is stored as two int64 fields ('a', 'b'); to the
  user it is exposed as a single scalar int64 (their sum).
  """

  def get_tensor_info(self):
    # The user-facing view is a single scalar, so we override the default
    # instead of exposing the serialized {'a', 'b'} structure.
    return features_lib.TensorInfo(shape=(), dtype=tf.int64)

  def get_serialized_info(self):
    # Both on-disk fields share the same scalar int64 spec.
    return {
        key: features_lib.TensorInfo(shape=(), dtype=tf.int64)
        for key in ('a', 'b')
    }

  def encode_example(self, example_data):
    # Split the input value into the two stored fields.
    return dict(a=example_data + 1, b=example_data * 10)

  def decode_example(self, tfexample_dict):
    # Recombine the stored fields into the user-facing scalar.
    return tfexample_dict['a'] + tfexample_dict['b']
class AnOutputConnector(features_lib.FeatureConnector):
  """Test connector that scales by 10 on encode and divides back on decode."""

  def get_tensor_info(self):
    # Exposed to the user as a scalar float32.
    return features_lib.TensorInfo(shape=(), dtype=tf.float32)

  def encode_example(self, example_data):
    # Serialized form is the value scaled up by 10.
    return example_data * 10.0

  def decode_example(self, tfexample_data):
    # Undo the encode-time scaling.
    return tfexample_data / 10.0
class FeatureDictTest(testing.FeatureExpectationsTestCase):
  """Tests for `TensorInfo` equality and nested `FeaturesDict` round-trips."""
  def test_tensor_info(self):
    # TensorInfo equality is determined by both shape and dtype.
    self.assertEqual(
        features_lib.TensorInfo(shape=(None, 3), dtype=tf.string),
        features_lib.TensorInfo(shape=(None, 3), dtype=tf.string),
    )
    self.assertNotEqual(
        features_lib.TensorInfo(shape=(None, 3), dtype=tf.string),
        features_lib.TensorInfo(shape=(None, 3), dtype=tf.int32),
    )
    self.assertNotEqual(
        features_lib.TensorInfo(shape=(2, 3), dtype=tf.string),
        features_lib.TensorInfo(shape=(5, 3), dtype=tf.string),
    )
    # copy_from must produce an equal TensorInfo.
    t = features_lib.TensorInfo(shape=(None, 3), dtype=tf.string)
    self.assertEqual(t, features_lib.TensorInfo.copy_from(t))
  def test_fdict(self):
    # Round-trip a nested FeaturesDict mixing custom connectors, raw dtypes,
    # and a key containing '/'.
    self.assertFeature(
        feature=features_lib.FeaturesDict({
            'input': AnInputConnector(),
            'output': AnOutputConnector(),
            'img': {
                'size': {
                    'height': tf.int64,
                    'width': tf.int64,
                },
                'metadata/path': tf.string,
            }
        }),
        serialized_info={
            'input': {
                'a': features_lib.TensorInfo(shape=(), dtype=tf.int64),
                'b': features_lib.TensorInfo(shape=(), dtype=tf.int64),
            },
            'output': features_lib.TensorInfo(shape=(), dtype=tf.float32),
            'img': {
                'size': {
                    'height': features_lib.TensorInfo(shape=(), dtype=tf.int64),
                    'width': features_lib.TensorInfo(shape=(), dtype=tf.int64),
                },
                'metadata/path':
                    features_lib.TensorInfo(shape=(), dtype=tf.string),
            }
        },
        dtype={
            'input': tf.int64,
            'output': tf.float32,
            'img': {
                'size': {
                    'height': tf.int64,
                    'width': tf.int64,
                },
                'metadata/path': tf.string,
            }
        },
        shape={
            'input': (),
            'output': (),
            'img': {
                'size': {
                    'height': (),
                    'width': (),
                },
                'metadata/path': (),
            },
        },
        tests=[
            # Np array
            testing.FeatureExpectationItem(
                value={
                    'input': 1,
                    'output': -1,
                    'img': {
                        'size': {
                            'height': 256,
                            'width': 128,
                        },
                        'metadata/path': 'path/to/xyz.jpg',
                    }
                },
                expected_serialized={
                    'input': {
                        'a': 2,  # 1 + 1
                        'b': 10,  # 1 * 10
                    },
                    'output': -10.0,  # -1 * 10.0
                    'img': {
                        'size': {
                            'height': 256,
                            'width': 128,
                        },
                        'metadata/path': 'path/to/xyz.jpg',
                    }
                },
                expected={
                    # a = 1 + 1, b = 1 * 10 => output = a + b = 2 + 10 = 12
                    'input': 12,  # 2 + 10
                    'output': -1.0,
                    'img': {
                        'size': {
                            'height': 256,
                            'width': 128,
                        },
                        'metadata/path': tf.compat.as_bytes('path/to/xyz.jpg'),
                    },
                },
            ),
        ],
    )
  def test_feature_getitem(self):
    # FeaturesDict supports dict-style access to its sub-features.
    fdict = features_lib.FeaturesDict({
        'integer': tf.int32,
        'string': tf.string,
    })
    self.assertEqual(fdict['integer'].dtype, tf.int32)
    self.assertEqual(fdict['string'].dtype, tf.string)
  def test_feature__repr__(self):
    # repr() of nested features renders a readable, dedented tree.
    label = features_lib.ClassLabel(names=['m', 'f'])
    feature_dict = features_lib.FeaturesDict({
        'metadata':
            features_lib.Sequence({
                'frame': features_lib.Image(shape=(32, 32, 3)),
            }),
        'label':
            features_lib.Sequence(label),
    })
    self.assertEqual(
        repr(feature_dict),
        textwrap.dedent("""\
        FeaturesDict({
            'label': Sequence(ClassLabel(shape=(), dtype=tf.int64, num_classes=2)),
            'metadata': Sequence({
                'frame': Image(shape=(32, 32, 3), dtype=tf.uint8),
            }),
        })"""),
    )
  def test_feature_save_load_metadata_slashes(self):
    # Feature keys containing '/' must survive a save/load round-trip.
    with testing.tmp_dir() as data_dir:
      fd = features_lib.FeaturesDict({
          'image/frame': features_lib.Image(shape=(32, 32, 3)),
          'image/label': features_lib.ClassLabel(num_classes=2),
      })
      fd.save_metadata(data_dir)
      fd.load_metadata(data_dir)
class FeatureTensorTest(testing.FeatureExpectationsTestCase):
  """Tests for the `Tensor` feature across shapes, dtypes, and error cases."""
  def test_shape_static(self):
    # Fully static shape: both numpy and plain Python inputs are accepted.
    np_input = np.random.rand(2, 3).astype(np.float32)
    array_input = [
        [1, 2, 3],
        [4, 5, 6],
    ]
    self.assertFeature(
        feature=features_lib.Tensor(shape=(2, 3), dtype=tf.float32),
        dtype=tf.float32,
        shape=(2, 3),
        tests=[
            # Np array
            testing.FeatureExpectationItem(
                value=np_input,
                expected=np_input,
            ),
            # Python array
            testing.FeatureExpectationItem(
                value=array_input,
                expected=array_input,
            ),
            # Invalid dtype
            testing.FeatureExpectationItem(
                # On Windows, np default dtype is `int32`
                value=np.random.randint(256, size=(2, 3), dtype=np.int64),
                raise_cls=ValueError,
                raise_msg='int64 do not match',
            ),
            # Invalid shape
            testing.FeatureExpectationItem(
                value=np.random.rand(2, 4).astype(np.float32),
                raise_cls=ValueError,
                raise_msg='are incompatible',
            ),
        ],
    )
  def test_shape_dynamic(self):
    # First dimension is None: inputs of different leading sizes are valid.
    np_input_dynamic_1 = np.random.randint(256, size=(2, 3, 2), dtype=np.int32)
    np_input_dynamic_2 = np.random.randint(256, size=(5, 3, 2), dtype=np.int32)
    self.assertFeature(
        feature=features_lib.Tensor(shape=(None, 3, 2), dtype=tf.int32),
        dtype=tf.int32,
        shape=(None, 3, 2),
        tests=[
            testing.FeatureExpectationItem(
                value=np_input_dynamic_1,
                expected=np_input_dynamic_1,
            ),
            testing.FeatureExpectationItem(
                value=np_input_dynamic_2,
                expected=np_input_dynamic_2,
            ),
            # Invalid shape
            testing.FeatureExpectationItem(
                value=np.random.randint(256, size=(2, 3, 1), dtype=np.int32),
                raise_cls=ValueError,
                raise_msg='are incompatible',
            ),
        ])
  def test_bool_flat(self):
    # Scalar booleans: numpy and Python bool inputs both round-trip.
    self.assertFeature(
        feature=features_lib.Tensor(shape=(), dtype=tf.bool),
        dtype=tf.bool,
        shape=(),
        tests=[
            testing.FeatureExpectationItem(
                value=np.array(True),
                expected=True,
            ),
            testing.FeatureExpectationItem(
                value=np.array(False),
                expected=False,
            ),
            testing.FeatureExpectationItem(
                value=True,
                expected=True,
            ),
            testing.FeatureExpectationItem(
                value=False,
                expected=False,
            ),
        ])
  def test_bool_array(self):
    # 1-D boolean vectors.
    self.assertFeature(
        feature=features_lib.Tensor(shape=(3,), dtype=tf.bool),
        dtype=tf.bool,
        shape=(3,),
        tests=[
            testing.FeatureExpectationItem(
                value=np.array([True, True, False]),
                expected=[True, True, False],
            ),
            testing.FeatureExpectationItem(
                value=[True, False, True],
                expected=[True, False, True],
            ),
        ])
  def test_string(self):
    # Strings are stored as bytes; unicode input is encoded on the way in.
    nonunicode_text = 'hello world'
    unicode_text = u'你好'
    self.assertFeature(
        feature=features_lib.Tensor(shape=(), dtype=tf.string),
        shape=(),
        dtype=tf.string,
        tests=[
            # Non-unicode
            testing.FeatureExpectationItem(
                value=nonunicode_text,
                expected=tf.compat.as_bytes(nonunicode_text),
            ),
            # Unicode
            testing.FeatureExpectationItem(
                value=unicode_text,
                expected=tf.compat.as_bytes(unicode_text),
            ),
            # Empty string
            testing.FeatureExpectationItem(
                value='',
                expected=b'',
            ),
            # Trailing zeros
            testing.FeatureExpectationItem(
                value=b'abc\x00\x00',
                expected=b'abc\x00\x00',
            ),
        ],
    )
    self.assertFeature(
        feature=features_lib.Tensor(shape=(2, 1), dtype=tf.string),
        shape=(2, 1),
        dtype=tf.string,
        tests=[
            testing.FeatureExpectationItem(
                value=[[nonunicode_text], [unicode_text]],
                expected=[
                    [tf.compat.as_bytes(nonunicode_text)],
                    [tf.compat.as_bytes(unicode_text)],
                ],
            ),
            testing.FeatureExpectationItem(
                value=[nonunicode_text, unicode_text],  # Wrong shape
                raise_cls=ValueError,
                raise_msg='(2,) and (2, 1) must have the same rank',
            ),
            testing.FeatureExpectationItem(
                value=[['some text'], [123]],  # Wrong dtype
                raise_cls=TypeError,
                raise_msg='Expected binary or unicode string, got 123',
            ),
        ],
    )
  def test_repr_tensor(self):
    # Top level Tensor is printed expanded
    self.assertEqual(
        repr(features_lib.Tensor(shape=(), dtype=tf.int32)),
        'Tensor(shape=(), dtype=tf.int32)',
    )
    # Sequences colapse tensor repr
    self.assertEqual(
        repr(features_lib.Sequence(tf.int32)),
        'Sequence(tf.int32)',
    )
    class ChildTensor(features_lib.Tensor):
      pass
    self.assertEqual(
        repr(
            features_lib.FeaturesDict({
                'colapsed': features_lib.Tensor(shape=(), dtype=tf.int32),
                # Tensor with defined shape are printed expanded
                'noncolapsed': features_lib.Tensor(shape=(1,), dtype=tf.int32),
                # Tensor inherited are expanded
                'child': ChildTensor(shape=(), dtype=tf.int32),
            })),
        textwrap.dedent("""\
        FeaturesDict({
            'child': ChildTensor(shape=(), dtype=tf.int32),
            'colapsed': tf.int32,
            'noncolapsed': Tensor(shape=(1,), dtype=tf.int32),
        })"""),
    )
def test_custom_feature_connector(tmp_path: pathlib.Path):
  """Loading a config that references an unregistered connector imports it."""
  custom_module = 'tensorflow_datasets.core.features.test_feature'
  custom_qualname = f'{custom_module}.CustomFeatureConnector'
  # Precondition: the custom connector module has never been imported.
  assert custom_module not in sys.modules
  assert custom_qualname not in features_lib.FeatureConnector._registered_features
  # Persist a plain Tensor feature so the custom module stays unimported.
  saved = features_lib.Tensor(shape=(), dtype=tf.int32)
  saved.save_config(tmp_path)
  # Rewrite features.json so it claims the custom connector produced it.
  config_file = tmp_path / 'features.json'
  patched = config_file.read_text().replace(
      'tensorflow_datasets.core.features.feature.Tensor',
      custom_qualname,
  )
  config_file.write_text(patched)
  # from_config must import and register the custom connector on demand.
  loaded = features_lib.FeatureConnector.from_config(tmp_path)
  assert custom_module in sys.modules
  assert custom_qualname in features_lib.FeatureConnector._registered_features
  # The deserialized feature is an instance of the custom connector.
  from tensorflow_datasets.core.features import test_feature  # pylint: disable=g-import-not-at-top
  assert isinstance(loaded, test_feature.CustomFeatureConnector)
# Test-runner entry point.
if __name__ == '__main__':
  testing.test_main()
| [
"tensorflow_datasets.testing.FeatureExpectationItem",
"textwrap.dedent",
"tensorflow_datasets.core.features.Sequence",
"tensorflow.compat.v2.compat.as_bytes",
"tensorflow_datasets.testing.test_main",
"numpy.random.rand",
"tensorflow_datasets.core.features.TensorInfo",
"tensorflow_datasets.core.feature... | [((886, 909), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.enable_v2_behavior', ([], {}), '()\n', (907, 909), True, 'import tensorflow.compat.v2 as tf\n'), ((13807, 13852), 'tensorflow_datasets.core.features.Tensor', 'features_lib.Tensor', ([], {'shape': '()', 'dtype': 'tf.int32'}), '(shape=(), dtype=tf.int32)\n', (13826, 13852), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((14320, 14371), 'tensorflow_datasets.core.features.FeatureConnector.from_config', 'features_lib.FeatureConnector.from_config', (['tmp_path'], {}), '(tmp_path)\n', (14361, 14371), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((14805, 14824), 'tensorflow_datasets.testing.test_main', 'testing.test_main', ([], {}), '()\n', (14822, 14824), False, 'from tensorflow_datasets import testing\n'), ((1267, 1316), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '()', 'dtype': 'tf.int64'}), '(shape=(), dtype=tf.int64)\n', (1290, 1316), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((1957, 2008), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '()', 'dtype': 'tf.float32'}), '(shape=(), dtype=tf.float32)\n', (1980, 2008), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((2751, 2808), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '(None, 3)', 'dtype': 'tf.string'}), '(shape=(None, 3), dtype=tf.string)\n', (2774, 2808), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((5971, 6040), 'tensorflow_datasets.core.features.FeaturesDict', 'features_lib.FeaturesDict', (["{'integer': tf.int32, 'string': tf.string}"], {}), "({'integer': tf.int32, 'string': tf.string})\n", (5996, 6040), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((6222, 6263), 'tensorflow_datasets.core.features.ClassLabel', 
'features_lib.ClassLabel', ([], {'names': "['m', 'f']"}), "(names=['m', 'f'])\n", (6245, 6263), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((8475, 8529), 'numpy.random.randint', 'np.random.randint', (['(256)'], {'size': '(2, 3, 2)', 'dtype': 'np.int32'}), '(256, size=(2, 3, 2), dtype=np.int32)\n', (8492, 8529), True, 'import numpy as np\n'), ((8555, 8609), 'numpy.random.randint', 'np.random.randint', (['(256)'], {'size': '(5, 3, 2)', 'dtype': 'np.int32'}), '(256, size=(5, 3, 2), dtype=np.int32)\n', (8572, 8609), True, 'import numpy as np\n'), ((1377, 1426), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '()', 'dtype': 'tf.int64'}), '(shape=(), dtype=tf.int64)\n', (1400, 1426), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((1441, 1490), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '()', 'dtype': 'tf.int64'}), '(shape=(), dtype=tf.int64)\n', (1464, 1490), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((2285, 2342), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '(None, 3)', 'dtype': 'tf.string'}), '(shape=(None, 3), dtype=tf.string)\n', (2308, 2342), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((2352, 2409), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '(None, 3)', 'dtype': 'tf.string'}), '(shape=(None, 3), dtype=tf.string)\n', (2375, 2409), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((2451, 2508), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '(None, 3)', 'dtype': 'tf.string'}), '(shape=(None, 3), dtype=tf.string)\n', (2474, 2508), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((2518, 2574), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], 
{'shape': '(None, 3)', 'dtype': 'tf.int32'}), '(shape=(None, 3), dtype=tf.int32)\n', (2541, 2574), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((2616, 2670), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '(2, 3)', 'dtype': 'tf.string'}), '(shape=(2, 3), dtype=tf.string)\n', (2639, 2670), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((2680, 2734), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '(5, 3)', 'dtype': 'tf.string'}), '(shape=(5, 3), dtype=tf.string)\n', (2703, 2734), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((2833, 2869), 'tensorflow_datasets.core.features.TensorInfo.copy_from', 'features_lib.TensorInfo.copy_from', (['t'], {}), '(t)\n', (2866, 2869), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((6572, 6840), 'textwrap.dedent', 'textwrap.dedent', (['""" FeaturesDict({\n \'label\': Sequence(ClassLabel(shape=(), dtype=tf.int64, num_classes=2)),\n \'metadata\': Sequence({\n \'frame\': Image(shape=(32, 32, 3), dtype=tf.uint8),\n }),\n })"""'], {}), '(\n """ FeaturesDict({\n \'label\': Sequence(ClassLabel(shape=(), dtype=tf.int64, num_classes=2)),\n \'metadata\': Sequence({\n \'frame\': Image(shape=(32, 32, 3), dtype=tf.uint8),\n }),\n })"""\n )\n', (6587, 6840), False, 'import textwrap\n'), ((6903, 6920), 'tensorflow_datasets.testing.tmp_dir', 'testing.tmp_dir', ([], {}), '()\n', (6918, 6920), False, 'from tensorflow_datasets import testing\n'), ((13183, 13406), 'textwrap.dedent', 'textwrap.dedent', (['""" FeaturesDict({\n \'child\': ChildTensor(shape=(), dtype=tf.int32),\n \'colapsed\': tf.int32,\n \'noncolapsed\': Tensor(shape=(1,), dtype=tf.int32),\n })"""'], {}), '(\n """ FeaturesDict({\n \'child\': ChildTensor(shape=(), dtype=tf.int32),\n \'colapsed\': tf.int32,\n \'noncolapsed\': Tensor(shape=(1,), dtype=tf.int32),\n })"""\n )\n', (13198, 13406), 
False, 'import textwrap\n'), ((6476, 6504), 'tensorflow_datasets.core.features.Sequence', 'features_lib.Sequence', (['label'], {}), '(label)\n', (6497, 6504), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((7289, 7309), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (7303, 7309), True, 'import numpy as np\n'), ((7434, 7485), 'tensorflow_datasets.core.features.Tensor', 'features_lib.Tensor', ([], {'shape': '(2, 3)', 'dtype': 'tf.float32'}), '(shape=(2, 3), dtype=tf.float32)\n', (7453, 7485), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((8651, 8706), 'tensorflow_datasets.core.features.Tensor', 'features_lib.Tensor', ([], {'shape': '(None, 3, 2)', 'dtype': 'tf.int32'}), '(shape=(None, 3, 2), dtype=tf.int32)\n', (8670, 8706), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((9398, 9442), 'tensorflow_datasets.core.features.Tensor', 'features_lib.Tensor', ([], {'shape': '()', 'dtype': 'tf.bool'}), '(shape=(), dtype=tf.bool)\n', (9417, 9442), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((10079, 10125), 'tensorflow_datasets.core.features.Tensor', 'features_lib.Tensor', ([], {'shape': '(3,)', 'dtype': 'tf.bool'}), '(shape=(3,), dtype=tf.bool)\n', (10098, 10125), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((10631, 10677), 'tensorflow_datasets.core.features.Tensor', 'features_lib.Tensor', ([], {'shape': '()', 'dtype': 'tf.string'}), '(shape=(), dtype=tf.string)\n', (10650, 10677), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((11467, 11517), 'tensorflow_datasets.core.features.Tensor', 'features_lib.Tensor', ([], {'shape': '(2, 1)', 'dtype': 'tf.string'}), '(shape=(2, 1), dtype=tf.string)\n', (11486, 11517), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((12453, 12498), 'tensorflow_datasets.core.features.Tensor', 'features_lib.Tensor', ([], {'shape': 
'()', 'dtype': 'tf.int32'}), '(shape=(), dtype=tf.int32)\n', (12472, 12498), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((12623, 12654), 'tensorflow_datasets.core.features.Sequence', 'features_lib.Sequence', (['tf.int32'], {}), '(tf.int32)\n', (12644, 12654), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((3494, 3545), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '()', 'dtype': 'tf.float32'}), '(shape=(), dtype=tf.float32)\n', (3517, 3545), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((6998, 7035), 'tensorflow_datasets.core.features.Image', 'features_lib.Image', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (7016, 7035), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((7062, 7100), 'tensorflow_datasets.core.features.ClassLabel', 'features_lib.ClassLabel', ([], {'num_classes': '(2)'}), '(num_classes=2)\n', (7085, 7100), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((7586, 7651), 'tensorflow_datasets.testing.FeatureExpectationItem', 'testing.FeatureExpectationItem', ([], {'value': 'np_input', 'expected': 'np_input'}), '(value=np_input, expected=np_input)\n', (7616, 7651), False, 'from tensorflow_datasets import testing\n'), ((7739, 7810), 'tensorflow_datasets.testing.FeatureExpectationItem', 'testing.FeatureExpectationItem', ([], {'value': 'array_input', 'expected': 'array_input'}), '(value=array_input, expected=array_input)\n', (7769, 7810), False, 'from tensorflow_datasets import testing\n'), ((8788, 8878), 'tensorflow_datasets.testing.FeatureExpectationItem', 'testing.FeatureExpectationItem', ([], {'value': 'np_input_dynamic_1', 'expected': 'np_input_dynamic_1'}), '(value=np_input_dynamic_1, expected=\n np_input_dynamic_1)\n', (8818, 8878), False, 'from tensorflow_datasets import testing\n'), ((8934, 9024), 'tensorflow_datasets.testing.FeatureExpectationItem', 
'testing.FeatureExpectationItem', ([], {'value': 'np_input_dynamic_2', 'expected': 'np_input_dynamic_2'}), '(value=np_input_dynamic_2, expected=\n np_input_dynamic_2)\n', (8964, 9024), False, 'from tensorflow_datasets import testing\n'), ((9771, 9828), 'tensorflow_datasets.testing.FeatureExpectationItem', 'testing.FeatureExpectationItem', ([], {'value': '(True)', 'expected': '(True)'}), '(value=True, expected=True)\n', (9801, 9828), False, 'from tensorflow_datasets import testing\n'), ((9889, 9948), 'tensorflow_datasets.testing.FeatureExpectationItem', 'testing.FeatureExpectationItem', ([], {'value': '(False)', 'expected': '(False)'}), '(value=False, expected=False)\n', (9919, 9948), False, 'from tensorflow_datasets import testing\n'), ((10356, 10448), 'tensorflow_datasets.testing.FeatureExpectationItem', 'testing.FeatureExpectationItem', ([], {'value': '[True, False, True]', 'expected': '[True, False, True]'}), '(value=[True, False, True], expected=[True, \n False, True])\n', (10386, 10448), False, 'from tensorflow_datasets import testing\n'), ((11139, 11193), 'tensorflow_datasets.testing.FeatureExpectationItem', 'testing.FeatureExpectationItem', ([], {'value': '""""""', 'expected': "b''"}), "(value='', expected=b'')\n", (11169, 11193), False, 'from tensorflow_datasets import testing\n'), ((11283, 11360), 'tensorflow_datasets.testing.FeatureExpectationItem', 'testing.FeatureExpectationItem', ([], {'value': "b'abc\\x00\\x00'", 'expected': "b'abc\\x00\\x00'"}), "(value=b'abc\\x00\\x00', expected=b'abc\\x00\\x00')\n", (11313, 11360), False, 'from tensorflow_datasets import testing\n'), ((11873, 12021), 'tensorflow_datasets.testing.FeatureExpectationItem', 'testing.FeatureExpectationItem', ([], {'value': '[nonunicode_text, unicode_text]', 'raise_cls': 'ValueError', 'raise_msg': '"""(2,) and (2, 1) must have the same rank"""'}), "(value=[nonunicode_text, unicode_text],\n raise_cls=ValueError, raise_msg='(2,) and (2, 1) must have the same rank')\n", (11903, 12021), 
False, 'from tensorflow_datasets import testing\n'), ((12109, 12251), 'tensorflow_datasets.testing.FeatureExpectationItem', 'testing.FeatureExpectationItem', ([], {'value': "[['some text'], [123]]", 'raise_cls': 'TypeError', 'raise_msg': '"""Expected binary or unicode string, got 123"""'}), "(value=[['some text'], [123]], raise_cls=\n TypeError, raise_msg='Expected binary or unicode string, got 123')\n", (12139, 12251), False, 'from tensorflow_datasets import testing\n'), ((3334, 3383), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '()', 'dtype': 'tf.int64'}), '(shape=(), dtype=tf.int64)\n', (3357, 3383), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((3406, 3455), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '()', 'dtype': 'tf.int64'}), '(shape=(), dtype=tf.int64)\n', (3429, 3455), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((3827, 3877), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '()', 'dtype': 'tf.string'}), '(shape=(), dtype=tf.string)\n', (3850, 3877), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((6392, 6429), 'tensorflow_datasets.core.features.Image', 'features_lib.Image', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (6410, 6429), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((12854, 12899), 'tensorflow_datasets.core.features.Tensor', 'features_lib.Tensor', ([], {'shape': '()', 'dtype': 'tf.int32'}), '(shape=(), dtype=tf.int32)\n', (12873, 12899), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((12997, 13044), 'tensorflow_datasets.core.features.Tensor', 'features_lib.Tensor', ([], {'shape': '(1,)', 'dtype': 'tf.int32'}), '(shape=(1,), dtype=tf.int32)\n', (13016, 13044), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((3624, 3673), 
'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '()', 'dtype': 'tf.int64'}), '(shape=(), dtype=tf.int64)\n', (3647, 3673), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((3704, 3753), 'tensorflow_datasets.core.features.TensorInfo', 'features_lib.TensorInfo', ([], {'shape': '()', 'dtype': 'tf.int64'}), '(shape=(), dtype=tf.int64)\n', (3727, 3753), True, 'from tensorflow_datasets.core import features as features_lib\n'), ((8011, 8062), 'numpy.random.randint', 'np.random.randint', (['(256)'], {'size': '(2, 3)', 'dtype': 'np.int64'}), '(256, size=(2, 3), dtype=np.int64)\n', (8028, 8062), True, 'import numpy as np\n'), ((9162, 9216), 'numpy.random.randint', 'np.random.randint', (['(256)'], {'size': '(2, 3, 1)', 'dtype': 'np.int32'}), '(256, size=(2, 3, 1), dtype=np.int32)\n', (9179, 9216), True, 'import numpy as np\n'), ((9567, 9581), 'numpy.array', 'np.array', (['(True)'], {}), '(True)\n', (9575, 9581), True, 'import numpy as np\n'), ((9695, 9710), 'numpy.array', 'np.array', (['(False)'], {}), '(False)\n', (9703, 9710), True, 'import numpy as np\n'), ((10252, 10281), 'numpy.array', 'np.array', (['[True, True, False]'], {}), '([True, True, False])\n', (10260, 10281), True, 'import numpy as np\n'), ((10872, 10907), 'tensorflow.compat.v2.compat.as_bytes', 'tf.compat.as_bytes', (['nonunicode_text'], {}), '(nonunicode_text)\n', (10890, 10907), True, 'import tensorflow.compat.v2 as tf\n'), ((11051, 11083), 'tensorflow.compat.v2.compat.as_bytes', 'tf.compat.as_bytes', (['unicode_text'], {}), '(unicode_text)\n', (11069, 11083), True, 'import tensorflow.compat.v2 as tf\n'), ((5811, 5848), 'tensorflow.compat.v2.compat.as_bytes', 'tf.compat.as_bytes', (['"""path/to/xyz.jpg"""'], {}), "('path/to/xyz.jpg')\n", (5829, 5848), True, 'import tensorflow.compat.v2 as tf\n'), ((8259, 8279), 'numpy.random.rand', 'np.random.rand', (['(2)', '(4)'], {}), '(2, 4)\n', (8273, 8279), True, 'import numpy as np\n'), ((11733, 
11768), 'tensorflow.compat.v2.compat.as_bytes', 'tf.compat.as_bytes', (['nonunicode_text'], {}), '(nonunicode_text)\n', (11751, 11768), True, 'import tensorflow.compat.v2 as tf\n'), ((11792, 11824), 'tensorflow.compat.v2.compat.as_bytes', 'tf.compat.as_bytes', (['unicode_text'], {}), '(unicode_text)\n', (11810, 11824), True, 'import tensorflow.compat.v2 as tf\n')] |
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
def summary(model, x, *args, **kwargs):
    """Summarize the given input model.

    Summarized information are 1) output shape, 2) kernel shape,
    3) number of the parameters and 4) operations (Mult-Adds).
    The summary is printed to stdout as a formatted table.

    Args:
        model (Module): Model to summarize
        x (Tensor): Input tensor of the model with [N, C, H, W] shape
            dtype and device have to match to the model
        args, kwargs: Other argument used in `model.forward` function
    """
    def register_hook(module):
        # Forward hook: appends one summary row for the module it fires on.
        def hook(module, inputs, outputs):
            # e.g. "<class 'torch.nn.modules.conv.Conv2d'>" -> "Conv2d"
            cls_name = str(module.__class__).split(".")[-1].split("'")[0]
            module_idx = len(summary)
            key = "{}_{}".format(module_idx, cls_name)
            info = OrderedDict()
            info["id"] = id(module)
            if isinstance(outputs, (list, tuple)):
                # Multi-output modules: report the shape of the first output only.
                info["out"] = list(outputs[0].size())
            else:
                info["out"] = list(outputs.size())
            info["ksize"] = "-"
            info["inner"] = OrderedDict()
            info["params"], info["macs"] = 0, 0
            for name, param in module.named_parameters():
                info["params"] += param.nelement()
                if "weight" == name:
                    ksize = list(param.size())
                    # to make [in_shape, out_shape, ksize, ksize]
                    if len(ksize) > 1:
                        ksize[0], ksize[1] = ksize[1], ksize[0]
                    info["ksize"] = ksize
                    # ignore N, C when calculate Mult-Adds in ConvNd:
                    # weights are applied once per spatial output position
                    if "Conv" in cls_name:
                        info["macs"] += int(param.nelement() * np.prod(info["out"][2:]))
                    else:
                        info["macs"] += param.nelement()
                # RNN modules have inner weights such as weight_ih_l0
                elif "weight" in name:
                    info["inner"][name] = list(param.size())
                    info["macs"] += param.nelement()
            # if the current module is already-used, mark as "(recursive)"
            # check if this module has params
            if list(module.named_parameters()):
                for v in summary.values():
                    if info["id"] == v["id"]:
                        info["params"] = "(recursive)"
            # parameter-free modules (e.g. activations) show "-" in both columns
            if info["params"] == 0:
                info["params"], info["macs"] = "-", "-"
            summary[key] = info
        # ignore Sequential and ModuleList: only hook leaf modules
        if not module._modules:
            hooks.append(module.register_forward_hook(hook))
    hooks = []
    summary = OrderedDict()
    model.apply(register_hook)
    # one forward pass in eval mode populates `summary` via the hooks
    model.eval()
    with torch.no_grad():
        input_params = x if isinstance(x, list) or isinstance(x, tuple) else [x]
        model(*input_params) if not (kwargs or args) else model(
            *input_params, *args, **kwargs
        )
    # detach all hooks so the model is left unmodified
    for hook in hooks:
        hook.remove()
    print("-" * 100)
    print(
        "{:<15} {:>20} {:>20} {:>20} {:>20}".format(
            "Layer", "Kernel Shape", "Output Shape", "# Params (K)", "# Mult-Adds (M)"
        )
    )
    print("=" * 100)
    total_params, total_macs = 0, 0
    for layer, info in summary.items():
        repr_ksize = str(info["ksize"])
        repr_out = str(info["out"])
        repr_params = info["params"]
        repr_macs = info["macs"]
        # strings ("-" / "(recursive)") are excluded from the totals
        if isinstance(repr_params, (int, float)):
            total_params += repr_params
            # displayed in thousands of parameters
            repr_params = "{0:,.2f}".format(repr_params / 1000)
        if isinstance(repr_macs, (int, float)):
            total_macs += repr_macs
            # displayed in millions of multiply-adds
            repr_macs = "{0:,.2f}".format(repr_macs / 1000000)
        print(
            "{:<15} {:>20} {:>20} {:>20} {:>20}".format(
                layer, repr_ksize, repr_out, repr_params, repr_macs
            )
        )
        # for RNN, describe inner weights (i.e. w_hh, w_ih)
        for inner_name, inner_shape in info["inner"].items():
            print("  {:<13} {:>20}".format(inner_name, str(inner_shape)))
    print("=" * 100)
    print("# Params: {0:,.2f}K".format(total_params / 1000))
    print("# Mult-Adds: {0:,.2f}M".format(total_macs / 1000000))
    print("-" * 100)
| [
"torch.no_grad",
"collections.OrderedDict",
"numpy.prod"
] | [((2695, 2708), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2706, 2708), False, 'from collections import OrderedDict\n'), ((2767, 2782), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2780, 2782), False, 'import torch\n'), ((827, 840), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (838, 840), False, 'from collections import OrderedDict\n'), ((1112, 1125), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1123, 1125), False, 'from collections import OrderedDict\n'), ((1755, 1779), 'numpy.prod', 'np.prod', (["info['out'][2:]"], {}), "(info['out'][2:])\n", (1762, 1779), True, 'import numpy as np\n')] |
import argparse
import os
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from PIL import Image
from utils import visualization_utils as vis_util
from utils import label_map_util
# The Object Detection API used here needs TF >= 1.4 (string compare is fine within 1.x).
if tf.__version__ < '1.4.0':
    raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')
# Number of object classes declared in the label map.
NUM_CLASSES = 5
def parse_args(check=True):
    """Parse the command-line flags this script requires.

    Args:
        check: Unused; kept for interface compatibility.

    Returns:
        Tuple of (namespace with `output_dir` and `dataset_dir`,
        list of unrecognized argv tokens).
    """
    arg_parser = argparse.ArgumentParser()
    for flag in ('--output_dir', '--dataset_dir'):
        arg_parser.add_argument(flag, type=str, required=True)
    return arg_parser.parse_known_args()
if __name__ == '__main__':
    FLAGS, unparsed = parse_args()
    # Frozen inference graph exported by the TF Object Detection API.
    PATH_TO_CKPT = os.path.join(FLAGS.output_dir, 'exported_graphs/frozen_inference_graph.pb')
    PATH_TO_LABELS = os.path.join(FLAGS.dataset_dir, 'labels_items.txt')
    # Load the frozen GraphDef into a dedicated tf.Graph.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    # Map numeric class ids to human-readable category names.
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    def load_image_into_numpy_array(image):
        # PIL image -> (height, width, 3) uint8 array.
        (im_width, im_height) = image.size
        return np.array(image.getdata()).reshape(
            (im_height, im_width, 3)).astype(np.uint8)
    test_img_path = os.path.join(FLAGS.dataset_dir, 'test.jpg')
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Input placeholder and output tensors of the imported graph.
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            image = Image.open(test_img_path)
            image_np = load_image_into_numpy_array(image)
            # The model expects a batch dimension: (1, H, W, 3).
            image_np_expanded = np.expand_dims(image_np, axis=0)
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            print('skye boxes=', boxes)
            # scores[0][0] = 0.99 # No predictions shown on output.png? Accuracy too low — the visualizer drops boxes below a score threshold.
            print('skye scores=',scores)
            print('skye classes=', classes)
            print('skye category_index=', category_index)
            image_np = vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            plt.imsave(os.path.join(FLAGS.output_dir, 'output.png'), image_np)
| [
"utils.label_map_util.load_labelmap",
"tensorflow.Graph",
"PIL.Image.open",
"argparse.ArgumentParser",
"tensorflow.Session",
"os.path.join",
"tensorflow.GraphDef",
"utils.label_map_util.convert_label_map_to_categories",
"numpy.squeeze",
"utils.label_map_util.create_category_index",
"tensorflow.i... | [((392, 417), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (415, 417), False, 'import argparse\n'), ((708, 783), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""exported_graphs/frozen_inference_graph.pb"""'], {}), "(FLAGS.output_dir, 'exported_graphs/frozen_inference_graph.pb')\n", (720, 783), False, 'import os\n'), ((805, 856), 'os.path.join', 'os.path.join', (['FLAGS.dataset_dir', '"""labels_items.txt"""'], {}), "(FLAGS.dataset_dir, 'labels_items.txt')\n", (817, 856), False, 'import os\n'), ((880, 890), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (888, 890), True, 'import tensorflow as tf\n'), ((1196, 1240), 'utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['PATH_TO_LABELS'], {}), '(PATH_TO_LABELS)\n', (1224, 1240), False, 'from utils import label_map_util\n'), ((1258, 1372), 'utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'NUM_CLASSES', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n NUM_CLASSES, use_display_name=True)\n', (1304, 1372), False, 'from utils import label_map_util\n'), ((1389, 1437), 'utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (1425, 1437), False, 'from utils import label_map_util\n'), ((1652, 1695), 'os.path.join', 'os.path.join', (['FLAGS.dataset_dir', '"""test.jpg"""'], {}), "(FLAGS.dataset_dir, 'test.jpg')\n", (1664, 1695), False, 'import os\n'), ((953, 966), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (964, 966), True, 'import tensorflow as tf\n'), ((980, 1014), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['PATH_TO_CKPT', '"""rb"""'], {}), "(PATH_TO_CKPT, 'rb')\n", (994, 1014), True, 'import tensorflow as tf\n'), ((1136, 1178), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (1155, 
1178), True, 'import tensorflow as tf\n'), ((1749, 1782), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'detection_graph'}), '(graph=detection_graph)\n', (1759, 1782), True, 'import tensorflow as tf\n'), ((2240, 2265), 'PIL.Image.open', 'Image.open', (['test_img_path'], {}), '(test_img_path)\n', (2250, 2265), False, 'from PIL import Image\n'), ((2356, 2388), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (2370, 2388), True, 'import numpy as np\n'), ((2990, 3007), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (3000, 3007), True, 'import numpy as np\n'), ((3079, 3097), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (3089, 3097), True, 'import numpy as np\n'), ((3237, 3281), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""output.png"""'], {}), "(FLAGS.output_dir, 'output.png')\n", (3249, 3281), False, 'import os\n'), ((3025, 3044), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (3035, 3044), True, 'import numpy as np\n')] |
import numpy as np
from matrix import gemm
def main():
    """Run `gemm` on random 10x10 integer matrices and verify against NumPy.

    Prints "Correct result!" when the custom kernel matches numpy.matmul,
    otherwise "Wrong result!".
    """
    shape = (10, 10)
    result = np.zeros(shape, np.int32)
    expected = np.zeros(shape, np.int32)
    # Random integer operands drawn from [0, 10).
    lhs = np.random.randint(0, 10, shape, np.int32)
    rhs = np.random.randint(0, 10, shape, np.int32)
    gemm(result, lhs, rhs)
    # Reference product computed in-place by NumPy.
    np.matmul(lhs, rhs, expected)
    verdict = "Correct result!" if np.array_equal(result, expected) else "Wrong result!"
    print(verdict)
if __name__ == "__main__":
main()
| [
"matrix.gemm",
"numpy.zeros",
"numpy.matmul",
"numpy.random.randint"
] | [((65, 93), 'numpy.zeros', 'np.zeros', (['(10, 10)', 'np.int32'], {}), '((10, 10), np.int32)\n', (73, 93), True, 'import numpy as np\n'), ((105, 133), 'numpy.zeros', 'np.zeros', (['(10, 10)', 'np.int32'], {}), '((10, 10), np.int32)\n', (113, 133), True, 'import numpy as np\n'), ((201, 245), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(10, 10)', 'np.int32'], {}), '(0, 10, (10, 10), np.int32)\n', (218, 245), True, 'import numpy as np\n'), ((254, 298), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(10, 10)', 'np.int32'], {}), '(0, 10, (10, 10), np.int32)\n', (271, 298), True, 'import numpy as np\n'), ((308, 321), 'matrix.gemm', 'gemm', (['c', 'a', 'b'], {}), '(c, a, b)\n', (312, 321), False, 'from matrix import gemm\n'), ((345, 366), 'numpy.matmul', 'np.matmul', (['a', 'b', 'c_np'], {}), '(a, b, c_np)\n', (354, 366), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Batch Exploration
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import video_utils
from tensor2tensor.layers import modalities
from tensor2tensor.utils import registry
import h5py
import tensorflow as tf
# DATA_URL = ("/iris/u/asc8/taskexp/our-smm/exps/mean_block1/max_cms_seed0_block1_grads1/img_memory/0mem.hdf5") # just try this for now
NUMEP = 500 # Each im buffer has 500 eps: each with 50 steps -> 500 * 50 = 25000 frames each
EPLEN = 50 # Needs to be 50, should loop through 5 10-step trajs at a time
@registry.register_problem
class BatchExplorationBlock1Max(video_utils.VideoProblem):
  """Video problem built from batch-exploration replay buffers (HDF5)."""
  @property
  def num_channels(self):
    # RGB frames.
    return 3
  @property
  def frame_height(self):
    return 64
  @property
  def frame_width(self):
    return 64
  @property
  def is_generate_per_split(self):
    return True
  # num_hdf * 25000 (num of images per image memory hdf = NUMEP * EPLEN)
  @property
  def total_number_of_frames(self):
    # 5 seeds * 4 buffers of exploration data + 9 buffers of random data.
    return 5*4*25000 + 9*25000
  # Not sure if this is correct? We don't have videos
  def max_frames_per_video(self, hparams):
    return 50
  @property
  def random_skip(self):
    return False
  @property
  def only_keep_videos_from_0th_frame(self):
    return False
  @property
  def use_not_breaking_batching(self):
    return True
  @property
  def extra_reading_spec(self):
    """Additional data fields to store on disk and their decoders."""
    data_fields = {
        "frame_number": tf.FixedLenFeature([1], tf.int64),
        "action":tf.FixedLenFeature([5], tf.float32),
    }
    decoders = {
        "frame_number": tf.contrib.slim.tfexample_decoder.Tensor(
            tensor_key="frame_number"),
        "action": tf.contrib.slim.tfexample_decoder.Tensor(tensor_key="action"),
    }
    return data_fields, decoders
  def hparams(self, defaults, unused_model_hparams):
    """Configure modalities/vocab sizes on the defaults hparams in place."""
    p = defaults
    p.modality = {"inputs": modalities.ModalityType.VIDEO,
                  "action":modalities.ModalityType.REAL_L2_LOSS,
                  "targets": modalities.ModalityType.VIDEO}
    p.vocab_size = {"inputs": 256,
                    "action": 5,
                    "targets": 256}
  def parse_frames(self, f, dataset_split):
    """Yield (step, frame, action) tuples from one HDF5 replay buffer.

    First 80% of episodes go to TRAIN, the rest to the eval split.
    """
    ims = f['sim']['states'][:]
    next_ims = f['sim']['next_states'][:]
    acts = f['sim']['actions'][:]
    ims = np.transpose(ims, (0, 1, 3, 4, 2)) # Should be (500, 50, 64, 64, 3)
    if dataset_split == problem.DatasetSplit.TRAIN:
      start_ep, end_ep = 0, int(NUMEP * 0.8) # 400 eps
    else:
      start_ep, end_ep = int(NUMEP * 0.8), NUMEP # 100
    for ep in range(start_ep, end_ep): # goes from 0 to 399, each 50 step traj
      for step in range(EPLEN): # int(EPLEN/5)
        # Frames are stored in [0, 1]; rescale to [0, 255].
        frame = ims[ep, step] * 255.0 # should be between 0 and 255
        action = acts[ep, step]
        yield step, frame, action
  def generate_samples(self, data_dir, tmp_dir, dataset_split):
    """Yield example dicts from every exploration and random-data buffer."""
    for i in range(5): # Number of seeds
      for j in range(4): # Number of buffers per seed
        path = '/iris/u/hjnam/task_exp/batch_exploration/exps/08_15_block1/max_sn__seed{}_block1/img_memory/{}mem.hdf5'.format(i, j)
        # path= DATA_URL
        f = h5py.File(path, "r")
        for frame_number, frame, action in self.parse_frames(f, dataset_split): # frame number needs to be 0, ..., 49
          yield {
              "frame_number": [frame_number],
              "frame": frame,
              "action": action.tolist(),
          }
    # Load in random data
    for j in range(9): # 10 buffers
      path = '/iris/u/hjnam/task_exp/batch_exploration/exps/08_19_block_random/_seed0_block2/img_memory/{}mem.hdf5'.format(j)
      # path= DATA_URL
      f = h5py.File(path, "r")
      for frame_number, frame, action in self.parse_frames(f, dataset_split): # frame number needs to be 0, ..., 49
        yield {
            "frame_number": [frame_number],
            "frame": frame,
            "action": action.tolist(),
        }
| [
"h5py.File",
"numpy.transpose",
"tensorflow.contrib.slim.tfexample_decoder.Tensor",
"tensorflow.FixedLenFeature"
] | [((3334, 3368), 'numpy.transpose', 'np.transpose', (['ims', '(0, 1, 3, 4, 2)'], {}), '(ims, (0, 1, 3, 4, 2))\n', (3346, 3368), True, 'import numpy as np\n'), ((2384, 2417), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.int64'], {}), '([1], tf.int64)\n', (2402, 2417), True, 'import tensorflow as tf\n'), ((2440, 2475), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[5]', 'tf.float32'], {}), '([5], tf.float32)\n', (2458, 2475), True, 'import tensorflow as tf\n'), ((2536, 2603), 'tensorflow.contrib.slim.tfexample_decoder.Tensor', 'tf.contrib.slim.tfexample_decoder.Tensor', ([], {'tensor_key': '"""frame_number"""'}), "(tensor_key='frame_number')\n", (2576, 2603), True, 'import tensorflow as tf\n'), ((2644, 2705), 'tensorflow.contrib.slim.tfexample_decoder.Tensor', 'tf.contrib.slim.tfexample_decoder.Tensor', ([], {'tensor_key': '"""action"""'}), "(tensor_key='action')\n", (2684, 2705), True, 'import tensorflow as tf\n'), ((4923, 4943), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (4932, 4943), False, 'import h5py\n'), ((4301, 4321), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (4310, 4321), False, 'import h5py\n')] |
import cv2
import numpy
from logger import logger
class Sentence(object):
    """A group of words bounded by a single contour in the page image."""

    def __init__(self):
        # Centre of the contour's minimal bounding rectangle; -1 until compute() runs.
        self.x = -1
        self.y = -1
        # Words that fall inside the contour, sorted by position after compute().
        self.words = []
        # Binary mask of the contour region; None until compute() runs.
        self.mask = None

    def compute(self, grayed, contour, all_words):
        """Locate the sentence centre, gather its words and build its mask.

        Noisy words (word.noise truthy) are logged and excluded.
        """
        centre, _size, _angle = cv2.minAreaRect(contour)
        (self.x, self.y) = centre
        for candidate in all_words:
            # Negative test result means the point lies outside the contour.
            if cv2.pointPolygonTest(contour, candidate.pt, False) < 0:
                continue
            if candidate.noise:
                logger.warn('Skipping noisy word: {word}'.format(
                    word=candidate
                ))
            else:
                self.words.append(candidate)
        sentence_mask = numpy.zeros(grayed.shape, numpy.uint8)
        cv2.drawContours(sentence_mask, [contour], -1, (255, 0, 0), -1)
        self.mask = cv2.convertScaleAbs(numpy.int32(sentence_mask))
        # Order words left-to-right, then top-to-bottom.
        self.words = sorted(self.words, key=lambda w: (w.pt[0], w.pt[1]))
        logger.debug('Compute sentence with {count} words: {words}'.format(
            count=len(self.words),
            words=self.export()
        ))

    def __repr__(self):
        return '%s(%d, %r, %r)' % (
            self.__class__, len(self.words), self.x, self.y)

    def export(self):
        """Return the exported form of every word in this sentence."""
        return [word.export() for word in self.words]
| [
"cv2.convertScaleAbs",
"cv2.drawContours",
"cv2.pointPolygonTest",
"numpy.int32",
"cv2.minAreaRect",
"numpy.zeros"
] | [((281, 305), 'cv2.minAreaRect', 'cv2.minAreaRect', (['contour'], {}), '(contour)\n', (296, 305), False, 'import cv2\n'), ((644, 682), 'numpy.zeros', 'numpy.zeros', (['grayed.shape', 'numpy.uint8'], {}), '(grayed.shape, numpy.uint8)\n', (655, 682), False, 'import numpy\n'), ((691, 746), 'cv2.drawContours', 'cv2.drawContours', (['mask8', '[contour]', '(-1)', '(255, 0, 0)', '(-1)'], {}), '(mask8, [contour], -1, (255, 0, 0), -1)\n', (707, 746), False, 'import cv2\n'), ((764, 782), 'numpy.int32', 'numpy.int32', (['mask8'], {}), '(mask8)\n', (775, 782), False, 'import numpy\n'), ((803, 830), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['mask32'], {}), '(mask32)\n', (822, 830), False, 'import cv2\n'), ((352, 397), 'cv2.pointPolygonTest', 'cv2.pointPolygonTest', (['contour', 'word.pt', '(False)'], {}), '(contour, word.pt, False)\n', (372, 397), False, 'import cv2\n')] |
import argparse
import tqdm
import random
import math
import os
import pandas as pd
import sklearn
import timeit
import numpy as np
import struct
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import pickle
import torch
import pandas as pd
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from asteroid_dataset import *
import torch.optim as optim
from classifier import *
import visdom
from torchnet.meter import ConfusionMeter
from torchnet.meter import AUCMeter
from sklearn.metrics import matthews_corrcoef
confusion_matrix = ConfusionMeter(2)
# temp_confusion_matrix = ConfusionMeter(2)
auc_meter = AUCMeter()
# confusion_matrix_validation = ConfusionMeter(2)
vis = visdom.Visdom()
draw_graph = None
draw_accuracy = None
draw_roc_curve = None
csv_file = "classifications.csv"
root_dir = "data/"
# hyperparameters
batch_size = 159
learning_rate = 0.001
epoch_num = 50
# experiment parameters
real_exp = True
experiment_num = 19
save_model = real_exp
validate_frequency = 5
draw_graph = None
draw_accuracy = None
draw_validation_graphs = None
# file
if real_exp:
f = open("saved_output/experiment_%d.out" % experiment_num, 'w+')
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_dataset = AsteroidDataset(csv_file=csv_file, root_dir=root_dir, train=True, transform=transform)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
validation_dataset = AsteroidDataset(csv_file=csv_file, root_dir=root_dir, train=False, transform=transform)
validation_dataloader = DataLoader(validation_dataset, batch_size=batch_size, shuffle=True, num_workers=1)
classifier = CNN()
criterion = nn.NLLLoss()
optimizer = optim.Adam(classifier.parameters(), lr=learning_rate)
def model_save(model, path):
pickle.dump(model, open(path, 'wb'))
def adjust_learning_rate(optimizer, epoch):
lr = learning_rate * (0.1 ** (epoch // 1))
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
print('Starting training...')
start = timeit.default_timer()
if real_exp:
f.write('Starting training...\n')
total_iter = 0
for epoch in range(epoch_num):
corrects = 0.0
for i, data in enumerate(train_dataloader, 0):
if total_iter % validate_frequency == 0:
data = next(iter(validation_dataloader))
inputs = data["image"]
labels = data["class"]
inputs, labels = Variable(inputs), Variable(labels)
output = classifier(inputs)
loss = criterion(output, labels)
temp = output[:, 1].data.numpy()
temp = np.apply_along_axis(lambda x: np.rint(np.exp(x)), 0, temp)
temp = torch.from_numpy(temp).long()
num = torch.sum(temp == labels.data)
if type(num) is not int:
num = num.item()
accuracy = num/ float(batch_size)
update = None if draw_validation_graphs is None else 'append'
draw_validation_graphs = vis.line(X = np.array([total_iter]), Y = np.array([loss.data[0]]), win = draw_validation_graphs, update = update, opts=dict(title="Validation NLL loss"))
print("[EPOCH %d ITER %d] Validation Loss: %f (accuracy: %f)" % (epoch, i, loss.data[0], accuracy))
if real_exp:
f.write("[EPOCH %d ITER %d] Validation Loss: %f (accuracy: %f)\n" % (epoch, i, loss.data[0], accuracy))
# confusion_matrix_validation.add(torch.Tensor(output.data), labels.data)
inputs = data["image"]
labels = data["class"]
inputs, labels = Variable(inputs), Variable(labels)
optimizer.zero_grad()
output = classifier(inputs)
loss = criterion(output, labels)
update = None if draw_graph is None else 'append'
draw_graph = vis.line(X = np.array([total_iter]), Y = np.array([loss.data[0]]), win = draw_graph, update = update, opts=dict(title="NLL loss"))
temp = output[:, 1].data.numpy()
temp = np.apply_along_axis(lambda x: np.rint(np.exp(x)), 0, temp)
temp = torch.from_numpy(temp).long()
num = torch.sum(temp == labels.data)
if type(num) is not int:
num = num.item()
accuracy = num/ float(batch_size)
update = None if draw_accuracy is None else 'append'
draw_accuracy = vis.line(X = np.array([total_iter]), Y = np.array([accuracy]), win = draw_accuracy, update = update, opts=dict(title="Accuracy"))
print("[EPOCH %d ITER %d] Loss: %f (accuracy: %f)" % (epoch, i, loss.data[0], accuracy))
if real_exp:
f.write("[EPOCH %d ITER %d] Loss: %f (accuracy: %f)\n" % (epoch, i, loss.data[0], accuracy))
# mcoref = matthews_corrcoef(labels.data, output.data)
# print("matthews coefficient (training): %f" % mcoref)
# if real_exp:
# f.write("matthews coefficient (training): %f\n" % mcoref)
# confusion matrix calculations
if epoch == epoch_num -1:
confusion_matrix.add(torch.Tensor(output.data), labels.data)
print (output[:, 1].data.shape)
auc_meter.add(output[:, 1].data, labels.data)
area, tpr, fpr = auc_meter.value()
mcoref = matthews_corrcoef(labels.data, temp)
print("matthews coefficient (end of training): %f" % mcoref)
print("area under roc curve: %f" % area)
if real_exp:
f.write("matthews coefficient (training): %f\n" % mcoref)
f.write("area under roc curve: %f" % area)
update = None if draw_roc_curve is None else 'append'
draw_roc_curve = vis.line(X = fpr, Y = tpr, win = draw_roc_curve, update = update, opts=dict(title="ROC curve"))
# temp_confusion_matrix.add(torch.Tensor(output.data), labels.data)
# tpr = temp_confusion_matrix.conf[0][0]/float(temp_confusion_matrix.conf[0][0] + temp_confusion_matrix.conf[0][1])
# fpr = temp_confusion_matrix.conf[1][0]/float(temp_confusion_matrix.conf[1][0] + temp_confusion_matrix.conf[1][1])
# update = None if draw_roc_curve is None else 'append'
# draw_roc_curve = vis.line(X = np.array([fpr]), Y = np.array([tpr]), win = draw_roc_curve, update = update, opts=dict(title="ROC curve"))
loss.backward()
optimizer.step()
temp = timeit.default_timer()
if epoch % 30 == 0 and epoch != 0:
print("TRAINING AT EPOCH %d TOOK %f" % (epoch, (temp-start)))
total_iter += 1
adjust_learning_rate(optimizer, epoch)
stop = timeit.default_timer()
print("TRAINING DONE: TOOK %f s" % (stop-start))
if save_model:
model_save(classifier, "saved_models/experiment_"+str(experiment_num))
# print confusion matrix to verify model
print("CONFUSION MATRIX FOR TRAINING")
if real_exp:
f.write("CONFUSION MATRIX FOR TRAINING")
print(confusion_matrix.conf)
if real_exp:
f.write(np.array2string(confusion_matrix.conf, separator=', '))
# print("CONFUSION MATRIX FOR VALIDATION")
# if real_exp:
# f.write("CONFUSION MATRIX FOR VALIDATION")
# print(confusion_matrix_validation.conf)
# if real_exp:
# f.write(confusion_matrix_validation.conf)
| [
"torchnet.meter.ConfusionMeter",
"timeit.default_timer",
"numpy.array2string",
"torch.Tensor",
"torch.from_numpy",
"numpy.exp",
"numpy.array",
"torch.nn.NLLLoss",
"torchnet.meter.AUCMeter",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torch.sum",
"torchvision.transform... | [((641, 658), 'torchnet.meter.ConfusionMeter', 'ConfusionMeter', (['(2)'], {}), '(2)\n', (655, 658), False, 'from torchnet.meter import ConfusionMeter\n'), ((715, 725), 'torchnet.meter.AUCMeter', 'AUCMeter', ([], {}), '()\n', (723, 725), False, 'from torchnet.meter import AUCMeter\n'), ((782, 797), 'visdom.Visdom', 'visdom.Visdom', ([], {}), '()\n', (795, 797), False, 'import visdom\n'), ((1496, 1573), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(1)'}), '(train_dataset, batch_size=batch_size, shuffle=True, num_workers=1)\n', (1506, 1573), False, 'from torch.utils.data import DataLoader\n'), ((1708, 1794), 'torch.utils.data.DataLoader', 'DataLoader', (['validation_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(1)'}), '(validation_dataset, batch_size=batch_size, shuffle=True,\n num_workers=1)\n', (1718, 1794), False, 'from torch.utils.data import DataLoader\n'), ((1825, 1837), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (1835, 1837), True, 'import torch.nn as nn\n'), ((2195, 2217), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2215, 2217), False, 'import timeit\n'), ((6758, 6780), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6778, 6780), False, 'import timeit\n'), ((1288, 1309), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1307, 1309), True, 'import torchvision.transforms as transforms\n'), ((1316, 1370), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1336, 1370), True, 'import torchvision.transforms as transforms\n'), ((4285, 4315), 'torch.sum', 'torch.sum', (['(temp == labels.data)'], {}), '(temp == labels.data)\n', (4294, 4315), False, 'import torch\n'), ((6543, 6565), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6563, 
6565), False, 'import timeit\n'), ((7115, 7169), 'numpy.array2string', 'np.array2string', (['confusion_matrix.conf'], {'separator': '""", """'}), "(confusion_matrix.conf, separator=', ')\n", (7130, 7169), True, 'import numpy as np\n'), ((2901, 2931), 'torch.sum', 'torch.sum', (['(temp == labels.data)'], {}), '(temp == labels.data)\n', (2910, 2931), False, 'import torch\n'), ((3754, 3770), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (3762, 3770), False, 'from torch.autograd import Variable\n'), ((3772, 3788), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (3780, 3788), False, 'from torch.autograd import Variable\n'), ((5426, 5462), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['labels.data', 'temp'], {}), '(labels.data, temp)\n', (5443, 5462), False, 'from sklearn.metrics import matthews_corrcoef\n'), ((2590, 2606), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (2598, 2606), False, 'from torch.autograd import Variable\n'), ((2608, 2624), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (2616, 2624), False, 'from torch.autograd import Variable\n'), ((3990, 4012), 'numpy.array', 'np.array', (['[total_iter]'], {}), '([total_iter])\n', (3998, 4012), True, 'import numpy as np\n'), ((4018, 4042), 'numpy.array', 'np.array', (['[loss.data[0]]'], {}), '([loss.data[0]])\n', (4026, 4042), True, 'import numpy as np\n'), ((4240, 4262), 'torch.from_numpy', 'torch.from_numpy', (['temp'], {}), '(temp)\n', (4256, 4262), False, 'import torch\n'), ((4527, 4549), 'numpy.array', 'np.array', (['[total_iter]'], {}), '([total_iter])\n', (4535, 4549), True, 'import numpy as np\n'), ((4555, 4575), 'numpy.array', 'np.array', (['[accuracy]'], {}), '([accuracy])\n', (4563, 4575), True, 'import numpy as np\n'), ((5215, 5240), 'torch.Tensor', 'torch.Tensor', (['output.data'], {}), '(output.data)\n', (5227, 5240), False, 'import torch\n'), ((2853, 2875), 'torch.from_numpy', 
'torch.from_numpy', (['temp'], {}), '(temp)\n', (2869, 2875), False, 'import torch\n'), ((3179, 3201), 'numpy.array', 'np.array', (['[total_iter]'], {}), '([total_iter])\n', (3187, 3201), True, 'import numpy as np\n'), ((3207, 3231), 'numpy.array', 'np.array', (['[loss.data[0]]'], {}), '([loss.data[0]])\n', (3215, 3231), True, 'import numpy as np\n'), ((4204, 4213), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (4210, 4213), True, 'import numpy as np\n'), ((2813, 2822), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2819, 2822), True, 'import numpy as np\n')] |
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import os.path as osp
from init_args_serializer import Serializable
import rcsenv
from pyrado.environments.rcspysim.base import RcsSim
from pyrado.tasks.base import Task
from pyrado.tasks.desired_state import RadiallySymmDesStateTask
from pyrado.tasks.reward_functions import ExpQuadrErrRewFcn
rcsenv.addResourcePath(rcsenv.RCSPYSIM_CONFIG_PATH)
rcsenv.addResourcePath(osp.join(rcsenv.RCSPYSIM_CONFIG_PATH, "QuanserQube"))
class QQubeRcsSim(RcsSim, Serializable):
"""
Swing-up task on the underactuated Quanser Qube a.k.a. Furuta pendulum, simulated in Rcs
.. note::
The action is different to the `QQubeSim` in the directory `sim_py`.
"""
name: str = "qq-rcs"
def __init__(self, task_args: [dict, None] = None, max_dist_force: [float, None] = None, **kwargs):
"""
Constructor
:param task_args: arguments for the task construction, pass `None` to use default values
state_des: 4-dim np.ndarray
Q: 4x4-dim np.ndarray
R: 4x4-dim np.ndarray
:param max_dist_force: maximum disturbance force, pass `None` for no disturbance
:param kwargs: keyword arguments forwarded to the RcsSim
"""
Serializable._init(self, locals())
# Forward to RcsSim's constructor
RcsSim.__init__(
self,
task_args=task_args if task_args is not None else dict(),
envType="QuanserQube",
graphFileName="gQuanserQube_trqCtrl.xml",
physicsConfigFile="pQuanserQube.xml",
actionModelType="joint_acc",
**kwargs,
)
# Setup disturbance
self._max_dist_force = max_dist_force
def _create_task(self, task_args: dict) -> Task:
# Define the task including the reward function
state_des = task_args.get("state_des", np.array([0.0, np.pi, 0.0, 0.0]))
Q = task_args.get("Q", np.diag([2e-1, 1.0, 2e-2, 5e-3]))
R = task_args.get("R", np.diag([3e-3]))
return RadiallySymmDesStateTask(self.spec, state_des, ExpQuadrErrRewFcn(Q, R), idcs=[1])
def _disturbance_generator(self) -> (np.ndarray, None):
if self._max_dist_force is None:
return None
else:
# Sample angle and force uniformly
angle = np.random.uniform(-np.pi, np.pi)
force = np.random.uniform(0, self._max_dist_force)
return np.array([force * np.sin(angle), force * np.cos(angle), 0])
| [
"rcsenv.addResourcePath",
"os.path.join",
"numpy.diag",
"numpy.array",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin",
"pyrado.tasks.reward_functions.ExpQuadrErrRewFcn"
] | [((2048, 2099), 'rcsenv.addResourcePath', 'rcsenv.addResourcePath', (['rcsenv.RCSPYSIM_CONFIG_PATH'], {}), '(rcsenv.RCSPYSIM_CONFIG_PATH)\n', (2070, 2099), False, 'import rcsenv\n'), ((2123, 2175), 'os.path.join', 'osp.join', (['rcsenv.RCSPYSIM_CONFIG_PATH', '"""QuanserQube"""'], {}), "(rcsenv.RCSPYSIM_CONFIG_PATH, 'QuanserQube')\n", (2131, 2175), True, 'import os.path as osp\n'), ((3641, 3673), 'numpy.array', 'np.array', (['[0.0, np.pi, 0.0, 0.0]'], {}), '([0.0, np.pi, 0.0, 0.0])\n', (3649, 3673), True, 'import numpy as np\n'), ((3706, 3738), 'numpy.diag', 'np.diag', (['[0.2, 1.0, 0.02, 0.005]'], {}), '([0.2, 1.0, 0.02, 0.005])\n', (3713, 3738), True, 'import numpy as np\n'), ((3771, 3787), 'numpy.diag', 'np.diag', (['[0.003]'], {}), '([0.003])\n', (3778, 3787), True, 'import numpy as np\n'), ((3850, 3873), 'pyrado.tasks.reward_functions.ExpQuadrErrRewFcn', 'ExpQuadrErrRewFcn', (['Q', 'R'], {}), '(Q, R)\n', (3867, 3873), False, 'from pyrado.tasks.reward_functions import ExpQuadrErrRewFcn\n'), ((4092, 4124), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi'], {}), '(-np.pi, np.pi)\n', (4109, 4124), True, 'import numpy as np\n'), ((4145, 4187), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self._max_dist_force'], {}), '(0, self._max_dist_force)\n', (4162, 4187), True, 'import numpy as np\n'), ((4225, 4238), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (4231, 4238), True, 'import numpy as np\n'), ((4248, 4261), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4254, 4261), True, 'import numpy as np\n')] |
"""Numerical sklearn privacy metric modules and their attackers."""
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR
from sdmetrics.single_table.privacy.base import NumericalPrivacyMetric, PrivacyAttackerModel
class NumericalSklearnAttacker(PrivacyAttackerModel):
"""Base class for numerical attacker based on sklearn models.
It is used to train a model to predict sensitive attributes from key attributes
using the synthetic data. Then, evaluate the privacy of the model by
trying to predict the sensitive attributes of the real data.
Attributes:
skl_learner (Class):
A (wrapped) sklearn classifier class that can be called with no arguments.
"""
SKL_LEARNER = None
def __init__(self):
self.predictor = self.SKL_LEARNER()
def fit(self, synthetic_data, key, sensitive):
"""Fit the NumericalSklearnAttacker on the synthetic data.
Args:
synthetic_data(pandas.DataFrame):
The synthetic data table used for adverserial learning.
key_fields(list[str]):
The names of the key columns.
sensitive_fields(list[str]):
The names of the sensitive columns.
"""
key_table = np.array(synthetic_data[key])
sensitive_table = np.array(synthetic_data[sensitive])
self.predictor.fit(key_table, sensitive_table)
def predict(self, key_data):
"""Make a prediction of the sensitive data given keys.
Args:
key_data(tuple):
The key data.
Returns:
tuple:
The predicted sensitive data.
"""
sensitive_pred = self.predictor.predict([key_data])
if len(np.array(sensitive_pred).shape) == 1:
sensitive_pred = [sensitive_pred]
return tuple(sensitive_pred[0])
class SVRWrapper():
"""A wrapper arround `sklearn.svm.SVR` to support multidimensional y."""
def __init__(self):
self.predictors = []
def fit(self, X, Y):
"""Fit the classifier to training data X and lables Y.
Arguments:
X (np.array):
training data matrix of shape (n_samples, n_features).
Y (np.array):
label matrix of shape (n_samples, n_labels).
"""
n_labels = Y.shape[1]
for idx in range(n_labels):
y = Y[:, idx]
predictor = SVR()
predictor.fit(X, y)
self.predictors.append(predictor)
def predict(self, X):
"""Predict the labels corresponding to data X.
Arguments:
X (np.array): training data matrix of shape (n_samples, n_features)
Returns:
np.array: label matrix of shape (n_samples, n_labels)
"""
Y = []
for predictor in self.predictors:
Y.append(predictor.predict(X))
Y = np.array(Y).T
return Y
class LRAttacker(NumericalSklearnAttacker):
"""The privacy attaker based on the Linear Regression model."""
SKL_LEARNER = LinearRegression
class NumericalLR(NumericalPrivacyMetric):
"""The Numerical Linear Regression privacy metric. Scored based on the LRAttacker."""
name = 'Numerical Linear Regression'
MODEL = LRAttacker
class MLPAttacker(NumericalSklearnAttacker):
"""The privacy attaker based on the MLP (Multi-layer Perceptron) regression model."""
SKL_LEARNER = MLPRegressor
class NumericalMLP(NumericalPrivacyMetric):
"""The Multi-layer Perceptron regression privacy metric. Scored based on the MLPAttacker."""
name = 'Multi-layer Perceptron Regression'
MODEL = MLPAttacker
class SVRAttacker(NumericalSklearnAttacker):
"""The privacy attaker based on the SVR (Support-vector Regression) model."""
SKL_LEARNER = SVRWrapper
class NumericalSVR(NumericalPrivacyMetric):
"""The Numerical Support-vector Regression privacy metric. Scored based on the SVRAttacker."""
name = 'Numerical Support-vector Regression'
MODEL = SVRAttacker
| [
"numpy.array",
"sklearn.svm.SVR"
] | [((1345, 1374), 'numpy.array', 'np.array', (['synthetic_data[key]'], {}), '(synthetic_data[key])\n', (1353, 1374), True, 'import numpy as np\n'), ((1401, 1436), 'numpy.array', 'np.array', (['synthetic_data[sensitive]'], {}), '(synthetic_data[sensitive])\n', (1409, 1436), True, 'import numpy as np\n'), ((2533, 2538), 'sklearn.svm.SVR', 'SVR', ([], {}), '()\n', (2536, 2538), False, 'from sklearn.svm import SVR\n'), ((3008, 3019), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (3016, 3019), True, 'import numpy as np\n'), ((1834, 1858), 'numpy.array', 'np.array', (['sensitive_pred'], {}), '(sensitive_pred)\n', (1842, 1858), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import time
import sys
import paddle
import paddle.fluid as fluid
import reader
import argparse
import functools
import models
import utils
from utils.utility import add_arguments,print_arguments
import math
parser = argparse.ArgumentParser(description=__doc__)
# yapf: disable
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('class_dim', int, 1000, "Class number.")
add_arg('image_shape', str, "3,224,224", "Input image size")
add_arg('with_mem_opt', bool, True, "Whether to use memory optimization or not.")
add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
add_arg('model', str, "SE_ResNeXt50_32x4d", "Set the network to use.")
add_arg('save_inference', bool, False, "Whether to save inference model or not")
# yapf: enable
def infer(args):
# parameters from arguments
class_dim = args.class_dim
model_name = args.model
save_inference = args.save_inference
pretrained_model = args.pretrained_model
with_memory_optimization = args.with_mem_opt
image_shape = [int(m) for m in args.image_shape.split(",")]
model_list = [m for m in dir(models) if "__" not in m]
assert model_name in model_list, "{} is not in lists: {}".format(args.model,
model_list)
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
# model definition
model = models.__dict__[model_name]()
if model_name == "GoogleNet":
out, _, _ = model.net(input=image, class_dim=class_dim)
else:
out = model.net(input=image, class_dim=class_dim)
out = fluid.layers.softmax(out)
test_program = fluid.default_main_program().clone(for_test=True)
fetch_list = [out.name]
if with_memory_optimization and not save_inference:
fluid.memory_optimize(
fluid.default_main_program(), skip_opt_set=set(fetch_list))
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
if pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(pretrained_model, var.name))
fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
if save_inference:
fluid.io.save_inference_model(
dirname=model_name,
feeded_var_names=['image'],
main_program=test_program,
target_vars=out,
executor=exe,
model_filename='model',
params_filename='params')
print("model: ",model_name," is already saved")
exit(0)
test_batch_size = 1
test_reader = paddle.batch(reader.test(), batch_size=test_batch_size)
feeder = fluid.DataFeeder(place=place, feed_list=[image])
TOPK = 1
for batch_id, data in enumerate(test_reader()):
result = exe.run(test_program,
fetch_list=fetch_list,
feed=feeder.feed(data))
result = result[0][0]
pred_label = np.argsort(result)[::-1][:TOPK]
print("Test-{0}-score: {1}, class {2}"
.format(batch_id, result[pred_label], pred_label))
sys.stdout.flush()
def main():
args = parser.parse_args()
print_arguments(args)
infer(args)
if __name__ == '__main__':
main()
| [
"paddle.fluid.DataFeeder",
"reader.test",
"argparse.ArgumentParser",
"paddle.fluid.layers.softmax",
"paddle.fluid.default_startup_program",
"paddle.fluid.layers.data",
"paddle.fluid.CPUPlace",
"os.path.join",
"utils.utility.print_arguments",
"paddle.fluid.io.save_inference_model",
"paddle.fluid.... | [((356, 400), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (379, 400), False, 'import argparse\n'), ((427, 477), 'functools.partial', 'functools.partial', (['add_arguments'], {'argparser': 'parser'}), '(add_arguments, argparser=parser)\n', (444, 477), False, 'import functools\n'), ((1646, 1713), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""image"""', 'shape': 'image_shape', 'dtype': '"""float32"""'}), "(name='image', shape=image_shape, dtype='float32')\n", (1663, 1713), True, 'import paddle.fluid as fluid\n'), ((2324, 2345), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (2338, 2345), True, 'import paddle.fluid as fluid\n'), ((3105, 3153), 'paddle.fluid.DataFeeder', 'fluid.DataFeeder', ([], {'place': 'place', 'feed_list': '[image]'}), '(place=place, feed_list=[image])\n', (3121, 3153), True, 'import paddle.fluid as fluid\n'), ((3627, 3648), 'utils.utility.print_arguments', 'print_arguments', (['args'], {}), '(args)\n', (3642, 3648), False, 'from utils.utility import add_arguments, print_arguments\n'), ((1960, 1985), 'paddle.fluid.layers.softmax', 'fluid.layers.softmax', (['out'], {}), '(out)\n', (1980, 1985), True, 'import paddle.fluid as fluid\n'), ((2257, 2275), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (2272, 2275), True, 'import paddle.fluid as fluid\n'), ((2297, 2313), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (2311, 2313), True, 'import paddle.fluid as fluid\n'), ((2358, 2389), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (2387, 2389), True, 'import paddle.fluid as fluid\n'), ((2530, 2591), 'paddle.fluid.io.load_vars', 'fluid.io.load_vars', (['exe', 'pretrained_model'], {'predicate': 'if_exist'}), '(exe, pretrained_model, predicate=if_exist)\n', (2548, 2591), True, 'import paddle.fluid as fluid\n'), ((2623, 2817), 
'paddle.fluid.io.save_inference_model', 'fluid.io.save_inference_model', ([], {'dirname': 'model_name', 'feeded_var_names': "['image']", 'main_program': 'test_program', 'target_vars': 'out', 'executor': 'exe', 'model_filename': '"""model"""', 'params_filename': '"""params"""'}), "(dirname=model_name, feeded_var_names=['image'\n ], main_program=test_program, target_vars=out, executor=exe,\n model_filename='model', params_filename='params')\n", (2652, 2817), True, 'import paddle.fluid as fluid\n'), ((3049, 3062), 'reader.test', 'reader.test', ([], {}), '()\n', (3060, 3062), False, 'import reader\n'), ((3559, 3577), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3575, 3577), False, 'import sys\n'), ((2006, 2034), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (2032, 2034), True, 'import paddle.fluid as fluid\n'), ((2184, 2212), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (2210, 2212), True, 'import paddle.fluid as fluid\n'), ((2479, 2519), 'os.path.join', 'os.path.join', (['pretrained_model', 'var.name'], {}), '(pretrained_model, var.name)\n', (2491, 2519), False, 'import os\n'), ((3407, 3425), 'numpy.argsort', 'np.argsort', (['result'], {}), '(result)\n', (3417, 3425), True, 'import numpy as np\n')] |
#%%
import argparse
import torch
import numpy as np
from dataloader import *
from model import Model
from loss import *
import os
import random
from pathlib import Path
import wandb
import time
from utils.pyart import bnum2ls
# fix random seeds for reproducibility
SEED = 1
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(SEED)
random.seed(SEED)
def train_epoch(model, optimizer, input, label,Loss_Fn, args):
q_value = model.q_layer(input)
n_joint = q_value.size()[1]
q_loss = q_entropy(torch.abs(q_value.reshape(-1,2*n_joint)))
q_loss = torch.mean(q_loss,dim=0)
total_loss = - args.q_entropy * q_loss
output, _,_ = model.poe_layer(q_value)
loss = Loss_Fn(output,label)
# regularizer_loss = args.Twist_norm * Twist_norm(model)
# regularizer_loss = regularizer_loss + args.Twist2point * Twist2point(Twistls,label)
regularizer_loss = 0
total_loss = total_loss + loss + regularizer_loss
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
return total_loss
def test_epoch(model, input, label, Loss_Fn, args):
q_value = model.q_layer(input)
q_loss = q_entropy(torch.abs(q_value))
q_loss = torch.mean(q_loss,dim=0)
q_loss = args.q_entropy * q_loss
output, _, _ = model.poe_layer(q_value)
# Twist2pointloss = args.Twist2point * Twist2point(Twistls,label)
Twist2pointloss = 0
loss = Loss_Fn(output,label)
# regularizer_loss = args.Twist_norm * Twist_norm(model)
regularizer_loss = 0
total_loss = loss + regularizer_loss
return total_loss,q_loss,Twist2pointloss
def main(args):
#set logger
if args.wandb:
wandb.init(project = args.pname)
#set device
os.environ["CUDA_VISIBLE_DEVICES"]=args.device
device = torch.device('cuda:0')
torch.cuda.set_device(device)
branchLs = bnum2ls(args.branchNum)
#set model
model = Model(branchLs, args.input_dim)
model = model.to(device)
#load weight when requested
if os.path.isfile(args.resume_dir):
weight = torch.load(args.resume_dir)
model.load_state_dict(weight['state_dict'])
print("loading successful!")
#set optimizer
optimizer = torch.optim.Adam(model.parameters(),lr= args.lr, weight_decay=args.wd)
#declare loss function
if args.loss_function == 'Pos_norm2':
Loss_Fn = Pos_norm2
else:
print("Invalid loss_function")
exit(0)
#assert path to save model
pathname = args.save_dir
Path(pathname).mkdir(parents=True, exist_ok=True)
#set dataloader
print("Setting up dataloader")
train_data_loader = FoldToyDataloader(args.data_path, args.Foldstart, args.Foldend, args.n_workers, args.batch_size)
test_data_loader = FoldToyDataloader(args.data_path, args.Foldend, -1, args.n_workers, args.batch_size)
print("Initalizing Training loop")
for epoch in range(args.epochs):
# Timer start
time_start = time.time()
# Train
model.train()
data_length = len(train_data_loader)
for iterate, (input,label) in enumerate(train_data_loader):
input = input.to(device)
label = label.to(device)
train_loss = train_epoch(model, optimizer, input, label, Loss_Fn, args)
print('Epoch:{}, TrainLoss:{:.2f}, Progress:{:.2f}%'.format(epoch+1,train_loss,100*iterate/data_length), end='\r')
print('Epoch:{}, TrainLoss:{:.2f}, Progress:{:.2f}%'.format(epoch+1,train_loss,100*iterate/data_length))
#Evaluate
model.eval()
data_length = len(test_data_loader)
test_loss = np.array([])
for iterate, (input,label) in enumerate(test_data_loader):
input = input.to(device)
label = label.to(device)
total_loss,q_loss,Twist2pointloss = test_epoch(model, input, label, Loss_Fn, args)
total_loss = total_loss.detach().cpu().numpy()
test_loss = np.append(test_loss, total_loss)
print('Testing...{:.2f} Epoch:{}, Progress:{:.2f}%'.format(total_loss,epoch+1,100*iterate/data_length) , end='\r')
test_loss = test_loss.mean()
print('TestLoss:{:.2f}'.format(test_loss))
# Timer end
time_end = time.time()
avg_time = time_end-time_start
eta_time = (args.epochs - epoch) * avg_time
h = int(eta_time //3600)
m = int((eta_time %3600)//60)
s = int((eta_time %60))
print("Epoch: {}, TestLoss:{:.2f}, eta:{}:{}:{}".format(epoch+1, test_loss, h,m,s))
# Log to wandb
if args.wandb:
wandb.log({'TrainLoss':train_loss, 'TestLoss':test_loss, 'TimePerEpoch':avg_time,
'q_entropy':q_loss,'Twist2point':Twist2pointloss},step = epoch+1)
#save model
if (epoch+1) % args.save_period==0:
filename = pathname + '/checkpoint_{}.pth'.format(epoch+1)
print("saving... {}".format(filename))
state = {
'state_dict':model.state_dict(),
'optimizer':optimizer.state_dict(),
'branchNum':args.branchNum,
'input_dim':args.input_dim
}
torch.save(state, filename)
urdf = make_urdf(model)
def make_urdf(model):
from xml.dom import minidom
root = minidom.Document()
robot = root.createElement('robot')
robot.setAttribute('name','softrobot')
robot = addjoint(robot,jointname,model)
def addjoint(robot,jointidx,model):
    """Append one revolute <joint> element describing joint *jointidx* to *robot*.

    NOTE(review): ``robot`` is an Element created via ``createElement`` in the
    caller, but ``xml.dom.minidom`` Elements do not define ``createElement``
    (that is a Document method) -- this likely raises AttributeError; confirm.
    NOTE(review): the joint name is hard-coded to 'J1' and both parent and
    child links are 'r_farm'; presumably these should depend on ``jointidx``.

    Args:
        robot: the <robot> XML element to extend.
        jointidx: index of the joint within the model's POE layer.
        model: network exposing ``poe_layer`` with one Twist per joint name.

    Returns:
        The same ``robot`` element with the new <joint> child appended.
    """
    jointname = get_jointname(jointidx,model)
    Twist = getattr(model.poe_layer, jointname)
    # create the <joint> element itself
    xml = robot.createElement('joint')
    xml.setAttribute('name','J1')
    xml.setAttribute('type','revolute')
    robot.appendChild(xml)
    # set origin for joint; assumes Twist.get_pos is a property yielding an
    # xyz string -- TODO confirm (no call parentheses here)
    productChild = robot.createElement('origin')
    xyz = Twist.get_pos
    productChild.setAttribute('xyz',xyz )
    xml.appendChild(productChild)
    # set axis for joint (same property-style access as get_pos)
    productChild = robot.createElement('axis')
    xyz = Twist.get_axis
    productChild.setAttribute('xyz',xyz)
    xml.appendChild(productChild)
    # set parent link for joint
    productChild = robot.createElement('parent')
    productChild.setAttribute('link','r_farm')
    xml.appendChild(productChild)
    # set child link for joint (same link name as parent -- TODO confirm)
    productChild = robot.createElement('child')
    productChild.setAttribute('link','r_farm')
    xml.appendChild(productChild)
    return robot
if __name__ == '__main__':
    # Command-line interface for POENet training. The parser object gets its
    # own name so that ``args`` unambiguously means the parsed namespace.
    parser = argparse.ArgumentParser(description='parse for POENet')
    parser.add_argument('--batch_size', default=1024 * 8, type=int,
                        help='batch_size')
    parser.add_argument('--data_path', default='./data/Multi_2dim_log_spiral', type=str,
                        help='path to data')
    parser.add_argument('--save_dir', default='./output/temp', type=str,
                        help='path to save model')
    parser.add_argument('--resume_dir', default='./output/', type=str,
                        help='path to load model')
    parser.add_argument('--device', default='1', type=str,
                        help='device to use')
    parser.add_argument('--n_workers', default=2, type=int,
                        help='number of data loading workers')
    parser.add_argument('--wd', default=0.001, type=float,
                        help='weight_decay for model layer')
    parser.add_argument('--lr', default=0.001, type=float,
                        help='learning rate for model layer')
    parser.add_argument('--loss_function', default='Pos_norm2', type=str,
                        help='get list of loss function')
    # Loss-term coefficients.
    parser.add_argument('--Twist_norm', default=0.01, type=float,
                        help='Coefficient for TwistNorm')
    parser.add_argument('--q_entropy', default=0.01, type=float,
                        help='Coefficient for q_entropy')
    parser.add_argument('--Twist2point', default=0.01, type=float,
                        help='Coefficient for Twist2point')
    # BUGFIX: help text previously said 'Coefficient for Twist2point'.
    parser.add_argument('--Twist2Twist', default=0.1, type=float,
                        help='Coefficient for Twist2Twist')
    parser.add_argument('--wandb', action='store_true', help='Use wandb to log')
    parser.add_argument('--input_dim', default=2, type=int,
                        help='dimension of input')
    parser.add_argument('--epochs', default=50, type=int,
                        help='number of epoch to perform')
    parser.add_argument('--save_period', default=1, type=int,
                        help='number of scenes after which model is saved')
    parser.add_argument('--pname', default='POE2D-1230', type=str,
                        help='Project name')
    parser.add_argument('--Foldstart', default=0, type=int,
                        help='Number of Fold to start')
    parser.add_argument('--Foldend', default=8, type=int,
                        help='Number of Fold to end')
    parser.add_argument('--branchNum', nargs='+', default=[3, 3, 3, 3, 3])
    args = parser.parse_args()
    main(args)
#%%
| [
"model.Model",
"wandb.log",
"wandb.init",
"numpy.array",
"argparse.ArgumentParser",
"pathlib.Path",
"torch.mean",
"numpy.random.seed",
"torch.abs",
"os.path.isfile",
"torch.save",
"utils.pyart.bnum2ls",
"time.time",
"torch.cuda.set_device",
"torch.device",
"torch.manual_seed",
"torch... | [((275, 298), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (292, 298), False, 'import torch\n'), ((299, 327), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['SEED'], {}), '(SEED)\n', (321, 327), False, 'import torch\n'), ((409, 429), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (423, 429), True, 'import numpy as np\n'), ((430, 447), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (441, 447), False, 'import random\n'), ((657, 682), 'torch.mean', 'torch.mean', (['q_loss'], {'dim': '(0)'}), '(q_loss, dim=0)\n', (667, 682), False, 'import torch\n'), ((1278, 1303), 'torch.mean', 'torch.mean', (['q_loss'], {'dim': '(0)'}), '(q_loss, dim=0)\n', (1288, 1303), False, 'import torch\n'), ((1870, 1892), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1882, 1892), False, 'import torch\n'), ((1897, 1926), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device'], {}), '(device)\n', (1918, 1926), False, 'import torch\n'), ((1943, 1966), 'utils.pyart.bnum2ls', 'bnum2ls', (['args.branchNum'], {}), '(args.branchNum)\n', (1950, 1966), False, 'from utils.pyart import bnum2ls\n'), ((1995, 2026), 'model.Model', 'Model', (['branchLs', 'args.input_dim'], {}), '(branchLs, args.input_dim)\n', (2000, 2026), False, 'from model import Model\n'), ((2096, 2127), 'os.path.isfile', 'os.path.isfile', (['args.resume_dir'], {}), '(args.resume_dir)\n', (2110, 2127), False, 'import os\n'), ((5477, 5495), 'xml.dom.minidom.Document', 'minidom.Document', ([], {}), '()\n', (5493, 5495), False, 'from xml.dom import minidom\n'), ((6648, 6703), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""parse for POENet"""'}), "(description='parse for POENet')\n", (6671, 6703), False, 'import argparse\n'), ((1245, 1263), 'torch.abs', 'torch.abs', (['q_value'], {}), '(q_value)\n', (1254, 1263), False, 'import torch\n'), ((1756, 1786), 'wandb.init', 'wandb.init', ([], {'project': 
'args.pname'}), '(project=args.pname)\n', (1766, 1786), False, 'import wandb\n'), ((2146, 2173), 'torch.load', 'torch.load', (['args.resume_dir'], {}), '(args.resume_dir)\n', (2156, 2173), False, 'import torch\n'), ((3062, 3073), 'time.time', 'time.time', ([], {}), '()\n', (3071, 3073), False, 'import time\n'), ((3736, 3748), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3744, 3748), True, 'import numpy as np\n'), ((4369, 4380), 'time.time', 'time.time', ([], {}), '()\n', (4378, 4380), False, 'import time\n'), ((2603, 2617), 'pathlib.Path', 'Path', (['pathname'], {}), '(pathname)\n', (2607, 2617), False, 'from pathlib import Path\n'), ((4068, 4100), 'numpy.append', 'np.append', (['test_loss', 'total_loss'], {}), '(test_loss, total_loss)\n', (4077, 4100), True, 'import numpy as np\n'), ((4734, 4897), 'wandb.log', 'wandb.log', (["{'TrainLoss': train_loss, 'TestLoss': test_loss, 'TimePerEpoch': avg_time,\n 'q_entropy': q_loss, 'Twist2point': Twist2pointloss}"], {'step': '(epoch + 1)'}), "({'TrainLoss': train_loss, 'TestLoss': test_loss, 'TimePerEpoch':\n avg_time, 'q_entropy': q_loss, 'Twist2point': Twist2pointloss}, step=\n epoch + 1)\n", (4743, 4897), False, 'import wandb\n'), ((5319, 5346), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (5329, 5346), False, 'import torch\n')] |
# Script use to collect incumbents_v5.pkl
# Uses incumbents_v4.pkl and reorders the list in a semi-deterministic manner
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.spatial import distance_matrix
# from scipy.spatial.distance import euclidean
from scipy.spatial import ConvexHull, convex_hull_plot_2d
# Incumbents for 'lasso' from regression tasks.
# Hand-picked hyperparameter configurations (one dict per incumbent); these
# bypass the pickle-based hull re-ranking in create_order() below.
lasso = [{'alpha': 0.0588816078969954,
          'fit_intercept': 0,
          'normalize': 0,
          'max_iter': 1188,
          'tol': 0.0142116958607831,
          'positive': 0},
         {'alpha': 0.01,
          'fit_intercept': 1,
          'normalize': 0,
          'max_iter': 5000,
          'tol': 0.0934421821777098,
          'positive': 0},
         {'alpha': 0.0301443773293404,
          'fit_intercept': 1,
          'normalize': 1,
          'max_iter': 465,
          'tol': 0.0994437776399929,
          'positive': 0},
         {'alpha': 0.0342844214778938,
          'fit_intercept': 1,
          'normalize': 1,
          'max_iter': 20,
          'tol': 0.086836065868564,
          'positive': 0}]
# Incumbents for 'linear' (ridge-style) from regression tasks.
linear = [{'alpha': 0.0807158611555724,
           'fit_intercept': 1,
           'normalize': 1,
           'max_iter': 1482,
           'tol': 0.000985256913005844},
          {'alpha': 0.0158280830848803,
           'fit_intercept': 1,
           'normalize': 1,
           'max_iter': 2024,
           'tol': 0.000274213922897436},
          {'alpha': 0.0131360370365985,
           'fit_intercept': 1,
           'normalize': 0,
           'max_iter': 27,
           'tol': 0.000758983848008941},
          {'alpha': 0.0286632897398748,
           'fit_intercept': 1,
           'normalize': 0,
           'max_iter': 257,
           'tol': 0.000567925032398133}]
def load_model(model_name):
    """Load the v4 incumbent configurations for ``model_name`` as a DataFrame.

    Reads the pickled incumbent table, selects the row for the given model,
    drops missing entries and flattens the per-task lists of incumbent
    configurations into a single DataFrame (one row per incumbent).
    """
    path = 'submissions/switching-optimizer/utils/incumbents_v4.pkl'
    with open(path, 'rb') as handle:
        table = pickle.load(handle)
    per_task = table.loc[model_name].dropna()
    flattened = [config for task_incs in per_task for config in task_incs]
    return pd.DataFrame(flattened)
def plot_LR_hull():
    """Visualise the LR incumbents ('linearC' in v4) with their convex hull.

    Scatter-plots every incumbent on log-log axes and overlays the convex
    hull, the geometric-mean point, and the incumbent closest (in log space)
    to that mean.
    """
    model = load_model('linearC')  # model name used in v4
    # Geometric mean of the incumbents and the incumbent nearest to it
    # (distances measured in log space).
    mean_point = 10 ** np.log10(model).mean(axis=0)
    dists = distance_matrix([np.log10(mean_point)], np.log10(model))
    closest_point = model.iloc[dists.argmin()]
    # Hull outline; repeat the first vertex so the polygon closes.
    hull = ConvexHull(model)
    hull_x = model.iloc[hull.vertices, 0].to_numpy()
    hull_y = model.iloc[hull.vertices, 1].to_numpy()
    hull_x = np.append(hull_x, hull_x[0])
    hull_y = np.append(hull_y, hull_y[0])
    plt.scatter(model['C'], model['intercept_scaling'])
    plt.plot(hull_x, hull_y, color='red')
    plt.scatter(mean_point['C'], mean_point['intercept_scaling'], label='mean point')
    plt.scatter(closest_point['C'], closest_point['intercept_scaling'], label='closest point to mean')
    plt.legend()
    plt.xscale('log')
    plt.yscale('log')
    plt.show()
def plot_LR_hull_and_topN(N=16):
    """Plot four panels of LR incumbents, each highlighting a top-N ordering.

    Each panel shows the full incumbent cloud, its convex hull, the
    geometric-mean point and its nearest incumbent; the red circles mark the
    first ``N`` entries of a fresh ``create_order('linearC')`` call.
    NOTE(review): ``create_order`` shuffles the non-hull incumbents
    (``sample(frac=1)``), so the four panels generally highlight different
    orderings -- presumably intentional, to compare runs.
    """
    fig, axes = plt.subplots(2, 2)
    LR = create_order('linearC')
    hull = ConvexHull(LR)
    # Hull outline, closed by repeating the first vertex.
    x = LR.iloc[hull.vertices, 0].to_numpy()
    x = np.append(x, x[0])
    y = LR.iloc[hull.vertices, 1].to_numpy()
    y = np.append(y, y[0])
    # Geometric mean and its nearest incumbent (log-space distances).
    mean_point = 10 ** np.log10(LR).mean(axis=0)
    mean_dist = distance_matrix([np.log10(mean_point)], np.log10(LR))
    closest_point = LR.iloc[mean_dist.argmin()]
    # the incumbent space from v4, drawn identically in all four panels
    for i in range(2):
        for j in range(2):
            axes[i, j].scatter(LR['C'], LR['intercept_scaling'], color='black')
            axes[i, j].plot(x, y, color='black', alpha=0.7)
            axes[i, j].scatter(mean_point['C'], mean_point['intercept_scaling'], label='mean point')
            axes[i, j].scatter(closest_point['C'], closest_point['intercept_scaling'], label='closest point to mean')
            axes[i, j].set_xscale('log'); axes[i, j].set_yscale('log')
    # trial 1
    LR = LR.iloc[:N, :]
    axes[0, 0].scatter(LR['C'], LR['intercept_scaling'], s=80, facecolors='none', edgecolors='red', label='top {}'.format(N))
    # NOTE(review): i, j here are stale loop leftovers (both 1), so this
    # legend lands on the bottom-right panel; the loop at the end re-adds
    # legends everywhere anyway -- confirm whether this line is needed.
    axes[i, j].legend()
    # trial 2
    LR = create_order('linearC')
    LR = LR.iloc[:N, :]
    axes[0, 1].scatter(LR['C'], LR['intercept_scaling'], s=80, facecolors='none', edgecolors='red', label='top {}'.format(N))
    # trial 3
    LR = create_order('linearC')
    LR = LR.iloc[:N, :]
    axes[1, 0].scatter(LR['C'], LR['intercept_scaling'], s=80, facecolors='none', edgecolors='red', label='top {}'.format(N))
    # trial 4
    LR = create_order('linearC')
    LR = LR.iloc[:N, :]
    axes[1, 1].scatter(LR['C'], LR['intercept_scaling'], s=80, facecolors='none', edgecolors='red', label='top {}'.format(N))
    for i in range(2):
        for j in range(2):
            axes[i, j].legend()
    plt.suptitle('LR incumbents')
    plt.show()
def create_order(model_name='linearC'):
    """Return the incumbents for *model_name* ordered by a hull-based heuristic.

    Ordering: (1) the incumbent closest (in log space) to the geometric mean,
    (2) the convex-hull vertex farthest from it, (3) the remaining hull
    vertices, each time hopping to the farthest not-yet-visited vertex,
    (4) all remaining incumbents in random order.

    For 'linear' and 'lasso' the hand-picked lists above are returned as-is.
    """
    if model_name == 'linear':
        return pd.DataFrame(linear)
    if model_name == 'lasso':
        return pd.DataFrame(lasso)
    model = load_model(model_name)
    hull = ConvexHull(model)
    mean_point = 10 ** np.log10(model).mean(axis=0)
    incs_added = []
    hull_vertex_added = []
    # finding point closest to mean (distances taken in log space)
    mean_dist_all = distance_matrix([np.log10(mean_point)], np.log10(model))
    closest_point = model.iloc[mean_dist_all.argmin()]
    incs_added.append(mean_dist_all.argmin())
    # pairwise distances between the hull vertices
    hull_dist = distance_matrix(np.log10(model.iloc[hull.vertices]),
                                  np.log10(model.iloc[hull.vertices]))
    # distance of closest point to mean from all vertices
    dist_point_vertices = distance_matrix([np.log10(closest_point)],
                                            np.log10(model.iloc[hull.vertices]))
    # store incumbent list, ranked through a heuristic
    ranked_list = []
    # adding point closest to mean as the first entry
    ranked_list.extend([closest_point.to_numpy().tolist()])
    # adding the convex hull vertex farthest from the point closest to mean
    point = model.iloc[hull.vertices].iloc[np.argmax(dist_point_vertices)]
    ranked_list.extend([point.to_numpy().tolist()])
    hull_vertex_added.append(np.argmax(dist_point_vertices))
    curr_idx = np.argmax(dist_point_vertices)
    while len(model) > len(ranked_list) and len(hull_vertex_added) < len(hull.vertices) + 1:
        # visit the farthest hull vertex not yet added
        candidates = np.argsort(hull_dist[curr_idx])[1:][::-1]
        for i in range(len(candidates)):
            if candidates[i] not in hull_vertex_added:
                curr_idx = candidates[i]
                break
        point = model.iloc[hull.vertices].iloc[curr_idx]
        ranked_list.extend([point.to_numpy().tolist()])
        hull_vertex_added.append(curr_idx)
    for idx in hull_vertex_added:
        incs_added.append(hull.vertices[idx])
    # shuffle the remaining (non-ranked) incumbents
    model = model.drop(index=incs_added).sample(frac=1)
    # BUGFIX: DataFrame.append was removed in pandas 2.0; use pd.concat.
    ranked = pd.DataFrame(ranked_list, columns=model.columns)
    model = pd.concat([ranked, model], ignore_index=True)
    model = model.drop_duplicates(ignore_index=True)
    return model
# Build the v5 incumbent table: re-rank every model's v4 incumbents and
# store them under their display names ('linearC' is published as 'LR').
model_list = ['RF', 'DT', 'MLP-sgd', 'MLP-adam', 'SVM', 'linear', 'lasso', 'ada', 'kNN', 'linearC']
incumbents = {('LR' if model_name == 'linearC' else model_name): create_order(model_name)
              for model_name in model_list}
with open('submissions/switching-optimizer/utils/incumbents_v5.pkl', 'wb') as f:
    pickle.dump(incumbents, f)
| [
"numpy.log10",
"pickle.dump",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.plot",
"pickle.load",
"numpy.argmax",
"numpy.append",
"scipy.spatial.ConvexHull",
"matplotlib.pyplot.suptitle",
"numpy.argsort",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"matplotlib.pyplot.yscale",
"matplo... | [((1755, 1773), 'pandas.DataFrame', 'pd.DataFrame', (['incs'], {}), '(incs)\n', (1767, 1773), True, 'import pandas as pd\n'), ((2059, 2076), 'scipy.spatial.ConvexHull', 'ConvexHull', (['model'], {}), '(model)\n', (2069, 2076), False, 'from scipy.spatial import ConvexHull, convex_hull_plot_2d\n'), ((2133, 2151), 'numpy.append', 'np.append', (['x', 'x[0]'], {}), '(x, x[0])\n', (2142, 2151), True, 'import numpy as np\n'), ((2208, 2226), 'numpy.append', 'np.append', (['y', 'y[0]'], {}), '(y, y[0])\n', (2217, 2226), True, 'import numpy as np\n'), ((2232, 2283), 'matplotlib.pyplot.scatter', 'plt.scatter', (["model['C']", "model['intercept_scaling']"], {}), "(model['C'], model['intercept_scaling'])\n", (2243, 2283), True, 'import matplotlib.pyplot as plt\n'), ((2288, 2315), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""red"""'}), "(x, y, color='red')\n", (2296, 2315), True, 'import matplotlib.pyplot as plt\n'), ((2320, 2406), 'matplotlib.pyplot.scatter', 'plt.scatter', (["mean_point['C']", "mean_point['intercept_scaling']"], {'label': '"""mean point"""'}), "(mean_point['C'], mean_point['intercept_scaling'], label=\n 'mean point')\n", (2331, 2406), True, 'import matplotlib.pyplot as plt\n'), ((2406, 2509), 'matplotlib.pyplot.scatter', 'plt.scatter', (["closest_point['C']", "closest_point['intercept_scaling']"], {'label': '"""closest point to mean"""'}), "(closest_point['C'], closest_point['intercept_scaling'], label=\n 'closest point to mean')\n", (2417, 2509), True, 'import matplotlib.pyplot as plt\n'), ((2509, 2521), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2519, 2521), True, 'import matplotlib.pyplot as plt\n'), ((2526, 2543), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2536, 2543), True, 'import matplotlib.pyplot as plt\n'), ((2545, 2562), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2555, 2562), True, 'import matplotlib.pyplot as plt\n'), ((2567, 2577), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2575, 2577), True, 'import matplotlib.pyplot as plt\n'), ((2630, 2648), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (2642, 2648), True, 'import matplotlib.pyplot as plt\n'), ((2694, 2708), 'scipy.spatial.ConvexHull', 'ConvexHull', (['LR'], {}), '(LR)\n', (2704, 2708), False, 'from scipy.spatial import ConvexHull, convex_hull_plot_2d\n'), ((2762, 2780), 'numpy.append', 'np.append', (['x', 'x[0]'], {}), '(x, x[0])\n', (2771, 2780), True, 'import numpy as np\n'), ((2834, 2852), 'numpy.append', 'np.append', (['y', 'y[0]'], {}), '(y, y[0])\n', (2843, 2852), True, 'import numpy as np\n'), ((4405, 4434), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""LR incumbents"""'], {}), "('LR incumbents')\n", (4417, 4434), True, 'import matplotlib.pyplot as plt\n'), ((4439, 4449), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4447, 4449), True, 'import matplotlib.pyplot as plt\n'), ((4672, 4689), 'scipy.spatial.ConvexHull', 'ConvexHull', (['model'], {}), '(model)\n', (4682, 4689), False, 'from scipy.spatial import ConvexHull, convex_hull_plot_2d\n'), ((5857, 5887), 'numpy.argmax', 'np.argmax', (['dist_point_vertices'], {}), '(dist_point_vertices)\n', (5866, 5887), True, 'import numpy as np\n'), ((7048, 7074), 'pickle.dump', 'pickle.dump', (['incumbents', 'f'], {}), '(incumbents, f)\n', (7059, 7074), False, 'import pickle\n'), ((1619, 1633), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1630, 1633), False, 'import pickle\n'), ((1979, 1994), 'numpy.log10', 'np.log10', (['model'], {}), '(model)\n', (1987, 1994), True, 'import numpy as np\n'), ((2958, 2970), 'numpy.log10', 'np.log10', (['LR'], {}), '(LR)\n', (2966, 2970), True, 'import numpy as np\n'), ((4539, 4559), 'pandas.DataFrame', 'pd.DataFrame', (['linear'], {}), '(linear)\n', (4551, 4559), True, 'import pandas as pd\n'), ((4605, 4624), 'pandas.DataFrame', 'pd.DataFrame', (['lasso'], {}), '(lasso)\n', (4617, 4624), 
True, 'import pandas as pd\n'), ((4887, 4902), 'numpy.log10', 'np.log10', (['model'], {}), '(model)\n', (4895, 4902), True, 'import numpy as np\n'), ((5074, 5109), 'numpy.log10', 'np.log10', (['model.iloc[hull.vertices]'], {}), '(model.iloc[hull.vertices])\n', (5082, 5109), True, 'import numpy as np\n'), ((5143, 5178), 'numpy.log10', 'np.log10', (['model.iloc[hull.vertices]'], {}), '(model.iloc[hull.vertices])\n', (5151, 5178), True, 'import numpy as np\n'), ((5349, 5384), 'numpy.log10', 'np.log10', (['model.iloc[hull.vertices]'], {}), '(model.iloc[hull.vertices])\n', (5357, 5384), True, 'import numpy as np\n'), ((5696, 5726), 'numpy.argmax', 'np.argmax', (['dist_point_vertices'], {}), '(dist_point_vertices)\n', (5705, 5726), True, 'import numpy as np\n'), ((5809, 5839), 'numpy.argmax', 'np.argmax', (['dist_point_vertices'], {}), '(dist_point_vertices)\n', (5818, 5839), True, 'import numpy as np\n'), ((1956, 1976), 'numpy.log10', 'np.log10', (['mean_point'], {}), '(mean_point)\n', (1964, 1976), True, 'import numpy as np\n'), ((2935, 2955), 'numpy.log10', 'np.log10', (['mean_point'], {}), '(mean_point)\n', (2943, 2955), True, 'import numpy as np\n'), ((4864, 4884), 'numpy.log10', 'np.log10', (['mean_point'], {}), '(mean_point)\n', (4872, 4884), True, 'import numpy as np\n'), ((5281, 5304), 'numpy.log10', 'np.log10', (['closest_point'], {}), '(closest_point)\n', (5289, 5304), True, 'import numpy as np\n'), ((6509, 6557), 'pandas.DataFrame', 'pd.DataFrame', (['ranked_list'], {'columns': 'model.columns'}), '(ranked_list, columns=model.columns)\n', (6521, 6557), True, 'import pandas as pd\n'), ((1894, 1909), 'numpy.log10', 'np.log10', (['model'], {}), '(model)\n', (1902, 1909), True, 'import numpy as np\n'), ((2876, 2888), 'numpy.log10', 'np.log10', (['LR'], {}), '(LR)\n', (2884, 2888), True, 'import numpy as np\n'), ((4713, 4728), 'numpy.log10', 'np.log10', (['model'], {}), '(model)\n', (4721, 4728), True, 'import numpy as np\n'), ((6002, 6033), 'numpy.argsort', 
'np.argsort', (['hull_dist[curr_idx]'], {}), '(hull_dist[curr_idx])\n', (6012, 6033), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Boolean Network
================
Main class for Boolean network objects.
"""
# Copyright (C) 2021 by
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
# MIT license.
from collections import defaultdict
try:
import cStringIO.StringIO as StringIO
except ImportError:
from io import StringIO
import numpy as np
import networkx as nx
import random
import itertools
from cana.boolean_node import BooleanNode
import cana.bns as bns
from cana.control import fvs, mds, sc
from cana.utils import *
import warnings
import re
import copy
class BooleanNetwork:
"""
"""
    def __init__(self, name='', Nnodes=0, logic=None, sg=None, stg=None, stg_r=None, _ef=None, attractors=None,
                 constants={}, Nconstants=0, keep_constants=False,
                 bin2num=binstate_to_statenum, num2bin=statenum_to_binstate,
                 verbose=False, *args, **kwargs):
        """Instantiate a Boolean network from a logic specification.

        Args:
            name (str) : Name of the network.
            Nnodes (int) : Number of nodes in the network.
            logic (dict) : Network logic keyed by node id:
                ``{<id>: {'name': <str>, 'in': <input-node-ids>, 'out': <output-transitions>}, ...}``.
            sg / stg / stg_r : Pre-computed structural graph, state-transition
                graph and STG reachability dict, if already available.
            attractors : Pre-computed network attractors, if available.
            constants (dict) : Constant variables of the network.
                NOTE(review): mutable default argument; the assignment below
                is commented out, so this parameter is currently unused --
                constants are re-derived from the instantiated nodes instead.
            keep_constants (bool) : Keep/include constants in computations.
            bin2num / num2bin : Converters between binary states and state
                numbers; reset by ``_update_trans_func`` at the end.
            verbose (bool) : Verbosity flag.
            *args, **kwargs : Forwarded to each :class:`BooleanNode`.
        """
        self.name = name  # Name of the Network
        self.Nnodes = Nnodes  # Number of Nodes
        self.logic = logic  # A dict that contains the network logic {<id>:{'name':<string>,'in':<list-input-node-id>,'out':<list-output-transitions>},..}
        self._sg = sg  # Structure-Graph (SG)
        self._stg = stg  # State-Transition-Graph (STG)
        self._stg_r = stg_r  # State-Transition-Graph Reachability dict (STG-R)
        self._eg = None  # Effective Graph, computed from the effective connectivity
        self._attractors = attractors  # Network Attractors
        #
        self.keep_constants = keep_constants  # Keep/Include constants in some of the computations
        #self.constants = constants  # A dict that contains of constant variables in the network
        #
        self.Nstates = 2**Nnodes  # Number of possible states in the network 2^N
        #
        self.verbose = verbose
        # Map node names to integer ids, then instantiate one BooleanNode per
        # entry in `logic`.
        self.name2int = {logic[i]['name']: i for i in range(Nnodes)}
        # Count self-loops: nodes that list themselves among their inputs.
        self.Nself_loops = sum([self.name2int[logic[i]['name']] in logic[i]['in'] for i in range(Nnodes)])
        self.nodes = list()
        for i in range(Nnodes):
            name = logic[i]['name']
            k = len(logic[i]['in'])
            inputs = [self.name2int[logic[j]['name']] for j in logic[i]['in']]
            outputs = logic[i]['out']
            node = BooleanNode(id=i, name=name, k=k, inputs=inputs, outputs=outputs, network=self,*args,**kwargs)
            self.nodes.append(node)
        self.Nconstants = sum([n.constant for n in self.nodes])  # Number of constant variables
        #
        # Input nodes: constants, or nodes whose only input is themselves.
        self.input_nodes = [i for i in range(Nnodes) if (self.nodes[i].constant or ((self.nodes[i].k == 1) and (i in self.nodes[i].inputs)))]
        #
        self.bin2num = None  # Helper function. Converts binstate to statenum. It gets updated by `_update_trans_func`
        self.num2bin = None  # Helper function. Converts statenum to binstate. It gets updated by `_update_trans_func`
        self._update_trans_func()  # Updates helper functions and other variables
def __str__(self):
node_names = [node.name for node in self.nodes]
return "<BNetwork(name='{name:s}', N={number_of_nodes:d}, Nodes={nodes:})>".format(name=self.name, number_of_nodes=self.Nnodes, nodes=node_names)
#
# I/O Methods
#
@classmethod
def from_file(self, file, type='cnet', keep_constants=True, **kwargs):
"""
Load the Boolean Network from a file.
Args:
file (string) : The name of a file containing the Boolean Network.
type (string) : The type of file, either 'cnet' (default) or 'logical' for Boolean logical rules.
Returns:
BooleanNetwork (object) : The boolean network object.
See also:
:func:`from_string` :func:`from_dict`
"""
with open(file, 'r') as infile:
if type == 'cnet':
return self.from_string_cnet(infile.read(), keep_constants=keep_constants, **kwargs)
elif type == 'logical':
return self.from_string_boolean(infile.read(), keep_constants=keep_constants, **kwargs)
    @classmethod
    def from_string_cnet(self, string, keep_constants=True, **kwargs):
        """
        Instanciates a Boolean Network from a string in cnet format.

        Args:
            string (string): A cnet format representation of a Boolean Network.
            keep_constants (bool): Keep constants in the network. Defaults to ``True``.

        Returns:
            (BooleanNetwork)

        Examples:
            String should be structured as follow:

            .. code-block:: text

                #.v = number of nodes
                .v 1
                #.l = node label
                .l 1 node-a
                .l 2 node-b
                #.n = (node number) (in-degree) (input node 1) … (input node k)
                .n 1 2 4 5
                01 1 # transition rule

        See also:
            :func:`from_file` :func:`from_dict`
        """
        network_file = StringIO(string)
        logic = defaultdict(dict)
        line = network_file.readline()
        while line != "":
            # Skip comment ('#') and blank lines.
            if line[0] != '#' and line != '\n':
                # .v <#-nodes>: allocate an empty logic entry per node.
                if '.v' in line:
                    Nnodes = int(line.split()[1])
                    for inode in range(Nnodes):
                        logic[inode] = {'name': '', 'in': [], 'out': []}
                # .l <node-id> <node-name>  (ids in the file are 1-based)
                elif '.l' in line:
                    logic[int(line.split()[1]) - 1]['name'] = line.split()[2]
                # .n <node-id> <#-inputs> <input-node-id>...
                elif '.n' in line:
                    inode = int(line.split()[1]) - 1
                    indegree = int(line.split()[2])
                    for jnode in range(indegree):
                        logic[inode]['in'].append(int(line.split()[3 + jnode]) - 1)
                    # Pre-allocate one output entry per input state (2^k).
                    logic[inode]['out'] = [0 for i in range(2**indegree) if indegree > 0]
                    logic_line = network_file.readline().strip()
                    if indegree <= 0:
                        # No inputs: a blank logic line means a free variable
                        # (modelled as its own input with identity outputs);
                        # otherwise it is a constant with a single output.
                        if logic_line == '':
                            logic[inode]['in'] = [inode]
                            logic[inode]['out'] = [0, 1]
                        else:
                            logic[inode]['out'] = [int(logic_line)]
                    else:
                        # Read transition rules until a blank line; each line
                        # is expanded by expand_logic_line into explicit
                        # (input-state, output) rows.
                        while logic_line != '\n' and logic_line != '' and len(logic_line) > 1:
                            for nlogicline in expand_logic_line(logic_line):
                                logic[inode]['out'][binstate_to_statenum(nlogicline.split()[0])] = int(nlogicline.split()[1])
                            logic_line = network_file.readline().strip()
                # .e = end of file
                elif '.e' in line:
                    break
            line = network_file.readline()
        return self.from_dict(logic, keep_constants=keep_constants, **kwargs)
@classmethod
def from_string_boolean(self, string, keep_constants=True, **kwargs):
"""
Instanciates a Boolean Network from a Boolean update rules format.
Args:
string (string) : A boolean update rules format representation of a Boolean Network.
Returns:
(BooleanNetwork) : The boolean network object.
Examples:
String should be structured as follow
.. code-block:: text
# BOOLEAN RULES (this is a comment)
# node_name*=node_input_1 [logic operator] node_input_2 ...
NODE3*=NODE1 AND NODE2 ...
See also:
:func:`from_string` :func:`from_dict`
"""
logic = defaultdict(dict)
# parse lines to receive node names
network_file = StringIO(string)
line = network_file.readline()
i = 0
while line != "":
if line[0] == '#':
line = network_file.readline()
continue
logic[i] = {'name': line.split("*")[0].strip(), 'in': [], 'out': []}
line = network_file.readline()
i += 1
# Parse lines again to determine inputs and output sequence
network_file = StringIO(string)
line = network_file.readline()
i = 0
while line != "":
if line[0] == '#':
line = network_file.readline()
continue
eval_line = line.split("=")[1] # logical condition to evaluate
# RE checks for non-alphanumeric character before/after node name (node names are included in other node names)
# Additional characters added to eval_line to avoid start/end of string complications
input_names = [logic[node]['name'] for node in logic if re.compile('\W' + logic[node]['name'] + '\W').search('*' + eval_line + '*')]
input_nums = [node for input in input_names for node in logic if input == logic[node]['name']]
logic[i]['in'] = input_nums
# Determine output transitions
logic[i]['out'] = output_transitions(eval_line, input_names)
line = network_file.readline()
i += 1
return self.from_dict(logic, keep_constants=keep_constants, **kwargs)
@classmethod
def from_dict(self, logic, keep_constants=True, **kwargs):
"""Instanciaets a BooleanNetwork from a logic dictionary.
Args:
logic (dict) : The logic dict.
keep_constants (bool) :
Returns:
(BooleanNetwork)
See also:
:func:`from_file` :func:`from_dict`
"""
Nnodes = len(logic)
constants = {}
if 'name' in kwargs:
name = kwargs['name']
else:
name = ''
if keep_constants:
for i, nodelogic in logic.items():
# No inputs? It's a constant!
if len(nodelogic['in']) == 0:
constants[i] = logic[i]['out'][0]
return BooleanNetwork(logic=logic, Nnodes=Nnodes, constants=constants, keep_constants=keep_constants, **kwargs)
def to_cnet(self, file=None, adjust_no_input=False):
"""Outputs the network logic to ``.cnet`` format, which is similar to the Berkeley Logic Interchange Format (BLIF).
This is the format used by BNS to compute attractors.
Args:
file (string,optional) : A string of the file to write the output to. If not supplied, a string will be returned.
adjust_no_input (bool) : Adjust output string for nodes with no input.
Returns:
(string) : The ``.cnet`` format string.
Note:
See `BNS <https://people.kth.se/~dubrova/bns.html>`_ for more information.
"""
# Copy
logic = self.logic.copy()
#
if adjust_no_input:
for i, data in logic.items():
# updates in place
if len(data['in']) == 0:
data['in'] = [i + 1]
data['out'] = [0, 1]
bns_string = '.v ' + str(self.Nnodes) + '\n' + '\n'
for i in range(self.Nnodes):
k = len(logic[i]['in'])
bns_string += '.n ' + str(i + 1) + " " + str(k) + " " + " ".join([str(v + 1) for v in logic[i]['in']]) + "\n"
for statenum in range(2**k):
# If is a constant (TODO: This must come from the BooleanNode, not the logic)
if len(logic[i]['out']) == 1:
bns_string += str(logic[i]['out'][statenum]) + "\n"
# Not a constant, print the state and output
else:
bns_string += statenum_to_binstate(statenum, base=k) + " " + str(logic[i]['out'][statenum]) + "\n"
bns_string += "\n"
if file is None:
return bns_string
else:
if isinstance(file, string):
with open(file, 'w') as iofile:
iofile.write(bns_string)
iofile.close()
else:
raise AttributeError("File format not supported. Please specify a string.")
#
# Methods
#
def structural_graph(self, remove_constants=False):
"""Calculates and returns the structural graph of the boolean network.
Args:
remove_constants (bool) : Remove constants from the graph. Defaults to ``False``.
Returns:
G (networkx.Digraph) : The boolean network structural graph.
"""
self._sg = nx.DiGraph(name="Structural Graph: " + self.name)
# Add Nodes
self._sg.add_nodes_from((i, {'label': n.name}) for i, n in enumerate(self.nodes))
for target in range(self.Nnodes):
for source in self.logic[target]['in']:
self._sg.add_edge(source, target, **{'weight': 1.})
if remove_constants:
self._sg.remove_nodes_from(self.get_constants().keys())
#
return self._sg
def number_interactions(self):
"""Returns the number of interactions in the Structural Graph (SG).
Practically, it returns the number of edges of the SG.
Returns:
int
"""
self._check_compute_variables(sg=True)
return nx.number_of_edges(self._sg)
def structural_indegrees(self):
"""Returns the in-degrees of the Structural Graph. Sorted.
Returns:
(int) : the number of in-degrees.
See also:
:func:`structural_outdegrees`, :func:`effective_indegrees`, :func:`effective_outdegrees`
"""
self._check_compute_variables(sg=True)
return sorted([d for n, d in self._sg.in_degree()], reverse=True)
def structural_outdegrees(self):
"""Returns the out-degrees of the Structural Graph. Sorted.
Returns:
(list) : out-degrees.
See also:
:func:`structural_indegrees`, :func:`effective_indegrees`, :func:`effective_outdegrees`
"""
self._check_compute_variables(sg=True)
return sorted([d for n, d in self._sg.out_degree()], reverse=True)
def signed_interaction_graph(self):
"""Calculates and returns the signed interaction graph of the boolean network. Here, edge weights denote
if an interaction is activation (1), inhibition (-1), or cannot be classified (0).
Returns:
G (networkx.Digraph) : The boolean network structural graph.
"""
signed_ig = nx.DiGraph(name="Signed Interaction Graph: " + self.name)
signed_ig.add_nodes_from((i, {'label': n.name}) for i, n in enumerate(self.nodes))
for target in range(self.Nnodes):
input_signs = self.nodes[target].input_signs()
for idx, source in enumerate(self.logic[target]['in']):
signed_ig.add_edge(source, target, **{'weight': input_signs[idx]})
return signed_ig
    def effective_graph(self, bound='mean', threshold=None):
        """Computes and returns the effective graph of the network.

        In practive it asks each :class:`~cana.boolean_node.BooleanNode` for their :func:`~cana.boolean_node.BooleanNode.edge_effectiveness`.

        Args:
            bound (string) : The bound to which compute input redundancy.
                Can be one of : ["lower", "mean", "upper", "tuple"].
                Defaults to "mean".
            threshold (float) : Only return edges above a certain effective connectivity threshold.
                This is usefull when computing graph measures at diffent levels.

        Returns:
            (networkx.DiGraph) : directed graph

        See Also:
            :func:`~cana.boolean_node.BooleanNode.edge_effectiveness`
        """
        # Record the threshold used in the graph name, for provenance.
        if threshold is not None:
            self._eg = nx.DiGraph(name="Effective Graph: " + self.name + "(Threshold: {threshold:.2f})".format(threshold=threshold))
        else:
            self._eg = nx.DiGraph(name="Effective Graph: " + self.name + "(Threshold: None)")
        # Add Nodes
        for i, node in enumerate(self.nodes, start=0):
            self._eg.add_node(i, **{'label': node.name})
        # Add Edges, weighted by the per-input edge effectiveness e_i.
        for i, node in enumerate(self.nodes, start=0):
            e_is = node.edge_effectiveness(bound=bound)
            for inputs, e_i in zip(self.logic[i]['in'], e_is):
                # If there is a threshold, only return those number above the threshold. Else, return all edges.
                if (threshold is None) or ((threshold is not None) and (e_i > threshold)):
                    self._eg.add_edge(inputs, i, **{'weight': e_i})
        return self._eg
def conditional_effective_graph(self, conditioned_nodes={}, bound='mean', threshold=None):
    """Computes and returns the BN effective graph conditioned on some known states.

    Args:
        conditioned_nodes (dict) : a dictionary mapping node ids to their conditioned states.
            dict of form { nodeid : nodestate }. Read-only here (never mutated).
        bound (string) : The bound to which compute input redundancy.
            Can be one of : ["lower", "mean", "upper", "tuple"].
            Defaults to "mean".
        threshold (float) : Only return edges above a certain effective connectivity threshold.
            This is usefull when computing graph measures at diffent levels.
    Returns:
        (networkx.DiGraph) : directed graph
    See Also:
        :func:`~cana.boolean_network.BooleanNetwork.effective_graph`
    """
    # Start from the full (unthresholded) effective graph; thresholding happens at the end.
    conditional_eg = copy.deepcopy(self.effective_graph(bound=bound, threshold=None))
    # Nodes that belong to the conditioned subgraph (conditioned nodes + their neighborhoods).
    conditioned_subgraph = set([])
    # make a copy of the logic dict so we can edit it
    conditioned_logic = copy.deepcopy(self.logic)
    # separate input conditioned nodes and nodes that get conditioned along the way
    all_conditioned_nodes = dict(conditioned_nodes)
    # Queue of nodes to condition (BFS-style propagation of fixed states)
    nodes2condition = list(conditioned_nodes.keys())
    while len(nodes2condition) > 0:
        conditioned_node = nodes2condition.pop(0)
        conditioned_value = str(all_conditioned_nodes[conditioned_node])
        conditioned_subgraph.add(conditioned_node)
        # take all successors of the conditioned node ignoring self-loops
        successors = [n for n in list(conditional_eg.neighbors(conditioned_node)) if n != conditioned_node]
        conditioned_subgraph.update(successors)
        # loop through all successors of the conditioned node and restrict their logic
        for n in successors:
            # find the index of the conditioned node in the successor's input list
            conditioned_node_idx = conditioned_logic[n]['in'].index(conditioned_node)
            conditioned_subgraph.update(conditioned_logic[n]['in'])
            # the new successor inputs without the conditioned node
            new_successor_inputs = conditioned_logic[n]['in'][:conditioned_node_idx] + conditioned_logic[n]['in'][(conditioned_node_idx + 1):]
            newk = len(new_successor_inputs)
            # build a conditioned LUT: the subset of the original for which the
            # conditioned node is fixed to its value
            if newk == 0:
                # no inputs left: the node becomes a constant; duplicate the output
                # so the LUT still has 2 rows (a degenerate 1-input table)
                new_successor_outputs = [conditioned_logic[n]['out'][binstate_to_statenum(conditioned_value)]] * 2
            else:
                new_successor_outputs = []
                for sn in range(2**newk):
                    binstate = statenum_to_binstate(sn, newk)
                    # re-insert the fixed value at the conditioned input's position
                    binstate = binstate[:conditioned_node_idx] + conditioned_value + binstate[conditioned_node_idx:]
                    new_successor_outputs.append(conditioned_logic[n]['out'][binstate_to_statenum(binstate)])
            # use the new logic to calculate new edge effectiveness values
            new_edge_effectiveness = BooleanNode().from_output_list(new_successor_outputs).edge_effectiveness(bound=bound)
            # update the conditional effective graph with the new edge weights
            for i in range(newk):
                conditional_eg[new_successor_inputs[i]][n]['weight'] = new_edge_effectiveness[i]
            # update conditioned_logic in case these nodes are further modified
            # by additional conditioned variables
            conditioned_logic[n]['in'] = new_successor_inputs
            conditioned_logic[n]['out'] = new_successor_outputs
            # check if we just made a constant node
            if n not in all_conditioned_nodes and len(set(new_successor_outputs)) == 1:
                # in which case, add it to the conditioned set and propagate the effect
                nodes2condition.append(n)
                all_conditioned_nodes[n] = new_successor_outputs[0]
    conditional_eg.name = "Conditioned Effective Graph: {name:s} conditioned on {nodes:s}".format(name=self.name, nodes=str(conditioned_nodes))
    if threshold is None:
        conditional_eg.name = conditional_eg.name + " (Threshold: None)"
    else:
        conditional_eg.name = conditional_eg.name + " (Threshold: {threshold:.2f})".format(threshold=threshold)
        # drop edges at or below the threshold (only meaningful when a threshold is given)
        remove_edges = [(i, j) for i, j, d in conditional_eg.edges(data=True) if d['weight'] <= threshold]
        conditional_eg.remove_edges_from(remove_edges)
    # annotate nodes with the conditioning information
    dict_conditioned_subgraph = {n: (n in conditioned_subgraph) for n in conditional_eg.nodes()}
    nx.set_node_attributes(conditional_eg, values=dict_conditioned_subgraph, name='conditioned_subgraph')
    dict_all_conditioned_nodes = {n: all_conditioned_nodes.get(n, None) for n in conditional_eg.nodes()}
    nx.set_node_attributes(conditional_eg, values=dict_all_conditioned_nodes, name='conditioned_state')
    return conditional_eg
def effective_indegrees(self):
    """Return the in-degrees of the Effective Graph, sorted in descending order.

    Returns:
        (list)
    See also:
        :func:`effective_outdegrees`, :func:`structural_indegrees`, :func:`structural_outdegrees`
    """
    self._check_compute_variables(eg=True)
    degrees = (deg for _, deg in self._eg.in_degree())
    return sorted(degrees, reverse=True)
def effective_outdegrees(self):
    """Return the out-degrees of the Effective Graph, sorted in descending order.

    Returns:
        (list)
    See also:
        :func:`effective_indegrees`, :func:`structural_indegrees`, :func:`structural_outdegrees`
    """
    self._check_compute_variables(eg=True)
    degrees = (deg for _, deg in self._eg.out_degree())
    return sorted(degrees, reverse=True)
def activity_graph(self, threshold=None):
    """Build the activity graph as proposed in Ghanbarnejad & Klemm (2012) EPL, 99.

    Args:
        threshold (float) : If given, only edges with activity strictly above this
            value are included; otherwise all edges with positive activity.
    Returns:
        (networkx.DiGraph) : directed graph
    """
    if threshold is None:
        act_g = nx.DiGraph(name="Activity Graph: " + self.name + "(Threshold: None)")
    else:
        act_g = nx.DiGraph(name="Activity Graph: " + self.name + "(Threshold: %.2f)" % threshold)
    # One graph node per boolean node.
    for idx, bnode in enumerate(self.nodes):
        act_g.add_node(idx, label=bnode.name)
    # One edge per (input, target) pair, weighted by the input's activity.
    for idx, bnode in enumerate(self.nodes):
        for src, a_i in zip(self.logic[idx]['in'], bnode.activities()):
            keep = (a_i > 0) if threshold is None else (a_i > threshold)
            if keep:
                act_g.add_edge(src, idx, weight=a_i)
    return act_g
def state_transition_graph(self):
    """Create the full State Transition Graph (STG) for the Boolean Network.

    Every configuration is a node (labeled by its binary string) with a single
    outgoing edge to its successor under synchronous update.

    Returns:
        (networkx.DiGraph) : The state transition graph for the Boolean Network.
    """
    self._stg = nx.DiGraph(name='STG: ' + self.name)
    for statenum in range(self.Nstates):
        self._stg.add_node(statenum, label=self.num2bin(statenum))
    for statenum in range(self.Nstates):
        binstate = self.num2bin(statenum)
        self._stg.add_edge(statenum, self.bin2num(self.step(binstate)))
    return self._stg
def stg_indegree(self):
    """Returns the In-degrees of the State-Transition-Graph (STG). Sorted descending.

    Returns:
        list
    """
    self._check_compute_variables(stg=True)
    # BUGFIX: in networkx >= 2.0, ``in_degree()`` returns a DegreeView of
    # (node, degree) pairs, not a dict, so ``.values()`` raised AttributeError.
    # Use the same iteration pattern as ``effective_indegrees``.
    return sorted([d for n, d in self._stg.in_degree()], reverse=True)
def step(self, initial):
    """Advance the boolean network one synchronous step from ``initial``.

    Each node receives its masked slice of the configuration and computes its
    next output; the outputs are concatenated into the next configuration.

    Args:
        initial (string) : the initial configuration state.
    Returns:
        (string) : The stepped binary state.
    """
    assert len(initial) == self.Nnodes, "The initial configuration state does not match the number of nodes"
    next_outputs = []
    for bnode in self.nodes:
        next_outputs.append(bnode.step(bnode.input_mask(initial)))
    return ''.join(next_outputs)
def trajectory(self, initial, length=2):
    """Compute a trajectory of ``length`` steps without using the State Transition Graph (STG).

    Args:
        initial (string) : the starting configuration.
        length (int) : number of steps to take.
    Returns:
        (list) : ``length + 1`` configurations, starting with ``initial``.
    """
    states = [initial]
    for _ in range(length):
        states.append(self.step(states[-1]))
    return states
def trajectory_to_attractor(self, initial, precompute_attractors=True, return_attractor=False):
    """Computes the trajectory starting at `initial` until it reaches an attracor (this is garanteed).

    Args:
        initial (string) : the initial binstate.
        precompute_attractors (bool) : use precomputed attractors, default True.
        return_attractor (bool) : also return the attractor reached, default False.
    Returns:
        (list) : the state trajectory between initial and the final attractor state.
        if return_attractor: (list): the attractor
    """
    if precompute_attractors:
        # Attractors are already known: step until we land on any known attractor state.
        self._check_compute_variables(attractors=True)
        attractor_states = [self.num2bin(s) for att in self._attractors for s in att]
        trajectory = [initial]
        while (trajectory[-1] not in attractor_states):
            trajectory.append(self.step(trajectory[-1]))
        if return_attractor:
            attractor = self.attractor(trajectory[-1])
    else:
        # No precomputed attractors: step until a state repeats (first revisit).
        trajectory = [initial]
        while (trajectory[-1] not in trajectory[:-1]):
            trajectory.append(self.step(trajectory[-1]))
        # the attractor starts at the first occurence of the repeated element
        idxatt = trajectory.index(trajectory[-1])
        if return_attractor:
            # cycle states between the first occurrence and the repeat (exclusive)
            attractor = [self.bin2num(s) for s in trajectory[idxatt:-1]]
        # truncate the trajectory at the first attractor state
        trajectory = trajectory[:(idxatt + 1)]
    if return_attractor:
        return trajectory, attractor
    else:
        return trajectory
def attractor(self, initial):
    """Return the attractor reached from ``initial``.

    Follows the trajectory to its final state and returns the attractor
    containing that state.

    Args:
        initial (string): the initial state.
    Returns:
        attractor (list): the attractor containing the reached state.
    """
    self._check_compute_variables(attractors=True)
    final_state = self.trajectory_to_attractor(initial)[-1]
    final_num = self.bin2num(final_state)
    for att in self._attractors:
        if final_num in att:
            return att
def attractors(self, mode='stg'):
    """Find the attractors of the boolean network.

    Args:
        mode (string) : ``stg`` or ``bns``. Defaults to ``stg``.
            ``stg``: Uses the full State Transition Graph (STG) and identifies the attractors as strongly connected components.
            ``bns``: Uses the SAT-based :mod:`cana.bns` to find all attractors.
    Returns:
        attractors (list) : A list containing all attractors for the boolean network,
            sorted by size (largest first).
    Raises:
        AttributeError : when ``mode`` is not recognised.
    See also:
        :mod:`cana.bns`
    """
    self._check_compute_variables(stg=True)
    if mode == 'stg':
        found = [list(component) for component in nx.attracting_components(self._stg)]
    elif mode == 'bns':
        found = bns.attractors(self.to_cnet(file=None, adjust_no_input=False))
    else:
        raise AttributeError("Could not find the specified mode. Try 'stg' or 'bns'.")
    self._attractors = found
    self._attractors.sort(key=len, reverse=True)
    return self._attractors
def network_bias(self):
    """Network Bias: the sum of individual node biases divided by the number of nodes.

    Practically, it asks each node for its own bias.

    See Also:
        :func:`~cana.boolean_node.BooleanNode.bias`
    """
    total_bias = sum(bnode.bias() for bnode in self.nodes)
    return total_bias / self.Nnodes
def basin_entropy(self, base=2):
    """Entropy of the basin-size distribution of the STG.

    Basin sizes are taken as the weakly connected components of the STG,
    normalized by the total number of configurations.

    Args:
        base (int) : logarithm base for the entropy.
    """
    self._check_compute_variables(stg=True)
    basin_sizes = [len(wcc) for wcc in nx.weakly_connected_components(self._stg)]
    prob_vec = np.array(basin_sizes) / 2.0**self.Nnodes
    return entropy(prob_vec, base=base)
def set_constant(self, node, constant=True, state=None):
    """Set or unset a node as a constant.

    Args:
        node (int) : The node ``id`` in the logic dict.
        constant (Boolean) : Whether to set or unset the node as a constant.
        state (str; optional) : The state value to which to set the node. Either '0' or '1'; default to current state value.
    """
    # Delegate the actual flag change to the node itself.
    self.nodes[node].set_constant(constant=constant, state=state)
    # Recount constants and refresh the binary<->numeric state converters.
    self.Nconstants = sum(1 for n in self.nodes if n.constant)
    self._update_trans_func()
def get_constants(self):
    """Return a dict mapping node id -> node for all constant nodes."""
    constants = {}
    for idx, bnode in enumerate(self.nodes):
        if bnode.constant:
            constants[idx] = bnode
    return constants
def unset_all_constants(self):
    """Clear the constant flag on every node and disable constant handling."""
    self.keep_constants = False
    for bnode in self.nodes:
        bnode.set_constant(constant=False)
def _update_trans_func(self):
    """Sets the correct functions to convert from binary-state format to/from numeric-state format.

    When ``keep_constants`` is on, the numeric state space only enumerates the
    non-constant nodes, and constants are re-inserted from a template when
    converting back to a binary string.
    """
    if self.keep_constants:
        # Only free (non-constant) nodes contribute to the state count.
        self.Nstates = 2**(self.Nnodes - self.Nconstants)
        # The template is a list that identifies the values of constant nodes: [None, '1', None, '0'].
        constant_template = [None if not node.constant else node.state for node in self.nodes]
        # Converters close over the template; they are rebuilt each time constants change.
        self.bin2num = lambda bs: constantbinstate_to_statenum(bs, constant_template)
        self.num2bin = lambda sn: binstate_to_constantbinstate(
            statenum_to_binstate(sn, base=self.Nnodes - self.Nconstants), constant_template)
    else:
        # No constants: plain binary <-> integer conversion over all nodes.
        self.Nstates = 2**self.Nnodes
        self.bin2num = binstate_to_statenum
        self.num2bin = lambda sn: statenum_to_binstate(sn, base=self.Nnodes)
#
# Dynamical Control Methods
#
def state_transition_graph_reachability(self, filename=None):
    """Generate the State-Transition-Graph Reachability (STG-R) dictionary.

    The STG-R maps every configuration to the number of other configurations
    reachable from it. Used by the State Transition Graph Control Analysis.

    Args:
        filename (string) : Optional path of a pickle cache; when given, the
            STG-R is loaded from it if present, otherwise computed and saved.
    Returns:
        (dict) : The STG-R in dict format.
    """
    self._check_compute_variables(stg=True)
    self._stg_r = {}
    if filename is None:
        # No cache: compute directly.
        for source in self._stg:
            self._stg_r[source] = len(self._dfs_reachable(self._stg, source)) - 1.0
        return self._stg_r
    try:
        with open(filename, 'rb') as handle:
            self._stg_r = pickle.load(handle)
    except IOError:
        # Cache miss: compute and write the cache.
        print("Finding STG dict")
        for source in self._stg:
            self._stg_r[source] = len(self._dfs_reachable(self._stg, source)) - 1.0
        with open(filename, 'wb') as handle:
            pickle.dump(self._stg_r, handle)
    return self._stg_r
def attractor_driver_nodes(self, min_dvs=1, max_dvs=4, verbose=False):
    """Search for minimal driver-node sets by brute force over all combinations
    of size :math:`min <= x <= max`.

    Args:
        min_dvs (int) : Mininum number of driver nodes to search.
        max_dvs (int) : Maximum number of driver nodes to search.
        verbose (bool) : Print progress per set size.
    Returns:
        (list) : The list of driver-node sets found in the search.
    Note:
        This is an inefficient brute-force search.
    See also:
        :func:`controlled_state_transition_graph`, :func:`controlled_attractor_graph`.
    """
    candidate_ids = list(range(self.Nnodes))
    # Constants are not controllable; exclude them from the search space.
    if self.keep_constants:
        for const_id in self.get_constants().keys():
            candidate_ids.remove(const_id)
    found = []
    nr_dvs = min_dvs
    # Grow the set size until a full-control set is found (or max is reached).
    while not found and nr_dvs <= max_dvs:
        if verbose:
            print("Trying with {:d} Driver Nodes".format(nr_dvs))
        for combo in itertools.combinations(candidate_ids, nr_dvs):
            dvs = list(combo)
            cag = self.controlled_attractor_graph(dvs)
            # A set controls the network iff every attractor is reachable from every other.
            if self.mean_reachable_attractors(cag) == 1.0:
                found.append(dvs)
        nr_dvs += 1
    if not found:
        warnings.warn("No attractor control driver variable sets found after exploring all subsets of size {:,d} to {:,d} nodes!!".format(min_dvs, max_dvs))
    return found
def controlled_state_transition_graph(self, driver_nodes=[]):
    """Returns the Controlled State-Transition-Graph (CSTG).

    In practice, it copies the original STG, flips driver nodes (variables), and updates the CSTG.

    Args:
        driver_nodes (list) : The list of driver nodes. Read-only here (a copy is
            handed to the bit-flipping helper), so the mutable default is safe.
    Returns:
        (networkx.DiGraph) : The Controlled State-Transition-Graph.
    See also:
        :func:`attractor_driver_nodes`, :func:`controlled_attractor_graph`.
    """
    self._check_compute_variables(attractors=True)
    if self.keep_constants:
        for dv in driver_nodes:
            if dv in self.get_constants():
                warnings.warn("Cannot control a constant variable '%s'! Skipping" % self.nodes[dv].name )
    cstg = copy.deepcopy(self._stg)
    cstg.name = 'C-' + cstg.name + ' (' + ','.join(map(str, [self.nodes[dv].name for dv in driver_nodes])) + ')'
    # add the control pertubations applied to all configurations
    for statenum in range(self.Nstates):
        binstate = self.num2bin(statenum)
        # all states reachable by flipping any subset of the driver bits
        controlled_states = flip_binstate_bit_set(binstate, copy.copy(driver_nodes))
        # the unperturbed state itself is not a control transition
        controlled_states.remove(binstate)
        for constate in controlled_states:
            cstg.add_edge(statenum, self.bin2num(constate))
    return cstg
def pinning_controlled_state_transition_graph(self, driver_nodes=[]):
    """Returns a dictionary of Controlled State-Transition-Graph (CSTG)
    under the assumptions of pinning controllability.

    In practice, it copies the original STG, flips driver nodes (variables), and updates the CSTG.

    Args:
        driver_nodes (list) : The list of driver nodes. Read-only here, so the
            mutable default is safe.
    Returns:
        (dict) : one Pinning Controlled State-Transition-Graph per attractor,
            keyed by the attractor as a tuple.
    See also:
        :func:`controlled_state_transition_graph`, :func:`attractor_driver_nodes`, :func:`controlled_attractor_graph`.
    """
    self._check_compute_variables(attractors=True)
    if self.keep_constants:
        for dv in driver_nodes:
            if dv in self.get_constants():
                warnings.warn("Cannot control a constant variable {dv:s}'! Skipping".format(dv=self.nodes[dv].name))
    # number of nodes that are free (not pinned)
    uncontrolled_system_size = self.Nnodes - len(driver_nodes)
    pcstg_dict = {}
    for att in self._attractors:
        # For each STG edge inside the attractor, the (source, sink) values of the
        # driver nodes only — these are the pinned trajectories the drivers follow.
        dn_attractor_transitions = [tuple(''.join([self.num2bin(s)[dn] for dn in driver_nodes]) for s in att_edge)
                                    for att_edge in self._stg.subgraph(att).edges()]
        # Every combination of free-node configuration x pinned driver values.
        pcstg_states = [self.bin2num(binstate_pinned_to_binstate(
            statenum_to_binstate(statenum, base=uncontrolled_system_size), attsource, pinned_var=driver_nodes))
            for statenum in range(2**uncontrolled_system_size) for attsource, attsink in dn_attractor_transitions]
        pcstg = nx.DiGraph(name='STG: ' + self.name)
        pcstg.name = 'PC-' + pcstg.name + ' (' + ','.join(map(str, [self.nodes[dv].name for dv in driver_nodes])) + ')'
        pcstg.add_nodes_from((ps, {'label': ps}) for ps in pcstg_states)
        # Add the pinned dynamics: free nodes update normally, drivers stay pinned.
        for attsource, attsink in dn_attractor_transitions:
            for statenum in range(2**uncontrolled_system_size):
                initial = binstate_pinned_to_binstate(statenum_to_binstate(statenum, base=uncontrolled_system_size), attsource, pinned_var=driver_nodes)
                pcstg.add_edge(self.bin2num(initial), self.bin2num(self.pinned_step(initial, pinned_binstate=attsink, pinned_var=driver_nodes)))
        pcstg_dict[tuple(att)] = pcstg
    return pcstg_dict
def pinned_step(self, initial, pinned_binstate, pinned_var):
    """Steps the boolean network 1 step from the given initial input condition when the driver variables are pinned
    to their controlled states.

    Args:
        initial (string) : the initial state.
        pinned_binstate : the pinned driver-node values for the next step.
        pinned_var (list) : indices of the pinned (driver) nodes.
    Returns:
        (string) : The stepped binary state.
    """
    # NOTE(review): ``pinned_binstate`` is never used below — pinned nodes keep
    # their value from ``initial`` (``initial[i]``) instead of transitioning to the
    # next pinned state. This is only equivalent for fixed-point driver trajectories;
    # confirm intent against the caller in pinning_controlled_state_transition_graph.
    assert len(initial) == self.Nnodes
    # Non-pinned nodes update from their inputs' current values (gathered via the
    # node's 'in' list); pinned nodes are copied through unchanged.
    return ''.join([str(node.step(''.join(initial[j] for j in self.logic[i]['in']))) if not (i in pinned_var) else initial[i] for i, node in enumerate(self.nodes, start=0)])
def controlled_attractor_graph(self, driver_nodes=[]):
    """Build the Controlled Attractor Graph (CAG): attractors are nodes, and an
    edge i->j means attractor j is reachable from attractor i when the driver
    nodes may be perturbed in attractor states.

    Args:
        driver_nodes (list) : the driver nodes. Read-only here, so the mutable
            default is safe.
    Returns:
        (networkx.DiGraph) : The Controlled Attractor Graph (CAG)
    See also:
        :func:`attractor_driver_nodes`, :func:`controlled_state_transition_graph`.
    """
    self._check_compute_variables(attractors=True)
    if self.keep_constants:
        for dv in driver_nodes:
            if dv in self.get_constants():
                warnings.warn("Cannot control a constant variable '%s'! Skipping" % self.nodes[dv].name )
    attractor_states = [s for att in self._attractors for s in att]
    cstg = copy.deepcopy(self._stg)
    cstg.name = 'C-' + cstg.name + ' Att(' + ','.join(map(str, [self.nodes[dv].name for dv in driver_nodes])) + ')'
    # add the control pertubations applied to only attractor configurations
    for statenum in attractor_states:
        binstate = self.num2bin(statenum)
        controlled_states = flip_binstate_bit_set(binstate, copy.copy(driver_nodes))
        # the unperturbed state itself is not a control transition
        controlled_states.remove(binstate)
        for constate in controlled_states:
            cstg.add_edge(statenum, self.bin2num(constate))
    Nattract = len(self._attractors)
    cag = nx.DiGraph(name='CAG: ' + cstg.name)
    # Nodes: one per attractor, labeled by its member configurations.
    for i, attr in enumerate(self._attractors):
        cag.add_node(i, **{'label': '|'.join([self.num2bin(a) for a in attr])})
    # Edges: pairwise reachability between attractors in the controlled STG.
    for i in range(Nattract):
        ireach = self._dfs_reachable(cstg, self._attractors[i][0])
        for j in range(i + 1, Nattract):
            if self._attractors[j][0] in ireach:
                cag.add_edge(i, j)
            if self._attractors[i][0] in self._dfs_reachable(cstg, self._attractors[j][0]):
                cag.add_edge(j, i)
    return cag
def mean_reachable_configurations(self, cstg):
    """Mean Fraction of Reachable Configurations.

    Args:
        cstg (networkx.DiGraph) : The Controlled State-Transition-Graph.
    Returns:
        (float) : Mean Fraction of Reachable Configurations
    """
    # Number of other configurations reachable from each source.
    reach_counts = [len(self._dfs_reachable(cstg, source)) - 1.0 for source in cstg]
    # Normalize by the maximum possible (all other configurations), averaged over sources.
    norm = (2.0**self.Nnodes - 1.0) * len(reach_counts)
    return sum(reach_counts) / norm
def mean_controlable_configurations(self, cstg):
    """Mean Fraction of Controlable Configurations.

    Measures how many additional configurations become reachable under control,
    compared to the uncontrolled STG reachability (``self._stg_r``).

    Args:
        cstg (networkx.DiGraph) : The Controlled State-Transition-Graph.
    Returns:
        (float) : Mean Fraction of Controlable Configurations.
    """
    self._check_compute_variables(stg_r=True)
    control_gains = []
    for source in cstg:
        controlled_reach = len(self._dfs_reachable(cstg, source)) - 1.0
        # Gain over the uncontrolled reachability of the same source.
        control_gains.append(controlled_reach - self._stg_r[source])
    norm = (2.0**self.Nnodes - 1.0) * len(control_gains)
    return sum(control_gains) / norm
def mean_reachable_attractors(self, cag, norm=True):
    """Mean Fraction of Reachable Attractors for a Controlled Attractor Graph (CAG).

    Args:
        cag (networkx.DiGraph) : A Controlled Attractor Graph (CAG).
        norm (bool) : accepted for API compatibility; not used by the current
            implementation.
    Returns:
        (float) Mean Fraction of Reachable Attractors
    """
    att_norm = (float(len(cag)) - 1.0) * len(cag)
    if att_norm == 0:
        # a single attractor: everything is trivially reachable
        return 1
    # count the other attractors reachable from each attractor
    reach_counts = [len(self._dfs_reachable(cag, idxatt)) - 1.0 for idxatt in cag]
    return sum(reach_counts) / att_norm
def fraction_pinned_attractors(self, pcstg_dict):
    """Fraction of attractors that remain attracting under pinning control.

    Args:
        pcstg_dict (dict of networkx.DiGraph) : The dictionary of Pinned Controlled State-Transition-Graphs,
            keyed by attractor (as a tuple).
    Returns:
        (float) : fraction of attractors that are attracting components of their
            pinned controlled STG.
    """
    reached_attractors = []
    for att, pcstg in pcstg_dict.items():
        pinned_att = list(nx.attracting_components(pcstg))
        # BUGFIX: removed leftover debug `print(set(att), pinned_att)` that
        # polluted stdout on every call.
        reached_attractors.append(set(att) in pinned_att)
    return sum(reached_attractors) / float(len(pcstg_dict))
def fraction_pinned_configurations(self, pcstg_dict):
    """Fraction of successfully Pinned Configurations per attractor.

    For each attractor's pinned controlled STG, returns the fraction of
    configurations in the weakly connected component(s) whose attracting
    component is the attractor itself; 0 when the attractor is not reached.

    Args:
        pcstg_dict (dict of networkx.DiGraph) : The dictionary of Pinned Controlled State-Transition-Graphs.
    Returns:
        (list) : the Fraction of successfully Pinned Configurations to each attractor
    """
    pinned_configurations = []
    for att, pcstg in pcstg_dict.items():
        target = set(att)
        att_reached = False
        for wcc in nx.weakly_connected_components(pcstg):
            if target in list(nx.attracting_components(pcstg.subgraph(wcc))):
                pinned_configurations.append(len(wcc) / len(pcstg))
                att_reached = True
        if not att_reached:
            pinned_configurations.append(0)
    return pinned_configurations
def mean_fraction_pinned_configurations(self, pcstg_dict):
    """Mean Fraction of successfully Pinned Configurations over all attractors.

    Args:
        pcstg_dict (dict of networkx.DiGraph) : The dictionary of Pinned Controlled State-Transition-Graphs.
    Returns:
        (float) : the mean Fraction of successfully Pinned Configurations
    """
    fractions = self.fraction_pinned_configurations(pcstg_dict)
    return sum(fractions) / len(pcstg_dict)
def _dfs_reachable(self, G, source):
    """Return the nodes reachable from ``source`` in DFS pre-ordering (includes ``source``)."""
    return list(nx.dfs_preorder_nodes(G, source))
#
# Feedback Vertex Set (FVS)
#
def feedback_vertex_set_driver_nodes(self, graph='structural', method='grasp', max_iter=1, max_search=11, keep_self_loops=True, *args, **kwargs):
    """The minimum set of necessary driver nodes to control the network based on Feedback Vertex Set (FVS) theory.

    Args:
        graph (string) : Which graph to perform computation on: 'structural' or 'effective'.
        method (string) : FVS method. ``bruteforce`` or ``grasp`` (default).
        max_iter (int) : The maximum number of iterations used by the grasp method.
        max_search (int) : The maximum number of searched used by the bruteforce method.
        keep_self_loops (bool) : Keep or remove self loop in the graph to be searched.
    Returns:
        (list) : A list-of-sets with FVS solution nodes (each including the input nodes).
    Raises:
        AttributeError : when ``graph`` or ``method`` is not recognised.
    Note:
        When computing FVS on the structural graph, you might want to use ``remove_constants=True``
        to make sure the resulting set is minimal - since constants are not controlabled by definition.
        Also, when computing on the effective graph, you can define the desired ``threshold`` level.
    """
    self._check_compute_variables(sg=True)
    if graph == 'structural':
        dg = self.structural_graph(*args, **kwargs)
    elif graph == 'effective':
        # BUGFIX: `effective_graph` takes no `mode` argument (its signature is
        # (bound, threshold)); passing mode='input' raised a TypeError.
        dg = self.effective_graph(bound='mean', threshold=None, *args, **kwargs)
    else:
        raise AttributeError("The graph type '%s' is not accepted. Try 'structural' or 'effective'." % graph)
    #
    if method == 'grasp':
        fvssets = fvs.fvs_grasp(dg, max_iter=max_iter, keep_self_loops=keep_self_loops)
    elif method == 'bruteforce':
        fvssets = fvs.fvs_bruteforce(dg, max_search=max_search, keep_self_loops=keep_self_loops)
    else:
        raise AttributeError("The FVS method '%s' does not exist. Try 'grasp' or 'bruteforce'." % method)
    # Input nodes are not controllable from within the network; always include them.
    fvssets = [fvc.union(set(self.input_nodes)) for fvc in fvssets]
    return fvssets
#
# Minimum Dominating Set
#
def minimum_dominating_set_driver_nodes(self, graph='structural', max_search=5, keep_self_loops=True, *args, **kwargs):
    """The minimun set of necessary driver nodes to control the network based on Minimum Dominating Set (MDS) theory.

    Args:
        graph (string) : Which graph to perform computation on: 'structural' or 'effective'.
        max_search (int) : Maximum search of additional variables. Defaults to 5.
        keep_self_loops (bool) : If self-loops are used in the computation.
    Returns:
        (list) : A list-of-lists with MDS solution nodes.
    Raises:
        AttributeError : when ``graph`` is not recognised.
    """
    self._check_compute_variables(sg=True)
    #
    if graph == 'structural':
        dg = self.structural_graph(*args, **kwargs)
    elif graph == 'effective':
        # BUGFIX: `effective_graph` takes no `mode` argument (its signature is
        # (bound, threshold)); passing mode='input' raised a TypeError.
        dg = self.effective_graph(bound='mean', threshold=None, *args, **kwargs)
    else:
        raise AttributeError("The graph type '%s' is not accepted. Try 'structural' or 'effective'." % graph)
    #
    mdssets = mds.mds(dg, max_search=max_search, keep_self_loops=keep_self_loops)
    return mdssets
# Structural Controllability
#
def structural_controllability_driver_nodes(self, graph='structural', keep_self_loops=True, *args, **kwargs):
    """The minimum set of necessary driver nodes to control the network based on Structural Controlability (SC) theory.

    Args:
        graph (string) : Which graph to perform computation on: 'structural' or 'effective'.
        keep_self_loops (bool) : If self-loops are used in the computation.
    Returns:
        (list) : A list-of-sets with SC solution nodes (each including the input nodes).
    Raises:
        AttributeError : when ``graph`` is not recognised.
    """
    self._check_compute_variables(sg=True)
    if graph == 'structural':
        dg = self.structural_graph(*args, **kwargs)
    elif graph == 'effective':
        # BUGFIX: `effective_graph` takes no `mode` argument (its signature is
        # (bound, threshold)); passing mode='input' raised a TypeError.
        dg = self.effective_graph(bound='mean', threshold=None, *args, **kwargs)
    else:
        raise AttributeError("The graph type '%s' is not accepted. Try 'structural' or 'effective'." % graph)
    #
    # Input nodes are not controllable from within the network; always include them.
    scsets = [set(scset).union(set(self.input_nodes)) for scset in sc.sc(dg, keep_self_loops=keep_self_loops)]
    return scsets
#
# Dynamical Impact
#
def partial_derative_node(self, node, n_traj=10, t=1):
    """The partial derivative of node on all other nodes after t steps.

    Perturbs ``node`` in sampled (or all) configurations and measures, per step,
    the fraction of nodes whose state differs between the original and perturbed
    trajectories.

    Args:
        node (int) : the node index for perturbations
        t (int) : the number of time steps the system is run before impact is calculated.
        n_traj (int) : the number of trajectories used to approximate the dynamical impact of a node.
            if 0 then the full STG is used to calculate the true value instead of the approximation method.
    Returns:
        (vector) : the partial derivatives, shape (t, Nnodes)
    """
    partial = np.zeros((t, self.Nnodes), dtype=float)
    if n_traj == 0:
        # exhaustive: iterate every configuration of the network
        config_genderator = (self.num2bin(statenum) for statenum in range(self.Nstates))
        n_traj = self.Nstates
    else:
        # sample configurations uniformly at random
        config_genderator = (random_binstate(self.Nnodes) for itraj in range(n_traj))
    for config in config_genderator:
        # flip the target node's bit to create the perturbed twin trajectory
        perturbed_config = flip_binstate_bit(config, node)
        for n_step in range(t):
            config = self.step(config)
            perturbed_config = self.step(perturbed_config)
            # accumulate, per node, whether the two trajectories disagree at this step
            partial[n_step] += np.logical_not(binstate_compare(config, perturbed_config))
    # average over trajectories
    partial /= n_traj
    return partial
def approx_dynamic_impact(self, source, n_steps=1, target_set=None, bound='mean', threshold=0.0):
    """Use the network structure to approximate the dynamical impact of a perturbation to node for each of n_steps
    for details see: Gates et al (2020).

    Args:
        source (int) : the source index for perturbations
        n_steps (int) : the number of time steps
        target_set (iterable) : node indices to evaluate; defaults to all nodes.
        bound (str) : the bound for the effective graph
            'mean' - edge effectiveness
            'upper' - activity
        threshold (float) : threshold passed to the effective graph.
    Returns:
        (matrix) : approximate dynamical impact for each node at each step (2 x n_steps x n_targets)
    """
    if target_set is None:
        target_set = range(self.Nnodes)
    Gstr = self.structural_graph()
    Geff = self.effective_graph(bound=bound, threshold=threshold)
    # the maximum path with length given by product of weights is the same as
    # minimal path of negative log weight
    def eff_weight_func(u, v, e):
        return -np.log(e['weight'])
    def inv_eff_weight_func(pathlength):
        return np.exp(-pathlength)
    impact_matrix = np.zeros((2, n_steps + 1, len(target_set)))
    # if we can't reach the node, then the paths cant be longer than the number
    # of nodes in the graph
    impact_matrix[0, :, :] = self.Nnodes + 1
    # note that by default: impact_matrix[1, :, :] = 0, the minimum for nodes we
    # cant reach in the effective graph
    # in the structural graph, calcluate the dijkstra shortest paths from the
    # source to all targets that are shorter than the cutoff
    Gstr_shortest_dist, Gstr_shortest_paths = nx.single_source_dijkstra(Gstr, source, target=None, cutoff=n_steps)
    Gstr_shortest_dist = {n: int(l) for n, l in Gstr_shortest_dist.items()}
    # in the effective graph, calcluate the dijkstra shortest paths from the
    # source to all targets, with edge weight given by the effective weight function
    Geff_shortest_dist, Geff_shortest_paths = nx.single_source_dijkstra(Geff, source, target=None, cutoff=n_steps, weight=eff_weight_func)
    for itar, target in enumerate(target_set):
        # we dont need to worry about a path to itself (source == target)
        # and if the target doesnt appear in the shortest path dict, then no path
        # exists that is less than the cutoff
        if target != source and not Gstr_shortest_dist.get(target, None) is None:
            # the light cone is at least as big as the number of edges in the
            # structural shortest path
            impact_matrix[0, list(range(Gstr_shortest_dist[target], n_steps + 1)), itar] = Gstr_shortest_dist[target]
            # if the path exists, then the number of edges (timesteps) is one less
            # than the number of nodes on it
            if not Geff_shortest_paths.get(target, None) is None:
                eff_path_steps = len(Geff_shortest_paths[target]) - 1
            else:
                # or the path doesnt exist: any number bigger than the longest path
                eff_path_steps = n_steps + 100
            # start by checking if the number of timesteps is less than the
            # maximum allowable number of steps
            if eff_path_steps <= n_steps:
                # now check if the most likely effective path is longer (in # of
                # timesteps) than the structural shortest path
                if eff_path_steps > Gstr_shortest_dist[target]:
                    # if so, find another effective path constrained by the
                    # light-cone, for each intermediate number of steps
                    for istep in range(Gstr_shortest_dist[target], eff_path_steps):
                        # bc the effective graph has fully redundant edges, there
                        # may actually not be a path
                        try:
                            redo_dijkstra_dist, _ = nx.single_source_dijkstra(Geff,
                                                                              source=source,
                                                                              target=target,
                                                                              cutoff=istep,
                                                                              weight=eff_weight_func)
                            impact_matrix[1, istep, itar] = inv_eff_weight_func(redo_dijkstra_dist)
                        except nx.NetworkXNoPath:
                            pass
                # once the lightcone includes the target node on the effective
                # shortest path, the effective path is the best for all later steps
                impact_matrix[1, list(range(eff_path_steps, n_steps + 1)), itar] = inv_eff_weight_func(Geff_shortest_dist[target])
    # drop step 0 (the perturbation itself)
    return impact_matrix[:, 1:]
def dist_from_attractor(self):
    """Find the distance from its attractor for each configuration (STG node).

    Lazily computes the attractors (and therefore the state-transition
    graph) first via ``_check_compute_variables``.

    Returns:
        dist (dict): maps each STG node to a ``(distance, node)`` tuple,
            where ``distance`` counts edges toward the attractor.
            Attractor states themselves get distance 0.
    """
    self._check_compute_variables(attractors=True)
    dist = {}  # stores node -> (distance, node) pair
    for att in self._attractors:
        # every state inside the attractor is at distance 0
        dist.update({a: (0, a) for a in att})
        # BFS tree over the *reversed* STG: reachable nodes are the states
        # that eventually flow into this attractor
        dag = nx.bfs_tree(self._stg, att[0], reverse=True)
        attractor_states = set(att)
        for node in nx.topological_sort(dag):
            # pairs of dist,node for all incoming edges
            if node not in attractor_states:
                pairs = [(dist[v][0] + 1, v) for v in dag.pred[node]]
                if pairs:
                    # nearest predecessor determines this node's distance
                    dist[node] = min(pairs)
                else:
                    # no predecessor in the tree; treated as distance 0
                    dist[node] = (0, node)
    return dist
def average_dist_from_attractor(self):
    """Mean distance to the attractor over all non-attractor configurations.

    Attractor states themselves (distance 0) are excluded from the average.
    """
    positive_steps = [steps for steps, _ in self.dist_from_attractor().values() if steps > 0]
    return np.mean(positive_steps)
#
# Dynamics Canalization Map (DCM)
#
def dynamics_canalization_map(self, output=None, simplify=True):
    r"""Computes the Dynamics Canalization Map (DCM).

    In practice, it asks each node to compute its Canalizing Map (CM) and
    then composes them into a single graph, simplifying it if possible.

    Args:
        output (int) : The output DCM to return. Default is ``None``, returning both [0,1].
        simplify (bool) : Attempts to simplify the DCM by removing threshold nodes with :math:`\tau=1`.
    Returns:
        DCM (networkx.DiGraph) : a directed graph representation of the DCM.
    See Also:
        :func:`boolean_node.canalizing_map` for the CM and :func:`drawing.draw_dynamics_canalizing_map_graphviz` for plotting.
    """
    CMs = []
    for node in self.nodes:
        # constant nodes only contribute a CM when the network keeps them
        if self.keep_constants or not node.constant:
            CMs.append(node.canalizing_map(output))
    # https://networkx.github.io/documentation/stable/reference/algorithms/generated/networkx.algorithms.operators.all.compose_all.html
    DCM = nx.compose_all(CMs)
    DCM.name = 'DCM: %s' % (self.name)
    if simplify:
        # Loop all threshold nodes
        threshold_nodes = [(n, d) for n, d in DCM.nodes(data=True) if d['type'] == 'threshold']
        for n, d in threshold_nodes:
            # Constant, remove threshold node
            if d['tau'] == 0:
                DCM.remove_node(n)
            # Tau == 1: the threshold is trivially satisfiable, so the node
            # may be bypassed by linking its input to its output directly
            if d['tau'] == 1:
                in_nei = list(DCM.in_edges(n))[0]
                out_nei = list(DCM.out_edges(n))[0]
                # neis = set(list(in_nei) + list(out_nei))
                # Convert to self loop
                if (in_nei == out_nei[::-1]):
                    DCM.remove_node(n)
                    DCM.add_edge(in_nei[0], out_nei[1], **{'type': 'simplified', 'mode': 'selfloop'})
                # Link variables nodes directly
                elif not any([DCM.nodes[tn]['type'] == 'fusion' for tn in in_nei]):
                    DCM.remove_node(n)
                    DCM.add_edge(in_nei[0], out_nei[1], **{'type': 'simplified', 'mode': 'direct'})
        # Remove Isolates
        isolates = list(nx.isolates(DCM))
        DCM.remove_nodes_from(isolates)
    return DCM
def _check_compute_variables(self, **kwargs):
"""Recursevely check if the requested control variables are instantiated/computed.
Otherwise computes them in order.
"""
if 'sg' in kwargs:
if self._sg is None:
self._sg = self.structural_graph()
elif 'eg' in kwargs:
if self._eg is None:
self._eg = self.effective_graph()
elif 'stg' in kwargs:
if self._stg is None:
self._check_compute_variables(sg=True)
self._stg = self.state_transition_graph()
elif 'attractors' in kwargs:
if self._attractors is None:
self._check_compute_variables(stg=True)
self._attractors = self.attractors()
elif 'stg_r' in kwargs:
if self._stg_r is None:
self._check_compute_variables(stg=True)
self._stg_r = self.state_transition_graph_reachability()
else:
raise Exception('Control variable name not found. %s' % kwargs)
return True
#
# Get Node Names from Ids
#
def _get_node_name(self, id):
"""Return the name of the node based on its id.
Args:
id (int): id of the node.
Returns:
name (string): name of the node.
"""
try:
node = self.nodes[id]
except error:
raise AttributeError("Node with id {id:d} does not exist. {error::s}".format(id=id, error=error))
else:
return node.name
def get_node_name(self, iterable=None):
    """Return node names. Optionally, return only the names of the ids requested.

    Args:
        iterable (int, list, optional): the id (or list of ids) of the nodes
            whose names are requested. Defaults to ``None`` (all nodes).
            Fix: the original default was a mutable ``[]`` (shared across
            calls); ``None`` is behaviorally identical and safe.

    Returns:
        names (list): the names of the nodes.
    """
    if iterable is None:
        iterable = []
    # If only one id is passed, make it a list
    elif not isinstance(iterable, list):
        iterable = [iterable]
    # No ids requested, return all the names
    if not iterable:
        return [n.name for n in self.nodes]
    # otherwise, use the recursive map to change ids to names
    return recursive_map(self._get_node_name, iterable)
def average_trajectory_length(self, nsamples=10, random_seed=None, method='random'):
    """The average length of trajectories from a random initial configuration to its attractor.

    Args:
        nsamples (int) : The number of random initial configurations to sample.
        random_seed (int) : The random state seed.
        method (string) : sampling method; only 'random' is currently used.

    Returns:
        trajlen (float) : The average trajectory length to an attractor.
    """
    # Fix: random_seed was accepted but never applied; seed the RNG the same
    # way the other sampling methods (e.g. derrida_curve) do.
    random.seed(random_seed)
    total = sum(
        len(self.trajectory_to_attractor(random_binstate(self.Nnodes)))
        for _ in range(nsamples)
    )
    return total / nsamples
def derrida_curve(self, nsamples=10, max_hamm=None, random_seed=None, method='random'):
    """The Derrida Curve (also referred to as criticality measure :math:`D_s`).

    When ``method`` is 'random' (default), random sampling is used to
    estimate the Derrida values. When ``method`` is 'sensitivity', the
    c-sensitivity is used instead (slower). See :cite:`kadelka2017influence`
    for why c-sensitivity can be used to calculate Derrida values.

    Args:
        nsamples (int) : The number of samples per Hamming distance.
        max_hamm (int) : The maximum Hamming distance between starting states. Default: self.Nnodes.
        random_seed (int) : The random state seed.
        method (string) : either 'random' or 'sensitivity'.

    Returns:
        (dx, dy) (tuple) : the normalized Hamming distances and curve values.
            Fix: both arrays now have ``max_hamm + 1`` entries (index 0 is
            the zero-perturbation point); previously ``dx`` had one fewer
            entry than ``dy``, so the two could not be plotted against each
            other.
    """
    random.seed(random_seed)
    if max_hamm is None or (max_hamm > self.Nnodes):
        max_hamm = self.Nnodes
    # one entry per Hamming distance 0..max_hamm so dx and dy align
    dx = np.linspace(0, 1, max_hamm + 1, endpoint=True)
    dy = np.zeros(max_hamm + 1)
    if method == 'random':
        # for each possible hamming distance between the starting states
        for hamm_dist in range(1, max_hamm + 1):
            # sample nsamples times
            for isample in range(nsamples):
                rnd_config = random_binstate(self.Nnodes)
                perturbed_var = random.sample(range(self.Nnodes), hamm_dist)
                perturbed_config = [flip_bit(rnd_config[ivar]) if ivar in perturbed_var else rnd_config[ivar] for ivar in range(self.Nnodes)]
                # normalized Hamming distance after one synchronous update
                dy[hamm_dist] += hamming_distance(self.step(rnd_config), self.step(perturbed_config)) / self.Nnodes
        dy /= nsamples
    elif method == 'sensitivity':
        for hamm_dist in range(1, max_hamm + 1):
            dy[hamm_dist] = sum([node.c_sensitivity(hamm_dist, mode='forceK', max_k=self.Nnodes) for node in self.nodes]) / self.Nnodes
    return dx, dy
def derrida_coefficient(self, nsamples=10, random_seed=None, method='random'):
    """The Derrida Coefficient (perturbation spread at Hamming distance 1).

    When ``method`` is 'random' (default), random sampling is used to
    estimate the value. When ``method`` is 'sensitivity', the c-sensitivity
    is used instead (slower). See :cite:`kadelka2017influence` for why
    c-sensitivity can be used to calculate Derrida values.

    Args:
        nsamples (int) : The number of samples to draw.
        random_seed (int) : The random state seed.
        method (string) : either 'random' or 'sensitivity'.

    Returns:
        dc (float) : the Derrida coefficient. (The original docstring
            claimed a (dx, dy) tuple; a single scalar is returned.)

    Raises:
        ValueError: for an unknown ``method`` (the original code would
            instead hit a NameError on ``dy``).
    """
    random.seed(random_seed)
    hamm_dist = 1
    if method == 'random':
        dy = 0
        # sample nsamples times
        for isample in range(nsamples):
            rnd_config = random_binstate(self.Nnodes)
            perturbed_var = random.sample(range(self.Nnodes), hamm_dist)
            perturbed_config = [flip_bit(rnd_config[ivar]) if ivar in perturbed_var else rnd_config[ivar] for ivar in range(self.Nnodes)]
            dy += hamming_distance(self.step(rnd_config), self.step(perturbed_config))
        dy /= float(nsamples)
    elif method == 'sensitivity':
        dy = sum([node.c_sensitivity(hamm_dist, mode='forceK', max_k=self.Nnodes) for node in self.nodes])
    else:
        raise ValueError("method must be 'random' or 'sensitivity', got %r" % (method,))
    return dy / float(self.Nnodes)
"re.compile",
"numpy.log",
"cana.control.fvs.fvs_grasp",
"networkx.weakly_connected_components",
"copy.deepcopy",
"copy.copy",
"networkx.dfs_preorder_nodes",
"networkx.DiGraph",
"numpy.exp",
"numpy.linspace",
"cana.control.fvs.fvs_bruteforce",
"warnings.warn",
"io.StringIO",
"networkx.attr... | [((5259, 5275), 'io.StringIO', 'StringIO', (['string'], {}), '(string)\n', (5267, 5275), False, 'from io import StringIO\n'), ((5292, 5309), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (5303, 5309), False, 'from collections import defaultdict\n'), ((7957, 7974), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (7968, 7974), False, 'from collections import defaultdict\n'), ((8043, 8059), 'io.StringIO', 'StringIO', (['string'], {}), '(string)\n', (8051, 8059), False, 'from io import StringIO\n'), ((8477, 8493), 'io.StringIO', 'StringIO', (['string'], {}), '(string)\n', (8485, 8493), False, 'from io import StringIO\n'), ((12804, 12853), 'networkx.DiGraph', 'nx.DiGraph', ([], {'name': "('Structural Graph: ' + self.name)"}), "(name='Structural Graph: ' + self.name)\n", (12814, 12853), True, 'import networkx as nx\n'), ((13542, 13570), 'networkx.number_of_edges', 'nx.number_of_edges', (['self._sg'], {}), '(self._sg)\n', (13560, 13570), True, 'import networkx as nx\n'), ((14772, 14829), 'networkx.DiGraph', 'nx.DiGraph', ([], {'name': "('Signed Interaction Graph: ' + self.name)"}), "(name='Signed Interaction Graph: ' + self.name)\n", (14782, 14829), True, 'import networkx as nx\n'), ((18013, 18038), 'copy.deepcopy', 'copy.deepcopy', (['self.logic'], {}), '(self.logic)\n', (18026, 18038), False, 'import copy\n'), ((21904, 22009), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['conditional_eg'], {'values': 'dict_conditioned_subgraph', 'name': '"""conditioned_subgraph"""'}), "(conditional_eg, values=dict_conditioned_subgraph,\n name='conditioned_subgraph')\n", (21926, 22009), True, 'import networkx as nx\n'), ((22124, 22227), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['conditional_eg'], {'values': 'dict_all_conditioned_nodes', 'name': '"""conditioned_state"""'}), "(conditional_eg, values=dict_all_conditioned_nodes,\n name='conditioned_state')\n", (22146, 22227), True, 'import 
networkx as nx\n'), ((24580, 24616), 'networkx.DiGraph', 'nx.DiGraph', ([], {'name': "('STG: ' + self.name)"}), "(name='STG: ' + self.name)\n", (24590, 24616), True, 'import networkx as nx\n'), ((35931, 35955), 'copy.deepcopy', 'copy.deepcopy', (['self._stg'], {}), '(self._stg)\n', (35944, 35955), False, 'import copy\n'), ((40465, 40489), 'copy.deepcopy', 'copy.deepcopy', (['self._stg'], {}), '(self._stg)\n', (40478, 40489), False, 'import copy\n'), ((41084, 41120), 'networkx.DiGraph', 'nx.DiGraph', ([], {'name': "('CAG: ' + cstg.name)"}), "(name='CAG: ' + cstg.name)\n", (41094, 41120), True, 'import networkx as nx\n'), ((49139, 49206), 'cana.control.mds.mds', 'mds.mds', (['dg'], {'max_search': 'max_search', 'keep_self_loops': 'keep_self_loops'}), '(dg, max_search=max_search, keep_self_loops=keep_self_loops)\n', (49146, 49206), False, 'from cana.control import fvs, mds, sc\n'), ((51020, 51059), 'numpy.zeros', 'np.zeros', (['(t, self.Nnodes)'], {'dtype': 'float'}), '((t, self.Nnodes), dtype=float)\n', (51028, 51059), True, 'import numpy as np\n'), ((53398, 53466), 'networkx.single_source_dijkstra', 'nx.single_source_dijkstra', (['Gstr', 'source'], {'target': 'None', 'cutoff': 'n_steps'}), '(Gstr, source, target=None, cutoff=n_steps)\n', (53423, 53466), True, 'import networkx as nx\n'), ((53808, 53905), 'networkx.single_source_dijkstra', 'nx.single_source_dijkstra', (['Geff', 'source'], {'target': 'None', 'cutoff': 'n_steps', 'weight': 'eff_weight_func'}), '(Geff, source, target=None, cutoff=n_steps, weight\n =eff_weight_func)\n', (53833, 53905), True, 'import networkx as nx\n'), ((58839, 58858), 'networkx.compose_all', 'nx.compose_all', (['CMs'], {}), '(CMs)\n', (58853, 58858), True, 'import networkx as nx\n'), ((64031, 64055), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (64042, 64055), False, 'import random\n'), ((64163, 64205), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'max_hamm'], {'endpoint': '(True)'}), '(0, 1, max_hamm, 
endpoint=True)\n', (64174, 64205), True, 'import numpy as np\n'), ((64219, 64241), 'numpy.zeros', 'np.zeros', (['(max_hamm + 1)'], {}), '(max_hamm + 1)\n', (64227, 64241), True, 'import numpy as np\n'), ((66001, 66025), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (66012, 66025), False, 'import random\n'), ((2583, 2683), 'cana.boolean_node.BooleanNode', 'BooleanNode', (['*args'], {'id': 'i', 'name': 'name', 'k': 'k', 'inputs': 'inputs', 'outputs': 'outputs', 'network': 'self'}), '(*args, id=i, name=name, k=k, inputs=inputs, outputs=outputs,\n network=self, **kwargs)\n', (2594, 2683), False, 'from cana.boolean_node import BooleanNode\n'), ((16220, 16290), 'networkx.DiGraph', 'nx.DiGraph', ([], {'name': "('Effective Graph: ' + self.name + '(Threshold: None)')"}), "(name='Effective Graph: ' + self.name + '(Threshold: None)')\n", (16230, 16290), True, 'import networkx as nx\n'), ((23512, 23597), 'networkx.DiGraph', 'nx.DiGraph', ([], {'name': "('Activity Graph: ' + self.name + '(Threshold: %.2f)' % threshold)"}), "(name='Activity Graph: ' + self.name + '(Threshold: %.2f)' %\n threshold)\n", (23522, 23597), True, 'import networkx as nx\n'), ((23628, 23697), 'networkx.DiGraph', 'nx.DiGraph', ([], {'name': "('Activity Graph: ' + self.name + '(Threshold: None)')"}), "(name='Activity Graph: ' + self.name + '(Threshold: None)')\n", (23638, 23697), True, 'import networkx as nx\n'), ((34350, 34389), 'itertools.combinations', 'itertools.combinations', (['nodeids', 'nr_dvs'], {}), '(nodeids, nr_dvs)\n', (34372, 34389), False, 'import itertools\n'), ((38130, 38166), 'networkx.DiGraph', 'nx.DiGraph', ([], {'name': "('STG: ' + self.name)"}), "(name='STG: ' + self.name)\n", (38140, 38166), True, 'import networkx as nx\n'), ((45013, 45050), 'networkx.weakly_connected_components', 'nx.weakly_connected_components', (['pcstg'], {}), '(pcstg)\n', (45043, 45050), True, 'import networkx as nx\n'), ((47646, 47715), 'cana.control.fvs.fvs_grasp', 'fvs.fvs_grasp', 
(['dg'], {'max_iter': 'max_iter', 'keep_self_loops': 'keep_self_loops'}), '(dg, max_iter=max_iter, keep_self_loops=keep_self_loops)\n', (47659, 47715), False, 'from cana.control import fvs, mds, sc\n'), ((52849, 52868), 'numpy.exp', 'np.exp', (['(-pathlength)'], {}), '(-pathlength)\n', (52855, 52868), True, 'import numpy as np\n'), ((57096, 57140), 'networkx.bfs_tree', 'nx.bfs_tree', (['self._stg', 'att[0]'], {'reverse': '(True)'}), '(self._stg, att[0], reverse=True)\n', (57107, 57140), True, 'import networkx as nx\n'), ((57205, 57229), 'networkx.topological_sort', 'nx.topological_sort', (['dag'], {}), '(dag)\n', (57224, 57229), True, 'import networkx as nx\n'), ((60044, 60060), 'networkx.isolates', 'nx.isolates', (['DCM'], {}), '(DCM)\n', (60055, 60060), True, 'import networkx as nx\n'), ((36304, 36327), 'copy.copy', 'copy.copy', (['driver_nodes'], {}), '(driver_nodes)\n', (36313, 36327), False, 'import copy\n'), ((40843, 40866), 'copy.copy', 'copy.copy', (['driver_nodes'], {}), '(driver_nodes)\n', (40852, 40866), False, 'import copy\n'), ((44301, 44332), 'networkx.attracting_components', 'nx.attracting_components', (['pcstg'], {}), '(pcstg)\n', (44325, 44332), True, 'import networkx as nx\n'), ((45980, 46012), 'networkx.dfs_preorder_nodes', 'nx.dfs_preorder_nodes', (['G', 'source'], {}), '(G, source)\n', (46001, 46012), True, 'import networkx as nx\n'), ((47775, 47853), 'cana.control.fvs.fvs_bruteforce', 'fvs.fvs_bruteforce', (['dg'], {'max_search': 'max_search', 'keep_self_loops': 'keep_self_loops'}), '(dg, max_search=max_search, keep_self_loops=keep_self_loops)\n', (47793, 47853), False, 'from cana.control import fvs, mds, sc\n'), ((50240, 50282), 'cana.control.sc.sc', 'sc.sc', (['dg'], {'keep_self_loops': 'keep_self_loops'}), '(dg, keep_self_loops=keep_self_loops)\n', (50245, 50282), False, 'from cana.control import fvs, mds, sc\n'), ((52764, 52783), 'numpy.log', 'np.log', (["e['weight']"], {}), "(e['weight'])\n", (52770, 52783), True, 'import numpy as np\n'), 
((29071, 29106), 'networkx.attracting_components', 'nx.attracting_components', (['self._stg'], {}), '(self._stg)\n', (29095, 29106), True, 'import networkx as nx\n'), ((35751, 35844), 'warnings.warn', 'warnings.warn', (['("Cannot control a constant variable \'%s\'! Skipping" % self.nodes[dv].name)'], {}), '("Cannot control a constant variable \'%s\'! Skipping" % self.\n nodes[dv].name)\n', (35764, 35844), False, 'import warnings\n'), ((40287, 40380), 'warnings.warn', 'warnings.warn', (['("Cannot control a constant variable \'%s\'! Skipping" % self.nodes[dv].name)'], {}), '("Cannot control a constant variable \'%s\'! Skipping" % self.\n nodes[dv].name)\n', (40300, 40380), False, 'import warnings\n'), ((29955, 29996), 'networkx.weakly_connected_components', 'nx.weakly_connected_components', (['self._stg'], {}), '(self._stg)\n', (29985, 29996), True, 'import networkx as nx\n'), ((9042, 9089), 're.compile', 're.compile', (["('\\\\W' + logic[node]['name'] + '\\\\W')"], {}), "('\\\\W' + logic[node]['name'] + '\\\\W')\n", (9052, 9089), False, 'import re\n'), ((20204, 20217), 'cana.boolean_node.BooleanNode', 'BooleanNode', ([], {}), '()\n', (20215, 20217), False, 'from cana.boolean_node import BooleanNode\n'), ((55878, 55981), 'networkx.single_source_dijkstra', 'nx.single_source_dijkstra', (['Geff'], {'source': 'source', 'target': 'target', 'cutoff': 'istep', 'weight': 'eff_weight_func'}), '(Geff, source=source, target=target, cutoff=istep,\n weight=eff_weight_func)\n', (55903, 55981), True, 'import networkx as nx\n')] |
import numpy as np
import ecogdata.util as ut
def array_geometry(data, map, axis=-1):
    """Reorder a 2D timeseries matrix so the channel axis follows array geometry.

    Parameters
    ----------
    data : ndarray
        2D matrix of timeseries with channels along `axis`.
    map : ChannelMap
        Channel-to-site mapping (converted to row-major order internally).
    axis : int
        Axis of `data` that indexes channels (default: the last axis).

    Returns
    -------
    new_data : ndarray
        (rows*cols, timepoints) array with rows ordered by the array
        geometry; sites with no channel stay zero-filled.
    null_map : ChannelMap
        Map of the sites that received no data.
    """
    map = map.as_row_major()
    grid = map.geometry
    shape = list(data.shape)
    # normalize a negative axis index into [0, ndim)
    axis = axis % len(shape)
    shape.pop(axis)
    n_pts = shape[0]
    out = np.zeros(grid + (n_pts,), data.dtype)
    out.shape = (-1, n_pts)
    # rows are channels after the reshape; transpose when channels are last
    out[map, :] = data.T if axis != 0 else data
    unmapped = set(range(grid[0] * grid[1]))
    unmapped.difference_update(map)
    return out, ut.ChannelMap(list(unmapped), grid, col_major=False)
| [
"numpy.zeros"
] | [((413, 450), 'numpy.zeros', 'np.zeros', (['(geo + (ts_dim,))', 'data.dtype'], {}), '(geo + (ts_dim,), data.dtype)\n', (421, 450), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
CMSC733 Spring 2019: Classical and Deep Learning Approaches for
Geometric Computer Vision
Project1: MyAutoPano: Phase 1 Starter Code
Author(s):
<NAME> (<EMAIL>)
M.Eng. Robotics,
University of Maryland, College Park
"""
# Python libraries
import argparse
import numpy as np
import matplotlib.pyplot as plt
import cv2
import math
from skimage.feature import peak_local_max
import random
from glob import glob
def harris_corner(img, Feature="mineig"):
    """Detect corners via a Harris-style response map.

    Args:
        img: BGR input image (not modified; a copy is annotated).
        Feature: "mineig" for the min-eigenvalue response, anything else
            for the classic Harris response (k=0.04).

    Returns:
        (annotated, response, corners): a copy of `img` with corner pixels
        painted [255, 0, 0], the response map with sub-threshold entries
        zeroed, and the np.where index tuple of corner pixels.
    """
    annotated = img.copy()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if Feature == "mineig":
        response = cv2.cornerMinEigenVal(gray, 2, 3)
    else:
        response = cv2.cornerHarris(gray, 2, 3, k=0.04)
    cutoff = 0.01 * response.max()
    strong = response > cutoff
    # zero the weak responses; keep indices of the strong ones
    response[response < cutoff] = 0
    corner_idx = np.where(strong)
    annotated[corner_idx] = [255, 0, 0]
    return annotated, response, corner_idx
def anms(Cmap, corners, image, n_best=100):
    """Adaptive Non-Maximal Suppression over a corner-response map.

    Each local maximum gets a suppression radius: the squared distance to
    the nearest *stronger* response. The `n_best` peaks with the largest
    radii are kept, giving spatially well-distributed corners.

    Args:
        Cmap: 2D corner-response map (e.g. from harris_corner).
        corners: unused; kept for interface compatibility with callers.
        image: image to annotate (not modified; a copy is returned).
        n_best: number of corners to keep (clamped to the number of peaks).

    Returns:
        (x_best, y_best, annotated): (n,1) column vectors of row / column
        coordinates of the kept corners and a copy of `image` with them drawn.
    """
    img = image.copy()
    C = Cmap.copy()
    locmax = peak_local_max(C, min_distance=10)
    n_strong = locmax.shape[0]
    # Fix: clamp n_best; requesting more corners than peaks raised IndexError.
    # (Also removed a stray debug print of n_strong.)
    n_best = min(n_best, n_strong)
    # np.inf instead of np.Infinity (the latter was removed in NumPy 2.0)
    r = [np.inf] * n_strong
    x = np.zeros((n_strong, 1))
    y = np.zeros((n_strong, 1))
    for i in range(n_strong):
        for j in range(n_strong):
            # only *stronger* responses suppress peak i
            if C[locmax[j][0], locmax[j][1]] > C[locmax[i][0], locmax[i][1]]:
                ed = (locmax[j][0] - locmax[i][0]) ** 2 + (locmax[j][1] - locmax[i][1]) ** 2
                if ed < r[i]:
                    r[i] = ed
        x[i] = locmax[i][0]
        y[i] = locmax[i][1]
    # indices of the n_best largest suppression radii
    ind = np.argsort(r)[-n_best:]
    x_best = np.zeros((n_best, 1))
    y_best = np.zeros((n_best, 1))
    for i in range(n_best):
        x_best[i] = x[ind[i]]
        y_best[i] = y[ind[i]]
        # Fix: cv2.circle needs plain ints, not 1-element arrays
        cv2.circle(img, (int(y_best[i]), int(x_best[i])), 3, 255, -1)
    return x_best, y_best, img
def corner_detect_with_ANMS(img, n=500):
    """Shi-Tomasi corner detection (cv2.goodFeaturesToTrack wrapper).

    Args:
        img: BGR input image (not modified; a copy is annotated).
        n: maximum number of corners to return.

    Returns:
        (x_best, y_best, annotated, corners): lists of row / column
        coordinates, a copy of `img` with the corners drawn, and the raw
        corner array from OpenCV.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    corners = np.int0(cv2.goodFeaturesToTrack(gray, n, 0.01, 10))
    annotated = img.copy()
    rows, cols = [], []
    for corner in corners:
        cx, cy = corner.ravel()
        cv2.circle(annotated, (cx, cy), 3, 255, -1)
        # note: (row, col) convention — rows collect y, cols collect x
        rows.append(cy)
        cols.append(cx)
    return rows, cols, annotated, corners
def feature_des(img, x_best, y_best):
    """Build 64-dimensional standardized patch descriptors for each corner.

    For every (x, y) corner, a 40x40 grayscale patch around it is extracted
    (the image is zero-padded by 40 px so border corners stay valid),
    Gaussian-blurred, resized to 8x8, flattened to 64 values, and
    standardized to zero mean / unit variance.

    Args:
        img: BGR input image.
        x_best, y_best: row / column coordinates of the corners.

    Returns:
        (features, patch): list of 64-element descriptors and the last 8x8
        patch processed (handy for visualization).
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    pad = 40
    padded = cv2.copyMakeBorder(gray, pad, pad, pad, pad, cv2.BORDER_CONSTANT, None, [0])
    descriptors = []
    for row, col in zip(x_best, y_best):
        # 40x40 window centered on the corner (offset by the padding)
        patch = padded[row + 20:row + 60, col + 20:col + 60]
        patch = cv2.GaussianBlur(patch, (5, 5), cv2.BORDER_DEFAULT)
        patch = cv2.resize(patch, (8, 8), interpolation=cv2.INTER_AREA)
        vec = np.reshape(patch, (64,))
        vec = (vec - np.mean(vec)) / np.std(vec)
        descriptors.append(vec)
    return descriptors, patch
def feature_match(features1,features2,x1,y1,x2,y2,img1,img2):
    """Match patch descriptors between two images with a ratio test.

    Brute-force KNN matching (k=2) followed by Lowe's ratio test (0.75)
    selects the good matches; the matched coordinates are returned in two
    layouts (see below) and a match visualization is drawn.

    Args:
        features1, features2: per-corner 64-dim descriptor lists.
        x1, y1 / x2, y2: row / column coordinates of the corners that
            produced the descriptors (same order as the feature lists).
        img1, img2: the source images (used only for drawing matches).

    Returns:
        (pts1f, pts2f, pts1t, pts2t): float32 (n, 2) arrays of matched
        points. The *f* pair holds (col, row) order and the *t* pair holds
        (row, col) order — callers pick whichever convention they need.
    """
    features1 = np.array(features1,dtype='float32')
    features2 = np.array(features2,dtype='float32')
    bf = cv2.BFMatcher()
    # 2-NN so the ratio test below can compare best vs. second-best
    rawMatches = bf.knnMatch(features1,features2,2)
    matches = []
    good = []
    c=0
    for m in rawMatches:
        # Lowe's ratio test: keep a match only if clearly better than runner-up
        if len(m) == 2 and m[0].distance < m[1].distance * 0.75:
            matches.append((m[0].trainIdx, m[0].queryIdx))
            good.append([m[0]])
            c = c+1
    corners1 = []
    corners2 = []
    for i in range(len(x1)):
        corners1.append([x1[i],y1[i]])
    for i in range(len(x2)):
        corners2.append([x2[i],y2[i]])
    pts1 = np.float32(corners1)
    pts2 = np.float32(corners2)
    pts1 = np.reshape(pts1,(len(x1),2))
    pts2 = np.reshape(pts2,(len(x2),2))
    kp1=[]
    kp2=[]
    # KeyPoint takes (x, y) = (col, row), hence the swapped indices
    for i in range(len(pts1)):
        kp1.append(cv2.KeyPoint(pts1[i][1], pts1[i][0], 5))
        kp2.append(cv2.KeyPoint(pts2[i][1], pts2[i][0], 5))
    img = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None,flags=2)
    pts1f = []
    pts2f = []
    pts1t = []
    pts2t = []
    for mat in good:
        # *f* lists store (col, row); *t* lists store (row, col)
        pts1f.append([kp1[mat[0].queryIdx].pt[1],kp1[mat[0].queryIdx].pt[0]])
        pts2f.append([kp2[mat[0].trainIdx].pt[1],kp2[mat[0].trainIdx].pt[0]])
        pts1t.append([kp1[mat[0].queryIdx].pt[0],kp1[mat[0].queryIdx].pt[1]])
        pts2t.append([kp2[mat[0].trainIdx].pt[0],kp2[mat[0].trainIdx].pt[1]])
    pts1f = np.float32(np.reshape(pts1f,(len(pts1f),2)))
    pts2f = np.float32(np.reshape(pts2f,(len(pts2f),2)))
    pts1t = np.float32(np.reshape(pts1t,(len(pts1t),2)))
    pts2t = np.float32(np.reshape(pts2t,(len(pts2t),2)))
    # plt.figure(figsize=(15,15))
    # plt.imshow(img)
    # plt.show()
    return pts1f,pts2f,pts1t,pts2t
def ransac(pts1, pts2, N=100, t=0.9, thresh=30):
    """Estimate a homography from point correspondences with RANSAC.

    Args:
        pts1, pts2: (n, 2) arrays of matched (x, y) points.
        N: maximum number of RANSAC iterations.
        t: early-exit inlier ratio (stop once more than t*n points agree).
        thresh: inlier reprojection-distance threshold (pixels).

    Returns:
        (H, index): the best 3x3 homography found and the inlier indices
        *for that homography*. (Fix: the original returned the inlier list
        of the final iteration, which generally did not correspond to the
        returned H.)
    """
    best_H = np.zeros((3, 3))
    best_index = []
    max_inliers = 0
    for _ in range(N):
        # Fix: draw 4 *distinct* correspondences; np.random.randint sampled
        # with replacement, which can yield a degenerate point set.
        pts = random.sample(range(len(pts1)), 4)
        H = cv2.getPerspectiveTransform(np.float32(pts1[pts]), np.float32(pts2[pts]))
        inliers = 0
        index = []
        for ind in range(len(pts1)):
            source = pts1[ind]
            target = np.array([pts2[ind][0], pts2[ind][1]])
            predict = np.dot(H, np.array([source[0], source[1], 1]))
            # guard against points projected to infinity (w ~ 0)
            w = predict[2] if predict[2] != 0 else 1e-06
            predict = np.float32([predict[0] / w, predict[1] / w])
            if np.linalg.norm(target - predict) < thresh:
                inliers += 1
                index.append(ind)
        if inliers > max_inliers:
            max_inliers = inliers
            best_H = H
            best_index = index
        if inliers > t * len(pts1):
            break
    return best_H, best_index
## 3rd party code for blending images
def warpTwoImages(img1, img2, H):
    '''warp img2 to img1 with homograph H

    The output canvas is sized and translated so that neither image is
    cropped after warping: img2 is warped by H (composed with the
    translation) and img1 is pasted on top, unwarped.

    Args:
        img1: reference image (copied into the canvas as-is).
        img2: image to be warped by H.
        H: 3x3 homography mapping img2 coordinates into img1's frame.

    Returns:
        The stitched canvas (img1 over the warped img2).
    '''
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    box1 = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)
    box2 = np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2)
    box2_warped = cv2.perspectiveTransform(box2, H)
    every_corner = np.concatenate((box1, box2_warped), axis=0)
    xmin, ymin = np.int32(every_corner.min(axis=0).ravel() - 0.5)
    xmax, ymax = np.int32(every_corner.max(axis=0).ravel() + 0.5)
    shift = [-xmin, -ymin]
    # translation that moves the whole composite into positive coordinates
    Ht = np.array([[1, 0, shift[0]], [0, 1, shift[1]], [0, 0, 1]])
    canvas = cv2.warpPerspective(img2, Ht.dot(H), (xmax - xmin, ymax - ymin))
    canvas[shift[1]:h1 + shift[1], shift[0]:w1 + shift[0]] = img1
    return canvas
def autopano(img1,img2,Feature="mineig"):
    """Stitch a pair of images into a panorama.

    Pipeline: blur -> corner detection (+ANMS) -> patch descriptors ->
    ratio-test matching -> RANSAC homography -> warp and blend. The result
    is displayed and written to Final_Output.png.

    Args:
        img1, img2: the two images to stitch (img1 is warped into img2's frame).
        Feature: "harris"/"mineig" for Harris-style corners followed by ANMS,
            anything else for cv2.goodFeaturesToTrack.

    Returns:
        The blended panorama image.

    Corner Detection
    Save Corner detection output as corners.png
    """
    # light blur to suppress noise before corner detection
    img1 = cv2.GaussianBlur(img1,(3,3),0)
    img2 = cv2.GaussianBlur(img2,(3,3),0)
    if Feature == "harris" or Feature == "mineig":
        img1_corner,map1,corners1 = harris_corner(img1,Feature)
        img2_corner,map2,corners2 = harris_corner(img2,Feature)
        locmax1 = peak_local_max(map1,min_distance=10)
        n_strong1 = locmax1.shape[0]
        locmax2 = peak_local_max(map2,min_distance=10)
        n_strong2 = locmax2.shape[0]
        # keep the same number of corners in both images
        n_best = min(n_strong1,n_strong2)
        # plt.figure(figsize=(15,15))
        # plt.subplot(121)
        # plt.axis('off')
        # plt.imshow(img1_corner)
        # plt.subplot(122)
        # plt.axis('off')
        # plt.imshow(img2_corner)
        # plt.show()
        """
        Perform ANMS: Adaptive Non-Maximal Suppression
        Save ANMS output as anms.png
        """
        num = min(n_best,100)
        print("Number of best corners: "+str(num))
        x1,y1,img1_corner = anms(map1,corners1,img1,num)
        x2,y2,img2_corner = anms(map2,corners2,img2,num)
        # flatten the (n,1) column vectors into 1-D int coordinate arrays
        x1 = np.int0(x1.reshape(1,len(x1)))[0]
        x2 = np.int0(x2.reshape(1,len(x2)))[0]
        y1 = np.int0(y1.reshape(1,len(y1)))[0]
        y2 = np.int0(y2.reshape(1,len(y2)))[0]
    else:
        print("Good Features to track")
        x1,y1,img1_corner,corners1 = corner_detect_with_ANMS(img1)
        x2,y2,img2_corner,corners2 = corner_detect_with_ANMS(img2)
    # plt.figure(figsize=(15,15))
    # plt.subplot(121)
    # plt.axis('off')
    # plt.imshow(img1_corner)
    # plt.subplot(122)
    # plt.axis('off')
    # plt.imshow(img2_corner)
    # plt.show()
    """
    Feature Descriptors
    Save Feature Descriptor output as FD.png
    """
    features1,patch1 = feature_des(img1,x1,y1)
    features2,patch2 = feature_des(img2,x2,y2)
    """
    Feature Matching
    Save Feature Matching output as matching.png
    """
    pts1f,pts2f,pts1t,pts2t = feature_match(features1,features2,x1,y1,x2,y2,img1,img2)
    """
    Refine: RANSAC, Estimate Homography
    """
    # print(len(pts1t))
    H,index = ransac(pts1t,pts2t)
    # print(len(index))
    # re-draw the matches using only the RANSAC inliers
    pts1n = []
    pts2n = []
    x1n=[]
    x2n=[]
    y1n=[]
    y2n=[]
    for i in index:
        pts1n.append(pts1f[i])
        x1n.append(np.int0(pts1f[i][0]))
        y1n.append(np.int0(pts1f[i][1]))
        pts2n.append(pts2f[i])
        x2n.append(np.int0(pts2f[i][0]))
        y2n.append(np.int0(pts2f[i][1]))
    H = np.float64([pt for pt in H])
    features1,patch1 = feature_des(img1,x1n,y1n)
    features2,patch2 = feature_des(img2,x2n,y2n)
    _,_,_,_ = feature_match(features1,features2,x1n,y1n,x2n,y2n,img1,img2)
    result = cv2.warpPerspective(img1, H,(img1.shape[1], img1.shape[0]))
    # plt.figure(figsize=(15,15))
    # plt.autoscale(True)
    # plt.imshow(result)
    # plt.show()
    """
    Image Warping + Blending
    Save Panorama output as mypano.png
    """
    result = warpTwoImages(img2, img1, H)
    plt.figure()
    plt.imshow(result)
    plt.show()
    cv2.imwrite('Final_Output'+'.png',result)
    return result
def main():
    """Parse CLI arguments, load the image set, and stitch it sequentially.

    --BasePath: folder containing the .jpg images (sorted lexicographically).
    --Features: corner detector to use ('good', 'harris' or 'mineig').
    """
    # Add any Command Line arguments here
    parser = argparse.ArgumentParser()
    parser.add_argument('--BasePath', default="../Data/Train/Set1", help='Folder of Test Images')
    parser.add_argument('--Features', default="good", help='good or harris or mineig')
    args = parser.parse_args()
    feature = args.Features
    pattern = str(args.BasePath) + str("/*.jpg")
    # Read a set of images for Panorama stitching
    images = [plt.imread(path) for path in sorted(glob(pattern))]
    # stitch left-to-right: each intermediate panorama feeds the next pairing
    for i in range(len(images) - 1):
        images[i + 1] = autopano(images[i], images[i + 1], feature)
# Script entry point: run the full panorama-stitching pipeline.
if __name__ == '__main__':
    main()
| [
"cv2.BFMatcher",
"numpy.argsort",
"numpy.array",
"cv2.warpPerspective",
"numpy.linalg.norm",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.reshape",
"cv2.drawMatchesKnn",
"argparse.ArgumentParser",
"numpy.where",
"numpy.float64",
"skimage.feature.peak_local_max",
"cv2.cornerMinEigenVal"... | [((548, 585), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (560, 585), False, 'import cv2\n'), ((817, 828), 'numpy.where', 'np.where', (['m'], {}), '(m)\n', (825, 828), True, 'import numpy as np\n'), ((1003, 1037), 'skimage.feature.peak_local_max', 'peak_local_max', (['C'], {'min_distance': '(10)'}), '(C, min_distance=10)\n', (1017, 1037), False, 'from skimage.feature import peak_local_max\n'), ((1149, 1172), 'numpy.zeros', 'np.zeros', (['(n_strong, 1)'], {}), '((n_strong, 1))\n', (1157, 1172), True, 'import numpy as np\n'), ((1179, 1202), 'numpy.zeros', 'np.zeros', (['(n_strong, 1)'], {}), '((n_strong, 1))\n', (1187, 1202), True, 'import numpy as np\n'), ((1581, 1594), 'numpy.argsort', 'np.argsort', (['r'], {}), '(r)\n', (1591, 1594), True, 'import numpy as np\n'), ((1632, 1653), 'numpy.zeros', 'np.zeros', (['(n_best, 1)'], {}), '((n_best, 1))\n', (1640, 1653), True, 'import numpy as np\n'), ((1665, 1686), 'numpy.zeros', 'np.zeros', (['(n_best, 1)'], {}), '((n_best, 1))\n', (1673, 1686), True, 'import numpy as np\n'), ((1957, 1994), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1969, 1994), False, 'import cv2\n'), ((2009, 2055), 'cv2.goodFeaturesToTrack', 'cv2.goodFeaturesToTrack', (['gray_img', 'n', '(0.01)', '(10)'], {}), '(gray_img, n, 0.01, 10)\n', (2032, 2055), False, 'import cv2\n'), ((2068, 2084), 'numpy.int0', 'np.int0', (['corners'], {}), '(corners)\n', (2075, 2084), True, 'import numpy as np\n'), ((2389, 2426), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2401, 2426), False, 'import cv2\n'), ((2503, 2593), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['gray_img', 'top', 'bottom', 'left', 'right', 'cv2.BORDER_CONSTANT', 'None', '[0]'], {}), '(gray_img, top, bottom, left, right, cv2.BORDER_CONSTANT,\n None, [0])\n', (2521, 2593), False, 'import cv2\n'), ((3259, 
3295), 'numpy.array', 'np.array', (['features1'], {'dtype': '"""float32"""'}), "(features1, dtype='float32')\n", (3267, 3295), True, 'import numpy as np\n'), ((3312, 3348), 'numpy.array', 'np.array', (['features2'], {'dtype': '"""float32"""'}), "(features2, dtype='float32')\n", (3320, 3348), True, 'import numpy as np\n'), ((3358, 3373), 'cv2.BFMatcher', 'cv2.BFMatcher', ([], {}), '()\n', (3371, 3373), False, 'import cv2\n'), ((3873, 3893), 'numpy.float32', 'np.float32', (['corners1'], {}), '(corners1)\n', (3883, 3893), True, 'import numpy as np\n'), ((3906, 3926), 'numpy.float32', 'np.float32', (['corners2'], {}), '(corners2)\n', (3916, 3926), True, 'import numpy as np\n'), ((4202, 4263), 'cv2.drawMatchesKnn', 'cv2.drawMatchesKnn', (['img1', 'kp1', 'img2', 'kp2', 'good', 'None'], {'flags': '(2)'}), '(img1, kp1, img2, kp2, good, None, flags=2)\n', (4220, 4263), False, 'import cv2\n'), ((5085, 5101), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (5093, 5101), True, 'import numpy as np\n'), ((6676, 6709), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['pts2', 'H'], {}), '(pts2, H)\n', (6700, 6709), False, 'import cv2\n'), ((6721, 6758), 'numpy.concatenate', 'np.concatenate', (['(pts1, pts2_)'], {'axis': '(0)'}), '((pts1, pts2_), axis=0)\n', (6735, 6758), True, 'import numpy as np\n'), ((6912, 6961), 'numpy.array', 'np.array', (['[[1, 0, t[0]], [0, 1, t[1]], [0, 0, 1]]'], {}), '([[1, 0, t[0]], [0, 1, t[1]], [0, 0, 1]])\n', (6920, 6961), True, 'import numpy as np\n'), ((7258, 7291), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img1', '(3, 3)', '(0)'], {}), '(img1, (3, 3), 0)\n', (7274, 7291), False, 'import cv2\n'), ((7301, 7334), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img2', '(3, 3)', '(0)'], {}), '(img2, (3, 3), 0)\n', (7317, 7334), False, 'import cv2\n'), ((9786, 9814), 'numpy.float64', 'np.float64', (['[pt for pt in H]'], {}), '([pt for pt in H])\n', (9796, 9814), True, 'import numpy as np\n'), ((10005, 10065), 'cv2.warpPerspective', 
'cv2.warpPerspective', (['img1', 'H', '(img1.shape[1], img1.shape[0])'], {}), '(img1, H, (img1.shape[1], img1.shape[0]))\n', (10024, 10065), False, 'import cv2\n'), ((10309, 10321), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10319, 10321), True, 'import matplotlib.pyplot as plt\n'), ((10327, 10345), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {}), '(result)\n', (10337, 10345), True, 'import matplotlib.pyplot as plt\n'), ((10351, 10361), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10359, 10361), True, 'import matplotlib.pyplot as plt\n'), ((10367, 10411), 'cv2.imwrite', 'cv2.imwrite', (["('Final_Output' + '.png')", 'result'], {}), "('Final_Output' + '.png', result)\n", (10378, 10411), False, 'import cv2\n'), ((10502, 10527), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10525, 10527), False, 'import argparse\n'), ((628, 665), 'cv2.cornerMinEigenVal', 'cv2.cornerMinEigenVal', (['gray_img', '(2)', '(3)'], {}), '(gray_img, 2, 3)\n', (649, 665), False, 'import cv2\n'), ((689, 729), 'cv2.cornerHarris', 'cv2.cornerHarris', (['gray_img', '(2)', '(3)'], {'k': '(0.04)'}), '(gray_img, 2, 3, k=0.04)\n', (705, 729), False, 'import cv2\n'), ((1736, 1754), 'numpy.int0', 'np.int0', (['x[ind[i]]'], {}), '(x[ind[i]])\n', (1743, 1754), True, 'import numpy as np\n'), ((1776, 1794), 'numpy.int0', 'np.int0', (['y[ind[i]]'], {}), '(y[ind[i]])\n', (1783, 1794), True, 'import numpy as np\n'), ((1805, 1856), 'cv2.circle', 'cv2.circle', (['img', '(y_best[i], x_best[i])', '(3)', '(255)', '(-1)'], {}), '(img, (y_best[i], x_best[i]), 3, 255, -1)\n', (1815, 1856), False, 'import cv2\n'), ((2203, 2243), 'cv2.circle', 'cv2.circle', (['img_temp', '(x, y)', '(3)', '(255)', '(-1)'], {}), '(img_temp, (x, y), 3, 255, -1)\n', (2213, 2243), False, 'import cv2\n'), ((2734, 2785), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['patch', '(5, 5)', 'cv2.BORDER_DEFAULT'], {}), '(patch, (5, 5), cv2.BORDER_DEFAULT)\n', (2750, 2785), False, 'import 
cv2\n'), ((2800, 2855), 'cv2.resize', 'cv2.resize', (['patch', '(8, 8)'], {'interpolation': 'cv2.INTER_AREA'}), '(patch, (8, 8), interpolation=cv2.INTER_AREA)\n', (2810, 2855), False, 'import cv2\n'), ((2876, 2900), 'numpy.reshape', 'np.reshape', (['patch', '(64,)'], {}), '(patch, (64,))\n', (2886, 2900), True, 'import numpy as np\n'), ((2918, 2934), 'numpy.mean', 'np.mean', (['feature'], {}), '(feature)\n', (2925, 2934), True, 'import numpy as np\n'), ((2950, 2965), 'numpy.std', 'np.std', (['feature'], {}), '(feature)\n', (2956, 2965), True, 'import numpy as np\n'), ((7537, 7574), 'skimage.feature.peak_local_max', 'peak_local_max', (['map1'], {'min_distance': '(10)'}), '(map1, min_distance=10)\n', (7551, 7574), False, 'from skimage.feature import peak_local_max\n'), ((7631, 7668), 'skimage.feature.peak_local_max', 'peak_local_max', (['map2'], {'min_distance': '(10)'}), '(map2, min_distance=10)\n', (7645, 7668), False, 'from skimage.feature import peak_local_max\n'), ((10943, 10953), 'glob.glob', 'glob', (['path'], {}), '(path)\n', (10947, 10953), False, 'from glob import glob\n'), ((4087, 4126), 'cv2.KeyPoint', 'cv2.KeyPoint', (['pts1[i][1]', 'pts1[i][0]', '(5)'], {}), '(pts1[i][1], pts1[i][0], 5)\n', (4099, 4126), False, 'import cv2\n'), ((4148, 4187), 'cv2.KeyPoint', 'cv2.KeyPoint', (['pts2[i][1]', 'pts2[i][0]', '(5)'], {}), '(pts2[i][1], pts2[i][0], 5)\n', (4160, 4187), False, 'import cv2\n'), ((5346, 5360), 'numpy.float32', 'np.float32', (['p1'], {}), '(p1)\n', (5356, 5360), True, 'import numpy as np\n'), ((5362, 5376), 'numpy.float32', 'np.float32', (['p2'], {}), '(p2)\n', (5372, 5376), True, 'import numpy as np\n'), ((5492, 5530), 'numpy.array', 'np.array', (['[pts2[ind][0], pts2[ind][1]]'], {}), '([pts2[ind][0], pts2[ind][1]])\n', (5500, 5530), True, 'import numpy as np\n'), ((5875, 5907), 'numpy.array', 'np.array', (['[predict_x, predict_y]'], {}), '([predict_x, predict_y])\n', (5883, 5907), True, 'import numpy as np\n'), ((5930, 5970), 'numpy.float32', 
'np.float32', (['[point for point in predict]'], {}), '([point for point in predict])\n', (5940, 5970), True, 'import numpy as np\n'), ((5990, 6022), 'numpy.linalg.norm', 'np.linalg.norm', (['(target - predict)'], {}), '(target - predict)\n', (6004, 6022), True, 'import numpy as np\n'), ((6535, 6583), 'numpy.float32', 'np.float32', (['[[0, 0], [0, h1], [w1, h1], [w1, 0]]'], {}), '([[0, 0], [0, h1], [w1, h1], [w1, 0]])\n', (6545, 6583), True, 'import numpy as np\n'), ((6605, 6653), 'numpy.float32', 'np.float32', (['[[0, 0], [0, h2], [w2, h2], [w2, 0]]'], {}), '([[0, 0], [0, h2], [w2, h2], [w2, 0]])\n', (6615, 6653), True, 'import numpy as np\n'), ((9584, 9604), 'numpy.int0', 'np.int0', (['pts1f[i][0]'], {}), '(pts1f[i][0])\n', (9591, 9604), True, 'import numpy as np\n'), ((9626, 9646), 'numpy.int0', 'np.int0', (['pts1f[i][1]'], {}), '(pts1f[i][1])\n', (9633, 9646), True, 'import numpy as np\n'), ((9700, 9720), 'numpy.int0', 'np.int0', (['pts2f[i][0]'], {}), '(pts2f[i][0])\n', (9707, 9720), True, 'import numpy as np\n'), ((9742, 9762), 'numpy.int0', 'np.int0', (['pts2f[i][1]'], {}), '(pts2f[i][1])\n', (9749, 9762), True, 'import numpy as np\n'), ((11014, 11027), 'matplotlib.pyplot.imread', 'plt.imread', (['i'], {}), '(i)\n', (11024, 11027), True, 'import matplotlib.pyplot as plt\n'), ((5563, 5598), 'numpy.array', 'np.array', (['[source[0], source[1], 1]'], {}), '([source[0], source[1], 1])\n', (5571, 5598), True, 'import numpy as np\n')] |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import sympy
import cirq
import pytest
import qsimcirq
class NoiseTrigger(cirq.SingleQubitGate):
    """A no-op gate with no _unitary_ method defined.
    Appending this gate to a circuit will force it to use qtrajectory, but the
    new circuit will otherwise behave identically to the original.
    """
    # Kept for reference: a _mixture_ definition would also trigger
    # trajectory simulation.
    # def _mixture_(self):
    #     return ((1.0, np.asarray([1, 0, 0, 1])),)
    def _kraus_(self):
        # Single Kraus operator with identity entries (flattened 2x2
        # identity), so the "noise" leaves the state unchanged.
        return (np.asarray([1, 0, 0, 1]),)
def test_empty_circuit():
    """An empty circuit simulates to the trivial single-amplitude state."""
    sim = qsimcirq.QSimSimulator()
    final = sim.simulate(cirq.Circuit())
    assert final.final_state_vector.shape == (1,)
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_empty_moment(mode: str):
    """A circuit containing an empty Moment simulates without error."""
    q0, q1 = cirq.LineQubit.range(2)
    circuit = cirq.Circuit([cirq.X(q0) ** 0.5, cirq.Moment(), cirq.X(q1) ** 0.5])
    if mode == "noisy":
        circuit.append(NoiseTrigger().on(q0))
    final = qsimcirq.QSimSimulator().simulate(circuit)
    assert final.final_state_vector.shape == (4,)
def test_cirq_too_big_gate():
    """Gates acting on more than 6 qubits are rejected by qsim."""

    class BigGate(cirq.Gate):
        """A 7-qubit identity, one qubit past qsim's per-gate limit."""

        def _num_qubits_(self):
            return 7

        def _qid_shape_(self):
            return (2,) * 7

        def _unitary_(self):
            return np.eye(128)

    # First 7 qubits of a 3x3 grid, row-major.
    qubits = [cirq.GridQubit(r, c) for r in range(3) for c in range(3)][:7]
    circuit = cirq.Circuit(BigGate().on(*qubits))
    sim = qsimcirq.QSimSimulator()
    with pytest.raises(NotImplementedError):
        sim.compute_amplitudes(circuit, bitstrings=[0b0, 0b1])
def test_cirq_giant_identity():
    """A 7-qubit identity gate is dropped; the rest of the circuit is kept."""
    # First 8 qubits of a 3x3 grid, row-major.
    qubits = [cirq.GridQubit(r, c) for r in range(3) for c in range(3)][:8]
    *id_qubits, h = qubits
    with_id = cirq.Circuit(
        cirq.IdentityGate(7).on(*id_qubits),
        cirq.X(h),
    )
    without_id = cirq.Circuit(cirq.X(h))
    sim = qsimcirq.QSimSimulator()
    assert sim.simulate(with_id) == sim.simulate(without_id, qubit_order=qubits)
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_cirq_qsim_simulate(mode: str):
    """compute_amplitudes returns the expected amplitudes for a small circuit."""
    a, b, c, d = (
        cirq.GridQubit(0, 0),
        cirq.GridQubit(0, 1),
        cirq.GridQubit(1, 1),
        cirq.GridQubit(1, 0),
    )
    circuit = cirq.Circuit(
        [
            cirq.X(a) ** 0.5,  # Square root of X.
            cirq.Y(b) ** 0.5,  # Square root of Y.
            cirq.Z(c),
            cirq.CZ(a, d),
        ]
    )
    if mode == "noisy":
        circuit.append(NoiseTrigger().on(a))
    amplitudes = qsimcirq.QSimSimulator().compute_amplitudes(
        circuit, bitstrings=[0b0100, 0b1011]
    )
    assert np.allclose(amplitudes, [0.5j, 0j])
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_cirq_qsim_simulate_fullstate(mode: str):
    """Full state-vector simulation matches cirq up to a global phase."""
    # Pick qubits.
    a, b, c, d = [
        cirq.GridQubit(0, 0),
        cirq.GridQubit(0, 1),
        cirq.GridQubit(1, 1),
        cirq.GridQubit(1, 0),
    ]
    # Create a circuit with explicit Moments to pin gate scheduling.
    cirq_circuit = cirq.Circuit(
        cirq.Moment(
            cirq.X(a) ** 0.5,  # Square root of X.
            cirq.H(b),  # Hadamard.
            cirq.X(c),  # X.
            cirq.H(d),  # Hadamard.
        ),
        cirq.Moment(
            cirq.X(a) ** 0.5,  # Square root of X.
            cirq.CX(b, c),  # ControlX.
            cirq.S(d),  # S (square root of Z).
        ),
        cirq.Moment(
            cirq.I(a),
            cirq.ISWAP(b, c),
        ),
    )
    if mode == "noisy":
        cirq_circuit.append(NoiseTrigger().on(a))
    qsimSim = qsimcirq.QSimSimulator()
    result = qsimSim.simulate(cirq_circuit, qubit_order=[a, b, c, d])
    assert result.state_vector().shape == (16,)
    cirqSim = cirq.Simulator()
    cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=[a, b, c, d])
    # When using rotation gates such as S, qsim may add a global phase relative
    # to other simulators. This is fine, as the result is equivalent.
    assert cirq.linalg.allclose_up_to_global_phase(
        result.state_vector(), cirq_result.state_vector()
    )
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_cirq_qsim_simulate_sweep(mode: str):
    """Parameter sweeps agree with cirq for the default, bitstring, and
    state-vector forms of initial_state."""
    # Pick qubits.
    a, b = [
        cirq.GridQubit(0, 0),
        cirq.GridQubit(0, 1),
    ]
    x = sympy.Symbol("x")
    # Create a circuit parameterized on the X exponent.
    cirq_circuit = cirq.Circuit(
        cirq.Moment(
            cirq.X(a) ** x,
            cirq.H(b),  # Hadamard.
        ),
        cirq.Moment(
            cirq.CX(a, b),  # ControlX.
        ),
    )
    if mode == "noisy":
        cirq_circuit.append(NoiseTrigger().on(a))
    params = [{x: 0.25}, {x: 0.5}, {x: 0.75}]
    qsimSim = qsimcirq.QSimSimulator()
    qsim_result = qsimSim.simulate_sweep(cirq_circuit, params)
    cirqSim = cirq.Simulator()
    cirq_result = cirqSim.simulate_sweep(cirq_circuit, params)
    # Default initial state (|00>).
    for i in range(len(qsim_result)):
        assert cirq.linalg.allclose_up_to_global_phase(
            qsim_result[i].state_vector(), cirq_result[i].state_vector()
        )
    # initial_state supports bitstrings.
    qsim_result = qsimSim.simulate_sweep(cirq_circuit, params, initial_state=0b01)
    cirq_result = cirqSim.simulate_sweep(cirq_circuit, params, initial_state=0b01)
    for i in range(len(qsim_result)):
        assert cirq.linalg.allclose_up_to_global_phase(
            qsim_result[i].state_vector(), cirq_result[i].state_vector()
        )
    # initial_state supports state vectors (must be complex64; see
    # test_input_vector_validation).
    initial_state = np.asarray([0.5j, 0.5, -0.5j, -0.5], dtype=np.complex64)
    qsim_result = qsimSim.simulate_sweep(
        cirq_circuit, params, initial_state=initial_state
    )
    cirq_result = cirqSim.simulate_sweep(
        cirq_circuit, params, initial_state=initial_state
    )
    for i in range(len(qsim_result)):
        assert cirq.linalg.allclose_up_to_global_phase(
            qsim_result[i].state_vector(), cirq_result[i].state_vector()
        )
def test_input_vector_validation():
    """Malformed initial_state vectors raise the appropriate errors."""
    circuit = cirq.Circuit(cirq.X(cirq.LineQubit(0)), cirq.X(cirq.LineQubit(1)))
    sim = qsimcirq.QSimSimulator()
    # Wrong length: 16 amplitudes for a 2-qubit (4-amplitude) circuit.
    with pytest.raises(ValueError):
        bad_length = np.asarray([0.25] * 16, dtype=np.complex64)
        sim.simulate_sweep(circuit, [{}], initial_state=bad_length)
    # Wrong dtype: vectors must be complex64.
    with pytest.raises(TypeError):
        bad_dtype = np.asarray([0.5] * 4)
        sim.simulate_sweep(circuit, [{}], initial_state=bad_dtype)
def test_numpy_params():
    """numpy integer parameter values are accepted by the sweep API."""
    q0 = cirq.LineQubit(0)
    x, y = sympy.symbols("x y")
    circuit = cirq.Circuit(cirq.X(q0) ** x, cirq.H(q0) ** y)
    sweep = [{x: np.int64(0), y: np.int64(1)}, {x: np.int64(1), y: np.int64(0)}]
    qsimcirq.QSimSimulator().simulate_sweep(circuit, params=sweep)
def test_invalid_params():
    """Non-numeric parameter values are rejected with a clear error."""
    q0 = cirq.LineQubit(0)
    x, y = sympy.symbols("x y")
    circuit = cirq.Circuit(cirq.X(q0) ** x, cirq.H(q0) ** y)
    # The second resolver maps y to a string, which is not a valid value.
    sweep = [{x: np.int64(0), y: np.int64(1)}, {x: np.int64(1), y: "z"}]
    with pytest.raises(ValueError, match="Parameters must be numeric"):
        _ = qsimcirq.QSimSimulator().simulate_sweep(circuit, params=sweep)
def test_iterable_qubit_order():
    """Any iterable (not just a list) works as qubit_order.

    Note: reversed(...) yields a one-shot iterator, so a fresh one is
    deliberately built for each simulator call.
    """
    # Check to confirm that iterable qubit_order works in all cases.
    q0, q1 = cirq.LineQubit.range(2)
    circuit = cirq.Circuit(
        cirq.H(q0),
        cirq.H(q1),
    )
    qsim_simulator = qsimcirq.QSimSimulator()
    # compute_amplitudes accepts an iterator-valued qubit_order.
    assert qsim_simulator.compute_amplitudes(
        circuit,
        bitstrings=[0b00, 0b01],
        qubit_order=reversed([q1, q0]),
    ) == qsim_simulator.compute_amplitudes(circuit, bitstrings=[0b00, 0b01])
    # simulate accepts an iterator-valued qubit_order.
    assert qsim_simulator.simulate(
        circuit, qubit_order=reversed([q1, q0])
    ) == qsim_simulator.simulate(circuit)
    # simulate_expectation_values_sweep accepts an iterator-valued qubit_order.
    assert qsim_simulator.simulate_expectation_values_sweep(
        circuit,
        observables=[cirq.X(q0) * cirq.Z(q1)],
        params={},
        qubit_order=reversed([q1, q0]),
        permit_terminal_measurements=True,
    ) == qsim_simulator.simulate_expectation_values_sweep(
        circuit,
        observables=[cirq.X(q0) * cirq.Z(q1)],
        params={},
        permit_terminal_measurements=True,
    )
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_preserve_qubits(mode: str):
    """Qubits listed only in qubit_order still appear in the result."""
    q0, q1 = cirq.LineQubit.range(2)
    circuit = cirq.Circuit(cirq.X(q0))
    if mode == "noisy":
        circuit.append(NoiseTrigger().on(q0))
    sim = qsimcirq.QSimSimulator()
    # Naming q1 only via qubit_order is equivalent to padding with I(q1).
    via_order = sim.simulate(circuit, qubit_order=[q0, q1])
    via_identity = sim.simulate(circuit + cirq.I(q1))
    assert via_order == via_identity
    assert via_order.final_state_vector.shape == (4,)
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_cirq_qsim_run(mode: str):
    """run() yields one (repetitions, 1)-shaped column per measurement key."""
    a, b, c, d = (
        cirq.GridQubit(0, 0),
        cirq.GridQubit(0, 1),
        cirq.GridQubit(1, 1),
        cirq.GridQubit(1, 0),
    )
    circuit = cirq.Circuit(
        [
            cirq.X(a) ** 0.5,  # Square root of X.
            cirq.Y(b) ** 0.5,  # Square root of Y.
            cirq.Z(c),
            cirq.CZ(a, d),
        ]
        # One single-qubit measurement per qubit.
        + [cirq.measure(q, key=k) for q, k in zip((a, b, c, d), ("ma", "mb", "mc", "md"))]
    )
    if mode == "noisy":
        circuit.append(NoiseTrigger().on(a))
    sim = qsimcirq.QSimSimulator()
    assert isinstance(sim, cirq.SimulatesSamples)
    result = sim.run(circuit, repetitions=5)
    for value in result.measurements.values():
        assert value.shape == (5, 1)
def test_qsim_invert_mask():
    """invert_mask flips measured bits identically in qsim and cirq."""
    q0, q1 = cirq.LineQubit.range(2)
    circuit = cirq.Circuit(cirq.measure(q0, q1, key="d", invert_mask=[False, True]))
    expected = cirq.Simulator().sample(circuit, repetitions=5)
    actual = qsimcirq.QSimSimulator().sample(circuit, repetitions=5)
    assert actual.equals(expected)
def test_qsim_invert_mask_different_qubits():
    """invert_mask is applied per key, whatever the qubit order in the key."""
    q0, q1 = cirq.LineQubit.range(2)
    measurements = [
        cirq.measure(q1, key="a", invert_mask=[True]),
        cirq.measure(q0, key="b", invert_mask=[True]),
        cirq.measure(q0, q1, key="c", invert_mask=[False, True]),
        cirq.measure(q1, q0, key="d", invert_mask=[False, True]),
    ]
    circuit = cirq.Circuit(measurements)
    expected = cirq.Simulator().sample(circuit, repetitions=5)
    actual = qsimcirq.QSimSimulator().sample(circuit, repetitions=5)
    assert actual.equals(expected)
def test_qsim_invert_mask_intermediate_measure():
    """Masks on mid-circuit measurements match cirq.

    Each mask exactly cancels the X gates applied so far, so every sampled
    row should be all zeroes.
    """
    q0, q1 = cirq.LineQubit.range(2)
    circuit = cirq.Circuit(
        [
            cirq.measure(q0, q1, key="a", invert_mask=[False, False]),
            cirq.X(q0),
            cirq.measure(q0, q1, key="b", invert_mask=[True, False]),
            cirq.X(q1),
            cirq.measure(q0, q1, key="c", invert_mask=[True, True]),
            cirq.X(q0),
            cirq.measure(q0, q1, key="d", invert_mask=[False, True]),
        ]
    )
    expected = cirq.Simulator().sample(circuit, repetitions=5)
    actual = qsimcirq.QSimSimulator().sample(circuit, repetitions=5)
    assert actual.equals(expected)
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_qsim_run_vs_cirq_run(mode: str):
    """Measurement keys map to the same qubits in qsim and cirq run()."""
    a, b, c, d = (
        cirq.GridQubit(0, 0),
        cirq.GridQubit(0, 1),
        cirq.GridQubit(1, 0),
        cirq.GridQubit(1, 1),
    )
    circuit = cirq.Circuit(
        cirq.X(b),
        cirq.CX(b, d),
        cirq.measure(a, b, c, key="mabc"),
        cirq.measure(d, key="md"),
    )
    if mode == "noisy":
        circuit.append(NoiseTrigger().on(a))
    # The circuit is deterministic, so exact equality of results is expected.
    cirq_result = cirq.Simulator().run(circuit, repetitions=20)
    qsim_result = qsimcirq.QSimSimulator().run(circuit, repetitions=20)
    assert qsim_result == cirq_result
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_expectation_values(mode: str):
    """Pauli-sum expectation values agree between qsim and cirq sweeps."""
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    x_exp = sympy.Symbol("x_exp")
    h_exp = sympy.Symbol("h_exp")
    circuit = cirq.Circuit(
        cirq.X(a) ** x_exp,
        cirq.H(b),
        cirq.H(a) ** h_exp,
        cirq.H(b) ** h_exp,
    )
    # Sweep over four product states: |0+), |1+), |+0), |-0).
    params = [{x_exp: x, h_exp: h} for x, h in ((0, 0), (1, 0), (0, 1), (1, 1))]
    observables = [
        cirq.Z(a) + 3 * cirq.X(b),
        cirq.X(a) - 3 * cirq.Z(b),
    ]
    if mode == "noisy":
        circuit.append(NoiseTrigger().on(a))
    qsim_result = qsimcirq.QSimSimulator().simulate_expectation_values_sweep(
        circuit, observables, params
    )
    cirq_result = cirq.Simulator().simulate_expectation_values_sweep(
        circuit, observables, params
    )
    assert cirq.approx_eq(qsim_result, cirq_result, atol=1e-6)
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_moment_expectation_values(mode: str):
    """Rabi oscillation, measuring <Z> after every moment.

    Each moment applies X**(1/steps), so after k moments the qubit has
    rotated by k*pi/steps and <Z> = cos(k*pi/steps).
    """
    q0 = cirq.LineQubit(0)
    steps = 20
    # The original hard-coded the exponent 0.05 and the divisor 20; both are
    # 1/steps, so derive them from `steps` to keep circuit and reference in
    # sync if the step count ever changes. Behavior is unchanged for steps=20.
    circuit = cirq.Circuit(*[cirq.X(q0) ** (1 / steps) for _ in range(steps)])
    psum = cirq.Z(q0)
    params = {}
    if mode == "noisy":
        circuit.append(NoiseTrigger().on(q0))
    qsim_simulator = qsimcirq.QSimSimulator()
    qsim_result = qsim_simulator.simulate_moment_expectation_values(
        circuit, psum, params
    )
    # Omit the noise trigger element appended in "noisy" mode.
    results = [r[0] for r in qsim_result][:steps]
    assert np.allclose(
        [result.real for result in results],
        [np.cos(np.pi * (i + 1) / steps) for i in range(steps)],
        atol=1e-6,
    )
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_select_moment_expectation_values(mode: str):
    """simulate_moment_expectation_values with a {moment-index: observables} map."""
    # Measure different observables after specified steps.
    q0, q1 = cirq.LineQubit.range(2)
    circuit = cirq.Circuit(
        cirq.Moment(cirq.X(q0), cirq.H(q1)),
        cirq.Moment(cirq.H(q0), cirq.Z(q1)),
        cirq.Moment(cirq.Z(q0), cirq.H(q1)),
        cirq.Moment(cirq.H(q0), cirq.X(q1)),
    )
    # Keys are moment indices; values are the observable(s) to evaluate after
    # that moment. Moment 2 is deliberately skipped.
    psum_map = {
        0: cirq.Z(q0),
        1: [cirq.X(q0), cirq.Z(q1)],
        3: [cirq.Z(q0), cirq.Z(q1)],
    }
    params = {}
    if mode == "noisy":
        circuit.append(NoiseTrigger().on(q0))
    qsim_simulator = qsimcirq.QSimSimulator()
    qsim_result = qsim_simulator.simulate_moment_expectation_values(
        circuit, psum_map, params
    )
    # One result list per requested moment, in increasing index order.
    expected_results = [[-1], [-1, 0], [1, 1]]
    for i, result in enumerate(qsim_result):
        assert np.allclose(result, expected_results[i])
def test_expectation_values_terminal_measurement_check():
    """Terminal measurements are rejected unless explicitly permitted."""
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    circuit = cirq.Circuit(cirq.X(a), cirq.H(b), cirq.measure(a, b, key="m"))
    psum = cirq.Z(a) + 3 * cirq.X(b)
    sim = qsimcirq.QSimSimulator()
    with pytest.raises(ValueError, match="Provided circuit has terminal measurements"):
        _ = sim.simulate_expectation_values(circuit, [psum])
    # permit_terminal_measurements disables the error.
    sim.simulate_expectation_values(circuit, [psum], permit_terminal_measurements=True)
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_intermediate_measure(mode: str):
    """Circuits with mid-circuit measurements run identically to cirq."""
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    circuit = cirq.Circuit(
        [
            cirq.X(a),
            cirq.CX(a, b),
            cirq.measure(a, b, key="m1"),
            cirq.CZ(a, b),
            cirq.measure(a, b, key="m2"),
            cirq.X(a),
            cirq.CX(a, b),
            cirq.measure(a, b, key="m3"),
            # Trailing gates with no measurement do not affect results.
            cirq.H(a),
            cirq.H(b),
        ]
    )
    if mode == "noisy":
        circuit.append(NoiseTrigger().on(a))
    cirq_result = cirq.Simulator().run(circuit, repetitions=20)
    qsim_result = qsimcirq.QSimSimulator().run(circuit, repetitions=20)
    assert qsim_result == cirq_result
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_sampling_nondeterminism(mode: str):
    """A reused QSimSimulator reseeds between runs instead of repeating."""
    q = cirq.GridQubit(0, 0)
    circuit = cirq.Circuit(cirq.H(q), cirq.measure(q, key="m"))
    if mode == "noisy":
        circuit.append(NoiseTrigger().on(q))
    samples = qsimcirq.QSimSimulator().run(circuit, repetitions=100)
    counts = samples.histogram(key="m")
    # Both outcomes of the fair coin should be observed.
    assert counts[0] > 1
    assert counts[1] > 1
def test_matrix1_gate():
    """A one-qubit MatrixGate simulates to the same state as cirq."""
    q = cirq.LineQubit(0)
    m = np.sqrt(0.5) * np.array([[1, 1j], [1j, 1]])
    circuit = cirq.Circuit(cirq.MatrixGate(m).on(q))
    qsim_state = qsimcirq.QSimSimulator().simulate(circuit).state_vector()
    assert qsim_state.shape == (2,)
    cirq_state = cirq.Simulator().simulate(circuit).state_vector()
    assert cirq.linalg.allclose_up_to_global_phase(qsim_state, cirq_state)
def test_matrix2_gate():
    """A two-qubit MatrixGate simulates to the same state as cirq."""
    qubits = cirq.LineQubit.range(2)
    m = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
    circuit = cirq.Circuit(cirq.MatrixGate(m).on(*qubits))
    qsim_result = qsimcirq.QSimSimulator().simulate(circuit, qubit_order=qubits)
    assert qsim_result.state_vector().shape == (4,)
    cirq_result = cirq.Simulator().simulate(circuit, qubit_order=qubits)
    assert cirq.linalg.allclose_up_to_global_phase(
        qsim_result.state_vector(), cirq_result.state_vector()
    )
def test_big_matrix_gates():
    """A three-qubit MatrixGate simulates to the same state as cirq."""
    qubits = cirq.LineQubit.range(3)
    # Toffoli gate as a matrix.
    toffoli = np.array(
        [
            [1, 0, 0, 0, 0, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0, 0],
            [0, 0, 0, 1, 0, 0, 0, 0],
            [0, 0, 0, 0, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 1, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 1],
            [0, 0, 0, 0, 0, 0, 1, 0],
        ]
    )
    circuit = cirq.Circuit(
        cirq.H(qubits[0]),
        cirq.H(qubits[1]),
        cirq.MatrixGate(toffoli).on(*qubits),
    )
    qsim_result = qsimcirq.QSimSimulator().simulate(circuit, qubit_order=qubits)
    assert qsim_result.state_vector().shape == (8,)
    cirq_result = cirq.Simulator().simulate(circuit, qubit_order=qubits)
    assert cirq.linalg.allclose_up_to_global_phase(
        qsim_result.state_vector(), cirq_result.state_vector()
    )
def test_decompose_to_matrix_gates():
    """Unknown, non-decomposable gates are converted to MatrixGates."""

    class UnknownThreeQubitGate(cirq.ops.Gate):
        """This gate is not recognized by qsim, and cannot be decomposed.
        qsim should attempt to convert it to a MatrixGate to resolve the issue.
        """

        def __init__(self):
            pass

        def _num_qubits_(self):
            return 3

        def _qid_shape_(self):
            return (2, 2, 2)

        def _unitary_(self):
            # Toffoli gate as a matrix.
            return np.array(
                [
                    [1, 0, 0, 0, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0, 0, 0, 0],
                    [0, 0, 0, 0, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 1],
                    [0, 0, 0, 0, 0, 0, 1, 0],
                ]
            )

    qubits = cirq.LineQubit.range(3)
    cirq_circuit = cirq.Circuit(
        cirq.H(qubits[0]),
        cirq.H(qubits[1]),
        UnknownThreeQubitGate().on(*qubits),
    )
    qsimSim = qsimcirq.QSimSimulator()
    result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
    assert result.state_vector().shape == (8,)
    cirqSim = cirq.Simulator()
    cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=qubits)
    assert cirq.linalg.allclose_up_to_global_phase(
        result.state_vector(), cirq_result.state_vector()
    )
def test_basic_controlled_gate():
    """controlled_by on standard gates matches cirq."""
    q0, q1, q2 = cirq.LineQubit.range(3)
    circuit = cirq.Circuit(
        cirq.H(q1),
        cirq.Y(q2),
        cirq.X(q0).controlled_by(q1),
        cirq.CX(q1, q2).controlled_by(q0),
        cirq.H(q1).controlled_by(q0, q2),
    )
    qsim_result = qsimcirq.QSimSimulator().simulate(circuit, qubit_order=[q0, q1, q2])
    assert qsim_result.state_vector().shape == (8,)
    cirq_result = cirq.Simulator().simulate(circuit, qubit_order=[q0, q1, q2])
    assert cirq.linalg.allclose_up_to_global_phase(
        qsim_result.state_vector(), cirq_result.state_vector()
    )
def test_controlled_matrix_gates():
    """controlled_by composes with MatrixGate for 1- and 2-qubit matrices."""
    qubits = cirq.LineQubit.range(4)
    m1 = np.array([[1, 1j], [1j, 1]]) * np.sqrt(0.5)
    m2 = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
    # Exercise 1, 1, 3, and 2 control qubits respectively.
    cirq_circuit = cirq.Circuit(
        cirq.MatrixGate(m1).on(qubits[0]).controlled_by(qubits[3]),
        cirq.MatrixGate(m2).on(*qubits[1:3]).controlled_by(qubits[0]),
        cirq.MatrixGate(m1)
        .on(qubits[2])
        .controlled_by(qubits[0], qubits[1], qubits[3]),
        cirq.MatrixGate(m2).on(qubits[0], qubits[3]).controlled_by(*qubits[1:3]),
    )
    qsimSim = qsimcirq.QSimSimulator()
    result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
    assert result.state_vector().shape == (16,)
    cirqSim = cirq.Simulator()
    cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=qubits)
    assert cirq.linalg.allclose_up_to_global_phase(
        result.state_vector(), cirq_result.state_vector()
    )
def test_control_values():
    """Gates may be conditioned on arbitrary control values, including 0."""
    qubits = cirq.LineQubit.range(3)
    cirq_circuit = cirq.Circuit(
        # Controlled by |01) state on qubits 1 and 2
        cirq.X(qubits[0]).controlled_by(*qubits[1:], control_values=[0, 1]),
        # Controlled by either |0) or |1) on qubit 0 (i.e., uncontrolled)
        cirq.X(qubits[1]).controlled_by(qubits[0], control_values=[(0, 1)]),
        # Controlled by |10) state on qubits 0 and 1
        cirq.X(qubits[2]).controlled_by(qubits[1], qubits[0], control_values=[0, 1]),
    )
    qsimSim = qsimcirq.QSimSimulator()
    result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
    assert result.state_vector().shape == (8,)
    cirqSim = cirq.Simulator()
    cirq_result = cirqSim.simulate(cirq_circuit, qubit_order=qubits)
    assert cirq.linalg.allclose_up_to_global_phase(
        result.state_vector(), cirq_result.state_vector()
    )
    # A gate whose control lives on a qudit is dropped with a warning,
    # leaving the state at |0...0).
    qubits = cirq.LineQid.for_qid_shape([2, 3, 2])
    cirq_circuit = cirq.Circuit(
        # Controlled by |12) state on qubits 0 and 1
        # Since qsim does not support qudits (yet), this gate is omitted.
        cirq.X(qubits[2]).controlled_by(*qubits[:2], control_values=[1, 2]),
    )
    qsimSim = qsimcirq.QSimSimulator()
    with pytest.warns(RuntimeWarning, match="Gate has no valid control value"):
        result = qsimSim.simulate(cirq_circuit, qubit_order=qubits)
    assert result.state_vector()[0] == 1
def test_control_limits():
    """Controlled gates accept at most 4 target qubits."""
    # qsim allows any number of controls, but at most 4 target qubits.
    # Uncontrolled gates may have up to 6 qubits.
    qubits = cirq.LineQubit.range(6)
    # One target, five controls: accepted.
    CCCCCH = cirq.H(qubits[0]).controlled_by(*qubits[1:])
    # A single 5-qubit MatrixGate (H on each of five qubits): accepted
    # while uncontrolled.
    HHHHH = cirq.MatrixGate(cirq.unitary(cirq.Circuit(cirq.H.on_each(*qubits[1:])))).on(
        *qubits[1:]
    )
    # The same 5-target gate with one control: over the 4-target limit.
    CHHHHH = HHHHH.controlled_by(qubits[0])
    qsimSim = qsimcirq.QSimSimulator()
    result = qsimSim.simulate(cirq.Circuit(CCCCCH), qubit_order=qubits)
    assert result.state_vector().shape == (64,)
    result = qsimSim.simulate(cirq.Circuit(HHHHH), qubit_order=qubits)
    assert result.state_vector().shape == (64,)
    with pytest.raises(
        NotImplementedError, match="Received control gate on 5 target qubits"
    ):
        _ = qsimSim.simulate(cirq.Circuit(CHHHHH), qubit_order=qubits)
def test_decomposable_gate():
    """Gates qsim lacks natively (CCX) are decomposed into supported ones."""
    qubits = cirq.LineQubit.range(4)
    # The Toffoli gate (CCX) decomposes into multiple qsim-supported gates.
    circuit = cirq.Circuit(
        cirq.H(qubits[0]),
        cirq.H(qubits[1]),
        cirq.Moment(
            cirq.CCX(*qubits[:3]),
            cirq.H(qubits[3]),
        ),
        cirq.H(qubits[2]),
        cirq.H(qubits[3]),
    )
    qsim_result = qsimcirq.QSimSimulator().simulate(circuit, qubit_order=qubits)
    assert qsim_result.state_vector().shape == (16,)
    cirq_result = cirq.Simulator().simulate(circuit, qubit_order=qubits)
    # Decomposition may result in gates which add a global phase.
    assert cirq.linalg.allclose_up_to_global_phase(
        qsim_result.state_vector(), cirq_result.state_vector()
    )
def test_complicated_decomposition():
    """A multi-level decomposition (QFT) lands cleanly in the qsim gateset."""
    qubits = cirq.LineQubit.range(4)
    circuit = cirq.Circuit(cirq.QuantumFourierTransformGate(4).on(*qubits))
    qsim_result = qsimcirq.QSimSimulator().simulate(circuit, qubit_order=qubits)
    assert qsim_result.state_vector().shape == (16,)
    cirq_result = cirq.Simulator().simulate(circuit, qubit_order=qubits)
    # Decomposition may result in gates which add a global phase.
    assert cirq.linalg.allclose_up_to_global_phase(
        qsim_result.state_vector(), cirq_result.state_vector()
    )
# Helper class for noisy circuit tests.
class NoiseStep(cirq.Gate):
    """Wraps one raw operator matrix (e.g. a single Kraus operator) as a gate,
    so an individual trajectory branch can be appended to a circuit directly.
    """

    def __init__(self, matrix, num_qubits=1):
        # matrix: the operator to apply; need not be unitary.
        self._matrix = matrix
        self._num_qubits = num_qubits

    def _num_qubits_(self):
        return self._num_qubits

    def _unitary_(self):
        # Not necessarily a unitary.
        return self._matrix

    def __str__(self):
        return f"NoiseStep({self._matrix})"

    def __repr__(self):
        return str(self)
def test_mixture_simulation():
    """Mixtures (phase/bit flip) are simulated as trajectories.

    Every qsim run must land on exactly one of the four post-mixture states,
    and over many seeded runs all four should appear.
    """
    q0, q1 = cirq.LineQubit.range(2)
    pflip = cirq.phase_flip(p=0.4)
    bflip = cirq.bit_flip(p=0.6)
    cirq_circuit = cirq.Circuit(
        cirq.X(q0) ** 0.5,
        cirq.X(q1) ** 0.5,
        pflip.on(q0),
        bflip.on(q1),
    )
    possible_circuits = [
        cirq.Circuit(cirq.X(q0) ** 0.5, cirq.X(q1) ** 0.5, pf, bf)
        # Extract the operators from the mixtures to construct trajectories.
        for pf in [NoiseStep(m).on(q0) for m in cirq.kraus(pflip)]
        for bf in [NoiseStep(m).on(q1) for m in cirq.kraus(bflip)]
    ]
    possible_states = [
        cirq.Simulator().simulate(pc).state_vector() for pc in possible_circuits
    ]
    # Since some "gates" were non-unitary, we must normalize.
    possible_states = [ps / np.linalg.norm(ps) for ps in possible_states]
    # Minimize flaky tests with a fixed seed.
    qsimSim = qsimcirq.QSimSimulator(seed=1)
    result_hist = [0] * len(possible_states)
    run_count = 100
    for _ in range(run_count):
        result = qsimSim.simulate(cirq_circuit, qubit_order=[q0, q1])
        for i, ps in enumerate(possible_states):
            if cirq.allclose_up_to_global_phase(result.state_vector(), ps):
                result_hist[i] += 1
                break
    # Each observed result should match one of the possible_results.
    assert sum(result_hist) == run_count
    # Over 100 runs, it's reasonable to expect all four outcomes.
    assert all(result_count > 0 for result_count in result_hist)
def test_channel_simulation():
    """General channels (amplitude damping) are simulated as trajectories.

    Analogous to test_mixture_simulation but with non-unitary Kraus
    operators; every run must match one of the eight possible outcomes.
    """
    q0, q1 = cirq.LineQubit.range(2)
    # These probabilities are set unreasonably high in order to reduce the number
    # of runs required to observe every possible operator.
    amp_damp = cirq.amplitude_damp(gamma=0.5)
    gen_amp_damp = cirq.generalized_amplitude_damp(p=0.4, gamma=0.6)
    cirq_circuit = cirq.Circuit(
        cirq.X(q0) ** 0.5,
        cirq.X(q1) ** 0.5,
        amp_damp.on(q0),
        gen_amp_damp.on(q1),
    )
    possible_circuits = [
        cirq.Circuit(cirq.X(q0) ** 0.5, cirq.X(q1) ** 0.5, ad, gad)
        # Extract the operators from the channels to construct trajectories.
        for ad in [NoiseStep(m).on(q0) for m in cirq.kraus(amp_damp)]
        for gad in [NoiseStep(m).on(q1) for m in cirq.kraus(gen_amp_damp)]
    ]
    possible_states = [
        cirq.Simulator().simulate(pc).state_vector() for pc in possible_circuits
    ]
    # Since some "gates" were non-unitary, we must normalize.
    possible_states = [ps / np.linalg.norm(ps) for ps in possible_states]
    # Minimize flaky tests with a fixed seed.
    qsimSim = qsimcirq.QSimSimulator(seed=1)
    result_hist = [0] * len(possible_states)
    run_count = 200
    for _ in range(run_count):
        result = qsimSim.simulate(cirq_circuit, qubit_order=[q0, q1])
        for i, ps in enumerate(possible_states):
            if cirq.allclose_up_to_global_phase(result.state_vector(), ps):
                result_hist[i] += 1
                break
    # Each observed result should match one of the possible_results.
    assert sum(result_hist) == run_count
    # Over 200 runs, it's reasonable to expect all eight outcomes.
    assert all(result_count > 0 for result_count in result_hist)
# Helper class for multi-qubit noisy circuit tests.
class NoiseChannel(cirq.Gate):
    """Channel built from explicit (probability, Kraus-matrix) pairs.

    Each matrix is wrapped in a NoiseStep gate so that individual
    trajectory operators can be extracted by multi-qubit noise tests.
    """

    def __init__(self, *prob_mat_pairs, num_qubits=1):
        # Store each (probability, matrix) pair as (probability, NoiseStep).
        self._prob_op_pairs = [
            (prob, NoiseStep(m, num_qubits)) for prob, m in prob_mat_pairs
        ]
        self._num_qubits = num_qubits

    def _num_qubits_(self):
        return self._num_qubits

    def _kraus_(self):
        # cirq channel protocol: expose the raw operator matrices.
        return [cirq.unitary(op) for _, op, in self._prob_op_pairs]

    def steps(self):
        """Returns the individual NoiseStep operators of this channel."""
        return [m for _, m in self._prob_op_pairs]

    def __str__(self):
        # Bug fix: the original referenced the nonexistent attribute
        # ``self._ops``, so str()/repr() raised AttributeError. Report the
        # stored (probability, operator) pairs instead.
        return f"NoiseChannel({self._prob_op_pairs})"

    def __repr__(self):
        return str(self)
# Helper class for multi-qubit noisy circuit tests.
class NoiseMixture(NoiseChannel):
    """Mixture variant of NoiseChannel: exposes the cirq mixture protocol.

    The redundant ``__init__`` override (which only delegated to
    ``super().__init__``) has been removed; construction is inherited
    unchanged from NoiseChannel.
    """

    def _mixture_(self):
        # cirq mixture protocol: (probability, unitary) pairs.
        return [(prob, cirq.unitary(op)) for prob, op, in self._prob_op_pairs]
@pytest.mark.parametrize(
    "cx_qubits",
    [
        [cirq.LineQubit(0), cirq.LineQubit(1)],
        [cirq.LineQubit(0), cirq.LineQubit(2)],
        [cirq.LineQubit(1), cirq.LineQubit(0)],
        [cirq.LineQubit(1), cirq.LineQubit(2)],
        [cirq.LineQubit(2), cirq.LineQubit(0)],
        [cirq.LineQubit(2), cirq.LineQubit(1)],
    ],
)
@pytest.mark.parametrize("noise_type", [NoiseMixture, NoiseChannel])
def test_multi_qubit_noise(cx_qubits, noise_type):
    """Noise spanning two of three qubits yields only valid trajectories."""
    qs = cirq.LineQubit.range(3)
    # The one qubit not involved in the noisy CX.
    q_no_cx = next(q for q in qs if q not in cx_qubits)
    # Two equally likely operators which together act as CX(*cx_qubits),
    # each scaled by 1/sqrt(2).
    # fmt: off
    branch_0 = np.asarray([
        1, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 1,
        0, 0, 0, 0,
    ]) / np.sqrt(2)
    branch_1 = np.asarray([
        0, 0, 0, 0,
        0, 1, 0, 0,
        0, 0, 0, 0,
        0, 0, 1, 0,
    ]) / np.sqrt(2)
    # fmt: on
    ambiguous_cx = noise_type((0.5, branch_0), (0.5, branch_1), num_qubits=2)
    cirq_circuit = cirq.Circuit(
        cirq.X(cx_qubits[0]) ** 0.5,
        cirq.X(q_no_cx),
        ambiguous_cx.on(*cx_qubits),
    )
    # One candidate circuit per extracted noise operator.
    possible_circuits = [
        cirq.Circuit(
            cirq.X(cx_qubits[0]) ** 0.5, cirq.X(q_no_cx), step.on(*cx_qubits)
        )
        for step in ambiguous_cx.steps()
    ]
    reference_sim = cirq.Simulator()
    # The trajectory "gates" are non-unitary, so normalize each state.
    possible_states = [
        sv / np.linalg.norm(sv)
        for sv in (
            reference_sim.simulate(pc).state_vector() for pc in possible_circuits
        )
    ]
    # A fixed seed keeps this test deterministic.
    qsimSim = qsimcirq.QSimSimulator(seed=1)
    result_hist = [0] * len(possible_states)
    run_count = 20
    for _ in range(run_count):
        result = qsimSim.simulate(cirq_circuit, qubit_order=qs)
        for i, ps in enumerate(possible_states):
            if cirq.allclose_up_to_global_phase(result.state_vector(), ps):
                result_hist[i] += 1
                break
    # Every sampled state must match one of the candidate trajectories.
    assert sum(result_hist) == run_count
    # With 20 runs, both trajectories should appear at least once.
    assert all(count > 0 for count in result_hist)
def test_noise_aggregation():
    """Expectation values are aggregated over many noisy repetitions."""
    q0 = cirq.LineQubit(0)
    # A high damping probability reduces the estimator variance, but noisy
    # expectation values are still quite variable; tolerances stay wide.
    damp_prob = 0.4
    circuit = cirq.Circuit(
        cirq.X(q0),
        cirq.amplitude_damp(gamma=damp_prob).on(q0),
    )
    observables = [cirq.Z(q0), cirq.X(q0)]
    # Repetitions are evaluated in C++, so per-repetition overhead is low.
    qsim_simulator = qsimcirq.QSimSimulator(
        qsim_options=qsimcirq.QSimOptions(ev_noisy_repetitions=10000), seed=1
    )
    qsim_evs = qsim_simulator.simulate_expectation_values(circuit, observables)
    assert len(qsim_evs) == 2
    # <Z> = (-1) * P(|1>) + 1 * P(|0>); with damp_prob = 0.4 this is -0.2.
    damped_zval = damp_prob - (1 - damp_prob)
    expected_evs = [damped_zval, 0]
    assert cirq.approx_eq(qsim_evs, expected_evs, atol=0.05)
def test_noise_model():
    """A cirq.NoiseModel handed to QSimSimulator is applied during runs."""
    q0, q1 = cirq.LineQubit.range(2)
    circuit = cirq.Circuit(cirq.X(q0), cirq.CNOT(q0, q1), cirq.measure(q0, q1, key="m"))

    # Without noise the circuit deterministically yields |11>.
    quiet_sim = qsimcirq.QSimSimulator()
    quiet_results = quiet_sim.run(circuit, repetitions=100)
    assert quiet_results.histogram(key="m")[0b11] == 100

    class ReadoutError(cirq.NoiseModel):
        # Flips every measured qubit immediately before its measurement.
        def noisy_operation(self, operation: "cirq.Operation") -> "cirq.OP_TREE":
            if not isinstance(operation.gate, cirq.MeasurementGate):
                return [operation]
            return [cirq.X.on_each(*operation.qubits), operation]

    noisy_sim = qsimcirq.QSimSimulator(noise=ReadoutError())
    noisy_results = noisy_sim.run(circuit, repetitions=100)
    # The readout error flips both qubits, turning |11> into |00>.
    assert noisy_results.histogram(key="m")[0b00] == 100

    noisy_state = noisy_sim.simulate(circuit)
    assert cirq.approx_eq(noisy_state.state_vector(), [1, 0, 0, 0])

    obs = cirq.Z(q0) + cirq.Z(q1)
    noisy_evs = noisy_sim.simulate_expectation_values(
        circuit,
        observables=obs,
        permit_terminal_measurements=True,
    )
    assert noisy_evs == [2]
def test_multi_qubit_fusion():
    """Fusing gates into 2- or 4-qubit blocks must not change the result."""
    qubits = cirq.LineQubit.range(4)
    q0, q1, q2, q3 = qubits
    cirq_circuit = cirq.Circuit(
        cirq.CX(q0, q1),
        cirq.X(q2) ** 0.5,
        cirq.Y(q3) ** 0.5,
        cirq.CX(q0, q2),
        cirq.T(q1),
        cirq.T(q3),
        cirq.CX(q1, q2),
        cirq.X(q3) ** 0.5,
        cirq.Y(q0) ** 0.5,
        cirq.CX(q1, q3),
        cirq.T(q0),
        cirq.T(q2),
        cirq.CX(q2, q3),
        cirq.X(q0) ** 0.5,
        cirq.Y(q1) ** 0.5,
    )

    def simulate_with_fusion(size):
        # One simulator per fusion size; only max_fused_gate_size varies.
        opts = qsimcirq.QSimOptions(max_fused_gate_size=size)
        sim = qsimcirq.QSimSimulator(qsim_options=opts)
        return sim.simulate(cirq_circuit, qubit_order=qubits)

    result_2q_fusion = simulate_with_fusion(2)
    result_4q_fusion = simulate_with_fusion(4)
    assert cirq.linalg.allclose_up_to_global_phase(
        result_2q_fusion.state_vector(), result_4q_fusion.state_vector()
    )
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_cirq_qsim_simulate_random_unitary(mode: str):
    """qsim agrees with cirq.Simulator on random two-qubit circuits."""
    q0, q1 = cirq.LineQubit.range(2)
    qsimSim = qsimcirq.QSimSimulator(
        qsim_options=qsimcirq.QSimOptions(cpu_threads=16, verbosity=0)
    )
    cirqSim = cirq.Simulator()
    for seed in range(10):
        random_circuit = cirq.testing.random_circuit(
            qubits=[q0, q1], n_moments=8, op_density=0.99, random_state=seed
        )
        # Parameterized gates cannot be handled here, so reduce the circuit
        # to CZ plus single-qubit gates first.
        cirq.ConvertToCzAndSingleGates().optimize_circuit(random_circuit)
        cirq.ExpandComposite().optimize_circuit(random_circuit)
        if mode == "noisy":
            random_circuit.append(NoiseTrigger().on(q0))
        result = qsimSim.simulate(random_circuit, qubit_order=[q0, q1])
        assert result.state_vector().shape == (4,)
        cirq_result = cirqSim.simulate(random_circuit, qubit_order=[q0, q1])
        # With rotation gates such as S, qsim may differ from other
        # simulators by a global phase; the states are still equivalent.
        assert cirq.linalg.allclose_up_to_global_phase(
            result.state_vector(), cirq_result.state_vector(), atol=1.0e-6
        )
def test_cirq_qsimh_simulate():
    """QSimhSimulator computes amplitudes for a simple two-qubit circuit."""
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    circuit = cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a), cirq.X(a))
    qsimh_options = {"k": [0], "w": 0, "p": 1, "r": 1}
    qsimhSim = qsimcirq.QSimhSimulator(qsimh_options)
    amplitudes = qsimhSim.compute_amplitudes(
        circuit, bitstrings=[0b00, 0b01, 0b10, 0b11]
    )
    # CNOT, CNOT, X applied to |00> deterministically yields |10>.
    assert np.allclose(amplitudes, [0j, 0j, (1 + 0j), 0j])
def test_qsim_gpu_unavailable():
    """Requesting GPU execution without GPU support raises a clear error."""
    if qsimcirq.qsim_gpu is not None:
        pytest.skip("GPU is available; skipping test.")
    # Constructing the simulator itself is expected to fail.
    with pytest.raises(
        ValueError,
        match="GPU execution requested, but not supported",
    ):
        qsimcirq.QSimSimulator(qsim_options=qsimcirq.QSimOptions(use_gpu=True))
def test_cirq_qsim_gpu_amplitudes():
    """GPU-backed amplitude computation returns the expected amplitudes."""
    if qsimcirq.qsim_gpu is None:
        pytest.skip("GPU is not available for testing.")
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    circuit = cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a), cirq.X(a))
    # Run with GPU acceleration enabled.
    gpu_sim = qsimcirq.QSimSimulator(
        qsim_options=qsimcirq.QSimOptions(use_gpu=True)
    )
    amplitudes = gpu_sim.compute_amplitudes(
        circuit, bitstrings=[0b00, 0b01, 0b10, 0b11]
    )
    assert np.allclose(amplitudes, [0j, 0j, (1 + 0j), 0j])
def test_cirq_qsim_gpu_simulate():
    """GPU-backed full-state simulation matches cirq.Simulator."""
    if qsimcirq.qsim_gpu is None:
        pytest.skip("GPU is not available for testing.")
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    circuit = cirq.Circuit(cirq.H(a), cirq.CNOT(a, b), cirq.X(b))
    # Run with GPU acceleration enabled.
    gpu_sim = qsimcirq.QSimSimulator(
        qsim_options=qsimcirq.QSimOptions(use_gpu=True)
    )
    gpu_result = gpu_sim.simulate(circuit)
    assert gpu_result.state_vector().shape == (4,)
    reference = cirq.Simulator().simulate(circuit)
    assert cirq.linalg.allclose_up_to_global_phase(
        gpu_result.state_vector(), reference.state_vector(), atol=1.0e-6
    )
def test_cirq_qsim_gpu_expectation_values():
    """GPU-backed expectation values match cirq.Simulator."""
    if qsimcirq.qsim_gpu is None:
        pytest.skip("GPU is not available for testing.")
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    circuit = cirq.Circuit(cirq.H(a), cirq.CNOT(a, b), cirq.X(b))
    observables = [cirq.Z(a) * cirq.Z(b)]
    # Run with GPU acceleration enabled.
    gpu_sim = qsimcirq.QSimSimulator(
        qsim_options=qsimcirq.QSimOptions(use_gpu=True)
    )
    gpu_evs = gpu_sim.simulate_expectation_values(circuit, observables)
    cirq_evs = cirq.Simulator().simulate_expectation_values(circuit, observables)
    assert np.allclose(gpu_evs, cirq_evs)
def test_cirq_qsim_gpu_input_state():
    """GPU-backed simulation honors a user-supplied initial state."""
    if qsimcirq.qsim_gpu is None:
        pytest.skip("GPU is not available for testing.")
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    circuit = cirq.Circuit(cirq.H(a), cirq.CNOT(a, b), cirq.X(b))
    # Run with GPU acceleration enabled.
    gpu_sim = qsimcirq.QSimSimulator(
        qsim_options=qsimcirq.QSimOptions(use_gpu=True)
    )
    # Start from the uniform superposition rather than |00>.
    initial_state = np.full(4, 0.5, dtype=np.complex64)
    gpu_result = gpu_sim.simulate(circuit, initial_state=initial_state)
    assert gpu_result.state_vector().shape == (4,)
    reference = cirq.Simulator().simulate(circuit, initial_state=initial_state)
    assert cirq.linalg.allclose_up_to_global_phase(
        gpu_result.state_vector(), reference.state_vector(), atol=1.0e-6
    )
def test_cirq_qsim_custatevec_amplitudes():
    """cuStateVec-backed amplitude computation returns correct amplitudes."""
    if qsimcirq.qsim_custatevec is None:
        pytest.skip("cuStateVec library is not available for testing.")
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    circuit = cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a), cirq.X(a))
    # gpu_mode=1 selects the cuStateVec backend.
    gpu_sim = qsimcirq.QSimSimulator(
        qsim_options=qsimcirq.QSimOptions(gpu_mode=1)
    )
    amplitudes = gpu_sim.compute_amplitudes(
        circuit, bitstrings=[0b00, 0b01, 0b10, 0b11]
    )
    assert np.allclose(amplitudes, [0j, 0j, (1 + 0j), 0j])
def test_cirq_qsim_custatevec_simulate():
    """cuStateVec-backed full-state simulation matches cirq.Simulator."""
    if qsimcirq.qsim_custatevec is None:
        pytest.skip("cuStateVec library is not available for testing.")
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    circuit = cirq.Circuit(cirq.H(a), cirq.CNOT(a, b), cirq.X(b))
    # gpu_mode=1 selects the cuStateVec backend.
    gpu_sim = qsimcirq.QSimSimulator(
        qsim_options=qsimcirq.QSimOptions(gpu_mode=1)
    )
    gpu_result = gpu_sim.simulate(circuit)
    assert gpu_result.state_vector().shape == (4,)
    reference = cirq.Simulator().simulate(circuit)
    assert cirq.linalg.allclose_up_to_global_phase(
        gpu_result.state_vector(), reference.state_vector(), atol=1.0e-6
    )
def test_cirq_qsim_custatevec_expectation_values():
    """cuStateVec-backed expectation values match cirq.Simulator."""
    if qsimcirq.qsim_custatevec is None:
        pytest.skip("cuStateVec library is not available for testing.")
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    circuit = cirq.Circuit(cirq.H(a), cirq.CNOT(a, b), cirq.X(b))
    observables = [cirq.Z(a) * cirq.Z(b)]
    # gpu_mode=1 selects the cuStateVec backend.
    gpu_sim = qsimcirq.QSimSimulator(
        qsim_options=qsimcirq.QSimOptions(gpu_mode=1)
    )
    gpu_evs = gpu_sim.simulate_expectation_values(circuit, observables)
    cirq_evs = cirq.Simulator().simulate_expectation_values(circuit, observables)
    assert np.allclose(gpu_evs, cirq_evs)
def test_cirq_qsim_custatevec_input_state():
    """cuStateVec-backed simulation honors a user-supplied initial state."""
    if qsimcirq.qsim_custatevec is None:
        pytest.skip("cuStateVec library is not available for testing.")
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    circuit = cirq.Circuit(cirq.H(a), cirq.CNOT(a, b), cirq.X(b))
    # gpu_mode=1 selects the cuStateVec backend.
    gpu_sim = qsimcirq.QSimSimulator(
        qsim_options=qsimcirq.QSimOptions(gpu_mode=1)
    )
    # Start from the uniform superposition rather than |00>.
    initial_state = np.full(4, 0.5, dtype=np.complex64)
    gpu_result = gpu_sim.simulate(circuit, initial_state=initial_state)
    assert gpu_result.state_vector().shape == (4,)
    reference = cirq.Simulator().simulate(circuit, initial_state=initial_state)
    assert cirq.linalg.allclose_up_to_global_phase(
        gpu_result.state_vector(), reference.state_vector(), atol=1.0e-6
    )
def test_cirq_qsim_old_options():
    """Legacy dict-style options map onto the QSimOptions equivalents."""
    # "f" -> max_fused_gate_size, "t" -> cpu_threads,
    # "r" -> ev_noisy_repetitions, "v" -> verbosity.
    old_sim = qsimcirq.QSimSimulator(qsim_options={"f": 3, "t": 4, "r": 100, "v": 1})
    new_sim = qsimcirq.QSimSimulator(
        qsim_options=qsimcirq.QSimOptions(
            max_fused_gate_size=3,
            cpu_threads=4,
            ev_noisy_repetitions=100,
            verbosity=1,
        )
    )
    assert new_sim.qsim_options == old_sim.qsim_options
def test_cirq_qsim_params():
    """Parameter resolvers are applied identically by qsim and cirq."""
    qubit = cirq.GridQubit(0, 0)
    circuit = cirq.Circuit(cirq.X(qubit) ** sympy.Symbol("beta"))
    params = cirq.ParamResolver({"beta": 0.5})
    cirq_result = cirq.Simulator().simulate(circuit, param_resolver=params)
    qsim_result = qsimcirq.QSimSimulator().simulate(circuit, param_resolver=params)
    assert cirq.linalg.allclose_up_to_global_phase(
        qsim_result.state_vector(), cirq_result.state_vector()
    )
def test_cirq_qsim_all_supported_gates():
    """Runs one circuit exercising every gate type qsim supports and checks
    that qsim's result matches cirq.Simulator up to a global phase."""
    q0 = cirq.GridQubit(1, 1)
    q1 = cirq.GridQubit(1, 0)
    q2 = cirq.GridQubit(0, 1)
    q3 = cirq.GridQubit(0, 0)
    circuit = cirq.Circuit(
        cirq.Moment(
            cirq.H(q0),
            cirq.H(q1),
            cirq.H(q2),
            cirq.H(q3),
        ),
        cirq.Moment(
            cirq.T(q0),
            cirq.T(q1),
            cirq.T(q2),
            cirq.T(q3),
        ),
        cirq.Moment(
            cirq.CZPowGate(exponent=0.7, global_shift=0.2)(q0, q1),
            cirq.CXPowGate(exponent=1.2, global_shift=0.4)(q2, q3),
        ),
        cirq.Moment(
            cirq.XPowGate(exponent=0.3, global_shift=1.1)(q0),
            cirq.YPowGate(exponent=0.4, global_shift=1)(q1),
            cirq.ZPowGate(exponent=0.5, global_shift=0.9)(q2),
            cirq.HPowGate(exponent=0.6, global_shift=0.8)(q3),
        ),
        cirq.Moment(
            cirq.CX(q0, q2),
            cirq.CZ(q1, q3),
        ),
        cirq.Moment(
            cirq.X(q0),
            cirq.Y(q1),
            cirq.Z(q2),
            cirq.S(q3),
        ),
        cirq.Moment(
            cirq.XXPowGate(exponent=0.4, global_shift=0.7)(q0, q1),
            cirq.YYPowGate(exponent=0.8, global_shift=0.5)(q2, q3),
        ),
        cirq.Moment(cirq.I(q0), cirq.I(q1), cirq.IdentityGate(2)(q2, q3)),
        cirq.Moment(
            cirq.rx(0.7)(q0),
            cirq.ry(0.2)(q1),
            cirq.rz(0.4)(q2),
            cirq.PhasedXPowGate(phase_exponent=0.8, exponent=0.6, global_shift=0.3)(q3),
        ),
        cirq.Moment(
            cirq.ZZPowGate(exponent=0.3, global_shift=1.3)(q0, q2),
            cirq.ISwapPowGate(exponent=0.6, global_shift=1.2)(q1, q3),
        ),
        cirq.Moment(
            cirq.XPowGate(exponent=0.1, global_shift=0.9)(q0),
            cirq.YPowGate(exponent=0.2, global_shift=1)(q1),
            cirq.ZPowGate(exponent=0.3, global_shift=1.1)(q2),
            cirq.HPowGate(exponent=0.4, global_shift=1.2)(q3),
        ),
        cirq.Moment(
            cirq.SwapPowGate(exponent=0.2, global_shift=0.9)(q0, q1),
            cirq.PhasedISwapPowGate(phase_exponent=0.8, exponent=0.6)(q2, q3),
        ),
        cirq.Moment(
            cirq.PhasedXZGate(x_exponent=0.2, z_exponent=0.3, axis_phase_exponent=1.4)(
                q0
            ),
            cirq.T(q1),
            cirq.H(q2),
            cirq.S(q3),
        ),
        cirq.Moment(
            cirq.SWAP(q0, q2),
            cirq.XX(q1, q3),
        ),
        cirq.Moment(
            cirq.rx(0.8)(q0),
            cirq.ry(0.9)(q1),
            cirq.rz(1.2)(q2),
            cirq.T(q3),
        ),
        cirq.Moment(
            cirq.YY(q0, q1),
            cirq.ISWAP(q2, q3),
        ),
        cirq.Moment(
            cirq.T(q0),
            cirq.Z(q1),
            cirq.Y(q2),
            cirq.X(q3),
        ),
        cirq.Moment(
            cirq.FSimGate(0.3, 1.7)(q0, q2),
            cirq.ZZ(q1, q3),
        ),
        cirq.Moment(
            cirq.ry(1.3)(q0),
            cirq.rz(0.4)(q1),
            cirq.rx(0.7)(q2),
            cirq.S(q3),
        ),
        cirq.Moment(
            cirq.IdentityGate(4).on(q0, q1, q2, q3),
        ),
        cirq.Moment(
            cirq.CCZPowGate(exponent=0.7, global_shift=0.3)(q2, q0, q1),
        ),
        # Controlled three-qubit gate with a negated (|0>) control qubit.
        cirq.Moment(
            cirq.CCXPowGate(exponent=0.4, global_shift=0.6)(q3, q1, q0).controlled_by(
                q2, control_values=[0]
            ),
        ),
        cirq.Moment(
            cirq.rx(0.3)(q0),
            cirq.ry(0.5)(q1),
            cirq.rz(0.7)(q2),
            cirq.rx(0.9)(q3),
        ),
        cirq.Moment(
            cirq.TwoQubitDiagonalGate([0.1, 0.2, 0.3, 0.4])(q0, q1),
        ),
        cirq.Moment(
            cirq.ThreeQubitDiagonalGate([0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.2, 1.3])(
                q1, q2, q3
            ),
        ),
        cirq.Moment(
            cirq.CSwapGate()(q0, q3, q1),
        ),
        cirq.Moment(
            cirq.rz(0.6)(q0),
            cirq.rx(0.7)(q1),
            cirq.ry(0.8)(q2),
            cirq.rz(0.9)(q3),
        ),
        cirq.Moment(
            cirq.TOFFOLI(q3, q2, q0),
        ),
        cirq.Moment(
            cirq.FREDKIN(q1, q3, q2),
        ),
        # Arbitrary-matrix gates (two-qubit).
        cirq.Moment(
            cirq.MatrixGate(
                np.array(
                    [
                        [0, -0.5 - 0.5j, -0.5 - 0.5j, 0],
                        [0.5 - 0.5j, 0, 0, -0.5 + 0.5j],
                        [0.5 - 0.5j, 0, 0, 0.5 - 0.5j],
                        [0, -0.5 - 0.5j, 0.5 + 0.5j, 0],
                    ]
                )
            )(q0, q1),
            cirq.MatrixGate(
                np.array(
                    [
                        [0.5 - 0.5j, 0, 0, -0.5 + 0.5j],
                        [0, 0.5 - 0.5j, -0.5 + 0.5j, 0],
                        [0, -0.5 + 0.5j, -0.5 + 0.5j, 0],
                        [0.5 - 0.5j, 0, 0, 0.5 - 0.5j],
                    ]
                )
            )(q2, q3),
        ),
        # Arbitrary-matrix gates (single-qubit).
        cirq.Moment(
            cirq.MatrixGate(np.array([[1, 0], [0, 1j]]))(q0),
            cirq.MatrixGate(np.array([[0, -1j], [1j, 0]]))(q1),
            cirq.MatrixGate(np.array([[0, 1], [1, 0]]))(q2),
            cirq.MatrixGate(np.array([[1, 0], [0, -1]]))(q3),
        ),
        cirq.Moment(
            cirq.riswap(0.7)(q0, q1),
            cirq.givens(1.2)(q2, q3),
        ),
        cirq.Moment(
            cirq.H(q0),
            cirq.H(q1),
            cirq.H(q2),
            cirq.H(q3),
        ),
    )
    simulator = cirq.Simulator()
    cirq_result = simulator.simulate(circuit)
    qsim_simulator = qsimcirq.QSimSimulator()
    qsim_result = qsim_simulator.simulate(circuit)
    # qsim may introduce a global phase relative to cirq; the states are
    # still physically equivalent.
    assert cirq.linalg.allclose_up_to_global_phase(
        qsim_result.state_vector(), cirq_result.state_vector()
    )
def test_cirq_qsim_global_shift():
    """Runs circuits mixing zero and nonzero global_shift values and checks
    qsim agrees with cirq.Simulator; then re-runs with the qsim "z" option
    toggled and asserts the state vector is unchanged."""
    q0 = cirq.GridQubit(1, 1)
    q1 = cirq.GridQubit(1, 0)
    q2 = cirq.GridQubit(0, 1)
    q3 = cirq.GridQubit(0, 0)
    circuit = cirq.Circuit(
        cirq.Moment(
            cirq.H(q0),
            cirq.H(q1),
            cirq.H(q2),
            cirq.H(q3),
        ),
        # First half: gates with nonzero global_shift values.
        cirq.Moment(
            cirq.CXPowGate(exponent=1, global_shift=0.7)(q0, q1),
            cirq.CZPowGate(exponent=1, global_shift=0.9)(q2, q3),
        ),
        cirq.Moment(
            cirq.XPowGate(exponent=1, global_shift=1.1)(q0),
            cirq.YPowGate(exponent=1, global_shift=1)(q1),
            cirq.ZPowGate(exponent=1, global_shift=0.9)(q2),
            cirq.HPowGate(exponent=1, global_shift=0.8)(q3),
        ),
        cirq.Moment(
            cirq.XXPowGate(exponent=1, global_shift=0.2)(q0, q1),
            cirq.YYPowGate(exponent=1, global_shift=0.3)(q2, q3),
        ),
        cirq.Moment(
            cirq.ZPowGate(exponent=0.25, global_shift=0.4)(q0),
            cirq.ZPowGate(exponent=0.5, global_shift=0.5)(q1),
            cirq.YPowGate(exponent=1, global_shift=0.2)(q2),
            cirq.ZPowGate(exponent=1, global_shift=0.3)(q3),
        ),
        cirq.Moment(
            cirq.ZZPowGate(exponent=1, global_shift=0.2)(q0, q1),
            cirq.SwapPowGate(exponent=1, global_shift=0.3)(q2, q3),
        ),
        # Second half: the same gate families with global_shift=0.
        cirq.Moment(
            cirq.XPowGate(exponent=1, global_shift=0)(q0),
            cirq.YPowGate(exponent=1, global_shift=0)(q1),
            cirq.ZPowGate(exponent=1, global_shift=0)(q2),
            cirq.HPowGate(exponent=1, global_shift=0)(q3),
        ),
        cirq.Moment(
            cirq.ISwapPowGate(exponent=1, global_shift=0.3)(q0, q1),
            cirq.ZZPowGate(exponent=1, global_shift=0.5)(q2, q3),
        ),
        cirq.Moment(
            cirq.ZPowGate(exponent=0.5, global_shift=0)(q0),
            cirq.ZPowGate(exponent=0.25, global_shift=0)(q1),
            cirq.XPowGate(exponent=0.9, global_shift=0)(q2),
            cirq.YPowGate(exponent=0.8, global_shift=0)(q3),
        ),
        cirq.Moment(
            cirq.CZPowGate(exponent=0.3, global_shift=0)(q0, q1),
            cirq.CXPowGate(exponent=0.4, global_shift=0)(q2, q3),
        ),
        cirq.Moment(
            cirq.ZPowGate(exponent=1.3, global_shift=0)(q0),
            cirq.HPowGate(exponent=0.8, global_shift=0)(q1),
            cirq.XPowGate(exponent=0.9, global_shift=0)(q2),
            cirq.YPowGate(exponent=0.4, global_shift=0)(q3),
        ),
        cirq.Moment(
            cirq.XXPowGate(exponent=0.8, global_shift=0)(q0, q1),
            cirq.YYPowGate(exponent=0.6, global_shift=0)(q2, q3),
        ),
        cirq.Moment(
            cirq.HPowGate(exponent=0.7, global_shift=0)(q0),
            cirq.ZPowGate(exponent=0.2, global_shift=0)(q1),
            cirq.YPowGate(exponent=0.3, global_shift=0)(q2),
            cirq.XPowGate(exponent=0.7, global_shift=0)(q3),
        ),
        cirq.Moment(
            cirq.ZZPowGate(exponent=0.1, global_shift=0)(q0, q1),
            cirq.SwapPowGate(exponent=0.6, global_shift=0)(q2, q3),
        ),
        cirq.Moment(
            cirq.XPowGate(exponent=0.4, global_shift=0)(q0),
            cirq.YPowGate(exponent=0.3, global_shift=0)(q1),
            cirq.ZPowGate(exponent=0.2, global_shift=0)(q2),
            cirq.HPowGate(exponent=0.1, global_shift=0)(q3),
        ),
        cirq.Moment(
            cirq.ISwapPowGate(exponent=1.3, global_shift=0)(q0, q1),
            cirq.CXPowGate(exponent=0.5, global_shift=0)(q2, q3),
        ),
        cirq.Moment(
            cirq.H(q0),
            cirq.H(q1),
            cirq.H(q2),
            cirq.H(q3),
        ),
    )
    simulator = cirq.Simulator()
    cirq_result = simulator.simulate(circuit)
    qsim_simulator = qsimcirq.QSimSimulator()
    qsim_result1 = qsim_simulator.simulate(circuit)
    assert cirq.linalg.allclose_up_to_global_phase(
        qsim_result1.state_vector(), cirq_result.state_vector()
    )
    # NOTE(review): "z" presumably toggles qsim's global-shift handling —
    # confirm against qsimcirq option docs. Either setting must yield a
    # state vector identical to the first run.
    qsim_simulator.qsim_options["z"] = True
    qsim_result2 = qsim_simulator.simulate(circuit)
    assert (qsim_result1.state_vector() == qsim_result2.state_vector()).all()
    qsim_simulator.qsim_options["z"] = False
    qsim_result3 = qsim_simulator.simulate(circuit)
    assert (qsim_result1.state_vector() == qsim_result3.state_vector()).all()
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_cirq_qsim_circuit_memoization_compute_amplitudes(mode: str):
    """compute_amplitudes stays correct when circuit_memoization_size is set."""
    qsim_sim = qsimcirq.QSimSimulator(circuit_memoization_size=4)
    a, b, c, d = (
        cirq.GridQubit(0, 0),
        cirq.GridQubit(0, 1),
        cirq.GridQubit(1, 1),
        cirq.GridQubit(1, 0),
    )
    cirq_circuit = cirq.Circuit(
        cirq.X(a) ** 0.5,
        cirq.Y(b) ** 0.5,
        cirq.Z(c),
        cirq.CZ(a, d),
    )
    if mode == "noisy":
        cirq_circuit.append(NoiseTrigger().on(a))
    # Repeat so the memoized (cached) translated circuit is exercised.
    for _ in range(3):
        result = qsim_sim.compute_amplitudes(cirq_circuit, bitstrings=[0b0100, 0b1011])
        assert np.allclose(result, [0.5j, 0j])
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_cirq_qsim_circuit_memoization_simulate(mode: str):
    """simulate() stays correct when circuit_memoization_size is set."""
    qsim_sim = qsimcirq.QSimSimulator(circuit_memoization_size=4)
    cirq_sim = cirq.Simulator()
    qubits = [
        cirq.GridQubit(0, 0),
        cirq.GridQubit(0, 1),
        cirq.GridQubit(1, 1),
        cirq.GridQubit(1, 0),
    ]
    a, b, c, d = qubits
    cirq_circuit = cirq.Circuit(
        cirq.Moment(cirq.X(a) ** 0.5, cirq.H(b), cirq.X(c), cirq.H(d)),
        cirq.Moment(cirq.X(a) ** 0.5, cirq.CX(b, c), cirq.S(d)),
        cirq.Moment(cirq.I(a), cirq.ISWAP(b, c)),
    )
    if mode == "noisy":
        cirq_circuit.append(NoiseTrigger().on(a))
    expected = cirq_sim.simulate(cirq_circuit, qubit_order=qubits)
    # Re-run several times so the memoized translation path is exercised.
    for _ in range(3):
        result = qsim_sim.simulate(cirq_circuit, qubit_order=qubits)
        assert result.state_vector().shape == (16,)
        assert cirq.linalg.allclose_up_to_global_phase(
            result.state_vector(), expected.state_vector()
        )
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_cirq_qsim_circuit_memoization_run(mode: str):
    """run() produces valid samples when circuit_memoization_size is set."""
    qsim_sim = qsimcirq.QSimSimulator(circuit_memoization_size=4)
    a, b, c, d = (
        cirq.GridQubit(0, 0),
        cirq.GridQubit(0, 1),
        cirq.GridQubit(1, 1),
        cirq.GridQubit(1, 0),
    )
    cirq_circuit = cirq.Circuit(
        cirq.X(a) ** 0.5,
        cirq.Y(b) ** 0.5,
        cirq.Z(c),
        cirq.CZ(a, d),
        # One measurement key per qubit.
        cirq.measure(a, key="ma"),
        cirq.measure(b, key="mb"),
        cirq.measure(c, key="mc"),
        cirq.measure(d, key="md"),
    )
    if mode == "noisy":
        cirq_circuit.append(NoiseTrigger().on(a))
    # Repeat so the memoized (cached) translated circuit is exercised.
    for _ in range(3):
        result = qsim_sim.run(cirq_circuit, repetitions=5)
        # Each key holds 5 repetitions of a single-qubit measurement.
        for samples in result.measurements.values():
            assert samples.shape == (5, 1)
@pytest.mark.parametrize("mode", ["noiseless", "noisy"])
def test_cirq_qsim_circuit_memoization_simulate_expectation_values_sweep(mode: str):
    """Swept expectation values match cirq under circuit memoization."""
    qsim_sim = qsimcirq.QSimSimulator(circuit_memoization_size=4)
    cirq_sim = cirq.Simulator()
    a, b = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
    x_exp = sympy.Symbol("x_exp")
    h_exp = sympy.Symbol("h_exp")
    circuit = cirq.Circuit(
        cirq.X(a) ** x_exp,
        cirq.H(b),
        cirq.H(a) ** h_exp,
        cirq.H(b) ** h_exp,
    )
    # Four parameter points covering the states |0+), |1+), |+0), |-0).
    params = [
        {x_exp: 0, h_exp: 0},
        {x_exp: 1, h_exp: 0},
        {x_exp: 0, h_exp: 1},
        {x_exp: 1, h_exp: 1},
    ]
    observables = [cirq.Z(a) + 3 * cirq.X(b), cirq.X(a) - 3 * cirq.Z(b)]
    if mode == "noisy":
        circuit.append(NoiseTrigger().on(a))
    cirq_result = cirq_sim.simulate_expectation_values_sweep(
        circuit, observables, params
    )
    # Repeat so the memoized (cached) translated circuit is exercised.
    for _ in range(3):
        qsim_result = qsim_sim.simulate_expectation_values_sweep(
            circuit, observables, params
        )
        assert cirq.approx_eq(qsim_result, cirq_result, atol=1e-6)
| [
"sympy.Symbol",
"cirq.S",
"cirq.ry",
"cirq.Circuit",
"cirq.ISwapPowGate",
"cirq.SWAP",
"cirq.HPowGate",
"cirq.X.on_each",
"cirq.CSwapGate",
"cirq.T",
"pytest.skip",
"cirq.kraus",
"cirq.TOFFOLI",
"cirq.SwapPowGate",
"cirq.Z",
"cirq.rz",
"cirq.IdentityGate",
"cirq.H",
"cirq.Moment"... | [((1215, 1270), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (1238, 1270), False, 'import pytest\n'), ((3136, 3191), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (3159, 3191), False, 'import pytest\n'), ((3858, 3913), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (3881, 3913), False, 'import pytest\n'), ((5240, 5295), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (5263, 5295), False, 'import pytest\n'), ((9537, 9592), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (9560, 9592), False, 'import pytest\n'), ((10160, 10215), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (10183, 10215), False, 'import pytest\n'), ((12696, 12751), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (12719, 12751), False, 'import pytest\n'), ((13544, 13599), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (13567, 13599), False, 'import pytest\n'), ((14624, 14679), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (14647, 14679), False, 'import pytest\n'), ((15416, 15471), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (15439, 15471), False, 'import pytest\n'), 
((16980, 17035), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (17003, 17035), False, 'import pytest\n'), ((17891, 17946), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (17914, 17946), False, 'import pytest\n'), ((32301, 32368), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""noise_type"""', '[NoiseMixture, NoiseChannel]'], {}), "('noise_type', [NoiseMixture, NoiseChannel])\n", (32324, 32368), False, 'import pytest\n'), ((37734, 37789), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (37757, 37789), False, 'import pytest\n'), ((57138, 57193), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (57161, 57193), False, 'import pytest\n'), ((58039, 58094), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (58062, 58094), False, 'import pytest\n'), ((59293, 59348), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (59316, 59348), False, 'import pytest\n'), ((60263, 60318), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['noiseless', 'noisy']"], {}), "('mode', ['noiseless', 'noisy'])\n", (60286, 60318), False, 'import pytest\n'), ((1314, 1337), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (1334, 1337), False, 'import cirq\n'), ((2277, 2301), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (2299, 2301), False, 'import qsimcirq\n'), ((2978, 3002), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (3000, 3002), False, 'import 
qsimcirq\n'), ((3704, 3728), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (3726, 3728), False, 'import qsimcirq\n'), ((3823, 3856), 'numpy.allclose', 'np.allclose', (['result', '[0.5j, 0.0j]'], {}), '(result, [0.5j, 0.0j])\n', (3834, 3856), True, 'import numpy as np\n'), ((4722, 4746), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (4744, 4746), False, 'import qsimcirq\n'), ((4879, 4895), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (4893, 4895), False, 'import cirq\n'), ((5448, 5465), 'sympy.Symbol', 'sympy.Symbol', (['"""x"""'], {}), "('x')\n", (5460, 5465), False, 'import sympy\n'), ((5834, 5858), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (5856, 5858), False, 'import qsimcirq\n'), ((5936, 5952), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (5950, 5952), False, 'import cirq\n'), ((6644, 6700), 'numpy.asarray', 'np.asarray', (['[0.5j, 0.5, -0.5j, -0.5]'], {'dtype': 'np.complex64'}), '([0.5j, 0.5, -0.5j, -0.5], dtype=np.complex64)\n', (6654, 6700), True, 'import numpy as np\n'), ((7246, 7270), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (7268, 7270), False, 'import qsimcirq\n'), ((7730, 7747), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (7744, 7747), False, 'import cirq\n'), ((7958, 7982), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (7980, 7982), False, 'import qsimcirq\n'), ((8133, 8150), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (8147, 8150), False, 'import cirq\n'), ((8353, 8377), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (8375, 8377), False, 'import qsimcirq\n'), ((8630, 8653), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (8650, 8653), False, 'import cirq\n'), ((8749, 8773), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (8771, 8773), False, 'import qsimcirq\n'), ((9710, 9733), 'cirq.LineQubit.range', 
'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (9730, 9733), False, 'import cirq\n'), ((9913, 9937), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (9935, 9937), False, 'import qsimcirq\n'), ((10886, 10910), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (10908, 10910), False, 'import qsimcirq\n'), ((11152, 11175), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (11172, 11175), False, 'import cirq\n'), ((11520, 11543), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (11540, 11543), False, 'import cirq\n'), ((12068, 12091), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (12088, 12091), False, 'import cirq\n'), ((13276, 13292), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (13290, 13292), False, 'import cirq\n'), ((13390, 13414), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (13412, 13414), False, 'import qsimcirq\n'), ((13731, 13752), 'sympy.Symbol', 'sympy.Symbol', (['"""x_exp"""'], {}), "('x_exp')\n", (13743, 13752), False, 'import sympy\n'), ((13765, 13786), 'sympy.Symbol', 'sympy.Symbol', (['"""h_exp"""'], {}), "('h_exp')\n", (13777, 13786), False, 'import sympy\n'), ((14265, 14289), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (14287, 14289), False, 'import qsimcirq\n'), ((14426, 14442), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (14440, 14442), False, 'import cirq\n'), ((14569, 14621), 'cirq.approx_eq', 'cirq.approx_eq', (['qsim_result', 'cirq_result'], {'atol': '(1e-06)'}), '(qsim_result, cirq_result, atol=1e-06)\n', (14583, 14621), False, 'import cirq\n'), ((14808, 14825), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (14822, 14825), False, 'import cirq\n'), ((14924, 14934), 'cirq.Z', 'cirq.Z', (['q0'], {}), '(q0)\n', (14930, 14934), False, 'import cirq\n'), ((15044, 15068), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (15066, 15068), 
False, 'import qsimcirq\n'), ((15598, 15621), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (15618, 15621), False, 'import cirq\n'), ((16065, 16089), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (16087, 16089), False, 'import qsimcirq\n'), ((16623, 16647), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (16645, 16647), False, 'import qsimcirq\n'), ((17666, 17682), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (17680, 17682), False, 'import cirq\n'), ((17762, 17786), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (17784, 17786), False, 'import qsimcirq\n'), ((18075, 18095), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (18089, 18095), False, 'import cirq\n'), ((18251, 18275), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (18273, 18275), False, 'import qsimcirq\n'), ((18490, 18507), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (18504, 18507), False, 'import cirq\n'), ((18633, 18657), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (18655, 18657), False, 'import qsimcirq\n'), ((18763, 18779), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (18777, 18779), False, 'import cirq\n'), ((18985, 19008), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (19005, 19008), False, 'import cirq\n'), ((19017, 19083), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])\n', (19025, 19083), True, 'import numpy as np\n'), ((19163, 19187), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (19185, 19187), False, 'import qsimcirq\n'), ((19313, 19329), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (19327, 19329), False, 'import cirq\n'), ((19559, 19582), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(3)'], {}), '(3)\n', (19579, 19582), False, 
'import cirq\n'), ((19623, 19850), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, \n 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0,\n 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 0]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, \n 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0,\n 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 0]])\n', (19631, 19850), True, 'import numpy as np\n'), ((20111, 20135), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (20133, 20135), False, 'import qsimcirq\n'), ((20261, 20277), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (20275, 20277), False, 'import cirq\n'), ((21410, 21433), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(3)'], {}), '(3)\n', (21430, 21433), False, 'import cirq\n'), ((21586, 21610), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (21608, 21610), False, 'import qsimcirq\n'), ((21736, 21752), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (21750, 21752), False, 'import cirq\n'), ((21987, 22010), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(3)'], {}), '(3)\n', (22007, 22010), False, 'import cirq\n'), ((22289, 22313), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (22311, 22313), False, 'import qsimcirq\n'), ((22439, 22455), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (22453, 22455), False, 'import cirq\n'), ((22692, 22715), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(4)'], {}), '(4)\n', (22712, 22715), False, 'import cirq\n'), ((22778, 22844), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])\n', (22786, 22844), True, 'import numpy as np\n'), ((23228, 23252), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (23250, 
23252), False, 'import qsimcirq\n'), ((23379, 23395), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (23393, 23395), False, 'import cirq\n'), ((23623, 23646), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(3)'], {}), '(3)\n', (23643, 23646), False, 'import cirq\n'), ((24121, 24145), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (24143, 24145), False, 'import qsimcirq\n'), ((24271, 24287), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (24285, 24287), False, 'import cirq\n'), ((24487, 24524), 'cirq.LineQid.for_qid_shape', 'cirq.LineQid.for_qid_shape', (['[2, 3, 2]'], {}), '([2, 3, 2])\n', (24513, 24524), False, 'import cirq\n'), ((24782, 24806), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (24804, 24806), False, 'import qsimcirq\n'), ((25159, 25182), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(6)'], {}), '(6)\n', (25179, 25182), False, 'import cirq\n'), ((25415, 25439), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (25437, 25439), False, 'import qsimcirq\n'), ((25906, 25929), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(4)'], {}), '(4)\n', (25926, 25929), False, 'import cirq\n'), ((26267, 26291), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (26289, 26291), False, 'import qsimcirq\n'), ((26418, 26434), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (26432, 26434), False, 'import cirq\n'), ((26739, 26762), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(4)'], {}), '(4)\n', (26759, 26762), False, 'import cirq\n'), ((26921, 26945), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (26943, 26945), False, 'import qsimcirq\n'), ((27072, 27088), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (27086, 27088), False, 'import cirq\n'), ((27840, 27863), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (27860, 27863), False, 'import cirq\n'), ((27876, 27898), 'cirq.phase_flip', 
'cirq.phase_flip', ([], {'p': '(0.4)'}), '(p=0.4)\n', (27891, 27898), False, 'import cirq\n'), ((27911, 27931), 'cirq.bit_flip', 'cirq.bit_flip', ([], {'p': '(0.6)'}), '(p=0.6)\n', (27924, 27931), False, 'import cirq\n'), ((28688, 28718), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'seed': '(1)'}), '(seed=1)\n', (28710, 28718), False, 'import qsimcirq\n'), ((29356, 29379), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (29376, 29379), False, 'import cirq\n'), ((29536, 29566), 'cirq.amplitude_damp', 'cirq.amplitude_damp', ([], {'gamma': '(0.5)'}), '(gamma=0.5)\n', (29555, 29566), False, 'import cirq\n'), ((29586, 29635), 'cirq.generalized_amplitude_damp', 'cirq.generalized_amplitude_damp', ([], {'p': '(0.4)', 'gamma': '(0.6)'}), '(p=0.4, gamma=0.6)\n', (29617, 29635), False, 'import cirq\n'), ((30414, 30444), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'seed': '(1)'}), '(seed=1)\n', (30436, 30444), False, 'import qsimcirq\n'), ((32492, 32515), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(3)'], {}), '(3)\n', (32512, 32515), False, 'import cirq\n'), ((33868, 33898), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'seed': '(1)'}), '(seed=1)\n', (33890, 33898), False, 'import qsimcirq\n'), ((34519, 34536), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (34533, 34536), False, 'import cirq\n'), ((34886, 34896), 'cirq.Z', 'cirq.Z', (['q0'], {}), '(q0)\n', (34892, 34896), False, 'import cirq\n'), ((34909, 34919), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (34915, 34919), False, 'import cirq\n'), ((35087, 35135), 'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'ev_noisy_repetitions': '(10000)'}), '(ev_noisy_repetitions=10000)\n', (35107, 35135), False, 'import qsimcirq\n'), ((35157, 35214), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'qsim_options', 'seed': '(1)'}), '(qsim_options=qsim_options, seed=1)\n', (35179, 35214), False, 'import qsimcirq\n'), 
((35528, 35577), 'cirq.approx_eq', 'cirq.approx_eq', (['qsim_evs', 'expected_evs'], {'atol': '(0.05)'}), '(qsim_evs, expected_evs, atol=0.05)\n', (35542, 35577), False, 'import cirq\n'), ((35617, 35640), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (35637, 35640), False, 'import cirq\n'), ((35747, 35771), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (35769, 35771), False, 'import qsimcirq\n'), ((36778, 36801), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(4)'], {}), '(4)\n', (36798, 36801), False, 'import cirq\n'), ((37253, 37296), 'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'max_fused_gate_size': '(2)'}), '(max_fused_gate_size=2)\n', (37273, 37296), False, 'import qsimcirq\n'), ((37311, 37355), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'options'}), '(qsim_options=options)\n', (37333, 37355), False, 'import qsimcirq\n'), ((37481, 37525), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'options'}), '(qsim_options=options)\n', (37503, 37525), False, 'import qsimcirq\n'), ((37859, 37882), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (37879, 37882), False, 'import cirq\n'), ((37897, 37946), 'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'cpu_threads': '(16)', 'verbosity': '(0)'}), '(cpu_threads=16, verbosity=0)\n', (37917, 37946), False, 'import qsimcirq\n'), ((37961, 38005), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'options'}), '(qsim_options=options)\n', (37983, 38005), False, 'import qsimcirq\n'), ((39264, 39302), 'qsimcirq.QSimhSimulator', 'qsimcirq.QSimhSimulator', (['qsimh_options'], {}), '(qsimh_options)\n', (39287, 39302), False, 'import qsimcirq\n'), ((39420, 39469), 'numpy.allclose', 'np.allclose', (['result', '[0.0j, 0.0j, 1 + 0.0j, 0.0j]'], {}), '(result, [0.0j, 0.0j, 1 + 0.0j, 0.0j])\n', (39431, 39469), True, 'import numpy as np\n'), ((39666, 39700), 
'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'use_gpu': '(True)'}), '(use_gpu=True)\n', (39686, 39700), False, 'import qsimcirq\n'), ((40229, 40263), 'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'use_gpu': '(True)'}), '(use_gpu=True)\n', (40249, 40263), False, 'import qsimcirq\n'), ((40281, 40329), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'gpu_options'}), '(qsim_options=gpu_options)\n', (40303, 40329), False, 'import qsimcirq\n'), ((40449, 40498), 'numpy.allclose', 'np.allclose', (['result', '[0.0j, 0.0j, 1 + 0.0j, 0.0j]'], {}), '(result, [0.0j, 0.0j, 1 + 0.0j, 0.0j])\n', (40460, 40498), True, 'import numpy as np\n'), ((40841, 40875), 'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'use_gpu': '(True)'}), '(use_gpu=True)\n', (40861, 40875), False, 'import qsimcirq\n'), ((40893, 40941), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'gpu_options'}), '(qsim_options=gpu_options)\n', (40915, 40941), False, 'import qsimcirq\n'), ((41051, 41067), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (41065, 41067), False, 'import cirq\n'), ((41638, 41672), 'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'use_gpu': '(True)'}), '(use_gpu=True)\n', (41658, 41672), False, 'import qsimcirq\n'), ((41690, 41738), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'gpu_options'}), '(qsim_options=gpu_options)\n', (41712, 41738), False, 'import qsimcirq\n'), ((41825, 41841), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (41839, 41841), False, 'import cirq\n'), ((41926, 41958), 'numpy.allclose', 'np.allclose', (['result', 'cirq_result'], {}), '(result, cirq_result)\n', (41937, 41958), True, 'import numpy as np\n'), ((42310, 42344), 'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'use_gpu': '(True)'}), '(use_gpu=True)\n', (42330, 42344), False, 'import qsimcirq\n'), ((42362, 42410), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 
'gpu_options'}), '(qsim_options=gpu_options)\n', (42384, 42410), False, 'import qsimcirq\n'), ((42431, 42472), 'numpy.asarray', 'np.asarray', (['([0.5] * 4)'], {'dtype': 'np.complex64'}), '([0.5] * 4, dtype=np.complex64)\n', (42441, 42472), True, 'import numpy as np\n'), ((42611, 42627), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (42625, 42627), False, 'import cirq\n'), ((43227, 43259), 'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'gpu_mode': '(1)'}), '(gpu_mode=1)\n', (43247, 43259), False, 'import qsimcirq\n'), ((43277, 43332), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'custatevec_options'}), '(qsim_options=custatevec_options)\n', (43299, 43332), False, 'import qsimcirq\n'), ((43452, 43501), 'numpy.allclose', 'np.allclose', (['result', '[0.0j, 0.0j, 1 + 0.0j, 0.0j]'], {}), '(result, [0.0j, 0.0j, 1 + 0.0j, 0.0j])\n', (43463, 43501), True, 'import numpy as np\n'), ((43880, 43912), 'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'gpu_mode': '(1)'}), '(gpu_mode=1)\n', (43900, 43912), False, 'import qsimcirq\n'), ((43930, 43985), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'custatevec_options'}), '(qsim_options=custatevec_options)\n', (43952, 43985), False, 'import qsimcirq\n'), ((44095, 44111), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (44109, 44111), False, 'import cirq\n'), ((44718, 44750), 'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'gpu_mode': '(1)'}), '(gpu_mode=1)\n', (44738, 44750), False, 'import qsimcirq\n'), ((44768, 44823), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'custatevec_options'}), '(qsim_options=custatevec_options)\n', (44790, 44823), False, 'import qsimcirq\n'), ((44910, 44926), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (44924, 44926), False, 'import cirq\n'), ((45011, 45043), 'numpy.allclose', 'np.allclose', (['result', 'cirq_result'], {}), '(result, cirq_result)\n', (45022, 45043), True, 
'import numpy as np\n'), ((45431, 45463), 'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'gpu_mode': '(1)'}), '(gpu_mode=1)\n', (45451, 45463), False, 'import qsimcirq\n'), ((45481, 45536), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'custatevec_options'}), '(qsim_options=custatevec_options)\n', (45503, 45536), False, 'import qsimcirq\n'), ((45557, 45598), 'numpy.asarray', 'np.asarray', (['([0.5] * 4)'], {'dtype': 'np.complex64'}), '([0.5] * 4, dtype=np.complex64)\n', (45567, 45598), True, 'import numpy as np\n'), ((45737, 45753), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (45751, 45753), False, 'import cirq\n'), ((46064, 46112), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'old_options'}), '(qsim_options=old_options)\n', (46086, 46112), False, 'import qsimcirq\n'), ((46132, 46233), 'qsimcirq.QSimOptions', 'qsimcirq.QSimOptions', ([], {'max_fused_gate_size': '(3)', 'cpu_threads': '(4)', 'ev_noisy_repetitions': '(100)', 'verbosity': '(1)'}), '(max_fused_gate_size=3, cpu_threads=4,\n ev_noisy_repetitions=100, verbosity=1)\n', (46152, 46233), False, 'import qsimcirq\n'), ((46283, 46331), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'new_options'}), '(qsim_options=new_options)\n', (46305, 46331), False, 'import qsimcirq\n'), ((46431, 46451), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (46445, 46451), False, 'import cirq\n'), ((46532, 46565), 'cirq.ParamResolver', 'cirq.ParamResolver', (["{'beta': 0.5}"], {}), "({'beta': 0.5})\n", (46550, 46565), False, 'import cirq\n'), ((46583, 46599), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (46597, 46599), False, 'import cirq\n'), ((46691, 46715), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (46713, 46715), False, 'import qsimcirq\n'), ((46965, 46985), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (46979, 46985), False, 'import cirq\n'), 
((46995, 47015), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (47009, 47015), False, 'import cirq\n'), ((47025, 47045), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (47039, 47045), False, 'import cirq\n'), ((47055, 47075), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (47069, 47075), False, 'import cirq\n'), ((52499, 52515), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (52513, 52515), False, 'import cirq\n'), ((52584, 52608), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (52606, 52608), False, 'import qsimcirq\n'), ((52828, 52848), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (52842, 52848), False, 'import cirq\n'), ((52858, 52878), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (52872, 52878), False, 'import cirq\n'), ((52888, 52908), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (52902, 52908), False, 'import cirq\n'), ((52918, 52938), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (52932, 52938), False, 'import cirq\n'), ((56497, 56513), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (56511, 56513), False, 'import cirq\n'), ((56582, 56606), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (56604, 56606), False, 'import qsimcirq\n'), ((57409, 57459), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'circuit_memoization_size': '(4)'}), '(circuit_memoization_size=4)\n', (57431, 57459), False, 'import qsimcirq\n'), ((58200, 58250), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'circuit_memoization_size': '(4)'}), '(circuit_memoization_size=4)\n', (58222, 58250), False, 'import qsimcirq\n'), ((58266, 58282), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (58280, 58282), False, 'import cirq\n'), ((59449, 59499), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'circuit_memoization_size': '(4)'}), 
'(circuit_memoization_size=4)\n', (59471, 59499), False, 'import qsimcirq\n'), ((60449, 60499), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'circuit_memoization_size': '(4)'}), '(circuit_memoization_size=4)\n', (60471, 60499), False, 'import qsimcirq\n'), ((60515, 60531), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (60529, 60531), False, 'import cirq\n'), ((60602, 60623), 'sympy.Symbol', 'sympy.Symbol', (['"""x_exp"""'], {}), "('x_exp')\n", (60614, 60623), False, 'import sympy\n'), ((60636, 60657), 'sympy.Symbol', 'sympy.Symbol', (['"""h_exp"""'], {}), "('h_exp')\n", (60648, 60657), False, 'import sympy\n'), ((1145, 1159), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (1157, 1159), False, 'import cirq\n'), ((1404, 1417), 'cirq.Moment', 'cirq.Moment', ([], {}), '()\n', (1415, 1417), False, 'import cirq\n'), ((1724, 1744), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (1738, 1744), False, 'import cirq\n'), ((1754, 1774), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (1768, 1774), False, 'import cirq\n'), ((1784, 1804), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(2)'], {}), '(0, 2)\n', (1798, 1804), False, 'import cirq\n'), ((1814, 1834), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (1828, 1834), False, 'import cirq\n'), ((1844, 1864), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (1858, 1864), False, 'import cirq\n'), ((1874, 1894), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (1888, 1894), False, 'import cirq\n'), ((1904, 1924), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(0)'], {}), '(2, 0)\n', (1918, 1924), False, 'import cirq\n'), ((2311, 2345), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (2324, 2345), False, 'import pytest\n'), ((2511, 2531), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (2525, 2531), False, 'import cirq\n'), ((2541, 
2561), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (2555, 2561), False, 'import cirq\n'), ((2571, 2591), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(2)'], {}), '(0, 2)\n', (2585, 2591), False, 'import cirq\n'), ((2601, 2621), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (2615, 2621), False, 'import cirq\n'), ((2631, 2651), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (2645, 2651), False, 'import cirq\n'), ((2661, 2681), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(2)'], {}), '(1, 2)\n', (2675, 2681), False, 'import cirq\n'), ((2691, 2711), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(0)'], {}), '(2, 0)\n', (2705, 2711), False, 'import cirq\n'), ((2721, 2741), 'cirq.GridQubit', 'cirq.GridQubit', (['(2)', '(1)'], {}), '(2, 1)\n', (2735, 2741), False, 'import cirq\n'), ((2902, 2911), 'cirq.X', 'cirq.X', (['h'], {}), '(h)\n', (2908, 2911), False, 'import cirq\n'), ((2953, 2962), 'cirq.X', 'cirq.X', (['h'], {}), '(h)\n', (2959, 2962), False, 'import cirq\n'), ((3278, 3298), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (3292, 3298), False, 'import cirq\n'), ((3308, 3328), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (3322, 3328), False, 'import cirq\n'), ((3338, 3358), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (3352, 3358), False, 'import cirq\n'), ((3368, 3388), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (3382, 3388), False, 'import cirq\n'), ((3555, 3564), 'cirq.Z', 'cirq.Z', (['c'], {}), '(c)\n', (3561, 3564), False, 'import cirq\n'), ((3580, 3593), 'cirq.CZ', 'cirq.CZ', (['a', 'd'], {}), '(a, d)\n', (3587, 3593), False, 'import cirq\n'), ((4010, 4030), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (4024, 4030), False, 'import cirq\n'), ((4040, 4060), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (4054, 4060), False, 'import cirq\n'), 
((4070, 4090), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (4084, 4090), False, 'import cirq\n'), ((4100, 4120), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (4114, 4120), False, 'import cirq\n'), ((5382, 5402), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (5396, 5402), False, 'import cirq\n'), ((5412, 5432), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (5426, 5432), False, 'import cirq\n'), ((7281, 7306), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7294, 7306), False, 'import pytest\n'), ((7332, 7375), 'numpy.asarray', 'np.asarray', (['([0.25] * 16)'], {'dtype': 'np.complex64'}), '([0.25] * 16, dtype=np.complex64)\n', (7342, 7375), True, 'import numpy as np\n'), ((7504, 7528), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (7517, 7528), False, 'import pytest\n'), ((7554, 7575), 'numpy.asarray', 'np.asarray', (['([0.5] * 4)'], {}), '([0.5] * 4)\n', (7564, 7575), True, 'import numpy as np\n'), ((7759, 7776), 'sympy.Symbol', 'sympy.Symbol', (['"""x"""'], {}), "('x')\n", (7771, 7776), False, 'import sympy\n'), ((7778, 7795), 'sympy.Symbol', 'sympy.Symbol', (['"""y"""'], {}), "('y')\n", (7790, 7795), False, 'import sympy\n'), ((8162, 8179), 'sympy.Symbol', 'sympy.Symbol', (['"""x"""'], {}), "('x')\n", (8174, 8179), False, 'import sympy\n'), ((8181, 8198), 'sympy.Symbol', 'sympy.Symbol', (['"""y"""'], {}), "('y')\n", (8193, 8198), False, 'import sympy\n'), ((8387, 8448), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Parameters must be numeric"""'}), "(ValueError, match='Parameters must be numeric')\n", (8400, 8448), False, 'import pytest\n'), ((8690, 8700), 'cirq.H', 'cirq.H', (['q0'], {}), '(q0)\n', (8696, 8700), False, 'import cirq\n'), ((8710, 8720), 'cirq.H', 'cirq.H', (['q1'], {}), '(q1)\n', (8716, 8720), False, 'import cirq\n'), ((9761, 9773), 'cirq.X', 'cirq.X', (['q[0]'], {}), '(q[0])\n', 
(9767, 9773), False, 'import cirq\n'), ((9879, 9891), 'cirq.I', 'cirq.I', (['q[1]'], {}), '(q[1])\n', (9885, 9891), False, 'import cirq\n'), ((10297, 10317), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (10311, 10317), False, 'import cirq\n'), ((10327, 10347), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (10341, 10347), False, 'import cirq\n'), ((10357, 10377), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (10371, 10377), False, 'import cirq\n'), ((10387, 10407), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (10401, 10407), False, 'import cirq\n'), ((10573, 10582), 'cirq.Z', 'cirq.Z', (['c'], {}), '(c)\n', (10579, 10582), False, 'import cirq\n'), ((10598, 10611), 'cirq.CZ', 'cirq.CZ', (['a', 'd'], {}), '(a, d)\n', (10605, 10611), False, 'import cirq\n'), ((10659, 10684), 'cirq.measure', 'cirq.measure', (['a'], {'key': '"""ma"""'}), "(a, key='ma')\n", (10671, 10684), False, 'import cirq\n'), ((10694, 10719), 'cirq.measure', 'cirq.measure', (['b'], {'key': '"""mb"""'}), "(b, key='mb')\n", (10706, 10719), False, 'import cirq\n'), ((10729, 10754), 'cirq.measure', 'cirq.measure', (['c'], {'key': '"""mc"""'}), "(c, key='mc')\n", (10741, 10754), False, 'import cirq\n'), ((10764, 10789), 'cirq.measure', 'cirq.measure', (['d'], {'key': '"""md"""'}), "(d, key='md')\n", (10776, 10789), False, 'import cirq\n'), ((11212, 11268), 'cirq.measure', 'cirq.measure', (['q0', 'q1'], {'key': '"""d"""', 'invert_mask': '[False, True]'}), "(q0, q1, key='d', invert_mask=[False, True])\n", (11224, 11268), False, 'import cirq\n'), ((11580, 11625), 'cirq.measure', 'cirq.measure', (['q1'], {'key': '"""a"""', 'invert_mask': '[True]'}), "(q1, key='a', invert_mask=[True])\n", (11592, 11625), False, 'import cirq\n'), ((11635, 11680), 'cirq.measure', 'cirq.measure', (['q0'], {'key': '"""b"""', 'invert_mask': '[True]'}), "(q0, key='b', invert_mask=[True])\n", (11647, 11680), False, 'import cirq\n'), 
((11690, 11746), 'cirq.measure', 'cirq.measure', (['q0', 'q1'], {'key': '"""c"""', 'invert_mask': '[False, True]'}), "(q0, q1, key='c', invert_mask=[False, True])\n", (11702, 11746), False, 'import cirq\n'), ((11756, 11812), 'cirq.measure', 'cirq.measure', (['q1', 'q0'], {'key': '"""d"""', 'invert_mask': '[False, True]'}), "(q1, q0, key='d', invert_mask=[False, True])\n", (11768, 11812), False, 'import cirq\n'), ((12188, 12245), 'cirq.measure', 'cirq.measure', (['q0', 'q1'], {'key': '"""a"""', 'invert_mask': '[False, False]'}), "(q0, q1, key='a', invert_mask=[False, False])\n", (12200, 12245), False, 'import cirq\n'), ((12255, 12265), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (12261, 12265), False, 'import cirq\n'), ((12275, 12331), 'cirq.measure', 'cirq.measure', (['q0', 'q1'], {'key': '"""b"""', 'invert_mask': '[True, False]'}), "(q0, q1, key='b', invert_mask=[True, False])\n", (12287, 12331), False, 'import cirq\n'), ((12341, 12351), 'cirq.X', 'cirq.X', (['q1'], {}), '(q1)\n', (12347, 12351), False, 'import cirq\n'), ((12361, 12416), 'cirq.measure', 'cirq.measure', (['q0', 'q1'], {'key': '"""c"""', 'invert_mask': '[True, True]'}), "(q0, q1, key='c', invert_mask=[True, True])\n", (12373, 12416), False, 'import cirq\n'), ((12426, 12436), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (12432, 12436), False, 'import cirq\n'), ((12446, 12502), 'cirq.measure', 'cirq.measure', (['q0', 'q1'], {'key': '"""d"""', 'invert_mask': '[False, True]'}), "(q0, q1, key='d', invert_mask=[False, True])\n", (12458, 12502), False, 'import cirq\n'), ((12899, 12919), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (12913, 12919), False, 'import cirq\n'), ((12929, 12949), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (12943, 12949), False, 'import cirq\n'), ((12959, 12979), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (12973, 12979), False, 'import cirq\n'), ((12989, 13009), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', 
'(1)'], {}), '(1, 1)\n', (13003, 13009), False, 'import cirq\n'), ((13053, 13062), 'cirq.X', 'cirq.X', (['b'], {}), '(b)\n', (13059, 13062), False, 'import cirq\n'), ((13072, 13085), 'cirq.CX', 'cirq.CX', (['b', 'd'], {}), '(b, d)\n', (13079, 13085), False, 'import cirq\n'), ((13095, 13128), 'cirq.measure', 'cirq.measure', (['a', 'b', 'c'], {'key': '"""mabc"""'}), "(a, b, c, key='mabc')\n", (13107, 13128), False, 'import cirq\n'), ((13138, 13163), 'cirq.measure', 'cirq.measure', (['d'], {'key': '"""md"""'}), "(d, key='md')\n", (13150, 13163), False, 'import cirq\n'), ((13661, 13681), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (13675, 13681), False, 'import cirq\n'), ((13691, 13711), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (13705, 13711), False, 'import cirq\n'), ((13851, 13860), 'cirq.H', 'cirq.H', (['b'], {}), '(b)\n', (13857, 13860), False, 'import cirq\n'), ((14109, 14118), 'cirq.Z', 'cirq.Z', (['a'], {}), '(a)\n', (14115, 14118), False, 'import cirq\n'), ((14147, 14156), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (14153, 14156), False, 'import cirq\n'), ((15864, 15874), 'cirq.Z', 'cirq.Z', (['q0'], {}), '(q0)\n', (15870, 15874), False, 'import cirq\n'), ((16306, 16346), 'numpy.allclose', 'np.allclose', (['result', 'expected_results[i]'], {}), '(result, expected_results[i])\n', (16317, 16346), True, 'import numpy as np\n'), ((16428, 16448), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (16442, 16448), False, 'import cirq\n'), ((16458, 16478), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (16472, 16478), False, 'import cirq\n'), ((16513, 16522), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (16519, 16522), False, 'import cirq\n'), ((16524, 16533), 'cirq.H', 'cirq.H', (['b'], {}), '(b)\n', (16530, 16533), False, 'import cirq\n'), ((16535, 16562), 'cirq.measure', 'cirq.measure', (['a', 'b'], {'key': '"""m"""'}), "(a, b, key='m')\n", (16547, 16562), False, 'import 
cirq\n'), ((16575, 16584), 'cirq.Z', 'cirq.Z', (['a'], {}), '(a)\n', (16581, 16584), False, 'import cirq\n'), ((16657, 16734), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Provided circuit has terminal measurements"""'}), "(ValueError, match='Provided circuit has terminal measurements')\n", (16670, 16734), False, 'import pytest\n'), ((17160, 17180), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (17174, 17180), False, 'import cirq\n'), ((17190, 17210), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (17204, 17210), False, 'import cirq\n'), ((17254, 17263), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (17260, 17263), False, 'import cirq\n'), ((17273, 17286), 'cirq.CX', 'cirq.CX', (['a', 'b'], {}), '(a, b)\n', (17280, 17286), False, 'import cirq\n'), ((17296, 17324), 'cirq.measure', 'cirq.measure', (['a', 'b'], {'key': '"""m1"""'}), "(a, b, key='m1')\n", (17308, 17324), False, 'import cirq\n'), ((17334, 17347), 'cirq.CZ', 'cirq.CZ', (['a', 'b'], {}), '(a, b)\n', (17341, 17347), False, 'import cirq\n'), ((17357, 17385), 'cirq.measure', 'cirq.measure', (['a', 'b'], {'key': '"""m2"""'}), "(a, b, key='m2')\n", (17369, 17385), False, 'import cirq\n'), ((17395, 17404), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (17401, 17404), False, 'import cirq\n'), ((17414, 17427), 'cirq.CX', 'cirq.CX', (['a', 'b'], {}), '(a, b)\n', (17421, 17427), False, 'import cirq\n'), ((17437, 17465), 'cirq.measure', 'cirq.measure', (['a', 'b'], {'key': '"""m3"""'}), "(a, b, key='m3')\n", (17449, 17465), False, 'import cirq\n'), ((17543, 17552), 'cirq.H', 'cirq.H', (['a'], {}), '(a)\n', (17549, 17552), False, 'import cirq\n'), ((17562, 17571), 'cirq.H', 'cirq.H', (['b'], {}), '(b)\n', (17568, 17571), False, 'import cirq\n'), ((18123, 18132), 'cirq.H', 'cirq.H', (['q'], {}), '(q)\n', (18129, 18132), False, 'import cirq\n'), ((18134, 18158), 'cirq.measure', 'cirq.measure', (['q'], {'key': '"""m"""'}), "(q, key='m')\n", (18146, 18158), 
False, 'import cirq\n'), ((18516, 18548), 'numpy.array', 'np.array', (['[[1, 1.0j], [1.0j, 1]]'], {}), '([[1, 1.0j], [1.0j, 1]])\n', (18524, 18548), True, 'import numpy as np\n'), ((18547, 18559), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (18554, 18559), True, 'import numpy as np\n'), ((20005, 20022), 'cirq.H', 'cirq.H', (['qubits[0]'], {}), '(qubits[0])\n', (20011, 20022), False, 'import cirq\n'), ((20032, 20049), 'cirq.H', 'cirq.H', (['qubits[1]'], {}), '(qubits[1])\n', (20038, 20049), False, 'import cirq\n'), ((21475, 21492), 'cirq.H', 'cirq.H', (['qubits[0]'], {}), '(qubits[0])\n', (21481, 21492), False, 'import cirq\n'), ((21502, 21519), 'cirq.H', 'cirq.H', (['qubits[1]'], {}), '(qubits[1])\n', (21508, 21519), False, 'import cirq\n'), ((22053, 22070), 'cirq.H', 'cirq.H', (['qubits[1]'], {}), '(qubits[1])\n', (22059, 22070), False, 'import cirq\n'), ((22080, 22097), 'cirq.Y', 'cirq.Y', (['qubits[2]'], {}), '(qubits[2])\n', (22086, 22097), False, 'import cirq\n'), ((22725, 22757), 'numpy.array', 'np.array', (['[[1, 1.0j], [1.0j, 1]]'], {}), '([[1, 1.0j], [1.0j, 1]])\n', (22733, 22757), True, 'import numpy as np\n'), ((22756, 22768), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (22763, 22768), True, 'import numpy as np\n'), ((24816, 24885), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {'match': '"""Gate has no valid control value"""'}), "(RuntimeWarning, match='Gate has no valid control value')\n", (24828, 24885), False, 'import pytest\n'), ((25470, 25490), 'cirq.Circuit', 'cirq.Circuit', (['CCCCCH'], {}), '(CCCCCH)\n', (25482, 25490), False, 'import cirq\n'), ((25591, 25610), 'cirq.Circuit', 'cirq.Circuit', (['HHHHH'], {}), '(HHHHH)\n', (25603, 25610), False, 'import cirq\n'), ((25690, 25779), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {'match': '"""Received control gate on 5 target qubits"""'}), "(NotImplementedError, match=\n 'Received control gate on 5 target qubits')\n", (25703, 25779), False, 'import pytest\n'), 
((26048, 26065), 'cirq.H', 'cirq.H', (['qubits[0]'], {}), '(qubits[0])\n', (26054, 26065), False, 'import cirq\n'), ((26075, 26092), 'cirq.H', 'cirq.H', (['qubits[1]'], {}), '(qubits[1])\n', (26081, 26092), False, 'import cirq\n'), ((26200, 26217), 'cirq.H', 'cirq.H', (['qubits[2]'], {}), '(qubits[2])\n', (26206, 26217), False, 'import cirq\n'), ((26227, 26244), 'cirq.H', 'cirq.H', (['qubits[3]'], {}), '(qubits[3])\n', (26233, 26244), False, 'import cirq\n'), ((33247, 33262), 'cirq.X', 'cirq.X', (['q_no_cx'], {}), '(q_no_cx)\n', (33253, 33262), False, 'import cirq\n'), ((34803, 34813), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (34809, 34813), False, 'import cirq\n'), ((35669, 35679), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (35675, 35679), False, 'import cirq\n'), ((35681, 35698), 'cirq.CNOT', 'cirq.CNOT', (['q0', 'q1'], {}), '(q0, q1)\n', (35690, 35698), False, 'import cirq\n'), ((35700, 35729), 'cirq.measure', 'cirq.measure', (['q0', 'q1'], {'key': '"""m"""'}), "(q0, q1, key='m')\n", (35712, 35729), False, 'import cirq\n'), ((36526, 36536), 'cirq.Z', 'cirq.Z', (['q0'], {}), '(q0)\n', (36532, 36536), False, 'import cirq\n'), ((36539, 36549), 'cirq.Z', 'cirq.Z', (['q1'], {}), '(q1)\n', (36545, 36549), False, 'import cirq\n'), ((36873, 36888), 'cirq.CX', 'cirq.CX', (['q0', 'q1'], {}), '(q0, q1)\n', (36880, 36888), False, 'import cirq\n'), ((36952, 36967), 'cirq.CX', 'cirq.CX', (['q0', 'q2'], {}), '(q0, q2)\n', (36959, 36967), False, 'import cirq\n'), ((36977, 36987), 'cirq.T', 'cirq.T', (['q1'], {}), '(q1)\n', (36983, 36987), False, 'import cirq\n'), ((36997, 37007), 'cirq.T', 'cirq.T', (['q3'], {}), '(q3)\n', (37003, 37007), False, 'import cirq\n'), ((37017, 37032), 'cirq.CX', 'cirq.CX', (['q1', 'q2'], {}), '(q1, q2)\n', (37024, 37032), False, 'import cirq\n'), ((37096, 37111), 'cirq.CX', 'cirq.CX', (['q1', 'q3'], {}), '(q1, q3)\n', (37103, 37111), False, 'import cirq\n'), ((37121, 37131), 'cirq.T', 'cirq.T', (['q0'], {}), '(q0)\n', (37127, 37131), False, 
'import cirq\n'), ((37141, 37151), 'cirq.T', 'cirq.T', (['q2'], {}), '(q2)\n', (37147, 37151), False, 'import cirq\n'), ((37161, 37176), 'cirq.CX', 'cirq.CX', (['q2', 'q3'], {}), '(q2, q3)\n', (37168, 37176), False, 'import cirq\n'), ((38058, 38155), 'cirq.testing.random_circuit', 'cirq.testing.random_circuit', ([], {'qubits': '[q0, q1]', 'n_moments': '(8)', 'op_density': '(0.99)', 'random_state': 'iter'}), '(qubits=[q0, q1], n_moments=8, op_density=0.99,\n random_state=iter)\n', (38085, 38155), False, 'import cirq\n'), ((38590, 38606), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (38604, 38606), False, 'import cirq\n'), ((39048, 39068), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (39062, 39068), False, 'import cirq\n'), ((39070, 39090), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (39084, 39090), False, 'import cirq\n'), ((39148, 39163), 'cirq.CNOT', 'cirq.CNOT', (['a', 'b'], {}), '(a, b)\n', (39157, 39163), False, 'import cirq\n'), ((39165, 39180), 'cirq.CNOT', 'cirq.CNOT', (['b', 'a'], {}), '(b, a)\n', (39174, 39180), False, 'import cirq\n'), ((39182, 39191), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (39188, 39191), False, 'import cirq\n'), ((39545, 39592), 'pytest.skip', 'pytest.skip', (['"""GPU is available; skipping test."""'], {}), "('GPU is available; skipping test.')\n", (39556, 39592), False, 'import pytest\n'), ((39710, 39787), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""GPU execution requested, but not supported"""'}), "(ValueError, match='GPU execution requested, but not supported')\n", (39723, 39787), False, 'import pytest\n'), ((39824, 39872), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {'qsim_options': 'gpu_options'}), '(qsim_options=gpu_options)\n', (39846, 39872), False, 'import qsimcirq\n'), ((39954, 40002), 'pytest.skip', 'pytest.skip', (['"""GPU is not available for testing."""'], {}), "('GPU is not available for testing.')\n", (39965, 40002), False, 
'import pytest\n'), ((40034, 40054), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (40048, 40054), False, 'import cirq\n'), ((40056, 40076), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (40070, 40076), False, 'import cirq\n'), ((40134, 40149), 'cirq.CNOT', 'cirq.CNOT', (['a', 'b'], {}), '(a, b)\n', (40143, 40149), False, 'import cirq\n'), ((40151, 40166), 'cirq.CNOT', 'cirq.CNOT', (['b', 'a'], {}), '(b, a)\n', (40160, 40166), False, 'import cirq\n'), ((40168, 40177), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (40174, 40177), False, 'import cirq\n'), ((40572, 40620), 'pytest.skip', 'pytest.skip', (['"""GPU is not available for testing."""'], {}), "('GPU is not available for testing.')\n", (40583, 40620), False, 'import pytest\n'), ((40652, 40672), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (40666, 40672), False, 'import cirq\n'), ((40674, 40694), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (40688, 40694), False, 'import cirq\n'), ((40752, 40761), 'cirq.H', 'cirq.H', (['a'], {}), '(a)\n', (40758, 40761), False, 'import cirq\n'), ((40763, 40778), 'cirq.CNOT', 'cirq.CNOT', (['a', 'b'], {}), '(a, b)\n', (40772, 40778), False, 'import cirq\n'), ((40780, 40789), 'cirq.X', 'cirq.X', (['b'], {}), '(b)\n', (40786, 40789), False, 'import cirq\n'), ((41335, 41383), 'pytest.skip', 'pytest.skip', (['"""GPU is not available for testing."""'], {}), "('GPU is not available for testing.')\n", (41346, 41383), False, 'import pytest\n'), ((41415, 41435), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (41429, 41435), False, 'import cirq\n'), ((41437, 41457), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (41451, 41457), False, 'import cirq\n'), ((41515, 41524), 'cirq.H', 'cirq.H', (['a'], {}), '(a)\n', (41521, 41524), False, 'import cirq\n'), ((41526, 41541), 'cirq.CNOT', 'cirq.CNOT', (['a', 'b'], {}), '(a, b)\n', (41535, 41541), False, 
'import cirq\n'), ((41543, 41552), 'cirq.X', 'cirq.X', (['b'], {}), '(b)\n', (41549, 41552), False, 'import cirq\n'), ((42041, 42089), 'pytest.skip', 'pytest.skip', (['"""GPU is not available for testing."""'], {}), "('GPU is not available for testing.')\n", (42052, 42089), False, 'import pytest\n'), ((42121, 42141), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (42135, 42141), False, 'import cirq\n'), ((42143, 42163), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (42157, 42163), False, 'import cirq\n'), ((42221, 42230), 'cirq.H', 'cirq.H', (['a'], {}), '(a)\n', (42227, 42230), False, 'import cirq\n'), ((42232, 42247), 'cirq.CNOT', 'cirq.CNOT', (['a', 'b'], {}), '(a, b)\n', (42241, 42247), False, 'import cirq\n'), ((42249, 42258), 'cirq.X', 'cirq.X', (['b'], {}), '(b)\n', (42255, 42258), False, 'import cirq\n'), ((42930, 42993), 'pytest.skip', 'pytest.skip', (['"""cuStateVec library is not available for testing."""'], {}), "('cuStateVec library is not available for testing.')\n", (42941, 42993), False, 'import pytest\n'), ((43025, 43045), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (43039, 43045), False, 'import cirq\n'), ((43047, 43067), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (43061, 43067), False, 'import cirq\n'), ((43125, 43140), 'cirq.CNOT', 'cirq.CNOT', (['a', 'b'], {}), '(a, b)\n', (43134, 43140), False, 'import cirq\n'), ((43142, 43157), 'cirq.CNOT', 'cirq.CNOT', (['b', 'a'], {}), '(b, a)\n', (43151, 43157), False, 'import cirq\n'), ((43159, 43168), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (43165, 43168), False, 'import cirq\n'), ((43589, 43652), 'pytest.skip', 'pytest.skip', (['"""cuStateVec library is not available for testing."""'], {}), "('cuStateVec library is not available for testing.')\n", (43600, 43652), False, 'import pytest\n'), ((43684, 43704), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (43698, 43704), False, 
'import cirq\n'), ((43706, 43726), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (43720, 43726), False, 'import cirq\n'), ((43784, 43793), 'cirq.H', 'cirq.H', (['a'], {}), '(a)\n', (43790, 43793), False, 'import cirq\n'), ((43795, 43810), 'cirq.CNOT', 'cirq.CNOT', (['a', 'b'], {}), '(a, b)\n', (43804, 43810), False, 'import cirq\n'), ((43812, 43821), 'cirq.X', 'cirq.X', (['b'], {}), '(b)\n', (43818, 43821), False, 'import cirq\n'), ((44393, 44456), 'pytest.skip', 'pytest.skip', (['"""cuStateVec library is not available for testing."""'], {}), "('cuStateVec library is not available for testing.')\n", (44404, 44456), False, 'import pytest\n'), ((44488, 44508), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (44502, 44508), False, 'import cirq\n'), ((44510, 44530), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (44524, 44530), False, 'import cirq\n'), ((44588, 44597), 'cirq.H', 'cirq.H', (['a'], {}), '(a)\n', (44594, 44597), False, 'import cirq\n'), ((44599, 44614), 'cirq.CNOT', 'cirq.CNOT', (['a', 'b'], {}), '(a, b)\n', (44608, 44614), False, 'import cirq\n'), ((44616, 44625), 'cirq.X', 'cirq.X', (['b'], {}), '(b)\n', (44622, 44625), False, 'import cirq\n'), ((45140, 45203), 'pytest.skip', 'pytest.skip', (['"""cuStateVec library is not available for testing."""'], {}), "('cuStateVec library is not available for testing.')\n", (45151, 45203), False, 'import pytest\n'), ((45235, 45255), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (45249, 45255), False, 'import cirq\n'), ((45257, 45277), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (45271, 45277), False, 'import cirq\n'), ((45335, 45344), 'cirq.H', 'cirq.H', (['a'], {}), '(a)\n', (45341, 45344), False, 'import cirq\n'), ((45346, 45361), 'cirq.CNOT', 'cirq.CNOT', (['a', 'b'], {}), '(a, b)\n', (45355, 45361), False, 'import cirq\n'), ((45363, 45372), 'cirq.X', 'cirq.X', (['b'], {}), '(b)\n', (45369, 
45372), False, 'import cirq\n'), ((57507, 57527), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (57521, 57527), False, 'import cirq\n'), ((57537, 57557), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (57551, 57557), False, 'import cirq\n'), ((57567, 57587), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (57581, 57587), False, 'import cirq\n'), ((57597, 57617), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (57611, 57617), False, 'import cirq\n'), ((57742, 57751), 'cirq.Z', 'cirq.Z', (['c'], {}), '(c)\n', (57748, 57751), False, 'import cirq\n'), ((57761, 57774), 'cirq.CZ', 'cirq.CZ', (['a', 'd'], {}), '(a, d)\n', (57768, 57774), False, 'import cirq\n'), ((58004, 58037), 'numpy.allclose', 'np.allclose', (['result', '[0.5j, 0.0j]'], {}), '(result, [0.5j, 0.0j])\n', (58015, 58037), True, 'import numpy as np\n'), ((58330, 58350), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (58344, 58350), False, 'import cirq\n'), ((58360, 58380), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (58374, 58380), False, 'import cirq\n'), ((58390, 58410), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (58404, 58410), False, 'import cirq\n'), ((58420, 58440), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (58434, 58440), False, 'import cirq\n'), ((59547, 59567), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (59561, 59567), False, 'import cirq\n'), ((59577, 59597), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (59591, 59597), False, 'import cirq\n'), ((59607, 59627), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(1)'], {}), '(1, 1)\n', (59621, 59627), False, 'import cirq\n'), ((59637, 59657), 'cirq.GridQubit', 'cirq.GridQubit', (['(1)', '(0)'], {}), '(1, 0)\n', (59651, 59657), False, 'import cirq\n'), ((59782, 59791), 'cirq.Z', 'cirq.Z', (['c'], {}), '(c)\n', 
(59788, 59791), False, 'import cirq\n'), ((59801, 59814), 'cirq.CZ', 'cirq.CZ', (['a', 'd'], {}), '(a, d)\n', (59808, 59814), False, 'import cirq\n'), ((59849, 59874), 'cirq.measure', 'cirq.measure', (['a'], {'key': '"""ma"""'}), "(a, key='ma')\n", (59861, 59874), False, 'import cirq\n'), ((59884, 59909), 'cirq.measure', 'cirq.measure', (['b'], {'key': '"""mb"""'}), "(b, key='mb')\n", (59896, 59909), False, 'import cirq\n'), ((59919, 59944), 'cirq.measure', 'cirq.measure', (['c'], {'key': '"""mc"""'}), "(c, key='mc')\n", (59931, 59944), False, 'import cirq\n'), ((59954, 59979), 'cirq.measure', 'cirq.measure', (['d'], {'key': '"""md"""'}), "(d, key='md')\n", (59966, 59979), False, 'import cirq\n'), ((60545, 60565), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (60559, 60565), False, 'import cirq\n'), ((60567, 60587), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(1)'], {}), '(0, 1)\n', (60581, 60587), False, 'import cirq\n'), ((60722, 60731), 'cirq.H', 'cirq.H', (['b'], {}), '(b)\n', (60728, 60731), False, 'import cirq\n'), ((60980, 60989), 'cirq.Z', 'cirq.Z', (['a'], {}), '(a)\n', (60986, 60989), False, 'import cirq\n'), ((61018, 61027), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (61024, 61027), False, 'import cirq\n'), ((61402, 61454), 'cirq.approx_eq', 'cirq.approx_eq', (['qsim_result', 'cirq_result'], {'atol': '(1e-06)'}), '(qsim_result, cirq_result, atol=1e-06)\n', (61416, 61454), False, 'import cirq\n'), ((1043, 1067), 'numpy.asarray', 'np.asarray', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (1053, 1067), True, 'import numpy as np\n'), ((1111, 1135), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (1133, 1135), False, 'import qsimcirq\n'), ((1374, 1387), 'cirq.X', 'cirq.X', (['qs[0]'], {}), '(qs[0])\n', (1380, 1387), False, 'import cirq\n'), ((1427, 1440), 'cirq.X', 'cirq.X', (['qs[1]'], {}), '(qs[1])\n', (1433, 1440), False, 'import cirq\n'), ((1543, 1567), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], 
{}), '()\n', (1565, 1567), False, 'import qsimcirq\n'), ((2125, 2136), 'numpy.eye', 'np.eye', (['(128)'], {}), '(128)\n', (2131, 2136), True, 'import numpy as np\n'), ((3461, 3470), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (3467, 3470), False, 'import cirq\n'), ((3508, 3517), 'cirq.Y', 'cirq.Y', (['b'], {}), '(b)\n', (3514, 3517), False, 'import cirq\n'), ((4270, 4279), 'cirq.H', 'cirq.H', (['b'], {}), '(b)\n', (4276, 4279), False, 'import cirq\n'), ((4306, 4315), 'cirq.X', 'cirq.X', (['c'], {}), '(c)\n', (4312, 4315), False, 'import cirq\n'), ((4335, 4344), 'cirq.H', 'cirq.H', (['d'], {}), '(d)\n', (4341, 4344), False, 'import cirq\n'), ((4454, 4467), 'cirq.CX', 'cirq.CX', (['b', 'c'], {}), '(b, c)\n', (4461, 4467), False, 'import cirq\n'), ((4494, 4503), 'cirq.S', 'cirq.S', (['d'], {}), '(d)\n', (4500, 4503), False, 'import cirq\n'), ((4574, 4583), 'cirq.I', 'cirq.I', (['a'], {}), '(a)\n', (4580, 4583), False, 'import cirq\n'), ((4597, 4613), 'cirq.ISWAP', 'cirq.ISWAP', (['b', 'c'], {}), '(b, c)\n', (4607, 4613), False, 'import cirq\n'), ((5585, 5594), 'cirq.H', 'cirq.H', (['b'], {}), '(b)\n', (5591, 5594), False, 'import cirq\n'), ((5653, 5666), 'cirq.CX', 'cirq.CX', (['a', 'b'], {}), '(a, b)\n', (5660, 5666), False, 'import cirq\n'), ((7167, 7184), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (7181, 7184), False, 'import cirq\n'), ((7194, 7211), 'cirq.LineQubit', 'cirq.LineQubit', (['(1)'], {}), '(1)\n', (7208, 7211), False, 'import cirq\n'), ((7823, 7833), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (7829, 7833), False, 'import cirq\n'), ((7840, 7850), 'cirq.H', 'cirq.H', (['q0'], {}), '(q0)\n', (7846, 7850), False, 'import cirq\n'), ((7872, 7883), 'numpy.int64', 'np.int64', (['(0)'], {}), '(0)\n', (7880, 7883), True, 'import numpy as np\n'), ((7888, 7899), 'numpy.int64', 'np.int64', (['(1)'], {}), '(1)\n', (7896, 7899), True, 'import numpy as np\n'), ((7906, 7917), 'numpy.int64', 'np.int64', (['(1)'], {}), '(1)\n', (7914, 7917), True, 'import 
numpy as np\n'), ((7922, 7933), 'numpy.int64', 'np.int64', (['(0)'], {}), '(0)\n', (7930, 7933), True, 'import numpy as np\n'), ((8226, 8236), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (8232, 8236), False, 'import cirq\n'), ((8243, 8253), 'cirq.H', 'cirq.H', (['q0'], {}), '(q0)\n', (8249, 8253), False, 'import cirq\n'), ((8275, 8286), 'numpy.int64', 'np.int64', (['(0)'], {}), '(0)\n', (8283, 8286), True, 'import numpy as np\n'), ((8291, 8302), 'numpy.int64', 'np.int64', (['(1)'], {}), '(1)\n', (8299, 8302), True, 'import numpy as np\n'), ((8309, 8320), 'numpy.int64', 'np.int64', (['(1)'], {}), '(1)\n', (8317, 8320), True, 'import numpy as np\n'), ((10479, 10488), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (10485, 10488), False, 'import cirq\n'), ((10526, 10535), 'cirq.Y', 'cirq.Y', (['b'], {}), '(b)\n', (10532, 10535), False, 'import cirq\n'), ((11294, 11310), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (11308, 11310), False, 'import cirq\n'), ((11360, 11384), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (11382, 11384), False, 'import qsimcirq\n'), ((11838, 11854), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (11852, 11854), False, 'import cirq\n'), ((11904, 11928), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (11926, 11928), False, 'import qsimcirq\n'), ((12528, 12544), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (12542, 12544), False, 'import cirq\n'), ((12594, 12618), 'qsimcirq.QSimSimulator', 'qsimcirq.QSimSimulator', ([], {}), '()\n', (12616, 12618), False, 'import qsimcirq\n'), ((13823, 13832), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (13829, 13832), False, 'import cirq\n'), ((13870, 13879), 'cirq.H', 'cirq.H', (['a'], {}), '(a)\n', (13876, 13879), False, 'import cirq\n'), ((13898, 13907), 'cirq.H', 'cirq.H', (['b'], {}), '(b)\n', (13904, 13907), False, 'import cirq\n'), ((14125, 14134), 'cirq.X', 'cirq.X', (['b'], {}), '(b)\n', (14131, 14134), False, 'import cirq\n'), ((14163, 
14172), 'cirq.Z', 'cirq.Z', (['b'], {}), '(b)\n', (14169, 14172), False, 'import cirq\n'), ((15335, 15363), 'numpy.cos', 'np.cos', (['(np.pi * (i + 1) / 20)'], {}), '(np.pi * (i + 1) / 20)\n', (15341, 15363), True, 'import numpy as np\n'), ((15670, 15680), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (15676, 15680), False, 'import cirq\n'), ((15682, 15692), 'cirq.H', 'cirq.H', (['q1'], {}), '(q1)\n', (15688, 15692), False, 'import cirq\n'), ((15715, 15725), 'cirq.H', 'cirq.H', (['q0'], {}), '(q0)\n', (15721, 15725), False, 'import cirq\n'), ((15727, 15737), 'cirq.Z', 'cirq.Z', (['q1'], {}), '(q1)\n', (15733, 15737), False, 'import cirq\n'), ((15760, 15770), 'cirq.Z', 'cirq.Z', (['q0'], {}), '(q0)\n', (15766, 15770), False, 'import cirq\n'), ((15772, 15782), 'cirq.H', 'cirq.H', (['q1'], {}), '(q1)\n', (15778, 15782), False, 'import cirq\n'), ((15805, 15815), 'cirq.H', 'cirq.H', (['q0'], {}), '(q0)\n', (15811, 15815), False, 'import cirq\n'), ((15817, 15827), 'cirq.X', 'cirq.X', (['q1'], {}), '(q1)\n', (15823, 15827), False, 'import cirq\n'), ((15888, 15898), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (15894, 15898), False, 'import cirq\n'), ((15900, 15910), 'cirq.Z', 'cirq.Z', (['q1'], {}), '(q1)\n', (15906, 15910), False, 'import cirq\n'), ((15925, 15935), 'cirq.Z', 'cirq.Z', (['q0'], {}), '(q0)\n', (15931, 15935), False, 'import cirq\n'), ((15937, 15947), 'cirq.Z', 'cirq.Z', (['q1'], {}), '(q1)\n', (15943, 15947), False, 'import cirq\n'), ((16591, 16600), 'cirq.X', 'cirq.X', (['b'], {}), '(b)\n', (16597, 16600), False, 'import cirq\n'), ((20968, 21195), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, \n 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0,\n 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 0]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, \n 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0,\n 0, 0, 0, 1, 0, 0], 
[0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 0]])\n', (20976, 21195), True, 'import numpy as np\n'), ((25196, 25213), 'cirq.H', 'cirq.H', (['qubits[0]'], {}), '(qubits[0])\n', (25202, 25213), False, 'import cirq\n'), ((25819, 25839), 'cirq.Circuit', 'cirq.Circuit', (['CHHHHH'], {}), '(CHHHHH)\n', (25831, 25839), False, 'import cirq\n'), ((26127, 26148), 'cirq.CCX', 'cirq.CCX', (['*qubits[:3]'], {}), '(*qubits[:3])\n', (26135, 26148), False, 'import cirq\n'), ((26162, 26179), 'cirq.H', 'cirq.H', (['qubits[3]'], {}), '(qubits[3])\n', (26168, 26179), False, 'import cirq\n'), ((27973, 27983), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (27979, 27983), False, 'import cirq\n'), ((28000, 28010), 'cirq.X', 'cirq.X', (['q1'], {}), '(q1)\n', (28006, 28010), False, 'import cirq\n'), ((28581, 28599), 'numpy.linalg.norm', 'np.linalg.norm', (['ps'], {}), '(ps)\n', (28595, 28599), True, 'import numpy as np\n'), ((29677, 29687), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (29683, 29687), False, 'import cirq\n'), ((29704, 29714), 'cirq.X', 'cirq.X', (['q1'], {}), '(q1)\n', (29710, 29714), False, 'import cirq\n'), ((30307, 30325), 'numpy.linalg.norm', 'np.linalg.norm', (['ps'], {}), '(ps)\n', (30321, 30325), True, 'import numpy as np\n'), ((31433, 31449), 'cirq.unitary', 'cirq.unitary', (['op'], {}), '(op)\n', (31445, 31449), False, 'import cirq\n'), ((33210, 33230), 'cirq.X', 'cirq.X', (['cx_qubits[0]'], {}), '(cx_qubits[0])\n', (33216, 33230), False, 'import cirq\n'), ((33384, 33399), 'cirq.X', 'cirq.X', (['q_no_cx'], {}), '(q_no_cx)\n', (33390, 33399), False, 'import cirq\n'), ((33761, 33779), 'numpy.linalg.norm', 'np.linalg.norm', (['ps'], {}), '(ps)\n', (33775, 33779), True, 'import numpy as np\n'), ((32012, 32029), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (32026, 32029), False, 'import cirq\n'), ((32031, 32048), 'cirq.LineQubit', 'cirq.LineQubit', (['(1)'], {}), '(1)\n', (32045, 32048), False, 'import cirq\n'), ((32060, 32077), 'cirq.LineQubit', 
'cirq.LineQubit', (['(0)'], {}), '(0)\n', (32074, 32077), False, 'import cirq\n'), ((32079, 32096), 'cirq.LineQubit', 'cirq.LineQubit', (['(2)'], {}), '(2)\n', (32093, 32096), False, 'import cirq\n'), ((32108, 32125), 'cirq.LineQubit', 'cirq.LineQubit', (['(1)'], {}), '(1)\n', (32122, 32125), False, 'import cirq\n'), ((32127, 32144), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (32141, 32144), False, 'import cirq\n'), ((32156, 32173), 'cirq.LineQubit', 'cirq.LineQubit', (['(1)'], {}), '(1)\n', (32170, 32173), False, 'import cirq\n'), ((32175, 32192), 'cirq.LineQubit', 'cirq.LineQubit', (['(2)'], {}), '(2)\n', (32189, 32192), False, 'import cirq\n'), ((32204, 32221), 'cirq.LineQubit', 'cirq.LineQubit', (['(2)'], {}), '(2)\n', (32218, 32221), False, 'import cirq\n'), ((32223, 32240), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (32237, 32240), False, 'import cirq\n'), ((32252, 32269), 'cirq.LineQubit', 'cirq.LineQubit', (['(2)'], {}), '(2)\n', (32266, 32269), False, 'import cirq\n'), ((32271, 32288), 'cirq.LineQubit', 'cirq.LineQubit', (['(1)'], {}), '(1)\n', (32285, 32288), False, 'import cirq\n'), ((36898, 36908), 'cirq.X', 'cirq.X', (['q2'], {}), '(q2)\n', (36904, 36908), False, 'import cirq\n'), ((36925, 36935), 'cirq.Y', 'cirq.Y', (['q3'], {}), '(q3)\n', (36931, 36935), False, 'import cirq\n'), ((37042, 37052), 'cirq.X', 'cirq.X', (['q3'], {}), '(q3)\n', (37048, 37052), False, 'import cirq\n'), ((37069, 37079), 'cirq.Y', 'cirq.Y', (['q0'], {}), '(q0)\n', (37075, 37079), False, 'import cirq\n'), ((37186, 37196), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (37192, 37196), False, 'import cirq\n'), ((37213, 37223), 'cirq.Y', 'cirq.Y', (['q1'], {}), '(q1)\n', (37219, 37223), False, 'import cirq\n'), ((41565, 41574), 'cirq.Z', 'cirq.Z', (['a'], {}), '(a)\n', (41571, 41574), False, 'import cirq\n'), ((41577, 41586), 'cirq.Z', 'cirq.Z', (['b'], {}), '(b)\n', (41583, 41586), False, 'import cirq\n'), ((44638, 44647), 'cirq.Z', 'cirq.Z', 
(['a'], {}), '(a)\n', (44644, 44647), False, 'import cirq\n'), ((44650, 44659), 'cirq.Z', 'cirq.Z', (['b'], {}), '(b)\n', (44656, 44659), False, 'import cirq\n'), ((46480, 46493), 'cirq.X', 'cirq.X', (['qubit'], {}), '(qubit)\n', (46486, 46493), False, 'import cirq\n'), ((46497, 46517), 'sympy.Symbol', 'sympy.Symbol', (['"""beta"""'], {}), "('beta')\n", (46509, 46517), False, 'import sympy\n'), ((47138, 47148), 'cirq.H', 'cirq.H', (['q0'], {}), '(q0)\n', (47144, 47148), False, 'import cirq\n'), ((47162, 47172), 'cirq.H', 'cirq.H', (['q1'], {}), '(q1)\n', (47168, 47172), False, 'import cirq\n'), ((47186, 47196), 'cirq.H', 'cirq.H', (['q2'], {}), '(q2)\n', (47192, 47196), False, 'import cirq\n'), ((47210, 47220), 'cirq.H', 'cirq.H', (['q3'], {}), '(q3)\n', (47216, 47220), False, 'import cirq\n'), ((47266, 47276), 'cirq.T', 'cirq.T', (['q0'], {}), '(q0)\n', (47272, 47276), False, 'import cirq\n'), ((47290, 47300), 'cirq.T', 'cirq.T', (['q1'], {}), '(q1)\n', (47296, 47300), False, 'import cirq\n'), ((47314, 47324), 'cirq.T', 'cirq.T', (['q2'], {}), '(q2)\n', (47320, 47324), False, 'import cirq\n'), ((47338, 47348), 'cirq.T', 'cirq.T', (['q3'], {}), '(q3)\n', (47344, 47348), False, 'import cirq\n'), ((47844, 47859), 'cirq.CX', 'cirq.CX', (['q0', 'q2'], {}), '(q0, q2)\n', (47851, 47859), False, 'import cirq\n'), ((47873, 47888), 'cirq.CZ', 'cirq.CZ', (['q1', 'q3'], {}), '(q1, q3)\n', (47880, 47888), False, 'import cirq\n'), ((47934, 47944), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (47940, 47944), False, 'import cirq\n'), ((47958, 47968), 'cirq.Y', 'cirq.Y', (['q1'], {}), '(q1)\n', (47964, 47968), False, 'import cirq\n'), ((47982, 47992), 'cirq.Z', 'cirq.Z', (['q2'], {}), '(q2)\n', (47988, 47992), False, 'import cirq\n'), ((48006, 48016), 'cirq.S', 'cirq.S', (['q3'], {}), '(q3)\n', (48012, 48016), False, 'import cirq\n'), ((48217, 48227), 'cirq.I', 'cirq.I', (['q0'], {}), '(q0)\n', (48223, 48227), False, 'import cirq\n'), ((48229, 48239), 'cirq.I', 'cirq.I', (['q1'], 
{}), '(q1)\n', (48235, 48239), False, 'import cirq\n'), ((49272, 49282), 'cirq.T', 'cirq.T', (['q1'], {}), '(q1)\n', (49278, 49282), False, 'import cirq\n'), ((49296, 49306), 'cirq.H', 'cirq.H', (['q2'], {}), '(q2)\n', (49302, 49306), False, 'import cirq\n'), ((49320, 49330), 'cirq.S', 'cirq.S', (['q3'], {}), '(q3)\n', (49326, 49330), False, 'import cirq\n'), ((49376, 49393), 'cirq.SWAP', 'cirq.SWAP', (['q0', 'q2'], {}), '(q0, q2)\n', (49385, 49393), False, 'import cirq\n'), ((49407, 49422), 'cirq.XX', 'cirq.XX', (['q1', 'q3'], {}), '(q1, q3)\n', (49414, 49422), False, 'import cirq\n'), ((49558, 49568), 'cirq.T', 'cirq.T', (['q3'], {}), '(q3)\n', (49564, 49568), False, 'import cirq\n'), ((49614, 49629), 'cirq.YY', 'cirq.YY', (['q0', 'q1'], {}), '(q0, q1)\n', (49621, 49629), False, 'import cirq\n'), ((49643, 49661), 'cirq.ISWAP', 'cirq.ISWAP', (['q2', 'q3'], {}), '(q2, q3)\n', (49653, 49661), False, 'import cirq\n'), ((49707, 49717), 'cirq.T', 'cirq.T', (['q0'], {}), '(q0)\n', (49713, 49717), False, 'import cirq\n'), ((49731, 49741), 'cirq.Z', 'cirq.Z', (['q1'], {}), '(q1)\n', (49737, 49741), False, 'import cirq\n'), ((49755, 49765), 'cirq.Y', 'cirq.Y', (['q2'], {}), '(q2)\n', (49761, 49765), False, 'import cirq\n'), ((49779, 49789), 'cirq.X', 'cirq.X', (['q3'], {}), '(q3)\n', (49785, 49789), False, 'import cirq\n'), ((49880, 49895), 'cirq.ZZ', 'cirq.ZZ', (['q1', 'q3'], {}), '(q1, q3)\n', (49887, 49895), False, 'import cirq\n'), ((50031, 50041), 'cirq.S', 'cirq.S', (['q3'], {}), '(q3)\n', (50037, 50041), False, 'import cirq\n'), ((51084, 51108), 'cirq.TOFFOLI', 'cirq.TOFFOLI', (['q3', 'q2', 'q0'], {}), '(q3, q2, q0)\n', (51096, 51108), False, 'import cirq\n'), ((51154, 51178), 'cirq.FREDKIN', 'cirq.FREDKIN', (['q1', 'q3', 'q2'], {}), '(q1, q3, q2)\n', (51166, 51178), False, 'import cirq\n'), ((52381, 52391), 'cirq.H', 'cirq.H', (['q0'], {}), '(q0)\n', (52387, 52391), False, 'import cirq\n'), ((52405, 52415), 'cirq.H', 'cirq.H', (['q1'], {}), '(q1)\n', (52411, 
52415), False, 'import cirq\n'), ((52429, 52439), 'cirq.H', 'cirq.H', (['q2'], {}), '(q2)\n', (52435, 52439), False, 'import cirq\n'), ((52453, 52463), 'cirq.H', 'cirq.H', (['q3'], {}), '(q3)\n', (52459, 52463), False, 'import cirq\n'), ((53001, 53011), 'cirq.H', 'cirq.H', (['q0'], {}), '(q0)\n', (53007, 53011), False, 'import cirq\n'), ((53025, 53035), 'cirq.H', 'cirq.H', (['q1'], {}), '(q1)\n', (53031, 53035), False, 'import cirq\n'), ((53049, 53059), 'cirq.H', 'cirq.H', (['q2'], {}), '(q2)\n', (53055, 53059), False, 'import cirq\n'), ((53073, 53083), 'cirq.H', 'cirq.H', (['q3'], {}), '(q3)\n', (53079, 53083), False, 'import cirq\n'), ((56379, 56389), 'cirq.H', 'cirq.H', (['q0'], {}), '(q0)\n', (56385, 56389), False, 'import cirq\n'), ((56403, 56413), 'cirq.H', 'cirq.H', (['q1'], {}), '(q1)\n', (56409, 56413), False, 'import cirq\n'), ((56427, 56437), 'cirq.H', 'cirq.H', (['q2'], {}), '(q2)\n', (56433, 56437), False, 'import cirq\n'), ((56451, 56461), 'cirq.H', 'cirq.H', (['q3'], {}), '(q3)\n', (56457, 56461), False, 'import cirq\n'), ((57690, 57699), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (57696, 57699), False, 'import cirq\n'), ((57716, 57725), 'cirq.Y', 'cirq.Y', (['b'], {}), '(b)\n', (57722, 57725), False, 'import cirq\n'), ((58569, 58578), 'cirq.H', 'cirq.H', (['b'], {}), '(b)\n', (58575, 58578), False, 'import cirq\n'), ((58592, 58601), 'cirq.X', 'cirq.X', (['c'], {}), '(c)\n', (58598, 58601), False, 'import cirq\n'), ((58615, 58624), 'cirq.H', 'cirq.H', (['d'], {}), '(d)\n', (58621, 58624), False, 'import cirq\n'), ((58700, 58713), 'cirq.CX', 'cirq.CX', (['b', 'c'], {}), '(b, c)\n', (58707, 58713), False, 'import cirq\n'), ((58727, 58736), 'cirq.S', 'cirq.S', (['d'], {}), '(d)\n', (58733, 58736), False, 'import cirq\n'), ((58782, 58791), 'cirq.I', 'cirq.I', (['a'], {}), '(a)\n', (58788, 58791), False, 'import cirq\n'), ((58805, 58821), 'cirq.ISWAP', 'cirq.ISWAP', (['b', 'c'], {}), '(b, c)\n', (58815, 58821), False, 'import cirq\n'), ((59730, 59739), 
'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (59736, 59739), False, 'import cirq\n'), ((59756, 59765), 'cirq.Y', 'cirq.Y', (['b'], {}), '(b)\n', (59762, 59765), False, 'import cirq\n'), ((60694, 60703), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (60700, 60703), False, 'import cirq\n'), ((60741, 60750), 'cirq.H', 'cirq.H', (['a'], {}), '(a)\n', (60747, 60750), False, 'import cirq\n'), ((60769, 60778), 'cirq.H', 'cirq.H', (['b'], {}), '(b)\n', (60775, 60778), False, 'import cirq\n'), ((60996, 61005), 'cirq.X', 'cirq.X', (['b'], {}), '(b)\n', (61002, 61005), False, 'import cirq\n'), ((61034, 61043), 'cirq.Z', 'cirq.Z', (['b'], {}), '(b)\n', (61040, 61043), False, 'import cirq\n'), ((2848, 2868), 'cirq.IdentityGate', 'cirq.IdentityGate', (['(7)'], {}), '(7)\n', (2865, 2868), False, 'import cirq\n'), ((4219, 4228), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (4225, 4228), False, 'import cirq\n'), ((4403, 4412), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (4409, 4412), False, 'import cirq\n'), ((5557, 5566), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (5563, 5566), False, 'import cirq\n'), ((18593, 18611), 'cirq.MatrixGate', 'cirq.MatrixGate', (['m'], {}), '(m)\n', (18608, 18611), False, 'import cirq\n'), ((19117, 19135), 'cirq.MatrixGate', 'cirq.MatrixGate', (['m'], {}), '(m)\n', (19132, 19135), False, 'import cirq\n'), ((20059, 20077), 'cirq.MatrixGate', 'cirq.MatrixGate', (['m'], {}), '(m)\n', (20074, 20077), False, 'import cirq\n'), ((22107, 22124), 'cirq.X', 'cirq.X', (['qubits[0]'], {}), '(qubits[0])\n', (22113, 22124), False, 'import cirq\n'), ((22159, 22179), 'cirq.CX', 'cirq.CX', (['*qubits[1:]'], {}), '(*qubits[1:])\n', (22166, 22179), False, 'import cirq\n'), ((22214, 22231), 'cirq.H', 'cirq.H', (['qubits[1]'], {}), '(qubits[1])\n', (22220, 22231), False, 'import cirq\n'), ((23742, 23759), 'cirq.X', 'cirq.X', (['qubits[0]'], {}), '(qubits[0])\n', (23748, 23759), False, 'import cirq\n'), ((23893, 23910), 'cirq.X', 'cirq.X', (['qubits[1]'], {}), '(qubits[1])\n', (23899, 
23910), False, 'import cirq\n'), ((24023, 24040), 'cirq.X', 'cirq.X', (['qubits[2]'], {}), '(qubits[2])\n', (24029, 24040), False, 'import cirq\n'), ((24693, 24710), 'cirq.X', 'cirq.X', (['qubits[2]'], {}), '(qubits[2])\n', (24699, 24710), False, 'import cirq\n'), ((26857, 26892), 'cirq.QuantumFourierTransformGate', 'cirq.QuantumFourierTransformGate', (['(4)'], {}), '(4)\n', (26889, 26892), False, 'import cirq\n'), ((28117, 28127), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (28123, 28127), False, 'import cirq\n'), ((28136, 28146), 'cirq.X', 'cirq.X', (['q1'], {}), '(q1)\n', (28142, 28146), False, 'import cirq\n'), ((29831, 29841), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (29837, 29841), False, 'import cirq\n'), ((29850, 29860), 'cirq.X', 'cirq.X', (['q1'], {}), '(q1)\n', (29856, 29860), False, 'import cirq\n'), ((31896, 31912), 'cirq.unitary', 'cirq.unitary', (['op'], {}), '(op)\n', (31908, 31912), False, 'import cirq\n'), ((32725, 32785), 'numpy.asarray', 'np.asarray', (['[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0])\n', (32735, 32785), True, 'import numpy as np\n'), ((32867, 32877), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (32874, 32877), True, 'import numpy as np\n'), ((32962, 33022), 'numpy.asarray', 'np.asarray', (['[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]'], {}), '([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0])\n', (32972, 33022), True, 'import numpy as np\n'), ((33104, 33114), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (33111, 33114), True, 'import numpy as np\n'), ((33355, 33375), 'cirq.X', 'cirq.X', (['cx_qubits[0]'], {}), '(cx_qubits[0])\n', (33361, 33375), False, 'import cirq\n'), ((34823, 34859), 'cirq.amplitude_damp', 'cirq.amplitude_damp', ([], {'gamma': 'damp_prob'}), '(gamma=damp_prob)\n', (34842, 34859), False, 'import cirq\n'), ((38183, 38215), 'cirq.ConvertToCzAndSingleGates', 'cirq.ConvertToCzAndSingleGates', ([], {}), '()\n', (38213, 38215), False, 
'import cirq\n'), ((38306, 38328), 'cirq.ExpandComposite', 'cirq.ExpandComposite', ([], {}), '()\n', (38326, 38328), False, 'import cirq\n'), ((47394, 47440), 'cirq.CZPowGate', 'cirq.CZPowGate', ([], {'exponent': '(0.7)', 'global_shift': '(0.2)'}), '(exponent=0.7, global_shift=0.2)\n', (47408, 47440), False, 'import cirq\n'), ((47462, 47508), 'cirq.CXPowGate', 'cirq.CXPowGate', ([], {'exponent': '(1.2)', 'global_shift': '(0.4)'}), '(exponent=1.2, global_shift=0.4)\n', (47476, 47508), False, 'import cirq\n'), ((47562, 47607), 'cirq.XPowGate', 'cirq.XPowGate', ([], {'exponent': '(0.3)', 'global_shift': '(1.1)'}), '(exponent=0.3, global_shift=1.1)\n', (47575, 47607), False, 'import cirq\n'), ((47625, 47668), 'cirq.YPowGate', 'cirq.YPowGate', ([], {'exponent': '(0.4)', 'global_shift': '(1)'}), '(exponent=0.4, global_shift=1)\n', (47638, 47668), False, 'import cirq\n'), ((47686, 47731), 'cirq.ZPowGate', 'cirq.ZPowGate', ([], {'exponent': '(0.5)', 'global_shift': '(0.9)'}), '(exponent=0.5, global_shift=0.9)\n', (47699, 47731), False, 'import cirq\n'), ((47749, 47794), 'cirq.HPowGate', 'cirq.HPowGate', ([], {'exponent': '(0.6)', 'global_shift': '(0.8)'}), '(exponent=0.6, global_shift=0.8)\n', (47762, 47794), False, 'import cirq\n'), ((48062, 48108), 'cirq.XXPowGate', 'cirq.XXPowGate', ([], {'exponent': '(0.4)', 'global_shift': '(0.7)'}), '(exponent=0.4, global_shift=0.7)\n', (48076, 48108), False, 'import cirq\n'), ((48130, 48176), 'cirq.YYPowGate', 'cirq.YYPowGate', ([], {'exponent': '(0.8)', 'global_shift': '(0.5)'}), '(exponent=0.8, global_shift=0.5)\n', (48144, 48176), False, 'import cirq\n'), ((48241, 48261), 'cirq.IdentityGate', 'cirq.IdentityGate', (['(2)'], {}), '(2)\n', (48258, 48261), False, 'import cirq\n'), ((48305, 48317), 'cirq.rx', 'cirq.rx', (['(0.7)'], {}), '(0.7)\n', (48312, 48317), False, 'import cirq\n'), ((48335, 48347), 'cirq.ry', 'cirq.ry', (['(0.2)'], {}), '(0.2)\n', (48342, 48347), False, 'import cirq\n'), ((48365, 48377), 'cirq.rz', 'cirq.rz', 
(['(0.4)'], {}), '(0.4)\n', (48372, 48377), False, 'import cirq\n'), ((48395, 48466), 'cirq.PhasedXPowGate', 'cirq.PhasedXPowGate', ([], {'phase_exponent': '(0.8)', 'exponent': '(0.6)', 'global_shift': '(0.3)'}), '(phase_exponent=0.8, exponent=0.6, global_shift=0.3)\n', (48414, 48466), False, 'import cirq\n'), ((48516, 48562), 'cirq.ZZPowGate', 'cirq.ZZPowGate', ([], {'exponent': '(0.3)', 'global_shift': '(1.3)'}), '(exponent=0.3, global_shift=1.3)\n', (48530, 48562), False, 'import cirq\n'), ((48584, 48633), 'cirq.ISwapPowGate', 'cirq.ISwapPowGate', ([], {'exponent': '(0.6)', 'global_shift': '(1.2)'}), '(exponent=0.6, global_shift=1.2)\n', (48601, 48633), False, 'import cirq\n'), ((48687, 48732), 'cirq.XPowGate', 'cirq.XPowGate', ([], {'exponent': '(0.1)', 'global_shift': '(0.9)'}), '(exponent=0.1, global_shift=0.9)\n', (48700, 48732), False, 'import cirq\n'), ((48750, 48793), 'cirq.YPowGate', 'cirq.YPowGate', ([], {'exponent': '(0.2)', 'global_shift': '(1)'}), '(exponent=0.2, global_shift=1)\n', (48763, 48793), False, 'import cirq\n'), ((48811, 48856), 'cirq.ZPowGate', 'cirq.ZPowGate', ([], {'exponent': '(0.3)', 'global_shift': '(1.1)'}), '(exponent=0.3, global_shift=1.1)\n', (48824, 48856), False, 'import cirq\n'), ((48874, 48919), 'cirq.HPowGate', 'cirq.HPowGate', ([], {'exponent': '(0.4)', 'global_shift': '(1.2)'}), '(exponent=0.4, global_shift=1.2)\n', (48887, 48919), False, 'import cirq\n'), ((48969, 49017), 'cirq.SwapPowGate', 'cirq.SwapPowGate', ([], {'exponent': '(0.2)', 'global_shift': '(0.9)'}), '(exponent=0.2, global_shift=0.9)\n', (48985, 49017), False, 'import cirq\n'), ((49039, 49096), 'cirq.PhasedISwapPowGate', 'cirq.PhasedISwapPowGate', ([], {'phase_exponent': '(0.8)', 'exponent': '(0.6)'}), '(phase_exponent=0.8, exponent=0.6)\n', (49062, 49096), False, 'import cirq\n'), ((49150, 49224), 'cirq.PhasedXZGate', 'cirq.PhasedXZGate', ([], {'x_exponent': '(0.2)', 'z_exponent': '(0.3)', 'axis_phase_exponent': '(1.4)'}), '(x_exponent=0.2, z_exponent=0.3, 
axis_phase_exponent=1.4)\n', (49167, 49224), False, 'import cirq\n'), ((49468, 49480), 'cirq.rx', 'cirq.rx', (['(0.8)'], {}), '(0.8)\n', (49475, 49480), False, 'import cirq\n'), ((49498, 49510), 'cirq.ry', 'cirq.ry', (['(0.9)'], {}), '(0.9)\n', (49505, 49510), False, 'import cirq\n'), ((49528, 49540), 'cirq.rz', 'cirq.rz', (['(1.2)'], {}), '(1.2)\n', (49535, 49540), False, 'import cirq\n'), ((49835, 49858), 'cirq.FSimGate', 'cirq.FSimGate', (['(0.3)', '(1.7)'], {}), '(0.3, 1.7)\n', (49848, 49858), False, 'import cirq\n'), ((49941, 49953), 'cirq.ry', 'cirq.ry', (['(1.3)'], {}), '(1.3)\n', (49948, 49953), False, 'import cirq\n'), ((49971, 49983), 'cirq.rz', 'cirq.rz', (['(0.4)'], {}), '(0.4)\n', (49978, 49983), False, 'import cirq\n'), ((50001, 50013), 'cirq.rx', 'cirq.rx', (['(0.7)'], {}), '(0.7)\n', (50008, 50013), False, 'import cirq\n'), ((50172, 50219), 'cirq.CCZPowGate', 'cirq.CCZPowGate', ([], {'exponent': '(0.7)', 'global_shift': '(0.3)'}), '(exponent=0.7, global_shift=0.3)\n', (50187, 50219), False, 'import cirq\n'), ((50450, 50462), 'cirq.rx', 'cirq.rx', (['(0.3)'], {}), '(0.3)\n', (50457, 50462), False, 'import cirq\n'), ((50480, 50492), 'cirq.ry', 'cirq.ry', (['(0.5)'], {}), '(0.5)\n', (50487, 50492), False, 'import cirq\n'), ((50510, 50522), 'cirq.rz', 'cirq.rz', (['(0.7)'], {}), '(0.7)\n', (50517, 50522), False, 'import cirq\n'), ((50540, 50552), 'cirq.rx', 'cirq.rx', (['(0.9)'], {}), '(0.9)\n', (50547, 50552), False, 'import cirq\n'), ((50602, 50649), 'cirq.TwoQubitDiagonalGate', 'cirq.TwoQubitDiagonalGate', (['[0.1, 0.2, 0.3, 0.4]'], {}), '([0.1, 0.2, 0.3, 0.4])\n', (50627, 50649), False, 'import cirq\n'), ((50703, 50770), 'cirq.ThreeQubitDiagonalGate', 'cirq.ThreeQubitDiagonalGate', (['[0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.2, 1.3]'], {}), '([0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.2, 1.3])\n', (50730, 50770), False, 'import cirq\n'), ((50858, 50874), 'cirq.CSwapGate', 'cirq.CSwapGate', ([], {}), '()\n', (50872, 50874), False, 'import cirq\n'), ((50932, 50944), 
'cirq.rz', 'cirq.rz', (['(0.6)'], {}), '(0.6)\n', (50939, 50944), False, 'import cirq\n'), ((50962, 50974), 'cirq.rx', 'cirq.rx', (['(0.7)'], {}), '(0.7)\n', (50969, 50974), False, 'import cirq\n'), ((50992, 51004), 'cirq.ry', 'cirq.ry', (['(0.8)'], {}), '(0.8)\n', (50999, 51004), False, 'import cirq\n'), ((51022, 51034), 'cirq.rz', 'cirq.rz', (['(0.9)'], {}), '(0.9)\n', (51029, 51034), False, 'import cirq\n'), ((52273, 52289), 'cirq.riswap', 'cirq.riswap', (['(0.7)'], {}), '(0.7)\n', (52284, 52289), False, 'import cirq\n'), ((52311, 52327), 'cirq.givens', 'cirq.givens', (['(1.2)'], {}), '(1.2)\n', (52322, 52327), False, 'import cirq\n'), ((53129, 53173), 'cirq.CXPowGate', 'cirq.CXPowGate', ([], {'exponent': '(1)', 'global_shift': '(0.7)'}), '(exponent=1, global_shift=0.7)\n', (53143, 53173), False, 'import cirq\n'), ((53195, 53239), 'cirq.CZPowGate', 'cirq.CZPowGate', ([], {'exponent': '(1)', 'global_shift': '(0.9)'}), '(exponent=1, global_shift=0.9)\n', (53209, 53239), False, 'import cirq\n'), ((53293, 53336), 'cirq.XPowGate', 'cirq.XPowGate', ([], {'exponent': '(1)', 'global_shift': '(1.1)'}), '(exponent=1, global_shift=1.1)\n', (53306, 53336), False, 'import cirq\n'), ((53354, 53395), 'cirq.YPowGate', 'cirq.YPowGate', ([], {'exponent': '(1)', 'global_shift': '(1)'}), '(exponent=1, global_shift=1)\n', (53367, 53395), False, 'import cirq\n'), ((53413, 53456), 'cirq.ZPowGate', 'cirq.ZPowGate', ([], {'exponent': '(1)', 'global_shift': '(0.9)'}), '(exponent=1, global_shift=0.9)\n', (53426, 53456), False, 'import cirq\n'), ((53474, 53517), 'cirq.HPowGate', 'cirq.HPowGate', ([], {'exponent': '(1)', 'global_shift': '(0.8)'}), '(exponent=1, global_shift=0.8)\n', (53487, 53517), False, 'import cirq\n'), ((53567, 53611), 'cirq.XXPowGate', 'cirq.XXPowGate', ([], {'exponent': '(1)', 'global_shift': '(0.2)'}), '(exponent=1, global_shift=0.2)\n', (53581, 53611), False, 'import cirq\n'), ((53633, 53677), 'cirq.YYPowGate', 'cirq.YYPowGate', ([], {'exponent': '(1)', 
'global_shift': '(0.3)'}), '(exponent=1, global_shift=0.3)\n', (53647, 53677), False, 'import cirq\n'), ((53731, 53777), 'cirq.ZPowGate', 'cirq.ZPowGate', ([], {'exponent': '(0.25)', 'global_shift': '(0.4)'}), '(exponent=0.25, global_shift=0.4)\n', (53744, 53777), False, 'import cirq\n'), ((53795, 53840), 'cirq.ZPowGate', 'cirq.ZPowGate', ([], {'exponent': '(0.5)', 'global_shift': '(0.5)'}), '(exponent=0.5, global_shift=0.5)\n', (53808, 53840), False, 'import cirq\n'), ((53858, 53901), 'cirq.YPowGate', 'cirq.YPowGate', ([], {'exponent': '(1)', 'global_shift': '(0.2)'}), '(exponent=1, global_shift=0.2)\n', (53871, 53901), False, 'import cirq\n'), ((53919, 53962), 'cirq.ZPowGate', 'cirq.ZPowGate', ([], {'exponent': '(1)', 'global_shift': '(0.3)'}), '(exponent=1, global_shift=0.3)\n', (53932, 53962), False, 'import cirq\n'), ((54012, 54056), 'cirq.ZZPowGate', 'cirq.ZZPowGate', ([], {'exponent': '(1)', 'global_shift': '(0.2)'}), '(exponent=1, global_shift=0.2)\n', (54026, 54056), False, 'import cirq\n'), ((54078, 54124), 'cirq.SwapPowGate', 'cirq.SwapPowGate', ([], {'exponent': '(1)', 'global_shift': '(0.3)'}), '(exponent=1, global_shift=0.3)\n', (54094, 54124), False, 'import cirq\n'), ((54178, 54219), 'cirq.XPowGate', 'cirq.XPowGate', ([], {'exponent': '(1)', 'global_shift': '(0)'}), '(exponent=1, global_shift=0)\n', (54191, 54219), False, 'import cirq\n'), ((54237, 54278), 'cirq.YPowGate', 'cirq.YPowGate', ([], {'exponent': '(1)', 'global_shift': '(0)'}), '(exponent=1, global_shift=0)\n', (54250, 54278), False, 'import cirq\n'), ((54296, 54337), 'cirq.ZPowGate', 'cirq.ZPowGate', ([], {'exponent': '(1)', 'global_shift': '(0)'}), '(exponent=1, global_shift=0)\n', (54309, 54337), False, 'import cirq\n'), ((54355, 54396), 'cirq.HPowGate', 'cirq.HPowGate', ([], {'exponent': '(1)', 'global_shift': '(0)'}), '(exponent=1, global_shift=0)\n', (54368, 54396), False, 'import cirq\n'), ((54446, 54493), 'cirq.ISwapPowGate', 'cirq.ISwapPowGate', ([], {'exponent': '(1)', 
'global_shift': '(0.3)'}), '(exponent=1, global_shift=0.3)\n', (54463, 54493), False, 'import cirq\n'), ((54515, 54559), 'cirq.ZZPowGate', 'cirq.ZZPowGate', ([], {'exponent': '(1)', 'global_shift': '(0.5)'}), '(exponent=1, global_shift=0.5)\n', (54529, 54559), False, 'import cirq\n'), ((54613, 54656), 'cirq.ZPowGate', 'cirq.ZPowGate', ([], {'exponent': '(0.5)', 'global_shift': '(0)'}), '(exponent=0.5, global_shift=0)\n', (54626, 54656), False, 'import cirq\n'), ((54674, 54718), 'cirq.ZPowGate', 'cirq.ZPowGate', ([], {'exponent': '(0.25)', 'global_shift': '(0)'}), '(exponent=0.25, global_shift=0)\n', (54687, 54718), False, 'import cirq\n'), ((54736, 54779), 'cirq.XPowGate', 'cirq.XPowGate', ([], {'exponent': '(0.9)', 'global_shift': '(0)'}), '(exponent=0.9, global_shift=0)\n', (54749, 54779), False, 'import cirq\n'), ((54797, 54840), 'cirq.YPowGate', 'cirq.YPowGate', ([], {'exponent': '(0.8)', 'global_shift': '(0)'}), '(exponent=0.8, global_shift=0)\n', (54810, 54840), False, 'import cirq\n'), ((54890, 54934), 'cirq.CZPowGate', 'cirq.CZPowGate', ([], {'exponent': '(0.3)', 'global_shift': '(0)'}), '(exponent=0.3, global_shift=0)\n', (54904, 54934), False, 'import cirq\n'), ((54956, 55000), 'cirq.CXPowGate', 'cirq.CXPowGate', ([], {'exponent': '(0.4)', 'global_shift': '(0)'}), '(exponent=0.4, global_shift=0)\n', (54970, 55000), False, 'import cirq\n'), ((55054, 55097), 'cirq.ZPowGate', 'cirq.ZPowGate', ([], {'exponent': '(1.3)', 'global_shift': '(0)'}), '(exponent=1.3, global_shift=0)\n', (55067, 55097), False, 'import cirq\n'), ((55115, 55158), 'cirq.HPowGate', 'cirq.HPowGate', ([], {'exponent': '(0.8)', 'global_shift': '(0)'}), '(exponent=0.8, global_shift=0)\n', (55128, 55158), False, 'import cirq\n'), ((55176, 55219), 'cirq.XPowGate', 'cirq.XPowGate', ([], {'exponent': '(0.9)', 'global_shift': '(0)'}), '(exponent=0.9, global_shift=0)\n', (55189, 55219), False, 'import cirq\n'), ((55237, 55280), 'cirq.YPowGate', 'cirq.YPowGate', ([], {'exponent': '(0.4)', 
'global_shift': '(0)'}), '(exponent=0.4, global_shift=0)\n', (55250, 55280), False, 'import cirq\n'), ((55330, 55374), 'cirq.XXPowGate', 'cirq.XXPowGate', ([], {'exponent': '(0.8)', 'global_shift': '(0)'}), '(exponent=0.8, global_shift=0)\n', (55344, 55374), False, 'import cirq\n'), ((55396, 55440), 'cirq.YYPowGate', 'cirq.YYPowGate', ([], {'exponent': '(0.6)', 'global_shift': '(0)'}), '(exponent=0.6, global_shift=0)\n', (55410, 55440), False, 'import cirq\n'), ((55494, 55537), 'cirq.HPowGate', 'cirq.HPowGate', ([], {'exponent': '(0.7)', 'global_shift': '(0)'}), '(exponent=0.7, global_shift=0)\n', (55507, 55537), False, 'import cirq\n'), ((55555, 55598), 'cirq.ZPowGate', 'cirq.ZPowGate', ([], {'exponent': '(0.2)', 'global_shift': '(0)'}), '(exponent=0.2, global_shift=0)\n', (55568, 55598), False, 'import cirq\n'), ((55616, 55659), 'cirq.YPowGate', 'cirq.YPowGate', ([], {'exponent': '(0.3)', 'global_shift': '(0)'}), '(exponent=0.3, global_shift=0)\n', (55629, 55659), False, 'import cirq\n'), ((55677, 55720), 'cirq.XPowGate', 'cirq.XPowGate', ([], {'exponent': '(0.7)', 'global_shift': '(0)'}), '(exponent=0.7, global_shift=0)\n', (55690, 55720), False, 'import cirq\n'), ((55770, 55814), 'cirq.ZZPowGate', 'cirq.ZZPowGate', ([], {'exponent': '(0.1)', 'global_shift': '(0)'}), '(exponent=0.1, global_shift=0)\n', (55784, 55814), False, 'import cirq\n'), ((55836, 55882), 'cirq.SwapPowGate', 'cirq.SwapPowGate', ([], {'exponent': '(0.6)', 'global_shift': '(0)'}), '(exponent=0.6, global_shift=0)\n', (55852, 55882), False, 'import cirq\n'), ((55936, 55979), 'cirq.XPowGate', 'cirq.XPowGate', ([], {'exponent': '(0.4)', 'global_shift': '(0)'}), '(exponent=0.4, global_shift=0)\n', (55949, 55979), False, 'import cirq\n'), ((55997, 56040), 'cirq.YPowGate', 'cirq.YPowGate', ([], {'exponent': '(0.3)', 'global_shift': '(0)'}), '(exponent=0.3, global_shift=0)\n', (56010, 56040), False, 'import cirq\n'), ((56058, 56101), 'cirq.ZPowGate', 'cirq.ZPowGate', ([], {'exponent': '(0.2)', 
'global_shift': '(0)'}), '(exponent=0.2, global_shift=0)\n', (56071, 56101), False, 'import cirq\n'), ((56119, 56162), 'cirq.HPowGate', 'cirq.HPowGate', ([], {'exponent': '(0.1)', 'global_shift': '(0)'}), '(exponent=0.1, global_shift=0)\n', (56132, 56162), False, 'import cirq\n'), ((56212, 56259), 'cirq.ISwapPowGate', 'cirq.ISwapPowGate', ([], {'exponent': '(1.3)', 'global_shift': '(0)'}), '(exponent=1.3, global_shift=0)\n', (56229, 56259), False, 'import cirq\n'), ((56281, 56325), 'cirq.CXPowGate', 'cirq.CXPowGate', ([], {'exponent': '(0.5)', 'global_shift': '(0)'}), '(exponent=0.5, global_shift=0)\n', (56295, 56325), False, 'import cirq\n'), ((58539, 58548), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (58545, 58548), False, 'import cirq\n'), ((58670, 58679), 'cirq.X', 'cirq.X', (['a'], {}), '(a)\n', (58676, 58679), False, 'import cirq\n'), ((14870, 14880), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (14876, 14880), False, 'import cirq\n'), ((28288, 28305), 'cirq.kraus', 'cirq.kraus', (['pflip'], {}), '(pflip)\n', (28298, 28305), False, 'import cirq\n'), ((28355, 28372), 'cirq.kraus', 'cirq.kraus', (['bflip'], {}), '(bflip)\n', (28365, 28372), False, 'import cirq\n'), ((30003, 30023), 'cirq.kraus', 'cirq.kraus', (['amp_damp'], {}), '(amp_damp)\n', (30013, 30023), False, 'import cirq\n'), ((30074, 30098), 'cirq.kraus', 'cirq.kraus', (['gen_amp_damp'], {}), '(gen_amp_damp)\n', (30084, 30098), False, 'import cirq\n'), ((36102, 36135), 'cirq.X.on_each', 'cirq.X.on_each', (['*operation.qubits'], {}), '(*operation.qubits)\n', (36116, 36135), False, 'import cirq\n'), ((50087, 50107), 'cirq.IdentityGate', 'cirq.IdentityGate', (['(4)'], {}), '(4)\n', (50104, 50107), False, 'import cirq\n'), ((51257, 51403), 'numpy.array', 'np.array', (['[[0, -0.5 - 0.5j, -0.5 - 0.5j, 0], [0.5 - 0.5j, 0, 0, -0.5 + 0.5j], [0.5 - \n 0.5j, 0, 0, 0.5 - 0.5j], [0, -0.5 - 0.5j, 0.5 + 0.5j, 0]]'], {}), '([[0, -0.5 - 0.5j, -0.5 - 0.5j, 0], [0.5 - 0.5j, 0, 0, -0.5 + 0.5j],\n [0.5 - 0.5j, 0, 0, 0.5 - 
0.5j], [0, -0.5 - 0.5j, 0.5 + 0.5j, 0]])\n', (51265, 51403), True, 'import numpy as np\n'), ((51625, 51771), 'numpy.array', 'np.array', (['[[0.5 - 0.5j, 0, 0, -0.5 + 0.5j], [0, 0.5 - 0.5j, -0.5 + 0.5j, 0], [0, -0.5 +\n 0.5j, -0.5 + 0.5j, 0], [0.5 - 0.5j, 0, 0, 0.5 - 0.5j]]'], {}), '([[0.5 - 0.5j, 0, 0, -0.5 + 0.5j], [0, 0.5 - 0.5j, -0.5 + 0.5j, 0],\n [0, -0.5 + 0.5j, -0.5 + 0.5j, 0], [0.5 - 0.5j, 0, 0, 0.5 - 0.5j]])\n', (51633, 51771), True, 'import numpy as np\n'), ((52008, 52037), 'numpy.array', 'np.array', (['[[1, 0], [0, 1.0j]]'], {}), '([[1, 0], [0, 1.0j]])\n', (52016, 52037), True, 'import numpy as np\n'), ((52070, 52103), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {}), '([[0, -1.0j], [1.0j, 0]])\n', (52078, 52103), True, 'import numpy as np\n'), ((52134, 52160), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (52142, 52160), True, 'import numpy as np\n'), ((52195, 52222), 'numpy.array', 'np.array', (['[[1, 0], [0, -1]]'], {}), '([[1, 0], [0, -1]])\n', (52203, 52222), True, 'import numpy as np\n'), ((9215, 9225), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (9221, 9225), False, 'import cirq\n'), ((9228, 9238), 'cirq.Z', 'cirq.Z', (['q1'], {}), '(q1)\n', (9234, 9238), False, 'import cirq\n'), ((9440, 9450), 'cirq.X', 'cirq.X', (['q0'], {}), '(q0)\n', (9446, 9450), False, 'import cirq\n'), ((9453, 9463), 'cirq.Z', 'cirq.Z', (['q1'], {}), '(q1)\n', (9459, 9463), False, 'import cirq\n'), ((22887, 22906), 'cirq.MatrixGate', 'cirq.MatrixGate', (['m1'], {}), '(m1)\n', (22902, 22906), False, 'import cirq\n'), ((22955, 22974), 'cirq.MatrixGate', 'cirq.MatrixGate', (['m2'], {}), '(m2)\n', (22970, 22974), False, 'import cirq\n'), ((23026, 23045), 'cirq.MatrixGate', 'cirq.MatrixGate', (['m1'], {}), '(m1)\n', (23041, 23045), False, 'import cirq\n'), ((23134, 23153), 'cirq.MatrixGate', 'cirq.MatrixGate', (['m2'], {}), '(m2)\n', (23149, 23153), False, 'import cirq\n'), ((25295, 25322), 'cirq.H.on_each', 'cirq.H.on_each', 
(['*qubits[1:]'], {}), '(*qubits[1:])\n', (25309, 25322), False, 'import cirq\n'), ((28412, 28428), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (28426, 28428), False, 'import cirq\n'), ((30138, 30154), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (30152, 30154), False, 'import cirq\n'), ((33592, 33608), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (33606, 33608), False, 'import cirq\n'), ((50277, 50324), 'cirq.CCXPowGate', 'cirq.CCXPowGate', ([], {'exponent': '(0.4)', 'global_shift': '(0.6)'}), '(exponent=0.4, global_shift=0.6)\n', (50292, 50324), False, 'import cirq\n')] |
#!/usr/bin/env python
# coding: utf-8
# Notebook export: King County (Seattle) house-price regression — one-hot
# encodes zip code and sale year/month, expands to degree-2 polynomial
# features, and fits ordinary least squares.
# In[ ]:
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('../input/kc_house_data.csv')
df.head()
# In[ ]:
# Get Year and Month
# 'date' is formatted like '20141013T000000': chars [:4] are the year,
# chars [4:6] the month.
df['Year'], df['Month'] = df['date'].str[:4], df['date'].str[4:6]
df.head()
# In[ ]:
# Review info
df.info()
# In[ ]:
# Correlation heatmap; the upper triangle is masked to avoid showing each
# pairwise correlation twice.
plt.figure(figsize=(15,12))
mask = np.zeros_like(df.corr())
mask[np.triu_indices_from(mask)] = True
with sns.axes_style('white'):
    ax = sns.heatmap(df.corr(), mask=mask, vmax=.3, annot=True)
# In[ ]:
# Split label from X
y = df['price']
X = df.drop('price', axis=1)
# In[ ]:
# Convert yr_renovated to years since renovation
# (yr_renovated == 0 means "never renovated"; those rows become 0 via fillna)
X['sold_year'] = X['date'].apply(lambda x: int(x[:4]))
X['yrs_since_renovated'] = (X['sold_year'] - X['yr_renovated'][X['yr_renovated'] != 0]).fillna(0)
# Create dummy features for zip code
zip_dummies = pd.get_dummies(X['zipcode'], prefix='zipcode')
X = pd.concat([X, zip_dummies], axis=1)
# Create dummy features for year and month
year_dummies = pd.get_dummies(X['Year'], prefix='year')
month_dummies = pd.get_dummies(X['Month'], prefix='month')
X = pd.concat([X, year_dummies, month_dummies], axis=1)
# Drop the raw columns that were replaced by the engineered/dummy features
# (revisit later to add back if useful)
X = X.drop(['date', 'yr_renovated', 'sold_year', 'zipcode', 'id'], axis=1)
X.head()
# In[ ]:
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import PolynomialFeatures
# Create Polynomial Features (degree 2, includes interaction terms)
poly = PolynomialFeatures(2)
X_poly = poly.fit_transform(X)
# Split train and test sets
X_train, X_test, y_train, y_test = train_test_split(X_poly, y, test_size=0.4)
# Fit model
model = LinearRegression()
model.fit(X_train, y_train)
# Return MSE
print('Train set MSE: {}'.format(mean_squared_error(y_train, model.predict(X_train))))
print('Test set MSE: {}'.format(mean_squared_error(y_test, model.predict(X_test))))
# In[ ]:
# Return R^2
print('Train Score: {:.2f}'.format(model.score(X_train, y_train)))
print('Test Score: {:.2f}'.format(model.score(X_test, y_test)))
# In[ ]:
# Kaggle kept timing out when generating the Learning curve, so I commented it out
# any ideas why this happens?
# Debug learning Curve
#from sklearn.model_selection import learning_curve
#
#train_sizes, train_scores, valid_scores = learning_curve(model, X_train, y_train,
#                           train_sizes=np.linspace(.1, 1.0, 5), cv=5)
# In[ ]:
#plt.grid()
#plt.plot(train_sizes, train_scores, label='Training Score')
#plt.plot(train_sizes, valid_scores, label='Test Score')
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| [
"sklearn.preprocessing.PolynomialFeatures",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.triu_indices_from",
"matplotlib.pyplot.figure",
"pandas.get_dummies",
"seaborn.axes_style",
"pandas.concat",
"sklearn.linear_model.LinearRegression"
] | [((202, 243), 'pandas.read_csv', 'pd.read_csv', (['"""../input/kc_house_data.csv"""'], {}), "('../input/kc_house_data.csv')\n", (213, 243), True, 'import pandas as pd\n'), ((414, 442), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 12)'}), '(figsize=(15, 12))\n', (424, 442), True, 'import matplotlib.pyplot as plt\n'), ((954, 1000), 'pandas.get_dummies', 'pd.get_dummies', (["X['zipcode']"], {'prefix': '"""zipcode"""'}), "(X['zipcode'], prefix='zipcode')\n", (968, 1000), True, 'import pandas as pd\n'), ((1005, 1040), 'pandas.concat', 'pd.concat', (['[X, zip_dummies]'], {'axis': '(1)'}), '([X, zip_dummies], axis=1)\n', (1014, 1040), True, 'import pandas as pd\n'), ((1100, 1140), 'pandas.get_dummies', 'pd.get_dummies', (["X['Year']"], {'prefix': '"""year"""'}), "(X['Year'], prefix='year')\n", (1114, 1140), True, 'import pandas as pd\n'), ((1157, 1199), 'pandas.get_dummies', 'pd.get_dummies', (["X['Month']"], {'prefix': '"""month"""'}), "(X['Month'], prefix='month')\n", (1171, 1199), True, 'import pandas as pd\n'), ((1204, 1255), 'pandas.concat', 'pd.concat', (['[X, year_dummies, month_dummies]'], {'axis': '(1)'}), '([X, year_dummies, month_dummies], axis=1)\n', (1213, 1255), True, 'import pandas as pd\n'), ((1653, 1674), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(2)'], {}), '(2)\n', (1671, 1674), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1770, 1812), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_poly', 'y'], {'test_size': '(0.4)'}), '(X_poly, y, test_size=0.4)\n', (1786, 1812), False, 'from sklearn.model_selection import train_test_split\n'), ((1834, 1852), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1850, 1852), False, 'from sklearn.linear_model import LinearRegression, Lasso\n'), ((479, 505), 'numpy.triu_indices_from', 'np.triu_indices_from', (['mask'], {}), '(mask)\n', (499, 505), True, 'import numpy as np\n'), ((519, 542), 
'seaborn.axes_style', 'sns.axes_style', (['"""white"""'], {}), "('white')\n", (533, 542), True, 'import seaborn as sns\n')] |
import numpy as np
import torch
from scipy.signal import convolve2d, correlate2d
from torch.autograd import Function
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import EggNet.NeuralNetwork.Ext.NeuralNetworkExtension as NNExt
class ReLU_FPGA(torch.autograd.Function):
    """
    Custom autograd Function intended to offload the ReLU forward pass to
    the FPGA (via the EggNet NeuralNetworkExtension bindings) while keeping
    a software backward pass for training.
    """

    @staticmethod
    def forward(ctx, input):
        """
        In the forward pass we receive a Tensor containing the input and return
        a Tensor containing the output. ctx is a context object that can be used
        to stash information for backward computation. You can cache arbitrary
        objects for use in the backward pass using the ctx.save_for_backward method.
        ToDo: Place here the access to the FPGA
        """
        ctx.save_for_backward(input)
        # BUG FIX: the original code called ``NNExt.relu1D(NNExt)``, passing
        # the extension module itself instead of the input tensor.
        return NNExt.relu1D(input)

    @staticmethod
    def backward(ctx, grad_output):
        """
        In the backward pass we receive a Tensor containing the gradient of the loss
        with respect to the output, and we need to compute the gradient of the loss
        with respect to the input (zero wherever the forward input was negative).
        """
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        grad_input[input < 0] = 0
        return grad_input
class FullyConnect_FPGA(torch.autograd.Function):
    """
    Custom autograd Function meant to route a fully-connected layer to the
    FPGA. The forward pass is currently a placeholder (a ReLU-style clamp)
    until the hardware path is wired in; the backward pass is implemented in
    software.
    """

    @staticmethod
    def forward(ctx, input):
        """
        Placeholder forward pass.

        Stashes ``input`` on the context object (``ctx.save_for_backward``)
        for later use in ``backward`` and returns the tensor clamped at zero.

        ToDo: Place here the access to the FPGA
        """
        ctx.save_for_backward(input)
        return torch.clamp(input, min=0)

    @staticmethod
    def backward(ctx, grad_output):
        """
        Compute the gradient of the loss w.r.t. the input: the upstream
        gradient is passed through wherever the saved forward input was
        non-negative and zeroed elsewhere.
        """
        (saved_input,) = ctx.saved_tensors
        grad = grad_output.clone()
        grad[saved_input < 0] = 0
        return grad
class Rescale(torch.autograd.Function):
    # Unimplemented stub of a rescaling autograd Function: both passes
    # currently do nothing (implicitly return None).
    # NOTE(review): ``forward`` is missing the usual ``ctx`` first argument
    # expected by torch.autograd.Function — confirm before implementing.
    @staticmethod
    def forward(x, *args):
        pass
    @staticmethod
    def backward(*args):
        pass
class ScipyConv2dFunction(Function):
    """Autograd Function computing a 2-D 'valid' cross-correlation plus a
    scalar bias via SciPy, with all gradients evaluated in NumPy."""

    @staticmethod
    def forward(ctx, input, filter, bias):
        """Cross-correlate ``input`` with ``filter`` (mode='valid'), add
        ``bias``, and stash the detached operands for the backward pass."""
        # Detach from the autograd graph so the tensors can be cast to NumPy.
        x = input.detach()
        w = filter.detach()
        b = bias.detach()
        out = correlate2d(x.numpy(), w.numpy(), mode='valid')
        out += b.numpy()
        ctx.save_for_backward(x, w, b)
        return torch.as_tensor(out, dtype=x.dtype)

    @staticmethod
    def backward(ctx, grad_output):
        """Return the gradients w.r.t. (input, filter, bias) given the
        upstream gradient."""
        x, w, _ = ctx.saved_tensors
        g = grad_output.detach().numpy()
        # Bias gradient: total of the upstream gradient, kept 2-D.
        grad_bias = np.sum(g, keepdims=True)
        grad_input = convolve2d(g, w.numpy(), mode='full')
        # the previous line can be expressed equivalently as:
        # grad_input = correlate2d(g, flip(flip(w.numpy(), axis=0), axis=1), mode='full')
        grad_filter = correlate2d(x.numpy(), g, mode='valid')
        return (torch.from_numpy(grad_input),
                torch.from_numpy(grad_filter).to(torch.float),
                torch.from_numpy(grad_bias).to(torch.float))
class ScipyConv2d(Module):
    """nn.Module wrapper around :class:`ScipyConv2dFunction` holding a
    learnable filter and scalar bias, both drawn from a standard normal."""

    def __init__(self, filter_width, filter_height):
        super().__init__()
        weight = torch.randn(filter_width, filter_height)
        self.filter = Parameter(weight)
        self.bias = Parameter(torch.randn(1, 1))

    def forward(self, input):
        """Apply the SciPy-backed 'valid' cross-correlation to ``input``."""
        return ScipyConv2dFunction.apply(input, self.filter, self.bias)
class RescaleLayer(Module):
    """Standardises its input using running estimates of the mean and
    variance accumulated over every tensor seen so far.

    Fixes over the original implementation:
    * the very first call no longer produces NaNs (the running variance was
      exactly 0 while the numerator was also 0, i.e. 0/0);
    * the variance-update weights now sum to one (the original mixed
      ``1/count`` and ``count/(count+1)``);
    * the input is divided by the standard deviation (square root of the
      variance) instead of the variance itself.
    """

    def __init__(self):
        super(RescaleLayer, self).__init__()
        # Running mean, running variance (name kept for compatibility with
        # any external reader), and number of updates applied.
        self.moving_average = 0
        self.moving_std = 0
        self.count = 0

    def forward(self, *input, **kwargs):
        """Update the running statistics with ``input[0]`` and return the
        input standardised by them."""
        x = input[0]
        n = self.count
        # Cumulative mean: new_mean = (n * old_mean + x) / (n + 1).
        self.moving_average = self.moving_average * (n / (n + 1)) + x / (n + 1)
        # Cumulative variance around the updated mean; weights sum to one.
        self.moving_std = (self.moving_std * (n / (n + 1))
                           + (x - self.moving_average) ** 2 / (n + 1))
        self.count = n + 1
        eps = 1e-8  # guards against division by zero on early calls
        return (x - self.moving_average) / ((self.moving_std + eps) ** 0.5)
| [
"torch.as_tensor",
"torch.from_numpy",
"EggNet.NeuralNetwork.Ext.NeuralNetworkExtension.relu1D",
"numpy.sum",
"torch.randn"
] | [((1001, 1020), 'EggNet.NeuralNetwork.Ext.NeuralNetworkExtension.relu1D', 'NNExt.relu1D', (['NNExt'], {}), '(NNExt)\n', (1013, 1020), True, 'import EggNet.NeuralNetwork.Ext.NeuralNetworkExtension as NNExt\n'), ((3170, 3212), 'torch.as_tensor', 'torch.as_tensor', (['result'], {'dtype': 'input.dtype'}), '(result, dtype=input.dtype)\n', (3185, 3212), False, 'import torch\n'), ((3421, 3455), 'numpy.sum', 'np.sum', (['grad_output'], {'keepdims': '(True)'}), '(grad_output, keepdims=True)\n', (3427, 3455), True, 'import numpy as np\n'), ((3788, 3816), 'torch.from_numpy', 'torch.from_numpy', (['grad_input'], {}), '(grad_input)\n', (3804, 3816), False, 'import torch\n'), ((4080, 4120), 'torch.randn', 'torch.randn', (['filter_width', 'filter_height'], {}), '(filter_width, filter_height)\n', (4091, 4120), False, 'import torch\n'), ((4152, 4169), 'torch.randn', 'torch.randn', (['(1)', '(1)'], {}), '(1, 1)\n', (4163, 4169), False, 'import torch\n'), ((3818, 3847), 'torch.from_numpy', 'torch.from_numpy', (['grad_filter'], {}), '(grad_filter)\n', (3834, 3847), False, 'import torch\n'), ((3865, 3892), 'torch.from_numpy', 'torch.from_numpy', (['grad_bias'], {}), '(grad_bias)\n', (3881, 3892), False, 'import torch\n')] |
import os
import numpy as np
import pandas as pd
baseline_data_dir = "../data/baselines"
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 2015_WP #
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
class WP_2015(object):
    """Baseline LES/observation data from the 2015 Wind Energy paper.

    Provides the turbine-array layouts and the LES/OBS power columns needed
    to reproduce Figure 6 for three inflow directions.
    """

    def __init__(self,):
        pass

    @classmethod
    def Fig_6(self, direction, sectors):
        """Return ``(turbine_array, power_data)`` for Figure 6.

        Parameters
        ----------
        direction : int
            Inflow wind direction in degrees; one of 270, 222 or 312.
        sectors : list or None
            Wind sectors to select; every entry must be one of 1., 5., 10.,
            15. A falsy value (None or []) selects the first eight data
            columns instead.
        """

        def turbine_array(direction):
            # Build the 8-row matrix of turbine indices ordered along the
            # given inflow direction.
            assert direction in [270, 222, 312], \
                "Invalid wind direction in WP_2015!"
            if direction == 270:
                wt_array_270 = np.array([1, 9, 17, 25, 33, 41, 49, 57])
                wts_270 = np.zeros((8, 8))
                for i in range(8):
                    wts_270[i, :] = wt_array_270 + i * 1
                # FIX: ``np.int`` was deprecated in NumPy 1.20 and removed
                # in 1.24; the builtin ``int`` is the documented replacement.
                return wts_270.astype(int)
            if direction == 222:
                wt_array_222 = np.array([8, 15, 22, 29, 36])
                wts_222 = np.zeros((8, 5))
                wts_222[3, :] = wt_array_222
                for i in range(3):
                    wts_222[i, :] = wt_array_222 - (3 - i)
                for i in range(4):
                    wts_222[4 + i] = wt_array_222 + 8 * (i + 1)
                return wts_222.astype(int)
            else:
                wt_array_312 = np.array([1, 10, 19, 28, 37])
                wts_312 = np.zeros((8, 5))
                wts_312[4, :] = wt_array_312
                for i in range(4):
                    wts_312[i, :] = wt_array_312 + 8 * (4 - i)
                for i in range(3):
                    wts_312[5 + i] = wt_array_312 + (i + 1)
                return wts_312.astype(int)

        def power_data(direction, sectors):
            # ROBUSTNESS: a falsy ``sectors`` (e.g. None) previously raised
            # TypeError in the assert below; normalise it to an empty list.
            sectors = list(sectors) if sectors else []
            assert len(set(sectors + [1., 5., 10., 15.])) == 4, \
                "Invalid wind sector in WP_2015!"
            file_dir = os.path.dirname(os.path.abspath(__file__))
            data = pd.read_csv(
                os.path.join(file_dir, f"../data/baselines/WP_2015/Fig_6/LES_OBS_{int(direction)}.csv"), header=0)
            if sectors:
                cols = []
                for s in sectors:
                    cols.extend([f"{direction}+{int(s)}+LES", f"{direction}+{int(s)}+OBS"])
                return data[cols]
            else:
                return data.iloc[:, :8]

        return turbine_array(direction), power_data(direction, sectors)
if __name__ == "__main__":
    # Smoke test. FIX: the original unpacked into ``wts, pd``, rebinding the
    # pandas alias ``pd`` and shadowing the module for any later code.
    wts, power = WP_2015.Fig_6(270, [1, 5])
    print(wts, power)
| [
"os.path.abspath",
"numpy.array",
"numpy.zeros"
] | [((664, 704), 'numpy.array', 'np.array', (['[1, 9, 17, 25, 33, 41, 49, 57]'], {}), '([1, 9, 17, 25, 33, 41, 49, 57])\n', (672, 704), True, 'import numpy as np\n'), ((731, 747), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (739, 747), True, 'import numpy as np\n'), ((950, 979), 'numpy.array', 'np.array', (['[8, 15, 22, 29, 36]'], {}), '([8, 15, 22, 29, 36])\n', (958, 979), True, 'import numpy as np\n'), ((1006, 1022), 'numpy.zeros', 'np.zeros', (['(8, 5)'], {}), '((8, 5))\n', (1014, 1022), True, 'import numpy as np\n'), ((1356, 1385), 'numpy.array', 'np.array', (['[1, 10, 19, 28, 37]'], {}), '([1, 10, 19, 28, 37])\n', (1364, 1385), True, 'import numpy as np\n'), ((1412, 1428), 'numpy.zeros', 'np.zeros', (['(8, 5)'], {}), '((8, 5))\n', (1420, 1428), True, 'import numpy as np\n'), ((1913, 1938), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1928, 1938), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ===============================================================
# Copyright (C) 2019 HuangYk.
# Licensed under The MIT Lincese.
#
# Filename : optic_flow_loader.py
# Author : HuangYK
# Last Modified: 2019-07-16 21:41
# Description :
# ===============================================================
import flow_vis
import numpy as np
from PIL import Image
def construct_opticflow_image(opt_x, opt_y, method='TV-L1'):
    """Render a dense optical-flow field as a color-coded RGB image.

    Args:
        opt_x: 2-D array-like, horizontal flow component per pixel.
        opt_y: 2-D array-like, vertical flow component per pixel.
        method: flow-estimation method the field came from; only
            'TV-L1' is currently supported.

    Returns:
        PIL.Image.Image in RGB mode, colored with the standard optical-flow
        color wheel (via flow_vis).

    Raises:
        ValueError: if ``method`` is not a supported method name.
    """
    if method != 'TV-L1':
        # Previously an unsupported method fell through to an
        # UnboundLocalError on the result variable; fail loudly instead.
        raise ValueError(f"Unsupported optical-flow method: {method!r}")
    # Stack the two components into an (H, W, 2) flow field.
    flow = np.stack([np.asarray(opt_x), np.asarray(opt_y)], axis=2)
    colored = flow_vis.flow_to_color(flow, convert_to_bgr=False).astype('uint8')
    return Image.fromarray(colored).convert('RGB')
| [
"numpy.array"
] | [((612, 627), 'numpy.array', 'np.array', (['opt_x'], {}), '(opt_x)\n', (620, 627), True, 'import numpy as np\n'), ((664, 679), 'numpy.array', 'np.array', (['opt_y'], {}), '(opt_y)\n', (672, 679), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import cifar10
from sklearn.ensemble import RandomForestClassifier
import time
start_time = time.time()
# Load the CIFAR-10 dataset (train/test image tensors plus labels).
(x_train_origin, y_train), (x_test_origin, y_test) = cifar10.load_data()
# Scale pixel values from [0, 255] to [0, 1].
x_train_origin = x_train_origin / 255
x_test_origin = x_test_origin / 255
# Number of training and test samples.
m_train, m_test = x_train_origin.shape[0], x_test_origin.shape[0]
def gray_scaler(tensor):
    """Convert an (N, H, W, 3) RGB batch to grayscale using BT.709 luma weights."""
    r, g, b = tensor[:, :, :, 0], tensor[:, :, :, 1], tensor[:, :, :, 2]
    return 0.2126 * r + 0.7152 * g + 0.0722 * b
# エセグレースケール化
def trial_scaler(tensor):
    """Pseudo-grayscale: pack R/G/B into decimal digit groups, then normalize."""
    packed = (1000000 * tensor[:, :, :, 0]
              + 1000 * tensor[:, :, :, 1]
              + 1.0 * tensor[:, :, :, 2])
    return packed / 255255255
# Alternative: true grayscale conversion (kept for comparison).
# x_train, x_test = gray_scaler(x_train_origin), gray_scaler(x_test_origin)
x_train, x_test = trial_scaler(x_train_origin), trial_scaler(x_test_origin)
# Flatten each image into a single feature vector.
x_train, x_test = x_train.reshape(m_train, -1), x_test.reshape(m_test, -1)
# Normalize each sample to unit L2 norm.
x_train = x_train / np.linalg.norm(x_train, ord=2, axis=1, keepdims=True)
x_test = x_test / np.linalg.norm(x_test, ord=2, axis=1, keepdims=True)
# Fit a random-forest classifier on the scaled features.
rf = RandomForestClassifier(max_depth=8)
rf.fit(x_train, y_train)
print("Elapsed[s]: ", time.time() - start_time)
print("Train: ", rf.score(x_train, y_train))
print("Test: ", rf.score(x_test, y_test))
"time.time",
"sklearn.ensemble.RandomForestClassifier",
"keras.datasets.cifar10.load_data",
"numpy.linalg.norm"
] | [((164, 175), 'time.time', 'time.time', ([], {}), '()\n', (173, 175), False, 'import time\n'), ((241, 260), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (258, 260), False, 'from keras.datasets import cifar10\n'), ((1119, 1154), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': '(8)'}), '(max_depth=8)\n', (1141, 1154), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((971, 1024), 'numpy.linalg.norm', 'np.linalg.norm', (['x_train'], {'ord': '(2)', 'axis': '(1)', 'keepdims': '(True)'}), '(x_train, ord=2, axis=1, keepdims=True)\n', (985, 1024), True, 'import numpy as np\n'), ((1043, 1095), 'numpy.linalg.norm', 'np.linalg.norm', (['x_test'], {'ord': '(2)', 'axis': '(1)', 'keepdims': '(True)'}), '(x_test, ord=2, axis=1, keepdims=True)\n', (1057, 1095), True, 'import numpy as np\n'), ((1203, 1214), 'time.time', 'time.time', ([], {}), '()\n', (1212, 1214), False, 'import time\n')] |
from __future__ import print_function, unicode_literals
import tensorflow as tf
import numpy as np
import scipy.misc
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
from mpl_toolkits.mplot3d import Axes3D
import argparse
import cv2
import operator
import pickle
from nets.ColorHandPose3DNetwork import ColorHandPose3DNetwork
from utils.general import detect_keypoints, trafo_coords, plot_hand, plot_hand_2d, plot_hand_3d
from pose.DeterminePositions import create_known_finger_poses, determine_position, get_position_name_with_pose_id
from pose.utils.FingerPoseEstimate import FingerPoseEstimate
def parse_args():
    """Build and parse the command-line arguments for gesture classification."""
    ap = argparse.ArgumentParser(
        description='Classify hand gestures from the set of images in folder')
    ap.add_argument('data_path', type=str,
                    help='Path of folder containing images')
    ap.add_argument('--output-path', dest='output_path', type=str, default=None,
                    help='Path of folder where to store the evaluation result')
    ap.add_argument('--plot-fingers', dest='plot_fingers', default=1, type=int,
                    help='Should fingers be plotted.(1 = Yes, 0 = No)')
    # Confidence threshold shared by the geometry and neural-network solvers.
    ap.add_argument('--thresh', dest='threshold', default=0.45, type=float,
                    help='Threshold of confidence level(0-1)')
    ap.add_argument('--solve-by', dest='solve_by', default=0, type=int,
                    help='Solve the keypoints of Hand3d by which method: (0=Geometry, 1=Neural Network, 2=SVM)')
    # Only needed when solving with the neural network.
    ap.add_argument('--pb-file', dest='pb_file', type=str, default=None,
                    help='Path where neural network graph is kept.')
    # Only needed when solving with the SVM.
    ap.add_argument('--svc-file', dest='svc_file', type=str, default=None,
                    help='Path where SVC pickle file is kept.')
    return ap.parse_args()
def prepare_input(data_path, output_path):
    """Resolve the input image list and the output directory.

    Returns a tuple ``(data_files, output_path)`` where ``data_files`` are
    absolute paths of every entry inside ``data_path``.  When no output
    path is given, results are written next to the inputs.
    """
    root = os.path.abspath(data_path)
    data_files = [os.path.join(root, entry) for entry in os.listdir(root)]
    # Default the output directory to the input directory.
    out_dir = root if output_path is None else os.path.abspath(output_path)
    return data_files, out_dir
def predict_by_geometry(keypoint_coord3d_v, known_finger_poses, threshold):
    """Classify a hand pose from 3-D keypoints using finger-geometry rules.

    Returns the best-matching pose label, or 'Undefined' when no known pose
    reaches the confidence ``threshold``.
    """
    estimate = FingerPoseEstimate(keypoint_coord3d_v)
    estimate.calculate_positions_of_fingers(print_finger_info=True)
    candidates = determine_position(estimate.finger_curled,
                                    estimate.finger_position,
                                    known_finger_poses, threshold * 10)
    label = 'Undefined'
    if candidates:
        # Pick the pose with the highest score; accept it only if it
        # clears the confidence threshold.
        best = max(candidates, key=candidates.get)
        if candidates[best] >= threshold:
            label = best
    print(candidates)
    return label
def predict_by_neural_network(keypoint_coord3d_v, known_finger_poses, pb_file, threshold):
    """Classify a hand pose by running 3-D keypoints through a frozen TF graph.

    Args:
        keypoint_coord3d_v: iterable of (x, y, z) keypoint coordinates.
        known_finger_poses: pose definitions used to map an output index
            back to a pose name.
        pb_file: path to the frozen TensorFlow GraphDef (.pb) with tensors
            named 'input:0' and 'output:0'.
        threshold: minimum network score to accept the top prediction.

    Returns:
        The predicted pose name, or 'Undefined' when the top score is
        below ``threshold``.
    """
    detection_graph = tf.Graph()
    score_label = 'Undefined'
    with detection_graph.as_default():
        # Deserialize the frozen graph into this graph context.
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(pb_file, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name = '')
        with tf.Session(graph = detection_graph) as sess:
            input_tensor = detection_graph.get_tensor_by_name('input:0')
            output_tensor = detection_graph.get_tensor_by_name('output:0')
            # Flatten the keypoint list into a single (1, N) feature row.
            flat_keypoint = np.array([entry for sublist in keypoint_coord3d_v for entry in sublist])
            flat_keypoint = np.expand_dims(flat_keypoint, axis = 0)
            outputs = sess.run(output_tensor, feed_dict = {input_tensor: flat_keypoint})[0]
            # Accept the arg-max class only above the confidence threshold.
            max_index = np.argmax(outputs)
            score_index = max_index if outputs[max_index] >= threshold else -1
            score_label = 'Undefined' if score_index == -1 else get_position_name_with_pose_id(score_index, known_finger_poses)
    print(outputs)
    return score_label
def predict_by_svm(keypoint_coord3d_v, known_finger_poses, svc_file):
    """Classify a hand pose with a pre-trained SVM classifier.

    NOTE: ``pickle.load`` executes arbitrary code from the file — only load
    SVC files from a trusted source.
    """
    with open(svc_file, 'rb') as fh:
        classifier = pickle.load(fh)
    # Flatten the keypoint list into a single (1, N) feature row.
    features = np.array([coord for point in keypoint_coord3d_v for coord in point])
    features = features.reshape(1, -1)
    pose_id = classifier.predict(features)[0]
    return get_position_name_with_pose_id(pose_id, known_finger_poses)
if __name__ == '__main__':
    args = parse_args()
    data_files, output_path = prepare_input(args.data_path, args.output_path)
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    known_finger_poses = create_known_finger_poses()
    # Network input placeholders: one 180x240 RGB image per run.
    image_tf = tf.placeholder(tf.float32, shape = (1, 180, 240, 3))
    hand_side_tf = tf.constant([[1.0, 1.0]])  # Both left and right hands included
    evaluation = tf.placeholder_with_default(True, shape = ())
    # Build the hand-pose network graph.
    net = ColorHandPose3DNetwork()
    hand_scoremap_tf, image_crop_tf, scale_tf, center_tf,\
    keypoints_scoremap_tf, keypoint_coord3d_tf = net.inference(image_tf, hand_side_tf, evaluation)
    # Start a TF session, capping GPU memory usage.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    # Load the pretrained network weights.
    net.init(sess)
    # Feed each image in the folder through the network.
    for img_name in data_files:
        # Drop any alpha channel and resize to the network input size.
        image_raw = scipy.misc.imread(img_name)[:, :, :3]
        image_raw = scipy.misc.imresize(image_raw, (180, 240))
        # Normalize pixels to [-0.5, 0.5] and add the batch dimension.
        image_v = np.expand_dims((image_raw.astype('float') / 255.0) - 0.5, 0)
        if args.plot_fingers == 1:
            scale_v, center_v, keypoints_scoremap_v, \
            keypoint_coord3d_v = sess.run([scale_tf, center_tf, keypoints_scoremap_tf,\
                                           keypoint_coord3d_tf], feed_dict = {image_tf: image_v})
            keypoints_scoremap_v = np.squeeze(keypoints_scoremap_v)
            keypoint_coord3d_v = np.squeeze(keypoint_coord3d_v)
            # Post-processing: recover 2-D keypoints and draw the skeleton.
            coord_hw_crop = detect_keypoints(np.squeeze(keypoints_scoremap_v))
            coord_hw = trafo_coords(coord_hw_crop, center_v, scale_v, 256)
            plot_hand_2d(coord_hw, image_raw)
        else:
            keypoint_coord3d_v = sess.run(keypoint_coord3d_tf, feed_dict = {image_tf: image_v})
        # Classifying based on Geometry
        if args.solve_by == 0:
            score_label = predict_by_geometry(keypoint_coord3d_v, known_finger_poses, args.threshold)
        # Classifying based on Neural networks
        elif args.solve_by == 1:
            score_label = predict_by_neural_network(keypoint_coord3d_v, known_finger_poses,
                                                    args.pb_file, args.threshold)
        # Classifying based on SVM
        elif args.solve_by == 2:
            score_label = predict_by_svm(keypoint_coord3d_v, known_finger_poses, args.svc_file)
        # Burn the predicted label into the image and save it.
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(image_raw, score_label, (10, 200), font, 1.0, (255, 0, 0), 2, cv2.LINE_AA)
        file_name = os.path.basename(img_name)
        file_name_comp = file_name.split('.')
        file_save_path = os.path.join(output_path, "{}_out.png".format(file_name_comp[0]))
        mpimg.imsave(file_save_path, image_raw)
        print('{} --> {}\n\n'.format(file_name, score_label))
| [
"numpy.array",
"tensorflow.gfile.GFile",
"pose.DeterminePositions.get_position_name_with_pose_id",
"operator.itemgetter",
"nets.ColorHandPose3DNetwork.ColorHandPose3DNetwork",
"tensorflow.GPUOptions",
"tensorflow.Graph",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"pose.utils.Fing... | [((654, 753), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Classify hand gestures from the set of images in folder"""'}), "(description=\n 'Classify hand gestures from the set of images in folder')\n", (677, 753), False, 'import argparse\n'), ((2049, 2075), 'os.path.abspath', 'os.path.abspath', (['data_path'], {}), '(data_path)\n', (2064, 2075), False, 'import os\n'), ((2090, 2111), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (2100, 2111), False, 'import os\n'), ((2498, 2536), 'pose.utils.FingerPoseEstimate.FingerPoseEstimate', 'FingerPoseEstimate', (['keypoint_coord3d_v'], {}), '(keypoint_coord3d_v)\n', (2516, 2536), False, 'from pose.utils.FingerPoseEstimate import FingerPoseEstimate\n'), ((2636, 2765), 'pose.DeterminePositions.determine_position', 'determine_position', (['fingerPoseEstimate.finger_curled', 'fingerPoseEstimate.finger_position', 'known_finger_poses', '(threshold * 10)'], {}), '(fingerPoseEstimate.finger_curled, fingerPoseEstimate.\n finger_position, known_finger_poses, threshold * 10)\n', (2654, 2765), False, 'from pose.DeterminePositions import create_known_finger_poses, determine_position, get_position_name_with_pose_id\n'), ((3171, 3181), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3179, 3181), True, 'import tensorflow as tf\n'), ((4292, 4364), 'numpy.array', 'np.array', (['[entry for sublist in keypoint_coord3d_v for entry in sublist]'], {}), '([entry for sublist in keypoint_coord3d_v for entry in sublist])\n', (4300, 4364), True, 'import numpy as np\n'), ((4382, 4419), 'numpy.expand_dims', 'np.expand_dims', (['flat_keypoint'], {'axis': '(0)'}), '(flat_keypoint, axis=0)\n', (4396, 4419), True, 'import numpy as np\n'), ((4480, 4541), 'pose.DeterminePositions.get_position_name_with_pose_id', 'get_position_name_with_pose_id', (['max_index', 'known_finger_poses'], {}), '(max_index, known_finger_poses)\n', (4510, 4541), False, 'from pose.DeterminePositions import 
create_known_finger_poses, determine_position, get_position_name_with_pose_id\n'), ((4770, 4797), 'pose.DeterminePositions.create_known_finger_poses', 'create_known_finger_poses', ([], {}), '()\n', (4795, 4797), False, 'from pose.DeterminePositions import create_known_finger_poses, determine_position, get_position_name_with_pose_id\n'), ((4828, 4878), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(1, 180, 240, 3)'}), '(tf.float32, shape=(1, 180, 240, 3))\n', (4842, 4878), True, 'import tensorflow as tf\n'), ((4897, 4922), 'tensorflow.constant', 'tf.constant', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (4908, 4922), True, 'import tensorflow as tf\n'), ((4975, 5018), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(True)'], {'shape': '()'}), '(True, shape=())\n', (5002, 5018), True, 'import tensorflow as tf\n'), ((5046, 5070), 'nets.ColorHandPose3DNetwork.ColorHandPose3DNetwork', 'ColorHandPose3DNetwork', ([], {}), '()\n', (5068, 5070), False, 'from nets.ColorHandPose3DNetwork import ColorHandPose3DNetwork\n'), ((5252, 5302), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.8)'}), '(per_process_gpu_memory_fraction=0.8)\n', (5265, 5302), True, 'import tensorflow as tf\n'), ((2127, 2161), 'os.path.join', 'os.path.join', (['data_path', 'data_file'], {}), '(data_path, data_file)\n', (2139, 2161), False, 'import os\n'), ((2337, 2365), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (2352, 2365), False, 'import os\n'), ((3262, 3275), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (3273, 3275), True, 'import tensorflow as tf\n'), ((4253, 4272), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (4264, 4272), False, 'import pickle\n'), ((4695, 4722), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (4709, 4722), False, 'import os\n'), ((4726, 4747), 'os.mkdir', 'os.mkdir', (['output_path'], {}), 
'(output_path)\n', (4734, 4747), False, 'import os\n'), ((6816, 6906), 'cv2.putText', 'cv2.putText', (['image_raw', 'score_label', '(10, 200)', 'font', '(1.0)', '(255, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(image_raw, score_label, (10, 200), font, 1.0, (255, 0, 0), 2,\n cv2.LINE_AA)\n', (6827, 6906), False, 'import cv2\n'), ((6918, 6944), 'os.path.basename', 'os.path.basename', (['img_name'], {}), '(img_name)\n', (6934, 6944), False, 'import os\n'), ((7072, 7111), 'matplotlib.image.imsave', 'mpimg.imsave', (['file_save_path', 'image_raw'], {}), '(file_save_path, image_raw)\n', (7084, 7111), True, 'import matplotlib.image as mpimg\n'), ((3283, 3312), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['pb_file', '"""rb"""'], {}), "(pb_file, 'rb')\n", (3297, 3312), True, 'import tensorflow as tf\n'), ((3407, 3449), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (3426, 3449), True, 'import tensorflow as tf\n'), ((3463, 3496), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'detection_graph'}), '(graph=detection_graph)\n', (3473, 3496), True, 'import tensorflow as tf\n'), ((3658, 3730), 'numpy.array', 'np.array', (['[entry for sublist in keypoint_coord3d_v for entry in sublist]'], {}), '([entry for sublist in keypoint_coord3d_v for entry in sublist])\n', (3666, 3730), True, 'import numpy as np\n'), ((3750, 3787), 'numpy.expand_dims', 'np.expand_dims', (['flat_keypoint'], {'axis': '(0)'}), '(flat_keypoint, axis=0)\n', (3764, 3787), True, 'import numpy as np\n'), ((3889, 3907), 'numpy.argmax', 'np.argmax', (['outputs'], {}), '(outputs)\n', (3898, 3907), True, 'import numpy as np\n'), ((5329, 5368), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (5343, 5368), True, 'import tensorflow as tf\n'), ((5905, 5937), 'numpy.squeeze', 'np.squeeze', (['keypoints_scoremap_v'], {}), '(keypoints_scoremap_v)\n', (5915, 5937), True, 'import numpy as 
np\n'), ((5962, 5992), 'numpy.squeeze', 'np.squeeze', (['keypoint_coord3d_v'], {}), '(keypoint_coord3d_v)\n', (5972, 5992), True, 'import numpy as np\n'), ((6099, 6150), 'utils.general.trafo_coords', 'trafo_coords', (['coord_hw_crop', 'center_v', 'scale_v', '(256)'], {}), '(coord_hw_crop, center_v, scale_v, 256)\n', (6111, 6150), False, 'from utils.general import detect_keypoints, trafo_coords, plot_hand, plot_hand_2d, plot_hand_3d\n'), ((6155, 6188), 'utils.general.plot_hand_2d', 'plot_hand_2d', (['coord_hw', 'image_raw'], {}), '(coord_hw, image_raw)\n', (6167, 6188), False, 'from utils.general import detect_keypoints, trafo_coords, plot_hand, plot_hand_2d, plot_hand_3d\n'), ((4033, 4096), 'pose.DeterminePositions.get_position_name_with_pose_id', 'get_position_name_with_pose_id', (['score_index', 'known_finger_poses'], {}), '(score_index, known_finger_poses)\n', (4063, 4096), False, 'from pose.DeterminePositions import create_known_finger_poses, determine_position, get_position_name_with_pose_id\n'), ((6051, 6083), 'numpy.squeeze', 'np.squeeze', (['keypoints_scoremap_v'], {}), '(keypoints_scoremap_v)\n', (6061, 6083), True, 'import numpy as np\n'), ((2898, 2920), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (2917, 2920), False, 'import operator\n')] |
from __future__ import division, print_function
# Turn off plotting imports for production
if False:
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import os
import argparse
import sys
import numpy as np
from scipy.stats import sigmaclip
import fitsio
from astropy.table import Table, vstack
from astrometry.util.file import trymakedirs
from astrometry.util.ttime import Time
from astrometry.util.fits import fits_table, merge_tables
from astrometry.libkd.spherematch import match_radec
import legacypipe
from legacypipe.ps1cat import ps1cat, sdsscat
from legacypipe.gaiacat import GaiaCatalog
from legacypipe.survey import radec_at_mjd, get_git_version
from legacypipe.image import validate_version
from legacypipe.survey import LegacySurveyData
import logging
logger = logging.getLogger('legacyzpts.legacy_zeropoints')
def debug(*args):
    """Log *args* at debug level through this module's logger."""
    from legacypipe.utils import log_debug
    log_debug(logger, args)
# Cameras this pipeline knows how to process.
CAMERAS=['decam','mosaic','90prime','megaprime', 'hsc']
# Per-band [bright, faint] magnitude range of reference stars used for
# photometric fitting.
MAGLIM=dict(
    u=[16, 20],
    g=[16, 20],
    r=[16, 19.5],
    i=[16, 19.5],
    z=[16.5, 19],
    Y=[16.5, 19],
    N501=[16,20],
    N673=[16,19.5],
)
def ptime(text, t0):
    """Print elapsed time since *t0*, tagged with *text*; return the new timestamp."""
    now = Time()
    print('TIMING:%s ' % text, now - t0)
    return now
def read_lines(fn):
    """Read *fn* and return its lines, whitespace-stripped, as a numpy array.

    Args:
        fn: path of a text file with at least one line.

    Returns:
        np.ndarray of str, one element per line, stripped of leading and
        trailing whitespace.

    Raises:
        ValueError: if the file contains no lines.
    """
    # Context manager guarantees the handle is closed even on error
    # (the original leaked the handle if readlines() raised).
    with open(fn, 'r') as fin:
        lines = fin.readlines()
    if len(lines) < 1:
        raise ValueError('lines not read properly from %s' % fn)
    return np.array([line.strip() for line in lines])
def astropy_to_astrometry_table(t):
    """Copy an astropy Table *t* into an astrometry.net fits_table, column by column."""
    T = fits_table()
    for c in t.colnames:
        T.set(c, t[c])
    return T
def _ccds_table(camera='decam', overrides=None):
    '''Initialize the CCDs table.

    Description and Units at:
    https://github.com/legacysurvey/legacyzpts/blob/master/DESCRIPTION_OF_OUTPUTS.md

    Args:
        camera: camera name (unused here other than matching the table
            schema; the 'camera' column width is checked against CAMERAS).
        overrides: optional dict mapping column name -> numpy dtype string,
            replacing the default dtype for those columns.

    Returns:
        astropy.table.Table with a single zeroed row and one column per
        CCD-level quantity.
    '''
    # 'S9' below must be wide enough for the longest camera name.
    max_camera_length = max([len(c) for c in CAMERAS])
    if max_camera_length > 9:
        print('Warning! Increase camera length header card to S{}'.format(max_camera_length))
    cols = [
        ('err_message', 'S30'),
        ('image_filename', 'S120'),
        ('image_hdu', 'i2'),
        ('camera', 'S9'),
        ('expnum', 'i8'),
        ('plver', 'S8'),
        ('procdate', 'S19'),
        ('plprocid', 'S7'),
        ('ccdname', 'S4'),
        ('ccdnum', 'i2'),
        ('expid', 'S17'),
        ('object', 'S35'),
        ('propid', 'S10'),
        ('filter', 'S4'),
        ('exptime', 'f4'),
        ('mjd_obs', 'f8'),
        ('airmass', 'f4'),
        ('fwhm', 'f4'),
        ('width', 'i2'),
        ('height', 'i2'),
        ('ra_bore', 'f8'),
        ('dec_bore', 'f8'),
        ('crpix1', 'f4'),
        ('crpix2', 'f4'),
        ('crval1', 'f8'),
        ('crval2', 'f8'),
        ('cd1_1', 'f4'),
        ('cd1_2', 'f4'),
        ('cd2_1', 'f4'),
        ('cd2_2', 'f4'),
        ('pixscale', 'f4'),
        ('zptavg', 'f4'),
        ('yshift', 'bool'),
        # -- CCD-level quantities --
        ('ra', 'f8'),
        ('dec', 'f8'),
        ('skysb', 'f4'),
        ('skycounts', 'f4'),
        ('skyrms', 'f4'),
        ('sig1', 'f4'),
        ('nstars_photom', 'i2'),
        ('nstars_astrom', 'i2'),
        ('phoff', 'f4'),
        ('phrms', 'f4'),
        ('phrmsavg', 'f4'),
        ('zpt', 'f4'),
        ('raoff', 'f4'),
        ('decoff', 'f4'),
        ('rarms', 'f4'),
        ('decrms', 'f4'),
        ('rastddev', 'f4'),
        ('decstddev', 'f4'),
        ]
    if overrides is not None:
        # Swap in caller-supplied dtypes, keeping the default otherwise.
        ovr = []
        for k,v in cols:
            ovr.append((k, overrides.get(k, v)))
        cols = ovr
    ccds = Table(np.zeros(1, dtype=cols))
    return ccds
def _stars_table(nstars=1):
    '''Initialize the stars table.

    Description and Units at:
    https://github.com/legacysurvey/legacyzpts/blob/master/DESCRIPTION_OF_OUTPUTS.md

    Args:
        nstars: number of zeroed rows to allocate.

    Returns:
        astropy.table.Table with ``nstars`` zeroed rows and one column per
        per-star measurement.
    '''
    cols = [('image_filename', 'S100'),('image_hdu', 'i2'),
            ('expid', 'S16'), ('filter', 'S1'),('nstars', 'i2'),
            ('x', 'f4'), ('y', 'f4'), ('expnum', 'i8'),
            ('plver', 'S8'), ('procdate', 'S19'), ('plprocid', 'S7'),
            ('gain', 'f4'),
            ('ra', 'f8'), ('dec', 'f8'),
            ('apmag', 'f4'),('apflux', 'f4'),('apskyflux', 'f4'),('apskyflux_perpix', 'f4'),
            ('radiff', 'f8'), ('decdiff', 'f8'),
            ('ps1_mag', 'f4'),
            ('gaia_g','f8'),('ps1_g','f8'),('ps1_r','f8'),('ps1_i','f8'),('ps1_z','f8'),
            ('exptime', 'f4')]
    stars = Table(np.zeros(nstars, dtype=cols))
    return stars
def cols_for_survey_table():
    """Return the list of column names expected in a -survey.fits CCDs table."""
    # Exposure / CCD metadata and photometric-calibration quantities.
    meta = ['airmass', 'ccdskysb', 'plver', 'procdate', 'plprocid',
            'ccdnastrom', 'ccdnphotom', 'ra', 'dec', 'ra_bore', 'dec_bore',
            'image_filename', 'image_hdu', 'expnum', 'ccdname', 'object',
            'filter', 'exptime', 'camera', 'width', 'height', 'propid',
            'mjd_obs', 'fwhm', 'zpt', 'ccdzpt', 'ccdraoff', 'ccddecoff',
            'ccdrarms', 'ccddecrms', 'ccdskycounts', 'phrms', 'ccdphrms']
    # WCS terms.
    wcs = ['cd1_1', 'cd2_2', 'cd1_2', 'cd2_1', 'crval1', 'crval2', 'crpix1',
           'crpix2']
    # Noise / geometry extras.
    extras = ['skyrms', 'sig1', 'yshift']
    return meta + wcs + extras
def prep_survey_table(T, camera=None, bad_expid=None):
    """Convert a raw zeropoints fits_table *T* (in place) into -survey.fits form.

    Renames columns to the survey convention, drops everything not in
    cols_for_survey_table(), downcasts a few columns, and seeds ccd_cuts
    with the err_legacyzpts bit.  Returns the modified table.
    """
    assert(camera in CAMERAS)
    need_keys = cols_for_survey_table()
    # Rename zeropoint columns to the CCD-level survey names.
    rename_keys= [('zpt','ccdzpt'),
                  ('zptavg','zpt'),
                  ('raoff','ccdraoff'),
                  ('decoff','ccddecoff'),
                  ('skycounts', 'ccdskycounts'),
                  ('skysb', 'ccdskysb'),
                  ('rarms', 'ccdrarms'),
                  ('decrms', 'ccddecrms'),
                  ('phrms', 'ccdphrms'),
                  ('phrmsavg', 'phrms'),
                  ('nstars_astrom','ccdnastrom'),
                  ('nstars_photom','ccdnphotom')]
    for old,new in rename_keys:
        T.rename(old,new)
    # Delete any column not in the survey schema.
    del_keys= list( set(T.get_columns()).difference(set(need_keys)) )
    for key in del_keys:
        T.delete_column(key)
    # Downcast to the precision the survey files use.
    T.width  = T.width.astype(np.int16)
    T.height = T.height.astype(np.int16)
    T.cd1_1 = T.cd1_1.astype(np.float32)
    T.cd1_2 = T.cd1_2.astype(np.float32)
    T.cd2_1 = T.cd2_1.astype(np.float32)
    T.cd2_2 = T.cd2_2.astype(np.float32)
    # Set placeholder that masks everything until update_ccd_cuts is
    # run.
    from legacyzpts import psfzpt_cuts
    T.ccd_cuts = np.zeros(len(T), np.int16) + psfzpt_cuts.CCD_CUT_BITS['err_legacyzpts']
    return T
def create_annotated_table(T, ann_fn, camera, survey, mp):
    """Annotate the CCDs table *T* and write it to *ann_fn* (via a temp file).

    Only runs the (expensive) annotation when at least one CCD has a
    non-zero zeropoint.
    """
    from legacyzpts.annotate_ccds import annotate, init_annotations
    T = survey.cleanup_ccds_table(T)
    init_annotations(T)
    # Annotate only if some CCD was successfully calibrated.
    I, = np.nonzero(T.ccdzpt)
    if len(I):
        annotate(T, survey, camera, mp=mp, normalizePsf=True, carryOn=True)
    writeto_via_temp(ann_fn, T)
    print('Wrote %s' % ann_fn)
def getrms(x):
    """Return the root-mean-square of the values in array *x*."""
    mean_sq = np.mean(x ** 2)
    return np.sqrt(mean_sq)
def measure_image(img_fn, mp, image_dir='images', run_calibs_only=False,
                  just_imobj=False,
                  survey=None, psfex=True, camera=None, **measureargs):
    '''Wrapper on the camera-specific classes to measure the CCD-level data on all
    the FITS extensions for a given set of images.

    Args:
        img_fn: image filename to process.
        mp: multiprocessing map object (has a .map method).
        run_calibs_only: stop after producing/merging calibration products.
        just_imobj: return the bare image object without processing.
        survey: LegacySurveyData-like object used to build image objects.
        psfex: whether PsfEx calibration products are required.
        camera: camera name; must match the image's own camera field.
        **measureargs: additional options (splinesky, choose_ccd, debug,
            quiet, image_hdu, plots, blob_mask_dir, zeropoints_dir,
            sdss_photom, ...).

    Returns:
        (all_ccds, all_photom, img) on success; a one-row error CCDs table
        tuple on failed calibration; [] when merged calib files are missing;
        None when run_calibs_only.
    '''
    t0 = Time()
    quiet = measureargs.get('quiet', False)
    image_hdu = measureargs.get('image_hdu', None)
    img = survey.get_image_object(None, camera=camera,
                                  image_fn=img_fn, image_hdu=image_hdu)
    if just_imobj:
        return img
    print('Got image object', img)
    # Confirm camera field.
    assert(img.camera == camera)
    primhdr = img.read_image_primary_header()
    if (not img.calibration_good(primhdr)) or (img.exptime == 0):
        # FIXME
        # - all-zero weight map
        if run_calibs_only:
            return
        debug('%s: Zero exposure time or low-level calibration flagged as bad; skipping image.'
              % str(img))
        # Return a one-row CCDs table recording the failure.
        ccds = _ccds_table(camera)
        ccds['image_filename'] = img_fn
        ccds['err_message'] = 'Failed CP calib, or Exptime=0'
        ccds['zpt'] = 0.
        return ccds, None, img
    if measureargs['choose_ccd']:
        extlist = [measureargs['choose_ccd']]
    else:
        extlist = img.get_extension_list(debug=measureargs['debug'])
    print('Extensions to process:', extlist)
    all_ccds = []
    all_photom = []
    splinesky = measureargs['splinesky']
    # Optional auxiliary surveys: blob masks and precomputed zeropoints.
    survey_blob_mask = None
    blobdir = measureargs.pop('blob_mask_dir', None)
    if blobdir is not None:
        survey_blob_mask = LegacySurveyData(survey_dir=blobdir)
    survey_zeropoints = None
    zptdir = measureargs.pop('zeropoints_dir', None)
    if zptdir is not None:
        survey_zeropoints = LegacySurveyData(survey_dir=zptdir)
    plots = measureargs.get('plots', False)
    # Validate the splinesky and psfex merged files, and (re)make them if
    # they're missing.
    if splinesky:
        if validate_version(img.get_splinesky_merged_filename(),
                            'table', img.expnum, img.plver, img.plprocid, quiet=quiet):
            splinesky = False
    if psfex:
        if validate_version(img.get_psfex_merged_filename(),
                            'table', img.expnum, img.plver, img.plprocid, quiet=quiet):
            psfex = False
    if splinesky or psfex:
        # Run the per-extension calibrations in parallel, then merge.
        imgs = mp.map(run_one_calib, [(img_fn, camera, survey, ext, psfex, splinesky,
                                       plots, survey_blob_mask, survey_zeropoints)
                                      for ext in extlist])
        from legacyzpts.merge_calibs import merge_splinesky, merge_psfex
        class FakeOpts(object):
            pass
        opts = FakeOpts()
        # Allow some CCDs to be missing, e.g., if the weight map is all zero.
        opts.all_found = False
        if splinesky:
            skyoutfn = img.get_splinesky_merged_filename()
            ccds = None
            err_splinesky = merge_splinesky(survey, img.expnum, ccds, skyoutfn, opts, imgs=imgs)
            if err_splinesky != 1:
                print('Problem writing {}'.format(skyoutfn))
        if psfex:
            psfoutfn = img.get_psfex_merged_filename()
            ccds = None
            err_psfex = merge_psfex(survey, img.expnum, ccds, psfoutfn, opts, imgs=imgs)
            if err_psfex != 1:
                print('Problem writing {}'.format(psfoutfn))
        # Now, if they're still missing it's because the entire exposure is borked
        # (WCS failed, weight maps are all zero, etc.), so exit gracefully.
        if splinesky:
            fn = img.get_splinesky_merged_filename()
            if not os.path.exists(fn):
                print('Merged splinesky file not found {}'.format(fn))
                return []
            if not validate_version(img.get_splinesky_merged_filename(),
                                    'table', img.expnum, img.plver, img.plprocid):
                raise RuntimeError('Merged splinesky file did not validate!')
            # At this point the merged file exists and has been validated, so remove
            # the individual splinesky files.
            for img in imgs:
                fn = img.get_splinesky_unmerged_filename()
                if os.path.isfile(fn):
                    os.remove(fn)
        if psfex:
            fn = img.get_psfex_merged_filename()
            if not os.path.exists(fn):
                print('Merged psfex file not found {}'.format(fn))
                return []
            if not validate_version(img.get_psfex_merged_filename(),
                                    'table', img.expnum, img.plver, img.plprocid):
                raise RuntimeError('Merged psfex file did not validate!')
            # At this point the merged file exists and has been validated, so remove
            # the individual PSFEx and SE files.
            for img in imgs:
                fn = img.get_psfex_unmerged_filename()
                if os.path.isfile(fn):
                    os.remove(fn)
                sefn = img.sefn
                if os.path.isfile(sefn):
                    os.remove(sefn)
    # FIXME -- remove temporary individual files directory
    if run_calibs_only:
        return
    # Measure zeropoints/photometry on each extension.
    rtns = mp.map(run_one_ext, [(img, ext, survey, splinesky,
                                 measureargs['sdss_photom'])
                                for ext in extlist])
    for ccd,photom in rtns:
        if ccd is not None:
            all_ccds.append(ccd)
        if photom is not None:
            all_photom.append(photom)
    # Compute the median zeropoint across all the CCDs.
    all_ccds = vstack(all_ccds)
    if len(all_photom):
        all_photom = merge_tables(all_photom)
    else:
        all_photom = None
    zpts = all_ccds['zpt']
    zptgood = np.isfinite(zpts)
    if np.sum(zptgood) > 0:
        all_ccds['zptavg'] = np.median(zpts[zptgood])
    phrms = all_ccds['phrms']
    phrmsgood = np.isfinite(phrms) & (all_ccds['phrms'] > 0)
    if np.sum(phrmsgood) > 0:
        all_ccds['phrmsavg'] = np.median(phrms[phrmsgood])
    t0 = ptime('measure-image-%s' % img_fn,t0)
    return all_ccds, all_photom, img
def run_one_calib(X):
    """Run PSF/sky calibration for a single CCD extension (mp.map worker).

    *X* is the tuple (img_fn, camera, survey, ext, psfex, splinesky, plots,
    survey_blob_mask, survey_zeropoints).  Returns the image object.
    """
    (img_fn, camera, survey, ext, psfex, splinesky, _, survey_blob_mask,
     survey_zeropoints) = X
    img = survey.get_image_object(None, camera=camera,
                                  image_fn=img_fn, image_hdu=ext)
    # Only (re)compute products that cannot already be read back.
    do_psf = False
    do_sky = False
    if psfex:
        do_psf = True
        try:
            psf = img.read_psf_model(0., 0., pixPsf=True)
            if psf is not None:
                do_psf = False
        # NOTE(review): bare except silently treats any read error as
        # "product missing" and recomputes it.
        except:
            pass
    if splinesky:
        do_sky = True
        try:
            sky = img.read_sky_model()
            if sky is not None:
                do_sky = False
        # NOTE(review): bare except, same best-effort pattern as above.
        except:
            pass
    if (not do_psf) and (not do_sky):
        # Nothing to do!
        return img
    # Only do stellar halo subtraction if we have a zeropoint (via --zeropoint-dir)
    # Note that this is the only place we use survey_zeropoints; it does not get
    # passed to image.run_calibs().
    have_zpt = False
    if survey_zeropoints is not None:
        ccds = survey_zeropoints.find_ccds(
            expnum=img.expnum, ccdname=img.ccdname, camera=img.camera)
        if len(ccds) != 1:
            print('WARNING, did not find a zeropoint for', img_fn,
                  'by camera', camera, 'expnum', img.expnum,
                  'ext', ext)
        else:
            img.ccdzpt = ccds[0].ccdzpt
            img.ccdraoff = ccds[0].ccdraoff
            img.ccddecoff = ccds[0].ccddecoff
            if img.ccdzpt == 0.:
                print('WARNING, found zeropoint=0 for', img_fn,
                      'by camera', camera, 'expnum', img.expnum,
                      'ext', ext)
            else:
                have_zpt = True
    git_version = get_git_version(dirnm=os.path.dirname(legacypipe.__file__))
    ps = None
    img.run_calibs(psfex=do_psf, sky=do_sky, splinesky=True,
                   git_version=git_version, survey=survey, ps=ps,
                   survey_blob_mask=survey_blob_mask,
                   halos=have_zpt,
                   subtract_largegalaxies=have_zpt)
    return img
def run_one_ext(X):
    """Unpack one (img, ext, survey, splinesky, sdss_photom) work item and
    run zeropoint measurement on that single extension."""
    img, ext, survey, splinesky, sdss_photom = X
    # Re-instantiate the image object bound to this specific HDU/extension.
    img = survey.get_image_object(None, camera=img.camera,
                                  image_fn=img.image_filename, image_hdu=ext)
    return run_zeropoints(img, splinesky=splinesky, sdss_photom=sdss_photom)
class outputFns(object):
    def __init__(self, imgfn, outdir, camera, image_dir='images', debug=False):
        """
        Assigns output filenames and makes the needed directories.

        Args:
            imgfn: abs path to image, should be a ooi or oki file
            outdir: root dir for outputs
            camera: camera name (currently unused in path construction)
            debug: append '-debug' to the output basenames if true

        Attributes:
            imgfn: image that will be read
            photomfn: photometry output file
            annfn: annotated-CCDs output file

        Example:
            outdir/decam/DECam_CP/CP20151226/img_fn.fits.fz
            outdir/decam/DECam_CP/CP20151226/img_fn-photom.fits
            outdir/decam/DECam_CP/CP20151226/img_fn-annotated.fits
        """
        self.imgfn = imgfn
        self.image_dir = image_dir
        # NOTE(review): the string below is dead code kept as an inline
        # record of the previous path-layout scheme.
        """
        # Keep the last directory component
        dirname = os.path.basename(os.path.dirname(self.imgfn))
        basedir = os.path.join(outdir, camera, dirname)
        """
        # Keep same path structure as the images
        dirname = os.path.dirname(self.imgfn)
        basedir = os.path.join(outdir, dirname)
        trymakedirs(basedir)
        basename = os.path.basename(self.imgfn)
        # Strip compression/FITS suffixes to get the output stem.
        base = basename
        if base.endswith('.fz'):
            base = base[:-len('.fz')]
        if base.endswith('.fits'):
            base = base[:-len('.fits')]
        if debug:
            base += '-debug'
        self.photomfn = os.path.join(basedir, base + '-photom.fits')
        self.annfn = os.path.join(basedir, base + '-annotated.fits')
def writeto_via_temp(outfn, obj, func_write=False, **kwargs):
    """Atomically write *obj* to *outfn* via a temp file in the same directory.

    Writing to a temporary name first and then os.rename()-ing means readers
    never observe a partially written file (rename is atomic within one
    filesystem, which is why the temp file lives next to the target).

    Args:
        outfn: final output path.
        obj: object exposing ``writeto`` (default) or ``write``.
        func_write: if True, call ``obj.write`` instead of ``obj.writeto``.
        **kwargs: forwarded to the write method.
    """
    tempfn = os.path.join(os.path.dirname(outfn), 'tmp-' + os.path.basename(outfn))
    try:
        if func_write:
            obj.write(tempfn, **kwargs)
        else:
            obj.writeto(tempfn, **kwargs)
    except Exception:
        # Don't leave a stale temp file behind when the write fails.
        try:
            os.remove(tempfn)
        except OSError:
            pass
        raise
    os.rename(tempfn, outfn)
def runit(imgfn, photomfn, annfn, mp, bad_expid=None,
          survey=None, run_calibs_only=False, **measureargs):
    '''Generate a legacypipe-compatible (survey) CCDs file for a given image.

    Measures all CCDs of one exposure, then writes the per-star photometry
    table (photomfn) and the annotated survey-CCDs table (annfn).

    Args:
        imgfn: filename of the reduced (CP) image to measure.
        photomfn: output filename for the per-star photometry table.
        annfn: output filename for the annotated-CCDs table.
        mp: multiprocessing helper (astrometry.util.multiproc.multiproc).
        bad_expid: optional set/dict of known-bad exposure numbers,
            forwarded to prep_survey_table.
        survey: LegacySurveyData instance, passed through to measure_image.
        run_calibs_only: if True, only ensure calibration products exist;
            no output files are written.
        **measureargs: extra keyword args forwarded to measure_image()
            (must include 'camera').
    '''
    t0 = Time()
    results = measure_image(imgfn, mp, survey=survey, run_calibs_only=run_calibs_only,
                            **measureargs)
    if run_calibs_only:
        return
    if len(results) == 0:
        print('All CCDs bad, quitting.')
        return
    ccds, photom, img = results
    t0 = ptime('measure_image',t0)
    # Copy a curated subset of primary-header cards into the output header.
    primhdr = img.read_image_primary_header()
    hdr = fitsio.FITSHDR()
    for key in ['AIRMASS', 'OBJECT', 'TELESCOP', 'INSTRUME', 'EXPTIME', 'DATE-OBS',
                'MJD-OBS', 'PROGRAM', 'OBSERVER', 'PROPID', 'FILTER', 'HA', 'ZD',
                'AZ', 'DOMEAZ', 'HUMIDITY', 'PLVER', ]:
        if not key in primhdr:
            continue
        v = primhdr[key]
        if isinstance(v, str):
            v = v.strip()
        hdr.add_record(dict(name=key, value=v,
                            comment=primhdr.get_comment(key)))
    hdr.add_record(dict(name='EXPNUM', value=img.expnum,
                        comment='Exposure number'))
    hdr.add_record(dict(name='PROCDATE', value=img.procdate,
                        comment='CP processing date'))
    hdr.add_record(dict(name='PLPROCID', value=img.plprocid,
                        comment='CP processing batch'))
    hdr.add_record(dict(name='RA_BORE',  value=ccds['ra_bore'][0],  comment='Boresight RA (deg)'))
    hdr.add_record(dict(name='DEC_BORE', value=ccds['dec_bore'][0], comment='Boresight Dec (deg)'))
    # Exposure-level zeropoint: median over CCDs with a finite zpt; 0 if none.
    zptgood = np.isfinite(ccds['zpt'])
    if np.sum(zptgood) > 0:
        medzpt = np.median(ccds['zpt'][zptgood])
    else:
        medzpt = 0.0
    hdr.add_record(dict(name='CCD_ZPT', value=medzpt,
                        comment='Exposure median zeropoint'))
    # Exposure-level seeing: median FWHM over CCDs with a positive FWHM.
    goodfwhm = (ccds['fwhm'] > 0)
    if np.sum(goodfwhm) > 0:
        fwhm = np.median(ccds['fwhm'][goodfwhm])
    else:
        fwhm = 0.0
    pixscale = ccds['pixscale'][0]
    hdr.add_record(dict(name='FWHM', value=fwhm, comment='Exposure median FWHM (CP)'))
    hdr.add_record(dict(name='SEEING', value=fwhm * pixscale,
                        comment='Exposure median seeing (FWHM*pixscale)'))
    # Store only the last directory component plus basename as FILENAME.
    base = os.path.basename(imgfn)
    dirnm = os.path.dirname(imgfn)
    firstdir = os.path.basename(dirnm)
    hdr.add_record(dict(name='FILENAME', value=os.path.join(firstdir, base)))
    if photom is not None:
        writeto_via_temp(photomfn, photom, overwrite=True, header=hdr)
    accds = astropy_to_astrometry_table(ccds)
    # survey table
    T = prep_survey_table(accds, camera=measureargs['camera'], bad_expid=bad_expid)
    # survey --> annotated
    create_annotated_table(T, annfn, measureargs['camera'], survey, mp)
    t0 = ptime('write-results-to-fits',t0)
def get_parser():
    '''return parser object, tells it what options to look for
    options can come from a list of strings or command line

    Returns:
        argparse.ArgumentParser configured with all legacy-zeropoints options.
    '''
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\
                description='Generate a legacypipe-compatible (survey) CCDs file \
                from a set of reduced imaging.')
    parser.add_argument('--camera',choices=CAMERAS, action='store',required=True)
    # Exactly one of --image / --image_list is expected (checked in main()).
    parser.add_argument('--image',action='store',default=None,help='relative path to image starting from decam,bok,mosaicz dir',required=False)
    parser.add_argument('--image_list',action='store',default=None,help='text file listing multiples images in same was as --image',required=False)
    parser.add_argument('--survey-dir', type=str, default=None,
                        help='Override the $LEGACY_SURVEY_DIR environment variable')
    parser.add_argument('--outdir', type=str, default=None, help='Where to write photom and annotated files; default [survey_dir]/zpt')
    parser.add_argument('--sdss-photom', default=False, action='store_true',
                        help='Use SDSS rather than PS-1 for photometric cal.')
    parser.add_argument('--debug', action='store_true', default=False, help='Write additional files and plots for debugging')
    parser.add_argument('--choose_ccd', action='store', default=None, help='forced to use only the specified ccd')
    parser.add_argument('--prefix', type=str, default='', help='Prefix to prepend to the output files.')
    parser.add_argument('--verboseplots', action='store_true', default=False, help='use to plot FWHM Moffat PSF fits to the 20 brightest stars')
    parser.add_argument('--calibrate', action='store_true',
                        help='Use this option when deriving the photometric transformation equations.')
    parser.add_argument('--plots', action='store_true', help='Calib plots?')
    parser.add_argument('--nproc', type=int,action='store',default=1,
                        help='set to > 1 if using legacy-zeropoints-mpiwrapper.py')
    parser.add_argument('--run-calibs-only', default=False, action='store_true',
                        help='Only ensure calib files exist, do not compute zeropoints.')
    # NOTE: stored under dest 'splinesky'; passing the flag turns splinesky OFF.
    parser.add_argument('--no-splinesky', dest='splinesky', default=True, action='store_false',
                        help='Do not use spline sky model for sky subtraction?')
    parser.add_argument('--blob-mask-dir', type=str, default=None,
                        help='The base directory to search for blob masks during sky model construction')
    parser.add_argument('--zeropoints-dir', type=str, default=None,
                        help='The base directory to search for survey-ccds files for subtracting star halos before doing sky calibration.')
    parser.add_argument('--calibdir', default=None,
                        help='if None will use LEGACY_SURVEY_DIR/calib, e.g. /global/cscratch1/sd/desiproc/dr5-new/calib')
    parser.add_argument('--threads', default=None, type=int,
                        help='Multiprocessing threads (parallel by HDU)')
    parser.add_argument('--quiet', default=False, action='store_true', help='quiet down')
    parser.add_argument('--overhead', type=str, default=None, help='Print python startup time since the given date.')
    parser.add_argument('--verbose', '-v', action='store_true', default=False, help='More logging')
    return parser
def main(args=None):
    """Command-line driver: compute zeropoints for each listed CP image.

    Args:
        args: optional list of argument strings; defaults to sys.argv.
    """
    parser= get_parser()
    args = parser.parse_args(args=args)
    if args.overhead is not None:
        # --overhead carries a start timestamp; report Python startup cost.
        t0 = args.overhead
        if t0.endswith('.N'):
            t0 = t0[:-2]
        t0 = float(t0)
        import time
        print('Startup time:', time.time()-t0, 'seconds')
    if args.image_list:
        image_list = read_lines(args.image_list)
    elif args.image:
        image_list = [args.image]
    ''' Produce zeropoints for all CP images in image_list
    image_list -- iterable list of image filenames
    args -- parsed argparser objection from get_parser()
    '''
    from pkg_resources import resource_filename
    assert(not args is None)
    assert(not image_list is None)
    t0 = tbegin = Time()
    # Build a dictionary with the optional inputs.
    measureargs = vars(args)
    measureargs.pop('image_list')
    measureargs.pop('image')
    nimage = len(image_list)
    quiet = measureargs.get('quiet', False)
    from astrometry.util.multiproc import multiproc
    threads = measureargs.pop('threads')
    mp = multiproc(nthreads=(threads or 1))
    if args.verbose:
        lvl = logging.DEBUG
    else:
        lvl = logging.INFO
    logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
    camera = measureargs['camera']
    survey = LegacySurveyData(survey_dir=measureargs['survey_dir'])
    if measureargs.get('calibdir'):
        survey.calib_dir = measureargs['calibdir']
    measureargs.update(survey=survey)
    outdir = measureargs.pop('outdir')
    if outdir is None:
        outdir = os.path.join(survey.survey_dir, 'zpt')
    # Cameras with curated bad-exposure lists shipped in the package data.
    if camera in ['mosaic', 'decam', '90prime']:
        from legacyzpts.psfzpt_cuts import read_bad_expid
        fn = resource_filename('legacyzpts', 'data/{}-bad_expid.txt'.format(camera))
        if os.path.isfile(fn):
            print('Reading {}'.format(fn))
            measureargs.update(bad_expid=read_bad_expid(fn))
        else:
            print('No bad exposure file found for camera {}'.format(camera))
    for ii, imgfn in enumerate(image_list):
        print('Working on image {}/{}: {}'.format(ii+1, nimage, imgfn))
        # Check if the outputs are done and have the correct data model.
        F = outputFns(imgfn, outdir, camera, image_dir=survey.get_image_dir(),
                      debug=measureargs['debug'])
        measure = measure_image(F.imgfn, None, just_imobj=True, image_hdu=None,
                                **measureargs)
        psffn = measure.get_psfex_merged_filename()
        skyfn = measure.get_splinesky_merged_filename()
        # Validate existing outputs against this exposure's version stamps.
        ann_ok, psf_ok, sky_ok = [validate_version(
            fn, 'table', measure.expnum, measure.plver,
            measure.plprocid, quiet=quiet)
            for fn in [F.annfn, psffn, skyfn]]
        if measureargs['run_calibs_only'] and psf_ok and sky_ok:
            print('Already finished {}'.format(psffn))
            print('Already finished {}'.format(skyfn))
            continue
        phot_ok = validate_version(F.photomfn, 'header', measure.expnum,
                                   measure.plver, measure.plprocid,
                                   ext=1, quiet=quiet)
        if ann_ok and phot_ok and psf_ok and sky_ok:
            print('Already finished: {}'.format(F.annfn))
            continue
        # Create the file
        t0 = ptime('before-run',t0)
        runit(F.imgfn, F.photomfn, F.annfn, mp, **measureargs)
        t0 = ptime('after-run',t0)
    tnow = Time()
    print("TIMING:total %s" % (tnow-tbegin,))
def estimate_sky_from_pixels(img, nsigma=3.):
    """
    Estimate a constant sky level from sigma-clipped image pixels.

    Generalized: the clipping threshold, previously hard-coded to 3 sigma,
    is now a parameter (default unchanged, so callers are unaffected).

    Args:
        img: numpy array of pixel values (any shape; clipping is done on
            the flattened pixels).
        nsigma: symmetric sigma-clipping threshold (default 3).

    Returns:
        tuple: (skyimg, skymed, skystd) where skyimg is a constant array of
        img's shape filled with the clipped median, skymed is the clipped
        median, and skystd is the clipped standard deviation.
    """
    clip_vals, _, _ = sigmaclip(img, low=nsigma, high=nsigma)
    skymed = np.median(clip_vals)
    skystd = np.std(clip_vals)
    # Constant "sky image" at the median level, matching the input shape.
    skyimg = np.zeros(img.shape) + skymed
    return skyimg, skymed, skystd
def run_zeropoints(imobj, splinesky=False, sdss_photom=False):
    """Computes photometric and astrometric zeropoints for one CCD.

    Pipeline: fill the CCDs table from headers; read image/invvar/dq;
    estimate sky; fetch Gaia (+ PS1 or SDSS) reference stars; tractor-fit
    their positions and fluxes with the PsfEx PSF; derive astrometric
    offsets and the photometric zeropoint; do aperture photometry.

    Args:
        imobj: image object for a single CCD (provides headers, pixels,
            calibration products and camera-specific helpers).
        splinesky: if True, subtract the spline sky model instead of the
            constant pixel-median sky estimate.
        sdss_photom: if True, use SDSS instead of PS-1 for photometric cal.

    Returns:
        ccds, stars_photom, stars_astrom
    """
    #
    t0= Time()
    t0= ptime('Measuring CCD=%s from image=%s' % (imobj.ccdname, imobj.image_filename),t0)
    # Initialize CCDs (annotated) table data structure.
    ccds = _ccds_table(imobj.camera, overrides=imobj.override_ccd_table_types())
    # init_ccd():
    # Map CCDs-table column names to differently-named imobj attributes.
    namemap = { 'object': 'obj',
                'filter': 'band',
                'image_hdu': 'hdu',
                'mjd_obs': 'mjdobs',
    }
    for key in ['image_filename', 'image_hdu', 'camera', 'expnum', 'plver', 'procdate',
                'plprocid', 'ccdname', 'propid', 'exptime', 'mjd_obs',
                'pixscale', 'width', 'height', 'fwhm', 'filter']:
        val = getattr(imobj, namemap.get(key, key))
        print('Setting', key, '=', val)
        ccds[key] = val
    primhdr = imobj.read_image_primary_header()
    hdr = imobj.read_image_header(ext=imobj.hdu)
    ra_bore, dec_bore = imobj.get_radec_bore(primhdr)
    ccds['ra_bore'],ccds['dec_bore'] = ra_bore, dec_bore
    airmass = imobj.get_airmass(primhdr, hdr, ra_bore, dec_bore)
    ccds['airmass'] = airmass
    ccds['gain'] = imobj.get_gain(primhdr, hdr)
    ccds['object'] = primhdr.get('OBJECT')
    # WCS cards are required; only 'avsky' may legitimately be absent.
    optional = ['avsky']
    for ccd_col in ['avsky', 'crpix1', 'crpix2', 'crval1', 'crval2',
                    'cd1_1','cd1_2', 'cd2_1', 'cd2_2']:
        if ccd_col.upper() in hdr:
            ccds[ccd_col] = hdr[ccd_col]
        elif ccd_col in optional:
            ccds[ccd_col] = np.nan
        else:
            raise KeyError('Could not find %s key in header:' % ccd_col)
    wcs = imobj.get_wcs(hdr=hdr)
    H = imobj.height
    W = imobj.width
    # CCD center in (1-indexed) pixel coords -> RA,Dec.
    ccdra, ccddec = wcs.pixelxy2radec((W+1) / 2.0, (H+1) / 2.0)
    ccds['ra'] = ccdra
    ccds['dec'] = ccddec
    t0= ptime('header-info',t0)
    # Quick check for PsfEx file
    psf = imobj.read_psf_model(0., 0., pixPsf=True)
    if psf.psfex.sampling == 0.:
        print('PsfEx model has SAMPLING=0')
        nacc = psf.header.get('ACCEPTED')
        print('PsfEx model number of stars accepted:', nacc)
        return imobj.return_on_error(err_message='Bad PSF model', ccds=ccds)
    dq,dqhdr = imobj.read_dq(header=True)
    invvar = imobj.read_invvar(dq=dq)
    img = imobj.read_image()
    imobj.fix_saturation(img, dq, invvar, primhdr, hdr, None)
    if dq is not None:
        dq = imobj.remap_dq(dq, dqhdr)
    # Compute sig1 before rescaling... well, not quite
    # sig1 = per-pixel noise (in e-/sec) from the median good-pixel invvar.
    mediv = np.median(invvar[(invvar > 0) * (dq == 0)])
    mediv = imobj.scale_weight(mediv)
    imobj.sig1 = (1. / np.sqrt(mediv)) / imobj.exptime
    ccds['sig1'] = imobj.sig1
    invvar = imobj.remap_invvar(invvar, primhdr, img, dq)
    invvar = imobj.scale_weight(invvar)
    img = imobj.scale_image(img)
    t0= ptime('read image',t0)
    # Measure the sky brightness and (sky) noise level.
    zp0 = imobj.nominal_zeropoint(imobj.band)
    # Bunch of sky estimates
    #print('Computing the sky background.')
    sky_img, skymed, skyrms = estimate_sky_from_pixels(img)
    print('sky from median of image = %.2f' % skymed)
    # Sky surface brightness assuming the nominal zeropoint.
    skybr = zp0 - 2.5*np.log10(skymed / imobj.pixscale / imobj.pixscale / imobj.exptime)
    print('Sky brightness: {:.3f} mag/arcsec^2 (assuming nominal zeropoint)'.format(skybr))
    ccds['skyrms'] = skyrms / imobj.exptime # e/sec
    ccds['skycounts'] = skymed / imobj.exptime # [electron/pix]
    ccds['skysb'] = skybr # [mag/arcsec^2]
    t0= ptime('measure-sky',t0)
    # Load PS1 & Gaia catalogues
    phot = None
    if sdss_photom:
        try:
            phot = sdsscat(ccdwcs=wcs).get_stars(magrange=None)
        except OSError as e:
            print('No SDSS stars found for this image -- outside the SDSS footprint?', e)
    else:
        try:
            phot = ps1cat(ccdwcs=wcs).get_stars(magrange=None)
        except OSError as e:
            print('No PS1 stars found for this image -- outside the PS1 footprint, or in the Galactic plane?', e)
    if phot is not None and len(phot) == 0:
        phot = None
    if sdss_photom:
        name = 'sdss'
    else:
        name = 'ps1'
    if phot is not None:
        # Apply camera-specific quality cuts to the photometric calibrators.
        phot.cut(imobj.get_photometric_calibrator_cuts(name, phot))
        if len(phot) == 0:
            phot = None
        else:
            # Convert to Legacy Survey mags
            phot.legacy_survey_mag = imobj.photometric_calibrator_to_observed(name, phot)
            print(len(phot), 'photometric calibrator stars')
    gaia = GaiaCatalog().get_catalog_in_wcs(wcs)
    assert(gaia is not None)
    assert(len(gaia) > 0)
    gaia = GaiaCatalog.catalog_nantozero(gaia)
    assert(gaia is not None)
    print(len(gaia), 'Gaia stars')
    # Cap the Gaia list at the brightest maxgaia stars.
    maxgaia = 10000
    if len(gaia) > maxgaia:
        I = np.argsort(gaia.phot_g_mean_mag)
        print('Min mag:', gaia.phot_g_mean_mag[I[0]])
        gaia.cut(I[:maxgaia])
        print('Cut to', len(gaia), 'Gaia stars')
    t0= Time()
    # Now put Gaia stars into the image and re-fit their centroids
    # and fluxes using the tractor with the PsfEx PSF model.
    # assume that the CP WCS has gotten us to within a few pixels
    # of the right answer. Find Gaia stars, initialize Tractor
    # sources there, optimize them and see how much they want to
    # move.
    if splinesky:
        sky = imobj.read_sky_model()
        print('Instantiating and subtracting sky model')
        skymod = np.zeros_like(img)
        sky.addTo(skymod)
        # Apply the same transformation that was applied to the image...
        skymod = imobj.scale_image(skymod)
        fit_img = img - skymod
    else:
        fit_img = img - sky_img
    # after sky subtraction, apply optional per-amp relative zeropoints.
    imobj.apply_amp_correction(fit_img, invvar, 0, 0)
    with np.errstate(invalid='ignore'):
        # sqrt(0.) can trigger complaints;
        # https://github.com/numpy/numpy/issues/11448
        ierr = np.sqrt(invvar)
    # Move Gaia stars to the epoch of this image.
    gaia.rename('ra',  'ra_gaia')
    gaia.rename('dec', 'dec_gaia')
    gaia.rename('source_id', 'gaia_sourceid')
    ra,dec = radec_at_mjd(gaia.ra_gaia, gaia.dec_gaia,
                          gaia.ref_epoch.astype(float),
                          gaia.pmra, gaia.pmdec, gaia.parallax, imobj.mjdobs)
    gaia.ra_now = ra
    gaia.dec_now = dec
    # Gaia mag errors from the flux-over-error columns (S/N floored at 1).
    for b in ['g', 'bp', 'rp']:
        sn = gaia.get('phot_%s_mean_flux_over_error' % b)
        magerr = np.abs(2.5/np.log(10.) * 1./np.fmax(1., sn))
        gaia.set('phot_%s_mean_mag_error' % b, magerr)
    gaia.flux0 = np.ones(len(gaia), np.float32)
    # we set 'astrom' and omit 'photom'; it will get filled in with zeros.
    gaia.astrom = np.ones(len(gaia), bool)
    refs = [gaia]
    if phot is not None:
        # Photometry
        # Initial flux estimate, from nominal zeropoint
        phot.flux0 = (10.**((zp0 - phot.legacy_survey_mag) / 2.5) * imobj.exptime
                      ).astype(np.float32)
        if sdss_photom:
            phot.ra_sdss  = phot.ra.copy()
            phot.dec_sdss = phot.dec.copy()
            phot.ra_phot = phot.ra_sdss
            phot.dec_phot = phot.dec_sdss
            bands = 'ugriz'
            for band in bands:
                i = sdsscat.sdssband.get(band, None)
                if i is None:
                    print('No band', band, 'in SDSS catalog')
                    continue
                phot.set('sdss_'+band.lower(), phot.psfmag[:,i].astype(np.float32))
            phot_cols = [
                ('ra_sdss', np.double),
                ('dec_sdss', np.double),
                ('sdss_u', np.float32),
                ('sdss_g', np.float32),
                ('sdss_r', np.float32),
                ('sdss_i', np.float32),
                ('sdss_z', np.float32),
            ]
        else:
            # we don't have/use proper motions for PS1 stars
            phot.rename('ra_ok',  'ra_now')
            phot.rename('dec_ok', 'dec_now')
            phot.ra_ps1  = phot.ra_now.copy()
            phot.dec_ps1 = phot.dec_now.copy()
            phot.ra_phot = phot.ra_ps1
            phot.dec_phot = phot.dec_ps1
            phot.ps1_objid  = phot.obj_id
            bands = 'grizY'
            for band in bands:
                i = ps1cat.ps1band.get(band, None)
                if i is None:
                    print('No band', band, 'in PS1 catalog')
                    continue
                phot.set('ps1_'+band.lower(), phot.median[:,i].astype(np.float32))
            phot_cols = [
                ('ps1_objid', np.int64),
                ('ra_ps1', np.double),
                ('dec_ps1', np.double),
                ('ps1_g', np.float32),
                ('ps1_r', np.float32),
                ('ps1_i', np.float32),
                ('ps1_z', np.float32),
                ('ps1_y', np.float32),
            ]
        # we set 'photom' and omit 'astrom'; it will get filled in with zeros.
        phot.photom = np.ones (len(phot), bool)
        # Match photometric stars to Gaia stars within 1".
        I,J,_ = match_radec(gaia.ra_gaia, gaia.dec_gaia,
                            phot.ra_phot, phot.dec_phot, 1./3600.,
                            nearest=True)
        print(len(I), 'of', len(gaia), 'Gaia and', len(phot), 'photometric cal stars matched')
        # Merged = photocal stars + unmatched Gaia
        if len(I):
            # Merge columns for the matched stars
            for c in gaia.get_columns():
                G = gaia.get(c)
                # If column exists in both (eg, ra_now, dec_now), override
                # the PHOT value with the Gaia value; except for "photom".
                if c in phot.get_columns():
                    X = phot.get(c)
                else:
                    X = np.zeros(len(phot), G.dtype)
                X[J] = G[I]
                phot.set(c, X)
            # unmatched Gaia stars
            unmatched = np.ones(len(gaia), bool)
            unmatched[I] = False
            gaia.cut(unmatched)
            del unmatched
        refs.append(phot)
    if len(refs) == 1:
        refs = refs[0]
    else:
        refs = merge_tables(refs, columns='fillzero')
    # Fixed output schema for the reference-star table.
    cols = ([('ra_gaia', np.double),
             ('dec_gaia', np.double),
             ('gaia_sourceid', np.int64),
             ('phot_g_mean_mag', np.float32),
             ('phot_g_mean_mag_error', np.float32),
             ('phot_bp_mean_mag', np.float32),
             ('phot_bp_mean_mag_error', np.float32),
             ('phot_rp_mean_mag', np.float32),
             ('phot_rp_mean_mag_error', np.float32),
             ('ra_phot', np.double),
             ('dec_phot', np.double),]
            + phot_cols + [
             ('ra_now', np.double),
             ('dec_now', np.double),
             ('flux0', np.float32),
             ('legacy_survey_mag', np.float32),
             ('psfmag', np.float32),
             ('astrom', bool),
             ('photom', bool),
            ])
    refcols = refs.get_columns()
    # Add any missing schema columns (zero-filled)...
    for c,dt in cols:
        if not c in refcols:
            refs.set(c, np.zeros(len(refs), dt))
    refcols = refs.get_columns()
    wantcols = dict(cols)
    # ...and drop any columns not in the schema.
    for c in refcols:
        if not c in wantcols:
            refs.delete_column(c)
            continue
    # Run tractor fitting on the ref stars, using the PsfEx model.
    phot = tractor_fit_sources(imobj, wcs, refs.ra_now, refs.dec_now, refs.flux0,
                               fit_img, ierr, psf)
    print('Got photometry results for', len(phot), 'reference stars')
    if len(phot) == 0:
        return imobj.return_on_error('No photometry available',ccds=ccds)
    # Cut to ref stars that were photometered
    refs.cut(phot.iref)
    phot.delete_column('iref')
    refs.delete_column('flux0')
    with np.errstate(divide='ignore'):
        phot.flux_sn = (phot.flux / phot.dflux)
    phot.flux_sn[phot.dflux == 0] = 0.
    # Astrometric residuals (fit minus Gaia-predicted), in arcsec.
    phot.raoff  = (refs.ra_now - phot.ra_fit ) * 3600. * np.cos(np.deg2rad(refs.dec_now))
    phot.decoff = (refs.dec_now - phot.dec_fit) * 3600.
    dra  = phot.raoff [refs.astrom]
    ddec = phot.decoff[refs.astrom]
    raoff  = np.median(dra)
    decoff = np.median(ddec)
    rastd  = np.std(dra)
    decstd = np.std(ddec)
    ra_clip, _, _ = sigmaclip(dra, low=3., high=3.)
    rarms = getrms(ra_clip)
    dec_clip, _, _ = sigmaclip(ddec, low=3., high=3.)
    decrms = getrms(dec_clip)
    # For astrom, since we have Gaia everywhere, count the number
    # that pass the sigma-clip, ie, the number of stars that
    # corresponds to the reported offset and scatter (in RA).
    nastrom = len(ra_clip)
    print('RA, Dec offsets (arcsec): %.4f, %.4f' % (raoff, decoff))
    print('RA, Dec stddev (arcsec): %.4f, %.4f' % (rastd, decstd))
    print('RA, Dec RMS (arcsec): %.4f, %.4f' % (rarms, decrms))
    # Instrumental PSF magnitudes for stars with positive fitted flux.
    ok, = np.nonzero(phot.flux > 0)
    phot.instpsfmag = np.zeros(len(phot), np.float32)
    phot.instpsfmag[ok] = -2.5*np.log10(phot.flux[ok] / imobj.exptime)
    # Uncertainty on psfmag
    phot.dpsfmag = np.zeros(len(phot), np.float32)
    phot.dpsfmag[ok] = np.abs((-2.5 / np.log(10.)) * phot.dflux[ok] / phot.flux[ok])
    # Record the DQ bitmask at each star's (clipped) fitted pixel position.
    H,W = dq.shape
    phot.bitmask = dq[np.clip(phot.y1, 0, H-1).astype(int),
                      np.clip(phot.x1, 0, W-1).astype(int)]
    phot.psfmag = np.zeros(len(phot), np.float32)
    print('Flux S/N min/median/max: %.1f / %.1f / %.1f' %
          (phot.flux_sn.min(), np.median(phot.flux_sn), phot.flux_sn.max()))
    # Note that this is independent of whether we have reference stars
    # (eg, will work where we don't have PS1)
    nphotom = np.sum(phot.flux_sn > 5.)
    # Zeropoint: median of (calibrator mag - instrumental mag) for stars
    # in the photometric sample within the per-band magnitude window.
    dmag = refs.legacy_survey_mag - phot.instpsfmag
    maglo, maghi = MAGLIM[imobj.band]
    dmag = dmag[refs.photom &
                (refs.legacy_survey_mag > maglo) &
                (refs.legacy_survey_mag < maghi) &
                np.isfinite(dmag)]
    if len(dmag):
        print('Zeropoint: using', len(dmag), 'good stars')
        dmag, _, _ = sigmaclip(dmag, low=2.5, high=2.5)
        print('Zeropoint: using', len(dmag), 'stars after sigma-clipping')
        zptstd = np.std(dmag)
        zptmed = np.median(dmag)
        dzpt = zptmed - zp0
        kext = imobj.extinction(imobj.band)
        # Transparency relative to nominal, corrected for extinction*airmass.
        transp = 10.**(-0.4 * (-dzpt - kext * (airmass - 1.0)))
        print('Number of stars used for zeropoint median %d' % nphotom)
        print('Zeropoint %.4f' % zptmed)
        print('Offset from nominal: %.4f' % dzpt)
        print('Scatter: %.4f' % zptstd)
        print('Transparency %.4f' % transp)
        ok = (phot.instpsfmag != 0)
        phot.psfmag[ok] = phot.instpsfmag[ok] + zptmed
        from tractor.brightness import NanoMaggies
        # Express sig1 in nanomaggy units using the measured zeropoint.
        zpscale = NanoMaggies.zeropointToScale(zptmed)
        ccds['sig1'] /= zpscale
    else:
        dzpt = 0.
        zptmed = 0.
        zptstd = 0.
        transp = 0.
    for c in ['x0','y0','x1','y1','flux','raoff','decoff', 'psfmag',
              'dflux','dx','dy']:
        phot.set(c, phot.get(c).astype(np.float32))
    phot.rename('x0', 'x_ref')
    phot.rename('y0', 'y_ref')
    phot.rename('x1', 'x_fit')
    phot.rename('y1', 'y_fit')
    phot.add_columns_from(refs)
    # Save CCD-level information in the per-star table.
    phot.ccd_raoff  = np.zeros(len(phot), np.float32) + raoff
    phot.ccd_decoff = np.zeros(len(phot), np.float32) + decoff
    phot.ccd_phoff  = np.zeros(len(phot), np.float32) + dzpt
    phot.ccd_zpt    = np.zeros(len(phot), np.float32) + zptmed
    phot.expnum  = np.zeros(len(phot), np.int64)
    # Can't just add it as in previous lines, because of weird int64 type issues in
    # fitsio (see https://github.com/legacysurvey/legacypipe/issues/478)
    phot.expnum += imobj.expnum
    phot.ccdname = np.array([imobj.ccdname] * len(phot))
    phot.filter  = np.array([imobj.band] * len(phot))
    # pad ccdname to 4 characters (Bok: "CCD1")
    if len(imobj.ccdname) < 4:
        phot.ccdname = phot.ccdname.astype('S4')
    phot.exptime = np.zeros(len(phot), np.float32) + imobj.exptime
    phot.gain    = np.zeros(len(phot), np.float32) + ccds['gain']
    phot.airmass = np.zeros(len(phot), np.float32) + airmass
    import photutils
    # Aperture photometry in fixed arcsec-diameter apertures.
    apertures_arcsec_diam = [6, 7, 8]
    for arcsec_diam in apertures_arcsec_diam:
        ap = photutils.CircularAperture(np.vstack((phot.x_fit, phot.y_fit)).T,
                                        arcsec_diam / 2. / imobj.pixscale)
        with np.errstate(divide='ignore'):
            err = 1./ierr
        apphot = photutils.aperture_photometry(fit_img, ap,
                                               error=err, mask=(ierr==0))
        phot.set('apflux_%i' % arcsec_diam,
                 apphot.field('aperture_sum').data.astype(np.float32))
        phot.set('apflux_%i_err' % arcsec_diam,
                 apphot.field('aperture_sum_err').data.astype(np.float32))
    # Add to the zeropoints table
    ccds['raoff']  = raoff
    ccds['decoff'] = decoff
    ccds['rastddev']  = rastd
    ccds['decstddev'] = decstd
    ccds['rarms']  = rarms
    ccds['decrms'] = decrms
    ccds['phoff'] = dzpt
    ccds['phrms'] = zptstd
    ccds['zpt'] = zptmed
    ccds['nstars_photom'] = nphotom
    ccds['nstars_astrom'] = nastrom
    # .ra,.dec = Gaia else PS1
    phot.ra  = phot.ra_gaia
    phot.dec = phot.dec_gaia
    I, = np.nonzero(phot.ra == 0)
    phot.ra [I] = phot.ra_phot [I]
    phot.dec[I] = phot.dec_phot[I]
    # Create subset table for Eddie's ubercal
    cols = ([
        'ra', 'dec', 'flux', 'dflux', 'chi2', 'fracmasked', 'instpsfmag',
        'dpsfmag',
        'bitmask', 'x_fit', 'y_fit', 'gaia_sourceid', 'ra_gaia', 'dec_gaia',
        'phot_g_mean_mag', 'phot_bp_mean_mag', 'phot_rp_mean_mag',
        'phot_g_mean_mag_error', 'phot_bp_mean_mag_error',
        'phot_rp_mean_mag_error',
    ] + [c for c,t in phot_cols] + [
        'legacy_survey_mag', 'psfmag',
        'expnum', 'ccdname', 'exptime', 'gain', 'airmass', 'filter',
        'apflux_6', 'apflux_7', 'apflux_8',
        'apflux_6_err', 'apflux_7_err', 'apflux_8_err',
        'ra_now', 'dec_now', 'ra_fit', 'dec_fit', 'x_ref', 'y_ref'
    ])
    for c in phot.get_columns():
        if not c in cols:
            phot.delete_column(c)
    t0= ptime('all-computations-for-this-ccd',t0)
    # Plots for comparing to Arjuns zeropoints*.ps
    verboseplots = False
    if verboseplots:
        imobj.make_plots(phot,dmag,ccds['zpt'],transp)
        t0= ptime('made-plots',t0)
    return ccds, phot
def tractor_fit_sources(imobj, wcs, ref_ra, ref_dec, ref_flux, img, ierr,
                        psf, normalize_psf=True):
    """Fit position and flux of each reference star with the Tractor.

    For every (ra, dec) reference position, extract a small postage stamp,
    build a PixelizedPSF from the PsfEx model at that location, and optimize
    a single point source's position and flux against the stamp.

    Args:
        imobj: image object (currently unused in the body; kept for
            interface compatibility with callers).
        wcs: WCS used to convert reference RA,Dec to pixel coordinates.
        ref_ra, ref_dec: arrays of reference-star sky positions (deg).
        ref_flux: initial flux estimates, same length as ref_ra.
        img: sky-subtracted image pixels.
        ierr: inverse-error (1/sigma) map matching img.
        psf: PsfEx PSF model providing getImage(x, y).
        normalize_psf: if True, normalize the PSF stamp to unit sum.

    Returns:
        fits_table with zero-indexed x0,y0 (initial) and x1,y1 (fitted)
        positions, flux and uncertainties, chi2, fracmasked, psfsum, the
        index iref into the input arrays, and ra_fit,dec_fit.
    """
    import tractor
    from tractor import PixelizedPSF
    from tractor.brightness import LinearPhotoCal
    plots = False
    plot_this = False
    if plots:
        from astrometry.util.plotutils import PlotSequence
        ps = PlotSequence('astromfit')
    print('Fitting positions & fluxes of %i stars' % len(ref_ra))
    cal = fits_table()
    # These x0,y0,x1,y1 are zero-indexed coords.
    cal.x0 = []
    cal.y0 = []
    cal.x1 = []
    cal.y1 = []
    cal.flux = []
    cal.dx = []
    cal.dy = []
    cal.dflux = []
    cal.psfsum = []
    cal.iref = []
    cal.chi2 = []
    cal.fracmasked = []
    nzeroivar = 0
    noffim = 0
    for istar,(ra,dec) in enumerate(zip(ref_ra, ref_dec)):
        # radec2pixelxy returns 1-indexed coords; convert to 0-indexed.
        _,x,y = wcs.radec2pixelxy(ra, dec)
        x -= 1
        y -= 1
        # Fitting radius
        R = 10
        H,W = img.shape
        # Skip stars whose (2R+1)-pixel stamp does not fit on the image.
        xlo = int(x - R)
        ylo = int(y - R)
        if xlo < 0 or ylo < 0:
            noffim += 1
            continue
        xhi = xlo + R*2
        yhi = ylo + R*2
        if xhi >= W or yhi >= H:
            noffim += 1
            continue
        subimg = img[ylo:yhi+1, xlo:xhi+1]
        # FIXME -- check that ierr is correct
        subie = ierr[ylo:yhi+1, xlo:xhi+1]
        # NOTE: dead branch kept for reference (earlier constant-PSF approach).
        if False:
            subpsf = psf.constantPsfAt(x, y)
            psfsum = np.sum(subpsf.img)
            if normalize_psf:
                # print('Normalizing PsfEx model with sum:', s)
                subpsf.img /= psfsum
        # Evaluate the PsfEx model at the star's position and crop it to
        # a (2*(R+5)+1)-pixel stamp centered on the PSF image center.
        psfimg = psf.getImage(x, y)
        ph,pw = psf.img.shape
        psfsum = np.sum(psfimg)
        if normalize_psf:
            psfimg /= psfsum
        sz = R + 5
        psfimg = psfimg[ph//2-sz:ph//2+sz+1, pw//2-sz:pw//2+sz+1]
        subpsf = PixelizedPSF(psfimg)
        if np.all(subie == 0):
            nzeroivar += 1
            # print('Inverse-variance map is all zero')
            continue
        tim = tractor.Image(data=subimg, inverr=subie, psf=subpsf)
        flux0 = ref_flux[istar]
        x0 = x - xlo
        y0 = y - ylo
        src = tractor.PointSource(tractor.PixPos(x0, y0), tractor.Flux(flux0))
        tr = tractor.Tractor([tim], [src])
        # Only the source parameters are fit; the image calibration is frozen.
        tr.freezeParam('images')
        optargs = dict(priors=False, shared_params=False,
                       alphas=[0.1, 0.3, 1.0])
        # Do a quick flux fit first
        src.freezeParam('pos')
        pc = tim.photocal
        tim.photocal = LinearPhotoCal(1.)
        tr.optimize_forced_photometry()
        tim.photocal = pc
        src.thawParam('pos')
        if plots and plot_this:
            import pylab as plt
            plt.clf()
            plt.subplot(2,2,1)
            plt.imshow(subimg, interpolation='nearest', origin='lower')
            plt.colorbar()
            plt.subplot(2,2,2)
            mod = tr.getModelImage(0)
            plt.imshow(mod, interpolation='nearest', origin='lower')
            plt.colorbar()
            plt.subplot(2,2,3)
            plt.imshow((subimg - mod) * subie, interpolation='nearest', origin='lower')
            plt.colorbar()
            plt.suptitle('Before')
            ps.savefig()
        # Now the position and flux fit
        # Iterate until the log-probability stops improving (max 50 steps).
        for _ in range(50):
            dlnp,_,_ = tr.optimize(**optargs)
            if dlnp == 0:
                break
        variance = tr.optimize(variance=True, just_variance=True, **optargs)
        # Yuck -- if inverse-variance is all zero, weird-shaped result...
        if len(variance) == 4 and variance[3] is None:
            print('No variance estimate available')
            continue
        mod = tr.getModelImage(0)
        chi = (subimg - mod) * subie
        psfimg = mod / mod.sum()
        # profile-weighted chi-squared
        cal.chi2.append(np.sum(chi**2 * psfimg))
        # profile-weighted fraction of masked pixels
        cal.fracmasked.append(np.sum(psfimg * (subie == 0)))
        cal.psfsum.append(psfsum)
        cal.x0.append(x0 + xlo)
        cal.y0.append(y0 + ylo)
        cal.x1.append(src.pos.x + xlo)
        cal.y1.append(src.pos.y + ylo)
        cal.flux.append(src.brightness.getValue())
        cal.iref.append(istar)
        # Parameter order in 'variance' is (x, y, flux).
        std = np.sqrt(variance)
        cal.dx.append(std[0])
        cal.dy.append(std[1])
        cal.dflux.append(std[2])
        if plots and plot_this:
            plt.clf()
            plt.subplot(2,2,1)
            plt.imshow(subimg, interpolation='nearest', origin='lower')
            plt.colorbar()
            plt.subplot(2,2,2)
            mod = tr.getModelImage(0)
            plt.imshow(mod, interpolation='nearest', origin='lower')
            plt.colorbar()
            plt.subplot(2,2,3)
            plt.imshow((subimg - mod) * subie, interpolation='nearest', origin='lower')
            plt.colorbar()
            plt.suptitle('After')
            ps.savefig()
    if nzeroivar > 0:
        print('Zero ivar for %d stars' % nzeroivar)
    if noffim > 0:
        print('Off image for %d stars' % noffim)
    cal.to_np_arrays()
    # Convert fitted 0-indexed pixel positions back to RA,Dec (1-indexed WCS).
    cal.ra_fit,cal.dec_fit = wcs.pixelxy2radec(cal.x1 + 1, cal.y1 + 1)
    return cal
# Script entry point: parse command-line options and run the zeropoints
# pipeline for each requested image.
if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"numpy.clip",
"astrometry.util.multiproc.multiproc",
"numpy.sqrt",
"numpy.log10",
"tractor.Tractor",
"numpy.log",
"scipy.stats.sigmaclip",
"numpy.argsort",
"tractor.PixPos",
"numpy.isfinite",
"astropy.table.vstack",
"astrometry.util.ttime.Time",
"tractor.brightness.Nan... | [((818, 867), 'logging.getLogger', 'logging.getLogger', (['"""legacyzpts.legacy_zeropoints"""'], {}), "('legacyzpts.legacy_zeropoints')\n", (835, 867), False, 'import logging\n'), ((933, 956), 'legacypipe.utils.log_debug', 'log_debug', (['logger', 'args'], {}), '(logger, args)\n', (942, 956), False, 'from legacypipe.utils import log_debug\n'), ((1206, 1212), 'astrometry.util.ttime.Time', 'Time', ([], {}), '()\n', (1210, 1212), False, 'from astrometry.util.ttime import Time\n'), ((1527, 1539), 'astrometry.util.fits.fits_table', 'fits_table', ([], {}), '()\n', (1537, 1539), False, 'from astrometry.util.fits import fits_table, merge_tables\n'), ((6579, 6598), 'legacyzpts.annotate_ccds.init_annotations', 'init_annotations', (['T'], {}), '(T)\n', (6595, 6598), False, 'from legacyzpts.annotate_ccds import annotate, init_annotations\n'), ((6608, 6628), 'numpy.nonzero', 'np.nonzero', (['T.ccdzpt'], {}), '(T.ccdzpt)\n', (6618, 6628), True, 'import numpy as np\n'), ((7166, 7172), 'astrometry.util.ttime.Time', 'Time', ([], {}), '()\n', (7170, 7172), False, 'from astrometry.util.ttime import Time\n'), ((12408, 12424), 'astropy.table.vstack', 'vstack', (['all_ccds'], {}), '(all_ccds)\n', (12414, 12424), False, 'from astropy.table import Table, vstack\n'), ((12574, 12591), 'numpy.isfinite', 'np.isfinite', (['zpts'], {}), '(zpts)\n', (12585, 12591), True, 'import numpy as np\n'), ((17084, 17108), 'os.rename', 'os.rename', (['tempfn', 'outfn'], {}), '(tempfn, outfn)\n', (17093, 17108), False, 'import os\n'), ((17321, 17327), 'astrometry.util.ttime.Time', 'Time', ([], {}), '()\n', (17325, 17327), False, 'from astrometry.util.ttime import Time\n'), ((17707, 17723), 'fitsio.FITSHDR', 'fitsio.FITSHDR', ([], {}), '()\n', (17721, 17723), False, 'import fitsio\n'), ((18747, 18771), 'numpy.isfinite', 'np.isfinite', (["ccds['zpt']"], {}), "(ccds['zpt'])\n", (18758, 18771), True, 'import numpy as np\n'), ((19409, 19432), 'os.path.basename', 'os.path.basename', 
(['imgfn'], {}), '(imgfn)\n', (19425, 19432), False, 'import os\n'), ((19445, 19467), 'os.path.dirname', 'os.path.dirname', (['imgfn'], {}), '(imgfn)\n', (19460, 19467), False, 'import os\n'), ((19483, 19506), 'os.path.basename', 'os.path.basename', (['dirnm'], {}), '(dirnm)\n', (19499, 19506), False, 'import os\n'), ((20136, 20378), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""Generate a legacypipe-compatible (survey) CCDs file from a set of reduced imaging."""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'Generate a legacypipe-compatible (survey) CCDs file from a set of reduced imaging.'\n )\n", (20159, 20378), False, 'import argparse\n'), ((24212, 24218), 'astrometry.util.ttime.Time', 'Time', ([], {}), '()\n', (24216, 24218), False, 'from astrometry.util.ttime import Time\n'), ((24540, 24572), 'astrometry.util.multiproc.multiproc', 'multiproc', ([], {'nthreads': '(threads or 1)'}), '(nthreads=threads or 1)\n', (24549, 24572), False, 'from astrometry.util.multiproc import multiproc\n'), ((24666, 24737), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'lvl', 'format': '"""%(message)s"""', 'stream': 'sys.stdout'}), "(level=lvl, format='%(message)s', stream=sys.stdout)\n", (24685, 24737), False, 'import logging\n'), ((24788, 24842), 'legacypipe.survey.LegacySurveyData', 'LegacySurveyData', ([], {'survey_dir': "measureargs['survey_dir']"}), "(survey_dir=measureargs['survey_dir'])\n", (24804, 24842), False, 'from legacypipe.survey import LegacySurveyData\n'), ((26960, 26966), 'astrometry.util.ttime.Time', 'Time', ([], {}), '()\n', (26964, 26966), False, 'from astrometry.util.ttime import Time\n'), ((27085, 27124), 'scipy.stats.sigmaclip', 'sigmaclip', (['img'], {'low': 'nsigma', 'high': 'nsigma'}), '(img, low=nsigma, high=nsigma)\n', (27094, 27124), False, 'from scipy.stats import sigmaclip\n'), ((27137, 27157), 'numpy.median', 
'np.median', (['clip_vals'], {}), '(clip_vals)\n', (27146, 27157), True, 'import numpy as np\n'), ((27170, 27187), 'numpy.std', 'np.std', (['clip_vals'], {}), '(clip_vals)\n', (27176, 27187), True, 'import numpy as np\n'), ((27483, 27489), 'astrometry.util.ttime.Time', 'Time', ([], {}), '()\n', (27487, 27489), False, 'from astrometry.util.ttime import Time\n'), ((29868, 29911), 'numpy.median', 'np.median', (['invvar[(invvar > 0) * (dq == 0)]'], {}), '(invvar[(invvar > 0) * (dq == 0)])\n', (29877, 29911), True, 'import numpy as np\n'), ((31970, 32005), 'legacypipe.gaiacat.GaiaCatalog.catalog_nantozero', 'GaiaCatalog.catalog_nantozero', (['gaia'], {}), '(gaia)\n', (31999, 32005), False, 'from legacypipe.gaiacat import GaiaCatalog\n'), ((32306, 32312), 'astrometry.util.ttime.Time', 'Time', ([], {}), '()\n', (32310, 32312), False, 'from astrometry.util.ttime import Time\n'), ((39485, 39499), 'numpy.median', 'np.median', (['dra'], {}), '(dra)\n', (39494, 39499), True, 'import numpy as np\n'), ((39513, 39528), 'numpy.median', 'np.median', (['ddec'], {}), '(ddec)\n', (39522, 39528), True, 'import numpy as np\n'), ((39542, 39553), 'numpy.std', 'np.std', (['dra'], {}), '(dra)\n', (39548, 39553), True, 'import numpy as np\n'), ((39567, 39579), 'numpy.std', 'np.std', (['ddec'], {}), '(ddec)\n', (39573, 39579), True, 'import numpy as np\n'), ((39600, 39633), 'scipy.stats.sigmaclip', 'sigmaclip', (['dra'], {'low': '(3.0)', 'high': '(3.0)'}), '(dra, low=3.0, high=3.0)\n', (39609, 39633), False, 'from scipy.stats import sigmaclip\n'), ((39681, 39715), 'scipy.stats.sigmaclip', 'sigmaclip', (['ddec'], {'low': '(3.0)', 'high': '(3.0)'}), '(ddec, low=3.0, high=3.0)\n', (39690, 39715), False, 'from scipy.stats import sigmaclip\n'), ((40177, 40202), 'numpy.nonzero', 'np.nonzero', (['(phot.flux > 0)'], {}), '(phot.flux > 0)\n', (40187, 40202), True, 'import numpy as np\n'), ((40950, 40976), 'numpy.sum', 'np.sum', (['(phot.flux_sn > 5.0)'], {}), '(phot.flux_sn > 5.0)\n', (40956, 40976), 
True, 'import numpy as np\n'), ((44655, 44679), 'numpy.nonzero', 'np.nonzero', (['(phot.ra == 0)'], {}), '(phot.ra == 0)\n', (44665, 44679), True, 'import numpy as np\n'), ((46275, 46287), 'astrometry.util.fits.fits_table', 'fits_table', ([], {}), '()\n', (46285, 46287), False, 'from astrometry.util.fits import fits_table, merge_tables\n'), ((167, 188), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (181, 188), False, 'import matplotlib\n'), ((3573, 3596), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'cols'}), '(1, dtype=cols)\n', (3581, 3596), True, 'import numpy as np\n'), ((4433, 4461), 'numpy.zeros', 'np.zeros', (['nstars'], {'dtype': 'cols'}), '(nstars, dtype=cols)\n', (4441, 4461), True, 'import numpy as np\n'), ((6652, 6719), 'legacyzpts.annotate_ccds.annotate', 'annotate', (['T', 'survey', 'camera'], {'mp': 'mp', 'normalizePsf': '(True)', 'carryOn': '(True)'}), '(T, survey, camera, mp=mp, normalizePsf=True, carryOn=True)\n', (6660, 6719), False, 'from legacyzpts.annotate_ccds import annotate, init_annotations\n'), ((6818, 6833), 'numpy.mean', 'np.mean', (['(x ** 2)'], {}), '(x ** 2)\n', (6825, 6833), True, 'import numpy as np\n'), ((8477, 8513), 'legacypipe.survey.LegacySurveyData', 'LegacySurveyData', ([], {'survey_dir': 'blobdir'}), '(survey_dir=blobdir)\n', (8493, 8513), False, 'from legacypipe.survey import LegacySurveyData\n'), ((8652, 8687), 'legacypipe.survey.LegacySurveyData', 'LegacySurveyData', ([], {'survey_dir': 'zptdir'}), '(survey_dir=zptdir)\n', (8668, 8687), False, 'from legacypipe.survey import LegacySurveyData\n'), ((12471, 12495), 'astrometry.util.fits.merge_tables', 'merge_tables', (['all_photom'], {}), '(all_photom)\n', (12483, 12495), False, 'from astrometry.util.fits import fits_table, merge_tables\n'), ((12599, 12614), 'numpy.sum', 'np.sum', (['zptgood'], {}), '(zptgood)\n', (12605, 12614), True, 'import numpy as np\n'), ((12649, 12673), 'numpy.median', 'np.median', (['zpts[zptgood]'], {}), 
'(zpts[zptgood])\n', (12658, 12673), True, 'import numpy as np\n'), ((12720, 12738), 'numpy.isfinite', 'np.isfinite', (['phrms'], {}), '(phrms)\n', (12731, 12738), True, 'import numpy as np\n'), ((12772, 12789), 'numpy.sum', 'np.sum', (['phrmsgood'], {}), '(phrmsgood)\n', (12778, 12789), True, 'import numpy as np\n'), ((12826, 12853), 'numpy.median', 'np.median', (['phrms[phrmsgood]'], {}), '(phrms[phrmsgood])\n', (12835, 12853), True, 'import numpy as np\n'), ((16298, 16325), 'os.path.dirname', 'os.path.dirname', (['self.imgfn'], {}), '(self.imgfn)\n', (16313, 16325), False, 'import os\n'), ((16344, 16373), 'os.path.join', 'os.path.join', (['outdir', 'dirname'], {}), '(outdir, dirname)\n', (16356, 16373), False, 'import os\n'), ((16382, 16402), 'astrometry.util.file.trymakedirs', 'trymakedirs', (['basedir'], {}), '(basedir)\n', (16393, 16402), False, 'from astrometry.util.file import trymakedirs\n'), ((16423, 16451), 'os.path.basename', 'os.path.basename', (['self.imgfn'], {}), '(self.imgfn)\n', (16439, 16451), False, 'import os\n'), ((16716, 16760), 'os.path.join', 'os.path.join', (['basedir', "(base + '-photom.fits')"], {}), "(basedir, base + '-photom.fits')\n", (16728, 16760), False, 'import os\n'), ((16782, 16829), 'os.path.join', 'os.path.join', (['basedir', "(base + '-annotated.fits')"], {}), "(basedir, base + '-annotated.fits')\n", (16794, 16829), False, 'import os\n'), ((16919, 16941), 'os.path.dirname', 'os.path.dirname', (['outfn'], {}), '(outfn)\n', (16934, 16941), False, 'import os\n'), ((18779, 18794), 'numpy.sum', 'np.sum', (['zptgood'], {}), '(zptgood)\n', (18785, 18794), True, 'import numpy as np\n'), ((18817, 18848), 'numpy.median', 'np.median', (["ccds['zpt'][zptgood]"], {}), "(ccds['zpt'][zptgood])\n", (18826, 18848), True, 'import numpy as np\n'), ((19038, 19054), 'numpy.sum', 'np.sum', (['goodfwhm'], {}), '(goodfwhm)\n', (19044, 19054), True, 'import numpy as np\n'), ((19075, 19108), 'numpy.median', 'np.median', (["ccds['fwhm'][goodfwhm]"], 
{}), "(ccds['fwhm'][goodfwhm])\n", (19084, 19108), True, 'import numpy as np\n'), ((25048, 25086), 'os.path.join', 'os.path.join', (['survey.survey_dir', '"""zpt"""'], {}), "(survey.survey_dir, 'zpt')\n", (25060, 25086), False, 'import os\n'), ((25291, 25309), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (25305, 25309), False, 'import os\n'), ((26477, 26588), 'legacypipe.image.validate_version', 'validate_version', (['F.photomfn', '"""header"""', 'measure.expnum', 'measure.plver', 'measure.plprocid'], {'ext': '(1)', 'quiet': 'quiet'}), "(F.photomfn, 'header', measure.expnum, measure.plver,\n measure.plprocid, ext=1, quiet=quiet)\n", (26493, 26588), False, 'from legacypipe.image import validate_version\n'), ((27200, 27219), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (27208, 27219), True, 'import numpy as np\n'), ((32131, 32163), 'numpy.argsort', 'np.argsort', (['gaia.phot_g_mean_mag'], {}), '(gaia.phot_g_mean_mag)\n', (32141, 32163), True, 'import numpy as np\n'), ((32780, 32798), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (32793, 32798), True, 'import numpy as np\n'), ((33152, 33181), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (33163, 33181), True, 'import numpy as np\n'), ((33297, 33312), 'numpy.sqrt', 'np.sqrt', (['invvar'], {}), '(invvar)\n', (33304, 33312), True, 'import numpy as np\n'), ((36414, 36515), 'astrometry.libkd.spherematch.match_radec', 'match_radec', (['gaia.ra_gaia', 'gaia.dec_gaia', 'phot.ra_phot', 'phot.dec_phot', '(1.0 / 3600.0)'], {'nearest': '(True)'}), '(gaia.ra_gaia, gaia.dec_gaia, phot.ra_phot, phot.dec_phot, 1.0 /\n 3600.0, nearest=True)\n', (36425, 36515), False, 'from astrometry.libkd.spherematch import match_radec\n'), ((37491, 37529), 'astrometry.util.fits.merge_tables', 'merge_tables', (['refs'], {'columns': '"""fillzero"""'}), "(refs, columns='fillzero')\n", (37503, 37529), False, 'from astrometry.util.fits import fits_table, 
merge_tables\n'), ((39133, 39161), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (39144, 39161), True, 'import numpy as np\n'), ((40288, 40327), 'numpy.log10', 'np.log10', (['(phot.flux[ok] / imobj.exptime)'], {}), '(phot.flux[ok] / imobj.exptime)\n', (40296, 40327), True, 'import numpy as np\n'), ((41332, 41366), 'scipy.stats.sigmaclip', 'sigmaclip', (['dmag'], {'low': '(2.5)', 'high': '(2.5)'}), '(dmag, low=2.5, high=2.5)\n', (41341, 41366), False, 'from scipy.stats import sigmaclip\n'), ((41460, 41472), 'numpy.std', 'np.std', (['dmag'], {}), '(dmag)\n', (41466, 41472), True, 'import numpy as np\n'), ((41490, 41505), 'numpy.median', 'np.median', (['dmag'], {}), '(dmag)\n', (41499, 41505), True, 'import numpy as np\n'), ((42052, 42088), 'tractor.brightness.NanoMaggies.zeropointToScale', 'NanoMaggies.zeropointToScale', (['zptmed'], {}), '(zptmed)\n', (42080, 42088), False, 'from tractor.brightness import NanoMaggies\n'), ((43847, 43916), 'photutils.aperture_photometry', 'photutils.aperture_photometry', (['fit_img', 'ap'], {'error': 'err', 'mask': '(ierr == 0)'}), '(fit_img, ap, error=err, mask=ierr == 0)\n', (43876, 43916), False, 'import photutils\n'), ((46171, 46196), 'astrometry.util.plotutils.PlotSequence', 'PlotSequence', (['"""astromfit"""'], {}), "('astromfit')\n", (46183, 46196), False, 'from astrometry.util.plotutils import PlotSequence\n'), ((47483, 47497), 'numpy.sum', 'np.sum', (['psfimg'], {}), '(psfimg)\n', (47489, 47497), True, 'import numpy as np\n'), ((47655, 47675), 'tractor.PixelizedPSF', 'PixelizedPSF', (['psfimg'], {}), '(psfimg)\n', (47667, 47675), False, 'from tractor import PixelizedPSF\n'), ((47688, 47706), 'numpy.all', 'np.all', (['(subie == 0)'], {}), '(subie == 0)\n', (47694, 47706), True, 'import numpy as np\n'), ((47827, 47879), 'tractor.Image', 'tractor.Image', ([], {'data': 'subimg', 'inverr': 'subie', 'psf': 'subpsf'}), '(data=subimg, inverr=subie, psf=subpsf)\n', (47840, 47879), False, 
'import tractor\n'), ((48046, 48075), 'tractor.Tractor', 'tractor.Tractor', (['[tim]', '[src]'], {}), '([tim], [src])\n', (48061, 48075), False, 'import tractor\n'), ((48332, 48351), 'tractor.brightness.LinearPhotoCal', 'LinearPhotoCal', (['(1.0)'], {}), '(1.0)\n', (48346, 48351), False, 'from tractor.brightness import LinearPhotoCal\n'), ((50057, 50074), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (50064, 50074), True, 'import numpy as np\n'), ((1458, 1478), 'numpy.char.strip', 'np.char.strip', (['lines'], {}), '(lines)\n', (1471, 1478), True, 'import numpy as np\n'), ((9867, 9935), 'legacyzpts.merge_calibs.merge_splinesky', 'merge_splinesky', (['survey', 'img.expnum', 'ccds', 'skyoutfn', 'opts'], {'imgs': 'imgs'}), '(survey, img.expnum, ccds, skyoutfn, opts, imgs=imgs)\n', (9882, 9935), False, 'from legacyzpts.merge_calibs import merge_splinesky, merge_psfex\n'), ((10153, 10217), 'legacyzpts.merge_calibs.merge_psfex', 'merge_psfex', (['survey', 'img.expnum', 'ccds', 'psfoutfn', 'opts'], {'imgs': 'imgs'}), '(survey, img.expnum, ccds, psfoutfn, opts, imgs=imgs)\n', (10164, 10217), False, 'from legacyzpts.merge_calibs import merge_splinesky, merge_psfex\n'), ((10544, 10562), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (10558, 10562), False, 'import os\n'), ((11093, 11111), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (11107, 11111), False, 'import os\n'), ((11217, 11235), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (11231, 11235), False, 'import os\n'), ((11753, 11771), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (11767, 11771), False, 'import os\n'), ((11846, 11866), 'os.path.isfile', 'os.path.isfile', (['sefn'], {}), '(sefn)\n', (11860, 11866), False, 'import os\n'), ((14689, 14725), 'os.path.dirname', 'os.path.dirname', (['legacypipe.__file__'], {}), '(legacypipe.__file__)\n', (14704, 14725), False, 'import os\n'), ((16952, 16975), 'os.path.basename', 'os.path.basename', (['outfn'], 
{}), '(outfn)\n', (16968, 16975), False, 'import os\n'), ((26097, 26193), 'legacypipe.image.validate_version', 'validate_version', (['fn', '"""table"""', 'measure.expnum', 'measure.plver', 'measure.plprocid'], {'quiet': 'quiet'}), "(fn, 'table', measure.expnum, measure.plver, measure.\n plprocid, quiet=quiet)\n", (26113, 26193), False, 'from legacypipe.image import validate_version\n'), ((29973, 29987), 'numpy.sqrt', 'np.sqrt', (['mediv'], {}), '(mediv)\n', (29980, 29987), True, 'import numpy as np\n'), ((30513, 30579), 'numpy.log10', 'np.log10', (['(skymed / imobj.pixscale / imobj.pixscale / imobj.exptime)'], {}), '(skymed / imobj.pixscale / imobj.pixscale / imobj.exptime)\n', (30521, 30579), True, 'import numpy as np\n'), ((31866, 31879), 'legacypipe.gaiacat.GaiaCatalog', 'GaiaCatalog', ([], {}), '()\n', (31877, 31879), False, 'from legacypipe.gaiacat import GaiaCatalog\n'), ((39316, 39340), 'numpy.deg2rad', 'np.deg2rad', (['refs.dec_now'], {}), '(refs.dec_now)\n', (39326, 39340), True, 'import numpy as np\n'), ((41215, 41232), 'numpy.isfinite', 'np.isfinite', (['dmag'], {}), '(dmag)\n', (41226, 41232), True, 'import numpy as np\n'), ((43774, 43802), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (43785, 43802), True, 'import numpy as np\n'), ((47249, 47267), 'numpy.sum', 'np.sum', (['subpsf.img'], {}), '(subpsf.img)\n', (47255, 47267), True, 'import numpy as np\n'), ((47988, 48010), 'tractor.PixPos', 'tractor.PixPos', (['x0', 'y0'], {}), '(x0, y0)\n', (48002, 48010), False, 'import tractor\n'), ((48012, 48031), 'tractor.Flux', 'tractor.Flux', (['flux0'], {}), '(flux0)\n', (48024, 48031), False, 'import tractor\n'), ((48523, 48532), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (48530, 48532), True, 'import pylab as plt\n'), ((48545, 48565), 'pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (48556, 48565), True, 'import pylab as plt\n'), ((48576, 48635), 'pylab.imshow', 'plt.imshow', (['subimg'], 
{'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "(subimg, interpolation='nearest', origin='lower')\n", (48586, 48635), True, 'import pylab as plt\n'), ((48648, 48662), 'pylab.colorbar', 'plt.colorbar', ([], {}), '()\n', (48660, 48662), True, 'import pylab as plt\n'), ((48675, 48695), 'pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (48686, 48695), True, 'import pylab as plt\n'), ((48744, 48800), 'pylab.imshow', 'plt.imshow', (['mod'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "(mod, interpolation='nearest', origin='lower')\n", (48754, 48800), True, 'import pylab as plt\n'), ((48813, 48827), 'pylab.colorbar', 'plt.colorbar', ([], {}), '()\n', (48825, 48827), True, 'import pylab as plt\n'), ((48840, 48860), 'pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (48851, 48860), True, 'import pylab as plt\n'), ((48871, 48946), 'pylab.imshow', 'plt.imshow', (['((subimg - mod) * subie)'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "((subimg - mod) * subie, interpolation='nearest', origin='lower')\n", (48881, 48946), True, 'import pylab as plt\n'), ((48959, 48973), 'pylab.colorbar', 'plt.colorbar', ([], {}), '()\n', (48971, 48973), True, 'import pylab as plt\n'), ((48986, 49008), 'pylab.suptitle', 'plt.suptitle', (['"""Before"""'], {}), "('Before')\n", (48998, 49008), True, 'import pylab as plt\n'), ((49644, 49669), 'numpy.sum', 'np.sum', (['(chi ** 2 * psfimg)'], {}), '(chi ** 2 * psfimg)\n', (49650, 49669), True, 'import numpy as np\n'), ((49752, 49781), 'numpy.sum', 'np.sum', (['(psfimg * (subie == 0))'], {}), '(psfimg * (subie == 0))\n', (49758, 49781), True, 'import numpy as np\n'), ((50213, 50222), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (50220, 50222), True, 'import pylab as plt\n'), ((50235, 50255), 'pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (50246, 50255), True, 'import pylab as plt\n'), ((50266, 50325), 'pylab.imshow', 
'plt.imshow', (['subimg'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "(subimg, interpolation='nearest', origin='lower')\n", (50276, 50325), True, 'import pylab as plt\n'), ((50338, 50352), 'pylab.colorbar', 'plt.colorbar', ([], {}), '()\n', (50350, 50352), True, 'import pylab as plt\n'), ((50365, 50385), 'pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (50376, 50385), True, 'import pylab as plt\n'), ((50434, 50490), 'pylab.imshow', 'plt.imshow', (['mod'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "(mod, interpolation='nearest', origin='lower')\n", (50444, 50490), True, 'import pylab as plt\n'), ((50503, 50517), 'pylab.colorbar', 'plt.colorbar', ([], {}), '()\n', (50515, 50517), True, 'import pylab as plt\n'), ((50530, 50550), 'pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (50541, 50550), True, 'import pylab as plt\n'), ((50561, 50636), 'pylab.imshow', 'plt.imshow', (['((subimg - mod) * subie)'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "((subimg - mod) * subie, interpolation='nearest', origin='lower')\n", (50571, 50636), True, 'import pylab as plt\n'), ((50649, 50663), 'pylab.colorbar', 'plt.colorbar', ([], {}), '()\n', (50661, 50663), True, 'import pylab as plt\n'), ((50676, 50697), 'pylab.suptitle', 'plt.suptitle', (['"""After"""'], {}), "('After')\n", (50688, 50697), True, 'import pylab as plt\n'), ((11129, 11142), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (11138, 11142), False, 'import os\n'), ((11789, 11802), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (11798, 11802), False, 'import os\n'), ((11884, 11899), 'os.remove', 'os.remove', (['sefn'], {}), '(sefn)\n', (11893, 11899), False, 'import os\n'), ((19554, 19582), 'os.path.join', 'os.path.join', (['firstdir', 'base'], {}), '(firstdir, base)\n', (19566, 19582), False, 'import os\n'), ((23748, 23759), 'time.time', 'time.time', ([], {}), '()\n', (23757, 23759), False, 'import 
time\n'), ((33847, 33863), 'numpy.fmax', 'np.fmax', (['(1.0)', 'sn'], {}), '(1.0, sn)\n', (33854, 33863), True, 'import numpy as np\n'), ((34604, 34636), 'legacypipe.ps1cat.sdsscat.sdssband.get', 'sdsscat.sdssband.get', (['band', 'None'], {}), '(band, None)\n', (34624, 34636), False, 'from legacypipe.ps1cat import ps1cat, sdsscat\n'), ((35621, 35651), 'legacypipe.ps1cat.ps1cat.ps1band.get', 'ps1cat.ps1band.get', (['band', 'None'], {}), '(band, None)\n', (35639, 35651), False, 'from legacypipe.ps1cat import ps1cat, sdsscat\n'), ((40773, 40796), 'numpy.median', 'np.median', (['phot.flux_sn'], {}), '(phot.flux_sn)\n', (40782, 40796), True, 'import numpy as np\n'), ((43647, 43682), 'numpy.vstack', 'np.vstack', (['(phot.x_fit, phot.y_fit)'], {}), '((phot.x_fit, phot.y_fit))\n', (43656, 43682), True, 'import numpy as np\n'), ((25395, 25413), 'legacyzpts.psfzpt_cuts.read_bad_expid', 'read_bad_expid', (['fn'], {}), '(fn)\n', (25409, 25413), False, 'from legacyzpts.psfzpt_cuts import read_bad_expid\n'), ((30968, 30987), 'legacypipe.ps1cat.sdsscat', 'sdsscat', ([], {'ccdwcs': 'wcs'}), '(ccdwcs=wcs)\n', (30975, 30987), False, 'from legacypipe.ps1cat import ps1cat, sdsscat\n'), ((31174, 31192), 'legacypipe.ps1cat.ps1cat', 'ps1cat', ([], {'ccdwcs': 'wcs'}), '(ccdwcs=wcs)\n', (31180, 31192), False, 'from legacypipe.ps1cat import ps1cat, sdsscat\n'), ((40445, 40457), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (40451, 40457), True, 'import numpy as np\n'), ((40534, 40560), 'numpy.clip', 'np.clip', (['phot.y1', '(0)', '(H - 1)'], {}), '(phot.y1, 0, H - 1)\n', (40541, 40560), True, 'import numpy as np\n'), ((40594, 40620), 'numpy.clip', 'np.clip', (['phot.x1', '(0)', '(W - 1)'], {}), '(phot.x1, 0, W - 1)\n', (40601, 40620), True, 'import numpy as np\n'), ((33830, 33842), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (33836, 33842), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import torch
import torchvision
import torch.nn.functional as F
from torchvision import datasets,transforms,models
import matplotlib.pyplot as plt
#from train import get_pretrained_model
from torch import nn,optim
from PIL import Image
#%matplotlib inline
def load_dataset(data_direc):
    """Build the train/validation/test DataLoaders for an image dataset.

    Parameters
    ----------
    data_direc : str
        Root directory containing 'train', 'valid' and 'test' subfolders,
        each laid out as expected by ``torchvision.datasets.ImageFolder``.

    Returns
    -------
    tuple
        (trainloader, validloader, testloader, train_data) where the loaders
        are ``torch.utils.data.DataLoader`` instances and ``train_data`` is
        the training ``ImageFolder`` (needed later for ``class_to_idx``).
    """
    train_dir = data_direc + '/train'
    valid_dir = data_direc + '/valid'
    test_dir = data_direc + '/test'
    # ImageNet channel statistics used by the pretrained backbones.
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    # Training pipeline applies random augmentation; evaluation pipelines
    # only deterministically resize + center-crop.
    train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                           transforms.RandomResizedCrop(224),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           normalize])
    eval_transforms = transforms.Compose([transforms.Resize(255),
                                          transforms.CenterCrop(224),
                                          transforms.ToTensor(),
                                          normalize])
    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    valid_data = datasets.ImageFolder(valid_dir, transform=eval_transforms)
    test_data = datasets.ImageFolder(test_dir, transform=eval_transforms)
    trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
    validloader = torch.utils.data.DataLoader(valid_data, batch_size=64)
    testloader = torch.utils.data.DataLoader(test_data, batch_size=64)
    # BUG FIX: the original returned ``testloader`` twice and never exposed
    # the training loader.
    return trainloader, validloader, testloader, train_data
def load_checkpoint(filepath, device, pretrained_model='vgg16'):
    """Rebuild a trained classifier from a checkpoint file.

    Parameters
    ----------
    filepath : str
        Path to a checkpoint produced at training time; must contain the
        keys 'classifier', 'class_to_idx' and 'state_dict'.
    device : str or torch.device
        Device the restored model is moved to (e.g. 'cpu' or 'cuda').
    pretrained_model : str, optional (default 'vgg16')
        Name of the torchvision architecture the checkpoint was built on.
        Generalized from the original hard-coded 'vgg16'.

    Returns
    -------
    model : torch.nn.Module
        The restored model, placed on ``device``.
    """
    # Checkpoints saved on GPU must be remapped when loading on CPU-only
    # machines (and vice versa).
    if torch.cuda.is_available():
        map_location = lambda storage, loc: storage.cuda()
    else:
        map_location = 'cpu'
    checkpoint = torch.load(filepath, map_location=map_location)
    model = getattr(models, pretrained_model)(pretrained=True)
    model.classifier = checkpoint['classifier']
    model.class_to_idx = checkpoint['class_to_idx']
    model.load_state_dict(checkpoint['state_dict'])
    model.to(device)
    return model
def process_image(image):
    """Scale, crop, and normalize a PIL image for a PyTorch model.

    Parameters
    ----------
    image : str
        Path to the image file to preprocess.

    Returns
    -------
    numpy.ndarray
        The preprocessed image as a (C, H, W) array, normalized with the
        ImageNet channel statistics.
    """
    pil_image = Image.open(image)
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])
    # Apply the pipeline, then hand back a plain NumPy array.
    tensor_image = preprocess(pil_image)
    return np.array(tensor_image)
def imshow(image, ax=None, title=None):
    """Display a normalized (C, H, W) image array on a matplotlib axis.

    Parameters
    ----------
    image : numpy.ndarray
        Image in channel-first layout, normalized with the ImageNet
        mean/std (as produced by ``process_image``).
    ax : matplotlib axis, optional
        Axis to draw on; a new figure/axis is created when omitted.
    title : str, optional
        Currently unused, kept for interface compatibility.

    Returns
    -------
    The axis the image was drawn on.
    """
    if ax is None:
        _, ax = plt.subplots()
    # matplotlib expects the color channel last (H, W, C).
    image = image.transpose((1, 2, 0))
    # Undo the ImageNet normalization applied during preprocessing, then
    # clip to [0, 1] so the result renders as a valid image.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = np.clip(image * std + mean, 0, 1)
    ax.imshow(image)
    return ax
def predict_im(image_path, model, topk):
    """Predict the top-k classes of an image with a trained model.

    Parameters
    ----------
    image_path : str
        Path to the image file to classify.
    model : torch.nn.Module
        Trained model carrying a ``class_to_idx`` attribute mapping class
        label -> output index.
    topk : int
        Number of highest-probability classes to return.

    Returns
    -------
    tuple of (list of float, list of str)
        Top-k probabilities and the corresponding class labels.
    """
    image = process_image(image_path)
    batch_img = torch.unsqueeze(torch.from_numpy(image), 0)
    # BUG FIX / efficiency: inference only — disable autograd so no
    # computation graph is built for the forward pass.
    with torch.no_grad():
        outputs = model(batch_img)
    top_ps, top_indices = torch.topk(outputs, topk)
    # The network is assumed to output log-probabilities (the original code
    # exponentiates the scores) — exp converts them to probabilities.
    top_ps = torch.exp(top_ps).view(topk).tolist()
    top_indices = top_indices.view(topk).tolist()
    # Invert class_to_idx so output indices map back to class labels.
    idx_to_class = {idx: cls for cls, idx in model.class_to_idx.items()}
    final_indices = [idx_to_class[i] for i in top_indices]
    return top_ps, final_indices
| [
"numpy.clip",
"torch.from_numpy",
"torch.exp",
"numpy.array",
"torch.cuda.is_available",
"torch.unsqueeze",
"torchvision.datasets.ImageFolder",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomResizedCrop",
"torch.topk",
"torchvision.transforms.RandomHorizontalFlip",
"torchvisi... | [((1643, 1702), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_dir'], {'transform': 'train_transforms'}), '(train_dir, transform=train_transforms)\n', (1663, 1702), False, 'from torchvision import datasets, transforms, models\n'), ((1717, 1776), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['valid_dir'], {'transform': 'valid_transforms'}), '(valid_dir, transform=valid_transforms)\n', (1737, 1776), False, 'from torchvision import datasets, transforms, models\n'), ((1790, 1847), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_dir'], {'transform': 'test_transforms'}), '(test_dir, transform=test_transforms)\n', (1810, 1847), False, 'from torchvision import datasets, transforms, models\n'), ((1866, 1934), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': '(64)', 'shuffle': '(True)'}), '(train_data, batch_size=64, shuffle=True)\n', (1893, 1934), False, 'import torch\n'), ((1950, 2004), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_data'], {'batch_size': '(64)'}), '(valid_data, batch_size=64)\n', (1977, 2004), False, 'import torch\n'), ((2020, 2073), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': '(64)'}), '(test_data, batch_size=64)\n', (2047, 2073), False, 'import torch\n'), ((2215, 2240), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2238, 2240), False, 'import torch\n'), ((2407, 2454), 'torch.load', 'torch.load', (['filepath'], {'map_location': 'map_location'}), '(filepath, map_location=map_location)\n', (2417, 2454), False, 'import torch\n'), ((2984, 3001), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (2994, 3001), False, 'from PIL import Image\n'), ((3492, 3510), 'numpy.array', 'np.array', (['trans_im'], {}), '(trans_im)\n', (3500, 3510), True, 'import numpy as np\n'), ((3913, 3944), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], 
{}), '([0.485, 0.456, 0.406])\n', (3921, 3944), True, 'import numpy as np\n'), ((3955, 3986), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (3963, 3986), True, 'import numpy as np\n'), ((4121, 4141), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (4128, 4141), True, 'import numpy as np\n'), ((4464, 4490), 'torch.from_numpy', 'torch.from_numpy', (['test_img'], {}), '(test_img)\n', (4480, 4490), False, 'import torch\n'), ((4505, 4533), 'torch.unsqueeze', 'torch.unsqueeze', (['test_img', '(0)'], {}), '(test_img, 0)\n', (4520, 4533), False, 'import torch\n'), ((4587, 4609), 'torch.topk', 'torch.topk', (['outputs', 'k'], {}), '(outputs, k)\n', (4597, 4609), False, 'import torch\n'), ((4620, 4637), 'torch.exp', 'torch.exp', (['top_ps'], {}), '(top_ps)\n', (4629, 4637), False, 'import torch\n'), ((3647, 3661), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3659, 3661), True, 'import matplotlib.pyplot as plt\n'), ((506, 535), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], {}), '(30)\n', (531, 535), False, 'from torchvision import datasets, transforms, models\n'), ((576, 609), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (604, 609), False, 'from torchvision import datasets, transforms, models\n'), ((650, 683), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (681, 683), False, 'from torchvision import datasets, transforms, models\n'), ((724, 745), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (743, 745), False, 'from torchvision import datasets, transforms, models\n'), ((786, 852), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (806, 852), False, 'from torchvision import datasets, transforms, 
models\n'), ((957, 979), 'torchvision.transforms.Resize', 'transforms.Resize', (['(255)'], {}), '(255)\n', (974, 979), False, 'from torchvision import datasets, transforms, models\n'), ((1018, 1044), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1039, 1044), False, 'from torchvision import datasets, transforms, models\n'), ((1083, 1104), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1102, 1104), False, 'from torchvision import datasets, transforms, models\n'), ((1143, 1209), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (1163, 1209), False, 'from torchvision import datasets, transforms, models\n'), ((1310, 1332), 'torchvision.transforms.Resize', 'transforms.Resize', (['(255)'], {}), '(255)\n', (1327, 1332), False, 'from torchvision import datasets, transforms, models\n'), ((1372, 1398), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1393, 1398), False, 'from torchvision import datasets, transforms, models\n'), ((1438, 1459), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1457, 1459), False, 'from torchvision import datasets, transforms, models\n'), ((1499, 1565), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (1519, 1565), False, 'from torchvision import datasets, transforms, models\n'), ((3043, 3065), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (3060, 3065), False, 'from torchvision import datasets, transforms, models\n'), ((3103, 3129), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (3124, 3129), False, 'from torchvision import datasets, transforms, models\n'), ((3167, 3188), 
'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3186, 3188), False, 'from torchvision import datasets, transforms, models\n'), ((3226, 3292), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (3246, 3292), False, 'from torchvision import datasets, transforms, models\n')] |
# -*- coding: utf-8 -*-
# MLToolkit (mltoolkit)
__name__="mltk"
"""
MLToolkit - a versatile helper library for machine learning
===========================================================
'MLToolkit' is a Python package providing a set of user-friendly functions to
help building machine learning models in data science research or production
focused projects. It is compatible with and interoperate with popular data
analysis, manipulation and machine learning libraries Pandas, Sci-kit Learn,
Tensorflow, Statmodels, Catboost, XGboost, etc.
Main Features
-------------
- Data Extraction (SQL, Flatfiles, Images, etc.)
- Exploratory data analysis (statistical summary, univariate analysis, etc.)
- Feature Extraction and Engineering
- Model performance analysis, Explain Predictions and comparison between models
- Cross Validation and Hyper parameter tuning
- JSON input script for executing model building and scoring tasks.
- Model Building UI
- Auto ML (automated machine learning)
- Model Deployment and Serving via RESTful API
Author
------
- <NAME>
Links
-----
Website: http://sumudu.tennakoon.net/projects/MLToolkit
Github: https://mltoolkit.github.io/MLToolKit
License
-------
Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import pandas as pd
import numpy as np
import shap
import lime
import traceback
import time
import matplotlib.pyplot as plt
class MLExplainer():
    """Bundle a fitted explainer object with its configuration and the
    parameters of the model it explains.
    """

    def __init__(self, model_id=None, explainer_object=None, explainer_config=None, model_parameters=None):
        # Identifier of the model this explainer belongs to.
        self.model_id = model_id
        # Underlying explainer implementation (e.g. a shap explainer).
        self.explainer_object = explainer_object
        # Configuration dict; expected to carry at least 'Method' and
        # 'ClassNumber' keys (see build_explainer).
        self.explainer_config = explainer_config
        # Model description dict (e.g. 'ModelVariables', 'ModelType').
        self.model_parameters = model_parameters

    def get_explainer_object(self):
        """Return the wrapped explainer implementation."""
        return self.explainer_object

    def get_explainer_config(self):
        """Return the full explainer configuration dict."""
        return self.explainer_config

    def get_model_parameters(self):
        """Return the full model-parameters dict."""
        return self.model_parameters

    def get_model_parameter(self, param):
        """Return a single entry of the model-parameters dict."""
        return self.model_parameters[param]

    def get_explainer_config_item(self, item):
        """Return a single entry of the explainer configuration dict."""
        return self.explainer_config[item]

    def get_explainer_method(self):
        """Return the explainer method name (e.g. 'shap')."""
        return self.explainer_config['Method']

    def get_model_variables(self):
        """Return the list of model input variables."""
        return self.model_parameters['ModelVariables']

    def get_base_value(self, class_number=None):
        """Return the explainer's expected (base) value for a class.

        Parameters
        ----------
        class_number : int, optional
            Index into ``expected_value``. Defaults to the 'ClassNumber'
            entry of the explainer configuration.

        Returns
        -------
        The base value, or None if it cannot be computed.
        """
        try:
            # BUG FIX: the original 'else' branch reset any explicitly
            # passed class_number to 0, ignoring the caller's argument.
            if class_number is None:
                class_number = self.explainer_config['ClassNumber']
            return self.explainer_object.expected_value[class_number]
        except Exception:
            print('Base Value calculating Error !\n{}'.format(traceback.format_exc()))
            return None
def load_object(file_name):
    """Unpickle and return the object stored in ``file_name``.

    Parameters
    ----------
    file_name : str
        Path to a pickle file.

    Returns
    -------
    The deserialized object, or None if loading failed (the error is
    printed, not raised — deliberately best-effort).
    """
    try:
        import pickle
        print('Loading explainer from file {}'.format(file_name))
        # BUG FIX: use a context manager so the file handle is closed even
        # when unpickling raises (the original leaked it on error).
        with open(file_name, 'rb') as pickle_in:
            return pickle.load(pickle_in)
    except Exception:
        print('ERROR in loading explainer: {}'.format(traceback.format_exc()))
        return None
def save_object(object_to_save, file_name):
    """Pickle ``object_to_save`` to ``file_name``.

    Parameters
    ----------
    object_to_save : object
        Any picklable object.
    file_name : str
        Destination path for the pickle file.

    Returns
    -------
    None. Failures are printed, not raised (deliberately best-effort).
    """
    try:
        import pickle
        print('Saving explainer to file {}'.format(file_name))
        # BUG FIX: use a context manager so the file handle is closed even
        # when pickling raises (the original leaked it on error).
        with open(file_name, 'wb') as pickle_out:
            pickle.dump(object_to_save, pickle_out)
    except Exception:
        print('ERROR in saving explainer: {}'.format(traceback.format_exc()))
def save_explainer(Explainer, file_path):
    """Persist an explainer to disk.

    Thin convenience wrapper around :func:`save_object`.

    Parameters
    ----------
    Explainer : mltk.MLExplainer
        The explainer instance to serialize.
    file_path : str
        Destination path, including the file name.

    Returns
    -------
    None
    """
    save_object(Explainer, file_path)
def load_explainer(file_path):
    """Read a previously saved explainer back from disk.

    Thin convenience wrapper around :func:`load_object`.

    Parameters
    ----------
    file_path : str
        Path to the pickled explainer file, including the file name.

    Returns
    -------
    Explainer : mltk.MLExplainer
    """
    return load_object(file_path)
def build_explainer(Model, explainer_config=None, train_variable_values=None):
    """
    Create an MLExplainer for a fitted model.

    Parameters
    ----------
    Model : mltk.MLModel
        Fitted model wrapper exposing id / algorithm / object / type /
        variables / score-variable accessors.
    explainer_config : dict or json, optional
        Explainer settings; defaults to {'Method': 'shap'}. Missing
        'IdColumns' and 'ClassNumber' entries are filled in here.
    train_variable_values : pd.DataFrame, optional
        Training data; required by the Linear ('LGR') and Deep ('NN')
        shap explainers.

    Returns
    -------
    Explainer : mltk.MLExplainer or None
        None when the model cannot be processed or the explainer fails.
    """
    # BUG FIX: the default used to be a shared mutable dict literal; this
    # function adds keys to the config below, so defaults mutated in one
    # call leaked into every later call.
    if explainer_config is None:
        explainer_config = {'Method': 'shap'}
    try:
        model_id = Model.get_model_id()
        ml_algorithm = Model.get_model_algorithm()
        model = Model.get_model_object()
        model_type = Model.get_model_type()
        model_variables = Model.get_model_variables()
        predicted_value_column = Model.get_score_variable()
    except Exception:
        print('Error in processing Model\n{}'.format(traceback.format_exc()))
        return None
    method = explainer_config['Method']
    if 'IdColumns' not in explainer_config:
        explainer_config['IdColumns'] = []
    if 'ClassNumber' not in explainer_config:
        explainer_config['ClassNumber'] = 0
    model_parameters = {
        'ModelType': model_type,
        'ModelVariables': model_variables,
        'PredictedValueColumn': predicted_value_column
    }
    # BUG FIX: pre-initialise so an unknown method cannot leave the name
    # undefined (the original hit a NameError at the MLExplainer call).
    explainer_object = None
    try:
        if method == 'shap':
            if ml_algorithm in ['RF']:
                explainer_object = shap.TreeExplainer(model)
            elif ml_algorithm in ['LGR']:
                explainer_object = shap.LinearExplainer(model, train_variable_values[model_variables].values, feature_dependence="independent")
            elif ml_algorithm in ['NN']:
                explainer_object = shap.DeepExplainer(model, train_variable_values[model_variables].values)
            else:
                print('Model not supported')
        elif method == 'lime':
            # Not implemented
            pass
        print('Explainer created ...')
    except Exception:
        print('Explainer Create Error !\n{}'.format(traceback.format_exc()))
        return None
    Explainer = MLExplainer(model_id, explainer_object, explainer_config, model_parameters)
    return Explainer
def get_explainer_values_task(DataFrame, Explainer=None, Model=None, explainer_config=None, verbose=False):
    """
    Compute impact (shap) values and the matching variable values.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Rows to explain; model variables missing from it are created and
        filled with the configured 'FillMissing' value.
    Explainer : mltk.MLExplainer, optional
        Ready-made explainer; when None one is built from ``Model``.
    Model : mltk.MLModel, optional
        Used (with ``explainer_config``) when ``Explainer`` is None.
    explainer_config : dict or json, optional
        Defaults to {'Method': 'shap'}.
    verbose : bool, default False
        Print a note for each missing column that gets created.

    Returns
    -------
    ImpactValues : pandas.DataFrame or None
    VariableValues : pandas.DataFrame or None
    """
    # BUG FIX: mutable default dict replaced with a None sentinel -- the
    # old shared default was mutated downstream by build_explainer.
    if explainer_config is None:
        explainer_config = {'Method': 'shap'}
    # BUG FIX (idiom): identity comparisons with None use `is` / `is not`.
    if Explainer is None and Model is not None:
        Explainer = build_explainer(Model, explainer_config)
    elif Explainer is not None and Model is None:
        pass  # use the supplied Explainer as-is
    else:
        print('explainer or materials to create explainer not provided !')
    model_variables = Explainer.get_model_parameter('ModelVariables')
    model_type = Explainer.get_model_parameter('ModelType')
    predicted_value_column = Explainer.get_model_parameter('PredictedValueColumn')
    id_columns = Explainer.get_explainer_config_item('IdColumns')
    class_number = Explainer.get_explainer_config_item('ClassNumber')
    method = Explainer.get_explainer_config_item('Method')
    explainer = Explainer.get_explainer_object()
    fill_missing = Explainer.get_explainer_config_item('FillMissing')
    # Create blank columns for model variables absent from the dataset.
    missing_variables = list(set(model_variables) - set(DataFrame.columns))
    for f in missing_variables:
        DataFrame[f] = fill_missing
        if verbose:
            # BUG FIX: the message reported the whole missing-variables list
            # instead of the fill value actually used.
            print('Column [{}] does not exist in the dataset. Created new column and set to {}...\n'.format(f, fill_missing))
    if method == 'shap':
        ImpactValues, VariableValues = get_shap_values(DataFrame, explainer,
                                                       model_variables=model_variables,
                                                       id_columns=id_columns,
                                                       model_type=model_type,
                                                       class_number=class_number,
                                                       predicted_value_column=predicted_value_column
                                                       )
    else:
        ImpactValues = None
        VariableValues = None
    return ImpactValues, VariableValues
def get_explainer_visual(ImpactValues, VariableValues, Explainer, visual_config=None):
    """
    Build the force-plot visual for a set of impact values.

    Parameters
    ----------
    ImpactValues : pandas.DataFrame
    VariableValues : pandas.DataFrame
    Explainer : mltk.MLExplainer
    visual_config : dict, optional
        Supported keys: 'figsize' (default [20, 4]) and 'text_rotation'
        (default 90).

    Returns
    -------
    explainer_visual : matplotlib.figure.Figure or None
        None when the explainer method is not supported.
    """
    if visual_config is None:
        visual_config = {'figsize': [20, 4]}
    base_value = Explainer.get_base_value()
    model_variables = Explainer.get_model_variables()
    method = Explainer.get_explainer_method()
    if method == 'shap':
        figsize = visual_config.get('figsize', [20, 4])
        # BUG FIX: 'text_rotation' is absent from the default config, so the
        # original raised KeyError unless the caller supplied it; fall back
        # to the 90-degree default used by get_shap_force_plot.
        text_rotation = visual_config.get('text_rotation', 90)
        explainer_visual = get_shap_force_plot(ImpactValues, VariableValues, model_variables, base_value, figsize=figsize, text_rotation=text_rotation)
    else:
        explainer_visual = None
        print('method {} not implemented'.format(method))
    return explainer_visual
def get_shap_values(DataFrame, explainer, model_variables, id_columns=None, model_type='classification', class_number=0, predicted_value_column=None):
    """
    Compute per-row shap values plus the matching variable values.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Input rows; the index is reset before explaining.
    explainer : shap.Explainer
        Fitted explainer exposing shap_values() and expected_value.
    model_variables : list(str)
    id_columns : list(str), optional
        Identifier columns copied through to the outputs.
    model_type : {'regression', 'classification'}
    class_number : int
        Class index used for classification explainers.
    predicted_value_column : str
        Column holding the model score; copied to 'OutputValue'.

    Returns
    -------
    ShapValues : pandas.DataFrame
        id columns + per-variable shap values + 'BaseValue' + 'OutputValue'.
    VariableValues : pandas.DataFrame
        id columns + the raw variable values.
        Returns a single None on failure.
    """
    # Idiom fix: avoid a mutable list as the default argument.
    if id_columns is None:
        id_columns = []
    DataFrame = DataFrame.reset_index(drop=True)
    try:
        if model_type == 'classification':
            shap_values = explainer.shap_values(DataFrame[model_variables])[class_number]
            base_value = explainer.expected_value[class_number]
        elif model_type == 'regression':
            shap_values = explainer.shap_values(DataFrame[model_variables])
            base_value = explainer.expected_value[0]
        else:
            # BUG FIX: an unknown model_type previously fell through to a
            # NameError below; fail inside the try with a clear message.
            raise ValueError('model_type must be classification or regression')
        ShapValues = pd.DataFrame(data=shap_values, columns=model_variables)
        ShapValues['BaseValue'] = base_value
        del shap_values
    except Exception:
        print('Error creating Shap Values\n{}'.format(traceback.format_exc()))
        return None
    try:
        for c in id_columns:
            ShapValues[c] = DataFrame[c].values
    except Exception:
        print('Error creating Id columns\n{}'.format(traceback.format_exc()))
    try:
        ShapValues['OutputValue'] = DataFrame[predicted_value_column].values
    except Exception:
        ShapValues['OutputValue'] = None
        print('Error creating OutputValue columns\n{}'.format(traceback.format_exc()))
    return ShapValues[id_columns + model_variables + ['BaseValue', 'OutputValue']], DataFrame[id_columns + model_variables]
def get_shap_force_plot(ShapValues, VariableValues, model_variables, base_value, figsize=(20, 3), text_rotation=90):
    """
    Render a matplotlib shap force plot for the given impact values.

    Parameters
    ----------
    ShapValues : pandas.DataFrame
        Per-variable shap values.
    VariableValues : pandas.DataFrame
        Raw feature values matching ShapValues.
    model_variables : list(str)
    base_value : float
        Explainer base (expected) value the plot is centred on.
    figsize : (int, int), default (20, 3)
    text_rotation : float, default 90

    Returns
    -------
    shap_force_plot : matplotlib.figure.Figure
    """
    plot_kwargs = dict(
        base_value=base_value,
        shap_values=ShapValues[model_variables].values,
        features=VariableValues[model_variables].values,
        feature_names=model_variables,
        out_names=None,
        link='identity',
        plot_cmap='RdBu',
        matplotlib=True,
        show=False,
        figsize=figsize,
        ordering_keys=None,
        ordering_keys_time_format=None,
        text_rotation=text_rotation,
    )
    return shap.force_plot(**plot_kwargs)
def get_shap_impact_plot(ShapValues, VariableValues, model_variables, iloc=0, top_n=5):
    """
    Plot the top negative (red, left) and positive (green, right) impacts
    for a single row.

    Parameters
    ----------
    ShapValues : pandas.DataFrame
    VariableValues : pandas.DataFrame
    model_variables : list(str)
    iloc : int, default 0
        Row position to plot.
    top_n : int, default 5
        Number of bars per side.

    Returns
    -------
    figure : matplotlib.figure.Figure
    """
    row_impacts = ShapValues[model_variables].iloc[iloc]
    row_values = VariableValues[model_variables].iloc[iloc]
    row_values.name = 'Value'
    positives = row_impacts[row_impacts >= 0.0].sort_values(ascending=False)
    positives.name = 'Impact'
    negatives = row_impacts[row_impacts <= 0.0].sort_values(ascending=True)
    negatives.name = 'Impact'
    figure, axes = plt.subplots(nrows=1, ncols=2)
    # reverse the head() slices so the strongest impact sits at the top bar
    negatives.head(top_n)[::-1].plot(kind='barh', ax=axes[0], color='r', legend=False)
    positives.head(top_n)[::-1].plot(kind='barh', ax=axes[1], color='g', legend=False)
    axes[1].yaxis.tick_right()
    return figure
def get_shap_impact_summary(ShapValues, VariableValues, model_variables, base_value=None, iloc=0, top_n=None, show_plot=False):
    """
    Summarise positive and negative shap impacts for a single row.

    Parameters
    ----------
    ShapValues : pandas.DataFrame
    VariableValues : pandas.DataFrame
    model_variables : list(str)
    base_value : float, optional
        When given, the model output (sum of impacts + base value) is
        included as the 'Output' column.
    iloc : int, default 0
        Row position to summarise.
    top_n : int, default None
        When set, keep only the top_n positive and top_n negative impacts.
    show_plot : bool, default False
        Also draw the impact bar plot.

    Returns
    -------
    ImpactSummary : pandas.DataFrame
        Columns: Impact, NormalizedImpact, Value, Sign ('P'/'N'), Rank,
        Output; indexed by variable name.
    """
    impact_values = ShapValues[model_variables].iloc[iloc]
    variable_values = VariableValues[model_variables].iloc[iloc]
    variable_values.name = 'Value'
    # NOTE(review): a variable with impact exactly 0.0 satisfies both filters
    # and appears in both halves -- kept for behavioural compatibility.
    posivite_impact_values = impact_values[impact_values >= 0.0].sort_values(ascending=False)
    posivite_impact_values.name = 'Impact'
    negative_impact_values = impact_values[impact_values <= 0.0].sort_values(ascending=True)
    negative_impact_values.name = 'Impact'
    sum_posivite_impact_values = posivite_impact_values.sum()
    sum_negative_impact_values = negative_impact_values.sum()
    norm_posivite_impact_values = posivite_impact_values / sum_posivite_impact_values
    norm_negative_impact_values = negative_impact_values / sum_negative_impact_values
    norm_posivite_impact_values.name = 'NormalizedImpact'
    norm_negative_impact_values.name = 'NormalizedImpact'
    if base_value is not None:
        output_value = sum_posivite_impact_values + sum_negative_impact_values + base_value
    else:
        output_value = None
    out_posivite_impact_values = pd.concat([posivite_impact_values, norm_posivite_impact_values, variable_values], join='inner', axis=1)
    out_posivite_impact_values['Sign'] = 'P'
    out_posivite_impact_values['Rank'] = np.arange(out_posivite_impact_values.shape[0]) + 1
    out_posivite_impact_values['Output'] = output_value
    out_negative_impact_values = pd.concat([negative_impact_values, norm_negative_impact_values, variable_values], join='inner', axis=1)
    out_negative_impact_values['Sign'] = 'N'
    out_negative_impact_values['Rank'] = np.arange(out_negative_impact_values.shape[0]) + 1
    out_negative_impact_values['Output'] = output_value
    print('sum_posivite_impact_values', sum_posivite_impact_values)
    print('sum_negative_impact_values', sum_negative_impact_values)
    if show_plot:
        get_shap_impact_plot(ShapValues, VariableValues, model_variables, iloc=iloc, top_n=top_n)
    # BUG FIX: DataFrame.append was removed in pandas 2.0; use pd.concat.
    if top_n is None:
        return pd.concat([out_posivite_impact_values, out_negative_impact_values])
    return pd.concat([out_posivite_impact_values.head(top_n), out_negative_impact_values.head(top_n)])
def get_explainer_report(DataFrame, Explainer, top_n=5, show_plot=False, return_type='frame'):
    """
    Build an impact summary (and optionally a plot) for the first row.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
    Explainer : mltk.MLExplainer
    top_n : int, default 5
        Number of positive/negative impacts to keep.
    show_plot : bool, default False
        Also build and return the impact bar plot.
    return_type : {'frame', 'json'}, default 'frame'

    Returns
    -------
    ImpactSummary : pandas.DataFrame or str
        DataFrame for 'frame', JSON string for 'json'.
    impact_plot : matplotlib.figure.Figure
        Only returned (as a second value) when show_plot is True.
    """
    ImpactValues, VariableValues = get_explainer_values_task(DataFrame, Explainer=Explainer)
    model_variables = Explainer.get_model_variables()
    base_value = Explainer.get_base_value()
    ImpactSummary = get_shap_impact_summary(ImpactValues, VariableValues, model_variables, base_value=base_value, iloc=0, top_n=top_n)
    impact_plot = None
    if show_plot:
        impact_plot = get_shap_impact_plot(ImpactValues, VariableValues, model_variables, iloc=0, top_n=top_n)
    if return_type == 'frame':
        if show_plot:
            return ImpactSummary, impact_plot
        return ImpactSummary
    elif return_type == 'json':
        if show_plot:
            return ImpactSummary.to_json(orient='columns'), impact_plot
        return ImpactSummary.to_json(orient='columns')
    else:
        print("return type is set to 'frame'. use 'json' to return json output")
        # BUG FIX: the original evaluated `ImpactSummary, impact_plot` as a
        # bare expression without returning it, so an unrecognised
        # return_type with show_plot=True returned None.
        if show_plot:
            return ImpactSummary, impact_plot
        return ImpactSummary
"traceback.format_exc",
"shap.DeepExplainer",
"pickle.dump",
"pickle.load",
"shap.force_plot",
"shap.LinearExplainer",
"shap.TreeExplainer",
"pandas.DataFrame",
"pandas.concat",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((11996, 12363), 'shap.force_plot', 'shap.force_plot', ([], {'base_value': 'base_value', 'shap_values': 'ShapValues[model_variables].values', 'features': 'VariableValues[model_variables].values', 'feature_names': 'model_variables', 'out_names': 'None', 'link': '"""identity"""', 'plot_cmap': '"""RdBu"""', 'matplotlib': '(True)', 'show': '(False)', 'figsize': 'figsize', 'ordering_keys': 'None', 'ordering_keys_time_format': 'None', 'text_rotation': 'text_rotation'}), "(base_value=base_value, shap_values=ShapValues[\n model_variables].values, features=VariableValues[model_variables].\n values, feature_names=model_variables, out_names=None, link='identity',\n plot_cmap='RdBu', matplotlib=True, show=False, figsize=figsize,\n ordering_keys=None, ordering_keys_time_format=None, text_rotation=\n text_rotation)\n", (12011, 12363), False, 'import shap\n'), ((13353, 13383), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)'}), '(nrows=1, ncols=2)\n', (13365, 13383), True, 'import matplotlib.pyplot as plt\n'), ((15139, 15246), 'pandas.concat', 'pd.concat', (['[posivite_impact_values, norm_posivite_impact_values, variable_values]'], {'join': '"""inner"""', 'axis': '(1)'}), "([posivite_impact_values, norm_posivite_impact_values,\n variable_values], join='inner', axis=1)\n", (15148, 15246), True, 'import pandas as pd\n'), ((15473, 15580), 'pandas.concat', 'pd.concat', (['[negative_impact_values, norm_negative_impact_values, variable_values]'], {'join': '"""inner"""', 'axis': '(1)'}), "([negative_impact_values, norm_negative_impact_values,\n variable_values], join='inner', axis=1)\n", (15482, 15580), True, 'import pandas as pd\n'), ((3001, 3023), 'pickle.load', 'pickle.load', (['pickle_in'], {}), '(pickle_in)\n', (3012, 3023), False, 'import pickle\n'), ((3388, 3427), 'pickle.dump', 'pickle.dump', (['object_to_save', 'pickle_out'], {}), '(object_to_save, pickle_out)\n', (3399, 3427), False, 'import pickle\n'), ((10699, 10754), 
'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'shap_values', 'columns': 'model_variables'}), '(data=shap_values, columns=model_variables)\n', (10711, 10754), True, 'import pandas as pd\n'), ((15331, 15377), 'numpy.arange', 'np.arange', (['out_posivite_impact_values.shape[0]'], {}), '(out_posivite_impact_values.shape[0])\n', (15340, 15377), True, 'import numpy as np\n'), ((15666, 15712), 'numpy.arange', 'np.arange', (['out_negative_impact_values.shape[0]'], {}), '(out_negative_impact_values.shape[0])\n', (15675, 15712), True, 'import numpy as np\n'), ((5491, 5516), 'shap.TreeExplainer', 'shap.TreeExplainer', (['model'], {}), '(model)\n', (5509, 5516), False, 'import shap\n'), ((3142, 3164), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3162, 3164), False, 'import traceback\n'), ((3523, 3545), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3543, 3545), False, 'import traceback\n'), ((4898, 4920), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4918, 4920), False, 'import traceback\n'), ((5596, 5708), 'shap.LinearExplainer', 'shap.LinearExplainer', (['model', 'train_variable_values[model_variables].values'], {'feature_dependence': '"""independent"""'}), "(model, train_variable_values[model_variables].values,\n feature_dependence='independent')\n", (5616, 5708), False, 'import shap\n'), ((6175, 6197), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6195, 6197), False, 'import traceback\n'), ((10895, 10917), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10915, 10917), False, 'import traceback\n'), ((11107, 11129), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11127, 11129), False, 'import traceback\n'), ((11349, 11371), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11369, 11371), False, 'import traceback\n'), ((2758, 2780), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2778, 2780), False, 'import 
traceback\n'), ((5783, 5855), 'shap.DeepExplainer', 'shap.DeepExplainer', (['model', 'train_variable_values[model_variables].values'], {}), '(model, train_variable_values[model_variables].values)\n', (5801, 5855), False, 'import shap\n')] |
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.autograd import Variable
import numpy as np
from skimage import transform
import sys
sys.path.insert(0, '/root/LKVOLearner/DEN')
import modeling
import fdc
import den
sys.path.insert(0,"~/LKVOLearner/src/util")
import util
DISP_SCALING = 10
MIN_DISP = 0.01
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class ConvBlock(nn.Module):
    """Two Conv layers followed by 2x2 average pooling (stride 2).

    Odd spatial sizes are replication-padded by one pixel on the
    right/bottom before pooling, so the output is ceil(H/2) x ceil(W/2).
    """
    def __init__(self, input_nc, output_nc, kernel_size):
        super(ConvBlock, self).__init__()
        pad = int(np.floor((kernel_size - 1) / 2))
        self.activation_fn = nn.ELU()
        self.conv1 = Conv(input_nc, output_nc, kernel_size, 1, pad, self.activation_fn)
        # second conv has no activation here; ELU is applied after padding
        self.conv2 = Conv(output_nc, output_nc, kernel_size, 1, pad, None)

    def forward(self, input):
        features = self.conv2(self.conv1(input))
        # pad right/bottom by one when the input size is odd
        pad_spec = [0, int(np.mod(input.size(-1), 2)), 0, int(np.mod(input.size(-2), 2))]
        padded = torch.nn.ReplicationPad2d(pad_spec)(features)
        return torch.nn.AvgPool2d(kernel_size=2, stride=2, padding=0)(self.activation_fn(padded))
class UpConv(nn.Module):
    """Factor-2 transposed-convolution upsampling followed by ELU.

    Note: the kernel_size argument is kept for interface compatibility;
    the transposed conv always uses a fixed 2x2 kernel with stride 2.
    """
    def __init__(self, input_nc, output_nc, kernel_size):
        super(UpConv, self).__init__()
        self.deconv = nn.ConvTranspose2d(in_channels=input_nc,
                                         out_channels=output_nc,
                                         kernel_size=2,
                                         bias=True,
                                         stride=2,
                                         padding=0)
        self.activation_fn = nn.ELU()

    def forward(self, input):
        upsampled = self.deconv(input)
        return self.activation_fn(upsampled)
class Conv(nn.Module):
    """Replication-padded 2d convolution with an optional activation.

    Parameters
    ----------
    input_nc, output_nc : int
        Input / output channel counts.
    kernel_size, stride : int
        Convolution kernel size and stride.
    padding : int
        Amount of replication padding applied before the convolution.
    activation_func : nn.Module or None, default nn.ELU()
        Activation applied after the convolution; pass None to disable.
        NOTE: the default instance is created once at class-definition time
        and shared by all Conv instances; ELU is parameter-free so this is
        harmless, but do not replace the default with a stateful module.
    """
    def __init__(self, input_nc, output_nc, kernel_size, stride, padding, activation_func=nn.ELU()):
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(in_channels=input_nc,
                    out_channels=output_nc,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=0,
                    bias=True)
        self.activation_fn = activation_func
        self.pad_fn = nn.ReplicationPad2d(padding)
    def forward(self, input):
        # BUG FIX (idiom): compare with `is None`, not `== None`; `==` is a
        # value comparison and is not guaranteed to be an identity test.
        if self.activation_fn is None:
            return self.conv(self.pad_fn(input))
        return self.activation_fn(self.conv(self.pad_fn(input)))
class FDCInverseDepthMap(fdc.FDC):
    # Wraps the FDC predictor so that it returns *inverse* depth maps.
    def getInverseDepthMap(self, batch):
        """Run the FDC model on ``batch`` and convert depth to inverse depth.

        Each predicted value d is mapped to 1/d; values whose reciprocal is
        infinite (d == 0) are mapped to 0 instead.

        Parameters
        ----------
        batch : input accepted by fdc.FDC.__call__ -- presumably a batch of
            images; TODO confirm against the fdc module.

        Returns
        -------
        list of predictions with every element replaced by its inverse depth
        as a torch.Tensor.
        """
        predictions = fdc.FDC.__call__(self, batch)
        # print("predictions shape: ", len(predictions))
        for i in range(len(predictions)):
            for j in range(predictions[i].shape[0]):
                # element-wise reciprocal with inf clamped to 0
                predictions[i][j] = torch.tensor(list(map(lambda x: 0 if 1 / x == float('inf') else 1 / x, predictions[i][j])))
        return predictions
class VggDepthEstimator(nn.Module):
    """VGG-style encoder-decoder predicting a multi-scale inverse-depth pyramid.

    The encoder is seven ConvBlock stages (3->32->64->128->256->512->512->512),
    each halving the resolution. The decoder upsamples with skip connections
    and emits a 1-channel sigmoid inverse-depth map at several scales; the
    five finest maps are returned (finest first), scaled by DISP_SCALING and
    offset by MIN_DISP.
    """
    def __init__(self, input_size=None):
        super(VggDepthEstimator, self).__init__()
        # encoder: each ConvBlock halves the spatial resolution
        self.conv_layers = nn.ModuleList([ConvBlock(3, 32, 7)])
        self.conv_layers.append(ConvBlock(32, 64, 5))
        self.conv_layers.append(ConvBlock(64, 128, 3))
        self.conv_layers.append(ConvBlock(128, 256, 3))
        self.conv_layers.append(ConvBlock(256, 512, 3))
        self.conv_layers.append(ConvBlock(512, 512, 3))
        self.conv_layers.append(ConvBlock(512, 512, 3))
        # decoder: upconv + skip concat (+ upsampled prior depth for the
        # finer stages, hence the +1 input channels below)
        self.upconv_layers = nn.ModuleList([UpConv(512, 512, 3)])
        self.iconv_layers = nn.ModuleList([Conv(512*2, 512, 3, 1, 1)])
        self.upconv_layers.append(UpConv(512, 512, 3))
        self.iconv_layers.append(Conv(512*2, 512, 3, 1, 1))
        self.invdepth_layers = nn.ModuleList([Conv(512, 1, 3, 1, 1, nn.Sigmoid())])
        self.upconv_layers.append(UpConv(512, 256, 3))
        self.iconv_layers.append(Conv(256*2, 256, 3, 1, 1))
        self.invdepth_layers.append(Conv(256, 1, 3, 1, 1, nn.Sigmoid()))
        self.upconv_layers.append(UpConv(256, 128, 3))
        self.iconv_layers.append(Conv(128*2, 128, 3, 1, 1))
        self.invdepth_layers.append(Conv(128, 1, 3, 1, 1, nn.Sigmoid()))
        self.upconv_layers.append(UpConv(128, 64, 3))
        self.iconv_layers.append(Conv(64*2+1, 64, 3, 1, 1))
        self.invdepth_layers.append(Conv(64, 1, 3, 1, 1, nn.Sigmoid()))
        self.upconv_layers.append(UpConv(64, 32, 3))
        self.iconv_layers.append(Conv(32*2+1, 32, 3, 1, 1))
        self.invdepth_layers.append(Conv(32, 1, 3, 1, 1, nn.Sigmoid()))
        self.upconv_layers.append(UpConv(32, 16, 3))
        self.iconv_layers.append(Conv(16+1, 16, 3, 1, 1))
        self.invdepth_layers.append(Conv(16, 1, 3, 1, 1, nn.Sigmoid()))

    def init_weights(self):
        """Zero all conv / transposed-conv biases; weights keep their defaults."""
        def weights_init(m):
            if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.ConvTranspose2d):
                m.bias.data = torch.zeros(m.bias.data.size())
        self.apply(weights_init)

    def forward(self, input):
        """Return a list of 5 inverse-depth maps, finest resolution first.

        Each map has the channel dimension squeezed out and values scaled to
        the (MIN_DISP, DISP_SCALING + MIN_DISP) range.
        """
        conv_feat = self.conv_layers[0].forward(input)
        self.conv_feats = [conv_feat]
        for i in range(1, len(self.conv_layers)):
            conv_feat = self.conv_layers[i].forward(self.conv_feats[i-1])
            self.conv_feats.append(conv_feat)
        upconv_feats = []
        invdepth_pyramid = []
        for i in range(0, len(self.upconv_layers)):
            if i==0:
                x = self.upconv_layers[i].forward(self.conv_feats[-1])
            else:
                x = self.upconv_layers[i].forward(upconv_feats[i-1])
            if i<len(self.upconv_layers)-1:
                # crop one row/column when upsampling overshoots the skip size
                if x.size(-1) != self.conv_feats[-2-i].size(-1):
                    x = x[:, :, :, :-1]
                if x.size(-2) != self.conv_feats[-2-i].size(-2):
                    x = x[:, :, :-1, :]
            if i==(len(self.upconv_layers)-1):
                # last stage: no skip features, only the upsampled prior depth
                x = torch.cat((x, nn.Upsample(scale_factor=2, mode='bilinear')(invdepth_pyramid[-1])), 1)
            elif i > 3:
                x = torch.cat((x, self.conv_feats[-(2+i)], nn.Upsample(scale_factor=2, mode='bilinear')(invdepth_pyramid[-1])), 1)
            else:
                x = torch.cat((x, self.conv_feats[-(2+i)]), 1)
            upconv_feats.append(self.iconv_layers[i].forward(x))
            if i>0:
                invdepth_pyramid.append(self.invdepth_layers[i-1].forward(upconv_feats[-1]))
        # keep the five finest scales, finest first
        invdepth_pyramid = invdepth_pyramid[-1::-1]
        invdepth_pyramid = invdepth_pyramid[0:5]
        for i in range(len(invdepth_pyramid)):
            # BUG FIX (hygiene): removed per-call debug prints of tensor shapes
            invdepth_pyramid[i] = invdepth_pyramid[i].squeeze(1)*DISP_SCALING+MIN_DISP
        return invdepth_pyramid
class FDCDepthEstimator(nn.Module):
    """Depth estimator backed by a pretrained DEN/FDC model.

    Produces the same 5-level inverse-depth pyramid interface as
    VggDepthEstimator by resizing the FDC output to fixed resolutions.
    """
    def __init__(self, input_size=None):
        super(FDCDepthEstimator, self).__init__()
        # load the pretrained DEN backbone in eval mode on the current device
        den_ = den.DEN()
        den_ = den_.to(device)
        den_.eval()
        self.fdc_model = FDCInverseDepthMap(den_)
        # NOTE(review): hard-coded weights path -- breaks outside this container
        self.fdc_model.load_weights('/root/LKVOLearner/DEN/models/FDC/den_dbe/')
    def forward(self, input):
        """Return 5 inverse-depth tensors resized to fixed pyramid sizes.

        Each level is scaled by DISP_SCALING and offset by MIN_DISP and
        moved to the GPU (requires CUDA).
        """
        sizes = [(128, 416), (64, 208), (32, 104), (16, 52), (8, 26)]
        invdepth_pyramid = [[] for _ in range(len(sizes))]
        origin_indepth_map = self.fdc_model.getInverseDepthMap(input)
        # util.save_image(origin_indepth_map, "/data/log/checkpoints/origin_invdepth_map_%d.mat" % (self.index))
        # self.index += 1
        for i in range(len(sizes)): # number of pyramid levels: 5
            for j in range(len(origin_indepth_map)):
                # anti-aliased resize keeps values in the original range
                invdepth_pyramid[i].append(transform.resize(origin_indepth_map[j].numpy(), sizes[i], mode='reflect',
                                            anti_aliasing=True, preserve_range=True).astype('float32'))
            invdepth_pyramid[i] = torch.tensor(invdepth_pyramid[i]).cuda()
            invdepth_pyramid[i] = invdepth_pyramid[i]*DISP_SCALING+MIN_DISP
        return invdepth_pyramid
    def init_weights(self):
        # Weights are loaded from disk in __init__; nothing to initialise.
        pass
class PoseNet(nn.Module):
    """Predict 6-DoF relative poses for a bundle of concatenated frames.

    Input is (N, bundle_size*3, H, W); output is (N, bundle_size-1, 6),
    spatially averaged and scaled by 0.01.
    """
    def __init__(self, bundle_size):
        super(PoseNet, self).__init__()
        self.bundle_size = bundle_size
        # stride-2 conv stack: (in_channels, out_channels, kernel, padding)
        stage_specs = [(bundle_size*3, 16, 7, 3),
                       (16, 32, 5, 2),
                       (32, 64, 3, 1),
                       (64, 128, 3, 1),
                       (128, 256, 3, 1),
                       (256, 256, 3, 1),
                       (256, 256, 3, 1)]
        layers = []
        for in_ch, out_ch, k, pad in stage_specs:
            layers.append(nn.Conv2d(in_ch, out_ch, kernel_size=k, stride=2, padding=pad, bias=True))
            layers.append(nn.ReLU(True))
        # head: 6 pose parameters per source frame
        layers.append(nn.Conv2d(256, 6*(bundle_size-1), kernel_size=3, stride=2, padding=1, bias=True))
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        assert(self.bundle_size*3 == input.size(1))
        raw = self.model.forward(input)
        # average over the remaining spatial grid, then split per frame
        pooled = raw.view(input.size(0), 6*(self.bundle_size-1), -1).mean(2)
        return pooled.view(input.size(0), self.bundle_size-1, 6) * 0.01
class PoseExpNet(nn.Module):
    """Joint pose and explainability-mask network.

    forward() returns (poses, [mask1, mask2, mask3, mask4]): poses is
    (N, bundle_size-1, 6) scaled by 0.01; the masks are sigmoid outputs
    with bundle_size channels at four decoder scales (mask1 finest).
    """

    def __init__(self, bundle_size):
        super(PoseExpNet, self).__init__()
        self.bundle_size = bundle_size

        def down(in_ch, out_ch, k, pad):
            # stride-2 conv + ReLU encoder stage
            return nn.Sequential(*[nn.Conv2d(in_ch, out_ch, kernel_size=k, stride=2, padding=pad, bias=True),
                                   nn.ReLU(True)])

        def up(in_ch, out_ch):
            # factor-2 transposed-conv + ReLU decoder stage
            return nn.Sequential(*[nn.ConvTranspose2d(in_channels=in_ch,
                                                      out_channels=out_ch,
                                                      kernel_size=2,
                                                      bias=True,
                                                      stride=2,
                                                      padding=0),
                                   nn.ReLU(True)])

        def mask_head(in_ch):
            # per-pixel explainability mask, one channel per frame
            return nn.Sequential(*[nn.Conv2d(in_ch, bundle_size, kernel_size=3,
                                             stride=1, padding=1, bias=True),
                                   nn.Sigmoid()])

        self.convlyr1 = down(bundle_size*3, 16, 7, 3)
        self.convlyr2 = down(16, 32, 5, 2)
        self.convlyr3 = down(32, 64, 3, 1)
        self.convlyr4 = down(64, 128, 3, 1)
        self.convlyr5 = down(128, 256, 3, 1)
        self.poselyr = nn.Sequential(*[nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1, bias=True),
                                       nn.ReLU(True),
                                       nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1, bias=True),
                                       nn.ReLU(True),
                                       nn.Conv2d(256, 6*(bundle_size-1), kernel_size=3, stride=2, padding=1, bias=True)])
        self.uplyr5 = up(256, 256)
        self.uplyr4 = up(256, 128)
        self.uplyr3 = up(128, 64)
        self.uplyr2 = up(64, 32)
        self.uplyr1 = up(32, 16)
        self.explyr4 = mask_head(128)
        self.explyr3 = mask_head(64)
        self.explyr2 = mask_head(32)
        self.explyr1 = mask_head(16)

    def forward(self, input):
        enc1 = self.convlyr1(input)
        enc2 = self.convlyr2(enc1)
        enc3 = self.convlyr3(enc2)
        enc4 = self.convlyr4(enc3)
        enc5 = self.convlyr5(enc4)
        # pose branch: spatially average, then scale the raw prediction down
        pose = self.poselyr.forward(enc5)
        pose = pose.view(input.size(0), 6*(self.bundle_size-1), -1).mean(2)
        pose = pose.view(input.size(0), self.bundle_size-1, 6) * 0.01
        # mask branch: progressive upsampling with a head at four scales
        dec5 = self.uplyr5(enc5)
        dec4 = self.uplyr4(dec5)
        dec3 = self.uplyr3(dec4)
        dec2 = self.uplyr2(dec3)
        dec1 = self.uplyr1(dec2)
        mask4 = self.explyr4(dec4)
        mask3 = self.explyr3(dec3)
        mask2 = self.explyr2(dec2)
        mask1 = self.explyr1(dec1)
        return pose, [mask1, mask2, mask3, mask4]
if __name__ == "__main__":
    # Smoke test (requires CUDA): push random tensors through both networks
    # and print the output shapes at every scale.
    model = PoseExpNet(3).cuda()
    x = Variable(torch.randn(1,9,128,416).cuda())
    p, masks = model.forward(x)
    for i in range(4):
        print(masks[i].size())
    dnet = VggDepthEstimator([128,416]).cuda()
    I = Variable(torch.randn(1,3,128,416).cuda())
    invdepth_pyramid = dnet.forward(I)
    for i in range(len(invdepth_pyramid)):
        print(invdepth_pyramid[i].size())
| [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"sys.path.insert",
"fdc.FDC.__call__",
"torch.nn.Sequential",
"numpy.floor",
"torch.nn.Conv2d",
"den.DEN",
"torch.nn.AvgPool2d",
"torch.tensor",
"torch.cuda.is_available",
"torch.nn.ReplicationPad2d",
"torch.nn.Upsample",
"torch.nn.ELU",
"torch.nn.Con... | [((175, 218), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/root/LKVOLearner/DEN"""'], {}), "(0, '/root/LKVOLearner/DEN')\n", (190, 218), False, 'import sys\n'), ((259, 303), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""~/LKVOLearner/src/util"""'], {}), "(0, '~/LKVOLearner/src/util')\n", (274, 303), False, 'import sys\n'), ((383, 408), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (406, 408), False, 'import torch\n'), ((628, 636), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (634, 636), True, 'import torch.nn as nn\n'), ((1338, 1453), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'input_nc', 'out_channels': 'output_nc', 'kernel_size': '(2)', 'bias': '(True)', 'stride': '(2)', 'padding': '(0)'}), '(in_channels=input_nc, out_channels=output_nc,\n kernel_size=2, bias=True, stride=2, padding=0)\n', (1356, 1453), True, 'import torch.nn as nn\n'), ((1723, 1731), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (1729, 1731), True, 'import torch.nn as nn\n'), ((1930, 1938), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (1936, 1938), True, 'import torch.nn as nn\n'), ((1998, 2120), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'input_nc', 'out_channels': 'output_nc', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': '(0)', 'bias': '(True)'}), '(in_channels=input_nc, out_channels=output_nc, kernel_size=\n kernel_size, stride=stride, padding=0, bias=True)\n', (2007, 2120), True, 'import torch.nn as nn\n'), ((2333, 2361), 'torch.nn.ReplicationPad2d', 'nn.ReplicationPad2d', (['padding'], {}), '(padding)\n', (2352, 2361), True, 'import torch.nn as nn\n'), ((2664, 2693), 'fdc.FDC.__call__', 'fdc.FDC.__call__', (['self', 'batch'], {}), '(self, batch)\n', (2680, 2693), False, 'import fdc\n'), ((7503, 7512), 'den.DEN', 'den.DEN', ([], {}), '()\n', (7510, 7512), False, 'import den\n'), ((9784, 9805), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (9797, 9805), True, 
'import torch.nn as nn\n'), ((566, 597), 'numpy.floor', 'np.floor', (['((kernel_size - 1) / 2)'], {}), '((kernel_size - 1) / 2)\n', (574, 597), True, 'import numpy as np\n'), ((1057, 1091), 'torch.nn.ReplicationPad2d', 'torch.nn.ReplicationPad2d', (['padding'], {}), '(padding)\n', (1082, 1091), False, 'import torch\n'), ((1110, 1164), 'torch.nn.AvgPool2d', 'torch.nn.AvgPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)', 'padding': '(0)'}), '(kernel_size=2, stride=2, padding=0)\n', (1128, 1164), False, 'import torch\n'), ((8839, 8916), 'torch.nn.Conv2d', 'nn.Conv2d', (['(bundle_size * 3)', '(16)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(True)'}), '(bundle_size * 3, 16, kernel_size=7, stride=2, padding=3, bias=True)\n', (8848, 8916), True, 'import torch.nn as nn\n'), ((8933, 8946), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8940, 8946), True, 'import torch.nn as nn\n'), ((8965, 9029), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(True)'}), '(16, 32, kernel_size=5, stride=2, padding=2, bias=True)\n', (8974, 9029), True, 'import torch.nn as nn\n'), ((9048, 9061), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (9055, 9061), True, 'import torch.nn as nn\n'), ((9080, 9144), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(32, 64, kernel_size=3, stride=2, padding=1, bias=True)\n', (9089, 9144), True, 'import torch.nn as nn\n'), ((9163, 9176), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (9170, 9176), True, 'import torch.nn as nn\n'), ((9195, 9260), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(64, 128, kernel_size=3, stride=2, padding=1, bias=True)\n', (9204, 9260), True, 'import torch.nn as nn\n'), ((9279, 9292), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), 
'(True)\n', (9286, 9292), True, 'import torch.nn as nn\n'), ((9311, 9377), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(128, 256, kernel_size=3, stride=2, padding=1, bias=True)\n', (9320, 9377), True, 'import torch.nn as nn\n'), ((9396, 9409), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (9403, 9409), True, 'import torch.nn as nn\n'), ((9428, 9494), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(256, 256, kernel_size=3, stride=2, padding=1, bias=True)\n', (9437, 9494), True, 'import torch.nn as nn\n'), ((9513, 9526), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (9520, 9526), True, 'import torch.nn as nn\n'), ((9545, 9611), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(256, 256, kernel_size=3, stride=2, padding=1, bias=True)\n', (9554, 9611), True, 'import torch.nn as nn\n'), ((9630, 9643), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (9637, 9643), True, 'import torch.nn as nn\n'), ((9662, 9750), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(6 * (bundle_size - 1))'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(256, 6 * (bundle_size - 1), kernel_size=3, stride=2, padding=1,\n bias=True)\n', (9671, 9750), True, 'import torch.nn as nn\n'), ((4089, 4101), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4099, 4101), True, 'import torch.nn as nn\n'), ((4278, 4290), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4288, 4290), True, 'import torch.nn as nn\n'), ((4465, 4477), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4475, 4477), True, 'import torch.nn as nn\n'), ((4651, 4663), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4661, 4663), True, 'import torch.nn as nn\n'), ((4835, 4847), 'torch.nn.Sigmoid', 'nn.Sigmoid', 
([], {}), '()\n', (4845, 4847), True, 'import torch.nn as nn\n'), ((15046, 15073), 'torch.randn', 'torch.randn', (['(1)', '(9)', '(128)', '(416)'], {}), '(1, 9, 128, 416)\n', (15057, 15073), False, 'import torch\n'), ((15231, 15258), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(128)', '(416)'], {}), '(1, 3, 128, 416)\n', (15242, 15258), False, 'import torch\n'), ((3899, 3911), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3909, 3911), True, 'import torch.nn as nn\n'), ((6425, 6469), 'torch.cat', 'torch.cat', (['(x, self.conv_feats[-(2 + i)])', '(1)'], {}), '((x, self.conv_feats[-(2 + i)]), 1)\n', (6434, 6469), False, 'import torch\n'), ((8446, 8479), 'torch.tensor', 'torch.tensor', (['invdepth_pyramid[i]'], {}), '(invdepth_pyramid[i])\n', (8458, 8479), False, 'import torch\n'), ((10254, 10331), 'torch.nn.Conv2d', 'nn.Conv2d', (['(bundle_size * 3)', '(16)'], {'kernel_size': '(7)', 'stride': '(2)', 'padding': '(3)', 'bias': '(True)'}), '(bundle_size * 3, 16, kernel_size=7, stride=2, padding=3, bias=True)\n', (10263, 10331), True, 'import torch.nn as nn\n'), ((10348, 10361), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (10355, 10361), True, 'import torch.nn as nn\n'), ((10404, 10468), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(True)'}), '(16, 32, kernel_size=5, stride=2, padding=2, bias=True)\n', (10413, 10468), True, 'import torch.nn as nn\n'), ((10487, 10500), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (10494, 10500), True, 'import torch.nn as nn\n'), ((10543, 10607), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(32, 64, kernel_size=3, stride=2, padding=1, bias=True)\n', (10552, 10607), True, 'import torch.nn as nn\n'), ((10626, 10639), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (10633, 10639), True, 'import torch.nn as nn\n'), ((10682, 10747), 
'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(64, 128, kernel_size=3, stride=2, padding=1, bias=True)\n', (10691, 10747), True, 'import torch.nn as nn\n'), ((10766, 10779), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (10773, 10779), True, 'import torch.nn as nn\n'), ((10822, 10888), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(128, 256, kernel_size=3, stride=2, padding=1, bias=True)\n', (10831, 10888), True, 'import torch.nn as nn\n'), ((10907, 10920), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (10914, 10920), True, 'import torch.nn as nn\n'), ((10963, 11029), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(256, 256, kernel_size=3, stride=2, padding=1, bias=True)\n', (10972, 11029), True, 'import torch.nn as nn\n'), ((11055, 11068), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (11062, 11068), True, 'import torch.nn as nn\n'), ((11094, 11160), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(256, 256, kernel_size=3, stride=2, padding=1, bias=True)\n', (11103, 11160), True, 'import torch.nn as nn\n'), ((11186, 11199), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (11193, 11199), True, 'import torch.nn as nn\n'), ((11225, 11313), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(6 * (bundle_size - 1))'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(256, 6 * (bundle_size - 1), kernel_size=3, stride=2, padding=1,\n bias=True)\n', (11234, 11313), True, 'import torch.nn as nn\n'), ((11347, 11452), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(256)', 'out_channels': '(256)', 'kernel_size': '(2)', 'bias': '(True)', 'stride': 
'(2)', 'padding': '(0)'}), '(in_channels=256, out_channels=256, kernel_size=2, bias=\n True, stride=2, padding=0)\n', (11365, 11452), True, 'import torch.nn as nn\n'), ((11682, 11695), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (11689, 11695), True, 'import torch.nn as nn\n'), ((11736, 11841), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(256)', 'out_channels': '(128)', 'kernel_size': '(2)', 'bias': '(True)', 'stride': '(2)', 'padding': '(0)'}), '(in_channels=256, out_channels=128, kernel_size=2, bias=\n True, stride=2, padding=0)\n', (11754, 11841), True, 'import torch.nn as nn\n'), ((12071, 12084), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (12078, 12084), True, 'import torch.nn as nn\n'), ((12125, 12229), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(128)', 'out_channels': '(64)', 'kernel_size': '(2)', 'bias': '(True)', 'stride': '(2)', 'padding': '(0)'}), '(in_channels=128, out_channels=64, kernel_size=2, bias=\n True, stride=2, padding=0)\n', (12143, 12229), True, 'import torch.nn as nn\n'), ((12472, 12485), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (12479, 12485), True, 'import torch.nn as nn\n'), ((12526, 12629), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(64)', 'out_channels': '(32)', 'kernel_size': '(2)', 'bias': '(True)', 'stride': '(2)', 'padding': '(0)'}), '(in_channels=64, out_channels=32, kernel_size=2, bias=\n True, stride=2, padding=0)\n', (12544, 12629), True, 'import torch.nn as nn\n'), ((12878, 12891), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (12885, 12891), True, 'import torch.nn as nn\n'), ((12932, 13035), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(32)', 'out_channels': '(16)', 'kernel_size': '(2)', 'bias': '(True)', 'stride': '(2)', 'padding': '(0)'}), '(in_channels=32, out_channels=16, kernel_size=2, bias=\n True, stride=2, padding=0)\n', (12950, 13035), True, 'import 
torch.nn as nn\n'), ((13284, 13297), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (13291, 13297), True, 'import torch.nn as nn\n'), ((13339, 13413), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', 'bundle_size'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(128, bundle_size, kernel_size=3, stride=1, padding=1, bias=True)\n', (13348, 13413), True, 'import torch.nn as nn\n'), ((13478, 13490), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (13488, 13490), True, 'import torch.nn as nn\n'), ((13532, 13605), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', 'bundle_size'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(64, bundle_size, kernel_size=3, stride=1, padding=1, bias=True)\n', (13541, 13605), True, 'import torch.nn as nn\n'), ((13670, 13682), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (13680, 13682), True, 'import torch.nn as nn\n'), ((13724, 13797), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', 'bundle_size'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(32, bundle_size, kernel_size=3, stride=1, padding=1, bias=True)\n', (13733, 13797), True, 'import torch.nn as nn\n'), ((13862, 13874), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (13872, 13874), True, 'import torch.nn as nn\n'), ((13916, 13989), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', 'bundle_size'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(16, bundle_size, kernel_size=3, stride=1, padding=1, bias=True)\n', (13925, 13989), True, 'import torch.nn as nn\n'), ((14054, 14066), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (14064, 14066), True, 'import torch.nn as nn\n'), ((6160, 6204), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""bilinear"""'}), "(scale_factor=2, mode='bilinear')\n", (6171, 6204), True, 'import torch.nn as nn\n'), ((6315, 6359), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': 
'"""bilinear"""'}), "(scale_factor=2, mode='bilinear')\n", (6326, 6359), True, 'import torch.nn as nn\n')] |
#!/usr/bin/env python3
import sys, os
import matplotlib as mat
mat.use('GTK3Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import cv2
from sklearn.cluster import KMeans
import numpy as np
import sqlite3
from joblib import Parallel, delayed
import logging
from typing import *
# Number of dominant colours extracted per image.
CLUSTERS = 5
# Shared database handle; opened in main(), used by the worker helpers.
connection: Optional[sqlite3.Connection] = None
# Maps image basename -> list of CLUSTERS packed 24-bit RGB ints (filled by
# process()). BUG FIX: this was `images = Dict`, which binds the typing alias
# object rather than an actual dictionary, so `images.update(...)` failed.
images: Dict[str, List[int]] = {}
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(funcName)s - %(message)s', level=logging.DEBUG)
def process(file):
    """Extract the CLUSTERS dominant colours of a jpg image.

    Skips paths that do not end in "jpg". The dominant colours — packed as
    0xRRGGBB integers, most frequent first — are stored in the module-level
    ``images`` dict keyed by the file's basename, and also returned as
    ``(basename, color_ints)`` so callers do not have to rely on the global.
    Returns None for skipped files.
    """
    if not file.endswith("jpg"):
        return None
    logging.debug("reading {}".format(file))
    img = cv2.imread(file)
    # Flatten the image into one row per pixel (3 channels each).
    img = img.reshape((img.shape[0] * img.shape[1], 3))
    logging.debug("KMeans for {}".format(file))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret, label, center = cv2.kmeans(np.float32(img), CLUSTERS, None, criteria, 10,
                                    cv2.KMEANS_PP_CENTERS)
    # Histogram of cluster assignments -> fraction of pixels per cluster.
    numLabels = np.arange(0, CLUSTERS + 1)
    hist, _ = np.histogram(label, bins=numLabels)
    hist = hist.astype("float")
    hist /= hist.sum()
    # Order cluster centres from most to least frequent.
    colors = center[(-hist).argsort()]
    color_ints = []
    logging.debug("{}: {}".format(file, str(colors)))
    for i in range(CLUSTERS):
        # cv2.imread yields BGR channel order; pack as a single RGB integer.
        col = int(colors[i][2]) << 16 | int(colors[i][1]) << 8 | int(colors[i][0])
        # A packed 24-bit value is at most 2**24 - 1 (the old check used <=).
        assert col < 2 ** 24
        color_ints.append(col)
    logging.debug("{}: {}".format(file, str(color_ints)))
    key = os.path.basename(file)
    images.update({key: color_ints})
    return key, color_ints
def main(folder, img, db):
    """Build a colour-histogram SQLite database for all jpg files under *folder*.

    Parameters
    ----------
    folder : str
        Root directory walked recursively for image files.
    img : str
        Unused; kept for CLI backward compatibility.
    db : str
        Suffix of the database file name ("hist_<db>.sqlite").
    """
    global connection
    logging.info("Creating Database: hist_{}.sqlite".format(db))
    connection = sqlite3.connect("hist_{}.sqlite".format(db), check_same_thread=False)
    c = connection.cursor()
    c.execute("CREATE TABLE images (imgname text)")
    for i in range(CLUSTERS):
        c.execute("ALTER TABLE images ADD COLUMN color{} integer".format(i))
    connection.commit()
    for root, dirs, files in os.walk(folder):
        # prefer="threads": the workers mutate the module-level `images`
        # dict, which is visible to this process only with the threading
        # backend (the default process backend would silently discard the
        # workers' updates, leaving the table empty).
        Parallel(n_jobs=2, prefer="threads")(
            delayed(process)(os.path.join(root, file)) for file in files
        )
    # Parameterized INSERT: image names come from the filesystem and must
    # not be interpolated into the SQL text.
    placeholders = ",".join("?" * (CLUSTERS + 1))
    for key, value in images.items():
        c.execute("INSERT INTO images VALUES ({})".format(placeholders),
                  [key] + [int(v) for v in value])
    connection.commit()
    connection.close()
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # `assert` is stripped under `python -O`; validate CLI arguments explicitly.
    if len(sys.argv) <= 3:
        raise SystemExit("usage: {} <folder> <img> <db>".format(sys.argv[0]))
    folder = sys.argv[1]
    img = sys.argv[2]
    db = sys.argv[3]
    main(folder, img, db)
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| [
"logging.basicConfig",
"numpy.histogram",
"matplotlib.use",
"os.walk",
"os.path.join",
"joblib.Parallel",
"os.path.basename",
"joblib.delayed",
"cv2.imread",
"numpy.float32",
"numpy.arange"
] | [((64, 82), 'matplotlib.use', 'mat.use', (['"""GTK3Agg"""'], {}), "('GTK3Agg')\n", (71, 82), True, 'import matplotlib as mat\n'), ((378, 495), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(funcName)s - %(message)s"""', 'level': 'logging.DEBUG'}), "(format=\n '%(asctime)s - %(levelname)s - %(funcName)s - %(message)s', level=\n logging.DEBUG)\n", (397, 495), False, 'import logging\n'), ((631, 647), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (641, 647), False, 'import cv2\n'), ((1088, 1114), 'numpy.arange', 'np.arange', (['(0)', '(CLUSTERS + 1)'], {}), '(0, CLUSTERS + 1)\n', (1097, 1114), True, 'import numpy as np\n'), ((1131, 1166), 'numpy.histogram', 'np.histogram', (['label'], {'bins': 'numLabels'}), '(label, bins=numLabels)\n', (1143, 1166), True, 'import numpy as np\n'), ((2266, 2281), 'os.walk', 'os.walk', (['folder'], {}), '(folder)\n', (2273, 2281), False, 'import sys, os\n'), ((867, 882), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (877, 882), True, 'import numpy as np\n'), ((1783, 1805), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (1799, 1805), False, 'import sys, os\n'), ((2291, 2309), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(2)'}), '(n_jobs=2)\n', (2299, 2309), False, 'from joblib import Parallel, delayed\n'), ((2310, 2326), 'joblib.delayed', 'delayed', (['process'], {}), '(process)\n', (2317, 2326), False, 'from joblib import Parallel, delayed\n'), ((2327, 2351), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (2339, 2351), False, 'import sys, os\n')] |
import matplotlib.pyplot as plt
import numpy as np
def inverse_normalization(X):
    """Map [0, 1]-normalized pixel values back onto the 0-255 range."""
    return 255.0 * X
def plot_generated_batch(X_full, X_sketch, generator_model, epoch_num, dataset_name, batch_num):
    """Save a progress image showing |sketch, generated, ground-truth| panels.

    Runs the generator on the sketch batch, rescales everything to 0-255,
    keeps the first 8 samples, and writes one composite PNG per call.
    """
    generated = generator_model.predict(X_sketch)
    # back to raw pixel scale, limited to 8 images as output
    sketches = inverse_normalization(X_sketch)[:8]
    reals = inverse_normalization(X_full)[:8]
    fakes = inverse_normalization(generated)[:8]
    # put |decoded, generated, original| images next to each other,
    # then merge the whole batch into one giant block of images
    panel = np.concatenate((sketches, fakes, reals), axis=3)
    panel = np.concatenate(panel, axis=1)
    # save the giant n x 3 composite
    plt.imsave('./pix2pix_out/progress_imgs/{}_epoch_{}_batch_{}.png'.format(dataset_name, epoch_num, batch_num), panel[0], cmap='Greys_r')
| [
"numpy.concatenate"
] | [((574, 610), 'numpy.concatenate', 'np.concatenate', (['(Xs, Xg, Xr)'], {'axis': '(3)'}), '((Xs, Xg, Xr), axis=3)\n', (588, 610), True, 'import numpy as np\n'), ((657, 682), 'numpy.concatenate', 'np.concatenate', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (671, 682), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sp
import astropy.units as au
import astropy.constants as ac
def Sigma_E(SFE, Psi):
    """Eddington surface density for star-formation efficiency SFE and
    light-to-mass ratio Psi (cgs constants from astropy)."""
    efficiency_factor = SFE / (1.0 + SFE)
    return efficiency_factor * Psi / (2.0 * np.pi * ac.c.cgs.value * ac.G.cgs.value)
def mu_M(Sigma_cl, SFE, x, sigma):
    """Mean of ln(Sigma) for the mass-weighted surface-density distribution."""
    mean_surface = Sigma_cl * (1.0 - SFE) / (4.0 * x ** 2)
    return np.log(mean_surface) + 0.5 * sigma ** 2
def y_E(Sigma_cl, SFE, x, sigma, Psi):
    """
    Returns the random variable with a standard normal distribution
    for mass-weighted surface density distribution corresponding to Sigma_E
    y_E = (ln(Sigma_E) - mu_M)/(sqrt(2)*sigma_lnSigma)
    """
    log_sigma_edd = np.log(Sigma_E(SFE, Psi))
    mean_ln_sigma = mu_M(Sigma_cl, SFE, x, sigma)
    return (log_sigma_edd - mean_ln_sigma) / (np.sqrt(2.0) * sigma)
def argmax_eps_of(Sigmacl, Psi=2000.0, sigma=1.0, x=1.0):
    """Scan SFE on a fine grid and locate the value maximizing eps_of.

    Returns the SFE grid, the corresponding y_E values, the eps_of curve,
    and the grid point where eps_of peaks.
    """
    SFE = np.linspace(0.0001, 0.9999, num=1000)
    yE = y_E(Sigmacl, SFE, x, sigma, Psi)
    eps_of = 0.5 * (1.0 - SFE) * (1.0 + sp.erf(yE))
    best_SFE = SFE[np.argmax(eps_of)]
    return SFE, yE, eps_of, best_SFE
def eps_min_max(Sigmacl, Psi=2000.0, sigma=1.0, x=1.0):
    """
    Compute final SF efficiency given Sigmacl, Psi, sigma_lnSigma, x

    Accepts a scalar (int or float) or an array-like of cloud surface
    densities and returns float arrays (eps_min, eps_max) of matching length.
    """
    # np.atleast_1d handles floats, ints, lists and ndarrays uniformly.
    # The original float-only branch crashed on a bare Python int (the 0-d
    # array could not be iterated), and an integer ndarray made zeros_like
    # integer-typed, truncating every efficiency to 0. Forcing dtype=float
    # fixes both while preserving behavior for the supported inputs.
    Sigmacl = np.atleast_1d(np.asarray(Sigmacl, dtype=float))
    eps_min = np.zeros_like(Sigmacl)
    eps_max = np.zeros_like(Sigmacl)
    for i, Sigmacl_ in enumerate(Sigmacl):
        SFE, yE, eps_of, SFE_min = argmax_eps_of(Sigmacl_, Psi=Psi, sigma=sigma, x=x)
        eps_min[i] = SFE_min
        eps_max[i] = 1.0 - max(eps_of)
    return eps_min, eps_max
| [
"numpy.sqrt",
"numpy.log",
"numpy.asarray",
"numpy.argmax",
"numpy.linspace",
"scipy.special.erf",
"numpy.zeros_like"
] | [((829, 866), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(0.9999)'], {'num': '(1000)'}), '(0.0001, 0.9999, num=1000)\n', (840, 866), True, 'import numpy as np\n'), ((1366, 1388), 'numpy.zeros_like', 'np.zeros_like', (['Sigmacl'], {}), '(Sigmacl)\n', (1379, 1388), True, 'import numpy as np\n'), ((1403, 1425), 'numpy.zeros_like', 'np.zeros_like', (['Sigmacl'], {}), '(Sigmacl)\n', (1416, 1425), True, 'import numpy as np\n'), ((316, 363), 'numpy.log', 'np.log', (['(Sigma_cl * (1.0 - SFE) / (4.0 * x ** 2))'], {}), '(Sigma_cl * (1.0 - SFE) / (4.0 * x ** 2))\n', (322, 363), True, 'import numpy as np\n'), ((716, 732), 'numpy.log', 'np.log', (['Sigma_E_'], {}), '(Sigma_E_)\n', (722, 732), True, 'import numpy as np\n'), ((742, 754), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (749, 754), True, 'import numpy as np\n'), ((945, 955), 'scipy.special.erf', 'sp.erf', (['yE'], {}), '(yE)\n', (951, 955), True, 'import scipy.special as sp\n'), ((989, 1006), 'numpy.argmax', 'np.argmax', (['eps_of'], {}), '(eps_of)\n', (998, 1006), True, 'import numpy as np\n'), ((1269, 1290), 'numpy.asarray', 'np.asarray', (['[Sigmacl]'], {}), '([Sigmacl])\n', (1279, 1290), True, 'import numpy as np\n'), ((1327, 1346), 'numpy.asarray', 'np.asarray', (['Sigmacl'], {}), '(Sigmacl)\n', (1337, 1346), True, 'import numpy as np\n')] |
import numpy as np
from scipy.sparse import csr_matrix
def read(file_path, min_feature_count=0):
    """Parse a libsvm-formatted file into a sparse matrix and labels.

    Each line looks like "<label> <idx>:<val> <idx>:<val> ...". Returns a
    scipy CSR matrix of shape (n_rows, max(n_columns, min_feature_count))
    holding float32 values, and the list of integer labels.
    """
    data = []
    column = []
    row = [0]  # CSR row pointer: cumulative element counts
    y = []
    rowCount = 0
    columnCount = 0
    elementCount = 0
    # `with` closes the handle even if a malformed line raises (the original
    # code leaked the file object).
    with open(file_path, "r") as file:
        for line in file:
            cur = line.split(" ")
            y.append(int(cur[0]))
            for i in range(1, len(cur)):
                item = cur[i].split(":")
                data.append(float(item[1]))
                column.append(int(item[0]))
                if int(item[0]) + 1 > columnCount:
                    columnCount = int(item[0]) + 1
                elementCount += 1
            rowCount += 1
            row.append(elementCount)
    X = csr_matrix(
        (np.array(data, np.float32), np.array(column, np.int32), row),
        shape=(rowCount, max(columnCount, min_feature_count)),
    )
    return (X, y)
def correct(file_path):
    """Convert a 1-based libsvm file to 0-based indexing.

    Writes a copy with both the label and every feature index decremented by
    one to "res.txt", and returns the (X, y) parse of the *original* file as
    a CSR matrix plus label list.
    """
    data = []
    column = []
    row = [0]
    y = []
    rowCount = 0
    columnCount = 0
    elementCount = 0
    # `with` guarantees res.txt is flushed and closed; the original leaked
    # both handles, which could silently truncate the output file.
    with open(file_path, "r") as file, open("res.txt", "w") as fileW:
        for line in file:
            # Strip the newline and emit it explicitly at end of row; the
            # original relied on it riding along inside the last value.
            cur = line.rstrip("\n").split(" ")
            y.append(int(cur[0]))
            fileW.write(str(int(cur[0]) - 1))
            for i in range(1, len(cur)):
                item = cur[i].split(":")
                data.append(float(item[1]))
                column.append(int(item[0]))
                if int(item[0]) + 1 > columnCount:
                    columnCount = int(item[0]) + 1
                elementCount += 1
                s = str(int(item[0]) - 1) + ":" + item[1]
                fileW.write(" " + s)
            rowCount += 1
            row.append(elementCount)
            fileW.write("\n")
    X = csr_matrix(
        (np.array(data, np.float32), np.array(column, np.int32), row),
        shape=(rowCount, columnCount),
    )
    return (X, y)
| [
"numpy.array"
] | [((582, 608), 'numpy.array', 'np.array', (['data', 'np.float32'], {}), '(data, np.float32)\n', (590, 608), True, 'import numpy as np\n'), ((612, 638), 'numpy.array', 'np.array', (['column', 'np.int32'], {}), '(column, np.int32)\n', (620, 638), True, 'import numpy as np\n'), ((1379, 1405), 'numpy.array', 'np.array', (['data', 'np.float32'], {}), '(data, np.float32)\n', (1387, 1405), True, 'import numpy as np\n'), ((1409, 1435), 'numpy.array', 'np.array', (['column', 'np.int32'], {}), '(column, np.int32)\n', (1417, 1435), True, 'import numpy as np\n')] |
"""
Example using sksym with a two-dimensional landmap.
"""
import functools
import glob
import os
import numpy
import lightgbm
import matplotlib
from matplotlib import pyplot
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from PIL import Image
import sksym
# Deterministic random generator so the sampled data and figures are
# reproducible from run to run (counter-based Philox with a fixed seed).
RNG = numpy.random.Generator(numpy.random.Philox(0xF00D1))
# Output directory name: this script's filename with the ".py" suffix dropped.
PREFIX = os.path.basename(__file__)[:-3]
# Colormap shared by the heat-map style figures.
CMAP = matplotlib.cm.inferno
def main():
    """Run the land-map example at several symmetry-violation strengths.

    Produces plain and border-filtered variants for violation fractions
    0.0, 0.5 and 0.9, plus one filtered run with 10 fake samples.
    """
    ndata = 5_000

    def violate(scale, dist):
        # Modulate the distribution with a sin^2 pattern along x, breaking
        # the map's symmetry by up to `scale`, in place.
        x = numpy.linspace(1, 10, dist.shape[1])
        dist *= 1 - scale * numpy.sin(x) ** 2

    # NOTE: a nested `wave` helper that rolled rows by a sinusoidal offset
    # was defined here but never called anywhere; removed as dead code.

    def filter_(shape):
        # Down-weight a 30-pixel border of the map to 20%.
        ngap = 30
        scale = 0.2
        filt = numpy.ones(shape)
        filt[:ngap] = scale
        filt[-ngap:] = scale
        filt[:, :ngap] = scale
        filt[:, -ngap:] = scale
        return filt

    for frac in (0.0, 0.5, 0.9):
        suffix = "%dk_%.1f" % (ndata // 1000, frac)
        suffix = suffix.replace(".", "p")
        seasons = lambda d: violate(frac, d)
        example_map(suffix, ndata, seasons)
        example_map(suffix + "_filtered", ndata, seasons, filter_)

    frac = 0.9
    suffix = "%dk_%.1f" % (ndata // 1000, frac)
    suffix = suffix.replace(".", "p")
    seasons = lambda d: violate(frac, d)
    example_map(suffix + "_filtered_n10", ndata, seasons, filter_, nfakes=10)
def example_map(suffix, ndata, violate=None, filter_=None, *, nfakes=1):
    """Make data, fit a model, and output diagnostics.

    Samples 2*ndata (y, x) points from a height-weighted pixel distribution
    built from the blob masks, optionally symmetry-violated and filtered,
    fits a LightGBM model via the sksym "which is real" objective, and saves
    three diagnostic figures (scatter of data, per-blob score map, model
    output surface) under names tagged with `suffix`.
    """
    print("suffix:", suffix, flush=True)
    images, heights = load_images()
    # build map from pixel to blob index
    shape = images[0].shape  # (y, x)
    pixel_index = numpy.empty(shape, numpy.int_)
    for i, img in enumerate(images):
        pixel_index[img] = i
    # make initial distribution scale with height
    dist = numpy.empty(shape, numpy.float_)
    min_height = min(heights)
    for i, img in enumerate(images):
        dist[img] = (heights[i] - min_height) ** 1.5
    # violate its symmetry
    if violate is not None:
        violate(dist)
    # apply filtering
    if filter_ is None:
        filt = numpy.ones(dist.shape)
    else:
        filt = filter_(dist.shape)
    dist *= filt
    # cumulative distribution over flattened pixels, for inverse-CDF sampling
    map_cdf = dist.ravel().cumsum()
    map_cdf /= map_cdf[-1]
    # make un-violated distributions for contours
    cdfs = []
    for img in images:
        pdf = img * filt
        cdf = pdf.ravel().cumsum()
        if cdf[-1]:
            cdf /= cdf[-1]
        cdfs.append(cdf)
    # sample data (y, x)
    def sample_blob(cdf):
        # discrete cdf sample
        yxi = numpy.searchsorted(cdf, RNG.uniform())
        y = yxi // shape[1]
        x = yxi % shape[1]
        # assign within pixel
        y += RNG.uniform()
        x += RNG.uniform()
        return y, x
    data = numpy.empty((ndata * 2, 2))
    for i in range(len(data)):
        data[i] = sample_blob(map_cdf)
    x_train = data[:ndata]
    x_test = data[ndata:]
    # fit model
    def transform(data):
        # map each sample to a fresh draw from its blob's un-violated cdf
        new = numpy.empty_like(data)
        for i, (y0, x0) in enumerate(data):
            index = pixel_index[int(y0), int(x0)]
            new[i] = sample_blob(cdfs[index])
        return new
    blobber = sksym.WhichIsReal(transform, nfakes)
    model = lightgbm.LGBMRegressor(
        objective=blobber.objective(),
        max_depth=2,
        random_state=RNG.integers(2 ** 31),
    )
    sksym.fit(model, blobber.pack(x_train))
    # score on the held-out half
    x_pack = blobber.pack(x_test)
    print("mean llr: %.3f +- %.3f" % sksym.score(model, x_pack, and_std=True))
    # figure: data
    nscat = 10_000
    figure, axis = pyplot.subplots(
        dpi=120,
        figsize=(9, 5),
        tight_layout=(0, 0, 0),
    )
    axis.scatter(
        x_train[:nscat, 1],
        x_train[:nscat, 0],
        c="k",
        s=1,
        marker=",",
        lw=0,
    )
    axis.set_xlim(0, shape[1])
    axis.set_ylim(shape[0], 0)
    axis.set_xlabel(r"$x$")
    axis.set_ylabel(r"$y$")
    axis.set_aspect("equal")
    save_fig(figure, "data_%s.png" % suffix)
    # figure: classifier by blob
    # log-probability relative to the 50/50 baseline, averaged per blob
    scores = sksym.predict_log_proba(model, x_pack)[..., 0] - numpy.log(0.5)
    if nfakes > 1:
        scores = scores.mean(axis=0)
    blob_scores = numpy.zeros(len(images))
    for i, score in enumerate(scores):
        y, x = x_train[i]
        index = pixel_index[int(y), int(x)]
        blob_scores[index] += score
    blob_scores /= len(scores)
    influence = numpy.empty(shape)
    for i, bs in enumerate(blob_scores):
        img = images[i]
        influence[img] = bs
    figure, axis = pyplot.subplots(
        dpi=120,
        figsize=(9, 5),
        tight_layout=(0, 0, 0),
    )
    size = 0.007
    im = axis.imshow(
        influence,
        vmin=-size,
        vmax=size,
        cmap=CMAP,
        interpolation="None",
    )
    figure.colorbar(im)
    axis.set_xlim(0, shape[1])
    axis.set_ylim(shape[0] - 1, 0)
    axis.set_xlabel(r"$x$")
    axis.set_ylabel(r"$y$")
    axis.set_aspect("equal")
    save_fig(figure, "score_%s.png" % suffix)
    # figure: model output
    ygrid = numpy.linspace(0, shape[0], 100)
    xgrid = numpy.linspace(0, shape[1], 200)
    xgrid, ygrid = numpy.meshgrid(xgrid, ygrid)
    grid = numpy.stack([ygrid.ravel(), xgrid.ravel()], axis=-1)
    zgrid = model.predict(grid).reshape(xgrid.shape)
    figure, axis = pyplot.subplots(
        dpi=120,
        figsize=(9, 5),
        tight_layout=(0, 0, 0),
    )
    cont = axis.contourf(xgrid, ygrid, zgrid, cmap=CMAP, levels=255)
    figure.colorbar(cont, ax=axis)
    axis.set_xlim(0, shape[1])
    axis.set_ylim(shape[0], 0)
    axis.set_xlabel(r"$x$")
    axis.set_ylabel(r"$y$")
    axis.set_aspect("equal")
    save_fig(figure, "zeta_%s.png" % suffix)
@functools.lru_cache
def load_images():
    """Return lists of blob mask images and their height values.

    Reads every "data_map/mask_*_*.png", checks that all masks share one
    shape and are boolean, and parses each height from the filename.
    Cached: the files are only read once per process.
    """
    masks = []
    heights = []
    expected_shape = None
    for path in glob.glob("data_map/mask_*_*.png"):
        mask = numpy.array(Image.open(path))
        if expected_shape is None:
            expected_shape = mask.shape
        assert mask.shape == expected_shape
        assert mask.dtype == numpy.bool_
        masks.append(mask)
        heights.append(int(path.split("_")[-2]))
    return masks, heights
# utilities
def save_fig(figure, path):
    """Write `figure` under the PREFIX output directory and release it."""
    figure.savefig(os.path.join(PREFIX, path))
    pyplot.close(figure)
if __name__ == "__main__":
main()
| [
"PIL.Image.open",
"numpy.roll",
"numpy.ones",
"numpy.sin",
"numpy.log",
"os.path.join",
"numpy.random.Philox",
"matplotlib.pyplot.close",
"sksym.WhichIsReal",
"numpy.linspace",
"numpy.empty",
"os.path.basename",
"numpy.empty_like",
"sksym.score",
"sksym.predict_log_proba",
"numpy.meshg... | [((305, 332), 'numpy.random.Philox', 'numpy.random.Philox', (['(983249)'], {}), '(983249)\n', (324, 332), False, 'import numpy\n'), ((345, 371), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (361, 371), False, 'import os\n'), ((1769, 1799), 'numpy.empty', 'numpy.empty', (['shape', 'numpy.int_'], {}), '(shape, numpy.int_)\n', (1780, 1799), False, 'import numpy\n'), ((1928, 1960), 'numpy.empty', 'numpy.empty', (['shape', 'numpy.float_'], {}), '(shape, numpy.float_)\n', (1939, 1960), False, 'import numpy\n'), ((2900, 2927), 'numpy.empty', 'numpy.empty', (['(ndata * 2, 2)'], {}), '((ndata * 2, 2))\n', (2911, 2927), False, 'import numpy\n'), ((3305, 3341), 'sksym.WhichIsReal', 'sksym.WhichIsReal', (['transform', 'nfakes'], {}), '(transform, nfakes)\n', (3322, 3341), False, 'import sksym\n'), ((3720, 3784), 'matplotlib.pyplot.subplots', 'pyplot.subplots', ([], {'dpi': '(120)', 'figsize': '(9, 5)', 'tight_layout': '(0, 0, 0)'}), '(dpi=120, figsize=(9, 5), tight_layout=(0, 0, 0))\n', (3735, 3784), False, 'from matplotlib import pyplot\n'), ((4557, 4575), 'numpy.empty', 'numpy.empty', (['shape'], {}), '(shape)\n', (4568, 4575), False, 'import numpy\n'), ((4689, 4753), 'matplotlib.pyplot.subplots', 'pyplot.subplots', ([], {'dpi': '(120)', 'figsize': '(9, 5)', 'tight_layout': '(0, 0, 0)'}), '(dpi=120, figsize=(9, 5), tight_layout=(0, 0, 0))\n', (4704, 4753), False, 'from matplotlib import pyplot\n'), ((5202, 5234), 'numpy.linspace', 'numpy.linspace', (['(0)', 'shape[0]', '(100)'], {}), '(0, shape[0], 100)\n', (5216, 5234), False, 'import numpy\n'), ((5247, 5279), 'numpy.linspace', 'numpy.linspace', (['(0)', 'shape[1]', '(200)'], {}), '(0, shape[1], 200)\n', (5261, 5279), False, 'import numpy\n'), ((5299, 5327), 'numpy.meshgrid', 'numpy.meshgrid', (['xgrid', 'ygrid'], {}), '(xgrid, ygrid)\n', (5313, 5327), False, 'import numpy\n'), ((5466, 5530), 'matplotlib.pyplot.subplots', 'pyplot.subplots', ([], {'dpi': '(120)', 'figsize': 
'(9, 5)', 'tight_layout': '(0, 0, 0)'}), '(dpi=120, figsize=(9, 5), tight_layout=(0, 0, 0))\n', (5481, 5530), False, 'from matplotlib import pyplot\n'), ((6027, 6061), 'glob.glob', 'glob.glob', (['"""data_map/mask_*_*.png"""'], {}), "('data_map/mask_*_*.png')\n", (6036, 6061), False, 'import glob\n'), ((6399, 6425), 'os.path.join', 'os.path.join', (['PREFIX', 'path'], {}), '(PREFIX, path)\n', (6411, 6425), False, 'import os\n'), ((6459, 6479), 'matplotlib.pyplot.close', 'pyplot.close', (['figure'], {}), '(figure)\n', (6471, 6479), False, 'from matplotlib import pyplot\n'), ((482, 518), 'numpy.linspace', 'numpy.linspace', (['(1)', '(10)', 'dist.shape[1]'], {}), '(1, 10, dist.shape[1])\n', (496, 518), False, 'import numpy\n'), ((797, 814), 'numpy.ones', 'numpy.ones', (['shape'], {}), '(shape)\n', (807, 814), False, 'import numpy\n'), ((2221, 2243), 'numpy.ones', 'numpy.ones', (['dist.shape'], {}), '(dist.shape)\n', (2231, 2243), False, 'import numpy\n'), ((3108, 3130), 'numpy.empty_like', 'numpy.empty_like', (['data'], {}), '(data)\n', (3124, 3130), False, 'import numpy\n'), ((4249, 4263), 'numpy.log', 'numpy.log', (['(0.5)'], {}), '(0.5)\n', (4258, 4263), False, 'import numpy\n'), ((694, 718), 'numpy.roll', 'numpy.roll', (['dist[i]', 'rot'], {}), '(dist[i], rot)\n', (704, 718), False, 'import numpy\n'), ((3619, 3659), 'sksym.score', 'sksym.score', (['model', 'x_pack'], {'and_std': '(True)'}), '(model, x_pack, and_std=True)\n', (3630, 3659), False, 'import sksym\n'), ((4200, 4238), 'sksym.predict_log_proba', 'sksym.predict_log_proba', (['model', 'x_pack'], {}), '(model, x_pack)\n', (4223, 4238), False, 'import sksym\n'), ((6089, 6105), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (6099, 6105), False, 'from PIL import Image\n'), ((547, 559), 'numpy.sin', 'numpy.sin', (['x'], {}), '(x)\n', (556, 559), False, 'import numpy\n'), ((647, 664), 'numpy.sin', 'numpy.sin', (['(i / 20)'], {}), '(i / 20)\n', (656, 664), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
# Copyright © 2019 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import numpy as np
from PIL import Image
import turicreate.toolkits._tf_utils as _utils
import turicreate as tc
from turicreate._deps.minimal_package import _minimal_package_import_check
# in conjunction with minimal package
def _lazy_import_tensorflow():
    """Import TensorFlow v1-compat lazily via turicreate's minimal-package check.

    Deferring the import keeps turicreate usable when tensorflow is absent;
    the check raises a helpful error only when TF is actually needed.
    """
    from turicreate._deps.minimal_package import _minimal_package_import_check

    return _minimal_package_import_check("tensorflow.compat.v1")
# Default hyper-parameters for the augmentation functions defined below.
_DEFAULT_AUG_PARAMS = {
    # colour jitter limits (hue_augmenter / color_augmenter)
    "max_hue_adjust": 0.05,
    "max_brightness": 0.05,
    "max_contrast": 1.25,
    "max_saturation": 1.25,
    # probability of skipping the horizontal flip entirely
    "skip_probability_flip": 0.5,
    # geometric limits for crop/pad augmentation
    "min_aspect_ratio": 0.8,
    "max_aspect_ratio": 1.25,
    "min_area_fraction_crop": 0.15,
    "max_area_fraction_crop": 1.0,
    "min_area_fraction_pad": 1.0,
    "max_area_fraction_pad": 2.0,
    "max_attempts": 50,
    "skip_probability_pad": 0.1,
    "skip_probability_crop": 0.1,
    "min_object_covered": 0.0,
    "min_eject_coverage": 0.5,
    # backend used by resize_augmenter: "turicreate", "PIL", or "tensorflow"
    "resize_method": "turicreate",
}
def interpolate(alpha_tf, x, y):
    """Linearly interpolate between scalars x and y: x + alpha_tf * (y - x)."""
    tf = _lazy_import_tensorflow()
    lo = tf.constant(x, dtype=tf.float32)
    hi = tf.constant(y, dtype=tf.float32)
    span = tf.math.subtract(hi, lo)
    return tf.math.add(lo, tf.math.multiply(alpha_tf, span))
def hue_augmenter(
    image,
    annotation,
    random_alpha_tf,
    max_hue_adjust=_DEFAULT_AUG_PARAMS["max_hue_adjust"],
):
    """Rotate the image hue by a random amount in [-max_hue_adjust, +max_hue_adjust].

    `random_alpha_tf[0]` selects the rotation. The annotation passes through
    unchanged since hue rotation does not move any geometry.
    """
    tf = _lazy_import_tensorflow()
    # Apply the random rotation around the color wheel.
    delta = interpolate(random_alpha_tf[0], -max_hue_adjust, max_hue_adjust)
    adjusted = tf.image.adjust_hue(image, delta)
    # Rotation can push channel values slightly outside [0, 1]; clip back.
    adjusted = tf.clip_by_value(adjusted, 0, 1)
    return adjusted, annotation
def color_augmenter(
    image,
    annotation,
    random_alpha_tf,
    max_brightness=_DEFAULT_AUG_PARAMS["max_brightness"],
    max_contrast=_DEFAULT_AUG_PARAMS["max_contrast"],
    max_saturation=_DEFAULT_AUG_PARAMS["max_saturation"],
):
    """Randomly jitter brightness, saturation and contrast of `image`.

    The three entries of `random_alpha_tf` drive the three adjustments.
    The annotation passes through unchanged: only colours move, not geometry.
    """
    tf = _lazy_import_tensorflow()
    # Random brightness shift in [-max_brightness, +max_brightness].
    adjusted = tf.image.adjust_brightness(
        image, interpolate(random_alpha_tf[0], -max_brightness, max_brightness)
    )
    # Random saturation factor in [1/max_saturation, max_saturation].
    adjusted = tf.image.adjust_saturation(
        adjusted, interpolate(random_alpha_tf[1], 1 / max_saturation, max_saturation)
    )
    # Random contrast factor in [1/max_contrast, max_contrast].
    adjusted = tf.image.adjust_contrast(
        adjusted, interpolate(random_alpha_tf[2], 1 / max_contrast, max_contrast)
    )
    adjusted = tf.clip_by_value(adjusted, 0, 1)
    return adjusted, annotation
def resize_augmenter(image, annotation, output_shape):
    """Resize `image` to `output_shape` (height, width) with the configured backend.

    Bounding-box annotations use relative coordinates and therefore pass
    through unchanged. Pixel values are clipped back into [0, 1] after
    resampling.
    """
    resize_method = _DEFAULT_AUG_PARAMS["resize_method"]
    # TensorFlow is needed by every branch (the numpy_function wrappers and
    # the final clip), so bind it once up front. BUG FIX: the original only
    # bound `tf` inside the "tensorflow" and "turicreate" branches, so the
    # "PIL" branch raised NameError.
    tf = _lazy_import_tensorflow()

    def resize_PIL_image(image, output_shape):
        # Round-trip through uint8 for PIL's bilinear resampler.
        image *= 255.0
        image = image.astype("uint8")
        pil_img = Image.fromarray(image)
        resize_img = pil_img.resize(
            (output_shape[1], output_shape[0]), resample=Image.BILINEAR
        )
        np_img = np.array(resize_img)
        np_img = np_img.astype(np.float32)
        np_img /= 255.0
        return np_img

    def resize_turicreate_image(image, output_shape):
        # Round-trip through turicreate's Image type for its resampler.
        image *= 255.0
        image = image.astype("uint8")
        FORMAT_RAW = 2
        tc_image = tc.Image(
            _image_data=image.tobytes(),
            _width=image.shape[1],
            _height=image.shape[0],
            _channels=image.shape[2],
            _format_enum=FORMAT_RAW,
            _image_data_size=image.size,
        )
        tc_image = tc.image_analysis.resize(
            tc_image, output_shape[1], output_shape[0], resample="bilinear"
        )
        image = tc_image.pixel_data
        image = image.astype(np.float32)
        image /= 255.0
        return image

    if resize_method == "tensorflow":
        new_height = tf.cast(output_shape[0], dtype=tf.int32)
        new_width = tf.cast(output_shape[1], dtype=tf.int32)
        # Resize with a batch dimension added and removed around the op.
        image_scaled = tf.squeeze(
            tf.image.resize_bilinear(tf.expand_dims(image, 0), [new_height, new_width]),
            [0],
        )
    elif resize_method == "PIL":
        image_scaled = tf.numpy_function(
            func=resize_PIL_image, inp=[image, output_shape], Tout=[tf.float32]
        )
    elif resize_method == "turicreate":
        image_scaled = tf.numpy_function(
            func=resize_turicreate_image, inp=[image, output_shape], Tout=[tf.float32]
        )
    else:
        raise Exception("Non-supported resize method.")

    image_clipped = tf.clip_by_value(image_scaled, 0.0, 1.0)
    # No geometry changes (because of relative co-ordinate system)
    return image_clipped, annotation
def horizontal_flip_augmenter(
    image,
    annotation,
    random,
    skip_probability=_DEFAULT_AUG_PARAMS["skip_probability_flip"],
):
    """Randomly mirror ``image`` left-right and update annotations to match.

    Parameters
    ----------
    image : np.ndarray
        Image of shape (height, width, channels).
    annotation : sequence of np.ndarray
        Rows of [identifier, x, y, width, height, confidence] in relative
        coordinates; mutated in place.
    random : np.random.RandomState
        Source of randomness.
    skip_probability : float
        Chance of returning the inputs untouched.

    Returns
    -------
    (np.ndarray, sequence)
        The flipped image and the (in-place updated) annotations.
    """
    if random.uniform(0.0, 1.0) < skip_probability:
        return image, annotation
    # Mirror along the width axis.  (The original also unpacked the image
    # shape into locals that were never used.)
    flipped_image = np.flip(image, 1)
    # One image can have more than one annotation, so flip every box.
    for row in annotation:
        # Only flip if the annotation is not an empty (all-zero) box.
        if np.any(row[1:5]):
            # New left edge = 1 - old left edge - box width.
            row[1] = 1 - row[1] - row[3]
    return flipped_image, annotation
def padding_augmenter(
    image,
    annotation,
    random,
    skip_probability=_DEFAULT_AUG_PARAMS["skip_probability_pad"],
    min_aspect_ratio=_DEFAULT_AUG_PARAMS["min_aspect_ratio"],
    max_aspect_ratio=_DEFAULT_AUG_PARAMS["max_aspect_ratio"],
    min_area_fraction=_DEFAULT_AUG_PARAMS["min_area_fraction_pad"],
    max_area_fraction=_DEFAULT_AUG_PARAMS["max_area_fraction_pad"],
    max_attempts=_DEFAULT_AUG_PARAMS["max_attempts"],
):
    """Randomly pad ``image`` on all sides and remap its annotations.

    An output size is sampled by drawing an aspect ratio in
    [min_aspect_ratio, max_aspect_ratio] and then a height whose implied
    area stays within [min_area_fraction, max_area_fraction] of the
    original image area, retrying up to ``max_attempts`` times.  The image
    is placed at a random offset inside the padded canvas (fill value 0.5)
    and each non-empty annotation is translated and re-normalized to the
    padded size.

    Parameters
    ----------
    image : np.ndarray
        Image of shape (height, width, channels).
    annotation : sequence of np.ndarray
        Rows of [identifier, x, y, width, height, confidence] in relative
        coordinates (slicing below fixes this layout).
    random : np.random.RandomState
        Source of randomness.

    Returns
    -------
    (np.ndarray, np.ndarray or sequence)
        The padded image and remapped annotations; copies of the originals
        if the augmentation is skipped or no compatible size was found.
    """
    if random.uniform(0.0, 1.0) < skip_probability:
        return np.array(image), annotation
    image_height, image_width, _ = image.shape
    # Randomly sample aspect ratios until one derives a non-empty range of
    # compatible heights, or until reaching the upper limit on attempts.
    # NOTE(review): if max_attempts == 0 the loop body never runs and
    # min_height/max_height are unbound below — confirm max_attempts >= 1.
    for i in range(max_attempts):
        # Randomly sample an aspect ratio.
        aspect_ratio = random.uniform(min_aspect_ratio, max_aspect_ratio)
        # The padded height must be at least as large as the original height.
        #   h' >= h
        min_height = float(image_height)
        # The padded width must be at least as large as the original width.
        #   w' >= w IMPLIES ah' >= w IMPLIES h' >= w / a
        min_height_from_width = float(image_width) / aspect_ratio
        if min_height < min_height_from_width:
            min_height = min_height_from_width
        # The padded area must attain the minimum area fraction.
        #   w'h' >= fhw IMPLIES ah'h' >= fhw IMPLIES h' >= sqrt(fhw/a)
        min_height_from_area = np.sqrt(
            image_height * image_width * min_area_fraction / aspect_ratio
        )
        if min_height < min_height_from_area:
            min_height = min_height_from_area
        # The padded area must not exceed the maximum area fraction.
        max_height = np.sqrt(
            image_height * image_width * max_area_fraction / aspect_ratio
        )
        if min_height >= max_height:
            break
    # We did not find a compatible aspect ratio. Just return the original data.
    # NOTE(review): the loop breaks on min_height >= max_height (an *empty*
    # range) yet this guard only rejects min_height > max_height — confirm
    # the boundary case min_height == max_height is intended to proceed.
    if min_height > max_height:
        return np.array(image), annotation
    # Sample a final size, given the sampled aspect ratio and range of heights.
    padded_height = random.uniform(min_height, max_height)
    padded_width = padded_height * aspect_ratio
    # Sample the offset of the source image inside the padded image.
    x_offset = random.uniform(0.0, (padded_width - image_width))
    y_offset = random.uniform(0.0, (padded_height - image_height))
    # Compute padding needed on the image
    after_padding_width = padded_width - image_width - x_offset
    after_padding_height = padded_height - image_height - y_offset
    # Pad the image; (before, after) per axis, channels left untouched.
    npad = (
        (int(y_offset), int(after_padding_height)),
        (int(x_offset), int(after_padding_width)),
        (0, 0),
    )
    padded_image = np.pad(image, pad_width=npad, mode="constant", constant_values=0.5)
    ty = float(y_offset)
    tx = float(x_offset)
    # Transformation matrix for the annotations: a pure translation in
    # (y, x) order, matching the [y1, x1, y2, x2] coordinate stacking below.
    transformation_matrix = np.array([[1.0, 0.0, ty], [0.0, 1.0, tx], [0.0, 0.0, 1.0]])
    # Use transformation matrix to augment annotations
    formatted_annotation = []
    for aug in annotation:
        identifier = aug[0:1]
        bounds = aug[1:5]
        confidence = aug[5:6]
        # Empty (all-zero) boxes are carried through unchanged.
        if not np.any(bounds):
            formatted_annotation.append(
                np.concatenate([identifier, np.array([0, 0, 0, 0]), confidence])
            )
            continue
        # Convert relative [x, y, w, h] to absolute corner coordinates.
        width = bounds[2]
        height = bounds[3]
        x1 = bounds[0] * image_width
        y1 = bounds[1] * image_height
        x2 = (bounds[0] + width) * image_width
        y2 = (bounds[1] + height) * image_height
        augmentation_coordinates = np.array([y1, x1, y2, x2], dtype=np.float32)
        # Homogeneous coordinates: two (y, x) points with a trailing 1.
        v = np.concatenate(
            [
                augmentation_coordinates.reshape((2, 2)),
                np.ones((2, 1), dtype=np.float32),
            ],
            axis=1,
        )
        transposed_v = np.dot(v, np.transpose(transformation_matrix))
        t_intersection = np.squeeze(transposed_v[:, :2].reshape(-1, 4))
        # Sort the points top, left, bottom, right
        if t_intersection[0] > t_intersection[2]:
            t_intersection[0], t_intersection[2] = t_intersection[2], t_intersection[0]
        if t_intersection[1] > t_intersection[3]:
            t_intersection[1], t_intersection[3] = t_intersection[3], t_intersection[1]
        # Normalize the elements to the cropped width and height
        ele_1 = t_intersection[1] / padded_width
        ele_2 = t_intersection[0] / padded_height
        ele_3 = (t_intersection[3] - t_intersection[1]) / padded_width
        ele_4 = (t_intersection[2] - t_intersection[0]) / padded_height
        formatted_annotation.append(
            np.concatenate(
                [identifier, np.array([ele_1, ele_2, ele_3, ele_4]), confidence]
            )
        )
    return np.array(padded_image), np.array(formatted_annotation, dtype=np.float32)
def crop_augmenter(
    image,
    annotation,
    random,
    skip_probability=_DEFAULT_AUG_PARAMS["skip_probability_crop"],
    min_aspect_ratio=_DEFAULT_AUG_PARAMS["min_aspect_ratio"],
    max_aspect_ratio=_DEFAULT_AUG_PARAMS["max_aspect_ratio"],
    min_area_fraction=_DEFAULT_AUG_PARAMS["min_area_fraction_crop"],
    max_area_fraction=_DEFAULT_AUG_PARAMS["max_area_fraction_crop"],
    min_object_covered=_DEFAULT_AUG_PARAMS["min_object_covered"],
    max_attempts=_DEFAULT_AUG_PARAMS["max_attempts"],
    min_eject_coverage=_DEFAULT_AUG_PARAMS["min_eject_coverage"],
):
    """Randomly crop ``image`` and remap its annotations to the crop.

    A crop rectangle is sampled by aspect ratio, then height, then
    position, retrying up to ``max_attempts`` times until every annotated
    object overlapping the crop retains at least ``min_object_covered`` of
    its area.  Boxes whose coverage falls below ``min_eject_coverage`` are
    "ejected" (zeroed out); the remaining boxes are re-expressed in the
    crop's relative coordinate system.  If no valid crop is found, the
    original data is returned.

    Parameters
    ----------
    image : np.ndarray
        Image of shape (height, width, channels).
    annotation : sequence of np.ndarray
        Rows of [identifier, x, y, width, height, confidence] in relative
        coordinates (slicing below fixes this layout).
    random : np.random.RandomState
        Source of randomness.

    Returns
    -------
    (np.ndarray, np.ndarray or sequence)
        The cropped image and remapped annotations, or copies of the
        originals if the crop was skipped or never satisfied.
    """
    if random.uniform(0.0, 1.0) < skip_probability:
        return np.array(image), annotation
    image_height, image_width, _ = image.shape
    # Sample crop rects until one satisfies our constraints (by yielding a valid
    # list of cropped annotations), or reaching the limit on attempts.
    for i in range(max_attempts):
        # Randomly sample an aspect ratio.
        aspect_ratio = random.uniform(min_aspect_ratio, max_aspect_ratio)
        # Next we'll sample a height (which combined with the now known aspect
        # ratio, determines the size and area). But first we must compute the range
        # of valid heights. The crop cannot be taller than the original image,
        # cannot be wider than the original image, and must have an area in the
        # specified range.
        # The cropped height must be no larger the original height.
        #   h' <= h
        max_height = float(image_height)
        # The cropped width must be no larger than the original width.
        #   w' <= w IMPLIES ah' <= w IMPLIES h' <= w / a
        max_height_from_width = float(image_width) / aspect_ratio
        if max_height > max_height_from_width:
            max_height = max_height_from_width
        # The cropped area must not exceed the maximum area fraction.
        max_height_from_area = np.sqrt(
            image_height * image_width * max_area_fraction / aspect_ratio
        )
        if max_height > max_height_from_area:
            max_height = max_height_from_area
        # The padded area must attain the minimum area fraction.
        min_height = np.sqrt(
            image_height * image_width * min_area_fraction / aspect_ratio
        )
        # If the range is empty, then crops with the sampled aspect ratio cannot
        # satisfy the area constraint.
        if min_height > max_height:
            continue
        # Sample a position for the crop, constrained to lie within the image.
        cropped_height = random.uniform(min_height, max_height)
        cropped_width = cropped_height * aspect_ratio
        x_offset = random.uniform(0.0, (image_width - cropped_width))
        y_offset = random.uniform(0.0, (image_height - cropped_height))
        crop_bounds_x1 = x_offset
        crop_bounds_y1 = y_offset
        crop_bounds_x2 = x_offset + cropped_width
        crop_bounds_y2 = y_offset + cropped_height
        formatted_annotation = []
        is_min_object_covered = True
        for aug in annotation:
            identifier = aug[0:1]
            bounds = aug[1:5]
            confidence = aug[5:6]
            # Convert relative [x, y, w, h] to absolute corner coordinates.
            width = bounds[2]
            height = bounds[3]
            x1 = bounds[0] * image_width
            y1 = bounds[1] * image_height
            x2 = (bounds[0] + width) * image_width
            y2 = (bounds[1] + height) * image_height
            # This tests whether the crop bounds are out of the annotated bounds, if not it returns an empty annotation
            if (
                crop_bounds_x1 < x2
                and crop_bounds_y1 < y2
                and x1 < crop_bounds_x2
                and y1 < crop_bounds_y2
            ):
                # The middle two of the four sorted edge coordinates are the
                # edges of the intersection rectangle.
                x_bounds = [x1, x2, x_offset, x_offset + cropped_width]
                y_bounds = [y1, y2, y_offset, y_offset + cropped_height]
                x_bounds.sort()
                y_bounds.sort()
                x_pairs = x_bounds[1:3]
                y_pairs = y_bounds[1:3]
                intersection = np.array(
                    [y_pairs[0], x_pairs[0], y_pairs[1], x_pairs[1]]
                )
                intersection_area = (intersection[3] - intersection[1]) * (
                    intersection[2] - intersection[0]
                )
                annotation_area = (y2 - y1) * (x2 - x1)
                area_coverage = intersection_area / annotation_area
                # Invalidate the crop if it did not sufficiently overlap each annotation and try again.
                if area_coverage < min_object_covered:
                    is_min_object_covered = False
                    break
                # If the area coverage is greater the min_eject_coverage, then actually keep the annotation
                if area_coverage >= min_eject_coverage:
                    # Transformation matrix for the annotations: a pure
                    # translation in (y, x) order into crop coordinates.
                    transformation_matrix = np.array(
                        [[1.0, 0.0, -y_offset], [0.0, 1.0, -x_offset], [0.0, 0.0, 1.0]]
                    )
                    # Homogeneous coordinates: two (y, x) points plus a 1.
                    v = np.concatenate(
                        [
                            intersection.reshape((2, 2)),
                            np.ones((2, 1), dtype=np.float32),
                        ],
                        axis=1,
                    )
                    transposed_v = np.dot(v, np.transpose(transformation_matrix))
                    t_intersection = np.squeeze(transposed_v[:, :2].reshape(-1, 4))
                    # Sort the points top, left, bottom, right
                    if t_intersection[0] > t_intersection[2]:
                        t_intersection[0], t_intersection[2] = (
                            t_intersection[2],
                            t_intersection[0],
                        )
                    if t_intersection[1] > t_intersection[3]:
                        t_intersection[1], t_intersection[3] = (
                            t_intersection[3],
                            t_intersection[1],
                        )
                    # Normalize the elements to the cropped width and height
                    ele_1 = t_intersection[1] / cropped_width
                    ele_2 = t_intersection[0] / cropped_height
                    ele_3 = (t_intersection[3] - t_intersection[1]) / cropped_width
                    ele_4 = (t_intersection[2] - t_intersection[0]) / cropped_height
                    formatted_annotation.append(
                        np.concatenate(
                            [
                                identifier,
                                np.array([ele_1, ele_2, ele_3, ele_4]),
                                confidence,
                            ]
                        )
                    )
                else:
                    # Insufficient coverage: eject the box (zero it out).
                    formatted_annotation.append(
                        np.concatenate(
                            [identifier, np.array([0.0, 0.0, 0.0, 0.0]), confidence]
                        )
                    )
            else:
                # Box entirely outside the crop: emit an empty annotation.
                formatted_annotation.append(
                    np.concatenate(
                        [identifier, np.array([0.0, 0.0, 0.0, 0.0]), confidence]
                    )
                )
        if not is_min_object_covered:
            continue
        # Crop the pixels; int() truncation matches the annotation transform
        # only approximately (sub-pixel offsets are dropped).
        y_offset = int(y_offset)
        x_offset = int(x_offset)
        end_y = int(cropped_height + y_offset)
        end_x = int(cropped_width + x_offset)
        image_cropped = image[y_offset:end_y, x_offset:end_x]
        return np.array(image_cropped), np.array(formatted_annotation, dtype=np.float32)
    # All attempts failed: return the original data unchanged.
    return np.array(image), annotation
def numpy_augmenter(img, ann, seed):
    """Run the numpy-side augmentations (crop, pad, flip) in sequence.

    Parameters
    ----------
    img : np.ndarray
        Input image.
    ann : np.ndarray
        Bounding-box annotations for the image.
    seed : int
        Seed for the per-call random number generator.

    Returns
    -------
    (np.ndarray, np.ndarray)
        The augmented image and annotations.
    """
    # A fresh RandomState per call keeps augmentation reproducible per seed.
    rng = np.random.RandomState(seed=seed)
    for stage in (crop_augmenter, padding_augmenter, horizontal_flip_augmenter):
        img, ann = stage(img, ann, rng)
    return img, ann
def complete_augmenter(img_tf, ann_tf, seed_tf, alpha_tf, output_height, output_width):
    """Apply the full augmentation pipeline to one image/annotation pair.

    Geometric augmentations (crop, pad, flip) run on the numpy side via
    ``tf.numpy_function``; color, hue and the final resize run as
    TensorFlow ops.

    Returns
    -------
    (tf.Tensor, tf.Tensor)
        The augmented image and annotation tensors.
    """
    tf = _lazy_import_tensorflow()
    # Bridge into numpy for the geometric augmenters.
    image, boxes = tf.numpy_function(
        func=numpy_augmenter,
        inp=[img_tf, ann_tf, seed_tf],
        Tout=[tf.float32, tf.float32],
    )
    # Photometric augmentations consume slices of the random alpha vector.
    image, boxes = color_augmenter(image, boxes, alpha_tf[0:3])
    image, boxes = hue_augmenter(image, boxes, alpha_tf[3:4])
    # Finally scale to the fixed network input size.
    image, boxes = resize_augmenter(image, boxes, (output_height, output_width))
    return image, boxes
class DataAugmenter(object):
    """Batched data-augmentation pipeline built on a TF1-style graph.

    One (image, annotations) placeholder pair is created per batch slot;
    each slot is wired either to ``resize_augmenter`` only or to the full
    ``complete_augmenter`` pipeline, and the whole batch is evaluated in a
    single ``Session.run``.
    """

    def __init__(self, output_height, output_width, batch_size, resize_only):
        """Build the augmentation graph.

        Parameters
        ----------
        output_height, output_width : int
            Spatial size every image is resized to.
        batch_size : int
            Maximum number of images processed per call.
        resize_only : bool
            If True, skip the random augmentations and only resize.
        """
        tf = _lazy_import_tensorflow()
        self.batch_size = batch_size
        # Dedicated graph so the placeholders don't pollute the default graph.
        self.graph = tf.Graph()
        self.resize_only = resize_only
        with self.graph.as_default():
            # One image placeholder per batch slot; height/width left dynamic.
            self.img_tf = [
                tf.placeholder(tf.float32, [None, None, 3])
                for x in range(0, self.batch_size)
            ]
            # Annotations: variable number of 6-element rows per image.
            self.ann_tf = [
                tf.placeholder(tf.float32, [None, 6]) for x in range(0, self.batch_size)
            ]
            # Four random alphas per image drive the photometric augmenters.
            self.alpha_tf = tf.placeholder(tf.float32, [self.batch_size, 4])
            self.random_seed_tf = tf.placeholder(tf.uint32)
            self.resize_op_batch = []
            for i in range(0, self.batch_size):
                if resize_only:
                    aug_img_tf, aug_ann_tf = resize_augmenter(
                        self.img_tf[i], self.ann_tf[i], (output_height, output_width)
                    )
                    self.resize_op_batch.append([aug_img_tf, aug_ann_tf])
                else:
                    aug_img_tf, aug_ann_tf = complete_augmenter(
                        self.img_tf[i],
                        self.ann_tf[i],
                        self.random_seed_tf[i],
                        self.alpha_tf[i],
                        output_height,
                        output_width,
                    )
                    self.resize_op_batch.append([aug_img_tf, aug_ann_tf])

    def get_augmented_data(self, images, annotations, random_seed):
        """Augment a batch of images and annotations in one session run.

        Parameters
        ----------
        images, annotations : sequences
            Shared float arrays (converted via ``_utils``); at most
            ``batch_size`` entries each.
        random_seed : int
            Seed for both the per-image numpy augmenters and alpha draws.

        Returns
        -------
        (np.ndarray, list of np.ndarray)
            Augmented image batch (float32, contiguous) and per-image
            annotation arrays.
        """
        tf = _lazy_import_tensorflow()
        with tf.Session(graph=self.graph) as session:
            feed_dict = dict()
            # Populate feed_dict with images and annotations
            graph_op = self.resize_op_batch[0 : len(images)]
            for i in range(len(images)):
                feed_dict[self.img_tf[i]] = _utils.convert_shared_float_array_to_numpy(
                    images[i]
                )
                feed_dict[self.ann_tf[i]] = _utils.convert_shared_float_array_to_numpy(
                    annotations[i]
                )
            # Populate feed_dict with random seed and random alpha values, used
            # to sample image perturbations. We don't use TensorFlow's built-in
            # support for random number generation, since we want to effectively
            # reset the seed for each session (batch).
            random = np.random.RandomState(seed=random_seed)
            feed_dict[self.alpha_tf] = random.rand(*self.alpha_tf.shape)
            # uint32 seeds: randint's upper bound 2**32 is exclusive.
            feed_dict[self.random_seed_tf] = random.randint(
                0, 2 ** 32, size=self.batch_size
            )
            aug_output = session.run(graph_op, feed_dict=feed_dict)
            processed_images = []
            processed_annotations = []
            for o in aug_output:
                processed_images.append(o[0])
                processed_annotations.append(
                    np.ascontiguousarray(o[1], dtype=np.float32)
                )
            processed_images = np.array(processed_images, dtype=np.float32)
            processed_images = np.ascontiguousarray(processed_images, dtype=np.float32)
            return (processed_images, processed_annotations)
| [
"turicreate._deps.minimal_package._minimal_package_import_check",
"numpy.flip",
"PIL.Image.fromarray",
"numpy.sqrt",
"turicreate.image_analysis.resize",
"numpy.ones",
"turicreate.toolkits._tf_utils.convert_shared_float_array_to_numpy",
"numpy.any",
"numpy.ascontiguousarray",
"numpy.array",
"nump... | [((710, 763), 'turicreate._deps.minimal_package._minimal_package_import_check', '_minimal_package_import_check', (['"""tensorflow.compat.v1"""'], {}), "('tensorflow.compat.v1')\n", (739, 763), False, 'from turicreate._deps.minimal_package import _minimal_package_import_check\n'), ((5604, 5621), 'numpy.flip', 'np.flip', (['image', '(1)'], {}), '(image, 1)\n', (5611, 5621), True, 'import numpy as np\n'), ((8798, 8865), 'numpy.pad', 'np.pad', (['image'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': '(0.5)'}), "(image, pad_width=npad, mode='constant', constant_values=0.5)\n", (8804, 8865), True, 'import numpy as np\n'), ((8994, 9053), 'numpy.array', 'np.array', (['[[1.0, 0.0, ty], [0.0, 1.0, tx], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, ty], [0.0, 1.0, tx], [0.0, 0.0, 1.0]])\n', (9002, 9053), True, 'import numpy as np\n'), ((18664, 18696), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (18685, 18696), True, 'import numpy as np\n'), ((3328, 3350), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (3343, 3350), False, 'from PIL import Image\n'), ((3487, 3507), 'numpy.array', 'np.array', (['resize_img'], {}), '(resize_img)\n', (3495, 3507), True, 'import numpy as np\n'), ((4022, 4115), 'turicreate.image_analysis.resize', 'tc.image_analysis.resize', (['tc_image', 'output_shape[1]', 'output_shape[0]'], {'resample': '"""bilinear"""'}), "(tc_image, output_shape[1], output_shape[0],\n resample='bilinear')\n", (4046, 4115), True, 'import turicreate as tc\n'), ((5852, 5878), 'numpy.any', 'np.any', (['annotation[i][1:5]'], {}), '(annotation[i][1:5])\n', (5858, 5878), True, 'import numpy as np\n'), ((7475, 7545), 'numpy.sqrt', 'np.sqrt', (['(image_height * image_width * min_area_fraction / aspect_ratio)'], {}), '(image_height * image_width * min_area_fraction / aspect_ratio)\n', (7482, 7545), True, 'import numpy as np\n'), ((7751, 7821), 'numpy.sqrt', 'np.sqrt', (['(image_height * 
image_width * max_area_fraction / aspect_ratio)'], {}), '(image_height * image_width * max_area_fraction / aspect_ratio)\n', (7758, 7821), True, 'import numpy as np\n'), ((9704, 9748), 'numpy.array', 'np.array', (['[y1, x1, y2, x2]'], {'dtype': 'np.float32'}), '([y1, x1, y2, x2], dtype=np.float32)\n', (9712, 9748), True, 'import numpy as np\n'), ((10907, 10929), 'numpy.array', 'np.array', (['padded_image'], {}), '(padded_image)\n', (10915, 10929), True, 'import numpy as np\n'), ((10931, 10979), 'numpy.array', 'np.array', (['formatted_annotation'], {'dtype': 'np.float32'}), '(formatted_annotation, dtype=np.float32)\n', (10939, 10979), True, 'import numpy as np\n'), ((12874, 12944), 'numpy.sqrt', 'np.sqrt', (['(image_height * image_width * max_area_fraction / aspect_ratio)'], {}), '(image_height * image_width * max_area_fraction / aspect_ratio)\n', (12881, 12944), True, 'import numpy as np\n'), ((13146, 13216), 'numpy.sqrt', 'np.sqrt', (['(image_height * image_width * min_area_fraction / aspect_ratio)'], {}), '(image_height * image_width * min_area_fraction / aspect_ratio)\n', (13153, 13216), True, 'import numpy as np\n'), ((18584, 18599), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (18592, 18599), True, 'import numpy as np\n'), ((6503, 6518), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (6511, 6518), True, 'import numpy as np\n'), ((8028, 8043), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (8036, 8043), True, 'import numpy as np\n'), ((9269, 9283), 'numpy.any', 'np.any', (['bounds'], {}), '(bounds)\n', (9275, 9283), True, 'import numpy as np\n'), ((9979, 10014), 'numpy.transpose', 'np.transpose', (['transformation_matrix'], {}), '(transformation_matrix)\n', (9991, 10014), True, 'import numpy as np\n'), ((11627, 11642), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (11635, 11642), True, 'import numpy as np\n'), ((18498, 18521), 'numpy.array', 'np.array', (['image_cropped'], {}), '(image_cropped)\n', (18506, 18521), 
True, 'import numpy as np\n'), ((18523, 18571), 'numpy.array', 'np.array', (['formatted_annotation'], {'dtype': 'np.float32'}), '(formatted_annotation, dtype=np.float32)\n', (18531, 18571), True, 'import numpy as np\n'), ((21859, 21898), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'random_seed'}), '(seed=random_seed)\n', (21880, 21898), True, 'import numpy as np\n'), ((22476, 22520), 'numpy.array', 'np.array', (['processed_images'], {'dtype': 'np.float32'}), '(processed_images, dtype=np.float32)\n', (22484, 22520), True, 'import numpy as np\n'), ((22552, 22608), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['processed_images'], {'dtype': 'np.float32'}), '(processed_images, dtype=np.float32)\n', (22572, 22608), True, 'import numpy as np\n'), ((9866, 9899), 'numpy.ones', 'np.ones', (['(2, 1)'], {'dtype': 'np.float32'}), '((2, 1), dtype=np.float32)\n', (9873, 9899), True, 'import numpy as np\n'), ((15012, 15070), 'numpy.array', 'np.array', (['[y_pairs[0], x_pairs[0], y_pairs[1], x_pairs[1]]'], {}), '([y_pairs[0], x_pairs[0], y_pairs[1], x_pairs[1]])\n', (15020, 15070), True, 'import numpy as np\n'), ((21308, 21361), 'turicreate.toolkits._tf_utils.convert_shared_float_array_to_numpy', '_utils.convert_shared_float_array_to_numpy', (['images[i]'], {}), '(images[i])\n', (21350, 21361), True, 'import turicreate.toolkits._tf_utils as _utils\n'), ((21444, 21502), 'turicreate.toolkits._tf_utils.convert_shared_float_array_to_numpy', '_utils.convert_shared_float_array_to_numpy', (['annotations[i]'], {}), '(annotations[i])\n', (21486, 21502), True, 'import turicreate.toolkits._tf_utils as _utils\n'), ((10819, 10857), 'numpy.array', 'np.array', (['[ele_1, ele_2, ele_3, ele_4]'], {}), '([ele_1, ele_2, ele_3, ele_4])\n', (10827, 10857), True, 'import numpy as np\n'), ((15892, 15965), 'numpy.array', 'np.array', (['[[1.0, 0.0, -y_offset], [0.0, 1.0, -x_offset], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, -y_offset], [0.0, 1.0, -x_offset], [0.0, 0.0, 
1.0]])\n', (15900, 15965), True, 'import numpy as np\n'), ((22382, 22426), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['o[1]'], {'dtype': 'np.float32'}), '(o[1], dtype=np.float32)\n', (22402, 22426), True, 'import numpy as np\n'), ((9370, 9392), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (9378, 9392), True, 'import numpy as np\n'), ((16326, 16361), 'numpy.transpose', 'np.transpose', (['transformation_matrix'], {}), '(transformation_matrix)\n', (16338, 16361), True, 'import numpy as np\n'), ((16165, 16198), 'numpy.ones', 'np.ones', (['(2, 1)'], {'dtype': 'np.float32'}), '((2, 1), dtype=np.float32)\n', (16172, 16198), True, 'import numpy as np\n'), ((18115, 18145), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (18123, 18145), True, 'import numpy as np\n'), ((17573, 17611), 'numpy.array', 'np.array', (['[ele_1, ele_2, ele_3, ele_4]'], {}), '([ele_1, ele_2, ele_3, ele_4])\n', (17581, 17611), True, 'import numpy as np\n'), ((17887, 17917), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (17895, 17917), True, 'import numpy as np\n')] |
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytests for `pydrobert.kaldi.io.command_line`"""
import os
import pickle
import numpy as np
import pytest
import pydrobert.kaldi.io.command_line as command_line
from pydrobert.kaldi.io.util import infer_kaldi_data_type
from pydrobert.kaldi.io import open as kaldi_open
@pytest.mark.parametrize(
    "values",
    [
        [
            np.array([1, 2, 3], dtype=np.float32),
            np.array([4], dtype=np.float32),
            np.array([], dtype=np.float32),
        ],
        [np.random.random((100, 20))],
        ["foo", "bar", "baz"],
        [("foo", "bar"), ("baz",)],
        [],
    ],
)
def test_write_pickle_to_table(values, temp_file_1_name, temp_file_2_name):
    """Round-trip: pickles on disk -> kaldi table -> values must match."""
    kaldi_dtype = infer_kaldi_data_type(values[0]).value if len(values) else "bm"
    # Write the (key, value) pairs as consecutive pickles in one file.
    with open(temp_file_1_name, "wb") as pickle_file:
        for idx, value in enumerate(values):
            pickle.dump((str(idx), value), pickle_file)
    assert (
        command_line.write_pickle_to_table(
            [temp_file_1_name, "ark:" + temp_file_2_name, "-o", kaldi_dtype]
        )
        == 0
    )
    reader = kaldi_open("ark:" + temp_file_2_name, kaldi_dtype, "r")
    num_entries = 0
    for key, read_value in reader.items():
        num_entries = int(key) + 1
        expected = values[num_entries - 1]
        # Arrays compare approximately; everything else compares exactly.
        if hasattr(expected, "dtype"):
            assert np.allclose(read_value, expected)
        else:
            assert read_value == expected
    assert num_entries == len(values)
@pytest.mark.parametrize(
    "values",
    [
        [
            np.array([1, 2, 3], dtype=np.float32),
            np.array([4], dtype=np.float32),
            np.array([], dtype=np.float32),
        ],
        [np.random.random((100, 20))],
        ["foo", "bar", "baz"],
        [("foo", "bar"), ("baz",)],
        [],
    ],
)
def test_write_table_to_pickle(values, temp_file_1_name, temp_file_2_name):
    """Round-trip: kaldi table -> pickles on disk -> values must match."""
    if len(values):
        kaldi_dtype = infer_kaldi_data_type(values[0]).value
    else:
        kaldi_dtype = "bm"
    with kaldi_open("ark:" + temp_file_1_name, kaldi_dtype, "w") as writer:
        for num, value in enumerate(values):
            writer.write(str(num), value)
    ret_code = command_line.write_table_to_pickle(
        ["ark:" + temp_file_1_name, temp_file_2_name, "-i", kaldi_dtype]
    )
    assert ret_code == 0
    num_entries = 0
    # Use a context manager so the pickle file is closed even when an
    # assertion fails (the original leaked the handle and also initialized
    # num_entries twice).
    with open(temp_file_2_name, "rb") as pickle_file:
        try:
            # pickle.load raises EOFError once the stream is exhausted.
            while True:
                key, value = pickle.load(pickle_file)
                num_entries = int(key) + 1
                try:
                    values[num_entries - 1].dtype
                    assert np.allclose(value, values[num_entries - 1])
                except AttributeError:
                    # Not an array: compare exactly.
                    assert value == values[num_entries - 1]
        except EOFError:
            pass
    assert num_entries == len(values)
@pytest.mark.pytorch
def test_write_table_to_torch_dir(temp_dir):
    """Entries of a kaldi table become per-key .pt files in a directory."""
    import torch

    out_dir = os.path.join(temp_dir, "test_write_table_to_torch_dir")
    os.makedirs(out_dir)
    rwspecifier = "ark:" + os.path.join(out_dir, "table.ark")
    tensors = {
        "a": torch.rand(10, 4),
        "b": torch.rand(5, 2),
        "c": torch.rand(5, 100),
    }
    with kaldi_open(rwspecifier, "bm", mode="w") as table:
        for key in ("a", "b", "c"):
            table.write(key, tensors[key].numpy())
    assert not command_line.write_table_to_torch_dir([rwspecifier, out_dir])
    # Every table entry must round-trip through its own .pt file.
    for key, expected in tensors.items():
        loaded = torch.load(os.path.join(out_dir, key + ".pt"))
        assert torch.allclose(expected, loaded)
@pytest.mark.pytorch
def test_write_torch_dir_to_table(temp_dir):
    """Per-key .pt files in a directory become entries of a kaldi table."""
    import torch

    in_dir = os.path.join(temp_dir, "test_write_torch_dir_to_table")
    rwspecifier = "ark:" + os.path.join(in_dir, "table.ark")
    os.makedirs(in_dir)
    tensors = {
        "a": torch.rand(5, 4),
        "b": torch.rand(4, 3),
        "c": torch.rand(3, 2),
    }
    for key, tensor in tensors.items():
        torch.save(tensor, os.path.join(in_dir, key + ".pt"))
    assert not command_line.write_torch_dir_to_table([in_dir, rwspecifier])
    with kaldi_open(rwspecifier, "bm") as table:
        keys, vals = zip(*table.items())
    assert tuple(keys) == ("a", "b", "c")
    assert len(vals) == 3
    # Table values come back as numpy; compare against the saved tensors.
    for dval, tval in zip((tensors["a"], tensors["b"], tensors["c"]), vals):
        assert torch.allclose(dval, torch.from_numpy(tval))
| [
"pydrobert.kaldi.io.open",
"numpy.allclose",
"pydrobert.kaldi.io.command_line.write_pickle_to_table",
"os.makedirs",
"pydrobert.kaldi.io.util.infer_kaldi_data_type",
"numpy.random.random",
"pydrobert.kaldi.io.command_line.write_torch_dir_to_table",
"os.path.join",
"pydrobert.kaldi.io.command_line.wr... | [((1544, 1648), 'pydrobert.kaldi.io.command_line.write_pickle_to_table', 'command_line.write_pickle_to_table', (["[temp_file_1_name, 'ark:' + temp_file_2_name, '-o', kaldi_dtype]"], {}), "([temp_file_1_name, 'ark:' +\n temp_file_2_name, '-o', kaldi_dtype])\n", (1578, 1648), True, 'import pydrobert.kaldi.io.command_line as command_line\n'), ((1703, 1758), 'pydrobert.kaldi.io.open', 'kaldi_open', (["('ark:' + temp_file_2_name)", 'kaldi_dtype', '"""r"""'], {}), "('ark:' + temp_file_2_name, kaldi_dtype, 'r')\n", (1713, 1758), True, 'from pydrobert.kaldi.io import open as kaldi_open\n'), ((2805, 2909), 'pydrobert.kaldi.io.command_line.write_table_to_pickle', 'command_line.write_table_to_pickle', (["['ark:' + temp_file_1_name, temp_file_2_name, '-i', kaldi_dtype]"], {}), "(['ark:' + temp_file_1_name,\n temp_file_2_name, '-i', kaldi_dtype])\n", (2839, 2909), True, 'import pydrobert.kaldi.io.command_line as command_line\n'), ((3543, 3598), 'os.path.join', 'os.path.join', (['temp_dir', '"""test_write_table_to_torch_dir"""'], {}), "(temp_dir, 'test_write_table_to_torch_dir')\n", (3555, 3598), False, 'import os\n'), ((3603, 3623), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (3614, 3623), False, 'import os\n'), ((3694, 3711), 'torch.rand', 'torch.rand', (['(10)', '(4)'], {}), '(10, 4)\n', (3704, 3711), False, 'import torch\n'), ((3720, 3736), 'torch.rand', 'torch.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (3730, 3736), False, 'import torch\n'), ((3745, 3763), 'torch.rand', 'torch.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (3755, 3763), False, 'import torch\n'), ((4323, 4378), 'os.path.join', 'os.path.join', (['temp_dir', '"""test_write_torch_dir_to_table"""'], {}), "(temp_dir, 'test_write_torch_dir_to_table')\n", (4335, 4378), False, 'import os\n'), ((4444, 4463), 'os.makedirs', 'os.makedirs', (['in_dir'], {}), '(in_dir)\n', (4455, 4463), False, 'import os\n'), ((4472, 4488), 'torch.rand', 'torch.rand', (['(5)', 
'(4)'], {}), '(5, 4)\n', (4482, 4488), False, 'import torch\n'), ((4497, 4513), 'torch.rand', 'torch.rand', (['(4)', '(3)'], {}), '(4, 3)\n', (4507, 4513), False, 'import torch\n'), ((4522, 4538), 'torch.rand', 'torch.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (4532, 4538), False, 'import torch\n'), ((2636, 2691), 'pydrobert.kaldi.io.open', 'kaldi_open', (["('ark:' + temp_file_1_name)", 'kaldi_dtype', '"""w"""'], {}), "('ark:' + temp_file_1_name, kaldi_dtype, 'w')\n", (2646, 2691), True, 'from pydrobert.kaldi.io import open as kaldi_open\n'), ((3651, 3685), 'os.path.join', 'os.path.join', (['out_dir', '"""table.ark"""'], {}), "(out_dir, 'table.ark')\n", (3663, 3685), False, 'import os\n'), ((3773, 3812), 'pydrobert.kaldi.io.open', 'kaldi_open', (['rwspecifier', '"""bm"""'], {'mode': '"""w"""'}), "(rwspecifier, 'bm', mode='w')\n", (3783, 3812), True, 'from pydrobert.kaldi.io import open as kaldi_open\n'), ((3946, 4007), 'pydrobert.kaldi.io.command_line.write_table_to_torch_dir', 'command_line.write_table_to_torch_dir', (['[rwspecifier, out_dir]'], {}), '([rwspecifier, out_dir])\n', (3983, 4007), True, 'import pydrobert.kaldi.io.command_line as command_line\n'), ((4406, 4439), 'os.path.join', 'os.path.join', (['in_dir', '"""table.ark"""'], {}), "(in_dir, 'table.ark')\n", (4418, 4439), False, 'import os\n'), ((4557, 4585), 'os.path.join', 'os.path.join', (['in_dir', '"""a.pt"""'], {}), "(in_dir, 'a.pt')\n", (4569, 4585), False, 'import os\n'), ((4605, 4633), 'os.path.join', 'os.path.join', (['in_dir', '"""b.pt"""'], {}), "(in_dir, 'b.pt')\n", (4617, 4633), False, 'import os\n'), ((4653, 4681), 'os.path.join', 'os.path.join', (['in_dir', '"""c.pt"""'], {}), "(in_dir, 'c.pt')\n", (4665, 4681), False, 'import os\n'), ((4698, 4758), 'pydrobert.kaldi.io.command_line.write_torch_dir_to_table', 'command_line.write_torch_dir_to_table', (['[in_dir, rwspecifier]'], {}), '([in_dir, rwspecifier])\n', (4735, 4758), True, 'import pydrobert.kaldi.io.command_line as command_line\n'), 
((4768, 4797), 'pydrobert.kaldi.io.open', 'kaldi_open', (['rwspecifier', '"""bm"""'], {}), "(rwspecifier, 'bm')\n", (4778, 4797), True, 'from pydrobert.kaldi.io import open as kaldi_open\n'), ((1298, 1330), 'pydrobert.kaldi.io.util.infer_kaldi_data_type', 'infer_kaldi_data_type', (['values[0]'], {}), '(values[0])\n', (1319, 1330), False, 'from pydrobert.kaldi.io.util import infer_kaldi_data_type\n'), ((1932, 1975), 'numpy.allclose', 'np.allclose', (['value', 'values[num_entries - 1]'], {}), '(value, values[num_entries - 1])\n', (1943, 1975), True, 'import numpy as np\n'), ((914, 951), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'np.float32'}), '([1, 2, 3], dtype=np.float32)\n', (922, 951), True, 'import numpy as np\n'), ((965, 996), 'numpy.array', 'np.array', (['[4]'], {'dtype': 'np.float32'}), '([4], dtype=np.float32)\n', (973, 996), True, 'import numpy as np\n'), ((1010, 1040), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (1018, 1040), True, 'import numpy as np\n'), ((1062, 1089), 'numpy.random.random', 'np.random.random', (['(100, 20)'], {}), '((100, 20))\n', (1078, 1089), True, 'import numpy as np\n'), ((2551, 2583), 'pydrobert.kaldi.io.util.infer_kaldi_data_type', 'infer_kaldi_data_type', (['values[0]'], {}), '(values[0])\n', (2572, 2583), False, 'from pydrobert.kaldi.io.util import infer_kaldi_data_type\n'), ((3086, 3110), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (3097, 3110), False, 'import pickle\n'), ((2167, 2204), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'np.float32'}), '([1, 2, 3], dtype=np.float32)\n', (2175, 2204), True, 'import numpy as np\n'), ((2218, 2249), 'numpy.array', 'np.array', (['[4]'], {'dtype': 'np.float32'}), '([4], dtype=np.float32)\n', (2226, 2249), True, 'import numpy as np\n'), ((2263, 2293), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (2271, 2293), True, 'import numpy as np\n'), ((2315, 2342), 
'numpy.random.random', 'np.random.random', (['(100, 20)'], {}), '((100, 20))\n', (2331, 2342), True, 'import numpy as np\n'), ((4048, 4077), 'os.path.join', 'os.path.join', (['out_dir', '"""c.pt"""'], {}), "(out_dir, 'c.pt')\n", (4060, 4077), False, 'import os\n'), ((4120, 4149), 'os.path.join', 'os.path.join', (['out_dir', '"""b.pt"""'], {}), "(out_dir, 'b.pt')\n", (4132, 4149), False, 'import os\n'), ((4192, 4221), 'os.path.join', 'os.path.join', (['out_dir', '"""a.pt"""'], {}), "(out_dir, 'a.pt')\n", (4204, 4221), False, 'import os\n'), ((5044, 5066), 'torch.from_numpy', 'torch.from_numpy', (['tval'], {}), '(tval)\n', (5060, 5066), False, 'import torch\n'), ((3236, 3279), 'numpy.allclose', 'np.allclose', (['value', 'values[num_entries - 1]'], {}), '(value, values[num_entries - 1])\n', (3247, 3279), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
##########################################################################
# pySAP - Copyright (C) CEA, 2017 - 2018
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Wavelet transform base module.
"""
# System import
from __future__ import division, print_function, absolute_import
from pprint import pprint
import uuid
import os
import warnings
# Package import
import pysap
from .utils import with_metaclass
from pysap.plotting import plot_transform
try:
import pysparse
except:
warnings.warn("Sparse2d python bindings not found, use binaries.")
pysparse = None
# Third party import
import numpy
class MetaRegister(type):
    """ Simple Python metaclass registry pattern.

    Every concrete class created with this metaclass is recorded in the
    shared ``REGISTRY`` mapping under its class name; abstract base names
    are deliberately left out of the registry.
    """
    REGISTRY = {}

    def __new__(cls, name, bases, attrs):
        """ Allocation.

        Parameters
        ----------
        name: str
            the name of the class.
        bases: tuple
            the base classes.
        attrs:
            the attributes defined for the class.
        """
        # Build the class object first, then decide whether to register it.
        klass = super(MetaRegister, cls).__new__(cls, name, bases, attrs)
        # A name may only be registered once.
        if name in cls.REGISTRY:
            raise ValueError(
                "'{0}' name already used in registry.".format(name))
        # Abstract base classes are intentionally excluded from the registry.
        abstract_names = ("WaveletTransformBase", "ISAPWaveletTransformBase")
        if name not in abstract_names:
            cls.REGISTRY[name] = klass
        return klass
class WaveletTransformBase(with_metaclass(MetaRegister)):
    """ Data structure representing a signal wavelet decomposition.
    Available transforms are defined in 'pysap.transform'.
    """
    def __init__(self, nb_scale, verbose=0, **kwargs):
        """ Initialize the WaveletTransformBase class.
        Parameters
        ----------
        data: ndarray
            the input data.
        nb_scale: int
            the number of scale of the decomposition that includes the
            approximation scale.
        verbose: int, default 0
            control the verbosity level
        """
        # Wavelet transform parameters
        self.nb_scale = nb_scale
        self.name = None
        self.bands_names = None
        self.nb_band_per_scale = None
        self.bands_lengths = None
        self.bands_shapes = None
        self.isap_transform_id = None
        self.flatten_fct = None
        self.unflatten_fct = None
        self.is_decimated = None
        self.scales_lengths = None
        self.scales_padds = None
        # Fall back to the ISAP binaries when the Python bindings are missing.
        self.use_wrapping = pysparse is None
        # Data that can be declared afterward
        self._data = None
        self._image_metadata = {}
        self._data_shape = None
        self._iso_shape = None
        self._analysis_data = None
        self._analysis_shape = None
        self._analysis_header = None
        self._analysis_buffer_shape = None
        self.verbose = verbose
        # Transformation
        if not self.use_wrapping:
            kwargs["type_of_multiresolution_transform"] = (
                self.__isap_transform_id__)
            kwargs["number_of_scales"] = self.nb_scale
            self.trf = pysparse.MRTransform(**kwargs)
        else:
            self.trf = None
    def __getitem__(self, given):
        """ Access the analysis designated scale/band coefficients.
        Parameters
        ----------
        given: int, slice or tuple
            the scale and band indices.
        Returns
        -------
        coeffs: ndarray or list of ndarray
            the analysis coefficients.
        """
        # Convert given index to generic scale/band index
        if not isinstance(given, tuple):
            given = (given, slice(None))
        # Check that we have a valid given index
        if len(given) != 2:
            raise ValueError("Expect a scale/band int or 2-uplet index.")
        # Check some data are stored in the structure
        if self._analysis_data is None:
            raise ValueError("Please specify first the decomposition "
                             "coefficients array.")
        # Handle multi-dim slice object
        if isinstance(given[0], slice):
            start = given[0].start or 0
            stop = given[0].stop or self.nb_scale
            step = given[0].step or 1
            coeffs = [self.__getitem__((index, given[1]))
                      for index in range(start, stop, step)]
        elif isinstance(given[1], slice):
            start = given[1].start or 0
            stop = given[1].stop or self.nb_band_per_scale[given[0]]
            step = given[1].step or 1
            coeffs = [self.band_at(given[0], index)
                      for index in range(start, stop, step)]
        else:
            coeffs = [self.band_at(given[0], given[1])]
        # Format output
        if len(coeffs) == 1:
            coeffs = coeffs[0]
        return coeffs
    def __setitem__(self, given, array):
        """ Set the analysis designated scale/band coefficients.

        The flat band index is computed with the same convention as
        :meth:`band_at`, and the corresponding entry of the analysis
        coefficients list is overwritten with ``array``.

        Parameters
        ----------
        given: tuple
            the scale and band indices.
        array: ndarray
            the specific scale/band data as an array.
        """
        # Check that we have a valid given index
        if len(given) != 2:
            raise ValueError("Expect a scale/band int or 2-uplet index.")
        # Check given index
        if isinstance(given[0], slice) or isinstance(given[1], slice):
            raise ValueError("Expect a scale/band int index (no slice).")
        # Check some data are stored in the structure
        if self._analysis_data is None:
            raise ValueError("Please specify first the decomposition "
                             "coefficients array.")
        # Locate the flat band index (same convention as 'band_at') and
        # overwrite the associated coefficients.
        index = numpy.sum(self.nb_band_per_scale[:given[0]]).astype(int) + given[1]
        self._analysis_data[index] = array
    ##########################################################################
    # Properties
    ##########################################################################
    def _set_data(self, data):
        """ Set the input data array.
        Parameters
        ----------
        data: nd-array or pysap.Image
            input data/signal.
        """
        if self.verbose > 0 and self._data is not None:
            print("[info] Replacing existing input data array.")
        if not all([e == data.shape[0] for e in data.shape]):
            raise ValueError("Expect a square shape data.")
        if data.ndim != 2:
            raise ValueError("Expect a two-dim data array.")
        if self.is_decimated and not (data.shape[0] // 2**(self.nb_scale) > 0):
            raise ValueError("Can't decimate the data with the specified "
                             "number of scales.")
        if isinstance(data, pysap.Image):
            self._data = data.data
            self._image_metadata = data.metadata
        else:
            self._data = data
        self._data_shape = self._data.shape
        self._iso_shape = self._data_shape[0]
        if self.use_wrapping:
            self._set_transformation_parameters()
            self._compute_transformation_parameters()
    def _get_data(self):
        """ Get the input data array.
        Returns
        -------
        data: nd-array
            input data/signal.
        """
        return self._data
    def _set_analysis_data(self, analysis_data):
        """ Set the decomposition coefficients array.
        Parameters
        ----------
        analysis_data: list of nd-array
            decomposition coefficients array.
        """
        if self.verbose > 0 and self._analysis_data is not None:
            print("[info] Replacing existing decomposition coefficients "
                  "array.")
        if len(analysis_data) != sum(self.nb_band_per_scale):
            raise ValueError("The wavelet coefficients do not correspond to "
                             "the wavelet transform parameters.")
        self._analysis_data = analysis_data
    def _get_analysis_data(self):
        """ Get the decomposition coefficients array.
        Returns
        -------
        analysis_data: nd-array
            decomposition coefficients array.
        """
        return self._analysis_data
    def _set_analysis_header(self, analysis_header):
        """ Set the decomposition coefficients header.
        Parameters
        ----------
        analysis_header: dict
            decomposition coefficients array.
        """
        if self.verbose > 0 and self._analysis_header is not None:
            print("[info] Replacing existing decomposition coefficients "
                  "header.")
        self._analysis_header = analysis_header
    def _get_analysis_header(self):
        """ Get the decomposition coefficients header.
        Returns
        -------
        analysis_header: dict
            decomposition coefficients header.
        """
        return self._analysis_header
    def _get_info(self):
        """ Return the transformation information. This information is only
        available when using the Python bindings.
        """
        if not self.use_wrapping:
            self.trf.info()
    data = property(_get_data, _set_data)
    analysis_data = property(_get_analysis_data, _set_analysis_data)
    analysis_header = property(_get_analysis_header, _set_analysis_header)
    info = property(_get_info)
    ##########################################################################
    # Public members
    ##########################################################################
    @classmethod
    def bands_shapes(cls, bands_lengths, ratio=None):
        """ Return the different bands associated shapes given their lengths.
        Parameters
        ----------
        bands_lengths: ndarray (<nb_scale>, max(<nb_band_per_scale>, 0))
            array holding the length between two bands of the data
            vector per scale.
        ratio: ndarray, default None
            an array containing ratios for each scale and each band.
        Returns
        -------
        bands_shapes: list of list of 2-uplet (<nb_scale>, <nb_band_per_scale>)
            structure holding the shape of each bands at each scale.
        """
        if ratio is None:
            ratio = numpy.ones_like(bands_lengths)
        bands_shapes = []
        for band_number, scale_data in enumerate(bands_lengths):
            scale_shapes = []
            for scale_number, scale_padd in enumerate(scale_data):
                # Recover a rectangular shape from the flattened band length
                # and the per-band aspect ratio.
                shape = (
                    int(numpy.sqrt(
                        scale_padd * ratio[band_number, scale_number])),
                    int(numpy.sqrt(
                        scale_padd / ratio[band_number, scale_number])))
                scale_shapes.append(shape)
            bands_shapes.append(scale_shapes)
        return bands_shapes
    def show(self):
        """ Display the different bands at the different decomposition scales.
        """
        plot_transform(self)
    def analysis(self, **kwargs):
        """ Decompose a real or complex signal using ISAP.
        Fill the instance 'analysis_data' and 'analysis_header' parameters.
        Parameters
        ----------
        kwargs: dict (optional)
            the parameters that will be passed to
            'pysap.extensions.mr_transform'.
        """
        # Checks
        if self._data is None:
            raise ValueError("Please specify first the input data.")
        # Update ISAP parameters
        kwargs["type_of_multiresolution_transform"] = self.isap_transform_id
        kwargs["number_of_scales"] = self.nb_scale
        # Analysis: complex signals are decomposed part by part and the
        # coefficients recombined as complex values.
        if numpy.iscomplexobj(self._data):
            analysis_data_real, self.analysis_header = self._analysis(
                self._data.real, **kwargs)
            analysis_data_imag, _ = self._analysis(
                self._data.imag, **kwargs)
            if isinstance(analysis_data_real, numpy.ndarray):
                self._analysis_data = (
                    analysis_data_real + 1.j * analysis_data_imag)
            else:
                self._analysis_data = [
                    re + 1.j * ima
                    for re, ima in zip(analysis_data_real, analysis_data_imag)]
        else:
            self._analysis_data, self._analysis_header = self._analysis(
                self._data, **kwargs)
    def synthesis(self):
        """ Reconstruct a real or complex signal from the wavelet coefficients
        using ISAP.
        Returns
        -------
        data: pysap.Image
            the reconstructed data/signal.
        """
        # Checks
        if self._analysis_data is None:
            raise ValueError("Please specify first the decomposition "
                             "coefficients array.")
        if self.use_wrapping and self._analysis_header is None:
            raise ValueError("Please specify first the decomposition "
                             "coefficients header.")
        # Message
        if self.verbose > 1:
            print("[info] Synthesis header:")
            pprint(self._analysis_header)
        # Reorganize the coefficients with ISAP convention
        # TODO: do not backup the list of bands
        if self.use_wrapping:
            analysis_buffer = numpy.zeros(
                self._analysis_buffer_shape, dtype=self.analysis_data[0].dtype)
            for scale, nb_bands in enumerate(self.nb_band_per_scale):
                for band in range(nb_bands):
                    self._set_linear_band(scale, band, analysis_buffer,
                                          self.band_at(scale, band))
            _saved_analysis_data = self._analysis_data
            self._analysis_data = analysis_buffer
            self._analysis_data = [self.unflatten_fct(self)]
        # Synthesis: complex coefficients are reconstructed part by part.
        if numpy.iscomplexobj(self._analysis_data[0]):
            data_real = self._synthesis(
                [arr.real for arr in self._analysis_data],
                self._analysis_header)
            data_imag = self._synthesis(
                [arr.imag for arr in self._analysis_data],
                self._analysis_header)
            data = data_real + 1.j * data_imag
        else:
            data = self._synthesis(
                self._analysis_data, self._analysis_header)
        # TODO: remove this code asap
        if self.use_wrapping:
            self._analysis_data = _saved_analysis_data
        return pysap.Image(data=data, metadata=self._image_metadata)
    def band_at(self, scale, band):
        """ Get the band at a specific scale.
        Parameters
        ----------
        scale: int
            index of the scale.
        band: int
            index of the band.
        Returns
        -------
        band_data: nd-arry
            the requested band data array.
        """
        # Message
        if self.verbose > 1:
            print("[info] Accessing scale '{0}' and band '{1}'...".format(
                scale, band))
        # Get the band array: bands are stored flat, so offset by the number
        # of bands in all previous scales.
        index = numpy.sum(self.nb_band_per_scale[:scale]).astype(int) + band
        band_data = self.analysis_data[index]
        return band_data
    ##########################################################################
    # Private members
    ##########################################################################
    def _get_linear_band(self, scale, band, analysis_data):
        """ Access the desired band data from a 1D linear analysis buffer.
        Parameters
        ----------
        scale: int
            index of the scale.
        band: int
            index of the band.
        analysis_data: nd-array (N, )
            the analysis buffer.
        Returns
        -------
        band_data: nd-arry (M, )
            the requested band buffer.
        """
        # Compute selected scale/band start/stop indices
        start_scale_padd = self.scales_padds[scale]
        start_band_padd = (
            self.bands_lengths[scale, :band + 1].sum() -
            self.bands_lengths[scale, band])
        start_padd = start_scale_padd + start_band_padd
        stop_padd = start_padd + self.bands_lengths[scale, band]
        # Get the band array
        band_data = analysis_data[start_padd: stop_padd].reshape(
            self.bands_shapes[scale][band])
        return band_data
    def _set_linear_band(self, scale, band, analysis_data, band_data):
        """ Set the desired band data in a 1D linear analysis buffer.
        Parameters
        ----------
        scale: int
            index of the scale.
        band: int
            index of the band.
        analysis_data: nd-array (N, )
            the analysis buffer.
        band_data: nd-array (M, M)
            the band data to be added in the analysis buffer.
        Returns
        -------
        analysis_data: nd-arry (N, )
            the updated analysis buffer.
        """
        # Compute selected scale/band start/stop indices
        start_scale_padd = self.scales_padds[scale]
        start_band_padd = (
            self.bands_lengths[scale, :band + 1].sum() -
            self.bands_lengths[scale, band])
        start_padd = start_scale_padd + start_band_padd
        stop_padd = start_padd + self.bands_lengths[scale, band]
        # Get the band array
        analysis_data[start_padd: stop_padd] = band_data.flatten()
        return analysis_data
    def _set_transformation_parameters(self):
        """ Define the transformation class parameters.
        Attributes
        ----------
        name: str
            the name of the decomposition.
        bands_names: list of str
            the name of the different bands.
        flatten_fct: callable
            a function used to reorganize the ISAP decomposition coefficients,
            see 'pysap/extensions/formating.py' module for more details.
        unflatten_fct: callable
            a function used to reorganize the decomposition coefficients using
            ISAP convention, see 'pysap/extensions/formating.py' module for
            more details.
        is_decimated: bool
            True if the decomposition include a decimation of the
            band number of coefficients.
        nb_band_per_scale: ndarray (<nb_scale>, )
            vector of int holding the number of band per scale.
        bands_lengths: ndarray (<nb_scale>, max(<nb_band_per_scale>, 0))
            array holding the length between two bands of the data
            vector per scale.
        bands_shapes: list of list of 2-uplet (<nb_scale>, <nb_band_per_scale>)
            structure holding the shape of each bands at each scale.
        isap_transform_id: int
            the label of the ISAP transformation.
        """
        raise NotImplementedError("Abstract method should not be declared "
                                  "in derivate classes.")
    def _compute_transformation_parameters(self):
        """ Compute information in order to split scale/band flatten data.
        Attributes
        ----------
        scales_lengths: ndarray (<nb_scale>, )
            the length of each band.
        scales_padds: ndarray (<nb_scale> + 1, )
            the index of the data associated to each scale.
        """
        if self.bands_lengths is None:
            raise ValueError(
                "The transformation parameters have not been set.")
        self.scales_lengths = self.bands_lengths.sum(axis=1)
        self.scales_padds = numpy.zeros((self.nb_scale + 1, ), dtype=int)
        self.scales_padds[1:] = self.scales_lengths.cumsum()
    def _analysis(self, data, **kwargs):
        """ Decompose a real signal using ISAP.
        Parameters
        ----------
        data: nd-array
            a real array to be decomposed.
        kwargs: dict (optional)
            the parameters that will be passed to
            'pysap.extensions.mr_transform'.
        Returns
        -------
        analysis_data: nd_array
            the decomposition coefficients.
        analysis_header: dict
            the decomposition associated information.
        """
        # Use subprocess to execute binaries
        if self.use_wrapping:
            kwargs["verbose"] = self.verbose > 0
            with pysap.TempDir(isap=True) as tmpdir:
                in_image = os.path.join(tmpdir, "in.fits")
                out_mr_file = os.path.join(tmpdir, "cube.mr")
                pysap.io.save(data, in_image)
                pysap.extensions.mr_transform(in_image, out_mr_file, **kwargs)
                image = pysap.io.load(out_mr_file)
                analysis_data = image.data
                analysis_header = image.metadata
            # Reorganize the generated coefficients
            self._analysis_shape = analysis_data.shape
            analysis_buffer = self.flatten_fct(analysis_data, self)
            self._analysis_buffer_shape = analysis_buffer.shape
            if not isinstance(self.nb_band_per_scale, list):
                self.nb_band_per_scale = (
                    self.nb_band_per_scale.squeeze().tolist())
            analysis_data = []
            for scale, nb_bands in enumerate(self.nb_band_per_scale):
                for band in range(nb_bands):
                    analysis_data.append(self._get_linear_band(
                        scale, band, analysis_buffer))
        # Use Python bindings
        else:
            analysis_data, self.nb_band_per_scale = self.trf.transform(
                data.astype(numpy.double), save=False)
            analysis_header = None
        return analysis_data, analysis_header
    def _synthesis(self, analysis_data, analysis_header):
        """ Reconstruct a real signal from the wavelet coefficients using ISAP.
        Parameters
        ----------
        analysis_data: list of nd-array
            the wavelet coefficients array.
        analysis_header: dict
            the wavelet decomposition parameters.
        Returns
        -------
        data: nd-array
            the reconstructed data array.
        """
        # Use subprocess to execute binaries
        if self.use_wrapping:
            cube = pysap.Image(data=analysis_data[0], metadata=analysis_header)
            with pysap.TempDir(isap=True) as tmpdir:
                in_mr_file = os.path.join(tmpdir, "cube.mr")
                out_image = os.path.join(tmpdir, "out.fits")
                pysap.io.save(cube, in_mr_file)
                pysap.extensions.mr_recons(
                    in_mr_file, out_image, verbose=(self.verbose > 0))
                data = pysap.io.load(out_image).data
        # Use Python bindings
        else:
            data = self.trf.reconstruct(analysis_data)
        return data
| [
"numpy.ones_like",
"pysap.Image",
"numpy.sqrt",
"pysap.TempDir",
"pysap.plotting.plot_transform",
"os.path.join",
"numpy.iscomplexobj",
"numpy.zeros",
"numpy.sum",
"pysap.extensions.mr_recons",
"pysap.io.load",
"pysap.extensions.mr_transform",
"warnings.warn",
"pysap.io.save",
"pysparse.... | [((747, 813), 'warnings.warn', 'warnings.warn', (['"""Sparse2d python bindings not found, use binaries."""'], {}), "('Sparse2d python bindings not found, use binaries.')\n", (760, 813), False, 'import warnings\n'), ((11776, 11796), 'pysap.plotting.plot_transform', 'plot_transform', (['self'], {}), '(self)\n', (11790, 11796), False, 'from pysap.plotting import plot_transform\n'), ((12455, 12485), 'numpy.iscomplexobj', 'numpy.iscomplexobj', (['self._data'], {}), '(self._data)\n', (12473, 12485), False, 'import numpy\n'), ((14620, 14662), 'numpy.iscomplexobj', 'numpy.iscomplexobj', (['self._analysis_data[0]'], {}), '(self._analysis_data[0])\n', (14638, 14662), False, 'import numpy\n'), ((15239, 15292), 'pysap.Image', 'pysap.Image', ([], {'data': 'data', 'metadata': 'self._image_metadata'}), '(data=data, metadata=self._image_metadata)\n', (15250, 15292), False, 'import pysap\n'), ((20256, 20300), 'numpy.zeros', 'numpy.zeros', (['(self.nb_scale + 1,)'], {'dtype': 'int'}), '((self.nb_scale + 1,), dtype=int)\n', (20267, 20300), False, 'import numpy\n'), ((3271, 3301), 'pysparse.MRTransform', 'pysparse.MRTransform', ([], {}), '(**kwargs)\n', (3291, 3301), False, 'import pysparse\n'), ((11076, 11106), 'numpy.ones_like', 'numpy.ones_like', (['bands_lengths'], {}), '(bands_lengths)\n', (11091, 11106), False, 'import numpy\n'), ((13876, 13905), 'pprint.pprint', 'pprint', (['self._analysis_header'], {}), '(self._analysis_header)\n', (13882, 13905), False, 'from pprint import pprint\n'), ((14073, 14148), 'numpy.zeros', 'numpy.zeros', (['self._analysis_buffer_shape'], {'dtype': 'self.analysis_data[0].dtype'}), '(self._analysis_buffer_shape, dtype=self.analysis_data[0].dtype)\n', (14084, 14148), False, 'import numpy\n'), ((22927, 22987), 'pysap.Image', 'pysap.Image', ([], {'data': 'analysis_data[0]', 'metadata': 'analysis_header'}), '(data=analysis_data[0], metadata=analysis_header)\n', (22938, 22987), False, 'import pysap\n'), ((21030, 21054), 'pysap.TempDir', 
'pysap.TempDir', ([], {'isap': '(True)'}), '(isap=True)\n', (21043, 21054), False, 'import pysap\n'), ((21093, 21124), 'os.path.join', 'os.path.join', (['tmpdir', '"""in.fits"""'], {}), "(tmpdir, 'in.fits')\n", (21105, 21124), False, 'import os\n'), ((21155, 21186), 'os.path.join', 'os.path.join', (['tmpdir', '"""cube.mr"""'], {}), "(tmpdir, 'cube.mr')\n", (21167, 21186), False, 'import os\n'), ((21203, 21232), 'pysap.io.save', 'pysap.io.save', (['data', 'in_image'], {}), '(data, in_image)\n', (21216, 21232), False, 'import pysap\n'), ((21249, 21311), 'pysap.extensions.mr_transform', 'pysap.extensions.mr_transform', (['in_image', 'out_mr_file'], {}), '(in_image, out_mr_file, **kwargs)\n', (21278, 21311), False, 'import pysap\n'), ((21336, 21362), 'pysap.io.load', 'pysap.io.load', (['out_mr_file'], {}), '(out_mr_file)\n', (21349, 21362), False, 'import pysap\n'), ((23005, 23029), 'pysap.TempDir', 'pysap.TempDir', ([], {'isap': '(True)'}), '(isap=True)\n', (23018, 23029), False, 'import pysap\n'), ((23070, 23101), 'os.path.join', 'os.path.join', (['tmpdir', '"""cube.mr"""'], {}), "(tmpdir, 'cube.mr')\n", (23082, 23101), False, 'import os\n'), ((23130, 23162), 'os.path.join', 'os.path.join', (['tmpdir', '"""out.fits"""'], {}), "(tmpdir, 'out.fits')\n", (23142, 23162), False, 'import os\n'), ((23179, 23210), 'pysap.io.save', 'pysap.io.save', (['cube', 'in_mr_file'], {}), '(cube, in_mr_file)\n', (23192, 23210), False, 'import pysap\n'), ((23227, 23302), 'pysap.extensions.mr_recons', 'pysap.extensions.mr_recons', (['in_mr_file', 'out_image'], {'verbose': '(self.verbose > 0)'}), '(in_mr_file, out_image, verbose=self.verbose > 0)\n', (23253, 23302), False, 'import pysap\n'), ((15828, 15869), 'numpy.sum', 'numpy.sum', (['self.nb_band_per_scale[:scale]'], {}), '(self.nb_band_per_scale[:scale])\n', (15837, 15869), False, 'import numpy\n'), ((23349, 23373), 'pysap.io.load', 'pysap.io.load', (['out_image'], {}), '(out_image)\n', (23362, 23373), False, 'import pysap\n'), 
((11345, 11402), 'numpy.sqrt', 'numpy.sqrt', (['(scale_padd * ratio[band_number, scale_number])'], {}), '(scale_padd * ratio[band_number, scale_number])\n', (11355, 11402), False, 'import numpy\n'), ((11454, 11511), 'numpy.sqrt', 'numpy.sqrt', (['(scale_padd / ratio[band_number, scale_number])'], {}), '(scale_padd / ratio[band_number, scale_number])\n', (11464, 11511), False, 'import numpy\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 9 14:13:23 2021
@author: willrichardson
"""
# This script calculates other relevant stats and quantities of interest for each half-hour period
# includes 30-minute quantites and spectral quantites in each period
# leaving RMSDs as local variables; pass them into this script
#%% import libraries
import sys
sys.path.insert(0, '/Users/willrichardson/opt/anaconda3/lib/python3.8/site-packages')
import os
import glob
import pandas as pd
import numpy as np
from funcs import sort_ByDate_DMY, read_conc
from funcs import anc_unit_conv, Fco2_name, sigma_m
#%% get current working directory, relevant environment variables
# NOTE: the names bound below (RMSDq_slope, df_master, fn_LB, zm, N_wvlt, ...)
# are read as globals by SpectralStats() further down the script.
args = sys.argv
# Command-line args 1 and 2 are the RMSD regression slopes passed in by the
# caller (used later for the ebullition thresholds).
RMSDq_slope = np.float64(args[1])
RMSDc_slope = np.float64(args[2])
base_path = os.getcwd()
site_yr = os.environ['site_yr']
run_ID = os.environ['run_ID']
fn_LB = os.environ['LB']
fn_UB = os.environ['UB']
# DT: sampling interval in seconds — presumably 0.05 s (20 Hz); verify
# against the acquisition setup.
DT = np.float64(os.environ['DT'])
# nj: number of dyadic wavelet scales that fit in a 30-minute record.
nj = int(np.rint(np.log2((30*60)/DT)))
# zm: measurement height [m] — TODO confirm units with site metadata.
zm = np.float64(os.environ['Zm'])
# N_wvlt: number of samples per 30-minute record (2**nj).
N_wvlt = 2**nj
# make output file name
output_fname = base_path + '/ref_data/Output/%s/%s/%s_Interm_RefDf_allHH_PostProcStats.csv'%(site_yr, run_ID, site_yr)
#%% load reference dfs, outputs from partitioning program, other ancillary info
df_ref = pd.read_csv(base_path + '/ref_data/Output/%s/%s_Interm_RefDf_allHH.csv' %(site_yr, site_yr),
                    index_col='Timestamp', infer_datetime_format=True, parse_dates=True)
RLM_df = pd.read_csv(base_path + '/ref_data/Output/%s/%s_RLMfitRMSDs_fnLB_%s_fnUB_%s.csv' %(site_yr, site_yr, fn_LB, fn_UB),
                    index_col=0, infer_datetime_format=True, parse_dates=True)
# zc_df: daily canopy height record, indexed by timestamp.
zc_df = pd.read_csv(os.environ['zc_file'], sep='\t', index_col=0, names=['Timestamp', 'zc'], parse_dates=True, infer_datetime_format=True)
# partitioning program outputs
## load and sort partitioned fluxes and random error estimates
flux = glob.glob(base_path + '/flux/%s/flux*.txt'%site_yr)
rerr = glob.glob(base_path + '/flux/%s/rerror*.txt'%site_yr)
flux = sort_ByDate_DMY(flux); rerr = sort_ByDate_DMY(rerr)
raw_files, part_df = read_conc(flux, rerr)
## partitioning program yields fluxes in mmol m-2 s-1; convert to umol m-2 s-1
part_df = part_df*1000
#%% join into a unified dataframe (use 'inner' join so as to only keep rows where raw data made it all the way to the partitioning stage)
df_master = df_ref.join([part_df, RLM_df], how='inner')
#%% 30 minute stats
# add raw filenames as a column in the data frame
df_master['raw_file'] = raw_files
# fraction of total CH4 flux that is ebullition (per reference scalar: q, c, T)
df_master['frac_eb_q'] = df_master['CH4_eb_q']/df_master['CH4_tot']
df_master['frac_eb_c'] = df_master['CH4_eb_c']/df_master['CH4_tot']
df_master['frac_eb_T'] = df_master['CH4_eb_T']/df_master['CH4_tot']
# diffusive fluxes = total minus ebullitive
df_master['CH4_diff_q'] = df_master['CH4_tot'] - df_master['CH4_eb_q']
df_master['CH4_diff_c'] = df_master['CH4_tot'] - df_master['CH4_eb_c']
df_master['CH4_diff_T'] = df_master['CH4_tot'] - df_master['CH4_eb_T']
# Some unit conversions if desired (flag imported from funcs)
if anc_unit_conv == True:
    from funcs import air_temp_K, T_dew, P_atm, VP, VP_sat, VPD_name
    df_master['T_air_C'] = df_master[air_temp_K] - 273.15; df_master[T_dew] = df_master[T_dew] - 273.15 #[K to C]
    df_master[VP] = df_master[VP]/1000; df_master[VP_sat] = df_master[VP_sat]/1000 #[Pa to kPa]
    df_master[VPD_name] = df_master[VPD_name]/1000; df_master['P'] = df_master[P_atm]/1000 # [Pa to kPa]
    df_master.drop([P_atm, air_temp_K], axis=1, inplace=True)
# co2 flux magnitude (for reference scalar thresholding)
df_master['co2_flux_mag'] = np.absolute(df_master[Fco2_name])
# normalized random error stats: random errors scaled by the ebullitive,
# diffusive, or total CH4 flux of the period
df_master['Ebq_rerr_FebNorm'] = df_master['CH4_ebq_err']/df_master['CH4_eb_q']
df_master['Ebq_rerr_FtotNorm'] = df_master['CH4_ebq_err']/df_master['CH4_tot']
df_master['Diffq_rerr_FdiffNorm'] = df_master['CH4_diffq_err']/df_master['CH4_diff_q']
df_master['Diffq_rerr_FtotNorm'] = df_master['CH4_diffq_err']/df_master['CH4_tot']
df_master['Ebc_rerr_FebNorm'] = df_master['CH4_ebc_err']/df_master['CH4_eb_c']
df_master['Ebc_rerr_FtotNorm'] = df_master['CH4_ebc_err']/df_master['CH4_tot']
df_master['Diffc_rerr_FdiffNorm'] = df_master['CH4_diffc_err']/df_master['CH4_diff_c']
df_master['Diffc_rerr_FtotNorm'] = df_master['CH4_diffc_err']/df_master['CH4_tot']
#%% function for spectral stats on each period
def SpectralStats(tstamp):
# convert timestamp to format of file naming convention
day = tstamp.strftime('%Y%m%d'); datetime = tstamp.strftime('%Y%m%d_%H%M')
# load data; first row is coarse-grained mean, skip it
wvlt_df = pd.read_table(base_path + '/wvlet/%s/'%site_yr + day + '/' + 'wvlet-' + datetime + '.dat', sep='\s+',
names=['Index_wvlt', 'u', 'w', 'T', 'q', 'c', 'm'], delim_whitespace=False,
skiprows=1)
# get canopy height
zc = np.float64(zc_df.loc[tstamp.floor('D')])
# calculate displacement height as 0.66*canopy height
d = 0.67*zc #[m]
# calculate frequency for filtering
u = np.mean(wvlt_df['u'])
wvlt_df['j'] = (N_wvlt/wvlt_df['Index_wvlt'])*0.05
wvlt_df['fn'] = (zm - d)/(wvlt_df['j']*u)
# filter out low frequency components for partitioning
wvlt_df_filt = wvlt_df[wvlt_df['fn'] > np.float64(fn_LB)]
# add ebullition flags so that stats on diffusive/ebullitive fluxes can be calculated
wvlt_df_filt.loc[:, 'thresh_q_upp'] = df_master.loc[tstamp, 'slope_mq']*wvlt_df_filt.loc[:, 'q'] + (3*(RMSDq_slope*df_master.loc[tstamp, sigma_m]))
wvlt_df_filt.loc[:, 'thresh_q_low'] = df_master.loc[tstamp, 'slope_mq']*wvlt_df_filt.loc[:, 'q'] - (3*(RMSDq_slope*df_master.loc[tstamp, sigma_m]))
wvlt_df_filt.loc[:, 'thresh_c_upp'] = df_master.loc[tstamp, 'slope_mc']*wvlt_df_filt.loc[:, 'c'] + (3*(RMSDc_slope*df_master.loc[tstamp, sigma_m]))
wvlt_df_filt.loc[:, 'thresh_c_low'] = df_master.loc[tstamp, 'slope_mc']*wvlt_df_filt.loc[:, 'c'] - (3*(RMSDc_slope*df_master.loc[tstamp, sigma_m]))
wvlt_df_filt.loc[:, 'Ebq'] = (wvlt_df_filt.loc[:, 'm'] > wvlt_df_filt.loc[:, 'thresh_q_upp']) | (wvlt_df_filt.loc[:, 'm'] < wvlt_df_filt.loc[:, 'thresh_q_low'])
wvlt_df_filt.loc[:, 'Ebc'] = (wvlt_df_filt.loc[:, 'm'] > wvlt_df_filt.loc[:, 'thresh_c_upp']) | (wvlt_df_filt.loc[:, 'm'] < wvlt_df_filt.loc[:, 'thresh_c_low'])
# do spectral stats
## result objects
### master output for funtion
out = pd.DataFrame(index=[tstamp])
### frames for scale-wise stats
#### frequency values
fn_res = pd.DataFrame(index=[tstamp])
#### variances
mvar_res = pd.DataFrame(index=[tstamp]); qvar_res = pd.DataFrame(index=[tstamp])
cvar_res = pd.DataFrame(index=[tstamp]); Tvar_res = pd.DataFrame(index=[tstamp]); wvar_res = pd.DataFrame(index=[tstamp])
#### ebullitive covariances
Ebq_cov_res = pd.DataFrame(index=[tstamp]); Ebc_cov_res = pd.DataFrame(index=[tstamp])
#### covariances
wmcov_res = pd.DataFrame(index=[tstamp]); wqcov_res = pd.DataFrame(index=[tstamp]); wccov_res = pd.DataFrame(index=[tstamp]); wTcov_res = pd.DataFrame(index=[tstamp])
mqcoh_res = pd.DataFrame(index=[tstamp]); mccoh_res = pd.DataFrame(index=[tstamp]); qccoh_res = pd.DataFrame(index=[tstamp]);
## variables for 30-min. values calculated from scale-wise stats
m_var = 0; q_var = 0; c_var = 0; T_var = 0; w_var = 0;
m_ebq_var = 0; m_ebc_var = 0
wm_tot_cov = 0; wm_ebq_cov = 0; wm_ebc_cov = 0
wq_tot_cov = 0; wc_tot_cov = 0; wT_tot_cov = 0
if wvlt_df.isnull().values.any() == False and wvlt_df.shape[0] > 30000:
for i in np.arange(nj):
# select data at this scale
scale_index = 2**i
# all scales of transform
scale_data = wvlt_df.loc[wvlt_df['Index_wvlt'] == scale_index]
# dataframe filtered for coefficients to be included in partitioning
scale_data_filt = wvlt_df_filt.loc[wvlt_df_filt['Index_wvlt'] == scale_index]
# get labelling string of this scale for results; make scale-wise variable labels
scal_lab = 'j' + str(15-i) + '_ts' + str(np.float64(scale_data['j'].iloc[0]))
fn_scal_lab = 'fn_' + scal_lab
fn_scale = np.float64(np.unique(scale_data['fn'])); fn_res[fn_scal_lab] = fn_scale
## variances
mlab = 'm_NormVar_' + scal_lab; qlab = 'q_NormVar_' + scal_lab; clab = 'c_NormVar_' + scal_lab;
Tlab = 'Ts_NormVar_' + scal_lab; wlab = 'w_NormVar_' + scal_lab;
## covariances
wm_lab = 'wm_NormCov_' + scal_lab; wq_lab = 'wq_NormCov_' + scal_lab; wc_lab = 'wc_NormCov_' + scal_lab;
wT_lab = 'wT_NormCov_' + scal_lab;
Ebq_lab = 'Ebq_NormCov_' + scal_lab; Ebc_lab = 'Ebc_NormCov_' + scal_lab
## coherences
mqcoh_lab1 = 'mq_coh1_' + scal_lab; mccoh_lab1 = 'mc_coh1_' + scal_lab; qccoh_lab1 = 'qc_coh1_' + scal_lab
# variances and covariances (variance calculated in this iterative process, std for m calculated from square root of global variance at the end)
mvar_scale = np.sum(scale_data['m']**2)/N_wvlt; mvar_res.loc[tstamp, mlab] = mvar_scale; m_var = m_var + mvar_scale
qvar_scale = np.sum(scale_data['q']**2)/N_wvlt; qvar_res.loc[tstamp, qlab] = qvar_scale; q_var = q_var + qvar_scale
cvar_scale = np.sum(scale_data['c']**2)/N_wvlt; cvar_res.loc[tstamp, clab] = cvar_scale; c_var = c_var + cvar_scale
Tvar_scale = np.sum(scale_data['T']**2)/N_wvlt; Tvar_res.loc[tstamp, Tlab] = Tvar_scale; T_var = T_var + Tvar_scale
wvar_scale = np.sum(scale_data['w']**2)/N_wvlt; wvar_res.loc[tstamp, wlab] = wvar_scale; w_var = w_var + wvar_scale
wmcov_scale = np.sum(scale_data['w']*scale_data['m'])/N_wvlt; wmcov_res.loc[tstamp, wm_lab] = wmcov_scale; wm_tot_cov = wm_tot_cov + wmcov_scale
wqcov_scale = np.sum(scale_data['w']*scale_data['q'])/N_wvlt; wqcov_res.loc[tstamp, wq_lab] = wqcov_scale; wq_tot_cov = wq_tot_cov + wqcov_scale
wccov_scale = np.sum(scale_data['w']*scale_data['c'])/N_wvlt; wccov_res.loc[tstamp, wc_lab] = wccov_scale; wc_tot_cov = wc_tot_cov + wccov_scale
wTcov_scale = np.sum(scale_data['w']*scale_data['T'])/N_wvlt; wTcov_res.loc[tstamp, wT_lab] = wTcov_scale; wT_tot_cov = wT_tot_cov + wTcov_scale
if scale_data_filt.empty != True:
# variances
m_ebq_var = m_ebq_var + np.sum(scale_data_filt['Ebq']*scale_data_filt['m']**2)/N_wvlt
m_ebc_var = m_ebc_var + np.sum(scale_data_filt['Ebc']*scale_data_filt['m']**2)/N_wvlt
# covariances
Ebq_cov_scale = np.sum(scale_data_filt['Ebq']*scale_data_filt['w']*scale_data_filt['m'])/N_wvlt
Ebq_cov_res[Ebq_lab] = Ebq_cov_scale
wm_ebq_cov = wm_ebq_cov + Ebq_cov_scale
Ebc_cov_scale = np.sum(scale_data_filt['Ebc']*scale_data_filt['w']*scale_data_filt['m'])/N_wvlt
Ebc_cov_res[Ebc_lab] = Ebc_cov_scale
wm_ebc_cov = wm_ebc_cov + Ebc_cov_scale
# calculate coherences (must double check to make sure this scale has 3 or more data points)
if scale_data.shape[0] >= 3:
# (1) just using an out of box correlation coefficient calculation
mqcoh_res[mqcoh_lab1] = np.corrcoef(scale_data['q'], scale_data['m'])[0][1]
mccoh_res[mccoh_lab1] = np.corrcoef(scale_data['c'], scale_data['m'])[0][1]
qccoh_res[qccoh_lab1] = np.corrcoef(scale_data['q'], scale_data['c'])[0][1]
# convert variances to stdevs; normalize scale-wise variances
m_std = np.sqrt(m_var); m_ebq_std = np.sqrt(m_ebq_var); m_diffq_std = m_std - m_ebq_std
m_ebc_std = np.sqrt(m_ebc_var); m_diffc_std = m_std - m_ebc_std
mvar_res = mvar_res/m_var; qvar_res = qvar_res/q_var; cvar_res = cvar_res/c_var; Tvar_res = Tvar_res/T_var; wvar_res = wvar_res/w_var;
# convert w-m covariances from mmol m-2 s-1 to umol m-2 s-1; normalize scale-wise covariances
wmcov_res = wmcov_res/wm_tot_cov; Ebq_cov_res = Ebq_cov_res/wm_ebq_cov; Ebc_cov_res = Ebc_cov_res/wm_ebc_cov
wm_tot_cov = wm_tot_cov*1000; wm_ebq_cov = wm_ebq_cov*1000; wm_diffq_cov = wm_tot_cov - wm_ebq_cov
wm_ebc_cov = wm_ebc_cov*1000; wm_diffc_cov = wm_tot_cov - wm_ebc_cov
wqcov_res = wqcov_res/wq_tot_cov; wccov_res = wccov_res/wc_tot_cov; wTcov_res = wTcov_res/wT_tot_cov
# save all stats to output frame
## stanalone values not already in a preliminary dataframe
cols_write = ['var_m', 'stdev_m', 'stdev_m_ebq', 'stdev_m_diffq', 'stdev_m_ebc', 'stdev_m_diffc', 'cov_wm', 'cov_wm_ebq',
'cov_wm_diffq', 'cov_wm_ebc','cov_wm_diffc', 'var_q', 'var_c', 'var_T', 'var_w',
'cov_wq', 'cov_wc', 'cov_wT']
output_arr = np.array([m_var, m_std, m_ebq_std, m_diffq_std, m_ebc_std, m_diffc_std, wm_tot_cov, wm_ebq_cov, wm_diffq_cov,
wm_ebc_cov, wm_diffc_cov, q_var, c_var, T_var, w_var, wq_tot_cov, wc_tot_cov, wT_tot_cov])
output_arr = np.expand_dims(output_arr, 0)
out_interm = pd.DataFrame(output_arr, index=[tstamp], columns=cols_write)
out = out.join([fn_res, out_interm, mvar_res, qvar_res, cvar_res, Tvar_res, wvar_res, wmcov_res, wqcov_res, wccov_res,
wTcov_res, mqcoh_res, mccoh_res, qccoh_res, Ebq_cov_res, Ebc_cov_res])
print('%s spectral stats done'%datetime)
return(out)
#%% join spectral stats with master frame, save output
# Run the spectral statistics for every record in the master frame and
# concatenate the per-record result rows once at the end (a single
# pd.concat avoids the quadratic cost of concatenating inside the loop).
spec_frames = [SpectralStats(df_master.index[n]) for n in range(df_master.shape[0])]
out_df = pd.concat(spec_frames, sort=True)
#%% save new reference df with all of these stats
df_master = df_master.join(out_df, how='left')
df_master.to_csv(output_fname, index_label='Timestamp')
| [
"sys.path.insert",
"numpy.sqrt",
"pandas.read_csv",
"numpy.array",
"funcs.read_conc",
"numpy.arange",
"numpy.mean",
"numpy.float64",
"funcs.sort_ByDate_DMY",
"pandas.DataFrame",
"glob.glob",
"numpy.corrcoef",
"numpy.log2",
"numpy.unique",
"numpy.absolute",
"os.getcwd",
"numpy.sum",
... | [((381, 470), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/Users/willrichardson/opt/anaconda3/lib/python3.8/site-packages"""'], {}), "(0,\n '/Users/willrichardson/opt/anaconda3/lib/python3.8/site-packages')\n", (396, 470), False, 'import sys\n'), ((723, 742), 'numpy.float64', 'np.float64', (['args[1]'], {}), '(args[1])\n', (733, 742), True, 'import numpy as np\n'), ((757, 776), 'numpy.float64', 'np.float64', (['args[2]'], {}), '(args[2])\n', (767, 776), True, 'import numpy as np\n'), ((790, 801), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (799, 801), False, 'import os\n'), ((920, 948), 'numpy.float64', 'np.float64', (["os.environ['DT']"], {}), "(os.environ['DT'])\n", (930, 948), True, 'import numpy as np\n'), ((993, 1021), 'numpy.float64', 'np.float64', (["os.environ['Zm']"], {}), "(os.environ['Zm'])\n", (1003, 1021), True, 'import numpy as np\n'), ((1272, 1443), 'pandas.read_csv', 'pd.read_csv', (["(base_path + '/ref_data/Output/%s/%s_Interm_RefDf_allHH.csv' % (site_yr,\n site_yr))"], {'index_col': '"""Timestamp"""', 'infer_datetime_format': '(True)', 'parse_dates': '(True)'}), "(base_path + '/ref_data/Output/%s/%s_Interm_RefDf_allHH.csv' % (\n site_yr, site_yr), index_col='Timestamp', infer_datetime_format=True,\n parse_dates=True)\n", (1283, 1443), True, 'import pandas as pd\n'), ((1466, 1654), 'pandas.read_csv', 'pd.read_csv', (["(base_path + '/ref_data/Output/%s/%s_RLMfitRMSDs_fnLB_%s_fnUB_%s.csv' % (\n site_yr, site_yr, fn_LB, fn_UB))"], {'index_col': '(0)', 'infer_datetime_format': '(True)', 'parse_dates': '(True)'}), "(base_path + \n '/ref_data/Output/%s/%s_RLMfitRMSDs_fnLB_%s_fnUB_%s.csv' % (site_yr,\n site_yr, fn_LB, fn_UB), index_col=0, infer_datetime_format=True,\n parse_dates=True)\n", (1477, 1654), True, 'import pandas as pd\n'), ((1671, 1806), 'pandas.read_csv', 'pd.read_csv', (["os.environ['zc_file']"], {'sep': '"""\t"""', 'index_col': '(0)', 'names': "['Timestamp', 'zc']", 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), 
"(os.environ['zc_file'], sep='\\t', index_col=0, names=[\n 'Timestamp', 'zc'], parse_dates=True, infer_datetime_format=True)\n", (1682, 1806), True, 'import pandas as pd\n'), ((1904, 1957), 'glob.glob', 'glob.glob', (["(base_path + '/flux/%s/flux*.txt' % site_yr)"], {}), "(base_path + '/flux/%s/flux*.txt' % site_yr)\n", (1913, 1957), False, 'import glob\n'), ((1963, 2018), 'glob.glob', 'glob.glob', (["(base_path + '/flux/%s/rerror*.txt' % site_yr)"], {}), "(base_path + '/flux/%s/rerror*.txt' % site_yr)\n", (1972, 2018), False, 'import glob\n'), ((2024, 2045), 'funcs.sort_ByDate_DMY', 'sort_ByDate_DMY', (['flux'], {}), '(flux)\n', (2039, 2045), False, 'from funcs import sort_ByDate_DMY, read_conc\n'), ((2054, 2075), 'funcs.sort_ByDate_DMY', 'sort_ByDate_DMY', (['rerr'], {}), '(rerr)\n', (2069, 2075), False, 'from funcs import sort_ByDate_DMY, read_conc\n'), ((2098, 2119), 'funcs.read_conc', 'read_conc', (['flux', 'rerr'], {}), '(flux, rerr)\n', (2107, 2119), False, 'from funcs import sort_ByDate_DMY, read_conc\n'), ((3603, 3636), 'numpy.absolute', 'np.absolute', (['df_master[Fco2_name]'], {}), '(df_master[Fco2_name])\n', (3614, 3636), True, 'import numpy as np\n'), ((13724, 13753), 'numpy.arange', 'np.arange', (['df_master.shape[0]'], {}), '(df_master.shape[0])\n', (13733, 13753), True, 'import numpy as np\n'), ((4627, 4827), 'pandas.read_table', 'pd.read_table', (["(base_path + '/wvlet/%s/' % site_yr + day + '/' + 'wvlet-' + datetime + '.dat')"], {'sep': '"""\\\\s+"""', 'names': "['Index_wvlt', 'u', 'w', 'T', 'q', 'c', 'm']", 'delim_whitespace': '(False)', 'skiprows': '(1)'}), "(base_path + '/wvlet/%s/' % site_yr + day + '/' + 'wvlet-' +\n datetime + '.dat', sep='\\\\s+', names=['Index_wvlt', 'u', 'w', 'T', 'q',\n 'c', 'm'], delim_whitespace=False, skiprows=1)\n", (4640, 4827), True, 'import pandas as pd\n'), ((5079, 5100), 'numpy.mean', 'np.mean', (["wvlt_df['u']"], {}), "(wvlt_df['u'])\n", (5086, 5100), True, 'import numpy as np\n'), ((6448, 6476), 
'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (6460, 6476), True, 'import pandas as pd\n'), ((6552, 6580), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (6564, 6580), True, 'import pandas as pd\n'), ((6615, 6643), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (6627, 6643), True, 'import pandas as pd\n'), ((6656, 6684), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (6668, 6684), True, 'import pandas as pd\n'), ((6700, 6728), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (6712, 6728), True, 'import pandas as pd\n'), ((6741, 6769), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (6753, 6769), True, 'import pandas as pd\n'), ((6782, 6810), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (6794, 6810), True, 'import pandas as pd\n'), ((6861, 6889), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (6873, 6889), True, 'import pandas as pd\n'), ((6905, 6933), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (6917, 6933), True, 'import pandas as pd\n'), ((6972, 7000), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (6984, 7000), True, 'import pandas as pd\n'), ((7014, 7042), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (7026, 7042), True, 'import pandas as pd\n'), ((7056, 7084), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (7068, 7084), True, 'import pandas as pd\n'), ((7098, 7126), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (7110, 7126), True, 'import pandas as pd\n'), ((7143, 7171), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), 
'(index=[tstamp])\n', (7155, 7171), True, 'import pandas as pd\n'), ((7185, 7213), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (7197, 7213), True, 'import pandas as pd\n'), ((7227, 7255), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[tstamp]'}), '(index=[tstamp])\n', (7239, 7255), True, 'import pandas as pd\n'), ((11834, 11848), 'numpy.sqrt', 'np.sqrt', (['m_var'], {}), '(m_var)\n', (11841, 11848), True, 'import numpy as np\n'), ((11862, 11880), 'numpy.sqrt', 'np.sqrt', (['m_ebq_var'], {}), '(m_ebq_var)\n', (11869, 11880), True, 'import numpy as np\n'), ((11931, 11949), 'numpy.sqrt', 'np.sqrt', (['m_ebc_var'], {}), '(m_ebc_var)\n', (11938, 11949), True, 'import numpy as np\n'), ((13014, 13222), 'numpy.array', 'np.array', (['[m_var, m_std, m_ebq_std, m_diffq_std, m_ebc_std, m_diffc_std, wm_tot_cov,\n wm_ebq_cov, wm_diffq_cov, wm_ebc_cov, wm_diffc_cov, q_var, c_var, T_var,\n w_var, wq_tot_cov, wc_tot_cov, wT_tot_cov]'], {}), '([m_var, m_std, m_ebq_std, m_diffq_std, m_ebc_std, m_diffc_std,\n wm_tot_cov, wm_ebq_cov, wm_diffq_cov, wm_ebc_cov, wm_diffc_cov, q_var,\n c_var, T_var, w_var, wq_tot_cov, wc_tot_cov, wT_tot_cov])\n', (13022, 13222), True, 'import numpy as np\n'), ((13259, 13288), 'numpy.expand_dims', 'np.expand_dims', (['output_arr', '(0)'], {}), '(output_arr, 0)\n', (13273, 13288), True, 'import numpy as np\n'), ((13311, 13371), 'pandas.DataFrame', 'pd.DataFrame', (['output_arr'], {'index': '[tstamp]', 'columns': 'cols_write'}), '(output_arr, index=[tstamp], columns=cols_write)\n', (13323, 13371), True, 'import pandas as pd\n'), ((966, 987), 'numpy.log2', 'np.log2', (['(30 * 60 / DT)'], {}), '(30 * 60 / DT)\n', (973, 987), True, 'import numpy as np\n'), ((7623, 7636), 'numpy.arange', 'np.arange', (['nj'], {}), '(nj)\n', (7632, 7636), True, 'import numpy as np\n'), ((13915, 13955), 'pandas.concat', 'pd.concat', (['[out_df, spec_out]'], {'sort': '(True)'}), '([out_df, spec_out], sort=True)\n', (13924, 13955), 
True, 'import pandas as pd\n'), ((5309, 5326), 'numpy.float64', 'np.float64', (['fn_LB'], {}), '(fn_LB)\n', (5319, 5326), True, 'import numpy as np\n'), ((8279, 8306), 'numpy.unique', 'np.unique', (["scale_data['fn']"], {}), "(scale_data['fn'])\n", (8288, 8306), True, 'import numpy as np\n'), ((9172, 9200), 'numpy.sum', 'np.sum', (["(scale_data['m'] ** 2)"], {}), "(scale_data['m'] ** 2)\n", (9178, 9200), True, 'import numpy as np\n'), ((9300, 9328), 'numpy.sum', 'np.sum', (["(scale_data['q'] ** 2)"], {}), "(scale_data['q'] ** 2)\n", (9306, 9328), True, 'import numpy as np\n'), ((9428, 9456), 'numpy.sum', 'np.sum', (["(scale_data['c'] ** 2)"], {}), "(scale_data['c'] ** 2)\n", (9434, 9456), True, 'import numpy as np\n'), ((9556, 9584), 'numpy.sum', 'np.sum', (["(scale_data['T'] ** 2)"], {}), "(scale_data['T'] ** 2)\n", (9562, 9584), True, 'import numpy as np\n'), ((9684, 9712), 'numpy.sum', 'np.sum', (["(scale_data['w'] ** 2)"], {}), "(scale_data['w'] ** 2)\n", (9690, 9712), True, 'import numpy as np\n'), ((9826, 9867), 'numpy.sum', 'np.sum', (["(scale_data['w'] * scale_data['m'])"], {}), "(scale_data['w'] * scale_data['m'])\n", (9832, 9867), True, 'import numpy as np\n'), ((9983, 10024), 'numpy.sum', 'np.sum', (["(scale_data['w'] * scale_data['q'])"], {}), "(scale_data['w'] * scale_data['q'])\n", (9989, 10024), True, 'import numpy as np\n'), ((10140, 10181), 'numpy.sum', 'np.sum', (["(scale_data['w'] * scale_data['c'])"], {}), "(scale_data['w'] * scale_data['c'])\n", (10146, 10181), True, 'import numpy as np\n'), ((10297, 10338), 'numpy.sum', 'np.sum', (["(scale_data['w'] * scale_data['T'])"], {}), "(scale_data['w'] * scale_data['T'])\n", (10303, 10338), True, 'import numpy as np\n'), ((8165, 8200), 'numpy.float64', 'np.float64', (["scale_data['j'].iloc[0]"], {}), "(scale_data['j'].iloc[0])\n", (8175, 8200), True, 'import numpy as np\n'), ((10815, 10891), 'numpy.sum', 'np.sum', (["(scale_data_filt['Ebq'] * scale_data_filt['w'] * scale_data_filt['m'])"], {}), 
"(scale_data_filt['Ebq'] * scale_data_filt['w'] * scale_data_filt['m'])\n", (10821, 10891), True, 'import numpy as np\n'), ((11036, 11112), 'numpy.sum', 'np.sum', (["(scale_data_filt['Ebc'] * scale_data_filt['w'] * scale_data_filt['m'])"], {}), "(scale_data_filt['Ebc'] * scale_data_filt['w'] * scale_data_filt['m'])\n", (11042, 11112), True, 'import numpy as np\n'), ((10572, 10630), 'numpy.sum', 'np.sum', (["(scale_data_filt['Ebq'] * scale_data_filt['m'] ** 2)"], {}), "(scale_data_filt['Ebq'] * scale_data_filt['m'] ** 2)\n", (10578, 10630), True, 'import numpy as np\n'), ((10674, 10732), 'numpy.sum', 'np.sum', (["(scale_data_filt['Ebc'] * scale_data_filt['m'] ** 2)"], {}), "(scale_data_filt['Ebc'] * scale_data_filt['m'] ** 2)\n", (10680, 10732), True, 'import numpy as np\n'), ((11515, 11560), 'numpy.corrcoef', 'np.corrcoef', (["scale_data['q']", "scale_data['m']"], {}), "(scale_data['q'], scale_data['m'])\n", (11526, 11560), True, 'import numpy as np\n'), ((11607, 11652), 'numpy.corrcoef', 'np.corrcoef', (["scale_data['c']", "scale_data['m']"], {}), "(scale_data['c'], scale_data['m'])\n", (11618, 11652), True, 'import numpy as np\n'), ((11699, 11744), 'numpy.corrcoef', 'np.corrcoef', (["scale_data['q']", "scale_data['c']"], {}), "(scale_data['q'], scale_data['c'])\n", (11710, 11744), True, 'import numpy as np\n')] |
# Image augmentation with ImageDataGenerator
# Steps
# 1. Prepare images dataset
# 2. create ImageDataGenerator
# 3. Create iterators flow() or flow_from_directory(...)
# 4. Fit
# Horizontal flip image augmentation (the generator mirrors images, it
# does not shift them)
from PIL import Image
from numpy import expand_dims
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
from matplotlib import pyplot

# Load the sample image and convert it to a (height, width, channels) array.
img = load_img('bird.jpg')
data = img_to_array(img)
# expand dimension to one sample: (1, height, width, channels)
samples = expand_dims(data, 0)
dgen = ImageDataGenerator(horizontal_flip=True)
# make iterator
it = dgen.flow(samples, batch_size=1)
# generate samples and plot a 3x3 grid of augmented variants
for i in range(9):
    pyplot.subplot(3, 3, i + 1)
    # Generate batch of images (builtin next(); the legacy .next()
    # method is a Python 2 idiom)
    batch = next(it)
    # convert to unsigned ints for viewing
    image = batch[0].astype('uint8')
    pyplot.imshow(image)
pyplot.show()
"matplotlib.pyplot.imshow",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.ImageDataGenerator",
"keras.preprocessing.image.load_img",
"numpy.expand_dims",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((389, 409), 'keras.preprocessing.image.load_img', 'load_img', (['"""bird.jpg"""'], {}), "('bird.jpg')\n", (397, 409), False, 'from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\n'), ((418, 435), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (430, 435), False, 'from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\n'), ((480, 500), 'numpy.expand_dims', 'expand_dims', (['data', '(0)'], {}), '(data, 0)\n', (491, 500), False, 'from numpy import expand_dims\n'), ((509, 549), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'horizontal_flip': '(True)'}), '(horizontal_flip=True)\n', (527, 549), False, 'from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\n'), ((838, 851), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (849, 851), False, 'from matplotlib import pyplot\n'), ((652, 679), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(330 + 1 + i)'], {}), '(330 + 1 + i)\n', (666, 679), False, 'from matplotlib import pyplot\n'), ((817, 837), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['image'], {}), '(image)\n', (830, 837), False, 'from matplotlib import pyplot\n')] |
"""
An interface to PISM NetCDF files
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from .colors import default_cmaps
sec_year = 365*24*3600.
class PISMDataset():
    """An interface to PISM NetCDF files

    Provide simple plot methods for quick visualization of PISM results.
    """

    def __init__(self, file_name, *args, **kwargs):
        """Open the NetCDF file and cache the coordinate and time axes.

        Parameters
        ----------
        file_name : str
            Path of the PISM NetCDF file. Extra arguments are forwarded
            to ``netCDF4.Dataset``.
        """
        self.data = Dataset(file_name, *args, **kwargs)
        self._file_name = file_name
        try:
            # Spatial axes, converted from meters to kilometers.
            self.x = self.data.variables['x'][:]/1000.0  # km
            self.y = self.data.variables['y'][:]/1000.0
            # 2D node-coordinate grids matching the shape of mapview fields.
            ny = len(self.data.variables['y'][:])
            nx = len(self.data.variables['x'][:])
            self.node_x = np.zeros((ny, nx))
            for j in range(nx):
                self.node_x[:, j] = self.x[j]
            self.node_y = np.zeros((ny, nx))
            for i in range(ny):
                self.node_y[i, :] = self.y[i]
        except Exception:
            # File has no spatial dimensions (e.g. a scalar time series).
            self.x = None
            self.y = None
        try:
            self.time = self.data.variables['time'][:]/sec_year  # yr
        except Exception:
            # File has no time dimension.
            self.time = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the underlying NetCDF file when leaving a `with` block.
        self.close()

    def _get_axes(self, ax=None):
        """Return `ax`, or the current pyplot axes when `ax` is None."""
        if ax is None:
            ax = plt.gca()
        return ax

    def _get_fig(self, fig=None):
        """Return `fig`, or the current pyplot figure when `fig` is None."""
        if fig is None:
            fig = plt.gcf()
        return fig

    def _add_mask(self, z, t=None, mask_var=None, mask_thold=None, *args, **kwargs):
        """Mask `z` where `mask_var` is less than or equal to `mask_thold`.

        Returns `z` unchanged when no mask variable/threshold is given.
        """
        if mask_var is None or mask_thold is None:
            return z
        if mask_var in self.data.variables:
            mask_data = self._get_2d_data(mask_var, t)
            # Invalidate points below the threshold, then mask the NaNs.
            z[np.where(mask_data <= mask_thold)] = np.nan
            z = np.ma.masked_where(np.isnan(z), z)
            np.ma.set_fill_value(z, np.nan)
        else:
            sys.exit("pypismplot error: Cannot find {} in {}".format(mask_var, self._file_name))
        return z

    def _get_2d_data(self, var_name, t=None):
        """Extract the 2D slice of `var_name`, at time `t` if required."""
        var = self.data.variables[var_name]
        time = self.time
        if t is None:
            if len(var.shape) == 2:
                z = var[:]
            else:
                if len(var) == 1:
                    z = var[:][0]
                else:
                    sys.exit("pypismplot error: "\
                             +"t is required because {} has time dimension".format(self._file_name))
        else:
            # Fancy indexing with an empty index array does not raise, so
            # an absent t must be detected by testing the selection size
            # explicitly (a bare try/except here would never trigger).
            idx = np.where(time == t)[0]
            if idx.size == 0:
                sys.exit("pypismplot error: Cannot find t={} in {}".format(t, self._file_name))
            z = var[idx]
            z = z.squeeze()
        return z

    def close(self):
        """Close the underlying NetCDF file."""
        self.data.close()

    def get_masked_data(self, var_name, t=None, mask_var=None, mask_thold=None):
        """Extract data with mask

        Parameters
        ----------
        var_name : str
            Variable name to plot
        t : float, optional
            When the PISM data contains time dimension, t is required
        mask_var : str, optional
            Variable to create mask
        mask_thold : float, optional
            Variable threshold to create mask
        """
        z = self._get_2d_data(var_name, t)
        z = self._add_mask(z, t=t, mask_var=mask_var, mask_thold=mask_thold)
        return z

    def pcolormesh(self, var_name, ax=None, *args, **kwargs):
        """Plot mapview of PISM data

        Parameters
        ----------
        var_name : str
            Variable name to plot
        ax : matplotlib axes
            Axes where data are potted
        t : float, optional
            When the PISM data contains time dimension, t is required
        mask_var : str, optional
            Variable to create mask
        mask_thold : float, optional
            Variable threshold to create mask
        title : str, optional
            Title, default is long_name of the variable,
            use var_name if long_name does not exist,
            set None to disable
        allow_colorbar : bool, optional
            If true, show colorbar, default False

        For other parameters, see matplotlib doc
        """
        t = kwargs.pop('t', None)
        mask_var = kwargs.pop('mask_var', None)
        mask_thold = kwargs.pop('mask_thold', None)
        try:
            title = self.data.variables[var_name].long_name
        except (KeyError, AttributeError):
            # Variable is missing or has no long_name metadata.
            title = var_name
        title = kwargs.pop('title', title)
        allow_colorbar = kwargs.pop('allow_colorbar', False)
        ax = self._get_axes(ax)
        # NOTE: 'box-forced' was removed in matplotlib 3.0; this code
        # targets older matplotlib versions.
        ax.set_aspect(aspect='equal', adjustable='box-forced')
        xx, yy = np.meshgrid(self.x, self.y)
        z = self.get_masked_data(var_name, t=t, mask_var=mask_var, mask_thold=mask_thold)
        im = ax.pcolormesh(xx, yy, z,
                           cmap=kwargs.pop('cmap', default_cmaps.get(var_name)),
                           **kwargs)
        ax.set_xlabel('X (km)')
        ax.set_ylabel('Y (km)')
        if title is not None:
            ax.set_title(title)
        if allow_colorbar:
            cbar = plt.colorbar(im, ax=ax, orientation='horizontal', shrink=0.6)
            try:
                cbar.set_label(self.data.variables[var_name].units)
            except AttributeError:
                # Variable has no units metadata; leave the label empty.
                pass
        return im

    def imshow(self, var_name, ax=None, *args, **kwargs):
        """Plot mapview of PISM data

        Parameters
        ----------
        var_name : str
            Variable name to plot
        ax : matplotlib axes
            Axes where data are potted
        t : float, optional
            When the PISM data contains time dimension, t is required
        mask_var : str, optional
            Variable to create mask
        mask_thold : float, optional
            Variable threshold to create mask
        title : str, optional
            Title, default is long_name of the variable,
            use var_name if long_name does not exist,
            set None to disable
        allow_colorbar : bool, optional
            If true, show colorbar, default False

        For other parameters, see matplotlib doc
        """
        t = kwargs.pop('t', None)
        mask_var = kwargs.pop('mask_var', None)
        mask_thold = kwargs.pop('mask_thold', None)
        try:
            title = self.data.variables[var_name].long_name
        except (KeyError, AttributeError):
            # Variable is missing or has no long_name metadata.
            title = var_name
        title = kwargs.pop('title', title)
        allow_colorbar = kwargs.pop('allow_colorbar', False)
        ax = self._get_axes(ax)
        # NOTE: 'box-forced' was removed in matplotlib 3.0; this code
        # targets older matplotlib versions.
        ax.set_aspect(aspect='equal', adjustable='box-forced')
        xx, yy = np.meshgrid(self.x, self.y)
        z = self.get_masked_data(var_name, t=t, mask_var=mask_var, mask_thold=mask_thold)
        im = ax.imshow(z, cmap=kwargs.pop('cmap', default_cmaps.get(var_name)),
                       origin=kwargs.pop('origin', 'lower'),
                       extent=kwargs.pop('extent',
                           [self.node_x.min(), self.node_x.max(), self.node_y.min(), self.node_y.max()]),
                       **kwargs)
        ax.set_xlabel('X (km)')
        ax.set_ylabel('Y (km)')
        if title is not None:
            ax.set_title(title)
        if allow_colorbar:
            cbar = plt.colorbar(im, ax=ax, orientation='horizontal', shrink=0.6)
            try:
                cbar.set_label(self.data.variables[var_name].units)
            except AttributeError:
                # Variable has no units metadata; leave the label empty.
                pass
        return im

    def plot_time_series(self, var_name, ax=None, *args, **kwargs):
        """Plot time series of PISM data

        Parameters
        ----------
        var_name : str
            Variable name to plot
        ax : matplotlib axes
            Axes where data are potted
        time_range : list, optional
            Range of time shown in figure
        ylabel : str, optional
            Label of Y axis, default is long_name of the variable,
            use var_name if long_name does not exist,
            set None to disable ('title' is also accepted for backward
            compatibility)
        """
        time_range = kwargs.pop('time_range', None)
        try:
            ylabel = self.data.variables[var_name].long_name
        except (KeyError, AttributeError):
            # Variable is missing or has no long_name metadata.
            ylabel = var_name
        # Accept the documented 'ylabel' kwarg; keep the legacy 'title'
        # kwarg working for existing callers.
        ylabel = kwargs.pop('ylabel', kwargs.pop('title', ylabel))
        ax = self._get_axes(ax)
        time = self.time
        if time is None:
            sys.exit("pypismplot error: "\
                     +"{} does not have time dimension.".format(self._file_name))
        plot_data = self.data.variables[var_name][:]
        im = ax.plot(time/1000.0, plot_data, **kwargs)  # time axis in kyr
        ax.set_xlabel('Time (kyr)')
        if ylabel is not None:
            ax.set_ylabel(ylabel)
        if time_range is not None:
            ax.set_xlim(time_range)
        return im

    def animation(self, var_name, fig=None, *args, **kwargs):
        """Make animation of PISM data

        Parameters
        ----------
        var_name : str
            Variable name to plot
        fig : matplotlib figure
            Figure where data are plotted
        outfile : str, optional
            Name of out file
        time_range : list, optional
            Range of time shown in animation
        title : str, optional
            Title, default is long_name of the variable,
            use var_name if long_name does not exist,
            set None to disable
        interval : number, optional
            Delay between frames in milliseconds. Defaults to 200.
        repeat_delay : number, optional
            If the animation in repeated,
            adds a delay in milliseconds before repeating the animation.
            Defaults to 500.
        repeat : bool, optional
            Controls whether the animation should repeat
            when the sequence of frames is completed.
            Defaults to True.
        """
        import matplotlib.animation
        time_range = kwargs.pop('time_range', None)
        outfile = kwargs.pop('outfile', 'animation_{}.gif'.format(self._file_name))
        try:
            title = self.data.variables[var_name].long_name
        except (KeyError, AttributeError):
            # Variable is missing or has no long_name metadata.
            title = var_name
        title = kwargs.pop('title', title)
        interval = kwargs.pop('interval', 200)
        repeat_delay = kwargs.pop('repeat_delay', 500)
        repeat = kwargs.pop('repeat', True)
        blit = kwargs.pop('blit', False)  # it seems this must be False for pcolormesh
        fig = self._get_fig(fig)
        time = self.time
        if time is None:
            sys.exit("pypismplot error: "\
                     +"{} does not have time dimension.".format(self._file_name))
        if time_range is None:
            start_index = 0
            end_index = len(time)
        else:
            # Map the requested time bounds onto frame indices.
            start_index, = np.where(time == time_range[0])
            if len(start_index) < 1:
                sys.exit("pypismplot error: time_range is invalid.")
            start_index = start_index[0]
            end_index, = np.where(time == time_range[1])
            if len(end_index) < 1:
                sys.exit("pypismplot error: time_range is invalid.")
            end_index = end_index[0]

        def _update(t):
            # Redraw the whole figure for each frame.
            fig.clf()
            ax = plt.gca()
            im = self.imshow(var_name, ax=ax, t=t, title=title+' (t={})'.format(t), **kwargs)
            return ax, im

        ani = matplotlib.animation.FuncAnimation(fig, _update, time[start_index:end_index+1],
                                                 interval=interval,
                                                 repeat_delay=repeat_delay,
                                                 repeat=repeat, blit=blit)
        ani.save(outfile)
        return ani
| [
"numpy.where",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"netCDF4.Dataset",
"matplotlib.pyplot.colorbar",
"numpy.isnan",
"numpy.ma.set_fill_value",
"sys.exit",
"numpy.meshgrid"
] | [((410, 445), 'netCDF4.Dataset', 'Dataset', (['file_name', '*args'], {}), '(file_name, *args, **kwargs)\n', (417, 445), False, 'from netCDF4 import Dataset\n'), ((4840, 4867), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (4851, 4867), True, 'import numpy as np\n'), ((6799, 6826), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (6810, 6826), True, 'import numpy as np\n'), ((1500, 1509), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1507, 1509), True, 'import matplotlib.pyplot as plt\n'), ((1605, 1614), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1612, 1614), True, 'import matplotlib.pyplot as plt\n'), ((2010, 2041), 'numpy.ma.set_fill_value', 'np.ma.set_fill_value', (['z', 'np.nan'], {}), '(z, np.nan)\n', (2030, 2041), True, 'import numpy as np\n'), ((5287, 5348), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax', 'orientation': '"""horizontal"""', 'shrink': '(0.6)'}), "(im, ax=ax, orientation='horizontal', shrink=0.6)\n", (5299, 5348), True, 'import matplotlib.pyplot as plt\n'), ((7435, 7496), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax', 'orientation': '"""horizontal"""', 'shrink': '(0.6)'}), "(im, ax=ax, orientation='horizontal', shrink=0.6)\n", (7447, 7496), True, 'import matplotlib.pyplot as plt\n'), ((10904, 10935), 'numpy.where', 'np.where', (['(time == time_range[0])'], {}), '(time == time_range[0])\n', (10912, 10935), True, 'import numpy as np\n'), ((11104, 11135), 'numpy.where', 'np.where', (['(time == time_range[1])'], {}), '(time == time_range[1])\n', (11112, 11135), True, 'import numpy as np\n'), ((11344, 11353), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11351, 11353), True, 'import matplotlib.pyplot as plt\n'), ((1905, 1938), 'numpy.where', 'np.where', (['(mask_data <= mask_thold)'], {}), '(mask_data <= mask_thold)\n', (1913, 1938), True, 'import numpy as np\n'), ((1982, 1993), 'numpy.isnan', 'np.isnan', 
(['z'], {}), '(z)\n', (1990, 1993), True, 'import numpy as np\n'), ((10985, 11037), 'sys.exit', 'sys.exit', (['"""pypismplot error: time_range is invalid."""'], {}), "('pypismplot error: time_range is invalid.')\n", (10993, 11037), False, 'import sys\n'), ((11183, 11235), 'sys.exit', 'sys.exit', (['"""pypismplot error: time_range is invalid."""'], {}), "('pypismplot error: time_range is invalid.')\n", (11191, 11235), False, 'import sys\n'), ((2688, 2707), 'numpy.where', 'np.where', (['(time == t)'], {}), '(time == t)\n', (2696, 2707), True, 'import numpy as np\n')] |
# Copyright 2016, FBPIC contributors
# Authors: <NAME>, <NAME>
# License: 3-Clause-BSD-LBNL
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It defines a set of common longitudinal laser profiles.
"""
import numpy as np
from scipy.constants import c
from scipy.interpolate import interp1d
from scipy.special import factorial
# Generic classes
# ---------------
class LaserLongitudinalProfile(object):
    """
    Abstract base for all 1D longitudinal laser profiles.

    A longitudinal profile, combined with a 2D transverse profile, yields a
    full 3D laser profile that is valid under the paraxial approximation.
    Subclasses must override `evaluate(z, t)` with the same signature.
    """

    def __init__(self, propagation_direction, gpu_capable=False):
        """
        Record the propagation direction of the laser.
        (Subclasses should call this method at initialization.)

        Parameter
        ---------
        propagation_direction: int
            Either 1 (laser propagates towards positive z) or -1
            (laser propagates towards negative z).

        gpu_capable: boolean
            Whether this profile accepts cupy arrays on GPU; usually
            true when only standard arithmetic and numpy operations are
            used. Default: False.
        """
        # Reject anything other than the two allowed directions early.
        assert propagation_direction in [-1, 1]
        self.propag_direction = float(propagation_direction)
        self.gpu_capable = gpu_capable

    def evaluate(self, z, t):
        """
        Return the complex longitudinal laser profile.

        The profile must be valid for any z and t; under the paraxial
        approximation this amounts to a simple translation at c*t along
        the z axis (diffraction is handled by the transverse profile).

        Parameters
        -----------
        z: ndarray (meters)
            Longitudinal positions at which to evaluate the profile
            (lab frame).

        t: ndarray or float (seconds)
            Time at which to evaluate the profile (lab frame).

        Returns:
        --------
        profile: ndarray
            Complex array with the same shape as z.
        """
        # Dummy field: subclasses replace this with the actual profile.
        return np.zeros_like(z, dtype='complex')

    def squared_profile_integral(self):
        """
        Return the integral of the squared modulus of the (complex)
        profile along the z axis:

        .. math::

            \\int_{-\\infty}^\\infty \,dz|f(z)|^2

        Returns:
        --------
        integral: float
        """
        # Dummy value: subclasses replace this with the actual integral.
        return 0
# Particular classes for each longitudinal laser profile
# ------------------------------------------------------
class GaussianChirpedLongitudinalProfile(LaserLongitudinalProfile):
    """Longitudinal profile of a (possibly chirped) Gaussian laser pulse."""

    def __init__(self, tau, z0, lambda0=0.8e-6, cep_phase=0.,
                 phi2_chirp=0., propagation_direction=1):
        """
        Define the complex longitudinal profile of a Gaussian laser pulse.

        At the focus and for zero chirp, this corresponds to an axial
        electric field:

        .. math::

            E(z,t) \\propto \\exp\\left( -\\frac{(z-z_0-ct)^2}{c^2\\tau^2} \\right)
            \\cos[ k_0( z - z_0 - ct ) - \\phi_{cep} ]

        where :math:`k_0 = 2\\pi/\\lambda_0` is the wavevector, :math:`\\tau`
        is the laser duration, and :math:`\\phi_{cep}` is the CEP phase.
        For a transform-limited pulse, the peak field amplitude of the
        profile is unity; a non-zero chirp reduces the peak amplitude while
        keeping the pulse energy constant.

        Parameters
        ----------
        tau: float (in second)
            The duration of the laser (in the lab frame),
            defined as :math:`\\tau` in the above formula.
        z0: float (in meter)
            The initial position of the centroid of the laser
            (in the lab frame), defined as :math:`z_0` in the above formula.
        lambda0: float (in meter), optional
            The wavelength of the laser (in the lab frame), defined as
            :math:`\\lambda_0` in the above formula.
            Default: 0.8 microns (Ti:Sapph laser).
        cep_phase: float (in radian), optional
            The Carrier Envelope Phase (CEP), defined as :math:`\\phi_{cep}`
            in the above formula (i.e. the phase of the laser oscillation
            at the position where the envelope is maximum).
        phi2_chirp: float (in second^2)
            The amount of temporal chirp at focus (in the lab frame):
            a wave packet centered on :math:`(\\omega_0 + \\delta\\omega)`
            reaches its peak intensity at
            :math:`z(\\delta\\omega) = z_0 - c \\phi^{(2)} \\, \\delta\\omega`.
            Thus a positive :math:`\\phi^{(2)}` corresponds to positive chirp,
            i.e. red part of the spectrum in the front of the pulse and
            blue part in the back.
        propagation_direction: int, optional
            Either 1 (laser propagates towards positive z)
            or -1 (laser propagates towards negative z).
        """
        # Only numpy arithmetic is used below, so this profile is GPU capable.
        LaserLongitudinalProfile.__init__(self, propagation_direction,
                                          gpu_capable=True)
        # Store the physical parameters.
        self.z0 = z0
        self.k0 = 2*np.pi/lambda0
        self.cep_phase = cep_phase
        self.phi2_chirp = phi2_chirp
        self.inv_ctau2 = 1. / (c * tau) ** 2

    def evaluate(self, z, t):
        """
        See the docstring of LaserLongitudinalProfile.evaluate
        """
        # The complex longitudinal profile follows from the Fourier transform
        # of the laser at focus,
        #   E(\omega) = exp( -(\omega-\omega_0)^2(\tau^2/4 + i \phi^(2)/2) ),
        # transformed back to the time domain.
        prop_dir = self.propag_direction
        # Comoving coordinate of the pulse
        u = prop_dir * (z - self.z0) - c * t
        # Stretch factor due to chirp
        stretch_factor = 1 - 2j * self.phi2_chirp * c ** 2 * self.inv_ctau2
        # CEP phase + carrier oscillation + (chirp-stretched) Gaussian envelope
        exp_argument = - 1j * self.cep_phase \
            + 1j * self.k0 * u \
            - 1. / stretch_factor * self.inv_ctau2 * u ** 2
        return np.exp(exp_argument) / stretch_factor ** 0.5

    def squared_profile_integral(self):
        """
        See the docstring of LaserLongitudinalProfile.squared_profile_integral
        """
        # For the envelope above, \int |f|^2 dz = sqrt(pi/2) * c*tau
        return np.sqrt(0.5 * np.pi / self.inv_ctau2)
class CustomSpectrumLongitudinalProfile(LaserLongitudinalProfile):
    """Class that calculates a longitudinal profile with a user defined
    spectral amplitude and phase."""
    def __init__(self, z0, spectrum_file,
                 phi2_chirp=0., phi3_chirp=0., phi4_chirp=0.,
                 subtract_linear_phase=False,
                 propagation_direction=1):
        """
        Define the complex longitudinal profile of the laser pulse,
        from the spectrum provided in `spectrum_file`.
        More specifically, the temporal characteristics of the pulse are
        calculated numerically via the spectral intensity and phase which
        are provided to the class as a path to a csv file containing the data.
        More specifically, the electric field computed according to:
        .. math::
            E(z,t) \propto \int_{-\infty}^{\infty}\!\! dk \;k
                \sqrt{I(2\pi/k)}
                \,e^{i\phi(2\pi/k)}\,e^{i k (z-z0 -ct)}
        where :math:`I` and :math:`\phi` are the
        spectral intensity provided in the csv file (as a function of the
        wavelength :math:`\lambda=2\pi/k`). (The fact that the integrand
        is multiplied by :math:`k` in the above formula is because
        the csv file provides the intensity distribution over wavelengths
        :math:`\lambda`, instead of over wavevectors :math:`k`.)
        Parameters:
        -----------
        z0: float (in meter)
            The initial position of the centroid of the laser
            (in the lab frame), defined as :math:`z_0` in the above formula.
        spectrum_file: file path
            The path to a csv file containing 3 columns (no headers).
            The three columns should represent wavelength (in m), spectral
            intensity (in the same dimension as J/m ; the exact unit/scaling
            coefficient does not matter, since the overall amplitude will
            be rescaled anyway by FBPIC when this class is used) and
            spectral phase (in radians). Use a "\t" tab as the deliminator
            in the file. The spectral phase can be omitted, in which case it
            will be assumed to be 0.
        phi2_chirp: float (in second^2), optional
            The amount of additional temporal chirp, at focus (in the
            lab frame). This spectral phase is added to that read from
            spectrum_file. A positive :math:`\phi^{(2)}` corresponds to
            a positive chirp, i.e. red part of the spectrum in the
            front of the pulse and blue part of the spectrum in the back.
        phi3_chirp: float (in second^3), optional
            The amount of additional third order dispersion,
            at focus (in the lab frame). This spectral phase is added to
            that read from spectrum_file. This parameter leads to an
            asymmetrical temporal shape of the laser pulse envelope.
            A positive :math:`\phi^{(3)}` leads to a slow rise
            of the pulse envelope and fast fall, along with appearance
            of post-pulses for higher values of :math:`\phi^{(3)}`.
        phi4_chirp: float (in second^4), optional
            Additional fourth order dispersion at focus (in the lab frame).
            This spectral phase is added to that read from spectrum_file.
        subtract_linear_phase: bool, optional
            If True, a linear slope in the spectral phase read
            from spectrum_file will be subtracted from it. A non-zero
            linear slope leads to pulse being shifted in time.
        propagation_direction: int, optional
            Indicates in which direction the laser propagates.
            This should be either 1 (laser propagates towards positive z)
            or -1 (laser propagates towards negative z).
        """
        # Initialize propagation direction
        # (gpu_capable=False: the evaluation below relies on scipy interp1d)
        LaserLongitudinalProfile.__init__(self, propagation_direction,
                                          gpu_capable=False)
        # Load the spectrum from the text file
        # (columns: wavelength [m], spectral intensity, optional phase [rad])
        spectrum_data = np.loadtxt( spectrum_file, delimiter='\t' )
        wavelength = spectrum_data[:, 0]
        intensity = spectrum_data[:, 1]
        # The spectral phase column is optional; default to zero phase
        if spectrum_data.shape[1] < 3:
            spectral_phase = np.zeros_like(wavelength)
        else:
            spectral_phase = spectrum_data[:, 2]
        # Convert wavelengths to angular frequencies
        omega_axis = 2 * np.pi * c / wavelength
        # Subtract linear slope, if asked to
        # (a purely linear spectral phase only shifts the pulse in time)
        if subtract_linear_phase:
            # Fit a 4 order polynomial and subtract the linear term
            poly_coeffs = np.polyfit(omega_axis, spectral_phase, 4)
            spectral_phase -= omega_axis * poly_coeffs[-2]
        # Compute central wavelength from the text file data
        # (intensity-weighted mean of the wavelength axis)
        self.lambda0 = np.trapz(wavelength*intensity, wavelength) * \
                            1. / np.trapz(intensity, wavelength)
        self.k0 = 2 * np.pi / self.lambda0
        # Apply spectral phase expansion terms
        # (Taylor expansion of the phase around the central frequency k0*c)
        spectral_phase += 1 / factorial(2) * phi2_chirp * \
                            (omega_axis - self.k0 * c) ** 2
        spectral_phase += 1 / factorial(3) * phi3_chirp * \
                            (omega_axis - self.k0 * c) ** 3
        spectral_phase += 1 / factorial(4) * phi4_chirp * \
                            (omega_axis - self.k0 * c) ** 4
        # Create functions that interpolate the spectral phase and intensity
        # from the text file data
        # (the wavelength**2 factor converts the per-wavelength intensity
        # distribution to a per-wavevector one, up to a constant factor)
        spectral_inten_fn = interp1d( omega_axis,
                                      intensity*wavelength**2,
                                      fill_value=0, bounds_error=False)
        spectral_phase_fn = interp1d( omega_axis, spectral_phase,
                                      fill_value=0, bounds_error=False)
        # Computation Parameters
        lambda_resolution = self.lambda0 / 1000 # spectral resolution
        dt = lambda_resolution / c
        time_window = self.lambda0 * self.lambda0 / c / lambda_resolution
        Nt = np.round(time_window/dt).astype(int)
        # Define the time array and its corresponding frequency array
        time_arr = -0.5*time_window + dt*np.arange(Nt)
        omega_arr = 2*np.pi * np.fft.fftfreq( Nt, dt )
        # Calculate the normalised temporal profile of the electric
        # field from user defined spectrum
        # NOTE(review): this uses the forward FFT together with fftshift;
        # the sign convention must match the time axis above -- confirm.
        spectral_Efield = np.sqrt( spectral_inten_fn(omega_arr) ) * \
                          np.exp( 1j*spectral_phase_fn(omega_arr) )
        temporal_Efield = np.fft.fftshift(np.fft.fft(spectral_Efield))
        # Normalize so that the peak amplitude is unity
        temporal_Efield = temporal_Efield/abs(temporal_Efield).max()
        # Note: this part could be potentially ported to GPU with cupy.interp
        self.interp_Efield_function = interp1d(
            self.propag_direction*z0 - c*time_arr,
            temporal_Efield, fill_value=0, bounds_error=False )
        # Compute integral of squared field
        # (cached; returned by squared_profile_integral below)
        self.squared_field_integral = np.trapz( abs(temporal_Efield)**2,
                                                c*time_arr )
    def get_mean_wavelength(self):
        """
        Extract the mean wavelength.
        """
        return self.lambda0
    def squared_profile_integral(self):
        """
        See the docstring of LaserLongitudinalProfile.squared_profile_integral
        """
        return self.squared_field_integral
    def evaluate(self, z, t):
        """
        See the docstring of LaserLongitudinalProfile.evaluate
        """
        # Interpolate the temporal profile of the pulse.
        # We center the pulse temporally around the pulse starting point
        profile = self.interp_Efield_function( self.propag_direction*z - c*t )
        return profile
| [
"numpy.trapz",
"numpy.polyfit",
"numpy.arange",
"scipy.special.factorial",
"numpy.fft.fftfreq",
"numpy.fft.fft",
"scipy.interpolate.interp1d",
"numpy.exp",
"numpy.loadtxt",
"numpy.zeros_like",
"numpy.round"
] | [((2684, 2717), 'numpy.zeros_like', 'np.zeros_like', (['z'], {'dtype': '"""complex"""'}), "(z, dtype='complex')\n", (2697, 2717), True, 'import numpy as np\n'), ((11578, 11619), 'numpy.loadtxt', 'np.loadtxt', (['spectrum_file'], {'delimiter': '"""\t"""'}), "(spectrum_file, delimiter='\\t')\n", (11588, 11619), True, 'import numpy as np\n'), ((12961, 13048), 'scipy.interpolate.interp1d', 'interp1d', (['omega_axis', '(intensity * wavelength ** 2)'], {'fill_value': '(0)', 'bounds_error': '(False)'}), '(omega_axis, intensity * wavelength ** 2, fill_value=0,\n bounds_error=False)\n', (12969, 13048), False, 'from scipy.interpolate import interp1d\n'), ((13146, 13216), 'scipy.interpolate.interp1d', 'interp1d', (['omega_axis', 'spectral_phase'], {'fill_value': '(0)', 'bounds_error': '(False)'}), '(omega_axis, spectral_phase, fill_value=0, bounds_error=False)\n', (13154, 13216), False, 'from scipy.interpolate import interp1d\n'), ((14211, 14317), 'scipy.interpolate.interp1d', 'interp1d', (['(self.propag_direction * z0 - c * time_arr)', 'temporal_Efield'], {'fill_value': '(0)', 'bounds_error': '(False)'}), '(self.propag_direction * z0 - c * time_arr, temporal_Efield,\n fill_value=0, bounds_error=False)\n', (14219, 14317), False, 'from scipy.interpolate import interp1d\n'), ((7264, 7284), 'numpy.exp', 'np.exp', (['exp_argument'], {}), '(exp_argument)\n', (7270, 7284), True, 'import numpy as np\n'), ((11771, 11796), 'numpy.zeros_like', 'np.zeros_like', (['wavelength'], {}), '(wavelength)\n', (11784, 11796), True, 'import numpy as np\n'), ((12082, 12123), 'numpy.polyfit', 'np.polyfit', (['omega_axis', 'spectral_phase', '(4)'], {}), '(omega_axis, spectral_phase, 4)\n', (12092, 12123), True, 'import numpy as np\n'), ((12344, 12375), 'numpy.trapz', 'np.trapz', (['intensity', 'wavelength'], {}), '(intensity, wavelength)\n', (12352, 12375), True, 'import numpy as np\n'), ((13676, 13698), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['Nt', 'dt'], {}), '(Nt, dt)\n', (13690, 13698), 
True, 'import numpy as np\n'), ((13995, 14022), 'numpy.fft.fft', 'np.fft.fft', (['spectral_Efield'], {}), '(spectral_Efield)\n', (14005, 14022), True, 'import numpy as np\n'), ((12268, 12312), 'numpy.trapz', 'np.trapz', (['(wavelength * intensity)', 'wavelength'], {}), '(wavelength * intensity, wavelength)\n', (12276, 12312), True, 'import numpy as np\n'), ((13483, 13509), 'numpy.round', 'np.round', (['(time_window / dt)'], {}), '(time_window / dt)\n', (13491, 13509), True, 'import numpy as np\n'), ((13632, 13645), 'numpy.arange', 'np.arange', (['Nt'], {}), '(Nt)\n', (13641, 13645), True, 'import numpy as np\n'), ((12497, 12509), 'scipy.special.factorial', 'factorial', (['(2)'], {}), '(2)\n', (12506, 12509), False, 'from scipy.special import factorial\n'), ((12615, 12627), 'scipy.special.factorial', 'factorial', (['(3)'], {}), '(3)\n', (12624, 12627), False, 'from scipy.special import factorial\n'), ((12733, 12745), 'scipy.special.factorial', 'factorial', (['(4)'], {}), '(4)\n', (12742, 12745), False, 'from scipy.special import factorial\n')] |
import unittest
from testinfrastructure.InDirTest import InDirTest
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from sympy import Symbol, Piecewise
from scipy.interpolate import interp1d
from CompartmentalSystems.myOdeResult import get_sub_t_spans, solve_ivp_pwc
class TestmyOdeResult(InDirTest):
    """Tests for solve_ivp_pwc and get_sub_t_spans."""

    def test_solve_ivp_pwc(self):
        t = Symbol('t')
        slopes = [1, -1, 1]
        disc_times = [410, 820]
        t_start, t_end = 0, 1000
        ref_times = np.arange(t_start, t_end, 10)
        times = np.array([0, 300, 600, 900])
        t_span = (t_start, t_end)

        # A single piecewise right-hand side covering the whole interval
        slope = Piecewise(
            (slopes[0], t < disc_times[0]),
            (slopes[1], t < disc_times[1]),
            (slopes[2], True)
        )

        def rhs(tau, x):
            return slope.subs({'t': tau})

        x0 = np.asarray([0])

        # We deliberately choose a method/first_step combination for which
        # the solver gets lost unless it restarts at the discontinuities --
        # this exposes the difference between a one-piece and a
        # many-piece solution.
        sol_single = solve_ivp_pwc(
            rhs,
            t_span,
            x0,
            t_eval=times,
            method='RK45',
            first_step=None
        )

        def make_const_rhs(m):
            return lambda tau, x: m

        rhss = [make_const_rhs(m) for m in slopes]

        # Exact solution: piecewise-linear through the corner points
        ref_func = interp1d(
            [t_start] + list(disc_times) + [t_end],
            [0.0, 410.0, 0.0, 180.0]
        )

        # Without restarting at disc_times, the solution drifts far away
        self.assertFalse(
            np.allclose(ref_func(times), sol_single.y[0, :], atol=400)
        )

        sol_pwc = solve_ivp_pwc(
            rhss,
            t_span,
            x0,
            method='RK45',
            first_step=None,
            disc_times=disc_times
        )
        self.assertTrue(
            np.allclose(ref_func(sol_pwc.t), sol_pwc.y[0, :])
        )
        self.assertTrue(
            np.allclose(ref_func(ref_times), sol_pwc.sol(ref_times)[0, :])
        )

        sol_pwc_t_eval = solve_ivp_pwc(
            rhss,
            t_span,
            x0,
            t_eval=times,
            method='RK45',
            first_step=None,
            disc_times=disc_times
        )
        self.assertTrue(
            np.allclose(times, sol_pwc_t_eval.t)
        )
        self.assertTrue(
            np.allclose(ref_func(times), sol_pwc_t_eval.y[0, :])
        )

        # Diagnostic plot of the reference and the three solutions
        fig, ax = plt.subplots(nrows=1, ncols=1)
        ax.plot(ref_times, ref_func(ref_times), color='blue', label='ref')
        ax.plot(times, ref_func(times), '*', label='ref points')
        ax.plot(sol_single.t, sol_single.y[0, :], 'o', label='pure solve_ivp')
        ax.plot(sol_pwc.t, sol_pwc.y[0, :], '+', label='solve_ivp_pwc', ls='--')
        ax.legend()
        fig.savefig('inaccuracies.pdf')

    def test_sub_t_spans(self):
        disc_times = np.array([2, 3, 4])
        # (t_span, expected sub spans for the 4 intervals cut by disc_times)
        cases = [
            ((0, 1),      [(0, 1), (), (), ()]),
            ((0, 2),      [(0, 2), (2, 2), (), ()]),
            ((0, 2.5),    [(0, 2), (2, 2.5), (), ()]),
            ((0, 5),      [(0, 2), (2, 3), (3, 4), (4, 5)]),
            ((2, 2),      [(2, 2), (2, 2), (), ()]),
            ((2, 2.5),    [(2, 2), (2, 2.5), (), ()]),
            ((2, 3.2),    [(2, 2), (2, 3), (3, 3.2), ()]),
            ((2, 5),      [(2, 2), (2, 3), (3, 4), (4, 5)]),
            ((3.2, 4),    [(), (), (3.2, 4), (4, 4)]),
            ((3.5, 4.5),  [(), (), (3.5, 4), (4, 4.5)]),
            ((4, 5),      [(), (), (4, 4), (4, 5)]),
            ((5, 7),      [(), (), (), (5, 7)]),
        ]
        for t_span, expected in cases:
            with self.subTest():
                self.assertEqual(
                    get_sub_t_spans(t_span, disc_times),
                    expected
                )
###############################################################################
if __name__ == '__main__':
    # Run the tests in this module sequentially.
    #
    # NOTE: an earlier revision discovered a suite via
    # unittest.defaultTestLoader.discover and ran it through a
    # ConcurrentTestSuite across several processes, exiting with a non-zero
    # status when failures or errors occurred.  That path was commented out
    # and the discovered suite was left unused, so the dead code has been
    # removed.
    unittest.main()
| [
"sympy.Symbol",
"numpy.allclose",
"matplotlib.use",
"CompartmentalSystems.myOdeResult.get_sub_t_spans",
"numpy.asarray",
"numpy.array",
"sympy.Piecewise",
"matplotlib.pyplot.subplots",
"unittest.main",
"CompartmentalSystems.myOdeResult.solve_ivp_pwc",
"numpy.arange",
"unittest.defaultTestLoade... | [((105, 126), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (119, 126), False, 'import matplotlib\n'), ((4546, 4604), 'unittest.defaultTestLoader.discover', 'unittest.defaultTestLoader.discover', (['"""."""'], {'pattern': '__file__'}), "('.', pattern=__file__)\n", (4581, 4604), False, 'import unittest\n'), ((4970, 4985), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4983, 4985), False, 'import unittest\n'), ((398, 409), 'sympy.Symbol', 'Symbol', (['"""t"""'], {}), "('t')\n", (404, 409), False, 'from sympy import Symbol, Piecewise\n'), ((527, 556), 'numpy.arange', 'np.arange', (['t_start', 't_end', '(10)'], {}), '(t_start, t_end, 10)\n', (536, 556), True, 'import numpy as np\n'), ((573, 601), 'numpy.array', 'np.array', (['[0, 300, 600, 900]'], {}), '([0, 300, 600, 900])\n', (581, 601), True, 'import numpy as np\n'), ((684, 769), 'sympy.Piecewise', 'Piecewise', (['(ms[0], t < disc_times[0])', '(ms[1], t < disc_times[1])', '(ms[2], True)'], {}), '((ms[0], t < disc_times[0]), (ms[1], t < disc_times[1]), (ms[2], True)\n )\n', (693, 769), False, 'from sympy import Symbol, Piecewise\n'), ((872, 887), 'numpy.asarray', 'np.asarray', (['[0]'], {}), '([0])\n', (882, 887), True, 'import numpy as np\n'), ((1169, 1245), 'CompartmentalSystems.myOdeResult.solve_ivp_pwc', 'solve_ivp_pwc', (['rhs', 't_span', 'x0'], {'t_eval': 'times', 'method': '"""RK45"""', 'first_step': 'None'}), "(rhs, t_span, x0, t_eval=times, method='RK45', first_step=None)\n", (1182, 1245), False, 'from CompartmentalSystems.myOdeResult import get_sub_t_spans, solve_ivp_pwc\n'), ((1750, 1841), 'CompartmentalSystems.myOdeResult.solve_ivp_pwc', 'solve_ivp_pwc', (['rhss', 't_span', 'x0'], {'method': '"""RK45"""', 'first_step': 'None', 'disc_times': 'disc_times'}), "(rhss, t_span, x0, method='RK45', first_step=None, disc_times=\n disc_times)\n", (1763, 1841), False, 'from CompartmentalSystems.myOdeResult import get_sub_t_spans, solve_ivp_pwc\n'), 
((2258, 2363), 'CompartmentalSystems.myOdeResult.solve_ivp_pwc', 'solve_ivp_pwc', (['rhss', 't_span', 'x0'], {'t_eval': 'times', 'method': '"""RK45"""', 'first_step': 'None', 'disc_times': 'disc_times'}), "(rhss, t_span, x0, t_eval=times, method='RK45', first_step=\n None, disc_times=disc_times)\n", (2271, 2363), False, 'from CompartmentalSystems.myOdeResult import get_sub_t_spans, solve_ivp_pwc\n'), ((2756, 2786), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)'}), '(nrows=1, ncols=1)\n', (2768, 2786), True, 'from matplotlib import pyplot as plt\n'), ((3510, 3529), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (3518, 3529), True, 'import numpy as np\n'), ((2491, 2530), 'numpy.allclose', 'np.allclose', (['times', 'sol_obj_pw_t_eval.t'], {}), '(times, sol_obj_pw_t_eval.t)\n', (2502, 2530), True, 'import numpy as np\n'), ((4336, 4371), 'CompartmentalSystems.myOdeResult.get_sub_t_spans', 'get_sub_t_spans', (['t_span', 'disc_times'], {}), '(t_span, disc_times)\n', (4351, 4371), False, 'from CompartmentalSystems.myOdeResult import get_sub_t_spans, solve_ivp_pwc\n')] |
from ..feature_types import secondary_feature, log
from ..primary.survey_scores import survey_scores
import numpy as np
@secondary_feature(
    name="cortex.survey_results",
    dependencies=[survey_scores]
)
def survey_results(**kwargs):
    """
    Average the score of each survey over the query window.

    :param id (str): participant id.
    :param start (int): start timestamp UTC (ms).
    :param end (int): end timestamp UTC (ms).
    :param resolution (int): time step (ms).
    :return data (dict): window timestamp/duration plus the mean score
        (ignoring NaNs) for every survey returned by survey_scores.
    """
    scores_by_survey = survey_scores(**kwargs)['data']
    # One mean per survey, computed over every recorded score
    means = {
        survey: np.nanmean([entry['score'] for entry in scores_by_survey[survey]])
        for survey in scores_by_survey
    }
    return {'timestamp': kwargs['start'],
            'duration': kwargs['end'] - kwargs['start'],
            **means}
"numpy.nanmean"
] | [((580, 632), 'numpy.nanmean', 'np.nanmean', (["[s['score'] for s in all_scores[survey]]"], {}), "([s['score'] for s in all_scores[survey]])\n", (590, 632), True, 'import numpy as np\n')] |
import numpy as np
import libs.pd_lib as lib #library for simulation routines
import libs.data as data
import libs.plot as vplt #plotting library
from structure.global_constants import *
import structure.initialisation as init
from structure.cell import Tissue, BasicSpringForceNoGrowth
"""run a single voronoi tessellation model simulation"""
l = 10 # population size N=l*l
timend = 10 # simulation time (hours)
timestep = 1. # time intervals to save simulation history
rand = np.random.RandomState()
b,c,DELTA = 10.,1.0,0.0 #prisoner's dilemma game parameters
simulation = lib.simulation_decoupled_update #simulation routine imported from lib
game = lib.prisoners_dilemma_averaged #game imported from lib
game_parameters = (b,c)
history = lib.run_simulation(simulation,l,timestep,timend,rand,DELTA,game,game_parameters)
| [
"libs.pd_lib.run_simulation",
"numpy.random.RandomState"
] | [((481, 504), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (502, 504), True, 'import numpy as np\n'), ((747, 838), 'libs.pd_lib.run_simulation', 'lib.run_simulation', (['simulation', 'l', 'timestep', 'timend', 'rand', 'DELTA', 'game', 'game_parameters'], {}), '(simulation, l, timestep, timend, rand, DELTA, game,\n game_parameters)\n', (765, 838), True, 'import libs.pd_lib as lib\n')] |
import os
from multiprocessing import Queue, Process, Lock
import numpy as np
from twitter_bot_type_classification.features.user import UserFeatures
class CalcWorker(Process):
    """Worker process that turns queued (user, tweets) tasks into
    (user_id, UserFeatures) results."""

    def __init__(self, task_q, results_q):
        self.task_q = task_q
        self.results_q = results_q
        super(CalcWorker, self).__init__()

    def run(self):
        while True:
            item = self.task_q.get()
            if item is None:
                # Forward the poison pill so the consumer can count
                # finished workers, then stop.
                self.results_q.put(None)
                return
            user, tweets = item
            self.results_q.put((user.id, UserFeatures(user, tweets)))
class SaveWorker(Process):
    """Worker process that collects (user_id, features) results and, once
    every producer has finished, saves them to a compressed ``.npz`` file.

    Each producer signals completion by putting ``None`` on ``results_q``;
    after ``fetch_worker_count`` such sentinels have been received, the
    collected features and ids are written to ``filename`` and the worker
    exits.
    """

    def __init__(self, results_q, filename, fetch_worker_count, std_out_lock):
        self.results_q = results_q
        self.filename = filename
        self.fetch_worker_count = fetch_worker_count
        self.std_out_lock = std_out_lock
        super(SaveWorker, self).__init__()

    def run(self):
        features = []
        user_ids = []
        while True:
            res = self.results_q.get()
            if res is None:
                self.fetch_worker_count -= 1
                if self.fetch_worker_count == 0:
                    np.savez_compressed(self.filename,
                                        features=np.asarray(features),
                                        ids=np.asarray(user_ids))
                    # Use the lock as a context manager so it is released
                    # even if printing raises (the original acquire/release
                    # pair could leak the lock).
                    with self.std_out_lock:
                        print("\nSuccessfully saved features as npz file")
                    break
            else:
                user_ids.append(res[0])
                features.append(res[1])
class FeatureCalculator:
    """Fan out per-user feature extraction to a pool of CalcWorker processes
    and persist the collected results through a single SaveWorker."""

    # Handle to the saver process; set by calc_features().
    save_worker = None

    def __init__(self, tasks_q_size=100, results_q_size=100):
        """
        :param tasks_q_size: maximum number of queued (user, tweets) tasks.
        :param results_q_size: maximum number of buffered results.
        :raises ValueError: if either queue size is not positive.
        """
        # Validate with explicit raises rather than `assert`, which is
        # stripped when Python runs with -O.
        if tasks_q_size <= 0:
            raise ValueError("tasks_q_size must be positive")
        if results_q_size <= 0:
            raise ValueError("results_q_size must be positive")
        self.tasks_q_size = tasks_q_size
        self.results_q_size = results_q_size
        self.std_out_lock = Lock()

    def calc_features(self, user_db, tweet_db, filename, limit=500, n_worker=None):
        """Compute features for every user in `user_db` and save them to
        `filename` as a compressed npz archive.

        :param user_db: database exposing get_all_user(); must not be None.
        :param tweet_db: database exposing get_tweets_for_user(user_id), or
            None to compute features without tweets (the original body
            already guarded for this case).
        :param filename: target path for the npz archive; must not be None.
        :param limit: kept for backward compatibility; currently unused.
        :param n_worker: number of worker processes; defaults to
            os.cpu_count(), resolved at call time (the old default was
            evaluated once at function-definition time).
        :raises ValueError: on invalid arguments.
        """
        if user_db is None:
            raise ValueError("user_db must not be None")
        if filename is None:
            raise ValueError("filename must not be None")
        if n_worker is None:
            n_worker = os.cpu_count()
        if n_worker is None or n_worker <= 0:
            raise ValueError("n_worker must be a positive integer")
        # NOTE(review): `limit` is accepted but never used in this method --
        # implement or deprecate it.

        tasks_q = Queue(self.tasks_q_size)
        results_q = Queue(self.results_q_size)

        workers = []
        for _ in range(n_worker):
            worker = CalcWorker(tasks_q, results_q)
            workers.append(worker)
            worker.start()

        for u in user_db.get_all_user():
            tweets = []
            if tweet_db is not None:
                tweets = tweet_db.get_tweets_for_user(u.id)
                # Newest tweets first
                tweets.sort(key=lambda t: t.id, reverse=True)
            tasks_q.put((u, tweets))

        # One poison pill per worker so every CalcWorker terminates.
        for _ in range(len(workers)):
            tasks_q.put(None)

        self.save_worker = SaveWorker(results_q, filename, len(workers), self.std_out_lock)
        self.save_worker.start()
        self.save_worker.join()
| [
"twitter_bot_type_classification.features.user.UserFeatures",
"numpy.asarray",
"os.cpu_count",
"multiprocessing.Lock",
"multiprocessing.Queue"
] | [((1917, 1923), 'multiprocessing.Lock', 'Lock', ([], {}), '()\n', (1921, 1923), False, 'from multiprocessing import Queue, Process, Lock\n'), ((2002, 2016), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (2014, 2016), False, 'import os\n'), ((2213, 2237), 'multiprocessing.Queue', 'Queue', (['self.tasks_q_size'], {}), '(self.tasks_q_size)\n', (2218, 2237), False, 'from multiprocessing import Queue, Process, Lock\n'), ((2258, 2284), 'multiprocessing.Queue', 'Queue', (['self.results_q_size'], {}), '(self.results_q_size)\n', (2263, 2284), False, 'from multiprocessing import Queue, Process, Lock\n'), ((579, 605), 'twitter_bot_type_classification.features.user.UserFeatures', 'UserFeatures', (['user', 'tweets'], {}), '(user, tweets)\n', (591, 605), False, 'from twitter_bot_type_classification.features.user import UserFeatures\n'), ((1281, 1301), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (1291, 1301), True, 'import numpy as np\n'), ((1307, 1327), 'numpy.asarray', 'np.asarray', (['user_ids'], {}), '(user_ids)\n', (1317, 1327), True, 'import numpy as np\n')] |
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ******************************************** GymForagerCFA ***************************************************
"""
GymForagerCFA
-------------
A `GymForagerCFA` is a subclass of `CompositionFunctionApproximator` that uses the `gym-forager
<https://github.com/IntelPNI/gym-forager>`_ as an agent to predict the `net_outcome <ControlMechanism.net_outcome>`
for a `Composition` (or part of one) controlled by an `OptimizationControlMechanism`.
It instantiates an agent with an interface to the gym-forager envirnoment and a specified action-space.
At present its `adapt <CompositionFunctionApproximator.adapt>` method is not implemented.
Its `evaluate <CompositionFunctionApproximator.evaluate>` method calls the DQN to generate an action, and then
calls the gym-forager agent with a specified
action and returns the reward associated with that action.
It is assumed that the action
COMMENT:
.. note::
The RegressionCFA's `update_weights <RegressionCFA.update_weights>` is provided the `feature_values
<OptimizationControlMechanism.feature_values>` and `net_outcome <ControlMechanism.net_outcome>` from the
*previous* trial to update its parameters. Those are then used to determine the `control_allocation
<ControlMechanism.control_allocation>` predicted to yield the greatest `EVC <OptimizationControlMechanism_EVC>`
based on the `feature_values <OptimizationControlMechanism.feature_values>` for the current trial.
COMMENT
"""
import itertools
import numpy as np
import typecheck as tc
import gym_forager
from enum import Enum
from itertools import product
from psyneulink.core.components.functions.learningfunctions import BayesGLM
from psyneulink.core.compositions.compositionfunctionapproximator import CompositionFunctionApproximator
from psyneulink.core.globals.keywords import DEFAULT_VARIABLE
__all__ = ['GymForagerCFA']
class GymForagerCFAError(Exception):
    """Exception raised for errors specific to GymForagerCFA."""

    def __init__(self, error_value):
        # Pass the message on to Exception so that str(exc) is informative
        # (the original stored it only on the instance, leaving str(exc)
        # empty).
        super().__init__(error_value)
        self.error_value = error_value
class GymForagerCFA(CompositionFunctionApproximator):
'''Parameterizes weights of a `update_weights <RegressorCFA.update_weights>` used by its `evaluate
<CompositionFunctionApproximator.evaluate>` method to predict the `net_outcome <ControlMechanism.net_outcome>`
for a `Composition` (or part of one) controlled by an `OptimiziationControlMechanism`, from a set of `feature_values
<OptimizationControlMechanism.feature_values>` and a `control_allocation <ControlMechanism.control_allocation>`
provided by the OptimiziationControlMechanism.
The `feature_values <OptimiziationControlMechanism.feature_values>` and `control_allocation
<ControlMechanism.control_allocation>` passed to the RegressorCFA's `adapt <RegressorCFA.adapt>` method,
and provided as the input to its `update_weights <RegressorCFA.update_weights>`, are represented in the
`vector <PredictionVector.vector>` attribute of a `PredictionVector` assigned to the RegressorCFA`s
`prediction_vector <RegressorCFA.prediction_vector>` attribute. The `feature_values
<OptimizationControlMechanism.feature_values>` are assigned to the features field of the
`prediction_vector <RegressorCFA.prediction_vector>`, and the `control_allocation
<ControlMechanism_control_allocation>` is assigned to the control_allocation field of the `prediction_vector
<RegressorCFA.prediction_vector>`. The `prediction_vector <RegressorCFA.prediction_vector>` may also contain
fields for the `costs ControlMechanism.costs` associated with the `control_allocation
<ControlMechanism.control_allocation>` and for interactions among those terms.
The `regression_weights <RegressorCFA.regression_weights>` returned by the `update_weights
<RegressorCFA.update_weights>` are used by the RegressorCFA's `evaluate <RegressorCFA.evaluate>` method to
predict the `net_outcome <ControlMechanism.net_outcome>` from the
`prediction_vector <RegressorCFA.prediction_vector>`.
'''
def __init__(self,
name=None,
update_weights=BayesGLM,
prediction_terms:tc.optional(list)=None):
'''
Arguments
---------
Attributes
----------
'''
self.update_weights = update_weights
self._instantiate_prediction_terms(prediction_terms)
super().__init__(name=name, update_weights=update_weights)
def initialize(self, features_array, control_signals):
'''Assign owner and instantiate `prediction_vector <RegressorCFA.prediction_vector>`
Must be called before RegressorCFA's methods can be used.
'''
prediction_terms = self.prediction_terms
self.prediction_vector = self.PredictionVector(features_array, control_signals, prediction_terms)
# Assign parameters to update_weights
update_weights_default_variable = [self.prediction_vector.vector, np.zeros(1)]
if isinstance(self.update_weights, type):
self.update_weights = \
self.update_weights(default_variable=update_weights_default_variable)
else:
self.update_weights.reinitialize({DEFAULT_VARIABLE: update_weights_default_variable})
def adapt(self, feature_values, control_allocation, net_outcome, execution_id=None):
'''Update `regression_weights <RegressorCFA.regression_weights>` so as to improve prediction of
**net_outcome** from **feature_values** and **control_allocation**.'''
prediction_vector = self.parameters.prediction_vector.get(execution_id)
previous_state = self.parameters.previous_state.get(execution_id)
if previous_state is not None:
# Update regression_weights
regression_weights = self.update_weights([previous_state, net_outcome], execution_id=execution_id)
# Update vector with current feature_values and control_allocation and store for next trial
prediction_vector.update_vector(control_allocation, feature_values, execution_id)
previous_state = prediction_vector.vector
else:
# Initialize vector and control_signals on first trial
# Note: initialize vector to 1's so that learning_function returns specified priors
# FIX: 11/9/19 LOCALLY MANAGE STATEFULNESS OF ControlSignals AND costs
# prediction_vector.reference_variable = control_allocation
previous_state = np.full_like(prediction_vector.vector, 0)
regression_weights = self.update_weights([previous_state, 0], execution_id=execution_id)
self._set_multiple_parameter_values(
execution_id,
previous_state=previous_state,
prediction_vector=prediction_vector,
regression_weights=regression_weights,
override=True
)
# FIX: RENAME AS _EXECUTE_AS_REP ONCE SAME IS DONE FOR COMPOSITION
# def evaluate(self, control_allocation, num_samples, reinitialize_values, feature_values, context):
    def evaluate(self, feature_values, control_allocation, num_estimates, context, execution_id=None):
        '''Update `prediction_vector <RegressorCFA.prediction_vector>`,
        then multiply by regression_weights.

        Uses the current values of `regression_weights <RegressorCFA.regression_weights>` together with
        values of **control_allocation** and **feature_values** arguments to generate predicted `net_outcome
        <OptimiziationControlMechanism.net_outcome>`.

        .. note::
            If this method is assigned as the `objective_function of a `GradientOptimization` `Function`,
            it is differentiated using `autograd <https://github.com/HIPS/autograd>`_\\.grad().
        '''
        predicted_outcome=0
        prediction_vector = self.parameters.prediction_vector.get(execution_id)
        # Guard against num_estimates being None or 0: always compute at least one estimate.
        count = num_estimates if num_estimates else 1
        for i in range(count):
            terms = self.prediction_terms
            vector = prediction_vector.compute_terms(control_allocation, execution_id=execution_id)
            # FIX: THIS SHOULD GET A SAMPLE RATHER THAN JUST USE THE ONE RETURNED FROM ADAPT METHOD
            #      OR SHOULD MULTIPLE SAMPLES BE DRAWN AND AVERAGED AT END OF ADAPT METHOD?
            #      I.E., AVERAGE WEIGHTS AND THEN OPTIMIZE OR OPTIMIZE FOR EACH SAMPLE OF WEIGHTS AND THEN AVERAGE?
            # NOTE(review): until the FIX above lands, every iteration fetches the
            # same weights, so averaging over `count` iterations is a no-op.
            weights = self.parameters.regression_weights.get(execution_id)
            net_outcome = 0
            # Dot each active prediction term with its slice of the weight vector.
            for term_label, term_value in vector.items():
                if term_label in terms:
                    pv_enum_val = term_label.value
                    item_idx = prediction_vector.idx[pv_enum_val]
                    net_outcome += np.sum(term_value.reshape(-1) * weights[item_idx])
            predicted_outcome+=net_outcome
        # Average the estimates (currently all identical -- see NOTE above).
        predicted_outcome/=count
        return predicted_outcome
@property
def _dependent_components(self):
return list(itertools.chain(
[self.update_weights]
))
| [
"itertools.chain",
"typecheck.optional",
"numpy.full_like",
"numpy.zeros"
] | [((4669, 4686), 'typecheck.optional', 'tc.optional', (['list'], {}), '(list)\n', (4680, 4686), True, 'import typecheck as tc\n'), ((5481, 5492), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (5489, 5492), True, 'import numpy as np\n'), ((7010, 7051), 'numpy.full_like', 'np.full_like', (['prediction_vector.vector', '(0)'], {}), '(prediction_vector.vector, 0)\n', (7022, 7051), True, 'import numpy as np\n'), ((9531, 9569), 'itertools.chain', 'itertools.chain', (['[self.update_weights]'], {}), '([self.update_weights])\n', (9546, 9569), False, 'import itertools\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Train a ConvNet on MNIST using K-FAC.
This library fits a 5-layer ConvNet on MNIST using K-FAC. The model has the
following structure,
- Conv Layer: 5x5 kernel, 16 output channels.
- Max Pool: 3x3 kernel, stride 2.
- Conv Layer: 5x5 kernel, 16 output channels.
- Max Pool: 3x3 kernel, stride 2.
- Linear: 10 output dims.
After 3k~6k steps, this should reach perfect accuracy on the training set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib.kfac.examples import mlp
from tensorflow.contrib.kfac.examples import mnist
# Short aliases for the tf.contrib.kfac sub-modules used throughout this file.
lc = tf.contrib.kfac.layer_collection
oq = tf.contrib.kfac.op_queue
opt = tf.contrib.kfac.optimizer
# Public API of this example module.
# NOTE(review): train_mnist_multitower is defined below but not listed here --
# confirm whether its omission from __all__ is intentional.
__all__ = [
    "conv_layer",
    "max_pool_layer",
    "linear_layer",
    "build_model",
    "minimize_loss_single_machine",
    "minimize_loss_distributed",
    "train_mnist_single_machine",
    "train_mnist_distributed",
]
def conv_layer(layer_id, inputs, kernel_size, out_channels):
  """Builds a convolutional layer with ReLU non-linearity.

  Args:
    layer_id: int. Integer ID for this layer's variables.
    inputs: Tensor of shape [num_examples, width, height, in_channels]. Each row
      corresponds to a single example.
    kernel_size: int. Width and height of the convolution kernel. The kernel is
      assumed to be square.
    out_channels: int. Number of output features per pixel.

  Returns:
    preactivations: Tensor of shape [num_examples, width, height, out_channels].
      Values of the layer immediately before the activation function.
    activations: Tensor of shape [num_examples, width, height, out_channels].
      Values of the layer immediately after the activation function.
    params: Tuple of (kernel, bias), parameters for this layer.
  """
  # TODO (b/67004004): Delete this function and rely on tf.layers exclusively. id:1036 gh:1037
  conv = tf.layers.Conv2D(
      out_channels,
      kernel_size=[kernel_size, kernel_size],
      kernel_initializer=tf.random_normal_initializer(stddev=0.01),
      padding="SAME",
      name="conv_%d" % layer_id)
  preactivations = conv(inputs)
  activations = tf.nn.relu(preactivations)
  # conv.weights is a list; return the parameters as a (hashable) tuple instead.
  return preactivations, activations, (conv.kernel, conv.bias)
def max_pool_layer(layer_id, inputs, kernel_size, stride):
  """Build a max-pooling layer.

  Args:
    layer_id: int. Integer ID for this layer's variables.
    inputs: Tensor of shape [num_examples, width, height, in_channels]. Each row
      corresponds to a single example.
    kernel_size: int. Width and height to pool over per input channel. The
      kernel is assumed to be square.
    stride: int. Step size between pooling operations.

  Returns:
    Tensor of shape [num_examples, width/stride, height/stride, out_channels].
    Result of applying max pooling to 'inputs'.
  """
  # TODO (b/67004004): Delete this function and rely on tf.layers exclusively. id:893 gh:894
  window = [1, kernel_size, kernel_size, 1]
  strides = [1, stride, stride, 1]
  with tf.variable_scope("pool_%d" % layer_id):
    return tf.nn.max_pool(inputs, window, strides, padding="SAME", name="pool")
def linear_layer(layer_id, inputs, output_size):
  """Builds the final linear layer for an MNIST classification problem.

  Args:
    layer_id: int. Integer ID for this layer's variables.
    inputs: Tensor of shape [num_examples, width, height, in_channels]. Each row
      corresponds to a single example.
    output_size: int. Number of output dims per example.

  Returns:
    activations: Tensor of shape [num_examples, output_size]. Values of the
      layer immediately after the activation function.
    params: Tuple of (weights, bias), parameters for this layer.
  """
  # TODO (b/67004004): Delete this function and rely on tf.layers exclusively. id:688 gh:689
  # Delegate to the MLP helper; discard its activation output, keeping only
  # the preactivations (the logits) and the layer parameters.
  preactivations, _, params = mlp.fc_layer(layer_id, inputs, output_size)
  return preactivations, params
def build_model(examples, labels, num_labels, layer_collection):
  """Builds a ConvNet classification model.

  Args:
    examples: Tensor of shape [num_examples, num_features]. Represents inputs of
      model.
    labels: Tensor of shape [num_examples]. Contains integer IDs to be predicted
      by softmax for each example.
    num_labels: int. Number of distinct values 'labels' can take on.
    layer_collection: LayerCollection instance. Layers will be registered here.

  Returns:
    loss: 0-D Tensor representing loss to be minimized.
    accuracy: 0-D Tensor representing model's accuracy.
  """
  # Build a ConvNet. For each layer with parameters, we'll keep track of the
  # preactivations, activations, weights, and bias.
  tf.logging.info("Building model.")
  pre0, act0, params0 = conv_layer(
      layer_id=0, inputs=examples, kernel_size=5, out_channels=16)
  act1 = max_pool_layer(layer_id=1, inputs=act0, kernel_size=3, stride=2)
  pre2, act2, params2 = conv_layer(
      layer_id=2, inputs=act1, kernel_size=5, out_channels=16)
  act3 = max_pool_layer(layer_id=3, inputs=act2, kernel_size=3, stride=2)
  # Flatten all spatial/channel dims before the final linear classifier.
  flat_act3 = tf.reshape(act3, shape=[-1, int(np.prod(act3.shape[1:4]))])
  logits, params4 = linear_layer(
      layer_id=4, inputs=flat_act3, output_size=num_labels)
  loss = tf.reduce_mean(
      tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=logits))
  accuracy = tf.reduce_mean(
      tf.cast(tf.equal(labels, tf.argmax(logits, axis=1)), dtype=tf.float32))
  tf.summary.scalar("loss", loss)
  tf.summary.scalar("accuracy", accuracy)
  # Register parameters. K-FAC needs to know about the inputs, outputs, and
  # parameters of each conv/fully connected layer and the logits powering the
  # posterior probability over classes.
  tf.logging.info("Building LayerCollection.")
  layer_collection.register_conv2d(params0, (1, 1, 1, 1), "SAME", examples,
                                   pre0)
  layer_collection.register_conv2d(params2, (1, 1, 1, 1), "SAME", act1, pre2)
  layer_collection.register_fully_connected(params4, flat_act3, logits)
  layer_collection.register_categorical_predictive_distribution(
      logits, name="logits")
  return loss, accuracy
def minimize_loss_single_machine(loss,
                                 accuracy,
                                 layer_collection,
                                 session_config=None):
  """Minimize loss with K-FAC on a single machine.

  A single Session is responsible for running all of K-FAC's ops.

  Args:
    loss: 0-D Tensor. Loss to be minimized.
    accuracy: 0-D Tensor. Accuracy of classifier on current minibatch.
    layer_collection: LayerCollection instance describing model architecture.
      Used by K-FAC to construct preconditioner.
    session_config: None or tf.ConfigProto. Configuration for tf.Session().

  Returns:
    final value for 'accuracy'.
  """
  # Train with K-FAC.
  global_step = tf.train.get_or_create_global_step()
  optimizer = opt.KfacOptimizer(
      learning_rate=0.0001,
      cov_ema_decay=0.95,
      damping=0.001,
      layer_collection=layer_collection,
      momentum=0.9)
  train_op = optimizer.minimize(loss, global_step=global_step)

  tf.logging.info("Starting training.")
  with tf.train.MonitoredTrainingSession(config=session_config) as sess:
    while not sess.should_stop():
      # Every step also accumulates covariance statistics via cov_update_op.
      global_step_, loss_, accuracy_, _, _ = sess.run(
          [global_step, loss, accuracy, train_op, optimizer.cov_update_op])

      # Refresh the preconditioner and log progress every 100 steps.
      # (Fix: the original tested `global_step_ % 100 == 0` twice in a row;
      # the duplicated condition is merged into a single check.)
      if global_step_ % 100 == 0:
        sess.run(optimizer.inv_update_op)
        tf.logging.info("global_step: %d | loss: %f | accuracy: %s",
                        global_step_, loss_, accuracy_)

  return accuracy_
def _is_gradient_task(task_id, num_tasks):
"""Returns True if this task should update the weights."""
if num_tasks < 3:
return True
return 0 <= task_id < 0.6 * num_tasks
def _is_cov_update_task(task_id, num_tasks):
"""Returns True if this task should update K-FAC's covariance matrices."""
if num_tasks < 3:
return False
return 0.6 * num_tasks <= task_id < num_tasks - 1
def _is_inv_update_task(task_id, num_tasks):
"""Returns True if this task should update K-FAC's preconditioner."""
if num_tasks < 3:
return False
return task_id == num_tasks - 1
def _num_gradient_tasks(num_tasks):
"""Number of tasks that will update weights."""
if num_tasks < 3:
return num_tasks
return int(np.ceil(0.6 * num_tasks))
def minimize_loss_distributed(task_id, num_worker_tasks, num_ps_tasks, master,
                              checkpoint_dir, loss, accuracy, layer_collection):
  """Minimize loss with a synchronous implementation of K-FAC.

  Different tasks are responsible for different parts of K-FAC's Ops. The first
  60% of tasks update weights; the next 20% accumulate covariance statistics;
  the last 20% invert the matrices used to precondition gradients.

  Args:
    task_id: int. Integer in [0, num_worker_tasks). ID for this worker.
    num_worker_tasks: int. Number of workers in this distributed training setup.
    num_ps_tasks: int. Number of parameter servers holding variables. If 0,
      parameter servers are not used.
    master: string. IP and port of TensorFlow runtime process. Set to empty
      string to run locally.
    checkpoint_dir: string or None. Path to store checkpoints under.
    loss: 0-D Tensor. Loss to be minimized.
    accuracy: dict mapping strings to 0-D Tensors. Additional accuracy to
      run with each step.
    layer_collection: LayerCollection instance describing model architecture.
      Used by K-FAC to construct preconditioner.

  Returns:
    final value for 'accuracy'.

  Raises:
    ValueError: if task_id >= num_worker_tasks.
  """
  # Place variables on parameter servers while building the training graph.
  with tf.device(tf.train.replica_device_setter(num_ps_tasks)):
    global_step = tf.train.get_or_create_global_step()
    optimizer = opt.KfacOptimizer(
        learning_rate=0.0001,
        cov_ema_decay=0.95,
        damping=0.001,
        layer_collection=layer_collection,
        momentum=0.9)
    # Queue of inverse-update ops, dealt out one per step to the inv task.
    inv_update_queue = oq.OpQueue(optimizer.inv_updates_dict.values())
    # Wrap in SyncReplicasOptimizer so gradient workers aggregate synchronously.
    sync_optimizer = tf.train.SyncReplicasOptimizer(
        opt=optimizer,
        replicas_to_aggregate=_num_gradient_tasks(num_worker_tasks))
    train_op = sync_optimizer.minimize(loss, global_step=global_step)
  tf.logging.info("Starting training.")
  is_chief = (task_id == 0)
  hooks = [sync_optimizer.make_session_run_hook(is_chief)]
  with tf.train.MonitoredTrainingSession(
      master=master,
      is_chief=is_chief,
      checkpoint_dir=checkpoint_dir,
      hooks=hooks,
      stop_grace_period_secs=0) as sess:
    while not sess.should_stop():
      # Choose which op this task is responsible for running.
      if _is_gradient_task(task_id, num_worker_tasks):
        learning_op = train_op
      elif _is_cov_update_task(task_id, num_worker_tasks):
        learning_op = optimizer.cov_update_op
      elif _is_inv_update_task(task_id, num_worker_tasks):
        # TODO (duckworthd): Running this op before cov_update_op has been run a id:628 gh:629
        # few times can result in "InvalidArgumentError: Cholesky decomposition
        # was not successful." Delay running this op until cov_update_op has
        # been run a few times.
        learning_op = inv_update_queue.next_op(sess)
      else:
        raise ValueError("Which op should task %d do?" % task_id)
      global_step_, loss_, accuracy_, _ = sess.run(
          [global_step, loss, accuracy, learning_op])
      tf.logging.info("global_step: %d | loss: %f | accuracy: %s", global_step_,
                      loss_, accuracy_)
  return accuracy_
def train_mnist_single_machine(data_dir, num_epochs, use_fake_data=False):
  """Train a ConvNet on MNIST.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  tf.logging.info("Loading MNIST into memory.")
  images, targets = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=128,
      use_fake_data=use_fake_data,
      flatten_images=False)

  # Register the ConvNet's layers with a fresh LayerCollection so K-FAC can
  # construct its preconditioner from them.
  layers = lc.LayerCollection()
  loss, accuracy = build_model(
      images, targets, num_labels=10, layer_collection=layers)

  # Fit the model on a single machine.
  return minimize_loss_single_machine(loss, accuracy, layers)
def train_mnist_multitower(data_dir, num_epochs, num_towers,
                           use_fake_data=True):
  """Train a ConvNet on MNIST.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    num_towers: int. Number of CPUs to split inference across.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  # NOTE(review): this first log line is redundant with the more detailed
  # "Loading MNIST..." message emitted just below.
  tf.logging.info("Loading MNIST into memory.")
  tower_batch_size = 128
  batch_size = tower_batch_size * num_towers
  tf.logging.info(
      ("Loading MNIST into memory. Using batch_size = %d = %d towers * %d "
       "tower batch size.") % (batch_size, num_towers, tower_batch_size))
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=batch_size,
      use_fake_data=use_fake_data,
      flatten_images=False)
  # Split minibatch across towers.
  examples = tf.split(examples, num_towers)
  labels = tf.split(labels, num_towers)
  # Build an MLP. Each tower's layers will be added to the LayerCollection.
  layer_collection = lc.LayerCollection()
  tower_results = []
  for tower_id in range(num_towers):
    with tf.device("/cpu:%d" % tower_id):
      with tf.name_scope("tower%d" % tower_id):
        # reuse=(tower_id > 0) makes every tower after the first share the
        # first tower's variables instead of creating its own.
        with tf.variable_scope(tf.get_variable_scope(), reuse=(tower_id > 0)):
          tf.logging.info("Building tower %d." % tower_id)
          tower_results.append(
              build_model(examples[tower_id], labels[tower_id], 10,
                          layer_collection))
  losses, accuracies = zip(*tower_results)
  # Average across towers.
  loss = tf.reduce_mean(losses)
  accuracy = tf.reduce_mean(accuracies)
  # Fit model.
  # Pin one (virtual) CPU device per tower for the training session.
  session_config = tf.ConfigProto(
      allow_soft_placement=False, device_count={
          "CPU": num_towers
      })
  return minimize_loss_single_machine(
      loss, accuracy, layer_collection, session_config=session_config)
def train_mnist_distributed(task_id,
                            num_worker_tasks,
                            num_ps_tasks,
                            master,
                            data_dir,
                            num_epochs,
                            use_fake_data=False):
  """Train a ConvNet on MNIST.

  Args:
    task_id: int. Integer in [0, num_worker_tasks). ID for this worker.
    num_worker_tasks: int. Number of workers in this distributed training setup.
    num_ps_tasks: int. Number of parameter servers holding variables.
    master: string. IP and port of TensorFlow runtime process.
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  tf.logging.info("Loading MNIST into memory.")
  images, targets = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=128,
      use_fake_data=use_fake_data,
      flatten_images=False)

  # Build the ConvNet with its variables placed on the parameter servers.
  layers = lc.LayerCollection()
  with tf.device(tf.train.replica_device_setter(num_ps_tasks)):
    loss, accuracy = build_model(
        images, targets, num_labels=10, layer_collection=layers)

  # Checkpoints live next to the data, when a data directory was supplied.
  checkpoint_dir = None if data_dir is None else os.path.join(data_dir, "kfac")
  return minimize_loss_distributed(task_id, num_worker_tasks, num_ps_tasks,
                                   master, checkpoint_dir, loss, accuracy,
                                   layers)
if __name__ == "__main__":
  # tf.app.run() parses flags and dispatches to a module-level main().
  # NOTE(review): no main() is visible in this chunk -- confirm one exists
  # elsewhere in the full file.
  tf.app.run()
| [
"numpy.prod",
"tensorflow.split",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.get_variable_scope",
"tensorflow.train.MonitoredTrainingSession",
"tensorflow.reduce_mean",
"tensorflow.app.run",
"tensorflow.random_normal_initializer",
"tensorflow.ConfigProto",
"tensorflow.su... | [((2909, 2935), 'tensorflow.nn.relu', 'tf.nn.relu', (['preactivations'], {}), '(preactivations)\n', (2919, 2935), True, 'import tensorflow as tf\n'), ((4641, 4684), 'tensorflow.contrib.kfac.examples.mlp.fc_layer', 'mlp.fc_layer', (['layer_id', 'inputs', 'output_size'], {}), '(layer_id, inputs, output_size)\n', (4653, 4684), False, 'from tensorflow.contrib.kfac.examples import mlp\n'), ((5446, 5480), 'tensorflow.logging.info', 'tf.logging.info', (['"""Building model."""'], {}), "('Building model.')\n", (5461, 5480), True, 'import tensorflow as tf\n'), ((6229, 6260), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (6246, 6260), True, 'import tensorflow as tf\n'), ((6263, 6302), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (6280, 6302), True, 'import tensorflow as tf\n'), ((6500, 6544), 'tensorflow.logging.info', 'tf.logging.info', (['"""Building LayerCollection."""'], {}), "('Building LayerCollection.')\n", (6515, 6544), True, 'import tensorflow as tf\n'), ((7654, 7690), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (7688, 7690), True, 'import tensorflow as tf\n'), ((7926, 7963), 'tensorflow.logging.info', 'tf.logging.info', (['"""Starting training."""'], {}), "('Starting training.')\n", (7941, 7963), True, 'import tensorflow as tf\n'), ((11083, 11120), 'tensorflow.logging.info', 'tf.logging.info', (['"""Starting training."""'], {}), "('Starting training.')\n", (11098, 11120), True, 'import tensorflow as tf\n'), ((12816, 12861), 'tensorflow.logging.info', 'tf.logging.info', (['"""Loading MNIST into memory."""'], {}), "('Loading MNIST into memory.')\n", (12831, 12861), True, 'import tensorflow as tf\n'), ((12883, 13003), 'tensorflow.contrib.kfac.examples.mnist.load_mnist', 'mnist.load_mnist', (['data_dir'], {'num_epochs': 'num_epochs', 'batch_size': '(128)', 
'use_fake_data': 'use_fake_data', 'flatten_images': '(False)'}), '(data_dir, num_epochs=num_epochs, batch_size=128,\n use_fake_data=use_fake_data, flatten_images=False)\n', (12899, 13003), False, 'from tensorflow.contrib.kfac.examples import mnist\n'), ((13800, 13845), 'tensorflow.logging.info', 'tf.logging.info', (['"""Loading MNIST into memory."""'], {}), "('Loading MNIST into memory.')\n", (13815, 13845), True, 'import tensorflow as tf\n'), ((13918, 14075), 'tensorflow.logging.info', 'tf.logging.info', (["('Loading MNIST into memory. Using batch_size = %d = %d towers * %d tower batch size.'\n % (batch_size, num_towers, tower_batch_size))"], {}), "(\n 'Loading MNIST into memory. Using batch_size = %d = %d towers * %d tower batch size.'\n % (batch_size, num_towers, tower_batch_size))\n", (13933, 14075), True, 'import tensorflow as tf\n'), ((14106, 14233), 'tensorflow.contrib.kfac.examples.mnist.load_mnist', 'mnist.load_mnist', (['data_dir'], {'num_epochs': 'num_epochs', 'batch_size': 'batch_size', 'use_fake_data': 'use_fake_data', 'flatten_images': '(False)'}), '(data_dir, num_epochs=num_epochs, batch_size=batch_size,\n use_fake_data=use_fake_data, flatten_images=False)\n', (14122, 14233), False, 'from tensorflow.contrib.kfac.examples import mnist\n'), ((14310, 14340), 'tensorflow.split', 'tf.split', (['examples', 'num_towers'], {}), '(examples, num_towers)\n', (14318, 14340), True, 'import tensorflow as tf\n'), ((14352, 14380), 'tensorflow.split', 'tf.split', (['labels', 'num_towers'], {}), '(labels, num_towers)\n', (14360, 14380), True, 'import tensorflow as tf\n'), ((15011, 15033), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {}), '(losses)\n', (15025, 15033), True, 'import tensorflow as tf\n'), ((15047, 15073), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['accuracies'], {}), '(accuracies)\n', (15061, 15073), True, 'import tensorflow as tf\n'), ((15109, 15185), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(False)', 
'device_count': "{'CPU': num_towers}"}), "(allow_soft_placement=False, device_count={'CPU': num_towers})\n", (15123, 15185), True, 'import tensorflow as tf\n'), ((16235, 16280), 'tensorflow.logging.info', 'tf.logging.info', (['"""Loading MNIST into memory."""'], {}), "('Loading MNIST into memory.')\n", (16250, 16280), True, 'import tensorflow as tf\n'), ((16302, 16422), 'tensorflow.contrib.kfac.examples.mnist.load_mnist', 'mnist.load_mnist', (['data_dir'], {'num_epochs': 'num_epochs', 'batch_size': '(128)', 'use_fake_data': 'use_fake_data', 'flatten_images': '(False)'}), '(data_dir, num_epochs=num_epochs, batch_size=128,\n use_fake_data=use_fake_data, flatten_images=False)\n', (16318, 16422), False, 'from tensorflow.contrib.kfac.examples import mnist\n'), ((17019, 17031), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (17029, 17031), True, 'import tensorflow as tf\n'), ((3761, 3800), 'tensorflow.variable_scope', 'tf.variable_scope', (["('pool_%d' % layer_id)"], {}), "('pool_%d' % layer_id)\n", (3778, 3800), True, 'import tensorflow as tf\n'), ((3813, 3926), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['inputs', '[1, kernel_size, kernel_size, 1]', '[1, stride, stride, 1]'], {'padding': '"""SAME"""', 'name': '"""pool"""'}), "(inputs, [1, kernel_size, kernel_size, 1], [1, stride, stride,\n 1], padding='SAME', name='pool')\n", (3827, 3926), True, 'import tensorflow as tf\n'), ((6030, 6106), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (6076, 6106), True, 'import tensorflow as tf\n'), ((7971, 8027), 'tensorflow.train.MonitoredTrainingSession', 'tf.train.MonitoredTrainingSession', ([], {'config': 'session_config'}), '(config=session_config)\n', (8004, 8027), True, 'import tensorflow as tf\n'), ((9186, 9210), 'numpy.ceil', 'np.ceil', (['(0.6 * num_tasks)'], {}), '(0.6 * num_tasks)\n', (9193, 9210), True, 'import numpy 
as np\n'), ((10576, 10612), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (10610, 10612), True, 'import tensorflow as tf\n'), ((11215, 11356), 'tensorflow.train.MonitoredTrainingSession', 'tf.train.MonitoredTrainingSession', ([], {'master': 'master', 'is_chief': 'is_chief', 'checkpoint_dir': 'checkpoint_dir', 'hooks': 'hooks', 'stop_grace_period_secs': '(0)'}), '(master=master, is_chief=is_chief,\n checkpoint_dir=checkpoint_dir, hooks=hooks, stop_grace_period_secs=0)\n', (11248, 11356), True, 'import tensorflow as tf\n'), ((16753, 16783), 'os.path.join', 'os.path.join', (['data_dir', '"""kfac"""'], {}), "(data_dir, 'kfac')\n", (16765, 16783), False, 'import os\n'), ((2762, 2803), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (2790, 2803), True, 'import tensorflow as tf\n'), ((10511, 10555), 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', (['num_ps_tasks'], {}), '(num_ps_tasks)\n', (10541, 10555), True, 'import tensorflow as tf\n'), ((12267, 12363), 'tensorflow.logging.info', 'tf.logging.info', (['"""global_step: %d | loss: %f | accuracy: %s"""', 'global_step_', 'loss_', 'accuracy_'], {}), "('global_step: %d | loss: %f | accuracy: %s', global_step_,\n loss_, accuracy_)\n", (12282, 12363), True, 'import tensorflow as tf\n'), ((14567, 14598), 'tensorflow.device', 'tf.device', (["('/cpu:%d' % tower_id)"], {}), "('/cpu:%d' % tower_id)\n", (14576, 14598), True, 'import tensorflow as tf\n'), ((16531, 16575), 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', (['num_ps_tasks'], {}), '(num_ps_tasks)\n', (16561, 16575), True, 'import tensorflow as tf\n'), ((6179, 6204), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (6188, 6204), True, 'import tensorflow as tf\n'), ((8322, 8418), 'tensorflow.logging.info', 'tf.logging.info', (['"""global_step: %d | loss: %f 
| accuracy: %s"""', 'global_step_', 'loss_', 'accuracy_'], {}), "('global_step: %d | loss: %f | accuracy: %s', global_step_,\n loss_, accuracy_)\n", (8337, 8418), True, 'import tensorflow as tf\n'), ((14611, 14646), 'tensorflow.name_scope', 'tf.name_scope', (["('tower%d' % tower_id)"], {}), "('tower%d' % tower_id)\n", (14624, 14646), True, 'import tensorflow as tf\n'), ((5877, 5901), 'numpy.prod', 'np.prod', (['act3.shape[1:4]'], {}), '(act3.shape[1:4])\n', (5884, 5901), True, 'import numpy as np\n'), ((14737, 14785), 'tensorflow.logging.info', 'tf.logging.info', (["('Building tower %d.' % tower_id)"], {}), "('Building tower %d.' % tower_id)\n", (14752, 14785), True, 'import tensorflow as tf\n'), ((14679, 14702), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (14700, 14702), True, 'import tensorflow as tf\n')] |
"""
eureka_train.py
<NAME>, Dec 25 2019
train mask rcnn with eureka data
"""
import transforms as T
from engine import train_one_epoch, evaluate
import utils
import torch
from rock import Dataset
from model import get_rock_model_instance_segmentation
import numpy as np
# Fix the global PyTorch RNG seed so training runs are reproducible.
torch.manual_seed(0)
class ToTensor(object):
    """Convert an HWC numpy image with values in [0, 255] to a CHW float tensor in [0, 1]."""

    def __call__(self, image, target):
        # Scale pixel values to [0, 1], convert to float32, then move the
        # channel axis first (HWC -> CHW), as expected by torchvision models.
        tensor = torch.from_numpy(image / 255.0).float()
        tensor = tensor.permute((2, 0, 1))
        return tensor, target
def get_transform(train):
    """Return the data transform pipeline (currently identical for train and eval)."""
    # torchvision.transforms.functional cannot handle our multi-channel float
    # images, so the pipeline uses the local ToTensor implementation only.
    pipeline = [ToTensor()]
    return T.Compose(pipeline)
def get_mean_std(input_channel, image_mean, image_std):
    """Select per-channel normalization stats for the requested channel layout.

    Args:
        input_channel: channel selector. 8/5/3 take a prefix of the 8-channel
            stats; 6 takes RGB plus the trailing three bands; 4 takes RGB plus
            one synthetic channel averaging the trailing three bands; -3 takes
            only the trailing three bands.
        image_mean: list of 8 per-channel means.
        image_std: list of 8 per-channel standard deviations.

    Returns:
        Tuple (mean, std) of lists matching the selected channel layout.

    Raises:
        ValueError: if ``input_channel`` is not a supported value.
    """
    if input_channel == 8:
        return image_mean, image_std
    if input_channel == 3:
        return image_mean[:3], image_std[:3]
    if input_channel == 5:
        return image_mean[:5], image_std[:5]
    if input_channel == 6:
        # RGB plus the last three bands.
        # Bug fix: the std previously appended the trailing *means*
        # (image_mean[-3:]) instead of the trailing stds.
        return image_mean[:3] + image_mean[-3:], image_std[:3] + image_std[-3:]
    if input_channel == 4:
        # RGB plus one synthetic channel that averages the last three bands.
        return (image_mean[:3] + [np.mean(image_mean[-3:]).tolist()],
                image_std[:3] + [np.mean(image_std[-3:]).tolist()])
    if input_channel == -3:
        # Only the trailing three bands.
        return image_mean[-3:], image_std[-3:]
    # Previously fell through and returned None, causing an obscure failure
    # downstream; fail fast with a clear message instead.
    raise ValueError("unsupported input_channel: {}".format(input_channel))
if __name__ == '__main__':
    # train on the GPU or on the CPU, if a GPU is not available
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # NOTE(review): the device chosen above is immediately overridden with a
    # hard-coded 'cuda:1' -- this crashes on machines without a second GPU.
    device = torch.device('cuda:1')
    # our dataset has three classes only - background, non-damaged, and damaged
    # NOTE(review): the comment above says three classes but num_classes is 2 --
    # confirm whether background is counted separately by the model.
    num_classes = 2
    # Number of input channels fed to the network (see get_mean_std).
    input_c = 5
    # use our dataset and defined transformations
    dataset = Dataset("./datasets/iros/bishop/aug/", transforms=get_transform(train=True), include_name=False, input_channel=input_c)
    ##dataset_test = Dataset("./datasets/Rock/data_test/", transforms=get_transform(train=False), include_name=False, input_channel=input_c)
    #dataset_test = Dataset("./datasets/Rock_test/mult/", transforms=get_transform(train=False), include_name=False, input_channel=input_c)
    dataset_test = Dataset("./datasets/iros/bishop_test/mult_masks/", transforms=get_transform(train=False), include_name=False, input_channel=input_c)
    # Pre-computed 8-channel normalization statistics (see commented call below).
    # image_mean, image_std, _, _ = dataset.imageStat()
    image_mean = [0.2635908247051704, 0.2565450032962188, 0.24311759802366928, 0.3007502338171828, 0.35368477144149774, 0.35639093071269307, 0.5402165474345183, 0.24508291731782375]
    image_std = [0.14736204788409055, 0.13722317885795837, 0.12990199087409235, 0.15134661805921518, 0.16149370275679834, 0.26121346688112296, 0.22545272450120832, 0.2513372955897915]
    # Slice the stats down to the channel layout selected by input_c.
    image_mean, image_std = get_mean_std(input_c, image_mean, image_std)
    # split the dataset in train and test set
    #indices = torch.randperm(len(dataset)).tolist()
    #dataset = torch.utils.data.Subset(dataset, indices)
    #indices_test = torch.randperm(len(dataset_test)).tolist()
    #dataset_test = torch.utils.data.Subset(dataset_test, indices_test)
    # define training and validation data loaders
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=4, shuffle=False, num_workers=8,
        collate_fn=utils.collate_fn)
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1, shuffle=False, num_workers=4,
        collate_fn=utils.collate_fn)
    # get the model using our helper function
    mask_rcnn = get_rock_model_instance_segmentation(num_classes, input_channel=input_c, image_mean=image_mean, image_std=image_std, pretrained=False)
    # Optionally resume from a previous checkpoint (disabled by default).
    read_param = False
    if read_param:
        mask_rcnn.load_state_dict(torch.load("trained_param_6/epoch_0050.param"))
        print("Loaded weights")
    # move model to the right device
    mask_rcnn.to(device)
    # construct an optimizer over trainable parameters only
    params = [p for p in mask_rcnn.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.01, momentum=0.9, weight_decay=0.0005)
    # and a learning rate scheduler: halve the LR every 10 epochs
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=10,
                                                   gamma=0.5)
    init_epoch = 0
    num_epochs = 50
    # Save the (possibly untrained) initial weights before training begins.
    save_param = "trained_param_bishop_ntl_rgb_re_nir/epoch_{:04d}.param".format(init_epoch)
    torch.save(mask_rcnn.state_dict(), save_param)
    #'''
    for epoch in range(init_epoch, init_epoch + num_epochs):
        save_param = "trained_param_bishop_ntl_rgb_re_nir/epoch_{:04d}.param".format(epoch)
        #torch.save(mask_rcnn.state_dict(), save_param)
        # train for one epoch, printing every 10 iterations
        print(save_param)
        train_one_epoch(mask_rcnn, optimizer, data_loader, device, epoch, print_freq=100)
        # update the learning rate
        lr_scheduler.step()
        # evaluate on the test dataset
        #print('\n')
        #print("trained_param_4/epoch_00%02d.param" % epoch)
        #mask_rcnn.load_state_dict(torch.load("trained_param_4/epoch_00%02d.param" % epoch))
        evaluate(mask_rcnn, data_loader_test, device=device)
        #save_param = "trained_param_8_fresh/epoch_{:04d}.param".format(epoch)
        torch.save(mask_rcnn.state_dict(), save_param)
    # The triple-quoted block below is a disabled evaluation-only variant of
    # the loop above (loads saved epochs and evaluates them, no training).
    '''
    for epoch in range(init_epoch, init_epoch + num_epochs):
        #save_param = "trained_param_3_fresh/epoch_{:04d}.param".format(epoch)
        #torch.save(mask_rcnn.state_dict(), save_param)
        # train for one epoch, printing every 10 iterations
        #train_one_epoch(mask_rcnn, optimizer, data_loader, device, epoch, print_freq=100)
        # update the learning rate
        #lr_scheduler.step()
        # evaluate on the test dataset
        print('\n')
        name = "trained_param_8/epoch_00%02d.param" % epoch
        print(name)
        mask_rcnn.load_state_dict(torch.load(name))
        evaluate(mask_rcnn, data_loader_test, device=device)
        #save_param = "trained_param_8_fresh/epoch_{:04d}.param".format(epoch)
        #torch.save(mask_rcnn.state_dict(), save_param)
    '''
| [
"torch.manual_seed",
"torch.optim.SGD",
"numpy.mean",
"torch.load",
"torch.optim.lr_scheduler.StepLR",
"transforms.Compose",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"engine.evaluate",
"engine.train_one_epoch",
"model.get_rock_model_instance_segmentation",
... | [((272, 292), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (289, 292), False, 'import torch\n'), ((681, 702), 'transforms.Compose', 'T.Compose', (['transforms'], {}), '(transforms)\n', (690, 702), True, 'import transforms as T\n'), ((1501, 1523), 'torch.device', 'torch.device', (['"""cuda:1"""'], {}), "('cuda:1')\n", (1513, 1523), False, 'import torch\n'), ((3117, 3230), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(4)', 'shuffle': '(False)', 'num_workers': '(8)', 'collate_fn': 'utils.collate_fn'}), '(dataset, batch_size=4, shuffle=False,\n num_workers=8, collate_fn=utils.collate_fn)\n', (3144, 3230), False, 'import torch\n'), ((3268, 3386), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_test'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(4)', 'collate_fn': 'utils.collate_fn'}), '(dataset_test, batch_size=1, shuffle=False,\n num_workers=4, collate_fn=utils.collate_fn)\n', (3295, 3386), False, 'import torch\n'), ((3462, 3600), 'model.get_rock_model_instance_segmentation', 'get_rock_model_instance_segmentation', (['num_classes'], {'input_channel': 'input_c', 'image_mean': 'image_mean', 'image_std': 'image_std', 'pretrained': '(False)'}), '(num_classes, input_channel=input_c,\n image_mean=image_mean, image_std=image_std, pretrained=False)\n', (3498, 3600), False, 'from model import get_rock_model_instance_segmentation\n'), ((3931, 3998), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': '(0.01)', 'momentum': '(0.9)', 'weight_decay': '(0.0005)'}), '(params, lr=0.01, momentum=0.9, weight_decay=0.0005)\n', (3946, 3998), False, 'import torch\n'), ((4054, 4121), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(10)', 'gamma': '(0.5)'}), '(optimizer, step_size=10, gamma=0.5)\n', (4085, 4121), False, 'import torch\n'), ((1437, 1462), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), 
'()\n', (1460, 1462), False, 'import torch\n'), ((1413, 1433), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1425, 1433), False, 'import torch\n'), ((1468, 1487), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1480, 1487), False, 'import torch\n'), ((4721, 4806), 'engine.train_one_epoch', 'train_one_epoch', (['mask_rcnn', 'optimizer', 'data_loader', 'device', 'epoch'], {'print_freq': '(100)'}), '(mask_rcnn, optimizer, data_loader, device, epoch,\n print_freq=100)\n', (4736, 4806), False, 'from engine import train_one_epoch, evaluate\n'), ((5088, 5140), 'engine.evaluate', 'evaluate', (['mask_rcnn', 'data_loader_test'], {'device': 'device'}), '(mask_rcnn, data_loader_test, device=device)\n', (5096, 5140), False, 'from engine import train_one_epoch, evaluate\n'), ((3674, 3720), 'torch.load', 'torch.load', (['"""trained_param_6/epoch_0050.param"""'], {}), "('trained_param_6/epoch_0050.param')\n", (3684, 3720), False, 'import torch\n'), ((418, 449), 'torch.from_numpy', 'torch.from_numpy', (['(image / 255.0)'], {}), '(image / 255.0)\n', (434, 449), False, 'import torch\n'), ((1144, 1168), 'numpy.mean', 'np.mean', (['image_mean[-3:]'], {}), '(image_mean[-3:])\n', (1151, 1168), True, 'import numpy as np\n'), ((1197, 1220), 'numpy.mean', 'np.mean', (['image_std[-3:]'], {}), '(image_std[-3:])\n', (1204, 1220), True, 'import numpy as np\n')] |
import numpy as np
from typing import Any, Dict, List
nptype = np.float64
class Type:
    """Root of the program-type hierarchy; every other type subclasses this."""

    def __init__(self):
        pass
# Registry mapping each registered type to its single declared supertype.
# NOTE(review): register_supertype stores class objects (Type subclasses),
# not instances, so Dict[Type, Type] is looser than the actual contents --
# TODO confirm the intended annotation.
supertypes: Dict[Type, Type] = {}
# Inverse registry: supertype -> list of its directly registered subtypes.
subtypes: Dict[Type, List[Type]] = {}
def register_supertype(supertype: Any):
    """Class-decorator factory recording *supertype* as the decorated type's
    single declared supertype.

    Populates both the forward registry (``supertypes``) and the inverse one
    (``subtypes``), and returns the decorated type unchanged.

    Raises:
        AssertionError: if the decorated type already has a supertype.
    """
    def decorator(program_type: Type):
        assert (
            program_type not in supertypes
        ), f"{program_type} already has a supertype!"
        supertypes[program_type] = supertype
        # setdefault covers the first-registration case in one step.
        subtypes.setdefault(supertype, []).append(program_type)
        return program_type

    return decorator
def type_and_supertypes(program_type: Type):
    """Return *program_type* followed by its full supertype chain (root last)."""
    chain = all_supertypes(program_type)
    return [program_type, *chain]
def all_supertypes(program_type: Type):
    """Walk the registered supertype chain of *program_type* up to the root.

    Types that were never registered are treated as direct children of
    ``Type``, so the result is always non-empty.
    """
    if program_type in supertypes:
        parent = supertypes[program_type]
        return [parent, *all_supertypes(parent)]
    # Unregistered: the chain ends at the root type.
    return [Type]
def equal_or_supertype(program_type: Type, potential_supertype: Type):
    """True iff *potential_supertype* is the root ``Type``, is *program_type*
    itself, or appears anywhere in its supertype chain."""
    # Guard clauses in the same order as the original boolean expression.
    if potential_supertype == Type:
        return True
    if program_type == potential_supertype:
        return True
    return potential_supertype in all_supertypes(program_type)
class Vector(Type):
    """Type of numeric array values; valid values are finite and of dtype ``nptype``."""

    # Runtime representation of Vector values.
    value_class = np.ndarray

    @classmethod
    def is_valid_value(cls, value):
        """Check that *value* contains no NaN/inf and carries the canonical dtype."""
        # All three checks are evaluated eagerly, matching the original
        # evaluation order before the combined result is returned.
        no_nan = not np.isnan(value).any()
        no_inf = not np.isinf(value).any()
        dtype_ok = value.dtype == nptype
        return no_nan and no_inf and dtype_ok
@register_supertype(Vector)
class Scalar(Vector):
    """Type of single numeric values; a Scalar is a degenerate Vector."""

    # Runtime representation: a NumPy scalar of the canonical dtype.
    value_class = nptype

    @classmethod
    def is_valid_value(cls, value):
        """Check that *value* is finite and carries the canonical dtype.

        Bug fix: the dtype flag ``f3`` was computed but omitted from the
        original return (``f1 and f2``), silently accepting scalars of the
        wrong dtype. Include it, matching ``Vector.is_valid_value``.
        """
        f1 = not np.isnan(value).any()
        f2 = not np.isinf(value).any()
        f3 = value.dtype == nptype
        return f1 and f2 and f3
class DataStructure(Type):
    """Abstract base for types whose values can be materialized empty."""

    def create_empty(self):
        # Subclasses must return a fresh value of this type.
        raise NotImplementedError()
@register_supertype(Scalar)
class Constant(DataStructure):
    """A data structure producing a single fixed scalar value."""

    # Runtime representation shared with Scalar.
    value_class = nptype

    def __init__(self, constant_value):
        """Store the fixed value this Constant always materializes."""
        super().__init__()
        self.constant_value = constant_value

    def create_empty(self):
        # "Empty" for a Constant is its fixed value as a fresh nptype scalar.
        return nptype(self.constant_value)
| [
"numpy.isinf",
"numpy.isnan"
] | [((1313, 1328), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (1321, 1328), True, 'import numpy as np\n'), ((1352, 1367), 'numpy.isinf', 'np.isinf', (['value'], {}), '(value)\n', (1360, 1367), True, 'import numpy as np\n'), ((1589, 1604), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (1597, 1604), True, 'import numpy as np\n'), ((1628, 1643), 'numpy.isinf', 'np.isinf', (['value'], {}), '(value)\n', (1636, 1643), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.